Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/amdxdna/TODO | 1
-rw-r--r-- drivers/accel/amdxdna/aie2_ctx.c | 65
-rw-r--r-- drivers/accel/amdxdna/aie2_message.c | 6
-rw-r--r-- drivers/accel/amdxdna/aie2_msg_priv.h | 10
-rw-r--r-- drivers/accel/amdxdna/aie2_pci.c | 13
-rw-r--r-- drivers/accel/amdxdna/aie2_psp.c | 4
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.c | 22
-rw-r--r-- drivers/accel/amdxdna/amdxdna_gem.c | 411
-rw-r--r-- drivers/accel/amdxdna/amdxdna_gem.h | 24
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pci_drv.c | 11
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pci_drv.h | 2
-rw-r--r-- drivers/accel/habanalabs/Kconfig | 2
-rw-r--r-- drivers/accel/habanalabs/common/habanalabs_ioctl.c | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_debugfs.c | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.c | 4
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.h | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.c | 12
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.h | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.c | 101
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_job.c | 14
-rw-r--r-- drivers/accel/ivpu/ivpu_jsm_msg.c | 9
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.c | 20
-rw-r--r-- drivers/accel/qaic/Kconfig | 1
-rw-r--r-- drivers/accel/qaic/qaic_data.c | 8
-rw-r--r-- drivers/accel/qaic/qaic_debugfs.c | 2
-rw-r--r-- drivers/accel/qaic/qaic_timesync.c | 2
-rw-r--r-- drivers/acpi/Kconfig | 3
-rw-r--r-- drivers/acpi/Makefile | 1
-rw-r--r-- drivers/acpi/acpi_extlog.c | 3
-rw-r--r-- drivers/acpi/acpi_lpit.c | 2
-rw-r--r-- drivers/acpi/acpi_mrrm.c | 185
-rw-r--r-- drivers/acpi/acpi_pad.c | 4
-rw-r--r-- drivers/acpi/acpi_pcc.c | 13
-rw-r--r-- drivers/acpi/acpica/acapps.h | 4
-rw-r--r-- drivers/acpi/acpica/accommon.h | 2
-rw-r--r-- drivers/acpi/acpica/acconvert.h | 2
-rw-r--r-- drivers/acpi/acpica/acdebug.h | 4
-rw-r--r-- drivers/acpi/acpica/acdispat.h | 2
-rw-r--r-- drivers/acpi/acpica/acevents.h | 2
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 2
-rw-r--r-- drivers/acpi/acpica/achware.h | 2
-rw-r--r-- drivers/acpi/acpica/acinterp.h | 5
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 6
-rw-r--r-- drivers/acpi/acpica/acmacros.h | 2
-rw-r--r-- drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r-- drivers/acpi/acpica/acobject.h | 2
-rw-r--r-- drivers/acpi/acpica/acopcode.h | 2
-rw-r--r-- drivers/acpi/acpica/acparser.h | 2
-rw-r--r-- drivers/acpi/acpica/acpredef.h | 2
-rw-r--r-- drivers/acpi/acpica/acresrc.h | 2
-rw-r--r-- drivers/acpi/acpica/acstruct.h | 2
-rw-r--r-- drivers/acpi/acpica/actables.h | 2
-rw-r--r-- drivers/acpi/acpica/acutils.h | 2
-rw-r--r-- drivers/acpi/acpica/amlcode.h | 2
-rw-r--r-- drivers/acpi/acpica/amlresrc.h | 10
-rw-r--r-- drivers/acpi/acpica/dbhistry.c | 2
-rw-r--r-- drivers/acpi/acpica/dsargs.c | 2
-rw-r--r-- drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r-- drivers/acpi/acpica/dsdebug.c | 2
-rw-r--r-- drivers/acpi/acpica/dsfield.c | 2
-rw-r--r-- drivers/acpi/acpica/dsinit.c | 2
-rw-r--r-- drivers/acpi/acpica/dsmethod.c | 9
-rw-r--r-- drivers/acpi/acpica/dsmthdat.c | 1
-rw-r--r-- drivers/acpi/acpica/dsobject.c | 2
-rw-r--r-- drivers/acpi/acpica/dsopcode.c | 2
-rw-r--r-- drivers/acpi/acpica/dspkginit.c | 2
-rw-r--r-- drivers/acpi/acpica/dsutils.c | 9
-rw-r--r-- drivers/acpi/acpica/dswexec.c | 2
-rw-r--r-- drivers/acpi/acpica/dswload.c | 2
-rw-r--r-- drivers/acpi/acpica/dswload2.c | 2
-rw-r--r-- drivers/acpi/acpica/dswscope.c | 2
-rw-r--r-- drivers/acpi/acpica/dswstate.c | 2
-rw-r--r-- drivers/acpi/acpica/evevent.c | 2
-rw-r--r-- drivers/acpi/acpica/evglock.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeblk.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r-- drivers/acpi/acpica/evgpeutil.c | 2
-rw-r--r-- drivers/acpi/acpica/evhandler.c | 2
-rw-r--r-- drivers/acpi/acpica/evmisc.c | 2
-rw-r--r-- drivers/acpi/acpica/evregion.c | 2
-rw-r--r-- drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r-- drivers/acpi/acpica/evxface.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfevnt.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/evxfregn.c | 2
-rw-r--r-- drivers/acpi/acpica/exconcat.c | 2
-rw-r--r-- drivers/acpi/acpica/exconfig.c | 2
-rw-r--r-- drivers/acpi/acpica/exconvrt.c | 6
-rw-r--r-- drivers/acpi/acpica/excreate.c | 2
-rw-r--r-- drivers/acpi/acpica/exdebug.c | 2
-rw-r--r-- drivers/acpi/acpica/exdump.c | 2
-rw-r--r-- drivers/acpi/acpica/exfield.c | 2
-rw-r--r-- drivers/acpi/acpica/exfldio.c | 2
-rw-r--r-- drivers/acpi/acpica/exmisc.c | 2
-rw-r--r-- drivers/acpi/acpica/exmutex.c | 2
-rw-r--r-- drivers/acpi/acpica/exnames.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg1.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg3.c | 2
-rw-r--r-- drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r-- drivers/acpi/acpica/exprep.c | 2
-rw-r--r-- drivers/acpi/acpica/exregion.c | 2
-rw-r--r-- drivers/acpi/acpica/exresnte.c | 2
-rw-r--r-- drivers/acpi/acpica/exresolv.c | 2
-rw-r--r-- drivers/acpi/acpica/exresop.c | 2
-rw-r--r-- drivers/acpi/acpica/exserial.c | 8
-rw-r--r-- drivers/acpi/acpica/exstore.c | 2
-rw-r--r-- drivers/acpi/acpica/exstoren.c | 2
-rw-r--r-- drivers/acpi/acpica/exstorob.c | 2
-rw-r--r-- drivers/acpi/acpica/exsystem.c | 2
-rw-r--r-- drivers/acpi/acpica/extrace.c | 53
-rw-r--r-- drivers/acpi/acpica/exutils.c | 2
-rw-r--r-- drivers/acpi/acpica/hwacpi.c | 2
-rw-r--r-- drivers/acpi/acpica/hwesleep.c | 2
-rw-r--r-- drivers/acpi/acpica/hwgpe.c | 2
-rw-r--r-- drivers/acpi/acpica/hwsleep.c | 2
-rw-r--r-- drivers/acpi/acpica/hwtimer.c | 2
-rw-r--r-- drivers/acpi/acpica/hwvalid.c | 2
-rw-r--r-- drivers/acpi/acpica/hwxface.c | 2
-rw-r--r-- drivers/acpi/acpica/hwxfsleep.c | 2
-rw-r--r-- drivers/acpi/acpica/nsarguments.c | 2
-rw-r--r-- drivers/acpi/acpica/nsconvert.c | 2
-rw-r--r-- drivers/acpi/acpica/nsdump.c | 2
-rw-r--r-- drivers/acpi/acpica/nsdumpdv.c | 2
-rw-r--r-- drivers/acpi/acpica/nsinit.c | 2
-rw-r--r-- drivers/acpi/acpica/nsload.c | 2
-rw-r--r-- drivers/acpi/acpica/nsnames.c | 2
-rw-r--r-- drivers/acpi/acpica/nsparse.c | 2
-rw-r--r-- drivers/acpi/acpica/nspredef.c | 2
-rw-r--r-- drivers/acpi/acpica/nsprepkg.c | 2
-rw-r--r-- drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r-- drivers/acpi/acpica/nsrepair2.c | 4
-rw-r--r-- drivers/acpi/acpica/nsutils.c | 2
-rw-r--r-- drivers/acpi/acpica/nswalk.c | 2
-rw-r--r-- drivers/acpi/acpica/nsxfname.c | 2
-rw-r--r-- drivers/acpi/acpica/psargs.c | 2
-rw-r--r-- drivers/acpi/acpica/psloop.c | 2
-rw-r--r-- drivers/acpi/acpica/psobject.c | 54
-rw-r--r-- drivers/acpi/acpica/psopcode.c | 2
-rw-r--r-- drivers/acpi/acpica/psopinfo.c | 2
-rw-r--r-- drivers/acpi/acpica/psparse.c | 2
-rw-r--r-- drivers/acpi/acpica/psscope.c | 2
-rw-r--r-- drivers/acpi/acpica/pstree.c | 2
-rw-r--r-- drivers/acpi/acpica/psutils.c | 2
-rw-r--r-- drivers/acpi/acpica/pswalk.c | 2
-rw-r--r-- drivers/acpi/acpica/psxface.c | 2
-rw-r--r-- drivers/acpi/acpica/rsaddr.c | 13
-rw-r--r-- drivers/acpi/acpica/rscalc.c | 22
-rw-r--r-- drivers/acpi/acpica/rslist.c | 12
-rw-r--r-- drivers/acpi/acpica/tbdata.c | 2
-rw-r--r-- drivers/acpi/acpica/tbfadt.c | 2
-rw-r--r-- drivers/acpi/acpica/tbfind.c | 6
-rw-r--r-- drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r-- drivers/acpi/acpica/tbprint.c | 2
-rw-r--r-- drivers/acpi/acpica/tbutils.c | 2
-rw-r--r-- drivers/acpi/acpica/tbxface.c | 2
-rw-r--r-- drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r-- drivers/acpi/acpica/tbxfroot.c | 2
-rw-r--r-- drivers/acpi/acpica/utaddress.c | 2
-rw-r--r-- drivers/acpi/acpica/utalloc.c | 2
-rw-r--r-- drivers/acpi/acpica/utascii.c | 2
-rw-r--r-- drivers/acpi/acpica/utbuffer.c | 2
-rw-r--r-- drivers/acpi/acpica/utcache.c | 4
-rw-r--r-- drivers/acpi/acpica/utcksum.c | 2
-rw-r--r-- drivers/acpi/acpica/utcopy.c | 2
-rw-r--r-- drivers/acpi/acpica/utdebug.c | 2
-rw-r--r-- drivers/acpi/acpica/utdecode.c | 2
-rw-r--r-- drivers/acpi/acpica/utdelete.c | 2
-rw-r--r-- drivers/acpi/acpica/uteval.c | 2
-rw-r--r-- drivers/acpi/acpica/utglobal.c | 2
-rw-r--r-- drivers/acpi/acpica/uthex.c | 2
-rw-r--r-- drivers/acpi/acpica/utids.c | 2
-rw-r--r-- drivers/acpi/acpica/utinit.c | 2
-rw-r--r-- drivers/acpi/acpica/utlock.c | 2
-rw-r--r-- drivers/acpi/acpica/utobject.c | 2
-rw-r--r-- drivers/acpi/acpica/utosi.c | 2
-rw-r--r-- drivers/acpi/acpica/utpredef.c | 2
-rw-r--r-- drivers/acpi/acpica/utprint.c | 9
-rw-r--r-- drivers/acpi/acpica/utresrc.c | 14
-rw-r--r-- drivers/acpi/acpica/uttrack.c | 2
-rw-r--r-- drivers/acpi/acpica/utuuid.c | 2
-rw-r--r-- drivers/acpi/acpica/utxface.c | 2
-rw-r--r-- drivers/acpi/acpica/utxfinit.c | 2
-rw-r--r-- drivers/acpi/apei/Kconfig | 1
-rw-r--r-- drivers/acpi/apei/einj-core.c | 63
-rw-r--r-- drivers/acpi/apei/ghes.c | 4
-rw-r--r-- drivers/acpi/battery.c | 23
-rw-r--r-- drivers/acpi/bus.c | 6
-rw-r--r-- drivers/acpi/cppc_acpi.c | 331
-rw-r--r-- drivers/acpi/ec.c | 23
-rw-r--r-- drivers/acpi/internal.h | 6
-rw-r--r-- drivers/acpi/irq.c | 16
-rw-r--r-- drivers/acpi/numa/srat.c | 15
-rw-r--r-- drivers/acpi/osi.c | 1
-rw-r--r-- drivers/acpi/pci_root.c | 2
-rw-r--r-- drivers/acpi/platform_profile.c | 3
-rw-r--r-- drivers/acpi/pptt.c | 11
-rw-r--r-- drivers/acpi/processor_driver.c | 3
-rw-r--r-- drivers/acpi/processor_idle.c | 22
-rw-r--r-- drivers/acpi/processor_perflib.c | 1
-rw-r--r-- drivers/acpi/processor_throttling.c | 5
-rw-r--r-- drivers/acpi/resource.c | 9
-rw-r--r-- drivers/acpi/tables.c | 8
-rw-r--r-- drivers/acpi/thermal.c | 10
-rw-r--r-- drivers/acpi/viot.c | 2
-rw-r--r-- drivers/android/binder.c | 248
-rw-r--r-- drivers/android/binder_internal.h | 8
-rw-r--r-- drivers/android/binderfs.c | 6
-rw-r--r-- drivers/ata/ahci.c | 39
-rw-r--r-- drivers/ata/libahci.c | 2
-rw-r--r-- drivers/ata/libata-acpi.c | 24
-rw-r--r-- drivers/ata/libata-core.c | 6
-rw-r--r-- drivers/ata/libata-eh.c | 41
-rw-r--r-- drivers/ata/libata-sata.c | 12
-rw-r--r-- drivers/ata/libata-scsi.c | 31
-rw-r--r-- drivers/ata/libata.h | 3
-rw-r--r-- drivers/ata/pata_cs5536.c | 2
-rw-r--r-- drivers/ata/pata_macio.c | 2
-rw-r--r-- drivers/ata/pata_via.c | 9
-rw-r--r-- drivers/ata/sata_sx4.c | 30
-rw-r--r-- drivers/atm/atmtcp.c | 4
-rw-r--r-- drivers/atm/idt77252.c | 4
-rw-r--r-- drivers/atm/lanai.c | 2
-rw-r--r-- drivers/auxdisplay/line-display.c | 2
-rw-r--r-- drivers/base/arch_topology.c | 52
-rw-r--r-- drivers/base/auxiliary.c | 108
-rw-r--r-- drivers/base/component.c | 3
-rw-r--r-- drivers/base/cpu.c | 6
-rw-r--r-- drivers/base/devres.c | 20
-rw-r--r-- drivers/base/faux.c | 25
-rw-r--r-- drivers/base/firmware_loader/Kconfig | 4
-rw-r--r-- drivers/base/firmware_loader/main.c | 34
-rw-r--r-- drivers/base/memory.c | 51
-rw-r--r-- drivers/base/node.c | 11
-rw-r--r-- drivers/base/platform-msi.c | 1
-rw-r--r-- drivers/base/platform.c | 2
-rw-r--r-- drivers/base/power/main.c | 228
-rw-r--r-- drivers/base/power/runtime.c | 46
-rw-r--r-- drivers/base/power/sysfs.c | 15
-rw-r--r-- drivers/base/power/wakeup.c | 16
-rw-r--r-- drivers/base/power/wakeup_stats.c | 2
-rw-r--r-- drivers/base/property.c | 39
-rw-r--r-- drivers/base/regmap/Kconfig | 4
-rw-r--r-- drivers/base/regmap/regcache.c | 13
-rw-r--r-- drivers/base/regmap/regmap-irq.c | 103
-rw-r--r-- drivers/base/swnode.c | 2
-rw-r--r-- drivers/base/topology.c | 52
-rw-r--r-- drivers/bcma/driver_gpio.c | 8
-rw-r--r-- drivers/block/Kconfig | 19
-rw-r--r-- drivers/block/Makefile | 1
-rw-r--r-- drivers/block/aoe/aoe.h | 1
-rw-r--r-- drivers/block/aoe/aoecmd.c | 10
-rw-r--r-- drivers/block/aoe/aoedev.c | 15
-rw-r--r-- drivers/block/brd.c | 225
-rw-r--r-- drivers/block/drbd/drbd_main.c | 3
-rw-r--r-- drivers/block/drbd/drbd_req.c | 3
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 6
-rw-r--r-- drivers/block/loop.c | 22
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 7
-rw-r--r-- drivers/block/pktcdvd.c | 2
-rw-r--r-- drivers/block/rnbd/rnbd-srv.c | 7
-rw-r--r-- drivers/block/swim3.c | 8
-rw-r--r-- drivers/block/ublk_drv.c | 683
-rw-r--r-- drivers/block/virtio_blk.c | 4
-rw-r--r-- drivers/block/zloop.c | 1385
-rw-r--r-- drivers/block/zram/backend_deflate.c | 12
-rw-r--r-- drivers/block/zram/backend_lz4.c | 2
-rw-r--r-- drivers/block/zram/backend_lz4hc.c | 2
-rw-r--r-- drivers/block/zram/backend_zstd.c | 2
-rw-r--r-- drivers/block/zram/zcomp.h | 9
-rw-r--r-- drivers/block/zram/zram_drv.c | 352
-rw-r--r-- drivers/bluetooth/Kconfig | 12
-rw-r--r-- drivers/bluetooth/bluecard_cs.c | 2
-rw-r--r-- drivers/bluetooth/btintel.c | 13
-rw-r--r-- drivers/bluetooth/btintel.h | 6
-rw-r--r-- drivers/bluetooth/btintel_pcie.c | 172
-rw-r--r-- drivers/bluetooth/btintel_pcie.h | 29
-rw-r--r-- drivers/bluetooth/btmrvl_sdio.c | 4
-rw-r--r-- drivers/bluetooth/btmtksdio.c | 2
-rw-r--r-- drivers/bluetooth/btnxpuart.c | 62
-rw-r--r-- drivers/bluetooth/btusb.c | 302
-rw-r--r-- drivers/bluetooth/hci_aml.c | 3
-rw-r--r-- drivers/bluetooth/hci_bcsp.c | 2
-rw-r--r-- drivers/bluetooth/hci_h5.c | 2
-rw-r--r-- drivers/bluetooth/hci_qca.c | 18
-rw-r--r-- drivers/bus/brcmstb_gisb.c | 10
-rw-r--r-- drivers/bus/fsl-mc/dprc-driver.c | 2
-rw-r--r-- drivers/bus/fsl-mc/dprc.c | 4
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-allocator.c | 21
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 12
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-private.h | 2
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-uapi.c | 11
-rw-r--r-- drivers/bus/fsl-mc/mc-io.c | 19
-rw-r--r-- drivers/bus/fsl-mc/mc-sys.c | 2
-rw-r--r-- drivers/bus/mhi/ep/ring.c | 16
-rw-r--r-- drivers/bus/mhi/host/pci_generic.c | 46
-rw-r--r-- drivers/bus/mhi/host/pm.c | 18
-rw-r--r-- drivers/bus/moxtet.c | 6
-rw-r--r-- drivers/bus/ti-sysc.c | 68
-rw-r--r-- drivers/cache/sifive_ccache.c | 2
-rw-r--r-- drivers/cdrom/cdrom.c | 3
-rw-r--r-- drivers/cdx/cdx_msi.c | 4
-rw-r--r-- drivers/char/Kconfig | 2
-rw-r--r-- drivers/char/agp/amd64-agp.c | 2
-rw-r--r-- drivers/char/agp/intel-gtt.c | 55
-rw-r--r-- drivers/char/agp/nvidia-agp.c | 1
-rw-r--r-- drivers/char/apm-emulation.c | 5
-rw-r--r-- drivers/char/hpet.c | 3
-rw-r--r-- drivers/char/hw_random/atmel-rng.c | 11
-rw-r--r-- drivers/char/hw_random/mtk-rng.c | 9
-rw-r--r-- drivers/char/hw_random/npcm-rng.c | 9
-rw-r--r-- drivers/char/hw_random/rockchip-rng.c | 73
-rw-r--r-- drivers/char/hw_random/xgene-rng.c | 2
-rw-r--r-- drivers/char/ipmi/bt-bmc.c | 2
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 761
-rw-r--r-- drivers/char/ipmi/ipmi_si.h | 10
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 119
-rw-r--r-- drivers/char/ipmi/ipmi_si_parisc.c | 2
-rw-r--r-- drivers/char/ipmi/ipmi_si_pci.c | 52
-rw-r--r-- drivers/char/ipmi/ipmi_si_platform.c | 27
-rw-r--r-- drivers/char/ipmi/ipmi_ssif.c | 12
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 92
-rw-r--r-- drivers/char/ipmi/ssif_bmc.c | 3
-rw-r--r-- drivers/char/mem.c | 18
-rw-r--r-- drivers/char/misc.c | 28
-rw-r--r-- drivers/char/random.c | 56
-rw-r--r-- drivers/char/tpm/Kconfig | 10
-rw-r--r-- drivers/char/tpm/Makefile | 1
-rw-r--r-- drivers/char/tpm/eventlog/tpm1.c | 7
-rw-r--r-- drivers/char/tpm/tpm-buf.c | 6
-rw-r--r-- drivers/char/tpm/tpm-dev-common.c | 2
-rw-r--r-- drivers/char/tpm/tpm2-sessions.c | 20
-rw-r--r-- drivers/char/tpm/tpm_crb_ffa.c | 74
-rw-r--r-- drivers/char/tpm/tpm_svsm.c | 125
-rw-r--r-- drivers/char/tpm/tpm_tis_core.h | 2
-rw-r--r-- drivers/char/xillybus/xillybus_core.c | 3
-rw-r--r-- drivers/clk/Kconfig | 1
-rw-r--r-- drivers/clk/Makefile | 1
-rw-r--r-- drivers/clk/bcm/clk-kona.c | 18
-rw-r--r-- drivers/clk/bcm/clk-kona.h | 2
-rw-r--r-- drivers/clk/bcm/clk-raspberrypi.c | 3
-rw-r--r-- drivers/clk/clk-s2mps11.c | 3
-rw-r--r-- drivers/clk/davinci/pll.c | 26
-rw-r--r-- drivers/clk/meson/Kconfig | 16
-rw-r--r-- drivers/clk/meson/g12a.c | 1
-rw-r--r-- drivers/clk/qcom/apcs-sdx55.c | 6
-rw-r--r-- drivers/clk/qcom/camcc-sa8775p.c | 103
-rw-r--r-- drivers/clk/qcom/camcc-sm6350.c | 18
-rw-r--r-- drivers/clk/qcom/clk-rpmh.c | 11
-rw-r--r-- drivers/clk/qcom/dispcc-sm6350.c | 3
-rw-r--r-- drivers/clk/qcom/gcc-msm8939.c | 4
-rw-r--r-- drivers/clk/qcom/gcc-sm6350.c | 6
-rw-r--r-- drivers/clk/qcom/gcc-sm8650.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-sm8750.c | 3
-rw-r--r-- drivers/clk/qcom/gcc-x1e80100.c | 4
-rw-r--r-- drivers/clk/qcom/gpucc-sm6350.c | 6
-rw-r--r-- drivers/clk/renesas/Kconfig | 5
-rw-r--r-- drivers/clk/renesas/Makefile | 1
-rw-r--r-- drivers/clk/renesas/r9a09g047-cpg.c | 52
-rw-r--r-- drivers/clk/renesas/r9a09g056-cpg.c | 152
-rw-r--r-- drivers/clk/renesas/r9a09g057-cpg.c | 36
-rw-r--r-- drivers/clk/renesas/renesas-cpg-mssr.c | 3
-rw-r--r-- drivers/clk/renesas/rzg2l-cpg.c | 3
-rw-r--r-- drivers/clk/renesas/rzv2h-cpg.c | 186
-rw-r--r-- drivers/clk/renesas/rzv2h-cpg.h | 94
-rw-r--r-- drivers/clk/rockchip/Makefile | 1
-rw-r--r-- drivers/clk/rockchip/clk-gate-grf.c | 105
-rw-r--r-- drivers/clk/rockchip/clk-mmc-phase.c | 24
-rw-r--r-- drivers/clk/rockchip/clk-pll.c | 11
-rw-r--r-- drivers/clk/rockchip/clk-rk3036.c | 11
-rw-r--r-- drivers/clk/rockchip/clk-rk3288.c | 2
-rw-r--r-- drivers/clk/rockchip/clk-rk3328.c | 6
-rw-r--r-- drivers/clk/rockchip/clk-rk3528.c | 83
-rw-r--r-- drivers/clk/rockchip/clk-rk3568.c | 3
-rw-r--r-- drivers/clk/rockchip/clk-rk3576.c | 62
-rw-r--r-- drivers/clk/rockchip/clk-rk3588.c | 1
-rw-r--r-- drivers/clk/rockchip/clk-rv1126.c | 2
-rw-r--r-- drivers/clk/rockchip/clk.c | 38
-rw-r--r-- drivers/clk/rockchip/clk.h | 75
-rw-r--r-- drivers/clk/samsung/clk-exynos4.c | 74
-rw-r--r-- drivers/clk/samsung/clk-exynosautov920.c | 338
-rw-r--r-- drivers/clk/socfpga/clk-pll-s10.c | 6
-rw-r--r-- drivers/clk/socfpga/clk-pll.c | 4
-rw-r--r-- drivers/clk/sophgo/Kconfig | 19
-rw-r--r-- drivers/clk/sophgo/Makefile | 2
-rw-r--r-- drivers/clk/sophgo/clk-cv1800.c | 2
-rw-r--r-- drivers/clk/sophgo/clk-sg2044-pll.c | 628
-rw-r--r-- drivers/clk/sophgo/clk-sg2044.c | 1812
-rw-r--r-- drivers/clk/spacemit/Kconfig | 18
-rw-r--r-- drivers/clk/spacemit/Makefile | 5
-rw-r--r-- drivers/clk/spacemit/ccu-k1.c | 1164
-rw-r--r-- drivers/clk/spacemit/ccu_common.h | 48
-rw-r--r-- drivers/clk/spacemit/ccu_ddn.c | 83
-rw-r--r-- drivers/clk/spacemit/ccu_ddn.h | 48
-rw-r--r-- drivers/clk/spacemit/ccu_mix.c | 268
-rw-r--r-- drivers/clk/spacemit/ccu_mix.h | 218
-rw-r--r-- drivers/clk/spacemit/ccu_pll.c | 157
-rw-r--r-- drivers/clk/spacemit/ccu_pll.h | 86
-rw-r--r-- drivers/clk/sunxi-ng/Kconfig | 48
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun20i-d1.c | 44
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h616.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-de2.c | 25
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mp.h | 25
-rw-r--r-- drivers/clk/sunxi/Kconfig | 10
-rw-r--r-- drivers/clk/thead/clk-th1520-ap.c | 196
-rw-r--r-- drivers/clocksource/Kconfig | 20
-rw-r--r-- drivers/clocksource/Makefile | 2
-rw-r--r-- drivers/clocksource/renesas-ostm.c | 4
-rw-r--r-- drivers/clocksource/timer-econet-en751221.c | 216
-rw-r--r-- drivers/clocksource/timer-nxp-stm.c | 495
-rw-r--r-- drivers/clocksource/timer-stm32-lp.c | 61
-rw-r--r-- drivers/clocksource/timer-tegra186.c | 100
-rw-r--r-- drivers/comedi/comedi_buf.c | 155
-rw-r--r-- drivers/comedi/comedi_fops.c | 120
-rw-r--r-- drivers/comedi/drivers/adl_pci9118.c | 4
-rw-r--r-- drivers/comedi/drivers/comedi_test.c | 6
-rw-r--r-- drivers/comedi/drivers/das16.c | 3
-rw-r--r-- drivers/comedi/drivers/jr3_pci.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_atmio.c | 2
-rw-r--r-- drivers/comedi/drivers/ni_pcidio.c | 2
-rw-r--r-- drivers/counter/interrupt-cnt.c | 17
-rw-r--r-- drivers/counter/microchip-tcb-capture.c | 25
-rw-r--r-- drivers/counter/stm32-timer-cnt.c | 7
-rw-r--r-- drivers/cpufreq/Kconfig | 12
-rw-r--r-- drivers/cpufreq/Makefile | 1
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 10
-rw-r--r-- drivers/cpufreq/amd-pstate-ut.c | 29
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 147
-rw-r--r-- drivers/cpufreq/amd-pstate.h | 3
-rw-r--r-- drivers/cpufreq/amd_freq_sensitivity.c | 2
-rw-r--r-- drivers/cpufreq/cppc_cpufreq.c | 109
-rw-r--r-- drivers/cpufreq/cpufreq.c | 463
-rw-r--r-- drivers/cpufreq/e_powersaver.c | 6
-rw-r--r-- drivers/cpufreq/elanfreq.c | 1
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 289
-rw-r--r-- drivers/cpufreq/longhaul.c | 24
-rw-r--r-- drivers/cpufreq/powernow-k7.c | 14
-rw-r--r-- drivers/cpufreq/powernow-k8.c | 2
-rw-r--r-- drivers/cpufreq/powernv-cpufreq.c | 3
-rw-r--r-- drivers/cpufreq/rcpufreq_dt.rs | 226
-rw-r--r-- drivers/cpufreq/sc520_freq.c | 1
-rw-r--r-- drivers/cpufreq/scmi-cpufreq.c | 36
-rw-r--r-- drivers/cpuidle/cpuidle-psci-domain.c | 2
-rw-r--r-- drivers/cpuidle/cpuidle-psci.c | 82
-rw-r--r-- drivers/cpuidle/cpuidle-psci.h | 4
-rw-r--r-- drivers/cpuidle/governors/menu.c | 2
-rw-r--r-- drivers/cpuidle/governors/teo.c | 4
-rw-r--r-- drivers/crypto/Kconfig | 8
-rw-r--r-- drivers/crypto/Makefile | 4
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 56
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 17
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 177
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 49
-rw-r--r-- drivers/crypto/amcc/crypto4xx_alg.c | 110
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 45
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.h | 17
-rw-r--r-- drivers/crypto/atmel-aes.c | 5
-rw-r--r-- drivers/crypto/atmel-sha.c | 6
-rw-r--r-- drivers/crypto/atmel-tdes.c | 2
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 2
-rw-r--r-- drivers/crypto/caam/ctrl.c | 1
-rw-r--r-- drivers/crypto/cavium/Makefile | 3
-rw-r--r-- drivers/crypto/cavium/zip/Makefile | 12
-rw-r--r-- drivers/crypto/cavium/zip/common.h | 222
-rw-r--r-- drivers/crypto/cavium/zip/zip_crypto.c | 261
-rw-r--r-- drivers/crypto/cavium/zip/zip_crypto.h | 68
-rw-r--r-- drivers/crypto/cavium/zip/zip_deflate.c | 200
-rw-r--r-- drivers/crypto/cavium/zip/zip_deflate.h | 62
-rw-r--r-- drivers/crypto/cavium/zip/zip_device.c | 202
-rw-r--r-- drivers/crypto/cavium/zip/zip_device.h | 108
-rw-r--r-- drivers/crypto/cavium/zip/zip_inflate.c | 223
-rw-r--r-- drivers/crypto/cavium/zip/zip_inflate.h | 62
-rw-r--r-- drivers/crypto/cavium/zip/zip_main.c | 603
-rw-r--r-- drivers/crypto/cavium/zip/zip_main.h | 120
-rw-r--r-- drivers/crypto/cavium/zip/zip_mem.c | 114
-rw-r--r-- drivers/crypto/cavium/zip/zip_mem.h | 78
-rw-r--r-- drivers/crypto/cavium/zip/zip_regs.h | 1347
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes.c | 15
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-des3.c | 13
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-main.c | 13
-rw-r--r-- drivers/crypto/ccp/ccp-ops.c | 11
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 254
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 3
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 4
-rw-r--r-- drivers/crypto/img-hash.c | 41
-rw-r--r-- drivers/crypto/inside-secure/eip93/eip93-hash.c | 20
-rw-r--r-- drivers/crypto/inside-secure/safexcel_hash.c | 2
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_main.c | 87
-rw-r--r-- drivers/crypto/intel/qat/Kconfig | 12
-rw-r--r-- drivers/crypto/intel/qat/Makefile | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 8
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/adf_drv.c | 10
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 12
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 14
-rw-r--r-- drivers/crypto/intel/qat/qat_6xxx/Makefile | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c | 845
-rw-r--r-- drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h | 148
-rw-r--r-- drivers/crypto/intel/qat/qat_6xxx/adf_drv.c | 226
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxx/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c | 41
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxxvf/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_c62x/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_c62x/adf_drv.c | 41
-rw-r--r-- drivers/crypto/intel/qat/qat_c62xvf/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/Makefile | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 24
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_admin.c | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_cfg_common.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_cfg_services.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_cfg_services.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_dc.c (renamed from drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c) | 50
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_dc.h | 17
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_fw_config.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h | 10
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c | 57
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_config.c | 6
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_config.h | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c | 83
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h | 10
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 70
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h | 28
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c | 818
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h | 504
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c | 49
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h | 15
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_timer.c (renamed from drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c) | 18
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_timer.h (renamed from drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h) | 10
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h | 23
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h | 99
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h | 318
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h | 23
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_compression.c | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_compression.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_hal.c | 13
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 449
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xcc/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c | 41
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xccvf/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 2
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.c | 2
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.h | 9
-rw-r--r-- drivers/crypto/marvell/cesa/cipher.c | 3
-rw-r--r-- drivers/crypto/marvell/cesa/hash.c | 2
-rw-r--r-- drivers/crypto/marvell/cesa/tdma.c | 53
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.c | 89
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.h | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 35
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 25
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 5
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 12
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 18
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c | 6
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 2
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 19
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c | 1
-rw-r--r-- drivers/crypto/nx/nx-aes-cbc.c | 8
-rw-r--r-- drivers/crypto/nx/nx-aes-ctr.c | 8
-rw-r--r-- drivers/crypto/nx/nx-aes-ecb.c | 8
-rw-r--r-- drivers/crypto/nx/nx-aes-xcbc.c | 128
-rw-r--r-- drivers/crypto/nx/nx-sha256.c | 130
-rw-r--r-- drivers/crypto/nx/nx-sha512.c | 143
-rw-r--r-- drivers/crypto/nx/nx.c | 19
-rw-r--r-- drivers/crypto/nx/nx.h | 11
-rw-r--r-- drivers/crypto/omap-aes.c | 14
-rw-r--r-- drivers/crypto/omap-sham.c | 14
-rw-r--r-- drivers/crypto/padlock-sha.c | 478
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_ahash.c | 52
-rw-r--r-- drivers/crypto/s5p-sss.c | 24
-rw-r--r-- drivers/crypto/sa2ul.c | 63
-rw-r--r-- drivers/crypto/tegra/tegra-se-hash.c | 52
-rw-r--r-- drivers/crypto/xilinx/zynqmp-sha.c | 98
-rw-r--r-- drivers/cxl/Kconfig | 71
-rw-r--r-- drivers/cxl/acpi.c | 24
-rw-r--r-- drivers/cxl/core/Makefile | 1
-rw-r--r-- drivers/cxl/core/cdat.c | 2
-rw-r--r-- drivers/cxl/core/core.h | 4
-rw-r--r-- drivers/cxl/core/edac.c | 2102
-rw-r--r-- drivers/cxl/core/features.c | 43
-rw-r--r-- drivers/cxl/core/hdm.c | 11
-rw-r--r-- drivers/cxl/core/mbox.c | 11
-rw-r--r-- drivers/cxl/core/memdev.c | 5
-rw-r--r-- drivers/cxl/core/pci.c | 48
-rw-r--r-- drivers/cxl/core/port.c | 23
-rw-r--r-- drivers/cxl/core/region.c | 189
-rw-r--r-- drivers/cxl/cxl.h | 23
-rw-r--r-- drivers/cxl/cxlmem.h | 30
-rw-r--r-- drivers/cxl/mem.c | 4
-rw-r--r-- drivers/cxl/port.c | 15
-rw-r--r-- drivers/dax/kmem.c | 10
-rw-r--r-- drivers/dma-buf/dma-buf.c | 265
-rw-r--r-- drivers/dma-buf/dma-fence-unwrap.c | 51
-rw-r--r-- drivers/dma-buf/dma-resv.c | 5
-rw-r--r-- drivers/dma-buf/heaps/system_heap.c | 3
-rw-r--r-- drivers/dma-buf/st-dma-fence.c | 4
-rw-r--r-- drivers/dma-buf/sw_sync.c | 16
-rw-r--r-- drivers/dma-buf/sync_debug.c | 70
-rw-r--r-- drivers/dma-buf/sync_debug.h | 2
-rw-r--r-- drivers/dma-buf/udmabuf.c | 6
-rw-r--r-- drivers/dma/Kconfig | 8
-rw-r--r-- drivers/dma/Makefile | 1
-rw-r--r-- drivers/dma/amd/ptdma/ptdma-dmaengine.c | 42
-rw-r--r-- drivers/dma/amd/ptdma/ptdma.h | 1
-rw-r--r-- drivers/dma/arm-dma350.c | 660
-rw-r--r-- drivers/dma/at_xdmac.c | 6
-rw-r--r-- drivers/dma/dmatest.c | 6
-rw-r--r-- drivers/dma/dw-edma/dw-edma-pcie.c | 5
-rw-r--r-- drivers/dma/fsl-edma-common.c | 30
-rw-r--r-- drivers/dma/fsl-edma-common.h | 18
-rw-r--r-- drivers/dma/fsl-edma-main.c | 116
-rw-r--r-- drivers/dma/fsldma.c | 20
-rw-r--r-- drivers/dma/fsldma.h | 1
-rw-r--r-- drivers/dma/idxd/cdev.c | 23
-rw-r--r-- drivers/dma/idxd/idxd.h | 2
-rw-r--r-- drivers/dma/idxd/init.c | 200
-rw-r--r-- drivers/dma/idxd/sysfs.c | 6
-rw-r--r-- drivers/dma/imx-dma.c | 3
-rw-r--r-- drivers/dma/ioat/dca.c | 2
-rw-r--r-- drivers/dma/ioat/dma.c | 3
-rw-r--r-- drivers/dma/mediatek/mtk-cqdma.c | 6
-rw-r--r-- drivers/dma/sh/rz-dmac.c | 84
-rw-r--r-- drivers/dma/tegra210-adma.c | 185
-rw-r--r-- drivers/dma/ti/k3-udma.c | 13
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 4
-rw-r--r-- drivers/edac/altera_edac.c | 10
-rw-r--r-- drivers/edac/amd64_edac.c | 10
-rw-r--r-- drivers/edac/bluefield_edac.c | 20
-rw-r--r-- drivers/edac/i10nm_base.c | 479
-rw-r--r-- drivers/edac/ie31200_edac.c | 7
-rw-r--r-- drivers/edac/igen6_edac.c | 86
-rw-r--r-- drivers/edac/mce_amd.c | 1
-rwxr-xr-x drivers/edac/mem_repair.c | 9
-rw-r--r-- drivers/edac/skx_common.c | 1
-rw-r--r-- drivers/edac/skx_common.h | 61
-rw-r--r-- drivers/eisa/Makefile | 10
-rw-r--r-- drivers/eisa/eisa-bus.c | 2
-rw-r--r-- drivers/firewire/Kconfig | 2
-rw-r--r-- drivers/firewire/core-transaction.c | 4
-rw-r--r-- drivers/firmware/Kconfig | 18
-rw-r--r-- drivers/firmware/arm_scmi/Kconfig | 13
-rw-r--r-- drivers/firmware/arm_scmi/Makefile | 1
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 79
-rw-r--r-- drivers/firmware/arm_scmi/clock.c | 33
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 1
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 119
-rw-r--r-- drivers/firmware/arm_scmi/protocols.h | 2
-rw-r--r-- drivers/firmware/arm_scmi/quirks.c | 322
-rw-r--r-- drivers/firmware/arm_scmi/quirks.h | 52
-rw-r--r-- drivers/firmware/arm_scmi/raw_mode.c | 72
-rw-r--r-- drivers/firmware/arm_scmi/vendors/imx/Kconfig | 24
-rw-r--r-- drivers/firmware/arm_scmi/vendors/imx/Makefile | 2
-rw-r--r-- drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c | 276
-rw-r--r-- drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c | 263
-rw-r--r-- drivers/firmware/arm_scmi/vendors/imx/imx95.rst | 828
-rw-r--r-- drivers/firmware/arm_sdei.c | 11
-rw-r--r-- drivers/firmware/cirrus/test/cs_dsp_mock_bin.c | 6
-rw-r--r-- drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c | 15
-rw-r--r-- drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c | 7
-rw-r--r-- drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c | 1
-rw-r--r-- drivers/firmware/efi/Kconfig | 24
-rw-r--r-- drivers/firmware/efi/efi.c | 1
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 5
-rw-r--r-- drivers/firmware/efi/libstub/Makefile.zboot | 4
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 1
-rw-r--r-- drivers/firmware/efi/libstub/x86-5lvl.c | 2
-rw-r--r-- drivers/firmware/efi/libstub/x86-mixed.S | 253
-rw-r--r-- drivers/firmware/efi/libstub/zboot-header.S | 32
-rw-r--r-- drivers/firmware/efi/libstub/zboot.lds | 11
-rw-r--r-- drivers/firmware/efi/memmap.c | 3
-rw-r--r-- drivers/firmware/efi/test/efi_test.c | 4
-rw-r--r-- drivers/firmware/imx/Kconfig | 22
-rw-r--r-- drivers/firmware/imx/Makefile | 2
-rw-r--r-- drivers/firmware/imx/sm-cpu.c | 85
-rw-r--r-- drivers/firmware/imx/sm-lmm.c | 91
-rw-r--r-- drivers/firmware/psci/psci.c | 4
-rw-r--r-- drivers/firmware/psci/psci_checker.c | 2
-rw-r--r-- drivers/firmware/qcom/qcom_scm.c | 3
-rw-r--r-- drivers/firmware/qcom/qcom_scm.h | 3
-rw-r--r-- drivers/firmware/qcom/qcom_tzmem.c | 1
-rw-r--r-- drivers/firmware/samsung/exynos-acpm-pmic.c | 16
-rw-r--r-- drivers/firmware/samsung/exynos-acpm.c | 102
-rw-r--r-- drivers/firmware/smccc/kvm_guest.c | 10
-rw-r--r-- drivers/firmware/smccc/smccc.c | 17
-rw-r--r-- drivers/firmware/sysfb.c | 26
-rw-r--r-- drivers/firmware/sysfb_simplefb.c | 31
-rw-r--r-- drivers/firmware/ti_sci.c | 14
-rw-r--r-- drivers/firmware/turris-mox-rwtm.c | 260
-rw-r--r-- drivers/fpga/tests/fpga-mgr-test.c | 1
-rw-r--r-- drivers/gpio/Kconfig | 64
-rw-r--r-- drivers/gpio/Makefile | 4
-rw-r--r-- drivers/gpio/TODO | 7
-rw-r--r-- drivers/gpio/gpio-aggregator.c | 1104
-rw-r--r-- drivers/gpio/gpio-bcm-kona.c | 1
-rw-r--r-- drivers/gpio/gpio-blzp1600.c | 281
-rw-r--r-- drivers/gpio/gpio-brcmstb.c | 2
-rw-r--r-- drivers/gpio/gpio-davinci.c | 34
-rw-r--r-- drivers/gpio/gpio-dln2.c | 7
-rw-r--r-- drivers/gpio/gpio-ds4520.c | 6
-rw-r--r-- drivers/gpio/gpio-eic-sprd.c | 5
-rw-r--r-- drivers/gpio/gpio-em.c | 11
-rw-r--r-- drivers/gpio/gpio-exar.c | 16
-rw-r--r-- drivers/gpio/gpio-f7188x.c | 13
-rw-r--r-- drivers/gpio/gpio-graniterapids.c | 6
-rw-r--r-- drivers/gpio/gpio-grgpio.c | 9
-rw-r--r-- drivers/gpio/gpio-gw-pld.c | 6
-rw-r--r-- drivers/gpio/gpio-htc-egpio.c | 16
-rw-r--r-- drivers/gpio/gpio-ich.c | 12
-rw-r--r-- drivers/gpio/gpio-idt3243x.c | 2
-rw-r--r-- drivers/gpio/gpio-imx-scu.c | 47
-rw-r--r-- drivers/gpio/gpio-it87.c | 11
-rw-r--r-- drivers/gpio/gpio-janz-ttl.c | 6
-rw-r--r-- drivers/gpio/gpio-kempld.c | 7
-rw-r--r-- drivers/gpio/gpio-ljca.c | 13
-rw-r--r-- drivers/gpio/gpio-logicvc.c | 11
-rw-r--r-- drivers/gpio/gpio-loongson-64bit.c | 8
-rw-r--r-- drivers/gpio/gpio-loongson.c | 8
-rw-r--r-- drivers/gpio/gpio-lp3943.c | 13
-rw-r--r-- drivers/gpio/gpio-lp873x.c | 12
-rw-r--r-- drivers/gpio/gpio-lp87565.c | 15
-rw-r--r-- drivers/gpio/gpio-lpc18xx.c | 29
-rw-r--r-- drivers/gpio/gpio-lpc32xx.c | 28
-rw-r--r-- drivers/gpio/gpio-madera.c | 18
-rw-r--r-- drivers/gpio/gpio-max3191x.c | 16
-rw-r--r-- drivers/gpio/gpio-max730x.c | 9
-rw-r--r-- drivers/gpio/gpio-max732x.c | 15
-rw-r--r-- drivers/gpio/gpio-max77620.c | 13
-rw-r--r-- drivers/gpio/gpio-max77759.c | 530
-rw-r--r-- drivers/gpio/gpio-mb86s7x.c | 6
-rw-r--r-- drivers/gpio/gpio-mc33880.c | 9
-rw-r--r-- drivers/gpio/gpio-ml-ioh.c | 6
-rw-r--r-- drivers/gpio/gpio-mlxbf3.c | 54
-rw-r--r-- drivers/gpio/gpio-mpc8xxx.c | 8
-rw-r--r-- drivers/gpio/gpio-mvebu.c | 17
-rw-r--r-- drivers/gpio/gpio-mxc.c | 11
-rw-r--r-- drivers/gpio/gpio-mxs.c | 4
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 40
-rw-r--r-- drivers/gpio/gpio-pxa.c | 14
-rw-r--r-- drivers/gpio/gpio-rockchip.c | 2
-rw-r--r-- drivers/gpio/gpio-sa1100.c | 2
-rw-r--r-- drivers/gpio/gpio-sodaville.c | 2
-rw-r--r-- drivers/gpio/gpio-spacemit-k1.c | 294
-rw-r--r-- drivers/gpio/gpio-tb10x.c | 2
-rw-r--r-- drivers/gpio/gpio-timberdale.c | 10
-rw-r--r-- drivers/gpio/gpio-twl4030.c | 5
-rw-r--r-- drivers/gpio/gpio-vf610.c | 4
-rw-r--r-- drivers/gpio/gpio-virtuser.c | 12
-rw-r--r-- drivers/gpio/gpio-xgene-sb.c | 26
-rw-r--r-- drivers/gpio/gpiolib-acpi-core.c (renamed from drivers/gpio/gpiolib-acpi.c) | 522
-rw-r--r-- drivers/gpio/gpiolib-acpi-quirks.c | 363
-rw-r--r-- drivers/gpio/gpiolib-acpi.h | 15
-rw-r--r-- drivers/gpio/gpiolib-cdev.c | 3
-rw-r--r-- drivers/gpio/gpiolib-devres.c | 89
-rw-r--r-- drivers/gpio/gpiolib-of.c | 17
-rw-r--r-- drivers/gpio/gpiolib-of.h | 6
-rw-r--r-- drivers/gpio/gpiolib-sysfs.c | 8
-rw-r--r-- drivers/gpio/gpiolib.c | 179
-rw-r--r-- drivers/gpu/drm/Kconfig | 118
-rw-r--r-- drivers/gpu/drm/Kconfig.debug | 116
-rw-r--r-- drivers/gpu/drm/Makefile | 6
-rw-r--r-- drivers/gpu/drm/adp/adp-mipi.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 106
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 109
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 559
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 100
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c | 242
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h | 69
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 179
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 194
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 77
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 1177
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 72
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 123
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 39
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 114
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 112
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 106
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 114
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 924
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h | 135
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 968
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h | 76
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 150
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 105
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atom.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cikd.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 206
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 139
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 737
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 626
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 171
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 93
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/imu_v11_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 76
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 328
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 355
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_userqueue.h (renamed from drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c) | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 74
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 66
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h | 144
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 63
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 62
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 102
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 97
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 44
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 272
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 205
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 195
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 280
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c | 413
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 116
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_enums.h | 234
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_ih.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sid.h | 1555
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15d.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 126
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 164
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 44
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 451
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 76
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 145
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 95
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 142
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 56
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 34
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 11
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 47
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 819
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 34
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 56
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c | 178
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/Makefile | 41
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 228
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 59
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 111
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 78
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 355
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 219
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_fused_io.c | 148
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_fused_io.h | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_helper.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_plane.h | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c | 62
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce60/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h | 64
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 4351
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c | 142
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h | 77
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/Makefile | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c) | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 123
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 51
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 368
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_status.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/optc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c | 90
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c | 89
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 64
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/irq_service.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq_types.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_detection.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_factory.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 54
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 55
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 70
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c | 380
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/Makefile | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c) | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 66
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 56
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 171
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 125
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c | 137
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 349
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/include/gpio_service_interface.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/include/link_service_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c | 48
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 73
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 38
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 12
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h | 26
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h | 16
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h | 23
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h | 41
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h | 44
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h | 188
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h | 22
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h | 39
-rw-r--r-- drivers/gpu/drm/amd/include/atombios.h | 6
-rw-r--r-- drivers/gpu/drm/amd/include/atomfirmware.h | 3
-rw-r--r-- drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h | 74
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 25
-rw-r--r-- drivers/gpu/drm/amd/include/v11_structs.h | 8
-rw-r--r-- drivers/gpu/drm/amd/include/v12_structs.h | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 97
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 45
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 6
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 358
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c | 42
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c | 8
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c | 36
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 41
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 35
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 11
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 25
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 6
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 39
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 19
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 135
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c | 109
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 309
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h | 33
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 67
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 26
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c | 2
-rw-r--r-- drivers/gpu/drm/ast/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_cursor.c | 45
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h17
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c26
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c78
-rw-r--r--drivers/gpu/drm/ast/ast_post.c24
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c3
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c41
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c33
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c212
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h3
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c52
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c47
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c3
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c1
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c207
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c77
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c6
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c5
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c9
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c9
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.c11
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-ldb-helper.h5
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c9
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c32
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c22
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c3
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c3
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c7
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c47
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c3
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c16
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c3
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c1
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c3
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c3
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c5
-rw-r--r--drivers/gpu/drm/bridge/panel.c3
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c3
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c10
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c6
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c1
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c14
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c5
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c3
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c3
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c2
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c41
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c45
-rw-r--r--drivers/gpu/drm/bridge/tda998x_drv.c11
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c3
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c38
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c115
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c3
-rw-r--r--drivers/gpu/drm/ci/arm64.config2
-rw-r--r--drivers/gpu/drm/ci/build-igt.sh2
-rw-r--r--drivers/gpu/drm/ci/build.sh20
-rw-r--r--drivers/gpu/drm/ci/build.yml14
-rw-r--r--drivers/gpu/drm/ci/container.yml24
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml55
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh11
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml4
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh5
-rw-r--r--drivers/gpu/drm/ci/test.yml76
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt313
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt30
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt2
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c160
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c37
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c467
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c116
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c20
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_audio_helper.c3
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c168
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c294
-rw-r--r--drivers/gpu/drm/drm_atomic.c59
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c3
-rw-r--r--drivers/gpu/drm/drm_blend.c6
-rw-r--r--drivers/gpu/drm/drm_bridge.c167
-rw-r--r--drivers/gpu/drm/drm_bridge_helper.c58
-rw-r--r--drivers/gpu/drm/drm_client.c10
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c257
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_debugfs.c38
-rw-r--r--drivers/gpu/drm/drm_displayid_internal.h31
-rw-r--r--drivers/gpu/drm/drm_draw.c100
-rw-r--r--drivers/gpu/drm/drm_drv.c81
-rw-r--r--drivers/gpu/drm/drm_edid.c102
-rw-r--r--drivers/gpu/drm/drm_file.c34
-rw-r--r--drivers/gpu/drm/drm_format_helper.c378
-rw-r--r--drivers/gpu/drm/drm_format_internal.h160
-rw-r--r--drivers/gpu/drm/drm_gem.c26
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c6
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c147
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c41
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c37
-rw-r--r--drivers/gpu/drm/drm_mode_config.c7
-rw-r--r--drivers/gpu/drm/drm_panel.c146
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c6
-rw-r--r--drivers/gpu/drm/drm_panic.c142
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs104
-rw-r--r--drivers/gpu/drm/drm_plane.c52
-rw-r--r--drivers/gpu/drm/drm_prime.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c11
-rw-r--r--drivers/gpu/drm/drm_syncobj.c47
-rw-r--r--drivers/gpu/drm/drm_vblank.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c4
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/mmu.c41
-rw-r--r--drivers/gpu/drm/gma500/mmu.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c31
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c33
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h1
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c16
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h10
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c91
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h36
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c94
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h130
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c71
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c104
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c74
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c87
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h12
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c4
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c4
-rw-r--r--drivers/gpu/drm/i915/Makefile5
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c62
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c32
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c8
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c73
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c1222
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h18
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c102
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c191
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c629
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c45
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c925
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h30
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c211
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c969
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h82
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.h24
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c152
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c240
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c140
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c330
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c218
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c64
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c52
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c79
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c60
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c124
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c181
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h63
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c276
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c670
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c672
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c161
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c230
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.c340
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.h58
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c204
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c140
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c91
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c524
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c288
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga_regs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c325
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c171
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.h14
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c4
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c21
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c741
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h24
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c164
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.h6
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c118
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.h5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c35
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c2
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c9
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c54
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c19
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c49
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c158
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c8
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h150
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h108
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c3
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c1
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c15
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h3
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c2
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h11
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/librapl.c4
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c17
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c316
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h56
-rw-r--r--drivers/gpu/drm/imagination/Makefile2
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c147
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h40
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c16
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c40
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c26
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c85
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_riscv.c165
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c17
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c31
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_util.c66
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h6
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c8
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c136
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h3
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h153
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_riscv.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c12
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c3
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c5
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c4
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c4
-rw-r--r--drivers/gpu/drm/loongson/Kconfig2
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c10
-rw-r--r--drivers/gpu/drm/mediatek/Makefile8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c120
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c31
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c413
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c2
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c9
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c55
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ddc.c1
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_catalog.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_catalog.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c44
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c96
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c73
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_preempt.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c43
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c73
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h433
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c156
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c34
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c50
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c121
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c51
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c131
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h27
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c146
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c62
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c17
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c18
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c18
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c7
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c50
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c133
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h26
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c107
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c73
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c89
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c6
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c32
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c12
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h23
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c1
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c12
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c13
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h2
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml3
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py8
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c14
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/coreca7d.c122
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcca7d.c98
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c50
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headca7d.c297
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c25
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c209
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c93
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h868
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h137
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h15
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h28
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h173
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/chan.h76
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/pushc97b.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h132
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h43
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h166
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h335
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h216
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h65
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h48
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h162
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h95
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h148
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h97
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h52
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h79
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h170
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h82
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h100
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h38
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h35
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h62
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h119
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h44
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h124
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h45
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h74
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h86
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h174
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h57
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h33
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h51
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h40
-rw-r--r--drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c84
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c213
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c103
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c156
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan506f.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan906f.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chanc36f.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvif/conn.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/user.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c508
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c110
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c107
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c275
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c358
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c112
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c)43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c)394
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c)417
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c356
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c)1559
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c)35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h741
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h260
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h350
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h)64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h825
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h53
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h)55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h132
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c)34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c698
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c)118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c217
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c216
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h355
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h318
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h (renamed from drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h)241
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h79
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h634
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h57
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h249
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h191
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c271
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c306
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c2
-rw-r--r--drivers/gpu/drm/nova/Kconfig14
-rw-r--r--drivers/gpu/drm/nova/Makefile3
-rw-r--r--drivers/gpu/drm/nova/driver.rs69
-rw-r--r--drivers/gpu/drm/nova/file.rs74
-rw-r--r--drivers/gpu/drm/nova/gem.rs49
-rw-r--r--drivers/gpu/drm/nova/nova.rs18
-rw-r--r--drivers/gpu/drm/nova/uapi.rs61
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c3
-rw-r--r--drivers/gpu/drm/panel/Kconfig41
-rw-r--r--drivers/gpu/drm/panel/Makefile4
-rw-r--r--drivers/gpu/drm/panel/panel-abt-y030xx067a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c11
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c11
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c125
-rw-r--r--drivers/gpu/drm/panel/panel-boe-td4320.c247
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c11
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c19
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8279.c1296
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c441
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c9
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c1683
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt37801.c340
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c238
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c108
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c41
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c109
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c68
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c6
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-g2647fb105.c280
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c71
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c152
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h36
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c13
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h13
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c76
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c10
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c227
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h82
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c6
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c19
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h4
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c13
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c2
-rw-r--r--drivers/gpu/drm/qxl/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c8
-rw-r--r--drivers/gpu/drm/radeon/cik.c42
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c42
-rw-r--r--drivers/gpu/drm/radeon/sid.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c5
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c10
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c3
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c3
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig15
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c120
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h1
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c9
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c5
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c103
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c55
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c315
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c94
-rw-r--r--drivers/gpu/drm/scheduler/.kunitconfig12
-rw-r--r--drivers/gpu/drm/scheduler/Makefile2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c1
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c35
-rw-r--r--drivers/gpu/drm/scheduler/tests/Makefile7
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c359
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h226
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c476
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig52
-rw-r--r--drivers/gpu/drm/sitronix/Makefile3
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c1000
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c (renamed from drivers/gpu/drm/tiny/st7586.c)0
-rw-r--r--drivers/gpu/drm/sitronix/st7735r.c (renamed from drivers/gpu/drm/tiny/st7735r.c)0
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c2
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c13
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c13
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c14
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c15
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c15
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c14
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c14
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c14
-rw-r--r--drivers/gpu/drm/stm/lvds.c11
-rw-r--r--drivers/gpu/drm/sysfb/Kconfig76
-rw-r--r--drivers/gpu/drm/sysfb/Makefile12
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb.c35
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h184
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c320
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c107
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c389
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c (renamed from drivers/gpu/drm/tiny/ofdrm.c)376
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c (renamed from drivers/gpu/drm/tiny/simpledrm.c)258
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c554
-rw-r--r--drivers/gpu/drm/tegra/dc.c17
-rw-r--r--drivers/gpu/drm/tegra/dp.c67
-rw-r--r--drivers/gpu/drm/tegra/dp.h2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c11
-rw-r--r--drivers/gpu/drm/tegra/dsi.c4
-rw-r--r--drivers/gpu/drm/tegra/falcon.c20
-rw-r--r--drivers/gpu/drm/tegra/falcon.h1
-rw-r--r--drivers/gpu/drm/tegra/gem.c1
-rw-r--r--drivers/gpu/drm/tegra/hub.c4
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/rgb.c14
-rw-r--r--drivers/gpu/drm/tegra/sor.c4
-rw-r--r--drivers/gpu/drm/tests/Makefile2
-rw-r--r--drivers/gpu/drm/tests/drm_atomic_test.c153
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c417
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c3
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c28
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c158
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c61
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c3
-rw-r--r--drivers/gpu/drm/tiny/Kconfig64
-rw-r--r--drivers/gpu/drm/tiny/Makefile4
-rw-r--r--drivers/gpu/drm/tiny/appletbdrm.c30
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c145
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c46
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c5
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c24
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h20
-rw-r--r--drivers/gpu/drm/udl/udl_main.c191
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c22
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c6
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c126
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c62
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h22
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c27
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c64
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h26
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c14
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c62
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c154
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c34
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h7
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c17
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c20
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c8
-rw-r--r--drivers/gpu/drm/vkms/Kconfig15
-rw-r--r--drivers/gpu/drm/vkms/Makefile5
-rw-r--r--drivers/gpu/drm/vkms/tests/.kunitconfig4
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile3
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_config_test.c951
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.c640
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.h437
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.c61
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.h26
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c45
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h17
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c176
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c844
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c52
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c874
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h71
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c63
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c85
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c7
-rw-r--r--drivers/gpu/drm/xe/Kconfig21
-rw-r--r--drivers/gpu/drm/xe/Makefile7
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h1
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h1
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h48
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h76
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c10
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c45
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c71
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c6
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c133
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c2
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_alu_commands.h79
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h1
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h9
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h14
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h10
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h7
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c11
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c563
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h27
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c393
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.h10
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c250
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h24
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c64
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device.c34
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c105
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h44
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c4
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c4
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c26
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c6
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h10
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c86
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c30
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c87
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c20
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c66
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c90
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c8
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c48
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c12
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c100
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c13
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h5
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c159
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c203
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity_types.h12
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c3
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c20
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h1
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c34
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c1
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c509
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c2
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c258
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h5
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c2
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c238
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h4
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c21
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c11
-rw-r--r--drivers/gpu/drm/xe/xe_module.c12
-rw-r--r--drivers/gpu/drm/xe/xe_module.h1
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c7
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c38
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c21
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c13
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h15
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c84
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c77
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c252
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c8
-rw-r--r--drivers/gpu/drm/xe/xe_query.c2
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c3
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c7
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c3
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c3
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c69
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h1
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c138
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h87
-rw-r--r--drivers/gpu/drm/xe/xe_trace_lrc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c94
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c75
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h69
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h10
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c5
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c16
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules2
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c9
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp_audio.c5
-rw-r--r--drivers/gpu/host1x/bus.c11
-rw-r--r--drivers/gpu/host1x/cdma.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/nova-core/Kconfig1
-rw-r--r--drivers/gpu/nova-core/driver.rs9
-rw-r--r--drivers/gpu/nova-core/firmware.rs44
-rw-r--r--drivers/gpu/nova-core/gpu.rs86
-rw-r--r--drivers/gpu/nova-core/nova_core.rs2
-rw-r--r--drivers/gpu/nova-core/regs.rs82
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs380
-rw-r--r--drivers/greybus/operation.c3
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c12
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c9
-rw-r--r--drivers/hid/bpf/progs/XPPen__ACK05.bpf.c1
-rw-r--r--drivers/hid/hid-apple.c2
-rw-r--r--drivers/hid/hid-appleir.c2
-rw-r--r--drivers/hid/hid-appletb-kbd.c11
-rw-r--r--drivers/hid/hid-asus.c111
-rw-r--r--drivers/hid/hid-core.c9
-rw-r--r--drivers/hid/hid-corsair-void.c4
-rw-r--r--drivers/hid/hid-cp2112.c66
-rw-r--r--drivers/hid/hid-hyperv.c4
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-kysona.c46
-rw-r--r--drivers/hid/hid-letsketch.c3
-rw-r--r--drivers/hid/hid-lg4ff.c6
-rw-r--r--drivers/hid/hid-magicmouse.c76
-rw-r--r--drivers/hid/hid-mcp2200.c23
-rw-r--r--drivers/hid/hid-mcp2221.c10
-rw-r--r--drivers/hid/hid-multitouch.c14
-rw-r--r--drivers/hid/hid-prodikeys.c2
-rw-r--r--drivers/hid/hid-quirks.c7
-rw-r--r--drivers/hid/hid-sony.c2
-rw-r--r--drivers/hid/hid-steam.c2
-rw-r--r--drivers/hid/hid-thrustmaster.c1
-rw-r--r--drivers/hid/hid-uclogic-core.c11
-rw-r--r--drivers/hid/hid-wiimote-core.c2
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c7
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c4
-rw-r--r--drivers/hid/usbhid/hid-core.c27
-rw-r--r--drivers/hid/wacom_sys.c11
-rw-r--r--drivers/hid/wacom_wac.c2
-rw-r--r--drivers/hsi/clients/ssi_protocol.c6
-rw-r--r--drivers/hv/Kconfig7
-rw-r--r--drivers/hv/channel.c65
-rw-r--r--drivers/hv/connection.c23
-rw-r--r--drivers/hv/hv_common.c76
-rw-r--r--drivers/hv/vmbus_drv.c97
-rw-r--r--drivers/hwmon/Kconfig50
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/aht10.c16
-rw-r--r--drivers/hwmon/amc6821.c50
-rw-r--r--drivers/hwmon/asus-ec-sensors.c53
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c7
-rw-r--r--drivers/hwmon/fam15h_power.c6
-rw-r--r--drivers/hwmon/ftsteutates.c9
-rw-r--r--drivers/hwmon/gpio-fan.c103
-rw-r--r--drivers/hwmon/hwmon-vid.c4
-rw-r--r--drivers/hwmon/ina238.c214
-rw-r--r--drivers/hwmon/ina2xx.c8
-rw-r--r--drivers/hwmon/isl28022.c8
-rw-r--r--drivers/hwmon/k10temp.c9
-rw-r--r--drivers/hwmon/kbatt.c147
-rw-r--r--drivers/hwmon/kfan.c246
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwmon/ltc2992.c30
-rw-r--r--drivers/hwmon/ltc4282.c7
-rw-r--r--drivers/hwmon/max6639.c16
-rw-r--r--drivers/hwmon/max77705-hwmon.c221
-rw-r--r--drivers/hwmon/nct7363.c2
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c2
-rw-r--r--drivers/hwmon/occ/common.c240
-rw-r--r--drivers/hwmon/oxp-sensors.c716
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/lm25066.c2
-rw-r--r--drivers/hwmon/pmbus/lt3074.c122
-rw-r--r--drivers/hwmon/pmbus/max34440.c119
-rw-r--r--drivers/hwmon/pmbus/mpq7932.c4
-rw-r--r--drivers/hwmon/pmbus/mpq8785.c91
-rw-r--r--drivers/hwmon/pmbus/pmbus.h19
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c69
-rw-r--r--drivers/hwmon/pmbus/tda38640.c2
-rw-r--r--drivers/hwmon/pmbus/tps25990.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c16
-rw-r--r--drivers/hwmon/pwm-fan.c6
-rw-r--r--drivers/hwmon/qnap-mcu-hwmon.c1
-rw-r--r--drivers/hwmon/sbrmi.c357
-rw-r--r--drivers/hwmon/spd5118.c357
-rw-r--r--drivers/hwmon/tmp102.c5
-rw-r--r--drivers/hwmon/xgene-hwmon.c39
-rw-r--r--drivers/hwtracing/coresight/Kconfig9
-rw-r--r--drivers/hwtracing/coresight/Makefile3
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c39
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-config.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c131
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c84
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h6
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c158
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-kunit-tests.c74
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h22
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg-configfs.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.c51
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c20
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c18
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c3
-rw-r--r--drivers/i2c/busses/Kconfig15
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c3
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c223
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-amdisp.c205
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c12
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c7
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c102
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c87
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c18
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c166
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h13
-rw-r--r--drivers/i2c/busses/i2c-pasemi-core.c119
-rw-r--r--drivers/i2c/busses/i2c-pasemi-pci.c10
-rw-r--r--drivers/i2c/busses/i2c-piix4.c20
-rw-r--r--drivers/i2c/busses/i2c-powermac.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c19
-rw-r--r--drivers/i2c/busses/i2c-riic.c53
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c3
-rw-r--r--drivers/i2c/busses/i2c-tegra.c5
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c5
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c3
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c24
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c24
-rw-r--r--drivers/i2c/busses/i2c-via.c15
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c20
-rw-r--r--drivers/i2c/busses/i2c-viapro.c33
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c17
-rw-r--r--drivers/i2c/busses/i2c-virtio.c7
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c57
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/busses/scx200_acb.c6
-rw-r--r--drivers/i2c/i2c-atr.c570
-rw-r--r--drivers/i2c/i2c-core-base.c67
-rw-r--r--drivers/i2c/i2c-core-of.c1
-rw-r--r--drivers/i2c/i2c-core-slave.c12
-rw-r--r--drivers/i2c/i2c-core-smbus.c3
-rw-r--r--drivers/i2c/i2c-smbus.c21
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c6
-rw-r--r--drivers/i3c/master/Kconfig4
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c32
-rw-r--r--drivers/i3c/master/svc-i3c-master.c109
-rw-r--r--drivers/idle/intel_idle.c151
-rw-r--r--drivers/iio/accel/adxl345.h4
-rw-r--r--drivers/iio/accel/adxl345_core.c600
-rw-r--r--drivers/iio/accel/adxl345_i2c.c2
-rw-r--r--drivers/iio/accel/adxl345_spi.c2
-rw-r--r--drivers/iio/accel/adxl355_core.c4
-rw-r--r--drivers/iio/accel/adxl367_i2c.c2
-rw-r--r--drivers/iio/accel/adxl367_spi.c4
-rw-r--r--drivers/iio/accel/adxl372_i2c.c2
-rw-r--r--drivers/iio/accel/adxl372_spi.c2
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bma220_spi.c8
-rw-r--r--drivers/iio/accel/bma400_core.c5
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c6
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c4
-rw-r--r--drivers/iio/accel/bmi088-accel-i2c.c4
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c4
-rw-r--r--drivers/iio/accel/da280.c4
-rw-r--r--drivers/iio/accel/da311.c2
-rw-r--r--drivers/iio/accel/dmard10.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c19
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c4
-rw-r--r--drivers/iio/accel/fxls8962af-spi.c4
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c4
-rw-r--r--drivers/iio/accel/kxcjk-1013.c91
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c2
-rw-r--r--drivers/iio/accel/kxsd9-spi.c2
-rw-r--r--drivers/iio/accel/kxsd9.c7
-rw-r--r--drivers/iio/accel/mma7455_core.c5
-rw-r--r--drivers/iio/accel/mma7660.c4
-rw-r--r--drivers/iio/accel/mma8452.c5
-rw-r--r--drivers/iio/accel/mma9551.c4
-rw-r--r--drivers/iio/accel/mma9553.c6
-rw-r--r--drivers/iio/accel/msa311.c4
-rw-r--r--drivers/iio/accel/mxc4005.c8
-rw-r--r--drivers/iio/accel/sca3000.c2
-rw-r--r--drivers/iio/accel/sca3300.c23
-rw-r--r--drivers/iio/accel/st_accel_i2c.c6
-rw-r--r--drivers/iio/accel/st_accel_spi.c4
-rw-r--r--drivers/iio/accel/stk8312.c6
-rw-r--r--drivers/iio/accel/stk8ba50.c8
-rw-r--r--drivers/iio/adc/Kconfig160
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad4000.c387
-rw-r--r--drivers/iio/adc/ad4030.c68
-rw-r--r--drivers/iio/adc/ad4130.c10
-rw-r--r--drivers/iio/adc/ad4695.c19
-rw-r--r--drivers/iio/adc/ad4851.c14
-rw-r--r--drivers/iio/adc/ad7091r-base.c9
-rw-r--r--drivers/iio/adc/ad7124.c210
-rw-r--r--drivers/iio/adc/ad7173.c15
-rw-r--r--drivers/iio/adc/ad7266.c7
-rw-r--r--drivers/iio/adc/ad7280a.c2
-rw-r--r--drivers/iio/adc/ad7298.c4
-rw-r--r--drivers/iio/adc/ad7380.c57
-rw-r--r--drivers/iio/adc/ad7476.c4
-rw-r--r--drivers/iio/adc/ad7606.c375
-rw-r--r--drivers/iio/adc/ad7606.h196
-rw-r--r--drivers/iio/adc/ad7606_par.c37
-rw-r--r--drivers/iio/adc/ad7606_spi.c296
-rw-r--r--drivers/iio/adc/ad7768-1.c247
-rw-r--r--drivers/iio/adc/ad7779.c3
-rw-r--r--drivers/iio/adc/ad7791.c2
-rw-r--r--drivers/iio/adc/ad7923.c4
-rw-r--r--drivers/iio/adc/ad7944.c13
-rw-r--r--drivers/iio/adc/ad799x.c2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c4
-rw-r--r--drivers/iio/adc/adi-axi-adc.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c13
-rw-r--r--drivers/iio/adc/axp20x_adc.c8
-rw-r--r--drivers/iio/adc/axp288_adc.c4
-rw-r--r--drivers/iio/adc/cpcap-adc.c2
-rw-r--r--drivers/iio/adc/da9150-gpadc.c2
-rw-r--r--drivers/iio/adc/dln2-adc.c4
-rw-r--r--drivers/iio/adc/envelope-detector.c4
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c2
-rw-r--r--drivers/iio/adc/hi8435.c2
-rw-r--r--drivers/iio/adc/hx711.c11
-rw-r--r--drivers/iio/adc/imx7d_adc.c2
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c2
-rw-r--r--drivers/iio/adc/imx93_adc.c2
-rw-r--r--drivers/iio/adc/ina2xx-adc.c2
-rw-r--r--drivers/iio/adc/industrialio-adc.c82
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c4
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c2
-rw-r--r--drivers/iio/adc/ltc2471.c2
-rw-r--r--drivers/iio/adc/max1118.c4
-rw-r--r--drivers/iio/adc/max11410.c4
-rw-r--r--drivers/iio/adc/max1363.c8
-rw-r--r--drivers/iio/adc/max77541-adc.c2
-rw-r--r--drivers/iio/adc/mcp3911.c62
-rw-r--r--drivers/iio/adc/meson_saradc.c36
-rw-r--r--drivers/iio/adc/mt6359-auxadc.c2
-rw-r--r--drivers/iio/adc/mt6360-adc.c4
-rw-r--r--drivers/iio/adc/mt6370-adc.c2
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c17
-rw-r--r--drivers/iio/adc/nct7201.c501
-rw-r--r--drivers/iio/adc/npcm_adc.c2
-rw-r--r--drivers/iio/adc/pac1921.c7
-rw-r--r--drivers/iio/adc/pac1934.c2
-rw-r--r--drivers/iio/adc/palmas_gpadc.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c4
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c11
-rw-r--r--drivers/iio/adc/rn5t618-adc.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c3
-rw-r--r--drivers/iio/adc/rohm-bd79124.c1146
-rw-r--r--drivers/iio/adc/rtq6056.c3
-rw-r--r--drivers/iio/adc/rzg2l_adc.c41
-rw-r--r--drivers/iio/adc/spear_adc.c2
-rw-r--r--drivers/iio/adc/stm32-adc-core.c7
-rw-r--r--drivers/iio/adc/stm32-adc-core.h17
-rw-r--r--drivers/iio/adc/stm32-adc.c158
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c8
-rw-r--r--drivers/iio/adc/sun20i-gpadc-iio.c41
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c6
-rw-r--r--drivers/iio/adc/ti-adc081c.c4
-rw-r--r--drivers/iio/adc/ti-adc0832.c4
-rw-r--r--drivers/iio/adc/ti-adc084s021.c4
-rw-r--r--drivers/iio/adc/ti-adc12138.c4
-rw-r--r--drivers/iio/adc/ti-adc128s052.c98
-rw-r--r--drivers/iio/adc/ti-ads1015.c172
-rw-r--r--drivers/iio/adc/ti-ads1100.c44
-rw-r--r--drivers/iio/adc/ti-ads1119.c4
-rw-r--r--drivers/iio/adc/ti-ads124s08.c4
-rw-r--r--drivers/iio/adc/ti-ads131e08.c4
-rw-r--r--drivers/iio/adc/ti-ads7950.c17
-rw-r--r--drivers/iio/adc/ti-ads8688.c4
-rw-r--r--drivers/iio/adc/ti-lmp92064.c6
-rw-r--r--drivers/iio/adc/ti-tlc4541.c4
-rw-r--r--drivers/iio/adc/ti-tsc2046.c6
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c2
-rw-r--r--drivers/iio/adc/vf610_adc.c46
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c2
-rw-r--r--drivers/iio/addac/ad74115.c25
-rw-r--r--drivers/iio/addac/ad74413r.c37
-rw-r--r--drivers/iio/afe/iio-rescale.c2
-rw-r--r--drivers/iio/amplifiers/ad8366.c2
-rw-r--r--drivers/iio/amplifiers/ada4250.c4
-rw-r--r--drivers/iio/amplifiers/hmc425a.c5
-rw-r--r--drivers/iio/cdc/ad7150.c4
-rw-r--r--drivers/iio/cdc/ad7746.c4
-rw-r--r--drivers/iio/chemical/Kconfig20
-rw-r--r--drivers/iio/chemical/Makefile2
-rw-r--r--drivers/iio/chemical/ags02ma.c4
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c4
-rw-r--r--drivers/iio/chemical/atlas-sensor.c14
-rw-r--r--drivers/iio/chemical/bme680_core.c4
-rw-r--r--drivers/iio/chemical/bme680_i2c.c4
-rw-r--r--drivers/iio/chemical/bme680_spi.c12
-rw-r--r--drivers/iio/chemical/ccs811.c83
-rw-r--r--drivers/iio/chemical/ens160_core.c4
-rw-r--r--drivers/iio/chemical/mhz19b.c316
-rw-r--r--drivers/iio/chemical/pms7003.c4
-rw-r--r--drivers/iio/chemical/scd30_core.c3
-rw-r--r--drivers/iio/chemical/scd4x.c10
-rw-r--r--drivers/iio/chemical/sen0322.c161
-rw-r--r--drivers/iio/chemical/sps30.c4
-rw-r--r--drivers/iio/chemical/sunrise_co2.c4
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c2
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c2
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c78
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c11
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c4
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_spi.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c7
-rw-r--r--drivers/iio/dac/Kconfig11
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad3530r.c517
-rw-r--r--drivers/iio/dac/ad3552r-common.c4
-rw-r--r--drivers/iio/dac/ad3552r-hs.c182
-rw-r--r--drivers/iio/dac/ad3552r.h1
-rw-r--r--drivers/iio/dac/ad5064.c8
-rw-r--r--drivers/iio/dac/ad5360.c2
-rw-r--r--drivers/iio/dac/ad5380.c2
-rw-r--r--drivers/iio/dac/ad5446.c6
-rw-r--r--drivers/iio/dac/ad5449.c2
-rw-r--r--drivers/iio/dac/ad5504.c4
-rw-r--r--drivers/iio/dac/ad5592r-base.c156
-rw-r--r--drivers/iio/dac/ad5592r.c6
-rw-r--r--drivers/iio/dac/ad5593r.c6
-rw-r--r--drivers/iio/dac/ad5624r_spi.c4
-rw-r--r--drivers/iio/dac/ad5686-spi.c2
-rw-r--r--drivers/iio/dac/ad5686.c2
-rw-r--r--drivers/iio/dac/ad5696-i2c.c4
-rw-r--r--drivers/iio/dac/ad5755.c4
-rw-r--r--drivers/iio/dac/ad5758.c2
-rw-r--r--drivers/iio/dac/ad5761.c4
-rw-r--r--drivers/iio/dac/ad5766.c8
-rw-r--r--drivers/iio/dac/ad5770r.c4
-rw-r--r--drivers/iio/dac/ad5791.c4
-rw-r--r--drivers/iio/dac/ad7293.c17
-rw-r--r--drivers/iio/dac/ad7303.c6
-rw-r--r--drivers/iio/dac/ad8801.c2
-rw-r--r--drivers/iio/dac/ad9739a.c4
-rw-r--r--drivers/iio/dac/adi-axi-dac.c66
-rw-r--r--drivers/iio/dac/dpot-dac.c2
-rw-r--r--drivers/iio/dac/ds4424.c2
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c2
-rw-r--r--drivers/iio/dac/ltc1660.c4
-rw-r--r--drivers/iio/dac/ltc2632.c6
-rw-r--r--drivers/iio/dac/ltc2688.c14
-rw-r--r--drivers/iio/dac/max5522.c4
-rw-r--r--drivers/iio/dac/max5821.c2
-rw-r--r--drivers/iio/dac/mcp4725.c4
-rw-r--r--drivers/iio/dac/mcp4728.c6
-rw-r--r--drivers/iio/dac/mcp4821.c4
-rw-r--r--drivers/iio/dac/mcp4922.c2
-rw-r--r--drivers/iio/dac/rohm-bd79703.c116
-rw-r--r--drivers/iio/dac/stm32-dac-core.c2
-rw-r--r--drivers/iio/dac/stm32-dac.c4
-rw-r--r--drivers/iio/dac/ti-dac082s085.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c6
-rw-r--r--drivers/iio/dac/ti-dac7311.c2
-rw-r--r--drivers/iio/dac/ti-dac7612.c4
-rw-r--r--drivers/iio/dac/vf610_dac.c4
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c27
-rw-r--r--drivers/iio/filter/admv8818.c230
-rw-r--r--drivers/iio/frequency/ad9523.c2
-rw-r--r--drivers/iio/frequency/adf4350.c6
-rw-r--r--drivers/iio/frequency/adf4371.c6
-rw-r--r--drivers/iio/frequency/adf4377.c4
-rw-r--r--drivers/iio/frequency/admv1013.c8
-rw-r--r--drivers/iio/frequency/admv1014.c4
-rw-r--r--drivers/iio/frequency/adrf6780.c4
-rw-r--r--drivers/iio/gyro/adis16080.c2
-rw-r--r--drivers/iio/gyro/adis16260.c2
-rw-r--r--drivers/iio/gyro/adxrs290.c14
-rw-r--r--drivers/iio/gyro/adxrs450.c7
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c4
-rw-r--r--drivers/iio/gyro/bmg160_spi.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c4
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c4
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c4
-rw-r--r--drivers/iio/health/afe4403.c4
-rw-r--r--drivers/iio/health/afe4404.c4
-rw-r--r--drivers/iio/health/max30100.c2
-rw-r--r--drivers/iio/health/max30102.c6
-rw-r--r--drivers/iio/humidity/am2315.c2
-rw-r--r--drivers/iio/humidity/hdc100x.c68
-rw-r--r--drivers/iio/humidity/hdc2010.c14
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c2
-rw-r--r--drivers/iio/humidity/hts221_core.c91
-rw-r--r--drivers/iio/humidity/hts221_i2c.c6
-rw-r--r--drivers/iio/humidity/hts221_spi.c4
-rw-r--r--drivers/iio/humidity/htu21.c4
-rw-r--r--drivers/iio/imu/adis.c9
-rw-r--r--drivers/iio/imu/adis16400.c2
-rw-r--r--drivers/iio/imu/adis16460.c4
-rw-r--r--drivers/iio/imu/adis16475.c2
-rw-r--r--drivers/iio/imu/adis16480.c2
-rw-r--r--drivers/iio/imu/adis_buffer.c3
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c6
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c6
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600.h2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c23
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c14
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c23
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c15
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c14
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c4
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/imu/smi240.c7
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c16
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c73
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c4
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c6
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c4
-rw-r--r--drivers/iio/industrialio-backend.c28
-rw-r--r--drivers/iio/industrialio-core.c41
-rw-r--r--drivers/iio/light/acpi-als.c2
-rw-r--r--drivers/iio/light/adux1020.c2
-rw-r--r--drivers/iio/light/al3000a.c11
-rw-r--r--drivers/iio/light/al3010.c105
-rw-r--r--drivers/iio/light/al3320a.c116
-rw-r--r--drivers/iio/light/apds9306.c7
-rw-r--r--drivers/iio/light/apds9960.c2
-rw-r--r--drivers/iio/light/as73211.c42
-rw-r--r--drivers/iio/light/bh1750.c24
-rw-r--r--drivers/iio/light/bh1780.c2
-rw-r--r--drivers/iio/light/cm3232.c4
-rw-r--r--drivers/iio/light/cm3323.c4
-rw-r--r--drivers/iio/light/cm3605.c2
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c2
-rw-r--r--drivers/iio/light/gp2ap002.c2
-rw-r--r--drivers/iio/light/gp2ap020a00f.c7
-rw-r--r--drivers/iio/light/hid-sensor-als.c2
-rw-r--r--drivers/iio/light/hid-sensor-prox.c5
-rw-r--r--drivers/iio/light/isl29018.c4
-rw-r--r--drivers/iio/light/isl29028.c4
-rw-r--r--drivers/iio/light/isl29125.c7
-rw-r--r--drivers/iio/light/jsa1212.c2
-rw-r--r--drivers/iio/light/ltr390.c4
-rw-r--r--drivers/iio/light/ltr501.c170
-rw-r--r--drivers/iio/light/ltrf216a.c4
-rw-r--r--drivers/iio/light/opt4001.c2
-rw-r--r--drivers/iio/light/opt4060.c5
-rw-r--r--drivers/iio/light/pa12203001.c4
-rw-r--r--drivers/iio/light/rohm-bu27034.c14
-rw-r--r--drivers/iio/light/rpr0521.c63
-rw-r--r--drivers/iio/light/si1145.c25
-rw-r--r--drivers/iio/light/st_uvis25_core.c7
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c4
-rw-r--r--drivers/iio/light/st_uvis25_spi.c4
-rw-r--r--drivers/iio/light/stk3310.c8
-rw-r--r--drivers/iio/light/tcs3414.c9
-rw-r--r--drivers/iio/light/tcs3472.c9
-rw-r--r--drivers/iio/light/tsl2563.c4
-rw-r--r--drivers/iio/light/tsl2583.c4
-rw-r--r--drivers/iio/light/tsl2591.c2
-rw-r--r--drivers/iio/light/tsl2772.c4
-rw-r--r--drivers/iio/light/us5182d.c6
-rw-r--r--drivers/iio/light/vcnl4000.c82
-rw-r--r--drivers/iio/light/vcnl4035.c42
-rw-r--r--drivers/iio/light/veml6040.c4
-rw-r--r--drivers/iio/light/veml6075.c2
-rw-r--r--drivers/iio/light/vl6180.c2
-rw-r--r--drivers/iio/light/zopt2201.c42
-rw-r--r--drivers/iio/magnetometer/af8133j.c3
-rw-r--r--drivers/iio/magnetometer/ak8974.c10
-rw-r--r--drivers/iio/magnetometer/ak8975.c8
-rw-r--r--drivers/iio/magnetometer/als31300.c7
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c4
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c2
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843.h2
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c1
-rw-r--r--drivers/iio/magnetometer/mag3110.c165
-rw-r--r--drivers/iio/magnetometer/mmc35240.c4
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c11
-rw-r--r--drivers/iio/magnetometer/rm3100-spi.c1
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c4
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c4
-rw-r--r--drivers/iio/magnetometer/tmag5273.c4
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c8
-rw-r--r--drivers/iio/multiplexer/iio-mux.c2
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c2
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c4
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c2
-rw-r--r--drivers/iio/potentiometer/ad5272.c4
-rw-r--r--drivers/iio/potentiometer/ds1803.c4
-rw-r--r--drivers/iio/potentiometer/max5432.c2
-rw-r--r--drivers/iio/potentiometer/max5487.c2
-rw-r--r--drivers/iio/potentiometer/mcp4018.c4
-rw-r--r--drivers/iio/potentiometer/mcp41010.c4
-rw-r--r--drivers/iio/potentiometer/mcp4131.c4
-rw-r--r--drivers/iio/potentiometer/mcp4531.c4
-rw-r--r--drivers/iio/potentiometer/tpl0102.c2
-rw-r--r--drivers/iio/potentiostat/lmp91000.c4
-rw-r--r--drivers/iio/pressure/abp060mg.c2
-rw-r--r--drivers/iio/pressure/bmp280-core.c110
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c4
-rw-r--r--drivers/iio/pressure/bmp280-spi.c10
-rw-r--r--drivers/iio/pressure/bmp280.h8
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c2
-rw-r--r--drivers/iio/pressure/dlhl60d.c11
-rw-r--r--drivers/iio/pressure/dps310.c4
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c7
-rw-r--r--drivers/iio/pressure/hp03.c2
-rw-r--r--drivers/iio/pressure/hp206c.c4
-rw-r--r--drivers/iio/pressure/hsc030pa.c4
-rw-r--r--drivers/iio/pressure/hsc030pa_i2c.c4
-rw-r--r--drivers/iio/pressure/hsc030pa_spi.c4
-rw-r--r--drivers/iio/pressure/icp10100.c15
-rw-r--r--drivers/iio/pressure/mpl115_spi.c2
-rw-r--r--drivers/iio/pressure/mpl3115.c101
-rw-r--r--drivers/iio/pressure/mprls0025pa_i2c.c4
-rw-r--r--drivers/iio/pressure/mprls0025pa_spi.c4
-rw-r--r--drivers/iio/pressure/ms5611_core.c8
-rw-r--r--drivers/iio/pressure/ms5611_spi.c1
-rw-r--r--drivers/iio/pressure/ms5637.c4
-rw-r--r--drivers/iio/pressure/rohm-bm1390.c14
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c6
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c4
-rw-r--r--drivers/iio/pressure/zpa2326.c21
-rw-r--r--drivers/iio/pressure/zpa2326_spi.c3
-rw-r--r--drivers/iio/proximity/as3935.c8
-rw-r--r--drivers/iio/proximity/cros_ec_mkbp_proximity.c17
-rw-r--r--drivers/iio/proximity/hx9023s.c15
-rw-r--r--drivers/iio/proximity/irsd200.c14
-rw-r--r--drivers/iio/proximity/isl29501.c4
-rw-r--r--drivers/iio/proximity/mb1232.c6
-rw-r--r--drivers/iio/proximity/ping.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c9
-rw-r--r--drivers/iio/proximity/srf04.c2
-rw-r--r--drivers/iio/proximity/srf08.c6
-rw-r--r--drivers/iio/proximity/sx9310.c6
-rw-r--r--drivers/iio/proximity/sx9324.c2
-rw-r--r--drivers/iio/proximity/sx9500.c11
-rw-r--r--drivers/iio/proximity/sx_common.c4
-rw-r--r--drivers/iio/proximity/vcnl3020.c2
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c4
-rw-r--r--drivers/iio/resolver/ad2s1200.c2
-rw-r--r--drivers/iio/resolver/ad2s1210.c5
-rw-r--r--drivers/iio/resolver/ad2s90.c4
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c7
-rw-r--r--drivers/iio/temperature/ltc2983.c4
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c37
-rw-r--r--drivers/iio/temperature/mcp9600.c4
-rw-r--r--drivers/iio/temperature/tmp006.c4
-rw-r--r--drivers/iio/temperature/tmp007.c2
-rw-r--r--drivers/iio/temperature/tsys01.c4
-rw-r--r--drivers/iio/temperature/tsys02d.c2
-rw-r--r--drivers/iio/test/iio-test-format.c2
-rw-r--r--drivers/iio/test/iio-test-gts.c2
-rw-r--r--drivers/iio/test/iio-test-rescale.c2
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c77
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c4
-rw-r--r--drivers/infiniband/core/cm.c78
-rw-r--r--drivers/infiniband/core/cm_trace.h2
-rw-r--r--drivers/infiniband/core/cma.c25
-rw-r--r--drivers/infiniband/core/cma_trace.h2
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/iwcm.c29
-rw-r--r--drivers/infiniband/core/mad_rmpp.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c271
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c20
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.c2
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c4
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c3
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c2
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h1
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c10
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h1
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c21
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h1
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c5
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c2
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/hns/Makefile1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h20
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_trace.h216
-rw-r--r--drivers/infiniband/hw/irdma/cm.c3
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c2
-rw-r--r--drivers/infiniband/hw/irdma/main.c129
-rw-r--r--drivers/infiniband/hw/irdma/main.h3
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h2
-rw-r--r--drivers/infiniband/hw/irdma/pble.c2
-rw-r--r--drivers/infiniband/hw/irdma/type.h4
-rw-r--r--drivers/infiniband/hw/irdma/utils.c4
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c1
-rw-r--r--drivers/infiniband/hw/mana/cq.c4
-rw-r--r--drivers/infiniband/hw/mana/device.c174
-rw-r--r--drivers/infiniband/hw/mana/main.c92
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h7
-rw-r--r--drivers/infiniband/hw/mana/mr.c29
-rw-r--r--drivers/infiniband/hw/mana/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c8
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c58
-rw-r--r--drivers/infiniband/hw/mlx5/main.c29
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h13
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c65
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c30
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c66
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c144
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c40
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h2
-rw-r--r--drivers/infiniband/sw/siw/Kconfig1
-rw-r--r--drivers/infiniband/sw/siw/siw.h24
-rw-r--r--drivers/infiniband/sw/siw/siw_cq.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c28
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h1
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c65
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c127
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/input/ff-memless.c2
-rw-r--r--drivers/input/gameport/gameport.c3
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/db9.c2
-rw-r--r--drivers/input/joystick/gamecon.c2
-rw-r--r--drivers/input/joystick/turbografx.c2
-rw-r--r--drivers/input/joystick/xpad.c56
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c6
-rw-r--r--drivers/input/keyboard/imx_keypad.c3
-rw-r--r--drivers/input/keyboard/locomokbd.c2
-rw-r--r--drivers/input/keyboard/matrix_keypad.c30
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c28
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/misc/ims-pcu.c6
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c2
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/mouse/byd.c2
-rw-r--r--drivers/input/rmi4/rmi_f34.c135
-rw-r--r--drivers/input/touchscreen/ad7877.c2
-rw-r--r--drivers/input/touchscreen/ad7879.c2
-rw-r--r--drivers/input/touchscreen/bu21029_ts.c3
-rw-r--r--drivers/input/touchscreen/exc3000.c2
-rw-r--r--drivers/input/touchscreen/sx8654.c2
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c2
-rw-r--r--drivers/interconnect/core.c82
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c17
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h5
-rw-r--r--drivers/interconnect/qcom/osm-l3.c38
-rw-r--r--drivers/interconnect/qcom/sa8775p.c952
-rw-r--r--drivers/interconnect/qcom/sm8650.c344
-rw-r--r--drivers/interconnect/qcom/sm8650.h1
-rw-r--r--drivers/iommu/Kconfig158
-rw-r--r--drivers/iommu/Makefile6
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h2
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h10
-rw-r--r--drivers/iommu/amd/init.c97
-rw-r--r--drivers/iommu/amd/io_pgtable.c38
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c12
-rw-r--r--drivers/iommu/amd/iommu.c94
-rw-r--r--drivers/iommu/amd/ppr.c2
-rw-r--r--drivers/iommu/apple-dart.c3
-rw-r--r--drivers/iommu/arm/Kconfig144
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c86
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c138
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h39
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c9
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c44
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c6
-rw-r--r--drivers/iommu/dma-iommu.c496
-rw-r--r--drivers/iommu/exynos-iommu.c12
-rw-r--r--drivers/iommu/fsl_pamu_domain.c2
-rw-r--r--drivers/iommu/intel/Makefile7
-rw-r--r--drivers/iommu/intel/dmar.c14
-rw-r--r--drivers/iommu/intel/iommu.c244
-rw-r--r--drivers/iommu/intel/iommu.h62
-rw-r--r--drivers/iommu/intel/irq_remapping.c12
-rw-r--r--drivers/iommu/intel/nested.c20
-rw-r--r--drivers/iommu/intel/pasid.c13
-rw-r--r--drivers/iommu/intel/pasid.h1
-rw-r--r--drivers/iommu/intel/prq.c7
-rw-r--r--drivers/iommu/intel/svm.c9
-rw-r--r--drivers/iommu/io-pgtable-arm.c58
-rw-r--r--drivers/iommu/io-pgtable-dart.c23
-rw-r--r--drivers/iommu/iommu-pages.c119
-rw-r--r--drivers/iommu/iommu-pages.h195
-rw-r--r--drivers/iommu/iommu-sva.c18
-rw-r--r--drivers/iommu/iommu.c234
-rw-r--r--drivers/iommu/iommufd/device.c59
-rw-r--r--drivers/iommu/iommufd/eventq.c48
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h6
-rw-r--r--drivers/iommu/iommufd/selftest.c57
-rw-r--r--drivers/iommu/ipmmu-vmsa.c3
-rw-r--r--drivers/iommu/mtk_iommu.c37
-rw-r--r--drivers/iommu/riscv/Makefile2
-rw-r--r--drivers/iommu/riscv/iommu.c43
-rw-r--r--drivers/iommu/rockchip-iommu.c14
-rw-r--r--drivers/iommu/s390-iommu.c345
-rw-r--r--drivers/iommu/sun50i-iommu.c6
-rw-r--r--drivers/iommu/tegra-smmu.c111
-rw-r--r--drivers/iommu/virtio-iommu.c187
-rw-r--r--drivers/irqchip/Kconfig5
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/exynos-combiner.c2
-rw-r--r--drivers/irqchip/irq-al-fic.c20
-rw-r--r--drivers/irqchip/irq-alpine-msi.c7
-rw-r--r--drivers/irqchip/irq-apple-aic.c4
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c12
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c2
-rw-r--r--drivers/irqchip/irq-aspeed-intc.c2
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c2
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c4
-rw-r--r--drivers/irqchip/irq-ath79-misc.c4
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic.c19
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c28
-rw-r--r--drivers/irqchip/irq-bcm2712-mip.c6
-rw-r--r--drivers/irqchip/irq-bcm2835.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c2
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c24
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c10
-rw-r--r--drivers/irqchip/irq-clps711x.c4
-rw-r--r--drivers/irqchip/irq-crossbar.c6
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c5
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c2
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c6
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c5
-rw-r--r--drivers/irqchip/irq-econet-en751221.c310
-rw-r--r--drivers/irqchip/irq-ftintc010.c5
-rw-r--r--drivers/irqchip/irq-gic-v2m.c20
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-msi-parent.c41
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c76
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c20
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c7
-rw-r--r--drivers/irqchip/irq-hip04.c6
-rw-r--r--drivers/irqchip/irq-i8259.c4
-rw-r--r--drivers/irqchip/irq-idt3243x.c2
-rw-r--r--drivers/irqchip/irq-imgpdc.c2
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c4
-rw-r--r--drivers/irqchip/irq-imx-intmux.c2
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c2
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c2
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c13
-rw-r--r--drivers/irqchip/irq-ingenic.c4
-rw-r--r--drivers/irqchip/irq-ixp4xx.c2
-rw-r--r--drivers/irqchip/irq-jcore-aic.c5
-rw-r--r--drivers/irqchip/irq-keystone.c4
-rw-r--r--drivers/irqchip/irq-lan966x-oic.c20
-rw-r--r--drivers/irqchip/irq-loongarch-avec.c2
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c2
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c2
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c2
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c11
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c2
-rw-r--r--drivers/irqchip/irq-lpc32xx.c4
-rw-r--r--drivers/irqchip/irq-ls-extirq.c4
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c10
-rw-r--r--drivers/irqchip/irq-ls1x.c4
-rw-r--r--drivers/irqchip/irq-mchp-eic.c5
-rw-r--r--drivers/irqchip/irq-meson-gpio.c2
-rw-r--r--drivers/irqchip/irq-mips-cpu.c13
-rw-r--r--drivers/irqchip/irq-mips-gic.c15
-rw-r--r--drivers/irqchip/irq-mmp.c12
-rw-r--r--drivers/irqchip/irq-mscc-ocelot.c7
-rw-r--r--drivers/irqchip/irq-msi-lib.c9
-rw-r--r--drivers/irqchip/irq-msi-lib.h27
-rw-r--r--drivers/irqchip/irq-mst-intc.c4
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c5
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c4
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c28
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c2
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c29
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c4
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c24
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-nvic.c2
-rw-r--r--drivers/irqchip/irq-omap-intc.c4
-rw-r--r--drivers/irqchip/irq-or1k-pic.c4
-rw-r--r--drivers/irqchip/irq-orion.c6
-rw-r--r--drivers/irqchip/irq-owl-sirq.c4
-rw-r--r--drivers/irqchip/irq-pic32-evic.c6
-rw-r--r--drivers/irqchip/irq-pruss-intc.c7
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c2
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c6
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c4
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c6
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c6
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c40
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c2
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c12
-rw-r--r--drivers/irqchip/irq-riscv-intc.c2
-rw-r--r--drivers/irqchip/irq-sa11x0.c2
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c154
-rw-r--r--drivers/irqchip/irq-sni-exiu.c6
-rw-r--r--drivers/irqchip/irq-sp7021-intc.c4
-rw-r--r--drivers/irqchip/irq-starfive-jh8100-intc.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c25
-rw-r--r--drivers/irqchip/irq-stm32mp-exti.c9
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-sun6i-r.c4
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c11
-rw-r--r--drivers/irqchip/irq-tb10x.c21
-rw-r--r--drivers/irqchip/irq-tegra.c5
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c10
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c7
-rw-r--r--drivers/irqchip/irq-ts4800.c2
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c4
-rw-r--r--drivers/irqchip/irq-vf610-mscm-ir.c6
-rw-r--r--drivers/irqchip/irq-vic.c5
-rw-r--r--drivers/irqchip/irq-vt8500.c153
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c2
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c4
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c5
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c4
-rw-r--r--drivers/irqchip/irq-zevio.c4
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/isdn/mISDN/dsp_tones.c2
-rw-r--r--drivers/isdn/mISDN/fsm.c2
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c6
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/leds/.kunitconfig4
-rw-r--r--drivers/leds/Kconfig11
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c6
-rw-r--r--drivers/leds/flash/Kconfig11
-rw-r--r--drivers/leds/flash/Makefile1
-rw-r--r--drivers/leds/flash/leds-rt8515.c2
-rw-r--r--drivers/leds/flash/leds-sgm3140.c2
-rw-r--r--drivers/leds/flash/leds-tps6131x.c815
-rw-r--r--drivers/leds/led-class-flash.c15
-rw-r--r--drivers/leds/led-class-multicolor.c3
-rw-r--r--drivers/leds/led-core.c46
-rw-r--r--drivers/leds/led-test.c132
-rw-r--r--drivers/leds/led-triggers.c13
-rw-r--r--drivers/leds/leds-cros_ec.c21
-rw-r--r--drivers/leds/leds-lp8860.c214
-rw-r--r--drivers/leds/leds-pca9532.c11
-rw-r--r--drivers/leds/leds-pca955x.c28
-rw-r--r--drivers/leds/leds-pca995x.c2
-rw-r--r--drivers/leds/leds-tca6507.c11
-rw-r--r--drivers/leds/leds-turris-omnia.c4
-rw-r--r--drivers/leds/rgb/leds-mt6370-rgb.c16
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c5
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c7
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c4
-rw-r--r--drivers/leds/trigger/ledtrig-backlight.c48
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c2
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c2
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c2
-rw-r--r--drivers/mailbox/Kconfig14
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/cv1800-mailbox.c220
-rw-r--r--drivers/mailbox/imx-mailbox.c21
-rw-r--r--drivers/mailbox/mailbox-altera.c2
-rw-r--r--drivers/mailbox/mailbox.c199
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c51
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c16
-rw-r--r--drivers/mailbox/qcom-ipcc.c4
-rw-r--r--drivers/md/bcache/btree.c5
-rw-r--r--drivers/md/bcache/stats.c2
-rw-r--r--drivers/md/bcache/super.c61
-rw-r--r--drivers/md/dm-bufio.c191
-rw-r--r--drivers/md/dm-core.h4
-rw-r--r--drivers/md/dm-delay.c19
-rw-r--r--drivers/md/dm-dust.c4
-rw-r--r--drivers/md/dm-ebs-target.c3
-rw-r--r--drivers/md/dm-flakey.c118
-rw-r--r--drivers/md/dm-integrity.c19
-rw-r--r--drivers/md/dm-ioctl.c1
-rw-r--r--drivers/md/dm-linear.c4
-rw-r--r--drivers/md/dm-log-writes.c4
-rw-r--r--drivers/md/dm-mpath.c245
-rw-r--r--drivers/md/dm-raid.c9
-rw-r--r--drivers/md/dm-raid1.c7
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-stripe.c5
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-table.c263
-rw-r--r--drivers/md/dm-vdo/dedupe.c2
-rw-r--r--drivers/md/dm-vdo/indexer/volume.c24
-rw-r--r--drivers/md/dm-verity-fec.c4
-rw-r--r--drivers/md/dm-verity-target.c15
-rw-r--r--drivers/md/dm-verity-verify-sig.c17
-rw-r--r--drivers/md/dm-writecache.c4
-rw-r--r--drivers/md/dm-zone.c98
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c73
-rw-r--r--drivers/md/dm.h6
-rw-r--r--drivers/md/md-bitmap.c35
-rw-r--r--drivers/md/md-bitmap.h17
-rw-r--r--drivers/md/md.c206
-rw-r--r--drivers/md/md.h18
-rw-r--r--drivers/md/raid1-10.c10
-rw-r--r--drivers/md/raid1.c22
-rw-r--r--drivers/md/raid10.c20
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c5
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c4
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c11
-rw-r--r--drivers/media/dvb-core/dmxdev.c3
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c4
-rw-r--r--drivers/media/dvb-frontends/dib8000.c5
-rw-r--r--drivers/media/i2c/Kconfig43
-rw-r--r--drivers/media/i2c/Makefile4
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c4
-rw-r--r--drivers/media/i2c/ccs-pll.c53
-rw-r--r--drivers/media/i2c/ccs-pll.h29
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c55
-rw-r--r--drivers/media/i2c/ccs/ccs-quirk.c3
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.c9
-rw-r--r--drivers/media/i2c/ccs/ccs.h2
-rw-r--r--drivers/media/i2c/ds90ub913.c92
-rw-r--r--drivers/media/i2c/ds90ub953.c252
-rw-r--r--drivers/media/i2c/ds90ub953.h104
-rw-r--r--drivers/media/i2c/ds90ub960.c2210
-rw-r--r--drivers/media/i2c/imx219.c38
-rw-r--r--drivers/media/i2c/imx283.c2
-rw-r--r--drivers/media/i2c/imx334.c1035
-rw-r--r--drivers/media/i2c/imx335.c5
-rw-r--r--drivers/media/i2c/lt6911uxe.c4
-rw-r--r--drivers/media/i2c/max96714.c2
-rw-r--r--drivers/media/i2c/max96717.c2
-rw-r--r--drivers/media/i2c/ov02c10.c1013
-rw-r--r--drivers/media/i2c/ov02e10.c969
-rw-r--r--drivers/media/i2c/ov08x40.c1324
-rw-r--r--drivers/media/i2c/ov13b10.c176
-rw-r--r--drivers/media/i2c/ov2740.c4
-rw-r--r--drivers/media/i2c/ov5675.c5
-rw-r--r--drivers/media/i2c/ov8856.c9
-rw-r--r--drivers/media/i2c/rdacm20.c7
-rw-r--r--drivers/media/i2c/rdacm21.c7
-rw-r--r--drivers/media/i2c/tc358743.c6
-rw-r--r--drivers/media/i2c/tvaudio.c2
-rw-r--r--drivers/media/i2c/vd55g1.c1965
-rw-r--r--drivers/media/i2c/vd56g3.c1586
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.c2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-bus.h7
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.c6
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-buttress.h5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.c4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.h3
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c45
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.h10
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c5
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.h8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.c8
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.h4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6.c13
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c2
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c21
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c17
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c4
-rw-r--r--drivers/media/pci/sta2x11/Kconfig16
-rw-r--r--drivers/media/pci/sta2x11/Makefile2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c1270
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.h29
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c13
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c2
-rw-r--r--drivers/media/pci/zoran/zoran_card.c2
-rw-r--r--drivers/media/pci/zoran/zr36016.c2
-rw-r--r--drivers/media/pci/zoran/zr36050.c2
-rw-r--r--drivers/media/pci/zoran/zr36060.c2
-rw-r--r--drivers/media/platform/amlogic/Kconfig1
-rw-r--r--drivers/media/platform/amlogic/Makefile2
-rw-r--r--drivers/media/platform/amlogic/c3/Kconfig5
-rw-r--r--drivers/media/platform/amlogic/c3/Makefile5
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Kconfig18
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Makefile10
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c804
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-common.h340
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-core.c641
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c421
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-params.c1008
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h618
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c892
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c326
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c842
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig16
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/Makefile3
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c828
-rw-r--r--drivers/media/platform/amphion/vdec.c2
-rw-r--r--drivers/media/platform/amphion/vpu.h1
-rw-r--r--drivers/media/platform/amphion/vpu_core.c7
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c39
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c8
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.c9
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c5
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h4
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c73
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h1
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h8
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c33
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h7
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c19
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c652
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c2
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c19
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c8
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h1
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c132
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h5
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c14
-rw-r--r--drivers/media/platform/qcom/camss/Makefile2
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-680.c422
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c131
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c28
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-680.c244
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss.c359
-rw-r--r--drivers/media/platform/qcom/camss/camss.h1
-rw-r--r--drivers/media/platform/qcom/iris/Makefile4
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c4
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h4
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c (renamed from drivers/media/platform/qcom/iris/iris_platform_sm8550.c)119
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h124
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8650.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c59
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c1
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3.c122
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c275
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c4
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h3
-rw-r--r--drivers/media/platform/qcom/venus/core.c16
-rw-r--r--drivers/media/platform/qcom/venus/core.h2
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c38
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c18
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c1
-rw-r--r--drivers/media/platform/renesas/Kconfig18
-rw-r--r--drivers/media/platform/renesas/Makefile2
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c8
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Kconfig18
-rw-r--r--drivers/media/platform/renesas/rcar-isp/Makefile4
-rw-r--r--drivers/media/platform/renesas/rcar-isp/csisp.c (renamed from drivers/media/platform/renesas/rcar-isp.c)57
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c8
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c182
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c23
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h41
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c139
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h91
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h39
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c165
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c13
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c295
-rw-r--r--drivers/media/platform/renesas/vsp1/Makefile2
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1.h4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_brx.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_dl.c7
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c30
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.h8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c70
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.c30
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.h3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hsit.c11
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.c121
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_iif.h29
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c187
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.h6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rpf.c38
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rwpf.c51
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_sru.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_uds.c9
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c50
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_wpf.c53
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h7
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c6
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c8
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.h4
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c12
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c16
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c5
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-debug.c8
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c14
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c2
-rw-r--r--drivers/media/platform/st/sti/delta/delta-debug.c8
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c18
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c10
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c14
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c2
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c266
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c157
-rw-r--r--drivers/media/platform/ti/cal/cal.c45
-rw-r--r--drivers/media/platform/ti/cal/cal.h3
-rw-r--r--drivers/media/platform/ti/davinci/vpif.c4
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c8
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c6
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c19
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c4
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c1
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c4
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c34
-rw-r--r--drivers/media/radio/radio-cadet.c2
-rw-r--r--drivers/media/rc/ene_ir.c2
-rw-r--r--drivers/media/rc/igorplugusb.c2
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c5
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c2
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c3
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c42
-rw-r--r--drivers/media/rc/rc-ir-raw.c3
-rw-r--r--drivers/media/rc/rc-main.c4
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.c2
-rw-r--r--drivers/media/test-drivers/vim2m.c327
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c20
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c8
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c13
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.c167
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-std.h6
-rw-r--r--drivers/media/usb/s2255/s2255drv.c2
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c91
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c38
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c115
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h5
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c99
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c14
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c7
-rw-r--r--drivers/memory/Kconfig23
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/bt1-l2-ctl.c2
-rw-r--r--drivers/memory/mtk-smi.c52
-rw-r--r--drivers/memory/omap-gpmc.c21
-rw-r--r--drivers/memory/renesas-rpc-if-regs.h147
-rw-r--r--drivers/memory/renesas-rpc-if.c714
-rw-r--r--drivers/memory/renesas-xspi-if-regs.h105
-rw-r--r--drivers/memory/stm32_omm.c479
-rw-r--r--drivers/memory/tegra/Kconfig8
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c5
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/memstick/host/jmb38x_ms.c2
-rw-r--r--drivers/memstick/host/r592.c2
-rw-r--r--drivers/memstick/host/tifm_ms.c2
-rw-r--r--drivers/mfd/88pm860x-core.c4
-rw-r--r--drivers/mfd/88pm886.c14
-rw-r--r--drivers/mfd/Kconfig55
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/aat2870-core.c4
-rw-r--r--drivers/mfd/ab8500-core.c6
-rw-r--r--drivers/mfd/arizona-irq.c3
-rw-r--r--drivers/mfd/as3722.c4
-rw-r--r--drivers/mfd/bcm590xx.c66
-rw-r--r--drivers/mfd/db8500-prcmu.c6
-rw-r--r--drivers/mfd/exynos-lpass.c31
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c5
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/max14577.c1
-rw-r--r--drivers/mfd/max77541.c2
-rw-r--r--drivers/mfd/max77705.c4
-rw-r--r--drivers/mfd/max77759.c690
-rw-r--r--drivers/mfd/max8925-core.c4
-rw-r--r--drivers/mfd/max8925-i2c.c1
-rw-r--r--drivers/mfd/max8997-irq.c4
-rw-r--r--drivers/mfd/max8998-irq.c2
-rw-r--r--drivers/mfd/mt6358-irq.c6
-rw-r--r--drivers/mfd/mt6397-irq.c6
-rw-r--r--drivers/mfd/qcom-pm8xxx.c6
-rw-r--r--drivers/mfd/rohm-bd96801.c565
-rw-r--r--drivers/mfd/rt5033.c6
-rw-r--r--drivers/mfd/sec-acpm.c442
-rw-r--r--drivers/mfd/sec-common.c301
-rw-r--r--drivers/mfd/sec-core.c481
-rw-r--r--drivers/mfd/sec-core.h23
-rw-r--r--drivers/mfd/sec-i2c.c239
-rw-r--r--drivers/mfd/sec-irq.c460
-rw-r--r--drivers/mfd/sm501.c50
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c5
-rw-r--r--drivers/mfd/stm32-lptimer.c33
-rw-r--r--drivers/mfd/stmfx.c2
-rw-r--r--drivers/mfd/stmpe-spi.c2
-rw-r--r--drivers/mfd/stmpe.c4
-rw-r--r--drivers/mfd/tc3589x.c6
-rw-r--r--drivers/mfd/tps65010.c9
-rw-r--r--drivers/mfd/tps65217.c2
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/mfd/twl4030-irq.c4
-rw-r--r--drivers/mfd/twl6030-irq.c5
-rw-r--r--drivers/mfd/ucb1x00-core.c7
-rw-r--r--drivers/mfd/wm831x-irq.c15
-rw-r--r--drivers/mfd/wm8994-irq.c4
-rw-r--r--drivers/misc/Kconfig14
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/amd-sbi/Kconfig18
-rw-r--r--drivers/misc/amd-sbi/Makefile4
-rw-r--r--drivers/misc/amd-sbi/rmi-core.c474
-rw-r--r--drivers/misc/amd-sbi/rmi-core.h74
-rw-r--r--drivers/misc/amd-sbi/rmi-hwmon.c120
-rw-r--r--drivers/misc/amd-sbi/rmi-i2c.c133
-rw-r--r--drivers/misc/bcm-vk/bcm_vk.h1
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c2
-rw-r--r--drivers/misc/cardreader/alcor_pci.c13
-rw-r--r--drivers/misc/cardreader/rts5264.c17
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c46
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h2
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c2
-rw-r--r--drivers/misc/cs5535-mfgpt.c1
-rw-r--r--drivers/misc/echo/Kconfig9
-rw-r--r--drivers/misc/echo/Makefile2
-rw-r--r--drivers/misc/echo/echo.c589
-rw-r--r--drivers/misc/echo/echo.h175
-rw-r--r--drivers/misc/echo/fir.h154
-rw-r--r--drivers/misc/echo/oslec.h81
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c75
-rw-r--r--drivers/misc/fastrpc.c16
-rw-r--r--drivers/misc/hi6421v600-irq.c5
-rw-r--r--drivers/misc/lis3lv02d/Kconfig4
-rw-r--r--drivers/misc/lkdtm/heap.c17
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c99
-rw-r--r--drivers/misc/mei/interrupt.c2
-rw-r--r--drivers/misc/mei/vsc-tp.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c3
-rw-r--r--drivers/misc/ti_fpc202.c438
-rw-r--r--drivers/misc/tps6594-pfsm.c3
-rw-r--r--drivers/misc/uacce/uacce.c40
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c11
-rw-r--r--drivers/mmc/core/block.c16
-rw-r--r--drivers/mmc/core/card.h6
-rw-r--r--drivers/mmc/core/core.c48
-rw-r--r--drivers/mmc/core/core.h10
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/host.h8
-rw-r--r--drivers/mmc/core/mmc.c103
-rw-r--r--drivers/mmc/core/mmc_ops.c6
-rw-r--r--drivers/mmc/core/mmc_ops.h2
-rw-r--r--drivers/mmc/core/mmc_test.c16
-rw-r--r--drivers/mmc/core/queue.c6
-rw-r--r--drivers/mmc/core/quirks.h10
-rw-r--r--drivers/mmc/core/sd.c65
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/core/slot-gpio.c8
-rw-r--r--drivers/mmc/host/Kconfig14
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/alcor.c3
-rw-r--r--drivers/mmc/host/atmel-mci.c5
-rw-r--r--drivers/mmc/host/bcm2835.c8
-rw-r--r--drivers/mmc/host/cavium-thunderx.c4
-rw-r--r--drivers/mmc/host/dw_mmc.c12
-rw-r--r--drivers/mmc/host/jz4740_mmc.c3
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c3
-rw-r--r--drivers/mmc/host/mtk-sd.c219
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c2
-rw-r--r--drivers/mmc/host/omap.c7
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c20
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c243
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c40
-rw-r--r--drivers/mmc/host/sdhci-of-k1.c304
-rw-r--r--drivers/mmc/host/sdhci-omap.c2
-rw-r--r--drivers/mmc/host/sdhci.c16
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_am654.c35
-rw-r--r--drivers/mmc/host/sunplus-mmc.c2
-rw-r--r--drivers/mmc/host/tifm_sd.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c6
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mmc/host/vub300.c8
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/most/most_usb.c2
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/nand/ecc-mxic.c2
-rw-r--r--drivers/mtd/nand/qpic_common.c8
-rw-r--r--drivers/mtd/nand/raw/Kconfig9
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c5
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c248
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c6
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c13
-rw-r--r--drivers/mtd/nand/raw/loongson1-nand-controller.c836
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c18
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c2
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c20
-rw-r--r--drivers/mtd/nand/spi/ato.c14
-rw-r--r--drivers/mtd/nand/spi/core.c21
-rw-r--r--drivers/mtd/nand/spi/esmt.c22
-rw-r--r--drivers/mtd/nand/spi/foresee.c16
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c60
-rw-r--r--drivers/mtd/nand/spi/macronix.c20
-rw-r--r--drivers/mtd/nand/spi/micron.c38
-rw-r--r--drivers/mtd/nand/spi/paragon.c20
-rw-r--r--drivers/mtd/nand/spi/skyhigh.c20
-rw-r--r--drivers/mtd/nand/spi/toshiba.c22
-rw-r--r--drivers/mtd/nand/spi/winbond.c128
-rw-r--r--drivers/mtd/nand/spi/xtx.c20
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/spi-nor/macronix.c73
-rw-r--r--drivers/mux/adg792a.c2
-rw-r--r--drivers/mux/adgs1408.c4
-rw-r--r--drivers/mux/gpio.c5
-rw-r--r--drivers/mux/mmio.c15
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/bareudp.c16
-rw-r--r--drivers/net/bonding/bond_alb.c8
-rw-r--r--drivers/net/bonding/bond_main.c180
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c8
-rw-r--r--drivers/net/can/dev/dev.c12
-rw-r--r--drivers/net/can/dev/netlink.c74
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c4
-rw-r--r--drivers/net/can/grcan.c4
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c10
-rw-r--r--drivers/net/can/kvaser_pciefd.c192
-rw-r--r--drivers/net/can/m_can/m_can.c8
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c9
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c6
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c280
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c4
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c2
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/slcan/slcan-core.c26
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c10
-rw-r--r--drivers/net/can/usb/esd_usb.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c4
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c6
-rw-r--r--drivers/net/can/usb/gs_usb.c8
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c6
-rw-r--r--drivers/net/can/xilinx_can.c16
-rw-r--r--drivers/net/dsa/b53/b53_common.c125
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/b53/b53_regs.h21
-rw-r--r--drivers/net/dsa/bcm_sf2.c1
-rw-r--r--drivers/net/dsa/dsa_loop.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c24
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h5
-rw-r--r--drivers/net/dsa/microchip/Kconfig1
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c194
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c274
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h44
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c30
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h7
-rw-r--r--drivers/net/dsa/mt7530-mmio.c1
-rw-r--r--drivers/net/dsa/mt7530.c270
-rw-r--r--drivers/net/dsa/mt7530.h60
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h16
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c11
-rw-r--r--drivers/net/dsa/ocelot/felix.c11
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c4
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c6
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c5
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c46
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h7
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c2
-rw-r--r--drivers/net/ethernet/agere/et131x.c3
-rw-r--r--drivers/net/ethernet/airoha/Kconfig7
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c533
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h103
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c178
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h4
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c519
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c9
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h214
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c5
-rw-r--r--drivers/net/ethernet/amd/a2065.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c3
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c4
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c7
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h122
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c268
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c123
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c204
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-smn.h30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h128
-rw-r--r--drivers/net/ethernet/apple/bmac.c62
-rw-r--r--drivers/net/ethernet/apple/mace.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c6
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c7
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c176
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h78
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c36
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c13
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h3
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c339
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c277
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h32
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c25
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c18
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c25
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c18
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c5
-rw-r--r--drivers/net/ethernet/cortina/gemini.c37
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c16
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c30
-rw-r--r--drivers/net/ethernet/faraday/Kconfig1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/fealnx.c4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c41
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c41
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig16
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c123
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h51
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c90
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h20
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c369
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c78
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c107
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c93
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.h3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c12
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c462
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp_private.h104
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c53
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c2
-rw-r--r--drivers/net/ethernet/huawei/Kconfig1
-rw-r--r--drivers/net/ethernet/huawei/Makefile1
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Kconfig20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c25
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h113
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c24
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h81
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h58
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c62
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c414
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c352
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h105
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c78
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c233
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h41
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h82
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h120
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c341
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c670
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h135
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c29
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h76
-rw-r--r--drivers/net/ethernet/ibm/Kconfig13
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c358
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h65
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c95
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h1
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c300
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c17
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h67
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c130
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c66
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c82
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c266
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c54
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h21
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c14
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c67
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c103
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c9
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c873
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h362
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c34
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c234
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h26
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c163
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h85
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c615
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h314
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c84
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c20
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h55
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c81
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c96
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c247
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h52
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c557
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c290
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c1339
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c257
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c707
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c285
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h175
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c4
-rw-r--r--drivers/net/ethernet/korina.c3
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c58
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h88
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c41
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c6
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c121
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h61
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c414
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1363
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c716
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c515
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c191
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/meta/Kconfig1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h6
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h34
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c258
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c178
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c316
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h56
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c335
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h48
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c47
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c10
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c42
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c62
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h11
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c63
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c21
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c27
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c19
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c132
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c33
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c48
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c4
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c4
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c2
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c99
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h17
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c7
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig1
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.h7
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c434
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c205
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h15
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c66
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c11
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c5
-rw-r--r--drivers/net/ethernet/seeq/ether3.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c3
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c3
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c2
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c5
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c146
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c88
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c295
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c174
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c164
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c374
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c30
-rw-r--r--drivers/net/ethernet/ti/cpsw.c26
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h6
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c43
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c16
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h33
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c2
-rw-r--r--drivers/net/ethernet/ti/tlan.c4
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c2
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c80
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c357
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c188
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c176
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h77
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c30
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c909
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h115
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c94
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c5
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c385
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h15
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c38
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c23
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c12
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c64
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c206
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c47
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h118
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c61
-rw-r--r--drivers/net/fddi/defza.c2
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/gtp.c18
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/hamradio/baycom_epp.c5
-rw-r--r--drivers/net/hamradio/scc.c14
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h13
-rw-r--r--drivers/net/hyperv/netvsc.c57
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c72
-rw-r--r--drivers/net/hyperv/rndis_filter.c24
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c1
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_mem.c21
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/macsec.c40
-rw-r--r--drivers/net/macvlan.c20
-rw-r--r--drivers/net/mctp/mctp-usb.c2
-rw-r--r--drivers/net/mdio/Kconfig48
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c2
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c522
-rw-r--r--drivers/net/mdio/mdio-thunder.c10
-rw-r--r--drivers/net/mdio/of_mdio.c2
-rw-r--r--drivers/net/netconsole.c3
-rw-r--r--drivers/net/netdevsim/ipsec.c15
-rw-r--r--drivers/net/netdevsim/netdev.c7
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/ovpn/Makefile22
-rw-r--r--drivers/net/ovpn/bind.c55
-rw-r--r--drivers/net/ovpn/bind.h101
-rw-r--r--drivers/net/ovpn/crypto.c210
-rw-r--r--drivers/net/ovpn/crypto.h145
-rw-r--r--drivers/net/ovpn/crypto_aead.c389
-rw-r--r--drivers/net/ovpn/crypto_aead.h29
-rw-r--r--drivers/net/ovpn/io.c458
-rw-r--r--drivers/net/ovpn/io.h34
-rw-r--r--drivers/net/ovpn/main.c279
-rw-r--r--drivers/net/ovpn/main.h14
-rw-r--r--drivers/net/ovpn/netlink-gen.c213
-rw-r--r--drivers/net/ovpn/netlink-gen.h41
-rw-r--r--drivers/net/ovpn/netlink.c1258
-rw-r--r--drivers/net/ovpn/netlink.h18
-rw-r--r--drivers/net/ovpn/ovpnpriv.h55
-rw-r--r--drivers/net/ovpn/peer.c1364
-rw-r--r--drivers/net/ovpn/peer.h163
-rw-r--r--drivers/net/ovpn/pktid.c129
-rw-r--r--drivers/net/ovpn/pktid.h86
-rw-r--r--drivers/net/ovpn/proto.h118
-rw-r--r--drivers/net/ovpn/skb.h61
-rw-r--r--drivers/net/ovpn/socket.c241
-rw-r--r--drivers/net/ovpn/socket.h49
-rw-r--r--drivers/net/ovpn/stats.c21
-rw-r--r--drivers/net/ovpn/stats.h47
-rw-r--r--drivers/net/ovpn/tcp.c601
-rw-r--r--drivers/net/ovpn/tcp.h37
-rw-r--r--drivers/net/ovpn/udp.c447
-rw-r--r--drivers/net/ovpn/udp.h25
-rw-r--r--drivers/net/pfcp.c23
-rw-r--r--drivers/net/phy/Kconfig29
-rw-r--r--drivers/net/phy/Makefile22
-rw-r--r--drivers/net/phy/air_en8811h.c103
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c6
-rw-r--r--drivers/net/phy/as21xxx.c1087
-rw-r--r--drivers/net/phy/bcm87xx.c14
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/dp83822.c33
-rw-r--r--drivers/net/phy/dp83867.c76
-rw-r--r--drivers/net/phy/fixed_phy.c40
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c111
-rw-r--r--drivers/net/phy/marvell10g.c12
-rw-r--r--drivers/net/phy/mdio_bus.c488
-rw-r--r--drivers/net/phy/mdio_bus_provider.c484
-rw-r--r--drivers/net/phy/mdio_device.c1
-rw-r--r--drivers/net/phy/mediatek/Kconfig20
-rw-r--r--drivers/net/phy/mediatek/Makefile3
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c321
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c91
-rw-r--r--drivers/net/phy/micrel.c30
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c5
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c20
-rw-r--r--drivers/net/phy/mxl-86110.c616
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c54
-rw-r--r--drivers/net/phy/nxp-tja11xx.c6
-rw-r--r--drivers/net/phy/phy_caps.c18
-rw-r--r--drivers/net/phy/phy_device.c102
-rw-r--r--drivers/net/phy/phylink.c7
-rw-r--r--drivers/net/phy/realtek/realtek_main.c337
-rw-r--r--drivers/net/phy/teranetics.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c25
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/team/team_core.c8
-rw-r--r--drivers/net/tun.c10
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/aqc111.c10
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c22
-rw-r--r--drivers/net/usb/asix_devices.c17
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/ch9200.c7
-rw-r--r--drivers/net/usb/lan78xx.c473
-rw-r--r--drivers/net/usb/r8152.c99
-rw-r--r--drivers/net/usb/sierra_net.c2
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/veth.c57
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c35
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c562
-rw-r--r--drivers/net/vxlan/vxlan_private.h11
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c20
-rw-r--r--drivers/net/wan/hdlc_cisco.c2
-rw-r--r--drivers/net/wan/hdlc_fr.c2
-rw-r--r--drivers/net/wan/hdlc_ppp.c2
-rw-r--r--drivers/net/wireguard/allowedips.c102
-rw-r--r--drivers/net/wireguard/allowedips.h4
-rw-r--r--drivers/net/wireguard/cookie.c4
-rw-r--r--drivers/net/wireguard/device.c1
-rw-r--r--drivers/net/wireguard/netlink.c47
-rw-r--r--drivers/net/wireguard/noise.c4
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c48
-rw-r--r--drivers/net/wireguard/timers.c17
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c331
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h20
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c148
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c30
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c179
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c50
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c49
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig10
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1155
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c105
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c341
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h179
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c537
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c154
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h53
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c1097
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c599
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c209
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c153
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h83
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c121
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h27
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c517
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h32
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1791
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h58
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c72
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h9
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c238
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c526
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c1371
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h299
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c19
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c46
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c308
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h87
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_crypto.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c378
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c168
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c170
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h498
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h)59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c237
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h241
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c129
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c65
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c159
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c204
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c)200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2361
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c187
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c292
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c174
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h1
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c13
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c5
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c42
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c77
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c44
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c51
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c158
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c196
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c195
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h51
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c13
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c35
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c61
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723ds.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c59
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821au.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c27
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c63
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c1037
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h190
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c418
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c493
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h144
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c174
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c480
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h94
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c58
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c38
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c36
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c131
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c147
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c30
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c296
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h31
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c2
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c2
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c12
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c11
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/net/xen-netfront.c3
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c3
-rw-r--r--drivers/nfc/pn533/pn533.c2
-rw-r--r--drivers/nfc/pn533/uart.c2
-rw-r--r--drivers/nfc/s3fwrn5/core.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.h2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.c2
-rw-r--r--drivers/nfc/s3fwrn5/nci.h2
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.c4
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.h4
-rw-r--r--drivers/nfc/s3fwrn5/s3fwrn5.h2
-rw-r--r--drivers/nfc/st-nci/ndlc.c4
-rw-r--r--drivers/nfc/st-nci/se.c7
-rw-r--r--drivers/nfc/st21nfca/se.c7
-rw-r--r--drivers/nfc/virtual_ncidev.c2
-rw-r--r--drivers/ntb/msi.c22
-rw-r--r--drivers/nvme/common/auth.c21
-rw-r--r--drivers/nvme/host/Kconfig6
-rw-r--r--drivers/nvme/host/auth.c30
-rw-r--r--drivers/nvme/host/constants.c2
-rw-r--r--drivers/nvme/host/core.c238
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/fabrics.h6
-rw-r--r--drivers/nvme/host/fc.c17
-rw-r--r--drivers/nvme/host/ioctl.c39
-rw-r--r--drivers/nvme/host/multipath.c213
-rw-r--r--drivers/nvme/host/nvme.h36
-rw-r--r--drivers/nvme/host/pci.c310
-rw-r--r--drivers/nvme/host/pr.c2
-rw-r--r--drivers/nvme/host/rdma.c4
-rw-r--r--drivers/nvme/host/sysfs.c35
-rw-r--r--drivers/nvme/host/tcp.c162
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/admin-cmd.c33
-rw-r--r--drivers/nvme/target/auth.c21
-rw-r--r--drivers/nvme/target/core.c105
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c12
-rw-r--r--drivers/nvme/target/fc.c98
-rw-r--r--drivers/nvme/target/fcloop.c439
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c11
-rw-r--r--drivers/nvme/target/loop.c29
-rw-r--r--drivers/nvme/target/nvmet.h24
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/nvme/target/pci-epf.c53
-rw-r--r--drivers/nvme/target/rdma.c10
-rw-r--r--drivers/nvme/target/tcp.c102
-rw-r--r--drivers/nvmem/Kconfig26
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/apple-spmi-nvmem.c62
-rw-r--r--drivers/nvmem/core.c68
-rw-r--r--drivers/nvmem/max77759-nvmem.c145
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c1
-rw-r--r--drivers/of/device.c31
-rw-r--r--drivers/of/fdt.c34
-rw-r--r--drivers/of/kexec.c42
-rw-r--r--drivers/of/of_reserved_mem.c80
-rw-r--r--drivers/of/unittest.c10
-rw-r--r--drivers/opp/core.c428
-rw-r--r--drivers/opp/cpu.c30
-rw-r--r--drivers/opp/of.c205
-rw-r--r--drivers/opp/opp.h1
-rw-r--r--drivers/parport/ieee1284.c2
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/bus.c4
-rw-r--r--drivers/pci/controller/Kconfig11
-rw-r--r--drivers/pci/controller/cadence/Kconfig16
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c40
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c36
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c124
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c12
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h25
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c8
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c4
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c213
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c7
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-debugfs.c252
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c30
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c83
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c29
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h32
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c106
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c3
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c4
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c12
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c11
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h2
-rw-r--r--drivers/pci/controller/pci-aardvark.c14
-rw-r--r--drivers/pci/controller/pci-ftpci100.c4
-rw-r--r--drivers/pci/controller/pci-host-common.c30
-rw-r--r--drivers/pci/controller/pci-host-common.h20
-rw-r--r--drivers/pci/controller/pci-host-generic.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c113
-rw-r--r--drivers/pci/controller/pci-mvebu.c32
-rw-r--r--drivers/pci/controller/pci-tegra.c63
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c2
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c1
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c53
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c4
-rw-r--r--drivers/pci/controller/pcie-altera.c2
-rw-r--r--drivers/pci/controller/pcie-apple.c312
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c4
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c6
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c9
-rw-r--r--drivers/pci/controller/pcie-mediatek.c6
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c8
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c10
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c4
-rw-r--r--drivers/pci/controller/pcie-rockchip.h7
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c10
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c14
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c11
-rw-r--r--drivers/pci/controller/pcie-xilinx.c5
-rw-r--r--drivers/pci/controller/plda/pcie-microchip-host.c1
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c16
-rw-r--r--drivers/pci/devres.c215
-rw-r--r--drivers/pci/ecam.c2
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c26
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c26
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c22
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c73
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c29
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c78
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c2
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/pci/iomap.c16
-rw-r--r--drivers/pci/msi/api.c8
-rw-r--r--drivers/pci/msi/irqdomain.c5
-rw-r--r--drivers/pci/msi/msi.c187
-rw-r--r--drivers/pci/msi/msi.h4
-rw-r--r--drivers/pci/of.c44
-rw-r--r--drivers/pci/p2pdma.c38
-rw-r--r--drivers/pci/pci-acpi.c23
-rw-r--r--drivers/pci/pci-driver.c8
-rw-r--r--drivers/pci/pci-sysfs.c4
-rw-r--r--drivers/pci/pci.c93
-rw-r--r--drivers/pci/pci.h82
-rw-r--r--drivers/pci/pcie/aer.c442
-rw-r--r--drivers/pci/pcie/bwctrl.c86
-rw-r--r--drivers/pci/pcie/dpc.c75
-rw-r--r--drivers/pci/pcie/err.c1
-rw-r--r--drivers/pci/pcie/ptm.c300
-rw-r--r--drivers/pci/pcie/tlp.c6
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--drivers/pci/pwrctrl/Kconfig22
-rw-r--r--drivers/pci/pwrctrl/Makefile8
-rw-r--r--drivers/pci/pwrctrl/core.c2
-rw-r--r--drivers/pci/quirks.c33
-rw-r--r--drivers/pci/setup-bus.c16
-rw-r--r--drivers/pci/tph.c44
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c2
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pd6729.c3
-rw-r--r--drivers/pcmcia/soc_common.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c3
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/amlogic/meson_ddr_pmu_core.c2
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c3
-rw-r--r--drivers/perf/arm-cmn.c18
-rw-r--r--drivers/perf/arm-ni.c40
-rw-r--r--drivers/perf/arm_pmuv3.c3
-rw-r--r--drivers/perf/arm_v6_pmu.c3
-rw-r--r--drivers/perf/arm_v7_pmu.c3
-rw-r--r--drivers/perf/arm_xscale_pmu.c6
-rw-r--r--drivers/phy/Kconfig8
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-pcie.c14
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb2.c11
-rw-r--r--drivers/phy/amlogic/phy-meson8b-usb2.c35
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c61
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h1
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c14
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c21
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c84
-rw-r--r--drivers/phy/freescale/phy-fsl-samsung-hdmi.c117
-rw-r--r--drivers/phy/marvell/Kconfig4
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c85
-rw-r--r--drivers/phy/phy-can-transceiver.c22
-rw-r--r--drivers/phy/phy-snps-eusb2.c627
-rw-r--r--drivers/phy/qualcomm/Kconfig9
-rw-r--r--drivers/phy/qualcomm/Makefile1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c90
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c27
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-eusb2.c442
-rw-r--r--drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c45
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c173
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c81
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c294
-rw-r--r--drivers/phy/samsung/Kconfig2
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c617
-rw-r--r--drivers/phy/starfive/phy-jh7110-usb.c7
-rw-r--r--drivers/phy/tegra/Kconfig2
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c46
-rw-r--r--drivers/phy/tegra/xusb.c8
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c70
-rw-r--r--drivers/pinctrl/Kconfig4
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c9
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c6
-rw-r--r--drivers/pinctrl/core.c29
-rw-r--r--drivers/pinctrl/freescale/Kconfig11
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx-scmi.c4
-rw-r--r--drivers/pinctrl/mediatek/Kconfig22
-rw-r--r--drivers/pinctrl/mediatek/Makefile2
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c35
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c19
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c18
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6893.c879
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8196.c1860
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c9
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c15
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h2283
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h3085
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c29
-rw-r--r--drivers/pinctrl/meson/Kconfig24
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c22
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c43
-rw-r--r--drivers/pinctrl/nomadik/Kconfig6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c12
-rw-r--r--drivers/pinctrl/pinconf.h17
-rw-r--r--drivers/pinctrl/pinctrl-amd.c56
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c30
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c20
-rw-r--r--drivers/pinctrl/pinctrl-at91.c21
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c35
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c17
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c8
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c8
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c8
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c17
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c8
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c26
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c1
-rw-r--r--drivers/pinctrl/pinctrl-single.c9
-rw-r--r--drivers/pinctrl/pinctrl-st.c5
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c13
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c23
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c40
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c80
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sar2130p.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c7
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c1
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c1
-rw-r--r--drivers/pinctrl/renesas/Kconfig1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c299
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c52
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c294
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h28
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c34
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h8
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c10
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c7
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c7
-rw-r--r--drivers/pinctrl/uniphier/Kconfig2
-rw-r--r--drivers/platform/arm64/Kconfig2
-rw-r--r--drivers/platform/arm64/acer-aspire1-ec.c10
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c2
-rw-r--r--drivers/platform/chrome/Kconfig5
-rw-r--r--drivers/platform/chrome/Makefile3
-rw-r--r--drivers/platform/chrome/chromeos_of_hw_prober.c33
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c52
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c24
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test_util.h5
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c6
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c24
-rw-r--r--drivers/platform/cznic/Kconfig17
-rw-r--r--drivers/platform/cznic/Makefile3
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-base.c4
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c21
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-keyctl.c162
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-trng.c17
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu.h33
-rw-r--r--drivers/platform/cznic/turris-signing-key.c193
-rw-r--r--drivers/platform/loongarch/loongson-laptop.c87
-rw-r--r--drivers/platform/mellanox/Kconfig13
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlx-platform.c1546
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c155
-rw-r--r--drivers/platform/mellanox/mlxreg-dpu.c613
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c8
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c112
-rw-r--r--drivers/platform/surface/Kconfig2
-rw-r--r--drivers/platform/x86/Kconfig40
-rw-r--r--drivers/platform/x86/Makefile12
-rw-r--r--drivers/platform/x86/acerhdf.c4
-rw-r--r--drivers/platform/x86/amd/Kconfig11
-rw-r--r--drivers/platform/x86/amd/Makefile1
-rw-r--r--drivers/platform/x86/amd/amd_isp4.c311
-rw-r--r--drivers/platform/x86/amd/hsmp/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp/Makefile1
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c274
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c41
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h10
-rw-r--r--drivers/platform/x86/amd/hsmp/hwmon.c121
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c16
-rw-r--r--drivers/platform/x86/amd/pmc/mp1_stb.c2
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c19
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c4
-rw-r--r--drivers/platform/x86/amd/pmf/core.c5
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c105
-rw-r--r--drivers/platform/x86/asus-wmi.c151
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c10
-rw-r--r--drivers/platform/x86/dasharo-acpi.c360
-rw-r--r--drivers/platform/x86/dell/Kconfig3
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c1099
-rw-r--r--drivers/platform/x86/dell/dell-pc.c67
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c246
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c2
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c10
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c33
-rw-r--r--drivers/platform/x86/ideapad-laptop.c19
-rw-r--r--drivers/platform/x86/intel/ifs/core.c5
-rw-r--r--drivers/platform/x86/intel/ifs/load.c21
-rw-r--r--drivers/platform/x86/intel/ifs/runtest.c17
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c9
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile3
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c173
-rw-r--r--drivers/platform/x86/intel/int3472/common.c9
-rw-r--r--drivers/platform/x86/intel/int3472/common.h131
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c69
-rw-r--r--drivers/platform/x86/intel/int3472/discrete_quirks.c21
-rw-r--r--drivers/platform/x86/intel/int3472/led.c3
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c3
-rw-r--r--drivers/platform/x86/intel/pmc/Kconfig4
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile8
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c16
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c7
-rw-r--r--drivers/platform/x86/intel/pmc/core.c250
-rw-r--r--drivers/platform/x86/intel/pmc/core.h29
-rw-r--r--drivers/platform/x86/intel/pmc/core_ssram.c332
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c10
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.c207
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.h24
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c38
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c15
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c106
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c40
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.h1
-rw-r--r--drivers/platform/x86/intel/turbo_max_3.c5
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c34
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h20
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c58
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c11
-rw-r--r--drivers/platform/x86/intel/vsec.c9
-rw-r--r--drivers/platform/x86/intel_ips.c38
-rw-r--r--drivers/platform/x86/oxpec.c1054
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/portwell-ec.c291
-rw-r--r--drivers/platform/x86/samsung-galaxybook.c1
-rw-r--r--drivers/platform/x86/silicom-platform.c11
-rw-r--r--drivers/platform/x86/sony-laptop.c175
-rw-r--r--drivers/platform/x86/think-lmi.c26
-rw-r--r--drivers/platform/x86/think-lmi.h1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c56
-rw-r--r--drivers/platform/x86/topstar-laptop.c4
-rw-r--r--drivers/platform/x86/tuxedo/Kconfig8
-rw-r--r--drivers/platform/x86/tuxedo/Makefile8
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Kconfig17
-rw-r--r--drivers/platform/x86/tuxedo/nb04/Makefile10
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_ab.c923
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.c91
-rw-r--r--drivers/platform/x86/tuxedo/nb04/wmi_util.h109
-rw-r--r--drivers/platform/x86/xo15-ebook.c10
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c78
-rw-r--r--drivers/pmdomain/arm/Kconfig6
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c16
-rw-r--r--drivers/pmdomain/core.c133
-rw-r--r--drivers/pmdomain/governor.c2
-rw-r--r--drivers/pmdomain/mediatek/mt6893-pm-domains.h585
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c17
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h2
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c16
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c5
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c5
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c48
-rw-r--r--drivers/pmdomain/sunxi/Kconfig10
-rw-r--r--drivers/pmdomain/sunxi/Makefile1
-rw-r--r--drivers/pmdomain/sunxi/sun50i-h6-prcm-ppu.c208
-rw-r--r--drivers/pmdomain/ti/omap_prm.c8
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/reset/Kconfig13
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c5
-rw-r--r--drivers/power/reset/reboot-mode.c25
-rw-r--r--drivers/power/reset/syscon-reboot.c98
-rw-r--r--drivers/power/reset/tdx-ec-poweroff.c150
-rw-r--r--drivers/power/supply/Kconfig37
-rw-r--r--drivers/power/supply/Makefile3
-rw-r--r--drivers/power/supply/bq24190_charger.c14
-rw-r--r--drivers/power/supply/bq27xxx_battery.c2
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c13
-rw-r--r--drivers/power/supply/chagall-battery.c291
-rw-r--r--drivers/power/supply/collie_battery.c1
-rw-r--r--drivers/power/supply/cros_charge-control.c23
-rw-r--r--drivers/power/supply/gpio-charger.c4
-rw-r--r--drivers/power/supply/huawei-gaokun-battery.c645
-rw-r--r--drivers/power/supply/max17040_battery.c5
-rw-r--r--drivers/power/supply/max77705_charger.c20
-rw-r--r--drivers/power/supply/max8971_charger.c752
-rw-r--r--drivers/power/supply/power_supply_sysfs.c32
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c4
-rw-r--r--drivers/power/supply/rk817_charger.c2
-rw-r--r--drivers/power/supply/rt9471.c12
-rw-r--r--drivers/power/supply/test_power.c21
-rw-r--r--drivers/power/supply/wm831x_power.c20
-rw-r--r--drivers/powercap/intel_rapl_common.c1
-rw-r--r--drivers/powercap/intel_rapl_msr.c7
-rw-r--r--drivers/pps/clients/pps-gpio.c2
-rw-r--r--drivers/ptp/Kconfig4
-rw-r--r--drivers/ptp/ptp_chardev.c16
-rw-r--r--drivers/ptp/ptp_clock.c3
-rw-r--r--drivers/ptp/ptp_clockmatrix.c14
-rw-r--r--drivers/ptp/ptp_fc3.c1
-rw-r--r--drivers/ptp/ptp_idt82p33.c15
-rw-r--r--drivers/ptp/ptp_ocp.c28
-rw-r--r--drivers/ptp/ptp_private.h12
-rw-r--r--drivers/pwm/Kconfig113
-rw-r--r--drivers/pwm/Makefile11
-rw-r--r--drivers/pwm/core.c118
-rw-r--r--drivers/pwm/pwm-adp5585.c1
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c23
-rw-r--r--drivers/pwm/pwm-loongson.c290
-rw-r--r--drivers/pwm/pwm-mc33xs2410.c391
-rw-r--r--drivers/pwm/pwm-meson.c123
-rw-r--r--drivers/pwm/pwm-pca9685.c8
-rw-r--r--drivers/pwm/pwm-pxa.c18
-rw-r--r--drivers/pwm/pwm-rzg2l-gpt.c447
-rw-r--r--drivers/pwm/pwm-stm32-lp.c219
-rw-r--r--drivers/pwm/pwm-stm32.c15
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c20
-rw-r--r--drivers/rapidio/rio.c103
-rw-r--r--drivers/rapidio/rio.h2
-rw-r--r--drivers/rapidio/rio_cm.c9
-rw-r--r--drivers/ras/amd/atl/internal.h4
-rw-r--r--drivers/regulator/Kconfig23
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/adp5055-regulator.c424
-rw-r--r--drivers/regulator/bcm590xx-regulator.c1289
-rw-r--r--drivers/regulator/bd96801-regulator.c455
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/da9121-regulator.c2
-rw-r--r--drivers/regulator/fan53555.c14
-rw-r--r--drivers/regulator/gpio-regulator.c10
-rw-r--r--drivers/regulator/max14577-regulator.c5
-rw-r--r--drivers/regulator/max20086-regulator.c17
-rw-r--r--drivers/regulator/pca9450-regulator.c27
-rw-r--r--drivers/regulator/pf9453-regulator.c3
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c69
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c83
-rw-r--r--drivers/regulator/s5m8767.c146
-rw-r--r--drivers/regulator/tps65219-regulator.c242
-rw-r--r--drivers/remoteproc/Makefile6
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c98
-rw-r--r--drivers/remoteproc/qcom_wcnss.c3
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c7
-rw-r--r--drivers/remoteproc/stm32_rproc.c8
-rw-r--r--drivers/remoteproc/ti_k3_common.c551
-rw-r--r--drivers/remoteproc/ti_k3_common.h118
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c616
-rw-r--r--drivers/remoteproc/ti_k3_m4_remoteproc.c583
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c1018
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c34
-rw-r--r--drivers/reset/Kconfig17
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-rzv2h-usb2phy.c236
-rw-r--r--drivers/reset/reset-th1520.c135
-rw-r--r--drivers/rpmsg/qcom_smd.c10
-rw-r--r--drivers/rpmsg/rpmsg_core.c63
-rw-r--r--drivers/rpmsg/rpmsg_internal.h6
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c24
-rw-r--r--drivers/rtc/Kconfig25
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/class.c2
-rw-r--r--drivers/rtc/dev.c2
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/lib.c24
-rw-r--r--drivers/rtc/lib_test.c27
-rw-r--r--drivers/rtc/rtc-at91rm9200.c1
-rw-r--r--drivers/rtc/rtc-cpcap.c1
-rw-r--r--drivers/rtc/rtc-cv1800.c218
-rw-r--r--drivers/rtc/rtc-da9063.c31
-rw-r--r--drivers/rtc/rtc-jz4740.c1
-rw-r--r--drivers/rtc/rtc-loongson.c8
-rw-r--r--drivers/rtc/rtc-m41t80.c78
-rw-r--r--drivers/rtc/rtc-mt6397.c1
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pm8xxx.c18
-rw-r--r--drivers/rtc/rtc-rzn1.c71
-rw-r--r--drivers/rtc/rtc-s32g.c385
-rw-r--r--drivers/rtc/rtc-s3c.c1
-rw-r--r--drivers/rtc/rtc-sh.c285
-rw-r--r--drivers/rtc/rtc-stm32.c1
-rw-r--r--drivers/rtc/rtc-test.c2
-rw-r--r--drivers/s390/block/dasd.c4
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/con3270.c19
-rw-r--r--drivers/s390/char/diag_ftp.c2
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/char/vmlogrdr.c4
-rw-r--r--drivers/s390/cio/device_fsm.c2
-rw-r--r--drivers/s390/cio/eadm_sch.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c76
-rw-r--r--drivers/s390/crypto/ap_bus.h30
-rw-r--r--drivers/s390/crypto/pkey_api.c50
-rw-r--r--drivers/s390/crypto/pkey_base.c34
-rw-r--r--drivers/s390/crypto/pkey_base.h37
-rw-r--r--drivers/s390/crypto/pkey_cca.c136
-rw-r--r--drivers/s390/crypto/pkey_ep11.c117
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c9
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c4
-rw-r--r--drivers/s390/crypto/pkey_uv.c44
-rw-r--r--drivers/s390/crypto/zcrypt_api.c167
-rw-r--r--drivers/s390/crypto/zcrypt_api.h16
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c486
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h49
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c39
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c454
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h27
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c36
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c109
-rw-r--r--drivers/s390/net/ctcm_mpc.c2
-rw-r--r--drivers/s390/net/fsm.c2
-rw-r--r--drivers/s390/net/ism_drv.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c14
-rw-r--r--drivers/s390/scsi/zfcp_erp.c4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c3
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/scsi/Kconfig3
-rw-r--r--drivers/scsi/aacraid/aacraid.h1
-rw-r--r--drivers/scsi/aacraid/commsup.c10
-rw-r--r--drivers/scsi/aha152x.c1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c4
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c6
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c4
-rw-r--r--drivers/scsi/bfa/bfad.c3
-rw-r--r--drivers/scsi/bnx2fc/Kconfig1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c4
-rw-r--r--drivers/scsi/bnx2i/Kconfig1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/dc395x.c699
-rw-r--r--drivers/scsi/elx/efct/efct_hw.c5
-rw-r--r--drivers/scsi/elx/efct/efct_xport.c2
-rw-r--r--drivers/scsi/elx/libefc/efc_els.c2
-rw-r--r--drivers/scsi/elx/libefc/efc_fabric.c2
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c6
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c2
-rw-r--r--drivers/scsi/fnic/fdls_disc.c8
-rw-r--r--drivers/scsi/fnic/fip.c16
-rw-r--r--drivers/scsi/fnic/fnic_main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h51
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c83
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c259
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c4
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c3
-rw-r--r--drivers/scsi/imm.c1
-rw-r--r--drivers/scsi/ipr.c8
-rw-r--r--drivers/scsi/isci/host.c6
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/isci/port.c2
-rw-r--r--drivers/scsi/isci/port_config.c4
-rw-r--r--drivers/scsi/isci/remote_device.c30
-rw-r--r--drivers/scsi/isci/remote_device.h15
-rw-r--r--drivers/scsi/libfc/fc_fcp.c4
-rw-r--r--drivers/scsi/libiscsi.c5
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c136
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c36
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c73
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mvsas/mv_64xx.h4
-rw-r--r--drivers/scsi/mvsas/mv_defs.h4
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c2
-rw-r--r--drivers/scsi/pmcraid.c6
-rw-r--r--drivers/scsi/ppa.c1
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qedi/qedi_dbg.c22
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h12
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c129
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_debug.c361
-rw-r--r--drivers/scsi/scsi_devinfo.c29
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_ioctl.c2
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c11
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sd_zbc.c6
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c143
-rw-r--r--drivers/scsi/storvsc_drv.c11
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/sh/intc/irqdomain.c5
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c461
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c17
-rw-r--r--drivers/soc/dove/pmu.c7
-rw-r--r--drivers/soc/fsl/Kconfig2
-rw-r--r--drivers/soc/fsl/qbman/qman.c2
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c19
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c42
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.h2
-rw-r--r--drivers/soc/imx/soc-imx8m.c177
-rw-r--r--drivers/soc/mediatek/mtk-dvfsrc.c53
-rw-r--r--drivers/soc/qcom/ice.c350
-rw-r--r--drivers/soc/qcom/llcc-qcom.c497
-rw-r--r--drivers/soc/qcom/pmic_glink.c4
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c30
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c11
-rw-r--r--drivers/soc/qcom/smem.c2
-rw-r--r--drivers/soc/qcom/smp2p.c4
-rw-r--r--drivers/soc/qcom/smsm.c2
-rw-r--r--drivers/soc/qcom/socinfo.c1
-rw-r--r--drivers/soc/renesas/Kconfig53
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r9a09g056-sys.c75
-rw-r--r--drivers/soc/renesas/rz-sysc.c3
-rw-r--r--drivers/soc/renesas/rz-sysc.h1
-rw-r--r--drivers/soc/samsung/exynos-pmu.c78
-rw-r--r--drivers/soc/samsung/exynos-pmu.h1
-rw-r--r--drivers/soc/samsung/exynos-usi.c2
-rw-r--r--drivers/soc/sophgo/Kconfig34
-rw-r--r--drivers/soc/sophgo/Makefile4
-rw-r--r--drivers/soc/sophgo/cv1800-rtcsys.c63
-rw-r--r--drivers/soc/sophgo/sg2044-topsys.c45
-rw-r--r--drivers/soc/tegra/pmc.c5
-rw-r--r--drivers/soc/ti/k3-ringacc.c2
-rw-r--r--drivers/soc/ti/k3-socinfo.c2
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c10
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c8
-rw-r--r--drivers/soc/vt8500/Kconfig19
-rw-r--r--drivers/soc/vt8500/Makefile2
-rw-r--r--drivers/soc/vt8500/wmt-socinfo.c125
-rw-r--r--drivers/soundwire/bus.c40
-rw-r--r--drivers/soundwire/bus_type.c10
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c7
-rw-r--r--drivers/soundwire/intel.h2
-rw-r--r--drivers/soundwire/intel_ace2x_debugfs.c6
-rw-r--r--drivers/soundwire/intel_init.c1
-rw-r--r--drivers/soundwire/irq.c6
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c26
-rw-r--r--drivers/spi/spi-amd-pci.c70
-rw-r--r--drivers/spi/spi-amd.c227
-rw-r--r--drivers/spi/spi-amd.h44
-rw-r--r--drivers/spi/spi-axi-spi-engine.c91
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c2
-rw-r--r--drivers/spi/spi-bcm63xx.c2
-rw-r--r--drivers/spi/spi-cadence-quadspi.c2
-rw-r--r--drivers/spi/spi-cavium-thunderx.c4
-rw-r--r--drivers/spi/spi-cs42l43.c4
-rw-r--r--drivers/spi/spi-dw-core.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c46
-rw-r--r--drivers/spi/spi-fsl-qspi.c81
-rw-r--r--drivers/spi/spi-gpio.c2
-rw-r--r--drivers/spi/spi-intel-pci.c8
-rw-r--r--drivers/spi/spi-intel-platform.c9
-rw-r--r--drivers/spi/spi-intel.c9
-rw-r--r--drivers/spi/spi-intel.h4
-rw-r--r--drivers/spi/spi-loongson-core.c1
-rw-r--r--drivers/spi/spi-loopback-test.c10
-rw-r--r--drivers/spi/spi-meson-spicc.c241
-rw-r--r--drivers/spi/spi-nxp-fspi.c189
-rw-r--r--drivers/spi/spi-offload.c7
-rw-r--r--drivers/spi/spi-omap2-mcspi.c30
-rw-r--r--drivers/spi/spi-pci1xxxx.c70
-rw-r--r--drivers/spi/spi-qpic-snand.c181
-rw-r--r--drivers/spi/spi-rpc-if.c16
-rw-r--r--drivers/spi/spi-sh-msiof.c397
-rw-r--r--drivers/spi/spi-stm32-ospi.c24
-rw-r--r--drivers/spi/spi-sun4i.c5
-rw-r--r--drivers/spi/spi-tegra114.c6
-rw-r--r--drivers/spi/spi-tegra210-quad.c290
-rw-r--r--drivers/spi/spi-xcomm.c8
-rw-r--r--drivers/spi/spi.c19
-rw-r--r--drivers/spmi/Kconfig9
-rw-r--r--drivers/spmi/Makefile1
-rw-r--r--drivers/spmi/spmi-apple-controller.c168
-rw-r--r--drivers/spmi/spmi-pmic-arb.c2
-rw-r--r--drivers/ssb/driver_gpio.c8
-rw-r--r--drivers/staging/fbtft/Kconfig35
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.c86
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.h28
-rw-r--r--drivers/staging/gpib/agilent_82357a/agilent_82357a.c79
-rw-r--r--drivers/staging/gpib/agilent_82357a/agilent_82357a.h4
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.c74
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.h26
-rw-r--r--drivers/staging/gpib/cec/cec_gpib.c26
-rw-r--r--drivers/staging/gpib/common/gpib_os.c321
-rw-r--r--drivers/staging/gpib/common/iblib.c64
-rw-r--r--drivers/staging/gpib/common/ibsys.h13
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.c107
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.h29
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.c144
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.h6
-rw-r--r--drivers/staging/gpib/gpio/gpib_bitbang.c142
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.c35
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.c52
-rw-r--r--drivers/staging/gpib/include/gpibP.h14
-rw-r--r--drivers/staging/gpib/include/gpib_proto.h31
-rw-r--r--drivers/staging/gpib/include/gpib_types.h162
-rw-r--r--drivers/staging/gpib/include/nec7210.h40
-rw-r--r--drivers/staging/gpib/include/nec7210_registers.h3
-rw-r--r--drivers/staging/gpib/include/tms9914.h37
-rw-r--r--drivers/staging/gpib/ines/ines.h43
-rw-r--r--drivers/staging/gpib/ines/ines_gpib.c216
-rw-r--r--drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c145
-rw-r--r--drivers/staging/gpib/nec7210/nec7210.c72
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.c73
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.h32
-rw-r--r--drivers/staging/gpib/pc2/pc2_gpib.c46
-rw-r--r--drivers/staging/gpib/tms9914/tms9914.c69
-rw-r--r--drivers/staging/gpib/tnt4882/tnt4882_gpib.c106
-rw-r--r--drivers/staging/gpib/uapi/gpib.h (renamed from drivers/staging/gpib/uapi/gpib_user.h)226
-rw-r--r--drivers/staging/gpib/uapi/gpib_ioctl.h134
-rw-r--r--drivers/staging/greybus/camera.c4
-rw-r--r--drivers/staging/greybus/fw-management.c48
-rw-r--r--drivers/staging/greybus/gpio.c16
-rw-r--r--drivers/staging/iio/accel/adis16203.c2
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c2
-rw-r--r--drivers/staging/iio/frequency/ad9832.c102
-rw-r--r--drivers/staging/iio/frequency/ad9832.h1
-rw-r--r--drivers/staging/iio/frequency/ad9834.c4
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c44
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c1612
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h1768
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c73
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h244
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c92
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c60
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c3
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c2
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c2
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c64
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c239
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.h18
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c18
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c13
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c18
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c68
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c22
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c11
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c81
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c33
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pwr_seq.h2
-rw-r--r--drivers/staging/rtl8723bs/include/sta_info.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c13
-rw-r--r--drivers/staging/sm750fb/Makefile3
-rw-r--r--drivers/staging/sm750fb/TODO3
-rw-r--r--drivers/staging/sm750fb/ddk750.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c62
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.h57
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c247
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.h12
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h8
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c408
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h174
-rw-r--r--drivers/staging/sm750fb/sm750.c24
-rw-r--r--drivers/staging/sm750fb/sm750.h30
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c14
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h12
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c38
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c6
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c14
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c7
-rw-r--r--drivers/target/target_core_configfs.c20
-rw-r--r--drivers/target/target_core_device.c89
-rw-r--r--drivers/target/target_core_pr.c4
-rw-r--r--drivers/target/target_core_spc.c134
-rw-r--r--drivers/target/target_core_stat.c69
-rw-r--r--drivers/target/target_core_transport.c119
-rw-r--r--drivers/target/target_core_user.c4
-rw-r--r--drivers/tee/amdtee/core.c16
-rw-r--r--drivers/tee/optee/smc_abi.c3
-rw-r--r--drivers/tee/tee_core.c11
-rw-r--r--drivers/thermal/Kconfig11
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/airoha_thermal.c489
-rw-r--r--drivers/thermal/amlogic_thermal.c16
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/platform_temperature_control.c243
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c18
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c5
-rw-r--r--drivers/thermal/intel/intel_hfi.c14
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c4
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c5
-rw-r--r--drivers/thermal/intel/therm_throt.c10
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c18
-rw-r--r--drivers/thermal/qcom/lmh.c3
-rw-r--r--drivers/thermal/qcom/tsens-v1.c62
-rw-r--r--drivers/thermal/qcom/tsens.c27
-rw-r--r--drivers/thermal/qcom/tsens.h4
-rw-r--r--drivers/thermal/tegra/soctherm.c2
-rw-r--r--drivers/thunderbolt/ctl.c5
-rw-r--r--drivers/thunderbolt/domain.c2
-rw-r--r--drivers/thunderbolt/icm.c36
-rw-r--r--drivers/thunderbolt/switch.c1
-rw-r--r--drivers/thunderbolt/tb.c22
-rw-r--r--drivers/thunderbolt/tb.h14
-rw-r--r--drivers/thunderbolt/tb_msgs.h1
-rw-r--r--drivers/thunderbolt/tunnel.c92
-rw-r--r--drivers/thunderbolt/tunnel.h23
-rw-r--r--drivers/thunderbolt/usb4.c18
-rw-r--r--drivers/thunderbolt/usb4_port.c56
-rw-r--r--drivers/tty/ipwireless/hardware.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c3
-rw-r--r--drivers/tty/mxser.c4
-rw-r--r--drivers/tty/n_gsm.c8
-rw-r--r--drivers/tty/serdev/core.c8
-rw-r--r--drivers/tty/serial/8250/8250.h6
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c3
-rw-r--r--drivers/tty/serial/8250/8250_core.c6
-rw-r--r--drivers/tty/serial/8250/8250_early.c2
-rw-r--r--drivers/tty/serial/8250/8250_ni.c89
-rw-r--r--drivers/tty/serial/8250/8250_of.c15
-rw-r--r--drivers/tty/serial/8250/8250_omap.c25
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c10
-rw-r--r--drivers/tty/serial/8250/8250_port.c16
-rw-r--r--drivers/tty/serial/8250/8250_rsa.c2
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/altera_uart.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c4
-rw-r--r--drivers/tty/serial/atmel_serial.c4
-rw-r--r--drivers/tty/serial/fsl_lpuart.c7
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c1
-rw-r--r--drivers/tty/serial/lantiq.c4
-rw-r--r--drivers/tty/serial/liteuart.c2
-rw-r--r--drivers/tty/serial/max3100.c5
-rw-r--r--drivers/tty/serial/max310x.c7
-rw-r--r--drivers/tty/serial/milbeaut_usio.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c25
-rw-r--r--drivers/tty/serial/sa1100.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c6
-rw-r--r--drivers/tty/serial/sc16is7xx.c7
-rw-r--r--drivers/tty/serial/sccnxp.c2
-rw-r--r--drivers/tty/serial/serial_core.c97
-rw-r--r--drivers/tty/serial/sh-sci-common.h167
-rw-r--r--drivers/tty/serial/sh-sci.c632
-rw-r--r--drivers/tty/serial/sh-sci.h2
-rw-r--r--drivers/tty/serial/sifive.c88
-rw-r--r--drivers/tty/serial/tegra-utc.c2
-rw-r--r--drivers/tty/serial/uartlite.c25
-rw-r--r--drivers/tty/synclink_gt.c4
-rw-r--r--drivers/tty/sysrq.c3
-rw-r--r--drivers/tty/tty_io.c96
-rw-r--r--drivers/tty/tty_ioctl.c50
-rw-r--r--drivers/tty/tty_port.c20
-rw-r--r--drivers/tty/vcc.c4
-rw-r--r--drivers/tty/vt/.gitignore3
-rw-r--r--drivers/tty/vt/Makefile34
-rw-r--r--drivers/tty/vt/consolemap.c2
-rwxr-xr-xdrivers/tty/vt/gen_ucs_fallback_table.py360
-rwxr-xr-xdrivers/tty/vt/gen_ucs_recompose_table.py257
-rwxr-xr-xdrivers/tty/vt/gen_ucs_width_table.py307
-rw-r--r--drivers/tty/vt/keyboard.c37
-rw-r--r--drivers/tty/vt/selection.c31
-rw-r--r--drivers/tty/vt/ucs.c251
-rw-r--r--drivers/tty/vt/ucs_fallback_table.h_shipped3346
-rw-r--r--drivers/tty/vt/ucs_recompose_table.h_shipped102
-rw-r--r--drivers/tty/vt/ucs_width_table.h_shipped453
-rw-r--r--drivers/tty/vt/vt.c244
-rw-r--r--drivers/tty/vt/vt_ioctl.c18
-rw-r--r--drivers/ufs/core/ufs-mcq.c6
-rw-r--r--drivers/ufs/core/ufs-sysfs.c133
-rw-r--r--drivers/ufs/core/ufshcd.c113
-rw-r--r--drivers/ufs/host/ufs-qcom.c403
-rw-r--r--drivers/ufs/host/ufs-qcom.h11
-rw-r--r--drivers/uio/uio_hv_generic.c39
-rw-r--r--drivers/usb/atm/cxacru.c4
-rw-r--r--drivers/usb/atm/speedtch.c10
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c21
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c37
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c77
-rw-r--r--drivers/usb/class/cdc-wdm.c23
-rw-r--r--drivers/usb/class/usbtmc.c21
-rw-r--r--drivers/usb/common/usb-conn-gpio.c25
-rw-r--r--drivers/usb/core/config.c2
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c92
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/usb-acpi.c2
-rw-r--r--drivers/usb/core/usb.c14
-rw-r--r--drivers/usb/dwc2/gadget.c8
-rw-r--r--drivers/usb/dwc2/hcd.c2
-rw-r--r--drivers/usb/dwc2/hcd_queue.c2
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c197
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c18
-rw-r--r--drivers/usb/dwc3/dwc3-qcom-legacy.c935
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c284
-rw-r--r--drivers/usb/dwc3/glue.h36
-rw-r--r--drivers/usb/dwc3/host.c3
-rw-r--r--drivers/usb/gadget/epautoconf.c2
-rw-r--r--drivers/usb/gadget/function/f_hid.c150
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h2
-rw-r--r--drivers/usb/gadget/function/f_midi2.c2
-rw-r--r--drivers/usb/gadget/function/f_serial.c7
-rw-r--r--drivers/usb/gadget/function/f_tcm.c4
-rw-r--r--drivers/usb/gadget/function/u_hid.h2
-rw-r--r--drivers/usb/gadget/function/u_serial.c50
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h4
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/Kconfig44
-rw-r--r--drivers/usb/gadget/udc/Makefile5
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c2
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c3
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c1516
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.h675
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h317
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c2062
-rw-r--r--drivers/usb/gadget/udc/mv_udc.h309
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2426
-rw-r--r--drivers/usb/gadget/udc/net2272.c2723
-rw-r--r--drivers/usb/gadget/udc/net2272.h584
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c2
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c6
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/host/Kconfig11
-rw-r--r--drivers/usb/host/Makefile4
-rw-r--r--drivers/usb/host/ehci-fsl.c25
-rw-r--r--drivers/usb/host/ehci-platform.c3
-rw-r--r--drivers/usb/host/ohci-hcd.c3
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/r8a66597-hcd.c7
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/xen-hcd.c2
-rw-r--r--drivers/usb/host/xhci-caps.h4
-rw-r--r--drivers/usb/host/xhci-debugfs.c108
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/host/xhci-mem.c245
-rw-r--r--drivers/usb/host/xhci-plat.c2
-rw-r--r--drivers/usb/host/xhci-ring.c74
-rw-r--r--drivers/usb/host/xhci-sideband.c457
-rw-r--r--drivers/usb/host/xhci.c211
-rw-r--r--drivers/usb/host/xhci.h112
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c2
-rw-r--r--drivers/usb/misc/onboard_usb_dev.c113
-rw-r--r--drivers/usb/misc/onboard_usb_dev.h19
-rw-r--r--drivers/usb/misc/usbtest.c4
-rw-r--r--drivers/usb/musb/da8xx.c3
-rw-r--r--drivers/usb/musb/mpfs.c3
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/phy/Kconfig12
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/phy-mv-usb.c881
-rw-r--r--drivers/usb/renesas_usbhs/common.c54
-rw-r--r--drivers/usb/serial/bus.c2
-rw-r--r--drivers/usb/serial/garmin_gps.c3
-rw-r--r--drivers/usb/serial/mos7840.c4
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c5
-rw-r--r--drivers/usb/storage/realtek_cr.c3
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c20
-rw-r--r--drivers/usb/typec/altmodes/displayport.c4
-rw-r--r--drivers/usb/typec/bus.c2
-rw-r--r--drivers/usb/typec/mux.c4
-rw-r--r--drivers/usb/typec/mux/fsa4480.c5
-rw-r--r--drivers/usb/typec/port-mapper.c23
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c5
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c8
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c194
-rw-r--r--drivers/usb/typec/tipd/core.c2
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h2
-rw-r--r--drivers/usb/typec/tipd/trace.h2
-rw-r--r--drivers/usb/typec/ucsi/Kconfig11
-rw-r--r--drivers/usb/typec/ucsi/Makefile1
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c4
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h4
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c526
-rw-r--r--drivers/usb/usbip/vudc_transfer.c2
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c17
-rw-r--r--drivers/vfio/pci/Kconfig2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c121
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h14
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c371
-rw-r--r--drivers/vfio/pci/mlx5/cmd.h35
-rw-r--r--drivers/vfio/pci/mlx5/main.c87
-rw-r--r--drivers/vfio/vfio_iommu_type1.c51
-rw-r--r--drivers/vhost/net.c30
-rw-r--r--drivers/vhost/scsi.c190
-rw-r--r--drivers/vhost/vhost.c28
-rw-r--r--drivers/vhost/vringh.c19
-rw-r--r--drivers/video/backlight/backlight.c93
-rw-r--r--drivers/video/backlight/lcd.c108
-rw-r--r--drivers/video/backlight/qcom-wled.c6
-rw-r--r--drivers/video/console/dummycon.c18
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbdev/arkfb.c5
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c2
-rw-r--r--drivers/video/fbdev/carminefb.c8
-rw-r--r--drivers/video/fbdev/carminefb.h2
-rw-r--r--drivers/video/fbdev/core/fb_backlight.c12
-rw-r--r--drivers/video/fbdev/core/fb_info.c1
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbcvt.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c104
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c8
-rw-r--r--drivers/video/fbdev/geode/display_gx.c1
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c3
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c23
-rw-r--r--drivers/video/fbdev/geode/suspend_gx.c10
-rw-r--r--drivers/video/fbdev/geode/video_gx.c16
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c2
-rw-r--r--drivers/video/fbdev/via/via-gpio.c10
-rw-r--r--drivers/video/screen_info_generic.c36
-rw-r--r--drivers/video/screen_info_pci.c79
-rw-r--r--drivers/virt/acrn/irqfd.c2
-rw-r--r--drivers/virt/coco/Kconfig6
-rw-r--r--drivers/virt/coco/Makefile2
-rw-r--r--drivers/virt/coco/arm-cca-guest/arm-cca-guest.c8
-rw-r--r--drivers/virt/coco/guest/Kconfig17
-rw-r--r--drivers/virt/coco/guest/Makefile4
-rw-r--r--drivers/virt/coco/guest/report.c (renamed from drivers/virt/coco/tsm.c)63
-rw-r--r--drivers/virt/coco/guest/tsm-mr.c251
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c12
-rw-r--r--drivers/virt/coco/tdx-guest/Kconfig1
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c259
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c2
-rw-r--r--drivers/virtio/Kconfig64
-rw-r--r--drivers/virtio/Makefile5
-rw-r--r--drivers/virtio/virtio_pci_modern.c13
-rw-r--r--drivers/virtio/virtio_rtc_arm.c23
-rw-r--r--drivers/virtio/virtio_rtc_class.c262
-rw-r--r--drivers/virtio/virtio_rtc_driver.c1407
-rw-r--r--drivers/virtio/virtio_rtc_internal.h122
-rw-r--r--drivers/virtio/virtio_rtc_ptp.c347
-rw-r--r--drivers/w1/slaves/w1_ds2406.c12
-rw-r--r--drivers/w1/w1_netlink.c42
-rw-r--r--drivers/watchdog/Kconfig26
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/apple_wdt.c7
-rw-r--r--drivers/watchdog/arm_smc_wdt.c17
-rw-r--r--drivers/watchdog/at91sam9_wdt.c2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c2
-rw-r--r--drivers/watchdog/cros_ec_wdt.c30
-rw-r--r--drivers/watchdog/da9052_wdt.c27
-rw-r--r--drivers/watchdog/diag288_wdt.c53
-rw-r--r--drivers/watchdog/exar_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c25
-rw-r--r--drivers/watchdog/intel_oc_wdt.c233
-rw-r--r--drivers/watchdog/lenovo_se30_wdt.c2
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c3
-rw-r--r--drivers/watchdog/pcwd_usb.c6
-rw-r--r--drivers/watchdog/pretimeout_noop.c2
-rw-r--r--drivers/watchdog/pretimeout_panic.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c7
-rw-r--r--drivers/watchdog/s32g_wdt.c315
-rw-r--r--drivers/watchdog/s3c2410_wdt.c39
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/stm32_iwdg.c2
-rw-r--r--drivers/watchdog/wdt_pci.c2
-rw-r--r--drivers/xen/balloon.c13
-rw-r--r--drivers/xen/swiotlb-xen.c1
6335 files changed, 264311 insertions, 119834 deletions
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
index 5119bccd1917..ad8ac6e315b6 100644
--- a/drivers/accel/amdxdna/TODO
+++ b/drivers/accel/amdxdna/TODO
@@ -1,3 +1,2 @@
-- Add import and export BO support
- Add debugfs support
- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 00d215ac866e..e04549f64d69 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -758,27 +758,42 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
- struct mm_struct *mm = abo->mem.notifier.mm;
- struct hmm_range range = { 0 };
+ struct amdxdna_umap *mapp;
unsigned long timeout;
+ struct mm_struct *mm;
+ bool found;
int ret;
- XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
- abo->mem.userptr, abo->mem.size);
- range.notifier = &abo->mem.notifier;
- range.start = abo->mem.userptr;
- range.end = abo->mem.userptr + abo->mem.size;
- range.hmm_pfns = abo->mem.pfns;
- range.default_flags = HMM_PFN_REQ_FAULT;
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+again:
+ found = false;
+ down_write(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (mapp->invalid) {
+ found = true;
+ break;
+ }
+ }
- if (!mmget_not_zero(mm))
+ if (!found) {
+ abo->mem.map_invalid = false;
+ up_write(&xdna->notifier_lock);
+ return 0;
+ }
+ kref_get(&mapp->refcnt);
+ up_write(&xdna->notifier_lock);
+
+ XDNA_DBG(xdna, "populate memory range %lx %lx",
+ mapp->vma->vm_start, mapp->vma->vm_end);
+ mm = mapp->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ amdxdna_umap_put(mapp);
return -EFAULT;
+ }
- timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
-again:
- range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
+ mapp->range.notifier_seq = mmu_interval_read_begin(&mapp->notifier);
mmap_read_lock(mm);
- ret = hmm_range_fault(&range);
+ ret = hmm_range_fault(&mapp->range);
mmap_read_unlock(mm);
if (ret) {
if (time_after(jiffies, timeout)) {
@@ -786,21 +801,27 @@ again:
goto put_mm;
}
- if (ret == -EBUSY)
+ if (ret == -EBUSY) {
+ amdxdna_umap_put(mapp);
goto again;
+ }
goto put_mm;
}
- down_read(&xdna->notifier_lock);
- if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
- up_read(&xdna->notifier_lock);
+ down_write(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&mapp->notifier, mapp->range.notifier_seq)) {
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
goto again;
}
- abo->mem.map_invalid = false;
- up_read(&xdna->notifier_lock);
+ mapp->invalid = false;
+ up_write(&xdna->notifier_lock);
+ amdxdna_umap_put(mapp);
+ goto again;
put_mm:
+ amdxdna_umap_put(mapp);
mmput(mm);
return ret;
}
@@ -908,10 +929,6 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
struct drm_gem_object *gobj = to_gobj(abo);
long ret;
- down_write(&xdna->notifier_lock);
- abo->mem.map_invalid = true;
- mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
- up_write(&xdna->notifier_lock);
ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
true, MAX_SCHEDULE_TIMEOUT);
if (!ret || ret == -ERESTARTSYS)
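
Aside: a minimal userspace C sketch of the loop shape the reworked aie2_populate_range() uses above. The scan restarts from `again:` after revalidating each invalid entry and returns only once a full pass finds none. Locking, refcounting and hmm_range_fault() are elided; a plain array stands in for the kernel's per-BO umap list.

#include <stdbool.h>
#include <stdio.h>

struct umap { bool invalid; };

/* stands in for taking the notifier lock plus the hmm_range_fault() retry */
static void revalidate(struct umap *m)
{
	m->invalid = false;
}

static int populate_range(struct umap *maps, int n)
{
	int i;

again:
	for (i = 0; i < n; i++) {
		if (maps[i].invalid) {
			revalidate(&maps[i]);
			/* another mapping may have been invalidated
			 * concurrently, so rescan from the start */
			goto again;
		}
	}
	return 0;	/* one clean full pass: everything valid */
}

int main(void)
{
	struct umap maps[3] = { { true }, { false }, { true } };

	printf("populate_range -> %d\n", populate_range(maps, 3));
	return 0;
}
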
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index bf4219e32cc1..82412eec9a4b 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -525,7 +525,7 @@ aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
if (!payload)
return -EINVAL;
- if (!slot_cf_has_space(offset, payload_len))
+ if (!slot_has_space(*buf, offset, payload_len))
return -ENOSPC;
buf->cu_idx = cu_idx;
@@ -558,7 +558,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
return -EINVAL;
- if (!slot_dpu_has_space(offset, arg_sz))
+ if (!slot_has_space(*buf, offset, arg_sz))
return -ENOSPC;
buf->inst_buf_addr = sn->buffer;
@@ -569,7 +569,7 @@ aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
memcpy(buf->args, sn->prop_args, arg_sz);
/* Accurate buf size to hint firmware to do necessary copy */
- *size += sizeof(*buf) + arg_sz;
+ *size = sizeof(*buf) + arg_sz;
return 0;
}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
index 4e02e744b470..6df9065b13f6 100644
--- a/drivers/accel/amdxdna/aie2_msg_priv.h
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -319,18 +319,16 @@ struct async_event_msg_resp {
} __packed;
#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
-#define slot_cf_has_space(offset, payload_size) \
- (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
- offsetof(struct cmd_chain_slot_execbuf_cf, args[0]))
+#define slot_has_space(slot, offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \
+ sizeof(typeof(slot)))
+
struct cmd_chain_slot_execbuf_cf {
__u32 cu_idx;
__u32 arg_cnt;
__u32 args[] __counted_by(arg_cnt);
};
-#define slot_dpu_has_space(offset, payload_size) \
- (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
- offsetof(struct cmd_chain_slot_dpu, args[0]))
struct cmd_chain_slot_dpu {
__u64 inst_buf_addr;
__u32 inst_size;
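
Aside: the unified slot_has_space() macro above bounds a payload by the concrete slot's header size via sizeof(typeof(slot)), replacing the two per-layout macros. A small userspace demonstration follows; the two structs are simplified stand-ins, not the exact kernel layouts, and typeof is the GNU C extension the kernel already relies on.

#include <stdint.h>
#include <stdio.h>

#define MAX_CHAIN_CMDBUF_SIZE 4096

/* simplified stand-ins for the two slot layouts */
struct slot_cf {
	uint32_t cu_idx;
	uint32_t arg_cnt;
	uint32_t args[];
};

struct slot_dpu {
	uint64_t inst_buf_addr;
	uint32_t inst_size;
	uint32_t arg_cnt;
	uint32_t args[];
};

/* same shape as the macro in the diff */
#define slot_has_space(slot, offset, payload_size) \
	(MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \
	 sizeof(typeof(slot)))

int main(void)
{
	struct slot_cf *cf = NULL;
	struct slot_dpu *dpu = NULL;

	/* sizeof/typeof never evaluate their operand, so *cf is safe here,
	 * mirroring the kernel's slot_has_space(*buf, ...) call sites */
	printf("cf  header %zu: 4088B payload fits? %d\n",
	       sizeof(*cf), slot_has_space(*cf, 0, 4088));
	printf("dpu header %zu: 4088B payload fits? %d\n",
	       sizeof(*dpu), slot_has_space(*dpu, 0, 4088));
	return 0;
}

The 8-byte cf header lets a 4088-byte payload fit exactly, while the 16-byte dpu header makes the same payload overflow; one macro covers both because the slot type carries its own header size.
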
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index 5a058e565b01..c6cf7068d23c 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -512,12 +512,6 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto release_fw;
}
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret) {
- XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret);
- goto free_irq;
- }
-
psp_conf.fw_size = fw->size;
psp_conf.fw_buf = fw->data;
for (i = 0; i < PSP_MAX_REGS; i++)
@@ -526,14 +520,14 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto disable_sva;
+ goto free_irq;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto disable_sva;
+ goto free_irq;
}
ret = aie2_mgmt_fw_query(ndev);
@@ -584,8 +578,6 @@ async_event_free:
aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-disable_sva:
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
free_irq:
pci_free_irq_vectors(pdev);
release_fw:
@@ -601,7 +593,6 @@ static void aie2_fini(struct amdxdna_dev *xdna)
aie2_hw_stop(xdna);
aie2_error_async_events_free(ndev);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_free_irq_vectors(pdev);
}
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
index dc3a072ce3b6..f28a060a8810 100644
--- a/drivers/accel/amdxdna/aie2_psp.c
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -126,8 +126,8 @@ struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *
psp->ddev = ddev;
memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
- psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN;
- psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz, GFP_KERNEL);
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN);
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz + PSP_FW_ALIGN, GFP_KERNEL);
if (!psp->fw_buffer) {
drm_err(ddev, "no memory for fw buffer");
return NULL;
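
Aside: the aie2_psp.c change above keeps fw_buf_sz as the aligned firmware size and moves the extra PSP_FW_ALIGN padding into the allocation only, so an aligned pointer inside the buffer still has fw_buf_sz usable bytes. A userspace illustration of this over-allocate-then-align pattern; the PSP_FW_ALIGN value here is made up and the ALIGN macro merely mirrors the kernel's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PSP_FW_ALIGN 256u	/* illustrative value, not the kernel's */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	size_t fw_size = 1000;
	size_t fw_buf_sz = ALIGN(fw_size, PSP_FW_ALIGN);	/* 1024 */
	unsigned char *raw, *fw;

	/* one extra alignment unit so the first aligned address inside
	 * the buffer still has fw_buf_sz usable bytes after it */
	raw = malloc(fw_buf_sz + PSP_FW_ALIGN);
	if (!raw)
		return 1;

	fw = (unsigned char *)ALIGN((uintptr_t)raw, PSP_FW_ALIGN);
	printf("raw=%p fw=%p usable=%zu\n", (void *)raw, (void *)fw, fw_buf_sz);
	free(raw);
	return 0;
}
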
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index 43442b9e273b..be073224bd69 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -496,11 +496,11 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
struct amdxdna_drm_exec_cmd *args)
{
struct amdxdna_dev *xdna = client->xdna;
- u32 *arg_bo_hdls;
+ u32 *arg_bo_hdls = NULL;
u32 cmd_bo_hdl;
int ret;
- if (!args->arg_count || args->arg_count > MAX_ARG_COUNT) {
+ if (args->arg_count > MAX_ARG_COUNT) {
XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
return -EINVAL;
}
@@ -512,14 +512,16 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
}
cmd_bo_hdl = (u32)args->cmd_handles;
- arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
- if (!arg_bo_hdls)
- return -ENOMEM;
- ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
- args->arg_count * sizeof(u32));
- if (ret) {
- ret = -EFAULT;
- goto free_cmd_bo_hdls;
+ if (args->arg_count) {
+ arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
+ if (!arg_bo_hdls)
+ return -ENOMEM;
+ ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
+ args->arg_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_cmd_bo_hdls;
+ }
}
ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
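
Aside: the amdxdna_ctx.c change above makes a zero arg_count legal by allocating and copying the handle array only when arguments were actually passed. A userspace sketch of that guard; names mirror the diff, MAX_ARG_COUNT is an illustrative bound, and copy_in() is a stand-in for copy_from_user() returning 0 on success.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ARG_COUNT 4096	/* illustrative bound */

/* stand-in for copy_from_user(): returns 0 on success */
static int copy_in(uint32_t *dst, const uint32_t *src, size_t n)
{
	memcpy(dst, src, n * sizeof(*dst));
	return 0;
}

static int submit_execbuf(const uint32_t *user_args, uint32_t arg_count)
{
	uint32_t *arg_bo_hdls = NULL;
	int ret = 0;

	if (arg_count > MAX_ARG_COUNT)	/* zero is now a legal count */
		return -EINVAL;

	if (arg_count) {
		arg_bo_hdls = calloc(arg_count, sizeof(*arg_bo_hdls));
		if (!arg_bo_hdls)
			return -ENOMEM;
		if (copy_in(arg_bo_hdls, user_args, arg_count)) {
			ret = -EFAULT;
			goto out;
		}
	}
	/* ... would submit the command with arg_bo_hdls here ... */
out:
	free(arg_bo_hdls);	/* free(NULL) is a no-op */
	return ret;
}

int main(void)
{
	uint32_t hdls[2] = { 3, 4 };

	return submit_execbuf(hdls, 2) || submit_execbuf(NULL, 0);
}
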
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 606433d73236..26831ec69f89 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -9,7 +9,10 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
#include <linux/iosys-map.h>
+#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include "amdxdna_ctx.h"
@@ -18,6 +21,8 @@
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+MODULE_IMPORT_NS("DMA_BUF");
+
static int
amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
{
@@ -55,57 +60,38 @@ amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
return 0;
}
-static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
-{
- struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
- struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
-
- XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
- if (abo->pinned)
- amdxdna_gem_unpin(abo);
-
- if (abo->type == AMDXDNA_BO_DEV) {
- mutex_lock(&abo->client->mm_lock);
- drm_mm_remove_node(&abo->mm_node);
- mutex_unlock(&abo->client->mm_lock);
-
- vunmap(abo->mem.kva);
- drm_gem_object_put(to_gobj(abo->dev_heap));
- drm_gem_object_release(gobj);
- mutex_destroy(&abo->lock);
- kfree(abo);
- return;
- }
-
- if (abo->type == AMDXDNA_BO_DEV_HEAP)
- drm_mm_takedown(&abo->mm);
-
- drm_gem_vunmap_unlocked(gobj, &map);
- mutex_destroy(&abo->lock);
- drm_gem_shmem_free(&abo->base);
-}
-
-static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
- .free = amdxdna_gem_obj_free,
-};
-
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
- struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
- mem.notifier);
- struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp = container_of(mni, struct amdxdna_umap, notifier);
+ struct amdxdna_gem_obj *abo = mapp->abo;
+ struct amdxdna_dev *xdna;
- XDNA_DBG(xdna, "Invalid range 0x%llx, 0x%lx, type %d",
- abo->mem.userptr, abo->mem.size, abo->type);
+ xdna = to_xdna_dev(to_gobj(abo)->dev);
+ XDNA_DBG(xdna, "Invalidating range 0x%lx, 0x%lx, type %d",
+ mapp->vma->vm_start, mapp->vma->vm_end, abo->type);
if (!mmu_notifier_range_blockable(range))
return false;
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mapp->invalid = true;
+ mmu_interval_set_seq(&mapp->notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
+
xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+ if (range->event == MMU_NOTIFY_UNMAP) {
+ down_write(&xdna->notifier_lock);
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ up_write(&xdna->notifier_lock);
+ }
+
return true;
}
@@ -113,102 +99,310 @@ static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
.invalidate = amdxdna_hmm_invalidate,
};
-static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct amdxdna_umap *mapp;
+
+ down_read(&xdna->notifier_lock);
+ list_for_each_entry(mapp, &abo->mem.umap_list, node) {
+ if (!vma || mapp->vma == vma) {
+ if (!mapp->unmapped) {
+ queue_work(xdna->notifier_wq, &mapp->hmm_unreg_work);
+ mapp->unmapped = true;
+ }
+ if (vma)
+ break;
+ }
+ }
+ up_read(&xdna->notifier_lock);
+}
- if (!xdna->dev_info->ops->hmm_invalidate)
- return;
+static void amdxdna_umap_release(struct kref *ref)
+{
+ struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);
+ struct vm_area_struct *vma = mapp->vma;
+ struct amdxdna_dev *xdna;
+
+ mmu_interval_notifier_remove(&mapp->notifier);
+ if (is_import_bo(mapp->abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_clear_unevictable(vma->vm_file->f_mapping);
+
+ xdna = to_xdna_dev(to_gobj(mapp->abo)->dev);
+ down_write(&xdna->notifier_lock);
+ list_del(&mapp->node);
+ up_write(&xdna->notifier_lock);
+
+ kvfree(mapp->range.hmm_pfns);
+ kfree(mapp);
+}
+
+void amdxdna_umap_put(struct amdxdna_umap *mapp)
+{
+ kref_put(&mapp->refcnt, amdxdna_umap_release);
+}
+
+static void amdxdna_hmm_unreg_work(struct work_struct *work)
+{
+ struct amdxdna_umap *mapp = container_of(work, struct amdxdna_umap,
+ hmm_unreg_work);
- mmu_interval_notifier_remove(&abo->mem.notifier);
- kvfree(abo->mem.pfns);
- abo->mem.pfns = NULL;
+ amdxdna_umap_put(mapp);
}
-static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
- size_t len)
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long len = vma->vm_end - vma->vm_start;
+ unsigned long addr = vma->vm_start;
+ struct amdxdna_umap *mapp;
u32 nr_pages;
int ret;
if (!xdna->dev_info->ops->hmm_invalidate)
return 0;
- if (abo->mem.pfns)
- return -EEXIST;
+ mapp = kzalloc(sizeof(*mapp), GFP_KERNEL);
+ if (!mapp)
+ return -ENOMEM;
nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
- abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
- GFP_KERNEL);
- if (!abo->mem.pfns)
- return -ENOMEM;
+ mapp->range.hmm_pfns = kvcalloc(nr_pages, sizeof(*mapp->range.hmm_pfns),
+ GFP_KERNEL);
+ if (!mapp->range.hmm_pfns) {
+ ret = -ENOMEM;
+ goto free_map;
+ }
- ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
+ ret = mmu_interval_notifier_insert_locked(&mapp->notifier,
current->mm,
addr,
len,
&amdxdna_hmm_ops);
if (ret) {
XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
- kvfree(abo->mem.pfns);
+ goto free_pfns;
}
- abo->mem.userptr = addr;
+ mapp->range.notifier = &mapp->notifier;
+ mapp->range.start = vma->vm_start;
+ mapp->range.end = vma->vm_end;
+ mapp->range.default_flags = HMM_PFN_REQ_FAULT;
+ mapp->vma = vma;
+ mapp->abo = abo;
+ kref_init(&mapp->refcnt);
+
+ if (abo->mem.userptr == AMDXDNA_INVALID_ADDR)
+ abo->mem.userptr = addr;
+ INIT_WORK(&mapp->hmm_unreg_work, amdxdna_hmm_unreg_work);
+ if (is_import_bo(abo) && vma->vm_file && vma->vm_file->f_mapping)
+ mapping_set_unevictable(vma->vm_file->f_mapping);
+
+ down_write(&xdna->notifier_lock);
+ list_add_tail(&mapp->node, &abo->mem.umap_list);
+ up_write(&xdna->notifier_lock);
+
+ return 0;
+
+free_pfns:
+ kvfree(mapp->range.hmm_pfns);
+free_map:
+ kfree(mapp);
return ret;
}
+static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ unsigned long num_pages = vma_pages(vma);
+ unsigned long offset = 0;
+ int ret;
+
+ if (!is_import_bo(abo)) {
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed shmem mmap %d", ret);
+ return ret;
+ }
+
+ /* The buffer is based on memory pages. Fix the flag. */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages %d", ret);
+ vma->vm_ops->close(vma);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+ ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
+ return ret;
+ }
+
+ do {
+ vm_fault_t fault_ret;
+
+ fault_ret = handle_mm_fault(vma, vma->vm_start + offset,
+ FAULT_FLAG_WRITE, NULL);
+ if (fault_ret & VM_FAULT_ERROR) {
+ vma->vm_ops->close(vma);
+ XDNA_ERR(xdna, "Fault in page failed");
+ return -EFAULT;
+ }
+
+ offset += PAGE_SIZE;
+ } while (--num_pages);
+
+	/* Drop the reference drm_gem_mmap_obj() acquired. */
+ drm_gem_object_put(to_gobj(abo));
+
+ return 0;
+}
+
static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
struct vm_area_struct *vma)
{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- unsigned long num_pages;
int ret;
- ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
+ ret = amdxdna_hmm_register(abo, vma);
if (ret)
return ret;
+ ret = amdxdna_insert_pages(abo, vma);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed insert pages, ret %d", ret);
+ goto hmm_unreg;
+ }
+
+ XDNA_DBG(xdna, "BO map_offset 0x%llx type %d userptr 0x%lx size 0x%lx",
+ drm_vma_node_offset_addr(&gobj->vma_node), abo->type,
+ vma->vm_start, gobj->size);
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo, vma);
+ return ret;
+}
+
+static int amdxdna_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages = vma_pages(vma);
+ int ret;
+
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
+ vma->vm_private_data = gobj;
+
+ drm_gem_object_get(gobj);
ret = drm_gem_shmem_mmap(&abo->base, vma);
if (ret)
- goto hmm_unreg;
+ goto put_obj;
- num_pages = gobj->size >> PAGE_SHIFT;
- /* Try to insert the pages */
+ /* The buffer is based on memory pages. Fix the flag. */
vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
- ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages,
+ &num_pages);
if (ret)
- XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
+ goto close_vma;
return 0;
-hmm_unreg:
- amdxdna_hmm_unregister(abo);
+close_vma:
+ vma->vm_ops->close(vma);
+put_obj:
+ drm_gem_object_put(gobj);
return ret;
}
-static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
+static const struct dma_buf_ops amdxdna_dmabuf_ops = {
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = amdxdna_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
- return drm_gem_shmem_vm_ops.fault(vmf);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &amdxdna_dmabuf_ops;
+ exp_info.size = gobj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gobj;
+ exp_info.resv = gobj->resv;
+
+ return drm_gem_dmabuf_export(gobj->dev, &exp_info);
}
-static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
+static void amdxdna_imported_obj_free(struct amdxdna_gem_obj *abo)
{
- drm_gem_shmem_vm_ops.open(vma);
+ dma_buf_unmap_attachment_unlocked(abo->attach, abo->base.sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(abo->dma_buf, abo->attach);
+ dma_buf_put(abo->dma_buf);
+ drm_gem_object_release(to_gobj(abo));
+ kfree(abo);
}
-static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
- struct drm_gem_object *gobj = vma->vm_private_data;
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+
+ amdxdna_hmm_unregister(abo, NULL);
+ flush_workqueue(xdna->notifier_wq);
+
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
- amdxdna_hmm_unregister(to_xdna_obj(gobj));
- drm_gem_shmem_vm_ops.close(vma);
+ if (abo->type == AMDXDNA_BO_DEV) {
+ mutex_lock(&abo->client->mm_lock);
+ drm_mm_remove_node(&abo->mm_node);
+ mutex_unlock(&abo->client->mm_lock);
+
+ vunmap(abo->mem.kva);
+ drm_gem_object_put(to_gobj(abo->dev_heap));
+ drm_gem_object_release(gobj);
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+ return;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ drm_gem_vunmap(gobj, &map);
+ mutex_destroy(&abo->lock);
+
+ if (is_import_bo(abo)) {
+ amdxdna_imported_obj_free(abo);
+ return;
+ }
+
+ drm_gem_shmem_free(&abo->base);
}
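Note the teardown ordering at the top of amdxdna_gem_obj_free(): every HMM mapping is unregistered first (the NULL vma argument presumably covering everything on umap_list), and only then is the ordered notifier workqueue flushed, so any hmm_unreg_work still queued for this BO has completed before type-specific state is torn down and the object is freed.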
-static const struct vm_operations_struct amdxdna_gem_vm_ops = {
- .fault = amdxdna_gem_vm_fault,
- .open = amdxdna_gem_vm_open,
- .close = amdxdna_gem_vm_close,
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_obj_free,
};
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -220,7 +414,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = amdxdna_gem_obj_mmap,
- .vm_ops = &amdxdna_gem_vm_ops,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+ .export = amdxdna_gem_prime_export,
};
static struct amdxdna_gem_obj *
@@ -239,6 +434,7 @@ amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
abo->mem.userptr = AMDXDNA_INVALID_ADDR;
abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
abo->mem.size = size;
+ INIT_LIST_HEAD(&abo->mem.umap_list);
return abo;
}
@@ -258,6 +454,51 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
return to_gobj(abo);
}
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ struct sg_table *sgt;
+ int ret;
+
+ get_dma_buf(dma_buf);
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto put_buf;
+ }
+
+ sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ gobj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(gobj)) {
+ ret = PTR_ERR(gobj);
+ goto fail_unmap;
+ }
+
+ abo = to_xdna_obj(gobj);
+ abo->attach = attach;
+ abo->dma_buf = dma_buf;
+
+ return gobj;
+
+fail_unmap:
+ dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+put_buf:
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+
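The import path follows the canonical dma-buf consumer sequence, with the error unwind undoing each step in reverse. A condensed, self-contained sketch of that sequence (import_sgt is an illustrative name):

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

static struct sg_table *import_sgt(struct device *dev, struct dma_buf *buf,
				   struct dma_buf_attachment **attachp)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	get_dma_buf(buf);			/* hold a ref while attached */

	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach)) {
		sgt = ERR_CAST(attach);
		goto put;
	}

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		goto detach;

	*attachp = attach;			/* caller detaches on free */
	return sgt;

detach:
	dma_buf_detach(buf, attach);
put:
	dma_buf_put(buf);
	return sgt;
}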
static struct amdxdna_gem_obj *
amdxdna_drm_alloc_shmem(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
@@ -417,7 +658,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
+ ret = drm_gem_vmap(to_gobj(abo), &map);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
@@ -483,6 +724,9 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
int ret;
+ if (is_import_bo(abo))
+ return 0;
+
switch (abo->type) {
case AMDXDNA_BO_SHMEM:
case AMDXDNA_BO_DEV_HEAP:
@@ -515,6 +759,9 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
+ if (is_import_bo(abo))
+ return;
+
if (abo->type == AMDXDNA_BO_DEV)
abo = abo->dev_heap;
@@ -606,7 +853,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
goto put_obj;
}
- if (abo->type == AMDXDNA_BO_DEV)
+ if (is_import_bo(abo))
+ drm_clflush_sg(abo->base.sgt);
+ else if (abo->type == AMDXDNA_BO_DEV)
drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
else
drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
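The three-way split in the sync ioctl reflects how each BO class is backed: imported dma-bufs carry only the exporter's sg_table (no local page array), so they are flushed with drm_clflush_sg(); device BOs flush the heap pages they were carved from; ordinary shmem BOs flush their own page array.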
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index 8ccc0375dd9d..aee97e971d6d 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -6,6 +6,20 @@
#ifndef _AMDXDNA_GEM_H_
#define _AMDXDNA_GEM_H_
+#include <linux/hmm.h>
+
+struct amdxdna_umap {
+ struct vm_area_struct *vma;
+ struct mmu_interval_notifier notifier;
+ struct hmm_range range;
+ struct work_struct hmm_unreg_work;
+ struct amdxdna_gem_obj *abo;
+ struct list_head node;
+ struct kref refcnt;
+ bool invalid;
+ bool unmapped;
+};
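Each mapping object is reference-counted with a kref so it can outlive the VMA that created it (the notifier and the deferred hmm_unreg_work may still hold it). A lifecycle sketch, with illustrative helper names rather than the driver's actual ones:

#include <linux/kref.h>
#include <linux/slab.h>

static void umap_release(struct kref *ref)
{
	struct amdxdna_umap *mapp = container_of(ref, struct amdxdna_umap, refcnt);

	kfree(mapp);	/* real code would also tear down notifier state */
}

static inline void umap_get(struct amdxdna_umap *mapp)
{
	kref_get(&mapp->refcnt);
}

static inline void umap_put_sketch(struct amdxdna_umap *mapp)
{
	kref_put(&mapp->refcnt, umap_release);	/* frees on last put */
}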
+
struct amdxdna_mem {
u64 userptr;
void *kva;
@@ -13,8 +27,7 @@ struct amdxdna_mem {
size_t size;
struct page **pages;
u32 nr_pages;
- struct mmu_interval_notifier notifier;
- unsigned long *pfns;
+ struct list_head umap_list;
bool map_invalid;
};
@@ -31,9 +44,12 @@ struct amdxdna_gem_obj {
struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
u32 assigned_hwctx;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *attach;
};
#define to_gobj(obj) (&(obj)->base.base)
+#define is_import_bo(obj) ((obj)->attach)
static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
{
@@ -47,8 +63,12 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
drm_gem_object_put(to_gobj(abo));
}
+void amdxdna_umap_put(struct amdxdna_umap *mapp);
+
struct drm_gem_object *
amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct drm_gem_object *
+amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index f5b8497cf5ad..f2bf1d374cc7 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -226,6 +226,7 @@ const struct drm_driver amdxdna_drm_drv = {
.num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
.gem_create_object = amdxdna_gem_create_object_cb,
+ .gem_prime_import = amdxdna_gem_prime_import,
};
static const struct amdxdna_dev_info *
@@ -266,12 +267,16 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
fs_reclaim_release(GFP_KERNEL);
}
+ xdna->notifier_wq = alloc_ordered_workqueue("notifier_wq", 0);
+ if (!xdna->notifier_wq)
+ return -ENOMEM;
+
mutex_lock(&xdna->dev_lock);
ret = xdna->dev_info->ops->init(xdna);
mutex_unlock(&xdna->dev_lock);
if (ret) {
XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
- return ret;
+ goto destroy_notifier_wq;
}
ret = amdxdna_sysfs_init(xdna);
@@ -301,6 +306,8 @@ failed_dev_fini:
mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->fini(xdna);
mutex_unlock(&xdna->dev_lock);
+destroy_notifier_wq:
+ destroy_workqueue(xdna->notifier_wq);
return ret;
}
@@ -310,6 +317,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct amdxdna_client *client;
+ destroy_workqueue(xdna->notifier_wq);
+
pm_runtime_get_noresume(dev);
pm_runtime_forbid(dev);
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index 37848a8d8031..ab79600911aa 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -6,6 +6,7 @@
#ifndef _AMDXDNA_PCI_DRV_H_
#define _AMDXDNA_PCI_DRV_H_
+#include <linux/workqueue.h>
#include <linux/xarray.h>
#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
@@ -98,6 +99,7 @@ struct amdxdna_dev {
struct list_head client_list;
struct amdxdna_fw_ver fw_ver;
struct rw_semaphore notifier_lock; /* for mmu notifier */
+ struct workqueue_struct *notifier_wq;
};
/*
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index be85336107f9..1919fbb169c7 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -6,7 +6,7 @@
config DRM_ACCEL_HABANALABS
tristate "HabanaLabs AI accelerators"
depends on DRM_ACCEL
- depends on X86_64
+ depends on X86 && X86_64
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 8729a0c57d78..dc80ca921d90 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -17,8 +17,6 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <asm/msr.h>
-
/* make sure there is space for all the signed info */
static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ);
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index f0dad0c9ce33..cd24ccd20ba6 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -455,7 +455,7 @@ priority_bands_fops_write(struct file *file, const char __user *user_buf, size_t
if (ret < 0)
return ret;
- buf[size] = '\0';
+ buf[ret] = '\0';
ret = sscanf(buf, "%u %u %u %u", &band, &grace_period, &process_grace_period,
&process_quantum);
if (ret != 4)
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index eff1d3ca075f..0e7748c5e117 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -374,6 +374,9 @@ int ivpu_boot(struct ivpu_device *vdev)
{
int ret;
+ drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+
/* Update boot params located at first 4KB of FW memory */
ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
@@ -573,6 +576,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
+ atomic_set(&vdev->job_timeout_counter, 0);
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 92753effb1c9..5497e7030e91 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -154,6 +154,7 @@ struct ivpu_device {
struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
struct xarray submitted_jobs_xa;
struct ivpu_ipc_consumer job_done_consumer;
+ atomic_t job_timeout_counter;
atomic64_t unique_id_counter;
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index ccaaf6c100c0..9db741695401 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -55,18 +55,18 @@ static struct {
int gen;
const char *name;
} fw_names[] = {
- { IVPU_HW_IP_37XX, "vpu_37xx.bin" },
+ { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v1.bin" },
{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
- { IVPU_HW_IP_40XX, "vpu_40xx.bin" },
+ { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v1.bin" },
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
- { IVPU_HW_IP_50XX, "vpu_50xx.bin" },
+ { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
};
/* Production fw_names from the table above */
-MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
-MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
-MODULE_FIRMWARE("intel/vpu/vpu_50xx_v0.0.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");
static int ivpu_fw_request(struct ivpu_device *vdev)
{
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 1d0b2bd9d65c..9a3935be1c05 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -39,6 +39,7 @@ struct ivpu_fw_info {
u64 read_only_addr;
u32 read_only_size;
u32 sched_mode;
+ u64 last_heartbeat;
};
int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 8741c73b92ce..59cfcf3eaded 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -28,9 +28,19 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
{
ivpu_dbg(vdev, BO,
"%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
- action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+ action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
- (bool)bo->base.base.import_attach);
+ (bool)drm_gem_is_imported(&bo->base.base));
+}
+
+static inline int ivpu_bo_lock(struct ivpu_bo *bo)
+{
+ return dma_resv_lock(bo->base.base.resv, NULL);
+}
+
+static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
+{
+ dma_resv_unlock(bo->base.base.resv);
}
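These helpers retire the driver-private bo->lock in favor of the GEM object's reservation lock, so BO state is serialized under the same lock the shmem helpers and dma-buf importers already take. Usage sketch:

static void bo_state_sketch(struct ivpu_bo *bo)
{
	ivpu_bo_lock(bo);	/* dma_resv_lock() with a NULL ww context */
	/* bo->ctx, bo->vpu_addr and bo->mmu_mapped are stable here */
	ivpu_bo_unlock(bo);
}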
/*
@@ -43,22 +53,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct sg_table *sgt;
int ret = 0;
- mutex_lock(&bo->lock);
-
ivpu_dbg_bo(vdev, bo, "pin");
- drm_WARN_ON(&vdev->drm, !bo->ctx);
- if (!bo->mmu_mapped) {
- struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+ return ret;
+ }
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
- goto unlock;
- }
+ ivpu_bo_lock(bo);
+ if (!bo->mmu_mapped) {
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
ivpu_bo_is_snooped(bo));
if (ret) {
@@ -69,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
}
unlock:
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
return ret;
}
@@ -84,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
if (!drm_dev_enter(&vdev->drm, &idx))
return -ENODEV;
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
@@ -94,9 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
}
- ivpu_dbg_bo(vdev, bo, "alloc");
-
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
drm_dev_exit(idx);
@@ -107,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
+ lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -122,17 +130,15 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
bo->ctx = NULL;
}
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
return;
- dma_resv_lock(bo->base.base.resv, NULL);
if (bo->base.sgt) {
dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(bo->base.sgt);
kfree(bo->base.sgt);
bo->base.sgt = NULL;
}
- dma_resv_unlock(bo->base.base.resv);
}
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -144,12 +150,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
if (bo->ctx == ctx) {
ivpu_dbg_bo(vdev, bo, "unbind");
ivpu_bo_unbind_locked(bo);
}
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
}
mutex_unlock(&vdev->bo_list_lock);
}
@@ -169,7 +175,6 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
INIT_LIST_HEAD(&bo->bo_list_node);
- mutex_init(&bo->lock);
return &bo->base.base;
}
@@ -215,7 +220,7 @@ fail_detach:
return ERR_PTR(ret);
}
-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
@@ -233,6 +238,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
return ERR_CAST(shmem);
bo = to_ivpu_bo(&shmem->base);
+ bo->ctx_id = ctx_id;
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
bo->flags = flags;
@@ -240,6 +246,8 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
+ ivpu_dbg_bo(vdev, bo, "alloc");
+
return bo;
}
@@ -277,12 +285,16 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
list_del(&bo->bo_list_node);
mutex_unlock(&vdev->bo_list_lock);
- drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+ drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
+ !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+ drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
+ drm_WARN_ON(&vdev->drm, bo->base.vaddr);
ivpu_bo_unbind_locked(bo);
- mutex_destroy(&bo->lock);
+ drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
+ drm_WARN_ON(&vdev->drm, bo->ctx);
- drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
drm_gem_shmem_free(&bo->base);
}
@@ -314,7 +326,7 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (size == 0)
return -EINVAL;
- bo = ivpu_bo_alloc(vdev, size, args->flags);
+ bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
@@ -322,7 +334,10 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
}
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
- if (!ret)
+ if (ret)
+ ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
+ bo, file_priv->ctx.id, args->size, args->flags);
+ else
args->vpu_addr = bo->vpu_addr;
drm_gem_object_put(&bo->base.base);
@@ -345,7 +360,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
- bo = ivpu_bo_alloc(vdev, size, flags);
+ bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
bo, range->start, size, flags);
@@ -361,9 +376,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
goto err_put;
if (flags & DRM_IVPU_BO_MAPPABLE) {
- dma_resv_lock(bo->base.base.resv, NULL);
- ret = drm_gem_shmem_vmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
+ ivpu_bo_lock(bo);
+ ret = drm_gem_shmem_vmap_locked(&bo->base, &map);
+ ivpu_bo_unlock(bo);
if (ret)
goto err_put;
@@ -386,9 +401,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
- dma_resv_lock(bo->base.base.resv, NULL);
- drm_gem_shmem_vunmap(&bo->base, &map);
- dma_resv_unlock(bo->base.base.resv);
+ ivpu_bo_lock(bo);
+ drm_gem_shmem_vunmap_locked(&bo->base, &map);
+ ivpu_bo_unlock(bo);
}
drm_gem_object_put(&bo->base.base);
@@ -407,12 +422,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
bo = to_ivpu_bo(obj);
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
args->flags = bo->flags;
args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
args->vpu_addr = bo->vpu_addr;
args->size = obj->size;
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
drm_gem_object_put(obj);
return ret;
@@ -449,10 +464,10 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
- mutex_lock(&bo->lock);
+ ivpu_bo_lock(bo);
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
- bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
+ bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
bo->flags, kref_read(&bo->base.base.refcount));
if (bo->base.pages)
@@ -461,12 +476,12 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
if (bo->mmu_mapped)
drm_printf(p, " mmu_mapped");
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
drm_printf(p, " imported");
drm_printf(p, "\n");
- mutex_unlock(&bo->lock);
+ ivpu_bo_unlock(bo);
}
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index a222a9ec9d61..aa8ff14f7aae 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -17,10 +17,10 @@ struct ivpu_bo {
struct list_head bo_list_node;
struct drm_mm_node mm_node;
- struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
u64 vpu_addr;
u32 flags;
u32 job_status; /* Valid only for command buffer */
+ u32 ctx_id;
bool mmu_mapped;
};
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index b28da35c30b6..fae8351aa330 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -247,6 +247,10 @@ static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cm
if (!cmdq->db_id)
return 0;
+ ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
+ if (!ret)
+ ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
+
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
if (!ret)
@@ -254,10 +258,6 @@ static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cm
cmdq->id, file_priv->ctx.id);
}
- ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
- if (!ret)
- ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
-
xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
cmdq->db_id = 0;
@@ -986,7 +986,8 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
return;
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
- ivpu_jsm_reset_engine(vdev, 0);
+ if (ivpu_jsm_reset_engine(vdev, 0))
+ return;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
@@ -1009,7 +1010,8 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
goto runtime_put;
- ivpu_jsm_hws_resume_engine(vdev, 0);
+ if (ivpu_jsm_hws_resume_engine(vdev, 0))
+ return;
/*
* In hardware scheduling mode NPU already has stopped processing jobs
* and won't send us any further notifications, thus we have to free job related resources
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 219ab8afefab..0256b2dfefc1 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -7,6 +7,7 @@
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
#include "vpu_jsm_api.h"
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
@@ -163,8 +164,10 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
- if (ret)
+ if (ret) {
ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
+ ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
+ }
return ret;
}
@@ -354,8 +357,10 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
- if (ret)
+ if (ret) {
ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
+ ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
+ }
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index ac0e22454596..ea30db181cd7 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -34,6 +34,7 @@ module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
#define PM_RESCHEDULE_LIMIT 5
+#define PM_TDR_HEARTBEAT_LIMIT 30
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
@@ -44,6 +45,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
ivpu_fw_log_reset(vdev);
ivpu_fw_load(vdev);
fw->entry_point = fw->cold_boot_entry_point;
+ fw->last_heartbeat = 0;
}
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
@@ -189,7 +191,24 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
+ u64 heartbeat;
+ if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat not progressed\n");
+ goto recovery;
+ }
+
+ if (atomic_fetch_inc(&vdev->job_timeout_counter) > PM_TDR_HEARTBEAT_LIMIT) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat limit exceeded\n");
+ goto recovery;
+ }
+
+ vdev->fw->last_heartbeat = heartbeat;
+ ivpu_start_job_timeout_detection(vdev);
+ return;
+
+recovery:
+ atomic_set(&vdev->job_timeout_counter, 0);
ivpu_pm_trigger_recovery(vdev, "TDR");
}
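The effect is a bounded grace period for live but long-running jobs: each expiry where the heartbeat has advanced re-arms the timer and bumps job_timeout_counter, and only after PM_TDR_HEARTBEAT_LIMIT (30) such re-arms is recovery forced anyway. As an illustration only (the actual period is platform-dependent and overridable via tdr_timeout_ms), a 2-second TDR period would allow roughly 30 x 2 s = 60 s of forward progress before forced recovery, while a stalled heartbeat still triggers recovery on the first expiry.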
@@ -204,6 +223,7 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
{
cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
+ atomic_set(&vdev->job_timeout_counter, 0);
}
int ivpu_pm_suspend_cb(struct device *dev)
diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig
index a9f866230058..5e405a19c157 100644
--- a/drivers/accel/qaic/Kconfig
+++ b/drivers/accel/qaic/Kconfig
@@ -8,7 +8,6 @@ config DRM_ACCEL_QAIC
depends on DRM_ACCEL
depends on PCI && HAS_IOMEM
depends on MHI_BUS
- depends on MMU
select CRC32
help
Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 43aba57b48f0..1bce1af7c72c 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -609,7 +609,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
struct scatterlist *sg;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return -EINVAL;
for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
@@ -630,7 +630,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
{
struct qaic_bo *bo = to_qaic_bo(obj);
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/* DMABUF/PRIME Path */
drm_prime_gem_destroy(obj, NULL);
} else {
@@ -870,7 +870,7 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
ret = qaic_prepare_import_bo(bo, hdr);
else
ret = qaic_prepare_export_bo(qdev, bo, hdr);
@@ -894,7 +894,7 @@ static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *b
static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
- if (bo->base.import_attach)
+ if (drm_gem_is_imported(&bo->base))
qaic_unprepare_import_bo(bo);
else
qaic_unprepare_export_bo(qdev, bo);
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index ba0cf2f94732..a991b8198dc4 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -240,7 +240,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
mhi_unprepare:
mhi_unprepare_from_transfer(mhi_dev);
destroy_workqueue:
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
out:
return ret;
@@ -253,7 +252,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev)
qdev = dev_get_drvdata(&mhi_dev->dev);
mhi_unprepare_from_transfer(qdev->bootlog_ch);
- flush_workqueue(qdev->bootlog_wq);
destroy_workqueue(qdev->bootlog_wq);
qdev->bootlog_ch = NULL;
}
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
index 972833fabcfc..3fac540f8e03 100644
--- a/drivers/accel/qaic/qaic_timesync.c
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -129,7 +129,7 @@ static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_resu
static void qaic_timesync_timer(struct timer_list *t)
{
- struct mqts_dev *mqtsdev = from_timer(mqtsdev, t, timer);
+ struct mqts_dev *mqtsdev = timer_container_of(mqtsdev, t, timer);
struct qts_host_time_sync_msg_data *sync_msg;
u64 device_qtimer_us;
u64 device_qtimer;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7f10aa38269d..7bc40c2735ac 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -576,6 +576,9 @@ config ACPI_FFH
Enable this feature if you want to set up and install the FFH Address
Space handler to handle FFH OpRegion in the firmware.
+config ACPI_MRRM
+ bool
+
source "drivers/acpi/pmic/Kconfig"
config ACPI_VIOT
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 797070fc9a3f..d1b0affb844f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -66,6 +66,7 @@ acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
acpi-$(CONFIG_ACPI_PRMT) += prmt.o
acpi-$(CONFIG_ACPI_PCC) += acpi_pcc.o
acpi-$(CONFIG_ACPI_FFH) += acpi_ffh.o
+acpi-$(CONFIG_ACPI_MRRM) += acpi_mrrm.o
# Address translation
acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index f7fb7205028d..f6b9562779de 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -15,6 +15,7 @@
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include "apei/apei-internal.h"
#include <ras/ras_event.h>
@@ -234,7 +235,7 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+ if (rdmsrq_safe(MSR_IA32_MCG_CAP, &cap) ||
!(cap & MCG_ELOG_P) ||
!extlog_get_l1addr())
return -ENODEV;
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 794962c5c88e..b8d98b1b48ae 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -39,7 +39,7 @@ static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
return 0;
}
- err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+ err = rdmsrq_safe(residency_info_ffh.gaddr.address, counter);
if (!err) {
u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
residency_info_ffh.gaddr.bit_width - 1,
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
new file mode 100644
index 000000000000..47ea3ccc2142
--- /dev/null
+++ b/drivers/acpi/acpi_mrrm.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, Intel Corporation.
+ *
+ * Memory Range and Region Mapping (MRRM) structure
+ *
+ * Parse and report the platform's MRRM table in /sys.
+ */
+
+#define pr_fmt(fmt) "acpi/mrrm: " fmt
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+
+/* By default, assume one memory region covering all system memory, per the spec */
+static int max_mem_region = 1;
+
+/* Access for use by resctrl file system */
+int acpi_mrrm_max_mem_region(void)
+{
+ return max_mem_region;
+}
+
+struct mrrm_mem_range_entry {
+ u64 base;
+ u64 length;
+ int node;
+ u8 local_region_id;
+ u8 remote_region_id;
+};
+
+static struct mrrm_mem_range_entry *mrrm_mem_range_entry;
+static u32 mrrm_mem_entry_num;
+
+static int get_node_num(struct mrrm_mem_range_entry *e)
+{
+ unsigned int nid;
+
+ for_each_online_node(nid) {
+ for (int z = 0; z < MAX_NR_ZONES; z++) {
+ struct zone *zone = NODE_DATA(nid)->node_zones + z;
+
+ if (!populated_zone(zone))
+ continue;
+ if (zone_intersects(zone, PHYS_PFN(e->base), PHYS_PFN(e->length)))
+ return zone_to_nid(zone);
+ }
+ }
+
+ return -ENOENT;
+}
+
+static __init int acpi_parse_mrrm(struct acpi_table_header *table)
+{
+ struct acpi_mrrm_mem_range_entry *mre_entry;
+ struct acpi_table_mrrm *mrrm;
+ void *mre, *mrrm_end;
+ int mre_count = 0;
+
+ mrrm = (struct acpi_table_mrrm *)table;
+ if (!mrrm)
+ return -ENODEV;
+
+ if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
+ return -EOPNOTSUPP;
+
+ mrrm_end = (void *)mrrm + mrrm->header.length - 1;
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ mre_entry = mre;
+ mre_count++;
+ mre += mre_entry->header.length;
+ }
+ if (!mre_count) {
+ pr_info(FW_BUG "No ranges listed in MRRM table\n");
+ return -EINVAL;
+ }
+
+ mrrm_mem_range_entry = kmalloc_array(mre_count, sizeof(*mrrm_mem_range_entry),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!mrrm_mem_range_entry)
+ return -ENOMEM;
+
+ mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);
+ while (mre < mrrm_end) {
+ struct mrrm_mem_range_entry *e;
+
+ mre_entry = mre;
+ e = mrrm_mem_range_entry + mrrm_mem_entry_num;
+
+ e->base = mre_entry->addr_base;
+ e->length = mre_entry->addr_len;
+ e->node = get_node_num(e);
+
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL)
+ e->local_region_id = mre_entry->local_region_id;
+ else
+ e->local_region_id = -1;
+ if (mre_entry->region_id_flags & ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE)
+ e->remote_region_id = mre_entry->remote_region_id;
+ else
+ e->remote_region_id = -1;
+
+ mrrm_mem_entry_num++;
+ mre += mre_entry->header.length;
+ }
+
+ max_mem_region = mrrm->max_mem_region;
+
+ return 0;
+}
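The parser walks the variable-length subtable list twice: a first pass only counts entries so the array can be allocated in one shot, then a second pass fills it in. Both passes use the standard ACPI subtable walk, advancing by each entry's own header length; a condensed fragment (handle_entry is a hypothetical per-entry callback):

void *mre = (void *)mrrm + sizeof(struct acpi_table_mrrm);

while (mre < mrrm_end) {
	struct acpi_mrrm_mem_range_entry *e = mre;

	handle_entry(e);		/* hypothetical */
	mre += e->header.length;	/* entries are variable-length */
}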
+
+#define RANGE_ATTR(name, fmt) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ struct mrrm_mem_range_entry *mre; \
+ const char *kname = kobject_name(kobj); \
+ int n, ret; \
+ \
+ ret = kstrtoint(kname + 5, 10, &n); \
+ if (ret) \
+ return ret; \
+ \
+ mre = mrrm_mem_range_entry + n; \
+ \
+ return sysfs_emit(buf, fmt, mre->name); \
+} \
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+RANGE_ATTR(base, "0x%llx\n");
+RANGE_ATTR(length, "0x%llx\n");
+RANGE_ATTR(node, "%d\n");
+RANGE_ATTR(local_region_id, "%d\n");
+RANGE_ATTR(remote_region_id, "%d\n");
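Each generated show() recovers its range index from the kobject it was read through: kstrtoint(kname + 5, 10, &n) skips the five-character "range" prefix, so a read under "range12" parses n = 12 and indexes mrrm_mem_range_entry directly.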
+
+static struct attribute *memory_range_attrs[] = {
+ &base_attr.attr,
+ &length_attr.attr,
+ &node_attr.attr,
+ &local_region_id_attr.attr,
+ &remote_region_id_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(memory_range);
+
+static __init int add_boot_memory_ranges(void)
+{
+ struct kobject *pkobj, *kobj;
+ int ret = -EINVAL;
+ char *name;
+
+ pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+
+ for (int i = 0; i < mrrm_mem_entry_num; i++) {
+ name = kasprintf(GFP_KERNEL, "range%d", i);
+ if (!name) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ kobj = kobject_create_and_add(name, pkobj);
+
+ ret = sysfs_create_groups(kobj, memory_range_groups);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
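Assuming the table parses and entries were found, the initcall leaves a tree like the following under acpi_kobj, i.e. /sys/firmware/acpi (values illustrative):

/sys/firmware/acpi/memory_ranges/range0/base               0x0
/sys/firmware/acpi/memory_ranges/range0/length             0x80000000
/sys/firmware/acpi/memory_ranges/range0/node               0
/sys/firmware/acpi/memory_ranges/range0/local_region_id    0
/sys/firmware/acpi/memory_ranges/range0/remote_region_id   1

Note that local_region_id and remote_region_id are declared u8, so the -1 sentinel stored when the corresponding valid bit is clear reads back as 255.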
+
+static __init int mrrm_init(void)
+{
+ int ret;
+
+ ret = acpi_table_parse(ACPI_SIG_MRRM, acpi_parse_mrrm);
+ if (ret < 0)
+ return ret;
+
+ return add_boot_memory_ranges();
+}
+device_initcall(mrrm_init);
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 3fde4496f8a2..c9a0bcaba2e4 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -19,7 +19,7 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/mwait.h>
#include <xen/xen.h>
@@ -33,7 +33,7 @@
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);
-static unsigned long power_saving_mwait_eax;
+static unsigned int power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index 07a034a53aca..97064e943768 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -31,7 +31,6 @@
struct pcc_data {
struct pcc_mbox_chan *pcc_chan;
- void __iomem *pcc_comm_addr;
struct completion done;
struct mbox_client cl;
struct acpi_pcc_info ctx;
@@ -81,14 +80,6 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
ret = AE_SUPPORT;
goto err_free_channel;
}
- data->pcc_comm_addr = acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!data->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- ctx->subspace_id);
- ret = AE_NO_MEMORY;
- goto err_free_channel;
- }
*region_context = data;
return AE_OK;
@@ -113,7 +104,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
reinit_completion(&data->done);
/* Write to Shared Memory */
- memcpy_toio(data->pcc_comm_addr, (void *)value, data->ctx.length);
+ memcpy_toio(data->pcc_chan->shmem, (void *)value, data->ctx.length);
ret = mbox_send_message(data->pcc_chan->mchan, NULL);
if (ret < 0)
@@ -134,7 +125,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
mbox_chan_txdone(data->pcc_chan->mchan, ret);
- memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
+ memcpy_fromio(value, data->pcc_chan->shmem, data->ctx.length);
return AE_OK;
}
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 9d4cbd956627..d7d4649ce66f 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -17,7 +17,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2022 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2025 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 4536dc9d3979..662231f4f881 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -3,7 +3,7 @@
*
* Name: accommon.h - Common include files for generation of ACPICA source
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index c6ba6a36cfb5..24998f2d7539 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -3,7 +3,7 @@
*
* Module Name: acapps - common include for ACPI applications/tools
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 911875c5a5f1..fe6d38b43c9a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -3,7 +3,7 @@
*
* Name: acdebug.h - ACPI/AML debugger
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -37,7 +37,7 @@ struct acpi_db_argument_info {
struct acpi_db_execute_walk {
u32 count;
u32 max_count;
- char name_seg[ACPI_NAMESEG_SIZE + 1];
+ char name_seg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
};
#define PARAM_LIST(pl) pl
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 73eecbf62f06..5d48a344b35f 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -3,7 +3,7 @@
*
* Name: acdispat.h - dispatcher (parser to interpreter interface)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 1c5218b79fc2..b40fb3a5ac8a 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -3,7 +3,7 @@
*
* Name: acevents.h - Event subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 309ce8efb4f6..c8a750d2674c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -3,7 +3,7 @@
*
* Name: acglobal.h - Declarations for global variables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index b8543a34caea..6aec56c65fa0 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -3,7 +3,7 @@
*
* Name: achware.h -- hardware specific interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 955114c926bd..1ee6ac9b2baf 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -3,7 +3,7 @@
*
* Name: acinterp.h - Interpreter subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -120,6 +120,9 @@ void
acpi_ex_trace_point(acpi_trace_event_type type,
u8 begin, u8 *aml, char *pathname);
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count);
+
/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 6481c48c22bb..0c41f0097e8d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -3,7 +3,7 @@
*
* Name: aclocal.h - Internal data types used across the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAMESEG_SIZE] __nonstring;
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAMESEG_SIZE] __nonstring;
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index de83dd22292b..4e9402c02410 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -3,7 +3,7 @@
*
* Name: acmacros.h - C macros for the entire subsystem.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9448bc026b9b..13f050fecb49 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -3,7 +3,7 @@
*
* Name: acnamesp.h - Namespace subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 8fc02946d3cd..6ffcc7a0a0c2 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -3,7 +3,7 @@
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index da96d80e6b3a..a2a9e51d7ac6 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -3,7 +3,7 @@
*
* Name: acopcode.h - AML opcode information for the AML parser and interpreter
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 6dad786a382c..65a15dee092b 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -3,7 +3,7 @@
*
* Module Name: acparser.h - AML Parser subcomponent prototypes and defines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index ef068f4c864a..76c5ed02e916 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -3,7 +3,7 @@
*
* Name: acpredef - Information table for ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index d772ff9ca07d..e8a92be5adae 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -3,7 +3,7 @@
*
* Name: acresrc.h - Resource Manager function prototypes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index f8fee94ba708..e690f604cfa0 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -3,7 +3,7 @@
*
* Name: acstruct.h - Internal structs
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index b6ae979b01b6..ebef72bf58d0 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -3,7 +3,7 @@
*
* Name: actables.h - ACPI table management
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index edfdbbef81c1..3990d509bbab 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -3,7 +3,7 @@
*
* Name: acutils.h -- prototypes for the common (subsystem-wide) procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index effe52b40dce..c5b544a006c5 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -5,7 +5,7 @@
* Declarations and definitions contained herein are derived
* directly from the ACPI specification.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 4e88f9fc2a28..54d6e51e0b9a 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -3,7 +3,7 @@
*
* Module Name: amlresrc.h - AML resource descriptors
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -504,10 +504,6 @@ struct aml_resource_pin_group_config {
#define AML_RESOURCE_PIN_GROUP_CONFIG_REVISION 1 /* ACPI 6.2 */
-/* restore default alignment */
-
-#pragma pack()
-
/* Union of all resource descriptors, so we can allocate the worst case */
union aml_resource {
@@ -562,6 +558,10 @@ union aml_resource {
u8 byte_item;
};
+/* restore default alignment */
+
+#pragma pack()
+
/* Interfaces used by both the disassembler and compiler */
void
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index e874c1dddefa..554ae35108bd 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -3,7 +3,7 @@
*
* Module Name: dbhistry - debugger HISTORY command
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 4354c175e12e..e2f00c54cb36 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -4,7 +4,7 @@
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 80c69af06948..c1f79d7a2026 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -4,7 +4,7 @@
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index c5c8380a3114..274b74255551 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: dsdebug - Parser/Interpreter interface - debugging
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 532401ecdab0..df132c9089c7 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -3,7 +3,7 @@
*
* Module Name: dsfield - Dispatcher field routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 6e0e362e461f..57cd9e2d1109 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: dsinit - Object initialization namespace walk
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index e809c2aed78a..fef6fb29ece4 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -3,7 +3,7 @@
*
* Module Name: dsmethod - Parser/Interpreter interface - control method parsing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -483,6 +483,13 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
+ if (this_walk_state->num_operands < obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
+ }
+
/* Init for new method, possibly wait on method mutex */
status =
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index eca50517ad82..5393de4dbc4c 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -188,6 +188,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
index++;
}
+ acpi_ex_trace_args(params, index);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%u args passed to method\n", index));
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 555f148d666b..1bf7eec49899 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -3,7 +3,7 @@
*
* Module Name: dsobject - Dispatcher object management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dd3059000885..5699b0872848 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: dsopcode - Dispatcher support for regions and fields
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index ecf793fe9919..1ed2386fab82 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -3,7 +3,7 @@
*
* Module Name: dspkginit - Completion of deferred package initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index fb9ed5e1da89..baf6a1f27605 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
u32 arg_count = 0;
u32 index = walk_state->num_operands;
+ u32 prev_num_operands = walk_state->num_operands;
+ u32 new_num_operands;
u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
@@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
/* Create the interpreter arguments, in reverse order */
+ new_num_operands = index;
index--;
for (i = 0; i < arg_count; i++) {
arg = arguments[index];
@@ -720,7 +723,11 @@ cleanup:
* pop everything off of the operand stack and delete those
* objects
*/
- acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+ walk_state->num_operands = (u8)(i);
+ acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state);
+
+ /* Restore operand count */
+ walk_state->num_operands = (u8)(prev_num_operands);
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index a43336f05206..5c5c6d8a4e48 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -4,7 +4,7 @@
* Module Name: dswexec - Dispatcher method execution callbacks;
* dispatch to interpreter.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index f7b8496c8bdd..666419b6a5c6 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 541235f498c2..bfc54c914757 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -3,7 +3,7 @@
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 1fdd07ae862c..375a8fa43d9d 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -3,7 +3,7 @@
*
* Module Name: dswscope - Scope stack manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 75338a13c802..02aaddb89df9 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -3,7 +3,7 @@
*
* Module Name: dswstate - Dispatcher parse tree walk management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 9e78c5b9ad52..6cdd39c987b8 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -3,7 +3,7 @@
*
* Module Name: evevent - Fixed Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 989dc01af03f..fa3e0d00d1ca 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -3,7 +3,7 @@
*
* Module Name: evglock - Global Lock support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 934b201d3820..ba65b2ea49b2 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 58e1890ab25b..fadd93caf1d5 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeblk - GPE block creation and initialization.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 38f408cf13ce..eb769739420e 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeinit - System GPE initialization and update
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index ee3b1ea656d4..d15b1d75c8ec 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -3,7 +3,7 @@
*
* Module Name: evgpeutil - GPE utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 1c8cb6d924df..5a35dae945e2 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -3,7 +3,7 @@
*
* Module Name: evhandler - Support for Address Space handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index e68e876d3b84..04a23a6c3bb1 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: evmisc - Miscellaneous event manager support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index cf53b9535f18..fa3475da7ea9 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -3,7 +3,7 @@
*
* Module Name: evregion - Operation Region support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 46d1b3f5582d..b03952798af5 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -3,7 +3,7 @@
*
* Module Name: evrgnini- ACPI address_space (op_region) init
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 24fa6433d562..86a8d41c079c 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -3,7 +3,7 @@
*
* Module Name: evxface - External interfaces for ACPI events
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 48bf845191d2..4b052908d2e7 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 4eeeb3b7ab7e..60dacec1b121 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index bff2d099f469..bccc672c934c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -4,7 +4,7 @@
* Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
* Address Spaces.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 2fb78b35565b..c248c9b162fa 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -3,7 +3,7 @@
*
* Module Name: exconcat - Concatenate-type AML operators
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 473115309860..4d7dd0fc6b07 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -3,7 +3,7 @@
*
* Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bb1be42daee1..fded9bfc2436 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -3,7 +3,7 @@
*
* Module Name: exconvrt - Object conversion routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -226,8 +226,8 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
/* Copy the string to the buffer */
new_buf = return_desc->buffer.pointer;
- strncpy((char *)new_buf, (char *)obj_desc->string.pointer,
- obj_desc->string.length);
+ memcpy((char *)new_buf, (char *)obj_desc->string.pointer,
+ obj_desc->string.length);
break;
default:
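
The switch from strncpy() to memcpy() above is the usual fix for copying a counted, not-necessarily-NUL-terminated string into a raw buffer: with the length equal to the source length, strncpy() never wrote a terminator anyway, and newer compilers flag such calls with -Wstringop-truncation. A small stand-alone illustration:

    #include <string.h>

    /* dst is a counted buffer; its length travels separately and no
     * NUL terminator is expected, so a plain fixed-length copy states
     * the intent exactly. */
    static void copy_counted(char *dst, const char *src, size_t len)
    {
            memcpy(dst, src, len);
    }
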
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 1bea9d97652c..052c69567997 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -3,7 +3,7 @@
*
* Module Name: excreate - Named object creation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 3f86bfada510..81a07a52b73c 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: exdebug - Support for stores to the AML Debug Object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 2e2da8790224..d8aeebaab70a 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -3,7 +3,7 @@
*
* Module Name: exdump - Interpreter debug output routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 61ff36189ace..ced3ff9d0a86 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -3,7 +3,7 @@
*
* Module Name: exfield - AML execution - field_unit read/write
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index cf6c812a8b6d..0771934c0455 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -3,7 +3,7 @@
*
* Module Name: exfldio - Aml Field I/O
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c6f2a9166ac0..07cbac58ed21 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -3,7 +3,7 @@
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 65c487facdda..1fa013197fcf 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -3,7 +3,7 @@
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 9a448165bfeb..76ab73c37e90 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -3,7 +3,7 @@
*
* Module Name: exnames - interpreter/scanner name load/execute
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 20fb34b68bee..6ac7e0ca5c9d 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 743c258bf2e8..a94fa4d70e99 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index d3091f619909..bf08110ed6d2 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 1af35e143ba9..cb078e39abf7 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -3,7 +3,7 @@
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 82b1fa2d201f..1b1a006e82de 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -3,7 +3,7 @@
*
* Module Name: exprep - ACPI AML field prep utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index c49b9f8de723..a390a1c2b0ab 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -3,7 +3,7 @@
*
* Module Name: exregion - ACPI default op_region (address space) handlers
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 873de01b8ad2..dd83631090fc 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -3,7 +3,7 @@
*
* Module Name: exresnte - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 24a78b5e266c..4589de3f3012 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -3,7 +3,7 @@
*
* Module Name: exresolv - AML Interpreter object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 3a437e6ace5c..782ee353a709 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -3,7 +3,7 @@
*
* Module Name: exresop - AML Interpreter operand/object resolution
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 5241f4c01c76..6d2581ec22ad 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -3,7 +3,7 @@
*
* Module Name: exserial - field_unit support for serial address spaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -201,6 +201,12 @@ acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
function = ACPI_READ;
break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+
+ buffer_length = ACPI_FFH_INPUT_BUFFER_SIZE;
+ function = ACPI_READ;
+ break;
+
default:
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 575c7a39f1aa..cbc42207496d 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -3,7 +3,7 @@
*
* Module Name: exstore - AML Interpreter object store support
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index b01ae015e1b5..0470b2639831 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -4,7 +4,7 @@
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 37c3131a82fa..5b168fbc03e8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -3,7 +3,7 @@
*
* Module Name: exstorob - AML object store support, store to object
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 2c384bd52b9c..7f843c9d8a06 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -3,7 +3,7 @@
*
* Module Name: exsystem - Interface to OS services
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index f1730221ff13..d34497f3576a 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -3,7 +3,7 @@
*
* Module Name: extrace - Support for interpreter execution tracing
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -149,6 +149,57 @@ acpi_ex_trace_point(acpi_trace_event_type type,
/*******************************************************************************
*
+ * FUNCTION: acpi_ex_trace_args
+ *
+ * PARAMETERS: params - AML method arguments
+ * count - number of method arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Trace the arguments of a control method invocation
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_args(union acpi_operand_object **params, u32 count)
+{
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ex_trace_args);
+
+ for (i = 0; i < count; i++) {
+ union acpi_operand_object *obj_desc = params[i];
+
+ if (!i) {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, " "));
+ }
+
+ switch (obj_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "%llx", obj_desc->integer.value));
+ break;
+ case ACPI_TYPE_STRING:
+ if (!obj_desc->string.length) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "NULL"));
+ continue;
+ }
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_TRACE_POINT, _COMPONENT))
+ acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
+ break;
+ default:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "Unknown"));
+ break;
+ }
+ if (i + 1 == count) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "\n"));
+ } else {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, ", "));
+ }
+ }
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ex_start_trace_method
*
* PARAMETERS: method_node - Node of the method
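
The separator handling in acpi_ex_trace_args() prints a delimiter between arguments and a newline only after the last one. The same logic in miniature, with printf() standing in for the ACPI debug macros:

    #include <stdio.h>

    static void trace_values(const unsigned long long *vals, unsigned int count)
    {
            unsigned int i;

            for (i = 0; i < count; i++) {
                    if (i == 0)
                            printf(" ");    /* leading pad before the first item */
                    printf("%llx", vals[i]);
                    printf(i + 1 == count ? "\n" : ", ");
            }
    }
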
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index f4d4a033f166..cc10c0732218 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -3,7 +3,7 @@
*
* Module Name: exutils - interpreter/scanner utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 790f342dcd25..a1e1fa787566 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -3,7 +3,7 @@
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index a9ba9190408b..631fd8e2b774 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -4,7 +4,7 @@
* Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
* extended FADT-V5 sleep registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index e0c847ab8324..386f4759c317 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -3,7 +3,7 @@
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index e0921f08b71a..87d78bef6323 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -4,7 +4,7 @@
* Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
* original/legacy sleep/PM registers.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 192c04b5a599..a5e0bccae6a4 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -3,7 +3,7 @@
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index b8de458f0368..496fd9e49f0b 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -3,7 +3,7 @@
*
* Module Name: hwvalid - I/O request validation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c31f803995c6..847cd1b2493d 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -3,7 +3,7 @@
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 8dbf83aeb455..9aabe30416da 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -3,7 +3,7 @@
*
* Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 3efb46f0dc54..366d54a1d157 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -3,7 +3,7 @@
*
* Module Name: nsarguments - Validation of args for ACPI predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 7e5a683ae957..f05a92b88642 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -4,7 +4,7 @@
* Module Name: nsconvert - Object conversions for objects returned by
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 90a26cb0c472..6dc20486ad51 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index fa116ebe49a3..d5b16aaec233 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -3,7 +3,7 @@
*
* Module Name: nsdump - table dumping routines for debug
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 86d126fdb27d..03373e7f7978 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -3,7 +3,7 @@
*
* Module Name: nsinit - namespace initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index fcb9de0f77a2..6ec4c646fff7 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -3,7 +3,7 @@
*
* Module Name: nsload - namespace loading/expanding/contracting procedures
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d91153f65700..22aeeeb56cff 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -194,7 +194,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing)
{
u32 length = 0, i;
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
u8 do_no_trailing;
char c, *left, *right;
struct acpi_namespace_node *next_node;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 31e551cf4ea6..959e6379bc4c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -3,7 +3,7 @@
*
* Module Name: nsparse - namespace interface to AML parser
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index cf57bd69616d..81995ee48c49 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -3,7 +3,7 @@
*
* Module Name: nspredef - Validation of ACPI predefined methods and objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index dd37fc108fce..ca137ce5674f 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -3,7 +3,7 @@
*
* Module Name: nsprepkg - Validation of package objects for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index b8657004190d..accfdcfb7e62 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -3,7 +3,7 @@
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 330b5e4711da..8dbb870f40d2 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -4,7 +4,7 @@
* Module Name: nsrepair2 - Repair for objects returned by specific
* predefined methods
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAMESEG_SIZE] __nonstring;
+ char name[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
acpi_repair_function repair_function;
} acpi_repair_info;
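
Replacing the GCC-specific __nonstring with ACPI_NONSTRING keeps compiler-specific spellings out of OS-independent ACPICA code; the macro is presumably defined in the platform headers this series also touches. A sketch of the likely pattern (the exact definition shown here is an assumption):

    #if defined(__GNUC__)
    #define ACPI_NONSTRING __attribute__((nonstring))
    #else
    #define ACPI_NONSTRING          /* expands to nothing elsewhere */
    #endif

    /* Marks a fixed-size name segment that is deliberately not
     * NUL-terminated, silencing string-termination warnings: */
    struct repair_info_sketch {
            char name[4] ACPI_NONSTRING;
    };
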
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 06ffdb6808f5..49cc07e2ac5a 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -4,7 +4,7 @@
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index eee396a77bae..a2ac06a26e92 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: nswalk - Functions for walking the ACPI namespace
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 5d5bcf165298..1db831545ec8 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -4,7 +4,7 @@
* Module Name: nsxfname - Public interfaces to the ACPI subsystem
* ACPI Namespace oriented interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 28582adfc0ac..6f6ae38ec044 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -3,7 +3,7 @@
*
* Module Name: psargs - Parse AML opcode arguments
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index d0fd55636129..c989cadf271c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -3,7 +3,7 @@
*
* Module Name: psloop - Main AML parse loop
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 54471083ba54..496a1c1d5b0b 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -3,7 +3,7 @@
*
* Module Name: psobject - Support for parse objects
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -636,7 +636,8 @@ acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, acpi_status status)
{
- acpi_status status2;
+ acpi_status return_status = status;
+ u8 ascending = TRUE;
ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
@@ -650,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
op));
do {
if (op) {
- if (walk_state->ascending_callback != NULL) {
+ if (ascending && walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.
@@ -672,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
}
if (status == AE_CTRL_TERMINATE) {
- status = AE_OK;
-
- /* Clean up */
- do {
- if (op) {
- status2 =
- acpi_ps_complete_this_op
- (walk_state, op);
- if (ACPI_FAILURE
- (status2)) {
- return_ACPI_STATUS
- (status2);
- }
- }
-
- acpi_ps_pop_scope(&
- (walk_state->
- parser_state),
- &op,
- &walk_state->
- arg_types,
- &walk_state->
- arg_count);
-
- } while (op);
-
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = AE_CTRL_TERMINATE;
}
else if (ACPI_FAILURE(status)) {
/* First error is most important */
- (void)
- acpi_ps_complete_this_op(walk_state,
- op);
- return_ACPI_STATUS(status);
+ ascending = FALSE;
+ return_status = status;
}
}
- status2 = acpi_ps_complete_this_op(walk_state, op);
- if (ACPI_FAILURE(status2)) {
- return_ACPI_STATUS(status2);
+ status = acpi_ps_complete_this_op(walk_state, op);
+ if (ACPI_FAILURE(status)) {
+ ascending = FALSE;
+ if (ACPI_SUCCESS(return_status) ||
+ return_status == AE_CTRL_TERMINATE) {
+ return_status = status;
+ }
}
}
@@ -724,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
} while (op);
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(return_status);
}
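
The rewrite above replaces two duplicated pop-and-complete cleanup loops with a single unwind loop: an `ascending` flag stops further callback invocations once a terminate or error condition is seen, `return_status` latches the first interesting status, and acpi_ps_complete_this_op() still runs for every remaining op. The shape of the pattern, reduced to a self-contained sketch with hypothetical names:

    #include <stdbool.h>

    struct op_sketch { int id; };

    static void complete_this_op(struct op_sketch *op) { (void)op; }

    static int complete_all(struct op_sketch **stack, int depth,
                            int (*callback)(struct op_sketch *), int status)
    {
            int return_status = status;
            bool ascending = true;

            while (depth-- > 0) {
                    struct op_sketch *op = stack[depth];

                    if (ascending && callback && (status = callback(op))) {
                            ascending = false;      /* first error wins */
                            return_status = status;
                    }
                    complete_this_op(op);           /* cleanup runs for every op */
            }
            return return_status;
    }
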
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 39e31030e5f4..bf6103986f48 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -3,7 +3,7 @@
*
* Module Name: psopcode - Parser/Interpreter opcode information table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index bccf606e08b4..1c8044ffcb97 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -3,7 +3,7 @@
*
* Module Name: psopinfo - AML opcode information functions and dispatch tables
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 10a072953d78..55a416e56fd8 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -3,7 +3,7 @@
*
* Module Name: psparse - Parser top level AML parse routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a0035bde7556..c4e4483f0a0b 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -3,7 +3,7 @@
*
* Module Name: psscope - Parser scope stack management routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 7f7f5ecd4011..5a285d3f2cdb 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -3,7 +3,7 @@
*
* Module Name: pstree - Parser op tree manipulation/traversal/search
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index d550c4af4702..ada1dc304d25 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -3,7 +3,7 @@
*
* Module Name: psutils - Parser miscellaneous utilities (Parser only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index d92817c72b8d..2f3ebcd8aebe 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -3,7 +3,7 @@
*
* Module Name: pswalk - Parser routines to walk parsed op tree(s)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6f4eace0ba69..d480de075a90 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -3,7 +3,7 @@
*
* Module Name: psxface - Parser external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 27384ee245f0..f92010e667cd 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -272,18 +272,13 @@ u8
acpi_rs_get_address_common(struct acpi_resource *resource,
union aml_resource *aml)
{
- struct aml_resource_address address;
-
ACPI_FUNCTION_ENTRY();
- /* Avoid undefined behavior: member access within misaligned address */
-
- memcpy(&address, aml, sizeof(address));
-
/* Validate the Resource Type */
- if ((address.resource_type > 2) &&
- (address.resource_type < 0xC0) && (address.resource_type != 0x0A)) {
+ if ((aml->address.resource_type > 2) &&
+ (aml->address.resource_type < 0xC0) &&
+ (aml->address.resource_type != 0x0A)) {
return (FALSE);
}
@@ -304,7 +299,7 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
/* Generic resource type, just grab the type_specific byte */
resource->data.address.info.type_specific =
- address.specific_flags;
+ aml->address.specific_flags;
}
return (TRUE);
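
This hunk, and the matching ones in rscalc.c, rslist.c, and utresrc.c below, revert the memcpy() staging copies that guarded against misaligned member access; direct access through union aml_resource is presumably safe again because the union's packing is addressed elsewhere in this series (amlresrc.h is among the files touched). The two patterns side by side, on a hypothetical packed descriptor:

    #include <string.h>
    #include <stdint.h>

    struct desc_sketch {
            uint8_t  type;
            uint16_t length;
    } __attribute__((packed));

    /* Portable workaround: stage through an aligned local. */
    static uint16_t read_length_staged(const void *aml)
    {
            struct desc_sketch d;

            memcpy(&d, aml, sizeof(d));     /* safe at any alignment */
            return d.length;
    }

    /* Direct access: valid only because the struct is packed, which
     * makes the compiler emit alignment-tolerant loads. */
    static uint16_t read_length_direct(const void *aml)
    {
            const struct desc_sketch *d = aml;

            return d->length;
    }
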
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 6e7a152d6459..242daf45e20e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -608,18 +608,12 @@ acpi_rs_get_list_length(u8 *aml_buffer,
case ACPI_RESOURCE_NAME_SERIAL_BUS:{
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus
- common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
minimum_aml_resource_length =
acpi_gbl_resource_aml_serial_bus_sizes
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
extra_struct_bytes +=
- common_serial_bus.resource_length -
+ aml_resource->common_serial_bus.
+ resource_length -
minimum_aml_resource_length;
break;
}
@@ -688,16 +682,10 @@ acpi_rs_get_list_length(u8 *aml_buffer,
*/
if (acpi_ut_get_resource_type(aml_buffer) ==
ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
buffer_size =
acpi_gbl_resource_struct_serial_bus_sizes
- [common_serial_bus.type] + extra_struct_bytes;
+ [aml_resource->common_serial_bus.type] +
+ extra_struct_bytes;
} else {
buffer_size =
acpi_gbl_resource_struct_sizes[resource_index] +
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 164c96e063c6..e46efaa889cd 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -55,21 +55,15 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
-
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
- if (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ if (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
conversion_table = NULL;
} else {
/* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */
conversion_table =
acpi_gbl_convert_resource_serial_bus_dispatch
- [common_serial_bus.type];
+ [aml_resource->common_serial_bus.type];
}
} else {
conversion_table =
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index a1f10e4409a3..5b98e09fff76 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -3,7 +3,7 @@
*
* Module Name: tbdata - Table manager data structure functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 3c126c6d306b..c6658b2f3027 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfadt - FADT table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 1c1b2e284bd9..d71a73216380 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -3,7 +3,7 @@
*
* Module Name: tbfind - find table
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -57,8 +57,8 @@ acpi_tb_find_table(char *signature,
memset(&header, 0, sizeof(struct acpi_table_header));
ACPI_COPY_NAMESEG(header.signature, signature);
- strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
- strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+ memcpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+ memcpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
/* Search for the table */
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 0dc003c20e4d..ee9b85bc238b 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -3,7 +3,7 @@
*
* Module Name: tbinstal - ACPI table installation and removal
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 58b02e4b254b..fd64460a2e26 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -3,7 +3,7 @@
*
* Module Name: tbprint - Table output utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index dad7425fce3f..fa64851c7b62 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -3,7 +3,7 @@
*
* Module Name: tbutils - ACPI Table utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 275b52dc42e9..a8f07d2641b6 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxface - ACPI table-oriented external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 0f2a7343de3a..2a17c60a9a39 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfload - Table load/unload external interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 5b413bbab338..961577ba9486 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -3,7 +3,7 @@
*
* Module Name: tbxfroot - Find the root ACPI table (RSDT)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index be94d2fd99a7..c673d6c95e0a 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -3,7 +3,7 @@
*
* Module Name: utaddress - op_region address range check
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index c1fb70457e20..2418a312733a 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -3,7 +3,7 @@
*
* Module Name: utalloc - local memory allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 2be37676edd7..259c28d3fecd 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -3,7 +3,7 @@
*
* Module Name: utascii - Utility ascii functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index b054bb5eeaf0..f6e6e98e9523 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -3,7 +3,7 @@
*
* Module Name: utbuffer - Buffer dump routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 85a85f7cf750..cabec193febb 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -3,7 +3,7 @@
*
* Module Name: utcache - local cache allocation routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -251,9 +251,9 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
} else {
/* The cache is empty, create a new object */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_MEM_TRACKING(cache->total_allocated++);
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
if ((cache->total_allocated - cache->total_freed) >
cache->max_occupied) {
cache->max_occupied =
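
Moving the ACPI_MEM_TRACKING() statement under the #ifdef groups all the allocation-statistics code in one block; the macro itself presumably already compiles away when tracking is disabled, along the lines of (the definition shown is an assumption, not this patch's code):

    #ifdef ACPI_DBG_TRACK_ALLOCATIONS
    #define ACPI_MEM_TRACKING(allocation)   allocation
    #else
    #define ACPI_MEM_TRACKING(allocation)
    #endif
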
diff --git a/drivers/acpi/acpica/utcksum.c b/drivers/acpi/acpica/utcksum.c
index b483894c3629..e6f6030b3a3f 100644
--- a/drivers/acpi/acpica/utcksum.c
+++ b/drivers/acpi/acpica/utcksum.c
@@ -3,7 +3,7 @@
*
* Module Name: utcksum - Support generating table checksums
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 2e17e657dfa4..80458e70ac2b 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -3,7 +3,7 @@
*
* Module Name: utcopy - Internal to external object translation utilities
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 3d71bd9245c7..9f197e293c7e 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -3,7 +3,7 @@
*
* Module Name: utdebug - Debug print/trace routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 95a4b7509e01..b82130d1a8bc 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -3,7 +3,7 @@
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index c85bfa13ac1e..e8180099d01f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -404,7 +404,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
object, object->common.type,
acpi_ut_get_object_type_name(object),
new_count));
- message = "Incremement";
+ message = "Increment";
break;
case REF_DECREMENT:
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 3e5173d03953..abc6583ed369 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -3,7 +3,7 @@
*
* Module Name: uteval - Object evaluation
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 820820ea8119..97c55a113bae 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -3,7 +3,7 @@
*
* Module Name: utglobal - Global variables for the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index e62802791dcf..8cd050e9cad5 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -3,7 +3,7 @@
*
* Module Name: uthex -- Hex/ASCII support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 15c2ce91d403..eb88335dea2c 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -3,7 +3,7 @@
*
* Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 6d78504e9fbc..4bef97e8223a 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utinit - Common ACPI subsystem initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index ee6d72385c5c..123dbcbc60bc 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -3,7 +3,7 @@
*
* Module Name: utlock - Reader/Writer lock interfaces
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index f4aae8f0d3a8..272e46208263 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -3,7 +3,7 @@
*
* Module Name: utobject - ACPI object create/delete/size/cache routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 99b85fd6eccf..f6ac16729e42 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -3,7 +3,7 @@
*
* Module Name: utosi - Support for the _OSI predefined control method
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 29d2977d0746..d9bd80e2d32a 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -3,7 +3,7 @@
*
* Module Name: utpredef - support functions for predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 42b30b9f9312..423d10569736 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -3,7 +3,7 @@
*
* Module Name: utprint - Formatted printing routines
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -333,11 +333,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
pos = string;
- if (size != ACPI_UINT32_MAX) {
- end = string + size;
- } else {
- end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
- }
+ size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string));
+ end = string + size;
for (; *format; ++format) {
if (*format != '%') {
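
The new bounds computation clamps `size` so that `string + size` cannot wrap past the top of the address space, which also removes the old ACPI_UINT32_MAX special case. The same arithmetic with standard C stand-ins for the ACPI_* macros:

    #include <stdint.h>
    #include <stddef.h>

    static char *bounded_end(char *string, size_t size)
    {
            size_t max = (size_t)(UINTPTR_MAX - (uintptr_t)string);

            if (size > max)                 /* ACPI_MIN(size, ACPI_PTR_DIFF(...)) */
                    size = max;
            return string + size;           /* never wraps */
    }
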
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index cff7901f7866..e1cc3d348750 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -361,20 +361,16 @@ acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
- /* Avoid undefined behavior: member access within misaligned address */
-
- struct aml_resource_common_serialbus common_serial_bus;
- memcpy(&common_serial_bus, aml_resource,
- sizeof(common_serial_bus));
-
/* Validate the bus_type field */
- if ((common_serial_bus.type == 0) ||
- (common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ if ((aml_resource->common_serial_bus.type == 0) ||
+ (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE)) {
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
- common_serial_bus.type));
+ aml_resource->common_serial_bus.
+ type));
}
return (AE_AML_INVALID_RESOURCE_TYPE);
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index f5f5da441458..a99c4c9e3d39 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -3,7 +3,7 @@
*
* Module Name: uttrack - Memory allocation tracking routines (debug only)
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 8f10b413e928..0682554934ca 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -3,7 +3,7 @@
*
* Module Name: utuuid -- UUID support functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index aa2e923462b7..56942b5f026b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -3,7 +3,7 @@
*
* Module Name: utxface - External interfaces, miscellaneous utility functions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 70ae0afa7939..c1702f8fba67 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -3,7 +3,7 @@
*
* Module Name: utxfinit - External interfaces for ACPICA initialization
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 3cfe7e7475f2..070c07d68dfb 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -23,6 +23,7 @@ config ACPI_APEI_GHES
select ACPI_HED
select IRQ_WORK
select GENERIC_ALLOCATOR
+ select ARM_SDE_INTERFACE if ARM64
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 04731a5b01fa..9b041415a9d0 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -21,7 +21,7 @@
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/unaligned.h>
#include "apei-internal.h"
@@ -83,6 +83,8 @@ static struct debugfs_blob_wrapper vendor_blob;
static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
+static u32 available_error_type;
+
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
@@ -648,14 +650,9 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
static int available_error_type_show(struct seq_file *m, void *v)
{
- int rc;
- u32 error_type = 0;
- rc = einj_get_available_error_type(&error_type);
- if (rc)
- return rc;
for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++)
- if (error_type & einj_error_type_string[pos].mask)
+ if (available_error_type & einj_error_type_string[pos].mask)
seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
einj_error_type_string[pos].str);
@@ -678,8 +675,7 @@ bool einj_is_cxl_error_type(u64 type)
int einj_validate_error_type(u64 type)
{
- u32 tval, vendor, available_error_type = 0;
- int rc;
+ u32 tval, vendor;
/* Only low 32 bits for error type are valid */
if (type & GENMASK_ULL(63, 32))
@@ -695,13 +691,9 @@ int einj_validate_error_type(u64 type)
/* Only one error type can be specified */
if (tval & (tval - 1))
return -EINVAL;
- if (!vendor) {
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
+ if (!vendor)
if (!(type & available_error_type))
return -EINVAL;
- }
return 0;
}
@@ -749,17 +741,12 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
return 0;
}
-static int __init einj_probe(struct platform_device *pdev)
+static int __init einj_probe(struct faux_device *fdev)
{
int rc;
acpi_status status;
struct apei_exec_context ctx;
- if (acpi_disabled) {
- pr_debug("ACPI disabled.\n");
- return -ENODEV;
- }
-
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
if (status == AE_NOT_FOUND) {
@@ -777,6 +764,10 @@ static int __init einj_probe(struct platform_device *pdev)
goto err_put_table;
}
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ goto err_put_table;
+
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
@@ -851,7 +842,7 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct platform_device *pdev)
+static void __exit einj_remove(struct faux_device *fdev)
{
struct apei_exec_context ctx;
@@ -872,44 +863,36 @@ static void __exit einj_remove(struct platform_device *pdev)
acpi_put_table((struct acpi_table_header *)einj_tab);
}
-static struct platform_device *einj_dev;
+static struct faux_device *einj_dev;
/*
* einj_remove() lives in .exit.text. For drivers registered via
* platform_driver_probe() this is ok because they cannot get unbound at
* runtime. So mark the driver struct with __refdata to prevent modpost
* triggering a section mismatch warning.
*/
-static struct platform_driver einj_driver __refdata = {
+static struct faux_device_ops einj_device_ops __refdata = {
+ .probe = einj_probe,
.remove = __exit_p(einj_remove),
- .driver = {
- .name = "acpi-einj",
- },
};
static int __init einj_init(void)
{
- struct platform_device_info einj_dev_info = {
- .name = "acpi-einj",
- .id = -1,
- };
- int rc;
+ if (acpi_disabled) {
+ pr_debug("ACPI disabled.\n");
+ return -ENODEV;
+ }
- einj_dev = platform_device_register_full(&einj_dev_info);
- if (IS_ERR(einj_dev))
- return PTR_ERR(einj_dev);
+ einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
- rc = platform_driver_probe(&einj_driver, einj_probe);
- einj_initialized = rc == 0;
+ if (einj_dev)
+ einj_initialized = true;
return 0;
}
static void __exit einj_exit(void)
{
- if (einj_initialized)
- platform_driver_unregister(&einj_driver);
-
- platform_device_unregister(einj_dev);
+ faux_device_destroy(einj_dev);
}
module_init(einj_init);
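
The conversion above replaces the platform device/driver pair with a faux device; faux_device_create() runs .probe synchronously and returns NULL on failure, which is why einj_init() only has to test the returned pointer. A minimal sketch of the pattern (all demo_* names are hypothetical):

#include <linux/device/faux.h>
#include <linux/errno.h>
#include <linux/init.h>

static int demo_probe(struct faux_device *fdev)
{
	return 0;			/* bind unconditionally in this sketch */
}

static const struct faux_device_ops demo_ops = {
	.probe = demo_probe,
};

static struct faux_device *demo_dev;

static int __init demo_init(void)
{
	demo_dev = faux_device_create("demo", NULL, &demo_ops);
	return demo_dev ? 0 : -ENODEV;	/* NULL covers probe failure too */
}

static void __exit demo_exit(void)
{
	faux_device_destroy(demo_dev);	/* tolerates a NULL device */
}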
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 289e365f84b2..f0584ccad451 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1138,7 +1138,7 @@ static void ghes_add_timer(struct ghes *ghes)
static void ghes_poll_func(struct timer_list *t)
{
- struct ghes *ghes = from_timer(ghes, t, timer);
+ struct ghes *ghes = timer_container_of(ghes, t, timer);
unsigned long flags;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
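
The from_timer() to timer_container_of() change here (and in the libata hunks further down) is a rename of the same container_of()-based accessor: the callback recovers its enclosing object from the struct timer_list pointer. A hedged sketch with a made-up structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_poller {			/* illustrative, not a GHES type */
	struct timer_list timer;
	unsigned long polls;
};

static void demo_poll_func(struct timer_list *t)
{
	struct demo_poller *p = timer_container_of(p, t, timer);

	p->polls++;			/* callback can reach its parent */
	mod_timer(&p->timer, jiffies + HZ);
}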
@@ -1715,7 +1715,7 @@ void __init acpi_ghes_init(void)
{
int rc;
- sdei_init();
+ acpi_sdei_init();
if (acpi_disabled)
return;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6760330a8af5..45593612a4db 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -243,10 +243,23 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
ret = -ENODEV;
- else
- val->intval = battery->rate_now * 1000;
+ break;
+ }
+
+ val->intval = battery->rate_now * 1000;
+ /*
+ * When discharging, the current should be reported as a
+ * negative number as per the power supply class interface
+ * definition.
+ */
+ if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
+ (battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
+ acpi_battery_handle_discharging(battery)
+ == POWER_SUPPLY_STATUS_DISCHARGING)
+ val->intval = -val->intval;
+
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
@@ -279,8 +292,8 @@ static int acpi_battery_get_property(struct power_supply *psy,
full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
- val->intval = battery->capacity_now * 100/
- full_capacity;
+ val->intval = DIV_ROUND_CLOSEST_ULL(battery->capacity_now * 100ULL,
+ full_capacity);
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
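
Two behavioral fixes sit in this hunk: discharge current is now reported negative per the power-supply class convention, and the capacity percentage is rounded to nearest rather than truncated. The rounding difference in a plain C sketch:

#include <stdio.h>

/* round-to-nearest percentage, mirroring DIV_ROUND_CLOSEST_ULL() above */
static unsigned int pct(unsigned long long now, unsigned long long full)
{
	return (unsigned int)((now * 100ULL + full / 2) / full);
}

int main(void)
{
	/* truncation would print 49 here; rounding prints 50 */
	printf("%u\n", pct(4999, 10000));
	return 0;
}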
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 058910af82bc..c2ab2783303f 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1446,8 +1446,10 @@ static int __init acpi_init(void)
}
acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
- if (!acpi_kobj)
- pr_debug("%s: kset create error\n", __func__);
+ if (!acpi_kobj) {
+ pr_err("Failed to register kobject\n");
+ return -ENOMEM;
+ }
init_prmt();
acpi_init_pcc();
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index f193e713825a..6b649031808f 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -47,7 +47,6 @@
struct cppc_pcc_data {
struct pcc_mbox_chan *pcc_channel;
- void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -95,7 +94,7 @@ static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_channel->shmem + \
0x8 + (offs))
/* Check if a CPC register is in PCC */
@@ -129,6 +128,20 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \
!!(cpc)->cpc_entry.int_value : \
!IS_NULL_REG(&(cpc)->cpc_entry.reg))
+
+/*
+ * Each bit indicates the optionality of the register in per-cpu
+ * cpc_regs[] with the corresponding index. 0 means mandatory and 1
+ * means optional.
+ */
+#define REG_OPTIONAL (0x1FC7D0)
+
+/*
+ * Use the index of the register in per-cpu cpc_regs[] to check if
+ * it's an optional one.
+ */
+#define IS_OPTIONAL_CPC_REG(reg_idx) (REG_OPTIONAL & (1U << (reg_idx)))
+
/*
* Arbitrary Retries in case the remote processor is slow to respond
* to PCC commands. Keeping it high enough to cover emulators where
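
REG_OPTIONAL packs the per-index optionality of cpc_regs[] into one word, so testing bit reg_idx tells whether a missing register is an error. A tiny self-check (the index-to-name pairing assumes enum cppc_regs ordering, with HIGHEST_PERF at index 0):

#include <stdio.h>

#define REG_OPTIONAL		0x1FC7D0
#define IS_OPTIONAL_CPC_REG(i)	(REG_OPTIONAL & (1U << (i)))

int main(void)
{
	/* bit 0 clear -> mandatory; bit 4 set -> optional */
	printf("%d %d\n", !!IS_OPTIONAL_CPC_REG(0), !!IS_OPTIONAL_CPC_REG(4));
	return 0;
}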
@@ -223,7 +236,7 @@ static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
int ret, status;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
if (!pcc_ss_data->platform_owns_pcc)
return 0;
@@ -258,7 +271,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
int ret = -EIO, i;
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory __iomem *generic_comm_base =
- pcc_ss_data->pcc_comm_addr;
+ pcc_ss_data->pcc_channel->shmem;
unsigned int time_delta;
/*
@@ -463,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -571,15 +584,6 @@ static int register_pcc_channel(int pcc_ss_idx)
pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
- pcc_data[pcc_ss_idx]->pcc_comm_addr =
- acpi_os_ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
- pr_err("Failed to ioremap PCC comm region mem for %d\n",
- pcc_ss_idx);
- return -ENOMEM;
- }
-
/* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
@@ -1175,43 +1179,106 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return ret_val;
}
-static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
+static int cppc_get_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 *val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
+
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+ ret = cpc_read(cpu, reg, val);
+ else
+ ret = -EIO;
+
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_get_reg_val(int cpu, enum cppc_regs reg_idx, u64 *val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *reg;
+ if (val == NULL)
+ return -EINVAL;
+
if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
return -ENODEV;
}
reg = &cpc_desc->cpc_regs[reg_idx];
- if (CPC_IN_PCC(reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
-
- if (pcc_ss_id < 0)
- return -EIO;
+ if ((reg->type == ACPI_TYPE_INTEGER && IS_OPTIONAL_CPC_REG(reg_idx) &&
+ !reg->cpc_entry.int_value) || (reg->type != ACPI_TYPE_INTEGER &&
+ IS_NULL_REG(&reg->cpc_entry.reg))) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
- pcc_ss_data = pcc_data[pcc_ss_id];
+ if (CPC_IN_PCC(reg))
+ return cppc_get_reg_val_in_pcc(cpu, reg, val);
- down_write(&pcc_ss_data->pcc_lock);
+ return cpc_read(cpu, reg, val);
+}
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
- cpc_read(cpunum, reg, perf);
- else
- ret = -EIO;
+static int cppc_set_reg_val_in_pcc(int cpu, struct cpc_register_resource *reg, u64 val)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
- up_write(&pcc_ss_data->pcc_lock);
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id\n");
+ return -ENODEV;
+ }
+ ret = cpc_write(cpu, reg, val);
+ if (ret)
return ret;
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+
+ return ret;
+}
+
+static int cppc_set_reg_val(int cpu, enum cppc_regs reg_idx, u64 val)
+{
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cpc_register_resource *reg;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
}
- cpc_read(cpunum, reg, perf);
+ reg = &cpc_desc->cpc_regs[reg_idx];
- return 0;
+ /* if a register is writeable, it must be a buffer and not null */
+ if ((reg->type != ACPI_TYPE_BUFFER) || IS_NULL_REG(&reg->cpc_entry.reg)) {
+ pr_debug("CPC register is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (CPC_IN_PCC(reg))
+ return cppc_set_reg_val_in_pcc(cpu, reg, val);
+
+ return cpc_write(cpu, reg, val);
}
/**
@@ -1223,7 +1290,7 @@ static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
*/
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
+ return cppc_get_reg_val(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
@@ -1236,7 +1303,7 @@ EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
*/
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
+ return cppc_get_reg_val(cpunum, NOMINAL_PERF, nominal_perf);
}
/**
@@ -1248,7 +1315,7 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
*/
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
+ return cppc_get_reg_val(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
@@ -1261,7 +1328,7 @@ EXPORT_SYMBOL_GPL(cppc_get_highest_perf);
*/
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
+ return cppc_get_reg_val(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
@@ -1535,53 +1602,110 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
/**
- * cppc_get_auto_sel_caps - Read autonomous selection register.
- * @cpunum : CPU from which to read register.
- * @perf_caps : struct where autonomous selection register value is updated.
+ * cppc_set_epp() - Write the EPP register.
+ * @cpu: CPU on which to write register.
+ * @epp_val: Value to write to the EPP register.
*/
-int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+int cppc_set_epp(int cpu, u64 epp_val)
{
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- struct cpc_register_resource *auto_sel_reg;
- u64 auto_sel;
+ if (epp_val > CPPC_ENERGY_PERF_MAX)
+ return -EINVAL;
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
- return -ENODEV;
- }
+ return cppc_set_reg_val(cpu, ENERGY_PERF, epp_val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_epp);
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+/**
+ * cppc_get_auto_act_window() - Read autonomous activity window register.
+ * @cpu: CPU from which to read register.
+ * @auto_act_window: Return address.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value read from the autonomous
+ * activity window register consists of two parts: a 7-bit significand and
+ * a 3-bit exponent.
+ */
+int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
+{
+ unsigned int exp;
+ u64 val, sig;
+ int ret;
- if (!CPC_SUPPORTED(auto_sel_reg))
- pr_warn_once("Autonomous mode is not unsupported!\n");
+ if (auto_act_window == NULL)
+ return -EINVAL;
- if (CPC_IN_PCC(auto_sel_reg)) {
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = 0;
+ ret = cppc_get_reg_val(cpu, AUTO_ACT_WINDOW, &val);
+ if (ret)
+ return ret;
- if (pcc_ss_id < 0)
- return -ENODEV;
+ sig = val & CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+ exp = (val >> CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) & CPPC_AUTO_ACT_WINDOW_MAX_EXP;
+ *auto_act_window = sig * int_pow(10, exp);
- pcc_ss_data = pcc_data[pcc_ss_id];
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_auto_act_window);
- down_write(&pcc_ss_data->pcc_lock);
+/**
+ * cppc_set_auto_act_window() - Write autonomous activity window register.
+ * @cpu: CPU on which to write register.
+ * @auto_act_window: usec value to write to the autonomous activity window register.
+ *
+ * According to ACPI 6.5, s8.4.6.1.6, the value to write to the autonomous
+ * activity window register consists of two parts: a 7-bit significand and
+ * a 3-bit exponent.
+ */
+int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
+{
+ /* The max value to store is 1270000000 */
+ u64 max_val = CPPC_AUTO_ACT_WINDOW_MAX_SIG * int_pow(10, CPPC_AUTO_ACT_WINDOW_MAX_EXP);
+ int exp = 0;
+ u64 val;
- if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
- cpc_read(cpunum, auto_sel_reg, &auto_sel);
- perf_caps->auto_sel = (bool)auto_sel;
- } else {
- ret = -EIO;
- }
+ if (auto_act_window > max_val)
+ return -EINVAL;
- up_write(&pcc_ss_data->pcc_lock);
+ /*
+ * The max significand is 127. While auto_act_window is larger than
+ * 129, discard the last digit of precision and increase the
+ * exponent by 1.
+ */
+ while (auto_act_window > CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH) {
+ auto_act_window /= 10;
+ exp += 1;
+ }
+
+ /* For 128 and 129, cut it to 127. */
+ if (auto_act_window > CPPC_AUTO_ACT_WINDOW_MAX_SIG)
+ auto_act_window = CPPC_AUTO_ACT_WINDOW_MAX_SIG;
+ val = (exp << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) + auto_act_window;
+
+ return cppc_set_reg_val(cpu, AUTO_ACT_WINDOW, val);
+}
+EXPORT_SYMBOL_GPL(cppc_set_auto_act_window);
+
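
The activity window is stored as value = significand * 10^exponent with a 7-bit significand and a 3-bit exponent, so the largest representable value is 127 * 10^7. A standalone sketch of the encoding loop used by cppc_set_auto_act_window(); the local constants mirror the CPPC_AUTO_ACT_WINDOW_* macros but are assumptions of this sketch:

#include <stdio.h>

#define MAX_SIG		127U		/* 7 bits */
#define SIG_BITS	7
#define CARRY_THRESH	129ULL		/* above this, drop a digit */

static unsigned int encode_act_window(unsigned long long usec)
{
	unsigned int exp = 0;

	while (usec > CARRY_THRESH) {
		usec /= 10;		/* lose one digit of precision */
		exp++;
	}
	if (usec > MAX_SIG)		/* 128 and 129 clamp to 127 */
		usec = MAX_SIG;
	return (exp << SIG_BITS) | (unsigned int)usec;
}

int main(void)
{
	/* 5000 usec -> sig 50, exp 2 -> 0x132 */
	printf("0x%x\n", encode_act_window(5000));
	return 0;
}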
+/**
+ * cppc_get_auto_sel() - Read autonomous selection register.
+ * @cpu: CPU from which to read register.
+ * @enable: Return address.
+ */
+int cppc_get_auto_sel(int cpu, bool *enable)
+{
+ u64 auto_sel;
+ int ret;
+
+ if (enable == NULL)
+ return -EINVAL;
+
+ ret = cppc_get_reg_val(cpu, AUTO_SEL_ENABLE, &auto_sel);
+ if (ret)
return ret;
- }
+
+ *enable = (bool)auto_sel;
return 0;
}
-EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
+EXPORT_SYMBOL_GPL(cppc_get_auto_sel);
/**
* cppc_set_auto_sel - Write autonomous selection register.
@@ -1590,43 +1714,7 @@ EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
*/
int cppc_set_auto_sel(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *auto_sel_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -ENODEV;
- }
-
- auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
-
- if (CPC_IN_PCC(auto_sel_reg)) {
- if (pcc_ss_id < 0) {
- pr_debug("Invalid pcc_ss_id\n");
- return -ENODEV;
- }
-
- if (CPC_SUPPORTED(auto_sel_reg)) {
- ret = cpc_write(cpu, auto_sel_reg, enable);
- if (ret)
- return ret;
- }
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platform */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- } else {
- ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
- }
-
- return ret;
+ return cppc_set_reg_val(cpu, AUTO_SEL_ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
@@ -1640,38 +1728,7 @@ EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
*/
int cppc_set_enable(int cpu, bool enable)
{
- int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cpc_register_resource *enable_reg;
- struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
- struct cppc_pcc_data *pcc_ss_data = NULL;
- int ret = -EINVAL;
-
- if (!cpc_desc) {
- pr_debug("No CPC descriptor for CPU:%d\n", cpu);
- return -EINVAL;
- }
-
- enable_reg = &cpc_desc->cpc_regs[ENABLE];
-
- if (CPC_IN_PCC(enable_reg)) {
-
- if (pcc_ss_id < 0)
- return -EIO;
-
- ret = cpc_write(cpu, enable_reg, enable);
- if (ret)
- return ret;
-
- pcc_ss_data = pcc_data[pcc_ss_id];
-
- down_write(&pcc_ss_data->pcc_lock);
- /* after writing CPC, transfer the ownership of PCC to platfrom */
- ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- up_write(&pcc_ss_data->pcc_lock);
- return ret;
- }
-
- return cpc_write(cpu, enable_reg, enable);
+ return cppc_set_reg_val(cpu, ENABLE, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 3c5f34892734..75c7db8b156a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -23,8 +23,10 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -2031,6 +2033,21 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
+ if (!strstarts(ecdt_ptr->id, "\\")) {
+ /*
+ * The ECDT table on some MSI notebooks contains invalid data, together
+ * with an empty ID string ("").
+ *
+ * Section 5.2.15 of the ACPI specification requires the ID string to be
+ * a "fully qualified reference to the (...) embedded controller device",
+ * so this string always has to start with a backslash.
+ *
+ * By verifying this we can avoid such faulty ECDT tables in a safe way.
+ */
+ pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
+ goto out;
+ }
+
ec = acpi_ec_alloc();
if (!ec)
goto out;
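
The new check rejects ECDT tables whose EC ID is not a fully qualified namespace path; per the spec such a path always begins with a backslash, so the empty string produced by the faulty MSI firmware fails it. The equivalent test in plain C:

#include <stdbool.h>
#include <stdio.h>

static bool ecdt_id_valid(const char *id)
{
	return id[0] == '\\';	/* "" and relative paths are rejected */
}

int main(void)
{
	printf("%d %d\n", ecdt_id_valid("\\_SB.PCI0.EC"), ecdt_id_valid(""));
	return 0;
}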
@@ -2329,6 +2346,12 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
}
},
+ {
+ // TUXEDO InfinityBook Pro AMD Gen9
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 00910ccd7eda..e2781864fdce 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -175,6 +175,12 @@ bool processor_physically_present(acpi_handle handle);
static inline void acpi_early_processor_control_setup(void) {}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void);
+#else
+static inline void acpi_idle_rescan_dead_smt_siblings(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 1687483ff319..76a856c32c4d 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -12,7 +12,7 @@
enum acpi_irq_model_id acpi_irq_model;
-static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
+static acpi_gsi_domain_disp_fn acpi_get_gsi_domain_id;
static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
/**
@@ -307,12 +307,24 @@ EXPORT_SYMBOL_GPL(acpi_irq_get);
* for a given GSI
*/
void __init acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *(*fn)(u32))
+ acpi_gsi_domain_disp_fn fn)
{
acpi_irq_model = model;
acpi_get_gsi_domain_id = fn;
}
+/*
+ * acpi_get_gsi_dispatcher() - Get the GSI dispatcher function
+ *
+ * Return the dispatcher function that computes the domain fwnode for
+ * a given GSI.
+ */
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void)
+{
+ return acpi_get_gsi_domain_id;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gsi_dispatcher);
+
/**
* acpi_set_gsi_to_irq_fallback - Register a GSI transfer
* callback to fallback to arch specified implementation.
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 0a725e46d017..53816dfab645 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
+#include <linux/memory.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
@@ -429,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
{
struct acpi_cedt_cfmws *cfmws;
int *fake_pxm = arg;
- u64 start, end;
+ u64 start, end, align;
int node;
+ int err;
cfmws = (struct acpi_cedt_cfmws *)header;
start = cfmws->base_hpa;
end = cfmws->base_hpa + cfmws->window_size;
+ /* Align memblock size to CFMW regions if possible */
+ align = 1UL << __ffs(start | end);
+ if (align >= SZ_256M) {
+ err = memory_block_advise_max_size(align);
+ if (err)
+ pr_warn("CFMWS: memblock size advise failed (%d)\n", err);
+ } else
+ pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n");
+
/*
* The SRAT may have already described NUMA details for all,
* or a portion of, this CFMWS HPA range. Extend the memblks
@@ -453,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
return -EINVAL;
}
- if (numa_add_memblk(node, start, end) < 0) {
+ if (numa_add_reserved_memblk(node, start, end) < 0) {
/* CXL driver must handle the NUMA_NO_NODE case */
pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
node, start, end);
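
The alignment computed above is the largest power of two dividing both the window base and end, taken from the lowest set bit of (start | end); memblock can then be advised to use block sizes that tile the CFMWS region exactly. A quick check in plain C, with __builtin_ctzll playing the role of __ffs():

#include <stdio.h>

int main(void)
{
	unsigned long long start = 0x100000000ULL;	/* 4 GiB base   */
	unsigned long long end   = 0x140000000ULL;	/* 1 GiB window */
	unsigned long long align = 1ULL << __builtin_ctzll(start | end);

	printf("0x%llx\n", align);		/* 0x40000000 = 1 GiB */
	return 0;
}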
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index df9328c850bd..f2c943b934be 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -42,7 +42,6 @@ static struct acpi_osi_entry
osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Module Device", true},
{"Processor Device", true},
- {"3.0 _SCP Extensions", true},
{"Processor Aggregator Device", true},
};
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d0b6a024daae..74ade4160314 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -858,7 +858,7 @@ next:
}
}
-static void acpi_pci_root_remap_iospace(struct fwnode_handle *fwnode,
+static void acpi_pci_root_remap_iospace(const struct fwnode_handle *fwnode,
struct resource_entry *entry)
{
#ifdef PCI_IOBASE
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index ffbfd32f4cf1..b43f4459a4f6 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -688,6 +688,9 @@ static int __init platform_profile_init(void)
{
int err;
+ if (acpi_disabled)
+ return -EOPNOTSUPP;
+
err = class_register(&platform_profile_class);
if (err)
return err;
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index f73ce6e13065..54676e3d82dd 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -231,16 +231,18 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor);
- while ((unsigned long)entry + proc_sz < table_end) {
+ /* ignore subtable types that are smaller than a processor node */
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
+
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
cpu_node->parent == node_entry)
return 0;
if (entry->length == 0)
return 0;
+
entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);
-
}
return 1;
}
@@ -273,15 +275,18 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
- while ((unsigned long)entry + proc_sz < table_end) {
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->length == 0) {
pr_warn("Invalid zero length subtable\n");
break;
}
+ /* entry->length may not equal proc_sz; revalidate the processor structure length */
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
acpi_cpu_id == cpu_node->acpi_processor_id &&
+ (unsigned long)entry + entry->length <= table_end &&
+ entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) &&
acpi_pptt_leaf_node(table_hdr, cpu_node)) {
return (struct acpi_pptt_processor *)entry;
}
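
Two checks are tightened across these PPTT hunks: the loop condition becomes <= so a subtable ending exactly at table_end is still visited, and a candidate processor node is accepted only if its declared length both fits in the table and matches the fixed header plus its private-resource array. A condensed sketch with a trimmed stand-in structure (the real acpi_pptt_processor layout has more fields):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct pptt_cpu {		/* stand-in for struct acpi_pptt_processor */
	uint8_t  type, length;
	uint32_t num_priv;	/* number_of_priv_resources */
};

static bool cpu_node_fits(uintptr_t entry, uintptr_t table_end,
			  const struct pptt_cpu *n)
{
	size_t proc_sz = sizeof(*n);

	return entry + proc_sz <= table_end &&		/* header fits */
	       entry + n->length <= table_end &&	/* body fits   */
	       n->length == proc_sz + n->num_priv * sizeof(uint32_t);
}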
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 3b281bc1e73c..65e779be64ff 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -279,6 +279,9 @@ static int __init acpi_processor_driver_init(void)
* after acpi_cppc_processor_probe() has been called for all online CPUs
*/
acpi_processor_init_invariance_cppc();
+
+ acpi_idle_rescan_dead_smt_siblings();
+
return 0;
err:
driver_unregister(&acpi_processor_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b181f7fc2090..2c2dc559e0f8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -24,6 +24,8 @@
#include <acpi/processor.h>
#include <linux/context_tracking.h>
+#include "internal.h"
+
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -55,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = {
};
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ arch_cpu_rescan_dead_smt_siblings();
+}
+
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
@@ -461,10 +469,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
- unsigned int i;
int result;
-
/* NOTE: the idle thread may not be running while calling
* this function */
@@ -481,17 +487,7 @@ static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
acpi_processor_get_power_info_default(pr);
pr->power.count = acpi_processor_power_verify(pr);
-
- /*
- * if one state of type C2 or C3 is available, mark this
- * CPU as being "idle manageable"
- */
- for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
- if (pr->power.states[i].valid) {
- pr->power.count = i;
- pr->flags.power = 1;
- }
- }
+ pr->flags.power = 1;
return 0;
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 53996f1a2d80..64b8d1e19594 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -20,6 +20,7 @@
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
+#include <asm/msr.h>
#endif
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 00d045e5f524..d1541a386fbc 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -18,9 +18,12 @@
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
+#include <linux/uaccess.h>
#include <acpi/processor.h>
#include <asm/io.h>
-#include <linux/uaccess.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
/* ignore_tpc:
* 0 -> acpi processor driver doesn't ignore _TPC values
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 14c7bac4100b..b1ab192d7a08 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -534,7 +534,7 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
- /* MECHREV Jiaolong17KS Series GM7XG0M */
+ /* MECHREVO Jiaolong17KS Series GM7XG0M */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"),
},
@@ -667,6 +667,13 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ /* MACHENIKE L16P/L16P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MACHENIKE"),
+ DMI_MATCH(DMI_BOARD_NAME, "L16P"),
+ },
+ },
+ {
/*
* TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
* board-name is changed, so check OEM strings instead. Note
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2295abbecd14..fa9bb8c8ce95 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -396,7 +396,7 @@ static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
+static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst = {
ACPI_SIG_BERT, ACPI_SIG_BGRT, ACPI_SIG_CPEP, ACPI_SIG_ECDT,
ACPI_SIG_EINJ, ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT,
ACPI_SIG_MSCT, ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT,
@@ -719,8 +719,12 @@ int __init acpi_locate_initial_tables(void)
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_warn("Failed to initialize tables, status=0x%x (%s)", status, msg);
return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 0c874186f8ae..5c2defe55898 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -803,6 +803,12 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_aml_dependency_fix(tz);
+ /*
+ * Set the cooling mode [_SCP] to active cooling. This needs to happen before
+ * we retrieve the trip point values.
+ */
+ acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE);
+
/* Get trip points [_ACi, _PSV, etc.] (required). */
acpi_thermal_get_trip_points(tz);
@@ -814,10 +820,6 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;
- /* Set the cooling mode [_SCP] to active cooling. */
- acpi_execute_simple_method(tz->device->handle, "_SCP",
- ACPI_THERMAL_MODE_ACTIVE);
-
/* Determine the default polling frequency [_TZP]. */
if (tzp)
tz->polling_frequency = tzp;
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 2aa69a2fba73..c13a20365c2c 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -19,11 +19,11 @@
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
-#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
struct viot_iommu {
/* Node offset within the table */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 5fc2c8ee61b1..c463ca4a8fff 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -79,6 +79,8 @@ static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
static HLIST_HEAD(binder_devices);
+static DEFINE_SPINLOCK(binder_devices_lock);
+
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);
@@ -3261,20 +3263,16 @@ static void binder_transaction(struct binder_proc *proc,
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
- (u64)tr->data.ptr.buffer,
- (u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
@@ -4223,20 +4221,21 @@ static int binder_thread_write(struct binder_proc *proc,
if (IS_ERR_OR_NULL(buffer)) {
if (PTR_ERR(buffer) == -EPERM) {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ "%d:%d BC_FREE_BUFFER matched unreturned or currently freeing buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
} else {
binder_user_error(
- "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ "%d:%d BC_FREE_BUFFER no match for buffer at offset %lx\n",
proc->pid, thread->pid,
- (u64)data_ptr);
+ (unsigned long)data_ptr - proc->alloc.vm_start);
}
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
- proc->pid, thread->pid, (u64)data_ptr,
+ "%d:%d BC_FREE_BUFFER at offset %lx found buffer %d for %s transaction\n",
+ proc->pid, thread->pid,
+ (unsigned long)data_ptr - proc->alloc.vm_start,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
binder_free_buf(proc, thread, buffer, false);
@@ -5053,16 +5052,14 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
+ "%d:%d %s %d %d:%d, cmd %u size %zd-%zd\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
(cmd == BR_TRANSACTION_SEC_CTX) ?
"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
- t->buffer->data_size, t->buffer->offsets_size,
- (u64)trd->data.ptr.buffer,
- (u64)trd->data.ptr.offsets);
+ t->buffer->data_size, t->buffer->offsets_size);
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -5244,6 +5241,7 @@ static void binder_free_proc(struct binder_proc *proc)
__func__, proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
+ binder_remove_device(device);
kfree(proc->context->name);
kfree(device);
}
@@ -6377,10 +6375,10 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
}
static void print_binder_work_ilocked(struct seq_file *m,
- struct binder_proc *proc,
- const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w, bool hash_ptrs)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -6403,9 +6401,15 @@ static void print_binder_work_ilocked(struct seq_file *m,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
- prefix, node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
+ if (hash_ptrs)
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id,
+ (void *)(long)node->ptr,
+ (void *)(long)node->cookie);
+ else
+ seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+ prefix, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
@@ -6430,7 +6434,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
static void print_binder_thread_ilocked(struct seq_file *m,
struct binder_thread *thread,
- int print_always)
+ bool print_always, bool hash_ptrs)
{
struct binder_transaction *t;
struct binder_work *w;
@@ -6460,14 +6464,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
}
list_for_each_entry(w, &thread->todo, entry) {
print_binder_work_ilocked(m, thread->proc, " ",
- " pending transaction", w);
+ " pending transaction",
+ w, hash_ptrs);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
- struct binder_node *node)
+ struct binder_node *node,
+ bool hash_ptrs)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -6475,8 +6481,13 @@ static void print_binder_node_nilocked(struct seq_file *m,
count = hlist_count_nodes(&node->refs);
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
- node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ if (hash_ptrs)
+ seq_printf(m, " node %d: u%p c%p", node->debug_id,
+ (void *)(long)node->ptr, (void *)(long)node->cookie);
+ else
+ seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
+ seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count, node->tmp_refs);
@@ -6489,7 +6500,8 @@ static void print_binder_node_nilocked(struct seq_file *m,
if (node->proc) {
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work_ilocked(m, node->proc, " ",
- " pending async transaction", w);
+ " pending async transaction",
+ w, hash_ptrs);
}
}
@@ -6505,8 +6517,54 @@ static void print_binder_ref_olocked(struct seq_file *m,
binder_node_unlock(ref->node);
}
-static void print_binder_proc(struct seq_file *m,
- struct binder_proc *proc, int print_all)
+/**
+ * print_next_binder_node_ilocked() - Print binder_node from a locked list
+ * @m: struct seq_file for output via seq_printf()
+ * @proc: struct binder_proc we hold the inner_proc_lock to (if any)
+ * @node: struct binder_node to print fields of
+ * @prev_node: struct binder_node we hold a temporary reference to (if any)
+ * @hash_ptrs: whether to hash @node's binder_uintptr_t fields
+ *
+ * Helper function to handle synchronization around printing a struct
+ * binder_node while iterating through @proc->nodes or the dead nodes list.
+ * Caller must hold either @proc->inner_lock (for live nodes) or
+ * binder_dead_nodes_lock. This lock will be released during the body of this
+ * function, but it will be reacquired before returning to the caller.
+ *
+ * Return: pointer to the struct binder_node we hold a tmpref on
+ */
+static struct binder_node *
+print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_node *prev_node, bool hash_ptrs)
+{
+ /*
+ * Take a temporary reference on the node so that it isn't freed while
+ * we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /*
+ * Live nodes need to drop the inner proc lock and dead nodes need to
+ * drop the binder_dead_nodes_lock before trying to take the node lock.
+ */
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ if (prev_node)
+ binder_put_node(prev_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node, hash_ptrs);
+ binder_node_inner_unlock(node);
+ if (proc)
+ binder_inner_proc_lock(proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ return node;
+}
+
+static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
+ bool print_all, bool hash_ptrs)
{
struct binder_work *w;
struct rb_node *n;
@@ -6519,31 +6577,19 @@ static void print_binder_proc(struct seq_file *m,
header_pos = m->count;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
- rb_node), print_all);
+ rb_node), print_all, hash_ptrs);
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
if (!print_all && !node->has_async_transaction)
continue;
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the tree
- * while we print it.
- */
- binder_inc_node_tmpref_ilocked(node);
- /* Need to drop inner lock to take node lock */
- binder_inner_proc_unlock(proc);
- if (last_node)
- binder_put_node(last_node);
- binder_node_inner_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_inner_unlock(node);
- last_node = node;
- binder_inner_proc_lock(proc);
+ last_node = print_next_binder_node_ilocked(m, proc, node,
+ last_node,
+ hash_ptrs);
}
binder_inner_proc_unlock(proc);
if (last_node)
@@ -6551,19 +6597,18 @@ static void print_binder_proc(struct seq_file *m,
if (print_all) {
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc);
- n != NULL;
- n = rb_next(n))
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
print_binder_ref_olocked(m, rb_entry(n,
- struct binder_ref,
- rb_node_desc));
+ struct binder_ref,
+ rb_node_desc));
binder_proc_unlock(proc);
}
binder_alloc_print_allocated(m, &proc->alloc);
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
print_binder_work_ilocked(m, proc, " ",
- " pending transaction", w);
+ " pending transaction", w,
+ hash_ptrs);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
@@ -6696,7 +6741,7 @@ static void print_binder_proc_stats(struct seq_file *m,
count = 0;
ready_threads = 0;
binder_inner_proc_lock(proc);
- for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->threads); n; n = rb_next(n))
count++;
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
@@ -6710,7 +6755,7 @@ static void print_binder_proc_stats(struct seq_file *m,
ready_threads,
free_async_space);
count = 0;
- for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->nodes); n; n = rb_next(n))
count++;
binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
@@ -6718,7 +6763,7 @@ static void print_binder_proc_stats(struct seq_file *m,
strong = 0;
weak = 0;
binder_proc_lock(proc);
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
@@ -6745,7 +6790,7 @@ static void print_binder_proc_stats(struct seq_file *m,
print_binder_stats(m, " ", &proc->stats);
}
-static int state_show(struct seq_file *m, void *unused)
+static void print_binder_state(struct seq_file *m, bool hash_ptrs)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -6756,31 +6801,40 @@ static int state_show(struct seq_file *m, void *unused)
spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
- /*
- * take a temporary reference on the node so it
- * survives and isn't removed from the list
- * while we print it.
- */
- node->tmp_refs++;
- spin_unlock(&binder_dead_nodes_lock);
- if (last_node)
- binder_put_node(last_node);
- binder_node_lock(node);
- print_binder_node_nilocked(m, node);
- binder_node_unlock(node);
- last_node = node;
- spin_lock(&binder_dead_nodes_lock);
- }
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+ last_node = print_next_binder_node_ilocked(m, NULL, node,
+ last_node,
+ hash_ptrs);
spin_unlock(&binder_dead_nodes_lock);
if (last_node)
binder_put_node(last_node);
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 1);
+ print_binder_proc(m, proc, true, hash_ptrs);
+ mutex_unlock(&binder_procs_lock);
+}
+
+static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
+{
+ struct binder_proc *proc;
+
+ seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
+ hlist_for_each_entry(proc, &binder_procs, proc_node)
+ print_binder_proc(m, proc, false, hash_ptrs);
mutex_unlock(&binder_procs_lock);
+}
+
+static int state_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, false);
+ return 0;
+}
+static int state_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_state(m, true);
return 0;
}
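
The *_hashed debugfs files reuse the same printers with hash_ptrs set, switching the cookie format from raw %016llx to %p, which printk hashes per boot so user-supplied binder addresses are not leaked. The dispatch in miniature (a sketch, not the binder code itself):

#include <linux/seq_file.h>
#include <linux/types.h>

static void demo_print_node(struct seq_file *m, int debug_id, u64 ptr,
			    bool hash_ptrs)
{
	if (hash_ptrs)		/* %p prints a per-boot hashed value */
		seq_printf(m, "node %d: u%p\n", debug_id,
			   (void *)(long)ptr);
	else			/* legacy raw format */
		seq_printf(m, "node %d: u%016llx\n", debug_id, ptr);
}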
@@ -6802,14 +6856,13 @@ static int stats_show(struct seq_file *m, void *unused)
static int transactions_show(struct seq_file *m, void *unused)
{
- struct binder_proc *proc;
-
- seq_puts(m, "binder transactions:\n");
- mutex_lock(&binder_procs_lock);
- hlist_for_each_entry(proc, &binder_procs, proc_node)
- print_binder_proc(m, proc, 0);
- mutex_unlock(&binder_procs_lock);
+ print_binder_transactions(m, false);
+ return 0;
+}
+static int transactions_hashed_show(struct seq_file *m, void *unused)
+{
+ print_binder_transactions(m, true);
return 0;
}
@@ -6822,7 +6875,7 @@ static int proc_show(struct seq_file *m, void *unused)
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, itr, 1);
+ print_binder_proc(m, itr, true, false);
}
}
mutex_unlock(&binder_procs_lock);
@@ -6889,8 +6942,10 @@ const struct file_operations binder_fops = {
};
DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(state_hashed);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
const struct binder_debugfs_entry binder_debugfs_entries[] = {
@@ -6901,6 +6956,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "state_hashed",
+ .mode = 0444,
+ .fops = &state_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "stats",
.mode = 0444,
.fops = &stats_fops,
@@ -6913,6 +6974,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
.data = NULL,
},
{
+ .name = "transactions_hashed",
+ .mode = 0444,
+ .fops = &transactions_hashed_fops,
+ .data = NULL,
+ },
+ {
.name = "transaction_log",
.mode = 0444,
.fops = &transaction_log_fops,
@@ -6929,7 +6996,16 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
void binder_add_device(struct binder_device *device)
{
+ spin_lock(&binder_devices_lock);
hlist_add_head(&device->hlist, &binder_devices);
+ spin_unlock(&binder_devices_lock);
+}
+
+void binder_remove_device(struct binder_device *device)
+{
+ spin_lock(&binder_devices_lock);
+ hlist_del_init(&device->hlist);
+ spin_unlock(&binder_devices_lock);
}
static int __init init_binder_device(const char *name)
@@ -6956,7 +7032,7 @@ static int __init init_binder_device(const char *name)
return ret;
}
- hlist_add_head(&binder_device->hlist, &binder_devices);
+ binder_add_device(binder_device);
return ret;
}
@@ -7018,7 +7094,7 @@ static int __init binder_init(void)
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
- hlist_del(&device->hlist);
+ binder_remove_device(device);
kfree(device);
}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 6a66c9769c6c..1ba5caf1d88d 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -583,9 +583,13 @@ struct binder_object {
/**
* Add a binder device to binder_devices
* @device: the new binder device to add to the global list
- *
- * Not reentrant as the list is not protected by any locks
*/
void binder_add_device(struct binder_device *device);
+/**
+ * Remove a binder device from binder_devices
+ * @device: the binder device to remove from the global list
+ */
+void binder_remove_device(struct binder_device *device);
+
#endif /* _LINUX_BINDER_INTERNAL_H */
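
With removal now happening from binder_free_proc() and binderfs_evict_inode() as well as the init error path, the global device list needs real locking; the new spinlock serializes every mutation. The pattern, reduced to its essentials (demo names are not binder's):

#include <linux/list.h>
#include <linux/spinlock.h>

static HLIST_HEAD(demo_devices);
static DEFINE_SPINLOCK(demo_devices_lock);

struct demo_device {
	struct hlist_node hlist;
};

static void demo_add_device(struct demo_device *d)
{
	spin_lock(&demo_devices_lock);
	hlist_add_head(&d->hlist, &demo_devices);
	spin_unlock(&demo_devices_lock);
}

static void demo_remove_device(struct demo_device *d)
{
	spin_lock(&demo_devices_lock);
	hlist_del_init(&d->hlist);	/* safe to call again later */
	spin_unlock(&demo_devices_lock);
}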
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 94c6446604fc..024275dbfdd8 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -187,7 +187,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode_lock(d_inode(root));
/* look it up */
- dentry = lookup_one_len(name, root, name_len);
+ dentry = lookup_noperm(&QSTR(name), root);
if (IS_ERR(dentry)) {
inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
@@ -274,7 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
- hlist_del_init(&device->hlist);
+ binder_remove_device(device);
kfree(device->context.name);
kfree(device);
}
@@ -487,7 +487,7 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent,
{
struct dentry *dentry;
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry))
return dentry;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 163ac909bd06..e5e5c2e81d09 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1410,8 +1410,15 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
static bool ahci_broken_lpm(struct pci_dev *pdev)
{
+ /*
+ * Platforms with LPM problems.
+ * If driver_data is NULL, there is no existing BIOS version with
+ * functioning LPM.
+ * If driver_data is non-NULL, then driver_data contains the DMI BIOS
+ * build date of the first BIOS version with functioning LPM (i.e. older
+ * BIOS versions have broken LPM).
+ */
static const struct dmi_system_id sysids[] = {
- /* Various Lenovo 50 series have LPM issues with older BIOSen */
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1438,13 +1445,30 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
},
+ .driver_data = "20180409", /* 2.35 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ASUSPRO D840MB_M840SA"),
+ },
+ /* 320 is broken, there is no known good version. */
+ },
+ {
/*
- * Note date based on release notes, 2.35 has been
- * reported to be good, but I've been unable to get
- * a hold of the reporter to get the DMI BIOS date.
- * TODO: fix this.
+ * AMD 500 Series Chipset SATA Controller [1022:43eb]
+ * on this motherboard timeouts on ports 5 and 6 when
+ * LPM is enabled, at least with WDC WD20EFAX-68FB5N0
+ * hard drives. LPM with the same drive works fine on
+ * all other ports on the same controller.
*/
- .driver_data = "20180310", /* 2.35 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME,
+ "ROG STRIX B550-F GAMING (WI-FI)"),
+ },
+ /* 3621 is broken, there is no known good version. */
},
{ } /* terminate list */
};
@@ -1455,6 +1479,9 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
if (!dmi)
return false;
+ if (!dmi->driver_data)
+ return true;
+
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
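
driver_data now encodes policy: NULL means no fixed BIOS exists, otherwise it holds the DMI build date of the first good BIOS as "YYYYMMDD", which compares chronologically with a plain strcmp() against the date string built by the snprintf() above. Sketch:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool lpm_is_broken(const char *bios_date, const char *first_good)
{
	if (!first_good)
		return true;			/* no known good BIOS */
	return strcmp(bios_date, first_good) < 0;
}

int main(void)
{
	/* a W541 BIOS built 2018-03-10 predates the 2.35 fix */
	printf("%d\n", lpm_is_broken("20180310", "20180409"));
	return 0;
}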
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 22afa4ff860d..4e9c82f36df1 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1034,7 +1034,7 @@ static void ahci_sw_activity(struct ata_link *link)
static void ahci_sw_activity_blink(struct timer_list *t)
{
- struct ahci_em_priv *emp = from_timer(emp, t, timer);
+ struct ahci_em_priv *emp = timer_container_of(emp, t, timer);
struct ata_link *link = emp->link;
struct ata_port *ap = link->ap;
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index b7f0bf795521..f2140fc06ba0 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
/**
- * ata_acpi_cbl_80wire - Check for 80 wire cable
+ * ata_acpi_cbl_pata_type - Return PATA cable type
* @ap: Port to check
- * @gtm: GTM data to use
*
- * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
+ * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS
*/
-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+int ata_acpi_cbl_pata_type(struct ata_port *ap)
{
struct ata_device *dev;
+ int ret = ATA_CBL_PATA_UNK;
+ const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+
+ if (!gtm)
+ return ATA_CBL_PATA40;
ata_for_each_dev(dev, &ap->link, ENABLED) {
unsigned int xfer_mask, udma_mask;
@@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
- if (udma_mask & ~ATA_UDMA_MASK_40C)
- return 1;
+ ret = ATA_CBL_PATA40;
+
+ if (udma_mask & ~ATA_UDMA_MASK_40C) {
+ ret = ATA_CBL_PATA80;
+ break;
+ }
}
- return 0;
+ return ret;
}
-EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
+EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type);
static void ata_acpi_gtf_to_tf(struct ata_device *dev,
const struct ata_acpi_gtf *gtf,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 773799cfd443..79b20da0a256 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6682,12 +6682,6 @@ const struct ata_port_info ata_dummy_port_info = {
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-void ata_print_version(const struct device *dev, const char *version)
-{
- dev_printk(KERN_DEBUG, dev, "version %s\n", version);
-}
-EXPORT_SYMBOL(ata_print_version);
-
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b990c1ee0b12..1f90fcd2b3c4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -860,7 +860,7 @@ static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
- struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
+ struct ata_port *ap = timer_container_of(ap, t, fastdrain_timer);
unsigned long flags;
unsigned int cnt;
@@ -3432,7 +3432,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
enum ata_lpm_policy old_policy = link->lpm_policy;
- bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
+ bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
unsigned int err_mask;
int rc;
@@ -3443,28 +3443,35 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
return 0;
/*
- * DIPM is enabled only for MIN_POWER as some devices
- * misbehave when the host NACKs transition to SLUMBER. Order
- * device and link configurations such that the host always
- * allows DIPM requests.
+ * This function currently assumes that it will never be supplied policy
+ * ATA_LPM_UNKNOWN.
+ */
+ if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
+ return 0;
+
+ /*
+ * DIPM is enabled only for ATA_LPM_MIN_POWER,
+ * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
+ * some devices misbehave when the host NACKs transition to SLUMBER.
*/
ata_for_each_dev(dev, link, ENABLED) {
- bool hipm = ata_id_has_hipm(dev->id);
- bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
+ bool dev_has_hipm = ata_id_has_hipm(dev->id);
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
/* find the first enabled and LPM enabled devices */
if (!link_dev)
link_dev = dev;
- if (!lpm_dev && (hipm || dipm))
+ if (!lpm_dev &&
+ (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
lpm_dev = dev;
hints &= ~ATA_LPM_EMPTY;
- if (!hipm)
+ if (!dev_has_hipm)
hints &= ~ATA_LPM_HIPM;
/* disable DIPM before changing link config */
- if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
+ if (dev_has_dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
@@ -3505,10 +3512,16 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = policy;
- /* host config updated, enable DIPM if transitioning to MIN_POWER */
+ /*
+ * Host config updated, enable DIPM if transitioning to
+ * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
+ * ATA_LPM_MED_POWER_WITH_DIPM.
+ */
ata_for_each_dev(dev, link, ENABLED) {
- if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
- ata_id_has_dipm(dev->id)) {
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
+ dev_has_dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 2e4463d3a356..cb46ce276bb1 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1509,9 +1509,10 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
struct ata_queued_cmd *qc;
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
- u64 sense_valid, val;
u16 extended_sense;
bool aux_icc_valid;
+ u32 sense_valid;
+ u64 val;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1529,8 +1530,7 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
return -EIO;
}
- sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
- ((u64)buf[10] << 16) | ((u64)buf[11] << 24);
+ sense_valid = get_unaligned_le32(&buf[8]);
extended_sense = get_unaligned_le16(&buf[14]);
aux_icc_valid = extended_sense & BIT(15);
@@ -1545,7 +1545,7 @@ int ata_eh_get_ncq_success_sense(struct ata_link *link)
* If the command does not have any sense data, clear ATA_SENSE.
* Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
*/
- if (!(sense_valid & (1ULL << tag))) {
+ if (!(sense_valid & BIT(tag))) {
qc->result_tf.status &= ~ATA_SENSE;
continue;
}
@@ -1634,7 +1634,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
}
- if (!(link->sactive & (1 << tag))) {
+ if (!(link->sactive & BIT(tag))) {
ata_link_err(link, "log page 10h reported inactive tag %d\n",
tag);
return;
@@ -1659,8 +1659,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
ascq);
- ata_scsi_set_sense_information(dev, qc->scsicmd,
- &qc->result_tf);
qc->flags |= ATA_QCFLAG_SENSE_VALID;
}
}
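The libata-sata.c changes are pure notation cleanups: a 32-bit little-endian field assembled from four shift-and-OR terms becomes one get_unaligned_le32() call, and open-coded tag shifts become BIT(). A minimal standalone illustration; tag_has_sense() is hypothetical:

#include <linux/bits.h>
#include <linux/unaligned.h>

static bool tag_has_sense(const u8 *buf, unsigned int tag)
{
	/* Bytes 8..11 hold the per-tag "sense valid" bitmap, LE. */
	u32 sense_valid = get_unaligned_le32(&buf[8]);

	return sense_valid & BIT(tag);	/* NCQ tags are 0..31 */
}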
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c0eb8c67a9ff..a21c9895408d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -216,17 +216,21 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
+static void ata_scsi_set_sense_information(struct ata_queued_cmd *qc)
{
u64 information;
- information = ata_tf_read_block(tf, dev);
+ if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
+ ata_dev_dbg(qc->dev,
+ "missing result TF: can't set INFORMATION sense field\n");
+ return;
+ }
+
+ information = ata_tf_read_block(&qc->result_tf, qc->dev);
if (information == U64_MAX)
return;
- scsi_set_sense_information(cmd->sense_buffer,
+ scsi_set_sense_information(qc->scsicmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, information);
}
@@ -971,8 +975,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* ata_gen_ata_sense - generate a SCSI fixed sense block
* @qc: Command that we are erroring out
*
- * Generate sense block for a failed ATA command @qc. Descriptor
- * format is used to accommodate LBA48 block address.
+ * Generate sense block for a failed ATA command @qc.
*
* LOCKING:
* None.
@@ -982,8 +985,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct ata_device *dev = qc->dev;
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
- unsigned char *sb = cmd->sense_buffer;
- u64 block;
u8 sense_key, asc, ascq;
if (ata_dev_disabled(dev)) {
@@ -1014,12 +1015,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
-
- block = ata_tf_read_block(&qc->result_tf, dev);
- if (block == U64_MAX)
- return;
-
- scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1679,8 +1674,10 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
ata_scsi_set_passthru_sense_fields(qc);
if (is_ck_cond_request)
set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
- } else if (is_error && !have_sense) {
- ata_gen_ata_sense(qc);
+ } else if (is_error) {
+ if (!have_sense)
+ ata_gen_ata_sense(qc);
+ ata_scsi_set_sense_information(qc);
}
ata_qc_done(qc);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 0337be4faec7..ce5c628fa6fd 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -141,9 +141,6 @@ extern int ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct ata_device *dev,
- struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_scsi_dev_rescan(struct work_struct *work);
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index b811efd2cc34..73e81e160c91 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -27,7 +27,7 @@
#include <scsi/scsi_host.h>
#include <linux/dmi.h>
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86) && defined(CONFIG_X86_32)
#include <asm/msr.h>
static int use_msr;
module_param_named(msr, use_msr, int, 0644);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index fbf5f07ea357..f7a933eefe05 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1298,7 +1298,7 @@ static int pata_macio_pci_attach(struct pci_dev *pdev,
priv->dev = &pdev->dev;
/* Get MMIO regions */
- if (pci_request_regions(pdev, "pata-macio")) {
+ if (pcim_request_all_regions(pdev, "pata-macio")) {
dev_err(&pdev->dev,
"Cannot obtain PCI resources\n");
return -EBUSY;
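pcim_request_all_regions() is the device-managed counterpart of pci_request_regions(): the regions are released automatically on unbind, so no pci_release_regions() is needed in the error or remove paths. A hypothetical probe skeleton showing the pairing:

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Managed: auto-released when the driver unbinds. */
	ret = pcim_request_all_regions(pdev, "my-driver");
	if (ret)
		return ret;

	/* ... ioremap BARs, register the host ... */
	return 0;
}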
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 696b99720dcb..bb80e7800dcb 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) {
two drives */
if (ata66 & (0x10100000 >> (16 * ap->port_no)))
return ATA_CBL_PATA80;
+
/* Check with ACPI so we can spot BIOS reported SATA bridges */
- if (ata_acpi_init_gtm(ap) &&
- ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
- return ATA_CBL_PATA80;
- return ATA_CBL_PATA40;
+ return ata_acpi_cbl_pata_type(ap);
}
static int via_pre_reset(struct ata_link *link, unsigned long deadline)
@@ -368,7 +366,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
}
if (dev->class == ATA_DEV_ATAPI &&
- dmi_check_system(no_atapi_dma_dmi_table)) {
+ (dmi_check_system(no_atapi_dma_dmi_table) ||
+ config->id == PCI_DEVICE_ID_VIA_6415)) {
ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
mask &= ATA_MASK_PIO;
}
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index c3042eca6332..f7f5131af937 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1301,32 +1301,32 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
}
if (dimm_test) {
- u8 test_parttern1[40] =
+ u8 test_pattern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
'D','e','f','i','n','e','d',' ',
'1','.','1','0',
'9','8','0','3','1','6','1','2',0,0};
- u8 test_parttern2[40] = {0};
+ u8 test_pattern2[40] = {0};
- pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
- pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
+ pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
- pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
- pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
+ pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+ dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
+ pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
40);
dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
- pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
- pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
+ pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
- test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ test_pattern2[0],
+ test_pattern2[1], &(test_pattern2[2]));
}
	/* ECC initialization. */
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d4aa0f353b6c..eeae160c898d 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -288,7 +288,9 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (!skb->len) return 0;
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ goto done;
+
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
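The atmtcp fix replaces a zero-length check with a full-header check before the struct atmtcp_hdr cast: a zero check alone still lets a short skb through, and the subsequent hdr->length read walks off the end of the buffer. Minimal sketch of the pattern, where struct proto_hdr stands in for struct atmtcp_hdr:

#include <linux/skbuff.h>
#include <linux/types.h>

struct proto_hdr {		/* stand-in for struct atmtcp_hdr */
	__be16 vpi;
	__be16 vci;
	__be32 length;
};

static const struct proto_hdr *pull_hdr(const struct sk_buff *skb)
{
	/* Reject anything shorter than the fixed header. */
	if (skb->len < sizeof(struct proto_hdr))
		return NULL;

	return (const struct proto_hdr *)skb->data;
}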
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index a876024d8a05..1206ab764ba9 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1531,7 +1531,7 @@ idt77252_tx(struct idt77252_dev *card)
static void
tst_timer(struct timer_list *t)
{
- struct idt77252_dev *card = from_timer(card, t, tst_timer);
+ struct idt77252_dev *card = timer_container_of(card, t, tst_timer);
unsigned long base, idle, jump;
unsigned long flags;
u32 pc;
@@ -2070,7 +2070,7 @@ idt77252_rate_logindex(struct idt77252_dev *card, int pcr)
static void
idt77252_est_timer(struct timer_list *t)
{
- struct rate_estimator *est = from_timer(est, t, timer);
+ struct rate_estimator *est = timer_container_of(est, t, timer);
struct vc_map *vc = est->vc;
struct idt77252_dev *card = vc->card;
unsigned long flags;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 00fe25b5b6a3..2a1fe3080712 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1758,7 +1758,7 @@ static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
static void lanai_timed_poll(struct timer_list *t)
{
- struct lanai_dev *lanai = from_timer(lanai, t, timer);
+ struct lanai_dev *lanai = timer_container_of(lanai, t, timer);
#ifndef DEBUG_RW
unsigned long flags;
#ifdef USE_POWERDOWN
diff --git a/drivers/auxdisplay/line-display.c b/drivers/auxdisplay/line-display.c
index b6808c4f89b6..8590a4cd21e0 100644
--- a/drivers/auxdisplay/line-display.c
+++ b/drivers/auxdisplay/line-display.c
@@ -40,7 +40,7 @@
*/
static void linedisp_scroll(struct timer_list *t)
{
- struct linedisp *linedisp = from_timer(linedisp, t, timer);
+ struct linedisp *linedisp = timer_container_of(linedisp, t, timer);
unsigned int i, ch = linedisp->scroll_pos;
unsigned int num_chars = linedisp->num_chars;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index af0029d30dbe..1037169abb45 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -154,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
per_cpu(arch_freq_scale, i) = scale;
}
-DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
-{
- per_cpu(cpu_scale, cpu) = capacity;
-}
-
DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
@@ -207,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus,
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
-static ssize_t cpu_capacity_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
-}
-
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
-static DEVICE_ATTR_RO(cpu_capacity);
-
-static int cpu_capacity_sysctl_add(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_create_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int cpu_capacity_sysctl_remove(unsigned int cpu)
-{
- struct device *cpu_dev = get_cpu_device(cpu);
-
- if (!cpu_dev)
- return -ENOENT;
-
- device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
-
- return 0;
-}
-
-static int register_cpu_capacity_sysctl(void)
-{
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
- cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
-
- return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
static int update_topology;
int topology_update_cpu_topology(void)
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 95717d509ca9..dba7c8e13a53 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -395,6 +395,114 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
}
EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+static void auxiliary_device_release(struct device *dev)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+ kfree(auxdev);
+}
+
+/**
+ * auxiliary_device_create - create a device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = kzalloc(sizeof(*auxdev), GFP_KERNEL);
+ if (!auxdev)
+ return NULL;
+
+ auxdev->id = id;
+ auxdev->name = devname;
+ auxdev->dev.parent = dev;
+ auxdev->dev.platform_data = platform_data;
+ auxdev->dev.release = auxiliary_device_release;
+ device_set_of_node_from_dev(&auxdev->dev, dev);
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret) {
+ kfree(auxdev);
+ return NULL;
+ }
+
+ ret = __auxiliary_device_add(auxdev, modname);
+ if (ret) {
+ /*
+ * It may look odd but auxdev should not be freed here.
+	 * auxiliary_device_uninit() calls put_device(), which calls
+	 * the device release function, freeing auxdev.
+ */
+ auxiliary_device_uninit(auxdev);
+ return NULL;
+ }
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_create);
+
+/**
+ * auxiliary_device_destroy - remove an auxiliary device
+ * @auxdev: pointer to the auxdev to be removed
+ *
+ * Helper to remove an auxiliary device created with
+ * auxiliary_device_create()
+ */
+void auxiliary_device_destroy(void *auxdev)
+{
+ struct auxiliary_device *_auxdev = auxdev;
+
+ auxiliary_device_delete(_auxdev);
+ auxiliary_device_uninit(_auxdev);
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_destroy);
+
+/**
+ * __devm_auxiliary_device_create - create a managed device on the auxiliary bus
+ * @dev: parent device
+ * @modname: module name used to create the auxiliary driver name.
+ * @devname: auxiliary bus device name
+ * @platform_data: auxiliary bus device platform data
+ * @id: auxiliary bus device id
+ *
+ * Device managed helper to create an auxiliary bus device.
+ * The device created matches driver 'modname.devname' on the auxiliary bus.
+ */
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id)
+{
+ struct auxiliary_device *auxdev;
+ int ret;
+
+ auxdev = auxiliary_device_create(dev, modname, devname, platform_data, id);
+ if (!auxdev)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, auxiliary_device_destroy,
+ auxdev);
+ if (ret)
+ return NULL;
+
+ return auxdev;
+}
+EXPORT_SYMBOL_GPL(__devm_auxiliary_device_create);
+
void __init auxiliary_bus_init(void)
{
WARN_ON(bus_register(&auxiliary_bus_type));
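A hypothetical parent driver using the new managed helper through the exported entry point: the child binds to an auxiliary driver named "my_module.my-function" and is deleted and uninitialized automatically when the parent unbinds. A devm_auxiliary_device_create() convenience wrapper that fills in KBUILD_MODNAME presumably accompanies this export; the raw call is shown since only its signature appears in this series.

static int my_parent_probe(struct device *dev)
{
	struct auxiliary_device *auxdev;

	auxdev = __devm_auxiliary_device_create(dev, "my_module",
						"my-function",
						NULL /* platform_data */,
						0 /* id */);
	if (!auxdev)
		return -ENODEV;

	/* Teardown runs via the devm action, auxiliary_device_destroy(). */
	return 0;
}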
diff --git a/drivers/base/component.c b/drivers/base/component.c
index abe60eb45c55..024ad9471b8a 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -586,7 +586,8 @@ EXPORT_SYMBOL_GPL(component_master_is_bound);
static void component_unbind(struct component *component,
struct aggregate_device *adev, void *data)
{
- WARN_ON(!component->bound);
+ if (WARN_ON(!component->bound))
+ return;
dev_dbg(adev->parent, "unbinding %s component %p (ops %ps)\n",
dev_name(component->dev), component, component->ops);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index a7e511849875..7779ab0ca7ce 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -600,6 +600,8 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(old_microcode);
+CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -616,6 +618,8 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
+static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -633,6 +637,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
&dev_attr_ghostwrite.attr,
+ &dev_attr_old_microcode.attr,
+ &dev_attr_indirect_target_selection.attr,
NULL
};
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index d8a733ea5e1a..ff55e1bcfa30 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -759,6 +759,17 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
}
EXPORT_SYMBOL_GPL(__devm_add_action);
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ return devres_find(dev, devm_action_release, devm_action_match, &devres);
+}
+EXPORT_SYMBOL_GPL(devm_is_action_added);
+
/**
* devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
@@ -976,17 +987,10 @@ EXPORT_SYMBOL_GPL(devm_krealloc);
*/
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
- size_t size;
- char *buf;
-
if (!s)
return NULL;
- size = strlen(s) + 1;
- buf = devm_kmalloc(dev, size, gfp);
- if (buf)
- memcpy(buf, s, size);
- return buf;
+ return devm_kmemdup(dev, s, strlen(s) + 1, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
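A sketch of the new query helper next to the existing registration API: the lookup matches on the exact (action, data) pair, mirroring devm_remove_action(). my_disable_hw() and priv are hypothetical.

static void my_disable_hw(void *data)
{
	/* undo whatever the caller enabled */
}

static int my_arm_cleanup(struct device *dev, void *priv)
{
	/* Idempotent: skip if this exact action/data pair is queued. */
	if (devm_is_action_added(dev, my_disable_hw, priv))
		return 0;

	return devm_add_action_or_reset(dev, my_disable_hw, priv);
}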
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 407c1d1aad50..f5fbda0a9a44 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -25,6 +25,7 @@
struct faux_object {
struct faux_device faux_dev;
const struct faux_device_ops *faux_ops;
+ const struct attribute_group **groups;
};
#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
@@ -43,10 +44,21 @@ static int faux_probe(struct device *dev)
struct faux_object *faux_obj = to_faux_object(dev);
struct faux_device *faux_dev = &faux_obj->faux_dev;
const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
- int ret = 0;
+ int ret;
- if (faux_ops && faux_ops->probe)
+ if (faux_ops && faux_ops->probe) {
ret = faux_ops->probe(faux_dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Add groups after the probe succeeds to ensure resources are
+ * initialized correctly
+ */
+ ret = device_add_groups(dev, faux_obj->groups);
+ if (ret && faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
return ret;
}
@@ -57,6 +69,8 @@ static void faux_remove(struct device *dev)
struct faux_device *faux_dev = &faux_obj->faux_dev;
const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ device_remove_groups(dev, faux_obj->groups);
+
if (faux_ops && faux_ops->remove)
faux_ops->remove(faux_dev);
}
@@ -72,6 +86,7 @@ static struct device_driver faux_driver = {
.name = "faux_driver",
.bus = &faux_bus_type,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
+ .suppress_bind_attrs = true,
};
static void faux_device_release(struct device *dev)
@@ -124,8 +139,9 @@ struct faux_device *faux_device_create_with_groups(const char *name,
if (!faux_obj)
return NULL;
- /* Save off the callbacks so we can use them in the future */
+ /* Save off the callbacks and groups so we can use them in the future */
faux_obj->faux_ops = faux_ops;
+ faux_obj->groups = groups;
/* Initialize the device portion and register it with the driver core */
faux_dev = &faux_obj->faux_dev;
@@ -138,7 +154,6 @@ struct faux_device *faux_device_create_with_groups(const char *name,
else
dev->parent = &faux_bus_root;
dev->bus = &faux_bus_type;
- dev->groups = groups;
dev_set_name(dev, "%s", name);
ret = device_add(dev);
@@ -155,7 +170,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
* successful is almost impossible to determine by the caller.
*/
if (!dev->driver) {
- dev_err(dev, "probe did not succeed, tearing down the device\n");
+ dev_dbg(dev, "probe did not succeed, tearing down the device\n");
faux_device_destroy(faux_dev);
faux_dev = NULL;
}
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index a03701674265..752b9a9bea03 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -3,8 +3,7 @@ menu "Firmware loader"
config FW_LOADER
tristate "Firmware loading facility" if EXPERT
- select CRYPTO_HASH if FW_LOADER_DEBUG
- select CRYPTO_SHA256 if FW_LOADER_DEBUG
+ select CRYPTO_LIB_SHA256 if FW_LOADER_DEBUG
default y
help
This enables the firmware loading facility in the kernel. The kernel
@@ -28,7 +27,6 @@ config FW_LOADER
config FW_LOADER_DEBUG
bool "Log filenames and checksums for loaded firmware"
- depends on CRYPTO = FW_LOADER || CRYPTO=y
depends on DYNAMIC_DEBUG
depends on FW_LOADER
default FW_LOADER
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index cb0912ea3e62..44486b2c7172 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -806,41 +806,15 @@ static void fw_abort_batch_reqs(struct firmware *fw)
}
#if defined(CONFIG_FW_LOADER_DEBUG)
-#include <crypto/hash.h>
#include <crypto/sha2.h>
static void fw_log_firmware_info(const struct firmware *fw, const char *name, struct device *device)
{
- struct shash_desc *shash;
- struct crypto_shash *alg;
- u8 *sha256buf;
- char *outbuf;
+ u8 digest[SHA256_DIGEST_SIZE];
- alg = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(alg))
- return;
-
- sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
- outbuf = kmalloc(SHA256_BLOCK_SIZE + 1, GFP_KERNEL);
- shash = kmalloc(sizeof(*shash) + crypto_shash_descsize(alg), GFP_KERNEL);
- if (!sha256buf || !outbuf || !shash)
- goto out_free;
-
- shash->tfm = alg;
-
- if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
- goto out_free;
-
- for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
- sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
- outbuf[SHA256_BLOCK_SIZE] = 0;
- dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
-
-out_free:
- kfree(shash);
- kfree(outbuf);
- kfree(sha256buf);
- crypto_free_shash(alg);
+ sha256(fw->data, fw->size, digest);
+ dev_dbg(device, "Loaded FW: %s, sha256: %*phN\n",
+ name, SHA256_DIGEST_SIZE, digest);
}
#else
static void fw_log_firmware_info(const struct firmware *fw, const char *name,
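The firmware-loader conversion swaps the crypto_shash allocation dance for the one-shot SHA-256 library helper, and the "%*phN" printk specifier replaces the manual hex-formatting loop. A self-contained equivalent; log_blob_digest() is hypothetical:

#include <crypto/sha2.h>
#include <linux/device.h>

static void log_blob_digest(struct device *dev, const void *data, size_t len)
{
	u8 digest[SHA256_DIGEST_SIZE];

	sha256(data, len, digest);	/* no tfm, no allocations */
	dev_dbg(dev, "sha256: %*phN\n", SHA256_DIGEST_SIZE, digest);
}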
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 19469e7f88c2..ed3e69dc785c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -110,6 +110,57 @@ static void memory_block_release(struct device *dev)
kfree(mem);
}
+
+/* Max block size to be set by memory_block_advise_max_size */
+static unsigned long memory_block_advised_size;
+static bool memory_block_advised_size_queried;
+
+/**
+ * memory_block_advise_max_size() - advise memory hotplug on the max suggested
+ * block size, usually for alignment.
+ * @size: suggestion for maximum block size. Must be a power of 2.
+ *
+ * Early boot software (pre-allocator init) may advise archs on the max block
+ * size. This value can only decrease after initialization, as the intent is
+ * to identify the largest supported alignment for all sources.
+ *
+ * Use of this value is arch-defined, as is min/max block size.
+ *
+ * Return: 0 on success
+ * -EINVAL if size is 0 or not pow2 aligned
+ * -EBUSY if value has already been probed
+ */
+int __init memory_block_advise_max_size(unsigned long size)
+{
+ if (!size || !is_power_of_2(size))
+ return -EINVAL;
+
+ if (memory_block_advised_size_queried)
+ return -EBUSY;
+
+ if (memory_block_advised_size)
+ memory_block_advised_size = min(memory_block_advised_size, size);
+ else
+ memory_block_advised_size = size;
+
+ return 0;
+}
+
+/**
+ * memory_block_advised_max_size() - query advised max hotplug block size.
+ *
+ * After the first call, the value can never change. Callers looking for the
+ * actual block size should use memory_block_size_bytes. This interface is
+ * actual block size should use memory_block_size_bytes(). This interface is
+ *
+ * Return: advised size in bytes, or 0 if never set.
+ */
+unsigned long memory_block_advised_max_size(void)
+{
+ memory_block_advised_size_queried = true;
+ return memory_block_advised_size;
+}
+
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
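Sketch of the intended call ordering: advice is accepted only until the first query, and the first query latches the value for good. Both callers below are hypothetical early-boot code.

#include <linux/init.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static void __init my_early_advise(void)
{
	/* Suggest 128 MiB alignment; must be a power of two. */
	if (memory_block_advise_max_size(SZ_128M))
		pr_warn("block size advice rejected (too late or invalid)\n");
}

static unsigned long __init my_arch_pick_block_size(void)
{
	/* First query latches the value; later advice returns -EBUSY. */
	unsigned long advised = memory_block_advised_max_size();

	return advised ? advised : SZ_2G;
}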
diff --git a/drivers/base/node.c b/drivers/base/node.c
index cd13ef287011..c19094481630 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
+#include <linux/mempolicy.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
@@ -214,6 +215,14 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
break;
}
}
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access == ACCESS_COORDINATE_CPU) {
+ if (mempolicy_set_node_perf(nid, coord)) {
+ pr_info("failed to set mempolicy attrs for node %d\n",
+ nid);
+ }
+ }
}
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
@@ -468,7 +477,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
- nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+ nid, 0UL,
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 0e60dd650b5e..70db08f3ac6f 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -95,5 +95,6 @@ EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
void platform_device_msi_free_irqs_all(struct device *dev)
{
msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+ msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN);
}
EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index cfccf3ff36e7..075ec1d1b73a 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -982,7 +982,7 @@ struct platform_device * __init_or_module __platform_create_bundle(
struct platform_device *pdev;
int error;
- pdev = platform_device_alloc(driver->driver.name, -1);
+ pdev = platform_device_alloc(driver->driver.name, PLATFORM_DEVID_NONE);
if (!pdev) {
error = -ENOMEM;
goto err_out;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c8b0a9e29ed8..eebe699fdf4f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -63,6 +63,7 @@ static LIST_HEAD(dpm_noirq_list);
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
static const char *pm_verb(int event)
@@ -512,7 +513,7 @@ struct dpm_watchdog {
*/
static void dpm_watchdog_handler(struct timer_list *t)
{
- struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
struct timer_list *timer = &wd->timer;
unsigned int time_left;
@@ -560,7 +561,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
struct timer_list *timer = &wd->timer;
timer_delete_sync(timer);
- destroy_timer_on_stack(timer);
+ timer_destroy_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
@@ -597,8 +598,11 @@ static bool is_async(struct device *dev)
&& !pm_trace_is_enabled();
}
-static bool dpm_async_fn(struct device *dev, async_func_t func)
+static bool __dpm_async(struct device *dev, async_func_t func)
{
+ if (dev->power.work_in_progress)
+ return true;
+
if (!is_async(dev))
return false;
@@ -611,14 +615,44 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
put_device(dev);
+ return false;
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&async_wip_mtx);
+
+ return __dpm_async(dev, func);
+}
+
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+ guard(mutex)(&async_wip_mtx);
+
+ if (!__dpm_async(dev, fn))
+ dev->power.work_in_progress = false;
+
+ return 0;
+}
+
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
/*
- * async_schedule_dev_nocall() above has returned false, so func() is
- * not running and it is safe to update power.work_in_progress without
- * extra synchronization.
+ * Prevent racing with dpm_clear_async_state() during initial list
+ * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
+ * dpm_resume().
*/
- dev->power.work_in_progress = false;
+ guard(mutex)(&dpm_list_mtx);
- return false;
+ /*
+ * Start processing "async" children of the device unless it's been
+ * started already for them.
+ *
+ * This could have been done for the device's "async" consumers too, but
+ * they either need to wait for their parents or the processing has
+ * already started for them after their parents were processed.
+ */
+ device_for_each_child(dev, func, dpm_async_with_cleanup);
}
static void dpm_clear_async_state(struct device *dev)
@@ -627,6 +661,13 @@ static void dpm_clear_async_state(struct device *dev)
dev->power.work_in_progress = false;
}
+static bool dpm_root_device(struct device *dev)
+{
+ return !dev->parent;
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie);
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -710,6 +751,8 @@ Out:
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
+
+ dpm_async_resume_children(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -733,19 +776,20 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume_noirq);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_noirq);
}
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_noirq)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -781,6 +825,8 @@ void dpm_resume_noirq(pm_message_t state)
device_wakeup_disarm_wake_irqs();
}
+static void async_resume_early(void *data, async_cookie_t cookie);
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -848,6 +894,8 @@ Out:
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
+
+ dpm_async_resume_children(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
@@ -875,19 +923,20 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume_early);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume_early);
}
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume_early)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -919,6 +968,8 @@ void dpm_resume_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
+static void async_resume(void *data, async_cookie_t cookie);
+
/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
@@ -941,6 +992,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
if (!dev->power.is_suspended)
goto Complete;
+ dev->power.is_suspended = false;
+
if (dev->power.direct_complete) {
/*
* Allow new children to be added under the device after this
@@ -1003,7 +1056,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
End:
error = dpm_run_callback(callback, dev, state, info);
- dev->power.is_suspended = false;
device_unlock(dev);
dpm_watchdog_clear(&wd);
@@ -1018,6 +1070,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
+
+ dpm_async_resume_children(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
@@ -1049,19 +1103,20 @@ void dpm_resume(pm_message_t state)
mutex_lock(&dpm_list_mtx);
/*
- * Trigger the resume of "async" devices upfront so they don't have to
- * wait for the "non-async" ones they don't depend on.
+ * Start processing "async" root devices upfront so they don't wait for
+ * the "sync" devices they don't depend on.
*/
list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
dpm_clear_async_state(dev);
- dpm_async_fn(dev, async_resume);
+ if (dpm_root_device(dev))
+ dpm_async_with_cleanup(dev, async_resume);
}
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
- if (!dev->power.work_in_progress) {
+ if (!dpm_async_fn(dev, async_resume)) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -1189,6 +1244,41 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
+static bool dpm_leaf_device(struct device *dev)
+{
+ struct device *child;
+
+ lockdep_assert_held(&dpm_list_mtx);
+
+ child = device_find_any_child(dev);
+ if (child) {
+ put_device(child);
+
+ return false;
+ }
+
+ return true;
+}
+
+static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+ guard(mutex)(&dpm_list_mtx);
+
+ /*
+ * If the device is suspended asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by checking
+ * if the device has been deleted already as the parent cannot be
+ * deleted before it.
+ */
+ if (!device_pm_initialized(dev))
+ return;
+
+ /* Start processing the device's parent if it is "async". */
+ if (dev->parent)
+ dpm_async_with_cleanup(dev->parent, func);
+}
+
/**
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
@@ -1226,6 +1316,8 @@ static void dpm_superior_set_must_resume(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
/**
* device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1304,7 +1396,13 @@ Skip:
Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend_noirq);
+
+ return 0;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1318,6 +1416,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1327,12 +1426,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_noirq);
+ }
+
while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.prev);
+ dev = to_device(dpm_late_early_list.prev);
list_move(&dev->power.entry, &dpm_noirq_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
@@ -1346,8 +1454,14 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
@@ -1400,6 +1514,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
spin_unlock_irq(&parent->power.lock);
}
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1476,7 +1592,13 @@ Skip:
Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend_late);
+
+ return 0;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1494,6 +1616,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1505,12 +1628,21 @@ int dpm_suspend_late(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend_late);
+ }
+
while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ dev = to_device(dpm_suspended_list.prev);
list_move(&dev->power.entry, &dpm_late_early_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_late))
continue;
@@ -1524,8 +1656,14 @@ int dpm_suspend_late(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
@@ -1614,6 +1752,8 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
device_links_read_unlock(idx);
}
+static void async_suspend(void *data, async_cookie_t cookie);
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@@ -1743,7 +1883,13 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- return error;
+
+ if (error || async_error)
+ return error;
+
+ dpm_async_suspend_parent(dev, async_suspend);
+
+ return 0;
}
static void async_suspend(void *data, async_cookie_t cookie)
@@ -1761,6 +1907,7 @@ static void async_suspend(void *data, async_cookie_t cookie)
int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
+ struct device *dev;
int error = 0;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
@@ -1774,12 +1921,21 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
+ /*
+ * Start processing "async" leaf devices upfront so they don't need to
+ * wait for the "sync" devices they don't depend on.
+ */
+ list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+ dpm_clear_async_state(dev);
+ if (dpm_leaf_device(dev))
+ dpm_async_with_cleanup(dev, async_suspend);
+ }
+
while (!list_empty(&dpm_prepared_list)) {
- struct device *dev = to_device(dpm_prepared_list.prev);
+ dev = to_device(dpm_prepared_list.prev);
list_move(&dev->power.entry, &dpm_suspended_list);
- dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend))
continue;
@@ -1793,8 +1949,14 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
- if (error || async_error)
+ if (error || async_error) {
+ /*
+ * Move all devices to the target list to resume them
+ * properly.
+ */
+ list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
break;
+ }
}
mutex_unlock(&dpm_list_mtx);
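The shape of the new scheduling, reduced to its two predicates: suspend seeds the asynchronous work at "leaf" devices (no children) and climbs toward the root as each device completes, while resume seeds at "root" devices (no parent) and fans out to children. The predicates as this series defines them, with the lockdep assertion elided:

static bool dpm_root_device(struct device *dev)
{
	return !dev->parent;
}

static bool dpm_leaf_device(struct device *dev)
{
	struct device *child = device_find_any_child(dev);

	if (child) {
		put_device(child);	/* drop the reference we took */
		return false;
	}

	return true;
}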
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0e127b0329c0..c55a7c70bc1a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1011,7 +1011,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
* If 'expires' is after the current time, we've been called
* too early.
*/
- if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+ if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -1568,6 +1568,32 @@ out:
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
+static void pm_runtime_set_suspended_action(void *data)
+{
+ pm_runtime_set_suspended(data);
+}
+
+/**
+ * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_set_active_enabled(struct device *dev)
+{
+ int err;
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
+ if (err)
+ return err;
+
+ return devm_pm_runtime_enable(dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
+
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
@@ -1590,6 +1616,24 @@ int devm_pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
+static void pm_runtime_put_noidle_action(void *data)
+{
+ pm_runtime_put_noidle(data);
+}
+
+/**
+ * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
+ *
+ * @dev: Device to handle.
+ */
+int devm_pm_runtime_get_noresume(struct device *dev)
+{
+ pm_runtime_get_noresume(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
+
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
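A hypothetical probe putting the two new devres helpers together: the device is marked active and runtime PM is enabled, with the matching set_suspended/disable cleanup registered automatically, and a usage reference is held for the whole bind lifetime.

static int my_probe(struct device *dev)
{
	int ret;

	/* set_active + devm_pm_runtime_enable, undone on unbind. */
	ret = devm_pm_runtime_set_active_enabled(dev);
	if (ret)
		return ret;

	/* Hold a usage count until unbind; dropped via put_noidle. */
	return devm_pm_runtime_get_noresume(dev);
}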
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f84018125b46..13b31a3adc77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -611,15 +611,9 @@ static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
&dev_attr_async.attr,
#endif
- &dev_attr_runtime_status.attr,
- &dev_attr_runtime_usage.attr,
- &dev_attr_runtime_active_kids.attr,
- &dev_attr_runtime_enabled.attr,
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static const struct attribute_group pm_attr_group = {
@@ -650,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = {
};
static struct attribute *runtime_attrs[] = {
-#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
-#endif
&dev_attr_control.attr,
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 63bf914a4d44..d1283ff1080b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -77,7 +77,7 @@ static DEFINE_IDA(wakeup_ida);
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
-struct wakeup_source *wakeup_source_create(const char *name)
+static struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
const char *ws_name;
@@ -106,7 +106,6 @@ err_name:
err_ws:
return NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_create);
/*
* Record wakeup_source statistics being deleted into a dummy wakeup_source.
@@ -149,7 +148,7 @@ static void wakeup_source_free(struct wakeup_source *ws)
*
* Use only for wakeup source objects created with wakeup_source_create().
*/
-void wakeup_source_destroy(struct wakeup_source *ws)
+static void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
@@ -158,13 +157,12 @@ void wakeup_source_destroy(struct wakeup_source *ws)
wakeup_source_record(ws);
wakeup_source_free(ws);
}
-EXPORT_SYMBOL_GPL(wakeup_source_destroy);
/**
* wakeup_source_add - Add given object to the list of wakeup sources.
* @ws: Wakeup source object to add to the list.
*/
-void wakeup_source_add(struct wakeup_source *ws)
+static void wakeup_source_add(struct wakeup_source *ws)
{
unsigned long flags;
@@ -179,13 +177,12 @@ void wakeup_source_add(struct wakeup_source *ws)
list_add_rcu(&ws->entry, &wakeup_sources);
raw_spin_unlock_irqrestore(&events_lock, flags);
}
-EXPORT_SYMBOL_GPL(wakeup_source_add);
/**
* wakeup_source_remove - Remove given object from the wakeup sources list.
* @ws: Wakeup source object to remove from the list.
*/
-void wakeup_source_remove(struct wakeup_source *ws)
+static void wakeup_source_remove(struct wakeup_source *ws)
{
unsigned long flags;
@@ -204,7 +201,6 @@ void wakeup_source_remove(struct wakeup_source *ws)
*/
ws->timer.function = NULL;
}
-EXPORT_SYMBOL_GPL(wakeup_source_remove);
/**
* wakeup_source_register - Create wakeup source and add it to the list.
@@ -337,7 +333,7 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
- if (pm_suspend_target_state != PM_SUSPEND_ON)
+ if (pm_sleep_transition_in_progress())
dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
ws = wakeup_source_register(dev, dev_name(dev));
@@ -763,7 +759,7 @@ EXPORT_SYMBOL_GPL(pm_relax);
*/
static void pm_wakeup_timer_fn(struct timer_list *t)
{
- struct wakeup_source *ws = from_timer(ws, t, timer);
+ struct wakeup_source *ws = timer_container_of(ws, t, timer);
unsigned long flags;
spin_lock_irqsave(&ws->lock, flags);
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index 6732ed2869f9..3ffd427248e8 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -34,6 +34,7 @@ wakeup_attr(active_count);
wakeup_attr(event_count);
wakeup_attr(wakeup_count);
wakeup_attr(expire_count);
+wakeup_attr(relax_count);
static ssize_t active_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -119,6 +120,7 @@ static struct attribute *wakeup_source_attrs[] = {
&dev_attr_event_count.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_expire_count.attr,
+ &dev_attr_relax_count.attr,
&dev_attr_active_time_ms.attr,
&dev_attr_total_time_ms.attr,
&dev_attr_max_time_ms.attr,
diff --git a/drivers/base/property.c b/drivers/base/property.c
index c1392743df9c..f626d5bbe806 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -928,22 +928,49 @@ bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
- * device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to count the child nodes for
+ * fwnode_get_child_node_count - return the number of child nodes for a given firmware node
+ * @fwnode: Pointer to the parent firmware node
*
- * Return: the number of child nodes for a given device.
+ * Return: the number of child nodes for a given firmware node.
+ */
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ fwnode_for_each_child_node(fwnode, child)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_child_node_count);
+
+/**
+ * fwnode_get_named_child_node_count - number of child nodes with given name
+ * @fwnode: Node which child nodes are counted.
+ * @name: String to match child node name against.
+ *
+ * Scan child nodes and count all the nodes with a specific name. Any
+ * unit-address suffix after the '@' sign in a scanned node name is ignored.
+ * E.g.::
+ * fwnode_get_named_child_node_count(fwnode, "channel");
+ * would match all the nodes::
+ * channel { }, channel@0 {}, channel@0xabba {}...
+ *
+ * Return: the number of child nodes with a matching name for a given device.
*/
-unsigned int device_get_child_node_count(const struct device *dev)
+unsigned int fwnode_get_named_child_node_count(const struct fwnode_handle *fwnode,
+ const char *name)
{
struct fwnode_handle *child;
unsigned int count = 0;
- device_for_each_child_node(dev, child)
+ fwnode_for_each_named_child_node(fwnode, child, name)
count++;
return count;
}
-EXPORT_SYMBOL_GPL(device_get_child_node_count);
+EXPORT_SYMBOL_GPL(fwnode_get_named_child_node_count);
bool device_dma_supported(const struct device *dev)
{
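A hypothetical consumer of the two new counting helpers; per the kernel-doc above, the named variant counts "channel", "channel@0", "channel@0xabba", and so on under the same name.

static unsigned int my_count_channels(struct device *dev)
{
	const struct fwnode_handle *fwnode = dev_fwnode(dev);
	unsigned int total, channels;

	total = fwnode_get_child_node_count(fwnode);
	channels = fwnode_get_named_child_node_count(fwnode, "channel");

	dev_dbg(dev, "%u children, %u of them channels\n", total, channels);

	return channels;
}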
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index b1affac70d5d..ffb2ef488298 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,8 +6,6 @@
config REGMAP
bool
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
- select IRQ_DOMAIN if REGMAP_IRQ
- select MDIO_BUS if REGMAP_MDIO
help
Enable support for the Register Map (regmap) access API.
@@ -58,12 +56,14 @@ config REGMAP_W1
config REGMAP_MDIO
tristate
+ select MDIO_BUS
config REGMAP_MMIO
tristate
config REGMAP_IRQ
bool
+ select IRQ_DOMAIN
config REGMAP_RAM
tristate
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index f7fcf2de1301..c7650fa434ad 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -34,21 +34,10 @@ static int regcache_defaults_cmp(const void *a, const void *b)
return 0;
}
-static void regcache_defaults_swap(void *a, void *b, int size)
-{
- struct reg_default *x = a;
- struct reg_default *y = b;
- struct reg_default tmp;
-
- tmp = *x;
- *x = *y;
- *y = tmp;
-}
-
void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
{
sort(defaults, ndefaults, sizeof(*defaults),
- regcache_defaults_cmp, regcache_defaults_swap);
+ regcache_defaults_cmp, NULL);
}
EXPORT_SYMBOL_GPL(regcache_sort_defaults);
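The dropped helper duplicated what lib/sort already provides: passing a NULL swap_func makes sort() fall back to its built-in size-aware swap. A standalone illustration; cmp_u32() and sort_u32() are hypothetical:

#include <linux/sort.h>
#include <linux/types.h>

static int cmp_u32(const void *a, const void *b)
{
	const u32 *x = a, *y = b;

	return (*x > *y) - (*x < *y);
}

static void sort_u32(u32 *vals, size_t n)
{
	/* NULL swap_func: lib/sort uses its generic swap. */
	sort(vals, n, sizeof(*vals), cmp_u32, NULL);
}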
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 6c6869188c31..d1585f073776 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -6,11 +6,13 @@
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/overflow.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -33,6 +35,7 @@ struct regmap_irq_chip_data {
void *status_reg_buf;
unsigned int *main_status_buf;
unsigned int *status_buf;
+ unsigned int *prev_status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
unsigned int *wake_buf;
@@ -193,10 +196,10 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
/* If we've changed our wakeup count propagate it to the parent */
if (d->wake_count < 0)
for (i = d->wake_count; i < 0; i++)
- irq_set_irq_wake(d->irq, 0);
+ disable_irq_wake(d->irq);
else if (d->wake_count > 0)
for (i = 0; i < d->wake_count; i++)
- irq_set_irq_wake(d->irq, 1);
+ enable_irq_wake(d->irq);
d->wake_count = 0;
@@ -332,27 +335,13 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
return ret;
}
-static irqreturn_t regmap_irq_thread(int irq, void *d)
+static int read_irq_data(struct regmap_irq_chip_data *data)
{
- struct regmap_irq_chip_data *data = d;
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
- bool handled = false;
u32 reg;
- if (chip->handle_pre_irq)
- chip->handle_pre_irq(chip->irq_drv_data);
-
- if (chip->runtime_pm) {
- ret = pm_runtime_get_sync(map->dev);
- if (ret < 0) {
- dev_err(map->dev, "IRQ thread failed to resume: %d\n",
- ret);
- goto exit;
- }
- }
-
/*
* Read only registers with active IRQs if the chip has 'main status
* register'. Else read in the statuses, using a single bulk read if
@@ -379,10 +368,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
reg = data->get_irq_reg(data, chip->main_status, i);
ret = regmap_read(map, reg, &data->main_status_buf[i]);
if (ret) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -398,10 +385,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = read_sub_irq_data(data, b);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status %d\n", ret);
+ return ret;
}
}
@@ -418,9 +403,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_reg_buf,
chip->num_regs);
if (ret != 0) {
- dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
for (i = 0; i < data->chip->num_regs; i++) {
@@ -436,7 +420,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
break;
default:
BUG();
- goto exit;
+ return -EIO;
}
}
@@ -447,10 +431,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret = regmap_read(map, reg, &data->status_buf[i]);
if (ret != 0) {
- dev_err(map->dev,
- "Failed to read IRQ status: %d\n",
- ret);
- goto exit;
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return ret;
}
}
}
@@ -459,6 +441,42 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
for (i = 0; i < data->chip->num_regs; i++)
data->status_buf[i] = ~data->status_buf[i];
+ return 0;
+}
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ bool handled = false;
+ u32 reg;
+
+ if (chip->handle_pre_irq)
+ chip->handle_pre_irq(chip->irq_drv_data);
+
+ if (chip->runtime_pm) {
+ ret = pm_runtime_get_sync(map->dev);
+ if (ret < 0) {
+ dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = read_irq_data(data);
+ if (ret < 0)
+ goto exit;
+
+ if (chip->status_is_level) {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ unsigned int val = data->status_buf[i];
+
+ data->status_buf[i] ^= data->prev_status_buf[i];
+ data->prev_status_buf[i] = val;
+ }
+ }
+
/*
* Ignore masked IRQs and ack if we need to; we ack early so
* there is no race between handling and acknowledging the
@@ -705,6 +723,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (!d->status_buf)
goto err_alloc;
+ if (chip->status_is_level) {
+ d->prev_status_buf = kcalloc(chip->num_regs, sizeof(*d->prev_status_buf),
+ GFP_KERNEL);
+ if (!d->prev_status_buf)
+ goto err_alloc;
+ }
+
d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
GFP_KERNEL);
if (!d->mask_buf)
@@ -881,6 +906,16 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
}
}
+ /* Store current levels */
+ if (chip->status_is_level) {
+ ret = read_irq_data(d);
+ if (ret < 0)
+ goto err_alloc;
+
+ memcpy(d->prev_status_buf, d->status_buf,
+ array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
+ }
+
ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
if (ret)
goto err_alloc;
@@ -908,6 +943,7 @@ err_alloc:
kfree(d->mask_buf);
kfree(d->main_status_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
for (i = 0; i < chip->num_config_bases; i++)
@@ -985,6 +1021,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
+ kfree(d->prev_status_buf);
if (d->config_buf) {
for (i = 0; i < d->chip->num_config_bases; i++)
kfree(d->config_buf[i]);
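
For chips with status_is_level, the new code converts level-type status into edge events: each status word is XOR-ed with the previously observed levels so only bits that changed since the last read are treated as pending, and the fresh snapshot is saved for the next interrupt. The transform in isolation (a sketch; levels_to_edges and its arguments are illustrative):

/* Turn level status words into "changed since last read" edge words. */
static void levels_to_edges(unsigned int *status, unsigned int *prev,
			    unsigned int nregs)
{
	unsigned int i;

	for (i = 0; i < nregs; i++) {
		unsigned int cur = status[i];

		status[i] ^= prev[i];	/* set bits = level changed */
		prev[i] = cur;		/* remember current levels */
	}
}

This is also why regmap_add_irq_chip_fwnode() now primes prev_status_buf with one read_irq_data() pass before the IRQ domain is created: without a baseline snapshot, the first interrupt would report every asserted level as an edge.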
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 5c78fa6ae772..deda7f35a059 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -529,7 +529,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (prop->is_inline)
return -EINVAL;
- if (index * sizeof(*ref) >= prop->length)
+ if ((index + 1) * sizeof(*ref) > prop->length)
return -ENOENT;
ref_array = prop->pointer;
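
The swnode fix tightens the reference-args bounds check: the old test only rejected an index whose start lay past the property, so an entry that straddled the end of the buffer was accepted and read out of bounds. A worked example with illustrative sizes:

/* Assume sizeof(*ref) == 16 and prop->length == 20, index == 1: */
bool old_rejects = (1 * 16 >= 20);	/* false: index wrongly accepted   */
bool new_rejects = ((1 + 1) * 16 > 20);	/* true: partial entry -> -ENOENT  */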
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index b962da263eee..8b42df05feff 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -208,3 +208,55 @@ static int __init topology_sysfs_init(void)
}
device_initcall(topology_sysfs_init);
+
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+ return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+}
+
+static DEVICE_ATTR_RO(cpu_capacity);
+
+static int cpu_capacity_sysctl_add(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+ cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
+
+ return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
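
The capacity attribute is created and removed through a dynamic CPU-hotplug state, so the sysfs file tracks CPUs as they come and go. The general shape of that pattern, as a hedged sketch (foo_* names are illustrative):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int foo_cpu_online(unsigned int cpu)
{
	/* create per-CPU attributes/state for @cpu */
	return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
	/* tear down per-CPU attributes/state for @cpu */
	return 0;
}

static int __init foo_cpuhp_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN allocates a dynamic state; the online
	 * callback also runs once for every already-online CPU.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo/cpu-ready",
				foo_cpu_online, foo_cpu_offline);
	return ret < 0 ? ret : 0;
}
subsys_initcall(foo_cpuhp_init);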
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 5f90bac6bb09..f021e27644e0 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -26,12 +26,14 @@ static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!bcma_chipco_gpio_in(cc, 1 << gpio);
}
-static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
- int value)
+static int bcma_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct bcma_drv_cc *cc = gpiochip_get_data(chip);
bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+
+ return 0;
}
static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -184,7 +186,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->request = bcma_gpio_request;
chip->free = bcma_gpio_free;
chip->get = bcma_gpio_get_value;
- chip->set = bcma_gpio_set_value;
+ chip->set_rv = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->parent = bus->dev;
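
The bcma hunk is part of the tree-wide conversion from the void gpio_chip .set callback to the int-returning .set_rv, which lets gpiolib propagate register-access failures to callers. A hedged template for a hypothetical driver (foo_priv, foo_write_reg and FOO_GPIO_OUT are made up):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

#define FOO_GPIO_OUT	0x04	/* hypothetical output register */

struct foo_priv {
	void __iomem *base;
};

/* hypothetical accessor: returns 0 or a negative errno */
static int foo_write_reg(struct foo_priv *p, u32 reg, u32 mask, u32 val)
{
	u32 cur = readl(p->base + reg);

	writel((cur & ~mask) | (val & mask), p->base + reg);
	return 0;
}

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_priv *priv = gpiochip_get_data(gc);

	return foo_write_reg(priv, FOO_GPIO_OUT, BIT(offset),
			     value ? BIT(offset) : 0);
}

/* wired up in probe with: gc->set_rv = foo_gpio_set; */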
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e48b24be45ee..0f70e2374e7f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -407,4 +407,23 @@ config BLKDEV_UBLK_LEGACY_OPCODES
source "drivers/block/rnbd/Kconfig"
+config BLK_DEV_ZONED_LOOP
+ tristate "Zoned loopback device support"
+ depends on BLK_DEV_ZONED
+ help
+	  Saying Y here will allow you to create a zoned block device using
+	  regular files for zones (one file per zone). This is useful for
+	  testing file systems, device mapper, and applications that support
+	  zoned block devices. No user utility is needed to create a zoned
+	  loop device; one can be created (or re-started) with a command
+	  like:
+
+ echo "add id=0,zone_size_mb=256,capacity_mb=16384,conv_zones=11" > \
+ /dev/zloop-control
+
+ See Documentation/admin-guide/blockdev/zoned_loop.rst for usage
+ details.
+
+ If unsure, say N.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1105a2d4fdcb..097707aca725 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -41,5 +41,6 @@ obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
+obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 749ae1246f4c..d35caa3c69e1 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -80,6 +80,7 @@ enum {
DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
DEVFL_FREED = (1<<8), /* device has been cleaned up */
+ DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */
};
enum {
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 92b06d1de4cc..6298f8e271e3 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -745,7 +745,7 @@ rexmit_timer(struct timer_list *timer)
int utgts; /* number of aoetgt descriptors (not slots) */
int since;
- d = from_timer(d, timer, timer);
+ d = timer_container_of(d, timer, timer);
spin_lock_irqsave(&d->lock, flags);
@@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer)
utgts = count_targets(d, NULL);
- if (d->flags & DEVFL_TKILL) {
+ if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) {
spin_unlock_irqrestore(&d->lock, flags);
return;
}
@@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer)
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
- aoedev_downdev(d);
+ d->flags |= DEVFL_DEAD;
+ queue_work(aoe_wq, &d->work);
goto out;
}
@@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work)
{
struct aoedev *d = container_of(work, struct aoedev, work);
+ if (d->flags & DEVFL_DEAD)
+ aoedev_downdev(d);
+
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
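
The from_timer() → timer_container_of() changes here (and in the drbd and swim3 hunks further down) are a mechanical rename; both macros recover the structure embedding the timer via container_of(). A minimal sketch with a hypothetical device struct:

#include <linux/timer.h>

struct foo_dev {
	struct timer_list timer;
	int pending;
};

static void foo_timeout(struct timer_list *t)
{
	/* was: struct foo_dev *foo = from_timer(foo, t, timer); */
	struct foo_dev *foo = timer_container_of(foo, t, timer);

	foo->pending = 0;
}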
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 141b2a0e03f2..3a240755045b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -149,7 +149,7 @@ dummy_timer(struct timer_list *t)
{
struct aoedev *d;
- d = from_timer(d, t, timer);
+ d = timer_container_of(d, t, timer);
if (d->flags & DEVFL_TKILL)
return;
d->timer.expires = jiffies + HZ;
@@ -198,9 +198,13 @@ aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
+ struct request *rq, *rqnext;
int i;
+ unsigned long flags;
- d->flags &= ~DEVFL_UP;
+ spin_lock_irqsave(&d->lock, flags);
+ d->flags &= ~(DEVFL_UP | DEVFL_DEAD);
+ spin_unlock_irqrestore(&d->lock, flags);
/* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
@@ -223,6 +227,13 @@ aoedev_downdev(struct aoedev *d)
/* clean out the in-process request (if any) */
aoe_failip(d);
+ /* clean out any queued block requests */
+ list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+ blk_mq_end_request(rq, BLK_STS_IOERR);
+ }
+
/* fast fail all pending I/O */
if (d->blkq) {
		/* UP is cleared, freeze+quiesce to ensure all are errored */
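
Instead of tearing the device down from timer context, rexmit_timer() now only marks it DEVFL_DEAD and queues d->work; aoecmd_sleepwork() then runs aoedev_downdev() from process context, where ending the queued block requests is allowed to sleep. The defer-to-workqueue shape, sketched generically (names are illustrative):

#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define FOO_DEAD	0x1

struct foo_dev {
	spinlock_t lock;
	unsigned long flags;		/* FOO_DEAD set under lock */
	struct work_struct work;
};

/* timer/atomic context: flag the device and kick the worker */
static void foo_mark_dead(struct foo_dev *d)
{
	d->flags |= FOO_DEAD;		/* caller holds d->lock */
	schedule_work(&d->work);
}

/* process context: safe to block while failing outstanding I/O */
static void foo_work_fn(struct work_struct *work)
{
	struct foo_dev *d = container_of(work, struct foo_dev, work);

	if (d->flags & FOO_DEAD) {
		/* tear down: end queued requests, drop targets, ... */
	}
}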
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 292f127cae0a..b1be6c510372 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -54,32 +54,33 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
/*
* Insert a new page for a given sector, if one does not already exist.
*/
-static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
+static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
+ blk_opf_t opf)
+ __releases(rcu)
+ __acquires(rcu)
{
- pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
- struct page *page;
- int ret = 0;
-
- page = brd_lookup_page(brd, sector);
- if (page)
- return 0;
+ gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
+ struct page *page, *ret;
+ rcu_read_unlock();
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
+ rcu_read_lock();
if (!page)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
xa_lock(&brd->brd_pages);
- ret = __xa_insert(&brd->brd_pages, idx, page, gfp);
- if (!ret)
- brd->brd_nr_pages++;
- xa_unlock(&brd->brd_pages);
-
- if (ret < 0) {
+ ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
+ page, gfp);
+ if (ret) {
+ xa_unlock(&brd->brd_pages);
__free_page(page);
- if (ret == -EBUSY)
- ret = 0;
+ if (xa_is_err(ret))
+ return ERR_PTR(xa_err(ret));
+ return ret;
}
- return ret;
+ brd->brd_nr_pages++;
+ xa_unlock(&brd->brd_pages);
+ return page;
}
/*
@@ -100,143 +101,77 @@ static void brd_free_pages(struct brd_device *brd)
}
/*
- * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
+ * Process a single segment. The segment is capped to not cross page boundaries
+ * in both the bio and the brd backing memory.
*/
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
- gfp_t gfp)
-{
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- int ret;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- ret = brd_insert_page(brd, sector, gfp);
- if (ret)
- return ret;
- if (copy < n) {
- sector += copy >> SECTOR_SHIFT;
- ret = brd_insert_page(brd, sector, gfp);
- }
- return ret;
-}
-
-/*
- * Copy n bytes from src to the brd starting at sector. Does not sleep.
- */
-static void copy_to_brd(struct brd_device *brd, const void *src,
- sector_t sector, size_t n)
+static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
{
+ struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+ sector_t sector = bio->bi_iter.bi_sector;
+ u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
+ blk_opf_t opf = bio->bi_opf;
struct page *page;
- void *dst;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
+ void *kaddr;
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst + offset, src, copy);
- kunmap_atomic(dst);
-
- if (copy < n) {
- src += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(dst);
- }
-}
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
-/*
- * Copy n bytes to dst from the brd starting at sector. Does not sleep.
- */
-static void copy_from_brd(void *dst, struct brd_device *brd,
- sector_t sector, size_t n)
-{
- struct page *page;
- void *src;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
+ rcu_read_lock();
page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src + offset, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
-
- if (copy < n) {
- dst += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
+ if (!page && op_is_write(opf)) {
+ page = brd_insert_page(brd, sector, opf);
+ if (IS_ERR(page))
+ goto out_error;
}
-}
-
-/*
- * Process a single bvec of a bio.
- */
-static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, blk_opf_t opf,
- sector_t sector)
-{
- void *mem;
- int err = 0;
+ kaddr = bvec_kmap_local(&bv);
if (op_is_write(opf)) {
- /*
- * Must use NOIO because we don't want to recurse back into the
- * block or filesystem layers from page reclaim.
- */
- gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
-
- err = copy_to_brd_setup(brd, sector, len, gfp);
- if (err)
- goto out;
- }
-
- mem = kmap_atomic(page);
- if (!op_is_write(opf)) {
- copy_from_brd(mem + off, brd, sector, len);
- flush_dcache_page(page);
+ memcpy_to_page(page, offset, kaddr, bv.bv_len);
} else {
- flush_dcache_page(page);
- copy_to_brd(brd, mem + off, sector, len);
+ if (page)
+ memcpy_from_page(kaddr, page, offset, bv.bv_len);
+ else
+ memset(kaddr, 0, bv.bv_len);
}
- kunmap_atomic(mem);
+ kunmap_local(kaddr);
+ rcu_read_unlock();
+
+ bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ return true;
+
+out_error:
+ rcu_read_unlock();
+ if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+ bio_io_error(bio);
+ return false;
+}
-out:
- return err;
+static void brd_free_one_page(struct rcu_head *head)
+{
+ struct page *page = container_of(head, struct page, rcu_head);
+
+ __free_page(page);
}
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
- sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS;
+ sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
+ sector_t aligned_end = round_down(
+ sector + (size >> SECTOR_SHIFT), PAGE_SECTORS);
struct page *page;
- size -= (aligned_sector - sector) * SECTOR_SIZE;
+ if (aligned_end <= aligned_sector)
+ return;
+
xa_lock(&brd->brd_pages);
- while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
+ while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
if (page) {
- __free_page(page);
+ call_rcu(&page->rcu_head, brd_free_one_page);
brd->brd_nr_pages--;
}
aligned_sector += PAGE_SECTORS;
- size -= PAGE_SIZE;
}
xa_unlock(&brd->brd_pages);
}
@@ -244,36 +179,18 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
- sector_t sector = bio->bi_iter.bi_sector;
- struct bio_vec bvec;
- struct bvec_iter iter;
if (unlikely(op_is_discard(bio->bi_opf))) {
- brd_do_discard(brd, sector, bio->bi_iter.bi_size);
+ brd_do_discard(brd, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
bio_endio(bio);
return;
}
- bio_for_each_segment(bvec, bio, iter) {
- unsigned int len = bvec.bv_len;
- int err;
-
- /* Don't support un-aligned buffer */
- WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
- (len & (SECTOR_SIZE - 1)));
-
- err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- bio->bi_opf, sector);
- if (err) {
- if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- bio_io_error(bio);
+ do {
+ if (!brd_rw_bvec(brd, bio))
return;
- }
- sector += len >> SECTOR_SHIFT;
- }
+ } while (bio->bi_iter.bi_size);
bio_endio(bio);
}
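
brd's data path is now RCU-based: lookups run under rcu_read_lock(), discard unpublishes a page from the xarray and frees it via call_rcu(), and the allocator drops the read lock around alloc_page() since the slot is re-validated under xa_lock with __xa_cmpxchg(). The lookup/free pairing, condensed into a sketch over a hypothetical xarray-backed page store:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(foo_pages);

static void foo_free_one_page(struct rcu_head *head)
{
	__free_page(container_of(head, struct page, rcu_head));
}

/* writer: unpublish first, free only after a grace period */
static void foo_discard(pgoff_t idx)
{
	struct page *page = xa_erase(&foo_pages, idx);

	if (page)
		call_rcu(&page->rcu_head, foo_free_one_page);
}

/* reader: the page stays valid for the whole read-side section */
static void foo_read(pgoff_t idx, char *dst, size_t len)
{
	struct page *page;

	rcu_read_lock();
	page = xa_load(&foo_pages, idx);
	if (page)
		memcpy_from_page(dst, page, 0, len);
	else
		memset(dst, 0, len);	/* unwritten sectors read as zeroes */
	rcu_read_unlock();
}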
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index ced2cc5f46f2..52724b79be30 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3591,7 +3591,8 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
static void md_sync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, md_sync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ md_sync_timer);
drbd_device_post_work(device, MD_SYNC);
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 380e6584a4ee..d15826f6ee81 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1699,7 +1699,8 @@ static bool net_timeout_reached(struct drbd_request *net_req,
void request_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, request_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ request_timer);
struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
struct net_conf *nc;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4352a50fbb3f..a6ea737b3b71 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -442,7 +442,8 @@ int w_resync_timer(struct drbd_work *w, int cancel)
void resync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, resync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ resync_timer);
drbd_queue_work_if_unqueued(
&first_peer_device(device)->connection->sender_work,
@@ -1698,7 +1699,8 @@ void drbd_rs_controller_reset(struct drbd_peer_device *peer_device)
void start_resync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = from_timer(device, t, start_resync_timer);
+ struct drbd_device *device = timer_container_of(device, t,
+ start_resync_timer);
drbd_device_post_work(device, RS_START);
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b8ba7de08753..500840e4a74e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -308,11 +308,14 @@ end_io:
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct loop_device *lo = rq->q->queuedata;
if (!atomic_dec_and_test(&cmd->ref))
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
+ if (req_op(rq) == REQ_OP_WRITE)
+ file_end_write(lo->lo_backing_file);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
@@ -387,9 +390,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_flags = 0;
}
- if (rw == ITER_SOURCE)
+ if (rw == ITER_SOURCE) {
+ file_start_write(lo->lo_backing_file);
ret = file->f_op->write_iter(&cmd->iocb, &iter);
- else
+ } else
ret = file->f_op->read_iter(&cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
@@ -979,9 +983,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (!file)
return -EBADF;
- if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
- return -EINVAL;
-
error = loop_check_backing_file(file);
if (error)
return error;
@@ -1247,12 +1248,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
- if (size_changed) {
- loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
- lo->lo_backing_file);
- loop_set_size(lo, new_size);
- }
-
/* update the direct I/O flag if lo_offset changed */
loop_update_dio(lo);
@@ -1260,6 +1255,11 @@ out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
+ if (!err && size_changed) {
+ loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
+ lo->lo_backing_file);
+ loop_set_size(lo, new_size);
+ }
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
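
The loop hunks pair file_start_write() with a deferred file_end_write(): freeze protection is taken before ->write_iter() is issued, and released only when the final reference on the command is dropped in lo_rw_aio_do_completion(), i.e. when the asynchronous write has actually finished. The essential pairing, reduced to a sketch (names are illustrative):

#include <linux/fs.h>
#include <linux/uio.h>

/* submit side: hold freeze protection across the whole AIO lifetime */
static ssize_t foo_submit_write(struct file *file, struct kiocb *iocb,
				struct iov_iter *iter)
{
	file_start_write(file);
	return file->f_op->write_iter(iocb, iter);
}

/* completion side: runs once, when the last reference is put */
static void foo_write_done(struct file *file)
{
	file_end_write(file);
}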
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 0d619df03fa9..66ce6b81c7d9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3717,7 +3717,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
- goto setmask_err;
+ goto iomap_err;
}
/* Copy the info we may need later into the private data structure. */
@@ -3733,7 +3733,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
- goto setmask_err;
+ goto iomap_err;
}
memset(cpu_list, 0, sizeof(cpu_list));
@@ -3830,8 +3830,6 @@ msi_initialize_err:
drop_cpu(dd->work[1].cpu_binding);
drop_cpu(dd->work[2].cpu_binding);
}
-setmask_err:
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
iomap_err:
kfree(dd);
@@ -3907,7 +3905,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
pci_set_drvdata(pdev, NULL);
put_disk(dd->disk);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 65b96c083b3c..d5cc7bd2875c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -725,7 +725,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
scmd = blk_mq_rq_to_pdu(rq);
if (cgc->buflen) {
- ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+ ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen,
GFP_NOIO);
if (ret)
goto out;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 2ee6e9bd4e28..2df8941a6b14 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -147,12 +147,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
- if (bio_add_page(bio, virt_to_page(data), datalen,
- offset_in_page(data)) != datalen) {
- rnbd_srv_err_rl(sess_dev, "Failed to map data to bio\n");
- err = -EINVAL;
- goto bio_put;
- }
+ bio_add_virt_nofail(bio, data, datalen);
bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
if (bio_has_data(bio) &&
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ee6cade70222..01f7aef3fcfb 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -555,7 +555,7 @@ static void act(struct floppy_state *fs)
static void scan_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -579,7 +579,7 @@ static void scan_timeout(struct timer_list *t)
static void seek_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -598,7 +598,7 @@ static void seek_timeout(struct timer_list *t)
static void settle_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -627,7 +627,7 @@ static void settle_timeout(struct timer_list *t)
static void xfer_timeout(struct timer_list *t)
{
- struct floppy_state *fs = from_timer(fs, t, timeout);
+ struct floppy_state *fs = timer_container_of(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index f9032076bc06..d36f44f5ee80 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -50,6 +50,8 @@
/* private ioctl command mirror */
#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
+#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
+#define UBLK_CMD_QUIESCE_DEV _IOC_NR(UBLK_U_CMD_QUIESCE_DEV)
#define UBLK_IO_REGISTER_IO_BUF _IOC_NR(UBLK_U_IO_REGISTER_IO_BUF)
#define UBLK_IO_UNREGISTER_IO_BUF _IOC_NR(UBLK_U_IO_UNREGISTER_IO_BUF)
@@ -64,7 +66,11 @@
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
| UBLK_F_ZONED \
- | UBLK_F_USER_RECOVERY_FAIL_IO)
+ | UBLK_F_USER_RECOVERY_FAIL_IO \
+ | UBLK_F_UPDATE_SIZE \
+ | UBLK_F_AUTO_BUF_REG \
+ | UBLK_F_QUIESCE \
+ | UBLK_F_PER_IO_DAEMON)
#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
| UBLK_F_USER_RECOVERY_REISSUE \
@@ -77,7 +83,11 @@
UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
struct ublk_rq_data {
- struct kref ref;
+ refcount_t ref;
+
+	/* for auto-unregistering the buffer in case of UBLK_F_AUTO_BUF_REG */
+ u16 buf_index;
+ void *buf_ctx_handle;
};
struct ublk_uring_cmd_pdu {
@@ -99,6 +109,9 @@ struct ublk_uring_cmd_pdu {
* setup in ublk uring_cmd handler
*/
struct ublk_queue *ubq;
+
+ struct ublk_auto_buf_reg buf;
+
u16 tag;
};
@@ -131,6 +144,14 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+/*
+ * The request buffer is registered automatically, so we have to unregister
+ * it before completing this request.
+ *
+ * io_uring will unregister the buffer automatically for us on exit.
+ */
+#define UBLK_IO_FLAG_AUTO_BUF_REG 0x10
+
/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000
@@ -140,7 +161,14 @@ struct ublk_io {
unsigned int flags;
int res;
- struct io_uring_cmd *cmd;
+ union {
+ /* valid if UBLK_IO_FLAG_ACTIVE is set */
+ struct io_uring_cmd *cmd;
+ /* valid if UBLK_IO_FLAG_OWNED_BY_SRV is set */
+ struct request *req;
+ };
+
+ struct task_struct *task;
};
struct ublk_queue {
@@ -148,11 +176,9 @@ struct ublk_queue {
int q_depth;
unsigned long flags;
- struct task_struct *ubq_daemon;
struct ublksrv_io_desc *io_cmd_buf;
bool force_abort;
- bool timeout;
bool canceling;
bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
unsigned short nr_io_ready; /* how many ios setup */
@@ -198,13 +224,19 @@ struct ublk_params_header {
__u32 types;
};
+static void ublk_io_release(void *priv);
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
const struct ublk_queue *ubq, int tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag);
+
+static inline struct ublksrv_io_desc *
+ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
+{
+ return &ubq->io_cmd_buf[tag];
+}
+
static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
{
return ub->dev_info.flags & UBLK_F_ZONED;
@@ -356,8 +388,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector,
if (ret)
goto free_req;
- ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
- GFP_KERNEL);
+ ret = blk_rq_map_kern(req, buffer, buffer_length, GFP_KERNEL);
if (ret)
goto erase_desc;
@@ -477,7 +508,6 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
static inline void __ublk_complete_rq(struct request *req);
-static void ublk_complete_rq(struct kref *ref);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -609,6 +639,11 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
}
+static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
@@ -616,7 +651,8 @@ static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
- return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq);
+ return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
+ !ublk_support_auto_buf_reg(ubq);
}
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
@@ -627,8 +663,13 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
*
* for zero copy, request buffer need to be registered to io_uring
* buffer table, so reference is needed
+ *
+ * For auto buffer register, ublk server still may issue
+ * UBLK_IO_COMMIT_AND_FETCH_REQ before one registered buffer is used up,
+ * so reference is required too.
*/
- return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq);
+ return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq) ||
+ ublk_support_auto_buf_reg(ubq);
}
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
@@ -637,7 +678,7 @@ static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- kref_init(&data->ref);
+ refcount_set(&data->ref, 1);
}
}
@@ -647,7 +688,7 @@ static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- return kref_get_unless_zero(&data->ref);
+ return refcount_inc_not_zero(&data->ref);
}
return true;
@@ -659,7 +700,8 @@ static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
if (ublk_need_req_ref(ubq)) {
struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
- kref_put(&data->ref, ublk_complete_rq);
+ if (refcount_dec_and_test(&data->ref))
+ __ublk_complete_rq(req);
} else {
__ublk_complete_rq(req);
}
@@ -695,12 +737,6 @@ static inline bool ublk_rq_has_data(const struct request *rq)
return bio_has_data(rq->bio);
}
-static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
- int tag)
-{
- return &ubq->io_cmd_buf[tag];
-}
-
static inline struct ublksrv_io_desc *
ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
@@ -1064,11 +1100,6 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}
-static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
-{
- return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
-}
-
/* todo: handle partial completion */
static inline void __ublk_complete_rq(struct request *req)
{
@@ -1117,18 +1148,12 @@ exit:
blk_mq_end_request(req, res);
}
-static void ublk_complete_rq(struct kref *ref)
+static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
+ int res, unsigned issue_flags)
{
- struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
- ref);
- struct request *req = blk_mq_rq_from_pdu(data);
-
- __ublk_complete_rq(req);
-}
+ /* read cmd first because req will overwrite it */
+ struct io_uring_cmd *cmd = io->cmd;
-static void ubq_complete_io_cmd(struct ublk_io *io, int res,
- unsigned issue_flags)
-{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@@ -1138,8 +1163,10 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res,
*/
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+ io->req = req;
+
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -1154,28 +1181,103 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
+static void ublk_auto_buf_reg_fallback(struct request *req)
+{
+ const struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
+ refcount_set(&data->ref, 1);
+}
+
+static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ int ret;
+
+ ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
+ pdu->buf.index, issue_flags);
+ if (ret) {
+ if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(req);
+ return true;
+ }
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ return false;
+ }
+ /* one extra reference is dropped by ublk_io_release */
+ refcount_set(&data->ref, 2);
+
+ data->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
+ /* store buffer index in request payload */
+ data->buf_index = pdu->buf.index;
+ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+ return true;
+}
+
+static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
+ struct request *req, struct ublk_io *io,
+ unsigned int issue_flags)
+{
+ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
+ return ublk_auto_buf_reg(req, io, issue_flags);
+
+ ublk_init_req_ref(ubq, req);
+ return true;
+}
+
+static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io)
+{
+ unsigned mapped_bytes = ublk_map_io(ubq, req, io);
+
+ /* partially mapped, update io descriptor */
+ if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
+ /*
+ * Nothing mapped, retry until we succeed.
+ *
+ * We may never succeed in mapping any bytes here because
+ * of OOM. TODO: reserve one buffer with single page pinned
+ * for providing forward progress guarantee.
+ */
+ if (unlikely(!mapped_bytes)) {
+ blk_mq_requeue_request(req, false);
+ blk_mq_delay_kick_requeue_list(req->q,
+ UBLK_REQUEUE_DELAY_MS);
+ return false;
+ }
+
+ ublk_get_iod(ubq, req->tag)->nr_sectors =
+ mapped_bytes >> 9;
+ }
+
+ return true;
+}
+
static void ublk_dispatch_req(struct ublk_queue *ubq,
struct request *req,
unsigned int issue_flags)
{
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
- unsigned int mapped_bytes;
- pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ pr_devel("%s: complete: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
/*
* Task is exiting if either:
*
- * (1) current != ubq_daemon.
+ * (1) current != io->task.
* io_uring_cmd_complete_in_task() tries to run task_work
- * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
+ * in a workqueue if cmd's task is PF_EXITING.
*
* (2) current->flags & PF_EXITING.
*/
- if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
+ if (unlikely(current != io->task || current->flags & PF_EXITING)) {
__ublk_abort_rq(ubq, req);
return;
}
@@ -1183,54 +1285,22 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
/*
* We have not handled UBLK_IO_NEED_GET_DATA command yet,
- * so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+ * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
* and notify it.
*/
- if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
- io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
- pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
- __func__, io->cmd->cmd_op, ubq->q_id,
- req->tag, io->flags);
- ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
- return;
- }
- /*
- * We have handled UBLK_IO_NEED_GET_DATA command,
- * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
- * do the copy work.
- */
- io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
- /* update iod->addr because ublksrv may have passed a new io buffer */
- ublk_get_iod(ubq, req->tag)->addr = io->addr;
- pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
- __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
- ublk_get_iod(ubq, req->tag)->addr);
+ io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+ pr_devel("%s: need get data. qid %d tag %d io_flags %x\n",
+ __func__, ubq->q_id, req->tag, io->flags);
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_NEED_GET_DATA,
+ issue_flags);
+ return;
}
- mapped_bytes = ublk_map_io(ubq, req, io);
-
- /* partially mapped, update io descriptor */
- if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
- /*
- * Nothing mapped, retry until we succeed.
- *
- * We may never succeed in mapping any bytes here because
- * of OOM. TODO: reserve one buffer with single page pinned
- * for providing forward progress guarantee.
- */
- if (unlikely(!mapped_bytes)) {
- blk_mq_requeue_request(req, false);
- blk_mq_delay_kick_requeue_list(req->q,
- UBLK_REQUEUE_DELAY_MS);
- return;
- }
-
- ublk_get_iod(ubq, req->tag)->nr_sectors =
- mapped_bytes >> 9;
- }
+ if (!ublk_start_io(ubq, req, io))
+ return;
- ublk_init_req_ref(ubq, req);
- ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
+ ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
}
static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
@@ -1256,24 +1326,22 @@ static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct request *rq = pdu->req_list;
- struct ublk_queue *ubq = pdu->ubq;
struct request *next;
do {
next = rq->rq_next;
rq->rq_next = NULL;
- ublk_dispatch_req(ubq, rq, issue_flags);
+ ublk_dispatch_req(rq->mq_hctx->driver_data, rq, issue_flags);
rq = next;
} while (rq);
}
-static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
+static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l)
{
- struct request *rq = rq_list_peek(l);
- struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- pdu->req_list = rq;
+ pdu->req_list = rq_list_peek(l);
rq_list_init(l);
io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb);
}
@@ -1281,13 +1349,10 @@ static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ struct ublk_io *io = &ubq->ios[rq->tag];
if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
- if (!ubq->timeout) {
- send_sig(SIGKILL, ubq->ubq_daemon, 0);
- ubq->timeout = true;
- }
-
+ send_sig(SIGKILL, io->task, 0);
return BLK_EH_DONE;
}
@@ -1355,24 +1420,25 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
{
struct rq_list requeue_list = { };
struct rq_list submit_list = { };
- struct ublk_queue *ubq = NULL;
+ struct ublk_io *io = NULL;
struct request *req;
while ((req = rq_list_pop(rqlist))) {
struct ublk_queue *this_q = req->mq_hctx->driver_data;
+ struct ublk_io *this_io = &this_q->ios[req->tag];
- if (ubq && ubq != this_q && !rq_list_empty(&submit_list))
- ublk_queue_cmd_list(ubq, &submit_list);
- ubq = this_q;
+ if (io && io->task != this_io->task && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(io, &submit_list);
+ io = this_io;
- if (ublk_prep_req(ubq, req, true) == BLK_STS_OK)
+ if (ublk_prep_req(this_q, req, true) == BLK_STS_OK)
rq_list_add_tail(&submit_list, req);
else
rq_list_add_tail(&requeue_list, req);
}
- if (ubq && !rq_list_empty(&submit_list))
- ublk_queue_cmd_list(ubq, &submit_list);
+ if (!rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(io, &submit_list);
*rqlist = requeue_list;
}
@@ -1400,17 +1466,6 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
/* All old ioucmds have to be completed */
ubq->nr_io_ready = 0;
- /*
- * old daemon is PF_EXITING, put it now
- *
- * It could be NULL in case of closing one quisced device.
- */
- if (ubq->ubq_daemon)
- put_task_struct(ubq->ubq_daemon);
- /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
- ubq->ubq_daemon = NULL;
- ubq->timeout = false;
-
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -1421,6 +1476,17 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
io->flags &= UBLK_IO_FLAG_CANCELED;
io->cmd = NULL;
io->addr = 0;
+
+ /*
+ * old task is PF_EXITING, put it now
+ *
+ * It could be NULL in case of closing one quiesced
+ * device.
+ */
+ if (io->task) {
+ put_task_struct(io->task);
+ io->task = NULL;
+ }
}
}
@@ -1442,7 +1508,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_queue_reinit(ub, ublk_get_queue(ub, i));
- /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+ /* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
ub->nr_privileged_daemon = 0;
@@ -1590,30 +1656,6 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void ublk_commit_completion(struct ublk_device *ub,
- const struct ublksrv_io_cmd *ub_cmd)
-{
- u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
- struct ublk_queue *ubq = ublk_get_queue(ub, qid);
- struct ublk_io *io = &ubq->ios[tag];
- struct request *req;
-
- /* now this cmd slot is owned by nbd driver */
- io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
- io->res = ub_cmd->result;
-
- /* find the io request and complete */
- req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
- if (WARN_ON_ONCE(unlikely(!req)))
- return;
-
- if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
-
- if (likely(!blk_should_fake_timeout(req->q)))
- ublk_put_req_ref(ubq, req);
-}
-
static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
struct request *req)
{
@@ -1642,17 +1684,8 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
- struct request *rq;
-
- /*
- * Either we fail the request or ublk_rq_task_work_cb
- * will do it
- */
- rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
- if (rq && blk_mq_request_started(rq))
- __ublk_fail_req(ubq, io, rq);
- }
+ if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ __ublk_fail_req(ubq, io, io->req);
}
}
@@ -1708,7 +1741,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
* that ublk_dispatch_req() is always called
*/
req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
- if (req && blk_mq_request_started(req))
+ if (req && blk_mq_request_started(req) && req->tag == tag)
return;
spin_lock(&ubq->cancel_lock);
@@ -1742,6 +1775,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
struct task_struct *task;
+ struct ublk_io *io;
if (WARN_ON_ONCE(!ubq))
return;
@@ -1750,13 +1784,14 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
return;
task = io_uring_cmd_get_task(cmd);
- if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
+ io = &ubq->ios[pdu->tag];
+ if (WARN_ON_ONCE(task && task != io->task))
return;
if (!ubq->canceling)
ublk_start_cancel(ubq);
- WARN_ON_ONCE(ubq->ios[pdu->tag].cmd != cmd);
+ WARN_ON_ONCE(io->cmd != cmd);
ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
@@ -1889,8 +1924,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
ubq->nr_io_ready++;
if (ublk_queue_ready(ubq)) {
- ubq->ubq_daemon = current;
- get_task_struct(ubq->ubq_daemon);
ub->nr_queues_ready++;
if (capable(CAP_SYS_ADMIN))
@@ -1940,6 +1973,20 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
+static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+ if (pdu->buf.reserved0 || pdu->buf.reserved1)
+ return -EINVAL;
+
+ if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
+ return 0;
+}
+
static void ublk_io_release(void *priv)
{
struct request *rq = priv;
@@ -1953,16 +2000,12 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
unsigned int index, unsigned int issue_flags)
{
struct ublk_device *ub = cmd->file->private_data;
- const struct ublk_io *io = &ubq->ios[tag];
struct request *req;
int ret;
if (!ublk_support_zero_copy(ubq))
return -EINVAL;
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- return -EINVAL;
-
req = __ublk_check_and_get_req(ub, ubq, tag, 0);
if (!req)
return -EINVAL;
@@ -1978,17 +2021,12 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
}
static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq, unsigned int tag,
+ const struct ublk_queue *ubq,
unsigned int index, unsigned int issue_flags)
{
- const struct ublk_io *io = &ubq->ios[tag];
-
if (!ublk_support_zero_copy(ubq))
return -EINVAL;
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- return -EINVAL;
-
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
@@ -2031,24 +2069,115 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
goto out;
}
+ if (ublk_support_auto_buf_reg(ubq)) {
+ ret = ublk_set_auto_buf_reg(cmd);
+ if (ret)
+ goto out;
+ }
+
ublk_fill_io_cmd(io, cmd, buf_addr);
+ WRITE_ONCE(io->task, get_task_struct(current));
ublk_mark_io_ready(ub, ubq);
out:
mutex_unlock(&ub->mutex);
return ret;
}
+static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ const struct ublksrv_io_cmd *ub_cmd,
+ unsigned int issue_flags)
+{
+ struct request *req = io->req;
+
+ if (ublk_need_map_io(ubq)) {
+ /*
+ * COMMIT_AND_FETCH_REQ has to provide IO buffer if
+ * NEED GET DATA is not enabled or it is Read IO.
+ */
+ if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
+ req_op(req) == REQ_OP_READ))
+ return -EINVAL;
+ } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
+ /*
+ * User copy requires addr to be unset when command is
+ * not zone append
+ */
+ return -EINVAL;
+ }
+
+ if (ublk_support_auto_buf_reg(ubq)) {
+ int ret;
+
+ /*
+	 * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
+	 * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from the same
+	 * `io_ring_ctx`.
+	 *
+	 * If this uring_cmd's io_ring_ctx isn't the same as the one
+	 * used to register the buffer, it is the ublk server's
+	 * responsibility to unregister the buffer; otherwise this
+	 * ublk request gets stuck.
+ */
+ if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ if (data->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
+ io_buffer_unregister_bvec(cmd, data->buf_index,
+ issue_flags);
+ io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
+ }
+
+ ret = ublk_set_auto_buf_reg(cmd);
+ if (ret)
+ return ret;
+ }
+
+ ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
+
+ /* now this cmd slot is owned by ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+ io->res = ub_cmd->result;
+
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = ub_cmd->zone_append_lba;
+
+ if (likely(!blk_should_fake_timeout(req->q)))
+ ublk_put_req_ref(ubq, req);
+
+ return 0;
+}
+
+static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
+{
+ struct request *req = io->req;
+
+ /*
+ * We have handled UBLK_IO_NEED_GET_DATA command,
+ * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
+ * do the copy work.
+ */
+ io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+ /* update iod->addr because ublksrv may have passed a new io buffer */
+ ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
+ __func__, ubq->q_id, req->tag, io->flags,
+ ublk_get_iod(ubq, req->tag)->addr);
+
+ return ublk_start_io(ubq, req, io);
+}
+
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
{
struct ublk_device *ub = cmd->file->private_data;
+ struct task_struct *task;
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
- struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
@@ -2058,16 +2187,14 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
ubq = ublk_get_queue(ub, ub_cmd->q_id);
- if (!ubq || ub_cmd->q_id != ubq->q_id)
- goto out;
-
- if (ubq->ubq_daemon && ubq->ubq_daemon != current)
- goto out;
if (tag >= ubq->q_depth)
goto out;
io = &ubq->ios[tag];
+ task = READ_ONCE(io->task);
+ if (task && task != current)
+ goto out;
/* there is pending io cmd, something must be wrong */
if (io->flags & UBLK_IO_FLAG_ACTIVE) {
@@ -2075,6 +2202,11 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
}
+ /* only UBLK_IO_FETCH_REQ is allowed if io is not OWNED_BY_SRV */
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) &&
+ _IOC_NR(cmd_op) != UBLK_IO_FETCH_REQ)
+ goto out;
+
/*
* ensure that the user issues UBLK_IO_NEED_GET_DATA
	 * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
@@ -2092,45 +2224,23 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_IO_REGISTER_IO_BUF:
return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
case UBLK_IO_UNREGISTER_IO_BUF:
- return ublk_unregister_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
+ return ublk_unregister_io_buf(cmd, ubq, ub_cmd->addr, issue_flags);
case UBLK_IO_FETCH_REQ:
ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
if (ret)
goto out;
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
-
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
-
- if (ublk_need_map_io(ubq)) {
- /*
- * COMMIT_AND_FETCH_REQ has to provide IO buffer if
- * NEED GET DATA is not enabled or it is Read IO.
- */
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
- req_op(req) == REQ_OP_READ))
- goto out;
- } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
- /*
- * User copy requires addr to be unset when command is
- * not zone append
- */
- ret = -EINVAL;
+ ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags);
+ if (ret)
goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_commit_completion(ub, ub_cmd);
break;
case UBLK_IO_NEED_GET_DATA:
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
- ublk_dispatch_req(ubq, req, issue_flags);
- return -EIOCBQUEUED;
+ io->addr = ub_cmd->addr;
+ if (!ublk_get_data(ubq, io))
+ return -EIOCBQUEUED;
+
+ return UBLK_IO_RES_OK;
default:
goto out;
}
@@ -2334,9 +2444,14 @@ static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
int size = ublk_queue_cmd_buf_size(ub, q_id);
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ int i;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+ if (io->task)
+ put_task_struct(io->task);
+ }
- if (ubq->ubq_daemon)
- put_task_struct(ubq->ubq_daemon);
if (ubq->io_cmd_buf)
free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
}
@@ -2710,6 +2825,9 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
+ if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || info.nr_hw_queues > UBLK_MAX_NR_QUEUES)
+ return -EINVAL;
+
if (capable(CAP_SYS_ADMIN))
info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
@@ -2728,6 +2846,11 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
return -EINVAL;
}
+ if ((info.flags & UBLK_F_QUIESCE) && !(info.flags & UBLK_F_USER_RECOVERY)) {
+ pr_warn("UBLK_F_QUIESCE requires UBLK_F_USER_RECOVERY\n");
+ return -EINVAL;
+ }
+
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
@@ -2744,8 +2867,11 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
	 * For USER_COPY, we depend on userspace to fill the request
	 * buffer by pwrite() to the ublk char device, which can't be
	 * used for unprivileged devices
+ *
+ * Same with zero copy or auto buffer register.
*/
- if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
+ if (info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
return -EINVAL;
}
@@ -2800,10 +2926,12 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ub->dev_info.flags &= UBLK_F_ALL;
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
- UBLK_F_URING_CMD_COMP_IN_TASK;
+ UBLK_F_URING_CMD_COMP_IN_TASK |
+ UBLK_F_PER_IO_DAEMON;
/* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
- if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
+ if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG))
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
/*
@@ -3064,14 +3192,14 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
- pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
- __func__, ub->dev_info.nr_hw_queues, header->dev_id);
- /* wait until new ubq_daemon sending all FETCH_REQ */
+ pr_devel("%s: Waiting for all FETCH_REQs, dev id %d...\n", __func__,
+ header->dev_id);
+
if (wait_for_completion_interruptible(&ub->completion))
return -EINTR;
- pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
- __func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__,
+ header->dev_id);
mutex_lock(&ub->mutex);
if (ublk_nosrv_should_stop_dev(ub))
@@ -3106,6 +3234,127 @@ static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
return 0;
}
+static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
+{
+ struct ublk_param_basic *p = &ub->params.basic;
+ u64 new_size = header->data[0];
+
+ mutex_lock(&ub->mutex);
+ p->dev_sectors = new_size;
+ set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
+ mutex_unlock(&ub->mutex);
+}
+
+struct count_busy {
+ const struct ublk_queue *ubq;
+ unsigned int nr_busy;
+};
+
+static bool ublk_count_busy_req(struct request *rq, void *data)
+{
+ struct count_busy *idle = data;
+
+ if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq)
+ idle->nr_busy += 1;
+ return true;
+}
+
+/* uring_cmd is guaranteed to be active if the associated request is idle */
+static bool ubq_has_idle_io(const struct ublk_queue *ubq)
+{
+ struct count_busy data = {
+ .ubq = ubq,
+ };
+
+ blk_mq_tagset_busy_iter(&ubq->dev->tag_set, ublk_count_busy_req, &data);
+ return data.nr_busy < ubq->q_depth;
+}
+
+/* Wait until each hw queue has at least one idle IO */
+static int ublk_wait_for_idle_io(struct ublk_device *ub,
+ unsigned int timeout_ms)
+{
+ unsigned int elapsed = 0;
+ int ret;
+
+ while (elapsed < timeout_ms && !signal_pending(current)) {
+ unsigned int queues_cancelable = 0;
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ queues_cancelable += !!ubq_has_idle_io(ubq);
+ }
+
+ /*
+ * Each queue needs at least one active command for
+ * notifying ublk server
+ */
+ if (queues_cancelable == ub->dev_info.nr_hw_queues)
+ break;
+
+ msleep(UBLK_REQUEUE_DELAY_MS);
+ elapsed += UBLK_REQUEUE_DELAY_MS;
+ }
+
+ if (signal_pending(current))
+ ret = -EINTR;
+ else if (elapsed >= timeout_ms)
+ ret = -EBUSY;
+ else
+ ret = 0;
+
+ return ret;
+}
+
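+/*
+ * UBLK_CMD_QUIESCE_DEV: mark every queue as canceling while the request
+ * queue is quiesced, wait up to header->data[0] milliseconds (zero means
+ * wait forever) until each queue has an idle I/O, then cancel the
+ * pending uring_cmds so the ublk server gets notified.
+ */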
+static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
+ const struct ublksrv_ctrl_cmd *header)
+{
+ /* zero means wait forever */
+ u64 timeout_ms = header->data[0];
+ struct gendisk *disk;
+ int i, ret = -ENODEV;
+
+ if (!(ub->dev_info.flags & UBLK_F_QUIESCE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ub->mutex);
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto unlock;
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ goto put_disk;
+
+ ret = 0;
+ /* already in expected state */
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto put_disk;
+
+ /* Mark all queues as canceling */
+ blk_mq_quiesce_queue(disk->queue);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ ubq->canceling = true;
+ }
+ blk_mq_unquiesce_queue(disk->queue);
+
+ if (!timeout_ms)
+ timeout_ms = UINT_MAX;
+ ret = ublk_wait_for_idle_io(ub, timeout_ms);
+
+put_disk:
+ ublk_put_disk(disk);
+unlock:
+ mutex_unlock(&ub->mutex);
+
+ /* Cancel pending uring_cmd */
+ if (!ret)
+ ublk_cancel_dev(ub);
+ return ret;
+}
+
/*
* All control commands are sent via /dev/ublk-control, so we have to check
* the destination device's permission
@@ -3191,6 +3440,8 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
case UBLK_CMD_SET_PARAMS:
case UBLK_CMD_START_USER_RECOVERY:
case UBLK_CMD_END_USER_RECOVERY:
+ case UBLK_CMD_UPDATE_SIZE:
+ case UBLK_CMD_QUIESCE_DEV:
mask = MAY_READ | MAY_WRITE;
break;
default:
@@ -3282,6 +3533,13 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_CMD_END_USER_RECOVERY:
ret = ublk_ctrl_end_recovery(ub, header);
break;
+ case UBLK_CMD_UPDATE_SIZE:
+ ublk_ctrl_set_size(ub, header);
+ ret = 0;
+ break;
+ case UBLK_CMD_QUIESCE_DEV:
+ ret = ublk_ctrl_quiesce_dev(ub, header);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -3315,6 +3573,7 @@ static int __init ublk_init(void)
BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
+ BUILD_BUG_ON(sizeof(struct ublk_auto_buf_reg) != 8);
init_waitqueue_head(&ublk_idr_wq);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7cffea01d868..30bca8cb7106 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -571,7 +571,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
- err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
+ err = blk_rq_map_kern(req, report_buf, report_len, GFP_KERNEL);
if (err)
goto out;
@@ -817,7 +817,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
- err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ err = blk_rq_map_kern(req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)
goto out;
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
new file mode 100644
index 000000000000..553b1a713ab9
--- /dev/null
+++ b/drivers/block/zloop.c
@@ -0,0 +1,1385 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Christoph Hellwig.
+ * Copyright (c) 2025, Western Digital Corporation or its affiliates.
+ *
+ * Zoned Loop Device driver - exports a zoned block device using one file per
+ * zone as backing storage.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blk-mq.h>
+#include <linux/blkzoned.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/falloc.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+
+/*
+ * Options for adding (and removing) a device.
+ */
+enum {
+ ZLOOP_OPT_ERR = 0,
+ ZLOOP_OPT_ID = (1 << 0),
+ ZLOOP_OPT_CAPACITY = (1 << 1),
+ ZLOOP_OPT_ZONE_SIZE = (1 << 2),
+ ZLOOP_OPT_ZONE_CAPACITY = (1 << 3),
+ ZLOOP_OPT_NR_CONV_ZONES = (1 << 4),
+ ZLOOP_OPT_BASE_DIR = (1 << 5),
+ ZLOOP_OPT_NR_QUEUES = (1 << 6),
+ ZLOOP_OPT_QUEUE_DEPTH = (1 << 7),
+ ZLOOP_OPT_BUFFERED_IO = (1 << 8),
+};
+
+static const match_table_t zloop_opt_tokens = {
+ { ZLOOP_OPT_ID, "id=%d" },
+ { ZLOOP_OPT_CAPACITY, "capacity_mb=%u" },
+ { ZLOOP_OPT_ZONE_SIZE, "zone_size_mb=%u" },
+ { ZLOOP_OPT_ZONE_CAPACITY, "zone_capacity_mb=%u" },
+ { ZLOOP_OPT_NR_CONV_ZONES, "conv_zones=%u" },
+ { ZLOOP_OPT_BASE_DIR, "base_dir=%s" },
+ { ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" },
+ { ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" },
+ { ZLOOP_OPT_BUFFERED_IO, "buffered_io" },
+ { ZLOOP_OPT_ERR, NULL }
+};
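+
+/*
+ * Devices are added and removed by writing the operation name plus the
+ * options above to the control file, e.g. (illustrative values):
+ *
+ *   echo "add id=0 capacity_mb=16384 zone_size_mb=256" > /dev/zloop-control
+ *   echo "remove id=0" > /dev/zloop-control
+ */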
+
+/* Default values for the "add" operation. */
+#define ZLOOP_DEF_ID -1
+#define ZLOOP_DEF_ZONE_SIZE ((256ULL * SZ_1M) >> SECTOR_SHIFT)
+#define ZLOOP_DEF_NR_ZONES 64
+#define ZLOOP_DEF_NR_CONV_ZONES 8
+#define ZLOOP_DEF_BASE_DIR "/var/local/zloop"
+#define ZLOOP_DEF_NR_QUEUES 1
+#define ZLOOP_DEF_QUEUE_DEPTH 128
+#define ZLOOP_DEF_BUFFERED_IO false
+
+/* Arbitrary limit on the zone size (16GB). */
+#define ZLOOP_MAX_ZONE_SIZE_MB 16384
+
+struct zloop_options {
+ unsigned int mask;
+ int id;
+ sector_t capacity;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_conv_zones;
+ char *base_dir;
+ unsigned int nr_queues;
+ unsigned int queue_depth;
+ bool buffered_io;
+};
+
+/*
+ * Device states.
+ */
+enum {
+ Zlo_creating = 0,
+ Zlo_live,
+ Zlo_deleting,
+};
+
+enum zloop_zone_flags {
+ ZLOOP_ZONE_CONV = 0,
+ ZLOOP_ZONE_SEQ_ERROR,
+};
+
+struct zloop_zone {
+ struct file *file;
+
+ unsigned long flags;
+ struct mutex lock;
+ enum blk_zone_cond cond;
+ sector_t start;
+ sector_t wp;
+
+ gfp_t old_gfp_mask;
+};
+
+struct zloop_device {
+ unsigned int id;
+ unsigned int state;
+
+ struct blk_mq_tag_set tag_set;
+ struct gendisk *disk;
+
+ struct workqueue_struct *workqueue;
+ bool buffered_io;
+
+ const char *base_dir;
+ struct file *data_dir;
+
+ unsigned int zone_shift;
+ sector_t zone_size;
+ sector_t zone_capacity;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int block_size;
+
+ struct zloop_zone zones[] __counted_by(nr_zones);
+};
+
+struct zloop_cmd {
+ struct work_struct work;
+ atomic_t ref;
+ sector_t sector;
+ sector_t nr_sectors;
+ long ret;
+ struct kiocb iocb;
+ struct bio_vec *bvec;
+};
+
+static DEFINE_IDR(zloop_index_idr);
+static DEFINE_MUTEX(zloop_ctl_mutex);
+
+static unsigned int rq_zone_no(struct request *rq)
+{
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ return blk_rq_pos(rq) >> zlo->zone_shift;
+}
+
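+/*
+ * Rebuild a sequential zone's condition and write pointer from the size
+ * of its backing file: an empty file maps to EMPTY, a file of exactly
+ * zone_capacity sectors to FULL, and anything in between to CLOSED with
+ * the write pointer placed right after the last written sector.
+ */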
+static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ lockdep_assert_held(&zone->lock);
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat (err=%d)\n",
+ zone_no, ret);
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ return ret;
+ }
+
+ file_sectors = stat.size >> SECTOR_SHIFT;
+ if (file_sectors > zlo->zone_capacity) {
+ pr_err("Zone %u file too large (%llu sectors > %llu)\n",
+ zone_no, file_sectors, zlo->zone_capacity);
+ return -EINVAL;
+ }
+
+ if (file_sectors & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone %u file size not aligned to block size %u\n",
+ zone_no, zlo->block_size);
+ return -EINVAL;
+ }
+
+ if (!file_sectors) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ } else if (file_sectors == zlo->zone_capacity) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zlo->zone_size;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ zone->wp = zone->start + file_sectors;
+ }
+
+ return 0;
+}
+
+static int zloop_open_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_CLOSED:
+ case BLK_ZONE_COND_IMP_OPEN:
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ break;
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret)
+ goto unlock;
+ }
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ if (zone->wp == zone->start)
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ else
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, 0)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static int zloop_reset_all_zones(struct zloop_device *zlo)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = zlo->nr_conv_zones; i < zlo->nr_zones; i++) {
+ ret = zloop_reset_zone(zlo, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int ret = 0;
+
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
+ return -EIO;
+
+ mutex_lock(&zone->lock);
+
+ if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
+ zone->cond == BLK_ZONE_COND_FULL)
+ goto unlock;
+
+ if (vfs_truncate(&zone->file->f_path, zlo->zone_size << SECTOR_SHIFT)) {
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zlo->zone_size;
+ clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+
+unlock:
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static void zloop_put_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
+ if (!atomic_dec_and_test(&cmd->ref))
+ return;
+ kfree(cmd->bvec);
+ cmd->bvec = NULL;
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_rw_complete(struct kiocb *iocb, long ret)
+{
+ struct zloop_cmd *cmd = container_of(iocb, struct zloop_cmd, iocb);
+
+ cmd->ret = ret;
+ zloop_put_cmd(cmd);
+}
+
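+/*
+ * Translate a read, write or zone-append request into an iocb on the
+ * zone's backing file. For writes to sequential zones the write pointer
+ * is advanced optimistically under zone->lock; if the write later
+ * fails, the zone is flagged SEQ_ERROR and the pointer is rebuilt from
+ * the file size on the next access.
+ */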
+static void zloop_rw(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = rq_zone_no(rq);
+ sector_t sector = blk_rq_pos(rq);
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+ bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
+ int rw = is_write ? ITER_SOURCE : ITER_DEST;
+ struct req_iterator rq_iter;
+ struct zloop_zone *zone;
+ struct iov_iter iter;
+ struct bio_vec tmp;
+ sector_t zone_end;
+ int nr_bvec = 0;
+ int ret;
+
+ atomic_set(&cmd->ref, 2);
+ cmd->sector = sector;
+ cmd->nr_sectors = nr_sectors;
+ cmd->ret = 0;
+
+ /* We should never get an I/O beyond the device capacity. */
+ if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+ zone = &zlo->zones[zone_no];
+ zone_end = zone->start + zlo->zone_capacity;
+
+ /*
+ * The block layer should never send requests that are not fully
+ * contained within the zone.
+ */
+ if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+ if (ret)
+ goto out;
+ }
+
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
+ mutex_lock(&zone->lock);
+
+ if (is_append) {
+ sector = zone->wp;
+ cmd->sector = sector;
+ }
+
+ /*
+ * Write operations must be aligned to the write pointer and
+ * fully contained within the zone capacity.
+ */
+ if (sector != zone->wp || zone->wp + nr_sectors > zone_end) {
+ pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
+ zone_no, sector, zone->wp);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* Implicitly open the target zone. */
+ if (zone->cond == BLK_ZONE_COND_CLOSED ||
+ zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ /*
+ * Advance the write pointer of sequential zones. If the write
+ * fails, the wp position will be corrected when the next I/O
+ * completes.
+ */
+ zone->wp += nr_sectors;
+ if (zone->wp == zone_end)
+ zone->cond = BLK_ZONE_COND_FULL;
+ }
+
+ rq_for_each_bvec(tmp, rq, rq_iter)
+ nr_bvec++;
+
+ if (rq->bio != rq->biotail) {
+ struct bio_vec *bvec;
+
+ cmd->bvec = kmalloc_array(nr_bvec, sizeof(*cmd->bvec), GFP_NOIO);
+ if (!cmd->bvec) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /*
+ * The bios of the request may be started from the middle of
+ * the 'bvec' because of bio splitting, so we can't directly
+ * copy bio->bi_io_vec to new bvec. The rq_for_each_bvec
+ * API will take care of all details for us.
+ */
+ bvec = cmd->bvec;
+ rq_for_each_bvec(tmp, rq, rq_iter) {
+ *bvec = tmp;
+ bvec++;
+ }
+ iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq));
+ } else {
+ /*
+ * Same here, this bio may be started from the middle of the
+ * 'bvec' because of bio splitting, so offset from the bvec
+ * must be passed to iov iterator
+ */
+ iov_iter_bvec(&iter, rw,
+ __bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter),
+ nr_bvec, blk_rq_bytes(rq));
+ iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
+ }
+
+ cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT;
+ cmd->iocb.ki_filp = zone->file;
+ cmd->iocb.ki_complete = zloop_rw_complete;
+ if (!zlo->buffered_io)
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+
+ if (rw == ITER_SOURCE)
+ ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
+ else
+ ret = zone->file->f_op->read_iter(&cmd->iocb, &iter);
+unlock:
+ if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write)
+ mutex_unlock(&zone->lock);
+out:
+ if (ret != -EIOCBQUEUED)
+ zloop_rw_complete(&cmd->iocb, ret);
+ zloop_put_cmd(cmd);
+}
+
+static void zloop_handle_cmd(struct zloop_cmd *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ /*
+ * zloop_rw() always executes asynchronously or completes
+ * directly.
+ */
+ zloop_rw(cmd);
+ return;
+ case REQ_OP_FLUSH:
+ /*
+ * Sync the entire FS containing the zone files instead of
+ * walking all files
+ */
+ cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb);
+ break;
+ case REQ_OP_ZONE_RESET:
+ cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_RESET_ALL:
+ cmd->ret = zloop_reset_all_zones(zlo);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ cmd->ret = zloop_finish_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_OPEN:
+ cmd->ret = zloop_open_zone(zlo, rq_zone_no(rq));
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ cmd->ret = zloop_close_zone(zlo, rq_zone_no(rq));
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ pr_err("Unsupported operation %d\n", req_op(rq));
+ cmd->ret = -EOPNOTSUPP;
+ break;
+ }
+
+ blk_mq_complete_request(rq);
+}
+
+static void zloop_cmd_workfn(struct work_struct *work)
+{
+ struct zloop_cmd *cmd = container_of(work, struct zloop_cmd, work);
+ int orig_flags = current->flags;
+
+ current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
+ zloop_handle_cmd(cmd);
+ current->flags = orig_flags;
+}
+
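+/*
+ * Completion handler: zero-fill short reads, turn partial writes into
+ * -EIO, flag failed sequential-zone writes with SEQ_ERROR, and return
+ * the actual write position of a zone append through rq->__sector.
+ */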
+static void zloop_complete_rq(struct request *rq)
+{
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = cmd->sector >> zlo->zone_shift;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ blk_status_t sts = BLK_STS_OK;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed read sector %llu, %llu sectors\n",
+ zone_no, cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ /* short read */
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ zero_fill_bio(bio);
+ }
+ break;
+ case REQ_OP_WRITE:
+ case REQ_OP_ZONE_APPEND:
+ if (cmd->ret < 0)
+ pr_err("Zone %u: failed %swrite sector %llu, %llu sectors\n",
+ zone_no,
+ req_op(rq) == REQ_OP_WRITE ? "" : "append ",
+ cmd->sector, cmd->nr_sectors);
+
+ if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
+ pr_err("Zone %u: partial write %ld/%u B\n",
+ zone_no, cmd->ret, blk_rq_bytes(rq));
+ cmd->ret = -EIO;
+ }
+
+ if (cmd->ret < 0 && !test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ /*
+ * A write to a sequential zone file failed: mark the
+ * zone as having an error. This will be corrected and
+ * cleared when the next IO is submitted.
+ */
+ set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ break;
+ }
+ if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ rq->__sector = cmd->sector;
+
+ break;
+ default:
+ break;
+ }
+
+ if (cmd->ret < 0)
+ sts = errno_to_blk_status(cmd->ret);
+ blk_mq_end_request(rq, sts);
+}
+
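+/*
+ * Backing-file I/O goes through the VFS and may block, so every command
+ * is bounced to the device workqueue instead of being issued directly
+ * from ->queue_rq.
+ */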
+static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct zloop_device *zlo = rq->q->queuedata;
+
+ if (zlo->state == Zlo_deleting)
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+
+ INIT_WORK(&cmd->work, zloop_cmd_workfn);
+ queue_work(zlo->workqueue, &cmd->work);
+
+ return BLK_STS_OK;
+}
+
+static const struct blk_mq_ops zloop_mq_ops = {
+ .queue_rq = zloop_queue_rq,
+ .complete = zloop_complete_rq,
+};
+
+static int zloop_open(struct gendisk *disk, blk_mode_t mode)
+{
+ struct zloop_device *zlo = disk->private_data;
+ int ret;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ if (zlo->state != Zlo_live)
+ ret = -ENXIO;
+ mutex_unlock(&zloop_ctl_mutex);
+ return ret;
+}
+
+static int zloop_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct zloop_device *zlo = disk->private_data;
+ struct blk_zone blkz = {};
+ unsigned int first, i;
+ int ret;
+
+ first = disk_zone_no(disk, sector);
+ if (first >= zlo->nr_zones)
+ return 0;
+ nr_zones = min(nr_zones, zlo->nr_zones - first);
+
+ for (i = 0; i < nr_zones; i++) {
+ unsigned int zone_no = first + i;
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+
+ mutex_lock(&zone->lock);
+
+ if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ if (ret) {
+ mutex_unlock(&zone->lock);
+ return ret;
+ }
+ }
+
+ blkz.start = zone->start;
+ blkz.len = zlo->zone_size;
+ blkz.wp = zone->wp;
+ blkz.cond = zone->cond;
+ if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
+ blkz.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ blkz.capacity = zlo->zone_size;
+ } else {
+ blkz.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ blkz.capacity = zlo->zone_capacity;
+ }
+
+ mutex_unlock(&zone->lock);
+
+ ret = cb(&blkz, i, data);
+ if (ret)
+ return ret;
+ }
+
+ return nr_zones;
+}
+
+static void zloop_free_disk(struct gendisk *disk)
+{
+ struct zloop_device *zlo = disk->private_data;
+ unsigned int i;
+
+ for (i = 0; i < zlo->nr_zones; i++) {
+ struct zloop_zone *zone = &zlo->zones[i];
+
+ mapping_set_gfp_mask(zone->file->f_mapping,
+ zone->old_gfp_mask);
+ fput(zone->file);
+ }
+
+ fput(zlo->data_dir);
+ destroy_workqueue(zlo->workqueue);
+ kfree(zlo->base_dir);
+ kvfree(zlo);
+}
+
+static const struct block_device_operations zloop_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_open,
+ .report_zones = zloop_report_zones,
+ .free_disk = zloop_free_disk,
+};
+
+__printf(3, 4)
+static struct file *zloop_filp_open_fmt(int oflags, umode_t mode,
+ const char *fmt, ...)
+{
+ struct file *file;
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ file = filp_open(p, oflags, mode);
+ kfree(p);
+ return file;
+}
+
+static int zloop_get_block_size(struct zloop_device *zlo,
+ struct zloop_zone *zone)
+{
+ struct block_device *sb_bdev = zone->file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+ * If the FS block size is lower than or equal to 4K, use that as the
+ * device block size. Otherwise, fall back to the FS direct IO alignment
+ * constraint if that is provided, and to the FS underlying device
+ * physical block size if the direct IO alignment is unknown.
+ */
+ if (file_inode(zone->file)->i_sb->s_blocksize <= SZ_4K)
+ zlo->block_size = file_inode(zone->file)->i_sb->s_blocksize;
+ else if (!vfs_getattr(&zone->file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ zlo->block_size = st.dio_offset_align;
+ else if (sb_bdev)
+ zlo->block_size = bdev_physical_block_size(sb_bdev);
+ else
+ zlo->block_size = SECTOR_SIZE;
+
+ if (zlo->zone_capacity & ((zlo->block_size >> SECTOR_SHIFT) - 1)) {
+ pr_err("Zone capacity is not aligned to block size %u\n",
+ zlo->block_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts,
+ unsigned int zone_no, bool restore)
+{
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ int oflags = O_RDWR;
+ struct kstat stat;
+ sector_t file_sectors;
+ int ret;
+
+ mutex_init(&zone->lock);
+ zone->start = (sector_t)zone_no << zlo->zone_shift;
+
+ if (!restore)
+ oflags |= O_CREAT;
+
+ if (!opts->buffered_io)
+ oflags |= O_DIRECT;
+
+ if (zone_no < zlo->nr_conv_zones) {
+ /* Conventional zone file. */
+ set_bit(ZLOOP_ZONE_CONV, &zone->flags);
+ zone->cond = BLK_ZONE_COND_NOT_WP;
+ zone->wp = U64_MAX;
+
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/cnv-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
+ if (ret < 0) {
+ pr_err("Failed to get zone %u file stat\n", zone_no);
+ return ret;
+ }
+ file_sectors = stat.size >> SECTOR_SHIFT;
+
+ if (restore && file_sectors != zlo->zone_size) {
+ pr_err("Invalid conventional zone %u file size (%llu sectors != %llu)\n",
+ zone_no, file_sectors, zlo->zone_size);
+ return -EINVAL;
+ }
+
+ ret = vfs_truncate(&zone->file->f_path,
+ zlo->zone_size << SECTOR_SHIFT);
+ if (ret < 0) {
+ pr_err("Failed to truncate zone %u file (err=%d)\n",
+ zone_no, ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Sequential zone file. */
+ zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, zone_no);
+ if (IS_ERR(zone->file)) {
+ pr_err("Failed to open zone %u file %s/%u/seq-%06u (err=%ld)",
+ zone_no, zlo->base_dir, zlo->id, zone_no,
+ PTR_ERR(zone->file));
+ return PTR_ERR(zone->file);
+ }
+
+ if (!zlo->block_size) {
+ ret = zloop_get_block_size(zlo, zone);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&zone->lock);
+ ret = zloop_update_seq_zone(zlo, zone_no);
+ mutex_unlock(&zone->lock);
+
+ return ret;
+}
+
+static bool zloop_dev_exists(struct zloop_device *zlo)
+{
+ struct file *cnv, *seq;
+ bool exists;
+
+ cnv = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/cnv-%06u",
+ zlo->base_dir, zlo->id, 0);
+ seq = zloop_filp_open_fmt(O_RDONLY, 0600, "%s/%u/seq-%06u",
+ zlo->base_dir, zlo->id, 0);
+ exists = !IS_ERR(cnv) || !IS_ERR(seq);
+
+ if (!IS_ERR(cnv))
+ fput(cnv);
+ if (!IS_ERR(seq))
+ fput(seq);
+
+ return exists;
+}
+
+static int zloop_ctl_add(struct zloop_options *opts)
+{
+ struct queue_limits lim = {
+ .max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
+ .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT,
+ .chunk_sectors = opts->zone_size,
+ .features = BLK_FEAT_ZONED,
+ };
+ unsigned int nr_zones, i, j;
+ struct zloop_device *zlo;
+ int ret = -EINVAL;
+ bool restore;
+
+ __module_get(THIS_MODULE);
+
+ nr_zones = opts->capacity >> ilog2(opts->zone_size);
+ if (opts->nr_conv_zones >= nr_zones) {
+ pr_err("Invalid number of conventional zones %u\n",
+ opts->nr_conv_zones);
+ goto out;
+ }
+
+ zlo = kvzalloc(struct_size(zlo, zones, nr_zones), GFP_KERNEL);
+ if (!zlo) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ zlo->state = Zlo_creating;
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ goto out_free_dev;
+
+ /* Allocate id, if @opts->id >= 0, we're requesting that specific id */
+ if (opts->id >= 0) {
+ ret = idr_alloc(&zloop_index_idr, zlo,
+ opts->id, opts->id + 1, GFP_KERNEL);
+ if (ret == -ENOSPC)
+ ret = -EEXIST;
+ } else {
+ ret = idr_alloc(&zloop_index_idr, zlo, 0, 0, GFP_KERNEL);
+ }
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret < 0)
+ goto out_free_dev;
+
+ zlo->id = ret;
+ zlo->zone_shift = ilog2(opts->zone_size);
+ zlo->zone_size = opts->zone_size;
+ if (opts->zone_capacity)
+ zlo->zone_capacity = opts->zone_capacity;
+ else
+ zlo->zone_capacity = zlo->zone_size;
+ zlo->nr_zones = nr_zones;
+ zlo->nr_conv_zones = opts->nr_conv_zones;
+ zlo->buffered_io = opts->buffered_io;
+
+ zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE,
+ opts->nr_queues * opts->queue_depth, zlo->id);
+ if (!zlo->workqueue) {
+ ret = -ENOMEM;
+ goto out_free_idr;
+ }
+
+ if (opts->base_dir)
+ zlo->base_dir = kstrdup(opts->base_dir, GFP_KERNEL);
+ else
+ zlo->base_dir = kstrdup(ZLOOP_DEF_BASE_DIR, GFP_KERNEL);
+ if (!zlo->base_dir) {
+ ret = -ENOMEM;
+ goto out_destroy_workqueue;
+ }
+
+ zlo->data_dir = zloop_filp_open_fmt(O_RDONLY | O_DIRECTORY, 0, "%s/%u",
+ zlo->base_dir, zlo->id);
+ if (IS_ERR(zlo->data_dir)) {
+ ret = PTR_ERR(zlo->data_dir);
+ pr_warn("Failed to open directory %s/%u (err=%d)\n",
+ zlo->base_dir, zlo->id, ret);
+ goto out_free_base_dir;
+ }
+
+ /*
+ * If we already have zone files, we are restoring a device created by a
+ * previous add operation. In this case, zloop_init_zone() will check
+ * that the zone files are consistent with the zone configuration given.
+ */
+ restore = zloop_dev_exists(zlo);
+ for (i = 0; i < nr_zones; i++) {
+ ret = zloop_init_zone(zlo, opts, i, restore);
+ if (ret)
+ goto out_close_files;
+ }
+
+ lim.physical_block_size = zlo->block_size;
+ lim.logical_block_size = zlo->block_size;
+
+ zlo->tag_set.ops = &zloop_mq_ops;
+ zlo->tag_set.nr_hw_queues = opts->nr_queues;
+ zlo->tag_set.queue_depth = opts->queue_depth;
+ zlo->tag_set.numa_node = NUMA_NO_NODE;
+ zlo->tag_set.cmd_size = sizeof(struct zloop_cmd);
+ zlo->tag_set.driver_data = zlo;
+
+ ret = blk_mq_alloc_tag_set(&zlo->tag_set);
+ if (ret) {
+ pr_err("blk_mq_alloc_tag_set failed (err=%d)\n", ret);
+ goto out_close_files;
+ }
+
+ zlo->disk = blk_mq_alloc_disk(&zlo->tag_set, &lim, zlo);
+ if (IS_ERR(zlo->disk)) {
+ pr_err("blk_mq_alloc_disk failed (err=%d)\n", ret);
+ ret = PTR_ERR(zlo->disk);
+ goto out_cleanup_tags;
+ }
+ zlo->disk->flags = GENHD_FL_NO_PART;
+ zlo->disk->fops = &zloop_fops;
+ zlo->disk->private_data = zlo;
+ sprintf(zlo->disk->disk_name, "zloop%d", zlo->id);
+ set_capacity(zlo->disk, (u64)lim.chunk_sectors * zlo->nr_zones);
+
+ ret = blk_revalidate_disk_zones(zlo->disk);
+ if (ret)
+ goto out_cleanup_disk;
+
+ ret = add_disk(zlo->disk);
+ if (ret) {
+ pr_err("add_disk failed (err=%d)\n", ret);
+ goto out_cleanup_disk;
+ }
+
+ mutex_lock(&zloop_ctl_mutex);
+ zlo->state = Zlo_live;
+ mutex_unlock(&zloop_ctl_mutex);
+
+ pr_info("Added device %d: %u zones of %llu MB, %u B block size\n",
+ zlo->id, zlo->nr_zones,
+ ((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20,
+ zlo->block_size);
+
+ return 0;
+
+out_cleanup_disk:
+ put_disk(zlo->disk);
+out_cleanup_tags:
+ blk_mq_free_tag_set(&zlo->tag_set);
+out_close_files:
+ for (j = 0; j < i; j++) {
+ struct zloop_zone *zone = &zlo->zones[j];
+
+ if (!IS_ERR_OR_NULL(zone->file))
+ fput(zone->file);
+ }
+ fput(zlo->data_dir);
+out_free_base_dir:
+ kfree(zlo->base_dir);
+out_destroy_workqueue:
+ destroy_workqueue(zlo->workqueue);
+out_free_idr:
+ mutex_lock(&zloop_ctl_mutex);
+ idr_remove(&zloop_index_idr, zlo->id);
+ mutex_unlock(&zloop_ctl_mutex);
+out_free_dev:
+ kvfree(zlo);
+out:
+ module_put(THIS_MODULE);
+ if (ret == -ENOENT)
+ ret = -EINVAL;
+ return ret;
+}
+
+static int zloop_ctl_remove(struct zloop_options *opts)
+{
+ struct zloop_device *zlo;
+ int ret;
+
+ if (!(opts->mask & ZLOOP_OPT_ID)) {
+ pr_err("No ID specified\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_killable(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ zlo = idr_find(&zloop_index_idr, opts->id);
+ if (!zlo || zlo->state == Zlo_creating) {
+ ret = -ENODEV;
+ } else if (zlo->state == Zlo_deleting) {
+ ret = -EINVAL;
+ } else {
+ idr_remove(&zloop_index_idr, zlo->id);
+ zlo->state = Zlo_deleting;
+ }
+
+ mutex_unlock(&zloop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ del_gendisk(zlo->disk);
+ put_disk(zlo->disk);
+ blk_mq_free_tag_set(&zlo->tag_set);
+
+ pr_info("Removed device %d\n", opts->id);
+
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static int zloop_parse_options(struct zloop_options *opts, const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ unsigned int token;
+ int ret = 0;
+
+ /* Set defaults. */
+ opts->mask = 0;
+ opts->id = ZLOOP_DEF_ID;
+ opts->capacity = ZLOOP_DEF_ZONE_SIZE * ZLOOP_DEF_NR_ZONES;
+ opts->zone_size = ZLOOP_DEF_ZONE_SIZE;
+ opts->nr_conv_zones = ZLOOP_DEF_NR_CONV_ZONES;
+ opts->nr_queues = ZLOOP_DEF_NR_QUEUES;
+ opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH;
+ opts->buffered_io = ZLOOP_DEF_BUFFERED_IO;
+
+ if (!buf)
+ return 0;
+
+ /* Skip leading spaces before the options. */
+ while (isspace(*buf))
+ buf++;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ /* Parse the options, doing only some light invalid value checks. */
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, zloop_opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case ZLOOP_OPT_ID:
+ if (match_int(args, &opts->id)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ case ZLOOP_OPT_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_SIZE:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token || token > ZLOOP_MAX_ZONE_SIZE_MB ||
+ !is_power_of_2(token)) {
+ pr_err("Invalid zone size %u\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_size =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_ZONE_CAPACITY:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid zone capacity\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_capacity =
+ ((sector_t)token * SZ_1M) >> SECTOR_SHIFT;
+ break;
+ case ZLOOP_OPT_NR_CONV_ZONES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_conv_zones = token;
+ break;
+ case ZLOOP_OPT_BASE_DIR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->base_dir);
+ opts->base_dir = p;
+ break;
+ case ZLOOP_OPT_NR_QUEUES:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid number of queues\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_queues = min(token, num_online_cpus());
+ break;
+ case ZLOOP_OPT_QUEUE_DEPTH:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!token) {
+ pr_err("Invalid queue depth\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_depth = token;
+ break;
+ case ZLOOP_OPT_BUFFERED_IO:
+ opts->buffered_io = true;
+ break;
+ case ZLOOP_OPT_ERR:
+ default:
+ pr_warn("unknown parameter or missing value '%s'\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+ if (opts->capacity <= opts->zone_size) {
+ pr_err("Invalid capacity\n");
+ goto out;
+ }
+
+ if (opts->zone_capacity > opts->zone_size) {
+ pr_err("Invalid zone capacity\n");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(options);
+ return ret;
+}
+
+enum {
+ ZLOOP_CTL_ADD,
+ ZLOOP_CTL_REMOVE,
+};
+
+static struct zloop_ctl_op {
+ int code;
+ const char *name;
+} zloop_ctl_ops[] = {
+ { ZLOOP_CTL_ADD, "add" },
+ { ZLOOP_CTL_REMOVE, "remove" },
+ { -1, NULL },
+};
+
+static ssize_t zloop_ctl_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct zloop_options opts = { };
+ struct zloop_ctl_op *op;
+ const char *buf, *opts_buf;
+ int i, ret;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ for (i = 0; i < ARRAY_SIZE(zloop_ctl_ops); i++) {
+ op = &zloop_ctl_ops[i];
+ if (!op->name) {
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!strncmp(buf, op->name, strlen(op->name)))
+ break;
+ }
+
+ if (count <= strlen(op->name))
+ opts_buf = NULL;
+ else
+ opts_buf = buf + strlen(op->name);
+
+ ret = zloop_parse_options(&opts, opts_buf);
+ if (ret) {
+ pr_err("Failed to parse options\n");
+ goto out;
+ }
+
+ switch (op->code) {
+ case ZLOOP_CTL_ADD:
+ ret = zloop_ctl_add(&opts);
+ break;
+ case ZLOOP_CTL_REMOVE:
+ ret = zloop_ctl_remove(&opts);
+ break;
+ default:
+ pr_err("Invalid operation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(opts.base_dir);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
+static int zloop_ctl_show(struct seq_file *seq_file, void *private)
+{
+ const struct match_token *tok;
+ int i;
+
+ /* Add operation */
+ seq_printf(seq_file, "%s ", zloop_ctl_ops[0].name);
+ for (i = 0; i < ARRAY_SIZE(zloop_opt_tokens); i++) {
+ tok = &zloop_opt_tokens[i];
+ if (!tok->pattern)
+ break;
+ if (i)
+ seq_putc(seq_file, ',');
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_putc(seq_file, '\n');
+
+ /* Remove operation */
+ seq_puts(seq_file, zloop_ctl_ops[1].name);
+ seq_puts(seq_file, " id=%d\n");
+
+ return 0;
+}
+
+static int zloop_ctl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return single_open(file, zloop_ctl_show, NULL);
+}
+
+static int zloop_ctl_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations zloop_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = zloop_ctl_open,
+ .release = zloop_ctl_release,
+ .write = zloop_ctl_write,
+ .read = seq_read,
+};
+
+static struct miscdevice zloop_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "zloop-control",
+ .fops = &zloop_ctl_fops,
+};
+
+static int __init zloop_init(void)
+{
+ int ret;
+
+ ret = misc_register(&zloop_misc);
+ if (ret) {
+ pr_err("Failed to register misc device: %d\n", ret);
+ return ret;
+ }
+ pr_info("Module loaded\n");
+
+ return 0;
+}
+
+static void __exit zloop_exit(void)
+{
+ misc_deregister(&zloop_misc);
+ idr_destroy(&zloop_index_idr);
+}
+
+module_init(zloop_init);
+module_exit(zloop_exit);
+
+MODULE_DESCRIPTION("Zoned loopback device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c
index 0f7f252c12f4..b75016e0e654 100644
--- a/drivers/block/zram/backend_deflate.c
+++ b/drivers/block/zram/backend_deflate.c
@@ -8,7 +8,7 @@
#include "backend_deflate.h"
/* Use the same value as crypto API */
-#define DEFLATE_DEF_WINBITS 11
+#define DEFLATE_DEF_WINBITS (-11)
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
struct deflate_ctx {
@@ -22,8 +22,10 @@ static void deflate_release_params(struct zcomp_params *params)
static int deflate_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = Z_DEFAULT_COMPRESSION;
+ if (params->deflate.winbits == ZCOMP_PARAM_NOT_SET)
+ params->deflate.winbits = DEFLATE_DEF_WINBITS;
return 0;
}
@@ -57,13 +59,13 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
return -ENOMEM;
ctx->context = zctx;
- sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL);
+ sz = zlib_deflate_workspacesize(params->deflate.winbits, MAX_MEM_LEVEL);
zctx->cctx.workspace = vzalloc(sz);
if (!zctx->cctx.workspace)
goto error;
ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED,
- -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
+ params->deflate.winbits, DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
if (ret != Z_OK)
goto error;
@@ -73,7 +75,7 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
if (!zctx->dctx.workspace)
goto error;
- ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS);
+ ret = zlib_inflateInit2(&zctx->dctx, params->deflate.winbits);
if (ret != Z_OK)
goto error;
diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c
index 847f3334eb38..daccd60857eb 100644
--- a/drivers/block/zram/backend_lz4.c
+++ b/drivers/block/zram/backend_lz4.c
@@ -18,7 +18,7 @@ static void lz4_release_params(struct zcomp_params *params)
static int lz4_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4_ACCELERATION_DEFAULT;
return 0;
diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c
index 5f37d5abcaeb..9e8a35dfa56d 100644
--- a/drivers/block/zram/backend_lz4hc.c
+++ b/drivers/block/zram/backend_lz4hc.c
@@ -18,7 +18,7 @@ static void lz4hc_release_params(struct zcomp_params *params)
static int lz4hc_setup_params(struct zcomp_params *params)
{
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = LZ4HC_DEFAULT_CLEVEL;
return 0;
diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c
index 22c8067536f3..81defb98ed09 100644
--- a/drivers/block/zram/backend_zstd.c
+++ b/drivers/block/zram/backend_zstd.c
@@ -58,7 +58,7 @@ static int zstd_setup_params(struct zcomp_params *params)
return -ENOMEM;
params->drv_data = zp;
- if (params->level == ZCOMP_PARAM_NO_LEVEL)
+ if (params->level == ZCOMP_PARAM_NOT_SET)
params->level = zstd_default_clevel();
zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 25339ed1e07e..4acffe671a5e 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -5,7 +5,11 @@
#include <linux/mutex.h>
-#define ZCOMP_PARAM_NO_LEVEL INT_MIN
+#define ZCOMP_PARAM_NOT_SET INT_MIN
+
+struct deflate_params {
+ s32 winbits;
+};
/*
* Immutable driver (backend) parameters. The driver may attach private
@@ -17,6 +21,9 @@ struct zcomp_params {
void *dict;
size_t dict_sz;
s32 level;
+ union {
+ struct deflate_params deflate;
+ };
void *drv_data;
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fda7d8624889..54c57103715f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -734,114 +734,19 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-#define PAGE_WB_SIG "page_index="
-
-#define PAGE_WRITEBACK 0
-#define HUGE_WRITEBACK (1<<0)
-#define IDLE_WRITEBACK (1<<1)
-#define INCOMPRESSIBLE_WRITEBACK (1<<2)
-
-static int scan_slots_for_writeback(struct zram *zram, u32 mode,
- unsigned long nr_pages,
- unsigned long index,
- struct zram_pp_ctl *ctl)
-{
- for (; nr_pages != 0; index++, nr_pages--) {
- bool ok = true;
-
- zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index))
- goto next;
-
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME))
- goto next;
-
- if (mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
- if (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
- if (mode & INCOMPRESSIBLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
- ok = place_pp_slot(zram, ctl, index);
-next:
- zram_slot_unlock(zram, index);
- if (!ok)
- break;
- }
-
- return 0;
-}
-
-static ssize_t writeback_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
{
- struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
- struct zram_pp_ctl *ctl = NULL;
+ unsigned long blk_idx = 0;
+ struct page *page = NULL;
struct zram_pp_slot *pps;
- unsigned long index = 0;
- struct bio bio;
struct bio_vec bio_vec;
- struct page *page = NULL;
- ssize_t ret = len;
- int mode, err;
- unsigned long blk_idx = 0;
-
- if (sysfs_streq(buf, "idle"))
- mode = IDLE_WRITEBACK;
- else if (sysfs_streq(buf, "huge"))
- mode = HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "huge_idle"))
- mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
- else if (sysfs_streq(buf, "incompressible"))
- mode = INCOMPRESSIBLE_WRITEBACK;
- else {
- if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
- return -EINVAL;
-
- if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
- index >= nr_pages)
- return -EINVAL;
-
- nr_pages = 1;
- mode = PAGE_WRITEBACK;
- }
-
- down_read(&zram->init_lock);
- if (!init_done(zram)) {
- ret = -EINVAL;
- goto release_init_lock;
- }
-
- /* Do not permit concurrent post-processing actions. */
- if (atomic_xchg(&zram->pp_in_progress, 1)) {
- up_read(&zram->init_lock);
- return -EAGAIN;
- }
-
- if (!zram->backing_dev) {
- ret = -ENODEV;
- goto release_init_lock;
- }
+ struct bio bio;
+ int ret = 0, err;
+ u32 index;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
-
- ctl = init_pp_ctl();
- if (!ctl) {
- ret = -ENOMEM;
- goto release_init_lock;
- }
-
- scan_slots_for_writeback(zram, mode, nr_pages, index, ctl);
+ if (!page)
+ return -ENOMEM;
while ((pps = select_pp_slot(ctl))) {
spin_lock(&zram->wb_limit_lock);
@@ -929,10 +834,215 @@ next:
if (blk_idx)
free_block_bdev(zram, blk_idx);
-
-release_init_lock:
if (page)
__free_page(page);
+
+ return ret;
+}
+
+#define PAGE_WRITEBACK 0
+#define HUGE_WRITEBACK (1 << 0)
+#define IDLE_WRITEBACK (1 << 1)
+#define INCOMPRESSIBLE_WRITEBACK (1 << 2)
+
+static int parse_page_index(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ int ret;
+
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+ *hi = *lo + 1;
+ return 0;
+}
+
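+/*
+ * Parse an inclusive "lo-hi" page range; *hi is bumped by one so that
+ * the scan loop can treat it as an exclusive bound.
+ */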
+static int parse_page_indexes(char *val, unsigned long nr_pages,
+ unsigned long *lo, unsigned long *hi)
+{
+ char *delim;
+ int ret;
+
+ delim = strchr(val, '-');
+ if (!delim)
+ return -EINVAL;
+
+ *delim = 0x00;
+ ret = kstrtoul(val, 10, lo);
+ if (ret)
+ return ret;
+ if (*lo >= nr_pages)
+ return -ERANGE;
+
+ ret = kstrtoul(delim + 1, 10, hi);
+ if (ret)
+ return ret;
+ if (*hi >= nr_pages || *lo > *hi)
+ return -ERANGE;
+ *hi += 1;
+ return 0;
+}
+
+static int parse_mode(char *val, u32 *mode)
+{
+ *mode = 0;
+
+ if (!strcmp(val, "idle"))
+ *mode = IDLE_WRITEBACK;
+ if (!strcmp(val, "huge"))
+ *mode = HUGE_WRITEBACK;
+ if (!strcmp(val, "huge_idle"))
+ *mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
+ if (!strcmp(val, "incompressible"))
+ *mode = INCOMPRESSIBLE_WRITEBACK;
+
+ if (*mode == 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int scan_slots_for_writeback(struct zram *zram, u32 mode,
+ unsigned long lo, unsigned long hi,
+ struct zram_pp_ctl *ctl)
+{
+ u32 index = lo;
+
+ while (index < hi) {
+ bool ok = true;
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME))
+ goto next;
+
+ if (mode & IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode & HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+ if (mode & INCOMPRESSIBLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ ok = place_pp_slot(zram, ctl, index);
+next:
+ zram_slot_unlock(zram, index);
+ if (!ok)
+ break;
+ index++;
+ }
+
+ return 0;
+}
+
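+/*
+ * Writeback requests are parsed as key=value pairs, e.g. (illustrative):
+ *
+ *   echo "type=idle" > /sys/block/zram0/writeback
+ *   echo "page_indexes=1-20" > /sys/block/zram0/writeback
+ *
+ * The bare legacy forms ("idle", "huge", "huge_idle", "incompressible")
+ * are still accepted.
+ */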
+static ssize_t writeback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 nr_pages = zram->disksize >> PAGE_SHIFT;
+ unsigned long lo = 0, hi = nr_pages;
+ struct zram_pp_ctl *ctl = NULL;
+ char *args, *param, *val;
+ ssize_t ret = len;
+ int err;
+ u32 mode = 0;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
+ if (!zram->backing_dev) {
+ ret = -ENODEV;
+ goto release_init_lock;
+ }
+
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ args = skip_spaces(buf);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ /*
+ * Workaround to support the old writeback interface.
+ *
+ * The old writeback interface has a minor inconsistency: it
+ * requires key=value only for the page_index parameter, while
+ * the writeback mode is a valueless parameter.
+ *
+ * This is no longer the case: all parameters are now required
+ * to have values. However, we still need to support the legacy
+ * writeback interface format, so we check whether a valueless
+ * parameter can be recognized as the (legacy) writeback mode.
+ */
+ if (!val || !*val) {
+ err = parse_mode(param, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "type")) {
+ err = parse_mode(val, &mode);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ break;
+ }
+
+ if (!strcmp(param, "page_index")) {
+ err = parse_page_index(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+
+ if (!strcmp(param, "page_indexes")) {
+ err = parse_page_indexes(val, nr_pages, &lo, &hi);
+ if (err) {
+ ret = err;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ continue;
+ }
+ }
+
+ err = zram_writeback_slots(zram, ctl);
+ if (err)
+ ret = err;
+
+release_init_lock:
release_pp_ctl(zram, ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
@@ -1166,13 +1276,15 @@ static void comp_params_reset(struct zram *zram, u32 prio)
struct zcomp_params *params = &zram->params[prio];
vfree(params->dict);
- params->level = ZCOMP_PARAM_NO_LEVEL;
+ params->level = ZCOMP_PARAM_NOT_SET;
+ params->deflate.winbits = ZCOMP_PARAM_NOT_SET;
params->dict_sz = 0;
params->dict = NULL;
}
static int comp_params_store(struct zram *zram, u32 prio, s32 level,
- const char *dict_path)
+ const char *dict_path,
+ struct deflate_params *deflate_params)
{
ssize_t sz = 0;
@@ -1190,6 +1302,7 @@ static int comp_params_store(struct zram *zram, u32 prio, s32 level,
zram->params[prio].dict_sz = sz;
zram->params[prio].level = level;
+ zram->params[prio].deflate.winbits = deflate_params->winbits;
return 0;
}
@@ -1198,11 +1311,14 @@ static ssize_t algorithm_params_store(struct device *dev,
const char *buf,
size_t len)
{
- s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL;
+ s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NOT_SET;
char *args, *param, *val, *algo = NULL, *dict_path = NULL;
+ struct deflate_params deflate_params;
struct zram *zram = dev_to_zram(dev);
int ret;
+ deflate_params.winbits = ZCOMP_PARAM_NOT_SET;
+
args = skip_spaces(buf);
while (*args) {
args = next_arg(args, &param, &val);
@@ -1233,6 +1349,13 @@ static ssize_t algorithm_params_store(struct device *dev,
dict_path = val;
continue;
}
+
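+ /*
+ * Per-algorithm deflate parameter, e.g. (illustrative):
+ *
+ * echo "algo=deflate deflate.winbits=-12" > \
+ * /sys/block/zram0/algorithm_params
+ */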
+ if (!strcmp(param, "deflate.winbits")) {
+ ret = kstrtoint(val, 10, &deflate_params.winbits);
+ if (ret)
+ return ret;
+ continue;
+ }
}
/* Lookup priority by algorithm name */
@@ -1254,7 +1377,7 @@ static ssize_t algorithm_params_store(struct device *dev,
if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS)
return -EINVAL;
- ret = comp_params_store(zram, prio, level, dict_path);
+ ret = comp_params_store(zram, prio, level, dict_path, &deflate_params);
return ret ? ret : len;
}
@@ -1694,7 +1817,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
*/
handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
@@ -1761,7 +1884,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle);
@@ -1981,10 +2104,15 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
* We are holding per-CPU stream mutex and entry lock so better
* avoid direct reclaim. Allocation error is not fatal since
* we still have the old object in the mem_pool.
+ *
+ * XXX: technically, the node we really want here is the node that holds
+ * the original compressed data. But that would require us to modify
+ * zsmalloc API to return this information. For now, we will make do with
+ * the node of the page allocated for recompression.
*/
handle_new = zs_malloc(zram->mem_pool, comp_len_new,
GFP_NOIO | __GFP_NOWARN |
- __GFP_HIGHMEM | __GFP_MOVABLE);
+ __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page));
if (IS_ERR_VALUE(handle_new)) {
zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 7771edf54fb3..4ab32abf0f48 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -56,18 +56,6 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.
-config BT_HCIBTUSB_AUTO_ISOC_ALT
- bool "Automatically adjust alternate setting for Isoc endpoints"
- depends on BT_HCIBTUSB
- default y if CHROME_PLATFORMS
- help
- Say Y here to automatically adjusting the alternate setting for
- HCI_USER_CHANNEL whenever a SCO link is established.
-
- When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets
- and configures isoc endpoint alternate setting automatically when
- HCI_USER_CHANNEL is in use.
-
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 1c7f89e134b3..1e3a56e9b139 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -158,7 +158,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
static void bluecard_activity_led_timeout(struct timer_list *t)
{
- struct bluecard_info *info = from_timer(info, t, timer);
+ struct bluecard_info *info = timer_container_of(info, t, timer);
unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_ACTIVITY, &(info->hw_state))) {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 48e2f400957b..55cc1652bfe4 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -2719,7 +2719,7 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
} __packed data;
efi_status_t status;
- unsigned long data_size = 0;
+ unsigned long data_size = sizeof(data);
efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03,
0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31);
@@ -2730,15 +2730,9 @@ static int btintel_uefi_get_dsbr(u32 *dsbr_var)
return -EOPNOTSUPP;
status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
- NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL || !data_size)
- return -EIO;
-
- status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
&data);
- if (status != EFI_SUCCESS)
+ if (status != EFI_SUCCESS || data_size != sizeof(data))
return -ENXIO;
*dsbr_var = data.dsbr;
@@ -3688,7 +3682,7 @@ int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
}
EXPORT_SYMBOL_GPL(btintel_configure_setup);
-int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
{
struct intel_tlv *tlv = (void *)&skb->data[5];
@@ -3716,7 +3710,6 @@ int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
recv_frame:
return hci_recv_frame(hdev, skb);
}
-EXPORT_SYMBOL_GPL(btintel_diagnostics);
int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 2aece3effa4e..1d12c4113c66 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -277,7 +277,6 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
int btintel_shutdown_combined(struct hci_dev *hdev);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
void btintel_print_fseq_info(struct hci_dev *hdev);
-int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -411,9 +410,4 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
static inline void btintel_print_fseq_info(struct hci_dev *hdev)
{
}
-
-static inline int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
-{
- return -EOPNOTSUPP;
-}
#endif
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 0a759ea26fd3..563165c5efae 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -208,6 +208,96 @@ static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
memcpy(buf->data, skb->data, tfd->size);
}
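+
+/*
+ * Snapshot the boot-stage, IPC and mailbox CSRs: each value is logged
+ * with bt_dev_dbg() and also packed into an skb passed to
+ * hci_recv_diag(), so the dump appears as a diagnostic packet in HCI
+ * traces.
+ */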
+static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ u16 cr_hia, cr_tia;
+ u32 reg, mbox_reg;
+ struct sk_buff *skb;
+ u8 buf[80];
+
+ skb = alloc_skb(1024, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ snprintf(buf, sizeof(buf), "%s", "---- Dump of debug registers ---");
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ snprintf(buf, sizeof(buf), "boot stage: 0x%8.8x", reg);
+ bt_dev_dbg(hdev, "%s", buf);
+ skb_put_data(skb, buf, strlen(buf));
+ data->boot_stage_cache = reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_STATUS_REG);
+ snprintf(buf, sizeof(buf), "ipc status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_CONTROL_REG);
+ snprintf(buf, sizeof(buf), "ipc control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG);
+ snprintf(buf, sizeof(buf), "ipc sleep control: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ /* Read the mailbox status and registers */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MBOX_STATUS_REG);
+ snprintf(buf, sizeof(buf), "mbox status: 0x%8.8x", reg);
+ skb_put_data(skb, buf, strlen(buf));
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_1_REG);
+ snprintf(buf, sizeof(buf), "mbox_1: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_2_REG);
+ snprintf(buf, sizeof(buf), "mbox_2: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_3_REG);
+ snprintf(buf, sizeof(buf), "mbox_3: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ if (reg & BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4) {
+ mbox_reg = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_MBOX_4_REG);
+ snprintf(buf, sizeof(buf), "mbox_4: 0x%8.8x", mbox_reg);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ }
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+ snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+
+ cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+ cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
+ snprintf(buf, sizeof(buf), "txq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
+ skb_put_data(skb, buf, strlen(buf));
+ bt_dev_dbg(hdev, "%s", buf);
+ snprintf(buf, sizeof(buf), "--------------------------------");
+ bt_dev_dbg(hdev, "%s", buf);
+
+ hci_recv_diag(hdev, skb);
+}
+
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
struct sk_buff *skb)
{
@@ -237,8 +327,11 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
/* Wait for the complete interrupt - URBD0 */
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
- if (!ret)
+ if (!ret) {
+ bt_dev_err(data->hdev, "tx completion timeout");
+ btintel_pcie_dump_debug_registers(data->hdev);
return -ETIME;
+ }
return 0;
}
@@ -303,8 +396,13 @@ static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
{
int i, ret;
+ struct rxq *rxq = &data->rxq;
+
+	/* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a
+	 * hardware issue that leads to a race condition in the firmware.
+	 */
- for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
+ for (i = 0; i < rxq->count - 3; i++) {
ret = btintel_pcie_submit_rx(data);
if (ret)
return ret;
@@ -756,6 +854,26 @@ static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
return 0;
}
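
+/* The GP0 interrupt handler caches the boot-stage register in
+ * data->boot_stage_cache; these predicates test that cache for the
+ * lockdown and error states reported by the controller.
+ */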
+static inline bool btintel_pcie_in_lockdown(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN) ||
+ (data->boot_stage_cache &
+ BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN);
+}
+
+static inline bool btintel_pcie_in_error(struct btintel_pcie_data *data)
+{
+ return (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR) ||
+ (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER);
+}
+
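+/* A GP1 mailbox interrupt is treated as an error indication: log it
+ * and capture the debug registers for triage.
+ */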
+static void btintel_pcie_msix_gp1_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received gp1 mailbox interrupt");
+ btintel_pcie_dump_debug_registers(data->hdev);
+}
+
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
@@ -779,6 +897,18 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
if (reg != data->img_resp_cache)
data->img_resp_cache = reg;
+ if (btintel_pcie_in_error(data)) {
+ bt_dev_err(data->hdev, "Controller in error state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
+ if (btintel_pcie_in_lockdown(data)) {
+ bt_dev_err(data->hdev, "Controller in lockdown state");
+ btintel_pcie_dump_debug_registers(data->hdev);
+ return;
+ }
+
data->gp0_received = true;
old_ctxt = data->alive_intr_ctxt;
@@ -889,7 +1019,6 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *)skb->data;
- const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
@@ -945,15 +1074,6 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
}
}
- /* Handle all diagnostics events separately. May still call
- * hci_recv_frame.
- */
- if (len >= sizeof(diagnostics_hdr) &&
- memcmp(&skb->data[2], diagnostics_hdr,
- sizeof(diagnostics_hdr)) == 0) {
- return btintel_diagnostics(hdev, skb);
- }
-
/* This is a debug event that comes from IML and OP image when it
 * starts execution. There is no need to pass this event to the stack.
*/
@@ -1343,6 +1463,9 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
btintel_pcie_msix_hw_exp_handler(data);
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
+ btintel_pcie_msix_gp1_handler(data);
+
/* This interrupt is triggered by the firmware after updating
* boot_stage register and image_response register
*/
@@ -1664,8 +1787,8 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
* + size of index * Number of queues(2) * type of index array(4)
* + size of context information
*/
- total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
- + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
+ total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
+ total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
/* Add the sum of size of index array and size of ci struct */
total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
@@ -1690,36 +1813,36 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
data->dma_v_addr = v_addr;
/* Setup descriptor count */
- data->txq.count = BTINTEL_DESCS_COUNT;
- data->rxq.count = BTINTEL_DESCS_COUNT;
+ data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
+ data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
/* Setup tfds */
data->txq.tfds_p_addr = p_addr;
data->txq.tfds = v_addr;
- p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup urbd0 */
data->txq.urbd0s_p_addr = p_addr;
data->txq.urbd0s = v_addr;
- p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup FRBD*/
data->rxq.frbds_p_addr = p_addr;
data->rxq.frbds = v_addr;
- p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup urbd1 */
data->rxq.urbd1s_p_addr = p_addr;
data->rxq.urbd1s = v_addr;
- p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup data buffers for txq */
err = btintel_pcie_setup_txq_bufs(data, &data->txq);
@@ -2028,6 +2151,7 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
bt_dev_err(hdev, "Firmware download retry count: %d",
fw_dl_retry);
+ btintel_pcie_dump_debug_registers(hdev);
err = btintel_pcie_reset_bt(data);
if (err) {
bt_dev_err(hdev, "Failed to do shr reset: %d", err);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 873178019cad..7dad4523236c 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -12,10 +12,17 @@
#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028)
#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C)
#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108)
+#define BTINTEL_PCIE_CSR_IPC_CONTROL_REG (BTINTEL_PCIE_CSR_BASE + 0x10C)
+#define BTINTEL_PCIE_CSR_IPC_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x110)
#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG (BTINTEL_PCIE_CSR_BASE + 0x114)
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
+#define BTINTEL_PCIE_CSR_MBOX_1_REG (BTINTEL_PCIE_CSR_BASE + 0x170)
+#define BTINTEL_PCIE_CSR_MBOX_2_REG (BTINTEL_PCIE_CSR_BASE + 0x174)
+#define BTINTEL_PCIE_CSR_MBOX_3_REG (BTINTEL_PCIE_CSR_BASE + 0x178)
+#define BTINTEL_PCIE_CSR_MBOX_4_REG (BTINTEL_PCIE_CSR_BASE + 0x17C)
+#define BTINTEL_PCIE_CSR_MBOX_STATUS_REG (BTINTEL_PCIE_CSR_BASE + 0x180)
#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440)
#define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458)
#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460)
@@ -41,6 +48,9 @@
#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW (BIT(2))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN (BIT(10))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_ERR (BIT(12))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER (BIT(13))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED (BIT(14))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY (BIT(24))
@@ -89,6 +99,7 @@ enum msix_fh_int_causes {
/* Causes for the HW register interrupts */
enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1 = BIT(1), /* cause 33 */
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */
};
@@ -121,6 +132,14 @@ enum btintel_pcie_tlv_type {
BTINTEL_FW_BUILD,
};
+/* Causes for the MBOX interrupts */
+enum msix_mbox_int_causes {
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1 = BIT(0), /* cause MBOX1 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2 = BIT(1), /* cause MBOX2 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3 = BIT(2), /* cause MBOX3 */
+ BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4 = BIT(3), /* cause MBOX4 */
+};
+
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
/* Minimum and Maximum number of MSI-X Vectors
@@ -135,8 +154,11 @@ enum btintel_pcie_tlv_type {
/* Default interrupt timeout in msec */
#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
-/* The number of descriptors in TX/RX queues */
-#define BTINTEL_DESCS_COUNT 16
+/* The number of descriptors in TX queues */
+#define BTINTEL_PCIE_TX_DESCS_COUNT 32
+
+/* The number of descriptors in RX queues */
+#define BTINTEL_PCIE_RX_DESCS_COUNT 64
/* Number of queues for TX and RX.
 * It indicates the index of the IA (Index Array)
@@ -158,9 +180,6 @@ enum {
/* Doorbell vector for TFD */
#define BTINTEL_PCIE_TX_DB_VEC 0
-/* Number of pending RX requests for downlink */
-#define BTINTEL_PCIE_RX_MAX_QUEUE 6
-
/* Doorbell vector for FRBD */
#define BTINTEL_PCIE_RX_DB_VEC 513
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 07cd308f7abf..93932a0d8625 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -100,7 +100,9 @@ static int btmrvl_sdio_probe_of(struct device *dev,
}
/* Configure wakeup (enabled by default) */
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
}
}
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 1d26207b2ba7..c16a3518b8ff 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1414,7 +1414,7 @@ static int btmtksdio_probe(struct sdio_func *func,
*/
pm_runtime_put_noidle(bdev->dev);
- err = device_init_wakeup(bdev->dev, true);
+ err = devm_device_init_wakeup(bdev->dev);
if (err)
bt_dev_err(hdev, "failed to initialize device wakeup");
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 604ab2bba231..1088db6056a4 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -17,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/string_helpers.h>
#include <linux/gpio/consumer.h>
+#include <linux/of_irq.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -143,7 +144,9 @@ struct ps_data {
bool driver_sent_cmd;
u16 h2c_ps_interval;
u16 c2h_ps_interval;
+ bool wakeup_source;
struct gpio_desc *h2c_ps_gpio;
+ s32 irq_handler;
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
@@ -464,7 +467,7 @@ static void ps_work_func(struct work_struct *work)
static void ps_timeout_func(struct timer_list *t)
{
- struct ps_data *data = from_timer(data, t, ps_timer);
+ struct ps_data *data = timer_container_of(data, t, ps_timer);
struct hci_dev *hdev = data->hdev;
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -476,12 +479,21 @@ static void ps_timeout_func(struct timer_list *t)
}
}
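
+/* No-op handler for the optional out-of-band host-wakeup interrupt:
+ * its only job is to bring the host out of system suspend, so it just
+ * logs the wake event and returns.
+ */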
+static irqreturn_t ps_host_wakeup_irq_handler(int irq, void *priv)
+{
+ struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)priv;
+
+ bt_dev_dbg(nxpdev->hdev, "Host wakeup interrupt");
+ return IRQ_HANDLED;
+}
+
static int ps_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct serdev_device *serdev = nxpdev->serdev;
struct ps_data *psdata = &nxpdev->psdata;
+ int ret;
+ /* Out-Of-Band Device Wakeup */
psdata->h2c_ps_gpio = devm_gpiod_get_optional(&serdev->dev, "device-wakeup",
GPIOD_OUT_LOW);
if (IS_ERR(psdata->h2c_ps_gpio)) {
@@ -493,11 +505,39 @@ static int ps_setup(struct hci_dev *hdev)
if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) {
psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */
} else if (!psdata->h2c_ps_gpio) {
- bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup GPIO");
+ bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup-gpios");
psdata->h2c_wakeup_gpio = 0xff;
}
- device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio);
+ /* Out-Of-Band Host Wakeup */
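+	/* Host wakeup requires both a "wakeup" interrupt (declared via the
+	 * wakeup-source property) and the nxp,wakeout-pin property; if
+	 * either is missing, wakeup_source stays false and the IRQ is not
+	 * used.
+	 */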
+ if (of_property_read_bool(serdev->dev.of_node, "wakeup-source")) {
+ psdata->irq_handler = of_irq_get_byname(serdev->dev.of_node, "wakeup");
+ bt_dev_info(nxpdev->hdev, "irq_handler: %d", psdata->irq_handler);
+ if (psdata->irq_handler > 0)
+ psdata->wakeup_source = true;
+ }
+
+ if (device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio)) {
+ psdata->c2h_wakeup_gpio = 0xff;
+ if (psdata->wakeup_source) {
+ bt_dev_warn(hdev, "host wakeup interrupt without nxp,wakeout-pin");
+ psdata->wakeup_source = false;
+ }
+ } else if (!psdata->wakeup_source) {
+ bt_dev_warn(hdev, "nxp,wakeout-pin property without host wakeup interrupt");
+ psdata->c2h_wakeup_gpio = 0xff;
+ }
+
+ if (psdata->wakeup_source) {
+ ret = devm_request_irq(&serdev->dev, psdata->irq_handler,
+ ps_host_wakeup_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ dev_name(&serdev->dev), nxpdev);
+ if (ret)
+			bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring");
+ disable_irq(psdata->irq_handler);
+ device_init_wakeup(&serdev->dev, true);
+ }
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
@@ -637,12 +677,10 @@ static void ps_init(struct hci_dev *hdev)
psdata->ps_state = PS_STATE_AWAKE;
- if (psdata->c2h_wakeup_gpio) {
+ if (psdata->c2h_wakeup_gpio != 0xff)
psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO;
- } else {
+ else
psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
- psdata->c2h_wakeup_gpio = 0xff;
- }
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
if (psdata->h2c_ps_gpio)
@@ -1821,6 +1859,11 @@ static int nxp_serdev_suspend(struct device *dev)
struct ps_data *psdata = &nxpdev->psdata;
ps_control(psdata->hdev, PS_STATE_SLEEP);
+
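+	/* The wake IRQ is kept disabled at runtime (see ps_setup()); arm
+	 * it as a wakeup source only for the duration of system suspend.
+	 */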
+ if (psdata->wakeup_source) {
+ enable_irq_wake(psdata->irq_handler);
+ enable_irq(psdata->irq_handler);
+ }
return 0;
}
@@ -1829,6 +1872,11 @@ static int nxp_serdev_resume(struct device *dev)
struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
struct ps_data *psdata = &nxpdev->psdata;
+ if (psdata->wakeup_source) {
+ disable_irq(psdata->irq_handler);
+ disable_irq_wake(psdata->irq_handler);
+ }
+
ps_control(psdata->hdev, PS_STATE_AWAKE);
return 0;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a42dedb78e0a..9ab661d2d1e6 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btbcm.h"
@@ -34,7 +35,6 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
-static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);
static struct usb_driver btusb_driver;
@@ -513,6 +513,7 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* Realtek 8851BE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
/* Realtek 8852AE Bluetooth devices */
@@ -678,6 +679,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3584), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
@@ -716,8 +719,12 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -1118,42 +1125,6 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
-static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
-{
- struct hci_event_hdr *hdr = (void *) skb->data;
- struct hci_ev_sync_conn_complete *ev =
- (void *) skb->data + sizeof(*hdr);
- struct hci_dev *hdev = data->hdev;
- unsigned int notify_air_mode;
-
- if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
- return;
-
- if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
- return;
-
- if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
- return;
-
- switch (ev->air_mode) {
- case BT_CODEC_CVSD:
- notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
- break;
-
- case BT_CODEC_TRANSPARENT:
- notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
- break;
-
- default:
- return;
- }
-
- bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
- data->sco_num = 1;
- data->air_mode = notify_air_mode;
- schedule_work(&data->work);
-}
-
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1161,10 +1132,6 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}
- /* Configure altsetting for HCI_USER_CHANNEL on SCO connected */
- if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
- btusb_sco_connected(data, skb);
-
return data->recv_event(data->hdev, skb);
}
@@ -3014,9 +2981,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
int ret = 0;
+ unsigned int skip = 0;
u8 pkt_type;
- u8 *sk_ptr;
- unsigned int sk_len;
u16 seqno;
u32 dump_size;
@@ -3025,18 +2991,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
struct usb_device *udev = btdata->udev;
pkt_type = hci_skb_pkt_type(skb);
- sk_ptr = skb->data;
- sk_len = skb->len;
-
- if (pkt_type == HCI_ACLDATA_PKT) {
- sk_ptr += HCI_ACL_HDR_SIZE;
- sk_len -= HCI_ACL_HDR_SIZE;
- }
+ skip = sizeof(struct hci_event_hdr);
+ if (pkt_type == HCI_ACLDATA_PKT)
+ skip += sizeof(struct hci_acl_hdr);
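+
+	/* Strip the transport headers in place; whatever remains of the
+	 * dump payload is later appended to the devcoredump with
+	 * hci_devcd_append().
+	 */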
- sk_ptr += HCI_EVENT_HDR_SIZE;
- sk_len -= HCI_EVENT_HDR_SIZE;
+ skb_pull(skb, skip);
+ dump_hdr = (struct qca_dump_hdr *)skb->data;
- dump_hdr = (struct qca_dump_hdr *)sk_ptr;
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3056,16 +3017,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
btdata->qca_dump.ram_dump_size = dump_size;
btdata->qca_dump.ram_dump_seqno = 0;
- sk_ptr += offsetof(struct qca_dump_hdr, data0);
- sk_len -= offsetof(struct qca_dump_hdr, data0);
+
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
usb_disable_autosuspend(udev);
bt_dev_info(hdev, "%s memdump size(%u)\n",
(pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
dump_size);
} else {
- sk_ptr += offsetof(struct qca_dump_hdr, data);
- sk_len -= offsetof(struct qca_dump_hdr, data);
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data));
}
if (!btdata->qca_dump.ram_dump_size) {
@@ -3085,7 +3045,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
return ret;
}
- skb_pull(skb, skb->len - sk_len);
hci_devcd_append(hdev, skb);
btdata->qca_dump.ram_dump_seqno++;
if (seqno == QCA_LAST_SEQUENCE_NUM) {
@@ -3113,68 +3072,58 @@ out:
/* Return: true if the ACL packet is a dump packet, false otherwise. */
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- u8 *sk_ptr;
- unsigned int sk_len;
-
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
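+
+	/* Parse a throwaway clone with skb_pull_data() so every header
+	 * length is validated before use and the caller's skb is left
+	 * untouched.
+	 */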
- sk_ptr = skb->data;
- sk_len = skb->len;
-
- acl_hdr = hci_acl_hdr(skb);
- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
+ if (!clone)
return false;
- sk_ptr += HCI_ACL_HDR_SIZE;
- sk_len -= HCI_ACL_HDR_SIZE;
- event_hdr = (struct hci_event_hdr *)sk_ptr;
-
- if ((event_hdr->evt != HCI_VENDOR_PKT) ||
- (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
- return false;
+ acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
+ if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
+ goto out;
- sk_ptr += HCI_EVENT_HDR_SIZE;
- sk_len -= HCI_EVENT_HDR_SIZE;
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
- dump_hdr = (struct qca_dump_hdr *)sk_ptr;
- if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
- (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
- (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
- return false;
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
- return true;
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
}
/* Return: true if the event packet is a dump packet, false otherwise. */
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- u8 *sk_ptr;
- unsigned int sk_len;
-
struct hci_event_hdr *event_hdr;
struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
- sk_ptr = skb->data;
- sk_len = skb->len;
-
- event_hdr = hci_event_hdr(skb);
-
- if ((event_hdr->evt != HCI_VENDOR_PKT)
- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+ if (!clone)
return false;
- sk_ptr += HCI_EVENT_HDR_SIZE;
- sk_len -= HCI_EVENT_HDR_SIZE;
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
- dump_hdr = (struct qca_dump_hdr *)sk_ptr;
- if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
- (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
- (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
- return false;
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
- return true;
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
}
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3771,31 +3720,133 @@ static const struct file_operations force_poll_sync_fops = {
.llseek = default_llseek,
};
-static ssize_t isoc_alt_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+#define BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0000)
+#define BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE 0
+
+struct btusb_hci_drv_rp_supported_altsettings {
+ __u8 num;
+ __u8 altsettings[];
+} __packed;
+
+#define BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING \
+ hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0001)
+#define BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE 1
+struct btusb_hci_drv_cmd_switch_altsetting {
+ __u8 altsetting;
+} __packed;
+
+static const struct {
+ u16 opcode;
+ const char *desc;
+} btusb_hci_drv_supported_commands[] = {
+ /* Common commands */
+ { HCI_DRV_OP_READ_INFO, "Read Info" },
+
+ /* Driver specific commands */
+ { BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS, "Supported Altsettings" },
+ { BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING, "Switch Altsetting" },
+};
+
+static int btusb_hci_drv_read_info(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct hci_drv_rp_read_info *rp;
+ size_t rp_size;
+ int err, i;
+ u16 opcode, num_supported_commands =
+ ARRAY_SIZE(btusb_hci_drv_supported_commands);
+
+ rp_size = sizeof(*rp) + num_supported_commands * 2;
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ strscpy_pad(rp->driver_name, btusb_driver.name);
+
+ rp->num_supported_commands = cpu_to_le16(num_supported_commands);
+ for (i = 0; i < num_supported_commands; i++) {
+ opcode = btusb_hci_drv_supported_commands[i].opcode;
+ bt_dev_info(hdev,
+ "Supported HCI Drv command (0x%02x|0x%04x): %s",
+ hci_opcode_ogf(opcode),
+ hci_opcode_ocf(opcode),
+ btusb_hci_drv_supported_commands[i].desc);
+ rp->supported_commands[i] = cpu_to_le16(opcode);
+ }
+
+ err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+
+ kfree(rp);
+ return err;
+}
+
+static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data,
+ u16 data_len)
{
- struct btusb_data *data = dev_get_drvdata(dev);
+ struct btusb_data *drvdata = hci_get_drvdata(hdev);
+ struct btusb_hci_drv_rp_supported_altsettings *rp;
+ size_t rp_size;
+ int err;
+ u8 i;
+
+	/* There are at most 7 altsettings (0 - 6) */
+	rp = kmalloc(sizeof(*rp) + 7, GFP_KERNEL);
+	if (!rp)
+		return -ENOMEM;
+
+ rp->num = 0;
+ if (!drvdata->isoc)
+ goto done;
+
+ for (i = 0; i <= 6; i++) {
+ if (btusb_find_altsetting(drvdata, i))
+ rp->altsettings[rp->num++] = i;
+ }
- return sysfs_emit(buf, "%d\n", data->isoc_altsetting);
+done:
+ rp_size = sizeof(*rp) + rp->num;
+
+ err = hci_drv_cmd_complete(hdev, BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS,
+ HCI_DRV_STATUS_SUCCESS, rp, rp_size);
+ kfree(rp);
+ return err;
}
-static ssize_t isoc_alt_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int btusb_hci_drv_switch_altsetting(struct hci_dev *hdev, void *data,
+ u16 data_len)
{
- struct btusb_data *data = dev_get_drvdata(dev);
- int alt;
- int ret;
+ struct btusb_hci_drv_cmd_switch_altsetting *cmd = data;
+ u8 status;
- if (kstrtoint(buf, 10, &alt))
- return -EINVAL;
+ if (cmd->altsetting > 6) {
+ status = HCI_DRV_STATUS_INVALID_PARAMETERS;
+ } else {
+ if (btusb_switch_alt_setting(hdev, cmd->altsetting))
+ status = HCI_DRV_STATUS_UNSPECIFIED_ERROR;
+ else
+ status = HCI_DRV_STATUS_SUCCESS;
+ }
- ret = btusb_switch_alt_setting(data->hdev, alt);
- return ret < 0 ? ret : count;
+ return hci_drv_cmd_status(hdev, BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING,
+ status);
}
-static DEVICE_ATTR_RW(isoc_alt);
+static const struct hci_drv_handler btusb_hci_drv_common_handlers[] = {
+ { btusb_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
+};
+
+static const struct hci_drv_handler btusb_hci_drv_specific_handlers[] = {
+ { btusb_hci_drv_supported_altsettings,
+ BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE },
+ { btusb_hci_drv_switch_altsetting,
+ BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE },
+};
+
+static struct hci_drv btusb_hci_drv = {
+ .common_handler_count = ARRAY_SIZE(btusb_hci_drv_common_handlers),
+ .common_handlers = btusb_hci_drv_common_handlers,
+ .specific_handler_count = ARRAY_SIZE(btusb_hci_drv_specific_handlers),
+ .specific_handlers = btusb_hci_drv_specific_handlers,
+};
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -3936,12 +3987,13 @@ static int btusb_probe(struct usb_interface *intf,
data->reset_gpio = reset_gpio;
}
- hdev->open = btusb_open;
- hdev->close = btusb_close;
- hdev->flush = btusb_flush;
- hdev->send = btusb_send_frame;
- hdev->notify = btusb_notify;
- hdev->wakeup = btusb_wakeup;
+ hdev->open = btusb_open;
+ hdev->close = btusb_close;
+ hdev->flush = btusb_flush;
+ hdev->send = btusb_send_frame;
+ hdev->notify = btusb_notify;
+ hdev->wakeup = btusb_wakeup;
+ hdev->hci_drv = &btusb_hci_drv;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -4160,10 +4212,6 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc, data);
if (err < 0)
goto out_free_dev;
-
- err = device_create_file(&intf->dev, &dev_attr_isoc_alt);
- if (err)
- goto out_free_dev;
}
if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
@@ -4210,10 +4258,8 @@ static void btusb_disconnect(struct usb_interface *intf)
hdev = data->hdev;
usb_set_intfdata(data->intf, NULL);
- if (data->isoc) {
- device_remove_file(&intf->dev, &dev_attr_isoc_alt);
+ if (data->isoc)
usb_set_intfdata(data->isoc, NULL);
- }
if (data->diag)
usb_set_intfdata(data->diag, NULL);
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index dc9541e76d81..1394c575aa6d 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -313,8 +313,7 @@ static int aml_download_firmware(struct hci_dev *hdev, const char *fw_name)
goto exit;
exit:
- if (firmware)
- release_firmware(firmware);
+ release_firmware(firmware);
return ret;
}
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 610d0e3c36d4..664d82d1e613 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -688,7 +688,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
/* Arrange to retransmit all messages in the relq. */
static void bcsp_timed_event(struct timer_list *t)
{
- struct bcsp_struct *bcsp = from_timer(bcsp, t, tbcsp);
+ struct bcsp_struct *bcsp = timer_container_of(bcsp, t, tbcsp);
struct hci_uart *hu = bcsp->hu;
struct sk_buff *skb;
unsigned long flags;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index edafa228bf83..d0d4420c1a0f 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -149,7 +149,7 @@ static void h5_timed_event(struct timer_list *t)
{
const unsigned char sync_req[] = { 0x01, 0x7e };
unsigned char conf_req[3] = { 0x03, 0xfc };
- struct h5 *h5 = from_timer(h5, t, timer);
+ struct h5 *h5 = timer_container_of(h5, t, timer);
struct hci_uart *hu = h5->hu;
struct sk_buff *skb;
unsigned long flags;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e00590ba24fd..5fe5879881f5 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -474,7 +474,7 @@ static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
- struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
+ struct qca_data *qca = timer_container_of(qca, t, tx_idle_timer);
struct hci_uart *hu = qca->hu;
unsigned long flags;
@@ -507,7 +507,7 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t)
static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
- struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
+ struct qca_data *qca = timer_container_of(qca, t, wake_retrans_timer);
struct hci_uart *hu = qca->hu;
unsigned long flags, retrans_delay;
bool retransmit = false;
@@ -2415,14 +2415,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en) &&
- (data->soc_type == QCA_WCN6750 ||
- data->soc_type == QCA_WCN6855)) {
- dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- return PTR_ERR(qcadev->bt_en);
- }
+ if (IS_ERR(qcadev->bt_en))
+ return dev_err_probe(&serdev->dev,
+ PTR_ERR(qcadev->bt_en),
+ "failed to acquire BT_EN gpio\n");
- if (!qcadev->bt_en)
+ if (!qcadev->bt_en &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855))
power_ctrl_enabled = false;
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index ee29162da4ee..91ef99c42344 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -395,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = {
&dev_attr_gisb_arb_timeout.attr,
NULL,
};
-
-static struct attribute_group gisb_arb_sysfs_attr_group = {
- .attrs = gisb_arb_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(gisb_arb_sysfs);
static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
@@ -490,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
- if (err)
- return err;
-
platform_set_drvdata(pdev, gdev);
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
@@ -550,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .dev_groups = gisb_arb_sysfs_groups,
},
};
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 52053f7c6d9a..c63a7e688db6 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -806,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
dev_set_msi_domain(&mc_dev->dev, NULL);
}
- fsl_mc_cleanup_all_resource_pools(mc_dev);
-
/* if this step fails we cannot go further with cleanup as there is no way of
* communicating with the firmware
*/
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index dd1b5c0fb7e2..38d40c09b719 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -489,7 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
cmd_params->obj_id = cpu_to_le32(obj_id);
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -561,7 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 6c3beb82dd1b..d2ea59471323 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -555,27 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
}
}
-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
- devm_kfree(&mc_bus_dev->dev, resource);
-}
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index a8be8cf246fb..7671bd158545 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *e
static int fsl_mc_dma_configure(struct device *dev)
{
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
@@ -153,8 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- /* @mc_drv may not be valid when we're called from the IOMMU layer */
- if (!ret && dev->driver && !mc_drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -906,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
error_cleanup_dev:
kfree(mc_dev->regions);
- kfree(mc_bus);
- kfree(mc_dev);
+ if (mc_bus)
+ kfree(mc_bus);
+ else
+ kfree(mc_dev);
return error;
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index e1b7ec3ed1a7..beed4c53533d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -629,8 +629,6 @@ int __init fsl_mc_allocator_driver_init(void);
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_resource
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
index 9c4c1395fcdb..823969e4159c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -48,6 +48,7 @@ enum fsl_mc_cmd_index {
DPRC_GET_POOL,
DPRC_GET_POOL_COUNT,
DPRC_GET_CONNECTION,
+ DPRC_GET_MEM,
DPCI_GET_LINK_STATE,
DPCI_GET_PEER_ATTR,
DPAIOP_GET_SL_VERSION,
@@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.token = true,
.size = 32,
},
+ [DPRC_GET_MEM] = {
+ .cmdid_value = 0x16D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
[DPCI_GET_LINK_STATE] = {
.cmdid_value = 0x0E10,
@@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.size = 8,
},
[DPSW_GET_TAILDROP] = {
- .cmdid_value = 0x0A80,
+ .cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
- .cmdid_value = 0x0A90,
+ .cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index a0ad7866cbfc..cd8754763f40 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;
- dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
- &dpmcp_dev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER);
- if (!dpmcp_dev->consumer_link) {
- error = -EINVAL;
- goto error_cleanup_mc_io;
+ /* If the DPRC device itself tries to allocate a portal (usually for
+ * UAPI interaction), don't add a device link between them since the
+ * DPMCP device is an actual child device of the DPRC and a reverse
+ * dependency is not allowed.
+ */
+ if (mc_dev != mc_bus_dev) {
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
}
*new_mc_io = mc_io;
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index f2052cd0a051..b22c59d57c8f 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -19,7 +19,7 @@
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
/*
* usleep_range() min and max values used to throttle down polling
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index aeb53b2c34a8..26357ee68dee 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -131,19 +131,23 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
}
old_offset = ring->rd_offset;
- mhi_ep_ring_inc_index(ring);
dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+ buf_info.dev_addr = el;
+ buf_info.size = sizeof(*el);
+
+ ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ if (ret)
+ return ret;
+
+ mhi_ep_ring_inc_index(ring);
/* Update rp in ring context */
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
- buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
- buf_info.dev_addr = el;
- buf_info.size = sizeof(*el);
-
- return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ return ret;
}
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 03aa88795209..589cb6722316 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -782,6 +782,42 @@ static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
.mru_default = 32768,
};
+static const struct mhi_channel_config mhi_telit_fn920c04_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
+};
+
+static const struct mhi_controller_config modem_telit_fn920c04_config = {
+ .max_channels = 128,
+ .timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn920c04_channels),
+ .ch_cfg = mhi_telit_fn920c04_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+ .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn920c04_info = {
+ .name = "telit-fn920c04",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
.name = "netprisma-lcur57",
.edl = "qcom/prog_firehose_sdx24.mbn",
@@ -806,6 +842,9 @@ static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
static const struct pci_device_id mhi_pci_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0116),
.driver_data = (kernel_ulong_t) &mhi_qcom_sa8775p_info },
+ /* Telit FN920C04 (sdx35) */
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x011a, 0x1c5d, 0x2020),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn920c04_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
@@ -996,10 +1035,6 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
int err;
- err = pci_assign_resource(pdev, bar_num);
- if (err)
- return err;
-
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
@@ -1136,7 +1171,8 @@ err_try_reset:
static void health_check(struct timer_list *t)
{
- struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+ struct mhi_pci_device *mhi_pdev = timer_container_of(mhi_pdev, t,
+ health_check_timer);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 2fb27e6f8f88..33d92bf2fc3e 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -602,6 +602,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
struct mhi_cmd *mhi_cmd;
struct mhi_event_ctxt *er_ctxt;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ bool reset_device = false;
int ret, i;
dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
@@ -630,8 +631,23 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);
- /* Trigger MHI RESET so that the device will not access host memory */
if (MHI_REG_ACCESS_VALID(prev_state)) {
+ /*
+ * If the device is in PBL or SBL, it will only respond to
+ * RESET if the device is in SYSERR state. SYSERR might
+ * already be cleared at this point.
+ */
+ enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl);
+ enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl);
+
+ if (cur_state == MHI_STATE_SYS_ERR)
+ reset_device = true;
+ else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL)
+ reset_device = true;
+ }
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (reset_device) {
u32 in_reset = -1;
unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 1e57ebfb7622..6c3e5c5dae10 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -737,9 +737,9 @@ static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
- moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
- MOXTET_NIRQS, 0,
- &moxtet_irq_domain, moxtet);
+ moxtet->irq.domain = irq_domain_create_simple(of_fwnode_handle(moxtet->dev->of_node),
+ MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f67b927ae4ca..9f624e5da991 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -677,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
-/* Interconnect instances to probe before l4_per instances */
-static struct resource early_bus_ranges[] = {
- /* am3/4 l4_wkup */
- { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
- /* omap4/5 and dra7 l4_cfg */
- { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
- /* omap4 l4_wkup */
- { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
- /* omap5 and dra7 l4_wkup without dra7 dcan segment */
- { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
-};
-
-static atomic_t sysc_defer = ATOMIC_INIT(10);
-
-/**
- * sysc_defer_non_critical - defer non_critical interconnect probing
- * @ddata: device driver data
- *
- * We want to probe l4_cfg and l4_wkup interconnect instances before any
- * l4_per instances as l4_per instances depend on resources on l4_cfg and
- * l4_wkup interconnects.
- */
-static int sysc_defer_non_critical(struct sysc *ddata)
-{
- struct resource *res;
- int i;
-
- if (!atomic_read(&sysc_defer))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
- res = &early_bus_ranges[i];
- if (ddata->module_pa >= res->start &&
- ddata->module_pa <= res->end) {
- atomic_set(&sysc_defer, 0);
-
- return 0;
- }
- }
-
- atomic_dec_if_positive(&sysc_defer);
-
- return -EPROBE_DEFER;
-}
-
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@@ -947,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
- error = sysc_defer_non_critical(ddata);
- if (error)
- return error;
-
sysc_check_children(ddata);
if (!of_property_present(np, "reg"))
@@ -2036,6 +1987,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
+static void sysc_module_enable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+	 * Clear the SYSC_PRUSS_STANDBY_INIT bit to update the OCP master
+	 * port configuration and enable memory access outside of the
+ * PRU-ICSS subsystem.
+ */
+ reg &= (~SYSC_PRUSS_STANDBY_INIT);
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
@@ -2088,8 +2054,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_pruss;
ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 6874b72ec59d..e1a283805ea7 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -118,6 +118,8 @@ static void ccache_config_read(void)
}
static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "eswin,eic7700-l3-cache",
+ .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS) },
{ .compatible = "sifive,fu540-c000-ccache" },
{ .compatible = "sifive,fu740-c000-ccache" },
{ .compatible = "starfive,jh7100-ccache",
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index b163e043c687..21a10552da61 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3677,8 +3677,7 @@ static void cdrom_sysctl_register(void)
static void cdrom_sysctl_unregister(void)
{
- if (cdrom_sysctl_header)
- unregister_sysctl_table(cdrom_sysctl_header);
+ unregister_sysctl_table(cdrom_sysctl_header);
}
#else /* CONFIG_SYSCTL */
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index 06d723978232..3388a5d1462c 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -165,7 +165,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
struct device_node *parent_node;
struct irq_domain *parent;
- fwnode_handle = of_node_to_fwnode(np);
+ fwnode_handle = of_fwnode_handle(np);
parent_node = of_parse_phandle(np, "msi-map", 1);
if (!parent_node) {
@@ -173,7 +173,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
return NULL;
}
- parent = irq_find_matching_fwnode(of_node_to_fwnode(parent_node), DOMAIN_BUS_NEXUS);
+ parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
if (!parent || !msi_get_domain_info(parent)) {
dev_err(dev, "unable to locate ITS domain\n");
return NULL;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 8fb33c90482f..ae6196760556 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -404,7 +404,7 @@ config TELCLOCK
configuration of the telecom clock configuration settings. This
device is used for hardware synchronization across the ATCA backplane
fabric. Upon loading, the driver exports a sysfs directory,
- /sys/devices/platform/telco_clock, with a number of files for
+ /sys/devices/faux/telco_clock, with a number of files for
controlling the behavior of this hardware.
source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 8e41731d3642..bf490967241a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -16,7 +16,7 @@
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
#include <asm/e820/api.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <asm/gart.h>
#include "agp.h"
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index ef30445527a2..bcc26785175d 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -53,6 +53,7 @@ struct intel_gtt_driver {
* of the mmio register file, that's done in the generic code. */
void (*cleanup)(void);
void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local);
/* Flags is a more or less chipset specific opaque value.
* For chipsets that need to support old ums (non-gem) code, this
* needs to be identical to the various supported agp memory types! */
@@ -336,6 +337,19 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
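
+/* Decode a GTT PTE back into the DMA address it maps, reporting whether
+ * the entry is present and (on i810) whether it points at local memory.
+ */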
+static dma_addr_t i810_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = val & I810_PTE_LOCAL;
+
+ return val & ~0xfff;
+}
+
static resource_size_t intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
@@ -741,6 +755,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i830_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u32 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
+ return val & ~0xfff;
+}
+
bool intel_gmch_enable_gtt(void)
{
u8 __iomem *reg;
@@ -878,6 +905,13 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
}
EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
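+
+/* Read back a single GTT entry through the chipset-specific read_entry
+ * hook, so GTT users can inspect existing mappings as well as write
+ * them.
+ */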
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local)
+{
+ return intel_private.driver->read_entry(pg, is_present, is_local);
+}
+EXPORT_SYMBOL(intel_gmch_gtt_read_entry);
+
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
@@ -1126,6 +1160,19 @@ static void i965_write_entry(dma_addr_t addr,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
+static dma_addr_t i965_read_entry(unsigned int entry,
+ bool *is_present, bool *is_local)
+{
+ u64 val;
+
+ val = readl(intel_private.gtt + entry);
+
+ *is_present = val & I810_PTE_VALID;
+ *is_local = false;
+
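+	/* Gen4 PTEs keep address bits 35:32 in PTE bits 7:4; splice them
+	 * back in above the low 32-bit portion of the address.
+	 */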
+ return ((val & 0xf0) << 28) | (val & ~0xfff);
+}
+
static int i9xx_setup(void)
{
phys_addr_t reg_addr;
@@ -1187,6 +1234,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = {
.cleanup = i810_cleanup,
.check_flags = i830_check_flags,
.write_entry = i810_write_entry,
+ .read_entry = i810_read_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
.gen = 2,
@@ -1194,6 +1242,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
.setup = i830_setup,
.cleanup = i830_cleanup,
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i830_chipset_flush,
@@ -1205,6 +1254,7 @@ static const struct intel_gtt_driver i915_gtt_driver = {
.cleanup = i9xx_cleanup,
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
.write_entry = i830_write_entry,
+ .read_entry = i830_read_entry,
.dma_mask_size = 32,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1215,6 +1265,7 @@ static const struct intel_gtt_driver g33_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1225,6 +1276,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1235,6 +1287,7 @@ static const struct intel_gtt_driver i965_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1244,6 +1297,7 @@ static const struct intel_gtt_driver g4x_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
@@ -1254,6 +1308,7 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.setup = i9xx_setup,
.cleanup = i9xx_cleanup,
.write_entry = i965_write_entry,
+ .read_entry = i965_read_entry,
.dma_mask_size = 36,
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
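
The new read_entry hook mirrors write_entry: each chipset variant decodes its own PTE layout (i810 also reports a local-memory bit, while i965 recovers address bits 39:32 from PTE bits 7:4). As a rough illustration, a caller of the new export might look like the sketch below; the dump helper and its name are hypothetical, only intel_gmch_gtt_read_entry() comes from the patch.

#include <linux/printk.h>
#include <drm/intel-gtt.h>

/* Hypothetical debug helper: dump the first 'count' GTT entries. */
static void dump_gtt_entries(unsigned int count)
{
	unsigned int pg;

	for (pg = 0; pg < count; pg++) {
		bool present, local;
		dma_addr_t addr;

		addr = intel_gmch_gtt_read_entry(pg, &present, &local);
		pr_info("GTT[%u]: %pad present=%d local=%d\n",
			pg, &addr, present, local);
	}
}
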
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index e424360fb4a1..4787391bb6b4 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -11,6 +11,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
+#include <asm/msr.h>
#include "agp.h"
/* NVIDIA registers */
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index e795390b070f..53ce352f7197 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -141,9 +141,6 @@ static struct apm_queue kapmd_queue;
static DEFINE_MUTEX(state_lock);
-static const char driver_version[] = "1.13"; /* no spaces */
-
-
/*
* Compatibility cruft until the IPAQ people move over to the new
@@ -435,6 +432,8 @@ static struct miscdevice apm_device = {
*/
static int proc_apm_show(struct seq_file *m, void *v)
{
+ static const char driver_version[] = "1.13"; /* no spaces */
+
struct apm_power_info info;
char *units;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e110857824fc..0713ea2b2a51 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -1023,8 +1023,7 @@ static int __init hpet_init(void)
result = acpi_bus_register_driver(&hpet_acpi_driver);
if (result < 0) {
- if (sysctl_header)
- unregister_sysctl_table(sysctl_header);
+ unregister_sysctl_table(sysctl_header);
misc_deregister(&hpet_misc);
return result;
}
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 143406bc6939..d2b00458761e 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -37,6 +37,7 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
bool has_half_rate;
};
@@ -59,9 +60,9 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
u32 *data = buf;
int ret;
- ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ ret = pm_runtime_get_sync(trng->dev);
if (ret < 0) {
- pm_runtime_put_sync((struct device *)trng->rng.priv);
+ pm_runtime_put_sync(trng->dev);
return ret;
}
@@ -79,8 +80,8 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
ret = 4;
out:
- pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ pm_runtime_mark_last_busy(trng->dev);
+ pm_runtime_put_sync_autosuspend(trng->dev);
return ret;
}
@@ -134,9 +135,9 @@ static int atmel_trng_probe(struct platform_device *pdev)
return -ENODEV;
trng->has_half_rate = data->has_half_rate;
+ trng->dev = &pdev->dev;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
- trng->rng.priv = (unsigned long)&pdev->dev;
platform_set_drvdata(pdev, trng);
#ifndef CONFIG_PM
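
The same conversion repeats for the mtk and npcm drivers below: instead of smuggling the device pointer through the unsigned long hwrng.priv field with casts, the device is kept as a typed member and the driver state is recovered with container_of(). A condensed sketch of the pattern, with hypothetical foo_* names:

#include <linux/container_of.h>
#include <linux/hw_random.h>
#include <linux/pm_runtime.h>

struct foo_trng {
	void __iomem *base;
	struct hwrng rng;
	struct device *dev;	/* typed, replaces the rng.priv casts */
};

static int foo_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* recover the driver state from the embedded hwrng */
	struct foo_trng *trng = container_of(rng, struct foo_trng, rng);
	int ret;

	ret = pm_runtime_get_sync(trng->dev);
	if (ret < 0) {
		pm_runtime_put_sync(trng->dev);
		return ret;
	}
	/* ... read the hardware FIFO into buf ... */
	pm_runtime_mark_last_busy(trng->dev);
	pm_runtime_put_sync_autosuspend(trng->dev);
	return ret;
}
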
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 1e3048f2bb38..b7fa1bc1122b 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -36,6 +36,7 @@ struct mtk_rng {
void __iomem *base;
struct clk *clk;
struct hwrng rng;
+ struct device *dev;
};
static int mtk_rng_init(struct hwrng *rng)
@@ -85,7 +86,7 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct mtk_rng *priv = to_mtk_rng(rng);
int retval = 0;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max >= sizeof(u32)) {
if (!mtk_rng_wait_ready(rng, wait))
@@ -97,8 +98,8 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max -= sizeof(u32);
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -112,13 +113,13 @@ static int mtk_rng_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = &pdev->dev;
priv->rng.name = pdev->name;
#ifndef CONFIG_PM
priv->rng.init = mtk_rng_init;
priv->rng.cleanup = mtk_rng_cleanup;
#endif
priv->rng.read = mtk_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 9ff00f096f38..3e308c890bd2 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -32,6 +32,7 @@
struct npcm_rng {
void __iomem *base;
struct hwrng rng;
+ struct device *dev;
u32 clkp;
};
@@ -57,7 +58,7 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
int retval = 0;
int ready;
- pm_runtime_get_sync((struct device *)priv->rng.priv);
+ pm_runtime_get_sync(priv->dev);
while (max) {
if (wait) {
@@ -79,8 +80,8 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max--;
}
- pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
- pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv);
+ pm_runtime_mark_last_busy(priv->dev);
+ pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@@ -109,7 +110,7 @@ static int npcm_rng_probe(struct platform_device *pdev)
#endif
priv->rng.name = pdev->name;
priv->rng.read = npcm_rng_read;
- priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->dev = &pdev->dev;
priv->clkp = (u32)(uintptr_t)of_device_get_match_data(&pdev->dev);
writel(NPCM_RNG_M1ROSEL, priv->base + NPCM_RNGMODE_REG);
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
index 161050591663..fb4a30b95507 100644
--- a/drivers/char/hw_random/rockchip-rng.c
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -93,6 +93,30 @@
#define TRNG_v1_VERSION_CODE 0x46bc
/* end of TRNG_V1 register definitions */
+/*
+ * RKRNG register definitions
+ * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP)
+ * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528
+ * SoCs. It can either output true randomness (TRNG) or "deterministic"
+ * randomness derived from hashing the true entropy (DRNG). This driver
+ * implementation uses just the true entropy, and leaves stretching the entropy
+ * up to Linux.
+ */
+#define RKRNG_CFG 0x0000
+#define RKRNG_CTRL 0x0010
+#define RKRNG_CTRL_REQ_TRNG BIT(4)
+#define RKRNG_STATE 0x0014
+#define RKRNG_STATE_TRNG_RDY BIT(4)
+#define RKRNG_TRNG_DATA0 0x0050
+#define RKRNG_TRNG_DATA1 0x0054
+#define RKRNG_TRNG_DATA2 0x0058
+#define RKRNG_TRNG_DATA3 0x005C
+#define RKRNG_TRNG_DATA4 0x0060
+#define RKRNG_TRNG_DATA5 0x0064
+#define RKRNG_TRNG_DATA6 0x0068
+#define RKRNG_TRNG_DATA7 0x006C
+#define RKRNG_READ_LEN 32
+
/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
"You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");
@@ -205,6 +229,46 @@ out:
return (ret < 0) ? ret : to_read;
}
+static int rk3576_rng_init(struct hwrng *rng)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+
+ return rk_rng_enable_clks(rk_rng);
+}
+
+static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
+ size_t to_read = min_t(size_t, max, RKRNG_READ_LEN);
+ int ret = 0;
+ u32 val;
+
+ ret = pm_runtime_resume_and_get(rk_rng->dev);
+ if (ret < 0)
+ return ret;
+
+ rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16),
+ RKRNG_CTRL);
+
+ if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val,
+ (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US,
+ RK_RNG_POLL_TIMEOUT_US)) {
+ dev_err(rk_rng->dev, "timed out waiting for data\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE);
+
+ memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);
+
+out:
+ pm_runtime_mark_last_busy(rk_rng->dev);
+ pm_runtime_put_sync_autosuspend(rk_rng->dev);
+
+ return (ret < 0) ? ret : to_read;
+}
+
static int rk3588_rng_init(struct hwrng *rng)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
@@ -305,6 +369,14 @@ static const struct rk_rng_soc_data rk3568_soc_data = {
.reset_optional = false,
};
+static const struct rk_rng_soc_data rk3576_soc_data = {
+ .rk_rng_init = rk3576_rng_init,
+ .rk_rng_read = rk3576_rng_read,
+ .rk_rng_cleanup = rk3588_rng_cleanup,
+ .quality = 999, /* as determined by actual testing */
+ .reset_optional = true,
+};
+
static const struct rk_rng_soc_data rk3588_soc_data = {
.rk_rng_init = rk3588_rng_init,
.rk_rng_read = rk3588_rng_read,
@@ -397,6 +469,7 @@ static const struct dev_pm_ops rk_rng_pm_ops = {
static const struct of_device_id rk_rng_dt_match[] = {
{ .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
+ { .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data },
{ .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
{ /* sentinel */ },
};
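
The rk3576 read path above follows the usual Rockchip sequence: request entropy by setting REQ_TRNG (together with its write-enable bit in the upper half-word), poll STATE until TRNG_RDY, acknowledge by writing the bit back, then copy up to RKRNG_READ_LEN bytes out of the DATA registers. The "<< 16" is the SoC's per-bit write-enable idiom; a hypothetical helper makes the assumed semantics explicit:

#include <linux/io.h>

/*
 * Sketch (assumed semantics): the top 16 bits of Rockchip control
 * registers act as a write-enable mask for the low 16 bits, so a
 * bit can be set without a read-modify-write cycle.
 */
static inline void rk_write_masked(void __iomem *reg, u16 bits)
{
	writel((u32)bits | ((u32)bits << 16), reg);
}
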
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index a1a751074f7e..709a36507145 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -88,7 +88,7 @@ struct xgene_rng_dev {
static void xgene_rng_expired_timer(struct timer_list *t)
{
- struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);
+ struct xgene_rng_dev *ctx = timer_container_of(ctx, t, failure_timer);
/* Clear failure counter as timer expired */
disable_irq(ctx->irq);
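
This and the bt-bmc/ipmi_si hunks below are mechanical renames: from_timer() became timer_container_of(), the same container_of()-based lookup under a clearer name. A minimal sketch with a hypothetical structure:

#include <linux/timer.h>

struct foo_dev {			/* hypothetical */
	struct timer_list poll_timer;
	bool poll_pending;
};

static void foo_poll(struct timer_list *t)
{
	/* recover the structure embedding the expired timer */
	struct foo_dev *fd = timer_container_of(fd, t, poll_timer);

	fd->poll_pending = true;
}
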
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 77146b5c762b..a179d4797011 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -347,7 +347,7 @@ static const struct file_operations bt_bmc_fops = {
static void poll_timer(struct timer_list *t)
{
- struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
+ struct bt_bmc *bt_bmc = timer_container_of(bt_bmc, t, poll_timer);
bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
wake_up(&bt_bmc->queue);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 3ba9d7e9a6c7..064944ae9fdc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -27,7 +27,6 @@
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
@@ -41,11 +40,12 @@
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
-static void smi_recv_work(struct work_struct *t);
+static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg);
+static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
@@ -180,14 +180,8 @@ MODULE_PARM_DESC(max_msgs_per_user,
struct ipmi_user {
struct list_head link;
- /*
- * Set to NULL when the user is destroyed, a pointer to myself
- * so srcu_dereference can be used on it.
- */
- struct ipmi_user *self;
- struct srcu_struct release_barrier;
-
struct kref refcount;
+ refcount_t destroyed;
/* The upper layer that handles receive messages. */
const struct ipmi_user_hndl *handler;
@@ -200,30 +194,8 @@ struct ipmi_user {
bool gets_events;
atomic_t nr_msgs;
-
- /* Free must run in process context for RCU cleanup. */
- struct work_struct remove_work;
};
-static struct workqueue_struct *remove_work_wq;
-
-static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
- __acquires(user->release_barrier)
-{
- struct ipmi_user *ruser;
-
- *index = srcu_read_lock(&user->release_barrier);
- ruser = srcu_dereference(user->self, &user->release_barrier);
- if (!ruser)
- srcu_read_unlock(&user->release_barrier, *index);
- return ruser;
-}
-
-static void release_ipmi_user(struct ipmi_user *user, int index)
-{
- srcu_read_unlock(&user->release_barrier, index);
-}
-
struct cmd_rcvr {
struct list_head link;
@@ -327,6 +299,8 @@ struct bmc_device {
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+static struct workqueue_struct *bmc_remove_work_wq;
+
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
struct ipmi_device_id *id,
bool *guid_set, guid_t *guid);
@@ -451,11 +425,10 @@ struct ipmi_smi {
struct list_head link;
/*
- * The list of upper layers that are using me. seq_lock write
- * protects this. Read protection is with srcu.
+ * The list of upper layers that are using me.
*/
struct list_head users;
- struct srcu_struct users_srcu;
+ struct mutex users_mutex;
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
@@ -496,15 +469,22 @@ struct ipmi_smi {
int curr_seq;
/*
- * Messages queued for delivery. If delivery fails (out of memory
- * for instance), They will stay in here to be processed later in a
- * periodic timer interrupt. The workqueue is for handling received
- * messages directly from the handler.
+	 * Messages queued for delivery to the user.
+ */
+ struct mutex user_msgs_mutex;
+ struct list_head user_msgs;
+
+ /*
+ * Messages queued for processing. If processing fails (out
+	 * of memory for instance), they will stay in here to be
+ * processed later in a periodic timer interrupt. The
+ * workqueue is for handling received messages directly from
+ * the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
- struct work_struct recv_work;
+ struct work_struct smi_work;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
@@ -522,10 +502,9 @@ struct ipmi_smi {
	 * Events that were queued because no one was there to receive
* them.
*/
- spinlock_t events_lock; /* For dealing with event stuff. */
+ struct mutex events_mutex; /* For dealing with event stuff. */
struct list_head waiting_events;
unsigned int waiting_events_count; /* How many events in queue? */
- char delivering_events;
char event_msg_printed;
/* How many users are waiting for events? */
@@ -613,6 +592,28 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static void free_ipmi_user(struct kref *ref)
+{
+ struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+ struct module *owner;
+
+ owner = user->intf->owner;
+ kref_put(&user->intf->refcount, intf_free);
+ module_put(owner);
+ vfree(user);
+}
+
+static void release_ipmi_user(struct ipmi_user *user)
+{
+ kref_put(&user->refcount, free_ipmi_user);
+}
+
+static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
+{
+ if (!kref_get_unless_zero(&user->refcount))
+ return NULL;
+ return user;
+}
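
The SRCU-based acquire/release pair is replaced by plain reference counting: acquire only succeeds while at least one reference is still live, and the final put frees the user. A hedged sketch of how the message-handling paths are expected to use the pair (flow condensed, function name hypothetical):

static int do_something_with_user(struct ipmi_user *user)
{
	user = acquire_ipmi_user(user);	/* NULL once refcount hit zero */
	if (!user)
		return -ENODEV;

	/* ... safe to dereference user and user->intf here ... */

	release_ipmi_user(user);	/* may free on the last put */
	return 0;
}
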
/*
* The driver model view of the IPMI messaging driver.
@@ -630,9 +631,6 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-#define ipmi_interfaces_mutex_held() \
- lockdep_is_held(&ipmi_interfaces_mutex)
-static struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -698,27 +696,20 @@ static void free_smi_msg_list(struct list_head *q)
}
}
-static void clean_up_interface_data(struct ipmi_smi *intf)
+static void intf_free(struct kref *ref)
{
+ struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
int i;
struct cmd_rcvr *rcvr, *rcvr2;
- struct list_head list;
-
- cancel_work_sync(&intf->recv_work);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
/*
* Wholesale remove all the entries from the list in the
- * interface and wait for RCU to know that none are in use.
+ * interface. No need for locks, this is single-threaded.
*/
- mutex_lock(&intf->cmd_rcvrs_mutex);
- INIT_LIST_HEAD(&list);
- list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
- mutex_unlock(&intf->cmd_rcvrs_mutex);
-
- list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+ list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
kfree(rcvr);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
@@ -726,20 +717,17 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
&& (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
-}
-
-static void intf_free(struct kref *ref)
-{
- struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
- clean_up_interface_data(intf);
kfree(intf);
}
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
- int index, rv;
+ unsigned int count = 0, i;
+ int *interfaces = NULL;
+ struct device **devices = NULL;
+ int rv = 0;
/*
* Make sure the driver is actually initialized, this handles
@@ -753,20 +741,53 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
list_add(&watcher->link, &smi_watchers);
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
- lockdep_is_held(&smi_watchers_mutex)) {
- int intf_num = READ_ONCE(intf->intf_num);
+	/*
+	 * Build an array of the ipmi interface numbers, plus a
+	 * matching array of their devices. We can't call the
+	 * callback with ipmi_interfaces_mutex held.
+	 * smi_watchers_mutex will keep things in order for the user.
+	 */
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link)
+ count++;
+ if (count > 0) {
+ interfaces = kmalloc_array(count, sizeof(*interfaces),
+ GFP_KERNEL);
+ if (!interfaces) {
+ rv = -ENOMEM;
+ } else {
+ devices = kmalloc_array(count, sizeof(*devices),
+ GFP_KERNEL);
+ if (!devices) {
+ kfree(interfaces);
+ interfaces = NULL;
+ rv = -ENOMEM;
+ }
+ }
+ count = 0;
+ }
+ if (interfaces) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ int intf_num = READ_ONCE(intf->intf_num);
- if (intf_num == -1)
- continue;
- watcher->new_smi(intf_num, intf->si_dev);
+ if (intf_num == -1)
+ continue;
+ devices[count] = intf->si_dev;
+ interfaces[count++] = intf_num;
+ }
+ }
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ if (interfaces) {
+ for (i = 0; i < count; i++)
+ watcher->new_smi(interfaces[i], devices[i]);
+ kfree(interfaces);
+ kfree(devices);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
mutex_unlock(&smi_watchers_mutex);
- return 0;
+ return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
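
The rework above is the classic "snapshot under lock, call back outside the lock" shape: interface numbers and devices are copied into arrays while ipmi_interfaces_mutex is held, and the watcher callbacks (which may sleep or take other locks) run only after it is dropped. A generic sketch of the pattern, with hypothetical names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct item { struct list_head link; int id; };

static int notify_all(struct list_head *head, struct mutex *lock,
		      void (*cb)(int id))
{
	int *ids, n = 0, i;
	struct item *it;

	mutex_lock(lock);
	list_for_each_entry(it, head, link)
		n++;
	ids = kmalloc_array(n, sizeof(*ids), GFP_KERNEL);
	if (!ids) {
		mutex_unlock(lock);
		return -ENOMEM;
	}
	n = 0;
	list_for_each_entry(it, head, link)
		ids[n++] = it->id;
	mutex_unlock(lock);

	for (i = 0; i < n; i++)
		cb(ids[i]);	/* callback runs without the lock held */
	kfree(ids);
	return 0;
}
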
@@ -779,22 +800,17 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-/*
- * Must be called with smi_watchers_mutex held.
- */
static void
call_smi_watchers(int i, struct device *dev)
{
struct ipmi_smi_watcher *w;
- mutex_lock(&smi_watchers_mutex);
list_for_each_entry(w, &smi_watchers, link) {
if (try_module_get(w->owner)) {
w->new_smi(i, dev);
module_put(w->owner);
}
}
- mutex_unlock(&smi_watchers_mutex);
}
static int
@@ -941,18 +957,14 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
ipmi_free_recv_msg(msg);
atomic_dec(&msg->user->nr_msgs);
} else {
- int index;
- struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
-
- if (user) {
- atomic_dec(&user->nr_msgs);
- user->handler->ipmi_recv_hndl(msg, user->handler_data);
- release_ipmi_user(user, index);
- } else {
- /* User went away, give up. */
- ipmi_free_recv_msg(msg);
- rv = -EINVAL;
- }
+ /*
+ * Deliver it in smi_work. The message will hold a
+ * refcount to the user.
+ */
+ mutex_lock(&intf->user_msgs_mutex);
+ list_add_tail(&msg->link, &intf->user_msgs);
+ mutex_unlock(&intf->user_msgs_mutex);
+ queue_work(system_wq, &intf->smi_work);
}
return rv;
@@ -1192,23 +1204,14 @@ static int intf_err_seq(struct ipmi_smi *intf,
return rv;
}
-static void free_user_work(struct work_struct *work)
-{
- struct ipmi_user *user = container_of(work, struct ipmi_user,
- remove_work);
-
- cleanup_srcu_struct(&user->release_barrier);
- vfree(user);
-}
-
int ipmi_create_user(unsigned int if_num,
const struct ipmi_user_hndl *handler,
void *handler_data,
struct ipmi_user **user)
{
unsigned long flags;
- struct ipmi_user *new_user;
- int rv, index;
+ struct ipmi_user *new_user = NULL;
+ int rv = 0;
struct ipmi_smi *intf;
/*
@@ -1230,30 +1233,31 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = vzalloc(sizeof(*new_user));
- if (!new_user)
- return -ENOMEM;
-
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (intf->intf_num == if_num)
goto found;
}
/* Not found, return an error */
rv = -EINVAL;
- goto out_kfree;
+ goto out_unlock;
found:
+ if (intf->in_shutdown) {
+ rv = -ENODEV;
+ goto out_unlock;
+ }
+
if (atomic_add_return(1, &intf->nr_users) > max_users) {
rv = -EBUSY;
goto out_kfree;
}
- INIT_WORK(&new_user->remove_work, free_user_work);
-
- rv = init_srcu_struct(&new_user->release_barrier);
- if (rv)
+ new_user = vzalloc(sizeof(*new_user));
+ if (!new_user) {
+ rv = -ENOMEM;
goto out_kfree;
+ }
if (!try_module_get(intf->owner)) {
rv = -ENODEV;
@@ -1265,64 +1269,58 @@ int ipmi_create_user(unsigned int if_num,
atomic_set(&new_user->nr_msgs, 0);
kref_init(&new_user->refcount);
+ refcount_set(&new_user->destroyed, 1);
+ kref_get(&new_user->refcount); /* Destroy owns a refcount. */
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
new_user->gets_events = false;
- rcu_assign_pointer(new_user->self, new_user);
+ mutex_lock(&intf->users_mutex);
spin_lock_irqsave(&intf->seq_lock, flags);
- list_add_rcu(&new_user->link, &intf->users);
+ list_add(&new_user->link, &intf->users);
spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->users_mutex);
+
if (handler->ipmi_watchdog_pretimeout)
/* User wants pretimeouts, so make sure to watch for them. */
smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- *user = new_user;
- return 0;
out_kfree:
- atomic_dec(&intf->nr_users);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
- vfree(new_user);
+ if (rv) {
+ atomic_dec(&intf->nr_users);
+ vfree(new_user);
+ } else {
+ *user = new_user;
+ }
+out_unlock:
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
- int rv, index;
+ int rv = -EINVAL;
struct ipmi_smi *intf;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- if (intf->intf_num == if_num)
- goto found;
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num) {
+ if (!intf->handlers->get_smi_info)
+ rv = -ENOTTY;
+ else
+ rv = intf->handlers->get_smi_info(intf->send_info, data);
+ break;
+ }
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
-
- /* Not found, return an error */
- return -EINVAL;
-
-found:
- if (!intf->handlers->get_smi_info)
- rv = -ENOTTY;
- else
- rv = intf->handlers->get_smi_info(intf->send_info, data);
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
-static void free_user(struct kref *ref)
-{
- struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-
- /* SRCU cleanup must happen in workqueue context. */
- queue_work(remove_work_wq, &user->remove_work);
-}
-
+/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
@@ -1330,21 +1328,10 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- struct module *owner;
+ struct ipmi_recv_msg *msg, *msg2;
- if (!acquire_ipmi_user(user, &i)) {
- /*
- * The user has already been cleaned up, just make sure
- * nothing is using it and return.
- */
- synchronize_srcu(&user->release_barrier);
+ if (!refcount_dec_if_one(&user->destroyed))
return;
- }
-
- rcu_assign_pointer(user->self, NULL);
- release_ipmi_user(user, i);
-
- synchronize_srcu(&user->release_barrier);
if (user->handler->shutdown)
user->handler->shutdown(user->handler_data);
@@ -1355,11 +1342,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
if (user->gets_events)
atomic_dec(&intf->event_waiters);
- /* Remove the user from the interface's sequence table. */
- spin_lock_irqsave(&intf->seq_lock, flags);
- list_del_rcu(&user->link);
+ /* Remove the user from the interface's list and sequence table. */
+ list_del(&user->link);
atomic_dec(&intf->nr_users);
+ spin_lock_irqsave(&intf->seq_lock, flags);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1374,7 +1361,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
* Remove the user from the command receiver's table. First
* we build a list of everything (not using the standard link,
* since other things may be using it till we do
- * synchronize_srcu()) then free everything in that list.
+ * synchronize_rcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
@@ -1386,23 +1373,33 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
- synchronize_rcu();
while (rcvrs) {
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
- owner = intf->owner;
- kref_put(&intf->refcount, intf_free);
- module_put(owner);
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ if (msg->user != user)
+ continue;
+ list_del(&msg->link);
+ ipmi_free_recv_msg(msg);
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ release_ipmi_user(user);
}
void ipmi_destroy_user(struct ipmi_user *user)
{
+ struct ipmi_smi *intf = user->intf;
+
+ mutex_lock(&intf->users_mutex);
_ipmi_destroy_user(user);
+ mutex_unlock(&intf->users_mutex);
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);
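
_ipmi_destroy_user() now uses the "destroyed" refcount purely as a destroy-once latch: refcount_dec_if_one() succeeds only for the first caller, so a concurrent explicit destroy and the interface-shutdown path cannot both run the teardown. A sketch of the idiom, assuming only the refcount API:

#include <linux/refcount.h>

struct obj {				/* hypothetical */
	refcount_t destroyed;		/* set to 1 at creation */
	/* ... */
};

static void obj_destroy(struct obj *o)
{
	/* Only the first caller sees 1 -> 0 and does the teardown. */
	if (!refcount_dec_if_one(&o->destroyed))
		return;

	/* ... teardown runs exactly once here ... */
}
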
@@ -1411,9 +1408,9 @@ int ipmi_get_version(struct ipmi_user *user,
unsigned char *minor)
{
struct ipmi_device_id id;
- int rv, index;
+ int rv;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1422,7 +1419,7 @@ int ipmi_get_version(struct ipmi_user *user,
*major = ipmi_version_major(&id);
*minor = ipmi_version_minor(&id);
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1432,9 +1429,9 @@ int ipmi_set_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1444,7 +1441,7 @@ int ipmi_set_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].address = address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1454,9 +1451,9 @@ int ipmi_get_my_address(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1466,7 +1463,7 @@ int ipmi_get_my_address(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].address;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1476,9 +1473,9 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char LUN)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1488,7 +1485,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
user->intf->addrinfo[channel].lun = LUN & 0x3;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1498,9 +1495,9 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
unsigned int channel,
unsigned char *address)
{
- int index, rv = 0;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1510,7 +1507,7 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
*address = user->intf->addrinfo[channel].lun;
}
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1518,17 +1515,17 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
- int mode, index;
+ int mode;
unsigned long flags;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
mode = user->intf->maintenance_mode;
spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return mode;
}
@@ -1543,11 +1540,11 @@ static void maintenance_mode_update(struct ipmi_smi *intf)
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
- int rv = 0, index;
+ int rv = 0;
unsigned long flags;
struct ipmi_smi *intf = user->intf;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1577,7 +1574,7 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
}
out_unlock:
spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1585,19 +1582,17 @@ EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
- unsigned long flags;
struct ipmi_smi *intf = user->intf;
struct ipmi_recv_msg *msg, *msg2;
struct list_head msgs;
- int index;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
if (user->gets_events == val)
goto out;
@@ -1610,13 +1605,6 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
atomic_dec(&intf->event_waiters);
}
- if (intf->delivering_events)
- /*
- * Another thread is delivering events for this, so
- * let it handle any new events.
- */
- goto out;
-
/* Deliver any queued events. */
while (user->gets_events && !list_empty(&intf->waiting_events)) {
list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
@@ -1627,22 +1615,16 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
intf->event_msg_printed = 0;
}
- intf->delivering_events = 1;
- spin_unlock_irqrestore(&intf->events_lock, flags);
-
list_for_each_entry_safe(msg, msg2, &msgs, link) {
msg->user = user;
kref_get(&user->refcount);
deliver_local_response(intf, msg);
}
-
- spin_lock_irqsave(&intf->events_lock, flags);
- intf->delivering_events = 0;
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
- release_ipmi_user(user, index);
+ mutex_unlock(&intf->events_mutex);
+ release_ipmi_user(user);
return 0;
}
@@ -1687,9 +1669,9 @@ int ipmi_register_for_cmd(struct ipmi_user *user,
{
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
- int rv = 0, index;
+ int rv = 0;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1719,7 +1701,7 @@ out_unlock:
if (rv)
kfree(rcvr);
out_release:
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
@@ -1733,9 +1715,9 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
struct ipmi_smi *intf = user->intf;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- int i, rv = -ENOENT, index;
+ int i, rv = -ENOENT;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -1758,7 +1740,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
while (rcvrs) {
smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
rcvr = rcvrs;
@@ -1882,13 +1864,12 @@ static void smi_send(struct ipmi_smi *intf,
const struct ipmi_smi_handlers *handlers,
struct ipmi_smi_msg *smi_msg, int priority)
{
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
unsigned long flags = 0;
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
@@ -2304,6 +2285,7 @@ static int i_ipmi_request(struct ipmi_user *user,
{
struct ipmi_smi_msg *smi_msg;
struct ipmi_recv_msg *recv_msg;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
if (user) {
@@ -2337,7 +2319,8 @@ static int i_ipmi_request(struct ipmi_user *user,
}
}
- rcu_read_lock();
+ if (!run_to_completion)
+ mutex_lock(&intf->users_mutex);
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
@@ -2383,7 +2366,8 @@ out_err:
smi_send(intf, intf->handlers, smi_msg, priority);
}
- rcu_read_unlock();
+ if (!run_to_completion)
+ mutex_unlock(&intf->users_mutex);
out:
if (rv && user)
@@ -2414,12 +2398,12 @@ int ipmi_request_settime(struct ipmi_user *user,
unsigned int retry_time_ms)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2438,7 +2422,7 @@ int ipmi_request_settime(struct ipmi_user *user,
retries,
retry_time_ms);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
@@ -2453,12 +2437,12 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
int priority)
{
unsigned char saddr = 0, lun = 0;
- int rv, index;
+ int rv;
if (!user)
return -EINVAL;
- user = acquire_ipmi_user(user, &index);
+ user = acquire_ipmi_user(user);
if (!user)
return -ENODEV;
@@ -2477,7 +2461,7 @@ int ipmi_request_supply_msgs(struct ipmi_user *user,
lun,
-1, 0);
- release_ipmi_user(user, index);
+ release_ipmi_user(user);
return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
@@ -3064,7 +3048,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- queue_work(remove_work_wq, &bmc->remove_work);
+ queue_work(bmc_remove_work_wq, &bmc->remove_work);
}
/*
@@ -3520,15 +3504,14 @@ static ssize_t nr_msgs_show(struct device *dev,
char *buf)
{
struct ipmi_smi *intf = container_of(attr,
- struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_smi, nr_msgs_devattr);
struct ipmi_user *user;
- int index;
unsigned int count = 0;
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link)
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link)
count += atomic_read(&user->nr_msgs);
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
return sysfs_emit(buf, "%u\n", count);
}
@@ -3569,12 +3552,6 @@ int ipmi_add_smi(struct module *owner,
if (!intf)
return -ENOMEM;
- rv = init_srcu_struct(&intf->users_srcu);
- if (rv) {
- kfree(intf);
- return rv;
- }
-
intf->owner = owner;
intf->bmc = &intf->tmp_bmc;
INIT_LIST_HEAD(&intf->bmc->intfs);
@@ -3591,7 +3568,10 @@ int ipmi_add_smi(struct module *owner,
}
if (slave_addr != 0)
intf->addrinfo[0].address = slave_addr;
+ INIT_LIST_HEAD(&intf->user_msgs);
+ mutex_init(&intf->user_msgs_mutex);
INIT_LIST_HEAD(&intf->users);
+ mutex_init(&intf->users_mutex);
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
@@ -3603,12 +3583,12 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- INIT_WORK(&intf->recv_work, smi_recv_work);
+ INIT_WORK(&intf->smi_work, smi_work);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
- spin_lock_init(&intf->events_lock);
+ mutex_init(&intf->events_mutex);
spin_lock_init(&intf->watch_lock);
atomic_set(&intf->event_waiters, 0);
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -3621,12 +3601,16 @@ int ipmi_add_smi(struct module *owner,
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
+ /*
+ * Grab the watchers mutex so we can deliver the new interface
+ * without races.
+ */
+ mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
- list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
- ipmi_interfaces_mutex_held()) {
+ list_for_each_entry(tintf, &ipmi_interfaces, link) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
@@ -3635,9 +3619,9 @@ int ipmi_add_smi(struct module *owner,
}
/* Add the new interface in numeric order. */
if (i == 0)
- list_add_rcu(&intf->link, &ipmi_interfaces);
+ list_add(&intf->link, &ipmi_interfaces);
else
- list_add_tail_rcu(&intf->link, link);
+ list_add_tail(&intf->link, link);
rv = handlers->start_processing(send_info, intf);
if (rv)
@@ -3669,18 +3653,14 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
- /*
- * Keep memory order straight for RCU readers. Make
- * sure everything else is committed to memory before
- * setting intf_num to mark the interface valid.
- */
- smp_wmb();
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
/* After this point the interface is legal to use. */
call_smi_watchers(i, intf->si_dev);
+ mutex_unlock(&smi_watchers_mutex);
+
return 0;
out_err_bmc_reg:
@@ -3689,10 +3669,9 @@ int ipmi_add_smi(struct module *owner,
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
out_err:
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- cleanup_srcu_struct(&intf->users_srcu);
+ mutex_unlock(&smi_watchers_mutex);
kref_put(&intf->refcount, intf_free);
return rv;
@@ -3758,19 +3737,28 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
- int intf_num, index;
+ int intf_num;
if (!intf)
return;
+
intf_num = intf->intf_num;
mutex_lock(&ipmi_interfaces_mutex);
+ cancel_work_sync(&intf->smi_work);
+ /* smi_work() can no longer be in progress after this. */
+
intf->intf_num = -1;
intf->in_shutdown = true;
- list_del_rcu(&intf->link);
+ list_del(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- /* At this point no users can be added to the interface. */
+ /*
+ * At this point no users can be added to the interface and no
+ * new messages can be sent.
+ */
+
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3784,24 +3772,19 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
w->smi_gone(intf_num);
mutex_unlock(&smi_watchers_mutex);
- index = srcu_read_lock(&intf->users_srcu);
+ mutex_lock(&intf->users_mutex);
while (!list_empty(&intf->users)) {
- struct ipmi_user *user =
- container_of(list_next_rcu(&intf->users),
- struct ipmi_user, link);
+ struct ipmi_user *user = list_first_entry(&intf->users,
+ struct ipmi_user, link);
_ipmi_destroy_user(user);
}
- srcu_read_unlock(&intf->users_srcu, index);
-
- if (intf->handlers->shutdown)
- intf->handlers->shutdown(intf->send_info);
+ mutex_unlock(&intf->users_mutex);
cleanup_smi_msgs(intf);
ipmi_bmc_unregister(intf);
- cleanup_srcu_struct(&intf->users_srcu);
kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);
@@ -3926,17 +3909,12 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
msg->data_size, msg->data);
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -3946,7 +3924,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
@@ -4017,17 +3995,12 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
- rcu_read_lock();
- if (!intf->in_shutdown) {
- smi_send(intf, intf->handlers, msg, 0);
- /*
- * We used the message, so return the value
- * that causes it to not be freed or
- * queued.
- */
- rv = -1;
- }
- rcu_read_unlock();
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
+ */
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -4037,7 +4010,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
@@ -4206,14 +4179,33 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcu_read_unlock();
if (user == NULL) {
- /* We didn't find a user, just give up. */
+ /* We didn't find a user, just give up and return an error. */
ipmi_inc_stat(intf, unhandled_commands);
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg->data[1] = IPMI_SEND_MSG_CMD;
+ msg->data[2] = chan;
+ msg->data[3] = msg->rsp[4]; /* handle */
+ msg->data[4] = msg->rsp[8]; /* rsSWID */
+ msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
+ msg->data[6] = ipmb_checksum(&msg->data[3], 3);
+ msg->data[7] = msg->rsp[5]; /* rqSWID */
+ /* rqseq/lun */
+ msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
+ msg->data[9] = cmd;
+ msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data[11] = ipmb_checksum(&msg->data[7], 4);
+ msg->data_size = 12;
+
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
+
+ smi_send(intf, intf->handlers, msg, 0);
/*
- * Don't do anything with these messages, just allow
- * them to be freed.
+ * We used the message, so return the value that
+ * causes it to not be freed or queued.
*/
- rv = 0;
+ rv = -1;
} else {
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
@@ -4222,7 +4214,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
* message, so requeue it for handling later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/* Extract the source address from the data. */
lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
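
The error response assembled above carries two IPMB checksums: one over the connection header at data[3..5], one over the remaining bytes at data[7..10]. ipmb_checksum() is the usual two's-complement sum; a sketch of the semantics the patch assumes:

/*
 * Two's-complement checksum: the sum of all covered bytes plus the
 * checksum byte itself must equal zero (mod 256).
 */
static unsigned char ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
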
@@ -4331,7 +4323,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
* later.
*/
rv = 1;
- kref_put(&user->refcount, free_user);
+ kref_put(&user->refcount, free_ipmi_user);
} else {
/*
* OEM Messages are expected to be delivered via
@@ -4393,8 +4385,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg, *recv_msg2;
struct list_head msgs;
struct ipmi_user *user;
- int rv = 0, deliver_count = 0, index;
- unsigned long flags;
+ int rv = 0, deliver_count = 0;
if (msg->rsp_size < 19) {
/* Message is too small to be an IPMB event. */
@@ -4409,7 +4400,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
INIT_LIST_HEAD(&msgs);
- spin_lock_irqsave(&intf->events_lock, flags);
+ mutex_lock(&intf->events_mutex);
ipmi_inc_stat(intf, events);
@@ -4417,18 +4408,20 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* Allocate and fill in one message for every user that is
* getting events.
*/
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
if (!user->gets_events)
continue;
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
- rcu_read_unlock();
+ mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
+ user = recv_msg->user;
list_del(&recv_msg->link);
ipmi_free_recv_msg(recv_msg);
+ kref_put(&user->refcount, free_ipmi_user);
}
/*
* We couldn't allocate memory for the
@@ -4446,7 +4439,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
- srcu_read_unlock(&intf->users_srcu, index);
+ mutex_unlock(&intf->users_mutex);
if (deliver_count) {
/* Now deliver all the messages. */
@@ -4484,7 +4477,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
}
out:
- spin_unlock_irqrestore(&intf->events_lock, flags);
+ mutex_unlock(&intf->events_mutex);
return rv;
}
@@ -4570,7 +4563,7 @@ return_unspecified:
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
- if (intf->in_shutdown)
+ if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
@@ -4642,6 +4635,9 @@ return_unspecified:
*/
struct ipmi_recv_msg *recv_msg;
+ if (intf->run_to_completion)
+ goto out;
+
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
@@ -4664,6 +4660,9 @@ process_response_response:
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
struct ipmi_channel *chans;
+ if (intf->run_to_completion)
+ goto out;
+
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -4738,6 +4737,9 @@ process_response_response:
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
/* It's an asynchronous event. */
+ if (intf->run_to_completion)
+ goto out;
+
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
@@ -4753,10 +4755,10 @@ process_response_response:
*/
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
- struct ipmi_smi_msg *smi_msg;
- unsigned long flags = 0;
- int rv;
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/* See if any waiting messages need to be processed. */
if (!run_to_completion)
@@ -4790,31 +4792,15 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
-
- /*
- * If the pretimout count is non-zero, decrement one from it and
- * deliver pretimeouts to all the users.
- */
- if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
- struct ipmi_user *user;
- int index;
-
- index = srcu_read_lock(&intf->users_srcu);
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (user->handler->ipmi_watchdog_pretimeout)
- user->handler->ipmi_watchdog_pretimeout(
- user->handler_data);
- }
- srcu_read_unlock(&intf->users_srcu, index);
- }
}
-static void smi_recv_work(struct work_struct *t)
+static void smi_work(struct work_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = from_work(intf, t, recv_work);
- int run_to_completion = intf->run_to_completion;
+ struct ipmi_smi *intf = from_work(intf, t, smi_work);
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
+ struct ipmi_recv_msg *msg, *msg2;
/*
* Start the next message if available.
@@ -4824,8 +4810,6 @@ static void smi_recv_work(struct work_struct *t)
* message delivery.
*/
- rcu_read_lock();
-
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4843,15 +4827,57 @@ static void smi_recv_work(struct work_struct *t)
intf->curr_msg = newmsg;
}
}
-
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+
if (newmsg)
intf->handlers->sender(intf->send_info, newmsg);
- rcu_read_unlock();
-
handle_new_recv_msgs(intf);
+
+ /* Nothing below applies during panic time. */
+ if (run_to_completion)
+ return;
+
+ /*
+	 * If the pretimeout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ struct ipmi_user *user;
+
+ mutex_lock(&intf->users_mutex);
+ list_for_each_entry(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ mutex_unlock(&intf->users_mutex);
+ }
+
+ /*
+ * Freeing the message can cause a user to be released, which
+ * can then cause the interface to be freed. Make sure that
+ * doesn't happen until we are ready.
+ */
+ kref_get(&intf->refcount);
+
+ mutex_lock(&intf->user_msgs_mutex);
+ list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
+ struct ipmi_user *user = msg->user;
+
+ list_del(&msg->link);
+
+ if (refcount_read(&user->destroyed) == 0) {
+ ipmi_free_recv_msg(msg);
+ } else {
+ atomic_dec(&user->nr_msgs);
+ user->handler->ipmi_recv_hndl(msg, user->handler_data);
+ }
+ }
+ mutex_unlock(&intf->user_msgs_mutex);
+
+ kref_put(&intf->refcount, intf_free);
}
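
With delivery moved into smi_work() on system_wq, user handlers now run in ordinary process context instead of the BH workqueue, and each queued message pins its user via the refcount until the handler returns or the destroyed flag is seen. A hypothetical consumer-side sketch (only the callback signature and ipmi_free_recv_msg() are from the kernel API):

#include <linux/ipmi.h>

/* Hypothetical receive handler, invoked from smi_work(). */
static void foo_recv(struct ipmi_recv_msg *msg, void *handler_data)
{
	pr_info("IPMI response, netfn %x cmd %x\n",
		msg->msg.netfn, msg->msg.cmd);
	ipmi_free_recv_msg(msg);	/* drops the user refcount */
}

static const struct ipmi_user_hndl foo_hndl = {
	.ipmi_recv_hndl = foo_recv,
};
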
/* Handle a new message from the lower layer. */
@@ -4859,7 +4885,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int run_to_completion = intf->run_to_completion;
+ int run_to_completion = READ_ONCE(intf->run_to_completion);
/*
* To preserve message order, we keep a queue and deliver from
@@ -4884,9 +4910,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_work(&intf->recv_work);
+ smi_work(&intf->smi_work);
else
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4896,7 +4922,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -5065,7 +5091,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- queue_work(system_bh_wq, &intf->recv_work);
+ queue_work(system_wq, &intf->smi_work);
return need_timer;
}
@@ -5084,17 +5110,19 @@ static struct timer_list ipmi_timer;
static atomic_t stop_operation;
-static void ipmi_timeout(struct timer_list *unused)
+static void ipmi_timeout_work(struct work_struct *work)
{
struct ipmi_smi *intf;
bool need_timer = false;
- int index;
if (atomic_read(&stop_operation))
return;
- index = srcu_read_lock(&ipmi_interfaces_srcu);
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (atomic_read(&intf->event_waiters)) {
intf->ticks_to_req_ev--;
if (intf->ticks_to_req_ev == 0) {
@@ -5106,12 +5134,22 @@ static void ipmi_timeout(struct timer_list *unused)
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
- srcu_read_unlock(&ipmi_interfaces_srcu, index);
+ mutex_unlock(&ipmi_interfaces_mutex);
if (need_timer)
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
+static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);
+
+static void ipmi_timeout(struct timer_list *unused)
+{
+ if (atomic_read(&stop_operation))
+ return;
+
+ queue_work(system_wq, &ipmi_timer_work);
+}
+
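
Splitting the timer into a work item is what lets the periodic path take mutexes: a timer callback runs in atomic context and may only queue work, while the work function is free to sleep. The shape, reduced to its essentials with hypothetical names:

#include <linux/timer.h>
#include <linux/workqueue.h>

static void foo_work_fn(struct work_struct *work)
{
	/* process context: sleeping locks are fine here */
}
static DECLARE_WORK(foo_work, foo_work_fn);

static void foo_timer_fn(struct timer_list *unused)
{
	/* atomic context: just kick the work and return */
	queue_work(system_wq, &foo_work);
}
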
static void need_waiter(struct ipmi_smi *intf)
{
/* Racy, but worst case we start the timer twice. */
@@ -5168,7 +5206,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
if (msg->user && !oops_in_progress)
- kref_put(&msg->user->refcount, free_user);
+ kref_put(&msg->user->refcount, free_ipmi_user);
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
@@ -5188,9 +5226,9 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
/*
* Inside a panic, send a message and wait for a response.
*/
-static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
- struct ipmi_addr *addr,
- struct kernel_ipmi_msg *msg)
+static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
{
struct ipmi_smi_msg smi_msg;
struct ipmi_recv_msg recv_msg;
@@ -5220,6 +5258,15 @@ static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
ipmi_poll(intf);
}
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ user->intf->run_to_completion = 1;
+ _ipmi_panic_request_and_wait(user->intf, addr, msg);
+}
+EXPORT_SYMBOL(ipmi_panic_request_and_wait);
+
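
The exported ipmi_panic_request_and_wait() gives panic-time callers (for example a watchdog wanting to send one last message) a safe entry point: it forces the interface into run-to-completion mode and then polls synchronously. A hypothetical caller sketch, using only message definitions from linux/ipmi.h:

#include <linux/ipmi.h>

/* Hypothetical panic notifier body for an IPMI user 'user'. */
static void foo_panic_poke(struct ipmi_user *user)
{
	struct ipmi_system_interface_addr addr = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel = IPMI_BMC_CHANNEL,
	};
	struct kernel_ipmi_msg msg = {
		.netfn = IPMI_NETFN_APP_REQUEST,
		.cmd = IPMI_GET_DEVICE_ID_CMD,	/* any harmless request */
	};

	ipmi_panic_request_and_wait(user, (struct ipmi_addr *)&addr, &msg);
}
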
static void event_receiver_fetcher(struct ipmi_smi *intf,
struct ipmi_recv_msg *msg)
{
@@ -5288,7 +5335,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
}
/* Send the event announcing the panic. */
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
/*
* On every interface, dump a bunch of OEM event holding the
@@ -5324,7 +5371,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -5333,7 +5380,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -5385,7 +5432,7 @@ static void send_panic_events(struct ipmi_smi *intf, char *str)
memcpy_and_pad(data+5, 11, p, size, '\0');
p += size;
- ipmi_panic_request_and_wait(intf, &addr, &msg);
+ _ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
@@ -5403,7 +5450,7 @@ static int panic_event(struct notifier_block *this,
has_panicked = 1;
/* For every registered interface, set it to run to completion. */
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ list_for_each_entry(intf, &ipmi_interfaces, link) {
if (!intf->handlers || intf->intf_num == -1)
/* Interface is not ready. */
continue;
@@ -5433,7 +5480,7 @@ static int panic_event(struct notifier_block *this,
intf->handlers->set_run_to_completion(intf->send_info,
1);
- list_for_each_entry_rcu(user, &intf->users, link) {
+ list_for_each_entry(user, &intf->users, link) {
if (user->handler->ipmi_panic_handler)
user->handler->ipmi_panic_handler(
user->handler_data);
@@ -5478,15 +5525,11 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- rv = init_srcu_struct(&ipmi_interfaces_srcu);
- if (rv)
- goto out;
-
- remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
- if (!remove_work_wq) {
+ bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!bmc_remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
- goto out_wq;
+ goto out;
}
timer_setup(&ipmi_timer, ipmi_timeout, 0);
@@ -5496,9 +5539,6 @@ static int ipmi_init_msghandler(void)
initialized = true;
-out_wq:
- if (rv)
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
@@ -5522,7 +5562,7 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
- destroy_workqueue(remove_work_wq);
+ destroy_workqueue(bmc_remove_work_wq);
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
@@ -5539,6 +5579,7 @@ static void __exit cleanup_ipmi(void)
*/
atomic_set(&stop_operation, 1);
timer_delete_sync(&ipmi_timer);
+ cancel_work_sync(&ipmi_timer_work);
initialized = false;
@@ -5549,8 +5590,6 @@ static void __exit cleanup_ipmi(void)
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
pr_warn("recv message count %d at exit\n", count);
-
- cleanup_srcu_struct(&ipmi_interfaces_srcu);
}
if (drvregistered)
driver_unregister(&ipmidriver.driver);
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index a7ead2a4c753..508c3fd45877 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -26,6 +26,14 @@ enum si_type {
/* Array is defined in the ipmi_si_intf.c */
extern const char *const si_to_str[];
+struct ipmi_match_info {
+ enum si_type type;
+};
+
+extern const struct ipmi_match_info ipmi_kcs_si_info;
+extern const struct ipmi_match_info ipmi_smic_si_info;
+extern const struct ipmi_match_info ipmi_bt_si_info;
+
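With si_type wrapped in a const match-info object, bus glue hands the core a pointer instead of an enum value, which also lets device-table .data fields point straight at the descriptor. A sketch of the intended use:

	/* Illustrative sketch: a probe routine picks one of the three
	 * shared const descriptors; the core reads the type through it. */
	static void example_fill_io(struct si_sm_io *io)
	{
		io->si_info = &ipmi_kcs_si_info;	/* was: io->si_type = SI_KCS */

		if (io->si_info->type == SI_KCS)
			;	/* KCS-specific handling */
	}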
enum ipmi_addr_space {
IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE
};
@@ -64,7 +72,7 @@ struct si_sm_io {
void (*irq_cleanup)(struct si_sm_io *io);
u8 slave_addr;
- enum si_type si_type;
+ const struct ipmi_match_info *si_info;
struct device *dev;
};
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 12b0b77eb1cc..bb42dfe1c6a8 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -73,6 +73,10 @@ enum si_intf_state {
/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };
+const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS };
+const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC };
+const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT };
+
static bool initialized;
/*
@@ -692,7 +696,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
@@ -1072,7 +1076,8 @@ static void set_need_watch(void *send_info, unsigned int watch_mask)
static void smi_timeout(struct timer_list *t)
{
- struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
+ struct smi_info *smi_info = timer_container_of(smi_info, t,
+ si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
@@ -1119,7 +1124,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data)
struct smi_info *smi_info = data;
unsigned long flags;
- if (smi_info->io.si_type == SI_BT)
+ if (smi_info->io.si_info->type == SI_BT)
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
@@ -1164,7 +1169,7 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
+ else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq)
enable = 1;
if (enable) {
@@ -1235,7 +1240,7 @@ MODULE_PARM_DESC(kipmid_max_busy_us,
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Enable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1243,7 +1248,7 @@ void ipmi_irq_finish_setup(struct si_sm_io *io)
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
- if (io->si_type == SI_BT)
+ if (io->si_info->type == SI_BT)
/* Disable the interrupt in the BT interface. */
io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
@@ -1614,7 +1619,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]);
}
static DEVICE_ATTR_RO(type);
@@ -1649,7 +1654,7 @@ static ssize_t params_show(struct device *dev,
return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
- si_to_str[smi_info->io.si_type],
+ si_to_str[smi_info->io.si_info->type],
addr_space_to_str[smi_info->io.addr_space],
smi_info->io.addr_data,
smi_info->io.regspacing,
@@ -1803,7 +1808,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
- smi_info->io.si_type == SI_BT)
+ smi_info->io.si_info->type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -1907,13 +1912,13 @@ int ipmi_si_add_smi(struct si_sm_io *io)
/* We prefer ACPI over SMBIOS. */
dev_info(dup->io.dev,
"Removing SMBIOS-specified %s state machine in favor of ACPI\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
cleanup_one_si(dup);
} else {
dev_info(new_smi->io.dev,
"%s-specified %s state machine: duplicate\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
rv = -EBUSY;
kfree(new_smi);
goto out_err;
@@ -1922,7 +1927,7 @@ int ipmi_si_add_smi(struct si_sm_io *io)
pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
list_add_tail(&new_smi->link, &smi_infos);
@@ -1945,12 +1950,12 @@ static int try_smi_init(struct smi_info *new_smi)
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
- si_to_str[new_smi->io.si_type],
+ si_to_str[new_smi->io.si_info->type],
addr_space_to_str[new_smi->io.addr_space],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
- switch (new_smi->io.si_type) {
+ switch (new_smi->io.si_info->type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
@@ -2073,7 +2078,7 @@ static int try_smi_init(struct smi_info *new_smi)
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
- si_to_str[new_smi->io.si_type]);
+ si_to_str[new_smi->io.si_info->type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
@@ -2091,9 +2096,18 @@ static int try_smi_init(struct smi_info *new_smi)
return rv;
}
+/*
+ * Devices in the same address space at the same address are the same.
+ */
+static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
+{
+ return (e1->io.addr_space == e2->io.addr_space &&
+ e1->io.addr_data == e2->io.addr_data);
+}
+
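Two interfaces count as duplicates purely by address-space/address identity, regardless of IRQ; the registration scan below relies on this so an IRQ-less entry is skipped whenever an interrupt-capable twin exists. A tiny illustration of the predicate (values hypothetical):

	/* Illustrative sketch: same address space + same address => dup,
	 * even though one entry has an IRQ and the other does not. */
	struct smi_info a = { .io = { .addr_space = IPMI_IO_ADDR_SPACE,
				      .addr_data = 0xca2, .irq = 10 } };
	struct smi_info b = { .io = { .addr_space = IPMI_IO_ADDR_SPACE,
				      .addr_data = 0xca2, .irq = 0 } };
	/* ipmi_smi_info_same(&a, &b) is true; only 'a' gets registered. */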
static int __init init_ipmi_si(void)
{
- struct smi_info *e;
+ struct smi_info *e, *e2;
enum ipmi_addr_src type = SI_INVALID;
if (initialized)
@@ -2109,37 +2123,70 @@ static int __init init_ipmi_si(void)
ipmi_si_parisc_init();
- /* We prefer devices with interrupts, but in the case of a machine
- with multiple BMCs we assume that there will be several instances
- of a given type so if we succeed in registering a type then also
- try to register everything else of the same type */
mutex_lock(&smi_infos_lock);
+
+ /*
+ * Scan through all the devices. We prefer devices with
+ * interrupts, so go through those first in case there are any
+ * duplicates that don't have the interrupt set.
+ */
list_for_each_entry(e, &smi_infos, link) {
- /* Try to register a device if it has an IRQ and we either
- haven't successfully registered a device yet or this
- device has the same type as one we successfully registered */
- if (e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ bool dup = false;
+
+ /* Register ones with interrupts first. */
+ if (!e->io.irq)
+ continue;
+
+ /*
+ * Go through the ones we have already seen to see if this
+ * is a dup.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (e2->io.irq && ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
- /* type will only have been set if we successfully registered an si */
- if (type)
- goto skip_fallback_noirq;
+ /*
+ * Now try devices without interrupts.
+ */
+ list_for_each_entry(e, &smi_infos, link) {
+ bool dup = false;
- /* Fall back to the preferred device */
+ if (e->io.irq)
+ continue;
- list_for_each_entry(e, &smi_infos, link) {
- if (!e->io.irq && (!type || e->io.addr_source == type)) {
- if (!try_smi_init(e)) {
- type = e->io.addr_source;
+ /*
+ * Go through the ones we have already seen to see if
+ * this is a dup. We have already looked at the ones
+ * with interrupts.
+ */
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (!e2->io.irq)
+ continue;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
+ }
+ }
+ list_for_each_entry(e2, &smi_infos, link) {
+ if (e2 == e)
+ break;
+ if (ipmi_smi_info_same(e, e2)) {
+ dup = true;
+ break;
}
}
+ if (!dup)
+ try_smi_init(e);
}
-skip_fallback_noirq:
initialized = true;
mutex_unlock(&smi_infos_lock);
@@ -2267,7 +2314,7 @@ struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
if (e->io.addr_space != addr_space)
continue;
- if (e->io.si_type != si_type)
+ if (e->io.si_info->type != si_type)
continue;
if (e->io.addr_data == addr) {
dev = get_device(e->io.dev);
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 2be2967f6b5f..3b0a70d9adbb 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -13,7 +13,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
memset(&io, 0, sizeof(io));
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
io.addr_source = SI_DEVICETREE;
io.addr_space = IPMI_MEM_ADDR_SPACE;
io.addr_data = dev->hpa.start;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 8c0ea637aba0..17f72763322d 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -23,30 +23,32 @@ MODULE_PARM_DESC(trypci,
static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
{
- if (io->si_type == SI_KCS) {
- unsigned char status;
- int regspacing;
-
- io->regsize = DEFAULT_REGSIZE;
- io->regshift = 0;
-
- /* detect 1, 4, 16byte spacing */
- for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
- io->regspacing = regspacing;
- if (io->io_setup(io)) {
- dev_err(io->dev, "Could not setup I/O space\n");
- return DEFAULT_REGSPACING;
- }
- /* write invalid cmd */
- io->outputb(io, 1, 0x10);
- /* read status back */
- status = io->inputb(io, 1);
- io->io_cleanup(io);
- if (status)
- return regspacing;
- regspacing *= 4;
+ unsigned char status;
+ int regspacing;
+
+ if (io->si_info->type != SI_KCS)
+ return DEFAULT_REGSPACING;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16 byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev, "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
}
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
}
+
return DEFAULT_REGSPACING;
}
@@ -74,15 +76,15 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
switch (pdev->class) {
case PCI_CLASS_SERIAL_IPMI_SMIC:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_KCS:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case PCI_CLASS_SERIAL_IPMI_BT:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
default:
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 550cabd43ae6..fb6e359ae494 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -163,9 +163,13 @@ static int platform_ipmi_probe(struct platform_device *pdev)
switch (type) {
case SI_KCS:
+ io.si_info = &ipmi_kcs_si_info;
+ break;
case SI_SMIC:
+ io.si_info = &ipmi_smic_si_info;
+ break;
case SI_BT:
- io.si_type = type;
+ io.si_info = &ipmi_bt_si_info;
break;
case SI_TYPE_INVALID: /* User disabled this in hardcode. */
return -ENODEV;
@@ -213,13 +217,10 @@ static int platform_ipmi_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id of_ipmi_match[] = {
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
+ { .type = "ipmi", .compatible = "ipmi-kcs", .data = &ipmi_kcs_si_info },
+ { .type = "ipmi", .compatible = "ipmi-smic", .data = &ipmi_smic_si_info },
+ { .type = "ipmi", .compatible = "ipmi-bt", .data = &ipmi_bt_si_info },
+ {}
};
MODULE_DEVICE_TABLE(of, of_ipmi_match);
@@ -265,7 +266,7 @@ static int of_ipmi_probe(struct platform_device *pdev)
}
memset(&io, 0, sizeof(io));
- io.si_type = (enum si_type)device_get_match_data(&pdev->dev);
+ io.si_info = device_get_match_data(&pdev->dev);
io.addr_source = SI_DEVICETREE;
io.irq_setup = ipmi_std_irq_setup;
@@ -296,7 +297,7 @@ static int find_slave_address(struct si_sm_io *io, int slave_addr)
{
#ifdef CONFIG_IPMI_DMI_DECODE
if (!slave_addr)
- slave_addr = ipmi_dmi_get_slave_addr(io->si_type,
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_info->type,
io->addr_space,
io->addr_data);
#endif
@@ -335,13 +336,13 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
switch (tmp) {
case 1:
- io.si_type = SI_KCS;
+ io.si_info = &ipmi_kcs_si_info;
break;
case 2:
- io.si_type = SI_SMIC;
+ io.si_info = &ipmi_smic_si_info;
break;
case 3:
- io.si_type = SI_BT;
+ io.si_info = &ipmi_bt_si_info;
break;
case 4: /* SSIF, just ignore */
return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0b45b07dec22..1bc42830444d 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -481,8 +481,6 @@ static int ipmi_ssif_thread(void *data)
/* Wait for something to do */
result = wait_for_completion_interruptible(
&ssif_info->wake_thread);
- if (ssif_info->stopping)
- break;
if (result == -ERESTARTSYS)
continue;
init_completion(&ssif_info->wake_thread);
@@ -541,7 +539,8 @@ static void start_resend(struct ssif_info *ssif_info);
static void retry_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ struct ssif_info *ssif_info = timer_container_of(ssif_info, t,
+ retry_timer);
unsigned long oflags, *flags;
bool waiting, resend;
@@ -565,7 +564,8 @@ static void retry_timeout(struct timer_list *t)
static void watch_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer);
+ struct ssif_info *ssif_info = timer_container_of(ssif_info, t,
+ watch_timer);
unsigned long oflags, *flags;
if (ssif_info->stopping)
@@ -1270,10 +1270,8 @@ static void shutdown_ssif(void *send_info)
ssif_info->stopping = true;
timer_delete_sync(&ssif_info->watch_timer);
timer_delete_sync(&ssif_info->retry_timer);
- if (ssif_info->thread) {
- complete(&ssif_info->wake_thread);
+ if (ssif_info->thread)
kthread_stop(ssif_info->thread);
- }
}
static void ssif_remove(struct i2c_client *client)
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f1875b2bebbc..ab759b492fdd 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -150,7 +150,7 @@ static char preaction[16] = "pre_none";
static unsigned char preop_val = WDOG_PREOP_NONE;
static char preop[16] = "preop_none";
-static DEFINE_SPINLOCK(ipmi_read_lock);
+static DEFINE_MUTEX(ipmi_read_mutex);
static char data_to_read;
static DECLARE_WAIT_QUEUE_HEAD(read_q);
static struct fasync_struct *fasync_q;
@@ -363,7 +363,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
{
struct kernel_ipmi_msg msg;
unsigned char data[6];
- int rv;
+ int rv = 0;
struct ipmi_system_interface_addr addr;
int hbnow = 0;
@@ -405,14 +405,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
msg.cmd = IPMI_WDOG_SET_TIMER;
msg.data = data;
msg.data_len = sizeof(data);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- smi_msg,
- recv_msg,
- 1);
+ if (smi_msg)
+ rv = ipmi_request_supply_msgs(watchdog_user,
+ (struct ipmi_addr *) &addr,
+ 0,
+ &msg,
+ NULL,
+ smi_msg,
+ recv_msg,
+ 1);
+ else
+ ipmi_panic_request_and_wait(watchdog_user,
+ (struct ipmi_addr *) &addr, &msg);
if (rv)
pr_warn("set timeout error: %d\n", rv);
else if (send_heartbeat_now)
@@ -431,9 +435,7 @@ static int _ipmi_set_timeout(int do_heartbeat)
atomic_set(&msg_tofree, 2);
- rv = __ipmi_set_timeout(&smi_msg,
- &recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(&smi_msg, &recv_msg, &send_heartbeat_now);
if (rv) {
atomic_set(&msg_tofree, 0);
return rv;
@@ -460,27 +462,10 @@ static int ipmi_set_timeout(int do_heartbeat)
return rv;
}
-static atomic_t panic_done_count = ATOMIC_INIT(0);
-
-static void panic_smi_free(struct ipmi_smi_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-static void panic_recv_free(struct ipmi_recv_msg *msg)
-{
- atomic_dec(&panic_done_count);
-}
-
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
static void panic_halt_ipmi_heartbeat(void)
{
struct kernel_ipmi_msg msg;
struct ipmi_system_interface_addr addr;
- int rv;
/*
* Don't reset the timer if we have the timer turned off, that
@@ -497,24 +482,10 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_add(2, &panic_done_count);
- rv = ipmi_request_supply_msgs(watchdog_user,
- (struct ipmi_addr *) &addr,
- 0,
- &msg,
- NULL,
- &panic_halt_heartbeat_smi_msg,
- &panic_halt_heartbeat_recv_msg,
- 1);
- if (rv)
- atomic_sub(2, &panic_done_count);
+ ipmi_panic_request_and_wait(watchdog_user, (struct ipmi_addr *) &addr,
+ &msg);
}
-static struct ipmi_smi_msg panic_halt_smi_msg =
- INIT_IPMI_SMI_MSG(panic_smi_free);
-static struct ipmi_recv_msg panic_halt_recv_msg =
- INIT_IPMI_RECV_MSG(panic_recv_free);
-
/*
* Special call, doesn't claim any locks. This is only to be called
* at panic or halt time, in run-to-completion mode, when the caller
@@ -526,22 +497,13 @@ static void panic_halt_ipmi_set_timeout(void)
int send_heartbeat_now;
int rv;
- /* Wait for the messages to be free. */
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
- atomic_add(2, &panic_done_count);
- rv = __ipmi_set_timeout(&panic_halt_smi_msg,
- &panic_halt_recv_msg,
- &send_heartbeat_now);
+ rv = __ipmi_set_timeout(NULL, NULL, &send_heartbeat_now);
if (rv) {
- atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
panic_halt_ipmi_heartbeat();
}
- while (atomic_read(&panic_done_count) != 0)
- ipmi_poll_interface(watchdog_user);
}
static int __ipmi_heartbeat(void)
@@ -793,7 +755,7 @@ static ssize_t ipmi_read(struct file *file,
* Reading returns if the pretimeout has gone off, and it only does
* it once per pretimeout.
*/
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (!data_to_read) {
if (file->f_flags & O_NONBLOCK) {
rv = -EAGAIN;
@@ -804,9 +766,9 @@ static ssize_t ipmi_read(struct file *file,
add_wait_queue(&read_q, &wait);
while (!data_to_read && !signal_pending(current)) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
schedule();
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
}
remove_wait_queue(&read_q, &wait);
@@ -818,7 +780,7 @@ static ssize_t ipmi_read(struct file *file,
data_to_read = 0;
out:
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
if (rv == 0) {
if (copy_to_user(buf, &data_to_read, 1))
@@ -856,10 +818,10 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait)
poll_wait(file, &read_q, wait);
- spin_lock_irq(&ipmi_read_lock);
+ mutex_lock(&ipmi_read_mutex);
if (data_to_read)
mask |= (EPOLLIN | EPOLLRDNORM);
- spin_unlock_irq(&ipmi_read_lock);
+ mutex_unlock(&ipmi_read_mutex);
return mask;
}
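With the spinlock replaced by a mutex, the blocking read keeps the classic open-coded wait: register on the queue, then re-check the condition with the lock dropped around schedule(). The skeleton of that loop, as a sketch of the pattern the hunks above implement:

	/* Illustrative sketch of the sleep loop under the new mutex. */
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&read_q, &wait);
	while (!data_to_read && !signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&ipmi_read_mutex);	/* sleepable now: no IRQ lock */
		schedule();
		mutex_lock(&ipmi_read_mutex);
	}
	remove_wait_queue(&read_q, &wait);
	__set_current_state(TASK_RUNNING);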
@@ -932,13 +894,11 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
if (atomic_inc_and_test(&preop_panic_excl))
panic("Watchdog pre-timeout");
} else if (preop_val == WDOG_PREOP_GIVE_DATA) {
- unsigned long flags;
-
- spin_lock_irqsave(&ipmi_read_lock, flags);
+ mutex_lock(&ipmi_read_mutex);
data_to_read = 1;
wake_up_interruptible(&read_q);
kill_fasync(&fasync_q, SIGIO, POLL_IN);
- spin_unlock_irqrestore(&ipmi_read_lock, flags);
+ mutex_unlock(&ipmi_read_mutex);
}
}
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
index e4bd74585d4d..7a52e3ea49ed 100644
--- a/drivers/char/ipmi/ssif_bmc.c
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -297,7 +297,8 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
static void response_timeout(struct timer_list *t)
{
- struct ssif_bmc_ctx *ssif_bmc = from_timer(ssif_bmc, t, response_timer);
+ struct ssif_bmc_ctx *ssif_bmc = timer_container_of(ssif_bmc, t,
+ response_timer);
unsigned long flags;
spin_lock_irqsave(&ssif_bmc->lock, flags);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 169eed162a7f..48839958b0b1 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -61,29 +61,11 @@ static inline int page_is_allowed(unsigned long pfn)
{
return devmem_is_allowed(pfn);
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- u64 from = ((u64)pfn) << PAGE_SHIFT;
- u64 to = from + size;
- u64 cursor = from;
-
- while (cursor < to) {
- if (!devmem_is_allowed(pfn))
- return 0;
- cursor += PAGE_SIZE;
- pfn++;
- }
- return 1;
-}
#else
static inline int page_is_allowed(unsigned long pfn)
{
return 1;
}
-static inline int range_is_allowed(unsigned long pfn, unsigned long size)
-{
- return 1;
-}
#endif
static inline bool should_stop_iteration(void)
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index dda466f9181a..d5accc10a110 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -58,9 +58,8 @@ static LIST_HEAD(misc_list);
static DEFINE_MUTEX(misc_mtx);
/*
- * Assigned numbers, used for dynamic minors
+ * Assigned numbers.
*/
-#define DYNAMIC_MINORS 128 /* like dynamic majors */
static DEFINE_IDA(misc_minors_ida);
static int misc_minor_alloc(int minor)
@@ -69,34 +68,17 @@ static int misc_minor_alloc(int minor)
if (minor == MISC_DYNAMIC_MINOR) {
/* allocate free id */
- ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
- if (ret >= 0) {
- ret = DYNAMIC_MINORS - ret - 1;
- } else {
- ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
- MINORMASK, GFP_KERNEL);
- }
+ ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
+ MINORMASK, GFP_KERNEL);
} else {
- /* specific minor, check if it is in dynamic or misc dynamic range */
- if (minor < DYNAMIC_MINORS) {
- minor = DYNAMIC_MINORS - minor - 1;
- ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
- } else if (minor > MISC_DYNAMIC_MINOR) {
- ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
- } else {
- /* case of non-dynamic minors, no need to allocate id */
- ret = 0;
- }
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
}
return ret;
}
static void misc_minor_free(int minor)
{
- if (minor < DYNAMIC_MINORS)
- ida_free(&misc_minors_ida, DYNAMIC_MINORS - minor - 1);
- else if (minor > MISC_DYNAMIC_MINOR)
- ida_free(&misc_minors_ida, minor);
+ ida_free(&misc_minors_ida, minor);
}
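After the cleanup, minor bookkeeping is a plain IDA keyed by the minor number itself: dynamic requests take any free ID above MISC_DYNAMIC_MINOR, fixed requests reserve exactly their own number. A minimal sketch of the resulting semantics (the fixed minor 130 is just an example value):

	/* Illustrative sketch of the simplified allocation scheme. */
	int minor;

	/* dynamic: any free minor in (MISC_DYNAMIC_MINOR, MINORMASK] */
	minor = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
				MINORMASK, GFP_KERNEL);

	/* fixed: reserve that exact value; fails if already taken */
	minor = ida_alloc_range(&misc_minors_ida, 130, 130, GFP_KERNEL);

	ida_free(&misc_minors_ida, minor);	/* release maps 1:1, no remapping */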
#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38f2fab29c56..b8b24b6ed3fe 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -309,11 +309,11 @@ static void crng_reseed(struct work_struct *work)
* key value, at index 4, so the state should always be zeroed out
* immediately after using in order to maintain forward secrecy.
* If the state cannot be erased in a timely manner, then it is
- * safer to set the random_data parameter to &chacha_state[4] so
- * that this function overwrites it before returning.
+ * safer to set the random_data parameter to &chacha_state->x[4]
+ * so that this function overwrites it before returning.
*/
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
- u32 chacha_state[CHACHA_STATE_WORDS],
+ struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
u8 first_block[CHACHA_BLOCK_SIZE];
@@ -321,8 +321,8 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
BUG_ON(random_data_len > 32);
chacha_init_consts(chacha_state);
- memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
- memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ memcpy(&chacha_state->x[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state->x[12], 0, sizeof(u32) * 4);
chacha20_block(chacha_state, first_block);
memcpy(key, first_block, CHACHA_KEY_SIZE);
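The conversion to struct chacha_state keeps the fast-key-erasure shape: expand one block, feed the first 32 bytes back as the next key, and hand out the remainder. A compressed sketch of that flow, assuming the struct exposes its words as ->x[] exactly as the hunk shows:

	/* Illustrative sketch of fast key erasure with the new type. */
	struct chacha_state state;
	u8 block[CHACHA_BLOCK_SIZE];

	chacha_init_consts(&state);
	memcpy(&state.x[4], key, CHACHA_KEY_SIZE);	/* key words 4..11 */
	memset(&state.x[12], 0, 4 * sizeof(u32));	/* counter + nonce */

	chacha20_block(&state, block);
	memcpy(key, block, CHACHA_KEY_SIZE);	/* ratchet: old key is gone */
	/* block[32..63] may be handed out as random data */
	chacha_zeroize_state(&state);		/* forward secrecy */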
@@ -335,7 +335,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
* random data. It also returns up to 32 bytes on its own of random data
* that may be used; random_data_len may not be greater than 32.
*/
-static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+static void crng_make_state(struct chacha_state *chacha_state,
u8 *random_data, size_t random_data_len)
{
unsigned long flags;
@@ -395,7 +395,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
static void _get_random_bytes(void *buf, size_t len)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 tmp[CHACHA_BLOCK_SIZE];
size_t first_block_len;
@@ -403,26 +403,26 @@ static void _get_random_bytes(void *buf, size_t len)
return;
first_block_len = min_t(size_t, 32, len);
- crng_make_state(chacha_state, buf, first_block_len);
+ crng_make_state(&chacha_state, buf, first_block_len);
len -= first_block_len;
buf += first_block_len;
while (len) {
if (len < CHACHA_BLOCK_SIZE) {
- chacha20_block(chacha_state, tmp);
+ chacha20_block(&chacha_state, tmp);
memcpy(buf, tmp, len);
memzero_explicit(tmp, sizeof(tmp));
break;
}
- chacha20_block(chacha_state, buf);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, buf);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
len -= CHACHA_BLOCK_SIZE;
buf += CHACHA_BLOCK_SIZE;
}
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
}
/*
@@ -441,7 +441,7 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
- u32 chacha_state[CHACHA_STATE_WORDS];
+ struct chacha_state chacha_state;
u8 block[CHACHA_BLOCK_SIZE];
size_t ret = 0, copied;
@@ -453,21 +453,22 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
* bytes, in case userspace causes copy_to_iter() below to sleep
* forever, so that we still retain forward secrecy in that case.
*/
- crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ crng_make_state(&chacha_state, (u8 *)&chacha_state.x[4],
+ CHACHA_KEY_SIZE);
/*
* However, if we're doing a read of len <= 32, we don't need to
* use chacha_state after, so we can simply return those bytes to
* the user directly.
*/
if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
- ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
goto out_zero_chacha;
}
for (;;) {
- chacha20_block(chacha_state, block);
- if (unlikely(chacha_state[12] == 0))
- ++chacha_state[13];
+ chacha20_block(&chacha_state, block);
+ if (unlikely(chacha_state.x[12] == 0))
+ ++chacha_state.x[13];
copied = copy_to_iter(block, sizeof(block), iter);
ret += copied;
@@ -484,7 +485,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter)
memzero_explicit(block, sizeof(block));
out_zero_chacha:
- memzero_explicit(chacha_state, sizeof(chacha_state));
+ chacha_zeroize_state(&chacha_state);
return ret ? ret : -EFAULT;
}
@@ -726,6 +727,7 @@ static void __cold _credit_init_bits(size_t bits)
static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
+ int m;
if (!bits)
return;
@@ -748,9 +750,9 @@ static void __cold _credit_init_bits(size_t bits)
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
- if (urandom_warning.missed)
- pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
- urandom_warning.missed);
+ m = ratelimit_state_get_miss(&urandom_warning);
+ if (m)
+ pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
spin_lock_irqsave(&base_crng.lock, flags);
/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
@@ -1311,9 +1313,9 @@ static void __cold try_to_generate_entropy(void)
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
- * executing by checking try_to_del_timer_sync(), before queueing the next one.
+ * executing by checking timer_delete_sync_try(), before queueing the next one.
*/
- if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
+ if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
struct cpumask timer_cpus;
unsigned int num_cpus;
@@ -1353,7 +1355,7 @@ static void __cold try_to_generate_entropy(void)
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
timer_delete_sync(&stack->timer);
- destroy_timer_on_stack(&stack->timer);
+ timer_destroy_on_stack(&stack->timer);
}
@@ -1466,7 +1468,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
if (!crng_ready()) {
if (!ratelimit_disable && maxwarn <= 0)
- ++urandom_warning.missed;
+ ratelimit_state_inc_miss(&urandom_warning);
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fe4f3a609934..dddd702b2454 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -234,5 +234,15 @@ config TCG_FTPM_TEE
help
This driver proxies for firmware TPM running in TEE.
+config TCG_SVSM
+ tristate "SNP SVSM vTPM interface"
+ depends on AMD_MEM_ENCRYPT
+ help
+ This is a driver for the AMD SVSM vTPM protocol that a SEV-SNP guest
+ OS can use to discover and talk to a vTPM emulated by the Secure VM
+ Service Module (SVSM) in the guest context, but at a more privileged
+ level (usually VMPL0). To compile this driver as a module, choose M
+ here; the module will be called tpm_svsm.
+
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 2b004df8c04b..9de1b3ea34a9 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -45,3 +45,4 @@ obj-$(CONFIG_TCG_CRB) += tpm_crb.o
obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
+obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 12ee42a31c71..e7913b2853d5 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -257,11 +257,8 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
(unsigned char *)(v + sizeof(struct tcpa_event));
eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
- if (!eventname) {
- printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
- __func__);
- return -EFAULT;
- }
+ if (!eventname)
+ return -ENOMEM;
/* 1st: PCR */
seq_printf(m, "%2d ", do_endian_conversion(event->pcr_index));
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index e49a19fea3bd..dc882fc9fa9e 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -201,7 +201,7 @@ static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void
*/
u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset)
{
- u8 value;
+ u8 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u8);
*/
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset)
{
- u16 value;
+ u16 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -235,7 +235,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u16);
*/
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
{
- u32 value;
+ u32 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 11deaf538e87..f2a5e09257dd 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -91,7 +91,7 @@ out:
static void user_reader_timeout(struct timer_list *t)
{
- struct file_priv *priv = from_timer(priv, t, user_read_timer);
+ struct file_priv *priv = timer_container_of(priv, t, user_read_timer);
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
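The from_timer() sites across this series move to timer_container_of(), the renamed helper with identical semantics: recover the enclosing object from the timer_list embedded in it. The pattern, as a generic sketch:

	/* Illustrative sketch of the renamed helper. */
	struct my_ctx {
		struct timer_list timer;
		int pending;
	};

	static void my_timeout(struct timer_list *t)
	{
		/* container_of() on the embedded timer; old name: from_timer() */
		struct my_ctx *ctx = timer_container_of(ctx, t, timer);

		ctx->pending = 0;
	}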
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 3f89635ba5e8..7b5049b3d476 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -40,11 +40,6 @@
*
* These are the usage functions:
*
- * tpm2_start_auth_session() which allocates the opaque auth structure
- * and gets a session from the TPM. This must be called before
- * any of the following functions. The session is protected by a
- * session_key which is derived from a random salt value
- * encrypted to the NULL seed.
* tpm2_end_auth_session() kills the session and frees the resources.
* Under normal operation this function is done by
* tpm_buf_check_hmac_response(), so this is only to be used on
@@ -963,16 +958,13 @@ err:
}
/**
- * tpm2_start_auth_session() - create a HMAC authentication session with the TPM
- * @chip: the TPM chip structure to create the session with
+ * tpm2_start_auth_session() - Create an HMAC authentication session
+ * @chip: A TPM chip
*
- * This function loads the NULL seed from its saved context and starts
- * an authentication session on the null seed, fills in the
- * @chip->auth structure to contain all the session details necessary
- * for performing the HMAC, encrypt and decrypt operations and
- * returns. The NULL seed is flushed before this function returns.
+ * Loads the ephemeral key (null seed), and starts an HMAC authenticated
+ * session. The null seed is flushed before the function returns.
*
- * Return: zero on success or actual error encountered.
+ * Returns zero on success, or a POSIX error code.
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
@@ -1024,7 +1016,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
/* hash algorithm for session */
tpm_buf_append_u16(&buf, TPM_ALG_SHA256);
- rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
+ rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession"));
tpm2_flush_context(chip, null_key);
if (rc == TPM2_RC_SUCCESS)
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
index 3169a87a56b6..4ead61f01299 100644
--- a/drivers/char/tpm/tpm_crb_ffa.c
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -38,9 +38,11 @@
* messages.
*
* All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
- * are using the AArch32 SMC calling convention with register usage as
- * defined in FF-A specification:
- * w0: Function ID (0x8400006F or 0x84000070)
+ * are using the AArch32 or AArch64 SMC calling convention with register usage
+ * as defined in FF-A specification:
+ * w0: Function ID
+ * - for 32-bit: 0x8400006F or 0x84000070
+ * - for 64-bit: 0xC400006F or 0xC4000070
* w1: Source/Destination IDs
* w2: Reserved (MBZ)
* w3-w7: Implementation defined, free to be used below
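For the 64-bit convention the payload moves into struct ffa_send_direct_data2, whose data[] array carries what data1..data4 carried before. A sketch of how the two encodings line up, assuming the kernel's existing ffa_send_direct_data{,2} layouts and using the version query as the example message:

	/* Illustrative sketch: same logical message, two ABIs. */
	struct ffa_send_direct_data m32 = {
		.data1 = CRB_FFA_GET_INTERFACE_VERSION,	/* w4: TPM service func */
		.data2 = 0, .data3 = 0, .data4 = 0,	/* w5-w7: arguments */
	};

	struct ffa_send_direct_data2 m64 = {
		/* x4-x7 carry the same payload for FFA_MSG_SEND_DIRECT_REQ2 */
		.data = { CRB_FFA_GET_INTERFACE_VERSION, 0, 0, 0 },
	};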
@@ -68,7 +70,8 @@
#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001
/*
- * Return information on a given feature of the TPM service
+ * Notifies the TPM service that a TPM command or TPM locality request is
+ * ready to be processed, and allows the TPM service to process it.
* Call register usage:
* w3: Not used (MBZ)
* w4: TPM service function ID, CRB_FFA_START
@@ -105,7 +108,10 @@ struct tpm_crb_ffa {
u16 minor_version;
/* lock to protect sending of FF-A messages: */
struct mutex msg_data_lock;
- struct ffa_send_direct_data direct_msg_data;
+ union {
+ struct ffa_send_direct_data direct_msg_data;
+ struct ffa_send_direct_data2 direct_msg_data2;
+ };
};
static struct tpm_crb_ffa *tpm_crb_ffa;
@@ -185,18 +191,34 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
- memset(&tpm_crb_ffa->direct_msg_data, 0x00,
- sizeof(struct ffa_send_direct_data));
-
- tpm_crb_ffa->direct_msg_data.data1 = func_id;
- tpm_crb_ffa->direct_msg_data.data2 = a0;
- tpm_crb_ffa->direct_msg_data.data3 = a1;
- tpm_crb_ffa->direct_msg_data.data4 = a2;
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ memset(&tpm_crb_ffa->direct_msg_data2, 0x00,
+ sizeof(struct ffa_send_direct_data2));
+
+ tpm_crb_ffa->direct_msg_data2.data[0] = func_id;
+ tpm_crb_ffa->direct_msg_data2.data[1] = a0;
+ tpm_crb_ffa->direct_msg_data2.data[2] = a1;
+ tpm_crb_ffa->direct_msg_data2.data[3] = a2;
+
+ ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data2);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]);
+ } else {
+ memset(&tpm_crb_ffa->direct_msg_data, 0x00,
+ sizeof(struct ffa_send_direct_data));
+
+ tpm_crb_ffa->direct_msg_data.data1 = func_id;
+ tpm_crb_ffa->direct_msg_data.data2 = a0;
+ tpm_crb_ffa->direct_msg_data.data3 = a1;
+ tpm_crb_ffa->direct_msg_data.data4 = a2;
+
+ ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
+ }
- ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
- &tpm_crb_ffa->direct_msg_data);
- if (!ret)
- ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
return ret;
}
@@ -231,8 +253,13 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
if (!rc) {
- *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
- *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
+ } else {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ }
}
return rc;
@@ -277,8 +304,9 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
tpm_crb_ffa = ERR_PTR(-ENODEV); // set tpm_crb_ffa so we can detect probe failure
- if (!ffa_partition_supports_direct_recv(ffa_dev)) {
- pr_err("TPM partition doesn't support direct message receive.\n");
+ if (!ffa_partition_supports_direct_recv(ffa_dev) &&
+ !ffa_partition_supports_direct_req2_recv(ffa_dev)) {
+ dev_warn(&ffa_dev->dev, "partition doesn't support direct message receive.\n");
return -EINVAL;
}
@@ -299,17 +327,17 @@ static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version,
&tpm_crb_ffa->minor_version);
if (rc) {
- pr_err("failed to get crb interface version. rc:%d", rc);
+ dev_err(&ffa_dev->dev, "failed to get crb interface version. rc:%d\n", rc);
goto out;
}
- pr_info("ABI version %u.%u", tpm_crb_ffa->major_version,
+ dev_info(&ffa_dev->dev, "ABI version %u.%u\n", tpm_crb_ffa->major_version,
tpm_crb_ffa->minor_version);
if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR ||
(tpm_crb_ffa->minor_version > 0 &&
tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) {
- pr_err("Incompatible ABI version");
+ dev_warn(&ffa_dev->dev, "Incompatible ABI version\n");
goto out;
}
diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c
new file mode 100644
index 000000000000..4280edf427d6
--- /dev/null
+++ b/drivers/char/tpm/tpm_svsm.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ *
+ * Driver for the vTPM defined by the AMD SVSM spec [1].
+ *
+ * The specification defines a protocol that a SEV-SNP guest OS can use to
+ * discover and talk to a vTPM emulated by the Secure VM Service Module (SVSM)
+ * in the guest context, but at a more privileged level (usually VMPL0).
+ *
+ * [1] "Secure VM Service Module for SEV-SNP Guests"
+ * Publication # 58019 Revision: 1.00
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/tpm_svsm.h>
+
+#include <asm/sev.h>
+
+#include "tpm.h"
+
+struct tpm_svsm_priv {
+ void *buffer;
+};
+
+static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+ int ret;
+
+ ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, len);
+ if (ret)
+ return ret;
+
+ /*
+ * The SVSM call uses the same buffer for the command and for the
+ * response, so after this call, the buffer will contain the response
+ * that can be used by .recv() op.
+ */
+ return snp_svsm_vtpm_send_command(priv->buffer);
+}
+
+static int tpm_svsm_recv(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+
+ /*
+ * The internal buffer contains the response after we send the command
+ * to SVSM.
+ */
+ return svsm_vtpm_cmd_response_parse(priv->buffer, buf, len);
+}
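Because SVSM reuses one page for both directions, .send() serializes the command into priv->buffer and .recv() parses the response out of the same page. A sketch of one round trip as the TPM core drives it, using only the helper names visible in this hunk (cmd/rsp and the lengths are stand-ins; error handling elided):

	/* Illustrative sketch of one command/response round trip. */
	svsm_vtpm_cmd_request_fill(priv->buffer, 0, cmd, cmd_len);	/* marshal */
	snp_svsm_vtpm_send_command(priv->buffer);	/* VMPL0 call; the buffer
							 * now holds the response */
	svsm_vtpm_cmd_response_parse(priv->buffer, rsp, rsp_len);	/* unmarshal */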
+
+static struct tpm_class_ops tpm_chip_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = tpm_svsm_recv,
+ .send = tpm_svsm_send,
+};
+
+static int __init tpm_svsm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tpm_svsm_priv *priv;
+ struct tpm_chip *chip;
+ int err;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * The maximum buffer supported is one page (see SVSM_VTPM_MAX_BUFFER
+ * in tpm_svsm.h).
+ */
+ priv->buffer = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!priv->buffer)
+ return -ENOMEM;
+
+ chip = tpmm_chip_alloc(dev, &tpm_chip_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dev_set_drvdata(&chip->dev, priv);
+
+ err = tpm2_probe(chip);
+ if (err)
+ return err;
+
+ err = tpm_chip_register(chip);
+ if (err)
+ return err;
+
+ dev_info(dev, "SNP SVSM vTPM %s device\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2");
+
+ return 0;
+}
+
+static void __exit tpm_svsm_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = platform_get_drvdata(pdev);
+
+ tpm_chip_unregister(chip);
+}
+
+/*
+ * tpm_svsm_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound
+ * at runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver tpm_svsm_driver __refdata = {
+ .remove = __exit_p(tpm_svsm_remove),
+ .driver = {
+ .name = "tpm-svsm",
+ },
+};
+
+module_platform_driver_probe(tpm_svsm_driver, tpm_svsm_probe);
+
+MODULE_DESCRIPTION("SNP SVSM vTPM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:tpm-svsm");
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 970d02c337c7..6c3aa480396b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -54,7 +54,7 @@ enum tis_int_flags {
enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
- TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_LONG_TIMEOUT = 4000, /* 4 secs */
TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index 11b7c4749274..efb1ae834265 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1184,8 +1184,7 @@ static int xillybus_flush(struct file *filp, fl_owner_t id)
static void xillybus_autoflush(struct work_struct *work)
{
- struct delayed_work *workitem = container_of(
- work, struct delayed_work, work);
+ struct delayed_work *workitem = to_delayed_work(work);
struct xilly_channel *channel = container_of(
workitem, struct xilly_channel, rd_workitem);
int rc;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 713573b6c86c..19c1ed280fd7 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -517,6 +517,7 @@ source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/socfpga/Kconfig"
source "drivers/clk/sophgo/Kconfig"
+source "drivers/clk/spacemit/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index bf4bd45adc3a..42867cd37c33 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -145,6 +145,7 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-y += socfpga/
obj-y += sophgo/
+obj-y += spacemit/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 2b0ea882f1e4..0171e6b2bfca 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -53,24 +53,6 @@ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
-/*
- * Build a scaled divider value as close as possible to the
- * given whole part (div_value) and fractional part (expressed
- * in billionths).
- */
-u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
-{
- u64 combined;
-
- BUG_ON(!div_value);
- BUG_ON(billionths >= BILLION);
-
- combined = (u64)div_value * BILLION + billionths;
- combined <<= div->u.s.frac_width;
-
- return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
-}
-
/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index e09655024ac2..348a3454ce40 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -492,8 +492,6 @@ extern struct clk_ops kona_peri_clk_ops;
/* Externally visible functions */
extern u64 scaled_div_max(struct bcm_clk_div *div);
-extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
- u32 billionths);
extern void __init kona_dt_ccu_setup(struct ccu_data *ccu,
struct device_node *node);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 0e1fe3759530..8e4fde03ed23 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -286,6 +286,8 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
init.name = devm_kasprintf(rpi->dev, GFP_KERNEL,
"fw-clk-%s",
rpi_firmware_clk_names[id]);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
init.flags = CLK_GET_RATE_NOCACHE;
@@ -480,4 +482,3 @@ module_platform_driver(raspberrypi_clk_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
MODULE_DESCRIPTION("Raspberry Pi firmware clock driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:raspberrypi-clk");
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 014db6386624..8ddf3a9a53df 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
+ clk_data->num = S2MPS11_CLKS_NUM;
+
switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
@@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->hws[i] = &s2mps11_clks[i].hw;
}
- clk_data->num = S2MPS11_CLKS_NUM;
of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
clk_data);
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 6807a2efa93b..bfb6bbdc036c 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -763,13 +763,14 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return PTR_ERR(clk);
}
- child = of_get_child_by_name(node, "pllout");
- if (of_device_is_available(child))
+ child = of_get_available_child_by_name(node, "pllout");
+ if (child) {
of_clk_add_provider(child, of_clk_src_simple_get, clk);
- of_node_put(child);
+ of_node_put(child);
+ }
- child = of_get_child_by_name(node, "sysclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "sysclk");
+ if (child) {
struct clk_onecell_data *clk_data;
struct clk **clks;
int n_clks = max_sysclk_id + 1;
@@ -803,11 +804,11 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
clks[(*div_info)->id] = clk;
}
of_clk_add_provider(child, of_clk_src_onecell_get, clk_data);
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "auxclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "auxclk");
+ if (child) {
char child_name[MAX_NAME_SIZE];
snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name);
@@ -818,11 +819,12 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
child_name, PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+
+ of_node_put(child);
}
- of_node_put(child);
- child = of_get_child_by_name(node, "obsclk");
- if (of_device_is_available(child)) {
+ child = of_get_available_child_by_name(node, "obsclk");
+ if (child) {
if (obsclk_info)
clk = davinci_pll_obsclk_register(dev, obsclk_info, base);
else
@@ -833,8 +835,8 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
PTR_ERR(clk));
else
of_clk_add_provider(child, of_clk_src_simple_get, clk);
+ of_node_put(child);
}
- of_node_put(child);
return 0;
}
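The helper folds the get-child lookup and the availability check into one call that returns NULL for absent or disabled nodes, so each branch now owns its own of_node_put(). The before/after shape of the conversion, as a sketch:

	/* Illustrative sketch of the conversion pattern. */

	/* before: two steps, unconditional put */
	child = of_get_child_by_name(node, "pllout");
	if (of_device_is_available(child))
		/* use child */;
	of_node_put(child);

	/* after: one step, put only when a node was returned */
	child = of_get_available_child_by_name(node, "pllout");
	if (child) {
		/* use child */
		of_node_put(child);
	}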
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index be2e3a5f8336..ff003dc5ab20 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -55,7 +55,7 @@ config COMMON_CLK_MESON_CPU_DYNDIV
config COMMON_CLK_MESON8B
bool "Meson8 SoC Clock controller support"
depends on ARM
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
@@ -70,7 +70,7 @@ config COMMON_CLK_MESON8B
config COMMON_CLK_GXBB
tristate "GXBB and GXL SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
@@ -86,7 +86,7 @@ config COMMON_CLK_GXBB
config COMMON_CLK_AXG
tristate "AXG SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -136,7 +136,7 @@ config COMMON_CLK_A1_PERIPHERALS
config COMMON_CLK_C3_PLL
tristate "Amlogic C3 PLL clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_CLKC_UTILS
@@ -149,7 +149,7 @@ config COMMON_CLK_C3_PLL
config COMMON_CLK_C3_PERIPHERALS
tristate "Amlogic C3 peripherals clock controller"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_CLKC_UTILS
@@ -163,7 +163,7 @@ config COMMON_CLK_C3_PERIPHERALS
config COMMON_CLK_G12A
tristate "G12 and SM1 SoC clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -181,7 +181,7 @@ config COMMON_CLK_G12A
config COMMON_CLK_S4_PLL
tristate "S4 SoC PLL clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
@@ -194,7 +194,7 @@ config COMMON_CLK_S4_PLL
config COMMON_CLK_S4_PERIPHERALS
tristate "S4 SoC peripherals clock controllers support"
depends on ARM64
- default y
+ default ARCH_MESON
select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index ceabebb1863d..d9e546e006d7 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -4093,6 +4093,7 @@ static const struct clk_parent_data spicc_sclk_parent_data[] = {
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div5.hw },
{ .hw = &g12a_fclk_div7.hw },
};
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index 76ece6c4a969..3ba01622d8f0 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -111,7 +111,11 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
* driver, there seems to be no better place to do this. So do it here!
*/
cpu_dev = get_cpu_device(0);
- dev_pm_domain_attach(cpu_dev, true);
+ ret = dev_pm_domain_attach(cpu_dev, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret);
+ goto err;
+ }
return 0;
diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c
index 11bd2e234811..50e5a131261b 100644
--- a/drivers/clk/qcom/camcc-sa8775p.c
+++ b/drivers/clk/qcom/camcc-sa8775p.c
@@ -10,7 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <dt-bindings/clock/qcom,sa8775p-camcc.h>
+#include <dt-bindings/clock/qcom,qcs8300-camcc.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
@@ -1681,6 +1681,24 @@ static struct clk_branch cam_cc_sm_obs_clk = {
},
};
+static struct clk_branch cam_cc_titan_top_accu_shift_clk = {
+ .halt_reg = 0x131f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x131f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_titan_top_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc cam_cc_titan_top_gdsc = {
.gdscr = 0x131bc,
.en_rest_wait_val = 0x2,
@@ -1775,6 +1793,7 @@ static struct clk_regmap *cam_cc_sa8775p_clocks[] = {
[CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
[CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr,
+ [CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = NULL,
[CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
[CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
};
@@ -1811,6 +1830,7 @@ static const struct qcom_cc_desc cam_cc_sa8775p_desc = {
};
static const struct of_device_id cam_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,qcs8300-camcc" },
{ .compatible = "qcom,sa8775p-camcc" },
{ }
};
@@ -1841,10 +1861,83 @@ static int cam_cc_sa8775p_probe(struct platform_device *pdev)
clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- /* Keep some clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
- qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ if (device_is_compatible(&pdev->dev, "qcom,qcs8300-camcc")) {
+ cam_cc_camnoc_axi_clk_src.cmd_rcgr = 0x13154;
+ cam_cc_camnoc_axi_clk.halt_reg = 0x1316c;
+ cam_cc_camnoc_axi_clk.clkr.enable_reg = 0x1316c;
+ cam_cc_camnoc_dcd_xo_clk.halt_reg = 0x13174;
+ cam_cc_camnoc_dcd_xo_clk.clkr.enable_reg = 0x13174;
+
+ cam_cc_csi0phytimer_clk_src.cmd_rcgr = 0x15054;
+ cam_cc_csi1phytimer_clk_src.cmd_rcgr = 0x15078;
+ cam_cc_csi2phytimer_clk_src.cmd_rcgr = 0x15098;
+ cam_cc_csid_clk_src.cmd_rcgr = 0x13134;
+
+ cam_cc_mclk0_clk_src.cmd_rcgr = 0x15000;
+ cam_cc_mclk1_clk_src.cmd_rcgr = 0x1501c;
+ cam_cc_mclk2_clk_src.cmd_rcgr = 0x15038;
+
+ cam_cc_fast_ahb_clk_src.cmd_rcgr = 0x13104;
+ cam_cc_slow_ahb_clk_src.cmd_rcgr = 0x1311c;
+ cam_cc_xo_clk_src.cmd_rcgr = 0x131b8;
+ cam_cc_sleep_clk_src.cmd_rcgr = 0x131d4;
+
+ cam_cc_core_ahb_clk.halt_reg = 0x131b4;
+ cam_cc_core_ahb_clk.clkr.enable_reg = 0x131b4;
+
+ cam_cc_cpas_ahb_clk.halt_reg = 0x130f4;
+ cam_cc_cpas_ahb_clk.clkr.enable_reg = 0x130f4;
+ cam_cc_cpas_fast_ahb_clk.halt_reg = 0x130fc;
+ cam_cc_cpas_fast_ahb_clk.clkr.enable_reg = 0x130fc;
+
+ cam_cc_csi0phytimer_clk.halt_reg = 0x1506c;
+ cam_cc_csi0phytimer_clk.clkr.enable_reg = 0x1506c;
+ cam_cc_csi1phytimer_clk.halt_reg = 0x15090;
+ cam_cc_csi1phytimer_clk.clkr.enable_reg = 0x15090;
+ cam_cc_csi2phytimer_clk.halt_reg = 0x150b0;
+ cam_cc_csi2phytimer_clk.clkr.enable_reg = 0x150b0;
+ cam_cc_csid_clk.halt_reg = 0x1314c;
+ cam_cc_csid_clk.clkr.enable_reg = 0x1314c;
+ cam_cc_csid_csiphy_rx_clk.halt_reg = 0x15074;
+ cam_cc_csid_csiphy_rx_clk.clkr.enable_reg = 0x15074;
+ cam_cc_csiphy0_clk.halt_reg = 0x15070;
+ cam_cc_csiphy0_clk.clkr.enable_reg = 0x15070;
+ cam_cc_csiphy1_clk.halt_reg = 0x15094;
+ cam_cc_csiphy1_clk.clkr.enable_reg = 0x15094;
+ cam_cc_csiphy2_clk.halt_reg = 0x150b4;
+ cam_cc_csiphy2_clk.clkr.enable_reg = 0x150b4;
+
+ cam_cc_mclk0_clk.halt_reg = 0x15018;
+ cam_cc_mclk0_clk.clkr.enable_reg = 0x15018;
+ cam_cc_mclk1_clk.halt_reg = 0x15034;
+ cam_cc_mclk1_clk.clkr.enable_reg = 0x15034;
+ cam_cc_mclk2_clk.halt_reg = 0x15050;
+ cam_cc_mclk2_clk.clkr.enable_reg = 0x15050;
+ cam_cc_qdss_debug_xo_clk.halt_reg = 0x1319c;
+ cam_cc_qdss_debug_xo_clk.clkr.enable_reg = 0x1319c;
+
+ cam_cc_titan_top_gdsc.gdscr = 0x131a0;
+
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_CSIPHY3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK_SRC] = NULL;
+ cam_cc_sa8775p_clocks[CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] =
+ &cam_cc_titan_top_accu_shift_clk.clkr;
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13178); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131d0); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_SLEEP_CLK */
+ } else {
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+ }
ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap);
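The QCS8300 support above reuses the SA8775p camera clock controller wholesale: probe detects the derivative compatible, rewrites the register offsets in the shared static descriptors, and NULLs out the table slots for clocks the smaller SoC lacks. A minimal userspace sketch of this fixup-at-probe pattern follows; the struct, names and offsets are illustrative, not the driver's real layout.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for one of the driver's static clock descriptors. */
struct branch_clk {
	const char *name;
	unsigned int halt_reg;
	unsigned int enable_reg;
};

static struct branch_clk camnoc_axi_clk = {
	.name = "cam_cc_camnoc_axi_clk",
	.halt_reg = 0x13184,	/* made-up base-SoC offset */
	.enable_reg = 0x13184,
};

/* Rewrite the shared tables in place when probing the derivative SoC. */
static void fixup_for_compatible(const char *compat)
{
	if (!strcmp(compat, "qcom,qcs8300-camcc")) {
		camnoc_axi_clk.halt_reg = 0x1316c;
		camnoc_axi_clk.enable_reg = 0x1316c;
	}
}

int main(void)
{
	fixup_for_compatible("qcom,qcs8300-camcc");
	printf("%s: halt=0x%x enable=0x%x\n", camnoc_axi_clk.name,
	       camnoc_axi_clk.halt_reg, camnoc_axi_clk.enable_reg);
	return 0;
}

Patching static tables avoids duplicating a near-identical ~1800-line clock description; the implicit trade-off is that only one variant can exist per system, which holds for an on-SoC clock controller.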
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index 1871970fb046..8aac97d29ce3 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -1695,6 +1695,9 @@ static struct clk_branch camcc_sys_tmr_clk = {
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "bps_gdsc",
},
@@ -1704,6 +1707,9 @@ static struct gdsc bps_gdsc = {
static struct gdsc ipe_0_gdsc = {
.gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ipe_0_gdsc",
},
@@ -1713,6 +1719,9 @@ static struct gdsc ipe_0_gdsc = {
static struct gdsc ife_0_gdsc = {
.gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_0_gdsc",
},
@@ -1721,6 +1730,9 @@ static struct gdsc ife_0_gdsc = {
static struct gdsc ife_1_gdsc = {
.gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_1_gdsc",
},
@@ -1729,6 +1741,9 @@ static struct gdsc ife_1_gdsc = {
static struct gdsc ife_2_gdsc = {
.gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ife_2_gdsc",
},
@@ -1737,6 +1752,9 @@ static struct gdsc ife_2_gdsc = {
static struct gdsc titan_top_gdsc = {
.gdscr = 0x14004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "titan_top_gdsc",
},
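The three wait values added to each SM6350 GDSC program the power controller's enable/disable wait states instead of relying on whatever the bootloader left in the register. A sketch of how such values pack into a GDSCR word; the field shifts follow the usual qcom GDSC register layout and are stated here as an assumption.

#include <stdio.h>

/* Assumed GDSCR wait-state field positions (qcom GDSC convention). */
#define CLK_DIS_WAIT_SHIFT	12
#define EN_FEW_WAIT_SHIFT	16
#define EN_REST_WAIT_SHIFT	20

int main(void)
{
	/* Pack the values used in the hunks above: 0xf / 0x2 / 0x2. */
	unsigned int val = (0xfu << CLK_DIS_WAIT_SHIFT) |
			   (0x2u << EN_FEW_WAIT_SHIFT) |
			   (0x2u << EN_REST_WAIT_SHIFT);

	printf("GDSCR wait fields = 0x%08x\n", val); /* 0x0022f000 */
	return 0;
}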
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c7675930fde1..00fb3e53a388 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -66,6 +66,8 @@ struct clk_rpmh {
struct clk_rpmh_desc {
struct clk_hw **clks;
size_t num_clks;
+ /* RPMh clocks clkaN are optional for this platform */
+ bool clka_optional;
};
static DEFINE_MUTEX(rpmh_clk_lock);
@@ -648,6 +650,7 @@ static struct clk_hw *sm8550_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8550 = {
.clks = sm8550_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8550_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sm8650_rpmh_clocks[] = {
@@ -679,6 +682,7 @@ static struct clk_hw *sm8650_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8650 = {
.clks = sm8650_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8650_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *sc7280_rpmh_clocks[] = {
@@ -847,6 +851,7 @@ static struct clk_hw *sm8750_rpmh_clocks[] = {
static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
.clks = sm8750_rpmh_clocks,
.num_clks = ARRAY_SIZE(sm8750_rpmh_clocks),
+ .clka_optional = true,
};
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
@@ -890,6 +895,12 @@ static int clk_rpmh_probe(struct platform_device *pdev)
rpmh_clk = to_clk_rpmh(hw_clks[i]);
res_addr = cmd_db_read_addr(rpmh_clk->res_name);
if (!res_addr) {
+ hw_clks[i] = NULL;
+
+ if (desc->clka_optional &&
+ !strncmp(rpmh_clk->res_name, "clka", sizeof("clka") - 1))
+ continue;
+
dev_err(&pdev->dev, "missing RPMh resource address for %s\n",
rpmh_clk->res_name);
return -ENODEV;
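With clka_optional set, probe now clears the table slot and silently skips a missing clkaN resource instead of failing the whole controller; any other missing resource remains fatal. The prefix test uses a compile-time length, isolated below as a standalone check (the helper name is hypothetical).

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* True if a missing cmd-db entry may be skipped rather than treated as fatal.
 * sizeof("clka") - 1 is 4, i.e. strlen("clka") evaluated at compile time. */
static bool rpmh_clk_is_optional(bool clka_optional, const char *res_name)
{
	return clka_optional &&
	       !strncmp(res_name, "clka", sizeof("clka") - 1);
}

int main(void)
{
	printf("%d\n", rpmh_clk_is_optional(true, "clka1")); /* 1: skipped */
	printf("%d\n", rpmh_clk_is_optional(true, "clkb2")); /* 0: fatal */
	return 0;
}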
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index e703ecf00e44..b0bd163a449c 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -681,6 +681,9 @@ static struct clk_branch disp_cc_xo_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x1004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 7431c9a65044..45193b3d714b 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -432,7 +432,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
{ P_GPLL1_AUX, 2 },
- { P_GPLL6, 2 },
+ { P_GPLL6, 3 },
{ P_SLEEP_CLK, 6 },
};
@@ -1113,7 +1113,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
};
static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
- F(24000000, P_GPLL0, 1, 1, 45),
+ F(24000000, P_GPLL6, 1, 1, 45),
F(66670000, P_GPLL0, 12, 0, 0),
{ }
};
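The two MSM8939 fixes belong together: the parent map gave GPLL6 the same mux value as GPLL1_AUX, and the 24 MHz MCLK entry claimed GPLL0 as its source even though its M/N pair only works out from GPLL6. With the rates customary on this SoC (GPLL0 = 800 MHz, GPLL6 = 1080 MHz, stated here as an assumption), the arithmetic behind F(24000000, P_GPLL6, 1, 1, 45) checks out exactly:

#include <stdio.h>

int main(void)
{
	/* F(rate, src, pre_div, m, n): rate = src / pre_div * m / n */
	unsigned long long gpll6 = 1080000000ULL; /* assumed GPLL6 rate */
	unsigned long long gpll0 = 800000000ULL;  /* assumed GPLL0 rate */

	printf("GPLL6 / 1 * 1 / 45 = %llu Hz\n", gpll6 / 1 * 1 / 45); /* 24000000 */
	printf("GPLL0 / 1 * 1 / 45 = %llu Hz\n", gpll0 / 1 * 1 / 45); /* 17777777 */
	return 0;
}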
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 74346dc02606..a4d6dff9d0f7 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2320,6 +2320,9 @@ static struct clk_branch gcc_video_xo_clk = {
static struct gdsc usb30_prim_gdsc = {
.gdscr = 0x1a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "usb30_prim_gdsc",
},
@@ -2328,6 +2331,9 @@ static struct gdsc usb30_prim_gdsc = {
static struct gdsc ufs_phy_gdsc = {
.gdscr = 0x3a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
.pd = {
.name = "ufs_phy_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index fa1672c4e7d8..24f98062b9dd 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3817,7 +3817,9 @@ static int gcc_sm8650_probe(struct platform_device *pdev)
qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */
qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52150, 0x0);
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
index b36d70976095..8092dd6b37b5 100644
--- a/drivers/clk/qcom/gcc-sm8750.c
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -3244,8 +3244,9 @@ static int gcc_sm8750_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20));
regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21));
- /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 009f39139b64..3e44757e25d3 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -6753,6 +6753,10 @@ static int gcc_x1e80100_probe(struct platform_device *pdev)
/* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
regmap_write(regmap, 0x52224, 0x0);
+ /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true);
+
return qcom_cc_really_probe(&pdev->dev, &gcc_x1e80100_desc, regmap);
}
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index 35ed0500bc59..ee89c42413f8 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -413,6 +413,9 @@ static struct clk_branch gpu_cc_gx_vsense_clk = {
static struct gdsc gpu_cx_gdsc = {
.gdscr = 0x106c,
.gds_hw_ctrl = 0x1540,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
.pd = {
.name = "gpu_cx_gdsc",
},
@@ -423,6 +426,9 @@ static struct gdsc gpu_cx_gdsc = {
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x100c,
.clamp_io_ctrl = 0x1508,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
.pd = {
.name = "gpu_gx_gdsc",
.power_on = gdsc_gx_do_nothing_enable,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 5a4bc3f94d49..50c20119d12a 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -41,6 +41,7 @@ config CLK_RENESAS
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
select CLK_R9A09G047 if ARCH_R9A09G047
+ select CLK_R9A09G056 if ARCH_R9A09G056
select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
@@ -199,6 +200,10 @@ config CLK_R9A09G047
bool "RZ/G3E clock support" if COMPILE_TEST
select CLK_RZV2H
+config CLK_R9A09G056
+ bool "RZ/V2N clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 2d6e746939c4..f9075bca6e95 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
+obj-$(CONFIG_CLK_R9A09G056) += r9a09g056-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index e9cf4342d0cf..21699999cedd 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G047_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G047_GBETH_1_CLK_PTP_REF_I,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -31,7 +31,14 @@ enum clk_ids {
CLK_PLLVDO,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
@@ -41,6 +48,7 @@ enum clk_ids {
CLK_PLLDTY_ACPU_DIV4,
CLK_PLLDTY_DIV16,
CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_GPU,
/* Module Clocks */
MOD_CLK_BASE,
@@ -60,6 +68,14 @@ static const struct clk_div_table dtable_2_4[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -69,6 +85,10 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+/* Mux clock tables */
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -79,12 +99,21 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
@@ -96,6 +125,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -108,6 +138,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2),
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
@@ -153,6 +184,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(10, BIT(14))),
DEF_MOD("canfd_0_clkc", CLK_PLLCLN_DIV20, 9, 14, 4, 30,
BUS_MSTOP(10, BIT(14))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD_NO_PM("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
BUS_MSTOP(8, BIT(2))),
DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
@@ -183,6 +220,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(4))),
DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("ge3d_clk", CLK_PLLVDO_GPU, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("ge3d_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10,
BUS_MSTOP(2, BIT(15))),
};
@@ -207,12 +250,17 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
DEF_RST(10, 1, 4, 18), /* CANFD_0_RSTP_N */
DEF_RST(10, 2, 4, 19), /* CANFD_0_RSTC_N */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GE3D_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GE3D_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GE3D_ACE_RESETN */
DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */
};
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
new file mode 100644
index 000000000000..e2712a25c43a
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2N CPG driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g056-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G056_GBETH_1_CLK_PTP_REF_I,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV2,
+ CLK_PLLCLN_DIV8,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
+ DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G056_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G056_CA55_0_CORE_CLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G056_CA55_0_CORE_CLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G056_CA55_0_CORE_CLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G056_CA55_0_CORE_CLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G056_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+};
+
+static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
+ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
+ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+};
+
+const struct rzv2h_cpg_info r9a09g056_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g056_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g056_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g056_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g056_mod_clks),
+ .num_hw_mod_clks = 25 * 16,
+
+ /* Resets */
+ .resets = r9a09g056_resets,
+ .num_resets = ARRAY_SIZE(r9a09g056_resets),
+
+ .num_mstop_bits = 192,
+};
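The new RZ/V2N CPG starts with the minimal set needed to boot (GIC, SCIF, SDHI) and sizes its module-clock space as num_hw_mod_clks = 25 * 16, i.e. 25 CLK_ON registers of 16 bits each. Under that reading, a (register index, bit) pair maps to a linear hardware index as sketched here; the mapping is an assumption drawn only from the 25 * 16 sizing.

#include <stdio.h>

/* Assumed linear index: 25 CLK_ON registers x 16 bits = 400 slots. */
static unsigned int mod_clk_index(unsigned int on_index, unsigned int on_bit)
{
	return on_index * 16 + on_bit;
}

int main(void)
{
	/* e.g. "sdhi_0_imclk" above sits at on_index 10, on_bit 3 */
	printf("index = %u of %u\n", mod_clk_index(10, 3), 25 * 16);
	return 0;
}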
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index d63eafbca780..3c40e36259fe 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK,
+ LAST_DT_CORE_CLK = R9A09G057_GBETH_1_CLK_PTP_REF_I,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -29,6 +29,7 @@ enum clk_ids {
CLK_PLLDTY,
CLK_PLLCA55,
CLK_PLLVDO,
+ CLK_PLLGPU,
/* Internal Core Clocks */
CLK_PLLCM33_DIV4,
@@ -40,6 +41,7 @@ enum clk_ids {
CLK_PLLDTY_ACPU,
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
CLK_PLLDTY_DIV16,
CLK_PLLDTY_RCPU,
CLK_PLLDTY_RCPU_DIV4,
@@ -47,6 +49,7 @@ enum clk_ids {
CLK_PLLVDO_CRU1,
CLK_PLLVDO_CRU2,
CLK_PLLVDO_CRU3,
+ CLK_PLLGPU_GEAR,
/* Module Clocks */
MOD_CLK_BASE,
@@ -85,8 +88,9 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
- DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+ DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
@@ -101,6 +105,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
@@ -110,6 +115,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
+ DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
+
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
DEF_DDIV("ca55_0_coreclk0", R9A09G057_CA55_0_CORE_CLK0, CLK_PLLCA55,
@@ -121,6 +128,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G057_CA55_0_CORE_CLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G057_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb2_0_clk_core1", R9A09G057_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
@@ -214,6 +223,16 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
+ BUS_MSTOP(7, BIT(8))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
+ BUS_MSTOP(7, BIT(11))),
DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
BUS_MSTOP(9, BIT(4))),
DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
@@ -238,6 +257,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(7))),
DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
@@ -275,6 +300,10 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
@@ -287,6 +316,9 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
+ DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index da021ee446ec..71431970d6e6 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -27,6 +27,7 @@
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -204,7 +205,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
spin_lock_irqsave(&priv->rmw_lock, flags);
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index b91dfbfb01e3..a8628f64a03b 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -27,6 +27,7 @@
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/units.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -1217,7 +1218,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
}
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
value = bitmask << 16;
if (enable)
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index 2b9771ab2b3f..bcc496e8cbcd 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -25,6 +25,7 @@
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>
+#include <linux/string_choices.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -44,10 +45,18 @@
#define CPG_BUS_1_MSTOP (0xd00)
#define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4)
-#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
-#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
-#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
-#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val))
+#define CPG_PLL_STBY(x) ((x))
+#define CPG_PLL_STBY_RESETB BIT(0)
+#define CPG_PLL_STBY_RESETB_WEN BIT(16)
+#define CPG_PLL_CLK1(x) ((x) + 0x004)
+#define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x)))
+#define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x))
+#define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x))
+#define CPG_PLL_CLK2(x) ((x) + 0x008)
+#define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x))
+#define CPG_PLL_MON(x) ((x) + 0x010)
+#define CPG_PLL_MON_RESETB BIT(0)
+#define CPG_PLL_MON_LOCK BIT(4)
#define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16)
@@ -94,8 +103,7 @@ struct pll_clk {
struct rzv2h_cpg_priv *priv;
void __iomem *base;
struct clk_hw hw;
- unsigned int conf;
- unsigned int type;
+ struct pll pll;
};
#define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
@@ -110,7 +118,7 @@ struct pll_clk {
* @on_index: register offset
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
- * @mon_bit: montor bit
+ * @mon_bit: monitor bit
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
@@ -140,27 +148,78 @@ struct ddiv_clk {
#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));
+
+ /* Ensure both RESETB and LOCK bits are set */
+ return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
+}
+
+static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
+ u32 stby_offset;
+ u32 mon_offset;
+ u32 val;
+ int ret;
+
+ if (rzv2h_cpg_pll_clk_is_enabled(hw))
+ return 0;
+
+ stby_offset = CPG_PLL_STBY(pll.offset);
+ mon_offset = CPG_PLL_MON(pll.offset);
+
+ writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
+ priv->base + stby_offset);
+
+ /*
+ * Ensure the PLL enters normal mode
+ *
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Since this latency might depend on external crystal or PLL rate,
+ * use a "super" safe timeout value.
+ */
+ ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
+ (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+ (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
+ stby_offset, hw->clk);
+
+ return ret;
+}
+
static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct pll_clk *pll_clk = to_pll(hw);
struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct pll pll = pll_clk->pll;
unsigned int clk1, clk2;
u64 rate;
- if (!PLL_CLK_ACCESS(pll_clk->conf))
+ if (!pll.has_clkn)
return 0;
- clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
- clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
+ clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
+ clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));
- rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
- 16 + SDIV(clk2));
+ rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
+ CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));
- return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
+ return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
}
static const struct clk_ops rzv2h_cpg_pll_ops = {
+ .is_enabled = rzv2h_cpg_pll_clk_is_enabled,
+ .enable = rzv2h_cpg_pll_clk_enable,
.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};
@@ -193,10 +252,9 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
init.num_parents = 1;
pll_clk->hw.init = &init;
- pll_clk->conf = core->cfg.conf;
+ pll_clk->pll = core->cfg.pll;
pll_clk->base = base;
pll_clk->priv = priv;
- pll_clk->type = core->type;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret)
@@ -241,6 +299,9 @@ static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon
u32 bitmask = BIT(mon);
u32 val;
+ if (mon == CSDIV_NO_MON)
+ return 0;
+
return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}
@@ -272,12 +333,6 @@ static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
writel(val, divider->reg);
ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
- if (ret)
- goto ddiv_timeout;
-
- spin_unlock_irqrestore(divider->lock, flags);
-
- return 0;
ddiv_timeout:
spin_unlock_irqrestore(divider->lock, flags);
@@ -320,7 +375,10 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return ERR_PTR(-ENOMEM);
init.name = core->name;
- init.ops = &rzv2h_ddiv_clk_divider_ops;
+ if (cfg_ddiv.no_rmw)
+ init.ops = &clk_divider_ops;
+ else
+ init.ops = &rzv2h_ddiv_clk_divider_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -342,6 +400,24 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
return div->hw.clk;
}
+static struct clk * __init
+rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct smuxed mux = core->cfg.smux;
+ const struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag, priv->base + mux.offset,
+ mux.shift, mux.width,
+ core->mux_flags, &priv->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
void *data)
@@ -426,6 +502,9 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_DDIV:
clk = rzv2h_cpg_ddiv_clk_register(core, priv);
break;
+ case CLK_TYPE_SMUX:
+ clk = rzv2h_cpg_mux_clk_register(core, priv);
+ break;
default:
goto fail;
}
@@ -494,11 +573,14 @@ static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
if (clock->mon_index >= 0) {
offset = GET_CLK_MON_OFFSET(clock->mon_index);
bitmask = BIT(clock->mon_bit);
- } else {
- offset = GET_CLK_ON_OFFSET(clock->on_index);
- bitmask = BIT(clock->on_bit);
+
+ if (!(readl(priv->base + offset) & bitmask))
+ return 0;
}
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+
return readl(priv->base + offset) & bitmask;
}
@@ -514,7 +596,7 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
int error;
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
- enable ? "ON" : "OFF");
+ str_on_off(enable));
if (enabled == enable)
return 0;
@@ -658,8 +740,8 @@ fail:
mod->name, PTR_ERR(clk));
}
-static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
@@ -667,35 +749,31 @@ static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
u8 monbit = priv->resets[id].mon_bit;
u32 value = mask << 16;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, reg);
+ if (!assert)
+ value |= mask;
writel(value, priv->base + reg);
reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
mask = BIT(monbit);
return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ assert ? (value & mask) : !(value & mask),
+ 10, 200);
+}
+
+static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzv2h_cpg_assert(rcdev, id, true);
}
static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
- unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
- u32 mask = BIT(priv->resets[id].reset_bit);
- u8 monbit = priv->resets[id].mon_bit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);
-
- writel(value, priv->base + reg);
-
- reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
- mask = BIT(monbit);
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzv2h_cpg_assert(rcdev, id, false);
}
static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
@@ -967,18 +1045,24 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
}
static const struct of_device_id rzv2h_cpg_match[] = {
-#ifdef CONFIG_CLK_R9A09G057
- {
- .compatible = "renesas,r9a09g057-cpg",
- .data = &r9a09g057_cpg_info,
- },
-#endif
#ifdef CONFIG_CLK_R9A09G047
{
.compatible = "renesas,r9a09g047-cpg",
.data = &r9a09g047_cpg_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G056
+ {
+ .compatible = "renesas,r9a09g056-cpg",
+ .data = &r9a09g056_cpg_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R9A09G057
+ {
+ .compatible = "renesas,r9a09g057-cpg",
+ .data = &r9a09g057_cpg_info,
+ },
+#endif
{ /* sentinel */ }
};
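rzv2h_cpg_pll_clk_enable writes the write-enabled RESETB bit and then polls the PLL monitor register until both RESETB and LOCK read back set; the same two-bit condition doubles as .is_enabled. The predicate is easy to get subtly wrong with a plain bitwise AND, so here it is isolated as a standalone check:

#include <stdio.h>

#define CPG_PLL_MON_RESETB	(1u << 0)
#define CPG_PLL_MON_LOCK	(1u << 4)

/* The PLL counts as enabled only when RESETB and LOCK are both set. */
static int pll_is_enabled(unsigned int mon)
{
	return (mon & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
	       (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
}

int main(void)
{
	printf("%d\n", pll_is_enabled(CPG_PLL_MON_RESETB));	/* 0: not locked */
	printf("%d\n", pll_is_enabled(CPG_PLL_MON_RESETB |
				      CPG_PLL_MON_LOCK));	/* 1: enabled */
	return 0;
}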
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 576a070763cb..9104b1cd276c 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -11,20 +11,51 @@
#include <linux/bitfield.h>
/**
+ * struct pll - Structure for PLL configuration
+ *
+ * @offset: STBY register offset
+ * @has_clkn: Flag to indicate if CLK1/2 are accessible or not
+ */
+struct pll {
+ unsigned int offset:9;
+ unsigned int has_clkn:1;
+};
+
+#define PLL_PACK(_offset, _has_clkn) \
+ ((struct pll){ \
+ .offset = _offset, \
+ .has_clkn = _has_clkn \
+ })
+
+#define PLLCA55 PLL_PACK(0x60, 1)
+#define PLLGPU PLL_PACK(0x120, 1)
+
+/**
* struct ddiv - Structure for dynamic switching divider
*
* @offset: register offset
* @shift: position of the divider bit
* @width: width of the divider
* @monbit: monitor bit in CPG_CLKSTATUS0 register
+ * @no_rmw: flag to indicate whether the register is updated without
+ * read-modify-write (1: no RMW, 0: RMW)
*/
struct ddiv {
unsigned int offset:11;
unsigned int shift:4;
unsigned int width:4;
unsigned int monbit:5;
+ unsigned int no_rmw:1;
};
+/*
+ * On RZ/V2H(P), the dynamic divider clock supports up to 19 monitor bits,
+ * while on RZ/G3E, it supports up to 16 monitor bits. Use the maximum value
+ * `0x1f` to indicate that monitor bits are not supported for static divider
+ * clocks.
+ */
+#define CSDIV_NO_MON (0x1f)
+
#define DDIV_PACK(_offset, _shift, _width, _monbit) \
((struct ddiv){ \
.offset = _offset, \
@@ -33,10 +64,41 @@ struct ddiv {
.monbit = _monbit \
})
+#define DDIV_PACK_NO_RMW(_offset, _shift, _width, _monbit) \
+ ((struct ddiv){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .monbit = (_monbit), \
+ .no_rmw = 1 \
+ })
+
+/**
+ * struct smuxed - Structure for static muxed clocks
+ *
+ * @offset: register offset
+ * @shift: position of the divider field
+ * @width: width of the divider field
+ */
+struct smuxed {
+ unsigned int offset:11;
+ unsigned int shift:4;
+ unsigned int width:4;
+};
+
+#define SMUX_PACK(_offset, _shift, _width) \
+ ((struct smuxed){ \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ })
+
+#define CPG_SSEL1 (0x304)
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
#define CPG_CDDIV3 (0x40C)
#define CPG_CDDIV4 (0x410)
+#define CPG_CSDIV0 (0x500)
#define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
@@ -44,12 +106,18 @@ struct ddiv {
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL1 DDIV_PACK(CPG_CDDIV3, 4, 3, 13)
#define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14)
#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON)
+
+#define SSEL1_SELCTL2 SMUX_PACK(CPG_SSEL1, 8, 1)
+#define SSEL1_SELCTL3 SMUX_PACK(CPG_SSEL1, 12, 1)
+
#define BUS_MSTOP_IDX_MASK GENMASK(31, 16)
#define BUS_MSTOP_BITS_MASK GENMASK(15, 0)
#define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \
@@ -74,8 +142,13 @@ struct cpg_core_clk {
union {
unsigned int conf;
struct ddiv ddiv;
+ struct pll pll;
+ struct smuxed smux;
} cfg;
const struct clk_div_table *dtable;
+ const char * const *parent_names;
+ unsigned int num_parents;
+ u8 mux_flags;
u32 flag;
};
@@ -85,20 +158,15 @@ enum clk_types {
CLK_TYPE_FF, /* Fixed Factor Clock */
CLK_TYPE_PLL,
CLK_TYPE_DDIV, /* Dynamic Switching Divider */
+ CLK_TYPE_SMUX, /* Static Mux */
};
-/* BIT(31) indicates if CLK1/2 are accessible or not */
-#define PLL_CONF(n) (BIT(31) | ((n) & ~GENMASK(31, 16)))
-#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0)
-#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16))
-#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4))
-
#define DEF_TYPE(_name, _id, _type...) \
{ .name = _name, .id = _id, .type = _type }
#define DEF_BASE(_name, _id, _type, _parent...) \
DEF_TYPE(_name, _id, _type, .parent = _parent)
-#define DEF_PLL(_name, _id, _parent, _conf) \
- DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf)
+#define DEF_PLL(_name, _id, _parent, _pll_packed) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.pll = _pll_packed)
#define DEF_INPUT(_name, _id) \
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
@@ -109,6 +177,15 @@ enum clk_types {
.parent = _parent, \
.dtable = _dtable, \
.flag = CLK_DIVIDER_HIWORD_MASK)
+#define DEF_CSDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable)
+#define DEF_SMUX(_name, _id, _smux_packed, _parent_names) \
+ DEF_TYPE(_name, _id, CLK_TYPE_SMUX, \
+ .cfg.smux = _smux_packed, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .flag = CLK_SET_RATE_PARENT, \
+ .mux_flags = CLK_MUX_HIWORD_MASK)
/**
* struct rzv2h_mod_clk - Module Clocks definitions
@@ -221,6 +298,7 @@ struct rzv2h_cpg_info {
};
extern const struct rzv2h_cpg_info r9a09g047_cpg_info;
+extern const struct rzv2h_cpg_info r9a09g056_cpg_info;
extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
#endif /* __RENESAS_RZV2H_CPG_H__ */
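PLL_PACK builds a struct pll compound literal at compile time, replacing the old BIT(31)-tagged conf word with named bitfields: a 9-bit STBY register offset plus a has_clkn flag, still fitting in a single int. A userspace sketch of the same packing:

#include <stdio.h>

struct pll {
	unsigned int offset:9;
	unsigned int has_clkn:1;
};

#define PLL_PACK(_offset, _has_clkn) \
	((struct pll){ .offset = _offset, .has_clkn = _has_clkn })

#define PLLCA55 PLL_PACK(0x60, 1)

int main(void)
{
	struct pll p = PLLCA55;

	/* offset=0x60 has_clkn=1, and the whole struct stays int-sized */
	printf("offset=0x%x has_clkn=%u size=%zu\n",
	       p.offset, p.has_clkn, sizeof(struct pll));
	return 0;
}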
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index e8ece20aebfd..c281a9738d9f 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_COMMON_CLK_ROCKCHIP) += clk-rockchip.o
clk-rockchip-y += clk.o
clk-rockchip-y += clk-pll.o
clk-rockchip-y += clk-cpu.o
+clk-rockchip-y += clk-gate-grf.o
clk-rockchip-y += clk-half-divider.o
clk-rockchip-y += clk-inverter.o
clk-rockchip-y += clk-mmc-phase.o
diff --git a/drivers/clk/rockchip/clk-gate-grf.c b/drivers/clk/rockchip/clk-gate-grf.c
new file mode 100644
index 000000000000..8122f471f391
--- /dev/null
+++ b/drivers/clk/rockchip/clk-gate-grf.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ *
+ * Certain clocks on Rockchip are "gated" behind an additional register bit
+ * write in a GRF register, such as the SAI MCLKs on RK3576. This code
+ * implements a clock driver for these types of gates, based on regmaps.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+struct rockchip_gate_grf {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ unsigned int reg;
+ unsigned int shift;
+ u8 flags;
+};
+
+#define to_gate_grf(_hw) container_of(_hw, struct rockchip_gate_grf, hw)
+
+static int rockchip_gate_grf_enable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? BIT(gate->shift) : 0;
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+ int ret;
+
+ ret = regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+
+ return ret;
+}
+
+static void rockchip_gate_grf_disable(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : BIT(gate->shift);
+ u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16);
+
+ regmap_update_bits(gate->regmap, gate->reg,
+ hiword | BIT(gate->shift), hiword | val);
+}
+
+static int rockchip_gate_grf_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_gate_grf *gate = to_gate_grf(hw);
+ bool invert = !!(gate->flags & CLK_GATE_SET_TO_DISABLE);
+ int ret;
+
+ ret = regmap_test_bits(gate->regmap, gate->reg, BIT(gate->shift));
+ if (ret < 0)
+ ret = 0;
+
+ return invert ? 1 - ret : ret;
+}
+
+static const struct clk_ops rockchip_gate_grf_ops = {
+ .enable = rockchip_gate_grf_enable,
+ .disable = rockchip_gate_grf_disable,
+ .is_enabled = rockchip_gate_grf_is_enabled,
+};
+
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg, unsigned int shift,
+ u8 gate_flags)
+{
+ struct rockchip_gate_grf *gate;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ if (IS_ERR(regmap)) {
+ pr_err("%s: regmap not available\n", __func__);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags;
+ init.num_parents = parent_name ? 1 : 0;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.ops = &rockchip_gate_grf_ops;
+
+ gate->hw.init = &init;
+ gate->regmap = regmap;
+ gate->reg = reg;
+ gate->shift = shift;
+ gate->flags = gate_flags;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
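The GRF gate reuses Rockchip's hiword-mask convention through regmap: when CLK_GATE_HIWORD_MASK is set, bit (shift + 16) acts as a write-enable for bit shift, so a single write flips one gate bit without racing other fields in the register. The value construction, reduced to a standalone helper:

#include <stdio.h>

/* Build the 32-bit hiword-mask write for one gate bit (write-enable in
 * the high half, payload in the low half). */
static unsigned int hiword_gate_write(unsigned int shift, int enable)
{
	unsigned int hiword = 1u << (shift + 16);
	unsigned int val = enable ? (1u << shift) : 0;

	return hiword | val;
}

int main(void)
{
	printf("enable bit 3:  0x%08x\n", hiword_gate_write(3, 1)); /* 0x00080008 */
	printf("disable bit 3: 0x%08x\n", hiword_gate_write(3, 0)); /* 0x00080000 */
	return 0;
}

With CLK_GATE_SET_TO_DISABLE the payload bit inverts, which is why the driver derives val from the flag rather than from enable alone.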
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 91012078681b..b3ed8e7523e5 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -9,11 +9,14 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/regmap.h>
#include "clk.h"
struct rockchip_mmc_clock {
struct clk_hw hw;
void __iomem *reg;
+ struct regmap *grf;
+ int grf_reg;
int shift;
int cached_phase;
struct notifier_block clk_rate_change_nb;
@@ -54,7 +57,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
if (!rate)
return 0;
- raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+ if (mmc_clock->grf)
+ regmap_read(mmc_clock->grf, mmc_clock->grf_reg, &raw_value);
+ else
+ raw_value = readl(mmc_clock->reg);
+
+ raw_value >>= mmc_clock->shift;
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
@@ -134,8 +142,12 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
raw_value |= nineties;
- writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
- mmc_clock->reg);
+ raw_value = HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift);
+
+ if (mmc_clock->grf)
+ regmap_write(mmc_clock->grf, mmc_clock->grf_reg, raw_value);
+ else
+ writel(raw_value, mmc_clock->reg);
pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
clk_hw_get_name(hw), degrees, delay_num,
@@ -189,7 +201,9 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift)
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift)
{
struct clk_init_data init;
struct rockchip_mmc_clock *mmc_clock;
@@ -208,6 +222,8 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->hw.init = &init;
mmc_clock->reg = reg;
+ mmc_clock->grf = grf;
+ mmc_clock->grf_reg = grf_reg;
mmc_clock->shift = shift;
clk = clk_register(NULL, &mmc_clock->hw);
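rockchip_clk_register_mmc now accepts either an MMIO register or a regmap/offset pair, and the phase code reads or writes through whichever is set; the phase encoding itself is unchanged, with two bits selecting a 90-degree quadrant plus a delay-element fine adjustment. The coarse decode on its own, assuming the driver's 0x3 degree mask:

#include <stdio.h>

#define ROCKCHIP_MMC_DEGREE_MASK	0x3

/* Coarse phase: two register bits select 0/90/180/270 degrees. */
static int mmc_raw_to_degrees(unsigned int raw, int shift)
{
	return ((raw >> shift) & ROCKCHIP_MMC_DEGREE_MASK) * 90;
}

int main(void)
{
	printf("%d\n", mmc_raw_to_degrees(0x2u << 1, 1)); /* 180 */
	return 0;
}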
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 2c2abb3b4210..af74439a7457 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -1027,16 +1027,6 @@ static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw)
return !(pllcon & RK3588_PLLCON1_PWRDOWN);
}
-static int rockchip_rk3588_pll_init(struct clk_hw *hw)
-{
- struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
-
- if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
- return 0;
-
- return 0;
-}
-
static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
.enable = rockchip_rk3588_pll_enable,
@@ -1051,7 +1041,6 @@ static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
.enable = rockchip_rk3588_pll_enable,
.disable = rockchip_rk3588_pll_disable,
.is_enabled = rockchip_rk3588_pll_is_enabled,
- .init = rockchip_rk3588_pll_init,
};
/*
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index d341ce0708aa..df9330958c83 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -123,6 +123,7 @@ PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
PNAME(mux_pll_src_dmyapll_dpll_gpll_xin24_p) = { "dummy_apll", "dpll", "gpll", "xin24m" };
+PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" };
@@ -423,6 +424,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+
+ MUX(SCLK_USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ RK2928_MISC_CON, 15, 1, MFLAGS),
};
static const char *const rk3036_critical_clocks[] __initconst = {
@@ -431,6 +435,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_peri",
"pclk_ddrupctl",
+ "ddrphy",
};
static void __init rk3036_clk_init(struct device_node *np)
@@ -438,7 +443,6 @@ static void __init rk3036_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -462,11 +466,6 @@ static void __init rk3036_clk_init(struct device_node *np)
return;
}
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(ctx, rk3036_pll_clks,
ARRAY_SIZE(rk3036_pll_clks),
RK3036_GRF_SOC_STATUS0);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 90d329216064..0a1e017df7c6 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -418,7 +418,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 11, GFLAGS),
MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
- RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
+ RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS, grf_type_sys),
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index cf60fcf2fa5c..cd5f65b6cdf5 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -677,9 +677,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK3328_CLKGATE_CON(3), 5, GFLAGS),
MUXGRF(SCLK_MAC2IO, "clk_mac2io", mux_mac2io_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON1, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON1, 10, 1, MFLAGS, grf_type_sys),
MUXGRF(SCLK_MAC2IO_EXT, "clk_mac2io_ext", mux_mac2io_ext_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_SOC_CON4, 14, 1, MFLAGS),
+ RK3328_GRF_SOC_CON4, 14, 1, MFLAGS, grf_type_sys),
COMPOSITE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", mux_2plls_p, 0,
RK3328_CLKSEL_CON(26), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -692,7 +692,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(26), 8, 2, DFLAGS,
RK3328_CLKGATE_CON(9), 2, GFLAGS),
MUXGRF(SCLK_MAC2PHY, "clk_mac2phy", mux_mac2phy_src_p, CLK_SET_RATE_NO_REPARENT,
- RK3328_GRF_MAC_CON2, 10, 1, MFLAGS),
+ RK3328_GRF_MAC_CON2, 10, 1, MFLAGS, grf_type_sys),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
diff --git a/drivers/clk/rockchip/clk-rk3528.c b/drivers/clk/rockchip/clk-rk3528.c
index b8b577b902a0..a5ff64b93f8f 100644
--- a/drivers/clk/rockchip/clk-rk3528.c
+++ b/drivers/clk/rockchip/clk-rk3528.c
@@ -10,6 +10,9 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3528-cru.h>
@@ -1061,23 +1064,65 @@ static struct rockchip_clk_branch rk3528_clk_branches[] __initdata = {
0, 1, 1),
};
+static struct rockchip_clk_branch rk3528_vo_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDMMC_DRV, "sdmmc_drv", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(0), 1, grf_type_vo),
+ MMC_GRF(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "cclk_src_sdmmc0",
+ RK3528_SDMMC_CON(1), 1, grf_type_vo),
+};
+
+static struct rockchip_clk_branch rk3528_vpu_clk_branches[] __initdata = {
+ MMC_GRF(SCLK_SDIO0_DRV, "sdio0_drv", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO0_SAMPLE, "sdio0_sample", "cclk_src_sdio0",
+ RK3528_SDIO0_CON(1), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_DRV, "sdio1_drv", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(0), 1, grf_type_vpu),
+ MMC_GRF(SCLK_SDIO1_SAMPLE, "sdio1_sample", "cclk_src_sdio1",
+ RK3528_SDIO1_CON(1), 1, grf_type_vpu),
+};
+
static int __init clk_rk3528_probe(struct platform_device *pdev)
{
- struct rockchip_clk_provider *ctx;
+ unsigned long nr_vpu_branches = ARRAY_SIZE(rk3528_vpu_clk_branches);
+ unsigned long nr_vo_branches = ARRAY_SIZE(rk3528_vo_clk_branches);
+ unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
+ unsigned long nr_clks, nr_vo_clks, nr_vpu_clks;
+ struct rockchip_aux_grf *vo_grf_e, *vpu_grf_e;
+ struct regmap *vo_grf, *vpu_grf;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches);
- unsigned long nr_clks;
+ struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
- nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
- nr_branches) + 1;
-
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return dev_err_probe(dev, PTR_ERR(reg_base),
"could not map cru region");
+ nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches,
+ nr_branches) + 1;
+
+ vo_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vo-grf");
+ if (!IS_ERR(vo_grf)) {
+ nr_vo_clks = rockchip_clk_find_max_clk_id(rk3528_vo_clk_branches,
+ nr_vo_branches) + 1;
+ nr_clks = max(nr_clks, nr_vo_clks);
+ } else if (PTR_ERR(vo_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vo_grf),
+ "failed to look up VO GRF\n");
+ }
+
+ vpu_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vpu-grf");
+ if (!IS_ERR(vpu_grf)) {
+ nr_vpu_clks = rockchip_clk_find_max_clk_id(rk3528_vpu_clk_branches,
+ nr_vpu_branches) + 1;
+ nr_clks = max(nr_clks, nr_vpu_clks);
+ } else if (PTR_ERR(vpu_grf) != -ENODEV) {
+ return dev_err_probe(dev, PTR_ERR(vpu_grf),
+ "failed to look up VPU GRF\n");
+ }
+
ctx = rockchip_clk_init(np, reg_base, nr_clks);
if (IS_ERR(ctx))
return dev_err_probe(dev, PTR_ERR(ctx),
@@ -1092,6 +1137,32 @@ static int __init clk_rk3528_probe(struct platform_device *pdev)
ARRAY_SIZE(rk3528_cpuclk_rates));
rockchip_clk_register_branches(ctx, rk3528_clk_branches, nr_branches);
+ if (!IS_ERR(vo_grf)) {
+ vo_grf_e = devm_kzalloc(dev, sizeof(*vo_grf_e), GFP_KERNEL);
+ if (!vo_grf_e)
+ return -ENOMEM;
+
+ vo_grf_e->grf = vo_grf;
+ vo_grf_e->type = grf_type_vo;
+ hash_add(ctx->aux_grf_table, &vo_grf_e->node, grf_type_vo);
+
+ rockchip_clk_register_branches(ctx, rk3528_vo_clk_branches,
+ nr_vo_branches);
+ }
+
+ if (!IS_ERR(vpu_grf)) {
+ vpu_grf_e = devm_kzalloc(dev, sizeof(*vpu_grf_e), GFP_KERNEL);
+ if (!vpu_grf_e)
+ return -ENOMEM;
+
+ vpu_grf_e->grf = vpu_grf;
+ vpu_grf_e->type = grf_type_vpu;
+ hash_add(ctx->aux_grf_table, &vpu_grf_e->node, grf_type_vpu);
+
+ rockchip_clk_register_branches(ctx, rk3528_vpu_clk_branches,
+ nr_vpu_branches);
+ }
+
rk3528_rst_init(np, reg_base);
rockchip_register_restart_notifier(ctx, RK3528_GLB_SRST_FST, NULL);
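The rk3528 probe above treats the VO and VPU GRF syscons as optional: a lookup that fails with -ENODEV only means the node is absent from the devicetree, so the branches depending on that GRF are skipped, while any other error aborts the probe. A minimal sketch of that idiom, with a hypothetical compatible string and helper name:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* Sketch of the optional-syscon idiom; names are illustrative, not from the patch. */
static int example_lookup_optional_grf(struct regmap **out)
{
	struct regmap *grf;

	grf = syscon_regmap_lookup_by_compatible("vendor,example-grf");
	if (IS_ERR(grf)) {
		if (PTR_ERR(grf) != -ENODEV)
			return PTR_ERR(grf);	/* real lookup failure: propagate */
		grf = NULL;			/* node absent: feature stays off */
	}

	*out = grf;	/* callers register the GRF branches only if non-NULL */
	return 0;
}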
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 7d9279291e76..d48ab9d6c064 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -89,6 +89,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
RK3036_PLL_RATE(78750000, 4, 315, 6, 4, 1, 0),
RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE(33300000, 4, 111, 5, 4, 1, 0),
{ /* sentinel */ },
};
@@ -590,7 +591,7 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = {
RK3568_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3568_CLKGATE_CON(4), 0, GFLAGS),
MUXGRF(CLK_DDR1X, "clk_ddr1x", clk_ddr1x_p, CLK_SET_RATE_PARENT,
- RK3568_CLKSEL_CON(9), 15, 1, MFLAGS),
+ RK3568_CLKSEL_CON(9), 15, 1, MFLAGS, grf_type_sys),
COMPOSITE_NOMUX(CLK_MSCH, "clk_msch", "clk_ddr1x", CLK_IGNORE_UNUSED,
RK3568_CLKSEL_CON(10), 0, 2, DFLAGS,
diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c
index 595e010341f7..9bc0ef51ef68 100644
--- a/drivers/clk/rockchip/clk-rk3576.c
+++ b/drivers/clk/rockchip/clk-rk3576.c
@@ -10,11 +10,13 @@
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/rockchip,rk3576-cru.h>
#include "clk.h"
#define RK3576_GRF_SOC_STATUS0 0x600
#define RK3576_PMU0_GRF_OSC_CON6 0x18
+#define RK3576_VCCIO_IOC_MISC_CON0 0x6400
enum rk3576_plls {
bpll, lpll, vpll, aupll, cpll, gpll, ppll,
@@ -541,6 +543,8 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(5), 14, GFLAGS),
GATE(CLK_OTPC_AUTO_RD_G, "clk_otpc_auto_rd_g", "xin24m", 0,
RK3576_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(CLK_OTP_PHY_G, "clk_otp_phy_g", "xin24m", 0,
+ RK3576_CLKGATE_CON(6), 0, GFLAGS),
COMPOSITE(CLK_MIPI_CAMERAOUT_M0, "clk_mipi_cameraout_m0", mux_24m_spll_gpll_cpll_p, 0,
RK3576_CLKSEL_CON(38), 8, 2, MFLAGS, 0, 8, DFLAGS,
RK3576_CLKGATE_CON(6), 3, GFLAGS),
@@ -1479,6 +1483,14 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
RK3576_CLKGATE_CON(10), 0, GFLAGS),
GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0,
RK3576_CLKGATE_CON(10), 1, GFLAGS),
+ GATE_GRF(CLK_SAI0_MCLKOUT_TO_IO, "mclk_sai0_to_io", "clk_sai0_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 0, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI1_MCLKOUT_TO_IO, "mclk_sai1_to_io", "clk_sai1_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 1, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI2_MCLKOUT_TO_IO, "mclk_sai2_to_io", "clk_sai2_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 2, GFLAGS, grf_type_ioc),
+ GATE_GRF(CLK_SAI3_MCLKOUT_TO_IO, "mclk_sai3_to_io", "clk_sai3_mclkout",
+ 0, RK3576_VCCIO_IOC_MISC_CON0, 3, GFLAGS, grf_type_ioc),
/* sdgmac */
COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0,
@@ -1676,13 +1688,13 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = {
/* phy ref */
MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS, grf_type_pmu0),
MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0,
- RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS),
+ RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS, grf_type_pmu0),
/* secure ns */
COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL,
@@ -1725,17 +1737,26 @@ static void __init rk3576_clk_init(struct device_node *np)
struct rockchip_clk_provider *ctx;
unsigned long clk_nr_clks;
void __iomem *reg_base;
- struct regmap *grf;
+ struct rockchip_aux_grf *ioc_grf_e;
+ struct rockchip_aux_grf *pmu0_grf_e;
+ struct regmap *ioc_grf;
+ struct regmap *pmu0_grf;
clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches,
ARRAY_SIZE(rk3576_clk_branches)) + 1;
- grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
- if (IS_ERR(grf)) {
+ pmu0_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf");
+ if (IS_ERR(pmu0_grf)) {
pr_err("%s: could not get PMU0 GRF syscon\n", __func__);
return;
}
+ ioc_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-ioc-grf");
+ if (IS_ERR(ioc_grf)) {
+ pr_err("%s: could not get IOC GRF syscon\n", __func__);
+ return;
+ }
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
@@ -1745,11 +1766,24 @@ static void __init rk3576_clk_init(struct device_node *np)
ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
- iounmap(reg_base);
- return;
+ goto err_unmap;
}
- ctx->grf = grf;
+ pmu0_grf_e = kzalloc(sizeof(*pmu0_grf_e), GFP_KERNEL);
+ if (!pmu0_grf_e)
+ goto err_unmap;
+
+ pmu0_grf_e->grf = pmu0_grf;
+ pmu0_grf_e->type = grf_type_pmu0;
+ hash_add(ctx->aux_grf_table, &pmu0_grf_e->node, grf_type_pmu0);
+
+ ioc_grf_e = kzalloc(sizeof(*ioc_grf_e), GFP_KERNEL);
+ if (!ioc_grf_e)
+ goto err_free_pmu0;
+
+ ioc_grf_e->grf = ioc_grf;
+ ioc_grf_e->type = grf_type_ioc;
+ hash_add(ctx->aux_grf_table, &ioc_grf_e->node, grf_type_ioc);
rockchip_clk_register_plls(ctx, rk3576_pll_clks,
ARRAY_SIZE(rk3576_pll_clks),
@@ -1772,6 +1806,14 @@ static void __init rk3576_clk_init(struct device_node *np)
rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL);
rockchip_clk_of_add_provider(np, ctx);
+
+ return;
+
+err_free_pmu0:
+ kfree(pmu0_grf_e);
+err_unmap:
+ iounmap(reg_base);
+ return;
}
CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init);
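The rk3576 rework also converts the early exits into a goto unwind ladder (err_free_pmu0/err_unmap), so each failure point releases exactly what was set up before it. A generic sketch of the pattern; the function and variable names are illustrative:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>

/* Hypothetical illustration of the unwind ladder in rk3576_clk_init(). */
static void __init example_clk_init(void __iomem *reg_base)
{
	struct rockchip_aux_grf *first, *second;

	first = kzalloc(sizeof(*first), GFP_KERNEL);
	if (!first)
		goto err_unmap;

	second = kzalloc(sizeof(*second), GFP_KERNEL);
	if (!second)
		goto err_free_first;

	/* success: the real function now links both entries into the hashtable */
	return;

err_free_first:
	kfree(first);
err_unmap:
	iounmap(reg_base);
}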
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 4031733def4e..1694223f4f84 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -64,6 +64,7 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
RK3588_PLL_RATE(1560000000, 2, 260, 1, 0),
RK3588_PLL_RATE(1536000000, 2, 256, 1, 0),
RK3588_PLL_RATE(1512000000, 2, 252, 1, 0),
+ RK3588_PLL_RATE(1500000000, 2, 250, 1, 0),
RK3588_PLL_RATE(1488000000, 2, 248, 1, 0),
RK3588_PLL_RATE(1464000000, 2, 244, 1, 0),
RK3588_PLL_RATE(1440000000, 2, 240, 1, 0),
diff --git a/drivers/clk/rockchip/clk-rv1126.c b/drivers/clk/rockchip/clk-rv1126.c
index fc19c5522490..15e7bfe84506 100644
--- a/drivers/clk/rockchip/clk-rv1126.c
+++ b/drivers/clk/rockchip/clk-rv1126.c
@@ -857,7 +857,7 @@ static struct rockchip_clk_branch rv1126_clk_branches[] __initdata = {
RV1126_GMAC_CON, 5, 1, MFLAGS),
MUXGRF(CLK_GMAC_SRC, "clk_gmac_src", mux_clk_gmac_src_p, CLK_SET_RATE_PARENT |
CLK_SET_RATE_NO_REPARENT,
- RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS),
+ RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS, grf_type_sys),
GATE(CLK_GMAC_REF, "clk_gmac_ref", "clk_gmac_src", 0,
RV1126_CLKGATE_CON(20), 7, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index cbf93ea119a9..19caf26c991b 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -382,6 +382,8 @@ static struct rockchip_clk_provider *rockchip_clk_init_base(
ctx->cru_node = np;
spin_lock_init(&ctx->lock);
+ hash_init(ctx->aux_grf_table);
+
ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
"rockchip,grf");
@@ -496,6 +498,8 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
+ struct regmap *grf = ctx->grf;
+ struct rockchip_aux_grf *agrf;
struct clk *clk;
unsigned int idx;
unsigned long flags;
@@ -504,6 +508,19 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
flags = list->flags;
clk = NULL;
+ /* for GRF-dependent branches, choose the right grf first */
+ if ((list->branch_type == branch_grf_mux ||
+ list->branch_type == branch_grf_gate ||
+ list->branch_type == branch_grf_mmc) &&
+ list->grf_type != grf_type_sys) {
+ hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) {
+ if (agrf->type == list->grf_type) {
+ grf = agrf->grf;
+ break;
+ }
+ }
+ }
+
/* catch simple muxes */
switch (list->branch_type) {
case branch_mux:
@@ -523,10 +540,10 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->mux_shift, list->mux_width,
list->mux_flags, &ctx->lock);
break;
- case branch_muxgrf:
+ case branch_grf_mux:
clk = rockchip_clk_register_muxgrf(list->name,
list->parent_names, list->num_parents,
- flags, ctx->grf, list->muxdiv_offset,
+ flags, grf, list->muxdiv_offset,
list->mux_shift, list->mux_width,
list->mux_flags);
break;
@@ -573,6 +590,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
ctx->reg_base + list->gate_offset,
list->gate_shift, list->gate_flags, &ctx->lock);
break;
+ case branch_grf_gate:
+ flags |= CLK_SET_RATE_PARENT;
+ clk = rockchip_clk_register_gate_grf(list->name,
+ list->parent_names[0], flags, grf,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags);
+ break;
case branch_composite:
clk = rockchip_clk_register_branch(list->name,
list->parent_names, list->num_parents,
@@ -590,6 +614,16 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->name,
list->parent_names, list->num_parents,
ctx->reg_base + list->muxdiv_offset,
+ NULL, 0,
+ list->div_shift
+ );
+ break;
+ case branch_grf_mmc:
+ clk = rockchip_clk_register_mmc(
+ list->name,
+ list->parent_names, list->num_parents,
+ NULL,
+ grf, list->muxdiv_offset,
list->div_shift
);
break;
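The GRF selection added to rockchip_clk_register_branches() builds on the fixed-size hashtable API from <linux/hashtable.h>: entries are keyed by their grf_type, and lookup walks a single bucket, comparing types to resolve collisions. A self-contained sketch of that add/lookup pattern (names are illustrative, not from the patch):

#include <linux/hashtable.h>
#include <linux/slab.h>

static DEFINE_HASHTABLE(example_table, 2);	/* 2^2 = 4 buckets */

struct example_entry {
	int type;			/* also used as the hash key */
	struct hlist_node node;
};

static int example_add(int type)
{
	struct example_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->type = type;
	hash_add(example_table, &e->node, type);
	return 0;
}

static struct example_entry *example_lookup(int type)
{
	struct example_entry *e;

	/* walks only the bucket that "type" hashes into */
	hash_for_each_possible(example_table, e, node, type)
		if (e->type == type)	/* a bucket may hold collisions */
			return e;
	return NULL;
}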
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index df2b2d706450..1e9c3c0d31e3 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
+#include <linux/hashtable.h>
struct clk;
@@ -217,6 +218,9 @@ struct clk;
#define RK3528_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
#define RK3528_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
#define RK3528_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3528_SDMMC_CON(x) ((x) * 0x4 + 0x24)
+#define RK3528_SDIO0_CON(x) ((x) * 0x4 + 0x4)
+#define RK3528_SDIO1_CON(x) ((x) * 0x4 + 0xc)
#define RK3528_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PMU_CRU_BASE)
#define RK3528_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PMU_CRU_BASE)
#define RK3528_PCIE_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PCIE_CRU_BASE)
@@ -440,12 +444,37 @@ enum rockchip_pll_type {
.k = _k, \
}
+enum rockchip_grf_type {
+ grf_type_sys = 0,
+ grf_type_pmu0,
+ grf_type_pmu1,
+ grf_type_ioc,
+ grf_type_vo,
+ grf_type_vpu,
+};
+
+/* 2^GRF_HASH_ORDER = 4 buckets, enough for the non-sys entries of rockchip_grf_type */
+#define GRF_HASH_ORDER 2
+
+/**
+ * struct rockchip_aux_grf - entry for the aux_grf_table hashtable
+ * @grf: pointer to the grf this entry references
+ * @type: what type of GRF this is
+ * @node: hlist node
+ */
+struct rockchip_aux_grf {
+ struct regmap *grf;
+ enum rockchip_grf_type type;
+ struct hlist_node node;
+};
+
/**
* struct rockchip_clk_provider - information about clock provider
* @reg_base: virtual address for the register base.
* @clk_data: holds clock related data like clk* and number of clocks.
* @cru_node: device-node of the clock-provider
* @grf: regmap of the general-register-files syscon
+ * @aux_grf_table: hashtable of auxiliary GRF regmaps, indexed by grf_type
* @lock: maintains exclusion between callbacks for a given clock-provider.
*/
struct rockchip_clk_provider {
@@ -453,6 +482,7 @@ struct rockchip_clk_provider {
struct clk_onecell_data clk_data;
struct device_node *cru_node;
struct regmap *grf;
+ DECLARE_HASHTABLE(aux_grf_table, GRF_HASH_ORDER);
spinlock_t lock;
};
@@ -594,7 +624,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
- void __iomem *reg, int shift);
+ void __iomem *reg,
+ struct regmap *grf, int grf_reg,
+ int shift);
/*
* DDRCLK flags, including method of setting the rate
@@ -622,17 +654,24 @@ struct clk *rockchip_clk_register_muxgrf(const char *name,
int flags, struct regmap *grf, int reg,
int shift, int width, int mux_flags);
+struct clk *rockchip_clk_register_gate_grf(const char *name,
+ const char *parent_name, unsigned long flags,
+ struct regmap *regmap, unsigned int reg,
+ unsigned int shift, u8 gate_flags);
+
#define PNAME(x) static const char *const x[] __initconst
enum rockchip_clk_branch_type {
branch_composite,
branch_mux,
- branch_muxgrf,
+ branch_grf_mux,
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_grf_gate,
branch_linked_gate,
branch_mmc,
+ branch_grf_mmc,
branch_inverter,
branch_factor,
branch_ddrclk,
@@ -660,6 +699,7 @@ struct rockchip_clk_branch {
u8 gate_shift;
u8 gate_flags;
unsigned int linked_clk_id;
+ enum rockchip_grf_type grf_type;
struct rockchip_clk_branch *child;
};
@@ -900,10 +940,10 @@ struct rockchip_clk_branch {
.mux_table = mt, \
}
-#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \
+#define MUXGRF(_id, cname, pnames, f, o, s, w, mf, gt) \
{ \
.id = _id, \
- .branch_type = branch_muxgrf, \
+ .branch_type = branch_grf_mux, \
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
@@ -913,6 +953,7 @@ struct rockchip_clk_branch {
.mux_width = w, \
.mux_flags = mf, \
.gate_offset = -1, \
+ .grf_type = gt, \
}
#define DIV(_id, cname, pname, f, o, s, w, df) \
@@ -958,6 +999,20 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define GATE_GRF(_id, cname, pname, f, o, b, gf, gt) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ .grf_type = gt, \
+ }
+
#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
{ \
.id = _id, \
@@ -983,6 +1038,18 @@ struct rockchip_clk_branch {
.div_shift = shift, \
}
+#define MMC_GRF(_id, cname, pname, offset, shift, grftype) \
+ { \
+ .id = _id, \
+ .branch_type = branch_grf_mmc, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .muxdiv_offset = offset, \
+ .div_shift = shift, \
+ .grf_type = grftype, \
+ }
+
#define INVERTER(_id, cname, pname, io, is, if) \
{ \
.id = _id, \
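Since the new branch macros are plain designated initializers, it helps to see one expanded. For the CLK_SAI0_MCLKOUT_TO_IO entry registered in the rk3576 diff above, GATE_GRF() expands to approximately:

/* Approximate expansion of GATE_GRF(CLK_SAI0_MCLKOUT_TO_IO, ...) */
{
	.id		= CLK_SAI0_MCLKOUT_TO_IO,
	.branch_type	= branch_grf_gate,
	.name		= "mclk_sai0_to_io",
	.parent_names	= (const char *[]){ "clk_sai0_mclkout" },
	.num_parents	= 1,
	.flags		= 0,
	.gate_offset	= RK3576_VCCIO_IOC_MISC_CON0,
	.gate_shift	= 0,
	.gate_flags	= GFLAGS,
	.grf_type	= grf_type_ioc,
},

At registration time, branch_grf_gate plus grf_type_ioc steer rockchip_clk_register_gate_grf() at the IOC GRF regmap resolved from aux_grf_table rather than ctx->grf.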
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 374c26e5d9fd..cc5c1644c41c 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1269,6 +1269,45 @@ static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = {
CPUCLK_LAYOUT_E4210, e4412_armclk_d),
};
+static const struct samsung_cmu_info cmu_info_exynos4 __initconst = {
+ .mux_clks = exynos4_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4_mux_clks),
+ .div_clks = exynos4_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4_div_clks),
+ .gate_clks = exynos4_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4_gate_clks),
+ .fixed_factor_clks = exynos4_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4_fixed_factor_clks),
+ .fixed_clks = exynos4_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4_fixed_rate_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4210 __initconst = {
+ .mux_clks = exynos4210_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4210_mux_clks),
+ .div_clks = exynos4210_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4210_div_clks),
+ .gate_clks = exynos4210_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4210_gate_clks),
+ .fixed_factor_clks = exynos4210_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4210_fixed_factor_clks),
+ .fixed_clks = exynos4210_fixed_rate_clks,
+ .nr_fixed_clks = ARRAY_SIZE(exynos4210_fixed_rate_clks),
+ .cpu_clks = exynos4210_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(exynos4210_cpu_clks),
+};
+
+static const struct samsung_cmu_info cmu_info_exynos4x12 __initconst = {
+ .mux_clks = exynos4x12_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(exynos4x12_mux_clks),
+ .div_clks = exynos4x12_div_clks,
+ .nr_div_clks = ARRAY_SIZE(exynos4x12_div_clks),
+ .gate_clks = exynos4x12_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(exynos4x12_gate_clks),
+ .fixed_factor_clks = exynos4x12_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(exynos4x12_fixed_factor_clks),
+};
+
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
@@ -1322,41 +1361,12 @@ static void __init exynos4_clk_init(struct device_node *np,
ARRAY_SIZE(exynos4x12_plls));
}
- samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks,
- ARRAY_SIZE(exynos4_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4_mux_clks,
- ARRAY_SIZE(exynos4_mux_clks));
- samsung_clk_register_div(ctx, exynos4_div_clks,
- ARRAY_SIZE(exynos4_div_clks));
- samsung_clk_register_gate(ctx, exynos4_gate_clks,
- ARRAY_SIZE(exynos4_gate_clks));
- samsung_clk_register_fixed_factor(ctx, exynos4_fixed_factor_clks,
- ARRAY_SIZE(exynos4_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4);
if (exynos4_soc == EXYNOS4210) {
- samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks,
- ARRAY_SIZE(exynos4210_fixed_rate_clks));
- samsung_clk_register_mux(ctx, exynos4210_mux_clks,
- ARRAY_SIZE(exynos4210_mux_clks));
- samsung_clk_register_div(ctx, exynos4210_div_clks,
- ARRAY_SIZE(exynos4210_div_clks));
- samsung_clk_register_gate(ctx, exynos4210_gate_clks,
- ARRAY_SIZE(exynos4210_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4210_fixed_factor_clks,
- ARRAY_SIZE(exynos4210_fixed_factor_clks));
- samsung_clk_register_cpu(ctx, exynos4210_cpu_clks,
- ARRAY_SIZE(exynos4210_cpu_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4210);
} else {
- samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
- ARRAY_SIZE(exynos4x12_mux_clks));
- samsung_clk_register_div(ctx, exynos4x12_div_clks,
- ARRAY_SIZE(exynos4x12_div_clks));
- samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
- ARRAY_SIZE(exynos4x12_gate_clks));
- samsung_clk_register_fixed_factor(ctx,
- exynos4x12_fixed_factor_clks,
- ARRAY_SIZE(exynos4x12_fixed_factor_clks));
+ samsung_cmu_register_clocks(ctx, &cmu_info_exynos4x12);
if (soc == EXYNOS4412)
samsung_clk_register_cpu(ctx, exynos4412_cpu_clks,
ARRAY_SIZE(exynos4412_cpu_clks));
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index dc8d4240f6de..da4afe8ac2ab 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -18,6 +18,9 @@
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1)
+#define CLKS_NR_CPUCL0 (CLK_DOUT_CPUCL0_NOCP + 1)
+#define CLKS_NR_CPUCL1 (CLK_DOUT_CPUCL1_NOCP + 1)
+#define CLKS_NR_CPUCL2 (CLK_DOUT_CPUCL2_NOCP + 1)
#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1)
#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_I3C + 1)
#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1)
@@ -1005,6 +1008,339 @@ static void __init exynosautov920_cmu_top_init(struct device_node *np)
CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top",
exynosautov920_cmu_top_init);
+/* ---- CMU_CPUCL0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL0 (0x1EC00000) */
+#define PLL_LOCKTIME_PLL_CPUCL0 0x0000
+#define PLL_CON0_PLL_CPUCL0 0x0100
+#define PLL_CON1_PLL_CPUCL0 0x0104
+#define PLL_CON3_PLL_CPUCL0 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0620
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL0_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC 0x181c
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG 0x1820
+#define CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP 0x1824
+
+static const unsigned long cpucl0_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL0,
+ PLL_CON0_PLL_CPUCL0,
+ PLL_CON1_PLL_CPUCL0,
+ PLL_CON3_PLL_CPUCL0,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL0 */
+PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" };
+PNAME(mout_cpucl0_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl0_cluster" };
+PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_clkcmu_cpucl0_dbg" };
+PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl0_switch" };
+PNAME(mout_cpucl0_cluster_p) = { "oscclk", "mout_cpucl0_cluster_user",
+ "mout_cpucl0_switch_user"};
+PNAME(mout_cpucl0_core_p) = { "oscclk", "mout_pll_cpucl0",
+ "mout_cpucl0_switch_user"};
+
+static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = {
+ PLL_35XX_RATE(38400000U, 2400000000U, 250, 4, 0),
+ PLL_35XX_RATE(38400000U, 2304000000U, 240, 4, 0),
+ PLL_35XX_RATE(38400000U, 2208000000U, 230, 4, 0),
+ PLL_35XX_RATE(38400000U, 2112000000U, 220, 4, 0),
+ PLL_35XX_RATE(38400000U, 2016000000U, 210, 4, 0),
+ PLL_35XX_RATE(38400000U, 1824000000U, 190, 4, 0),
+ PLL_35XX_RATE(38400000U, 1680000000U, 175, 4, 0),
+ PLL_35XX_RATE(38400000U, 1344000000U, 140, 4, 0),
+ PLL_35XX_RATE(38400000U, 1152000000U, 120, 4, 0),
+ PLL_35XX_RATE(38400000U, 576000000U, 120, 4, 1),
+ PLL_35XX_RATE(38400000U, 288000000U, 120, 4, 2),
+};
+
+static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = {
+ /* CMU_CPUCL0_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p,
+ PLL_CON0_PLL_CPUCL0, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER_USER, "mout_cpucl0_cluster_user", mout_cpucl0_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user", mout_cpucl0_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user", mout_cpucl0_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL0_CLUSTER, "mout_cpucl0_cluster", mout_cpucl0_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL0_CORE, "mout_cpucl0_core", mout_cpucl0_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL0_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_MPCLK, "dout_cluster0_mpclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PCLK, "dout_cluster0_pclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL0_DBG_NOC, "dout_cpucl0_dbg_noc",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_DBG_PCLKDBG, "dout_cpucl0_dbg_pclkdbg",
+ "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG, 0, 3),
+ DIV(CLK_DOUT_CPUCL0_NOCP, "dout_cpucl0_nocp",
+ "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl0_cmu_info __initconst = {
+ .pll_clks = cpucl0_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks),
+ .mux_clks = cpucl0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks),
+ .div_clks = cpucl0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl0_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL0,
+ .clk_regs = cpucl0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs),
+ .clk_name = "cpucl0",
+};
+
+static void __init exynosautov920_cmu_cpucl0_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info);
+}
+
+/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl0, "samsung,exynosautov920-cmu-cpucl0",
+ exynosautov920_cmu_cpucl0_init);
+
+/* ---- CMU_CPUCL1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL1 (0x1ED00000) */
+#define PLL_LOCKTIME_PLL_CPUCL1 0x0000
+#define PLL_CON0_PLL_CPUCL1 0x0100
+#define PLL_CON1_PLL_CPUCL1 0x0104
+#define PLL_CON3_PLL_CPUCL1 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL1_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP 0x181c
+
+static const unsigned long cpucl1_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL1,
+ PLL_CON0_PLL_CPUCL1,
+ PLL_CON1_PLL_CPUCL1,
+ PLL_CON3_PLL_CPUCL1,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
+PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
+PNAME(mout_cpucl1_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl1_cluster" };
+PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl1_switch" };
+PNAME(mout_cpucl1_cluster_p) = { "oscclk", "mout_cpucl1_cluster_user",
+ "mout_cpucl1_switch_user"};
+PNAME(mout_cpucl1_core_p) = { "oscclk", "mout_pll_cpucl1",
+ "mout_cpucl1_switch_user"};
+
+static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = {
+ /* CMU_CPUCL1_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p,
+ PLL_CON0_PLL_CPUCL1, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER_USER, "mout_cpucl1_cluster_user", mout_cpucl1_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user", mout_cpucl1_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL1_CLUSTER, "mout_cpucl1_cluster", mout_cpucl1_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL1_CORE, "mout_cpucl1_core", mout_cpucl1_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL1_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_MPCLK, "dout_cluster1_mpclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PCLK, "dout_cluster1_pclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL1_NOCP, "dout_cpucl1_nocp",
+ "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl1_cmu_info __initconst = {
+ .pll_clks = cpucl1_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks),
+ .mux_clks = cpucl1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks),
+ .div_clks = cpucl1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl1_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL1,
+ .clk_regs = cpucl1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs),
+ .clk_name = "cpucl1",
+};
+
+static void __init exynosautov920_cmu_cpucl1_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info);
+}
+
+/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl1, "samsung,exynosautov920-cmu-cpucl1",
+ exynosautov920_cmu_cpucl1_init);
+
+/* ---- CMU_CPUCL2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_CPUCL2 (0x1EE00000) */
+#define PLL_LOCKTIME_PLL_CPUCL2 0x0000
+#define PLL_CON0_PLL_CPUCL2 0x0100
+#define PLL_CON1_PLL_CPUCL2 0x0104
+#define PLL_CON3_PLL_CPUCL2 0x010c
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER 0x0610
+
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER 0x1000
+#define CLK_CON_MUX_MUX_CLK_CPUCL2_CORE 0x1004
+
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK 0x1804
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK 0x1808
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK 0x180c
+#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK 0x1810
+#define CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP 0x181c
+
+static const unsigned long cpucl2_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL2,
+ PLL_CON0_PLL_CPUCL2,
+ PLL_CON1_PLL_CPUCL2,
+ PLL_CON3_PLL_CPUCL2,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK,
+ CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK,
+ CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_CPUCL2 */
+PNAME(mout_pll_cpucl2_p) = { "oscclk", "fout_cpucl2_pll" };
+PNAME(mout_cpucl2_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl2_cluster" };
+PNAME(mout_cpucl2_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl2_switch" };
+PNAME(mout_cpucl2_cluster_p) = { "oscclk", "mout_cpucl2_cluster_user",
+ "mout_cpucl2_switch_user"};
+PNAME(mout_cpucl2_core_p) = { "oscclk", "mout_pll_cpucl2",
+ "mout_cpucl2_switch_user"};
+
+static const struct samsung_pll_clock cpucl2_pll_clks[] __initconst = {
+ /* CMU_CPUCL2_PURECLKCOMP */
+ PLL(pll_531x, CLK_FOUT_CPUCL2_PLL, "fout_cpucl2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_CPUCL2, PLL_CON3_PLL_CPUCL2, cpu_pll_rates),
+};
+
+static const struct samsung_mux_clock cpucl2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_CPUCL2, "mout_pll_cpucl2", mout_pll_cpucl2_p,
+ PLL_CON0_PLL_CPUCL2, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER_USER, "mout_cpucl2_cluster_user", mout_cpucl2_cluster_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_SWITCH_USER, "mout_cpucl2_switch_user", mout_cpucl2_switch_user_p,
+ PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER, 4, 1),
+ MUX(CLK_MOUT_CPUCL2_CLUSTER, "mout_cpucl2_cluster", mout_cpucl2_cluster_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER, 0, 2),
+ MUX(CLK_MOUT_CPUCL2_CORE, "mout_cpucl2_core", mout_cpucl2_core_p,
+ CLK_CON_MUX_MUX_CLK_CPUCL2_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cpucl2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLUSTER2_ACLK, "dout_cluster2_aclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_ATCLK, "dout_cluster2_atclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_MPCLK, "dout_cluster2_mpclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PCLK, "dout_cluster2_pclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK, 0, 4),
+ DIV(CLK_DOUT_CLUSTER2_PERIPHCLK, "dout_cluster2_periphclk",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL2_NOCP, "dout_cpucl2_nocp",
+ "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP, 0, 4),
+};
+
+static const struct samsung_cmu_info cpucl2_cmu_info __initconst = {
+ .pll_clks = cpucl2_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cpucl2_pll_clks),
+ .mux_clks = cpucl2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cpucl2_mux_clks),
+ .div_clks = cpucl2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cpucl2_div_clks),
+ .nr_clk_ids = CLKS_NR_CPUCL2,
+ .clk_regs = cpucl2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cpucl2_clk_regs),
+ .clk_name = "cpucl2",
+};
+
+static void __init exynosautov920_cmu_cpucl2_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &cpucl2_cmu_info);
+}
+
+/* Register CMU_CPUCL2 early, as CPU clocks should be available ASAP */
+CLK_OF_DECLARE(exynosautov920_cmu_cpucl2, "samsung,exynosautov920-cmu-cpucl2",
+ exynosautov920_cmu_cpucl2_init);
+
/* ---- CMU_PERIC0 --------------------------------------------------------- */
/* Register Offset definitions for CMU_PERIC0 (0x10800000) */
@@ -1393,7 +1729,7 @@ static const unsigned long hsi1_clk_regs[] __initconst = {
/* List of parent clocks for Muxes in CMU_HSI1 */
PNAME(mout_hsi1_mmc_card_user_p) = {"oscclk", "dout_clkcmu_hsi1_mmc_card"};
PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" };
-PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "mout_clkcmu_hsi1_usbdrd" };
+PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_hsi1_usbdrd" };
PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" };
static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = {
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 1d82737befd3..a88c212bda12 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -83,9 +83,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long mdiv;
- unsigned long refdiv;
- unsigned long reg;
+ u32 mdiv;
+ u32 refdiv;
+ u32 reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 9dcc1b2d2cc0..03a96139a576 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -39,9 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
- unsigned long bypass;
+ u32 bypass;
reg = readl(socfpgaclk->hw.reg);
bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS);
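The two socfpga hunks narrow the variables that hold readl() results from unsigned long to u32, so the declared width matches the 32-bit register before the values feed the 64-bit VCO math. An illustrative sketch of the pattern; the field layout and names are hypothetical:

#include <linux/io.h>
#include <linux/math64.h>
#include <linux/types.h>

static unsigned long example_recalc(void __iomem *reg,
				    unsigned long parent_rate)
{
	u32 val = readl(reg);			/* MMIO register is 32 bits wide */
	u32 mdiv = val & 0x1fff;		/* hypothetical field layout */
	u32 refdiv = (val >> 16) & 0x3f;
	u64 vco = (u64)parent_rate * (mdiv + 1);	/* widen before multiplying */

	return (unsigned long)div_u64(vco, refdiv ?: 1);
}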
diff --git a/drivers/clk/sophgo/Kconfig b/drivers/clk/sophgo/Kconfig
index 8b1367e3a95e..e14e802f28bf 100644
--- a/drivers/clk/sophgo/Kconfig
+++ b/drivers/clk/sophgo/Kconfig
@@ -37,3 +37,22 @@ config CLK_SOPHGO_SG2042_RPGATE
This clock IP depends on SG2042 Clock Generator because it uses
clock from Clock Generator IP as input.
This driver provides Gate function for RP.
+
+config CLK_SOPHGO_SG2044
+ tristate "Sophgo SG2044 clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This driver supports the clock controller on the Sophgo SG2044
+ SoC. This controller requires multiple PLL clocks as input.
+ This clock controller provides PLL clocks and common clock functions
+ for various IPs on the SoC.
+
+config CLK_SOPHGO_SG2044_PLL
+ tristate "Sophgo SG2044 PLL clock controller support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ select MFD_SYSCON
+ select REGMAP_MMIO
+ help
+ This driver supports the PLL clock controller on the Sophgo
+ SG2044 SoC. This controller requires a 25 MHz oscillator as input.
+ This clock controller provides PLL clocks on the SoC.
diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile
index 53506845a044..26b2fd121582 100644
--- a/drivers/clk/sophgo/Makefile
+++ b/drivers/clk/sophgo/Makefile
@@ -9,3 +9,5 @@ clk-sophgo-cv1800-y += clk-cv18xx-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_CLKGEN) += clk-sg2042-clkgen.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_PLL) += clk-sg2042-pll.o
obj-$(CONFIG_CLK_SOPHGO_SG2042_RPGATE) += clk-sg2042-rpgate.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044) += clk-sg2044.o
+obj-$(CONFIG_CLK_SOPHGO_SG2044_PLL) += clk-sg2044-pll.o
diff --git a/drivers/clk/sophgo/clk-cv1800.c b/drivers/clk/sophgo/clk-cv1800.c
index e0c4dc347579..a4116ac1adcb 100644
--- a/drivers/clk/sophgo/clk-cv1800.c
+++ b/drivers/clk/sophgo/clk-cv1800.c
@@ -1519,7 +1519,9 @@ static int cv1800_clk_probe(struct platform_device *pdev)
static const struct of_device_id cv1800_clk_ids[] = {
{ .compatible = "sophgo,cv1800-clk", .data = &cv1800_desc },
+ { .compatible = "sophgo,cv1800b-clk", .data = &cv1800_desc },
{ .compatible = "sophgo,cv1810-clk", .data = &cv1810_desc },
+ { .compatible = "sophgo,cv1812h-clk", .data = &cv1810_desc },
{ .compatible = "sophgo,sg2000-clk", .data = &sg2000_desc },
{ }
};
diff --git a/drivers/clk/sophgo/clk-sg2044-pll.c b/drivers/clk/sophgo/clk-sg2044-pll.c
new file mode 100644
index 000000000000..94c0f519ba6d
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044-pll.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 PLL clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-pll.h>
+
+/* Low Control part */
+#define PLL_VCOSEL_MASK GENMASK(17, 16)
+
+/* High Control part */
+#define PLL_FBDIV_MASK GENMASK(11, 0)
+#define PLL_REFDIV_MASK GENMASK(17, 12)
+#define PLL_POSTDIV1_MASK GENMASK(20, 18)
+#define PLL_POSTDIV2_MASK GENMASK(23, 21)
+
+#define PLL_CALIBRATE_EN BIT(24)
+#define PLL_CALIBRATE_MASK GENMASK(29, 27)
+#define PLL_CALIBRATE_DEFAULT FIELD_PREP(PLL_CALIBRATE_MASK, 2)
+#define PLL_UPDATE_EN BIT(30)
+
+#define PLL_HIGH_CTRL_MASK \
+ (PLL_FBDIV_MASK | PLL_REFDIV_MASK | \
+ PLL_POSTDIV1_MASK | PLL_POSTDIV2_MASK | \
+ PLL_CALIBRATE_EN | PLL_CALIBRATE_MASK | \
+ PLL_UPDATE_EN)
+
+#define PLL_HIGH_CTRL_OFFSET 4
+
+#define PLL_VCOSEL_1G6 0x2
+#define PLL_VCOSEL_2G4 0x3
+
+#define PLL_LIMIT_FOUTVCO 0
+#define PLL_LIMIT_FOUT 1
+#define PLL_LIMIT_REFDIV 2
+#define PLL_LIMIT_FBDIV 3
+#define PLL_LIMIT_POSTDIV1 4
+#define PLL_LIMIT_POSTDIV2 5
+
+#define for_each_pll_limit_range(_var, _limit) \
+ for (_var = (_limit)->min; _var <= (_limit)->max; _var++)
+
+struct sg2044_pll_limit {
+ u64 min;
+ u64 max;
+};
+
+struct sg2044_pll_internal {
+ u32 ctrl_offset;
+ u32 status_offset;
+ u32 enable_offset;
+
+ u8 status_lock_bit;
+ u8 status_updating_bit;
+ u8 enable_bit;
+
+ const struct sg2044_pll_limit *limits;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_pll {
+ struct sg2044_clk_common common;
+ struct sg2044_pll_internal pll;
+ unsigned int syscon_offset;
+};
+
+struct sg2044_pll_desc_data {
+ struct sg2044_clk_common * const *pll;
+ u16 num_pll;
+};
+
+#define SG2044_SYSCON_PLL_OFFSET 0x98
+
+struct sg2044_pll_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline bool sg2044_clk_fit_limit(u64 value,
+ const struct sg2044_pll_limit *limit)
+{
+ return value >= limit->min && value <= limit->max;
+}
+
+static inline struct sg2044_pll *hw_to_sg2044_pll(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_pll, common);
+}
+
+static unsigned long sg2044_pll_calc_vco_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv)
+{
+ u64 numerator = parent_rate * fbdiv;
+
+ return div64_ul(numerator, refdiv);
+}
+
+static unsigned long sg2044_pll_calc_rate(unsigned long parent_rate,
+ unsigned long refdiv,
+ unsigned long fbdiv,
+ unsigned long postdiv1,
+ unsigned long postdiv2)
+{
+ u64 numerator, denominator;
+
+ numerator = parent_rate * fbdiv;
+ denominator = refdiv * (postdiv1 + 1) * (postdiv2 + 1);
+
+ return div64_u64(numerator, denominator);
+}
+
+static unsigned long sg2044_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ u32 value;
+ int ret;
+
+ ret = regmap_read(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset + PLL_HIGH_CTRL_OFFSET,
+ &value);
+ if (ret < 0)
+ return 0;
+
+ return sg2044_pll_calc_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+}
+
+static bool pll_is_better_rate(unsigned long target, unsigned long now,
+ unsigned long best)
+{
+ return abs_diff(target, now) < abs_diff(target, best);
+}
+
+static int sg2042_pll_compute_postdiv(const struct sg2044_pll_limit *limits,
+ unsigned long target,
+ unsigned long parent_rate,
+ unsigned int refdiv,
+ unsigned int fbdiv,
+ unsigned int *postdiv1,
+ unsigned int *postdiv2)
+{
+ unsigned int div1, div2;
+ unsigned long tmp, best_rate = 0;
+ unsigned int best_div1 = 0, best_div2 = 0;
+
+ for_each_pll_limit_range(div2, &limits[PLL_LIMIT_POSTDIV2]) {
+ for_each_pll_limit_range(div1, &limits[PLL_LIMIT_POSTDIV1]) {
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ div1, div2);
+
+ if (tmp > target)
+ continue;
+
+ if (pll_is_better_rate(target, tmp, best_rate)) {
+ best_div1 = div1;
+ best_div2 = div2;
+ best_rate = tmp;
+
+ if (tmp == target)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *postdiv1 = best_div1;
+ *postdiv2 = best_div2;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sg2044_compute_pll_setting(const struct sg2044_pll_limit *limits,
+ unsigned long req_rate,
+ unsigned long parent_rate,
+ unsigned int *value)
+{
+ unsigned int refdiv, fbdiv, postdiv1, postdiv2;
+ unsigned int best_refdiv, best_fbdiv, best_postdiv1, best_postdiv2;
+ unsigned long tmp, best_rate = 0;
+ int ret;
+
+ for_each_pll_limit_range(fbdiv, &limits[PLL_LIMIT_FBDIV]) {
+ for_each_pll_limit_range(refdiv, &limits[PLL_LIMIT_REFDIV]) {
+ u64 vco = sg2044_pll_calc_vco_rate(parent_rate,
+ refdiv, fbdiv);
+ if (!sg2044_clk_fit_limit(vco, &limits[PLL_LIMIT_FOUTVCO]))
+ continue;
+
+ ret = sg2042_pll_compute_postdiv(limits,
+ req_rate, parent_rate,
+ refdiv, fbdiv,
+ &postdiv1, &postdiv2);
+ if (ret)
+ continue;
+
+ tmp = sg2044_pll_calc_rate(parent_rate,
+ refdiv, fbdiv,
+ postdiv1, postdiv2);
+
+ if (pll_is_better_rate(req_rate, tmp, best_rate)) {
+ best_refdiv = refdiv;
+ best_fbdiv = fbdiv;
+ best_postdiv1 = postdiv1;
+ best_postdiv2 = postdiv2;
+ best_rate = tmp;
+
+ if (tmp == req_rate)
+ goto find;
+ }
+ }
+ }
+
+find:
+ if (best_rate) {
+ *value = FIELD_PREP(PLL_REFDIV_MASK, best_refdiv) |
+ FIELD_PREP(PLL_FBDIV_MASK, best_fbdiv) |
+ FIELD_PREP(PLL_POSTDIV1_MASK, best_postdiv1) |
+ FIELD_PREP(PLL_POSTDIV2_MASK, best_postdiv2);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int sg2044_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 target;
+ int ret;
+
+ target = clamp(req->rate, pll->pll.limits[PLL_LIMIT_FOUT].min,
+ pll->pll.limits[PLL_LIMIT_FOUT].max);
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, target,
+ req->best_parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ req->rate = sg2044_pll_calc_rate(req->best_parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value),
+ FIELD_GET(PLL_POSTDIV1_MASK, value),
+ FIELD_GET(PLL_POSTDIV2_MASK, value));
+
+ return 0;
+}
+
+static int sg2044_pll_poll_update(struct sg2044_pll *pll)
+{
+ int ret;
+ unsigned int value;
+
+ ret = regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (value & BIT(pll->pll.status_lock_bit)),
+ 1, 100000);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout_atomic(pll->common.regmap,
+ pll->syscon_offset + pll->pll.status_offset,
+ value,
+ (!(value & BIT(pll->pll.status_updating_bit))),
+ 1, 100000);
+}
+
+static int sg2044_pll_enable(struct sg2044_pll *pll, bool en)
+{
+ if (en) {
+ if (sg2044_pll_poll_update(pll) < 0)
+ pr_warn("%s: failed to lock PLL\n", clk_hw_get_name(&pll->common.hw));
+
+ return regmap_set_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+ }
+
+ return regmap_clear_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.enable_offset,
+ BIT(pll->pll.enable_bit));
+}
+
+static int sg2044_pll_update_vcosel(struct sg2044_pll *pll, u64 rate)
+{
+ unsigned int sel;
+
+ if (rate < U64_C(2400000000))
+ sel = PLL_VCOSEL_1G6;
+ else
+ sel = PLL_VCOSEL_2G4;
+
+ return regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset,
+ PLL_VCOSEL_MASK,
+ FIELD_PREP(PLL_VCOSEL_MASK, sel));
+}
+
+static int sg2044_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_pll *pll = hw_to_sg2044_pll(hw);
+ unsigned int value;
+ u64 vco;
+ int ret;
+
+ ret = sg2044_compute_pll_setting(pll->pll.limits, rate,
+ parent_rate, &value);
+ if (ret < 0)
+ return ret;
+
+ vco = sg2044_pll_calc_vco_rate(parent_rate,
+ FIELD_GET(PLL_REFDIV_MASK, value),
+ FIELD_GET(PLL_FBDIV_MASK, value));
+
+ value |= PLL_CALIBRATE_EN;
+ value |= PLL_CALIBRATE_DEFAULT;
+ value |= PLL_UPDATE_EN;
+
+ guard(spinlock_irqsave)(pll->common.lock);
+
+ ret = sg2044_pll_enable(pll, false);
+ if (ret)
+ return ret;
+
+ sg2044_pll_update_vcosel(pll, vco);
+
+ regmap_write_bits(pll->common.regmap,
+ pll->syscon_offset + pll->pll.ctrl_offset +
+ PLL_HIGH_CTRL_OFFSET,
+ PLL_HIGH_CTRL_MASK, value);
+
+ sg2044_pll_enable(pll, true);
+
+ return ret;
+}
+
+static const struct clk_ops sg2044_pll_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+ .determine_rate = sg2044_pll_determine_rate,
+ .set_rate = sg2044_pll_set_rate,
+};
+
+static const struct clk_ops sg2044_pll_ro_ops = {
+ .recalc_rate = sg2044_pll_recalc_rate,
+};
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define DEFINE_SG2044_PLL(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+#define DEFINE_SG2044_PLL_RO(_id, _name, _parent, _flags, \
+ _ctrl_offset, \
+ _status_offset, _status_lock_bit, \
+ _status_updating_bit, \
+ _enable_offset, _enable_bit, \
+ _limits) \
+ struct sg2044_pll _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_pll_ro_ops, \
+ (_flags)), \
+ .pll = { \
+ .ctrl_offset = (_ctrl_offset), \
+ .status_offset = (_status_offset), \
+ .enable_offset = (_enable_offset), \
+ .status_lock_bit = (_status_lock_bit), \
+ .status_updating_bit = (_status_updating_bit), \
+ .enable_bit = (_enable_bit), \
+ .limits = (_limits), \
+ }, \
+ }
+
+static const struct clk_parent_data osc_parents[] = {
+ { .index = 0 },
+};
+
+static const struct sg2044_pll_limit pll_limits[] = {
+ [PLL_LIMIT_FOUTVCO] = {
+ .min = U64_C(1600000000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_FOUT] = {
+ .min = U64_C(25000),
+ .max = U64_C(3200000000),
+ },
+ [PLL_LIMIT_REFDIV] = {
+ .min = U64_C(1),
+ .max = U64_C(63),
+ },
+ [PLL_LIMIT_FBDIV] = {
+ .min = U64_C(8),
+ .max = U64_C(1066),
+ },
+ [PLL_LIMIT_POSTDIV1] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+ [PLL_LIMIT_POSTDIV2] = {
+ .min = U64_C(0),
+ .max = U64_C(7),
+ },
+};
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL0, clk_fpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x58, 0x00, 22, 6,
+ 0x04, 6, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL1, clk_fpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x60, 0x00, 23, 7,
+ 0x04, 7, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_FPLL2, clk_fpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x20, 0x08, 16, 0,
+ 0x0c, 0, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL0, clk_dpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x68, 0x00, 24, 8,
+ 0x04, 8, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL1, clk_dpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x70, 0x00, 25, 9,
+ 0x04, 9, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL2, clk_dpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x78, 0x00, 26, 10,
+ 0x04, 10, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL3, clk_dpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x80, 0x00, 27, 11,
+ 0x04, 11, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL4, clk_dpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x88, 0x00, 28, 12,
+ 0x04, 12, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL5, clk_dpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x90, 0x00, 29, 13,
+ 0x04, 13, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL6, clk_dpll6, osc_parents, CLK_IS_CRITICAL,
+ 0x98, 0x00, 30, 14,
+ 0x04, 14, pll_limits);
+
+static DEFINE_SG2044_PLL_RO(CLK_DPLL7, clk_dpll7, osc_parents, CLK_IS_CRITICAL,
+ 0xa0, 0x00, 31, 15,
+ 0x04, 15, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL0, clk_mpll0, osc_parents, CLK_IS_CRITICAL,
+ 0x28, 0x00, 16, 0,
+ 0x04, 0, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL1, clk_mpll1, osc_parents, CLK_IS_CRITICAL,
+ 0x30, 0x00, 17, 1,
+ 0x04, 1, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL2, clk_mpll2, osc_parents, CLK_IS_CRITICAL,
+ 0x38, 0x00, 18, 2,
+ 0x04, 2, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL3, clk_mpll3, osc_parents, CLK_IS_CRITICAL,
+ 0x40, 0x00, 19, 3,
+ 0x04, 3, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL4, clk_mpll4, osc_parents, CLK_IS_CRITICAL,
+ 0x48, 0x00, 20, 4,
+ 0x04, 4, pll_limits);
+
+static DEFINE_SG2044_PLL(CLK_MPLL5, clk_mpll5, osc_parents, CLK_IS_CRITICAL,
+ 0x50, 0x00, 21, 5,
+ 0x04, 5, pll_limits);
+
+static struct sg2044_clk_common * const sg2044_pll_commons[] = {
+ &clk_fpll0.common,
+ &clk_fpll1.common,
+ &clk_fpll2.common,
+ &clk_dpll0.common,
+ &clk_dpll1.common,
+ &clk_dpll2.common,
+ &clk_dpll3.common,
+ &clk_dpll4.common,
+ &clk_dpll5.common,
+ &clk_dpll6.common,
+ &clk_dpll7.common,
+ &clk_mpll0.common,
+ &clk_mpll1.common,
+ &clk_mpll2.common,
+ &clk_mpll3.common,
+ &clk_mpll4.common,
+ &clk_mpll5.common,
+};
+
+static int sg2044_pll_init_ctrl(struct device *dev, struct regmap *regmap,
+ struct sg2044_pll_ctrl *ctrl,
+ const struct sg2044_pll_desc_data *desc)
+{
+ int ret, i;
+
+ spin_lock_init(&ctrl->lock);
+
+ for (i = 0; i < desc->num_pll; i++) {
+ struct sg2044_clk_common *common = desc->pll[i];
+ struct sg2044_pll *pll = hw_to_sg2044_pll(&common->hw);
+
+ common->lock = &ctrl->lock;
+ common->regmap = regmap;
+ pll->syscon_offset = SG2044_SYSCON_PLL_OFFSET;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
+static int sg2044_pll_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_pll_ctrl *ctrl;
+ const struct sg2044_pll_desc_data *desc;
+ struct regmap *regmap;
+
+ regmap = device_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "fail to get the regmap for PLL\n");
+
+ desc = (const struct sg2044_pll_desc_data *)platform_get_device_id(pdev)->driver_data;
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no matching data for platform\n");
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, desc->num_pll), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = desc->num_pll;
+
+ return sg2044_pll_init_ctrl(dev, regmap, ctrl, desc);
+}
+
+static const struct sg2044_pll_desc_data sg2044_pll_desc_data = {
+ .pll = sg2044_pll_commons,
+ .num_pll = ARRAY_SIZE(sg2044_pll_commons),
+};
+
+static const struct platform_device_id sg2044_pll_match[] = {
+ { .name = "sg2044-pll",
+ .driver_data = (unsigned long)&sg2044_pll_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, sg2044_pll_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_pll_probe,
+ .driver = {
+ .name = "sg2044-pll",
+ },
+ .id_table = sg2044_pll_match,
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 pll clock driver");
+MODULE_LICENSE("GPL");
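For reference, a worked instance of the arithmetic sg2044_pll_calc_rate() implements, rate = parent * fbdiv / (refdiv * (postdiv1 + 1) * (postdiv2 + 1)), with divider values chosen for illustration rather than taken from hardware defaults:

/*
 * parent = 25 MHz, refdiv = 1, fbdiv = 96:
 *   fout_vco = 25 MHz * 96 / 1 = 2400 MHz   (within the 1.6-3.2 GHz limit)
 * postdiv1 = 1, postdiv2 = 0:
 *   rate = 2400 MHz / ((1 + 1) * (0 + 1)) = 1200 MHz
 */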
diff --git a/drivers/clk/sophgo/clk-sg2044.c b/drivers/clk/sophgo/clk-sg2044.c
new file mode 100644
index 000000000000..f67f99c926b6
--- /dev/null
+++ b/drivers/clk/sophgo/clk-sg2044.c
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 clock controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/sophgo,sg2044-clk.h>
+
+#define DIV_ASSERT BIT(0)
+#define DIV_FACTOR_REG_SOURCE BIT(3)
+#define DIV_BRANCH_EN BIT(4)
+
+#define DIV_ASSERT_TIME 2
+
+struct sg2044_div_internal {
+ u32 offset;
+ u32 initval;
+ u8 shift;
+ u8 width;
+ u16 flags;
+};
+
+struct sg2044_mux_internal {
+ const u32 *table;
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_gate_internal {
+ u32 offset;
+ u16 shift;
+ u16 flags;
+};
+
+struct sg2044_clk_common {
+ struct clk_hw hw;
+ void __iomem *base;
+ spinlock_t *lock;
+ unsigned int id;
+};
+
+struct sg2044_div {
+ struct sg2044_clk_common common;
+ struct sg2044_div_internal div;
+};
+
+struct sg2044_mux {
+ struct sg2044_clk_common common;
+ struct sg2044_mux_internal mux;
+ struct notifier_block nb;
+ u8 saved_parent;
+};
+
+struct sg2044_gate {
+ struct sg2044_clk_common common;
+ struct sg2044_gate_internal gate;
+};
+
+struct sg2044_clk_ctrl {
+ spinlock_t lock;
+ struct clk_hw_onecell_data data;
+};
+
+struct sg2044_clk_desc_data {
+ struct sg2044_clk_common * const *pll;
+ struct sg2044_clk_common * const *div;
+ struct sg2044_clk_common * const *mux;
+ struct sg2044_clk_common * const *gate;
+ u16 num_pll;
+ u16 num_div;
+ u16 num_mux;
+ u16 num_gate;
+};
+
+#define hw_to_sg2044_clk_common(_hw) \
+ container_of((_hw), struct sg2044_clk_common, hw)
+
+static inline struct sg2044_div *hw_to_sg2044_div(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_div, common);
+}
+
+static u32 sg2044_div_get_reg_div(u32 reg, struct sg2044_div_internal *div)
+{
+ if ((reg & DIV_FACTOR_REG_SOURCE))
+ return (reg >> div->shift) & clk_div_mask(div->width);
+
+ return div->initval == 0 ? 1 : div->initval;
+}
+
+static unsigned long _sg2044_div_recalc_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ unsigned long parent_rate)
+{
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_recalc_rate(&common->hw, parent_rate, val, NULL,
+ div->flags, div->width);
+}
+
+static unsigned long sg2044_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_recalc_rate(&div->common, &div->div,
+ parent_rate);
+}
+
+static int _sg2044_div_determine_rate(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ struct clk_rate_request *req)
+{
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 reg = readl(common->base + div->offset);
+ u32 val = sg2044_div_get_reg_div(reg, div);
+
+ return divider_ro_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags,
+ val);
+ }
+
+ return divider_determine_rate(&common->hw, req, NULL,
+ div->width, div->flags);
+}
+
+static int sg2044_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return _sg2044_div_determine_rate(&div->common, &div->div, req);
+}
+
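+/*
+ * Divider updates follow an assert/program/release sequence: clear
+ * DIV_ASSERT to hold the divider, program the new divisor and mark the
+ * register as the divisor source, then set DIV_ASSERT to apply it.
+ */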
+static void sg2044_div_set_reg_div(struct sg2044_clk_common *common,
+ struct sg2044_div_internal *div,
+ u32 value)
+{
+ void __iomem *addr = common->base + div->offset;
+ u32 reg;
+
+ reg = readl(addr);
+
+ /* assert */
+ reg &= ~DIV_ASSERT;
+ writel(reg, addr);
+
+ /* set value */
+ reg = readl(addr);
+ reg &= ~(clk_div_mask(div->width) << div->shift);
+ reg |= (value << div->shift) | DIV_FACTOR_REG_SOURCE;
+ writel(reg, addr);
+
+ /* de-assert */
+ reg |= DIV_ASSERT;
+ writel(reg, addr);
+}
+
+static int sg2044_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ u32 value;
+
+ value = divider_get_val(rate, parent_rate, NULL,
+ div->div.width, div->div.flags);
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ sg2044_div_set_reg_div(&div->common, &div->div, value);
+
+ return 0;
+}
+
+static int sg2044_div_enable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value |= DIV_BRANCH_EN;
+ writel(value, addr);
+
+ return 0;
+}
+
+static void sg2044_div_disable(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+ void __iomem *addr = div->common.base + div->div.offset;
+ u32 value;
+
+ guard(spinlock_irqsave)(div->common.lock);
+
+ value = readl(addr);
+ value &= ~DIV_BRANCH_EN;
+ writel(value, addr);
+}
+
+static int sg2044_div_is_enabled(struct clk_hw *hw)
+{
+ struct sg2044_div *div = hw_to_sg2044_div(hw);
+
+ return readl(div->common.base + div->div.offset) & DIV_BRANCH_EN;
+}
+
+static const struct clk_ops sg2044_gateable_div_ops = {
+ .enable = sg2044_div_enable,
+ .disable = sg2044_div_disable,
+ .is_enabled = sg2044_div_is_enabled,
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
+static const struct clk_ops sg2044_div_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+ .set_rate = sg2044_div_set_rate,
+};
+
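+/*
+ * Read-only dividers (e.g. the DDR branches below) omit .set_rate; rate
+ * requests are pinned to the current divisor via
+ * divider_ro_determine_rate().
+ */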
+static const struct clk_ops sg2044_div_ro_ops = {
+ .recalc_rate = sg2044_div_recalc_rate,
+ .determine_rate = sg2044_div_determine_rate,
+};
+
+static inline struct sg2044_mux *hw_to_sg2044_mux(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_mux, common);
+}
+
+static inline struct sg2044_mux *nb_to_sg2044_mux(struct notifier_block *nb)
+{
+ return container_of(nb, struct sg2044_mux, nb);
+}
+
+static const u32 sg2044_mux_table[] = {0, 1};
+
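+/*
+ * During a rate change the mux is parked on parent 0 (the fixed-PLL
+ * branch) so consumers keep a stable clock while the new rate settles;
+ * the previously selected parent is restored afterwards.
+ */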
+static int sg2044_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct sg2044_mux *mux = nb_to_sg2044_mux(nb);
+ const struct clk_ops *ops = &clk_mux_ops;
+ struct clk_notifier_data *ndata = data;
+ struct clk_hw *hw = __clk_get_hw(ndata->clk);
+ int ret = 0;
+
+ if (event == PRE_RATE_CHANGE) {
+ mux->saved_parent = ops->get_parent(hw);
+ if (mux->saved_parent)
+ ret = ops->set_parent(hw, 0);
+ } else if (event == POST_RATE_CHANGE) {
+ ret = ops->set_parent(hw, mux->saved_parent);
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static inline struct sg2044_gate *hw_to_sg2044_gate(struct clk_hw *hw)
+{
+ return container_of(hw_to_sg2044_clk_common(hw),
+ struct sg2044_gate, common);
+}
+
+#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
+
+#define SG2044_CLK_COMMON_PHWS(_id, _name, _parents, _op, _flags) \
+ { \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, _parents, \
+ _op, (_flags)), \
+ .id = (_id), \
+ }
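+
+/*
+ * _PDATA initializers take struct clk_parent_data (parents resolved by
+ * fw_name from the devicetree), _PHWS initializers take struct clk_hw
+ * pointers (parents defined in this file).
+ */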
+
+#define DEFINE_SG2044_GATEABLE_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_gateable_div_ops,\
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_PDATA(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_DIV_RO(_id, _name, _parent, _flags, \
+ _div_offset, _div_shift, _div_width, \
+ _div_flags, _div_initval) \
+ struct sg2044_div _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &sg2044_div_ro_ops, \
+ (_flags)), \
+ .div = { \
+ .offset = (_div_offset), \
+ .initval = (_div_initval), \
+ .shift = (_div_shift), \
+ .width = (_div_width), \
+ .flags = (_div_flags) | CLK_DIVIDER_READ_ONLY,\
+ }, \
+ }
+
+#define DEFINE_SG2044_MUX(_id, _name, _parent, _flags, \
+ _mux_offset, _mux_shift, \
+ _mux_table, _mux_flags) \
+ struct sg2044_mux _name = { \
+ .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \
+ &clk_mux_ops, (_flags)),\
+ .mux = { \
+ .table = (_mux_table), \
+ .offset = (_mux_offset), \
+ .shift = (_mux_shift), \
+ .flags = (_mux_flags), \
+ }, \
+ }
+
+#define DEFINE_SG2044_GATE(_id, _name, _parent, _flags, \
+ _gate_offset, _gate_shift, _gate_flags) \
+ struct sg2044_gate _name = { \
+ .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \
+ &clk_gate_ops, (_flags)),\
+ .gate = { \
+ .offset = (_gate_offset), \
+ .shift = (_gate_shift), \
+ .flags = (_gate_flags), \
+ }, \
+ }
+
+static const struct clk_parent_data clk_fpll0_parent[] = {
+ { .fw_name = "fpll0" },
+};
+
+static const struct clk_parent_data clk_fpll1_parent[] = {
+ { .fw_name = "fpll1" },
+};
+
+static const struct clk_parent_data clk_fpll2_parent[] = {
+ { .fw_name = "fpll2" },
+};
+
+static const struct clk_parent_data clk_dpll0_parent[] = {
+ { .fw_name = "dpll0" },
+};
+
+static const struct clk_parent_data clk_dpll1_parent[] = {
+ { .fw_name = "dpll1" },
+};
+
+static const struct clk_parent_data clk_dpll2_parent[] = {
+ { .fw_name = "dpll2" },
+};
+
+static const struct clk_parent_data clk_dpll3_parent[] = {
+ { .fw_name = "dpll3" },
+};
+
+static const struct clk_parent_data clk_dpll4_parent[] = {
+ { .fw_name = "dpll4" },
+};
+
+static const struct clk_parent_data clk_dpll5_parent[] = {
+ { .fw_name = "dpll5" },
+};
+
+static const struct clk_parent_data clk_dpll6_parent[] = {
+ { .fw_name = "dpll6" },
+};
+
+static const struct clk_parent_data clk_dpll7_parent[] = {
+ { .fw_name = "dpll7" },
+};
+
+static const struct clk_parent_data clk_mpll0_parent[] = {
+ { .fw_name = "mpll0" },
+};
+
+static const struct clk_parent_data clk_mpll1_parent[] = {
+ { .fw_name = "mpll1" },
+};
+
+static const struct clk_parent_data clk_mpll2_parent[] = {
+ { .fw_name = "mpll2" },
+};
+
+static const struct clk_parent_data clk_mpll3_parent[] = {
+ { .fw_name = "mpll3" },
+};
+
+static const struct clk_parent_data clk_mpll4_parent[] = {
+ { .fw_name = "mpll4" },
+};
+
+static const struct clk_parent_data clk_mpll5_parent[] = {
+ { .fw_name = "mpll5" },
+};
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_FIXED, clk_div_ap_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x044, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_MAIN, clk_div_ap_sys_main,
+ clk_mpll0_parent, 0,
+ 0x040, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_FIXED, clk_div_rp_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x050, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_MAIN, clk_div_rp_sys_main,
+ clk_mpll1_parent, 0,
+ 0x04c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_FIXED, clk_div_tpu_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x058, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_MAIN, clk_div_tpu_sys_main,
+ clk_mpll2_parent, 0,
+ 0x054, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_FIXED, clk_div_noc_sys_fixed,
+ clk_fpll0_parent, 0,
+ 0x070, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_MAIN, clk_div_noc_sys_main,
+ clk_mpll3_parent, 0,
+ 0x06c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_FIXED, clk_div_vc_src0_fixed,
+ clk_fpll0_parent, 0,
+ 0x078, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_MAIN, clk_div_vc_src0_main,
+ clk_mpll4_parent, 0,
+ 0x074, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_FIXED, clk_div_vc_src1_fixed,
+ clk_fpll0_parent, 0,
+ 0x080, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 3);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_MAIN, clk_div_vc_src1_main,
+ clk_mpll5_parent, 0,
+ 0x07c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_FIXED, clk_div_cxp_mac_fixed,
+ clk_fpll0_parent, 0,
+ 0x088, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 2);
+
+static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_MAIN, clk_div_cxp_mac_main,
+ clk_fpll1_parent, 0,
+ 0x084, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_IS_CRITICAL,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_FIXED, clk_div_ddr0_fixed,
+ clk_fpll0_parent, 0,
+ 0x124, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_MAIN, clk_div_ddr0_main,
+ clk_dpll0_parent, 0,
+ 0x120, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_FIXED, clk_div_ddr1_fixed,
+ clk_fpll0_parent, 0,
+ 0x12c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_MAIN, clk_div_ddr1_main,
+ clk_dpll1_parent, 0,
+ 0x128, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_FIXED, clk_div_ddr2_fixed,
+ clk_fpll0_parent, 0,
+ 0x134, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_MAIN, clk_div_ddr2_main,
+ clk_dpll2_parent, 0,
+ 0x130, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_FIXED, clk_div_ddr3_fixed,
+ clk_fpll0_parent, 0,
+ 0x13c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_MAIN, clk_div_ddr3_main,
+ clk_dpll3_parent, 0,
+ 0x138, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_FIXED, clk_div_ddr4_fixed,
+ clk_fpll0_parent, 0,
+ 0x144, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_MAIN, clk_div_ddr4_main,
+ clk_dpll4_parent, 0,
+ 0x140, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_FIXED, clk_div_ddr5_fixed,
+ clk_fpll0_parent, 0,
+ 0x14c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_MAIN, clk_div_ddr5_main,
+ clk_dpll5_parent, 0,
+ 0x148, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_FIXED, clk_div_ddr6_fixed,
+ clk_fpll0_parent, 0,
+ 0x154, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_MAIN, clk_div_ddr6_main,
+ clk_dpll6_parent, 0,
+ 0x150, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_FIXED, clk_div_ddr7_fixed,
+ clk_fpll0_parent, 0,
+ 0x15c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_MAIN, clk_div_ddr7_main,
+ clk_dpll7_parent, 0,
+ 0x158, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_50M, clk_div_top_50m,
+ clk_fpll0_parent, 0,
+ 0x048, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
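+
+/*
+ * The clock names encode target rates: with the default divisor of 40
+ * this branch outputs 50 MHz, which implies fpll0 runs at 2 GHz (the PLL
+ * rates themselves are not encoded in this file).
+ */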
+
+static const struct clk_hw *clk_div_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_DIV_RO(CLK_DIV_TOP_AXI0, clk_div_top_axi0,
+ clk_fpll0_parent, 0,
+ 0x118, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 20);
+
+static const struct clk_hw *clk_div_top_axi0_parent[] = {
+ &clk_div_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_AXI_HSPERI, clk_div_top_axi_hsperi,
+ clk_fpll0_parent, 0,
+ 0x11c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 8);
+
+static const struct clk_hw *clk_div_top_axi_hsperi_parent[] = {
+ &clk_div_top_axi_hsperi.common.hw,
+};
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER0, clk_div_timer0,
+ clk_div_top_50m_parent, 0,
+ 0x0d0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER1, clk_div_timer1,
+ clk_div_top_50m_parent, 0,
+ 0x0d4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER2, clk_div_timer2,
+ clk_div_top_50m_parent, 0,
+ 0x0d8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER3, clk_div_timer3,
+ clk_div_top_50m_parent, 0,
+ 0x0dc, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER4, clk_div_timer4,
+ clk_div_top_50m_parent, 0,
+ 0x0e0, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER5, clk_div_timer5,
+ clk_div_top_50m_parent, 0,
+ 0x0e4, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER6, clk_div_timer6,
+ clk_div_top_50m_parent, 0,
+ 0x0e8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV(CLK_DIV_TIMER7, clk_div_timer7,
+ clk_div_top_50m_parent, 0,
+ 0x0ec, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_PHY, clk_div_cxp_test_phy,
+ clk_fpll0_parent, 0,
+ 0x064, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_ETH_PHY, clk_div_cxp_test_eth_phy,
+ clk_fpll2_parent, 0,
+ 0x068, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C0_TEST_PHY, clk_div_c2c0_test_phy,
+ clk_fpll0_parent, 0,
+ 0x05c, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C1_TEST_PHY, clk_div_c2c1_test_phy,
+ clk_fpll0_parent, 0,
+ 0x060, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PCIE_1G, clk_div_pcie_1g,
+ clk_fpll1_parent, 0,
+ 0x160, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_UART_500M, clk_div_uart_500m,
+ clk_fpll0_parent, 0,
+ 0x0cc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 4);
+
+static DEFINE_SG2044_DIV(CLK_DIV_GPIO_DB, clk_div_gpio_db,
+ clk_div_top_axi0_parent, 0,
+ 0x0f8, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_SD, clk_div_sd,
+ clk_fpll0_parent, 0,
+ 0x110, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_SD_100K, clk_div_sd_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x114, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EMMC, clk_div_emmc,
+ clk_fpll0_parent, 0,
+ 0x108, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 5);
+
+static DEFINE_SG2044_DIV(CLK_DIV_EMMC_100K, clk_div_emmc_100k,
+ clk_div_top_axi0_parent, 0,
+ 0x10c, 16, 16,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 1000);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EFUSE, clk_div_efuse,
+ clk_fpll0_parent, 0,
+ 0x0f4, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TX_ETH0, clk_div_tx_eth0,
+ clk_fpll0_parent, 0,
+ 0x0fc, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 16);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PTP_REF_I_ETH0, clk_div_ptp_ref_i_eth0,
+ clk_fpll0_parent, 0,
+ 0x100, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 40);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_REF_ETH0, clk_div_ref_eth0,
+ clk_fpll0_parent, 0,
+ 0x104, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 80);
+
+static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PKA, clk_div_pka,
+ clk_fpll0_parent, 0,
+ 0x0f0, 16, 8,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ 2);
+
+static const struct clk_parent_data clk_mux_ddr0_parents[] = {
+ { .hw = &clk_div_ddr0_fixed.common.hw },
+ { .hw = &clk_div_ddr0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR0, clk_mux_ddr0,
+ clk_mux_ddr0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 7, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr1_parents[] = {
+ { .hw = &clk_div_ddr1_fixed.common.hw },
+ { .hw = &clk_div_ddr1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR1, clk_mux_ddr1,
+ clk_mux_ddr1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 8, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr2_parents[] = {
+ { .hw = &clk_div_ddr2_fixed.common.hw },
+ { .hw = &clk_div_ddr2_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR2, clk_mux_ddr2,
+ clk_mux_ddr2_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 9, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr3_parents[] = {
+ { .hw = &clk_div_ddr3_fixed.common.hw },
+ { .hw = &clk_div_ddr3_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR3, clk_mux_ddr3,
+ clk_mux_ddr3_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 10, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr4_parents[] = {
+ { .hw = &clk_div_ddr4_fixed.common.hw },
+ { .hw = &clk_div_ddr4_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR4, clk_mux_ddr4,
+ clk_mux_ddr4_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 11, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr5_parents[] = {
+ { .hw = &clk_div_ddr5_fixed.common.hw },
+ { .hw = &clk_div_ddr5_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR5, clk_mux_ddr5,
+ clk_mux_ddr5_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 12, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr6_parents[] = {
+ { .hw = &clk_div_ddr6_fixed.common.hw },
+ { .hw = &clk_div_ddr6_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR6, clk_mux_ddr6,
+ clk_mux_ddr6_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 13, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_ddr7_parents[] = {
+ { .hw = &clk_div_ddr7_fixed.common.hw },
+ { .hw = &clk_div_ddr7_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_DDR7, clk_mux_ddr7,
+ clk_mux_ddr7_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 14, sg2044_mux_table, CLK_MUX_READ_ONLY);
+
+static const struct clk_parent_data clk_mux_noc_sys_parents[] = {
+ { .hw = &clk_div_noc_sys_fixed.common.hw },
+ { .hw = &clk_div_noc_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_NOC_SYS, clk_mux_noc_sys,
+ clk_mux_noc_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 3, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_tpu_sys_parents[] = {
+ { .hw = &clk_div_tpu_sys_fixed.common.hw },
+ { .hw = &clk_div_tpu_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_TPU_SYS, clk_mux_tpu_sys,
+ clk_mux_tpu_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 2, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_rp_sys_parents[] = {
+ { .hw = &clk_div_rp_sys_fixed.common.hw },
+ { .hw = &clk_div_rp_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_RP_SYS, clk_mux_rp_sys,
+ clk_mux_rp_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 1, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_ap_sys_parents[] = {
+ { .hw = &clk_div_ap_sys_fixed.common.hw },
+ { .hw = &clk_div_ap_sys_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_AP_SYS, clk_mux_ap_sys,
+ clk_mux_ap_sys_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 0, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src0_parents[] = {
+ { .hw = &clk_div_vc_src0_fixed.common.hw },
+ { .hw = &clk_div_vc_src0_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC0, clk_mux_vc_src0,
+ clk_mux_vc_src0_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 4, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_vc_src1_parents[] = {
+ { .hw = &clk_div_vc_src1_fixed.common.hw },
+ { .hw = &clk_div_vc_src1_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC1, clk_mux_vc_src1,
+ clk_mux_vc_src1_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 5, sg2044_mux_table, 0);
+
+static const struct clk_parent_data clk_mux_cxp_mac_parents[] = {
+ { .hw = &clk_div_cxp_mac_fixed.common.hw },
+ { .hw = &clk_div_cxp_mac_main.common.hw },
+};
+
+static DEFINE_SG2044_MUX(CLK_MUX_CXP_MAC, clk_mux_cxp_mac,
+ clk_mux_cxp_mac_parents,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ 0x020, 6, sg2044_mux_table, 0);
+
+static const struct clk_hw *clk_gate_ap_sys_parent[] = {
+ &clk_mux_ap_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_AP_SYS, clk_gate_ap_sys,
+ clk_gate_ap_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 0, 0);
+
+static const struct clk_hw *clk_gate_rp_sys_parent[] = {
+ &clk_mux_rp_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_RP_SYS, clk_gate_rp_sys,
+ clk_gate_rp_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 2, 0);
+
+static const struct clk_hw *clk_gate_tpu_sys_parent[] = {
+ &clk_mux_tpu_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TPU_SYS, clk_gate_tpu_sys,
+ clk_gate_tpu_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 3, 0);
+
+static const struct clk_hw *clk_gate_noc_sys_parent[] = {
+ &clk_mux_noc_sys.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_NOC_SYS, clk_gate_noc_sys,
+ clk_gate_noc_sys_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 8, 0);
+
+static const struct clk_hw *clk_gate_vc_src0_parent[] = {
+ &clk_mux_vc_src0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC0, clk_gate_vc_src0,
+ clk_gate_vc_src0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 9, 0);
+
+static const struct clk_hw *clk_gate_vc_src1_parent[] = {
+ &clk_mux_vc_src1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC1, clk_gate_vc_src1,
+ clk_gate_vc_src1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr0_parent[] = {
+ &clk_mux_ddr0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR0, clk_gate_ddr0,
+ clk_gate_ddr0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 7, 0);
+
+static const struct clk_hw *clk_gate_ddr1_parent[] = {
+ &clk_mux_ddr1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR1, clk_gate_ddr1,
+ clk_gate_ddr1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 8, 0);
+
+static const struct clk_hw *clk_gate_ddr2_parent[] = {
+ &clk_mux_ddr2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR2, clk_gate_ddr2,
+ clk_gate_ddr2_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 9, 0);
+
+static const struct clk_hw *clk_gate_ddr3_parent[] = {
+ &clk_mux_ddr3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR3, clk_gate_ddr3,
+ clk_gate_ddr3_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 10, 0);
+
+static const struct clk_hw *clk_gate_ddr4_parent[] = {
+ &clk_mux_ddr4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR4, clk_gate_ddr4,
+ clk_gate_ddr4_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 11, 0);
+
+static const struct clk_hw *clk_gate_ddr5_parent[] = {
+ &clk_mux_ddr5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR5, clk_gate_ddr5,
+ clk_gate_ddr5_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 12, 0);
+
+static const struct clk_hw *clk_gate_ddr6_parent[] = {
+ &clk_mux_ddr6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR6, clk_gate_ddr6,
+ clk_gate_ddr6_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 13, 0);
+
+static const struct clk_hw *clk_gate_ddr7_parent[] = {
+ &clk_mux_ddr7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_DDR7, clk_gate_ddr7,
+ clk_gate_ddr7_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 14, 0);
+
+static const struct clk_hw *clk_gate_top_50m_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_50M, clk_gate_top_50m,
+ clk_gate_top_50m_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 1, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX, clk_gate_sc_rx,
+ clk_gate_sc_rx_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 12, 0);
+
+static const struct clk_hw *clk_gate_sc_rx_x0y1_parent[] = {
+ &clk_div_top_50m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SC_RX_X0Y1, clk_gate_sc_rx_x0y1,
+ clk_gate_sc_rx_x0y1_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 13, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI0, clk_gate_top_axi0,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 5, 0);
+
+static const struct clk_hw *clk_gate_mailbox_intc_parent[] = {
+ &clk_gate_top_axi0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC0, clk_gate_intc0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC1, clk_gate_intc1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC2, clk_gate_intc2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_INTC3, clk_gate_intc3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX0, clk_gate_mailbox0,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 16, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX1, clk_gate_mailbox1,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX2, clk_gate_mailbox2,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX3, clk_gate_mailbox3,
+ clk_gate_mailbox_intc_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x020, 19, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI_HSPERI, clk_gate_top_axi_hsperi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x008, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_TIMER, clk_gate_apb_timer,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 7, 0);
+
+static const struct clk_hw *clk_gate_timer0_parent[] = {
+ &clk_div_timer0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER0, clk_gate_timer0,
+ clk_gate_timer0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 8, 0);
+
+static const struct clk_hw *clk_gate_timer1_parent[] = {
+ &clk_div_timer1.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER1, clk_gate_timer1,
+ clk_gate_timer1_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 9, 0);
+
+static const struct clk_hw *clk_gate_timer2_parent[] = {
+ &clk_div_timer2.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER2, clk_gate_timer2,
+ clk_gate_timer2_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 10, 0);
+
+static const struct clk_hw *clk_gate_timer3_parent[] = {
+ &clk_div_timer3.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER3, clk_gate_timer3,
+ clk_gate_timer3_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 11, 0);
+
+static const struct clk_hw *clk_gate_timer4_parent[] = {
+ &clk_div_timer4.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER4, clk_gate_timer4,
+ clk_gate_timer4_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 12, 0);
+
+static const struct clk_hw *clk_gate_timer5_parent[] = {
+ &clk_div_timer5.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER5, clk_gate_timer5,
+ clk_gate_timer5_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 13, 0);
+
+static const struct clk_hw *clk_gate_timer6_parent[] = {
+ &clk_div_timer6.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER6, clk_gate_timer6,
+ clk_gate_timer6_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 14, 0);
+
+static const struct clk_hw *clk_gate_timer7_parent[] = {
+ &clk_div_timer7.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TIMER7, clk_gate_timer7,
+ clk_gate_timer7_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 15, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_CFG, clk_gate_cxp_cfg,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 15, 0);
+
+static const struct clk_hw *clk_gate_cxp_mac_parent[] = {
+ &clk_mux_cxp_mac.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_MAC, clk_gate_cxp_mac,
+ clk_gate_cxp_mac_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x000, 14, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_phy_parent[] = {
+ &clk_div_cxp_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_PHY, clk_gate_cxp_test_phy,
+ clk_gate_cxp_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 6, 0);
+
+static const struct clk_hw *clk_gate_cxp_test_eth_phy_parent[] = {
+ &clk_div_cxp_test_eth_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_ETH_PHY, clk_gate_cxp_test_eth_phy,
+ clk_gate_cxp_test_eth_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 7, 0);
+
+static const struct clk_hw *clk_gate_pcie_1g_parent[] = {
+ &clk_div_pcie_1g.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PCIE_1G, clk_gate_pcie_1g,
+ clk_gate_pcie_1g_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 15, 0);
+
+static const struct clk_hw *clk_gate_c2c0_test_phy_parent[] = {
+ &clk_div_c2c0_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C0_TEST_PHY, clk_gate_c2c0_test_phy,
+ clk_gate_c2c0_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 4, 0);
+
+static const struct clk_hw *clk_gate_c2c1_test_phy_parent[] = {
+ &clk_div_c2c1_test_phy.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_C2C1_TEST_PHY, clk_gate_c2c1_test_phy,
+ clk_gate_c2c1_test_phy_parent,
+ CLK_SET_RATE_PARENT,
+ 0x000, 5, 0);
+
+static const struct clk_hw *clk_gate_uart_500m_parent[] = {
+ &clk_div_uart_500m.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_UART_500M, clk_gate_uart_500m,
+ clk_gate_uart_500m_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 1, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_UART, clk_gate_apb_uart,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 2, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_SPI, clk_gate_apb_spi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 22, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_SPIFMC, clk_gate_ahb_spifmc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 5, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_I2C, clk_gate_apb_i2c,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 23, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_DBG_I2C, clk_gate_axi_dbg_i2c,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 3, 0);
+
+static const struct clk_hw *clk_gate_gpio_db_parent[] = {
+ &clk_div_gpio_db.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_GPIO_DB, clk_gate_gpio_db,
+ clk_gate_gpio_db_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 21, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO_INTR, clk_gate_apb_gpio_intr,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 20, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO, clk_gate_apb_gpio,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 19, 0);
+
+static const struct clk_hw *clk_gate_sd_parent[] = {
+ &clk_div_sd.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD, clk_gate_sd,
+ clk_gate_sd_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 3, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SD, clk_gate_axi_sd,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 2, 0);
+
+static const struct clk_hw *clk_gate_sd_100k_parent[] = {
+ &clk_div_sd_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_SD_100K, clk_gate_sd_100k,
+ clk_gate_sd_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 4, 0);
+
+static const struct clk_hw *clk_gate_emmc_parent[] = {
+ &clk_div_emmc.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC, clk_gate_emmc,
+ clk_gate_emmc_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 0, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_EMMC, clk_gate_axi_emmc,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 31, 0);
+
+static const struct clk_hw *clk_gate_emmc_100k_parent[] = {
+ &clk_div_emmc_100k.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EMMC_100K, clk_gate_emmc_100k,
+ clk_gate_emmc_100k_parent,
+ CLK_SET_RATE_PARENT,
+ 0x008, 1, 0);
+
+static const struct clk_hw *clk_gate_efuse_parent[] = {
+ &clk_div_efuse.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_EFUSE, clk_gate_efuse,
+ clk_gate_efuse_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 17, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_EFUSE, clk_gate_apb_efuse,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 18, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_SYSDMA_AXI, clk_gate_sysdma_axi,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 0, 0);
+
+static const struct clk_hw *clk_gate_tx_eth0_parent[] = {
+ &clk_div_tx_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_TX_ETH0, clk_gate_tx_eth0,
+ clk_gate_tx_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 27, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_ETH0, clk_gate_axi_eth0,
+ clk_div_top_axi_hsperi_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 28, 0);
+
+static const struct clk_hw *clk_gate_ptp_ref_i_eth0_parent[] = {
+ &clk_div_ptp_ref_i_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PTP_REF_I_ETH0, clk_gate_ptp_ref_i_eth0,
+ clk_gate_ptp_ref_i_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 29, 0);
+
+static const struct clk_hw *clk_gate_ref_eth0_parent[] = {
+ &clk_div_ref_eth0.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_REF_ETH0, clk_gate_ref_eth0,
+ clk_gate_ref_eth0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 30, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_RTC, clk_gate_apb_rtc,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 26, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_PWM, clk_gate_apb_pwm,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 25, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_APB_WDT, clk_gate_apb_wdt,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 24, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AXI_SRAM, clk_gate_axi_sram,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 6, 0);
+
+static DEFINE_SG2044_GATE(CLK_GATE_AHB_ROM, clk_gate_ahb_rom,
+ clk_div_top_axi0_parent,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 0x004, 4, 0);
+
+static const struct clk_hw *clk_gate_pka_parent[] = {
+ &clk_div_pka.common.hw,
+};
+
+static DEFINE_SG2044_GATE(CLK_GATE_PKA, clk_gate_pka,
+ clk_gate_pka_parent,
+ CLK_SET_RATE_PARENT,
+ 0x004, 16, 0);
+
+static struct sg2044_clk_common * const sg2044_div_commons[] = {
+ &clk_div_ap_sys_fixed.common,
+ &clk_div_ap_sys_main.common,
+ &clk_div_rp_sys_fixed.common,
+ &clk_div_rp_sys_main.common,
+ &clk_div_tpu_sys_fixed.common,
+ &clk_div_tpu_sys_main.common,
+ &clk_div_noc_sys_fixed.common,
+ &clk_div_noc_sys_main.common,
+ &clk_div_vc_src0_fixed.common,
+ &clk_div_vc_src0_main.common,
+ &clk_div_vc_src1_fixed.common,
+ &clk_div_vc_src1_main.common,
+ &clk_div_cxp_mac_fixed.common,
+ &clk_div_cxp_mac_main.common,
+ &clk_div_ddr0_fixed.common,
+ &clk_div_ddr0_main.common,
+ &clk_div_ddr1_fixed.common,
+ &clk_div_ddr1_main.common,
+ &clk_div_ddr2_fixed.common,
+ &clk_div_ddr2_main.common,
+ &clk_div_ddr3_fixed.common,
+ &clk_div_ddr3_main.common,
+ &clk_div_ddr4_fixed.common,
+ &clk_div_ddr4_main.common,
+ &clk_div_ddr5_fixed.common,
+ &clk_div_ddr5_main.common,
+ &clk_div_ddr6_fixed.common,
+ &clk_div_ddr6_main.common,
+ &clk_div_ddr7_fixed.common,
+ &clk_div_ddr7_main.common,
+ &clk_div_top_50m.common,
+ &clk_div_top_axi0.common,
+ &clk_div_top_axi_hsperi.common,
+ &clk_div_timer0.common,
+ &clk_div_timer1.common,
+ &clk_div_timer2.common,
+ &clk_div_timer3.common,
+ &clk_div_timer4.common,
+ &clk_div_timer5.common,
+ &clk_div_timer6.common,
+ &clk_div_timer7.common,
+ &clk_div_cxp_test_phy.common,
+ &clk_div_cxp_test_eth_phy.common,
+ &clk_div_c2c0_test_phy.common,
+ &clk_div_c2c1_test_phy.common,
+ &clk_div_pcie_1g.common,
+ &clk_div_uart_500m.common,
+ &clk_div_gpio_db.common,
+ &clk_div_sd.common,
+ &clk_div_sd_100k.common,
+ &clk_div_emmc.common,
+ &clk_div_emmc_100k.common,
+ &clk_div_efuse.common,
+ &clk_div_tx_eth0.common,
+ &clk_div_ptp_ref_i_eth0.common,
+ &clk_div_ref_eth0.common,
+ &clk_div_pka.common,
+};
+
+static struct sg2044_clk_common * const sg2044_mux_commons[] = {
+ &clk_mux_ddr0.common,
+ &clk_mux_ddr1.common,
+ &clk_mux_ddr2.common,
+ &clk_mux_ddr3.common,
+ &clk_mux_ddr4.common,
+ &clk_mux_ddr5.common,
+ &clk_mux_ddr6.common,
+ &clk_mux_ddr7.common,
+ &clk_mux_noc_sys.common,
+ &clk_mux_tpu_sys.common,
+ &clk_mux_rp_sys.common,
+ &clk_mux_ap_sys.common,
+ &clk_mux_vc_src0.common,
+ &clk_mux_vc_src1.common,
+ &clk_mux_cxp_mac.common,
+};
+
+static struct sg2044_clk_common * const sg2044_gate_commons[] = {
+ &clk_gate_ap_sys.common,
+ &clk_gate_rp_sys.common,
+ &clk_gate_tpu_sys.common,
+ &clk_gate_noc_sys.common,
+ &clk_gate_vc_src0.common,
+ &clk_gate_vc_src1.common,
+ &clk_gate_ddr0.common,
+ &clk_gate_ddr1.common,
+ &clk_gate_ddr2.common,
+ &clk_gate_ddr3.common,
+ &clk_gate_ddr4.common,
+ &clk_gate_ddr5.common,
+ &clk_gate_ddr6.common,
+ &clk_gate_ddr7.common,
+ &clk_gate_top_50m.common,
+ &clk_gate_sc_rx.common,
+ &clk_gate_sc_rx_x0y1.common,
+ &clk_gate_top_axi0.common,
+ &clk_gate_intc0.common,
+ &clk_gate_intc1.common,
+ &clk_gate_intc2.common,
+ &clk_gate_intc3.common,
+ &clk_gate_mailbox0.common,
+ &clk_gate_mailbox1.common,
+ &clk_gate_mailbox2.common,
+ &clk_gate_mailbox3.common,
+ &clk_gate_top_axi_hsperi.common,
+ &clk_gate_apb_timer.common,
+ &clk_gate_timer0.common,
+ &clk_gate_timer1.common,
+ &clk_gate_timer2.common,
+ &clk_gate_timer3.common,
+ &clk_gate_timer4.common,
+ &clk_gate_timer5.common,
+ &clk_gate_timer6.common,
+ &clk_gate_timer7.common,
+ &clk_gate_cxp_cfg.common,
+ &clk_gate_cxp_mac.common,
+ &clk_gate_cxp_test_phy.common,
+ &clk_gate_cxp_test_eth_phy.common,
+ &clk_gate_pcie_1g.common,
+ &clk_gate_c2c0_test_phy.common,
+ &clk_gate_c2c1_test_phy.common,
+ &clk_gate_uart_500m.common,
+ &clk_gate_apb_uart.common,
+ &clk_gate_apb_spi.common,
+ &clk_gate_ahb_spifmc.common,
+ &clk_gate_apb_i2c.common,
+ &clk_gate_axi_dbg_i2c.common,
+ &clk_gate_gpio_db.common,
+ &clk_gate_apb_gpio_intr.common,
+ &clk_gate_apb_gpio.common,
+ &clk_gate_sd.common,
+ &clk_gate_axi_sd.common,
+ &clk_gate_sd_100k.common,
+ &clk_gate_emmc.common,
+ &clk_gate_axi_emmc.common,
+ &clk_gate_emmc_100k.common,
+ &clk_gate_efuse.common,
+ &clk_gate_apb_efuse.common,
+ &clk_gate_sysdma_axi.common,
+ &clk_gate_tx_eth0.common,
+ &clk_gate_axi_eth0.common,
+ &clk_gate_ptp_ref_i_eth0.common,
+ &clk_gate_ref_eth0.common,
+ &clk_gate_apb_rtc.common,
+ &clk_gate_apb_pwm.common,
+ &clk_gate_apb_wdt.common,
+ &clk_gate_axi_sram.common,
+ &clk_gate_ahb_rom.common,
+ &clk_gate_pka.common,
+};
+
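+/*
+ * Muxes are registered through a helper that allocates a fresh clk_hw, so
+ * the static hw a gate names as its parent may never be registered itself.
+ * Remap each declared parent to the hw stored under the same id.
+ */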
+static void sg2044_clk_fix_init_parent(struct clk_hw **pdata,
+ const struct clk_init_data *init,
+ struct clk_hw_onecell_data *data)
+{
+ u8 i;
+ const struct clk_hw *hw;
+ const struct sg2044_clk_common *common;
+
+ for (i = 0; i < init->num_parents; i++) {
+ hw = init->parent_hws[i];
+ common = hw_to_sg2044_clk_common(hw);
+
+ WARN(!data->hws[common->id], "clk %u is not registered\n",
+ common->id);
+ pdata[i] = data->hws[common->id];
+ }
+}
+
+static int sg2044_clk_init_ctrl(struct device *dev, void __iomem *reg,
+ struct sg2044_clk_ctrl *ctrl,
+ const struct sg2044_clk_desc_data *desc)
+{
+ int ret, i;
+ struct clk_hw *hw;
+
+ spin_lock_init(&ctrl->lock);
+
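+ /*
+ * Registration order matters: dividers are registered first (their
+ * static hws back the mux parent_data), then muxes, then gates, whose
+ * parent pointers are remapped from the ids recorded by the earlier
+ * loops.
+ */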
+ for (i = 0; i < desc->num_div; i++) {
+ struct sg2044_clk_common *common = desc->div[i];
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ ret = devm_clk_hw_register(dev, &common->hw);
+ if (ret)
+ return ret;
+
+ ctrl->data.hws[common->id] = &common->hw;
+ }
+
+ for (i = 0; i < desc->num_mux; i++) {
+ struct sg2044_clk_common *common = desc->mux[i];
+ struct sg2044_mux *mux = hw_to_sg2044_mux(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ init->name,
+ init->parent_data,
+ init->num_parents,
+ init->flags,
+ reg + mux->mux.offset,
+ mux->mux.shift,
+ 1,
+ mux->mux.flags,
+ mux->mux.table,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ if (!(mux->mux.flags & CLK_MUX_READ_ONLY)) {
+ mux->nb.notifier_call = sg2044_mux_notifier_cb;
+ ret = devm_clk_notifier_register(dev, hw->clk,
+ &mux->nb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "%s: failed to register notifier\n",
+ clk_hw_get_name(hw));
+ }
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ for (i = 0; i < desc->num_gate; i++) {
+ struct sg2044_clk_common *common = desc->gate[i];
+ struct sg2044_gate *gate = hw_to_sg2044_gate(&common->hw);
+ const struct clk_init_data *init = common->hw.init;
+ struct clk_hw *parent_hws[1] = { };
+
+ sg2044_clk_fix_init_parent(parent_hws, init, &ctrl->data);
+ common->lock = &ctrl->lock;
+ common->base = reg;
+
+ hw = devm_clk_hw_register_gate_parent_hw(dev, init->name,
+ parent_hws[0],
+ init->flags,
+ reg + gate->gate.offset,
+ gate->gate.shift,
+ gate->gate.flags,
+ &ctrl->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ ctrl->data.hws[common->id] = hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctrl->data);
+}
+
+static int sg2044_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2044_clk_ctrl *ctrl;
+ const struct sg2044_clk_desc_data *desc;
+ void __iomem *reg;
+ u32 num_clks;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ desc = device_get_match_data(dev);
+ if (!desc)
+ return dev_err_probe(dev, -EINVAL, "no match data for platform\n");
+
+ num_clks = desc->num_div + desc->num_gate + desc->num_mux;
+
+ ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, num_clks), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->data.num = num_clks;
+
+ return sg2044_clk_init_ctrl(dev, reg, ctrl, desc);
+}
+
+static const struct sg2044_clk_desc_data sg2044_clk_desc_data = {
+ .div = sg2044_div_commons,
+ .mux = sg2044_mux_commons,
+ .gate = sg2044_gate_commons,
+ .num_div = ARRAY_SIZE(sg2044_div_commons),
+ .num_mux = ARRAY_SIZE(sg2044_mux_commons),
+ .num_gate = ARRAY_SIZE(sg2044_gate_commons),
+};
+
+static const struct of_device_id sg2044_clk_match[] = {
+ { .compatible = "sophgo,sg2044-clk", .data = &sg2044_clk_desc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_clk_match);
+
+static struct platform_driver sg2044_clk_driver = {
+ .probe = sg2044_clk_probe,
+ .driver = {
+ .name = "sg2044-clk",
+ .of_match_table = sg2044_clk_match,
+ },
+};
+module_platform_driver(sg2044_clk_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig
new file mode 100644
index 000000000000..4c4df845b3cb
--- /dev/null
+++ b/drivers/clk/spacemit/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config SPACEMIT_CCU
+ tristate "Clock support for SpacemiT SoCs"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Say Y or M to enable clock controller unit support for SpacemiT SoCs.
+
+if SPACEMIT_CCU
+
+config SPACEMIT_K1_CCU
+ tristate "Support for SpacemiT K1 SoC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+ Support for the clock controller unit in the SpacemiT K1 SoC.
+
+endif
diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile
new file mode 100644
index 000000000000..5ec6da61db98
--- /dev/null
+++ b/drivers/clk/spacemit/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPACEMIT_K1_CCU) += spacemit-ccu-k1.o
+spacemit-ccu-k1-y = ccu_pll.o ccu_mix.o ccu_ddn.o
+spacemit-ccu-k1-y += ccu-k1.o
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
new file mode 100644
index 000000000000..cdde37a05235
--- /dev/null
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/array_size.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+#include "ccu_mix.h"
+#include "ccu_ddn.h"
+
+#include <dt-bindings/clock/spacemit,k1-syscon.h>
+
+/* APBS register offsets */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+
+/* MPMU register offsets */
+#define MPMU_POSR 0x0010
+#define POSR_PLL1_LOCK BIT(27)
+#define POSR_PLL2_LOCK BIT(28)
+#define POSR_PLL3_LOCK BIT(29)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+/* APBC register offsets */
+#define APBC_UART1_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS1_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS2_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR_CLK_RST 0x5c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TWSI7_CLK_RST 0x68
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+
+/* APMU register offsets */
+#define APMU_JPG_CLK_RES_CTRL 0x020
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_AUDIO_CLK_RES_CTRL 0x14c
+#define APMU_HDMI_CLK_RES_CTRL 0x1b8
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38c
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
+#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
+#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+
+struct spacemit_ccu_data {
+ struct clk_hw **hws;
+ size_t num;
+};
+
+/* APBS clocks start. The APBS region contains all of the PLL clocks and nothing else. */
+
+/*
+ * PLL{1,2} must run at fixed frequencies to provide clocks at the correct
+ * rates for peripherals.
+ */
+static const struct ccu_pll_rate_tbl pll1_rate_tbl[] = {
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+};
+
+static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = {
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+};
+
+static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = {
+ CCU_PLL_RATE(1600000000UL, 0x0050cd61, 0x43eaaaab),
+ CCU_PLL_RATE(1800000000UL, 0x0050cd61, 0x4b000000),
+ CCU_PLL_RATE(2000000000UL, 0x0050dd62, 0x2aeaaaab),
+ CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd),
+ CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000),
+ CCU_PLL_RATE(3200000000UL, 0x0050dd67, 0x43eaaaab),
+};
+
+CCU_PLL_DEFINE(pll1, pll1_rate_tbl, APBS_PLL1_SWCR1, APBS_PLL1_SWCR3, MPMU_POSR, POSR_PLL1_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll2, pll2_rate_tbl, APBS_PLL2_SWCR1, APBS_PLL2_SWCR3, MPMU_POSR, POSR_PLL2_LOCK,
+ CLK_SET_RATE_GATE);
+CCU_PLL_DEFINE(pll3, pll3_rate_tbl, APBS_PLL3_SWCR1, APBS_PLL3_SWCR3, MPMU_POSR, POSR_PLL3_LOCK,
+ CLK_SET_RATE_GATE);
+
+CCU_FACTOR_GATE_DEFINE(pll1_d2, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d3, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d6, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d11_223p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(15), 11, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d13_189, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(16), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d23_106p8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(20), 23, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d64_38p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(0), 64, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_245p7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(10), 10, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_aud_24p5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(11), 100, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll2_d1, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d2, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d3, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d4, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d5, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d6, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d7, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll2_d8, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(7), 8, 1);
+
+CCU_FACTOR_GATE_DEFINE(pll3_d1, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(0), 1, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d2, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(1), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d3, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(2), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d4, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(3), 4, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d5, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(4), 5, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d6, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(5), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d7, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(6), 7, 1);
+CCU_FACTOR_GATE_DEFINE(pll3_d8, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(7), 8, 1);
+
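+/*
+ * With pll3 at 3.2 GHz, pll3_d8 runs at 400 MHz and these fixed taps
+ * yield 20, 40 and 80 MHz.
+ */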
+CCU_FACTOR_DEFINE(pll3_20, CCU_PARENT_HW(pll3_d8), 20, 1);
+CCU_FACTOR_DEFINE(pll3_40, CCU_PARENT_HW(pll3_d8), 10, 1);
+CCU_FACTOR_DEFINE(pll3_80, CCU_PARENT_HW(pll3_d8), 5, 1);
+
+/* APBS clocks end */
+
+/* MPMU clocks start */
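+/*
+ * Branch names encode the resulting rate: pll1 is fixed at 2457.6 MHz,
+ * so e.g. pll1_d8_307p2 is pll1 / 8 = 307.2 MHz ("p" marks the decimal
+ * point).
+ */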
+CCU_GATE_DEFINE(pll1_d8_307p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(13), 0);
+
+CCU_FACTOR_DEFINE(pll1_d32_76p8, CCU_PARENT_HW(pll1_d8_307p2), 4, 1);
+
+CCU_FACTOR_DEFINE(pll1_d40_61p44, CCU_PARENT_HW(pll1_d8_307p2), 5, 1);
+
+CCU_FACTOR_DEFINE(pll1_d16_153p6, CCU_PARENT_HW(pll1_d8), 2, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d24_102p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(12), 3, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(7), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2_ap, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(11), 6, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_m3d128_57p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(8), 16, 3);
+CCU_FACTOR_GATE_DEFINE(pll1_d96_25p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(4), 12, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(3), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8_wdt, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(19), 24, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d384_6p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(2), 48, 1);
+
+CCU_FACTOR_DEFINE(pll1_d768_3p2, CCU_PARENT_HW(pll1_d384_6p4), 2, 1);
+CCU_FACTOR_DEFINE(pll1_d1536_1p6, CCU_PARENT_HW(pll1_d384_6p4), 4, 1);
+CCU_FACTOR_DEFINE(pll1_d3072_0p8, CCU_PARENT_HW(pll1_d384_6p4), 8, 1);
+
+CCU_GATE_DEFINE(pll1_d6_409p6, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(0), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d12_204p8, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(5), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d5_491p52, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(21), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d10_245p76, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(18), 2, 1);
+
+CCU_GATE_DEFINE(pll1_d4_614p4, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(15), 0);
+CCU_FACTOR_GATE_DEFINE(pll1_d52_47p26, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(10), 13, 1);
+CCU_FACTOR_GATE_DEFINE(pll1_d78_31p5, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(6), 39, 2);
+
+CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0);
+
+CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0);
+
+CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED);
+CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 0);
+CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 0);
+
+CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0);
+
+CCU_FACTOR_GATE_DEFINE(i2s_sysclk, CCU_PARENT_HW(pll1_d16_153p6), MPMU_ISCCR, BIT(31), 50, 1);
+CCU_FACTOR_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_sysclk), MPMU_ISCCR, BIT(29), 1, 1);
+
+static const struct clk_parent_data apb_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_DEFINE(apb_clk, apb_parents, MPMU_APBCSCR, 0, 2, 0);
+
+CCU_GATE_DEFINE(wdt_bus_clk, CCU_PARENT_HW(apb_clk), MPMU_WDTPCR, BIT(0), 0);
+
+CCU_GATE_DEFINE(ripc_clk, CCU_PARENT_HW(apb_clk), MPMU_RIPCCR, 0x1, 0);
+/* MPMU clocks end */
+
+/* APBC clocks start */
+static const struct clk_parent_data uart_clk_parents[] = {
+ CCU_PARENT_HW(pll1_m3d128_57p6),
+ CCU_PARENT_HW(slow_uart1_14p74),
+ CCU_PARENT_HW(slow_uart2_48),
+};
+CCU_MUX_GATE_DEFINE(uart0_clk, uart_clk_parents, APBC_UART1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart2_clk, uart_clk_parents, APBC_UART2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart3_clk, uart_clk_parents, APBC_UART3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart4_clk, uart_clk_parents, APBC_UART4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart5_clk, uart_clk_parents, APBC_UART5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart6_clk, uart_clk_parents, APBC_UART6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart7_clk, uart_clk_parents, APBC_UART7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart8_clk, uart_clk_parents, APBC_UART8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(uart9_clk, uart_clk_parents, APBC_UART9_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(gpio_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_GPIO_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+};
+CCU_MUX_GATE_DEFINE(pwm0_clk, pwm_parents, APBC_PWM0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm1_clk, pwm_parents, APBC_PWM1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm2_clk, pwm_parents, APBC_PWM2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm3_clk, pwm_parents, APBC_PWM3_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm4_clk, pwm_parents, APBC_PWM4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm5_clk, pwm_parents, APBC_PWM5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm6_clk, pwm_parents, APBC_PWM6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm7_clk, pwm_parents, APBC_PWM7_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm8_clk, pwm_parents, APBC_PWM8_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm9_clk, pwm_parents, APBC_PWM9_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm10_clk, pwm_parents, APBC_PWM10_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm11_clk, pwm_parents, APBC_PWM11_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm12_clk, pwm_parents, APBC_PWM12_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm13_clk, pwm_parents, APBC_PWM13_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm14_clk, pwm_parents, APBC_PWM14_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm15_clk, pwm_parents, APBC_PWM15_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm16_clk, pwm_parents, APBC_PWM16_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm17_clk, pwm_parents, APBC_PWM17_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm18_clk, pwm_parents, APBC_PWM18_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(pwm19_clk, pwm_parents, APBC_PWM19_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data ssp_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+};
+CCU_MUX_GATE_DEFINE(ssp3_clk, ssp_parents, APBC_SSP3_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(rtc_clk, CCU_PARENT_NAME(osc), APBC_RTC_CLK_RST,
+ BIT(7) | BIT(1), 0);
+
+static const struct clk_parent_data twsi_parents[] = {
+ CCU_PARENT_HW(pll1_d78_31p5),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d40_61p44),
+};
+CCU_MUX_GATE_DEFINE(twsi0_clk, twsi_parents, APBC_TWSI0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi1_clk, twsi_parents, APBC_TWSI1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi2_clk, twsi_parents, APBC_TWSI2_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi4_clk, twsi_parents, APBC_TWSI4_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi5_clk, twsi_parents, APBC_TWSI5_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi6_clk, twsi_parents, APBC_TWSI6_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(twsi7_clk, twsi_parents, APBC_TWSI7_CLK_RST, 4, 3, BIT(1), 0);
+/*
+ * APBC_TWSI8_CLK_RST has a quirk: reads always return zero. Combine the
+ * functional and bus bits into a single gate so that the write-only
+ * register is not shared between different clk_hw instances.
+ */
+CCU_GATE_DEFINE(twsi8_clk, CCU_PARENT_HW(pll1_d78_31p5), APBC_TWSI8_CLK_RST, BIT(1) | BIT(0), 0);
+
+static const struct clk_parent_data timer_parents[] = {
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_NAME(osc),
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_NAME(vctcxo_3m),
+ CCU_PARENT_NAME(vctcxo_1m),
+};
+CCU_MUX_GATE_DEFINE(timers1_clk, timer_parents, APBC_TIMERS1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(timers2_clk, timer_parents, APBC_TIMERS2_CLK_RST, 4, 3, BIT(1), 0);
+
+CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1), 0);
+
+CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data sspa_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa0_clk, sspa_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(sspa1_clk, sspa_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0);
+CCU_GATE_DEFINE(ipc_ap2aud_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(1), 0);
+
+static const struct clk_parent_data can_parents[] = {
+ CCU_PARENT_HW(pll3_20),
+ CCU_PARENT_HW(pll3_40),
+ CCU_PARENT_HW(pll3_80),
+};
+CCU_MUX_GATE_DEFINE(can0_clk, can_parents, APBC_CAN0_CLK_RST, 4, 3, BIT(1), 0);
+CCU_GATE_DEFINE(can0_bus_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_CAN0_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(uart0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(uart9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART9_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(gpio_bus_clk, CCU_PARENT_HW(apb_clk), APBC_GPIO_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(pwm0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM3_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM7_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM8_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM9_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm10_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM10_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm11_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM11_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm12_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM12_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm13_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM13_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm14_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM14_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm15_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM15_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm16_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM16_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm17_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM17_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm18_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM18_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(pwm19_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM19_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ssp3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSP3_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(rtc_bus_clk, CCU_PARENT_HW(apb_clk), APBC_RTC_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(twsi0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI2_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI4_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI5_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI6_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(twsi7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI7_CLK_RST, BIT(0), 0);
+/* Placeholder to work around the write-only quirk of APBC_TWSI8_CLK_RST */
+CCU_FACTOR_DEFINE(twsi8_bus_clk, CCU_PARENT_HW(apb_clk), 1, 1);
+
+CCU_GATE_DEFINE(timers1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS1_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(timers2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS2_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(aib_bus_clk, CCU_PARENT_HW(apb_clk), APBC_AIB_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(onewire_bus_clk, CCU_PARENT_HW(apb_clk), APBC_ONEWIRE_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(sspa0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA0_CLK_RST, BIT(0), 0);
+CCU_GATE_DEFINE(sspa1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA1_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(tsen_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(0), 0);
+
+CCU_GATE_DEFINE(ipc_ap2aud_bus_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(0), 0);
+/* APBC clocks end */
+
+/* APMU clocks start */
+static const struct clk_parent_data pmua_aclk_parents[] = {
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_FC_DEFINE(pmua_aclk, pmua_aclk_parents, APMU_ACLK_CLK_CTRL, 1, 2, BIT(4), 0, 1, 0);
+
+static const struct clk_parent_data cci550_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_FC_DEFINE(cci550_clk, cci550_clk_parents, APMU_CCI550_CLK_CTRL, 8, 3, BIT(12), 0, 2,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c0_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c0_hi_clk, cpu_c0_hi_clk_parents, APMU_CPU_C0_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c0_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c0_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c0_core_clk, cpu_c0_clk_parents, APMU_CPU_C0_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_ace_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c0_tcm_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 9, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data cpu_c1_hi_clk_parents[] = {
+ CCU_PARENT_HW(pll3_d2),
+ CCU_PARENT_HW(pll3_d1),
+};
+CCU_MUX_DEFINE(cpu_c1_hi_clk, cpu_c1_hi_clk_parents, APMU_CPU_C1_CLK_CTRL, 13, 1, 0);
+static const struct clk_parent_data cpu_c1_clk_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll3_d3),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(cpu_c1_hi_clk),
+};
+CCU_MUX_FC_DEFINE(cpu_c1_core_clk, cpu_c1_clk_parents, APMU_CPU_C1_CLK_CTRL, BIT(12), 0, 3,
+ CLK_IS_CRITICAL);
+CCU_DIV_DEFINE(cpu_c1_ace_clk, CCU_PARENT_HW(cpu_c1_core_clk), APMU_CPU_C1_CLK_CTRL, 6, 3,
+ CLK_IS_CRITICAL);
+
+static const struct clk_parent_data jpg_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d3),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(jpg_clk, jpg_parents, APMU_JPG_CLK_RES_CTRL, 5, 3, BIT(15), 2, 3,
+ BIT(1), 0);
+
+static const struct clk_parent_data ccic2phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic2phy_clk, ccic2phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+static const struct clk_parent_data ccic3phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic3phy_clk, ccic3phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 31, 1, BIT(30), 0);
+
+static const struct clk_parent_data csi_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(csi_clk, csi_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 20, 3, BIT(15),
+ 16, 3, BIT(4), 0);
+
+static const struct clk_parent_data camm_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_NAME(vctcxo_24m),
+};
+CCU_MUX_DIV_GATE_DEFINE(camm0_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(28), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm1_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(6), 0);
+CCU_MUX_DIV_GATE_DEFINE(camm2_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2,
+ BIT(3), 0);
+
+static const struct clk_parent_data isp_cpp_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_DEFINE(isp_cpp_clk, isp_cpp_parents, APMU_ISP_CLK_RES_CTRL, 24, 2, 26, 1,
+ BIT(28), 0);
+static const struct clk_parent_data isp_bus_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_bus_clk, isp_bus_parents, APMU_ISP_CLK_RES_CTRL, 18, 3, BIT(23),
+ 21, 2, BIT(17), 0);
+static const struct clk_parent_data isp_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(isp_clk, isp_parents, APMU_ISP_CLK_RES_CTRL, 4, 3, BIT(7), 8, 2,
+ BIT(1), 0);
+
+static const struct clk_parent_data dpumclk_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_mclk, dpumclk_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 1, 4, BIT(29), 5, 3, BIT(0), 0);
+
+static const struct clk_parent_data dpuesc_parents[] = {
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d32_76p8),
+};
+CCU_MUX_GATE_DEFINE(dpu_esc_clk, dpuesc_parents, APMU_LCD_CLK_RES_CTRL1, 0, 2, BIT(2), 0);
+
+static const struct clk_parent_data dpubit_parents[] = {
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_bit_clk, dpubit_parents, APMU_LCD_CLK_RES_CTRL1, 17, 3, BIT(31),
+ 20, 3, BIT(16), 0);
+
+static const struct clk_parent_data dpupx_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll2_d7),
+ CCU_PARENT_HW(pll2_d8),
+};
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_pxclk, dpupx_parents, APMU_LCD_CLK_RES_CTRL2,
+ APMU_LCD_CLK_RES_CTRL1, 17, 4, BIT(30), 21, 3, BIT(16), 0);
+
+CCU_GATE_DEFINE(dpu_hclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_CLK_RES_CTRL1,
+ BIT(5), 0);
+
+static const struct clk_parent_data dpu_spi_parents[] = {
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(dpu_spi_clk, dpu_spi_parents, APMU_LCD_SPI_CLK_RES_CTRL, 8, 3,
+ BIT(7), 12, 3, BIT(1), 0);
+CCU_GATE_DEFINE(dpu_spi_hbus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dpu_spi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(dpu_spi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(6), 0);
+
+static const struct clk_parent_data v2d_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d4_614p4),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(v2d_clk, v2d_parents, APMU_LCD_CLK_RES_CTRL1, 9, 3, BIT(28), 12, 2,
+ BIT(8), 0);
+
+static const struct clk_parent_data ccic_4x_parents[] = {
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll2_d2),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll1_d2_1228p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(ccic_4x_clk, ccic_4x_parents, APMU_CCIC_CLK_RES_CTRL, 18, 3,
+ BIT(15), 23, 2, BIT(4), 0);
+
+static const struct clk_parent_data ccic1phy_parents[] = {
+ CCU_PARENT_HW(pll1_d24_102p4),
+ CCU_PARENT_HW(pll1_d48_51p2_ap),
+};
+CCU_MUX_GATE_DEFINE(ccic1phy_clk, ccic1phy_parents, APMU_CCIC_CLK_RES_CTRL, 7, 1, BIT(5), 0);
+
+CCU_GATE_DEFINE(sdh_axi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_SDH0_CLK_RES_CTRL, BIT(3), 0);
+static const struct clk_parent_data sdh01_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll2_d5),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh0_clk, sdh01_parents, APMU_SDH0_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh1_clk, sdh01_parents, APMU_SDH1_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+static const struct clk_parent_data sdh2_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d13_189),
+ CCU_PARENT_HW(pll1_d23_106p8),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(sdh2_clk, sdh2_parents, APMU_SDH2_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3,
+ BIT(4), 0);
+
+CCU_GATE_DEFINE(usb_axi_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(1), 0);
+CCU_GATE_DEFINE(usb_p1_aclk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(5), 0);
+CCU_GATE_DEFINE(usb30_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(8), 0);
+
+static const struct clk_parent_data qspi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll2_d8),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d10_245p76),
+ CCU_PARENT_HW(pll1_d11_223p4),
+ CCU_PARENT_HW(pll1_d23_106p8),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d13_189),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(qspi_clk, qspi_parents, APMU_QSPI_CLK_RES_CTRL, 9, 3, BIT(12), 6, 3,
+ BIT(4), 0);
+CCU_GATE_DEFINE(qspi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_QSPI_CLK_RES_CTRL, BIT(3), 0);
+CCU_GATE_DEFINE(dma_clk, CCU_PARENT_HW(pmua_aclk), APMU_DMA_CLK_RES_CTRL, BIT(3), 0);
+
+static const struct clk_parent_data aes_parents[] = {
+ CCU_PARENT_HW(pll1_d12_204p8),
+ CCU_PARENT_HW(pll1_d24_102p4),
+};
+CCU_MUX_GATE_DEFINE(aes_clk, aes_parents, APMU_AES_CLK_RES_CTRL, 6, 1, BIT(5), 0);
+
+static const struct clk_parent_data vpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(vpu_clk, vpu_parents, APMU_VPU_CLK_RES_CTRL, 13, 3, BIT(21), 10, 3,
+ BIT(3), 0);
+
+static const struct clk_parent_data gpu_parents[] = {
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d3_819p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll3_d6),
+ CCU_PARENT_HW(pll2_d3),
+ CCU_PARENT_HW(pll2_d4),
+ CCU_PARENT_HW(pll2_d5),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(gpu_clk, gpu_parents, APMU_GPU_CLK_RES_CTRL, 12, 3, BIT(15), 18, 3,
+ BIT(4), 0);
+
+static const struct clk_parent_data emmc_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d52_47p26),
+ CCU_PARENT_HW(pll1_d3_819p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(emmc_clk, emmc_parents, APMU_PMUA_EM_CLK_RES_CTRL, 8, 3, BIT(11),
+ 6, 2, BIT(4), 0);
+CCU_DIV_GATE_DEFINE(emmc_x_clk, CCU_PARENT_HW(pll1_d2_1228p8), APMU_PMUA_EM_CLK_RES_CTRL, 12,
+ 3, BIT(15), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ CCU_PARENT_HW(pll1_aud_245p7),
+ CCU_PARENT_HW(pll1_d8_307p2),
+ CCU_PARENT_HW(pll1_d6_409p6),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(audio_clk, audio_parents, APMU_AUDIO_CLK_RES_CTRL, 4, 3, BIT(15),
+ 7, 3, BIT(12), 0);
+
+static const struct clk_parent_data hdmi_parents[] = {
+ CCU_PARENT_HW(pll1_d6_409p6),
+ CCU_PARENT_HW(pll1_d5_491p52),
+ CCU_PARENT_HW(pll1_d4_614p4),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DIV_GATE_FC_DEFINE(hdmi_mclk, hdmi_parents, APMU_HDMI_CLK_RES_CTRL, 1, 4, BIT(29), 5,
+ 3, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie0_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(2), 0);
+CCU_GATE_DEFINE(pcie0_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(1), 0);
+CCU_GATE_DEFINE(pcie0_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie1_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(2), 0);
+CCU_GATE_DEFINE(pcie1_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(1), 0);
+CCU_GATE_DEFINE(pcie1_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(0), 0);
+
+CCU_GATE_DEFINE(pcie2_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(2), 0);
+CCU_GATE_DEFINE(pcie2_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(1), 0);
+CCU_GATE_DEFINE(pcie2_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(0), 0);
+
+CCU_GATE_DEFINE(emac0_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC0_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac0_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC0_CLK_RES_CTRL, BIT(15), 0);
+CCU_GATE_DEFINE(emac1_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC1_CLK_RES_CTRL, BIT(0), 0);
+CCU_GATE_DEFINE(emac1_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC1_CLK_RES_CTRL, BIT(15), 0);
+
+CCU_GATE_DEFINE(emmc_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_PMUA_EM_CLK_RES_CTRL, BIT(3), 0);
+/* APMU clocks end */
+
+static struct clk_hw *k1_ccu_pll_hws[] = {
+ [CLK_PLL1] = &pll1.common.hw,
+ [CLK_PLL2] = &pll2.common.hw,
+ [CLK_PLL3] = &pll3.common.hw,
+ [CLK_PLL1_D2] = &pll1_d2.common.hw,
+ [CLK_PLL1_D3] = &pll1_d3.common.hw,
+ [CLK_PLL1_D4] = &pll1_d4.common.hw,
+ [CLK_PLL1_D5] = &pll1_d5.common.hw,
+ [CLK_PLL1_D6] = &pll1_d6.common.hw,
+ [CLK_PLL1_D7] = &pll1_d7.common.hw,
+ [CLK_PLL1_D8] = &pll1_d8.common.hw,
+ [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw,
+ [CLK_PLL1_D13] = &pll1_d13_189.common.hw,
+ [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw,
+ [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw,
+ [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw,
+ [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw,
+ [CLK_PLL2_D1] = &pll2_d1.common.hw,
+ [CLK_PLL2_D2] = &pll2_d2.common.hw,
+ [CLK_PLL2_D3] = &pll2_d3.common.hw,
+ [CLK_PLL2_D4] = &pll2_d4.common.hw,
+ [CLK_PLL2_D5] = &pll2_d5.common.hw,
+ [CLK_PLL2_D6] = &pll2_d6.common.hw,
+ [CLK_PLL2_D7] = &pll2_d7.common.hw,
+ [CLK_PLL2_D8] = &pll2_d8.common.hw,
+ [CLK_PLL3_D1] = &pll3_d1.common.hw,
+ [CLK_PLL3_D2] = &pll3_d2.common.hw,
+ [CLK_PLL3_D3] = &pll3_d3.common.hw,
+ [CLK_PLL3_D4] = &pll3_d4.common.hw,
+ [CLK_PLL3_D5] = &pll3_d5.common.hw,
+ [CLK_PLL3_D6] = &pll3_d6.common.hw,
+ [CLK_PLL3_D7] = &pll3_d7.common.hw,
+ [CLK_PLL3_D8] = &pll3_d8.common.hw,
+ [CLK_PLL3_80] = &pll3_80.common.hw,
+ [CLK_PLL3_40] = &pll3_40.common.hw,
+ [CLK_PLL3_20] = &pll3_20.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_pll_data = {
+ .hws = k1_ccu_pll_hws,
+ .num = ARRAY_SIZE(k1_ccu_pll_hws),
+};
+
+static struct clk_hw *k1_ccu_mpmu_hws[] = {
+ [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw,
+ [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw,
+ [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw,
+ [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw,
+ [CLK_PLL1_102P4] = &pll1_d24_102p4.common.hw,
+ [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw,
+ [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw,
+ [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw,
+ [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw,
+ [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw,
+ [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw,
+ [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw,
+ [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw,
+ [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw,
+ [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw,
+ [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw,
+ [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw,
+ [CLK_PLL1_491] = &pll1_d5_491p52.common.hw,
+ [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw,
+ [CLK_PLL1_614] = &pll1_d4_614p4.common.hw,
+ [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw,
+ [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw,
+ [CLK_PLL1_819] = &pll1_d3_819p2.common.hw,
+ [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw,
+ [CLK_SLOW_UART] = &slow_uart.common.hw,
+ [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw,
+ [CLK_SLOW_UART2] = &slow_uart2_48.common.hw,
+ [CLK_WDT] = &wdt_clk.common.hw,
+ [CLK_RIPC] = &ripc_clk.common.hw,
+ [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw,
+ [CLK_I2S_BCLK] = &i2s_bclk.common.hw,
+ [CLK_APB] = &apb_clk.common.hw,
+ [CLK_WDT_BUS] = &wdt_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
+ .hws = k1_ccu_mpmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
+};
+
+static struct clk_hw *k1_ccu_apbc_hws[] = {
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART2] = &uart2_clk.common.hw,
+ [CLK_UART3] = &uart3_clk.common.hw,
+ [CLK_UART4] = &uart4_clk.common.hw,
+ [CLK_UART5] = &uart5_clk.common.hw,
+ [CLK_UART6] = &uart6_clk.common.hw,
+ [CLK_UART7] = &uart7_clk.common.hw,
+ [CLK_UART8] = &uart8_clk.common.hw,
+ [CLK_UART9] = &uart9_clk.common.hw,
+ [CLK_GPIO] = &gpio_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_PWM4] = &pwm4_clk.common.hw,
+ [CLK_PWM5] = &pwm5_clk.common.hw,
+ [CLK_PWM6] = &pwm6_clk.common.hw,
+ [CLK_PWM7] = &pwm7_clk.common.hw,
+ [CLK_PWM8] = &pwm8_clk.common.hw,
+ [CLK_PWM9] = &pwm9_clk.common.hw,
+ [CLK_PWM10] = &pwm10_clk.common.hw,
+ [CLK_PWM11] = &pwm11_clk.common.hw,
+ [CLK_PWM12] = &pwm12_clk.common.hw,
+ [CLK_PWM13] = &pwm13_clk.common.hw,
+ [CLK_PWM14] = &pwm14_clk.common.hw,
+ [CLK_PWM15] = &pwm15_clk.common.hw,
+ [CLK_PWM16] = &pwm16_clk.common.hw,
+ [CLK_PWM17] = &pwm17_clk.common.hw,
+ [CLK_PWM18] = &pwm18_clk.common.hw,
+ [CLK_PWM19] = &pwm19_clk.common.hw,
+ [CLK_SSP3] = &ssp3_clk.common.hw,
+ [CLK_RTC] = &rtc_clk.common.hw,
+ [CLK_TWSI0] = &twsi0_clk.common.hw,
+ [CLK_TWSI1] = &twsi1_clk.common.hw,
+ [CLK_TWSI2] = &twsi2_clk.common.hw,
+ [CLK_TWSI4] = &twsi4_clk.common.hw,
+ [CLK_TWSI5] = &twsi5_clk.common.hw,
+ [CLK_TWSI6] = &twsi6_clk.common.hw,
+ [CLK_TWSI7] = &twsi7_clk.common.hw,
+ [CLK_TWSI8] = &twsi8_clk.common.hw,
+ [CLK_TIMERS1] = &timers1_clk.common.hw,
+ [CLK_TIMERS2] = &timers2_clk.common.hw,
+ [CLK_AIB] = &aib_clk.common.hw,
+ [CLK_ONEWIRE] = &onewire_clk.common.hw,
+ [CLK_SSPA0] = &sspa0_clk.common.hw,
+ [CLK_SSPA1] = &sspa1_clk.common.hw,
+ [CLK_DRO] = &dro_clk.common.hw,
+ [CLK_IR] = &ir_clk.common.hw,
+ [CLK_TSEN] = &tsen_clk.common.hw,
+ [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw,
+ [CLK_CAN0] = &can0_clk.common.hw,
+ [CLK_CAN0_BUS] = &can0_bus_clk.common.hw,
+ [CLK_UART0_BUS] = &uart0_bus_clk.common.hw,
+ [CLK_UART2_BUS] = &uart2_bus_clk.common.hw,
+ [CLK_UART3_BUS] = &uart3_bus_clk.common.hw,
+ [CLK_UART4_BUS] = &uart4_bus_clk.common.hw,
+ [CLK_UART5_BUS] = &uart5_bus_clk.common.hw,
+ [CLK_UART6_BUS] = &uart6_bus_clk.common.hw,
+ [CLK_UART7_BUS] = &uart7_bus_clk.common.hw,
+ [CLK_UART8_BUS] = &uart8_bus_clk.common.hw,
+ [CLK_UART9_BUS] = &uart9_bus_clk.common.hw,
+ [CLK_GPIO_BUS] = &gpio_bus_clk.common.hw,
+ [CLK_PWM0_BUS] = &pwm0_bus_clk.common.hw,
+ [CLK_PWM1_BUS] = &pwm1_bus_clk.common.hw,
+ [CLK_PWM2_BUS] = &pwm2_bus_clk.common.hw,
+ [CLK_PWM3_BUS] = &pwm3_bus_clk.common.hw,
+ [CLK_PWM4_BUS] = &pwm4_bus_clk.common.hw,
+ [CLK_PWM5_BUS] = &pwm5_bus_clk.common.hw,
+ [CLK_PWM6_BUS] = &pwm6_bus_clk.common.hw,
+ [CLK_PWM7_BUS] = &pwm7_bus_clk.common.hw,
+ [CLK_PWM8_BUS] = &pwm8_bus_clk.common.hw,
+ [CLK_PWM9_BUS] = &pwm9_bus_clk.common.hw,
+ [CLK_PWM10_BUS] = &pwm10_bus_clk.common.hw,
+ [CLK_PWM11_BUS] = &pwm11_bus_clk.common.hw,
+ [CLK_PWM12_BUS] = &pwm12_bus_clk.common.hw,
+ [CLK_PWM13_BUS] = &pwm13_bus_clk.common.hw,
+ [CLK_PWM14_BUS] = &pwm14_bus_clk.common.hw,
+ [CLK_PWM15_BUS] = &pwm15_bus_clk.common.hw,
+ [CLK_PWM16_BUS] = &pwm16_bus_clk.common.hw,
+ [CLK_PWM17_BUS] = &pwm17_bus_clk.common.hw,
+ [CLK_PWM18_BUS] = &pwm18_bus_clk.common.hw,
+ [CLK_PWM19_BUS] = &pwm19_bus_clk.common.hw,
+ [CLK_SSP3_BUS] = &ssp3_bus_clk.common.hw,
+ [CLK_RTC_BUS] = &rtc_bus_clk.common.hw,
+ [CLK_TWSI0_BUS] = &twsi0_bus_clk.common.hw,
+ [CLK_TWSI1_BUS] = &twsi1_bus_clk.common.hw,
+ [CLK_TWSI2_BUS] = &twsi2_bus_clk.common.hw,
+ [CLK_TWSI4_BUS] = &twsi4_bus_clk.common.hw,
+ [CLK_TWSI5_BUS] = &twsi5_bus_clk.common.hw,
+ [CLK_TWSI6_BUS] = &twsi6_bus_clk.common.hw,
+ [CLK_TWSI7_BUS] = &twsi7_bus_clk.common.hw,
+ [CLK_TWSI8_BUS] = &twsi8_bus_clk.common.hw,
+ [CLK_TIMERS1_BUS] = &timers1_bus_clk.common.hw,
+ [CLK_TIMERS2_BUS] = &timers2_bus_clk.common.hw,
+ [CLK_AIB_BUS] = &aib_bus_clk.common.hw,
+ [CLK_ONEWIRE_BUS] = &onewire_bus_clk.common.hw,
+ [CLK_SSPA0_BUS] = &sspa0_bus_clk.common.hw,
+ [CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw,
+ [CLK_TSEN_BUS] = &tsen_bus_clk.common.hw,
+ [CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apbc_data = {
+ .hws = k1_ccu_apbc_hws,
+ .num = ARRAY_SIZE(k1_ccu_apbc_hws),
+};
+
+static struct clk_hw *k1_ccu_apmu_hws[] = {
+ [CLK_CCI550] = &cci550_clk.common.hw,
+ [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw,
+ [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw,
+ [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw,
+ [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw,
+ [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw,
+ [CLK_CPU_C1_CORE] = &cpu_c1_core_clk.common.hw,
+ [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw,
+ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw,
+ [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw,
+ [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw,
+ [CLK_SDH0] = &sdh0_clk.common.hw,
+ [CLK_SDH1] = &sdh1_clk.common.hw,
+ [CLK_SDH2] = &sdh2_clk.common.hw,
+ [CLK_USB_P1] = &usb_p1_aclk.common.hw,
+ [CLK_USB_AXI] = &usb_axi_clk.common.hw,
+ [CLK_USB30] = &usb30_clk.common.hw,
+ [CLK_QSPI] = &qspi_clk.common.hw,
+ [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw,
+ [CLK_DMA] = &dma_clk.common.hw,
+ [CLK_AES] = &aes_clk.common.hw,
+ [CLK_VPU] = &vpu_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_EMMC] = &emmc_clk.common.hw,
+ [CLK_EMMC_X] = &emmc_x_clk.common.hw,
+ [CLK_AUDIO] = &audio_clk.common.hw,
+ [CLK_HDMI] = &hdmi_mclk.common.hw,
+ [CLK_PMUA_ACLK] = &pmua_aclk.common.hw,
+ [CLK_PCIE0_MASTER] = &pcie0_master_clk.common.hw,
+ [CLK_PCIE0_SLAVE] = &pcie0_slave_clk.common.hw,
+ [CLK_PCIE0_DBI] = &pcie0_dbi_clk.common.hw,
+ [CLK_PCIE1_MASTER] = &pcie1_master_clk.common.hw,
+ [CLK_PCIE1_SLAVE] = &pcie1_slave_clk.common.hw,
+ [CLK_PCIE1_DBI] = &pcie1_dbi_clk.common.hw,
+ [CLK_PCIE2_MASTER] = &pcie2_master_clk.common.hw,
+ [CLK_PCIE2_SLAVE] = &pcie2_slave_clk.common.hw,
+ [CLK_PCIE2_DBI] = &pcie2_dbi_clk.common.hw,
+ [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw,
+ [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw,
+ [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw,
+ [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw,
+ [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw,
+ [CLK_CSI] = &csi_clk.common.hw,
+ [CLK_CAMM0] = &camm0_clk.common.hw,
+ [CLK_CAMM1] = &camm1_clk.common.hw,
+ [CLK_CAMM2] = &camm2_clk.common.hw,
+ [CLK_ISP_CPP] = &isp_cpp_clk.common.hw,
+ [CLK_ISP_BUS] = &isp_bus_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_DPU_MCLK] = &dpu_mclk.common.hw,
+ [CLK_DPU_ESC] = &dpu_esc_clk.common.hw,
+ [CLK_DPU_BIT] = &dpu_bit_clk.common.hw,
+ [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw,
+ [CLK_DPU_HCLK] = &dpu_hclk.common.hw,
+ [CLK_DPU_SPI] = &dpu_spi_clk.common.hw,
+ [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw,
+ [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw,
+ [CLK_DPU_SPI_ACLK] = &dpu_spi_aclk.common.hw,
+ [CLK_V2D] = &v2d_clk.common.hw,
+ [CLK_EMMC_BUS] = &emmc_bus_clk.common.hw,
+};
+
+static const struct spacemit_ccu_data k1_ccu_apmu_data = {
+ .hws = k1_ccu_apmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+};
+
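+/*
+ * Register every clk_hw in @data, attaching the CCU regmaps first, and
+ * expose the result through a single onecell clock provider.
+ */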
+static int spacemit_ccu_register(struct device *dev,
+ struct regmap *regmap,
+ struct regmap *lock_regmap,
+ const struct spacemit_ccu_data *data)
+{
+ struct clk_hw_onecell_data *clk_data;
+ int i, ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, data->num),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num; i++) {
+ struct clk_hw *hw = data->hws[i];
+ struct ccu_common *common;
+ const char *name;
+
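+		/* Not every index in the binding is backed by a clock */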
+ if (!hw) {
+ clk_data->hws[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
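+		/* Cache the name: clk registration clears hw->init */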
+ name = hw->init->name;
+
+ common = hw_to_ccu_common(hw);
+ common->regmap = regmap;
+ common->lock_regmap = lock_regmap;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Cannot register clock %d - %s\n",
+ i, name);
+ return ret;
+ }
+
+ clk_data->hws[i] = hw;
+ }
+
+ clk_data->num = data->num;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ dev_err(dev, "failed to add clock hardware provider (%d)\n", ret);
+
+ return ret;
+}
+
+static int k1_ccu_probe(struct platform_device *pdev)
+{
+ struct regmap *base_regmap, *lock_regmap = NULL;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ base_regmap = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(base_regmap))
+ return dev_err_probe(dev, PTR_ERR(base_regmap),
+ "failed to get regmap\n");
+
+ /*
+	 * The lock status bits of the PLLs live in the MPMU region, while the
+	 * PLLs themselves sit in the APBS region, so a reference to the MPMU
+	 * syscon is needed to check whether a PLL has locked.
+ */
+ if (of_device_is_compatible(dev->of_node, "spacemit,k1-pll")) {
+ struct device_node *mpmu = of_parse_phandle(dev->of_node,
+ "spacemit,mpmu", 0);
+ if (!mpmu)
+ return dev_err_probe(dev, -ENODEV,
+ "Cannot parse MPMU region\n");
+
+ lock_regmap = device_node_to_regmap(mpmu);
+ of_node_put(mpmu);
+
+ if (IS_ERR(lock_regmap))
+ return dev_err_probe(dev, PTR_ERR(lock_regmap),
+ "failed to get lock regmap\n");
+ }
+
+ ret = spacemit_ccu_register(dev, base_regmap, lock_regmap,
+ of_device_get_match_data(dev));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register clocks\n");
+
+ return 0;
+}
+
+static const struct of_device_id of_k1_ccu_match[] = {
+ {
+ .compatible = "spacemit,k1-pll",
+ .data = &k1_ccu_pll_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-mpmu",
+ .data = &k1_ccu_mpmu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apbc",
+ .data = &k1_ccu_apbc_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apmu",
+ .data = &k1_ccu_apmu_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, of_k1_ccu_match);
+
+static struct platform_driver k1_ccu_driver = {
+ .driver = {
+ .name = "spacemit,k1-ccu",
+ .of_match_table = of_k1_ccu_match,
+ },
+ .probe = k1_ccu_probe,
+};
+module_platform_driver(k1_ccu_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 CCU driver");
+MODULE_AUTHOR("Haylen Chu <heylenay@4d2.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/spacemit/ccu_common.h b/drivers/clk/spacemit/ccu_common.h
new file mode 100644
index 000000000000..da72f3836e0b
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_common.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_COMMON_H_
+#define _CCU_COMMON_H_
+
+#include <linux/regmap.h>
+
+struct ccu_common {
+ struct regmap *regmap;
+ struct regmap *lock_regmap;
+
+ union {
+ /* For DDN and MIX */
+ struct {
+ u32 reg_ctrl;
+ u32 reg_fc;
+ u32 mask_fc;
+ };
+
+ /* For PLL */
+ struct {
+ u32 reg_swcr1;
+ u32 reg_swcr3;
+ };
+ };
+
+ struct clk_hw hw;
+};
+
+static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
+{
+ return container_of(hw, struct ccu_common, hw);
+}
+
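+/*
+ * Helpers for register access through the regmap. @reg is the suffix of
+ * one of the reg_* fields of struct ccu_common, e.g. ctrl, fc, swcr1 or
+ * swcr3.
+ */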
+#define ccu_read(c, reg) \
+ ({ \
+ u32 tmp; \
+ regmap_read((c)->regmap, (c)->reg_##reg, &tmp); \
+ tmp; \
+ })
+#define ccu_update(c, reg, mask, val) \
+ regmap_update_bits((c)->regmap, (c)->reg_##reg, mask, val)
+
+#endif /* _CCU_COMMON_H_ */
diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
new file mode 100644
index 000000000000..be311b045698
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * DDN stands for "Divider Denominator Numerator"; it is an M/N clock with
+ * a constant x2 factor. This clock hardware follows the equation below:
+ *
+ *        numerator      Fin
+ *    2 * ----------- = ------
+ *        denominator    Fout
+ *
+ * Thus, Fout can be calculated with:
+ *
+ *            Fin    denominator
+ *    Fout = ----- * -----------
+ *             2      numerator
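+ *
+ * For example (illustrative numbers only): with Fin = 102.4 MHz,
+ * numerator = 16 and denominator = 9, Fout = 102.4 / 2 * 9 / 16 =
+ * 28.8 MHz.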
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_ddn.h"
+
+static unsigned long ccu_ddn_calc_rate(unsigned long prate,
+ unsigned long num, unsigned long den)
+{
+ return prate * den / 2 / num;
+}
+
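+/*
+ * Select the best numerator/denominator pair that fits in the register
+ * fields: rational_best_approximation() approximates rate / (prate / 2)
+ * with den / num, bounded by the maximum values the fields can hold.
+ */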
+static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn,
+ unsigned long rate, unsigned long prate,
+ unsigned long *num, unsigned long *den)
+{
+ rational_best_approximation(rate, prate / 2,
+ ddn->den_mask >> ddn->den_shift,
+ ddn->num_mask >> ddn->num_shift,
+ den, num);
+ return ccu_ddn_calc_rate(prate, *num, *den);
+}
+
+static long ccu_ddn_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ return ccu_ddn_calc_best_rate(ddn, rate, *prate, &num, &den);
+}
+
+static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned int val, num, den;
+
+ val = ccu_read(&ddn->common, ctrl);
+
+ num = (val & ddn->num_mask) >> ddn->num_shift;
+ den = (val & ddn->den_mask) >> ddn->den_shift;
+
+ return ccu_ddn_calc_rate(prate, num, den);
+}
+
+static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
+ unsigned long num, den;
+
+ ccu_ddn_calc_best_rate(ddn, rate, prate, &num, &den);
+
+ ccu_update(&ddn->common, ctrl,
+ ddn->num_mask | ddn->den_mask,
+ (num << ddn->num_shift) | (den << ddn->den_shift));
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_ddn_ops = {
+ .recalc_rate = ccu_ddn_recalc_rate,
+ .round_rate = ccu_ddn_round_rate,
+ .set_rate = ccu_ddn_set_rate,
+};
diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
new file mode 100644
index 000000000000..a52fabe77d62
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_ddn.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_DDN_H_
+#define _CCU_DDN_H_
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_ddn {
+ struct ccu_common common;
+ unsigned int num_mask;
+ unsigned int num_shift;
+ unsigned int den_mask;
+ unsigned int den_shift;
+};
+
+#define CCU_DDN_INIT(_name, _parent, _flags) \
+ CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags)
+
+#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \
+ _den_shift, _den_width, _flags) \
+static struct ccu_ddn _name = { \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .hw.init = CCU_DDN_INIT(_name, _parent, _flags), \
+ }, \
+ .num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \
+ .num_shift = _num_shift, \
+ .den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \
+ .den_shift = _den_shift, \
+}
+
+static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_ddn, common);
+}
+
+extern const struct clk_ops spacemit_ccu_ddn_ops;
+
+#endif /* _CCU_DDN_H_ */
diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c
new file mode 100644
index 000000000000..9b852aa61f78
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ *
+ * The MIX clock type is a combination of a mux, a factor or divider, and a gate.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+
+#include "ccu_mix.h"
+
+#define MIX_FC_TIMEOUT_US 10000
+#define MIX_FC_DELAY_US 5
+
+static void ccu_gate_disable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ ccu_update(&mix->common, ctrl, mix->gate.mask, 0);
+}
+
+static int ccu_gate_enable(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ ccu_update(&mix->common, ctrl, gate->mask, gate->mask);
+
+ return 0;
+}
+
+static int ccu_gate_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_gate_config *gate = &mix->gate;
+
+ return (ccu_read(&mix->common, ctrl) & gate->mask) == gate->mask;
+}
+
+static unsigned long ccu_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+
+ return parent_rate * mix->factor.mul / mix->factor.div;
+}
+
+static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_div_config *div = &mix->div;
+ unsigned long val;
+
+ val = ccu_read(&mix->common, ctrl) >> div->shift;
+ val &= (1 << div->width) - 1;
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL, 0, div->width);
+}
+
+/*
+ * Some clocks require an "FC" (frequency change) bit to be set after their
+ * rate is changed or they are reparented. The hardware clears the bit once
+ * the change has taken effect, so poll until it is cleared, giving up after
+ * MIX_FC_TIMEOUT_US.
+ */
+static int ccu_mix_trigger_fc(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+ unsigned int val;
+
+	/* Clocks without an FC register need no handshake */
+	if (!common->reg_fc)
+		return 0;
+
+ ccu_update(common, fc, common->mask_fc, common->mask_fc);
+
+ return regmap_read_poll_timeout_atomic(common->regmap, common->reg_fc,
+ val, !(val & common->mask_fc),
+ MIX_FC_DELAY_US,
+ MIX_FC_TIMEOUT_US);
+}
+
+static long ccu_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return ccu_factor_recalc_rate(hw, *prate);
+}
+
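+/*
+ * The factor is fixed in hardware, so there is nothing to program here;
+ * ccu_factor_round_rate() already reported the only achievable rate.
+ */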
+static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
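+/*
+ * Brute-force search: walk every parent and every divider setting and
+ * keep the combination whose resulting rate is closest to the request.
+ * The output pointers are optional and track the best candidate found.
+ */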
+static unsigned long
+ccu_mix_calc_best_rate(struct clk_hw *hw, unsigned long rate,
+ struct clk_hw **best_parent,
+ unsigned long *best_parent_rate,
+ u32 *div_val)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ unsigned int parent_num = clk_hw_get_num_parents(hw);
+ struct ccu_div_config *div = &mix->div;
+ u32 div_max = 1 << div->width;
+ unsigned long best_rate = 0;
+
+ for (int i = 0; i < parent_num; i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate;
+
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ for (int j = 1; j <= div_max; j++) {
+ unsigned long tmp = DIV_ROUND_CLOSEST_ULL(parent_rate, j);
+
+			if (abs_diff(tmp, rate) < abs_diff(best_rate, rate)) {
+ best_rate = tmp;
+
+ if (div_val)
+ *div_val = j - 1;
+
+ if (best_parent) {
+ *best_parent = parent;
+ *best_parent_rate = parent_rate;
+ }
+ }
+ }
+ }
+
+ return best_rate;
+}
+
+static int ccu_mix_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ req->rate = ccu_mix_calc_best_rate(hw, req->rate,
+ &req->best_parent_hw,
+ &req->best_parent_rate,
+ NULL);
+ return 0;
+}
+
+static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_common *common = &mix->common;
+ struct ccu_div_config *div = &mix->div;
+ u32 current_div, target_div, mask;
+
+ ccu_mix_calc_best_rate(hw, rate, NULL, NULL, &target_div);
+
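+	/* Skip the register write and FC handshake if the divider already matches */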
+ current_div = ccu_read(common, ctrl) >> div->shift;
+ current_div &= (1 << div->width) - 1;
+
+ if (current_div == target_div)
+ return 0;
+
+ mask = GENMASK(div->width + div->shift - 1, div->shift);
+
+ ccu_update(common, ctrl, mask, target_div << div->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+static u8 ccu_mux_get_parent(struct clk_hw *hw)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u8 parent;
+
+ parent = ccu_read(&mix->common, ctrl) >> mux->shift;
+ parent &= (1 << mux->width) - 1;
+
+ return parent;
+}
+
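+/* Reparenting uses the same FC handshake as a rate change */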
+static int ccu_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_mix *mix = hw_to_ccu_mix(hw);
+ struct ccu_mux_config *mux = &mix->mux;
+ u32 mask;
+
+ mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+
+ ccu_update(&mix->common, ctrl, mask, index << mux->shift);
+
+ return ccu_mix_trigger_fc(hw);
+}
+
+const struct clk_ops spacemit_ccu_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+};
+
+const struct clk_ops spacemit_ccu_factor_ops = {
+ .round_rate = ccu_factor_round_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_ops = {
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_factor_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .round_rate = ccu_factor_round_rate,
+ .recalc_rate = ccu_factor_recalc_rate,
+ .set_rate = ccu_factor_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+};
+
+const struct clk_ops spacemit_ccu_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
+
+const struct clk_ops spacemit_ccu_mux_div_ops = {
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = ccu_mix_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_mix_set_rate,
+};
diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h
new file mode 100644
index 000000000000..51d19f5d6aac
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_mix.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_MIX_H_
+#define _CCU_MIX_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_gate_config - Gate configuration
+ *
+ * @mask: Mask to enable the gate. Some clocks may have more than one bit
+ * set in this field.
+ */
+struct ccu_gate_config {
+ u32 mask;
+};
+
+struct ccu_factor_config {
+ u32 div;
+ u32 mul;
+};
+
+struct ccu_mux_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_div_config {
+ u8 shift;
+ u8 width;
+};
+
+struct ccu_mix {
+ struct ccu_factor_config factor;
+ struct ccu_gate_config gate;
+ struct ccu_div_config div;
+ struct ccu_mux_config mux;
+ struct ccu_common common;
+};
+
+#define CCU_GATE_INIT(_mask) { .mask = _mask }
+#define CCU_FACTOR_INIT(_div, _mul) { .div = _div, .mul = _mul }
+#define CCU_MUX_INIT(_shift, _width) { .shift = _shift, .width = _width }
+#define CCU_DIV_INIT(_shift, _width) { .shift = _shift, .width = _width }
+
+#define CCU_PARENT_HW(_parent) { .hw = &_parent.common.hw }
+#define CCU_PARENT_NAME(_name) { .fw_name = #_name }
+
+#define CCU_MIX_INITHW(_name, _parent, _ops, _flags) \
+ .hw.init = &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = #_name, \
+ .parent_data = (const struct clk_parent_data[]) \
+ { _parent }, \
+ .num_parents = 1, \
+ .ops = &_ops, \
+ }
+
+#define CCU_MIX_INITHW_PARENTS(_name, _parents, _ops, _flags) \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(#_name, _parents, &_ops, _flags)
+
+#define CCU_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_FACTOR_DEFINE(_name, _parent, _div, _mul) \
+static struct ccu_mix _name = { \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_ops, 0), \
+ } \
+}
+
+#define CCU_MUX_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_DIV_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_ops, _flags) \
+ } \
+}
+
+#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .factor = CCU_FACTOR_INIT(_div, _mul), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, 0) \
+ } \
+}
+
+#define CCU_MUX_GATE_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .mux = CCU_MUX_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_gate_ops, _flags), \
+ } \
+}
+
+#define CCU_DIV_GATE_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, \
+ _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_shift, _width), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_gate_ops, \
+ _flags), \
+ } \
+}
+
+#define CCU_MUX_DIV_GATE_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _muxshift, _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_fc, \
+ _mshift, _mwidth, _mask_fc, _muxshift, \
+ _muxwidth, _mask_gate, _flags) \
+static struct ccu_mix _name = { \
+ .gate = CCU_GATE_INIT(_mask_gate), \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_fc, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_gate_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_DIV_GATE_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth,\
+ _mask_fc, _muxshift, _muxwidth, _mask_gate, \
+ _flags) \
+CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_ctrl, _mshift,\
+ _mwidth, _mask_fc, _muxshift, _muxwidth, \
+ _mask_gate, _flags)
+
+#define CCU_MUX_DIV_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \
+ _mask_fc, _muxshift, _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .div = CCU_DIV_INIT(_mshift, _mwidth), \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, \
+ spacemit_ccu_mux_div_ops, _flags), \
+ }, \
+}
+
+#define CCU_MUX_FC_DEFINE(_name, _parents, _reg_ctrl, _mask_fc, _muxshift, \
+ _muxwidth, _flags) \
+static struct ccu_mix _name = { \
+ .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \
+ .common = { \
+ .reg_ctrl = _reg_ctrl, \
+ .reg_fc = _reg_ctrl, \
+ .mask_fc = _mask_fc, \
+ CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \
+ _flags) \
+ }, \
+}
+
+static inline struct ccu_mix *hw_to_ccu_mix(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_mix, common);
+}
+
+extern const struct clk_ops spacemit_ccu_gate_ops;
+extern const struct clk_ops spacemit_ccu_factor_ops;
+extern const struct clk_ops spacemit_ccu_mux_ops;
+extern const struct clk_ops spacemit_ccu_div_ops;
+extern const struct clk_ops spacemit_ccu_factor_gate_ops;
+extern const struct clk_ops spacemit_ccu_div_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_gate_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_ops;
+extern const struct clk_ops spacemit_ccu_mux_div_gate_ops;
+#endif /* _CCU_MIX_H_ */
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
new file mode 100644
index 000000000000..4427dcfbbb97
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/math.h>
+#include <linux/regmap.h>
+
+#include "ccu_common.h"
+#include "ccu_pll.h"
+
+#define PLL_TIMEOUT_US 3000
+#define PLL_DELAY_US 5
+
+#define PLL_SWCR3_EN ((u32)BIT(31))
+#define PLL_SWCR3_MASK GENMASK(30, 0)
+
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_best_rate(struct ccu_pll *pll,
+ unsigned long rate)
+{
+ struct ccu_pll_config *config = &pll->config;
+ const struct ccu_pll_rate_tbl *best_entry;
+ unsigned long best_delta = ULONG_MAX;
+ int i;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+ unsigned long delta = abs_diff(entry->rate, rate);
+
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_entry = entry;
+ }
+ }
+
+ return best_entry;
+}
+
+static const struct ccu_pll_rate_tbl *ccu_pll_lookup_matched_entry(struct ccu_pll *pll)
+{
+ struct ccu_pll_config *config = &pll->config;
+ u32 swcr1, swcr3;
+ int i;
+
+ swcr1 = ccu_read(&pll->common, swcr1);
+ swcr3 = ccu_read(&pll->common, swcr3);
+ swcr3 &= PLL_SWCR3_MASK;
+
+ for (i = 0; i < config->tbl_num; i++) {
+ const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i];
+
+ if (swcr1 == entry->swcr1 && swcr3 == entry->swcr3)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void ccu_pll_update_param(struct ccu_pll *pll, const struct ccu_pll_rate_tbl *entry)
+{
+ struct ccu_common *common = &pll->common;
+
+ regmap_write(common->regmap, common->reg_swcr1, entry->swcr1);
+ ccu_update(common, swcr3, PLL_SWCR3_MASK, entry->swcr3);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return ccu_read(common, swcr3) & PLL_SWCR3_EN;
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ struct ccu_common *common = &pll->common;
+ unsigned int tmp;
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, PLL_SWCR3_EN);
+
+ /* check lock status */
+ return regmap_read_poll_timeout_atomic(common->lock_regmap,
+ pll->config.reg_lock,
+ tmp,
+ tmp & pll->config.mask_lock,
+ PLL_DELAY_US, PLL_TIMEOUT_US);
+}
+
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ ccu_update(common, swcr3, PLL_SWCR3_EN, 0);
+}
+
+/*
+ * PLLs must be gated before their rate is changed, which is ensured
+ * by the CLK_SET_RATE_GATE flag.
+ */
+static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_best_rate(pll, rate);
+ ccu_pll_update_param(pll, entry);
+
+ return 0;
+}
+
+static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+ const struct ccu_pll_rate_tbl *entry;
+
+ entry = ccu_pll_lookup_matched_entry(pll);
+
+ WARN_ON_ONCE(!entry);
+
+ return entry ? entry->rate : -EINVAL;
+}
+
+static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return ccu_pll_lookup_best_rate(pll, rate)->rate;
+}
+
+static int ccu_pll_init(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ if (ccu_pll_lookup_matched_entry(pll))
+ return 0;
+
+ ccu_pll_disable(hw);
+ ccu_pll_update_param(pll, &pll->config.rate_tbl[0]);
+
+ return 0;
+}
+
+const struct clk_ops spacemit_ccu_pll_ops = {
+ .init = ccu_pll_init,
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .set_rate = ccu_pll_set_rate,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .round_rate = ccu_pll_round_rate,
+ .is_enabled = ccu_pll_is_enabled,
+};
diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h
new file mode 100644
index 000000000000..0592f4c3068c
--- /dev/null
+++ b/drivers/clk/spacemit/ccu_pll.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 SpacemiT Technology Co. Ltd
+ * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org>
+ */
+
+#ifndef _CCU_PLL_H_
+#define _CCU_PLL_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+/**
+ * struct ccu_pll_rate_tbl - Structure mapping between PLL rate and register
+ * configuration.
+ *
+ * @rate: PLL rate
+ * @swcr1:	Register value of PLLx_SW1_CTRL (PLLx_SWCR1).
+ * @swcr3:	Register value of the lowest 31 bits of PLLx_SW3_CTRL
+ *		(PLLx_SWCR3). The highest bit enables the PLL and is not
+ *		contained in this field.
+ */
+struct ccu_pll_rate_tbl {
+ unsigned long rate;
+ u32 swcr1;
+ u32 swcr3;
+};
+
+struct ccu_pll_config {
+ const struct ccu_pll_rate_tbl *rate_tbl;
+ u32 tbl_num;
+ u32 reg_lock;
+ u32 mask_lock;
+};
+
+#define CCU_PLL_RATE(_rate, _swcr1, _swcr3) \
+ { \
+ .rate = _rate, \
+ .swcr1 = _swcr1, \
+ .swcr3 = _swcr3, \
+ }
+
+struct ccu_pll {
+ struct ccu_common common;
+ struct ccu_pll_config config;
+};
+
+#define CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock) \
+ { \
+ .rate_tbl = _table, \
+ .tbl_num = ARRAY_SIZE(_table), \
+ .reg_lock = (_reg_lock), \
+ .mask_lock = (_mask_lock), \
+ }
+
+#define CCU_PLL_HWINIT(_name, _flags) \
+ (&(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &spacemit_ccu_pll_ops, \
+ .parent_data = &(struct clk_parent_data) { .index = 0 }, \
+ .num_parents = 1, \
+ .flags = _flags, \
+ })
+
+#define CCU_PLL_DEFINE(_name, _table, _reg_swcr1, _reg_swcr3, _reg_lock, \
+ _mask_lock, _flags) \
+static struct ccu_pll _name = { \
+ .config = CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock), \
+ .common = { \
+ .reg_swcr1 = _reg_swcr1, \
+ .reg_swcr3 = _reg_swcr3, \
+ .hw.init = CCU_PLL_HWINIT(_name, _flags) \
+ } \
+}
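
A hedged sketch of how a SoC driver might feed these macros: each supported frequency gets one CCU_PLL_RATE() row, and CCU_PLL_DEFINE() ties the table to the control and lock registers. All numeric values below are illustrative, not real PLL settings:

/* Hypothetical rate table; the swcr1/swcr3 values are made up. */
static const struct ccu_pll_rate_tbl pll_foo_rates[] = {
	CCU_PLL_RATE(1200000000UL, 0x00306666, 0x00da6345),
	CCU_PLL_RATE(2400000000UL, 0x0060cccc, 0x00da6345),
};

/* Register offsets and the lock mask are hypothetical as well. */
CCU_PLL_DEFINE(pll_foo, pll_foo_rates, 0x100, 0x104, 0x200, BIT(31), 0);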
+
+static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_pll, common);
+}
+
+extern const struct clk_ops spacemit_ccu_pll_ops;
+
+#endif
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 5830a9d87bf2..8896fd052ef1 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -9,123 +9,123 @@ if SUNXI_CCU
config SUNIV_F1C100S_CCU
tristate "Support for the Allwinner newer F1C100s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUNIV || COMPILE_TEST
config SUN20I_D1_CCU
tristate "Support for the Allwinner D1/R528/T113 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN20I_D1_R_CCU
tristate "Support for the Allwinner D1/R528/T113 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || RISCV || COMPILE_TEST
config SUN50I_A64_CCU
tristate "Support for the Allwinner A64 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_CCU
tristate "Support for the Allwinner A100 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_A100_R_CCU
tristate "Support for the Allwinner A100 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_CCU
tristate "Support for the Allwinner H6 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H616_CCU
tristate "Support for the Allwinner H616 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN50I_H6_R_CCU
tristate "Support for the Allwinner H6 and H616 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN55I_A523_CCU
tristate "Support for the Allwinner A523/T527 CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN55I_A523_R_CCU
tristate "Support for the Allwinner A523/T527 PRCM CCU"
- default y
+ default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
config SUN4I_A10_CCU
tristate "Support for the Allwinner A10/A20 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN5I || COMPILE_TEST
depends on SUNXI_CCU=y
config SUN6I_A31_CCU
tristate "Support for the Allwinner A31/A31s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN6I || COMPILE_TEST
config SUN6I_RTC_CCU
tristate "Support for the Allwinner H616/R329 RTC CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_A23_CCU
tristate "Support for the Allwinner A23 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
tristate "Support for the Allwinner A33 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A83T_CCU
tristate "Support for the Allwinner A83T CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
tristate "Support for the Allwinner H3 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
config SUN8I_V3S_CCU
tristate "Support for the Allwinner V3s CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_DE2_CCU
tristate "Support for the Allwinner SoCs DE2 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST
config SUN8I_R40_CCU
tristate "Support for the Allwinner R40 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || COMPILE_TEST
config SUN9I_A80_CCU
tristate "Support for the Allwinner A80 CCU"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
tristate "Support for Allwinner SoCs' PRCM CCUs"
- default y
+ default ARCH_SUNXI
depends on MACH_SUN8I || ARM64 || COMPILE_TEST
endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index bb66c906ebbb..e83d4fd40240 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = {
{ .hw = &pll_periph0_2x_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
-
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
+ mmc0_mmc1_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
+ mmc0_mmc1_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static const struct clk_parent_data mmc2_parents[] = {
{ .fw_name = "hosc" },
@@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = {
{ .hw = &pll_periph0_800M_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents,
+ 0x838,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
0x84c, BIT(0), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index daa462c7d477..955c614830fa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1094,6 +1094,7 @@ static const struct ccu_reset_map sun50i_h616_ccu_resets[] = {
[RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) },
[RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) },
[RST_BUS_TCON_TV1] = { 0xb9c, BIT(17) },
+ [RST_BUS_LVDS] = { 0xbac, BIT(16) },
[RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
[RST_BUS_TVE0] = { 0xbbc, BIT(17) },
[RST_BUS_HDCP] = { 0xc4c, BIT(16) },
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index f2aa71206bc2..a6cd0f988859 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -239,6 +240,16 @@ static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
.num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
};
+static const struct sunxi_ccu_desc sun50i_h616_de33_clk_desc = {
+ .ccu_clks = sun8i_de2_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks),
+
+ .hw_clks = &sun8i_h3_de2_hw_clks,
+
+ .resets = sun50i_h5_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets),
+};
+
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
struct clk *bus_clk, *mod_clk;
@@ -291,6 +302,16 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
goto err_disable_mod_clk;
}
+ /*
+	 * The DE33 requires these additional (unknown) registers to be
+	 * set during initialisation.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun50i-h616-de33-clk")) {
+ writel(0, reg + 0x24);
+ writel(0x0000a980, reg + 0x28);
+ }
+
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc);
if (ret)
goto err_assert_reset;
@@ -335,6 +356,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.compatible = "allwinner,sun50i-h6-de3-clk",
.data = &sun50i_h5_de2_clk_desc,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-clk",
+ .data = &sun50i_h616_de33_clk_desc,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sunxi_de2_clk_ids);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index b35aeec70484..bb09c649bfa3 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -52,6 +52,28 @@ struct ccu_mp {
} \
}
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \
+ _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _postdiv, _flags)\
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .fixed_post_div = _postdiv, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FIXED_POSTDIV, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
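
As a hedged arithmetic sketch of what the post-divider adds, assuming sunxi-ng's usual MP semantics (divide by M+1, then by 2^P) with the fixed post-divider applied on top:

/*
 *   rate = parent / (M + 1) / (1 << P) / postdiv
 * e.g. a 600 MHz parent with M = 0, P = 0 and postdiv = 2 gives 300 MHz.
 */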
+
#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
@@ -109,8 +131,7 @@ struct ccu_mp {
_mshift, _mwidth, \
_pshift, _pwidth, \
_muxshift, _muxwidth, \
- _gate, _features, \
- _flags) \
+ _gate, _flags, _features) \
struct ccu_mp _struct = { \
.enable = _gate, \
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
index 1c4e543366dd..5e2f92bfe412 100644
--- a/drivers/clk/sunxi/Kconfig
+++ b/drivers/clk/sunxi/Kconfig
@@ -2,13 +2,13 @@
menuconfig CLK_SUNXI
bool "Legacy clock support for Allwinner SoCs"
depends on (ARM && ARCH_SUNXI) || COMPILE_TEST
- default y
+ default (ARM && ARCH_SUNXI)
if CLK_SUNXI
config CLK_SUNXI_CLOCKS
bool "Legacy clock drivers"
- default y
+ default ARCH_SUNXI
help
Legacy clock drivers being used on older (A10, A13, A20,
A23, A31, A80) SoCs. These drivers are kept around for
@@ -19,14 +19,14 @@ config CLK_SUNXI_CLOCKS
config CLK_SUNXI_PRCM_SUN6I
bool "Legacy A31 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A31 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
config CLK_SUNXI_PRCM_SUN8I
bool "Legacy sun8i PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the sun8i family PRCM clocks.
Those are usually needed for the PMIC communication,
@@ -34,7 +34,7 @@ config CLK_SUNXI_PRCM_SUN8I
config CLK_SUNXI_PRCM_SUN9I
bool "Legacy A80 PRCM driver"
- default y
+ default ARCH_SUNXI
help
Legacy clock driver for the A80 PRCM clocks. Those are
usually needed for the PMIC communication, mostly.
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 4c9555fc6184..ebfb1d59401d 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -847,6 +847,67 @@ static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0);
static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0);
static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0);
+static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk",
+ video_pll_clk_pd, 0x0, BIT(0), 0);
+static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd,
+ 0x0, BIT(3), 0);
+static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk",
+ video_pll_clk_pd, 0x0, BIT(4), 0);
+static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk",
+ video_pll_clk_pd, 0x0, BIT(5), 0);
+static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk",
+ video_pll_clk_pd, 0x0, BIT(6), 0);
+static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0,
+ BIT(7), 0);
+static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0,
+ BIT(8), 0);
+static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0,
+ BIT(9), 0);
+static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd,
+ 0x0, BIT(10), 0);
+static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0,
+ BIT(11), 0);
+static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd,
+ 0x0, BIT(12), 0);
+static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk",
+ video_pll_clk_pd, 0x0, BIT(13), 0);
+static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk",
+ video_pll_clk_pd, 0x0, BIT(14), 0);
+static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk",
+ video_pll_clk_pd, 0x0, BIT(15), 0);
+static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk",
+ video_pll_clk_pd, 0x0, BIT(16), 0);
+static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk",
+ video_pll_clk_pd, 0x0, BIT(17), 0);
+static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk",
+ video_pll_clk_pd, 0x0, BIT(18), 0);
+static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd,
+ 0x0, BIT(19), 0);
+static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk",
+ video_pll_clk_pd, 0x0, BIT(20), 0);
+static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(21), 0);
+static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk",
+ video_pll_clk_pd, 0x0, BIT(22), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk,
+ "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, BIT(23), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk,
+ "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, BIT(24), 0);
+static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk,
+ "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, BIT(25), 0);
+static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk",
+ video_pll_clk_pd, 0x0, BIT(27), 0);
+static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(28), 0);
+static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk",
+ video_pll_clk_pd, 0x0, BIT(29), 0);
+static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk",
+ video_pll_clk_pd, 0x0, BIT(30), 0);
+static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk",
+ video_pll_clk_pd, 0x0, BIT(31), 0);
+static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd,
+ 0x4, BIT(0), 0);
+
static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m",
&gmac_pll_clk.common.hw, 10, 1, 0);
@@ -963,7 +1024,38 @@ static struct ccu_common *th1520_gate_clks[] = {
&sram3_clk.common,
};
-#define NR_CLKS (CLK_UART_SCLK + 1)
+static struct ccu_common *th1520_vo_gate_clks[] = {
+ &axi4_vo_aclk.common,
+ &gpu_core_clk.common,
+ &gpu_cfg_aclk.common,
+ &dpu0_pixelclk.common,
+ &dpu1_pixelclk.common,
+ &dpu_hclk.common,
+ &dpu_aclk.common,
+ &dpu_cclk.common,
+ &hdmi_sfr_clk.common,
+ &hdmi_pclk.common,
+ &hdmi_cec_clk.common,
+ &mipi_dsi0_pclk.common,
+ &mipi_dsi1_pclk.common,
+ &mipi_dsi0_cfg_clk.common,
+ &mipi_dsi1_cfg_clk.common,
+ &mipi_dsi0_refclk.common,
+ &mipi_dsi1_refclk.common,
+ &hdmi_i2s_clk.common,
+ &x2h_dpu1_aclk.common,
+ &x2h_dpu_aclk.common,
+ &axi4_vo_pclk.common,
+ &iopmp_vosys_dpu_pclk.common,
+ &iopmp_vosys_dpu1_pclk.common,
+ &iopmp_vosys_gpu_pclk.common,
+ &iopmp_dpu1_aclk.common,
+ &iopmp_dpu_aclk.common,
+ &iopmp_gpu_aclk.common,
+ &mipi_dsi0_pixclk.common,
+ &mipi_dsi1_pixclk.common,
+ &hdmi_pixclk.common
+};
static const struct regmap_config th1520_clk_regmap_config = {
.reg_bits = 32,
@@ -972,8 +1064,44 @@ static const struct regmap_config th1520_clk_regmap_config = {
.fast_io = true,
};
+struct th1520_plat_data {
+ struct ccu_common **th1520_pll_clks;
+ struct ccu_common **th1520_div_clks;
+ struct ccu_common **th1520_mux_clks;
+ struct ccu_common **th1520_gate_clks;
+
+ int nr_clks;
+ int nr_pll_clks;
+ int nr_div_clks;
+ int nr_mux_clks;
+ int nr_gate_clks;
+};
+
+static const struct th1520_plat_data th1520_ap_platdata = {
+ .th1520_pll_clks = th1520_pll_clks,
+ .th1520_div_clks = th1520_div_clks,
+ .th1520_mux_clks = th1520_mux_clks,
+ .th1520_gate_clks = th1520_gate_clks,
+
+ .nr_clks = CLK_UART_SCLK + 1,
+
+ .nr_pll_clks = ARRAY_SIZE(th1520_pll_clks),
+ .nr_div_clks = ARRAY_SIZE(th1520_div_clks),
+ .nr_mux_clks = ARRAY_SIZE(th1520_mux_clks),
+ .nr_gate_clks = ARRAY_SIZE(th1520_gate_clks),
+};
+
+static const struct th1520_plat_data th1520_vo_platdata = {
+ .th1520_gate_clks = th1520_vo_gate_clks,
+
+ .nr_clks = CLK_HDMI_PIXCLK + 1,
+
+ .nr_gate_clks = ARRAY_SIZE(th1520_vo_gate_clks),
+};
+
static int th1520_clk_probe(struct platform_device *pdev)
{
+ const struct th1520_plat_data *plat_data;
struct device *dev = &pdev->dev;
struct clk_hw_onecell_data *priv;
@@ -982,11 +1110,16 @@ static int th1520_clk_probe(struct platform_device *pdev)
struct clk_hw *hw;
int ret, i;
- priv = devm_kzalloc(dev, struct_size(priv, hws, NR_CLKS), GFP_KERNEL);
+ plat_data = device_get_match_data(&pdev->dev);
+ if (!plat_data)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "No device match data found\n");
+
+ priv = devm_kzalloc(dev, struct_size(priv, hws, plat_data->nr_clks), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->num = NR_CLKS;
+ priv->num = plat_data->nr_clks;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -996,35 +1129,35 @@ static int th1520_clk_probe(struct platform_device *pdev)
if (IS_ERR(map))
return PTR_ERR(map);
- for (i = 0; i < ARRAY_SIZE(th1520_pll_clks); i++) {
- struct ccu_pll *cp = hw_to_ccu_pll(&th1520_pll_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_pll_clks; i++) {
+ struct ccu_pll *cp = hw_to_ccu_pll(&plat_data->th1520_pll_clks[i]->hw);
- th1520_pll_clks[i]->map = map;
+ plat_data->th1520_pll_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_pll_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_pll_clks[i]->hw);
if (ret)
return ret;
priv->hws[cp->common.clkid] = &cp->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_div_clks); i++) {
- struct ccu_div *cd = hw_to_ccu_div(&th1520_div_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_div_clks; i++) {
+ struct ccu_div *cd = hw_to_ccu_div(&plat_data->th1520_div_clks[i]->hw);
- th1520_div_clks[i]->map = map;
+ plat_data->th1520_div_clks[i]->map = map;
- ret = devm_clk_hw_register(dev, &th1520_div_clks[i]->hw);
+ ret = devm_clk_hw_register(dev, &plat_data->th1520_div_clks[i]->hw);
if (ret)
return ret;
priv->hws[cd->common.clkid] = &cd->common.hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_mux_clks); i++) {
- struct ccu_mux *cm = hw_to_ccu_mux(&th1520_mux_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_mux_clks; i++) {
+ struct ccu_mux *cm = hw_to_ccu_mux(&plat_data->th1520_mux_clks[i]->hw);
const struct clk_init_data *init = cm->common.hw.init;
- th1520_mux_clks[i]->map = map;
+ plat_data->th1520_mux_clks[i]->map = map;
hw = devm_clk_hw_register_mux_parent_data_table(dev,
init->name,
init->parent_data,
@@ -1040,10 +1173,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
priv->hws[cm->common.clkid] = hw;
}
- for (i = 0; i < ARRAY_SIZE(th1520_gate_clks); i++) {
- struct ccu_gate *cg = hw_to_ccu_gate(&th1520_gate_clks[i]->hw);
+ for (i = 0; i < plat_data->nr_gate_clks; i++) {
+ struct ccu_gate *cg = hw_to_ccu_gate(&plat_data->th1520_gate_clks[i]->hw);
- th1520_gate_clks[i]->map = map;
+ plat_data->th1520_gate_clks[i]->map = map;
hw = devm_clk_hw_register_gate_parent_data(dev,
cg->common.hw.init->name,
@@ -1057,19 +1190,21 @@ static int th1520_clk_probe(struct platform_device *pdev)
priv->hws[cg->common.clkid] = hw;
}
- ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
- if (ret)
- return ret;
- priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
+ if (plat_data == &th1520_ap_platdata) {
+ ret = devm_clk_hw_register(dev, &osc12m_clk.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_OSC12M] = &osc12m_clk.hw;
- ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
- if (ret)
- return ret;
- priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw);
+ if (ret)
+ return ret;
+ priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
- ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
- if (ret)
- return ret;
+ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+ if (ret)
+ return ret;
+ }
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
@@ -1081,6 +1216,11 @@ static int th1520_clk_probe(struct platform_device *pdev)
static const struct of_device_id th1520_clk_match[] = {
{
.compatible = "thead,th1520-clk-ap",
+ .data = &th1520_ap_platdata,
+ },
+ {
+ .compatible = "thead,th1520-clk-vo",
+ .data = &th1520_vo_platdata,
},
{ /* sentinel */ },
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 487c85259967..645f517a1ac2 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -73,6 +73,14 @@ config DW_APB_TIMER_OF
select DW_APB_TIMER
select TIMER_OF
+config ECONET_EN751221_TIMER
+ bool "EcoNet EN751221 High Precision Timer" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ select TIMER_OF
+ help
+	  Support for the CPU timer found on EcoNet MIPS-based SoCs.
+
config FTTMR010_TIMER
bool "Faraday Technology timer driver" if COMPILE_TEST
depends on HAS_IOMEM
@@ -437,8 +445,8 @@ config ATMEL_ST
config ATMEL_TCB_CLKSRC
bool "Atmel TC Block timer driver" if COMPILE_TEST
- depends on ARM && HAS_IOMEM
- select TIMER_OF if OF
+ depends on ARM && OF && HAS_IOMEM
+ select TIMER_OF
help
Support for Timer Counter Blocks on Atmel SoCs.
@@ -763,4 +771,12 @@ config RALINK_TIMER
Enables support for system tick counter present on
Ralink SoCs RT3352 and MT7620.
+config NXP_STM_TIMER
+ bool "NXP System Timer Module driver"
+ depends on ARCH_S32 || COMPILE_TEST
+ select CLKSRC_MMIO
+ help
+	  Enables support for the NXP System Timer Module found in the
+	  NXP S32G platform series.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 43ef16a4efa6..205bf3b0a8f3 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
+obj-$(CONFIG_ECONET_EN751221_TIMER) += timer-econet-en751221.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_OMAP_DM_SYSTIMER) += timer-ti-dm-systimer.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
@@ -92,3 +93,4 @@ obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
+obj-$(CONFIG_NXP_STM_TIMER) += timer-nxp-stm.o
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 3fcbd02b2483..2089aeaae225 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -225,7 +225,6 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#if defined(CONFIG_ARCH_RZG2L) || defined(CONFIG_ARCH_R9A09G057)
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -233,7 +232,7 @@ static int __init ostm_probe(struct platform_device *pdev)
return ostm_init(dev->of_node);
}
-static const struct of_device_id ostm_of_table[] = {
+static const struct of_device_id __maybe_unused ostm_of_table[] = {
{ .compatible = "renesas,ostm", },
{ /* sentinel */ }
};
@@ -246,4 +245,3 @@ static struct platform_driver ostm_device_driver = {
},
};
builtin_platform_driver_probe(ostm_device_driver, ostm_probe);
-#endif
diff --git a/drivers/clocksource/timer-econet-en751221.c b/drivers/clocksource/timer-econet-en751221.c
new file mode 100644
index 000000000000..3b449fdaafee
--- /dev/null
+++ b/drivers/clocksource/timer-econet-en751221.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Timer present on EcoNet EN75xx MIPS-based SoCs.
+ *
+ * Copyright (C) 2025 by Caleb James DeLisle <cjd@cjdns.fr>
+ */
+
+#include <linux/io.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+
+#define ECONET_BITS 32
+#define ECONET_MIN_DELTA 0x00001000
+#define ECONET_MAX_DELTA GENMASK(ECONET_BITS - 2, 0)
+/* 34Kc hardware has 1 block and 1004Kc has 2. */
+#define ECONET_NUM_BLOCKS DIV_ROUND_UP(NR_CPUS, 2)
+
+static struct {
+ void __iomem *membase[ECONET_NUM_BLOCKS];
+ u32 freq_hz;
+} econet_timer __ro_after_init;
+
+static DEFINE_PER_CPU(struct clock_event_device, econet_timer_pcpu);
+
+/*
+ * Each memory block has 2 timers; the order of registers is:
+ * CTL, CMR0, CNT0, CMR1, CNT1
+ */
+static inline void __iomem *reg_ctl(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1];
+}
+
+static inline void __iomem *reg_compare(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x04;
+}
+
+static inline void __iomem *reg_count(u32 timer_n)
+{
+ return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x08;
+}
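
To make the address arithmetic concrete, the three helpers above resolve to the following addresses for the first four timers, following the register layout described in the comment:

/*
 * timer_n   reg_ctl()          reg_compare()       reg_count()
 *    0      membase[0] + 0x00  membase[0] + 0x04   membase[0] + 0x08
 *    1      membase[0] + 0x00  membase[0] + 0x0c   membase[0] + 0x10
 *    2      membase[1] + 0x00  membase[1] + 0x04   membase[1] + 0x08
 *    3      membase[1] + 0x00  membase[1] + 0x0c   membase[1] + 0x10
 */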
+
+static inline u32 ctl_bit_enabled(u32 timer_n)
+{
+ return 1U << (timer_n & 1);
+}
+
+static inline u32 ctl_bit_pending(u32 timer_n)
+{
+ return 1U << ((timer_n & 1) + 16);
+}
+
+static bool cevt_is_pending(int cpu_id)
+{
+ return ioread32(reg_ctl(cpu_id)) & ctl_bit_pending(cpu_id);
+}
+
+static irqreturn_t cevt_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = this_cpu_ptr(&econet_timer_pcpu);
+ int cpu = cpumask_first(dev->cpumask);
+
+	/*
+	 * Each VPE has its own events, so this will only happen on a
+	 * spurious interrupt.
+	 */
+ if (!cevt_is_pending(cpu))
+ return IRQ_NONE;
+
+ iowrite32(ioread32(reg_count(cpu)), reg_compare(cpu));
+ dev->event_handler(dev);
+ return IRQ_HANDLED;
+}
+
+static int cevt_set_next_event(ulong delta, struct clock_event_device *dev)
+{
+ u32 next;
+ int cpu;
+
+ cpu = cpumask_first(dev->cpumask);
+ next = ioread32(reg_count(cpu)) + delta;
+ iowrite32(next, reg_compare(cpu));
+
+ if ((s32)(next - ioread32(reg_count(cpu))) < ECONET_MIN_DELTA / 2)
+ return -ETIME;
+
+ return 0;
+}
+
+static int cevt_init_cpu(uint cpu)
+{
+ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
+ u32 reg;
+
+ pr_debug("%s: Setting up clockevent for CPU %d\n", cd->name, cpu);
+
+ reg = ioread32(reg_ctl(cpu)) | ctl_bit_enabled(cpu);
+ iowrite32(reg, reg_ctl(cpu));
+
+ enable_percpu_irq(cd->irq, IRQ_TYPE_NONE);
+
+ /* Do this last because it synchronously configures the timer */
+ clockevents_config_and_register(cd, econet_timer.freq_hz,
+ ECONET_MIN_DELTA, ECONET_MAX_DELTA);
+
+ return 0;
+}
+
+static u64 notrace sched_clock_read(void)
+{
+ /* Always read from clock zero no matter the CPU */
+ return (u64)ioread32(reg_count(0));
+}
+
+/* Init */
+
+static void __init cevt_dev_init(uint cpu)
+{
+ iowrite32(0, reg_count(cpu));
+ iowrite32(U32_MAX, reg_compare(cpu));
+}
+
+static int __init cevt_init(struct device_node *np)
+{
+ int i, irq, ret;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+		pr_err("%pOFn: irq_of_parse_and_map failed\n", np);
+ return -EINVAL;
+ }
+
+ ret = request_percpu_irq(irq, cevt_interrupt, np->name, &econet_timer_pcpu);
+
+ if (ret < 0) {
+ pr_err("%pOFn: IRQ %d setup failed (%d)\n", np, irq, ret);
+ goto err_unmap_irq;
+ }
+
+ for_each_possible_cpu(i) {
+ struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
+
+		cd->rating = 310;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
+ cd->set_next_event = cevt_set_next_event;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(i);
+ cd->name = np->name;
+
+ cevt_dev_init(i);
+ }
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "clockevents/econet/timer:starting",
+ cevt_init_cpu, NULL);
+ return 0;
+
+err_unmap_irq:
+ irq_dispose_mapping(irq);
+ return ret;
+}
+
+static int __init timer_init(struct device_node *np)
+{
+ int num_blocks = DIV_ROUND_UP(num_possible_cpus(), 2);
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("%pOFn: Failed to get CPU clock from DT %ld\n", np, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ econet_timer.freq_hz = clk_get_rate(clk);
+
+ for (int i = 0; i < num_blocks; i++) {
+ econet_timer.membase[i] = of_iomap(np, i);
+ if (!econet_timer.membase[i]) {
+ pr_err("%pOFn: failed to map register [%d]\n", np, i);
+ return -ENXIO;
+ }
+ }
+
+ /* For clocksource purposes always read clock zero, whatever the CPU */
+ ret = clocksource_mmio_init(reg_count(0), np->name,
+ econet_timer.freq_hz, 301, ECONET_BITS,
+ clocksource_mmio_readl_up);
+ if (ret) {
+		pr_err("%pOFn: clocksource_mmio_init failed: %d\n", np, ret);
+ return ret;
+ }
+
+ ret = cevt_init(np);
+ if (ret < 0)
+ return ret;
+
+ sched_clock_register(sched_clock_read, ECONET_BITS,
+ econet_timer.freq_hz);
+
+ pr_info("%pOFn: using %u.%03u MHz high precision timer\n", np,
+ econet_timer.freq_hz / 1000000,
+ (econet_timer.freq_hz / 1000) % 1000);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(econet_timer_hpt, "econet,en751221-timer", timer_init);
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
new file mode 100644
index 000000000000..d7ccf9001729
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ *
+ * NXP System Timer Module:
+ *
+ * STM supports commonly required system and application software
+ * timing functions. STM includes a 32-bit count-up timer and four
+ * 32-bit compare channels with a separate interrupt source for each
+ * channel. The timer is driven by the STM module clock divided by an
+ * 8-bit prescale value (1 to 256). It also has the ability to stop
+ * the timer in Debug mode.
+ */
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched_clock.h>
+#include <linux/units.h>
+
+#define STM_CR(__base) (__base)
+
+#define STM_CR_TEN BIT(0)
+#define STM_CR_FRZ BIT(1)
+#define STM_CR_CPS_OFFSET 8u
+#define STM_CR_CPS_MASK GENMASK(15, STM_CR_CPS_OFFSET)
+
+#define STM_CNT(__base) ((__base) + 0x04)
+
+#define STM_CCR0(__base) ((__base) + 0x10)
+#define STM_CCR1(__base) ((__base) + 0x20)
+#define STM_CCR2(__base) ((__base) + 0x30)
+#define STM_CCR3(__base) ((__base) + 0x40)
+
+#define STM_CCR_CEN BIT(0)
+
+#define STM_CIR0(__base) ((__base) + 0x14)
+#define STM_CIR1(__base) ((__base) + 0x24)
+#define STM_CIR2(__base) ((__base) + 0x34)
+#define STM_CIR3(__base) ((__base) + 0x44)
+
+#define STM_CIR_CIF BIT(0)
+
+#define STM_CMP0(__base) ((__base) + 0x18)
+#define STM_CMP1(__base) ((__base) + 0x28)
+#define STM_CMP2(__base) ((__base) + 0x38)
+#define STM_CMP3(__base) ((__base) + 0x48)
+
+#define STM_ENABLE_MASK (STM_CR_FRZ | STM_CR_TEN)
+
+struct stm_timer {
+ void __iomem *base;
+ unsigned long rate;
+ unsigned long delta;
+ unsigned long counter;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ atomic_t refcnt;
+};
+
+static DEFINE_PER_CPU(struct stm_timer *, stm_timers);
+
+static struct stm_timer *stm_sched_clock;
+
+/*
+ * Global counter used when initializing multiple STM instances
+ */
+static int stm_instances;
+
+/*
+ * This global lock is used to prevent race conditions on
+ * stm_instances in case the driver is using the ASYNC probe option.
+ */
+static DEFINE_MUTEX(stm_instances_lock);
+
+DEFINE_GUARD(stm_instances, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
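
The guard defined here makes the mutex scope-managed; a minimal sketch of what it gives the probe path further down (the function name is a hypothetical stand-in):

static int example_critical_section(void)
{
	/* mutex_lock(&stm_instances_lock) happens here */
	guard(stm_instances)(&stm_instances_lock);

	/* ... code touching stm_instances ... */

	return 0;	/* mutex_unlock() runs automatically on every exit */
}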
+
+static struct stm_timer *cs_to_stm(struct clocksource *cs)
+{
+ return container_of(cs, struct stm_timer, cs);
+}
+
+static struct stm_timer *ced_to_stm(struct clock_event_device *ced)
+{
+ return container_of(ced, struct stm_timer, ced);
+}
+
+static u64 notrace nxp_stm_read_sched_clock(void)
+{
+ return readl(STM_CNT(stm_sched_clock->base));
+}
+
+static u32 nxp_stm_clocksource_getcnt(struct stm_timer *stm_timer)
+{
+ return readl(STM_CNT(stm_timer->base));
+}
+
+static void nxp_stm_clocksource_setcnt(struct stm_timer *stm_timer, u32 cnt)
+{
+ writel(cnt, STM_CNT(stm_timer->base));
+}
+
+static u64 nxp_stm_clocksource_read(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ return (u64)nxp_stm_clocksource_getcnt(stm_timer);
+}
+
+static void nxp_stm_module_enable(struct stm_timer *stm_timer)
+{
+ u32 reg;
+
+ reg = readl(STM_CR(stm_timer->base));
+
+ reg |= STM_ENABLE_MASK;
+
+ writel(reg, STM_CR(stm_timer->base));
+}
+
+static void nxp_stm_module_disable(struct stm_timer *stm_timer)
+{
+ u32 reg;
+
+ reg = readl(STM_CR(stm_timer->base));
+
+ reg &= ~STM_ENABLE_MASK;
+
+ writel(reg, STM_CR(stm_timer->base));
+}
+
+static void nxp_stm_module_put(struct stm_timer *stm_timer)
+{
+ if (atomic_dec_and_test(&stm_timer->refcnt))
+ nxp_stm_module_disable(stm_timer);
+}
+
+static void nxp_stm_module_get(struct stm_timer *stm_timer)
+{
+ if (atomic_inc_return(&stm_timer->refcnt) == 1)
+ nxp_stm_module_enable(stm_timer);
+}
+
+static int nxp_stm_clocksource_enable(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_module_get(stm_timer);
+
+ return 0;
+}
+
+static void nxp_stm_clocksource_disable(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_module_put(stm_timer);
+}
+
+static void nxp_stm_clocksource_suspend(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_clocksource_disable(cs);
+ stm_timer->counter = nxp_stm_clocksource_getcnt(stm_timer);
+}
+
+static void nxp_stm_clocksource_resume(struct clocksource *cs)
+{
+ struct stm_timer *stm_timer = cs_to_stm(cs);
+
+ nxp_stm_clocksource_setcnt(stm_timer, stm_timer->counter);
+ nxp_stm_clocksource_enable(cs);
+}
+
+static void __init devm_clocksource_unregister(void *data)
+{
+ struct stm_timer *stm_timer = data;
+
+ clocksource_unregister(&stm_timer->cs);
+}
+
+static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, struct clk *clk)
+{
+ int ret;
+
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->cs.name = name;
+ stm_timer->cs.rating = 460;
+ stm_timer->cs.read = nxp_stm_clocksource_read;
+ stm_timer->cs.enable = nxp_stm_clocksource_enable;
+ stm_timer->cs.disable = nxp_stm_clocksource_disable;
+ stm_timer->cs.suspend = nxp_stm_clocksource_suspend;
+ stm_timer->cs.resume = nxp_stm_clocksource_resume;
+ stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
+ stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
+ if (ret) {
+ clocksource_unregister(&stm_timer->cs);
+ return ret;
+ }
+
+ stm_sched_clock = stm_timer;
+
+ sched_clock_register(nxp_stm_read_sched_clock, 32, stm_timer->rate);
+
+ dev_dbg(dev, "Registered clocksource %s\n", name);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_read_counter(struct stm_timer *stm_timer)
+{
+ return readl(STM_CNT(stm_timer->base));
+}
+
+static void nxp_stm_clockevent_disable(struct stm_timer *stm_timer)
+{
+ writel(0, STM_CCR0(stm_timer->base));
+}
+
+static void nxp_stm_clockevent_enable(struct stm_timer *stm_timer)
+{
+ writel(STM_CCR_CEN, STM_CCR0(stm_timer->base));
+}
+
+static int nxp_stm_clockevent_shutdown(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_clockevent_disable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+ u32 val;
+
+ nxp_stm_clockevent_disable(stm_timer);
+
+ stm_timer->delta = delta;
+
+ val = nxp_stm_clockevent_read_counter(stm_timer) + delta;
+
+ writel(val, STM_CMP0(stm_timer->base));
+
+	/*
+	 * The counter is shared across the channels and cannot be
+	 * stopped while we are setting the next event. If the delta
+	 * is very small, it is possible that the counter increases
+	 * above the computed 'val'. The min_delta value specified
+	 * when registering the clockevent will prevent that. The
+	 * second case is if the counter wraps while we compute 'val'
+	 * and before we write the comparator register. We read the
+	 * counter again, check whether we have gone back in time and,
+	 * if so, abort the timer with -ETIME.
+	 */
+ if (val > nxp_stm_clockevent_read_counter(stm_timer) + delta)
+ return -ETIME;
+
+ nxp_stm_clockevent_enable(stm_timer);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_set_periodic(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ return nxp_stm_clockevent_set_next_event(stm_timer->rate, ced);
+}
+
+static void nxp_stm_clockevent_suspend(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_module_put(stm_timer);
+}
+
+static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
+{
+ struct stm_timer *stm_timer = ced_to_stm(ced);
+
+ nxp_stm_module_get(stm_timer);
+}
+
+static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, int irq,
+ struct clk *clk, int cpu)
+{
+ stm_timer->base = base;
+ stm_timer->rate = clk_get_rate(clk);
+
+ stm_timer->ced.name = name;
+ stm_timer->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ stm_timer->ced.set_state_shutdown = nxp_stm_clockevent_shutdown;
+ stm_timer->ced.set_state_periodic = nxp_stm_clockevent_set_periodic;
+ stm_timer->ced.set_next_event = nxp_stm_clockevent_set_next_event;
+ stm_timer->ced.suspend = nxp_stm_clockevent_suspend;
+ stm_timer->ced.resume = nxp_stm_clockevent_resume;
+ stm_timer->ced.cpumask = cpumask_of(cpu);
+ stm_timer->ced.rating = 460;
+ stm_timer->ced.irq = irq;
+
+ per_cpu(stm_timers, cpu) = stm_timer;
+
+ nxp_stm_module_get(stm_timer);
+
+ dev_dbg(dev, "Initialized per cpu clockevent name=%s, irq=%d, cpu=%d\n", name, irq, cpu);
+
+ return 0;
+}
+
+static int nxp_stm_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
+ int ret;
+
+ if (WARN_ON(!stm_timer))
+ return -EFAULT;
+
+ ret = irq_force_affinity(stm_timer->ced.irq, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
+	/*
+	 * Timing measurements show that reading the counter register
+	 * and writing to the comparator register take at most 1100 ns
+	 * at a 133 MHz clock rate. The timer must be set above this
+	 * value; to be safe, we set the minimum value to 2000 ns,
+	 * i.e. 2 us.
+	 *
+	 * minimum ticks = (rate / MICRO) * 2
+	 */
+ clockevents_config_and_register(&stm_timer->ced, stm_timer->rate,
+ (stm_timer->rate / MICRO) * 2, ULONG_MAX);
+
+ return 0;
+}
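
A worked instance of the min_delta arithmetic above, assuming the 133 MHz module clock mentioned in the comment:

/*
 * rate = 133000000 Hz, MICRO = 1000000:
 *   min_delta = (133000000 / 1000000) * 2 = 266 ticks
 * and 266 ticks at 133 MHz is exactly 2 us, comfortably above the
 * measured 1100 ns register-access latency.
 */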
+
+static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
+{
+ struct stm_timer *stm_timer = dev_id;
+ struct clock_event_device *ced = &stm_timer->ced;
+ u32 val;
+
+	/*
+	 * The interrupt is shared across the channels in the module,
+	 * but this instance is configured to use only one channel.
+	 * Consequently, there is no point in testing the interrupt
+	 * flags first, and we can directly clear the channel 0
+	 * interrupt flag register.
+	 */
+ writel(STM_CIR_CIF, STM_CIR0(stm_timer->base));
+
+ /*
+ * Update STM_CMP value using the counter value
+ */
+ val = nxp_stm_clockevent_read_counter(stm_timer) + stm_timer->delta;
+
+ writel(val, STM_CMP0(stm_timer->base));
+
+	/*
+	 * The STM hardware does not support one-shot operation: it
+	 * raises an interrupt and restarts the counter, so software
+	 * must disable the timer to stop the counter loop when in
+	 * ONESHOT mode.
+	 */
+ if (likely(clockevent_state_oneshot(ced)))
+ nxp_stm_clockevent_disable(stm_timer);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init nxp_stm_timer_probe(struct platform_device *pdev)
+{
+ struct stm_timer *stm_timer;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *name = of_node_full_name(np);
+ struct clk *clk;
+ void __iomem *base;
+ int irq, ret;
+
+	/*
+	 * The device tree can describe multiple STM nodes, which makes
+	 * this driver a good candidate for asynchronous probing. It is
+	 * still unclear whether the time framework correctly handles
+	 * parallel loading of the timers, but at least this driver is
+	 * ready to support the option.
+	 */
+ guard(stm_instances)(&stm_instances_lock);
+
+ /*
+ * The S32Gx are SoCs featuring a diverse set of cores. Linux
+ * is expected to run on Cortex-A53 cores, while other
+ * software stacks will operate on Cortex-M cores. The number
+ * of STM instances has been sized to include at most one
+ * instance per core.
+ *
+	 * As we need a clocksource and a clockevent per CPU, we
+	 * simply initialize a clocksource per CPU along with the
+	 * clockevent, which makes the resulting code simpler.
+	 *
+	 * However, if the device tree describes more STM instances
+	 * than there are cores, the extra instances are ignored.
+ */
+ if (stm_instances >= num_possible_cpus())
+ return 0;
+
+ base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "Failed to iomap %pOFn\n", np);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Clock not found\n");
+
+ stm_timer = devm_kzalloc(dev, sizeof(*stm_timer), GFP_KERNEL);
+ if (!stm_timer)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, nxp_stm_module_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING, name, stm_timer);
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to allocate interrupt line\n");
+
+ ret = nxp_stm_clocksource_init(dev, stm_timer, name, base, clk);
+ if (ret)
+ return ret;
+
+	/*
+	 * Each probed STM becomes a per-CPU clockevent. Until we have
+	 * probed as many instances as there are CPUs available on the
+	 * system, we only do a partial initialization.
+	 */
+ ret = nxp_stm_clockevent_per_cpu_init(dev, stm_timer, name,
+ base, irq, clk,
+ stm_instances);
+ if (ret)
+ return ret;
+
+ stm_instances++;
+
+	/*
+	 * Once the number of STMs probed as per-CPU clockevents
+	 * equals the number of available CPUs on the system, install
+	 * the CPU hotplug callback to finish the initialization by
+	 * registering the clockevents.
+	 */
+ if (stm_instances == num_possible_cpus()) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "STM timer:starting",
+ nxp_stm_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id nxp_stm_of_match[] = {
+ { .compatible = "nxp,s32g2-stm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nxp_stm_of_match);
+
+static struct platform_driver nxp_stm_probe = {
+ .probe = nxp_stm_timer_probe,
+ .driver = {
+ .name = "nxp-stm",
+ .of_match_table = nxp_stm_of_match,
+ },
+};
+module_platform_driver(nxp_stm_probe);
+
+MODULE_DESCRIPTION("NXP System Timer Module driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index 928da2f6de69..6e7944ffd7c0 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -5,6 +5,7 @@
* Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -27,6 +28,7 @@ struct stm32_lp_private {
u32 psc;
struct device *dev;
struct clk *clk;
+ u32 version;
};
static struct stm32_lp_private*
@@ -47,12 +49,46 @@ static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt)
return 0;
}
-static int stm32_clkevent_lp_set_timer(unsigned long evt,
- struct clock_event_device *clkevt,
- int is_periodic)
+static int stm32mp25_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
{
- struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+ u32 val;
+
+ regmap_read(priv->reg, STM32_LPTIM_CR, &val);
+ if (!FIELD_GET(STM32_LPTIM_ENABLE, val)) {
+ /* Enable LPTIMER to be able to write into IER and ARR registers */
+ regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
+ /*
+		 * After setting the ENABLE bit, a delay of two counter clock cycles is
+		 * needed before the LPTIM is actually enabled. At a 32 kHz rate this is
+		 * approximately 62.5 microseconds, so round it up to 63.
+ */
+ udelay(63);
+ }
+ /* set next event counter */
+ regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+ /* enable ARR interrupt */
+ regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE);
+
+ /* Poll DIEROK and ARROK to ensure register access has completed */
+ ret = regmap_read_poll_timeout_atomic(priv->reg, STM32_LPTIM_ISR, val,
+ (val & STM32_LPTIM_DIEROK_ARROK) ==
+ STM32_LPTIM_DIEROK_ARROK,
+ 10, 500);
+ if (ret) {
+ dev_err(priv->dev, "access to LPTIM timed out\n");
+ /* Disable LPTIMER */
+ regmap_write(priv->reg, STM32_LPTIM_CR, 0);
+ return ret;
+ }
+ /* Clear DIEROK and ARROK flags */
+ regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_DIEROKCF_ARROKCF);
+ return 0;
+}
+
+static void stm32_clkevent_lp_set_evt(struct stm32_lp_private *priv, unsigned long evt)
+{
/* disable LPTIMER to be able to write into IER register*/
regmap_write(priv->reg, STM32_LPTIM_CR, 0);
/* enable ARR interrupt */
@@ -61,6 +97,22 @@ static int stm32_clkevent_lp_set_timer(unsigned long evt,
regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE);
/* set next event counter */
regmap_write(priv->reg, STM32_LPTIM_ARR, evt);
+}
+
+static int stm32_clkevent_lp_set_timer(unsigned long evt,
+ struct clock_event_device *clkevt,
+ int is_periodic)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+ int ret;
+
+ if (priv->version == STM32_LPTIM_VERR_23) {
+ ret = stm32mp25_clkevent_lp_set_evt(priv, evt);
+ if (ret)
+ return ret;
+ } else {
+ stm32_clkevent_lp_set_evt(priv, evt);
+ }
/* start counter */
if (is_periodic)
@@ -176,6 +228,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return -ENOMEM;
priv->reg = ddata->regmap;
+ priv->version = ddata->version;
priv->clk = ddata->clk;
ret = clk_prepare_enable(priv->clk);
if (ret)
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index 5d4cf5237a11..e5394f98a02e 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2019-2020 NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2019-2025 NVIDIA Corporation. All rights reserved.
*/
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -29,6 +30,7 @@
#define TMRSR 0x004
#define TMRSR_INTR_CLR BIT(30)
+#define TMRSR_PCV GENMASK(28, 0)
#define TMRCSSR 0x008
#define TMRCSSR_SRC_USEC (0 << 0)
@@ -45,6 +47,9 @@
#define WDTCR_TIMER_SOURCE_MASK 0xf
#define WDTCR_TIMER_SOURCE(x) ((x) & 0xf)
+#define WDTSR 0x004
+#define WDTSR_CURRENT_EXPIRATION_COUNT GENMASK(14, 12)
+
#define WDTCMDR 0x008
#define WDTCMDR_DISABLE_COUNTER BIT(1)
#define WDTCMDR_START_COUNTER BIT(0)
@@ -169,18 +174,6 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
value &= ~WDTCR_PERIOD_MASK;
value |= WDTCR_PERIOD(1);
- /* enable local interrupt for WDT petting */
- value |= WDTCR_LOCAL_INT_ENABLE;
-
- /* enable local FIQ and remote interrupt for debug dump */
- if (0)
- value |= WDTCR_REMOTE_INT_ENABLE |
- WDTCR_LOCAL_FIQ_ENABLE;
-
- /* enable system debug reset (doesn't properly reboot) */
- if (0)
- value |= WDTCR_SYSTEM_DEBUG_RESET_ENABLE;
-
/* enable system POR reset */
value |= WDTCR_SYSTEM_POR_RESET_ENABLE;
@@ -234,12 +227,69 @@ static int tegra186_wdt_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
+ u32 expiration, val;
+ u64 timeleft;
+
+ if (!watchdog_active(&wdt->base)) {
+ /* return zero if the watchdog timer is not activated. */
+ return 0;
+ }
+
+ /*
+ * Reset occurs on the fifth expiration of the
+ * watchdog timer and so when the watchdog timer is configured,
+ * the actual value programmed into the counter is 1/5 of the
+ * timeout value. Once the counter reaches 0, expiration count
+ * will be increased by 1 and the down counter restarts.
+ * Hence to get the time left before system reset we must
+ * combine 2 parts:
+ * 1. value of the current down counter
+ * 2. (number of counter expirations remaining) * (timeout/5)
+ */
+
+	/*
+	 * Get the current number of counter expirations. This should
+	 * be a value between 0 and 4.
+	 */
+ val = readl_relaxed(wdt->regs + WDTSR);
+ expiration = FIELD_GET(WDTSR_CURRENT_EXPIRATION_COUNT, val);
+ if (WARN_ON_ONCE(expiration > 4))
+ return 0;
+
+ /* Get the current counter value in microsecond. */
+ val = readl_relaxed(wdt->tmr->regs + TMRSR);
+ timeleft = FIELD_GET(TMRSR_PCV, val);
+
+ /*
+ * Calculate the time remaining by adding the time for the
+ * counter value to the time of the counter expirations that
+ * remain.
+ */
+ timeleft += (((u64)wdt->base.timeout * USEC_PER_SEC) / 5) * (4 - expiration);
+
+	/*
+	 * Convert the remaining time to seconds, rounding to the
+	 * closest second. Cast u64 to u32 under the assumption that
+	 * no overflow happens when converting to seconds.
+	 */
+ timeleft = DIV_ROUND_CLOSEST_ULL(timeleft, USEC_PER_SEC);
+
+ if (WARN_ON_ONCE(timeleft > U32_MAX))
+ return U32_MAX;
+
+ return lower_32_bits(timeleft);
+}
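
A quick numeric check of the formula above, with hypothetical values: timeout = 10 s, so each of the five expirations covers 2 s; expiration = 1; the down counter reads 1500000 us:

/*
 * timeleft = 1500000 + ((10 * USEC_PER_SEC) / 5) * (4 - 1)
 *          = 1500000 + 2000000 * 3
 *          = 7500000 us
 * DIV_ROUND_CLOSEST_ULL(7500000, USEC_PER_SEC) = 8, so 8 s is reported.
 */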
+
static const struct watchdog_ops tegra186_wdt_ops = {
.owner = THIS_MODULE,
.start = tegra186_wdt_start,
.stop = tegra186_wdt_stop,
.ping = tegra186_wdt_ping,
.set_timeout = tegra186_wdt_set_timeout,
+ .get_timeleft = tegra186_wdt_get_timeleft,
};
static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
@@ -365,23 +415,10 @@ static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
-static irqreturn_t tegra186_timer_irq(int irq, void *data)
-{
- struct tegra186_timer *tegra = data;
-
- if (watchdog_active(&tegra->wdt->base)) {
- tegra186_wdt_disable(tegra->wdt);
- tegra186_wdt_enable(tegra->wdt);
- }
-
- return IRQ_HANDLED;
-}
-
static int tegra186_timer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra186_timer *tegra;
- unsigned int irq;
int err;
tegra = devm_kzalloc(dev, sizeof(*tegra), GFP_KERNEL);
@@ -400,8 +437,6 @@ static int tegra186_timer_probe(struct platform_device *pdev)
if (err < 0)
return err;
- irq = err;
-
/* create a watchdog using a preconfigured timer */
tegra->wdt = tegra186_wdt_create(tegra, 0);
if (IS_ERR(tegra->wdt)) {
@@ -428,17 +463,8 @@ static int tegra186_timer_probe(struct platform_device *pdev)
goto unregister_osc;
}
- err = devm_request_irq(dev, irq, tegra186_timer_irq, 0,
- "tegra186-timer", tegra);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ#%u: %d\n", irq, err);
- goto unregister_usec;
- }
-
return 0;
-unregister_usec:
- clocksource_unregister(&tegra->usec);
unregister_osc:
clocksource_unregister(&tegra->osc);
unregister_tsc:
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 393966c09740..002c0e76baff 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -27,14 +27,12 @@ static void comedi_buf_map_kref_release(struct kref *kref)
if (bm->page_list) {
if (bm->dma_dir != DMA_NONE) {
- /*
- * DMA buffer was allocated as a single block.
- * Address is in page_list[0].
- */
- buf = &bm->page_list[0];
- dma_free_coherent(bm->dma_hw_dev,
- PAGE_SIZE * bm->n_pages,
- buf->virt_addr, buf->dma_addr);
+ for (i = 0; i < bm->n_pages; i++) {
+ buf = &bm->page_list[i];
+ dma_free_coherent(bm->dma_hw_dev, PAGE_SIZE,
+ buf->virt_addr,
+ buf->dma_addr);
+ }
} else {
for (i = 0; i < bm->n_pages; i++) {
buf = &bm->page_list[i];
@@ -56,13 +54,7 @@ static void __comedi_buf_free(struct comedi_device *dev,
struct comedi_buf_map *bm;
unsigned long flags;
- if (async->prealloc_buf) {
- if (s->async_dma_dir == DMA_NONE)
- vunmap(async->prealloc_buf);
- async->prealloc_buf = NULL;
- async->prealloc_bufsz = 0;
- }
-
+ async->prealloc_bufsz = 0;
spin_lock_irqsave(&s->spin_lock, flags);
bm = async->buf_map;
async->buf_map = NULL;
@@ -94,26 +86,14 @@ comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
goto err;
if (bm->dma_dir != DMA_NONE) {
- void *virt_addr;
- dma_addr_t dma_addr;
-
- /*
- * Currently, the DMA buffer needs to be allocated as a
- * single block so that it can be mmap()'ed.
- */
- virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
- PAGE_SIZE * n_pages, &dma_addr,
- GFP_KERNEL);
- if (!virt_addr)
- goto err;
-
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
- buf->virt_addr = virt_addr + (i << PAGE_SHIFT);
- buf->dma_addr = dma_addr + (i << PAGE_SHIFT);
+ buf->virt_addr =
+ dma_alloc_coherent(bm->dma_hw_dev, PAGE_SIZE,
+ &buf->dma_addr, GFP_KERNEL);
+ if (!buf->virt_addr)
+ break;
}
-
- bm->n_pages = i;
} else {
for (i = 0; i < n_pages; i++) {
buf = &bm->page_list[i];
@@ -123,11 +103,10 @@ comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
SetPageReserved(virt_to_page(buf->virt_addr));
}
-
- bm->n_pages = i;
- if (i < n_pages)
- goto err;
}
+ bm->n_pages = i;
+ if (i < n_pages)
+ goto err;
return bm;
@@ -141,11 +120,8 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
unsigned int n_pages)
{
struct comedi_async *async = s->async;
- struct page **pages = NULL;
struct comedi_buf_map *bm;
- struct comedi_buf_page *buf;
unsigned long flags;
- unsigned int i;
if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
dev_err(dev->class_dev,
@@ -160,30 +136,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
spin_lock_irqsave(&s->spin_lock, flags);
async->buf_map = bm;
spin_unlock_irqrestore(&s->spin_lock, flags);
-
- if (bm->dma_dir != DMA_NONE) {
- /*
- * DMA buffer was allocated as a single block.
- * Address is in page_list[0].
- */
- buf = &bm->page_list[0];
- async->prealloc_buf = buf->virt_addr;
- } else {
- pages = vmalloc(sizeof(struct page *) * n_pages);
- if (!pages)
- return;
-
- for (i = 0; i < n_pages; i++) {
- buf = &bm->page_list[i];
- pages[i] = virt_to_page(buf->virt_addr);
- }
-
- /* vmap the pages to prealloc_buf */
- async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
- COMEDI_PAGE_PROTECTION);
-
- vfree(pages);
- }
+ async->prealloc_bufsz = n_pages << PAGE_SHIFT;
}
void comedi_buf_map_get(struct comedi_buf_map *bm)
@@ -264,7 +217,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
/* if no change is required, do nothing */
- if (async->prealloc_buf && async->prealloc_bufsz == new_size)
+ if (async->prealloc_bufsz == new_size)
return 0;
/* deallocate old buffer */
@@ -275,14 +228,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int n_pages = new_size >> PAGE_SHIFT;
__comedi_buf_alloc(dev, s, n_pages);
-
- if (!async->prealloc_buf) {
- /* allocation failed */
- __comedi_buf_free(dev, s);
+ if (!async->prealloc_bufsz)
return -ENOMEM;
- }
}
- async->prealloc_bufsz = new_size;
return 0;
}
@@ -365,6 +313,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int num_bytes)
{
struct comedi_async *async = s->async;
+ struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
@@ -376,15 +325,16 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
/* don't munge partial samples */
num_bytes -= num_bytes % num_sample_bytes;
while (count < num_bytes) {
- int block_size = num_bytes - count;
- unsigned int buf_end;
-
- buf_end = async->prealloc_bufsz - async->munge_ptr;
- if (block_size > buf_end)
- block_size = buf_end;
+ /*
+		 * Do not munge beyond a page boundary.
+ * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
+ */
+ unsigned int page = async->munge_ptr >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(async->munge_ptr);
+ unsigned int block_size =
+ min(num_bytes - count, PAGE_SIZE - offset);
- s->munge(s->device, s,
- async->prealloc_buf + async->munge_ptr,
+ s->munge(s->device, s, buf_page_list[page].virt_addr + offset,
block_size, async->munge_chan);
/*
@@ -397,7 +347,8 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
async->munge_chan %= async->cmd.chanlist_len;
async->munge_count += block_size;
async->munge_ptr += block_size;
- async->munge_ptr %= async->prealloc_bufsz;
+ if (async->munge_ptr == async->prealloc_bufsz)
+ async->munge_ptr = 0;
count += block_size;
}
@@ -558,46 +509,52 @@ static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
const void *data, unsigned int num_bytes)
{
struct comedi_async *async = s->async;
+ struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
unsigned int write_ptr = async->buf_write_ptr;
while (num_bytes) {
- unsigned int block_size;
-
- if (write_ptr + num_bytes > async->prealloc_bufsz)
- block_size = async->prealloc_bufsz - write_ptr;
- else
- block_size = num_bytes;
+ /*
+		 * Do not copy beyond a page boundary.
+ * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
+ */
+ unsigned int page = write_ptr >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(write_ptr);
+ unsigned int block_size = min(num_bytes, PAGE_SIZE - offset);
- memcpy(async->prealloc_buf + write_ptr, data, block_size);
+ memcpy(buf_page_list[page].virt_addr + offset,
+ data, block_size);
data += block_size;
num_bytes -= block_size;
-
- write_ptr = 0;
+ write_ptr += block_size;
+ if (write_ptr == async->prealloc_bufsz)
+ write_ptr = 0;
}
}
static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
void *dest, unsigned int nbytes)
{
- void *src;
struct comedi_async *async = s->async;
+ struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
unsigned int read_ptr = async->buf_read_ptr;
while (nbytes) {
- unsigned int block_size;
-
- src = async->prealloc_buf + read_ptr;
-
- if (nbytes >= async->prealloc_bufsz - read_ptr)
- block_size = async->prealloc_bufsz - read_ptr;
- else
- block_size = nbytes;
+ /*
+		 * Do not copy beyond a page boundary.
+ * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
+ */
+ unsigned int page = read_ptr >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(read_ptr);
+ unsigned int block_size = min(nbytes, PAGE_SIZE - offset);
- memcpy(dest, src, block_size);
+ memcpy(dest, buf_page_list[page].virt_addr + offset,
+ block_size);
nbytes -= block_size;
dest += block_size;
- read_ptr = 0;
+ read_ptr += block_size;
+ if (read_ptr == async->prealloc_bufsz)
+ read_ptr = 0;
}
}
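All of the rewritten helpers above share one idiom now that the buffer is a list of independently allocated pages: a transfer never crosses a page boundary in a single memcpy(), and the ring pointer wraps when it reaches prealloc_bufsz. The same pattern in a self-contained form (a sketch; pages is a plain array standing in for page_list[].virt_addr):

#include <stddef.h>
#include <string.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1U << PAGE_SHIFT)

/*
 * Write n bytes into a ring buffer built from non-contiguous
 * page-sized blocks. bufsz is a multiple of PAGE_SIZE and wp is
 * the current write pointer (0 <= wp < bufsz). Returns the new
 * write pointer.
 */
static unsigned int ring_write(void **pages, unsigned int bufsz,
			       unsigned int wp, const void *data,
			       unsigned int n)
{
	while (n) {
		unsigned int page = wp >> PAGE_SHIFT;
		unsigned int offset = wp & (PAGE_SIZE - 1);
		unsigned int block = n < PAGE_SIZE - offset ?
				     n : PAGE_SIZE - offset;

		memcpy((char *)pages[page] + offset, data, block);
		data = (const char *)data + block;
		n -= block;
		wp += block;
		if (wp == bufsz)	/* ring wraparound */
			wp = 0;
	}
	return wp;
}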
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index b9df9b19d4bd..3383a7ce27ff 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -2387,13 +2387,27 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
goto done;
}
if (bm->dma_dir != DMA_NONE) {
+ unsigned long vm_start = vma->vm_start;
+ unsigned long vm_end = vma->vm_end;
+
/*
- * DMA buffer was allocated as a single block.
- * Address is in page_list[0].
+ * Buffer pages are not contiguous, so temporarily modify VMA
+ * start and end addresses for each buffer page.
*/
- buf = &bm->page_list[0];
- retval = dma_mmap_coherent(bm->dma_hw_dev, vma, buf->virt_addr,
- buf->dma_addr, n_pages * PAGE_SIZE);
+ for (i = 0; i < n_pages; ++i) {
+ buf = &bm->page_list[i];
+ vma->vm_start = start;
+ vma->vm_end = start + PAGE_SIZE;
+ retval = dma_mmap_coherent(bm->dma_hw_dev, vma,
+ buf->virt_addr,
+ buf->dma_addr, PAGE_SIZE);
+ if (retval)
+ break;
+
+ start += PAGE_SIZE;
+ }
+ vma->vm_start = vm_start;
+ vma->vm_end = vm_end;
} else {
for (i = 0; i < n_pages; ++i) {
unsigned long pfn;
@@ -2407,19 +2421,18 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
start += PAGE_SIZE;
}
+ }
#ifdef CONFIG_MMU
- /*
- * Leaving behind a partial mapping of a buffer we're about to
- * drop is unsafe, see remap_pfn_range_notrack().
- * We need to zap the range here ourselves instead of relying
- * on the automatic zapping in remap_pfn_range() because we call
- * remap_pfn_range() in a loop.
- */
- if (retval)
- zap_vma_ptes(vma, vma->vm_start, size);
+ /*
+ * Leaving behind a partial mapping of a buffer we're about to drop is
+ * unsafe, see remap_pfn_range_notrack(). We need to zap the range
+ * here ourselves instead of relying on the automatic zapping in
+ * remap_pfn_range() because we call remap_pfn_range() in a loop.
+ */
+ if (retval)
+ zap_vma_ptes(vma, vma->vm_start, size);
#endif
- }
if (retval == 0) {
vma->vm_ops = &comedi_vm_ops;
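dma_mmap_coherent() maps the whole vma->vm_start..vma->vm_end range, so the per-page loop above narrows the VMA to a one-page window for each call and restores the real bounds before returning. The save/narrow/restore shape in isolation (a sketch; struct page_desc and map_one() are hypothetical stand-ins for the page list entry and the range-consuming helper):

#include <linux/mm.h>

struct page_desc;	/* hypothetical per-page descriptor */

static int map_pages_windowed(struct vm_area_struct *vma,
			      struct page_desc *pages, int n_pages,
			      int (*map_one)(struct vm_area_struct *,
					     struct page_desc *))
{
	unsigned long vm_start = vma->vm_start;
	unsigned long vm_end = vma->vm_end;
	unsigned long start = vm_start;
	int i, ret = 0;

	for (i = 0; i < n_pages; i++) {
		/* Present a one-page VMA window to the helper. */
		vma->vm_start = start;
		vma->vm_end = start + PAGE_SIZE;
		ret = map_one(vma, &pages[i]);
		if (ret)
			break;
		start += PAGE_SIZE;
	}

	/* Always restore the real bounds, success or failure. */
	vma->vm_start = vm_start;
	vma->vm_end = vm_end;
	return ret;
}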
@@ -2475,6 +2488,62 @@ done:
return mask;
}
+static unsigned int comedi_buf_copy_to_user(struct comedi_subdevice *s,
+ void __user *dest, unsigned int src_offset, unsigned int n)
+{
+ struct comedi_buf_map *bm = s->async->buf_map;
+ struct comedi_buf_page *buf_page_list = bm->page_list;
+ unsigned int page = src_offset >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(src_offset);
+
+ while (n) {
+ unsigned int copy_amount = min(n, PAGE_SIZE - offset);
+ unsigned int uncopied;
+
+ uncopied = copy_to_user(dest, buf_page_list[page].virt_addr +
+ offset, copy_amount);
+ copy_amount -= uncopied;
+ n -= copy_amount;
+ if (uncopied)
+ break;
+
+ dest += copy_amount;
+ page++;
+ if (page == bm->n_pages)
+ page = 0; /* buffer wraparound */
+ offset = 0;
+ }
+ return n;
+}
+
+static unsigned int comedi_buf_copy_from_user(struct comedi_subdevice *s,
+ unsigned int dst_offset, const void __user *src, unsigned int n)
+{
+ struct comedi_buf_map *bm = s->async->buf_map;
+ struct comedi_buf_page *buf_page_list = bm->page_list;
+ unsigned int page = dst_offset >> PAGE_SHIFT;
+ unsigned int offset = offset_in_page(dst_offset);
+
+ while (n) {
+ unsigned int copy_amount = min(n, PAGE_SIZE - offset);
+ unsigned int uncopied;
+
+ uncopied = copy_from_user(buf_page_list[page].virt_addr +
+ offset, src, copy_amount);
+ copy_amount -= uncopied;
+ n -= copy_amount;
+ if (uncopied)
+ break;
+
+ src += copy_amount;
+ page++;
+ if (page == bm->n_pages)
+ page = 0; /* buffer wraparound */
+ offset = 0;
+ }
+ return n;
+}
+
static ssize_t comedi_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *offset)
{
@@ -2516,7 +2585,6 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
add_wait_queue(&async->wait_head, &wait);
while (count == 0 && !retval) {
unsigned int runflags;
- unsigned int wp, n1, n2;
set_current_state(TASK_INTERRUPTIBLE);
@@ -2555,14 +2623,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
}
set_current_state(TASK_RUNNING);
- wp = async->buf_write_ptr;
- n1 = min(n, async->prealloc_bufsz - wp);
- n2 = n - n1;
- m = copy_from_user(async->prealloc_buf + wp, buf, n1);
- if (m)
- m += n2;
- else if (n2)
- m = copy_from_user(async->prealloc_buf, buf + n1, n2);
+ m = comedi_buf_copy_from_user(s, async->buf_write_ptr, buf, n);
if (m) {
n -= m;
retval = -EFAULT;
@@ -2651,8 +2712,6 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
add_wait_queue(&async->wait_head, &wait);
while (count == 0 && !retval) {
- unsigned int rp, n1, n2;
-
set_current_state(TASK_INTERRUPTIBLE);
m = comedi_buf_read_n_available(s);
@@ -2689,14 +2748,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
}
set_current_state(TASK_RUNNING);
- rp = async->buf_read_ptr;
- n1 = min(n, async->prealloc_bufsz - rp);
- n2 = n - n1;
- m = copy_to_user(buf, async->prealloc_buf + rp, n1);
- if (m)
- m += n2;
- else if (n2)
- m = copy_to_user(buf + n1, async->prealloc_buf, n2);
+ m = comedi_buf_copy_to_user(s, buf, async->buf_read_ptr, n);
if (m) {
n -= m;
retval = -EFAULT;
diff --git a/drivers/comedi/drivers/adl_pci9118.c b/drivers/comedi/drivers/adl_pci9118.c
index a76e2666d583..67c663892e48 100644
--- a/drivers/comedi/drivers/adl_pci9118.c
+++ b/drivers/comedi/drivers/adl_pci9118.c
@@ -32,7 +32,7 @@
* ranges).
*
* There are some hardware limitations:
- * a) You cann't use mixture of unipolar/bipoar ranges or differencial/single
+ * a) You can't use mixture of unipolar/bipolar ranges or differential/single
* ended inputs.
* b) DMA transfers must have the length aligned to two samples (32 bit),
* so there is some problems if cmd->chanlist_len is odd. This driver tries
@@ -227,7 +227,7 @@ struct pci9118_private {
struct pci9118_dmabuf dmabuf[2];
int softsshdelay; /*
* >0 use software S&H,
- * numer is requested delay in ns
+ * number is requested delay in ns
*/
unsigned char softsshsample; /*
* polarity of S&H signal
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index da17d891f0e5..9747e6d1f6eb 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -197,7 +197,8 @@ static unsigned short fake_waveform(struct comedi_device *dev,
*/
static void waveform_ai_timer(struct timer_list *t)
{
- struct waveform_private *devpriv = from_timer(devpriv, t, ai_timer);
+ struct waveform_private *devpriv = timer_container_of(devpriv, t,
+ ai_timer);
struct comedi_device *dev = devpriv->dev;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
@@ -444,7 +445,8 @@ static int waveform_ai_insn_read(struct comedi_device *dev,
*/
static void waveform_ao_timer(struct timer_list *t)
{
- struct waveform_private *devpriv = from_timer(devpriv, t, ao_timer);
+ struct waveform_private *devpriv = timer_container_of(devpriv, t,
+ ao_timer);
struct comedi_device *dev = devpriv->dev;
struct comedi_subdevice *s = dev->write_subdev;
struct comedi_async *async = s->async;
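The from_timer() to timer_container_of() conversions here and in the drivers below are mechanical: both macros are container_of() specialized for timer callbacks, recovering the private structure the timer_list is embedded in. A minimal sketch of the pattern:

#include <linux/timer.h>

struct demo_private {
	int ticks;
	struct timer_list timer;
};

static void demo_timer_fn(struct timer_list *t)
{
	/* Resolve the enclosing struct from the timer_list pointer. */
	struct demo_private *priv = timer_container_of(priv, t, timer);

	priv->ticks++;
}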
diff --git a/drivers/comedi/drivers/das16.c b/drivers/comedi/drivers/das16.c
index f5ca6c0d4d0c..1f85572c21b4 100644
--- a/drivers/comedi/drivers/das16.c
+++ b/drivers/comedi/drivers/das16.c
@@ -517,7 +517,8 @@ static void das16_interrupt(struct comedi_device *dev)
static void das16_timer_interrupt(struct timer_list *t)
{
- struct das16_private_struct *devpriv = from_timer(devpriv, t, timer);
+ struct das16_private_struct *devpriv = timer_container_of(devpriv, t,
+ timer);
struct comedi_device *dev = devpriv->dev;
unsigned long flags;
diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
index 75dce1ff2419..61792d940a3d 100644
--- a/drivers/comedi/drivers/jr3_pci.c
+++ b/drivers/comedi/drivers/jr3_pci.c
@@ -562,7 +562,8 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
static void jr3_pci_poll_dev(struct timer_list *t)
{
- struct jr3_pci_dev_private *devpriv = from_timer(devpriv, t, timer);
+ struct jr3_pci_dev_private *devpriv = timer_container_of(devpriv, t,
+ timer);
struct comedi_device *dev = devpriv->dev;
struct jr3_pci_subdev_private *spriv;
struct comedi_subdevice *s;
diff --git a/drivers/comedi/drivers/ni_atmio.c b/drivers/comedi/drivers/ni_atmio.c
index 330ae1c58800..b4e759e5703f 100644
--- a/drivers/comedi/drivers/ni_atmio.c
+++ b/drivers/comedi/drivers/ni_atmio.c
@@ -215,7 +215,7 @@ static const int ni_irqpin[] = {
#include "ni_mio_common.c"
-static const struct pnp_device_id device_ids[] = {
+static const struct pnp_device_id __maybe_unused device_ids[] = {
{.id = "NIC1900", .driver_data = 0},
{.id = "NIC2400", .driver_data = 0},
{.id = "NIC2500", .driver_data = 0},
diff --git a/drivers/comedi/drivers/ni_pcidio.c b/drivers/comedi/drivers/ni_pcidio.c
index 2d58e83420e8..2c7bb9c1ea5b 100644
--- a/drivers/comedi/drivers/ni_pcidio.c
+++ b/drivers/comedi/drivers/ni_pcidio.c
@@ -747,8 +747,6 @@ static int ni_pcidio_change(struct comedi_device *dev,
if (ret < 0)
return ret;
- memset(s->async->prealloc_buf, 0xaa, s->async->prealloc_bufsz);
-
return 0;
}
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 949598d51575..6c0c1d2d7027 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -3,22 +3,25 @@
* Copyright (c) 2021 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*/
+#include <linux/cleanup.h>
#include <linux/counter.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#define INTERRUPT_CNT_NAME "interrupt-cnt"
struct interrupt_cnt_priv {
- atomic_t count;
+ atomic_long_t count;
struct gpio_desc *gpio;
int irq;
bool enabled;
+ struct mutex lock;
struct counter_signal signals;
struct counter_synapse synapses;
struct counter_count cnts;
@@ -29,7 +32,7 @@ static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id)
struct counter_device *counter = dev_id;
struct interrupt_cnt_priv *priv = counter_priv(counter);
- atomic_inc(&priv->count);
+ atomic_long_inc(&priv->count);
counter_push_event(counter, COUNTER_EVENT_CHANGE_OF_STATE, 0);
@@ -41,6 +44,8 @@ static int interrupt_cnt_enable_read(struct counter_device *counter,
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
+ guard(mutex)(&priv->lock);
+
*enable = priv->enabled;
return 0;
@@ -51,6 +56,8 @@ static int interrupt_cnt_enable_write(struct counter_device *counter,
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
+ guard(mutex)(&priv->lock);
+
if (priv->enabled == enable)
return 0;
@@ -89,7 +96,7 @@ static int interrupt_cnt_read(struct counter_device *counter,
{
struct interrupt_cnt_priv *priv = counter_priv(counter);
- *val = atomic_read(&priv->count);
+ *val = atomic_long_read(&priv->count);
return 0;
}
@@ -102,7 +109,7 @@ static int interrupt_cnt_write(struct counter_device *counter,
if (val != (typeof(priv->count.counter))val)
return -ERANGE;
- atomic_set(&priv->count, val);
+ atomic_long_set(&priv->count, val);
return 0;
}
@@ -227,6 +234,8 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
if (ret)
return ret;
+ mutex_init(&priv->lock);
+
ret = devm_counter_add(dev, counter);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to add counter\n");
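The new lock is taken with guard(mutex)(&priv->lock) from <linux/cleanup.h>, the kernel's scope-based equivalent of a mutex_lock()/mutex_unlock() pair: the mutex is dropped automatically when the enclosing scope ends, whichever return path is taken. The two spellings below are equivalent (a sketch):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

/* Classic form: every return path must drop the lock by hand. */
static int demo_read_classic(int *out)
{
	mutex_lock(&demo_lock);
	*out = demo_state;
	mutex_unlock(&demo_lock);
	return 0;
}

/* guard() form: the unlock runs automatically at end of scope. */
static int demo_read_guarded(int *out)
{
	guard(mutex)(&demo_lock);

	*out = demo_state;
	return 0;	/* demo_lock is released here */
}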
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 1de3c50b9804..1a299d1f350b 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -337,6 +337,28 @@ static struct counter_comp mchp_tc_count_ext[] = {
COUNTER_COMP_COMPARE(mchp_tc_count_compare_read, mchp_tc_count_compare_write),
};
+static int mchp_tc_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ if (watch->channel == COUNTER_MCHP_EVCHN_CV || watch->channel == COUNTER_MCHP_EVCHN_RA)
+ switch (watch->event) {
+ case COUNTER_EVENT_CHANGE_OF_STATE:
+ case COUNTER_EVENT_OVERFLOW:
+ case COUNTER_EVENT_CAPTURE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ if (watch->channel == COUNTER_MCHP_EVCHN_RB && watch->event == COUNTER_EVENT_CAPTURE)
+ return 0;
+
+ if (watch->channel == COUNTER_MCHP_EVCHN_RC && watch->event == COUNTER_EVENT_THRESHOLD)
+ return 0;
+
+ return -EINVAL;
+}
+
static struct counter_count mchp_tc_counts[] = {
{
.id = 0,
@@ -356,7 +378,8 @@ static const struct counter_ops mchp_tc_ops = {
.function_read = mchp_tc_count_function_read,
.function_write = mchp_tc_count_function_write,
.action_read = mchp_tc_count_action_read,
- .action_write = mchp_tc_count_action_write
+ .action_write = mchp_tc_count_action_write,
+ .watch_validate = mchp_tc_watch_validate,
};
static const struct atmel_tcb_config tcb_rm9200_config = {
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index e75b69476a00..3d3384cbea87 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -669,12 +669,14 @@ static void stm32_timer_cnt_detect_channels(struct device *dev,
dev_dbg(dev, "has %d cc channels\n", priv->nchannels);
}
-/* encoder supported on TIM1 TIM2 TIM3 TIM4 TIM5 TIM8 */
-#define STM32_TIM_ENCODER_SUPPORTED (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(7))
+/* encoder supported on TIM1 TIM2 TIM3 TIM4 TIM5 TIM8 TIM20 */
+#define STM32_TIM_ENCODER_SUPPORTED (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(7) | \
+ BIT(19))
static const char * const stm32_timer_trigger_compat[] = {
"st,stm32-timer-trigger",
"st,stm32h7-timer-trigger",
+ "st,stm32mp25-timer-trigger",
};
static int stm32_timer_cnt_probe_encoder(struct device *dev,
@@ -846,6 +848,7 @@ static SIMPLE_DEV_PM_OPS(stm32_timer_cnt_pm_ops, stm32_timer_cnt_suspend,
static const struct of_device_id stm32_timer_cnt_of_match[] = {
{ .compatible = "st,stm32-timer-counter", },
+ { .compatible = "st,stm32mp25-timer-counter", },
{},
};
MODULE_DEVICE_TABLE(of, stm32_timer_cnt_of_match);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index d64b07ec48e5..78702a08364f 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -217,6 +217,18 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_RUST
+	tristate "Rust based generic DT cpufreq driver"
+ depends on HAVE_CLK && OF && RUST
+ select CPUFREQ_DT_PLATDEV
+ select PM_OPP
+ help
+	  This adds a Rust-based generic devicetree cpufreq driver for
+	  frequency management. It supports both uniprocessor (UP) and
+	  symmetric multiprocessor (SMP) systems.
+
+ If in doubt, say N.
+
config CPUFREQ_VIRT
tristate "Virtual cpufreq driver"
depends on GENERIC_ARCH_TOPOLOGY
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 22ab45209f9b..d38526b8e063 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d26b610e4f24..4f7f9201598d 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -79,11 +79,11 @@ static bool boost_state(unsigned int cpu)
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
+ rdmsrq_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
+ rdmsrq_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
@@ -110,14 +110,14 @@ static int boost_set_msr(bool enable)
return -EINVAL;
}
- rdmsrl(msr_addr, val);
+ rdmsrq(msr_addr, val);
if (enable)
val &= ~msr_mask;
else
val |= msr_mask;
- wrmsrl(msr_addr, val);
+ wrmsrq(msr_addr, val);
return 0;
}
@@ -660,7 +660,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
nominal_perf = perf_caps.nominal_perf;
if (nominal_freq)
- *nominal_freq = perf_caps.nominal_freq;
+ *nominal_freq = perf_caps.nominal_freq * 1000;
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index e671bc7d1550..447b9aa5ce40 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -31,6 +31,8 @@
#include <acpi/cppc_acpi.h>
+#include <asm/msr.h>
+
#include "amd-pstate.h"
@@ -90,9 +92,9 @@ static int amd_pstate_ut_check_enabled(u32 index)
if (get_shared_mem())
return 0;
- ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+ ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
if (ret) {
- pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+ pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
return ret;
}
@@ -137,7 +139,7 @@ static int amd_pstate_ut_check_perf(u32 index)
lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
lowest_perf = cppc_perf.lowest_perf;
} else {
- ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
if (ret) {
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
return ret;
@@ -242,25 +244,30 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
static int amd_pstate_ut_check_driver(u32 index)
{
enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
+ enum amd_pstate_mode orig_mode = amd_pstate_get_status();
+ int ret;
for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
- int ret = amd_pstate_set_mode(mode1);
+ ret = amd_pstate_set_mode(mode1);
if (ret)
return ret;
for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
if (mode1 == mode2)
continue;
ret = amd_pstate_set_mode(mode2);
- if (ret) {
- pr_err("%s: failed to update status for %s->%s\n", __func__,
- amd_pstate_get_mode_string(mode1),
- amd_pstate_get_mode_string(mode2));
- return ret;
- }
+ if (ret)
+ goto out;
}
}
- return 0;
+out:
+ if (ret)
+ pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2), ret);
+
+ amd_pstate_set_mode(orig_mode);
+ return ret;
}
static int __init amd_pstate_ut_init(void)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index b961f3a3b580..f3477ab37742 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -197,7 +197,7 @@ static u8 msr_get_epp(struct amd_cpudata *cpudata)
u64 value;
int ret;
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
if (ret < 0) {
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
return ret;
@@ -258,10 +258,10 @@ static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
return 0;
if (fast_switch) {
- wrmsrl(MSR_AMD_CPPC_REQ, value);
+ wrmsrq(MSR_AMD_CPPC_REQ, value);
return 0;
} else {
- int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
if (ret)
return ret;
@@ -309,7 +309,7 @@ static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
if (value == prev)
return 0;
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
if (ret) {
pr_err("failed to set energy perf value (%d)\n", ret);
return ret;
@@ -371,7 +371,7 @@ static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
static inline int msr_cppc_enable(struct cpufreq_policy *policy)
{
- return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
+ return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
}
static int shmem_cppc_enable(struct cpufreq_policy *policy)
@@ -389,9 +389,10 @@ static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
static int msr_init_perf(struct amd_cpudata *cpudata)
{
union perf_cached perf = READ_ONCE(cpudata->perf);
- u64 cap1, numerator;
+ u64 cap1, numerator, cppc_req;
+ u8 min_perf;
- int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+ int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
@@ -400,6 +401,22 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
+	ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
+ min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
+
+	/*
+	 * Clear out the min_perf part to check whether the rest of the MSR is 0; if so, the
+	 * min_perf value is the one specified through the "Requested CPU Min Frequency" BIOS option.
+	 */
+ cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
+
+ if (!cppc_req)
+ perf.bios_min_perf = min_perf;
+
perf.highest_perf = numerator;
perf.max_limit_perf = numerator;
perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
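FIELD_GET() and FIELD_PREP() from <linux/bitfield.h> do the shifting implied by a GENMASK()-style field definition, so call sites like the ones above never spell out bit positions. A minimal sketch with a hypothetical 8-bit field (the real AMD_CPPC_*_MASK constants are defined in the x86 headers):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_MIN_PERF_MASK	GENMASK_ULL(7, 0)	/* hypothetical */

static u8 demo_get_min_perf(u64 req)
{
	/* Extract bits 7:0 and shift them down to bit 0. */
	return FIELD_GET(DEMO_MIN_PERF_MASK, req);
}

static u64 demo_set_min_perf(u64 req, u8 min_perf)
{
	/* Clear the field, then insert the new value. */
	req &= ~DEMO_MIN_PERF_MASK;
	return req | FIELD_PREP(DEMO_MIN_PERF_MASK, min_perf);
}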
@@ -417,6 +434,7 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
struct cppc_perf_caps cppc_perf;
union perf_cached perf = READ_ONCE(cpudata->perf);
u64 numerator;
+ bool auto_sel;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
@@ -438,7 +456,7 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf);
+ ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel);
if (ret) {
pr_warn("failed to get auto_sel, ret: %d\n", ret);
return 0;
@@ -518,8 +536,8 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
unsigned long flags;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
@@ -554,6 +572,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
if (!policy)
return;
+ /* limit the max perf when core performance boost feature is disabled */
+ if (!cpudata->boost_supported)
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
+
des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
@@ -563,10 +585,6 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
des_perf = 0;
}
- /* limit the max perf when core performance boost feature is disabled */
- if (!cpudata->boost_supported)
- max_perf = min_t(u8, perf.nominal_perf, max_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
@@ -580,20 +598,26 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
/*
* Initialize lower frequency limit (i.e.policy->min) with
- * lowest_nonlinear_frequency which is the most energy efficient
- * frequency. Override the initial value set by cpufreq core and
- * amd-pstate qos_requests.
+	 * lowest_nonlinear_frequency or, if one is specified in the BIOS, the BIOS min frequency.
+	 * Override the initial value set by cpufreq core and amd-pstate qos_requests.
*/
if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
struct cpufreq_policy *policy __free(put_cpufreq_policy) =
cpufreq_cpu_get(policy_data->cpu);
struct amd_cpudata *cpudata;
+ union perf_cached perf;
if (!policy)
return -EINVAL;
cpudata = policy->driver_data;
- policy_data->min = cpudata->lowest_nonlinear_freq;
+ perf = READ_ONCE(cpudata->perf);
+
+ if (perf.bios_min_perf)
+ policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
+ perf.bios_min_perf);
+ else
+ policy_data->min = cpudata->lowest_nonlinear_freq;
}
cpufreq_verify_within_cpu_limits(policy_data);
@@ -772,7 +796,7 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err;
}
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
+ ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) {
pr_err_once("failed to read initial CPU boost state!\n");
ret = -EIO;
@@ -791,7 +815,7 @@ exit_err:
static void amd_perf_ctl_reset(unsigned int cpu)
{
- wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+ wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}
#define CPPC_MAX_PERF U8_MAX
@@ -808,19 +832,16 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
}
-static void amd_pstate_update_limits(unsigned int cpu)
+static void amd_pstate_update_limits(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
bool highest_perf_changed = false;
+ unsigned int cpu = policy->cpu;
if (!amd_pstate_prefcore)
return;
- if (!policy)
- return;
-
if (amd_get_highest_perf(cpu, &cur_high))
return;
@@ -831,8 +852,10 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (highest_perf_changed) {
WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
- if (cur_high < CPPC_MAX_PERF)
+ if (cur_high < CPPC_MAX_PERF) {
sched_set_itmt_core_prio((int)cur_high, cpu);
+ sched_update_asym_prefer_cpu(cpu, prev_high, cur_high);
+ }
}
}
@@ -1024,6 +1047,10 @@ free_cpudata1:
static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
@@ -1305,6 +1332,12 @@ static ssize_t amd_pstate_show_status(char *buf)
return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}
+int amd_pstate_get_status(void)
+{
+ return cppc_state;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_status);
+
int amd_pstate_update_status(const char *buf, size_t size)
{
int mode_idx;
@@ -1419,7 +1452,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata;
union perf_cached perf;
struct device *dev;
- u64 value;
int ret;
/*
@@ -1484,12 +1516,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
}
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- }
ret = amd_pstate_set_epp(policy, cpudata->epp_default);
if (ret)
return ret;
@@ -1509,6 +1535,11 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ /* Reset CPPC_REQ MSR to the BIOS value */
+ amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+
kfree(cpudata);
policy->driver_data = NULL;
}
@@ -1559,21 +1590,38 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
{
- pr_debug("AMD CPU Core %d going online\n", policy->cpu);
-
return amd_pstate_cppc_enable(policy);
}
-static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
{
- return 0;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+	/*
+	 * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+	 * min_perf value across kexec reboots. If this CPU is later onlined normally, the
+	 * limits, epp and desired perf will be reset to the values cached in the cpudata struct.
+	 */
+ return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
}
-static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+static int amd_pstate_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int ret;
+
+	/*
+	 * Reset the CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified
+	 * min_perf value across kexec reboots. If this CPU is simply resumed without a kexec,
+	 * the limits, epp and desired perf will be reset to the values cached in the cpudata struct.
+	 */
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ if (ret)
+ return ret;
/* invalidate to ensure it's rewritten during resume */
cpudata->cppc_req_cached = 0;
@@ -1584,6 +1632,17 @@ static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
return 0;
}
+static int amd_pstate_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+ int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
+
+ /* Set CPPC_REQ to last sane value until the governor updates it */
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
+ 0U, false);
+}
+
static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -1609,6 +1668,10 @@ static struct cpufreq_driver amd_pstate_driver = {
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
+ .online = amd_pstate_cpu_online,
+ .offline = amd_pstate_cpu_offline,
+ .suspend = amd_pstate_suspend,
+ .resume = amd_pstate_resume,
.set_boost = amd_pstate_set_boost,
.update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
@@ -1621,9 +1684,9 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
- .offline = amd_pstate_epp_cpu_offline,
- .online = amd_pstate_epp_cpu_online,
- .suspend = amd_pstate_epp_suspend,
+ .offline = amd_pstate_cpu_offline,
+ .online = amd_pstate_cpu_online,
+ .suspend = amd_pstate_suspend,
.resume = amd_pstate_epp_resume,
.update_limits = amd_pstate_update_limits,
.set_boost = amd_pstate_set_boost,
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index fbe1c08d3f06..cb45fdca27a6 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -30,6 +30,7 @@
* @lowest_perf: the absolute lowest performance level of the processor
* @min_limit_perf: Cached value of the performance corresponding to policy->min
* @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @bios_min_perf: Cached perf value corresponding to the "Requested CPU Min Frequency" BIOS option
*/
union perf_cached {
struct {
@@ -39,6 +40,7 @@ union perf_cached {
u8 lowest_perf;
u8 min_limit_perf;
u8 max_limit_perf;
+ u8 bios_min_perf;
};
u64 val;
};
@@ -119,6 +121,7 @@ enum amd_pstate_mode {
AMD_PSTATE_MAX,
};
const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode);
+int amd_pstate_get_status(void);
int amd_pstate_update_status(const char *buf, size_t size);
#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 59b19b9975e8..13fed4b9e02b 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -129,7 +129,7 @@ static int __init amd_freq_sensitivity_init(void)
pci_dev_put(pcidev);
}
- if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+ if (rdmsrq_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
return -ENODEV;
if (!(val >> CLASS_CODE_SHIFT))
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index cb93f00bafdb..b7c688a5659c 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -808,10 +808,119 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
+
+static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf)
+{
+ bool val;
+ int ret;
+
+ ret = cppc_get_auto_sel(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t store_auto_select(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_sel(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_auto_act_window(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_auto_act_window(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_auto_act_window(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 usec;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &usec);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_auto_act_window(policy->cpu, usec);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf)
+{
+ u64 val;
+ int ret;
+
+ ret = cppc_get_epp_perf(policy->cpu, &val);
+
+ /* show "<unsupported>" when this register is not supported by cpc */
+ if (ret == -EOPNOTSUPP)
+ return sysfs_emit(buf, "<unsupported>\n");
+
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_energy_performance_preference_val(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = cppc_set_epp(policy->cpu, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
cpufreq_freq_attr_ro(freqdomain_cpus);
+cpufreq_freq_attr_rw(auto_select);
+cpufreq_freq_attr_rw(auto_act_window);
+cpufreq_freq_attr_rw(energy_performance_preference_val);
static struct freq_attr *cppc_cpufreq_attr[] = {
&freqdomain_cpus,
+ &auto_select,
+ &auto_act_window,
+ &energy_performance_preference_val,
NULL,
};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f45ded62b0e0..d7426e1d8bdd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -255,51 +255,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-/**
- * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
- * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
- */
-void cpufreq_cpu_release(struct cpufreq_policy *policy)
-{
- if (WARN_ON(!policy))
- return;
-
- lockdep_assert_held(&policy->rwsem);
-
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
-}
-
-/**
- * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
- * @cpu: CPU to find the policy for.
- *
- * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
- * if the policy returned by it is not NULL, acquire its rwsem for writing.
- * Return the policy if it is active or release it and return NULL otherwise.
- *
- * The policy returned by this function has to be released with the help of
- * cpufreq_cpu_release() in order to release its rwsem and balance its usage
- * counter properly.
- */
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
- if (!policy)
- return NULL;
-
- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy)) {
- cpufreq_cpu_release(policy);
- return NULL;
- }
-
- return policy;
-}
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -636,6 +591,22 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
return sysfs_emit(buf, "%d\n", policy->boost_enabled);
}
+static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
+{
+ int ret;
+
+ if (policy->boost_enabled == enable)
+ return 0;
+
+ policy->boost_enabled = enable;
+
+ ret = cpufreq_driver->set_boost(policy, enable);
+ if (ret)
+ policy->boost_enabled = !policy->boost_enabled;
+
+ return ret;
+}
+
static ssize_t store_local_boost(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
@@ -651,21 +622,11 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!policy->boost_supported)
return -EINVAL;
- if (policy->boost_enabled == enable)
+ ret = policy_set_boost(policy, enable);
+ if (!ret)
return count;
- policy->boost_enabled = enable;
-
- cpus_read_lock();
- ret = cpufreq_driver->set_boost(policy, enable);
- cpus_read_unlock();
-
- if (ret) {
- policy->boost_enabled = !policy->boost_enabled;
- return ret;
- }
-
- return count;
+ return ret;
}
static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
@@ -845,7 +806,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- char str_governor[16];
+ char str_governor[CPUFREQ_NAME_LEN];
int ret;
ret = sscanf(buf, "%15s", str_governor);
@@ -956,9 +917,9 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
- ret = sscanf(buf, "%u", &freq);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &freq);
+ if (ret)
+ return ret;
policy->governor->store_setspeed(policy, freq);
@@ -1025,17 +986,16 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->show)
return -EIO;
- down_read(&policy->rwsem);
+ guard(cpufreq_policy_read)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->show(policy, buf);
- up_read(&policy->rwsem);
+ return fattr->show(policy, buf);
- return ret;
+ return -EBUSY;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
@@ -1043,17 +1003,16 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
- ssize_t ret = -EBUSY;
if (!fattr->store)
return -EIO;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (likely(!policy_is_inactive(policy)))
- ret = fattr->store(policy, buf, count);
- up_write(&policy->rwsem);
+ return fattr->store(policy, buf, count);
- return ret;
+ return -EBUSY;
}
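The down_read()/up_read() and down_write()/up_write() pairs in this file are being replaced with guard() and scoped_guard() throughout. That presumes guard classes for the policy rwsem declared somewhere in the cpufreq headers, presumably with DEFINE_GUARD() along these lines (a sketch, not the exact upstream declaration):

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

/*
 * With these definitions, "guard(cpufreq_policy_read)(policy);"
 * takes policy->rwsem for reading and releases it when the
 * variable goes out of scope; scoped_guard() does the same for
 * an explicit block.
 */
DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
	     down_read(&_T->rwsem), up_read(&_T->rwsem))
DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
	     down_write(&_T->rwsem), up_write(&_T->rwsem))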
static void cpufreq_sysfs_release(struct kobject *kobj)
@@ -1211,7 +1170,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
+
if (has_target())
cpufreq_stop_governor(policy);
@@ -1222,7 +1182,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
- up_write(&policy->rwsem);
+
return ret;
}
@@ -1242,9 +1202,10 @@ static void handle_update(struct work_struct *work)
container_of(work, struct cpufreq_policy, update);
pr_debug("handle_update for cpu %u called\n", policy->cpu);
- down_write(&policy->rwsem);
+
+ guard(cpufreq_policy_write)(policy);
+
refresh_frequency_limits(policy);
- up_write(&policy->rwsem);
}
static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
@@ -1270,11 +1231,11 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
struct kobject *kobj;
struct completion *cmp;
- down_write(&policy->rwsem);
- cpufreq_stats_free_table(policy);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stats_free_table(policy);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ }
kobject_put(kobj);
/*
@@ -1350,7 +1311,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
- policy->cpu = cpu;
return policy;
err_min_qos_notifier:
@@ -1419,35 +1379,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
kfree(policy);
}
-static int cpufreq_online(unsigned int cpu)
+static int cpufreq_policy_online(struct cpufreq_policy *policy,
+ unsigned int cpu, bool new_policy)
{
- struct cpufreq_policy *policy;
- bool new_policy;
unsigned long flags;
unsigned int j;
int ret;
- pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
-
- /* Check if this CPU already has a policy to manage it */
- policy = per_cpu(cpufreq_cpu_data, cpu);
- if (policy) {
- WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
- if (!policy_is_inactive(policy))
- return cpufreq_add_policy_cpu(policy, cpu);
+ guard(cpufreq_policy_write)(policy);
- /* This is the only online CPU for the policy. Start over. */
- new_policy = false;
- down_write(&policy->rwsem);
- policy->cpu = cpu;
- policy->governor = NULL;
- } else {
- new_policy = true;
- policy = cpufreq_policy_alloc(cpu);
- if (!policy)
- return -ENOMEM;
- down_write(&policy->rwsem);
- }
+ policy->cpu = cpu;
+ policy->governor = NULL;
if (!new_policy && cpufreq_driver->online) {
/* Recover policy->cpus using related_cpus */
@@ -1470,7 +1412,7 @@ static int cpufreq_online(unsigned int cpu)
if (ret) {
pr_debug("%s: %d: initialization failed\n", __func__,
__LINE__);
- goto out_free_policy;
+ goto out_clear_policy;
}
/*
@@ -1621,7 +1563,55 @@ static int cpufreq_online(unsigned int cpu)
goto out_destroy_policy;
}
- up_write(&policy->rwsem);
+ return 0;
+
+out_destroy_policy:
+ for_each_cpu(j, policy->real_cpus)
+ remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
+
+out_offline_policy:
+ if (cpufreq_driver->offline)
+ cpufreq_driver->offline(policy);
+
+out_exit_policy:
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+out_clear_policy:
+ cpumask_clear(policy->cpus);
+
+ return ret;
+}
+
+static int cpufreq_online(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ bool new_policy;
+ int ret;
+
+ pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
+
+ /* Check if this CPU already has a policy to manage it */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy) {
+ WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+ if (!policy_is_inactive(policy))
+ return cpufreq_add_policy_cpu(policy, cpu);
+
+ /* This is the only online CPU for the policy. Start over. */
+ new_policy = false;
+ } else {
+ new_policy = true;
+ policy = cpufreq_policy_alloc(cpu);
+ if (!policy)
+ return -ENOMEM;
+ }
+
+ ret = cpufreq_policy_online(policy, cpu, new_policy);
+ if (ret) {
+ cpufreq_policy_free(policy);
+ return ret;
+ }
kobject_uevent(&policy->kobj, KOBJ_ADD);
@@ -1633,41 +1623,24 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ /*
+ * Let the per-policy boost flag mirror the cpufreq_driver boost during
+ * initialization for a new policy. For an existing policy, maintain the
+ * previous boost value unless global boost is disabled.
+ */
if (cpufreq_driver->set_boost && policy->boost_supported &&
- policy->boost_enabled != cpufreq_boost_enabled()) {
- policy->boost_enabled = cpufreq_boost_enabled();
- ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+ (new_policy || !cpufreq_boost_enabled())) {
+ ret = policy_set_boost(policy, cpufreq_boost_enabled());
if (ret) {
/* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
- str_enable_disable(policy->boost_enabled));
- policy->boost_enabled = !policy->boost_enabled;
+ str_enable_disable(cpufreq_boost_enabled()));
}
}
pr_debug("initialization complete\n");
return 0;
-
-out_destroy_policy:
- for_each_cpu(j, policy->real_cpus)
- remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
-
-out_offline_policy:
- if (cpufreq_driver->offline)
- cpufreq_driver->offline(policy);
-
-out_exit_policy:
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
-out_free_policy:
- cpumask_clear(policy->cpus);
- up_write(&policy->rwsem);
-
- cpufreq_policy_free(policy);
- return ret;
}
/**
@@ -1757,11 +1730,10 @@ static int cpufreq_offline(unsigned int cpu)
return 0;
}
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
__cpufreq_offline(cpu, policy);
- up_write(&policy->rwsem);
return 0;
}
@@ -1778,33 +1750,29 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
- down_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ if (cpu_online(cpu))
+ __cpufreq_offline(cpu, policy);
- if (cpu_online(cpu))
- __cpufreq_offline(cpu, policy);
+ remove_cpu_dev_symlink(policy, cpu, dev);
- remove_cpu_dev_symlink(policy, cpu, dev);
+ if (!cpumask_empty(policy->real_cpus))
+ return;
- if (!cpumask_empty(policy->real_cpus)) {
- up_write(&policy->rwsem);
- return;
- }
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy
+ * are removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
- /*
- * Unregister cpufreq cooling once all the CPUs of the policy are
- * removed.
- */
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
+ /* We did light-weight exit earlier, do full tear down now */
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
}
- /* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline && cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- up_write(&policy->rwsem);
-
cpufreq_policy_free(policy);
}
@@ -1874,27 +1842,26 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy;
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
- ret_freq = cpufreq_driver->get(cpu);
+ unsigned int ret_freq = cpufreq_driver->get(cpu);
+
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
return ret_freq;
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
policy = cpufreq_cpu_get(cpu);
- if (policy) {
- ret_freq = policy->cur;
- cpufreq_cpu_put(policy);
- }
+ if (policy)
+ return policy->cur;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get);
@@ -1906,15 +1873,13 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- ret_freq = policy->max;
- cpufreq_cpu_put(policy);
- }
+ policy = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->max;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
@@ -1926,15 +1891,13 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- ret_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- }
+ policy = cpufreq_cpu_get(cpu);
+ if (policy)
+ return policy->cpuinfo.max_freq;
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
@@ -1954,19 +1917,18 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
- if (policy) {
- down_read(&policy->rwsem);
- if (cpufreq_driver->get)
- ret_freq = __cpufreq_get(policy);
- up_read(&policy->rwsem);
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
- cpufreq_cpu_put(policy);
- }
+ guard(cpufreq_policy_read)(policy);
+
+ if (cpufreq_driver->get)
+ return __cpufreq_get(policy);
- return ret_freq;
+ return 0;
}
EXPORT_SYMBOL(cpufreq_get);
@@ -2025,9 +1987,9 @@ void cpufreq_suspend(void)
for_each_active_policy(policy) {
if (has_target()) {
- down_write(&policy->rwsem);
- cpufreq_stop_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ cpufreq_stop_governor(policy);
+ }
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -2068,9 +2030,9 @@ void cpufreq_resume(void)
pr_err("%s: Failed to resume driver: %s\n", __func__,
cpufreq_driver->name);
} else if (has_target()) {
- down_write(&policy->rwsem);
- ret = cpufreq_start_governor(policy);
- up_write(&policy->rwsem);
+ scoped_guard(cpufreq_policy_write, policy) {
+ ret = cpufreq_start_governor(policy);
+ }
if (ret)
pr_err("%s: Failed to start governor for CPU%u's policy\n",
@@ -2438,15 +2400,9 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- int ret;
-
- down_write(&policy->rwsem);
+ guard(cpufreq_policy_write)(policy);
- ret = __cpufreq_driver_target(policy, target_freq, relation);
-
- up_write(&policy->rwsem);
-
- return ret;
+ return __cpufreq_driver_target(policy, target_freq, relation);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
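The guard(cpufreq_policy_read/write) and scoped_guard() users above likewise presume lock-guard classes wrapping the policy rwsem. A hedged sketch, assuming DEFINE_GUARD() from <linux/cleanup.h> (the actual class definitions are not part of this hunk):

DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
	     down_read(&_T->rwsem), up_read(&_T->rwsem))
DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
	     down_write(&_T->rwsem), up_write(&_T->rwsem))

/* Hypothetical caller: the write lock is released at end of scope. */
static int example_set_target(struct cpufreq_policy *policy, unsigned int freq)
{
	guard(cpufreq_policy_write)(policy);

	return __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
}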
@@ -2618,31 +2574,6 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
* POLICY INTERFACE *
*********************************************************************/
-/**
- * cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy
- * is written
- * @cpu: CPU to find the policy for
- *
- * Reads the current cpufreq policy.
- */
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
-{
- struct cpufreq_policy *cpu_policy;
- if (!policy)
- return -EINVAL;
-
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
- return -EINVAL;
-
- memcpy(policy, cpu_policy, sizeof(*policy));
-
- cpufreq_cpu_put(cpu_policy);
- return 0;
-}
-EXPORT_SYMBOL(cpufreq_get_policy);
-
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
/**
@@ -2793,6 +2724,21 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
+static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
+{
+ guard(cpufreq_policy_write)(policy);
+
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
+ if (cpufreq_driver->get && has_target() &&
+ (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
+ return;
+
+ refresh_frequency_limits(policy);
+}
+
/**
* cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
* @cpu: CPU to re-evaluate the policy for.
@@ -2804,23 +2750,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- /*
- * BIOS might change freq behind our back
- * -> ask driver for current freq and notify governors about a change
- */
- if (cpufreq_driver->get && has_target() &&
- (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
- goto unlock;
-
- refresh_frequency_limits(policy);
-
-unlock:
- cpufreq_cpu_release(policy);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
@@ -2829,7 +2765,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
* @cpu: CPU to update the policy limits for.
*
* Invoke the driver's ->update_limits callback if present or call
- * cpufreq_update_policy() for @cpu.
+ * cpufreq_policy_refresh() for @cpu.
*/
void cpufreq_update_limits(unsigned int cpu)
{
@@ -2840,9 +2776,9 @@ void cpufreq_update_limits(unsigned int cpu)
return;
if (cpufreq_driver->update_limits)
- cpufreq_driver->update_limits(cpu);
+ cpufreq_driver->update_limits(policy);
else
- cpufreq_update_policy(cpu);
+ cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
@@ -2876,8 +2812,10 @@ static int cpufreq_boost_trigger_state(int state)
unsigned long flags;
int ret = 0;
- if (cpufreq_driver->boost_enabled == state)
- return 0;
+ /*
+ * Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
+	 * make sure all policies stay in sync with the global boost flag.
+ */
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver->boost_enabled = state;
@@ -2888,12 +2826,9 @@ static int cpufreq_boost_trigger_state(int state)
if (!policy->boost_supported)
continue;
- policy->boost_enabled = state;
- ret = cpufreq_driver->set_boost(policy, state);
- if (ret) {
- policy->boost_enabled = !policy->boost_enabled;
+ ret = policy_set_boost(policy, state);
+ if (ret)
goto err_reset_state;
- }
}
cpus_read_unlock();
@@ -3118,6 +3053,36 @@ static int __init cpufreq_core_init(void)
return 0;
}
+
+static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
+{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy) {
+ pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
+ return false;
+ }
+
+ return sugov_is_governor(policy);
+}
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
+{
+ unsigned int cpu;
+
+ /* Do not attempt EAS if schedutil is not being used. */
+ for_each_cpu(cpu, cpu_mask) {
+ if (!cpufreq_policy_is_good_for_eas(cpu)) {
+ pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
+ cpumask_pr_args(cpu_mask));
+ return false;
+ }
+ }
+
+ return true;
+}
+
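cpufreq_ready_for_eas() is intended for the scheduler's EAS setup path; a hypothetical caller, to show the intended shape (the name and context below are illustrative, not the real consumer):

/* Hypothetical: only proceed with EAS setup if schedutil governs the span. */
static bool example_may_enable_eas(const struct cpumask *span)
{
	return cpufreq_ready_for_eas(span);
}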
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index d23a97ba6478..320a0af2266a 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -225,12 +225,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
/* Enable Enhanced PowerSaver */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
- wrmsrl(MSR_IA32_MISC_ENABLE, val);
+ wrmsrq(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */
- rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ rdmsrq(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
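The rdmsrl()/wrmsrl() to rdmsrq()/wrmsrq() churn in this and the following files is a pure rename of the 64-bit ("quad word") MSR accessors; the semantics are unchanged. Sketch for illustration only:

#include <asm/msr.h>

static u64 example_read_misc_enable(void)
{
	u64 val;

	rdmsrq(MSR_IA32_MISC_ENABLE, val);	/* was rdmsrl() */
	return val;
}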
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 36494b855e41..fc5a58088b35 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -21,7 +21,6 @@
#include <linux/cpufreq.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#include <linux/timex.h>
#include <linux/io.h>
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ba9bf06f1c77..64587d318267 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -221,6 +221,7 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
+ * @pd_registered: Set when a perf domain is registered for this CPU.
* @hwp_notify_work: workqueue for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
@@ -260,6 +261,9 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
+#ifdef CONFIG_ENERGY_MODEL
+ bool pd_registered;
+#endif
struct delayed_work hwp_notify_work;
};
@@ -303,6 +307,7 @@ static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
+#define INTEL_PSTATE_CORE_SCALING 100000
#define HYBRID_SCALING_FACTOR_ADL 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
#define HYBRID_SCALING_FACTOR_LNL 86957
@@ -311,7 +316,7 @@ static int hybrid_scaling_factor;
static inline int core_get_scaling(void)
{
- return 100000;
+ return INTEL_PSTATE_CORE_SCALING;
}
#ifdef CONFIG_ACPI
@@ -601,7 +606,7 @@ static bool turbo_is_disabled(void)
if (!cpu_feature_enabled(X86_FEATURE_IDA))
return true;
- rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}
@@ -623,7 +628,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return (s16)ret;
@@ -640,7 +645,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
* MSR_HWP_REQUEST, so need to read and get EPP.
*/
if (!hwp_req_data) {
- epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+ epp = rdmsrq_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
&hwp_req_data);
if (epp)
return epp;
@@ -662,12 +667,12 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
- ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return ret;
epb = (epb & ~0x0f) | pref;
- wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+ wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
return 0;
}
@@ -765,7 +770,7 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
* function, so it cannot run in parallel with the update below.
*/
WRITE_ONCE(cpu->hwp_req_cached, value);
- ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ ret = wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
if (!ret)
cpu->epp_cached = epp;
@@ -919,7 +924,7 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
if (ratio <= 0) {
u64 cap;
- rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
ratio = HWP_GUARANTEED_PERF(cap);
}
@@ -948,12 +953,124 @@ static struct cpudata *hybrid_max_perf_cpu __read_mostly;
*/
static DEFINE_MUTEX(hybrid_capacity_lock);
+#ifdef CONFIG_ENERGY_MODEL
+#define HYBRID_EM_STATE_COUNT 4
+
+static int hybrid_active_power(struct device *dev, unsigned long *power,
+ unsigned long *freq)
+{
+ /*
+ * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
+ * of the maximum capacity such that two CPUs of the same type will be
+ * regarded as equally attractive if the utilization of each of them
+ * falls into the same bin, which should prevent tasks from being
+ * migrated between them too often.
+ *
+ * For this purpose, return the "frequency" of 2 for the first
+ * performance level and otherwise leave the value set by the caller.
+ */
+ if (!*freq)
+ *freq = 2;
+
+ /* No power information. */
+ *power = EM_MAX_POWER;
+
+ return 0;
+}
+
+static int hybrid_get_cost(struct device *dev, unsigned long freq,
+ unsigned long *cost)
+{
+ struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
+
+ /*
+ * The smaller the perf-to-frequency scaling factor, the larger the IPC
+ * ratio between the given CPU and the least capable CPU in the system.
+ * Regard that IPC ratio as the primary cost component and assume that
+ * the scaling factors for different CPU types will differ by at least
+ * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+ *
+ * Add the freq value to the cost, so that the cost of running on CPUs
+ * of the same type in different "utilization bins" is different.
+ */
+ *cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
+ /*
+ * Increase the cost slightly for CPUs able to access L3 to avoid
+ * touching it in case some other CPUs of the same type can do the work
+ * without it.
+ */
+ if (cacheinfo) {
+ unsigned int i;
+
+ /* Check if L3 cache is there. */
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 3) {
+ *cost += 2;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
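As a worked example of the cost term above (illustrative numbers only): with the MTL scaling factor of 80000 defined earlier, the IPC component is 100 * 100000 / 80000 = 125, against exactly 100 for a CPU at INTEL_PSTATE_CORE_SCALING, before the per-bin freq offset and the optional +2 for L3 access are added. A hypothetical helper isolating that division:

#include <linux/math64.h>

/* Hypothetical: the IPC cost term, with 100000 == INTEL_PSTATE_CORE_SCALING. */
static unsigned long example_ipc_cost(unsigned long scaling)
{
	return div_u64(100ULL * 100000, scaling);
}

/* example_ipc_cost(80000) == 125, example_ipc_cost(100000) == 100 */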
+static bool hybrid_register_perf_domain(unsigned int cpu)
+{
+ static const struct em_data_callback cb
+ = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ struct device *cpu_dev;
+
+ /*
+ * Registering EM perf domains without enabling asymmetric CPU capacity
+ * support is not really useful and one domain should not be registered
+ * more than once.
+ */
+ if (!hybrid_max_perf_cpu || cpudata->pd_registered)
+ return false;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return false;
+
+ if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask_of(cpu), false))
+ return false;
+
+ cpudata->pd_registered = true;
+
+ return true;
+}
+
+static void hybrid_register_all_perf_domains(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ hybrid_register_perf_domain(cpu);
+}
+
+static void hybrid_update_perf_domain(struct cpudata *cpu)
+{
+ if (cpu->pd_registered)
+ em_adjust_cpu_capacity(cpu->cpu);
+}
+#else /* !CONFIG_ENERGY_MODEL */
+static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; }
+static inline void hybrid_register_all_perf_domains(void) {}
+static inline void hybrid_update_perf_domain(struct cpudata *cpu) {}
+#endif /* CONFIG_ENERGY_MODEL */
+
static void hybrid_set_cpu_capacity(struct cpudata *cpu)
{
arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
hybrid_max_perf_cpu->capacity_perf,
cpu->capacity_perf,
cpu->pstate.max_pstate_physical);
+ hybrid_update_perf_domain(cpu);
+
+ topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
@@ -1042,6 +1159,11 @@ static void hybrid_refresh_cpu_capacity_scaling(void)
guard(mutex)(&hybrid_capacity_lock);
__hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Perf domains are not registered before setting hybrid_max_perf_cpu,
+ * so register them all after setting up CPU capacity scaling.
+ */
+ hybrid_register_all_perf_domains();
}
static void hybrid_init_cpu_capacity_scaling(bool refresh)
@@ -1069,7 +1191,7 @@ static void hybrid_init_cpu_capacity_scaling(bool refresh)
hybrid_refresh_cpu_capacity_scaling();
/*
* Disabling ITMT causes sched domains to be rebuilt to disable asym
- * packing and enable asym capacity.
+ * packing and enable asym capacity and EAS.
*/
sched_clear_itmt_support();
}
@@ -1091,7 +1213,7 @@ static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
{
u64 cap;
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(cpu->hwp_cap_cached, cap);
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
@@ -1147,6 +1269,14 @@ static void hybrid_update_capacity(struct cpudata *cpu)
}
hybrid_set_cpu_capacity(cpu);
+ /*
+ * If the CPU was offline to start with and it is going online for the
+ * first time, a perf domain needs to be registered for it if hybrid
+ * capacity scaling has been enabled already. In that case, sched
+ * domains need to be rebuilt to take the new perf domain into account.
+ */
+ if (hybrid_register_perf_domain(cpu->cpu))
+ em_rebuild_sched_domains();
unlock:
mutex_unlock(&hybrid_capacity_lock);
@@ -1165,7 +1295,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
- rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu, MSR_HWP_REQUEST, &value);
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
@@ -1212,7 +1342,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
}
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
- wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
@@ -1259,7 +1389,7 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
mutex_lock(&hybrid_capacity_lock);
@@ -1288,7 +1418,7 @@ static void set_power_ctl_ee_state(bool input)
u64 power_ctl;
mutex_lock(&intel_pstate_driver_lock);
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_ENABLE;
@@ -1296,7 +1426,7 @@ static void set_power_ctl_ee_state(bool input)
power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
- wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
mutex_unlock(&intel_pstate_driver_lock);
}
@@ -1305,7 +1435,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata);
static void intel_pstate_hwp_reenable(struct cpudata *cpu)
{
intel_pstate_hwp_enable(cpu);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
}
static int intel_pstate_suspend(struct cpufreq_policy *policy)
@@ -1356,9 +1486,11 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
-static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
- struct cpufreq_policy *policy)
+static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
+ struct cpudata *cpudata)
{
+ guard(cpufreq_policy_write)(policy);
+
if (hwp_active)
intel_pstate_get_hwp_cap(cpudata);
@@ -1368,42 +1500,34 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
refresh_frequency_limits(policy);
}
-static void intel_pstate_update_limits(unsigned int cpu)
+static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
- struct cpudata *cpudata;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+ policy = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
- return;
+ return false;
- cpudata = all_cpu_data[cpu];
+ __intel_pstate_update_max_freq(policy, cpudata);
- __intel_pstate_update_max_freq(cpudata, policy);
+ return true;
+}
- /* Prevent the driver from being unregistered now. */
- mutex_lock(&intel_pstate_driver_lock);
+static void intel_pstate_update_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpudata = all_cpu_data[policy->cpu];
- cpufreq_cpu_release(policy);
+ __intel_pstate_update_max_freq(policy, cpudata);
hybrid_update_capacity(cpudata);
-
- mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_update_limits_for_all(void)
{
int cpu;
- for_each_possible_cpu(cpu) {
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-
- if (!policy)
- continue;
-
- __intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
-
- cpufreq_cpu_release(policy);
- }
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(all_cpu_data[cpu]);
mutex_lock(&hybrid_capacity_lock);
@@ -1706,7 +1830,7 @@ static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribut
u64 power_ctl;
int enable;
- rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
return sprintf(buf, "%d\n", !enable);
}
@@ -1843,13 +1967,8 @@ static void intel_pstate_notify_work(struct work_struct *work)
{
struct cpudata *cpudata =
container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
- struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
-
- if (policy) {
- __intel_pstate_update_max_freq(cpudata, policy);
-
- cpufreq_cpu_release(policy);
+ if (intel_pstate_update_max_freq(cpudata)) {
/*
* The driver will not be unregistered while this function is
* running, so update the capacity without acquiring the driver
@@ -1858,7 +1977,7 @@ static void intel_pstate_notify_work(struct work_struct *work)
hybrid_update_capacity(cpudata);
}
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
@@ -1880,7 +1999,7 @@ void notify_hwp_interrupt(void)
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;
- rdmsrl_safe(MSR_HWP_STATUS, &value);
+ rdmsrq_safe(MSR_HWP_STATUS, &value);
if (!(value & status_mask))
return;
@@ -1897,7 +2016,7 @@ void notify_hwp_interrupt(void)
return;
ack_intr:
- wrmsrl_safe(MSR_HWP_STATUS, 0);
+ wrmsrq_safe(MSR_HWP_STATUS, 0);
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);
}
@@ -1908,8 +2027,8 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
return;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
raw_spin_lock_irq(&hwp_notify_lock);
cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
@@ -1936,9 +2055,9 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;
- /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+ /* wrmsrq_on_cpu has to be outside spinlock as this can result in IPC */
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
@@ -1977,9 +2096,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt till we activate again */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
- wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+ wrmsrq_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
intel_pstate_enable_hwp_interrupt(cpudata);
@@ -1993,7 +2112,7 @@ static int atom_get_min_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 8) & 0x7F;
}
@@ -2001,7 +2120,7 @@ static int atom_get_max_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_RATIOS, value);
return (value >> 16) & 0x7F;
}
@@ -2009,7 +2128,7 @@ static int atom_get_turbo_pstate(int not_used)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_RATIOS, value);
return value & 0x7F;
}
@@ -2044,7 +2163,7 @@ static int silvermont_get_scaling(void)
static int silvermont_freq_table[] = {
83300, 100000, 133300, 116700, 80000};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0x7;
WARN_ON(i > 4);
@@ -2060,7 +2179,7 @@ static int airmont_get_scaling(void)
83300, 100000, 133300, 116700, 80000,
93300, 90000, 88900, 87500};
- rdmsrl(MSR_FSB_FREQ, value);
+ rdmsrq(MSR_FSB_FREQ, value);
i = value & 0xF;
WARN_ON(i > 8);
@@ -2071,7 +2190,7 @@ static void atom_get_vid(struct cpudata *cpudata)
{
u64 value;
- rdmsrl(MSR_ATOM_CORE_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_VIDS, value);
cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
cpudata->vid.ratio = div_fp(
@@ -2079,7 +2198,7 @@ static void atom_get_vid(struct cpudata *cpudata)
int_tofp(cpudata->pstate.max_pstate -
cpudata->pstate.min_pstate));
- rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
+ rdmsrq(MSR_ATOM_CORE_TURBO_VIDS, value);
cpudata->vid.turbo = value & 0x7f;
}
@@ -2087,7 +2206,7 @@ static int core_get_min_pstate(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 40) & 0xFF;
}
@@ -2095,7 +2214,7 @@ static int core_get_max_pstate_physical(int cpu)
{
u64 value;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
return (value >> 8) & 0xFF;
}
@@ -2109,13 +2228,13 @@ static int core_get_tdp_ratio(int cpu, u64 plat_info)
int err;
/* Get the TDP level (0, 1, 2) to get ratios */
- err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
if (err)
return err;
/* TDP MSR are continuous starting at 0x648 */
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
- err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
+ err = rdmsrq_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
if (err)
return err;
@@ -2140,7 +2259,7 @@ static int core_get_max_pstate(int cpu)
int tdp_ratio;
int err;
- rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
+ rdmsrq_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
max_pstate = (plat_info >> 8) & 0xFF;
tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
@@ -2152,7 +2271,7 @@ static int core_get_max_pstate(int cpu)
return tdp_ratio;
}
- err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
+ err = rdmsrq_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
if (!err) {
int tar_levels;
@@ -2172,7 +2291,7 @@ static int core_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (value) & 255;
if (ret <= nont)
@@ -2201,7 +2320,7 @@ static int knl_get_turbo_pstate(int cpu)
u64 value;
int nont, ret;
- rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
+ rdmsrq_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
nont = core_get_max_pstate(cpu);
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
@@ -2247,7 +2366,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
* the CPU being updated, so force the register update to run on the
* right CPU.
*/
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, pstate));
}
@@ -2354,7 +2473,7 @@ static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
return;
hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
- wrmsrl(MSR_HWP_REQUEST, hwp_req);
+ wrmsrq(MSR_HWP_REQUEST, hwp_req);
cpu->last_update = cpu->sample.time;
}
@@ -2367,7 +2486,7 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
expired = time_after64(cpu->sample.time, cpu->last_update +
hwp_boost_hold_time_ns);
if (expired) {
- wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+ wrmsrq(MSR_HWP_REQUEST, cpu->hwp_req_cached);
cpu->hwp_boost_min = 0;
}
}
@@ -2428,8 +2547,8 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
u64 tsc;
local_irq_save(flags);
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
+ rdmsrq(MSR_IA32_APERF, aperf);
+ rdmsrq(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
local_irq_restore(flags);
@@ -2523,7 +2642,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
return;
cpu->pstate.current_pstate = pstate;
- wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+ wrmsrq(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_adjust_pstate(struct cpudata *cpu)
@@ -3103,19 +3222,19 @@ static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
WRITE_ONCE(cpu->hwp_req_cached, value);
if (fast_switch)
- wrmsrl(MSR_HWP_REQUEST, value);
+ wrmsrq(MSR_HWP_REQUEST, value);
else
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
u32 target_pstate, bool fast_switch)
{
if (fast_switch)
- wrmsrl(MSR_IA32_PERF_CTL,
+ wrmsrq(MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
else
- wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ wrmsrq_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
@@ -3259,7 +3378,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
intel_pstate_get_hwp_cap(cpu);
- rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ rdmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
@@ -3326,7 +3445,7 @@ static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
* written by it may not be suitable.
*/
value &= ~HWP_DESIRED_PERF(~0L);
- wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ wrmsrq_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
WRITE_ONCE(cpu->hwp_req_cached, value);
}
@@ -3576,7 +3695,7 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
- rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+ rdmsrq(MSR_MISC_PWR_MGMT, misc_pwr);
if (misc_pwr & BITMASK_OOB) {
pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
@@ -3632,7 +3751,7 @@ static bool intel_pstate_hwp_is_enabled(void)
{
u64 value;
- rdmsrl(MSR_PM_ENABLE, value);
+ rdmsrq(MSR_PM_ENABLE, value);
return !!(value & 0x1);
}
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 68ccd73c8129..ba0e08c8486a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -136,7 +136,7 @@ static void do_longhaul1(unsigned int mults_index)
{
union msr_bcr2 bcr2;
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = mults_index & 0xff;
@@ -144,16 +144,16 @@ static void do_longhaul1(unsigned int mults_index)
/* Sync to timer tick */
safe_halt();
/* Change frequency on next halt or sleep */
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
/* Invoke transition */
ACPI_FLUSH_CPU_CACHE();
halt();
/* Disable software clock multiplier */
local_irq_disable();
- rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ rdmsrq(MSR_VIA_BCR2, bcr2.val);
bcr2.bits.ESOFTBF = 0;
- wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ wrmsrq(MSR_VIA_BCR2, bcr2.val);
}
/* For processor with Longhaul MSR */
@@ -164,7 +164,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
union msr_longhaul longhaul;
u32 t;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Setup new frequency */
if (!revid_errata)
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -180,7 +180,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
/* Raise voltage if necessary */
if (can_scale_voltage && dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -194,12 +194,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
/* Change frequency on next halt or sleep */
longhaul.bits.EnableSoftBusRatio = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
halt();
@@ -212,12 +212,12 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
}
/* Disable bus ratio bit */
longhaul.bits.EnableSoftBusRatio = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Reduce voltage if necessary */
if (can_scale_voltage && !dir) {
longhaul.bits.EnableSoftVID = 1;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
if (!cx_address) {
ACPI_FLUSH_CPU_CACHE();
@@ -231,7 +231,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
longhaul.bits.EnableSoftVID = 0;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ wrmsrq(MSR_VIA_LONGHAUL, longhaul.val);
}
}
@@ -534,7 +534,7 @@ static void longhaul_setup_voltagescaling(void)
unsigned int j, speed, pos, kHz_step, numvscales;
int min_vid_speed;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ rdmsrq(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
pr_info("Voltage scaling not supported by CPU\n");
return;
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index fb2197dc170f..31039330a3ba 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -219,13 +219,13 @@ static void change_FID(int fid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.FID != fid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.FID = fid;
fidvidctl.bits.VIDC = 0;
fidvidctl.bits.FIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -234,13 +234,13 @@ static void change_VID(int vid)
{
union msr_fidvidctl fidvidctl;
- rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ rdmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
if (fidvidctl.bits.VID != vid) {
fidvidctl.bits.SGTC = latency;
fidvidctl.bits.VID = vid;
fidvidctl.bits.FIDC = 0;
fidvidctl.bits.VIDC = 1;
- wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ wrmsrq(MSR_K7_FID_VID_CTL, fidvidctl.val);
}
}
@@ -260,7 +260,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
fid = powernow_table[index].driver_data & 0xFF;
vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
freqs.old = fsb * fid_codes[cfid] / 10;
@@ -557,7 +557,7 @@ static unsigned int powernow_get(unsigned int cpu)
if (cpu)
return 0;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
return fsb * fid_codes[cfid] / 10;
@@ -598,7 +598,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -ENODEV;
- rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ rdmsrq(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
recalibrate_cpu_khz();
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 4e3ba6e68c32..f7512b4e923e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc)
cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
if ((edx & P_STATE_TRANSITION_CAPABLE)
!= P_STATE_TRANSITION_CAPABLE) {
- pr_info("Power state transitions not supported\n");
+ pr_info_once("Power state transitions not supported\n");
return;
}
*rc = 0;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index afe5abf89d33..a8943e2a93be 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -667,7 +667,8 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
*/
static void gpstate_timer_handler(struct timer_list *t)
{
- struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
+ struct global_pstate_info *gpstates = timer_container_of(gpstates, t,
+ timer);
struct cpufreq_policy *policy = gpstates->policy;
int gpstate_idx, lpstate_idx;
unsigned long val;
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
new file mode 100644
index 000000000000..43c87d0259b6
--- /dev/null
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust based implementation of the cpufreq-dt driver.
+
+use kernel::{
+ c_str,
+ clk::Clk,
+ cpu, cpufreq,
+ cpumask::CpumaskVar,
+ device::{Core, Device},
+ error::code::*,
+ fmt,
+ macros::vtable,
+ module_platform_driver, of, opp, platform,
+ prelude::*,
+ str::CString,
+ sync::Arc,
+};
+
+/// Finds exact supply name from the OF node.
+fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
+ let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?;
+ dev.property_present(&prop_name)
+ .then(|| CString::try_from_fmt(fmt!("{name}")).ok())
+ .flatten()
+}
+
+/// Finds supply name for the CPU from DT.
+fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
+ // Try "cpu0" for older DTs, fallback to "cpu".
+ let name = (cpu.as_u32() == 0)
+ .then(|| find_supply_name_exact(dev, "cpu0"))
+ .flatten()
+ .or_else(|| find_supply_name_exact(dev, "cpu"))?;
+
+ let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?;
+ list.push(name, GFP_KERNEL).ok()?;
+
+ Some(list)
+}
+
+/// Represents the cpufreq dt device.
+struct CPUFreqDTDevice {
+ opp_table: opp::Table,
+ freq_table: opp::FreqTable,
+ _mask: CpumaskVar,
+ _token: Option<opp::ConfigToken>,
+ _clk: Clk,
+}
+
+#[derive(Default)]
+struct CPUFreqDTDriver;
+
+#[vtable]
+impl opp::ConfigOps for CPUFreqDTDriver {}
+
+#[vtable]
+impl cpufreq::Driver for CPUFreqDTDriver {
+ const NAME: &'static CStr = c_str!("cpufreq-dt");
+ const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
+ const BOOST_ENABLED: bool = true;
+
+ type PData = Arc<CPUFreqDTDevice>;
+
+ fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> {
+ let cpu = policy.cpu();
+ // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq
+ // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU device
+ // once the CPU is hot-unplugged.
+ let dev = unsafe { cpu::from_cpu(cpu)? };
+ let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?;
+
+ mask.set(cpu);
+
+ let token = find_supply_names(dev, cpu)
+ .map(|names| {
+ opp::Config::<Self>::new()
+ .set_regulator_names(names)?
+ .set(dev)
+ })
+ .transpose()?;
+
+ // Get OPP-sharing information from "operating-points-v2" bindings.
+ let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) {
+ Ok(()) => false,
+ Err(e) if e == ENOENT => {
+ // "operating-points-v2" not supported. If the platform hasn't
+ // set sharing CPUs, fallback to all CPUs share the `Policy`
+ // for backward compatibility.
+ opp::Table::sharing_cpus(dev, &mut mask).is_err()
+ }
+ Err(e) => return Err(e),
+ };
+
+ // Initialize OPP tables for all policy cpus.
+ //
+ // For platforms not using "operating-points-v2" bindings, we do this
+ // before updating policy cpus. Otherwise, we will end up creating
+ // duplicate OPPs for the CPUs.
+ //
+ // OPPs might be populated at runtime, don't fail for error here unless
+ // it is -EPROBE_DEFER.
+ let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) {
+ Ok(table) => table,
+ Err(e) => {
+ if e == EPROBE_DEFER {
+ return Err(e);
+ }
+
+ // The OPP table may have been added dynamically.
+ opp::Table::from_dev(dev)?
+ }
+ };
+
+ // The OPP table must be initialized, statically or dynamically, by this point.
+ opp_table.opp_count()?;
+
+ // Set sharing cpus for fallback scenario.
+ if fallback {
+ mask.setall();
+ opp_table.set_sharing_cpus(&mut mask)?;
+ }
+
+ let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
+ if transition_latency == 0 {
+ transition_latency = cpufreq::ETERNAL_LATENCY_NS;
+ }
+
+ policy
+ .set_dvfs_possible_from_any_cpu(true)
+ .set_suspend_freq(opp_table.suspend_freq())
+ .set_transition_latency_ns(transition_latency);
+
+ let freq_table = opp_table.cpufreq_table()?;
+ // SAFETY: The `freq_table` is not dropped while it is getting used by the C code.
+ unsafe { policy.set_freq_table(&freq_table) };
+
+ // SAFETY: The returned `clk` is not dropped while it is getting used by the C code.
+ let clk = unsafe { policy.set_clk(dev, None)? };
+
+ mask.copy(policy.cpus());
+
+ Ok(Arc::new(
+ CPUFreqDTDevice {
+ opp_table,
+ freq_table,
+ _mask: mask,
+ _token: token,
+ _clk: clk,
+ },
+ GFP_KERNEL,
+ )?)
+ }
+
+ fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result {
+ Ok(())
+ }
+
+ fn online(_policy: &mut cpufreq::Policy) -> Result {
+ // We did light-weight tear down earlier, nothing to do here.
+ Ok(())
+ }
+
+ fn offline(_policy: &mut cpufreq::Policy) -> Result {
+ // Preserve policy->data and don't free resources on light-weight
+ // tear down.
+ Ok(())
+ }
+
+ fn suspend(policy: &mut cpufreq::Policy) -> Result {
+ policy.generic_suspend()
+ }
+
+ fn verify(data: &mut cpufreq::PolicyData) -> Result {
+ data.generic_verify()
+ }
+
+ fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result {
+ let Some(data) = policy.data::<Self::PData>() else {
+ return Err(ENOENT);
+ };
+
+ let freq = data.freq_table.freq(index)?;
+ data.opp_table.set_rate(freq)
+ }
+
+ fn get(policy: &mut cpufreq::Policy) -> Result<u32> {
+ policy.generic_get()
+ }
+
+ fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result {
+ Ok(())
+ }
+
+ fn register_em(policy: &mut cpufreq::Policy) {
+ policy.register_em_opp()
+ }
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <CPUFreqDTDriver as platform::Driver>::IdInfo,
+ [(of::DeviceId::new(c_str!("operating-points-v2")), ())]
+);
+
+impl platform::Driver for CPUFreqDTDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _id_info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
+ cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
+ Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ }
+}
+
+module_platform_driver! {
+ type: CPUFreqDTDriver,
+ name: "cpufreq-dt",
+ author: "Viresh Kumar <viresh.kumar@linaro.org>",
+ description: "Generic CPUFreq DT driver",
+ license: "GPL v2",
+}
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 103d2519dff7..b360f03a116f 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -21,7 +21,6 @@
#include <linux/io.h>
#include <asm/cpu_device_id.h>
-#include <asm/msr.h>
#define MMCR_BASE 0xfffef000 /* The default base address */
#define OFFS_CPUCTL 0x2 /* CPU Control Register */
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 944e899eb1be..ef078426bfd5 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -393,6 +393,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.set_boost = cpufreq_boost_set_sw,
};
+static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
+{
+ struct device_node *scmi_np = dev_of_node(scmi_dev);
+ struct device_node *cpu_np, *np;
+ struct device *cpu_dev;
+ int cpu, idx;
+
+ if (!scmi_np)
+ return false;
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ cpu_np = dev_of_node(cpu_dev);
+
+ np = of_parse_phandle(cpu_np, "clocks", 0);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+
+ idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
+ np = of_parse_phandle(cpu_np, "power-domains", idx);
+ of_node_put(np);
+
+ if (np == scmi_np)
+ return true;
+ }
+
+ return false;
+}
+
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;
@@ -401,7 +435,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
handle = sdev->handle;
- if (!handle)
+ if (!handle || !scmi_dev_used_by_cpus(dev))
return -ENODEV;
scmi_cpufreq_driver.driver_data = sdev;
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 5fb5228f6bf1..2041f59116ce 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -43,7 +43,7 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
/* OSI mode is enabled, set the corresponding domain state. */
pd_state = state->data;
- psci_set_domain_state(*pd_state);
+ psci_set_domain_state(pd, pd->state_idx, *pd_state);
return 0;
}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index b46a83f5ffe4..4e1ba35deda9 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/psci.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -36,19 +36,30 @@ struct psci_cpuidle_data {
struct device *dev;
};
+struct psci_cpuidle_domain_state {
+ struct generic_pm_domain *pd;
+ unsigned int state_idx;
+ u32 state;
+};
+
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
-static DEFINE_PER_CPU(u32, domain_state);
+static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state);
static bool psci_cpuidle_use_syscore;
static bool psci_cpuidle_use_cpuhp;
-void psci_set_domain_state(u32 state)
+void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
+ u32 state)
{
- __this_cpu_write(domain_state, state);
+ struct psci_cpuidle_domain_state *ds = this_cpu_ptr(&psci_domain_state);
+
+ ds->pd = pd;
+ ds->state_idx = state_idx;
+ ds->state = state;
}
-static inline u32 psci_get_domain_state(void)
+static inline void psci_clear_domain_state(void)
{
- return __this_cpu_read(domain_state);
+ __this_cpu_write(psci_domain_state.state, 0);
}
static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
@@ -58,7 +69,8 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
u32 *states = data->psci_states;
struct device *pd_dev = data->dev;
- u32 state;
+ struct psci_cpuidle_domain_state *ds;
+ u32 state = states[idx];
int ret;
ret = cpu_pm_enter();
@@ -71,9 +83,9 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
else
pm_runtime_put_sync_suspend(pd_dev);
- state = psci_get_domain_state();
- if (!state)
- state = states[idx];
+ ds = this_cpu_ptr(&psci_domain_state);
+ if (ds->state)
+ state = ds->state;
trace_psci_domain_idle_enter(dev->cpu, state, s2idle);
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
@@ -86,8 +98,12 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
cpu_pm_exit();
+ /* Correct domain-idlestate statistics if we failed to enter. */
+ if (ret == -1 && ds->state)
+ pm_genpd_inc_rejected(ds->pd, ds->state_idx);
+
/* Clear the domain state to start fresh when back from idle. */
- psci_set_domain_state(0);
+ psci_clear_domain_state();
return ret;
}
@@ -121,7 +137,7 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
if (pd_dev) {
pm_runtime_put_sync(pd_dev);
/* Clear domain state to start fresh at next online. */
- psci_set_domain_state(0);
+ psci_clear_domain_state();
}
return 0;
@@ -147,7 +163,7 @@ static void psci_idle_syscore_switch(bool suspend)
/* Clear domain state to re-start fresh. */
if (!cleared) {
- psci_set_domain_state(0);
+ psci_clear_domain_state();
cleared = true;
}
}
@@ -407,14 +423,14 @@ deinit:
* to register cpuidle driver then rollback to cancel all CPUs
* registration.
*/
-static int psci_cpuidle_probe(struct platform_device *pdev)
+static int psci_cpuidle_probe(struct faux_device *fdev)
{
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
for_each_present_cpu(cpu) {
- ret = psci_idle_init_cpu(&pdev->dev, cpu);
+ ret = psci_idle_init_cpu(&fdev->dev, cpu);
if (ret)
goto out_fail;
}
@@ -434,26 +450,36 @@ out_fail:
return ret;
}
-static struct platform_driver psci_cpuidle_driver = {
+static struct faux_device_ops psci_cpuidle_ops = {
.probe = psci_cpuidle_probe,
- .driver = {
- .name = "psci-cpuidle",
- },
};
+static bool __init dt_idle_state_present(void)
+{
+ struct device_node *cpu_node __free(device_node) =
+ of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
+ if (!cpu_node)
+ return false;
+
+ struct device_node *state_node __free(device_node) =
+ of_get_cpu_state_node(cpu_node, 0);
+ if (!state_node)
+ return false;
+
+ return !!of_match_node(psci_idle_state_match, state_node);
+}
+
static int __init psci_idle_init(void)
{
- struct platform_device *pdev;
- int ret;
+ struct faux_device *fdev;
- ret = platform_driver_register(&psci_cpuidle_driver);
- if (ret)
- return ret;
+ if (!dt_idle_state_present())
+ return 0;
- pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- platform_driver_unregister(&psci_cpuidle_driver);
- return PTR_ERR(pdev);
+ fdev = faux_device_create("psci-cpuidle", NULL, &psci_cpuidle_ops);
+ if (!fdev) {
+ pr_err("Failed to create psci-cpuidle device\n");
+ return -ENODEV;
}
return 0;
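The probe conversion above swaps the dummy platform driver/device pair for a faux device. A minimal sketch of the same pattern, using only the faux_device_create(name, parent, ops) form seen in this hunk:

#include <linux/device/faux.h>

static int example_probe(struct faux_device *fdev)
{
	dev_info(&fdev->dev, "probed\n");
	return 0;
}

static struct faux_device_ops example_ops = {
	.probe = example_probe,
};

static int __init example_init(void)
{
	return faux_device_create("example", NULL, &example_ops) ? 0 : -ENODEV;
}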
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
index ef004ec7a7c5..d29cbd796cd5 100644
--- a/drivers/cpuidle/cpuidle-psci.h
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -4,8 +4,10 @@
#define __CPUIDLE_PSCI_H
struct device_node;
+struct generic_pm_domain;
-void psci_set_domain_state(u32 state);
+void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
+ u32 state);
int psci_dt_parse_state_node(struct device_node *np, u32 *state);
#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 39aa0aea61c6..52d5d26fc7c6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -255,7 +255,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
data->next_timer_ns = KTIME_MAX;
delta_tick = TICK_NSEC / 2;
- data->bucket = which_bucket(KTIME_MAX);
+ data->bucket = BUCKETS - 1;
}
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 8fe5e1b47ef9..bfa55c1eab5b 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -19,7 +19,7 @@
*
* Of course, non-timer wakeup sources are more important in some use cases,
* but even then it is generally unnecessary to consider idle duration values
- * greater than the time time till the next timer event, referred as the sleep
+ * greater than the time till the next timer event, referred as the sleep
* length in what follows, because the closest timer will ultimately wake up the
* CPU anyway unless it is woken up earlier.
*
@@ -311,7 +311,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct cpuidle_state *s = &drv->states[i];
/*
- * Update the sums of idle state mertics for all of the states
+ * Update the sums of idle state metrics for all of the states
* shallower than the current one.
*/
intercept_sum += prev_bin->intercepts;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47082782008a..9f8a3a5bed7e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -180,6 +180,7 @@ config CRYPTO_PAES_S390
depends on PKEY
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
+ select CRYPTO_ENGINE
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms for use with protected key.
@@ -530,13 +531,6 @@ source "drivers/crypto/cavium/nitrox/Kconfig"
source "drivers/crypto/marvell/Kconfig"
source "drivers/crypto/intel/Kconfig"
-config CRYPTO_DEV_CAVIUM_ZIP
- tristate "Cavium ZIP driver"
- depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- help
- Select this option if you want to enable compression/decompression
- acceleration on Cavium's ARM based SoCs
-
config CRYPTO_DEV_QCE
tristate "Qualcomm crypto engine accelerator"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c97f0ebc55ec..22eadcc8f4a2 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -8,12 +8,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
-obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
-obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
@@ -50,3 +47,4 @@ obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
obj-y += starfive/
+obj-y += cavium/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 19b7fb4a93e8..f9cf00d690e2 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -33,22 +33,30 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
- algt->stat_fb_leniv++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_leniv++;
+
return true;
}
if (areq->cryptlen == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
if (areq->cryptlen % 16) {
- algt->stat_fb_mod16++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_mod16++;
+
return true;
}
@@ -56,12 +64,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
sg = areq->src;
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
todo = min(len, sg->length);
if (todo % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
len -= todo;
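The sun8i-ce changes in this file repeatedly trade #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG blocks for IS_ENABLED() checks, which keep the statistics code visible to the compiler in every configuration while still compiling to nothing when the option is off. A hypothetical helper condensing the pattern:

#include <linux/kconfig.h>

/* Hypothetical: bump a debug counter only when the debug option is on. */
static inline void example_stat_inc(unsigned long *counter)
{
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
		(*counter)++;
}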
@@ -72,12 +84,16 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
sg = areq->dst;
while (sg) {
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_dstali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_dstali++;
+
return true;
}
todo = min(len, sg->length);
if (todo % 4) {
- algt->stat_fb_dstlen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_dstlen++;
+
return true;
}
len -= todo;
@@ -100,9 +116,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.skcipher.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt->stat_fb++;
-#endif
}
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -146,9 +160,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
op->keylen);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
flow = rctx->flow;
@@ -275,13 +288,16 @@ theend_sgs:
} else {
if (nr_sgs > 0)
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
+
+ if (nr_sgd > 0)
+ dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
}
theend_iv:
if (areq->iv && ivsize > 0) {
- if (rctx->addr_iv)
+ if (!dma_mapping_error(ce->dev, rctx->addr_iv))
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
memcpy(areq->iv, chan->backup_iv, ivsize);
@@ -434,17 +450,17 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
- memcpy(algt->fbname,
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_skcipher_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
- err = pm_runtime_get_sync(op->ce->dev);
+ err = pm_runtime_resume_and_get(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- pm_runtime_put_noidle(op->ce->dev);
crypto_free_skcipher(op->fallback_tfm);
return err;
}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index ec1ffda9ea32..658f520cee0c 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -832,13 +832,12 @@ static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
err = pm_runtime_set_suspended(ce->dev);
if (err)
return err;
- pm_runtime_enable(ce->dev);
- return err;
-}
-static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
-{
- pm_runtime_disable(ce->dev);
+ err = devm_pm_runtime_enable(ce->dev);
+ if (err)
+ return err;
+
+ return 0;
}
static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
@@ -1041,7 +1040,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
"sun8i-ce-ns", ce);
if (err) {
dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
- goto error_irq;
+ goto error_pm;
}
err = sun8i_ce_register_algs(ce);
@@ -1082,8 +1081,6 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return 0;
error_alg:
sun8i_ce_unregister_algs(ce);
-error_irq:
- sun8i_ce_pm_exit(ce);
error_pm:
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
return err;
@@ -1104,8 +1101,6 @@ static void sun8i_ce_remove(struct platform_device *pdev)
#endif
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
-
- sun8i_ce_pm_exit(ce);
}
static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
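
The core.c changes above convert the manual pm_runtime_enable()/pm_runtime_disable() pairing to devm_pm_runtime_enable(), which registers the disable as a device-managed action; that is why sun8i_ce_pm_exit() and the error_irq label can be deleted outright. A sketch of the shape this leaves probe in, with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	int err;

	err = pm_runtime_set_suspended(&pdev->dev);
	if (err)
		return err;

	/* The matching pm_runtime_disable() now runs automatically when the
	 * device unbinds or a later probe step fails, so neither remove()
	 * nor the probe error path has to undo it by hand. */
	return devm_pm_runtime_enable(&pdev->dev);
}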
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 6072dd9f390b..bef44f350167 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -23,6 +23,18 @@
#include <linux/string.h>
#include "sun8i-ce.h"
+static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
+ algt->stat_fb++;
+ }
+}
+
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
@@ -48,15 +60,16 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
- CRYPTO_MAX_ALG_NAME);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ memcpy(algt->fbname,
+ crypto_ahash_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
- err = pm_runtime_get_sync(op->ce->dev);
+ err = pm_runtime_resume_and_get(op->ce->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- pm_runtime_put_noidle(op->ce->dev);
crypto_free_ahash(op->fallback_tfm);
return err;
}
@@ -78,7 +91,9 @@ int sun8i_ce_hash_init(struct ahash_request *areq)
memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -90,7 +105,9 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -102,7 +119,9 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -113,21 +132,13 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
-
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ sun8i_ce_hash_stat_fb_inc(tfm);
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -139,10 +150,10 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -153,24 +164,14 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ sun8i_ce_hash_stat_fb_inc(tfm);
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
-
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -181,24 +182,14 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
-
- if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
- struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ sun8i_ce_hash_stat_fb_inc(tfm);
- algt = container_of(alg, struct sun8i_ce_alg_template,
- alg.hash.base);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_fb++;
-#endif
- }
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -213,22 +204,30 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
- algt->stat_fb_len0++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_len0++;
+
return true;
}
/* we need to reserve one SG for the padding */
if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
- algt->stat_fb_maxsg++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_maxsg++;
+
return true;
}
sg = areq->src;
while (sg) {
if (sg->length % 4) {
- algt->stat_fb_srclen++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srclen++;
+
return true;
}
if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
- algt->stat_fb_srcali++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_fb_srcali++;
+
return true;
}
sg = sg_next(sg);
@@ -244,21 +243,11 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
struct crypto_engine *engine;
- struct scatterlist *sg;
- int nr_sgs, e, i;
+ int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
- if (nr_sgs > MAX_SG - 1)
- return sun8i_ce_hash_digest_fb(areq);
-
- for_each_sg(areq->src, sg, nr_sgs, i) {
- if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
- return sun8i_ce_hash_digest_fb(areq);
- }
-
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
@@ -343,9 +332,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf = NULL;
+ void *buf, *result;
int j, i, todo;
- void *result = NULL;
u64 bs;
int digestsize;
dma_addr_t addr_res, addr_pad;
@@ -365,22 +353,22 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
if (!buf) {
err = -ENOMEM;
- goto theend;
+ goto err_out;
}
bf = (__le32 *)buf;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
if (!result) {
err = -ENOMEM;
- goto theend;
+ goto err_free_buf;
}
flow = rctx->flow;
chan = &ce->chanlist[flow];
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt->stat_req++;
-#endif
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
+ algt->stat_req++;
+
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
cet = chan->tl;
@@ -398,7 +386,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto theend;
+ goto err_free_result;
}
len = areq->nbytes;
@@ -411,7 +399,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (len > 0) {
dev_err(ce->dev, "remaining len %d\n", len);
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
@@ -419,7 +407,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_src;
}
byte_count = areq->nbytes;
@@ -441,7 +429,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
}
if (!j) {
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
@@ -450,7 +438,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (dma_mapping_error(ce->dev, addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
- goto theend;
+ goto err_unmap_result;
}
if (ce->variant->hash_t_dlen_in_bits)
@@ -463,16 +451,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+
+err_unmap_result:
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+ if (!err)
+ memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+err_unmap_src:
+ dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
-theend:
- kfree(buf);
+err_free_result:
kfree(result);
+
+err_free_buf:
+ kfree(buf);
+
+err_out:
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
+
return 0;
}
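
sun8i_ce_hash_run() above trades the single catch-all theend label for a ladder of labels ordered by how much state has been set up, so each failure unmaps and frees exactly what exists, and the digest is copied out only once err is known to be zero and before the source is unmapped. The idiom, reduced to allocations under hypothetical names:

#include <linux/slab.h>

static int foo_run(bool hw_ready)
{
	void *buf, *result;
	int err;

	buf = kzalloc(128, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}

	result = kzalloc(32, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto err_free_buf;
	}

	err = -ENODEV;
	if (!hw_ready)			/* stand-in for a mapping failure */
		goto err_free_result;

	err = 0;			/* real work would be submitted here */

	/* the success path falls through the same labels in reverse order */
err_free_result:
	kfree(result);
err_free_buf:
	kfree(buf);
err_out:
	return err;
}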
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 3b5c2af013d0..83df4d719053 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -308,8 +308,8 @@ struct sun8i_ce_hash_tfm_ctx {
* @flow: the flow to use for this request
*/
struct sun8i_ce_hash_reqctx {
- struct ahash_request fallback_req;
int flow;
+ struct ahash_request fallback_req; // keep at the end
};
/*
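
The header change above moves fallback_req to the end of sun8i_ce_hash_reqctx because the fallback transform's private request context is laid out directly after the struct ahash_request handed to it; the request size registered at init time covers both. A sketch of the layout rule, with hypothetical foo_* names:

#include <crypto/internal/hash.h>

struct foo_hash_reqctx {
	int flow;
	struct ahash_request fallback_req;	/* must stay last: the fallback
						 * tfm's own reqctx follows it
						 * in the same allocation */
};

static void foo_set_reqsize(struct crypto_ahash *tfm,
			    struct crypto_ahash *fallback_tfm)
{
	crypto_ahash_set_reqsize(tfm, sizeof(struct foo_hash_reqctx) +
				      crypto_ahash_reqsize(fallback_tfm));
}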
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 9b9605ce8ee6..8831bcb230c2 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -141,7 +141,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
/* we need to copy all IVs from the source in case DMA is bidirectional */
while (sg && len) {
- if (sg_dma_len(sg) == 0) {
+ if (sg->length == 0) {
sg = sg_next(sg);
continue;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 753f67a36dc5..8bc08089f044 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -150,7 +150,9 @@ int sun8i_ss_hash_init(struct ahash_request *areq)
memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -162,7 +164,9 @@ int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
@@ -174,7 +178,9 @@ int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -186,9 +192,10 @@ int sun8i_ss_hash_final(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, areq->result, 0);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
@@ -212,10 +219,10 @@ int sun8i_ss_hash_update(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, NULL, areq->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -227,12 +234,11 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
@@ -256,12 +262,11 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
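
Both hash drivers above stop poking fallback_req.base.flags, .src, .nbytes and .result directly and go through ahash_request_set_callback()/ahash_request_set_crypt(), which also forward the original completion callback instead of silently dropping it. The pattern in isolation, assuming a minimal hypothetical reqctx like the one sketched earlier:

#include <crypto/hash.h>

struct foo_hash_reqctx {
	struct ahash_request fallback_req;	/* keep at the end */
};

static int foo_hash_digest_fallback(struct ahash_request *areq,
				    struct crypto_ahash *fallback_tfm)
{
	struct foo_hash_reqctx *rctx = ahash_request_ctx(areq);

	ahash_request_set_tfm(&rctx->fallback_req, fallback_tfm);
	/* keep only MAY_SLEEP and forward the caller's completion hook */
	ahash_request_set_callback(&rctx->fallback_req,
				   areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   areq->base.complete, areq->base.data);
	ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
				areq->nbytes);
	return crypto_ahash_digest(&rctx->fallback_req);
}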
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index e0af611a95d8..38e8a61e9166 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -12,9 +12,6 @@
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <linux/hash.h>
-#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
@@ -72,7 +69,7 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
- __le32 iv[AES_IV_SIZE];
+ __le32 iv[AES_IV_SIZE / 4];
if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
return -EINVAL;
@@ -429,7 +426,7 @@ static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- __le32 iv[16];
+ __le32 iv[4];
u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
unsigned int len = req->cryptlen;
@@ -602,106 +599,3 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
{
return crypto4xx_crypt_aes_gcm(req, true);
}
-
-/*
- * HASH SHA1 Functions
- */
-static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
- unsigned int sa_len,
- unsigned char ha,
- unsigned char hm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg;
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_hash160 *sa;
- int rc;
-
- my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
- alg.u.hash);
- ctx->dev = my_alg->dev;
-
- /* Create SA */
- if (ctx->sa_in || ctx->sa_out)
- crypto4xx_free_sa(ctx);
-
- rc = crypto4xx_alloc_sa(ctx, sa_len);
- if (rc)
- return rc;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct crypto4xx_ctx));
- sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
- set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
- SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
- SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
- SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
- SA_OPCODE_HASH, DIR_INBOUND);
- set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
- CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
- SA_SEQ_MASK_OFF, SA_MC_ENABLE,
- SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
- SA_NOT_COPY_HDR);
- /* Need to zero hash digest in SA */
- memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
- memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
-
- return 0;
-}
-
-int crypto4xx_hash_init(struct ahash_request *req)
-{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int ds;
- struct dynamic_sa_ctl *sa;
-
- sa = ctx->sa_in;
- ds = crypto_ahash_digestsize(
- __crypto_ahash_cast(req->base.tfm));
- sa->sa_command_0.bf.digest_len = ds >> 2;
- sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
-
- return 0;
-}
-
-int crypto4xx_hash_update(struct ahash_request *req)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct scatterlist dst;
- unsigned int ds = crypto_ahash_digestsize(ahash);
-
- sg_init_one(&dst, req->result, ds);
-
- return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
- req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0, NULL);
-}
-
-int crypto4xx_hash_final(struct ahash_request *req)
-{
- return 0;
-}
-
-int crypto4xx_hash_digest(struct ahash_request *req)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct scatterlist dst;
- unsigned int ds = crypto_ahash_digestsize(ahash);
-
- sg_init_one(&dst, req->result, ds);
-
- return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
- req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0, NULL);
-}
-
-/*
- * SHA1 Algorithm
- */
-int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
-{
- return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
- SA_HASH_MODE_HASH);
-}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index ec3ccfa60445..8cdc66d520c9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -485,18 +485,6 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
}
}
-static void crypto4xx_copy_digest_to_dst(void *dst,
- struct pd_uinfo *pd_uinfo,
- struct crypto4xx_ctx *ctx)
-{
- struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
-
- if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy(dst, pd_uinfo->sr_va->save_digest,
- SA_HASH_ALG_SHA1_DIGEST_SIZE);
- }
-}
-
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
@@ -549,23 +537,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
skcipher_request_complete(req, 0);
}
-static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
- struct pd_uinfo *pd_uinfo)
-{
- struct crypto4xx_ctx *ctx;
- struct ahash_request *ahash_req;
-
- ahash_req = ahash_request_cast(pd_uinfo->async_req);
- ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
-
- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
- crypto4xx_ret_sg_desc(dev, pd_uinfo);
-
- if (pd_uinfo->state & PD_ENTRY_BUSY)
- ahash_request_complete(ahash_req, -EINPROGRESS);
- ahash_request_complete(ahash_req, 0);
-}
-
static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
@@ -642,9 +613,6 @@ static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
case CRYPTO_ALG_TYPE_AEAD:
crypto4xx_aead_done(dev, pd_uinfo, pd);
break;
- case CRYPTO_ALG_TYPE_AHASH:
- crypto4xx_ahash_done(dev, pd_uinfo);
- break;
}
}
@@ -676,7 +644,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
const unsigned int sa_len,
const unsigned int assoclen,
@@ -912,8 +880,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
pd->pd_ctl.w = PD_CTL_HOST_READY |
- ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
- (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
PD_CTL_HASH_FINAL : 0);
pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
@@ -1019,10 +986,6 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
rc = crypto_register_aead(&alg->alg.u.aead);
break;
- case CRYPTO_ALG_TYPE_AHASH:
- rc = crypto_register_ahash(&alg->alg.u.hash);
- break;
-
case CRYPTO_ALG_TYPE_RNG:
rc = crypto_register_rng(&alg->alg.u.rng);
break;
@@ -1048,10 +1011,6 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry);
switch (alg->alg.type) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_unregister_ahash(&alg->alg.u.hash);
- break;
-
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&alg->alg.u.aead);
break;
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 3adcc5e65694..ee36630c670f 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -16,7 +16,6 @@
#include <linux/ratelimit.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
@@ -135,7 +134,6 @@ struct crypto4xx_alg_common {
u32 type;
union {
struct skcipher_alg cipher;
- struct ahash_alg hash;
struct aead_alg aead;
struct rng_alg rng;
} u;
@@ -147,6 +145,12 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION >= 120000
+#define BUILD_PD_ACCESS __attribute__((access(read_only, 6, 7)))
+#else
+#define BUILD_PD_ACCESS
+#endif
+
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
@@ -154,11 +158,11 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
const unsigned int datalen,
- const __le32 *iv, const u32 iv_len,
+ const void *iv, const u32 iv_len,
const struct dynamic_sa_ctl *sa,
const unsigned int sa_len,
const unsigned int assoclen,
- struct scatterlist *dst_tmp);
+ struct scatterlist *dst_tmp) BUILD_PD_ACCESS;
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
@@ -177,11 +181,6 @@ int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
-int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-int crypto4xx_hash_digest(struct ahash_request *req);
-int crypto4xx_hash_final(struct ahash_request *req);
-int crypto4xx_hash_update(struct ahash_request *req);
-int crypto4xx_hash_init(struct ahash_request *req);
/*
* Note: Only use this function to copy items that are word aligned.
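
The BUILD_PD_ACCESS macro introduced above uses GCC 12's access function attribute to tell the compiler that crypto4xx_build_pd() only reads through its sixth parameter (iv), with the seventh (iv_len) bounding the access; together with the const void * change it lets callers pass IVs of any element type without casts. A reduced sketch of the attribute on a hypothetical declaration (parameter positions are 1-based):

/* parameter 1 is read-only; parameter 2 gives the number of bytes read */
#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12
#define READ_ONLY_BUF __attribute__((access(read_only, 1, 2)))
#else
#define READ_ONLY_BUF
#endif

unsigned int foo_checksum(const void *buf, unsigned int len) READ_ONLY_BUF;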
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 14bf86957d31..27c5d000b4b2 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1743,7 +1743,8 @@ static struct skcipher_alg aes_xts_alg = {
.base.cra_driver_name = "atmel-xts-aes",
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx),
- .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2220,7 +2221,7 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
{
- alg->cra_flags |= CRYPTO_ALG_ASYNC;
+ alg->cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->cra_alignmask = 0xf;
alg->cra_priority = ATMEL_AES_PRIORITY;
alg->cra_module = THIS_MODULE;
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 67a170608566..2cc36da163e8 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1254,7 +1254,8 @@ static int atmel_sha_cra_init(struct crypto_tfm *tfm)
static void atmel_sha_alg_init(struct ahash_alg *alg)
{
alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
- alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_cra_init;
@@ -2041,7 +2042,8 @@ static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
static void atmel_sha_hmac_alg_init(struct ahash_alg *alg)
{
alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY;
- alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx);
alg->halg.base.cra_module = THIS_MODULE;
alg->halg.base.cra_init = atmel_sha_hmac_cra_init;
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index de9717e221e4..098f5532f389 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -785,7 +785,7 @@ static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
alg->base.cra_priority = ATMEL_TDES_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
alg->base.cra_module = THIS_MODULE;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index f8d50bd227a6..75ee065da1ec 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2072,7 +2072,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
static void artpec6_crypto_timeout(struct timer_list *t)
{
- struct artpec6_crypto *ac = from_timer(ac, t, timer);
+ struct artpec6_crypto *ac = timer_container_of(ac, t, timer);
dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
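
from_timer() above becomes timer_container_of(); both are wrappers around container_of() that recover the structure embedding a struct timer_list from the callback argument, and the kernel has been renaming users to the latter. In isolation, with a hypothetical struct:

#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	unsigned int timeouts;
};

static void foo_timeout(struct timer_list *t)
{
	/* recover the struct foo whose .timer member is 't' */
	struct foo *f = timer_container_of(f, t, timer);

	f->timeouts++;
}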
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index d4b39184dbdb..38ff931059b4 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -573,6 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
+ { .soc_id = "i.MX8QM", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
diff --git a/drivers/crypto/cavium/Makefile b/drivers/crypto/cavium/Makefile
index 4679c06b611f..75227c587ed0 100644
--- a/drivers/crypto/cavium/Makefile
+++ b/drivers/crypto/cavium/Makefile
@@ -2,4 +2,5 @@
#
# Makefile for Cavium crypto device drivers
#
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += zip/
+obj-$(CONFIG_CRYPTO_DEV_CPT) += cpt/
+obj-$(CONFIG_CRYPTO_DEV_NITROX) += nitrox/
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
deleted file mode 100644
index 020d189d793d..000000000000
--- a/drivers/crypto/cavium/zip/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Cavium's ZIP Driver.
-#
-
-obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += thunderx_zip.o
-thunderx_zip-y := zip_main.o \
- zip_device.o \
- zip_crypto.o \
- zip_mem.o \
- zip_deflate.o \
- zip_inflate.o
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
deleted file mode 100644
index 54f6fb054119..000000000000
--- a/drivers/crypto/cavium/zip/common.h
+++ /dev/null
@@ -1,222 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __COMMON_H__
-#define __COMMON_H__
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-/* Device specific zlib function definitions */
-#include "zip_device.h"
-
-/* ZIP device definitions */
-#include "zip_main.h"
-
-/* ZIP memory allocation/deallocation related definitions */
-#include "zip_mem.h"
-
-/* Device specific structure definitions */
-#include "zip_regs.h"
-
-#define ZIP_ERROR -1
-
-#define ZIP_FLUSH_FINISH 4
-
-#define RAW_FORMAT 0 /* for rawpipe */
-#define ZLIB_FORMAT 1 /* for zpipe */
-#define GZIP_FORMAT 2 /* for gzpipe */
-#define LZS_FORMAT 3 /* for lzspipe */
-
-/* Max number of ZIP devices supported */
-#define MAX_ZIP_DEVICES 2
-
-/* Configures the number of zip queues to be used */
-#define ZIP_NUM_QUEUES 2
-
-#define DYNAMIC_STOP_EXCESS 1024
-
-/* Maximum buffer sizes in direct mode */
-#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
-#define MAX_OUTPUT_BUFFER_SIZE (64 * 1024)
-
-/**
- * struct zip_operation - common data structure for comp and decomp operations
- * @input: Next input byte is read from here
- * @output: Next output byte written here
- * @ctx_addr: Inflate context buffer address
- * @history: Pointer to the history buffer
- * @input_len: Number of bytes available at next_in
- * @input_total_len: Total number of input bytes read
- * @output_len: Remaining free space at next_out
- * @output_total_len: Total number of bytes output so far
- * @csum: Checksum value of the uncompressed data
- * @flush: Flush flag
- * @format: Format (depends on stream's wrap)
- * @speed: Speed depends on stream's level
- * @ccode: Compression code (stream's strategy)
- * @lzs_flag: Flag for LZS support
- * @begin_file: Beginning of file indication for inflate
- * @history_len: Size of the history data
- * @end_file: Ending of the file indication for inflate
- * @compcode: Completion status of the ZIP invocation
- * @bytes_read: Input bytes read in current instruction
- * @bits_processed: Total bits processed for entire file
- * @sizeofptr: To distinguish between ILP32 and LP64
- * @sizeofzops: Optional just for padding
- *
- * This structure is used to maintain the required meta data for the
- * comp and decomp operations.
- */
-struct zip_operation {
- u8 *input;
- u8 *output;
- u64 ctx_addr;
- u64 history;
-
- u32 input_len;
- u32 input_total_len;
-
- u32 output_len;
- u32 output_total_len;
-
- u32 csum;
- u32 flush;
-
- u32 format;
- u32 speed;
- u32 ccode;
- u32 lzs_flag;
-
- u32 begin_file;
- u32 history_len;
-
- u32 end_file;
- u32 compcode;
- u32 bytes_read;
- u32 bits_processed;
-
- u32 sizeofptr;
- u32 sizeofzops;
-};
-
-static inline int zip_poll_result(union zip_zres_s *result)
-{
- int retries = 1000;
-
- while (!result->s.compcode) {
- if (!--retries) {
- pr_err("ZIP ERR: request timed out");
- return -ETIMEDOUT;
- }
- udelay(10);
- /*
- * Force re-reading of compcode which is updated
- * by the ZIP coprocessor.
- */
- rmb();
- }
- return 0;
-}
-
-/* error messages */
-#define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#ifdef MSG_ENABLE
-/* Enable all messages */
-#define zip_msg(fmt, args...) pr_info("ZIP_MSG:" fmt "\n", ## args)
-#else
-#define zip_msg(fmt, args...)
-#endif
-
-#if defined(ZIP_DEBUG_ENABLE) && defined(MSG_ENABLE)
-
-#ifdef DEBUG_LEVEL
-
-#define FILE_NAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : \
- strrchr(__FILE__, '\\') ? strrchr(__FILE__, '\\') + 1 : __FILE__)
-
-#if DEBUG_LEVEL >= 4
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 3
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s: %s() : %d: " \
- fmt "\n", FILE_NAME, __func__, __LINE__, ## args)
-
-#elif DEBUG_LEVEL >= 2
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG: %s() : %d: " \
- fmt "\n", __func__, __LINE__, ## args)
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG LEVEL >=4 */
-
-#else
-
-#define zip_dbg(fmt, args...) pr_info("ZIP DBG:" fmt "\n", ## args)
-
-#endif /* DEBUG_LEVEL */
-#else
-
-#define zip_dbg(fmt, args...)
-
-#endif /* ZIP_DEBUG_ENABLE && MSG_ENABLE*/
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
deleted file mode 100644
index 02e87f2d50db..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "zip_crypto.h"
-
-static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
- int lzs_flag)
-{
- zip_ops->flush = ZIP_FLUSH_FINISH;
-
- /* equivalent to level 6 of opensource zlib */
- zip_ops->speed = 1;
-
- if (!lzs_flag) {
- zip_ops->ccode = 0; /* Auto Huffman */
- zip_ops->lzs_flag = 0;
- zip_ops->format = ZLIB_FORMAT;
- } else {
- zip_ops->ccode = 3; /* LZS Encoding */
- zip_ops->lzs_flag = 1;
- zip_ops->format = LZS_FORMAT;
- }
- zip_ops->begin_file = 1;
- zip_ops->history_len = 0;
- zip_ops->end_file = 1;
- zip_ops->compcode = 0;
- zip_ops->csum = 1; /* Adler checksum desired */
-}
-
-static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
-
- zip_static_init_zip_ops(comp_ctx, lzs_flag);
- zip_static_init_zip_ops(decomp_ctx, lzs_flag);
-
- comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!comp_ctx->input)
- return -ENOMEM;
-
- comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!comp_ctx->output)
- goto err_comp_input;
-
- decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE);
- if (!decomp_ctx->input)
- goto err_comp_output;
-
- decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE);
- if (!decomp_ctx->output)
- goto err_decomp_input;
-
- return 0;
-
-err_decomp_input:
- zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
-err_comp_output:
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
-err_comp_input:
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
-
- return -ENOMEM;
-}
-
-static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
- struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
-
- zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-
- zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE);
- zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
-}
-
-static int zip_compress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_comp;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
- memcpy(zip_ops->input, src, slen);
-
- ret = zip_deflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-static int zip_decompress(const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen,
- struct zip_kernel_ctx *zip_ctx)
-{
- struct zip_operation *zip_ops = NULL;
- struct zip_state *zip_state;
- struct zip_device *zip = NULL;
- int ret;
-
- if (!zip_ctx || !src || !dst || !dlen)
- return -ENOMEM;
-
- zip = zip_get_device(zip_get_node_id());
- if (!zip)
- return -ENODEV;
-
- zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
- if (!zip_state)
- return -ENOMEM;
-
- zip_ops = &zip_ctx->zip_decomp;
- memcpy(zip_ops->input, src, slen);
-
- /* Work around for a bug in zlib which sometimes needs an extra byte */
- if (zip_ops->ccode != 3) /* Not LZS Encoding */
- zip_ops->input[slen++] = 0;
-
- zip_ops->input_len = slen;
- zip_ops->output_len = *dlen;
-
- ret = zip_inflate(zip_ops, zip_state, zip);
-
- if (!ret) {
- *dlen = zip_ops->output_len;
- memcpy(dst, zip_ops->output, *dlen);
- }
- kfree(zip_state);
- return ret;
-}
-
-/* SCOMP framework start */
-void *zip_alloc_scomp_ctx_deflate(void)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 0);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void *zip_alloc_scomp_ctx_lzs(void)
-{
- int ret;
- struct zip_kernel_ctx *zip_ctx;
-
- zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL);
- if (!zip_ctx)
- return ERR_PTR(-ENOMEM);
-
- ret = zip_ctx_init(zip_ctx, 1);
-
- if (ret) {
- kfree_sensitive(zip_ctx);
- return ERR_PTR(ret);
- }
-
- return zip_ctx;
-}
-
-void zip_free_scomp_ctx(void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- zip_ctx_exit(zip_ctx);
- kfree_sensitive(zip_ctx);
-}
-
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_compress(src, slen, dst, dlen, zip_ctx);
-}
-
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx)
-{
- struct zip_kernel_ctx *zip_ctx = ctx;
-
- return zip_decompress(src, slen, dst, dlen, zip_ctx);
-} /* SCOMP framework end */
diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h
deleted file mode 100644
index 10899ece2d1f..000000000000
--- a/drivers/crypto/cavium/zip/zip_crypto.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_CRYPTO_H__
-#define __ZIP_CRYPTO_H__
-
-#include <crypto/internal/scompress.h>
-#include "common.h"
-#include "zip_deflate.h"
-#include "zip_inflate.h"
-
-struct zip_kernel_ctx {
- struct zip_operation zip_comp;
- struct zip_operation zip_decomp;
-};
-
-void *zip_alloc_scomp_ctx_deflate(void);
-void *zip_alloc_scomp_ctx_lzs(void);
-void zip_free_scomp_ctx(void *zip_ctx);
-int zip_scomp_compress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-int zip_scomp_decompress(struct crypto_scomp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen, void *ctx);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
deleted file mode 100644
index d7133f857d67..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/* Prepares the deflate zip command */
-static int prepare_zip_command(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD #0 */
- /* History gather */
- zip_cmd->s.hg = 0;
- /* compression enable = 1 for deflate */
- zip_cmd->s.ce = 1;
- /* sf (sync flush) */
- zip_cmd->s.sf = 1;
- /* ef (end of file) */
- if (zip_ops->flush == ZIP_FLUSH_FINISH) {
- zip_cmd->s.ef = 1;
- zip_cmd->s.sf = 0;
- }
-
- zip_cmd->s.cc = zip_ops->ccode;
- /* ss (compression speed/storage) */
- zip_cmd->s.ss = zip_ops->speed;
-
- /* IWORD #1 */
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
- zip_cmd->s.historylength = zip_ops->history_len;
- zip_cmd->s.dg = 0;
-
- /* IWORD # 6 and 7 - compression input/history pointer */
- zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len +
- zip_ops->history_len);
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
- /* maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
- return 0;
-}
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepares zip command based on the input parameters */
- prepare_zip_command(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes);
- /* Loads zip command into command queues and rings door bell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Stats update for compression requests submitted */
- atomic64_inc(&zip_dev->stats.comp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Stats update for compression requests completed */
- atomic64_inc(&zip_dev->stats.comp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip instruction not yet completed");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip instruction completed successfully");
- zip_update_cmd_bufs(zip_dev, queue);
- break;
-
- case ZIP_CMD_DTRUNC:
- zip_dbg("Output Truncate error");
- /* Returning ZIP_ERROR to avoid copy to user */
- return ZIP_ERROR;
-
- default:
- zip_err("Zip instruction failed. Code:%d", zip_ops->compcode);
- return ZIP_ERROR;
- }
-
- /* Update the CRC depending on the format */
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine, need to feed it again */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Unknown Format:%d\n", zip_ops->format);
- }
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.comp_out_bytes);
-
- /* Update output_len */
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- /* Dynamic stop && strm->output_len < zipconstants[onfsize] */
- zip_err("output_len (%d) < total bytes written(%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
-
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- return 0;
-}
diff --git a/drivers/crypto/cavium/zip/zip_deflate.h b/drivers/crypto/cavium/zip/zip_deflate.h
deleted file mode 100644
index 1d32e76edc4d..000000000000
--- a/drivers/crypto/cavium/zip/zip_deflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEFLATE_H__
-#define __ZIP_DEFLATE_H__
-
-/**
- * zip_deflate - API to offload deflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip deflate command and submits it to the zip
- * engine by ringing the doorbell.
- *
- * Return: 0 if successful or error code
- */
-int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_device.c b/drivers/crypto/cavium/zip/zip_device.c
deleted file mode 100644
index f174ec29ed69..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_deflate.h"
-
-/**
- * zip_cmd_queue_consumed - Calculates the space consumed in the command queue.
- *
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- *
- * Return: Bytes consumed in the command queue buffer.
- */
-static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue)
-{
-	/* sw_head/sw_tail are u64 pointers, so the difference is in 8-byte words */
-	return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *
-		sizeof(u64));
-}
-
-/**
- * zip_load_instr - Submits the instruction into the ZIP command queue
- * @instr: Pointer to the instruction to be submitted
- * @zip_dev: Pointer to ZIP device structure to which the instruction is to
- * be submitted
- *
- * This function copies the ZIP instruction to the command queue and rings the
- * doorbell to notify the engine of the instruction submission. The command
- * queue is maintained in a circular fashion. When there is space for exactly
- * one instruction in the queue, next chunk pointer of the queue is made to
- * point to the head of the queue, thus maintaining a circular queue.
- *
- * Return: Queue number to which the instruction was submitted
- */
-u32 zip_load_instr(union zip_inst_s *instr,
- struct zip_device *zip_dev)
-{
- union zip_quex_doorbell dbell;
- u32 queue = 0;
- u32 consumed = 0;
- u64 *ncb_ptr = NULL;
- union zip_nptr_s ncp;
-
- /*
- * Distribute the instructions between the enabled queues based on
- * the CPU id.
- */
- if (raw_smp_processor_id() % 2 == 0)
- queue = 0;
- else
- queue = 1;
-
- zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue);
-
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /*
- * Command Queue implementation
- * 1. If there is place for new instructions, push the cmd at sw_head.
- * 2. If there is place for exactly one instruction, push the new cmd
- * at the sw_head. Make sw_head point to the sw_tail to make it
- * circular. Write sw_head's physical address to the "Next-Chunk
- * Buffer Ptr" to make it cmd_hw_tail.
- * 3. Ring the door bell.
- */
- zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
- zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
-
- consumed = zip_cmd_queue_consumed(zip_dev, queue);
- /* Check if there is space to push just one cmd */
- if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) {
- zip_dbg("Cmd queue space available for single command");
-		/* Space for one cmd, push it and make the queue circular */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */
- ncb_ptr = zip_dev->iq[queue].sw_head;
-
- zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx",
- ncb_ptr, zip_dev->iq[queue].sw_head - 16);
-
- /* Using Circular command queue */
- zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;
- /* Mark this buffer for free */
- zip_dev->iq[queue].free_flag = 1;
-
- /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */
- ncp.u_reg64 = 0ull;
- ncp.s.addr = __pa(zip_dev->iq[queue].sw_head);
- *ncb_ptr = ncp.u_reg64;
- zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx",
- *ncb_ptr, __pa(zip_dev->iq[queue].sw_head));
-
- zip_dev->iq[queue].pend_cnt++;
-
- } else {
- zip_dbg("Enough space is available for commands");
- /* Push this cmd to cmd queue buffer */
- memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
- sizeof(union zip_inst_s));
- zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
-
- zip_dev->iq[queue].pend_cnt++;
- }
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
-
- zip_dbg(" Pushed the new cmd : pend_cnt : %d",
- zip_dev->iq[queue].pend_cnt);
-
- /* Ring the doorbell */
- dbell.u_reg64 = 0ull;
- dbell.s.dbell_cnt = 1;
- zip_reg_write(dbell.u_reg64,
- (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue)));
-
- /* Unlock cmd buffer lock */
- spin_unlock(&zip_dev->iq[queue].lock);
-
- return queue;
-}
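The wrap check above, (consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8), combined with ZIP_CMD_QBUF_SIZE = 8064 + 8 from zip_main.h, means one chunk carries exactly 63 instructions of 128 bytes plus the 8-byte next-chunk pointer. A stand-alone sketch of that arithmetic, with the constants inlined:

	#define CMD_SIZE  128		/* sizeof(union zip_inst_s) */
	#define QBUF_SIZE (8064 + 8)	/* ZIP_CMD_QBUF_SIZE */

	static int cmds_before_wrap(void)
	{
		int consumed, cmds = 0;

		/* the fast path is taken while more than one slot remains */
		for (consumed = 0; consumed + CMD_SIZE != QBUF_SIZE - 8;
		     consumed += CMD_SIZE)
			cmds++;

		/*
		 * 62 fast-path commands; the 63rd takes the wrap path, so a
		 * chunk holds 63 * 128 + 8 == 8072 bytes exactly.
		 */
		return cmds;
	}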
-
-/**
- * zip_update_cmd_bufs - Updates the queue pointers and statistics after an
- *			 instruction completes
- * @zip_dev: Pointer to zip device structure
- * @queue: Queue number
- */
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue)
-{
- /* Take cmd buffer lock */
- spin_lock(&zip_dev->iq[queue].lock);
-
- /* Check if the previous buffer can be freed */
- if (zip_dev->iq[queue].free_flag == 1) {
- zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail");
- /* Reset the free flag */
- zip_dev->iq[queue].free_flag = 0;
-
- /* Point the hw_tail to start of the new chunk buffer */
- zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;
- } else {
- zip_dbg("Free flag not set. increment hw tail");
- zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */
- }
-
- zip_dev->iq[queue].done_cnt++;
- zip_dev->iq[queue].pend_cnt--;
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,
- zip_dev->iq[queue].hw_tail);
- zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt);
-
- spin_unlock(&zip_dev->iq[queue].lock);
-}
diff --git a/drivers/crypto/cavium/zip/zip_device.h b/drivers/crypto/cavium/zip/zip_device.h
deleted file mode 100644
index 9e18b3b93d38..000000000000
--- a/drivers/crypto/cavium/zip/zip_device.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_DEVICE_H__
-#define __ZIP_DEVICE_H__
-
-#include <linux/types.h>
-#include "zip_main.h"
-
-struct sg_info {
- /*
- * Pointer to the input data when scatter_gather == 0 and
- * pointer to the input gather list buffer when scatter_gather == 1
- */
- union zip_zptr_s *gather;
-
- /*
- * Pointer to the output data when scatter_gather == 0 and
- * pointer to the output scatter list buffer when scatter_gather == 1
- */
- union zip_zptr_s *scatter;
-
- /*
- * Holds size of the output buffer pointed by scatter list
- * when scatter_gather == 1
- */
- u64 scatter_buf_size;
-
- /* for gather data */
- u64 gather_enable;
-
- /* for scatter data */
- u64 scatter_enable;
-
- /* Number of gather list pointers for gather data */
- u32 gbuf_cnt;
-
- /* Number of scatter list pointers for scatter data */
- u32 sbuf_cnt;
-
- /* Buffers allocation state */
- u8 alloc_state;
-};
-
-/**
- * struct zip_state - Structure representing the required information related
- * to a command
- * @zip_cmd: Pointer to zip instruction structure
- * @result: Pointer to zip result structure
- * @ctx: Context pointer for inflate
- * @history: Decompression history pointer
- * @sginfo: Scatter-gather info structure
- */
-struct zip_state {
- union zip_inst_s zip_cmd;
- union zip_zres_s result;
- union zip_zptr_s *ctx;
- union zip_zptr_s *history;
- struct sg_info sginfo;
-};
-
-#define ZIP_CONTEXT_SIZE 2048
-#define ZIP_INFLATE_HISTORY_SIZE 32768
-#define ZIP_DEFLATE_HISTORY_SIZE 32768
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
deleted file mode 100644
index 7e0d73e2f89e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include "common.h"
-#include "zip_inflate.h"
-
-static int prepare_inflate_zcmd(struct zip_operation *zip_ops,
- struct zip_state *s, union zip_inst_s *zip_cmd)
-{
- union zip_zres_s *result_ptr = &s->result;
-
- memset(zip_cmd, 0, sizeof(s->zip_cmd));
- memset(result_ptr, 0, sizeof(s->result));
-
- /* IWORD#0 */
-
- /* Decompression History Gather list - no gather list */
- zip_cmd->s.hg = 0;
- /* For decompression, CE must be 0x0. */
- zip_cmd->s.ce = 0;
- /* For decompression, SS must be 0x0. */
- zip_cmd->s.ss = 0;
- /* For decompression, SF should always be set. */
- zip_cmd->s.sf = 1;
-
- /* Begin File */
- if (zip_ops->begin_file == 0)
- zip_cmd->s.bf = 0;
- else
- zip_cmd->s.bf = 1;
-
- zip_cmd->s.ef = 1;
- /* 0: for Deflate decompression, 3: for LZS decompression */
- zip_cmd->s.cc = zip_ops->ccode;
-
- /* IWORD #1*/
-
- /* adler checksum */
- zip_cmd->s.adlercrc32 = zip_ops->csum;
-
- /*
- * HISTORYLENGTH must be 0x0 for any ZIP decompress operation.
- * History data is added to a decompression operation via IWORD3.
- */
- zip_cmd->s.historylength = 0;
- zip_cmd->s.ds = 0;
-
- /* IWORD # 8 and 9 - Output pointer */
- zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output);
- zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len;
-
- /* Maximum number of output-stream bytes that can be written */
- zip_cmd->s.totaloutputlength = zip_ops->output_len;
-
- zip_dbg("Data Direct Input case ");
-
- /* IWORD # 6 and 7 - input pointer */
- zip_cmd->s.dg = 0;
- zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input);
- zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len;
-
- /* IWORD # 10 and 11 - Result pointer */
- zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr);
-
- /* Clearing completion code */
- result_ptr->s.compcode = 0;
-
-	/* Returning 0 for the time being. */
- return 0;
-}
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to zip device structure
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev)
-{
- union zip_inst_s *zip_cmd = &s->zip_cmd;
- union zip_zres_s *result_ptr = &s->result;
- u32 queue;
-
- /* Prepare inflate zip command */
- prepare_inflate_zcmd(zip_ops, s, zip_cmd);
-
- atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes);
-
- /* Load inflate command to zip queue and ring the doorbell */
- queue = zip_load_instr(zip_cmd, zip_dev);
-
- /* Decompression requests submitted stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_submit);
-
- /* Wait for completion or error */
- zip_poll_result(result_ptr);
-
- /* Decompression requests completed stats update */
- atomic64_inc(&zip_dev->stats.decomp_req_complete);
-
- zip_ops->compcode = result_ptr->s.compcode;
- switch (zip_ops->compcode) {
- case ZIP_CMD_NOTDONE:
- zip_dbg("Zip Instruction not yet completed\n");
- return ZIP_ERROR;
-
- case ZIP_CMD_SUCCESS:
- zip_dbg("Zip Instruction completed successfully\n");
- break;
-
- case ZIP_CMD_DYNAMIC_STOP:
- zip_dbg(" Dynamic stop Initiated\n");
- break;
-
- default:
- zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode);
- atomic64_inc(&zip_dev->stats.decomp_bad_reqs);
- zip_update_cmd_bufs(zip_dev, queue);
- return ZIP_ERROR;
- }
-
- zip_update_cmd_bufs(zip_dev, queue);
-
- if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) &&
- (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP))
- result_ptr->s.ef = 1;
-
- zip_ops->csum = result_ptr->s.adler32;
-
- atomic64_add(result_ptr->s.totalbyteswritten,
- &zip_dev->stats.decomp_out_bytes);
-
- if (zip_ops->output_len < result_ptr->s.totalbyteswritten) {
- zip_err("output_len (%d) < total bytes written (%d)\n",
- zip_ops->output_len, result_ptr->s.totalbyteswritten);
- zip_ops->output_len = 0;
- } else {
- zip_ops->output_len = result_ptr->s.totalbyteswritten;
- }
-
- zip_ops->bytes_read = result_ptr->s.totalbytesread;
- zip_ops->bits_processed = result_ptr->s.totalbitsprocessed;
- zip_ops->end_file = result_ptr->s.ef;
- if (zip_ops->end_file) {
- switch (zip_ops->format) {
- case RAW_FORMAT:
- zip_dbg("RAW Format: %d ", zip_ops->format);
- /* Get checksum from engine */
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case ZLIB_FORMAT:
- zip_dbg("ZLIB Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.adler32;
- break;
-
- case GZIP_FORMAT:
- zip_dbg("GZIP Format: %d ", zip_ops->format);
- zip_ops->csum = result_ptr->s.crc32;
- break;
-
- case LZS_FORMAT:
- zip_dbg("LZS Format: %d ", zip_ops->format);
- break;
-
- default:
- zip_err("Format error:%d\n", zip_ops->format);
- }
- }
-
- return 0;
-}
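One detail worth spelling out: the checksum read back into zip_ops->csum after each call is exactly what prepare_inflate_zcmd() loads into IWORD1 (adlercrc32) on the next call, so a chunked stream simply leaves it in place. A hypothetical multi-chunk loop, assuming the caller refreshes the input/output fields between iterations:

	/* Hypothetical chunked-inflate loop; not the deleted zip_crypto.c code */
	static int example_inflate_stream(struct zip_device *zip_dev,
					  struct zip_state *s,
					  struct zip_operation *ops,
					  int nchunks)
	{
		int i;

		ops->begin_file = 1;	/* bf is set on the first chunk only */
		for (i = 0; i < nchunks; i++) {
			if (zip_inflate(ops, s, zip_dev))
				return ZIP_ERROR;
			/*
			 * ops->csum now holds the running checksum; leave it
			 * untouched so the next IWORD1 continues from it.
			 */
			ops->begin_file = 0;
		}

		return 0;
	}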
diff --git a/drivers/crypto/cavium/zip/zip_inflate.h b/drivers/crypto/cavium/zip/zip_inflate.h
deleted file mode 100644
index 6b20f179978e..000000000000
--- a/drivers/crypto/cavium/zip/zip_inflate.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_INFLATE_H__
-#define __ZIP_INFLATE_H__
-
-/**
- * zip_inflate - API to offload inflate operation to hardware
- * @zip_ops: Pointer to zip operation structure
- * @s: Pointer to the structure representing zip state
- * @zip_dev: Pointer to the structure representing zip device
- *
- * This function prepares the zip inflate command and submits it to the zip
- * engine for processing.
- *
- * Return: 0 if successful or error code
- */
-int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
- struct zip_device *zip_dev);
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
deleted file mode 100644
index abd58de4343d..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include "common.h"
-#include "zip_crypto.h"
-
-#define DRV_NAME "ThunderX-ZIP"
-
-static struct zip_device *zip_dev[MAX_ZIP_DEVICES];
-
-static const struct pci_device_id zip_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
- { 0, }
-};
-
-static void zip_debugfs_init(void);
-static void zip_debugfs_exit(void);
-static int zip_register_compression_device(void);
-static void zip_unregister_compression_device(void);
-
-void zip_reg_write(u64 val, u64 __iomem *addr)
-{
- writeq(val, addr);
-}
-
-u64 zip_reg_read(u64 __iomem *addr)
-{
- return readq(addr);
-}
-
-/*
- * Allocates new ZIP device structure
- * Returns the zip_device pointer, or NULL if there is no free slot or memory
- * allocation fails
- */
-static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
-{
- struct zip_device *zip = NULL;
- int idx;
-
- for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
- if (!zip_dev[idx])
- break;
- }
-
- /* To ensure that the index is within the limit */
- if (idx < MAX_ZIP_DEVICES)
- zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
-
- if (!zip)
- return NULL;
-
- zip_dev[idx] = zip;
- zip->index = idx;
- return zip;
-}
-
-/**
- * zip_get_device - Get ZIP device based on node id of cpu
- *
- * @node: Node id of the current cpu
- * Return: Pointer to the zip device structure, or NULL if not found
- */
-struct zip_device *zip_get_device(int node)
-{
- if ((node < MAX_ZIP_DEVICES) && (node >= 0))
- return zip_dev[node];
-
- zip_err("ZIP device not found for node id %d\n", node);
- return NULL;
-}
-
-/**
- * zip_get_node_id - Get the node id of the current cpu
- *
- * Return: Node id of the current cpu
- */
-int zip_get_node_id(void)
-{
- return cpu_to_node(raw_smp_processor_id());
-}
-
-/* Initializes the ZIP h/w sub-system */
-static int zip_init_hw(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
- union zip_constants constants;
- union zip_que_ena que_ena;
- union zip_quex_map que_map;
- union zip_que_pri que_pri;
-
- union zip_quex_sbuf_addr que_sbuf_addr;
- union zip_quex_sbuf_ctl que_sbuf_ctl;
-
- int q = 0;
-
- /* Enable the ZIP Engine(Core) Clock */
- cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
- cmd_ctl.s.forceclk = 1;
- zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));
-
- zip_msg("ZIP_CMD_CTL : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_CMD_CTL));
-
- constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
- zip->depth = constants.s.depth;
- zip->onfsize = constants.s.onfsize;
- zip->ctxsize = constants.s.ctxsize;
-
- zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
- zip->depth, zip->onfsize, zip->ctxsize);
-
- /*
- * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
- * have the correct buffer pointer and size configured for each
- * instruction queue.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_sbuf_ctl.u_reg64 = 0ull;
- que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
- que_sbuf_ctl.s.inst_be = 0;
- que_sbuf_ctl.s.stream_id = 0;
- zip_reg_write(que_sbuf_ctl.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
-
- zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
- }
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));
-
- spin_lock_init(&zip->iq[q].lock);
-
- if (zip_cmd_qbuf_alloc(zip, q)) {
- while (q != 0) {
- q--;
- zip_cmd_qbuf_free(zip, q);
- }
- return -ENOMEM;
- }
-
- /* Initialize tail ptr to head */
- zip->iq[q].sw_tail = zip->iq[q].sw_head;
- zip->iq[q].hw_tail = zip->iq[q].sw_head;
-
- /* Write the physical addr to register */
- que_sbuf_addr.u_reg64 = 0ull;
- que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
- ZIP_128B_ALIGN);
-
- zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
- (u64)que_sbuf_addr.s.ptr);
-
- zip_reg_write(que_sbuf_addr.u_reg64,
- (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));
-
- zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
- zip->iq[q].sw_head, zip->iq[q].sw_tail,
- zip->iq[q].hw_tail);
- zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
- }
-
- /*
- * Queue-to-ZIP core mapping
- * If a queue is not mapped to a particular core, it is equivalent to
- * the ZIP core being disabled.
- */
- que_ena.u_reg64 = 0x0ull;
- /* Enabling queues based on ZIP_NUM_QUEUES */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_ena.s.ena |= (0x1 << q);
- zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));
-
- zip_msg("QUE_ENA : 0x%016llx",
- zip_reg_read(zip->reg_base + ZIP_QUE_ENA));
-
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- que_map.u_reg64 = 0ull;
- /* Mapping each queue to two ZIP cores */
- que_map.s.zce = 0x3;
- zip_reg_write(que_map.u_reg64,
- (zip->reg_base + ZIP_QUEX_MAP(q)));
-
- zip_msg("QUE_MAP(%d) : 0x%016llx", q,
- zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
- }
-
- que_pri.u_reg64 = 0ull;
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
- zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));
-
- zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));
-
- return 0;
-}
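Two derived values in the queue setup above are easy to miss: the SBUF size field is programmed as ZIP_CMD_QBUF_SIZE / sizeof(u64) = 8072 / 8 = 1009 u64 words, and the SBUF address field holds the buffer's physical address in 128-byte units (shifted right by ZIP_128B_ALIGN, i.e. 7), which is safe because __get_free_pages() returns page-aligned memory. A minimal sketch of that arithmetic, constants assumed from zip_main.h:

	/* Sketch of the SBUF register arithmetic; constants from zip_main.h */
	static void example_sbuf_fields(u64 qbuf_phys)
	{
		u64 size_words = (8064 + 8) / sizeof(u64);	/* 1009 */
		u64 ptr_128b   = qbuf_phys >> 7;	/* ZIP_128B_ALIGN */

		/* a page-aligned buffer guarantees bits 6:0 are zero */
		WARN_ON(qbuf_phys & 0x7f);

		(void)size_words;
		(void)ptr_128b;
	}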
-
-static void zip_reset(struct zip_device *zip)
-{
- union zip_cmd_ctl cmd_ctl;
-
- cmd_ctl.u_reg64 = 0x0ull;
- cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */
- zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
-}
-
-static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct device *dev = &pdev->dev;
- struct zip_device *zip = NULL;
- int err;
-
- zip = zip_alloc_device(pdev);
- if (!zip)
- return -ENOMEM;
-
- dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
- pdev->vendor, pdev->device, dev_to_node(dev));
-
- pci_set_drvdata(pdev, zip);
- zip->pdev = pdev;
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device");
- goto err_free_device;
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_err(dev, "PCI request regions failed 0x%x", err);
- goto err_disable_device;
- }
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
- goto err_release_regions;
- }
-
- /* MAP configuration registers */
- zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
- if (!zip->reg_base) {
- dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
- err = -ENOMEM;
- goto err_release_regions;
- }
-
- /* Initialize ZIP Hardware */
- err = zip_init_hw(zip);
- if (err)
- goto err_release_regions;
-
- /* Register with the Kernel Crypto Interface */
- err = zip_register_compression_device();
- if (err < 0) {
- zip_err("ZIP: Kernel Crypto Registration failed\n");
- goto err_register;
- }
-
- /* comp-decomp statistics are handled with debugfs interface */
- zip_debugfs_init();
-
- return 0;
-
-err_register:
- zip_reset(zip);
-
-err_release_regions:
- if (zip->reg_base)
- iounmap(zip->reg_base);
- pci_release_regions(pdev);
-
-err_disable_device:
- pci_disable_device(pdev);
-
-err_free_device:
- pci_set_drvdata(pdev, NULL);
-
- /* Remove zip_dev from zip_device list, free the zip_device memory */
- zip_dev[zip->index] = NULL;
- devm_kfree(dev, zip);
-
- return err;
-}
-
-static void zip_remove(struct pci_dev *pdev)
-{
- struct zip_device *zip = pci_get_drvdata(pdev);
- int q = 0;
-
- if (!zip)
- return;
-
- zip_debugfs_exit();
-
- zip_unregister_compression_device();
-
- if (zip->reg_base) {
- zip_reset(zip);
- iounmap(zip->reg_base);
- }
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-
- /*
- * Free Command Queue buffers. This free should be called for all
- * the enabled Queues.
- */
- for (q = 0; q < ZIP_NUM_QUEUES; q++)
- zip_cmd_qbuf_free(zip, q);
-
- pci_set_drvdata(pdev, NULL);
- /* remove zip device from zip device list */
- zip_dev[zip->index] = NULL;
-}
-
-/* PCI Sub-System Interface */
-static struct pci_driver zip_driver = {
- .name = DRV_NAME,
- .id_table = zip_id_table,
- .probe = zip_probe,
- .remove = zip_remove,
-};
-
-/* Kernel Crypto Subsystem Interface */
-
-static struct scomp_alg zip_scomp_deflate = {
- .alloc_ctx = zip_alloc_scomp_ctx_deflate,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static struct scomp_alg zip_scomp_lzs = {
- .alloc_ctx = zip_alloc_scomp_ctx_lzs,
- .free_ctx = zip_free_scomp_ctx,
- .compress = zip_scomp_compress,
- .decompress = zip_scomp_decompress,
- .base = {
- .cra_name = "lzs",
- .cra_driver_name = "lzs-scomp-cavium",
- .cra_module = THIS_MODULE,
- .cra_priority = 300,
- }
-};
-
-static int zip_register_compression_device(void)
-{
- int ret;
-
- ret = crypto_register_scomp(&zip_scomp_deflate);
- if (ret < 0) {
- zip_err("Deflate scomp algorithm registration failed\n");
- return ret;
- }
-
- ret = crypto_register_scomp(&zip_scomp_lzs);
- if (ret < 0) {
- zip_err("LZS scomp algorithm registration failed\n");
- goto err_unregister_scomp_deflate;
- }
-
- return ret;
-
-err_unregister_scomp_deflate:
- crypto_unregister_scomp(&zip_scomp_deflate);
-
- return ret;
-}
-
-static void zip_unregister_compression_device(void)
-{
- crypto_unregister_scomp(&zip_scomp_deflate);
- crypto_unregister_scomp(&zip_scomp_lzs);
-}
-
-/*
- * debugfs functions
- */
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-
-/* Displays ZIP device statistics */
-static int zip_stats_show(struct seq_file *s, void *unused)
-{
- u64 val = 0ull;
- u64 avg_chunk = 0ull, avg_cr = 0ull;
- u32 q = 0;
-
- int index = 0;
- struct zip_device *zip;
- struct zip_stats *st;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- u64 pending = 0;
-
- if (zip_dev[index]) {
- zip = zip_dev[index];
- st = &zip->stats;
-
- /* Get all the pending requests */
- for (q = 0; q < ZIP_NUM_QUEUES; q++) {
- val = zip_reg_read((zip->reg_base +
- ZIP_DBG_QUEX_STA(q)));
- pending += val >> 32 & 0xffffff;
- }
-
- val = atomic64_read(&st->comp_req_complete);
- avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
-
- val = atomic64_read(&st->comp_out_bytes);
- avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;
- seq_printf(s, " ZIP Device %d Stats\n"
- "-----------------------------------\n"
- "Comp Req Submitted : \t%lld\n"
- "Comp Req Completed : \t%lld\n"
- "Compress In Bytes : \t%lld\n"
- "Compressed Out Bytes : \t%lld\n"
- "Average Chunk size : \t%llu\n"
- "Average Compression ratio : \t%llu\n"
- "Decomp Req Submitted : \t%lld\n"
- "Decomp Req Completed : \t%lld\n"
- "Decompress In Bytes : \t%lld\n"
- "Decompressed Out Bytes : \t%lld\n"
- "Decompress Bad requests : \t%lld\n"
- "Pending Req : \t%lld\n"
- "---------------------------------\n",
- index,
- (u64)atomic64_read(&st->comp_req_submit),
- (u64)atomic64_read(&st->comp_req_complete),
- (u64)atomic64_read(&st->comp_in_bytes),
- (u64)atomic64_read(&st->comp_out_bytes),
- avg_chunk,
- avg_cr,
- (u64)atomic64_read(&st->decomp_req_submit),
- (u64)atomic64_read(&st->decomp_req_complete),
- (u64)atomic64_read(&st->decomp_in_bytes),
- (u64)atomic64_read(&st->decomp_out_bytes),
- (u64)atomic64_read(&st->decomp_bad_reqs),
- pending);
- }
- }
- return 0;
-}
-
-/* Clears stats data */
-static int zip_clear_show(struct seq_file *s, void *unused)
-{
- int index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- memset(&zip_dev[index]->stats, 0,
- sizeof(struct zip_stats));
- seq_printf(s, "Cleared stats for zip %d\n", index);
- }
- }
-
- return 0;
-}
-
-static struct zip_registers zipregs[64] = {
- {"ZIP_CMD_CTL ", 0x0000ull},
- {"ZIP_THROTTLE ", 0x0010ull},
- {"ZIP_CONSTANTS ", 0x00A0ull},
- {"ZIP_QUE0_MAP ", 0x1400ull},
- {"ZIP_QUE1_MAP ", 0x1408ull},
- {"ZIP_QUE_ENA ", 0x0500ull},
- {"ZIP_QUE_PRI ", 0x0508ull},
- {"ZIP_QUE0_DONE ", 0x2000ull},
- {"ZIP_QUE1_DONE ", 0x2008ull},
- {"ZIP_QUE0_DOORBELL ", 0x4000ull},
- {"ZIP_QUE1_DOORBELL ", 0x4008ull},
- {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
- {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
- {"ZIP_QUE0_SBUF_CTL ", 0x1200ull},
- {"ZIP_QUE1_SBUF_CTL ", 0x1208ull},
- { NULL, 0}
-};
-
-/* Prints registers' contents */
-static int zip_regs_show(struct seq_file *s, void *unused)
-{
- u64 val = 0;
- int i = 0, index = 0;
-
- for (index = 0; index < MAX_ZIP_DEVICES; index++) {
- if (zip_dev[index]) {
- seq_printf(s, "--------------------------------\n"
- " ZIP Device %d Registers\n"
- "--------------------------------\n",
- index);
-
- i = 0;
-
- while (zipregs[i].reg_name) {
- val = zip_reg_read((zip_dev[index]->reg_base +
- zipregs[i].reg_offset));
- seq_printf(s, "%s: 0x%016llx\n",
- zipregs[i].reg_name, val);
- i++;
- }
- }
- }
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(zip_stats);
-DEFINE_SHOW_ATTRIBUTE(zip_clear);
-DEFINE_SHOW_ATTRIBUTE(zip_regs);
-
-/* Root directory for thunderx_zip debugfs entry */
-static struct dentry *zip_debugfs_root;
-
-static void zip_debugfs_init(void)
-{
- if (!debugfs_initialized())
- return;
-
- zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
-
- /* Creating files for entries inside thunderx_zip directory */
- debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL,
- &zip_stats_fops);
-
- debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL,
- &zip_clear_fops);
-
- debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL,
- &zip_regs_fops);
-}
-
-static void zip_debugfs_exit(void)
-{
- debugfs_remove_recursive(zip_debugfs_root);
-}
-
-#else
-static void __init zip_debugfs_init(void) { }
-static void __exit zip_debugfs_exit(void) { }
-#endif
-/* debugfs - end */
-
-module_pci_driver(zip_driver);
-
-MODULE_AUTHOR("Cavium Inc");
-MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, zip_id_table);
diff --git a/drivers/crypto/cavium/zip/zip_main.h b/drivers/crypto/cavium/zip/zip_main.h
deleted file mode 100644
index e1e4fa92ce80..000000000000
--- a/drivers/crypto/cavium/zip/zip_main.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_MAIN_H__
-#define __ZIP_MAIN_H__
-
-#include "zip_device.h"
-#include "zip_regs.h"
-
-/* PCI device IDs */
-#define PCI_DEVICE_ID_THUNDERX_ZIP 0xA01A
-
-/* ZIP device BARs */
-#define PCI_CFG_ZIP_PF_BAR0 0 /* Base addr for normal regs */
-
-/* Maximum available zip queues */
-#define ZIP_MAX_NUM_QUEUES 8
-
-#define ZIP_128B_ALIGN 7
-
-/* Command queue buffer size: 63 instructions of 128B each + 8B next-chunk ptr */
-#define ZIP_CMD_QBUF_SIZE (8064 + 8)
-
-struct zip_registers {
- char *reg_name;
- u64 reg_offset;
-};
-
-/* ZIP Compression - Decompression stats */
-struct zip_stats {
- atomic64_t comp_req_submit;
- atomic64_t comp_req_complete;
- atomic64_t decomp_req_submit;
- atomic64_t decomp_req_complete;
- atomic64_t comp_in_bytes;
- atomic64_t comp_out_bytes;
- atomic64_t decomp_in_bytes;
- atomic64_t decomp_out_bytes;
- atomic64_t decomp_bad_reqs;
-};
-
-/* ZIP Instruction Queue */
-struct zip_iq {
- u64 *sw_head;
- u64 *sw_tail;
- u64 *hw_tail;
- u64 done_cnt;
- u64 pend_cnt;
- u64 free_flag;
-
- /* ZIP IQ lock */
- spinlock_t lock;
-};
-
-/* ZIP Device */
-struct zip_device {
- u32 index;
- void __iomem *reg_base;
- struct pci_dev *pdev;
-
- /* Different ZIP Constants */
- u64 depth;
- u64 onfsize;
- u64 ctxsize;
-
- struct zip_iq iq[ZIP_MAX_NUM_QUEUES];
- struct zip_stats stats;
-};
-
-/* Prototypes */
-struct zip_device *zip_get_device(int node_id);
-int zip_get_node_id(void);
-void zip_reg_write(u64 val, u64 __iomem *addr);
-u64 zip_reg_read(u64 __iomem *addr);
-void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue);
-u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev);
-
-#endif /* __ZIP_MAIN_H__ */
diff --git a/drivers/crypto/cavium/zip/zip_mem.c b/drivers/crypto/cavium/zip/zip_mem.c
deleted file mode 100644
index b3e0843a9169..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#include "common.h"
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q)
-{
- zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(ZIP_CMD_QBUF_SIZE));
-
- if (!zip->iq[q].sw_head)
- return -ENOMEM;
-
- memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
-
- zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
- return 0;
-}
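The allocate-then-memset pair above can equivalently be written with __GFP_ZERO, letting the page allocator hand back zeroed memory; a behavior-identical sketch:

	/* Equivalent allocation, zeroed by the allocator instead of memset() */
	zip->iq[q].sw_head = (u64 *)__get_free_pages(GFP_KERNEL | GFP_DMA |
						     __GFP_ZERO,
						     get_order(ZIP_CMD_QBUF_SIZE));
	if (!zip->iq[q].sw_head)
		return -ENOMEM;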
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q)
-{
- zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);
-
- free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
-}
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Return: Pointer to the allocated buffer, or NULL on failure
- */
-u8 *zip_data_buf_alloc(u64 size)
-{
- u8 *ptr;
-
- ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA),
- get_order(size));
-
- if (!ptr)
- return NULL;
-
- memset(ptr, 0, size);
-
- zip_dbg("Data buffer allocation success\n");
- return ptr;
-}
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size)
-{
- zip_dbg("Freeing data buffer 0x%lx\n", ptr);
-
- free_pages((u64)ptr, get_order(size));
-}
diff --git a/drivers/crypto/cavium/zip/zip_mem.h b/drivers/crypto/cavium/zip/zip_mem.h
deleted file mode 100644
index f8f2f08c4a5c..000000000000
--- a/drivers/crypto/cavium/zip/zip_mem.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_MEM_H__
-#define __ZIP_MEM_H__
-
-/**
- * zip_cmd_qbuf_free - Frees the cmd Queue buffer
- * @zip: Pointer to zip device structure
- * @q: Queue number to free the buffer of
- */
-void zip_cmd_qbuf_free(struct zip_device *zip, int q);
-
-/**
- * zip_cmd_qbuf_alloc - Allocates a Chunk/cmd buffer for ZIP Inst(cmd) Queue
- * @zip: Pointer to zip device structure
- * @q: Queue number to allocate buffer to
- * Return: 0 if successful, -ENOMEM otherwise
- */
-int zip_cmd_qbuf_alloc(struct zip_device *zip, int q);
-
-/**
- * zip_data_buf_alloc - Allocates memory for a data buffer
- * @size: Size of the buffer to allocate
- * Return: Pointer to the allocated buffer, or NULL on failure
- */
-u8 *zip_data_buf_alloc(u64 size);
-
-/**
- * zip_data_buf_free - Frees the memory of a data buffer
- * @ptr: Pointer to the buffer
- * @size: Buffer size
- */
-void zip_data_buf_free(u8 *ptr, u64 size);
-
-#endif
diff --git a/drivers/crypto/cavium/zip/zip_regs.h b/drivers/crypto/cavium/zip/zip_regs.h
deleted file mode 100644
index 874e0236c87e..000000000000
--- a/drivers/crypto/cavium/zip/zip_regs.h
+++ /dev/null
@@ -1,1347 +0,0 @@
-/***********************license start************************************
- * Copyright (c) 2003-2017 Cavium, Inc.
- * All rights reserved.
- *
- * License: one of 'Cavium License' or 'GNU General Public License Version 2'
- *
- * This file is provided under the terms of the Cavium License (see below)
- * or under the terms of GNU General Public License, Version 2, as
- * published by the Free Software Foundation. When using or redistributing
- * this file, you may do so under either license.
- *
- * Cavium License: Redistribution and use in source and binary forms, with
- * or without modification, are permitted provided that the following
- * conditions are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- *
- * * Neither the name of Cavium Inc. nor the names of its contributors may be
- * used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * This Software, including technical data, may be subject to U.S. export
- * control laws, including the U.S. Export Administration Act and its
- * associated regulations, and may be subject to export or import
- * regulations in other countries.
- *
- * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
- * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
- * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
- * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
- * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
- * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
- * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
- * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
- * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
- * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
- * WITH YOU.
- ***********************license end**************************************/
-
-#ifndef __ZIP_REGS_H__
-#define __ZIP_REGS_H__
-
-/*
- * Configuration and status register (CSR) address and type definitions for
- * Cavium ZIP.
- */
-
-#include <linux/kern_levels.h>
-
-/* ZIP invocation result completion status codes */
-#define ZIP_CMD_NOTDONE 0x0
-
-/* Successful completion. */
-#define ZIP_CMD_SUCCESS 0x1
-
-/* Output truncated */
-#define ZIP_CMD_DTRUNC 0x2
-
-/* Dynamic Stop */
-#define ZIP_CMD_DYNAMIC_STOP 0x3
-
-/* Uncompress ran out of input data when IWORD0[EF] was set */
-#define ZIP_CMD_ITRUNC 0x4
-
-/* Uncompress found the reserved block type 3 */
-#define ZIP_CMD_RBLOCK 0x5
-
-/*
- * Uncompress found LEN != ZIP_CMD_NLEN in an uncompressed block in the input.
- */
-#define ZIP_CMD_NLEN 0x6
-
-/* Uncompress found a bad code in the main Huffman codes. */
-#define ZIP_CMD_BADCODE 0x7
-
-/* Uncompress found a bad code in the 19 Huffman codes encoding lengths. */
-#define ZIP_CMD_BADCODE2 0x8
-
-/* Compress found a zero-length input. */
-#define ZIP_CMD_ZERO_LEN 0x9
-
-/* The compress or decompress encountered an internal parity error. */
-#define ZIP_CMD_PARITY 0xA
-
-/*
- * Uncompress found a string identifier that precedes the uncompressed data and
- * decompression history.
- */
-#define ZIP_CMD_FATAL 0xB
-
-/**
- * enum zip_int_vec_e - ZIP MSI-X interrupt vector enumeration.
- */
-enum zip_int_vec_e {
- ZIP_INT_VEC_E_ECCE = 0x10,
- ZIP_INT_VEC_E_FIFE = 0x11,
- ZIP_INT_VEC_E_QUE0_DONE = 0x0,
- ZIP_INT_VEC_E_QUE0_ERR = 0x8,
- ZIP_INT_VEC_E_QUE1_DONE = 0x1,
- ZIP_INT_VEC_E_QUE1_ERR = 0x9,
- ZIP_INT_VEC_E_QUE2_DONE = 0x2,
- ZIP_INT_VEC_E_QUE2_ERR = 0xa,
- ZIP_INT_VEC_E_QUE3_DONE = 0x3,
- ZIP_INT_VEC_E_QUE3_ERR = 0xb,
- ZIP_INT_VEC_E_QUE4_DONE = 0x4,
- ZIP_INT_VEC_E_QUE4_ERR = 0xc,
- ZIP_INT_VEC_E_QUE5_DONE = 0x5,
- ZIP_INT_VEC_E_QUE5_ERR = 0xd,
- ZIP_INT_VEC_E_QUE6_DONE = 0x6,
- ZIP_INT_VEC_E_QUE6_ERR = 0xe,
- ZIP_INT_VEC_E_QUE7_DONE = 0x7,
- ZIP_INT_VEC_E_QUE7_ERR = 0xf,
- ZIP_INT_VEC_E_ENUM_LAST = 0x12,
-};
-
-/**
- * union zip_zptr_addr_s - ZIP Generic Pointer Structure for ADDR.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_addr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-
-};
-
-/**
- * union zip_zptr_ctl_s - ZIP Generic Pointer Structure for CTL.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_ctl_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_inst_s - ZIP Instruction Structure.
- * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15 within
- * the structure).
- */
-union zip_inst_s {
- u64 u_reg64[16];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1;
- u64 reserved_56_62 : 7;
- u64 totaloutputlength : 24;
- u64 reserved_27_31 : 5;
- u64 exn : 3;
- u64 reserved_23_23 : 1;
- u64 exbits : 7;
- u64 reserved_12_15 : 4;
- u64 sf : 1;
- u64 ss : 2;
- u64 cc : 2;
- u64 ef : 1;
- u64 bf : 1;
- u64 ce : 1;
- u64 reserved_3_3 : 1;
- u64 ds : 1;
- u64 dg : 1;
- u64 hg : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 hg : 1;
- u64 dg : 1;
- u64 ds : 1;
- u64 reserved_3_3 : 1;
- u64 ce : 1;
- u64 bf : 1;
- u64 ef : 1;
- u64 cc : 2;
- u64 ss : 2;
- u64 sf : 1;
- u64 reserved_12_15 : 4;
- u64 exbits : 7;
- u64 reserved_23_23 : 1;
- u64 exn : 3;
- u64 reserved_27_31 : 5;
- u64 totaloutputlength : 24;
- u64 reserved_56_62 : 7;
- u64 doneint : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 historylength : 16;
- u64 reserved_96_111 : 16;
- u64 adlercrc32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adlercrc32 : 32;
- u64 reserved_96_111 : 16;
- u64 historylength : 16;
-#endif
- union zip_zptr_addr_s ctx_ptr_addr;
- union zip_zptr_ctl_s ctx_ptr_ctl;
- union zip_zptr_addr_s his_ptr_addr;
- union zip_zptr_ctl_s his_ptr_ctl;
- union zip_zptr_addr_s inp_ptr_addr;
- union zip_zptr_ctl_s inp_ptr_ctl;
- union zip_zptr_addr_s out_ptr_addr;
- union zip_zptr_ctl_s out_ptr_ctl;
- union zip_zptr_addr_s res_ptr_addr;
- union zip_zptr_ctl_s res_ptr_ctl;
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_817_831 : 15;
- u64 wq_ptr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 wq_ptr : 49;
- u64 reserved_817_831 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_882_895 : 14;
- u64 tt : 2;
- u64 reserved_874_879 : 6;
- u64 grp : 10;
- u64 tag : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 tag : 32;
- u64 grp : 10;
- u64 reserved_874_879 : 6;
- u64 tt : 2;
- u64 reserved_882_895 : 14;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_896_959 : 64;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_960_1023 : 64;
-#endif
- } s;
-};
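
IWORD0 carries the per-request control flags. A minimal sketch of a
stateless, single-chunk submission follows; treating bf/ef as stream
begin/end markers and ce as the compress-enable bit is an assumption for
illustration, not something this header spells out.

/*
 * Illustrative sketch only. Zero the whole instruction first so all
 * reserved fields stay 0, then set the IWORD0 flags. The meaning
 * assigned to each bit below is an assumption.
 */
static void zip_prepare_compress(union zip_inst_s *inst)
{
	memset(inst, 0, sizeof(*inst));

	inst->s.bf = 1;		/* assumed: beginning of stream */
	inst->s.ef = 1;		/* assumed: end of stream */
	inst->s.ce = 1;		/* assumed: compress, not decompress */
	inst->s.doneint = 1;	/* request DONEINT on completion */
}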
-
-/**
- * union zip_nptr_s - ZIP Instruction Next-Chunk-Buffer Pointer (NPTR)
- * Structure
- *
- * ZIP_NPTR structure is used to chain all the zip instruction buffers
- * together. ZIP instruction buffers are managed (allocated and released) by
- * the software.
- */
-union zip_nptr_s {
- u64 u_reg64;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-/**
- * union zip_zptr_s - ZIP Generic Pointer Structure.
- *
- * It is the generic format of pointers in ZIP_INST_S.
- */
-union zip_zptr_s {
- u64 u_reg64[2];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 49;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 addr : 49;
- u64 reserved_49_63 : 15;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_112_127 : 16;
- u64 length : 16;
- u64 reserved_67_95 : 29;
- u64 fw : 1;
- u64 nc : 1;
- u64 data_be : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data_be : 1;
- u64 nc : 1;
- u64 fw : 1;
- u64 reserved_67_95 : 29;
- u64 length : 16;
- u64 reserved_112_127 : 16;
-#endif
- } s;
-};
-
-/**
- * union zip_zres_s - ZIP Result Structure
- *
- * The ZIP coprocessor writes the result structure after it completes the
- * invocation. The result structure is exactly 24 bytes, and each invocation of
- * the ZIP coprocessor produces exactly one result structure.
- */
-union zip_zres_s {
- u64 u_reg64[3];
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 crc32 : 32;
- u64 adler32 : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 adler32 : 32;
- u64 crc32 : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbyteswritten : 32;
- u64 totalbytesread : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 totalbytesread : 32;
- u64 totalbyteswritten : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 totalbitsprocessed : 32;
- u64 doneint : 1;
- u64 reserved_155_158 : 4;
- u64 exn : 3;
- u64 reserved_151_151 : 1;
- u64 exbits : 7;
- u64 reserved_137_143 : 7;
- u64 ef : 1;
-
- volatile u64 compcode : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-
- volatile u64 compcode : 8;
- u64 ef : 1;
- u64 reserved_137_143 : 7;
- u64 exbits : 7;
- u64 reserved_151_151 : 1;
- u64 exn : 3;
- u64 reserved_155_158 : 4;
- u64 doneint : 1;
- u64 totalbitsprocessed : 32;
-#endif
- } s;
-};
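
The compcode field above is the byte software polls for completion, and
the values it takes are the ZIP_CMD_* codes defined at the top of this
header; it is declared volatile precisely so an in-place poll re-reads
it. A minimal polling sketch, assuming the result sits in coherent DMA
memory (the cpu_relax() loop and errno mapping are illustrative choices,
not from this file):

/* Illustrative sketch only: poll a result structure to completion. */
static int zip_poll_result(union zip_zres_s *zres)
{
	while (zres->s.compcode == ZIP_CMD_NOTDONE)
		cpu_relax();

	switch (zres->s.compcode) {
	case ZIP_CMD_SUCCESS:
		return 0;
	case ZIP_CMD_DTRUNC:
		return -ENOSPC;		/* output buffer too small */
	default:
		return -EIO;		/* any other completion code */
	}
}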
-
-/**
- * union zip_cmd_ctl - Structure representing the register that controls
- * clock and reset.
- */
-union zip_cmd_ctl {
- u64 u_reg64;
- struct zip_cmd_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 forceclk : 1;
- u64 reset : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reset : 1;
- u64 forceclk : 1;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-#define ZIP_CMD_CTL 0x0ull
-
-/**
- * union zip_constants - Data structure representing the register that contains
- * all of the current implementation-related parameters of the zip core in this
- * chip.
- */
-union zip_constants {
- u64 u_reg64;
- struct zip_constants_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 nexec : 8;
- u64 reserved_49_55 : 7;
- u64 syncflush_capable : 1;
- u64 depth : 16;
- u64 onfsize : 12;
- u64 ctxsize : 12;
- u64 reserved_1_7 : 7;
- u64 disabled : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 disabled : 1;
- u64 reserved_1_7 : 7;
- u64 ctxsize : 12;
- u64 onfsize : 12;
- u64 depth : 16;
- u64 syncflush_capable : 1;
- u64 reserved_49_55 : 7;
- u64 nexec : 8;
-#endif
- } s;
-};
-
-#define ZIP_CONSTANTS 0x00A0ull
-
-/**
- * union zip_corex_bist_status - Represents registers which have the BIST
- * status of memories in zip cores.
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_corex_bist_status {
- u64 u_reg64;
- struct zip_corex_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_53_63 : 11;
- u64 bstatus : 53;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 53;
- u64 reserved_53_63 : 11;
-#endif
- } s;
-};
-
-static inline u64 ZIP_COREX_BIST_STATUS(u64 param1)
-{
- if (param1 <= 1)
- return 0x0520ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_COREX_BIST_STATUS: %llu\n", param1);
- return 0;
-}
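
The ZIP_*X() helpers in this header return a per-instance CSR offset, or
0 after logging when the index is out of range. A sketch of how a caller
might consume one, where the ioremapped register base is a hypothetical
argument rather than anything defined here:

/* Illustrative sketch only: check BIST results for both ZIP cores. */
static void zip_check_bist(void __iomem *reg_base)
{
	union zip_corex_bist_status bist;
	int core;

	for (core = 0; core < 2; core++) {
		bist.u_reg64 = readq(reg_base + ZIP_COREX_BIST_STATUS(core));
		if (bist.s.bstatus)	/* per bit: 0 = pass, 1 = fail */
			pr_err("ZIP core %d BIST failure mask %#llx\n",
			       core, (u64)bist.s.bstatus);
	}
}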
-
-/**
- * union zip_ctl_bist_status - Represents register that has the BIST status of
- * memories in ZIP_CTL (instruction buffer, G/S pointer FIFO, input data
- * buffer, output data buffers).
- *
- * Each bit is the BIST result of an individual memory
- * (per bit, 0 = pass and 1 = fail).
- */
-union zip_ctl_bist_status {
- u64 u_reg64;
- struct zip_ctl_bist_status_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_9_63 : 55;
- u64 bstatus : 9;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 bstatus : 9;
- u64 reserved_9_63 : 55;
-#endif
- } s;
-};
-
-#define ZIP_CTL_BIST_STATUS 0x0510ull
-
-/**
- * union zip_ctl_cfg - Represents the register that controls the behavior of
- * the ZIP DMA engines.
- *
- * It is recommended to keep default values for normal operation. Changing the
- * values of the fields may be useful for diagnostics.
- */
-union zip_ctl_cfg {
- u64 u_reg64;
- struct zip_ctl_cfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_52_63 : 12;
- u64 ildf : 4;
- u64 reserved_36_47 : 12;
- u64 drtf : 4;
- u64 reserved_27_31 : 5;
- u64 stcf : 3;
- u64 reserved_19_23 : 5;
- u64 ldf : 3;
- u64 reserved_2_15 : 14;
- u64 busy : 1;
- u64 reserved_0_0 : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 reserved_0_0 : 1;
- u64 busy : 1;
- u64 reserved_2_15 : 14;
- u64 ldf : 3;
- u64 reserved_19_23 : 5;
- u64 stcf : 3;
- u64 reserved_27_31 : 5;
- u64 drtf : 4;
- u64 reserved_36_47 : 12;
- u64 ildf : 4;
- u64 reserved_52_63 : 12;
-#endif
- } s;
-};
-
-#define ZIP_CTL_CFG 0x0560ull
-
-/**
- * union zip_dbg_corex_inst - Represents the registers that reflect the status
- * of the current instruction that the ZIP core is executing or has executed.
- *
- * These registers are only for debug use.
- */
-union zip_dbg_corex_inst {
- u64 u_reg64;
- struct zip_dbg_corex_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_35_62 : 28;
- u64 qid : 3;
- u64 iid : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iid : 32;
- u64 qid : 3;
- u64 reserved_35_62 : 28;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_INST(u64 param1)
-{
- if (param1 <= 1)
- return 0x0640ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_INST: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_corex_sta - Represents registers that reflect the status of
- * the zip cores.
- *
- * They are for debug use only.
- */
-union zip_dbg_corex_sta {
- u64 u_reg64;
- struct zip_dbg_corex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_37_62 : 26;
- u64 ist : 5;
- u64 nie : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nie : 32;
- u64 ist : 5;
- u64 reserved_37_62 : 26;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_COREX_STA(u64 param1)
-{
- if (param1 <= 1)
- return 0x0680ull + (param1 & 1) * 0x8ull;
- pr_err("ZIP_DBG_COREX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_dbg_quex_sta - Represents registers that reflect the status of
- * the ZIP instruction queues.
- *
- * They are for debug use only.
- */
-union zip_dbg_quex_sta {
- u64 u_reg64;
- struct zip_dbg_quex_sta_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 busy : 1;
- u64 reserved_56_62 : 7;
- u64 rqwc : 24;
- u64 nii : 32;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 nii : 32;
- u64 rqwc : 24;
- u64 reserved_56_62 : 7;
- u64 busy : 1;
-#endif
- } s;
-};
-
-static inline u64 ZIP_DBG_QUEX_STA(u64 param1)
-{
- if (param1 <= 7)
- return 0x1800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_DBG_QUEX_STA: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_ecc_ctl - Represents the register that enables ECC for each
- * individual internal memory that requires ECC.
- *
- * For debug purposes, it can also flip one or two bits in the ECC data.
- */
-union zip_ecc_ctl {
- u64 u_reg64;
- struct zip_ecc_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_19_63 : 45;
- u64 vmem_cdis : 1;
- u64 vmem_fs : 2;
- u64 reserved_15_15 : 1;
- u64 idf1_cdis : 1;
- u64 idf1_fs : 2;
- u64 reserved_11_11 : 1;
- u64 idf0_cdis : 1;
- u64 idf0_fs : 2;
- u64 reserved_7_7 : 1;
- u64 gspf_cdis : 1;
- u64 gspf_fs : 2;
- u64 reserved_3_3 : 1;
- u64 iqf_cdis : 1;
- u64 iqf_fs : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 iqf_fs : 2;
- u64 iqf_cdis : 1;
- u64 reserved_3_3 : 1;
- u64 gspf_fs : 2;
- u64 gspf_cdis : 1;
- u64 reserved_7_7 : 1;
- u64 idf0_fs : 2;
- u64 idf0_cdis : 1;
- u64 reserved_11_11 : 1;
- u64 idf1_fs : 2;
- u64 idf1_cdis : 1;
- u64 reserved_15_15 : 1;
- u64 vmem_fs : 2;
- u64 vmem_cdis : 1;
- u64 reserved_19_63 : 45;
-#endif
- } s;
-};
-
-#define ZIP_ECC_CTL 0x0568ull
-
-/* NCB - zip_ecce_ena_w1c */
-union zip_ecce_ena_w1c {
- u64 u_reg64;
- struct zip_ecce_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1C 0x0598ull
-
-/* NCB - zip_ecce_ena_w1s */
-union zip_ecce_ena_w1s {
- u64 u_reg64;
- struct zip_ecce_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_ENA_W1S 0x0590ull
-
-/**
- * union zip_ecce_int - Represents the register that contains the status of the
- * ECC interrupt sources.
- */
-union zip_ecce_int {
- u64 u_reg64;
- struct zip_ecce_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT 0x0580ull
-
-/* NCB - zip_ecce_int_w1s */
-union zip_ecce_int_w1s {
- u64 u_reg64;
- struct zip_ecce_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_37_63 : 27;
- u64 dbe : 5;
- u64 reserved_5_31 : 27;
- u64 sbe : 5;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 sbe : 5;
- u64 reserved_5_31 : 27;
- u64 dbe : 5;
- u64 reserved_37_63 : 27;
-#endif
- } s;
-};
-
-#define ZIP_ECCE_INT_W1S 0x0588ull
-
-/* NCB - zip_fife_ena_w1c */
-union zip_fife_ena_w1c {
- u64 u_reg64;
- struct zip_fife_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1C 0x0090ull
-
-/* NCB - zip_fife_ena_w1s */
-union zip_fife_ena_w1s {
- u64 u_reg64;
- struct zip_fife_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_ENA_W1S 0x0088ull
-
-/* NCB - zip_fife_int */
-union zip_fife_int {
- u64 u_reg64;
- struct zip_fife_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT 0x0078ull
-
-/* NCB - zip_fife_int_w1s */
-union zip_fife_int_w1s {
- u64 u_reg64;
- struct zip_fife_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_42_63 : 22;
- u64 asserts : 42;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 asserts : 42;
- u64 reserved_42_63 : 22;
-#endif
- } s;
-};
-
-#define ZIP_FIFE_INT_W1S 0x0080ull
-
-/**
- * union zip_msix_pbax - Represents the register that is the MSI-X PBA table
- *
- * The bit number is indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_pbax {
- u64 u_reg64;
- struct zip_msix_pbax_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 pend : 64;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pend : 64;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_PBAX(u64 param1)
-{
- if (param1 == 0)
- return 0x0000838000FF0000ull;
- pr_err("ZIP_MSIX_PBAX: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_addr - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_addr {
- u64 u_reg64;
- struct zip_msix_vecx_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 addr : 47;
- u64 reserved_1_1 : 1;
- u64 secvec : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 secvec : 1;
- u64 reserved_1_1 : 1;
- u64 addr : 47;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_ADDR(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00000ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_msix_vecx_ctl - Represents the register that is the MSI-X vector
- * table, indexed by the ZIP_INT_VEC_E enumeration.
- */
-union zip_msix_vecx_ctl {
- u64 u_reg64;
- struct zip_msix_vecx_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_33_63 : 31;
- u64 mask : 1;
- u64 reserved_20_31 : 12;
- u64 data : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 data : 20;
- u64 reserved_20_31 : 12;
- u64 mask : 1;
- u64 reserved_33_63 : 31;
-#endif
- } s;
-};
-
-static inline u64 ZIP_MSIX_VECX_CTL(u64 param1)
-{
- if (param1 <= 17)
- return 0x0000838000F00008ull + (param1 & 31) * 0x10ull;
- pr_err("ZIP_MSIX_VECX_CTL: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done - Represents the registers that contain the per-queue
- * instruction done count.
- */
-union zip_quex_done {
- u64 u_reg64;
- struct zip_quex_done_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE(u64 param1)
-{
- if (param1 <= 7)
- return 0x2000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ack - Represents the registers that, when written,
- * decrement the per-queue instruction done count.
- */
-union zip_quex_done_ack {
- u64 u_reg64;
- struct zip_quex_done_ack_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 done_ack : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ack : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ACK(u64 param1)
-{
- if (param1 <= 7)
- return 0x2200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ACK: %llu\n", param1);
- return 0;
-}
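
Together, DONE and DONE_ACK form a counting handshake: the handler reads
the pending done count, services that many completions, and writes the
same value back to DONE_ACK to decrement the counter. A sketch, with the
ioremapped base again assumed:

/* Illustrative sketch only: acknowledge all completed instructions. */
static u32 zip_ack_done(void __iomem *reg_base, u64 queue)
{
	union zip_quex_done done;

	done.u_reg64 = readq(reg_base + ZIP_QUEX_DONE(queue));
	if (done.s.done)
		writeq(done.s.done, reg_base + ZIP_QUEX_DONE_ACK(queue));

	return done.s.done;
}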
-
-/**
- * union zip_quex_done_ena_w1c - Represents the register that, when written
- * with 1, disables the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1c {
- u64 u_reg64;
- struct zip_quex_done_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x2600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_ena_w1s - Represents the register that, when written
- * with 1, enables the DONEINT interrupt for the queue.
- */
-union zip_quex_done_ena_w1s {
- u64 u_reg64;
- struct zip_quex_done_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_1_63 : 63;
- u64 done_ena : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 done_ena : 1;
- u64 reserved_1_63 : 63;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x2400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_done_wait - Represents the register that specifies the
- * per-queue interrupt coalescing settings.
- */
-union zip_quex_done_wait {
- u64 u_reg64;
- struct zip_quex_done_wait_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_48_63 : 16;
- u64 time_wait : 16;
- u64 reserved_20_31 : 12;
- u64 num_wait : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 num_wait : 20;
- u64 reserved_20_31 : 12;
- u64 time_wait : 16;
- u64 reserved_48_63 : 16;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DONE_WAIT(u64 param1)
-{
- if (param1 <= 7)
- return 0x2800ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DONE_WAIT: %llu\n", param1);
- return 0;
-}
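
NUM_WAIT and TIME_WAIT trade interrupt rate for latency: DONEINT fires
once the done count reaches num_wait or once the wait timer expires,
whichever comes first. A configuration sketch (the thresholds are
arbitrary example values):

/* Illustrative sketch only: coalesce up to 16 completions per DONEINT. */
static void zip_set_coalescing(void __iomem *reg_base, u64 queue)
{
	union zip_quex_done_wait wait = { .u_reg64 = 0 };

	wait.s.num_wait = 16;	/* interrupt after 16 completions... */
	wait.s.time_wait = 100;	/* ...or when the wait timer expires */
	writeq(wait.u_reg64, reg_base + ZIP_QUEX_DONE_WAIT(queue));
}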
-
-/**
- * union zip_quex_doorbell - Represents doorbell registers for the ZIP
- * instruction queues.
- */
-union zip_quex_doorbell {
- u64 u_reg64;
- struct zip_quex_doorbell_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_20_63 : 44;
- u64 dbell_cnt : 20;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dbell_cnt : 20;
- u64 reserved_20_63 : 44;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_DOORBELL(u64 param1)
-{
- if (param1 <= 7)
- return 0x4000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_DOORBELL: %llu\n", param1);
- return 0;
-}
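
Submission finishes by writing the number of newly queued instructions
into the queue's doorbell; the engine then fetches that many entries
from the instruction buffer. A sketch under the same assumed register
base:

/* Illustrative sketch only: publish 'count' new instructions. */
static void zip_ring_doorbell(void __iomem *reg_base, u64 queue, u32 count)
{
	union zip_quex_doorbell db = { .u_reg64 = 0 };

	db.s.dbell_cnt = count;
	writeq(db.u_reg64, reg_base + ZIP_QUEX_DOORBELL(queue));
}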
-
-union zip_quex_err_ena_w1c {
- u64 u_reg64;
- struct zip_quex_err_ena_w1c_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1C(u64 param1)
-{
- if (param1 <= 7)
- return 0x3600ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1C: %llu\n", param1);
- return 0;
-}
-
-union zip_quex_err_ena_w1s {
- u64 u_reg64;
- struct zip_quex_err_ena_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_ENA_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_ENA_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_err_int - Represents registers that contain the per-queue
- * error interrupts.
- */
-union zip_quex_err_int {
- u64 u_reg64;
- struct zip_quex_err_int_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT(u64 param1)
-{
- if (param1 <= 7)
- return 0x3000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT: %llu\n", param1);
- return 0;
-}
-
-/* NCB - zip_que#_err_int_w1s */
-union zip_quex_err_int_w1s {
- u64 u_reg64;
- struct zip_quex_err_int_w1s_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_5_63 : 59;
- u64 mdbe : 1;
- u64 nwrp : 1;
- u64 nrrp : 1;
- u64 irde : 1;
- u64 dovf : 1;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 dovf : 1;
- u64 irde : 1;
- u64 nrrp : 1;
- u64 nwrp : 1;
- u64 mdbe : 1;
- u64 reserved_5_63 : 59;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_ERR_INT_W1S(u64 param1)
-{
- if (param1 <= 7)
- return 0x3200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_ERR_INT_W1S: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_gcfg - Represents the registers that reflect the status of
- * the ZIP instruction queues; debug use only.
- */
-union zip_quex_gcfg {
- u64 u_reg64;
- struct zip_quex_gcfg_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_4_63 : 60;
- u64 iqb_ldwb : 1;
- u64 cbw_sty : 1;
- u64 l2ld_cmd : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 l2ld_cmd : 2;
- u64 cbw_sty : 1;
- u64 iqb_ldwb : 1;
- u64 reserved_4_63 : 60;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_GCFG(u64 param1)
-{
- if (param1 <= 7)
- return 0x1A00ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_GCFG: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_map - Represents the registers that control how each
- * instruction queue maps to zip cores.
- */
-union zip_quex_map {
- u64 u_reg64;
- struct zip_quex_map_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_2_63 : 62;
- u64 zce : 2;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 zce : 2;
- u64 reserved_2_63 : 62;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_MAP(u64 param1)
-{
- if (param1 <= 7)
- return 0x1400ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_MAP: %llu\n", param1);
- return 0;
-}
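
With two ZIP cores and a 2-bit zce field, reading zce as a per-core
service mask is a natural assumption, though this header does not define
the encoding. A sketch on that assumption:

/* Illustrative sketch only: let a queue be serviced by both cores,
 * assuming zce is one enable bit per core.
 */
static void zip_map_queue_to_cores(void __iomem *reg_base, u64 queue)
{
	union zip_quex_map map = { .u_reg64 = 0 };

	map.s.zce = 0x3;	/* assumed: bit 0 = core 0, bit 1 = core 1 */
	writeq(map.u_reg64, reg_base + ZIP_QUEX_MAP(queue));
}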
-
-/**
- * union zip_quex_sbuf_addr - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed after SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_CTL.
- */
-union zip_quex_sbuf_addr {
- u64 u_reg64;
- struct zip_quex_sbuf_addr_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_49_63 : 15;
- u64 ptr : 42;
- u64 off : 7;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 off : 7;
- u64 ptr : 42;
- u64 reserved_49_63 : 15;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_ADDR(u64 param1)
-{
- if (param1 <= 7)
- return 0x1000ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_ADDR: %llu\n", param1);
- return 0;
-}
-
-/**
- * union zip_quex_sbuf_ctl - Represents the registers that set the buffer
- * parameters for the instruction queues.
- *
- * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite
- * this register to effectively reset the command buffer state machine.
- * These registers must be programmed before SW programs the corresponding
- * ZIP_QUE(0..7)_SBUF_ADDR.
- */
-union zip_quex_sbuf_ctl {
- u64 u_reg64;
- struct zip_quex_sbuf_ctl_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_45_63 : 19;
- u64 size : 13;
- u64 inst_be : 1;
- u64 reserved_24_30 : 7;
- u64 stream_id : 8;
- u64 reserved_12_15 : 4;
- u64 aura : 12;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 aura : 12;
- u64 reserved_12_15 : 4;
- u64 stream_id : 8;
- u64 reserved_24_30 : 7;
- u64 inst_be : 1;
- u64 size : 13;
- u64 reserved_45_63 : 19;
-#endif
- } s;
-};
-
-static inline u64 ZIP_QUEX_SBUF_CTL(u64 param1)
-{
- if (param1 <= 7)
- return 0x1200ull + (param1 & 7) * 0x8ull;
- pr_err("ZIP_QUEX_SBUF_CTL: %llu\n", param1);
- return 0;
-}
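
The comments on the two SBUF registers pin down a strict programming
order: CTL first, then ADDR. A sketch honoring that order; the size
units and the 128-byte alignment of the buffer are assumptions here:

/* Illustrative sketch only: program one queue's instruction buffer.
 * Per the comments above, SBUF_CTL must be written before SBUF_ADDR.
 */
static void zip_setup_queue_sbuf(void __iomem *reg_base, u64 queue,
				 u64 dma_addr, u16 size)
{
	union zip_quex_sbuf_ctl ctl = { .u_reg64 = 0 };
	union zip_quex_sbuf_addr addr = { .u_reg64 = 0 };

	ctl.s.size = size;		/* buffer size, hardware-defined units */
	writeq(ctl.u_reg64, reg_base + ZIP_QUEX_SBUF_CTL(queue));

	addr.u_reg64 = dma_addr;	/* assumed 128-byte aligned, so off = 0 */
	writeq(addr.u_reg64, reg_base + ZIP_QUEX_SBUF_ADDR(queue));
}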
-
-/**
- * union zip_que_ena - Represents queue enable register
- *
- * If a queue is disabled, ZIP_CTL stops fetching instructions from the queue.
- */
-union zip_que_ena {
- u64 u_reg64;
- struct zip_que_ena_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 ena : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ena : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_ENA 0x0500ull
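
The ena field is one enable bit per queue; clearing a bit stops
instruction fetch for that queue, as the comment above notes. A
read-modify-write sketch to enable a single queue:

/* Illustrative sketch only: enable instruction fetch for one queue. */
static void zip_enable_queue(void __iomem *reg_base, unsigned int queue)
{
	union zip_que_ena ena;

	ena.u_reg64 = readq(reg_base + ZIP_QUE_ENA);
	ena.s.ena |= BIT(queue);
	writeq(ena.u_reg64, reg_base + ZIP_QUE_ENA);
}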
-
-/**
- * union zip_que_pri - Represents the register that defines the priority
- * between instruction queues.
- */
-union zip_que_pri {
- u64 u_reg64;
- struct zip_que_pri_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_8_63 : 56;
- u64 pri : 8;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 pri : 8;
- u64 reserved_8_63 : 56;
-#endif
- } s;
-};
-
-#define ZIP_QUE_PRI 0x0508ull
-
-/**
- * union zip_throttle - Represents the register that controls the maximum
- * number of in-flight X2I data fetch transactions.
- *
- * Writing 0 to this register causes the ZIP module to temporarily suspend NCB
- * accesses; it is not recommended for normal operation, but may be useful for
- * diagnostics.
- */
-union zip_throttle {
- u64 u_reg64;
- struct zip_throttle_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_6_63 : 58;
- u64 ld_infl : 6;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u64 ld_infl : 6;
- u64 reserved_6_63 : 58;
-#endif
- } s;
-};
-
-#define ZIP_THROTTLE 0x0010ull
-
-#endif /* __ZIP_REGS_H__ */
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index d11daaf47f06..685d42ec7ade 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -7,15 +7,16 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index afae30adb703..91b1189c47de 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -7,14 +7,15 @@
* Author: Gary R Hook <ghook@amd.com>
*/
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/des.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index ecd58b38c46e..bc90aba5162a 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -7,14 +7,17 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/ccp.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/ccp.h>
+#include <linux/module.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/akcipher.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index cb8e99936abb..109b5aef4034 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -8,13 +8,14 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/ccp.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include "ccp-dev.h"
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 2e87ca0e292a..3451bada884e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -33,6 +33,7 @@
#include <asm/cacheflush.h>
#include <asm/e820/types.h>
#include <asm/sev.h>
+#include <asm/msr.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -109,6 +110,15 @@ static void *sev_init_ex_buffer;
*/
static struct sev_data_range_list *snp_range_list;
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
+
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg);
+
+static struct notifier_block snp_panic_notifier = {
+ .notifier_call = snp_shutdown_on_panic,
+};
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -1060,7 +1070,7 @@ static inline int __sev_do_init_locked(int *psp_ret)
static void snp_set_hsave_pa(void *arg)
{
- wrmsrl(MSR_VM_HSAVE_PA, 0);
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
}
static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
@@ -1112,7 +1122,7 @@ static int __sev_snp_init_locked(int *error)
if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
- return 0;
+ return -EOPNOTSUPP;
}
/* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
@@ -1176,21 +1186,34 @@ static int __sev_snp_init_locked(int *error)
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(cmd, arg, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n",
+ cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT",
+ rc, *error);
return rc;
+ }
/* Prepare for first SNP guest launch after INIT. */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n",
+ rc, *error);
return rc;
+ }
sev->snp_initialized = true;
dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &snp_panic_notifier);
+
sev_es_tmr_size = SNP_TMR_SIZE;
- return rc;
+ return 0;
}
static void __sev_platform_init_handle_tmr(struct sev_device *sev)
@@ -1287,16 +1310,22 @@ static int __sev_platform_init_locked(int *error)
if (error)
*error = psp_ret;
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n",
+ sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc);
return rc;
+ }
sev->state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
+ *error, rc);
return rc;
+ }
dev_dbg(sev->dev, "SEV firmware initialized\n");
@@ -1319,19 +1348,9 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (sev->state == SEV_STATE_INIT)
return 0;
- /*
- * Legacy guests cannot be running while SNP_INIT(_EX) is executing,
- * so perform SEV-SNP initialization at probe time.
- */
rc = __sev_snp_init_locked(&args->error);
- if (rc && rc != -ENODEV) {
- /*
- * Don't abort the probe if SNP INIT failed,
- * continue to initialize the legacy SEV firmware.
- */
- dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n",
- rc, args->error);
- }
+ if (rc && rc != -ENODEV)
+ return rc;
/* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
if (args->probe && !psp_init_on_probe)
@@ -1367,8 +1386,11 @@ static int __sev_platform_shutdown_locked(int *error)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
+ if (ret) {
+ dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n",
+ *error, ret);
return ret;
+ }
sev->state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
@@ -1389,6 +1411,37 @@ static int sev_get_platform_state(int *state, int *error)
return rc;
}
+static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ struct sev_platform_init_args init_args = {0};
+ int rc;
+
+ rc = _sev_platform_init_locked(&init_args);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
+static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ int error, rc;
+
+ rc = __sev_snp_init_locked(&error);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
+}
+
static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
int state, rc;
@@ -1441,24 +1494,31 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
int rc;
if (!writable)
return -EPERM;
if (sev->state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
+ rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
}
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+ rc = __sev_do_cmd_locked(cmd, NULL, &argp->error);
+
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
+ return rc;
}
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
+ bool shutdown_required = false;
struct sev_data_pek_csr data;
void __user *input_address;
void *blob = NULL;
@@ -1490,7 +1550,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
cmd:
if (sev->state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
}
@@ -1511,6 +1571,9 @@ cmd:
}
e_free_blob:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(blob);
return ret;
}
@@ -1682,9 +1745,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
/* SHUTDOWN may require DF_FLUSH */
if (*error == SEV_RET_DFFLUSH_REQUIRED) {
- ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL);
+ int dfflush_error = SEV_RET_NO_FW_CALL;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error);
if (ret) {
- dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n");
+ dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n",
+ ret, dfflush_error);
return ret;
}
/* reissue the shutdown command */
@@ -1692,7 +1758,8 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
error);
}
if (ret) {
- dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n");
+ dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n",
+ ret, *error);
return ret;
}
@@ -1718,6 +1785,12 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
+
+ /* Reset TMR size back to default */
+ sev_es_tmr_size = SEV_TMR_SIZE;
+
return ret;
}
@@ -1726,6 +1799,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
struct sev_data_pek_cert_import data;
+ bool shutdown_required = false;
void *pek_blob, *oca_blob;
int ret;
@@ -1756,7 +1830,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
/* If platform is not in INIT state then transition it to INIT */
if (sev->state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
}
@@ -1764,6 +1838,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(oca_blob);
e_free_pek:
kfree(pek_blob);
@@ -1880,32 +1957,23 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_data_pdh_cert_export data;
void __user *input_cert_chain_address;
void __user *input_pdh_cert_address;
+ bool shutdown_required = false;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
- if (!writable)
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- return ret;
- }
-
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
memset(&data, 0, sizeof(data));
+ input_pdh_cert_address = (void __user *)input.pdh_cert_address;
+ input_cert_chain_address = (void __user *)input.cert_chain_address;
+
/* Userspace wants to query the certificate length. */
if (!input.pdh_cert_address ||
!input.pdh_cert_len ||
!input.cert_chain_address)
goto cmd;
- input_pdh_cert_address = (void __user *)input.pdh_cert_address;
- input_cert_chain_address = (void __user *)input.cert_chain_address;
-
/* Allocate a physically contiguous buffer to store the PDH blob. */
if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
return -EFAULT;
@@ -1931,6 +1999,17 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
data.cert_chain_len = input.cert_chain_len;
cmd:
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->state != SEV_STATE_INIT) {
+ if (!writable) {
+ ret = -EPERM;
+ goto e_free_cert;
+ }
+ ret = sev_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto e_free_cert;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -1957,6 +2036,9 @@ cmd:
}
e_free_cert:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(cert_blob);
e_free_pdh:
kfree(pdh_blob);
@@ -1966,12 +2048,13 @@ e_free_pdh:
static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
struct sev_data_snp_addr buf;
struct page *status_page;
+ int ret, error;
void *data;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
status_page = alloc_page(GFP_KERNEL_ACCOUNT);
@@ -1980,6 +2063,12 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
data = page_address(status_page);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
/*
* Firmware expects status page to be in firmware-owned state, otherwise
* it will report firmware error code INVALID_PAGE_STATE (0x1A).
@@ -2008,6 +2097,9 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
ret = -EFAULT;
cleanup:
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
__free_pages(status_page, 0);
return ret;
}
@@ -2016,21 +2108,33 @@ static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_data_snp_commit buf;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized)
- return -EINVAL;
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
buf.len = sizeof(buf);
- return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_config config;
+ bool shutdown_required = false;
+ int ret, error;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2039,17 +2143,29 @@ static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable
if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
return -EFAULT;
- return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
}
static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_snp_vlek_load input;
+ bool shutdown_required = false;
+ int ret, error;
void *blob;
- int ret;
- if (!sev->snp_initialized || !argp->data)
+ if (!argp->data)
return -EINVAL;
if (!writable)
@@ -2068,8 +2184,18 @@ static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
input.vlek_wrapped_address = __psp_pa(blob);
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error);
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+cleanup:
kfree(blob);
return ret;
@@ -2339,6 +2465,15 @@ static void sev_firmware_shutdown(struct sev_device *sev)
mutex_unlock(&sev_cmd_mutex);
}
+void sev_platform_shutdown(void)
+{
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ sev_firmware_shutdown(psp_master->sev_data);
+}
+EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
@@ -2373,10 +2508,6 @@ static int snp_shutdown_on_panic(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block snp_panic_notifier = {
- .notifier_call = snp_shutdown_on_panic,
-};
-
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
void *data, int *error)
{
@@ -2390,9 +2521,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- struct sev_platform_init_args args = {0};
u8 api_major, api_minor, build;
- int rc;
if (!sev)
return;
@@ -2415,18 +2544,6 @@ void sev_pci_init(void)
api_major, api_minor, build,
sev->api_major, sev->api_minor, sev->build);
- /* Initialize the platform */
- args.probe = true;
- rc = sev_platform_init(&args);
- if (rc)
- dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
- args.error, rc);
-
- dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ?
- "-SNP" : "", sev->api_major, sev->api_minor, sev->build);
-
- atomic_notifier_chain_register(&panic_notifier_list,
- &snp_panic_notifier);
return;
err:
@@ -2443,7 +2560,4 @@ void sev_pci_exit(void)
return;
sev_firmware_shutdown(sev);
-
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
}
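
The recurring shape in the sev-dev.c ioctl changes above is a transient
state transition: move the platform to INIT only when a command needs
it, issue the firmware command, then shut back down only if this call
performed the init. A condensed sketch of that pattern, where
sev_do_xyz_cmd and its command choice are hypothetical stand-ins:

/* Illustrative sketch only: the transient INIT -> command -> SHUTDOWN
 * pattern the sev_ioctl_do_*() handlers follow after this patch.
 */
static int sev_do_xyz_cmd(struct sev_device *sev, struct sev_issue_cmd *argp)
{
	bool shutdown_required = false;
	int rc;

	if (sev->state == SEV_STATE_UNINIT) {
		rc = sev_move_to_init_state(argp, &shutdown_required);
		if (rc)
			return rc;
	}

	rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, NULL, &argp->error);

	if (shutdown_required)		/* undo only what this call did */
		__sev_firmware_shutdown(sev, false);

	return rc;
}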
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 2ebc878da160..e1be2072d680 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -375,6 +375,7 @@ static const struct tee_vdata teev1 = {
static const struct tee_vdata teev2 = {
.ring_wptr_reg = 0x10950, /* C2PMSG_20 */
.ring_rptr_reg = 0x10954, /* C2PMSG_21 */
+ .info_reg = 0x109e8, /* C2PMSG_58 */
};
static const struct platform_access_vdata pa_v1 = {
@@ -440,6 +441,7 @@ static const struct psp_vdata pspv5 = {
.cmdresp_reg = 0x10944, /* C2PMSG_17 */
.cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
.cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
@@ -535,6 +537,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d3f5d108b898..7c41f9593d03 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -862,7 +862,7 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
return -EINVAL;
}
- algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL);
if (!algs)
return -ENOMEM;
@@ -5224,7 +5224,7 @@ static int qm_pre_store_caps(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(qm_cap_query_info);
- qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
+	qm_cap = devm_kcalloc(&pdev->dev, size, sizeof(*qm_cap), GFP_KERNEL);
if (!qm_cap)
return -ENOMEM;
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 1dc2378aa88b..e050f5ff5efb 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -491,8 +491,9 @@ static int img_hash_init(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -555,10 +556,10 @@ static int img_hash_update(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -570,9 +571,10 @@ static int img_hash_final(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -584,11 +586,12 @@ static int img_hash_finup(struct ahash_request *req)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
+
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -600,8 +603,9 @@ static int img_hash_import(struct ahash_request *req, const void *in)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -613,8 +617,9 @@ static int img_hash_export(struct ahash_request *req, void *out)
struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
- rctx->fallback_req.base.flags = req->base.flags
- & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
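
The img-hash change above swaps open-coded writes to the fallback
request's fields for the ahash request helpers, which also keeps the
completion callback wired through to the fallback. The idiom in
isolation, with the fallback tfm and request passed in rather than
pulled from the driver's ctx/rctx layout:

/* Illustrative sketch only: forward an ahash finup to a software
 * fallback via the request helpers instead of poking request fields.
 */
static int fwd_to_fallback(struct ahash_request *req,
			   struct crypto_ahash *fallback,
			   struct ahash_request *fb_req)
{
	ahash_request_set_tfm(fb_req, fallback);
	ahash_request_set_callback(fb_req,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);
	ahash_request_set_crypt(fb_req, req->src, req->result, req->nbytes);

	return crypto_ahash_finup(fb_req);
}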
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c
index df1b05ac5a57..ac13d90a2b7c 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-hash.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
@@ -97,12 +97,20 @@ void eip93_hash_handle_result(struct crypto_async_request *async, int err)
static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
{
- u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
- u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
- u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
- u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };
+ static const u32 sha256_init[] = {
+ SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+ SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+ };
+ static const u32 sha224_init[] = {
+ SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+ SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7
+ };
+ static const u32 sha1_init[] = {
+ SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4
+ };
+ static const u32 md5_init[] = {
+ MD5_H0, MD5_H1, MD5_H2, MD5_H3
+ };
/* Init HASH constant */
switch (hash) {
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index f44c08f5f5ec..d2b632193beb 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -2043,7 +2043,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = {
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
.cra_init = safexcel_ahash_cra_init,
.cra_exit = safexcel_ahash_cra_exit,
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 09d9589f2d68..23f585219fb4 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -725,7 +725,7 @@ static int alloc_wq_table(int max_wqs)
for (cpu = 0; cpu < nr_cpus; cpu++) {
entry = per_cpu_ptr(wq_table, cpu);
- entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL);
+ entry->wqs = kcalloc(max_wqs, sizeof(*entry->wqs), GFP_KERNEL);
if (!entry->wqs) {
free_wq_table();
return -ENOMEM;
@@ -894,7 +894,7 @@ out:
static void rebalance_wq_table(void)
{
const struct cpumask *node_cpus;
- int node, cpu, iaa = -1;
+ int node_cpu, node, cpu, iaa = 0;
if (nr_iaa == 0)
return;
@@ -905,36 +905,29 @@ static void rebalance_wq_table(void)
clear_wq_table();
if (nr_iaa == 1) {
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (WARN_ON(wq_table_add_wqs(0, cpu))) {
- pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
- return;
- }
+ for_each_possible_cpu(cpu) {
+ if (WARN_ON(wq_table_add_wqs(0, cpu)))
+ goto err;
}
return;
}
for_each_node_with_cpus(node) {
+ cpu = 0;
node_cpus = cpumask_of_node(node);
- for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
- int node_cpu = cpumask_nth(cpu, node_cpus);
-
- if (WARN_ON(node_cpu >= nr_cpu_ids)) {
- pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
- return;
- }
-
- if ((cpu % cpus_per_iaa) == 0)
- iaa++;
-
- if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
- pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
- return;
- }
+ for_each_cpu(node_cpu, node_cpus) {
+ iaa = cpu / cpus_per_iaa;
+ if (WARN_ON(wq_table_add_wqs(iaa, node_cpu)))
+ goto err;
+ cpu++;
}
}
+
+ return;
+err:
+ pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
}
static inline int check_completion(struct device *dev,
@@ -999,12 +992,9 @@ out:
static int deflate_generic_decompress(struct acomp_req *req)
{
- ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
+ ACOMP_FBREQ_ON_STACK(fbreq, req);
int ret;
- acomp_request_set_callback(fbreq, 0, NULL, NULL);
- acomp_request_set_params(fbreq, req->src, req->dst, req->slen,
- req->dlen);
ret = crypto_acomp_decompress(fbreq);
req->dlen = fbreq->dlen;
@@ -1020,8 +1010,7 @@ static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc);
+ dma_addr_t dst_addr, unsigned int *dlen);
static void iaa_desc_complete(struct idxd_desc *idxd_desc,
enum idxd_complete_type comp_type,
@@ -1087,10 +1076,10 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
if (ctx->compress && compression_ctx->verify_compress) {
+ u32 *compression_crc = acomp_request_ctx(ctx->req);
dma_addr_t src_addr, dst_addr;
- u32 compression_crc;
- compression_crc = idxd_desc->iax_completion->crc;
+ *compression_crc = idxd_desc->iax_completion->crc;
ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
if (ret) {
@@ -1100,8 +1089,7 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc,
}
ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
- ctx->req->slen, dst_addr, &ctx->req->dlen,
- compression_crc);
+ ctx->req->slen, dst_addr, &ctx->req->dlen);
if (ret) {
dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
err = -EIO;
@@ -1130,11 +1118,11 @@ out:
static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 *compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1187,8 +1175,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: compression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1282,11 +1269,11 @@ out:
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- u32 compression_crc)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 *compression_crc = acomp_request_ctx(req);
struct iaa_device *iaa_device;
struct idxd_desc *idxd_desc;
struct iax_hw_desc *desc;
@@ -1346,10 +1333,10 @@ static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
goto err;
}
- if (compression_crc != idxd_desc->iax_completion->crc) {
+ if (*compression_crc != idxd_desc->iax_completion->crc) {
ret = -EINVAL;
dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
- " comp=0x%x, decomp=0x%x\n", compression_crc,
+ " comp=0x%x, decomp=0x%x\n", *compression_crc,
idxd_desc->iax_completion->crc);
print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
8, 1, idxd_desc->iax_completion, 64, 0);
@@ -1369,8 +1356,7 @@ err:
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
struct idxd_wq *wq,
dma_addr_t src_addr, unsigned int slen,
- dma_addr_t dst_addr, unsigned int *dlen,
- bool disable_async)
+ dma_addr_t dst_addr, unsigned int *dlen)
{
struct iaa_device_compression_mode *active_compression_mode;
struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1412,7 +1398,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
desc->src1_size = slen;
desc->completion_addr = idxd_desc->compl_dma;
- if (ctx->use_irq && !disable_async) {
+ if (ctx->use_irq) {
desc->flags |= IDXD_OP_FLAG_RCI;
idxd_desc->crypto.req = req;
@@ -1425,8 +1411,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
" src_addr %llx, dst_addr %llx\n", __func__,
active_compression_mode->name,
src_addr, dst_addr);
- } else if (ctx->async_mode && !disable_async)
- req->base.data = idxd_desc;
+ }
dev_dbg(dev, "%s: decompression mode %s,"
" desc->src1_addr %llx, desc->src1_size %d,"
@@ -1446,7 +1431,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
update_total_decomp_calls();
update_wq_decomp_calls(wq);
- if (ctx->async_mode && !disable_async) {
+ if (ctx->async_mode) {
ret = -EINPROGRESS;
dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
goto out;
@@ -1474,7 +1459,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
*dlen = req->dlen;
- if (!ctx->async_mode || disable_async)
+ if (!ctx->async_mode)
idxd_free_desc(wq, idxd_desc);
/* Update stats */
@@ -1496,7 +1481,6 @@ static int iaa_comp_acompress(struct acomp_req *req)
dma_addr_t src_addr, dst_addr;
int nr_sgs, cpu, ret = 0;
struct iaa_wq *iaa_wq;
- u32 compression_crc;
struct idxd_wq *wq;
struct device *dev;
@@ -1557,7 +1541,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
- &req->dlen, &compression_crc);
+ &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1569,7 +1553,7 @@ static int iaa_comp_acompress(struct acomp_req *req)
}
ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, compression_crc);
+ dst_addr, &req->dlen);
if (ret)
dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
@@ -1655,7 +1639,7 @@ static int iaa_comp_adecompress(struct acomp_req *req)
req->dst, req->dlen, sg_dma_len(req->dst));
ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
- dst_addr, &req->dlen, false);
+ dst_addr, &req->dlen);
if (ret == -EINPROGRESS)
return ret;
@@ -1699,6 +1683,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
.cra_driver_name = "deflate-iaa",
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct iaa_compression_ctx),
+ .cra_reqsize = sizeof(u32),
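+ /* per-request context: holds the compression CRC for the verify pass */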
.cra_module = THIS_MODULE,
.cra_priority = IAA_ALG_PRIORITY,
}
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 02fb8abe4e6e..359c61f0c8a1 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -70,6 +70,18 @@ config CRYPTO_DEV_QAT_420XX
To compile this as a module, choose M here: the module
will be called qat_420xx.
+config CRYPTO_DEV_QAT_6XXX
+ tristate "Support for Intel(R) QuickAssist Technology QAT_6XXX"
+ depends on (X86 || COMPILE_TEST)
+ depends on PCI
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) QuickAssist Technology QAT_6xxx
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_6xxx.
+
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 235b69f4f3f7..abef14207afa 100644
--- a/drivers/crypto/intel/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+subdir-ccflags-y := -I$(src)/qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
index 72b24b1804cf..f6df54d2993e 100644
--- a/drivers/crypto/intel/qat/qat_420xx/Makefile
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
qat_420xx-y := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 4feeef83f7a3..7c3c0f561c95 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -9,15 +9,14 @@
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
-#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
-#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
+#include <adf_timer.h>
#include "adf_420xx_hw_data.h"
#include "icp_qat_hw.h"
@@ -93,7 +92,6 @@ static const struct adf_fw_config adf_fw_dcc_config[] = {
static struct adf_hw_device_class adf_420xx_class = {
.name = ADF_420XX_DEVICE_NAME,
.type = DEV_420XX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
@@ -469,8 +467,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
- hw_data->start_timer = adf_gen4_timer_start;
- hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index 8084aa0f7f41..cfa00daeb4fb 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -14,7 +14,7 @@
#include "adf_420xx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_420XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -186,11 +186,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_420XX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile
index e8480bb80dee..188b611445e6 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o
qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 4eb6ef99efdd..bd0b1b1015c0 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -9,15 +9,14 @@
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
-#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
-#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
+#include <adf_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -96,7 +95,6 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
- .instances = 0,
};
static u32 get_ae_mask(struct adf_hw_device_data *self)
@@ -422,13 +420,13 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->num_rps = ADF_GEN4_MAX_RPS;
switch (dev_id) {
- case ADF_402XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
hw_data->get_ena_thd_mask = get_ena_thd_mask;
break;
- case ADF_401XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
hw_data->fw_name = ADF_4XXX_FW;
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->uof_get_name = uof_get_name_4xxx;
@@ -455,8 +453,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
- hw_data->start_timer = adf_gen4_timer_start;
- hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 5537a9991e4e..c9be5dcddb27 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -14,9 +14,9 @@
#include "adf_4xxx_hw_data.h"
static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), },
- { PCI_VDEVICE(INTEL, ADF_402XX_PCI_DEVICE_ID), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_4XXX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_401XX) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_402XX) },
{ }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -188,11 +188,19 @@ static void adf_remove(struct pci_dev *pdev)
adf_cleanup_accel(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_4XXX_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
+ .shutdown = adf_shutdown,
.sriov_configure = adf_sriov_configure,
.err_handler = &adf_err_handler,
};
diff --git a/drivers/crypto/intel/qat/qat_6xxx/Makefile b/drivers/crypto/intel/qat/qat_6xxx/Makefile
new file mode 100644
index 000000000000..4b4de67cb0c2
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_QAT_6XXX) += qat_6xxx.o
+qat_6xxx-y := adf_drv.o adf_6xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
new file mode 100644
index 000000000000..359a6447ccb8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <adf_accel_devices.h>
+#include <adf_admin.h>
+#include <adf_cfg.h>
+#include <adf_cfg_services.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen6_pm.h>
+#include <adf_gen6_ras.h>
+#include <adf_gen6_shared.h>
+#include <adf_timer.h>
+#include "adf_6xxx_hw_data.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_51_comp.h"
+
+#define RP_GROUP_0_MASK (BIT(0) | BIT(2))
+#define RP_GROUP_1_MASK (BIT(1) | BIT(3))
+#define RP_GROUP_ALL_MASK (RP_GROUP_0_MASK | RP_GROUP_1_MASK)
+
+#define ADF_AE_GROUP_0 GENMASK(3, 0)
+#define ADF_AE_GROUP_1 GENMASK(7, 4)
+#define ADF_AE_GROUP_2 BIT(8)
+
+struct adf_ring_config {
+ u32 ring_mask;
+ enum adf_cfg_service_type ring_type;
+ const unsigned long *thrd_mask;
+};
+
+static u32 rmask_two_services[] = {
+ RP_GROUP_0_MASK,
+ RP_GROUP_1_MASK,
+};
+
+enum adf_gen6_rps {
+ RP0 = 0,
+ RP1 = 1,
+ RP2 = 2,
+ RP3 = 3,
+ RP_MAX = RP3
+};
+
+/*
+ * thrd_mask_[sym|asym|cpr|dcc]: these static arrays define the thread
+ * configuration for handling requests of specific services across the
+ * accelerator engines. Each element in an array corresponds to an
+ * accelerator engine, with the value being a bitmask that specifies which
+ * threads within that engine are capable of processing the particular service.
+ *
+ * For example, a value of 0x0C means that threads 2 and 3 are enabled for the
+ * service in the respective accelerator engine.
+ */
+static const unsigned long thrd_mask_sym[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x1C, 0x1C, 0x1C, 0x00
+};
+
+static const unsigned long thrd_mask_asym[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x70, 0x70, 0x70, 0x70, 0x60, 0x60, 0x60, 0x60, 0x00
+};
+
+static const unsigned long thrd_mask_cpr[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00
+};
+
+static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00
+};
+
+static const char *const adf_6xxx_fw_objs[] = {
+ [ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ,
+};
+
+static const struct adf_fw_config adf_default_fw_config[] = {
+ { ADF_AE_GROUP_1, ADF_FW_DC_OBJ },
+ { ADF_AE_GROUP_0, ADF_FW_CY_OBJ },
+ { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
+};
+
+static struct adf_hw_device_class adf_6xxx_class = {
+ .name = ADF_6XXX_DEVICE_NAME,
+ .type = DEV_6XXX,
+};
+
+static bool services_supported(unsigned long mask)
+{
+ int num_svc;
+
+ if (mask >= BIT(SVC_BASE_COUNT))
+ return false;
+
+ num_svc = hweight_long(mask);
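+ /* dcc (chaining) is only supported as the sole enabled service */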
+ switch (num_svc) {
+ case ADF_ONE_SERVICE:
+ return true;
+ case ADF_TWO_SERVICES:
+ case ADF_THREE_SERVICES:
+ return !test_bit(SVC_DCC, &mask);
+ default:
+ return false;
+ }
+}
+
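+/*
+ * Pop the highest-priority service from @mask; the fixed order (asym,
+ * sym, dc, dcc) determines which ring-pair group each service is given.
+ */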
+static int get_service(unsigned long *mask)
+{
+ if (test_and_clear_bit(SVC_ASYM, mask))
+ return SVC_ASYM;
+
+ if (test_and_clear_bit(SVC_SYM, mask))
+ return SVC_SYM;
+
+ if (test_and_clear_bit(SVC_DC, mask))
+ return SVC_DC;
+
+ if (test_and_clear_bit(SVC_DCC, mask))
+ return SVC_DCC;
+
+ return -EINVAL;
+}
+
+static enum adf_cfg_service_type get_ring_type(enum adf_services service)
+{
+ switch (service) {
+ case SVC_SYM:
+ return SYM;
+ case SVC_ASYM:
+ return ASYM;
+ case SVC_DC:
+ case SVC_DCC:
+ return COMP;
+ default:
+ return UNUSED;
+ }
+}
+
+static const unsigned long *get_thrd_mask(enum adf_services service)
+{
+ switch (service) {
+ case SVC_SYM:
+ return thrd_mask_sym;
+ case SVC_ASYM:
+ return thrd_mask_asym;
+ case SVC_DC:
+ return thrd_mask_cpr;
+ case SVC_DCC:
+ return thrd_mask_dcc;
+ default:
+ return NULL;
+ }
+}
+
+static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config *rp_config,
+ unsigned int *num_services)
+{
+ unsigned int i, nservices;
+ unsigned long mask;
+ int ret, service;
+
+ ret = adf_get_service_mask(accel_dev, &mask);
+ if (ret)
+ return ret;
+
+ nservices = hweight_long(mask);
+ if (nservices > MAX_NUM_CONCURR_SVC)
+ return -EINVAL;
+
+ for (i = 0; i < nservices; i++) {
+ service = get_service(&mask);
+ if (service < 0)
+ return service;
+
+ rp_config[i].ring_type = get_ring_type(service);
+ rp_config[i].thrd_mask = get_thrd_mask(service);
+
+ /*
+ * If there is only one service enabled, use all ring pairs for
+ * that service.
+ * If there are two services enabled, use ring pairs 0 and 2 for
+ * one service and ring pairs 1 and 3 for the other service.
+ */
+ switch (nservices) {
+ case ADF_ONE_SERVICE:
+ rp_config[i].ring_mask = RP_GROUP_ALL_MASK;
+ break;
+ case ADF_TWO_SERVICES:
+ rp_config[i].ring_mask = rmask_two_services[i];
+ break;
+ case ADF_THREE_SERVICES:
+ rp_config[i].ring_mask = BIT(i);
+
+ /* If ASYM is enabled, use additional ring pair */
+ if (service == SVC_ASYM)
+ rp_config[i].ring_mask |= BIT(RP3);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ *num_services = nservices;
+
+ return 0;
+}
+
+static u32 adf_gen6_get_arb_mask(struct adf_accel_dev *accel_dev, unsigned int ae)
+{
+ struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
+ unsigned int num_services, i, thrd;
+ u32 ring_mask, thd2arb_mask = 0;
+ const unsigned long *p_mask;
+
+ if (get_rp_config(accel_dev, rp_config, &num_services))
+ return 0;
+
+ /*
+ * The thd2arb_mask maps ring pairs to threads within an accelerator engine.
+ * It ensures that jobs submitted to ring pairs are scheduled on threads capable
+ * of handling the specified service type.
+ *
+ * Each group of 4 bits in the mask corresponds to a thread, with each bit
+ * indicating whether a job from a ring pair can be scheduled on that thread.
+ * The use of 4 bits is due to the organization of ring pairs into groups of
+ * four, where each group shares the same configuration.
+ */
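+ /*
+ * Worked example (hypothetical values): a service owning ring pairs
+ * 0 and 2 (ring_mask 0x5) on threads 2 and 3 (thrd_mask 0x0C) yields
+ * (0x5 << 8) | (0x5 << 12) == 0x5500.
+ */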
+ for (i = 0; i < num_services; i++) {
+ p_mask = &rp_config[i].thrd_mask[ae];
+ ring_mask = rp_config[i].ring_mask;
+
+ for_each_set_bit(thrd, p_mask, ADF_NUM_THREADS_PER_AE)
+ thd2arb_mask |= ring_mask << (thrd * 4);
+ }
+
+ return thd2arb_mask;
+}
+
+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ enum adf_cfg_service_type rps[ADF_GEN6_NUM_BANKS_PER_VF] = { };
+ struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
+ unsigned int num_services, rp_num, i;
+ unsigned long cfg_mask;
+ u16 ring_to_svc_map;
+
+ if (get_rp_config(accel_dev, rp_config, &num_services))
+ return 0;
+
+ /*
+ * Loop through the configured services and populate the `rps` array, which
+ * records the service each particular ring pair can handle (i.e. symmetric
+ * crypto, asymmetric crypto, data compression or compression chaining).
+ */
+ for (i = 0; i < num_services; i++) {
+ cfg_mask = rp_config[i].ring_mask;
+ for_each_set_bit(rp_num, &cfg_mask, ADF_GEN6_NUM_BANKS_PER_VF)
+ rps[rp_num] = rp_config[i].ring_type;
+ }
+
+ /*
+ * The ring_to_svc_map is structured into segments of 3 bits, with each
+ * segment representing the service configuration for a specific ring pair.
+ * Since ring pairs are organized into groups of 4, the map contains 4
+ * such 3-bit segments, each corresponding to one ring pair.
+ *
+ * The device has 64 ring pairs, which are organized in groups of 4, namely
+ * 16 groups. Each group has the same configuration, represented here by
+ * `ring_to_svc_map`.
+ */
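+ /*
+ * E.g. with asym on ring pairs 0/2 and sym on ring pairs 1/3, the four
+ * 3-bit fields pack as ASYM, SYM, ASYM, SYM (lowest field first).
+ */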
+ ring_to_svc_map = rps[RP0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP2] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP3] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+
+static u32 get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_ACCELERATORS_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+ return self ? hweight32(self->ae_mask) : 0;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_ETR_BAR;
+}
+
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_SRAM_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_1;
+}
+
+static void get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_GEN6_ARB_CONFIG;
+ arb_info->arb_offset = ADF_GEN6_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET;
+}
+
+static void get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_GEN6_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_GEN6_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_GEN6_ADMINMSGLR_OFFSET;
+}
+
+static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ return ADF_GEN6_COUNTER_FREQ;
+}
+
+static void enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ /*
+ * Enable all error notification bits in errsou3 except VFLR
+ * notification on host.
+ */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_VFLNOTIFY);
+}
+
+static void enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+
+ /* Enable bundle interrupts */
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET, 0);
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+ /* Enable misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_MASK_OFFSET, 0);
+}
+
+static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u64 val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u64 val = ADF_SSM_WDT_DEFAULT_VALUE;
+
+ /* Enable watchdog timer for sym and dc */
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTATHL_OFFSET, ADF_SSMWDTATHH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val);
+
+ /* Enable watchdog timer for pke */
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke);
+}
+
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_GEN6_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ unsigned int i;
+
+ for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++)
+ ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), i);
+}
+
+static int reset_ring_pair(void __iomem *csr, u32 bank_number)
+{
+ u32 status;
+ int ret;
+
+ /*
+ * Write rpresetctl register BIT(0) as 1.
+ * Since rpresetctl registers have no RW fields, no need to preserve
+ * values for other bits. Just write directly.
+ */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_RESET);
+
+ /* Read rpresetsts register and wait for rp reset to complete */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US,
+ ADF_RPRESET_POLL_TIMEOUT_US, true,
+ csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
+ if (ret)
+ return ret;
+
+ /* When ring pair reset is done, clear rpresetsts */
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), ADF_WQM_CSR_RPRESETSTS_STATUS);
+
+ return 0;
+}
+
+static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number);
+
+ ret = reset_ring_pair(csr, bank_number);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "ring pair reset failed (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
+
+ return ret;
+}
+
+static int build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_hw_comp_51_config_csr_lower hw_comp_lower_csr = { };
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1;
+ lower_val = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+static int build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = 0;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+static void adf_gen6_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = build_comp_block;
+ dc_ops->build_decomp_block = build_decomp_block;
+}
+
+static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u32 *thd2arb_map = hw_data->thd_to_arb_map;
+ unsigned int i;
+
+ for (i = 0; i < hw_data->num_engines; i++) {
+ thd2arb_map[i] = adf_gen6_get_arb_mask(accel_dev, i);
+ dev_dbg(&GET_DEV(accel_dev), "ME:%d arb_mask:%#x\n", i, thd2arb_map[i]);
+ }
+
+ return 0;
+}
+
+static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
+{
+ u32 value;
+
+ /*
+ * After each PF FLR, for each of the 64 ring pairs in the PF, the
+ * driver must program the ringmodectl CSRs.
+ */
+ value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
+ value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
+ value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_EN_MASK, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
+ ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
+}
+
+static int set_vc_config(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ u32 value;
+ int err;
+
+ /*
+ * After each PF FLR, the driver must program the Port Virtual Channel (VC)
+ * Control Registers.
+ * Read PVC0CTL then write the masked values.
+ */
+ pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
+ value |= FIELD_PREP(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
+ err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value);
+ if (err) {
+ dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n");
+ return pcibios_err_to_errno(err);
+ }
+
+ /* Read PVC1CTL then write masked values */
+ pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
+ value |= FIELD_PREP(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
+ value |= FIELD_PREP(ADF_GEN6_PVC1CTL_VCEN_MASK, ADF_GEN6_PVC1CTL_VCEN_ON);
+ err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value);
+ if (err)
+ dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
+
+ return pcibios_err_to_errno(err);
+}
+
+static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ void __iomem *csr = adf_get_etr_base(accel_dev);
+ u32 i;
+
+ for (i = 0; i < hw_data->num_banks; i++) {
+ dev_dbg(&GET_DEV(accel_dev), "set virtual channels for bank:%d\n", i);
+ set_vc_csr_for_bank(csr, i);
+ }
+
+ return set_vc_config(accel_dev);
+}
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ unsigned long fuses = self->fuses[ADF_FUSECTL4];
+ u32 mask = ADF_6XXX_ACCELENGINES_MASK;
+
+ /*
+ * If bit 0 is set in the fuses, the first 4 engines are disabled.
+ * If bit 4 is set, the second group of 4 engines are disabled.
+ * If bit 8 is set, the admin engine is disabled.
+ */
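+ /*
+ * E.g. a (hypothetical) fuse value of 0x11 disables engine groups 0
+ * and 1, leaving only the admin engine (bit 8) in the mask.
+ */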
+ if (test_bit(0, &fuses))
+ mask &= ~ADF_AE_GROUP_0;
+
+ if (test_bit(4, &fuses))
+ mask &= ~ADF_AE_GROUP_1;
+
+ if (test_bit(8, &fuses))
+ mask &= ~ADF_AE_GROUP_2;
+
+ return mask;
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym, capabilities_asym;
+ u32 capabilities_dc;
+ unsigned long mask;
+ u32 caps = 0;
+ u32 fusectl1;
+
+ fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
+
+ /* Read accelerator capabilities mask */
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_AES_V2;
+
+ /* A set bit in fusectl1 means the corresponding feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_UCS_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ }
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ capabilities_asym = 0;
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_CPR_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ if (adf_get_service_mask(accel_dev, &mask))
+ return 0;
+
+ if (test_bit(SVC_ASYM, &mask))
+ caps |= capabilities_asym;
+ if (test_bit(SVC_SYM, &mask))
+ caps |= capabilities_sym;
+ if (test_bit(SVC_DC, &mask))
+ caps |= capabilities_dc;
+ if (test_bit(SVC_DCC, &mask)) {
+ /*
+ * Sym capabilities are available for chaining operations,
+ * but sym crypto instances cannot be supported.
+ */
+ caps = capabilities_dc | capabilities_sym;
+ caps &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ }
+
+ return caps;
+}
+
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
+{
+ return ARRAY_SIZE(adf_default_fw_config);
+}
+
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs);
+ int id;
+
+ id = adf_default_fw_config[obj_num].obj;
+ if (id >= num_fw_objs)
+ return NULL;
+
+ return adf_6xxx_fw_objs[id];
+}
+
+static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ return uof_get_name(accel_dev, obj_num);
+}
+
+static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return -EINVAL;
+
+ return adf_default_fw_config[obj_num].obj;
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ return adf_default_fw_config[obj_num].ae_mask;
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ if (adf_gen6_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+ "Failed to generate thread to arbiter mapping");
+
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+}
+
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr = adf_get_pmisc_base(accel_dev);
+ u32 status;
+ u32 csr;
+ int ret;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_GEN6_ERRMSK2);
+ csr |= ADF_GEN6_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN6_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_GEN6_PM_INTERRUPT, ADF_GEN6_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_GEN6_PM_INIT_STATE,
+ ADF_GEN6_PM_POLL_DELAY_US,
+ ADF_GEN6_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN6_PM_STATUS);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+ return ret;
+ }
+
+ dev_dbg(&GET_DEV(accel_dev), "Setting virtual channels for device qat_dev%d\n",
+ accel_dev->accel_id);
+
+ ret = adf_gen6_set_vc(accel_dev);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to set virtual channels\n");
+
+ return ret;
+}
+
+static int enable_pm(struct adf_accel_dev *accel_dev)
+{
+ return adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+}
+
+static int dev_config(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ return ret;
+
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_DC:
+ case SVC_DCC:
+ ret = adf_gen6_comp_dev_config(accel_dev);
+ break;
+ default:
+ ret = adf_gen6_no_dev_config(accel_dev);
+ break;
+ }
+ if (ret)
+ return ret;
+
+ __set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+}
+
+void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class = &adf_6xxx_class;
+ hw_data->instance_id = adf_6xxx_class.instances++;
+ hw_data->num_banks = ADF_GEN6_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN6_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN6_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN6_MAX_ACCELERATORS;
+ hw_data->num_engines = ADF_6XXX_MAX_ACCELENGINES;
+ hw_data->num_logical_accel = 1;
+ hw_data->tx_rx_gap = ADF_GEN6_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN6_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = 0;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = enable_error_correction;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+ hw_data->get_num_aes = get_num_aes;
+ hw_data->get_sram_bar_id = get_sram_bar_id;
+ hw_data->get_etr_bar_id = get_etr_bar_id;
+ hw_data->get_misc_bar_id = get_misc_bar_id;
+ hw_data->get_arb_info = get_arb_info;
+ hw_data->get_admin_info = get_admin_info;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_sku = get_sku;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = enable_ints;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->admin_ae_mask = ADF_6XXX_ADMIN_AE_MASK;
+ hw_data->fw_name = ADF_6XXX_FW;
+ hw_data->fw_mmp_name = ADF_6XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_6xxx;
+ hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_obj_type = uof_get_obj_type;
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = set_ssm_wdtimer;
+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = ring_pair_reset;
+ hw_data->dev_config = dev_config;
+ hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->start_timer = adf_timer_start;
+ hw_data->stop_timer = adf_timer_stop;
+ hw_data->init_device = adf_init_device;
+ hw_data->enable_pm = enable_pm;
+ hw_data->services_supported = services_supported;
+
+ adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen6_init_dc_ops(&hw_data->dc_ops);
+ adf_gen6_init_ras_ops(&hw_data->ras_ops);
+}
+
+void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
+{
+ if (hw_data->dev_class->instances)
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
new file mode 100644
index 000000000000..78e2e2c5816e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_6XXX_HW_DATA_H_
+#define ADF_6XXX_HW_DATA_H_
+
+#include <linux/bits.h>
+#include <linux/time.h>
+#include <linux/units.h>
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_dc.h"
+
+/* PCIe configuration space */
+#define ADF_GEN6_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
+#define ADF_GEN6_SRAM_BAR 0
+#define ADF_GEN6_PMISC_BAR 1
+#define ADF_GEN6_ETR_BAR 2
+#define ADF_6XXX_MAX_ACCELENGINES 9
+
+/* Clocks frequency */
+#define ADF_GEN6_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
+/* Physical function fuses */
+#define ADF_GEN6_FUSECTL0_OFFSET 0x2C8
+#define ADF_GEN6_FUSECTL1_OFFSET 0x2CC
+#define ADF_GEN6_FUSECTL4_OFFSET 0x2D8
+
+/* Accelerators */
+#define ADF_GEN6_ACCELERATORS_MASK 0x1
+#define ADF_GEN6_MAX_ACCELERATORS 1
+
+/* MSI-X interrupt */
+#define ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET 0x41A040
+#define ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET 0x41A044
+#define ADF_GEN6_SMIAPF_MASK_OFFSET 0x41A084
+#define ADF_GEN6_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 4))
+
+/* Bank and ring configuration */
+#define ADF_GEN6_NUM_RINGS_PER_BANK 2
+#define ADF_GEN6_NUM_BANKS_PER_VF 4
+#define ADF_GEN6_ETR_MAX_BANKS 64
+#define ADF_GEN6_RX_RINGS_OFFSET 1
+#define ADF_GEN6_TX_RINGS_MASK 0x1
+
+/* Arbiter configuration */
+#define ADF_GEN6_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_GEN6_ARB_OFFSET 0x000
+#define ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET 0x400
+
+/* Admin interface configuration */
+#define ADF_GEN6_ADMINMSGUR_OFFSET 0x500574
+#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578
+#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970
+
+/*
+ * Watchdog timers
+ * Timeouts are expressed in cycles. Clock speed may vary across products,
+ * but these values correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000ULL
+#define ADF_SSMWDTATHL_OFFSET 0x5208
+#define ADF_SSMWDTATHH_OFFSET 0x520C
+#define ADF_SSMWDTCNVL_OFFSET 0x5408
+#define ADF_SSMWDTCNVH_OFFSET 0x540C
+#define ADF_SSMWDTUCSL_OFFSET 0x5808
+#define ADF_SSMWDTUCSH_OFFSET 0x580C
+#define ADF_SSMWDTDCPRL_OFFSET 0x5A08
+#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C
+#define ADF_SSMWDTPKEL_OFFSET 0x5E08
+#define ADF_SSMWDTPKEH_OFFSET 0x5E0C
+
+/* Ring reset */
+#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_RPRESET_POLL_DELAY_US 20
+#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
+#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + (bank) * 8)
+#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
+#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
+
+/* Controls and sets up the corresponding ring mode of operation */
+#define ADF_GEN6_CSR_RINGMODECTL(bank) (0x9000 + (bank) * 4)
+
+/* Specifies the traffic class to use for the transactions to/from the ring */
+#define ADF_GEN6_RINGMODECTL_TC_MASK GENMASK(18, 16)
+#define ADF_GEN6_RINGMODECTL_TC_DEFAULT 0x7
+
+/* Specifies usage of tc for the transactions to/from this ring */
+#define ADF_GEN6_RINGMODECTL_TC_EN_MASK GENMASK(20, 19)
+
+/*
+ * Use the value programmed in the tc field for request descriptor
+ * and metadata read transactions
+ */
+#define ADF_GEN6_RINGMODECTL_TC_EN_OP1 0x1
+
+/* VC0 Resource Control Register */
+#define ADF_GEN6_PVC0CTL_OFFSET 0x204
+#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1
+#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1)
+#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x7F
+
+/* VC1 Resource Control Register */
+#define ADF_GEN6_PVC1CTL_OFFSET 0x210
+#define ADF_GEN6_PVC1CTL_TCVCMAP_OFFSET 1
+#define ADF_GEN6_PVC1CTL_TCVCMAP_MASK GENMASK(7, 1)
+#define ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT 0x40
+#define ADF_GEN6_PVC1CTL_VCEN_OFFSET 31
+#define ADF_GEN6_PVC1CTL_VCEN_MASK BIT(31)
+/* RW bit: 0x1 - enables a Virtual Channel, 0x0 - disables */
+#define ADF_GEN6_PVC1CTL_VCEN_ON 0x1
+
+/* Error source mask registers */
+#define ADF_GEN6_ERRMSK0 0x41A210
+#define ADF_GEN6_ERRMSK1 0x41A214
+#define ADF_GEN6_ERRMSK2 0x41A218
+#define ADF_GEN6_ERRMSK3 0x41A21C
+
+#define ADF_GEN6_VFLNOTIFY BIT(7)
+
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
+/* Physical function fuses */
+#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0)
+#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8)
+
+/* Firmware binaries */
+#define ADF_6XXX_FW "qat_6xxx.bin"
+#define ADF_6XXX_MMP "qat_6xxx_mmp.bin"
+#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin"
+#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
+#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+
+enum icp_qat_gen6_slice_mask {
+ ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0),
+ ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3),
+ ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4),
+ ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6),
+};
+
+void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data);
+
+#endif /* ADF_6XXX_HW_DATA_H_ */
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
new file mode 100644
index 000000000000..c1dc9c56fdf5
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/array_size.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include <adf_accel_devices.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
+
+#include "adf_gen6_shared.h"
+#include "adf_6xxx_hw_data.h"
+
+static int bar_map[] = {
+ 0, /* SRAM */
+ 2, /* PMISC */
+ 4, /* ETR */
+};
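+/* Only even BAR indices are used: each region is a 64-bit BAR (ADF_GEN6_BAR_MASK). */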
+
+static void adf_device_down(void *accel_dev)
+{
+ adf_dev_down(accel_dev);
+}
+
+static void adf_dbgfs_cleanup(void *accel_dev)
+{
+ adf_dbgfs_exit(accel_dev);
+}
+
+static void adf_cfg_device_remove(void *accel_dev)
+{
+ adf_cfg_dev_remove(accel_dev);
+}
+
+static void adf_cleanup_hw_data(void *accel_dev)
+{
+ struct adf_accel_dev *accel_device = accel_dev;
+
+ if (accel_device->hw_device) {
+ adf_clean_hw_data_6xxx(accel_device->hw_device);
+ accel_device->hw_device = NULL;
+ }
+}
+
+static void adf_devmgr_remove(void *accel_dev)
+{
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct adf_accel_dev *accel_dev;
+ struct adf_bar *bar;
+ unsigned int i;
+ int ret;
+
+ if (num_possible_nodes() > 1 && dev_to_node(dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory,
+ * there is no point in using it, since remote memory
+ * transactions will be very slow.
+ */
+ return dev_err_probe(dev, -EINVAL, "Invalid NUMA configuration.\n");
+ }
+
+ accel_dev = devm_kzalloc(dev, sizeof(*accel_dev), GFP_KERNEL);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ INIT_LIST_HEAD(&accel_dev->list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+ accel_dev->owner = THIS_MODULE;
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data), GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]);
+ pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]);
+
+ if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE))
+ return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n");
+
+ /* Enable PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable PCI device.\n");
+
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add new accelerator device.\n");
+
+ ret = devm_add_action_or_reset(dev, adf_devmgr_remove, accel_dev);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_6xxx(accel_dev->hw_device);
+
+ ret = devm_add_action_or_reset(dev, adf_cleanup_hw_data, accel_dev);
+ if (ret)
+ return ret;
+
+ /* Get Accelerators and Accelerator Engine masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+ /* If the device has no acceleration engines then ignore it */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ (~hw_data->ae_mask & ADF_GEN6_ACCELERATORS_MASK)) {
+ ret = -EFAULT;
+ return dev_err_probe(dev, ret, "No acceleration units were found.\n");
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adf_cfg_device_remove, accel_dev);
+ if (ret)
+ return ret;
+
+ /* Set DMA identifier */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return dev_err_probe(dev, ret, "No usable DMA configuration.\n");
+
+ ret = adf_gen6_cfg_dev_init(accel_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize configuration.\n");
+
+ /* Get accelerator capability mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ ret = -EINVAL;
+ return dev_err_probe(dev, ret, "Failed to get capabilities mask.\n");
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bar_map); i++) {
+ bar = &accel_pci_dev->pci_bars[i];
+
+ /* Map 64-bit PCIe BAR */
+ bar->virt_addr = pcim_iomap_region(pdev, bar_map[i], pci_name(pdev));
+ if (IS_ERR(bar->virt_addr)) {
+ ret = PTR_ERR(bar->virt_addr);
+ return dev_err_probe(dev, ret, "Failed to ioremap PCI region.\n");
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /*
+ * The PCI config space is saved at this point and will be restored
+ * after a Function Level Reset (FLR) as the FLR does not completely
+ * restore it.
+ */
+ ret = pci_save_state(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to save pci state.\n");
+
+ accel_dev->ras_errors.enabled = true;
+
+ adf_dbgfs_init(accel_dev);
+
+ ret = devm_add_action_or_reset(dev, adf_dbgfs_cleanup, accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev);
+ if (ret)
+ return ret;
+
+ ret = adf_sysfs_init(accel_dev);
+
+ return ret;
+}
+
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_6XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_6XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_6XXX_FW);
+MODULE_FIRMWARE(ADF_6XXX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology for GEN6 Devices");
+MODULE_SOFTDEP("pre: crypto-intel_qat");
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
index d9e568572da8..43604c025f0c 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o
qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index e78f7bfd30b8..07f2c42a68f5 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -5,7 +5,6 @@
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -22,7 +21,6 @@ static const u32 thrd_to_arb_map[ADF_C3XXX_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c3xxx_class = {
.name = ADF_C3XXX_DEVICE_NAME,
.type = DEV_C3XXX,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index b825b35ab4bf..bceb5dd8b148 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c3xxx_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C3XXX_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C3XXX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
index 31a908a211ac..03f6745b4aa2 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o
qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index a512ca4efd3f..db3c33fa1881 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class c3xxxiov_class = {
.name = ADF_C3XXXVF_DEVICE_NAME,
.type = DEV_C3XXXVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile
index cbdaaa135e84..f3d722bef088 100644
--- a/drivers/crypto/intel/qat/qat_c62x/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62x/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o
qat_c62x-y := adf_drv.o adf_c62x_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 32ebe09477a8..0b410b41474d 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -5,7 +5,6 @@
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -22,7 +21,6 @@ static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
static struct adf_hw_device_class c62x_class = {
.name = ADF_C62X_DEVICE_NAME,
.type = DEV_C62X,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index 8a7bdec358d6..23ccb72b6ea2 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_c62x_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_C62X_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_C62X_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
index 60e499b041ec..ed7f3f722d99 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o
qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 4aaaaf921734..7f00035d3661 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class c62xiov_class = {
.name = ADF_C62XVF_DEVICE_NAME,
.type = DEV_C62XVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index af5df29fd2e3..66bb295ace28 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -8,19 +8,19 @@ intel_qat-y := adf_accel_engine.o \
adf_cfg_services.o \
adf_clock.o \
adf_ctl_drv.o \
+ adf_dc.o \
adf_dev_mgr.o \
adf_gen2_config.o \
- adf_gen2_dc.o \
adf_gen2_hw_csr_data.o \
adf_gen2_hw_data.o \
adf_gen4_config.o \
- adf_gen4_dc.o \
adf_gen4_hw_csr_data.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
adf_gen4_ras.o \
- adf_gen4_timer.o \
adf_gen4_vf_mig.o \
+ adf_gen6_ras.o \
+ adf_gen6_shared.o \
adf_hw_arbiter.o \
adf_init.o \
adf_isr.o \
@@ -30,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \
adf_sysfs.o \
adf_sysfs_ras_counters.o \
adf_sysfs_rl.o \
+ adf_timer.o \
adf_transport.o \
qat_algs.o \
qat_algs_send.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index dc21551153cb..2ee526063213 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -12,6 +12,7 @@
#include <linux/qat/qat_mig_dev.h>
#include <linux/wordpart.h>
#include "adf_cfg_common.h"
+#include "adf_dc.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
@@ -25,14 +26,18 @@
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
-#define ADF_4XXX_PCI_DEVICE_ID 0x4940
-#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
-#define ADF_401XX_PCI_DEVICE_ID 0x4942
-#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
-#define ADF_402XX_PCI_DEVICE_ID 0x4944
-#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
-#define ADF_420XX_PCI_DEVICE_ID 0x4946
-#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
+#define ADF_6XXX_DEVICE_NAME "6xxx"
+#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940
+#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941
+#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942
+#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943
+#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944
+#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945
+#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946
+#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947
+#define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948
+#define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949
+
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -267,7 +272,8 @@ struct adf_pfvf_ops {
};
struct adf_dc_ops {
- void (*build_deflate_ctx)(void *ctx);
+ int (*build_comp_block)(void *ctx, enum adf_dc_algo algo);
+ int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo);
};
struct qat_migdev_ops {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index acad526eb741..573388c37100 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -449,6 +449,7 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+EXPORT_SYMBOL_GPL(adf_init_admin_pm);
int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr,
size_t buff_size)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 89df3888d7ea..15fdf9854b81 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -48,6 +48,7 @@ enum adf_device_type {
DEV_C3XXXVF,
DEV_4XXX,
DEV_420XX,
+ DEV_6XXX,
};
struct adf_dev_status_info {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index 30abcd9e1283..c39871291da7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -116,7 +116,7 @@ int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
return adf_service_mask_to_string(mask, out, out_len);
}
-static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask)
+int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
size_t len;
@@ -138,6 +138,7 @@ static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *
return ret;
}
+EXPORT_SYMBOL_GPL(adf_get_service_mask);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index f6bafc15cbc6..3742c450878f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -32,5 +32,6 @@ enum {
int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
size_t in_len, char *out, size_t out_len);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
+int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask);
#endif
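
With adf_get_service_mask() now exported, code outside qat_common can test which services the device configuration enables. A minimal sketch, assuming the SVC_* enum defined in this header; the helper name below is illustrative, not part of the patch:

	/* Illustrative helper: report whether compression is configured. */
	static bool qat_dc_service_enabled(struct adf_accel_dev *accel_dev)
	{
		unsigned long mask = 0;

		if (adf_get_service_mask(accel_dev, &mask))
			return false;

		return test_bit(SVC_DC, &mask);
	}
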
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c b/drivers/crypto/intel/qat/qat_common/adf_dc.c
index 47261b1c1da6..3e8fb4e3ed97 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dc.c
@@ -1,22 +1,21 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include "adf_accel_devices.h"
-#include "adf_gen2_dc.h"
+#include "adf_dc.h"
#include "icp_qat_fw_comp.h"
-static void qat_comp_build_deflate_ctx(void *ctx)
+int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo)
{
- struct icp_qat_fw_comp_req *req_tmpl = (struct icp_qat_fw_comp_req *)ctx;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
struct icp_qat_fw_comp_cd_hdr *comp_cd_ctrl = &req_tmpl->comp_cd_ctrl;
+ struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ int ret;
memset(req_tmpl, 0, sizeof(*req_tmpl));
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
QAT_COMN_PTR_TYPE_SGL);
@@ -26,12 +25,14 @@ static void qat_comp_build_deflate_ctx(void *ctx)
ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
- cd_pars->u.sl.comp_slice_cfg_word[0] =
- ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
- ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
- ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
- ICP_QAT_HW_COMPRESSION_DEPTH_1,
- ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ /* Build HW config block for compression */
+ ret = GET_DC_OPS(accel_dev)->build_comp_block(ctx, algo);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to build compression block\n");
+ return ret;
+ }
+
req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
req_pars->req_par_flags =
@@ -45,26 +46,19 @@ static void qat_comp_build_deflate_ctx(void *ctx)
ICP_QAT_FW_COMP_NO_XXHASH_ACC,
ICP_QAT_FW_COMP_CNV_ERROR_NONE,
ICP_QAT_FW_COMP_NO_APPEND_CRC,
- ICP_QAT_FW_COMP_NO_DROP_DATA);
+ ICP_QAT_FW_COMP_NO_DROP_DATA,
+ ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS);
ICP_QAT_FW_COMN_NEXT_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
ICP_QAT_FW_COMN_CURR_ID_SET(comp_cd_ctrl, ICP_QAT_FW_SLICE_COMP);
/* Fill second half of the template for decompression */
memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
req_tmpl++;
- header = &req_tmpl->comn_hdr;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
- cd_pars = &req_tmpl->cd_pars;
- cd_pars->u.sl.comp_slice_cfg_word[0] =
- ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
- ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
- ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
- ICP_QAT_HW_COMPRESSION_DEPTH_1,
- ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
-}
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
- dc_ops->build_deflate_ctx = qat_comp_build_deflate_ctx;
+ /* Build HW config block for decompression */
+ ret = GET_DC_OPS(accel_dev)->build_decomp_block(req_tmpl, algo);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to build decompression block\n");
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dc.h b/drivers/crypto/intel/qat/qat_common/adf_dc.h
new file mode 100644
index 000000000000..6cb5e09054a6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_dc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_DC_H
+#define ADF_DC_H
+
+struct adf_accel_dev;
+
+enum adf_dc_algo {
+ QAT_DEFLATE,
+ QAT_LZ4,
+ QAT_LZ4S,
+ QAT_ZSTD,
+};
+
+int qat_comp_build_ctx(struct adf_accel_dev *accel_dev, void *ctx, enum adf_dc_algo algo);
+
+#endif /* ADF_DC_H */
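
This header is the seam of the refactor: qat_comp_build_ctx() keeps the generation-independent template setup and delegates only the hardware config words to the per-generation build_comp_block()/build_decomp_block() callbacks registered in struct adf_dc_ops. A minimal caller sketch, assuming a buffer holding two back-to-back request templates (the helper fills the second one for decompression); the wrapper name is illustrative:

	/* Illustrative wrapper: build deflate templates for one session. */
	static int qat_dc_templates_init(struct adf_accel_dev *accel_dev,
					 struct icp_qat_fw_comp_req *tmpl)
	{
		/* tmpl must point at two consecutive icp_qat_fw_comp_req entries. */
		return qat_comp_build_ctx(accel_dev, tmpl, QAT_DEFLATE);
	}
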
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
index 4f86696800c9..78957fa900b7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -8,6 +8,7 @@ enum adf_fw_objs {
ADF_FW_ASYM_OBJ,
ADF_FW_DC_OBJ,
ADF_FW_ADMIN_OBJ,
+ ADF_FW_CY_OBJ,
};
struct adf_fw_config {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
deleted file mode 100644
index 6eae023354d7..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_dc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN2_DC_H
-#define ADF_GEN2_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN2_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index 2b263442c856..6a505e9a5cf9 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_common_drv.h"
+#include "adf_dc.h"
#include "adf_gen2_hw_data.h"
+#include "icp_qat_fw_comp.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>
@@ -169,3 +171,58 @@ void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
}
}
EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
+
+static int adf_gen2_build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ return 0;
+}
+
+static int adf_gen2_build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
+ ICP_QAT_HW_COMPRESSION_DEPTH_1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ return 0;
+}
+
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = adf_gen2_build_comp_block;
+ dc_ops->build_decomp_block = adf_gen2_build_decomp_block;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index 708e9186127b..59bad368a921 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -88,5 +88,6 @@ void adf_gen2_get_arb_info(struct arb_info *arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
index a716545a764c..34a63cf40db2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_pfvf.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
index f97e7a880f3a..afcdfdd0a37a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
@@ -11,7 +11,7 @@
#include "qat_compression.h"
#include "qat_crypto.h"
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -117,7 +117,7 @@ err:
return ret;
}
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
{
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
int banks = GET_MAX_BANKS(accel_dev);
@@ -187,7 +187,7 @@ err:
return ret;
}
-static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+int adf_no_dev_config(struct adf_accel_dev *accel_dev)
{
unsigned long val;
int ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
index bb87655f69a8..38a674c27e40 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
@@ -7,5 +7,8 @@
int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev);
+int adf_crypto_dev_config(struct adf_accel_dev *accel_dev);
+int adf_comp_dev_config(struct adf_accel_dev *accel_dev);
+int adf_no_dev_config(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
deleted file mode 100644
index 5859238e37de..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation */
-#include "adf_accel_devices.h"
-#include "icp_qat_fw_comp.h"
-#include "icp_qat_hw_20_comp.h"
-#include "adf_gen4_dc.h"
-
-static void qat_comp_build_deflate(void *ctx)
-{
- struct icp_qat_fw_comp_req *req_tmpl =
- (struct icp_qat_fw_comp_req *)ctx;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comp_req_params *req_pars = &req_tmpl->comp_pars;
- struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = {0};
- struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = {0};
- struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = {0};
- u32 upper_val;
- u32 lower_val;
-
- memset(req_tmpl, 0, sizeof(*req_tmpl));
- header->hdr_flags =
- ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
- header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
- header->comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_16BYTE_DATA,
- QAT_COMN_PTR_TYPE_SGL);
- header->serv_specif_flags =
- ICP_QAT_FW_COMP_FLAGS_BUILD(ICP_QAT_FW_COMP_STATELESS_SESSION,
- ICP_QAT_FW_COMP_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
- hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
- hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
- hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
- hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
- hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
- hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
- hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
- hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
-
- upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
- lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
-
- cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
- cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
-
- req_pars->crc.legacy.initial_adler = COMP_CPR_INITIAL_ADLER;
- req_pars->crc.legacy.initial_crc32 = COMP_CPR_INITIAL_CRC;
- req_pars->req_par_flags =
- ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
- ICP_QAT_FW_COMP_EOP,
- ICP_QAT_FW_COMP_BFINAL,
- ICP_QAT_FW_COMP_CNV,
- ICP_QAT_FW_COMP_CNV_RECOVERY,
- ICP_QAT_FW_COMP_NO_CNV_DFX,
- ICP_QAT_FW_COMP_CRC_MODE_LEGACY,
- ICP_QAT_FW_COMP_NO_XXHASH_ACC,
- ICP_QAT_FW_COMP_CNV_ERROR_NONE,
- ICP_QAT_FW_COMP_NO_APPEND_CRC,
- ICP_QAT_FW_COMP_NO_DROP_DATA);
-
- /* Fill second half of the template for decompression */
- memcpy(req_tmpl + 1, req_tmpl, sizeof(*req_tmpl));
- req_tmpl++;
- header = &req_tmpl->comn_hdr;
- header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
- cd_pars = &req_tmpl->cd_pars;
-
- hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
- lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
-
- cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
- cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
-}
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
-{
- dc_ops->build_deflate_ctx = qat_comp_build_deflate;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
deleted file mode 100644
index 0b1a6774412e..000000000000
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_dc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright(c) 2022 Intel Corporation */
-#ifndef ADF_GEN4_DC_H
-#define ADF_GEN4_DC_H
-
-#include "adf_accel_devices.h"
-
-void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
-
-#endif /* ADF_GEN4_DC_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 099949a2421c..0406cb09c5bb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -9,6 +9,8 @@
#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_hw_20_comp.h"
u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
@@ -663,3 +665,71 @@ int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
+
+static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = { };
+ struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = { };
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 upper_val;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+ hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
+ hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
+ hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
+ hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
+ hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;
+
+ upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
+ lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;
+
+ return 0;
+}
+
+static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo)
+{
+ struct icp_qat_fw_comp_req *req_tmpl = ctx;
+ struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = { };
+ struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ u32 lower_val;
+
+ switch (algo) {
+ case QAT_DEFLATE:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+ lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
+
+ cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
+ cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+
+ return 0;
+}
+
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
+{
+ dc_ops->build_comp_block = adf_gen4_build_comp_block;
+ dc_ops->build_decomp_block = adf_gen4_build_decomp_block;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 51fc2eaa263e..e4f4d5fa616d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -7,6 +7,7 @@
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
+#include "adf_dc.h"
/* PCIe configuration space */
#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
@@ -180,5 +181,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
u32 bank_number, struct bank_state *state);
bool adf_gen4_services_supported(unsigned long service_mask);
+void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
index 17d1b774d4a8..2c8708117f70 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
@@ -4,6 +4,7 @@
#define ADF_GEN4_PFVF_H
#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
#ifdef CONFIG_PCI_IOV
void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
new file mode 100644
index 000000000000..9a5b995f7ada
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_PM_H
+#define ADF_GEN6_PM_H
+
+#include <linux/bits.h>
+#include <linux/time.h>
+
+struct adf_accel_dev;
+
+/* Power management */
+#define ADF_GEN6_PM_POLL_DELAY_US 20
+#define ADF_GEN6_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_GEN6_PM_STATUS 0x50A00C
+#define ADF_GEN6_PM_INTERRUPT 0x50A028
+
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_GEN6_PM_SOU BIT(18)
+
+/* cpm_pm_interrupt bitfields */
+#define ADF_GEN6_PM_DRV_ACTIVE BIT(20)
+
+#define ADF_GEN6_PM_DEFAULT_IDLE_FILTER 0x6
+
+/* cpm_pm_status bitfields */
+#define ADF_GEN6_PM_INIT_STATE BIT(21)
+
+#endif /* ADF_GEN6_PM_H */
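
The poll constants above suggest the shape of the gen6 power-management init wait. A hypothetical sketch, not part of this patch, of how a caller could poll cpm_pm_status with them (the function name and the iopoll usage are assumptions):

	#include <linux/iopoll.h>

	/* Poll cpm_pm_status every 20 us, giving up after one second. */
	static int adf_gen6_wait_pm_init(void __iomem *pmisc)
	{
		u32 status;

		return read_poll_timeout(ioread32, status,
					 status & ADF_GEN6_PM_INIT_STATE,
					 ADF_GEN6_PM_POLL_DELAY_US,
					 ADF_GEN6_PM_POLL_TIMEOUT_US, true,
					 pmisc + ADF_GEN6_PM_STATUS);
	}
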
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c
new file mode 100644
index 000000000000..967253082a98
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "adf_common_drv.h"
+#include "adf_gen6_ras.h"
+#include "adf_sysfs_ras_counters.h"
+
+static void enable_errsou_reporting(void __iomem *csr)
+{
+ /* Enable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, 0);
+
+ /* Enable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, 0);
+
+ /*
+ * Enable uncorrectable error reporting in ERRSOU2
+ * but disable PM interrupt by default
+ */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, ADF_GEN6_ERRSOU2_PM_INT_BIT);
+
+ /* Enable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, 0);
+}
+
+static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
+
+ /* Enable acceleration engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, ae_mask);
+
+ /* Enable acceleration engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, ae_mask);
+}
+
+static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ /* Enable HI CPP agents command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE,
+ ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK);
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_MASK);
+}
+
+static void enable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg, mask;
+
+ /* Enable RI memory error reporting */
+ mask = ADF_GEN6_RIMEM_PARERR_FATAL_MASK | ADF_GEN6_RIMEM_PARERR_CERR_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, mask);
+
+ /* Enable IOSF primary command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, ADF_GEN6_RIMISCSTS_BIT);
+
+ /* Enable TI internal memory parity error reporting */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_CI_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_CD_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK);
+ reg &= ~ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, reg);
+
+ /* Enable error handling in RI, TI CPP interface control registers */
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, ADF_GEN6_RICPPINTCTL_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, ADF_GEN6_TICPPINTCTL_MASK);
+
+ /*
+ * Enable error detection and reporting in TIMISCSTS
+	 * while preserving the values of bits 1, 2 and 30
+ */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
+ reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
+ reg |= ADF_GEN6_TIMISCCTL_BIT;
+ ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
+}
+
+static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ /* Enable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, 0);
+}
+
+static void adf_gen6_enable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ enable_errsou_reporting(csr);
+ enable_ae_error_reporting(accel_dev, csr);
+ enable_cpp_error_reporting(accel_dev, csr);
+ enable_ti_ri_error_reporting(csr);
+ enable_ssm_error_reporting(accel_dev, csr);
+}
+
+static void disable_errsou_reporting(void __iomem *csr)
+{
+ u32 val;
+
+ /* Disable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK0, ADF_GEN6_ERRSOU0_MASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK1, ADF_GEN6_ERRMSK1_MASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU2 */
+ val = ADF_CSR_RD(csr, ADF_GEN6_ERRMSK2);
+ val |= ADF_GEN6_ERRSOU2_DIS_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK2, val);
+
+ /* Disable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_ERRSOU3_DIS_MASK);
+}
+
+static void disable_ae_error_reporting(void __iomem *csr)
+{
+ /* Disable acceleration engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOGENABLE_CPP0, 0);
+
+ /* Disable acceleration engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0, 0);
+}
+
+static void disable_cpp_error_reporting(void __iomem *csr)
+{
+ /* Disable HI CPP agents command parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE, 0);
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_CTRL, ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK);
+}
+
+static void disable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg;
+
+ /* Disable RI memory error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_RI_MEM_PAR_ERR_EN0, 0);
+
+ /* Disable IOSF primary command parity error reporting */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_RIMISCCTL);
+ reg &= ~ADF_GEN6_RIMISCSTS_BIT;
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCCTL, reg);
+
+ /* Disable TI internal memory parity error reporting */
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_ERR_MASK, ADF_GEN6_TI_CI_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK, ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK, ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_ERR_MASK, ADF_GEN6_TI_CD_PAR_STS_MASK);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_ERR_MASK, ADF_GEN6_TI_TRNSB_PAR_STS_MASK);
+
+ /* Disable error handling in RI, TI CPP interface control registers */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTCTL);
+ reg &= ~ADF_GEN6_RICPPINTCTL_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTCTL, reg);
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTCTL);
+ reg &= ~ADF_GEN6_TICPPINTCTL_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTCTL, reg);
+
+ /*
+ * Disable error detection and reporting in TIMISCSTS
+	 * while preserving the values of bits 1, 2 and 30
+ */
+ reg = ADF_CSR_RD(csr, ADF_GEN6_TIMISCCTL);
+ reg &= ADF_GEN6_TIMSCCTL_RELAY_MASK;
+ ADF_CSR_WR(csr, ADF_GEN6_TIMISCCTL, reg);
+}
+
+static void disable_ssm_error_reporting(void __iomem *csr)
+{
+ /* Disable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, ADF_GEN6_INTMASKSSM_MASK);
+}
+
+static void adf_gen6_disable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ disable_errsou_reporting(csr);
+ disable_ae_error_reporting(csr);
+ disable_cpp_error_reporting(csr);
+ disable_ti_ri_error_reporting(csr);
+ disable_ssm_error_reporting(csr);
+}
+
+static void adf_gen6_process_errsou0(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ae, errsou;
+
+ ae = ADF_CSR_RD(csr, ADF_GEN6_HIAECORERRLOG_CPP0);
+ ae &= GET_HW_DATA(accel_dev)->ae_mask;
+
+ dev_warn(&GET_DEV(accel_dev), "Correctable error detected: %#x\n", ae);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+
+ /* Clear interrupt from ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN6_HIAECORERRLOG_CPP0, ae);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
+ if (errsou & ADF_GEN6_ERRSOU0_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou0 still set: %#x\n", errsou);
+}
+
+static void adf_handle_cpp_ae_unc(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ae;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT))
+ return;
+
+ ae = ADF_CSR_RD(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0);
+ ae &= GET_HW_DATA(accel_dev)->ae_mask;
+ if (ae) {
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error detected: %#x\n", ae);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_HIAEUNCERRLOG_CPP0, ae);
+ }
+}
+
+static void adf_handle_cpp_cmd_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 cmd_par_err;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT))
+ return;
+
+ cmd_par_err = ADF_CSR_RD(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG);
+ cmd_par_err &= ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK;
+ if (cmd_par_err) {
+ dev_err(&GET_DEV(accel_dev), "HI CPP agent command parity error: %#x\n",
+ cmd_par_err);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_HICPPAGENTCMDPARERRLOG, cmd_par_err);
+ }
+}
+
+static void adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rimem_parerr_sts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT))
+ return;
+
+ rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN6_RIMEM_PARERR_STS);
+ rimem_parerr_sts &= ADF_GEN6_RIMEM_PARERR_CERR_MASK |
+ ADF_GEN6_RIMEM_PARERR_FATAL_MASK;
+ if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_CERR_MASK) {
+ dev_err(&GET_DEV(accel_dev), "RI memory parity correctable error: %#x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ }
+
+ if (rimem_parerr_sts & ADF_GEN6_RIMEM_PARERR_FATAL_MASK) {
+ dev_err(&GET_DEV(accel_dev), "RI memory parity fatal error: %#x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN6_RIMEM_PARERR_STS, rimem_parerr_sts);
+}
+
+static void adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_ci_par_sts;
+
+ ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CI_PAR_STS);
+ ti_ci_par_sts &= ADF_GEN6_TI_CI_PAR_STS_MASK;
+ if (ti_ci_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI memory parity error: %#x\n", ti_ci_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CI_PAR_STS, ti_ci_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+}
+
+static void adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_pullfub_par_sts;
+
+ ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS);
+ ti_pullfub_par_sts &= ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK;
+ if (ti_pullfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI pull parity error: %#x\n", ti_pullfub_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+}
+
+static void adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_pushfub_par_sts;
+
+ ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS);
+ ti_pushfub_par_sts &= ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK;
+ if (ti_pushfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI push parity error: %#x\n", ti_pushfub_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts);
+ }
+}
+
+static void adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_cd_par_sts;
+
+ ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_CD_PAR_STS);
+ ti_cd_par_sts &= ADF_GEN6_TI_CD_PAR_STS_MASK;
+ if (ti_cd_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI CD parity error: %#x\n", ti_cd_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_CD_PAR_STS, ti_cd_par_sts);
+ }
+}
+
+static void adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 ti_trnsb_par_sts;
+
+ ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN6_TI_TRNSB_PAR_STS);
+ ti_trnsb_par_sts &= ADF_GEN6_TI_TRNSB_PAR_STS_MASK;
+ if (ti_trnsb_par_sts) {
+ dev_err(&GET_DEV(accel_dev), "TI TRNSB parity error: %#x\n", ti_trnsb_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_TI_TRNSB_PAR_STS, ti_trnsb_par_sts);
+ }
+}
+
+static void adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 rimiscsts;
+
+ rimiscsts = ADF_CSR_RD(csr, ADF_GEN6_RIMISCSTS);
+ rimiscsts &= ADF_GEN6_RIMISCSTS_BIT;
+ if (rimiscsts) {
+ dev_err(&GET_DEV(accel_dev), "Command parity error detected on IOSFP: %#x\n",
+ rimiscsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_RIMISCSTS, rimiscsts);
+ }
+}
+
+static void adf_handle_ti_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return;
+
+ adf_handle_ti_ci_par_sts(accel_dev, csr);
+ adf_handle_ti_pullfub_par_sts(accel_dev, csr);
+ adf_handle_ti_pushfub_par_sts(accel_dev, csr);
+ adf_handle_ti_cd_par_sts(accel_dev, csr);
+ adf_handle_ti_trnsb_par_sts(accel_dev, csr);
+ adf_handle_iosfp_cmd_parerr(accel_dev, csr);
+}
+
+static void adf_handle_sfi_cmd_parerr(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Command parity error detected on streaming fabric interface\n");
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou1(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_cpp_ae_unc(accel_dev, csr, errsou);
+ adf_handle_cpp_cmd_par_err(accel_dev, csr, errsou);
+ adf_handle_ri_mem_par_err(accel_dev, csr, errsou);
+ adf_handle_ti_err(accel_dev, csr, errsou);
+ adf_handle_sfi_cmd_parerr(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
+ if (errsou & ADF_GEN6_ERRSOU1_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou1 still set: %#x\n", errsou);
+}
+
+static void adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 reg;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_CERRSSMSH);
+ reg &= ADF_GEN6_CERRSSMSH_ERROR_BIT;
+ if (reg) {
+ dev_warn(&GET_DEV(accel_dev),
+			 "Correctable error on SSM shared memory: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ ADF_CSR_WR(csr, ADF_GEN6_CERRSSMSH, reg);
+ }
+}
+
+static void adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_UERRSSMSH);
+ reg &= ADF_GEN6_UERRSSMSH_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+			"Fatal error on SSM shared memory: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_UERRSSMSH, reg);
+ }
+}
+
+static void adf_handle_pperr_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_PPERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_PPERR);
+ reg &= ADF_GEN6_PPERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal push or pull data error: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_PPERR, reg);
+ }
+}
+
+static void adf_handle_scmpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_SCM_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on SCM: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_cpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_CPP_PAR_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error on CPP: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_rfpar_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_RF_PAR_ERR_MASK;
+ if (reg) {
+		dev_err(&GET_DEV(accel_dev), "Fatal RF parity error: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_unexp_cpl_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_SSM_FERR_STATUS);
+ reg &= ADF_GEN6_UNEXP_CPL_ERR_MASK;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal error for AXI unexpected tag/length: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_SSM_FERR_STATUS, reg);
+ }
+}
+
+static void adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, void __iomem *csr)
+{
+ u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN6_IAINTSTATSSM);
+
+ iastatssm &= ADF_GEN6_IAINTSTATSSM_MASK;
+ if (!iastatssm)
+ return;
+
+ adf_handle_uerrssmsh(accel_dev, csr, iastatssm);
+ adf_handle_pperr_err(accel_dev, csr, iastatssm);
+ adf_handle_scmpar_err(accel_dev, csr, iastatssm);
+ adf_handle_cpppar_err(accel_dev, csr, iastatssm);
+ adf_handle_rfpar_err(accel_dev, csr, iastatssm);
+ adf_handle_unexp_cpl_err(accel_dev, csr, iastatssm);
+
+ ADF_CSR_WR(csr, ADF_GEN6_IAINTSTATSSM, iastatssm);
+}
+
+static void adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU2_SSM_ERR_BIT))
+ return;
+
+ adf_handle_cerrssmsh(accel_dev, csr);
+ adf_handle_iaintstatssm(accel_dev, csr);
+}
+
+static void adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 reg;
+
+ if (!(errsou & ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT))
+ return;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN6_CPP_CFC_ERR_STATUS);
+ if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT) {
+		dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT) {
+		dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ if (reg & ADF_GEN6_CPP_CFC_FATAL_ERR_BIT) {
+		dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: fatal error: %#x\n", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_STATUS_CLR,
+ ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK);
+}
+
+static void adf_gen6_process_errsou2(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_ssm(accel_dev, csr, errsou);
+ adf_handle_cpp_cfc_err(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
+ if (errsou & ADF_GEN6_ERRSOU2_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou2 still set: %#x\n", errsou);
+}
+
+static void adf_handle_timiscsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 timiscsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_TIMISCSTS_BIT))
+ return;
+
+ timiscsts = ADF_CSR_RD(csr, ADF_GEN6_TIMISCSTS);
+ if (timiscsts) {
+ dev_err(&GET_DEV(accel_dev), "Fatal error in transmit interface: %#x\n",
+ timiscsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+}
+
+static void adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ricppintsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK))
+ return;
+
+ ricppintsts = ADF_CSR_RD(csr, ADF_GEN6_RICPPINTSTS);
+ ricppintsts &= ADF_GEN6_RICPPINTSTS_MASK;
+ if (ricppintsts) {
+ dev_err(&GET_DEV(accel_dev), "RI push pull error: %#x\n", ricppintsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RICPPINTSTS, ricppintsts);
+ }
+}
+
+static void adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 ticppintsts;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK))
+ return;
+
+ ticppintsts = ADF_CSR_RD(csr, ADF_GEN6_TICPPINTSTS);
+ ticppintsts &= ADF_GEN6_TICPPINTSTS_MASK;
+ if (ticppintsts) {
+ dev_err(&GET_DEV(accel_dev), "TI push pull error: %#x\n", ticppintsts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ ADF_CSR_WR(csr, ADF_GEN6_TICPPINTSTS, ticppintsts);
+ }
+}
+
+static void adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks;
+ u32 atufaultstatus;
+ u32 i;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT))
+ return;
+
+ for (i = 0; i < max_rp_num; i++) {
+ atufaultstatus = ADF_CSR_RD(csr, ADF_GEN6_ATUFAULTSTATUS(i));
+
+ atufaultstatus &= ADF_GEN6_ATUFAULTSTATUS_BIT;
+ if (atufaultstatus) {
+ dev_err(&GET_DEV(accel_dev), "Ring pair (%u) ATU detected fault: %#x\n", i,
+ atufaultstatus);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_ATUFAULTSTATUS(i), atufaultstatus);
+ }
+ }
+}
+
+static void adf_handle_rlterror(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ u32 rlterror;
+
+ if (!(errsou & ADF_GEN6_ERRSOU3_RLTERROR_BIT))
+ return;
+
+ rlterror = ADF_CSR_RD(csr, ADF_GEN6_RLT_ERRLOG);
+ rlterror &= ADF_GEN6_RLT_ERRLOG_MASK;
+ if (rlterror) {
+ dev_err(&GET_DEV(accel_dev), "Error in rate limiting block: %#x\n", rlterror);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN6_RLT_ERRLOG, rlterror);
+ }
+}
+
+static void adf_handle_vflr(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Uncorrectable error in VF\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+}
+
+static void adf_handle_tc_vc_map_error(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "Violation of PCIe TC VC mapping\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pcie_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "DEVHALT due to an error in an incoming transaction\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_pg_req_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+		"DEVHALT due to a failed response to a page request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_xlt_cpl_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT))
+ return;
+
+	dev_err(&GET_DEV(accel_dev), "Error status for an address translation request\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_handle_ti_int_err_devhalt(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ if (!(errsou & ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+ return;
+
+ dev_err(&GET_DEV(accel_dev), "DEVHALT due to a TI internal memory error\n");
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+}
+
+static void adf_gen6_process_errsou3(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ adf_handle_timiscsts(accel_dev, csr, errsou);
+ adf_handle_ricppintsts(accel_dev, csr, errsou);
+ adf_handle_ticppintsts(accel_dev, csr, errsou);
+ adf_handle_atufaultstatus(accel_dev, csr, errsou);
+ adf_handle_rlterror(accel_dev, csr, errsou);
+ adf_handle_vflr(accel_dev, csr, errsou);
+ adf_handle_tc_vc_map_error(accel_dev, csr, errsou);
+ adf_handle_pcie_devhalt(accel_dev, csr, errsou);
+ adf_handle_pg_req_devhalt(accel_dev, csr, errsou);
+ adf_handle_xlt_cpl_devhalt(accel_dev, csr, errsou);
+ adf_handle_ti_int_err_devhalt(accel_dev, csr, errsou);
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK)
+ dev_warn(&GET_DEV(accel_dev), "errsou3 still set: %#x\n", errsou);
+}
+
+static void adf_gen6_is_reset_required(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ bool *reset_required)
+{
+ u8 reset, dev_state;
+ u32 gensts;
+
+ gensts = ADF_CSR_RD(csr, ADF_GEN6_GENSTS);
+ dev_state = FIELD_GET(ADF_GEN6_GENSTS_DEVICE_STATE_MASK, gensts);
+ reset = FIELD_GET(ADF_GEN6_GENSTS_RESET_TYPE_MASK, gensts);
+ if (dev_state == ADF_GEN6_GENSTS_DEVHALT && reset == ADF_GEN6_GENSTS_PFLR) {
+ *reset_required = true;
+ return;
+ }
+
+ if (reset == ADF_GEN6_GENSTS_COLD_RESET)
+ dev_err(&GET_DEV(accel_dev), "Fatal error, cold reset required\n");
+
+ *reset_required = false;
+}
+
+static bool adf_gen6_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required)
+{
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ bool handled = false;
+ u32 errsou;
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU0);
+ if (errsou & ADF_GEN6_ERRSOU0_MASK) {
+ adf_gen6_process_errsou0(accel_dev, csr);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU1);
+ if (errsou & ADF_GEN6_ERRSOU1_MASK) {
+ adf_gen6_process_errsou1(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU2);
+ if (errsou & ADF_GEN6_ERRSOU2_MASK) {
+ adf_gen6_process_errsou2(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN6_ERRSOU3);
+ if (errsou & ADF_GEN6_ERRSOU3_MASK) {
+ adf_gen6_process_errsou3(accel_dev, csr, errsou);
+ handled = true;
+ }
+
+ adf_gen6_is_reset_required(accel_dev, csr, reset_required);
+
+ return handled;
+}
+
+void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops)
+{
+ ras_ops->enable_ras_errors = adf_gen6_enable_ras;
+ ras_ops->disable_ras_errors = adf_gen6_disable_ras;
+ ras_ops->handle_interrupt = adf_gen6_handle_interrupt;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_ras_ops);
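
The exported init follows the same vtable pattern as the gen2/gen4 RAS code: a device-specific hw_data setup fills struct adf_ras_ops, and the common interrupt path dispatches through it. A rough sketch of the consuming side; everything beyond adf_gen6_init_ras_ops() itself is an assumption, not taken from this patch:

	/* Registration, as a 6xxx hw_data init would plausibly do it. */
	adf_gen6_init_ras_ops(&hw_data->ras_ops);

	/* Consumption from a (hypothetical) common error ISR. */
	static irqreturn_t adf_ras_isr_sketch(struct adf_accel_dev *accel_dev)
	{
		struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
		bool reset_required = false;

		if (!hw_data->ras_ops.handle_interrupt(accel_dev, &reset_required))
			return IRQ_NONE;

		if (reset_required)
			dev_err(&GET_DEV(accel_dev), "RAS: device restart required\n");

		return IRQ_HANDLED;
	}
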
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h
new file mode 100644
index 000000000000..66ced271d173
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_ras.h
@@ -0,0 +1,504 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_RAS_H_
+#define ADF_GEN6_RAS_H_
+
+#include <linux/bits.h>
+
+struct adf_ras_ops;
+
+/* Error source registers */
+#define ADF_GEN6_ERRSOU0 0x41A200
+#define ADF_GEN6_ERRSOU1 0x41A204
+#define ADF_GEN6_ERRSOU2 0x41A208
+#define ADF_GEN6_ERRSOU3 0x41A20C
+
+/* Error source mask registers */
+#define ADF_GEN6_ERRMSK0 0x41A210
+#define ADF_GEN6_ERRMSK1 0x41A214
+#define ADF_GEN6_ERRMSK2 0x41A218
+#define ADF_GEN6_ERRMSK3 0x41A21C
+
+/* ERRSOU0 Correctable error mask */
+#define ADF_GEN6_ERRSOU0_MASK BIT(0)
+
+#define ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT BIT(0)
+#define ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT BIT(1)
+#define ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT BIT(4)
+
+#define ADF_GEN6_ERRSOU1_MASK ( \
+ (ADF_GEN6_ERRSOU1_CPP0_MEUNC_BIT) | \
+ (ADF_GEN6_ERRSOU1_CPP_CMDPARERR_BIT) | \
+ (ADF_GEN6_ERRSOU1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRSOU1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRSOU1_SFICMD_PARERR_BIT))
+
+#define ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT BIT(0)
+#define ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT BIT(1)
+#define ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT BIT(4)
+
+#define ADF_GEN6_ERRMSK1_MASK ( \
+ (ADF_GEN6_ERRMSK1_CPP0_MEUNC_BIT) | \
+ (ADF_GEN6_ERRMSK1_CPP_CMDPARERR_BIT) | \
+ (ADF_GEN6_ERRMSK1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRMSK1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN6_ERRMSK1_IOSFCMD_PARERR_BIT))
+
+/* HI AE Uncorrectable error log */
+#define ADF_GEN6_HIAEUNCERRLOG_CPP0 0x41A300
+
+/* HI AE Uncorrectable error log enable */
+#define ADF_GEN6_HIAEUNCERRLOGENABLE_CPP0 0x41A320
+
+/* HI AE Correctable error log */
+#define ADF_GEN6_HIAECORERRLOG_CPP0 0x41A308
+
+/* HI AE Correctable error log enable */
+#define ADF_GEN6_HIAECORERRLOGENABLE_CPP0 0x41A318
+
+/* HI CPP Agent Command parity error log */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOG 0x41A310
+
+/* HI CPP Agent command parity error logging enable */
+#define ADF_GEN6_HICPPAGENTCMDPARERRLOGENABLE 0x41A314
+
+#define ADF_6XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1B
+
+/* RI Memory parity error status register */
+#define ADF_GEN6_RIMEM_PARERR_STS 0x41B128
+
+/* RI Memory parity error reporting enable */
+#define ADF_GEN6_RI_MEM_PAR_ERR_EN0 0x41B12C
+
+/*
+ * RI Memory parity error mask
+ * BIT(4) - ri_tlq_phdr parity error
+ * BIT(5) - ri_tlq_pdata parity error
+ * BIT(6) - ri_tlq_nphdr parity error
+ * BIT(7) - ri_tlq_npdata parity error
+ * BIT(8) - ri_tlq_cplhdr parity error
+ * BIT(10) - BIT(13) - ri_tlq_cpldata[0:3] parity error
+ * BIT(19) - ri_cds_cmd_fifo parity error
+ * BIT(20) - ri_obc_ricpl_fifo parity error
+ * BIT(21) - ri_obc_tiricpl_fifo parity error
+ * BIT(22) - ri_obc_cppcpl_fifo parity error
+ * BIT(23) - ri_obc_pendcpl_fifo parity error
+ * BIT(24) - ri_cpp_cmd_fifo parity error
+ * BIT(25) - ri_cds_ticmd_fifo parity error
+ * BIT(26) - riti_cmd_fifo parity error
+ * BIT(27) - ri_int_msixtbl parity error
+ * BIT(28) - ri_int_imstbl parity error
+ * BIT(30) - ri_kpt_fuses parity error
+ */
+#define ADF_GEN6_RIMEM_PARERR_FATAL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \
+ BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27) | \
+ BIT(28) | BIT(30))
+
+#define ADF_GEN6_RIMEM_PARERR_CERR_MASK \
+ (BIT(10) | BIT(11) | BIT(12) | BIT(13))
+
+/* TI CI parity status */
+#define ADF_GEN6_TI_CI_PAR_STS 0x50060C
+
+/* TI CI parity reporting mask */
+#define ADF_GEN6_TI_CI_PAR_ERR_MASK 0x500608
+
+/*
+ * TI CI parity status mask
+ * BIT(0) - CdCmdQ_sts parity error status
+ * BIT(1) - CdDataQ_sts parity error status
+ * BIT(3) - CPP_SkidQ_sts parity error status
+ */
+#define ADF_GEN6_TI_CI_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(3))
+
+/* TI PULLFUB parity status */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS 0x500618
+
+/* TI PULLFUB parity error reporting mask */
+#define ADF_GEN6_TI_PULL0FUB_PAR_ERR_MASK 0x500614
+
+/*
+ * TI PULLFUB parity status mask
+ * BIT(0) - TrnPullReqQ_sts parity status
+ * BIT(1) - TrnSharedDataQ_sts parity status
+ * BIT(2) - TrnPullReqDataQ_sts parity status
+ * BIT(4) - CPP_CiPullReqQ_sts parity status
+ * BIT(5) - CPP_TrnPullReqQ_sts parity status
+ * BIT(6) - CPP_PullidQ_sts parity status
+ * BIT(7) - CPP_WaitDataQ_sts parity status
+ * BIT(8) - CPP_CdDataQ_sts parity status
+ * BIT(9) - CPP_TrnDataQP0_sts parity status
+ * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status
+ * BIT(12) - CPP_TrnDataQP1_sts parity status
+ * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status
+ */
+#define ADF_GEN6_TI_PULL0FUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \
+ BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14))
+
+/* TI PUSHFUB parity status */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS 0x500630
+
+/* TI PUSHFUB parity error reporting mask */
+#define ADF_GEN6_TI_PUSHFUB_PAR_ERR_MASK 0x50062C
+
+/*
+ * TI PUSHFUB parity status mask
+ * BIT(0) - SbPushReqQ_sts parity status
+ * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status
+ * BIT(4) - CPP_CdPushReqQ_sts parity status
+ * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status
+ * BIT(7) - CPP_SbPushReqQ_sts parity status
+ * BIT(8) - CPP_SbPushDataQP_sts parity status
+ * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status
+ */
+#define ADF_GEN6_TI_PUSHFUB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10))
+
+/* TI CD parity status */
+#define ADF_GEN6_TI_CD_PAR_STS 0x50063C
+
+/* TI CD parity error mask */
+#define ADF_GEN6_TI_CD_PAR_ERR_MASK 0x500638
+
+/*
+ * TI CD parity status mask
+ * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status
+ * BIT(16) - Leaf2ClusterRam_sts parity status
+ * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status
+ * BIT(19) - VirtualQ_sts parity status
+ * BIT(20) - DtRdQ_sts parity status
+ * BIT(21) - DtWrQ_sts parity status
+ * BIT(22) - RiCmdQ_sts parity status
+ * BIT(23) - BypassQ_sts parity status
+ * BIT(24) - DtRdQ_sc_sts parity status
+ * BIT(25) - DtWrQ_sc_sts parity status
+ */
+#define ADF_GEN6_TI_CD_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25))
+
+/* TI TRNSB parity status */
+#define ADF_GEN6_TI_TRNSB_PAR_STS 0x500648
+
+/* TI TRNSB parity error reporting mask */
+#define ADF_GEN6_TI_TRNSB_PAR_ERR_MASK 0x500644
+
+/*
+ * TI TRNSB parity status mask
+ * BIT(0) - TrnPHdrQP_sts parity status
+ * BIT(1) - TrnPHdrQRF_sts parity status
+ * BIT(2) - TrnPDataQP_sts parity status
+ * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status
+ * BIT(7) - TrnNpHdrQP_sts parity status
+ * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status
+ * BIT(10) - TrnCplHdrQ_sts parity status
+ * BIT(11) - TrnPutObsReqQ_sts parity status
+ * BIT(12) - TrnPushReqQ_sts parity status
+ * BIT(13) - SbSplitIdRam_sts parity status
+ * BIT(14) - SbReqCountQ_sts parity status
+ * BIT(15) - SbCplTrkRam_sts parity status
+ * BIT(16) - SbGetObsReqQ_sts parity status
+ * BIT(17) - SbEpochIdQ_sts parity status
+ * BIT(18) - SbAtCplHdrQ_sts parity status
+ * BIT(19) - SbAtCplDataQ_sts parity status
+ * BIT(20) - SbReqCountRam_sts parity status
+ * BIT(21) - SbAtCplHdrQ_sc_sts parity status
+ */
+#define ADF_GEN6_TI_TRNSB_PAR_STS_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \
+ BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \
+ BIT(19) | BIT(20) | BIT(21))
+
+/* Status register to log misc error on RI */
+#define ADF_GEN6_RIMISCSTS 0x41B1B8
+
+/* Status control register to log misc RI error */
+#define ADF_GEN6_RIMISCCTL 0x41B1BC
+
+/*
+ * ERRSOU2 bit mask
+ * BIT(0) - SSM Interrupt Mask
+ * BIT(1) - CFC on CPP. Logical OR of CFC push error and pull error
+ * BIT(2) - BIT(4) - CPP attention interrupts
+ * BIT(18) - PM interrupt
+ */
+#define ADF_GEN6_ERRSOU2_SSM_ERR_BIT BIT(0)
+#define ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1)
+#define ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK \
+ (BIT(2) | BIT(3) | BIT(4))
+
+#define ADF_GEN6_ERRSOU2_PM_INT_BIT BIT(18)
+
+#define ADF_GEN6_ERRSOU2_MASK \
+ (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)
+
+#define ADF_GEN6_ERRSOU2_DIS_MASK \
+ (ADF_GEN6_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \
+ ADF_GEN6_ERRSOU2_CPP_CFC_ATT_INT_MASK)
+
+#define ADF_GEN6_IAINTSTATSSM 0x28
+
+/* IAINTSTATSSM error bit mask definitions */
+#define ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT BIT(0)
+#define ADF_GEN6_IAINTSTATSSM_PPERR_BIT BIT(2)
+#define ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT BIT(4)
+#define ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT BIT(5)
+#define ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT BIT(6)
+#define ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT BIT(7)
+
+#define ADF_GEN6_IAINTSTATSSM_MASK \
+ (ADF_GEN6_IAINTSTATSSM_SH_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_PPERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_SCMPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_CPPPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_RFPAR_ERR_BIT | \
+ ADF_GEN6_IAINTSTATSSM_UNEXP_CPL_ERR_BIT)
+
+#define ADF_GEN6_UERRSSMSH 0x18
+
+/*
+ * UERRSSMSH error bit mask definitions
+ *
+ * BIT(0) - Indicates one uncorrectable error
+ * BIT(15) - Indicates multiple uncorrectable errors
+ * in device shared memory
+ */
+#define ADF_GEN6_UERRSSMSH_MASK (BIT(0) | BIT(15))
+
+/*
+ * CERRSSMSH error bit
+ * BIT(0) - Indicates one correctable error
+ */
+#define ADF_GEN6_CERRSSMSH_ERROR_BIT (BIT(0) | BIT(15) | BIT(24))
+#define ADF_GEN6_CERRSSMSH 0x10
+
+#define ADF_GEN6_INTMASKSSM 0x0
+
+/*
+ * Error reporting mask in INTMASKSSM
+ * BIT(0) - Shared memory uncorrectable interrupt mask
+ * BIT(2) - PPERR interrupt mask
+ * BIT(4) - SCM parity error interrupt mask
+ * BIT(5) - CPP parity error interrupt mask
+ * BIT(6) - SHRAM RF parity error interrupt mask
+ * BIT(7) - AXI unexpected completion error mask
+ */
+#define ADF_GEN6_INTMASKSSM_MASK \
+ (BIT(0) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7))
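
Editor's note: since INTMASKSSM is a mask register, enabling reporting means clearing bits and disabling means setting them; a sketch under that assumed polarity (csr standing in for the SSM-relative base is also an assumption of this example):

static void adf_gen6_set_ssm_error_reporting(void __iomem *csr, bool enable)
{
	/* 0 unmasks an interrupt source, 1 suppresses it (assumed polarity) */
	u32 val = enable ? 0 : ADF_GEN6_INTMASKSSM_MASK;

	ADF_CSR_WR(csr, ADF_GEN6_INTMASKSSM, val);
}
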
+
+/* CPP push or pull error */
+#define ADF_GEN6_PPERR 0x8
+
+#define ADF_GEN6_PPERR_MASK (BIT(0) | BIT(1))
+
+/*
+ * SSM_FERR_STATUS error bit mask definitions
+ */
+#define ADF_GEN6_SCM_PAR_ERR_MASK BIT(5)
+#define ADF_GEN6_CPP_PAR_ERR_MASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN6_UNEXP_CPL_ERR_MASK (BIT(3) | BIT(4) | BIT(10) | BIT(11))
+#define ADF_GEN6_RF_PAR_ERR_MASK BIT(16)
+
+#define ADF_GEN6_SSM_FERR_STATUS 0x9C
+
+#define ADF_GEN6_CPP_CFC_ERR_STATUS 0x640C04
+
+/*
+ * BIT(0) - Indicates one or more CPP CFC errors
+ * BIT(1) - Indicates multiple CPP CFC errors
+ * BIT(7) - Indicates CPP CFC command parity error type
+ * BIT(8) - Indicates CPP CFC data parity error type
+ */
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT BIT(0)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7)
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8)
+#define ADF_GEN6_CPP_CFC_FATAL_ERR_BIT \
+ (ADF_GEN6_CPP_CFC_ERR_STATUS_ERR_BIT | \
+ ADF_GEN6_CPP_CFC_ERR_STATUS_MERR_BIT)
+
+/*
+ * BIT(0) - Enables CFC to detect and log a push/pull data error
+ * BIT(1) - Enables CFC to generate an interrupt to PCIEP for a CPP error
+ * BIT(4) - When set to 1, parity detection is disabled
+ * BIT(5) - When set to 1, parity detection is disabled on the CPP command bus
+ * BIT(6) - When set to 1, parity detection is disabled on the CPP push/pull bus
+ * BIT(9) - When set to 1, RF parity error detection is disabled
+ */
+#define ADF_GEN6_CPP_CFC_ERR_CTRL_MASK (BIT(0) | BIT(1))
+
+#define ADF_GEN6_CPP_CFC_ERR_CTRL_DIS_MASK \
+ (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10))
+
+#define ADF_GEN6_CPP_CFC_ERR_CTRL 0x640C00
+
+/*
+ * BIT(0) - Clears bit(0) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when an error is reported on CPP
+ * BIT(1) - Clears bit(1) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when multiple errors are reported on CPP
+ * BIT(2) - Clears bit(2) of ADF_GEN6_CPP_CFC_ERR_STATUS
+ * when attention interrupt is reported
+ */
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN6_CPP_CFC_ERR_STATUS_CLR 0x640C08
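
Editor's note: the status/clear pair above suggests the handling sequence: read the status, classify using the fatal and parity-type bits, then acknowledge through the dedicated clear register. A minimal sketch along those lines (function name and logging are illustrative):

static bool adf_gen6_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev,
					void __iomem *csr)
{
	u32 reg = ADF_CSR_RD(csr, ADF_GEN6_CPP_CFC_ERR_STATUS);

	if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_CMDPAR_BIT)
		dev_err(&GET_DEV(accel_dev), "CPP CFC command parity error\n");

	if (reg & ADF_GEN6_CPP_CFC_ERR_STATUS_DATAPAR_BIT)
		dev_err(&GET_DEV(accel_dev), "CPP CFC data parity error\n");

	/* Acknowledge the logged error, multiple-error and attention bits */
	ADF_CSR_WR(csr, ADF_GEN6_CPP_CFC_ERR_STATUS_CLR,
		   ADF_GEN6_CPP_CFC_ERR_STATUS_CLR_MASK);

	return reg & ADF_GEN6_CPP_CFC_FATAL_ERR_BIT;
}
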
+
+/*
+ * ERRSOU3 bit masks
+ * BIT(0) - indicates error response order overflow and/or BME error
+ * BIT(1) - indicates RI push/pull error
+ * BIT(2) - indicates TI push/pull error
+ * BIT(5) - indicates TI pull parity error
+ * BIT(6) - indicates RI push parity error
+ * BIT(7) - indicates VFLR interrupt
+ * BIT(8) - indicates ring pair interrupts for ATU detected fault
+ * BIT(9) - indicates rate limiting error
+ */
+#define ADF_GEN6_ERRSOU3_TIMISCSTS_BIT BIT(0)
+#define ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK (BIT(1) | BIT(6))
+#define ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK (BIT(2) | BIT(5))
+#define ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT BIT(7)
+#define ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8)
+#define ADF_GEN6_ERRSOU3_RLTERROR_BIT BIT(9)
+#define ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT BIT(16)
+#define ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT BIT(17)
+#define ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT BIT(18)
+#define ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT BIT(19)
+#define ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT BIT(20)
+
+#define ADF_GEN6_ERRSOU3_MASK ( \
+ (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_PCIE_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_PG_REQ_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_XLT_CPL_DEVHALT_BIT) | \
+ (ADF_GEN6_ERRSOU3_TI_INT_ERR_DEVHALT_BIT))
+
+#define ADF_GEN6_ERRSOU3_DIS_MASK ( \
+ (ADF_GEN6_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK) | \
+ (ADF_GEN6_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN6_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN6_ERRSOU3_RLTERROR_BIT) | \
+ (ADF_GEN6_ERRSOU3_TC_VC_MAP_ERROR_BIT))
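
Editor's note: ERRSOU3 is a summary register, so its handler is naturally a dispatcher: each set bit routes to the status register of the block that raised it. A sketch of that shape, with hypothetical per-block handler names:

static void adf_gen6_process_errsou3(struct adf_accel_dev *accel_dev,
				     void __iomem *csr, u32 errsou,
				     bool *reset_required)
{
	if (errsou & ADF_GEN6_ERRSOU3_TIMISCSTS_BIT)
		handle_ti_misc_err(accel_dev, csr);

	if (errsou & ADF_GEN6_ERRSOU3_RICPPINTSTS_MASK)
		handle_ri_cpp_err(accel_dev, csr, reset_required);

	if (errsou & ADF_GEN6_ERRSOU3_TICPPINTSTS_MASK)
		handle_ti_cpp_err(accel_dev, csr, reset_required);

	if (errsou & ADF_GEN6_ERRSOU3_RLTERROR_BIT)
		handle_rl_err(accel_dev, csr);
}
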
+
+/* Rate limiting error log register */
+#define ADF_GEN6_RLT_ERRLOG 0x508814
+
+#define ADF_GEN6_RLT_ERRLOG_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* TI misc status register */
+#define ADF_GEN6_TIMISCSTS 0x50054C
+
+/* TI misc error reporting mask */
+#define ADF_GEN6_TIMISCCTL 0x500548
+
+/*
+ * TI Misc error reporting control mask
+ * BIT(0) - Enables error detection and logging in the TIMISCSTS register
+ * BIT(1) - Takes effect only when SR-IOV is enabled; this bit is 0 by default
+ * BIT(2) - Enables the D-F-x counter within the dispatch arbiter
+ * to start based on the command triggered from
+ * BIT(30) - Disables VFLR functionality
+ * The values of bits 1, 2 and 30 should be preserved and are not meant
+ * to be changed within RAS.
+ */
+#define ADF_GEN6_TIMISCCTL_BIT BIT(0)
+#define ADF_GEN6_TIMSCCTL_RELAY_MASK (BIT(1) | BIT(2) | BIT(30))
+
+/* RI CPP interface status register */
+#define ADF_GEN6_RICPPINTSTS 0x41A330
+
+/*
+ * Uncorrectable error mask in RICPPINTSTS register
+ * BIT(0) - RI asserted the CPP error signal during a push
+ * BIT(1) - RI detected the CPP error signal asserted during a pull
+ * BIT(2) - RI detected a push data parity error
+ * BIT(3) - RI detected a push valid parity error
+ */
+#define ADF_GEN6_RICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* RI CPP interface register control */
+#define ADF_GEN6_RICPPINTCTL 0x41A32C
+
+/*
+ * Control bit mask for RICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting
+ * on the RI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting
+ * on the RI CPP Pull interface
+ * BIT(2) - value of 1 enables error detection and reporting
+ * on the RI Parity
+ * BIT(3) - value of 1 enables parity checking on CPP
+ */
+#define ADF_GEN6_RICPPINTCTL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* TI CPP interface status register */
+#define ADF_GEN6_TICPPINTSTS 0x50053C
+
+/*
+ * Uncorrectable error mask in TICPPINTSTS register
+ * BIT(0) - value of 1 indicates that the TI asserted
+ * the CPP error signal during a push
+ * BIT(1) - value of 1 indicates that the TI detected
+ * the CPP error signal asserted during a pull
+ * BIT(2) - value of 1 indicates that the TI detected
+ * a pull data parity error
+ */
+#define ADF_GEN6_TICPPINTSTS_MASK (BIT(0) | BIT(1) | BIT(2))
+
+/* TI CPP interface status register control */
+#define ADF_GEN6_TICPPINTCTL 0x500538
+
+/*
+ * Control bit mask for TICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting on
+ * the TI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting on
+ * the TI CPP Pull interface
+ * BIT(2) - value of 1 enables parity error detection and logging on
+ * the TI CPP Pull interface
+ * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking
+ */
+#define ADF_GEN6_TICPPINTCTL_MASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* ATU fault status register */
+#define ADF_GEN6_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4))
+
+#define ADF_GEN6_ATUFAULTSTATUS_BIT BIT(0)
+
+/* Command parity error detected on IOSFP command to QAT */
+#define ADF_GEN6_RIMISCSTS_BIT BIT(0)
+
+#define ADF_GEN6_GENSTS 0x41A220
+#define ADF_GEN6_GENSTS_DEVICE_STATE_MASK GENMASK(1, 0)
+#define ADF_GEN6_GENSTS_RESET_TYPE_MASK GENMASK(3, 2)
+#define ADF_GEN6_GENSTS_PFLR 0x1
+#define ADF_GEN6_GENSTS_COLD_RESET 0x3
+#define ADF_GEN6_GENSTS_DEVHALT 0x1
+
+void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops);
+
+#endif /* ADF_GEN6_RAS_H_ */
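
Editor's note: the lone entry point above mirrors the per-generation RAS pattern elsewhere in qat_common: device setup code passes in its struct adf_ras_ops and the GEN6 code installs enable/disable/handle callbacks. Roughly, with callback names assumed from the GEN4 counterpart rather than taken from this patch:

void adf_gen6_init_ras_ops(struct adf_ras_ops *ras_ops)
{
	ras_ops->enable_ras_errors = adf_gen6_enable_ras;
	ras_ops->disable_ras_errors = adf_gen6_disable_ras;
	ras_ops->handle_interrupt = adf_gen6_handle_interrupt;
}
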
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
new file mode 100644
index 000000000000..58a072e2f936
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/export.h>
+
+#include "adf_gen4_config.h"
+#include "adf_gen4_hw_csr_data.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_gen6_shared.h"
+
+struct adf_accel_dev;
+struct adf_pfvf_ops;
+struct adf_hw_csr_ops;
+
+/*
+ * QAT GEN4 and GEN6 devices often differ in terms of supported features,
+ * options and internal logic. However, some of the mechanisms and register
+ * layout are shared between those two GENs. This file serves as an
+ * abstraction layer that allows reuse of the existing GEN4 implementation
+ * where it also applies to GEN6, without additional overhead and complexity.
+ */
+void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
+{
+ adf_gen4_init_pf_pfvf_ops(pfvf_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_pf_pfvf_ops);
+
+void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+{
+ adf_gen4_init_hw_csr_ops(csr_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops);
+
+int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ return adf_gen4_cfg_dev_init(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init);
+
+int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_comp_dev_config(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_comp_dev_config);
+
+int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_no_dev_config(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config);
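
Editor's note: in practice a GEN6 device driver consumes these wrappers from its hw_data init path, so it never references GEN4 symbols directly. A sketch, with field names assumed to follow the existing adf_hw_device_data layout:

	/* inside a GEN6 adf_init_hw_data_*() routine (illustrative) */
	adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	hw_data->cfg_dev_init = adf_gen6_cfg_dev_init;
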
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
new file mode 100644
index 000000000000..bc8e71e984fc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_GEN6_SHARED_H_
+#define ADF_GEN6_SHARED_H_
+
+struct adf_hw_csr_ops;
+struct adf_accel_dev;
+struct adf_pfvf_ops;
+
+void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
+void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
+int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
+#endif /* ADF_GEN6_SHARED_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_timer.c
index 35ccb91d6ec1..8962a49f145a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_timer.c
@@ -12,9 +12,9 @@
#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
-#include "adf_gen4_timer.h"
+#include "adf_timer.h"
-#define ADF_GEN4_TIMER_PERIOD_MS 200
+#define ADF_DEFAULT_TIMER_PERIOD_MS 200
/* This periodic update is used to trigger HB, RL & TL fw events */
static void work_handler(struct work_struct *work)
@@ -27,16 +27,16 @@ static void work_handler(struct work_struct *work)
accel_dev = timer_ctx->accel_dev;
adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
- msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+ msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS));
time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
- ADF_GEN4_TIMER_PERIOD_MS);
+ ADF_DEFAULT_TIMER_PERIOD_MS);
if (adf_send_admin_tim_sync(accel_dev, time_periods))
dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
}
-int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
+int adf_timer_start(struct adf_accel_dev *accel_dev)
{
struct adf_timer *timer_ctx;
@@ -50,13 +50,13 @@ int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
- msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+ msecs_to_jiffies(ADF_DEFAULT_TIMER_PERIOD_MS));
return 0;
}
-EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
+EXPORT_SYMBOL_GPL(adf_timer_start);
-void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
+void adf_timer_stop(struct adf_accel_dev *accel_dev)
{
struct adf_timer *timer_ctx = accel_dev->timer;
@@ -68,4 +68,4 @@ void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
kfree(timer_ctx);
accel_dev->timer = NULL;
}
-EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
+EXPORT_SYMBOL_GPL(adf_timer_stop);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_timer.h
index 66a709e7b358..68e5136d6ba1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_timer.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2023 Intel Corporation */
-#ifndef ADF_GEN4_TIMER_H_
-#define ADF_GEN4_TIMER_H_
+#ifndef ADF_TIMER_H_
+#define ADF_TIMER_H_
#include <linux/ktime.h>
#include <linux/workqueue.h>
@@ -15,7 +15,7 @@ struct adf_timer {
ktime_t initial_ktime;
};
-int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
-void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
+int adf_timer_start(struct adf_accel_dev *accel_dev);
+void adf_timer_stop(struct adf_accel_dev *accel_dev);
-#endif /* ADF_GEN4_TIMER_H_ */
+#endif /* ADF_TIMER_H_ */
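
Editor's note: after the rename the periodic timer is generation-neutral; callers use it symmetrically in their start and stop paths. A usage sketch (surrounding service bring-up elided):

	int ret;

	/* device start path */
	ret = adf_timer_start(accel_dev);
	if (ret)
		return ret;

	/* matching device stop path */
	adf_timer_stop(accel_dev);
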
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index 04f645957e28..81969c515a17 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -44,6 +44,7 @@ enum icp_qat_fw_comp_20_cmd_id {
#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MAX_VALUE 0xFFFFFFFF
#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
ret_uncomp, secure_ram) \
@@ -117,7 +118,7 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr, \
cnvdfx, crc, xxhash_acc, \
cnv_error_type, append_crc, \
- drop_data) \
+ drop_data, partial_decomp) \
((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << \
ICP_QAT_FW_COMP_SOP_BITPOS) | \
(((eop) & ICP_QAT_FW_COMP_EOP_MASK) << \
@@ -139,7 +140,9 @@ struct icp_qat_fw_comp_req_params {
(((append_crc) & ICP_QAT_FW_COMP_APPEND_CRC_MASK) \
<< ICP_QAT_FW_COMP_APPEND_CRC_BITPOS) | \
(((drop_data) & ICP_QAT_FW_COMP_DROP_DATA_MASK) \
- << ICP_QAT_FW_COMP_DROP_DATA_BITPOS))
+ << ICP_QAT_FW_COMP_DROP_DATA_BITPOS) | \
+ (((partial_decomp) & ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK) \
+ << ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS))
#define ICP_QAT_FW_COMP_NOT_SOP 0
#define ICP_QAT_FW_COMP_SOP 1
@@ -161,6 +164,8 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_NO_APPEND_CRC 0
#define ICP_QAT_FW_COMP_DROP_DATA 1
#define ICP_QAT_FW_COMP_NO_DROP_DATA 0
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS 1
+#define ICP_QAT_FW_COMP_NO_PARTIAL_DECOMPRESS 0
#define ICP_QAT_FW_COMP_SOP_BITPOS 0
#define ICP_QAT_FW_COMP_SOP_MASK 0x1
#define ICP_QAT_FW_COMP_EOP_BITPOS 1
@@ -189,6 +194,8 @@ struct icp_qat_fw_comp_req_params {
#define ICP_QAT_FW_COMP_APPEND_CRC_MASK 0x1
#define ICP_QAT_FW_COMP_DROP_DATA_BITPOS 25
#define ICP_QAT_FW_COMP_DROP_DATA_MASK 0x1
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_BITPOS 27
+#define ICP_QAT_FW_COMP_PARTIAL_DECOMP_MASK 0x1
#define ICP_QAT_FW_COMP_SOP_GET(flags) \
QAT_FIELD_GET(flags, ICP_QAT_FW_COMP_SOP_BITPOS, \
@@ -281,8 +288,18 @@ struct icp_qat_fw_comp_req {
union {
struct icp_qat_fw_xlt_req_params xlt_pars;
__u32 resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct {
+ __u32 partial_decompress_length;
+ __u32 partial_decompress_offset;
+ } partial_decompress;
} u1;
- __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ union {
+ __u32 resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ struct {
+ __u32 asb_value;
+ __u32 reserved;
+ } asb_threshold;
+ } u3;
struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
union {
struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
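
Editor's note: a caller requesting a ranged decompress would populate the new u1 overlay and pass ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS through the extended flags builder. A minimal sketch; the zero arguments stand in for session-derived fields, and src_offset/dst_len are hypothetical variables:

	req->u1.partial_decompress.partial_decompress_offset = src_offset;
	req->u1.partial_decompress.partial_decompress_length = dst_len;

	req->comp_pars.req_par_flags =
		ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(ICP_QAT_FW_COMP_SOP,
						      ICP_QAT_FW_COMP_EOP,
						      ICP_QAT_FW_COMP_BFINAL,
						      0, 0, 0, 0, 0, 0, 0, 0,
						      ICP_QAT_FW_COMP_PARTIAL_DECOMPRESS);
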
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
index 7eb5daef4f88..6887930c7995 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -35,6 +35,7 @@ struct icp_qat_fw_loader_chip_info {
u32 wakeup_event_val;
bool fw_auth;
bool css_3k;
+ bool dual_sign;
bool tgroup_share_ustore;
u32 fcu_ctl_csr;
u32 fcu_sts_csr;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h
new file mode 100644
index 000000000000..dce639152345
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ICP_QAT_HW_51_COMP_H_
+#define ICP_QAT_HW_51_COMP_H_
+
+#include <linux/types.h>
+
+#include "icp_qat_fw.h"
+#include "icp_qat_hw_51_comp_defs.h"
+
+struct icp_qat_hw_comp_51_config_csr_lower {
+ enum icp_qat_hw_comp_51_abd abd;
+ enum icp_qat_hw_comp_51_lllbd_ctrl lllbd;
+ enum icp_qat_hw_comp_51_search_depth sd;
+ enum icp_qat_hw_comp_51_min_match_control mmctrl;
+ enum icp_qat_hw_comp_51_lz4_block_checksum lbc;
+};
+
+static inline u32
+ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_51_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.abd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK);
+ QAT_FIELD_SET(val32, csr.lllbd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK);
+ QAT_FIELD_SET(val32, csr.sd,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK);
+ QAT_FIELD_SET(val32, csr.mmctrl,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_comp_51_config_csr_upper {
+ enum icp_qat_hw_comp_51_dmm_algorithm edmm;
+ enum icp_qat_hw_comp_51_bms bms;
+ enum icp_qat_hw_comp_51_scb_mode_reset_mask scb_mode_reset;
+};
+
+static inline u32
+ICP_QAT_FW_COMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_51_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.edmm,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK);
+ QAT_FIELD_SET(val32, csr.bms,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK);
+ QAT_FIELD_SET(val32, csr.scb_mode_reset,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
+ ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_decomp_51_config_csr_lower {
+ enum icp_qat_hw_decomp_51_lz4_block_checksum lbc;
+};
+
+static inline u32
+ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_51_config_csr_lower csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.lbc,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK);
+
+ return val32;
+}
+
+struct icp_qat_hw_decomp_51_config_csr_upper {
+ enum icp_qat_hw_decomp_51_bms bms;
+};
+
+static inline u32
+ICP_QAT_FW_DECOMP_51_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_51_config_csr_upper csr)
+{
+ u32 val32 = 0;
+
+ QAT_FIELD_SET(val32, csr.bms,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS,
+ ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK);
+
+ return val32;
+}
+
+#endif /* ICP_QAT_HW_51_COMP_H_ */
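
Editor's note: the builder helpers expect fully populated descriptor structs built from the enums in icp_qat_hw_51_comp_defs.h; for example, assembling the lower CSR word for an LZ4-style session with illustrative settings:

	struct icp_qat_hw_comp_51_config_csr_lower lower = {
		.abd = ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED,
		.lllbd = ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED,
		.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1,
		.mmctrl = ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_4B,
		.lbc = ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT,
	};
	u32 lower_csr = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(lower);
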
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h
new file mode 100644
index 000000000000..e745688c5da4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_51_comp_defs.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ICP_QAT_HW_51_COMP_DEFS_H_
+#define ICP_QAT_HW_51_COMP_DEFS_H_
+
+#include <linux/bits.h>
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_BITPOS 28
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_MASK GENMASK(1, 0)
+enum icp_qat_hw_comp_51_som_control {
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE = 0x0,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_DICTIONARY_MODE = 0x1,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_INPUT_CRC = 0x2,
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_RESERVED_MODE = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SOM_CONTROL_NORMAL_MODE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_rd_control {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_RD_CONTROL_NO_SKIP
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_BITPOS 25
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_bypass_compression {
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED = 0x0,
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYPASS_COMPRESSION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BYPASS_COMPRESSION_DISABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_BITPOS 22
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_dmm_algorithm {
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_ZSTD_DMM_LITE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_DMM_ALGORITHM_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_DMM_ALGORITHM_EDMM_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_token_fusion_internal_only {
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_TOKEN_FUSION_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_TOKEN_FUSION_INTERNAL_ONLY_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS 19
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0)
+enum icp_qat_hw_comp_51_bms {
+ ICP_QAT_HW_COMP_51_BMS_BMS_64KB = 0x0,
+ ICP_QAT_HW_COMP_51_BMS_BMS_256KB = 0x1,
+ ICP_QAT_HW_COMP_51_BMS_BMS_1MB = 0x2,
+ ICP_QAT_HW_COMP_51_BMS_BMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BMS_BMS_64KB
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_scb_mode_reset_mask {
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT = 0x0,
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_RESET_HB_HT = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SCB_MODE_RESET_MASK_DO_NOT_RESET_HB_HT
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_zstd_frame_gen_dec_en {
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0,
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_BITPOS 1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_cnv_disable {
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_CNV_DISABLE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_CNV_DISABLE_CNV_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_BITPOS 0
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_asb_disable {
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ASB_DISABLE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ASB_DISABLE_ASB_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_spec_decoder_internal_only {
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_mini_xcam_internal_only {
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_BITPOS 19
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_rep_off_enc_internal_only {
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_REP_OFF_ENC_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_REP_OFF_ENC_INTERNAL_ONLY_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_BITPOS 18
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_prog_block_drop_internal_only {
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE = 0x0,
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_PROG_BLOCK_DROP_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_PROG_BLOCK_DROP_INTERNAL_ONLY_DISABLE
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_BITPOS 17
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_override_internal_only {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_OVERRIDE_HASH_PARAMS = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_OVERRIDE_INTERNAL_ONLY_DETERMINE_HASH_PARAMS
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_BITPOS 14
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0)
+enum icp_qat_hw_comp_51_hbs {
+ ICP_QAT_HW_COMP_51_HBS_32KB = 0x0,
+ ICP_QAT_HW_COMP_51_HBS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_HBS_32KB
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_BITPOS 13
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_abd {
+ ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_ABD_ABD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_ABD_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_ABD_ABD_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_lllbd_ctrl {
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_ENABLED
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_MASK GENMASK(3, 0)
+enum icp_qat_hw_comp_51_search_depth {
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1 = 0x1,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_6 = 0x3,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_9 = 0x4,
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_10 = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_BITPOS 5
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0)
+enum icp_qat_hw_comp_51_format {
+ ICP_QAT_HW_COMP_51_FORMAT_ILZ77 = 0x1,
+ ICP_QAT_HW_COMP_51_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_COMP_51_FORMAT_LZ4s = 0x3,
+ ICP_QAT_HW_COMP_51_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_FORMAT_ILZ77
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_min_match_control {
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_MIN_MATCH_CONTROL_MATCH_3B
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_collision {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_COLLISION_ALLOW
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_skip_hash_update {
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW = 0x0,
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_SKIP_HASH_UPDATE_ALLOW
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_BITPOS 1
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_byte_skip {
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN = 0x0,
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_LITERAL = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_BYTE_SKIP_3BYTE_TOKEN
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0)
+enum icp_qat_hw_comp_51_lz4_block_checksum {
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_COMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \
+ ICP_QAT_HW_COMP_51_LZ4_BLOCK_CHECKSUM_ABSENT
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_BITPOS 26
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_discard_data {
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED = 0x0,
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_ENABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_DISCARD_DATA_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_DISCARD_DATA_DISABLED
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_BITPOS 19
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_MASK GENMASK(1, 0)
+enum icp_qat_hw_decomp_51_bms {
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB = 0x0,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_256KB = 0x1,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_1MB = 0x2,
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_4MB = 0x3,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_BMS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_BMS_BMS_64KB
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_BITPOS 2
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_zstd_frame_gen_dec_en {
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_DISABLE = 0x0,
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_ZSTD_FRAME_GEN_DEC_EN_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_ZSTD_FRAME_GEN_DEC_EN_ZSTD_FRAME_HDR_ENABLE
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_BITPOS 21
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_spec_decoder_internal_only {
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_SPEC_DECODER_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_SPEC_DECODER_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_BITPOS 20
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_mini_xcam_internal_only {
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL = 0x0,
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_DISABLED = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_MINI_XCAM_INTERNAL_ONLY_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_MINI_XCAM_INTERNAL_ONLY_NORMAL
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_BITPOS 14
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_MASK GENMASK(2, 0)
+enum icp_qat_hw_decomp_51_hbs {
+ ICP_QAT_HW_DECOMP_51_HBS_32KB = 0x0,
+ ICP_QAT_HW_DECOMP_51_HBS_64KB = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_HBS_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_HBS_32KB
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_BITPOS 5
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_MASK GENMASK(2, 0)
+enum icp_qat_hw_decomp_51_format {
+ ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77 = 0x1,
+ ICP_QAT_HW_DECOMP_51_FORMAT_LZ4 = 0x2,
+ ICP_QAT_HW_DECOMP_51_FORMAT_RESERVED = 0x3,
+ ICP_QAT_HW_DECOMP_51_FORMAT_ZSTD = 0x4,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_FORMAT_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_FORMAT_ILZ77
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_BITPOS 0
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_MASK GENMASK(0, 0)
+enum icp_qat_hw_decomp_51_lz4_block_checksum {
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT = 0x0,
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_PRESENT = 0x1,
+};
+
+#define ICP_QAT_HW_DECOMP_51_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_DEFAULT_VAL \
+ ICP_QAT_HW_DECOMP_51_LZ4_BLOCK_CHECKSUM_ABSENT
+
+#endif /* ICP_QAT_HW_51_COMP_DEFS_H_ */
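
Editor's note: one convention worth calling out in this file: every *_MASK is the field width before shifting (GENMASK(1, 0) is a 2-bit field), and *_BITPOS is the shift QAT_FIELD_SET applies. So the BMS field at bit position 19 occupies CSR bits 20:19, and the 256 KB setting packs as:

	/* manual equivalent of QAT_FIELD_SET for BMS = 256 KB (0x1) */
	u32 val = (ICP_QAT_HW_COMP_51_BMS_BMS_256KB &
		   ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_MASK) <<
		  ICP_QAT_HW_COMP_51_CONFIG_CSR_BMS_BITPOS;	/* 0x00080000 */
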
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index 1c7bcd8e4055..6313c35eff0c 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -7,6 +7,7 @@
#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
+#define ICP_QAT_AC_6XXX_DEV_TYPE 0x80000000
#define ICP_QAT_UCLO_MAX_AE 17
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
@@ -81,6 +82,21 @@
#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
+/* All lengths below are in bytes */
+#define ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN 12
+#define ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN 16
+#define ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN 3540
+#define ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN 64
+#define ICP_QAT_DUALSIGN_XMSS_SIG_LEN 2692
+#define ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN 2696
+#define ICP_QAT_DUALSIGN_MISC_INFO_LEN 16
+#define ICP_QAT_DUALSIGN_FW_TYPE_LEN 7
+#define ICP_QAT_DUALSIGN_MODULE_TYPE 0x14
+#define ICP_QAT_DUALSIGN_HDR_LEN 0x375
+#define ICP_QAT_DUALSIGN_HDR_VER 0x40001
+#define ICP_QAT_DUALSIGN_HDR_LEN_OFFSET 4
+#define ICP_QAT_DUALSIGN_HDR_VER_OFFSET 8
+
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
@@ -440,6 +456,13 @@ struct icp_qat_fw_auth_desc {
unsigned int img_ae_init_data_low;
unsigned int img_ae_insts_high;
unsigned int img_ae_insts_low;
+ unsigned int cpp_mask;
+ unsigned int reserved;
+ unsigned int xmss_pubkey_high;
+ unsigned int xmss_pubkey_low;
+ unsigned int xmss_sig_high;
+ unsigned int xmss_sig_low;
+ unsigned int reserved2[2];
};
struct icp_qat_auth_chunk {
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index a6e02405d402..8b123472b71c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -8,6 +8,7 @@
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_dc.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
@@ -145,9 +146,7 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
return -EINVAL;
ctx->inst = inst;
- ctx->inst->build_deflate_ctx(ctx->comp_ctx);
-
- return 0;
+ return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE);
}
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
@@ -241,13 +240,13 @@ static struct acomp_alg qat_acomp[] = { {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_reqsize = sizeof(struct qat_compression_req),
.cra_module = THIS_MODULE,
},
.init = qat_comp_alg_init_tfm,
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_compress,
.decompress = qat_comp_alg_decompress,
- .reqsize = sizeof(struct qat_compression_req),
}};
int qat_comp_algs_register(void)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 7842a9f22178..c285b45b8679 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -144,7 +144,6 @@ static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
inst->id = i;
atomic_set(&inst->refctr, 0);
inst->accel_dev = accel_dev;
- inst->build_deflate_ctx = GET_DC_OPS(accel_dev)->build_deflate_ctx;
snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.h b/drivers/crypto/intel/qat/qat_common/qat_compression.h
index aebac2302dcf..5ced3ed0e5ea 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.h
@@ -20,7 +20,6 @@ struct qat_compression_instance {
atomic_t refctr;
struct qat_instance_backlog backlog;
struct adf_dc_data *dc_data;
- void (*build_deflate_ctx)(void *ctx);
};
static inline bool adf_hw_dev_has_compression(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index ef8a9cf74f0c..da4eca6e1633 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -694,16 +694,17 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->pci_dev = pci_info->pci_dev;
switch (handle->pci_dev->device) {
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
+ case PCI_DEVICE_ID_INTEL_QAT_6XXX:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
- if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID)
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_420XX)
handle->chip_info->icp_rst_mask = 0x100155;
else
handle->chip_info->icp_rst_mask = 0x100015;
@@ -712,6 +713,8 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->wakeup_event_val = 0x80000000;
handle->chip_info->fw_auth = true;
handle->chip_info->css_3k = true;
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX)
+ handle->chip_info->dual_sign = true;
handle->chip_info->tgroup_share_ustore = true;
handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 7678a93c6853..21d652a1c8ef 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
+#include <linux/wordpart.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
@@ -59,7 +64,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
unsigned int i;
if (!ae_data) {
- pr_err("QAT: bad argument, ae_data is NULL\n");
+ pr_err("bad argument, ae_data is NULL\n");
return -EINVAL;
}
@@ -86,12 +91,11 @@ static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
int min = hdr->min_ver & 0xff;
if (hdr->file_id != ICP_QAT_UOF_FID) {
- pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+ pr_err("Invalid header 0x%x\n", hdr->file_id);
return -EINVAL;
}
if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
- pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
@@ -103,20 +107,19 @@ static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
int min = suof_hdr->min_ver & 0xff;
if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
- pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
+ pr_err("invalid header 0x%x\n", suof_hdr->file_id);
return -EINVAL;
}
if (suof_hdr->fw_type != 0) {
- pr_err("QAT: unsupported firmware type\n");
+ pr_err("unsupported firmware type\n");
return -EINVAL;
}
if (suof_hdr->num_chunks <= 0x1) {
- pr_err("QAT: SUOF chunk amount is incorrect\n");
+ pr_err("SUOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
- pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad SUOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
@@ -223,24 +226,24 @@ static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
char *str;
if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
- pr_err("QAT: initmem is out of range");
+ pr_err("initmem is out of range");
return -EINVAL;
}
if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
- pr_err("QAT: Memory scope for init_mem error\n");
+ pr_err("Memory scope for init_mem error\n");
return -EINVAL;
}
str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
if (!str) {
- pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+ pr_err("AE name assigned in UOF init table is NULL\n");
return -EINVAL;
}
if (qat_uclo_parse_num(str, ae)) {
- pr_err("QAT: Parse num for AE number failed\n");
+ pr_err("Parse num for AE number failed\n");
return -EINVAL;
}
if (*ae >= ICP_QAT_UCLO_MAX_AE) {
- pr_err("QAT: ae %d out of range\n", *ae);
+ pr_err("ae %d out of range\n", *ae);
return -EINVAL;
}
return 0;
@@ -356,8 +359,7 @@ static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
break;
default:
- pr_err("QAT: initmem region error. region type=0x%x\n",
- init_mem->region);
+ pr_err("initmem region error. region type=0x%x\n", init_mem->region);
return -EINVAL;
}
return 0;
@@ -431,7 +433,7 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_batch_wr_lm(handle, ae,
obj_handle->lm_init_tab[ae])) {
- pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+ pr_err("fail to batch init lmem for AE %d\n", ae);
return -EINVAL;
}
qat_uclo_cleanup_batch_init_list(handle,
@@ -539,26 +541,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
code_page->imp_expr_tab_offset);
if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
imp_expr_tab->entry_num) {
- pr_err("QAT: UOF can't contain imported variable to be parsed\n");
+ pr_err("UOF can't contain imported variable to be parsed\n");
return -EINVAL;
}
neigh_reg_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->neigh_reg_tab_offset);
if (neigh_reg_tab->entry_num) {
- pr_err("QAT: UOF can't contain neighbor register table\n");
+ pr_err("UOF can't contain neighbor register table\n");
return -EINVAL;
}
if (image->numpages > 1) {
- pr_err("QAT: UOF can't contain multiple pages\n");
+ pr_err("UOF can't contain multiple pages\n");
return -EINVAL;
}
if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use shared control store feature\n");
+ pr_err("UOF can't use shared control store feature\n");
return -EFAULT;
}
if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use reloadable feature\n");
+ pr_err("UOF can't use reloadable feature\n");
return -EFAULT;
}
return 0;
@@ -677,7 +679,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
}
}
if (!mflag) {
- pr_err("QAT: uimage uses AE not set\n");
+ pr_err("uimage uses AE not set\n");
return -EINVAL;
}
return 0;
@@ -731,14 +733,15 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C62X_DEV_TYPE;
case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
- case ADF_4XXX_PCI_DEVICE_ID:
- case ADF_401XX_PCI_DEVICE_ID:
- case ADF_402XX_PCI_DEVICE_ID:
- case ADF_420XX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_4XXX:
+ case PCI_DEVICE_ID_INTEL_QAT_401XX:
+ case PCI_DEVICE_ID_INTEL_QAT_402XX:
+ case PCI_DEVICE_ID_INTEL_QAT_420XX:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
+ case PCI_DEVICE_ID_INTEL_QAT_6XXX:
+ return ICP_QAT_AC_6XXX_DEV_TYPE;
default:
- pr_err("QAT: unsupported device 0x%x\n",
- handle->pci_dev->device);
+ pr_err("unsupported device 0x%x\n", handle->pci_dev->device);
return 0;
}
}
@@ -748,7 +751,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
unsigned int maj_ver, prod_type = obj_handle->prod_type;
if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
- pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
+ pr_err("UOF type 0x%x doesn't match with platform 0x%x\n",
obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
prod_type);
return -EINVAL;
@@ -756,7 +759,7 @@ static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
maj_ver = obj_handle->prod_rev & 0xff;
if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
- pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+ pr_err("UOF majVer 0x%x out of range\n", maj_ver);
return -EINVAL;
}
return 0;
@@ -799,7 +802,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_NEIGH_REL:
return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
default:
- pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
+ pr_err("UOF uses not supported reg type 0x%x\n", reg_type);
return -EFAULT;
}
return 0;
@@ -835,8 +838,7 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
case ICP_QAT_UOF_INIT_REG_CTX:
/* check if ctx is appropriate for the ctxMode */
if (!((1 << init_regsym->ctx) & ctx_mask)) {
- pr_err("QAT: invalid ctx num = 0x%x\n",
- init_regsym->ctx);
+ pr_err("invalid ctx num = 0x%x\n", init_regsym->ctx);
return -EINVAL;
}
qat_uclo_init_reg(handle, ae,
@@ -848,10 +850,10 @@ static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
exp_res);
break;
case ICP_QAT_UOF_INIT_EXPR:
- pr_err("QAT: INIT_EXPR feature not supported\n");
+ pr_err("INIT_EXPR feature not supported\n");
return -EINVAL;
case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
- pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+ pr_err("INIT_EXPR_ENDIAN_SWAP feature not supported\n");
return -EINVAL;
default:
break;
@@ -871,7 +873,7 @@ static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
return 0;
if (obj_handle->init_mem_tab.entry_num) {
if (qat_uclo_init_memory(handle)) {
- pr_err("QAT: initialize memory failed\n");
+ pr_err("initialize memory failed\n");
return -EINVAL;
}
}
@@ -900,40 +902,40 @@ static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+ pr_err("qat_hal_set_ae_ctx_mode error\n");
return ret;
}
if (handle->chip_info->nn) {
mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+ pr_err("qat_hal_set_ae_nn_mode error\n");
return ret;
}
}
mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM0 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM1 error\n");
return ret;
}
if (handle->chip_info->lm2lm3) {
mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM2 error\n");
return ret;
}
mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
if (ret) {
- pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
+ pr_err("qat_hal_set_ae_lm_mode LMEM3 error\n");
return ret;
}
mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
@@ -997,7 +999,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
obj_handle->prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (qat_uclo_check_uof_compat(obj_handle)) {
- pr_err("QAT: UOF incompatible\n");
+ pr_err("UOF incompatible\n");
return -EINVAL;
}
obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
@@ -1008,7 +1010,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
if (!obj_handle->obj_hdr->file_buff ||
!qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
&obj_handle->str_table)) {
- pr_err("QAT: UOF doesn't have effective images\n");
+ pr_err("UOF doesn't have effective images\n");
goto out_err;
}
obj_handle->uimage_num =
@@ -1017,7 +1019,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
if (!obj_handle->uimage_num)
goto out_err;
if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
- pr_err("QAT: Bad object\n");
+ pr_err("Bad object\n");
goto out_check_uof_aemask_err;
}
qat_uclo_init_uword_num(handle);
@@ -1034,6 +1036,36 @@ out_err:
return -EFAULT;
}
+static unsigned int qat_uclo_simg_hdr2sign_len(struct icp_qat_fw_loader_handle *handle)
+{
+ if (handle->chip_info->dual_sign)
+ return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN;
+
+ return ICP_QAT_AE_IMG_OFFSET(handle);
+}
+
+static unsigned int qat_uclo_simg_hdr2cont_len(struct icp_qat_fw_loader_handle *handle)
+{
+ if (handle->chip_info->dual_sign)
+ return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN + ICP_QAT_DUALSIGN_MISC_INFO_LEN;
+
+ return ICP_QAT_AE_IMG_OFFSET(handle);
+}
+
+static unsigned int qat_uclo_simg_fw_type(struct icp_qat_fw_loader_handle *handle, void *img_ptr)
+{
+ struct icp_qat_css_hdr *hdr = img_ptr;
+ char *fw_hdr = img_ptr;
+ unsigned int offset;
+
+ if (handle->chip_info->dual_sign) {
+ offset = qat_uclo_simg_hdr2sign_len(handle) + ICP_QAT_DUALSIGN_FW_TYPE_LEN;
+ return *(fw_hdr + offset);
+ }
+
+ return hdr->fw_type;
+}
+
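
The two helpers above centralize how many bytes of signed-image header precede the payload: dual-signed images carry a fixed opaque-data block (plus a misc-info block when computing the content offset), while legacy RSA-only images derive the offset from the per-device CSS geometry. A minimal sketch of the same dispatch, with illustrative constants standing in for the ICP_QAT_DUALSIGN_* macros:

#include <stdbool.h>

#define OPAQUE_DATA_LEN   0x70   /* hypothetical stand-in */
#define MISC_INFO_LEN     0x10   /* hypothetical stand-in */
#define LEGACY_IMG_OFFSET 0x3a0  /* hypothetical CSS-derived offset */

struct chip_info { bool dual_sign; };

static unsigned int sign_hdr_len(const struct chip_info *ci)
{
	/* bytes covered by the signature header */
	return ci->dual_sign ? OPAQUE_DATA_LEN : LEGACY_IMG_OFFSET;
}

static unsigned int cont_hdr_len(const struct chip_info *ci)
{
	/* dual-signed images add a misc-info block before the content */
	return ci->dual_sign ? OPAQUE_DATA_LEN + MISC_INFO_LEN
			     : LEGACY_IMG_OFFSET;
}

qat_uclo_simg_fw_type() follows the same split: on dual-signed parts the firmware-type byte sits at a fixed offset just past the opaque header, whereas legacy images keep it in the CSS header's fw_type field.
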
static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_filehdr *suof_ptr,
int suof_size)
@@ -1050,7 +1082,7 @@ static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
min_ver_offset);
if (check_sum != suof_ptr->check_sum) {
- pr_err("QAT: incorrect SUOF checksum\n");
+ pr_err("incorrect SUOF checksum\n");
return -EINVAL;
}
suof_handle->check_sum = suof_ptr->check_sum;
@@ -1065,9 +1097,9 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
- unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle);
- struct icp_qat_simg_ae_mode *ae_mode;
+ unsigned int offset = qat_uclo_simg_hdr2cont_len(handle);
struct icp_qat_suof_objhdr *suof_objhdr;
+ struct icp_qat_simg_ae_mode *ae_mode;
suof_img_hdr->simg_buf = (suof_handle->suof_buf +
suof_chunk_hdr->offset +
@@ -1112,14 +1144,13 @@ static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (img_ae_mode->dev_type != prod_type) {
- pr_err("QAT: incompatible product type %x\n",
- img_ae_mode->dev_type);
+ pr_err("incompatible product type %x\n", img_ae_mode->dev_type);
return -EINVAL;
}
maj_ver = prod_rev & 0xff;
if (maj_ver > img_ae_mode->devmax_ver ||
maj_ver < img_ae_mode->devmin_ver) {
- pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
+ pr_err("incompatible device majver 0x%x\n", maj_ver);
return -EINVAL;
}
return 0;
@@ -1162,7 +1193,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_img_hdr img_header;
if (!suof_ptr || suof_size == 0) {
- pr_err("QAT: input parameter SUOF pointer/size is NULL\n");
+ pr_err("input parameter SUOF pointer/size is NULL\n");
return -EINVAL;
}
if (qat_uclo_check_suof_format(suof_ptr))
@@ -1205,7 +1236,6 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
}
#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
-#define BITS_IN_DWORD 32
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
@@ -1223,7 +1253,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;
- SET_CAP_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
+ SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32));
SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
@@ -1237,7 +1267,7 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
return 0;
} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
- pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n",
+ pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n",
fcu_sts & FCU_AUTH_STS_MASK, retry);
return -EINVAL;
}
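
BITS_PER_TYPE(u32) evaluates to 32, so the CSR write is unchanged; the patch simply drops the private BITS_IN_DWORD define. The same high/low split recurs throughout the descriptor building below, where upper_32_bits()/lower_32_bits() record a 64-bit DMA address into paired 32-bit fields and ADD_ADDR() recombines them. A self-contained check of that round trip:

#include <stdint.h>
#include <assert.h>

#define ADD_ADDR(high, low) ((((uint64_t)(high)) << 32) + (low))

int main(void)
{
	uint64_t bus_addr = 0x0000004512345678ULL;
	uint32_t hi = bus_addr >> 32;        /* upper_32_bits() */
	uint32_t lo = (uint32_t)bus_addr;    /* lower_32_bits() */

	/* the FCU reassembles the two CSR values into one address */
	assert(ADD_ADDR(hi, lo) == bus_addr);
	return 0;
}
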
@@ -1273,14 +1303,13 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
fcu_sts_csr = handle->chip_info->fcu_sts_csr;
fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
} else {
- pr_err("Chip 0x%x doesn't support broadcast load\n",
- handle->pci_dev->device);
+ pr_err("Chip 0x%x doesn't support broadcast load\n", handle->pci_dev->device);
return -EINVAL;
}
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
- pr_err("QAT: Broadcast load failed. AE is not enabled or active.\n");
+ pr_err("Broadcast load failed. AE is not enabled or active.\n");
return -EINVAL;
}
@@ -1312,7 +1341,7 @@ static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
- pr_err("QAT: broadcast load failed timeout %d\n", retry);
+ pr_err("broadcast load failed timeout %d\n", retry);
return -EINVAL;
}
}
@@ -1366,24 +1395,38 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
}
static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
- char *image, unsigned int size,
+ void *image, unsigned int size,
unsigned int fw_type)
{
char *fw_type_name = fw_type ? "MMP" : "AE";
unsigned int css_dword_size = sizeof(u32);
+ unsigned int header_len, simg_type;
+ struct icp_qat_css_hdr *css_hdr;
if (handle->chip_info->fw_auth) {
- struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
- unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+ header_len = qat_uclo_simg_hdr2sign_len(handle);
+ simg_type = qat_uclo_simg_fw_type(handle, image);
+ css_hdr = image;
+
+ if (handle->chip_info->dual_sign) {
+ if (css_hdr->module_type != ICP_QAT_DUALSIGN_MODULE_TYPE)
+ goto err;
+ if (css_hdr->header_len != ICP_QAT_DUALSIGN_HDR_LEN)
+ goto err;
+ if (css_hdr->header_ver != ICP_QAT_DUALSIGN_HDR_VER)
+ goto err;
+ } else {
+ if (css_hdr->header_len * css_dword_size != header_len)
+ goto err;
+ if (css_hdr->size * css_dword_size != size)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ }
- if ((css_hdr->header_len * css_dword_size) != header_len)
- goto err;
- if ((css_hdr->size * css_dword_size) != size)
- goto err;
- if (fw_type != css_hdr->fw_type)
- goto err;
- if (size <= header_len)
+ if (fw_type != simg_type)
goto err;
+
size -= header_len;
}
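
The validation now forks: dual-signed headers are checked against fixed module-type/length/version constants, while legacy headers are cross-checked against the image size (lengths in the CSS header are counted in dwords). A sketch of the two branches, with illustrative constants in place of the ICP_QAT_DUALSIGN_* values:

#include <stdbool.h>
#include <stdint.h>

struct css_hdr { uint32_t module_type, header_len, header_ver, size; };

static bool hdr_ok(const struct css_hdr *h, bool dual_sign,
		   unsigned int hdr_len, unsigned int img_size)
{
	if (dual_sign)  /* fixed constants defined by the signing scheme */
		return h->module_type == 0x6 &&    /* hypothetical */
		       h->header_len == 0x60 &&    /* hypothetical */
		       h->header_ver == 0x101;     /* hypothetical */

	/* legacy: dword lengths must be consistent with the image */
	return h->header_len * sizeof(uint32_t) == hdr_len &&
	       h->size * sizeof(uint32_t) == img_size &&
	       img_size > hdr_len;
}
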
@@ -1397,123 +1440,95 @@ static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
goto err;
} else {
- pr_err("QAT: Unsupported firmware type\n");
+ pr_err("Unsupported firmware type\n");
return -EINVAL;
}
return 0;
err:
- pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ pr_err("Invalid %s firmware image\n", fw_type_name);
return -EINVAL;
}
-static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
- char *image, unsigned int size,
- struct icp_qat_fw_auth_desc **desc)
+static int qat_uclo_build_auth_desc_RSA(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int fw_type, struct icp_qat_fw_auth_desc **desc)
{
struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
- struct icp_qat_fw_auth_desc *auth_desc;
- struct icp_qat_auth_chunk *auth_chunk;
- u64 virt_addr, bus_addr, virt_base;
- unsigned int simg_offset = sizeof(*auth_chunk);
struct icp_qat_simg_ae_mode *simg_ae_mode;
- struct icp_firml_dram_desc img_desc;
- int ret;
-
- ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
- if (ret) {
- pr_err("QAT: error, allocate continuous dram fail\n");
- return ret;
- }
-
- if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) {
- pr_debug("QAT: invalid address\n");
- qat_uclo_simg_free(handle, &img_desc);
- return -EINVAL;
- }
+ struct icp_qat_fw_auth_desc *auth_desc;
+ char *virt_addr, *virt_base;
+ u64 bus_addr;
- auth_chunk = img_desc.dram_base_addr_v;
- auth_chunk->chunk_size = img_desc.dram_size;
- auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
- virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
- bus_addr = img_desc.dram_bus_addr + simg_offset;
- auth_desc = img_desc.dram_base_addr_v;
- auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->css_hdr_low = (unsigned int)bus_addr;
+ virt_base = dram_desc->dram_base_addr_v;
+ virt_base += sizeof(struct icp_qat_auth_chunk);
+ bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
+ auth_desc = dram_desc->dram_base_addr_v;
+ auth_desc->css_hdr_high = upper_32_bits(bus_addr);
+ auth_desc->css_hdr_low = lower_32_bits(bus_addr);
virt_addr = virt_base;
- memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
+ memcpy(virt_addr, image, sizeof(*css_hdr));
/* pub key */
bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
sizeof(*css_hdr);
virt_addr = virt_addr + sizeof(*css_hdr);
- auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
+ auth_desc->fwsk_pub_high = upper_32_bits(bus_addr);
+ auth_desc->fwsk_pub_low = lower_32_bits(bus_addr);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + sizeof(*css_hdr)),
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+ memcpy(virt_addr, image + sizeof(*css_hdr), ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
/* padding */
memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
/* exponent */
- memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_PAD_LEN(handle)),
- (void *)(image + sizeof(*css_hdr) +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
- sizeof(unsigned int));
+ memcpy(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_PAD_LEN(handle), image + sizeof(*css_hdr) +
+ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle), sizeof(unsigned int));
/* signature */
bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
auth_desc->fwsk_pub_low) +
ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
- auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->signature_low = (unsigned int)bus_addr;
+ auth_desc->signature_high = upper_32_bits(bus_addr);
+ auth_desc->signature_low = lower_32_bits(bus_addr);
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + sizeof(*css_hdr) +
- ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
- ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)),
- ICP_QAT_CSS_SIGNATURE_LEN(handle));
+ memcpy(virt_addr, image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
+ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle), ICP_QAT_CSS_SIGNATURE_LEN(handle));
bus_addr = ADD_ADDR(auth_desc->signature_high,
auth_desc->signature_low) +
ICP_QAT_CSS_SIGNATURE_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
- auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
- auth_desc->img_low = (unsigned int)bus_addr;
- auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle);
- if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr +
- ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
- pr_err("QAT: insufficient memory size for authentication data\n");
- qat_uclo_simg_free(handle, &img_desc);
+ auth_desc->img_high = upper_32_bits(bus_addr);
+ auth_desc->img_low = lower_32_bits(bus_addr);
+ auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
+ if (bus_addr + auth_desc->img_len >
+ dram_desc->dram_bus_addr + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
+ pr_err("insufficient memory size for authentication data\n");
+ qat_uclo_simg_free(handle, dram_desc);
return -ENOMEM;
}
- memcpy((void *)(uintptr_t)virt_addr,
- (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)),
- auth_desc->img_len);
+ memcpy(virt_addr, image + qat_uclo_simg_hdr2sign_len(handle), auth_desc->img_len);
virt_addr = virt_base;
/* AE firmware */
- if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
- CSS_AE_FIRMWARE) {
+ if (fw_type == CSS_AE_FIRMWARE) {
auth_desc->img_ae_mode_data_high = auth_desc->img_high;
auth_desc->img_ae_mode_data_low = auth_desc->img_low;
bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
auth_desc->img_ae_mode_data_low) +
sizeof(struct icp_qat_simg_ae_mode);
- auth_desc->img_ae_init_data_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
- auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
+ auth_desc->img_ae_init_data_high = upper_32_bits(bus_addr);
+ auth_desc->img_ae_init_data_low = lower_32_bits(bus_addr);
bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
- auth_desc->img_ae_insts_high = (unsigned int)
- (bus_addr >> BITS_IN_DWORD);
- auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
+ auth_desc->img_ae_insts_high = upper_32_bits(bus_addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(bus_addr);
virt_addr += sizeof(struct icp_qat_css_hdr);
virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
@@ -1527,6 +1542,141 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
return 0;
}
+static int qat_uclo_build_auth_desc_dualsign(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_firml_dram_desc *dram_desc,
+ unsigned int fw_type,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_simg_ae_mode *simg_ae_mode;
+ struct icp_qat_fw_auth_desc *auth_desc;
+ unsigned int chunk_offset, img_offset;
+ u64 bus_addr, addr;
+ char *virt_addr;
+
+ virt_addr = dram_desc->dram_base_addr_v;
+ virt_addr += sizeof(struct icp_qat_auth_chunk);
+ bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
+
+ auth_desc = dram_desc->dram_base_addr_v;
+ auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
+ auth_desc->css_hdr_high = upper_32_bits(bus_addr);
+ auth_desc->css_hdr_low = lower_32_bits(bus_addr);
+ memcpy(virt_addr, image, ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN);
+
+ img_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN;
+ chunk_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN;
+
+ /* RSA pub key */
+ addr = bus_addr + chunk_offset;
+ auth_desc->fwsk_pub_high = upper_32_bits(addr);
+ auth_desc->fwsk_pub_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
+ /* RSA padding */
+ memset(virt_addr + chunk_offset, 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));
+
+ chunk_offset += ICP_QAT_CSS_FWSK_PAD_LEN(handle);
+ /* RSA exponent */
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
+ /* RSA signature */
+ addr = bus_addr + chunk_offset;
+ auth_desc->signature_high = upper_32_bits(addr);
+ auth_desc->signature_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_SIGNATURE_LEN(handle));
+
+ img_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ chunk_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
+ /* XMSS pubkey */
+ addr = bus_addr + chunk_offset;
+ auth_desc->xmss_pubkey_high = upper_32_bits(addr);
+ auth_desc->xmss_pubkey_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN);
+
+ img_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
+ chunk_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
+ /* XMSS signature */
+ addr = bus_addr + chunk_offset;
+ auth_desc->xmss_sig_high = upper_32_bits(addr);
+ auth_desc->xmss_sig_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_SIG_LEN);
+
+ img_offset += ICP_QAT_DUALSIGN_XMSS_SIG_LEN;
+ chunk_offset += ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN;
+
+ if (dram_desc->dram_size < (chunk_offset + auth_desc->img_len)) {
+ pr_err("auth chunk memory size is not enough to store data\n");
+ return -ENOMEM;
+ }
+
+ /* Signed data */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_high = upper_32_bits(addr);
+ auth_desc->img_low = lower_32_bits(addr);
+ memcpy(virt_addr + chunk_offset, image + img_offset, auth_desc->img_len);
+
+ chunk_offset += ICP_QAT_DUALSIGN_MISC_INFO_LEN;
+ /* AE firmware */
+ if (fw_type == CSS_AE_FIRMWARE) {
+ /* AE mode data */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_mode_data_high = upper_32_bits(addr);
+ auth_desc->img_ae_mode_data_low = lower_32_bits(addr);
+ simg_ae_mode =
+ (struct icp_qat_simg_ae_mode *)(virt_addr + chunk_offset);
+ auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
+
+ chunk_offset += sizeof(struct icp_qat_simg_ae_mode);
+ /* AE init seq */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_init_data_high = upper_32_bits(addr);
+ auth_desc->img_ae_init_data_low = lower_32_bits(addr);
+
+ chunk_offset += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
+ /* AE instructions */
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_insts_high = upper_32_bits(addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(addr);
+ } else {
+ addr = bus_addr + chunk_offset;
+ auth_desc->img_ae_insts_high = upper_32_bits(addr);
+ auth_desc->img_ae_insts_low = lower_32_bits(addr);
+ }
+ *desc = auth_desc;
+ return 0;
+}
+
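
The dual-sign builder walks two cursors in lockstep: img_offset tracks the packed source image while chunk_offset tracks the destination chunk, and the two diverge wherever the destination inserts bytes the image does not carry (the RSA pad, the aligned tail of the XMSS signature). A reduced sketch of the two-cursor copy, with illustrative lengths rather than the real ICP_QAT_* values:

#include <stdint.h>
#include <string.h>

#define MOD_LEN 512  /* illustrative */
#define PAD_LEN  12  /* illustrative */
#define EXP_LEN   4  /* illustrative */

static void copy_pubkey(uint8_t *dst, const uint8_t *src)
{
	size_t s = 0, d = 0;             /* img_offset / chunk_offset */

	memcpy(dst + d, src + s, MOD_LEN);   /* modulus: both advance */
	s += MOD_LEN;
	d += MOD_LEN;
	memset(dst + d, 0, PAD_LEN);         /* pad: only dst advances */
	d += PAD_LEN;
	memcpy(dst + d, src + s, EXP_LEN);   /* exponent: both advance */
}
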
+static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ struct icp_qat_fw_auth_desc **desc)
+{
+ struct icp_qat_auth_chunk *auth_chunk;
+ struct icp_firml_dram_desc img_desc;
+ unsigned int simg_fw_type;
+ int ret;
+
+ ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
+ if (ret)
+ return ret;
+
+ simg_fw_type = qat_uclo_simg_fw_type(handle, image);
+ auth_chunk = img_desc.dram_base_addr_v;
+ auth_chunk->chunk_size = img_desc.dram_size;
+ auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
+
+ if (handle->chip_info->dual_sign)
+ return qat_uclo_build_auth_desc_dualsign(handle, image, size, &img_desc,
+ simg_fw_type, desc);
+
+ return qat_uclo_build_auth_desc_RSA(handle, image, size, &img_desc,
+ simg_fw_type, desc);
+}
+
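
qat_uclo_map_auth_fw() is now a thin dispatcher: one DRAM allocation and the common auth-chunk bookkeeping, then a branch to the scheme-specific layout routine, with the firmware type read once from the raw image rather than re-derived from the copied header later. A stub-level sketch of the shape (names mirror the driver, but the stubs are illustrative):

#include <stdbool.h>

struct dram_desc { void *virt; unsigned long long bus, size; };

static int build_rsa(struct dram_desc *d)      { (void)d; return 0; }
static int build_dualsign(struct dram_desc *d) { (void)d; return 0; }

static int map_auth_fw(struct dram_desc *d, bool dual_sign)
{
	/* common chunk-header setup would happen here */
	return dual_sign ? build_dualsign(d) : build_rsa(d);
}
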
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
@@ -1546,7 +1696,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
if (!((desc->ae_mask >> i) & 0x1))
continue;
if (qat_hal_check_ae_active(handle, i)) {
- pr_err("QAT: AE %d is active\n", i);
+ pr_err("AE %d is active\n", i);
return -EINVAL;
}
SET_CAP_CSR(handle, fcu_ctl_csr,
@@ -1566,7 +1716,7 @@ static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
}
} while (retry++ < FW_AUTH_MAX_RETRY);
if (retry > FW_AUTH_MAX_RETRY) {
- pr_err("QAT: firmware load failed timeout %x\n", retry);
+ pr_err("firmware load failed timeout %x\n", retry);
return -EINVAL;
}
}
@@ -1584,7 +1734,7 @@ static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
handle->sobj_handle = suof_handle;
if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
qat_uclo_del_suof(handle);
- pr_err("QAT: map SUOF failed\n");
+ pr_err("map SUOF failed\n");
return -EINVAL;
}
return 0;
@@ -1608,7 +1758,7 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
if (handle->chip_info->mmp_sram_size < mem_size) {
- pr_err("QAT: MMP size is too large: 0x%x\n", mem_size);
+ pr_err("MMP size is too large: 0x%x\n", mem_size);
return -EFBIG;
}
qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
@@ -1634,7 +1784,7 @@ static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
ICP_QAT_UOF_OBJS);
if (!objhdl->obj_hdr) {
- pr_err("QAT: object file chunk is null\n");
+ pr_err("object file chunk is null\n");
goto out_objhdr_err;
}
handle->obj_handle = objhdl;
@@ -1669,7 +1819,7 @@ static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
min_ver_offset);
if (checksum != mof_ptr->checksum) {
- pr_err("QAT: incorrect MOF checksum\n");
+ pr_err("incorrect MOF checksum\n");
return -EINVAL;
}
@@ -1705,7 +1855,7 @@ static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
}
}
- pr_err("QAT: object %s is not found inside MOF\n", obj_name);
+ pr_err("object %s is not found inside MOF\n", obj_name);
return -EINVAL;
}
@@ -1722,7 +1872,7 @@ static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
} else {
- pr_err("QAT: unsupported chunk id\n");
+ pr_err("unsupported chunk id\n");
return -EINVAL;
}
mobj_hdr->obj_buf = obj;
@@ -1783,7 +1933,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
}
if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
- pr_err("QAT: inconsistent UOF/SUOF chunk amount\n");
+ pr_err("inconsistent UOF/SUOF chunk amount\n");
return -EINVAL;
}
return 0;
@@ -1824,17 +1974,16 @@ static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
int min = mof_hdr->min_ver & 0xff;
if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
- pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
+ pr_err("invalid header 0x%x\n", mof_hdr->file_id);
return -EINVAL;
}
if (mof_hdr->num_chunks <= 0x1) {
- pr_err("QAT: MOF chunk amount is incorrect\n");
+ pr_err("MOF chunk amount is incorrect\n");
return -EINVAL;
}
if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
- pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
- maj, min);
+ pr_err("bad MOF version, major 0x%x, minor 0x%x\n", maj, min);
return -EINVAL;
}
return 0;
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
index 5bf5c890c362..1427fe76f171 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index e48bcf1818cd..5b4bd0ba1ccb 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -4,7 +4,6 @@
#include <adf_admin.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -24,7 +23,6 @@ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
static struct adf_hw_device_class dh895xcc_class = {
.name = ADF_DH895XCC_DEVICE_NAME,
.type = DEV_DH895XCC,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 07e9d7e52861..b59e0cc49e52 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -19,24 +19,6 @@
#include <adf_dbgfs.h>
#include "adf_dh895xcc_hw_data.h"
-static const struct pci_device_id adf_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
-
-static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
-static void adf_remove(struct pci_dev *dev);
-
-static struct pci_driver adf_driver = {
- .id_table = adf_pci_tbl,
- .name = ADF_DH895XCC_DEVICE_NAME,
- .probe = adf_probe,
- .remove = adf_remove,
- .sriov_configure = adf_sriov_configure,
- .err_handler = &adf_err_handler,
-};
-
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
@@ -227,6 +209,29 @@ static void adf_remove(struct pci_dev *pdev)
kfree(accel_dev);
}
+static void adf_shutdown(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ adf_dev_down(accel_dev);
+}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_DH895XCC_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .shutdown = adf_shutdown,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
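
Moving the pci_driver definition below the probe/remove bodies removes the forward declarations, and the new .shutdown hook brings the device down so DMA and interrupts are quiet before reboot or kexec. A minimal sketch of the callback pattern (struct my_dev and my_dev_down() are illustrative stand-ins):

#include <linux/pci.h>

struct my_dev { bool up; };

static void my_dev_down(struct my_dev *mdev)
{
	mdev->up = false;  /* stand-in for adf_dev_down() */
}

static void mydrv_shutdown(struct pci_dev *pdev)
{
	struct my_dev *mdev = pci_get_drvdata(pdev);

	/* quiesce the device so a kexec'd kernel starts clean */
	if (mdev)
		my_dev_down(mdev);
}
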
static int __init adfdrv_init(void)
{
request_module("intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
index 93f9c81edf09..c2fdb6e0f68f 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(src)/../qat_common
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index f4ee4c2e00da..828456c43b76 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -3,7 +3,6 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
-#include <adf_gen2_dc.h>
#include <adf_gen2_hw_csr_data.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
@@ -13,7 +12,6 @@
static struct adf_hw_device_class dh895xcciov_class = {
.name = ADF_DH895XCCVF_DEVICE_NAME,
.type = DEV_DH895XCCVF,
- .instances = 0
};
static u32 get_accel_mask(struct adf_hw_device_data *self)
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index fa08f10e6f3f..9c21f5d835d2 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
- if (engine->chain.first && engine->chain.last)
+ if (engine->chain_hw.first && engine->chain_hw.last)
return mv_cesa_tdma_process(engine, status);
return mv_cesa_std_process(engine, status);
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index d215a6bed6bc..50ca1039fdaa 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -440,8 +440,10 @@ struct mv_cesa_dev {
* SRAM
* @queue: fifo of the pending crypto requests
* @load: engine load counter, useful for load balancing
- * @chain: list of the current tdma descriptors being processed
- * by this engine.
+ * @chain_hw: list of the current tdma descriptors being processed
+ * by the hardware.
+ * @chain_sw: list of the current tdma descriptors that will be
+ * submitted to the hardware.
* @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.
@@ -463,7 +465,8 @@ struct mv_cesa_engine {
struct gen_pool *pool;
struct crypto_queue queue;
atomic_t load;
- struct mv_cesa_tdma_chain chain;
+ struct mv_cesa_tdma_chain chain_hw;
+ struct mv_cesa_tdma_chain chain_sw;
struct list_head complete_queue;
int irq;
};
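
The single chain is split in two: chain_sw accumulates descriptors queued by software, and chain_hw tracks what the engine is actually executing. mv_cesa_dma_step() promotes a request's chain from one to the other under the engine lock, so the interrupt path (mv_cesa_tdma_process()) never walks descriptors the hardware was not given. A simplified sketch of the hand-off, with stand-in types:

#include <stddef.h>

struct tdma_desc { struct tdma_desc *next; };
struct tdma_chain { struct tdma_desc *first, *last; };

struct engine {
	struct tdma_chain chain_sw;  /* queued, not yet started */
	struct tdma_chain chain_hw;  /* currently owned by the engine */
};

/* caller holds the engine lock (spin_lock_bh() in the driver) */
static void dma_step(struct engine *e, const struct tdma_chain *req)
{
	if (e->chain_sw.first == req->first) {
		e->chain_sw.first = NULL;  /* sw chain fully consumed */
		e->chain_sw.last = NULL;
	}
	e->chain_hw.first = req->first;    /* hardware now owns these */
	e->chain_hw.last = req->last;
}
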
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index cf62db50f958..48c5c8ea8c43 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -459,6 +459,9 @@ static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
+ if (!req->cryptlen)
+ return 0;
+
ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index f150861ceaf6..6815eddc9068 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -663,7 +663,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (ret)
goto err_free_tdma;
- if (iter.src.sg) {
+ if (iter.base.len > iter.src.op_offset) {
/*
* Add all the new data, inserting an operation block and
* launch command between each full SRAM block-worth of
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 388a06e180d6..243305354420 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
struct mv_cesa_engine *engine = dreq->engine;
+ spin_lock_bh(&engine->lock);
+ if (engine->chain_sw.first == dreq->chain.first) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
+ }
+ engine->chain_hw.first = dreq->chain.first;
+ engine->chain_hw.last = dreq->chain.last;
+ spin_unlock_bh(&engine->lock);
+
writel_relaxed(0, engine->regs + CESA_SA_CFG);
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
struct mv_cesa_req *dreq)
{
- if (engine->chain.first == NULL && engine->chain.last == NULL) {
- engine->chain.first = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
- } else {
- struct mv_cesa_tdma_desc *last;
+ struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
- last = engine->chain.last;
+ /*
+ * Break the DMA chain if the request being queued needs the IV
+ * regs to be set before lauching the request.
+ */
+ if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
+ engine->chain_sw.first = dreq->chain.first;
+ else {
last->next = dreq->chain.first;
- engine->chain.last = dreq->chain.last;
-
- /*
- * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
- * the last element of the current chain, or if the request
- * being queued needs the IV regs to be set before lauching
- * the request.
- */
- if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
- !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
- last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+ }
+ last = dreq->chain.last;
+ engine->chain_sw.last = last;
+ /*
+	 * Break the DMA chain if CESA_TDMA_BREAK_CHAIN is set on
+ * the last element of the current chain.
+ */
+ if (last->flags & CESA_TDMA_BREAK_CHAIN) {
+ engine->chain_sw.first = NULL;
+ engine->chain_sw.last = NULL;
}
}
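
The rewritten mv_cesa_tdma_chain() applies two rules to the software chain: a request starts a fresh chain when nothing is pending or when it must program the IV registers first (CESA_TDMA_SET_STATE); otherwise it is appended by pointing the previous tail at it (next and next_dma in the driver). If the request's tail carries CESA_TDMA_BREAK_CHAIN, the software chain is reset so the next request cannot be glued on. A condensed restatement of those rules, with stand-in flags and types:

#include <stddef.h>

enum { SET_STATE = 1, BREAK_CHAIN = 2 };

struct desc { struct desc *next; unsigned int flags; };
struct chain { struct desc *first, *last; };

static void tdma_chain(struct chain *sw, struct chain *req)
{
	if (!sw->last || (req->first->flags & SET_STATE))
		sw->first = req->first;       /* start a fresh chain */
	else
		sw->last->next = req->first;  /* glue onto the tail */

	sw->last = req->last;
	if (req->last->flags & BREAK_CHAIN)
		sw->first = sw->last = NULL;  /* next request starts fresh */
}
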
@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
- for (tdma = engine->chain.first; tdma; tdma = next) {
+ for (tdma = engine->chain_hw.first; tdma; tdma = next) {
spin_lock_bh(&engine->lock);
next = tdma->next;
spin_unlock_bh(&engine->lock);
@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
&backlog);
/* Re-chaining to the next request */
- engine->chain.first = tdma->next;
+ engine->chain_hw.first = tdma->next;
tdma->next = NULL;
/* If this is the last request, clear the chain */
- if (engine->chain.first == NULL)
- engine->chain.last = NULL;
+ if (engine->chain_hw.first == NULL)
+ engine->chain_hw.last = NULL;
spin_unlock_bh(&engine->lock);
ctx = crypto_tfm_ctx(req->tfm);
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 5cae8fafa151..d4aab9e20f2a 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -6,6 +6,7 @@
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"
+#include "otx2_cpt_common.h"
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf);
@@ -27,7 +28,7 @@ static struct cpt_hw_ops cn10k_hw_ops = {
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
struct otx2_cptlf_info *lf)
{
- void __iomem *lmtline = lf->lmtline;
+ void *lmtline = lf->lfs->lmt_info.base + (lf->slot * LMTLINE_SIZE);
u64 val = (lf->slot & 0x7FF);
u64 tar_addr = 0;
@@ -41,15 +42,49 @@ static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
dma_wmb();
/* Copy CPT command to LMTLINE */
- memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
+ memcpy(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
cn10k_lmt_flush(val, tar_addr);
}
+void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
+
+ if (!lmt_info->base)
+ return;
+
+ dma_free_attrs(&pdev->dev, lmt_info->size,
+ lmt_info->base - lmt_info->align,
+ lmt_info->iova - lmt_info->align,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_lmtst_free, "CRYPTO_DEV_OCTEONTX2_CPT");
+
+static int cn10k_cpt_lmtst_alloc(struct pci_dev *pdev,
+ struct otx2_cptlfs_info *lfs, u32 size)
+{
+ struct otx2_lmt_info *lmt_info = &lfs->lmt_info;
+ dma_addr_t align_iova;
+ dma_addr_t iova;
+
+ lmt_info->base = dma_alloc_attrs(&pdev->dev, size, &iova, GFP_KERNEL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!lmt_info->base)
+ return -ENOMEM;
+
+ align_iova = ALIGN((u64)iova, LMTLINE_ALIGN);
+ lmt_info->iova = align_iova;
+ lmt_info->align = align_iova - iova;
+ lmt_info->size = size;
+ lmt_info->base += lmt_info->align;
+ return 0;
+}
+
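
dma_alloc_attrs() offers no alignment control beyond its own granularity, so the helper over-allocates (callers pass size = n * LMTLINE_SIZE + LMTLINE_ALIGN), rounds the IOVA up to the 128-byte LMTLINE boundary, and records the round-up in lmt_info->align so cn10k_cpt_lmtst_free() can reconstruct the original pointers. The same pattern with plain malloc() standing in for the DMA allocator:

#include <stdint.h>
#include <stdlib.h>

#define LMT_ALIGN 128u

struct lmt_info { void *base; uintptr_t iova; uint32_t size; uint8_t align; };

static int lmt_alloc(struct lmt_info *li, uint32_t size) /* size includes slack */
{
	void *raw = malloc(size);
	uintptr_t iova, aligned;

	if (!raw)
		return -1;
	iova = (uintptr_t)raw;  /* stands in for the DMA (bus) address */
	aligned = (iova + LMT_ALIGN - 1) & ~(uintptr_t)(LMT_ALIGN - 1);
	li->align = aligned - iova;           /* 0..LMT_ALIGN-1 */
	li->iova = aligned;
	li->base = (char *)raw + li->align;   /* aligned CPU pointer */
	li->size = size;
	return 0;
}

static void lmt_free(struct lmt_info *li)
{
	if (li->base)
		free((char *)li->base - li->align);  /* undo the round-up */
}
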
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
struct pci_dev *pdev = cptpf->pdev;
- resource_size_t size;
- u64 lmt_base;
+ u32 size;
+ int ret;
if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
cptpf->lfs.ops = &otx2_hw_ops;
@@ -57,18 +92,19 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
}
cptpf->lfs.ops = &cn10k_hw_ops;
- lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
- if (!lmt_base) {
- dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
- return -ENOMEM;
+ size = OTX2_CPT_MAX_VFS_NUM * LMTLINE_SIZE + LMTLINE_ALIGN;
+ ret = cn10k_cpt_lmtst_alloc(pdev, &cptpf->lfs, size);
+ if (ret) {
+ dev_err(&pdev->dev, "PF-%d LMTLINE memory allocation failed\n",
+ cptpf->pf_id);
+ return ret;
}
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
- cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
- if (!cptpf->lfs.lmt_base) {
- dev_err(&pdev->dev,
- "Mapping of PF LMTLINE address failed\n");
- return -ENOMEM;
+
+ ret = otx2_cpt_lmtst_tbl_setup_msg(&cptpf->lfs);
+ if (ret) {
+ dev_err(&pdev->dev, "PF-%d: LMTST Table setup failed\n",
+ cptpf->pf_id);
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
}
return 0;
@@ -78,18 +114,25 @@ EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
struct pci_dev *pdev = cptvf->pdev;
- resource_size_t offset, size;
+ u32 size;
+ int ret;
if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
return 0;
- offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
- size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
- /* Map VF LMILINE region */
- cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
- if (!cptvf->lfs.lmt_base) {
- dev_err(&pdev->dev, "Unable to map BAR4\n");
- return -ENOMEM;
+ size = cptvf->lfs.lfs_num * LMTLINE_SIZE + LMTLINE_ALIGN;
+ ret = cn10k_cpt_lmtst_alloc(pdev, &cptvf->lfs, size);
+ if (ret) {
+ dev_err(&pdev->dev, "VF-%d LMTLINE memory allocation failed\n",
+ cptvf->vf_id);
+ return ret;
+ }
+
+ ret = otx2_cpt_lmtst_tbl_setup_msg(&cptvf->lfs);
+ if (ret) {
+ dev_err(&pdev->dev, "VF-%d: LMTST Table setup failed\n",
+ cptvf->vf_id);
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
}
return 0;
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
index 92be3ecf570f..ea5990048c21 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -50,6 +50,7 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+void cn10k_cpt_lmtst_free(struct pci_dev *pdev, struct otx2_cptlfs_info *lfs);
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval);
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index c5b7c57574ef..d529bcb03775 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -145,11 +145,8 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
- if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
- pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
- return true;
-
- return false;
+ return pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
+ pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID;
}
static inline bool is_dev_cn10ka(struct pci_dev *pdev)
@@ -159,12 +156,10 @@ static inline bool is_dev_cn10ka(struct pci_dev *pdev)
static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev)
{
- if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
- ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 ||
- (pdev->revision & 0xff) == 0x51))
- return true;
-
- return false;
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ ((pdev->revision & 0xFF) == 4 ||
+ (pdev->revision & 0xFF) == 0x50 ||
+ (pdev->revision & 0xFF) == 0x51);
}
static inline bool is_dev_cn10kb(struct pci_dev *pdev)
@@ -174,11 +169,8 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
- if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
- (pdev->revision & 0xFF) == 0x54)
- return true;
-
- return false;
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0xFF) == 0x54;
}
static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
@@ -192,18 +184,12 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev)
{
- if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev))
- return true;
-
- return false;
+ return is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev);
}
static inline bool cpt_feature_sgv2(struct pci_dev *pdev)
{
- if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev))
- return true;
-
- return false;
+ return !is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev);
}
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
@@ -223,5 +209,6 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot);
+int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs);
#endif /* __OTX2_CPT_COMMON_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index b8b7c8a3c0ca..95f3de3a34eb 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -255,3 +255,28 @@ int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
+
+int otx2_cpt_lmtst_tbl_setup_msg(struct otx2_cptlfs_info *lfs)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct lmtst_tbl_setup_req *req;
+
+ req = (struct lmtst_tbl_setup_req *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ dev_err(&pdev->dev, "RVU MBOX failed to alloc message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_LMTST_TBL_SETUP;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+
+ req->use_local_lmt_region = true;
+ req->lmt_iova = lfs->lmt_info.iova;
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lmtst_tbl_setup_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index b5d66afcc030..dc7c7a2650a5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -433,10 +433,7 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
for (slot = 0; slot < lfs->lfs_num; slot++) {
lfs->lf[slot].lfs = lfs;
lfs->lf[slot].slot = slot;
- if (lfs->lmt_base)
- lfs->lf[slot].lmtline = lfs->lmt_base +
- (slot * LMTLINE_SIZE);
- else
+ if (!lfs->lmt_info.base)
lfs->lf[slot].lmtline = lfs->reg_base +
OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
OTX2_CPT_LMT_LF_LMTLINEX(0));
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index bd8604be2952..6e004a5568d8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -105,11 +105,19 @@ struct cpt_hw_ops {
gfp_t gfp);
};
+#define LMTLINE_SIZE 128
+#define LMTLINE_ALIGN 128
+struct otx2_lmt_info {
+ void *base;
+ dma_addr_t iova;
+ u32 size;
+ u8 align;
+};
+
struct otx2_cptlfs_info {
/* Registers start address of VF/PF LFs are attached to */
void __iomem *reg_base;
-#define LMTLINE_SIZE 128
- void __iomem *lmt_base;
+ struct otx2_lmt_info lmt_info;
struct pci_dev *pdev; /* Device LFs are attached to */
struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
struct otx2_mbox *mbox;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 12971300296d..1c5c262af48d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -639,6 +639,12 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* Disable all cores */
ret = otx2_cpt_disable_all_cores(cptpf);
+ otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
+ &cptpf->afpf_mbox, BLKADDR_CPT0);
+ if (cptpf->has_cpt1)
+ otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
+ cptpf->reg_base, &cptpf->afpf_mbox,
+ BLKADDR_CPT1);
return ret;
}
@@ -786,19 +792,19 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
cptpf->kvf_limits = 1;
- err = cn10k_cptpf_lmtst_init(cptpf);
+ /* Initialize CPT PF device */
+ err = cptpf_device_init(cptpf);
if (err)
goto unregister_intr;
- /* Initialize CPT PF device */
- err = cptpf_device_init(cptpf);
+ err = cn10k_cptpf_lmtst_init(cptpf);
if (err)
goto unregister_intr;
/* Initialize engine groups */
err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
if (err)
- goto unregister_intr;
+ goto free_lmtst;
err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
if (err)
@@ -814,6 +820,8 @@ sysfs_grp_del:
sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
+free_lmtst:
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
unregister_intr:
cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
@@ -848,6 +856,8 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
cptpf_disable_afpf_mbox_intr(cptpf);
/* Destroy AF-PF mbox */
cptpf_afpf_mbox_destroy(cptpf);
+ /* Free LMTST memory */
+ cn10k_cpt_lmtst_free(pdev, &cptpf->lfs);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index ec1ac7e836a3..12c0e966fa65 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -264,8 +264,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
return -ENOENT;
}
- otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
- &cptpf->afpf_mbox, BLKADDR_CPT0);
cptpf->lfs.global_slot = 0;
cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;
@@ -278,9 +276,6 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
if (cptpf->has_cpt1) {
cptpf->rsrc_req_blkaddr = BLKADDR_CPT1;
- otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
- cptpf->reg_base, &cptpf->afpf_mbox,
- BLKADDR_CPT1);
cptpf->cpt1_lfs.global_slot = num_lfs;
cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
@@ -507,6 +502,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
case MBOX_MSG_CPT_LF_RESET:
+ case MBOX_MSG_LMTST_TBL_SETUP:
break;
default:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 42c5484ce66a..78367849c3d5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1513,8 +1513,6 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
- &cptpf->afpf_mbox, BLKADDR_CPT0);
ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index d84eebdf2fa8..56904bdfd6e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -283,8 +283,6 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
lfs_num = cptvf->lfs.kvf_limits;
- otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
- &cptvf->pfvf_mbox, cptvf->blkaddr);
ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
lfs_num);
if (ret)
@@ -378,10 +376,6 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
- ret = cn10k_cptvf_lmtst_init(cptvf);
- if (ret)
- goto clear_drvdata;
-
/* Initialize PF<=>VF mailbox */
ret = cptvf_pfvf_mbox_init(cptvf);
if (ret)
@@ -396,6 +390,9 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
cptvf_hw_ops_get(cptvf);
+ otx2_cptlf_set_dev_info(&cptvf->lfs, cptvf->pdev, cptvf->reg_base,
+ &cptvf->pfvf_mbox, cptvf->blkaddr);
+
ret = otx2_cptvf_send_caps_msg(cptvf);
if (ret) {
dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
@@ -404,13 +401,19 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
+ ret = cn10k_cptvf_lmtst_init(cptvf);
+ if (ret)
+ goto unregister_interrupts;
+
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
- goto unregister_interrupts;
+ goto free_lmtst;
return 0;
+free_lmtst:
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
unregister_interrupts:
cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
@@ -434,6 +437,8 @@ static void otx2_cptvf_remove(struct pci_dev *pdev)
cptvf_disable_pfvf_mbox_intrs(cptvf);
/* Destroy PF-VF mbox */
cptvf_pfvf_mbox_destroy(cptvf);
+ /* Free LMTST memory */
+ cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index d9fa5f6e204d..931b72580fd9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -134,6 +134,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
sizeof(cptvf->eng_caps));
break;
case MBOX_MSG_CPT_LF_RESET:
+ case MBOX_MSG_LMTST_TBL_SETUP:
break;
default:
dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 0e440f704a8f..35fa5bad1d9f 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index dfa3ad1a12f2..709b3ee74657 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -9,10 +9,12 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 502a565074e9..4039cf3b22d4 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -8,10 +8,12 @@
*/
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <asm/vio.h>
#include "nx_csbcpb.h"
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index eb5c8f689360..bf465d824e2c 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -7,13 +7,14 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
-#include <crypto/internal/hash.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -21,8 +22,6 @@
struct xcbc_state {
u8 state[AES_BLOCK_SIZE];
- unsigned int count;
- u8 buffer[AES_BLOCK_SIZE];
};
static int nx_xcbc_set_key(struct crypto_shash *desc,
@@ -58,7 +57,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
*/
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u8 keys[2][AES_BLOCK_SIZE];
@@ -135,9 +134,9 @@ out:
return rc;
}
-static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
int err;
@@ -166,31 +165,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
struct nx_sg *out_sg;
- u32 to_process = 0, leftover, total;
unsigned int max_sg_len;
unsigned long irq_flags;
+ u32 to_process, total;
int rc = 0;
int data_len;
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ memcpy(csbcpb->cpb.aes_xcbc.out_cv_mac, sctx->state, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- total = sctx->count + len;
-
- /* 2 cases for total data len:
- * 1: <= AES_BLOCK_SIZE: copy into state, return 0
- * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
- */
- if (total <= AES_BLOCK_SIZE) {
- memcpy(sctx->buffer + sctx->count, data, len);
- sctx->count += len;
- goto out;
- }
+ total = len;
in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
@@ -200,7 +192,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
data_len = AES_BLOCK_SIZE;
out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
- &len, nx_ctx->ap->sglen);
+ &data_len, nx_ctx->ap->sglen);
if (data_len != AES_BLOCK_SIZE) {
rc = -EINVAL;
@@ -210,56 +202,21 @@ static int nx_xcbc_update(struct shash_desc *desc,
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
do {
- to_process = total - to_process;
- to_process = to_process & ~(AES_BLOCK_SIZE - 1);
-
- leftover = total - to_process;
-
- /* the hardware will not accept a 0 byte operation for this
- * algorithm and the operation MUST be finalized to be correct.
- * So if we happen to get an update that falls on a block sized
- * boundary, we must save off the last block to finalize with
- * later. */
- if (!leftover) {
- to_process -= AES_BLOCK_SIZE;
- leftover = AES_BLOCK_SIZE;
- }
-
- if (sctx->count) {
- data_len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
- (u8 *) sctx->buffer,
- &data_len,
- max_sg_len);
- if (data_len != sctx->count) {
- rc = -EINVAL;
- goto out;
- }
- }
+ to_process = total & ~(AES_BLOCK_SIZE - 1);
- data_len = to_process - sctx->count;
in_sg = nx_build_sg_list(in_sg,
(u8 *) data,
- &data_len,
+ &to_process,
max_sg_len);
- if (data_len != to_process - sctx->count) {
- rc = -EINVAL;
- goto out;
- }
-
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
/* we've hit the nx chip previously and we're updating again,
* so copy over the partial digest */
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac,
- AES_BLOCK_SIZE);
- }
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
rc = -EINVAL;
goto out;
@@ -271,28 +228,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
atomic_inc(&(nx_ctx->stats->aes_ops));
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-
total -= to_process;
- data += to_process - sctx->count;
- sctx->count = 0;
+ data += to_process;
in_sg = nx_ctx->in_sg;
- } while (leftover > AES_BLOCK_SIZE);
+ } while (total >= AES_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- memcpy(sctx->buffer, data, leftover);
- sctx->count = leftover;
+ rc = total;
+ memcpy(sctx->state, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+static int nx_xcbc_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct xcbc_state *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -301,12 +254,10 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- } else if (sctx->count == 0) {
+ if (nbytes) {
+ /* non-zero final, so copy over the partial digest */
+ memcpy(csbcpb->cpb.aes_xcbc.cv, sctx->state, AES_BLOCK_SIZE);
+ } else {
/*
* we've never seen an update, so this is a 0 byte op. The
* hardware cannot handle a 0 byte op, so just ECB to
@@ -320,11 +271,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
* this is not an intermediate operation */
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- len = sctx->count;
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
- &len, nx_ctx->ap->sglen);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len,
+ nx_ctx->ap->sglen);
- if (len != sctx->count) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -362,18 +313,19 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
.digestsize = AES_BLOCK_SIZE,
.init = nx_xcbc_init,
.update = nx_xcbc_update,
- .final = nx_xcbc_final,
+ .finup = nx_xcbc_finup,
.setkey = nx_xcbc_set_key,
.descsize = sizeof(struct xcbc_state),
- .statesize = sizeof(struct xcbc_state),
+ .init_tfm = nx_crypto_ctx_aes_xcbc_init2,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.base = {
.cra_name = "xcbc(aes)",
.cra_driver_name = "xcbc-aes-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINAL_NONZERO,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_aes_xcbc_init2,
- .cra_exit = nx_crypto_ctx_exit,
}
};
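
The nx-aes-xcbc rework above adopts the API's partial-block ("BLOCK_ONLY")
contract: update() consumes only whole AES blocks and returns the unconsumed
tail (rc = total), which the crypto core buffers and later feeds to finup();
CRYPTO_AHASH_ALG_FINAL_NONZERO additionally guarantees finup() receives a
non-empty chunk. A minimal userspace model of that bookkeeping (hypothetical,
not part of the patch):

    #include <assert.h>

    #define BLOCK_SIZE 16 /* AES_BLOCK_SIZE */

    /* Returns the unconsumed tail, as the reworked nx_xcbc_update() does. */
    static unsigned long update_whole_blocks(unsigned long total)
    {
            unsigned long to_process = total & ~(unsigned long)(BLOCK_SIZE - 1);

            /* ... the NX coprocessor would MAC 'to_process' bytes here ... */
            return total - to_process;
    }

    int main(void)
    {
            assert(update_whole_blocks(45) == 13); /* 45 = 2 * 16 + 13 */
            assert(update_whole_blocks(32) == 0);  /* exact block multiple */
            return 0;
    }
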
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index c3bebf0feabe..5b29dd026df2 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -9,9 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
-#include <asm/byteorder.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -19,12 +22,11 @@
struct sha256_state_be {
__be32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
- u8 buf[SHA256_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha256_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -40,11 +42,10 @@ static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
return 0;
}
-static int nx_sha256_init(struct shash_desc *desc) {
+static int nx_sha256_init(struct shash_desc *desc)
+{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx->state[1] = __cpu_to_be32(SHA256_H1);
sctx->state[2] = __cpu_to_be32(SHA256_H2);
@@ -61,30 +62,18 @@ static int nx_sha256_init(struct shash_desc *desc) {
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process = 0, leftover, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count % SHA256_BLOCK_SIZE) + len;
- if (total < SHA256_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,41 +94,17 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len,
- max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA256_BLOCK_SIZE - 1);
- /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -162,26 +127,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha256_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count += to_process;
} while (leftover >= SHA256_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
-
- sctx->count += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+static int nx_sha256_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha256_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
unsigned long irq_flags;
@@ -197,25 +158,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count >= SHA256_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
+ * this is not an intermediate operation;
+ * copy over the partial digest */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ sctx->count += nbytes;
csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
- len = sctx->count & (SHA256_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
- &len, max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -251,18 +206,34 @@ out:
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u32 *u32;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ put_unaligned(be32_to_cpu(sctx->state[i]), p.u32++);
+ put_unaligned(sctx->count, p.u64++);
return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u32 *u32;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(*p.u32); i++)
+ sctx->state[i] = cpu_to_be32(get_unaligned(p.u32++));
+ sctx->count = get_unaligned(p.u64++);
return 0;
}
@@ -270,19 +241,20 @@ struct shash_alg nx_shash_sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = nx_sha256_init,
.update = nx_sha256_update,
- .final = nx_sha256_final,
+ .finup = nx_sha256_finup,
.export = nx_sha256_export,
.import = nx_sha256_import,
+ .init_tfm = nx_crypto_ctx_sha256_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha256_state_be),
.statesize = sizeof(struct sha256_state_be),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha256_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
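
With the descriptor buffer dropped from struct sha256_state_be, export/import
above can no longer memcpy the raw struct; each big-endian state word is now
converted to CPU order and stored via put_unaligned(), followed by the byte
count, so the exported blob is alignment- and layout-stable. A rough userspace
model of the export side, with memcpy standing in for put_unaligned()
(hypothetical, for illustration only):

    #include <arpa/inet.h> /* ntohl() standing in for be32_to_cpu() */
    #include <stdint.h>
    #include <string.h>

    struct sha256_state_be_model {
            uint32_t state[8]; /* big-endian words, as the hardware keeps them */
            uint64_t count;
    };

    static void export_state(const struct sha256_state_be_model *s, uint8_t *out)
    {
            int i;

            for (i = 0; i < 8; i++) {
                    uint32_t w = ntohl(s->state[i]); /* be32_to_cpu() */

                    memcpy(out, &w, sizeof(w));      /* put_unaligned() */
                    out += sizeof(w);
            }
            memcpy(out, &s->count, sizeof(s->count)); /* byte count last */
    }
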
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 1ffb40d2c324..f74776b7d7d7 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -9,8 +9,12 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/vio.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
#include "nx_csbcpb.h"
#include "nx.h"
@@ -18,12 +22,11 @@
struct sha512_state_be {
__be64 state[SHA512_DIGEST_SIZE / 8];
u64 count[2];
- u8 buf[SHA512_BLOCK_SIZE];
};
-static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+static int nx_crypto_ctx_sha512_init(struct crypto_shash *tfm)
{
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
int err;
err = nx_crypto_ctx_sha_init(tfm);
@@ -43,8 +46,6 @@ static int nx_sha512_init(struct shash_desc *desc)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- memset(sctx, 0, sizeof *sctx);
-
sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx->state[1] = __cpu_to_be64(SHA512_H1);
sctx->state[2] = __cpu_to_be64(SHA512_H2);
@@ -54,6 +55,7 @@ static int nx_sha512_init(struct shash_desc *desc)
sctx->state[6] = __cpu_to_be64(SHA512_H6);
sctx->state[7] = __cpu_to_be64(SHA512_H7);
sctx->count[0] = 0;
+ sctx->count[1] = 0;
return 0;
}
@@ -61,30 +63,18 @@ static int nx_sha512_init(struct shash_desc *desc)
static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ u64 to_process, leftover, total = len;
struct nx_sg *out_sg;
- u64 to_process, leftover = 0, total;
unsigned long irq_flags;
int rc = 0;
int data_len;
u32 max_sg_len;
- u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- /* 2 cases for total data len:
- * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
- * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
- */
- total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
- if (total < SHA512_BLOCK_SIZE) {
- memcpy(sctx->buf + buf_len, data, len);
- sctx->count[0] += len;
- goto out;
- }
-
memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
@@ -105,45 +95,17 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
}
do {
- int used_sgs = 0;
struct nx_sg *in_sg = nx_ctx->in_sg;
- if (buf_len) {
- data_len = buf_len;
- in_sg = nx_build_sg_list(in_sg,
- (u8 *) sctx->buf,
- &data_len, max_sg_len);
-
- if (data_len != buf_len) {
- rc = -EINVAL;
- goto out;
- }
- used_sgs = in_sg - nx_ctx->in_sg;
- }
+ to_process = total & ~(SHA512_BLOCK_SIZE - 1);
- /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
- * processed in this iteration. This value is restricted
- * by sg list limits and number of sgs we already used
- * for leftover data. (see above)
- * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
- * but because data may not be aligned, we need to account
- * for that too. */
- to_process = min_t(u64, total,
- (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-
- data_len = to_process - buf_len;
+ data_len = to_process;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- if (data_len != (to_process - buf_len)) {
- rc = -EINVAL;
- goto out;
- }
-
- to_process = data_len + buf_len;
+ to_process = data_len;
leftover = total - to_process;
/*
@@ -166,30 +128,29 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha512_ops));
total -= to_process;
- data += to_process - buf_len;
- buf_len = 0;
-
+ data += to_process;
+ sctx->count[0] += to_process;
+ if (sctx->count[0] < to_process)
+ sctx->count[1]++;
} while (leftover >= SHA512_BLOCK_SIZE);
- /* copy the leftover back into the state struct */
- if (leftover)
- memcpy(sctx->buf, data, leftover);
- sctx->count[0] += len;
+ rc = leftover;
memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
-static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+static int nx_sha512_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int nbytes, u8 *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
u32 max_sg_len;
- u64 count0;
unsigned long irq_flags;
+ u64 count0, count1;
int rc = 0;
int len;
@@ -201,30 +162,23 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
nx_ctx->ap->databytelen/NX_PAGE_SIZE);
/* final is represented by continuing the operation and indicating that
- * this is not an intermediate operation */
- if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
- /* we've hit the nx chip previously, now we're finalizing,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
- SHA512_DIGEST_SIZE);
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- } else {
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
- }
-
+ * this is not an intermediate operation;
+ * copy over the partial digest */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE);
NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- count0 = sctx->count[0] * 8;
+ count0 = sctx->count[0] + nbytes;
+ count1 = sctx->count[1];
- csbcpb->cpb.sha512.message_bit_length_lo = count0;
+ csbcpb->cpb.sha512.message_bit_length_lo = count0 << 3;
+ csbcpb->cpb.sha512.message_bit_length_hi = (count1 << 3) |
+ (count0 >> 61);
- len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
- in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
- max_sg_len);
+ len = nbytes;
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len, max_sg_len);
- if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+ if (len != nbytes) {
rc = -EINVAL;
goto out;
}
@@ -246,7 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
+ atomic64_add(count0, &(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
@@ -257,18 +211,34 @@ out:
static int nx_sha512_export(struct shash_desc *desc, void *out)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+ int i;
- memcpy(out, sctx, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ put_unaligned(be64_to_cpu(sctx->state[i]), p.u64++);
+ put_unaligned(sctx->count[0], p.u64++);
+ put_unaligned(sctx->count[1], p.u64++);
return 0;
}
static int nx_sha512_import(struct shash_desc *desc, const void *in)
{
struct sha512_state_be *sctx = shash_desc_ctx(desc);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int i;
- memcpy(sctx, in, sizeof(*sctx));
+ for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(*p.u64); i++)
+ sctx->state[i] = cpu_to_be64(get_unaligned(p.u64++));
+ sctx->count[0] = get_unaligned(p.u64++);
+ sctx->count[1] = get_unaligned(p.u64++);
return 0;
}
@@ -276,19 +246,20 @@ struct shash_alg nx_shash_sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = nx_sha512_init,
.update = nx_sha512_update,
- .final = nx_sha512_final,
+ .finup = nx_sha512_finup,
.export = nx_sha512_export,
.import = nx_sha512_import,
+ .init_tfm = nx_crypto_ctx_sha512_init,
+ .exit_tfm = nx_crypto_ctx_shash_exit,
.descsize = sizeof(struct sha512_state_be),
.statesize = sizeof(struct sha512_state_be),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-nx",
.cra_priority = 300,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
- .cra_init = nx_crypto_ctx_sha512_init,
- .cra_exit = nx_crypto_ctx_exit,
}
};
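
The sha512 finup above widens the byte count to a 128-bit bit count across two
64-bit words: the low word is count0 << 3, and the high word folds in the
carry counter plus the three bits shifted out of count0, i.e.
(count1 << 3) | (count0 >> 61). A self-contained check of that split
(hypothetical test, not part of the patch; relies on the GCC/Clang __int128
extension):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t count0 = 0x2000000000000005ULL, count1 = 0x3;
            uint64_t lo = count0 << 3;
            uint64_t hi = (count1 << 3) | (count0 >> 61);
            unsigned __int128 bits =
                    (((unsigned __int128)count1 << 64) | count0) * 8;

            assert(lo == (uint64_t)bits);
            assert(hi == (uint64_t)(bits >> 64));
            return 0;
    }
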
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index a3b979193d9b..78135fb13f5c 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -7,11 +7,11 @@
* Author: Kent Yoder <yoder1@us.ibm.com>
*/
+#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
-#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/sha2.h>
-#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -124,8 +124,6 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
}
if ((sg - sg_head) == sgmax) {
- pr_err("nx: scatter/gather list overflow, pid: %d\n",
- current->pid);
sg++;
break;
}
@@ -702,14 +700,14 @@ int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
NX_MODE_AES_ECB);
}
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm)
{
- return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_AES,
NX_MODE_AES_XCBC_MAC);
}
@@ -744,6 +742,11 @@ void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
kfree_sensitive(nx_ctx->kmem);
}
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm)
+{
+ nx_crypto_ctx_exit(crypto_shash_ctx(tfm));
+}
+
static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index e1b4b6927bec..36974f08490a 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -3,7 +3,11 @@
#ifndef __NX_H__
#define __NX_H__
+#include <asm/vio.h>
#include <crypto/ctr.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#define NX_NAME "nx-crypto"
#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
@@ -139,19 +143,20 @@ struct nx_crypto_ctx {
} priv;
};
-struct crypto_aead;
+struct scatterlist;
/* prototypes */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
-int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm);
int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm);
int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm);
-int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_sha_init(struct crypto_shash *tfm);
void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm);
void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm);
+void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm);
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
u32 may_sleep);
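
Across nx.c/nx.h every transform class now gets a typed init/exit pair in
place of the untyped cra_init/cra_exit hooks, with each wrapper fetching its
own context through the matching accessor. A minimal sketch of that shape
using hypothetical names (the real driver funnels into nx_crypto_ctx_init()
and nx_crypto_ctx_exit()):

    #include <crypto/internal/hash.h>
    #include <linux/slab.h>

    struct example_ctx {
            void *kmem;
    };

    /* shared helpers that operate on the raw context blob */
    static int example_ctx_setup(struct example_ctx *ctx)
    {
            ctx->kmem = NULL; /* ... allocate workspace here ... */
            return 0;
    }

    static void example_ctx_teardown(struct example_ctx *ctx)
    {
            kfree_sensitive(ctx->kmem);
    }

    /* typed shash hooks, analogous to nx_crypto_ctx_sha_init() and
     * nx_crypto_ctx_shash_exit() above */
    static int example_init_tfm(struct crypto_shash *tfm)
    {
            return example_ctx_setup(crypto_shash_ctx(tfm));
    }

    static void example_exit_tfm(struct crypto_shash *tfm)
    {
            example_ctx_teardown(crypto_shash_ctx(tfm));
    }
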
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 551dd32a8db0..1ecf5f6ac04e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1086,10 +1086,7 @@ static struct attribute *omap_aes_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_aes_attr_group = {
- .attrs = omap_aes_attrs,
-};
+ATTRIBUTE_GROUPS(omap_aes);
static int omap_aes_probe(struct platform_device *pdev)
{
@@ -1215,12 +1212,6 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_aead_algs;
- }
-
return 0;
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
@@ -1277,8 +1268,6 @@ static void omap_aes_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
}
#ifdef CONFIG_PM_SLEEP
@@ -1304,6 +1293,7 @@ static struct platform_driver omap_aes_driver = {
.name = "omap-aes",
.pm = &omap_aes_pm_ops,
.of_match_table = omap_aes_of_match,
+ .dev_groups = omap_aes_groups,
},
};
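
The omap-aes change (mirrored for omap-sham below) leans on the driver core:
once .dev_groups is set, the attribute group is created before probe()
completes and removed automatically on unbind, which is what lets the manual
sysfs_create_group()/sysfs_remove_group() calls and their error path go away.
Condensed sketch of the pattern with hypothetical driver and attribute names:

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/sysfs.h>

    static ssize_t fallback_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "0\n");
    }
    static DEVICE_ATTR_RO(fallback);

    static struct attribute *example_attrs[] = {
            &dev_attr_fallback.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(example); /* emits example_groups[] */

    static struct platform_driver example_driver = {
            .driver = {
                    .name       = "example",
                    .dev_groups = example_groups, /* core creates/removes attrs */
            },
    };
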
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7021481bf027..56f192cb976d 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2039,10 +2039,7 @@ static struct attribute *omap_sham_attrs[] = {
&dev_attr_fallback.attr,
NULL,
};
-
-static const struct attribute_group omap_sham_attr_group = {
- .attrs = omap_sham_attrs,
-};
+ATTRIBUTE_GROUPS(omap_sham);
static int omap_sham_probe(struct platform_device *pdev)
{
@@ -2158,12 +2155,6 @@ static int omap_sham_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
- if (err) {
- dev_err(dev, "could not create sysfs device attrs\n");
- goto err_algs;
- }
-
return 0;
err_algs:
@@ -2210,8 +2201,6 @@ static void omap_sham_remove(struct platform_device *pdev)
if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
-
- sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
}
static struct platform_driver omap_sham_driver = {
@@ -2220,6 +2209,7 @@ static struct platform_driver omap_sham_driver = {
.driver = {
.name = "omap-sham",
.of_match_table = omap_sham_of_match,
+ .dev_groups = omap_sham_groups,
},
};
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index db9e84c0c9fb..329f60ad422e 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -7,59 +7,89 @@
* Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
*/
+#include <asm/cpu_device_id.h>
#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#include <linux/cpufeature.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
+#include <linux/module.h>
-struct padlock_sha_desc {
- struct shash_desc fallback;
-};
+#define PADLOCK_SHA_DESCSIZE (128 + ((PADLOCK_ALIGNMENT - 1) & \
+ ~(CRYPTO_MINALIGN - 1)))
struct padlock_sha_ctx {
- struct crypto_shash *fallback;
+ struct crypto_ahash *fallback;
};
-static int padlock_sha_init(struct shash_desc *desc)
+static inline void *padlock_shash_desc_ctx(struct shash_desc *desc)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ return PTR_ALIGN(shash_desc_ctx(desc), PADLOCK_ALIGNMENT);
+}
+
+static int padlock_sha1_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = padlock_shash_desc_ctx(desc);
+
+ *sctx = (struct sha1_state){
+ .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+ };
+
+ return 0;
+}
+
+static int padlock_sha256_init(struct shash_desc *desc)
+{
+ struct crypto_sha256_state *sctx = padlock_shash_desc_ctx(desc);
- dctx->fallback.tfm = ctx->fallback;
- return crypto_shash_init(&dctx->fallback);
+ sha256_block_init(sctx);
+ return 0;
}
static int padlock_sha_update(struct shash_desc *desc,
const u8 *data, unsigned int length)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ u8 *state = padlock_shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ int err, remain;
+
+ remain = length - round_down(length, crypto_shash_blocksize(tfm));
+ {
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(tfm);
+ HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, data, NULL, length - remain);
+ err = crypto_ahash_import_core(req, state) ?:
+ crypto_ahash_update(req) ?:
+ crypto_ahash_export_core(req, state);
+ HASH_REQUEST_ZERO(req);
+ }
- return crypto_shash_update(&dctx->fallback, data, length);
+ return err ?: remain;
}
static int padlock_sha_export(struct shash_desc *desc, void *out)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fallback, out);
+ memcpy(out, padlock_shash_desc_ctx(desc),
+ crypto_shash_coresize(desc->tfm));
+ return 0;
}
static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ unsigned int bs = crypto_shash_blocksize(desc->tfm);
+ unsigned int ss = crypto_shash_coresize(desc->tfm);
+ u64 *state = padlock_shash_desc_ctx(desc);
+
+ memcpy(state, in, ss);
+
+ /* Stop evil imports from generating a fault. */
+ state[ss / 8 - 1] &= ~(bs - 1);
- dctx->fallback.tfm = ctx->fallback;
- return crypto_shash_import(&dctx->fallback, in);
+ return 0;
}
static inline void padlock_output_block(uint32_t *src,
@@ -69,65 +99,38 @@ static inline void padlock_output_block(uint32_t *src,
*dst++ = swab32(*src++);
}
+static int padlock_sha_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
+{
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+ HASH_REQUEST_ON_STACK(req, ctx->fallback);
+
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_virt(req, in, out, count);
+ return crypto_ahash_import_core(req, padlock_shash_desc_ctx(desc)) ?:
+ crypto_ahash_finup(req);
+}
+
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct sha1_state state;
- unsigned int space;
- unsigned int leftover;
- int err;
-
- err = crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
+ struct sha1_state *state = padlock_shash_desc_ctx(desc);
+ u64 start = state->count;
- if (state.count + count > ULONG_MAX)
- return crypto_shash_finup(&dctx->fallback, in, count, out);
-
- leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
- space = SHA1_BLOCK_SIZE - leftover;
- if (space) {
- if (count > space) {
- err = crypto_shash_update(&dctx->fallback, in, space) ?:
- crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
- count -= space;
- in += space;
- } else {
- memcpy(state.buffer + leftover, in, count);
- in = state.buffer;
- count += leftover;
- state.count &= ~(SHA1_BLOCK_SIZE - 1);
- }
- }
-
- memcpy(result, &state.state, SHA1_DIGEST_SIZE);
+ if (start + count > ULONG_MAX)
+ return padlock_sha_finup(desc, in, count, out);
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
- : "c"((unsigned long)state.count + count), \
- "a"((unsigned long)state.count), \
- "S"(in), "D"(result));
-
- padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+ : "c"((unsigned long)start + count), \
+ "a"((unsigned long)start), \
+ "S"(in), "D"(state));
-out:
- return err;
-}
-
-static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
-{
- const u8 *buf = (void *)desc;
-
- return padlock_sha1_finup(desc, buf, 0, out);
+ padlock_output_block(state->state, (uint32_t *)out, 5);
+ return 0;
}
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
@@ -136,78 +139,46 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- struct sha256_state state;
- unsigned int space;
- unsigned int leftover;
- int err;
-
- err = crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
+ struct sha256_state *state = padlock_shash_desc_ctx(desc);
+ u64 start = state->count;
- if (state.count + count > ULONG_MAX)
- return crypto_shash_finup(&dctx->fallback, in, count, out);
-
- leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
- space = SHA256_BLOCK_SIZE - leftover;
- if (space) {
- if (count > space) {
- err = crypto_shash_update(&dctx->fallback, in, space) ?:
- crypto_shash_export(&dctx->fallback, &state);
- if (err)
- goto out;
- count -= space;
- in += space;
- } else {
- memcpy(state.buf + leftover, in, count);
- in = state.buf;
- count += leftover;
- state.count &= ~(SHA1_BLOCK_SIZE - 1);
- }
- }
-
- memcpy(result, &state.state, SHA256_DIGEST_SIZE);
+ if (start + count > ULONG_MAX)
+ return padlock_sha_finup(desc, in, count, out);
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
- : "c"((unsigned long)state.count + count), \
- "a"((unsigned long)state.count), \
- "S"(in), "D"(result));
+ : "c"((unsigned long)start + count), \
+ "a"((unsigned long)start), \
+ "S"(in), "D"(state));
- padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
-
-out:
- return err;
-}
-
-static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
-{
- const u8 *buf = (void *)desc;
-
- return padlock_sha256_finup(desc, buf, 0, out);
+ padlock_output_block(state->state, (uint32_t *)out, 8);
+ return 0;
}
static int padlock_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
- struct crypto_shash *fallback_tfm;
+ struct crypto_ahash *fallback_tfm;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
return PTR_ERR(fallback_tfm);
}
+ if (crypto_shash_statesize(hash) !=
+ crypto_ahash_statesize(fallback_tfm)) {
+ crypto_free_ahash(fallback_tfm);
+ return -EINVAL;
+ }
+
ctx->fallback = fallback_tfm;
- hash->descsize += crypto_shash_descsize(fallback_tfm);
+
return 0;
}
@@ -215,26 +186,27 @@ static void padlock_exit_tfm(struct crypto_shash *hash)
{
struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
- crypto_free_shash(ctx->fallback);
+ crypto_free_ahash(ctx->fallback);
}
static struct shash_alg sha1_alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = padlock_sha_init,
+ .init = padlock_sha1_init,
.update = padlock_sha_update,
.finup = padlock_sha1_finup,
- .final = padlock_sha1_final,
.export = padlock_sha_export,
.import = padlock_sha_import,
.init_tfm = padlock_init_tfm,
.exit_tfm = padlock_exit_tfm,
- .descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha1_state),
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -243,21 +215,22 @@ static struct shash_alg sha1_alg = {
static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
- .init = padlock_sha_init,
+ .init = padlock_sha256_init,
.update = padlock_sha_update,
.finup = padlock_sha256_finup,
- .final = padlock_sha256_final,
+ .init_tfm = padlock_init_tfm,
.export = padlock_sha_export,
.import = padlock_sha_import,
- .init_tfm = padlock_init_tfm,
.exit_tfm = padlock_exit_tfm,
- .descsize = sizeof(struct padlock_sha_desc),
- .statesize = sizeof(struct sha256_state),
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = sizeof(struct crypto_sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
@@ -266,207 +239,58 @@ static struct shash_alg sha256_alg = {
/* Add two shash_alg instances for the hardware-implemented
 * multi-part hash supported by the VIA Nano processor. */
-static int padlock_sha1_init_nano(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
-}
static int padlock_sha1_update_nano(struct shash_desc *desc,
- const u8 *data, unsigned int len)
+ const u8 *src, unsigned int len)
{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
- u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
- memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
-
- if ((partial + len) >= SHA1_BLOCK_SIZE) {
-
- /* Append the bytes in state's buffer to a block to handle */
- if (partial) {
- done = -partial;
- memcpy(sctx->buffer + partial, data,
- done + SHA1_BLOCK_SIZE);
- src = sctx->buffer;
- asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
- : "+S"(src), "+D"(dst) \
- : "a"((long)-1), "c"((unsigned long)1));
- done += SHA1_BLOCK_SIZE;
- src = data + done;
- }
-
- /* Process the left bytes from the input data */
- if (len - done >= SHA1_BLOCK_SIZE) {
- asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1),
- "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
- done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
- src = data + done;
- }
- partial = 0;
- }
- memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
- memcpy(sctx->buffer + partial, src, len - done);
-
- return 0;
-}
-
-static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
- unsigned int partial, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(state->count << 3);
-
- /* Pad out to 56 mod 64 */
- partial = state->count & 0x3f;
- padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
- padlock_sha1_update_nano(desc, padding, padlen);
-
- /* Append length field bytes */
- padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Swap to output */
- padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
-
- return 0;
-}
-
-static int padlock_sha256_init_nano(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha256_state){
- .state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
- };
-
- return 0;
+ struct sha1_state *state = padlock_shash_desc_ctx(desc);
+ int blocks = len / SHA1_BLOCK_SIZE;
+
+ len -= blocks * SHA1_BLOCK_SIZE;
+ state->count += blocks * SHA1_BLOCK_SIZE;
+
+ /* Process all whole blocks of the input data */
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+ : "+S"(src), "+D"(state)
+ : "a"((long)-1),
+ "c"((unsigned long)blocks));
+ return len;
}
-static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
+static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial, done;
- const u8 *src;
/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned */
- u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
- ((aligned(STACK_ALIGN)));
- u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-
- partial = sctx->count & 0x3f;
- sctx->count += len;
- done = 0;
- src = data;
- memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
-
- if ((partial + len) >= SHA256_BLOCK_SIZE) {
-
- /* Append the bytes in state's buffer to a block to handle */
- if (partial) {
- done = -partial;
- memcpy(sctx->buf + partial, data,
- done + SHA256_BLOCK_SIZE);
- src = sctx->buf;
- asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1), "c"((unsigned long)1));
- done += SHA256_BLOCK_SIZE;
- src = data + done;
- }
-
- /* Process the left bytes from input data*/
- if (len - done >= SHA256_BLOCK_SIZE) {
- asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
- : "+S"(src), "+D"(dst)
- : "a"((long)-1),
- "c"((unsigned long)((len - done) / 64)));
- done += ((len - done) - (len - done) % 64);
- src = data + done;
- }
- partial = 0;
- }
- memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
- memcpy(sctx->buf + partial, src, len - done);
-
- return 0;
-}
-
-static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
-{
- struct sha256_state *state =
- (struct sha256_state *)shash_desc_ctx(desc);
- unsigned int partial, padlen;
- __be64 bits;
- static const u8 padding[64] = { 0x80, };
-
- bits = cpu_to_be64(state->count << 3);
-
- /* Pad out to 56 mod 64 */
- partial = state->count & 0x3f;
- padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
- padlock_sha256_update_nano(desc, padding, padlen);
-
- /* Append length field bytes */
- padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));
-
- /* Swap to output */
- padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);
-
- return 0;
-}
-
-static int padlock_sha_export_nano(struct shash_desc *desc,
- void *out)
-{
- int statesize = crypto_shash_statesize(desc->tfm);
- void *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, statesize);
- return 0;
-}
-
-static int padlock_sha_import_nano(struct shash_desc *desc,
- const void *in)
-{
- int statesize = crypto_shash_statesize(desc->tfm);
- void *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, statesize);
- return 0;
+ struct crypto_sha256_state *state = padlock_shash_desc_ctx(desc);
+ int blocks = len / SHA256_BLOCK_SIZE;
+
+ len -= blocks * SHA256_BLOCK_SIZE;
+ state->count += blocks * SHA256_BLOCK_SIZE;
+
+ /* Process all whole blocks of the input data */
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+ : "+S"(src), "+D"(state)
+ : "a"((long)-1),
+ "c"((unsigned long)blocks));
+ return len;
}
static struct shash_alg sha1_alg_nano = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = padlock_sha1_init_nano,
+ .init = padlock_sha1_init,
.update = padlock_sha1_update_nano,
- .final = padlock_sha1_final_nano,
- .export = padlock_sha_export_nano,
- .import = padlock_sha_import_nano,
- .descsize = sizeof(struct sha1_state),
- .statesize = sizeof(struct sha1_state),
+ .finup = padlock_sha1_finup,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = SHA1_STATE_SIZE,
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -474,17 +298,19 @@ static struct shash_alg sha1_alg_nano = {
static struct shash_alg sha256_alg_nano = {
.digestsize = SHA256_DIGEST_SIZE,
- .init = padlock_sha256_init_nano,
+ .init = padlock_sha256_init,
.update = padlock_sha256_update_nano,
- .final = padlock_sha256_final_nano,
- .export = padlock_sha_export_nano,
- .import = padlock_sha_import_nano,
- .descsize = sizeof(struct sha256_state),
- .statesize = sizeof(struct sha256_state),
+ .finup = padlock_sha256_finup,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
+ .descsize = PADLOCK_SHA_DESCSIZE,
+ .statesize = sizeof(struct crypto_sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock-nano",
.cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
+ CRYPTO_AHASH_ALG_FINUP_MAX,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
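
padlock_sha_import() above clamps the byte count stored at the end of an
imported state down to a block multiple, so a forged mid-block count cannot
steer the "rep xsha" finup into reading past the buffer. A self-contained
model of the clamp (hypothetical test, not part of the patch; the layout
mimics struct crypto_sha256_state, eight u32 words followed by a u64 count):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t state[5] = { 0 };       /* 32 bytes of digest + u64 count */
            unsigned int ss = sizeof(state); /* core state size, 40 bytes */
            unsigned int bs = 64;            /* SHA-256 block size */

            state[ss / 8 - 1] = 129;                  /* evil mid-block count */
            state[ss / 8 - 1] &= ~(uint64_t)(bs - 1); /* the import clamp */
            assert(state[ss / 8 - 1] == 128);         /* rounded to a block */
            return 0;
    }
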
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 69d6019d8abc..d6928ebe9526 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -52,12 +52,11 @@ static int rk_ahash_digest_fb(struct ahash_request *areq)
algt->stat_fb++;
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
- rctx->fallback_req.base.flags = areq->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = areq->nbytes;
- rctx->fallback_req.src = areq->src;
- rctx->fallback_req.result = areq->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ areq->base.complete, areq->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, areq->src, areq->result,
+ areq->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -124,8 +123,9 @@ static int rk_ahash_init(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -137,10 +137,10 @@ static int rk_ahash_update(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -152,9 +152,10 @@ static int rk_ahash_final(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -166,12 +167,11 @@ static int rk_ahash_finup(struct ahash_request *req)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -183,8 +183,9 @@ static int rk_ahash_import(struct ahash_request *req, const void *in)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -196,8 +197,9 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
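
The rockchip hunks (and the tegra-se and sa2ul hunks below) apply one
conversion: instead of poking base.flags, nbytes, src and result into the
fallback request by hand, the request is populated through the ahash setters
so flags, completion callback and crypt parameters stay consistent. Generic
sketch of the resulting shape (hypothetical helper, kernel-style):

    #include <crypto/hash.h>

    static int example_fallback_digest(struct ahash_request *req,
                                       struct crypto_ahash *fb_tfm,
                                       struct ahash_request *fb_req)
    {
            ahash_request_set_tfm(fb_req, fb_tfm);
            ahash_request_set_callback(fb_req,
                                       req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
                                       req->base.complete, req->base.data);
            ahash_request_set_crypt(fb_req, req->src, req->result, req->nbytes);
            return crypto_ahash_digest(fb_req);
    }
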
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index b4c3c14dafd5..b829c84f60f2 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -9,11 +9,17 @@
//
// Hash part based on omap-sham.c driver.
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -22,17 +28,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-
-#include <crypto/ctr.h>
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/hash.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#define _SBF(s, v) ((v) << (s))
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 091612b066f1..fdc0b2486069 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -1415,22 +1415,13 @@ static int sa_sha_run(struct ahash_request *req)
(auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
struct ahash_request *subreq = &rctx->fallback_req;
- int ret = 0;
+ int ret;
ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- crypto_ahash_init(subreq);
-
- subreq->nbytes = auth_len;
- subreq->src = req->src;
- subreq->result = req->result;
-
- ret |= crypto_ahash_update(subreq);
-
- subreq->nbytes = 0;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(subreq, req->src, req->result, auth_len);
- ret |= crypto_ahash_final(subreq);
+ ret = crypto_ahash_digest(subreq);
return ret;
}
@@ -1502,8 +1493,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
return ret;
if (alg_base) {
- ctx->shash = crypto_alloc_shash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->shash = crypto_alloc_shash(alg_base, 0, 0);
if (IS_ERR(ctx->shash)) {
dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
alg_base);
@@ -1511,8 +1501,7 @@ static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
/* for fallback */
ctx->fallback.ahash =
- crypto_alloc_ahash(alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ crypto_alloc_ahash(alg_base, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->fallback.ahash)) {
dev_err(ctx->dev_data->dev,
"Could not load fallback driver\n");
@@ -1546,54 +1535,38 @@ static int sa_sha_init(struct ahash_request *req)
crypto_ahash_digestsize(tfm), rctx);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, NULL, 0);
return crypto_ahash_init(&rctx->fallback_req);
}
static int sa_sha_update(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
static int sa_sha_final(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
static int sa_sha_finup(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result, req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -1605,8 +1578,7 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -1614,12 +1586,9 @@ static int sa_sha_import(struct ahash_request *req, const void *in)
static int sa_sha_export(struct ahash_request *req, void *out)
{
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
struct ahash_request *subreq = &rctx->fallback_req;
- ahash_request_set_tfm(subreq, ctx->fallback.ahash);
- subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(subreq, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
return crypto_ahash_export(subreq, out);
}
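
In sa_sha_run() the fallback path previously OR-ed together the results of
init + update + zero-length final, which can mask an early error; for a whole
message a single digest() call is equivalent and propagates the first failure.
Sketch of the equivalence (hypothetical wrapper):

    #include <crypto/hash.h>

    static int hash_whole_message(struct ahash_request *subreq,
                                  struct scatterlist *src, u8 *result,
                                  unsigned int len)
    {
            ahash_request_set_crypt(subreq, src, result, len);
            /* digest() == init() + update() + final() for one-shot input */
            return crypto_ahash_digest(subreq);
    }
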
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 42d007b7af45..d09b4aaeecef 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -117,8 +117,9 @@ static int tegra_sha_fallback_init(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_init(&rctx->fallback_req);
}
@@ -130,10 +131,10 @@ static int tegra_sha_fallback_update(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, NULL, req->nbytes);
return crypto_ahash_update(&rctx->fallback_req);
}
@@ -145,9 +146,10 @@ static int tegra_sha_fallback_final(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, NULL, req->result, 0);
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -159,12 +161,11 @@ static int tegra_sha_fallback_finup(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -176,12 +177,11 @@ static int tegra_sha_fallback_digest(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
+ ahash_request_set_crypt(&rctx->fallback_req, req->src, req->result,
+ req->nbytes);
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -193,8 +193,9 @@ static int tegra_sha_fallback_import(struct ahash_request *req, const void *in)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_import(&rctx->fallback_req, in);
}
@@ -206,8 +207,9 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_callback(&rctx->fallback_req,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP,
+ req->base.complete, req->base.data);
return crypto_ahash_export(&rctx->fallback_req, out);
}
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 580649f9bff8..5813017b6b79 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -3,18 +3,18 @@
* Xilinx ZynqMP SHA Driver.
* Copyright (c) 2022 Xilinx Inc.
*/
-#include <linux/cacheflush.h>
-#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha3.h>
-#include <linux/crypto.h>
+#include <linux/cacheflush.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -36,13 +36,11 @@ struct zynqmp_sha_tfm_ctx {
struct crypto_shash *fbk_tfm;
};
-struct zynqmp_sha_desc_ctx {
- struct shash_desc fbk_req;
-};
-
static dma_addr_t update_dma_addr, final_dma_addr;
static char *ubuf, *fbuf;
+static DEFINE_SPINLOCK(zynqmp_sha_lock);
+
static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
{
const char *fallback_driver_name = crypto_shash_alg_name(hash);
@@ -60,8 +58,13 @@ static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
if (IS_ERR(fallback_tfm))
return PTR_ERR(fallback_tfm);
+ if (crypto_shash_descsize(hash) <
+     crypto_shash_statesize(fallback_tfm)) {
+ crypto_free_shash(fallback_tfm);
+ return -EINVAL;
+ }
+
tfm_ctx->fbk_tfm = fallback_tfm;
- hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
return 0;
}
@@ -70,61 +73,55 @@ static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
{
struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
- if (tfm_ctx->fbk_tfm) {
- crypto_free_shash(tfm_ctx->fbk_tfm);
- tfm_ctx->fbk_tfm = NULL;
- }
+ crypto_free_shash(tfm_ctx->fbk_tfm);
+}
- memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+static int zynqmp_sha_continue(struct shash_desc *desc,
+ struct shash_desc *fbdesc, int err)
+{
+ err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
+ shash_desc_zero(fbdesc);
+ return err;
}
static int zynqmp_sha_init(struct shash_desc *desc)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ int err;
- dctx->fbk_req.tfm = tctx->fbk_tfm;
- return crypto_shash_init(&dctx->fbk_req);
+ fbdesc->tfm = fbtfm;
+ err = crypto_shash_init(fbdesc);
+ return zynqmp_sha_continue(desc, fbdesc, err);
}
static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_update(&dctx->fbk_req, data, length);
-}
-
-static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ int err;
- return crypto_shash_final(&dctx->fbk_req, out);
+ fbdesc->tfm = fbtfm;
+ err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+ crypto_shash_update(fbdesc, data, length);
+ return zynqmp_sha_continue(desc, fbdesc, err);
}
static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_finup(&dctx->fbk_req, data, length, out);
-}
-
-static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct crypto_shash *fbtfm = tctx->fbk_tfm;
+ SHASH_DESC_ON_STACK(fbdesc, fbtfm);
- dctx->fbk_req.tfm = tctx->fbk_tfm;
- return crypto_shash_import(&dctx->fbk_req, in);
+ fbdesc->tfm = fbtfm;
+ return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
+ crypto_shash_finup(fbdesc, data, length, out);
}
-static int zynqmp_sha_export(struct shash_desc *desc, void *out)
-{
- struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
-
- return crypto_shash_export(&dctx->fbk_req, out);
-}
-
-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
{
unsigned int remaining_len = len;
int update_size;
@@ -159,26 +156,27 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i
return ret;
}
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+{
+ scoped_guard(spinlock_bh, &zynqmp_sha_lock)
+ return __zynqmp_sha_digest(desc, data, len, out);
+}
+
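scoped_guard() here comes from <linux/cleanup.h>: it takes spin_lock_bh() on entry to the guarded statement and releases it on every exit path. A roughly equivalent open-coded form of the wrapper above:

static int zynqmp_sha_digest_open_coded(struct shash_desc *desc,
					const u8 *data, unsigned int len,
					u8 *out)
{
	int ret;

	/* Serialize hardware digests that share the static ubuf/fbuf. */
	spin_lock_bh(&zynqmp_sha_lock);
	ret = __zynqmp_sha_digest(desc, data, len, out);
	spin_unlock_bh(&zynqmp_sha_lock);
	return ret;
}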
static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
.sha3_384 = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
- .final = zynqmp_sha_final,
.finup = zynqmp_sha_finup,
.digest = zynqmp_sha_digest,
- .export = zynqmp_sha_export,
- .import = zynqmp_sha_import,
.init_tfm = zynqmp_sha_init_tfm,
.exit_tfm = zynqmp_sha_exit_tfm,
- .descsize = sizeof(struct zynqmp_sha_desc_ctx),
- .statesize = sizeof(struct sha3_state),
+ .descsize = SHA3_384_EXPORT_SIZE,
.digestsize = SHA3_384_DIGEST_SIZE,
.base = {
.cra_name = "sha3-384",
.cra_driver_name = "zynqmp-sha3-384",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
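The conversion above drops the long-lived per-request fallback descriptor: each entry point now builds a transient SHASH_DESC_ON_STACK() descriptor, imports the previously exported state from the request context, runs one operation, and (for init/update) exports the state back. A condensed sketch of the update-side pattern, assuming descsize has been validated against the fallback's statesize as in zynqmp_sha_init_tfm():

/* Sketch: stateless delegation -- desc's context holds only the
 * fallback's exported state, never a live fallback descriptor. */
static int example_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len, struct crypto_shash *fbtfm)
{
	SHASH_DESC_ON_STACK(fbdesc, fbtfm);
	int err;

	fbdesc->tfm = fbtfm;
	/* Rehydrate the fallback, advance it by one update... */
	err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
	      crypto_shash_update(fbdesc, data, len);
	/* ...then stash the new state and wipe the stack descriptor. */
	err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
	shash_desc_zero(fbdesc);
	return err;
}

Because the descriptor context now holds the generic export format itself, the explicit .export/.import/.statesize entries can go away from the alg.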
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index cf1ba673b8c2..48b7314afdb8 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -114,6 +114,77 @@ config CXL_FEATURES
If unsure say 'n'
+config CXL_EDAC_MEM_FEATURES
+ bool "CXL: EDAC Memory Features"
+ depends on EXPERT
+ depends on CXL_MEM
+ depends on CXL_FEATURES
+ depends on EDAC >= CXL_BUS
+ help
+ The CXL EDAC memory feature is optional and allows the host to
+ control the EDAC memory feature configurations of CXL memory
+ expander devices.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory RAS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_SCRUB
+ bool "Enable CXL Patrol Scrub Control (Patrol Read)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_SCRUB
+ help
+ The CXL EDAC scrub control is optional and allows the host to
+ control the scrub feature configurations of CXL memory expander
+ devices.
+
+ When enabled 'cxl_mem' and 'cxl_region' EDAC devices are
+ published with memory scrub control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-scrub.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory scrub feature established by the platform/device
+ (e.g. scrub rates for the patrol scrub feature).
+ Otherwise say 'n'.
+
+config CXL_EDAC_ECS
+ bool "Enable CXL Error Check Scrub (Repair)"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_ECS
+ help
+ The CXL EDAC ECS control is optional and allows the host to
+ control the ECS feature configurations of CXL memory expander
+ devices.
+
+ When enabled 'cxl_mem' EDAC devices are published with memory
+ ECS control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-ecs.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory ECS feature established by the platform/device.
+ Otherwise say 'n'.
+
+config CXL_EDAC_MEM_REPAIR
+ bool "Enable CXL Memory Repair"
+ depends on CXL_EDAC_MEM_FEATURES
+ depends on EDAC_MEM_REPAIR
+ help
+ The CXL EDAC memory repair control is optional and allows the host
+ to control the memory repair features (e.g. sparing, PPR)
+ configurations of CXL memory expander devices.
+
+ When enabled, the memory repair feature requires approximately
+ 43KB of additional memory to store CXL DRAM and CXL general
+ media event records.
+
+ When enabled 'cxl_mem' EDAC devices are published with memory
+ repair control attributes as described by
+ Documentation/ABI/testing/sysfs-edac-memory-repair.
+
+ Say 'y' if you have an expert need to change default settings
+ of a memory repair feature established by the platform/device.
+ Otherwise say 'n'.
+
config CXL_PORT
default CXL_BUS
tristate
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index cb14829bb9be..a1a99ec3f12c 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -11,8 +11,6 @@
#include "cxlpci.h"
#include "cxl.h"
-#define CXL_RCRB_SIZE SZ_8K
-
struct cxl_cxims_data {
int nr_maps;
u64 xormaps[] __counted_by(nr_maps);
@@ -421,7 +419,15 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = cxl_decoder_add(cxld, target_map);
if (rc)
return rc;
- return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+
+ rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
+ if (rc)
+ return rc;
+
+ dev_dbg(root_port->dev.parent, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&root_port->dev));
+
+ return 0;
}
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -479,7 +485,11 @@ static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
chbs = (struct acpi_cedt_chbs *) header;
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
- chbs->length != CXL_RCRB_SIZE)
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
+ return 0;
+
+ if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
+ chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
return 0;
if (!chbs->base)
@@ -739,10 +749,10 @@ static void remove_cxl_resources(void *data)
* expanding its boundaries to ensure that any conflicting resources become
* children. If a window is expanded it may then conflict with a another window
* entry and require the window to be truncated or trimmed. Consider this
- * situation:
+ * situation::
*
- * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
- * |--------------- "System RAM" -------------|
+ * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
+ * |--------------- "System RAM" -------------|
*
* ...where platform firmware has established a System RAM resource across 2
* windows, but has left some portion of window 1 for dynamic CXL region
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 086df97a0fcf..79e2ef81fde8 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -20,3 +20,4 @@ cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
cxl_core-$(CONFIG_CXL_MCE) += mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += features.o
+cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += edac.o
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index edb4f41eeacc..0ccef2f2a26a 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -28,7 +28,7 @@ static u32 cdat_normalize(u16 entry, u64 base, u8 type)
*/
if (entry == 0xffff || !entry)
return 0;
- else if (base > (UINT_MAX / (entry)))
+ if (base > (UINT_MAX / (entry)))
return 0;
/*
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 17b692eb3257..29b61828a847 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -76,7 +76,7 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
struct dentry *cxl_debugfs_create_dir(const char *dir);
int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
enum cxl_partition_mode mode);
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
@@ -124,6 +124,8 @@ int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
int nid, resource_size_t *size);
#ifdef CONFIG_CXL_FEATURES
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
enum cxl_get_feat_selection selection,
void *feat_out, size_t feat_out_size, u16 offset,
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
new file mode 100644
index 000000000000..2cbc664e5d62
--- /dev/null
+++ b/drivers/cxl/core/edac.c
@@ -0,0 +1,2102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CXL EDAC memory feature driver.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ *
+ * - Supports functions to configure EDAC features of the
+ * CXL memory devices.
+ * - Registers with the EDAC device subsystem driver to expose
+ * the features sysfs attributes to the user for configuring
+ * CXL memory RAS feature.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/edac.h>
+#include <linux/limits.h>
+#include <linux/unaligned.h>
+#include <linux/xarray.h>
+#include <cxl/features.h>
+#include <cxl.h>
+#include <cxlmem.h>
+#include "core.h"
+#include "trace.h"
+
+#define CXL_NR_EDAC_DEV_FEATURES 7
+
+#define CXL_SCRUB_NO_REGION -1
+
+struct cxl_patrol_scrub_context {
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-222 Device Patrol Scrub Control
+ * Feature Readable Attributes.
+ */
+struct cxl_scrub_rd_attrbs {
+ u8 scrub_cycle_cap;
+ __le16 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-223 Device Patrol Scrub Control
+ * Feature Writable Attributes.
+ */
+struct cxl_scrub_wr_attrbs {
+ u8 scrub_cycle_hours;
+ u8 scrub_flags;
+} __packed;
+
+#define CXL_SCRUB_CONTROL_CHANGEABLE BIT(0)
+#define CXL_SCRUB_CONTROL_REALTIME BIT(1)
+#define CXL_SCRUB_CONTROL_CYCLE_MASK GENMASK(7, 0)
+#define CXL_SCRUB_CONTROL_MIN_CYCLE_MASK GENMASK(15, 8)
+#define CXL_SCRUB_CONTROL_ENABLE BIT(0)
+
+#define CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CHANGEABLE, cap)
+#define CXL_GET_SCRUB_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_MIN_CYCLE(cycle) \
+ FIELD_GET(CXL_SCRUB_CONTROL_MIN_CYCLE_MASK, cycle)
+#define CXL_GET_SCRUB_EN_STS(flags) FIELD_GET(CXL_SCRUB_CONTROL_ENABLE, flags)
+
+#define CXL_SET_SCRUB_CYCLE(cycle) \
+ FIELD_PREP(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
+#define CXL_SET_SCRUB_EN(en) FIELD_PREP(CXL_SCRUB_CONTROL_ENABLE, en)
+
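A worked decode/encode of the 16-bit cycle field using the helpers above (the raw value is hypothetical): bits [7:0] carry the current scrub cycle in hours, bits [15:8] the device minimum.

/* Hypothetical readback: raw = 0x0c18. */
u16 raw = 0x0c18;
u8 cur_hours = CXL_GET_SCRUB_CYCLE(raw);	/* bits [7:0]  -> 0x18 = 24h */
u8 min_hours = CXL_GET_SCRUB_MIN_CYCLE(raw);	/* bits [15:8] -> 0x0c = 12h */
u8 wr_cycle  = CXL_SET_SCRUB_CYCLE(min_hours);	/* repacked for writeback */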
+static int cxl_mem_scrub_get_attrbs(struct cxl_mailbox *cxl_mbox, u8 *cap,
+ u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ size_t rd_data_size = sizeof(struct cxl_scrub_rd_attrbs);
+ size_t data_size;
+ struct cxl_scrub_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ *cap = rd_attrbs->scrub_cycle_cap;
+ *cycle = le16_to_cpu(rd_attrbs->scrub_cycle_hours);
+ *flags = rd_attrbs->scrub_flags;
+ if (min_cycle)
+ *min_cycle = CXL_GET_SCRUB_MIN_CYCLE(*cycle);
+
+ return 0;
+}
+
+static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
+{
+ struct cxl_mailbox *cxl_mbox;
+ u8 min_scrub_cycle = U8_MAX;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int i, ret;
+
+ if (!cxl_ps_ctx->cxlr) {
+ cxl_mbox = &cxl_ps_ctx->cxlmd->cxlds->cxl_mbox;
+ return cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle,
+ flags, min_cycle);
+ }
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle, flags,
+ min_cycle);
+ if (ret)
+ return ret;
+
+ if (min_cycle)
+ min_scrub_cycle = min(*min_cycle, min_scrub_cycle);
+ }
+
+ if (min_cycle)
+ *min_cycle = min_scrub_cycle;
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_region(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_region_params *p;
+ struct cxl_memdev *cxlmd;
+ struct cxl_region *cxlr;
+ int ret, i;
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ cxlr = cxl_ps_ctx->cxlr;
+ p = &cxlr->params;
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+ "Device scrub rate(%d hours) set by region%d rate overwritten by region%d scrub rate(%d hours)\n",
+ cxlmd->scrub_cycle,
+ cxlmd->scrub_region_id, cxlr->id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = cxlr->id;
+ }
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs_device(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ struct cxl_scrub_wr_attrbs wr_attrbs;
+ struct cxl_mailbox *cxl_mbox;
+ struct cxl_memdev *cxlmd;
+ int ret;
+
+ wr_attrbs.scrub_cycle_hours = cycle;
+ wr_attrbs.scrub_flags = flags;
+
+ cxlmd = cxl_ps_ctx->cxlmd;
+ cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
+ cxl_ps_ctx->set_version, &wr_attrbs,
+ sizeof(wr_attrbs),
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, 0,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (cycle != cxlmd->scrub_cycle) {
+ if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
+ dev_info(dev,
+ "Device scrub rate(%d hours) set by region%d rate overwritten with device local scrub rate(%d hours)\n",
+ cxlmd->scrub_cycle, cxlmd->scrub_region_id,
+ cycle);
+
+ cxlmd->scrub_cycle = cycle;
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ return 0;
+}
+
+static int cxl_scrub_set_attrbs(struct device *dev,
+ struct cxl_patrol_scrub_context *cxl_ps_ctx,
+ u8 cycle, u8 flags)
+{
+ if (cxl_ps_ctx->cxlr)
+ return cxl_scrub_set_attrbs_region(dev, cxl_ps_ctx, cycle, flags);
+
+ return cxl_scrub_set_attrbs_device(dev, cxl_ps_ctx, cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_enabled_bg(struct device *dev, void *drv_data,
+ bool *enabled)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *enabled = CXL_GET_SCRUB_EN_STS(flags);
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_enabled_bg(struct device *dev, void *drv_data,
+ bool enable)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, wr_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ wr_cycle = CXL_GET_SCRUB_CYCLE(rd_cycle);
+ flags = CXL_SET_SCRUB_EN(enable);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static int cxl_patrol_scrub_get_min_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *min)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags, min_cycle;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ *min = min_cycle * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_max_scrub_cycle(struct device *dev,
+ void *drv_data, u32 *max)
+{
+ *max = U8_MAX * 3600; /* Max set by register size */
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_get_scrub_cycle(struct device *dev, void *drv_data,
+ u32 *scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 cap, flags;
+ u16 cycle;
+ int ret;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
+ if (ret)
+ return ret;
+
+ *scrub_cycle_secs = CXL_GET_SCRUB_CYCLE(cycle) * 3600;
+
+ return 0;
+}
+
+static int cxl_patrol_scrub_set_scrub_cycle(struct device *dev, void *drv_data,
+ u32 scrub_cycle_secs)
+{
+ struct cxl_patrol_scrub_context *ctx = drv_data;
+ u8 scrub_cycle_hours = scrub_cycle_secs / 3600;
+ u8 cap, wr_cycle, flags, min_cycle;
+ u16 rd_cycle;
+ int ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, &min_cycle);
+ if (ret)
+ return ret;
+
+ if (!CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap))
+ return -EOPNOTSUPP;
+
+ if (scrub_cycle_hours < min_cycle) {
+ dev_dbg(dev, "Invalid CXL patrol scrub cycle(%d) to set\n",
+ scrub_cycle_hours);
+ dev_dbg(dev,
+ "Minimum supported CXL patrol scrub cycle in hour %d\n",
+ min_cycle);
+ return -EINVAL;
+ }
+ wr_cycle = CXL_SET_SCRUB_CYCLE(scrub_cycle_hours);
+
+ return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
+}
+
+static const struct edac_scrub_ops cxl_ps_scrub_ops = {
+ .get_enabled_bg = cxl_patrol_scrub_get_enabled_bg,
+ .set_enabled_bg = cxl_patrol_scrub_set_enabled_bg,
+ .get_min_cycle = cxl_patrol_scrub_get_min_scrub_cycle,
+ .get_max_cycle = cxl_patrol_scrub_get_max_scrub_cycle,
+ .get_cycle_duration = cxl_patrol_scrub_get_scrub_cycle,
+ .set_cycle_duration = cxl_patrol_scrub_set_scrub_cycle,
+};
+
+static int cxl_memdev_scrub_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_feat_entry *feat_entry;
+ u8 cap, flags;
+ u16 cycle;
+ int rc;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_ps_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlmd = cxlmd,
+ };
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap, &cycle,
+ &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+static int cxl_region_scrub_init(struct cxl_region *cxlr,
+ struct edac_dev_feature *ras_feature,
+ u8 scrub_inst)
+{
+ struct cxl_patrol_scrub_context *cxl_ps_ctx;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_feat_entry *feat_entry = NULL;
+ struct cxl_memdev *cxlmd;
+ u8 cap, flags;
+ u16 cycle;
+ int i, rc;
+
+ /*
+ * The cxl_region_rwsem must be held if the code below runs outside
+ * region probe. This is the probe path, so no lock is taken here.
+ */
+ for (i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+
+ cxlmd = cxled_to_memdev(cxled);
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_PATROL_SCRUB_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) &
+ CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap,
+ &cycle, &flags, NULL);
+ if (rc)
+ return rc;
+
+ cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
+ cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
+ }
+
+ cxl_ps_ctx = devm_kzalloc(&cxlr->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
+ if (!cxl_ps_ctx)
+ return -ENOMEM;
+
+ *cxl_ps_ctx = (struct cxl_patrol_scrub_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .instance = scrub_inst,
+ .cxlr = cxlr,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_SCRUB;
+ ras_feature->instance = cxl_ps_ctx->instance;
+ ras_feature->scrub_ops = &cxl_ps_scrub_ops;
+ ras_feature->ctx = cxl_ps_ctx;
+
+ return 0;
+}
+
+struct cxl_ecs_context {
+ u16 num_media_frus;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ struct cxl_memdev *cxlmd;
+};
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-225 DDR5 ECS Control Feature
+ * Readable Attributes.
+ */
+struct cxl_ecs_fru_rd_attrbs {
+ u8 ecs_cap;
+ __le16 ecs_config;
+ u8 ecs_flags;
+} __packed;
+
+struct cxl_ecs_rd_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_rd_attrbs fru_attrbs[];
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-226 DDR5 ECS Control Feature
+ * Writable Attributes.
+ */
+struct cxl_ecs_fru_wr_attrbs {
+ __le16 ecs_config;
+} __packed;
+
+struct cxl_ecs_wr_attrbs {
+ u8 ecs_log_cap;
+ struct cxl_ecs_fru_wr_attrbs fru_attrbs[];
+} __packed;
+
+#define CXL_ECS_LOG_ENTRY_TYPE_MASK GENMASK(1, 0)
+#define CXL_ECS_REALTIME_REPORT_CAP_MASK BIT(0)
+#define CXL_ECS_THRESHOLD_COUNT_MASK GENMASK(2, 0)
+#define CXL_ECS_COUNT_MODE_MASK BIT(3)
+#define CXL_ECS_RESET_COUNTER_MASK BIT(4)
+#define CXL_ECS_RESET_COUNTER 1
+
+enum {
+ ECS_THRESHOLD_256 = 256,
+ ECS_THRESHOLD_1024 = 1024,
+ ECS_THRESHOLD_4096 = 4096,
+};
+
+enum {
+ ECS_THRESHOLD_IDX_256 = 3,
+ ECS_THRESHOLD_IDX_1024 = 4,
+ ECS_THRESHOLD_IDX_4096 = 5,
+};
+
+static const u16 ecs_supp_threshold[] = {
+ [ECS_THRESHOLD_IDX_256] = 256,
+ [ECS_THRESHOLD_IDX_1024] = 1024,
+ [ECS_THRESHOLD_IDX_4096] = 4096,
+};
+
+enum {
+ ECS_LOG_ENTRY_TYPE_DRAM = 0x0,
+ ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU = 0x1,
+};
+
+enum cxl_ecs_count_mode {
+ ECS_MODE_COUNTS_ROWS = 0,
+ ECS_MODE_COUNTS_CODEWORDS = 1,
+};
+
+static int cxl_mem_ecs_get_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 *log_cap, u16 *config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ size_t rd_data_size;
+ size_t data_size;
+
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ *log_cap = rd_attrbs->ecs_log_cap;
+ *config = le16_to_cpu(fru_rd_attrbs[fru_id].ecs_config);
+
+ return 0;
+}
+
+static int cxl_mem_ecs_set_attrbs(struct device *dev,
+ struct cxl_ecs_context *cxl_ecs_ctx,
+ int fru_id, u8 log_cap, u16 config)
+{
+ struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
+ struct cxl_ecs_fru_wr_attrbs *fru_wr_attrbs;
+ size_t rd_data_size, wr_data_size;
+ u16 num_media_frus, count;
+ size_t data_size;
+
+ num_media_frus = cxl_ecs_ctx->num_media_frus;
+ rd_data_size = cxl_ecs_ctx->get_feat_size;
+ wr_data_size = cxl_ecs_ctx->set_feat_size;
+ struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
+ kvzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, NULL);
+ if (!data_size)
+ return -EIO;
+
+ struct cxl_ecs_wr_attrbs *wr_attrbs __free(kvfree) =
+ kvzalloc(wr_data_size, GFP_KERNEL);
+ if (!wr_attrbs)
+ return -ENOMEM;
+
+ /*
+ * Fill writable attributes from the current attributes read
+ * for all the media FRUs.
+ */
+ fru_rd_attrbs = rd_attrbs->fru_attrbs;
+ fru_wr_attrbs = wr_attrbs->fru_attrbs;
+ wr_attrbs->ecs_log_cap = log_cap;
+ for (count = 0; count < num_media_frus; count++)
+ fru_wr_attrbs[count].ecs_config =
+ fru_rd_attrbs[count].ecs_config;
+
+ fru_wr_attrbs[fru_id].ecs_config = cpu_to_le16(config);
+
+ return cxl_set_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
+ cxl_ecs_ctx->set_version, wr_attrbs,
+ wr_data_size,
+ CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
+ 0, NULL);
+}
+
+static u8 cxl_get_ecs_log_entry_type(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_LOG_ENTRY_TYPE_MASK, log_cap);
+}
+
+static u16 cxl_get_ecs_threshold(u8 log_cap, u16 config)
+{
+ u8 index = FIELD_GET(CXL_ECS_THRESHOLD_COUNT_MASK, config);
+
+ return ecs_supp_threshold[index];
+}
+
+static u8 cxl_get_ecs_count_mode(u8 log_cap, u16 config)
+{
+ return FIELD_GET(CXL_ECS_COUNT_MODE_MASK, config);
+}
+
+#define CXL_ECS_GET_ATTR(attrb) \
+ static int cxl_ecs_get_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 *val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ *val = cxl_get_ecs_##attrb(log_cap, config); \
+ \
+ return 0; \
+ }
+
+CXL_ECS_GET_ATTR(log_entry_type)
+CXL_ECS_GET_ATTR(count_mode)
+CXL_ECS_GET_ATTR(threshold)
+
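For reference, each CXL_ECS_GET_ATTR(attrb) instantiation above pastes 'attrb' into both the function name and the decode helper; the hand expansion for threshold is:

static int cxl_ecs_get_threshold(struct device *dev, void *drv_data,
				 int fru_id, u32 *val)
{
	struct cxl_ecs_context *ctx = drv_data;
	u8 log_cap;
	u16 config;
	int ret;

	/* Read the FRU's current attributes, then decode one field. */
	ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, &config);
	if (ret)
		return ret;

	*val = cxl_get_ecs_threshold(log_cap, config);
	return 0;
}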
+static int cxl_set_ecs_log_entry_type(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != ECS_LOG_ENTRY_TYPE_DRAM &&
+ val != ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU)
+ return -EINVAL;
+
+ *log_cap = FIELD_PREP(CXL_ECS_LOG_ENTRY_TYPE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ *config &= ~CXL_ECS_THRESHOLD_COUNT_MASK;
+
+ switch (val) {
+ case ECS_THRESHOLD_256:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_256);
+ break;
+ case ECS_THRESHOLD_1024:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_1024);
+ break;
+ case ECS_THRESHOLD_4096:
+ *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
+ ECS_THRESHOLD_IDX_4096);
+ break;
+ default:
+ dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n",
+ val);
+ dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
+ ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
+ ECS_THRESHOLD_4096);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_set_ecs_count_mode(struct device *dev, u8 *log_cap, u16 *config,
+ u32 val)
+{
+ if (val != ECS_MODE_COUNTS_ROWS && val != ECS_MODE_COUNTS_CODEWORDS) {
+ dev_dbg(dev, "Invalid CXL ECS scrub mode(%d) to set\n", val);
+ dev_dbg(dev,
+ "Supported ECS Modes: 0: ECS counts rows with errors, 1: ECS counts codewords with errors\n");
+ return -EINVAL;
+ }
+
+ *config &= ~CXL_ECS_COUNT_MODE_MASK;
+ *config |= FIELD_PREP(CXL_ECS_COUNT_MODE_MASK, val);
+
+ return 0;
+}
+
+static int cxl_set_ecs_reset_counter(struct device *dev, u8 *log_cap,
+ u16 *config, u32 val)
+{
+ if (val != CXL_ECS_RESET_COUNTER)
+ return -EINVAL;
+
+ *config &= ~CXL_ECS_RESET_COUNTER_MASK;
+ *config |= FIELD_PREP(CXL_ECS_RESET_COUNTER_MASK, val);
+
+ return 0;
+}
+
+#define CXL_ECS_SET_ATTR(attrb) \
+ static int cxl_ecs_set_##attrb(struct device *dev, void *drv_data, \
+ int fru_id, u32 val) \
+ { \
+ struct cxl_ecs_context *ctx = drv_data; \
+ u8 log_cap; \
+ u16 config; \
+ int ret; \
+ \
+ if (!capable(CAP_SYS_RAWIO)) \
+ return -EPERM; \
+ \
+ ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \
+ &config); \
+ if (ret) \
+ return ret; \
+ \
+ ret = cxl_set_ecs_##attrb(dev, &log_cap, &config, val); \
+ if (ret) \
+ return ret; \
+ \
+ return cxl_mem_ecs_set_attrbs(dev, ctx, fru_id, log_cap, \
+ config); \
+ }
+CXL_ECS_SET_ATTR(log_entry_type)
+CXL_ECS_SET_ATTR(count_mode)
+CXL_ECS_SET_ATTR(reset_counter)
+CXL_ECS_SET_ATTR(threshold)
+
+static const struct edac_ecs_ops cxl_ecs_ops = {
+ .get_log_entry_type = cxl_ecs_get_log_entry_type,
+ .set_log_entry_type = cxl_ecs_set_log_entry_type,
+ .get_mode = cxl_ecs_get_count_mode,
+ .set_mode = cxl_ecs_set_count_mode,
+ .reset = cxl_ecs_set_reset_counter,
+ .get_threshold = cxl_ecs_get_threshold,
+ .set_threshold = cxl_ecs_set_threshold,
+};
+
+static int cxl_memdev_ecs_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature)
+{
+ struct cxl_ecs_context *cxl_ecs_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int num_media_frus;
+
+ feat_entry =
+ cxl_feature_info(to_cxlfs(cxlmd->cxlds), &CXL_FEAT_ECS_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ num_media_frus = (le16_to_cpu(feat_entry->get_feat_size) -
+ sizeof(struct cxl_ecs_rd_attrbs)) /
+ sizeof(struct cxl_ecs_fru_rd_attrbs);
+ if (!num_media_frus)
+ return -EOPNOTSUPP;
+
+ cxl_ecs_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ecs_ctx), GFP_KERNEL);
+ if (!cxl_ecs_ctx)
+ return -ENOMEM;
+
+ *cxl_ecs_ctx = (struct cxl_ecs_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .num_media_frus = num_media_frus,
+ .cxlmd = cxlmd,
+ };
+
+ ras_feature->ft_type = RAS_FEAT_ECS;
+ ras_feature->ecs_ops = &cxl_ecs_ops;
+ ras_feature->ctx = cxl_ecs_ctx;
+ ras_feature->ecs_info.num_media_frus = num_media_frus;
+
+ return 0;
+}
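Each *_init() helper above only fills one struct edac_dev_feature slot; publishing the collected features is a separate registration step not shown in this hunk. A sketch of such a caller, assuming the edac_dev_register() entry point from the EDAC features API (the "cxl_" name prefix is illustrative):

static int example_memdev_edac_register(struct cxl_memdev *cxlmd)
{
	struct edac_dev_feature features[CXL_NR_EDAC_DEV_FEATURES];
	int num = 0;
	char *name;

	/* Collect whichever features the device advertises... */
	if (!cxl_memdev_scrub_init(cxlmd, &features[num], 0))
		num++;
	if (!cxl_memdev_ecs_init(cxlmd, &features[num]))
		num++;
	if (!num)
		return -ENODEV;

	name = devm_kasprintf(&cxlmd->dev, GFP_KERNEL, "cxl_%s",
			      dev_name(&cxlmd->dev));
	if (!name)
		return -ENOMEM;

	/* ...and publish them as one EDAC device. */
	return edac_dev_register(&cxlmd->dev, name, NULL, num, features);
}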
+
+/*
+ * Perform Maintenance CXL 3.2 Spec 8.2.10.7.1
+ */
+
+/*
+ * Perform Maintenance input payload
+ * CXL rev 3.2 section 8.2.10.7.1 Table 8-117
+ */
+struct cxl_mbox_maintenance_hdr {
+ u8 op_class;
+ u8 op_subclass;
+} __packed;
+
+static int cxl_perform_maintenance(struct cxl_mailbox *cxl_mbox, u8 class,
+ u8 subclass, void *data_in,
+ size_t data_in_size)
+{
+ struct cxl_memdev_maintenance_pi {
+ struct cxl_mbox_maintenance_hdr hdr;
+ u8 data[];
+ } __packed;
+ struct cxl_mbox_cmd mbox_cmd;
+ size_t hdr_size;
+
+ struct cxl_memdev_maintenance_pi *pi __free(kvfree) =
+ kvzalloc(cxl_mbox->payload_size, GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ pi->hdr.op_class = class;
+ pi->hdr.op_subclass = subclass;
+ hdr_size = sizeof(pi->hdr);
+ /*
+ * Check that the mailbox payload has room for the
+ * maintenance data transfer.
+ */
+ if (hdr_size + data_in_size > cxl_mbox->payload_size)
+ return -ENOMEM;
+
+ memcpy(pi->data, data_in, data_in_size);
+ mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_DO_MAINTENANCE,
+ .size_in = hdr_size + data_in_size,
+ .payload_in = pi,
+ };
+
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+}
+
+/*
+ * Support for determining whether a memory operation's attributes
+ * are from the current boot.
+ */
+
+struct cxl_mem_err_rec {
+ struct xarray rec_gen_media;
+ struct xarray rec_dram;
+};
+
+enum cxl_mem_repair_type {
+ CXL_PPR,
+ CXL_CACHELINE_SPARING,
+ CXL_ROW_SPARING,
+ CXL_BANK_SPARING,
+ CXL_RANK_SPARING,
+ CXL_REPAIR_MAX,
+};
+
+/**
+ * struct cxl_mem_repair_attrbs - CXL memory repair attributes
+ * @dpa: DPA of memory to repair
+ * @nibble_mask: nibble mask, identifies one or more nibbles on the memory bus
+ * @row: row of memory to repair
+ * @column: column of memory to repair
+ * @channel: channel of memory to repair
+ * @sub_channel: sub channel of memory to repair
+ * @rank: rank of memory to repair
+ * @bank_group: bank group of memory to repair
+ * @bank: bank of memory to repair
+ * @repair_type: repair type, e.g. PPR, memory sparing, etc.
+ */
+struct cxl_mem_repair_attrbs {
+ u64 dpa;
+ u32 nibble_mask;
+ u32 row;
+ u16 column;
+ u8 channel;
+ u8 sub_channel;
+ u8 rank;
+ u8 bank_group;
+ u8 bank;
+ enum cxl_mem_repair_type repair_type;
+};
+
+static struct cxl_event_gen_media *
+cxl_find_rec_gen_media(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_gen_media, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ if (attrbs->repair_type == CXL_PPR)
+ return rec;
+
+ return NULL;
+}
+
+static struct cxl_event_dram *
+cxl_find_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_repair_attrbs *attrbs)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ u16 validity_flags;
+
+ if (!array_rec)
+ return NULL;
+
+ rec = xa_load(&array_rec->rec_dram, attrbs->dpa);
+ if (!rec)
+ return NULL;
+
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+ if (!(validity_flags & CXL_DER_VALID_CHANNEL) ||
+ !(validity_flags & CXL_DER_VALID_RANK))
+ return NULL;
+
+ switch (attrbs->repair_type) {
+ case CXL_PPR:
+ if (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) == attrbs->nibble_mask)
+ return rec;
+ break;
+ case CXL_CACHELINE_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW) ||
+ !(validity_flags & CXL_DER_VALID_COLUMN))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ get_unaligned_le16(rec->column) == attrbs->column &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask) &&
+ (!(validity_flags & CXL_DER_VALID_SUB_CHANNEL) ||
+ rec->sub_channel == attrbs->sub_channel))
+ return rec;
+ break;
+ case CXL_ROW_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK) ||
+ !(validity_flags & CXL_DER_VALID_ROW))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ get_unaligned_le24(rec->row) == attrbs->row &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_BANK_SPARING:
+ if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
+ !(validity_flags & CXL_DER_VALID_BANK))
+ return NULL;
+
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ rec->bank_group == attrbs->bank_group &&
+ rec->bank == attrbs->bank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ case CXL_RANK_SPARING:
+ if (rec->media_hdr.channel == attrbs->channel &&
+ rec->media_hdr.rank == attrbs->rank &&
+ (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
+ get_unaligned_le24(rec->nibble_mask) ==
+ attrbs->nibble_mask))
+ return rec;
+ break;
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+#define CXL_MAX_STORAGE_DAYS 10
+#define CXL_MAX_STORAGE_TIME_SECS (CXL_MAX_STORAGE_DAYS * 24 * 60 * 60)
+
+static void cxl_del_expired_gmedia_recs(struct xarray *rec_xarray,
+ struct cxl_event_gen_media *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_gen_media *rec;
+ unsigned long index;
+ u64 delta_ts_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_ts_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_ts_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+static void cxl_del_expired_dram_recs(struct xarray *rec_xarray,
+ struct cxl_event_dram *cur_rec)
+{
+ u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
+ struct cxl_event_dram *rec;
+ unsigned long index;
+ u64 delta_secs;
+
+ xa_for_each(rec_xarray, index, rec) {
+ delta_secs = (cur_ts -
+ le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
+ if (delta_secs >= CXL_MAX_STORAGE_TIME_SECS) {
+ xa_erase(rec_xarray, index);
+ kfree(rec);
+ }
+ }
+}
+
+#define CXL_MAX_REC_STORAGE_COUNT 200
+
+static void cxl_del_overflow_old_recs(struct xarray *rec_xarray)
+{
+ void *err_rec;
+ unsigned long index, count = 0;
+
+ xa_for_each(rec_xarray, index, err_rec)
+ count++;
+
+ if (count <= CXL_MAX_REC_STORAGE_COUNT)
+ return;
+
+ count -= CXL_MAX_REC_STORAGE_COUNT;
+ xa_for_each(rec_xarray, index, err_rec) {
+ xa_erase(rec_xarray, index);
+ kfree(err_rec);
+ count--;
+ if (!count)
+ break;
+ }
+}
+
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->gen_media, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_gen_media,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+ if (xa_is_err(old_rec))
+ return xa_err(old_rec);
+
+ kfree(old_rec);
+
+ cxl_del_expired_gmedia_recs(&array_rec->rec_gen_media, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_gen_media);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_gen_media, "CXL");
+
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_dram *rec;
+ void *old_rec;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return 0;
+
+ rec = kmemdup(&evt->dram, sizeof(*rec), GFP_KERNEL);
+ if (!rec)
+ return -ENOMEM;
+
+ old_rec = xa_store(&array_rec->rec_dram,
+ le64_to_cpu(rec->media_hdr.phys_addr), rec,
+ GFP_KERNEL);
+ if (xa_is_err(old_rec))
+ return xa_err(old_rec);
+
+ kfree(old_rec);
+
+ cxl_del_expired_dram_recs(&array_rec->rec_dram, rec);
+ cxl_del_overflow_old_recs(&array_rec->rec_dram);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_dram, "CXL");
+
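Both store helpers are exported for the event-ingest path; a hypothetical caller (name and dispatch are illustrative) persists each repair-relevant record, keyed by DPA, alongside tracing it:

/* Hypothetical ingest hook: stash records the repair code can match later. */
static void example_handle_event(struct cxl_memdev *cxlmd,
				 enum cxl_event_type type,
				 union cxl_event *evt)
{
	int rc = 0;

	if (type == CXL_CPER_EVENT_GEN_MEDIA)
		rc = cxl_store_rec_gen_media(cxlmd, evt);
	else if (type == CXL_CPER_EVENT_DRAM)
		rc = cxl_store_rec_dram(cxlmd, evt);

	if (rc)
		dev_dbg(&cxlmd->dev, "event record not stored: %d\n", rc);
}

Note that both helpers return 0 and store nothing when CONFIG_CXL_EDAC_MEM_REPAIR is off, so callers need no ifdefs.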
+static bool cxl_is_memdev_memory_online(const struct cxl_memdev *cxlmd)
+{
+ struct cxl_port *port = cxlmd->endpoint;
+
+ if (port && cxl_num_decoders_committed(port))
+ return true;
+
+ return false;
+}
+
+/*
+ * CXL memory sparing control
+ */
+enum cxl_mem_sparing_granularity {
+ CXL_MEM_SPARING_CACHELINE,
+ CXL_MEM_SPARING_ROW,
+ CXL_MEM_SPARING_BANK,
+ CXL_MEM_SPARING_RANK,
+ CXL_MEM_SPARING_MAX
+};
+
+struct cxl_mem_sparing_context {
+ struct cxl_memdev *cxlmd;
+ uuid_t repair_uuid;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u16 effects;
+ u8 instance;
+ u8 get_version;
+ u8 set_version;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_safe_when_in_use;
+ bool cap_hard_sparing;
+ bool cap_soft_sparing;
+ u8 channel;
+ u8 rank;
+ u8 bank_group;
+ u32 nibble_mask;
+ u64 dpa;
+ u32 row;
+ u16 column;
+ u8 bank;
+ u8 sub_channel;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+};
+
+#define CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK BIT(0)
+#define CXL_SPARING_RD_CAP_HARD_SPARING_MASK BIT(1)
+#define CXL_SPARING_RD_CAP_SOFT_SPARING_MASK BIT(2)
+
+#define CXL_SPARING_WR_DEVICE_INITIATED_MASK BIT(0)
+
+#define CXL_SPARING_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_SET_HARD_SPARING_FLAG BIT(1)
+#define CXL_SPARING_SUB_CHNL_VALID_FLAG BIT(2)
+#define CXL_SPARING_NIB_MASK_VALID_FLAG BIT(3)
+
+#define CXL_GET_SPARING_SAFE_IN_USE(flags) \
+ (FIELD_GET(CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK, \
+ flags) ^ 1)
+#define CXL_GET_CAP_HARD_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_HARD_SPARING_MASK, \
+ flags)
+#define CXL_GET_CAP_SOFT_SPARING(flags) \
+ FIELD_GET(CXL_SPARING_RD_CAP_SOFT_SPARING_MASK, \
+ flags)
+
+#define CXL_SET_SPARING_QUERY_RESOURCE(val) \
+ FIELD_PREP(CXL_SPARING_QUERY_RESOURCE_FLAG, val)
+#define CXL_SET_HARD_SPARING(val) \
+ FIELD_PREP(CXL_SET_HARD_SPARING_FLAG, val)
+#define CXL_SET_SPARING_SUB_CHNL_VALID(val) \
+ FIELD_PREP(CXL_SPARING_SUB_CHNL_VALID_FLAG, val)
+#define CXL_SET_SPARING_NIB_MASK_VALID(val) \
+ FIELD_PREP(CXL_SPARING_NIB_MASK_VALID_FLAG, val)
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.2.3 Table 8-134 Memory Sparing Feature
+ * Readable Attributes.
+ */
+struct cxl_memdev_repair_rd_attrbs_hdr {
+ u8 max_op_latency;
+ __le16 op_cap;
+ __le16 op_mode;
+ u8 op_class;
+ u8 op_subclass;
+ u8 rsvd[9];
+} __packed;
+
+struct cxl_memdev_sparing_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 rsvd;
+ __le16 restriction_flags;
+} __packed;
+
+/*
+ * See CXL spec rev 3.2 @8.2.10.7.1.4 Table 8-120 Memory Sparing Input Payload.
+ */
+struct cxl_memdev_sparing_in_payload {
+ u8 flags;
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ __le16 column;
+ u8 sub_channel;
+} __packed;
+
+static int
+cxl_mem_sparing_get_attrbs(struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_sparing_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+ struct cxl_memdev_sparing_rd_attrbs *rd_attrbs __free(kfree) =
+ kzalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_sparing_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_sparing_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_sparing_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_sparing_ctx->cap_safe_when_in_use =
+ CXL_GET_SPARING_SAFE_IN_USE(restriction_flags);
+ cxl_sparing_ctx->cap_hard_sparing =
+ CXL_GET_CAP_HARD_SPARING(restriction_flags);
+ cxl_sparing_ctx->cap_soft_sparing =
+ CXL_GET_CAP_SOFT_SPARING(restriction_flags);
+
+ return 0;
+}
+
+static struct cxl_event_dram *
+cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
+ struct cxl_mem_sparing_context *ctx)
+{
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
+ attrbs.dpa = ctx->dpa;
+ attrbs.channel = ctx->channel;
+ attrbs.rank = ctx->rank;
+ attrbs.nibble_mask = ctx->nibble_mask;
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ attrbs.repair_type = CXL_CACHELINE_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ attrbs.column = ctx->column;
+ attrbs.sub_channel = ctx->sub_channel;
+ break;
+ case EDAC_REPAIR_ROW_SPARING:
+ attrbs.repair_type = CXL_ROW_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ attrbs.row = ctx->row;
+ break;
+ case EDAC_REPAIR_BANK_SPARING:
+ attrbs.repair_type = CXL_BANK_SPARING;
+ attrbs.bank_group = ctx->bank_group;
+ attrbs.bank = ctx->bank;
+ break;
+ case EDAC_REPAIR_RANK_SPARING:
+ attrbs.repair_type = CXL_RANK_SPARING;
+ break;
+ default:
+ return NULL;
+ }
+
+ return cxl_find_rec_dram(cxlmd, &attrbs);
+}
+
+static int
+cxl_mem_perform_sparing(struct device *dev,
+ struct cxl_mem_sparing_context *cxl_sparing_ctx)
+{
+ struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
+ struct cxl_memdev_sparing_in_payload sparing_pi;
+ struct cxl_event_dram *rec = NULL;
+ u16 validity_flags = 0;
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_dpa_rwsem);
+ if (!dpa_lock)
+ return -EINTR;
+
+ if (!cxl_sparing_ctx->cap_safe_when_in_use) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ rec = cxl_mem_get_rec_dram(cxlmd, cxl_sparing_ctx);
+ if (!rec)
+ return -EINVAL;
+
+ if (!get_unaligned_le16(rec->media_hdr.validity_flags))
+ return -EINVAL;
+ }
+ }
+
+ memset(&sparing_pi, 0, sizeof(sparing_pi));
+ sparing_pi.flags = CXL_SET_SPARING_QUERY_RESOURCE(0);
+ if (cxl_sparing_ctx->persist_mode)
+ sparing_pi.flags |= CXL_SET_HARD_SPARING(1);
+
+ if (rec)
+ validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
+
+ switch (cxl_sparing_ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ sparing_pi.column = cpu_to_le16(cxl_sparing_ctx->column);
+ if (!rec || (validity_flags & CXL_DER_VALID_SUB_CHANNEL)) {
+ sparing_pi.flags |= CXL_SET_SPARING_SUB_CHNL_VALID(1);
+ sparing_pi.sub_channel = cxl_sparing_ctx->sub_channel;
+ }
+ fallthrough;
+ case EDAC_REPAIR_ROW_SPARING:
+ put_unaligned_le24(cxl_sparing_ctx->row, sparing_pi.row);
+ fallthrough;
+ case EDAC_REPAIR_BANK_SPARING:
+ sparing_pi.bank_group = cxl_sparing_ctx->bank_group;
+ sparing_pi.bank = cxl_sparing_ctx->bank;
+ fallthrough;
+ case EDAC_REPAIR_RANK_SPARING:
+ sparing_pi.rank = cxl_sparing_ctx->rank;
+ fallthrough;
+ default:
+ sparing_pi.channel = cxl_sparing_ctx->channel;
+ if ((rec && (validity_flags & CXL_DER_VALID_NIBBLE)) ||
+ (!rec && (!cxl_sparing_ctx->nibble_mask ||
+ (cxl_sparing_ctx->nibble_mask & 0xFFFFFF)))) {
+ sparing_pi.flags |= CXL_SET_SPARING_NIB_MASK_VALID(1);
+ put_unaligned_le24(cxl_sparing_ctx->nibble_mask,
+ sparing_pi.nibble_mask);
+ }
+ break;
+ }
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_sparing_ctx->op_class,
+ cxl_sparing_ctx->op_subclass,
+ &sparing_pi, sizeof(sparing_pi));
+}
+
+static int cxl_mem_sparing_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ switch (ctx->repair_type) {
+ case EDAC_REPAIR_CACHELINE_SPARING:
+ case EDAC_REPAIR_ROW_SPARING:
+ case EDAC_REPAIR_BANK_SPARING:
+ case EDAC_REPAIR_RANK_SPARING:
+ *repair_type = edac_repair_type[ctx->repair_type];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define CXL_SPARING_GET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_get_##attrb( \
+ struct device *dev, void *drv_data, data_type *val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ *val = ctx->attrb; \
+ \
+ return 0; \
+ }
+CXL_SPARING_GET_ATTR(persist_mode, bool)
+CXL_SPARING_GET_ATTR(dpa, u64)
+CXL_SPARING_GET_ATTR(nibble_mask, u32)
+CXL_SPARING_GET_ATTR(bank_group, u32)
+CXL_SPARING_GET_ATTR(bank, u32)
+CXL_SPARING_GET_ATTR(rank, u32)
+CXL_SPARING_GET_ATTR(row, u32)
+CXL_SPARING_GET_ATTR(column, u32)
+CXL_SPARING_GET_ATTR(channel, u32)
+CXL_SPARING_GET_ATTR(sub_channel, u32)
+
+#define CXL_SPARING_SET_ATTR(attrb, data_type) \
+ static int cxl_mem_sparing_set_##attrb(struct device *dev, \
+ void *drv_data, data_type val) \
+ { \
+ struct cxl_mem_sparing_context *ctx = drv_data; \
+ \
+ ctx->attrb = val; \
+ \
+ return 0; \
+ }
+CXL_SPARING_SET_ATTR(nibble_mask, u32)
+CXL_SPARING_SET_ATTR(bank_group, u32)
+CXL_SPARING_SET_ATTR(bank, u32)
+CXL_SPARING_SET_ATTR(rank, u32)
+CXL_SPARING_SET_ATTR(row, u32)
+CXL_SPARING_SET_ATTR(column, u32)
+CXL_SPARING_SET_ATTR(channel, u32)
+CXL_SPARING_SET_ATTR(sub_channel, u32)
+
+static int cxl_mem_sparing_set_persist_mode(struct device *dev, void *drv_data,
+ bool persist_mode)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if ((persist_mode && ctx->cap_hard_sparing) ||
+ (!persist_mode && ctx->cap_soft_sparing))
+ ctx->persist_mode = persist_mode;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int cxl_get_mem_sparing_safe_when_in_use(struct device *dev,
+ void *drv_data, bool *safe)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ *safe = ctx->cap_safe_when_in_use;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_min_dpa(struct device *dev, void *drv_data,
+ u64 *min_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_get_max_dpa(struct device *dev, void *drv_data,
+ u64 *max_dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+ struct cxl_memdev *cxlmd = ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ return -EINVAL;
+
+ ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_do_mem_sparing(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_mem_sparing_context *ctx = drv_data;
+
+ if (val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_sparing(dev, ctx);
+}
+
+#define RANK_OPS \
+ .get_repair_type = cxl_mem_sparing_get_repair_type, \
+ .get_persist_mode = cxl_mem_sparing_get_persist_mode, \
+ .set_persist_mode = cxl_mem_sparing_set_persist_mode, \
+ .get_repair_safe_when_in_use = cxl_get_mem_sparing_safe_when_in_use, \
+ .get_min_dpa = cxl_mem_sparing_get_min_dpa, \
+ .get_max_dpa = cxl_mem_sparing_get_max_dpa, \
+ .get_dpa = cxl_mem_sparing_get_dpa, \
+ .set_dpa = cxl_mem_sparing_set_dpa, \
+ .get_nibble_mask = cxl_mem_sparing_get_nibble_mask, \
+ .set_nibble_mask = cxl_mem_sparing_set_nibble_mask, \
+ .get_rank = cxl_mem_sparing_get_rank, \
+ .set_rank = cxl_mem_sparing_set_rank, \
+ .get_channel = cxl_mem_sparing_get_channel, \
+ .set_channel = cxl_mem_sparing_set_channel, \
+ .do_repair = cxl_do_mem_sparing
+
+#define BANK_OPS \
+ RANK_OPS, .get_bank_group = cxl_mem_sparing_get_bank_group, \
+ .set_bank_group = cxl_mem_sparing_set_bank_group, \
+ .get_bank = cxl_mem_sparing_get_bank, \
+ .set_bank = cxl_mem_sparing_set_bank
+
+#define ROW_OPS \
+ BANK_OPS, .get_row = cxl_mem_sparing_get_row, \
+ .set_row = cxl_mem_sparing_set_row
+
+#define CACHELINE_OPS \
+ ROW_OPS, .get_column = cxl_mem_sparing_get_column, \
+ .set_column = cxl_mem_sparing_set_column, \
+ .get_sub_channel = cxl_mem_sparing_get_sub_channel, \
+ .set_sub_channel = cxl_mem_sparing_set_sub_channel
+
+static const struct edac_mem_repair_ops cxl_rank_sparing_ops = {
+ RANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_bank_sparing_ops = {
+ BANK_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_row_sparing_ops = {
+ ROW_OPS,
+};
+
+static const struct edac_mem_repair_ops cxl_cacheline_sparing_ops = {
+ CACHELINE_OPS,
+};
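The four ops tables above are built by nesting macros, so each finer-grained sparing type inherits every coarser accessor; e.g. BANK_OPS expands to RANK_OPS plus exactly four callbacks:

/* What 'BANK_OPS' in cxl_bank_sparing_ops denotes, spelled out: */
static const struct edac_mem_repair_ops example_bank_ops = {
	RANK_OPS,
	.get_bank_group = cxl_mem_sparing_get_bank_group,
	.set_bank_group = cxl_mem_sparing_set_bank_group,
	.get_bank = cxl_mem_sparing_get_bank,
	.set_bank = cxl_mem_sparing_set_bank,
};

ROW_OPS then adds the row accessors on top, and CACHELINE_OPS adds column and sub-channel.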
+
+struct cxl_mem_sparing_desc {
+ const uuid_t repair_uuid;
+ enum edac_mem_repair_type repair_type;
+ const struct edac_mem_repair_ops *repair_ops;
+};
+
+static const struct cxl_mem_sparing_desc mem_sparing_desc[] = {
+ {
+ .repair_uuid = CXL_FEAT_CACHELINE_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_CACHELINE_SPARING,
+ .repair_ops = &cxl_cacheline_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_ROW_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_ROW_SPARING,
+ .repair_ops = &cxl_row_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_BANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_BANK_SPARING,
+ .repair_ops = &cxl_bank_sparing_ops,
+ },
+ {
+ .repair_uuid = CXL_FEAT_RANK_SPARING_UUID,
+ .repair_type = EDAC_REPAIR_RANK_SPARING,
+ .repair_ops = &cxl_rank_sparing_ops,
+ },
+};
+
+static int cxl_memdev_sparing_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ const struct cxl_mem_sparing_desc *desc,
+ u8 repair_inst)
+{
+ struct cxl_mem_sparing_context *cxl_sparing_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &desc->repair_uuid);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sparing_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sparing_ctx),
+ GFP_KERNEL);
+ if (!cxl_sparing_ctx)
+ return -ENOMEM;
+
+ *cxl_sparing_ctx = (struct cxl_mem_sparing_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = desc->repair_type,
+ .instance = repair_inst++,
+ };
+ uuid_copy(&cxl_sparing_ctx->repair_uuid, &desc->repair_uuid);
+
+ ret = cxl_mem_sparing_get_attrbs(cxl_sparing_ctx);
+ if (ret)
+ return ret;
+
+ if (cxl_sparing_ctx->cap_soft_sparing)
+ cxl_sparing_ctx->persist_mode = 0;
+ else if (cxl_sparing_ctx->cap_hard_sparing)
+ cxl_sparing_ctx->persist_mode = 1;
+ else
+ return -EOPNOTSUPP;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sparing_ctx->instance;
+ ras_feature->mem_repair_ops = desc->repair_ops;
+ ras_feature->ctx = cxl_sparing_ctx;
+
+ return 0;
+}
+
+/*
+ * CXL memory soft PPR & hard PPR control
+ */
+struct cxl_ppr_context {
+ uuid_t repair_uuid;
+ u8 instance;
+ u16 get_feat_size;
+ u16 set_feat_size;
+ u8 get_version;
+ u8 set_version;
+ u16 effects;
+ u8 op_class;
+ u8 op_subclass;
+ bool cap_dpa;
+ bool cap_nib_mask;
+ bool media_accessible;
+ bool data_retained;
+ struct cxl_memdev *cxlmd;
+ enum edac_mem_repair_type repair_type;
+ bool persist_mode;
+ u64 dpa;
+ u32 nibble_mask;
+};
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.2.1 Table 8-128 sPPR Feature Readable Attributes
+ *
+ * See CXL rev 3.2 @8.2.10.7.2.2 Table 8-131 hPPR Feature Readable Attributes
+ */
+
+#define CXL_PPR_OP_CAP_DEVICE_INITIATED BIT(0)
+#define CXL_PPR_OP_MODE_DEV_INITIATED BIT(0)
+
+#define CXL_PPR_FLAG_DPA_SUPPORT_MASK BIT(0)
+#define CXL_PPR_FLAG_NIB_SUPPORT_MASK BIT(1)
+#define CXL_PPR_FLAG_MEM_SPARING_EV_REC_SUPPORT_MASK BIT(2)
+#define CXL_PPR_FLAG_DEV_INITED_PPR_AT_BOOT_CAP_MASK BIT(3)
+
+#define CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK BIT(0)
+#define CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK BIT(2)
+
+#define CXL_PPR_SPARING_EV_REC_EN_MASK BIT(0)
+#define CXL_PPR_DEV_INITED_PPR_AT_BOOT_EN_MASK BIT(1)
+
+#define CXL_PPR_GET_CAP_DPA(flags) \
+ FIELD_GET(CXL_PPR_FLAG_DPA_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_CAP_NIB_MASK(flags) \
+ FIELD_GET(CXL_PPR_FLAG_NIB_SUPPORT_MASK, flags)
+#define CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK, \
+ restriction_flags) ^ 1)
+#define CXL_PPR_GET_DATA_RETAINED(restriction_flags) \
+ (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK, \
+ restriction_flags) ^ 1)
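
Note the `^ 1` in the last two helpers: the restriction flags are stated in the negative sense (a set bit means the media is not accessible, or data is not retained, while the maintenance operation runs), so the helpers invert the raw bit into a positive boolean. A quick worked example with an assumed raw value:

        /* Illustrative: bit 0 and bit 2 set in the raw restriction flags */
        u16 restriction_flags = 0x5;

        /* bit 0 set: media NOT accessible during repair -> helper yields 0 */
        bool media_accessible = CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags);

        /* bit 2 set: data NOT retained across repair -> helper yields 0 */
        bool data_retained = CXL_PPR_GET_DATA_RETAINED(restriction_flags);
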
+
+struct cxl_memdev_ppr_rd_attrbs {
+ struct cxl_memdev_repair_rd_attrbs_hdr hdr;
+ u8 ppr_flags;
+ __le16 restriction_flags;
+ u8 ppr_op_mode;
+} __packed;
+
+/*
+ * See CXL rev 3.2 @8.2.10.7.1.2 Table 8-118 sPPR Maintenance Input Payload
+ *
+ * See CXL rev 3.2 @8.2.10.7.1.3 Table 8-119 hPPR Maintenance Input Payload
+ */
+struct cxl_memdev_ppr_maintenance_attrbs {
+ u8 flags;
+ __le64 dpa;
+ u8 nibble_mask[3];
+} __packed;
+
+static int cxl_mem_ppr_get_attrbs(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ size_t rd_data_size = sizeof(struct cxl_memdev_ppr_rd_attrbs);
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+ u16 restriction_flags;
+ size_t data_size;
+ u16 return_code;
+
+ struct cxl_memdev_ppr_rd_attrbs *rd_attrbs __free(kfree) =
+ kmalloc(rd_data_size, GFP_KERNEL);
+ if (!rd_attrbs)
+ return -ENOMEM;
+
+ data_size = cxl_get_feature(cxl_mbox, &cxl_ppr_ctx->repair_uuid,
+ CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
+ rd_data_size, 0, &return_code);
+ if (!data_size)
+ return -EIO;
+
+ cxl_ppr_ctx->op_class = rd_attrbs->hdr.op_class;
+ cxl_ppr_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
+ cxl_ppr_ctx->cap_dpa = CXL_PPR_GET_CAP_DPA(rd_attrbs->ppr_flags);
+ cxl_ppr_ctx->cap_nib_mask =
+ CXL_PPR_GET_CAP_NIB_MASK(rd_attrbs->ppr_flags);
+
+ restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
+ cxl_ppr_ctx->media_accessible =
+ CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags);
+ cxl_ppr_ctx->data_retained =
+ CXL_PPR_GET_DATA_RETAINED(restriction_flags);
+
+ return 0;
+}
+
+static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
+{
+ struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_mem_repair_attrbs attrbs = { 0 };
+
+ struct rw_semaphore *region_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_region_rwsem);
+ if (!region_lock)
+ return -EINTR;
+
+ struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
+ rwsem_read_intr_acquire(&cxl_dpa_rwsem);
+ if (!dpa_lock)
+ return -EINTR;
+
+ if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
+ /* Memory to repair must be offline */
+ if (cxl_is_memdev_memory_online(cxlmd))
+ return -EBUSY;
+ } else {
+ if (cxl_is_memdev_memory_online(cxlmd)) {
+ /* Check memory to repair is from the current boot */
+ attrbs.repair_type = CXL_PPR;
+ attrbs.dpa = cxl_ppr_ctx->dpa;
+ attrbs.nibble_mask = cxl_ppr_ctx->nibble_mask;
+ if (!cxl_find_rec_dram(cxlmd, &attrbs) &&
+ !cxl_find_rec_gen_media(cxlmd, &attrbs))
+ return -EINVAL;
+ }
+ }
+
+ memset(&maintenance_attrbs, 0, sizeof(maintenance_attrbs));
+ maintenance_attrbs.flags = 0;
+ maintenance_attrbs.dpa = cpu_to_le64(cxl_ppr_ctx->dpa);
+ put_unaligned_le24(cxl_ppr_ctx->nibble_mask,
+ maintenance_attrbs.nibble_mask);
+
+ return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
+ cxl_ppr_ctx->op_class,
+ cxl_ppr_ctx->op_subclass,
+ &maintenance_attrbs,
+ sizeof(maintenance_attrbs));
+}
+
+static int cxl_ppr_get_repair_type(struct device *dev, void *drv_data,
+ const char **repair_type)
+{
+ *repair_type = edac_repair_type[EDAC_REPAIR_PPR];
+
+ return 0;
+}
+
+static int cxl_ppr_get_persist_mode(struct device *dev, void *drv_data,
+ bool *persist_mode)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *persist_mode = cxl_ppr_ctx->persist_mode;
+
+ return 0;
+}
+
+static int cxl_get_ppr_safe_when_in_use(struct device *dev, void *drv_data,
+ bool *safe)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *safe = cxl_ppr_ctx->media_accessible && cxl_ppr_ctx->data_retained;
+
+ return 0;
+}
+
+static int cxl_ppr_get_min_dpa(struct device *dev, void *drv_data, u64 *min_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *min_dpa = cxlds->dpa_res.start;
+
+ return 0;
+}
+
+static int cxl_ppr_get_max_dpa(struct device *dev, void *drv_data, u64 *max_dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ *max_dpa = cxlds->dpa_res.end;
+
+ return 0;
+}
+
+static int cxl_ppr_get_dpa(struct device *dev, void *drv_data, u64 *dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *dpa = cxl_ppr_ctx->dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ return -EINVAL;
+
+ cxl_ppr_ctx->dpa = dpa;
+
+ return 0;
+}
+
+static int cxl_ppr_get_nibble_mask(struct device *dev, void *drv_data,
+ u32 *nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ *nibble_mask = cxl_ppr_ctx->nibble_mask;
+
+ return 0;
+}
+
+static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
+ u32 nibble_mask)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ cxl_ppr_ctx->nibble_mask = nibble_mask;
+
+ return 0;
+}
+
+static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
+{
+ struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+
+ if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR)
+ return -EINVAL;
+
+ return cxl_mem_perform_ppr(cxl_ppr_ctx);
+}
+
+static const struct edac_mem_repair_ops cxl_sppr_ops = {
+ .get_repair_type = cxl_ppr_get_repair_type,
+ .get_persist_mode = cxl_ppr_get_persist_mode,
+ .get_repair_safe_when_in_use = cxl_get_ppr_safe_when_in_use,
+ .get_min_dpa = cxl_ppr_get_min_dpa,
+ .get_max_dpa = cxl_ppr_get_max_dpa,
+ .get_dpa = cxl_ppr_get_dpa,
+ .set_dpa = cxl_ppr_set_dpa,
+ .get_nibble_mask = cxl_ppr_get_nibble_mask,
+ .set_nibble_mask = cxl_ppr_set_nibble_mask,
+ .do_repair = cxl_do_ppr,
+};
+
+static int cxl_memdev_soft_ppr_init(struct cxl_memdev *cxlmd,
+ struct edac_dev_feature *ras_feature,
+ u8 repair_inst)
+{
+ struct cxl_ppr_context *cxl_sppr_ctx;
+ struct cxl_feat_entry *feat_entry;
+ int ret;
+
+ feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
+ &CXL_FEAT_SPPR_UUID);
+ if (IS_ERR(feat_entry))
+ return -EOPNOTSUPP;
+
+ if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
+ return -EOPNOTSUPP;
+
+ cxl_sppr_ctx =
+ devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sppr_ctx), GFP_KERNEL);
+ if (!cxl_sppr_ctx)
+ return -ENOMEM;
+
+ *cxl_sppr_ctx = (struct cxl_ppr_context){
+ .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
+ .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
+ .get_version = feat_entry->get_feat_ver,
+ .set_version = feat_entry->set_feat_ver,
+ .effects = le16_to_cpu(feat_entry->effects),
+ .cxlmd = cxlmd,
+ .repair_type = EDAC_REPAIR_PPR,
+ .persist_mode = false,
+ .instance = repair_inst,
+ };
+ uuid_copy(&cxl_sppr_ctx->repair_uuid, &CXL_FEAT_SPPR_UUID);
+
+ ret = cxl_mem_ppr_get_attrbs(cxl_sppr_ctx);
+ if (ret)
+ return ret;
+
+ ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
+ ras_feature->instance = cxl_sppr_ctx->instance;
+ ras_feature->mem_repair_ops = &cxl_sppr_ops;
+ ras_feature->ctx = cxl_sppr_ctx;
+
+ return 0;
+}
+
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ u8 repair_inst = 0;
+ int rc;
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_SCRUB)) {
+ rc = cxl_memdev_scrub_init(cxlmd, &ras_features[num_ras_features], 0);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_ECS)) {
+ rc = cxl_memdev_ecs_init(cxlmd, &ras_features[num_ras_features]);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP)
+ num_ras_features++;
+ }
+
+ if (IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR)) {
+ for (int i = 0; i < CXL_MEM_SPARING_MAX; i++) {
+ rc = cxl_memdev_sparing_init(cxlmd,
+ &ras_features[num_ras_features],
+ &mem_sparing_desc[i], repair_inst);
+ if (rc == -EOPNOTSUPP)
+ continue;
+ if (rc < 0)
+ return rc;
+
+ repair_inst++;
+ num_ras_features++;
+ }
+
+ rc = cxl_memdev_soft_ppr_init(cxlmd, &ras_features[num_ras_features],
+ repair_inst);
+ if (rc < 0 && rc != -EOPNOTSUPP)
+ return rc;
+
+ if (rc != -EOPNOTSUPP) {
+ repair_inst++;
+ num_ras_features++;
+ }
+
+ if (repair_inst) {
+ struct cxl_mem_err_rec *array_rec =
+ devm_kzalloc(&cxlmd->dev, sizeof(*array_rec),
+ GFP_KERNEL);
+ if (!array_rec)
+ return -ENOMEM;
+
+ xa_init(&array_rec->rec_gen_media);
+ xa_init(&array_rec->rec_dram);
+ cxlmd->err_rec_array = array_rec;
+ }
+ }
+
+ if (!num_ras_features)
+ return -EINVAL;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlmd->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlmd->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_register, "CXL");
+
+int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{
+ struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
+ int num_ras_features = 0;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_SCRUB))
+ return 0;
+
+ rc = cxl_region_scrub_init(cxlr, &ras_features[num_ras_features], 0);
+ if (rc < 0)
+ return rc;
+
+ num_ras_features++;
+
+ char *cxl_dev_name __free(kfree) =
+ kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlr->dev));
+ if (!cxl_dev_name)
+ return -ENOMEM;
+
+ return edac_dev_register(&cxlr->dev, cxl_dev_name, NULL,
+ num_ras_features, ras_features);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_region_edac_register, "CXL");
+
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{
+ struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
+ struct cxl_event_gen_media *rec_gen_media;
+ struct cxl_event_dram *rec_dram;
+ unsigned long index;
+
+ if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
+ return;
+
+ xa_for_each(&array_rec->rec_dram, index, rec_dram)
+ kfree(rec_dram);
+ xa_destroy(&array_rec->rec_dram);
+
+ xa_for_each(&array_rec->rec_gen_media, index, rec_gen_media)
+ kfree(rec_gen_media);
+ xa_destroy(&array_rec->rec_gen_media);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_release, "CXL");
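
Registration and release are split across probe and release paths: registration runs from cxl_mem_probe() (see the mem.c hunk below) and is best effort, while the record xarrays allocated under devm_cxl_memdev_edac_register() are freed from cxl_memdev_release() (see the memdev.c hunk below). A sketch of the pairing as wired up by this patch:

        /* Probe side (drivers/cxl/mem.c): failure is logged, not fatal */
        rc = devm_cxl_memdev_edac_register(cxlmd);
        if (rc)
                dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);

        /* Release side (drivers/cxl/core/memdev.c): drop cached event records */
        devm_cxl_memdev_edac_release(cxlmd);
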
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
index 1498e2369c37..6f2eae1eb126 100644
--- a/drivers/cxl/core/features.c
+++ b/drivers/cxl/core/features.c
@@ -9,6 +9,16 @@
#include "core.h"
#include "cxlmem.h"
+/**
+ * DOC: cxl features
+ *
+ * CXL Features:
+ * A CXL device that includes a mailbox supports commands that allow
+ * listing, getting, and setting of optionally defined features such
+ * as memory sparing or post package repair (PPR). Vendors may define
+ * custom features for the device.
+ */
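
The consumer pattern used throughout this patch follows from that: look the feature up by UUID, then let its flags gate whether it may be modified. A minimal sketch, assuming a struct cxl_memdev *cxlmd in scope (it mirrors cxl_memdev_soft_ppr_init() above):

        struct cxl_feat_entry *feat;

        feat = cxl_feature_info(to_cxlfs(cxlmd->cxlds), &CXL_FEAT_SPPR_UUID);
        if (IS_ERR(feat))
                return -EOPNOTSUPP;     /* device does not list this feature */

        if (!(le32_to_cpu(feat->flags) & CXL_FEATURE_F_CHANGEABLE))
                return -EOPNOTSUPP;     /* feature exists but is read-only */
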
+
/* All the features below are exclusive to the kernel */
static const uuid_t cxl_exclusive_feats[] = {
CXL_FEAT_PATROL_SCRUB_UUID,
@@ -36,7 +46,7 @@ static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}
-inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
return cxlds->cxlfs;
}
@@ -355,17 +365,11 @@ static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}
-static struct cxl_feat_entry *
-get_support_feature_info(struct cxl_features_state *cxlfs,
- const struct fwctl_rpc_cxl *rpc_in)
+struct cxl_feat_entry *
+cxl_feature_info(struct cxl_features_state *cxlfs,
+ const uuid_t *uuid)
{
struct cxl_feat_entry *feat;
- const uuid_t *uuid;
-
- if (rpc_in->op_size < sizeof(uuid))
- return ERR_PTR(-EINVAL);
-
- uuid = &rpc_in->set_feat_in.uuid;
for (int i = 0; i < cxlfs->entries->num_features; i++) {
feat = &cxlfs->entries->ent[i];
@@ -416,14 +420,6 @@ static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
rpc_out->size = struct_size(feat_out, ents, requested);
feat_out = &rpc_out->get_sup_feats_out;
- if (requested == 0) {
- feat_out->num_entries = cpu_to_le16(requested);
- feat_out->supported_feats =
- cpu_to_le16(cxlfs->entries->num_features);
- rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
- *out_len = out_size;
- return no_free_ptr(rpc_out);
- }
for (i = start, pos = &feat_out->ents[0];
i < cxlfs->entries->num_features; i++, pos++) {
@@ -547,7 +543,10 @@ static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
struct cxl_feat_entry *feat;
u32 flags;
- feat = get_support_feature_info(cxlfs, rpc_in);
+ if (rpc_in->op_size < sizeof(uuid_t))
+ return false;
+
+ feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
if (IS_ERR(feat))
return false;
@@ -614,11 +613,7 @@ static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
switch (opcode) {
case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
case CXL_MBOX_OP_GET_FEATURE:
- if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
- return false;
- if (scope >= FWCTL_RPC_CONFIGURATION)
- return true;
- return false;
+ return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
case CXL_MBOX_OP_SET_FEATURE:
if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
return false;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 70cae4ebf8a4..ab1007495f6b 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -34,7 +34,8 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
+ dev_dbg(port->uport_dev, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&port->dev));
return 0;
}
@@ -603,7 +604,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
return 0;
}
-static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -666,15 +667,15 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long lon
skip = res->start - skip_start;
if (size > avail) {
- dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- res->name, &avail);
+ dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
+ res->name, (u64)avail);
return -ENOSPC;
}
return __cxl_dpa_reserve(cxled, start, size, skip);
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_port *port = cxled_to_port(cxled);
int rc;
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index d72764056ce6..2689e6453c5a 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -922,12 +922,19 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
hpa_alias = hpa - cache_size;
}
- if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
+ if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
hpa_alias, &evt->gen_media);
- else if (event_type == CXL_CPER_EVENT_DRAM)
+ } else if (event_type == CXL_CPER_EVENT_DRAM) {
+ if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
+ dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+
trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
&evt->dram);
+ }
}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index a16a5886d40a..f88a13adf7fa 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -27,6 +27,7 @@ static void cxl_memdev_release(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
ida_free(&cxl_memdev_ida, cxlmd->id);
+ devm_cxl_memdev_edac_release(cxlmd);
kfree(cxlmd);
}
@@ -153,8 +154,8 @@ static ssize_t security_state_show(struct device *dev,
return sysfs_emit(buf, "frozen\n");
if (state & CXL_PMEM_SEC_STATE_LOCKED)
return sysfs_emit(buf, "locked\n");
- else
- return sysfs_emit(buf, "unlocked\n");
+
+ return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
__ATTR(state, 0444, security_state_show, NULL);
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 3b80e9a76ba8..b50551601c2e 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -415,17 +415,20 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
*/
if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
return devm_cxl_enable_mem(&port->dev, cxlds);
- else if (!hdm)
- return -ENODEV;
- root = to_cxl_port(port->dev.parent);
- while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
- root = to_cxl_port(root->dev.parent);
- if (!is_cxl_root(root)) {
- dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ /*
+ * If the HDM Decoder Capability does not exist and the DVSEC was
+ * not set up, DVSEC-based emulation cannot be used.
+ */
+ if (!hdm)
return -ENODEV;
- }
+ /* The HDM Decoder Capability exists but is globally disabled. */
+
+ /*
+ * If the DVSEC CXL Range registers are not enabled, just
+ * enable and use the HDM Decoder Capability registers.
+ */
if (!info->mem_enabled) {
rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
if (rc)
@@ -434,6 +437,26 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return devm_cxl_enable_mem(&port->dev, cxlds);
}
+ /*
+ * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
+ * [High,Low] when HDM operation is enabled the range register values
+ * are ignored by the device, but the spec also recommends matching the
+ * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
+ * are expected even though Linux does not require or maintain that
+ * match. Check if at least one DVSEC range is enabled and allowed by
+ * the platform. That is, the DVSEC range must be covered by a locked
+ * platform window (CFMWS). Fail otherwise as the endpoint's decoders
+ * cannot be used.
+ */
+
+ root = to_cxl_port(port->dev.parent);
+ while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
+ root = to_cxl_port(root->dev.parent);
+ if (!is_cxl_root(root)) {
+ dev_err(dev, "Failed to acquire root port for HDM enable\n");
+ return -ENODEV;
+ }
+
for (i = 0, allowed = 0; i < info->ranges; i++) {
struct device *cxld_dev;
@@ -453,15 +476,6 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
return -ENXIO;
}
- /*
- * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
- * [High,Low] when HDM operation is enabled the range register values
- * are ignored by the device, but the spec also recommends matching the
- * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
- * are expected even though Linux does not require or maintain that
- * match. If at least one DVSEC range is enabled and allowed, skip HDM
- * Decoder Capability Enable.
- */
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 726bd4a7de27..eb46c6764d20 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -602,17 +602,19 @@ struct cxl_port *to_cxl_port(const struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, "CXL");
+struct cxl_port *parent_port_of(struct cxl_port *port)
+{
+ if (!port || !port->parent_dport)
+ return NULL;
+ return port->parent_dport->port;
+}
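
parent_port_of() centralizes the NULL checks that callers previously open-coded, and it returns NULL both for a NULL @port and at the topology root, so upward walks terminate naturally. Usage as in cxl_calc_interleave_pos() below:

        /* Walk from the endpoint toward the CXL root */
        for (iter = port; iter; iter = parent_port_of(iter)) {
                if (is_cxl_root(iter))
                        break;
                /* ... refine state at each switch level ... */
        }
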
+
static void unregister_port(void *_port)
{
struct cxl_port *port = _port;
- struct cxl_port *parent;
+ struct cxl_port *parent = parent_port_of(port);
struct device *lock_dev;
- if (is_cxl_root(port))
- parent = NULL;
- else
- parent = to_cxl_port(port->dev.parent);
-
/*
* CXL root port's and the first level of ports are unregistered
* under the platform firmware device lock, all other ports are
@@ -1035,15 +1037,6 @@ struct cxl_root *find_cxl_root(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, "CXL");
-void put_cxl_root(struct cxl_root *cxl_root)
-{
- if (!cxl_root)
- return;
-
- put_device(&cxl_root->port.dev);
-}
-EXPORT_SYMBOL_NS_GPL(put_cxl_root, "CXL");
-
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index c3f4dc244df7..6e5e1460068d 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -231,11 +231,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
- } else {
- dev_WARN(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
- return -ENXIO;
}
+ dev_WARN(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
+ return -ENXIO;
}
cpu_cache_invalidate_memregion(IORES_DESC_CXL);
@@ -865,10 +864,23 @@ static int match_auto_decoder(struct device *dev, const void *data)
return 0;
}
+/**
+ * cxl_port_pick_region_decoder() - assign or lookup a decoder for a region
+ * @port: a port in the ancestry of the endpoint implied by @cxled
+ * @cxled: endpoint decoder to be, or currently, mapped by @port
+ * @cxlr: region to establish, or validate, decode @port
+ *
+ * In the region creation path cxl_port_pick_region_decoder() is an
+ * allocator that finds a free decoder. In the region assembly path, it
+ * recalls the decoder that platform firmware picked, for validation
+ * purposes.
+ *
+ * The result is recorded in a 'struct cxl_region_ref' in @port.
+ */
static struct cxl_decoder *
-cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region *cxlr)
+cxl_port_pick_region_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
@@ -916,7 +928,8 @@ static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
static struct cxl_region_ref *
alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled)
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_decoder *cxld)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -930,9 +943,6 @@ alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
continue;
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (auto_order_ok(port, iter->region, cxld))
continue;
}
@@ -1014,19 +1024,11 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
return 0;
}
-static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
- struct cxl_endpoint_decoder *cxled,
- struct cxl_region_ref *cxl_rr)
+static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region_ref *cxl_rr,
+ struct cxl_decoder *cxld)
{
- struct cxl_decoder *cxld;
-
- cxld = cxl_region_find_decoder(port, cxled, cxlr);
- if (!cxld) {
- dev_dbg(&cxlr->dev, "%s: no decoder available\n",
- dev_name(&port->dev));
- return -EBUSY;
- }
-
if (cxld->region) {
dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
dev_name(&port->dev), dev_name(&cxld->dev),
@@ -1117,7 +1119,16 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr, cxled);
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_port_pick_region_decoder(port, cxled, cxlr);
+ if (!cxld) {
+ dev_dbg(&cxlr->dev, "%s: no decoder available\n",
+ dev_name(&port->dev));
+ return -EBUSY;
+ }
+
+ cxl_rr = alloc_region_ref(port, cxlr, cxled, cxld);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
@@ -1126,7 +1137,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
nr_targets_inc = true;
- rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
+ rc = cxl_rr_assign_decoder(port, cxlr, cxled, cxl_rr, cxld);
if (rc)
goto out_erase;
}
@@ -1446,7 +1457,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
- cxld->interleave_granularity != ig ||
+ (iw > 1 && cxld->interleave_granularity != ig) ||
!region_res_match_cxl_range(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
@@ -1748,13 +1759,6 @@ static int cmp_interleave_pos(const void *a, const void *b)
return cxled_a->pos - cxled_b->pos;
}
-static struct cxl_port *next_port(struct cxl_port *port)
-{
- if (!port->parent_dport)
- return NULL;
- return port->parent_dport->port;
-}
-
static int match_switch_decoder_by_range(struct device *dev,
const void *data)
{
@@ -1781,7 +1785,7 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
struct device *dev;
int rc = -ENXIO;
- parent = next_port(port);
+ parent = parent_port_of(port);
if (!parent)
return rc;
@@ -1805,6 +1809,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range,
}
put_device(dev);
+ if (rc)
+ dev_err(port->uport_dev,
+ "failed to find %s:%s in target list of %s\n",
+ dev_name(&port->dev),
+ dev_name(port->parent_dport->dport_dev),
+ dev_name(&cxlsd->cxld.dev));
+
return rc;
}
@@ -1861,7 +1872,7 @@ static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
*/
/* Iterate from endpoint to root_port refining the position */
- for (iter = port; iter; iter = next_port(iter)) {
+ for (iter = port; iter; iter = parent_port_of(iter)) {
if (is_cxl_root(iter))
break;
@@ -1940,7 +1951,9 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
return -EBUSY;
- } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
+ }
+
+ if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "interleave config missing\n");
return -ENXIO;
}
@@ -2160,6 +2173,12 @@ static int attach_target(struct cxl_region *cxlr,
rc = cxl_region_attach(cxlr, cxled, pos);
up_read(&cxl_dpa_rwsem);
up_write(&cxl_region_rwsem);
+
+ if (rc)
+ dev_warn(cxled->cxld.dev.parent,
+ "failed to attach %s to %s: %d\n",
+ dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
+
return rc;
}
@@ -3196,20 +3215,49 @@ err:
return rc;
}
-static int match_root_decoder_by_range(struct device *dev,
- const void *data)
+static int match_decoder_by_range(struct device *dev, const void *data)
{
const struct range *r1, *r2 = data;
- struct cxl_root_decoder *cxlrd;
+ struct cxl_decoder *cxld;
- if (!is_root_decoder(dev))
+ if (!is_switch_decoder(dev))
return 0;
- cxlrd = to_cxl_root_decoder(dev);
- r1 = &cxlrd->cxlsd.cxld.hpa_range;
+ cxld = to_cxl_decoder(dev);
+ r1 = &cxld->hpa_range;
return range_contains(r1, r2);
}
+static struct cxl_decoder *
+cxl_port_find_switch_decoder(struct cxl_port *port, struct range *hpa)
+{
+ struct device *cxld_dev = device_find_child(&port->dev, hpa,
+ match_decoder_by_range);
+
+ return cxld_dev ? to_cxl_decoder(cxld_dev) : NULL;
+}
+
+static struct cxl_root_decoder *
+cxl_find_root_decoder(struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxled_to_port(cxled);
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+ struct cxl_decoder *root, *cxld = &cxled->cxld;
+ struct range *hpa = &cxld->hpa_range;
+
+ root = cxl_port_find_switch_decoder(&cxl_root->port, hpa);
+ if (!root) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s no CXL window for range %#llx:%#llx\n",
+ dev_name(&cxlmd->dev), dev_name(&cxld->dev),
+ cxld->hpa_range.start, cxld->hpa_range.end);
+ return NULL;
+ }
+
+ return to_cxl_root_decoder(&root->dev);
+}
+
static int match_region_by_range(struct device *dev, const void *data)
{
struct cxl_region_params *p;
@@ -3376,47 +3424,45 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
return cxlr;
}
-int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+cxl_find_region_by_range(struct cxl_root_decoder *cxlrd, struct range *hpa)
+{
+ struct device *region_dev;
+
+ region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
+ match_region_by_range);
+ if (!region_dev)
+ return NULL;
+
+ return to_cxl_region(region_dev);
+}
+
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct range *hpa = &cxled->cxld.hpa_range;
- struct cxl_decoder *cxld = &cxled->cxld;
- struct device *cxlrd_dev, *region_dev;
- struct cxl_root_decoder *cxlrd;
struct cxl_region_params *p;
- struct cxl_region *cxlr;
bool attach = false;
int rc;
- cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
- match_root_decoder_by_range);
- if (!cxlrd_dev) {
- dev_err(cxlmd->dev.parent,
- "%s:%s no CXL window for range %#llx:%#llx\n",
- dev_name(&cxlmd->dev), dev_name(&cxld->dev),
- cxld->hpa_range.start, cxld->hpa_range.end);
+ struct cxl_root_decoder *cxlrd __free(put_cxl_root_decoder) =
+ cxl_find_root_decoder(cxled);
+ if (!cxlrd)
return -ENXIO;
- }
-
- cxlrd = to_cxl_root_decoder(cxlrd_dev);
/*
* Ensure that if multiple threads race to construct_region() for @hpa
* one does the construction and the others add to that.
*/
mutex_lock(&cxlrd->range_lock);
- region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
- match_region_by_range);
- if (!region_dev) {
+ struct cxl_region *cxlr __free(put_cxl_region) =
+ cxl_find_region_by_range(cxlrd, hpa);
+ if (!cxlr)
cxlr = construct_region(cxlrd, cxled);
- region_dev = &cxlr->dev;
- } else
- cxlr = to_cxl_region(region_dev);
mutex_unlock(&cxlrd->range_lock);
rc = PTR_ERR_OR_ZERO(cxlr);
if (rc)
- goto out;
+ return rc;
attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
@@ -3436,9 +3482,6 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
p->res);
}
- put_device(region_dev);
-out:
- put_device(cxlrd_dev);
return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
@@ -3537,8 +3580,18 @@ out:
switch (cxlr->mode) {
case CXL_PARTMODE_PMEM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
return devm_cxl_add_pmem_region(cxlr);
case CXL_PARTMODE_RAM:
+ rc = devm_cxl_region_edac_register(cxlr);
+ if (rc)
+ dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n",
+ cxlr->id);
+
/*
* The region cannot be managed by CXL if any portion of
* it is already online as 'System RAM'
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index a9ab46eb0610..3f1695c96abc 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -724,6 +724,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
+struct cxl_port *parent_port_of(struct cxl_port *port);
void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
@@ -736,10 +737,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops);
struct cxl_root *find_cxl_root(struct cxl_port *port);
-void put_cxl_root(struct cxl_root *cxl_root);
-DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_device(&_T->port.dev))
DEFINE_FREE(put_cxl_port, struct cxl_port *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+DEFINE_FREE(put_cxl_root_decoder, struct cxl_root_decoder *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
+DEFINE_FREE(put_cxl_region, struct cxl_region *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -856,8 +859,7 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
-int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled);
+int cxl_add_to_region(struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
#else
@@ -869,8 +871,7 @@ static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
return NULL;
}
-static inline int cxl_add_to_region(struct cxl_port *root,
- struct cxl_endpoint_decoder *cxled)
+static inline int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
{
return 0;
}
@@ -912,4 +913,14 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
u16 cxl_gpf_get_dvsec(struct device *dev);
+static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem)
+{
+ if (down_read_interruptible(rwsem))
+ return NULL;
+
+ return rwsem;
+}
+
+DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T))
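
Paired with rwsem_read_intr_acquire(), this gives scope-based read locking: a NULL return signals an interrupted acquire, and up_read() runs automatically when the guard pointer leaves scope. cxl_mem_perform_ppr() earlier in this patch uses it like so:

        struct rw_semaphore *region_lock __free(rwsem_read_release) =
                rwsem_read_intr_acquire(&cxl_region_rwsem);
        if (!region_lock)
                return -EINTR;  /* interrupted while waiting for the lock */

        /* ... read-side critical section; the lock drops on scope exit ... */
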
+
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 3ec6b906371b..551b0ba2caa1 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -45,6 +45,11 @@
* @endpoint: connection to the CXL port topology for this memory device
* @id: id number of this memdev instance.
* @depth: endpoint port depth
+ * @scrub_cycle: current scrub cycle set for this device
+ * @scrub_region_id: id number of a backed region (if any) for which the current scrub cycle is set
+ * @err_rec_array: List of xarrays storing the memdev error records, used to
+ * check that the attributes for a memory repair operation are from the
+ * current boot.
*/
struct cxl_memdev {
struct device dev;
@@ -56,6 +61,9 @@ struct cxl_memdev {
struct cxl_port *endpoint;
int id;
int depth;
+ u8 scrub_cycle;
+ int scrub_region_id;
+ void *err_rec_array;
};
static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
@@ -527,6 +535,7 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_SUPPORTED_FEATURES = 0x0500,
CXL_MBOX_OP_GET_FEATURE = 0x0501,
CXL_MBOX_OP_SET_FEATURE = 0x0502,
+ CXL_MBOX_OP_DO_MAINTENANCE = 0x0600,
CXL_MBOX_OP_IDENTIFY = 0x4000,
CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
@@ -853,6 +862,27 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+#ifdef CONFIG_CXL_EDAC_MEM_FEATURES
+int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd);
+int devm_cxl_region_edac_register(struct cxl_region *cxlr);
+int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt);
+int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt);
+void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd);
+#else
+static inline int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
+{ return 0; }
+static inline int devm_cxl_region_edac_register(struct cxl_region *cxlr)
+{ return 0; }
+static inline int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline int cxl_store_rec_dram(struct cxl_memdev *cxlmd,
+ union cxl_event *evt)
+{ return 0; }
+static inline void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
+{ return; }
+#endif
+
#ifdef CONFIG_CXL_SUSPEND
void cxl_mem_active_inc(void);
void cxl_mem_active_dec(void);
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 9675243bd05b..6e6777b7bafb 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -180,6 +180,10 @@ static int cxl_mem_probe(struct device *dev)
return rc;
}
+ rc = devm_cxl_memdev_edac_register(cxlmd);
+ if (rc)
+ dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);
+
/*
* The kernel may be operating out of CXL memory on this device,
* there is no spec defined way to determine whether this device
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index a35fc5552845..fe4b593331da 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -30,7 +30,7 @@ static void schedule_detach(void *cxlmd)
schedule_cxl_memdev_detach(cxlmd);
}
-static int discover_region(struct device *dev, void *root)
+static int discover_region(struct device *dev, void *unused)
{
struct cxl_endpoint_decoder *cxled;
int rc;
@@ -49,7 +49,7 @@ static int discover_region(struct device *dev, void *root)
* Region enumeration is opportunistic, if this add-event fails,
* continue to the next endpoint decoder.
*/
- rc = cxl_add_to_region(root, cxled);
+ rc = cxl_add_to_region(cxled);
if (rc)
dev_dbg(dev, "failed to add to region: %#llx-%#llx\n",
cxled->cxld.hpa_range.start, cxled->cxld.hpa_range.end);
@@ -95,7 +95,6 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_hdm *cxlhdm;
- struct cxl_port *root;
int rc;
rc = cxl_dvsec_rr_decode(cxlds, &info);
@@ -127,18 +126,10 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
return rc;
/*
- * This can't fail in practice as CXL root exit unregisters all
- * descendant ports and that in turn synchronizes with cxl_port_probe()
- */
- struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
-
- root = &cxl_root->port;
-
- /*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
*/
- device_for_each_child(&port->dev, root, discover_region);
+ device_for_each_child(&port->dev, NULL, discover_region);
return 0;
}
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index e97d47f42ee2..584c70a34b52 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -13,6 +13,7 @@
#include <linux/mman.h>
#include <linux/memory-tiers.h>
#include <linux/memory_hotplug.h>
+#include <linux/string_helpers.h>
#include "dax-private.h"
#include "bus.h"
@@ -68,7 +69,7 @@ static void kmem_put_memory_types(void)
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
- unsigned long total_len = 0;
+ unsigned long total_len = 0, orig_len = 0;
struct dax_kmem_data *data;
struct memory_dev_type *mtype;
int i, rc, mapped = 0;
@@ -97,6 +98,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
for (i = 0; i < dev_dax->nr_range; i++) {
struct range range;
+ orig_len += range_len(&dev_dax->ranges[i].range);
rc = dax_kmem_range(dev_dax, i, &range);
if (rc) {
dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
@@ -109,6 +111,12 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
if (!total_len) {
dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
return -EINVAL;
+ } else if (total_len != orig_len) {
+ char buf[16];
+
+ string_get_size(orig_len - total_len, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
+ dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf);
}
init_node_memory_type(numa_node, mtype);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5baa83b85515..2bcf9ceca997 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -19,7 +19,9 @@
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
@@ -35,35 +37,91 @@
static inline int is_dma_buf_file(struct file *);
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static DEFINE_MUTEX(debugfs_list_mutex);
-static LIST_HEAD(debugfs_list);
+static DEFINE_MUTEX(dmabuf_list_mutex);
+static LIST_HEAD(dmabuf_list);
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
- mutex_lock(&debugfs_list_mutex);
- list_add(&dmabuf->list_node, &debugfs_list);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
+ list_add(&dmabuf->list_node, &dmabuf_list);
+ mutex_unlock(&dmabuf_list_mutex);
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
if (!dmabuf)
return;
- mutex_lock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
list_del(&dmabuf->list_node);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
}
-#else
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+
+/**
+ * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
+ *
+ * Returns the first buffer in the global list of DMA-bufs that's not in the
+ * process of being destroyed. Increments that buffer's reference count to
+ * prevent buffer destruction. Callers must release the reference, either by
+ * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * First buffer from global list, with refcount elevated
+ * * NULL if no active buffers are present
+ */
+struct dma_buf *dma_buf_iter_begin(void)
{
+ struct dma_buf *ret = NULL, *dmabuf;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+/**
+ * dma_buf_iter_next - continue iteration through global list of all DMA buffers
+ * @dmabuf: [in] pointer to dma_buf
+ *
+ * Decrements the reference count on the provided buffer. Returns the next
+ * buffer from the remainder of the global list of DMA-bufs with its reference
+ * count incremented. Callers must release the reference, either by continuing
+ * iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * Next buffer from global list, with refcount elevated
+ * * NULL if no additional active buffers are present
+ */
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
+ struct dma_buf *ret = NULL;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ dma_buf_put(dmabuf);
+ list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
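
Together the two iterators give a reference-safe walk over every live DMA-buf; a caller that stops early only needs a dma_buf_put() on the buffer it still holds. An illustrative loop:

        struct dma_buf *buf;

        for (buf = dma_buf_iter_begin(); buf; buf = dma_buf_iter_next(buf)) {
                /* buf is guaranteed live here: its file reference is held */
                pr_info("dma-buf of %zu bytes\n", buf->size);
        }
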
-#endif
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
@@ -115,7 +173,7 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
if (!is_dma_buf_file(file))
return -EINVAL;
- __dma_buf_debugfs_list_del(file->private_data);
+ __dma_buf_list_del(file->private_data);
return 0;
}
@@ -636,10 +694,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->release))
return ERR_PTR(-EINVAL);
- if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- (exp_info->ops->pin || exp_info->ops->unpin)))
- return ERR_PTR(-EINVAL);
-
if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
@@ -689,7 +743,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- __dma_buf_debugfs_list_add(dmabuf);
+ __dma_buf_list_add(dmabuf);
return dmabuf;
@@ -782,7 +836,7 @@ static void mangle_sg_table(struct sg_table *sg_table)
/* To catch abuse of the underlying struct page by importers, mix
* up the bits, but take care to preserve the low SG_ bits to
- * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * not corrupt the sgt. The mixing is undone on unmap
* before passing the sgt back to the exporter.
*/
for_each_sgtable_sg(sg_table, sg, i)
@@ -790,29 +844,19 @@ static void mangle_sg_table(struct sg_table *sg_table)
#endif
}
-static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction direction)
-{
- struct sg_table *sg_table;
- signed long ret;
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
- if (IS_ERR_OR_NULL(sg_table))
- return sg_table;
-
- if (!dma_buf_attachment_is_dynamic(attach)) {
- ret = dma_resv_wait_timeout(attach->dmabuf->resv,
- DMA_RESV_USAGE_KERNEL, true,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0) {
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
- direction);
- return ERR_PTR(ret);
- }
- }
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
- mangle_sg_table(sg_table);
- return sg_table;
+static bool
+dma_buf_pin_on_map(struct dma_buf_attachment *attach)
+{
+ return attach->dmabuf->ops->pin &&
+ (!dma_buf_attachment_is_dynamic(attach) ||
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}
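
This predicate replaces the old cached-sgt special cases: pinning happens at map time only when the exporter supports it and either the importer is static (no importer_ops, so it cannot react to moves) or move notification is compiled out. An illustrative decision table, assuming an exporter that implements pin():

        /*
         * Illustrative decision table for dma_buf_pin_on_map(), assuming
         * attach->dmabuf->ops->pin != NULL:
         *
         *   importer_ops    CONFIG_DMABUF_MOVE_NOTIFY    pin on map?
         *   NULL (static)   y or n                       yes
         *   set (dynamic)   n                            yes
         *   set (dynamic)   y                            no, move_notify is used
         */
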
/**
@@ -935,48 +979,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
list_add(&attach->node, &dmabuf->attachments);
dma_resv_unlock(dmabuf->resv);
- /* When either the importer or the exporter can't handle dynamic
- * mappings we cache the mapping here to avoid issues with the
- * reservation object lock.
- */
- if (dma_buf_attachment_is_dynamic(attach) !=
- dma_buf_is_dynamic(dmabuf)) {
- struct sg_table *sgt;
-
- dma_resv_lock(attach->dmabuf->resv, NULL);
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- ret = dmabuf->ops->pin(attach);
- if (ret)
- goto err_unlock;
- }
-
- sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
- if (!sgt)
- sgt = ERR_PTR(-ENOMEM);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_unpin;
- }
- dma_resv_unlock(attach->dmabuf->resv);
- attach->sgt = sgt;
- attach->dir = DMA_BIDIRECTIONAL;
- }
-
return attach;
err_attach:
kfree(attach);
return ERR_PTR(ret);
-
-err_unpin:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
-
-err_unlock:
- dma_resv_unlock(attach->dmabuf->resv);
-
- dma_buf_detach(dmabuf, attach);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
@@ -995,16 +1002,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
-static void __unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sg_table,
- enum dma_data_direction direction)
-{
- /* uses XOR, hence this unmangles */
- mangle_sg_table(sg_table);
-
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
-}
-
/**
* dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
@@ -1020,16 +1017,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
return;
dma_resv_lock(dmabuf->resv, NULL);
-
- if (attach->sgt) {
-
- __unmap_dma_buf(attach, attach->sgt, attach->dir);
-
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
- }
list_del(&attach->node);
-
dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
@@ -1058,7 +1046,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1081,7 +1069,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1115,7 +1103,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
- int r;
+ signed long ret;
might_sleep();
@@ -1124,41 +1112,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt) {
+ if (dma_buf_pin_on_map(attach)) {
+ ret = attach->dmabuf->ops->pin(attach);
/*
- * Two mappings with different directions for the same
- * attachment are not allowed.
+ * Catch exporters making buffers inaccessible even when
+ * attachments preventing that exist.
*/
- if (attach->dir != direction &&
- attach->dir != DMA_BIDIRECTIONAL)
- return ERR_PTR(-EBUSY);
-
- return attach->sgt;
- }
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = attach->dmabuf->ops->pin(attach);
- if (r)
- return ERR_PTR(r);
- }
+ WARN_ON_ONCE(ret == -EBUSY);
+ if (ret)
+ return ERR_PTR(ret);
}
- sg_table = __map_dma_buf(attach, direction);
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table))
+ goto error_unpin;
- if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- attach->dmabuf->ops->unpin(attach);
-
- if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
- attach->sgt = sg_table;
- attach->dir = direction;
+ /*
+ * Importers with static attachments don't wait for fences
+ * themselves, so wait on their behalf here.
+ */
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ goto error_unmap;
}
+ mangle_sg_table(sg_table);
#ifdef CONFIG_DMA_API_DEBUG
- if (!IS_ERR(sg_table)) {
+ {
struct scatterlist *sg;
u64 addr;
int len;
@@ -1175,6 +1159,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
+
+error_unmap:
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ sg_table = ERR_PTR(ret);
+
+error_unpin:
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
+
+ return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
@@ -1227,14 +1221,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt == sg_table)
- return;
-
- __unmap_dma_buf(attach, sg_table, direction);
+ mangle_sg_table(sg_table);
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
- if (dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
@@ -1630,7 +1621,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
size_t size = 0;
int ret;
- ret = mutex_lock_interruptible(&debugfs_list_mutex);
+ ret = mutex_lock_interruptible(&dmabuf_list_mutex);
if (ret)
return ret;
@@ -1639,7 +1630,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
- list_for_each_entry(buf_obj, &debugfs_list, list_node) {
+ list_for_each_entry(buf_obj, &dmabuf_list, list_node) {
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
@@ -1676,11 +1667,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return 0;
error_unlock:
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return ret;
}
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 2a059ac0ed27..a495d8a6c2e3 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -79,6 +79,41 @@ static int fence_cmp(const void *_a, const void *_b)
return 0;
}
+/**
+ * dma_fence_dedup_array - Sort and deduplicate an array of dma_fence pointers
+ * @fences: Array of dma_fence pointers to be deduplicated
+ * @num_fences: Number of entries in the @fences array
+ *
+ * Sorts the input array by context, then removes duplicate
+ * fences with the same context, keeping only the most recent one.
+ *
+ * The array is modified in-place and unreferenced duplicate fences are released
+ * via dma_fence_put(). The function returns the new number of fences after
+ * deduplication.
+ *
+ * Return: Number of unique fences remaining in the array.
+ */
+int dma_fence_dedup_array(struct dma_fence **fences, int num_fences)
+{
+ int i, j;
+
+ sort(fences, num_fences, sizeof(*fences), fence_cmp, NULL);
+
+ /*
+ * Only keep the most recent fence for each context.
+ */
+ j = 0;
+ for (i = 1; i < num_fences; i++) {
+ if (fences[i]->context == fences[j]->context)
+ dma_fence_put(fences[i]);
+ else
+ fences[++j] = fences[i];
+ }
+
+ return ++j;
+}
+EXPORT_SYMBOL_GPL(dma_fence_dedup_array);
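
The helper takes over the references in @fences: duplicates are released with dma_fence_put() and only the surviving entries remain valid. A usage sketch, mirroring __dma_fence_unwrap_merge() below:

        /* array holds 'count' owned dma_fence references */
        count = dma_fence_dedup_array(array, count);
        /* entries [0, count) now hold one fence per context, newest kept */
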
+
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
@@ -87,7 +122,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence *tmp, *unsignaled = NULL, **array;
struct dma_fence_array *result;
ktime_t timestamp;
- int i, j, count;
+ int i, count;
count = 0;
timestamp = ns_to_ktime(0);
@@ -141,19 +176,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
if (count == 0 || count == 1)
goto return_fastpath;
- sort(array, count, sizeof(*array), fence_cmp, NULL);
-
- /*
- * Only keep the most recent fence for each context.
- */
- j = 0;
- for (i = 1; i < count; i++) {
- if (array[i]->context == array[j]->context)
- dma_fence_put(array[i]);
- else
- array[++j] = array[i];
- }
- count = ++j;
+ count = dma_fence_dedup_array(array, count);
if (count > 1) {
result = dma_fence_array_create(count, array,
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 5f8d010516f0..b1ef4546346d 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
count++;
dma_resv_list_set(fobj, i, fence, usage);
- /* pointer update must be visible before we extend the num_fences */
- smp_store_mb(fobj->num_fences, count);
+ /* fence update must be visible before we extend the num_fences */
+ smp_wmb();
+ fobj->num_fences = count;
}
EXPORT_SYMBOL(dma_resv_add_fence);
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 26d5dc89ea16..82b1b714300d 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -21,8 +21,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-static struct dma_heap *sys_heap;
-
struct system_heap_buffer {
struct dma_heap *heap;
struct list_head attachments;
@@ -424,6 +422,7 @@ static const struct dma_heap_ops system_heap_ops = {
static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
+ struct dma_heap *sys_heap;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index 9f80a45498f0..27a36045410b 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -375,7 +375,7 @@ struct wait_timer {
static void wait_timer(struct timer_list *timer)
{
- struct wait_timer *wt = from_timer(wt, timer, timer);
+ struct wait_timer *wt = timer_container_of(wt, timer, timer);
dma_fence_signal(wt->f);
}
@@ -413,7 +413,7 @@ static int test_wait_timeout(void *arg)
err = 0;
err_free:
timer_delete_sync(&wt.timer);
- destroy_timer_on_stack(&wt.timer);
+ timer_destroy_on_stack(&wt.timer);
dma_fence_signal(wt.f);
dma_fence_put(wt.f);
return err;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 22a808995f10..4f27ee93a00c 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -173,20 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
-static void timeline_fence_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- snprintf(str, size, "%lld", fence->seqno);
-}
-
-static void timeline_fence_timeline_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = dma_fence_parent(fence);
-
- snprintf(str, size, "%d", parent->value);
-}
-
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
@@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
.get_timeline_name = timeline_fence_get_timeline_name,
.signaled = timeline_fence_signaled,
.release = timeline_fence_release,
- .fence_value_str = timeline_fence_value_str,
- .timeline_value_str = timeline_fence_timeline_value_str,
.set_deadline = timeline_fence_set_deadline,
};
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 237bce21d1e7..67cd69551e42 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -12,8 +12,6 @@ static struct dentry *dbgfs;
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_file_list_head);
-static DEFINE_SPINLOCK(sync_file_list_lock);
void sync_timeline_debug_add(struct sync_timeline *obj)
{
@@ -33,24 +31,6 @@ void sync_timeline_debug_remove(struct sync_timeline *obj)
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}
-void sync_file_debug_add(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
-void sync_file_debug_remove(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_del(&sync_file->sync_file_list);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
static const char *sync_status_str(int status)
{
if (status < 0)
@@ -82,25 +62,8 @@ static void sync_print_fence(struct seq_file *s,
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = strlen(value);
-
- if (success) {
- seq_printf(s, ": %s", value);
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
-
- if (strlen(value))
- seq_printf(s, " / %s", value);
- }
- }
-
+ seq_printf(s, ": %lld", fence->seqno);
+ seq_printf(s, " / %d", parent->value);
seq_putc(s, '\n');
}
@@ -118,26 +81,6 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
spin_unlock(&obj->lock);
}
-static void sync_print_sync_file(struct seq_file *s,
- struct sync_file *sync_file)
-{
- char buf[128];
- int i;
-
- seq_printf(s, "[%p] %s: %s\n", sync_file,
- sync_file_get_name(sync_file, buf, sizeof(buf)),
- sync_status_str(dma_fence_get_status(sync_file->fence)));
-
- if (dma_fence_is_array(sync_file->fence)) {
- struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
- for (i = 0; i < array->num_fences; ++i)
- sync_print_fence(s, array->fences[i], true);
- } else {
- sync_print_fence(s, sync_file->fence, true);
- }
-}
-
static int sync_info_debugfs_show(struct seq_file *s, void *unused)
{
struct list_head *pos;
@@ -157,15 +100,6 @@ static int sync_info_debugfs_show(struct seq_file *s, void *unused)
seq_puts(s, "fences:\n--------------\n");
- spin_lock_irq(&sync_file_list_lock);
- list_for_each(pos, &sync_file_list_head) {
- struct sync_file *sync_file =
- container_of(pos, struct sync_file, sync_file_list);
-
- sync_print_sync_file(s, sync_file);
- seq_putc(s, '\n');
- }
- spin_unlock_irq(&sync_file_list_lock);
return 0;
}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index a1bdd62efccd..02af347293d0 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -68,7 +68,5 @@ extern const struct file_operations sw_sync_debugfs_fops;
void sync_timeline_debug_add(struct sync_timeline *obj);
void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_file_debug_add(struct sync_file *fence);
-void sync_file_debug_remove(struct sync_file *fence);
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index e74e36a8ecda..c9d0c68d2fcb 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -264,8 +264,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
ubuf->sg = NULL;
}
} else {
- dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
- direction);
+ dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
}
return ret;
@@ -280,12 +279,11 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
if (!ubuf->sg)
return -EINVAL;
- dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
return 0;
}
static const struct dma_buf_ops udmabuf_ops = {
- .cache_sgt_mapping = true,
.map_dma_buf = map_udmabuf,
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
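
The sgtable helpers take the whole sg_table and pick the right element count internally; the open-coded calls above passed sg->nents (the DMA-mapped count) where orig_nents (the CPU-side entry count) is what the sync API expects, which is exactly the bug class this interface removes. As I read dma-mapping.h, the wrapper is essentially:

	static inline void dma_sync_sgtable_for_cpu(struct device *dev,
			struct sg_table *sgt, enum dma_data_direction dir)
	{
		/* note: orig_nents, not nents */
		dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
	}
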
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index df2d2dc00a05..db87dd2a07f7 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -93,6 +93,14 @@ config APPLE_ADMAC
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
+config ARM_DMA350
+ tristate "Arm DMA-350 support"
+ depends on ARM || ARM64 || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Arm DMA-350 controller.
+
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 19ba465011a6..ba9732644752 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
+obj-$(CONFIG_ARM_DMA350) += arm-dma350.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
diff --git a/drivers/dma/amd/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index 715ac3ae067b..628c49ce5de9 100644
--- a/drivers/dma/amd/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -342,6 +342,9 @@ static void pt_cmd_callback_work(void *data, int err)
struct pt_dma_chan *chan;
unsigned long flags;
+ if (!desc)
+ return;
+
dma_chan = desc->vd.tx.chan;
chan = to_pt_chan(dma_chan);
@@ -355,16 +358,14 @@ static void pt_cmd_callback_work(void *data, int err)
desc->status = DMA_ERROR;
spin_lock_irqsave(&chan->vc.lock, flags);
- if (desc) {
- if (desc->status != DMA_COMPLETE) {
- if (desc->status != DMA_ERROR)
- desc->status = DMA_COMPLETE;
+ if (desc->status != DMA_COMPLETE) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
- dma_cookie_complete(tx_desc);
- dma_descriptor_unmap(tx_desc);
- } else {
- tx_desc = NULL;
- }
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ } else {
+ tx_desc = NULL;
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -565,7 +566,6 @@ int pt_dmaengine_register(struct pt_device *pt)
struct ae4_device *ae4 = NULL;
struct pt_dma_chan *chan;
char *desc_cache_name;
- char *cmd_cache_name;
int ret, i;
if (pt->ver == AE4_DMA_VERSION)
@@ -581,27 +581,17 @@ int pt_dmaengine_register(struct pt_device *pt)
if (!pt->pt_dma_chan)
return -ENOMEM;
- cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
- "%s-dmaengine-cmd-cache",
- dev_name(pt->dev));
- if (!cmd_cache_name)
- return -ENOMEM;
-
desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
dev_name(pt->dev));
- if (!desc_cache_name) {
- ret = -ENOMEM;
- goto err_cache;
- }
+ if (!desc_cache_name)
+ return -ENOMEM;
pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
sizeof(struct pt_dma_desc), 0,
SLAB_HWCACHE_ALIGN, NULL);
- if (!pt->dma_desc_cache) {
- ret = -ENOMEM;
- goto err_cache;
- }
+ if (!pt->dma_desc_cache)
+ return -ENOMEM;
dma_dev->dev = pt->dev;
dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
@@ -655,9 +645,6 @@ int pt_dmaengine_register(struct pt_device *pt)
err_reg:
kmem_cache_destroy(pt->dma_desc_cache);
-err_cache:
- kmem_cache_destroy(pt->dma_cmd_cache);
-
return ret;
}
EXPORT_SYMBOL_GPL(pt_dmaengine_register);
@@ -669,5 +656,4 @@ void pt_dmaengine_unregister(struct pt_device *pt)
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(pt->dma_desc_cache);
- kmem_cache_destroy(pt->dma_cmd_cache);
}
diff --git a/drivers/dma/amd/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
index 0a7939105e51..ef3f55632107 100644
--- a/drivers/dma/amd/ptdma/ptdma.h
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -254,7 +254,6 @@ struct pt_device {
/* Support for the DMA Engine capabilities */
struct dma_device dma_dev;
struct pt_dma_chan *pt_dma_chan;
- struct kmem_cache *dma_cmd_cache;
struct kmem_cache *dma_desc_cache;
wait_queue_head_t lsb_queue;
diff --git a/drivers/dma/arm-dma350.c b/drivers/dma/arm-dma350.c
new file mode 100644
index 000000000000..9efe2ca7d5ec
--- /dev/null
+++ b/drivers/dma/arm-dma350.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024-2025 Arm Limited
+// Arm DMA-350 driver
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define DMAINFO 0x0f00
+
+#define DMA_BUILDCFG0 0xb0
+#define DMA_CFG_DATA_WIDTH GENMASK(18, 16)
+#define DMA_CFG_ADDR_WIDTH GENMASK(15, 10)
+#define DMA_CFG_NUM_CHANNELS GENMASK(9, 4)
+
+#define DMA_BUILDCFG1 0xb4
+#define DMA_CFG_NUM_TRIGGER_IN GENMASK(8, 0)
+
+#define IIDR 0xc8
+#define IIDR_PRODUCTID GENMASK(31, 20)
+#define IIDR_VARIANT GENMASK(19, 16)
+#define IIDR_REVISION GENMASK(15, 12)
+#define IIDR_IMPLEMENTER GENMASK(11, 0)
+
+#define PRODUCTID_DMA350 0x3a0
+#define IMPLEMENTER_ARM 0x43b
+
+#define DMACH(n) (0x1000 + 0x0100 * (n))
+
+#define CH_CMD 0x00
+#define CH_CMD_RESUME BIT(5)
+#define CH_CMD_PAUSE BIT(4)
+#define CH_CMD_STOP BIT(3)
+#define CH_CMD_DISABLE BIT(2)
+#define CH_CMD_CLEAR BIT(1)
+#define CH_CMD_ENABLE BIT(0)
+
+#define CH_STATUS 0x04
+#define CH_STAT_RESUMEWAIT BIT(21)
+#define CH_STAT_PAUSED BIT(20)
+#define CH_STAT_STOPPED BIT(19)
+#define CH_STAT_DISABLED BIT(18)
+#define CH_STAT_ERR BIT(17)
+#define CH_STAT_DONE BIT(16)
+#define CH_STAT_INTR_ERR BIT(1)
+#define CH_STAT_INTR_DONE BIT(0)
+
+#define CH_INTREN 0x08
+#define CH_INTREN_ERR BIT(1)
+#define CH_INTREN_DONE BIT(0)
+
+#define CH_CTRL 0x0c
+#define CH_CTRL_USEDESTRIGIN BIT(26)
+#define CH_CTRL_USESRCTRIGIN BIT(25)
+#define CH_CTRL_DONETYPE GENMASK(23, 21)
+#define CH_CTRL_REGRELOADTYPE GENMASK(20, 18)
+#define CH_CTRL_XTYPE GENMASK(11, 9)
+#define CH_CTRL_TRANSIZE GENMASK(2, 0)
+
+#define CH_SRCADDR 0x10
+#define CH_SRCADDRHI 0x14
+#define CH_DESADDR 0x18
+#define CH_DESADDRHI 0x1c
+#define CH_XSIZE 0x20
+#define CH_XSIZEHI 0x24
+#define CH_SRCTRANSCFG 0x28
+#define CH_DESTRANSCFG 0x2c
+#define CH_CFG_MAXBURSTLEN GENMASK(19, 16)
+#define CH_CFG_PRIVATTR BIT(11)
+#define CH_CFG_SHAREATTR GENMASK(9, 8)
+#define CH_CFG_MEMATTR GENMASK(7, 0)
+
+#define TRANSCFG_DEVICE \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_DEVICE)
+#define TRANSCFG_NC \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_NC)
+#define TRANSCFG_WB \
+ FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
+ FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_ISH) | \
+ FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_WB)
+
+#define CH_XADDRINC 0x30
+#define CH_XY_DES GENMASK(31, 16)
+#define CH_XY_SRC GENMASK(15, 0)
+
+#define CH_FILLVAL 0x38
+#define CH_SRCTRIGINCFG 0x4c
+#define CH_DESTRIGINCFG 0x50
+#define CH_LINKATTR 0x70
+#define CH_LINK_SHAREATTR GENMASK(9, 8)
+#define CH_LINK_MEMATTR GENMASK(7, 0)
+
+#define CH_AUTOCFG 0x74
+#define CH_LINKADDR 0x78
+#define CH_LINKADDR_EN BIT(0)
+
+#define CH_LINKADDRHI 0x7c
+#define CH_ERRINFO 0x90
+#define CH_ERRINFO_AXIRDPOISERR BIT(18)
+#define CH_ERRINFO_AXIWRRESPERR BIT(17)
+#define CH_ERRINFO_AXIRDRESPERR BIT(16)
+
+#define CH_BUILDCFG0 0xf8
+#define CH_CFG_INC_WIDTH GENMASK(29, 26)
+#define CH_CFG_DATA_WIDTH GENMASK(24, 22)
+#define CH_CFG_DATA_BUF_SIZE GENMASK(7, 0)
+
+#define CH_BUILDCFG1 0xfc
+#define CH_CFG_HAS_CMDLINK BIT(8)
+#define CH_CFG_HAS_TRIGSEL BIT(7)
+#define CH_CFG_HAS_TRIGIN BIT(5)
+#define CH_CFG_HAS_WRAP BIT(1)
+
+
+#define LINK_REGCLEAR BIT(0)
+#define LINK_INTREN BIT(2)
+#define LINK_CTRL BIT(3)
+#define LINK_SRCADDR BIT(4)
+#define LINK_SRCADDRHI BIT(5)
+#define LINK_DESADDR BIT(6)
+#define LINK_DESADDRHI BIT(7)
+#define LINK_XSIZE BIT(8)
+#define LINK_XSIZEHI BIT(9)
+#define LINK_SRCTRANSCFG BIT(10)
+#define LINK_DESTRANSCFG BIT(11)
+#define LINK_XADDRINC BIT(12)
+#define LINK_FILLVAL BIT(14)
+#define LINK_SRCTRIGINCFG BIT(19)
+#define LINK_DESTRIGINCFG BIT(20)
+#define LINK_AUTOCFG BIT(29)
+#define LINK_LINKADDR BIT(30)
+#define LINK_LINKADDRHI BIT(31)
+
+
+enum ch_ctrl_donetype {
+ CH_CTRL_DONETYPE_NONE = 0,
+ CH_CTRL_DONETYPE_CMD = 1,
+ CH_CTRL_DONETYPE_CYCLE = 3
+};
+
+enum ch_ctrl_xtype {
+ CH_CTRL_XTYPE_DISABLE = 0,
+ CH_CTRL_XTYPE_CONTINUE = 1,
+ CH_CTRL_XTYPE_WRAP = 2,
+ CH_CTRL_XTYPE_FILL = 3
+};
+
+enum ch_cfg_shareattr {
+ SHAREATTR_NSH = 0,
+ SHAREATTR_OSH = 2,
+ SHAREATTR_ISH = 3
+};
+
+enum ch_cfg_memattr {
+ MEMATTR_DEVICE = 0x00,
+ MEMATTR_NC = 0x44,
+ MEMATTR_WB = 0xff
+};
+
+struct d350_desc {
+ struct virt_dma_desc vd;
+ u32 command[16];
+ u16 xsize;
+ u16 xsizehi;
+ u8 tsz;
+};
+
+struct d350_chan {
+ struct virt_dma_chan vc;
+ struct d350_desc *desc;
+ void __iomem *base;
+ int irq;
+ enum dma_status status;
+ dma_cookie_t cookie;
+ u32 residue;
+ u8 tsz;
+ bool has_trig;
+ bool has_wrap;
+ bool coherent;
+};
+
+struct d350 {
+ struct dma_device dma;
+ int nchan;
+ int nreq;
+ struct d350_chan channels[] __counted_by(nchan);
+};
+
+static inline struct d350_chan *to_d350_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct d350_chan, vc.chan);
+}
+
+static inline struct d350_desc *to_d350_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct d350_desc, vd);
+}
+
+static void d350_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_d350_desc(vd));
+}
+
+static struct dma_async_tx_descriptor *d350_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct d350_desc *desc;
+ u32 *cmd;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->tsz = __ffs(len | dest | src | (1 << dch->tsz));
+ desc->xsize = lower_16_bits(len >> desc->tsz);
+ desc->xsizehi = upper_16_bits(len >> desc->tsz);
+
+ cmd = desc->command;
+ cmd[0] = LINK_CTRL | LINK_SRCADDR | LINK_SRCADDRHI | LINK_DESADDR |
+ LINK_DESADDRHI | LINK_XSIZE | LINK_XSIZEHI | LINK_SRCTRANSCFG |
+ LINK_DESTRANSCFG | LINK_XADDRINC | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_CONTINUE) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);
+
+ cmd[2] = lower_32_bits(src);
+ cmd[3] = upper_32_bits(src);
+ cmd[4] = lower_32_bits(dest);
+ cmd[5] = upper_32_bits(dest);
+ cmd[6] = FIELD_PREP(CH_XY_SRC, desc->xsize) | FIELD_PREP(CH_XY_DES, desc->xsize);
+ cmd[7] = FIELD_PREP(CH_XY_SRC, desc->xsizehi) | FIELD_PREP(CH_XY_DES, desc->xsizehi);
+ cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[9] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[10] = FIELD_PREP(CH_XY_SRC, 1) | FIELD_PREP(CH_XY_DES, 1);
+ cmd[11] = 0;
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *d350_prep_memset(struct dma_chan *chan,
+ dma_addr_t dest, int value, size_t len, unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct d350_desc *desc;
+ u32 *cmd;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->tsz = __ffs(len | dest | (1 << dch->tsz));
+ desc->xsize = lower_16_bits(len >> desc->tsz);
+ desc->xsizehi = upper_16_bits(len >> desc->tsz);
+
+ cmd = desc->command;
+ cmd[0] = LINK_CTRL | LINK_DESADDR | LINK_DESADDRHI |
+ LINK_XSIZE | LINK_XSIZEHI | LINK_DESTRANSCFG |
+ LINK_XADDRINC | LINK_FILLVAL | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_FILL) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);
+
+ cmd[2] = lower_32_bits(dest);
+ cmd[3] = upper_32_bits(dest);
+ cmd[4] = FIELD_PREP(CH_XY_DES, desc->xsize);
+ cmd[5] = FIELD_PREP(CH_XY_DES, desc->xsizehi);
+ cmd[6] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[7] = FIELD_PREP(CH_XY_DES, 1);
+ cmd[8] = (u8)value * 0x01010101;
+ cmd[9] = 0;
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+}
+
+static int d350_pause(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (dch->status == DMA_IN_PROGRESS) {
+ writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD);
+ dch->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static int d350_resume(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (dch->status == DMA_PAUSED) {
+ writel_relaxed(CH_CMD_RESUME, dch->base + CH_CMD);
+ dch->status = DMA_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static u32 d350_get_residue(struct d350_chan *dch)
+{
+ u32 res, xsize, xsizehi, hi_new;
+ int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */
+
+ hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
+ do {
+ xsizehi = hi_new;
+ xsize = readl_relaxed(dch->base + CH_XSIZE);
+ hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
+ } while (xsizehi != hi_new && --retries);
+
+ res = FIELD_GET(CH_XY_DES, xsize);
+ res |= FIELD_GET(CH_XY_DES, xsizehi) << 16;
+
+ return res << dch->desc->tsz;
+}
+
+static int d350_terminate_all(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD);
+ if (dch->desc) {
+ if (dch->status != DMA_ERROR)
+ vchan_terminate_vdesc(&dch->desc->vd);
+ dch->desc = NULL;
+ dch->status = DMA_COMPLETE;
+ }
+ vchan_get_all_descriptors(&dch->vc, &list);
+ list_splice_tail(&list, &dch->vc.desc_terminated);
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ return 0;
+}
+
+static void d350_synchronize(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+
+ vchan_synchronize(&dch->vc);
+}
+
+static u32 d350_desc_bytes(struct d350_desc *desc)
+{
+ return ((u32)desc->xsizehi << 16 | desc->xsize) << desc->tsz;
+}
+
+static enum dma_status d350_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status status;
+ unsigned long flags;
+ u32 residue = 0;
+
+ status = dma_cookie_status(chan, cookie, state);
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (cookie == dch->cookie) {
+ status = dch->status;
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
+ dch->residue = d350_get_residue(dch);
+ residue = dch->residue;
+ } else if ((vd = vchan_find_desc(&dch->vc, cookie))) {
+ residue = d350_desc_bytes(to_d350_desc(vd));
+ } else if (status == DMA_IN_PROGRESS) {
+ /* Somebody else terminated it? */
+ status = DMA_ERROR;
+ }
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+
+ dma_set_residue(state, residue);
+ return status;
+}
+
+static void d350_start_next(struct d350_chan *dch)
+{
+ u32 hdr, *reg;
+
+ dch->desc = to_d350_desc(vchan_next_desc(&dch->vc));
+ if (!dch->desc)
+ return;
+
+ list_del(&dch->desc->vd.node);
+ dch->status = DMA_IN_PROGRESS;
+ dch->cookie = dch->desc->vd.tx.cookie;
+ dch->residue = d350_desc_bytes(dch->desc);
+
+ hdr = dch->desc->command[0];
+ reg = &dch->desc->command[1];
+
+ if (hdr & LINK_INTREN)
+ writel_relaxed(*reg++, dch->base + CH_INTREN);
+ if (hdr & LINK_CTRL)
+ writel_relaxed(*reg++, dch->base + CH_CTRL);
+ if (hdr & LINK_SRCADDR)
+ writel_relaxed(*reg++, dch->base + CH_SRCADDR);
+ if (hdr & LINK_SRCADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_SRCADDRHI);
+ if (hdr & LINK_DESADDR)
+ writel_relaxed(*reg++, dch->base + CH_DESADDR);
+ if (hdr & LINK_DESADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_DESADDRHI);
+ if (hdr & LINK_XSIZE)
+ writel_relaxed(*reg++, dch->base + CH_XSIZE);
+ if (hdr & LINK_XSIZEHI)
+ writel_relaxed(*reg++, dch->base + CH_XSIZEHI);
+ if (hdr & LINK_SRCTRANSCFG)
+ writel_relaxed(*reg++, dch->base + CH_SRCTRANSCFG);
+ if (hdr & LINK_DESTRANSCFG)
+ writel_relaxed(*reg++, dch->base + CH_DESTRANSCFG);
+ if (hdr & LINK_XADDRINC)
+ writel_relaxed(*reg++, dch->base + CH_XADDRINC);
+ if (hdr & LINK_FILLVAL)
+ writel_relaxed(*reg++, dch->base + CH_FILLVAL);
+ if (hdr & LINK_SRCTRIGINCFG)
+ writel_relaxed(*reg++, dch->base + CH_SRCTRIGINCFG);
+ if (hdr & LINK_DESTRIGINCFG)
+ writel_relaxed(*reg++, dch->base + CH_DESTRIGINCFG);
+ if (hdr & LINK_AUTOCFG)
+ writel_relaxed(*reg++, dch->base + CH_AUTOCFG);
+ if (hdr & LINK_LINKADDR)
+ writel_relaxed(*reg++, dch->base + CH_LINKADDR);
+ if (hdr & LINK_LINKADDRHI)
+ writel_relaxed(*reg++, dch->base + CH_LINKADDRHI);
+
+ writel(CH_CMD_ENABLE, dch->base + CH_CMD);
+}
+
+static void d350_issue_pending(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dch->vc.lock, flags);
+ if (vchan_issue_pending(&dch->vc) && !dch->desc)
+ d350_start_next(dch);
+ spin_unlock_irqrestore(&dch->vc.lock, flags);
+}
+
+static irqreturn_t d350_irq(int irq, void *data)
+{
+ struct d350_chan *dch = data;
+ struct device *dev = dch->vc.chan.device->dev;
+ struct virt_dma_desc *vd = &dch->desc->vd;
+ u32 ch_status;
+
+ ch_status = readl(dch->base + CH_STATUS);
+ if (!ch_status)
+ return IRQ_NONE;
+
+ if (ch_status & CH_STAT_INTR_ERR) {
+ u32 errinfo = readl_relaxed(dch->base + CH_ERRINFO);
+
+ if (errinfo & (CH_ERRINFO_AXIRDPOISERR | CH_ERRINFO_AXIRDRESPERR))
+ vd->tx_result.result = DMA_TRANS_READ_FAILED;
+ else if (errinfo & CH_ERRINFO_AXIWRRESPERR)
+ vd->tx_result.result = DMA_TRANS_WRITE_FAILED;
+ else
+ vd->tx_result.result = DMA_TRANS_ABORTED;
+
+ vd->tx_result.residue = d350_get_residue(dch);
+ } else if (!(ch_status & CH_STAT_INTR_DONE)) {
+ dev_warn(dev, "Unexpected IRQ source? 0x%08x\n", ch_status);
+ }
+ writel_relaxed(ch_status, dch->base + CH_STATUS);
+
+ spin_lock(&dch->vc.lock);
+ vchan_cookie_complete(vd);
+ if (ch_status & CH_STAT_INTR_DONE) {
+ dch->status = DMA_COMPLETE;
+ dch->residue = 0;
+ d350_start_next(dch);
+ } else {
+ dch->status = DMA_ERROR;
+ dch->residue = vd->tx_result.residue;
+ }
+ spin_unlock(&dch->vc.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int d350_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED,
+ dev_name(&dch->vc.chan.dev->device), dch);
+ if (!ret)
+ writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR, dch->base + CH_INTREN);
+
+ return ret;
+}
+
+static void d350_free_chan_resources(struct dma_chan *chan)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+
+ writel_relaxed(0, dch->base + CH_INTREN);
+ free_irq(dch->irq, dch);
+ vchan_free_chan_resources(&dch->vc);
+}
+
+static int d350_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct d350 *dmac;
+ void __iomem *base;
+ u32 reg;
+ int ret, nchan, dw, aw, r, p;
+ bool coherent, memset;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ reg = readl_relaxed(base + DMAINFO + IIDR);
+ r = FIELD_GET(IIDR_VARIANT, reg);
+ p = FIELD_GET(IIDR_REVISION, reg);
+ if (FIELD_GET(IIDR_IMPLEMENTER, reg) != IMPLEMENTER_ARM ||
+ FIELD_GET(IIDR_PRODUCTID, reg) != PRODUCTID_DMA350)
+ return dev_err_probe(dev, -ENODEV, "Not a DMA-350!");
+
+ reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG0);
+ nchan = FIELD_GET(DMA_CFG_NUM_CHANNELS, reg) + 1;
+ dw = 1 << FIELD_GET(DMA_CFG_DATA_WIDTH, reg);
+ aw = FIELD_GET(DMA_CFG_ADDR_WIDTH, reg) + 1;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(aw));
+ coherent = device_get_dma_attr(dev) == DEV_DMA_COHERENT;
+
+ dmac = devm_kzalloc(dev, struct_size(dmac, channels, nchan), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->nchan = nchan;
+
+ reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG1);
+ dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg);
+
+ dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq);
+
+ dmac->dma.dev = dev;
+ for (int i = min(dw, 16); i > 0; i /= 2) {
+ dmac->dma.src_addr_widths |= BIT(i);
+ dmac->dma.dst_addr_widths |= BIT(i);
+ }
+ dmac->dma.directions = BIT(DMA_MEM_TO_MEM);
+ dmac->dma.descriptor_reuse = true;
+ dmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dmac->dma.device_alloc_chan_resources = d350_alloc_chan_resources;
+ dmac->dma.device_free_chan_resources = d350_free_chan_resources;
+ dma_cap_set(DMA_MEMCPY, dmac->dma.cap_mask);
+ dmac->dma.device_prep_dma_memcpy = d350_prep_memcpy;
+ dmac->dma.device_pause = d350_pause;
+ dmac->dma.device_resume = d350_resume;
+ dmac->dma.device_terminate_all = d350_terminate_all;
+ dmac->dma.device_synchronize = d350_synchronize;
+ dmac->dma.device_tx_status = d350_tx_status;
+ dmac->dma.device_issue_pending = d350_issue_pending;
+ INIT_LIST_HEAD(&dmac->dma.channels);
+
+ /* Would be nice to have per-channel caps for this... */
+ memset = true;
+ for (int i = 0; i < nchan; i++) {
+ struct d350_chan *dch = &dmac->channels[i];
+
+ dch->base = base + DMACH(i);
+ writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD);
+
+ reg = readl_relaxed(dch->base + CH_BUILDCFG1);
+ if (!(FIELD_GET(CH_CFG_HAS_CMDLINK, reg))) {
+ dev_warn(dev, "No command link support on channel %d\n", i);
+ continue;
+ }
+ dch->irq = platform_get_irq(pdev, i);
+ if (dch->irq < 0)
+ return dev_err_probe(dev, dch->irq,
+ "Failed to get IRQ for channel %d\n", i);
+
+ dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg);
+ dch->has_trig = FIELD_GET(CH_CFG_HAS_TRIGIN, reg) &
+ FIELD_GET(CH_CFG_HAS_TRIGSEL, reg);
+
+ /* Fill is a special case of Wrap */
+ memset &= dch->has_wrap;
+
+ reg = readl_relaxed(dch->base + CH_BUILDCFG0);
+ dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg);
+
+ reg = FIELD_PREP(CH_LINK_SHAREATTR, coherent ? SHAREATTR_ISH : SHAREATTR_OSH);
+ reg |= FIELD_PREP(CH_LINK_MEMATTR, coherent ? MEMATTR_WB : MEMATTR_NC);
+ writel_relaxed(reg, dch->base + CH_LINKATTR);
+
+ dch->vc.desc_free = d350_desc_free;
+ vchan_init(&dch->vc, &dmac->dma);
+ }
+
+ if (memset) {
+ dma_cap_set(DMA_MEMSET, dmac->dma.cap_mask);
+ dmac->dma.device_prep_dma_memset = d350_prep_memset;
+ }
+
+ platform_set_drvdata(pdev, dmac);
+
+ ret = dma_async_device_register(&dmac->dma);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register DMA device\n");
+
+ return 0;
+}
+
+static void d350_remove(struct platform_device *pdev)
+{
+ struct d350 *dmac = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&dmac->dma);
+}
+
+static const struct of_device_id d350_of_match[] __maybe_unused = {
+ { .compatible = "arm,dma-350" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, d350_of_match);
+
+static struct platform_driver d350_driver = {
+ .driver = {
+ .name = "arm-dma350",
+ .of_match_table = of_match_ptr(d350_of_match),
+ },
+ .probe = d350_probe,
+ .remove = d350_remove,
+};
+module_platform_driver(d350_driver);
+
+MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
+MODULE_DESCRIPTION("Arm DMA-350 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index ba25c23164e7..3fbc74710a13 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2033,10 +2033,8 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
* at_xdmac_start_xfer() for this descriptor. Now it's time
* to release it.
*/
- if (desc->active_xfer) {
- pm_runtime_put_autosuspend(atxdmac->dev);
- pm_runtime_mark_last_busy(atxdmac->dev);
- }
+ if (desc->active_xfer)
+ pm_runtime_put_noidle(atxdmac->dev);
}
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
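
pm_runtime_put_autosuspend() drops the device usage count and may kick off a suspend, while pm_runtime_put_noidle() only drops the count; in a terminate-all path running under the channel lock, skipping the idle attempt presumably avoids doing runtime-PM work from atomic context. Typical pairing (sketch):

	/* start of transfer */
	pm_runtime_get_sync(dev);
	/* ... descriptor submitted to hardware ... */

	/* forced termination: balance the reference, defer any suspend */
	pm_runtime_put_noidle(dev);
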
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d891dfca358e..91b2fbc0b864 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -841,9 +841,9 @@ static int dmatest_func(void *data)
} else {
dma_async_issue_pending(chan);
- wait_event_timeout(thread->done_wait,
- done->done,
- msecs_to_jiffies(params->timeout));
+ wait_event_freezable_timeout(thread->done_wait,
+ done->done,
+ msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL,
NULL);
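
wait_event_freezable_timeout() behaves like wait_event_timeout() but marks the sleep freezable, so a long dmatest timeout no longer blocks the freezer during suspend/hibernate. It is a drop-in replacement at the call site:

	/* sleeps until done->done, timeout expiry, or a freezer wakeup */
	wait_event_freezable_timeout(thread->done_wait, done->done,
				     msecs_to_jiffies(params->timeout));
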
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 1c6043751dc9..49f09998e5c0 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -136,7 +136,8 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
if (map != EDMA_MF_EDMA_LEGACY &&
map != EDMA_MF_EDMA_UNROLL &&
- map != EDMA_MF_HDMA_COMPAT)
+ map != EDMA_MF_HDMA_COMPAT &&
+ map != EDMA_MF_HDMA_NATIVE)
return;
pdata->mf = map;
@@ -291,6 +292,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
else if (chip->mf == EDMA_MF_HDMA_COMPAT)
pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
+ else if (chip->mf == EDMA_MF_HDMA_NATIVE)
+ pci_dbg(pdev, "Version:\tHDMA Native (0x%x)\n", chip->mf);
else
pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 443b2430466c..4976d7dde080 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -95,7 +95,7 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
}
val = edma_readl_chreg(fsl_chan, ch_csr);
- val |= EDMA_V3_CH_CSR_ERQ;
+ val |= EDMA_V3_CH_CSR_ERQ | EDMA_V3_CH_CSR_EEI;
edma_writel_chreg(fsl_chan, val, ch_csr);
}
@@ -821,7 +821,7 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- int ret;
+ int ret = 0;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_prepare_enable(fsl_chan->clk);
@@ -831,17 +831,29 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
- if (fsl_chan->txirq) {
+ if (fsl_chan->txirq)
ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
fsl_chan->chan_name, fsl_chan);
- if (ret) {
- dma_pool_destroy(fsl_chan->tcd_pool);
- return ret;
- }
- }
+ if (ret)
+ goto err_txirq;
+
+ if (fsl_chan->errirq > 0)
+ ret = request_irq(fsl_chan->errirq, fsl_chan->errirq_handler, IRQF_SHARED,
+ fsl_chan->errirq_name, fsl_chan);
+
+ if (ret)
+ goto err_errirq;
return 0;
+
+err_errirq:
+ if (fsl_chan->txirq)
+ free_irq(fsl_chan->txirq, fsl_chan);
+err_txirq:
+ dma_pool_destroy(fsl_chan->tcd_pool);
+
+ return ret;
}
void fsl_edma_free_chan_resources(struct dma_chan *chan)
@@ -862,6 +874,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
if (fsl_chan->txirq)
free_irq(fsl_chan->txirq, fsl_chan);
+ if (fsl_chan->errirq)
+ free_irq(fsl_chan->errirq, fsl_chan);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
dma_pool_destroy(fsl_chan->tcd_pool);
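
The reworked allocation path above follows the standard kernel unwind idiom: one goto label per acquired resource, released in reverse order of acquisition. Schematically (all names hypothetical):

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	ret = acquire_b();
	if (ret)
		goto err_b;

	ret = acquire_c();
	if (ret)
		goto err_c;

	return 0;

	err_c:
		release_b();
	err_b:
		release_a(a);
		return ret;
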
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 10a5565ddfd7..205a96489094 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -71,6 +71,18 @@
#define EDMA_V3_CH_ES_ERR BIT(31)
#define EDMA_V3_MP_ES_VLD BIT(31)
+#define EDMA_V3_CH_ERR_DBE BIT(0)
+#define EDMA_V3_CH_ERR_SBE BIT(1)
+#define EDMA_V3_CH_ERR_SGE BIT(2)
+#define EDMA_V3_CH_ERR_NCE BIT(3)
+#define EDMA_V3_CH_ERR_DOE BIT(4)
+#define EDMA_V3_CH_ERR_DAE BIT(5)
+#define EDMA_V3_CH_ERR_SOE BIT(6)
+#define EDMA_V3_CH_ERR_SAE BIT(7)
+#define EDMA_V3_CH_ERR_ECX BIT(8)
+#define EDMA_V3_CH_ERR_UCE BIT(9)
+#define EDMA_V3_CH_ERR BIT(31)
+
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
@@ -162,6 +174,7 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[32];
+ char errirq_name[36];
void __iomem *tcd;
void __iomem *mux_addr;
u32 real_count;
@@ -174,7 +187,9 @@ struct fsl_edma_chan {
int priority;
int hw_chanid;
int txirq;
+ int errirq;
irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ irqreturn_t (*errirq_handler)(int irq, void *dev_id);
bool is_rxchan;
bool is_remote;
bool is_multi_fifo;
@@ -208,6 +223,9 @@ struct fsl_edma_desc {
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_TCD64 BIT(15)
+/* All channels' ERR IRQs share one IRQ line */
+#define FSL_EDMA_DRV_ERRIRQ_SHARE BIT(16)
+
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 756d67325db5..97583c7d51a2 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -50,6 +50,83 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void fsl_edma3_err_check(struct fsl_edma_chan *fsl_chan)
+{
+ unsigned int ch_err;
+ u32 val;
+
+ scoped_guard(spinlock, &fsl_chan->vchan.lock) {
+ ch_err = edma_readl_chreg(fsl_chan, ch_es);
+ if (!(ch_err & EDMA_V3_CH_ERR))
+ return;
+
+ edma_writel_chreg(fsl_chan, EDMA_V3_CH_ERR, ch_es);
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+ }
+
+ /* Ignore this interrupt since channel has been disabled already */
+ if (!fsl_chan->edesc)
+ return;
+
+ if (ch_err & EDMA_V3_CH_ERR_DBE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Bus Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SBE)
+ dev_err(&fsl_chan->pdev->dev, "Source Bus Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SGE)
+ dev_err(&fsl_chan->pdev->dev, "Scatter/Gather Configuration Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_NCE)
+ dev_err(&fsl_chan->pdev->dev, "NBYTES/CITER Configuration Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_DOE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Offset Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_DAE)
+ dev_err(&fsl_chan->pdev->dev, "Destination Address Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SOE)
+ dev_err(&fsl_chan->pdev->dev, "Source Offset Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_SAE)
+ dev_err(&fsl_chan->pdev->dev, "Source Address Error interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_ECX)
+ dev_err(&fsl_chan->pdev->dev, "Transfer Canceled interrupt.\n");
+
+ if (ch_err & EDMA_V3_CH_ERR_UCE)
+ dev_err(&fsl_chan->pdev->dev, "Uncorrectable TCD error during channel execution interrupt.\n");
+
+ fsl_chan->status = DMA_ERROR;
+}
+
+static irqreturn_t fsl_edma3_err_handler_per_chan(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+
+ fsl_edma3_err_check(fsl_chan);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma3_err_handler_shared(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ unsigned int ch;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ if (fsl_edma->chan_masked & BIT(ch))
+ continue;
+
+ fsl_edma3_err_check(&fsl_edma->chans[ch]);
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_chan *fsl_chan = dev_id;
@@ -57,7 +134,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
intr = edma_readl_chreg(fsl_chan, ch_int);
if (!intr)
- return IRQ_HANDLED;
+ return IRQ_NONE;
edma_writel_chreg(fsl_chan, 1, ch_int);
@@ -309,7 +386,8 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
- int i;
+ char *errirq_name;
+ int i, ret;
for (i = 0; i < fsl_edma->n_chans; i++) {
@@ -324,6 +402,27 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
return -EINVAL;
fsl_chan->irq_handler = fsl_edma3_tx_handler;
+
+ if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE)) {
+ fsl_chan->errirq = fsl_chan->txirq;
+ fsl_chan->errirq_handler = fsl_edma3_err_handler_per_chan;
+ }
+ }
+
+	/* All channels share one error IRQ line */
+ if (fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE) {
+		/* the last IRQ listed in DT is the error IRQ */
+ fsl_edma->errirq = platform_get_irq_optional(pdev, fsl_edma->n_chans);
+ if (fsl_edma->errirq < 0)
+			return 0; /* DT omits the error IRQ; treat as the no-error-IRQ case */
+
+ errirq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-err",
+ dev_name(&pdev->dev));
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma3_err_handler_shared,
+ 0, errirq_name, fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Can't register eDMA err IRQ.\n");
}
return 0;
@@ -464,7 +563,8 @@ static struct fsl_edma_drvdata imx7ulp_data = {
};
static struct fsl_edma_drvdata imx8qm_data = {
- .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE,
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE
+ | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
@@ -481,14 +581,15 @@ static struct fsl_edma_drvdata imx8ulp_data = {
};
static struct fsl_edma_drvdata imx93_data3 = {
- .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};
static struct fsl_edma_drvdata imx93_data4 = {
- .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4
+ | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
@@ -498,7 +599,7 @@ static struct fsl_edma_drvdata imx93_data4 = {
static struct fsl_edma_drvdata imx95_data5 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
- FSL_EDMA_DRV_TCD64,
+ FSL_EDMA_DRV_TCD64 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x200,
@@ -700,6 +801,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
dev_name(&pdev->dev), i);
+ snprintf(fsl_chan->errirq_name, sizeof(fsl_chan->errirq_name),
+ "%s-CH%02d-err", dev_name(&pdev->dev), i);
+
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
fsl_chan->srcid = 0;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b5e7d18b9766..9b126a260267 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1226,6 +1226,8 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels);
+	/* The number of DMA address bits supported by this device. */
+ fdev->addr_bits = (long)device_get_match_data(fdev->dev);
/* ioremap the registers for use */
fdev->regs = of_iomap(op->dev.of_node, 0);
@@ -1254,7 +1256,7 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+ dma_set_mask(&(op->dev), DMA_BIT_MASK(fdev->addr_bits));
platform_set_drvdata(op, fdev);
@@ -1387,10 +1389,20 @@ static const struct dev_pm_ops fsldma_pm_ops = {
};
#endif
+/* The .data field holds the supported DMA address width in bits. */
static const struct of_device_id fsldma_of_ids[] = {
- { .compatible = "fsl,elo3-dma", },
- { .compatible = "fsl,eloplus-dma", },
- { .compatible = "fsl,elo-dma", },
+ {
+ .compatible = "fsl,elo3-dma",
+ .data = (void *)40,
+ },
+ {
+ .compatible = "fsl,eloplus-dma",
+ .data = (void *)36,
+ },
+ {
+ .compatible = "fsl,elo-dma",
+ .data = (void *)32,
+ },
{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);
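
Stashing a plain integer in the of_device_id .data pointer avoids a per-variant data struct when only one number differs between compatibles; device_get_match_data() hands it back in probe. The retrieval side, matching the probe hunk above (fdev->dev is &op->dev):

	/* .data was set to (void *)40 / 36 / 32 in fsldma_of_ids */
	fdev->addr_bits = (long)device_get_match_data(fdev->dev);
	dma_set_mask(fdev->dev, DMA_BIT_MASK(fdev->addr_bits));
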
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 308bed0a560a..d7b7a3138b85 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -124,6 +124,7 @@ struct fsldma_device {
struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */
+ int addr_bits; /* DMA addressing bits supported */
};
/* Define macros for fsldma_chan->feature property */
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index ff94ee892339..7e4715f92773 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -222,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_wq *wq;
struct device *dev, *fdev;
int rc = 0;
- struct iommu_sva *sva;
+ struct iommu_sva *sva = NULL;
unsigned int pasid;
struct idxd_cdev *idxd_cdev;
@@ -317,7 +317,7 @@ failed_set_pasid:
if (device_user_pasid_enabled(idxd))
idxd_xa_pasid_remove(ctx);
failed_get_pasid:
- if (device_user_pasid_enabled(idxd))
+ if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
iommu_sva_unbind_device(sva);
failed:
mutex_unlock(&wq->wq_lock);
@@ -349,7 +349,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- drain_workqueue(wq->wq);
+ if (wq->wq)
+ drain_workqueue(wq->wq);
+
mutex_unlock(&evl->lock);
}
@@ -407,6 +409,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (current->mm != ctx->mm)
+ return -EPERM;
+
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@@ -439,10 +444,12 @@ static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
* DSA devices are capable of indirect ("batch") command submission.
* On devices where direct user submissions are not safe, we cannot
* allow this since there is no good way for us to verify these
- * indirect commands.
+ * indirect commands. Narrow the restriction of operations with the
+ * BATCH opcode to only DSA version 1 devices.
*/
if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
- !wq->idxd->user_submission_safe)
+ wq->idxd->hw.version == DEVICE_VERSION_1 &&
+ !wq->idxd->user_submission_safe)
return -EINVAL;
/*
* As per the programming specification, the completion address must be
@@ -473,6 +480,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
ssize_t written = 0;
int i;
+ if (current->mm != ctx->mm)
+ return -EPERM;
+
for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
int rc = idxd_submit_user_descriptor(ctx, udesc + i);
@@ -493,6 +503,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_device *idxd = wq->idxd;
__poll_t out = 0;
+ if (current->mm != ctx->mm)
+ return POLLNVAL;
+
poll_wait(filp, &wq->err_queue, wait);
spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
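
The added current->mm checks bind each open file to the address space that opened it: if the fd reaches another process (fork, or SCM_RIGHTS fd passing), mmap and write fail with -EPERM and poll reports POLLNVAL rather than acting on the wrong mm. The guard in isolation (assuming ctx->mm was recorded at open time):

	if (current->mm != ctx->mm)	/* fd used from a foreign process */
		return -EPERM;
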
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 214b8039439f..74e6695881e6 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -19,7 +19,6 @@
#define IDXD_DRIVER_VERSION "1.00"
-extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;
struct idxd_wq;
@@ -171,7 +170,6 @@ struct idxd_cdev {
#define DRIVER_NAME_SIZE 128
-#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index fca1d2924999..80355d03004d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -155,6 +155,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
pci_free_irq_vectors(pdev);
}
+static void idxd_clean_wqs(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ struct device *conf_dev;
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+ bitmap_free(wq->opcap_bmap);
+ kfree(wq->wqcfg);
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ kfree(wq);
+ }
+ bitmap_free(idxd->wq_enable_map);
+ kfree(idxd->wqs);
+}
+
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -169,8 +188,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
- kfree(idxd->wqs);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_bitmap;
}
for (i = 0; i < idxd->max_wqs; i++) {
@@ -189,10 +208,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
- if (rc < 0) {
- put_device(conf_dev);
+ if (rc < 0)
goto err;
- }
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
@@ -203,7 +220,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
- put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
@@ -211,9 +227,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
- put_device(conf_dev);
rc = -ENOMEM;
- goto err;
+ goto err_opcap_bmap;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@@ -224,15 +239,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
- err:
+err_opcap_bmap:
+ kfree(wq->wqcfg);
+
+err:
+ put_device(conf_dev);
+ kfree(wq);
+
while (--i >= 0) {
wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+ bitmap_free(wq->opcap_bmap);
+ kfree(wq->wqcfg);
conf_dev = wq_confdev(wq);
put_device(conf_dev);
+ kfree(wq);
+
}
+ bitmap_free(idxd->wq_enable_map);
+
+err_bitmap:
+ kfree(idxd->wqs);
+
return rc;
}
+static void idxd_clean_engines(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct device *conf_dev;
+ int i;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ conf_dev = engine_confdev(engine);
+ put_device(conf_dev);
+ kfree(engine);
+ }
+ kfree(idxd->engines);
+}
+
static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
@@ -263,6 +309,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
put_device(conf_dev);
+ kfree(engine);
goto err;
}
@@ -276,10 +323,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
+ kfree(engine);
}
+ kfree(idxd->engines);
+
return rc;
}
+static void idxd_clean_groups(struct idxd_device *idxd)
+{
+ struct idxd_group *group;
+ int i;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ kfree(group);
+ }
+ kfree(idxd->groups);
+}
+
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -310,6 +373,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
put_device(conf_dev);
+ kfree(group);
goto err;
}
@@ -334,20 +398,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
while (--i >= 0) {
group = idxd->groups[i];
put_device(group_confdev(group));
+ kfree(group);
}
+ kfree(idxd->groups);
+
return rc;
}
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
- int i;
-
- for (i = 0; i < idxd->max_groups; i++)
- put_device(group_confdev(idxd->groups[i]));
- for (i = 0; i < idxd->max_engines; i++)
- put_device(engine_confdev(idxd->engines[i]));
- for (i = 0; i < idxd->max_wqs; i++)
- put_device(wq_confdev(idxd->wqs[i]));
+ idxd_clean_groups(idxd);
+ idxd_clean_engines(idxd);
+ idxd_clean_wqs(idxd);
destroy_workqueue(idxd->wq);
}
@@ -390,7 +452,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int rc, i;
+ int rc;
init_waitqueue_head(&idxd->cmd_waitq);
@@ -421,14 +483,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_evl:
destroy_workqueue(idxd->wq);
err_wkq_create:
- for (i = 0; i < idxd->max_groups; i++)
- put_device(group_confdev(idxd->groups[i]));
+ idxd_clean_groups(idxd);
err_group:
- for (i = 0; i < idxd->max_engines; i++)
- put_device(engine_confdev(idxd->engines[i]));
+ idxd_clean_engines(idxd);
err_engine:
- for (i = 0; i < idxd->max_wqs; i++)
- put_device(wq_confdev(idxd->wqs[i]));
+ idxd_clean_wqs(idxd);
err_wqs:
return rc;
}
@@ -528,6 +587,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}
+static void idxd_free(struct idxd_device *idxd)
+{
+ if (!idxd)
+ return;
+
+ put_device(idxd_confdev(idxd));
+ bitmap_free(idxd->opcap_bmap);
+ ida_free(&idxd_ida, idxd->id);
+ kfree(idxd);
+}
+
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
@@ -545,28 +615,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
- return NULL;
+ goto err_ida;
idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
- if (!idxd->opcap_bmap) {
- ida_free(&idxd_ida, idxd->id);
- return NULL;
- }
+ if (!idxd->opcap_bmap)
+ goto err_opcap;
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
conf_dev->type = idxd->data->dev_type;
rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
- if (rc < 0) {
- put_device(conf_dev);
- return NULL;
- }
+ if (rc < 0)
+ goto err_name;
spin_lock_init(&idxd->dev_lock);
spin_lock_init(&idxd->cmd_lock);
return idxd;
+
+err_name:
+ put_device(conf_dev);
+ bitmap_free(idxd->opcap_bmap);
+err_opcap:
+ ida_free(&idxd_ida, idxd->id);
+err_ida:
+ kfree(idxd);
+
+ return NULL;
}
static int idxd_enable_system_pasid(struct idxd_device *idxd)
@@ -626,27 +702,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd)
idxd->pasid = IOMMU_PASID_INVALID;
}
-static int idxd_enable_sva(struct pci_dev *pdev)
-{
- int ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- return ret;
-
- ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- if (ret)
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-
- return ret;
-}
-
-static void idxd_disable_sva(struct pci_dev *pdev)
-{
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
-}
-
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -661,17 +716,13 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- if (idxd_enable_sva(pdev)) {
- dev_warn(dev, "Unable to turn on user SVA feature.\n");
- } else {
- set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+ set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- rc = idxd_enable_system_pasid(idxd);
- if (rc)
- dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
- else
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
- }
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -709,8 +760,6 @@ static int idxd_probe(struct idxd_device *idxd)
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
return rc;
}
@@ -721,8 +770,6 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(idxd->pdev);
}
/*
@@ -1190,7 +1237,7 @@ int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
- put_device(idxd_confdev(idxd));
+ idxd_free(idxd);
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
@@ -1232,7 +1279,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
- struct idxd_irq_entry *irq_entry;
idxd_unregister_devices(idxd);
/*
@@ -1245,20 +1291,12 @@ static void idxd_remove(struct pci_dev *pdev)
get_device(idxd_confdev(idxd));
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
- if (device_pasid_enabled(idxd))
- idxd_disable_system_pasid(idxd);
idxd_device_remove_debugfs(idxd);
-
- irq_entry = idxd_get_ie(idxd, 0);
- free_irq(irq_entry->vector, irq_entry);
- pci_free_irq_vectors(pdev);
+ idxd_cleanup(idxd);
pci_iounmap(pdev, idxd->reg_base);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
- pci_disable_device(pdev);
- destroy_workqueue(idxd->wq);
- perfmon_pmu_remove(idxd);
put_device(idxd_confdev(idxd));
+ idxd_free(idxd);
+ pci_disable_device(pdev);
}
static struct pci_driver idxd_pci_driver = {
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 6af493f6ba77..9f0701021af0 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1208,9 +1208,11 @@ static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *
/* On systems where direct user submissions are not safe, we need to clear out
* the BATCH capability from the capability mask in sysfs since we cannot support
- * that command on such systems.
+ * that command on such systems. Narrow the restriction of operations with the
+ * BATCH opcode to only DSA version 1 devices.
*/
- if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
+ if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe &&
+ confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1)
clear_bit(DSA_OPCODE_BATCH % 64, &val);
pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index b96cc0a83872..ba434657059a 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -337,7 +337,8 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
static void imxdma_watchdog(struct timer_list *t)
{
- struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
+ struct imxdma_channel *imxdmac = timer_container_of(imxdmac, t,
+ watchdog);
struct imxdma_engine *imxdma = imxdmac->imxdma;
int channel = imxdmac->channel;
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index c9aba2304de7..5d3c0ae6b342 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -10,7 +10,7 @@
#include <linux/interrupt.h>
#include <linux/dca.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 06a813cc7641..b8fff8333aef 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -901,7 +901,8 @@ static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
void ioat_timer_event(struct timer_list *t)
{
- struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
+ struct ioatdma_chan *ioat_chan = timer_container_of(ioat_chan, t,
+ timer);
dma_addr_t phys_complete;
u64 status;
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index d5ddb4e30e71..47c8adfdc155 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -420,15 +420,11 @@ static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c,
{
struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c);
struct virt_dma_desc *vd;
- unsigned long flags;
- spin_lock_irqsave(&cvc->pc->lock, flags);
list_for_each_entry(vd, &cvc->pc->queue, node)
if (vd->tx.cookie == cookie) {
- spin_unlock_irqrestore(&cvc->pc->lock, flags);
return vd;
}
- spin_unlock_irqrestore(&cvc->pc->lock, flags);
list_for_each_entry(vd, &cvc->vc.desc_issued, node)
if (vd->tx.cookie == cookie)
@@ -452,9 +448,11 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
if (ret == DMA_COMPLETE || !txstate)
return ret;
+ spin_lock_irqsave(&cvc->pc->lock, flags);
spin_lock_irqsave(&cvc->vc.lock, flags);
vd = mtk_cqdma_find_active_desc(c, cookie);
spin_unlock_irqrestore(&cvc->vc.lock, flags);
+ spin_unlock_irqrestore(&cvc->pc->lock, flags);
if (vd) {
cvd = to_cqdma_vdesc(vd);
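
Hoisting pc->lock out of mtk_cqdma_find_active_desc() gives every path one consistent order, pc->lock then vc.lock, instead of taking pc->lock inside a region other code may enter with vc.lock already held; inconsistent ordering is the classic ABBA deadlock shape:

	/* CPU0			CPU1
	 * lock(A);		lock(B);
	 * lock(B); <- waits	lock(A); <- waits: deadlock
	 *
	 * Fix: all paths take A before B.
	 */

One caveat in the hunk above: both spin_lock_irqsave() calls reuse a single flags variable, so the inner save overwrites the outer one with interrupts already disabled, and the final restore leaves them disabled regardless of the caller's state. A safer shape (sketch, not what the patch does) would nest a plain spin_lock() for the inner lock:

	spin_lock_irqsave(&cvc->pc->lock, flags);	/* outer: saves caller state */
	spin_lock(&cvc->vc.lock);			/* inner: irqs already off */
	vd = mtk_cqdma_find_active_desc(c, cookie);
	spin_unlock(&cvc->vc.lock);
	spin_unlock_irqrestore(&cvc->pc->lock, flags);
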
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 9235db551026..1f687b08d6b8 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -14,6 +14,7 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -89,8 +90,14 @@ struct rz_dmac_chan {
#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
+struct rz_dmac_icu {
+ struct platform_device *pdev;
+ u8 dmac_index;
+};
+
struct rz_dmac {
struct dma_device engine;
+ struct rz_dmac_icu icu;
struct device *dev;
struct reset_control *rstc;
void __iomem *base;
@@ -99,6 +106,8 @@ struct rz_dmac {
unsigned int n_channels;
struct rz_dmac_chan *channels;
+ bool has_icu;
+
DECLARE_BITMAP(modules, 1024);
};
@@ -167,6 +176,9 @@ struct rz_dmac {
#define RZ_DMAC_MAX_CHANNELS 16
#define DMAC_NR_LMDESC 64
+/* RZ/V2H ICU related */
+#define RZV2H_MAX_DMAC_INDEX 4
+
/*
* -----------------------------------------------------------------------------
* Device access
@@ -324,7 +336,13 @@ static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
lmdesc->chext = 0;
lmdesc->header = HEADER_LV;
- rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index,
+ RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ }
channel->chcfg = chcfg;
channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
@@ -375,7 +393,13 @@ static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
channel->lmdesc.tail = lmdesc;
- rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index, channel->mid_rid);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
+ }
+
channel->chctrl = CHCTRL_SETEN;
}
@@ -647,7 +671,13 @@ static void rz_dmac_device_synchronize(struct dma_chan *chan)
if (ret < 0)
dev_warn(dmac->dev, "DMA Timeout");
- rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ if (dmac->has_icu) {
+ rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
+ channel->index,
+ RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
+ } else {
+ rz_dmac_set_dmars_register(dmac, channel->index, 0);
+ }
}
/*
@@ -748,7 +778,8 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
+ return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
+ ofdma->of_node);
}
/*
@@ -823,6 +854,38 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
return 0;
}
+static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ uint32_t dmac_index;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
+ if (ret == -ENOENT)
+ return 0;
+ if (ret)
+ return ret;
+
+ dmac->has_icu = true;
+
+ dmac->icu.pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!dmac->icu.pdev) {
+ dev_err(dev, "ICU device not found.\n");
+ return -ENODEV;
+ }
+
+ dmac_index = args.args[0];
+ if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
+ dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
+ return -EINVAL;
+ }
+ dmac->icu.dmac_index = dmac_index;
+
+ return 0;
+}
+
static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -839,7 +902,7 @@ static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
return -EINVAL;
}
- return 0;
+ return rz_dmac_parse_of_icu(dev, dmac);
}
static int rz_dmac_probe(struct platform_device *pdev)
@@ -873,9 +936,11 @@ static int rz_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
- dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dmac->ext_base))
- return PTR_ERR(dmac->ext_base);
+ if (!dmac->has_icu) {
+ dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dmac->ext_base))
+ return PTR_ERR(dmac->ext_base);
+ }
/* Register interrupt handler for error */
irq = platform_get_irq_byname(pdev, irqname);
@@ -990,9 +1055,12 @@ static void rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+ platform_device_put(dmac->icu.pdev);
}
static const struct of_device_id of_rz_dmac_match[] = {
+ { .compatible = "renesas,r9a09g057-dmac", },
{ .compatible = "renesas,rz-dmac", },
{ /* Sentinel */ }
};
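In the rz-dmac diff, rz_dmac_parse_of_icu() treats the "renesas,icu" phandle as optional: -ENOENT (property absent) selects the legacy DMARS register path, while any other error is fatal. A condensed sketch of that optional-resource idiom (hypothetical property and field names):

ret = of_parse_phandle_with_fixed_args(np, "vendor,helper", 1, 0, &args);
if (ret == -ENOENT)
	return 0;		/* property absent: use the legacy path */
if (ret)
	return ret;		/* property present but malformed: fail */
priv->has_helper = true;	/* hypothetical flag, as with has_icu above */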
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index ce80ac4b1a1b..fad896ff29a2 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -27,10 +27,10 @@
#define ADMA_CH_INT_CLEAR 0x1c
#define ADMA_CH_CTRL 0x24
-#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
+#define ADMA_CH_CTRL_DIR(val, mask, shift) (((val) & (mask)) << (shift))
#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
-#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
+#define ADMA_CH_CTRL_MODE_CONTINUOUS(shift) (2 << (shift))
#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0
@@ -41,15 +41,27 @@
#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
-#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 4)
+
+#define ADMA_GLOBAL_CH_CONFIG 0x400
+#define ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0x7)
+#define ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 8)
#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30
#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70
#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84
+#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 0x44
+#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 0x48
+#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 0x100
+#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 0x104
+#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 0x180
+#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 0x184
+#define TEGRA264_ADMA_GLOBAL_PAGE_OFFSET 0x8
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
+#define ADMA_GLOBAL_CH_FIFO_CTRL 0x300
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@@ -73,36 +85,48 @@ struct tegra_adma;
* @adma_get_burst_config: Function callback used to set DMA burst size.
* @global_reg_offset: Register offset of DMA global register.
* @global_int_clear: Register offset of DMA global interrupt clear.
+ * @global_ch_fifo_base: Global channel fifo ctrl base offset
+ * @global_ch_config_base: Global channel config base offset
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
+ * @ch_dir_shift: Channel direction bit position.
+ * @ch_mode_shift: Channel mode bit position.
* @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_tc_offset_diff: Register offset difference from the TC register onwards on Tegra264
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
+ * @ch_config: Outstanding and WRR config values
* @ch_req_mask: Mask for Tx or Rx channel select.
+ * @ch_dir_mask: Mask for channel direction.
* @ch_req_max: Maximum number of Tx or Rx channels available.
* @ch_reg_size: Size of DMA channel register space.
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
* @max_page: Maximum ADMA Channel Page.
- * @has_outstanding_reqs: If DMA channel can have outstanding requests.
* @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
unsigned int global_reg_offset;
unsigned int global_int_clear;
+ unsigned int global_ch_fifo_base;
+ unsigned int global_ch_config_base;
unsigned int ch_req_tx_shift;
unsigned int ch_req_rx_shift;
+ unsigned int ch_dir_shift;
+ unsigned int ch_mode_shift;
unsigned int ch_base_offset;
+ unsigned int ch_tc_offset_diff;
unsigned int ch_fifo_ctrl;
+ unsigned int ch_config;
unsigned int ch_req_mask;
+ unsigned int ch_dir_mask;
unsigned int ch_req_max;
unsigned int ch_reg_size;
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
unsigned int max_page;
- bool has_outstanding_reqs;
void (*set_global_pg_config)(struct tegra_adma *tdma);
};
@@ -112,6 +136,7 @@ struct tegra_adma_chip_data {
struct tegra_adma_chan_regs {
unsigned int ctrl;
unsigned int config;
+ unsigned int global_config;
unsigned int src_addr;
unsigned int trg_addr;
unsigned int fifo_ctrl;
@@ -150,6 +175,9 @@ struct tegra_adma_chan {
/* Transfer count and position info */
unsigned int tx_buf_count;
unsigned int tx_buf_pos;
+
+ unsigned int global_ch_fifo_offset;
+ unsigned int global_ch_config_offset;
};
/*
@@ -246,6 +274,29 @@ static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
}
+static void tegra264_adma_global_page_config(struct tegra_adma *tdma)
+{
+ u32 global_page_offset = tdma->ch_page_no * TEGRA264_ADMA_GLOBAL_PAGE_OFFSET;
+
+ /* If the default page (page1) is not used, then clear page1 registers */
+ if (tdma->ch_page_no) {
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0, 0);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1, 0);
+ }
+
+ /* Program global registers for selected page */
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 + global_page_offset, 0x1);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 + global_page_offset, 0xffffffff);
+ tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 + global_page_offset, 0x1);
+}
+
static int tegra_adma_init(struct tegra_adma *tdma)
{
u32 status;
@@ -404,11 +455,21 @@ static void tegra_adma_start(struct tegra_adma_chan *tdc)
tdc->tx_buf_pos = 0;
tdc->tx_buf_count = 0;
- tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
+ tdma_ch_write(tdc, ADMA_CH_TC - tdc->tdma->cdata->ch_tc_offset_diff, ch_regs->tc);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
- tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
- tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
- tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
+ ch_regs->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
+ ch_regs->trg_addr);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ else if (tdc->global_ch_fifo_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_regs->fifo_ctrl);
+
+ if (tdc->global_ch_config_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_regs->global_config);
+
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);
/* Start ADMA */
@@ -421,7 +482,8 @@ static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
struct tegra_adma_desc *desc = tdc->desc;
unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
- unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
+ unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS -
+ tdc->tdma->cdata->ch_tc_offset_diff);
unsigned int periods_remaining;
/*
@@ -627,13 +689,16 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
return -EINVAL;
}
- ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
- ADMA_CH_CTRL_MODE_CONTINUOUS |
+ ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir, cdata->ch_dir_mask,
+ cdata->ch_dir_shift) |
+ ADMA_CH_CTRL_MODE_CONTINUOUS(cdata->ch_mode_shift) |
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
- ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
- if (cdata->has_outstanding_reqs)
- ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
+
+ if (cdata->global_ch_config_base)
+ ch_regs->global_config |= cdata->ch_config;
+ else
+ ch_regs->config |= cdata->ch_config;
/*
* 'sreq_index' represents the current ADMAIF channel number and as per
@@ -788,12 +853,23 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
/* skip if channel is not active */
if (!ch_reg->cmd)
continue;
- ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
- ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
- ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
+ ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff);
+ ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR -
+ tdma->cdata->ch_tc_offset_diff);
+ ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR -
+ tdma->cdata->ch_tc_offset_diff);
ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
- ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+
+ if (tdc->global_ch_config_offset)
+ ch_reg->global_config = tdma_read(tdc->tdma, tdc->global_ch_config_offset);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+ else if (tdc->global_ch_fifo_offset)
+ ch_reg->fifo_ctrl = tdma_read(tdc->tdma, tdc->global_ch_fifo_offset);
+
ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
+
}
clk_disable:
@@ -832,12 +908,23 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
continue;
- tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
- tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
- tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
+ tdma_ch_write(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff, ch_reg->tc);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdma->cdata->ch_tc_offset_diff,
+ ch_reg->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdma->cdata->ch_tc_offset_diff,
+ ch_reg->trg_addr);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
- tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+
+ if (!tdc->tdma->cdata->global_ch_fifo_base)
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+ else if (tdc->global_ch_fifo_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_reg->fifo_ctrl);
+
+ if (tdc->global_ch_config_offset)
+ tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_reg->global_config);
+
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
+
tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
}
@@ -848,17 +935,23 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.adma_get_burst_config = tegra210_adma_get_burst_config,
.global_reg_offset = 0xc00,
.global_int_clear = 0x20,
+ .global_ch_fifo_base = 0,
+ .global_ch_config_base = 0,
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
+ .ch_dir_shift = 12,
+ .ch_mode_shift = 8,
.ch_base_offset = 0,
+ .ch_tc_offset_diff = 0,
+ .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1),
.ch_req_mask = 0xf,
+ .ch_dir_mask = 0xf,
.ch_req_max = 10,
.ch_reg_size = 0x80,
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
.max_page = 0,
- .has_outstanding_reqs = false,
.set_global_pg_config = NULL,
};
@@ -866,23 +959,56 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.adma_get_burst_config = tegra186_adma_get_burst_config,
.global_reg_offset = 0,
.global_int_clear = 0x402c,
+ .global_ch_fifo_base = 0,
+ .global_ch_config_base = 0,
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
+ .ch_dir_shift = 12,
+ .ch_mode_shift = 8,
.ch_base_offset = 0x10000,
+ .ch_tc_offset_diff = 0,
+ .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1) |
+ TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8),
.ch_req_mask = 0x1f,
+ .ch_dir_mask = 0xf,
.ch_req_max = 20,
.ch_reg_size = 0x100,
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
.max_page = 4,
- .has_outstanding_reqs = true,
.set_global_pg_config = tegra186_adma_global_page_config,
};
+static const struct tegra_adma_chip_data tegra264_chip_data = {
+ .adma_get_burst_config = tegra186_adma_get_burst_config,
+ .global_reg_offset = 0,
+ .global_int_clear = 0x800c,
+ .global_ch_fifo_base = ADMA_GLOBAL_CH_FIFO_CTRL,
+ .global_ch_config_base = ADMA_GLOBAL_CH_CONFIG,
+ .ch_req_tx_shift = 26,
+ .ch_req_rx_shift = 20,
+ .ch_dir_shift = 10,
+ .ch_mode_shift = 7,
+ .ch_base_offset = 0x10000,
+ .ch_tc_offset_diff = 4,
+ .ch_config = ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(1) |
+ ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(8),
+ .ch_req_mask = 0x3f,
+ .ch_dir_mask = 7,
+ .ch_req_max = 32,
+ .ch_reg_size = 0x100,
+ .nr_channels = 64,
+ .ch_fifo_size_mask = 0x7f,
+ .sreq_index_offset = 0,
+ .max_page = 10,
+ .set_global_pg_config = tegra264_adma_global_page_config,
+};
+
static const struct of_device_id tegra_adma_of_match[] = {
{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
+ { .compatible = "nvidia,tegra264-adma", .data = &tegra264_chip_data },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
@@ -985,6 +1111,15 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);
+ if (tdma->base_addr) {
+ if (cdata->global_ch_fifo_base)
+ tdc->global_ch_fifo_offset = cdata->global_ch_fifo_base + (4 * i);
+
+ if (cdata->global_ch_config_base)
+ tdc->global_ch_config_offset =
+ cdata->global_ch_config_base + (4 * i);
+ }
+
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
ret = tdc->irq ?: -ENXIO;
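Two small conventions carry the Tegra264 support in this diff: per-channel FIFO/config words live in a global block at base + 4 * channel, and the registers from ADMA_CH_TC onwards sit 4 bytes lower than on earlier chips, so the common code subtracts a per-chip ch_tc_offset_diff. A toy sketch of both calculations (offset values hypothetical):

#include <stdio.h>

#define ADMA_CH_TC		0x44	/* hypothetical legacy offset */
#define GLOBAL_CH_CONFIG	0x400	/* one 32-bit word per channel */

int main(void)
{
	unsigned int tc_diff = 4;	/* Tegra264-style shift */

	for (int ch = 0; ch < 3; ch++)
		printf("ch%d: TC reg at %#x, global config at %#x\n",
		       ch, ADMA_CH_TC - tc_diff, GLOBAL_CH_CONFIG + 4 * ch);
	return 0;
}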
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index b223a7aacb0c..aa2dc762140f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
+ unsigned long flags;
while (1) {
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
@@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
- dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;
@@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}
- chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
@@ -5618,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
-
+ if (!uc->name)
+ return -ENOMEM;
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);
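The udma_check_tx_completion() change above wraps each polling iteration in vc.lock but must drop it before usleep_range(), since sleeping while holding a spinlock is forbidden. A minimal sketch of the lock/drop/sleep/retake shape (hypothetical type and field names):

#include <linux/spinlock.h>
#include <linux/delay.h>

/* Hypothetical channel type for the sketch. */
struct my_chan {
	spinlock_t lock;
	bool drained;
};

static void poll_until_drained(struct my_chan *uc)
{
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(&uc->lock, flags);
		if (uc->drained)
			break;			/* leave with the lock held */
		spin_unlock_irqrestore(&uc->lock, flags);
		usleep_range(100, 110);		/* never sleep under a spinlock */
	}
	/* ...finish up while still serialized... */
	spin_unlock_irqrestore(&uc->lock, flags);
}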
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 3ad44afd0e74..a34d8f0ceed8 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2909,6 +2909,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return -EINVAL;
}
+ xdev->common.directions |= chan->direction;
+
/* Request the interrupt */
chan->irq = of_irq_get(node, chan->tdest);
if (chan->irq < 0)
@@ -3115,6 +3117,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
}
+ dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len);
+
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->has_axistream_connected =
of_property_read_bool(node, "xlnx,axistream-connected");
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index dcd7008fe06b..cae52c654a15 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1746,9 +1746,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR)
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
else
- writel(priv->ce_set_mask, set_addr);
+ writew(priv->ce_set_mask, set_addr);
/* Ensure the interrupt test bits are set */
wmb();
@@ -1778,7 +1778,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,
local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
- writel(priv->ue_set_mask, set_addr);
+ writew(priv->ue_set_mask, set_addr);
} else {
/* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
@@ -2131,8 +2131,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
- edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
- &a10_eccmgr_ic_ops, edac);
+ edac->domain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
+ 64, &a10_eccmgr_ic_ops, edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 90f0eb7cc5b9..58b1482a0fbb 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2,8 +2,8 @@
#include <linux/ras.h>
#include <linux/string_choices.h>
#include "amd64_edac.h"
-#include <asm/amd_nb.h>
-#include <asm/amd_node.h>
+#include <asm/amd/nb.h>
+#include <asm/amd/node.h>
static struct edac_pci_ctl_info *pci_ctl;
@@ -2942,13 +2942,13 @@ static void dct_read_mc_regs(struct amd64_pvt *pvt)
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
* those are Read-As-Zero.
*/
- rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+ rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem);
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* Check first whether TOP_MEM2 is enabled: */
- rdmsrl(MSR_AMD64_SYSCFG, msr_val);
+ rdmsrq(MSR_AMD64_SYSCFG, msr_val);
if (msr_val & BIT(21)) {
- rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+ rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else {
edac_dbg(0, " TOP_MEM2 disabled\n");
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
index 4942a240c30f..ae3bb7afa103 100644
--- a/drivers/edac/bluefield_edac.c
+++ b/drivers/edac/bluefield_edac.c
@@ -199,8 +199,10 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
* error without the detailed information.
*/
err = bluefield_edac_readl(priv, MLXBF_SYNDROM, &dram_syndrom);
- if (err)
+ if (err) {
dev_err(priv->dev, "DRAM syndrom read failed.\n");
+ return;
+ }
serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
@@ -213,20 +215,26 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
}
err = bluefield_edac_readl(priv, MLXBF_ADD_INFO, &dram_additional_info);
- if (err)
+ if (err) {
dev_err(priv->dev, "DRAM additional info read failed.\n");
+ return;
+ }
err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_0, &edea0);
- if (err)
+ if (err) {
dev_err(priv->dev, "Error addr 0 read failed.\n");
+ return;
+ }
err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_1, &edea1);
- if (err)
+ if (err) {
dev_err(priv->dev, "Error addr 1 read failed.\n");
+ return;
+ }
ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
@@ -250,8 +258,10 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
return;
err = bluefield_edac_readl(priv, MLXBF_ECC_CNT, &ecc_count);
- if (err)
+ if (err) {
dev_err(priv->dev, "ECC count read failed.\n");
+ return;
+ }
single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 355a977019e9..a3fca2567752 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -72,12 +72,6 @@
#define I10NM_SAD_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg) GET_BITFIELD(reg, 5, 5)
-#define RETRY_RD_ERR_LOG_UC BIT(1)
-#define RETRY_RD_ERR_LOG_NOOVER BIT(14)
-#define RETRY_RD_ERR_LOG_EN BIT(15)
-#define RETRY_RD_ERR_LOG_NOOVER_UC (BIT(14) | BIT(1))
-#define RETRY_RD_ERR_LOG_OVER_UC_V (BIT(2) | BIT(1) | BIT(0))
-
static struct list_head *i10nm_edac_list;
static struct res_config *res_cfg;
@@ -85,227 +79,319 @@ static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;
-static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
-static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
-static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
-static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
-static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
-static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
-static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
-static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
-static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
-
-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
- u32 *offsets_scrub, u32 *offsets_demand,
- u32 *offsets_demand2)
+static struct reg_rrl icx_reg_rrl_ddr = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8},
+ {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0},
+ },
+ .widths = {4, 4, 4, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_ddr = {
+ .set_num = 3,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND, FRE_DEMAND},
+ .offsets = {
+ {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8},
+ {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0},
+ {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x22c18, 0x22c1c, 0x22c20, 0x22c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_hbm_pch0 = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8},
+ {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x2818, 0x281c, 0x2820, 0x2824},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl spr_reg_rrl_hbm_pch1 = {
+ .set_num = 2,
+ .reg_num = 6,
+ .modes = {LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8},
+ {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(13),
+ .noover_mask = BIT(14),
+ .en_mask = BIT(15),
+
+ .cecnt_num = 4,
+ .cecnt_offsets = {0x2c18, 0x2c1c, 0x2c20, 0x2c24},
+ .cecnt_widths = {4, 4, 4, 4},
+};
+
+static struct reg_rrl gnr_reg_rrl_ddr = {
+ .set_num = 4,
+ .reg_num = 6,
+ .modes = {FRE_SCRUB, FRE_DEMAND, LRE_SCRUB, LRE_DEMAND},
+ .offsets = {
+ {0x2f10, 0x2f20, 0x2f30, 0x2f50, 0x2f60, 0xba0},
+ {0x2f14, 0x2f24, 0x2f38, 0x2f54, 0x2f64, 0xba8},
+ {0x2f18, 0x2f28, 0x2f40, 0x2f58, 0x2f68, 0xbb0},
+ {0x2f1c, 0x2f2c, 0x2f48, 0x2f5c, 0x2f6c, 0xbb8},
+ },
+ .widths = {4, 4, 8, 4, 4, 8},
+ .v_mask = BIT(0),
+ .uc_mask = BIT(1),
+ .over_mask = BIT(2),
+ .en_patspr_mask = BIT(14),
+ .noover_mask = BIT(15),
+ .en_mask = BIT(12),
+
+ .cecnt_num = 8,
+ .cecnt_offsets = {0x2c10, 0x2c14, 0x2c18, 0x2c1c, 0x2c20, 0x2c24, 0x2c28, 0x2c2c},
+ .cecnt_widths = {4, 4, 4, 4, 4, 4, 4, 4},
+};
+
+static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
{
- u32 s, d, d2;
+ switch (width) {
+ case 4:
+ return I10NM_GET_REG32(imc, chan, offset);
+ case 8:
+ return I10NM_GET_REG64(imc, chan, offset);
+ default:
+ i10nm_printk(KERN_ERR, "Invalid read RRL 0x%x width %d\n", offset, width);
+ return 0;
+ }
+}
+
+static void write_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width, u64 val)
+{
+ switch (width) {
+ case 4:
+ return I10NM_SET_REG32(imc, chan, offset, (u32)val);
+ default:
+ i10nm_printk(KERN_ERR, "Invalid write RRL 0x%x width %d\n", offset, width);
+ }
+}
- s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
- d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
- if (offsets_demand2)
- d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
+static void enable_rrl(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+ int rrl_set, bool enable, u32 *rrl_ctl)
+{
+ enum rrl_mode mode = rrl->modes[rrl_set];
+ u32 offset = rrl->offsets[rrl_set][0], v;
+ u8 width = rrl->widths[0];
+ bool first, scrub;
+
+ /* First or last read error. */
+ first = (mode == FRE_SCRUB || mode == FRE_DEMAND);
+ /* Patrol scrub or on-demand read error. */
+ scrub = (mode == FRE_SCRUB || mode == LRE_SCRUB);
+
+ v = read_imc_reg(imc, chan, offset, width);
if (enable) {
- /* Save default configurations */
- imc->chan[chan].retry_rd_err_log_s = s;
- imc->chan[chan].retry_rd_err_log_d = d;
- if (offsets_demand2)
- imc->chan[chan].retry_rd_err_log_d2 = d2;
-
- s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
- s |= RETRY_RD_ERR_LOG_EN;
- d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
- d |= RETRY_RD_ERR_LOG_EN;
-
- if (offsets_demand2) {
- d2 &= ~RETRY_RD_ERR_LOG_UC;
- d2 |= RETRY_RD_ERR_LOG_NOOVER;
- d2 |= RETRY_RD_ERR_LOG_EN;
- }
+ /* Save default configurations. */
+ *rrl_ctl = v;
+ v &= ~rrl->uc_mask;
+
+ if (first)
+ v |= rrl->noover_mask;
+ else
+ v &= ~rrl->noover_mask;
+
+ if (scrub)
+ v |= rrl->en_patspr_mask;
+ else
+ v &= ~rrl->en_patspr_mask;
+
+ v |= rrl->en_mask;
} else {
- /* Restore default configurations */
- if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
- s |= RETRY_RD_ERR_LOG_UC;
- if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
- s |= RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
- s &= ~RETRY_RD_ERR_LOG_EN;
- if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
- d |= RETRY_RD_ERR_LOG_UC;
- if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
- d |= RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
- d &= ~RETRY_RD_ERR_LOG_EN;
-
- if (offsets_demand2) {
- if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
- d2 |= RETRY_RD_ERR_LOG_UC;
- if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
- d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
- if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
- d2 &= ~RETRY_RD_ERR_LOG_EN;
+ /* Restore default configurations. */
+ if (*rrl_ctl & rrl->uc_mask)
+ v |= rrl->uc_mask;
+
+ if (first) {
+ if (!(*rrl_ctl & rrl->noover_mask))
+ v &= ~rrl->noover_mask;
+ } else {
+ if (*rrl_ctl & rrl->noover_mask)
+ v |= rrl->noover_mask;
}
+
+ if (scrub) {
+ if (!(*rrl_ctl & rrl->en_patspr_mask))
+ v &= ~rrl->en_patspr_mask;
+ } else {
+ if (*rrl_ctl & rrl->en_patspr_mask)
+ v |= rrl->en_patspr_mask;
+ }
+
+ if (!(*rrl_ctl & rrl->en_mask))
+ v &= ~rrl->en_mask;
}
- I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
- I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
- if (offsets_demand2)
- I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
+ write_imc_reg(imc, chan, offset, width, v);
+}
+
+static void enable_rrls(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+ bool enable, u32 *rrl_ctl)
+{
+ for (int i = 0; i < rrl->set_num; i++)
+ enable_rrl(imc, chan, rrl, i, enable, rrl_ctl + i);
+}
+
+static void enable_rrls_ddr(struct skx_imc *imc, bool enable)
+{
+ struct reg_rrl *rrl_ddr = res_cfg->reg_rrl_ddr;
+ int i, chan_num = res_cfg->ddr_chan_num;
+ struct skx_channel *chan = imc->chan;
+
+ if (!imc->mbase)
+ return;
+
+ for (i = 0; i < chan_num; i++)
+ enable_rrls(imc, i, rrl_ddr, enable, chan[i].rrl_ctl[0]);
+}
+
+static void enable_rrls_hbm(struct skx_imc *imc, bool enable)
+{
+ struct reg_rrl **rrl_hbm = res_cfg->reg_rrl_hbm;
+ int i, chan_num = res_cfg->hbm_chan_num;
+ struct skx_channel *chan = imc->chan;
+
+ if (!imc->mbase || !imc->hbm_mc || !rrl_hbm[0] || !rrl_hbm[1])
+ return;
+
+ for (i = 0; i < chan_num; i++) {
+ enable_rrls(imc, i, rrl_hbm[0], enable, chan[i].rrl_ctl[0]);
+ enable_rrls(imc, i, rrl_hbm[1], enable, chan[i].rrl_ctl[1]);
+ }
}
static void enable_retry_rd_err_log(bool enable)
{
- int i, j, imc_num, chan_num;
- struct skx_imc *imc;
struct skx_dev *d;
+ int i, imc_num;
edac_dbg(2, "\n");
list_for_each_entry(d, i10nm_edac_list, list) {
imc_num = res_cfg->ddr_imc_num;
- chan_num = res_cfg->ddr_chan_num;
-
- for (i = 0; i < imc_num; i++) {
- imc = &d->imc[i];
- if (!imc->mbase)
- continue;
-
- for (j = 0; j < chan_num; j++)
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub,
- res_cfg->offsets_demand,
- res_cfg->offsets_demand2);
- }
+ for (i = 0; i < imc_num; i++)
+ enable_rrls_ddr(&d->imc[i], enable);
imc_num += res_cfg->hbm_imc_num;
- chan_num = res_cfg->hbm_chan_num;
-
- for (; i < imc_num; i++) {
- imc = &d->imc[i];
- if (!imc->mbase || !imc->hbm_mc)
- continue;
-
- for (j = 0; j < chan_num; j++) {
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm0,
- res_cfg->offsets_demand_hbm0,
- NULL);
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm1,
- res_cfg->offsets_demand_hbm1,
- NULL);
- }
- }
+ for (; i < imc_num; i++)
+ enable_rrls_hbm(&d->imc[i], enable);
}
}
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
int len, bool scrub_err)
{
+ int i, j, n, ch = res->channel, pch = res->cs & 1;
struct skx_imc *imc = &res->dev->imc[res->imc];
- u32 log0, log1, log2, log3, log4;
- u32 corr0, corr1, corr2, corr3;
- u32 lxg0, lxg1, lxg3, lxg4;
- u32 *xffsets = NULL;
- u64 log2a, log5;
- u64 lxg2a, lxg5;
- u32 *offsets;
- int n, pch;
+ u64 log, corr, status_mask;
+ struct reg_rrl *rrl;
+ bool scrub;
+ u32 offset;
+ u8 width;
if (!imc->mbase)
return;
- if (imc->hbm_mc) {
- pch = res->cs & 1;
+ rrl = imc->hbm_mc ? res_cfg->reg_rrl_hbm[pch] : res_cfg->reg_rrl_ddr;
- if (pch)
- offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
- res_cfg->offsets_demand_hbm1;
- else
- offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
- res_cfg->offsets_demand_hbm0;
- } else {
- if (scrub_err) {
- offsets = res_cfg->offsets_scrub;
- } else {
- offsets = res_cfg->offsets_demand;
- xffsets = res_cfg->offsets_demand2;
- }
- }
+ if (!rrl)
+ return;
- log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
- log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
- log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
- log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
- log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
-
- if (xffsets) {
- lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
- lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
- lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
- lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
- lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
- }
+ status_mask = rrl->over_mask | rrl->uc_mask | rrl->v_mask;
- if (res_cfg->type == SPR) {
- log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
- log0, log1, log2a, log3, log4, log5);
+ n = snprintf(msg, len, " retry_rd_err_log[");
+ for (i = 0; i < rrl->set_num; i++) {
+ scrub = (rrl->modes[i] == FRE_SCRUB || rrl->modes[i] == LRE_SCRUB);
+ if (scrub_err != scrub)
+ continue;
- if (len - n > 0) {
- if (xffsets) {
- lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
- n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
- lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
- } else {
- n += snprintf(msg + n, len - n, "]");
- }
- }
- } else {
- log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
- log0, log1, log2, log3, log4, log5);
- }
+ for (j = 0; j < rrl->reg_num && len - n > 0; j++) {
+ offset = rrl->offsets[i][j];
+ width = rrl->widths[j];
+ log = read_imc_reg(imc, ch, offset, width);
- if (imc->hbm_mc) {
- if (pch) {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
- } else {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
+ if (width == 4)
+ n += snprintf(msg + n, len - n, "%.8llx ", log);
+ else
+ n += snprintf(msg + n, len - n, "%.16llx ", log);
+
+ /* Clear RRL status if RRL is in Linux control mode. */
+ if (retry_rd_err_log == 2 && !j && (log & status_mask))
+ write_imc_reg(imc, ch, offset, width, log & ~status_mask);
}
- } else {
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
}
- if (len - n > 0)
- snprintf(msg + n, len - n,
- " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
- corr0 & 0xffff, corr0 >> 16,
- corr1 & 0xffff, corr1 >> 16,
- corr2 & 0xffff, corr2 >> 16,
- corr3 & 0xffff, corr3 >> 16);
-
- /* Clear status bits */
- if (retry_rd_err_log == 2) {
- if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
- log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+ /* Move back one space. */
+ n--;
+ n += snprintf(msg + n, len - n, "]");
+
+ if (len - n > 0) {
+ n += snprintf(msg + n, len - n, " correrrcnt[");
+ for (i = 0; i < rrl->cecnt_num && len - n > 0; i++) {
+ offset = rrl->cecnt_offsets[i];
+ width = rrl->cecnt_widths[i];
+ corr = read_imc_reg(imc, ch, offset, width);
+
+ /* CPUs {ICX,SPR} encode two counters per 4-byte CORRERRCNT register. */
+ if (res_cfg->type <= SPR) {
+ n += snprintf(msg + n, len - n, "%.4llx %.4llx ",
+ corr & 0xffff, corr >> 16);
+ } else {
+ /* CPUs {GNR} encode one counter per CORRERRCNT register. */
+ if (width == 4)
+ n += snprintf(msg + n, len - n, "%.8llx ", corr);
+ else
+ n += snprintf(msg + n, len - n, "%.16llx ", corr);
+ }
}
- if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
- lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
- }
+ /* Move back one space. */
+ n--;
+ n += snprintf(msg + n, len - n, "]");
}
}
@@ -870,8 +956,7 @@ static struct res_config i10nm_cfg0 = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
- .offsets_scrub = offsets_scrub_icx,
- .offsets_demand = offsets_demand_icx,
+ .reg_rrl_ddr = &icx_reg_rrl_ddr,
};
static struct res_config i10nm_cfg1 = {
@@ -889,8 +974,7 @@ static struct res_config i10nm_cfg1 = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
- .offsets_scrub = offsets_scrub_icx,
- .offsets_demand = offsets_demand_icx,
+ .reg_rrl_ddr = &icx_reg_rrl_ddr,
};
static struct res_config spr_cfg = {
@@ -913,13 +997,9 @@ static struct res_config spr_cfg = {
.ddr_mdev_bdf = {0, 12, 0},
.hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x300,
- .offsets_scrub = offsets_scrub_spr,
- .offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
- .offsets_scrub_hbm1 = offsets_scrub_spr_hbm1,
- .offsets_demand = offsets_demand_spr,
- .offsets_demand2 = offsets_demand2_spr,
- .offsets_demand_hbm0 = offsets_demand_spr_hbm0,
- .offsets_demand_hbm1 = offsets_demand_spr_hbm1,
+ .reg_rrl_ddr = &spr_reg_rrl_ddr,
+ .reg_rrl_hbm[0] = &spr_reg_rrl_hbm_pch0,
+ .reg_rrl_hbm[1] = &spr_reg_rrl_hbm_pch1,
};
static struct res_config gnr_cfg = {
@@ -937,6 +1017,7 @@ static struct res_config gnr_cfg = {
.uracu_bdf = {0, 0, 1},
.ddr_mdev_bdf = {0, 5, 1},
.sad_all_offset = 0x300,
+ .reg_rrl_ddr = &gnr_reg_rrl_ddr,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -1108,7 +1189,7 @@ static int __init i10nm_init(void)
mce_register_decode_chain(&i10nm_mce_dec);
skx_setup_debug("i10nm_test");
- if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+ if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(true);
@@ -1128,7 +1209,7 @@ static void __exit i10nm_exit(void)
{
edac_dbg(2, "\n");
- if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
+ if (retry_rd_err_log && res_cfg->reg_rrl_ddr) {
skx_set_decode(NULL, NULL);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(false);
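The i10nm rewrite replaces per-CPU offset arrays and hard-coded RETRY_RD_ERR_LOG_* masks with a reg_rrl table describing each register set's offsets, widths, and control bits, leaving one generic accessor to dispatch on width. A condensed user-space model of that shape (all values hypothetical, reg_read stands in for the MMIO macros):

#include <stdio.h>
#include <stdint.h>

struct rrl_desc {
	uint32_t offsets[2];
	uint8_t widths[2];
	uint32_t en_mask;
};

static uint64_t reg_read(uint32_t off, uint8_t width)
{
	(void)off;	/* the stand-in ignores the offset */
	return width == 4 ? 0x8001u : 0x123456789abcdef0ull;
}

int main(void)
{
	struct rrl_desc d = { { 0x22c60, 0x20ed8 }, { 4, 8 }, 1u << 15 };

	for (int i = 0; i < 2; i++) {
		uint64_t v = reg_read(d.offsets[i], d.widths[i]);

		printf("reg@%#x (%d bytes) = %#llx%s\n", d.offsets[i],
		       d.widths[i], (unsigned long long)v,
		       (v & d.en_mask) ? " [EN]" : "");
	}
	return 0;
}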
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 204834149579..a53612be4b2f 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -52,6 +52,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/mce.h>
+#include <asm/msr.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
@@ -89,6 +90,10 @@
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640
#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4 0xa700
+
+/* Alder Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660
#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
@@ -734,6 +739,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 5807517ee32d..1930dc00c791 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -127,6 +127,7 @@
static const struct res_config {
bool machine_check;
+ /* The number of present memory controllers. */
int num_imc;
u32 imc_base;
u32 cmf_base;
@@ -240,6 +241,12 @@ static struct work_struct ecclog_work;
#define DID_ADL_N_SKU11 0x467c
#define DID_ADL_N_SKU12 0x4632
+/* Compute die IDs for Arizona Beach with IBECC */
+#define DID_AZB_SKU1 0x4676
+
+/* Compute die IDs for Amston Lake with IBECC */
+#define DID_ASL_SKU1 0x464a
+
/* Compute die IDs for Raptor Lake-P with IBECC */
#define DID_RPL_P_SKU1 0xa706
#define DID_RPL_P_SKU2 0xa707
@@ -595,6 +602,8 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_AZB_SKU1), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ASL_SKU1), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
@@ -1201,23 +1210,21 @@ static void igen6_check(struct mem_ctl_info *mci)
irq_work_queue(&ecclog_irq_work);
}
-static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
+/* Check whether the memory controller is absent. */
+static bool igen6_imc_absent(void __iomem *window)
+{
+ return readl(window + MAD_INTER_CHANNEL_OFFSET) == ~0;
+}
+
+static int igen6_register_mci(int mc, void __iomem *window, struct pci_dev *pdev)
{
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct igen6_imc *imc;
- void __iomem *window;
int rc;
edac_dbg(2, "\n");
- mchbar += mc * MCHBAR_SIZE;
- window = ioremap(mchbar, MCHBAR_SIZE);
- if (!window) {
- igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
- return -ENODEV;
- }
-
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = NUM_CHANNELS;
layers[0].is_virt_csrow = false;
@@ -1283,7 +1290,6 @@ fail3:
fail2:
edac_mc_free(mci);
fail:
- iounmap(window);
return rc;
}
@@ -1309,6 +1315,56 @@ static void igen6_unregister_mcis(void)
}
}
+static int igen6_register_mcis(struct pci_dev *pdev, u64 mchbar)
+{
+ void __iomem *window;
+ int lmc, pmc, rc;
+ u64 base;
+
+ for (lmc = 0, pmc = 0; pmc < NUM_IMC; pmc++) {
+ base = mchbar + pmc * MCHBAR_SIZE;
+ window = ioremap(base, MCHBAR_SIZE);
+ if (!window) {
+ igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx for mc%d\n", base, pmc);
+ rc = -ENOMEM;
+ goto out_unregister_mcis;
+ }
+
+ if (igen6_imc_absent(window)) {
+ iounmap(window);
+ edac_dbg(2, "Skip absent mc%d\n", pmc);
+ continue;
+ }
+
+ rc = igen6_register_mci(lmc, window, pdev);
+ if (rc)
+ goto out_iounmap;
+
+ /* Done if all present MCs are detected and registered. */
+ if (++lmc >= res_cfg->num_imc)
+ break;
+ }
+
+ if (!lmc) {
+ igen6_printk(KERN_ERR, "No mc found.\n");
+ return -ENODEV;
+ }
+
+ if (lmc < res_cfg->num_imc)
+ igen6_printk(KERN_WARNING, "Expected %d mcs, but only %d detected.",
+ res_cfg->num_imc, lmc);
+
+ return 0;
+
+out_iounmap:
+ iounmap(window);
+
+out_unregister_mcis:
+ igen6_unregister_mcis();
+
+ return rc;
+}
+
static int igen6_mem_slice_setup(u64 mchbar)
{
struct igen6_imc *imc = &igen6_pvt->imc[0];
@@ -1405,7 +1461,7 @@ static void opstate_set(const struct res_config *cfg, const struct pci_device_id
static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u64 mchbar;
- int i, rc;
+ int rc;
edac_dbg(2, "\n");
@@ -1421,11 +1477,9 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
opstate_set(res_cfg, ent);
- for (i = 0; i < res_cfg->num_imc; i++) {
- rc = igen6_register_mci(i, mchbar, pdev);
- if (rc)
- goto fail2;
- }
+ rc = igen6_register_mcis(pdev, mchbar);
+ if (rc)
+ goto fail;
if (res_cfg->num_imc > 1) {
rc = igen6_mem_slice_setup(mchbar);
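igen6_imc_absent() leans on a common MMIO property: reads from an unimplemented window return all ones, and the first decode register can never legitimately be 0xffffffff on present hardware. A tiny sketch of the check (illustrative values):

#include <stdint.h>
#include <stdio.h>

/* All-ones from the first register means no controller behind the window. */
static int imc_absent(uint32_t first_reg)
{
	return first_reg == ~0u;
}

int main(void)
{
	printf("%d %d\n", imc_absent(0xffffffff), imc_absent(0x00010002));
	return 0;
}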
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 50d74d3bf0f5..af3c12284a1e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <asm/cpu.h>
+#include <asm/msr.h>
#include "mce_amd.h"
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
index 3b1a845457b0..d1a8caa85369 100755
--- a/drivers/edac/mem_repair.c
+++ b/drivers/edac/mem_repair.c
@@ -45,6 +45,15 @@ struct edac_mem_repair_context {
struct attribute_group group;
};
+const char * const edac_repair_type[] = {
+ [EDAC_REPAIR_PPR] = "ppr",
+ [EDAC_REPAIR_CACHELINE_SPARING] = "cacheline-sparing",
+ [EDAC_REPAIR_ROW_SPARING] = "row-sparing",
+ [EDAC_REPAIR_BANK_SPARING] = "bank-sparing",
+ [EDAC_REPAIR_RANK_SPARING] = "rank-sparing",
+};
+EXPORT_SYMBOL_GPL(edac_repair_type);
+
#define TO_MR_DEV_ATTR(_dev_attr) \
container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index fa5b442b1844..c9ade45c1a99 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -116,6 +116,7 @@ EXPORT_SYMBOL_GPL(skx_adxl_get);
void skx_adxl_put(void)
{
+ adxl_component_count = 0;
kfree(adxl_values);
kfree(adxl_msg);
}
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index ca5408803f87..ec4966f7ea40 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -79,6 +79,47 @@
*/
#define MCACOD_EXT_MEM_ERR 0x280
+/* Max RRL register sets per {,sub-,pseudo-}channel. */
+#define NUM_RRL_SET 4
+/* Max RRL registers per set. */
+#define NUM_RRL_REG 6
+/* Max correctable error count registers. */
+#define NUM_CECNT_REG 8
+
+/* Modes of RRL register set. */
+enum rrl_mode {
+ /* Last read error from patrol scrub. */
+ LRE_SCRUB,
+ /* Last read error from demand. */
+ LRE_DEMAND,
+ /* First read error from patrol scrub. */
+ FRE_SCRUB,
+ /* First read error from demand. */
+ FRE_DEMAND,
+};
+
+/* RRL registers per {,sub-,pseudo-}channel. */
+struct reg_rrl {
+ /* RRL register parts. */
+ int set_num, reg_num;
+ enum rrl_mode modes[NUM_RRL_SET];
+ u32 offsets[NUM_RRL_SET][NUM_RRL_REG];
+ /* RRL register widths in bytes per set. */
+ u8 widths[NUM_RRL_REG];
+ /* RRL control bits of the first register per set. */
+ u32 v_mask;
+ u32 uc_mask;
+ u32 over_mask;
+ u32 en_patspr_mask;
+ u32 noover_mask;
+ u32 en_mask;
+
+ /* CORRERRCNT register parts. */
+ int cecnt_num;
+ u32 cecnt_offsets[NUM_CECNT_REG];
+ u8 cecnt_widths[NUM_CECNT_REG];
+};
+
/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
@@ -117,9 +158,11 @@ struct skx_dev {
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
- u32 retry_rd_err_log_s;
- u32 retry_rd_err_log_d;
- u32 retry_rd_err_log_d2;
+ /*
+ * Two groups of RRL control registers per channel to save default RRL
+ * settings of two {sub-,pseudo-}channels in Linux RRL control mode.
+ */
+ u32 rrl_ctl[2][NUM_RRL_SET];
struct skx_dimm {
u8 close_pg;
u8 bank_xor_enable;
@@ -232,14 +275,10 @@ struct res_config {
/* HBM mdev device BDF */
struct pci_bdf hbm_mdev_bdf;
int sad_all_offset;
- /* Offsets of retry_rd_err_log registers */
- u32 *offsets_scrub;
- u32 *offsets_scrub_hbm0;
- u32 *offsets_scrub_hbm1;
- u32 *offsets_demand;
- u32 *offsets_demand2;
- u32 *offsets_demand_hbm0;
- u32 *offsets_demand_hbm1;
+ /* RRL register sets per DDR channel */
+ struct reg_rrl *reg_rrl_ddr;
+ /* RRL register sets per HBM channel */
+ struct reg_rrl *reg_rrl_hbm[2];
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
diff --git a/drivers/eisa/Makefile b/drivers/eisa/Makefile
index a1dd0eaec2d4..552bd9478340 100644
--- a/drivers/eisa/Makefile
+++ b/drivers/eisa/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for the Linux device tree
+always-$(CONFIG_EISA) += devlist.h
obj-$(CONFIG_EISA) += eisa-bus.o
obj-${CONFIG_EISA_PCI_EISA} += pci_eisa.o
@@ -9,14 +10,11 @@ obj-${CONFIG_EISA_PCI_EISA} += pci_eisa.o
obj-${CONFIG_EISA_VIRTUAL_ROOT} += virtual_root.o
-# Ugly hack to get DEVICE_NAME_SIZE value...
-DEVICE_NAME_SIZE = 50
-
$(obj)/eisa-bus.o: $(obj)/devlist.h
quiet_cmd_eisaid = GEN $@
- cmd_eisaid = sed -e '/^\#/D' -e 's/^\([[:alnum:]]\{7\}\) \+"\([^"]\{1,$(DEVICE_NAME_SIZE)\}\).*"/EISA_DEVINFO ("\1", "\2"),/' $< > $@
+ cmd_eisaid = sed -e '/^\#/D' -e 's/^\([[:alnum:]]\{7\}\) \+"\([^"]*\)"/EISA_DEVINFO ("\1", "\2"),/' $< > $@
clean-files := devlist.h
-$(obj)/devlist.h: $(src)/eisa.ids include/linux/device.h
- $(call cmd,eisaid)
+$(obj)/devlist.h: $(src)/eisa.ids include/linux/device.h FORCE
+ $(call if_changed,eisaid)
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index cb586a362944..edceea083b98 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -21,7 +21,7 @@
struct eisa_device_info {
struct eisa_device_id id;
- char name[50];
+ char name[EISA_DEVICE_INFO_NAME_SIZE];
};
#ifdef CONFIG_EISA_NAMES
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 905c82e26ce7..a5f5e250223a 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -83,7 +83,7 @@ config FIREWIRE_KUNIT_SELF_ID_SEQUENCE_HELPER_TEST
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
- depends on PCI && FIREWIRE && MMU
+ depends on PCI && FIREWIRE
help
Enable this driver if you have a FireWire controller based
on the OHCI specification. For all practical purposes, this
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index b0f9ef6ac6df..2bd5deb9054e 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(fw_cancel_transaction);
static void split_transaction_timeout_callback(struct timer_list *timer)
{
- struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
+ struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
scoped_guard(spinlock_irqsave, &card->lock) {
@@ -431,7 +431,7 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
- destroy_timer_on_stack(&t.split_timeout_timer);
+ timer_destroy_on_stack(&t.split_timeout_timer);
return d.rcode;
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index aadc395ee168..bbd2155d8483 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -31,7 +31,6 @@ config ARM_SCPI_PROTOCOL
config ARM_SDE_INTERFACE
bool "ARM Software Delegated Exception Interface (SDEI)"
depends on ARM64
- depends on ACPI_APEI_GHES
help
The Software Delegated Exception Interface (SDEI) is an ARM
standard for registering callbacks from the platform firmware
@@ -268,6 +267,23 @@ config TURRIS_MOX_RWTM
other manufacturing data and also utilize the Entropy Bit Generator
for hardware random number generation.
+if TURRIS_MOX_RWTM
+
+config TURRIS_MOX_RWTM_KEYCTL
+ bool "Turris Mox rWTM ECDSA message signing"
+ default y
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ select CZNIC_PLATFORMS
+ select TURRIS_SIGNING_KEY
+ help
+ Say Y here to add support for ECDSA message signing with board private
+ key (each Turris Mox has an ECDSA private key generated in the secure
+ coprocessor when manufactured). This functionality is exposed via the
+ keyctl() syscall.
+
+endif # TURRIS_MOX_RWTM
+
source "drivers/firmware/arm_ffa/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/cirrus/Kconfig"
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index dabd874641d0..e3fb36825978 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -69,6 +69,19 @@ config ARM_SCMI_DEBUG_COUNTERS
such useful debug counters. This can be helpful for debugging and
SCMI monitoring.
+config ARM_SCMI_QUIRKS
+ bool "Enable SCMI Quirks framework"
+ depends on JUMP_LABEL || COMPILE_TEST
+ default y
+ help
+ Enables support for the SCMI Quirks framework, used to work around
+ SCMI platform firmware bugs on systems already deployed in the wild.
+
+ The framework allows the definition of platform-specific code quirks
+ that will be associated and enabled only on the desired platforms
+ depending on the advertised SCMI firmware versions and/or machine
+ compatibles.
+
source "drivers/firmware/arm_scmi/transports/Kconfig"
source "drivers/firmware/arm_scmi/vendors/imx/Kconfig"
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 9ac81adff567..780cd62b2f78 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -3,6 +3,7 @@ scmi-bus-y = bus.o
scmi-core-objs := $(scmi-bus-y)
scmi-driver-y = driver.o notify.o
+scmi-driver-$(CONFIG_ARM_SCMI_QUIRKS) += quirks.o
scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 3a5474015f7d..1adef0389475 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -201,55 +201,51 @@ scmi_protocol_table_unregister(const struct scmi_device_id *id_table)
scmi_protocol_device_unrequest(entry);
}
-static const struct scmi_device_id *
-scmi_dev_match_id(struct scmi_device *scmi_dev, const struct scmi_driver *scmi_drv)
+static int scmi_dev_match_by_id_table(struct scmi_device *scmi_dev,
+ const struct scmi_device_id *id_table)
{
- const struct scmi_device_id *id = scmi_drv->id_table;
-
- if (!id)
- return NULL;
-
- for (; id->protocol_id; id++)
- if (id->protocol_id == scmi_dev->protocol_id) {
- if (!id->name)
- return id;
- else if (!strcmp(id->name, scmi_dev->name))
- return id;
- }
+ if (!id_table || !id_table->name)
+ return 0;
+
+ /* Always skip transport devices from matching */
+ for (; id_table->protocol_id && id_table->name; id_table++)
+ if (id_table->protocol_id == scmi_dev->protocol_id &&
+ strncmp(scmi_dev->name, "__scmi_transport_device", 23) &&
+ !strcmp(id_table->name, scmi_dev->name))
+ return 1;
+ return 0;
+}
- return NULL;
+static int scmi_dev_match_id(struct scmi_device *scmi_dev,
+ const struct scmi_driver *scmi_drv)
+{
+ return scmi_dev_match_by_id_table(scmi_dev, scmi_drv->id_table);
}
static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
{
const struct scmi_driver *scmi_drv = to_scmi_driver(drv);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
- const struct scmi_device_id *id;
-
- id = scmi_dev_match_id(scmi_dev, scmi_drv);
- if (id)
- return 1;
- return 0;
+ return scmi_dev_match_id(scmi_dev, scmi_drv);
}
static int scmi_match_by_id_table(struct device *dev, const void *data)
{
- struct scmi_device *sdev = to_scmi_dev(dev);
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id_table = data;
- return sdev->protocol_id == id_table->protocol_id &&
- (id_table->name && !strcmp(sdev->name, id_table->name));
+ return scmi_dev_match_by_id_table(scmi_dev, id_table);
}
static struct scmi_device *scmi_child_dev_find(struct device *parent,
int prot_id, const char *name)
{
- struct scmi_device_id id_table;
+ struct scmi_device_id id_table[2] = { 0 };
struct device *dev;
- id_table.protocol_id = prot_id;
- id_table.name = name;
+ id_table[0].protocol_id = prot_id;
+ id_table[0].name = name;
dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
if (!dev)
@@ -463,6 +459,20 @@ put_dev:
return NULL;
}
+static struct scmi_device *
+_scmi_device_create(struct device_node *np, struct device *parent,
+ int protocol, const char *name)
+{
+ struct scmi_device *sdev;
+
+ sdev = __scmi_device_create(np, parent, protocol, name);
+ if (!sdev)
+ pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
+ of_node_full_name(parent->of_node), protocol, name);
+
+ return sdev;
+}
+
/**
* scmi_device_create - A method to create one or more SCMI devices
*
@@ -495,7 +505,7 @@ struct scmi_device *scmi_device_create(struct device_node *np,
struct scmi_device *scmi_dev = NULL;
if (name)
- return __scmi_device_create(np, parent, protocol, name);
+ return _scmi_device_create(np, parent, protocol, name);
mutex_lock(&scmi_requested_devices_mtx);
phead = idr_find(&scmi_requested_devices, protocol);
@@ -509,18 +519,13 @@ struct scmi_device *scmi_device_create(struct device_node *np,
list_for_each_entry(rdev, phead, node) {
struct scmi_device *sdev;
- sdev = __scmi_device_create(np, parent,
- rdev->id_table->protocol_id,
- rdev->id_table->name);
- /* Report errors and carry on... */
+ sdev = _scmi_device_create(np, parent,
+ rdev->id_table->protocol_id,
+ rdev->id_table->name);
if (sdev)
scmi_dev = sdev;
- else
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- rdev->id_table->protocol_id,
- rdev->id_table->name);
}
+
mutex_unlock(&scmi_requested_devices_mtx);
return scmi_dev;
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 2ed2279388f0..afa7981efe82 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -11,6 +11,7 @@
#include "protocols.h"
#include "notify.h"
+#include "quirks.h"
/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
@@ -429,6 +430,23 @@ static void iter_clk_describe_prepare_message(void *message,
msg->rate_index = cpu_to_le32(desc_index);
}
+#define QUIRK_OUT_OF_SPEC_TRIPLET \
+ ({ \
+ /* \
+ * A known quirk: a triplet is returned but num_returned != 3 \
+ * Check for a safe payload size and fix. \
+ */ \
+ if (st->num_returned != 3 && st->num_remaining == 0 && \
+ st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { \
+ st->num_returned = 3; \
+ st->num_remaining = 0; \
+ } else { \
+ dev_err(p->dev, \
+ "Cannot fix out-of-spec reply !\n"); \
+ return -EPROTO; \
+ } \
+ })
+
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
@@ -450,19 +468,8 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st,
p->clk->name, st->num_returned, st->num_remaining,
st->rx_len);
- /*
- * A known quirk: a triplet is returned but num_returned != 3
- * Check for a safe payload size and fix.
- */
- if (st->num_returned != 3 && st->num_remaining == 0 &&
- st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
- st->num_returned = 3;
- st->num_remaining = 0;
- } else {
- dev_err(p->dev,
- "Cannot fix out-of-spec reply !\n");
- return -EPROTO;
- }
+ SCMI_QUIRK(clock_rates_triplet_out_of_spec,
+ QUIRK_OUT_OF_SPEC_TRIPLET);
}
return 0;
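
For reference, with CONFIG_ARM_SCMI_QUIRKS=y the SCMI_QUIRK() invocation
above reduces to a static-branch guard around the named snippet; a rough
sketch of the expansion, per the quirks.h introduced later in this series
(macro names as defined there):

    do {
            if (static_branch_unlikely(&scmi_quirk_clock_rates_triplet_out_of_spec))
                    (QUIRK_OUT_OF_SPEC_TRIPLET);    /* statement-expression snippet */
    } while (0);

When CONFIG_ARM_SCMI_QUIRKS is disabled, the same invocation compiles the
snippet under an if (0), so the quirk code still gets build coverage.
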
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 10ea7962323e..dab758c5fdea 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -475,6 +475,7 @@ static int __tag##_probe(struct platform_device *pdev) \
if (ret) \
goto err; \
\
+ spdev->dev.parent = dev; \
ret = platform_device_add(spdev); \
if (ret) \
goto err; \
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 0390d5ff195e..395fe9289035 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -11,7 +11,7 @@
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
- * Copyright (C) 2018-2024 ARM Ltd.
+ * Copyright (C) 2018-2025 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -38,6 +38,7 @@
#include "common.h"
#include "notify.h"
+#include "quirks.h"
#include "raw_mode.h"
@@ -439,14 +440,8 @@ static void scmi_create_protocol_devices(struct device_node *np,
struct scmi_info *info,
int prot_id, const char *name)
{
- struct scmi_device *sdev;
-
mutex_lock(&info->devreq_mtx);
- sdev = scmi_device_create(np, info->dev, prot_id, name);
- if (name && !sdev)
- dev_err(info->dev,
- "failed to create device for protocol 0x%X (%s)\n",
- prot_id, name);
+ scmi_device_create(np, info->dev, prot_id, name);
mutex_unlock(&info->devreq_mtx);
}
@@ -1190,7 +1185,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
* RX path since it will be already queued at the end of the TX
* poll loop.
*/
- if (!xfer->hdr.poll_completion)
+ if (!xfer->hdr.poll_completion ||
+ xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
scmi_raw_message_report(info->raw, xfer,
SCMI_RAW_REPLY_QUEUE,
cinfo->id);
@@ -1738,6 +1734,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
}
/**
+ * scmi_protocol_msg_check - Check protocol message attributes
+ *
+ * @ph: A reference to the protocol handle.
+ * @message_id: The ID of the message to check.
+ * @attributes: A parameter to optionally return the retrieved message
+ * attributes, in case of Success.
+ *
+ * A helper to check protocol message attributes for a specific protocol
+ * and message pair.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(message_id, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret && attributes)
+ *attributes = get_unaligned_le32(t->rx.buf);
+ xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
@@ -1869,6 +1898,13 @@ struct scmi_msg_resp_desc_fc {
__le32 db_preserve_hmask;
};
+#define QUIRK_PERF_FC_FORCE \
+ ({ \
+ if (pi->proto->id == SCMI_PROTOCOL_PERF && \
+ message_id == 0x8 /* PERF_LEVEL_GET */) \
+ attributes |= BIT(0); \
+ })
+
static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id, u32 valid_size,
@@ -1878,6 +1914,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
int ret;
u32 flags;
u64 phys_addr;
+ u32 attributes;
u8 size;
void __iomem *addr;
struct scmi_xfer *t;
@@ -1886,6 +1923,16 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
struct scmi_msg_resp_desc_fc *resp;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+ /* Check if the MSG_ID supports fastchannel */
+ ret = scmi_protocol_msg_check(ph, message_id, &attributes);
+ SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
+ if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
+ dev_dbg(ph->dev,
+ "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
+ pi->proto->id, message_id, domain, ret);
+ return;
+ }
+
if (!p_addr) {
ret = -EINVAL;
goto err_out;
@@ -2003,39 +2050,6 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
SCMI_PROTO_FC_RING_DB(64);
}
-/**
- * scmi_protocol_msg_check - Check protocol message attributes
- *
- * @ph: A reference to the protocol handle.
- * @message_id: The ID of the message to check.
- * @attributes: A parameter to optionally return the retrieved message
- * attributes, in case of Success.
- *
- * An helper to check protocol message attributes for a specific protocol
- * and message pair.
- *
- * Return: 0 on SUCCESS
- */
-static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
- u32 message_id, u32 *attributes)
-{
- int ret;
- struct scmi_xfer *t;
-
- ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
- sizeof(__le32), 0, &t);
- if (ret)
- return ret;
-
- put_unaligned_le32(message_id, t->tx.buf);
- ret = do_xfer(ph, t);
- if (!ret && attributes)
- *attributes = get_unaligned_le32(t->rx.buf);
- xfer_put(ph, t);
-
- return ret;
-}
-
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.get_max_msg_size = scmi_common_get_max_msg_size,
@@ -2828,9 +2842,8 @@ static int scmi_bus_notifier(struct notifier_block *nb,
struct scmi_info *info = bus_nb_to_scmi_info(nb);
struct scmi_device *sdev = to_scmi_dev(data);
- /* Skip transport devices and devices of different SCMI instances */
- if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
- sdev->dev.parent != info->dev)
+ /* Skip devices of different SCMI instances */
+ if (sdev->dev.parent != info->dev)
return NOTIFY_DONE;
switch (action) {
@@ -3101,6 +3114,18 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
return &trans->desc;
}
+static void scmi_enable_matching_quirks(struct scmi_info *info)
+{
+ struct scmi_revision_info *rev = &info->version;
+
+ dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
+ rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
+
+ /* Enable applicable quirks */
+ scmi_quirks_enable(info->dev, rev->vendor_id,
+ rev->sub_vendor_id, rev->impl_ver);
+}
+
static int scmi_probe(struct platform_device *pdev)
{
int ret;
@@ -3222,6 +3247,8 @@ static int scmi_probe(struct platform_device *pdev)
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
+ scmi_enable_matching_quirks(info);
+
for_each_available_child_of_node(np, child) {
u32 prot_id;
@@ -3380,6 +3407,8 @@ static struct dentry *scmi_debugfs_init(void)
static int __init scmi_driver_init(void)
{
+ scmi_quirks_initialize();
+
/* Bail out if no SCMI transport was configured */
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index aaee57cdcd55..d62c4469d1fd 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -31,6 +31,8 @@
#define SCMI_PROTOCOL_VENDOR_BASE 0x80
+#define MSG_SUPPORTS_FASTCHANNEL(x) ((x) & BIT(0))
+
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
new file mode 100644
index 000000000000..03960aca3610
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of operation
+ *
+ * A framework to define SCMI quirks and their activation conditions based on
+ * existing static_keys kernel facilities.
+ *
+ * Quirks are named and their activation conditions defined using the macro
+ * DEFINE_SCMI_QUIRK() in this file.
+ *
+ * After a quirk is defined, a corresponding entry must also be added to the
+ * global @scmi_quirks_table in this file using __DECLARE_SCMI_QUIRK_ENTRY().
+ *
+ * Additionally, a corresponding quirk declaration must be added to the
+ * quirks.h file using DECLARE_SCMI_QUIRK().
+ *
+ * The quirk code snippet itself is defined locally to the SCMI code it is
+ * meant to fix, and is associated with the previously defined quirk and its
+ * activation conditions using the macro SCMI_QUIRK().
+ *
+ * At runtime, during the SCMI stack probe sequence, once the SCMI Server has
+ * advertised the running platform Vendor, SubVendor and Implementation Version
+ * data, all the defined quirks matching the activation conditions will be
+ * enabled.
+ *
+ * Example
+ *
+ * quirks.c
+ * --------
+ * DEFINE_SCMI_QUIRK(fix_me, "vendor", "subvend", "0x12000-0x30000",
+ * "someone,plat_A", "another,plat_b", "vend,sku");
+ *
+ * static struct scmi_quirk *scmi_quirks_table[] = {
+ * ...
+ * __DECLARE_SCMI_QUIRK_ENTRY(fix_me),
+ * NULL
+ * };
+ *
+ * quirks.h
+ * --------
+ * DECLARE_SCMI_QUIRK(fix_me);
+ *
+ * <somewhere_in_the_scmi_stack.c>
+ * ------------------------------
+ *
+ * #define QUIRK_CODE_SNIPPET_FIX_ME() \
+ * ({ \
+ * if (p->condition) \
+ * a_ptr->calculated_val = 123; \
+ * })
+ *
+ *
+ * int some_function_to_fix(int param, struct something *p)
+ * {
+ * struct some_struct *a_ptr;
+ *
+ * a_ptr = some_load_func(p);
+ * SCMI_QUIRK(fix_me, QUIRK_CODE_SNIPPET_FIX_ME);
+ * some_more_func(a_ptr);
+ * ...
+ *
+ * return 0;
+ * }
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/hashtable.h>
+#include <linux/kstrtox.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/static_key.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
+#include <linux/types.h>
+
+#include "quirks.h"
+
+#define SCMI_QUIRKS_HT_SZ 4
+
+struct scmi_quirk {
+ bool enabled;
+ const char *name;
+ char *vendor;
+ char *sub_vendor_id;
+ char *impl_ver_range;
+ u32 start_range;
+ u32 end_range;
+ struct static_key_false *key;
+ struct hlist_node hash;
+ unsigned int hkey;
+ const char *const compats[];
+};
+
+#define __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ...) \
+ static struct scmi_quirk scmi_quirk_entry_ ## _qn = { \
+ .name = __stringify(quirk_ ## _qn), \
+ .vendor = _ven, \
+ .sub_vendor_id = _sub, \
+ .impl_ver_range = _impl, \
+ .key = &(scmi_quirk_ ## _qn), \
+ .compats = { __VA_ARGS__ __VA_OPT__(,) NULL }, \
+ }
+
+#define __DECLARE_SCMI_QUIRK_ENTRY(_qn) (&(scmi_quirk_entry_ ## _qn))
+
+/*
+ * Define a quirk by name and provide the matching tokens where:
+ *
+ * _qn: A string which will be used to build the quirk and the global
+ * static_key names.
+ * _ven : SCMI Vendor ID string match, NULL means any.
+ * _sub : SCMI SubVendor ID string match, NULL means any.
+ * _impl : SCMI Implementation Version string match, NULL means any.
+ * This string can be used to express version ranges which will be
+ * interpreted as follows:
+ *
+ * NULL [0, 0xFFFFFFFF]
+ * "X" [X, X]
+ * "X-" [X, 0xFFFFFFFF]
+ * "-X" [0, X]
+ * "X-Y" [X, Y]
+ *
+ * with X <= Y and <v> in [X, Y] meaning X <= <v> <= Y
+ *
+ * ... : An optional variadic macro argument used to provide a comma-separated
+ * list of compatible string matches; when no variadic argument is
+ * provided, ANY compatible will match this quirk.
+ *
+ * This also implicitly defines a properly named global static key that
+ * will be used to dynamically enable the quirk at initialization time.
+ *
+ * Note that it is possible to associate multiple quirks to the same
+ * matching pattern, if your firmware quality is really astounding :P
+ *
+ * Example:
+ *
+ * Compatibles list NOT provided, so ANY compatible will match:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000");
+ *
+ *
+ * A few compatibles provided to match against:
+ *
+ * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000",
+ * "xvend,plat_a", "xvend,plat_b", "xvend,sku_name");
+ */
+#define DEFINE_SCMI_QUIRK(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/*
+ * Same as DEFINE_SCMI_QUIRK but EXPORTED: this is meant to address quirks
+ * that may reside in code built into loadable kernel modules, which need
+ * access to the global static keys at runtime to determine whether the
+ * quirk is enabled. (see SCMI_QUIRK to understand usage)
+ */
+#define DEFINE_SCMI_QUIRK_EXPORTED(_qn, _ven, _sub, _impl, ...) \
+ DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \
+ EXPORT_SYMBOL_GPL(scmi_quirk_ ## _qn); \
+ __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__)
+
+/* Global Quirks Definitions */
+DEFINE_SCMI_QUIRK(clock_rates_triplet_out_of_spec, NULL, NULL, NULL);
+DEFINE_SCMI_QUIRK(perf_level_get_fc_force, "Qualcomm", NULL, "0x20000-");
+
+/*
+ * Quirks Pointers Array
+ *
+ * This is filled at compile time with the list of pointers to all the
+ * currently defined quirk descriptors.
+ */
+static struct scmi_quirk *scmi_quirks_table[] = {
+ __DECLARE_SCMI_QUIRK_ENTRY(clock_rates_triplet_out_of_spec),
+ __DECLARE_SCMI_QUIRK_ENTRY(perf_level_get_fc_force),
+ NULL
+};
+
+/*
+ * Quirks HashTable
+ *
+ * A hashtable, populated at run-time, containing all the defined quirk
+ * descriptors hashed by matching pattern.
+ */
+static DEFINE_READ_MOSTLY_HASHTABLE(scmi_quirks_ht, SCMI_QUIRKS_HT_SZ);
+
+static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
+{
+ char *signature, *p;
+ unsigned int hash32;
+ unsigned long hash = 0;
+
+ /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
+ signature = kasprintf(GFP_KERNEL, "|%s|%s|", vend ?: "", sub_vend ?: "");
+ if (!signature)
+ return 0;
+
+ pr_debug("SCMI Quirk Signature >>>%s<<<\n", signature);
+
+ p = signature;
+ while (*p)
+ hash = partial_name_hash(tolower(*p++), hash);
+ hash32 = end_name_hash(hash);
+
+ kfree(signature);
+
+ return hash32;
+}
+
+static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
+{
+ const char *last, *first = quirk->impl_ver_range;
+ size_t len;
+ char *sep;
+ int ret;
+
+ quirk->start_range = 0;
+ quirk->end_range = 0xFFFFFFFF;
+ len = quirk->impl_ver_range ? strlen(quirk->impl_ver_range) : 0;
+ if (!len)
+ return 0;
+
+ last = first + len - 1;
+ sep = strchr(quirk->impl_ver_range, '-');
+ if (sep)
+ *sep = '\0';
+
+ if (sep == first) /* -X */
+ ret = kstrtouint(first + 1, 0, &quirk->end_range);
+ else /* X OR X- OR X-y */
+ ret = kstrtouint(first, 0, &quirk->start_range);
+ if (ret)
+ return ret;
+
+ if (!sep)
+ quirk->end_range = quirk->start_range;
+ else if (sep != last) /* x-Y */
+ ret = kstrtouint(sep + 1, 0, &quirk->end_range);
+
+ if (quirk->start_range > quirk->end_range)
+ return -EINVAL;
+
+ return ret;
+}
+
+void scmi_quirks_initialize(void)
+{
+ struct scmi_quirk *quirk;
+ int i;
+
+ for (i = 0, quirk = scmi_quirks_table[0]; quirk;
+ i++, quirk = scmi_quirks_table[i]) {
+ int ret;
+
+ ret = scmi_quirk_range_parse(quirk);
+ if (ret) {
+ pr_err("SCMI skip QUIRK [%s] - BAD RANGE - |%s|\n",
+ quirk->name, quirk->impl_ver_range);
+ continue;
+ }
+ quirk->hkey = scmi_quirk_signature(quirk->vendor,
+ quirk->sub_vendor_id);
+
+ hash_add(scmi_quirks_ht, &quirk->hash, quirk->hkey);
+
+ pr_debug("Registered SCMI QUIRK [%s] -- %p - Key [0x%08X] - %s/%s/[0x%08X-0x%08X]\n",
+ quirk->name, quirk, quirk->hkey,
+ quirk->vendor, quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+ }
+
+ pr_debug("SCMI Quirks initialized\n");
+}
+
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl)
+{
+ for (int i = 3; i >= 0; i--) {
+ struct scmi_quirk *quirk;
+ unsigned int hkey;
+
+ hkey = scmi_quirk_signature(i > 1 ? vend : NULL,
+ i > 2 ? subv : NULL);
+
+ /*
+ * Note that there could be multiple matches, so we
+ * may enable multiple quirks that share a hash
+ * collision domain... BUT we cannot assume that ALL
+ * quirks in the same collision domain are a full match.
+ */
+ hash_for_each_possible(scmi_quirks_ht, quirk, hash, hkey) {
+ if (quirk->enabled || quirk->hkey != hkey ||
+ impl < quirk->start_range ||
+ impl > quirk->end_range)
+ continue;
+
+ if (quirk->compats[0] &&
+ !of_machine_compatible_match(quirk->compats))
+ continue;
+
+ dev_info(dev, "Enabling SCMI Quirk [%s]\n",
+ quirk->name);
+
+ dev_dbg(dev,
+ "Quirk matched on: %s/%s/%s/[0x%08X-0x%08X]\n",
+ quirk->compats[0], quirk->vendor,
+ quirk->sub_vendor_id,
+ quirk->start_range, quirk->end_range);
+
+ static_branch_enable(quirk->key);
+ quirk->enabled = true;
+ }
+ }
+}
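
As a quick reference for the range grammar handled by scmi_quirk_range_parse()
above, a few sample impl_ver_range strings and the [start_range, end_range]
pair they yield (sample values are illustrative only):

    /* NULL or ""          -> [0x00000000, 0xFFFFFFFF] */
    /* "0x20000"           -> [0x20000, 0x20000]       */
    /* "0x20000-"          -> [0x20000, 0xFFFFFFFF]    */
    /* "-0x30000"          -> [0x00000000, 0x30000]    */
    /* "0x12000-0x30000"   -> [0x12000, 0x30000]       */
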
diff --git a/drivers/firmware/arm_scmi/quirks.h b/drivers/firmware/arm_scmi/quirks.h
new file mode 100644
index 000000000000..a71fde85a527
--- /dev/null
+++ b/drivers/firmware/arm_scmi/quirks.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol Quirks
+ *
+ * Copyright (C) 2025 ARM Ltd.
+ */
+#ifndef _SCMI_QUIRKS_H
+#define _SCMI_QUIRKS_H
+
+#include <linux/static_key.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_ARM_SCMI_QUIRKS
+
+#define DECLARE_SCMI_QUIRK(_qn) \
+ DECLARE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn)
+
+/*
+ * A helper to associate the actual code snippet to use as a quirk
+ * named as _qn.
+ */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (static_branch_unlikely(&(scmi_quirk_ ## _qn))) \
+ (_blk); \
+ } while (0)
+
+void scmi_quirks_initialize(void);
+void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *subv, const u32 impl);
+
+#else
+
+#define DECLARE_SCMI_QUIRK(_qn)
+/* Force quirks compilation even when SCMI Quirks are disabled */
+#define SCMI_QUIRK(_qn, _blk) \
+ do { \
+ if (0) \
+ (_blk); \
+ } while (0)
+
+static inline void scmi_quirks_initialize(void) { }
+static inline void scmi_quirks_enable(struct device *dev, const char *vend,
+ const char *sub_vend, const u32 impl) { }
+
+#endif /* CONFIG_ARM_SCMI_QUIRKS */
+
+/* Quirk declarations */
+DECLARE_SCMI_QUIRK(clock_rates_triplet_out_of_spec);
+DECLARE_SCMI_QUIRK(perf_level_get_fc_force);
+
+#endif /* _SCMI_QUIRKS_H */
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 7cc0d616b8de..3d543b1d8947 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -671,11 +671,13 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
* @len: Length of the message in @buf.
* @chan_id: The channel ID to use.
* @async: A flag stating if an asynchronous command is required.
+ * @poll: A flag stating if a polling transmission is required.
*
* Return: 0 on Success
*/
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
- void *buf, size_t len, u8 chan_id, bool async)
+ void *buf, size_t len, u8 chan_id,
+ bool async, bool poll)
{
int ret;
struct scmi_xfer *xfer;
@@ -684,6 +686,16 @@ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
if (ret)
return ret;
+ if (poll) {
+ if (is_transport_polling_capable(raw->desc)) {
+ xfer->hdr.poll_completion = true;
+ } else {
+ dev_err(raw->handle->dev,
+ "Failed to send RAW message - Polling NOT supported\n");
+ /* Drop the xfer acquired above before bailing out */
+ scmi_xfer_raw_put(raw->handle, xfer);
+ return -EINVAL;
+ }
+ }
+
ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
if (ret)
scmi_xfer_raw_put(raw->handle, xfer);
@@ -801,7 +813,7 @@ static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos,
- bool async)
+ bool async, bool poll)
{
int ret;
struct scmi_dbg_raw_data *rd = filp->private_data;
@@ -831,7 +843,7 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
}
ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
- rd->chan_id, async);
+ rd->chan_id, async, poll);
/* Reset ppos for next message ... */
rd->tx_size = 0;
@@ -875,7 +887,8 @@ static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, false);
}
static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
@@ -964,7 +977,8 @@ static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
- return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, false);
}
static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
@@ -976,6 +990,40 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.owner = THIS_MODULE,
};
+static ssize_t scmi_dbg_raw_mode_message_poll_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ false, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t scmi_dbg_raw_mode_message_poll_async_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos,
+ true, true);
+}
+
+static const struct file_operations scmi_dbg_raw_mode_message_poll_async_fops = {
+ .open = scmi_dbg_raw_mode_open,
+ .release = scmi_dbg_raw_mode_release,
+ .read = scmi_dbg_raw_mode_message_read,
+ .write = scmi_dbg_raw_mode_message_poll_async_write,
+ .poll = scmi_dbg_raw_mode_message_poll,
+ .owner = THIS_MODULE,
+};
+
static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
@@ -1199,6 +1247,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file("message_async", 0600, raw->dentry, raw,
&scmi_dbg_raw_mode_message_async_fops);
+ debugfs_create_file("message_poll", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file("message_poll_async", 0600, raw->dentry, raw,
+ &scmi_dbg_raw_mode_message_poll_async_fops);
+
debugfs_create_file("notification", 0400, raw->dentry, raw,
&scmi_dbg_raw_mode_notification_fops);
@@ -1230,6 +1284,14 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
debugfs_create_file_aux_num("message_async", 0600, chd,
raw, channels[i],
&scmi_dbg_raw_mode_message_async_fops);
+
+ debugfs_create_file_aux_num("message_poll", 0600, chd,
+ raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_fops);
+
+ debugfs_create_file_aux_num("message_poll_async", 0600,
+ chd, raw, channels[i],
+ &scmi_dbg_raw_mode_message_poll_async_fops);
}
}
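
A minimal user-space sketch of exercising the new message_poll entry; the
debugfs path follows the existing SCMI raw-mode layout and is an assumption
here, as is the Base/PROTOCOL_VERSION header value built from the usual SCMI
header encoding (protocol_id in bits[17:10], message_id in bits[7:0]):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    int main(void)
    {
            uint32_t hdr = 0x10 << 10;      /* Base protocol, PROTOCOL_VERSION */
            uint8_t rx[128];
            int fd;

            fd = open("/sys/kernel/debug/scmi/0/raw/message_poll", O_RDWR);
            if (fd < 0)
                    return 1;

            write(fd, &hdr, sizeof(hdr));   /* inject request, sent in polling mode */
            read(fd, rx, sizeof(rx));       /* collect the matching reply */
            close(fd);
            return 0;
    }
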
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
index a01bf5e47301..c34c8c837441 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -12,6 +12,30 @@ config IMX_SCMI_BBM_EXT
To compile this driver as a module, choose M here: the
module will be called imx-sm-bbm.
+config IMX_SCMI_CPU_EXT
+ tristate "i.MX SCMI CPU EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_CPU_DRV
+ default y if ARCH_MXC
+ help
+ This enables the i.MX System CPU Protocol to manage CPU
+ start, stop, etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-cpu.
+
+config IMX_SCMI_LMM_EXT
+ tristate "i.MX SCMI LMM EXTENSION"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_LMM_DRV
+ default y if ARCH_MXC
+ help
+ This enables the i.MX System Logical Machine Protocol to
+ manage Logical Machine boot, shutdown, etc.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-sm-lmm.
+
config IMX_SCMI_MISC_EXT
tristate "i.MX SCMI MISC EXTENSION"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile
index d3ee6d544924..e3a5ea46345c 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Makefile
+++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o
+obj-$(CONFIG_IMX_SCMI_CPU_EXT) += imx-sm-cpu.o
+obj-$(CONFIG_IMX_SCMI_LMM_EXT) += imx-sm-lmm.o
obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
new file mode 100644
index 000000000000..66f47f5371e5
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System control and Management Interface (SCMI) NXP CPU Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_cpu_protocol_cmd {
+ SCMI_IMX_CPU_ATTRIBUTES = 0x3,
+ SCMI_IMX_CPU_START = 0x4,
+ SCMI_IMX_CPU_STOP = 0x5,
+ SCMI_IMX_CPU_RESET_VECTOR_SET = 0x6,
+ SCMI_IMX_CPU_INFO_GET = 0xC,
+};
+
+struct scmi_imx_cpu_info {
+ u32 nr_cpu;
+};
+
+#define SCMI_IMX_CPU_NR_CPU_MASK GENMASK(15, 0)
+struct scmi_msg_imx_cpu_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_cpu_attributes_out {
+ __le32 attributes;
+#define CPU_MAX_NAME 16
+ u8 name[CPU_MAX_NAME];
+};
+
+struct scmi_imx_cpu_reset_vector_set_in {
+ __le32 cpuid;
+#define CPU_VEC_FLAGS_RESUME BIT(31)
+#define CPU_VEC_FLAGS_START BIT(30)
+#define CPU_VEC_FLAGS_BOOT BIT(29)
+ __le32 flags;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_cpu_info_get_out {
+#define CPU_RUN_MODE_START 0
+#define CPU_RUN_MODE_HOLD 1
+#define CPU_RUN_MODE_STOP 2
+#define CPU_RUN_MODE_SLEEP 3
+ __le32 runmode;
+ __le32 sleepmode;
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+static int scmi_imx_cpu_validate_cpuid(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_imx_cpu_info *info = ph->get_priv(ph);
+
+ if (cpuid >= info->nr_cpu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_cpu_start(const struct scmi_protocol_handle *ph,
+ u32 cpuid, bool start)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ if (start)
+ msg_id = SCMI_IMX_CPU_START;
+ else
+ msg_id = SCMI_IMX_CPU_STOP;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ struct scmi_imx_cpu_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ if (start)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_START);
+ if (boot)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_BOOT);
+ if (resume)
+ in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_RESUME);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_started(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool *started)
+{
+ struct scmi_imx_cpu_info_get_out *out;
+ struct scmi_xfer *t;
+ u32 mode;
+ int ret;
+
+ if (!started)
+ return -EINVAL;
+
+ *started = false;
+ ret = scmi_imx_cpu_validate_cpuid(ph, cpuid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_INFO_GET, sizeof(u32),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ mode = le32_to_cpu(out->runmode);
+ if (mode == CPU_RUN_MODE_START || mode == CPU_RUN_MODE_SLEEP)
+ *started = true;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_cpu_proto_ops scmi_imx_cpu_proto_ops = {
+ .cpu_reset_vector_set = scmi_imx_cpu_reset_vector_set,
+ .cpu_start = scmi_imx_cpu_start,
+ .cpu_started = scmi_imx_cpu_started,
+};
+
+static int scmi_imx_cpu_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_cpu_info *info)
+{
+ struct scmi_msg_imx_cpu_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ info->nr_cpu = le32_get_bits(attr->attributes, SCMI_IMX_CPU_NR_CPU_MASK);
+ dev_info(ph->dev, "i.MX SM CPU: %d cpus\n",
+ info->nr_cpu);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 cpuid)
+{
+ struct scmi_msg_imx_cpu_attributes_out *out;
+ char name[SCMI_SHORT_NAME_MAX_SIZE] = {'\0'};
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(cpuid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->name, SCMI_SHORT_NAME_MAX_SIZE);
+ dev_info(ph->dev, "i.MX CPU: name: %s\n", name);
+ } else {
+ dev_err(ph->dev, "i.MX cpu: Failed to get info of cpu(%u)\n", cpuid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_cpu_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_cpu_info *info;
+ u32 version;
+ int ret, i;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM CPU Protocol Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_cpu_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->nr_cpu; i++) {
+ ret = scmi_imx_cpu_attributes_get(ph, i);
+ if (ret)
+ return ret;
+ }
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_cpu = {
+ .id = SCMI_PROTOCOL_IMX_CPU,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_cpu_protocol_init,
+ .ops = &scmi_imx_cpu_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_cpu);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_CPU) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI CPU driver");
+MODULE_LICENSE("GPL");
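
A sketch of how an in-kernel consumer could drive the new CPU ops; the probe
fragment below is hypothetical and assumes the standard SCMI pattern of
fetching vendor protocol operations through handle->devm_protocol_get(), with
an illustrative cpuid and boot vector:

    static int example_probe(struct scmi_device *sdev)
    {
            const struct scmi_imx_cpu_proto_ops *cpu_ops;
            struct scmi_protocol_handle *ph;
            bool started;
            int ret;

            cpu_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU,
                                                      &ph);
            if (IS_ERR(cpu_ops))
                    return PTR_ERR(cpu_ops);

            /* Point CPU 1 at an illustrative boot vector, then start it */
            ret = cpu_ops->cpu_reset_vector_set(ph, 1, 0x80000000, false, true,
                                                false);
            if (ret)
                    return ret;

            ret = cpu_ops->cpu_start(ph, 1, true);
            if (ret)
                    return ret;

            return cpu_ops->cpu_started(ph, 1, &started);
    }
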
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
new file mode 100644
index 000000000000..b519c67fe920
--- /dev/null
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System control and Management Interface (SCMI) NXP LMM Protocol
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+#include "../../protocols.h"
+#include "../../notify.h"
+
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000
+
+enum scmi_imx_lmm_protocol_cmd {
+ SCMI_IMX_LMM_ATTRIBUTES = 0x3,
+ SCMI_IMX_LMM_BOOT = 0x4,
+ SCMI_IMX_LMM_RESET = 0x5,
+ SCMI_IMX_LMM_SHUTDOWN = 0x6,
+ SCMI_IMX_LMM_WAKE = 0x7,
+ SCMI_IMX_LMM_SUSPEND = 0x8,
+ SCMI_IMX_LMM_NOTIFY = 0x9,
+ SCMI_IMX_LMM_RESET_REASON = 0xA,
+ SCMI_IMX_LMM_POWER_ON = 0xB,
+ SCMI_IMX_LMM_RESET_VECTOR_SET = 0xC,
+};
+
+struct scmi_imx_lmm_priv {
+ u32 nr_lmm;
+};
+
+#define SCMI_IMX_LMM_NR_LM_MASK GENMASK(5, 0)
+#define SCMI_IMX_LMM_NR_MAX 16
+struct scmi_msg_imx_lmm_protocol_attributes {
+ __le32 attributes;
+};
+
+struct scmi_msg_imx_lmm_attributes_out {
+ __le32 lmid;
+ __le32 attributes;
+ __le32 state;
+ __le32 errstatus;
+ u8 name[LMM_MAX_NAME];
+};
+
+struct scmi_imx_lmm_reset_vector_set_in {
+ __le32 lmid;
+ __le32 cpuid;
+ __le32 flags; /* reserved for future extension */
+ __le32 resetvectorlow;
+ __le32 resetvectorhigh;
+};
+
+struct scmi_imx_lmm_shutdown_in {
+ __le32 lmid;
+#define SCMI_IMX_LMM_SHUTDOWN_GRACEFUL BIT(0)
+ __le32 flags;
+};
+
+static int scmi_imx_lmm_validate_lmid(const struct scmi_protocol_handle *ph, u32 lmid)
+{
+ struct scmi_imx_lmm_priv *priv = ph->get_priv(ph);
+
+ if (lmid >= priv->nr_lmm)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int scmi_imx_lmm_attributes(const struct scmi_protocol_handle *ph,
+ u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ struct scmi_msg_imx_lmm_attributes_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_ATTRIBUTES, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ info->lmid = le32_to_cpu(out->lmid);
+ info->state = le32_to_cpu(out->state);
+ info->errstatus = le32_to_cpu(out->errstatus);
+ strscpy(info->name, out->name);
+ dev_dbg(ph->dev, "i.MX LMM: Logical Machine(%d), name: %s\n",
+ info->lmid, info->name);
+ } else {
+ dev_err(ph->dev, "i.MX LMM: Failed to get info of Logical Machine(%u)\n", lmid);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int
+scmi_imx_lmm_power_boot(const struct scmi_protocol_handle *ph, u32 lmid, bool boot)
+{
+ struct scmi_xfer *t;
+ u8 msg_id;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ if (boot)
+ msg_id = SCMI_IMX_LMM_BOOT;
+ else
+ msg_id = SCMI_IMX_LMM_POWER_ON;
+
+ ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(lmid, t->tx.buf);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_reset_vector_set(const struct scmi_protocol_handle *ph,
+ u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ struct scmi_imx_lmm_reset_vector_set_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_RESET_VECTOR_SET, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ in->cpuid = cpu_to_le32(cpuid);
+ in->flags = cpu_to_le32(0);
+ in->resetvectorlow = cpu_to_le32(lower_32_bits(vector));
+ in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector));
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_shutdown(const struct scmi_protocol_handle *ph, u32 lmid,
+ u32 flags)
+{
+ struct scmi_imx_lmm_shutdown_in *in;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = scmi_imx_lmm_validate_lmid(ph, lmid);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_SHUTDOWN, sizeof(*in),
+ 0, &t);
+ if (ret)
+ return ret;
+
+ in = t->tx.buf;
+ in->lmid = cpu_to_le32(lmid);
+ if (flags & SCMI_IMX_LMM_SHUTDOWN_GRACEFUL)
+ in->flags = cpu_to_le32(SCMI_IMX_LMM_SHUTDOWN_GRACEFUL);
+ else
+ in->flags = cpu_to_le32(0);
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static const struct scmi_imx_lmm_proto_ops scmi_imx_lmm_proto_ops = {
+ .lmm_power_boot = scmi_imx_lmm_power_boot,
+ .lmm_info = scmi_imx_lmm_attributes,
+ .lmm_reset_vector_set = scmi_imx_lmm_reset_vector_set,
+ .lmm_shutdown = scmi_imx_lmm_shutdown,
+};
+
+static int scmi_imx_lmm_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct scmi_imx_lmm_priv *priv)
+{
+ struct scmi_msg_imx_lmm_protocol_attributes *attr;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ priv->nr_lmm = le32_get_bits(attr->attributes, SCMI_IMX_LMM_NR_LM_MASK);
+ if (priv->nr_lmm > SCMI_IMX_LMM_NR_MAX) {
+ dev_err(ph->dev, "i.MX LMM: %d exceeds max supported Logical Machines\n",
+ priv->nr_lmm);
+ ret = -EINVAL;
+ } else {
+ dev_info(ph->dev, "i.MX LMM: %d Logical Machines\n", priv->nr_lmm);
+ }
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_lmm_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_lmm_priv *info;
+ u32 version;
+ int ret;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_info(ph->dev, "NXP SM LMM Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = scmi_imx_lmm_protocol_attributes_get(ph, info);
+ if (ret)
+ return ret;
+
+ return ph->set_priv(ph, info, version);
+}
+
+static const struct scmi_protocol scmi_imx_lmm = {
+ .id = SCMI_PROTOCOL_IMX_LMM,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_imx_lmm_protocol_init,
+ .ops = &scmi_imx_lmm_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
+};
+module_scmi_protocol(scmi_imx_lmm);
+
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_LMM) "-" SCMI_IMX_VENDOR);
+MODULE_DESCRIPTION("i.MX SCMI LMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index b2dfd6c46ca2..4e246a78a042 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -32,6 +32,518 @@ port, and deploy the SM on supported processors.
The SM implements an interface compliant with the Arm SCMI Specification
with additional vendor specific extensions.
+System Control and Management Logical Machine Management Vendor Protocol
+========================================================================
+
+The SM adds the concept of logical machines (LMs). These are analogous to
+VMs and each has its own instance of SCMI. All normal SCMI calls apply only
+to the LM running the calling agent. That includes boot, shutdown, reset,
+suspend, wake, etc. If a caller makes the SCMI base call to get a list
+of agents, it will only get those on that LM. Each LM is completely isolated
+from the others. This is mandatory for them to operate independently.
+
+This protocol is intended to support boot, shutdown, and reset of other
+logical machines (LMs). It is usually used to allow one LM (e.g. OSPM) to
+manage another LM, which is usually an offload or accelerator engine.
+Notifications from this protocol can also be used to manage a communication
+link to another LM. The LMM protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover all the LMs defined in the system.
+- Boot a target LM.
+- Shutdown a target LM (gracefully or forcibly).
+- Reset a target LM (gracefully or forcibly).
+- Wake a target LM from suspend.
+- Suspend a target LM (gracefully).
+- Read boot/shutdown/reset information for a target LM.
+- Get notifications when a target LM boots or shuts down (e.g. if LM 'X'
+  requested notifications about LM 'Y', the SCMI firmware will send a
+  notification to LM 'X' whenever LM 'Y' boots or shuts down).
+
+'Graceful' means asking the LM itself to shut down/reset/etc. (e.g. sending a
+notification to Linux, which then reboots or powers down itself). It is an
+asynchronous command: SUCCESS only means the command returned successfully,
+not that the reboot/reset has finished.
+
+'Forceful' means the SM will forcibly shut down/reset/etc. the LM. It is a
+synchronous command: SUCCESS means the LM has been successfully shut
+down/reset/etc.
+Commands that have no Graceful/Forceful flag setting, such as WAKE and
+SUSPEND, are Graceful commands.
+
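
Tying these semantics back to the driver code added earlier in this series, a
graceful shutdown request from an in-kernel consumer would look roughly like
the hypothetical fragment below (the ops-fetch pattern and the lmid value are
assumptions for illustration; the flag name is as defined in imx-sm-lmm.c):

    /* Gracefully shut down LM 2: SUCCESS only means the request was sent */
    const struct scmi_imx_lmm_proto_ops *lmm_ops;
    struct scmi_protocol_handle *ph;

    lmm_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph);
    if (IS_ERR(lmm_ops))
            return PTR_ERR(lmm_ops);

    return lmm_ops->lmm_shutdown(ph, 2, SCMI_IMX_LMM_SHUTDOWN_GRACEFUL);
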
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:5] Reserved, must be zero. |
+| |Bits[4:0] Number of Logical Machines |
+| |Note that due to both hardware limitations and reset reason|
+| |field limitations, the max number of LM is 16. The minimum |
+| |is 1. |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0 |
++------------------+-----------------------------------------------------------+
+
+LMM_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned. |
+| |NOT_FOUND: if lmid not points to a valid logical machine. |
+| |DENIED: if the agent does not have permission to get info |
+| |for the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier of the LM whose identification is requested. |
+| |This field is: Populated with the lmid of the calling |
+| |agent, when the lmid parameter passed via the command is |
+| |0xFFFFFFFF. Identical to the lmid field passed via the |
+| |calling parameters, in all other cases |
++------------------+-----------------------------------------------------------+
|uint32 attributes | Bits[31:0] reserved, must be zero                         |
++------------------+-----------------------------------------------------------+
+|uint32 state | Current state of the LM |
++------------------+-----------------------------------------------------------+
+|uint32 errStatus | Last error status recorded |
++------------------+-----------------------------------------------------------+
+|char name[16] | A NULL terminated ASCII string with the LM name, of up |
+| | to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+LMM_BOOT
+~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
|int32 status      |SUCCESS: if the LM boot was successfully started.          |
|                  |NOT_FOUND: if lmid is not a valid logical machine.         |
|                  |INVALID_PARAMETERS: if lmid is the same as the caller.     |
|                  |DENIED: if the agent does not have permission to manage    |
|                  |the LM specified by lmid.                                  |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET
+~~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Reset flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: The LMM RESET command finished successfully in |
+| |graceful reset or LM successfully resets in forceful reset.|
+| |NOT_FOUND: if lmid not points to a valid logical machine. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage the|
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_SHUTDOWN
+~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Reset flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] Graceful request: |
+| |Set to 1 if the request is a graceful request. |
+| |Set to 0 if the request is a forceful request. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: The LMM shutdown command finished successfully in |
+| |graceful request or LM successfully shutdown in forceful |
+| |request. |
+| |NOT_FOUND: if lmid not points to a valid logical machine. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage the|
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_WAKE
+~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if LM wake command successfully returns. |
+| |NOT_FOUND: if lmid not points to a valid logical machine. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage the|
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_SUSPEND
+~~~~~~~~~~~
+
+message_id: 0x8
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if LM suspend command successfully returns. |
+| |NOT_FOUND: if lmid not points to a valid logical machine. |
+| |INVALID_PARAMETERS: if lmid is same as the caller. |
+| |DENIED: if the agent does not have permission to manage the|
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_NOTIFY
+~~~~~~~~~~
+
+message_id: 0x9
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|uint32 flags |Notification flags: |
+| |Bits[31:3] Reserved, must be zero. |
+| |Bit[3] Wake (resume) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[2] Suspend (sleep) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[1] Shutdown (off) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
+| |Bit[0] Boot (on) notification: |
+| |Set to 1 to send notification. |
+| |Set to 0 if no notification. |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the notification state is successfully |
+| |updated. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if the flags parameter specifies |
+| |unsupported or invalid configurations. |
+| |DENIED: if the agent does not have permission to request |
+| |the notification. |
++------------------+-----------------------------------------------------------+
+
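+As an illustration (not part of the protocol definition), an agent could
+compose the flags word for LMM_NOTIFY with bit masks like the following;
+the macro names are hypothetical::
+
+  #define LMM_NOTIFY_BOOT     BIT(0) /* Boot (on) notification */
+  #define LMM_NOTIFY_SHUTDOWN BIT(1) /* Shutdown (off) notification */
+  #define LMM_NOTIFY_SUSPEND  BIT(2) /* Suspend (sleep) notification */
+  #define LMM_NOTIFY_WAKE     BIT(3) /* Wake (resume) notification */
+
+  /* Ask to be notified when the LM boots or shuts down. */
+  u32 flags = LMM_NOTIFY_BOOT | LMM_NOTIFY_SHUTDOWN;
+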
+LMM_RESET_REASON
+~~~~~~~~~~~~~~~~
+
+message_id: 0xA
+protocol_id: 0x80
+This command is mandatory.
+
+This command returns the reason for the last reset of the LM, such as POR,
+WDOG, or JTAG.
+
++---------------------+--------------------------------------------------------+
+|Parameters |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++---------------------+--------------------------------------------------------+
+|Return values |
++---------------------+--------------------------------------------------------+
+|Name |Description |
++---------------------+--------------------------------------------------------+
+|int32 status |SUCCESS: if the reset reason of the LM is successfully |
+| |returned. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |DENIED: if the agent does not have permission to request|
+| |the reset reason. |
++---------------------+--------------------------------------------------------+
+|uint32 bootflags |Boot reason flags. This parameter has the format: |
+| |Bit[31] Valid: |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Reserved, must be zero. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin: |
+| |Logical Machine (LM) ID that caused the boot of this LM.|
+| |Bit[23] Valid error ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID (agent ID of the system). |
+| |Bits[7:0] Reason (e.g. WDOG, POR, FCCU): |
+| |See the SRESR register description in the System |
+| |Reset Controller (SRC) section of the SoC reference |
+| |manual. A reason value maps to BIT(reason) in SRESR. |
+|uint32 shutdownflags |Shutdown reason flags. This parameter has the format: |
+| |Bit[31] Valid: |
+| |Set to 1 if the entire reason is valid. |
+| |Set to 0 if the entire reason is not valid. |
+| |Bits[30:29] Number of valid extended info words. |
+| |Bit[28] Valid origin: |
+| |Set to 1 if the origin field is valid. |
+| |Set to 0 if the origin field is not valid. |
+| |Bits[27:24] Origin: |
+| |Logical Machine (LM) ID that caused the shutdown of |
+| |this LM. |
+| |Bit[23] Valid error ID: |
+| |Set to 1 if the error ID field is valid. |
+| |Set to 0 if the error ID field is not valid. |
+| |Bits[22:8] Error ID (agent ID of the system). |
+| |Bits[7:0] Reason: |
+| |See the SRESR register description in the System |
+| |Reset Controller (SRC) section of the SoC reference |
+| |manual. A reason value maps to BIT(reason) in SRESR. |
++---------------------+--------------------------------------------------------+
+|uint32 extinfo[3] |Array of extended info words (e.g. fault PC) |
++---------------------+--------------------------------------------------------+
+
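+A minimal sketch of decoding the bootflags word returned by this command,
+assuming the field layout above; the mask names are hypothetical::
+
+  #include <linux/bitfield.h>
+  #include <linux/bits.h>
+
+  #define LMM_BOOTFLAGS_VALID      BIT(31)
+  #define LMM_BOOTFLAGS_ORIGIN_VLD BIT(28)
+  #define LMM_BOOTFLAGS_ORIGIN     GENMASK(27, 24)
+  #define LMM_BOOTFLAGS_REASON     GENMASK(7, 0)
+
+  static void lmm_print_boot_reason(u32 bootflags)
+  {
+          if (!(bootflags & LMM_BOOTFLAGS_VALID))
+                  return;
+
+          /* The reason value indexes a bit in the SRC SRESR register. */
+          pr_info("boot reason: %lu\n",
+                  (unsigned long)FIELD_GET(LMM_BOOTFLAGS_REASON, bootflags));
+
+          if (bootflags & LMM_BOOTFLAGS_ORIGIN_VLD)
+                  pr_info("booted by LM: %lu\n",
+                          (unsigned long)FIELD_GET(LMM_BOOTFLAGS_ORIGIN, bootflags));
+  }
+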
+LMM_POWER_ON
+~~~~~~~~~~~~
+
+message_id: 0xB
+protocol_id: 0x80
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the LM successfully powers on. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine. |
+| |INVALID_PARAMETERS: if lmid is the same as the caller's. |
+| |DENIED: if the agent does not have permission to manage |
+| |the LM specified by lmid. |
++------------------+-----------------------------------------------------------+
+
+LMM_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x80
+This command is mandatory.
+
++-----------------------+------------------------------------------------------+
+|Parameters |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|uint32 lmid |ID of the Logical Machine |
++-----------------------+------------------------------------------------------+
+|uint32 cpuid |ID of the CPU inside the LM |
++-----------------------+------------------------------------------------------+
+|uint32 flags |Reset vector flags |
+| |Bits[31:0] Reserved, must be zero. |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorLow |Lower 32 bits of the reset vector |
++-----------------------+------------------------------------------------------+
+|uint32 resetVectorHigh |Upper 32 bits of the reset vector |
++-----------------------+------------------------------------------------------+
+|Return values |
++-----------------------+------------------------------------------------------+
+|Name |Description |
++-----------------------+------------------------------------------------------+
+|int32 status |SUCCESS: if the reset vector is set successfully. |
+| |NOT_FOUND: if lmid does not point to a valid logical |
+| |machine, or cpuid is not valid. |
+| |INVALID_PARAMETERS: if the reset vector is invalid. |
+| |DENIED: if the agent does not have permission to set |
+| |the reset vector for the CPU in the LM. |
++-----------------------+------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x80
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications issued after the successful return of this |
+| |command must comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
+Notifications
+_____________
+
+LMM_EVENT
+~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x80
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 lmid |Identifier for the LM that caused the transition. |
++------------------+-----------------------------------------------------------+
+|uint32 eventlm |Identifier of the LM this event refers to. |
++------------------+-----------------------------------------------------------+
+|uint32 flags |LM events: |
+| |Bits[31:4] Reserved, must be zero. |
+| |Bit[3] Wake (resume) event: |
+| |1 LM has awakened. |
+| |0 not a wake event. |
+| |Bit[2] Suspend (sleep) event: |
+| |1 LM has suspended. |
+| |0 not a suspend event. |
+| |Bit[1] Shutdown (off) event: |
+| |1 LM has shut down. |
+| |0 not a shutdown event. |
+| |Bit[0] Boot (on) event: |
+| |1 LM has booted. |
+| |0 not a boot event. |
++------------------+-----------------------------------------------------------+
+
SCMI_BBM: System Control and Management BBM Vendor Protocol
==============================================================
@@ -436,6 +948,322 @@ protocol_id: 0x81
| |0 no button change detected. |
+------------------+-----------------------------------------------------------+
+System Control and Management CPU Vendor Protocol
+=================================================
+
+This protocol allows an agent to start or stop a CPU. It is used to manage
+auxiliary CPUs in a target LM (e.g. additional cores in an AP cluster or
+Cortex-M cores).
+Note:
+ - For cores in an AP cluster, PSCI should be used, and the PSCI firmware
+   will use the CPU protocol to handle them. For cores in a non-AP cluster,
+   an Operating System (e.g. Linux) can use the CPU protocol to control
+   Cortex-M7 cores.
+ - A CPU here denotes the core and its auxiliary peripherals (e.g. TCM)
+   inside the i.MX SoC.
+
+There are cases where giving an agent full control of a CPU via the CPU
+protocol is not desired. The LMM protocol is restricted to just boot,
+shutdown, and the like, so an agent might be able to boot another logical
+machine without being able to directly interfere with the state of its CPUs.
+This is also the reason there is an LMM power-on command even though the
+same could be done through the power protocol.
+
+The CPU protocol provides commands to:
+
+- Describe the protocol version.
+- Discover implementation attributes.
+- Discover the CPUs defined in the system.
+- Start a CPU.
+- Stop a CPU.
+- Set the boot and resume addresses for a CPU.
+- Set the sleep mode of a CPU.
+- Configure wake-up sources for a CPU.
+- Configure power domain reactions (LPM mode and retention mask) for a CPU.
+
+The CPU IDs can be found in the CPU section of the SoC DEVICE: SM Device
+Interface. They can also be found in the SoC reference manual; see the CPU
+Mode Control (CMC) list in the General Power Controller (GPC) section.
+
+CPU settings are not aggregated and setting their state is normally exclusive
+to one client.
+
+Commands:
+_________
+
+PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~
+
+message_id: 0x0
+protocol_id: 0x82
+This command is mandatory.
+
++---------------+--------------------------------------------------------------+
+|Return values |
++---------------+--------------------------------------------------------------+
+|Name |Description |
++---------------+--------------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++---------------+--------------------------------------------------------------+
+|uint32 version | For this revision of the specification, this value must be |
+| | 0x10000. |
++---------------+--------------------------------------------------------------+
+
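+The version word uses the usual SCMI encoding: major revision in the upper
+16 bits, minor revision in the lower 16 bits, so 0x10000 is version 1.0.
+A minimal agent-side check might look like this::
+
+  u32 version = 0x10000;            /* value returned by the platform */
+  u16 major = version >> 16;        /* 1 */
+  u16 minor = version & 0xffff;     /* 0 */
+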
+PROTOCOL_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x1
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status | See ARM SCMI Specification for status code definitions. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Protocol attributes: |
+| |Bits[31:16] Reserved, must be zero. |
+| |Bits[15:0] Number of CPUs |
++------------------+-----------------------------------------------------------+
+
+PROTOCOL_MESSAGE_ATTRIBUTES
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x2
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: in case the message is implemented and available |
+| |to use. |
+| |NOT_FOUND: if the message identified by message_id is |
+| |invalid or not implemented. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Flags that are associated with a specific command in the |
+| |protocol. For all commands in this protocol, this |
+| |parameter has a value of 0. |
++------------------+-----------------------------------------------------------+
+
+CPU_ATTRIBUTES
+~~~~~~~~~~~~~~
+
+message_id: 0x3
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully. |
+| |NOT_FOUND: if the cpuid is not valid. |
++------------------+-----------------------------------------------------------+
+|uint32 attributes |Bits[31:0] Reserved, must be zero |
++------------------+-----------------------------------------------------------+
+|char name[16] |NULL-terminated ASCII CPU name of up to 16 bytes |
++------------------+-----------------------------------------------------------+
+
+CPU_START
+~~~~~~~~~
+
+message_id: 0x4
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU is started successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to start this CPU.|
++------------------+-----------------------------------------------------------+
+
+CPU_STOP
+~~~~~~~~
+
+message_id: 0x5
+protocol_id: 0x82
+This command is mandatory.
+
++------------------+-----------------------------------------------------------+
+|Parameters |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++------------------+-----------------------------------------------------------+
+|Return values |
++------------------+-----------------------------------------------------------+
+|Name |Description |
++------------------+-----------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU is stopped successfully. |
+| |NOT_FOUND: if cpuid is not valid. |
+| |DENIED: the calling agent is not allowed to stop this CPU. |
++------------------+-----------------------------------------------------------+
+
+CPU_RESET_VECTOR_SET
+~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x6
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Reset vector flags: |
+| |Bit[31] Resume flag. |
+| |Set to 1 to update the reset vector used on resume. |
+| |Bit[30] Boot flag. |
+| |Set to 1 to update the reset vector used for boot. |
+| |Bits[29:1] Reserved, must be zero. |
+| |Bit[0] Table flag. |
+| |Set to 1 if vector is the vector table base address. |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorLow |Lower vector: |
+| |If bit[0] of flags is 0, the lower 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the lower 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|uint32 resetVectorHigh|Upper vector: |
+| |If bit[0] of flags is 0, the upper 32 bits of the |
+| |physical address where the CPU should execute from on |
+| |reset. If bit[0] of flags is 1, the upper 32 bits of |
+| |the vector table base address |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU reset vector is set successfully. |
+| |NOT_FOUND: if cpuid does not point to a valid CPU. |
+| |INVALID_PARAMETERS: the requested vector type is not |
+| |supported by this CPU. |
+| |DENIED: the calling agent is not allowed to set the |
+| |reset vector of this CPU. |
++----------------------+-------------------------------------------------------+
+
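+As a sketch of how a caller might split a 64-bit address into the two
+vector words and request a boot vector update (the flag macros are
+hypothetical)::
+
+  #define CPU_VEC_FLAGS_RESUME BIT(31) /* update the resume vector */
+  #define CPU_VEC_FLAGS_BOOT   BIT(30) /* update the boot vector */
+  #define CPU_VEC_FLAGS_TABLE  BIT(0)  /* vector is a table base address */
+
+  u64 vector = 0x80000000ULL;          /* example entry point */
+  u32 flags = CPU_VEC_FLAGS_BOOT;      /* plain entry point, not a table */
+  u32 lo = lower_32_bits(vector);
+  u32 hi = upper_32_bits(vector);
+  /* issue CPU_RESET_VECTOR_SET(cpuid, flags, lo, hi) over SCMI */
+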
+CPU_SLEEP_MODE_SET
+~~~~~~~~~~~~~~~~~~
+
+message_id: 0x7
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 flags |Sleep mode flags: |
+| |Bits[31:1] Reserved, must be zero. |
+| |Bit[0] IRQ mux: |
+| |If set to 1, the wakeup mux source is the GIC; |
+| |if set to 0, it is the GPC. |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Target sleep mode. When the CPU executes WFI, the GPC |
+| |is triggered to enter one of the following modes: |
+| |RUN: (0) |
+| |WAIT: (1) |
+| |STOP: (2) |
+| |SUSPEND: (3) |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if the CPU sleep mode is set successfully. |
+| |NOT_FOUND: if cpuid does not point to a valid CPU. |
+| |INVALID_PARAMETERS: if sleepmode or flags are invalid. |
+| |DENIED: the calling agent is not allowed to configure |
+| |the CPU. |
++----------------------+-------------------------------------------------------+
+
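+For example, to request SUSPEND mode on WFI with wakeups routed through the
+GIC (a sketch; the macro names are hypothetical)::
+
+  #define CPU_SLEEP_FLAG_IRQ_MUX_GIC BIT(0) /* wake via GIC, else GPC */
+
+  enum cpu_sleep_mode {
+          CPU_SLEEP_RUN     = 0,
+          CPU_SLEEP_WAIT    = 1,
+          CPU_SLEEP_STOP    = 2,
+          CPU_SLEEP_SUSPEND = 3,
+  };
+
+  u32 flags = CPU_SLEEP_FLAG_IRQ_MUX_GIC;
+  u32 sleepmode = CPU_SLEEP_SUSPEND;
+  /* issue CPU_SLEEP_MODE_SET(cpuid, flags, sleepmode) over SCMI */
+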
+CPU_INFO_GET
+~~~~~~~~~~~~
+
+message_id: 0xC
+protocol_id: 0x82
+This command is mandatory.
+
++----------------------+-------------------------------------------------------+
+|Parameters |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|uint32 cpuid |Identifier for the CPU |
++----------------------+-------------------------------------------------------+
+|Return values |
++----------------------+-------------------------------------------------------+
+|Name |Description |
++----------------------+-------------------------------------------------------+
+|int32 status |SUCCESS: if valid attributes are returned successfully.|
+| |NOT_FOUND: if the cpuid is not valid. |
++----------------------+-------------------------------------------------------+
+|uint32 runmode |Run mode for the CPU: |
+| |RUN(0): CPU is started. |
+| |HOLD(1): CPU is powered up with reset asserted. |
+| |STOP(2): CPU is reset and held. |
+| |SUSPEND(3): CPU is in a cpuidle state. |
++----------------------+-------------------------------------------------------+
+|uint32 sleepmode |Sleep mode for the CPU, see CPU_SLEEP_MODE_SET |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorlow |Reset vector low 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+|uint32 resetvectorhigh|Reset vector high 32 bits for the CPU |
++----------------------+-------------------------------------------------------+
+
+NEGOTIATE_PROTOCOL_VERSION
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+message_id: 0x10
+protocol_id: 0x82
+This command is mandatory.
+
++--------------------+---------------------------------------------------------+
+|Parameters |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|uint32 version |The negotiated protocol version the agent intends to use |
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: if the negotiated protocol version is supported |
+| |by the platform. All commands, responses, and |
+| |notifications issued after the successful return of this |
+| |command must comply with the negotiated version. |
+| |NOT_SUPPORTED: if the protocol version is not supported. |
++--------------------+---------------------------------------------------------+
+
SCMI_MISC: System Control and Management MISC Vendor Protocol
================================================================
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 3e8051fe8296..71e2a9a89f6a 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -1062,13 +1062,12 @@ static bool __init sdei_present_acpi(void)
return true;
}
-void __init sdei_init(void)
+void __init acpi_sdei_init(void)
{
struct platform_device *pdev;
int ret;
- ret = platform_driver_register(&sdei_driver);
- if (ret || !sdei_present_acpi())
+ if (!sdei_present_acpi())
return;
pdev = platform_device_register_simple(sdei_driver.driver.name,
@@ -1081,6 +1080,12 @@ void __init sdei_init(void)
}
}
+static int __init sdei_init(void)
+{
+ return platform_driver_register(&sdei_driver);
+}
+arch_initcall(sdei_init);
+
int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
index 49d84f7e59e6..3f8777ee4dc0 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -96,10 +96,11 @@ static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *bui
if (info_len % 4) {
/* Create a padded string with length a multiple of 4 */
+ size_t copy_len = info_len;
info_len = round_up(info_len, 4);
tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
- memcpy(tmp, info, info_len);
+ memcpy(tmp, info, copy_len);
info = tmp;
}
@@ -176,6 +177,9 @@ struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
struct cs_dsp_mock_bin_builder *builder;
struct wmfw_coeff_hdr *hdr;
+ KUNIT_ASSERT_LE(priv->test, format_version, 0xff);
+ KUNIT_ASSERT_LE(priv->test, fw_version, 0xffffff);
+
builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
builder->test_priv = priv;
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
index 73412bcef50c..95946fac5563 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -505,9 +505,11 @@ void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
* Could be one 32-bit register or two 16-bit registers.
* A raw read will read the requested number of bytes.
*/
- regmap_raw_read(priv->dsp->regmap,
- xm + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
- &num_algs_be32, sizeof(num_algs_be32));
+ KUNIT_ASSERT_GE(priv->test, 0,
+ regmap_raw_read(priv->dsp->regmap,
+ xm +
+ (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32)));
num_algs = be32_to_cpu(num_algs_be32);
bytes = sizeof(struct wmfw_adsp2_id_hdr) +
(num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
@@ -516,9 +518,10 @@ void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
break;
case WMFW_HALO:
- regmap_read(priv->dsp->regmap,
- xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
- &num_algs);
+ KUNIT_ASSERT_GE(priv->test, 0,
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs));
bytes = sizeof(struct wmfw_halo_id_hdr) +
(num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
4 /* terminator word */;
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
index 5a3ac03ac37f..5e1d5a810afe 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -133,10 +133,11 @@ void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
if (info_len % 4) {
/* Create a padded string with length a multiple of 4 */
+ size_t copy_len = info_len;
info_len = round_up(info_len, 4);
tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
- memcpy(tmp, info, info_len);
+ memcpy(tmp, info, copy_len);
info = tmp;
}
@@ -178,6 +179,8 @@ void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *buil
size_t bytes_needed, name_len, description_len;
int offset;
+ KUNIT_ASSERT_LE(builder->test_priv->test, alg_id, 0xffffff);
+
/* Bytes needed for region header */
bytes_needed = offsetof(struct wmfw_region, data);
@@ -435,6 +438,8 @@ struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
{
struct cs_dsp_mock_wmfw_builder *builder;
+ KUNIT_ASSERT_LE(priv->test, format_version, 0xff);
+
/* If format version isn't given use the default for the target core */
if (format_version < 0) {
switch (priv->dsp->type) {
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
index 83386cc978e3..ebca3a4ab0f1 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
@@ -776,7 +776,6 @@ static void cs_dsp_ctl_cache_init_multiple_offsets(struct kunit *test)
"dummyalg", NULL);
/* Create controls identical except for offset */
- def.length_bytes = 8;
def.offset_dsp_words = 0;
def.shortname = "CtlA";
cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 5fe61b9ab5f9..db8c5c03d3a2 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -281,6 +281,30 @@ config EFI_EMBEDDED_FIRMWARE
bool
select CRYPTO_LIB_SHA256
+config EFI_SBAT
+ def_bool y if EFI_SBAT_FILE!=""
+
+config EFI_SBAT_FILE
+ string "Embedded SBAT section file path"
+ depends on EFI_ZBOOT
+ help
+ The SBAT section provides a way to improve SecureBoot revocations of UEFI
+ binaries by introducing a generation-based mechanism. With SBAT, older
+ UEFI binaries can be prevented from booting by bumping the minimal
+ required generation for the specific component in the bootloader.
+
+ Note: SBAT information is distribution specific, i.e. the owner of the
+ signing SecureBoot certificate must define the SBAT policy. Linux
+ kernel upstream does not define SBAT components and their generations.
+
+ See https://github.com/rhboot/shim/blob/main/SBAT.md for additional
+ details.
+
+ Specify a file with SBAT data which is going to be embedded as '.sbat'
+ section into the kernel.
+
+ If unsure, leave blank.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 7309394b8fc9..e57bff702b5f 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -558,6 +558,7 @@ int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
__weak __alias(__efi_mem_desc_lookup);
+EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
/*
* Calculate the highest address of an efi memory descriptor.
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d23a1b9fed75..939a4955e00b 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -31,7 +31,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
$(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \
$(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_LOONGARCH) += -fpie
+cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
@@ -85,7 +85,6 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o smbios.o
-lib-$(CONFIG_EFI_MIXED) += x86-mixed.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
@@ -102,7 +101,7 @@ lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
lib-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o bitmap.o find.o
-extra-y := $(lib-y)
+targets := $(lib-y)
lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
# Even when -mbranch-protection=none is set, Clang will generate a
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 48842b5c106b..92e3c73502ba 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -44,6 +44,10 @@ AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE
$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
$(call if_changed_rule,as_o_S)
+ifneq ($(CONFIG_EFI_SBAT_FILE),)
+$(obj)/zboot-header.o: $(CONFIG_EFI_SBAT_FILE)
+endif
+
ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index fd6dc790c5a8..7aa2f9ad2935 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -601,6 +601,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
* @image: EFI loaded image protocol
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
+ * @out: pointer to store the address of the initrd table
*
* Return: status code
*/
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
index 77359e802181..f1c5fb45d5f7 100644
--- a/drivers/firmware/efi/libstub/x86-5lvl.c
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -62,7 +62,7 @@ efi_status_t efi_setup_5level_paging(void)
void efi_5level_switch(void)
{
- bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
+ bool want_la57 = !efi_no5lvl;
bool have_la57 = native_read_cr4() & X86_CR4_LA57;
bool need_toggle = want_la57 ^ have_la57;
u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
diff --git a/drivers/firmware/efi/libstub/x86-mixed.S b/drivers/firmware/efi/libstub/x86-mixed.S
deleted file mode 100644
index e04ed99bc449..000000000000
--- a/drivers/firmware/efi/libstub/x86-mixed.S
+++ /dev/null
@@ -1,253 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
- *
- * Early support for invoking 32-bit EFI services from a 64-bit kernel.
- *
- * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT and IDT before we make EFI service
- * calls.
- *
- * On the plus side, we don't have to worry about mangling 64-bit
- * addresses into 32-bits because we're executing with an identity
- * mapped pagetable and haven't transitioned to 64-bit virtual addresses
- * yet.
- */
-
-#include <linux/linkage.h>
-#include <asm/desc_defs.h>
-#include <asm/msr.h>
-#include <asm/page_types.h>
-#include <asm/pgtable_types.h>
-#include <asm/processor-flags.h>
-#include <asm/segment.h>
-
- .text
- .code32
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
-SYM_FUNC_START(efi32_stub_entry)
- call 1f
-1: popl %ecx
-
- /* Clear BSS */
- xorl %eax, %eax
- leal (_bss - 1b)(%ecx), %edi
- leal (_ebss - 1b)(%ecx), %ecx
- subl %edi, %ecx
- shrl $2, %ecx
- cld
- rep stosl
-
- add $0x4, %esp /* Discard return address */
- movl 8(%esp), %ebx /* struct boot_params pointer */
- jmp efi32_startup
-SYM_FUNC_END(efi32_stub_entry)
-#endif
-
-/*
- * Called using a far call from __efi64_thunk() below, using the x86_64 SysV
- * ABI (except for R8/R9 which are inaccessible to 32-bit code - EAX/EBX are
- * used instead). EBP+16 points to the arguments passed via the stack.
- *
- * The first argument (EDI) is a pointer to the boot service or protocol, to
- * which the remaining arguments are passed, each truncated to 32 bits.
- */
-SYM_FUNC_START_LOCAL(efi_enter32)
- /*
- * Convert x86-64 SysV ABI params to i386 ABI
- */
- pushl 32(%ebp) /* Up to 3 args passed via the stack */
- pushl 24(%ebp)
- pushl 16(%ebp)
- pushl %ebx /* R9 */
- pushl %eax /* R8 */
- pushl %ecx
- pushl %edx
- pushl %esi
-
- /* Disable paging */
- movl %cr0, %eax
- btrl $X86_CR0_PG_BIT, %eax
- movl %eax, %cr0
-
- /* Disable long mode via EFER */
- movl $MSR_EFER, %ecx
- rdmsr
- btrl $_EFER_LME, %eax
- wrmsr
-
- call *%edi
-
- /* We must preserve return value */
- movl %eax, %edi
-
- call efi32_enable_long_mode
-
- addl $32, %esp
- movl %edi, %eax
- lret
-SYM_FUNC_END(efi_enter32)
-
- .code64
-SYM_FUNC_START(__efi64_thunk)
- push %rbp
- movl %esp, %ebp
- push %rbx
-
- /* Move args #5 and #6 into 32-bit accessible registers */
- movl %r8d, %eax
- movl %r9d, %ebx
-
- lcalll *efi32_call(%rip)
-
- pop %rbx
- pop %rbp
- RET
-SYM_FUNC_END(__efi64_thunk)
-
- .code32
-SYM_FUNC_START_LOCAL(efi32_enable_long_mode)
- movl %cr4, %eax
- btsl $(X86_CR4_PAE_BIT), %eax
- movl %eax, %cr4
-
- movl $MSR_EFER, %ecx
- rdmsr
- btsl $_EFER_LME, %eax
- wrmsr
-
- /* Disable interrupts - the firmware's IDT does not work in long mode */
- cli
-
- /* Enable paging */
- movl %cr0, %eax
- btsl $X86_CR0_PG_BIT, %eax
- movl %eax, %cr0
- ret
-SYM_FUNC_END(efi32_enable_long_mode)
-
-/*
- * This is the common EFI stub entry point for mixed mode. It sets up the GDT
- * and page tables needed for 64-bit execution, after which it calls the
- * common 64-bit EFI entrypoint efi_stub_entry().
- *
- * Arguments: 0(%esp) image handle
- * 4(%esp) EFI system table pointer
- * %ebx struct boot_params pointer (or NULL)
- *
- * Since this is the point of no return for ordinary execution, no registers
- * are considered live except for the function parameters. [Note that the EFI
- * stub may still exit and return to the firmware using the Exit() EFI boot
- * service.]
- */
-SYM_FUNC_START_LOCAL(efi32_startup)
- movl %esp, %ebp
-
- subl $8, %esp
- sgdtl (%esp) /* Save GDT descriptor to the stack */
- movl 2(%esp), %esi /* Existing GDT pointer */
- movzwl (%esp), %ecx /* Existing GDT limit */
- inc %ecx /* Existing GDT size */
- andl $~7, %ecx /* Ensure size is multiple of 8 */
-
- subl %ecx, %esp /* Allocate new GDT */
- andl $~15, %esp /* Realign the stack */
- movl %esp, %edi /* New GDT address */
- leal 7(%ecx), %eax /* New GDT limit */
- pushw %cx /* Push 64-bit CS (for LJMP below) */
- pushl %edi /* Push new GDT address */
- pushw %ax /* Push new GDT limit */
-
- /* Copy GDT to the stack and add a 64-bit code segment at the end */
- movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) & 0xffffffff, (%edi,%ecx)
- movl $GDT_ENTRY(DESC_CODE64, 0, 0xfffff) >> 32, 4(%edi,%ecx)
- shrl $2, %ecx
- cld
- rep movsl /* Copy the firmware GDT */
- lgdtl (%esp) /* Switch to the new GDT */
-
- call 1f
-1: pop %edi
-
- /* Record mixed mode entry */
- movb $0x0, (efi_is64 - 1b)(%edi)
-
- /* Set up indirect far call to re-enter 32-bit mode */
- leal (efi32_call - 1b)(%edi), %eax
- addl %eax, (%eax)
- movw %cs, 4(%eax)
-
- /* Disable paging */
- movl %cr0, %eax
- btrl $X86_CR0_PG_BIT, %eax
- movl %eax, %cr0
-
- /* Set up 1:1 mapping */
- leal (pte - 1b)(%edi), %eax
- movl $_PAGE_PRESENT | _PAGE_RW | _PAGE_PSE, %ecx
- leal (_PAGE_PRESENT | _PAGE_RW)(%eax), %edx
-2: movl %ecx, (%eax)
- addl $8, %eax
- addl $PMD_SIZE, %ecx
- jnc 2b
-
- movl $PAGE_SIZE, %ecx
- .irpc l, 0123
- movl %edx, \l * 8(%eax)
- addl %ecx, %edx
- .endr
- addl %ecx, %eax
- movl %edx, (%eax)
- movl %eax, %cr3
-
- call efi32_enable_long_mode
-
- /* Set up far jump to 64-bit mode (CS is already on the stack) */
- leal (efi_stub_entry - 1b)(%edi), %eax
- movl %eax, 2(%esp)
-
- movl 0(%ebp), %edi
- movl 4(%ebp), %esi
- movl %ebx, %edx
- ljmpl *2(%esp)
-SYM_FUNC_END(efi32_startup)
-
-/*
- * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
- * efi_system_table_32_t *sys_table)
- */
-SYM_FUNC_START(efi32_pe_entry)
- pushl %ebx // save callee-save registers
-
- /* Check whether the CPU supports long mode */
- movl $0x80000001, %eax // assume extended info support
- cpuid
- btl $29, %edx // check long mode bit
- jnc 1f
- leal 8(%esp), %esp // preserve stack alignment
- xor %ebx, %ebx // no struct boot_params pointer
- jmp efi32_startup // only ESP and EBX remain live
-1: movl $0x80000003, %eax // EFI_UNSUPPORTED
- popl %ebx
- RET
-SYM_FUNC_END(efi32_pe_entry)
-
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
- .org efi32_stub_entry + 0x200
- .code64
-SYM_FUNC_START_NOALIGN(efi64_stub_entry)
- jmp efi_handover_entry
-SYM_FUNC_END(efi64_stub_entry)
-#endif
-
- .data
- .balign 8
-SYM_DATA_START_LOCAL(efi32_call)
- .long efi_enter32 - .
- .word 0x0
-SYM_DATA_END(efi32_call)
-SYM_DATA(efi_is64, .byte 1)
-
- .bss
- .balign PAGE_SIZE
-SYM_DATA_LOCAL(pte, .fill 6 * PAGE_SIZE, 1, 0)
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
index fb676ded47fa..b6431edd0fc9 100644
--- a/drivers/firmware/efi/libstub/zboot-header.S
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -4,17 +4,17 @@
#ifdef CONFIG_64BIT
.set .Lextra_characteristics, 0x0
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC
#else
.set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
- .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+ .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC
#endif
.section ".head", "a"
.globl __efistub_efi_zboot_header
__efistub_efi_zboot_header:
.Ldoshdr:
- .long MZ_MAGIC
+ .long IMAGE_DOS_SIGNATURE
.ascii "zimg" // image type
.long __efistub__gzdata_start - .Ldoshdr // payload offset
.long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
@@ -25,7 +25,7 @@ __efistub_efi_zboot_header:
.long .Lpehdr - .Ldoshdr // PE header offset
.Lpehdr:
- .long PE_MAGIC
+ .long IMAGE_NT_SIGNATURE
.short MACHINE_TYPE
.short .Lsection_count
.long 0
@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
.long .Lefi_header_end - .Ldoshdr
.long 0
.short IMAGE_SUBSYSTEM_EFI_APPLICATION
- .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+ .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT
#ifdef CONFIG_64BIT
.quad 0, 0, 0, 0
#else
@@ -123,11 +123,29 @@ __efistub_efi_zboot_header:
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_EXECUTE
+#ifdef CONFIG_EFI_SBAT
+ .ascii ".sbat\0\0\0"
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+ .long __sbat_size
+ .long _sbat - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_DISCARDABLE
+
+ .pushsection ".sbat", "a", @progbits
+ .incbin CONFIG_EFI_SBAT_FILE
+ .popsection
+#endif
+
.ascii ".data\0\0\0"
.long __data_size
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long __data_rawsize
- .long _etext - .Ldoshdr
+ .long _data - .Ldoshdr
.long 0, 0
.short 0, 0
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index 9ecc57ff5b45..c3a166675450 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -29,7 +29,17 @@ SECTIONS
. = _etext;
}
+#ifdef CONFIG_EFI_SBAT
+ .sbat : ALIGN(4096) {
+ _sbat = .;
+ *(.sbat)
+ _esbat = ALIGN(4096);
+ . = _esbat;
+ }
+#endif
+
.data : ALIGN(4096) {
+ _data = .;
*(.data* .init.data*)
_edata = ALIGN(512);
. = _edata;
@@ -52,3 +62,4 @@ PROVIDE(__efistub__gzdata_size =
PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
PROVIDE(__data_size = ABSOLUTE(_end - _etext));
+PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat));
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 34109fd86c55..f1c04d7cfd71 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -43,7 +43,8 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
map.map = early_memremap(phys_map, data->size);
if (!map.map) {
- pr_err("Could not map the memory map!\n");
+ pr_err("Could not map the memory map! phys_map=%pa, size=0x%lx\n",
+ &phys_map, data->size);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 9e2628728aad..77b5f7ac3e20 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -361,6 +361,10 @@ static long efi_runtime_get_waketime(unsigned long arg)
getwakeuptime.enabled))
return -EFAULT;
+ if (getwakeuptime.pending && put_user(pending,
+ getwakeuptime.pending))
+ return -EFAULT;
+
if (getwakeuptime.time) {
if (copy_to_user(getwakeuptime.time, &efi_time,
sizeof(efi_time_t)))
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index c964f4924359..127ad752acf8 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -23,6 +23,28 @@ config IMX_SCU
This driver manages the IPC interface between host CPU and the
SCU firmware running on M4.
+config IMX_SCMI_CPU_DRV
+ tristate "IMX SCMI CPU Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core that can provide CPU management features.
+
+ This driver can also be built as a module.
+
+config IMX_SCMI_LMM_DRV
+ tristate "IMX SCMI LMM Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ default y if ARCH_MXC
+ help
+ The System Controller Management Interface firmware (SCMI FW) is
+ a low-level system function which runs on a dedicated Cortex-M
+ core that can provide Logical Machine management features.
+
+ This driver can also be built as a module.
+
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 8d046c341be8..3bbaffa6e347 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_DSP) += imx-dsp.o
obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o
+obj-${CONFIG_IMX_SCMI_CPU_DRV} += sm-cpu.o
obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o
+obj-${CONFIG_IMX_SCMI_LMM_DRV} += sm-lmm.o
diff --git a/drivers/firmware/imx/sm-cpu.c b/drivers/firmware/imx/sm-cpu.c
new file mode 100644
index 000000000000..091b014f739f
--- /dev/null
+++ b/drivers/firmware/imx/sm-cpu.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_cpu_proto_ops *imx_cpu_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
+ bool resume)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_cpu_ops->cpu_reset_vector_set(ph, cpuid, vector, start,
+ boot, resume);
+}
+EXPORT_SYMBOL(scmi_imx_cpu_reset_vector_set);
+
+int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (start)
+ return imx_cpu_ops->cpu_start(ph, cpuid, true);
+
+ return imx_cpu_ops->cpu_start(ph, cpuid, false);
+};
+EXPORT_SYMBOL(scmi_imx_cpu_start);
+
+int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!started)
+ return -EINVAL;
+
+ return imx_cpu_ops->cpu_started(ph, cpuid, started);
+};
+EXPORT_SYMBOL(scmi_imx_cpu_started);
+
+static int scmi_imx_cpu_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_cpu_ops) {
+ dev_err(&sdev->dev, "sm cpu already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_cpu_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU, &ph);
+ if (IS_ERR(imx_cpu_ops))
+ return PTR_ERR(imx_cpu_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_CPU, "imx-cpu" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_cpu_driver = {
+ .name = "scmi-imx-cpu",
+ .probe = scmi_imx_cpu_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_cpu_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM CPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/imx/sm-lmm.c b/drivers/firmware/imx/sm-lmm.c
new file mode 100644
index 000000000000..6807bf563c03
--- /dev/null
+++ b/drivers/firmware/imx/sm-lmm.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/firmware/imx/sm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scmi_protocol.h>
+#include <linux/scmi_imx_protocol.h>
+
+static const struct scmi_imx_lmm_proto_ops *imx_lmm_ops;
+static struct scmi_protocol_handle *ph;
+
+int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ if (!info)
+ return -EINVAL;
+
+ return imx_lmm_ops->lmm_info(ph, lmid, info);
+};
+EXPORT_SYMBOL(scmi_imx_lmm_info);
+
+int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ return imx_lmm_ops->lmm_reset_vector_set(ph, lmid, cpuid, flags, vector);
+}
+EXPORT_SYMBOL(scmi_imx_lmm_reset_vector_set);
+
+int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ if (!ph)
+ return -EPROBE_DEFER;
+
+ switch (op) {
+ case SCMI_IMX_LMM_BOOT:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, true);
+ case SCMI_IMX_LMM_POWER_ON:
+ return imx_lmm_ops->lmm_power_boot(ph, lmid, false);
+ case SCMI_IMX_LMM_SHUTDOWN:
+ return imx_lmm_ops->lmm_shutdown(ph, lmid, flags);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(scmi_imx_lmm_operation);
+
+static int scmi_imx_lmm_probe(struct scmi_device *sdev)
+{
+ const struct scmi_handle *handle = sdev->handle;
+
+ if (!handle)
+ return -ENODEV;
+
+ if (imx_lmm_ops) {
+ dev_err(&sdev->dev, "lmm already initialized\n");
+ return -EEXIST;
+ }
+
+ imx_lmm_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph);
+ if (IS_ERR(imx_lmm_ops))
+ return PTR_ERR(imx_lmm_ops);
+
+ return 0;
+}
+
+static const struct scmi_device_id scmi_id_table[] = {
+ { SCMI_PROTOCOL_IMX_LMM, "imx-lmm" },
+ { },
+};
+MODULE_DEVICE_TABLE(scmi, scmi_id_table);
+
+static struct scmi_driver scmi_imx_lmm_driver = {
+ .name = "scmi-imx-lmm",
+ .probe = scmi_imx_lmm_probe,
+ .id_table = scmi_id_table,
+};
+module_scmi_driver(scmi_imx_lmm_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("IMX SM LMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index a1ebbe9b73b1..38ca190d4a22 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -804,8 +804,10 @@ int __init psci_dt_init(void)
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
- if (!np || !of_device_is_available(np))
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
init_fn = (psci_initcall_t)matched_np->data;
ret = init_fn(np);
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index b662b7e28b80..df02a4ec3398 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -343,7 +343,7 @@ static int suspend_test_thread(void *arg)
* later.
*/
timer_delete(&wakeup_timer);
- destroy_timer_on_stack(&wakeup_timer);
+ timer_destroy_on_stack(&wakeup_timer);
if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
complete(&suspend_threads_done);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index fc4d67e4c4a6..f63b716be5b0 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1986,7 +1986,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "asus,vivobook-s15" },
+ { .compatible = "asus,zenbook-a14-ux3407qa" },
+ { .compatible = "asus,zenbook-a14-ux3407ra" },
{ .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,elitebook-ultra-g1q" },
{ .compatible = "hp,omnibook-x14" },
{ .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 097369d38b84..3133d826f5fa 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -44,8 +44,11 @@ enum qcom_scm_arg_types {
/**
* struct qcom_scm_desc
+ * @svc: Service identifier
+ * @cmd: Command identifier
* @arginfo: Metadata describing the arguments in args[]
* @args: The array of arguments for the secure syscall
+ * @owner: Owner identifier
*/
struct qcom_scm_desc {
u32 svc;
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index 92b365178235..94196ad87105 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -79,6 +79,7 @@ static const char *const qcom_tzmem_blacklist[] = {
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
+ "qcom,sm7150", /* reset in rmtfs memory assignment */
"qcom,sm8150", /* reset in rmtfs memory assignment */
NULL
};
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
index 85e90d236da2..39b33a356ebd 100644
--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -43,13 +43,13 @@ static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
}
-static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen,
unsigned int acpm_chan_id)
{
xfer->txd = cmd;
xfer->rxd = cmd;
- xfer->txlen = sizeof(cmd);
- xfer->rxlen = sizeof(cmd);
+ xfer->txlen = cmdlen;
+ xfer->rxlen = cmdlen;
xfer->acpm_chan_id = acpm_chan_id;
}
@@ -71,7 +71,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_read_cmd(cmd, type, reg, chan);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -104,7 +104,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle,
return -EINVAL;
acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -144,7 +144,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -184,7 +184,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle,
return -EINVAL;
acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
@@ -214,7 +214,7 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle,
int ret;
acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
- acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+ acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id);
ret = acpm_do_xfer(handle, &xfer);
if (ret)
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
index a85b2dbdd9f0..e02f14f4bd7c 100644
--- a/drivers/firmware/samsung/exynos-acpm.c
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -15,6 +15,7 @@
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/ktime.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
@@ -32,8 +33,7 @@
#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
-/* The unit of counter is 20 us. 5000 * 20 = 100 ms */
-#define ACPM_POLL_TIMEOUT 5000
+#define ACPM_POLL_TIMEOUT_US (100 * USEC_PER_MSEC)
#define ACPM_TX_TIMEOUT_US 500000
#define ACPM_GS101_INITDATA_BASE 0xa000
@@ -185,6 +185,29 @@ struct acpm_match_data {
#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)
/**
+ * acpm_get_saved_rx() - get the response if it was already saved.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer to get response for.
+ * @tx_seqnum: xfer TX sequence number.
+ */
+static void acpm_get_saved_rx(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer, u32 tx_seqnum)
+{
+ const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1];
+ u32 rx_seqnum;
+
+ if (!rx_data->response)
+ return;
+
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]);
+
+ if (rx_seqnum == tx_seqnum) {
+ memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
+ clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
+ }
+}
+
+/**
* acpm_get_rx() - get response from RX queue.
* @achan: ACPM channel info.
* @xfer: reference to the transfer to get response for.
@@ -204,15 +227,16 @@ static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
rx_front = readl(achan->rx.front);
i = readl(achan->rx.rear);
- /* Bail out if RX is empty. */
- if (i == rx_front)
+ tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ if (i == rx_front) {
+ acpm_get_saved_rx(achan, xfer, tx_seqnum);
return 0;
+ }
base = achan->rx.base;
mlen = achan->mlen;
- tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
-
/* Drain RX queue. */
do {
/* Read RX seqnum. */
@@ -259,16 +283,8 @@ static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
* If the response was not in this iteration of the queue, check if the
* RX data was previously saved.
*/
- rx_data = &achan->rx_data[tx_seqnum - 1];
- if (!rx_set && rx_data->response) {
- rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM,
- rx_data->cmd[0]);
-
- if (rx_seqnum == tx_seqnum) {
- memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
- clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
- }
- }
+ if (!rx_set)
+ acpm_get_saved_rx(achan, xfer, tx_seqnum);
return 0;
}
@@ -284,12 +300,13 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan,
const struct acpm_xfer *xfer)
{
struct device *dev = achan->acpm->dev;
- unsigned int cnt_20us = 0;
+ ktime_t timeout;
u32 seqnum;
int ret;
seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+ timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);
do {
ret = acpm_get_rx(achan, xfer);
if (ret)
@@ -299,12 +316,11 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan,
return 0;
/* Determined experimentally. */
- usleep_range(20, 30);
- cnt_20us++;
- } while (cnt_20us < ACPM_POLL_TIMEOUT);
+ udelay(20);
+ } while (ktime_before(ktime_get(), timeout));
- dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
- achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0]);
return -ETIME;
}
@@ -633,7 +649,7 @@ static int acpm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, acpm);
- return 0;
+ return devm_of_platform_populate(dev);
}
/**
@@ -661,43 +677,30 @@ static void devm_acpm_release(struct device *dev, void *res)
}
/**
- * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
- * @dev: device pointer requesting ACPM handle.
- * @property: property name containing phandle on ACPM node.
+ * acpm_get_by_node() - get the ACPM handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
-static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
- const char *property)
+static const struct acpm_handle *acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
struct platform_device *pdev;
- struct device_node *acpm_np;
struct device_link *link;
struct acpm_info *acpm;
- acpm_np = of_parse_phandle(dev->of_node, property, 0);
- if (!acpm_np)
- return ERR_PTR(-ENODEV);
-
- pdev = of_find_device_by_node(acpm_np);
- if (!pdev) {
- dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
- of_node_put(acpm_np);
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
- }
-
- of_node_put(acpm_np);
acpm = platform_get_drvdata(pdev);
if (!acpm) {
- dev_err(dev, "Cannot get drvdata from %s\n",
- dev_name(&pdev->dev));
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
if (!try_module_get(pdev->dev.driver->owner)) {
- dev_err(dev, "Cannot get module reference.\n");
platform_device_put(pdev);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -716,14 +719,14 @@ static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
}
/**
- * devm_acpm_get_by_phandle() - managed get handle using phandle.
- * @dev: device pointer requesting ACPM handle.
- * @property: property name containing phandle on ACPM node.
+ * devm_acpm_get_by_node() - managed get handle using node pointer.
+ * @dev: device pointer requesting ACPM handle.
+ * @np: ACPM device tree node.
*
* Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/
-const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
- const char *property)
+const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
{
const struct acpm_handle **ptr, *handle;
@@ -731,7 +734,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- handle = acpm_get_by_phandle(dev, property);
+ handle = acpm_get_by_node(dev, np);
if (!IS_ERR(handle)) {
*ptr = handle;
devres_add(dev, ptr);
@@ -741,6 +744,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
return handle;
}
+EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
static const struct acpm_match_data acpm_gs101 = {
.initdata_base = ACPM_GS101_INITDATA_BASE,
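The timeout rework above swaps an iteration counter for a wall-clock deadline, so the 100 ms bound no longer drifts with usleep_range() slack. The pattern in isolation, with poll_once() standing in as a placeholder for acpm_get_rx() plus the sequence-number bitmap test:

	ktime_t deadline = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US);

	do {
		if (poll_once())	/* placeholder for the real completion check */
			return 0;
		udelay(20);		/* poll interval, determined experimentally */
	} while (ktime_before(ktime_get(), deadline));

	return -ETIME;			/* deadline passed without a response */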
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index a123c05cbc9e..49e1de83d2e8 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -17,17 +17,11 @@ static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_afte
void __init kvm_init_hyp_services(void)
{
+ uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
struct arm_smccc_res res;
u32 val[4];
- if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
- return;
-
- arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
- if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
- res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
- res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
- res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
+ if (!arm_smccc_hypervisor_has_uuid(&kvm_uuid))
return;
memset(&res, 0, sizeof(res));
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index a74600d9f2d7..cd65b434dc6e 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -67,6 +67,23 @@ s32 arm_smccc_get_soc_id_revision(void)
}
EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision);
+bool arm_smccc_hypervisor_has_uuid(const uuid_t *hyp_uuid)
+{
+ struct arm_smccc_res res = {};
+ uuid_t uuid;
+
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
+ return false;
+
+ arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return false;
+
+ uuid = smccc_res_to_uuid(res.a0, res.a1, res.a2, res.a3);
+ return uuid_equal(&uuid, hyp_uuid);
+}
+EXPORT_SYMBOL_GPL(arm_smccc_hypervisor_has_uuid);
+
static int __init smccc_devices_init(void)
{
struct platform_device *pdev;
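The new predicate bundles the conduit check, the vendor UID hypercall and the four-register UUID comparison into one call, so callers shrink to the shape now used in kvm_guest.c above:

	uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;

	if (!arm_smccc_hypervisor_has_uuid(&kvm_uuid))
		return;		/* no HVC conduit, or a different hypervisor */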
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 7c5c03f274b9..889e5b05c739 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -143,6 +143,7 @@ static __init int sysfb_init(void)
{
struct screen_info *si = &screen_info;
struct device *parent;
+ unsigned int type;
struct simplefb_platform_data mode;
const char *name;
bool compatible;
@@ -170,17 +171,26 @@ static __init int sysfb_init(void)
goto put_device;
}
+ type = screen_info_video_type(si);
+
/* if the FB is incompatible, create a legacy framebuffer device */
- if (si->orig_video_isVGA == VIDEO_TYPE_EFI)
- name = "efi-framebuffer";
- else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
- name = "vesa-framebuffer";
- else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC)
- name = "vga-framebuffer";
- else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC)
+ switch (type) {
+ case VIDEO_TYPE_EGAC:
name = "ega-framebuffer";
- else
+ break;
+ case VIDEO_TYPE_VGAC:
+ name = "vga-framebuffer";
+ break;
+ case VIDEO_TYPE_VLFB:
+ name = "vesa-framebuffer";
+ break;
+ case VIDEO_TYPE_EFI:
+ name = "efi-framebuffer";
+ break;
+ default:
name = "platform-framebuffer";
+ break;
+ }
pd = platform_device_alloc(name, 0);
if (!pd) {
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 75a186bf8f8e..592d8a644619 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -35,36 +35,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
- /*
- * The meaning of depth and bpp for direct-color formats is
- * inconsistent:
- *
- * - DRM format info specifies depth as the number of color
- * bits; including alpha, but not including filler bits.
- * - Linux' EFI platform code computes lfb_depth from the
- * individual color channels, including the reserved bits.
- * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
- * versions use 15.
- * - On the kernel command line, 'bpp' of 32 is usually
- * XRGB8888 including the filler bits, but 15 is XRGB1555
- * not including the filler bit.
- *
- * It's not easily possible to fix this in struct screen_info,
- * as this could break UAPI. The best solution is to compute
- * bits_per_pixel from the color bits, reserved bits and
- * reported lfb_depth, whichever is highest. In the loop below,
- * ignore simplefb formats with alpha bits, as EFI and VESA
- * don't specify alpha channels.
- */
- if (si->lfb_depth > 8) {
- bits_per_pixel = max(max3(si->red_size + si->red_pos,
- si->green_size + si->green_pos,
- si->blue_size + si->blue_pos),
- si->rsvd_size + si->rsvd_pos);
- bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
- } else {
- bits_per_pixel = si->lfb_depth;
- }
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
const struct simplefb_format *f = &formats[i];
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 806a975fff22..ae5fd1936ad3 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -3670,6 +3670,7 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
struct ti_sci_info *info = dev_get_drvdata(dev);
struct device *cpu_dev, *cpu_dev_max = NULL;
s32 val, cpu_lat = 0;
+ u16 cpu_lat_ms;
int i, ret;
if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
@@ -3682,9 +3683,16 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
}
}
if (cpu_dev_max) {
- dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u\n", __func__, cpu_lat);
+ /*
+ * PM QoS latency unit is usecs, device manager uses msecs.
+ * Convert to msecs and round down for device manager.
+ */
+ cpu_lat_ms = cpu_lat / USEC_PER_MSEC;
+ dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__,
+ cpu_lat_ms);
ret = ti_sci_cmd_set_latency_constraint(&info->handle,
- cpu_lat, TISCI_MSG_CONSTRAINT_SET);
+ cpu_lat_ms,
+ TISCI_MSG_CONSTRAINT_SET);
if (ret)
return ret;
}
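One detail worth spelling out: the integer division rounds down, so a constraint that is not a whole number of milliseconds is forwarded to the device manager as a tighter (smaller) latency bound, never a looser one. For example:

	/* PM QoS latency is in microseconds, the device manager wants ms */
	u16 cpu_lat_ms = cpu_lat / USEC_PER_MSEC;	/* 1999 us -> 1 ms */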
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 47fe6261f5a3..1eac9948148f 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -2,29 +2,31 @@
/*
* Turris Mox rWTM firmware driver
*
- * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org>
+ * Copyright (C) 2019, 2024, 2025 Marek Behún <kabel@kernel.org>
*/
#include <crypto/sha2.h>
#include <linux/align.h>
#include <linux/armada-37xx-rwtm-mailbox.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/container_of.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/if_ether.h>
+#include <linux/key.h>
#include <linux/kobject.h>
#include <linux/mailbox_client.h>
+#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/sysfs.h>
+#include <linux/turris-signing-key.h>
#include <linux/types.h>
#define DRIVER_NAME "turris-mox-rwtm"
@@ -37,10 +39,13 @@
* https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
*/
-#define MOX_ECC_NUMBER_WORDS 17
-#define MOX_ECC_NUMBER_LEN (MOX_ECC_NUMBER_WORDS * sizeof(u32))
-
-#define MOX_ECC_SIGNATURE_WORDS (2 * MOX_ECC_NUMBER_WORDS)
+enum {
+ MOX_ECC_NUM_BITS = 521,
+ MOX_ECC_NUM_LEN = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 8),
+ MOX_ECC_NUM_WORDS = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 32),
+ MOX_ECC_SIG_LEN = 2 * MOX_ECC_NUM_LEN,
+ MOX_ECC_PUBKEY_LEN = 1 + MOX_ECC_NUM_LEN,
+};
#define MBOX_STS_SUCCESS (0 << 30)
#define MBOX_STS_FAIL (1 << 30)
@@ -77,10 +82,7 @@ enum mbox_cmd {
* @ram_size: RAM size of the device
* @mac_address1: first MAC address of the device
* @mac_address2: second MAC address of the device
- * @has_pubkey: whether board ECDSA public key is present
* @pubkey: board ECDSA public key
- * @last_sig: last ECDSA signature generated with board ECDSA private key
- * @last_sig_done: whether the last ECDSA signing is complete
*/
struct mox_rwtm {
struct mbox_client mbox_client;
@@ -100,18 +102,8 @@ struct mox_rwtm {
int board_version, ram_size;
u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN];
- bool has_pubkey;
- u8 pubkey[135];
-
-#ifdef CONFIG_DEBUG_FS
- /*
- * Signature process. This is currently done via debugfs, because it
- * does not conform to the sysfs standard "one file per attribute".
- * It should be rewritten via crypto API once akcipher API is available
- * from userspace.
- */
- u32 last_sig[MOX_ECC_SIGNATURE_WORDS];
- bool last_sig_done;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
+ u8 pubkey[MOX_ECC_PUBKEY_LEN];
#endif
};
@@ -120,24 +112,23 @@ static inline struct device *rwtm_dev(struct mox_rwtm *rwtm)
return rwtm->mbox_client.dev;
}
-#define MOX_ATTR_RO(name, format, cat) \
+#define MOX_ATTR_RO(name, format) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *a, \
char *buf) \
{ \
struct mox_rwtm *rwtm = dev_get_drvdata(dev); \
- if (!rwtm->has_##cat) \
+ if (!rwtm->has_board_info) \
return -ENODATA; \
return sysfs_emit(buf, format, rwtm->name); \
} \
static DEVICE_ATTR_RO(name)
-MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
-MOX_ATTR_RO(board_version, "%i\n", board_info);
-MOX_ATTR_RO(ram_size, "%i\n", board_info);
-MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
-MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
-MOX_ATTR_RO(pubkey, "%s\n", pubkey);
+MOX_ATTR_RO(serial_number, "%016llX\n");
+MOX_ATTR_RO(board_version, "%i\n");
+MOX_ATTR_RO(ram_size, "%i\n");
+MOX_ATTR_RO(mac_address1, "%pM\n");
+MOX_ATTR_RO(mac_address2, "%pM\n");
static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_serial_number.attr,
@@ -145,7 +136,6 @@ static struct attribute *turris_mox_rwtm_attrs[] = {
&dev_attr_ram_size.attr,
&dev_attr_mac_address1.attr,
&dev_attr_mac_address2.attr,
- &dev_attr_pubkey.attr,
NULL
};
ATTRIBUTE_GROUPS(turris_mox_rwtm);
@@ -247,24 +237,6 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
}
- ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
- if (ret == -ENODATA) {
- dev_warn(dev, "Board has no public key burned!\n");
- } else if (ret == -EOPNOTSUPP) {
- dev_notice(dev,
- "Firmware does not support the ECDSA_PUB_KEY command\n");
- } else if (ret < 0) {
- return ret;
- } else {
- u32 *s = reply->status;
-
- rwtm->has_pubkey = true;
- sprintf(rwtm->pubkey,
- "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
- ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
- s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
- }
-
return 0;
}
@@ -306,127 +278,139 @@ unlock_mutex:
return ret;
}
-#ifdef CONFIG_DEBUG_FS
-static int rwtm_debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
+#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL
- return nonseekable_open(inode, file);
-}
-
-static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
+static void mox_ecc_number_to_bin(void *dst, const u32 *src)
{
- struct mox_rwtm *rwtm = file->private_data;
- ssize_t ret;
+ __be32 tmp[MOX_ECC_NUM_WORDS];
- /* only allow one read, of whole signature, from position 0 */
- if (*ppos != 0)
- return 0;
+ cpu_to_be32_array(tmp, src, MOX_ECC_NUM_WORDS);
- if (len < sizeof(rwtm->last_sig))
- return -EINVAL;
+ memcpy(dst, (void *)tmp + 2, MOX_ECC_NUM_LEN);
+}
- if (!rwtm->last_sig_done)
- return -ENODATA;
+static void mox_ecc_public_key_to_bin(void *dst, u32 src_first,
+ const u32 *src_rest)
+{
+ __be32 tmp[MOX_ECC_NUM_WORDS - 1];
+ u8 *p = dst;
- ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig,
- sizeof(rwtm->last_sig));
- rwtm->last_sig_done = false;
+ /* take 3 bytes from the first word */
+ *p++ = src_first >> 16;
+ *p++ = src_first >> 8;
+ *p++ = src_first;
- return ret;
+ /* take the rest of the words */
+ cpu_to_be32_array(tmp, src_rest, MOX_ECC_NUM_WORDS - 1);
+ memcpy(p, tmp, sizeof(tmp));
}
-static ssize_t do_sign_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
+static int mox_rwtm_sign(const struct key *key, const void *data, void *signature)
{
- struct mox_rwtm *rwtm = file->private_data;
- struct armada_37xx_rwtm_tx_msg msg;
- loff_t dummy = 0;
- ssize_t ret;
-
- if (len != SHA512_DIGEST_SIZE)
- return -EINVAL;
-
- /* if last result is not zero user has not read that information yet */
- if (rwtm->last_sig_done)
- return -EBUSY;
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+ struct armada_37xx_rwtm_tx_msg msg = {};
+ u32 offset_r, offset_s;
+ int ret;
- if (!mutex_trylock(&rwtm->busy))
- return -EBUSY;
+ guard(mutex)(&rwtm->busy);
/*
- * Here we have to send:
- * 1. Address of the input to sign.
- * The input is an array of 17 32-bit words, the first (most
- * significat) is 0, the rest 16 words are copied from the SHA-512
- * hash given by the user and converted from BE to LE.
- * 2. Address of the buffer where ECDSA signature value R shall be
- * stored by the rWTM firmware.
- * 3. Address of the buffer where ECDSA signature value S shall be
- * stored by the rWTM firmware.
+ * For MBOX_CMD_SIGN command:
+ * args[0] - must be 1
+ * args[1] - address of message M to sign; message is a 521-bit number
+ * args[2] - address where the R part of the signature will be stored
+ * args[3] - address where the S part of the signature will be stored
+ *
+ * M, R and S are 521-bit numbers encoded as seventeen 32-bit words,
+ * most significant word first.
+ * Since the message in @data is a sha512 digest, the most significant
+ * word is always zero.
*/
+
+ offset_r = MOX_ECC_NUM_WORDS * sizeof(u32);
+ offset_s = 2 * MOX_ECC_NUM_WORDS * sizeof(u32);
+
memset(rwtm->buf, 0, sizeof(u32));
- ret = simple_write_to_buffer(rwtm->buf + sizeof(u32),
- SHA512_DIGEST_SIZE, &dummy, buf, len);
- if (ret < 0)
- goto unlock_mutex;
- be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUMBER_WORDS);
+ memcpy(rwtm->buf + sizeof(u32), data, SHA512_DIGEST_SIZE);
+ be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUM_WORDS);
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
- msg.args[2] = rwtm->buf_phys + MOX_ECC_NUMBER_LEN;
- msg.args[3] = rwtm->buf_phys + 2 * MOX_ECC_NUMBER_LEN;
+ msg.args[2] = rwtm->buf_phys + offset_r;
+ msg.args[3] = rwtm->buf_phys + offset_s;
ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true);
if (ret < 0)
- goto unlock_mutex;
+ return ret;
- /*
- * Here we read the R and S values of the ECDSA signature
- * computed by the rWTM firmware and convert their words from
- * LE to BE.
- */
- memcpy(rwtm->last_sig, rwtm->buf + MOX_ECC_NUMBER_LEN,
- sizeof(rwtm->last_sig));
- cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig,
- MOX_ECC_SIGNATURE_WORDS);
- rwtm->last_sig_done = true;
+ /* convert R and S parts of the signature */
+ mox_ecc_number_to_bin(signature, rwtm->buf + offset_r);
+ mox_ecc_number_to_bin(signature + MOX_ECC_NUM_LEN, rwtm->buf + offset_s);
- mutex_unlock(&rwtm->busy);
- return len;
-unlock_mutex:
- mutex_unlock(&rwtm->busy);
- return ret;
+ return 0;
}
-static const struct file_operations do_sign_fops = {
- .owner = THIS_MODULE,
- .open = rwtm_debug_open,
- .read = do_sign_read,
- .write = do_sign_write,
-};
-
-static void rwtm_debugfs_release(void *root)
+static const void *mox_rwtm_get_public_key(const struct key *key)
{
- debugfs_remove_recursive(root);
+ struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key));
+
+ return rwtm->pubkey;
}
-static void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+static const struct turris_signing_key_subtype mox_signing_key_subtype = {
+ .key_size = MOX_ECC_NUM_BITS,
+ .data_size = SHA512_DIGEST_SIZE,
+ .sig_size = MOX_ECC_SIG_LEN,
+ .public_key_size = MOX_ECC_PUBKEY_LEN,
+ .hash_algo = "sha512",
+ .get_public_key = mox_rwtm_get_public_key,
+ .sign = mox_rwtm_sign,
+};
+
+static int mox_register_signing_key(struct mox_rwtm *rwtm)
{
- struct dentry *root;
+ struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
+ struct device *dev = rwtm_dev(rwtm);
+ int ret;
- root = debugfs_create_dir("turris-mox-rwtm", NULL);
+ ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false);
+ if (ret == -ENODATA) {
+ dev_warn(dev, "Board has no public key burned!\n");
+ } else if (ret == -EOPNOTSUPP) {
+ dev_notice(dev,
+ "Firmware does not support the ECDSA_PUB_KEY command\n");
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ char sn[17] = "unknown";
+ char desc[46];
+
+ if (rwtm->has_board_info)
+ sprintf(sn, "%016llX", rwtm->serial_number);
+
+ sprintf(desc, "Turris MOX SN %s rWTM ECDSA key", sn);
- debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, &do_sign_fops);
+ mox_ecc_public_key_to_bin(rwtm->pubkey, ret, reply->status);
- devm_add_action_or_reset(rwtm_dev(rwtm), rwtm_debugfs_release, root);
+ ret = devm_turris_signing_key_create(dev,
+ &mox_signing_key_subtype,
+ desc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot create signing key\n");
+ }
+
+ return 0;
}
-#else
-static inline void rwtm_register_debugfs(struct mox_rwtm *rwtm)
+
+#else /* CONFIG_TURRIS_MOX_RWTM_KEYCTL */
+
+static inline int mox_register_signing_key(struct mox_rwtm *rwtm)
{
+ return 0;
}
-#endif
+
+#endif /* !CONFIG_TURRIS_MOX_RWTM_KEYCTL */
static void rwtm_devm_mbox_release(void *mbox)
{
@@ -477,6 +461,10 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret < 0)
dev_warn(dev, "Cannot read board information: %i\n", ret);
+ ret = mox_register_signing_key(rwtm);
+ if (ret < 0)
+ return ret;
+
ret = check_get_random_support(rwtm);
if (ret < 0) {
dev_notice(dev,
@@ -491,8 +479,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Cannot register HWRNG!\n");
- rwtm_register_debugfs(rwtm);
-
dev_info(dev, "HWRNG successfully registered\n");
/*
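The size arithmetic behind the new MOX_ECC_* enum, written out as a sketch (the point-format reading of the prefix byte is an inference; the diff itself only fixes the sizes):

	/*
	 * MOX_ECC_NUM_BITS  = 521
	 * MOX_ECC_NUM_LEN   = DIV_ROUND_UP(521, 8)  = 66 bytes
	 * MOX_ECC_NUM_WORDS = DIV_ROUND_UP(521, 32) = 17 words = 68 bytes
	 *
	 * mox_ecc_number_to_bin() therefore skips 68 - 66 = 2 leading pad
	 * bytes of the big-endian buffer ("(void *)tmp + 2"), and the
	 * 67-byte public key (MOX_ECC_PUBKEY_LEN = 1 + 66) is assembled
	 * from 3 useful bytes of the first reply word plus 16 full words
	 * (3 + 64 = 67), presumably a point-format prefix byte followed
	 * by the 521-bit coordinate.
	 */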
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
index 8748babb0504..62975a39ee14 100644
--- a/drivers/fpga/tests/fpga-mgr-test.c
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -263,6 +263,7 @@ static void fpga_mgr_test_img_load_sgt(struct kunit *test)
img_buf = init_test_buffer(test, IMAGE_SIZE);
sgt = kunit_kzalloc(test, sizeof(*sgt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
KUNIT_ASSERT_EQ(test, ret, 0);
sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f2c39bbff83a..44f922e10db2 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -201,6 +201,7 @@ config GPIO_RASPBERRYPI_EXP
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on ARCH_BCM_MOBILE || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Turn on GPIO support for Broadcom "Kona" chips.
@@ -213,6 +214,18 @@ config GPIO_BCM_XGS_IPROC
help
Say yes here to enable GPIO support for Broadcom XGS iProc SoCs.
+config GPIO_BLZP1600
+ tristate "Blaize BLZP1600 GPIO support"
+ default y if ARCH_BLAIZE
+ depends on ARCH_BLAIZE || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y or M here to add support for the Blaize BLZP1600 GPIO device.
+ The controller is based on the Verisilicon Microelectronics GPIO APB v0.2
+ IP block.
+
config GPIO_BRCMSTB
tristate "BRCMSTB GPIO support"
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
@@ -241,6 +254,7 @@ config GPIO_DAVINCI
tristate "TI Davinci/Keystone GPIO support"
default y if ARCH_DAVINCI
depends on ((ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)) || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
@@ -340,7 +354,7 @@ config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
depends on OF || COMPILE_TEST
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
VHDL IP core library.
@@ -368,8 +382,7 @@ config GPIO_HLWD
config GPIO_ICH
tristate "Intel ICH GPIO"
- depends on X86
- depends on LPC_ICH
+ depends on (X86 && LPC_ICH) || (COMPILE_TEST && HAS_IOPORT)
help
Say yes here to support the GPIO functionality of a number of Intel
ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
@@ -425,6 +438,7 @@ config GPIO_LPC18XX
default y if ARCH_LPC18XX
depends on OF_GPIO && (ARCH_LPC18XX || COMPILE_TEST)
select IRQ_DOMAIN_HIERARCHY
+ select GPIOLIB_IRQCHIP
help
Select this option to enable GPIO driver for
NXP LPC18XX/43XX devices.
@@ -468,7 +482,7 @@ config GPIO_MPC8XXX
FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \
COMPILE_TEST
select GPIO_GENERIC
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
@@ -540,7 +554,7 @@ config GPIO_OMAP
config GPIO_PL061
tristate "PrimeCell PL061 GPIO support"
- depends on ARM_AMBA
+ depends on ARM_AMBA || COMPILE_TEST
select IRQ_DOMAIN
select GPIOLIB_IRQCHIP
help
@@ -555,6 +569,7 @@ config GPIO_POLARFIRE_SOC
config GPIO_PXA
bool "PXA GPIO support"
depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the PXA GPIO device.
@@ -604,7 +619,7 @@ config GPIO_ROCKCHIP
config GPIO_RTD
tristate "Realtek DHC GPIO support"
- depends on ARCH_REALTEK
+ depends on ARCH_REALTEK || COMPILE_TEST
default y
select GPIOLIB_IRQCHIP
help
@@ -656,6 +671,15 @@ config GPIO_SNPS_CREG
where only several fields in register belong to GPIO lines and
each GPIO line owns a field with different length and on/off value.
+config GPIO_SPACEMIT_K1
+ tristate "SPACEMIT K1 GPIO support"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the SpacemiT K1 GPIO device.
+
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR
@@ -753,7 +777,7 @@ config GPIO_UNIPHIER
Say yes here to support UniPhier GPIOs.
config GPIO_VF610
- bool "VF610 GPIO support"
+ tristate "VF610 GPIO support"
default y if SOC_VF610
depends on ARCH_MXC || COMPILE_TEST
select GPIOLIB_IRQCHIP
@@ -830,14 +854,14 @@ config GPIO_ZEVIO
config GPIO_ZYNQ
tristate "Xilinx Zynq GPIO support"
- depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support Xilinx Zynq GPIO controller.
config GPIO_ZYNQMP_MODEPIN
tristate "ZynqMP ps-mode pin GPIO configuration driver"
- depends on ZYNQMP_FIRMWARE
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
default ZYNQMP_FIRMWARE
help
Say yes here to support the ZynqMP ps-mode pin GPIO configuration
@@ -866,7 +890,7 @@ config GPIO_AMD_FCH
config GPIO_MSC313
bool "MStar MSC313 GPIO support"
- depends on ARCH_MSTARV7
+ depends on ARCH_MSTARV7 || COMPILE_TEST
default ARCH_MSTARV7
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -1365,7 +1389,7 @@ config GPIO_DLN2
config HTC_EGPIO
bool "HTC EGPIO support"
- depends on ARM
+ depends on ARM || COMPILE_TEST
help
This driver supports the CPLD egpio chip present on
several HTC phones. It provides basic support for input
@@ -1463,6 +1487,19 @@ config GPIO_MAX77650
GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
These chips have a single pin that can be configured as GPIO.
+config GPIO_MAX77759
+ tristate "Maxim Integrated MAX77759 GPIO support"
+ depends on MFD_MAX77759
+ default MFD_MAX77759
+ select GPIOLIB_IRQCHIP
+ help
+ GPIO driver for MAX77759 PMIC from Maxim Integrated.
+ There are two GPIOs available on these chips in total, both of
+ which can also generate interrupts.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-max77759.
+
config GPIO_PALMAS
bool "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
@@ -1520,12 +1557,13 @@ config GPIO_TC3589X
config GPIO_TIMBERDALE
bool "Support for timberdale GPIO IP"
depends on MFD_TIMBERDALE
+ select GPIOLIB_IRQCHIP
help
Add support for the GPIO IP in the timberdale FPGA.
config GPIO_TN48M_CPLD
tristate "Delta Networks TN48M switch CPLD GPIO driver"
- depends on MFD_TN48M_CPLD
+ depends on MFD_TN48M_CPLD || COMPILE_TEST
select GPIO_REGMAP
help
This enables support for the GPIOs found on the Delta
@@ -1869,6 +1907,8 @@ menu "Virtual GPIO drivers"
config GPIO_AGGREGATOR
tristate "GPIO Aggregator"
+ select CONFIGFS_FS
+ select DEV_SYNC_PROBE
help
Say yes here to enable the GPIO Aggregator, which provides a way to
aggregate existing GPIO lines into a new virtual GPIO chip.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index af130882ffee..88dedd298256 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
+gpiolib-acpi-y := gpiolib-acpi-core.o gpiolib-acpi-quirks.o
obj-$(CONFIG_GPIOLIB) += gpiolib-swnode.o
# Device drivers. Generally keep list sorted alphabetically
@@ -45,6 +46,7 @@ obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o
obj-$(CONFIG_GPIO_BD71815) += gpio-bd71815.o
obj-$(CONFIG_GPIO_BD71828) += gpio-bd71828.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
+obj-$(CONFIG_GPIO_BLZP1600) += gpio-blzp1600.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o
@@ -105,6 +107,7 @@ obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
+obj-$(CONFIG_GPIO_MAX77759) += gpio-max77759.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
@@ -159,6 +162,7 @@ obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o
obj-$(CONFIG_GPIO_SLOPPY_LOGIC_ANALYZER) += gpio-sloppy-logic-analyzer.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_SPACEMIT_K1) += gpio-spacemit-k1.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 4b70cbaa1caa..4a8b349f2483 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -44,6 +44,13 @@ Work items:
to a machine description such as device tree, ACPI or fwnode that
implicitly does not use global GPIO numbers.
+- Fix drivers to not read back struct gpio_chip::base. Some drivers do
+ that and would be broken by attempts to poison it or make it dynamic.
+ Example in AT91 pinctrl driver:
+ https://lore.kernel.org/all/1d00c056-3d61-4c22-bedd-3bae0bf1ddc4@pengutronix.de/
+ This particular driver is also DT-only, so with the above fixed, the
+ base can be made dynamic (set to -1) if CONFIG_GPIO_SYSFS is disabled.
+
- When this work is complete (will require some of the items in the
following ongoing work as well) we can delete the old global
numberspace accessors from <linux/gpio.h> and eventually delete
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index d232ea865356..6f941db02c04 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -9,10 +9,13 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -27,226 +30,200 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
+#include "dev-sync-probe.h"
+
#define AGGREGATOR_MAX_GPIOS 512
+#define AGGREGATOR_LEGACY_PREFIX "_sysfs"
/*
* GPIO Aggregator sysfs interface
*/
struct gpio_aggregator {
+ struct dev_sync_probe_data probe_data;
+ struct config_group group;
struct gpiod_lookup_table *lookups;
- struct platform_device *pdev;
+ struct mutex lock;
+ int id;
+
+ /* List of gpio_aggregator_line. Always added in order */
+ struct list_head list_head;
+
+ /* used by legacy sysfs interface only */
+ bool init_via_sysfs;
char args[];
};
+struct gpio_aggregator_line {
+ struct config_group group;
+ struct gpio_aggregator *parent;
+ struct list_head entry;
+
+ /* Line index within the aggregator device */
+ unsigned int idx;
+
+ /* Custom name for the virtual line */
+ const char *name;
+ /* GPIO chip label or line name */
+ const char *key;
+ /* Can be negative to indicate lookup by line name */
+ int offset;
+
+ enum gpio_lookup_flags flags;
+};
+
+struct gpio_aggregator_pdev_meta {
+ bool init_via_sysfs;
+};
+
static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
static DEFINE_IDR(gpio_aggregator_idr);
-static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
- int hwnum, unsigned int *n)
+static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size)
{
- struct gpiod_lookup_table *lookups;
+ int ret;
- lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
- GFP_KERNEL);
- if (!lookups)
+ struct gpio_aggregator *new __free(kfree) = kzalloc(
+ sizeof(*new) + arg_size, GFP_KERNEL);
+ if (!new)
return -ENOMEM;
- lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
+ scoped_guard(mutex, &gpio_aggregator_lock)
+ ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL);
- (*n)++;
- memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+ if (ret < 0)
+ return ret;
- aggr->lookups = lookups;
+ new->id = ret;
+ INIT_LIST_HEAD(&new->list_head);
+ mutex_init(&new->lock);
+ *aggr = no_free_ptr(new);
return 0;
}
-static int aggr_parse(struct gpio_aggregator *aggr)
+static void gpio_aggregator_free(struct gpio_aggregator *aggr)
{
- char *args = skip_spaces(aggr->args);
- char *name, *offsets, *p;
- unsigned int i, n = 0;
- int error = 0;
-
- unsigned long *bitmap __free(bitmap) =
- bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
- if (!bitmap)
- return -ENOMEM;
-
- args = next_arg(args, &name, &p);
- while (*args) {
- args = next_arg(args, &offsets, &p);
-
- p = get_options(offsets, 0, &error);
- if (error == 0 || *p) {
- /* Named GPIO line */
- error = aggr_add_gpio(aggr, name, U16_MAX, &n);
- if (error)
- return error;
+ scoped_guard(mutex, &gpio_aggregator_lock)
+ idr_remove(&gpio_aggregator_idr, aggr->id);
- name = offsets;
- continue;
- }
+ mutex_destroy(&aggr->lock);
+ kfree(aggr);
+}
- /* GPIO chip + offset(s) */
- error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
- if (error) {
- pr_err("Cannot parse %s: %d\n", offsets, error);
- return error;
- }
+static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr,
+ const char *key, int hwnum, unsigned int *n)
+{
+ struct gpiod_lookup_table *lookups;
- for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
- error = aggr_add_gpio(aggr, name, i, &n);
- if (error)
- return error;
- }
+ lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
+ GFP_KERNEL);
+ if (!lookups)
+ return -ENOMEM;
- args = next_arg(args, &name, &p);
- }
+ lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
- if (!n) {
- pr_err("No GPIOs specified\n");
- return -EINVAL;
- }
+ (*n)++;
+ memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+ aggr->lookups = lookups;
return 0;
}
-static ssize_t new_device_store(struct device_driver *driver, const char *buf,
- size_t count)
+static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr)
{
- struct gpio_aggregator *aggr;
- struct platform_device *pdev;
- int res, id;
+ lockdep_assert_held(&aggr->lock);
- if (!try_module_get(THIS_MODULE))
- return -ENOENT;
-
- /* kernfs guarantees string termination, so count + 1 is safe */
- aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
- if (!aggr) {
- res = -ENOMEM;
- goto put_module;
- }
-
- memcpy(aggr->args, buf, count + 1);
+ return aggr->probe_data.pdev && platform_get_drvdata(aggr->probe_data.pdev);
+}
- aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
- GFP_KERNEL);
- if (!aggr->lookups) {
- res = -ENOMEM;
- goto free_ga;
- }
+/* Only aggregators created via legacy sysfs can be "activating". */
+static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr)
+{
+ lockdep_assert_held(&aggr->lock);
- mutex_lock(&gpio_aggregator_lock);
- id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
- mutex_unlock(&gpio_aggregator_lock);
+ return aggr->probe_data.pdev && !platform_get_drvdata(aggr->probe_data.pdev);
+}
- if (id < 0) {
- res = id;
- goto free_table;
- }
+static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr)
+{
+ lockdep_assert_held(&aggr->lock);
- aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
- if (!aggr->lookups->dev_id) {
- res = -ENOMEM;
- goto remove_idr;
- }
+ return list_count_nodes(&aggr->list_head);
+}
- res = aggr_parse(aggr);
- if (res)
- goto free_dev_id;
+static struct gpio_aggregator_line *
+gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx,
+ char *key, int offset)
+{
+ struct gpio_aggregator_line *line;
- gpiod_add_lookup_table(aggr->lookups);
+ line = kzalloc(sizeof(*line), GFP_KERNEL);
+ if (!line)
+ return ERR_PTR(-ENOMEM);
- pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
- if (IS_ERR(pdev)) {
- res = PTR_ERR(pdev);
- goto remove_table;
+ if (key) {
+ line->key = kstrdup(key, GFP_KERNEL);
+ if (!line->key) {
+ kfree(line);
+ return ERR_PTR(-ENOMEM);
+ }
}
- aggr->pdev = pdev;
- module_put(THIS_MODULE);
- return count;
+ line->flags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ line->parent = parent;
+ line->idx = idx;
+ line->offset = offset;
+ INIT_LIST_HEAD(&line->entry);
-remove_table:
- gpiod_remove_lookup_table(aggr->lookups);
-free_dev_id:
- kfree(aggr->lookups->dev_id);
-remove_idr:
- mutex_lock(&gpio_aggregator_lock);
- idr_remove(&gpio_aggregator_idr, id);
- mutex_unlock(&gpio_aggregator_lock);
-free_table:
- kfree(aggr->lookups);
-free_ga:
- kfree(aggr);
-put_module:
- module_put(THIS_MODULE);
- return res;
-}
-
-static DRIVER_ATTR_WO(new_device);
-
-static void gpio_aggregator_free(struct gpio_aggregator *aggr)
-{
- platform_device_unregister(aggr->pdev);
- gpiod_remove_lookup_table(aggr->lookups);
- kfree(aggr->lookups->dev_id);
- kfree(aggr->lookups);
- kfree(aggr);
+ return line;
}
-static ssize_t delete_device_store(struct device_driver *driver,
- const char *buf, size_t count)
+static void gpio_aggregator_line_add(struct gpio_aggregator *aggr,
+ struct gpio_aggregator_line *line)
{
- struct gpio_aggregator *aggr;
- unsigned int id;
- int error;
+ struct gpio_aggregator_line *tmp;
- if (!str_has_prefix(buf, DRV_NAME "."))
- return -EINVAL;
+ lockdep_assert_held(&aggr->lock);
- error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
- if (error)
- return error;
-
- if (!try_module_get(THIS_MODULE))
- return -ENOENT;
-
- mutex_lock(&gpio_aggregator_lock);
- aggr = idr_remove(&gpio_aggregator_idr, id);
- mutex_unlock(&gpio_aggregator_lock);
- if (!aggr) {
- module_put(THIS_MODULE);
- return -ENOENT;
+ list_for_each_entry(tmp, &aggr->list_head, entry) {
+ if (tmp->idx > line->idx) {
+ list_add_tail(&line->entry, &tmp->entry);
+ return;
+ }
}
-
- gpio_aggregator_free(aggr);
- module_put(THIS_MODULE);
- return count;
+ list_add_tail(&line->entry, &aggr->list_head);
}
-static DRIVER_ATTR_WO(delete_device);
-
-static struct attribute *gpio_aggregator_attrs[] = {
- &driver_attr_new_device.attr,
- &driver_attr_delete_device.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(gpio_aggregator);
-static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+static void gpio_aggregator_line_del(struct gpio_aggregator *aggr,
+ struct gpio_aggregator_line *line)
{
- gpio_aggregator_free(p);
- return 0;
+ lockdep_assert_held(&aggr->lock);
+
+ list_del(&line->entry);
}
-static void __exit gpio_aggregator_remove_all(void)
+static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr)
{
- mutex_lock(&gpio_aggregator_lock);
- idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
- idr_destroy(&gpio_aggregator_idr);
- mutex_unlock(&gpio_aggregator_lock);
+ struct gpio_aggregator_line *line, *tmp;
+
+ list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) {
+ configfs_unregister_group(&line->group);
+ /*
+ * Normally, we acquire aggr->lock within the configfs
+ * callback. However, in the legacy sysfs interface case,
+ * calling configfs_(un)register_group while holding
+ * aggr->lock could cause a deadlock. Fortunately, this is
+ * unnecessary because the new_device/delete_device path
+ * and the module unload path are mutually exclusive,
+ * thanks to an explicit try_module_get. That's why this
+ * minimal scoped_guard suffices.
+ */
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_del(aggr, line);
+ kfree(line->key);
+ kfree(line->name);
+ kfree(line);
+ }
}
@@ -582,6 +559,728 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
return fwd;
}
+/*
+ * Configfs interface
+ */
+
+static struct gpio_aggregator *
+to_gpio_aggregator(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_aggregator, group);
+}
+
+static struct gpio_aggregator_line *
+to_gpio_aggregator_line(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ return container_of(group, struct gpio_aggregator_line, group);
+}
+
+static struct fwnode_handle *
+gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr)
+{
+ struct property_entry properties[2];
+ struct gpio_aggregator_line *line;
+ size_t num_lines;
+ int n = 0;
+
+ memset(properties, 0, sizeof(properties));
+
+ num_lines = gpio_aggregator_count_lines(aggr);
+ if (num_lines == 0)
+ return NULL;
+
+ const char **line_names __free(kfree) = kcalloc(
+ num_lines, sizeof(*line_names), GFP_KERNEL);
+ if (!line_names)
+ return ERR_PTR(-ENOMEM);
+
+ /* The list is always sorted as new elements are inserted in order. */
+ list_for_each_entry(line, &aggr->list_head, entry)
+ line_names[n++] = line->name ?: "";
+
+ properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
+ "gpio-line-names",
+ line_names, num_lines);
+
+ return fwnode_create_software_node(properties, NULL);
+}
+
+static int gpio_aggregator_activate(struct gpio_aggregator *aggr)
+{
+ struct platform_device_info pdevinfo;
+ struct gpio_aggregator_line *line;
+ struct fwnode_handle *swnode;
+ unsigned int n = 0;
+ int ret = 0;
+
+ if (gpio_aggregator_count_lines(aggr) == 0)
+ return -EINVAL;
+
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups)
+ return -ENOMEM;
+
+ swnode = gpio_aggregator_make_device_sw_node(aggr);
+ if (IS_ERR(swnode)) {
+ ret = PTR_ERR(swnode);
+ goto err_remove_lookups;
+ }
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.name = DRV_NAME;
+ pdevinfo.id = aggr->id;
+ pdevinfo.fwnode = swnode;
+
+ /* The list is always sorted as new elements are inserted in order. */
+ list_for_each_entry(line, &aggr->list_head, entry) {
+ /*
+ * - Either GPIO chip label or line name must be configured
+ * (i.e. line->key must be non-NULL)
+ * - Line directories must be named with sequential numeric
+ * suffixes starting from 0. (i.e. ./line0, ./line1, ...)
+ */
+ if (!line->key || line->idx != n) {
+ ret = -EINVAL;
+ goto err_remove_swnode;
+ }
+
+ if (line->offset < 0)
+ ret = gpio_aggregator_add_gpio(aggr, line->key,
+ U16_MAX, &n);
+ else
+ ret = gpio_aggregator_add_gpio(aggr, line->key,
+ line->offset, &n);
+ if (ret)
+ goto err_remove_swnode;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
+ if (!aggr->lookups->dev_id) {
+ ret = -ENOMEM;
+ goto err_remove_swnode;
+ }
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ ret = dev_sync_probe_register(&aggr->probe_data, &pdevinfo);
+ if (ret)
+ goto err_remove_lookup_table;
+
+ return 0;
+
+err_remove_lookup_table:
+ kfree(aggr->lookups->dev_id);
+ gpiod_remove_lookup_table(aggr->lookups);
+err_remove_swnode:
+ fwnode_remove_software_node(swnode);
+err_remove_lookups:
+ kfree(aggr->lookups);
+
+ return ret;
+}
+
+static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr)
+{
+ dev_sync_probe_unregister(&aggr->probe_data);
+ gpiod_remove_lookup_table(aggr->lookups);
+ kfree(aggr->lookups->dev_id);
+ kfree(aggr->lookups);
+}
+
+static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr,
+ bool lock)
+{
+ struct configfs_subsystem *subsys = aggr->group.cg_subsys;
+ struct gpio_aggregator_line *line;
+
+ /*
+ * The device only needs to depend on leaf lines. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(line, &aggr->list_head, entry) {
+ if (lock)
+ configfs_depend_item_unlocked(
+ subsys, &line->group.cg_item);
+ else
+ configfs_undepend_item_unlocked(
+ &line->group.cg_item);
+ }
+}
+
+static ssize_t
+gpio_aggregator_line_key_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%s\n", line->key ?: "");
+}
+
+static ssize_t
+gpio_aggregator_line_key_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ char *key __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
+ strim(key);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ kfree(line->key);
+ line->key = no_free_ptr(key);
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, key);
+
+static ssize_t
+gpio_aggregator_line_name_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%s\n", line->name ?: "");
+}
+
+static ssize_t
+gpio_aggregator_line_name_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ char *name __free(kfree) = kstrndup(skip_spaces(page), count,
+ GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strim(name);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ kfree(line->name);
+ line->name = no_free_ptr(name);
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, name);
+
+static ssize_t
+gpio_aggregator_line_offset_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%d\n", line->offset);
+}
+
+static ssize_t
+gpio_aggregator_line_offset_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+ int offset, ret;
+
+ ret = kstrtoint(page, 0, &offset);
+ if (ret)
+ return ret;
+
+ /*
+ * When offset == -1, 'key' represents a line name to lookup.
+ * When 0 <= offset < 65535, 'key' represents the label of the chip with
+ * the 'offset' value representing the line within that chip.
+ *
+ * GPIOLIB uses the U16_MAX value to indicate lookup by line name so
+ * the greatest offset we can accept is (U16_MAX - 1).
+ */
+ if (offset > (U16_MAX - 1) || offset < -1)
+ return -EINVAL;
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ return -EBUSY;
+
+ line->offset = offset;
+
+ return count;
+}
+CONFIGFS_ATTR(gpio_aggregator_line_, offset);
+
+static struct configfs_attribute *gpio_aggregator_line_attrs[] = {
+ &gpio_aggregator_line_attr_key,
+ &gpio_aggregator_line_attr_name,
+ &gpio_aggregator_line_attr_offset,
+ NULL
+};
+
+static ssize_t
+gpio_aggregator_device_dev_name_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+ struct platform_device *pdev;
+
+ guard(mutex)(&aggr->lock);
+
+ pdev = aggr->probe_data.pdev;
+ if (pdev)
+ return sysfs_emit(page, "%s\n", dev_name(&pdev->dev));
+
+ return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id);
+}
+CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name);
+
+static ssize_t
+gpio_aggregator_device_live_show(struct config_item *item, char *page)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+
+ guard(mutex)(&aggr->lock);
+
+ return sysfs_emit(page, "%c\n",
+ gpio_aggregator_is_active(aggr) ? '1' : '0');
+}
+
+static ssize_t
+gpio_aggregator_device_live_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+ int ret = 0;
+ bool live;
+
+ ret = kstrtobool(page, &live);
+ if (ret)
+ return ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ if (live && !aggr->init_via_sysfs)
+ gpio_aggregator_lockup_configfs(aggr, true);
+
+ scoped_guard(mutex, &aggr->lock) {
+ if (gpio_aggregator_is_activating(aggr) ||
+ (live == gpio_aggregator_is_active(aggr)))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_aggregator_activate(aggr);
+ else
+ gpio_aggregator_deactivate(aggr);
+ }
+
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret && !aggr->init_via_sysfs)
+ gpio_aggregator_lockup_configfs(aggr, false);
+
+ module_put(THIS_MODULE);
+
+ return ret ?: count;
+}
+CONFIGFS_ATTR(gpio_aggregator_device_, live);
+
+static struct configfs_attribute *gpio_aggregator_device_attrs[] = {
+ &gpio_aggregator_device_attr_dev_name,
+ &gpio_aggregator_device_attr_live,
+ NULL
+};
+
+static void
+gpio_aggregator_line_release(struct config_item *item)
+{
+ struct gpio_aggregator_line *line = to_gpio_aggregator_line(item);
+ struct gpio_aggregator *aggr = line->parent;
+
+ guard(mutex)(&aggr->lock);
+
+ gpio_aggregator_line_del(aggr, line);
+ kfree(line->key);
+ kfree(line->name);
+ kfree(line);
+}
+
+static struct configfs_item_operations gpio_aggregator_line_item_ops = {
+ .release = gpio_aggregator_line_release,
+};
+
+static const struct config_item_type gpio_aggregator_line_type = {
+ .ct_item_ops = &gpio_aggregator_line_item_ops,
+ .ct_attrs = gpio_aggregator_line_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void gpio_aggregator_device_release(struct config_item *item)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(item);
+
+ /*
+ * At this point, aggr is neither active nor activating,
+ * so calling gpio_aggregator_deactivate() is always unnecessary.
+ */
+ gpio_aggregator_free(aggr);
+}
+
+static struct configfs_item_operations gpio_aggregator_device_item_ops = {
+ .release = gpio_aggregator_device_release,
+};
+
+static struct config_group *
+gpio_aggregator_device_make_group(struct config_group *group, const char *name)
+{
+ struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item);
+ struct gpio_aggregator_line *line;
+ unsigned int idx;
+ int ret, nchar;
+
+ ret = sscanf(name, "line%u%n", &idx, &nchar);
+ if (ret != 1 || nchar != strlen(name))
+ return ERR_PTR(-EINVAL);
+
+ if (aggr->init_via_sysfs)
+ /*
+ * Aggregators created via legacy sysfs interface are exposed as
+ * default groups, which means rmdir(2) is prohibited for them.
+ * For simplicity, and to avoid confusion, we also prohibit
+ * mkdir(2).
+ */
+ return ERR_PTR(-EPERM);
+
+ guard(mutex)(&aggr->lock);
+
+ if (gpio_aggregator_is_active(aggr))
+ return ERR_PTR(-EBUSY);
+
+ list_for_each_entry(line, &aggr->list_head, entry)
+ if (line->idx == idx)
+ return ERR_PTR(-EINVAL);
+
+ line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1);
+ if (IS_ERR(line))
+ return ERR_CAST(line);
+
+ config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type);
+
+ gpio_aggregator_line_add(aggr, line);
+
+ return &line->group;
+}
+
+static struct configfs_group_operations gpio_aggregator_device_group_ops = {
+ .make_group = gpio_aggregator_device_make_group,
+};
+
+static const struct config_item_type gpio_aggregator_device_type = {
+ .ct_group_ops = &gpio_aggregator_device_group_ops,
+ .ct_item_ops = &gpio_aggregator_device_item_ops,
+ .ct_attrs = gpio_aggregator_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *
+gpio_aggregator_make_group(struct config_group *group, const char *name)
+{
+ struct gpio_aggregator *aggr;
+ int ret;
+
+ /*
+ * "_sysfs" prefix is reserved for auto-generated config group
+ * for devices created via the legacy sysfs interface.
+ */
+ if (strncmp(name, AGGREGATOR_LEGACY_PREFIX,
+ sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0)
+ return ERR_PTR(-EINVAL);
+
+ /* arg space is unneeded */
+ ret = gpio_aggregator_alloc(&aggr, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
+ dev_sync_probe_init(&aggr->probe_data);
+
+ return &aggr->group;
+}
+
+static struct configfs_group_operations gpio_aggregator_group_ops = {
+ .make_group = gpio_aggregator_make_group,
+};
+
+static const struct config_item_type gpio_aggregator_type = {
+ .ct_group_ops = &gpio_aggregator_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem gpio_aggregator_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = DRV_NAME,
+ .ci_type = &gpio_aggregator_type,
+ },
+ },
+};
+
+/*
+ * Sysfs interface
+ */
+static int gpio_aggregator_parse(struct gpio_aggregator *aggr)
+{
+ char *args = skip_spaces(aggr->args);
+ struct gpio_aggregator_line *line;
+ char name[CONFIGFS_ITEM_NAME_LEN];
+ char *key, *offsets, *p;
+ unsigned int i, n = 0;
+ int error = 0;
+
+ unsigned long *bitmap __free(bitmap) =
+ bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
+ if (!bitmap)
+ return -ENOMEM;
+
+ args = next_arg(args, &key, &p);
+ while (*args) {
+ args = next_arg(args, &offsets, &p);
+
+ p = get_options(offsets, 0, &error);
+ if (error == 0 || *p) {
+ /* Named GPIO line */
+ scnprintf(name, sizeof(name), "line%u", n);
+ line = gpio_aggregator_line_alloc(aggr, n, key, -1);
+ if (IS_ERR(line)) {
+ error = PTR_ERR(line);
+ goto err;
+ }
+ config_group_init_type_name(&line->group, name,
+ &gpio_aggregator_line_type);
+ error = configfs_register_group(&aggr->group,
+ &line->group);
+ if (error)
+ goto err;
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_add(aggr, line);
+
+ error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n);
+ if (error)
+ goto err;
+
+ key = offsets;
+ continue;
+ }
+
+ /* GPIO chip + offset(s) */
+ error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
+ if (error) {
+ pr_err("Cannot parse %s: %d\n", offsets, error);
+ goto err;
+ }
+
+ for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
+ scnprintf(name, sizeof(name), "line%u", n);
+ line = gpio_aggregator_line_alloc(aggr, n, key, i);
+ if (IS_ERR(line)) {
+ error = PTR_ERR(line);
+ goto err;
+ }
+ config_group_init_type_name(&line->group, name,
+ &gpio_aggregator_line_type);
+ error = configfs_register_group(&aggr->group,
+ &line->group);
+ if (error)
+ goto err;
+ scoped_guard(mutex, &aggr->lock)
+ gpio_aggregator_line_add(aggr, line);
+
+ error = gpio_aggregator_add_gpio(aggr, key, i, &n);
+ if (error)
+ goto err;
+ }
+
+ args = next_arg(args, &key, &p);
+ }
+
+ if (!n) {
+ pr_err("No GPIOs specified\n");
+ error = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ gpio_aggregator_free_lines(aggr);
+ return error;
+}
+
+static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true };
+ char name[CONFIGFS_ITEM_NAME_LEN];
+ struct gpio_aggregator *aggr;
+ struct platform_device *pdev;
+ int res;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ /* kernfs guarantees string termination, so count + 1 is safe */
+ res = gpio_aggregator_alloc(&aggr, count + 1);
+ if (res)
+ goto put_module;
+
+ memcpy(aggr->args, buf, count + 1);
+
+ aggr->init_via_sysfs = true;
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups) {
+ res = -ENOMEM;
+ goto free_ga;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id);
+ if (!aggr->lookups->dev_id) {
+ res = -ENOMEM;
+ goto free_table;
+ }
+
+ scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id);
+ config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type);
+
+ /*
+	 * Since a device created via sysfs may later be toggled through the
+	 * configfs 'live' attribute, this initialization is needed.
+ */
+ dev_sync_probe_init(&aggr->probe_data);
+
+ /* Expose to configfs */
+ res = configfs_register_group(&gpio_aggregator_subsys.su_group,
+ &aggr->group);
+ if (res)
+ goto free_dev_id;
+
+ res = gpio_aggregator_parse(aggr);
+ if (res)
+ goto unregister_group;
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta));
+ if (IS_ERR(pdev)) {
+ res = PTR_ERR(pdev);
+ goto remove_table;
+ }
+
+ aggr->probe_data.pdev = pdev;
+ module_put(THIS_MODULE);
+ return count;
+
+remove_table:
+ gpiod_remove_lookup_table(aggr->lookups);
+unregister_group:
+ configfs_unregister_group(&aggr->group);
+free_dev_id:
+ kfree(aggr->lookups->dev_id);
+free_table:
+ kfree(aggr->lookups);
+free_ga:
+ gpio_aggregator_free(aggr);
+put_module:
+ module_put(THIS_MODULE);
+ return res;
+}
+
+static struct driver_attribute driver_attr_gpio_aggregator_new_device =
+ __ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store);
+
+static void gpio_aggregator_destroy(struct gpio_aggregator *aggr)
+{
+ scoped_guard(mutex, &aggr->lock) {
+ if (gpio_aggregator_is_activating(aggr) ||
+ gpio_aggregator_is_active(aggr))
+ gpio_aggregator_deactivate(aggr);
+ }
+ gpio_aggregator_free_lines(aggr);
+ configfs_unregister_group(&aggr->group);
+ kfree(aggr);
+}
+
+static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator *aggr;
+ unsigned int id;
+ int error;
+
+ if (!str_has_prefix(buf, DRV_NAME "."))
+ return -EINVAL;
+
+ error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
+ if (error)
+ return error;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ mutex_lock(&gpio_aggregator_lock);
+ aggr = idr_find(&gpio_aggregator_idr, id);
+ /*
+ * For simplicity, devices created via configfs cannot be deleted
+ * via sysfs.
+ */
+	if (aggr && aggr->init_via_sysfs) {
+		idr_remove(&gpio_aggregator_idr, id);
+	} else {
+		mutex_unlock(&gpio_aggregator_lock);
+		module_put(THIS_MODULE);
+		return -ENOENT;
+	}
+ mutex_unlock(&gpio_aggregator_lock);
+
+ gpio_aggregator_destroy(aggr);
+ module_put(THIS_MODULE);
+ return count;
+}
+
+static struct driver_attribute driver_attr_gpio_aggregator_delete_device =
+ __ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store);
+
+static struct attribute *gpio_aggregator_attrs[] = {
+ &driver_attr_gpio_aggregator_new_device.attr,
+ &driver_attr_gpio_aggregator_delete_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpio_aggregator);
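+
+/*
+ * Example use of the legacy sysfs interface (see
+ * Documentation/admin-guide/gpio/gpio-aggregator.rst):
+ *
+ *   echo 'gpiochip0 1,3' > /sys/bus/platform/drivers/gpio-aggregator/new_device
+ *   echo 'gpio-aggregator.0' > /sys/bus/platform/drivers/gpio-aggregator/delete_device
+ */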
/*
* GPIO Aggregator platform device
@@ -589,7 +1288,9 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
static int gpio_aggregator_probe(struct platform_device *pdev)
{
+ struct gpio_aggregator_pdev_meta *meta;
struct device *dev = &pdev->dev;
+ bool init_via_sysfs = false;
struct gpio_desc **descs;
struct gpiochip_fwd *fwd;
unsigned long features;
@@ -603,10 +1304,28 @@ static int gpio_aggregator_probe(struct platform_device *pdev)
if (!descs)
return -ENOMEM;
+ meta = dev_get_platdata(&pdev->dev);
+ if (meta && meta->init_via_sysfs)
+ init_via_sysfs = true;
+
for (i = 0; i < n; i++) {
descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
- if (IS_ERR(descs[i]))
+ if (IS_ERR(descs[i])) {
+ /*
+			 * Deferred probing is not suitable when the aggregator
+			 * is created via configfs; the user can simply retry
+			 * later at any time. For device creation via sysfs,
+			 * the error is propagated unchanged, for backward
+			 * compatibility. .prevent_deferred_probe is kept unset
+			 * for the remaining cases.
+ */
+ if (!init_via_sysfs && !dev_of_node(dev) &&
+ descs[i] == ERR_PTR(-EPROBE_DEFER)) {
+ pr_warn("Deferred probe canceled for creation via configfs.\n");
+ return -ENODEV;
+ }
return PTR_ERR(descs[i]);
+ }
}
features = (uintptr_t)device_get_match_data(dev);
@@ -640,9 +1359,63 @@ static struct platform_driver gpio_aggregator_driver = {
},
};
+static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+{
+ /*
+	 * There should be no aggregators created via configfs left at this
+	 * point, as their presence would have prevented module unloading.
+ */
+ gpio_aggregator_destroy(p);
+ return 0;
+}
+
+static void __exit gpio_aggregator_remove_all(void)
+{
+ /*
+	 * Configfs callbacks acquire gpio_aggregator_lock when accessing
+	 * gpio_aggregator_idr, while gpio_aggregator_idr_remove() accesses
+	 * configfs groups, so protecting the idr_for_each() invocation here
+	 * with gpio_aggregator_lock could cause a lock-inversion deadlock.
+	 * Fortunately, the new_device/delete_device paths and the module
+	 * unload path are mutually exclusive, thanks to the explicit
+	 * try_module_get() inside those driver attr handlers. Also, by the
+	 * time we get here, no configfs entries are present or being
+	 * created. Therefore, there is no need to take gpio_aggregator_lock
+	 * below.
+ */
+ idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
+ idr_destroy(&gpio_aggregator_idr);
+}
+
static int __init gpio_aggregator_init(void)
{
- return platform_driver_register(&gpio_aggregator_driver);
+ int ret = 0;
+
+ config_group_init(&gpio_aggregator_subsys.su_group);
+ mutex_init(&gpio_aggregator_subsys.su_mutex);
+ ret = configfs_register_subsystem(&gpio_aggregator_subsys);
+ if (ret) {
+ pr_err("Failed to register the '%s' configfs subsystem: %d\n",
+ gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, ret);
+ mutex_destroy(&gpio_aggregator_subsys.su_mutex);
+ return ret;
+ }
+
+ /*
+ * CAVEAT: This must occur after configfs registration. Otherwise,
+ * a race condition could arise: driver attribute groups might be
+ * exposed and accessed by users before configfs registration
+ * completes. new_device_store() does not expect a partially
+ * initialized configfs state.
+ */
+ ret = platform_driver_register(&gpio_aggregator_driver);
+ if (ret) {
+ pr_err("Failed to register the platform driver: %d\n", ret);
+ mutex_destroy(&gpio_aggregator_subsys.su_mutex);
+ configfs_unregister_subsystem(&gpio_aggregator_subsys);
+ }
+
+ return ret;
}
module_init(gpio_aggregator_init);
@@ -650,6 +1423,7 @@ static void __exit gpio_aggregator_exit(void)
{
gpio_aggregator_remove_all();
platform_driver_unregister(&gpio_aggregator_driver);
+ configfs_unregister_subsystem(&gpio_aggregator_subsys);
}
module_exit(gpio_aggregator_exit);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 17c287dc7471..8f22cb36004d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -516,6 +516,7 @@ static struct irq_chip bcm_gpio_irq_chip = {
.irq_set_type = bcm_kona_gpio_irq_set_type,
.irq_request_resources = bcm_kona_gpio_irq_reqres,
.irq_release_resources = bcm_kona_gpio_irq_relres,
+ .flags = IRQCHIP_IMMUTABLE,
};
static struct of_device_id const bcm_kona_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-blzp1600.c b/drivers/gpio/gpio-blzp1600.c
new file mode 100644
index 000000000000..055cb296ae54
--- /dev/null
+++ b/drivers/gpio/gpio-blzp1600.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 VeriSilicon Limited.
+ * Copyright (C) 2025 Blaize, Inc.
+ */
+
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define GPIO_DIR_REG 0x00
+#define GPIO_CTRL_REG 0x04
+#define GPIO_SET_REG 0x08
+#define GPIO_CLR_REG 0x0C
+#define GPIO_ODATA_REG 0x10
+#define GPIO_IDATA_REG 0x14
+#define GPIO_IEN_REG 0x18
+#define GPIO_IS_REG 0x1C
+#define GPIO_IBE_REG 0x20
+#define GPIO_IEV_REG 0x24
+#define GPIO_RIS_REG 0x28
+#define GPIO_IM_REG 0x2C
+#define GPIO_MIS_REG 0x30
+#define GPIO_IC_REG 0x34
+#define GPIO_DB_REG 0x38
+#define GPIO_DFG_REG 0x3C
+
+#define DRIVER_NAME "blzp1600-gpio"
+
+struct blzp1600_gpio {
+ void __iomem *base;
+ struct gpio_chip gc;
+ int irq;
+};
+
+static inline struct blzp1600_gpio *get_blzp1600_gpio_from_irq_data(struct irq_data *d)
+{
+ return gpiochip_get_data(irq_data_get_irq_chip_data(d));
+}
+
+static inline struct blzp1600_gpio *get_blzp1600_gpio_from_irq_desc(struct irq_desc *d)
+{
+ return gpiochip_get_data(irq_desc_get_handler_data(d));
+}
+
+static inline u32 blzp1600_gpio_read(struct blzp1600_gpio *chip, unsigned int offset)
+{
+ return readl_relaxed(chip->base + offset);
+}
+
+static inline void blzp1600_gpio_write(struct blzp1600_gpio *chip, unsigned int offset, u32 val)
+{
+ writel_relaxed(val, chip->base + offset);
+}
+
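+/*
+ * Read-modify-write helper: the readl/writel pair is not atomic on its
+ * own, so callers serialize access via gc.bgpio_lock.
+ */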
+static inline void blzp1600_gpio_rmw(void __iomem *reg, u32 mask, bool set)
+{
+ u32 val = readl_relaxed(reg);
+
+ if (set)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel_relaxed(val, reg);
+}
+
+static void blzp1600_gpio_irq_mask(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 1);
+}
+
+static void blzp1600_gpio_irq_unmask(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 0);
+}
+
+static void blzp1600_gpio_irq_ack(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ blzp1600_gpio_write(chip, GPIO_IC_REG, BIT(d->hwirq));
+}
+
+static void blzp1600_gpio_irq_enable(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ gpiochip_enable_irq(&chip->gc, irqd_to_hwirq(d));
+
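+	/* Switch the line to input before enabling its interrupt */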
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_DIR_REG, BIT(d->hwirq), 0);
+ blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 1);
+}
+
+static void blzp1600_gpio_irq_disable(struct irq_data *d)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 0);
+ gpiochip_disable_irq(&chip->gc, irqd_to_hwirq(d));
+}
+
+static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
+ u32 edge_level, single_both, fall_rise;
+ int mask = BIT(d->hwirq);
+
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ edge_level = blzp1600_gpio_read(chip, GPIO_IS_REG);
+ single_both = blzp1600_gpio_read(chip, GPIO_IBE_REG);
+ fall_rise = blzp1600_gpio_read(chip, GPIO_IEV_REG);
+
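+	/*
+	 * IS: 1 = level-, 0 = edge-sensitive; IBE: 1 = trigger on both
+	 * edges; IEV: 1 = rising edge/high level, 0 = falling edge/low level.
+	 */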
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ edge_level &= ~mask;
+ single_both |= mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ edge_level &= ~mask;
+ single_both &= ~mask;
+ fall_rise |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge_level &= ~mask;
+ single_both &= ~mask;
+ fall_rise &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge_level |= mask;
+ fall_rise |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge_level |= mask;
+ fall_rise &= ~mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ blzp1600_gpio_write(chip, GPIO_IS_REG, edge_level);
+ blzp1600_gpio_write(chip, GPIO_IBE_REG, single_both);
+ blzp1600_gpio_write(chip, GPIO_IEV_REG, fall_rise);
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+ else
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return 0;
+}
+
+static const struct irq_chip blzp1600_gpio_irqchip = {
+ .name = DRIVER_NAME,
+ .irq_ack = blzp1600_gpio_irq_ack,
+ .irq_mask = blzp1600_gpio_irq_mask,
+ .irq_unmask = blzp1600_gpio_irq_unmask,
+ .irq_set_type = blzp1600_gpio_irq_set_type,
+ .irq_enable = blzp1600_gpio_irq_enable,
+ .irq_disable = blzp1600_gpio_irq_disable,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void blzp1600_gpio_irqhandler(struct irq_desc *desc)
+{
+ struct blzp1600_gpio *gpio = get_blzp1600_gpio_from_irq_desc(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long irq_status;
+ int hwirq = 0;
+
+ chained_irq_enter(irqchip, desc);
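+	/* Dispatch each line flagged in the raw interrupt status register */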
+ irq_status = blzp1600_gpio_read(gpio, GPIO_RIS_REG);
+ for_each_set_bit(hwirq, &irq_status, gpio->gc.ngpio)
+ generic_handle_domain_irq(gpio->gc.irq.domain, hwirq);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int blzp1600_gpio_set_debounce(struct gpio_chip *gc, unsigned int offset,
+ unsigned int debounce)
+{
+ struct blzp1600_gpio *chip = gpiochip_get_data(gc);
+
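+	/* Any non-zero debounce time enables the per-line hardware debouncer */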
+ guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ blzp1600_gpio_rmw(chip->base + GPIO_DB_REG, BIT(offset), debounce);
+
+ return 0;
+}
+
+static int blzp1600_gpio_set_config(struct gpio_chip *gc, unsigned int offset, unsigned long config)
+{
+ u32 debounce;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+ return blzp1600_gpio_set_debounce(gc, offset, debounce);
+}
+
+static int blzp1600_gpio_probe(struct platform_device *pdev)
+{
+ struct blzp1600_gpio *chip;
+ struct gpio_chip *gc;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
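+	/*
+	 * Generic MMIO GPIO: 4-byte registers with separate input data, set,
+	 * clear and direction (out) registers.
+	 */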
+ ret = bgpio_init(&chip->gc, &pdev->dev, 4, chip->base + GPIO_IDATA_REG,
+ chip->base + GPIO_SET_REG, chip->base + GPIO_CLR_REG,
+ chip->base + GPIO_DIR_REG, NULL, 0);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register generic gpio\n");
+
+ /* configure the gpio chip */
+ gc = &chip->gc;
+ gc->set_config = blzp1600_gpio_set_config;
+
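+	/* The interrupt controller is optional; wire it up only if described */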
+ if (device_property_present(&pdev->dev, "interrupt-controller")) {
+ struct gpio_irq_chip *girq;
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0)
+ return chip->irq;
+
+ girq = &gc->irq;
+ gpio_irq_chip_set_chip(girq, &blzp1600_gpio_irqchip);
+ girq->parent_handler = blzp1600_gpio_irqhandler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->parents[0] = chip->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ }
+
+ return devm_gpiochip_add_data(&pdev->dev, gc, chip);
+}
+
+static const struct of_device_id blzp1600_gpio_of_match[] = {
+ { .compatible = "blaize,blzp1600-gpio", },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, blzp1600_gpio_of_match);
+
+static struct platform_driver blzp1600_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = blzp1600_gpio_of_match,
+ },
+ .probe = blzp1600_gpio_probe,
+};
+
+module_platform_driver(blzp1600_gpio_driver);
+
+MODULE_AUTHOR("Nikolaos Pasaloukos <nikolaos.pasaloukos@blaize.com>");
+MODULE_DESCRIPTION("Blaize BLZP1600 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index ca3472977431..e7671bcd5c07 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -437,7 +437,7 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
int err;
priv->irq_domain =
- irq_domain_add_linear(np, priv->num_gpios,
+ irq_domain_create_linear(of_fwnode_handle(np), priv->num_gpios,
&brcmstb_gpio_irq_domain_ops,
priv);
if (!priv->irq_domain) {
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 63fc7888c1d4..80a82492171e 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -68,15 +68,6 @@ static inline u32 __gpio_mask(unsigned gpio)
return 1 << (gpio % 32);
}
-static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
-{
- struct davinci_gpio_regs __iomem *g;
-
- g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d);
-
- return g;
-}
-
static int davinci_gpio_irq_setup(struct platform_device *pdev);
/*--------------------------------------------------------------------------*/
@@ -255,19 +246,27 @@ static int davinci_gpio_probe(struct platform_device *pdev)
static void gpio_irq_mask(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ struct davinci_gpio_controller *chips = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct davinci_gpio_regs __iomem *g = chips->regs[hwirq / 32];
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
writel_relaxed(mask, &g->clr_falling);
writel_relaxed(mask, &g->clr_rising);
+
+ gpiochip_disable_irq(&chips->chip, hwirq);
}
static void gpio_irq_unmask(struct irq_data *d)
{
- struct davinci_gpio_regs __iomem *g = irq2regs(d);
+ struct davinci_gpio_controller *chips = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct davinci_gpio_regs __iomem *g = chips->regs[hwirq / 32];
uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d);
unsigned status = irqd_get_trigger_type(d);
+ gpiochip_enable_irq(&chips->chip, hwirq);
+
status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
if (!status)
status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
@@ -286,12 +285,13 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger)
return 0;
}
-static struct irq_chip gpio_irqchip = {
+static const struct irq_chip gpio_irqchip = {
.name = "GPIO",
.irq_unmask = gpio_irq_unmask,
.irq_mask = gpio_irq_mask,
.irq_set_type = gpio_irq_type,
- .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void gpio_irq_handler(struct irq_desc *desc)
@@ -399,12 +399,11 @@ davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
{
struct davinci_gpio_controller *chips =
(struct davinci_gpio_controller *)d->host_data;
- struct davinci_gpio_regs __iomem *g = chips->regs[hw / 32];
irq_set_chip_and_handler_name(irq, &gpio_irqchip, handle_simple_irq,
"davinci_gpio");
irq_set_irq_type(irq, IRQ_TYPE_NONE);
- irq_set_chip_data(irq, (__force void *)g);
+ irq_set_chip_data(irq, (__force void *)chips);
irq_set_handler_data(irq, (void *)(uintptr_t)__gpio_mask(hw));
return 0;
@@ -479,9 +478,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
return irq;
}
- irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
- &davinci_gpio_irq_ops,
- chips);
+ irq_domain = irq_domain_create_legacy(of_fwnode_handle(dev->of_node), ngpio, irq, 0,
+ &davinci_gpio_irq_ops, chips);
if (!irq_domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
return -ENODEV;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 596da59d4b13..4bd3c47eaf93 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -220,11 +220,12 @@ static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
return dln2_gpio_pin_get_out_val(dln2, offset);
}
-static void dln2_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int dln2_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct dln2_gpio *dln2 = gpiochip_get_data(chip);
- dln2_gpio_pin_set_out_val(dln2, offset, value);
+ return dln2_gpio_pin_set_out_val(dln2, offset, value);
}
static int dln2_gpio_set_direction(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +469,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.set = dln2_gpio_set;
+ dln2->gpio.set_rv = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
dln2->gpio.free = dln2_gpio_free;
diff --git a/drivers/gpio/gpio-ds4520.c b/drivers/gpio/gpio-ds4520.c
index 1903deaef3e9..f52ecae382a4 100644
--- a/drivers/gpio/gpio-ds4520.c
+++ b/drivers/gpio/gpio-ds4520.c
@@ -25,7 +25,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
struct gpio_regmap_config config = { };
struct device *dev = &client->dev;
struct regmap *regmap;
- u32 ngpio;
u32 base;
int ret;
@@ -33,10 +32,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Missing 'reg' property.\n");
- ret = device_property_read_u32(dev, "ngpios", &ngpio);
- if (ret)
- return dev_err_probe(dev, ret, "Missing 'ngpios' property.\n");
-
regmap = devm_regmap_init_i2c(client, &ds4520_regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
@@ -44,7 +39,6 @@ static int ds4520_gpio_probe(struct i2c_client *client)
config.regmap = regmap;
config.parent = dev;
- config.ngpio = ngpio;
config.reg_dat_base = base + DS4520_IO_STATUS0;
config.reg_set_base = base + DS4520_PULLUP0;
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index d4bf8d187e16..f2973d0b7138 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -203,9 +203,10 @@ static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
return 0;
}
-static void sprd_eic_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int sprd_eic_set(struct gpio_chip *chip, unsigned int offset, int value)
{
/* EICs are always input, nothing needs to be done here. */
+ return 0;
}
static int sprd_eic_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -662,7 +663,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.request = sprd_eic_request;
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
- sprd_eic->chip.set = sprd_eic_set;
+ sprd_eic->chip.set_rv = sprd_eic_set;
fallthrough;
case SPRD_EIC_ASYNC:
case SPRD_EIC_SYNC:
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 6c862c572322..a5e6e446f39c 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -204,13 +204,15 @@ static void __em_gio_set(struct gpio_chip *chip, unsigned int reg,
(BIT(shift + 16)) | (value << shift));
}
-static void em_gio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int em_gio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
/* output is split into two registers */
if (offset < 16)
__em_gio_set(chip, GIO_OL, offset, value);
else
__em_gio_set(chip, GIO_OH, offset - 16, value);
+
+ return 0;
}
static int em_gio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -304,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
- gpio_chip->set = em_gio_set;
+ gpio_chip->set_rv = em_gio_set;
gpio_chip->to_irq = em_gio_to_irq;
gpio_chip->request = pinctrl_gpio_request;
gpio_chip->free = em_gio_free;
@@ -323,8 +325,9 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(dev->of_node, ngpios, 0,
- &em_gio_irq_domain_ops, p);
+ p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
+ ngpios, 0,
+ &em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
return -ENXIO;
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index d5909a4f0433..beb98286d13e 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -93,8 +93,8 @@ static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
return !!(regmap_test_bits(exar_gpio->regmap, addr, BIT(bit)));
}
-static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int exar_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
@@ -105,7 +105,7 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
* regmap_write_bits() forces value to be written when an external
* pull up/down might otherwise indicate value was already set.
*/
- regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
+ return regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
}
static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -114,11 +114,13 @@ static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+ int ret;
- exar_set_value(chip, offset, value);
- regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
+ ret = exar_set_value(chip, offset, value);
+ if (ret)
+ return ret;
- return 0;
+ return regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
}
static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -209,7 +211,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
exar_gpio->gpio_chip.get = exar_get_value;
- exar_gpio->gpio_chip.set = exar_set_value;
+ exar_gpio->gpio_chip.set_rv = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
exar_gpio->index = index;
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 3875fd940ccb..dfcd3634f279 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -159,7 +159,8 @@ static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value);
-static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
+static int f7188x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value);
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
unsigned long config);
@@ -172,7 +173,7 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.direction_input = f7188x_gpio_direction_in, \
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
- .set = f7188x_gpio_set, \
+ .set_rv = f7188x_gpio_set, \
.set_config = f7188x_gpio_set_config, \
.base = -1, \
.ngpio = _ngpio, \
@@ -391,7 +392,8 @@ static int f7188x_gpio_direction_out(struct gpio_chip *chip,
return 0;
}
-static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int f7188x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int err;
struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
@@ -400,7 +402,8 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
err = superio_enter(sio->addr);
if (err)
- return;
+ return err;
+
superio_select(sio->addr, sio->device);
data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
@@ -411,6 +414,8 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out);
superio_exit(sio->addr);
+
+ return 0;
}
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index ad6a045fd3d2..f25283e5239d 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -116,7 +116,7 @@ static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return !!(dw & GNR_CFG_DW_RXSTATE);
}
-static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
u32 clear = 0;
u32 set = 0;
@@ -126,7 +126,7 @@ static void gnr_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
else
clear = GNR_CFG_DW_TXSTATE;
- gnr_gpio_configure_line(gc, gpio, clear, set);
+ return gnr_gpio_configure_line(gc, gpio, clear, set);
}
static int gnr_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
@@ -159,7 +159,7 @@ static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get,
- .set = gnr_gpio_set,
+ .set_rv = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
.direction_input = gnr_gpio_direction_input,
.direction_output = gnr_gpio_direction_output,
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 30a0522ae735..d38a2d9854ca 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -170,6 +170,8 @@ static void grgpio_irq_mask(struct irq_data *d)
grgpio_set_imask(priv, offset, 0);
raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+
+ gpiochip_disable_irq(&priv->gc, d->hwirq);
}
static void grgpio_irq_unmask(struct irq_data *d)
@@ -178,6 +180,7 @@ static void grgpio_irq_unmask(struct irq_data *d)
int offset = d->hwirq;
unsigned long flags;
+ gpiochip_enable_irq(&priv->gc, d->hwirq);
raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
grgpio_set_imask(priv, offset, 1);
@@ -185,11 +188,13 @@ static void grgpio_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
}
-static struct irq_chip grgpio_irq_chip = {
+static const struct irq_chip grgpio_irq_chip = {
.name = "grgpio",
.irq_mask = grgpio_irq_mask,
.irq_unmask = grgpio_irq_unmask,
.irq_set_type = grgpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t grgpio_irq_handler(int irq, void *dev)
@@ -397,7 +402,7 @@ static int grgpio_probe(struct platform_device *ofdev)
return -EINVAL;
}
- priv->domain = irq_domain_add_linear(np, gc->ngpio,
+ priv->domain = irq_domain_create_linear(of_fwnode_handle(np), gc->ngpio,
&grgpio_irq_domain_ops,
priv);
if (!priv->domain) {
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index 7e29a2d8de1a..a40ba99a3aea 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -62,9 +62,9 @@ static int gw_pld_output8(struct gpio_chip *gc, unsigned offset, int value)
return i2c_smbus_write_byte(gw->client, gw->out);
}
-static void gw_pld_set8(struct gpio_chip *gc, unsigned offset, int value)
+static int gw_pld_set8(struct gpio_chip *gc, unsigned int offset, int value)
{
- gw_pld_output8(gc, offset, value);
+ return gw_pld_output8(gc, offset, value);
}
static int gw_pld_probe(struct i2c_client *client)
@@ -86,7 +86,7 @@ static int gw_pld_probe(struct i2c_client *client)
gw->chip.direction_input = gw_pld_input8;
gw->chip.get = gw_pld_get8;
gw->chip.direction_output = gw_pld_output8;
- gw->chip.set = gw_pld_set8;
+ gw->chip.set_rv = gw_pld_set8;
gw->client = client;
/*
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index a40bd56673fe..b1844a676c7c 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -170,7 +170,7 @@ static int egpio_direction_input(struct gpio_chip *chip, unsigned offset)
* Output pins
*/
-static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int egpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
unsigned long flag;
struct egpio_chip *egpio;
@@ -198,6 +198,8 @@ static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
egpio->cached_values &= ~(1 << offset);
egpio_writew((egpio->cached_values >> shift) & ei->reg_mask, ei, reg);
spin_unlock_irqrestore(&ei->lock, flag);
+
+ return 0;
}
static int egpio_direction_output(struct gpio_chip *chip,
@@ -206,12 +208,10 @@ static int egpio_direction_output(struct gpio_chip *chip,
struct egpio_chip *egpio;
egpio = gpiochip_get_data(chip);
- if (test_bit(offset, &egpio->is_out)) {
- egpio_set(chip, offset, value);
- return 0;
- } else {
- return -EINVAL;
- }
+ if (test_bit(offset, &egpio->is_out))
+ return egpio_set(chip, offset, value);
+
+ return -EINVAL;
}
static int egpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -324,7 +324,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
- chip->set = egpio_set;
+ chip->set_rv = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
chip->get_direction = egpio_get_direction;
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 0be9285efebc..67089b2423d8 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -175,12 +175,16 @@ static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned int nr,
int val)
{
+ int ret;
+
/* Disable blink hardware which is available for GPIOs from 0 to 31. */
if (nr < 32 && ichx_priv.desc->have_blink)
ichx_write_bit(GPO_BLINK, nr, 0, 0);
/* Set GPIO output value. */
- ichx_write_bit(GPIO_LVL, nr, val, 0);
+ ret = ichx_write_bit(GPIO_LVL, nr, val, 0);
+ if (ret)
+ return ret;
/*
* Try setting pin as an output and verify it worked since many pins
@@ -252,9 +256,9 @@ static int ich6_gpio_request(struct gpio_chip *chip, unsigned int nr)
return ichx_gpio_request(chip, nr);
}
-static void ichx_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
+static int ichx_gpio_set(struct gpio_chip *chip, unsigned int nr, int val)
{
- ichx_write_bit(GPIO_LVL, nr, val, 0);
+ return ichx_write_bit(GPIO_LVL, nr, val, 0);
}
static void ichx_gpiolib_setup(struct gpio_chip *chip)
@@ -269,7 +273,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
chip->get = ichx_priv.desc->get ?
ichx_priv.desc->get : ichx_gpio_get;
- chip->set = ichx_gpio_set;
+ chip->set_rv = ichx_gpio_set;
chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 00f547d26254..535f25514455 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -37,7 +37,7 @@ static void idt_gpio_dispatch(struct irq_desc *desc)
pending = readl(ctrl->pic + IDT_PIC_IRQ_PEND);
pending &= ~ctrl->mask_cache;
for_each_set_bit(bit, &pending, gc->ngpio) {
- virq = irq_linear_revmap(gc->irq.domain, bit);
+ virq = irq_find_mapping(gc->irq.domain, bit);
if (virq)
generic_handle_irq(virq);
}
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 13baf465aedf..1693dbf1b777 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -6,8 +6,10 @@
* to control the PIN resources on SCU domain.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/firmware/imx/svc/rm.h>
@@ -37,16 +39,11 @@ static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
int level;
int err;
- if (offset >= chip->ngpio)
- return -EINVAL;
-
- mutex_lock(&priv->lock);
-
- /* to read PIN state via scu api */
- err = imx_sc_misc_get_control(priv->handle,
- scu_rsrc_arr[offset], 0, &level);
- mutex_unlock(&priv->lock);
-
+ scoped_guard(mutex, &priv->lock) {
+		/* Read the pin state via the SCU API */
+ err = imx_sc_misc_get_control(priv->handle,
+ scu_rsrc_arr[offset], 0, &level);
+ }
if (err) {
dev_err(priv->dev, "SCU get failed: %d\n", err);
return err;
@@ -55,31 +52,26 @@ static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
return level;
}
-static void imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct scu_gpio_priv *priv = gpiochip_get_data(chip);
int err;
- if (offset >= chip->ngpio)
- return;
-
- mutex_lock(&priv->lock);
-
- /* to set PIN output level via scu api */
- err = imx_sc_misc_set_control(priv->handle,
- scu_rsrc_arr[offset], 0, value);
- mutex_unlock(&priv->lock);
-
+ scoped_guard(mutex, &priv->lock) {
+		/* Set the pin output level via the SCU API */
+ err = imx_sc_misc_set_control(priv->handle,
+ scu_rsrc_arr[offset], 0, value);
+ }
if (err)
dev_err(priv->dev, "SCU set (%d) failed: %d\n",
scu_rsrc_arr[offset], err);
+
+ return err;
}
static int imx_scu_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
-
return GPIO_LINE_DIRECTION_OUT;
}
@@ -99,7 +91,10 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
return ret;
priv->dev = dev;
- mutex_init(&priv->lock);
+
+ ret = devm_mutex_init(&pdev->dev, &priv->lock);
+ if (ret)
+ return ret;
gc = &priv->chip;
gc->base = -1;
@@ -107,7 +102,7 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
gc->label = dev_name(dev);
gc->get = imx_scu_gpio_get;
- gc->set = imx_scu_gpio_set;
+ gc->set_rv = imx_scu_gpio_set;
gc->get_direction = imx_scu_gpio_get_direction;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index f332341fd4c8..d8184b527bac 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -213,8 +213,7 @@ exit:
return rc;
}
-static void it87_gpio_set(struct gpio_chip *chip,
- unsigned gpio_num, int val)
+static int it87_gpio_set(struct gpio_chip *chip, unsigned int gpio_num, int val)
{
u8 mask, curr_vals;
u16 reg;
@@ -228,6 +227,8 @@ static void it87_gpio_set(struct gpio_chip *chip,
outb(curr_vals | mask, reg);
else
outb(curr_vals & ~mask, reg);
+
+ return 0;
}
static int it87_gpio_direction_out(struct gpio_chip *chip,
@@ -249,7 +250,9 @@ static int it87_gpio_direction_out(struct gpio_chip *chip,
/* set the output enable bit */
superio_set_mask(mask, group + it87_gpio->output_base);
- it87_gpio_set(chip, gpio_num, val);
+ rc = it87_gpio_set(chip, gpio_num, val);
+ if (rc)
+ goto exit;
superio_exit();
@@ -264,7 +267,7 @@ static const struct gpio_chip it87_template_chip = {
.request = it87_gpio_request,
.get = it87_gpio_get,
.direction_input = it87_gpio_direction_in,
- .set = it87_gpio_set,
+ .set_rv = it87_gpio_set,
.direction_output = it87_gpio_direction_out,
.base = -1
};
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index cdf50e4ea165..9f548eda3888 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -76,7 +76,7 @@ static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
return !!ret;
}
-static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
+static int ttl_set_value(struct gpio_chip *gpio, unsigned int offset, int value)
{
struct ttl_module *mod = dev_get_drvdata(gpio->parent);
void __iomem *port;
@@ -103,6 +103,8 @@ static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
iowrite16be(*shadow, port);
spin_unlock(&mod->lock);
+
+ return 0;
}
static void ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
@@ -169,7 +171,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->parent = &pdev->dev;
gpio->label = pdev->name;
gpio->get = ttl_get_value;
- gpio->set = ttl_set_value;
+ gpio->set_rv = ttl_set_value;
gpio->owner = THIS_MODULE;
/* request dynamic allocation */
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 4ea15f08e0f4..e38e604baa22 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -63,7 +63,8 @@ static int kempld_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!kempld_gpio_get_bit(pld, KEMPLD_GPIO_LVL_NUM(offset), offset);
}
-static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int kempld_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct kempld_gpio_data *gpio = gpiochip_get_data(chip);
struct kempld_device_data *pld = gpio->pld;
@@ -71,6 +72,8 @@ static void kempld_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
kempld_get_mutex(pld);
kempld_gpio_bitop(pld, KEMPLD_GPIO_LVL_NUM(offset), offset, value);
kempld_release_mutex(pld);
+
+ return 0;
}
static int kempld_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -166,7 +169,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
chip->direction_output = kempld_gpio_direction_output;
chip->get_direction = kempld_gpio_get_direction;
chip->get = kempld_gpio_get;
- chip->set = kempld_gpio_set;
+ chip->set_rv = kempld_gpio_set;
chip->ngpio = kempld_gpio_pincount(pld);
if (chip->ngpio == 0) {
dev_err(dev, "No GPIO pins detected\n");
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 817ecb12d550..61524a9ba765 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -144,8 +144,8 @@ static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
return ljca_gpio_read(ljca_gpio, offset);
}
-static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
int ret;
@@ -155,6 +155,8 @@ static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
dev_err(chip->parent,
"set value failed offset: %u val: %d ret: %d\n",
offset, val, ret);
+
+ return ret;
}
static int ljca_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -183,7 +185,10 @@ static int ljca_gpio_direction_output(struct gpio_chip *chip,
if (ret)
return ret;
- ljca_gpio_set_value(chip, offset, val);
+ ret = ljca_gpio_set_value(chip, offset, val);
+ if (ret)
+ return ret;
+
set_bit(offset, ljca_gpio->output_enabled);
return 0;
@@ -432,7 +437,7 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
ljca_gpio->gc.get = ljca_gpio_get_value;
- ljca_gpio->gc.set = ljca_gpio_set_value;
+ ljca_gpio->gc.set_rv = ljca_gpio_set_value;
ljca_gpio->gc.set_config = ljca_gpio_set_config;
ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
ljca_gpio->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 05d62011f335..19cd2847467c 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -61,23 +61,22 @@ static int logicvc_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(value & bit);
}
-static void logicvc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int logicvc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct logicvc_gpio *logicvc = gpiochip_get_data(chip);
unsigned int reg, bit;
logicvc_gpio_offset(logicvc, offset, &reg, &bit);
- regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
+ return regmap_update_bits(logicvc->regmap, reg, bit, value ? bit : 0);
}
static int logicvc_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
/* Pins are always configured as output, so just set the value. */
- logicvc_gpio_set(chip, offset, value);
-
- return 0;
+ return logicvc_gpio_set(chip, offset, value);
}
static struct regmap_config logicvc_gpio_regmap_config = {
@@ -135,7 +134,7 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
LOGICVC_POWER_CTRL_GPIO_BITS;
logicvc->chip.get = logicvc_gpio_get;
- logicvc->chip.set = logicvc_gpio_set;
+ logicvc->chip.set_rv = logicvc_gpio_set;
logicvc->chip.direction_output = logicvc_gpio_direction_output;
return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index a9a93036f08f..70a01c5b8ad1 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -105,7 +105,7 @@ static int loongson_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
return GPIO_LINE_DIRECTION_OUT;
}
-static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
unsigned long flags;
struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
@@ -113,6 +113,8 @@ static void loongson_gpio_set(struct gpio_chip *chip, unsigned int pin, int valu
spin_lock_irqsave(&lgpio->lock, flags);
loongson_commit_level(lgpio, pin, value);
spin_unlock_irqrestore(&lgpio->lock, flags);
+
+ return 0;
}
static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -155,7 +157,7 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
lgpio->chip.get = loongson_gpio_get;
lgpio->chip.get_direction = loongson_gpio_get_direction;
lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set = loongson_gpio_set;
+ lgpio->chip.set_rv = loongson_gpio_set;
lgpio->chip.parent = dev;
spin_lock_init(&lgpio->lock);
}
@@ -266,7 +268,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data0 = {
/* LS7A2000 ACPI GPIO */
static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data1 = {
.label = "ls7a2000_gpio",
- .mode = BYTE_CTRL_MODE,
+ .mode = BIT_CTRL_MODE,
.conf_offset = 0x4,
.in_offset = 0x8,
.out_offset = 0x0,
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index a42145873cc9..8f3668169ebf 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -48,8 +48,8 @@ static int loongson_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
return !!(val & BIT(gpio + LOONGSON_GPIO_IN_OFFSET));
}
-static void loongson_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int loongson_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
u32 val;
@@ -61,6 +61,8 @@ static void loongson_gpio_set_value(struct gpio_chip *chip,
val &= ~BIT(gpio);
LOONGSON_GPIODATA = val;
spin_unlock(&gpio_lock);
+
+ return 0;
}
static int loongson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -104,7 +106,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
gc->base = 0;
gc->ngpio = LOONGSON_N_GPIO;
gc->get = loongson_gpio_get_value;
- gc->set = loongson_gpio_set_value;
+ gc->set_rv = loongson_gpio_set_value;
gc->direction_input = loongson_gpio_direction_input;
gc->direction_output = loongson_gpio_direction_output;
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 8e58242f5123..52ab3ac4844c 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -147,7 +147,8 @@ static int lp3943_gpio_get(struct gpio_chip *chip, unsigned int offset)
return lp3943_get_gpio_out_status(lp3943_gpio, chip, offset);
}
-static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
u8 data;
@@ -157,15 +158,19 @@ static void lp3943_gpio_set(struct gpio_chip *chip, unsigned int offset, int val
else
data = LP3943_GPIO_OUT_LOW;
- lp3943_gpio_set_mode(lp3943_gpio, offset, data);
+ return lp3943_gpio_set_mode(lp3943_gpio, offset, data);
}
static int lp3943_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct lp3943_gpio *lp3943_gpio = gpiochip_get_data(chip);
+ int ret;
+
+ ret = lp3943_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
- lp3943_gpio_set(chip, offset, value);
lp3943_gpio->input_mask &= ~BIT(offset);
return 0;
@@ -179,7 +184,7 @@ static const struct gpio_chip lp3943_gpio_chip = {
.direction_input = lp3943_gpio_direction_input,
.get = lp3943_gpio_get,
.direction_output = lp3943_gpio_direction_output,
- .set = lp3943_gpio_set,
+ .set_rv = lp3943_gpio_set,
.base = -1,
.ngpio = LP3943_MAX_GPIO,
.can_sleep = 1,
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 5c79ba1f229c..1908ed302e92 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -58,14 +58,14 @@ static int lp873x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return val & BIT(offset * BITS_PER_GPO);
}
-static void lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int lp873x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp873x_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
- BIT(offset * BITS_PER_GPO),
- value ? BIT(offset * BITS_PER_GPO) : 0);
+ return regmap_update_bits(gpio->lp873->regmap, LP873X_REG_GPO_CTRL,
+ BIT(offset * BITS_PER_GPO),
+ value ? BIT(offset * BITS_PER_GPO) : 0);
}
static int lp873x_gpio_request(struct gpio_chip *gc, unsigned int offset)
@@ -124,7 +124,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp873x_gpio_direction_input,
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
- .set = lp873x_gpio_set,
+ .set_rv = lp873x_gpio_set,
.set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index d3ce027de081..8ea687d5d028 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -30,13 +30,13 @@ static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
}
static int lp87565_gpio_get_direction(struct gpio_chip *chip,
@@ -69,8 +69,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ int ret;
- lp87565_gpio_set(chip, offset, value);
+ ret = lp87565_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
return regmap_update_bits(gpio->map,
LP87565_REG_GPIO_CONFIG,
@@ -136,7 +139,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp87565_gpio_direction_input,
.direction_output = lp87565_gpio_direction_output,
.get = lp87565_gpio_get,
- .set = lp87565_gpio_set,
+ .set_rv = lp87565_gpio_set,
.set_config = lp87565_gpio_set_config,
.base = -1,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index 2cf9fb4637a2..b0a8da5c058d 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -42,6 +42,7 @@ struct lpc18xx_gpio_pin_ic {
void __iomem *base;
struct irq_domain *domain;
struct raw_spinlock lock;
+ struct gpio_chip *gpio;
};
struct lpc18xx_gpio_chip {
@@ -74,6 +75,7 @@ static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock(&ic->lock);
@@ -88,12 +90,17 @@ static void lpc18xx_gpio_pin_ic_mask(struct irq_data *d)
raw_spin_unlock(&ic->lock);
irq_chip_mask_parent(d);
+
+ gpiochip_disable_irq(ic->gpio, hwirq);
}
static void lpc18xx_gpio_pin_ic_unmask(struct irq_data *d)
{
struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
u32 type = irqd_get_trigger_type(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(ic->gpio, hwirq);
raw_spin_lock(&ic->lock);
@@ -149,13 +156,14 @@ static int lpc18xx_gpio_pin_ic_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip lpc18xx_gpio_pin_ic = {
+static const struct irq_chip lpc18xx_gpio_pin_ic = {
.name = "LPC18xx GPIO pin",
.irq_mask = lpc18xx_gpio_pin_ic_mask,
.irq_unmask = lpc18xx_gpio_pin_ic_unmask,
.irq_eoi = lpc18xx_gpio_pin_ic_eoi,
.irq_set_type = lpc18xx_gpio_pin_ic_set_type,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int lpc18xx_gpio_pin_ic_domain_alloc(struct irq_domain *domain,
@@ -240,17 +248,16 @@ static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc)
raw_spin_lock_init(&ic->lock);
- ic->domain = irq_domain_add_hierarchy(parent_domain, 0,
- NR_LPC18XX_GPIO_PIN_IC_IRQS,
- dev->of_node,
- &lpc18xx_gpio_pin_ic_domain_ops,
- ic);
+ ic->domain = irq_domain_create_hierarchy(parent_domain, 0, NR_LPC18XX_GPIO_PIN_IC_IRQS,
+ of_fwnode_handle(dev->of_node),
+ &lpc18xx_gpio_pin_ic_domain_ops, ic);
if (!ic->domain) {
pr_err("unable to add irq domain\n");
ret = -ENODEV;
goto free_iomap;
}
+ ic->gpio = &gc->gpio;
gc->pin_ic = ic;
return 0;
@@ -263,10 +270,14 @@ free_ic:
return ret;
}
-static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int lpc18xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct lpc18xx_gpio_chip *gc = gpiochip_get_data(chip);
+
writeb(value ? 1 : 0, gc->base + offset);
+
+ return 0;
}
static int lpc18xx_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -316,7 +327,7 @@ static const struct gpio_chip lpc18xx_chip = {
.free = gpiochip_generic_free,
.direction_input = lpc18xx_gpio_direction_input,
.direction_output = lpc18xx_gpio_direction_output,
- .set = lpc18xx_gpio_set,
+ .set_rv = lpc18xx_gpio_set,
.get = lpc18xx_gpio_get,
.ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT,
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index c097e310c9e8..6668b8bd9f1e 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -340,28 +340,34 @@ static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
return 0;
}
-static void lpc32xx_gpio_set_value_p012(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpio_set_value_p012(struct gpio_chip *chip,
+ unsigned int pin, int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p012(group, pin, value);
+
+ return 0;
}
-static void lpc32xx_gpio_set_value_p3(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpio_set_value_p3(struct gpio_chip *chip,
+ unsigned int pin, int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpio_level_p3(group, pin, value);
+
+ return 0;
}
-static void lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned pin,
- int value)
+static int lpc32xx_gpo_set_value(struct gpio_chip *chip, unsigned int pin,
+ int value)
{
struct lpc32xx_gpio_chip *group = gpiochip_get_data(chip);
__set_gpo_level_p3(group, pin, value);
+
+ return 0;
}
static int lpc32xx_gpo_get_value(struct gpio_chip *chip, unsigned pin)
@@ -401,7 +407,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
@@ -417,7 +423,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
@@ -433,7 +439,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set = lpc32xx_gpio_set_value_p012,
+ .set_rv = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
@@ -448,7 +454,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p3,
.get = lpc32xx_gpio_get_value_p3,
.direction_output = lpc32xx_gpio_dir_output_p3,
- .set = lpc32xx_gpio_set_value_p3,
+ .set_rv = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
@@ -476,7 +482,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.chip = {
.label = "gpo_p3",
.direction_output = lpc32xx_gpio_dir_out_always,
- .set = lpc32xx_gpo_set_value,
+ .set_rv = lpc32xx_gpo_set_value,
.get = lpc32xx_gpo_get_value,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPO_P3_GRP,
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index 8f38303fcbc4..e73e72d62bc8 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -87,23 +87,17 @@ static int madera_gpio_direction_out(struct gpio_chip *chip,
MADERA_GP1_LVL_MASK, reg_val);
}
-static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int madera_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct madera_gpio *madera_gpio = gpiochip_get_data(chip);
struct madera *madera = madera_gpio->madera;
unsigned int reg_offset = 2 * offset;
unsigned int reg_val = value ? MADERA_GP1_LVL : 0;
- int ret;
-
- ret = regmap_update_bits(madera->regmap,
- MADERA_GPIO1_CTRL_1 + reg_offset,
- MADERA_GP1_LVL_MASK, reg_val);
- /* set() doesn't return an error so log a warning */
- if (ret)
- dev_warn(madera->dev, "Failed to write to 0x%x (%d)\n",
- MADERA_GPIO1_CTRL_1 + reg_offset, ret);
+ return regmap_update_bits(madera->regmap,
+ MADERA_GPIO1_CTRL_1 + reg_offset,
+ MADERA_GP1_LVL_MASK, reg_val);
}
static const struct gpio_chip madera_gpio_chip = {
@@ -115,7 +109,7 @@ static const struct gpio_chip madera_gpio_chip = {
.direction_input = madera_gpio_direction_in,
.get = madera_gpio_get,
.direction_output = madera_gpio_direction_out,
- .set = madera_gpio_set,
+ .set_rv = madera_gpio_set,
.set_config = gpiochip_generic_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index fc0708ab5192..6e6504ab740a 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -103,19 +103,6 @@ static int max3191x_direction_input(struct gpio_chip *gpio, unsigned int offset)
return 0;
}
-static int max3191x_direction_output(struct gpio_chip *gpio,
- unsigned int offset, int value)
-{
- return -EINVAL;
-}
-
-static void max3191x_set(struct gpio_chip *gpio, unsigned int offset, int value)
-{ }
-
-static void max3191x_set_multiple(struct gpio_chip *gpio, unsigned long *mask,
- unsigned long *bits)
-{ }
-
static unsigned int max3191x_wordlen(struct max3191x_chip *max3191x)
{
return max3191x->mode == STATUS_BYTE_ENABLED ? 2 : 1;
@@ -421,9 +408,6 @@ static int max3191x_probe(struct spi_device *spi)
max3191x->gpio.get_direction = max3191x_get_direction;
max3191x->gpio.direction_input = max3191x_direction_input;
- max3191x->gpio.direction_output = max3191x_direction_output;
- max3191x->gpio.set = max3191x_set;
- max3191x->gpio.set_multiple = max3191x_set_multiple;
max3191x->gpio.get = max3191x_get;
max3191x->gpio.get_multiple = max3191x_get_multiple;
max3191x->gpio.set_config = max3191x_set_config;
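The max3191x change removes stubs rather than converting them: the chip is input-only, and gpiolib is expected to fail output requests on a chip that provides no output callbacks, so the -EINVAL direction_output and the empty set/set_multiple added nothing. A hedged sketch of an input-only registration, with hypothetical "demo" names:

#include <linux/gpio/driver.h>

static int demo_get(struct gpio_chip *gc, unsigned int offset)
{
	return 0;	/* stand-in for reading the input register */
}

static int demo_direction_input(struct gpio_chip *gc, unsigned int offset)
{
	return 0;	/* all lines are permanently inputs */
}

static const struct gpio_chip demo_input_only_chip = {
	.label = "demo-in",
	.get = demo_get,
	.direction_input = demo_direction_input,
	.base = -1,
	.ngpio = 8,
};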
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index e688c13c8cc3..75d414d8c992 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -143,18 +143,21 @@ static int max7301_get(struct gpio_chip *chip, unsigned offset)
return level;
}
-static void max7301_set(struct gpio_chip *chip, unsigned offset, int value)
+static int max7301_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct max7301 *ts = gpiochip_get_data(chip);
+ int ret;
/* First 4 pins are unused in the controller */
offset += 4;
mutex_lock(&ts->lock);
- __max7301_set(ts, offset, value);
+ ret = __max7301_set(ts, offset, value);
mutex_unlock(&ts->lock);
+
+ return ret;
}
int __max730x_probe(struct max7301 *ts)
@@ -185,7 +188,7 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.direction_input = max7301_direction_input;
ts->chip.get = max7301_get;
ts->chip.direction_output = max7301_direction_output;
- ts->chip.set = max7301_set;
+ ts->chip.set_rv = max7301_set;
ts->chip.ngpio = PIN_NUMBER;
ts->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 49d362907bc7..d5ffedb086af 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -225,16 +225,19 @@ out:
mutex_unlock(&chip->lock);
}
-static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+static int max732x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
unsigned base = off & ~0x7;
uint8_t mask = 1u << (off & 0x7);
max732x_gpio_set_mask(gc, base, mask, val << (off & 0x7));
+
+ return 0;
}
-static void max732x_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int max732x_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
unsigned mask_lo = mask[0] & 0xff;
unsigned mask_hi = (mask[0] >> 8) & 0xff;
@@ -243,6 +246,8 @@ static void max732x_gpio_set_multiple(struct gpio_chip *gc,
max732x_gpio_set_mask(gc, 0, mask_lo, bits[0] & 0xff);
if (mask_hi)
max732x_gpio_set_mask(gc, 8, mask_hi, (bits[0] >> 8) & 0xff);
+
+ return 0;
}
static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
@@ -580,8 +585,8 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->direction_input = max732x_gpio_direction_input;
if (chip->dir_output) {
gc->direction_output = max732x_gpio_direction_output;
- gc->set = max732x_gpio_set_value;
- gc->set_multiple = max732x_gpio_set_multiple;
+ gc->set_rv = max732x_gpio_set_value;
+ gc->set_multiple_rv = max732x_gpio_set_multiple;
}
gc->get = max732x_gpio_get_value;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 8c2a5609161f..af7af8e40afe 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -223,20 +223,17 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
return ret;
}
-static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct max77620_gpio *mgpio = gpiochip_get_data(gc);
u8 val;
- int ret;
val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH :
MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW;
- ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
- MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
- if (ret < 0)
- dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret);
+ return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
}
static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
@@ -314,7 +311,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set = max77620_gpio_set;
+ mgpio->gpio_chip.set_rv = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-max77759.c b/drivers/gpio/gpio-max77759.c
new file mode 100644
index 000000000000..7fe8e6f697d0
--- /dev/null
+++ b/drivers/gpio/gpio-max77759.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2020 Google Inc
+// Copyright 2025 Linaro Ltd.
+//
+// GPIO driver for Maxim MAX77759
+
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/device/driver.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/lockdep.h>
+#include <linux/mfd/max77759.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#define MAX77759_N_GPIOS ARRAY_SIZE(max77759_gpio_line_names)
+static const char * const max77759_gpio_line_names[] = { "GPIO5", "GPIO6" };
+
+struct max77759_gpio_chip {
+ struct regmap *map;
+ struct max77759 *max77759;
+ struct gpio_chip gc;
+ struct mutex maxq_lock; /* protect MaxQ r/m/w operations */
+
+ struct mutex irq_lock; /* protect irq bus */
+ int irq_mask;
+ int irq_mask_changed;
+ int irq_trig;
+ int irq_trig_changed;
+};
+
+#define MAX77759_GPIOx_TRIGGER(offs, val) (((val) & 1) << (offs))
+#define MAX77759_GPIOx_TRIGGER_MASK(offs) MAX77759_GPIOx_TRIGGER(offs, ~0)
+enum max77759_trigger_gpio_type {
+ MAX77759_GPIO_TRIGGER_RISING = 0,
+ MAX77759_GPIO_TRIGGER_FALLING = 1
+};
+
+#define MAX77759_GPIOx_DIR(offs, dir) (((dir) & 1) << (2 + (3 * (offs))))
+#define MAX77759_GPIOx_DIR_MASK(offs) MAX77759_GPIOx_DIR(offs, ~0)
+enum max77759_control_gpio_dir {
+ MAX77759_GPIO_DIR_IN = 0,
+ MAX77759_GPIO_DIR_OUT = 1
+};
+
+#define MAX77759_GPIOx_OUTVAL(offs, val) (((val) & 1) << (3 + (3 * (offs))))
+#define MAX77759_GPIOx_OUTVAL_MASK(offs) MAX77759_GPIOx_OUTVAL(offs, ~0)
+
+#define MAX77759_GPIOx_INVAL_MASK(offs) (BIT(4) << (3 * (offs)))
+
+static int max77759_gpio_maxq_gpio_trigger_read(struct max77759_gpio_chip *chip)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ;
+
+ ret = max77759_maxq_command(chip->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ return rsp->rsp[1];
+}
+
+static int max77759_gpio_maxq_gpio_trigger_write(struct max77759_gpio_chip *chip,
+ u8 trigger)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE;
+ cmd->cmd[1] = trigger;
+
+ return max77759_maxq_command(chip->max77759, cmd, NULL);
+}
+
+static int max77759_gpio_maxq_gpio_control_read(struct max77759_gpio_chip *chip)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;
+
+ ret = max77759_maxq_command(chip->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ return rsp->rsp[1];
+}
+
+static int max77759_gpio_maxq_gpio_control_write(struct max77759_gpio_chip *chip,
+ u8 ctrl)
+{
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE;
+ cmd->cmd[1] = ctrl;
+
+ return max77759_maxq_command(chip->max77759, cmd, NULL);
+}
+
+static int
+max77759_gpio_direction_from_control(int ctrl, unsigned int offset)
+{
+ enum max77759_control_gpio_dir dir;
+
+ dir = !!(ctrl & MAX77759_GPIOx_DIR_MASK(offset));
+ return ((dir == MAX77759_GPIO_DIR_OUT)
+ ? GPIO_LINE_DIRECTION_OUT
+ : GPIO_LINE_DIRECTION_IN);
+}
+
+static int max77759_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl;
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ return max77759_gpio_direction_from_control(ctrl, offset);
+}
+
+static int max77759_gpio_direction_helper(struct gpio_chip *gc,
+ unsigned int offset,
+ enum max77759_control_gpio_dir dir,
+ int value)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, new_ctrl;
+
+ guard(mutex)(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ new_ctrl = ctrl & ~MAX77759_GPIOx_DIR_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_DIR(offset, dir);
+
+ if (dir == MAX77759_GPIO_DIR_OUT) {
+ new_ctrl &= ~MAX77759_GPIOx_OUTVAL_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_OUTVAL(offset, value);
+ }
+
+ if (new_ctrl == ctrl)
+ return 0;
+
+ return max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+}
+
+static int max77759_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ return max77759_gpio_direction_helper(gc, offset,
+ MAX77759_GPIO_DIR_IN, -1);
+}
+
+static int max77759_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ return max77759_gpio_direction_helper(gc, offset,
+ MAX77759_GPIO_DIR_OUT, value);
+}
+
+static int max77759_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, mask;
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ /*
+ * The input status bit doesn't reflect the pin state when the GPIO is
+ * configured as an output. Check the direction, and inspect the input
+ * or output bit accordingly.
+ */
+ mask = ((max77759_gpio_direction_from_control(ctrl, offset)
+ == GPIO_LINE_DIRECTION_IN)
+ ? MAX77759_GPIOx_INVAL_MASK(offset)
+ : MAX77759_GPIOx_OUTVAL_MASK(offset));
+
+ return !!(ctrl & mask);
+}
+
+static int max77759_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ctrl, new_ctrl;
+
+ guard(mutex)(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ if (ctrl < 0)
+ return ctrl;
+
+ new_ctrl = ctrl & ~MAX77759_GPIOx_OUTVAL_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_OUTVAL(offset, value);
+
+ if (new_ctrl == ctrl)
+ return 0;
+
+ return max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+}
+
+static void max77759_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chip->irq_mask &= ~MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(hwirq);
+ chip->irq_mask |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+ chip->irq_mask_changed |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void max77759_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+
+ chip->irq_mask &= ~MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(hwirq);
+ chip->irq_mask |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 0);
+ chip->irq_mask_changed |= MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(hwirq, 1);
+}
+
+static int max77759_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ chip->irq_trig &= ~MAX77759_GPIOx_TRIGGER_MASK(hwirq);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ chip->irq_trig |= MAX77759_GPIOx_TRIGGER(hwirq,
+ MAX77759_GPIO_TRIGGER_RISING);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ chip->irq_trig |= MAX77759_GPIOx_TRIGGER(hwirq,
+ MAX77759_GPIO_TRIGGER_FALLING);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ chip->irq_trig_changed |= MAX77759_GPIOx_TRIGGER(hwirq, 1);
+
+ return 0;
+}
+
+static void max77759_gpio_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+static int max77759_gpio_bus_sync_unlock_helper(struct gpio_chip *gc,
+ struct max77759_gpio_chip *chip)
+ __must_hold(&chip->maxq_lock)
+{
+ int ctrl, trigger, new_trigger, new_ctrl;
+ unsigned long irq_trig_changed;
+ int offset;
+ int ret;
+
+ lockdep_assert_held(&chip->maxq_lock);
+
+ ctrl = max77759_gpio_maxq_gpio_control_read(chip);
+ trigger = max77759_gpio_maxq_gpio_trigger_read(chip);
+ if (ctrl < 0 || trigger < 0) {
+ dev_err(gc->parent, "failed to read current state: %d / %d\n",
+ ctrl, trigger);
+ return (ctrl < 0) ? ctrl : trigger;
+ }
+
+ new_trigger = trigger & ~chip->irq_trig_changed;
+ new_trigger |= (chip->irq_trig & chip->irq_trig_changed);
+
+ /* change GPIO direction if required */
+ new_ctrl = ctrl;
+ irq_trig_changed = chip->irq_trig_changed;
+ for_each_set_bit(offset, &irq_trig_changed, MAX77759_N_GPIOS) {
+ new_ctrl &= ~MAX77759_GPIOx_DIR_MASK(offset);
+ new_ctrl |= MAX77759_GPIOx_DIR(offset, MAX77759_GPIO_DIR_IN);
+ }
+
+ if (new_trigger != trigger) {
+ ret = max77759_gpio_maxq_gpio_trigger_write(chip, new_trigger);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to write new trigger: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (new_ctrl != ctrl) {
+ ret = max77759_gpio_maxq_gpio_control_write(chip, new_ctrl);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to write new control: %d\n", ret);
+ return ret;
+ }
+ }
+
+ chip->irq_trig_changed = 0;
+
+ return 0;
+}
+
+static void max77759_gpio_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct max77759_gpio_chip *chip = gpiochip_get_data(gc);
+ int ret;
+
+ scoped_guard(mutex, &chip->maxq_lock) {
+ ret = max77759_gpio_bus_sync_unlock_helper(gc, chip);
+ if (ret)
+ goto out_unlock;
+ }
+
+ ret = regmap_update_bits(chip->map,
+ MAX77759_MAXQ_REG_UIC_INT1_M,
+ chip->irq_mask_changed, chip->irq_mask);
+ if (ret) {
+ dev_err(gc->parent,
+ "failed to update UIC_INT1 irq mask: %d\n", ret);
+ goto out_unlock;
+ }
+
+ chip->irq_mask_changed = 0;
+
+out_unlock:
+ mutex_unlock(&chip->irq_lock);
+}
+
+static void max77759_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ seq_puts(p, dev_name(gc->parent));
+}
+
+static const struct irq_chip max77759_gpio_irq_chip = {
+ .irq_mask = max77759_gpio_irq_mask,
+ .irq_unmask = max77759_gpio_irq_unmask,
+ .irq_set_type = max77759_gpio_set_irq_type,
+ .irq_bus_lock = max77759_gpio_bus_lock,
+ .irq_bus_sync_unlock = max77759_gpio_bus_sync_unlock,
+ .irq_print_chip = max77759_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static irqreturn_t max77759_gpio_irqhandler(int irq, void *data)
+{
+ struct max77759_gpio_chip *chip = data;
+ struct gpio_chip *gc = &chip->gc;
+ bool handled = false;
+
+ /* iterate until no interrupt is pending */
+ while (true) {
+ unsigned int uic_int1;
+ int ret;
+ unsigned long pending;
+ int offset;
+
+ ret = regmap_read(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
+ &uic_int1);
+ if (ret < 0) {
+ dev_err_ratelimited(gc->parent,
+ "failed to read IRQ status: %d\n",
+ ret);
+ /*
+			 * If nothing has been handled yet, we have not
+			 * completed even one loop iteration, so return
+			 * IRQ_NONE; otherwise return IRQ_HANDLED.
+ */
+ return IRQ_RETVAL(handled);
+ }
+
+ pending = uic_int1;
+ pending &= (MAX77759_MAXQ_REG_UIC_INT1_GPIO6I
+ | MAX77759_MAXQ_REG_UIC_INT1_GPIO5I);
+ if (!pending)
+ break;
+
+ for_each_set_bit(offset, &pending, MAX77759_N_GPIOS) {
+ /*
+ * ACK interrupt by writing 1 to bit 'offset', all
+ * others need to be written as 0. This needs to be
+			 * done unconditionally, hence regmap_set_bits() is
+ * inappropriate here.
+ */
+ regmap_write(chip->map, MAX77759_MAXQ_REG_UIC_INT1,
+ BIT(offset));
+
+ handle_nested_irq(irq_find_mapping(gc->irq.domain,
+ offset));
+
+ handled = true;
+ }
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static int max77759_gpio_probe(struct platform_device *pdev)
+{
+ struct max77759_gpio_chip *chip;
+ int irq;
+ struct gpio_irq_chip *girq;
+ int ret;
+ unsigned long irq_flags;
+ struct irq_data *irqd;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->map = dev_get_regmap(pdev->dev.parent, "maxq");
+ if (!chip->map)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Missing regmap\n");
+
+ irq = platform_get_irq_byname(pdev, "GPI");
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "Failed to get IRQ\n");
+
+ chip->max77759 = dev_get_drvdata(pdev->dev.parent);
+ ret = devm_mutex_init(&pdev->dev, &chip->maxq_lock);
+ if (ret)
+ return ret;
+ ret = devm_mutex_init(&pdev->dev, &chip->irq_lock);
+ if (ret)
+ return ret;
+
+ chip->gc.base = -1;
+ chip->gc.label = dev_name(&pdev->dev);
+ chip->gc.parent = &pdev->dev;
+ chip->gc.can_sleep = true;
+
+ chip->gc.names = max77759_gpio_line_names;
+ chip->gc.ngpio = MAX77759_N_GPIOS;
+ chip->gc.get_direction = max77759_gpio_get_direction;
+ chip->gc.direction_input = max77759_gpio_direction_input;
+ chip->gc.direction_output = max77759_gpio_direction_output;
+ chip->gc.get = max77759_gpio_get_value;
+ chip->gc.set_rv = max77759_gpio_set_value;
+
+ girq = &chip->gc.irq;
+ gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ girq->threaded = true;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to add GPIO chip\n");
+
+ irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ irqd = irq_get_irq_data(irq);
+ if (irqd)
+ irq_flags |= irqd_get_trigger_type(irqd);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ max77759_gpio_irqhandler, irq_flags,
+ dev_name(&pdev->dev), chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request IRQ\n");
+
+ return ret;
+}
+
+static const struct of_device_id max77759_gpio_of_id[] = {
+ { .compatible = "maxim,max77759-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77759_gpio_of_id);
+
+static const struct platform_device_id max77759_gpio_platform_id[] = {
+ { "max77759-gpio", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max77759_gpio_platform_id);
+
+static struct platform_driver max77759_gpio_driver = {
+ .driver = {
+ .name = "max77759-gpio",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = max77759_gpio_of_id,
+ },
+ .probe = max77759_gpio_probe,
+ .id_table = max77759_gpio_platform_id,
+};
+
+module_platform_driver(max77759_gpio_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("GPIO driver for Maxim MAX77759");
+MODULE_LICENSE("GPL");
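The new max77759 driver uses the scope-based locking helpers from <linux/cleanup.h> throughout: guard(mutex)(...) holds a mutex until the enclosing scope ends, and scoped_guard() confines it to a single block, removing unlock-on-error boilerplate. A minimal sketch of the idiom, with a hypothetical lock and function:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo_update(int *shared, int value)
{
	/* Dropped automatically on every return path. */
	guard(mutex)(&demo_lock);

	if (value < 0)
		return -EINVAL;	/* no explicit unlock needed */

	*shared = value;
	return 0;
}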
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 7ee891ef6905..5ee2991ecdfd 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -119,7 +119,7 @@ static int mb86s70_gpio_get(struct gpio_chip *gc, unsigned gpio)
return !!(readl(gchip->base + PDR(gpio)) & OFFSET(gpio));
}
-static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
+static int mb86s70_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct mb86s70_gpio_chip *gchip = gpiochip_get_data(gc);
unsigned long flags;
@@ -135,6 +135,8 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
writel(val, gchip->base + PDR(gpio));
spin_unlock_irqrestore(&gchip->lock, flags);
+
+ return 0;
}
static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
@@ -178,7 +180,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.request = mb86s70_gpio_request;
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
- gchip->gc.set = mb86s70_gpio_set;
+ gchip->gc.set_rv = mb86s70_gpio_set;
gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 5fb357d7b78a..e68956104161 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -57,15 +57,18 @@ static int __mc33880_set(struct mc33880 *mc, unsigned offset, int value)
}
-static void mc33880_set(struct gpio_chip *chip, unsigned offset, int value)
+static int mc33880_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct mc33880 *mc = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&mc->lock);
- __mc33880_set(mc, offset, value);
+ ret = __mc33880_set(mc, offset, value);
mutex_unlock(&mc->lock);
+
+ return ret;
}
static int mc33880_probe(struct spi_device *spi)
@@ -100,7 +103,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->spi = spi;
mc->chip.label = DRIVER_NAME;
- mc->chip.set = mc33880_set;
+ mc->chip.set_rv = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
mc->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 48e3768a830e..12cf36f9ca63 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -89,7 +89,7 @@ struct ioh_gpio {
static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
-static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+static int ioh_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
u32 reg_val;
struct ioh_gpio *chip = gpiochip_get_data(gpio);
@@ -104,6 +104,8 @@ static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
}
static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
@@ -222,7 +224,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->direction_input = ioh_gpio_direction_input;
gpio->get = ioh_gpio_get;
gpio->direction_output = ioh_gpio_direction_output;
- gpio->set = ioh_gpio_set;
+ gpio->set_rv = ioh_gpio_set;
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = num_port;
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index 10ea71273c89..9875e34bde72 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -190,7 +190,9 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
struct gpio_chip *gc;
+ char *colon_ptr;
int ret, irq;
+ long num;
gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
if (!gs)
@@ -227,25 +229,39 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
- irq = platform_get_irq(pdev, 0);
- if (irq >= 0) {
- girq = &gs->gc.irq;
- gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
- girq->default_type = IRQ_TYPE_NONE;
- /* This will let us handle the parent IRQ in the driver */
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->parent_handler = NULL;
- girq->handler = handle_bad_irq;
-
- /*
- * Directly request the irq here instead of passing
- * a flow-handler because the irq is shared.
- */
- ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
- IRQF_SHARED, dev_name(dev), gs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to request IRQ");
+ colon_ptr = strchr(dev_name(dev), ':');
+ if (!colon_ptr) {
+ dev_err(dev, "invalid device name format\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtol(++colon_ptr, 16, &num);
+ if (ret) {
+ dev_err(dev, "invalid device instance\n");
+ return ret;
+ }
+
+ if (!num) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0) {
+ girq = &gs->gc.irq;
+ gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
+ girq->default_type = IRQ_TYPE_NONE;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->parent_handler = NULL;
+ girq->handler = handle_bad_irq;
+
+ /*
+ * Directly request the irq here instead of passing
+ * a flow-handler because the irq is shared.
+ */
+ ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
+ IRQF_SHARED, dev_name(dev), gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ");
+ }
}
platform_set_drvdata(pdev, gs);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 541517536489..121efdd71e45 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -123,9 +123,12 @@ static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
+ gpiochip_enable_irq(gc, hwirq);
+
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
@@ -138,6 +141,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
@@ -148,6 +152,8 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+
+ gpiochip_disable_irq(gc, hwirq);
}
static void mpc8xxx_irq_ack(struct irq_data *d)
@@ -244,6 +250,8 @@ static struct irq_chip mpc8xxx_irq_chip = {
.irq_ack = mpc8xxx_irq_ack,
/* this might get overwritten in mpc8xxx_probe() */
.irq_set_type = mpc8xxx_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
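The mpc8xxx hunks apply the standard recipe for immutable GPIO irqchips: .irq_unmask calls gpiochip_enable_irq() before touching hardware, .irq_mask calls gpiochip_disable_irq() after, and the chip gains IRQCHIP_IMMUTABLE plus the resource helpers. Sketched in isolation with hypothetical names:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static void demo_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* hardware masking of the line would go here */
	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}

static void demo_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
	/* hardware unmasking of the line would go here */
}

static const struct irq_chip demo_irq_chip = {
	.name = "demo",
	.irq_mask = demo_irq_mask,
	.irq_unmask = demo_irq_unmask,
	.flags = IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};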
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 3604abcb6fec..57633a7b4270 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -408,9 +408,8 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
struct mvebu_gpio_chip *mvchip = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
@@ -420,10 +419,9 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
@@ -433,11 +431,10 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mvebu_gpio_write_edge_cause(mvchip, ~mask);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_mask(struct irq_data *d)
@@ -447,10 +444,9 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv &= ~mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
@@ -460,10 +456,9 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
ct->mask_cache_priv |= mask;
mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
- irq_gc_unlock(gc);
}
/*****************************************************************************
@@ -1242,7 +1237,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
return 0;
mvchip->domain =
- irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+ irq_domain_create_linear(of_fwnode_handle(np), ngpios, &irq_generic_chip_ops, NULL);
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 619b6fb9d833..fae1a30f8ae6 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -490,7 +490,14 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->gc.request = mxc_gpio_request;
port->gc.free = mxc_gpio_free;
port->gc.to_irq = mxc_gpio_to_irq;
- port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ /*
+	 * The driver is DT-only, so a fixed base only needs to be maintained
+	 * for legacy userspace using the sysfs interface.
+ */
+ if (IS_ENABLED(CONFIG_GPIO_SYSFS))
+ port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ else /* silence boot time warning */
+ port->gc.base = -1;
err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
if (err)
@@ -502,7 +509,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
goto out_bgio;
}
- port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
+ port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 024ad077e98d..b418fbccb26c 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -303,8 +303,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
goto out_iounmap;
}
- port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
goto out_iounmap;
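Several hunks in this patch set (mvebu, mxc and mxs above; rockchip, sa1100, sodaville, pxa, twl4030 and tb10x below) convert the OF-specific irq_domain_add_*() helpers to the fwnode-based irq_domain_create_*() API; the call sites only need the device_node wrapped in of_fwnode_handle(). The pattern, sketched:

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *demo_create_domain(struct device_node *np,
					     const struct irq_domain_ops *ops,
					     void *host_data)
{
	/* Previously: irq_domain_add_linear(np, 32, ops, host_data) */
	return irq_domain_create_linear(of_fwnode_handle(np), 32, ops,
					host_data);
}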
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 442435ded020..e80a96f39788 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -215,6 +215,8 @@ struct pca953x_chip {
DECLARE_BITMAP(irq_stat, MAX_LINE);
DECLARE_BITMAP(irq_trig_raise, MAX_LINE);
DECLARE_BITMAP(irq_trig_fall, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_level_high, MAX_LINE);
+ DECLARE_BITMAP(irq_trig_level_low, MAX_LINE);
#endif
atomic_t wakeup_path;
@@ -774,6 +776,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_or(irq_mask, irq_mask, chip->irq_trig_level_high, gc->ngpio);
+ bitmap_or(irq_mask, irq_mask, chip->irq_trig_level_low, gc->ngpio);
bitmap_complement(reg_direction, reg_direction, gc->ngpio);
bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
@@ -791,13 +795,15 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
struct device *dev = &chip->client->dev;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ if (!(type & IRQ_TYPE_SENSE_MASK)) {
dev_err(dev, "irq %d: unsupported type %d\n", d->irq, type);
return -EINVAL;
}
assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
+ assign_bit(hwirq, chip->irq_trig_level_low, type & IRQ_TYPE_LEVEL_LOW);
+ assign_bit(hwirq, chip->irq_trig_level_high, type & IRQ_TYPE_LEVEL_HIGH);
return 0;
}
@@ -810,6 +816,8 @@ static void pca953x_irq_shutdown(struct irq_data *d)
clear_bit(hwirq, chip->irq_trig_raise);
clear_bit(hwirq, chip->irq_trig_fall);
+ clear_bit(hwirq, chip->irq_trig_level_low);
+ clear_bit(hwirq, chip->irq_trig_level_high);
}
static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p)
@@ -840,6 +848,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(cur_stat, MAX_LINE);
DECLARE_BITMAP(new_stat, MAX_LINE);
DECLARE_BITMAP(trigger, MAX_LINE);
+ DECLARE_BITMAP(edges, MAX_LINE);
int ret;
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
@@ -857,13 +866,26 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
- if (bitmap_empty(trigger, gc->ngpio))
- return false;
+ if (bitmap_empty(chip->irq_trig_level_high, gc->ngpio) &&
+ bitmap_empty(chip->irq_trig_level_low, gc->ngpio)) {
+ if (bitmap_empty(trigger, gc->ngpio))
+ return false;
+ }
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
- bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
+ bitmap_or(edges, old_stat, cur_stat, gc->ngpio);
+ bitmap_and(pending, edges, trigger, gc->ngpio);
+
+ bitmap_and(cur_stat, new_stat, chip->irq_trig_level_high, gc->ngpio);
+ bitmap_and(cur_stat, cur_stat, chip->irq_mask, gc->ngpio);
+ bitmap_or(pending, pending, cur_stat, gc->ngpio);
+
+ bitmap_complement(cur_stat, new_stat, gc->ngpio);
+ bitmap_and(cur_stat, cur_stat, reg_direction, gc->ngpio);
+ bitmap_and(old_stat, cur_stat, chip->irq_trig_level_low, gc->ngpio);
+ bitmap_and(old_stat, old_stat, chip->irq_mask, gc->ngpio);
+ bitmap_or(pending, pending, old_stat, gc->ngpio);
return !bitmap_empty(pending, gc->ngpio);
}
@@ -952,7 +974,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
IRQF_ONESHOT | IRQF_SHARED, dev_name(dev),
chip);
if (ret)
- return dev_err_probe(dev, client->irq, "failed to request irq\n");
+ return dev_err_probe(dev, ret, "failed to request irq\n");
return 0;
}
@@ -1204,6 +1226,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
guard(mutex)(&chip->i2c_lock);
+ if (chip->client->irq > 0)
+ enable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(chip);
@@ -1216,6 +1240,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
static void pca953x_save_context(struct pca953x_chip *chip)
{
guard(mutex)(&chip->i2c_lock);
+
+ /* Disable IRQ to prevent early triggering while regmap "cache only" is on */
+ if (chip->client->irq > 0)
+ disable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, true);
}
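The pca953x interrupt handler works from latched input state, so the new level-trigger support is emulated in software: after the edge-based pending bits are computed, any unmasked line whose current level matches a requested level trigger is ORed in. The level-high step above reduces to roughly this bitmap operation (a sketch, assuming ngpio fits one sketch-sized bitmap):

#include <linux/bitmap.h>

static void demo_level_high_pending(unsigned long *pending,
				    const unsigned long *input,
				    const unsigned long *trig_level_high,
				    const unsigned long *irq_mask,
				    unsigned int ngpio)
{
	DECLARE_BITMAP(tmp, 64);	/* sketch assumes ngpio <= 64 */

	/* pending |= input & level-high-trigger & unmasked */
	bitmap_and(tmp, input, trig_level_high, ngpio);
	bitmap_and(tmp, tmp, irq_mask, ngpio);
	bitmap_or(pending, pending, tmp, ngpio);
}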
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 91cea97255fa..aead35ea090e 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -497,6 +497,8 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
writel_relaxed(grer, base + GRER_OFFSET);
writel_relaxed(gfer, base + GFER_OFFSET);
+
+ gpiochip_disable_irq(&pchip->chip, gpio);
}
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
@@ -516,17 +518,21 @@ static void pxa_unmask_muxed_gpio(struct irq_data *d)
unsigned int gpio = irqd_to_hwirq(d);
struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
+ gpiochip_enable_irq(&pchip->chip, gpio);
+
c->irq_mask |= GPIO_bit(gpio);
update_edge_detect(c);
}
-static struct irq_chip pxa_muxed_gpio_chip = {
+static const struct irq_chip pxa_muxed_gpio_chip = {
.name = "GPIO",
.irq_ack = pxa_ack_muxed_gpio,
.irq_mask = pxa_mask_muxed_gpio,
.irq_unmask = pxa_unmask_muxed_gpio,
.irq_set_type = pxa_gpio_irq_type,
.irq_set_wake = pxa_gpio_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int pxa_gpio_nums(struct platform_device *pdev)
@@ -636,9 +642,9 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (!pxa_last_gpio)
return -EINVAL;
- pchip->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
- pxa_last_gpio + 1, irq_base,
- 0, &pxa_irq_domain_ops, pchip);
+ pchip->irqdomain = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node),
+ pxa_last_gpio + 1, irq_base, 0,
+ &pxa_irq_domain_ops, pchip);
if (!pchip->irqdomain)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 01a3b3dac58b..c63352f2f1ec 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -521,7 +521,7 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
struct irq_chip_generic *gc;
int ret;
- bank->domain = irq_domain_add_linear(bank->of_node, 32,
+ bank->domain = irq_domain_create_linear(of_fwnode_handle(bank->of_node), 32,
&irq_generic_chip_ops, NULL);
if (!bank->domain) {
dev_warn(bank->dev, "could not init irq domain for bank %s\n",
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 242dad763ac4..3f3ee36bc3cb 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -319,7 +319,7 @@ void __init sa1100_init_gpio(void)
gpiochip_add_data(&sa1100_gpio_chip.chip, NULL);
- sa1100_gpio_irqdomain = irq_domain_add_simple(NULL,
+ sa1100_gpio_irqdomain = irq_domain_create_simple(NULL,
28, IRQ_GPIO0,
&sa1100_gpio_irqdomain_ops, sgc);
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index c2a2c76c1652..6a3c4c625138 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -169,7 +169,7 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
- sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
+ sd->id = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), SDV_NUM_PUB_GPIOS,
sd->irq_base, 0, &irq_domain_sdv_ops, sd);
if (!sd->id)
return -ENODEV;
diff --git a/drivers/gpio/gpio-spacemit-k1.c b/drivers/gpio/gpio-spacemit-k1.c
new file mode 100644
index 000000000000..3cc75c701ec4
--- /dev/null
+++ b/drivers/gpio/gpio-spacemit-k1.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Yixun Lan <dlan@gentoo.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+/* register offset */
+#define SPACEMIT_GPLR 0x00 /* port level - R */
+#define SPACEMIT_GPDR 0x0c /* port direction - R/W */
+#define SPACEMIT_GPSR 0x18 /* port set - W */
+#define SPACEMIT_GPCR 0x24 /* port clear - W */
+#define SPACEMIT_GRER 0x30 /* port rising edge - R/W */
+#define SPACEMIT_GFER 0x3c /* port falling edge - R/W */
+#define SPACEMIT_GEDR 0x48 /* edge detect status - R/W1C */
+#define SPACEMIT_GSDR 0x54 /* (set) direction - W */
+#define SPACEMIT_GCDR 0x60 /* (clear) direction - W */
+#define SPACEMIT_GSRER 0x6c /* (set) rising edge detect enable - W */
+#define SPACEMIT_GCRER 0x78 /* (clear) rising edge detect enable - W */
+#define SPACEMIT_GSFER 0x84 /* (set) falling edge detect enable - W */
+#define SPACEMIT_GCFER 0x90 /* (clear) falling edge detect enable - W */
+#define SPACEMIT_GAPMASK 0x9c /* interrupt mask, 0 disable, 1 enable - R/W */
+
+#define SPACEMIT_NR_BANKS 4
+#define SPACEMIT_NR_GPIOS_PER_BANK 32
+
+#define to_spacemit_gpio_bank(x) container_of((x), struct spacemit_gpio_bank, gc)
+
+struct spacemit_gpio;
+
+struct spacemit_gpio_bank {
+ struct gpio_chip gc;
+ struct spacemit_gpio *sg;
+ void __iomem *base;
+ u32 irq_mask;
+ u32 irq_rising_edge;
+ u32 irq_falling_edge;
+};
+
+struct spacemit_gpio {
+ struct device *dev;
+ struct spacemit_gpio_bank sgb[SPACEMIT_NR_BANKS];
+};
+
+static u32 spacemit_gpio_bank_index(struct spacemit_gpio_bank *gb)
+{
+ return (u32)(gb - gb->sg->sgb);
+}
+
+static irqreturn_t spacemit_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct spacemit_gpio_bank *gb = dev_id;
+ unsigned long pending;
+ u32 n, gedr;
+
+ gedr = readl(gb->base + SPACEMIT_GEDR);
+ if (!gedr)
+ return IRQ_NONE;
+ writel(gedr, gb->base + SPACEMIT_GEDR);
+
+ pending = gedr & gb->irq_mask;
+ if (!pending)
+ return IRQ_NONE;
+
+ for_each_set_bit(n, &pending, BITS_PER_LONG)
+ handle_nested_irq(irq_find_mapping(gb->gc.irq.domain, n));
+
+ return IRQ_HANDLED;
+}
+
+static void spacemit_gpio_irq_ack(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(irqd_to_hwirq(d)), gb->base + SPACEMIT_GEDR);
+}
+
+static void spacemit_gpio_irq_mask(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ gb->irq_mask &= ~bit;
+ writel(gb->irq_mask, gb->base + SPACEMIT_GAPMASK);
+
+ if (bit & gb->irq_rising_edge)
+ writel(bit, gb->base + SPACEMIT_GCRER);
+
+ if (bit & gb->irq_falling_edge)
+ writel(bit, gb->base + SPACEMIT_GCFER);
+}
+
+static void spacemit_gpio_irq_unmask(struct irq_data *d)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ gb->irq_mask |= bit;
+
+ if (bit & gb->irq_rising_edge)
+ writel(bit, gb->base + SPACEMIT_GSRER);
+
+ if (bit & gb->irq_falling_edge)
+ writel(bit, gb->base + SPACEMIT_GSFER);
+
+ writel(gb->irq_mask, gb->base + SPACEMIT_GAPMASK);
+}
+
+static int spacemit_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(d);
+ u32 bit = BIT(irqd_to_hwirq(d));
+
+ if (type & IRQ_TYPE_EDGE_RISING) {
+ gb->irq_rising_edge |= bit;
+ writel(bit, gb->base + SPACEMIT_GSRER);
+ } else {
+ gb->irq_rising_edge &= ~bit;
+ writel(bit, gb->base + SPACEMIT_GCRER);
+ }
+
+ if (type & IRQ_TYPE_EDGE_FALLING) {
+ gb->irq_falling_edge |= bit;
+ writel(bit, gb->base + SPACEMIT_GSFER);
+ } else {
+ gb->irq_falling_edge &= ~bit;
+ writel(bit, gb->base + SPACEMIT_GCFER);
+ }
+
+ return 0;
+}
+
+static void spacemit_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+ struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(data);
+
+ seq_printf(p, "%s-%d", dev_name(gb->gc.parent), spacemit_gpio_bank_index(gb));
+}
+
+static struct irq_chip spacemit_gpio_chip = {
+ .name = "k1-gpio-irqchip",
+ .irq_ack = spacemit_gpio_irq_ack,
+ .irq_mask = spacemit_gpio_irq_mask,
+ .irq_unmask = spacemit_gpio_irq_unmask,
+ .irq_set_type = spacemit_gpio_irq_set_type,
+ .irq_print_chip = spacemit_gpio_irq_print_chip,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static bool spacemit_of_node_instance_match(struct gpio_chip *gc, unsigned int i)
+{
+ struct spacemit_gpio_bank *gb = gpiochip_get_data(gc);
+ struct spacemit_gpio *sg = gb->sg;
+
+ if (i >= SPACEMIT_NR_BANKS)
+ return false;
+
+ return (gc == &sg->sgb[i].gc);
+}
+
+static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
+ void __iomem *regs,
+ int index, int irq)
+{
+ struct spacemit_gpio_bank *gb = &sg->sgb[index];
+ struct gpio_chip *gc = &gb->gc;
+ struct device *dev = sg->dev;
+ struct gpio_irq_chip *girq;
+ void __iomem *dat, *set, *clr, *dirin, *dirout;
+ int ret, bank_base[] = { 0x0, 0x4, 0x8, 0x100 };
+
+ gb->base = regs + bank_base[index];
+
+ dat = gb->base + SPACEMIT_GPLR;
+ set = gb->base + SPACEMIT_GPSR;
+ clr = gb->base + SPACEMIT_GPCR;
+ dirin = gb->base + SPACEMIT_GCDR;
+ dirout = gb->base + SPACEMIT_GSDR;
+
+ /* This registers 32 GPIO lines per bank */
+ ret = bgpio_init(gc, dev, 4, dat, set, clr, dirout, dirin,
+ BGPIOF_UNREADABLE_REG_SET | BGPIOF_UNREADABLE_REG_DIR);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init gpio chip\n");
+
+ gb->sg = sg;
+
+ gc->label = dev_name(dev);
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
+ gc->ngpio = SPACEMIT_NR_GPIOS_PER_BANK;
+ gc->base = -1;
+ gc->of_gpio_n_cells = 3;
+ gc->of_node_instance_match = spacemit_of_node_instance_match;
+
+ girq = &gc->irq;
+ girq->threaded = true;
+ girq->handler = handle_simple_irq;
+
+ gpio_irq_chip_set_chip(girq, &spacemit_gpio_chip);
+
+ /* Disable Interrupt */
+ writel(0, gb->base + SPACEMIT_GAPMASK);
+ /* Disable Edge Detection Settings */
+ writel(0x0, gb->base + SPACEMIT_GRER);
+ writel(0x0, gb->base + SPACEMIT_GFER);
+ /* Clear Interrupt */
+ writel(0xffffffff, gb->base + SPACEMIT_GCRER);
+ writel(0xffffffff, gb->base + SPACEMIT_GCFER);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ spacemit_gpio_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ gb->gc.label, gb);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
+ ret = devm_gpiochip_add_data(dev, gc, gb);
+ if (ret)
+ return ret;
+
+	/* Distinguish the IRQ domain, for selecting three-cell mode */
+ irq_domain_update_bus_token(girq->domain, DOMAIN_BUS_WIRED);
+
+ return 0;
+}
+
+static int spacemit_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spacemit_gpio *sg;
+ struct clk *core_clk, *bus_clk;
+ void __iomem *regs;
+ int i, irq, ret;
+
+ sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return -ENOMEM;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ sg->dev = dev;
+
+ core_clk = devm_clk_get_enabled(dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(dev, PTR_ERR(core_clk), "failed to get clock\n");
+
+ bus_clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n");
+
+ for (i = 0; i < SPACEMIT_NR_BANKS; i++) {
+ ret = spacemit_gpio_add_bank(sg, regs, i, irq);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id spacemit_gpio_dt_ids[] = {
+ { .compatible = "spacemit,k1-gpio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, spacemit_gpio_dt_ids);
+
+static struct platform_driver spacemit_gpio_driver = {
+ .probe = spacemit_gpio_probe,
+ .driver = {
+ .name = "k1-gpio",
+ .of_match_table = spacemit_gpio_dt_ids,
+ },
+};
+module_platform_driver(spacemit_gpio_driver);
+
+MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>");
+MODULE_DESCRIPTION("GPIO driver for SpacemiT K1 SoC");
+MODULE_LICENSE("GPL");
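Each SpacemiT bank is built on the generic MMIO GPIO layer: bgpio_init() takes the level, set, clear and direction registers and fills in a complete gpio_chip. A minimal use of the helper with a hypothetical register layout:

#include <linux/gpio/driver.h>

static int demo_bank_init(struct gpio_chip *gc, struct device *dev,
			  void __iomem *base)
{
	/* 4-byte registers; the offsets below are illustrative only */
	return bgpio_init(gc, dev, 4,
			  base + 0x00,	/* data in */
			  base + 0x04,	/* set */
			  base + 0x08,	/* clear */
			  base + 0x0c,	/* direction: out */
			  base + 0x10,	/* direction: in */
			  0);
}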
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index b6335cde455f..8cf676fd0a0b 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -183,7 +183,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_add_linear(np,
+ tb10x_gpio->domain = irq_domain_create_linear(of_fwnode_handle(np),
tb10x_gpio->gc.ngpio,
&irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index fad979797486..cb303a26f4d3 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -103,20 +103,26 @@ static void timbgpio_irq_disable(struct irq_data *d)
{
struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
int offset = d->irq - tgpio->irq_base;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
tgpio->last_ier &= ~(1UL << offset);
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
spin_unlock_irqrestore(&tgpio->lock, flags);
+
+ gpiochip_disable_irq(&tgpio->gpio, hwirq);
}
static void timbgpio_irq_enable(struct irq_data *d)
{
struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
int offset = d->irq - tgpio->irq_base;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
+ gpiochip_enable_irq(&tgpio->gpio, hwirq);
+
spin_lock_irqsave(&tgpio->lock, flags);
tgpio->last_ier |= 1UL << offset;
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
@@ -205,11 +211,13 @@ static void timbgpio_irq(struct irq_desc *desc)
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
}
-static struct irq_chip timbgpio_irqchip = {
+static const struct irq_chip timbgpio_irqchip = {
.name = "GPIO",
.irq_enable = timbgpio_irq_enable,
.irq_disable = timbgpio_irq_disable,
.irq_set_type = timbgpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int timbgpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index bcd692229c7c..0d17985a5fdc 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -502,7 +502,6 @@ static void gpio_twl4030_power_off_action(void *data)
static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata;
- struct device_node *node = pdev->dev.of_node;
struct gpio_twl4030_priv *priv;
int ret, irq_base;
@@ -524,8 +523,8 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
return irq_base;
}
- irq_domain_add_legacy(node, TWL4030_GPIO_MAX, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), TWL4030_GPIO_MAX, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base);
if (ret < 0)
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 4dad7ce0c4dc..7de0d5b53d56 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -345,4 +345,6 @@ static struct platform_driver vf610_gpio_driver = {
.probe = vf610_gpio_probe,
};
-builtin_platform_driver(vf610_gpio_driver);
+module_platform_driver(vf610_gpio_driver);
+MODULE_DESCRIPTION("VF610 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index 13407fd4f0eb..eab6726953b4 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -401,10 +401,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file,
char buf[32], *trimmed;
int ret, dir, val = 0;
- ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (ret < 0)
return ret;
+ buf[ret] = '\0';
+
trimmed = strim(buf);
if (strcmp(trimmed, "input") == 0) {
@@ -623,12 +628,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file,
char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2];
int ret;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos,
user_buf, count);
if (ret < 0)
return ret;
- buf[strlen(buf) - 1] = '\0';
+ buf[ret] = '\0';
ret = gpiod_set_consumer_name(data->ad.desc, buf);
if (ret)
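Both gpio-virtuser hunks fix the same class of bug: simple_write_to_buffer() does not NUL-terminate what it copies, so the old code ran strim() over a possibly unterminated buffer in one case and indexed by strlen() of stale stack contents in the other. The safe pattern is to reserve a byte for the terminator and terminate at the returned length:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *file, const char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	/* Leave room for '\0'; terminate at exactly what was copied. */
	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf,
				     count);
	if (ret < 0)
		return ret;
	buf[ret] = '\0';

	return ret;
}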
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 48b829733b15..b51b1fa726bb 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -103,12 +103,32 @@ static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static struct irq_chip xgene_gpio_sb_irq_chip = {
+static void xgene_gpio_sb_irq_mask(struct irq_data *d)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+
+ gpiochip_disable_irq(&priv->gc, d->hwirq);
+}
+
+static void xgene_gpio_sb_irq_unmask(struct irq_data *d)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(&priv->gc, d->hwirq);
+
+ irq_chip_unmask_parent(d);
+}
+
+static const struct irq_chip xgene_gpio_sb_irq_chip = {
.name = "sbgpio",
.irq_eoi = irq_chip_eoi_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
+ .irq_mask = xgene_gpio_sb_irq_mask,
+ .irq_unmask = xgene_gpio_sb_irq_unmask,
.irq_set_type = xgene_gpio_sb_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi-core.c
index 69caa35c58df..12b24a717e43 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -23,29 +23,6 @@
#include "gpiolib.h"
#include "gpiolib-acpi.h"
-static int run_edge_events_on_boot = -1;
-module_param(run_edge_events_on_boot, int, 0444);
-MODULE_PARM_DESC(run_edge_events_on_boot,
- "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
-
-static char *ignore_wake;
-module_param(ignore_wake, charp, 0444);
-MODULE_PARM_DESC(ignore_wake,
- "controller@pin combos on which to ignore the ACPI wake flag "
- "ignore_wake=controller@pin[,controller@pin[,...]]");
-
-static char *ignore_interrupt;
-module_param(ignore_interrupt, charp, 0444);
-MODULE_PARM_DESC(ignore_interrupt,
- "controller@pin combos on which to ignore interrupt "
- "ignore_interrupt=controller@pin[,controller@pin[,...]]");
-
-struct acpi_gpiolib_dmi_quirk {
- bool no_edge_events_on_boot;
- char *ignore_wake;
- char *ignore_interrupt;
-};
-
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
@@ -96,10 +73,10 @@ struct acpi_gpio_chip {
* @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @wake_capable: wake capability as provided by ACPI
* @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
- * @wake_capable: wake capability as provided by ACPI
* @debounce: debounce timeout as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
@@ -107,25 +84,14 @@ struct acpi_gpio_info {
struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
+ bool wake_capable;
int pin_config;
int polarity;
int triggering;
- bool wake_capable;
unsigned int debounce;
unsigned int quirks;
};
-/*
- * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
- * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
- * late_initcall_sync() handler, so that other builtin drivers can register their
- * OpRegions before the event handlers can run. This list contains GPIO chips
- * for which the acpi_gpiochip_request_irqs() call has been deferred.
- */
-static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
-static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
-static bool acpi_gpio_deferred_req_irqs_done;
-
static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
{
/* First check the actual GPIO device */
@@ -268,7 +234,7 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
event->irq_requested = true;
/* Make sure we trigger the initial state of edge-triggered IRQs */
- if (run_edge_events_on_boot &&
+ if (acpi_gpio_need_run_edge_events_on_boot() &&
(event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
value = gpiod_get_raw_value_cansleep(event->desc);
if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
@@ -350,42 +316,6 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
return desc;
}
-static bool acpi_gpio_in_ignore_list(const char *ignore_list, const char *controller_in,
- unsigned int pin_in)
-{
- const char *controller, *pin_str;
- unsigned int pin;
- char *endp;
- int len;
-
- controller = ignore_list;
- while (controller) {
- pin_str = strchr(controller, '@');
- if (!pin_str)
- goto err;
-
- len = pin_str - controller;
- if (len == strlen(controller_in) &&
- strncmp(controller, controller_in, len) == 0) {
- pin = simple_strtoul(pin_str + 1, &endp, 10);
- if (*endp != 0 && *endp != ',')
- goto err;
-
- if (pin == pin_in)
- return true;
- }
-
- controller = strchr(controller, ',');
- if (controller)
- controller++;
- }
-
- return false;
-err:
- pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
- return false;
-}
-
static bool acpi_gpio_irq_is_wake(struct device *parent,
const struct acpi_resource_gpio *agpio)
{
@@ -394,7 +324,7 @@ static bool acpi_gpio_irq_is_wake(struct device *parent,
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;
- if (acpi_gpio_in_ignore_list(ignore_wake, dev_name(parent), pin)) {
+ if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, dev_name(parent), pin)) {
dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
return false;
}
@@ -437,7 +367,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
if (!handler)
return AE_OK;
- if (acpi_gpio_in_ignore_list(ignore_interrupt, dev_name(chip->parent), pin)) {
+ if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_INTERRUPT, dev_name(chip->parent), pin)) {
dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
return AE_OK;
}
@@ -525,7 +455,6 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
struct acpi_gpio_chip *acpi_gpio;
acpi_handle handle;
acpi_status status;
- bool defer;
if (!chip->parent || !chip->to_irq)
return;
@@ -544,14 +473,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
acpi_walk_resources(handle, METHOD_NAME__AEI,
acpi_gpiochip_alloc_event, acpi_gpio);
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- defer = !acpi_gpio_deferred_req_irqs_done;
- if (defer)
- list_add(&acpi_gpio->deferred_req_irqs_list_entry,
- &acpi_gpio_deferred_req_irqs_list);
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- if (defer)
+ if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry))
return;
acpi_gpiochip_request_irqs(acpi_gpio);
@@ -583,10 +505,7 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
- list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+ acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry);
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
if (event->irq_requested) {
@@ -604,6 +523,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
}
EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts);
+void __init acpi_gpio_process_deferred_list(struct list_head *list)
+{
+ struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+ list_for_each_entry_safe(acpi_gpio, tmp, list, deferred_req_irqs_list_entry)
+ acpi_gpiochip_request_irqs(acpi_gpio);
+}
+
int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
{
@@ -653,12 +580,12 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
for (gm = adev->driver_gpios; gm->name; gm++)
if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
- const struct acpi_gpio_params *par = gm->data + index;
+ const struct acpi_gpio_params *params = gm->data + index;
args->fwnode = acpi_fwnode_handle(adev);
- args->args[0] = par->crs_entry_index;
- args->args[1] = par->line_index;
- args->args[2] = par->active_low;
+ args->args[0] = params->crs_entry_index;
+ args->args[1] = params->line_index;
+ args->args[2] = params->active_low;
args->nargs = 3;
*quirks = gm->quirks;
@@ -743,10 +670,8 @@ static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
}
struct acpi_gpio_lookup {
- struct acpi_gpio_info info;
- int index;
- u16 pin_index;
- bool active_low;
+ struct acpi_gpio_params params;
+ struct acpi_gpio_info *info;
struct gpio_desc *desc;
int n;
};
@@ -754,6 +679,8 @@ struct acpi_gpio_lookup {
static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
{
struct acpi_gpio_lookup *lookup = data;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
return 1;
@@ -764,26 +691,26 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
struct gpio_desc *desc;
u16 pin_index;
- if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
- lookup->index++;
+ if (info->quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
+ params->crs_entry_index++;
- if (lookup->n++ != lookup->index)
+ if (lookup->n++ != params->crs_entry_index)
return 1;
- pin_index = lookup->pin_index;
+ pin_index = params->line_index;
if (pin_index >= agpio->pin_table_length)
return 1;
- if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
+ if (info->quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
desc = gpio_to_desc(agpio->pin_table[pin_index]);
else
desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
lookup->desc = desc;
- lookup->info.pin_config = agpio->pin_config;
- lookup->info.debounce = agpio->debounce_timeout;
- lookup->info.gpioint = gpioint;
- lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
+ info->pin_config = agpio->pin_config;
+ info->debounce = agpio->debounce_timeout;
+ info->gpioint = gpioint;
+ info->wake_capable = acpi_gpio_irq_is_wake(&info->adev->dev, agpio);
/*
* Polarity and triggering are only specified for GpioInt
@@ -792,23 +719,23 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
* - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW
* - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
*/
- if (lookup->info.gpioint) {
- lookup->info.polarity = agpio->polarity;
- lookup->info.triggering = agpio->triggering;
+ if (info->gpioint) {
+ info->polarity = agpio->polarity;
+ info->triggering = agpio->triggering;
} else {
- lookup->info.polarity = lookup->active_low;
+ info->polarity = params->active_low;
}
- lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity);
+ info->flags = acpi_gpio_to_gpiod_flags(agpio, info->polarity);
}
return 1;
}
-static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
- struct acpi_gpio_info *info)
+static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup)
{
- struct acpi_device *adev = lookup->info.adev;
+ struct acpi_gpio_info *info = lookup->info;
+ struct acpi_device *adev = info->adev;
struct list_head res_list;
int ret;
@@ -825,22 +752,22 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
if (!lookup->desc)
return -ENOENT;
- if (info)
- *info = lookup->info;
return 0;
}
-static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
- const char *propname, int index,
+static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode, const char *propname,
struct acpi_gpio_lookup *lookup)
{
struct fwnode_reference_args args;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
+ unsigned int index = params->crs_entry_index;
unsigned int quirks = 0;
int ret;
memset(&args, 0, sizeof(args));
- ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
- &args);
+
+ ret = __acpi_node_get_property_reference(fwnode, propname, index, 3, &args);
if (ret) {
struct acpi_device *adev;
@@ -857,12 +784,12 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
if (args.nargs != 3)
return -EPROTO;
- lookup->index = args.args[0];
- lookup->pin_index = args.args[1];
- lookup->active_low = !!args.args[2];
+ params->crs_entry_index = args.args[0];
+ params->line_index = args.args[1];
+ params->active_low = !!args.args[2];
- lookup->info.adev = to_acpi_device_node(args.fwnode);
- lookup->info.quirks = quirks;
+ info->adev = to_acpi_device_node(args.fwnode);
+ info->quirks = quirks;
return 0;
}
@@ -871,96 +798,83 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
* @adev: pointer to a ACPI device to get GPIO from
* @propname: Property name of the GPIO (optional)
- * @index: index of GpioIo/GpioInt resource (starting from %0)
- * @info: info pointer to fill in (optional)
+ * @lookup: pointer to struct acpi_gpio_lookup to fill in
*
- * Function goes through ACPI resources for @adev and based on @index looks
+ * Function goes through ACPI resources for @adev and based on @lookup.params.crs_entry_index looks
* up a GpioIo/GpioInt resource, translates it to the Linux GPIO descriptor,
- * and returns it. @index matches GpioIo/GpioInt resources only so if there
- * are total %3 GPIO resources, the index goes from %0 to %2.
+ * and returns it. @lookup.params.crs_entry_index matches GpioIo/GpioInt resources only, so if there
+ * are 3 GPIO resources in total, the index goes from 0 to 2.
*
* If @propname is specified the GPIO is looked up using device property. In
* that case @index is used to select the GPIO entry in the property value
* (in case of multiple).
*
* Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
+ * 0 on success, negative errno on failure.
+ *
+ * The @lookup is filled with the GPIO descriptor to use with the Linux generic GPIO API.
+ * If the GPIO cannot be translated, an error is returned.
*
* Note: if the GPIO resource has multiple entries in the pin list, this
* function only returns the first.
*/
-static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
- const char *propname,
- int index,
- struct acpi_gpio_info *info)
+static int acpi_get_gpiod_by_index(struct acpi_device *adev, const char *propname,
+ struct acpi_gpio_lookup *lookup)
{
- struct acpi_gpio_lookup lookup;
+ struct acpi_gpio_params *params = &lookup->params;
+ struct acpi_gpio_info *info = lookup->info;
int ret;
- memset(&lookup, 0, sizeof(lookup));
- lookup.index = index;
-
if (propname) {
dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname);
- ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev),
- propname, index, &lookup);
+ ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev), propname, lookup);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n",
- dev_name(&lookup.info.adev->dev), lookup.index,
- lookup.pin_index, lookup.active_low);
+ dev_dbg(&adev->dev, "GPIO: _DSD returned %s %u %u %u\n",
+ dev_name(&info->adev->dev),
+ params->crs_entry_index, params->line_index, params->active_low);
} else {
- dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
- lookup.info.adev = adev;
+ dev_dbg(&adev->dev, "GPIO: looking up %u in _CRS\n", params->crs_entry_index);
+ info->adev = adev;
}
- ret = acpi_gpio_resource_lookup(&lookup, info);
- return ret ? ERR_PTR(ret) : lookup.desc;
+ return acpi_gpio_resource_lookup(lookup);
}
/**
* acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node
* @fwnode: pointer to an ACPI firmware node to get the GPIO information from
* @propname: Property name of the GPIO
- * @index: index of GpioIo/GpioInt resource (starting from %0)
- * @info: info pointer to fill in (optional)
+ * @lookup: pointer to struct acpi_gpio_lookup to fill in
*
* This function uses the property-based GPIO lookup to get to the GPIO
* resource with the relevant information from a data-only ACPI firmware node
* and uses that to obtain the GPIO descriptor to return.
*
* Returns:
- * GPIO descriptor to use with Linux generic GPIO API.
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
- * returned.
+ * 0 on success, negative errno on failure.
+ *
+ * The @lookup is filled with the GPIO descriptor to use with the Linux generic GPIO API.
+ * If the GPIO cannot be translated, an error is returned.
*/
-static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
- const char *propname,
- int index,
- struct acpi_gpio_info *info)
+static int acpi_get_gpiod_from_data(struct fwnode_handle *fwnode, const char *propname,
+ struct acpi_gpio_lookup *lookup)
{
- struct acpi_gpio_lookup lookup;
int ret;
if (!is_acpi_data_node(fwnode))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
if (!propname)
- return ERR_PTR(-EINVAL);
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.index = index;
+ return -EINVAL;
- ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
+ ret = acpi_gpio_property_lookup(fwnode, propname, lookup);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- ret = acpi_gpio_resource_lookup(&lookup, info);
- return ret ? ERR_PTR(ret) : lookup.desc;
+ return acpi_gpio_resource_lookup(lookup);
}
static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
@@ -982,17 +896,25 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
bool can_fallback, struct acpi_gpio_info *info)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
+ struct acpi_gpio_lookup lookup;
struct gpio_desc *desc;
char propname[32];
+ int ret;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.params.crs_entry_index = idx;
+ lookup.info = info;
/* Try first from _DSD */
for_each_gpio_property_name(propname, con_id) {
if (adev)
- desc = acpi_get_gpiod_by_index(adev,
- propname, idx, info);
+ ret = acpi_get_gpiod_by_index(adev, propname, &lookup);
else
- desc = acpi_get_gpiod_from_data(fwnode,
- propname, idx, info);
+ ret = acpi_get_gpiod_from_data(fwnode, propname, &lookup);
+ if (ret)
+ continue;
+
+ desc = lookup.desc;
if (PTR_ERR(desc) == -EPROBE_DEFER)
return desc;
@@ -1001,8 +923,13 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
}
/* Then from plain _CRS GPIOs */
- if (can_fallback)
- return acpi_get_gpiod_by_index(adev, NULL, idx, info);
+ if (can_fallback) {
+ ret = acpi_get_gpiod_by_index(adev, NULL, &lookup);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return lookup.desc;
+ }
return ERR_PTR(-ENOENT);
}
@@ -1488,248 +1415,3 @@ int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
}
return count ? count : -ENOENT;
}
-
-/* Run deferred acpi_gpiochip_request_irqs() */
-static int __init acpi_gpio_handle_deferred_request_irqs(void)
-{
- struct acpi_gpio_chip *acpi_gpio, *tmp;
-
- mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
- list_for_each_entry_safe(acpi_gpio, tmp,
- &acpi_gpio_deferred_req_irqs_list,
- deferred_req_irqs_list_entry)
- acpi_gpiochip_request_irqs(acpi_gpio);
-
- acpi_gpio_deferred_req_irqs_done = true;
- mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
-
- return 0;
-}
-/* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-
-static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
- {
- /*
- * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
- * a non existing micro-USB-B connector which puts the HDMI
- * DDC pins in GPIO mode, breaking HDMI support.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .no_edge_events_on_boot = true,
- },
- },
- {
- /*
- * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
- * instead of controlling the actual micro-USB-B turns the 5V
- * boost for its USB-A connector off. The actual micro-USB-B
- * connector is wired for charging only.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .no_edge_events_on_boot = true,
- },
- },
- {
- /*
- * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FC:02@12",
- },
- },
- {
- /*
- * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FF:01 pin 0, causing spurious wakeups.
- * When suspending by closing the LID, the power to the USB
- * keyboard is turned off, causing INT0002 ACPI events to
- * trigger once the XHCI controller notices the keyboard is
- * gone. So INT0002 events cause spurious wakeups too. Ignoring
- * EC wakes breaks wakeup when opening the lid, the user needs
- * to press the power-button to wakeup the system. The
- * alternative is suspend simply not working, which is worse.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FF:01@0,INT0002:00@2",
- },
- },
- {
- /*
- * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FC:02 pin 28, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
- DMI_MATCH(DMI_BOARD_NAME, "815D"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FC:02@28",
- },
- },
- {
- /*
- * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
- * external embedded-controller connected via I2C + an ACPI GPIO
- * event handler on INT33FF:01 pin 0, causing spurious wakeups.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
- DMI_MATCH(DMI_BOARD_NAME, "813E"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "INT33FF:01@0",
- },
- },
- {
- /*
- * Interrupt storm caused from edge triggered floating pin
- * Found in BIOS UX325UAZ.300
- * https://bugzilla.kernel.org/show_bug.cgi?id=216208
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "AMDI0030:00@18",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.8
- * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "ELAN0415:00@9",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.8
- * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "ELAN0415:00@9",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 1.7.7
- */
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "SYNA1202:00@16",
- },
- },
- {
- /*
- * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
- * a "dolby" button. At the ACPI level an _AEI event-handler
- * is connected which sets an ACPI variable to 1 on both
- * edges. This variable can be polled + cleared to 0 using
- * WMI. But since the variable is set on both edges the WMI
- * interface is pretty useless even when polling.
- * So instead the x86-android-tablets code instantiates
- * a gpio-keys platform device for it.
- * Ignore the _AEI handler for the pin, so that it is not busy.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "INT33FC:00@3",
- },
- },
- {
- /*
- * Spurious wakeups from TP_ATTN# pin
- * Found in BIOS 0.35
- * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
- DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_wake = "PNP0C50:00@8",
- },
- },
- {
- /*
- * Spurious wakeups from GPIO 11
- * Found in BIOS 1.04
- * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
- },
- .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
- .ignore_interrupt = "AMDI0030:00@11",
- },
- },
- {} /* Terminating entry */
-};
-
-static int __init acpi_gpio_setup_params(void)
-{
- const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
- const struct dmi_system_id *id;
-
- id = dmi_first_match(gpiolib_acpi_quirks);
- if (id)
- quirk = id->driver_data;
-
- if (run_edge_events_on_boot < 0) {
- if (quirk && quirk->no_edge_events_on_boot)
- run_edge_events_on_boot = 0;
- else
- run_edge_events_on_boot = 1;
- }
-
- if (ignore_wake == NULL && quirk && quirk->ignore_wake)
- ignore_wake = quirk->ignore_wake;
-
- if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
- ignore_interrupt = quirk->ignore_interrupt;
-
- return 0;
-}
-
-/* Directly after dmi_setup() which runs as core_initcall() */
-postcore_initcall(acpi_gpio_setup_params);
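
The ignore_wake/ignore_interrupt parser removed from gpiolib-acpi-core.c above reappears behind acpi_gpio_in_ignore_list() in the new gpiolib-acpi-quirks.c below. Its controller@pin grammar is easiest to check outside the kernel; the following standalone C sketch mirrors the matching logic (in_ignore_list() and the sample strings are illustrative only, and a malformed entry simply fails the match here, where the kernel additionally logs a one-time error):

	/* build: cc -std=c99 demo.c */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static bool in_ignore_list(const char *list, const char *ctrl, unsigned int pin)
	{
		const char *p = list;

		while (p && *p) {
			const char *at = strchr(p, '@');
			char *end;

			if (!at)
				return false;		/* malformed entry */

			size_t len = at - p;
			unsigned long val = strtoul(at + 1, &end, 10);

			if (*end != '\0' && *end != ',')
				return false;		/* malformed pin number */
			if (len == strlen(ctrl) && !strncmp(p, ctrl, len) && val == pin)
				return true;

			p = strchr(p, ',');		/* advance to next controller@pin */
			if (p)
				p++;
		}
		return false;
	}

	int main(void)
	{
		const char *list = "INT33FF:01@0,INT0002:00@2";

		printf("%d\n", in_ignore_list(list, "INT0002:00", 2));	/* 1 */
		printf("%d\n", in_ignore_list(list, "INT33FF:01", 5));	/* 0 */
		return 0;
	}
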
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
new file mode 100644
index 000000000000..219667315b2c
--- /dev/null
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI quirks for GPIO ACPI helpers
+ *
+ * Author: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "gpiolib-acpi.h"
+
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+
+static char *ignore_wake;
+module_param(ignore_wake, charp, 0444);
+MODULE_PARM_DESC(ignore_wake,
+ "controller@pin combos on which to ignore the ACPI wake flag "
+ "ignore_wake=controller@pin[,controller@pin[,...]]");
+
+static char *ignore_interrupt;
+module_param(ignore_interrupt, charp, 0444);
+MODULE_PARM_DESC(ignore_interrupt,
+ "controller@pin combos on which to ignore interrupt "
+ "ignore_interrupt=controller@pin[,controller@pin[,...]]");
+
+/*
+ * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
+ * late_initcall_sync() handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains GPIO chips
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
+
+bool acpi_gpio_add_to_deferred_list(struct list_head *list)
+{
+ bool defer;
+
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ defer = !acpi_gpio_deferred_req_irqs_done;
+ if (defer)
+ list_add(list, &acpi_gpio_deferred_req_irqs_list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return defer;
+}
+
+void acpi_gpio_remove_from_deferred_list(struct list_head *list)
+{
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ if (!list_empty(list))
+ list_del_init(list);
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+}
+
+int acpi_gpio_need_run_edge_events_on_boot(void)
+{
+ return run_edge_events_on_boot;
+}
+
+bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
+ const char *controller_in, unsigned int pin_in)
+{
+ const char *ignore_list, *controller, *pin_str;
+ unsigned int pin;
+ char *endp;
+ int len;
+
+ switch (list) {
+ case ACPI_GPIO_IGNORE_WAKE:
+ ignore_list = ignore_wake;
+ break;
+ case ACPI_GPIO_IGNORE_INTERRUPT:
+ ignore_list = ignore_interrupt;
+ break;
+ default:
+ return false;
+ }
+
+ controller = ignore_list;
+ while (controller) {
+ pin_str = strchr(controller, '@');
+ if (!pin_str)
+ goto err;
+
+ len = pin_str - controller;
+ if (len == strlen(controller_in) &&
+ strncmp(controller, controller_in, len) == 0) {
+ pin = simple_strtoul(pin_str + 1, &endp, 10);
+ if (*endp != 0 && *endp != ',')
+ goto err;
+
+ if (pin == pin_in)
+ return true;
+ }
+
+ controller = strchr(controller, ',');
+ if (controller)
+ controller++;
+ }
+
+ return false;
+err:
+ pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
+ return false;
+}
+
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int __init acpi_gpio_handle_deferred_request_irqs(void)
+{
+ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+ acpi_gpio_process_deferred_list(&acpi_gpio_deferred_req_irqs_list);
+ acpi_gpio_deferred_req_irqs_done = true;
+ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+struct acpi_gpiolib_dmi_quirk {
+ bool no_edge_events_on_boot;
+ char *ignore_wake;
+ char *ignore_interrupt;
+};
+
+static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ {
+ /*
+ * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+		 * a non-existent micro-USB-B connector which puts the HDMI
+ * DDC pins in GPIO mode, breaking HDMI support.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .no_edge_events_on_boot = true,
+ },
+ },
+ {
+ /*
+ * The Terra Pad 1061 has a micro-USB-B id-pin handler, which
+ * instead of controlling the actual micro-USB-B turns the 5V
+ * boost for its USB-A connector off. The actual micro-USB-B
+ * connector is wired for charging only.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .no_edge_events_on_boot = true,
+ },
+ },
+ {
+ /*
+ * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FFC:02 pin 12, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@12",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+ * When suspending by closing the LID, the power to the USB
+ * keyboard is turned off, causing INT0002 ACPI events to
+ * trigger once the XHCI controller notices the keyboard is
+ * gone. So INT0002 events cause spurious wakeups too. Ignoring
+ * EC wakes breaks wakeup when opening the lid, the user needs
+		 * to press the power-button to wake up the system. The
+ * alternative is suspend simply not working, which is worse.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FF:01@0,INT0002:00@2",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FC:02 pin 28, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_MATCH(DMI_BOARD_NAME, "815D"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FC:02@28",
+ },
+ },
+ {
+ /*
+ * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
+ * external embedded-controller connected via I2C + an ACPI GPIO
+ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_MATCH(DMI_BOARD_NAME, "813E"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "INT33FF:01@0",
+ },
+ },
+ {
+ /*
+ * Interrupt storm caused from edge triggered floating pin
+ * Found in BIOS UX325UAZ.300
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216208
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@18",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.8
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ELAN0415:00@9",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
+ {
+ /*
+ * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
+ * a "dolby" button. At the ACPI level an _AEI event-handler
+ * is connected which sets an ACPI variable to 1 on both
+ * edges. This variable can be polled + cleared to 0 using
+ * WMI. But since the variable is set on both edges the WMI
+ * interface is pretty useless even when polling.
+ * So instead the x86-android-tablets code instantiates
+ * a gpio-keys platform device for it.
+ * Ignore the _AEI handler for the pin, so that it is not busy.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "INT33FC:00@3",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 0.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from GPIO 11
+ * Found in BIOS 1.04
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@11",
+ },
+ },
+ {} /* Terminating entry */
+};
+
+static int __init acpi_gpio_setup_params(void)
+{
+ const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
+ const struct dmi_system_id *id;
+
+ id = dmi_first_match(gpiolib_acpi_quirks);
+ if (id)
+ quirk = id->driver_data;
+
+ if (run_edge_events_on_boot < 0) {
+ if (quirk && quirk->no_edge_events_on_boot)
+ run_edge_events_on_boot = 0;
+ else
+ run_edge_events_on_boot = 1;
+ }
+
+ if (ignore_wake == NULL && quirk && quirk->ignore_wake)
+ ignore_wake = quirk->ignore_wake;
+
+ if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
+ ignore_interrupt = quirk->ignore_interrupt;
+
+ return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
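
The file above also keeps the late_initcall_sync() handshake in one place: callers queue themselves while the "done" gate is closed, the gate opener flushes the queue once, and everyone afterwards runs directly. A minimal standalone sketch of that defer-until-ready pattern, with a pthread mutex standing in for the kernel mutex and all names hypothetical:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static bool late_init_done;
	static void (*deferred[8])(void);	/* toy capacity; the kernel uses a list_head */
	static int n_deferred;

	/* returns true if the call was queued for later */
	static bool add_to_deferred(void (*fn)(void))
	{
		bool defer;

		pthread_mutex_lock(&lock);
		defer = !late_init_done;
		if (defer)
			deferred[n_deferred++] = fn;
		pthread_mutex_unlock(&lock);
		return defer;
	}

	static void late_init(void)
	{
		pthread_mutex_lock(&lock);
		for (int i = 0; i < n_deferred; i++)
			deferred[i]();		/* flush what was queued */
		late_init_done = true;		/* later callers run directly */
		pthread_mutex_unlock(&lock);
	}

	static void request_irqs(void) { puts("request_irqs"); }

	int main(void)
	{
		if (!add_to_deferred(request_irqs))
			request_irqs();		/* gate already open: run now */
		late_init();			/* prints "request_irqs" exactly once */
		return 0;
	}
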
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 7e1c51d04040..a90267470a4e 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -58,4 +58,19 @@ static inline int acpi_gpio_count(const struct fwnode_handle *fwnode,
}
#endif
+void acpi_gpio_process_deferred_list(struct list_head *list);
+
+bool acpi_gpio_add_to_deferred_list(struct list_head *list);
+void acpi_gpio_remove_from_deferred_list(struct list_head *list);
+
+int acpi_gpio_need_run_edge_events_on_boot(void);
+
+enum acpi_gpio_ignore_list {
+ ACPI_GPIO_IGNORE_WAKE,
+ ACPI_GPIO_IGNORE_INTERRUPT,
+};
+
+bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
+ const char *controller_in, unsigned int pin_in);
+
#endif /* GPIOLIB_ACPI_H */
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 107d75558b5a..e6a289fa0f8f 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1366,9 +1366,6 @@ static long linereq_set_values(struct linereq *lr, void __user *ip)
/* scan requested lines to determine the subset to be set */
for (num_set = 0, i = 0; i < lr->num_lines; i++) {
if (lv.mask & BIT_ULL(i)) {
- /* setting inputs is not allowed */
- if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
- return -EPERM;
/* add to compacted values */
if (lv.bits & BIT_ULL(i))
__set_bit(num_set, vals);
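
The per-line -EPERM check dropped from linereq_set_values() here is not lost: the gpiolib.c hunks further down add the same FLAG_IS_OUT test to gpiod_set_raw_value_commit() and gpiod_set_array_value_complex(), so the character-device path now inherits the error from the core setters. Consumer-visible behaviour should be unchanged; a hedged sketch, assuming gpiod_set_value_cansleep() returns int in this tree as the sysfs hunk below implies:

	#include <linux/device.h>
	#include <linux/gpio/consumer.h>

	/* illustrative only: shows where -EPERM now originates */
	static int try_set_input_line(struct device *dev, struct gpio_desc *desc)
	{
		int ret = gpiod_direction_input(desc);

		if (ret)
			return ret;

		/* the core setter, not the cdev ioctl, rejects writes to inputs */
		ret = gpiod_set_value_cansleep(desc, 1);
		if (ret == -EPERM)
			dev_warn(dev, "line is an input, set rejected\n");
		return ret;
	}
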
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 120d1ec5af3b..4d5f83b17624 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -6,7 +6,7 @@
* Copyright (c) 2011 John Crispin <john@phrozen.org>
*/
-#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -19,32 +19,14 @@
struct fwnode_handle;
struct lock_class_key;
-static void devm_gpiod_release(struct device *dev, void *res)
+static void devm_gpiod_release(void *desc)
{
- struct gpio_desc **desc = res;
-
- gpiod_put(*desc);
-}
-
-static int devm_gpiod_match(struct device *dev, void *res, void *data)
-{
- struct gpio_desc **this = res, **gpio = data;
-
- return *this == *gpio;
+ gpiod_put(desc);
}
-static void devm_gpiod_release_array(struct device *dev, void *res)
+static void devm_gpiod_release_array(void *descs)
{
- struct gpio_descs **descs = res;
-
- gpiod_put_array(*descs);
-}
-
-static int devm_gpiod_match_array(struct device *dev, void *res, void *data)
-{
- struct gpio_descs **this = res, **gpios = data;
-
- return *this == *gpios;
+ gpiod_put_array(descs);
}
/**
@@ -114,8 +96,8 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
unsigned int idx,
enum gpiod_flags flags)
{
- struct gpio_desc **dr;
struct gpio_desc *desc;
+ int ret;
desc = gpiod_get_index(dev, con_id, idx, flags);
if (IS_ERR(desc))
@@ -126,23 +108,16 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
* already under resource management by this device.
*/
if (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
- struct devres *dres;
+ bool dres;
- dres = devres_find(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
+ dres = devm_is_action_added(dev, devm_gpiod_release, desc);
if (dres)
return desc;
}
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr) {
- gpiod_put(desc);
- return ERR_PTR(-ENOMEM);
- }
-
- *dr = desc;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release, desc);
+ if (ret)
+ return ERR_PTR(ret);
return desc;
}
@@ -171,22 +146,16 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
enum gpiod_flags flags,
const char *label)
{
- struct gpio_desc **dr;
struct gpio_desc *desc;
-
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
desc = gpiod_find_and_request(dev, fwnode, con_id, index, flags, label, false);
- if (IS_ERR(desc)) {
- devres_free(dr);
+ if (IS_ERR(desc))
return desc;
- }
- *dr = desc;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release, desc);
+ if (ret)
+ return ERR_PTR(ret);
return desc;
}
@@ -244,22 +213,16 @@ struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
- struct gpio_descs **dr;
struct gpio_descs *descs;
-
- dr = devres_alloc(devm_gpiod_release_array,
- sizeof(struct gpio_descs *), GFP_KERNEL);
- if (!dr)
- return ERR_PTR(-ENOMEM);
+ int ret;
descs = gpiod_get_array(dev, con_id, flags);
- if (IS_ERR(descs)) {
- devres_free(dr);
+ if (IS_ERR(descs))
return descs;
- }
- *dr = descs;
- devres_add(dev, dr);
+ ret = devm_add_action_or_reset(dev, devm_gpiod_release_array, descs);
+ if (ret)
+ return ERR_PTR(ret);
return descs;
}
@@ -307,8 +270,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_get_array_optional);
*/
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
- WARN_ON(devres_release(dev, devm_gpiod_release, devm_gpiod_match,
- &desc));
+ devm_release_action(dev, devm_gpiod_release, desc);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put);
@@ -332,13 +294,13 @@ void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
if (IS_ERR_OR_NULL(desc))
return;
- ret = devres_destroy(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
+
/*
* If the GPIO descriptor is requested as nonexclusive, we
* may call this function several times on the same descriptor
* so it is OK if devm_remove_action_nowarn() returns -ENOENT.
*/
+ ret = devm_remove_action_nowarn(dev, devm_gpiod_release, desc);
if (ret == -ENOENT)
return;
/* Anything else we should warn about */
@@ -357,8 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
*/
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
{
- WARN_ON(devres_release(dev, devm_gpiod_release_array,
- devm_gpiod_match_array, &descs));
+ devm_remove_action(dev, devm_gpiod_release_array, descs);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
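
The conversion above swaps the devres_alloc()/devres_add()/devres_find() triple for devm_add_action_or_reset() and devm_is_action_added(), shrinking each release path to a plain function taking the resource pointer. The same idiom works for any resource with a one-argument teardown; a hedged sketch of a hypothetical probe (the driver, its "vdd" supply, and my_regulator_off() are invented):

	#include <linux/device.h>
	#include <linux/platform_device.h>
	#include <linux/regulator/consumer.h>

	static void my_regulator_off(void *data)
	{
		regulator_disable(data);
	}

	static int my_probe(struct platform_device *pdev)
	{
		struct regulator *reg;
		int ret;

		reg = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(reg))
			return PTR_ERR(reg);

		ret = regulator_enable(reg);
		if (ret)
			return ret;

		/* on registration failure the action runs immediately, so nothing leaks */
		return devm_add_action_or_reset(&pdev->dev, my_regulator_off, reg);
	}
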
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 65f6a7177b78..73ba73b31cb1 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -224,6 +224,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "lantiq,pci-xway", "gpio-reset", false },
#endif
+#if IS_ENABLED(CONFIG_REGULATOR_S5M8767)
+ /*
+	 * According to the S5M8767 documentation, the DVS and DS pins are
+	 * active-high signals. However, exynos5250-spring.dts uses an
+	 * active-low setting.
+ */
+ { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-dvs-gpios", true },
+ { "samsung,s5m8767-pmic", "s5m8767,pmic-buck-ds-gpios", true },
+#endif
#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
/*
* DTS for Nokia N900 incorrectly specified "active high"
@@ -1278,3 +1287,11 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
of_node_put(dev_of_node(&chip->gpiodev->dev));
}
+
+bool of_gpiochip_instance_match(struct gpio_chip *gc, unsigned int index)
+{
+ if (gc->of_node_instance_match)
+ return gc->of_node_instance_match(gc, index);
+
+ return false;
+}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 16d6ac8cb156..3eebfac290c5 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -22,6 +22,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np,
unsigned long *lookupflags);
int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
+bool of_gpiochip_instance_match(struct gpio_chip *gc, unsigned int index);
int of_gpio_count(const struct fwnode_handle *fwnode, const char *con_id);
#else
static inline struct gpio_desc *of_find_gpio(struct device_node *np,
@@ -33,6 +34,11 @@ static inline struct gpio_desc *of_find_gpio(struct device_node *np,
}
static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+static inline bool of_gpiochip_instance_match(struct gpio_chip *gc,
+ unsigned int index)
+{
+ return false;
+}
static inline int of_gpio_count(const struct fwnode_handle *fwnode,
const char *con_id)
{
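
of_gpiochip_instance_match() gives a chip a way to claim one instance among several sharing a firmware node, which the three-cell interrupt translation added to gpiolib.c below relies on. A hedged sketch of the driver side (struct my_bank and its id field are hypothetical; the callback is the new gpio_chip hook this series introduces):

	#include <linux/gpio/driver.h>

	struct my_bank {
		unsigned int id;	/* position of this bank within the node */
	};

	static bool my_bank_instance_match(struct gpio_chip *gc, unsigned int index)
	{
		struct my_bank *bank = gpiochip_get_data(gc);

		/* cell 0 of a three-cell specifier selects the bank */
		return bank->id == index;
	}

	/* wired up before gpiochip_add_data():
	 *	gc->of_node_instance_match = my_bank_instance_match;
	 */
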
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 1acfa43bf1ab..4a3aa09dad9d 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -134,17 +134,15 @@ static ssize_t value_store(struct device *dev,
long value;
status = kstrtol(buf, 0, &value);
+ if (status)
+ return status;
guard(mutex)(&data->mutex);
- if (!test_bit(FLAG_IS_OUT, &desc->flags))
- return -EPERM;
-
+ status = gpiod_set_value_cansleep(desc, value);
if (status)
return status;
- gpiod_set_value_cansleep(desc, value);
-
return size;
}
static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cd4fecbb41f2..fdafa0df1b43 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -266,6 +266,20 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_to_gpio_device);
/**
+ * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin.
+ * @desc: Descriptor to compare.
+ * @other: The second descriptor to compare against.
+ *
+ * Returns:
+ * True if the descriptors refer to the same physical pin. False otherwise.
+ */
+bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other)
+{
+ return desc == other;
+}
+EXPORT_SYMBOL_GPL(gpiod_is_equal);
+
+/**
* gpio_device_get_base() - Get the base GPIO number allocated by this device
* @gdev: GPIO device
*
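
gpiod_is_equal() is a trivial pointer comparison today, but it lets consumers compare descriptors without depending on their representation. A hedged sketch of a hypothetical caller:

	/* both descriptors previously obtained via gpiod_get(); names invented */
	static void check_shared_pin(struct device *dev, struct gpio_desc *reset,
				     struct gpio_desc *enable)
	{
		if (gpiod_is_equal(reset, enable))
			dev_warn(dev, "reset and enable resolve to the same pin\n");
	}
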
@@ -342,6 +356,37 @@ static int gpiochip_find_base_unlocked(u16 ngpio)
}
}
+/*
+ * This descriptor validation needs to be inserted verbatim into each
+ * function taking a descriptor, so we need to use a preprocessor
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
+ */
+static int validate_desc(const struct gpio_desc *desc, const char *func)
+{
+ if (!desc)
+ return 0;
+
+ if (IS_ERR(desc)) {
+ pr_warn("%s: invalid GPIO (errorpointer: %pe)\n", func, desc);
+ return PTR_ERR(desc);
+ }
+
+ return 1;
+}
+
+#define VALIDATE_DESC(desc) do { \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return __valid; \
+ } while (0)
+
+#define VALIDATE_DESC_VOID(desc) do { \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return; \
+ } while (0)
+
static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset)
{
int ret;
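
Hoisting validate_desc() and the VALIDATE_DESC*() macros lets gpiod_get_direction() in the next hunk (and gpiod_to_irq() later) reuse them instead of open-coding IS_ERR_OR_NULL(). The resulting shape of a gpiolib.c entry point is, illustratively (gpiod_do_thing() is hypothetical):

	static int gpiod_do_thing(struct gpio_desc *desc)
	{
		VALIDATE_DESC(desc);	/* NULL: optional GPIO, return 0; ERR_PTR: its error */

		/* past this point desc is known to be a real descriptor */
		return 0;
	}
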
@@ -376,11 +421,8 @@ int gpiod_get_direction(struct gpio_desc *desc)
unsigned int offset;
int ret;
- /*
- * We cannot use VALIDATE_DESC() as we must not return 0 for a NULL
- * descriptor like we usually do.
- */
- if (IS_ERR_OR_NULL(desc))
+ ret = validate_desc(desc, __func__);
+ if (ret <= 0)
return -EINVAL;
CLASS(gpio_chip_guard, guard)(desc);
@@ -742,6 +784,12 @@ EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask);
bool gpiochip_line_is_valid(const struct gpio_chip *gc,
unsigned int offset)
{
+ /*
+ * hog pins are requested before registering GPIO chip
+	 * hog pins are requested before the GPIO chip is registered
+ if (!gc->gpiodev)
+ return true;
+
/* No mask means all valid */
if (likely(!gc->gpiodev->valid_mask))
return true;
@@ -874,14 +922,12 @@ static void machine_gpiochip_add(struct gpio_chip *gc)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
list_for_each_entry(hog, &gpio_machine_hogs, list) {
if (!strcmp(gc->label, hog->chip_label))
gpiochip_machine_hog(gc, hog);
}
-
- mutex_unlock(&gpio_machine_hogs_mutex);
}
static void gpiochip_setup_devs(void)
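
machine_gpiochip_add() above is one of several lock/unlock pairs this patch converts to guard(mutex)() from <linux/cleanup.h> (gpiod_add_lookup_tables(), gpiod_add_hogs() and friends follow below), which drops the lock on every return path. A minimal sketch of the idiom, assuming a hypothetical struct:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	struct my_state {
		struct mutex lock;
		bool ready;
		int count;
	};

	static int my_state_count(struct my_state *st)
	{
		guard(mutex)(&st->lock);	/* released automatically at any return */

		if (!st->ready)
			return -EAGAIN;		/* no explicit mutex_unlock() needed */
		return st->count;
	}
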
@@ -975,7 +1021,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct gpio_device *gdev;
unsigned int desc_index;
int base = 0;
- int ret = 0;
+ int ret;
/* Only allow one set() and one set_multiple(). */
if ((gc->set && gc->set_rv) ||
@@ -1000,11 +1046,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc));
- gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
- if (gdev->id < 0) {
- ret = gdev->id;
+ ret = ida_alloc(&gpio_ida, GFP_KERNEL);
+ if (ret < 0)
goto err_free_gdev;
- }
+ gdev->id = ret;
ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
if (ret)
@@ -1507,9 +1552,8 @@ static int gpiochip_hierarchy_irq_domain_translate(struct irq_domain *d,
unsigned int *type)
{
/* We support standard DT translation */
- if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
- return irq_domain_translate_twocell(d, fwspec, hwirq, type);
- }
+ if (is_of_node(fwspec->fwnode))
+ return irq_domain_translate_twothreecell(d, fwspec, hwirq, type);
/* This is for board files and others not using DT */
if (is_fwnode_irqchip(fwspec->fwnode)) {
@@ -1811,11 +1855,26 @@ static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
irq_set_chip_data(irq, NULL);
}
+static int gpiochip_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ struct fwnode_handle *fwnode = fwspec->fwnode;
+ struct gpio_chip *gc = d->host_data;
+ unsigned int index = fwspec->param[0];
+
+ if (fwspec->param_count == 3 && is_of_node(fwnode))
+ return of_gpiochip_instance_match(gc, index);
+
+ /* Fallback for twocells */
+ return (fwnode && (d->fwnode == fwnode) && (d->bus_token == bus_token));
+}
+
static const struct irq_domain_ops gpiochip_domain_ops = {
.map = gpiochip_irq_map,
.unmap = gpiochip_irq_unmap,
+ .select = gpiochip_irq_select,
/* Virtually all GPIO irqchips are twocell:ed */
- .xlate = irq_domain_xlate_twocell,
+ .xlate = irq_domain_xlate_twothreecell,
};
static struct irq_domain *gpiochip_simple_create_domain(struct gpio_chip *gc)
@@ -1835,7 +1894,6 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
-#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* Avoid race condition with other code, which tries to lookup
* an IRQ before the irqchip has been properly registered,
@@ -1843,7 +1901,6 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
*/
if (!gc->irq.initialized)
return -EPROBE_DEFER;
-#endif
if (!gpiochip_irqchip_irq_valid(gc, offset))
return -ENXIO;
@@ -2405,37 +2462,6 @@ out_clear_bit:
return ret;
}
-/*
- * This descriptor validation needs to be inserted verbatim into each
- * function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication. If the desc is NULL it is an
- * optional GPIO and calls should just bail out.
- */
-static int validate_desc(const struct gpio_desc *desc, const char *func)
-{
- if (!desc)
- return 0;
-
- if (IS_ERR(desc)) {
- pr_warn("%s: invalid GPIO (errorpointer)\n", func);
- return PTR_ERR(desc);
- }
-
- return 1;
-}
-
-#define VALIDATE_DESC(desc) do { \
- int __valid = validate_desc(desc, __func__); \
- if (__valid <= 0) \
- return __valid; \
- } while (0)
-
-#define VALIDATE_DESC_VOID(desc) do { \
- int __valid = validate_desc(desc, __func__); \
- if (__valid <= 0) \
- return; \
- } while (0)
-
int gpiod_request(struct gpio_desc *desc, const char *label)
{
int ret = -EPROBE_DEFER;
@@ -3045,7 +3071,7 @@ set_output_flag:
*/
int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
@@ -3078,7 +3104,7 @@ EXPORT_SYMBOL_GPL(gpiod_enable_hw_timestamp_ns);
*/
int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
@@ -3593,6 +3619,9 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
+ if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ return -EPERM;
+
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
return -ENODEV;
@@ -3664,6 +3693,12 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
if (!can_sleep)
WARN_ON(array_info->gdev->can_sleep);
+ for (i = 0; i < array_size; i++) {
+ if (unlikely(!test_bit(FLAG_IS_OUT,
+ &desc_array[i]->flags)))
+ return -EPERM;
+ }
+
guard(srcu)(&array_info->gdev->srcu);
gc = srcu_dereference(array_info->gdev->chip,
&array_info->gdev->srcu);
@@ -3723,6 +3758,9 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
int hwgpio = gpio_chip_hwgpio(desc);
int value = test_bit(i, value_bitmap);
+ if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ return -EPERM;
+
/*
* Pins applicable for fast input but not for
* fast output processing may have been already
@@ -3944,13 +3982,10 @@ int gpiod_to_irq(const struct gpio_desc *desc)
struct gpio_device *gdev;
struct gpio_chip *gc;
int offset;
+ int ret;
- /*
- * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
- * requires this function to not return zero on an invalid descriptor
- * but rather a negative error number.
- */
- if (IS_ERR_OR_NULL(desc))
+ ret = validate_desc(desc, __func__);
+ if (ret <= 0)
return -EINVAL;
gdev = desc->gdev;
@@ -3962,13 +3997,12 @@ int gpiod_to_irq(const struct gpio_desc *desc)
offset = gpio_chip_hwgpio(desc);
if (gc->to_irq) {
- int retirq = gc->to_irq(gc, offset);
+ ret = gc->to_irq(gc, offset);
+ if (ret)
+ return ret;
/* Zero means NO_IRQ */
- if (!retirq)
- return -ENXIO;
-
- return retirq;
+ return -ENXIO;
}
#ifdef CONFIG_GPIOLIB_IRQCHIP
if (gc->irq.chip) {
@@ -4323,12 +4357,10 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n)
{
unsigned int i;
- mutex_lock(&gpio_lookup_lock);
+ guard(mutex)(&gpio_lookup_lock);
for (i = 0; i < n; i++)
list_add_tail(&tables[i]->list, &gpio_lookup_list);
-
- mutex_unlock(&gpio_lookup_lock);
}
/**
@@ -4387,11 +4419,9 @@ void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
if (!table)
return;
- mutex_lock(&gpio_lookup_lock);
+ guard(mutex)(&gpio_lookup_lock);
list_del(&table->list);
-
- mutex_unlock(&gpio_lookup_lock);
}
EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table);
@@ -4403,7 +4433,7 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
for (hog = &hogs[0]; hog->chip_label; hog++) {
list_add_tail(&hog->list, &gpio_machine_hogs);
@@ -4417,8 +4447,6 @@ void gpiod_add_hogs(struct gpiod_hog *hogs)
if (gdev)
gpiochip_machine_hog(gpio_device_get_chip(gdev), hog);
}
-
- mutex_unlock(&gpio_machine_hogs_mutex);
}
EXPORT_SYMBOL_GPL(gpiod_add_hogs);
@@ -4426,10 +4454,10 @@ void gpiod_remove_hogs(struct gpiod_hog *hogs)
{
struct gpiod_hog *hog;
- mutex_lock(&gpio_machine_hogs_mutex);
+ guard(mutex)(&gpio_machine_hogs_mutex);
+
for (hog = &hogs[0]; hog->chip_label; hog++)
list_del(&hog->list);
- mutex_unlock(&gpio_machine_hogs_mutex);
}
EXPORT_SYMBOL_GPL(gpiod_remove_hogs);
@@ -5108,8 +5136,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_optional);
*/
void gpiod_put(struct gpio_desc *desc)
{
- if (desc)
- gpiod_free(desc);
+ gpiod_free(desc);
}
EXPORT_SYMBOL_GPL(gpiod_put);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f01925ed8176..f7ea8e895c0c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -26,6 +26,11 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+menu "DRM debugging options"
+depends on DRM
+source "drivers/gpu/drm/Kconfig.debug"
+endmenu
+
if DRM
config DRM_MIPI_DBI
@@ -37,65 +42,6 @@ config DRM_MIPI_DSI
bool
depends on DRM
-config DRM_DEBUG_MM
- bool "Insert extra checks and debug info into the DRM range managers"
- default n
- depends on DRM
- depends on STACKTRACE_SUPPORT
- select STACKDEPOT
- help
- Enable allocation tracking of memory manager and leak detection on
- shutdown.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
-
-config DRM_USE_DYNAMIC_DEBUG
- bool "use dynamic debug to implement drm.debug"
- default n
- depends on BROKEN
- depends on DRM
- depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
- depends on JUMP_LABEL
- help
- Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
- Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
- bytes per callsite, the .data costs can be substantial, and
- are therefore configurable.
-
-config DRM_KUNIT_TEST_HELPERS
- tristate
- depends on DRM && KUNIT
- select DRM_KMS_HELPER
- help
- KUnit Helpers for KMS drivers.
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT && MMU
- select DRM_BUDDY
- select DRM_DISPLAY_DP_HELPER
- select DRM_DISPLAY_HDMI_STATE_HELPER
- select DRM_DISPLAY_HELPER
- select DRM_EXEC
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_GEM_SHMEM_HELPER
- select DRM_KUNIT_TEST_HELPERS
- select DRM_LIB_RANDOM
- select PRIME_NUMBERS
- default KUNIT_ALL_TESTS
- help
- This builds unit tests for DRM. This option is not useful for
- distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- For more information on KUnit and unit tests in general,
- please refer to the KUnit documentation in
- Documentation/dev-tools/kunit/.
-
- If in doubt, say "N".
-
config DRM_KMS_HELPER
tristate
depends on DRM
@@ -242,28 +188,12 @@ source "drivers/gpu/drm/display/Kconfig"
config DRM_TTM
tristate
depends on DRM && MMU
+ select SHMEM
help
GPU memory management subsystem for devices with multiple
GPU memory types. Will be enabled automatically if a device driver
uses it.
-config DRM_TTM_KUNIT_TEST
- tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
- default n
- depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
- select DRM_TTM
- select DRM_BUDDY
- select DRM_EXPORT_FOR_TESTS if m
- select DRM_KUNIT_TEST_HELPERS
- default KUNIT_ALL_TESTS
- help
- Enables unit tests for TTM, a GPU memory manager subsystem used
- to manage memory buffers. This option is mostly useful for kernel
- developers. It depends on (UML || COMPILE_TEST) since no other driver
- which uses TTM can be loaded while running the tests.
-
- If in doubt, say "N".
-
config DRM_EXEC
tristate
depends on DRM
@@ -335,6 +265,8 @@ config DRM_SCHED
tristate
depends on DRM
+source "drivers/gpu/drm/sysfb/Kconfig"
+
source "drivers/gpu/drm/arm/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
@@ -343,6 +275,8 @@ source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
+source "drivers/gpu/drm/nova/Kconfig"
+
source "drivers/gpu/drm/i915/Kconfig"
source "drivers/gpu/drm/xe/Kconfig"
@@ -454,6 +388,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
source "drivers/gpu/drm/gud/Kconfig"
+source "drivers/gpu/drm/sitronix/Kconfig"
+
source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
@@ -462,7 +398,7 @@ source "drivers/gpu/drm/imagination/Kconfig"
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && MMU && HYPERV
+ depends on DRM && PCI && HYPERV
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
@@ -474,9 +410,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-config DRM_EXPORT_FOR_TESTS
- bool
-
# Separate option as not all DRM drivers use it
config DRM_PANEL_BACKLIGHT_QUIRKS
tristate
@@ -489,31 +422,6 @@ config DRM_PRIVACY_SCREEN
bool
default n
-config DRM_WERROR
- bool "Compile the drm subsystem with warnings as errors"
- depends on DRM && EXPERT
- depends on !WERROR
- default n
- help
- A kernel build should not cause any compiler warnings, and this
- enables the '-Werror' flag to enforce that rule in the drm subsystem.
-
- The drm subsystem enables more warnings than the kernel default, so
- this config option is disabled by default.
-
- If in doubt, say N.
-
-config DRM_HEADER_TEST
- bool "Ensure DRM headers are self-contained and pass kernel-doc"
- depends on DRM && EXPERT && BROKEN
- default n
- help
- Ensure the DRM subsystem headers both under drivers/gpu/drm and
- include/drm compile, are self-contained, have header guards, and have
- no kernel-doc warnings.
-
- If in doubt, say N.
-
endif
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
new file mode 100644
index 000000000000..fa6ee76f4d3c
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -0,0 +1,116 @@
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default n
+ depends on BROKEN
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
+config DRM_WERROR
+ bool "Compile the drm subsystem with warnings as errors"
+ depends on DRM && EXPERT
+ depends on !WERROR
+ default n
+ help
+ A kernel build should not cause any compiler warnings, and this
+ enables the '-Werror' flag to enforce that rule in the drm subsystem.
+
+ The drm subsystem enables more warnings than the kernel default, so
+ this config option is disabled by default.
+
+ If in doubt, say N.
+
+config DRM_HEADER_TEST
+ bool "Ensure DRM headers are self-contained and pass kernel-doc"
+ depends on DRM && EXPERT && BROKEN
+ default n
+ help
+ Ensure the DRM subsystem headers both under drivers/gpu/drm and
+ include/drm compile, are self-contained, have header guards, and have
+ no kernel-doc warnings.
+
+ If in doubt, say N.
+
+config DRM_DEBUG_MM
+ bool "Insert extra checks and debug info into the DRM range managers"
+ default n
+ depends on DRM
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+ help
+ Enable allocation tracking of memory manager and leak detection on
+ shutdown.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ select DRM_KMS_HELPER
+ help
+ KUnit Helpers for KMS drivers.
+
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT && MMU
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_BUDDY
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_EXEC
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KUNIT_TEST_HELPERS
+ select DRM_LIB_RANDOM
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for DRM. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on DRM and associated drivers.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
+
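Note (not part of the patch): assuming the usual in-tree KUnit workflow, these
tests can be run under UML with the kunitconfig the DRM tree ships:

    ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests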
+config DRM_TTM_KUNIT_TEST
+ tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
+ default n
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
+ select DRM_TTM
+ select DRM_BUDDY
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Enables unit tests for TTM, a GPU memory manager subsystem used
+ to manage memory buffers. This option is mostly useful for kernel
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
+
+ If in doubt, say "N".
+
+config DRM_SCHED_KUNIT_TEST
+ tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS
+ select DRM_SCHED
+ depends on DRM && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Choose this option to build unit tests for the DRM scheduler.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_EXPORT_FOR_TESTS
+ bool
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ed54a546bbe2..5050ac32bba2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
+ drm_bridge_helper.o \
drm_crtc_helper.o \
drm_damage_helper.o \
drm_flip_work.o \
@@ -176,6 +177,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_NOVA) += nova/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
@@ -204,6 +206,7 @@ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-y += mxsfb/
+obj-y += sysfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
@@ -219,6 +222,7 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
obj-y += xlnx/
obj-y += gud/
obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-y += sitronix/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
@@ -236,7 +240,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
- $(srctree)/scripts/kernel-doc -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index ad80542b60ed..2b60128e2c69 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -212,12 +212,13 @@ static const struct mipi_dsi_host_ops adp_dsi_host_ops = {
};
static int adp_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adp_mipi_drv_private *adp =
container_of(bridge, struct adp_mipi_drv_private, bridge);
- return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags);
}
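Note (not part of the patch): this hunk follows the tree-wide change to
drm_bridge_funcs::attach, which now receives the encoder explicitly instead of
relying on bridge->encoder. A minimal sketch of the updated callback shape,
where example_attach and next_bridge are placeholder names:

    static int example_attach(struct drm_bridge *bridge,
                              struct drm_encoder *encoder,
                              enum drm_bridge_attach_flags flags)
    {
            /* chain to the downstream bridge, as adp-mipi does above */
            return drm_bridge_attach(encoder, next_bridge, bridge, flags);
    }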
static const struct drm_bridge_funcs adp_dsi_bridge_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1a11cab741ac..1acfed2f92ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -2,7 +2,7 @@
config DRM_AMDGPU
tristate "AMD GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on !UML
select FW_LOADER
select DRM_CLIENT
@@ -68,7 +68,6 @@ config DRM_AMDGPU_CIK
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
- depends on MMU
select HMM_MIRROR
select MMU_NOTIFIER
help
@@ -77,7 +76,7 @@ config DRM_AMDGPU_USERPTR
config DRM_AMD_ISP
bool "Enable AMD Image Signal Processor IP support"
- depends on DRM_AMDGPU
+ depends on DRM_AMDGPU && ACPI
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index aacc810cabb3..87080c06e5fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
- amdgpu_cper.o
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -174,7 +174,10 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_mes.o \
mes_v11_0.o \
- mes_v12_0.o
+ mes_v12_0.o
+
+# add GFX userqueue support
+amdgpu-y += mes_userqueue.o
# add UVD block
amdgpu-y += \
@@ -253,6 +256,8 @@ amdgpu-y += \
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
+# add gfx usermode queue
+amdgpu-y += amdgpu_userq.o
ifneq ($(CONFIG_HSA_AMD),)
AMDKFD_PATH := ../amdkfd
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c3641331d4de..a5ccd0ada16a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -113,6 +113,8 @@
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_eviction_fence.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif
@@ -228,7 +230,7 @@ extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
-extern bool enforce_isolation;
+extern int amdgpu_enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
@@ -266,8 +268,10 @@ extern int amdgpu_umsch_mm_fwlog;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
+extern int amdgpu_rebar;
extern int amdgpu_wbrf;
+extern int amdgpu_user_queue;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -488,7 +492,6 @@ struct amdgpu_flip_work {
bool async;
};
-
/*
* file private structure
*/
@@ -501,6 +504,11 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
+ struct amdgpu_userq_mgr userq_mgr;
+
+ /* Eviction fence infra */
+ struct amdgpu_eviction_fence_mgr evf_mgr;
+
/** GPU partition selection */
uint32_t xcp_id;
};
@@ -512,12 +520,62 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
*/
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+/**
+ * struct amdgpu_wb - This struct is used for small GPU memory allocation.
+ *
+ * This struct is used to allocate a small amount of GPU memory that can be
+ * used to shadow certain states into memory. This is especially useful for
+ * providing easy CPU access to some states without requiring register access
+ * (e.g., if some block is power gated, reading its registers may be problematic).
+ *
+ * Note: the term writeback was initially used because many of the amdgpu
+ * components had some level of writeback memory, and this struct initially
+ * described those components.
+ */
struct amdgpu_wb {
+
+ /**
+ * @wb_obj:
+ *
+ * Buffer Object used for the writeback memory.
+ */
struct amdgpu_bo *wb_obj;
+
+ /**
+ * @wb:
+ *
+ * Pointer to the first writeback slot. In terms of CPU address, a
+ * slot can be accessed directly by using its offset as an index.
+ * For the GPU address, combine gpu_addr with the offset.
+ */
volatile uint32_t *wb;
+
+ /**
+ * @gpu_addr:
+ *
+ * Writeback base address in the GPU.
+ */
uint64_t gpu_addr;
- u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
+
+ /**
+ * @num_wb:
+ *
+ * Number of writeback slots reserved for amdgpu.
+ */
+ u32 num_wb;
+
+ /**
+ * @used:
+ *
+ * Tracks which writeback slots are already in use.
+ */
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
+
+ /**
+ * @lock:
+ *
+ * Protects read and write of the used field array.
+ */
spinlock_t lock;
};
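Note (not part of the patch): a minimal sketch of the addressing scheme the
kernel-doc above describes, where 'offs' stands in for a slot offset (in
dwords) handed out by the writeback allocator:

    u32 val = adev->wb.wb[offs];                   /* CPU-side read by index */
    u64 addr = adev->wb.gpu_addr + (offs * 4);     /* GPU-visible address */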
@@ -551,6 +609,7 @@ struct amdgpu_allowed_register_entry {
* are reset depends on the ASIC. Notably doesn't reset IPs
* shared with the CPU on APUs or the memory controllers (so
* VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_METHOD_LINK: Triggers SW-UP link reset on other GPUs
* @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card
* but without powering off the PCI bus. Suitable only for
* discrete GPUs.
@@ -568,6 +627,7 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
AMD_RESET_METHOD_MODE2,
+ AMD_RESET_METHOD_LINK,
AMD_RESET_METHOD_BACO,
AMD_RESET_METHOD_PCI,
AMD_RESET_METHOD_ON_INIT,
@@ -821,6 +881,11 @@ struct amdgpu_mqd_prop {
uint32_t hqd_queue_priority;
bool allow_tunneling;
bool hqd_active;
+ uint64_t shadow_addr;
+ uint64_t gds_bkup_addr;
+ uint64_t csa_addr;
+ uint64_t fence_address;
+ bool tmz_queue;
};
struct amdgpu_mqd {
@@ -829,6 +894,12 @@ struct amdgpu_mqd {
struct amdgpu_mqd_prop *p);
};
+struct amdgpu_pcie_reset_ctx {
+ bool in_link_reset;
+ bool occurs_dpc;
+ bool audio_suspended;
+};
+
/*
* Custom Init levels could be defined for different situations where a full
* initialization of all hardware blocks are not expected. Sample cases are
@@ -853,6 +924,14 @@ struct amdgpu_init_level {
struct amdgpu_reset_domain;
struct amdgpu_fru_info;
+enum amdgpu_enforce_isolation_mode {
+ AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
+};
+
+
/*
* Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
*/
@@ -1081,6 +1160,13 @@ struct amdgpu_device {
bool enable_uni_mes;
struct amdgpu_mes mes;
struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
+ const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
+
+ /* xarray used to retrieve the user queue fence driver reference
+ * in the EOP interrupt handler to signal the particular user
+ * queue fence.
+ */
+ struct xarray userq_xa;
/* df */
struct amdgpu_df df;
@@ -1160,6 +1246,8 @@ struct amdgpu_device {
struct pci_saved_state *pci_state;
pci_channel_state_t pci_channel_state;
+ struct amdgpu_pcie_reset_ctx pcie_reset_ctx;
+
/* Track auto wait count on s_barrier settings */
bool barrier_has_auto_waitcnt;
@@ -1193,10 +1281,11 @@ struct amdgpu_device {
bool debug_enable_ras_aca;
bool debug_exp_resets;
bool debug_disable_gpu_ring_reset;
+ bool debug_vm_userptr;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
- bool enforce_isolation[MAX_XCP];
+ enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP];
struct amdgpu_isolation {
void *owner;
struct dma_fence *spearhead;
@@ -1210,6 +1299,10 @@ struct amdgpu_device {
* in KFD: VRAM or GTT.
*/
bool apu_prefer_gtt;
+
+ struct list_head userq_mgr_list;
+ struct mutex userq_mutex;
+ bool userq_halt_for_enforce_isolation;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1464,6 +1557,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
+int amdgpu_device_link_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
@@ -1619,6 +1713,10 @@ static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { retu
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
+#if defined(CONFIG_DRM_AMD_ISP)
+int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
+#endif
+
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index b4ad163f42a7..3835f2592914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -120,6 +120,9 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+
+ if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 707e131f89d2..f5466c592d94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1532,5 +1532,35 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
return true;
#endif /* CONFIG_AMD_PMC */
}
-
#endif /* CONFIG_SUSPEND */
+
+#if IS_ENABLED(CONFIG_DRM_AMD_ISP)
+static const struct acpi_device_id isp_sensor_ids[] = {
+ { "OMNI5C10" },
+ { }
+};
+
+static int isp_match_acpi_device_ids(struct device *dev, const void *data)
+{
+ return acpi_match_device(data, dev) ? 1 : 0;
+}
+
+int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
+{
+ struct device *pdev __free(put_device) = NULL;
+ struct acpi_device *acpi_pdev;
+
+ pdev = bus_find_device(&platform_bus_type, NULL, isp_sensor_ids,
+ isp_match_acpi_device_ids);
+ if (!pdev)
+ return -EINVAL;
+
+ acpi_pdev = ACPI_COMPANION(pdev);
+ if (!acpi_pdev)
+ return -ENODEV;
+
+ strscpy(*hid, acpi_device_hid(acpi_pdev));
+
+ return 0;
+}
+#endif /* CONFIG_DRM_AMD_ISP */
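Note (not part of the patch): the __free(put_device) annotation above is the
scope-based cleanup helper from linux/cleanup.h. The reference taken by
bus_find_device() is dropped automatically when pdev goes out of scope, so
neither the error returns nor the success path needs an explicit put_device().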
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4cec3a873995..d8ac4b1051a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -368,6 +368,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
+ if (!bo || !*bo)
+ return;
+
(void)amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d2ec4130a316..260165bbe373 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2559,6 +2559,18 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (ret != -EFAULT)
return ret;
+ /* If applications unmap memory before destroying the userptr
+ * from the KFD, trigger a segmentation fault in VM debug mode.
+ */
+ if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
+ pid_nr(process_info->pid), mem->va);
+
+ /* Send GPU VM fault to user space */
+ kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
+ mem->va);
+ }
+
ret = 0;
}
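Note (not part of the patch): debug_vm_userptr is driven by the new
AMDGPU_DEBUG_VM_USERPTR flag (BIT(8)) added to the debug-mask enum later in
this patch, so this check can presumably be enabled via the existing module
parameter:

    modprobe amdgpu debug_mask=0x100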
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index eb015bdda8a7..c7d32fb216e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -281,6 +281,9 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
case ATOM_DGPU_VRAM_TYPE_GDDR6:
vram_type = AMDGPU_VRAM_TYPE_GDDR6;
break;
+ case ATOM_DGPU_VRAM_TYPE_HBM3E:
+ vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ break;
default:
vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 68bce6a6d09d..004a6a9d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -252,83 +252,22 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
- case CHIP_TAHITI:
- strcpy(fw_name, "radeon/tahiti_smc.bin");
- break;
- case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/pitcairn_smc.bin");
- }
- break;
- case CHIP_VERDE:
- if (((adev->pdev->device == 0x6820) &&
- ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83))) ||
- ((adev->pdev->device == 0x6821) &&
- ((adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87))) ||
- ((adev->pdev->revision == 0x87) &&
- ((adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682b)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/verde_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/verde_smc.bin");
- }
- break;
- case CHIP_OLAND:
- if (((adev->pdev->revision == 0x81) &&
- ((adev->pdev->device == 0x6600) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605) ||
- (adev->pdev->device == 0x6610))) ||
- ((adev->pdev->revision == 0x83) &&
- (adev->pdev->device == 0x6610))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/oland_k_smc.bin");
- } else {
- strcpy(fw_name, "radeon/oland_smc.bin");
- }
- break;
- case CHIP_HAINAN:
- if (((adev->pdev->revision == 0x81) &&
- (adev->pdev->device == 0x6660)) ||
- ((adev->pdev->revision == 0x83) &&
- ((adev->pdev->device == 0x6660) ||
- (adev->pdev->device == 0x6663) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)))) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/hainan_k_smc.bin");
- } else if ((adev->pdev->revision == 0xc3) &&
- (adev->pdev->device == 0x6665)) {
- info->is_kicker = true;
- strcpy(fw_name, "radeon/banks_k_2_smc.bin");
- } else {
- strcpy(fw_name, "radeon/hainan_smc.bin");
- }
- break;
case CHIP_BONAIRE:
if ((adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/bonaire_smc.bin");
+ strscpy(fw_name, "amdgpu/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/hawaii_smc.bin");
+ strscpy(fw_name, "amdgpu/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
@@ -338,76 +277,76 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
- strcpy(fw_name, "amdgpu/fiji_smc.bin");
+ strscpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k_smc.bin");
} else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc.bin");
}
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_k_smc.bin");
} else {
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris12_smc.bin");
}
break;
case CHIP_VEGAM:
- strcpy(fw_name, "amdgpu/vegam_smc.bin");
+ strscpy(fw_name, "amdgpu/vegam_smc.bin");
break;
case CHIP_VEGA10:
if ((adev->pdev->device == 0x687f) &&
((adev->pdev->revision == 0xc0) ||
(adev->pdev->revision == 0xc1) ||
(adev->pdev->revision == 0xc3)))
- strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_acg_smc.bin");
else
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ strscpy(fw_name, "amdgpu/vega10_smc.bin");
break;
case CHIP_VEGA12:
- strcpy(fw_name, "amdgpu/vega12_smc.bin");
+ strscpy(fw_name, "amdgpu/vega12_smc.bin");
break;
case CHIP_VEGA20:
- strcpy(fw_name, "amdgpu/vega20_smc.bin");
+ strscpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 360e07a5c7c1..5a234eadae8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -549,7 +549,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
{
int r;
- if (!amdgpu_aca_is_enabled(adev))
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
return 0;
r = amdgpu_cper_ring_init(adev);
@@ -568,7 +568,7 @@ int amdgpu_cper_init(struct amdgpu_device *adev)
int amdgpu_cper_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_aca_is_enabled(adev))
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
return 0;
adev->cper.enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82df06a72ee0..9ea0d9b71f48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -296,7 +296,25 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
num_ibs[i], &p->jobs[i]);
if (ret)
goto free_all_kdata;
- p->jobs[i]->enforce_isolation = p->adev->enforce_isolation[fpriv->xcp_id];
+ switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
+ case AMDGPU_ENFORCE_ISOLATION_DISABLE:
+ default:
+ p->jobs[i]->enforce_isolation = false;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = true;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ }
}
p->gang_leader = p->jobs[p->gang_leader_idx];
@@ -349,6 +367,10 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
ring = amdgpu_job_ring(job);
ib = &job->ibs[job->num_ibs++];
+ /* submissions to kernel queues are disabled */
+ if (ring->no_user_submission)
+ return -EINVAL;
+
/* MM engine doesn't support user fences */
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index cfdf558b48b6..02138aa55793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c43d1b6e5d66..85567d0d9545 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -919,7 +919,7 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
return timeout;
}
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
@@ -949,19 +949,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
- struct amdgpu_ctx *ctx;
- struct idr *idp;
- uint32_t id;
-
amdgpu_ctx_mgr_entity_fini(mgr);
-
- idp = &mgr->ctx_handles;
-
- idr_for_each_entry(idp, ctx, id) {
- if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
- DRM_ERROR("ctx %p is still alive\n", ctx);
- }
-
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 85376baaa92f..090dfe86f75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -92,7 +92,6 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
struct amdgpu_device *adev);
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a1450f13d963..f81608330a3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1902,7 +1902,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence) == fence)
+ if (preempted && (&job->hw_fence.base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@@ -2105,6 +2105,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
+ amdgpu_psp_debugfs_init(adev);
debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f8b3e04d71ed..78f8755996f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -85,6 +85,7 @@
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
@@ -511,12 +512,13 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
- if (!adev->gmc.noretry)
+ if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
- adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ if (!amdgpu_passthrough(adev))
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
@@ -1680,6 +1682,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ if (!amdgpu_rebar)
+ return 0;
+
/* resizing on Dell G5 SE platforms causes problems with runtime pm */
if ((amdgpu_runtime_pm != 0) &&
adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
@@ -1870,6 +1875,35 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
return true;
}
+static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
+ return false;
+
+ if (c->x86 == 6 &&
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+ switch (c->x86_model) {
+ case VFM_MODEL(INTEL_ALDERLAKE):
+ case VFM_MODEL(INTEL_ALDERLAKE_L):
+ case VFM_MODEL(INTEL_RAPTORLAKE):
+ case VFM_MODEL(INTEL_RAPTORLAKE_P):
+ case VFM_MODEL(INTEL_RAPTORLAKE_S):
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
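Note (not part of the patch): VFM_MODEL() from asm/cpu_device_id.h unpacks the
model byte from a packed vendor/family/model constant, so the switch above
compares c->x86_model against, e.g., VFM_MODEL(INTEL_ALDERLAKE) == 0x97. The
net effect is to skip ASPM on GC 12.0.0/12.0.1 parts plugged into a
Gen5-capable Alder Lake or Raptor Lake host.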
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*
@@ -1894,7 +1928,7 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
}
if (adev->flags & AMD_IS_APU)
return false;
- if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
+ if (amdgpu_device_aspm_support_quirk(adev))
return false;
return pcie_aspm_enabled(adev->pdev);
}
@@ -2112,8 +2146,31 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- for (i = 0; i < MAX_XCP; i++)
- adev->enforce_isolation[i] = !!enforce_isolation;
+ for (i = 0; i < MAX_XCP; i++) {
+ switch (amdgpu_enforce_isolation) {
+ case -1:
+ case 0:
+ default:
+ /* disable */
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ /* enable */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ /* enable legacy mode */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ /* enable only process isolation without submitting cleaner shader */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
return 0;
}
@@ -2689,6 +2746,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
}
+ /* Check for IP version 9.4.3 with A0 hardware */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ !amdgpu_device_get_rev_id(adev)) {
+ dev_err(adev->dev, "Unsupported A0 hardware\n");
+ return -ENODEV; /* device unsupported - no device error */
+ }
+
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
@@ -2701,7 +2765,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}
-
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -3172,6 +3235,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
return true;
@@ -3455,6 +3519,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_userq_suspend(adev);
/* Workaround for ASICs need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
@@ -4307,9 +4372,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_sync_create(&adev->isolation[i].active);
amdgpu_sync_create(&adev->isolation[i].prev);
}
- mutex_init(&adev->gfx.kfd_sch_mutex);
+ mutex_init(&adev->gfx.userq_sch_mutex);
mutex_init(&adev->gfx.workload_profile_mutex);
mutex_init(&adev->vcn.workload_profile_mutex);
+ mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4329,12 +4395,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->virt.rlcg_reg_lock);
spin_lock_init(&adev->wb.lock);
+ xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
+
INIT_LIST_HEAD(&adev->reset_list);
INIT_LIST_HEAD(&adev->ras_list);
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
+ INIT_LIST_HEAD(&adev->userq_mgr_list);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -4659,7 +4729,7 @@ fence_driver_init:
amdgpu_fru_sysfs_init(adev);
amdgpu_reg_state_sysfs_init(adev);
- amdgpu_xcp_cfg_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4789,7 +4859,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev);
- amdgpu_xcp_cfg_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -5003,8 +5073,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix)
+ if (!adev->in_s0ix) {
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ amdgpu_userq_suspend(adev);
+ }
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5071,6 +5143,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r)
goto exit;
+
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
}
r = amdgpu_device_ip_late_init(adev);
@@ -5119,9 +5195,6 @@ exit:
}
adev->in_suspend = false;
- if (adev->enable_mes)
- amdgpu_mes_self_test(adev);
-
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
@@ -5502,6 +5575,29 @@ mode1_reset_failed:
return ret;
}
+int amdgpu_device_link_reset(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU link reset\n");
+
+ if (!adev->pcie_reset_ctx.occurs_dpc)
+ ret = amdgpu_dpm_link_reset(adev);
+
+ if (ret)
+ goto link_reset_failed;
+
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto link_reset_failed;
+
+ return 0;
+
+link_reset_failed:
+ dev_err(adev->dev, "GPU link reset failed\n");
+ return ret;
+}
+
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
@@ -5806,6 +5902,7 @@ static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
+ case AMD_RESET_METHOD_LINK:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
break;
case AMD_RESET_METHOD_MODE2:
@@ -5922,104 +6019,76 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
return ret;
}
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu_device pointer
- * @job: which job trigger hang
- * @reset_context: amdgpu reset context pointer
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Attempt to do soft-reset or full-reset and reinitialize Asic
- * Returns 0 for success or an error on failure.
- */
-
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context)
+static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive)
{
- struct list_head device_list, *device_list_handle = NULL;
- bool job_signaled = false;
- struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
- int i, r = 0;
- bool need_emergency_restart = false;
- bool audio_suspended = false;
- int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
-
- /*
- * If it reaches here because of hang/timeout and a RAS error is
- * detected at the same time, let RAS recovery take care of it.
- */
- if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
- !amdgpu_sriov_vf(adev) &&
- reset_context->src != AMDGPU_RESET_SRC_RAS) {
- dev_dbg(adev->dev,
- "Gpu recovery from source: %d yielding to RAS error recovery handling",
- reset_context->src);
- return 0;
- }
- /*
- * Special case: RAS triggered and full reset isn't supported
- */
- need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
-
- /*
- * Flush RAM to disk so that after reboot
- * the user can read log and see why the system rebooted.
- */
- if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
- amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
-
- ksys_sync_helper();
- emergency_restart();
- }
-
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
-
- if (!amdgpu_sriov_vf(adev))
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive)
- mutex_lock(&hive->hive_lock);
+ int r;
- reset_context->job = job;
- reset_context->hive = hive;
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
* to put adev in the 1st position.
*/
- INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- list_add_tail(&tmp_adev->reset_list, &device_list);
+ list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
- if (!list_is_first(&adev->reset_list, &device_list))
- list_rotate_to_front(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ if (!list_is_first(&adev->reset_list, device_list))
+ list_rotate_to_front(&adev->reset_list, device_list);
} else {
- list_add_tail(&adev->reset_list, &device_list);
- device_list_handle = &device_list;
+ list_add_tail(&adev->reset_list, device_list);
}
- if (!amdgpu_sriov_vf(adev)) {
- r = amdgpu_device_health_check(device_list_handle);
+ if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
+ r = amdgpu_device_health_check(device_list);
if (r)
- goto end_reset;
+ return r;
}
- /* We need to lock reset domain only once both for XGMI and single device */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
+ return 0;
+}
+
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
- /* block all schedulers and reset given job's ring */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+
+static int amdgpu_device_halt_activities(
+ struct amdgpu_device *adev, struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list, struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
+
+ /* block all schedulers and reset given job's ring */
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
amdgpu_device_set_mp1_state(tmp_adev);
/*
@@ -6033,7 +6102,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* some audio codec errors.
*/
if (!amdgpu_device_suspend_display_audio(tmp_adev))
- audio_suspended = true;
+ tmp_adev->pcie_reset_ctx.audio_suspended = true;
amdgpu_ras_set_error_query_ready(tmp_adev, false);
@@ -6051,6 +6120,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* disable ras on ALL IPs */
if (!need_emergency_restart &&
+ (!adev->pcie_reset_ctx.occurs_dpc) &&
amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
@@ -6068,24 +6138,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
atomic_inc(&tmp_adev->gpu_reset_counter);
}
- if (need_emergency_restart)
- goto skip_sched_resume;
+ return r;
+}
- /*
- * Must check guilty signal here since after this point all old
- * HW fences are force signaled.
- *
- * job->base holds a reference to parent fence
- */
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
- job_signaled = true;
- dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
- goto skip_hw_reset;
- }
+static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
+ int r = 0;
retry: /* Rest of adevs pre asic reset from XGMI hive. */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
+ if (adev->pcie_reset_ctx.occurs_dpc)
+ tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6097,6 +6167,11 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
+
+ /* Bail out of reset early */
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
amdgpu_ras_set_fed(adev, true);
@@ -6111,12 +6186,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(device_list_handle, reset_context);
+ r = amdgpu_do_asic_reset(device_list, reset_context);
if (r && r == -EAGAIN)
goto retry;
}
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/*
* Drop any pending non scheduler resets queued before reset is done.
* Any reset scheduled after this point would be valid. Scheduler resets
@@ -6126,10 +6201,18 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_stop_pending_resets(tmp_adev);
}
-skip_hw_reset:
+ return r;
+}
+
+static int amdgpu_device_sched_resume(struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context,
+ bool job_signaled)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
/* Post ASIC reset for all devs .*/
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -6165,8 +6248,16 @@ skip_hw_reset:
}
}
-skip_sched_resume:
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ return r;
+}
+
+static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
@@ -6177,18 +6268,117 @@ skip_sched_resume:
if (!adev->kfd.init_complete)
amdgpu_amdkfd_device_init(adev);
- if (audio_suspended)
+ if (tmp_adev->pcie_reset_ctx.audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unset_mp1_state(tmp_adev);
amdgpu_ras_set_error_query_ready(tmp_adev, true);
+
}
+}
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
- amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu_device pointer
+ * @job: which job trigger hang
+ * @reset_context: amdgpu reset context pointer
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize Asic
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head device_list;
+ bool job_signaled = false;
+ struct amdgpu_hive_info *hive = NULL;
+ int r = 0;
+ bool need_emergency_restart = false;
+
+ /*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "Gpu recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+
+ /*
+ * Special case: RAS triggered and full reset isn't supported
+ */
+ need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
+ /*
+ * Flush RAM to disk so that after reboot
+ * the user can read log and see why the system rebooted.
+ */
+ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
+ amdgpu_ras_get_context(adev)->reboot) {
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
+
+ dev_info(adev->dev, "GPU %s begin!\n",
+ need_emergency_restart ? "jobs stop":"reset");
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+
+ reset_context->job = job;
+ reset_context->hive = hive;
+ INIT_LIST_HEAD(&device_list);
+
+ if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
+ goto end_reset;
+
+ /* We need to lock reset domain only once both for XGMI and single device */
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
+ r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
+ if (r)
+ goto reset_unlock;
+
+ if (need_emergency_restart)
+ goto skip_sched_resume;
+ /*
+ * Must check guilty signal here since after this point all old
+ * HW fences are force signaled.
+ *
+ * job->base holds a reference to parent fence
+ */
+ if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ job_signaled = true;
+ dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ goto skip_hw_reset;
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
+ if (r)
+ goto reset_unlock;
+skip_hw_reset:
+ r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
+ if (r)
+ goto reset_unlock;
+skip_sched_resume:
+ amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
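Note (not part of the patch): the net effect of this refactor is that GPU
recovery is now a fixed pipeline of reusable helpers - recovery_prepare,
recovery_get_reset_lock, halt_activities, asic_reset, sched_resume, gpu_resume
and recovery_put_reset_lock - which the PCIe error-recovery callbacks below
drive directly instead of duplicating the logic.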
@@ -6572,12 +6762,15 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_reset_context reset_context;
+ struct list_head device_list;
+ int r = 0;
- DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+ dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- DRM_WARN("No support for XGMI hive yet...");
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev, "No support for XGMI hive yet...\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6585,32 +6778,32 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
switch (state) {
case pci_channel_io_normal:
+ dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
return PCI_ERS_RESULT_CAN_RECOVER;
- /* Fatal error, prepare for slot reset */
case pci_channel_io_frozen:
- /*
- * Locking adev->reset_domain->sem will prevent any external access
- * to GPU during PCI error recovery
- */
- amdgpu_device_lock_reset_domain(adev->reset_domain);
- amdgpu_device_set_mp1_state(adev);
-
- /*
- * Block any work scheduling as we do for regular GPU reset
- * for the duration of the recovery
- */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!amdgpu_ring_sched_ready(ring))
- continue;
-
- drm_sched_stop(&ring->sched, NULL);
+ /* Fatal error, prepare for slot reset */
+ dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ adev->pcie_reset_ctx.occurs_dpc = true;
+ memset(&reset_context, 0, sizeof(reset_context));
+ INIT_LIST_HEAD(&device_list);
+
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+ r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
- atomic_inc(&adev->gpu_reset_counter);
+ if (r)
+ return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
+ dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -6623,8 +6816,10 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
*/
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
- DRM_INFO("PCI error: mmio enabled callback!!\n");
+ dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
@@ -6648,10 +6843,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int r, i;
struct amdgpu_reset_context reset_context;
- u32 memsize;
+ struct amdgpu_device *tmp_adev;
+ struct amdgpu_hive_info *hive;
struct list_head device_list;
+ int r = 0, i;
+ u32 memsize;
/* PCI error slot reset should be skipped During RAS recovery */
if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
@@ -6659,15 +6856,12 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
amdgpu_ras_in_recovery(adev))
return PCI_ERS_RESULT_RECOVERED;
- DRM_INFO("PCI error: slot reset callback!!\n");
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->reset_list, &device_list);
-
/* wait for asic to come out of reset */
- msleep(500);
+ msleep(700);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -6688,26 +6882,40 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
-
- adev->no_hw_access = true;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- adev->no_hw_access = false;
- if (r)
- goto out;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+ INIT_LIST_HEAD(&device_list);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else {
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ list_add_tail(&adev->reset_list, &device_list);
+ }
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
out:
if (!r) {
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(adev->pdev);
-
- DRM_INFO("PCIe error recovery succeeded\n");
+ dev_info(adev->dev, "PCIe error recovery succeeded\n");
} else {
- DRM_ERROR("PCIe error recovery failed, err:%d", r);
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
+ if (hive) {
+ list_for_each_entry(tmp_adev, &device_list, reset_list)
+ amdgpu_device_unset_mp1_state(tmp_adev);
+ }
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ }
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
@@ -6724,26 +6932,37 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- int i;
-
+ struct list_head device_list;
+ struct amdgpu_hive_info *hive = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
- DRM_INFO("PCI error: resume callback!!\n");
+ dev_info(adev->dev, "PCI error: resume callback!!\n");
/* Only continue execution for the case of pci_channel_io_frozen */
if (adev->pci_channel_state != pci_channel_io_frozen)
return;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+ INIT_LIST_HEAD(&device_list);
- if (!amdgpu_ring_sched_ready(ring))
- continue;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = false;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else
+ list_add_tail(&adev->reset_list, &device_list);
- drm_sched_start(&ring->sched, 0);
- }
+ amdgpu_device_sched_resume(&device_list, NULL, false);
+ amdgpu_device_gpu_resume(adev, &device_list, false);
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ adev->pcie_reset_ctx.occurs_dpc = false;
- amdgpu_device_unset_mp1_state(adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
}
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 9e738fae2b74..a0e9bf9b2710 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -270,9 +270,10 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
+ bool sz_valid = true;
uint64_t vram_size;
- u32 msg;
int i, ret = 0;
+ u32 msg;
if (!amdgpu_sriov_vf(adev)) {
/* It can take up to a second for IFWI init to complete on some dGPUs,
@@ -291,9 +292,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
}
}
- vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ sz_valid = false;
+ else
+ vram_size <<= 20;
- if (vram_size) {
+ if (sz_valid) {
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
adev->mman.discovery_tmr_size, false);
@@ -301,6 +306,11 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
+ if (ret)
+ dev_err(adev->dev,
+ "failed to read discovery info from memory, vram size read: %llx",
+ vram_size);
+
return ret;
}
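Note (not part of the patch): RCC_CONFIG_MEMSIZE reports VRAM in MiB, so
vram_size <<= 20 converts it to bytes; a 16 GiB board reads back 0x4000, and
0x4000 << 20 == 16 GiB. A readback of 0x0 or 0xffffffff means the MMIO access
itself failed (typically a device that fell off the bus), so the value cannot
be used to locate the discovery binary at vram_size - DISCOVERY_TMR_OFFSET.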
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 72c807f5822e..4db92e0a60da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -51,6 +51,8 @@
#include "amdgpu_reset.h"
#include "amdgpu_sched.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
#include "../amdxcp/amdgpu_xcp_drv.h"
/*
@@ -123,9 +125,10 @@
* - 3.61.0 - Contains fix for RV/PCO compute queues
* - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT
* - 3.63.0 - GFX12 display DCC supports 256B max compressed block size
+ * - 3.64.0 - Userq IP support query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 63
+#define KMS_DRIVER_MINOR 64
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -140,6 +143,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
+ AMDGPU_DEBUG_VM_USERPTR = BIT(8),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -176,7 +180,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
-bool enforce_isolation;
+int amdgpu_enforce_isolation = -1;
int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer
@@ -238,6 +242,8 @@ int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
+int amdgpu_rebar = -1; /* auto */
+int amdgpu_user_queue = -1;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@@ -1033,11 +1039,13 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
/**
- * DOC: enforce_isolation (bool)
- * enforce process isolation between graphics and compute via using the same reserved vmid.
+ * DOC: enforce_isolation (int)
+ * enforce process isolation between graphics and compute.
+ * (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)
*/
-module_param(enforce_isolation, bool, 0444);
-MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
+module_param_named(enforce_isolation, amdgpu_enforce_isolation, int, 0444);
+MODULE_PARM_DESC(enforce_isolation,
+"enforce process isolation between graphics and compute. (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)");
/**
* DOC: modeset (int)
@@ -1096,6 +1104,28 @@ MODULE_PARM_DESC(wbrf,
"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+/**
+ * DOC: rebar (int)
+ * Allow BAR resizing. Disable this to prevent the driver from attempting
+ * to resize the BAR if the GPU supports it and there is available MMIO space.
+ * Note that this just prevents the driver from resizing the BAR. The BIOS
+ * may have already resized the BAR at boot time.
+ */
+MODULE_PARM_DESC(rebar, "Resizable BAR (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(rebar, amdgpu_rebar, int, 0444);
+
+/**
+ * DOC: user_queue (int)
+ * Enable user queues on systems that support them. Possible values:
+ *
+ * - -1 = auto (ASIC specific default)
+ * - 0 = user queues disabled
+ * - 1 = user queues enabled and kernel queues enabled (if supported)
+ * - 2 = user queues enabled and kernel queues disabled
+ */
+MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
+module_param_named(user_queue, amdgpu_user_queue, int, 0444);
+
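
As a usage example, user queues without kernel queues could be requested at load time (subject to the ASIC-specific auto policy above):

    modprobe amdgpu user_queue=2
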
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -2244,6 +2274,10 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: use vram for smu pool\n");
adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM;
}
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_VM_USERPTR) {
+ pr_info("debug: VM mode debug for userptr is enabled\n");
+ adev->debug_vm_userptr = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2700,6 +2734,29 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
return 0;
}
+static int amdgpu_runtime_idle_check_userq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+done:
+ mutex_unlock(&adev->userq_mutex);
+
+ return ret;
+}
+
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2715,6 +2772,9 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
ret = amdgpu_runtime_idle_check_display(dev);
if (ret)
return ret;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+ if (ret)
+ return ret;
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -2836,12 +2896,30 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
}
ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ goto done;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+done:
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
+static int amdgpu_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+ if (fpriv) {
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ }
+
+ return drm_release(inode, filp);
+}
+
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
@@ -2893,7 +2971,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.flush = amdgpu_flush,
- .release = drm_release,
+ .release = amdgpu_drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
@@ -2940,6 +3018,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -3030,6 +3111,10 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
+ r = amdgpu_userq_fence_slab_init();
+ if (r)
+ goto error_fence;
+
DRM_INFO("amdgpu kernel modesetting enabled.\n");
amdgpu_register_atpx_handler();
amdgpu_acpi_detect();
@@ -3061,6 +3146,7 @@ static void __exit amdgpu_exit(void)
amdgpu_acpi_release();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
+ amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
new file mode 100644
index 000000000000..8b919ad3af29
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/sched.h>
+#include <drm/drm_exec.h>
+#include "amdgpu.h"
+
+#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
+#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)
+
+static const char *
+amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu_eviction_fence";
+}
+
+static const char *
+amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence *ef;
+
+ ef = container_of(f, struct amdgpu_eviction_fence, base);
+ return ef->timeline_name;
+}
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec)
+{
+ struct amdgpu_eviction_fence *old_ef, *new_ef;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ if (evf_mgr->ev_fence &&
+ !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
+ return 0;
+ /*
+ * Steps to replace eviction fence:
+ * * lock all objects in exec (caller)
+ * * create a new eviction fence
+ * * update new eviction fence in evf_mgr
+ * * attach the new eviction fence to BOs
+ * * release the old fence
+ * * unlock the objects (caller)
+ */
+ new_ef = amdgpu_eviction_fence_create(evf_mgr);
+ if (!new_ef) {
+ DRM_ERROR("Failed to create new eviction fence\n");
+ return -ENOMEM;
+ }
+
+ /* Update the eviction fence now */
+ spin_lock(&evf_mgr->ev_fence_lock);
+ old_ef = evf_mgr->ev_fence;
+ evf_mgr->ev_fence = new_ef;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ /* Attach the new fence */
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (!bo)
+ continue;
+ ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
+ if (ret) {
+ DRM_ERROR("Failed to attch new eviction fence\n");
+ goto free_err;
+ }
+ }
+
+ /* Free old fence */
+ if (old_ef)
+ dma_fence_put(&old_ef->base);
+ return 0;
+
+free_err:
+ kfree(new_ef);
+ return ret;
+}
+
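A sketch of the expected calling pattern, following the locking steps listed in the comment in amdgpu_eviction_fence_replace_fence (the drm_exec calls are the standard API; the BO being locked is hypothetical):

    struct drm_exec exec;
    int ret = 0;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
    drm_exec_until_all_locked(&exec) {
    	/* lock every BO that must carry the new fence */
    	ret = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
    	drm_exec_retry_on_contention(&exec);
    	if (ret)
    		break;
    }
    if (!ret)
    	ret = amdgpu_eviction_fence_replace_fence(evf_mgr, &exec);
    drm_exec_fini(&exec);
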
+static void
+amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
+ struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_fence_get(&ev_fence->base);
+ else
+ goto unlock;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ amdgpu_userq_evict(uq_mgr, ev_fence);
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ dma_fence_put(&ev_fence->base);
+ return;
+
+unlock:
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ if (!f)
+ return true;
+
+ ev_fence = to_ev_fence(f);
+ evf_mgr = ev_fence->evf_mgr;
+
+ schedule_delayed_work(&evf_mgr->suspend_work, 0);
+ return true;
+}
+
+static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = amdgpu_eviction_fence_get_driver_name,
+ .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
+};
+
+void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ spin_lock(&evf_mgr->ev_fence_lock);
+ dma_fence_signal(&ev_fence->base);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+}
+
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
+ if (!ev_fence)
+ return NULL;
+
+ ev_fence->evf_mgr = evf_mgr;
+ get_task_comm(ev_fence->timeline_name, current);
+ spin_lock_init(&ev_fence->lock);
+ dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
+ return ev_fence;
+}
+
+void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ /* Wait for any pending work to execute */
+ flush_delayed_work(&evf_mgr->suspend_work);
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ if (!ev_fence)
+ return;
+
+ dma_fence_wait(&ev_fence->base, false);
+
+ /* Last unref of ev_fence */
+ dma_fence_put(&ev_fence->base);
+}
+
+int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ int ret;
+
+ if (!resv)
+ return 0;
+
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to resv fence space\n");
+ return ret;
+ }
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ return 0;
+}
+
+void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct dma_fence *stub = dma_fence_get_stub();
+
+ dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
+ stub, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(stub);
+}
+
+int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ /* This needs to be done one time per open */
+ atomic_set(&evf_mgr->ev_fence_seq, 0);
+ evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
+ spin_lock_init(&evf_mgr->ev_fence_lock);
+
+ INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+ return 0;
+}
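
Taken together with the fops and open() changes elsewhere in this series, the intended per-open lifecycle is roughly (a sketch, not literal driver code):

    /* on open (amdgpu_driver_open_kms) */
    amdgpu_eviction_fence_init(&fpriv->evf_mgr);

    /* on last fd close (amdgpu_drm_release) */
    fpriv->evf_mgr.fd_closing = true;
    amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
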
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
new file mode 100644
index 000000000000..fcd867b7147d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_EV_FENCE_H_
+#define AMDGPU_EV_FENCE_H_
+
+struct amdgpu_eviction_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+};
+
+struct amdgpu_eviction_fence_mgr {
+ u64 ev_fence_ctx;
+ atomic_t ev_fence_seq;
+ spinlock_t ev_fence_lock;
+ struct amdgpu_eviction_fence *ev_fence;
+ struct delayed_work suspend_work;
+ uint8_t fd_closing;
+};
+
+/* Eviction fence helper functions */
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+int
+amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+void
+amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+int
+amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5f5c00ace96b..5fec808d7f54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -41,22 +41,6 @@
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
-/*
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
- struct dma_fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
- ktime_t start_timestamp;
-};
-
static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
@@ -151,12 +135,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
if (am_fence == NULL)
return -ENOMEM;
- fence = &am_fence->base;
- am_fence->ring = ring;
} else {
/* make use of the job-embedded fence */
- fence = &job->hw_fence;
+ am_fence = &job->hw_fence;
}
+ fence = &am_fence->base;
+ am_fence->ring = ring;
seq = ++ring->fence_drv.sync_seq;
if (job && job->job_run_counter) {
@@ -322,8 +306,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
*/
static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = from_timer(ring, t,
- fence_drv.fallback_timer);
+ struct amdgpu_ring *ring = timer_container_of(ring, t,
+ fence_drv.fallback_timer);
if (amdgpu_fence_process(ring))
DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
@@ -718,7 +702,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
* it right here or we won't be able to track them in fence_drv
* and they will remain unsignaled during sa_bo free.
*/
- job = container_of(old, struct amdgpu_job, hw_fence);
+ job = container_of(old, struct amdgpu_job, hw_fence.base);
if (!job->base.s_fence && !dma_fence_is_signaled(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
@@ -780,7 +764,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
@@ -810,7 +794,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
*/
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
@@ -845,7 +829,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence));
+ kfree(container_of(f, struct amdgpu_job, hw_fence.base));
}
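
For reference, the structure removed above embeds the dma_fence as its first member (presumably now declared in a shared header so amdgpu_job can embed a struct amdgpu_fence directly), which is why every container_of() on a job fence gains the extra .base step:

    struct amdgpu_fence {
    	struct dma_fence base;
    	/* RB, DMA, etc. */
    	struct amdgpu_ring *ring;
    	ktime_t start_timestamp;
    };

    /* recovering the job from its embedded hw fence */
    job = container_of(f, struct amdgpu_job, hw_fence.base);
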
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 69429df09477..0ecc88df7208 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,6 +36,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -44,6 +45,114 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
+static int
+amdgpu_gem_add_input_fence(struct drm_file *filp,
+ uint64_t syncobj_handles_array,
+ uint32_t num_syncobj_handles)
+{
+ struct dma_fence *fence;
+ uint32_t *syncobj_handles;
+ int ret, i;
+
+ if (!num_syncobj_handles)
+ return 0;
+
+ syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
+ size_mul(sizeof(uint32_t), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ for (i = 0; i < num_syncobj_handles; i++) {
+
+ if (!syncobj_handles[i]) {
+ ret = -EINVAL;
+ goto free_memdup;
+ }
+
+ ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
+ if (ret)
+ goto free_memdup;
+
+ dma_fence_wait(fence, false);
+
+ /* TODO: optimize async handling */
+ dma_fence_put(fence);
+ }
+
+free_memdup:
+ kfree(syncobj_handles);
+ return ret;
+}
+
+static int
+amdgpu_gem_update_timeline_node(struct drm_file *filp,
+ uint32_t syncobj_handle,
+ uint64_t point,
+ struct drm_syncobj **syncobj,
+ struct dma_fence_chain **chain)
+{
+ if (!syncobj_handle)
+ return 0;
+
+ /* Find the sync object */
+ *syncobj = drm_syncobj_find(filp, syncobj_handle);
+ if (!*syncobj)
+ return -ENOENT;
+
+ if (!point)
+ return 0;
+
+ /* Allocate the chain node */
+ *chain = dma_fence_chain_alloc();
+ if (!*chain) {
+ drm_syncobj_put(*syncobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+amdgpu_gem_update_bo_mapping(struct drm_file *filp,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation,
+ uint64_t point,
+ struct dma_fence *fence,
+ struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain)
+{
+ struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct dma_fence *last_update;
+
+ if (!syncobj)
+ return;
+
+ /* Find the last update fence */
+ switch (operation) {
+ case AMDGPU_VA_OP_MAP:
+ case AMDGPU_VA_OP_REPLACE:
+ if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
+ last_update = vm->last_update;
+ else
+ last_update = bo_va->last_pt_update;
+ break;
+ case AMDGPU_VA_OP_UNMAP:
+ case AMDGPU_VA_OP_CLEAR:
+ last_update = fence;
+ break;
+ default:
+ return;
+ }
+
+ /* Add fence to timeline */
+ if (!point)
+ drm_syncobj_replace_fence(syncobj, last_update);
+ else
+ drm_syncobj_add_point(syncobj, chain, last_update, point);
+}
+
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -184,6 +293,15 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
else
++bo_va->ref_count;
+
+ /* attach gfx eviction fence */
+ r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
+ if (r) {
+ DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
+ amdgpu_bo_unreserve(abo);
+ return r;
+ }
+
amdgpu_bo_unreserve(abo);
/* Validate and add eviction fence to DMABuf imports with dynamic
@@ -247,6 +365,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
}
+ if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+ amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
+
bo_va = amdgpu_vm_bo_find(vm, bo);
if (!bo_va || --bo_va->ref_count)
goto out_unlock;
@@ -321,10 +442,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle, initial_domain;
int r;
- /* reject DOORBELLs until userspace code to use it is available */
- if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
- return -EINVAL;
-
/* reject invalid gem flags */
if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
@@ -638,18 +755,23 @@ out:
*
* Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
+ *
+ * Returns the resulting fence if freed BO(s) got cleared from the PT,
+ * or a stub fence in case of error.
*/
-static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_va *bo_va,
- uint32_t operation)
+static struct dma_fence *
+amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
+ struct dma_fence *fence = dma_fence_get_stub();
int r;
if (!amdgpu_vm_ready(vm))
- return;
+ return fence;
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r)
goto error;
@@ -665,6 +787,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+
+ return fence;
}
/**
@@ -713,6 +837,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence_chain *timeline_chain = NULL;
+ struct dma_fence *fence;
struct drm_exec exec;
uint64_t va_flags;
uint64_t vm_size;
@@ -774,6 +901,12 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
abo = NULL;
}
+ r = amdgpu_gem_add_input_fence(filp,
+ args->input_fence_syncobj_handles,
+ args->num_syncobj_handles);
+ if (r)
+ goto error_put_gobj;
+
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
@@ -802,6 +935,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = NULL;
}
+ r = amdgpu_gem_update_timeline_node(filp,
+ args->vm_timeline_syncobj_out,
+ args->vm_timeline_point,
+ &timeline_syncobj,
+ &timeline_chain);
+ if (r)
+ goto error;
+
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
@@ -827,12 +968,24 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
- args->operation);
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+ fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+ args->operation);
+
+ if (timeline_syncobj)
+ amdgpu_gem_update_bo_mapping(filp, bo_va,
+ args->operation,
+ args->vm_timeline_point,
+ fence, timeline_syncobj,
+ timeline_chain);
+ else
+ dma_fence_put(fence);
+
+ }
error:
drm_exec_fini(&exec);
+error_put_gobj:
drm_gem_object_put(gobj);
return r;
}
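
From userspace, the new plumbing could be exercised roughly as follows (a sketch against this series' uAPI: the syncobj/timeline field names mirror the args-> accesses above, the libdrm call is standard, and all values are placeholders):

    struct drm_amdgpu_gem_va va = {
    	.handle = bo_handle,
    	.operation = AMDGPU_VA_OP_MAP,
    	.va_address = gpu_va,
    	.map_size = size,
    	/* waited on before the page tables are touched */
    	.input_fence_syncobj_handles = (uintptr_t)in_syncobjs,
    	.num_syncobj_handles = num_in_syncobjs,
    	/* signaled once the mapping is visible */
    	.vm_timeline_syncobj_out = timeline_handle,
    	.vm_timeline_point = point,
    };

    drmCommandWriteRead(fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
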
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index cf2df7790077..c5646af055ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -74,14 +75,15 @@ bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
- int me, int pipe, int queue)
+static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
+ int me, int pipe, int queue)
{
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
int bit = 0;
bit += me * adev->gfx.me.num_pipe_per_me
- * adev->gfx.me.num_queue_per_pipe;
- bit += pipe * adev->gfx.me.num_queue_per_pipe;
+ * num_queue_per_pipe;
+ bit += pipe * num_queue_per_pipe;
bit += queue;
return bit;
@@ -238,8 +240,8 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
- int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
- adev->gfx.me.num_queue_per_pipe;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+ int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
if (multipipe_policy) {
/* policy: amdgpu owns the first queue per pipe at this stage
@@ -247,9 +249,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
for (i = 0; i < max_queues_per_me; i++) {
pipe = i % adev->gfx.me.num_pipe_per_me;
queue = (i / adev->gfx.me.num_pipe_per_me) %
- adev->gfx.me.num_queue_per_pipe;
+ num_queue_per_pipe;
- set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
+ set_bit(pipe * num_queue_per_pipe + queue,
adev->gfx.me.queue_bitmap);
}
} else {
@@ -258,8 +260,9 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
}
/* update the number of active graphics rings */
- adev->gfx.num_gfx_rings =
- bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
+ if (adev->gfx.num_gfx_rings)
+ adev->gfx.num_gfx_rings =
+ bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
@@ -1351,6 +1354,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
int mode;
+ /* Only minimal precaution taken to reject requests while in reset. */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE);
@@ -1394,8 +1401,14 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return -EINVAL;
}
+ /* Don't allow a switch while under reset */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EPERM;
+
ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
+ up_read(&adev->reset_domain->sem);
+
if (ret)
return ret;
@@ -1466,6 +1479,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
job->enforce_isolation = true;
+ /* always run the cleaner shader */
+ job->run_cleaner_shader = true;
ib = &job->ibs[0];
for (i = 0; i <= ring->funcs->align_mask; ++i)
@@ -1552,6 +1567,9 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ if (adev->gfx.disable_kq)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1594,7 +1612,8 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
* Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
* feature for each GPU partition. Reading from the 'enforce_isolation'
* sysfs file returns the isolation settings for all partitions, where '0'
- * indicates disabled and '1' indicates enabled.
+ * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy
+ * mode, and '3' indicates enabled without cleaner shader.
*
* Return: The number of bytes read from the sysfs file.
*/
@@ -1629,9 +1648,12 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
* @count: The size of the input data
*
* This function allows control over the 'enforce_isolation' feature, which
- * serializes access to the graphics engine. Writing '1' or '0' to the
- * 'enforce_isolation' sysfs file enables or disables process isolation for
- * each partition. The input should specify the setting for all partitions.
+ * serializes access to the graphics engine. Writing to the 'enforce_isolation'
+ * sysfs file sets the isolation mode for each partition: '0' disables
+ * isolation, '1' enables isolation with the cleaner shader, '2' enables legacy
+ * isolation without the cleaner shader, and '3' enables process isolation
+ * without submitting the cleaner shader. The input should specify the setting
+ * for all partitions.
*
* Return: The number of bytes written to the sysfs file.
*/
@@ -1668,13 +1690,34 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
return -EINVAL;
for (i = 0; i < num_partitions; i++) {
- if (partition_values[i] != 0 && partition_values[i] != 1)
+ if (partition_values[i] != 0 &&
+ partition_values[i] != 1 &&
+ partition_values[i] != 2 &&
+ partition_values[i] != 3)
return -EINVAL;
}
mutex_lock(&adev->enforce_isolation_mutex);
- for (i = 0; i < num_partitions; i++)
- adev->enforce_isolation[i] = partition_values[i];
+ for (i = 0; i < num_partitions; i++) {
+ switch (partition_values[i]) {
+ case 0:
+ default:
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
mutex_unlock(&adev->enforce_isolation_mutex);
amdgpu_mes_update_enforce_isolation(adev);
@@ -1923,39 +1966,41 @@ void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
bool enable)
{
- mutex_lock(&adev->gfx.kfd_sch_mutex);
+ mutex_lock(&adev->gfx.userq_sch_mutex);
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
* Note that the bug may be in a different caller than the one which triggers the
* WARN_ON_ONCE.
*/
- if (WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx] == 0)) {
+ if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
goto unlock;
}
- adev->gfx.kfd_sch_req_count[idx]--;
+ adev->gfx.userq_sch_req_count[idx]--;
- if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
- adev->gfx.kfd_sch_inactive[idx]) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0 &&
+ adev->gfx.userq_sch_inactive[idx]) {
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
}
} else {
- if (adev->gfx.kfd_sch_req_count[idx] == 0) {
+ if (adev->gfx.userq_sch_req_count[idx] == 0) {
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
- if (!adev->gfx.kfd_sch_inactive[idx]) {
- amdgpu_amdkfd_stop_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = true;
+ if (!adev->gfx.userq_sch_inactive[idx]) {
+ amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = true;
}
}
- adev->gfx.kfd_sch_req_count[idx]++;
+ adev->gfx.userq_sch_req_count[idx]++;
}
unlock:
- mutex_unlock(&adev->gfx.kfd_sch_mutex);
+ mutex_unlock(&adev->gfx.userq_sch_mutex);
}
/**
@@ -2000,12 +2045,13 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
msecs_to_jiffies(1));
} else {
/* Tell KFD to resume the runqueue */
- if (adev->kfd.init_complete) {
- WARN_ON_ONCE(!adev->gfx.kfd_sch_inactive[idx]);
- WARN_ON_ONCE(adev->gfx.kfd_sch_req_count[idx]);
+ WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+
+ amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
amdgpu_amdkfd_start_sched(adev, idx);
- adev->gfx.kfd_sch_inactive[idx] = false;
- }
+ adev->gfx.userq_sch_inactive[idx] = false;
}
mutex_unlock(&adev->enforce_isolation_mutex);
}
@@ -2029,7 +2075,7 @@ amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
bool wait = false;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
/* set the initial values if nothing is set */
if (!adev->gfx.enforce_isolation_jiffies[idx]) {
adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
@@ -2096,7 +2142,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
@@ -2133,7 +2179,7 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
return;
mutex_lock(&adev->enforce_isolation_mutex);
- if (adev->enforce_isolation[idx]) {
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
@@ -2182,6 +2228,9 @@ void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
enum PP_SMC_POWER_PROFILE profile;
int r;
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
if (adev->gfx.num_gfx_rings)
profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
else
@@ -2212,11 +2261,84 @@ void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
atomic_dec(&ring->adev->gfx.total_submission_cnt);
schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
}
+/**
+ * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble setup.
+ *
+ * Return:
+ * The updated buffer index.
+ */
+u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
+{
+ u32 count = 0;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_data_parser - Parse CS data
+ *
+ * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
+ * @buffer: Output buffer that receives the PACKET3 CS section data.
+ * @count: Index in @buffer at which to continue writing.
+ *
+ * Return:
+ * The updated buffer index.
+ */
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ u32 i;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ }
+ }
+ }
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble end.
+ * @count: Index in @buffer at which to start writing the preamble end.
+ */
+void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
+{
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
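
The three helpers are meant to be composed when an IP version builds its clear-state buffer; a minimal sketch (destination pointer assumed):

    volatile u32 *buffer = adev->gfx.rlc.cs_ptr; /* assumed destination */
    u32 count;

    count = amdgpu_gfx_csb_preamble_start(buffer);
    count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
    amdgpu_gfx_csb_preamble_end(buffer, count);
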
/*
* debugfs to enable/disable gfx job submission to a specific core.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 87e862188766..08f268dab8f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -170,10 +170,46 @@ struct amdgpu_kiq {
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2
+/**
+ * amdgpu_rb_config - Configure a single Render Backend (RB)
+ *
+ * Bad RBs are fused off and there is a harvest register the driver reads to
+ * determine which RB(s) are fused off so that the driver can configure the
+ * hardware state so that nothing gets sent to them. There are also user
+ * harvest registers that the driver can program to disable additional RBs,
+ * etc., for testing purposes.
+ */
struct amdgpu_rb_config {
+ /**
+ * @rb_backend_disable:
+ *
+ * The value captured from register RB_BACKEND_DISABLE indicates if the
+ * RB backend is disabled or not.
+ */
uint32_t rb_backend_disable;
+
+ /**
+ * @user_rb_backend_disable:
+ *
+ * The value captured from register USER_RB_BACKEND_DISABLE indicates
+ * if the User RB backend is disabled or not.
+ */
uint32_t user_rb_backend_disable;
+
+ /**
+ * @raster_config:
+ *
+ * Setting up the full RB state requires two registers; this field
+ * holds the first one.
+ */
uint32_t raster_config;
+
+ /**
+ * @raster_config_1:
+ *
+ * Setting up the full RB state requires two registers; this field
+ * holds the second one.
+ */
uint32_t raster_config_1;
};
@@ -221,6 +257,13 @@ struct amdgpu_gfx_config {
uint32_t macrotile_mode_array[16];
struct gb_addr_config gb_addr_config_fields;
+
+ /**
+ * @rb_config:
+ *
+ * Matrix that keeps all the Render Backend (color and depth buffer
+ * handling) configuration on the 3D engine.
+ */
struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
/* gfx configure feature */
@@ -305,7 +348,8 @@ struct amdgpu_gfx_funcs {
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
- struct amdgpu_gfx_shadow_info *shadow_info);
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check);
enum amdgpu_gfx_partition
(*query_partition_mode)(struct amdgpu_device *adev);
int (*switch_partition_mode)(struct amdgpu_device *adev,
@@ -474,9 +518,9 @@ struct amdgpu_gfx {
bool enable_cleaner_shader;
struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
/* Mutex for synchronizing KFD scheduler operations */
- struct mutex kfd_sch_mutex;
- u64 kfd_sch_req_count[MAX_XCP];
- bool kfd_sch_inactive[MAX_XCP];
+ struct mutex userq_sch_mutex;
+ u64 userq_sch_req_count[MAX_XCP];
+ bool userq_sch_inactive[MAX_XCP];
unsigned long enforce_isolation_jiffies[MAX_XCP];
unsigned long enforce_isolation_time[MAX_XCP];
@@ -484,6 +528,9 @@ struct amdgpu_gfx {
struct delayed_work idle_work;
bool workload_profile_active;
struct mutex workload_profile_mutex;
+
+ bool disable_kq;
+ bool disable_uq;
};
struct amdgpu_gfx_ras_reg_entry {
@@ -503,7 +550,7 @@ struct amdgpu_gfx_ras_mem_id_entry {
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
-#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
@@ -550,8 +597,6 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
-int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
- int pipe, int queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
@@ -597,6 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
+u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index ecb74ccf1d90..6b0fbbb91e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -1230,6 +1230,10 @@ static ssize_t current_memory_partition_show(
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_memory_partition mode;
+ /* Only minimal precaution taken to reject requests while in reset */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
if ((mode >= ARRAY_SIZE(nps_desc)) ||
(BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index bd7fc123b8f9..80fa29c26e9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -62,6 +62,9 @@
*/
#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+/* XNACK flags */
+#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0)
+
struct firmware;
enum amdgpu_memory_partition {
@@ -301,6 +304,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
+ uint32_t xnack_flags;
uint32_t vmid0_page_table_block_size;
uint32_t vmid0_page_table_depth;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index b6cf801939aa..6e02fb9ac2f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -22,6 +22,7 @@
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
+#include <uapi/linux/kfd_ioctl.h>
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
{
@@ -46,3 +47,22 @@ int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
/* hdp ras follows amdgpu_ras_block_late_init_default for late init */
return 0;
}
+
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32((adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >>
+ 2,
+ 0);
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ } else {
+ amdgpu_ring_emit_wreg(ring,
+ (adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >>
+ 2,
+ 0);
+ }
+}
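
The new helper matches the flush_hdp callback signature, so an IP block could presumably wire it up instead of carrying a per-ASIC copy (a sketch; the funcs table shown is hypothetical):

    static const struct amdgpu_hdp_funcs example_hdp_funcs = {
    	.flush_hdp = amdgpu_hdp_generic_flush,
    	/* remaining callbacks stay per-ASIC */
    };
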
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 7b8a6152dc8d..4cfd932b7e91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -44,4 +44,6 @@ struct amdgpu_hdp {
};
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 2ea98ec60220..802743efa3b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -163,12 +163,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
init_shadow = false;
}
- if (!ring->sched.ready && !ring->is_mes_queue) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
- if (vm && !job->vmid && !ring->is_mes_queue) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 4c4e087230ac..5dd78a9cb12d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -576,8 +576,16 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&id_mgr->ids_lru);
id_mgr->reserved_use_count = 0;
- /* manage only VMIDs not used by KFD */
- id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ else if (AMDGPU_IS_MMHUB0(i) ||
+ AMDGPU_IS_MMHUB1(i))
+ id_mgr->num_ids = 16;
+ else
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
/* skip over VMID 0, since it is the system VM */
for (j = 1; j < id_mgr->num_ids; ++j) {
@@ -588,7 +596,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
}
/* alloc a default reserved vmid to enforce isolation */
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
+ if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE)
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 901f8b12c672..30f16968b578 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_ih.h"
+#include "amdgpu_reset.h"
/**
* amdgpu_ih_ring_init - initialize the IH state
@@ -227,13 +228,23 @@ restart_ih:
ih->rptr &= ih->ptr_mask;
}
- amdgpu_ih_set_rptr(adev, ih);
+ if (!ih->overflow)
+ amdgpu_ih_set_rptr(adev, ih);
+
wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev, ih);
if (wptr != ih->rptr)
- goto restart_ih;
+ if (!ih->overflow)
+ goto restart_ih;
+
+ if (ih->overflow)
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index b0a88f92cd82..7f7ea046e209 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -72,6 +72,7 @@ struct amdgpu_ih_ring {
/* For waiting on IH processing at checkpoint. */
wait_queue_head_t wait_process;
uint64_t processed_timestamp;
+ bool overflow;
};
/* return true if time stamp t2 is after t1 with 48bit wrap around */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 19ce4da285e8..13c60cac4261 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -619,6 +619,10 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
+ /* When the threshold is reached, the interrupt source may not be enabled; return -EINVAL */
+ if (amdgpu_ras_is_rma(adev))
+ return -EINVAL;
+
if (!adev->irq.installed)
return -ENOENT;
@@ -725,8 +729,8 @@ static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
- adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
- &amdgpu_hw_irqdomain_ops, adev);
+ adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
+ &amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
DRM_ERROR("GPU irq add domain failed\n");
return -ENODEV;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index acb21fc8b3ce..ddb9d3269357 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -272,8 +272,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
/* Check if any fences where initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.ops)
- f = &job->hw_fence;
+ else if (job->hw_fence.base.ops)
+ f = &job->hw_fence.base;
else
f = NULL;
@@ -290,10 +290,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
/* only put the hw fence if has embedded fence */
- if (!job->hw_fence.ops)
+ if (!job->hw_fence.base.ops)
kfree(job);
else
- dma_fence_put(&job->hw_fence);
+ dma_fence_put(&job->hw_fence.base);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -322,10 +322,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.ops)
+ if (!job->hw_fence.base.ops)
kfree(job);
else
- dma_fence_put(&job->hw_fence);
+ dma_fence_put(&job->hw_fence.base);
}
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index ce6b9ba967ff..931fed8892cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -48,7 +48,7 @@ struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct dma_fence hw_fence;
+ struct amdgpu_fence hw_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
@@ -78,6 +78,7 @@ struct amdgpu_job {
/* enforce isolation */
bool enforce_isolation;
+ bool run_cleaner_shader;
uint32_t num_ibs;
struct amdgpu_ib ibs[];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 27bfe9c8af06..d2ce7d86dbc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -45,6 +45,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amd_pcie.h"
+#include "amdgpu_userq.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -370,6 +371,26 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
return 0;
}
+static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
+ struct drm_amdgpu_info *info,
+ struct drm_amdgpu_info_uq_metadata_gfx *meta)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow = {};
+
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
+ meta->shadow_size = shadow.shadow_size;
+ meta->shadow_alignment = shadow.shadow_alignment;
+ meta->csa_size = shadow.csa_size;
+ meta->csa_alignment = shadow.csa_alignment;
+ ret = 0;
+ }
+
+ return ret;
+}
+
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
struct drm_amdgpu_info *info,
struct drm_amdgpu_info_hw_ip *result)
@@ -387,7 +408,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].sched.ready)
+ if (adev->gfx.gfx_ring[i].sched.ready &&
+ !adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -395,7 +417,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].sched.ready)
+ if (adev->gfx.compute_ring[i].sched.ready &&
+ !adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -403,7 +426,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.sched.ready)
+ if (adev->sdma.instance[i].ring.sched.ready &&
+ !adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -414,7 +438,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.sched.ready)
+ if (adev->uvd.inst[i].ring.sched.ready &&
+ !adev->uvd.inst[i].ring.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -423,7 +448,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].sched.ready)
+ if (adev->vce.ring[i].sched.ready &&
+ !adev->vce.ring[i].no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -435,7 +461,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
+ !adev->uvd.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -447,7 +474,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->vcn.inst[i].ring_dec.sched.ready)
+ if (adev->vcn.inst[i].ring_dec.sched.ready &&
+ !adev->vcn.inst[i].ring_dec.no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -460,7 +488,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
- if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+ if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
+ !adev->vcn.inst[i].ring_enc[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -475,7 +504,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
- if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
+ if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
+ !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
++num_rings;
}
ib_start_alignment = 256;
@@ -483,7 +513,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VPE:
type = AMD_IP_BLOCK_TYPE_VPE;
- if (adev->vpe.ring.sched.ready)
+ if (adev->vpe.ring.sched.ready &&
+ !adev->vpe.ring.no_user_submission)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -978,6 +1009,8 @@ out:
}
}
+ dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -1293,6 +1326,22 @@ out:
return copy_to_user(out, &gpuvm_fault,
min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_UQ_FW_AREAS: {
+ struct drm_amdgpu_info_uq_metadata meta_info = {};
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_GFX:
+ ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(out, &meta_info,
+ min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+ return ret;
+ default:
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
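
For context, a hedged userspace sketch (not part of this patch) of issuing the
new AMDGPU_INFO_UQ_FW_AREAS query; it assumes the uapi names from
include/uapi/drm/amdgpu_drm.h and omits error handling:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <amdgpu_drm.h>

    /* Ask the kernel for the GFX shadow/CSA area sizes and alignments. */
    static int query_uq_fw_areas(int fd, struct drm_amdgpu_info_uq_metadata *meta)
    {
        struct drm_amdgpu_info req;

        memset(&req, 0, sizeof(req));
        req.return_pointer = (uint64_t)(uintptr_t)meta;
        req.return_size = sizeof(*meta);
        req.query = AMDGPU_INFO_UQ_FW_AREAS;
        req.query_hw_ip.type = AMDGPU_HW_IP_GFX;

        return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req);
    }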
@@ -1376,6 +1425,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
+ r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+ if (r)
+ DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
+
+ r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
+ if (r)
+ goto error_vm;
+
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fb212f0a1136..6fa9fa11c8f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -39,42 +39,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
PAGE_SIZE);
}
-static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
- int ip_type, uint64_t *doorbell_index)
-{
- unsigned int offset, found;
- struct amdgpu_mes *mes = &adev->mes;
-
- if (ip_type == AMDGPU_RING_TYPE_SDMA)
- offset = adev->doorbell_index.sdma_engine[0];
- else
- offset = 0;
-
- found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
- if (found >= mes->num_mes_dbs) {
- DRM_WARN("No doorbell available\n");
- return -ENOSPC;
- }
-
- set_bit(found, mes->doorbell_bitmap);
-
- /* Get the absolute doorbell index on BAR */
- *doorbell_index = mes->db_start_dw_offset + found * 2;
- return 0;
-}
-
-static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
- uint32_t doorbell_index)
-{
- unsigned int old, rel_index;
- struct amdgpu_mes *mes = &adev->mes;
-
- /* Find the relative index of the doorbell in this object */
- rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
- old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
- WARN_ON(!old);
-}
-
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
int i;
@@ -126,7 +90,7 @@ static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
int amdgpu_mes_init(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, num_pipes;
adev->mes.adev = adev;
@@ -142,19 +106,52 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = 0xffffff00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+
+ num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
+ if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
+ dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_GFX_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(12, 0, 0))
+ /*
+ * GFX v12 has only one GFX pipe, but 8 queues in it.
+ * GFX pipe 0 queue 0 is used by the kernel queue.
+ * Set GFX pipe 0 queues 1-7 for MES scheduling:
+ * mask = 1111 1110b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
+ else
+ /*
+ * GFX pipe 0 queue 0 is used by the kernel queue.
+ * Set GFX pipe 0 queue 1 for MES scheduling:
+ * mask = 10b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
+ }
+
+ num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
+ if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
+ dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
- if (i >= (adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec))
+ if (i >= num_pipes)
break;
- adev->mes.compute_hqd_mask[i] = 0xc;
+ adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
}
- for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
- adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;
+ num_pipes = adev->sdma.num_instances;
+ if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
+ dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
- if (i >= adev->sdma.num_instances)
+ if (i >= num_pipes)
break;
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
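
As a reading aid (illustrative, not in the patch): each HQD mask is a per-pipe
bitmap of hardware queue slots handed to MES, with bit 0 kept back for the
kernel queue unless kernel queues are disabled. A minimal sketch of the rule
the GFX cases above encode (the compute case additionally reserves queue 1,
hence 0xC rather than 0xE):

    /* Bitmap of HQD slots MES may schedule on one pipe (num_queues < 32).
     * Queue 0 stays with the kernel queue unless kernel queues are disabled.
     */
    static u32 mes_gfx_hqd_mask(unsigned int num_queues, bool disable_kq)
    {
        u32 all = (1u << num_queues) - 1;

        return disable_kq ? all : (all & ~1u);
    }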
@@ -240,244 +237,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->mes.mutex_hidden);
}
-static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
-{
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
-}
-
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm)
-{
- struct amdgpu_mes_process *process;
- int r;
-
- /* allocate the mes process buffer */
- process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
- if (!process) {
- DRM_ERROR("no more memory to create mes process\n");
- return -ENOMEM;
- }
-
- /* allocate the process context bo and map it */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_memory;
- }
- memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* add the mes process to idr list */
- r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to lock pasid=%d\n", pasid);
- goto clean_up_ctx;
- }
-
- INIT_LIST_HEAD(&process->gang_list);
- process->vm = vm;
- process->pasid = pasid;
- process->process_quantum = adev->mes.default_process_quantum;
- process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
-clean_up_memory:
- kfree(process);
- return r;
-}
-
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang, *tmp1;
- struct amdgpu_mes_queue *queue, *tmp2;
- struct mes_remove_queue_input queue_input;
- unsigned long flags;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_WARN("pasid %d doesn't exist\n", pasid);
- amdgpu_mes_unlock(&adev->mes);
- return;
- }
-
- /* Remove all queues from hardware */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes,
- &queue_input);
- if (r)
- DRM_WARN("failed to remove hardware queue\n");
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- }
-
- idr_remove(&adev->mes.pasid_idr, pasid);
- amdgpu_mes_unlock(&adev->mes);
-
- /* free all memory allocated by the process */
- list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
- /* free all queues in the gang */
- list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
- amdgpu_mes_queue_free_mqd(queue);
- list_del(&queue->list);
- kfree(queue);
- }
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- list_del(&gang->list);
- kfree(gang);
-
- }
- amdgpu_bo_free_kernel(&process->proc_ctx_bo,
- &process->proc_ctx_gpu_addr,
- &process->proc_ctx_cpu_ptr);
- kfree(process);
-}
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id)
-{
- struct amdgpu_mes_process *process;
- struct amdgpu_mes_gang *gang;
- int r;
-
- /* allocate the mes gang buffer */
- gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
- if (!gang) {
- return -ENOMEM;
- }
-
- /* allocate the gang context bo and map it to cpu space */
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
- if (r) {
- DRM_ERROR("failed to allocate process context bo\n");
- goto clean_up_mem;
- }
- memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- process = idr_find(&adev->mes.pasid_idr, pasid);
- if (!process) {
- DRM_ERROR("pasid %d doesn't exist\n", pasid);
- r = -EINVAL;
- goto clean_up_ctx;
- }
-
- /* add the mes gang to idr list */
- r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
- GFP_KERNEL);
- if (r < 0) {
- DRM_ERROR("failed to allocate idr for gang\n");
- goto clean_up_ctx;
- }
-
- gang->gang_id = r;
- *gang_id = r;
-
- INIT_LIST_HEAD(&gang->queue_list);
- gang->process = process;
- gang->priority = gprops->priority;
- gang->gang_quantum = gprops->gang_quantum ?
- gprops->gang_quantum : adev->mes.default_gang_quantum;
- gang->global_priority_level = gprops->global_priority_level;
- gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
- list_add_tail(&gang->list, &process->gang_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_ctx:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-clean_up_mem:
- kfree(gang);
- return r;
-}
-
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
-{
- struct amdgpu_mes_gang *gang;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
-
- if (!list_empty(&gang->queue_list)) {
- DRM_ERROR("queue list is not empty\n");
- amdgpu_mes_unlock(&adev->mes);
- return -EBUSY;
- }
-
- idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
- list_del(&gang->list);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
- &gang->gang_ctx_gpu_addr,
- &gang->gang_ctx_cpu_ptr);
-
- kfree(gang);
-
- return 0;
-}
-
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
struct mes_suspend_gang_input input;
@@ -526,304 +285,6 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
return r;
}
-static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- u32 mqd_size = mqd_mgr->mqd_size;
- int r;
-
- r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &q->mqd_obj,
- &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
- if (r) {
- dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
- return r;
- }
- memset(q->mqd_cpu_ptr, 0, mqd_size);
-
- r = amdgpu_bo_reserve(q->mqd_obj, false);
- if (unlikely(r != 0))
- goto clean_up;
-
- return 0;
-
-clean_up:
- amdgpu_bo_free_kernel(&q->mqd_obj,
- &q->mqd_gpu_addr,
- &q->mqd_cpu_ptr);
- return r;
-}
-
-static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
- struct amdgpu_mes_queue *q,
- struct amdgpu_mes_queue_properties *p)
-{
- struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
- struct amdgpu_mqd_prop mqd_prop = {0};
-
- mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
- mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
- mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
- mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
- mqd_prop.queue_size = p->queue_size;
- mqd_prop.use_doorbell = true;
- mqd_prop.doorbell_index = p->doorbell_off;
- mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
- mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
- mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
- mqd_prop.hqd_active = false;
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- mutex_lock(&adev->srbm_mutex);
- amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
- }
-
- mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
-
- if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
- p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-
- amdgpu_bo_unreserve(q->mqd_obj);
-}
-
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id)
-{
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_add_queue_input queue_input;
- unsigned long flags;
- int r;
-
- memset(&queue_input, 0, sizeof(struct mes_add_queue_input));
-
- /* allocate the mes queue buffer */
- queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
- if (!queue) {
- DRM_ERROR("Failed to allocate memory for queue\n");
- return -ENOMEM;
- }
-
- /* Allocate the queue mqd */
- r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
- if (r)
- goto clean_up_memory;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- r = -EINVAL;
- goto clean_up_mqd;
- }
-
- /* add the mes gang to idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
- GFP_ATOMIC);
- if (r < 0) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- goto clean_up_mqd;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- *queue_id = queue->queue_id = r;
-
- /* allocate a doorbell index for the queue */
- r = amdgpu_mes_kernel_doorbell_get(adev,
- qprops->queue_type,
- &qprops->doorbell_off);
- if (r)
- goto clean_up_queue_id;
-
- /* initialize the queue mqd */
- amdgpu_mes_queue_init_mqd(adev, queue, qprops);
-
- /* add hw queue to mes */
- queue_input.process_id = gang->process->pasid;
-
- queue_input.page_table_base_addr =
- adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
- adev->gmc.vram_start;
-
- queue_input.process_va_start = 0;
- queue_input.process_va_end =
- (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
- queue_input.process_quantum = gang->process->process_quantum;
- queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
- queue_input.gang_quantum = gang->gang_quantum;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
- queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
- queue_input.gang_global_priority_level = gang->global_priority_level;
- queue_input.doorbell_offset = qprops->doorbell_off;
- queue_input.mqd_addr = queue->mqd_gpu_addr;
- queue_input.wptr_addr = qprops->wptr_gpu_addr;
- queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
- queue_input.queue_type = qprops->queue_type;
- queue_input.paging = qprops->paging;
- queue_input.is_kfd_process = 0;
-
- r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
- if (r) {
- DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
- qprops->doorbell_off);
- goto clean_up_doorbell;
- }
-
- DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
- "queue type=%d, doorbell=0x%llx\n",
- gang->process->pasid, gang_id, qprops->queue_type,
- qprops->doorbell_off);
-
- queue->ring = qprops->ring;
- queue->doorbell_off = qprops->doorbell_off;
- queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
- queue->queue_type = qprops->queue_type;
- queue->paging = qprops->paging;
- queue->gang = gang;
- queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
- list_add_tail(&queue->list, &gang->queue_list);
-
- amdgpu_mes_unlock(&adev->mes);
- return 0;
-
-clean_up_doorbell:
- amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off);
-clean_up_queue_id:
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
- idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-clean_up_mqd:
- amdgpu_mes_unlock(&adev->mes);
- amdgpu_mes_queue_free_mqd(queue);
-clean_up_memory:
- kfree(queue);
- return r;
-}
-
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_remove_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
-
- idr_remove(&adev->mes.queue_id_idr, queue_id);
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
- queue_id);
-
- list_del(&queue->list);
- amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off);
- amdgpu_mes_unlock(&adev->mes);
-
- amdgpu_mes_queue_free_mqd(queue);
- kfree(queue);
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
-{
- unsigned long flags;
- struct amdgpu_mes_queue *queue;
- struct amdgpu_mes_gang *gang;
- struct mes_reset_queue_input queue_input;
- int r;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
-
- /* remove the mes gang from idr list */
- spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
-
- queue = idr_find(&adev->mes.queue_id_idr, queue_id);
- if (!queue) {
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
- amdgpu_mes_unlock(&adev->mes);
- DRM_ERROR("queue id %d doesn't exist\n", queue_id);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
-
- DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n",
- queue->doorbell_off);
-
- gang = queue->gang;
- queue_input.doorbell_offset = queue->doorbell_off;
- queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
-
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue, queue id = %d\n",
- queue_id);
-
- amdgpu_mes_unlock(&adev->mes);
-
- return 0;
-}
-
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid)
-{
- struct mes_reset_queue_input queue_input;
- int r;
-
- queue_input.queue_type = queue_type;
- queue_input.use_mmio = true;
- queue_input.me_id = me_id;
- queue_input.pipe_id = pipe_id;
- queue_input.queue_id = queue_id;
- queue_input.vmid = vmid;
- r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
- if (r)
- DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
- queue_id);
- return r;
-}
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -839,7 +300,9 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
queue_input.wptr_addr = ring->wptr_gpu_addr;
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
DRM_ERROR("failed to map legacy queue\n");
@@ -862,7 +325,9 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
queue_input.trail_fence_addr = gpu_addr;
queue_input.trail_fence_data = seq;
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
DRM_ERROR("failed to unmap legacy queue\n");
@@ -874,7 +339,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio)
{
- struct mes_reset_legacy_queue_input queue_input;
+ struct mes_reset_queue_input queue_input;
int r;
memset(&queue_input, 0, sizeof(queue_input));
@@ -888,8 +353,13 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
queue_input.use_mmio = use_mmio;
+ queue_input.is_kq = true;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ queue_input.legacy_gfx = true;
- r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
DRM_ERROR("failed to reset legacy queue\n");
@@ -905,7 +375,7 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
uint32_t *read_val_ptr;
if (amdgpu_device_wb_get(adev, &addr_offset)) {
- DRM_ERROR("critical bug! too many mes readers\n");
+ dev_err(adev->dev, "critical bug! too many mes readers\n");
goto error;
}
read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
@@ -915,13 +385,15 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
op_input.read_reg.buffer_addr = read_val_gpu_addr;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes rreg is not supported!\n");
+ dev_err(adev->dev, "mes rreg is not supported!\n");
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to read reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
else
val = *(read_val_ptr);
@@ -942,14 +414,16 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
op_input.write_reg.reg_value = val;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes wreg is not supported!\n");
+ dev_err(adev->dev, "mes wreg is not supported!\n");
r = -EINVAL;
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to write reg (0x%x)\n", reg);
+ dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
error:
return r;
@@ -969,39 +443,16 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
- r = -EINVAL;
- goto error;
- }
-
- r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
- if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
-
-error:
- return r;
-}
-
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- struct mes_misc_op_input op_input;
- int r;
-
- op_input.op = MES_MISC_OP_WRM_REG_WAIT;
- op_input.wrm_reg.reg0 = reg;
- op_input.wrm_reg.ref = val;
- op_input.wrm_reg.mask = mask;
-
- if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes reg wait is not supported!\n");
+ dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
r = -EINVAL;
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reg_write_reg_wait\n");
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
@@ -1075,508 +526,12 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
return r;
}
-static void
-amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_mes_queue_properties *props)
-{
- props->queue_type = ring->funcs->type;
- props->hqd_base_gpu_addr = ring->gpu_addr;
- props->rptr_gpu_addr = ring->rptr_gpu_addr;
- props->wptr_gpu_addr = ring->wptr_gpu_addr;
- props->wptr_mc_addr =
- ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
- props->queue_size = ring->ring_size;
- props->eop_gpu_addr = ring->eop_gpu_addr;
- props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
- props->paging = false;
- props->ring = ring;
-}
-
-#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
-do { \
- if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].slots[id_offs]); \
- else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].ring); \
- else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].ib); \
- else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \
- return offsetof(struct amdgpu_mes_ctx_meta_data, \
- _eng[ring->idx].padding); \
-} while(0)
-
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
-{
- switch (ring->funcs->type) {
- case AMDGPU_RING_TYPE_GFX:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
- break;
- case AMDGPU_RING_TYPE_SDMA:
- DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
- break;
- default:
- break;
- }
-
- WARN_ON(1);
- return -EINVAL;
-}
-
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang *gang;
- struct amdgpu_mes_queue_properties qprops = {0};
- int r, queue_id, pasid;
-
- /*
- * Avoid taking any other locks under MES lock to avoid circular
- * lock dependencies.
- */
- amdgpu_mes_lock(&adev->mes);
- gang = idr_find(&adev->mes.gang_id_idr, gang_id);
- if (!gang) {
- DRM_ERROR("gang id %d doesn't exist\n", gang_id);
- amdgpu_mes_unlock(&adev->mes);
- return -EINVAL;
- }
- pasid = gang->process->pasid;
-
- ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
- if (!ring) {
- amdgpu_mes_unlock(&adev->mes);
- return -ENOMEM;
- }
-
- ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->is_mes_queue = true;
- ring->mes_ctx = ctx_data;
- ring->idx = idx;
- ring->no_scheduler = true;
-
- if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
- int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- compute[ring->idx].mec_hpd);
- ring->eop_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- }
-
- switch (queue_type) {
- case AMDGPU_RING_TYPE_GFX:
- ring->funcs = adev->gfx.gfx_ring[0].funcs;
- ring->me = adev->gfx.gfx_ring[0].me;
- ring->pipe = adev->gfx.gfx_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- ring->funcs = adev->gfx.compute_ring[0].funcs;
- ring->me = adev->gfx.compute_ring[0].me;
- ring->pipe = adev->gfx.compute_ring[0].pipe;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ring->funcs = adev->sdma.instance[0].ring.funcs;
- break;
- default:
- BUG();
- }
-
- r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r) {
- amdgpu_mes_unlock(&adev->mes);
- goto clean_up_memory;
- }
-
- amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
-
- dma_fence_wait(gang->process->vm->last_update, false);
- dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
- amdgpu_mes_unlock(&adev->mes);
-
- r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
- if (r)
- goto clean_up_ring;
-
- ring->hw_queue_id = queue_id;
- ring->doorbell_index = qprops.doorbell_off;
-
- if (queue_type == AMDGPU_RING_TYPE_GFX)
- sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
- sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
- queue_id);
- else if (queue_type == AMDGPU_RING_TYPE_SDMA)
- sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
- queue_id);
- else
- BUG();
-
- *out = ring;
- return 0;
-
-clean_up_ring:
- amdgpu_ring_fini(ring);
-clean_up_memory:
- kfree(ring);
- return r;
-}
-
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring)
- return;
-
- amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
- timer_delete_sync(&ring->fence_drv.fallback_timer);
- amdgpu_ring_fini(ring);
- kfree(ring);
-}
-
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio)
{
return adev->mes.aggregated_doorbells[prio];
}
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- int r;
-
- r = amdgpu_bo_create_kernel(adev,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
- return r;
- }
-
- if (!ctx_data->meta_data_obj)
- return -ENOMEM;
-
- memset(ctx_data->meta_data_ptr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data));
-
- return 0;
-}
-
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
-{
- if (ctx_data->meta_data_obj)
- amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
- &ctx_data->meta_data_mc_addr,
- &ctx_data->meta_data_ptr);
-}
-
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_sync sync;
- struct drm_exec exec;
- int r;
-
- amdgpu_sync_create(&sync);
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto error_fini_exec;
- }
-
- bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
- if (!bo_va) {
- DRM_ERROR("failed to create bo_va for meta data BO\n");
- r = -ENOMEM;
- goto error_fini_exec;
- }
-
- r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
- sizeof(struct amdgpu_mes_ctx_meta_data),
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
- goto error_del_bo_va;
- }
-
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r) {
- DRM_ERROR("failed to do vm_bo_update on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, bo_va->last_pt_update, GFP_KERNEL);
-
- r = amdgpu_vm_update_pdes(adev, vm, false);
- if (r) {
- DRM_ERROR("failed to update pdes on meta data\n");
- goto error_del_bo_va;
- }
- amdgpu_sync_fence(&sync, vm->last_update, GFP_KERNEL);
-
- amdgpu_sync_wait(&sync, false);
- drm_exec_fini(&exec);
-
- amdgpu_sync_free(&sync);
- ctx_data->meta_data_va = bo_va;
- return 0;
-
-error_del_bo_va:
- amdgpu_vm_bo_del(adev, bo_va);
-
-error_fini_exec:
- drm_exec_fini(&exec);
- amdgpu_sync_free(&sync);
- return r;
-}
-
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
- struct amdgpu_bo *bo = ctx_data->meta_data_obj;
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct dma_fence *fence;
- struct drm_exec exec;
- long r;
-
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- r = drm_exec_lock_obj(&exec,
- &ctx_data->meta_data_obj->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
-
- r = amdgpu_vm_lock_pd(vm, &exec, 0);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(r))
- goto out_unlock;
- }
-
- amdgpu_vm_bo_del(adev, bo_va);
- if (!amdgpu_vm_ready(vm))
- goto out_unlock;
-
- r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
- &fence);
- if (r)
- goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- fence = NULL;
- }
-
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (r || !fence)
- goto out_unlock;
-
- dma_fence_wait(fence, false);
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
-out_unlock:
- if (unlikely(r < 0))
- dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
- drm_exec_fini(&exec);
-
- return r;
-}
-
-static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
- int pasid, int *gang_id,
- int queue_type, int num_queue,
- struct amdgpu_ring **added_rings,
- struct amdgpu_mes_ctx_data *ctx_data)
-{
- struct amdgpu_ring *ring;
- struct amdgpu_mes_gang_properties gprops = {0};
- int r, j;
-
- /* create a gang for the process */
- gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.gang_quantum = adev->mes.default_gang_quantum;
- gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
- gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
-
- r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
- if (r) {
- DRM_ERROR("failed to add gang\n");
- return r;
- }
-
- /* create queues for the gang */
- for (j = 0; j < num_queue; j++) {
- r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
- ctx_data, &ring);
- if (r) {
- DRM_ERROR("failed to add ring\n");
- break;
- }
-
- DRM_INFO("ring %s was added\n", ring->name);
- added_rings[j] = ring;
- }
-
- return 0;
-}
-
-static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
-{
- struct amdgpu_ring *ring;
- int i, r;
-
- for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
- ring = added_rings[i];
- if (!ring)
- continue;
-
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
-
- r = amdgpu_ring_test_ib(ring, 1000 * 10);
- if (r) {
- DRM_DEV_ERROR(ring->adev->dev,
- "ring %s ib test failed (%d)\n",
- ring->name, r);
- return r;
- } else
- DRM_INFO("ring %s ib test pass\n", ring->name);
- }
-
- return 0;
-}
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev)
-{
- struct amdgpu_vm *vm = NULL;
- struct amdgpu_mes_ctx_data ctx_data = {0};
- struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
- int gang_ids[3] = {0};
- int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
- { AMDGPU_RING_TYPE_COMPUTE, 1 },
- { AMDGPU_RING_TYPE_SDMA, 1} };
- int i, r, pasid, k = 0;
-
- pasid = amdgpu_pasid_alloc(16);
- if (pasid < 0) {
- dev_warn(adev->dev, "No more PASIDs available!");
- pasid = 0;
- }
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm) {
- r = -ENOMEM;
- goto error_pasid;
- }
-
- r = amdgpu_vm_init(adev, vm, -1);
- if (r) {
- DRM_ERROR("failed to initialize vm\n");
- goto error_pasid;
- }
-
- r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
- if (r) {
- DRM_ERROR("failed to alloc ctx meta data\n");
- goto error_fini;
- }
-
- ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
- r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
- if (r) {
- DRM_ERROR("failed to map ctx meta data\n");
- goto error_vm;
- }
-
- r = amdgpu_mes_create_process(adev, pasid, vm);
- if (r) {
- DRM_ERROR("failed to create MES process\n");
- goto error_vm;
- }
-
- for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
- /* On GFX v10.3, fw hasn't supported to map sdma queue. */
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
- IP_VERSION(10, 3, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) <
- IP_VERSION(11, 0, 0) &&
- queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
- continue;
-
- r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
- &gang_ids[i],
- queue_types[i][0],
- queue_types[i][1],
- &added_rings[k],
- &ctx_data);
- if (r)
- goto error_queues;
-
- k += queue_types[i][1];
- }
-
- /* start ring test and ib test for MES queues */
- amdgpu_mes_test_queues(added_rings);
-
-error_queues:
- /* remove all queues */
- for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
- if (!added_rings[i])
- continue;
- amdgpu_mes_remove_ring(adev, added_rings[i]);
- }
-
- for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
- if (!gang_ids[i])
- continue;
- amdgpu_mes_remove_gang(adev, gang_ids[i]);
- }
-
- amdgpu_mes_destroy_process(adev, pasid);
-
-error_vm:
- amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);
-
-error_fini:
- amdgpu_vm_fini(adev, vm);
-
-error_pasid:
- if (pasid)
- amdgpu_pasid_free(pasid);
-
- amdgpu_mes_ctx_free_meta_data(&ctx_data);
- kfree(vm);
- return 0;
-}
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
const struct mes_firmware_header_v1_0 *mes_hdr;
@@ -1690,7 +645,9 @@ static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
goto error;
}
+ amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
if (r)
dev_err(adev->dev, "failed to change_config.\n");
@@ -1705,7 +662,7 @@ int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i])
+ if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
else
r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index da2c9a8cb3e0..c0d2c195fe2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -111,8 +111,8 @@ struct amdgpu_mes {
uint32_t vmid_mask_gfxhub;
uint32_t vmid_mask_mmhub;
- uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
+ uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
@@ -149,19 +149,6 @@ struct amdgpu_mes {
};
-struct amdgpu_mes_process {
- int pasid;
- struct amdgpu_vm *vm;
- uint64_t pd_gpu_addr;
- struct amdgpu_bo *proc_ctx_bo;
- uint64_t proc_ctx_gpu_addr;
- void *proc_ctx_cpu_ptr;
- uint64_t process_quantum;
- struct list_head gang_list;
- uint32_t doorbell_index;
- struct mutex doorbell_lock;
-};
-
struct amdgpu_mes_gang {
int gang_id;
int priority;
@@ -248,18 +235,6 @@ struct mes_remove_queue_input {
uint64_t gang_context_addr;
};
-struct mes_reset_queue_input {
- uint32_t doorbell_offset;
- uint64_t gang_context_addr;
- bool use_mmio;
- uint32_t queue_type;
- uint32_t me_id;
- uint32_t pipe_id;
- uint32_t queue_id;
- uint32_t xcc_id;
- uint32_t vmid;
-};
-
struct mes_map_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
@@ -291,7 +266,7 @@ struct mes_resume_gang_input {
uint64_t gang_context_addr;
};
-struct mes_reset_legacy_queue_input {
+struct mes_reset_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
bool use_mmio;
@@ -301,6 +276,8 @@ struct mes_reset_legacy_queue_input {
uint64_t mqd_addr;
uint64_t wptr_addr;
uint32_t vmid;
+ bool legacy_gfx;
+ bool is_kq;
};
enum mes_misc_opcode {
@@ -388,9 +365,6 @@ struct amdgpu_mes_funcs {
int (*misc_op)(struct amdgpu_mes *mes,
struct mes_misc_op_input *input);
- int (*reset_legacy_queue)(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input);
-
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
};
@@ -398,32 +372,13 @@ struct amdgpu_mes_funcs {
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
-
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
-int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
- struct amdgpu_vm *vm);
-void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
-
-int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
- struct amdgpu_mes_gang_properties *gprops,
- int *gang_id);
-int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
-
int amdgpu_mes_suspend(struct amdgpu_device *adev);
int amdgpu_mes_resume(struct amdgpu_device *adev);
-int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
- struct amdgpu_mes_queue_properties *qprops,
- int *queue_id);
-int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
-int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
- int me_id, int pipe_id, int queue_id, int vmid);
-
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
@@ -438,8 +393,6 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
- uint32_t val, uint32_t mask);
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
@@ -451,27 +404,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
bool trap_en);
int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr);
-int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
- int queue_type, int idx,
- struct amdgpu_mes_ctx_data *ctx_data,
- struct amdgpu_ring **out);
-void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio);
-int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_mes_ctx_data *ctx_data);
-int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
- struct amdgpu_mes_ctx_data *ctx_data);
-
-int amdgpu_mes_self_test(struct amdgpu_device *adev);
-
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0b9987781f76..73403744331a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1044,7 +1044,8 @@ static const char * const amdgpu_vram_names[] = {
"GDDR6",
"DDR5",
"LPDDR4",
- "LPDDR5"
+ "LPDDR5",
+ "HBM3E"
};
/**
@@ -1644,7 +1645,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
-
+ /* Dump the GEM object's resv fences; trylock so the debugfs dump never blocks */
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ dma_resv_describe(bo->tbo.base.resv, m);
+ dma_resv_unlock(bo->tbo.base.resv);
+ }
seq_puts(m, "\n");
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index df5d5dbd7f0f..c14f63cefe67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2214,7 +2214,8 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
- dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+ dev_info(psp->adev->dev,
+ "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
return 0;
}
@@ -3521,8 +3522,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_sos.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3798,8 +3803,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_ta.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -4185,6 +4194,110 @@ const struct attribute_group amdgpu_flash_attr_group = {
.is_visible = amdgpu_flash_attr_is_visible,
};
+#if defined(CONFIG_DEBUG_FS)
+static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet;
+ int ret;
+
+ /* serialize the open() file calling */
+ if (!mutex_trylock(&adev->psp.mutex))
+ return -EBUSY;
+
+ /*
+ * Make sure only one userspace process has the dump open at a time, so
+ * that only one buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed.
+ * Consider the case where one process tries to open the file while
+ * another is still reading or releasing it. This also removes the need
+ * for a mutex in the read() and release() callbacks.
+ */
+ if (adev->psp.spirom_dump_trip) {
+ mutex_unlock(&adev->psp.mutex);
+ return -EBUSY;
+ }
+
+ bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
+ if (!bo_triplet) {
+ mutex_unlock(&adev->psp.mutex);
+ return -ENOMEM;
+ }
+
+ ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo_triplet->bo,
+ &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ if (ret)
+ goto rel_trip;
+
+ ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
+ if (ret)
+ goto rel_bo;
+
+ adev->psp.spirom_dump_trip = bo_triplet;
+ mutex_unlock(&adev->psp.mutex);
+ return 0;
+rel_bo:
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+rel_trip:
+ kfree(bo_triplet);
+ mutex_unlock(&adev->psp.mutex);
+ dev_err(adev->dev, "Trying IFWI dump fails, err = %d\n", ret);
+ return ret;
+}
+
+static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (!bo_triplet)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf,
+ size,
+ pos, bo_triplet->cpu_addr,
+ AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+}
+
+static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (bo_triplet) {
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ kfree(bo_triplet);
+ }
+
+ adev->psp.spirom_dump_trip = NULL;
+ return 0;
+}
+
+static const struct file_operations psp_dump_spirom_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psp_read_spirom_debugfs_open,
+ .read = psp_read_spirom_debugfs_read,
+ .release = psp_read_spirom_debugfs_release,
+ .llseek = default_llseek,
+};
+#endif
+
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
+ adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+#endif
+}
+
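
A hedged usage sketch (not part of the patch): reading the dump node from
userspace; the debugfs path assumes DRM primary minor 0:

    #include <fcntl.h>
    #include <unistd.h>

    /* Copy the SPIROM dump (AMD_VBIOS_FILE_MAX_SIZE_B * 2 bytes) to a file. */
    static int save_spirom_dump(const char *out_path)
    {
        char buf[4096];
        ssize_t n;
        int in = open("/sys/kernel/debug/dri/0/psp_spirom_dump", O_RDONLY);
        int out = open(out_path, O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (in < 0 || out < 0)
            return -1;
        while ((n = read(in, buf, sizeof(buf))) > 0)
            if (write(out, buf, n) != n) {
                n = -1;
                break;
            }
        close(in);
        close(out);
        return n < 0 ? -1 : 0;
    }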
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8d5acc415d38..428adc7f741d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -39,6 +39,18 @@
#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
+#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -107,6 +119,7 @@ enum psp_reg_prog_id {
PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */
PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */
PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */
+ PSP_REG_MMHUB_L1_TLB_CNTL = 25,
PSP_REG_LAST
};
@@ -137,11 +150,14 @@ struct psp_funcs {
int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
int (*vbflash_stat)(struct psp_context *psp);
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
bool (*is_aux_sos_load_required)(struct psp_context *psp);
bool (*is_reload_needed)(struct psp_context *psp);
+ int (*reg_program_no_ring)(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
};
struct ta_funcs {
@@ -319,6 +335,14 @@ struct psp_runtime_scpm_entry {
enum psp_runtime_scpm_authentication scpm_status;
};
+#if defined(CONFIG_DEBUG_FS)
+struct spirom_bo {
+ struct amdgpu_bo *bo;
+ uint64_t mc_addr;
+ void *cpu_addr;
+};
+#endif
+
struct psp_context {
struct amdgpu_device *adev;
struct psp_ring km_ring;
@@ -406,6 +430,9 @@ struct psp_context {
char *vbflash_tmp_buf;
size_t vbflash_image_size;
bool vbflash_done;
+#if defined(CONFIG_DEBUG_FS)
+ struct spirom_bo *spirom_dump_trip;
+#endif
};
struct amdgpu_psp_funcs {
@@ -464,6 +491,10 @@ struct amdgpu_psp_funcs {
((psp)->funcs->update_spirom ? \
(psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+#define psp_dump_spirom(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->dump_spirom ? \
+ (psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
#define psp_vbflash_status(psp) \
((psp)->funcs->vbflash_stat ? \
(psp)->funcs->vbflash_stat((psp)) : -EINVAL)
@@ -475,6 +506,10 @@ struct amdgpu_psp_funcs {
#define psp_is_aux_sos_load_required(psp) \
((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+#define psp_reg_program_no_ring(psp, val, id) \
+ ((psp)->funcs->reg_program_no_ring ? \
+ (psp)->funcs->reg_program_no_ring((psp), val, id) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -569,5 +604,9 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 443409d4f4b0..de0944947eaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1498,6 +1498,9 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
!amdgpu_ras_get_aca_debug_mode(adev))
return -EOPNOTSUPP;
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
((smu_funcs && smu_funcs->set_debug_mode) ||
@@ -2161,7 +2164,7 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
- /**
+ /*
* If the current interrupt is caused by a non-fatal RAS error, skip
* check for fatal error. For fatal errors, FED status of all devices
* in XGMI hive gets set when the first device gets fatal error
@@ -2856,6 +2859,15 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
return -EINVAL;
}
} else {
+ if (bps[0].address == 0) {
+ /* for certain old eeprom data the MCA address is not stored;
+ * calculate it from the PA
+ */
+ if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+ }
+
if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
if (nps == AMDGPU_NPS1_PARTITION_MODE)
memcpy(err_data->err_addr, bps,
@@ -2883,9 +2895,22 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
} else {
- if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
- return -EINVAL;
+ if (bps->address) {
+ if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
+ return -EINVAL;
+ } else {
+ /* for certain old eeprom data the MCA address is not stored;
+ * calculate it from the PA
+ */
+ if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+
+ if (amdgpu_ras_mca2pa(adev, bps, err_data))
+ return -EOPNOTSUPP;
+ }
}
+
return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
adev->umc.retire_unit);
}
@@ -2900,7 +2925,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
&adev->psp.ras_context.ras->eeprom_control;
enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i;
+ uint32_t i = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
@@ -2921,34 +2946,36 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
if (from_rom) {
- for (i = 0; i < pages; i++) {
- if (control->ras_num_recs - i >= adev->umc.retire_unit) {
- if ((bps[i].address == bps[i + 1].address) &&
- (bps[i].mem_channel == bps[i + 1].mem_channel)) {
- //deal with retire_unit records a time
- ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
- &bps[i], &err_data, nps);
- if (ret)
- goto free;
- i += (adev->umc.retire_unit - 1);
+ /* there are no PA records in V3, so skip PA record processing */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ /* deal with retire_unit records a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ control->ras_num_bad_pages -= adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ break;
+ }
} else {
break;
}
- } else {
- break;
}
}
for (; i < pages; i++) {
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- goto free;
+ control->ras_num_bad_pages -= adev->umc.retire_unit;
}
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
-free:
if (from_rom)
kfree(err_data.err_addr);
mutex_unlock(&con->recovery_lock);
@@ -3037,21 +3064,28 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
dev_err(adev->dev, "Failed to load EEPROM table records!");
} else {
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
- for (i = 0; i < control->ras_num_recs; i++) {
- if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
- if ((bps[i].address == bps[i + 1].address) &&
- (bps[i].mem_channel == bps[i + 1].mem_channel)) {
- control->ras_num_pa_recs += adev->umc.retire_unit;
- i += (adev->umc.retire_unit - 1);
+ /* In V3 there are no pa recs, and some cases (when address == 0) may be
+ * parsed as pa recs, so add a version check to avoid that.
+ */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
} else {
- control->ras_num_mca_recs +=
- (control->ras_num_recs - i);
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
break;
}
- } else {
- control->ras_num_mca_recs += (control->ras_num_recs - i);
- break;
}
+ } else {
+ control->ras_num_mca_recs = control->ras_num_recs;
}
}
@@ -3460,6 +3494,10 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
control->ras_num_pa_recs = control->ras_num_recs;
+ if (adev->umc.ras &&
+ adev->umc.ras->get_retire_flip_bits)
+ adev->umc.ras->get_retire_flip_bits(adev);
+
if (control->ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
@@ -3691,7 +3729,8 @@ static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev
*/
if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
- amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
1 << AMDGPU_RAS_BLOCK__JPEG);
else
@@ -3793,10 +3832,12 @@ init_ras_enabled_flag:
adev->ras_hw_enabled & amdgpu_ras_mask;
/* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
- adev->aca.is_enabled =
- (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
- amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
- amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->aca.is_enabled =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ }
/* bad page feature is not applicable to specific app platform */
if (adev->gmc.is_app_apu &&
@@ -4479,8 +4520,11 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
u64 event_id;
- if (amdgpu_ras_mark_ras_event(adev, type))
+ if (amdgpu_ras_mark_ras_event(adev, type)) {
+ dev_err(adev->dev,
+ "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
return;
+ }
event_id = amdgpu_ras_acquire_event_id(adev, type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 0ea7cfaf3587..2c58e09e56f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -418,6 +418,7 @@ static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control
hdr->version = RAS_TABLE_VER_V2_1;
return;
case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
hdr->version = RAS_TABLE_VER_V3;
return;
default:
@@ -1392,17 +1393,39 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
__decode_table_header_from_buf(hdr, buf);
- if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ dev_info(adev->dev, "Creating a new EEPROM table");
+ return amdgpu_ras_eeprom_reset_table(control);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
control->ras_record_offset = RAS_RECORD_START_V2_1;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
- } else {
+ break;
+ case RAS_TABLE_VER_V1:
control->ras_num_recs = RAS_NUM_RECS(hdr);
control->ras_record_offset = RAS_RECORD_START;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
}
- control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ dev_err(adev->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
control->ras_num_mca_recs = 0;
control->ras_num_pa_recs = 0;
return 0;
@@ -1413,7 +1436,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_device *adev = to_amdgpu_device(control);
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- int res;
+ int res = 0;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1494,10 +1517,6 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
"User defined threshold is set, runtime service will be halt when threshold is reached\n");
}
}
- } else {
- DRM_INFO("Creating a new EEPROM table");
-
- res = amdgpu_ras_eeprom_reset_table(control);
}
return res < 0 ? res : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 59acdbfe28d8..426834806fbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -187,14 +187,10 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
#define amdgpu_ring_get_gpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : \
- (ring->adev->wb.gpu_addr + offset * 4))
+ (ring->adev->wb.gpu_addr + offset * 4)
#define amdgpu_ring_get_cpu_addr(ring, offset) \
- (ring->is_mes_queue ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- (&ring->adev->wb.wb[offset]))
+ (&ring->adev->wb.wb[offset])
/**
* amdgpu_ring_init - init driver ring struct.
@@ -243,57 +239,42 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->sched_score = sched_score;
ring->vmid_wait = dma_fence_get_stub();
- if (!ring->is_mes_queue) {
- ring->idx = adev->num_rings++;
- adev->rings[ring->idx] = ring;
- }
+ ring->idx = adev->num_rings++;
+ adev->rings[ring->idx] = ring;
r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
- if (ring->is_mes_queue) {
- ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RPTR_OFFS);
- ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_WPTR_OFFS);
- ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_FENCE_OFFS);
- ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
- ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_COND_EXE_OFFS);
- } else {
- r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
+ return r;
+ }
- r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+ return r;
}
ring->fence_gpu_addr =
@@ -353,18 +334,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->cached_rptr = 0;
/* Allocate ring buffer */
- if (ring->is_mes_queue) {
- int offset = 0;
-
- BUG_ON(ring->ring_size > PAGE_SIZE*4);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_RING_OFFS);
- ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- amdgpu_ring_clear_ring(ring);
-
- } else if (ring->ring_obj == NULL) {
+ if (ring->ring_obj == NULL) {
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
@@ -401,32 +371,26 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
/* Not to finish a ring which is not initialized */
- if (!(ring->adev) ||
- (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
ring->sched.ready = false;
- if (!ring->is_mes_queue) {
- amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_device_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
- amdgpu_bo_free_kernel(&ring->ring_obj,
- &ring->gpu_addr,
- (void **)&ring->ring);
- } else {
- kfree(ring->fence_drv.fences);
- }
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
- if (!ring->is_mes_queue)
- ring->adev->rings[ring->idx] = NULL;
+ ring->adev->rings[ring->idx] = NULL;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bb2b66385223..e1f25218943a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -127,6 +127,22 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+/*
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+ struct dma_fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+ ktime_t start_timestamp;
+};
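
The CPU-side contract described above can be checked with the generic
dma_fence API; a minimal sketch, assuming a 100 ms wait budget (the helper
name is illustrative):

static int example_wait_fence_idle(struct dma_fence *f)
{
	long r;

	if (!f || dma_fence_is_signaled(f))
		return 0;	/* GPU already passed this pipeline point */

	r = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
	if (r == 0)
		return -ETIMEDOUT;	/* fence did not signal in time */
	return r < 0 ? r : 0;
}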
+
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
@@ -164,8 +180,24 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
+ /**
+ * @type:
+ *
+ * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER
+ * use ring buffers. The type field just identifies which component the
+ * ring buffer is associated with.
+ */
enum amdgpu_ring_type type;
uint32_t align_mask;
+
+ /**
+ * @nop:
+ *
+ * Every IP block in amdgpu has its own no-op instructions (e.g., GFX 10
+ * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP,
+ * etc.). This field receives the specific no-op for the component
+ * that initializes the ring.
+ */
u32 nop;
bool support_64bit_ptrs;
bool no_user_fence;
@@ -241,6 +273,9 @@ struct amdgpu_ring_funcs {
bool (*is_guilty)(struct amdgpu_ring *ring);
};
+/**
+ * amdgpu_ring - Holds ring information
+ */
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
@@ -252,13 +287,61 @@ struct amdgpu_ring {
unsigned rptr_offs;
u64 rptr_gpu_addr;
volatile u32 *rptr_cpu_addr;
+
+ /**
+ * @wptr:
+ *
+ * This is part of the ring buffer implementation and represents the
+ * write pointer. The wptr tracks how far the host has written into
+ * the ring.
+ */
u64 wptr;
+
+ /**
+ * @wptr_old:
+ *
+ * Before @wptr is updated with a new value, the old value is
+ * usually stored in @wptr_old.
+ */
u64 wptr_old;
unsigned ring_size;
+
+ /**
+ * @max_dw:
+ *
+ * Maximum number of DWords for ring allocation. This information is
+ * provided at ring initialization time, and each IP block can
+ * specify its own value. Check the callers of amdgpu_ring_init()
+ * to see the maximum size per block.
+ */
unsigned max_dw;
+
+ /**
+ * @count_dw:
+ *
+ * This value starts at the maximum number of DWords supported by the
+ * ring and is updated as commands are written to the ring.
+ */
int count_dw;
uint64_t gpu_addr;
+
+ /**
+ * @ptr_mask:
+ *
+ * Some IPs support 64-bit pointers and others only 32-bit; this
+ * behavior is component-specific and defined by the field
+ * support_64bit_ptrs. If the IP block supports 64-bit pointers, the
+ * mask 0xffffffffffffffff is used; otherwise, this value falls back
+ * to buf_mask. Note that this field is used to keep wptr within a
+ * valid range.
+ */
uint64_t ptr_mask;
+
+ /**
+ * @buf_mask:
+ *
+ * Buffer mask is used to keep the wptr count within the ring
+ * boundary. It is initialized at ring buffer initialization time
+ * and is defined as (ring_size / 4) - 1.
+ */
uint32_t buf_mask;
u32 idx;
u32 xcc_id;
@@ -276,6 +359,13 @@ struct amdgpu_ring {
bool use_pollmem;
unsigned wptr_offs;
u64 wptr_gpu_addr;
+
+ /**
+ * @wptr_cpu_addr:
+ *
+ * This is the CPU address of the wptr writeback slot. It is used
+ * to commit changes to the GPU.
+ */
volatile u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
@@ -297,20 +387,15 @@ struct amdgpu_ring {
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
bool no_scheduler;
+ bool no_user_submission;
int hw_prio;
unsigned num_hw_submission;
atomic_t *sched_score;
- /* used for mes */
- bool is_mes_queue;
- uint32_t hw_queue_id;
- struct amdgpu_mes_ctx_data *mes_ctx;
-
bool is_sw_ring;
unsigned int entry_index;
/* store the cached rptr to restore after reset */
uint64_t cached_rptr;
-
};
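
The @wptr, @buf_mask, @ptr_mask and @count_dw fields documented above
cooperate on every emitted dword; a sketch of that interaction
(illustrative only, not a replacement for the real ring write helpers):

static inline void example_ring_emit(struct amdgpu_ring *ring, u32 v)
{
	/* store the dword, wrapping inside the ring storage */
	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	/* keep wptr within its valid range (see @ptr_mask) */
	ring->wptr &= ring->ptr_mask;
	/* one fewer dword available until the next commit */
	ring->count_dw--;
}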
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
@@ -435,15 +520,6 @@ static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
ring->ring[offset] = cur - offset;
}
-#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
-
-#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \
- (ring->is_mes_queue && ring->mes_ctx ? \
- (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
- NULL)
-
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index 03ed14663107..7e7d6c3865bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -135,7 +135,8 @@ static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
{
- struct amdgpu_ring_mux *mux = from_timer(mux, t, resubmit_timer);
+ struct amdgpu_ring_mux *mux = timer_container_of(mux, t,
+ resubmit_timer);
if (!spin_trylock(&mux->lock)) {
amdgpu_ring_mux_schedule_resubmit(mux);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index fce22d3f816b..c210625be220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -237,6 +237,20 @@ struct amdgpu_rlc_funcs {
void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
int (*init)(struct amdgpu_device *adev);
u32 (*get_csb_size)(struct amdgpu_device *adev);
+
+ /**
+ * @get_csb_buffer: Get the clear state to be put into the hardware.
+ *
+ * The parameter adev is used to get the CS data and other gfx info,
+ * and buffer is the RLC CS pointer.
+ *
+ * Sometimes, user space puts a clear-state request into the command
+ * buffer; this function provides the clear state that gets put into
+ * the hardware. Note that the driver programs the Clear State
+ * Indirect Buffer (CSB) explicitly when it sets up the kernel rings,
+ * and it also provides a pointer to it which the firmware uses to
+ * load the clear state in some cases.
+ */
void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 529c9696c2f3..9b54a1ece447 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -26,6 +26,8 @@
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -76,22 +78,14 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
return 0;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
+ r = amdgpu_sdma_get_index_from_ring(ring, &index);
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- sdma[ring->idx].sdma_meta_data);
- csa_mc_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- r = amdgpu_sdma_get_index_from_ring(ring, &index);
-
- if (r || index > 31)
- csa_mc_addr = 0;
- else
- csa_mc_addr = amdgpu_csa_vaddr(adev) +
- AMDGPU_CSA_SDMA_OFFSET +
- index * AMDGPU_CSA_SDMA_SIZE;
- }
+ if (r || index > 31)
+ csa_mc_addr = 0;
+ else
+ csa_mc_addr = amdgpu_csa_vaddr(adev) +
+ AMDGPU_CSA_SDMA_OFFSET +
+ index * AMDGPU_CSA_SDMA_SIZE;
return csa_mc_addr;
}
@@ -537,45 +531,51 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin
return false;
}
-/**
- * amdgpu_sdma_register_on_reset_callbacks - Register SDMA reset callbacks
- * @funcs: Pointer to the callback structure containing pre_reset and post_reset functions
- *
- * This function allows KFD and AMDGPU to register their own callbacks for handling
- * pre-reset and post-reset operations for engine reset. These are needed because engine
- * reset will stop all queues on that engine.
- */
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs)
+static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
{
- if (!funcs)
- return;
-
- /* Ensure the reset_callback_list is initialized */
- if (!adev->sdma.reset_callback_list.next) {
- INIT_LIST_HEAD(&adev->sdma.reset_callback_list);
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+ int r = -EOPNOTSUPP;
+
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(4, 4, 2):
+ case IP_VERSION(4, 4, 4):
+ case IP_VERSION(4, 4, 5):
+ /* For SDMA 4.x, use the existing DPM interface for backward
+ * compatibility; we need to convert the logical instance ID to the
+ * physical instance ID before reset.
+ */
+ r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 7):
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+ break;
+ default:
+ break;
}
- /* Initialize the list node in the callback structure */
- INIT_LIST_HEAD(&funcs->list);
- /* Add the callback structure to the global list */
- list_add_tail(&funcs->list, &adev->sdma.reset_callback_list);
+ return r;
}
/**
* amdgpu_sdma_reset_engine - Reset a specific SDMA engine
* @adev: Pointer to the AMDGPU device
- * @instance_id: ID of the SDMA engine instance to reset
- *
- * This function performs the following steps:
- * 1. Calls all registered pre_reset callbacks to allow KFD and AMDGPU to save their state.
- * 2. Resets the specified SDMA engine instance.
- * 3. Calls all registered post_reset callbacks to allow KFD and AMDGPU to restore their state.
+ * @instance_id: Logical ID of the SDMA engine instance to reset
*
* Returns: 0 on success, or a negative error code on failure.
*/
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
{
- struct sdma_on_reset_funcs *funcs;
int ret = 0;
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
@@ -597,38 +597,18 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
page_sched_stopped = true;
}
- /* Invoke all registered pre_reset callbacks */
- list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
- if (funcs->pre_reset) {
- ret = funcs->pre_reset(adev, instance_id);
- if (ret) {
- dev_err(adev->dev,
- "beforeReset callback failed for instance %u: %d\n",
- instance_id, ret);
- goto exit;
- }
- }
- }
+ if (sdma_instance->funcs->stop_kernel_queue)
+ sdma_instance->funcs->stop_kernel_queue(gfx_ring);
/* Perform the SDMA reset for the specified instance */
- ret = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ ret = amdgpu_sdma_soft_reset(adev, instance_id);
if (ret) {
- dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
+ dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id);
goto exit;
}
- /* Invoke all registered post_reset callbacks */
- list_for_each_entry(funcs, &adev->sdma.reset_callback_list, list) {
- if (funcs->post_reset) {
- ret = funcs->post_reset(adev, instance_id);
- if (ret) {
- dev_err(adev->dev,
- "afterReset callback failed for instance %u: %d\n",
- instance_id, ret);
- goto exit;
- }
- }
- }
+ if (sdma_instance->funcs->start_kernel_queue)
+ sdma_instance->funcs->start_kernel_queue(gfx_ring);
exit:
/* Restart the scheduler's work queue for the GFX and page rings
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 47d56fd0589f..e5f8951bbb6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -50,6 +50,12 @@ enum amdgpu_sdma_irq {
#define NUM_SDMA(x) hweight32(x)
+struct amdgpu_sdma_funcs {
+ int (*stop_kernel_queue)(struct amdgpu_ring *ring);
+ int (*start_kernel_queue)(struct amdgpu_ring *ring);
+ int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id);
+};
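
Each SDMA generation is expected to populate these callbacks per instance
during init; a hedged sketch with illustrative handler names (the real
handlers live in the per-generation files):

static const struct amdgpu_sdma_funcs example_sdma_funcs = {
	.stop_kernel_queue = &example_sdma_stop_queue,
	.start_kernel_queue = &example_sdma_start_queue,
	.soft_reset_kernel_queue = &example_sdma_soft_reset,
};

/* wired up once per instance, e.g.:
 * adev->sdma.instance[i].funcs = &example_sdma_funcs;
 */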
+
struct amdgpu_sdma_instance {
/* SDMA firmware */
const struct firmware *fw;
@@ -68,7 +74,7 @@ struct amdgpu_sdma_instance {
/* track guilty state of GFX and PAGE queues */
bool gfx_guilty;
bool page_guilty;
-
+ const struct amdgpu_sdma_funcs *funcs;
};
enum amdgpu_sdma_ras_memory_id {
@@ -103,17 +109,11 @@ struct amdgpu_sdma_ras {
struct amdgpu_ras_block_object ras_block;
};
-struct sdma_on_reset_funcs {
- int (*pre_reset)(struct amdgpu_device *adev, uint32_t instance_id);
- int (*post_reset)(struct amdgpu_device *adev, uint32_t instance_id);
- /* Linked list node to store this structure in a list; */
- struct list_head list;
-};
-
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
+ struct amdgpu_irq_src fence_irq;
struct amdgpu_irq_src ecc_irq;
struct amdgpu_irq_src vm_hole_irq;
struct amdgpu_irq_src doorbell_invalid_irq;
@@ -131,6 +131,8 @@ struct amdgpu_sdma {
uint32_t *ip_dump;
uint32_t supported_reset;
struct list_head reset_callback_list;
+ bool no_user_submission;
+ bool disable_uq;
};
/*
@@ -170,7 +172,6 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-void amdgpu_sdma_register_on_reset_callbacks(struct amdgpu_device *adev, struct sdma_on_reset_funcs *funcs);
int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index e22cb2b5cd92..d45ebfb642ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -45,7 +45,11 @@
*/
static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
{
- return AMDGPU_VA_RESERVED_SEQ64_START(adev);
+ u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev);
+
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
}
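
Sign extension turns the reserved top-of-VA offset into a canonical high
address; the mapping path below strips the hole bits again before handing
the VA to amdgpu_vm_bo_map(). A sketch of the round trip, assuming only
the helpers visible in this file:

u64 va = amdgpu_seq64_get_va_base(adev);	/* sign-extended canonical VA */
u64 map_addr = va & AMDGPU_GMC_HOLE_MASK;	/* hole bits stripped for mapping */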
/**
@@ -63,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
+ u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
- u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -88,9 +92,11 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error;
}
- seq64_addr = amdgpu_seq64_get_va_base(adev);
+ seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- AMDGPU_PTE_READABLE);
+ va_flags);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
@@ -133,7 +139,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
vm = &fpriv->vm;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
@@ -156,6 +162,7 @@ error:
*
* @adev: amdgpu_device pointer
* @va: VA to access the seq in process address space
+ * @gpu_addr: GPU address to access the seq
* @cpu_addr: CPU address to access the seq
*
* Alloc a 64 bit memory from seq64 pool.
@@ -163,7 +170,8 @@ error:
* Returns:
* 0 on success or a negative error code on failure
*/
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va,
+ u64 *gpu_addr, u64 **cpu_addr)
{
unsigned long bit_pos;
@@ -172,7 +180,12 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
return -ENOSPC;
__set_bit(bit_pos, adev->seq64.used);
+
*va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+
+ if (gpu_addr)
+ *gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr;
+
*cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
return 0;
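
A hedged caller sketch for the extended allocator: va is the per-process
VA of the slot, gpu_addr the kernel GPU address and cpu_addr the CPU
mapping (error handling trimmed):

u64 va, gpu_addr;
u64 *cpu_addr;

if (!amdgpu_seq64_alloc(adev, &va, &gpu_addr, &cpu_addr))
	*cpu_addr = 0;	/* initialize the freshly allocated 64-bit slot */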
@@ -233,7 +246,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
*/
r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->seq64.sbo, NULL,
+ &adev->seq64.sbo, &adev->seq64.gpu_addr,
(void **)&adev->seq64.cpu_base_addr);
if (r) {
dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
index 4203b2ab318d..26a249aaaee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -32,13 +32,14 @@
struct amdgpu_seq64 {
struct amdgpu_bo *sbo;
u32 num_sem;
+ u64 gpu_addr;
u64 *cpu_base_addr;
DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
};
void amdgpu_seq64_fini(struct amdgpu_device *adev);
int amdgpu_seq64_init(struct amdgpu_device *adev);
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 *gpu_addr, u64 **cpu_addr);
void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 5576ed0b508f..d6ae9974c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -249,9 +249,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (resv == NULL)
return -EINVAL;
-
- /* TODO: Use DMA_RESV_USAGE_READ here */
- dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
+ /* Implicitly sync only to KERNEL, WRITE and READ */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 53b71e9d8076..9c5df35f05b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2081,6 +2081,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
+ amdgpu_doorbell_fini(adev);
+
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3d9e9fdc10b4..eaddc441c51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -30,6 +30,10 @@
#define AMDGPU_UCODE_NAME_MAX (128)
+static const struct kicker_device kicker_device_list[] = {
+ {0x744B, 0x00},
+};
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -765,8 +769,10 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
static struct attribute *fw_attrs[] = {
&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -779,8 +785,9 @@ static struct attribute *fw_attrs[] = {
&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
- &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
- &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
+ &dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
+ &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
+ &dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
NULL
};
@@ -1384,6 +1391,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return NULL;
}
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+ if (adev->pdev->device == kicker_device_list[i].device &&
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
+ }
+
+ return false;
+}
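
A hedged sketch of how a consumer might branch on this helper when
requesting firmware; the "_kicker" file naming is an assumption for
illustration, not taken from this patch:

if (amdgpu_is_kicker_fw(adev))
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker.bin", ucode_prefix);
else
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);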
+
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4eedd92f000b..6349aad6da35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -25,6 +25,8 @@
#include "amdgpu_socbb.h"
+#define RS64_FW_UC_START_ADDR_LO 0x3000
+
struct common_firmware_header {
uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
uint32_t header_size_bytes; /* size of just the header in bytes */
@@ -600,6 +602,12 @@ struct amdgpu_firmware {
void *fw_buf_ptr;
uint64_t fw_buf_mc;
+ uint32_t pldm_version;
+};
+
+struct kicker_device{
+ unsigned short device;
+ u8 revision;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
@@ -629,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 0a1ef95b2866..c92b8794aa73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -529,6 +529,7 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
pfns[i] = err_data.err_addr[i].retired_page;
}
ret = i;
+ adev->umc.err_addr_cnt = err_data.err_addr_cnt;
out:
kfree(err_data.err_addr);
@@ -561,3 +562,26 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
return 0;
}
+
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
+{
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+ int ret;
+
+ /* nps: the memory partition mode the pa belongs to */
+ addr_in.pa.pa = pa | ((uint64_t)nps << 58);
+ addr_in.addr_type = TA_RAS_PA_TO_MCA;
+ ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
+ if (ret) {
+ dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx",
+ pa);
+
+ return ret;
+ }
+
+ *mca = addr_out.ma.err_addr;
+
+ return 0;
+}
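
A hedged usage sketch matching the RAS conversion paths earlier in this
patch: when an old EEPROM record carries no MCA address (address == 0),
it is recomputed from the retired page (rec is a hypothetical record
pointer):

if (rec->address == 0) {
	u64 pa = rec->retired_page << AMDGPU_GPU_PAGE_SHIFT;

	/* assume the record was saved in NPS1 mode, as the callers above do */
	if (amdgpu_umc_pa2mca(adev, pa, &rec->address,
			      AMDGPU_NPS1_PARTITION_MODE))
		return -EINVAL;
}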
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 857693bcd8d4..ec203f9e5ffa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -78,6 +78,18 @@
#define UMC_NPS_SHIFT 40
#define UMC_NPS_MASK 0xffULL
+/* three column bits and one row bit in MCA address flip
+ * in bad page retirement
+ */
+#define RETIRE_FLIP_BITS_NUM 4
+
+struct amdgpu_umc_flip_bits {
+ uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
@@ -100,6 +112,7 @@ struct amdgpu_umc_ras {
bool dump_addr);
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
+ void (*get_retire_flip_bits)(struct amdgpu_device *adev);
};
struct amdgpu_umc_funcs {
@@ -130,6 +143,10 @@ struct amdgpu_umc {
/* active mask for umc node instance */
unsigned long active_mask;
+
+ struct amdgpu_umc_flip_bits flip_bits;
+
+ unsigned long err_addr_cnt;
};
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
@@ -172,4 +189,6 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
uint64_t err_addr, uint32_t ch, uint32_t umc,
uint32_t node, uint32_t socket,
struct ta_ras_query_address_output *addr_out, bool dump_addr);
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
new file mode 100644
index 000000000000..295e7186e156
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -0,0 +1,924 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_auth.h>
+#include <drm/drm_exec.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
+{
+ int i;
+ u32 userq_ip_mask = 0;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ userq_ip_mask |= (1 << i);
+ }
+
+ return userq_ip_mask;
+}
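
Callers treat the return value as a bitmask indexed by the AMDGPU_HW_IP_*
enum; a minimal usage sketch:

u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
bool gfx_uq_ok = ip_mask & (1 << AMDGPU_HW_IP_GFX);	/* GFX user queues? */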
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->unmap(uq_mgr, queue);
+ if (r)
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ else
+ queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
+ return r;
+}
+
+static int
+amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+ r = userq_funcs->map(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+ return r;
+}
+
+static void
+amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct dma_fence *f = queue->last_fence;
+ int ret;
+
+ if (f && !dma_fence_is_signaled(f)) {
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0)
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ }
+}
+
+static void
+amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ int queue_id)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ amdgpu_userq_fence_driver_free(queue);
+ idr_remove(&uq_mgr->userq_idr, queue_id);
+ kfree(queue);
+}
+
+int
+amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ /* Resume all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
+ ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return ret;
+}
+
+static struct amdgpu_usermode_queue *
+amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
+{
+ return idr_find(&uq_mgr->userq_idr, qid);
+}
+
+void
+amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+ /* Flush any pending resume work to create ev_fence */
+ flush_delayed_work(&uq_mgr->resume_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ mutex_unlock(&uq_mgr->userq_mutex);
+ /*
+ * Looks like there was no pending resume work,
+ * add one now to create a valid eviction fence
+ */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+ goto retry;
+ }
+}
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ bp.type = ttm_bo_type_kernel;
+ bp.size = size;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(userq_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
+ goto free_obj;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
+ goto unresv;
+ }
+
+ r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
+ goto unresv;
+ }
+
+ userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
+ amdgpu_bo_unreserve(userq_obj->obj);
+ memset(userq_obj->cpu_ptr, 0, size);
+ return 0;
+
+unresv:
+ amdgpu_bo_unreserve(userq_obj->obj);
+
+free_obj:
+ amdgpu_bo_unref(&userq_obj->obj);
+ return r;
+}
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj)
+{
+ amdgpu_bo_kunmap(userq_obj->obj);
+ amdgpu_bo_unref(&userq_obj->obj);
+}
+
+uint64_t
+amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp)
+{
+ uint64_t index;
+ struct drm_gem_object *gobj;
+ struct amdgpu_userq_obj *db_obj = db_info->db_obj;
+ int r, db_size;
+
+ gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
+ if (gobj == NULL) {
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ drm_gem_object_put(gobj);
+
+ r = amdgpu_bo_reserve(db_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
+ goto unref_bo;
+ }
+
+ /* Pin the BO before generating the index, unpin in queue destroy */
+ r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
+ goto unresv_bo;
+ }
+
+ switch (db_info->queue_type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_HW_IP_COMPUTE:
+ case AMDGPU_HW_IP_DMA:
+ db_size = sizeof(u64);
+ break;
+
+ case AMDGPU_HW_IP_VCN_ENC:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
+ break;
+
+ case AMDGPU_HW_IP_VPE:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
+ break;
+
+ default:
+ drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n",
+ db_info->queue_type);
+ r = -EINVAL;
+ goto unpin_bo;
+ }
+
+ index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
+ db_info->doorbell_offset, db_size);
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev),
+ "[Usermode queues] doorbell index=%lld\n", index);
+ amdgpu_bo_unreserve(db_obj->obj);
+ return index;
+
+unpin_bo:
+ amdgpu_bo_unpin(db_obj->obj);
+unresv_bo:
+ amdgpu_bo_unreserve(db_obj->obj);
+unref_bo:
+ amdgpu_bo_unref(&db_obj->obj);
+ return r;
+}
+
+static int
+amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ int r = 0;
+
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ queue = amdgpu_userq_find(uq_mgr, queue_id);
+ if (!queue) {
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return -EINVAL;
+ }
+ amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
+ r = amdgpu_bo_reserve(queue->db_obj.obj, true);
+ if (!r) {
+ amdgpu_bo_unpin(queue->db_obj.obj);
+ amdgpu_bo_unreserve(queue->db_obj.obj);
+ }
+ amdgpu_bo_unref(&queue->db_obj.obj);
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
+ mutex_unlock(&uq_mgr->userq_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+ int priority)
+{
+ if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+static int
+amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_db_info db_info;
+ bool skip_map_queue;
+ uint64_t index;
+ int qid, r = 0;
+ int priority =
+ (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
+
+ /* Usermode queues are only supported for GFX, compute and SDMA IPs for now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ r = amdgpu_userq_priority_permit(filp, priority);
+ if (r)
+ return r;
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /*
+ * There could be a situation that we are creating a new queue while
+ * the other queues under this UQ_mgr are suspended. So if there is any
+ * resume work pending, wait for it to get done.
+ *
+ * This will also make sure we have a valid eviction fence ready to be used.
+ */
+ mutex_lock(&adev->userq_mutex);
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ uq_funcs = adev->userq_funcs[args->in.ip_type];
+ if (!uq_funcs) {
+ drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
+ if (!queue) {
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
+ r = -ENOMEM;
+ goto unlock;
+ }
+ queue->doorbell_handle = args->in.doorbell_handle;
+ queue->queue_type = args->in.ip_type;
+ queue->vm = &fpriv->vm;
+ queue->priority = priority;
+
+ db_info.queue_type = queue->queue_type;
+ db_info.doorbell_handle = queue->doorbell_handle;
+ db_info.db_obj = &queue->db_obj;
+ db_info.doorbell_offset = args->in.doorbell_offset;
+
+ /* Convert relative doorbell offset into absolute doorbell index */
+ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
+ if (index == (uint64_t)-EINVAL) {
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
+ kfree(queue);
+ goto unlock;
+ }
+
+ queue->doorbell_index = index;
+ xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
+ r = amdgpu_userq_fence_driver_alloc(adev, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
+ goto unlock;
+ }
+
+ r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
+ amdgpu_userq_fence_driver_free(queue);
+ kfree(queue);
+ goto unlock;
+ }
+
+ qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
+ if (qid < 0) {
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+ /* don't map the queue if scheduling is halted */
+ if (adev->userq_halt_for_enforce_isolation &&
+ ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
+ skip_map_queue = true;
+ else
+ skip_map_queue = false;
+ if (!skip_map_queue) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
+ idr_remove(&uq_mgr->userq_idr, qid);
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ goto unlock;
+ }
+ }
+
+ args->out.queue_id = qid;
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+
+ return r;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
+ return -EINVAL;
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
+ if (args->in.ip_type ||
+ args->in.doorbell_handle ||
+ args->in.doorbell_offset ||
+ args->in.flags ||
+ args->in.queue_va ||
+ args->in.queue_size ||
+ args->in.rptr_va ||
+ args->in.wptr_va ||
+ args->in.mqd ||
+ args->in.mqd_size)
+ return -EINVAL;
+ r = amdgpu_userq_destroy(filp, args->in.queue_id);
+ if (r)
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
+ break;
+
+ default:
+ drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Resume all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int ret;
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ DRM_ERROR("Fail to validate\n");
+
+ return ret;
+}
+
+static int
+amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_va *bo_va;
+ struct ww_acquire_ctx *ticket;
+ struct drm_exec exec;
+ struct amdgpu_bo *bo;
+ struct dma_resv *resv;
+ bool clear, unlock;
+ int ret = 0;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret)) {
+ drm_file_err(uq_mgr->file, "Failed to lock PD\n");
+ goto unlock_all;
+ }
+
+ /* Lock the done list */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+ bo = bo_va->base.bo;
+ if (!bo)
+ continue;
+
+ ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
+ }
+
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ /* Per VM BOs never need to be cleared in the page tables */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ goto unlock_all;
+ spin_lock(&vm->status_lock);
+ }
+
+ ticket = &exec.ticket;
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
+ base.vm_status);
+ resv = bo_va->base.bo->tbo.base.resv;
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ ret = amdgpu_userq_validate_vm_bo(NULL, bo);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BO\n");
+ goto unlock_all;
+ }
+
+ /* Try to reserve the BO to avoid clearing its ptes */
+ if (!adev->debug_vm && dma_resv_trylock(resv)) {
+ clear = false;
+ unlock = true;
+ /* The caller is already holding the reservation lock */
+ } else if (dma_resv_locking_ctx(resv) == ticket) {
+ clear = false;
+ unlock = false;
+ /* Somebody else is using the BO right now */
+ } else {
+ clear = true;
+ unlock = false;
+ }
+
+ ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+
+ if (unlock)
+ dma_resv_unlock(resv);
+ if (ret)
+ goto unlock_all;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
+
+unlock_all:
+ drm_exec_fini(&exec);
+ return ret;
+}
+
+static void amdgpu_userq_restore_worker(struct work_struct *work)
+{
+ struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ int ret;
+
+ flush_work(&fpriv->evf_mgr.suspend_work.work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ ret = amdgpu_userq_validate_bos(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
+ goto unlock;
+ }
+
+ ret = amdgpu_userq_restore_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static int
+amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Try to unmap all the queues in this process ctx */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Couldn't unmap all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id, ret;
+
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ struct dma_fence *f = queue->last_fence;
+
+ if (!f || dma_fence_is_signaled(f))
+ continue;
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0) {
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+void
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ int ret;
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+
+ /* Wait for any pending userqueue fence work to finish */
+ ret = amdgpu_userq_wait_for_signal(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
+ return;
+ }
+
+ ret = amdgpu_userq_evict_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
+ return;
+ }
+
+ /* Signal current eviction fence */
+ amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
+
+ if (evf_mgr->fd_closing) {
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ return;
+ }
+
+ /* Schedule a resume work */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+}
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev)
+{
+ mutex_init(&userq_mgr->userq_mutex);
+ idr_init_base(&userq_mgr->userq_idr, 1);
+ userq_mgr->adev = adev;
+ userq_mgr->file = file_priv;
+
+ mutex_lock(&adev->userq_mutex);
+ list_add(&userq_mgr->list, &adev->userq_mgr_list);
+ mutex_unlock(&adev->userq_mutex);
+
+ INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
+ return 0;
+}
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
+{
+ struct amdgpu_device *adev = userq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ uint32_t queue_id;
+
+ cancel_delayed_work_sync(&userq_mgr->resume_work);
+
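+ /* Keep lock ordering: adev->userq_mutex before the per-manager lock */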
+ mutex_lock(&adev->userq_mutex);
+ mutex_lock(&userq_mgr->userq_mutex);
+ idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
+ amdgpu_userq_unmap_helper(userq_mgr, queue);
+ amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
+ }
+
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ if (uqm == userq_mgr) {
+ list_del(&uqm->list);
+ break;
+ }
+ }
+ idr_destroy(&userq_mgr->userq_idr);
+ mutex_unlock(&userq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+ mutex_destroy(&userq_mgr->userq_mutex);
+}
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already stopped!\n");
+ adev->userq_halt_for_enforce_isolation = true;
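+ /* Unmap only the gfx/compute queues running on the given XCP */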
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to start gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (!adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already started!\n");
+ adev->userq_halt_for_enforce_isolation = false;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
new file mode 100644
index 000000000000..ec040c2fd6c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_USERQ_H_
+#define AMDGPU_USERQ_H_
+#include "amdgpu_eviction_fence.h"
+
+#define AMDGPU_MAX_USERQ_COUNT 512
+
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+
+enum amdgpu_userq_state {
+ AMDGPU_USERQ_STATE_UNMAPPED = 0,
+ AMDGPU_USERQ_STATE_MAPPED,
+ AMDGPU_USERQ_STATE_PREEMPTED,
+ AMDGPU_USERQ_STATE_HUNG,
+};
+
+struct amdgpu_mqd_prop;
+
+struct amdgpu_userq_obj {
+ void *cpu_ptr;
+ uint64_t gpu_addr;
+ struct amdgpu_bo *obj;
+};
+
+struct amdgpu_usermode_queue {
+ int queue_type;
+ enum amdgpu_userq_state state;
+ uint64_t doorbell_handle;
+ uint64_t doorbell_index;
+ uint64_t flags;
+ struct amdgpu_mqd_prop *userq_prop;
+ struct amdgpu_userq_mgr *userq_mgr;
+ struct amdgpu_vm *vm;
+ struct amdgpu_userq_obj mqd;
+ struct amdgpu_userq_obj db_obj;
+ struct amdgpu_userq_obj fw_obj;
+ struct amdgpu_userq_obj wptr_obj;
+ struct xarray fence_drv_xa;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *last_fence;
+ u32 xcp_id;
+ int priority;
+};
+
+struct amdgpu_userq_funcs {
+ int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args,
+ struct amdgpu_usermode_queue *queue);
+ void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *uq);
+ int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*map)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+};
+
+/* Usermode queues for gfx */
+struct amdgpu_userq_mgr {
+ struct idr userq_idr;
+ struct mutex userq_mutex;
+ struct amdgpu_device *adev;
+ struct delayed_work resume_work;
+ struct list_head list;
+ struct drm_file *file;
+};
+
+struct amdgpu_db_info {
+ uint64_t doorbell_handle;
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ struct amdgpu_userq_obj *db_obj;
+};
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev);
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size);
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj);
+
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
+
+void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp);
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev);
+int amdgpu_userq_resume(struct amdgpu_device *adev);
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
new file mode 100644
index 000000000000..a86616c6deef
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -0,0 +1,968 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_syncobj.h>
+
+#include "amdgpu.h"
+#include "amdgpu_userq_fence.h"
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops;
+static struct kmem_cache *amdgpu_userq_fence_slab;
+
+int amdgpu_userq_fence_slab_init(void)
+{
+ amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
+ sizeof(struct amdgpu_userq_fence),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!amdgpu_userq_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amdgpu_userq_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(amdgpu_userq_fence_slab);
+}
+
+static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
+{
+ if (!f || f->ops != &amdgpu_userq_fence_ops)
+ return NULL;
+
+ return container_of(f, struct amdgpu_userq_fence, base);
+}
+
+static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ return le64_to_cpu(*fence_drv->cpu_addr);
+}
+
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long flags;
+ int r;
+
+ fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
+ if (!fence_drv)
+ return -ENOMEM;
+
+ /* Acquire seq64 memory */
+ r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
+ &fence_drv->cpu_addr);
+ if (r)
+ goto free_fence_drv;
+
+ memset(fence_drv->cpu_addr, 0, sizeof(u64));
+
+ kref_init(&fence_drv->refcount);
+ INIT_LIST_HEAD(&fence_drv->fences);
+ spin_lock_init(&fence_drv->fence_list_lock);
+
+ fence_drv->adev = adev;
+ fence_drv->context = dma_fence_context_alloc(1);
+ get_task_comm(fence_drv->timeline_name, current);
+
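+ /* Publish the fence driver keyed by doorbell index for fence processing */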
+ xa_lock_irqsave(&adev->userq_xa, flags);
+ r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
+ fence_drv, GFP_KERNEL));
+ xa_unlock_irqrestore(&adev->userq_xa, flags);
+ if (r)
+ goto free_seq64;
+
+ userq->fence_drv = fence_drv;
+
+ return 0;
+
+free_seq64:
+ amdgpu_seq64_free(adev, fence_drv->va);
+free_fence_drv:
+ kfree(fence_drv);
+
+ return r;
+}
+
+static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long index;
+
+ if (xa_empty(xa))
+ return;
+
+ xa_lock(xa);
+ xa_for_each(xa, index, fence_drv) {
+ __xa_erase(xa, index);
+ amdgpu_userq_fence_driver_put(fence_drv);
+ }
+
+ xa_unlock(xa);
+}
+
+void
+amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
+{
+ amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
+ xa_destroy(&userq->fence_drv_xa);
+ /* Drop the fence_drv reference held by user queue */
+ amdgpu_userq_fence_driver_put(userq->fence_drv);
+}
+
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ struct amdgpu_userq_fence *userq_fence, *tmp;
+ struct dma_fence *fence;
+ u64 rptr;
+ int i;
+
+ if (!fence_drv)
+ return;
+
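+ /* rptr holds the last fence sequence number the queue has completed */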
+ rptr = amdgpu_userq_fence_read(fence_drv);
+
+ spin_lock(&fence_drv->fence_list_lock);
+ list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
+ fence = &userq_fence->base;
+
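+ /* Fences are in submission order; stop at the first unfinished one */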
+ if (rptr < fence->seqno)
+ break;
+
+ dma_fence_signal(fence);
+
+ for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+ amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+
+ list_del(&userq_fence->link);
+ dma_fence_put(fence);
+ }
+ spin_unlock(&fence_drv->fence_list_lock);
+}
+
+void amdgpu_userq_fence_driver_destroy(struct kref *ref)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
+ struct amdgpu_userq_fence_driver,
+ refcount);
+ struct amdgpu_userq_fence_driver *xa_fence_drv;
+ struct amdgpu_device *adev = fence_drv->adev;
+ struct amdgpu_userq_fence *fence, *tmp;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long index, flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
+ f = &fence->base;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_set_error(f, -ECANCELED);
+ dma_fence_signal(f);
+ }
+
+ list_del(&fence->link);
+ dma_fence_put(f);
+ }
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ xa_lock_irqsave(xa, flags);
+ xa_for_each(xa, index, xa_fence_drv)
+ if (xa_fence_drv == fence_drv)
+ __xa_erase(xa, index);
+ xa_unlock_irqrestore(xa, flags);
+
+ /* Free seq64 memory */
+ amdgpu_seq64_free(adev, fence_drv->va);
+ kfree(fence_drv);
+}
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_get(&fence_drv->refcount);
+}
+
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
+}
+
+static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
+{
+ *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+ return *userq_fence ? 0 : -ENOMEM;
+}
+
+static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
+ struct amdgpu_userq_fence *userq_fence,
+ u64 seq, struct dma_fence **f)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *fence;
+ unsigned long flags;
+
+ fence_drv = userq->fence_drv;
+ if (!fence_drv)
+ return -EINVAL;
+
+ spin_lock_init(&userq_fence->lock);
+ INIT_LIST_HEAD(&userq_fence->link);
+ fence = &userq_fence->base;
+ userq_fence->fence_drv = fence_drv;
+
+ dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+ dma_fence_get(fence);
+
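+ /*
+ * Adopt the fence driver references stashed by the wait IOCTL so
+ * they are released once this fence signals.
+ */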
+ if (!xa_empty(&userq->fence_drv_xa)) {
+ struct amdgpu_userq_fence_driver *stored_fence_drv;
+ unsigned long index, count = 0;
+ int i = 0;
+
+ xa_lock(&userq->fence_drv_xa);
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
+ count++;
+
+ userq_fence->fence_drv_array =
+ kvmalloc_array(count,
+ sizeof(struct amdgpu_userq_fence_driver *),
+ GFP_ATOMIC);
+
+ if (userq_fence->fence_drv_array) {
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
+ userq_fence->fence_drv_array[i] = stored_fence_drv;
+ __xa_erase(&userq->fence_drv_xa, index);
+ i++;
+ }
+ }
+
+ userq_fence->fence_drv_array_count = i;
+ xa_unlock(&userq->fence_drv_xa);
+ } else {
+ userq_fence->fence_drv_array = NULL;
+ userq_fence->fence_drv_array_count = 0;
+ }
+
+ /* Check if hardware has already processed the job */
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ if (!dma_fence_is_signaled_locked(fence))
+ list_add_tail(&userq_fence->link, &fence_drv->fences);
+ else
+ dma_fence_put(fence);
+
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ *f = fence;
+
+ return 0;
+}
+
+static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_userq_fence";
+}
+
+static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+
+ return fence->fence_drv->timeline_name;
+}
+
+static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 rptr, wptr;
+
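+ /* Signaled once the queue's rptr has caught up with this fence's seqno */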
+ rptr = amdgpu_userq_fence_read(fence_drv);
+ wptr = fence->base.seqno;
+
+ if (rptr >= wptr)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
+ struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
+ struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
+
+ /* Release the fence driver reference */
+ amdgpu_userq_fence_driver_put(fence_drv);
+
+ kvfree(userq_fence->fence_drv_array);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+}
+
+static void amdgpu_userq_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_userq_fence_free);
+}
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = amdgpu_userq_fence_get_driver_name,
+ .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
+ .signaled = amdgpu_userq_fence_signaled,
+ .release = amdgpu_userq_fence_release,
+};
+
+/**
+ * amdgpu_userq_fence_read_wptr - Read the userq wptr value
+ *
+ * @queue: user mode queue structure pointer
+ * @wptr: write pointer value
+ *
+ * Read the wptr value from the userq's MQD. The userq signal IOCTL
+ * creates a dma_fence for the shared buffers that signals once the
+ * RPTR value written to seq64 memory reaches the WPTR.
+ *
+ * Returns 0 on success with the wptr value stored in @wptr, error on
+ * failure.
+ */
+static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
+ u64 *wptr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ u64 addr, *ptr;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ addr = queue->userq_prop->wptr_gpu_addr;
+ addr &= AMDGPU_GMC_HOLE_MASK;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
+ if (!mapping) {
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
+ return -EINVAL;
+ }
+
+ bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("Failed to reserve userqueue wptr bo");
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, (void **)&ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the userqueue wptr bo");
+ goto map_error;
+ }
+
+ *wptr = le64_to_cpu(*ptr);
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+
+map_error:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return r;
+}
+
+static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
+{
+ dma_fence_put(fence);
+}
+
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct drm_amdgpu_userq_signal *args = data;
+ struct drm_gem_object **gobj_write = NULL;
+ struct drm_gem_object **gobj_read = NULL;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_fence *userq_fence;
+ struct drm_syncobj **syncobj = NULL;
+ u32 *bo_handles_write, num_write_bo_handles;
+ u32 *syncobj_handles, num_syncobj_handles;
+ u32 *bo_handles_read, num_read_bo_handles;
+ int r, i, entry, rentry, wentry;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ u64 wptr;
+
+ num_syncobj_handles = args->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ /* Array of pointers to the looked up syncobjs */
+ syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
+ if (!syncobj) {
+ r = -ENOMEM;
+ goto free_syncobj_handles;
+ }
+
+ for (entry = 0; entry < num_syncobj_handles; entry++) {
+ syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
+ if (!syncobj[entry]) {
+ r = -ENOENT;
+ goto free_syncobj;
+ }
+ }
+
+ num_read_bo_handles = args->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read)) {
+ r = PTR_ERR(bo_handles_read);
+ goto free_syncobj;
+ }
+
+ /* Array of pointers to the GEM read objects */
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_bo_handles_read;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ num_write_bo_handles = args->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto put_gobj_read;
+ }
+
+ /* Array of pointers to the GEM write objects */
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto free_bo_handles_write;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ /* Retrieve the user queue */
+ queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ if (!queue) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+
+ r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+ if (r)
+ goto put_gobj_write;
+
+ r = amdgpu_userq_fence_alloc(&userq_fence);
+ if (r)
+ goto put_gobj_write;
+
+ /*
+ * Reaching this point means the user queue is active; make sure its
+ * eviction fence is valid. Note that this returns with userq_mutex
+ * held, which is dropped below once the new fence is attached.
+ */
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ /* Create a new fence */
+ r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
+ if (r) {
+ mutex_unlock(&userq_mgr->userq_mutex);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ goto put_gobj_write;
+ }
+
+ dma_fence_put(queue->last_fence);
+ queue->last_fence = dma_fence_get(fence);
+ mutex_unlock(&userq_mgr->userq_mutex);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+ }
+
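+ /* Attach the new fence to each BO's reservation object */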
+ for (i = 0; i < num_read_bo_handles; i++) {
+ if (!gobj_read || !gobj_read[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_read[i]->resv, fence,
+ DMA_RESV_USAGE_READ);
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ if (!gobj_write || !gobj_write[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_write[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ /* Add the created fence to the syncobjs */
+ for (i = 0; i < num_syncobj_handles; i++)
+ drm_syncobj_replace_fence(syncobj[i], fence);
+
+ /* drop the reference acquired in fence creation function */
+ dma_fence_put(fence);
+
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+free_syncobj:
+ while (entry-- > 0)
+ if (syncobj[entry])
+ drm_syncobj_put(syncobj[entry]);
+ kfree(syncobj);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+
+ return r;
+}
+
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
+ u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
+ struct drm_amdgpu_userq_fence_info *fence_info = NULL;
+ struct drm_amdgpu_userq_wait *wait_info = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_usermode_queue *waitq;
+ struct drm_gem_object **gobj_write;
+ struct drm_gem_object **gobj_read;
+ struct dma_fence **fences = NULL;
+ u16 num_points, num_fences = 0;
+ int r, i, rentry, wentry, cnt;
+ struct drm_exec exec;
+
+ num_read_bo_handles = wait_info->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
+ size_mul(sizeof(u32), num_read_bo_handles));
+ if (IS_ERR(bo_handles_read))
+ return PTR_ERR(bo_handles_read);
+
+ num_write_bo_handles = wait_info->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
+ size_mul(sizeof(u32), num_write_bo_handles));
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto free_bo_handles_read;
+ }
+
+ num_syncobj = wait_info->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj));
+ if (IS_ERR(syncobj_handles)) {
+ r = PTR_ERR(syncobj_handles);
+ goto free_bo_handles_write;
+ }
+
+ num_points = wait_info->num_syncobj_timeline_handles;
+ timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_handles)) {
+ r = PTR_ERR(timeline_handles);
+ goto free_syncobj_handles;
+ }
+
+ timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_points)) {
+ r = PTR_ERR(timeline_points);
+ goto free_timeline_handles;
+ }
+
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_timeline_points;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto put_gobj_read;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+ }
+
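+ /* num_fences == 0 is a query: count matching fences instead of returning them */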
+ if (!wait_info->num_fences) {
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ num_fences++;
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Count syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ num_fences++;
+ dma_fence_put(fence);
+ }
+
+ /* Count GEM objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence)
+ num_fences++;
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence)
+ num_fences++;
+ }
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve userq_fence_info. If num_fences = 0 we skip filling
+ * userq_fence_info and return the actual number of fences in
+ * wait_info->num_fences.
+ */
+ wait_info->num_fences = num_fences;
+ } else {
+ /* Array of fence info */
+ fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
+ if (!fence_info) {
+ r = -ENOMEM;
+ goto exec_fini;
+ }
+
+ /* Array of fences */
+ fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ r = -ENOMEM;
+ goto free_fence_info;
+ }
+
+ /* Retrieve GEM read objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ /* Retrieve GEM write objects fence */
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ dma_fence_get(f);
+ fences[num_fences++] = f;
+ }
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Retrieve syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ }
+
+ /*
+ * Keep only the latest fences to reduce the number of values
+ * given back to userspace.
+ */
+ num_fences = dma_fence_dedup_array(fences, num_fences);
+
+ waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ if (!waitq) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ for (i = 0, cnt = 0; i < num_fences; i++) {
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence *userq_fence;
+ u32 index;
+
+ userq_fence = to_amdgpu_userq_fence(fences[i]);
+ if (!userq_fence) {
+ /*
+ * Just waiting on other driver fences should
+ * be good for now
+ */
+ r = dma_fence_wait(fences[i], true);
+ if (r) {
+ dma_fence_put(fences[i]);
+ goto free_fences;
+ }
+
+ dma_fence_put(fences[i]);
+ continue;
+ }
+
+ fence_drv = userq_fence->fence_drv;
+ /*
+ * We need to make sure the user queue releases its references
+ * to the fence drivers at some point before queue destruction.
+ * Otherwise, we would accumulate those references until we run
+ * out of space in the xarray and crash.
+ */
+ r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+ xa_limit_32b, GFP_KERNEL);
+ if (r)
+ goto free_fences;
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+
+ /* Store the fence driver's GPU VA and the fence value */
+ fence_info[cnt].va = fence_drv->va;
+ fence_info[cnt].value = fences[i]->seqno;
+
+ dma_fence_put(fences[i]);
+ /* Increment the actual userq fence count */
+ cnt++;
+ }
+
+ wait_info->num_fences = cnt;
+ /* Copy userq fence info to user space */
+ if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
+ fence_info, wait_info->num_fences * sizeof(*fence_info))) {
+ r = -EFAULT;
+ goto free_fences;
+ }
+
+ kfree(fences);
+ kfree(fence_info);
+ }
+
+ drm_exec_fini(&exec);
+ for (i = 0; i < num_read_bo_handles; i++)
+ drm_gem_object_put(gobj_read[i]);
+ kfree(gobj_read);
+
+ for (i = 0; i < num_write_bo_handles; i++)
+ drm_gem_object_put(gobj_write[i]);
+ kfree(gobj_write);
+
+ kfree(timeline_points);
+ kfree(timeline_handles);
+ kfree(syncobj_handles);
+ kfree(bo_handles_write);
+ kfree(bo_handles_read);
+
+ return 0;
+
+free_fences:
+ while (num_fences-- > 0)
+ dma_fence_put(fences[num_fences]);
+ kfree(fences);
+free_fence_info:
+ kfree(fence_info);
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_timeline_points:
+ kfree(timeline_points);
+free_timeline_handles:
+ kfree(timeline_handles);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
new file mode 100644
index 000000000000..97a125ab8a78
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_USERQ_FENCE_H__
+#define __AMDGPU_USERQ_FENCE_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_userq.h"
+
+struct amdgpu_userq_fence {
+ struct dma_fence base;
+ /*
+ * This lock is necessary to synchronize the
+ * userqueue dma fence operations.
+ */
+ spinlock_t lock;
+ struct list_head link;
+ unsigned long fence_drv_array_count;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence_driver **fence_drv_array;
+};
+
+struct amdgpu_userq_fence_driver {
+ struct kref refcount;
+ u64 va;
+ u64 gpu_addr;
+ u64 *cpu_addr;
+ u64 context;
+ /*
+ * This lock is necessary to synchronize the access
+ * to the fences list by the fence driver.
+ */
+ spinlock_t fence_list_lock;
+ struct list_head fences;
+ struct amdgpu_device *adev;
+ char timeline_name[TASK_COMM_LEN];
+};
+
+int amdgpu_userq_fence_slab_init(void);
+void amdgpu_userq_fence_slab_fini(void);
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_destroy(struct kref *ref);
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1991dd3d1056..c8885c3d54b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -353,9 +353,9 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
- /* err_event_athub will corrupt VCPU buffer, so we need to
+ /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
- if (in_ras_intr)
+ if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
return 0;
return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 0bb8cbe0dcc0..13f0cdeb59c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1323,6 +1323,9 @@ static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bo
{
struct amdgpu_virt *virt = &adev->virt;
+ if (!virt->ops || !virt->ops->req_ras_err_count)
+ return -EOPNOTSUPP;
+
/* Host allows 15 ras telemetry requests per 60 seconds, after which the Host
* will ignore incoming guest messages. Ratelimit the guest messages to
* prevent guest self DOS.
@@ -1378,14 +1381,16 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
used_size = host_telemetry->header.used_size;
if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
- return 0;
+ return -EINVAL;
cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
if (!cper_dump)
return -ENOMEM;
- if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0))
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
+ ret = -EINVAL;
goto out;
+ }
*more = cper_dump->more;
@@ -1425,7 +1430,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
int ret = 0;
uint32_t more = 0;
- if (!amdgpu_sriov_ras_cper_en(adev))
+ if (!virt->ops || !virt->ops->req_ras_cper_dump)
return -EOPNOTSUPP;
do {
@@ -1434,7 +1439,7 @@ static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
adev, virt->fw_reserve.ras_telemetry, &more);
else
ret = 0;
- } while (more);
+ } while (more && !ret);
return ret;
}
@@ -1444,6 +1449,9 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
struct amdgpu_virt *virt = &adev->virt;
int ret = 0;
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
down_read_trylock(&adev->reset_domain->sem)) {
mutex_lock(&virt->ras.ras_telemetry_mutex);
@@ -1480,3 +1488,16 @@ bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
return true;
}
+
+/**
+ * amdgpu_virt_request_bad_pages() - request bad pages
+ * @adev: amdgpu device.
+ *
+ * Send a command to the GPU hypervisor to write new bad pages into the
+ * shared PF2VF region.
+ */
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_bad_pages)
+ virt->ops->req_bad_pages(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index df03dba67ab8..577c6194db78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -97,6 +97,7 @@ struct amdgpu_virt_ops {
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
+ int (*req_bad_pages)(struct amdgpu_device *adev);
};
/*
@@ -146,11 +147,13 @@ enum AMDGIM_FEATURE_FLAG {
enum AMDGIM_REG_ACCESS_FLAG {
/* Use PSP to program IH_RB_CNTL */
- AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
/* Use RLC to program MMHUB regs */
- AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
- AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ /* Use PSP to program L1_TLB_CNTL*/
+ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
};
struct amdgim_pf2vf_info_v1 {
@@ -260,7 +263,10 @@ struct amdgpu_virt {
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
struct amdgpu_irq_src rcv_irq;
+
struct work_struct flr_work;
+ struct work_struct bad_pages_work;
+
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
@@ -330,6 +336,10 @@ struct amdgpu_video_codec_info;
(amdgpu_sriov_vf((adev)) && \
((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN)))
+
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
@@ -423,4 +433,5 @@ int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ce52b4d75e94..3911c78f8282 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -787,7 +787,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
- cleaner_shader_needed = adev->gfx.enable_cleaner_shader &&
+ cleaner_shader_needed = job->run_cleaner_shader &&
+ adev->gfx.enable_cleaner_shader &&
ring->funcs->emit_cleaner_shader && job->base.s_fence &&
&job->base.s_fence->scheduled == isolation->spearhead;
@@ -817,7 +818,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
- if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
+ if (ring->funcs->emit_gds_switch &&
gds_switch_needed) {
amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 2d7f82e98df9..abdc52b0895a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -463,7 +463,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
int r;
lpfn = (u64)place->lpfn << PAGE_SHIFT;
- if (!lpfn)
+ if (!lpfn || lpfn > man->size)
lpfn = man->size;
fpfn = (u64)place->fpfn << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 23b6f7a4aa4a..322816805bfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -27,6 +27,9 @@
#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);
+
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
@@ -189,7 +192,7 @@ static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
-
+ amdgpu_xcp_sysfs_entries_update(xcp_mgr);
out:
mutex_unlock(&xcp_mgr->xcp_lock);
@@ -263,9 +266,10 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
if (ret == -ENOSPC) {
dev_warn(adev->dev,
"Skip xcp node #%d when out of drm node resource.", i);
- return 0;
+ ret = 0;
+ goto out;
} else if (ret) {
- return ret;
+ goto out;
}
/* Redirect all IOCTLs to the primary device */
@@ -278,9 +282,14 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
p_ddev->vma_offset_manager = ddev->vma_offset_manager;
p_ddev->driver = &amdgpu_partition_driver;
adev->xcp_mgr->xcp[i].ddev = p_ddev;
+
+ dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
}
+ ret = 0;
+out:
+ amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);
- return 0;
+ return ret;
}
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
@@ -288,6 +297,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
struct amdgpu_xcp_mgr *xcp_mgr;
+ int i;
if (!xcp_funcs || !xcp_funcs->get_ip_details)
return -EINVAL;
@@ -306,6 +316,8 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
adev->xcp_mgr = xcp_mgr;
+ for (i = 0; i < MAX_XCP; ++i)
+ xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;
return amdgpu_xcp_dev_alloc(adev);
}
@@ -433,6 +445,7 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
}
}
+/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
struct amdgpu_xcp_res_details *xcp_res, char *buf) \
@@ -635,7 +648,7 @@ static const struct attribute *xcp_attrs[] = {
NULL,
};
-void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
+static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
struct amdgpu_xcp_res_details *xcp_res;
struct amdgpu_xcp_cfg *xcp_cfg;
@@ -703,16 +716,16 @@ err1:
kobject_put(&xcp_cfg->kobj);
}
-void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
+static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
struct amdgpu_xcp_res_details *xcp_res;
struct amdgpu_xcp_cfg *xcp_cfg;
int i;
- if (!adev->xcp_mgr)
+ if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
return;
- xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
for (i = 0; i < xcp_cfg->num_res; i++) {
xcp_res = &xcp_cfg->xcp_res[i];
kobject_put(&xcp_res->kobj);
@@ -722,3 +735,124 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
kobject_put(&xcp_cfg->kobj);
}
+
+/*====================== xcp sysfs - data entries ======================*/
+
+#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)
+
+static ssize_t xcp_metrics_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp *xcp = to_xcp(kobj);
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ ssize_t size;
+
+ xcp_mgr = xcp->xcp_mgr;
+ size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
+ if (size <= 0)
+ return size;
+
+ if (size > PAGE_SIZE)
+ return -ENOSPC;
+
+ return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
+}
+
+static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct amdgpu_xcp *xcp = to_xcp(kobj);
+
+ if (!xcp || !xcp->valid)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);
+
+static struct attribute *amdgpu_xcp_attrs[] = {
+ &xcp_sysfs_metrics.attr,
+ NULL,
+};
+
+static const struct attribute_group amdgpu_xcp_attrs_group = {
+ .attrs = amdgpu_xcp_attrs,
+ .is_visible = amdgpu_xcp_attrs_is_visible
+};
+
+static const struct kobj_type xcp_sysfs_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
+{
+ struct amdgpu_xcp *xcp;
+
+ for (n--; n >= 0; n--) {
+ xcp = &xcp_mgr->xcp[n];
+ if (!xcp->ddev || !xcp->valid)
+ continue;
+ sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ kobject_put(&xcp->kobj);
+ }
+}
+
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_xcp *xcp;
+ int i, r;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ /* Redirect all IOCTLs to the primary device */
+ xcp = &xcp_mgr->xcp[i];
+ if (!xcp->ddev)
+ break;
+ r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
+ &xcp->ddev->dev->kobj, "xcp");
+ if (r)
+ goto out;
+
+ r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ if (r)
+ goto out;
+ }
+
+ return;
+out:
+ kobject_put(&xcp->kobj);
+}
+
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_xcp *xcp;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ /* Redirect all IOCTLs to the primary device */
+ xcp = &xcp_mgr->xcp[i];
+ if (!xcp->ddev)
+ continue;
+ sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ }
+}
+
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
+{
+ if (!adev->xcp_mgr)
+ return;
+
+ amdgpu_xcp_cfg_sysfs_init(adev);
+}
+
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!adev->xcp_mgr)
+ return;
+ amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
+ amdgpu_xcp_cfg_sysfs_fini(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index b63f53242c57..454b33f889fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -108,6 +108,8 @@ struct amdgpu_xcp {
struct drm_driver *driver;
struct drm_vma_offset_manager *vma_offset_manager;
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ struct kobject kobj;
};
struct amdgpu_xcp_mgr {
@@ -175,8 +177,8 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
-void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev);
-void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev);
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 477424472bbe..d9ad37711c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -294,17 +294,46 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};
+int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num)
+{
+ int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 };
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ if (link_num < ARRAY_SIZE(link_map_6_4_x))
+ return link_map_6_4_x[link_num];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
{
- const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070;
- const int xgmi_inst = 2;
- u32 link_inst;
+ const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 };
+ const u32 smn_xgmi_6_4_1_pcs_state_hist1[2] = { 0x12100070,
+ 0x11b00070 };
+ u32 i, n;
u64 addr;
- link_inst = global_link_num % xgmi_inst;
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ n = ARRAY_SIZE(smn_xgmi_6_4_pcs_state_hist1);
+ addr = smn_xgmi_6_4_pcs_state_hist1[global_link_num % n];
+ break;
+ case IP_VERSION(6, 4, 1):
+ n = ARRAY_SIZE(smn_xgmi_6_4_1_pcs_state_hist1);
+ addr = smn_xgmi_6_4_1_pcs_state_hist1[global_link_num % n];
+ break;
+ default:
+ return U32_MAX;
+ }
- addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) +
- adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst);
+ i = global_link_num / n;
+ addr += adev->asic_funcs->encode_ext_smn_addressing(i);
return RREG32_PCIE_EXT(addr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 32dabba4062f..f994be985f42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -125,6 +125,7 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
int req_nps_mode);
int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev,
int global_link_num);
+int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num);
void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index d6ac2652f0ac..92ca13097aaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -109,10 +109,11 @@ union amd_sriov_msg_feature_flags {
union amd_sriov_reg_access_flags {
struct {
- uint32_t vf_reg_access_ih : 1;
- uint32_t vf_reg_access_mmhub : 1;
- uint32_t vf_reg_access_gc : 1;
- uint32_t reserved : 29;
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t vf_reg_access_l1_tlb_cntl : 1;
+ uint32_t reserved : 28;
} flags;
uint32_t all;
};
@@ -330,6 +331,7 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_RAS_POISON = 202,
MB_REQ_RAS_ERROR_COUNT = 203,
MB_REQ_RAS_CPER_DUMP = 204,
+ MB_REQ_RAS_BAD_PAGES = 205,
};
/* mailbox message send from host to guest */
@@ -347,6 +349,9 @@ enum amd_sriov_mailbox_response_message {
MB_RES_MSG_GPU_RMA = 10,
MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
MB_REQ_RAS_CPER_DUMP_READY = 14,
+ MB_RES_MSG_RAS_BAD_PAGES_READY = 15,
+ MB_RES_MSG_RAS_BAD_PAGES_NOTIFICATION = 16,
+ MB_RES_MSG_UNRECOV_ERR_NOTIFICATION = 17,
MB_RES_MSG_TEXT_MESSAGE = 255
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index ae071985f26e..1c083304ae77 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -448,53 +448,71 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
return 0;
}
-static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
- int mode,
- struct amdgpu_xcp_cfg *xcp_cfg)
+static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int px_mode, int *num_xcp,
+ uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int max_res[AMDGPU_XCP_RES_MAX] = {};
- bool res_lt_xcp;
- int num_xcp, i;
- u16 nps_modes;
- if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
- max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
- max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
- max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
- max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
-
- switch (mode) {
+ switch (px_mode) {
case AMDGPU_SPX_PARTITION_MODE:
- num_xcp = 1;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ *num_xcp = 1;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
break;
case AMDGPU_DPX_PARTITION_MODE:
- num_xcp = 2;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS2_PARTITION_MODE);
+ *num_xcp = 2;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_TPX_PARTITION_MODE:
- num_xcp = 3;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 3;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_QPX_PARTITION_MODE:
- num_xcp = 4;
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = 4;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
- num_xcp = NUM_XCC(adev->gfx.xcc_mask);
- nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
- BIT(AMDGPU_NPS4_PARTITION_MODE);
+ *num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (amdgpu_sriov_vf(adev))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
return -EINVAL;
}
+ return 0;
+}
+
+static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int max_res[AMDGPU_XCP_RES_MAX] = {};
+ bool res_lt_xcp;
+ int num_xcp, i, r;
+ u16 nps_modes;
+
+ if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ return -EINVAL;
+
+ max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
+ max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
+ max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+ max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
+
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
+ if (r)
+ return r;
+
xcp_cfg->compatible_nps_modes =
(adev->gmc.supported_nps_modes & nps_modes);
xcp_cfg->num_res = ARRAY_SIZE(max_res);
@@ -543,30 +561,31 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
enum amdgpu_gfx_partition mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
- int num_xcc, num_xccs_per_xcp;
+ int num_xcc, num_xccs_per_xcp, r;
+ int num_xcp, nps_mode;
+ u16 supp_nps_modes;
+ bool comp_mode;
+
+ nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
+ &supp_nps_modes);
+ if (r)
+ return false;
+ comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
+ return comp_mode && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
+ return comp_mode && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 3) &&
- ((num_xcc % 3) == 0);
+ return comp_mode && ((num_xcc % 3) == 0);
case AMDGPU_QPX_PARTITION_MODE:
num_xccs_per_xcp = num_xcc / 4;
- return (adev->gmc.num_mem_partitions == 1 ||
- adev->gmc.num_mem_partitions == 4) &&
- (num_xccs_per_xcp >= 2);
+ return comp_mode && (num_xccs_per_xcp >= 2);
case AMDGPU_CPX_PARTITION_MODE:
- /* (num_xcc > 1) because 1 XCC is considered SPX, not CPX.
- * (num_xcc % adev->gmc.num_mem_partitions) == 0 because
- * num_compute_partitions can't be less than num_mem_partitions
- */
- return ((num_xcc > 1) &&
- (num_xcc % adev->gmc.num_mem_partitions) == 0);
+ return comp_mode && (num_xcc > 1);
default:
return false;
}
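
The per-mode num_mem_partitions comparisons collapse into one rule: the current NPS mode must be in the partition mode's supported mask, and the XCC count must still divide as before. Restated as a sketch (not driver code):

	static bool px_mode_ok(u16 supp_nps_modes, int nps_mode,
			       int num_xcc, int xcc_div)
	{
		return (BIT(nps_mode) & supp_nps_modes) &&
		       num_xcc > 0 && (num_xcc % xcc_div) == 0;
	}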
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 81d195d366ce..427b073de2fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1444,6 +1444,7 @@ static void atom_get_vbios_pn(struct atom_context *ctx)
if (vbios_str == NULL)
vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
}
+ OPTIMIZER_HIDE_VAR(vbios_str);
if (vbios_str != NULL && *vbios_str == 0)
vbios_str++;
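
OPTIMIZER_HIDE_VAR() is an empty asm that makes the compiler treat the variable as rewritten, so value tracking across the preceding NULL test cannot fold away the vbios_str arithmetic. Its rough shape in include/linux/compiler.h (quoted from memory):

	#define OPTIMIZER_HIDE_VAR(var) \
		__asm__ ("" : "=r" (var) : "0" (var))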
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 521b9faab180..492813ab1b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect
u8 link_status[DP_LINK_STATUS_SIZE];
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
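
All three call sites move from "<= 0" to "< 0", consistent with a drm_dp_dpcd_read_link_status() that now returns 0 on success and a negative errno on failure instead of a byte count. The new convention, sketched:

	ret = drm_dp_dpcd_read_link_status(aux, link_status);
	if (ret < 0)	/* "<= 0" would treat the 0-on-success case as failure */
		return false;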
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 508cea965983..9e8715b4739d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -56,6 +56,8 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
+u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
@@ -67,9 +69,6 @@ MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
-u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
-
-
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
@@ -993,14 +992,9 @@ static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
{
- int r;
struct amdgpu_device *adev = ip_block->adev;
- r = cik_sdma_start(adev);
- if (r)
- return r;
-
- return r;
+ return cik_sdma_start(adev);
}
static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1040,14 +1034,10 @@ static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
- SRBM_STATUS2__SDMA1_BUSY_MASK);
-
- if (!tmp)
+ if (cik_sdma_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 279288365940..8aca4f2734f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -60,9 +60,6 @@
#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
-#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
-#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
-
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
#define VMID(x) ((x) << 4)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index df401aded662..bf7c22f81cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3075,7 +3075,7 @@ static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3227,7 +3227,7 @@ static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 80f01c3989cd..47e05783c4a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3206,7 +3206,7 @@ static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3358,7 +3358,7 @@ static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3488,8 +3488,7 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.set_powergating_state = dce_v11_0_set_powergating_state,
};
-static void
-dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 255c70959343..276c025c4c03 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -287,7 +287,7 @@ static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -412,7 +412,7 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
- RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
+ RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
@@ -1011,16 +1011,16 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* select wm A */
arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(1);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(2);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
@@ -1089,7 +1089,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
}
WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
- DC_LB_MEMORY_CONFIG(tmp));
+ (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));
WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
(buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
@@ -1306,6 +1306,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
@@ -1327,6 +1328,11 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
@@ -1348,7 +1354,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
+ u32 value = 0;
u8 stereo_freqs = 0;
int max_channels = -1;
int j;
@@ -1358,12 +1364,12 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
max_channels = sad->channels;
}
@@ -1374,13 +1380,13 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
}
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
+ value |= (stereo_freqs <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
+
+ WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
}
kfree(sads);
-
}
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
@@ -1886,7 +1892,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
- u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
+ u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
@@ -1926,76 +1932,76 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
switch (target_fb->format->format) {
case DRM_FORMAT_C8:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
- GRPH_FORMAT(GRPH_FORMAT_INDEXED));
+ fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_RGB565:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB565));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
- fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
- GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
+ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
default:
@@ -2013,18 +2019,18 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- fb_format |= GRPH_NUM_BANKS(num_banks);
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
- fb_format |= GRPH_TILE_SPLIT(tile_split);
- fb_format |= GRPH_BANK_WIDTH(bankw);
- fb_format |= GRPH_BANK_HEIGHT(bankh);
- fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
+ fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
+ fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
+ fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
+ fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
+ fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
+ fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
+ fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- fb_format |= GRPH_PIPE_CONFIG(pipe_config);
+ fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
dce_v6_0_vga_enable(crtc, false);
@@ -2040,7 +2046,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
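
The GRPH_*() wrapper macros give way to explicit shifts against the generated __SHIFT/__MASK names throughout this file. The general shape of the conversion, with placeholder REG/FIELD names:

	/* read-modify-write form, as in the watermark hunks above */
	tmp &= ~REG__FIELD_MASK;		/* clear the field */
	tmp |= value << REG__FIELD__SHIFT;	/* set the new value */

When the target starts at 0 (the fb_format/fb_swap initializers), the clearing step drops out and only the OR of shifted values remains.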
@@ -2108,14 +2114,13 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
- INTERLEAVE_EN);
+ DATA_FORMAT__INTERLEAVE_EN_MASK);
else
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -2125,15 +2130,15 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
- (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
+ ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
@@ -2160,19 +2165,19 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
- (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
- ICON_DEGAMMA_MODE(0) |
- (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
- (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
- (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
+ ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
@@ -2267,8 +2272,6 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
-
}
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
@@ -2285,7 +2288,6 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
CUR_CONTROL__CURSOR_EN_MASK |
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
}
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
@@ -2596,7 +2598,6 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
@@ -2669,7 +2670,7 @@ static void dce_v6_0_panic_flush(struct drm_plane *plane)
/* Disable DC tiling */
fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_ARRAY_MODE(0x7);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
}
@@ -2745,7 +2746,6 @@ static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- bool ret;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2789,8 +2789,7 @@ static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
- if (ret)
+ if (amdgpu_atombios_get_connector_info_from_object_table(adev))
amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2986,12 +2985,12 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask &= ~VBLANK_INT_MASK;
+ interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask |= VBLANK_INT_MASK;
+ interrupt_mask |= INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
@@ -3006,28 +3005,28 @@ static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned hpd,
enum amdgpu_interrupt_state state)
{
u32 dc_hpd_int_cntl;
- if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl |= DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
default:
break;
@@ -3036,7 +3035,7 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3096,7 +3095,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
switch (entry->src_data[0]) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
- WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3107,7 +3106,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
- WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -3121,7 +3120,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3172,7 +3171,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
@@ -3249,12 +3248,10 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.set_powergating_state = dce_v6_0_set_powergating_state,
};
-static void
-dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3274,7 +3271,6 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
-
struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3314,7 +3310,6 @@ static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -3325,7 +3320,6 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
@@ -3541,17 +3535,17 @@ static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
- .set = dce_v6_0_set_crtc_interrupt_state,
+ .set = dce_v6_0_set_crtc_irq_state,
.process = dce_v6_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
- .set = dce_v6_0_set_pageflip_interrupt_state,
+ .set = dce_v6_0_set_pageflip_irq_state,
.process = dce_v6_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
- .set = dce_v6_0_set_hpd_interrupt_state,
+ .set = dce_v6_0_set_hpd_irq_state,
.process = dce_v6_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 07358546581f..e62ccf9eb73d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -271,7 +271,7 @@ static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3021,7 +3021,7 @@ static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
}
-static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3029,7 +3029,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
u32 dc_hpd_int_cntl;
if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ DRM_DEBUG("invalid hpd %d\n", type);
return 0;
}
@@ -3051,7 +3051,7 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3136,7 +3136,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v8_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -3547,17 +3547,17 @@ static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
- .set = dce_v8_0_set_crtc_interrupt_state,
+ .set = dce_v8_0_set_crtc_irq_state,
.process = dce_v8_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
- .set = dce_v8_0_set_pageflip_interrupt_state,
+ .set = dce_v8_0_set_pageflip_irq_state,
.process = dce_v8_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
- .set = dce_v8_0_set_hpd_interrupt_state,
+ .set = dce_v8_0_set_hpd_irq_state,
.process = dce_v8_0_hpd_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 23e6a05359c2..75ea071744eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -368,11 +368,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_DEBUG_INST_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_LX6_CORE_PDEBUG_INST),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
@@ -421,7 +416,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
@@ -448,7 +452,32 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_10[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_MQD_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI)
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI),
+ /* gfx header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
};
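
The repeated entries in both lists are deliberate: the HEADER_DUMP registers appear to be FIFO-backed, each read returning the next of eight saved header dwords, so naming the same offset eight times drains the FIFO in order (the ip_dump/ip_print hunks below substitute the ME2 offset when dumping the second MEC). The equivalent read loop, as a sketch that assumes that FIFO behaviour:

	for (i = 0; i < 8; i++)
		hdr[i] = RREG32_SOC15(GC, 0, mmCP_ME_HEADER_DUMP);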
static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
@@ -4296,9 +4325,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -4306,39 +4333,15 @@ static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
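
The open-coded preamble and SECT_CONTEXT walk move into shared helpers; only the PA_SC_TILE_STEERING_OVERRIDE write stays version-specific. Judging from the deleted body, the start helper presumably emits the clear-state preamble (a sketch; the real definition lives elsewhere in amdgpu):

	u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
	{
		u32 count = 0;

		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
		buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
		buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
		buffer[count++] = cpu_to_le32(0x80000000);
		buffer[count++] = cpu_to_le32(0x80000000);

		return count;
	}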
static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
@@ -4752,6 +4755,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
@@ -4763,7 +4767,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 1, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
@@ -4778,7 +4782,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(10, 3, 7):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 2;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -4800,7 +4804,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.cleaner_shader_size = sizeof(gfx_10_1_10_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 101 &&
adev->gfx.pfp_fw_version >= 158 &&
- adev->gfx.mec_fw_version >= 152) {
+ adev->gfx.mec_fw_version >= 151) {
adev->gfx.enable_cleaner_shader = true;
r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
if (r) {
@@ -4810,7 +4814,9 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
break;
case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
@@ -4826,6 +4832,34 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(10, 3, 6):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 14 &&
+ adev->gfx.pfp_fw_version >= 17 &&
+ adev->gfx.mec_fw_version >= 24) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 4 &&
+ adev->gfx.pfp_fw_version >= 9 &&
+ adev->gfx.mec_fw_version >= 12) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -4886,7 +4920,7 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
/* set up the gfx ring */
for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
continue;
@@ -9645,9 +9679,14 @@ static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_10[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_10[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -9708,9 +9747,13 @@ static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block)
nv_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_10[reg]));
+ if (i && gc_cp_reg_list_10[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_10[reg]));
}
index += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
index 5255378af53c..f67569ccf9f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
@@ -43,9 +43,9 @@ static const u32 gfx_10_1_10_cleaner_shader_hex[] = {
0xd70f6a01, 0x000202ff,
0x00000400, 0x80828102,
0xbf84fff7, 0xbefc03ff,
- 0x00000068, 0xbe803080,
- 0xbe813080, 0xbe823080,
- 0xbe833080, 0x80fc847c,
+ 0x00000068, 0xbe803000,
+ 0xbe813000, 0xbe823000,
+ 0xbe833000, 0x80fc847c,
0xbf84fffa, 0xbeea0480,
0xbeec0480, 0xbeee0480,
0xbef00480, 0xbef20480,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
index 9ba3359253c9..54f7ed9e2801 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
@@ -40,7 +40,6 @@ shader main
type(CS)
wave_size(32)
// Note: original source code from SQ team
-
//
// Create 32 waves in a threadgroup (CS waves)
// Each allocates 64 VGPRs
@@ -71,8 +70,8 @@ label_0005:
s_sub_u32 s2, s2, 8
s_cbranch_scc0 label_0005
//
- s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
- s_and_b32 s2, s2, s0 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+	s_and_b32 s2, s2, s1         // sgpr1 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
s_cbranch_scc0 label_0023 // Clean LDS if its first wave of ThreadGroup/WorkGroup
// CLEAR LDS
//
@@ -99,10 +98,10 @@ label_001F:
label_0023:
s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
label_sgpr_loop:
- s_movreld_b32 s0, 0
- s_movreld_b32 s1, 0
- s_movreld_b32 s2, 0
- s_movreld_b32 s3, 0
+ s_movreld_b32 s0, s0
+ s_movreld_b32 s1, s0
+ s_movreld_b32 s2, s0
+ s_movreld_b32 s3, s0
s_sub_u32 m0, m0, 4
s_cbranch_scc0 label_sgpr_loop
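
The hex array in the .h hunk above must stay in sync with this source; reading the SOP1 encoding loosely, the low byte of each s_movreld word is the SSRC0 field, so the 0x80 -> 0x00 flip matches the 0 -> s0 operand change:

	// 0xbe803080  s_movreld_b32 s0, 0	(SSRC0 0x80 = inline constant 0)
	// 0xbe803000  s_movreld_b32 s0, s0	(SSRC0 0x00 = SGPR s0)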
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 2a5c2a1ae3c7..ec9b84f92d46 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -48,6 +48,8 @@
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX11_NUM_GFX_RINGS 1
#define GFX11_MEC_HPD_SIZE 2048
@@ -83,6 +85,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
@@ -177,9 +180,13 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_11_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_INSTR_PNTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -230,7 +237,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
@@ -259,7 +275,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_11[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
@@ -580,33 +613,18 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -633,12 +651,10 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
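
With the MES-queue context path gone, the IB test always goes through a device writeback slot, and the err1/err2 unwind frees it unconditionally. The surviving pattern, sketched from the standard amdgpu ring test:

	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);	/* seed the slot */
	/* submit the IB, which WRITE_DATAs 0xDEADBEEF to gpu_addr */
	if (le32_to_cpu(adev->wb.wb[index]) == 0xDEADBEEF)
		r = 0;					/* GPU executed the IB */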
@@ -744,6 +760,10 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/gc_11_0_0_rlc_1.bin");
+ else if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
@@ -833,9 +853,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
int ctx_reg_offset;
if (adev->gfx.rlc.cs_data == NULL)
@@ -843,39 +861,15 @@ static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
- ctx_reg_offset =
- SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
+ ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(ctx_reg_offset);
buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
@@ -1056,14 +1050,21 @@ static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
#define MQD_FWWORKAREA_SIZE 484
#define MQD_FWWORKAREA_ALIGNMENT 256
-static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+static void gfx_v11_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
struct amdgpu_gfx_shadow_info *shadow_info)
{
- if (adev->gfx.cp_gfx_shadow) {
- shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
- shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
- shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
- shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v11_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
return 0;
} else {
memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
@@ -1136,6 +1137,10 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
+ if (adev->gfx.disable_kq) {
+ ring->no_scheduler = true;
+ ring->no_user_submission = true;
+ }
if (!ring_id)
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
@@ -1568,24 +1573,18 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- int i, j, k, r, ring_id = 0;
+ int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
- adev->gfx.mec.num_mec = 1;
- adev->gfx.mec.num_pipe_per_mec = 4;
- adev->gfx.mec.num_queue_per_pipe = 4;
- break;
- case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
@@ -1593,7 +1592,7 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 5, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 2;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1612,6 +1611,35 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2390 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2600 &&
+ adev->mes.fw_version[0] >= 120) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ /* add firmware version checks here */
+ if (0 && !adev->gfx.disable_uq) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
if (adev->gfx.me_fw_version >= 2280 &&
@@ -1640,6 +1668,34 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 5, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 12 &&
+ adev->gfx.pfp_fw_version >= 15 &&
+ adev->gfx.mec_fw_version >= 15) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ case IP_VERSION(11, 5, 3):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 7 &&
+ adev->gfx.pfp_fw_version >= 8 &&
+ adev->gfx.mec_fw_version >= 8) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -1701,37 +1757,42 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v11_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ ring_id = 0;
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v11_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
- k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
+ continue;
- r = gfx_v11_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v11_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -4061,6 +4122,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4081,6 +4144,16 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->gds_bkup_base_lo = lower_32_bits(prop->gds_bkup_addr);
+ mqd->gds_bkup_base_hi = upper_32_bits(prop->gds_bkup_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4205,6 +4278,8 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -4256,6 +4331,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+ /* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -4509,11 +4588,23 @@ static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
return r;
}
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ if (adev->gfx.disable_kq) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ /* we don't want to set ring->ready */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+ }
+ if (amdgpu_async_gfx_ring)
+ amdgpu_gfx_disable_kgq(adev, 0);
+ } else {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
@@ -4722,6 +4813,49 @@ static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v11_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
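/* Worked example for the irq_type arithmetic above: with num_mec = 1 and
 * num_pipe_per_mec = 4, compute pipes 0..3 map to
 * AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + 0..3, i.e. one EOP interrupt
 * source per pipe; every user queue on a pipe shares that source.
 */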
static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -4731,9 +4865,11 @@ static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v11_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
- if (amdgpu_async_gfx_ring) {
+ if (amdgpu_async_gfx_ring &&
+ !adev->gfx.disable_kq) {
if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
@@ -5059,11 +5195,36 @@ static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
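/* Summary of the amdgpu_user_queue modes selected above:
 *  -1/0 (default): kernel queues only (disable_kq = false, disable_uq = true)
 *   1: kernel and user queues         (disable_kq = false, disable_uq = false)
 *   2: user queues only               (disable_kq = true,  disable_uq = false)
 */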
+
adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ /* We need one GFX ring temporarily to set up
+ * the clear state.
+ */
+ adev->gfx.num_gfx_rings = 1;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
gfx_v11_0_set_kiq_pm4_funcs(adev);
gfx_v11_0_set_ring_funcs(adev);
@@ -5094,6 +5255,11 @@ static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
if (r)
return r;
+
+ r = gfx_v11_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -5691,10 +5857,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -5714,10 +5876,6 @@ static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
/* Currently, there is a high possibility to get wave ID mismatch
* between ME and GDS, leading to a hw deadlock, because ME generates
* different wave IDs than the GDS expects. This situation happens
@@ -5775,8 +5933,7 @@ static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -5804,10 +5961,7 @@ static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -6036,28 +6190,13 @@ static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v10_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v10_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v10_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -6296,25 +6435,23 @@ static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
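/* A hedged sketch of the producer side this handler pairs with: when a
 * user queue is created, its fence driver is presumably stored in
 * adev->userq_xa keyed by the queue's doorbell offset (field and call
 * site names are assumptions), e.g.:
 *
 *	xa_store_irq(&adev->userq_xa, queue->doorbell_index,
 *		     queue->fence_drv, GFP_KERNEL);
 *
 * which lets the EOP handler above resolve src_data[0], the doorbell
 * offset, straight to the fence driver to process.
 */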
@@ -6481,27 +6618,29 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -6609,6 +6748,69 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static bool gfx_v11_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable the pipe reset until the CPFW fully supports it. */
+ dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset to the ME firmware start PC: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* FIXME: Sometimes driver can't cache the ME firmware start PC correctly,
+ * so the pipe reset status relies on the later gfx ring test result.
+ */
+ return 0;
+}
+
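/* Note on the PC check above (an interpretation, not authoritative):
 * CP_GFX_RS64_INSTR_PNTR1 reports the ME instruction pointer in dwords,
 * so (INSTR_PNTR1 << 2) is a byte address; after a clean pipe reset it
 * should equal the RS64 firmware entry point, making r == 0 the "pipe
 * restarted at the FW start PC" condition.
 */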
static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6618,8 +6820,13 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return -EINVAL;
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
- if (r)
- return r;
+ if (r) {
+ dev_warn(adev->dev, "reset via MES failed (%d), trying pipe reset\n", r);
+ r = gfx_v11_reset_gfx_pipe(ring);
+ if (r)
+ return r;
+ }
r = gfx_v11_0_kgq_init_queue(ring, true);
if (r) {
@@ -6636,6 +6843,136 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
+static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v11_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v11_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec1 fw pc: CP_MEC1_INSTR_PNTR */
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ /* mec2 fw pc: CP:CP_MEC2_INSTR_PNTR */
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ r = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_MEC1_INSTR_PNTR));
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v11_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe resets to MEC FW start PC: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /*FIXME:Sometimes driver can't cache the MEC firmware start PC correctly, so the pipe
+ * reset status relies on the compute ring test result.
+ */
+ return 0;
+}
+
static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6646,8 +6983,10 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "fail(%d) to reset kcq and try pipe reset\n", r);
+ r = gfx_v11_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v11_0_kcq_init_queue(ring, true);
@@ -6693,9 +7032,14 @@ static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printe
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_11[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_11[reg].reg_offset == regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_11[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -6755,9 +7099,16 @@ static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
/* ME0 is for GFX so start from 1 for CP */
soc21_grbm_select(adev, adev->gfx.me.num_me + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_11[reg]));
+ if (i &&
+ gc_cp_reg_list_11[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0,
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_11[reg]));
}
index += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 62a257a4a3e9..1234c8d64e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -36,7 +36,7 @@
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "soc24_enum.h"
-#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
#include "soc15.h"
#include "clearstate_gfx12.h"
@@ -44,6 +44,8 @@
#include "gfx_v12_0.h"
#include "nbif_v6_3_1.h"
#include "mes_v12_0.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
#define GFX12_NUM_GFX_RINGS 1
#define GFX12_MEC_HPD_SIZE 2048
@@ -133,11 +135,14 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR0),
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_RS64_INSTR_PNTR1),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_RS64_INSTR_PNTR),
-
/* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MES_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
@@ -186,7 +191,16 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_SUSPEND_WG_STATE_OFFSET),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_STATUS),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
@@ -215,7 +229,24 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_CMD_BUFSZ),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ),
+ /* cp header registers */
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_ME_HEADER_DUMP),
};
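/* The repeated HEADER_DUMP entries in the register lists above appear
 * intentional: each read of a CP_*_HEADER_DUMP register seems to return
 * the next entry of the CP header-dump FIFO, so listing the same
 * register eight times captures eight successive entries (assumption
 * inferred from the usage; not confirmed against the register spec).
 */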
static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = {
@@ -475,33 +506,18 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t padding, offset;
-
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- padding = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
-
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
- *cpu_ptr = cpu_to_le32(0xCAFEDEAD);
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r)
- return r;
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
- cpu_ptr = &adev->wb.wb[index];
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
- goto err1;
- }
+ r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
@@ -528,12 +544,10 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- if (!ring->is_mes_queue)
- amdgpu_ib_free(&ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -881,6 +895,34 @@ static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev,
soc24_grbm_select(adev, me, pipe, q, vm);
}
+/* all sizes are in bytes */
+#define MQD_SHADOW_BASE_SIZE 73728
+#define MQD_SHADOW_BASE_ALIGNMENT 256
+#define MQD_FWWORKAREA_SIZE 484
+#define MQD_FWWORKAREA_ALIGNMENT 256
+
+static void gfx_v12_0_get_gfx_shadow_info_nocheck(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info)
+{
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+}
+
+static int gfx_v12_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check)
+{
+ if (adev->gfx.cp_gfx_shadow || skip_check) {
+ gfx_v12_0_get_gfx_shadow_info_nocheck(adev, shadow_info);
+ return 0;
+ }
+
+ memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
+ return -EINVAL;
+}
+
static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v12_0_select_se_sh,
@@ -889,6 +931,7 @@ static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v12_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
+ .get_gfx_shadow_info = &gfx_v12_0_get_gfx_shadow_info,
};
static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1346,6 +1389,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
unsigned num_compute_rings;
int xcc_id = 0;
struct amdgpu_device *adev = ip_block->adev;
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
INIT_DELAYED_WORK(&adev->gfx.idle_work, amdgpu_gfx_profile_idle_work_handler);
@@ -1354,7 +1398,7 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 1):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
- adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.me.num_queue_per_pipe = 8;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 2;
adev->gfx.mec.num_queue_per_pipe = 4;
@@ -1372,6 +1416,22 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
+ if (!adev->gfx.disable_uq &&
+ adev->gfx.me_fw_version >= 2780 &&
+ adev->gfx.pfp_fw_version >= 2840 &&
+ adev->gfx.mec_fw_version >= 3050 &&
+ adev->mes.fw_version[0] >= 123) {
+ adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+ adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
if (adev->gfx.me_fw_version >= 2480 &&
adev->gfx.pfp_fw_version >= 2530 &&
adev->gfx.mec_fw_version >= 2680 &&
@@ -1383,36 +1443,38 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
- /* recalculate compute rings to use based on hardware configuration */
- num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
- adev->gfx.mec.num_queue_per_pipe) / 2;
- adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
- num_compute_rings);
+ if (adev->gfx.num_compute_rings) {
+ /* recalculate compute rings to use based on hardware configuration */
+ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe) / 2;
+ adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
+ num_compute_rings);
+ }
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
+ GFX_12_0_0__SRCID__CP_EOP_INTERRUPT,
&adev->gfx.eop_irq);
if (r)
return r;
/* Bad opcode Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
+ GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR,
&adev->gfx.bad_op_irq);
if (r)
return r;
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
+ GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
+ GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
@@ -1433,37 +1495,41 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* set up the gfx ring */
- for (i = 0; i < adev->gfx.me.num_me; i++) {
- for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
- if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
- continue;
-
- r = gfx_v12_0_gfx_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
- ring_id++;
+ if (adev->gfx.num_gfx_rings) {
+ /* set up the gfx ring */
+ for (i = 0; i < adev->gfx.me.num_me; i++) {
+ for (j = 0; j < num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
+ if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
+ continue;
+
+ r = gfx_v12_0_gfx_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
+ ring_id++;
+ }
}
}
}
- ring_id = 0;
- /* set up the compute queues - allocate horizontally across pipes */
- for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
- for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
- for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev,
- 0, i, k, j))
- continue;
+ if (adev->gfx.num_compute_rings) {
+ ring_id = 0;
+ /* set up the compute queues - allocate horizontally across pipes */
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev,
+ 0, i, k, j))
+ continue;
- r = gfx_v12_0_compute_ring_init(adev, ring_id,
- i, k, j);
- if (r)
- return r;
+ r = gfx_v12_0_compute_ring_init(adev, ring_id,
+ i, k, j);
+ if (r)
+ return r;
- ring_id++;
+ ring_id++;
+ }
}
}
}
@@ -2948,6 +3014,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -2968,6 +3036,14 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
+ /* set gfx UQ items */
+ mqd->shadow_base_lo = lower_32_bits(prop->shadow_addr);
+ mqd->shadow_base_hi = upper_32_bits(prop->shadow_addr);
+ mqd->fw_work_area_base_lo = lower_32_bits(prop->csa_addr);
+ mqd->fw_work_area_base_hi = upper_32_bits(prop->csa_addr);
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3091,6 +3167,8 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->tmz_queue)
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -3142,6 +3220,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_active = prop->hqd_active;
+ /* set UQ fence address */
+ mqd->fence_address_lo = lower_32_bits(prop->fence_address);
+ mqd->fence_address_hi = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -3600,6 +3682,49 @@ static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+static int gfx_v12_0_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_GFX]) {
+ for (m = 0; m < adev->gfx.me.num_me; m++) {
+ for (p = 0; p < adev->gfx.me.num_pipe_per_me; p++) {
+ irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_COMPUTE]) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3610,6 +3735,7 @@ static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
+ gfx_v12_0_set_userq_eop_interrupts(adev, false);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -3698,11 +3824,33 @@ static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ case 1:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = false;
+ break;
+ case 2:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = false;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
- adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq) {
+ adev->gfx.num_gfx_rings = 0;
+ adev->gfx.num_compute_rings = 0;
+ } else {
+ adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ }
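/* Unlike the gfx11 path, which keeps one GFX ring around temporarily to
 * program the clear state, gfx12 runs with zero kernel rings when kernel
 * queues are disabled.
 */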
gfx_v12_0_set_kiq_pm4_funcs(adev);
gfx_v12_0_set_ring_funcs(adev);
@@ -3733,6 +3881,10 @@ static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = gfx_v12_0_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -4172,45 +4324,17 @@ static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always being used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- if (ring->use_doorbell) {
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
- lower_32_bits(ring->wptr));
- WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
- upper_32_bits(ring->wptr));
- }
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
+ lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
+ upper_32_bits(ring->wptr));
}
}
@@ -4235,42 +4359,14 @@ static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
- uint64_t wptr_tmp;
-
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
- wptr_tmp = ring->wptr & ring->buf_mask;
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
- *wptr_saved = wptr_tmp;
- /* assume doorbell always used by mes mapped queue */
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- } else {
- WDOORBELL64(ring->doorbell_index, wptr_tmp);
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index, wptr_tmp);
- }
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell) {
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
+ ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- /* XXX check if swapping is necessary on BE */
- if (ring->use_doorbell) {
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr);
- WDOORBELL64(ring->doorbell_index, ring->wptr);
- } else {
- BUG(); /* only DOORBELL method supported on gfx12 now */
- }
+ BUG(); /* only DOORBELL method supported on gfx12 now */
}
}
@@ -4317,10 +4413,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x400000;
-
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4340,10 +4432,6 @@ static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- if (ring->is_mes_queue)
- /* inherit vmid from mqd */
- control |= 0x40000000;
-
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
@@ -4383,8 +4471,7 @@ static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
- amdgpu_ring_write(ring, ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
+ amdgpu_ring_write(ring, 0);
}
static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
@@ -4412,10 +4499,7 @@ static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- if (ring->is_mes_queue)
- gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
- else
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
@@ -4749,25 +4833,23 @@ static int gfx_v12_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- int i;
+ u32 doorbell_offset = entry->src_data[0];
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
- uint32_t mes_queue_id = entry->src_data[0];
+ int i;
DRM_DEBUG("IH: CP EOP\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
} else {
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@@ -4934,27 +5016,29 @@ static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev,
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 0:
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ BUG();
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
@@ -5160,6 +5244,69 @@ static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block)
amdgpu_gfx_off_ctrl(adev, true);
}
+static bool gfx_v12_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* Disable the pipe reset until the CPFW fully supports it. */
+ dev_warn_once(adev->dev, "The CPFW doesn't support pipe reset yet.\n");
+ return false;
+}
+
+static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r;
+
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE0_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 1);
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ PFP_PIPE1_RESET, 0);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_ME_CNTL,
+ ME_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_ME_CNTL, clean_pipe);
+
+ r = (RREG32(SOC15_REG_OFFSET(GC, 0, regCP_GFX_RS64_INSTR_PNTR1)) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "The ring %s pipe reset: %s\n", ring->name,
+ r == 0 ? "successfully" : "failed");
+ /* Sometimes the ME start pc counter can't cache correctly, so the
+ * PC check only as a reference and pipe reset result rely on the
+ * later ring test.
+ */
+ return 0;
+}
+
static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -5170,8 +5317,10 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
- dev_err(adev->dev, "reset via MES failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
+ r = gfx_v12_reset_gfx_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v12_0_kgq_init_queue(ring, true);
@@ -5189,6 +5338,89 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
+static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe = 0, clean_pipe = 0;
+ int r = 0;
+
+ if (!gfx_v12_pipe_reset_support(adev))
+ return -EOPNOTSUPP;
+
+ gfx_v12_0_set_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ reset_pipe = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ clean_pipe = reset_pipe;
+
+ if (adev->gfx.rs64_enable) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE1_RESET, 0);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE2_RESET, 0);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_RS64_CNTL,
+ MEC_PIPE3_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, clean_pipe);
+ r = (RREG32_SOC15(GC, 0, regCP_MEC_RS64_INSTR_PNTR) << 2) -
+ RS64_FW_UC_START_ADDR_LO;
+ } else {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 0);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ clean_pipe = REG_SET_FIELD(clean_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 0);
+ break;
+ default:
+ break;
+ }
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, 0, regCP_MEC_CNTL, clean_pipe);
+ /* No F32 MEC instruction pointer register was found; assume the
+ * driver won't run in F32 mode.
+ */
+ }
+
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v12_0_unset_safe_mode(adev, 0);
+
+ dev_info(adev->dev, "Ring %s pipe reset %s\n", ring->name,
+ r == 0 ? "succeeded" : "failed");
+ /* The ring test is needed to verify the pipe reset result. */
+ return 0;
+}
+
static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -5199,8 +5431,10 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "reset via MMIO failed %d\n", r);
- return r;
+ dev_warn(adev->dev, "failed (%d) to reset kcq, trying pipe reset\n", r);
+ r = gfx_v12_0_reset_compute_pipe(ring);
+ if (r)
+ return r;
}
r = gfx_v12_0_kcq_init_queue(ring, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 13fbee46417a..70d7a1f434c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -53,6 +53,9 @@
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
+
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
@@ -1732,10 +1735,14 @@ static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
gfx_v6_0_get_cu_info(adev);
gfx_v6_0_config_init(adev);
- WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
- (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
- WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
- (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
+ WREG32(mmCP_QUEUE_THRESHOLDS,
+ ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
+ (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
+
+ /* set HW defaults for 3D engine */
+ WREG32(mmCP_MEQ_THRESHOLDS,
+ (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
+ (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
sx_debug_1 = RREG32(mmSX_DEBUG_1);
WREG32(mmSX_DEBUG_1, sx_debug_1);
@@ -2851,44 +2858,21 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8181bd0e4f18..da0534ff1271 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -55,6 +55,9 @@
#define GFX7_NUM_GFX_RINGS 1
#define GFX7_MEC_HPD_SIZE 2048
+#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
+#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
+
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
@@ -3882,67 +3885,22 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- buffer[count++] = cpu_to_le32(0x16000012);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KAVERI:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- case CHIP_HAWAII:
- buffer[count++] = cpu_to_le32(0x3a00161a);
- buffer[count++] = cpu_to_le32(0x0000002e);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
+ buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bfedd487efc5..5ee2237d8ee8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1223,48 +1223,22 @@ out:
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
- PACKET3_SET_CONTEXT_REG_START);
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d7db4cb907ae..d377a7c57d5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -225,17 +225,36 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME2_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
- SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3)
+ SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
+ /* packet headers */
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
@@ -277,6 +296,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};
enum ta_ras_gfx_subblock {
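Note: the repeated CP_*_HEADER_DUMP entries above are deliberate, not a
copy-paste slip: these offsets are assumed to front a small on-chip FIFO where
each MMIO read pops the next saved packet-header dword, so naming the same
offset eight times in a dump list captures eight entries. Hedged illustration:

    /* Assumes each read of CP_ME_HEADER_DUMP pops one FIFO entry. */
    u32 hdr[8];
    int n;

    for (n = 0; n < 8; n++)
        hdr[n] = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_HEADER_DUMP));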
@@ -1624,42 +1651,16 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
+ u32 count = 0;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index -
- PACKET3_SET_CONTEXT_REG_START);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
- }
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
+ count = amdgpu_gfx_csb_preamble_start(buffer);
+ count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
+ amdgpu_gfx_csb_preamble_end(buffer, count);
}
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
@@ -5441,16 +5442,8 @@ static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_ce_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
@@ -5473,16 +5466,8 @@ static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
payload_size = sizeof(struct v9_de_ib_state);
- if (ring->is_mes_queue) {
- payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
- } else {
- payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
- }
+ payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
IB_COMPLETION_STATUS_PREEMPTED;
@@ -5672,19 +5657,9 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ce_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, ce_payload);
- ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- }
+ offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+ ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -5770,28 +5745,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bo
void *de_payload_cpu_addr;
int cnt;
- if (ring->is_mes_queue) {
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gfx_meta_data) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr =
- amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- de_payload_cpu_addr =
- amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = offsetof(struct amdgpu_mes_ctx_meta_data,
- gfx[0].gds_backup) +
- offsetof(struct v9_gfx_meta_data, de_payload);
- gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- } else {
- offset = offsetof(struct v9_gfx_meta_data, de_payload);
- de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
- de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+ offset = offsetof(struct v9_gfx_meta_data, de_payload);
+ de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+ de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
- gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
- AMDGPU_CSA_SIZE - adev->gds.gds_size,
- PAGE_SIZE);
- }
+ gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+ AMDGPU_CSA_SIZE - adev->gds.gds_size,
+ PAGE_SIZE);
if (usegds) {
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
@@ -7339,9 +7299,14 @@ static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p, "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9[reg].reg_name,
- adev->gfx.ip_dump_compute_queues[index + reg]);
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ "mmCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues[index + reg]);
+ else
+ drm_printf(p, "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues[index + reg]);
}
index += reg_count;
}
@@ -7378,9 +7343,13 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
soc15_grbm_select(adev, 1 + i, j, k, 0, 0);
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues[index + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET(
- gc_cp_reg_list_9[reg]));
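+ /* gc_cp_reg_list_9 only names ME1's header-dump FIFO; for the second MEC (i != 0), read the ME2 register in its place */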
+ if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues[index + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET(
+ gc_cp_reg_list_9[reg]));
}
index += reg_count;
}
@@ -7394,8 +7363,14 @@ static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
/* Emit the cleaner shader */
- amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ else
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
+
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index d81449f9d822..c48cd47b531f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1547,7 +1547,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
{
uint32_t bank, way, mem;
static const char * const vml2_way_str[] = { "BIGK", "4K" };
- static const char * const utcl2_rounter_str[] = { "VMC", "APT" };
+ static const char * const utcl2_router_str[] = { "VMC", "APT" };
mem = instance % blk->num_mem_blocks;
way = (instance / blk->num_mem_blocks) % blk->num_ways;
@@ -1568,7 +1568,7 @@ static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
dev_info(
adev->dev,
"GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
- bank, utcl2_rounter_str[mem], sec_cnt, ded_cnt);
+ bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
break;
case ATC_L2_CACHE_2M:
dev_info(
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 53fbf6ca7cdb..c233edf60569 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -105,9 +105,6 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
- /* cp header registers */
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
- SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
@@ -154,6 +151,14 @@ static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
+ SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
@@ -1148,6 +1153,12 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
+ case IP_VERSION(9, 5, 0):
+ if (adev->gfx.mec_fw_version >= 21) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
+ }
+ break;
default:
break;
}
@@ -1262,6 +1273,22 @@ static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
}
}
+/* For ASICs that need the xnack chain workaround, and whose MEC firmware
+ * supports it, set the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit
+ * and inform KFD to set the xnack_chain bit in SET_RESOURCES.
+ */
+static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
+{
+ uint32_t data;
+
+ if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ return;
+
+ data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1, data);
+}
+
static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
int xcc_id)
{
@@ -1306,6 +1333,7 @@ static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
+ gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}
static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
@@ -1318,6 +1346,20 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
adev->gfx.config.db_debug2 =
RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ /* ToDo: GC 9.4.4 */
+ case IP_VERSION(9, 4, 3):
+ if (adev->gfx.mec_fw_version >= 184)
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ case IP_VERSION(9, 5, 0):
+ if (adev->gfx.mec_fw_version >= 23)
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_constants_init(adev, i);
}
@@ -3447,9 +3489,7 @@ static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
{
- /*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/
- if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->gfx.mec_fw_version >= 0x0000009b)
+ if (adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
return true;
else
dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n");
@@ -4558,12 +4598,21 @@ static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_pri
"\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
xcc_id, i, j, k);
for (reg = 0; reg < reg_count; reg++) {
- drm_printf(p,
- "%-50s \t 0x%08x\n",
- gc_cp_reg_list_9_4_3[reg].reg_name,
- adev->gfx.ip_dump_compute_queues
- [xcc_offset + inst_offset +
- reg]);
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ "regCP_MEC_ME2_HEADER_DUMP",
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
+ else
+ drm_printf(p,
+ "%-50s \t 0x%08x\n",
+ gc_cp_reg_list_9_4_3[reg].reg_name,
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset + inst_offset +
+ reg]);
}
inst_offset += reg_count;
}
@@ -4612,12 +4661,20 @@ static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
GET_INST(GC, xcc_id));
for (reg = 0; reg < reg_count; reg++) {
- adev->gfx.ip_dump_compute_queues
- [xcc_offset +
- inst_offset + reg] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(
- gc_cp_reg_list_9_4_3[reg],
- GET_INST(GC, xcc_id)));
+ if (i && gc_cp_reg_list_9_4_3[reg].reg_offset ==
+ regCP_MEC_ME1_HEADER_DUMP)
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id),
+ regCP_MEC_ME2_HEADER_DUMP));
+ else
+ adev->gfx.ip_dump_compute_queues
+ [xcc_offset +
+ inst_offset + reg] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(
+ gc_cp_reg_list_9_4_3[reg],
+ GET_INST(GC, xcc_id)));
}
inst_offset += reg_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 809b3a882d0d..a3e2787501f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -428,10 +428,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index e74e26b6a4f2..72211409227b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -393,10 +393,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -752,6 +748,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
+ /* The mall_size is already calculated as mall_size_per_umc * num_umc.
+ * However, for gfx1151, which features a 2-to-1 UMC mapping,
+ * the result must be multiplied by 2 to determine the actual mall size.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 5, 1):
+ adev->gmc.mall_size *= 2;
+ break;
+ default:
+ break;
+ }
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
@@ -832,7 +840,7 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
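+ /* (when kernel queues are disabled, amdkfd instead owns VMIDs 1-15) */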
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index c6f290704d47..b645d3e6a6c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -413,10 +413,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
- /* MES fw manages IH_VMID_x_LUT updating */
- if (ring->is_mes_queue)
- return;
-
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
@@ -820,7 +816,7 @@ static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.first_kfd_vmid = 8;
+ adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;
amdgpu_vm_manager_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index a992e79d9581..8030fcd64210 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -249,7 +249,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp &= ~VGA_VSTATUS_CNTL;
+ tmp &= VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK;
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
/* Update configuration */
@@ -627,17 +627,16 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
"write" : "read", block, mc_client, mc_id);
}
-/*
static const u32 mc_cg_registers[] = {
- MC_HUB_MISC_HUB_CG,
- MC_HUB_MISC_SIP_CG,
- MC_HUB_MISC_VM_CG,
- MC_XPB_CLK_GAT,
- ATC_MISC_CG,
- MC_CITF_MISC_WR_CG,
- MC_CITF_MISC_RD_CG,
- MC_CITF_MISC_VM_CG,
- VM_L2_CG,
+ mmMC_HUB_MISC_HUB_CG,
+ mmMC_HUB_MISC_SIP_CG,
+ mmMC_HUB_MISC_VM_CG,
+ mmMC_XPB_CLK_GAT,
+ mmATC_MISC_CG,
+ mmMC_CITF_MISC_WR_CG,
+ mmMC_CITF_MISC_RD_CG,
+ mmMC_CITF_MISC_VM_CG,
+ mmVM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
@@ -672,7 +671,7 @@ static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
@@ -689,7 +688,7 @@ static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
@@ -705,7 +704,7 @@ static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -728,7 +727,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -744,7 +743,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
orig = data = RREG32(mmHDP_MEM_POWER_LS);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
@@ -752,7 +751,6 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
if (orig != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
-*/
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
@@ -1098,6 +1096,20 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool gate = false;
+
+ if (state == AMD_CG_STATE_GATE)
+ gate = true;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ gmc_v6_0_enable_mc_mgcg(adev, gate);
+ gmc_v6_0_enable_mc_ls(adev, gate);
+ }
+ gmc_v6_0_enable_bif_mgls(adev, gate);
+ gmc_v6_0_enable_hdp_mgcg(adev, gate);
+ gmc_v6_0_enable_hdp_ls(adev, gate);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 83e39f16044a..a8d5795084fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1157,17 +1157,10 @@ static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block)
static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- u32 tmp;
struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC_BUSY_MASK);
- if (!tmp)
+ if (gmc_v7_0_is_idle(ip_block))
return 0;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 5effe8327d29..282197f4ffb1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1213,10 +1213,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
if (uncached) {
mtype = MTYPE_UC;
} else if (ext_coherent) {
- if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id)
- mtype = is_local ? MTYPE_CC : MTYPE_UC;
- else
- mtype = MTYPE_UC;
+ mtype = is_local ? MTYPE_CC : MTYPE_UC;
} else if (adev->flags & AMD_IS_APU) {
mtype = is_local ? mtype_local : MTYPE_NC;
} else {
@@ -1336,7 +1333,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
mtype_local = MTYPE_CC;
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
- } else if (adev->rev_id) {
+ } else {
/* MTYPE_UC case */
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
}
@@ -1505,7 +1502,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
- adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
break;
@@ -2075,6 +2071,9 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
@@ -2411,13 +2410,6 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.flush_tlb_needs_extra_type_2 =
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
adev->gmc.xgmi.num_physical_nodes;
- /*
- * TODO: This workaround is badly documented and had a buggy
- * implementation. We should probably verify what we do here.
- */
- adev->gmc.flush_tlb_needs_extra_type_0 =
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
- adev->rev_id == 0;
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index cbbeadeb53f7..e6c0d86d3486 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -36,22 +36,6 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
-static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- /* We just need to read back a register to post the write.
- * Reading back the remapped register causes problems on
- * some platforms so just read back the memory size register.
- */
- if (adev->nbio.funcs->get_memsize)
- adev->nbio.funcs->get_memsize(adev);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -185,7 +169,7 @@ struct amdgpu_hdp_ras hdp_v4_0_ras = {
};
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
- .flush_hdp = hdp_v4_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v4_0_invalidate_hdp,
.update_clock_gating = hdp_v4_0_update_clock_gating,
.get_clock_gating_state = hdp_v4_0_get_clockgating_state,
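Note: the four flush_hdp callbacks removed from hdp v4/v5/v6/v7 in this patch
were byte-for-byte identical, so the shared amdgpu_hdp_generic_flush() that
replaces them presumably carries the same body:

    void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring)
    {
        if (!ring || !ring->funcs->emit_wreg) {
            WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
            /* Post the write with a read-back; the remapped register is
             * avoided since reading it back misbehaves on some platforms,
             * so the memory size register is read instead. */
            if (adev->nbio.funcs->get_memsize)
                adev->nbio.funcs->get_memsize(adev);
        } else {
            amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
        }
    }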
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index 086a647308df..8bc001dc9f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -27,22 +27,6 @@
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- /* We just need to read back a register to post the write.
- * Reading back the remapped register causes problems on
- * some platforms so just read back the memory size register.
- */
- if (adev->nbio.funcs->get_memsize)
- adev->nbio.funcs->get_memsize(adev);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -222,7 +206,7 @@ static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
- .flush_hdp = hdp_v5_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.invalidate_hdp = hdp_v5_0_invalidate_hdp,
.update_clock_gating = hdp_v5_0_update_clock_gating,
.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index 6ccd31c8bc69..ec20daf4272c 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -30,22 +30,6 @@
#define regHDP_CLK_CNTL_V6_1 0xd5
#define regHDP_CLK_CNTL_V6_1_BASE_IDX 0
-static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- /* We just need to read back a register to post the write.
- * Reading back the remapped register causes problems on
- * some platforms so just read back the memory size register.
- */
- if (adev->nbio.funcs->get_memsize)
- adev->nbio.funcs->get_memsize(adev);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -154,7 +138,7 @@ static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v6_0_funcs = {
- .flush_hdp = hdp_v6_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v6_0_update_clock_gating,
.get_clock_gating_state = hdp_v6_0_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 2c9239a22f39..ed1debc03507 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -27,22 +27,6 @@
#include "hdp/hdp_7_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
-static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
-{
- if (!ring || !ring->funcs->emit_wreg) {
- WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- /* We just need to read back a register to post the write.
- * Reading back the remapped register causes problems on
- * some platforms so just read back the memory size register.
- */
- if (adev->nbio.funcs->get_memsize)
- adev->nbio.funcs->get_memsize(adev);
- } else {
- amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- }
-}
-
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
@@ -142,7 +126,7 @@ static void hdp_v7_0_get_clockgating_state(struct amdgpu_device *adev,
}
const struct amdgpu_hdp_funcs hdp_v7_0_funcs = {
- .flush_hdp = hdp_v7_0_flush_hdp,
+ .flush_hdp = amdgpu_hdp_generic_flush,
.update_clock_gating = hdp_v7_0_update_clock_gating,
.get_clock_gating_state = hdp_v7_0_get_clockgating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index eb4185dcbd1d..5900b560b7de 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -349,6 +349,7 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
/* update doorbell range for ih ring 0 */
@@ -446,7 +447,10 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
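Note: an SR-IOV VF is not allowed to clear RB_OVERFLOW, so the new
ih->overflow flag records the condition for software instead. A hypothetical
consumer-side sketch (the handling shown is illustrative, not part of this
patch):

    /* IH client noticing an overflow the VF could not clear in hardware */
    if (ih->overflow) {
        dev_warn(adev->dev, "IH ring overflowed, interrupts were dropped\n");
        ih->overflow = false;   /* acknowledged in software */
    }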
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index cfa91d709d49..cc626036ed9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -32,6 +32,7 @@
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
@@ -51,8 +52,12 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 69dd92f6e86d..574880d67009 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -25,6 +25,7 @@
*
*/
+#include <linux/gpio/machine.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
@@ -39,15 +40,45 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16
};
+static struct gpiod_lookup_table isp_gpio_table = {
+ .dev_id = "amd_isp_capture",
+ .table = {
+ GPIO_LOOKUP("AMDI0030:00", 85, "enable_isp", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table isp_sensor_gpio_table = {
+ .dev_id = "i2c-ov05c10",
+ .table = {
+ GPIO_LOOKUP("amdisp-pinctrl", 0, "enable", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
struct amdgpu_device *adev = isp->adev;
int idx, int_idx, num_res, r;
+ u8 isp_dev_hid[ACPI_ID_LEN];
u64 isp_base;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
+ r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid);
+ if (r) {
+ drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)\n", r);
+ /* allow GPU init to progress */
+ return 0;
+ }
+
+ /* add GPIO resources required for OMNI5C10 sensor */
+ if (!strcmp("OMNI5C10", isp_dev_hid)) {
+ gpiod_add_lookup_table(&isp_gpio_table);
+ gpiod_add_lookup_table(&isp_sensor_gpio_table);
+ }
+
isp_base = adev->rmmio_base;
isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
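Note: a gpiod lookup table hands a GPIO to a consumer matched by device name
("amd_isp_capture" / "i2c-ov05c10") and connection ID. A hypothetical
consumer-side sketch, not part of this patch, using only the standard gpiod
consumer API:

    #include <linux/gpio/consumer.h>

    /* in the amd_isp_capture driver's probe(), for example: */
    struct gpio_desc *enable;

    enable = devm_gpiod_get(dev, "enable_isp", GPIOD_OUT_LOW);
    if (IS_ERR(enable))
        return PTR_ERR(enable);
    gpiod_set_value_cansleep(enable, 1);    /* power the ISP up */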
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index a8ccae361ec7..79e342d5ab28 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -149,6 +149,18 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ /* JPEG DJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
+ /* JPEG EJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
@@ -434,6 +446,9 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+
return ret;
}
@@ -1041,6 +1056,14 @@ static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1200,6 +1223,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
.process = jpeg_v4_0_3_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_ras_irq_funcs = {
+ .set = jpeg_v4_0_3_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1208,6 +1236,9 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
}
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
+
+ adev->jpeg.inst->ras_poison_irq.num_types = 1;
+ adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_3_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
@@ -1304,9 +1335,47 @@ static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
}
+static uint32_t jpeg_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_JPEG_V4_0_3_JPEG0:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+ break;
+ case AMDGPU_JPEG_V4_0_3_JPEG1:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool jpeg_v4_0_3_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+ for (sub = 0; sub < AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ jpeg_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
.query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
+ .query_poison_status = jpeg_v4_0_3_query_ras_poison_status,
};
static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@@ -1383,6 +1452,13 @@ static int jpeg_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->jpeg.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
&jpeg_v4_0_3_aca_info, NULL);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index a90bf370a002..2e110d04af84 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -46,6 +46,13 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+enum amdgpu_jpeg_v4_0_3_sub_block {
+ AMDGPU_JPEG_V4_0_3_JPEG0 = 0,
+ AMDGPU_JPEG_V4_0_3_JPEG1,
+
+ AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 218e16b68f1d..3b6f65a25646 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -28,15 +28,18 @@
#include "soc15d.h"
#include "jpeg_v4_0_3.h"
#include "jpeg_v5_0_1.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
+static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);
static int amdgpu_ih_srcid_jpeg[] = {
@@ -118,6 +121,7 @@ static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
jpeg_v5_0_1_set_dec_ring_funcs(adev);
jpeg_v5_0_1_set_irq_funcs(adev);
+ jpeg_v5_0_1_set_ras_funcs(adev);
return 0;
}
@@ -142,6 +146,17 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
}
+ /* JPEG DJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
+
+ /* JPEG EJPEG POISON EVENT */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+ if (r)
+ return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
@@ -156,21 +171,16 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- ring->use_doorbell = false;
+ ring->use_doorbell = true;
ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
if (!amdgpu_sriov_vf(adev)) {
ring->doorbell_index =
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
1 + j + 11 * jpeg_inst;
} else {
- if (j < 4)
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 4 + j + 32 * jpeg_inst;
- else
- ring->doorbell_index =
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 8 + j + 32 * jpeg_inst;
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 2 + j + 32 * jpeg_inst;
}
sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
@@ -237,7 +247,10 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
int i, j, r, jpeg_inst;
if (amdgpu_sriov_vf(adev)) {
- /* jpeg_v5_0_1_start_sriov(adev); */
+ r = jpeg_v5_0_1_start_sriov(adev);
+ if (r)
+ return r;
+
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
@@ -264,7 +277,7 @@ static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->jpeg.inst[i].ring_dec[j];
if (ring->use_doorbell)
WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
- (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->pipe,
ring->doorbell_index <<
VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
@@ -291,8 +304,13 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+ }
+
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
}
@@ -422,6 +440,119 @@ static void jpeg_v5_0_1_init_jrbc(struct amdgpu_ring *ring)
reg_offset);
}
+static int jpeg_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw, item_offset;
+ uint32_t init_status;
+ int i, j, jpeg_inst;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ item_offset = header.total_size;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; j++) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ table_size = 0;
+
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, lower_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, upper_32_bits(ring->gpu_addr));
+ tmp = SOC15_REG_OFFSET(JPEG, 0, regUVD_JRBC_RB_SIZE);
+ MMSCH_V5_0_INSERT_DIRECT_WT(tmp, ring->ring_size / 4);
+
+ if (j < 5) {
+ header.mjpegdec0[j].table_offset = item_offset;
+ header.mjpegdec0[j].init_status = 0;
+ header.mjpegdec0[j].table_size = table_size;
+ } else {
+ header.mjpegdec1[j - 5].table_offset = item_offset;
+ header.mjpegdec1[j - 5].init_status = 0;
+ header.mjpegdec1[j - 5].table_size = table_size;
+ }
+ header.total_size += table_size;
+ item_offset += table_size;
+ }
+
+ MMSCH_V5_0_INSERT_END();
+
+ /* send init table to MMSCH */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ init_status =
+ ((struct mmsch_v5_0_init_header *)(table_loc))->mjpegdec0[i].init_status;
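+ /* Handshake: the write to MMSCH_VF_MAILBOX_HOST above rings the MMSCH
+ * doorbell; poll MAILBOX_RESP for up to ~1000 us expecting RESP__OK. */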
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, jpeg_inst, regMMSCH_VF_MAILBOX_RESP);
+
+ if (resp != 0)
+ break;
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE &&
+ init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n",
+ resp, init_status);
+
+ }
+ return 0;
+}
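Note: the MMSCH_V5_0_INSERT_DIRECT_WT()/MMSCH_V5_0_INSERT_END() macros are not
part of this excerpt; by analogy with the earlier mmsch_v3/v4 headers they are
assumed to append a command struct at table_loc and advance the running
table_size, roughly:

    #define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value)                  \
        do {                                                         \
            size = sizeof(struct mmsch_v5_0_cmd_direct_write);       \
            direct_wt.cmd_header.size = size >> 2;                   \
            direct_wt.reg_offset = reg;                              \
            direct_wt.reg_value = value;                             \
            memcpy((void *)table_loc, &direct_wt, size);             \
            table_loc += size >> 2;                                  \
            table_size += size >> 2;                                 \
        } while (0)

    #define MMSCH_V5_0_INSERT_END()                                  \
        do {                                                         \
            size = sizeof(struct mmsch_v5_0_cmd_end);                \
            memcpy((void *)table_loc, &end, size);                   \
            table_loc += size >> 2;                                  \
            table_size += size >> 2;                                 \
        } while (0)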
+
/**
* jpeg_v5_0_1_start - start JPEG block
*
@@ -581,6 +712,11 @@ static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
int ret;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->jpeg.cur_state)
return 0;
@@ -603,6 +739,16 @@ static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -772,6 +918,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
.process = jpeg_v5_0_1_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = {
+ .set = jpeg_v5_0_1_set_ras_interrupt_state,
+ .process = amdgpu_jpeg_process_poison_irq,
+};
+
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@@ -780,6 +931,10 @@ static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
+
+ adev->jpeg.inst->ras_poison_irq.num_types = 1;
+ adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
@@ -789,3 +944,150 @@ const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
.rev = 1,
.funcs = &jpeg_v5_0_1_ip_funcs,
};
+
+static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_JPEG_V5_0_1_JPEG0:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+ break;
+ case AMDGPU_JPEG_V5_0_1_JPEG1:
+ reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+ for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+static const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = {
+ .query_poison_status = jpeg_v5_0_1_query_ras_poison_status,
+};
+
+static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* reference: SMU driver interface header file */
+static int jpeg_v5_0_1_err_codes[] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31
+};
+
+static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ jpeg_v5_0_1_err_codes,
+ ARRAY_SIZE(jpeg_v5_0_1_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = {
+ .aca_bank_parser = jpeg_v5_0_1_aca_bank_parser,
+ .aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid,
+};
+
+static const struct aca_info jpeg_v5_0_1_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &jpeg_v5_0_1_aca_bank_ops,
+};
+
+static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->jpeg.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
+static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = {
+ .ras_block = {
+ .hw_ops = &jpeg_v5_0_1_ras_hw_ops,
+ .ras_late_init = jpeg_v5_0_1_ras_late_init,
+ },
+};
+
+static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->jpeg.ras = &jpeg_v5_0_1_ras;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
index efdab57324e4..a7e58d5fb246 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
@@ -26,6 +26,9 @@
extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET 0x4094
+#define regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET 0x1bffe
+
#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
@@ -98,4 +101,11 @@ extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
#define regVCN_RRMT_CNTL 0x0940
#define regVCN_RRMT_CNTL_BASE_IDX 1
+enum amdgpu_jpeg_v5_0_1_sub_block {
+ AMDGPU_JPEG_V5_0_1_JPEG0 = 0,
+ AMDGPU_JPEG_V5_0_1_JPEG1,
+
+ AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK,
+};
+
#endif /* __JPEG_V5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
new file mode 100644
index 000000000000..d6f50b13e2ba
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
+
+#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
+#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
+
+static int
+mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+{
+ int ret;
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
+ goto err_reserve_bo_failed;
+ }
+
+ ret = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (ret) {
+ DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
+ goto err_map_bo_gart_failed;
+ }
+
+ amdgpu_bo_unreserve(bo);
+ bo = amdgpu_bo_ref(bo);
+
+ return 0;
+
+err_map_bo_gart_failed:
+ amdgpu_bo_unreserve(bo);
+err_reserve_bo_failed:
+ return ret;
+}
+
+static int
+mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ uint64_t wptr)
+{
+ struct amdgpu_bo_va_mapping *wptr_mapping;
+ struct amdgpu_vm *wptr_vm;
+ struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
+ int ret;
+
+ wptr_vm = queue->vm;
+ ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
+ if (ret)
+ return ret;
+
+ wptr &= AMDGPU_GMC_HOLE_MASK;
+ wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
+ amdgpu_bo_unreserve(wptr_vm->root.bo);
+ if (!wptr_mapping) {
+ DRM_ERROR("Failed to lookup wptr bo\n");
+ return -EINVAL;
+ }
+
+ wptr_obj->obj = wptr_mapping->bo_va->base.bo;
+ if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
+ DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
+ return -EINVAL;
+ }
+
+ ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
+ if (ret) {
+ DRM_ERROR("Failed to map wptr bo to GART\n");
+ return ret;
+ }
+
+ queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
+ return 0;
+}
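
For reference, the lookup above canonicalizes the wptr VA and converts it to a page frame number before searching the VM mappings. A standalone sketch of that arithmetic, assuming 4 KiB pages and a 48-bit hole mask (the AMDGPU_GMC_HOLE_MASK value below is an assumption for the sketch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KiB pages */
#define GMC_HOLE_MASK	0x0000ffffffffffffULL	/* assumed 48-bit mask */

int main(void)
{
	uint64_t wptr_va = 0xffff800012345678ULL;	/* hypothetical VA */
	uint64_t canon = wptr_va & GMC_HOLE_MASK;	/* strip the hole bits */
	uint64_t pfn = canon >> PAGE_SHIFT;		/* key for the lookup */

	printf("canonical VA = 0x%llx\n", (unsigned long long)canon);
	printf("page frame   = 0x%llx\n", (unsigned long long)pfn);
	return 0;
}
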
+
+static int convert_to_mes_priority(int priority)
+{
+ switch (priority) {
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW:
+ default:
+ return AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW:
+ return AMDGPU_MES_PRIORITY_LEVEL_LOW;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH:
+ return AMDGPU_MES_PRIORITY_LEVEL_HIGH;
+ }
+}
+
+static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ struct amdgpu_mqd_prop *userq_props = queue->userq_prop;
+ struct mes_add_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
+
+ queue_input.process_va_start = 0;
+ queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
+
+ /* set process quantum to 10 ms and gang quantum to 1 ms as default */
+ queue_input.process_quantum = 100000;
+ queue_input.gang_quantum = 10000;
+ queue_input.paging = false;
+
+ queue_input.process_context_addr = ctx->gpu_addr;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+ queue_input.gang_global_priority_level = convert_to_mes_priority(queue->priority);
+
+ queue_input.process_id = queue->vm->pasid;
+ queue_input.queue_type = queue->queue_type;
+ queue_input.mqd_addr = queue->mqd.gpu_addr;
+ queue_input.wptr_addr = userq_props->wptr_gpu_addr;
+ queue_input.queue_size = userq_props->queue_size >> 2;
+ queue_input.doorbell_offset = userq_props->doorbell_index;
+ queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
+ queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
+ return r;
+ }
+
+ DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
+ return 0;
+}
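
A quick sanity check of the quantum comment above, assuming the MES quantum fields count 100 ns ticks (an assumption; the unit is not stated in this patch): 100000 ticks come to 10 ms and 10000 ticks to 1 ms.

#include <stdio.h>

int main(void)
{
	const long tick_ns = 100;	/* assumed tick size in ns */

	printf("process quantum: %ld ms\n", 100000 * tick_ns / 1000000); /* 10 */
	printf("gang quantum:    %ld ms\n", 10000 * tick_ns / 1000000);  /* 1 */
	return 0;
}
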
+
+static int mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_remove_queue_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
+ queue_input.doorbell_offset = queue->doorbell_index;
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
+ return r;
+}
+
+static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ struct drm_amdgpu_userq_in *mqd_user)
+{
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r, size;
+
+ /*
+ * The FW expects at least one page allocated for the process
+ * ctx and one for the gang ctx. Create a single object that
+ * covers both.
+ */
+ size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
+ r = amdgpu_userq_create_object(uq_mgr, ctx, size);
+ if (r) {
+ DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args_in,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
+ struct drm_amdgpu_userq_in *mqd_user = args_in;
+ struct amdgpu_mqd_prop *userq_props;
+ int r;
+
+ /* Structure to initialize MQD for userqueue using generic MQD init function */
+ userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
+ if (!userq_props) {
+ DRM_ERROR("Failed to allocate memory for userq_props\n");
+ return -ENOMEM;
+ }
+
+ if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
+ !mqd_user->queue_va || mqd_user->queue_size == 0) {
+ DRM_ERROR("Invalid MQD parameters for userqueue\n");
+ r = -EINVAL;
+ goto free_props;
+ }
+
+ r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
+ if (r) {
+ DRM_ERROR("Failed to create MQD object for userqueue\n");
+ goto free_props;
+ }
+
+ /* Initialize the MQD BO with user given values */
+ userq_props->wptr_gpu_addr = mqd_user->wptr_va;
+ userq_props->rptr_gpu_addr = mqd_user->rptr_va;
+ userq_props->queue_size = mqd_user->queue_size;
+ userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
+ userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
+ userq_props->use_doorbell = true;
+ userq_props->doorbell_index = queue->doorbell_index;
+ userq_props->fence_address = queue->fence_drv->gpu_addr;
+
+ if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
+ struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
+
+ if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
+ DRM_ERROR("Invalid compute IP MQD size\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(compute_mqd)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = PTR_ERR(compute_mqd);
+ goto free_mqd;
+ }
+
+ userq_props->eop_gpu_addr = compute_mqd->eop_va;
+ userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
+ userq_props->hqd_active = false;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(compute_mqd);
+ } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
+ struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid GFX MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_gfx_v11)) {
+ DRM_ERROR("Failed to read user MQD\n");
+ r = PTR_ERR(mqd_gfx_v11);
+ goto free_mqd;
+ }
+
+ userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
+ userq_props->csa_addr = mqd_gfx_v11->csa_va;
+ userq_props->tmz_queue =
+ mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+ kfree(mqd_gfx_v11);
+ } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
+ struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
+
+ if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
+ DRM_ERROR("Invalid SDMA MQD\n");
+ r = -EINVAL;
+ goto free_mqd;
+ }
+
+ mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+ if (IS_ERR(mqd_sdma_v11)) {
+ DRM_ERROR("Failed to read sdma user MQD\n");
+ r = PTR_ERR(mqd_sdma_v11);
+ goto free_mqd;
+ }
+
+ userq_props->csa_addr = mqd_sdma_v11->csa_va;
+ kfree(mqd_sdma_v11);
+ }
+
+ queue->userq_prop = userq_props;
+
+ r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
+ if (r) {
+ DRM_ERROR("Failed to initialize MQD for userqueue\n");
+ goto free_mqd;
+ }
+
+ /* Create BO for FW operations */
+ r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
+ if (r) {
+ DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+ goto free_mqd;
+ }
+
+ /* FW expects WPTR BOs to be mapped into GART */
+ r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
+ if (r) {
+ DRM_ERROR("Failed to create WPTR mapping\n");
+ goto free_ctx;
+ }
+
+ return 0;
+
+free_ctx:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+
+free_mqd:
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+
+free_props:
+ kfree(userq_props);
+
+ return r;
+}
+
+static void
+mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
+ kfree(queue->userq_prop);
+ amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
+}
+
+const struct amdgpu_userq_funcs userq_mes_funcs = {
+ .mqd_create = mes_userq_mqd_create,
+ .mqd_destroy = mes_userq_mqd_destroy,
+ .unmap = mes_userq_unmap,
+ .map = mes_userq_map,
+};
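
The const ops table is the only export of this file; callers dispatch through amdgpu_userq_funcs rather than calling the mes_userq_* helpers directly. A minimal sketch of that dispatch shape, with simplified stand-in types (not the real amdgpu structures):

#include <stdio.h>

struct queue;				/* opaque handle in this sketch */

struct userq_funcs {
	int (*map)(struct queue *q);
	int (*unmap)(struct queue *q);
};

static int demo_map(struct queue *q)   { (void)q; puts("map");   return 0; }
static int demo_unmap(struct queue *q) { (void)q; puts("unmap"); return 0; }

static const struct userq_funcs demo_funcs = {
	.map = demo_map,
	.unmap = demo_unmap,
};

int main(void)
{
	struct queue *q = NULL;		/* stand-in queue handle */

	demo_funcs.map(q);		/* caller never names demo_map */
	demo_funcs.unmap(q);
	return 0;
}
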
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
index 7ac87ef26aec..090ae8897770 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2023 Red Hat Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +19,12 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
+ *
*/
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ad102_ofa = {
- .sclass = {
- { -1, -1, NVC9FA_VIDEO_OFA },
- {}
- }
-};
-int
-ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ad102_ofa, device, type, inst, pengine);
+#ifndef MES_USERQ_H
+#define MES_USERQ_H
+#include "amdgpu_userq.h"
- return -ENODEV;
-}
+extern const struct amdgpu_userq_funcs userq_mes_funcs;
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index ef9538fbbf53..c9eba537de09 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -287,6 +287,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -310,9 +327,9 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -458,31 +475,6 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- if (input->use_mmio)
- return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
- input->me_id, input->pipe_id,
- input->queue_id, input->vmid);
-
- union MESAPI__RESET mes_reset_queue_pkt;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- return mes_v11_0_submit_pkt_and_poll_completion(mes,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v11_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -649,7 +641,7 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW versoin must be larger than 0x63 to support limit single process feature.\n");
+ dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
return -EINVAL;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
@@ -694,7 +686,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->compute_hqd_mask[i];
for (i = 0; i < MAX_GFX_PIPES; i++)
- mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
+ mes_set_hw_res_pkt.gfx_hqd_mask[i] =
+ mes->gfx_hqd_mask[i];
for (i = 0; i < MAX_SDMA_PIPES; i++)
mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
@@ -723,7 +716,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->event_log_gpu_addr;
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
@@ -753,8 +746,8 @@ static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}
-static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
@@ -772,7 +765,7 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -798,7 +791,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
- .reset_legacy_queue = mes_v11_0_reset_legacy_queue,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
};
@@ -1701,22 +1693,10 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
- (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
.early_init = mes_v11_0_early_init,
- .late_init = mes_v11_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
.hw_init = mes_v11_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index e6ab617b9a40..b4f17332d466 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -274,6 +274,23 @@ static int convert_to_mes_queue_type(int queue_type)
return -1;
}
+static int convert_to_mes_priority_level(int priority_level)
+{
+ switch (priority_level) {
+ case AMDGPU_MES_PRIORITY_LEVEL_LOW:
+ return AMD_PRIORITY_LEVEL_LOW;
+ case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
+ default:
+ return AMD_PRIORITY_LEVEL_NORMAL;
+ case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
+ return AMD_PRIORITY_LEVEL_MEDIUM;
+ case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
+ return AMD_PRIORITY_LEVEL_HIGH;
+ case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
+ return AMD_PRIORITY_LEVEL_REALTIME;
+ }
+}
+
static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
@@ -297,9 +314,9 @@ static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
- input->inprocess_gang_priority;
+ convert_to_mes_priority_level(input->inprocess_gang_priority);
mes_add_queue_pkt.gang_global_priority_level =
- input->gang_global_priority_level;
+ convert_to_mes_priority_level(input->gang_global_priority_level);
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
@@ -477,32 +494,6 @@ static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
return r;
}
-static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
- struct mes_reset_queue_input *input)
-{
- union MESAPI__RESET mes_reset_queue_pkt;
- int pipe;
-
- memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
-
- mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
- mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
- mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-
- mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
- mes_reset_queue_pkt.gang_context_addr = input->gang_context_addr;
- /*mes_reset_queue_pkt.reset_queue_only = 1;*/
-
- if (mes->adev->enable_uni_mes)
- pipe = AMDGPU_MES_KIQ_PIPE;
- else
- pipe = AMDGPU_MES_SCHED_PIPE;
-
- return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
- &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
- offsetof(union MESAPI__REMOVE_QUEUE, api_status));
-}
-
static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
struct mes_map_legacy_queue_input *input)
{
@@ -762,7 +753,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
}
- if (enforce_isolation)
+ if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
mes_set_hw_res_pkt.limit_single_process = 1;
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -845,8 +836,8 @@ static void mes_v12_0_enable_unmapped_doorbell_handling(
WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}
-static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
- struct mes_reset_legacy_queue_input *input)
+static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input)
{
union MESAPI__RESET mes_reset_queue_pkt;
int pipe;
@@ -865,7 +856,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
- if (mes_reset_queue_pkt.queue_type == MES_QUEUE_TYPE_GFX) {
+ if (input->legacy_gfx) {
mes_reset_queue_pkt.reset_legacy_gfx = 1;
mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
mes_reset_queue_pkt.queue_id_lp = input->queue_id;
@@ -878,7 +869,7 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
}
- if (mes->adev->enable_uni_mes)
+ if (input->is_kq)
pipe = AMDGPU_MES_KIQ_PIPE;
else
pipe = AMDGPU_MES_SCHED_PIPE;
@@ -896,7 +887,6 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.suspend_gang = mes_v12_0_suspend_gang,
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
- .reset_legacy_queue = mes_v12_0_reset_legacy_queue,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
};
@@ -1811,21 +1801,10 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- /* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
- amdgpu_mes_self_test(adev);
-
- return 0;
-}
-
static const struct amd_ip_funcs mes_v12_0_ip_funcs = {
.name = "mes_v12_0",
.early_init = mes_v12_0_early_init,
- .late_init = mes_v12_0_late_init,
+ .late_init = NULL,
.sw_init = mes_v12_0_sw_init,
.sw_fini = mes_v12_0_sw_fini,
.hw_init = mes_v12_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 84cde1239ee4..76167fadb292 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -30,6 +30,7 @@
#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
@@ -192,10 +193,8 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
uint32_t tmp, inst_mask;
int i;
- /* Setup TLB control */
- inst_mask = adev->aid_mask;
- for_each_inst(i, inst_mask) {
- tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
1);
@@ -209,7 +208,26 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
- WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
}
}
@@ -221,6 +239,9 @@ static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
inst_mask = adev->aid_mask;
for_each_inst(i, inst_mask) {
for (j = 0; j < 5; j++) { /* DAGB instances */
@@ -454,6 +475,30 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
return 0;
}
+static void mmhub_v1_8_disable_l1_tlb(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ u32 i, inst_mask;
+
+ if (amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev)) {
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ psp_reg_program_no_ring(&adev->psp, tmp, PSP_REG_MMHUB_L1_TLB_CNTL);
+ } else {
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
+ 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
+ }
+}
+
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub;
@@ -467,15 +512,6 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
for (i = 0; i < 16; i++)
WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
-
- /* Setup TLB control */
- tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
- 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 0);
- WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
-
if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
@@ -485,6 +521,8 @@ static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
}
}
+
+ mmhub_v1_8_disable_l1_tlb(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
new file mode 100644
index 000000000000..6f749814929f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V5_0_H__
+#define __MMSCH_V5_0_H__
+
+#include "amdgpu_vcn.h"
+
+#define MMSCH_VERSION_MAJOR 5
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+#define RB_ENABLED (1 << 0)
+#define RB4_ENABLED (1 << 1)
+
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+#define MMSCH_VF_MAILBOX_RESP__FAILED 0x3
+#define MMSCH_VF_MAILBOX_RESP__FAILED_SMALL_CTX_SIZE 0x4
+#define MMSCH_VF_MAILBOX_RESP__UNKNOWN_CMD 0x5
+
+enum mmsch_v5_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v5_0_table_info {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v5_0_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v5_0_table_info vcn0;
+ struct mmsch_v5_0_table_info mjpegdec0[5];
+ struct mmsch_v5_0_table_info mjpegdec1[5];
+};
+
+struct mmsch_v5_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v5_0_cmd_direct_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v5_0_cmd_direct_read_modify_write {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v5_0_cmd_direct_polling {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v5_0_cmd_end {
+ struct mmsch_v5_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v5_0_cmd_indirect_write {
+ struct mmsch_v5_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+#define MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_read_modify_write); \
+ size_dw = size / 4; \
+ direct_rd_mod_wt.cmd_header.reg_offset = reg; \
+ direct_rd_mod_wt.mask_value = mask; \
+ direct_rd_mod_wt.write_data = data; \
+ memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_WT(reg, value) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_write); \
+ size_dw = size / 4; \
+ direct_wt.cmd_header.reg_offset = reg; \
+ direct_wt.reg_value = value; \
+ memcpy((void *)table_loc, &direct_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ size = sizeof(struct mmsch_v5_0_cmd_direct_polling); \
+ size_dw = size / 4; \
+ direct_poll.cmd_header.reg_offset = reg; \
+ direct_poll.mask_value = mask; \
+ direct_poll.wait_value = wait; \
+ memcpy((void *)table_loc, &direct_poll, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V5_0_INSERT_END() { \
+ size = sizeof(struct mmsch_v5_0_cmd_end); \
+ size_dw = size / 4; \
+ memcpy((void *)table_loc, &end, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#endif
+
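
The MMSCH_V5_0_INSERT_* macros above deliberately lean on caller-declared locals (table_loc, table_size, size, size_dw and the command structs). A standalone mock showing the contract a caller is expected to provide — simplified types, not the real amdgpu environment:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct direct_reg_header { uint32_t reg_offset : 28; uint32_t command_type : 4; };
struct cmd_direct_write  { struct direct_reg_header cmd_header; uint32_t reg_value; };

/* mirrors MMSCH_V5_0_INSERT_DIRECT_WT: pack one write command, advance in dwords */
#define INSERT_DIRECT_WT(reg, value) do { \
	size = sizeof(struct cmd_direct_write); \
	size_dw = size / 4; \
	direct_wt.cmd_header.reg_offset = (reg); \
	direct_wt.reg_value = (value); \
	memcpy((void *)table_loc, &direct_wt, size); \
	table_loc += size_dw; \
	table_size += size_dw; \
} while (0)

int main(void)
{
	uint32_t table[64], *table_loc = table;	/* caller-owned locals */
	uint32_t table_size = 0, size, size_dw;
	struct cmd_direct_write direct_wt = { { 0, 0 }, 0 };

	INSERT_DIRECT_WT(0x1234, 0xdeadbeef);	/* hypothetical reg/value */
	INSERT_DIRECT_WT(0x1238, 0x1);

	printf("table is %u dwords\n", table_size);	/* prints 4 */
	return 0;
}
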
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index f5411b798e11..48101a34e049 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -274,6 +274,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -281,8 +282,6 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (amdgpu_device_should_recover_gpu(adev)
&& (!amdgpu_device_has_job_running(adev) ||
adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -293,6 +292,19 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
+static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
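
The worker above only refreshes bad pages when it can take the reset-domain semaphore for read; if a reset currently holds it, the refresh is skipped rather than blocked. A standalone sketch of that trylock pattern, using a pthread rwlock as a stand-in for the semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;

static void bad_pages_work(void)
{
	if (pthread_rwlock_tryrdlock(&reset_sem) == 0) {
		puts("refreshing bad pages");	/* fini/request/init here */
		pthread_rwlock_unlock(&reset_sem);
	} else {
		puts("reset in progress, skipping refresh");
	}
}

int main(void)
{
	bad_pages_work();	/* no writer held: refresh path runs */
	return 0;
}
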
+
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -312,26 +324,42 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
- case IDH_FLR_NOTIFICATION:
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_ai_mailbox_send_ack(adev);
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
- &adev->virt.flr_work),
- "Failed to queue work! at %s",
- __func__);
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
break;
- case IDH_QUERY_ALIVE:
- xgpu_ai_mailbox_send_ack(adev);
- break;
- /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
- * it byfar since that polling thread will handle it,
- * other msg like flr complete is not handled here.
- */
- case IDH_CLR_MSG_BUF:
- case IDH_FLR_NOTIFICATION_CMPL:
- case IDH_READY_TO_ACCESS_GPU:
- default:
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling; the IRQ can
+ * safely ignore it since the polling thread will handle it.
+ * Other messages, such as FLR complete, are not handled here.
+ */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
break;
}
@@ -387,6 +415,7 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+ INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index ed57cbc150af..874b9f8f9804 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -40,6 +40,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -54,6 +55,9 @@ enum idh_event {
IDH_RAS_POISON_READY,
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 5aadf24cb202..f6d8597452ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -67,6 +67,8 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg == IDH_FAIL)
r = -EINVAL;
+ else if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
+ r = -ENODEV;
else if (reg != event)
return -ENOENT;
@@ -103,6 +105,7 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r;
uint64_t timeout, now;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
now = (uint64_t)ktime_to_ms(ktime_get());
timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
@@ -110,8 +113,16 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);
if (!r) {
- dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
+ dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
+ event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
return 0;
+ } else if (r == -ENODEV) {
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. "
+ "Runtime Services are halted.\n");
+ }
+ return r;
}
msleep(10);
@@ -166,6 +177,10 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
enum idh_event event = -1;
send_request:
+
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
switch (req) {
@@ -187,6 +202,9 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
+ case IDH_REQ_RAS_BAD_PAGES:
+ event = IDH_RAS_BAD_PAGES_READY;
+ break;
default:
break;
}
@@ -320,6 +338,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ struct amdgpu_reset_context reset_context = { 0 };
amdgpu_virt_fini_data_exchange(adev);
@@ -330,8 +349,6 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
- struct amdgpu_reset_context reset_context;
- memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
@@ -342,6 +359,19 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
+static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_bad_pages(adev);
+ amdgpu_virt_init_data_exchange(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -364,8 +394,27 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.bad_pages_work);
+ break;
+ case IDH_UNRECOV_ERR_NOTIFICATION:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (!amdgpu_ras_is_rma(adev)) {
+ ras->is_rma = true;
+ dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
+ }
+
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
+ break;
case IDH_FLR_NOTIFICATION:
if (amdgpu_sriov_runtime(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
@@ -436,6 +485,7 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+ INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
return 0;
}
@@ -480,6 +530,11 @@ static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}
+static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -492,4 +547,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
+ .req_bad_pages = xgpu_nv_req_ras_bad_pages,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 72c9fceb9d79..5808689562cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -42,6 +42,7 @@ enum idh_request {
IDH_RAS_POISON = 202,
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
+ IDH_REQ_RAS_BAD_PAGES = 205,
};
enum idh_event {
@@ -58,6 +59,9 @@ enum idh_event {
IDH_RAS_ERROR_DETECTED,
IDH_RAS_ERROR_COUNT_READY = 11,
IDH_RAS_CPER_DUMP_READY = 14,
+ IDH_RAS_BAD_PAGES_READY = 15,
+ IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
+ IDH_UNRECOV_ERR_NOTIFICATION = 17,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index f23cb79110d6..a376f072700d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -177,8 +177,12 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
u32 aid_id = instance;
+ u32 range_size;
if (use_doorbell) {
+ range_size = (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 5, 0)) ?
+ 0xb : 0x9;
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
@@ -186,7 +190,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
- 0x9);
+ range_size);
if (aid_id)
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
@@ -204,7 +208,7 @@ static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
- S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9);
+ S2A_DOORBELL_PORT1_RANGE_SIZE, range_size);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index fcd708eae75c..80153f837470 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -34,9 +34,6 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"
-#include "oss/osssys_4_0_offset.h"
-#include "oss/osssys_4_0_sh_mask.h"
-
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
@@ -99,9 +96,6 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
@@ -138,8 +132,6 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
0, true);
@@ -147,37 +139,6 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
-static void psp_v12_0_reroute_ih(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- uint32_t tmp;
-
- /* Change IH ring for VMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-
- /* Change IH ring for UMC */
- tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
- tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
-
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
-
- mdelay(20);
- psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -186,49 +147,23 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- psp_v12_0_reroute_ih(psp);
-
- if (amdgpu_sriov_vf(psp->adev)) {
- /* Write low address of the ring to C2PMSG_102 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_103 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
-
- /* Write the ring initialization command to C2PMSG_101 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
-
- } else {
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
- }
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
return ret;
}
@@ -247,9 +182,6 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
GFX_CTRL_CMD_ID_DESTROY_RINGS);
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index afdf8ce3b4c5..ead616c11705 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
@@ -71,20 +73,13 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin");
/* Retry times for vmbx ready wait */
#define PSP_VMBX_POLLING_LIMIT 3000
-/* VBIOS gfl defines */
-#define MBOX_READY_MASK 0x80000000
-#define MBOX_STATUS_MASK 0x0000FFFF
-#define MBOX_COMMAND_MASK 0x00FF0000
-#define MBOX_READY_FLAG 0x80000000
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
-#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
-#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
-
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
#define regMP1_PUB_SCRATCH0 0x3b10090
+#define PSP13_BL_STATUS_SIZE 100
+
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -151,6 +146,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
+static void psp_v13_0_bootloader_print_status(struct psp_context *psp,
+ const char *msg)
+{
+ struct amdgpu_device *adev = psp->adev;
+ u32 bl_status_reg;
+ char bl_status_msg[PSP13_BL_STATUS_SIZE];
+ int i, at;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
+ at = 0;
+ for_each_inst(i, adev->aid_mask) {
+ bl_status_reg =
+ (SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_92)
+ << 2) +
+ adev->asic_funcs->encode_ext_smn_addressing(i);
+ at += snprintf(bl_status_msg + at,
+ PSP13_BL_STATUS_SIZE - at,
+ " status(%02i): 0x%08x", i,
+ RREG32_PCIE_EXT(bl_status_reg));
+ }
+ dev_info(adev->dev, "%s - %s", msg, bl_status_msg);
+ }
+}
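
The status dump above grows a fixed buffer with the at += snprintf(...) idiom, shrinking the remaining space on each call so the message cannot overrun the 100-byte buffer. A standalone sketch of the same idiom (the loop here stays comfortably within bounds, as the kernel function does):

#include <stdio.h>

#define BUF_SIZE 100

int main(void)
{
	char buf[BUF_SIZE];
	int at = 0, i;

	for (i = 0; i < 3; i++)
		at += snprintf(buf + at, BUF_SIZE - at,
			       " status(%02d): 0x%08x", i, 0x80000000u + i);

	printf("%s\n", buf);	/* three appended status entries */
	return 0;
}
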
+
static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -196,6 +217,9 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
if (ret == 0)
return 0;
+ if (retry_loop && !(retry_loop % 10))
+ psp_v13_0_bootloader_print_status(
+ psp, "Waiting for bootloader completion");
}
return ret;
@@ -710,7 +734,8 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
/* Ring the doorbell */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_73, 1);
- if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE)
+ if (cmd == C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE ||
+ cmd == C2PMSG_CMD_SPI_GET_FLASH_IMAGE)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
@@ -766,6 +791,37 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
return 0;
}
+static int psp_v13_0_dump_spirom(struct psp_context *psp,
+ uint64_t fw_pri_mc_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ /* Confirm PSP is ready to start */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ if (ret) {
+ dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
+ return ret;
+ }
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, lower_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO);
+ if (ret)
+ return ret;
+
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_116, upper_32_bits(fw_pri_mc_addr));
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI);
+ if (ret)
+ return ret;
+
+ ret = psp_v13_0_exec_spi_cmd(psp, C2PMSG_CMD_SPI_GET_FLASH_IMAGE);
+
+ return ret;
+}
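
The dump handshake above hands the 64-bit fw_pri_mc_addr to PSP as two 32-bit register writes, via the kernel's lower_32_bits()/upper_32_bits() helpers. Standalone equivalents of that split, with a hypothetical address:

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t mc_addr = 0x0000008012345000ULL;	/* hypothetical address */

	printf("lo = 0x%08x\n", lower_32(mc_addr));	/* 0x12345000 */
	printf("hi = 0x%08x\n", upper_32(mc_addr));	/* 0x00000080 */
	return 0;
}
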
+
static int psp_v13_0_vbflash_status(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -858,6 +914,25 @@ static bool psp_v13_0_is_reload_needed(struct psp_context *psp)
return false;
}
+static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret = -EOPNOTSUPP;
+
+ /* PSP will broadcast the value to all instances */
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_GBR_IH_SET);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ }
+
+ return ret;
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -879,11 +954,13 @@ static const struct psp_funcs psp_v13_0_funcs = {
.load_usbc_pd_fw = psp_v13_0_load_usbc_pd_fw,
.read_usbc_pd_fw = psp_v13_0_read_usbc_pd_fw,
.update_spirom = psp_v13_0_update_spirom,
+ .dump_spirom = psp_v13_0_dump_spirom,
.vbflash_stat = psp_v13_0_vbflash_status,
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
.is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
.is_reload_needed = psp_v13_0_is_reload_needed,
+ .reg_program_no_ring = psp_v13_0_reg_program_no_ring,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 688a720bbbbd..cef68df4c663 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -106,8 +106,9 @@ static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
-static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -489,7 +490,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
{
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, sdma_cntl;
int i;
for_each_inst(i, inst_mask) {
@@ -501,6 +502,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+ sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
if (sdma[i]->use_doorbell) {
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
@@ -994,6 +998,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
/* enable context empty interrupt during initialization */
@@ -1333,6 +1338,11 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
+static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v4_4_2_stop_queue,
+ .start_kernel_queue = &sdma_v4_4_2_restore_queue,
+};
+
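This table replaces the globally registered pre_reset/post_reset callbacks (removed further down) with per-instance function pointers, so a common reset path can drive any SDMA generation through the same hooks. A hedged sketch of the shape of that dispatch — the real amdgpu_sdma_reset_engine() differs in detail:

#include <stdio.h>

struct sdma_funcs {
	int (*stop_kernel_queue)(void *ring);
	int (*start_kernel_queue)(void *ring);
};

static int stop_stub(void *ring)  { printf("stop %p\n", ring);  return 0; }
static int start_stub(void *ring) { printf("start %p\n", ring); return 0; }

static const struct sdma_funcs my_funcs = {
	.stop_kernel_queue  = stop_stub,
	.start_kernel_queue = start_stub,
};

/* Assumed shape of the common reset path the table plugs into:
 * stop the kernel queue, reset the engine, restore the queue. */
static int reset_engine(const struct sdma_funcs *f, void *ring)
{
	int r = f->stop_kernel_queue(ring);
	if (r)
		return r;
	/* engine soft reset would happen here */
	return f->start_kernel_queue(ring);
}

int main(void)
{
	int dummy_ring;
	return reset_engine(&my_funcs, &dummy_ring);
}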
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1351,8 +1361,6 @@ static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
sdma_v4_4_2_set_vm_pte_funcs(adev);
sdma_v4_4_2_set_irq_funcs(adev);
sdma_v4_4_2_set_ras_funcs(adev);
- sdma_v4_4_2_set_engine_reset_funcs(adev);
-
return 0;
}
@@ -1447,6 +1455,7 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
/* Initialize guilty flags for GFX and PAGE queues */
adev->sdma.instance[i].gfx_guilty = false;
adev->sdma.instance[i].page_guilty = false;
+ adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1665,7 +1674,7 @@ static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- u32 id = GET_INST(SDMA0, ring->me);
+ u32 id = ring->me;
int r;
if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
@@ -1678,11 +1687,12 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
-static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_id)
+static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+ u32 instance_id = ring->me;
u32 inst_mask;
uint64_t rptr;
- struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -1715,11 +1725,11 @@ static int sdma_v4_4_2_stop_queue(struct amdgpu_device *adev, uint32_t instance_
return 0;
}
-static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instance_id)
+static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
- int i;
+ struct amdgpu_device *adev = ring->adev;
u32 inst_mask;
- struct amdgpu_ring *ring = &adev->sdma.instance[instance_id].ring;
+ int i;
inst_mask = 1 << ring->me;
udelay(50);
@@ -1739,16 +1749,6 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_device *adev, uint32_t instan
return sdma_v4_4_2_inst_start(adev, inst_mask, true);
}
-static struct sdma_on_reset_funcs sdma_v4_4_2_engine_reset_funcs = {
- .pre_reset = sdma_v4_4_2_stop_queue,
- .post_reset = sdma_v4_4_2_restore_queue,
-};
-
-static void sdma_v4_4_2_set_engine_reset_funcs(struct amdgpu_device *adev)
-{
- amdgpu_sdma_register_on_reset_callbacks(adev, &sdma_v4_4_2_engine_reset_funcs);
-}
-
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -2373,7 +2373,9 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(9, 5, 0):
- /*TODO: enable the queue reset flag until fw supported */
+ if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
default:
break;
}
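Per-queue reset for IP 9.5.0 is now gated on an MEC firmware threshold plus a DPM capability query. A small sketch of that capability-gating pattern (the bit value is illustrative; the 0xf threshold comes from the hunk above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESET_TYPE_PER_QUEUE (1u << 2)	/* illustrative bit value */

static uint32_t update_reset_mask(uint32_t mask, uint32_t mec_fw_version,
				  bool dpm_reset_supported)
{
	if (mec_fw_version >= 0xf && dpm_reset_supported)
		mask |= RESET_TYPE_PER_QUEUE;
	return mask;
}

int main(void)
{
	printf("mask: 0x%x\n", update_reset_mask(0, 0x10, true));
	return 0;
}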
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 0dce59f4f6e2..1813c3ed0aa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -112,6 +112,8 @@ static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
@@ -369,67 +371,36 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-
- if (*is_queue_unmap)
- WDOORBELL64(aggregated_db_index,
- ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
- "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
- ring->me, mmSDMA0_GFX_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
+ ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -575,11 +546,9 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -588,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_0_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
+ *
* Stop the gfx async dma ring buffers (NAVI10).
*/
-static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -688,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_0_gfx_stop(adev);
+ sdma_v5_0_gfx_stop(adev, inst_mask);
sdma_v5_0_rlc_stop(adev);
}
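With the call fixed to pass the mask through directly (GENMASK() already yields the full instance mask, so shifting it again would stop the wrong instance), sdma_v5_0_enable() matches the v5.2 variant below. A userspace equivalent of the GENMASK-plus-iterate pattern that for_each_inst() expresses:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_U32(h, l) ((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))

int main(void)
{
	unsigned int num_instances = 2;
	uint32_t inst_mask = GENMASK_U32(num_instances - 1, 0);

	for (unsigned int i = 0; i < 32; i++)
		if (inst_mask & (1u << i))
			printf("stopping SDMA instance %u\n", i);
	return 0;
}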
@@ -1046,33 +1017,22 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1085,10 +1045,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1100,8 +1057,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1124,38 +1080,24 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256,
- AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1183,10 +1125,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1197,8 +1136,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1389,6 +1327,36 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
+{
+ u32 grbm_soft_reset;
+ u32 tmp;
+
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
+
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
+
+static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_0_stop_queue,
+ .start_kernel_queue = &sdma_v5_0_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
+};
+
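The soft-reset helper asserts the per-instance SOFT_RESET_SDMA0 bit, waits, then deasserts it. A runnable sketch of that assert/delay/deassert sequence with an invented field layout (the real GRBM_SOFT_RESET shifts differ):

#include <stdint.h>
#include <stdio.h>

#define SOFT_RESET_SDMA0_SHIFT 20		/* illustrative only */
#define SOFT_RESET_SDMA0_MASK  (1u << SOFT_RESET_SDMA0_SHIFT)

int main(void)
{
	uint32_t reg = 0, instance_id = 1;
	uint32_t bit = SOFT_RESET_SDMA0_MASK << instance_id;

	reg |= bit;			/* assert reset for this instance */
	printf("assert:   0x%08x\n", reg);
	/* ...udelay(50) in the driver... */
	reg &= ~bit;			/* deassert */
	printf("deassert: 0x%08x\n", reg);
	return 0;
}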
static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1431,6 +1399,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
+ adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1572,32 +1542,25 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ u32 inst_id = ring->me;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ return amdgpu_sdma_reset_engine(adev, inst_id);
+}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
- }
+static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_0_gfx_stop(adev, 1 << i);
/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1628,30 +1591,25 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze*/
- freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+ WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
- r = sdma_v5_0_gfx_resume_instance(adev, i, true);
-
-err0:
+ r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 2b39a03ff0c1..23f97da62808 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -113,6 +113,8 @@ static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring);
static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
@@ -394,11 +396,9 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if ((flags & AMDGPU_FENCE_FLAG_INT)) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -407,15 +407,15 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
* sdma_v5_2_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
+ *
* Stop the gfx async dma ring buffers.
*/
-static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
{
u32 rb_cntl, ib_cntl;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
+ for_each_inst(i, inst_mask) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -506,9 +506,11 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
+ uint32_t inst_mask;
+ inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!enable) {
- sdma_v5_2_gfx_stop(adev);
+ sdma_v5_2_gfx_stop(adev, inst_mask);
sdma_v5_2_rlc_stop(adev);
}
@@ -761,37 +763,49 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+static int sdma_v5_2_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
{
- struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset;
u32 tmp;
- int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- grbm_soft_reset = REG_SET_FIELD(0,
- GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
- 1);
- grbm_soft_reset <<= i;
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= instance_id;
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ return 0;
+}
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ sdma_v5_2_soft_reset_engine(adev, i);
udelay(50);
}
return 0;
}
+static const struct amdgpu_sdma_funcs sdma_v5_2_sdma_funcs = {
+ .stop_kernel_queue = &sdma_v5_2_stop_queue,
+ .start_kernel_queue = &sdma_v5_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v5_2_soft_reset_engine,
+};
+
/**
* sdma_v5_2_start - setup and start the async dma engines
*
@@ -903,33 +917,22 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -942,10 +945,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -957,8 +957,7 @@ static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -981,37 +980,23 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -1039,10 +1024,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1053,8 +1035,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1337,6 +1318,8 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
+ adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -1472,32 +1455,25 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, j, r;
- u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+ u32 inst_id = ring->me;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ return amdgpu_sdma_reset_engine(adev, inst_id);
+}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
- }
+static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
+{
+ u32 f32_cntl, freeze, cntl, stat1_reg;
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r = 0;
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
+ if (amdgpu_sriov_vf(adev))
return -EINVAL;
- }
+ i = ring->me;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* stop queue */
- ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
- rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ sdma_v5_2_gfx_stop(adev, 1 << i);
/*engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
@@ -1530,31 +1506,26 @@ static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
- /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
- preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
- preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
-
- soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
-
-
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
- udelay(50);
-
- soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 inst_id = ring->me;
+ u32 freeze;
+ int r;
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* unfreeze and unhalt */
- freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+ WREG32(sdma_v5_2_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
- r = sdma_v5_2_gfx_resume_instance(adev, i, true);
+ r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
-err0:
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index c214c3d2149b..5a70ae17be04 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -43,6 +43,8 @@
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
@@ -376,11 +378,9 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -891,6 +891,12 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
+ m->sdmax_rlcx_f32_dbg0 = lower_32_bits(prop->fence_address);
+ m->sdmax_rlcx_f32_dbg1 = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -917,33 +923,22 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -956,10 +951,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -971,8 +963,7 @@ static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -995,37 +986,23 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1053,10 +1030,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1067,8 +1041,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1300,6 +1273,23 @@ static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r)
return r;
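The amdgpu_user_queue module parameter selects between kernel-queue-only, mixed, and user-queue-only submission, encoded as the two flags set in the switch above. A standalone restatement of that mapping (the struct wrapper is illustrative):

#include <stdbool.h>
#include <stdio.h>

struct uq_policy {
	bool no_user_submission;
	bool disable_uq;
};

static struct uq_policy uq_policy_from_param(int user_queue)
{
	struct uq_policy p;

	switch (user_queue) {
	case 1:		/* both kernel and user queues */
		p.no_user_submission = false;
		p.disable_uq = false;
		break;
	case 2:		/* user queues only */
		p.no_user_submission = true;
		p.disable_uq = false;
		break;
	case -1:
	case 0:		/* kernel queues only (default) */
	default:
		p.no_user_submission = false;
		p.disable_uq = true;
		break;
	}
	return p;
}

int main(void)
{
	struct uq_policy p = uq_policy_from_param(2);

	printf("no_user_submission=%d disable_uq=%d\n",
	       p.no_user_submission, p.disable_uq);
	return 0;
}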
@@ -1329,11 +1319,19 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ /* SDMA user fence event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+ GFX_11_0_0__SRCID__SDMA_FENCE,
+ &adev->sdma.fence_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1376,6 +1374,10 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ /* add firmware version checks here */
+ if (0 && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -1399,11 +1401,39 @@ static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v6_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v6_0_start(adev);
+ r = sdma_v6_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v6_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1415,6 +1445,7 @@ static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);
+ sdma_v6_0_set_userq_trap_interrupts(adev, false);
return 0;
}
@@ -1555,25 +1586,9 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
- uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
- return 0;
- }
-
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
@@ -1595,6 +1610,29 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v6_0_process_fence_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 doorbell_offset = entry->src_data[0];
+
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
+
+ doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
+ }
+
+ return 0;
+}
+
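The fence IRQ handler keys its xarray lookup on the doorbell offset carried in src_data[0], shifted down to an index. A flat table can stand in for the xarray in a userspace sketch (the shift value here is an assumption, not the hardware constant):

#include <stdint.h>
#include <stdio.h>

#define DOORBELL_OFFSET_SHIFT 2		/* assumed, not the HW value */
#define MAX_DOORBELLS 1024

static void *fence_drv_table[MAX_DOORBELLS];	/* stands in for the xarray */

static void *lookup_fence_drv(uint32_t src_data0)
{
	uint32_t idx = src_data0 >> DOORBELL_OFFSET_SHIFT;

	if (idx >= MAX_DOORBELLS)
		return NULL;
	return fence_drv_table[idx];	/* NULL if no user queue registered */
}

int main(void)
{
	int drv;

	fence_drv_table[5] = &drv;
	printf("hit:  %p\n", lookup_fence_drv(5u << DOORBELL_OFFSET_SHIFT));
	printf("miss: %p\n", lookup_fence_drv(0));
	return 0;
}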
static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1731,6 +1769,10 @@ static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = {
.process = sdma_v6_0_process_trap_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v6_0_fence_irq_funcs = {
+ .process = sdma_v6_0_process_fence_irq,
+};
+
static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
.process = sdma_v6_0_process_illegal_inst_irq,
};
@@ -1740,6 +1782,7 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
+ adev->sdma.fence_irq.funcs = &sdma_v6_0_fence_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index b2706221df99..ad47d0bdf777 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -33,7 +33,7 @@
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "hdp/hdp_6_0_0_offset.h"
-#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
#include "soc15_common.h"
#include "soc15.h"
@@ -42,6 +42,8 @@
#include "sdma_common.h"
#include "sdma_v7_0.h"
#include "v12_structs.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
MODULE_FIRMWARE("amdgpu/sdma_7_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
@@ -204,66 +206,39 @@ static uint64_t sdma_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t *wptr_saved;
- uint32_t *is_queue_unmap;
- uint64_t aggregated_db_index;
- uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
- if (ring->is_mes_queue) {
- wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
- is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
- sizeof(uint32_t));
- aggregated_db_index =
- amdgpu_mes_get_aggregated_doorbell_index(adev,
- ring->hw_prio);
-
+ if (ring->use_doorbell) {
+ DRM_DEBUG("Using doorbell -- "
+ "wptr_offs == 0x%08x "
+ "lower_32_bits(ring->wptr) << 2 == 0x%08x "
+ "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
- *wptr_saved = ring->wptr << 2;
- if (*is_queue_unmap) {
- WDOORBELL64(aggregated_db_index, ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- }
+ DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
+ ring->doorbell_index, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
- if (ring->use_doorbell) {
- DRM_DEBUG("Using doorbell -- "
- "wptr_offs == 0x%08x "
- "lower_32_bits(ring->wptr) << 2 == 0x%08x "
- "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
- ring->wptr_offs,
- lower_32_bits(ring->wptr << 2),
- upper_32_bits(ring->wptr << 2));
- /* XXX check if swapping is necessary on BE */
- atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
- ring->wptr << 2);
- DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
- ring->doorbell_index, ring->wptr << 2);
- WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
- } else {
- DRM_DEBUG("Not using doorbell -- "
- "regSDMA%i_GFX_RB_WPTR == 0x%08x "
- "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
- ring->me,
- lower_32_bits(ring->wptr << 2),
- ring->me,
- upper_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
- ring->me,
- regSDMA0_QUEUE0_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ DRM_DEBUG("Not using doorbell -- "
+ "regSDMA%i_GFX_RB_WPTR == 0x%08x "
+ "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
+ ring->me,
+ lower_32_bits(ring->wptr << 2),
+ ring->me,
+ upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
+ ring->me,
+ regSDMA0_QUEUE0_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
}
}
@@ -407,11 +382,9 @@ static void sdma_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
- uint32_t ctx = ring->is_mes_queue ?
- (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
- amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
+ amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
}
@@ -935,6 +908,12 @@ static int sdma_v7_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_aql_cntl = 0x4000; //regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = 0xf; //regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
+ m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
+ m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
+
+ m->sdmax_rlcx_mcu_dbg0 = lower_32_bits(prop->fence_address);
+ m->sdmax_rlcx_mcu_dbg1 = upper_32_bits(prop->fence_address);
+
return 0;
}
@@ -961,33 +940,22 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
u32 tmp;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
- return r;
- }
-
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
}
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1000,10 +968,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
@@ -1015,8 +980,7 @@ static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1039,37 +1003,23 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
u32 tmp = 0;
u64 gpu_addr;
- volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
- if (ring->is_mes_queue) {
- uint32_t offset = 0;
- offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
- ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
-
- offset = amdgpu_mes_ctx_get_offs(ring,
- AMDGPU_MES_CTX_PADDING_OFFS);
- gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
- cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
- *cpu_ptr = tmp;
- } else {
- r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
- gpu_addr = adev->wb.gpu_addr + (index * 4);
- adev->wb.wb[index] = cpu_to_le32(tmp);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(tmp);
- r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
- goto err0;
- }
+ r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
@@ -1097,10 +1047,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
goto err1;
}
- if (ring->is_mes_queue)
- tmp = le32_to_cpu(*cpu_ptr);
- else
- tmp = le32_to_cpu(adev->wb.wb[index]);
+ tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
@@ -1111,8 +1058,7 @@ err1:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- if (!ring->is_mes_queue)
- amdgpu_device_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1312,6 +1258,23 @@ static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int r;
+ switch (amdgpu_user_queue) {
+ case -1:
+ case 0:
+ default:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = true;
+ break;
+ case 1:
+ adev->sdma.no_user_submission = false;
+ adev->sdma.disable_uq = false;
+ break;
+ case 2:
+ adev->sdma.no_user_submission = true;
+ adev->sdma.disable_uq = false;
+ break;
+ }
+
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to init sdma firmware!\n");
@@ -1337,16 +1300,24 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
- GFX_11_0_0__SRCID__SDMA_TRAP,
+ GFX_12_0_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
+ /* SDMA user fence event */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+ GFX_12_0_0__SRCID__SDMA_FENCE,
+ &adev->sdma.fence_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
+ ring->no_user_submission = adev->sdma.no_user_submission;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
@@ -1378,6 +1349,10 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ /* add firmware version checks here */
+ if (0 && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+
return r;
}
@@ -1400,11 +1375,39 @@ static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int sdma_v7_0_set_userq_trap_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int i, r;
+
+ if (adev->userq_funcs[AMDGPU_HW_IP_DMA]) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ irq_type = AMDGPU_SDMA_IRQ_INSTANCE0 + i;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->sdma.trap_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->sdma.trap_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
- return sdma_v7_0_start(adev);
+ r = sdma_v7_0_start(adev);
+ if (r)
+ return r;
+
+ return sdma_v7_0_set_userq_trap_interrupts(adev, true);
}
static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
@@ -1416,6 +1419,7 @@ static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
sdma_v7_0_ctx_switch_enable(adev, false);
sdma_v7_0_enable(adev, false);
+ sdma_v7_0_set_userq_trap_interrupts(adev, false);
return 0;
}
@@ -1533,25 +1537,9 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
- uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
- if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
- struct amdgpu_mes_queue *queue;
-
- mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
- spin_lock(&adev->mes.queue_id_lock);
- queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
- if (queue) {
- DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
- amdgpu_fence_process(queue->ring);
- }
- spin_unlock(&adev->mes.queue_id_lock);
- return 0;
- }
-
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
@@ -1573,6 +1561,29 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v7_0_process_fence_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 doorbell_offset = entry->src_data[0];
+
+ if (adev->enable_mes && doorbell_offset) {
+ struct amdgpu_userq_fence_driver *fence_drv = NULL;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long flags;
+
+ doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+ xa_lock_irqsave(xa, flags);
+ fence_drv = xa_load(xa, doorbell_offset);
+ if (fence_drv)
+ amdgpu_userq_fence_driver_process(fence_drv);
+ xa_unlock_irqrestore(xa, flags);
+ }
+
+ return 0;
+}
+
static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1710,6 +1721,10 @@ static const struct amdgpu_irq_src_funcs sdma_v7_0_trap_irq_funcs = {
.process = sdma_v7_0_process_trap_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v7_0_fence_irq_funcs = {
+ .process = sdma_v7_0_process_fence_irq,
+};
+
static const struct amdgpu_irq_src_funcs sdma_v7_0_illegal_inst_irq_funcs = {
.process = sdma_v7_0_process_illegal_inst_irq,
};
@@ -1719,6 +1734,7 @@ static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v7_0_trap_irq_funcs;
+ adev->sdma.fence_irq.funcs = &sdma_v7_0_fence_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v7_0_illegal_inst_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 2247f6a94858..e0f139de7991 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -35,6 +35,7 @@
#include "amdgpu_vce.h"
#include "atom.h"
#include "amd_pcie.h"
+
#include "si_dpm.h"
#include "sid.h"
#include "si_ih.h"
@@ -44,17 +45,30 @@
#include "dce_v6_0.h"
#include "si.h"
#include "uvd_v3_1.h"
-#include "amdgpu_vkms.h"
+
+#include "uvd/uvd_4_0_d.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
-#include "uvd/uvd_4_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+#include "si_enums.h"
#include "amdgpu_dm.h"
+#include "amdgpu_vkms.h"
static const u32 tahiti_golden_registers[] =
{
@@ -1071,8 +1085,8 @@ static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- r = RREG32(SMC_IND_DATA_0);
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ r = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
@@ -1082,8 +1096,8 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, (reg));
- WREG32(SMC_IND_DATA_0, (v));
+ WREG32(mmSMC_IND_INDEX_0, (reg));
+ WREG32(mmSMC_IND_DATA_0, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
@@ -1110,20 +1124,20 @@ static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
}
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
- {GRBM_STATUS},
+ {mmGRBM_STATUS},
{mmGRBM_STATUS2},
{mmGRBM_STATUS_SE0},
{mmGRBM_STATUS_SE1},
{mmSRBM_STATUS},
{mmSRBM_STATUS2},
- {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
- {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {mmDMA_STATUS_REG + DMA1_REGISTER_OFFSET},
{mmCP_STAT},
{mmCP_STALLED_STAT1},
{mmCP_STALLED_STAT2},
{mmCP_STALLED_STAT3},
- {GB_ADDR_CONFIG},
- {MC_ARB_RAMCFG},
+ {mmGB_ADDR_CONFIG},
+ {mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
{mmGB_TILE_MODE1},
{mmGB_TILE_MODE2},
@@ -1156,7 +1170,7 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{mmGB_TILE_MODE29},
{mmGB_TILE_MODE30},
{mmGB_TILE_MODE31},
- {CC_RB_BACKEND_DISABLE, true},
+ {mmCC_RB_BACKEND_DISABLE, true},
{mmGC_USER_RB_BACKEND_DISABLE, true},
{mmPA_SC_RASTER_CONFIG, true},
};
@@ -1264,37 +1278,37 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev)
u32 rom_cntl;
bool r;
- bus_cntl = RREG32(R600_BUS_CNTL);
+ bus_cntl = RREG32(mmBUS_CNTL);
if (adev->mode_info.num_crtc) {
- d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
- d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
- vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ d1vga_control = RREG32(mmD1VGA_CONTROL);
+ d2vga_control = RREG32(mmD2VGA_CONTROL);
+ vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
}
rom_cntl = RREG32(R600_ROM_CNTL);
/* enable the rom */
- WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
if (adev->mode_info.num_crtc) {
/* Disable VGA mode */
- WREG32(AVIVO_D1VGA_CONTROL,
- (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_D2VGA_CONTROL,
- (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(VGA_RENDER_CONTROL,
- (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+ WREG32(mmD1VGA_CONTROL,
+ (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmD2VGA_CONTROL,
+ (d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
+ D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
+ WREG32(mmVGA_RENDER_CONTROL,
+ (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
}
WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
r = amdgpu_read_bios(adev);
/* restore regs */
- WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(mmBUS_CNTL, bus_cntl);
if (adev->mode_info.num_crtc) {
- WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
- WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
- WREG32(VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(mmD1VGA_CONTROL, d1vga_control);
+ WREG32(mmD2VGA_CONTROL, d2vga_control);
+ WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
}
WREG32(R600_ROM_CNTL, rom_cntl);
return r;
@@ -1331,23 +1345,24 @@ static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
{
u32 tmp, i;
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_BYPASS_EN;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp |= SPLL_CTLREQ_CHG;
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp |= CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ if (RREG32(mmCG_SPLL_STATUS) & CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK)
break;
udelay(1);
}
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK |
+ CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK);
+ WREG32(mmCG_SPLL_FUNC_CNTL_2, tmp);
tmp = RREG32(MPLL_CNTL_MODE);
tmp &= ~MPLL_MCLK_SEL;
@@ -1358,21 +1373,21 @@ static void si_spll_powerdown(struct amdgpu_device *adev)
{
u32 tmp;
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp |= SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp |= SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_RESET;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_SLEEP;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
+ tmp = RREG32(mmCG_SPLL_FUNC_CNTL);
+ tmp |= CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK;
+ WREG32(mmCG_SPLL_FUNC_CNTL, tmp);
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp &= ~SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
+ tmp = RREG32(mmSPLL_CNTL_MODE);
+ tmp &= ~SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK;
+ WREG32(mmSPLL_CNTL_MODE, tmp);
}
static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
@@ -1454,14 +1469,14 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
{
uint32_t temp;
- temp = RREG32(CONFIG_CNTL);
+ temp = RREG32(mmCONFIG_CNTL);
if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
temp &= ~(1<<1);
}
- WREG32(CONFIG_CNTL, temp);
+ WREG32(mmCONFIG_CNTL, temp);
}
static u32 si_get_xclk(struct amdgpu_device *adev)
@@ -1469,12 +1484,12 @@ static u32 si_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
- tmp = RREG32(CG_CLKPIN_CNTL_2);
- if (tmp & MUX_TCLK_TO_XCLK)
+ tmp = RREG32(mmCG_CLKPIN_CNTL_2);
+ if (tmp & CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK)
return TCLK;
- tmp = RREG32(CG_CLKPIN_CNTL);
- if (tmp & XTALIN_DIVIDE)
+ tmp = RREG32(mmCG_CLKPIN_CNTL);
+ if (tmp & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
return reference_clock / 4;
return reference_clock;
@@ -1519,9 +1534,9 @@ static int si_get_pcie_lanes(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
- switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+ switch ((link_width_cntl & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT) {
case LC_LINK_WIDTH_X1:
return 1;
case LC_LINK_WIDTH_X2:
@@ -1568,13 +1583,13 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
return;
}
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- link_width_cntl &= ~LC_LINK_WIDTH_MASK;
- link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
- link_width_cntl |= (LC_RECONFIG_NOW |
- LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT;
+ link_width_cntl |= (PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK |
+ PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK);
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
@@ -2018,7 +2033,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
{
- return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+ return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
@@ -2239,9 +2254,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
- LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
@@ -2268,17 +2283,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
- tmp = RREG32_PCIE(PCIE_LC_STATUS1);
- max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
- current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+ tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
+ current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK) >> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;
if (current_lw < max_lw) {
- tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- if (tmp & LC_RENEGOTIATION_SUPPORT) {
- tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
- tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
- tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
+ tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
+ tmp |= (max_lw << PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
+ tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK | PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
}
}
@@ -2301,13 +2316,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
PCI_EXP_LNKCTL2,
&gpu_cfg2);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_REDO_EQ;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
mdelay(100);
@@ -2333,16 +2348,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN));
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp &= ~LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ tmp = RREG32_PCIE_PORT(ixPCIE_LC_CNTL4);
+ tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL4, tmp);
}
}
}
- speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
- speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK | PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
+ speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
tmp16 = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -2354,13 +2369,13 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
PCI_EXP_LNKCTL2_TLS, tmp16);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
+ WREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL, speed_cntl);
for (i = 0; i < adev->usec_timeout; i++) {
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
break;
udelay(1);
}
@@ -2418,121 +2433,121 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
- orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- data &= ~LC_XMIT_N_FTS_MASK;
- data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
- data |= LC_GO_TO_RECOVERY;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL3, data);
- orig = data = RREG32_PCIE(PCIE_P_CNTL);
- data |= P_IGNORE_EDB_ERR;
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_P_CNTL, data);
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
- data |= LC_PMI_TO_L1_DIS;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (!disable_l0s)
- data |= LC_L0S_INACTIVITY(7);
+ data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
if (!disable_l1) {
- data |= LC_L1_INACTIVITY(7);
- data &= ~LC_PMI_TO_L1_DIS;
+ data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
if (!disable_plloff_in_l1) {
bool clk_req_support;
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_0);
+ data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_1);
+ data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_0);
+ data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK | PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) | (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_1);
+ data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK | PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) | (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_1, data);
if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_0);
+ data &= ~PB0_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_1);
+ data &= ~PB0_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_2);
+ data &= ~PB0_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_PWRDOWN_3);
+ data &= ~PB0_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_PWRDOWN_3, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_0);
+ data &= ~PB1_PIF_PWRDOWN_0__PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_0, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_1);
+ data &= ~PB1_PIF_PWRDOWN_1__PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_1, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_2);
+ data &= ~PB1_PIF_PWRDOWN_2__PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_2, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_PWRDOWN_3);
+ data &= ~PB1_PIF_PWRDOWN_3__PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_PWRDOWN_3, data);
}
- orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- data &= ~LC_DYN_LANES_PWR_STATE_MASK;
- data |= LC_DYN_LANES_PWR_STATE(3);
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ data |= (3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_LINK_WIDTH_CNTL, data);
- orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy0_rreg(adev, ixPB0_PIF_CNTL);
+ data &= ~PB0_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB0_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+ si_pif_phy0_wreg(adev, ixPB0_PIF_CNTL, data);
- orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
+ orig = data = si_pif_phy1_rreg(adev, ixPB1_PIF_CNTL);
+ data &= ~PB1_PIF_CNTL__LS2_EXIT_TIME_MASK;
if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
+ data |= (5 << PB1_PIF_CNTL__LS2_EXIT_TIME__SHIFT);
if (orig != data)
- si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+ si_pif_phy1_wreg(adev, ixPB1_PIF_CNTL, data);
if (!disable_clkreq &&
!pci_is_root_bus(adev->pdev->bus)) {
@@ -2548,64 +2563,64 @@ static void si_program_aspm(struct amdgpu_device *adev)
}
if (clk_req_support) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
- data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL2, data);
- orig = data = RREG32(THM_CLK_CNTL);
- data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
- data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ orig = data = RREG32(mmTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
if (orig != data)
- WREG32(THM_CLK_CNTL, data);
+ WREG32(mmTHM_CLK_CNTL, data);
- orig = data = RREG32(MISC_CLK_CNTL);
- data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
- data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ orig = data = RREG32(mmMISC_CLK_CNTL);
+ data &= ~(MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK | MISC_CLK_CNTL__ZCLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT) | (1 << MISC_CLK_CNTL__ZCLK_SEL__SHIFT);
if (orig != data)
- WREG32(MISC_CLK_CNTL, data);
+ WREG32(mmMISC_CLK_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL);
- data &= ~BCLK_AS_XCLK;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL);
+ data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL, data);
+ WREG32(mmCG_CLKPIN_CNTL, data);
- orig = data = RREG32(CG_CLKPIN_CNTL_2);
- data &= ~FORCE_BIF_REFCLK_EN;
+ orig = data = RREG32(mmCG_CLKPIN_CNTL_2);
+ data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
if (orig != data)
- WREG32(CG_CLKPIN_CNTL_2, data);
+ WREG32(mmCG_CLKPIN_CNTL_2, data);
- orig = data = RREG32(MPLL_BYPASSCLK_SEL);
- data &= ~MPLL_CLKOUT_SEL_MASK;
- data |= MPLL_CLKOUT_SEL(4);
+ orig = data = RREG32(mmMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= 4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT;
if (orig != data)
- WREG32(MPLL_BYPASSCLK_SEL, data);
+ WREG32(mmMPLL_BYPASSCLK_SEL, data);
- orig = data = RREG32(SPLL_CNTL_MODE);
- data &= ~SPLL_REFCLK_SEL_MASK;
+ orig = data = RREG32(mmSPLL_CNTL_MODE);
+ data &= ~SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK;
if (orig != data)
- WREG32(SPLL_CNTL_MODE, data);
+ WREG32(mmSPLL_CNTL_MODE, data);
}
}
} else {
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
- orig = data = RREG32_PCIE(PCIE_CNTL2);
- data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+ data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK | PCIE_CNTL2__MST_MEM_LS_EN_MASK | PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
if (orig != data)
- WREG32_PCIE(PCIE_CNTL2, data);
+ WREG32_PCIE(ixPCIE_CNTL2, data);
if (!disable_l0s) {
- data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
- data = RREG32_PCIE(PCIE_LC_STATUS1);
- if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~LC_L0S_INACTIVITY_MASK;
+ data = RREG32_PCIE_PORT(ixPCIE_LC_N_FTS_CNTL);
+ if ((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
+ data = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) && (data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
+ orig = data = RREG32_PCIE_PORT(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ WREG32_PCIE_PORT(ixPCIE_LC_CNTL, data);
}
}
}
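Taken together, the si.c hunks above switch from sid.h's hand-rolled macros to the generated-header conventions: mm-prefixed names are direct MMIO dword offsets, ix-prefixed names are indexed registers reached through an index/data pair (hence the RREG32_PCIE_PORT/WREG32_PCIE_PORT wrappers), and each bitfield gets a REG__FIELD_MASK / REG__FIELD__SHIFT pair from the matching sh_mask header. A minimal sketch of the resulting read-modify-write idiom, using the hypothetical register WIDGET_CNTL with field RATE:

/* Sketch: field update via generated _MASK/__SHIFT definitions. */
static void widget_set_rate(struct amdgpu_device *adev, u32 rate)
{
	u32 orig, data;

	orig = data = RREG32(mmWIDGET_CNTL);
	data &= ~WIDGET_CNTL__RATE_MASK;		/* clear the old field */
	data |= rate << WIDGET_CNTL__RATE__SHIFT;	/* insert the new value */
	if (orig != data)				/* skip redundant writes */
		WREG32(mmWIDGET_CNTL, data);
}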
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index e2089c8da71b..7f18e4875287 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -27,6 +27,8 @@
#include "si.h"
#include "sid.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
DMA0_REGISTER_OFFSET,
@@ -38,17 +40,31 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+/**
+ * si_dma_ring_get_rptr - get the current read pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current rptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr;
}
+/**
+ * si_dma_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (SI).
+ */
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
@@ -56,7 +72,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
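A note on the masking in these two helpers: amdgpu tracks ring pointers in dwords, while the DMA_GFX_RB_WPTR register holds a byte offset, so the conversion is << 2 / >> 2 with a clamp to 0x3fffc, an 18-bit byte offset whose low two bits are always clear. An illustrative restatement:

	reg_val   = (ring->wptr << 2) & 0x3fffc;	/* dwords -> bytes */
	ring_wptr = (reg_val & 0x3fffc) >> 2;		/* bytes -> dwords */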
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
@@ -117,9 +133,9 @@ static void si_dma_stop(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
/* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ rb_cntl = RREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~DMA_GFX_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
}
}
@@ -133,44 +149,44 @@ static int si_dma_start(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(mmDMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK | DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
- WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], 0);
rptr_addr = ring->rptr_gpu_addr;
- WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
- WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+ WREG32(mmDMA_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+ rb_cntl |= DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
- WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+ WREG32(mmDMA_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
/* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+ ib_cntl = DMA_GFX_IB_CNTL__IB_ENABLE_MASK | DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK;
#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
+ ib_cntl |= DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
- WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+ WREG32(mmDMA_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+ dma_cntl = RREG32(mmDMA_CNTL + sdma_offsets[i]);
+ dma_cntl &= ~DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + sdma_offsets[i], dma_cntl);
ring->wptr = 0;
- WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
- WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+ WREG32(mmDMA_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(mmDMA_GFX_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_GFX_RB_CNTL__RB_ENABLE_MASK);
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -461,7 +477,7 @@ static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- adev->sdma.num_instances = 2;
+ adev->sdma.num_instances = SDMA_MAX_INSTANCE;
si_dma_set_ring_funcs(adev);
si_dma_set_buffer_funcs(adev);
@@ -545,9 +561,9 @@ static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS2);
+ u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+ if (tmp & (SRBM_STATUS2__DMA_BUSY_MASK | SRBM_STATUS2__DMA1_BUSY_MASK))
return false;
return true;
@@ -583,14 +599,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -599,14 +615,14 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl &= ~TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
- sdma_cntl |= TRAP_ENABLE;
- WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ sdma_cntl = RREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl |= DMA_CNTL__TRAP_ENABLE_MASK;
+ WREG32(mmDMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
@@ -645,11 +661,11 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data &= ~MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data &= ~DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
- WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, 0x00000100);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -657,15 +673,15 @@ static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data |= MEM_POWER_OVERRIDE;
+ orig = data = RREG32(mmDMA_POWER_CNTL + offset);
+ data |= DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(mmDMA_POWER_CNTL + offset, data);
- orig = data = RREG32(DMA_CLK_CTRL + offset);
+ orig = data = RREG32(mmDMA_CLK_CTRL + offset);
data = 0xff000000;
if (data != orig)
- WREG32(DMA_CLK_CTRL + offset, data);
+ WREG32(mmDMA_CLK_CTRL + offset, data);
}
}
@@ -679,11 +695,11 @@ static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
struct amdgpu_device *adev = ip_block->adev;
- WREG32(DMA_PGFSM_WRITE, 0x00002000);
- WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+ WREG32(mmDMA_PGFSM_WRITE, 0x00002000);
+ WREG32(mmDMA_PGFSM_CONFIG, 0x100010ff);
for (tmp = 0; tmp < 5; tmp++)
- WREG32(DMA_PGFSM_WRITE, 0);
+ WREG32(mmDMA_PGFSM_WRITE, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index d656ef1fa6e1..6da65778292b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -23,115 +23,15 @@
#ifndef SI_ENUMS_H
#define SI_ENUMS_H
-#define VBLANK_INT_MASK (1 << 0)
-#define DC_HPDx_INT_EN (1 << 16)
-#define VBLANK_ACK (1 << 4)
-#define VLINE_ACK (1 << 4)
-
-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
-#define VGA_VSTATUS_CNTL 0xFFFCFFFF
#define PRIORITY_MARK_MASK 0x7fff
#define PRIORITY_OFF (1 << 16)
#define PRIORITY_ALWAYS_ON (1 << 20)
-#define INTERLEAVE_EN (1 << 0)
-
-#define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-#define ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-
-#define GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-#define GRPH_ENDIAN_NONE 0
-#define GRPH_ENDIAN_8IN16 1
-#define GRPH_ENDIAN_8IN32 2
-#define GRPH_ENDIAN_8IN64 3
-#define GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-#define GRPH_RED_SEL_R 0
-#define GRPH_RED_SEL_G 1
-#define GRPH_RED_SEL_B 2
-#define GRPH_RED_SEL_A 3
-#define GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-#define GRPH_GREEN_SEL_G 0
-#define GRPH_GREEN_SEL_B 1
-#define GRPH_GREEN_SEL_A 2
-#define GRPH_GREEN_SEL_R 3
-#define GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-#define GRPH_BLUE_SEL_B 0
-#define GRPH_BLUE_SEL_A 1
-#define GRPH_BLUE_SEL_R 2
-#define GRPH_BLUE_SEL_G 3
-#define GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-#define GRPH_ALPHA_SEL_A 0
-#define GRPH_ALPHA_SEL_R 1
-#define GRPH_ALPHA_SEL_G 2
-#define GRPH_ALPHA_SEL_B 3
-
-#define GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define GRPH_DEPTH_8BPP 0
-#define GRPH_DEPTH_16BPP 1
-#define GRPH_DEPTH_32BPP 2
-
-#define GRPH_FORMAT(x) (((x) & 0x7) << 8)
-#define GRPH_FORMAT_INDEXED 0
-#define GRPH_FORMAT_ARGB1555 0
-#define GRPH_FORMAT_ARGB565 1
-#define GRPH_FORMAT_ARGB4444 2
-#define GRPH_FORMAT_AI88 3
-#define GRPH_FORMAT_MONO16 4
-#define GRPH_FORMAT_BGRA5551 5
-#define GRPH_FORMAT_ARGB8888 0
-#define GRPH_FORMAT_ARGB2101010 1
-#define GRPH_FORMAT_32BPP_DIG 2
-#define GRPH_FORMAT_8B_ARGB2101010 3
-#define GRPH_FORMAT_BGRA1010102 4
-#define GRPH_FORMAT_8B_BGRA1010102 5
-#define GRPH_FORMAT_RGB111110 6
-#define GRPH_FORMAT_BGR101111 7
-
-#define GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_ARRAY_LINEAR_GENERAL 0
-#define GRPH_ARRAY_LINEAR_ALIGNED 1
-#define GRPH_ARRAY_1D_TILED_THIN1 2
-#define GRPH_ARRAY_2D_TILED_THIN1 4
-#define GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
-
-#define CURSOR_EN (1 << 0)
-#define CURSOR_MODE(x) (((x) & 0x3) << 8)
-#define CURSOR_MONO 0
-#define CURSOR_24_1 1
-#define CURSOR_24_8_PRE_MULT 2
-#define CURSOR_24_8_UNPRE_MULT 3
-#define CURSOR_2X_MAGNIFY (1 << 16)
-#define CURSOR_FORCE_MC_ON (1 << 20)
-#define CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-#define CURSOR_URGENT_ALWAYS 0
-#define CURSOR_URGENT_1_8 1
-#define CURSOR_URGENT_1_4 2
-#define CURSOR_URGENT_3_8 3
-#define CURSOR_URGENT_1_2 4
-#define CURSOR_UPDATE_PENDING (1 << 0)
-#define CURSOR_UPDATE_TAKEN (1 << 1)
-#define CURSOR_UPDATE_LOCK (1 << 16)
-#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define ES_AND_GS_AUTO 3
-#define RADEON_PACKET_TYPE3 3
-#define CE_PARTITION_BASE 3
-#define BUF_SWAP_32BIT (2 << 16)
#define GFX_POWER_STATUS (1 << 1)
#define GFX_CLOCK_STATUS (1 << 2)
#define GFX_LS_STATUS (1 << 3)
-#define RLC_BUSY_STATUS (1 << 0)
+#define RLC_BUSY_STATUS (1 << 0)
#define RLC_PUD(x) ((x) << 0)
#define RLC_PUD_MASK (0xff << 0)
#define RLC_PDD(x) ((x) << 8)
@@ -140,140 +40,8 @@
#define RLC_TTPD_MASK (0xff << 16)
#define RLC_MSD(x) ((x) << 24)
#define RLC_MSD_MASK (0xff << 24)
-#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
-#define WRITE_DATA_DST_SEL(x) ((x) << 8)
-#define EVENT_TYPE(x) ((x) << 0)
-#define EVENT_INDEX(x) ((x) << 8)
-#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
-#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
-#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
-#define GFX6_NUM_GFX_RINGS 1
-#define GFX6_NUM_COMPUTE_RINGS 2
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
- (((op) & 0xFF) << 8) | \
- ((n) & 0x3FFF) << 16)
-#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
-#define PACKET3_NOP 0x10
-#define PACKET3_SET_BASE 0x11
-#define PACKET3_BASE_INDEX(x) ((x) << 0)
-#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDEX_BUFFER_SIZE 0x13
-#define PACKET3_DISPATCH_DIRECT 0x15
-#define PACKET3_DISPATCH_INDIRECT 0x16
-#define PACKET3_ALLOC_GDS 0x1B
-#define PACKET3_WRITE_GDS_RAM 0x1C
-#define PACKET3_ATOMIC_GDS 0x1D
-#define PACKET3_ATOMIC 0x1E
-#define PACKET3_OCCLUSION_QUERY 0x1F
-#define PACKET3_SET_PREDICATION 0x20
-#define PACKET3_REG_RMW 0x21
-#define PACKET3_COND_EXEC 0x22
-#define PACKET3_PRED_EXEC 0x23
-#define PACKET3_DRAW_INDIRECT 0x24
-#define PACKET3_DRAW_INDEX_INDIRECT 0x25
-#define PACKET3_INDEX_BASE 0x26
-#define PACKET3_DRAW_INDEX_2 0x27
-#define PACKET3_CONTEXT_CONTROL 0x28
-#define PACKET3_INDEX_TYPE 0x2A
-#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
-#define PACKET3_DRAW_INDEX_AUTO 0x2D
-#define PACKET3_DRAW_INDEX_IMMD 0x2E
-#define PACKET3_NUM_INSTANCES 0x2F
-#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
-#define PACKET3_INDIRECT_BUFFER_CONST 0x31
-#define PACKET3_INDIRECT_BUFFER 0x3F
-#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
-#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
-#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
-#define PACKET3_WRITE_DATA 0x37
-#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
-#define PACKET3_MEM_SEMAPHORE 0x39
-#define PACKET3_MPEG_INDEX 0x3A
-#define PACKET3_COPY_DW 0x3B
-#define PACKET3_WAIT_REG_MEM 0x3C
-#define PACKET3_MEM_WRITE 0x3D
-#define PACKET3_COPY_DATA 0x40
-#define PACKET3_CP_DMA 0x41
-# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
-# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
-# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
-# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
-# define PACKET3_CP_DMA_DIS_WC (1 << 21)
-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
-# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
-# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
-# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
-# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
-# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
-# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_PFP_SYNC_ME 0x42
-#define PACKET3_SURFACE_SYNC 0x43
-# define PACKET3_DEST_BASE_0_ENA (1 << 0)
-# define PACKET3_DEST_BASE_1_ENA (1 << 1)
-# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
-# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
-# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
-# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
-# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
-# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
-# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
-# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
-# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
-# define PACKET3_DEST_BASE_2_ENA (1 << 19)
-# define PACKET3_DEST_BASE_3_ENA (1 << 21)
-# define PACKET3_TCL1_ACTION_ENA (1 << 22)
-# define PACKET3_TC_ACTION_ENA (1 << 23)
-# define PACKET3_CB_ACTION_ENA (1 << 25)
-# define PACKET3_DB_ACTION_ENA (1 << 26)
-# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
-# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
-#define PACKET3_ME_INITIALIZE 0x44
-#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
-#define PACKET3_COND_WRITE 0x45
-#define PACKET3_EVENT_WRITE 0x46
-#define PACKET3_EVENT_WRITE_EOP 0x47
-#define PACKET3_EVENT_WRITE_EOS 0x48
-#define PACKET3_PREAMBLE_CNTL 0x4A
-# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
-# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
-#define PACKET3_ONE_REG_WRITE 0x57
-#define PACKET3_LOAD_CONFIG_REG 0x5F
-#define PACKET3_LOAD_CONTEXT_REG 0x60
-#define PACKET3_LOAD_SH_REG 0x61
-#define PACKET3_SET_CONFIG_REG 0x68
-#define PACKET3_SET_CONFIG_REG_START 0x00002000
-#define PACKET3_SET_CONFIG_REG_END 0x00002c00
-#define PACKET3_SET_CONTEXT_REG 0x69
-#define PACKET3_SET_CONTEXT_REG_START 0x000a000
-#define PACKET3_SET_CONTEXT_REG_END 0x000a400
-#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
-#define PACKET3_SET_RESOURCE_INDIRECT 0x74
-#define PACKET3_SET_SH_REG 0x76
-#define PACKET3_SET_SH_REG_START 0x00002c00
-#define PACKET3_SET_SH_REG_END 0x00003000
-#define PACKET3_SET_SH_REG_OFFSET 0x77
-#define PACKET3_ME_WRITE 0x7A
-#define PACKET3_SCRATCH_RAM_WRITE 0x7D
-#define PACKET3_SCRATCH_RAM_READ 0x7E
-#define PACKET3_CE_WRITE 0x7F
-#define PACKET3_LOAD_CONST_RAM 0x80
-#define PACKET3_WRITE_CONST_RAM 0x81
-#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
-#define PACKET3_DUMP_CONST_RAM 0x83
-#define PACKET3_INCREMENT_CE_COUNTER 0x84
-#define PACKET3_INCREMENT_DE_COUNTER 0x85
-#define PACKET3_WAIT_ON_CE_COUNTER 0x86
-#define PACKET3_WAIT_ON_DE_COUNTER 0x87
-#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
-#define PACKET3_SET_CE_DE_COUNTERS 0x89
-#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
-#define PACKET3_SWITCH_BUFFER 0x8B
-#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
-#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
-#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-
#endif
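For reference, the PACKET3 helpers removed above encode a type-3 command packet header with the packet type in bits 31:30, the dword count in bits 29:16, and the opcode in bits 15:8. A worked example using the WRITE_DATA opcode (0x37) with a count of 3:

	header = (RADEON_PACKET_TYPE3 << 30)	/* 0xC0000000 */
	       | ((0x37 & 0xFF) << 8)		/* 0x00003700 */
	       | ((3 & 0x3FFF) << 16);		/* 0x00030000 */
	/* => 0xC0033700, i.e. PACKET3(PACKET3_WRITE_DATA, 3) */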
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 5c38e1fb1dca..1df00f8a2406 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -27,6 +27,7 @@
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
@@ -213,7 +214,7 @@ static int si_ih_resume(struct amdgpu_ip_block *ip_block)
static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
return false;
@@ -239,23 +240,23 @@ static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(SRBM_STATUS);
+ u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ dev_info(adev->dev, "mmSRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
}
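The hunk above is the standard SRBM soft-reset pulse: assert the reset bit, read the register back so the write posts, hold for ~50 us, then deassert and hold again before the block is reprogrammed. As a self-contained sketch (the helper name is hypothetical):

static void srbm_pulse_reset(struct amdgpu_device *adev, u32 reset_mask)
{
	u32 tmp = RREG32(mmSRBM_SOFT_RESET);

	tmp |= reset_mask;
	WREG32(mmSRBM_SOFT_RESET, tmp);
	tmp = RREG32(mmSRBM_SOFT_RESET);	/* posting read */
	udelay(50);

	tmp &= ~reset_mask;
	WREG32(mmSRBM_SOFT_RESET, tmp);
	tmp = RREG32(mmSRBM_SOFT_RESET);	/* posting read */
	udelay(50);
}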
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index cbf232f5235b..cbd4f8951cfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -24,43 +24,12 @@
#ifndef SI_H
#define SI_H
-#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
-
-#define SI_MAX_SH_GPRS 256
-#define SI_MAX_TEMP_GPRS 16
-#define SI_MAX_SH_THREADS 256
-#define SI_MAX_SH_STACK_ENTRIES 4096
-#define SI_MAX_FRC_EOV_CNT 16384
-#define SI_MAX_BACKENDS 8
-#define SI_MAX_BACKENDS_MASK 0xFF
-#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
-#define SI_MAX_SIMDS 12
-#define SI_MAX_SIMDS_MASK 0x0FFF
-#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
-#define SI_MAX_PIPES 8
-#define SI_MAX_PIPES_MASK 0xFF
-#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
-#define SI_MAX_LDS_NUM 0xFFFF
-#define SI_MAX_TCC 16
-#define SI_MAX_TCC_MASK 0xFFFF
#define SI_MAX_CTLACKS_ASSERTION_WAIT 100
-/* SMC IND accessor regs */
-#define SMC_IND_INDEX_0 0x80
-#define SMC_IND_DATA_0 0x81
-
-#define SMC_IND_ACCESS_CNTL 0x8A
-# define AUTO_INCREMENT_IND_0 (1 << 0)
-#define SMC_MESSAGE_0 0x8B
-#define SMC_RESP_0 0x8C
-
/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000
-#define CG_CGTT_LOCAL_0 0x400
-#define CG_CGTT_LOCAL_1 0x401
-
/* SMC IND registers */
#define SMC_SYSCON_RESET_CNTL 0x80000000
# define RST_REG (1 << 0)
@@ -68,9 +37,6 @@
# define CK_DISABLE (1 << 0)
# define CKEN (1 << 24)
-#define VGA_HDP_CONTROL 0xCA
-#define VGA_MEMORY_DISABLE (1 << 4)
-
#define DCCG_DISP_SLOW_SELECT_REG 0x13F
#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
@@ -79,47 +45,6 @@
#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
-#define CG_SPLL_FUNC_CNTL 0x180
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define SPLL_PDIV_A_SHIFT 20
-#define CG_SPLL_FUNC_CNTL_2 0x181
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SPLL_CTLREQ_CHG (1 << 23)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x182
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_FB_DIV_SHIFT 0
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_FUNC_CNTL_4 0x183
-
-#define SPLL_STATUS 0x185
-#define SPLL_CHG_STATUS (1 << 1)
-#define SPLL_CNTL_MODE 0x186
-#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 26)
-# define SPLL_REFCLK_SEL_MASK (3 << 26)
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x188
-#define SSEN (1 << 0)
-#define CLK_S(x) ((x) << 4)
-#define CLK_S_MASK (0xfff << 4)
-#define CLK_S_SHIFT 4
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x189
-#define CLK_V(x) ((x) << 0)
-#define CLK_V_MASK (0x3ffffff << 0)
-#define CLK_V_SHIFT 0
-
-#define CG_SPLL_AUTOSCALE_CNTL 0x18b
-# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
-
/* discrete uvd clocks */
#define CG_UPLL_FUNC_CNTL 0x18d
# define UPLL_RESET_MASK 0x00000001
@@ -149,317 +74,13 @@
#define CG_UPLL_SPREAD_SPECTRUM 0x194
# define SSEN_MASK 0x00000001
-#define MPLL_BYPASSCLK_SEL 0x197
-# define MPLL_CLKOUT_SEL(x) ((x) << 8)
-# define MPLL_CLKOUT_SEL_MASK 0xFF00
-
-#define CG_CLKPIN_CNTL 0x198
-# define XTALIN_DIVIDE (1 << 1)
-# define BCLK_AS_XCLK (1 << 2)
-#define CG_CLKPIN_CNTL_2 0x199
-# define FORCE_BIF_REFCLK_EN (1 << 3)
-# define MUX_TCLK_TO_XCLK (1 << 8)
-
-#define THM_CLK_CNTL 0x19b
-# define CMON_CLK_SEL(x) ((x) << 0)
-# define CMON_CLK_SEL_MASK 0xFF
-# define TMON_CLK_SEL(x) ((x) << 8)
-# define TMON_CLK_SEL_MASK 0xFF00
-#define MISC_CLK_CNTL 0x19c
-# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
-# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
-# define ZCLK_SEL(x) ((x) << 8)
-# define ZCLK_SEL_MASK 0xFF00
-
-#define CG_THERMAL_CTRL 0x1c0
-#define DPM_EVENT_SRC(x) ((x) << 0)
-#define DPM_EVENT_SRC_MASK (7 << 0)
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-#define CG_THERMAL_STATUS 0x1c1
-#define FDO_PWM_DUTY(x) ((x) << 9)
-#define FDO_PWM_DUTY_MASK (0xff << 9)
-#define FDO_PWM_DUTY_SHIFT 9
-#define CG_THERMAL_INT 0x1c2
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define CG_MULT_THERMAL_CTRL 0x1c4
-#define TEMP_SEL(x) ((x) << 20)
-#define TEMP_SEL_MASK (0xff << 20)
-#define TEMP_SEL_SHIFT 20
-#define CG_MULT_THERMAL_STATUS 0x1c5
-#define ASIC_MAX_TEMP(x) ((x) << 0)
-#define ASIC_MAX_TEMP_MASK 0x000001ff
-#define ASIC_MAX_TEMP_SHIFT 0
-#define CTF_TEMP(x) ((x) << 9)
-#define CTF_TEMP_MASK 0x0003fe00
-#define CTF_TEMP_SHIFT 9
-
-#define CG_FDO_CTRL0 0x1d5
-#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x000000FF
-#define FDO_STATIC_DUTY_SHIFT 0
-#define CG_FDO_CTRL1 0x1d6
-#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x000000FF
-#define FMAX_DUTY100_SHIFT 0
-#define CG_FDO_CTRL2 0x1d7
-#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x000000FF
-#define TMIN_SHIFT 0
-#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (7 << 11)
-#define FDO_PWM_MODE_SHIFT 11
-#define TACH_PWM_RESP_RATE(x) ((x) << 25)
-#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
-#define TACH_PWM_RESP_RATE_SHIFT 25
-
-#define CG_TACH_CTRL 0x1dc
-# define EDGE_PER_REV(x) ((x) << 0)
-# define EDGE_PER_REV_MASK (0x7 << 0)
-# define EDGE_PER_REV_SHIFT 0
-# define TARGET_PERIOD(x) ((x) << 3)
-# define TARGET_PERIOD_MASK 0xfffffff8
-# define TARGET_PERIOD_SHIFT 3
-#define CG_TACH_STATUS 0x1dd
-# define TACH_PERIOD(x) ((x) << 0)
-# define TACH_PERIOD_MASK 0xffffffff
-# define TACH_PERIOD_SHIFT 0
-
-#define GENERAL_PWRMGT 0x1e0
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (1 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define VOLT_PWRMGT_EN (1 << 10)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-#define CG_TPC 0x1e1
-#define SCLK_PWRMGT_CNTL 0x1e2
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x1e6
-# define CURRENT_STATE_INDEX_MASK (0xf << 4)
-# define CURRENT_STATE_INDEX_SHIFT 4
-
-#define CG_FTV 0x1ef
-
-#define CG_FFCT_0 0x1f0
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x1ff
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_AT 0x200
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_GIT 0x201
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x203
-# define SST(x) ((x) << 0)
-# define SST_MASK (0xffff << 0)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (0xf << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x20a
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_ULV_CONTROL 0x21e
-#define CG_ULV_PARAMETER 0x21f
-
-#define SMC_SCRATCH0 0x221
-
-#define CG_CAC_CTRL 0x22e
-# define CAC_WINDOW(x) ((x) << 0)
-# define CAC_WINDOW_MASK 0x00ffffff
-
-#define DMIF_ADDR_CONFIG 0x2F5
-
-#define DMIF_ADDR_CALC 0x300
-
-#define PIPE0_DMIF_BUFFER_CONTROL 0x0328
-# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
-# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
-
-#define SRBM_STATUS 0x394
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define IH_BUSY (1 << 17)
-
-#define SRBM_SOFT_RESET 0x398
-#define SOFT_RESET_BIF (1 << 1)
-#define SOFT_RESET_DC (1 << 5)
-#define SOFT_RESET_DMA1 (1 << 6)
-#define SOFT_RESET_GRBM (1 << 8)
-#define SOFT_RESET_HDP (1 << 9)
-#define SOFT_RESET_IH (1 << 10)
-#define SOFT_RESET_MC (1 << 11)
-#define SOFT_RESET_ROM (1 << 14)
-#define SOFT_RESET_SEM (1 << 15)
-#define SOFT_RESET_VMC (1 << 17)
-#define SOFT_RESET_DMA (1 << 20)
-#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
-#define SOFT_RESET_ORB (1 << 23)
-
-#define CC_SYS_RB_BACKEND_DISABLE 0x3A0
-#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3A1
-
-#define SRBM_READ_ERROR 0x3A6
-#define SRBM_INT_CNTL 0x3A8
-#define SRBM_INT_ACK 0x3AA
-
-#define SRBM_STATUS2 0x3B1
-#define DMA_BUSY (1 << 5)
-#define DMA1_BUSY (1 << 6)
-
-#define VM_L2_CNTL 0x500
-#define ENABLE_L2_CACHE (1 << 0)
-#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
-#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
-#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
-#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
-#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
-#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
-#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
-#define VM_L2_CNTL2 0x501
-#define INVALIDATE_ALL_L1_TLBS (1 << 0)
-#define INVALIDATE_L2_CACHE (1 << 1)
-#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
-#define INVALIDATE_PTE_AND_PDE_CACHES 0
-#define INVALIDATE_ONLY_PTE_CACHES 1
-#define INVALIDATE_ONLY_PDE_CACHES 2
-#define VM_L2_CNTL3 0x502
-#define BANK_SELECT(x) ((x) << 0)
-#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
-#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
-#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
-#define VM_L2_STATUS 0x503
-#define L2_BUSY (1 << 0)
-#define VM_CONTEXT0_CNTL 0x504
-#define ENABLE_CONTEXT (1 << 0)
-#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
-#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
-#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
-#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
-#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
-#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
-#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
-#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
-#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
-#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
-#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
-#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
-#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
-#define VM_CONTEXT1_CNTL 0x505
-#define VM_CONTEXT0_CNTL2 0x50C
-#define VM_CONTEXT1_CNTL2 0x50D
-#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x50E
-#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x50F
-#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x510
-#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x511
-#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x512
-#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x513
-#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x514
-#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x515
-
-#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x53f
-#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x537
-#define PROTECTIONS_MASK (0xf << 0)
-#define PROTECTIONS_SHIFT 0
- /* bit 0: range
- * bit 1: pde0
- * bit 2: valid
- * bit 3: read
- * bit 4: write
- */
-#define MEMORY_CLIENT_ID_MASK (0xff << 12)
-#define MEMORY_CLIENT_ID_SHIFT 12
-#define MEMORY_CLIENT_RW_MASK (1 << 24)
-#define MEMORY_CLIENT_RW_SHIFT 24
-#define FAULT_VMID_MASK (0xf << 25)
-#define FAULT_VMID_SHIFT 25
-
#define VM_INVALIDATE_REQUEST 0x51E
#define VM_INVALIDATE_RESPONSE 0x51F
-#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x546
-#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x547
-
-#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54F
-#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x550
-#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x551
-#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x552
-#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x553
-#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x554
-#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x555
-#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x556
-#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x557
-#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x558
-
-#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x55F
-#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x560
-
#define VM_L2_CG 0x570
#define MC_CG_ENABLE (1 << 18)
#define MC_LS_ENABLE (1 << 19)
-#define MC_SHARED_CHMAP 0x801
-#define NOOFCHAN_SHIFT 12
-#define NOOFCHAN_MASK 0x0000f000
-#define MC_SHARED_CHREMAP 0x802
-
#define MC_VM_FB_LOCATION 0x809
#define MC_VM_AGP_TOP 0x80A
#define MC_VM_AGP_BOT 0x80B
@@ -491,21 +112,6 @@
#define MC_CITF_MISC_WR_CG 0x993
#define MC_CITF_MISC_VM_CG 0x994
-#define MC_ARB_RAMCFG 0x9D8
-#define NOOFBANK_SHIFT 0
-#define NOOFBANK_MASK 0x00000003
-#define NOOFRANK_SHIFT 2
-#define NOOFRANK_MASK 0x00000004
-#define NOOFROWS_SHIFT 3
-#define NOOFROWS_MASK 0x00000038
-#define NOOFCOLS_SHIFT 6
-#define NOOFCOLS_MASK 0x000000C0
-#define CHANSIZE_SHIFT 8
-#define CHANSIZE_MASK 0x00000100
-#define CHANSIZE_OVERRIDE (1 << 11)
-#define NOOFGROUPS_SHIFT 12
-#define NOOFGROUPS_MASK 0x00001000
-
#define MC_ARB_DRAM_TIMING 0x9DD
#define MC_ARB_DRAM_TIMING2 0x9DE
@@ -631,20 +237,6 @@
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
-#define HDP_HOST_PATH_CNTL 0xB00
-#define CLOCK_GATING_DIS (1 << 23)
-#define HDP_NONSURFACE_BASE 0xB01
-#define HDP_NONSURFACE_INFO 0xB02
-#define HDP_NONSURFACE_SIZE 0xB03
-
-#define HDP_DEBUG0 0xBCC
-
-#define HDP_ADDR_CONFIG 0xBD2
-#define HDP_MISC_CNTL 0xBD3
-#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
-#define HDP_MEM_POWER_LS 0xBD4
-#define HDP_LS_ENABLE (1 << 0)
-
#define ATC_MISC_CG 0xCD4
#define IH_RB_CNTL 0xF80
@@ -674,8 +266,6 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
-#define CONFIG_MEMSIZE 0x150A
-
#define INTERRUPT_CNTL 0x151A
# define IH_DUMMY_RD_OVERRIDE (1 << 0)
# define IH_DUMMY_RD_EN (1 << 1)
@@ -683,486 +273,22 @@
# define GEN_IH_INT_EN (1 << 8)
#define INTERRUPT_CNTL2 0x151B
-#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
-
-#define BIF_FB_EN 0x1524
-#define FB_READ_EN (1 << 0)
-#define FB_WRITE_EN (1 << 1)
-
-#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
-
-/* DCE6 ELD audio interface */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
-# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
-/* max channels minus one. 7 = 8 channels */
-# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
-# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
-# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
-/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
- * bit0 = 32 kHz
- * bit1 = 44.1 kHz
- * bit2 = 48 kHz
- * bit3 = 88.2 kHz
- * bit4 = 96 kHz
- * bit5 = 176.4 kHz
- * bit6 = 192 kHz
- */
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
-# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
-# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
-/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
- * 0 = invalid
- * x = legal delay value
- * 255 = sync not supported
- */
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
-# define HBR_CAPABLE (1 << 0) /* enabled by default */
-
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
-# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
-# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
-# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
-# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
-# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
-# define DESCRIPTION0(x) (((x) & 0xff) << 0)
-# define DESCRIPTION1(x) (((x) & 0xff) << 8)
-# define DESCRIPTION2(x) (((x) & 0xff) << 16)
-# define DESCRIPTION3(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
-# define DESCRIPTION4(x) (((x) & 0xff) << 0)
-# define DESCRIPTION5(x) (((x) & 0xff) << 8)
-# define DESCRIPTION6(x) (((x) & 0xff) << 16)
-# define DESCRIPTION7(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
-# define DESCRIPTION8(x) (((x) & 0xff) << 0)
-# define DESCRIPTION9(x) (((x) & 0xff) << 8)
-# define DESCRIPTION10(x) (((x) & 0xff) << 16)
-# define DESCRIPTION11(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
-# define DESCRIPTION12(x) (((x) & 0xff) << 0)
-# define DESCRIPTION13(x) (((x) & 0xff) << 8)
-# define DESCRIPTION14(x) (((x) & 0xff) << 16)
-# define DESCRIPTION15(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
-# define DESCRIPTION16(x) (((x) & 0xff) << 0)
-# define DESCRIPTION17(x) (((x) & 0xff) << 8)
-
-#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
-# define AUDIO_ENABLED (1 << 31)
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
-#define PORT_CONNECTIVITY_MASK (3 << 30)
-#define PORT_CONNECTIVITY_SHIFT 30
-
-#define DC_LB_MEMORY_SPLIT 0x1AC3
-#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
-
-#define PRIORITY_A_CNT 0x1AC6
-#define PRIORITY_MARK_MASK 0x7fff
-#define PRIORITY_OFF (1 << 16)
-#define PRIORITY_ALWAYS_ON (1 << 20)
-#define PRIORITY_B_CNT 0x1AC7
-
-#define DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
-# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
-#define DPG_PIPE_LATENCY_CONTROL 0x1B33
-# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
-# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
-
-/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
-#define VLINE_STATUS 0x1AEE
-# define VLINE_OCCURRED (1 << 0)
-# define VLINE_ACK (1 << 4)
-# define VLINE_STAT (1 << 12)
-# define VLINE_INTERRUPT (1 << 16)
-# define VLINE_INTERRUPT_TYPE (1 << 17)
-/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
-#define VBLANK_STATUS 0x1AEF
-# define VBLANK_OCCURRED (1 << 0)
-# define VBLANK_ACK (1 << 4)
-# define VBLANK_STAT (1 << 12)
-# define VBLANK_INTERRUPT (1 << 16)
-# define VBLANK_INTERRUPT_TYPE (1 << 17)
-
-/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
-#define INT_MASK 0x1AD0
-# define VBLANK_INT_MASK (1 << 0)
-# define VLINE_INT_MASK (1 << 4)
-
-#define DISP_INTERRUPT_STATUS 0x183D
-# define LB_D1_VLINE_INTERRUPT (1 << 2)
-# define LB_D1_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD1_INTERRUPT (1 << 17)
-# define DC_HPD1_RX_INTERRUPT (1 << 18)
-# define DACA_AUTODETECT_INTERRUPT (1 << 22)
-# define DACB_AUTODETECT_INTERRUPT (1 << 23)
-# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
-# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
-#define DISP_INTERRUPT_STATUS_CONTINUE 0x183E
-# define LB_D2_VLINE_INTERRUPT (1 << 2)
-# define LB_D2_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD2_INTERRUPT (1 << 17)
-# define DC_HPD2_RX_INTERRUPT (1 << 18)
-# define DISP_TIMER_INTERRUPT (1 << 24)
-#define DISP_INTERRUPT_STATUS_CONTINUE2 0x183F
-# define LB_D3_VLINE_INTERRUPT (1 << 2)
-# define LB_D3_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD3_INTERRUPT (1 << 17)
-# define DC_HPD3_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE3 0x1840
-# define LB_D4_VLINE_INTERRUPT (1 << 2)
-# define LB_D4_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD4_INTERRUPT (1 << 17)
-# define DC_HPD4_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE4 0x1853
-# define LB_D5_VLINE_INTERRUPT (1 << 2)
-# define LB_D5_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD5_INTERRUPT (1 << 17)
-# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x1854
-# define LB_D6_VLINE_INTERRUPT (1 << 2)
-# define LB_D6_VBLANK_INTERRUPT (1 << 3)
-# define DC_HPD6_INTERRUPT (1 << 17)
-# define DC_HPD6_RX_INTERRUPT (1 << 18)
-
-/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
-#define GRPH_INT_STATUS 0x1A16
-# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
-# define GRPH_PFLIP_INT_CLEAR (1 << 8)
-/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
-#define GRPH_INT_CONTROL 0x1A17
-# define GRPH_PFLIP_INT_MASK (1 << 0)
-# define GRPH_PFLIP_INT_TYPE (1 << 8)
-
-#define DAC_AUTODETECT_INT_CONTROL 0x19F2
-
-#define DC_HPD1_INT_STATUS 0x1807
-#define DC_HPD2_INT_STATUS 0x180A
-#define DC_HPD3_INT_STATUS 0x180D
-#define DC_HPD4_INT_STATUS 0x1810
-#define DC_HPD5_INT_STATUS 0x1813
-#define DC_HPD6_INT_STATUS 0x1816
-# define DC_HPDx_INT_STATUS (1 << 0)
-# define DC_HPDx_SENSE (1 << 1)
-# define DC_HPDx_RX_INT_STATUS (1 << 8)
-
-#define DC_HPD1_INT_CONTROL 0x1808
-#define DC_HPD2_INT_CONTROL 0x180B
-#define DC_HPD3_INT_CONTROL 0x180E
-#define DC_HPD4_INT_CONTROL 0x1811
-#define DC_HPD5_INT_CONTROL 0x1814
-#define DC_HPD6_INT_CONTROL 0x1817
-# define DC_HPDx_INT_ACK (1 << 0)
-# define DC_HPDx_INT_POLARITY (1 << 8)
-# define DC_HPDx_INT_EN (1 << 16)
-# define DC_HPDx_RX_INT_ACK (1 << 20)
-# define DC_HPDx_RX_INT_EN (1 << 24)
-
-#define DC_HPD1_CONTROL 0x1809
-#define DC_HPD2_CONTROL 0x180C
-#define DC_HPD3_CONTROL 0x180F
-#define DC_HPD4_CONTROL 0x1812
-#define DC_HPD5_CONTROL 0x1815
-#define DC_HPD6_CONTROL 0x1818
-# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
-# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
-# define DC_HPDx_EN (1 << 28)
-
-#define DPG_PIPE_STUTTER_CONTROL 0x1B35
-# define STUTTER_ENABLE (1 << 0)
-
-/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
-#define CRTC_STATUS_FRAME_COUNT 0x1BA6
-
-/* Audio clocks */
-#define DCCG_AUDIO_DTO0_PHASE 0x05b0
-#define DCCG_AUDIO_DTO0_MODULE 0x05b4
-#define DCCG_AUDIO_DTO1_PHASE 0x05c0
-#define DCCG_AUDIO_DTO1_MODULE 0x05c4
-
-#define GRBM_CNTL 0x2000
-#define GRBM_READ_TIMEOUT(x) ((x) << 0)
-
-#define GRBM_STATUS2 0x2002
-#define RLC_RQ_PENDING (1 << 0)
-#define RLC_BUSY (1 << 8)
-#define TC_BUSY (1 << 9)
-
-#define GRBM_STATUS 0x2004
-#define CMDFIFO_AVAIL_MASK 0x0000000F
-#define RING2_RQ_PENDING (1 << 4)
-#define SRBM_RQ_PENDING (1 << 5)
-#define RING1_RQ_PENDING (1 << 6)
-#define CF_RQ_PENDING (1 << 7)
-#define PF_RQ_PENDING (1 << 8)
-#define GDS_DMA_RQ_PENDING (1 << 9)
-#define GRBM_EE_BUSY (1 << 10)
-#define DB_CLEAN (1 << 12)
-#define CB_CLEAN (1 << 13)
-#define TA_BUSY (1 << 14)
-#define GDS_BUSY (1 << 15)
-#define VGT_BUSY (1 << 17)
-#define IA_BUSY_NO_DMA (1 << 18)
-#define IA_BUSY (1 << 19)
-#define SX_BUSY (1 << 20)
-#define SPI_BUSY (1 << 22)
-#define BCI_BUSY (1 << 23)
-#define SC_BUSY (1 << 24)
-#define PA_BUSY (1 << 25)
-#define DB_BUSY (1 << 26)
-#define CP_COHERENCY_BUSY (1 << 28)
-#define CP_BUSY (1 << 29)
-#define CB_BUSY (1 << 30)
-#define GUI_ACTIVE (1 << 31)
-#define GRBM_STATUS_SE0 0x2005
-#define GRBM_STATUS_SE1 0x2006
-#define SE_DB_CLEAN (1 << 1)
-#define SE_CB_CLEAN (1 << 2)
-#define SE_BCI_BUSY (1 << 22)
-#define SE_VGT_BUSY (1 << 23)
-#define SE_PA_BUSY (1 << 24)
-#define SE_TA_BUSY (1 << 25)
-#define SE_SX_BUSY (1 << 26)
-#define SE_SPI_BUSY (1 << 27)
-#define SE_SC_BUSY (1 << 29)
-#define SE_DB_BUSY (1 << 30)
-#define SE_CB_BUSY (1 << 31)
-
-#define GRBM_INT_CNTL 0x2018
-# define RDERR_INT_ENABLE (1 << 0)
-# define GUI_IDLE_INT_ENABLE (1 << 19)
-
-#define CP_STRMOUT_CNTL 0x213F
-#define SCRATCH_REG0 0x2140
-#define SCRATCH_REG1 0x2141
-#define SCRATCH_REG2 0x2142
-#define SCRATCH_REG3 0x2143
-#define SCRATCH_REG4 0x2144
-#define SCRATCH_REG5 0x2145
-#define SCRATCH_REG6 0x2146
-#define SCRATCH_REG7 0x2147
-
-#define SCRATCH_UMSK 0x2150
-#define SCRATCH_ADDR 0x2151
-
-#define CP_SEM_WAIT_TIMER 0x216F
-
-#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
-
-#define CP_ME_CNTL 0x21B6
-#define CP_CE_HALT (1 << 24)
-#define CP_PFP_HALT (1 << 26)
-#define CP_ME_HALT (1 << 28)
-
-#define CP_COHER_CNTL2 0x217A
-
-#define CP_RB2_RPTR 0x21BE
-#define CP_RB1_RPTR 0x21BF
-#define CP_RB0_RPTR 0x21C0
-#define CP_RB_WPTR_DELAY 0x21C1
-
-#define CP_QUEUE_THRESHOLDS 0x21D8
-#define ROQ_IB1_START(x) ((x) << 0)
-#define ROQ_IB2_START(x) ((x) << 8)
-#define CP_MEQ_THRESHOLDS 0x21D9
-#define MEQ1_START(x) ((x) << 0)
-#define MEQ2_START(x) ((x) << 8)
-
-#define CP_PERFMON_CNTL 0x21FF
-
#define VGT_VTX_VECT_EJECT_REG 0x222C
-
#define VGT_ESGS_RING_SIZE 0x2232
#define VGT_GSVS_RING_SIZE 0x2233
-
#define VGT_GS_VERTEX_REUSE 0x2235
-
#define VGT_PRIMITIVE_TYPE 0x2256
#define VGT_INDEX_TYPE 0x2257
-
#define VGT_NUM_INDICES 0x225C
#define VGT_NUM_INSTANCES 0x225D
-
#define VGT_TF_RING_SIZE 0x2262
-
#define VGT_HS_OFFCHIP_PARAM 0x226C
-
#define VGT_TF_MEMORY_BASE 0x226E
-#define PA_CL_ENHANCE 0x2285
-#define CLIP_VTX_REORDER_ENA (1 << 0)
-#define NUM_CLIP_SEQ(x) ((x) << 1)
-
-#define PA_SU_LINE_STIPPLE_VALUE 0x2298
-
-#define PA_SC_LINE_STIPPLE_STATE 0x22C4
-
-#define PA_SC_FORCE_EOV_MAX_CNTS 0x22C9
-#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
-#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
-
-#define PA_SC_FIFO_SIZE 0x22F3
-#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
-#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
-#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
-#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
-
#define PA_SC_ENHANCE 0x22FC
-#define SQ_CONFIG 0x2300
-
-#define SQC_CACHES 0x2302
-
-#define SQ_POWER_THROTTLE 0x2396
-#define MIN_POWER(x) ((x) << 0)
-#define MIN_POWER_MASK (0x3fff << 0)
-#define MIN_POWER_SHIFT 0
-#define MAX_POWER(x) ((x) << 16)
-#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
-#define SQ_POWER_THROTTLE2 0x2397
-#define MAX_POWER_DELTA(x) ((x) << 0)
-#define MAX_POWER_DELTA_MASK (0x3fff << 0)
-#define MAX_POWER_DELTA_SHIFT 0
-#define STI_SIZE(x) ((x) << 16)
-#define STI_SIZE_MASK (0x3ff << 16)
-#define STI_SIZE_SHIFT 16
-#define LTI_RATIO(x) ((x) << 27)
-#define LTI_RATIO_MASK (0xf << 27)
-#define LTI_RATIO_SHIFT 27
-
-#define SX_DEBUG_1 0x2418
-
-#define SPI_STATIC_THREAD_MGMT_1 0x2438
-#define SPI_STATIC_THREAD_MGMT_2 0x2439
-#define SPI_STATIC_THREAD_MGMT_3 0x243A
-#define SPI_PS_MAX_WAVE_ID 0x243B
-
-#define SPI_CONFIG_CNTL 0x2440
-
-#define SPI_CONFIG_CNTL_1 0x244F
-#define VTX_DONE_DELAY(x) ((x) << 0)
-#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
-
-#define CGTS_TCC_DISABLE 0x2452
-#define CGTS_USER_TCC_DISABLE 0x2453
-#define TCC_DISABLE_MASK 0xFFFF0000
-#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x2454
-#define OVERRIDE (1 << 21)
-#define LS_OVERRIDE (1 << 22)
-
-#define SPI_LB_CU_MASK 0x24D5
-
#define TA_CNTL_AUX 0x2542
-#define CC_RB_BACKEND_DISABLE 0x263D
-#define BACKEND_DISABLE(x) ((x) << 16)
-#define GB_ADDR_CONFIG 0x263E
-#define NUM_PIPES(x) ((x) << 0)
-#define NUM_PIPES_MASK 0x00000007
-#define NUM_PIPES_SHIFT 0
-#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
-#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
-#define PIPE_INTERLEAVE_SIZE_SHIFT 4
-#define NUM_SHADER_ENGINES(x) ((x) << 12)
-#define NUM_SHADER_ENGINES_MASK 0x00003000
-#define NUM_SHADER_ENGINES_SHIFT 12
-#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
-#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
-#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
-#define NUM_GPUS(x) ((x) << 20)
-#define NUM_GPUS_MASK 0x00700000
-#define NUM_GPUS_SHIFT 20
-#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
-#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
-#define MULTI_GPU_TILE_SIZE_SHIFT 24
-#define ROW_SIZE(x) ((x) << 28)
-#define ROW_SIZE_MASK 0x30000000
-#define ROW_SIZE_SHIFT 28
-
-#define CB_PERFCOUNTER0_SELECT0 0x2688
-#define CB_PERFCOUNTER0_SELECT1 0x2689
-#define CB_PERFCOUNTER1_SELECT0 0x268A
-#define CB_PERFCOUNTER1_SELECT1 0x268B
-#define CB_PERFCOUNTER2_SELECT0 0x268C
-#define CB_PERFCOUNTER2_SELECT1 0x268D
-#define CB_PERFCOUNTER3_SELECT0 0x268E
-#define CB_PERFCOUNTER3_SELECT1 0x268F
-
-#define CB_CGTT_SCLK_CTRL 0x2698
-
-#define TCP_CHAN_STEER_LO 0x2B03
-#define TCP_CHAN_STEER_HI 0x2B94
-
-#define CP_RB0_BASE 0x3040
-#define CP_RB0_CNTL 0x3041
-#define RB_BUFSZ(x) ((x) << 0)
-#define RB_BLKSZ(x) ((x) << 8)
-#define BUF_SWAP_32BIT (2 << 16)
-#define RB_NO_UPDATE (1 << 27)
-#define RB_RPTR_WR_ENA (1 << 31)
-
-#define CP_RB0_RPTR_ADDR 0x3043
-#define CP_RB0_RPTR_ADDR_HI 0x3044
-#define CP_RB0_WPTR 0x3045
-
-#define CP_PFP_UCODE_ADDR 0x3054
-#define CP_PFP_UCODE_DATA 0x3055
-#define CP_ME_RAM_RADDR 0x3056
-#define CP_ME_RAM_WADDR 0x3057
-#define CP_ME_RAM_DATA 0x3058
-
-#define CP_CE_UCODE_ADDR 0x305A
-#define CP_CE_UCODE_DATA 0x305B
-
-#define CP_RB1_BASE 0x3060
-#define CP_RB1_CNTL 0x3061
-#define CP_RB1_RPTR_ADDR 0x3062
-#define CP_RB1_RPTR_ADDR_HI 0x3063
-#define CP_RB1_WPTR 0x3064
-#define CP_RB2_BASE 0x3065
-#define CP_RB2_CNTL 0x3066
-#define CP_RB2_RPTR_ADDR 0x3067
-#define CP_RB2_RPTR_ADDR_HI 0x3068
-#define CP_RB2_WPTR 0x3069
-#define CP_INT_CNTL_RING0 0x306A
-#define CP_INT_CNTL_RING1 0x306B
-#define CP_INT_CNTL_RING2 0x306C
-# define CNTX_BUSY_INT_ENABLE (1 << 19)
-# define CNTX_EMPTY_INT_ENABLE (1 << 20)
-# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
-# define TIME_STAMP_INT_ENABLE (1 << 26)
-# define CP_RINGID2_INT_ENABLE (1 << 29)
-# define CP_RINGID1_INT_ENABLE (1 << 30)
-# define CP_RINGID0_INT_ENABLE (1 << 31)
-#define CP_INT_STATUS_RING0 0x306D
-#define CP_INT_STATUS_RING1 0x306E
-#define CP_INT_STATUS_RING2 0x306F
-# define WAIT_MEM_SEM_INT_STAT (1 << 21)
-# define TIME_STAMP_INT_STAT (1 << 26)
-# define CP_RINGID2_INT_STAT (1 << 29)
-# define CP_RINGID1_INT_STAT (1 << 30)
-# define CP_RINGID0_INT_STAT (1 << 31)
-
// #define PA_SC_RASTER_CONFIG 0xA0D4
# define RB_XSEL2(x) ((x) << 4)
# define RB_XSEL2_MASK (0x3 << 4)
@@ -1185,171 +311,14 @@
# define SE_YSEL(x) ((x) << 28)
# define SE_YSEL_MASK (0x3 << 28)
-/* PIF PHY0 registers idx/data 0x8/0xc */
-#define PB0_PIF_CNTL 0x10
-# define LS2_EXIT_TIME(x) ((x) << 17)
-# define LS2_EXIT_TIME_MASK (0x7 << 17)
-# define LS2_EXIT_TIME_SHIFT 17
-#define PB0_PIF_PAIRING 0x11
-# define MULTI_PIF (1 << 25)
-#define PB0_PIF_PWRDOWN_0 0x12
-# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
-# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_0_SHIFT 24
-#define PB0_PIF_PWRDOWN_1 0x13
-# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
-# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_1_SHIFT 24
-
-#define PB0_PIF_PWRDOWN_2 0x17
-# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
-# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_2_SHIFT 24
-#define PB0_PIF_PWRDOWN_3 0x18
-# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
-# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_3_SHIFT 24
-/* PIF PHY1 registers idx/data 0x10/0x14 */
-#define PB1_PIF_CNTL 0x10
-#define PB1_PIF_PAIRING 0x11
-#define PB1_PIF_PWRDOWN_0 0x12
-#define PB1_PIF_PWRDOWN_1 0x13
-
-#define PB1_PIF_PWRDOWN_2 0x17
-#define PB1_PIF_PWRDOWN_3 0x18
-/* PCIE registers idx/data 0x30/0x34 */
-#define PCIE_CNTL2 0x1c /* PCIE */
-# define SLV_MEM_LS_EN (1 << 16)
-# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
-# define MST_MEM_LS_EN (1 << 18)
-# define REPLAY_MEM_LS_EN (1 << 19)
-#define PCIE_LC_STATUS1 0x28 /* PCIE */
-# define LC_REVERSE_RCVR (1 << 0)
-# define LC_REVERSE_XMIT (1 << 1)
-# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
-# define LC_OPERATING_LINK_WIDTH_SHIFT 2
-# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
-# define LC_DETECTED_LINK_WIDTH_SHIFT 5
-
-#define PCIE_P_CNTL 0x40 /* PCIE */
-# define P_IGNORE_EDB_ERR (1 << 6)
-
/* PCIE PORT registers idx/data 0x38/0x3c */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
-#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
-# define LC_LINK_WIDTH_SHIFT 0
-# define LC_LINK_WIDTH_MASK 0x7
+// #define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
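The encoded widths above are not lane counts (x8 encodes as 4, x16 as 6), so callers translate explicitly. An illustrative sketch, not part of the patch; the helper name is hypothetical and only the LC_LINK_WIDTH_* defines kept above are assumed:

static u32 lanes_to_lc_link_width(int lanes)
{
	switch (lanes) {
	case 0:  return LC_LINK_WIDTH_X0;
	case 1:  return LC_LINK_WIDTH_X1;
	case 2:  return LC_LINK_WIDTH_X2;
	case 4:  return LC_LINK_WIDTH_X4;
	case 8:  return LC_LINK_WIDTH_X8;
	case 16: return LC_LINK_WIDTH_X16;
	default: return LC_LINK_WIDTH_X0; /* unsupported widths fall back to x0 */
	}
}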
-# define LC_LINK_WIDTH_RD_SHIFT 4
-# define LC_LINK_WIDTH_RD_MASK 0x70
-# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
-# define LC_RECONFIG_NOW (1 << 8)
-# define LC_RENEGOTIATION_SUPPORT (1 << 9)
-# define LC_RENEGOTIATE_EN (1 << 10)
-# define LC_SHORT_RECONFIG_EN (1 << 11)
-# define LC_UPCONFIGURE_SUPPORT (1 << 12)
-# define LC_UPCONFIGURE_DIS (1 << 13)
-# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
-# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
-# define LC_DYN_LANES_PWR_STATE_SHIFT 21
-#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
-# define LC_XMIT_N_FTS(x) ((x) << 0)
-# define LC_XMIT_N_FTS_MASK (0xff << 0)
-# define LC_XMIT_N_FTS_SHIFT 0
-# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
-# define LC_N_FTS_MASK (0xff << 24)
-#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
-# define LC_GEN2_EN_STRAP (1 << 0)
-# define LC_GEN3_EN_STRAP (1 << 1)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
-# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
-# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
-# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
-# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
-# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
-# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
-# define LC_CURRENT_DATA_RATE_SHIFT 13
-# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
-# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
-# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
-# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
-# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
-
-#define PCIE_LC_CNTL2 0xb1
-# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
-# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
-
-#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
-# define LC_GO_TO_RECOVERY (1 << 30)
-#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
-# define LC_REDO_EQ (1 << 5)
-# define LC_SET_QUIESCE (1 << 13)
-
-/*
- * UVD
- */
-#define UVD_UDEC_ADDR_CONFIG 0x3bd3
-#define UVD_UDEC_DB_ADDR_CONFIG 0x3bd4
-#define UVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
-#define UVD_RBC_RB_RPTR 0x3da4
-#define UVD_RBC_RB_WPTR 0x3da5
-#define UVD_STATUS 0x3daf
-
-#define UVD_CGC_CTRL 0x3dc2
-# define DCM (1 << 0)
-# define CG_DT(x) ((x) << 2)
-# define CG_DT_MASK (0xf << 2)
-# define CLK_OD(x) ((x) << 6)
-# define CLK_OD_MASK (0x1f << 6)
-
- /* UVD CTX indirect */
-#define UVD_CGC_MEM_CTRL 0xC0
-#define UVD_CGC_CTRL2 0xC1
-# define DYN_OR_EN (1 << 0)
-# define DYN_RR_EN (1 << 1)
-# define G_DIV_ID(x) ((x) << 2)
-# define G_DIV_ID_MASK (0x7 << 2)
/*
* PM4
@@ -1583,45 +552,7 @@
/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
#define DMA1_REGISTER_OFFSET 0x200 /* not a register */
-
-#define DMA_RB_CNTL 0x3400
-# define DMA_RB_ENABLE (1 << 0)
-# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
-# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
-# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
-# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
-#define DMA_RB_BASE 0x3401
-#define DMA_RB_RPTR 0x3402
-#define DMA_RB_WPTR 0x3403
-
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-
-#define DMA_IB_CNTL 0x3409
-# define DMA_IB_ENABLE (1 << 0)
-# define DMA_IB_SWAP_ENABLE (1 << 4)
-# define CMD_VMID_FORCE (1 << 31)
-#define DMA_IB_RPTR 0x340a
-#define DMA_CNTL 0x340b
-# define TRAP_ENABLE (1 << 0)
-# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
-# define SEM_WAIT_INT_ENABLE (1 << 2)
-# define DATA_SWAP_ENABLE (1 << 3)
-# define FENCE_SWAP_ENABLE (1 << 4)
-# define CTXEMPTY_INT_ENABLE (1 << 28)
-#define DMA_STATUS_REG 0x340d
-# define DMA_IDLE (1 << 0)
-#define DMA_TILING_CONFIG 0x342e
-
-#define DMA_POWER_CNTL 0x342f
-# define MEM_POWER_OVERRIDE (1 << 8)
-#define DMA_CLK_CTRL 0x3430
-
-#define DMA_PG 0x3435
-# define PG_CNTL_ENABLE (1 << 0)
-#define DMA_PGFSM_CONFIG 0x3436
-#define DMA_PGFSM_WRITE 0x3437
+#define SDMA_MAX_INSTANCE 2
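SDMA_MAX_INSTANCE pairs with the two per-instance offsets above; the usual pattern is an offset table indexed by instance, sketched here (a register R of instance i then lives at R + sdma_offsets[i]):

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};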
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
(((b) & 0x1) << 26) | \
@@ -1650,6 +581,7 @@
#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
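For reference, DMA_PACKET() places the opcode in bits 31:28, so a header built from an opcode above with every other field zeroed is just the opcode shifted up. A minimal sketch, assuming the amdgpu_ring_write() helper from the ring code:

/* 0xF0000000: a bare NOP header, all optional fields zero */
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));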
+/* VCE */
#define VCE_STATUS 0x20004
#define VCE_VCPU_CNTL 0x20014
#define VCE_CLK_EN (1 << 0)
@@ -1726,378 +658,118 @@
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
-#define AMDGPU_MM_INDEX 0x0000
-#define AMDGPU_MM_DATA 0x0001
-
-#define VERDE_NUM_CRTC 6
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-#define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-#define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-#define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define EVERGREEN_DATA_FORMAT 0x1ac0
-# define EVERGREEN_INTERLEAVE_EN (1 << 0)
-
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
-#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
-#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
-#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
-
-#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a45
-#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1845
-
-#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847
-#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47
-
-#define R600_D1GRPH_SWAP_CONTROL 0x1843
-#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
-#define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
-
-#define AVIVO_D1VGA_CONTROL 0x00cc
-# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1 << 0)
-# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1 << 8)
-# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
-# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
-# define AVIVO_DVGA_CONTROL_ROTATE (1 << 24)
-#define AVIVO_D2VGA_CONTROL 0x00ce
-
-#define R600_BUS_CNTL 0x1508
-# define R600_BIOS_ROM_DIS (1 << 1)
+
#define R600_ROM_CNTL 0x580
# define R600_SCK_OVERWRITE (1 << 1)
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
-#define FMT_BIT_DEPTH_CONTROL 0x1bf2
-#define FMT_TRUNCATE_EN (1 << 0)
-#define FMT_TRUNCATE_DEPTH (1 << 4)
-#define FMT_SPATIAL_DITHER_EN (1 << 8)
-#define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
-#define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
-#define FMT_FRAME_RANDOM_ENABLE (1 << 13)
-#define FMT_RGB_RANDOM_ENABLE (1 << 14)
-#define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
-#define FMT_TEMPORAL_DITHER_EN (1 << 16)
-#define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
-#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
-#define FMT_TEMPORAL_LEVEL (1 << 24)
-#define FMT_TEMPORAL_DITHER_RESET (1 << 25)
-#define FMT_25FRC_SEL(x) ((x) << 26)
-#define FMT_50FRC_SEL(x) ((x) << 28)
-#define FMT_75FRC_SEL(x) ((x) << 30)
-
-#define EVERGREEN_DC_LUT_CONTROL 0x1a80
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x1a81
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x1a82
-#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x1a83
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x1a84
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x1a85
-#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x1a86
-#define EVERGREEN_DC_LUT_30_COLOR 0x1a7c
-#define EVERGREEN_DC_LUT_RW_INDEX 0x1a79
-#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x1a7e
-#define EVERGREEN_DC_LUT_RW_MODE 0x1a78
-
-#define EVERGREEN_GRPH_ENABLE 0x1a00
-#define EVERGREEN_GRPH_CONTROL 0x1a01
-#define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
-#define EVERGREEN_GRPH_DEPTH_8BPP 0
-#define EVERGREEN_GRPH_DEPTH_16BPP 1
-#define EVERGREEN_GRPH_DEPTH_32BPP 2
-#define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
-#define EVERGREEN_ADDR_SURF_2_BANK 0
-#define EVERGREEN_ADDR_SURF_4_BANK 1
-#define EVERGREEN_ADDR_SURF_8_BANK 2
-#define EVERGREEN_ADDR_SURF_16_BANK 3
-#define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
-#define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
-#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
-#define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
-
-#define EVERGREEN_GRPH_FORMAT_INDEXED 0
-#define EVERGREEN_GRPH_FORMAT_ARGB1555 0
-#define EVERGREEN_GRPH_FORMAT_ARGB565 1
-#define EVERGREEN_GRPH_FORMAT_ARGB4444 2
-#define EVERGREEN_GRPH_FORMAT_AI88 3
-#define EVERGREEN_GRPH_FORMAT_MONO16 4
-#define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+#define GRPH_ARRAY_LINEAR_GENERAL 0
+#define GRPH_ARRAY_LINEAR_ALIGNED 1
+#define GRPH_ARRAY_1D_TILED_THIN1 2
+#define GRPH_ARRAY_2D_TILED_THIN1 4
+
+#define ES_AND_GS_AUTO 3
+#define BUF_SWAP_32BIT (2 << 16)
+
+#define GRPH_DEPTH_8BPP 0
+#define GRPH_DEPTH_16BPP 1
+#define GRPH_DEPTH_32BPP 2
+
+/* 8 BPP */
+#define GRPH_FORMAT_INDEXED 0
+
+/* 16 BPP */
+#define GRPH_FORMAT_ARGB1555 0
+#define GRPH_FORMAT_ARGB565 1
+#define GRPH_FORMAT_ARGB4444 2
+#define GRPH_FORMAT_AI88 3
+#define GRPH_FORMAT_MONO16 4
+#define GRPH_FORMAT_BGRA5551 5
/* 32 BPP */
-#define EVERGREEN_GRPH_FORMAT_ARGB8888 0
-#define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
-#define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
-#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
-#define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
-#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
-#define EVERGREEN_GRPH_FORMAT_RGB111110 6
-#define EVERGREEN_GRPH_FORMAT_BGR101111 7
-#define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
-#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
-#define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
-#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
-#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-#define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
-#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
-#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
-#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
-#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
-#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
-
-#define EVERGREEN_GRPH_SWAP_CONTROL 0x1a03
-#define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
-# define EVERGREEN_GRPH_ENDIAN_NONE 0
-# define EVERGREEN_GRPH_ENDIAN_8IN16 1
-# define EVERGREEN_GRPH_ENDIAN_8IN32 2
-# define EVERGREEN_GRPH_ENDIAN_8IN64 3
-#define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
-# define EVERGREEN_GRPH_RED_SEL_R 0
-# define EVERGREEN_GRPH_RED_SEL_G 1
-# define EVERGREEN_GRPH_RED_SEL_B 2
-# define EVERGREEN_GRPH_RED_SEL_A 3
-#define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
-# define EVERGREEN_GRPH_GREEN_SEL_G 0
-# define EVERGREEN_GRPH_GREEN_SEL_B 1
-# define EVERGREEN_GRPH_GREEN_SEL_A 2
-# define EVERGREEN_GRPH_GREEN_SEL_R 3
-#define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
-# define EVERGREEN_GRPH_BLUE_SEL_B 0
-# define EVERGREEN_GRPH_BLUE_SEL_A 1
-# define EVERGREEN_GRPH_BLUE_SEL_R 2
-# define EVERGREEN_GRPH_BLUE_SEL_G 3
-#define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
-# define EVERGREEN_GRPH_ALPHA_SEL_A 0
-# define EVERGREEN_GRPH_ALPHA_SEL_R 1
-# define EVERGREEN_GRPH_ALPHA_SEL_G 2
-# define EVERGREEN_GRPH_ALPHA_SEL_B 3
-
-#define EVERGREEN_D3VGA_CONTROL 0xf8
-#define EVERGREEN_D4VGA_CONTROL 0xf9
-#define EVERGREEN_D5VGA_CONTROL 0xfa
-#define EVERGREEN_D6VGA_CONTROL 0xfb
-
-#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
-
-#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02
-#define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
-
-#define EVERGREEN_GRPH_PITCH 0x1a06
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x1a09
-#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x1a0a
-#define EVERGREEN_GRPH_X_START 0x1a0b
-#define EVERGREEN_GRPH_Y_START 0x1a0c
-#define EVERGREEN_GRPH_X_END 0x1a0d
-#define EVERGREEN_GRPH_Y_END 0x1a0e
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_FLIP_CONTROL 0x1a12
-#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
-
-#define EVERGREEN_VIEWPORT_START 0x1b5c
-#define EVERGREEN_VIEWPORT_SIZE 0x1b5d
-#define EVERGREEN_DESKTOP_HEIGHT 0x1ac1
-
-/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
-#define EVERGREEN_CUR_CONTROL 0x1a66
-# define EVERGREEN_CURSOR_EN (1 << 0)
-# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
-# define EVERGREEN_CURSOR_MONO 0
-# define EVERGREEN_CURSOR_24_1 1
-# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
-# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
-# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
-# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
-# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
-# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
-# define EVERGREEN_CURSOR_URGENT_1_8 1
-# define EVERGREEN_CURSOR_URGENT_1_4 2
-# define EVERGREEN_CURSOR_URGENT_3_8 3
-# define EVERGREEN_CURSOR_URGENT_1_2 4
-#define EVERGREEN_CUR_SURFACE_ADDRESS 0x1a67
-# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
-#define EVERGREEN_CUR_SIZE 0x1a68
-#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x1a69
-#define EVERGREEN_CUR_POSITION 0x1a6a
-#define EVERGREEN_CUR_HOT_SPOT 0x1a6b
-#define EVERGREEN_CUR_COLOR1 0x1a6c
-#define EVERGREEN_CUR_COLOR2 0x1a6d
-#define EVERGREEN_CUR_UPDATE 0x1a6e
-# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
-# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
-# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
-# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
-
-
-#define NI_INPUT_CSC_CONTROL 0x1a35
-# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_CSC_BYPASS 0
-# define NI_INPUT_CSC_PROG_COEFF 1
-# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
-# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_OUTPUT_CSC_CONTROL 0x1a3c
-# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
-# define NI_OUTPUT_CSC_BYPASS 0
-# define NI_OUTPUT_CSC_TV_RGB 1
-# define NI_OUTPUT_CSC_YCBCR_601 2
-# define NI_OUTPUT_CSC_YCBCR_709 3
-# define NI_OUTPUT_CSC_PROG_COEFF 4
-# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
-# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
-
-#define NI_DEGAMMA_CONTROL 0x1a58
-# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_DEGAMMA_BYPASS 0
-# define NI_DEGAMMA_SRGB_24 1
-# define NI_DEGAMMA_XVYCC_222 2
-# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
-# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
-# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
-
-#define NI_GAMUT_REMAP_CONTROL 0x1a59
-# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
-# define NI_GAMUT_REMAP_BYPASS 0
-# define NI_GAMUT_REMAP_PROG_COEFF 1
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
-# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
-# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
-
-#define NI_REGAMMA_CONTROL 0x1aa0
-# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
-# define NI_REGAMMA_BYPASS 0
-# define NI_REGAMMA_SRGB_24 1
-# define NI_REGAMMA_XVYCC_222 2
-# define NI_REGAMMA_PROG_A 3
-# define NI_REGAMMA_PROG_B 4
-# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
-
-
-#define NI_PRESCALE_GRPH_CONTROL 0x1a2d
-# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
-
-#define NI_PRESCALE_OVL_CONTROL 0x1a31
-# define NI_OVL_PRESCALE_BYPASS (1 << 4)
-
-#define NI_INPUT_GAMMA_CONTROL 0x1a10
-# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
-# define NI_INPUT_GAMMA_USE_LUT 0
-# define NI_INPUT_GAMMA_BYPASS 1
-# define NI_INPUT_GAMMA_SRGB_24 2
-# define NI_INPUT_GAMMA_XVYCC_222 3
-# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
-
-#define BLACKOUT_MODE_MASK 0x00000007
-#define VGA_RENDER_CONTROL 0xC0
-#define R_000300_VGA_RENDER_CONTROL 0xC0
-#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
-#define EVERGREEN_CRTC_STATUS 0x1BA3
-#define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
-/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
-#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
-#define EVERGREEN_CRTC_CONTROL 0x1b9c
-# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
-# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
-#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
-# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
-# define EVERGREEN_CRTC_V_BLANK (1 << 0)
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
-#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
-#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
-#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
-#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
-#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
-#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
-#define EVERGREEN_GRPH_UPDATE 0x1a11
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
-#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
-#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
-
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
-#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
-#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
-#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
-#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
-#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
-#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
-
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
-#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
-
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
-#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
-
-#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
-#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
-#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
-#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
-
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
-#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+#define GRPH_FORMAT_ARGB8888 0
+#define GRPH_FORMAT_ARGB2101010 1
+#define GRPH_FORMAT_32BPP_DIG 2
+#define GRPH_FORMAT_8B_ARGB2101010 3
+#define GRPH_FORMAT_BGRA1010102 4
+#define GRPH_FORMAT_8B_BGRA1010102 5
+#define GRPH_FORMAT_RGB111110 6
+#define GRPH_FORMAT_BGR101111 7
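The unprefixed GRPH_* values drop the old shift wrappers, so display code supplies the field shifts itself; per the removed EVERGREEN_GRPH_DEPTH()/EVERGREEN_GRPH_FORMAT() macros above, depth sits at bits 1:0 and format at bits 10:8. A hedged sketch of pairing them (pixel_format and fb_format are illustrative):

u32 fb_format;

switch (pixel_format) {
case DRM_FORMAT_RGB565:
	fb_format = (GRPH_DEPTH_16BPP << 0) | (GRPH_FORMAT_ARGB565 << 8);
	break;
case DRM_FORMAT_XRGB8888:
default:
	fb_format = (GRPH_DEPTH_32BPP << 0) | (GRPH_FORMAT_ARGB8888 << 8);
	break;
}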
+
+#define GRPH_ENDIAN_NONE 0
+#define GRPH_ENDIAN_8IN16 1
+#define GRPH_ENDIAN_8IN32 2
+#define GRPH_ENDIAN_8IN64 3
+#define GRPH_RED_SEL_R 0
+#define GRPH_RED_SEL_G 1
+#define GRPH_RED_SEL_B 2
+#define GRPH_RED_SEL_A 3
+
+#define GRPH_GREEN_SEL_G 0
+#define GRPH_GREEN_SEL_B 1
+#define GRPH_GREEN_SEL_A 2
+#define GRPH_GREEN_SEL_R 3
+
+#define GRPH_BLUE_SEL_B 0
+#define GRPH_BLUE_SEL_A 1
+#define GRPH_BLUE_SEL_R 2
+#define GRPH_BLUE_SEL_G 3
+
+#define GRPH_ALPHA_SEL_A 0
+#define GRPH_ALPHA_SEL_R 1
+#define GRPH_ALPHA_SEL_G 2
+#define GRPH_ALPHA_SEL_B 3
+
+/* CUR_CONTROL */
+ #define CURSOR_MONO 0
+ #define CURSOR_24_1 1
+ #define CURSOR_24_8_PRE_MULT 2
+ #define CURSOR_24_8_UNPRE_MULT 3
+ #define CURSOR_URGENT_ALWAYS 0
+ #define CURSOR_URGENT_1_8 1
+ #define CURSOR_URGENT_1_4 2
+ #define CURSOR_URGENT_3_8 3
+ #define CURSOR_URGENT_1_2 4
+
+/* INPUT_CSC_CONTROL */
+# define INPUT_CSC_BYPASS 0
+# define INPUT_CSC_PROG_COEFF 1
+# define INPUT_CSC_PROG_SHARED_MATRIXA 2
+
+/* OUTPUT_CSC_CONTROL */
+# define OUTPUT_CSC_BYPASS 0
+# define OUTPUT_CSC_TV_RGB 1
+# define OUTPUT_CSC_YCBCR_601 2
+# define OUTPUT_CSC_YCBCR_709 3
+# define OUTPUT_CSC_PROG_COEFF 4
+# define OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+
+/* DEGAMMA_CONTROL */
+# define DEGAMMA_BYPASS 0
+# define DEGAMMA_SRGB_24 1
+# define DEGAMMA_XVYCC_222 2
+
+/* GAMUT_REMAP_CONTROL */
+# define GAMUT_REMAP_BYPASS 0
+# define GAMUT_REMAP_PROG_COEFF 1
+# define GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+
+/* REGAMMA_CONTROL */
+# define REGAMMA_BYPASS 0
+# define REGAMMA_SRGB_24 1
+# define REGAMMA_XVYCC_222 2
+# define REGAMMA_PROG_A 3
+# define REGAMMA_PROG_B 4
+
+
+/* INPUT_GAMMA_CONTROL */
+# define INPUT_GAMMA_USE_LUT 0
+# define INPUT_GAMMA_BYPASS 1
+# define INPUT_GAMMA_SRGB_24 2
+# define INPUT_GAMMA_XVYCC_222 3
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -2113,20 +785,9 @@
#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
-#define CONFIG_CNTL 0x1509
-#define CC_DRM_ID_STRAPS 0X1559
#define AMDGPU_PCIE_INDEX 0xc
#define AMDGPU_PCIE_DATA 0xd
-#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
-#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
-#define DMA_MODE 0x342f
-#define DMA_RB_RPTR_ADDR_HI 0x3407
-#define DMA_RB_RPTR_ADDR_LO 0x3408
-#define DMA_BUSY_MASK 0x20
-#define DMA1_BUSY_MASK 0X40
-#define SDMA_MAX_INSTANCE 2
-
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
#define PCIE_PORT_INDEX 0xe
@@ -2136,8 +797,6 @@
#define EVERGREEN_PIF_PHY1_INDEX 0x10
#define EVERGREEN_PIF_PHY1_DATA 0x14
-#define MC_VM_FB_OFFSET 0x81a
-
/* Discrete VCE clocks */
#define CG_VCEPLL_FUNC_CNTL 0xc0030600
#define VCEPLL_RESET_MASK 0x00000001
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 659eab9b90be..c457be3a3c56 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -584,6 +584,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
* Enable triggering of GPU reset only if specified
* by module parameter.
*/
+ if (adev->pcie_reset_ctx.in_link_reset)
+ return AMD_RESET_METHOD_LINK;
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
else if (!(adev->flags & AMD_IS_APU))
@@ -640,6 +642,9 @@ asic_reset:
case AMD_RESET_METHOD_MODE2:
dev_info(adev->dev, "MODE2 reset\n");
return amdgpu_dpm_mode2_reset(adev);
+ case AMD_RESET_METHOD_LINK:
+ dev_info(adev->dev, "Link reset\n");
+ return amdgpu_device_link_reset(adev);
default:
dev_info(adev->dev, "MODE1 reset\n");
return amdgpu_device_mode1_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index a5000c171c02..cf93fa477674 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -552,6 +552,11 @@
# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+#define PACKET3_RUN_CLEANER_SHADER_9_0 0xD7
+/* 1. header
+ * 2. RESERVED [31:0]
+ */
+
#define PACKET3_RUN_CLEANER_SHADER 0xD2
/* 1. header
* 2. RESERVED [31:0]
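The packet body is a single reserved dword after the header, so emitting either opcode takes two ring writes. A sketch using the PACKET3() header macro defined earlier in this file (count is payload dwords minus one, hence 0):

amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));
amdgpu_ring_write(ring, 0);	/* RESERVED [31:0] */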
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 0e404c074975..e590cbdd8de9 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -174,19 +174,76 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
+static void umc_v12_0_get_retire_flip_bits(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ uint32_t vram_type = adev->gmc.vram_type;
+ struct amdgpu_umc_flip_bits *flip_bits = &(adev->umc.flip_bits);
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
+ switch (vram_type) {
+ case AMDGPU_VRAM_TYPE_HBM:
+ /* other nps modes are taken as nps1 */
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case AMDGPU_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == AMDGPU_NPS2_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ dev_warn(adev->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+
+ adev->umc.retire_unit = 0x1 << flip_bits->bit_num;
+}
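Since retire_unit is 1 << bit_num, one bad page implies a whole unit of pages whose physical addresses differ only in the configured flip bits. An illustrative sketch, not part of the patch, mirroring the loop in umc_v12_0_convert_error_address() below; base_pa is assumed to already have its flip bits cleared:

static void walk_retire_unit(struct amdgpu_device *adev, u64 base_pa)
{
	u32 *flip = adev->umc.flip_bits.flip_bits_in_pa;
	u32 n = adev->umc.flip_bits.bit_num;
	u64 combo, pa;
	u32 i;

	for (combo = 0; combo < (1ULL << n); combo++) {
		pa = base_pa;
		for (i = 0; i < n; i++)
			pa |= ((combo >> i) & 0x1ULL) << flip[i];
		/* pa is one of the 2^n pages retired together */
	}
}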
+
static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
struct ta_ras_query_address_input *addr_in,
struct ta_ras_query_address_output *addr_out,
bool dump_addr)
{
- uint32_t col, col_lower, row, row_lower, bank;
+ uint32_t col, col_lower, row, row_lower, row_high, bank;
uint32_t channel_index = 0, umc_inst = 0;
- uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+ uint32_t i, bit_num, retire_unit, *flip_bits;
uint64_t soc_pa, column, err_addr;
struct ta_ras_query_address_output addr_out_tmp;
struct ta_ras_query_address_output *paddr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
if (!addr_out)
@@ -211,46 +268,46 @@ static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
umc_inst = addr_in->ma.umc_inst;
}
- loop_bits[0] = UMC_V12_0_PA_C2_BIT;
- loop_bits[1] = UMC_V12_0_PA_C3_BIT;
- loop_bits[2] = UMC_V12_0_PA_C4_BIT;
- loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
-
- /* other nps modes are taken as nps1 */
- if (nps == AMDGPU_NPS4_PARTITION_MODE) {
- loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
- loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
- loop_bits[2] = UMC_V12_0_PA_B0_BIT;
- loop_bits[3] = UMC_V12_0_PA_R11_BIT;
- }
+ flip_bits = adev->umc.flip_bits.flip_bits_in_pa;
+ bit_num = adev->umc.flip_bits.bit_num;
+ retire_unit = adev->umc.retire_unit;
soc_pa = paddr_out->pa.pa;
channel_index = paddr_out->pa.channel_idx;
/* clear loop bits in soc physical address */
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa &= ~BIT_ULL(loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa &= ~BIT_ULL(flip_bits[i]);
paddr_out->pa.pa = soc_pa;
/* get column bit 0 and 1 in mca address */
col_lower = (err_addr >> 1) & 0x3ULL;
- /* MA_R13_BIT will be handled later */
+ /* extra row bit will be handled later */
row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(adev->umc.flip_bits.flip_row_bit);
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 5, 0)) {
+ row_high = (soc_pa >> adev->umc.flip_bits.r13_in_pa) & 0x3ULL;
+ /* Each channel holds 2.25GB. In the MCA address to PA
+ * conversion, [R14 R13] are only converted when the two bits
+ * are 0x3, so fetch them from the PA instead of the MCA
+ * address.
+ */
+ row_lower |= (row_high << 13);
+ }
if (!err_data && !dump_addr)
goto out;
/* loop for all possibilities of retired bits */
- for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+ for (column = 0; column < retire_unit; column++) {
soc_pa = paddr_out->pa.pa;
- for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
- soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+ for (i = 0; i < bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits[i]);
col = ((column & 0x7) << 2) | col_lower;
- /* add row bit 13 */
- row = ((column >> 3) << 13) | row_lower;
+ /* handle extra row bit */
+ if (bit_num == RETIRE_FLIP_BITS_NUM)
+ row = ((column >> 3) << adev->umc.flip_bits.flip_row_bit) |
+ row_lower;
if (dump_addr)
dev_info(adev->dev,
@@ -428,8 +485,12 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank
bank->regs[ACA_REG_IDX_ADDR]);
ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
- count = ext_error_code == 0 ?
- ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
+ if (umc_v12_0_is_deferred_error(adev, status))
+ count = ext_error_code == 0 ?
+ adev->umc.err_addr_cnt / adev->umc.retire_unit : 1ULL;
+ else
+ count = ext_error_code == 0 ?
+ ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;
return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}
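For scale: with the NPS1 defaults set in umc_v12_0_get_retire_flip_bits() above, bit_num is 4, so retire_unit is 1 << 4 = 16 pages; 32 logged deferred-error addresses therefore report as a bank error count of 32 / 16 = 2.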
@@ -469,8 +530,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
struct ta_ras_query_address_output addr_out;
- enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
- uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
+ uint32_t shift_bit = adev->umc.flip_bits.flip_bits_in_pa[2];
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -515,11 +575,6 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
ecc_err->channel_idx = addr_out.pa.channel_idx;
- if (adev->gmc.gmc_funcs->query_mem_partition_mode)
- nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
- if (nps == AMDGPU_NPS4_PARTITION_MODE)
- shift_bit = UMC_V12_0_PA_B0_BIT;
-
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
@@ -665,5 +720,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.update_ecc_status = umc_v12_0_update_ecc_status,
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
+ .get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index 9298018d938f..63b7e7254526 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,8 +55,6 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
-/* C2, C3, C4, R13, four bits in MCA address are looped in retirement */
-#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
@@ -64,13 +62,16 @@
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
#define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R10_BIT 32
#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R12_BIT 34
#define UMC_V12_0_PA_R13_BIT 35
/* channel bit in SOC physical address */
#define UMC_V12_0_PA_CH4_BIT 12
#define UMC_V12_0_PA_CH5_BIT 13
/* bank bit in SOC physical address */
#define UMC_V12_0_PA_B0_BIT 19
+#define UMC_V12_0_PA_B1_BIT 20
/* row bits in MCA address */
#define UMC_V12_0_MA_R0_BIT 10
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21b57c29bf7d..c74947705d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1009,6 +1009,11 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
jpeg_v1_0_start(adev, 0);
+ /* Keeping one read-back to ensure all register writes are done;
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
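The read-back added throughout these stop/start paths is the usual flush for posted MMIO writes; in sketch form, using the register helpers these files already use:

WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);	/* write may be posted */
RREG32_SOC15(UVD, 0, mmUVD_STATUS);	/* read-back forces prior writes to complete */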
@@ -1154,6 +1159,11 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
jpeg_v1_0_start(adev, 1);
+ /* Keeping one read-back to ensure all register writes are done;
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1216,6 +1226,12 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
vcn_v1_0_enable_clock_gating(vinst);
vcn_1_0_enable_static_power_gating(vinst);
+
+ /* Keeping one read-back to ensure all register writes are done;
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1250,6 +1266,11 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done;
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index b8d835c9e17e..148b651be7ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -978,6 +978,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done;
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1152,6 +1158,11 @@ static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Keeping one read-back to ensure all register writes are done;
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1183,6 +1194,11 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done;
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
return 0;
}
@@ -1248,6 +1264,11 @@ static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
vcn_v2_0_enable_clock_gating(vinst);
vcn_v2_0_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done;
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS);
+
power_off:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 3eec1b8feaee..58b527a6b795 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1158,6 +1158,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
@@ -1343,6 +1348,11 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
return 0;
}
@@ -1569,6 +1579,11 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
@@ -1635,6 +1650,10 @@ static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 0b19f0ab4480..9fb0d5380589 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1173,6 +1173,11 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
@@ -1360,6 +1365,11 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
}
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
return 0;
}
@@ -1602,6 +1612,11 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
return 0;
}
@@ -1674,6 +1689,11 @@ static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v3_0_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 1f777c125b00..b5071f77f78d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -239,9 +239,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -1122,6 +1122,11 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return 0;
}
@@ -1303,6 +1308,11 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
return 0;
}
@@ -1583,6 +1593,11 @@ static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}
/**
@@ -1666,6 +1681,11 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v4_0_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1947,6 +1967,20 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
return 0;
}
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v4_0_stop(vinst);
+ vcn_v4_0_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1976,6 +2010,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 012f6ea928ec..5a33140f5723 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -169,6 +169,10 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_sw_init(adev, i);
@@ -288,6 +292,31 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_hw_init_inst(struct amdgpu_vcn_inst *vinst)
+{
+ int vcn_inst;
+ struct amdgpu_device *adev = vinst->adev;
+ struct amdgpu_ring *ring;
+ int inst_idx = vinst->inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+ if (ring->use_doorbell) {
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst,
+ adev->vcn.inst[inst_idx].aid_id);
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+ }
+
+ return 0;
+}
+
/**
* vcn_v4_0_3_hw_init - start and test VCN block
*
@@ -299,7 +328,8 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ struct amdgpu_vcn_inst *vinst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_3_start_sriov(adev);
@@ -322,28 +352,9 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn4_fw_shared *fw_shared;
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell) {
- adev->nbio.funcs->vcn_doorbell_range(
- adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst,
- adev->vcn.inst[i].aid_id);
-
- WREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL,
- ring->doorbell_index
- << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL);
- }
+ vinst = &adev->vcn.inst[i];
+ vcn_v4_0_3_hw_init_inst(vinst);
/* Re-init fw_shared when RAS fatal error occurred */
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
@@ -380,6 +391,9 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
+
return 0;
}
@@ -963,6 +977,11 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
/*resetting done, fw can check RB ring */
fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -1356,6 +1375,12 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -1439,6 +1464,11 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
/* apply HW clock gating */
vcn_v4_0_3_enable_clock_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
Done:
return 0;
}
@@ -1564,6 +1594,37 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (amdgpu_sriov_vf(ring->adev))
+ return -EOPNOTSUPP;
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ /* This flag is not set for VF; RRMT is assumed to be always disabled */
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ vcn_v4_0_3_hw_init_inst(vinst);
+ vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
+ r = amdgpu_ring_test_helper(ring);
+
+ return r;
+}
+
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1592,6 +1653,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_3_ring_reset,
};
/**
@@ -1775,11 +1837,24 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
.set = vcn_v4_0_3_set_interrupt_state,
.process = vcn_v4_0_3_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = {
+ .set = vcn_v4_0_3_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
/**
* vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
*
@@ -1795,6 +1870,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.num_types++;
}
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
+
+ adev->vcn.inst->ras_poison_irq.num_types = 1;
+ adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -1942,9 +2020,44 @@ static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
}
+static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_VCN_V4_0_3_VCPU_VCODEC:
+ reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst, sub;
+ uint32_t poison_stat = 0;
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+ for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ vcn_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
.query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
+ .query_poison_status = vcn_v4_0_3_query_poison_status,
};
static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@@ -2020,6 +2133,13 @@ static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
return r;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
&vcn_v4_0_3_aca_info, NULL);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
index 03572a1d0c9c..aeab89853a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -24,6 +24,12 @@
#ifndef __VCN_V4_0_3_H__
#define __VCN_V4_0_3_H__
+enum amdgpu_vcn_v4_0_3_sub_block {
+ AMDGPU_VCN_V4_0_3_VCPU_VCODEC = 0,
+
+ AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index a1171e6152ed..16ade84facc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -208,6 +208,10 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+ fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+ fw_shared->drm_key_wa.method =
+ AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -215,6 +219,13 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
}
+ adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -1023,6 +1034,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
+
return 0;
}
@@ -1205,6 +1220,10 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+
return 0;
}
@@ -1235,6 +1254,11 @@ static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}
/**
@@ -1318,6 +1342,11 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v4_0_5_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1436,6 +1465,20 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v4_0_5_stop(vinst);
+ vcn_v4_0_5_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1463,6 +1506,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v4_0_5_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index b90da3d3e140..f8e3f0b882da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -196,9 +196,9 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
vcn_v5_0_0_alloc_ip_dump(adev);
@@ -794,6 +794,11 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return 0;
}
@@ -946,6 +951,11 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
return 0;
}
@@ -977,6 +987,11 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
return;
}
@@ -1058,6 +1073,11 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v5_0_0_enable_static_power_gating(vinst);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, i, regUVD_STATUS);
+
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);
@@ -1172,6 +1192,20 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ return -EOPNOTSUPP;
+
+ vcn_v5_0_0_stop(vinst);
+ vcn_v5_0_0_start(vinst);
+
+ return amdgpu_ring_test_helper(ring);
+}
+
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1199,6 +1233,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_0_ring_reset,
};
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index e0e84ef7f568..cdefd7fcb0da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -30,6 +30,7 @@
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "vcn_v4_0_3.h"
+#include "mmsch_v5_0.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -39,12 +40,13 @@
#include <drm/drm_drv.h>
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
-
+static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
/**
* vcn_v5_0_1_early_init - set function pointers and load microcode
*
@@ -64,6 +66,7 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
vcn_v5_0_1_set_unified_ring_funcs(adev);
vcn_v5_0_1_set_irq_funcs(adev);
+ vcn_v5_0_1_set_ras_funcs(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state;
@@ -111,6 +114,10 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ /* VCN POISON TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
vcn_inst = GET_INST(VCN, i);
@@ -126,7 +133,14 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 32 * vcn_inst;
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
@@ -143,6 +157,12 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
vcn_v5_0_0_alloc_ip_dump(adev);
return amdgpu_vcn_sysfs_reset_mask_init(adev);
@@ -172,6 +192,9 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
drm_dev_exit(idx);
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
@@ -204,24 +227,38 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
- adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
- ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst),
- adev->vcn.inst[i].aid_id);
-
- /* Re-init fw_shared, if required */
- vcn_v5_0_1_fw_shared_init(adev, i);
-
- r = amdgpu_ring_test_helper(ring);
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v5_0_1_start_sriov(adev);
if (r)
return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v5_0_1_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ } else {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
+ adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
+
+ /* Re-init fw_shared, if required */
+ vcn_v5_0_1_fw_shared_init(adev, i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
}
return 0;
@@ -247,6 +284,9 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
+
return 0;
}
@@ -629,6 +669,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ /* resetting ring, fw should not check RB ring */
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
/* Pause dpg */
vcn_v5_0_1_pause_dpg_mode(vinst, &state);
@@ -641,7 +684,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
@@ -652,6 +695,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ /* resetting done, fw can check RB ring */
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
@@ -663,6 +707,195 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
+static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v5_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v5_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v5_0_cmd_end end = { {0} };
+ struct mmsch_v5_0_init_header header;
+
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ vcn_inst = GET_INST(VCN, i);
+
+ vcn_v5_0_1_fw_shared_init(adev, vcn_inst);
+
+ memset(&header, 0, sizeof(struct mmsch_v5_0_init_header));
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2;
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+
+ table_size = 0;
+
+ MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+
+ offset = 0;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0), 0);
+ } else {
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset;
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET1), 0);
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr));
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_OFFSET2), 0);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE);
+
+ fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr));
+ MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+ MMSCH_V5_0_INSERT_END();
+
+ header.vcn0.init_status = 0;
+ header.vcn0.table_offset = header.total_size;
+ header.vcn0.table_size = table_size;
+ header.total_size += table_size;
+
+ /* Send init table to mmsch */
+ size = sizeof(struct mmsch_v5_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp);
+
+ size = header.total_size;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size);
+
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ param = 0x00000001;
+ WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS) {
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
+ "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
+ }
+ }
+
+ return 0;
+}
+
/**
* vcn_v5_0_1_start - VCN start
*
@@ -809,6 +1042,11 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -843,6 +1081,11 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
}
/**
@@ -918,6 +1161,11 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
/* clear status */
WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+ /* Keeping one read-back to ensure all register writes are done,
+ * otherwise it may introduce race conditions.
+ */
+ RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
return 0;
}
@@ -1103,8 +1351,18 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = vinst->adev;
int ret = 0;
+ /* for SRIOV, guest should not control VCN Power-gating
+ * MMSCH FW should control Power-gating and clock-gating
+ * guest should avoid touching CGC and PG
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ vinst->cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == vinst->cur_state)
return 0;
@@ -1160,10 +1418,24 @@ static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgp
return 0;
}
+static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
.process = vcn_v5_0_1_process_interrupt,
};
+static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = {
+ .set = vcn_v5_0_1_set_ras_interrupt_state,
+ .process = amdgpu_vcn_process_poison_irq,
+};
+
/**
* vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
*
@@ -1177,7 +1449,12 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
adev->vcn.inst->irq.num_types++;
+
adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
+
+ adev->vcn.inst->ras_poison_irq.num_types = 1;
+ adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs;
+
}
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
@@ -1209,3 +1486,139 @@ const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
.rev = 1,
.funcs = &vcn_v5_0_1_ip_funcs,
};
+
+static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_VCN_V5_0_1_VCPU_VCODEC:
+ reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst, sub;
+ uint32_t poison_stat = 0;
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+ for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ vcn_v5_0_1_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = {
+ .query_poison_status = vcn_v5_0_1_query_poison_status,
+};
+
+static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* reference to the SMU driver interface header file */
+static int vcn_v5_0_1_err_codes[] = {
+ 14, 15, /* VCN */
+};
+
+static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ vcn_v5_0_1_err_codes,
+ ARRAY_SIZE(vcn_v5_0_1_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = {
+ .aca_bank_parser = vcn_v5_0_1_aca_bank_parser,
+ .aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid,
+};
+
+static const struct aca_info vcn_v5_0_1_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &vcn_v5_0_1_aca_bank_ops,
+};
+
+static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
+ &vcn_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
+static struct amdgpu_vcn_ras vcn_v5_0_1_ras = {
+ .ras_block = {
+ .hw_ops = &vcn_v5_0_1_ras_hw_ops,
+ .ras_late_init = vcn_v5_0_1_ras_late_init,
+ },
+};
+
+static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
+{
+ adev->vcn.ras = &vcn_v5_0_1_ras;
+}
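The sw_init and hw_init hunks above move the unified ring's doorbell stride from 9 to 11 slots per instance on bare metal and use a 32-slot stride per instance under SR-IOV, both offset from the same (vcn_ring0_1 << 1) base. A small worked example of that arithmetic (the base value 8 is illustrative only; the real one comes from adev->doorbell_index):

#include <stdio.h>

int main(void)
{
	unsigned int vcn_ring0_1 = 8;		/* assumed base index */
	unsigned int base = vcn_ring0_1 << 1;	/* 16 */
	unsigned int inst;

	for (inst = 0; inst < 4; inst++)
		printf("inst %u: bare-metal %u, sriov %u\n",
		       inst, base + 11 * inst, base + 32 * inst);
	return 0;
}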
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
index 8fd90bd10807..b72e4da68317 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -27,6 +27,13 @@
#define regVCN_RRMT_CNTL 0x0940
#define regVCN_RRMT_CNTL_BASE_IDX 1
+
+enum amdgpu_vcn_v5_0_1_sub_block {
+ AMDGPU_VCN_V5_0_1_VCPU_VCODEC = 0,
+
+ AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
#endif /* __VCN_v5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index faa0dd75dd6d..85846fd08ce4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -350,6 +350,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
if (ret)
return ret;
}
+ ih[i]->overflow = false;
}
if (!amdgpu_sriov_vf(adev))
@@ -437,7 +438,10 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ if (!amdgpu_sriov_vf(adev))
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+ else
+ ih->overflow = true;
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index d3c3d3ab7225..62e88e5362e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -5,7 +5,7 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT))
select HMM_MIRROR
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 981d9adcc5e1..73acbe0b7c21 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -91,7 +91,6 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
- unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
u32 pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
@@ -125,11 +124,7 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
return;
}
- if (info.vmid == vmid)
- kfd_signal_vm_fault_event(pdd, &info, NULL);
- else
- kfd_signal_vm_fault_event(pdd, &info, NULL);
-
+ kfd_signal_vm_fault_event(pdd, &info, NULL);
kfd_unref_process(p);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1e9dd00620bf..a2149afa5803 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -2039,9 +2039,7 @@ static int criu_get_process_object_info(struct kfd_process *p,
num_events = kfd_get_num_events(p);
- ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
- if (ret)
- return ret;
+ svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
*num_objects = num_queues + num_events + num_svm_ranges;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4a5a0a4e00f2..9bde2c64540f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -27,6 +27,16 @@
#include "kfd_priv.h"
static struct dentry *debugfs_root;
+static struct dentry *debugfs_proc;
+static struct list_head procs;
+
+struct debugfs_proc_entry {
+ struct list_head list;
+ struct dentry *proc_dentry;
+ pid_t pid;
+};
+
+#define MAX_DEBUGFS_FILENAME_LEN 32
static int kfd_debugfs_open(struct inode *inode, struct file *file)
{
@@ -92,6 +102,8 @@ static const struct file_operations kfd_debugfs_hang_hws_fops = {
void kfd_debugfs_init(void)
{
debugfs_root = debugfs_create_dir("kfd", NULL);
+ debugfs_proc = debugfs_create_dir("proc", debugfs_root);
+ INIT_LIST_HEAD(&procs);
debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
kfd_debugfs_mqds_by_process, &kfd_debugfs_fops);
@@ -107,5 +119,69 @@ void kfd_debugfs_init(void)
void kfd_debugfs_fini(void)
{
+ debugfs_remove_recursive(debugfs_proc);
debugfs_remove_recursive(debugfs_root);
}
+
+static ssize_t kfd_debugfs_pasid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct kfd_process_device *pdd = file_inode(file)->i_private;
+ char tmp[32];
+ int len;
+
+ len = snprintf(tmp, sizeof(tmp), "%u\n", pdd->pasid);
+
+ return simple_read_from_buffer(buf, count, ppos, tmp, len);
+}
+
+static const struct file_operations kfd_debugfs_pasid_fops = {
+ .owner = THIS_MODULE,
+ .read = kfd_debugfs_pasid_read,
+};
+
+void kfd_debugfs_add_process(struct kfd_process *p)
+{
+ int i;
+ char name[MAX_DEBUGFS_FILENAME_LEN];
+ struct debugfs_proc_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ list_add(&entry->list, &procs);
+ entry->pid = p->lead_thread->pid;
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "%d",
+ (int)entry->pid);
+ entry->proc_dentry = debugfs_create_dir(name, debugfs_proc);
+
+ /* Create debugfs files for each GPU:
+ * - proc/<pid>/pasid_<gpuid>
+ */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ snprintf(name, MAX_DEBUGFS_FILENAME_LEN, "pasid_%u",
+ pdd->dev->id);
+ debugfs_create_file((const char *)name, S_IFREG | 0444,
+ entry->proc_dentry, pdd,
+ &kfd_debugfs_pasid_fops);
+ }
+}
+
+void kfd_debugfs_remove_process(struct kfd_process *p)
+{
+ struct debugfs_proc_entry *entry, *next;
+
+ mutex_lock(&kfd_processes_mutex);
+ list_for_each_entry_safe(entry, next, &procs, list) {
+ if (entry->pid != p->lead_thread->pid)
+ continue;
+
+ debugfs_remove_recursive(entry->proc_dentry);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ mutex_unlock(&kfd_processes_mutex);
+}
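The debugfs additions above create one directory per process under kfd/proc/<pid>, with one pasid_<gpuid> file per GPU attached to that process. A hypothetical userspace reader; the pid (1234) and gpu id (1002) are placeholders, and the mount point is assumed to be the usual /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
	char path[128], buf[32];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/kfd/proc/%d/pasid_%u", 1234, 1002u);

	f = fopen(path, "r");
	if (!f)
		return 1;

	if (fgets(buf, sizeof(buf), f))
		printf("pasid: %s", buf);

	fclose(f);
	return 0;
}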
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index b9c82be6ce13..bf0854bd5555 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -352,11 +352,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &aldebaran_kfd2kgd;
break;
case IP_VERSION(9, 4, 3):
- gfx_target_version = adev->rev_id >= 1 ? 90402
- : adev->flags & AMD_IS_APU ? 90400
- : 90401;
- f2g = &gc_9_4_3_kfd2kgd;
- break;
case IP_VERSION(9, 4, 4):
gfx_target_version = 90402;
f2g = &gc_9_4_3_kfd2kgd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c610e172a2b8..76359c6a3f3a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1576,8 +1576,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->sdma_bitmap, get_num_sdma_queues(dqm))) {
+ dev_warn(dev, "No more SDMA queue to allocate (%d total queues)\n",
+ get_num_sdma_queues(dqm));
return -ENOMEM;
}
@@ -1602,8 +1603,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
- dev_err(dev, "No more XGMI SDMA queue to allocate\n");
+ if (bitmap_empty(dqm->xgmi_sdma_bitmap, get_num_xgmi_sdma_queues(dqm))) {
+ dev_warn(dev, "No more XGMI SDMA queue to allocate (%d total queues)\n",
+ get_num_xgmi_sdma_queues(dqm));
return -ENOMEM;
}
if (restore_sdma_id) {
@@ -1662,8 +1664,8 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
}
if (!free_bit_found) {
- dev_err(dev, "No more SDMA queue to allocate for target ID %i\n",
- q->properties.sdma_engine_id);
+ dev_warn(dev, "No more SDMA queue to allocate for target ID %i (%d total queues)\n",
+ q->properties.sdma_engine_id, num_queues);
return -ENOMEM;
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index fecdb6794075..2b294ada3ec0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1177,6 +1177,25 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_hsa_memory_exception_data exception_data;
+ int i;
+
+ memset(&exception_data, 0, sizeof(exception_data));
+ exception_data.va = gpu_va;
+ exception_data.failure.NotPresent = 1;
+
+ /* Send a VM segfault event to every KFD process device */
+ for (i = 0; i < p->n_pdds; i++) {
+ pdd = p->pdds[i];
+ exception_data.gpu_id = pdd->user_gpu_id;
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, &exception_data);
+ }
+}
+
void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data)
@@ -1331,6 +1350,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+ kfd_unref_process(p);
return;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
index 37b69fe0ede3..3e1ad8974797 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
@@ -168,14 +168,14 @@ static bool event_interrupt_isr_v10(struct kfd_node *dev,
client_id != SOC15_IH_CLIENTID_SE3SH)
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return 0;
/* Interrupt types we care about: various signals and faults.
@@ -217,37 +217,66 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF0_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_BUF1_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
- THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_AUTO_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF0_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF1_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID));
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
@@ -259,21 +288,37 @@ static void event_interrupt_wq_v10(struct kfd_node *dev,
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SA_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
- SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
- WGP_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SA_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id1,
+ SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID),
sq_intr_err_type);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index c5f97e6e36ff..2788a52714d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -148,44 +148,69 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \
KFD_CTXID0_DOORBELL_ID_MASK)
-static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
+ THREAD_TRACE_UTC_ERROR));
}
-static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ SH_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
+ WGP_ID));
}
-static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
+static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0,
+ uint32_t context_id1)
{
- pr_warn_ratelimited(
+ dev_warn_ratelimited(
+ dev->adev->dev,
"sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID));
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ DETAIL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ TYPE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0,
+ WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1,
+ WGP_ID));
}
static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
@@ -255,14 +280,14 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev,
(context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG))
return false;
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
- /* If there is no valid PASID, it's likely a bug */
- if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ if (pasid == 0)
return false;
/* Interrupt types we care about: various signals and faults.
@@ -353,10 +378,10 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (sq_int_enc) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- print_sq_intr_info_auto(context_id0, context_id1);
+ print_sq_intr_info_auto(dev, context_id0, context_id1);
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- print_sq_intr_info_inst(context_id0, context_id1);
+ print_sq_intr_info_inst(dev, context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -366,7 +391,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev,
return;
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
- print_sq_intr_info_error(context_id0, context_id1);
+ print_sq_intr_info_error(dev, context_id0, context_id1);
sq_int_errtype = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE);
if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index b8a91bf4ef30..4ceb251312a6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -314,11 +314,12 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev,
& ~pasid_mask) | pasid);
}
- pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
- client_id, source_id, vmid, pasid);
- pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]);
+ dev_dbg(dev->adev->dev,
+ "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
+ dev_dbg(dev->adev->dev, "%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
+ data[0], data[1], data[2], data[3], data[4], data[5], data[6],
+ data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
@@ -379,28 +380,82 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
- pr_debug_ratelimited(
+ dev_dbg_ratelimited(
+ dev->adev->dev,
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ WLT),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ REG_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ CMD_TIMESTAMP),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ HOST_REG_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ IMMED_OVERFLOW),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_AUTO_CTXID,
+ THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
- pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_dbg_ratelimited(
+ dev->adev->dev,
+ "sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_int_data);
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
@@ -412,14 +467,37 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
- pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
- REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ dev_warn_ratelimited(
+ dev->adev->dev,
+ "sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ DATA),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SH_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ PRIV),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ WAVE_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ SIMD_ID),
+ REG_GET_FIELD(
+ context_id0,
+ SQ_INTERRUPT_WORD_WAVE_CTXID,
+ CU_ID),
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 80320a6c8854..97933d2a3803 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -495,6 +495,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+ /* Allow context switch so a massive command buffer of long-running
+ * SDMA commands can't starve other processes
+ */
+ m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
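/*
 * A minimal sketch of what the new line does: SWITCH_INSIDE_IB lets the
 * SDMA engine context-switch between packets of a single indirect buffer
 * instead of only at IB boundaries. The *_MASK macro follows the usual
 * generated-register-header pattern; the shift value below is an assumed
 * placeholder for illustration, not quoted from the headers.
 */
#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8	/* assumed */
#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK \
	(1U << SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT)

m->sdmax_rlcx_ib_cntl |= SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK;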
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 271c567242ab..b1a6eb349bb3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -31,6 +31,7 @@
#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
+#define OVER_SUBSCRIPTION_XNACK_CONFLICT (1 << 3)
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
@@ -44,7 +45,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
- int *over_subscription)
+ int *over_subscription,
+ int xnack_conflict)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -73,6 +75,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
if (gws_queue_count > 1)
*over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
+ if (xnack_conflict && (node->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
+ *over_subscription |= OVER_SUBSCRIPTION_XNACK_CONFLICT;
if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
@@ -96,7 +100,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
- int *is_over_subscription)
+ int *is_over_subscription,
+ int xnack_conflict)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -105,7 +110,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
if (WARN_ON(pm->allocated))
return -EINVAL;
- pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
+ pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription,
+ xnack_conflict);
mutex_lock(&pm->lock);
@@ -142,11 +148,27 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct queue *q;
struct kernel_queue *kq;
int is_over_subscription;
+ int xnack_enabled = -1;
+ bool xnack_conflict = false;
rl_wptr = retval = processes_mapped = 0;
+ /* Check if processes set different xnack modes */
+ list_for_each_entry(cur, queues, list) {
+ qpd = cur->qpd;
+ if (xnack_enabled < 0)
+ /* First process */
+ xnack_enabled = qpd->pqm->process->xnack_enabled;
+ else if (qpd->pqm->process->xnack_enabled != xnack_enabled) {
+ /* Found a process with a different xnack mode */
+ xnack_conflict = true;
+ break;
+ }
+ }
+
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
- &alloc_size_bytes, &is_over_subscription);
+ &alloc_size_bytes, &is_over_subscription,
+ xnack_conflict);
if (retval)
return retval;
@@ -156,9 +178,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->active_queue_count);
+build_runlist_ib:
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
qpd = cur->qpd;
+ /* group processes with the same xnack mode together */
+ if (qpd->pqm->process->xnack_enabled != xnack_enabled)
+ continue;
/* build map process packet */
if (processes_mapped >= pm->dqm->processes_count) {
dev_dbg(dev, "Not enough space left in runlist IB\n");
@@ -215,18 +241,26 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
alloc_size_bytes);
}
}
+ if (xnack_conflict) {
+ /* pick up processes with the other xnack mode */
+ xnack_enabled = !xnack_enabled;
+ xnack_conflict = false;
+ goto build_runlist_ib;
+ }
dev_dbg(dev, "Finished map process and queues to runlist\n");
if (is_over_subscription) {
if (!pm->is_over_subscription)
- dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
- is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
- " too many processes." : "",
- is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
- " too many queues." : "",
- is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
- " multiple processes using cooperative launch." : "");
+ dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s%s. Expect reduced ROCm performance.\n",
+ is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
+ " too many processes" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
+ " too many queues" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
+ " multiple processes using cooperative launch" : "",
+ is_over_subscription & OVER_SUBSCRIPTION_XNACK_CONFLICT ?
+ " xnack on/off processes mixed on gfx9" : "");
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
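/*
 * A sketch of the two-pass grouping the hunk above implements, with the
 * build_runlist_ib label/goto recast as a loop for illustration
 * (xnack_conflict and xnack_enabled have the meanings given in the hunk):
 */
int passes = xnack_conflict ? 2 : 1;

for (int pass = 0; pass < passes; pass++) {
	list_for_each_entry(cur, queues, list) {
		/* skip processes that belong to the other xnack group */
		if (cur->qpd->pqm->process->xnack_enabled != xnack_enabled)
			continue;
		/* ... emit map-process and map-queues packets ... */
	}
	xnack_enabled = !xnack_enabled;	/* second pass: the other group */
}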
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2893fd5e5d00..505036968a77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process));
- if (adev->enforce_isolation[kfd->node_id])
+ if (adev->enforce_isolation[kfd->node_id] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
@@ -102,7 +102,8 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
sizeof(struct pm4_mes_map_process_aldebaran));
- if (adev->enforce_isolation[knode->node_id])
+ if (adev->enforce_isolation[knode->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE)
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
@@ -165,9 +166,9 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- concurrent_proc_cnt = adev->enforce_isolation[kfd->node_id] ?
- 1 : min(pm->dqm->processes_count,
- kfd->max_proc_per_quantum);
+ concurrent_proc_cnt = (adev->enforce_isolation[kfd->node_id] ==
+ AMDGPU_ENFORCE_ISOLATION_ENABLE) ?
+ 1 : min(pm->dqm->processes_count, kfd->max_proc_per_quantum);
packet = (struct pm4_mes_runlist *)buffer;
@@ -202,6 +203,8 @@ static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+ if (pm->dqm->dev->adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN)
+ packet->bitfields2.enb_xnack_retry_disable_check = 1;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
@@ -237,7 +240,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
- packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+ packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index cd8611401a66..e356a207d03c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -63,7 +63,8 @@ struct pm4_mes_set_resources {
struct {
uint32_t vmid_mask:16;
uint32_t unmap_latency:8;
- uint32_t reserved1:5;
+ uint32_t reserved1:4;
+ uint32_t enb_xnack_retry_disable_check:1;
enum mes_set_resources_queue_type_enum queue_type:3;
} bitfields2;
uint32_t ordinal2;
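/*
 * The new flag is carved out of reserved1 (5 -> 4 bits), so bitfields2
 * still packs into one dword: 16 + 8 + 4 + 1 + 3 = 32. A standalone
 * compile-time check of that invariant could look like this sketch (the
 * struct is restated locally and the enum narrowed to a plain field):
 */
#include <assert.h>
#include <stdint.h>

struct set_resources_bitfields2 {
	uint32_t vmid_mask:16;
	uint32_t unmap_latency:8;
	uint32_t reserved1:4;
	uint32_t enb_xnack_retry_disable_check:1;
	uint32_t queue_type:3;
};

static_assert(sizeof(struct set_resources_bitfields2) == sizeof(uint32_t),
	      "set_resources bitfields2 must stay one dword");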
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f6aedf69c644..d221c58dccc3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1507,6 +1507,8 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va);
+
void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data);
@@ -1581,10 +1583,15 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
+void kfd_debugfs_add_process(struct kfd_process *p);
+void kfd_debugfs_remove_process(struct kfd_process *p);
+
#else
static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}
+static inline void kfd_debugfs_add_process(struct kfd_process *p) {}
+static inline void kfd_debugfs_remove_process(struct kfd_process *p) {}
#endif
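/*
 * The pattern above keeps call sites unconditional: with CONFIG_DEBUG_FS
 * the real definitions are linked in; without it, these empty static
 * inlines compile away. The process-lifecycle hooks added elsewhere in
 * this patch call them directly, e.g.:
 */
kfd_debugfs_add_process(process);	/* no-op when debugfs is compiled out */
kfd_debugfs_remove_process(p);		/* likewise, in the release path */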
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7c0c24732481..722ac1662bdc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -900,6 +900,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
+ kfd_debugfs_add_process(process);
+
init_waitqueue_head(&process->wait_irq_drain);
}
out:
@@ -1054,6 +1056,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ kfd_smi_event_process(pdd, false);
+
pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
pdd->dev->id, p->lead_thread->pid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
@@ -1174,6 +1178,7 @@ static void kfd_process_wq_release(struct work_struct *work)
dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
+ kfd_debugfs_remove_process(p);
kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
@@ -1715,6 +1720,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
pdd->pasid = avm->pasid;
pdd->drm_file = drm_file;
+ kfd_smi_event_process(pdd, true);
+
return 0;
err_get_pasid:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7eb370b68159..c643e0ccec52 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -279,20 +279,17 @@ static int init_user_queue(struct process_queue_manager *pqm,
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
- if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
- >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
- if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
- pr_err("Queue memory allocated to wrong device\n");
- retval = -EINVAL;
- goto free_gang_ctx_bo;
- }
+ if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
+ pr_err("Queue memory allocated to wrong device\n");
+ retval = -EINVAL;
+ goto free_gang_ctx_bo;
+ }
- retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
- &(*q)->wptr_bo_gart);
- if (retval) {
- pr_err("Failed to map wptr bo to GART\n");
- goto free_gang_ctx_bo;
- }
+ retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
+ &(*q)->wptr_bo_gart);
+ if (retval) {
+ pr_err("Failed to map wptr bo to GART\n");
+ goto free_gang_ctx_bo;
}
}
@@ -451,8 +448,15 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
- pqm->process->lead_thread->pid, type, retval);
+ if ((type == KFD_QUEUE_TYPE_SDMA ||
+ type == KFD_QUEUE_TYPE_SDMA_XGMI ||
+ type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) &&
+ retval == -ENOMEM)
+ pr_warn("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
+ else
+ pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
goto err_create_queue;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 4afff7094caf..a65c67cf56ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -402,7 +402,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
{
u32 vgpr_size = 0x40000;
- if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
+ if (gfxv == 90402 || /* GFX_VERSION_AQUA_VANJARAM */
gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
gfxv == 90008 || /* GFX_VERSION_ARCTURUS */
gfxv == 90500)
@@ -462,7 +462,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
if (gfxv == 80002) /* GFX_VERSION_TONGA */
props->eop_buffer_size = 0x8000;
- else if ((gfxv / 100 * 100) == 90400) /* GFX_VERSION_AQUA_VANJARAM */
+ else if (gfxv == 90402) /* GFX_VERSION_AQUA_VANJARAM */
props->eop_buffer_size = 4096;
else if (gfxv >= 80000)
props->eop_buffer_size = 4096;
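/*
 * Why the old check was too broad: integer division truncates, so the
 * rounded comparison matched every 904xx variant rather than just Aqua
 * Vanjaram:
 *	90402 / 100 * 100 == 90400	(the intended match)
 *	90401 / 100 * 100 == 90400	(matched too, unintentionally)
 * Pinning the comparison to gfxv == 90402 removes that ambiguity.
 */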
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 9b8169761ec5..83d9384ac815 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -163,10 +163,9 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
unsigned int event)
{
- uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);
uint64_t events = READ_ONCE(client->events);
- if (pid && client->pid != pid && !(client->suser && (events & all)))
+ if (pid && client->pid != pid && !client->suser)
return false;
return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
@@ -345,6 +344,27 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
pid, address, last - address + 1, node->id, trigger));
}
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
+{
+ struct amdgpu_task_info *task_info;
+ struct amdgpu_vm *avm;
+
+ if (!pdd->drm_priv)
+ return;
+
+ avm = drm_priv_to_vm(pdd->drm_priv);
+ task_info = amdgpu_vm_get_task_info_vm(avm);
+
+ if (task_info) {
+ kfd_smi_event_add(0, pdd->dev,
+ start ? KFD_SMI_EVENT_PROCESS_START :
+ KFD_SMI_EVENT_PROCESS_END,
+ KFD_EVENT_FMT_PROCESS(task_info->pid,
+ task_info->task_name));
+ amdgpu_vm_put_task_info(task_info);
+ }
+}
+
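/*
 * The start/end pairing for the new event, as wired up elsewhere in this
 * patch: kfd_process_device_init_vm() reports PROCESS_START once the VM,
 * PASID and drm_file are in place, and kfd_process_destroy_pdds() reports
 * PROCESS_END during teardown:
 */
kfd_smi_event_process(pdd, true);	/* kfd_process_device_init_vm() */
kfd_smi_event_process(pdd, false);	/* kfd_process_destroy_pdds() */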
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
{
struct kfd_smi_client *client;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 503bff13d815..bb4d72b57387 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -53,4 +53,5 @@ void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger);
+void kfd_smi_event_process(struct kfd_process_device *pdd, bool start);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 100717a98ec1..865dca2547de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1245,8 +1245,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
if (ext_coherent)
- mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
+ mtype_local = AMDGPU_VM_MTYPE_CC;
else
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -4076,8 +4075,8 @@ exit:
return ret;
}
-int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size)
+void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size)
{
uint64_t total_size, accessibility_size, common_attr_size;
int nattr_common = 4, nattr_accessibility = 1;
@@ -4089,8 +4088,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
*svm_priv_data_size = 0;
svms = &p->svms;
- if (!svms)
- return -EINVAL;
mutex_lock(&svms->lock);
list_for_each_entry(prange, &svms->list, list) {
@@ -4132,7 +4129,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
*svm_priv_data_size);
- return 0;
}
int kfd_criu_checkpoint_svm(struct kfd_process *p,
@@ -4149,8 +4145,6 @@ int kfd_criu_checkpoint_svm(struct kfd_process *p,
struct mm_struct *mm;
svms = &p->svms;
- if (!svms)
- return -EINVAL;
mm = get_task_mm(p->lead_thread);
if (!mm) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 6ea23c78009c..01c7a4877904 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -184,8 +184,8 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_dma_unmap(struct svm_range *prange);
-int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size);
+void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
uint8_t __user *user_priv_data,
uint64_t *priv_offset);
@@ -237,13 +237,12 @@ static inline int svm_range_schedule_evict_svm_bo(
return -EINVAL;
}
-static inline int svm_range_get_info(struct kfd_process *p,
- uint32_t *num_svm_ranges,
- uint64_t *svm_priv_data_size)
+static inline void svm_range_get_info(struct kfd_process *p,
+ uint32_t *num_svm_ranges,
+ uint64_t *svm_priv_data_size)
{
*num_svm_ranges = 0;
*svm_priv_data_size = 0;
- return 0;
}
static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 9bbee484d57c..4ec73f33535e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -510,6 +510,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.capability |=
HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) &&
+ (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
+
sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
dev->node_props.max_engine_clk_fcompute);
@@ -1267,34 +1271,41 @@ static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev,
{
struct kfd_node *gpu = outbound_link->gpu;
struct amdgpu_device *adev = gpu->adev;
- int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_nodes = adev->gmc.xgmi.num_physical_nodes;
+ unsigned int num_xgmi_sdma_engines = kfd_get_num_xgmi_sdma_engines(gpu);
+ unsigned int num_sdma_engines = kfd_get_num_sdma_engines(gpu);
+ uint32_t sdma_eng_id_mask = (1 << num_sdma_engines) - 1;
+ uint32_t xgmi_sdma_eng_id_mask =
+ ((1 << num_xgmi_sdma_engines) - 1) << num_sdma_engines;
+
bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu &&
adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&
- kfd_get_num_xgmi_sdma_engines(gpu) >= 14 &&
- (!(adev->flags & AMD_IS_APU) && num_xgmi_nodes == 8);
+ num_xgmi_sdma_engines >= 6 && (!(adev->flags & AMD_IS_APU) &&
+ num_xgmi_nodes == 8);
if (support_rec_eng) {
int src_socket_id = adev->gmc.xgmi.physical_node_id;
int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id;
+ unsigned int reshift = num_xgmi_sdma_engines == 6 ? 1 : 0;
outbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[src_socket_id][dst_socket_id];
+ 1 << (rec_sdma_eng_map[src_socket_id][dst_socket_id] >> reshift);
inbound_link->rec_sdma_eng_id_mask =
- 1 << rec_sdma_eng_map[dst_socket_id][src_socket_id];
- } else {
- int num_sdma_eng = kfd_get_num_sdma_engines(gpu);
- int i, eng_offset = 0;
+ 1 << (rec_sdma_eng_map[dst_socket_id][src_socket_id] >> reshift);
- if (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
- kfd_get_num_xgmi_sdma_engines(gpu) && to_dev->gpu) {
- eng_offset = num_sdma_eng;
- num_sdma_eng = kfd_get_num_xgmi_sdma_engines(gpu);
- }
+ /* If recommended engine is out of range, need to reset the mask */
+ if (outbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ outbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
+ if (inbound_link->rec_sdma_eng_id_mask & sdma_eng_id_mask)
+ inbound_link->rec_sdma_eng_id_mask = xgmi_sdma_eng_id_mask;
- for (i = 0; i < num_sdma_eng; i++) {
- outbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- inbound_link->rec_sdma_eng_id_mask |= (1 << (i + eng_offset));
- }
+ } else {
+ uint32_t engine_mask = (outbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
+ num_xgmi_sdma_engines && to_dev->gpu) ? xgmi_sdma_eng_id_mask :
+ sdma_eng_id_mask;
+
+ outbound_link->rec_sdma_eng_id_mask = engine_mask;
+ inbound_link->rec_sdma_eng_id_mask = engine_mask;
}
}
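/*
 * The mask construction above, worked through with example counts
 * (num_sdma_engines = 2, num_xgmi_sdma_engines = 6):
 *	sdma_eng_id_mask      = (1 << 2) - 1        = 0x03
 *	xgmi_sdma_eng_id_mask = ((1 << 6) - 1) << 2 = 0xfc
 * The two engine-ID ranges are disjoint, which is what the "out of
 * range" fallback checks: a recommended engine that tests true against
 * sdma_eng_id_mask cannot be a valid XGMI engine, so the full XGMI mask
 * is used instead.
 */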
@@ -2001,8 +2012,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (!amdgpu_sriov_vf(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
- if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
- dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index ab2a97e354da..7329b8cc2576 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -38,6 +38,7 @@ AMDGPUDM = \
amdgpu_dm_pp_smu.o \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
+ amdgpu_dm_quirks.o \
amdgpu_dm_wb.o
ifdef CONFIG_DRM_AMD_DC_FP
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 64df8ca448b3..bc4cd11bfc79 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -80,7 +80,6 @@
#include <linux/power_supply.h>
#include <linux/firmware.h>
#include <linux/component.h>
-#include <linux/dmi.h>
#include <linux/sort.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -115,6 +114,8 @@
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
+static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch");
+
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
@@ -280,7 +281,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -301,7 +302,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
acrtc = adev->mode_info.crtcs[crtc];
if (!acrtc->dm_irq_params.stream) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ drm_err(adev_to_drm(adev), "dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
@@ -372,6 +373,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
+ if (new_state->stream->adjust.timing_adjust_pending)
+ return true;
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@@ -673,21 +676,15 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported) {
- bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
- bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
- bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
-
+ acrtc->dm_irq_params.vrr_params.supported &&
+ acrtc->dm_irq_params.freesync_config.state ==
+ VRR_STATE_ACTIVE_VARIABLE) {
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
- if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
- dc_stream_adjust_vmin_vmax(adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
- }
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
}
/*
@@ -755,6 +752,29 @@ static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
complete(&adev->dm.dmub_aux_transfer_done);
}
+static void dmub_aux_fused_io_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (!adev || !notify) {
+ ASSERT(false);
+ return;
+ }
+
+ const struct dmub_cmd_fused_request *req = &notify->fused_request;
+ const uint8_t ddc_line = req->u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) {
+ ASSERT(false);
+ return;
+ }
+
+ struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line];
+
+ static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch");
+ memcpy(sync->reply_data, req, sizeof(*req));
+ complete(&sync->replied);
+}
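/*
 * The completion handshake this callback finishes, assuming a waiter of
 * the usual form (the submit side is not in this hunk):
 *
 *	amdgpu_dm_init():  init_completion(&adev->dm.fused_io[i].replied);
 *	this callback:     copy the reply, then complete(&sync->replied);
 *	submitter sketch:  wait_for_completion_timeout(&sync->replied, t);
 *
 * The static_assert and the ddc_line bounds check mean a malformed
 * notification can neither overrun reply_data nor index past fused_io[].
 */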
+
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
@@ -778,18 +798,18 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
return;
if (notify == NULL) {
- DRM_ERROR("DMUB HPD callback notification was NULL");
+ drm_err(adev_to_drm(adev), "DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
- DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
+ drm_err(adev_to_drm(adev), "DMUB HPD index (%u)is abnormal", notify->link_index);
return;
}
/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
- DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+ drm_info(adev_to_drm(adev), "Skip DMUB HPD IRQ callback in suspend/resume\n");
return;
}
@@ -806,11 +826,11 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
- DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD IRQ callback: link_index=%u\n", link_index);
else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
- DRM_INFO("DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
+ drm_info(adev_to_drm(adev), "DMUB HPD RX IRQ callback: link_index=%u\n", link_index);
else
- DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ drm_warn(adev_to_drm(adev), "DMUB Unknown HPD callback type %d, link_index=%u\n",
notify->type, link_index);
hpd_aconnector = aconnector;
@@ -822,7 +842,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
if (hpd_aconnector) {
if (notify->type == DMUB_NOTIFICATION_HPD) {
if (hpd_aconnector->dc_link->hpd_status == (notify->hpd_status == DP_HPD_PLUG))
- DRM_WARN("DMUB reported hpd status unchanged. link_index=%u\n", link_index);
+ drm_warn(adev_to_drm(adev), "DMUB reported hpd status unchanged. link_index=%u\n", link_index);
handle_hpd_irq_helper(hpd_aconnector);
} else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) {
handle_hpd_rx_irq(hpd_aconnector);
@@ -841,7 +861,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
static void dmub_hpd_sense_callback(struct amdgpu_device *adev,
struct dmub_notification *notify)
{
- DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n");
+ drm_dbg_driver(adev_to_drm(adev), "DMUB HPD SENSE callback.\n");
}
/**
@@ -877,7 +897,7 @@ static void dm_handle_hpd_work(struct work_struct *work)
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
- DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ drm_err(adev_to_drm(dmub_hpd_wrk->adev), "dmub_hpd_wrk dmub_notify is NULL");
return;
}
@@ -891,6 +911,30 @@ static void dm_handle_hpd_work(struct work_struct *work)
}
+static const char *dmub_notification_type_str(enum dmub_notification_type e)
+{
+ switch (e) {
+ case DMUB_NOTIFICATION_NO_DATA:
+ return "NO_DATA";
+ case DMUB_NOTIFICATION_AUX_REPLY:
+ return "AUX_REPLY";
+ case DMUB_NOTIFICATION_HPD:
+ return "HPD";
+ case DMUB_NOTIFICATION_HPD_IRQ:
+ return "HPD_IRQ";
+ case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
+ return "SET_CONFIG_REPLY";
+ case DMUB_NOTIFICATION_DPIA_NOTIFICATION:
+ return "DPIA_NOTIFICATION";
+ case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY:
+ return "HPD_SENSE_NOTIFY";
+ case DMUB_NOTIFICATION_FUSED_IO:
+ return "FUSED_IO";
+ default:
+ return "<unknown>";
+ }
+}
+
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -908,22 +952,13 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct dmcub_trace_buf_entry entry = { 0 };
u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
- static const char *const event_type[] = {
- "NO_DATA",
- "AUX_REPLY",
- "HPD",
- "HPD_IRQ",
- "SET_CONFIGC_REPLY",
- "DPIA_NOTIFICATION",
- "HPD_SENSE_NOTIFY",
- };
do {
if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
entry.param0, entry.param1);
- DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ drm_dbg_driver(adev_to_drm(adev), "trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
entry.trace_code, entry.tick_count, entry.param0, entry.param1);
} else
break;
@@ -933,7 +968,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
if (count > DMUB_TRACE_MAX_READ)
- DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
+ drm_dbg_driver(adev_to_drm(adev), "Warning : count > DMUB_TRACE_MAX_READ");
if (dc_enable_dmub_notifications(adev->dm.dc) &&
irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
@@ -941,25 +976,25 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
- DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ drm_err(adev_to_drm(adev), "DM: notify type %d invalid!", notify.type);
continue;
}
if (!dm->dmub_callback[notify.type]) {
- DRM_WARN("DMUB notification skipped due to no handler: type=%s\n",
- event_type[notify.type]);
+ drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n",
+ dmub_notification_type_str(notify.type));
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
- DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk");
return;
}
dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
GFP_ATOMIC);
if (!dmub_hpd_wrk->dmub_notify) {
kfree(dmub_hpd_wrk);
- DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ drm_err(adev_to_drm(adev), "Failed to allocate dmub_hpd_wrk->dmub_notify");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
@@ -1017,10 +1052,10 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
&compressor->gpu_addr, &compressor->cpu_addr);
if (r)
- DRM_ERROR("DM: Failed to initialize FBC\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize FBC\n");
else {
adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
- DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ drm_info(adev_to_drm(adev), "DM: FBC alloc %lu\n", max_size*4);
}
}
@@ -1185,13 +1220,13 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
if (!fb_info) {
- DRM_ERROR("No framebuffer info for DMUB service.\n");
+ drm_err(adev_to_drm(adev), "No framebuffer info for DMUB service.\n");
return -EINVAL;
}
if (!dmub_fw) {
/* Firmware required for DMUB support. */
- DRM_ERROR("No firmware provided for DMUB.\n");
+ drm_err(adev_to_drm(adev), "No firmware provided for DMUB.\n");
return -EINVAL;
}
@@ -1201,19 +1236,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error checking HW support for DMUB: %d\n", status);
return -EINVAL;
}
if (!has_hw_support) {
- DRM_INFO("DMUB unsupported on ASIC\n");
+ drm_info(adev_to_drm(adev), "DMUB unsupported on ASIC\n");
return 0;
}
/* Reset DMCUB if it was previously running - before we overwrite its memory. */
status = dmub_srv_hw_reset(dmub_srv);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Error resetting DMUB HW: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Error resetting DMUB HW: %d\n", status);
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
@@ -1296,6 +1331,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 1):
case IP_VERSION(3, 6, 0):
hw_params.ips_sequential_ono = adev->external_rev_id > 0x10;
+ hw_params.lower_hbr3_phy_ssc = true;
break;
default:
break;
@@ -1303,14 +1339,14 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error initializing DMUB HW: %d\n", status);
return -EINVAL;
}
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
/* Init DMCU and ABM if available. */
if (dmcu && abm) {
@@ -1321,11 +1357,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
if (!adev->dm.dc->ctx->dmub_srv)
adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
- DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ drm_err(adev_to_drm(adev), "Couldn't allocate DC DMUB server!\n");
return -ENOMEM;
}
- DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "DMUB hardware initialized: version=0x%08X\n",
adev->dm.dmcub_fw_version);
/* Keeping sanity checks off if
@@ -1368,18 +1404,18 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
status = dmub_srv_is_hw_init(dmub_srv, &init);
if (status != DMUB_STATUS_OK)
- DRM_WARN("DMUB hardware init check failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "DMUB hardware init check failed: %d\n", status);
if (status == DMUB_STATUS_OK && init) {
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
- DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ drm_warn(adev_to_drm(adev), "Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
r = dm_dmub_hw_init(adev);
if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
}
}
@@ -1489,18 +1525,18 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
+ adev = offload_work->adev;
if (!aconnector) {
- DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ drm_err(adev_to_drm(adev), "Can't retrieve aconnector in hpd_rx_irq_offload_work");
goto skip;
}
- adev = drm_to_adev(aconnector->base.dev);
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
if (new_connection_type == dc_connection_none)
@@ -1569,8 +1605,9 @@ skip:
}
-static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct amdgpu_device *adev)
{
+ struct dc *dc = adev->dm.dc;
int max_caps = dc->caps.max_links;
int i = 0;
struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
@@ -1586,7 +1623,7 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
if (hpd_rx_offload_wq[i].wq == NULL) {
- DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ drm_err(adev_to_drm(adev), "create amdgpu_dm_hpd_rx_offload_wq fail!");
goto out_err;
}
@@ -1635,153 +1672,6 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-struct amdgpu_dm_quirks {
- bool aux_hpd_discon;
- bool support_edp0_on_dp1;
-};
-
-static struct amdgpu_dm_quirks quirk_entries = {
- .aux_hpd_discon = false,
- .support_edp0_on_dp1 = false
-};
-
-static int edp0_on_dp1_callback(const struct dmi_system_id *id)
-{
- quirk_entries.support_edp0_on_dp1 = true;
- return 0;
-}
-
-static int aux_hpd_discon_callback(const struct dmi_system_id *id)
-{
- quirk_entries.aux_hpd_discon = true;
- return 0;
-}
-
-static const struct dmi_system_id dmi_quirk_table[] = {
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
- },
- },
- {
- .callback = aux_hpd_discon_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
- },
- },
- {
- .callback = edp0_on_dp1_callback,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
- },
- },
- {}
- /* TODO: refactor this from a fixed table to a dynamic option */
-};
-
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
-{
- int dmi_id;
- struct drm_device *dev = dm->ddev;
-
- dm->aux_hpd_discon_quirk = false;
- init_data->flags.support_edp0_on_dp1 = false;
-
- dmi_id = dmi_check_system(dmi_quirk_table);
-
- if (!dmi_id)
- return;
-
- if (quirk_entries.aux_hpd_discon) {
- dm->aux_hpd_discon_quirk = true;
- drm_info(dev, "aux_hpd_discon_quirk attached\n");
- }
- if (quirk_entries.support_edp0_on_dp1) {
- init_data->flags.support_edp0_on_dp1 = true;
- drm_info(dev, "support_edp0_on_dp1 attached\n");
- }
-}
void*
dm_allocate_gpu_mem(
@@ -1957,7 +1847,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
@@ -2068,7 +1958,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- retrieve_dmi_info(&adev->dm, &init_data);
+ retrieve_dmi_info(&adev->dm);
+ if (adev->dm.edp0_on_dp1_quirk)
+ init_data.flags.support_edp0_on_dp1 = true;
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -2079,10 +1971,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ drm_info(adev_to_drm(adev), "Display Core v%s initialized on %s\n", DC_VER,
dce_version_to_string(adev->dm.dc->ctx->dce_version));
} else {
- DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ drm_info(adev_to_drm(adev), "Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -2108,33 +2000,44 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP)
+ if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
adev->dm.dc->debug.force_disable_subvp = true;
+ adev->dm.dc->debug.fams2_config.bits.enable = false;
+ }
if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
adev->dm.dc->debug.using_dml21 = true;
}
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE)
+ adev->dm.dc->debug.hdcp_lc_force_fw_enable = true;
+
+ if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
+ adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
+
+ if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
+ adev->dm.dc->debug.skip_detection_link_training = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
- DRM_INFO("DP-HDMI FRL PCON supported\n");
+ drm_info(adev_to_drm(adev), "DP-HDMI FRL PCON supported\n");
r = dm_dmub_hw_init(adev);
if (r) {
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
goto error;
}
dc_hardware_init(adev->dm.dc);
- adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2149,10 +2052,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize freesync_module.\n");
} else
- DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
amdgpu_dm_init_color_mod();
@@ -2161,7 +2064,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2172,9 +2075,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
else
- DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+ drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
dc_init_callbacks(adev->dm.dc, &init_params);
}
@@ -2182,20 +2085,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback");
+ goto error;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++)
+ init_completion(&adev->dm.fused_io[i].replied);
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
+ dmub_aux_fused_io_callback, false)) {
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2212,7 +2124,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
if (amdgpu_dm_initialize_drm_device(adev)) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
@@ -2227,7 +2139,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
@@ -2235,14 +2147,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctx.crtc_ctx)
- DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n");
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
adev->dm.secure_display_ctx.support_mul_roi = true;
#endif
- DRM_DEBUG_DRIVER("KMS initialized.\n");
+ drm_dbg_driver(adev_to_drm(adev), "KMS initialized.\n");
return 0;
error:
@@ -2415,7 +2327,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
default:
break;
}
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ drm_err(adev_to_drm(adev), "Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -2433,7 +2345,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ drm_err(adev_to_drm(adev), "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
@@ -2558,7 +2470,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ drm_info(adev_to_drm(adev), "Loading DMUB firmware via PSP: version=0x%08X\n",
adev->dm.dmcub_fw_version);
}
@@ -2567,7 +2479,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_srv = adev->dm.dmub_srv;
if (!dmub_srv) {
- DRM_ERROR("Failed to allocate DMUB service!\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate DMUB service!\n");
return -ENOMEM;
}
@@ -2580,7 +2492,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
/* Create the DMUB service. */
status = dmub_srv_create(dmub_srv, &create_params);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error creating DMUB service: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error creating DMUB service: %d\n", status);
return -EINVAL;
}
@@ -2605,7 +2517,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
&region_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB region info: %d\n", status);
return -EINVAL;
}
@@ -2634,14 +2546,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
fb_info = adev->dm.dmub_fb_info;
if (!fb_info) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Failed to allocate framebuffer info for DMUB service!\n");
return -ENOMEM;
}
status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
- DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ drm_err(adev_to_drm(adev), "Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
}
@@ -2658,7 +2570,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n");
return -EINVAL;
}
@@ -2964,7 +2876,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
ret = amdgpu_dpm_write_watermarks_table(adev);
if (ret) {
- DRM_ERROR("Failed to update WMTABLE!\n");
+ drm_err(adev_to_drm(adev), "Failed to update WMTABLE!\n");
return ret;
}
@@ -2982,13 +2894,13 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
if (oem_ddc_service) {
oem_i2c = create_i2c(oem_ddc_service, true);
if (!oem_i2c) {
- dev_info(adev->dev, "Failed to create oem i2c adapter data\n");
+ drm_info(adev_to_drm(adev), "Failed to create oem i2c adapter data\n");
return -ENOMEM;
}
r = i2c_add_adapter(&oem_i2c->base);
if (r) {
- dev_info(adev->dev, "Failed to register oem i2c\n");
+ drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
return r;
}
@@ -3031,7 +2943,7 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block)
r = dm_oem_i2c_hw_init(adev);
if (r)
- dev_info(adev->dev, "Failed to add OEM i2c bus\n");
+ drm_info(adev_to_drm(adev), "Failed to add OEM i2c bus\n");
return 0;
}
@@ -3074,7 +2986,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
if (rc)
- DRM_WARN("Failed to %s pflip interrupts\n",
+ drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
if (enable) {
@@ -3084,14 +2996,14 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
if (rc)
- DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
* don't use amdgpu_irq_get/put() to avoid refcount change.
*/
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
- DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ drm_warn(adev_to_drm(adev), "Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
}
}
@@ -3467,11 +3379,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-
- /* leave display off for S4 sequence */
- if (adev->in_s4)
- return 0;
-
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -3926,20 +3833,21 @@ static void handle_hpd_irq(void *param)
}
-static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+static void schedule_hpd_rx_offload_work(struct amdgpu_device *adev, struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
struct hpd_rx_irq_offload_work *offload_work =
kzalloc(sizeof(*offload_work), GFP_KERNEL);
if (!offload_work) {
- DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate hpd_rx_irq_offload_work.\n");
return;
}
INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
offload_work->data = hpd_irq_data;
offload_work->offload_wq = offload_wq;
+ offload_work->adev = adev;
queue_work(offload_wq->wq, &offload_work->work);
DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
@@ -3981,7 +3889,7 @@ static void handle_hpd_rx_irq(void *param)
goto out;
if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4003,7 +3911,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4020,7 +3928,7 @@ static void handle_hpd_rx_irq(void *param)
spin_unlock(&offload_wq->offload_lock);
if (!skip)
- schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ schedule_hpd_rx_offload_work(adev, offload_wq, hpd_irq_data);
goto out;
}
@@ -4030,7 +3938,7 @@ out:
if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(dc_link);
@@ -4093,19 +4001,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -4126,7 +4034,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1 ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6) {
- DRM_ERROR("Failed to register hpd irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd irq!\n");
return -EINVAL;
}
@@ -4144,7 +4052,7 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_HPD1RX ||
int_params.irq_source > DC_IRQ_SOURCE_HPD6RX) {
- DRM_ERROR("Failed to register hpd rx irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register hpd rx irq!\n");
return -EINVAL;
}
@@ -4186,7 +4094,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4197,7 +4105,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4216,7 +4124,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4227,7 +4135,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4245,7 +4153,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4287,7 +4195,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4298,7 +4206,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4316,7 +4224,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4327,7 +4235,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4346,7 +4254,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4357,7 +4265,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4375,7 +4283,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4425,7 +4333,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
- DRM_ERROR("Failed to add crtc irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add crtc irq id!\n");
return r;
}
@@ -4436,7 +4344,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VBLANK1 ||
int_params.irq_source > DC_IRQ_SOURCE_VBLANK6) {
- DRM_ERROR("Failed to register vblank irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vblank irq!\n");
return -EINVAL;
}
@@ -4457,7 +4365,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
vrtl_int_srcid[i], &adev->vline0_irq);
if (r) {
- DRM_ERROR("Failed to add vline0 irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vline0 irq id!\n");
return r;
}
@@ -4468,7 +4376,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_DC1_VLINE0 ||
int_params.irq_source > DC_IRQ_SOURCE_DC6_VLINE0) {
- DRM_ERROR("Failed to register vline0 irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vline0 irq!\n");
return -EINVAL;
}
@@ -4496,7 +4404,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
- DRM_ERROR("Failed to add vupdate irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add vupdate irq id!\n");
return r;
}
@@ -4507,7 +4415,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_VUPDATE1 ||
int_params.irq_source > DC_IRQ_SOURCE_VUPDATE6) {
- DRM_ERROR("Failed to register vupdate irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register vupdate irq!\n");
return -EINVAL;
}
@@ -4527,7 +4435,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
- DRM_ERROR("Failed to add page flip irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add page flip irq id!\n");
return r;
}
@@ -4538,7 +4446,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
if (int_params.irq_source == DC_IRQ_SOURCE_INVALID ||
int_params.irq_source < DC_IRQ_SOURCE_PFLIP_FIRST ||
int_params.irq_source > DC_IRQ_SOURCE_PFLIP_LAST) {
- DRM_ERROR("Failed to register pflip irq!\n");
+ drm_err(adev_to_drm(adev), "Failed to register pflip irq!\n");
return -EINVAL;
}
@@ -4556,7 +4464,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
- DRM_ERROR("Failed to add hpd irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add hpd irq id!\n");
return r;
}
@@ -4578,7 +4486,7 @@ static int register_outbox_irq_handlers(struct amdgpu_device *adev)
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
&adev->dmub_outbox_irq);
if (r) {
- DRM_ERROR("Failed to add outbox irq id!\n");
+ drm_err(adev_to_drm(adev), "Failed to add outbox irq id!\n");
return r;
}
@@ -4810,45 +4718,72 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
-static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
- uint32_t brightness)
+/* Rescale from [0..max - min] to [0..AMDGPU_MAX_BL_LEVEL] */
+static inline u32 scale_input_to_fw(int min, int max, u64 input)
{
- unsigned int min, max;
- u8 prev_signal = 0, prev_lum = 0;
+ return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
+}
- if (!get_brightness_range(caps, &min, &max))
- return brightness;
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
+static inline u32 scale_fw_to_input(int min, int max, u64 input)
+{
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
+}
- for (int i = 0; i < caps->data_points; i++) {
- u8 signal, lum;
+static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned int min, unsigned int max,
+ uint32_t *user_brightness)
+{
+ u32 brightness = scale_input_to_fw(min, max, *user_brightness);
+ u8 prev_signal = 0, prev_lum = 0;
+ int i = 0;
- if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
- break;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
+ return;
- signal = caps->luminance_data[i].input_signal;
- lum = caps->luminance_data[i].luminance;
+ if (!caps->data_points)
+ return;
+
+ /* choose the start index to run fewer interpolation steps */
+ if (caps->luminance_data[caps->data_points/2].input_signal > brightness)
+ i = caps->data_points/2;
+ do {
+ u8 signal = caps->luminance_data[i].input_signal;
+ u8 lum = caps->luminance_data[i].luminance;
/*
* brightness == signal: luminance is percent numerator
* brightness < signal: interpolate between previous and current luminance numerator
* brightness > signal: find next data point
*/
- if (brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (brightness - prev_signal),
- signal - prev_signal);
- else if (brightness > signal) {
+ if (brightness > signal) {
prev_signal = signal;
prev_lum = lum;
+ i++;
continue;
}
- brightness = DIV_ROUND_CLOSEST(lum * brightness, 101);
- break;
- }
+ if (brightness < signal)
+ lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
+ (brightness - prev_signal),
+ signal - prev_signal);
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
+ return;
+ } while (i < caps->data_points);
+}
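To make the interpolation concrete, a worked example with an invented luminance table (real values come from the panel's data points):

	/*
	 * data points (input_signal, luminance): (0, 0), (50, 20), (100, 100)
	 * for brightness = 75 (already rescaled into the fw range):
	 *   75 > 0 and 75 > 50, so the first two points only update
	 *   prev_signal = 50, prev_lum = 20;
	 *   at (100, 100), 75 < 100 triggers the interpolation:
	 *     lum = 20 + DIV_ROUND_CLOSEST((100 - 20) * (75 - 50), 100 - 50) = 60
	 * the result DIV_ROUND_CLOSEST(60 * 75, 101) = 45 is then mapped back
	 * to the user range by scale_fw_to_input().
	 */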
+
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
- // Rescale 0..255 to min..max
- return min + DIV_ROUND_CLOSEST((max - min) * brightness,
- AMDGPU_MAX_BL_LEVEL);
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ convert_custom_brightness(caps, min, max, &brightness);
+
+ // Rescale 0..max to min..max
+ return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max);
}
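A quick round-trip check of the two rescales (custom curve ignored), with an assumed panel range of min = 16, max = 255:

	/*
	 * from user: 16 + DIV_ROUND_CLOSEST_ULL((255 - 16) * 128, 255)
	 *          = 16 + DIV_ROUND_CLOSEST_ULL(30592, 255) = 16 + 120 = 136
	 * to user:   DIV_ROUND_CLOSEST_ULL(255 * (136 - 16), 255 - 16)
	 *          = DIV_ROUND_CLOSEST_ULL(30600, 239) = 128
	 * so a sysfs write of 128 survives the round trip.
	 */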
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4861,8 +4796,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
if (brightness < min)
return 0;
- // Rescale min..max to 0..255
- return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ // Rescale min..max to 0..max
+ return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min),
max - min);
}
@@ -4987,8 +4922,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
- struct amdgpu_dm_backlight_caps caps = { 0 };
+ struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
+ int min, max;
if (aconnector->bl_idx == -1)
return;
@@ -5000,18 +4936,21 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
return;
}
- amdgpu_acpi_get_backlight_caps(&caps);
- if (caps.caps_valid) {
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+ if (get_brightness_range(caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
- props.brightness = caps.ac_level;
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100);
else
- props.brightness = caps.dc_level;
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100);
+ /* min is zero, so max needs to be adjusted */
+ props.max_brightness = max - min;
+ drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
+ caps->ac_level, caps->dc_level);
} else
- props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = props.max_brightness = AMDGPU_MAX_BL_LEVEL;
- if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
drm_info(drm, "Using custom brightness curve\n");
- props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -5023,10 +4962,10 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
- DRM_ERROR("DM: Backlight registration failed!\n");
+ drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
} else
- DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+ drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@@ -5040,7 +4979,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
if (!plane) {
- DRM_ERROR("KMS: Failed to allocate plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to allocate plane\n");
return -ENOMEM;
}
plane->type = plane_type;
@@ -5058,7 +4997,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
- DRM_ERROR("KMS: Failed to initialize plane\n");
+ drm_err(adev_to_drm(dm->adev), "KMS: Failed to initialize plane\n");
kfree(plane);
return ret;
}
@@ -5127,14 +5066,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize mode config\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize mode config\n");
return -EINVAL;
}
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
if (primary_planes > AMDGPU_MAX_PLANES) {
- DRM_ERROR("DM: Plane nums out of 6 planes\n");
+ drm_err(adev_to_drm(adev), "DM: Plane nums out of 6 planes\n");
return -EINVAL;
}
@@ -5147,7 +5086,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, mode_info, i,
DRM_PLANE_TYPE_PRIMARY, plane)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize primary plane\n");
goto fail;
}
}
@@ -5179,14 +5118,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize overlay plane\n");
goto fail;
}
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
- DRM_ERROR("KMS: Failed to initialize crtc\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize crtc\n");
goto fail;
}
@@ -5206,7 +5145,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5256,7 +5195,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
if (link_cnt > MAX_LINKS) {
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"KMS: Cannot support more than %d display indexes\n",
MAX_LINKS);
goto fail;
@@ -5272,12 +5211,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
if (!wbcon) {
- DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to allocate writeback connector\n");
continue;
}
if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
- DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize writeback connector\n");
kfree(wbcon);
continue;
}
@@ -5297,12 +5236,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
- DRM_ERROR("KMS: Failed to initialize encoder\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize encoder\n");
goto fail;
}
if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
- DRM_ERROR("KMS: Failed to initialize connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to initialize connector\n");
goto fail;
}
@@ -5311,7 +5250,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
aconnector;
if (!dc_link_detect_connection_type(link, &new_connection_type))
- DRM_ERROR("KMS: Failed to detect connector\n");
+ drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(link);
@@ -5333,8 +5272,15 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (amdgpu_dm_set_replay_caps(link, aconnector))
psr_feature_enabled = false;
- if (psr_feature_enabled)
+ if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
+ drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
+ link->psr_settings.psr_feature_enabled,
+ link->psr_settings.psr_version,
+ link->dpcd_caps.psr_info.psr_version,
+ link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+ link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
+ }
}
}
amdgpu_set_panel_orientation(&aconnector->base);
@@ -5348,7 +5294,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_OLAND:
if (dce60_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5370,7 +5316,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
@@ -5398,12 +5344,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case IP_VERSION(3, 6, 0):
case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ drm_err(adev_to_drm(adev), "DM: Failed to initialize IRQ\n");
goto fail;
}
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%X\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
goto fail;
}
@@ -5564,7 +5510,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
/* if there is no object header, skip DM */
if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
- dev_info(adev->dev, "No object header, skipping DM\n");
+ drm_info(adev_to_drm(adev), "No object header, skipping DM\n");
return -ENOENT;
}
@@ -5676,7 +5622,7 @@ static int dm_early_init(struct amdgpu_ip_block *ip_block)
adev->mode_info.num_dig = 4;
break;
default:
- DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ drm_err(adev_to_drm(adev), "Unsupported DCE IP versions: 0x%x\n",
amdgpu_ip_version(adev, DCE_HWIP, 0));
return -EINVAL;
}
@@ -5825,7 +5771,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
break;
default:
- DRM_ERROR(
+ drm_err(adev_to_drm(adev),
"Unsupported screen format %p4cc\n",
&fb->format->format);
return -EINVAL;
@@ -6346,6 +6292,7 @@ static void fill_stream_properties_from_drm_display_mode(
struct amdgpu_dm_connector *aconnector = NULL;
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
+ ssize_t err;
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
aconnector = to_amdgpu_dm_connector(connector);
@@ -6392,9 +6339,17 @@ static void fill_stream_properties_from_drm_display_mode(
}
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
- drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err);
timing_out->vic = avi_frame.video_code;
- drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
+ (struct drm_connector *)connector,
+ mode_in);
+ if (err < 0)
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -6519,7 +6474,7 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
}
static struct dc_sink *
-create_fake_sink(struct dc_link *link)
+create_fake_sink(struct drm_device *dev, struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
@@ -6529,7 +6484,7 @@ create_fake_sink(struct dc_link *link)
sink = dc_sink_create(&sink_init_data);
if (!sink) {
- DRM_ERROR("Failed to create sink!\n");
+ drm_err(dev, "Failed to create sink!\n");
return NULL;
}
sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
@@ -6662,7 +6617,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
m_pref = list_first_entry_or_null(
&aconnector->base.modes, struct drm_display_mode, head);
if (!m_pref) {
- DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ drm_dbg_driver(aconnector->base.dev, "No preferred mode found in EDID\n");
return NULL;
}
}
@@ -6837,7 +6792,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from SST RX\n",
__func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
@@ -6857,7 +6812,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
+ drm_dbg_driver(drm_connector->dev, "%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -6885,6 +6840,7 @@ create_stream_for_sink(struct drm_connector *connector,
const struct dc_stream_state *old_stream,
int requested_bpc)
{
+ struct drm_device *dev = connector->dev;
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_display_mode *preferred_mode = NULL;
const struct drm_connector_state *con_state = &dm_state->base;
@@ -6907,11 +6863,6 @@ create_stream_for_sink(struct drm_connector *connector,
drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
- if (connector == NULL) {
- DRM_ERROR("connector is NULL!\n");
- return stream;
- }
-
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
aconnector = NULL;
aconnector = to_amdgpu_dm_connector(connector);
@@ -6926,7 +6877,7 @@ create_stream_for_sink(struct drm_connector *connector,
}
if (!aconnector || !aconnector->dc_sink) {
- sink = create_fake_sink(link);
+ sink = create_fake_sink(dev, link);
if (!sink)
return stream;
@@ -6938,7 +6889,7 @@ create_stream_for_sink(struct drm_connector *connector,
stream = dc_create_stream_for_sink(sink);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(dev, "Failed to create stream for sink!\n");
goto finish;
}
@@ -6970,7 +6921,7 @@ create_stream_for_sink(struct drm_connector *connector,
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in time.
*/
- DRM_DEBUG_DRIVER("No preferred mode found\n");
+ drm_dbg_driver(dev, "No preferred mode found\n");
} else if (aconnector) {
recalculate_timing = amdgpu_freesync_vid_mode &&
is_freesync_video_mode(&mode, aconnector);
@@ -7420,6 +7371,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
struct i2c_adapter *ddc;
+ struct drm_device *dev = connector->dev;
if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
@@ -7429,7 +7381,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7488,7 +7440,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
- DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ drm_err(connector->dev, "No EDID found on connector: %s.\n", connector->name);
return;
}
@@ -7622,7 +7574,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
dm_state, old_stream,
requested_bpc);
if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
+ drm_err(adev_to_drm(adev), "Failed to create stream for sink!\n");
break;
}
@@ -7697,7 +7649,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
aconnector->base.force != DRM_FORCE_ON) {
- DRM_ERROR("dc_sink is NULL!\n");
+ drm_err(connector->dev, "dc_sink is NULL!\n");
goto fail;
}
@@ -8605,7 +8557,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
i2c = create_i2c(link->ddc, false);
if (!i2c) {
- DRM_ERROR("Failed to create i2c adapter data\n");
+ drm_err(adev_to_drm(dm->adev), "Failed to create i2c adapter data\n");
return -ENOMEM;
}
@@ -8613,7 +8565,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
res = i2c_add_adapter(&i2c->base);
if (res) {
- DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
goto out_free;
}
@@ -8627,7 +8579,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
&i2c->base);
if (res) {
- DRM_ERROR("connector_init failed\n");
+ drm_err(adev_to_drm(dm->adev), "connector_init failed\n");
aconnector->connector_id = -1;
goto out_free;
}
@@ -9117,7 +9069,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
@@ -9125,7 +9077,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
*/
WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
- DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ drm_dbg_driver(new_state->base.crtc->dev, "%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
}
}
@@ -9212,13 +9164,13 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
if (crtc_state->stream) {
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
- DRM_ERROR("DC failed to set cursor attributes\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor attributes\n");
update->cursor_attributes = &crtc_state->stream->cursor_attributes;
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
- DRM_ERROR("DC failed to set cursor position\n");
+ drm_err(adev_to_drm(adev), "DC failed to set cursor position\n");
update->cursor_position = &crtc_state->stream->cursor_position;
}
@@ -9469,7 +9421,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].surface = dc_plane;
if (!bundle->surface_updates[planes_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
+ drm_err(dev, "No surface for CRTC: id=%d\n",
acrtc_attach->crtc_id);
continue;
}
@@ -9985,20 +9937,20 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
if (!wb_info) {
- DRM_ERROR("Failed to allocate wb_info\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate wb_info\n");
return;
}
acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
if (!acrtc) {
- DRM_ERROR("no amdgpu_crtc found\n");
+ drm_err(adev_to_drm(adev), "no amdgpu_crtc found\n");
kfree(wb_info);
return;
}
afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
if (!afb) {
- DRM_ERROR("No amdgpu_framebuffer found\n");
+ drm_err(adev_to_drm(adev), "No amdgpu_framebuffer found\n");
kfree(wb_info);
return;
}
@@ -10219,7 +10171,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10311,7 +10263,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
if (!dummy_updates) {
- DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
}
for (j = 0; j < status->plane_count; j++)
@@ -10519,16 +10471,20 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
*/
conn_state = drm_atomic_get_connector_state(state, connector);
- ret = PTR_ERR_OR_ZERO(conn_state);
- if (ret)
+ /* Check for error in getting connector state */
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
goto out;
+ }
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
- ret = PTR_ERR_OR_ZERO(crtc_state);
- if (ret)
+ /* Check for error in getting crtc state */
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
goto out;
+ }
/* force a restore */
crtc_state->mode_changed = true;
@@ -10536,9 +10492,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
/* Attach plane to drm_atomic_state */
plane_state = drm_atomic_get_plane_state(state, plane);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (ret)
+ /* Check for error in getting plane state */
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
goto out;
+ }
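The explicit checks replace PTR_ERR_OR_ZERO(); the two forms behave identically for both valid pointers and ERR_PTR() encodings, as this side-by-side sketch shows:

	/* compact form (before) */
	plane_state = drm_atomic_get_plane_state(state, plane);
	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* explicit form (after): same behavior, error path spelled out */
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto out;
	}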
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
@@ -10546,7 +10504,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
out:
drm_atomic_state_put(state);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(ddev, "Restoring old state failed with %i\n", ret);
return ret;
}
@@ -10630,7 +10588,7 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ drm_err(dev, "[CRTC:%d:%s] hw_done or flip_done timed out\n",
crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
@@ -10746,6 +10704,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct dm_atomic_state *dm_state = NULL;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
struct dc_stream_state *new_stream;
+ struct amdgpu_device *adev = dm->adev;
int ret = 0;
/*
@@ -10775,8 +10734,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
drm_old_conn_state = drm_atomic_get_old_connector_state(state,
connector);
- if (IS_ERR(drm_new_conn_state)) {
- ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ if (WARN_ON(!drm_new_conn_state)) {
+ ret = -EINVAL;
goto fail;
}
@@ -10799,7 +10758,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
*/
if (!new_stream) {
- DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "%s: Failed to create new stream for crtc %d\n",
__func__, acrtc->base.base.id);
ret = -ENOMEM;
goto fail;
@@ -10837,7 +10796,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ drm_dbg_driver(adev_to_drm(adev), "Mode change not required, setting mode_changed to %d",
new_crtc_state->mode_changed);
}
}
@@ -10875,7 +10834,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
- DRM_DEBUG_DRIVER(
+ drm_dbg_driver(adev_to_drm(adev),
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
@@ -10896,7 +10855,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (ret)
goto fail;
- DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ drm_dbg_driver(adev_to_drm(adev), "Disabling DRM crtc: %d\n",
crtc->base.id);
/* i.e. reset mode */
@@ -11749,7 +11708,7 @@ static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
old_plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
- DRM_ERROR("Failed to get plane state for plane %s\n", plane->name);
+ drm_err(dev, "Failed to get plane state for plane %s\n", plane->name);
return false;
}
@@ -12318,7 +12277,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
- DRM_ERROR("EDID CEA parser failed\n");
+ drm_err(adev_to_drm(dm->adev), "EDID CEA parser failed\n");
return false;
}
@@ -12326,7 +12285,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
if (output->type == DMUB_CMD__EDID_CEA_ACK) {
if (!output->ack.success) {
- DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ drm_err(adev_to_drm(dm->adev), "EDID CEA ack failed at offset %d\n",
output->ack.offset);
}
} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
@@ -12338,7 +12297,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
} else {
- DRM_WARN("Unknown EDID CEA parser results\n");
+ drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
return false;
}
@@ -12554,7 +12513,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
- DRM_ERROR("%s - Connector has no state", __func__);
+ drm_err(adev_to_drm(adev), "%s - Connector has no state", __func__);
goto update;
}
@@ -12739,7 +12698,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
}
if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
*operation_result = AUX_RET_ERROR_TIMEOUT;
goto out;
}
@@ -12750,11 +12709,11 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* lead to this error. We can ignore this for now.
*/
if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
- DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ drm_warn(adev_to_drm(adev), "DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
}
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ *operation_result = p_notify->result;
goto out;
}
@@ -12763,7 +12722,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
/* The reply is stored in the top nibble of the command. */
payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
- if (!payload->write && p_notify->aux_reply.length)
+ /* a write request may also get back a byte giving the number of bytes partially written */
+ if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
@@ -12776,6 +12736,79 @@ out:
return ret;
}
+static void abort_fused_io(
+ struct dc_context *ctx,
+ const struct dmub_cmd_fused_request *request
+)
+{
+ union dmub_rb_cmd command = { 0 };
+ struct dmub_rb_cmd_fused_io *io = &command.fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;
+ io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
+ io->request = *request;
+ dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+static bool execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line;
+
+ if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io))
+ return false;
+
+ struct fused_io_sync *sync = &dev->dm.fused_io[ddc_line];
+ struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io;
+ const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ && first->header.ret_status
+ && first->request.status == FUSED_REQUEST_STATUS_SUCCESS;
+
+ if (!result)
+ return false;
+
+ while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) {
+ reinit_completion(&sync->replied);
+
+ struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data;
+
+ static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch");
+
+ if (reply->identifier == first->request.identifier) {
+ first->request = *reply;
+ return true;
+ }
+ }
+
+ reinit_completion(&sync->replied);
+ first->request.status = FUSED_REQUEST_STATUS_TIMEOUT;
+ abort_fused_io(ctx, &first->request);
+ return false;
+}
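The wait loop above merits a note; a condensed restatement of its contract:

	/*
	 * Pairing wait_for_completion_timeout() with reinit_completion() in a
	 * loop lets the waiter discard stale replies: a reply whose identifier
	 * does not match is consumed, the completion is re-armed, and the wait
	 * restarts with a fresh timeout window per wakeup.
	 */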
+
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us)
+{
+ struct amdgpu_display_manager *dm = &dev->dm;
+
+ mutex_lock(&dm->dpia_aux_lock);
+
+ const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us);
+
+ mutex_unlock(&dm->dpia_aux_lock);
+ return result;
+}
+
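For reference, a hedged sketch of driving this interface from a caller; the header setup mirrors abort_fused_io() above, while the request contents and the 10 ms timeout are illustrative assumptions:

	union dmub_rb_cmd command = { 0 };
	struct dmub_rb_cmd_fused_io *io = &command.fused_io;

	io->header.type = DMUB_CMD__FUSED_IO;
	io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT;	/* illustrative sub-type */
	io->header.payload_bytes = sizeof(*io) - sizeof(io->header);
	/* fill io->request (identifier, u.aux.ddc_line, ...) for the target link, then: */
	if (!amdgpu_dm_execute_fused_io(adev, link, &command, 1, 10000))
		drm_warn(adev_to_drm(adev), "fused io request failed or timed out\n");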
int amdgpu_dm_process_dmub_set_config_sync(
struct dc_context *ctx,
unsigned int link_index,
@@ -12794,7 +12827,7 @@ int amdgpu_dm_process_dmub_set_config_sync(
ret = 0;
*operation_result = adev->dm.dmub_notify->sc_status;
} else {
- DRM_ERROR("wait_for_completion_timeout timeout!");
+ drm_err(adev_to_drm(adev), "wait_for_completion_timeout timeout!");
ret = -1;
*operation_result = SET_CONFIG_UNKNOWN_ERROR;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 385faaca6e26..d7d92f9911e4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,7 +50,7 @@
#define AMDGPU_DM_MAX_NUM_EDP 2
-#define AMDGPU_DMUB_NOTIFICATION_MAX 7
+#define AMDGPU_DMUB_NOTIFICATION_MAX 8
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
@@ -81,6 +81,7 @@ struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;
+struct dmub_cmd_fused_request;
struct amd_vsdb_block {
unsigned char ieee_id[3];
@@ -276,6 +277,10 @@ struct hpd_rx_irq_offload_work {
* @offload_wq: offload work queue that this work is queued to
*/
struct hpd_rx_irq_offload_work_queue *offload_wq;
+ /**
+ * @adev: amdgpu_device pointer
+ */
+ struct amdgpu_device *adev;
};
/**
@@ -614,6 +619,13 @@ struct amdgpu_display_manager {
bool aux_hpd_discon_quirk;
/**
+ * @edp0_on_dp1_quirk:
+ *
+ * quirk for platforms that put edp0 on DP1.
+ */
+ bool edp0_on_dp1_quirk;
+
+ /**
* @dpia_aux_lock:
*
* Guards access to DPIA AUX
@@ -633,6 +645,16 @@ struct amdgpu_display_manager {
* OEM i2c bus
*/
struct amdgpu_i2c_adapter *oem_i2c;
+
+ /**
+ * @fused_io:
+ *
+ * dmub fused io interface
+ */
+ struct fused_io_sync {
+ struct completion replied;
+ char reply_data[0x40]; // Cannot include dmub_cmd here
+ } fused_io[8];
};
enum dsc_clock_force_state {
@@ -1012,6 +1034,14 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
struct aux_payload *payload, enum aux_return_code_type *operation_result);
+bool amdgpu_dm_execute_fused_io(
+ struct amdgpu_device *dev,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
@@ -1045,4 +1075,6 @@ void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+void retrieve_dmi_info(struct amdgpu_display_manager *dm);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index e8bdd7f0c460..87058271b00c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -246,8 +246,6 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
- struct amdgpu_device *adev = drm_to_adev(dm->ddev);
- int r;
mutex_lock(&dm->dc_lock);
@@ -275,15 +273,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0) {
- r = amdgpu_dpm_pause_power_profile(adev, true);
- if (r)
- dev_warn(adev->dev, "failed to set default power profile mode\n");
+ if (dm->active_vblank_irq_count == 0)
dc_allow_idle_optimizations(dm->dc, true);
- r = amdgpu_dpm_pause_power_profile(adev, false);
- if (r)
- dev_warn(adev->dev, "failed to restore the power profile mode\n");
- }
mutex_unlock(&dm->dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 8f22ad966543..c16962256514 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -26,6 +26,7 @@
#include "amdgpu_dm_hdcp.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
+#include "dc_fused_io.h"
#include "dm_helpers.h"
#include <drm/display/drm_hdcp_helper.h>
#include "hdcp_psp.h"
@@ -76,6 +77,34 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static bool lp_atomic_write_poll_read_i2c(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
+static bool lp_atomic_write_poll_read_aux(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ struct dc_link *link = handle;
+
+ return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb);
+}
+
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
struct ta_hdcp_shared_memory *hdcp_cmd;
@@ -732,7 +761,10 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
+ struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config;
+ struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
+
+ config->psp.handle = &adev->psp;
if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
@@ -740,12 +772,22 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
dc->ctx->dce_version == DCN_VERSION_3_51 ||
dc->ctx->dce_version == DCN_VERSION_3_6 ||
dc->ctx->dce_version == DCN_VERSION_3_16)
- hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
- hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
- hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
- hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
- hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ config->psp.caps.dtm_v3_supported = 1;
+ config->ddc.handle = dc_get_link_at_index(dc, i);
+
+ ddc_funcs->write_i2c = lp_write_i2c;
+ ddc_funcs->read_i2c = lp_read_i2c;
+ ddc_funcs->write_dpcd = lp_write_dpcd;
+ ddc_funcs->read_dpcd = lp_read_dpcd;
+
+ config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
+ if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) {
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
+ } else {
+ ddc_funcs->atomic_write_poll_read_i2c = NULL;
+ ddc_funcs->atomic_write_poll_read_aux = NULL;
+ }
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
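For context on the hunk above: the new atomic_write_poll_read callbacks let mod_hdcp hand a write, a bounded status poll, and a read to firmware as one fused transaction, so no other DDC traffic can interleave with the sequence (relevant to the HDCP locality check that the hdcp_lc_* debug flags also gate). A minimal software sketch of the same pattern, with a hypothetical xfer() bus primitive standing in for the real mod_hdcp op structs, and assuming the poll completes once the masked MSB of the status byte clears:

	static bool write_poll_read(bool (*xfer)(void *bus, bool rd, u8 *buf, u32 len),
				    void *bus, u8 *wr, u32 wr_len, u8 poll_mask_msb,
				    u8 *rd, u32 rd_len, u32 timeout_us)
	{
		u8 status;
		u32 waited = 0;

		if (!xfer(bus, false, wr, wr_len))	/* issue the request */
			return false;
		for (;;) {				/* bounded status poll */
			if (!xfer(bus, true, &status, 1))
				return false;
			if (!(status & poll_mask_msb))
				break;
			if (waited >= timeout_us)
				return false;		/* poll timed out */
			udelay(10);
			waited += 10;
		}
		return xfer(bus, true, rd, rd_len);	/* fetch the result */
	}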
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 1395a748d726..d4395b92fb85 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -630,6 +630,19 @@ bool dm_helpers_submit_i2c(
return result;
}
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+)
+{
+ struct amdgpu_device *dev = ctx->driver_context;
+
+ return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us);
+}
+
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
bool is_write_cmd,
unsigned char cmd,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 074b79fd5822..25e8befbcc47 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -62,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
+ uint8_t copy[16];
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -77,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
+ if (payload.write) {
+ memcpy(copy, msg->buffer, msg->size);
+ payload.data = copy;
+ }
+
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
@@ -100,9 +106,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
*/
if (payload.write && result >= 0) {
if (result) {
- /*one byte indicating partially written bytes. Force 0 to retry*/
- drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
- result = 0;
+ /* one byte reporting the count of partially written bytes */
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+ result = payload.data[0];
} else if (!payload.reply[0])
/*I2C_ACK|AUX_ACK*/
result = msg->size;
@@ -127,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
- drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
}
if (payload.reply[0])
- drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
payload.reply[0]);
return result;
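Two behavioural notes on the hunk above: the write payload is copied into a local buffer so the caller's msg->buffer is preserved even if the transfer path modifies the data in place, and a short write now reports how many bytes the sink accepted (payload.data[0]) instead of forcing 0, which made the core retry from scratch. A hedged sketch of the caller-side loop this return value enables; this is a hypothetical helper, not the actual DRM retry path, and it assumes a native AUX write whose DPCD address advances byte-wise (I2C-over-AUX addressing differs):

	static int aux_write_all(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
	{
		size_t done = 0;

		while (done < msg->size) {
			struct drm_dp_aux_msg part = *msg;
			ssize_t ret;

			part.address += done;
			part.buffer = (u8 *)msg->buffer + done;
			part.size = msg->size - done;
			ret = aux->transfer(aux, &part);
			if (ret < 0)
				return ret;
			if (!ret)
				return -EIO;	/* no forward progress */
			done += ret;		/* resume after accepted bytes */
		}
		return 0;
	}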
@@ -1733,16 +1739,17 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
struct dc_dsc_policy dsc_policy = {0};
+ bool is_dsc_possible;
dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link));
- dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
- stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp * 16,
- dsc_policy.max_target_bpp * 16,
- &stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
-
- return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+ is_dsc_possible = dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
+
+ return is_dsc_possible;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 3e0f45f1711c..b7c6e8d13435 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -948,13 +948,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index e140b7a04d72..f984cb0cb889 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -87,14 +87,6 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->psr_settings.psr_feature_enabled = true;
}
-
- DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
- link->psr_settings.psr_feature_enabled,
- link->psr_settings.psr_version,
- link->dpcd_caps.psr_info.psr_version,
- link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
- link->dpcd_caps.psr_info.psr2_su_y_granularity_cap);
-
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
new file mode 100644
index 000000000000..1da07ebf9217
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/dmi.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .callback = aux_hpd_discon_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 645 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 445 14 inch G11 Notebook PC"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 465 16 inch G11 Notebook PC"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
+void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ struct drm_device *dev = dm->ddev;
+ int dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+ dm->edp0_on_dp1_quirk = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
+
+ if (quirk_entries.aux_hpd_discon) {
+ dm->aux_hpd_discon_quirk = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ dm->edp0_on_dp1_quirk = true;
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
+ }
+}
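A minimal usage sketch for the new quirk file; the helper below is hypothetical (the real call site is reached through the retrieve_dmi_info() declaration added to amdgpu_dm.h earlier in this series):

	static void example_init_quirks(struct amdgpu_display_manager *dm)
	{
		/* dmi_check_system() walks dmi_quirk_table once and fires the
		 * matching callbacks, which latch the static quirk_entries
		 * flags; retrieve_dmi_info() then mirrors them into dm. */
		retrieve_dmi_info(dm);

		if (dm->aux_hpd_discon_quirk) {
			/* e.g. tolerate a spurious HPD disconnect mid-AUX transfer */
		}
		if (dm->edp0_on_dp1_quirk) {
			/* e.g. allow the eDP0 panel to enumerate on the DP1 link */
		}
	}

Because quirk_entries is file-scope static, the DMI scan is effectively once-per-boot state; the TODO above the table terminator notes the plan to replace the fixed list with a dynamic option.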
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 0d5fefb0f591..d9527c05fc87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -102,13 +102,13 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector
r = amdgpu_bo_reserve(rbo, true);
if (r) {
- dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ drm_err(adev_to_drm(adev), "fail to reserve bo (%d)\n", r);
return r;
}
r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
if (r) {
- dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r);
goto error_unlock;
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3e1f5b689718..3c9ecea7eebc 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -53,31 +53,30 @@ DC_LIBS += hdcp
ifdef CONFIG_DRM_AMD_DC_FP
DC_LIBS += sspl
-DC_SPL_TRANS += dc_spl_translate.o
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, dc_spl_translate.o)
endif
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
-DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
+FILES =
+FILES += dc_dmub_srv.o
+FILES += dc_edid_parser.o
+FILES += dc_fused_io.o
+FILES += dc_helper.o
+FILES += core/dc.o
+FILES += core/dc_debug.o
+FILES += core/dc_hw_sequencer.o
+FILES += core/dc_link_enc_cfg.o
+FILES += core/dc_link_exports.o
+FILES += core/dc_resource.o
+FILES += core/dc_sink.o
+FILES += core/dc_stat.o
+FILES += core/dc_state.o
+FILES += core/dc_stream.o
+FILES += core/dc_surface.o
+FILES += core/dc_vm_helper.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, $(FILES))
-DISPLAY_CORE += dc_vm_helper.o
-
-AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
-
-AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
-
-AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS))
-
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
-AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
-
-DC_DMUB += dc_dmub_srv.o
-DC_EDID += dc_edid_parser.o
-AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
-AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
-AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
-
-AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index 681799468487..d897f8a30ede 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -1393,7 +1393,7 @@ static void calculate_bandwidth(
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each set of clock frequencies*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
- /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
@@ -1407,7 +1407,7 @@ static void calculate_bandwidth(
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each display pipe*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
- /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 88d3f9d7dd55..452206b5095e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -51,8 +51,6 @@ static inline unsigned long long complete_integer_division_u64(
{
unsigned long long result;
- ASSERT(divisor);
-
result = div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -213,9 +211,6 @@ struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg)
* @note
* Good idea to use Newton's method
*/
-
- ASSERT(arg.value);
-
return dc_fixpt_from_fraction(
dc_fixpt_one.value,
arg.value);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 3bacf470f7c5..67f08495b7e6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2384,10 +2384,10 @@ static enum bp_result get_integrated_info_v8(
}
/*
- * get_integrated_info_v8
+ * get_integrated_info_v9
*
* @brief
- * Get V8 integrated BIOS information
+ * Get V9 integrated BIOS information
*
* @param
* bios_parser *bp - [in]BIOS parser handler to get master data table
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 19897fa52e7e..d82a52319088 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -142,17 +142,3 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
return actual_dispclk_set_mhz * 1000;
}
-
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
index 083cb3158859..81d7c912549c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h
@@ -27,6 +27,5 @@
#define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 23b390245b5d..5a633333dbb5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -164,20 +164,6 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
return actual_dispclk_set_mhz * 1000;
}
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
-{
- int actual_dprefclk_set_mhz = -1;
-
- actual_dprefclk_set_mhz = rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDprefclkFreq,
- khz_to_mhz_ceil(clk_mgr->base.dprefclk_khz));
-
- /* TODO: add code for programing DP DTO, currently this is down by command table */
-
- return actual_dprefclk_set_mhz * 1000;
-}
-
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
int actual_dcfclk_set_mhz = -1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index 1ce19d875358..f76fad87f0e1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -30,7 +30,6 @@ enum dcn_pwr_state;
int rn_vbios_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
-int rn_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 2d14346b680e..478b4d6a3544 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -49,12 +49,9 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000, 0x04040000 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
+
+#define CTX clk_mgr->base.ctx
+#define IND_REG(offset) offset
#define regBIF_BX_PF2_RSMU_INDEX 0x0000
#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1
@@ -67,9 +64,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define FN(reg_name, field) \
FD(reg_name##__##field)
-#define REG_NBIO(reg_name) \
- (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
-
#undef DC_LOGGER
#define DC_LOGGER \
CTX->logger
@@ -77,6 +71,13 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
#define mmMP1_C2PMSG_3 0x3B1050C
+#define reg__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define reg__MP1_C2PMSG_3__SHIFT (0)
+
+#define data_reg_name__MP1_C2PMSG_3_MASK (0xFFFFFFFF)
+#define data_reg_name__MP1_C2PMSG_3__SHIFT (0)
+
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0
@@ -153,12 +154,10 @@ static int dcn315_smu_send_msg_with_param(
for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
/* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
- read_back_data = generic_read_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3);
+ IX_REG_SET_SYNC(mmMP1_C2PMSG_3, 0,
+ MP1_C2PMSG_3, msg_id);
+ IX_REG_GET_SYNC(mmMP1_C2PMSG_3,
+ MP1_C2PMSG_3, &read_back_data);
if (read_back_data == msg_id)
break;
udelay(2);
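The rewrite above drops the hand-rolled NBIO index/data access in favour of the IX_REG_SET_SYNC()/IX_REG_GET_SYNC() helpers, backed by the CTX and IND_REG() definitions added at the top of the hunk. The underlying indirect-access pattern, shown with placeholder register names:

	/* Illustrative only: select the target offset through the INDEX
	 * window, then move data through the DATA window. */
	static u32 indirect_read(void __iomem *mmio, u32 index_reg, u32 data_reg,
				 u32 offset)
	{
		writel(offset, mmio + index_reg);
		return readl(mmio + data_reg);
	}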
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
index 6a6ae618650b..4607eff07253 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
@@ -65,6 +65,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B04B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 142de8938d7c..bb1ac12a2b09 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -90,6 +90,7 @@
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
#define mmCLK5_spll_field_8 0x1B24B
+#define mmCLK6_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
@@ -116,6 +117,7 @@
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+#define CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L
#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
#undef FN
@@ -596,7 +598,11 @@ static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
uint32_t ssc_enable;
- ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ if (clk_mgr_base->ctx->dce_version == DCN_VERSION_3_51) {
+ ssc_enable = REG_READ(CLK6_spll_field_8) & CLK6_spll_field_8__spll_ssc_en_MASK;
+ } else {
+ ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
+ }
return ssc_enable != 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index f6f0e6a33001..604d256cb47a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -84,8 +84,8 @@
#define VBIOSSMC_MSG_AllowZstatesEntry 0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry 0x16
#define VBIOSSMC_MSG_SetDtbClk 0x17
-#define VBIOSSMC_MSG_DispPsrEntry 0x18 ///< Display PSR entry, DMU
-#define VBIOSSMC_MSG_DispPsrExit 0x19 ///< Display PSR exit, DMU
+#define VBIOSSMC_MSG_DispIPS2Entry 0x18 ///< Display IPS2 entry, DMU
+#define VBIOSSMC_MSG_DispIPS2Exit 0x19 ///< Display IPS2 exit, DMU
#define VBIOSSMC_MSG_DisableLSdma 0x1A ///< Disable LSDMA; only sent by VBIOS
#define VBIOSSMC_MSG_DpControllerPhyStatus 0x1B ///< Inform PMFW about the pre conditions for turning SLDO2 on/off . bit[0]==1 precondition is met, bit[1-2] are for DPPHY number
#define VBIOSSMC_MSG_QueryIPS2Support 0x1C ///< Return 1: support; else not supported
@@ -475,7 +475,7 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
retv = dcn35_smu_send_msg_with_param(
clk_mgr,
- VBIOSSMC_MSG_DispPsrExit,
+ VBIOSSMC_MSG_DispIPS2Exit,
0);
smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
return retv;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 28d1353f403d..b34b5b52236d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -36,7 +36,9 @@
#include "resource.h"
#include "dc_state.h"
#include "dc_state_priv.h"
+#include "dc_plane.h"
#include "dc_plane_priv.h"
+#include "dc_stream_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -239,6 +241,7 @@ static bool create_links(
DC_LOG_DC("BIOS object table - end");
/* Create a link for each usb4 dpia port */
+ dc->lowest_dpia_link_index = MAX_LINKS;
for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
struct link_init_data link_init_params = {0};
struct dc_link *link;
@@ -251,6 +254,9 @@ static bool create_links(
link = dc->link_srv->create_link(&link_init_params);
if (link) {
+ if (dc->lowest_dpia_link_index > dc->link_count)
+ dc->lowest_dpia_link_index = dc->link_count;
+
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
@@ -439,9 +445,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* Don't adjust DRR while there's bandwidth optimizations pending to
* avoid conflicting with firmware updates.
*/
- if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required || dc->wm_optimized_required)
+ if (dc->ctx->dce_version > DCE_VERSION_MAX) {
+ if (dc->optimized_required || dc->wm_optimized_required) {
+ stream->adjust.timing_adjust_pending = true;
return false;
+ }
+ }
dc_exit_ips_for_hw_access(dc);
@@ -1192,6 +1201,12 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
+ if (dc->debug.visual_confirm & VISUAL_CONFIRM_EXPLICIT) {
+ memcpy(&pipe_ctx->visual_confirm_color, &pipe_ctx->plane_state->visual_confirm_color,
+ sizeof(pipe_ctx->visual_confirm_color));
+ return;
+ }
+
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1225,6 +1240,51 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
}
}
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color)
+{
+ struct dc_stream_status *stream_status = dc_stream_get_status(stream_state);
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ struct dc_plane_state *plane_state = NULL;
+
+ if (!stream_status)
+ return;
+
+ switch (dc->debug.visual_confirm) {
+ case VISUAL_CONFIRM_DISABLE:
+ return;
+ case VISUAL_CONFIRM_PSR:
+ case VISUAL_CONFIRM_FAMS:
+ pipe_ctx = dc_stream_get_pipe_ctx(stream_state);
+ if (!pipe_ctx)
+ return;
+ dc_dmub_srv_get_visual_confirm_color_cmd(dc, pipe_ctx);
+ memcpy(color, &dc->ctx->dmub_srv->dmub->visual_confirm_color, sizeof(struct tg_color));
+ return;
+
+ default:
+ /* find plane with highest layer_index */
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i]->visible)
+ plane_state = stream_status->plane_states[i];
+ }
+ if (!plane_state)
+ return;
+ /* find pipe that contains plane with highest layer index */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state == plane_state) {
+ memcpy(color, &pipe->visual_confirm_color, sizeof(struct tg_color));
+ return;
+ }
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -2053,6 +2113,18 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.enable_accelerated_mode(dc, context);
}
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ // Only delay the OTG master for a given config
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ // dc_commit_state_no_check is always a full update
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, pipe, false);
+ break;
+ }
+ }
+ }
+
if (context->stream_count > get_seamless_boot_stream_count(context) ||
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
@@ -2117,6 +2189,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->hwss.program_front_end_for_ctx) {
dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe);
+ }
+ }
+
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
@@ -2258,11 +2338,15 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
for (i = 0; i < params->stream_count; i++) {
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
+ struct dc_sink *sink = stream->sink;
/* revalidate streams */
- res = dc_validate_stream(dc, stream);
- if (res != DC_OK)
- return res;
+ if (!dc_is_virtual_signal(sink->sink_signal)) {
+ res = dc_validate_stream(dc, stream);
+ if (res != DC_OK)
+ return res;
+ }
+
dc_stream_log(dc, stream);
@@ -2815,7 +2899,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
int i;
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
- if (dc->idle_optimizations_allowed)
+ if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc))
overall_type = UPDATE_TYPE_FULL;
if (stream_status == NULL || stream_status->plane_count != surface_count)
@@ -3168,7 +3252,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->crtc_timing_adjust) {
if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
- stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max)
+ stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
+ stream->adjust.timing_adjust_pending)
update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
update->crtc_timing_adjust->timing_adjust_pending = false;
@@ -3219,7 +3304,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (dsc_validate_context) {
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
- if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) {
stream->timing.dsc_cfg = old_dsc_cfg;
stream->timing.flags.DSC = old_dsc_enabled;
update->dsc_config = NULL;
@@ -3248,7 +3333,7 @@ static void backup_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- scratch->plane_states[i] = *status->plane_states[i];
+ dc_plane_copy_config(&scratch->plane_states[i], status->plane_states[i]);
}
scratch->stream_state = *stream;
}
@@ -3264,10 +3349,7 @@ static void restore_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
- /* refcount will always be valid, restore everything else */
- struct kref refcount = status->plane_states[i]->refcount;
- *status->plane_states[i] = scratch->plane_states[i];
- status->plane_states[i]->refcount = refcount;
+ dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
*stream = scratch->stream_state;
}
@@ -3444,7 +3526,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3998,6 +4080,7 @@ static void commit_planes_for_stream(struct dc *dc,
&context->res_ctx,
stream);
ASSERT(top_pipe_to_program != NULL);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4048,6 +4131,9 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
}
+ if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
@@ -4168,12 +4254,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FAST)
continue;
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
- if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
- /*turn off triple buffer for full update*/
- dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
- }
stream_status =
stream_get_status(context, pipe_ctx->stream);
@@ -4182,8 +4262,37 @@ static void commit_planes_for_stream(struct dc *dc,
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+
if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
+
+ // Pipe stays busy until a specific frame and line number
+ if (dc->hwseq->funcs.set_wait_for_update_needed_for_pipe && update_type == UPDATE_TYPE_FULL) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwseq->funcs.set_wait_for_update_needed_for_pipe(dc, pipe_ctx);
+ }
+ }
+
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
@@ -4523,7 +4632,7 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) {
/* prevent underflow and corruption when reconfiguring pipes */
force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
@@ -4958,6 +5067,9 @@ static bool full_update_required(struct dc *dc,
if (dc->idle_optimizations_allowed)
return true;
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
return false;
}
@@ -5043,7 +5155,7 @@ static bool update_planes_and_stream_v1(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
if (update_type >= UPDATE_TYPE_FULL) {
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_state_release(context);
return false;
@@ -6187,15 +6299,22 @@ bool dc_abm_save_restore(
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
unsigned int i;
- bool subvp_sw_cursor_req = false;
+ unsigned int max_cursor_size = dc->caps.max_cursor_size;
+ unsigned int stream_cursor_size;
- for (i = 0; i < dc->current_state->stream_count; i++) {
- if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
- subvp_sw_cursor_req = true;
- break;
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc,
+ dc->current_state,
+ dc->current_state->streams[i]);
+
+ if (stream_cursor_size < max_cursor_size) {
+ max_cursor_size = stream_cursor_size;
+ }
}
}
- properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
+
+ properties->cursor_size_limit = max_cursor_size;
}
/**
@@ -6261,3 +6380,56 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
else
return 0;
}
+/**
+ ***********************************************************************************************
+ * dc_get_host_router_index: Get the host router index for a DPIA link
+ *
+ * This function returns the host router index of the target link, provided the target link is a DPIA link.
+ *
+ * @param [in] link: target link
+ * @param [out] host_router_index: host router index of the target link
+ *
+ * @return: true if the host router index is found and valid.
+ *
+ ***********************************************************************************************
+ */
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
+{
+ struct dc *dc = link->ctx->dc;
+
+ if (link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ if (link->link_index < dc->lowest_dpia_link_index)
+ return false;
+
+ *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router;
+ if (*host_router_index < dc->caps.num_of_host_routers)
+ return true;
+ else
+ return false;
+}
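A worked example of the mapping, under assumed values (lowest_dpia_link_index = 2, num_of_dpias_per_host_router = 2, num_of_host_routers = 2):

	/*
	 *	link 2 -> (2 - 2) / 2 = host router 0
	 *	link 3 -> (3 - 2) / 2 = host router 0
	 *	link 4 -> (4 - 2) / 2 = host router 1
	 *	link 5 -> (5 - 2) / 2 = host router 1
	 *
	 * A link index of 6 would compute host router 2, which fails the
	 * num_of_host_routers bound and makes the function return false.
	 */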
+
+bool dc_is_cursor_limit_pending(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i]))
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_can_clear_cursor_limit(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state))
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 650e89825968..7551d0a3fe82 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -266,6 +266,8 @@ char *dc_status_to_str(enum dc_status status)
return "Fail dp payload allocation";
case DC_FAIL_DP_LINK_BANDWIDTH:
return "Insufficient DP link bandwidth";
+ case DC_FAIL_HW_CURSOR_SUPPORT:
+ return "HW Cursor not supported";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 55b32dfbfdd6..7014b8d000bb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -697,7 +697,7 @@ void get_fams2_visual_confirm_color(
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
@@ -896,7 +896,7 @@ void hwss_build_fast_sequence(struct dc *dc,
}
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps)
{
unsigned int i;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 313a32248cd7..3da25bd8b578 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1342,32 +1342,6 @@ static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
data->viewport_c.y += src.y / vpc_div;
}
-static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
-{
- uint32_t refresh_rate;
- struct dc *dc = stream->ctx->dc;
-
- refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
- stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
- refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
- refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
-
- /* If there's any stream that fits the SubVP high refresh criteria,
- * we must return true. This is because cursor updates are asynchronous
- * with full updates, so we could transition into a SubVP config and
- * remain in HW cursor mode if there's no cursor update which will
- * then cause corruption.
- */
- if ((refresh_rate >= 120 && refresh_rate <= 175 &&
- stream->timing.v_addressable >= 1080 &&
- stream->timing.v_addressable <= 2160) &&
- (dc->current_state->stream_count > 1 ||
- (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
- return true;
-
- return false;
-}
-
static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern(
enum dp_test_pattern test_pattern)
{
@@ -3937,6 +3911,10 @@ enum dc_status resource_map_pool_resources(
if (!dc->link_srv->dp_decide_link_settings(stream,
&pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
+
+ dc->link_srv->dp_decide_tunnel_settings(stream,
+ &pipe_ctx->link_config.dp_tunnel_settings);
+
if (dc->link_srv->dp_get_encoding_format(
&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
@@ -4259,6 +4237,11 @@ enum dc_status dc_validate_with_context(struct dc *dc,
}
}
+ /* clear subvp cursor limitations */
+ for (i = 0; i < context->stream_count; i++) {
+ dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false);
+ }
+
res = dc_validate_global_state(dc, context, fast_validate);
/* calculate pixel rate divider after deciding pixel clock & odm combine */
@@ -4385,8 +4368,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
- result = DC_FAIL_BANDWIDTH_VALIDATE;
+ result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate);
return result;
}
@@ -5538,23 +5520,17 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
return DC_OK;
}
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
+struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
{
- if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
- return true;
- if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
- else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
- ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
- return true;
-
- return false;
+ return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
}
-struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
+static bool resource_allocate_mcache(struct dc_state *context, const struct dc_mcache_params *mcache_params)
{
- return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+ if (context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config)
+ context->clk_mgr->ctx->dc->res_pool->funcs->program_mcache_pipe_config(context, mcache_params);
+
+ return true;
}
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
@@ -5576,6 +5552,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio
dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase;
+ dml2_options->callbacks.allocate_mcache = &resource_allocate_mcache;
dml2_options->svp_pstate.callbacks.dc = dc;
dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 1b2cce127981..4db7383720fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include "dc_types.h"
#include "core_types.h"
#include "core_status.h"
#include "dc_state.h"
@@ -812,8 +813,12 @@ enum dc_status dc_state_add_phantom_stream(const struct dc *dc,
if (phantom_stream_status) {
phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ phantom_stream_status->mall_stream_config.subvp_limit_cursor_size = false;
+ phantom_stream_status->mall_stream_config.cursor_size_limit_subvp = false;
}
+ dc_state_set_stream_subvp_cursor_limit(main_stream, state, true);
+
return res;
}
@@ -939,13 +944,20 @@ void dc_state_release_phantom_streams_and_planes(
const struct dc *dc,
struct dc_state *state)
{
+ unsigned int phantom_count;
+ struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+ struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
int i;
- for (i = 0; i < state->phantom_stream_count; i++)
- dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+ phantom_count = state->phantom_stream_count;
+ memcpy(phantom_streams, state->phantom_streams, sizeof(struct dc_stream_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_stream(dc, state, phantom_streams[i]);
- for (i = 0; i < state->phantom_plane_count; i++)
- dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+ phantom_count = state->phantom_plane_count;
+ memcpy(phantom_planes, state->phantom_planes, sizeof(struct dc_plane_state *) * MAX_PHANTOM_PIPES);
+ for (i = 0; i < phantom_count; i++)
+ dc_state_release_phantom_plane(dc, state, phantom_planes[i]);
}
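The memcpy snapshots above exist because dc_state_release_phantom_stream()/_plane() remove entries from the very arrays being walked, so iterating the live arrays could skip elements as the lists compact. The general shape of the fix, sketched with a stand-in element type (struct obj is illustrative):

	/* Sketch: when release() mutates the array being traversed, walk a
	 * private snapshot so every original element is visited exactly once. */
	static void release_all(struct obj **array, unsigned int count,
				void (*release)(struct obj *))
	{
		struct obj *snapshot[MAX_PHANTOM_PIPES];
		unsigned int i;

		memcpy(snapshot, array, sizeof(snapshot[0]) * count);
		for (i = 0; i < count; i++)
			release(snapshot[i]);	/* may shrink the live array */
	}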
struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id)
@@ -977,3 +989,94 @@ bool dc_state_is_fams2_in_use(
return is_fams2_in_use;
}
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.subvp_limit_cursor_size = limit;
+ }
+}
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.subvp_limit_cursor_size;
+ }
+
+ return limit;
+}
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit)
+{
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ stream_status->mall_stream_config.cursor_size_limit_subvp = limit;
+ }
+}
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ limit = stream_status->mall_stream_config.cursor_size_limit_subvp;
+ }
+
+ return limit;
+}
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ bool can_clear_limit = false;
+
+ struct dc_stream_status *stream_status;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+
+ if (stream_status) {
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, state) &&
+ (stream_status->mall_stream_config.type == SUBVP_PHANTOM ||
+ stream->hw_cursor_req ||
+ !stream_status->mall_stream_config.subvp_limit_cursor_size ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, state, &stream->cursor_attributes));
+ }
+
+ return can_clear_limit;
+}
+
+bool dc_state_is_subvp_in_use(struct dc_state *state)
+{
+ uint32_t i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (dc_state_get_stream_subvp_type(state, state->streams[i]) != SUBVP_NONE)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 0478dd856d8c..b883fb24fa12 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -265,13 +265,16 @@ void program_cursor_attributes(
}
/*
- * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ * dc_stream_check_cursor_attributes() - Check validity of cursor attributes and surface address
*/
-bool dc_stream_set_cursor_attributes(
- struct dc_stream_state *stream,
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
const struct dc_cursor_attributes *attributes)
{
- struct dc *dc;
+ const struct dc *dc;
+
+ unsigned int max_cursor_size;
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -289,24 +292,38 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4.
- * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case:
- * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs)
- * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
- * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
+ /* SubVP is not compatible with HW cursor larger than what can fit in cursor SRAM.
+ * Therefore, if cursor is greater than this, fallback to SW cursor.
*/
- if (dc->debug.allow_sw_cursor_fallback &&
- attributes->height * attributes->width * 4 > 16384 &&
- !stream->hw_cursor_req) {
- if (check_subvp_sw_cursor_fallback_req(dc, stream))
+ if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) {
+ max_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, state, stream);
+ max_cursor_size = max_cursor_size * max_cursor_size * 4;
+
+ if (attributes->height * attributes->width * 4 > max_cursor_size) {
return false;
+ }
}
- stream->cursor_attributes = *attributes;
-
return true;
}
+/*
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
+ */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ bool result = false;
+
+ if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
+ stream->cursor_attributes = *attributes;
+ result = true;
+ }
+
+ return result;
+}
+
bool dc_stream_program_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes)
@@ -552,6 +569,14 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
return true;
}
+/**
+ * dc_stream_remove_writeback() - Disables writeback and removes writeback info.
+ * @dc: Display core control structure.
+ * @stream: Display core stream state.
+ * @dwb_pipe_inst: Display writeback pipe.
+ *
+ * Return: True on success, false otherwise.
+ */
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
@@ -1109,3 +1134,26 @@ unsigned int dc_stream_get_max_flickerless_instant_vtotal_increase(struct dc_str
return dc_stream_get_max_flickerless_instant_vtotal_delta(stream, is_gaming, false);
}
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool is_limit_pending = false;
+
+ if (dc->current_state)
+ is_limit_pending = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state);
+
+ return is_limit_pending;
+}
+
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream)
+{
+ bool can_clear_limit = false;
+
+ if (dc->current_state)
+ can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state) &&
+ (stream->hw_cursor_req ||
+ !stream->cursor_position.enable ||
+ dc_stream_check_cursor_attributes(stream, dc->current_state, &stream->cursor_attributes));
+
+ return can_clear_limit;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index e6fcc21bb9bc..922f23557f5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -109,7 +109,8 @@ struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
*****************************************************************************
*/
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state)
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags)
{
const struct dc_plane_status *plane_status;
struct dc *dc;
@@ -136,7 +137,7 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- if (pipe_ctx->plane_state)
+ if (pipe_ctx->plane_state && flags.bits.address)
pipe_ctx->plane_state->status.is_flip_pending = false;
break;
@@ -151,7 +152,8 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state != plane_state)
continue;
- dc->hwss.update_pending_status(pipe_ctx);
+ if (flags.bits.address)
+ dc->hwss.update_pending_status(pipe_ctx);
}
return plane_status;
@@ -294,3 +296,17 @@ void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
dc->hwss.clear_surface_dcc_and_tiling(pipe_ctx, plane_state, clear_tiling);
}
}
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src)
+{
+ struct kref temp_refcount;
+
+ /* backup persistent info */
+ memcpy(&temp_refcount, &dst->refcount, sizeof(struct kref));
+
+ /* copy all configuration information */
+ memcpy(dst, src, sizeof(struct dc_plane_state));
+
+ /* restore persistent info */
+ memcpy(&dst->refcount, &temp_refcount, sizeof(struct kref));
+}
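/*
 * Editor's note: hypothetical caller illustrating dc_plane_copy_config().
 * The kref is saved and restored around the memcpy, so the destination
 * keeps its own reference count while taking over every configuration
 * field from the source.
 */
static void example_duplicate_plane(struct dc *dc, const struct dc_plane_state *src)
{
	struct dc_plane_state *copy = dc_create_plane_state(dc);

	if (!copy)
		return;

	/* copy->refcount is preserved; all other fields come from src */
	dc_plane_copy_config(copy, src);

	dc_plane_state_release(copy);
}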
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7c2ee0526926..f41073c0147e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -53,7 +53,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.325"
+#define DC_VER "3.2.334"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -66,7 +66,8 @@ struct dmub_notification;
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
-#define MAX_HOST_ROUTERS_NUM 2
+#define MAX_HOST_ROUTERS_NUM 3
+#define MAX_DPIA_PER_HOST_ROUTER 2
/* Display Core Interfaces */
struct dc_versions {
@@ -249,6 +250,7 @@ struct dc_caps {
uint32_t i2c_speed_in_khz_hdcp;
uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
+ unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
/*
* max video plane width that can be safely assumed to be always
@@ -282,6 +284,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ bool fused_io_supported;
uint32_t max_otg_num;
uint32_t max_cab_allocation_bytes;
uint32_t cache_line_size;
@@ -303,6 +306,8 @@ struct dc_caps {
/* Conservative limit for DCC cases which require ODM4:1 to support*/
uint32_t dcc_plane_width_limit;
struct dc_scl_caps scl_caps;
+ uint8_t num_of_host_routers;
+ uint8_t num_of_dpias_per_host_router;
};
struct dc_bug_wa {
@@ -447,6 +452,7 @@ struct dc_config {
bool enable_windowed_mpo_odm;
bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520
uint32_t allow_edp_hotplug_detection;
+ bool skip_riommu_prefetch_wa;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
@@ -496,6 +502,7 @@ enum visual_confirm {
VISUAL_CONFIRM_HW_CURSOR = 20,
VISUAL_CONFIRM_VABC = 21,
VISUAL_CONFIRM_DCC = 22,
+ VISUAL_CONFIRM_EXPLICIT = 0x80000000,
};
enum dc_psr_power_opts {
@@ -902,6 +909,9 @@ struct dc_debug_options {
bool voltage_align_fclk;
bool disable_min_fclk;
+ bool hdcp_lc_force_fw_enable;
+ bool hdcp_lc_enable_sw_fallback;
+
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
bool disable_hubp_power_gate;
@@ -1418,6 +1428,171 @@ struct dc_scratch_space {
struct dc_stream_state stream_state;
};
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+ struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ enum dc_irq_source irq_source_read_request;/* Read Request */
+
+ bool is_hpd_filter_disabled;
+ bool dp_ss_off;
+
+ /**
+ * @link_state_valid:
+ *
+ * If there is no link and local sink, this variable should be set to
+ * false. Otherwise, it should be set to true; usually, the function
+ * core_link_enable_stream sets this field to true.
+ */
+ bool link_state_valid;
+ bool aux_access_disabled;
+ bool sync_lt_in_progress;
+ bool skip_stream_reenable;
+ bool is_internal_display;
+ /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
+ bool is_dig_mapping_flexible;
+ bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a new received hpd */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
+
+ bool edp_sink_present;
+
+ struct dp_trace dp_trace;
+
+ /* caps is the same as reported_link_cap. link_training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
+ struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
+ struct dc_link_training_overrides preferred_training_settings;
+ struct dp_audio_test_data audio_test_data;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
+ enum engine_id dpia_preferred_eng_id;
+
+ bool test_pattern_enabled;
+ /* Pending/Current test pattern are only used to perform and track
+ * FIXED_VS retimer test pattern/lane adjustment override state.
+ * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
+ * to perform specific lane adjust overrides before setting certain
+ * PHY test patterns. In cases when lane adjust and set test pattern
+ * calls are not performed atomically (i.e. performing link training),
+ * pending_test_pattern will be invalid or contain a non-PHY test pattern
+ * and current_test_pattern will contain required context for any future
+ * set pattern/set lane adjust to transition between override state(s).
+ */
+ enum dp_test_pattern current_test_pattern;
+ enum dp_test_pattern pending_test_pattern;
+
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ enum dp_panel_mode panel_mode;
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct panel_cntl *panel_cntl;
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ uint32_t dongle_max_pix_clk;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ struct hdcp_caps hdcp_caps;
+ enum edp_revision edp_revision;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+
+ struct psr_settings psr_settings;
+ struct replay_settings replay_settings;
+
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
+ /* Vendor specific LTTPR workaround variables */
+ uint8_t vendor_specific_lttpr_link_rate_wa;
+ bool apply_vendor_specific_lttpr_link_rate_wa;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
+ bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
+ bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
+ /* Forced DPIA into TBT3 compatibility mode. */
+ bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
+ bool blank_stream_on_ocs_change;
+ bool read_dpcd204h_on_irq_hpd;
+ bool force_dp_ffe_preset;
+ } wa_flags;
+ union dc_dp_ffe_preset forced_dp_ffe_preset;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+ struct dprx_states dprx_states;
+
+ struct gpio *hpd_gpio;
+ enum dc_link_fec_state fec_state;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
+ uint32_t phy_transition_bitmask;
+ // BW ALLOCATION USB4 ONLY
+ struct dc_dpia_bw_alloc dpia_bw_alloc_config;
+ bool skip_implict_edp_power_control;
+ enum backlight_control_type backlight_control_type;
+};
+
struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
@@ -1431,6 +1606,7 @@ struct dc {
uint8_t link_count;
struct dc_link *links[MAX_LINKS];
+ uint8_t lowest_dpia_link_index;
struct link_service *link_srv;
struct dc_state *current_state;
@@ -1485,6 +1661,7 @@ struct dc {
struct dc_scratch_space current_state;
struct dc_scratch_space new_state;
struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ struct dc_link temp_link;
bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */
} scratch;
@@ -1651,170 +1828,6 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
const enum dc_link_encoding_format link_encoding);
/* Link Interfaces */
-/*
- * A link contains one or more sinks and their connected status.
- * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
- */
-struct dc_link {
- struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
- unsigned int sink_count;
- struct dc_sink *local_sink;
- unsigned int link_index;
- enum dc_connection_type type;
- enum signal_type connector_signal;
- enum dc_irq_source irq_source_hpd;
- enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
-
- bool is_hpd_filter_disabled;
- bool dp_ss_off;
-
- /**
- * @link_state_valid:
- *
- * If there is no link and local sink, this variable should be set to
- * false. Otherwise, it should be set to true; usually, the function
- * core_link_enable_stream sets this field to true.
- */
- bool link_state_valid;
- bool aux_access_disabled;
- bool sync_lt_in_progress;
- bool skip_stream_reenable;
- bool is_internal_display;
- /** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
- bool is_dig_mapping_flexible;
- bool hpd_status; /* HPD status of link without physical HPD pin. */
- bool is_hpd_pending; /* Indicates a new received hpd */
-
- /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
- * for every link training. This is incompatible with DP LL compliance automation,
- * which expects the same link settings to be used every retry on a link loss.
- * This flag is used to skip the fallback when link loss occurs during automation.
- */
- bool skip_fallback_on_link_loss;
-
- bool edp_sink_present;
-
- struct dp_trace dp_trace;
-
- /* caps is the same as reported_link_cap. link_traing use
- * reported_link_cap. Will clean up. TODO
- */
- struct dc_link_settings reported_link_cap;
- struct dc_link_settings verified_link_cap;
- struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
- struct dc_link_settings preferred_link_setting;
- /* preferred_training_settings are override values that
- * come from DM. DM is responsible for the memory
- * management of the override pointers.
- */
- struct dc_link_training_overrides preferred_training_settings;
- struct dp_audio_test_data audio_test_data;
-
- uint8_t ddc_hw_inst;
-
- uint8_t hpd_src;
-
- uint8_t link_enc_hw_inst;
- /* DIG link encoder ID. Used as index in link encoder resource pool.
- * For links with fixed mapping to DIG, this is not changed after dc_link
- * object creation.
- */
- enum engine_id eng_id;
- enum engine_id dpia_preferred_eng_id;
-
- bool test_pattern_enabled;
- /* Pending/Current test pattern are only used to perform and track
- * FIXED_VS retimer test pattern/lane adjustment override state.
- * Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
- * to perform specific lane adjust overrides before setting certain
- * PHY test patterns. In cases when lane adjust and set test pattern
- * calls are not performed atomically (i.e. performing link training),
- * pending_test_pattern will be invalid or contain a non-PHY test pattern
- * and current_test_pattern will contain required context for any future
- * set pattern/set lane adjust to transition between override state(s).
- * */
- enum dp_test_pattern current_test_pattern;
- enum dp_test_pattern pending_test_pattern;
-
- union compliance_test_state compliance_test_state;
-
- void *priv;
-
- struct ddc_service *ddc;
-
- enum dp_panel_mode panel_mode;
- bool aux_mode;
-
- /* Private to DC core */
-
- const struct dc *dc;
-
- struct dc_context *ctx;
-
- struct panel_cntl *panel_cntl;
- struct link_encoder *link_enc;
- struct graphics_object_id link_id;
- /* Endpoint type distinguishes display endpoints which do not have entries
- * in the BIOS connector table from those that do. Helps when tracking link
- * encoder to display endpoint assignments.
- */
- enum display_endpoint_type ep_type;
- union ddi_channel_mapping ddi_channel_mapping;
- struct connector_device_tag_info device_tag;
- struct dpcd_caps dpcd_caps;
- uint32_t dongle_max_pix_clk;
- unsigned short chip_caps;
- unsigned int dpcd_sink_count;
- struct hdcp_caps hdcp_caps;
- enum edp_revision edp_revision;
- union dpcd_sink_ext_caps dpcd_sink_ext_caps;
-
- struct psr_settings psr_settings;
- struct replay_settings replay_settings;
-
- /* Drive settings read from integrated info table */
- struct dc_lane_settings bios_forced_drive_settings;
-
- /* Vendor specific LTTPR workaround variables */
- uint8_t vendor_specific_lttpr_link_rate_wa;
- bool apply_vendor_specific_lttpr_link_rate_wa;
-
- /* MST record stream using this link */
- struct link_flags {
- bool dp_keep_receiver_powered;
- bool dp_skip_DID2;
- bool dp_skip_reset_segment;
- bool dp_skip_fs_144hz;
- bool dp_mot_reset_segment;
- /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
- bool dpia_mst_dsc_always_on;
- /* Forced DPIA into TBT3 compatibility mode. */
- bool dpia_forced_tbt3_mode;
- bool dongle_mode_timing_override;
- bool blank_stream_on_ocs_change;
- bool read_dpcd204h_on_irq_hpd;
- bool force_dp_ffe_preset;
- } wa_flags;
- union dc_dp_ffe_preset forced_dp_ffe_preset;
- struct link_mst_stream_allocation_table mst_stream_alloc_table;
-
- struct dc_link_status link_status;
- struct dprx_states dprx_states;
-
- struct gpio *hpd_gpio;
- enum dc_link_fec_state fec_state;
- bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
-
- struct dc_panel_config panel_config;
- struct phy_state phy_state;
- uint32_t phy_transition_bitmask;
- // BW ALLOCATON USB4 ONLY
- struct dc_dpia_bw_alloc dpia_bw_alloc_config;
- bool skip_implict_edp_power_control;
- enum backlight_control_type backlight_control_type;
-};
-
/* Return an enumerated dc_link.
* dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
@@ -2586,13 +2599,23 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+
/* DSC Interfaces */
#include "dc_dsc.h"
+void dc_get_visual_confirm_for_stream(
+ struct dc *dc,
+ struct dc_stream_state *stream_state,
+ struct tg_color *color);
+
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
+bool dc_is_cursor_limit_pending(struct dc *dc);
+bool dc_can_clear_cursor_limit(struct dc *dc);
+
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 614e03bfd598..afbcf866520e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -39,6 +39,7 @@
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
+#define GPINT_RETRY_NUM 20
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -70,20 +71,28 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
}
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dmub_srv *dmub;
+ struct dc_context *dc_ctx;
enum dmub_status status;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_pending(dmub, 100000);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
if (status != DMUB_STATUS_OK) {
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
}
+
+ return status == DMUB_STATUS_OK;
}
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
@@ -126,7 +135,49 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
}
}
-bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ struct dc_context *dc_ctx;
+ struct dmub_srv *dmub;
+ enum dmub_status status = DMUB_STATUS_OK;
+ int i;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dc_ctx = dc_dmub_srv->ctx;
+ dmub = dc_dmub_srv->dmub;
+
+ for (i = 0 ; i < count; i++) {
+ /* confirm no messages pending */
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
+ /* queue command */
+ if (status == DMUB_STATUS_OK)
+ status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);
+
+ /* check for errors */
+ if (status != DMUB_STATUS_OK) {
+ break;
+ }
+ }
+
+ if (status != DMUB_STATUS_OK) {
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
@@ -143,20 +194,25 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
for (i = 0 ; i < count; i++) {
// Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
+ dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
+ } else {
+ status = DMUB_STATUS_QUEUE_FULL;
+ }
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
/* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
}
if (status != DMUB_STATUS_OK) {
@@ -168,7 +224,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
}
- status = dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_fb_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
if (status != DMUB_STATUS_POWER_STATE_D3) {
DC_ERROR("Error starting DMUB execution: status=%d\n", status);
@@ -180,6 +236,26 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
return true;
}
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int count,
+ union dmub_rb_cmd *cmd_list)
+{
+ bool res = false;
+
+ if (dc_dmub_srv && dc_dmub_srv->dmub) {
+ if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
+ res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ } else {
+ res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
+ }
+
+ if (res)
+ res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
+ }
+
+ return res;
+}
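/*
 * Editor's note: the wrapper above hides which inbox flavor the firmware
 * exposes (register-based vs frame-buffer ring). A hypothetical caller
 * only builds the command list and then waits, e.g.:
 */
static bool example_send_one_cmd(struct dc_dmub_srv *srv, union dmub_rb_cmd *cmd)
{
	if (!dc_dmub_srv_cmd_list_queue_execute(srv, 1, cmd))
		return false;

	/* block until the firmware has consumed and acked the command */
	return dc_dmub_srv_wait_for_idle(srv, DM_DMUB_WAIT_TYPE_WAIT, cmd);
}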
+
bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
enum dm_dmub_wait_type wait_type,
union dmub_rb_cmd *cmd_list)
@@ -202,7 +278,8 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
if (!dmub->debug.timeout_info.timeout_occured) {
dmub->debug.timeout_info.timeout_occured = true;
- dmub->debug.timeout_info.timeout_cmd = *cmd_list;
+ if (cmd_list)
+ dmub->debug.timeout_info.timeout_cmd = *cmd_list;
dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
}
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@ -210,8 +287,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
}
// Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+ if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
+ dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
+ }
}
return true;
@@ -224,74 +302,10 @@ bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd
bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
- struct dc_context *dc_ctx;
- struct dmub_srv *dmub;
- enum dmub_status status;
- int i;
-
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
- return false;
-
- dc_ctx = dc_dmub_srv->ctx;
- dmub = dc_dmub_srv->dmub;
-
- for (i = 0 ; i < count; i++) {
- // Queue command
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
-
- if (status == DMUB_STATUS_QUEUE_FULL) {
- /* Execute and wait for queue to become empty again. */
- status = dmub_srv_cmd_execute(dmub);
- if (status == DMUB_STATUS_POWER_STATE_D3)
- return false;
-
- status = dmub_srv_wait_for_idle(dmub, 100000);
- if (status != DMUB_STATUS_OK)
- return false;
-
- /* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
- }
-
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
- return false;
- }
- }
-
- status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK) {
- if (status != DMUB_STATUS_POWER_STATE_D3) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
+ if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
return false;
- }
- // Wait for DMUB to process command
- if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
- do {
- status = dmub_srv_wait_for_idle(dmub, 100000);
- } while (status != DMUB_STATUS_OK);
- } else
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK) {
- DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- return false;
- }
-
- // Copy data back from ring buffer into command
- if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
- }
-
- return true;
+ return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
@@ -1243,7 +1257,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1355,14 +1369,15 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
udelay(dc->debug.ips2_eval_delay_us);
- if (ips_fw->signals.bits.ips2_commit) {
- DC_LOG_IPS(
- "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
- ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
- // Tell PMFW to exit low power state
- dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ // Tell PMFW to exit low power state
+ dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+
+ if (ips_fw->signals.bits.ips2_commit) {
DC_LOG_IPS(
"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
@@ -1400,7 +1415,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
- dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
+ dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
}
}
@@ -1654,7 +1669,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ global_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
if (enable) {
/* send global configuration parameters */
@@ -1673,11 +1689,13 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* configure command header */
stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_base_cmd->header.multi_cmd_pending = 1;
stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
stream_sub_state_cmd->header.multi_cmd_pending = 1;
/* copy stream static base state */
memcpy(&stream_base_cmd->config,
@@ -1723,7 +1741,8 @@ void dc_dmub_srv_fams2_drr_update(struct dc *dc,
cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
- cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+ cmd.fams2_drr_update.header.payload_bytes =
+ sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -1759,7 +1778,8 @@ void dc_dmub_srv_fams2_passthrough_flip(
/* build command header */
cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
- cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
+ cmds[num_cmds].fams2_flip.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
/* for chaining multiple commands, all but last command should set to 1 */
cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
@@ -1869,11 +1889,14 @@ void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struc
if (command_code == DMUB_GPINT__INVALID_COMMAND)
return;
- // send gpint commands and wait for ack
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
- (uint16_t)(output->ips_mode),
- &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->residency_percent = 0;
+ for (i = 0; i < GPINT_RETRY_NUM; i++) {
+ // false could mean GPINT timeout, in which case we should retry
+ if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
+ (uint16_t)(output->ips_mode), &output->residency_percent,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ break;
+ udelay(100);
+ }
if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
(uint16_t)(output->ips_mode),
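/*
 * Editor's note: several hunks in this patch change payload_bytes from
 * sizeof(whole command) to sizeof(command) - sizeof(header). The DMUB
 * header travels in front of every command, so the payload length must
 * count only the body. A hypothetical helper capturing the convention:
 */
#define DMUB_PAYLOAD_BYTES(cmd_struct) \
	(sizeof(cmd_struct) - sizeof(struct dmub_cmd_header))
/* e.g. header.payload_bytes = DMUB_PAYLOAD_BYTES(struct dmub_rb_cmd_fams2_flip); */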
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index a636f4c3f01d..ada5c2fb2db3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -58,7 +58,7 @@ struct dc_dmub_srv {
bool needs_idle_wake;
};
-void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 77c87ad57220..d346f8ae1634 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -159,6 +159,11 @@ struct dc_link_settings {
uint8_t link_rate_set;
};
+struct dc_tunnel_settings {
+ bool should_enable_dp_tunneling;
+ bool should_use_dp_bw_allocation;
+};
+
union dc_dp_ffe_preset {
struct {
uint8_t level : 4;
@@ -943,10 +948,20 @@ union dpia_info {
uint8_t raw;
};
+/* DPCD[0xE0020] USB4_DRIVER_BW_CAPABILITY register. */
+union usb4_driver_bw_cap {
+ struct {
+ uint8_t rsvd :7;
+ uint8_t driver_bw_alloc_support :1;
+ } bits;
+ uint8_t raw;
+};
+
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
+ union usb4_driver_bw_cap driver_bw_cap;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
@@ -1157,8 +1172,8 @@ struct dc_lttpr_caps {
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
union dp_alpm_lttpr_cap alpm;
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
- uint8_t lttpr_ieee_oui[3];
- uint8_t lttpr_device_id[6];
+ uint8_t lttpr_ieee_oui[3]; // Always read from closest LTTPR to host
+ uint8_t lttpr_device_id[6]; // Always read from closest LTTPR to host
};
struct dc_dongle_dfp_cap_ext {
@@ -1486,5 +1501,11 @@ struct dp_trace {
# ifndef DP_TUNNELING_BW_ALLOC_CAP_CHANGED
# define DP_TUNNELING_BW_ALLOC_CAP_CHANGED (1 << 3)
# endif
+# ifndef DPTX_BW_ALLOC_UNMASK_IRQ
+# define DPTX_BW_ALLOC_UNMASK_IRQ (1 << 6)
+# endif
+# ifndef DPTX_BW_ALLOC_MODE_ENABLE
+# define DPTX_BW_ALLOC_MODE_ENABLE (1 << 7)
+# endif
#endif /* DC_DP_TYPES_H */
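/*
 * Editor's note: hypothetical sketch combining the two new DPTX_BW_ALLOC
 * bits for a single DPCD write; the actual register offset and write path
 * are driver-internal and not shown here.
 */
static uint8_t example_bw_alloc_enable_value(void)
{
	/* enable BW allocation mode and unmask its IRQ in one write */
	return DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
}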
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.c b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
new file mode 100644
index 000000000000..fee69642fb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dc_fused_io.h"
+
+#include "dm_helpers.h"
+#include "gpio.h"
+
+static bool op_i2c_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_i2c *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line,
+ bool over_aux
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_i2c *loc = &req->u.i2c;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = false;
+ loc->ddc_line = ddc_line;
+ loc->over_aux = over_aux;
+ loc->address = op->address;
+ loc->offset = op->offset;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool op_aux_convert(
+ union dmub_rb_cmd *cmd,
+ const struct mod_hdcp_atomic_op_aux *op,
+ enum dmub_cmd_fused_request_type type,
+ uint32_t ddc_line
+)
+{
+ struct dmub_cmd_fused_request *req = &cmd->fused_io.request;
+ struct dmub_cmd_fused_request_location_aux *loc = &req->u.aux;
+
+ if (!op || op->size > sizeof(req->buffer))
+ return false;
+
+ req->type = type;
+ loc->is_aux = true;
+ loc->ddc_line = ddc_line;
+ loc->address = op->address;
+ loc->length = op->size;
+ memcpy(req->buffer, op->data, op->size);
+
+ return true;
+}
+
+static bool atomic_write_poll_read(
+ struct dc_link *link,
+ union dmub_rb_cmd commands[3],
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ const uint8_t count = 3;
+ const uint32_t timeout_per_request_us = 10000;
+ const uint32_t timeout_per_aux_transaction_us = 10000;
+ uint64_t timeout_us = 0;
+
+ commands[1].fused_io.request.poll_mask_msb = poll_mask_msb;
+ commands[1].fused_io.request.timeout_us = poll_timeout_us;
+
+ for (uint8_t i = 0; i < count; i++) {
+ struct dmub_rb_cmd_fused_io *io = &commands[i].fused_io;
+
+ io->header.type = DMUB_CMD__FUSED_IO;
+ io->header.sub_type = DMUB_CMD__FUSED_IO_EXECUTE;
+ io->header.multi_cmd_pending = i != count - 1;
+ io->header.payload_bytes = sizeof(commands[i].fused_io) - sizeof(io->header);
+
+ timeout_us += timeout_per_request_us + io->request.timeout_us;
+ if (!io->request.timeout_us && io->request.u.aux.is_aux)
+ timeout_us += timeout_per_aux_transaction_us * (io->request.u.aux.length / 16);
+ }
+
+ if (!dm_helpers_execute_fused_io(link->ctx, link, commands, count, timeout_us))
+ return false;
+
+ return commands[0].fused_io.request.status == FUSED_REQUEST_STATUS_SUCCESS;
+}
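/*
 * Editor's note: worked example of the timeout accumulation above, using
 * the constants in this file (10 ms per request, 10 ms per 16-byte AUX
 * transaction). For a 32-byte AUX write, a poll with a 5 ms budget, and
 * a 32-byte AUX read:
 *
 *	write: 10000 + 10000 * (32 / 16) = 30000 us
 *	poll:  10000 + 5000              = 15000 us
 *	read:  10000 + 10000 * (32 / 16) = 30000 us
 *
 * so the whole fused sequence gets timeout_us = 75000 us. The per-AUX
 * surcharge only applies to ops with no explicit timeout of their own.
 */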
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const bool over_aux = false;
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_i2c_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line, over_aux)
+ && op_i2c_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line, over_aux)
+ && op_i2c_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line, over_aux);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+)
+{
+ if (!link)
+ return false;
+
+ const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en;
+ union dmub_rb_cmd commands[3] = { 0 };
+ const bool converted = op_aux_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line)
+ && op_aux_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line)
+ && op_aux_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line);
+
+ if (!converted)
+ return false;
+
+ const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb);
+
+ memcpy(read->data, commands[0].fused_io.request.buffer, read->size);
+ return result;
+}
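/*
 * Editor's note: hypothetical caller sketch. The helpers above fuse
 * write -> poll -> read into one DMUB submission; the poll succeeds when
 * the polled byte's MSB region matches poll_mask_msb.
 */
static bool example_hdcp_aux_sequence(struct dc_link *link,
		struct mod_hdcp_atomic_op_aux *write,
		struct mod_hdcp_atomic_op_aux *poll,
		struct mod_hdcp_atomic_op_aux *read)
{
	/* give the sink up to 100 ms to raise the polled bit */
	return dm_atomic_write_poll_read_aux(link, write, poll, read,
					     100000, 0x80);
}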
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_fused_io.h b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
new file mode 100644
index 000000000000..c74917240985
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_fused_io.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DC_FUSED_IO_H__
+#define __DC_FUSED_IO_H__
+
+#include "dc.h"
+#include "mod_hdcp.h"
+
+bool dm_atomic_write_poll_read_i2c(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+bool dm_atomic_write_poll_read_aux(
+ struct dc_link *link,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+);
+
+#endif // __DC_FUSED_IO_H__
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 8f077e15b4f0..7217de258851 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -682,13 +682,19 @@ void reg_sequence_wait_done(const struct dc_context *ctx)
if (offload &&
ctx->dc->debug.dmub_offload_enabled &&
!ctx->dc->debug.dmcub_emulation) {
- dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
}
}
char *dce_version_to_string(const int version)
{
switch (version) {
+ case DCE_VERSION_6_0:
+ return "DCE 6.0";
+ case DCE_VERSION_6_1:
+ return "DCE 6.1";
+ case DCE_VERSION_6_4:
+ return "DCE 6.4";
case DCE_VERSION_8_0:
return "DCE 8.0";
case DCE_VERSION_8_1:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index e9413685ed4f..14feb843e694 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -28,13 +28,24 @@
#include "dc_hw_types.h"
+union dc_plane_status_update_flags {
+ struct {
+ uint32_t address : 1;
+ } bits;
+ uint32_t raw;
+};
+
struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state);
+ const struct dc_plane_state *plane_state,
+ union dc_plane_status_update_flags flags);
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
void dc_plane_force_dcc_and_tiling_disable(struct dc_plane_state *plane_state,
bool clear_tiling);
+
+void dc_plane_copy_config(struct dc_plane_state *dst, const struct dc_plane_state *src);
+
#endif /* _DC_PLANE_H_ */
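/*
 * Editor's note: illustrative call showing the new flags parameter. With
 * bits.address set, dc_plane_get_status() also refreshes flip-pending
 * tracking; with raw == 0 it only returns the cached status.
 */
static const struct dc_plane_status *example_query_status(
		const struct dc_plane_state *plane_state)
{
	union dc_plane_status_update_flags flags = { 0 };

	flags.bits.address = 1; /* also update flip-pending state */
	return dc_plane_get_status(plane_state, flags);
}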
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
index 1a12ef579ff4..1d9bae56ff6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -105,4 +105,24 @@ bool dc_state_is_fams2_in_use(
const struct dc *dc,
const struct dc_state *state);
+
+void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state,
+ bool limit);
+
+bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_is_subvp_in_use(struct dc_state *state);
+
#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e0bfddaa23e3..341d2ffb64b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -44,6 +44,8 @@ struct mall_stream_config {
*/
enum mall_stream_type type;
struct dc_stream_state *paired_stream; // master / slave stream
+ bool subvp_limit_cursor_size; /* stream is using subvp, which limits hw cursor support */
+ bool cursor_size_limit_subvp; /* stream is using a hw cursor config that prevents subvp */
};
struct dc_stream_status {
@@ -503,6 +505,11 @@ void program_cursor_position(
struct dc *dc,
struct dc_stream_state *stream);
+bool dc_stream_check_cursor_attributes(
+ const struct dc_stream_state *stream,
+ struct dc_state *state,
+ const struct dc_cursor_attributes *attributes);
+
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes);
@@ -579,4 +586,8 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
struct dc_state *context);
+
+bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream);
+bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream);
+
#endif /* DC_STREAM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 83ffaae9f439..a4cd0eb39a3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -210,6 +210,7 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
+ bool rr_capable;
struct dc_panel_patch panel_patch;
};
@@ -1089,7 +1090,8 @@ union replay_low_refresh_rate_enable_options {
struct {
//BIT[0-3]: Replay Low Hz Support control
unsigned int ENABLE_LOW_RR_SUPPORT :1;
- unsigned int RESERVED_1_3 :3;
+ unsigned int SKIP_ASIC_CHECK :1;
+ unsigned int RESERVED_2_3 :2;
//BIT[4-15]: Replay Low Hz Enable Scenarios
unsigned int ENABLE_STATIC_SCREEN :1;
unsigned int ENABLE_FULL_SCREEN_VIDEO :1;
@@ -1129,6 +1131,10 @@ struct replay_config {
union replay_low_refresh_rate_enable_options low_rr_enable_options;
/* Replay coasting vtotal is within low refresh rate range. */
bool low_rr_activated;
+ /* Replay low refresh rate supported */
+ bool low_rr_supported;
+ /* Replay Video Conferencing Optimization Enabled */
+ bool replay_video_conferencing_optimization_enabled;
};
/* Replay feature flags*/
@@ -1249,6 +1255,7 @@ enum dc_cm2_gpu_mem_layout {
enum dc_cm2_gpu_mem_pixel_component_order {
DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA,
+ DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA
};
enum dc_cm2_gpu_mem_format {
@@ -1270,7 +1277,8 @@ struct dc_cm2_gpu_mem_format_parameters {
enum dc_cm2_gpu_mem_size {
DC_CM2_GPU_MEM_SIZE_171717,
- DC_CM2_GPU_MEM_SIZE_TRANSFORMED
+ DC_CM2_GPU_MEM_SIZE_333333,
+ DC_CM2_GPU_MEM_SIZE_TRANSFORMED,
};
struct dc_cm2_gpu_mem_parameters {
@@ -1279,6 +1287,7 @@ struct dc_cm2_gpu_mem_parameters {
struct dc_cm2_gpu_mem_format_parameters format_params;
enum dc_cm2_gpu_mem_pixel_component_order component_order;
enum dc_cm2_gpu_mem_size size;
+ uint16_t bit_depth;
};
enum dc_cm2_transfer_func_source {
@@ -1302,6 +1311,10 @@ struct dc_cm2_func_luts {
const struct dc_3dlut *lut3d_func;
struct dc_cm2_gpu_mem_parameters gpu_mem_params;
};
+ bool rmcm_3dlut_shaper_select;
+ bool mpc_3dlut_enable;
+ bool rmcm_3dlut_enable;
+ bool mpc_mcm_post_blend;
} lut3d_data;
const struct dc_transfer_func *lut1d_func;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index b363f5360818..58c84f555c0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -391,6 +391,7 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
return;
@@ -411,6 +412,8 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
BREAK_TO_DEBUGGER();
break;
}
+ //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 0 : 1);
+
}
static void dccg35_set_dpstreamclk_rcg(
@@ -1035,6 +1038,7 @@ static void dccg35_enable_dpp_clk_new(
DPPCLK0_DTO_MODULO, 0xFF);
}
+
static void dccg35_disable_dpp_clk_new(
struct dccg *dccg,
int inst)
@@ -1112,30 +1116,24 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
break;
case 1:
REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
break;
case 2:
REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
break;
case 3:
REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
break;
default:
break;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1163,14 +1161,18 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
ASSERT(false);
phase = 0xff;
}
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, phase,
DPPCLK0_DTO_MODULO, modulo);
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
- } else
+ } else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
+ /*we have this in hwss: disable_plane*/
+ //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ }
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
@@ -1182,6 +1184,7 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
return;
+
switch (dpp_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
@@ -1198,6 +1201,8 @@ static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
default:
break;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable);
+
}
static void dccg35_get_pixel_rate_div(
@@ -1521,28 +1526,30 @@ static void dccg35_set_physymclk_root_clock_gating(
switch (phy_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 1:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 2:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 3:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 4:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
- PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+ PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1);
+
}
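/*
 * Editor's note: the hunk above flips an inverted polarity. The register
 * field is a gate *disable*: writing 1 keeps the clock root ungated.
 * Under the reading that the enable argument requests root clock gating,
 * the correct programming is therefore:
 *
 *	PHY?SYMCLK_ROOT_GATE_DISABLE = enable ? 0 : 1;
 */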
static void dccg35_set_physymclk(
@@ -1643,6 +1650,8 @@ static void dccg35_dpp_root_clock_control(
return;
if (clock_on) {
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, false);
+
/* turn off the DTO and leave phase/modulo at max */
dcn35_set_dppclk_enable(dccg, dpp_inst, 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
@@ -1654,6 +1663,8 @@ static void dccg35_dpp_root_clock_control(
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
+ /*we have this in hwss: disable_plane*/
+ //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
@@ -1771,36 +1782,40 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
//Disable DTO
switch (inst) {
case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
DSCCLK0_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
break;
case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
DSCCLK1_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
break;
case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
DSCCLK2_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
break;
case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
+
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
DSCCLK3_DTO_MODULO, 0);
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -1813,9 +1828,6 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- return;
-
switch (inst) {
case 0:
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 077337698e0a..b4f5b4a6331a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,11 +976,12 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
- // Apply ssed(spread spectrum) dpref clock for edp only.
- if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
- && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
- && encoding == DP_8b_10b_ENCODING)
+ // Apply SS (spread spectrum) DPREF clock for eDP and DP
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 &&
+ dc_is_dp_signal(pix_clk_params->signal_type) &&
+ encoding == DP_8b_10b_ENCODING)
dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
+
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 0721ae895ae9..94128f7a18b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -257,7 +257,7 @@ bool dce110_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
struct dc_bios *bios,
- enum clock_source_id,
+ enum clock_source_id id,
const struct dce110_clk_src_regs *regs,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index ccc154b0281c..3b9011ef9b68 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -28,6 +28,8 @@
#include "dc.h"
#include "core_types.h"
#include "dmub_cmd.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 0d7e7f3b81a1..a641ae04450c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -240,7 +240,8 @@ bool dmub_abm_save_restore(
cmd.abm_save_restore.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
cmd.abm_save_restore.abm_init_config_data.panel_mask = panel_mask;
- cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
+ cmd.abm_save_restore.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_abm_save_restore) - sizeof(struct dmub_cmd_header);
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index c31e4f26a305..fcd3d86ad517 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -280,7 +280,9 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
memset(&cmd, 0, sizeof(cmd));
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
- pCmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ pCmd->header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal) -
+ sizeof(struct dmub_cmd_header);
pCmd->replay_set_power_opt_data.power_opt = power_opt;
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
@@ -319,7 +321,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_timing_sync.header.sub_type =
DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
cmd.replay_set_timing_sync.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
cmd_element->sync_data.panel_inst;
@@ -331,7 +334,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_frameupdate_timer.header.sub_type =
DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
cmd.replay_set_frameupdate_timer.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_frameupdate_timer.data.panel_inst =
cmd_element->panel_inst;
@@ -345,7 +349,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_pseudo_vtotal.header.sub_type =
DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
cmd.replay_set_pseudo_vtotal.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal);
+ sizeof(struct dmub_rb_cmd_replay_set_pseudo_vtotal) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_pseudo_vtotal.data.panel_inst =
cmd_element->pseudo_vtotal_data.panel_inst;
@@ -357,7 +362,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.header.sub_type =
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP;
cmd.replay_disabled_adaptive_sync_sdp.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp);
+ sizeof(struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_disabled_adaptive_sync_sdp.data.panel_inst =
cmd_element->disabled_adaptive_sync_sdp_data.panel_inst;
@@ -369,7 +375,8 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_set_general_cmd.header.sub_type =
DMUB_CMD__REPLAY_SET_GENERAL_CMD;
cmd.replay_set_general_cmd.header.payload_bytes =
- sizeof(struct dmub_rb_cmd_replay_set_general_cmd);
+ sizeof(struct dmub_rb_cmd_replay_set_general_cmd) -
+ sizeof(struct dmub_cmd_header);
//Cmd Body
cmd.replay_set_general_cmd.data.panel_inst =
cmd_element->set_general_cmd_data.panel_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index eede83ad91fa..824f73eb3326 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -25,8 +25,7 @@
CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
-DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
- dce60_resource.o
+DCE60 = dce60_timing_generator.o
AMD_DAL_DCE60 = $(addprefix $(AMDDALPATH)/dc/dce60/,$(DCE60))
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
index 003a9330c286..88e7a1fc9a30 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -105,7 +105,7 @@ static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz)
dm_write_reg(tg->ctx, addr, value);
}
-static void program_timing(struct timing_generator *tg,
+static void dce80_timing_generator_program_timing(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
int vready_offset,
int vstartup_start,
@@ -185,7 +185,7 @@ static void dce80_timing_generator_enable_advanced_request(
static const struct timing_generator_funcs dce80_tg_funcs = {
.validate_timing = dce110_tg_validate_timing,
- .program_timing = program_timing,
+ .program_timing = dce80_timing_generator_program_timing,
.enable_crtc = dce110_timing_generator_enable_crtc,
.disable_crtc = dce110_timing_generator_disable_crtc,
.is_counter_moving = dce110_timing_generator_is_counter_moving,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 5efddd48d5c5..9d160b39e8c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -153,6 +153,14 @@ bool dm_helpers_submit_i2c(
const struct dc_link *link,
struct i2c_command *cmd);
+bool dm_helpers_execute_fused_io(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ union dmub_rb_cmd *commands,
+ uint8_t count,
+ uint32_t timeout_us
+);
+
bool dm_helpers_dp_write_dsc_enable(
struct dc_context *ctx,
const struct dc_stream_state *stream,
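
dm_helpers_execute_fused_io() is new here; judging from the prototype alone, it submits an array of DMUB commands and waits up to timeout_us for completion. A hedged usage sketch (the command contents and the ctx/link variables are placeholders; only the call shape comes from this header):

/* Sketch only: assumes the caller already holds a dc_context and a
 * dc_link, and that a false return signals failure or timeout.
 */
union dmub_rb_cmd cmds[2];
bool ok;

memset(cmds, 0, sizeof(cmds));
/* ... populate cmds[0] and cmds[1] ... */
ok = dm_helpers_execute_fused_io(ctx, link, cmds, 2, 1000 /* us */);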
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index f1fe49401bc0..8d24763938ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1002,6 +1002,7 @@ static bool CalculatePrefetchSchedule(
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index f567a9023682..ed59c77bc6f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1105,6 +1105,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 5865e8fa2d8e..9f3938a50240 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1123,6 +1123,7 @@ static bool CalculatePrefetchSchedule(
Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+ dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
Tpre_rounded = dst_y_prefetch_equ * LineTime;
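
All three VBA variants above gain the same clamp: DST_Y_PREFETCH is a U6.2 register field, i.e. an 8-bit value interpreted as raw/4, so the largest representable prefetch is 255/4 = 63.75 lines. A small sketch of the bound together with the quarter-line rounding that follows it in the dcn31/dcn314 hunks (floor stands in for dml_floor(x, 1)):

#include <math.h>

/* U6.2: 6 integer bits, 2 fractional bits -> max value 63.75 */
static double clamp_dst_y_prefetch(double equ)
{
	if (equ > 63.75)	/* the new dml_min(equ, 63.75) */
		equ = 63.75;
	/* existing quarter-line rounding from the lines above */
	return floor(4.0 * (equ + 0.125)) / 4.0;
}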
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 56dda686e299..b0fc1fd20208 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -627,6 +627,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
*/
if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
!pipe->stream->hw_cursor_req &&
+ !dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index d9159ca55412..92f0a099d089 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 21fd466dba26..157ecf008d6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -99,7 +99,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
@@ -117,11 +116,9 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_floa
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags)
DML21 := src/dml2_top/dml2_top_interfaces.o
DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
DML21 += src/dml2_core/dml2_core_factory.o
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 0c8ec30ea672..2aa6d44bb359 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -526,7 +526,8 @@ static void populate_dml21_output_config_from_stream_state(struct dml2_link_outp
static void populate_dml21_stream_overrides_from_stream_state(
struct dml2_stream_parameters *stream_desc,
- struct dc_stream_state *stream)
+ struct dc_stream_state *stream,
+ struct dc_stream_status *stream_status)
{
switch (stream->debug.force_odm_combine_segments) {
case 0:
@@ -551,7 +552,9 @@ static void populate_dml21_stream_overrides_from_stream_state(
if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy ||
stream->debug.force_odm_combine_segments > 0)
stream_desc->overrides.disable_dynamic_odm = true;
- stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req;
+ stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp ||
+ stream->hw_cursor_req ||
+ stream_status->mall_stream_config.cursor_size_limit_subvp;
}
static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode)
@@ -785,6 +788,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->pixel_format = dml2_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
plane->pixel_format = dml2_444_64;
@@ -885,6 +889,9 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
case DC_CM2_GPU_MEM_SIZE_171717:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
+ break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
@@ -910,7 +917,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
}
//TODO : Could be possibly moved to a common helper layer.
-static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id)
+static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id)
{
int i, j;
@@ -918,10 +925,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str
return false;
for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- if (context->stream_status[i].plane_states[j] == plane) {
- *plane_id = (i << 16) | j;
- return true;
+ if (context->streams[i]->stream_id == stream_id) {
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ if (context->stream_status[i].plane_states[j] == plane) {
+ *plane_id = (i << 16) | j;
+ return true;
+ }
}
}
}
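
The lookup now matches streams by stream_id before scanning plane_states, and the identifier keeps its packed form: stream index in the upper 16 bits, plane index in the lower 16. A small sketch of that encoding (the helpers are illustrative; the driver only builds the packed value inline):

/* Illustrative pack/unpack for the (i << 16) | j id used above */
static inline unsigned int pack_plane_id(unsigned int stream_idx,
					 unsigned int plane_idx)
{
	return (stream_idx << 16) | plane_idx;
}

static inline unsigned int plane_id_stream_idx(unsigned int id)
{
	return id >> 16;
}

static inline unsigned int plane_id_plane_idx(unsigned int id)
{
	return id & 0xffff;
}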
@@ -944,14 +953,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
return location;
}
-static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
const struct dc_plane_state *plane, const struct dc_state *context)
{
unsigned int plane_id;
int i = 0;
int location = -1;
- if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) {
+ if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
ASSERT(false);
return -1;
}
@@ -1021,7 +1030,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
- populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
+ populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.fclk_pstate = dml2_twait_budgeting_setting_if_needed;
dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.uclk_pstate = dml2_twait_budgeting_setting_if_needed;
@@ -1037,7 +1046,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
} else {
for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
- disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context);
+ disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context);
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
@@ -1048,7 +1057,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
- if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
+ if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
/* apply forced pstate policy */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 069b939c672a..73a013be1e48 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -11,6 +11,7 @@ struct dc_state;
struct dcn_watermarks;
union dcn_watermark_set;
struct pipe_ctx;
+struct dc_plane_state;
struct dml2_context;
struct dml2_configuration_options;
@@ -25,4 +26,5 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
+unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index ed6584535e89..208d3651b6ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -12,6 +12,8 @@
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
+#define INVALID -1
+
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
*dml_ctx = vzalloc(sizeof(struct dml2_context));
@@ -208,10 +210,40 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
}
}
+static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params)
+{
+ int dc_plane_idx = 0;
+ int dml_prog_idx, stream_idx, plane_idx;
+ struct dml2_per_plane_programming *pln_prog = NULL;
+
+ for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) {
+ for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) {
+ dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context);
+ if (dml_prog_idx == INVALID) {
+ continue;
+ }
+ pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
+ mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid;
+ mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0;
+ mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1;
+ mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache;
+ mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1;
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane0,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1,
+ pln_prog->mcache_allocation.mcache_x_offsets_plane1,
+ sizeof(int) * (DML2_MAX_MCACHES + 1));
+ dc_plane_idx++;
+ }
+ }
+}
+
static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx)
{
bool result = false;
struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming;
+ struct dc_mcache_params mcache_params[MAX_PLANES] = {0};
memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg));
memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
@@ -246,6 +278,14 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state);
/* if subvp phantoms are present, expand them into dc context */
dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx);
+
+ if (in_dc->res_pool->funcs->program_mcache_pipe_config) {
+ //Prepare mcache params for each plane based on mcache output from DML
+ dml21_prepare_mcache_params(dml_ctx, context, mcache_params);
+
+ //populate mcache regs to each pipe
+ dml_ctx->config.callbacks.allocate_mcache(context, mcache_params);
+ }
}
/* Copy DML CLK, WM and REG outputs to bandwidth context */
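
dml21_prepare_mcache_params() above flattens DML's per-plane programming into a dense dc_mcache_params array in dc plane order, skipping planes that fail to map (INVALID); the allocate_mcache callback then consumes that array when the resource pool implements program_mcache_pipe_config. A sketch of the consumer side under those assumptions (the body is hypothetical; only the callback name and the dc_mcache_params fields appear in this patch):

/* Hypothetical allocate_mcache body: program only the entries DML
 * marked valid in the array prepared above.
 */
static void allocate_mcache_sketch(struct dc_state *context,
				   const struct dc_mcache_params *p)
{
	int i;

	for (i = 0; i < MAX_PLANES; i++) {
		if (!p[i].valid)
			continue;
		/* write p[i].num_mcaches_plane0/plane1 and the
		 * mcache_x_offsets arrays into the pipe registers
		 */
	}
}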
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
index b2075b8c363b..42e715024bc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
@@ -8,6 +8,7 @@
#include "os_types.h"
#include "dml_top_soc_parameter_types.h"
+#include "dml_top_display_cfg_types.h"
struct dc;
struct dc_state;
@@ -65,4 +66,67 @@ struct socbb_ip_params_external {
struct dml2_ip_capabilities ip_params;
struct dml2_soc_bb soc_bb;
};
+
+/*mcache parameters decided by dml*/
+struct dc_mcache_params {
+ bool valid;
+ /*
+ * For iMALL, dedicated mall mcaches are required (sharing of last
+ * slice possible); for legacy phantom or phantom without return,
+ * only the mall mcaches need to be valid.
+ */
+ bool requires_dedicated_mall_mcache;
+ unsigned int num_mcaches_plane0;
+ unsigned int num_mcaches_plane1;
+ /*
+ * Generally, plane0/1 slices must use a disjoint set of caches
+ * but in some cases the final segement of the two planes can
+ * use the same cache. If plane0_plane1 is set, then this is
+ * allowed.
+ *
+ * Similarly, the caches allocated to MALL prefetcher are generally
+ * disjoint, but if mall_prefetch is set, then the final segment
+ * between the main and the mall pixel requestor can use the same
+ * cache.
+ *
+ * Note that both bits may be set at the same time.
+ */
+ struct {
+ bool mall_comb_mcache_p0;
+ bool mall_comb_mcache_p1;
+ bool plane0_plane1;
+ } last_slice_sharing;
+ /*
+ * A plane is divided into vertical slices of mcaches,
+ * which wrap on the surface width.
+ *
+ * For example, if the surface width is 7680, and split into
+ * three slices of equal width, the boundary array would contain
+ * [2560, 5120, 7680]
+ *
+ * The assignments are
+ * 0 = [0 .. 2559]
+ * 1 = [2560 .. 5119]
+ * 2 = [5120 .. 7679]
+ * 0 = [7680 .. INF]
+ * The final element implicitly is the same as the first, and
+ * at first seems invalid since it is never referenced (it lies
+ * outside the surface). However, it is useful when shifting
+ * (see below).
+ *
+ * For any given valid mcache assignment, a shifted version, wrapped
+ * on the surface width boundary is also assumed to be valid.
+ *
+ * For example, shifting [2560, 5120, 7680] by -50 results in
+ * [2510, 5070, 7630].
+ *
+ * The assignments are now:
+ * 0 = [0 .. 2509]
+ * 1 = [2510 .. 5069]
+ * 2 = [5070 .. 7629]
+ * 0 = [7630 .. INF]
+ */
+ int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1];
+ int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1];
+};
#endif
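
Given the boundary semantics documented above, slice lookup is a first-boundary-greater-than search, with anything past the last boundary wrapping back to slice 0. A sketch under those assumptions, using the corrected example numbers ([2510, 5070, 7630] after the -50 shift):

/* Map pixel column x to its mcache slice; offsets wrap on the
 * surface width as described in the comment above.
 */
static int mcache_slice_for_x(const int *offsets, int n, int x)
{
	int i;

	for (i = 0; i < n; i++)
		if (x < offsets[i])
			return i;
	return 0;	/* past the last boundary: wrapped slice 0 */
}

/* mcache_slice_for_x((int[]){2510, 5070, 7630}, 3, 5100) == 2 */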
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
index a64ec4dcf11a..c047d56527c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
@@ -43,4 +43,5 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
*/
bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 25b607e7b726..84c90050668c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -156,6 +156,8 @@ struct dml2_dchub_watermark_regs {
uint32_t urgent;
uint32_t sr_enter;
uint32_t sr_exit;
+ uint32_t sr_enter_z8;
+ uint32_t sr_exit_z8;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index 5e1ab6d97640..255f05de362c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -166,7 +166,7 @@ struct dml2_surface_cfg {
enum dml2_swizzle_mode tiling;
struct {
- unsigned long pitch;
+ unsigned long pitch; // In elements, two pixels per element in 422 packed format
unsigned long width;
unsigned long height;
} plane0;
@@ -385,6 +385,7 @@ struct dml2_plane_parameters {
long reserved_vblank_time_ns;
unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
unsigned int gpuvm_min_page_size_kbytes;
+ unsigned int hostvm_min_page_size_kbytes;
enum dml2_svp_mode_override legacy_svp_config; //TODO remove in favor of svp_config
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index bb863c8c6b39..6ee37386f672 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -456,10 +456,10 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
in_out->mode_support_result.global.active.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.average_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = (unsigned long)math_ceil2((l->mode_support_ex_params.out_evaluation_info->urg_bandwidth_required_flip[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] * 1000), 1.0);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
- dml2_printf("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_sdp_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.active.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.active.urgent_bw_dram_kbps);
+ DML_LOG_VERBOSE("DML::%s: in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps = %ld\n", __func__, in_out->mode_support_result.global.svp_prefetch.urgent_bw_dram_kbps);
for (i = 0; i < l->svp_expanded_display_cfg.num_planes; i++) {
in_out->mode_support_result.per_plane[i].dppclk_khz = (unsigned int)(core->clean_me_up.mode_lib.ms.RequiredDPPCLK[i] * 1000);
@@ -509,7 +509,7 @@ bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out)
stream_index = l->svp_expanded_display_cfg.plane_descriptors[i].stream_index;
in_out->mode_support_result.per_stream[stream_index].dscclk_khz = (unsigned int)core->clean_me_up.mode_lib.ms.required_dscclk_freq_mhz[i] * 1000;
- dml2_printf("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
+ DML_LOG_VERBOSE("CORE_DCN4::%s: i=%d stream_index=%d, in_out->mode_support_result.per_stream[stream_index].dscclk_khz = %u\n", __func__, i, stream_index, in_out->mode_support_result.per_stream[stream_index].dscclk_khz);
if (!((stream_bitmask >> stream_index) & 0x1)) {
in_out->mode_support_result.cfg_support_info.stream_support_info[stream_index].odms_used = odm_count;
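
From here on the patch mechanically converts dml2_printf() calls (much of it previously fenced in #ifdef __DML_VBA_DEBUG__) to DML_LOG_VERBOSE, which is also why the dml2 Makefile earlier in this diff drops dml2_debug.o from the build. The macro's definition is not part of this excerpt; a minimal sketch of the shape such a compile-time-gated logger could take (an assumption, not the driver's actual definition):

/* Assumed shape only; the real DML_LOG_VERBOSE lives in the DML2
 * debug headers and may gate on a different config symbol.
 */
#ifdef DML_DEBUG_VERBOSE
#define DML_LOG_VERBOSE(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) do { } while (0)
#endif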
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 4c504cb0e1c5..5b62cd19d979 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -54,104 +54,104 @@ static double dml2_core_div_rem(double dividend, unsigned int divisor, unsigned
static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg)
@@ -179,11 +179,9 @@ static void get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg
} else {
out_bpp[k] = 0;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
}
}
@@ -212,9 +210,7 @@ static unsigned int dml_get_num_active_pipes(int unsigned num_planes, const stru
num_active_pipes = num_active_pipes + (unsigned int)cfg_support_info->plane_support_info[k].dpps_used;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
return num_active_pipes;
}
@@ -251,7 +247,7 @@ static bool dml_get_is_phantom_pipe(const struct dml2_display_cfg *display_cfg,
unsigned int plane_idx = mode_lib->mp.pipe_plane[pipe_idx];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[plane_idx]);
- dml2_printf("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
+ DML_LOG_VERBOSE("DML::%s: pipe_idx=%d legacy_svp_config=%0d is_phantom=%d\n", __func__, pipe_idx, display_cfg->plane_descriptors[plane_idx].overrides.legacy_svp_config, is_phantom);
return is_phantom;
}
@@ -415,19 +411,17 @@ static void CalculateMaxDETAndMinCompressedBufferSize(
*nomDETInKByte = (unsigned int)(math_floor2((double)*MaxTotalDETInKByte / (double)MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte));
*MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
- dml2_printf("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_mrq_present = %u\n", __func__, is_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u\n", __func__, ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MaxNumDPP = %u\n", __func__, MaxNumDPP);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, *MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, *MinCompressedBufferSizeInKByte);
if (nomDETInKByteOverrideEnable) {
*nomDETInKByte = nomDETInKByteOverrideValue;
- dml2_printf("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u (overrided)\n", __func__, *nomDETInKByte);
}
}
@@ -502,7 +496,7 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -535,7 +529,7 @@ static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
}
}
@@ -570,8 +564,8 @@ static int unsigned dml_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x) {
version = 11;
} else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -645,21 +639,19 @@ static void CalculateBytePerPixelAndBlockSizes(
*BytePerPixelY = 2;
*BytePerPixelC = 4;
} else {
- dml2_printf("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: SourcePixelFormat = %u not supported!\n", __func__, SourcePixelFormat);
+ DML_ASSERT(0);
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
- dml2_printf("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
- dml2_printf("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
- dml2_printf("DML::%s: pitch_y = %u\n", __func__, pitch_y);
- dml2_printf("DML::%s: pitch_c = %u\n", __func__, pitch_c);
- dml2_printf("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
- dml2_printf("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
-#endif
+ DML_LOG_VERBOSE("DML::%s: SourcePixelFormat = %u\n", __func__, SourcePixelFormat);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETY = %f\n", __func__, *BytePerPixelDETY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelDETC = %f\n", __func__, *BytePerPixelDETC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, *BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, *BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: pitch_y = %u\n", __func__, pitch_y);
+ DML_LOG_VERBOSE("DML::%s: pitch_c = %u\n", __func__, pitch_c);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_l = %u\n", __func__, *surf_linear128_l);
+ DML_LOG_VERBOSE("DML::%s: surf_linear128_c = %u\n", __func__, *surf_linear128_c);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
*surf_linear128_l = 0;
@@ -703,12 +695,10 @@ static void CalculateBytePerPixelAndBlockSizes(
*BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
*BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
- dml2_printf("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
- dml2_printf("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
- dml2_printf("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesY = %u\n", __func__, *BlockWidth256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesY = %u\n", __func__, *BlockHeight256BytesY);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256BytesC = %u\n", __func__, *BlockWidth256BytesC);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256BytesC = %u\n", __func__, *BlockHeight256BytesC);
if (dml_get_gfx_version(SurfaceTiling) == 11) {
if (SurfaceTiling == dml2_gfx11_sw_linear) {
@@ -752,8 +742,8 @@ static void CalculateBytePerPixelAndBlockSizes(
} else if (SurfaceTiling == dml2_sw_256kb_2d) {
macro_tile_scale = 32;
} else {
- dml2_printf("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid SurfaceTiling setting! val=%u\n", SurfaceTiling);
+ DML_ASSERT(0);
}
*MacroTileHeightY = macro_tile_scale * *BlockHeight256BytesY;
@@ -766,12 +756,10 @@ static void CalculateBytePerPixelAndBlockSizes(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
- dml2_printf("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
- dml2_printf("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
- dml2_printf("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
-#endif
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthY = %u\n", __func__, *MacroTileWidthY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightY = %u\n", __func__, *MacroTileHeightY);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidthC = %u\n", __func__, *MacroTileWidthC);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeightC = %u\n", __func__, *MacroTileHeightC);
}
static void CalculateSinglePipeDPPCLKAndSCLThroughput(
@@ -860,10 +848,8 @@ static void CalculateSwathWidth(
unsigned int surface_width_ub_c;
unsigned int surface_height_ub_c;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
if (!dml_is_vertical_rotation(display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -872,11 +858,9 @@ static void CalculateSwathWidth(
SwathWidthSingleDPPY[k] = (unsigned int)display_cfg->plane_descriptors[k].composition.viewport.plane0.height;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u ViewportWidth=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight=%lu\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
MainSurfaceODMMode = ODMMode[k];
@@ -899,13 +883,11 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u HActive=%u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
- dml2_printf("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
- dml2_printf("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
- dml2_printf("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u HActive=%lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u HRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u MainSurfaceODMMode=%u\n", __func__, k, MainSurfaceODMMode);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthSingleDPPY=%u\n", __func__, k, SwathWidthSingleDPPY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthY=%u\n", __func__, k, SwathWidthY[k]);
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format)) {
SwathWidthC[k] = SwathWidthY[k] / 2;
@@ -934,22 +916,20 @@ static void CalculateSwathWidth(
surface_width_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.width, req_width_horz_c);
surface_height_ub_c = (unsigned int)math_ceil2((double)display_cfg->plane_descriptors[k].surface.plane1.height, Read256BytesBlockHeightC[k]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
- dml2_printf("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
- dml2_printf("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
- dml2_printf("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml2_printf("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml2_printf("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
- dml2_printf("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
- dml2_printf("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
- dml2_printf("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_l=%u\n", __func__, k, surface_width_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_l=%u\n", __func__, k, surface_height_ub_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_width_ub_c=%u\n", __func__, k, surface_width_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u surface_height_ub_c=%u\n", __func__, k, surface_height_ub_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthY=%u\n", __func__, k, Read256BytesBlockWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightY=%u\n", __func__, k, Read256BytesBlockHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockWidthC=%u\n", __func__, k, Read256BytesBlockWidthC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u Read256BytesBlockHeightC=%u\n", __func__, k, Read256BytesBlockHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_y=%u\n", __func__, k, req_width_horz_y);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_width_horz_c=%u\n", __func__, k, req_width_horz_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportStationary=%u\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.stationary);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface=%u\n", __func__, k, DPPPerSurface[k]);
req_per_swath_ub_l[k] = 0;
req_per_swath_ub_c[k] = 0;
@@ -995,15 +975,12 @@ static void CalculateSwathWidth(
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
-#endif
-
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub=%u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub=%u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY=%u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC=%u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_l=%u\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u req_per_swath_ub_c=%u\n", __func__, k, req_per_swath_ub_c[k]);
}
}
@@ -1018,13 +995,11 @@ static bool UnboundedRequest(bool unb_req_force_en, bool unb_req_force_val, unsi
if (unb_req_force_en) {
unb_req_en = unb_req_force_val && unb_req_ok;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
- dml2_printf("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
- dml2_printf("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
- dml2_printf("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
-#endif
- return (unb_req_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_en = %u\n", __func__, unb_req_force_en);
+ DML_LOG_VERBOSE("DML::%s: unb_req_force_val = %u\n", __func__, unb_req_force_val);
+ DML_LOG_VERBOSE("DML::%s: unb_req_ok = %u\n", __func__, unb_req_ok);
+ DML_LOG_VERBOSE("DML::%s: unb_req_en = %u\n", __func__, unb_req_en);
+ return unb_req_en;
}
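The whole patch follows the pattern visible in this hunk: a dml2_printf call fenced by #ifdef __DML_VBA_DEBUG__ collapses into a single unguarded DML_LOG_VERBOSE call, and the compile-time gating moves into the macro. A minimal sketch of such a shim is below; the exact definition in the DML2 debug headers may differ, so treat this as an assumption:

/* Hypothetical shim: compiles away entirely when verbose DML logging
 * is disabled, so call sites no longer need their own #ifdef fences. */
#ifdef __DML_VBA_DEBUG__
#define DML_LOG_VERBOSE(fmt, ...) dml2_printf(fmt, ##__VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) do { } while (0)
#endif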
static void CalculateDETBufferSize(
@@ -1054,16 +1029,14 @@ static void CalculateDETBufferSize(
bool NextPotentialSurfaceToAssignDETPieceFound;
bool MinimizeReallocationSuccess = false;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
- dml2_printf("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
- dml2_printf("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: MaxTotalDETInKByte = %u\n", __func__, MaxTotalDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MinCompressedBufferSizeInKByte = %u\n", __func__, MinCompressedBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSegmentSizeInkByte = %u\n", __func__, CompressedBufferSegmentSizeInkByte);
// Note: Will use default det size if that fits 2 swaths
if (UnboundedRequestEnabled) {
@@ -1092,19 +1065,15 @@ static void CalculateDETBufferSize(
l->minDET = l->minDET + ConfigReturnBufferSegmentSizeInkByte;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
- dml2_printf("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
- dml2_printf("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET = %u\n", __func__, k, l->minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u max_minDET = %u\n", __func__, k, l->max_minDET);
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u\n", __func__, k, l->minDET_pipe);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, full_swath_bytes_c[k]);
if (l->minDET_pipe == 0) {
l->minDET_pipe = (unsigned int)(math_max2(128, math_ceil2(((double)full_swath_bytes_l[k] + (double)full_swath_bytes_c[k]) / 1024.0, ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u minDET_pipe = %u (assume each plane take half DET)\n", __func__, k, l->minDET_pipe);
}
if (dml_is_phantom_pipe(&display_cfg->plane_descriptors[k])) {
@@ -1117,12 +1086,10 @@ static void CalculateDETBufferSize(
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - (ForceSingleDPP ? 1 : DPPPerSurface[k]) * l->minDET_pipe;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETSizeOverride = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.det_size_override_kb);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizePoolInKByte = %u\n", __func__, l->DETBufferSizePoolInKByte);
}
if (display_cfg->minimize_det_reallocation) {
@@ -1194,14 +1161,12 @@ static void CalculateDETBufferSize(
l->TotalBandwidth = l->TotalBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- Before bandwidth adjustment ---\n", __func__);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, DETBufferSizeInKByte[k]);
}
- dml2_printf("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
-#endif
- dml2_printf("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
+ DML_LOG_VERBOSE("DML::%s: --- DET allocation with bandwidth ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: TotalBandwidth = %f\n", __func__, l->TotalBandwidth);
l->BandwidthOfSurfacesNotAssignedDETPiece = l->TotalBandwidth;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
@@ -1213,10 +1178,8 @@ static void CalculateDETBufferSize(
} else {
DETPieceAssignedToThisSurfaceAlready[k] = false;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: k=%u DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, k, l->BandwidthOfSurfacesNotAssignedDETPiece);
}
for (unsigned int j = 0; j < NumberOfActiveSurfaces; ++j) {
@@ -1224,22 +1187,18 @@ static void CalculateDETBufferSize(
l->NextSurfaceToAssignDETPiece = 0;
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[k] = %f\n", __func__, j, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[k] = %f\n", __func__, j, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthLuma[Next] = %f\n", __func__, j, k, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, ReadBandwidthChroma[Next] = %f\n", __func__, j, k, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, k, l->NextSurfaceToAssignDETPiece);
if (!DETPieceAssignedToThisSurfaceAlready[k] && (!NextPotentialSurfaceToAssignDETPieceFound ||
ReadBandwidthLuma[k] + ReadBandwidthChroma[k] < ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece] + ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece])) {
l->NextSurfaceToAssignDETPiece = k;
NextPotentialSurfaceToAssignDETPieceFound = true;
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
- dml2_printf("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, DETPieceAssignedToThisSurfaceAlready = %u\n", __func__, j, k, DETPieceAssignedToThisSurfaceAlready[k]);
+ DML_LOG_VERBOSE("DML::%s: j=%u k=%u, NextPotentialSurfaceToAssignDETPieceFound = %u\n", __func__, j, k, NextPotentialSurfaceToAssignDETPieceFound);
}
if (NextPotentialSurfaceToAssignDETPieceFound) {
@@ -1249,20 +1208,16 @@ static void CalculateDETBufferSize(
* (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte,
math_floor2((double)l->DETBufferSizePoolInKByte, (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]) * ConfigReturnBufferSegmentSizeInkByte)));
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
- dml2_printf("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
- dml2_printf("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
- dml2_printf("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
- dml2_printf("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
- dml2_printf("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizePoolInKByte = %u\n", __func__, j, l->DETBufferSizePoolInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextSurfaceToAssignDETPiece = %u\n", __func__, j, l->NextSurfaceToAssignDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthLuma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthLuma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, ReadBandwidthChroma[%u] = %f\n", __func__, j, l->NextSurfaceToAssignDETPiece, ReadBandwidthChroma[l->NextSurfaceToAssignDETPiece]);
+ DML_LOG_VERBOSE("DML::%s: j=%u, BandwidthOfSurfacesNotAssignedDETPiece = %f\n", __func__, j, l->BandwidthOfSurfacesNotAssignedDETPiece);
+ DML_LOG_VERBOSE("DML::%s: j=%u, NextDETBufferPieceInKByte = %u\n", __func__, j, l->NextDETBufferPieceInKByte);
+ DML_LOG_VERBOSE("DML::%s: j=%u, DETBufferSizeInKByte[%u] increases from %u ", __func__, j, l->NextSurfaceToAssignDETPiece, DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] = DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece] + l->NextDETBufferPieceInKByte / (ForceSingleDPP ? 1 : DPPPerSurface[l->NextSurfaceToAssignDETPiece]);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
-#endif
+ DML_LOG_VERBOSE("to %u\n", DETBufferSizeInKByte[l->NextSurfaceToAssignDETPiece]);
l->DETBufferSizePoolInKByte = l->DETBufferSizePoolInKByte - l->NextDETBufferPieceInKByte;
DETPieceAssignedToThisSurfaceAlready[l->NextSurfaceToAssignDETPiece] = true;
@@ -1274,13 +1229,11 @@ static void CalculateDETBufferSize(
}
*CompressedBufferSizeInkByte = *CompressedBufferSizeInkByte * CompressedBufferSegmentSizeInkByte / ConfigReturnBufferSegmentSizeInkByte;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: --- After bandwidth adjustment ---\n", __func__);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: --- After bandwidth adjustment ---\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *CompressedBufferSizeInkByte);
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u (TotalReadBandWidth=%f)\n", __func__, k, DETBufferSizeInKByte[k], ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
}
-#endif
}
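The loop traced above assigns DET greedily: each pass picks the unassigned surface with the lowest combined luma+chroma read bandwidth, then carves it a slice of the remaining pool proportional to its share of the not-yet-assigned bandwidth, floored to the return-buffer segment granularity. A standalone sketch of that split, with purely illustrative names (the driver's own computation also folds in DPP count and per-surface minimums):

/* Hypothetical proportional-split helper; not the driver's exact math. */
static unsigned int det_piece_kb(double surf_bw, double unassigned_bw,
				 unsigned int pool_kb, unsigned int seg_kb)
{
	unsigned int piece_kb;

	if (unassigned_bw <= 0.0 || seg_kb == 0)
		return 0;
	piece_kb = (unsigned int)((double)pool_kb * (surf_bw / unassigned_bw));
	piece_kb -= piece_kb % seg_kb;	/* floor to segment granularity */
	if (piece_kb > pool_kb)
		piece_kb = pool_kb - (pool_kb % seg_kb);
	return piece_kb;
}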
static double CalculateRequiredDispclk(
@@ -1510,15 +1463,13 @@ static unsigned int dscceComputeDelay(
//pixel delay is group_delay (converted to pixels) + pipeline delay; however, the first group is a special case since it is processed as soon as it arrives (i.e., in 3 cycles regardless of pixel format)
pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: bpc: %u\n", __func__, bpc);
- dml2_printf("DML::%s: BPP: %f\n", __func__, BPP);
- dml2_printf("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
- dml2_printf("DML::%s: numSlices: %u\n", __func__, numSlices);
- dml2_printf("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Output: %u\n", __func__, Output);
- dml2_printf("DML::%s: pixels: %u\n", __func__, pixels);
-#endif
+ DML_LOG_VERBOSE("DML::%s: bpc: %u\n", __func__, bpc);
+ DML_LOG_VERBOSE("DML::%s: BPP: %f\n", __func__, BPP);
+ DML_LOG_VERBOSE("DML::%s: sliceWidth: %u\n", __func__, sliceWidth);
+ DML_LOG_VERBOSE("DML::%s: numSlices: %u\n", __func__, numSlices);
+ DML_LOG_VERBOSE("DML::%s: pixelFormat: %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Output: %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: pixels: %u\n", __func__, pixels);
return pixels;
}
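The pixel-delay comment in dscceComputeDelay is easy to sanity-check with made-up inputs (the numbers below are illustrative, not hardware values):

/* pixels = (group_delay - 1) * cycles_per_group + 3 + pipeline_delay
 * e.g. group_delay = 8, cycles_per_group = 3, pipeline_delay = 2:
 * the first group costs the fixed 3 cycles, the remaining 7 groups
 * cost 3 cycles each, and the pipeline tail adds 2. */
static const unsigned int example_pixels = (8 - 1) * 3 + 3 + 2; /* == 26 */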
@@ -1593,10 +1544,8 @@ static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, e
// sft
Delay = Delay + 1;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
- dml2_printf("DML::%s: Delay = %u\n", __func__, Delay);
-#endif
+ DML_LOG_VERBOSE("DML::%s: pixelFormat = %u\n", __func__, pixelFormat);
+ DML_LOG_VERBOSE("DML::%s: Delay = %u\n", __func__, Delay);
return Delay;
}
@@ -1667,10 +1616,8 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
}
meta_surface_bytes = (unsigned int)(p->DCCMetaPitch * vp_height_meta_ub * p->BytePerPixel / 256.0);
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
- dml2_printf("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCMetaPitch = %u\n", __func__, p->DCCMetaPitch);
+ DML_LOG_VERBOSE("DML::%s: meta_surface_bytes = %u\n", __func__, meta_surface_bytes);
if (p->GPUVMEnable == true) {
double meta_vmpg_bytes = 4.0 * 1024.0;
*p->meta_pte_bytes_per_frame_ub = (unsigned int)((math_ceil2((double) (meta_surface_bytes - meta_vmpg_bytes) / (8 * meta_vmpg_bytes), 1) + 1) * 64);
@@ -1724,25 +1671,23 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
vm_bytes = *p->meta_pte_bytes_per_frame_ub + extra_mpde_bytes + *p->dpde0_bytes_per_frame_ub + extra_dpde_bytes;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
- dml2_printf("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
- dml2_printf("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
- dml2_printf("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
- dml2_printf("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
- dml2_printf("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
- dml2_printf("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
- dml2_printf("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
- dml2_printf("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
- dml2_printf("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
- dml2_printf("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
- dml2_printf("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
- dml2_printf("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
-#endif
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: SwModeLinear = %u\n", __func__, p->SurfaceTiling == dml2_sw_linear);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixel = %u\n", __func__, p->BytePerPixel);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMaxPageTableLevels = %u\n", __func__, p->GPUVMMaxPageTableLevels);
+ DML_LOG_VERBOSE("DML::%s: BlockHeight256Bytes = %u\n", __func__, p->BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML::%s: BlockWidth256Bytes = %u\n", __func__, p->BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML::%s: MacroTileHeight = %u\n", __func__, p->MacroTileHeight);
+ DML_LOG_VERBOSE("DML::%s: MacroTileWidth = %u\n", __func__, p->MacroTileWidth);
+ DML_LOG_VERBOSE("DML::%s: meta_pte_bytes_per_frame_ub = %u\n", __func__, *p->meta_pte_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: dpde0_bytes_per_frame_ub = %u\n", __func__, *p->dpde0_bytes_per_frame_ub);
+ DML_LOG_VERBOSE("DML::%s: extra_mpde_bytes = %u\n", __func__, extra_mpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_dpde_bytes = %u\n", __func__, extra_dpde_bytes);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: ViewportHeight = %u\n", __func__, p->ViewportHeight);
+ DML_LOG_VERBOSE("DML::%s: SwathWidth = %u\n", __func__, p->SwathWidth);
+ DML_LOG_VERBOSE("DML::%s: vp_height_dpte_ub = %u\n", __func__, vp_height_dpte_ub);
if (p->SurfaceTiling == dml2_sw_linear) {
*p->PixelPTEReqHeight = 1;
@@ -1778,22 +1723,20 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->vmpg_width = 1024 * p->GPUVMMinPageSizeKBytes / (p->MacroTileHeight * p->BytePerPixel);
if (p->GPUVMEnable == true) {
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes=%u and sw_mode=%u (tile_size=%d) not supported!\n",
__func__, p->GPUVMMinPageSizeKBytes, p->SurfaceTiling, dml_get_tile_block_size_bytes(p->SurfaceTiling));
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
}
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
- dml2_printf("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
- dml2_printf("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
- dml2_printf("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
- dml2_printf("DML::%s: Pitch = %u\n", __func__, p->Pitch);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
-#endif
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqHeight = %u\n", __func__, *p->PixelPTEReqHeight);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth = %u\n", __func__, *p->PixelPTEReqWidth);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEReqWidth_linear = %u\n", __func__, PixelPTEReqWidth_linear);
+ DML_LOG_VERBOSE("DML::%s: PTERequestSize = %u\n", __func__, *p->PTERequestSize);
+ DML_LOG_VERBOSE("DML::%s: Pitch = %u\n", __func__, p->Pitch);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, *p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, *p->vmpg_height);
*p->dpte_row_height_one_row_per_frame = vp_height_dpte_ub;
*p->dpte_row_width_ub_one_row_per_frame = (unsigned int)((math_ceil2(((double)p->Pitch * (double)*p->dpte_row_height_one_row_per_frame / (double)*p->PixelPTEReqHeight - 1) / (double)*p->PixelPTEReqWidth, 1) + 1) * (double)*p->PixelPTEReqWidth);
@@ -1811,7 +1754,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_height_linear = 128;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (linear)\n", __func__, *p->dpte_row_width_ub);
#endif
} else if (!dml_is_vertical_rotation(p->RotationAngle)) {
@@ -1825,7 +1768,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->dpte_row_width_ub = (unsigned int)((math_ceil2((double)(p->SwathWidth - 1) / (double)*p->PixelPTEReqWidth, 1) + 1.0) * *p->PixelPTEReqWidth);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled horz)\n", __func__, *p->dpte_row_width_ub);
#endif
*p->PixelPTEBytesPerRow = *p->dpte_row_width_ub / *p->PixelPTEReqWidth * *p->PTERequestSize;
@@ -1840,7 +1783,7 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRow = (unsigned int)((double)*p->dpte_row_width_ub / (double)*p->PixelPTEReqHeight * *p->PTERequestSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u (tiled vert)\n", __func__, *p->dpte_row_width_ub);
#endif
}
@@ -1852,18 +1795,18 @@ static unsigned int CalculateVMAndRowBytes(struct dml2_core_shared_calculate_vm_
*p->PixelPTEBytesPerRowStorage = *p->PixelPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
- dml2_printf("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
- dml2_printf("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
- dml2_printf("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
- dml2_printf("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
- dml2_printf("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
- dml2_printf("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
- dml2_printf("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
- dml2_printf("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: GPUVMMinPageSizeKBytes = %u\n", __func__, p->GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %u\n", __func__, *p->meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %u\n", __func__, *p->dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_linear = %u\n", __func__, *p->dpte_row_height_linear);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub = %u\n", __func__, *p->dpte_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, *p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRowStorage = %u\n", __func__, *p->PixelPTEBytesPerRowStorage);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeInRequests = %u\n", __func__, p->PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height_one_row_per_frame = %u\n", __func__, *p->dpte_row_height_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_width_ub_one_row_per_frame = %u\n", __func__, *p->dpte_row_width_ub_one_row_per_frame);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow_one_row_per_frame = %u\n", __func__, *p->PixelPTEBytesPerRow_one_row_per_frame);
#endif
return vm_bytes;
@@ -1894,12 +1837,12 @@ static unsigned int CalculatePrefetchSourceLines(
double numLines = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VTaps = %u\n", __func__, VTaps);
- dml2_printf("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
- dml2_printf("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
- dml2_printf("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
- dml2_printf("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VTaps = %u\n", __func__, VTaps);
+ DML_LOG_VERBOSE("DML::%s: ViewportXStart = %u\n", __func__, ViewportXStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportYStart = %u\n", __func__, ViewportYStart);
+ DML_LOG_VERBOSE("DML::%s: ViewportStationary = %u\n", __func__, ViewportStationary);
+ DML_LOG_VERBOSE("DML::%s: SwathHeight = %u\n", __func__, SwathHeight);
#endif
if (ProgressiveToInterlaceUnitInOPP)
*VInitPreFill = (unsigned int)(math_floor2((VRatio + (double)VTaps + 1) / 2.0, 1));
@@ -1934,11 +1877,11 @@ static unsigned int CalculatePrefetchSourceLines(
numLines = *MaxNumSwath * SwathHeight + MaxPartialSwath;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
- dml2_printf("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
- dml2_printf("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
- dml2_printf("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
- dml2_printf("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
+ DML_LOG_VERBOSE("DML::%s: vp_start_rot = %u\n", __func__, vp_start_rot);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFill = %u\n", __func__, *VInitPreFill);
+ DML_LOG_VERBOSE("DML::%s: MaxPartialSwath = %u\n", __func__, MaxPartialSwath);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwath = %u\n", __func__, *MaxNumSwath);
+ DML_LOG_VERBOSE("DML::%s: Prefetch source lines = %3.2f\n", __func__, numLines);
#endif
return (unsigned int)(numLines);
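The VInitPreFill branch shown above rounds (VRatio + VTaps + 1) / 2 down when a progressive-to-interlace unit sits in the OPP. A worked instance with assumed scaler parameters:

#include <math.h>

/* VRatio = 1.5, VTaps = 4 (illustrative values only):
 * floor((1.5 + 4 + 1) / 2) = floor(3.25) = 3 lines of prefill */
static unsigned int vinit_prefill_example(void)
{
	return (unsigned int)floor((1.5 + 4.0 + 1.0) / 2.0); /* == 3 */
}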
@@ -2007,8 +1950,8 @@ static void CalculateMALLUseForStaticScreen(
if (is_using_mall_for_ss[k])
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
- dml2_printf("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalSurfaceSizeInMALL = %u\n", __func__, k, TotalSurfaceSizeInMALL);
#endif
}
@@ -2022,7 +1965,7 @@ static void CalculateMALLUseForStaticScreen(
(!CanAddAnotherSurfaceToMALL || SurfaceSizeInMALL[k] < SurfaceSizeInMALL[SurfaceToAddToMALL])) {
CanAddAnotherSurfaceToMALL = true;
SurfaceToAddToMALL = k;
- dml2_printf("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForStaticScreen = %u (dis, en, optimize)\n", __func__, k, display_cfg->plane_descriptors[k].overrides.refresh_from_mall);
}
}
if (CanAddAnotherSurfaceToMALL) {
@@ -2030,8 +1973,8 @@ static void CalculateMALLUseForStaticScreen(
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[SurfaceToAddToMALL];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
+ DML_LOG_VERBOSE("DML::%s: SurfaceToAddToMALL = %u\n", __func__, SurfaceToAddToMALL);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALL = %u\n", __func__, TotalSurfaceSizeInMALL);
#endif
}
}
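The scan above is a greedy fill: among planes whose refresh_from_mall override permits optimization, it keeps picking the smallest remaining surface for MALL while the budget holds. A sketch of the selection step under those assumptions, with hypothetical names:

#include <stdbool.h>

/* Pick the smallest surface not yet resident in MALL; returns -1 when
 * none remain. Illustrative only; the driver also filters on the
 * per-plane refresh_from_mall override. */
static int pick_smallest_surface(const unsigned int *size_in_mall,
				 const bool *already_added,
				 unsigned int num_planes)
{
	int best = -1;

	for (unsigned int k = 0; k < num_planes; k++) {
		if (already_added[k])
			continue;
		if (best < 0 || size_in_mall[k] < size_in_mall[best])
			best = (int)k;
	}
	return best;
}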
@@ -2203,15 +2146,15 @@ static void CalculateDCCConfiguration(
segment_order_vert_contiguous_chroma = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
- dml2_printf("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
- dml2_printf("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
- dml2_printf("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
- dml2_printf("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
- dml2_printf("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
- dml2_printf("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
- dml2_printf("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
+ DML_LOG_VERBOSE("DML::%s: DCCEnabled = %u\n", __func__, DCCEnabled);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeForDCC = %u\n", __func__, DETBufferSizeForDCC);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_l = %u\n", __func__, req128_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: req128_horz_wc_c = %u\n", __func__, req128_horz_wc_c);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_horz_wc_l = %u\n", __func__, full_swath_bytes_horz_wc_l);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes_vert_wc_c = %u\n", __func__, full_swath_bytes_vert_wc_c);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_luma = %u\n", __func__, segment_order_horz_contiguous_luma);
+ DML_LOG_VERBOSE("DML::%s: segment_order_horz_contiguous_chroma = %u\n", __func__, segment_order_horz_contiguous_chroma);
#endif
if (DCCProgrammingAssumesScanDirectionUnknown == true) {
if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
@@ -2301,12 +2244,12 @@ static void CalculateDCCConfiguration(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
- dml2_printf("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
- dml2_printf("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
- dml2_printf("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
- dml2_printf("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
- dml2_printf("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockLuma = %u\n", __func__, *MaxUncompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockLuma = %u\n", __func__, *MaxCompressedBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockLuma = %u\n", __func__, *IndependentBlockLuma);
+ DML_LOG_VERBOSE("DML::%s: MaxUncompressedBlockChroma = %u\n", __func__, *MaxUncompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: MaxCompressedBlockChroma = %u\n", __func__, *MaxCompressedBlockChroma);
+ DML_LOG_VERBOSE("DML::%s: IndependentBlockChroma = %u\n", __func__, *IndependentBlockChroma);
#endif
}
@@ -2326,26 +2269,26 @@ static void calculate_mcache_row_bytes(
unsigned int mvmpg_per_mcache;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_chans = %u\n", __func__, p->num_chans);
- dml2_printf("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
- dml2_printf("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
- dml2_printf("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
- dml2_printf("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
- dml2_printf("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
- dml2_printf("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
- dml2_printf("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
- dml2_printf("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
- dml2_printf("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
- dml2_printf("DML::%s: blk_width = %u\n", __func__, p->blk_width);
- dml2_printf("DML::%s: blk_height = %u\n", __func__, p->blk_height);
- dml2_printf("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
- dml2_printf("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
- dml2_printf("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
-#endif
- DML2_ASSERT(p->mcache_line_size_bytes != 0);
- DML2_ASSERT(p->mcache_size_bytes != 0);
+ DML_LOG_VERBOSE("DML::%s: num_chans = %u\n", __func__, p->num_chans);
+ DML_LOG_VERBOSE("DML::%s: mem_word_bytes = %u\n", __func__, p->mem_word_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_line_size_bytes = %u\n", __func__, p->mcache_line_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_size_bytes = %u\n", __func__, p->mcache_size_bytes);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_page_size_kbytes = %u\n", __func__, p->gpuvm_page_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: vp_stationary = %u\n", __func__, p->vp_stationary);
+ DML_LOG_VERBOSE("DML::%s: tiling_mode = %u\n", __func__, p->tiling_mode);
+ DML_LOG_VERBOSE("DML::%s: vp_start_x = %u\n", __func__, p->vp_start_x);
+ DML_LOG_VERBOSE("DML::%s: vp_start_y = %u\n", __func__, p->vp_start_y);
+ DML_LOG_VERBOSE("DML::%s: full_vp_width = %u\n", __func__, p->full_vp_width);
+ DML_LOG_VERBOSE("DML::%s: full_vp_height = %u\n", __func__, p->full_vp_height);
+ DML_LOG_VERBOSE("DML::%s: blk_width = %u\n", __func__, p->blk_width);
+ DML_LOG_VERBOSE("DML::%s: blk_height = %u\n", __func__, p->blk_height);
+ DML_LOG_VERBOSE("DML::%s: vmpg_width = %u\n", __func__, p->vmpg_width);
+ DML_LOG_VERBOSE("DML::%s: vmpg_height = %u\n", __func__, p->vmpg_height);
+ DML_LOG_VERBOSE("DML::%s: full_swath_bytes = %u\n", __func__, p->full_swath_bytes);
+#endif
+ DML_ASSERT(p->mcache_line_size_bytes != 0);
+ DML_ASSERT(p->mcache_size_bytes != 0);
*p->mvmpg_width = 0;
*p->mvmpg_height = 0;
@@ -2370,8 +2313,8 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_width = p->vmpg_width;
*p->mvmpg_height = p->vmpg_height;
} else if (!((blk_bytes == 65536) && (vmpg_bytes == 4096))) {
- dml2_printf("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: DML::%s: Tiling size and vm page size combination not supported\n", __func__);
+ DML_ASSERT(0);
}
}
@@ -2439,25 +2382,25 @@ static void calculate_mcache_row_bytes(
*p->mvmpg_per_mcache_lb = (unsigned int)math_floor2(mvmpg_per_mcache, 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
- dml2_printf("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
- dml2_printf("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
- dml2_printf("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
- dml2_printf("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
- dml2_printf("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
- dml2_printf("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %u\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %u\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: blk_bytes = %u\n", __func__, blk_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_per_mvmpg_per_channel = %f\n", __func__, meta_per_mvmpg_per_channel);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_per_row_ub = %u\n", __func__, mvmpg_per_row_ub);
+ DML_LOG_VERBOSE("DML::%s: meta_row_width_ub = %u\n", __func__, *p->meta_row_width_ub);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width = %u\n", __func__, *p->mvmpg_width);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height = %u\n", __func__, *p->mvmpg_height);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_nom_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_pref_overhead_factor = %f\n", __func__, *p->dcc_dram_bw_pref_overhead_factor);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
- dml2_printf("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
- dml2_printf("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes = %u\n", __func__, *p->mcache_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: mcache_row_bytes_per_channel = %u\n", __func__, *p->mcache_row_bytes_per_channel);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches = %u\n", __func__, *p->num_mcaches);
#endif
- DML2_ASSERT(*p->num_mcaches > 0);
+ DML_ASSERT(*p->num_mcaches > 0);
}
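The DML_ASSERT above only guards the final count; to first order, the values it checks relate as a ceiling division of the per-row footprint by the size of one mcache. The helper below is an assumed approximation for illustration, not the driver's exact formula (which also accounts for channel count and vmpg alignment):

/* Hypothetical first-order model of num_mcaches from the logged values. */
static unsigned int approx_num_mcaches(unsigned int mcache_row_bytes,
				       unsigned int mcache_size_bytes)
{
	/* caller asserts mcache_size_bytes != 0, mirroring DML_ASSERT above */
	return (mcache_row_bytes + mcache_size_bytes - 1) / mcache_size_bytes;
}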
static void calculate_mcache_setting(
@@ -2523,7 +2466,7 @@ static void calculate_mcache_setting(
l->l_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_l;
calculate_mcache_row_bytes(scratch, &l->l_p);
- DML2_ASSERT(*p->num_mcaches_l > 0);
+ DML_ASSERT(*p->num_mcaches_l > 0);
if (l->is_dual_plane) {
l->c_p.num_chans = p->num_chans;
@@ -2559,7 +2502,7 @@ static void calculate_mcache_setting(
l->c_p.mvmpg_per_mcache_lb = &l->mvmpg_per_mcache_lb_c;
calculate_mcache_row_bytes(scratch, &l->c_p);
- DML2_ASSERT(*p->num_mcaches_c > 0);
+ DML_ASSERT(*p->num_mcaches_c > 0);
}
// Sharing for iMALL access
@@ -2598,28 +2541,28 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
- dml2_printf("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
- dml2_printf("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
- dml2_printf("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
- dml2_printf("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
- dml2_printf("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
- dml2_printf("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
- dml2_printf("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
- dml2_printf("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
- dml2_printf("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
+ DML_LOG_VERBOSE("DML::%s: imall_enable = %u\n", __func__, p->imall_enable);
+ DML_LOG_VERBOSE("DML::%s: is_dual_plane = %u\n", __func__, l->is_dual_plane);
+ DML_LOG_VERBOSE("DML::%s: surf_vert = %u\n", __func__, p->surf_vert);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_l = %u\n", __func__, l->mvmpg_width_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_l = %u\n", __func__, l->mvmpg_height_l);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_l = %f\n", __func__, l->mcache_remainder_l);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_l = %u\n", __func__, *p->num_mcaches_l);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_l = %u\n", __func__, l->avg_mcache_element_size_l);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_l = %u\n", __func__, l->mvmpg_access_width_l);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_l = %u\n", __func__, *p->mall_comb_mcache_l);
if (l->is_dual_plane) {
- dml2_printf("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
- dml2_printf("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
- dml2_printf("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
- dml2_printf("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
- dml2_printf("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
- dml2_printf("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
- dml2_printf("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
- dml2_printf("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
- dml2_printf("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
- dml2_printf("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_width_c = %u\n", __func__, l->mvmpg_width_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_height_c = %u\n", __func__, l->mvmpg_height_c);
+ DML_LOG_VERBOSE("DML::%s: mcache_remainder_c = %f\n", __func__, l->mcache_remainder_c);
+ DML_LOG_VERBOSE("DML::%s: luma_time_factor = %f\n", __func__, l->luma_time_factor);
+ DML_LOG_VERBOSE("DML::%s: num_mcaches_c = %u\n", __func__, *p->num_mcaches_c);
+ DML_LOG_VERBOSE("DML::%s: avg_mcache_element_size_c = %u\n", __func__, l->avg_mcache_element_size_c);
+ DML_LOG_VERBOSE("DML::%s: mvmpg_access_width_c = %u\n", __func__, l->mvmpg_access_width_c);
+ DML_LOG_VERBOSE("DML::%s: mall_comb_mcache_c = %u\n", __func__, *p->mall_comb_mcache_c);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_last_mcache_size = %u\n", __func__, l->lc_comb_last_mcache_size);
+ DML_LOG_VERBOSE("DML::%s: lc_comb_mcache = %u\n", __func__, *p->lc_comb_mcache);
}
#endif
// calculate split_coordinate
@@ -2639,11 +2582,11 @@ static void calculate_mcache_setting(
}
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
if (l->is_dual_plane) {
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
}
#endif
@@ -2660,10 +2603,10 @@ static void calculate_mcache_setting(
#ifdef __DML_VBA_DEBUG__
for (n = 0; n < *p->num_mcaches_l; n++)
- dml2_printf("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_l[%u] = %u\n", __func__, n, p->mcache_offsets_l[n]);
for (n = 0; n < *p->num_mcaches_c; n++)
- dml2_printf("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
+ DML_LOG_VERBOSE("DML::%s: mcache_offsets_c[%u] = %u\n", __func__, n, p->mcache_offsets_c[n]);
#endif
}
@@ -2694,8 +2637,8 @@ static void calculate_mall_bw_overhead_factor(
mall_prefetch_dram_overhead_factor[k] = 2.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
- dml2_printf("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_sdp_overhead_factor = %f\n", __func__, k, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, mall_prefetch_dram_overhead_factor = %f\n", __func__, k, mall_prefetch_dram_overhead_factor[k]);
#endif
}
}
@@ -2772,22 +2715,20 @@ static double dml_get_return_bandwidth_available(
else // dml2_core_internal_bw_dram
return_bw_mbps = derate_dram_bandwidth;
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
- dml2_printf("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
- dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
- dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
- dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
- dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
- dml2_printf("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
- dml2_printf("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
- dml2_printf("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
- dml2_printf("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
- dml2_printf("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
-#endif
+ DML_LOG_VERBOSE("DML::%s: is_avg_bw = %u\n", __func__, is_avg_bw);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_en = %u\n", __func__, is_hvm_en);
+ DML_LOG_VERBOSE("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
+ DML_LOG_VERBOSE("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
+ DML_LOG_VERBOSE("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: ideal_dram_bandwidth = %f\n", __func__, ideal_dram_bandwidth);
+ DML_LOG_VERBOSE("DML::%s: derate_sdp_bandwidth = %f (derate %f)\n", __func__, derate_sdp_bandwidth, derate_sdp_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_fabric_bandwidth = %f (derate %f)\n", __func__, derate_fabric_bandwidth, derate_fabric_factor);
+ DML_LOG_VERBOSE("DML::%s: derate_dram_bandwidth = %f (derate %f)\n", __func__, derate_dram_bandwidth, derate_dram_factor);
+ DML_LOG_VERBOSE("DML::%s: return_bw_mbps = %f\n", __func__, return_bw_mbps);
return return_bw_mbps;
}
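As the trace above shows, each ideal bandwidth is scaled by its derate factor before the selected path is returned. The shape of that computation, with illustrative numbers (the real factors come from the SoC bounding box):

/* e.g. a 25600 MB/s ideal SDP path with a 0.8 derate factor leaves
 * 20480 MB/s of usable return bandwidth. */
static double derated_bw_mbps(double ideal_mbps, double derate_factor)
{
	return ideal_mbps * derate_factor;
}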
@@ -2807,9 +2748,9 @@ static noinline_for_stack void calculate_bandwidth_available(
{
unsigned int n, m;
- dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
- dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, dram_bw_mbps);
// Calculate all the bandwidth available
for (m = 0; m < dml2_core_internal_soc_state_max; m++) {
@@ -2828,8 +2769,8 @@ static noinline_for_stack void calculate_bandwidth_available(
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), avg_bandwidth_available[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), urg_bandwidth_available[m][n]);
#endif
// urg_bandwidth_available_vm_only is indexed by soc_state
@@ -2843,9 +2784,9 @@ static noinline_for_stack void calculate_bandwidth_available(
urg_bandwidth_available_min[m] = math_min2(urg_bandwidth_available[m][dml2_core_internal_bw_dram], urg_bandwidth_available[m][dml2_core_internal_bw_sdp]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
- dml2_printf("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), avg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_min[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_min[m]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_vm_only[%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), urg_bandwidth_available_vm_only[n]);
#endif
}
}
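The math_min2 reductions above encode that a return path is only as fast as its slowest leg; a concrete instance with assumed numbers:

#include <math.h>

/* sdp = 20480 MB/s, dram = 16000 MB/s (illustrative): urgent traffic is
 * bounded by the slower DRAM leg. */
static double urg_bw_min_example(void)
{
	return fmin(20480.0, 16000.0); /* == 16000.0 */
}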
@@ -2879,13 +2820,13 @@ static void calculate_avg_bandwidth_required(
// SysActive and SVP Prefetch AVG bandwidth Check
for (k = 0; k < num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: plane %0d\n", __func__, k);
- dml2_printf("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
- dml2_printf("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
- dml2_printf("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: plane %0d\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthLuma=%f\n", __func__, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidthChroma=%f\n", __func__, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p0=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p0[k]);
+ DML_LOG_VERBOSE("DML::%s: dcc_dram_bw_nom_overhead_factor_p1=%f\n", __func__, dcc_dram_bw_nom_overhead_factor_p1[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_dram_overhead_factor=%f\n", __func__, mall_prefetch_dram_overhead_factor[k]);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor=%f\n", __func__, mall_prefetch_sdp_overhead_factor[k]);
#endif
sdp_overhead_factor = mall_prefetch_sdp_overhead_factor[k];
@@ -2902,10 +2843,10 @@ static void calculate_avg_bandwidth_required(
avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram] += dram_overhead_factor_p0 * ReadBandwidthLuma[k] + dram_overhead_factor_p1 * ReadBandwidthChroma[k] + cursor_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_sys_active), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_sdp), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_required[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(dml2_core_internal_soc_state_svp_prefetch), dml2_core_internal_bw_type_str(dml2_core_internal_bw_dram), avg_bandwidth_required[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
#endif
}
}
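Stripped of the logging, the accumulation above says: each plane contributes its luma and chroma read bandwidth scaled by the matching DCC/MALL overhead factors, plus its cursor bandwidth. A sketch of one plane's DRAM-side term (names illustrative):

/* One plane's contribution to the average DRAM bandwidth requirement. */
static double plane_avg_dram_bw(double overhead_p0, double read_bw_luma,
				double overhead_p1, double read_bw_chroma,
				double cursor_bw)
{
	return overhead_p0 * read_bw_luma
	     + overhead_p1 * read_bw_chroma
	     + cursor_bw;
}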
@@ -3080,10 +3021,10 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->MaxNumSwathY[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
- dml2_printf("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
- dml2_printf("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_l = %u (before hvm level)\n", __func__, k, s->vm_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes_c = %u (before hvm level)\n", __func__, k, s->vm_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_l = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes_per_row_ub_c = %u\n", __func__, k, s->meta_row_bytes_per_row_ub_c[k]);
#endif
p->vm_bytes[k] = (s->vm_bytes_l + s->vm_bytes_c) * (1 + 8 * s->HostVMDynamicLevels);
p->meta_row_bytes[k] = s->meta_row_bytes_per_row_ub_l[k] + s->meta_row_bytes_per_row_ub_c[k];
@@ -3091,8 +3032,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->meta_row_bytes_per_row_ub_c[k] = s->meta_row_bytes_per_row_ub_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_bytes = %u\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_bytes = %u (after hvm level)\n", __func__, k, p->vm_bytes[k]);
#endif
if (s->PixelPTEBytesPerRowStorageY[k] <= 64 * s->PTEBufferSizeInRequestsForLuma[k] && s->PixelPTEBytesPerRowStorageC[k] <= 64 * s->PTEBufferSizeInRequestsForChroma[k]) {
p->PTEBufferSizeNotExceeded[k] = true;
@@ -3104,18 +3045,18 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
s->PixelPTEBytesPerRowC_one_row_per_frame[k] <= 64 * 2 * s->PTEBufferSizeInRequestsForChroma[k]);
#ifdef __DML_VBA_DEBUG__
if (p->PTEBufferSizeNotExceeded[k] == 0 || s->one_row_per_frame_fits_in_buffer[k] == 0) {
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (before hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageY = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowStorageC = %u\n", __func__, k, s->PixelPTEBytesPerRowStorageC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForLuma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeInRequestsForChroma = %u\n", __func__, k, s->PTEBufferSizeInRequestsForChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded (not one_row_per_frame) = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
- dml2_printf("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HostVMDynamicLevels = %u\n", __func__, k, s->HostVMDynamicLevels);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowY_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC_one_row_per_frame = %u\n", __func__, k, s->PixelPTEBytesPerRowC_one_row_per_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, one_row_per_frame_fits_in_buffer = %u\n", __func__, k, s->one_row_per_frame_fits_in_buffer[k]);
}
#endif
}
@@ -3146,8 +3087,8 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
p->DCCMetaBufferSizeNotExceeded[k] = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
- dml2_printf("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SurfaceSizeInMALL = %u\n", __func__, k, p->SurfaceSizeInMALL[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, is_using_mall_for_ss = %u\n", __func__, k, p->is_using_mall_for_ss[k]);
#endif
p->use_one_row_for_frame[k] = p->myPipe[k].FORCE_ONE_ROW_FOR_FRAME || p->is_using_mall_for_ss[k] || (p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe) ||
(dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) || (p->display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes > 64 && dml_is_vertical_rotation(p->myPipe[k].RotationAngle));
@@ -3170,9 +3111,9 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
p->DCCMetaBufferSizeNotExceeded[k] = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
- dml2_printf("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bytes = %d\n", __func__, k, p->meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeBytes = %d\n", __func__, k, p->DCCMetaBufferSizeBytes);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCCMetaBufferSizeNotExceeded = %d\n", __func__, k, p->DCCMetaBufferSizeNotExceeded[k]);
#endif
}
@@ -3209,20 +3150,20 @@ static void CalculateVMRowAndSwath(struct dml2_core_internal_scratch *scratch,
&p->dpte_row_bw[k],
&p->meta_row_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
- dml2_printf("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
- dml2_printf("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
- dml2_printf("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame_flip = %u\n", __func__, k, p->use_one_row_for_frame_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, UseMALLForPStateChange = %u\n", __func__, k, p->display_cfg->plane_descriptors[k].overrides.legacy_svp_config);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_luma = %u\n", __func__, k, p->dpte_row_height_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowY = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_height_chroma = %u\n", __func__, k, p->dpte_row_height_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRowC = %u (after hvm level)\n", __func__, k, s->PixelPTEBytesPerRowC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEBytesPerRow = %u\n", __func__, k, p->PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, p->PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, gpuvm_enable = %u\n", __func__, k, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTE_BUFFER_MODE = %u\n", __func__, k, p->PTE_BUFFER_MODE[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BIGK_FRAGMENT_SIZE = %u\n", __func__, k, p->BIGK_FRAGMENT_SIZE[k]);
#endif
}
}
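The pattern throughout this patch swaps dml2_printf calls for DML_LOG_VERBOSE. A minimal sketch of how such a level-gated macro could be defined, assuming a printf-style sink that compiles to nothing when verbose logging is disabled; this is illustrative only, not the definition in the driver's debug header:

/* Illustrative only: verbose logs vanish at compile time unless a debug
 * level explicitly enables them. */
#if defined(DML_DEBUG_LEVEL) && DML_DEBUG_LEVEL >= 2
#define DML_LOG_VERBOSE(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
#endif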
@@ -3257,19 +3198,19 @@ static double CalculateUrgentLatency(
}
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: urgent_ramp_uclk_cycles = %d\n", __func__, urgent_ramp_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: umc_urgent_ramp_latency_margin = %f\n", __func__, umc_urgent_ramp_latency_margin);
} else {
- dml2_printf("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
- dml2_printf("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
- dml2_printf("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
- dml2_printf("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelDataOnly = %f\n", __func__, UrgentLatencyPixelDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyPixelMixedWithVMData = %f\n", __func__, UrgentLatencyPixelMixedWithVMData);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyVMDataOnly = %f\n", __func__, UrgentLatencyVMDataOnly);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockComponent = %f\n", __func__, UrgentLatencyAdjustmentFabricClockComponent);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatencyAdjustmentFabricClockReference = %f\n", __func__, UrgentLatencyAdjustmentFabricClockReference);
}
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, urgent_latency);
#endif
return urgent_latency;
}
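For the dcn4x branch only the inputs are visible in the logs above; a hypothetical reconstruction of the cycles-to-microseconds arithmetic for the uclk term (the real function also folds in fclk-cycle terms against FabricClock, which are not shown here):

static double urgent_latency_uclk_term_us(unsigned int urgent_ramp_uclk_cycles,
					  double uclk_freq_mhz,
					  double umc_urgent_ramp_latency_margin)
{
	/* cycles / MHz = microseconds; the margin is a percentage uplift */
	return urgent_ramp_uclk_cycles / uclk_freq_mhz *
	       (1.0 + umc_urgent_ramp_latency_margin / 100.0);
}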
@@ -3296,18 +3237,18 @@ static double CalculateTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
- dml2_printf("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
- dml2_printf("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, FabricClock);
- dml2_printf("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
- dml2_printf("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: max_round_trip_to_furthest_cs_fclk_cycles = %d\n", __func__, max_round_trip_to_furthest_cs_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: mall_overhead_fclk_cycles = %d\n", __func__, mall_overhead_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_uclk_cycles = %d\n", __func__, trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: fabric_max_transport_latency_margin = %f\n", __func__, fabric_max_transport_latency_margin);
+ DML_LOG_VERBOSE("DML::%s: umc_max_latency_margin = %f\n", __func__, umc_max_latency_margin);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: trip_to_memory_us = %f\n", __func__, trip_to_memory_us);
#endif
@@ -3334,14 +3275,14 @@ static double CalculateMetaTripToMemory(
#ifdef __DML_VBA_DEBUG__
if (qos_type == dml2_qos_param_type_dcn4x) {
- dml2_printf("DML::%s: qos_type = %d\n", __func__, qos_type);
- dml2_printf("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
- dml2_printf("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: qos_type = %d\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_fclk_cycles = %d\n", __func__, meta_trip_to_memory_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_uclk_cycles = %d\n", __func__, meta_trip_to_memory_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, uclk_freq_mhz);
} else {
- dml2_printf("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
+ DML_LOG_VERBOSE("DML::%s: UrgLatency = %f\n", __func__, UrgLatency);
}
- dml2_printf("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
+ DML_LOG_VERBOSE("DML::%s: meta_trip_to_memory_us = %f\n", __func__, meta_trip_to_memory_us);
#endif
@@ -3358,7 +3299,6 @@ static void calculate_cursor_req_attributes(
unsigned int *cursor_bytes_per_chunk,
unsigned int *cursor_bytes)
{
- unsigned int cursor_pitch = 0;
unsigned int cursor_bytes_per_req = 0;
unsigned int cursor_width_bytes = 0;
unsigned int cursor_height = 0;
@@ -3366,10 +3306,6 @@ static void calculate_cursor_req_attributes(
//SW determines the cursor pitch to support the maximum cursor_width that will be used but the following restrictions apply.
//- For 2bpp, cursor_pitch = 256 pixels due to min cursor request size of 64B
//- For 32 or 64 bpp, cursor_pitch = 64, 128 or 256 pixels depending on the cursor width
- if (cursor_bpp == 2)
- cursor_pitch = 256;
- else
- cursor_pitch = (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1);
//The cursor requestor uses a cursor request size of 64B, 128B, or 256B depending on the cursor_width and cursor_bpp as follows.
@@ -3409,8 +3345,8 @@ static void calculate_cursor_req_attributes(
*cursor_lines_per_chunk = 1;
} else {
if (cursor_width > 0) {
- dml2_printf("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("DML::%s: Invalid cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_ASSERT(0);
}
}
@@ -3421,15 +3357,15 @@ static void calculate_cursor_req_attributes(
cursor_height = cursor_width;
*cursor_bytes = *cursor_bytes_per_line * cursor_height;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
- dml2_printf("DML::%s: cursor_width = %d\n", __func__, cursor_width);
- dml2_printf("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
- dml2_printf("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
- dml2_printf("DML::%s: cursor_pitch = %d\n", __func__, cursor_pitch);
+ DML_LOG_VERBOSE("DML::%s: cursor_bpp = %d\n", __func__, cursor_bpp);
+ DML_LOG_VERBOSE("DML::%s: cursor_width = %d\n", __func__, cursor_width);
+ DML_LOG_VERBOSE("DML::%s: cursor_width_bytes = %d\n", __func__, cursor_width_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_req = %d\n", __func__, cursor_bytes_per_req);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %d\n", __func__, *cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, *cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, *cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes = %d\n", __func__, *cursor_bytes);
+ DML_LOG_VERBOSE("DML::%s: cursor_pitch = %d\n", __func__, cursor_bpp == 2 ? 256 : (unsigned int)1 << (unsigned int)math_ceil2(math_log((float)cursor_width, 2), 1));
#endif
}
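The removed cursor_pitch local is now recomputed inline in the debug print above. Its rule, restated as a standalone helper for clarity (name and signature are mine, but the arithmetic mirrors the deleted lines):

static unsigned int cursor_pitch_pixels(int cursor_bpp, unsigned int cursor_width)
{
	unsigned int pitch = 1;

	if (cursor_bpp == 2)
		return 256; /* fixed by the 64B minimum request size at 2bpp */
	while (pitch < cursor_width) /* next power of two >= cursor_width */
		pitch <<= 1;
	return pitch;
}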
@@ -3460,13 +3396,13 @@ static void calculate_cursor_urgent_burst_factor(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
- dml2_printf("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
- dml2_printf("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
- dml2_printf("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInCursorBuffer = %u\n", __func__, LinesInCursorBuffer);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSizeInTime = %f\n", __func__, CursorBufferSizeInTime);
+ DML_LOG_VERBOSE("DML::%s: CursorBufferSize = %u (kbytes)\n", __func__, CursorBufferSize);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %u\n", __func__, cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_lines_per_chunk = %u\n", __func__, cursor_lines_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorCursor = %f\n", __func__, *UrgentBurstFactorCursor);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
@@ -3501,15 +3437,15 @@ static void CalculateUrgentBurstFactor(
*UrgentBurstFactorChroma = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
- dml2_printf("DML::%s: VRatioC = %f\n", __func__, VRatioC);
- dml2_printf("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
- dml2_printf("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
- dml2_printf("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
- dml2_printf("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: VRatioC = %f\n", __func__, VRatioC);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeY = %d\n", __func__, DETBufferSizeY);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeC = %d\n", __func__, DETBufferSizeC);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelInDETY = %f\n", __func__, BytePerPixelInDETY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %d\n", __func__, swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
#endif
- DML2_ASSERT(VRatio > 0);
+ DML_ASSERT(VRatio > 0);
LinesInDETLuma = (dml_is_phantom_pipe(plane_cfg) ? 1024 * 1024 : DETBufferSizeY) / BytePerPixelInDETY / swath_width_luma_ub;
@@ -3534,12 +3470,12 @@ static void CalculateUrgentBurstFactor(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
- dml2_printf("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
- dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
- dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
+ DML_LOG_VERBOSE("DML::%s: LinesInDETLuma = %f\n", __func__, LinesInDETLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeInTimeLuma = %f\n", __func__, DETBufferSizeInTimeLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorLuma = %f\n", __func__, *UrgentBurstFactorLuma);
+ DML_LOG_VERBOSE("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
+ DML_LOG_VERBOSE("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
}
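The hiding check follows the usual DML shape: the DET contents buy a fixed amount of time, and the burst factor inflates bandwidth by the fraction of that time consumed by urgent latency. A sketch under that assumption (the exact expression sits outside this hunk):

static double urgent_burst_factor(double det_buffer_time_us,
				  double urgent_latency_us,
				  bool *not_enough_hiding)
{
	/* If the buffered time cannot cover the latency, hiding fails. */
	if (det_buffer_time_us <= urgent_latency_us) {
		*not_enough_hiding = true;
		return 0.0;
	}
	*not_enough_hiding = false;
	return det_buffer_time_us / (det_buffer_time_us - urgent_latency_us);
}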
@@ -3600,10 +3536,10 @@ static void CalculateDCFCLKDeepSleepTdlut(
if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) {
double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k];
- dml2_printf("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
- dml2_printf("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
- dml2_printf("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
- dml2_printf("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
// increase the deepsleep dcfclk to match the original dispclk throughput rate
if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) {
@@ -3613,8 +3549,8 @@ static void CalculateDCFCLKDeepSleepTdlut(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
- dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
#endif
}
@@ -3625,17 +3561,17 @@ static void CalculateDCFCLKDeepSleepTdlut(
*DCFClkDeepSleep = math_max2(8.0, __DML2_CALCS_DCFCLK_FACTOR__ * ReadBandwidth / (double)ReturnBusWidth);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
- dml2_printf("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
- dml2_printf("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: __DML2_CALCS_DCFCLK_FACTOR__ = %f\n", __func__, __DML2_CALCS_DCFCLK_FACTOR__);
+ DML_LOG_VERBOSE("DML::%s: ReadBandwidth = %f\n", __func__, ReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: ReturnBusWidth = %u\n", __func__, ReturnBusWidth);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, *DCFClkDeepSleep);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
*DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
}
- dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
}
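The tdlut term above is a bytes-to-clock conversion: the /64.0 suggests the return path moves 64 bytes per DCFCLK cycle, so cycles = bytes / 64, and cycles over the available prefetch swath time give MHz. A worked instance with arbitrary numbers:

/* 16 KiB of 3D LUT to deliver in 25 us:
 * 16384 / 64.0 = 256 cycles; 256 / 25 = 10.24 MHz of deep-sleep DCFCLK. */
double tdlut_required_deepsleep_dcfclk = 16384 / 64.0 / 25.0; /* 10.24 */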
static noinline_for_stack void CalculateDCFCLKDeepSleep(
@@ -3731,12 +3667,12 @@ static unsigned int CalculateMaxVStartup(
else
max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VBlankNom = %u\n", __func__, timing->vblank_nom);
- dml2_printf("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
- dml2_printf("DML::%s: line_time_us = %f\n", __func__, line_time_us);
- dml2_printf("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
- dml2_printf("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
- dml2_printf("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
+ DML_LOG_VERBOSE("DML::%s: VBlankNom = %lu\n", __func__, timing->vblank_nom);
+ DML_LOG_VERBOSE("DML::%s: vblank_nom_default_us = %u\n", __func__, vblank_nom_default_us);
+ DML_LOG_VERBOSE("DML::%s: line_time_us = %f\n", __func__, line_time_us);
+ DML_LOG_VERBOSE("DML::%s: vblank_actual = %u\n", __func__, vblank_actual);
+ DML_LOG_VERBOSE("DML::%s: vblank_avail = %u\n", __func__, vblank_avail);
+ DML_LOG_VERBOSE("DML::%s: max_vstartup_lines = %u\n", __func__, max_vstartup_lines);
#endif
max_vstartup_lines = (unsigned int)math_min2(max_vstartup_lines, DML_MAX_VSTARTUP_START);
return max_vstartup_lines;
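A worked instance of the writeback branch above, using the file's own math helpers (the numbers are arbitrary):

/* line_time_us = 8.0, write_back_delay_us = 20.0, vblank_size = 40:
 * ceil(20.0 / 8.0) = 3 lines reserved, so max_vstartup_lines = 37,
 * before the final clamp to DML_MAX_VSTARTUP_START. */
unsigned int reserved = (unsigned int)math_max2(1.0, math_ceil2(20.0 / 8.0, 1.0)); /* 3 */
unsigned int max_vstartup_lines = 40 - reserved; /* 37 */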
@@ -3761,9 +3697,9 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
const long MAXIMUMCOMPRESSION = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
+ DML_LOG_VERBOSE("DML::%s: ForceSingleDPP = %u\n", __func__, p->ForceSingleDPP);
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- dml2_printf("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface[%u] = %u\n", __func__, k, p->DPPPerSurface[k]);
}
#endif
CalculateSwathWidth(
@@ -3797,15 +3733,15 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->full_swath_bytes_l[k] = (unsigned int)(p->swath_width_luma_ub[k] * p->BytePerPixDETY[k] * MaximumSwathHeightY[k]);
p->full_swath_bytes_c[k] = (unsigned int)(p->swath_width_chroma_ub[k] * p->BytePerPixDETC[k] * MaximumSwathHeightC[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
- dml2_printf("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DPPPerSurface = %u\n", __func__, k, p->DPPPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_luma_ub = %u\n", __func__, k, p->swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETY = %f\n", __func__, k, p->BytePerPixDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightY = %u\n", __func__, k, MaximumSwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u swath_width_chroma_ub = %u\n", __func__, k, p->swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u BytePerPixDETC = %f\n", __func__, k, p->BytePerPixDETC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathHeightC = %u\n", __func__, k, MaximumSwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
#endif
if (p->display_cfg->plane_descriptors[k].pixel_format == dml2_420_10) {
p->full_swath_bytes_l[k] = (unsigned int)(math_ceil2((double)p->full_swath_bytes_l[k], 256));
@@ -3848,11 +3784,11 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->CompressedBufferSizeInkByte);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
- dml2_printf("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
- dml2_printf("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: TotalActiveDPP = %u\n", __func__, TotalActiveDPP);
+ DML_LOG_VERBOSE("DML::%s: nomDETInKByte = %u\n", __func__, p->nomDETInKByte);
+ DML_LOG_VERBOSE("DML::%s: ConfigReturnBufferSizeInKByte = %u\n", __func__, p->ConfigReturnBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u\n", __func__, *p->CompressedBufferSizeInkByte);
#endif
*p->ViewportSizeSupport = true;
@@ -3860,7 +3796,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
DETBufferSizeInKByteForSwathCalculation = (dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k]) ? 1024 : p->DETBufferSizeInKByte[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation = %u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
#endif
if (p->display_cfg->plane_descriptors[k].surface.tiling == dml2_sw_linear) {
p->SwathHeightY[k] = MaximumSwathHeightY[k];
@@ -3917,13 +3853,13 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if ((p->full_swath_bytes_l[k] / 2 + p->full_swath_bytes_c[k] / 2 > DETBufferSizeInKByteForSwathCalculation * 1024 / 2) ||
p->SwathWidth[k] > p->MaximumSwathWidthLuma[k] || (p->SwathHeightC[k] > 0 && p->SwathWidthChroma[k] > p->MaximumSwathWidthChroma[k])) {
*p->ViewportSizeSupport = false;
- dml2_printf("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
- dml2_printf("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l=%u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c=%u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByteForSwathCalculation=%u\n", __func__, k, DETBufferSizeInKByteForSwathCalculation);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidth=%u\n", __func__, k, p->SwathWidth[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, p->MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathWidthChroma=%d\n", __func__, k, p->SwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, p->MaximumSwathWidthChroma[k]);
p->ViewportSizeSupportPerSurface[k] = false;
} else {
p->ViewportSizeSupportPerSurface[k] = true;
@@ -3931,35 +3867,35 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
if (p->SwathHeightC[k] == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, All DET will be used for plane0\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024;
p->DETBufferSizeC[k] = 0;
} else if (RoundedUpSwathSizeBytesY[k] <= 1.5 * RoundedUpSwathSizeBytesC[k]) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Half DET will be used for plane0, and half for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 / 2;
} else {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u, 2/3 DET will be used for plane0, and 1/3 for plane1\n", __func__, k);
#endif
p->DETBufferSizeY[k] = (unsigned int)(math_floor2(p->DETBufferSizeInKByte[k] * 1024 * 2 / 3, 1024));
p->DETBufferSizeC[k] = p->DETBufferSizeInKByte[k] * 1024 - p->DETBufferSizeY[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
- dml2_printf("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
- dml2_printf("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u SwathHeightC = %u\n", __func__, k, p->SwathHeightC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_l = %u\n", __func__, k, p->full_swath_bytes_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u full_swath_bytes_c = %u\n", __func__, k, p->full_swath_bytes_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesY = %u\n", __func__, k, RoundedUpSwathSizeBytesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeInKByte = %u\n", __func__, k, p->DETBufferSizeInKByte[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u DETBufferSizeC = %u\n", __func__, k, p->DETBufferSizeC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportSizeSupportPerSurface = %u\n", __func__, k, p->ViewportSizeSupportPerSurface[k]);
#endif
}
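The three-way DET split above is compact enough to restate as a helper; this mirrors the logic in the hunk exactly (sizes in bytes, 1 KB alignment on the 2/3 split; the function wrapper is mine):

static void split_det(unsigned int det_kbytes, unsigned int swath_height_c,
		      unsigned int rounded_y, unsigned int rounded_c,
		      unsigned int *det_y, unsigned int *det_c)
{
	if (swath_height_c == 0) {			/* plane0 only */
		*det_y = det_kbytes * 1024;
		*det_c = 0;
	} else if (rounded_y <= 1.5 * rounded_c) {	/* comparable demand: half/half */
		*det_y = det_kbytes * 1024 / 2;
		*det_c = det_kbytes * 1024 / 2;
	} else {					/* luma-heavy: 2/3 vs 1/3 */
		*det_y = det_kbytes * 1024 * 2 / 3 / 1024 * 1024; /* floor to 1 KB */
		*det_c = det_kbytes * 1024 - *det_y;
	}
}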
@@ -3969,12 +3905,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
*p->compbuf_reserved_space_64b = (unsigned int)math_ceil2(math_max2(*p->compbuf_reserved_space_64b,
(double)(p->rob_buffer_size_kbytes * 1024 / 64) - (double)(RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest] * TTUFIFODEPTH / (p->mrq_present ? MAXIMUMCOMPRESSION : 1) / 64)), 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: RoundedUpSwathSizeBytesY[%d] = %u\n", __func__, SurfaceDoingUnboundedRequest, RoundedUpSwathSizeBytesY[SurfaceDoingUnboundedRequest]);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %u\n", __func__, p->rob_buffer_size_kbytes);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_64b = %u\n", __func__, *p->compbuf_reserved_space_64b);
#endif
*p->hw_debug5 = false;
@@ -3989,12 +3925,12 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
+ *p->CompressedBufferSizeInkByte * MAXIMUMCOMPRESSION * 1024) > TTUFIFODEPTH * (RoundedUpSwathSizeBytesY[k] + RoundedUpSwathSizeBytesC[k])))
*p->hw_debug5 = true;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
- dml2_printf("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
- dml2_printf("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
- dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
+ DML_LOG_VERBOSE("DML::%s: k=%u UnboundedRequestEnabled = %u\n", __func__, k, *p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: k=%u MAXIMUMCOMPRESSION = %lu\n", __func__, k, MAXIMUMCOMPRESSION);
+ DML_LOG_VERBOSE("DML::%s: k=%u TTUFIFODEPTH = %lu\n", __func__, k, TTUFIFODEPTH);
+ DML_LOG_VERBOSE("DML::%s: k=%u CompressedBufferSizeInkByte = %u\n", __func__, k, *p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: k=%u RoundedUpSwathSizeBytesC = %u\n", __func__, k, RoundedUpSwathSizeBytesC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
#endif
}
#endif
@@ -4192,15 +4128,15 @@ static noinline_for_stack void CalculateODMMode(
SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMUse = %d\n", __func__, ODMUse);
- dml2_printf("DML::%s: Output = %d\n", __func__, Output);
- dml2_printf("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
- dml2_printf("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
- dml2_printf("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
- dml2_printf("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
+ DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
+ DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %d\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: MaxDispclk = %f\n", __func__, MaxDispclk);
+ DML_LOG_VERBOSE("DML::%s: MaximumPixelsPerLinePerDSCUnit = %d\n", __func__, MaximumPixelsPerLinePerDSCUnit);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithoutODMCombine = %f\n", __func__, SurfaceRequiredDISPCLKWithoutODMCombine);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineTwoToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineTwoToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineThreeToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineThreeToOne);
+ DML_LOG_VERBOSE("DML::%s: SurfaceRequiredDISPCLKWithODMCombineFourToOne = %f\n", __func__, SurfaceRequiredDISPCLKWithODMCombineFourToOne);
#endif
if (ODMUse == dml2_odm_mode_auto)
DecidedODMMode = DecideODMMode(HActive,
@@ -4245,10 +4181,10 @@ static noinline_for_stack void CalculateODMMode(
*NumberOfDPP = NumberOfDPPRequired;
*RequiredDISPCLKPerSurface = success ? DISPCLKRequired : 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
- dml2_printf("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
- dml2_printf("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
- dml2_printf("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %d\n", __func__, *ODMMode);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDPP = %d\n", __func__, *NumberOfDPP);
+ DML_LOG_VERBOSE("DML::%s: TotalAvailablePipesSupport = %d\n", __func__, *TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML::%s: RequiredDISPCLKPerSurface = %f\n", __func__, *RequiredDISPCLKPerSurface);
#endif
}
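A reduced sketch of what the four SurfaceRequiredDISPCLK* candidates above represent, with margins and rounding omitted (the simplification and helper name are mine; the real CalculateRequiredDispclk also applies clock margins):

/* Each ODM combine factor splits the pixel rate across that many OPP
 * segments, so a higher combine lowers the per-pipe DISPCLK needed. */
static double required_dispclk_mhz(unsigned int combine_factor, double pixel_clock_mhz)
{
	return pixel_clock_mhz / combine_factor; /* margins omitted */
}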
@@ -4292,17 +4228,17 @@ static noinline_for_stack void CalculateOutputLink(
*OutputRate = dml2_core_internal_output_rate_unknown;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
- dml2_printf("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
- dml2_printf("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
- dml2_printf("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
- dml2_printf("DML::%s: Output (encoder) = %u\n", __func__, Output);
- dml2_printf("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
+ DML_LOG_VERBOSE("DML::%s: DSCEnable = %u (dis, en, en_if_necessary)\n", __func__, DSCEnable);
+ DML_LOG_VERBOSE("DML::%s: PHYCLK = %f\n", __func__, PHYCLK);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: AudioSampleRate = %f\n", __func__, AudioSampleRate);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: ODMModeNoDSC = %u\n", __func__, ODMModeNoDSC);
+ DML_LOG_VERBOSE("DML::%s: ODMModeDSC = %u\n", __func__, ODMModeDSC);
+ DML_LOG_VERBOSE("DML::%s: ForcedOutputLinkBPP = %f\n", __func__, ForcedOutputLinkBPP);
+ DML_LOG_VERBOSE("DML::%s: Output (encoder) = %u\n", __func__, Output);
+ DML_LOG_VERBOSE("DML::%s: OutputLinkDPRate = %u\n", __func__, OutputLinkDPRate);
#endif
{
if (Output == dml2_hdmi) {
@@ -4487,9 +4423,9 @@ static noinline_for_stack void CalculateOutputLink(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
- dml2_printf("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
- dml2_printf("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
+ DML_LOG_VERBOSE("DML::%s: RequiresDSC = %u\n", __func__, *RequiresDSC);
+ DML_LOG_VERBOSE("DML::%s: RequiresFEC = %u\n", __func__, *RequiresFEC);
+ DML_LOG_VERBOSE("DML::%s: OutBpp = %f\n", __func__, *OutBpp);
#endif
}
@@ -4571,17 +4507,17 @@ static unsigned int DSCDelayRequirement(
DSCDelayRequirement_val = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, ODMMode);
- dml2_printf("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
- dml2_printf("DML::%s: HActive = %u\n", __func__, HActive);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
- dml2_printf("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
- dml2_printf("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
- dml2_printf("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
- dml2_printf("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
+ DML_LOG_VERBOSE("DML::%s: DSCEnabled= %u\n", __func__, DSCEnabled);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, ODMMode);
+ DML_LOG_VERBOSE("DML::%s: OutputBpp = %f\n", __func__, OutputBpp);
+ DML_LOG_VERBOSE("DML::%s: HActive = %u\n", __func__, HActive);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: PixelClockBackEnd = %f\n", __func__, PixelClockBackEnd);
+ DML_LOG_VERBOSE("DML::%s: OutputFormat = %u\n", __func__, OutputFormat);
+ DML_LOG_VERBOSE("DML::%s: DSCInputBitPerComponent = %u\n", __func__, DSCInputBitPerComponent);
+ DML_LOG_VERBOSE("DML::%s: NumberOfDSCSlices = %u\n", __func__, NumberOfDSCSlices);
+ DML_LOG_VERBOSE("DML::%s: DSCDelayRequirement_val = %u\n", __func__, DSCDelayRequirement_val);
#endif
return DSCDelayRequirement_val;
@@ -4654,10 +4590,10 @@ static void CalculateSurfaceSizeInMall(
(TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
- dml2_printf("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
- dml2_printf("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
+ DML_LOG_VERBOSE("DML::%s: MALLAllocatedForDCN = %u\n", __func__, MALLAllocatedForDCN * 1024 * 1024);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSubVP = %u\n", __func__, TotalSurfaceSizeInMALLForSubVP);
+ DML_LOG_VERBOSE("DML::%s: TotalSurfaceSizeInMALLForSS = %u\n", __func__, TotalSurfaceSizeInMALLForSS);
+ DML_LOG_VERBOSE("DML::%s: ExceededMALLSize = %u\n", __func__, *ExceededMALLSize);
#endif
}
@@ -4674,7 +4610,6 @@ static void calculate_tdlut_setting(
unsigned int tdlut_vmpg_per_frame;
unsigned int tdlut_pte_req_per_frame;
unsigned int tdlut_bytes_per_line;
- unsigned int tdlut_delivery_cycles;
double tdlut_drain_rate;
unsigned int tdlut_mpc_width;
unsigned int tdlut_bytes_per_group_simple;
@@ -4737,44 +4672,47 @@ static void calculate_tdlut_setting(
*p->tdlut_bytes_per_frame = tdlut_bytes_per_line * tdlut_mpc_width * tdlut_mpc_width;
*p->tdlut_bytes_per_group = tdlut_bytes_per_line * tdlut_mpc_width;
//the delivery cycles is DispClk cycles per line * number of lines * number of slices
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width;
tdlut_drain_rate = tdlut_bytes_per_line * p->dispclk_mhz / math_ceil2(tdlut_mpc_width/2.0, 1);
} else {
//tdlut_addressing_mode = tdlut_simple_linear, 3dlut width should be 4*1229=4916 elements
*p->tdlut_bytes_per_frame = (unsigned int)math_ceil2(tdlut_width * tdlut_bpe, 256);
*p->tdlut_bytes_per_group = tdlut_bytes_per_group_simple;
- tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
+ //tdlut_delivery_cycles = (unsigned int)math_ceil2(tdlut_width/2.0, 1);
tdlut_drain_rate = 2 * tdlut_bpe * p->dispclk_mhz;
}
//the tdlut is fetched during the 2 row times of prefetch.
if (p->setup_for_tdlut) {
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
- *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024)
+ *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ else
+ *p->tdlut_opt_time = 0;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
*p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
- dml2_printf("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
- dml2_printf("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
- dml2_printf("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
+ DML_LOG_VERBOSE("DML::%s: gpuvm_enable = %d\n", __func__, p->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: vmpg_bytes = %d\n", __func__, vmpg_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_vmpg_per_frame = %d\n", __func__, tdlut_vmpg_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_req_per_frame = %d\n", __func__, tdlut_pte_req_per_frame);
- dml2_printf("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
- dml2_printf("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
- dml2_printf("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
- dml2_printf("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
- dml2_printf("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
- dml2_printf("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
- dml2_printf("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
- dml2_printf("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
- dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
- dml2_printf("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
- dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
+ DML_LOG_VERBOSE("DML::%s: dispclk_mhz = %f\n", __func__, p->dispclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: tdlut_width = %u\n", __func__, tdlut_width);
+ DML_LOG_VERBOSE("DML::%s: tdlut_addressing_mode = %s\n", __func__, (p->tdlut_addressing_mode == dml2_tdlut_sw_linear) ? "sw_linear" : "simple_linear");
+ DML_LOG_VERBOSE("DML::%s: tdlut_pitch_bytes = %u\n", __func__, tdlut_pitch_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_footprint_bytes = %u\n", __func__, tdlut_footprint_bytes);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_frame = %u\n", __func__, *p->tdlut_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_line = %u\n", __func__, tdlut_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_per_group = %u\n", __func__, *p->tdlut_bytes_per_group);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_rate = %f\n", __func__, tdlut_drain_rate);
+ DML_LOG_VERBOSE("DML::%s: tdlut_delivery_cycles = %u\n", __func__, p->tdlut_addressing_mode == dml2_tdlut_sw_linear ? (unsigned int)math_ceil2(tdlut_mpc_width/2.0, 1) * tdlut_mpc_width * tdlut_mpc_width : (unsigned int)math_ceil2(tdlut_width/2.0, 1));
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
+ DML_LOG_VERBOSE("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
#endif
}
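The new guard above avoids an unsigned underflow when the cursor buffer already covers the whole 3D LUT; isolated for clarity (signature is illustrative):

static double tdlut_opt_time_us(unsigned int frame_bytes,
				unsigned int cursor_buffer_kbytes,
				double drain_rate_bytes_per_us)
{
	unsigned int covered = cursor_buffer_kbytes * 1024;

	/* Without this check, frame_bytes - covered wraps to a huge value
	 * whenever the cursor buffer is at least LUT-sized. */
	if (frame_bytes <= covered)
		return 0.0;
	return (frame_bytes - covered) / drain_rate_bytes_per_us;
}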
@@ -4820,10 +4758,10 @@ static void CalculateTarb(
*Tarb = extra_bytes / ReturnBW;
*Tarb_prefetch = extra_bytes_prefetch / ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
- dml2_printf("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
- dml2_printf("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
- dml2_printf("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %d\n", __func__, PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: MetaChunkSize = %d\n", __func__, MetaChunkSize);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes = %f\n", __func__, extra_bytes);
+ DML_LOG_VERBOSE("DML::%s: extra_bytes_prefetch = %f\n", __func__, extra_bytes_prefetch);
#endif
}
@@ -4838,10 +4776,10 @@ static double CalculateTWait(
TWait = math_max2(reserved_vblank_time_ns/1000.0, g6_temp_read_blackout_us) + t_urg_trip;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: reserved_vblank_time_ns = %d\n", __func__, reserved_vblank_time_ns);
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, Ttrip);
- dml2_printf("DML::%s: TWait = %f\n", __func__, TWait);
+ DML_LOG_VERBOSE("DML::%s: reserved_vblank_time_ns = %ld\n", __func__, reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, Ttrip);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, TWait);
#endif
return TWait;
}
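The TWait composition above, restated as a fragment over the function's existing locals (t_urg_trip combines UrgentLatency and Ttrip earlier in the function, outside this hunk):

/* Wait the longer of the reserved vblank time and the G6 temperature-read
 * blackout, then add the urgent/trip term. */
double twait_us = math_max2(reserved_vblank_time_ns / 1000.0,
			    g6_temp_read_blackout_us) + t_urg_trip;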
@@ -4887,20 +4825,20 @@ static void CalculateVUpdateAndDynamicMetadataParameters(
*Tdmsks = *Tdmsks / 2;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
- dml2_printf("DML::%s: VBlank = %u\n", __func__, VBlank);
- dml2_printf("DML::%s: HTotal = %u\n", __func__, HTotal);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, Dppclk);
- dml2_printf("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
- dml2_printf("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
- dml2_printf("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataLinesBeforeActiveRequired = %u\n", __func__, DynamicMetadataLinesBeforeActiveRequired);
+ DML_LOG_VERBOSE("DML::%s: VBlank = %u\n", __func__, VBlank);
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, HTotal);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DCFClkDeepSleep = %f\n", __func__, DCFClkDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: MaxInterDCNTileRepeaters = %u\n", __func__, MaxInterDCNTileRepeaters);
+ DML_LOG_VERBOSE("DML::%s: TotalRepeaterDelayTime = %f\n", __func__, TotalRepeaterDelayTime);
- dml2_printf("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
- dml2_printf("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
- dml2_printf("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateWidthPix = %u\n", __func__, *VUpdateWidthPix);
+ DML_LOG_VERBOSE("DML::%s: VReadyOffsetPix = %u\n", __func__, *VReadyOffsetPix);
+ DML_LOG_VERBOSE("DML::%s: VUpdateOffsetPix = %u\n", __func__, *VUpdateOffsetPix);
- dml2_printf("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks = %f\n", __func__, *Tdmsks);
#endif
}
@@ -4962,11 +4900,11 @@ static double get_urgent_bandwidth_required(
l->adj_factor_cur_pre = UrgentBurstFactorCursorPre[k];
bool is_phantom = dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]);
- bool exclude_this_plane = 0;
+ bool exclude_this_plane = false;
// Exclude phantom pipe in bw calculation for non svp prefetch state
if (state_type != dml2_core_internal_soc_state_svp_prefetch && is_phantom)
- exclude_this_plane = 1;
+ exclude_this_plane = true;
// The qualified row bandwidth, qual_row_bw, accounts for the regular non-flip row bandwidth when there is no possible immediate flip or HostVM invalidation flip.
// The qual_row_bw is zero if HostVM is possible and only non-zero and equal to row_bw(i) if immediate flip is not allowed for that pipe.
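The bool cleanup above also makes the phantom-pipe exclusion easier to read as a predicate (the wrapper is mine; the enum values appear elsewhere in this file):

static bool exclude_plane_from_bw(enum dml2_core_internal_soc_state_type state,
				  bool is_phantom)
{
	/* Phantom (SubVP) planes only carry bandwidth when evaluating the
	 * svp_prefetch soc state. */
	return is_phantom && state != dml2_core_internal_soc_state_svp_prefetch;
}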
@@ -4995,12 +4933,12 @@ static double get_urgent_bandwidth_required(
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
- dml2_printf("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
- dml2_printf("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
- dml2_printf("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
- dml2_printf("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
- dml2_printf("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max1: vm_row_bw=%f\n", __func__, k, l->vm_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max2: flip_and_active_bw=%f\n", __func__, k, l->flip_and_active_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max3: flip_and_prefetch_bw=%f\n", __func__, k, l->flip_and_prefetch_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, max4: active_and_excess_bw=%f\n", __func__, k, l->active_and_excess_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_required_bw=%f\n", __func__, k, surface_required_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, surface_peak_required_bw=%f\n", __func__, k, surface_peak_required_bw[k]);
#endif
} else {
surface_required_bw[k] = 0.0;
@@ -5009,34 +4947,34 @@ static double get_urgent_bandwidth_required(
l->required_bandwidth_mbps += surface_required_bw[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
- dml2_printf("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
- dml2_printf("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
- dml2_printf("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
- dml2_printf("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
- dml2_printf("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NumberOfDPP=%d\n", __func__, k, NumberOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, use_qual_row_bw=%d\n", __func__, k, use_qual_row_bw);
+ DML_LOG_VERBOSE("DML::%s: k=%d, immediate_flip=%d\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%d, mall_svp_prefetch_factor=%f\n", __func__, k, l->mall_svp_prefetch_factor);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0=%f\n", __func__, k, l->adj_factor_p0);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1=%f\n", __func__, k, l->adj_factor_p1);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur=%f\n", __func__, k, l->adj_factor_cur);
- dml2_printf("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
- dml2_printf("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p0_pre=%f\n", __func__, k, l->adj_factor_p0_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_p1_pre=%f\n", __func__, k, l->adj_factor_p1_pre);
+ DML_LOG_VERBOSE("DML::%s: k=%d, adj_factor_cur_pre=%f\n", __func__, k, l->adj_factor_cur_pre);
- dml2_printf("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
- dml2_printf("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
- dml2_printf("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
- dml2_printf("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, per_plane_flip_bw=%f\n", __func__, k, l->per_plane_flip_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_vmrow_bw=%f\n", __func__, k, prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthLuma=%f\n", __func__, k, ReadBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ReadBandwidthChroma=%f\n", __func__, k, ReadBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_l=%f\n", __func__, k, excess_vactive_fill_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, excess_vactive_fill_bw_c=%f\n", __func__, k, excess_vactive_fill_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, cursor_bw=%f\n", __func__, k, cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
- dml2_printf("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
- dml2_printf("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
- dml2_printf("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_row_bw=%f\n", __func__, k, meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dpte_row_bw=%f\n", __func__, k, dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthLuma=%f\n", __func__, k, PrefetchBandwidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, PrefetchBandwidthChroma=%f\n", __func__, k, PrefetchBandwidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, prefetch_cursor_bw=%f\n", __func__, k, prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), soc_state=%s, inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, dml2_core_internal_soc_state_type_str(state_type), inc_flip_bw, is_phantom, exclude_this_plane);
+ DML_LOG_VERBOSE("DML::%s: k=%d, required_bandwidth_mbps=%f (total), inc_flip_bw=%d, is_phantom=%d exclude_this_plane=%d\n", __func__, k, l->required_bandwidth_mbps, inc_flip_bw, is_phantom, exclude_this_plane);
#endif
}
@@ -5120,19 +5058,19 @@ static void CalculateExtraLatency(
*ExtraLatency_sr = *ExtraLatency_sr + Tarb;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: qos_type=%u\n", __func__, qos_type);
- dml2_printf("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
- dml2_printf("DML::%s: Tex_trips=%u\n", __func__, Tex_trips);
- dml2_printf("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
- dml2_printf("DML::%s: FabricClock=%f\n", __func__, FabricClock);
- dml2_printf("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
- dml2_printf("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
- dml2_printf("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
- dml2_printf("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
- dml2_printf("DML::%s: Tarb=%f\n", __func__, Tarb);
- dml2_printf("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
- dml2_printf("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
- dml2_printf("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: qos_type=%u\n", __func__, qos_type);
+ DML_LOG_VERBOSE("DML::%s: hostvm_mode=%u\n", __func__, hostvm_mode);
+ DML_LOG_VERBOSE("DML::%s: Tex_trips=%f\n", __func__, Tex_trips);
+ DML_LOG_VERBOSE("DML::%s: max_outstanding_when_urgent_expected=%u\n", __func__, max_outstanding_when_urgent_expected);
+ DML_LOG_VERBOSE("DML::%s: FabricClock=%f\n", __func__, FabricClock);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK=%f\n", __func__, DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW=%f\n", __func__, ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: RoundTripPingLatencyCycles=%u\n", __func__, RoundTripPingLatencyCycles);
+ DML_LOG_VERBOSE("DML::%s: ReorderingBytes=%u\n", __func__, ReorderingBytes);
+ DML_LOG_VERBOSE("DML::%s: Tarb=%f\n", __func__, Tarb);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency=%f\n", __func__, *ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency_sr=%f\n", __func__, *ExtraLatency_sr);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch=%f\n", __func__, *ExtraLatencyPrefetch);
#endif
}
@@ -5199,20 +5137,20 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->HostVMDynamicLevelsTrips = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
- dml2_printf("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
- dml2_printf("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
- dml2_printf("DML::%s: VStartup = %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: dcc_enable = %u\n", __func__, p->dcc_enable);
+ DML_LOG_VERBOSE("DML::%s: mrq_present = %u\n", __func__, p->mrq_present);
+ DML_LOG_VERBOSE("DML::%s: dcc_mrq_enable = %u\n", __func__, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, p->display_cfg->gpuvm_enable);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: DCCEnable = %u\n", __func__, p->myPipe->DCCEnable);
+ DML_LOG_VERBOSE("DML::%s: VStartup = %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, p->display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: myPipe->Dispclk = %f\n", __func__, p->myPipe->Dispclk);
#endif
CalculateVUpdateAndDynamicMetadataParameters(
p->MaxInterDCNTileRepeaters,
@@ -5258,11 +5196,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->DynamicMetadataEnable == true) {
if (p->VStartup * s->LineTime < *p->TSetup + *p->Tdmdl + s->Tdmbf + s->Tdmec + s->Tdmsks) {
*p->NotEnoughTimeForDynamicMetadata = true;
- dml2_printf("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: Not Enough Time for Dynamic Meta!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
} else {
*p->NotEnoughTimeForDynamicMetadata = false;
}
@@ -5288,21 +5226,21 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
((p->myPipe->ODMMode == dml2_odm_mode_mso_1to4) ? (double)p->myPipe->HActive * 3.0 / 4.0 : 0));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
- dml2_printf("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
- dml2_printf("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
- dml2_printf("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
- dml2_printf("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
- dml2_printf("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
- dml2_printf("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
- dml2_printf("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
- dml2_printf("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
- dml2_printf("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DynamicMetadataVMEnabled = %u\n", __func__, p->DynamicMetadataVMEnabled);
+ DML_LOG_VERBOSE("DML::%s: DPPCycles = %u\n", __func__, s->DPPCycles);
+ DML_LOG_VERBOSE("DML::%s: PixelClock = %f\n", __func__, p->myPipe->PixelClock);
+ DML_LOG_VERBOSE("DML::%s: Dppclk = %f\n", __func__, p->myPipe->Dppclk);
+ DML_LOG_VERBOSE("DML::%s: DISPCLKCycles = %u\n", __func__, s->DISPCLKCycles);
+ DML_LOG_VERBOSE("DML::%s: DISPCLK = %f\n", __func__, p->myPipe->Dispclk);
+ DML_LOG_VERBOSE("DML::%s: DSCDelay = %u\n", __func__, p->DSCDelay);
+ DML_LOG_VERBOSE("DML::%s: ODMMode = %u\n", __func__, p->myPipe->ODMMode);
+ DML_LOG_VERBOSE("DML::%s: DPP_RECOUT_WIDTH = %u\n", __func__, p->DPP_RECOUT_WIDTH);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
- dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
- dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
- dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
+ DML_LOG_VERBOSE("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
+ DML_LOG_VERBOSE("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
+ DML_LOG_VERBOSE("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
+ DML_LOG_VERBOSE("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
#endif
if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
@@ -5314,17 +5252,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->DSTYAfterScaler = (unsigned int)(math_floor2(s->DSTTotalPixelsAfterScaler / p->myPipe->HTotal, 1));
*p->DSTXAfterScaler = (unsigned int)(s->DSTTotalPixelsAfterScaler - ((double)(*p->DSTYAfterScaler * p->myPipe->HTotal)));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler = %u (final)\n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler = %u (final)\n", __func__, *p->DSTYAfterScaler);
#endif
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
- dml2_printf("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: GPUVMPageTableLevels = %u\n", __func__, p->display_cfg->gpuvm_max_page_table_levels);
+ DML_LOG_VERBOSE("DML::%s: HostVMDynamicLevelsTrips = %u\n", __func__, s->HostVMDynamicLevelsTrips);
#endif
if (p->display_cfg->gpuvm_enable) {
s->Tvm_trips_rounded = math_ceil2(4.0 * *p->Tvm_trips / s->LineTime, 1.0) / 4.0 * s->LineTime;
@@ -5402,7 +5340,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
}
/* oto prefetch bw should be always be less than total vactive bw */
- //DML2_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
+ //DML_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
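The commented-out assert records the intended invariant, and the math_max2() clamp enforces it: the one-to-one (oto) prefetch bandwidth may never fall below the per-pipe vactive software bandwidth. A worked example with made-up numbers (fmax() standing in for math_max2()):

double per_pipe_vactive_sw_bw = 1200.0;  /* MB/s, assumed */
double prefetch_bw_oto = 950.0;          /* MB/s, assumed */
double mall_overhead = 1.1;              /* assumed overhead factor */
prefetch_bw_oto = fmax(per_pipe_vactive_sw_bw, prefetch_bw_oto) * mall_overhead;
/* = 1320.0 MB/s: prefetch is never scheduled below vactive demand */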
@@ -5421,9 +5359,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
- dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
- dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
#endif
if (p->display_cfg->gpuvm_enable == true) {
@@ -5433,9 +5371,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
s->Tvm_oto = s->Tvm_trips_rounded;
@@ -5447,9 +5385,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
} else
s->Tr0_oto = s->LineTime / 4.0;
@@ -5459,11 +5397,11 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
#ifdef DML_GLOBAL_PREFETCH_CHECK
- dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
+ DML_LOG_VERBOSE("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
if (p->impacted_dst_y_pre > 0) {
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
}
#endif
*p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime;
@@ -5492,72 +5430,71 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
- dml2_printf("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
- dml2_printf("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
- dml2_printf("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
- dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
- dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
- dml2_printf("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
- dml2_printf("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
- dml2_printf("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
- dml2_printf("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
- dml2_printf("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
- dml2_printf("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
- dml2_printf("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
- dml2_printf("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
- dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
- dml2_printf("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
-#endif
- double Tpre = s->dst_y_prefetch_equ * s->LineTime;
+ DML_LOG_VERBOSE("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_oto = %f\n", __func__, s->min_Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, *p->Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatencyPrefetch = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: trip_to_mem = %f\n", __func__, s->trip_to_mem);
+ DML_LOG_VERBOSE("DML::%s: mall_prefetch_sdp_overhead_factor = %f\n", __func__, p->mall_prefetch_sdp_overhead_factor);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
+ DML_LOG_VERBOSE("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
+ DML_LOG_VERBOSE("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips = %f\n", __func__, *p->Tvm_trips);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips = %f\n", __func__, *p->Tr0_trips);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, *p->Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, *p->Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_pr = %f\n", __func__, s->prefetch_bw_pr);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_oto = %f\n", __func__, s->prefetch_bw_oto);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto = %f\n", __func__, s->Tr0_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto = %f\n", __func__, s->Tvm_oto);
+ DML_LOG_VERBOSE("DML::%s: Tvm_oto_lines = %f\n", __func__, s->Tvm_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Tr0_oto_lines = %f\n", __func__, s->Tr0_oto_lines);
+ DML_LOG_VERBOSE("DML::%s: Lsw_oto = %f\n", __func__, s->Lsw_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ = %f\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: tdlut_row_bytes = %d\n", __func__, tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, p->meta_row_bytes);
+#endif
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
*p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
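DST_Y_PREFETCH is a U6.2 fixed-point register field, hence the 63.75 cap above and the snap to quarter-line units here: math_floor2(4.0 * (x + 0.125), 1) / 4.0 rounds to the nearest 0.25. A minimal sketch of that quantization (to_u6_2 is a hypothetical helper, floor() standing in for math_floor2()):

#include <math.h>

static double to_u6_2(double dst_y_prefetch)
{
        double q = floor(4.0 * (dst_y_prefetch + 0.125)) / 4.0; /* nearest 0.25 */

        return q > 63.75 ? 63.75 : q; /* U6.2 register maximum */
}
/* e.g. to_u6_2(5.61) == 5.50 and to_u6_2(5.63) == 5.75 */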
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: LineTime: %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: VStartup: %u\n", __func__, p->VStartup);
- dml2_printf("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
- dml2_printf("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
- dml2_printf("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
- dml2_printf("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
- dml2_printf("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
- dml2_printf("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
- dml2_printf("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
- dml2_printf("DML::%s: TWait = %f\n", __func__, p->TWait);
- dml2_printf("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
- dml2_printf("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
- dml2_printf("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
- dml2_printf("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
- dml2_printf("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
- dml2_printf("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
- dml2_printf("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
- dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime: %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: VStartup: %u\n", __func__, p->VStartup);
+ DML_LOG_VERBOSE("DML::%s: Tvstartup: %fus - time between vstartup and first pixel of active\n", __func__, p->VStartup * s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: TSetup: %fus - time from vstartup to vready\n", __func__, *p->TSetup);
+ DML_LOG_VERBOSE("DML::%s: TCalc: %fus - time for calculations in dchub starting at vready\n", __func__, p->TCalc);
+ DML_LOG_VERBOSE("DML::%s: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", __func__, s->Tdmbf);
+ DML_LOG_VERBOSE("DML::%s: Tdmec: %fus - time dio takes to transfer dmd\n", __func__, s->Tdmec);
+ DML_LOG_VERBOSE("DML::%s: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", __func__, s->Tdmsks);
+ DML_LOG_VERBOSE("DML::%s: TWait = %f\n", __func__, p->TWait);
+ DML_LOG_VERBOSE("DML::%s: TWait_p = %f\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip = %f\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: Tex = %f\n", __func__, p->ExtraLatencyPrefetch);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl_vm: %fus - time for vm stages of dmd \n", __func__, *p->Tdmdl_vm);
+ DML_LOG_VERBOSE("DML::%s: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", __func__, *p->Tdmdl);
+ DML_LOG_VERBOSE("DML::%s: TWait_p: %fus\n", __func__, s->TWait_p);
+ DML_LOG_VERBOSE("DML::%s: Ttrip: %fus\n", __func__, p->Ttrip);
+ DML_LOG_VERBOSE("DML::%s: DSTXAfterScaler: %u pixels - number of pixel clocks pipeline and buffer delay after scaler \n", __func__, *p->DSTXAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: DSTYAfterScaler: %u lines - number of lines of pipeline and buffer delay after scaler \n", __func__, *p->DSTYAfterScaler);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, (s->dst_y_prefetch_equ * s->LineTime), *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
#endif
*p->dst_y_per_vm_vblank = 0;
@@ -5596,19 +5533,19 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw1 = 0;
- dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
(*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
- dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
- dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
- dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
- dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
+ DML_LOG_VERBOSE("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
+ DML_LOG_VERBOSE("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
@@ -5620,10 +5557,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw2 = 0;
- dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
- dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
}
// prefetch_bw3: 2*R0 + SW
@@ -5634,10 +5571,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else
s->prefetch_bw3 = 0;
- dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
- dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
// prefetch_bw4: SW
@@ -5647,17 +5584,17 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw4 = 0;
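prefetch_bw1 through prefetch_bw4 are the four candidate bandwidths named in the surrounding comments (VM + 2*R0 + SW, VM + SW, 2*R0 + SW, SW); each assumes a different subset of the VM fetch, the two row fetches, and the swath fetch must be funded inside the prefetch window. A simplified sketch that drops the rounding, Tno_bw, and fractional-line terms the real expressions subtract (all names are assumptions):

static void prefetch_bw_candidates(double vm_bytes, double row_bytes,
                                   double sw_bytes, double Tpre,
                                   double Tvm, double Tr0, double bw[4])
{
        bw[0] = (vm_bytes + 2.0 * row_bytes + sw_bytes) / Tpre;  /* VM+2*R0+SW */
        bw[1] = (vm_bytes + sw_bytes) / (Tpre - 2.0 * Tr0);      /* VM+SW */
        bw[2] = (2.0 * row_bytes + sw_bytes) / (Tpre - Tvm);     /* 2*R0+SW */
        bw[3] = sw_bytes / (Tpre - Tvm - 2.0 * Tr0);             /* SW */
}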
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
- dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
- dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
- dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
- dml2_printf("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
- dml2_printf("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
- dml2_printf("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
- dml2_printf("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
- dml2_printf("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
- dml2_printf("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
+ DML_LOG_VERBOSE("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, s->dst_y_prefetch_equ * s->LineTime, *p->Tpre_rounded, (*p->Tpre_rounded - (s->dst_y_prefetch_equ * s->LineTime)));
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
+ DML_LOG_VERBOSE("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est2: %f\n", __func__, s->Tsw_est2);
+ DML_LOG_VERBOSE("DML::%s: Tsw_est3: %f\n", __func__, s->Tsw_est3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw1: %f (final)\n", __func__, s->prefetch_bw1);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw2: %f (final)\n", __func__, s->prefetch_bw2);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw3: %f (final)\n", __func__, s->prefetch_bw3);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw4: %f (final)\n", __func__, s->prefetch_bw4);
#endif
{
bool Case1OK = false;
@@ -5676,14 +5613,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
double total_row_bytes = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes);
- dml2_printf("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
- dml2_printf("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_rounded = %f\n", __func__, s->Tvm_trips_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_rounded = %f\n", __func__, s->Tr0_trips_rounded);
if (s->prefetch_bw1 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw1;
double row_transfer_time = total_row_bytes / s->prefetch_bw1;
- dml2_printf("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case1: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case1OK = true;
}
@@ -5696,8 +5633,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw2 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw2;
double row_transfer_time = total_row_bytes / s->prefetch_bw2;
- dml2_printf("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case2: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time >= s->Tvm_trips_rounded && row_transfer_time < s->Tr0_trips_rounded) {
Case2OK = true;
}
@@ -5709,8 +5646,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (s->prefetch_bw3 > 0) {
double vm_transfer_time = *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw3;
double row_transfer_time = total_row_bytes / s->prefetch_bw3;
- dml2_printf("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
- dml2_printf("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: vm_transfer_time = %f\n", __func__, vm_transfer_time);
+ DML_LOG_VERBOSE("DML::%s: Case3: row_transfer_time = %f\n", __func__, row_transfer_time);
if (vm_transfer_time < s->Tvm_trips_rounded && row_transfer_time >= s->Tr0_trips_rounded) {
Case3OK = true;
}
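Each candidate is accepted only when the transfer times it implies fall on the expected side of the rounded trip times; a sketch of the predicate tested above (HostVMInefficiencyFactor omitted for brevity):

double vm_t  = Tno_bw + vm_bytes / bw;  /* implied vm transfer time */
double row_t = total_row_bytes / bw;    /* implied row transfer time */
bool case1_ok = vm_t >= Tvm_trips_rounded && row_t >= Tr0_trips_rounded;
bool case2_ok = vm_t >= Tvm_trips_rounded && row_t <  Tr0_trips_rounded;
bool case3_ok = vm_t <  Tvm_trips_rounded && row_t >= Tr0_trips_rounded;
/* prefetch_bw_equ = case1_ok ? bw1 : case2_ok ? bw2 : case3_ok ? bw3 : bw4 */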
@@ -5730,10 +5667,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Case1OK: %u\n", __func__, Case1OK);
- dml2_printf("DML::%s: Case2OK: %u\n", __func__, Case2OK);
- dml2_printf("DML::%s: Case3OK: %u\n", __func__, Case3OK);
- dml2_printf("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: Case1OK: %u\n", __func__, Case1OK);
+ DML_LOG_VERBOSE("DML::%s: Case2OK: %u\n", __func__, Case2OK);
+ DML_LOG_VERBOSE("DML::%s: Case3OK: %u\n", __func__, Case3OK);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ: %f\n", __func__, s->prefetch_bw_equ);
#endif
if (s->prefetch_bw_equ > 0) {
@@ -5753,12 +5690,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
s->Tvm_equ = 0;
s->Tr0_equ = 0;
- dml2_printf("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: prefetch_bw_equ equals 0!\n", __func__);
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
- dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
+ DML_LOG_VERBOSE("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
+ DML_LOG_VERBOSE("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
@@ -5769,7 +5706,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
} else {
@@ -5785,7 +5722,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
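The smaller dst_y_prefetch leaves less time to fetch and is therefore the more stressful schedule, which is the one carried forward; in sketch form:

bool use_oto = dst_y_prefetch_oto < dst_y_prefetch_equ;
double dst_y_prefetch = use_oto ? dst_y_prefetch_oto : dst_y_prefetch_equ;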
@@ -5797,31 +5734,31 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
- dml2_printf("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML::%s: TimeForFetchingRowInVBlank = %f\n", __func__, s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: dst_y_prefetch = %f\n", __func__, *p->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
- dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
- dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
- dml2_printf("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
- dml2_printf("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
+ DML_LOG_VERBOSE("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
+ DML_LOG_VERBOSE("DML::%s: cursor_prefetch_bytes = %d\n", __func__, s->cursor_prefetch_bytes);
+ DML_LOG_VERBOSE("DML::%s: prefetch_cursor_bw = %f\n", __func__, *p->prefetch_cursor_bw);
#endif
- DML2_ASSERT(*p->dst_y_prefetch < 64);
+ DML_ASSERT(*p->dst_y_prefetch < 64);
unsigned int min_lsw_required = (unsigned int)math_max2(2, p->tdlut_drain_time / s->LineTime);
if (s->LinesToRequestPrefetchPixelData >= min_lsw_required && s->prefetch_bw_equ > 0) {
*p->VRatioPrefetchY = (double)p->PrefetchSourceLinesY / s->LinesToRequestPrefetchPixelData;
*p->VRatioPrefetchY = math_max2(*p->VRatioPrefetchY, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
- dml2_printf("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightY = %u\n", __func__, p->SwathHeightY);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillY = %u\n", __func__, p->VInitPreFillY);
#endif
if ((p->SwathHeightY > 4) && (p->VInitPreFillY > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillY - 3.0) / 2.0) {
@@ -5829,13 +5766,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
(double)p->MaxNumSwathY * p->SwathHeightY / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillY - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VinitPreFillY=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillY);
*p->VRatioPrefetchY = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
- dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
- dml2_printf("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchY = %f\n", __func__, *p->VRatioPrefetchY);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathY = %u\n", __func__, p->MaxNumSwathY);
#endif
}
@@ -5843,22 +5780,22 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
- dml2_printf("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: SwathHeightC = %u\n", __func__, p->SwathHeightC);
+ DML_LOG_VERBOSE("DML::%s: VInitPreFillC = %u\n", __func__, p->VInitPreFillC);
#endif
if ((p->SwathHeightC > 4) && (p->VInitPreFillC > 3)) {
if (s->LinesToRequestPrefetchPixelData > (p->VInitPreFillC - 3.0) / 2.0) {
*p->VRatioPrefetchC = math_max2(*p->VRatioPrefetchC, (double)p->MaxNumSwathC * p->SwathHeightC / (s->LinesToRequestPrefetchPixelData - (p->VInitPreFillC - 3.0) / 2.0));
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. LinesToRequestPrefetchPixelData=%f VInitPreFillC=%u\n", __func__, s->LinesToRequestPrefetchPixelData, p->VInitPreFillC);
*p->VRatioPrefetchC = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
- dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
- dml2_printf("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchC = %f\n", __func__, *p->VRatioPrefetchC);
+ DML_LOG_VERBOSE("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
+ DML_LOG_VERBOSE("DML::%s: MaxNumSwathC = %u\n", __func__, p->MaxNumSwathC);
#endif
}
@@ -5866,36 +5803,34 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->RequiredPrefetchPixelDataBWChroma = (double)p->PrefetchSourceLinesC / s->LinesToRequestPrefetchPixelData * p->myPipe->BytePerPixelC * p->swath_width_chroma_ub / s->LineTime;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
- dml2_printf("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: BytePerPixelY = %u\n", __func__, p->myPipe->BytePerPixelY);
+ DML_LOG_VERBOSE("DML::%s: swath_width_luma_ub = %u\n", __func__, p->swath_width_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
#endif
} else {
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
- dml2_printf("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, LinesToRequestPrefetchPixelData: %f, should be >= %d\n", __func__, s->LinesToRequestPrefetchPixelData, min_lsw_required);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!, prefetch_bw_equ: %f, should be > 0\n", __func__, s->prefetch_bw_equ);
*p->VRatioPrefetchY = 0;
*p->VRatioPrefetchC = 0;
*p->RequiredPrefetchPixelDataBWLuma = 0;
*p->RequiredPrefetchPixelDataBWChroma = 0;
}
- dml2_printf("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
- dml2_printf("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
- dml2_printf("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
- dml2_printf("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
- dml2_printf("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
- dml2_printf("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
- dml2_printf("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
- dml2_printf("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML: Tpre: %fus - sum of time to request 2 x data pte, swaths\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime + 2.0 * s->TimeForFetchingRowInVBlank + s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tvm: %fus - time to fetch vm\n", s->TimeForFetchingVM);
+ DML_LOG_VERBOSE("DML: Tr0: %fus - time to fetch first row of data pagetables\n", s->TimeForFetchingRowInVBlank);
+ DML_LOG_VERBOSE("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)s->LinesToRequestPrefetchPixelData * s->LineTime);
+ DML_LOG_VERBOSE("DML: To: %fus - time for propagation from scaler to optc\n", (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime);
+ DML_LOG_VERBOSE("DML: Tvstartup - TSetup - Tcalc - TWait - Tpre - To > 0\n");
+ DML_LOG_VERBOSE("DML: Tslack(pre): %fus - time left over in schedule\n", p->VStartup * s->LineTime - s->TimeForFetchingVM - 2 * s->TimeForFetchingRowInVBlank - (*p->DSTYAfterScaler + ((double)(*p->DSTXAfterScaler) / (double)p->myPipe->HTotal)) * s->LineTime - p->TWait - p->TCalc - *p->TSetup);
+ DML_LOG_VERBOSE("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %u\n", p->PixelPTEBytesPerRow);
} else {
- dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
__func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n",
- __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5916,18 +5851,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_vm_bw = 0;
} else if (*p->dst_y_per_vm_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
} else {
prefetch_vm_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_vm_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_vm_vblank);
}
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
@@ -5936,14 +5871,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_row_bw = %f\n", __func__, prefetch_row_bw);
#endif
} else {
prefetch_row_bw = 0;
s->NoTimeToPrefetch = true;
- dml2_printf("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: No time to prefetch!. dst_y_per_row_vblank=%f (should be > 0)\n", __func__, *p->dst_y_per_row_vblank);
}
*p->prefetch_vmrow_bw = math_max2(prefetch_vm_bw, prefetch_row_bw);
@@ -5963,12 +5898,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->prefetch_vmrow_bw = 0;
}
- dml2_printf("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
- dml2_printf("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
- dml2_printf("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
- dml2_printf("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_vblank = %f (final)\n", __func__, *p->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_vblank = %f (final)\n", __func__, *p->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML::%s: prefetch_vmrow_bw = %f (final)\n", __func__, *p->prefetch_vmrow_bw);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWLuma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWLuma);
+ DML_LOG_VERBOSE("DML::%s: RequiredPrefetchPixelDataBWChroma = %f (final)\n", __func__, *p->RequiredPrefetchPixelDataBWChroma);
+ DML_LOG_VERBOSE("DML::%s: NoTimeToPrefetch=%d\n", __func__, s->NoTimeToPrefetch);
return s->NoTimeToPrefetch;
}
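
The bulk of these hunks is a mechanical rename of dml2_printf() to DML_LOG_VERBOSE(). The macro's real definition lives elsewhere in the DML headers; the following is only a minimal user-space sketch of the usual shape of such a verbose-log macro (the DML_DEBUG_VERBOSE knob and the printf backend here are illustrative assumptions, not the kernel's actual definition):

#include <stdio.h>

#ifdef DML_DEBUG_VERBOSE
/* Debug builds: forward straight to printf. */
#define DML_LOG_VERBOSE(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
/* Release builds: the if (0) keeps format/argument type checking
 * while the call itself is optimized away. */
#define DML_LOG_VERBOSE(fmt, ...) \
	do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
	double line_time = 8.25;

	/* Emitted only when built with -DDML_DEBUG_VERBOSE. */
	DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, line_time);
	return 0;
}

One consequence visible later in the patch: because the macro can compile out on its own, some call sites drop their surrounding #if defined(__DML_VBA_DEBUG__) guards.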
@@ -6005,7 +5940,7 @@ static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned
}
}
if (max_idx <= 0) {
- DML2_ASSERT(max_idx >= 0);
+ DML_ASSERT(max_idx >= 0);
max_idx = this_plane_idx;
}
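
Alongside the logging rename, DML2_ASSERT() becomes DML_ASSERT(). A hypothetical stand-in (the real macro is defined in the DML headers) showing the usual soft-assert shape, which logs and continues rather than crashing mode evaluation:

#include <stdio.h>

#define DML_ASSERT(expr) \
	do { \
		if (!(expr)) \
			printf("DML assert failed: %s (%s:%d)\n", \
			       #expr, __FILE__, __LINE__); \
	} while (0)

int main(void)
{
	int max_idx = -1;

	/* Mirrors the guarded use above: the branch is only taken for
	 * max_idx <= 0, so the assert flags the unexpected negative
	 * case and the code falls back to a safe index. */
	if (max_idx <= 0) {
		DML_ASSERT(max_idx >= 0);
		max_idx = 0;
	}
	return max_idx;
}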
@@ -6037,12 +5972,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
// worst case if the rob and cdb are fully hogged
s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
- dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
- dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
- dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
- dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
- dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
+ DML_LOG_VERBOSE("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
+ DML_LOG_VERBOSE("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
#endif
// calculate the return impact from each plane, request is 256B per dcfclk
@@ -6063,12 +5998,12 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
- dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
- dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
#endif
if (s->src_swath_bytes_c[i] > 0) { // dual_plane
@@ -6079,10 +6014,10 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
- dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
- dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
- dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
+ DML_LOG_VERBOSE("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
#endif
}
@@ -6090,9 +6025,9 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
- dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
- dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
+ DML_LOG_VERBOSE("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
+ DML_LOG_VERBOSE("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
#endif
// clamp to the worst-case delay, i.e. the one that occupies the full rob+cdb
if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles)
@@ -6109,7 +6044,7 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
+ DML_LOG_VERBOSE("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
#endif
}
@@ -6120,8 +6055,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
*p->recalc_prefetch_schedule = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
- dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
+ DML_LOG_VERBOSE("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
#endif
}
} else {
@@ -6131,8 +6066,8 @@ static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
- dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
+ DML_LOG_VERBOSE("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
+ DML_LOG_VERBOSE("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
#endif
return s->prefetch_global_check_passed;
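
For the worst-case bound computed at the top of this function, a worked instance may help: the ROB plus the fully compressed CDB is drained at 64 bytes per DCFCLK cycle. Buffer sizes and the compression ratio below are made-up example values, not taken from any real SoC table:

#include <math.h>
#include <stdio.h>

#define DML_MAX_COMPRESSION_RATIO 4 /* assumed for illustration */

int main(void)
{
	unsigned int rob_buffer_size_kbytes = 128;
	unsigned int compressed_buffer_size_kbytes = 64;

	/* ceil((rob*1024 + cdb*ratio*1024) / 64B per cycle) */
	unsigned int max_trpd_dcfclk_cycles = (unsigned int)ceil(
		(rob_buffer_size_kbytes * 1024.0 +
		 compressed_buffer_size_kbytes * DML_MAX_COMPRESSION_RATIO * 1024.0) / 64.0);

	printf("max_Trpd_dcfclk_cycles = %u\n", max_trpd_dcfclk_cycles); /* 6144 */
	return 0;
}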
@@ -6150,8 +6085,8 @@ static void calculate_peak_bandwidth_required(
memset(l, 0, sizeof(struct dml2_core_shared_calculate_peak_bandwidth_required_locals));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: inc_flip_bw = %d\n", __func__, p->inc_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %d\n", __func__, p->num_active_planes);
#endif
for (unsigned int k = 0; k < p->num_active_planes; ++k) {
@@ -6347,12 +6282,12 @@ static void calculate_peak_bandwidth_required(
p->surface_peak_required_bw[m][n]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_vactive_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_vactive_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_qual[%s][%s]=%f\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->urg_bandwidth_required[m][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required%s[%s][%s]=%f\n", __func__, (p->inc_flip_bw ? "_flip" : ""), dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n), p->non_urg_bandwidth_required[m][n]);
#endif
- DML2_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
+ DML_ASSERT(p->urg_bandwidth_required[m][n] >= p->non_urg_bandwidth_required[m][n]);
}
}
}
@@ -6414,18 +6349,18 @@ static void check_urgent_bandwidth_support(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_sdp = %f\n", __func__, frac_urg_bandwidth_nom_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom_dram = %f\n", __func__, frac_urg_bandwidth_nom_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_nom = %f\n", __func__, *frac_urg_bandwidth_nom);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
- dml2_printf("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_sdp = %f\n", __func__, frac_urg_bandwidth_mall_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall_dram = %f\n", __func__, frac_urg_bandwidth_mall_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_mall = %f\n", __func__, *frac_urg_bandwidth_mall);
+ DML_LOG_VERBOSE("DML::%s: bandwidth_support_ok = %d\n", __func__, *bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required[m][n]) ? "<" : ">=", urg_bandwidth_required[m][n]);
}
@@ -6446,14 +6381,14 @@ static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal
flip_bw_available_mbps = flip_bw_available_sdp_mbps < flip_bw_available_dram_mbps ? flip_bw_available_sdp_mbps : flip_bw_available_dram_mbps;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
- dml2_printf("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
- dml2_printf("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
- dml2_printf("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
- dml2_printf("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_sdp_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available_dram_mbps = %f\n", __func__, urg_bandwidth_available[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_sdp_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_sdp]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_dram_mbps = %f\n", __func__, urg_bandwidth_required[eval_state][dml2_core_internal_bw_dram]);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_sdp_mbps = %f\n", __func__, flip_bw_available_sdp_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_dram_mbps = %f\n", __func__, flip_bw_available_dram_mbps);
+ DML_LOG_VERBOSE("DML::%s: flip_bw_available_mbps = %f\n", __func__, flip_bw_available_mbps);
#endif
return flip_bw_available_mbps;
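
Reading the values logged above: per return path, whatever urgent bandwidth the active streams do not consume is what an immediate flip may use, and the SDP/DRAM minimum wins. A sketch with made-up numbers (the subtraction mirrors what the debug output implies; the figures are not from any real configuration):

#include <stdio.h>

int main(void)
{
	double sdp_avail_mbps = 900.0, sdp_required_mbps = 620.0;
	double dram_avail_mbps = 760.0, dram_required_mbps = 580.0;

	double flip_sdp = sdp_avail_mbps - sdp_required_mbps;    /* 280.0 */
	double flip_dram = dram_avail_mbps - dram_required_mbps; /* 180.0 */
	double flip_avail = flip_sdp < flip_dram ? flip_sdp : flip_dram;

	printf("flip_bw_available_mbps = %f\n", flip_avail); /* 180.0 */
	return 0;
}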
@@ -6478,28 +6413,28 @@ static void calculate_immediate_flip_bandwidth_support(
*flip_bandwidth_support_ok &= urg_bandwidth_available[eval_state][n] >= urg_bandwidth_required_flip[eval_state][n];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
- dml2_printf("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
- dml2_printf("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: n = %s\n", __func__, dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_available = %f\n", __func__, urg_bandwidth_available[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: non_urg_bandwidth_required_flip = %f\n", __func__, non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_required_flip = %f\n", __func__, urg_bandwidth_required_flip[eval_state][n]);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
#endif
- DML2_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
+ DML_ASSERT(urg_bandwidth_required_flip[eval_state][n] >= non_urg_bandwidth_required_flip[eval_state][n]);
}
*frac_urg_bandwidth_flip = (frac_urg_bw_flip_sdp > frac_urg_bw_flip_dram) ? frac_urg_bw_flip_sdp : frac_urg_bw_flip_dram;
*flip_bandwidth_support_ok &= (*frac_urg_bandwidth_flip <= 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
- dml2_printf("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
- dml2_printf("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
- dml2_printf("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
- dml2_printf("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
+ DML_LOG_VERBOSE("DML::%s: eval_state = %s\n", __func__, dml2_core_internal_soc_state_type_str(eval_state));
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_sdp = %f\n", __func__, frac_urg_bw_flip_sdp);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bw_flip_dram = %f\n", __func__, frac_urg_bw_flip_dram);
+ DML_LOG_VERBOSE("DML::%s: frac_urg_bandwidth_flip = %f\n", __func__, *frac_urg_bandwidth_flip);
+ DML_LOG_VERBOSE("DML::%s: flip_bandwidth_support_ok = %d\n", __func__, *flip_bandwidth_support_ok);
for (unsigned int m = 0; m < dml2_core_internal_soc_state_max; m++) {
for (unsigned int n = 0; n < dml2_core_internal_bw_max; n++) {
- dml2_printf("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
+ DML_LOG_VERBOSE("DML::%s: state:%s bw_type:%s, urg_bandwidth_available=%f %s urg_bandwidth_required=%f\n",
__func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n),
urg_bandwidth_available[m][n], (urg_bandwidth_available[m][n] < urg_bandwidth_required_flip[m][n]) ? "<" : ">=", urg_bandwidth_required_flip[m][n]);
}
@@ -6549,27 +6484,27 @@ static void CalculateFlipSchedule(
l->dpte_row_bytes = DPTEBytesPerRow;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
- dml2_printf("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
- dml2_printf("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
- dml2_printf("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
- dml2_printf("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
- dml2_printf("DML::%s: LineTime = %f\n", __func__, LineTime);
- dml2_printf("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
- dml2_printf("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
- dml2_printf("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
- dml2_printf("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
- dml2_printf("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
- dml2_printf("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
- dml2_printf("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
- dml2_printf("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
- dml2_printf("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
- dml2_printf("DML::%s: VRatio = %f\n", __func__, VRatio);
+ DML_LOG_VERBOSE("DML::%s: GPUVMEnable = %u\n", __func__, GPUVMEnable);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_us = %d\n", __func__, max_flip_time_us);
+ DML_LOG_VERBOSE("DML::%s: ip.max_flip_time_lines = %d\n", __func__, max_flip_time_lines);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: use_lb_flip_bw = %u\n", __func__, use_lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: iflip_enable = %u\n", __func__, iflip_enable);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: LineTime = %f\n", __func__, LineTime);
+ DML_LOG_VERBOSE("DML::%s: Tno_bw_flip = %f\n", __func__, Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip = %f\n", __func__, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip = %f\n", __func__, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded = %f\n", __func__, Tvm_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: Tr0_trips_flip_rounded = %f\n", __func__, Tr0_trips_flip_rounded);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %f\n", __func__, vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: DPTEBytesPerRow = %f\n", __func__, DPTEBytesPerRow);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %d\n", __func__, meta_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_bytes = %f\n", __func__, l->dpte_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: dpte_row_height = %d\n", __func__, dpte_row_height);
+ DML_LOG_VERBOSE("DML::%s: meta_row_height = %d\n", __func__, meta_row_height);
+ DML_LOG_VERBOSE("DML::%s: VRatio = %f\n", __func__, VRatio);
#endif
if (TotImmediateFlipBytes > 0 && (GPUVMEnable || dcc_mrq_enable)) {
@@ -6596,9 +6531,9 @@ static void CalculateFlipSchedule(
l->min_row_time = l->min_row_height * LineTime / VRatio;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: min_row_time = %f\n", __func__, l->min_row_time);
#endif
- DML2_ASSERT(l->min_row_time > 0);
+ DML_ASSERT(l->min_row_time > 0);
if (use_lb_flip_bw) {
// For mode check, calculate the flip bw requirement with the worst-case flip time
@@ -6619,20 +6554,20 @@ static void CalculateFlipSchedule(
l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded),
l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
- dml2_printf("DML::%s: total row bytes (%d row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
- dml2_printf("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
- dml2_printf("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
- dml2_printf("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
- dml2_printf("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: max_flip_time = %f\n", __func__, l->max_flip_time);
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_bytes);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (%f row, hvm ineff scaled) = %f\n", __func__, l->num_rows, l->hvm_scaled_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: total vm+row bytes (hvm ineff scaled) = %f\n", __func__, l->hvm_scaled_vm_row_bytes);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm and row = %f\n", __func__, l->hvm_scaled_vm_row_bytes / (l->max_flip_time - Tno_bw_flip));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm = %f\n", __func__, l->hvm_scaled_vm_bytes / (l->max_flip_time - Tno_bw_flip - 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row = %f\n", __func__, l->hvm_scaled_row_bytes / (l->max_flip_time - Tvm_trips_flip_rounded));
if (l->lb_flip_bw > 0) {
- dml2_printf("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
- dml2_printf("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
- dml2_printf("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
- dml2_printf("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
- dml2_printf("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tvm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: mode_support est Tr0_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_vm_flip = %f (bw-based)\n", __func__, Tno_bw_flip + l->hvm_scaled_vm_bytes / l->lb_flip_bw / LineTime);
+ DML_LOG_VERBOSE("DML::%s: mode_support est dst_y_per_row_flip = %f (bw-based)\n", __func__, l->hvm_scaled_row_bytes / l->lb_flip_bw / LineTime / l->num_rows);
+ DML_LOG_VERBOSE("DML::%s: Tvm_trips_flip_rounded + 2*Tr0_trips_flip_rounded = %f\n", __func__, (Tvm_trips_flip_rounded + 2 * Tr0_trips_flip_rounded));
}
#endif
l->lb_flip_bw = math_max3(l->lb_flip_bw,
@@ -6640,8 +6575,8 @@ static void CalculateFlipSchedule(
(l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
- dml2_printf("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for vm reg limit = %f\n", __func__, l->hvm_scaled_vm_bytes / (31 * LineTime) - Tno_bw_flip);
+ DML_LOG_VERBOSE("DML::%s: lb_flip_bw for row reg limit = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / (15 * LineTime));
#endif
}
@@ -6653,13 +6588,12 @@ static void CalculateFlipSchedule(
} else {
if (iflip_enable) {
l->ImmediateFlipBW = (double)per_pipe_flip_bytes * BandwidthAvailableForImmediateFlip / (double)TotImmediateFlipBytes; // flip_bw(i)
- double portion = (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
- dml2_printf("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
- dml2_printf("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
- dml2_printf("DML::%s: portion of flip bw = %f\n", __func__, portion);
+ DML_LOG_VERBOSE("DML::%s: per_pipe_flip_bytes = %d\n", __func__, per_pipe_flip_bytes);
+ DML_LOG_VERBOSE("DML::%s: BandwidthAvailableForImmediateFlip = %f\n", __func__, BandwidthAvailableForImmediateFlip);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipBW = %f\n", __func__, l->ImmediateFlipBW);
+ DML_LOG_VERBOSE("DML::%s: portion of flip bw = %f\n", __func__, (double)per_pipe_flip_bytes / (double)TotImmediateFlipBytes);
#endif
if (l->ImmediateFlipBW == 0) {
l->Tvm_flip = 0;
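
Note the small cleanup in this hunk: the portion local is gone and the ratio is computed inline in the log call, so the division only exists as part of the (possibly compiled-out) log statement and no unused variable is left behind in non-debug builds. A minimal sketch, reusing the hypothetical compile-out macro from earlier:

#include <stdio.h>

#define DML_LOG_VERBOSE(fmt, ...) \
	do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
	double per_pipe_flip_bytes = 4096.0;
	double tot_immediate_flip_bytes = 16384.0;

	/* The division lives inside the macro argument list, so a
	 * compiled-out log leaves no dead local behind. */
	DML_LOG_VERBOSE("portion of flip bw = %f\n",
			per_pipe_flip_bytes / tot_immediate_flip_bytes);
	return 0;
}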
@@ -6674,11 +6608,11 @@ static void CalculateFlipSchedule(
LineTime / 4.0);
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
- dml2_printf("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
+ DML_LOG_VERBOSE("DML::%s: total vm bytes (hvm ineff scaled) = %f\n", __func__, vm_bytes * HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: total row bytes (hvm ineff scaled, one row) = %f\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes));
- dml2_printf("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (bw-based), Tvm_trips_flip = %f (latency-based)\n", __func__, Tno_bw_flip + vm_bytes * HostVMInefficiencyFactor / l->ImmediateFlipBW, Tvm_trips_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (bw-based), Tr0_trips_flip = %f (latency-based)\n", __func__, (l->dpte_row_bytes * HostVMInefficiencyFactor + meta_row_bytes) / l->ImmediateFlipBW, Tr0_trips_flip);
#endif
*dst_y_per_vm_flip = math_ceil2(4.0 * (l->Tvm_flip / LineTime), 1.0) / 4.0;
*dst_y_per_row_flip = math_ceil2(4.0 * (l->Tr0_flip / LineTime), 1.0) / 4.0;
@@ -6711,14 +6645,14 @@ static void CalculateFlipSchedule(
#ifdef __DML_VBA_DEBUG__
if (!use_lb_flip_bw) {
- dml2_printf("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
- dml2_printf("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
- dml2_printf("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
- dml2_printf("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
- dml2_printf("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_vm_flip = %f (should be < 32)\n", __func__, *dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML::%s: dst_y_per_row_flip = %f (should be < 16)\n", __func__, *dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip = %f (final)\n", __func__, l->Tvm_flip);
+ DML_LOG_VERBOSE("DML::%s: Tr0_flip = %f (final)\n", __func__, l->Tr0_flip);
+ DML_LOG_VERBOSE("DML::%s: Tvm_flip + 2*Tr0_flip = %f (should be <= min_row_time=%f)\n", __func__, l->Tvm_flip + 2 * l->Tr0_flip, l->min_row_time);
}
- dml2_printf("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
- dml2_printf("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
+ DML_LOG_VERBOSE("DML::%s: final_flip_bw = %f\n", __func__, *final_flip_bw);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupportedForPipe = %u\n", __func__, *ImmediateFlipSupportedForPipe);
#endif
}
@@ -6736,7 +6670,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->UrgentWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
#endif
p->Watermark->USRRetrainingWatermark = p->mmSOCParameters.UrgentLatency + p->mmSOCParameters.ExtraLatency + p->mmSOCParameters.USRRetrainingLatency + p->mmSOCParameters.SMNLatency;
@@ -6755,20 +6689,20 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
- dml2_printf("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
- dml2_printf("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
- dml2_printf("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
- dml2_printf("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
- dml2_printf("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
- dml2_printf("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
- dml2_printf("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
- dml2_printf("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
- dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
+ DML_LOG_VERBOSE("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
+ DML_LOG_VERBOSE("DML::%s: ExtraLatency = %f\n", __func__, p->mmSOCParameters.ExtraLatency);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeLatency = %f\n", __func__, p->mmSOCParameters.DRAMClockChangeLatency);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitZ8Time = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: SREnterPlusExitTime = %f\n", __func__, p->mmSOCParameters.SREnterPlusExitTime);
+ DML_LOG_VERBOSE("DML::%s: UrgentWatermark = %f\n", __func__, p->Watermark->UrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingWatermark = %f\n", __func__, p->Watermark->USRRetrainingWatermark);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, p->Watermark->DRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeWatermark = %f\n", __func__, p->Watermark->FCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterExitWatermark = %f\n", __func__, p->Watermark->StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
#endif
s->TotalActiveWriteback = 0;
@@ -6801,11 +6735,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->WritebackFCLKChangeWatermark = p->Watermark->WritebackFCLKChangeWatermark + p->mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
- dml2_printf("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
- dml2_printf("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
- dml2_printf("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
- dml2_printf("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
+ DML_LOG_VERBOSE("DML::%s: WritebackDRAMClockChangeWatermark = %f\n", __func__, p->Watermark->WritebackDRAMClockChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, p->Watermark->WritebackFCLKChangeWatermark);
+ DML_LOG_VERBOSE("DML::%s: WritebackUrgentWatermark = %f\n", __func__, p->Watermark->WritebackUrgentWatermark);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingRequired = %u\n", __func__, p->USRRetrainingRequired);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingLatency = %f\n", __func__, p->mmSOCParameters.USRRetrainingLatency);
#endif
s->TotalPixelBW = 0.0;
@@ -6836,11 +6770,11 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
- dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
- dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
- dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
- dml2_printf("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBBitPerPixel = %f\n", __func__, k, LBBitPerPixel);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTaps = %f\n", __func__, k, v_taps);
#endif
s->EffectiveLBLatencyHidingY = s->LBLatencyHidingSourceLinesY[k] / v_ratio * (h_total / pixel_clock_mhz);
@@ -6943,16 +6877,16 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->sub_vp_lines_l = s->src_y_pstate_l + s->src_y_ahead_l + p->meta_row_height_l[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
- dml2_printf("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
- dml2_printf("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
- dml2_printf("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
- dml2_printf("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
- dml2_printf("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u\n", __func__, k, p->DETBufferSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightY = %u\n", __func__, k, p->SwathHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LBLatencyHidingSourceLinesY = %u\n", __func__, k, s->LBLatencyHidingSourceLinesY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_pstate = %u\n", __func__, k, s->dst_y_pstate);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_l = %u\n", __func__, k, s->src_y_pstate_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_l = %u\n", __func__, k, s->src_y_ahead_l);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_l = %u\n", __func__, k, p->meta_row_height_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_l = %u\n", __func__, k, s->sub_vp_lines_l);
#endif
p->SubViewportLinesNeededInMALL[k] = s->sub_vp_lines_l;
@@ -6967,10 +6901,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->SubViewportLinesNeededInMALL[k] = (unsigned int)(math_max2(s->sub_vp_lines_l, s->sub_vp_lines_c));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
- dml2_printf("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
- dml2_printf("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
- dml2_printf("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, meta_row_height_c = %u\n", __func__, k, p->meta_row_height_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_pstate_c = %u\n", __func__, k, s->src_y_pstate_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, src_y_ahead_c = %u\n", __func__, k, s->src_y_ahead_c);
+ DML_LOG_VERBOSE("DML::%s: k=%u, sub_vp_lines_c = %u\n", __func__, k, s->sub_vp_lines_c);
#endif
}
}
@@ -6992,10 +6926,10 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
- dml2_printf("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
- dml2_printf("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
- dml2_printf("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
+ DML_LOG_VERBOSE("DML::%s: DRAMClockChangeSupport = %u\n", __func__, *p->global_dram_clock_change_supported);
+ DML_LOG_VERBOSE("DML::%s: FCLKChangeSupport = %u\n", __func__, *p->global_fclk_change_supported);
+ DML_LOG_VERBOSE("DML::%s: MaxActiveFCLKChangeLatencySupported = %f\n", __func__, *p->MaxActiveFCLKChangeLatencySupported);
+ DML_LOG_VERBOSE("DML::%s: USRRetrainingSupport = %u\n", __func__, *p->USRRetrainingSupport);
#endif
}
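
The watermark terms logged above are plain sums of latency components, per the assignments earlier in this function: the urgent watermark is urgent latency plus extra latency, and the USR retraining watermark additionally folds in the retraining and SMN latencies. A worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
	double urgent_latency_us = 4.0, extra_latency_us = 1.5;
	double usr_retraining_latency_us = 2.0, smn_latency_us = 0.5;

	double urgent_wm = urgent_latency_us + extra_latency_us; /* 5.5 */
	double usr_retraining_wm = urgent_latency_us + extra_latency_us +
				   usr_retraining_latency_us + smn_latency_us; /* 8.0 */

	printf("UrgentWatermark = %.1f us\n", urgent_wm);
	printf("USRRetrainingWatermark = %.1f us\n", usr_retraining_wm);
	return 0;
}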
@@ -7141,7 +7075,7 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -7153,32 +7087,30 @@ static unsigned int get_qos_param_index(unsigned long uclk_freq_khz, const struc
break;
}
}
-#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
-#endif
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
return index;
}
static unsigned int get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
if (!clk_entry_found)
- DML2_ASSERT(clk_entry_found);
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
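
Beyond the rename, these two hunks widen printf specifiers to match the argument size (%ld where unsigned long fields were previously printed with %d) and switch the found-flag to proper bool literals. Strictly, %lu is the unsigned counterpart; a tiny user-space illustration of why the length modifier matters under -Wformat (values are arbitrary):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long uclk_freq_khz = 1200000UL;
	bool clk_entry_found = false;

	/* printf("%d", uclk_freq_khz) would be diagnosed by -Wformat:
	 * the length modifier must match the argument width. */
	printf("uclk_freq_khz = %lu\n", uclk_freq_khz);
	printf("clk_entry_found = %d\n", clk_entry_found);
	return 0;
}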
@@ -7218,10 +7150,10 @@ static void calculate_hostvm_inefficiency_factor(
if ((*HostVMInefficiencyFactorPrefetch < 4) && (remote_iommu_outstanding_translations < max_outstanding_reqs))
*HostVMInefficiencyFactorPrefetch = 4;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
- dml2_printf("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
- dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
- dml2_printf("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_pixel_and_vm = %f\n", __func__, urg_bandwidth_avail_active_pixel_and_vm);
+ DML_LOG_VERBOSE("DML::%s: urg_bandwidth_avail_active_vm_only = %f\n", __func__, urg_bandwidth_avail_active_vm_only);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, *HostVMInefficiencyFactor);
+ DML_LOG_VERBOSE("DML::%s: HostVMInefficiencyFactorPrefetch = %f\n", __func__, *HostVMInefficiencyFactorPrefetch);
#endif
}
}
@@ -7335,30 +7267,659 @@ static void calculate_pstate_keepout_dst_lines(
}
}
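
The new helper that follows pulls the per-plane prefetch checking out of the main mode-support path. Two conventions are worth noting: it is marked noinline_for_stack (in the kernel this expands to noinline, see include/linux/compiler_types.h) so its large locals do not inflate the caller's stack frame, and it stages arguments in params structs living in mode_lib->scratch instead of passing dozens of parameters. A hypothetical miniature of both ideas (names and logic below are invented for illustration):

#define noinline_for_stack __attribute__((noinline)) /* user-space stand-in */

/* Inputs and output pointers staged in one scratch-resident struct. */
struct prefetch_check_params {
	unsigned int num_active_planes;
	double line_time_us;
	int *prefetch_supported; /* output */
};

/* Kept out of line so the big scratch array stays in this frame. */
static noinline_for_stack void prefetch_check(struct prefetch_check_params *p)
{
	double scratch[256];
	double budget = 0.0;

	for (unsigned int k = 0; k < p->num_active_planes && k < 256; k++) {
		scratch[k] = p->line_time_us * (double)(k + 1);
		budget += scratch[k];
	}
	*p->prefetch_supported = budget > 0.0;
}

int main(void)
{
	static struct prefetch_check_params params; /* stands in for scratch */
	int supported = 0;

	params.num_active_planes = 4;
	params.line_time_us = 8.25;
	params.prefetch_supported = &supported;
	prefetch_check(&params);
	return !supported;
}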
+static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib,
+ const struct dml2_display_cfg *display_cfg)
+{
+ struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
+ struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
+ struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
+#endif
+ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
+
+ double min_return_bw_for_latency;
+ unsigned int k;
+
+ mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
+
+ calculate_hostvm_inefficiency_factor(
+ &s->HostVMInefficiencyFactor,
+ &s->HostVMInefficiencyFactorPrefetch,
+
+ display_cfg->gpuvm_enable,
+ display_cfg->hostvm_enable,
+ mode_lib->ip.remote_iommu_outstanding_translations,
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
+ mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
+
+ mode_lib->ms.Total3dlutActive = 0;
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
+ mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
+
+ // Calculate tdlut schedule related terms
+ calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
+ calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
+ calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
+ calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
+ calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
+ calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
+ calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
+ calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
+
+ // output
+ calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
+ calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
+ calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
+ calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
+ calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
+
+ calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
+ }
+
+ min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
+
+ if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
+ s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
+
+ CalculateExtraLatency(
+ display_cfg,
+ mode_lib->ip.rob_buffer_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
+ s->ReorderingBytes,
+ mode_lib->ms.DCFCLK,
+ mode_lib->ms.FabricClock,
+ mode_lib->ip.pixel_chunk_size_kbytes,
+ min_return_bw_for_latency,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.dpte_group_bytes,
+ s->tdlut_bytes_per_group,
+ s->HostVMInefficiencyFactor,
+ s->HostVMInefficiencyFactorPrefetch,
+ mode_lib->soc.hostvm_min_page_size_kbytes,
+ mode_lib->soc.qos_parameters.qos_type,
+ !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
+ mode_lib->soc.max_outstanding_reqs,
+ mode_lib->ms.support.request_size_bytes_luma,
+ mode_lib->ms.support.request_size_bytes_chroma,
+ mode_lib->ip.meta_chunk_size_kbytes,
+ mode_lib->ip.dchub_arb_to_ret_delay,
+ mode_lib->ms.TripToMemory,
+ mode_lib->ip.hostvm_mode,
+
+ // output
+ &mode_lib->ms.ExtraLatency,
+ &mode_lib->ms.ExtraLatency_sr,
+ &mode_lib->ms.ExtraLatencyPrefetch);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ s->impacted_dst_y_pre[k] = 0;
+
+ s->recalc_prefetch_schedule = 0;
+ s->recalc_prefetch_done = 0;
+ do {
+ mode_lib->ms.support.PrefetchSupported = true;
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
+
+ mode_lib->ms.TWait[k] = CalculateTWait(
+ display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx) : 0.0);
+
+ myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
+ myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
+ myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
+ myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
+ myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
+ myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
+ myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
+ myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
+ myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
+ myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
+ myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
+ myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
+ myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
+ myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
+ myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
+ myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
+ myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
+ myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ myPipe->ODMMode = mode_lib->ms.ODMMode[k];
+ myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
+ myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
+ myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
+ myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
+#endif
+ CalculatePrefetchSchedule_params->display_cfg = display_cfg;
+ CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
+ CalculatePrefetchSchedule_params->myPipe = myPipe;
+ CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
+ CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
+ CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
+ CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
+ CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
+ CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
+ CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
+ CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
+ CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
+ CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
+ CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
+ CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
+ CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
+ CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
+ CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
+ CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
+ CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
+ CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
+ CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
+ CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
+ CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
+ CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
+ CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
+ CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
+ CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
+ CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
+ CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
+ CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
+ CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
+ CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
+ CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
+ CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
+ CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
+ CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
+ CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
+ CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
+ CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
+
+ // output
+ CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
+ CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
+ CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
+ CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
+ CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
+ CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
+ CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
+ CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
+ CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
+ CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
+ CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
+ CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
+ CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
+ CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
+ CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
+ CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
+ CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
+ CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
+ CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
+ CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
+ CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
+
+ mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
+
+ mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
+ } // for k num_planes
+
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.BytePerPixelY,
+ mode_lib->ms.BytePerPixelC,
+ mode_lib->ms.SwathWidthY,
+ mode_lib->ms.SwathWidthC,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.PSCL_FACTOR,
+ mode_lib->ms.PSCL_FACTOR_CHROMA,
+ mode_lib->ms.RequiredDPPCLK,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
+ mode_lib->soc.return_bus_width_bytes,
+ mode_lib->ms.RequiredDISPCLK,
+ s->tdlut_bytes_to_deliver,
+ s->prefetch_swath_time_us,
+
+ /* Output */
+ &mode_lib->ms.dcfclk_deepsleep);
+
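+		// Reject the schedule if it violates the hard limits checked below:
+		// dst_y_prefetch >= 2 lines, LinesForVM < 32, LinesForDPTERow < 16,
+		// DSTYAfterScaler <= 8, and the prefetch itself must fit in time.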
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.dst_y_prefetch[k] < 2.0
+ || mode_lib->ms.LinesForVM[k] >= 32.0
+ || mode_lib->ms.LinesForDPTERow[k] >= 16.0
+ || mode_lib->ms.NoTimeForPrefetch[k] == true
+ || s->DSTYAfterScaler[k] > 8) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
+ }
+ }
+
+ mode_lib->ms.support.DynamicMetadataSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
+ mode_lib->ms.support.DynamicMetadataSupported = false;
+ }
+ }
+
+ mode_lib->ms.support.VRatioInPrefetchSupported = true;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
+ mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
+ mode_lib->ms.support.VRatioInPrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
+ }
+ }
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
+
+ // By default, do not recalc prefetch schedule
+ s->recalc_prefetch_schedule = 0;
+
+	// Only do the urgent vs. prefetch bandwidth check, the flip schedule check, and the power-saving feature support check IF the prefetch schedule check is OK
+ if (mode_lib->ms.support.PrefetchSupported) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ // Calculate Urgent burst factor for prefetch
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
+#endif
+ CalculateUrgentBurstFactor(
+ &display_cfg->plane_descriptors[k],
+ mode_lib->ms.swath_width_luma_ub[k],
+ mode_lib->ms.swath_width_chroma_ub[k],
+ mode_lib->ms.SwathHeightY[k],
+ mode_lib->ms.SwathHeightC[k],
+ s->line_times[k],
+ mode_lib->ms.UrgLatency,
+ mode_lib->ms.VRatioPreY[k],
+ mode_lib->ms.VRatioPreC[k],
+ mode_lib->ms.BytePerPixelInDETY[k],
+ mode_lib->ms.BytePerPixelInDETC[k],
+ mode_lib->ms.DETBufferSizeY[k],
+ mode_lib->ms.DETBufferSizeC[k],
+ /* Output */
+ &mode_lib->ms.UrgentBurstFactorLumaPre[k],
+ &mode_lib->ms.UrgentBurstFactorChromaPre[k],
+ &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+
+		// Calculate the required urgent bandwidth (both urgent and non-urgent peak bandwidth);
+		// flip bandwidth is assumed to be 0 at this point
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ mode_lib->ms.final_flip_bw[k] = 0;
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 0;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+		// Check urgent peak bandwidth against the available urgent bandwidth,
+		// at SDP and DRAM, for all SoC states (SVP prefetch and Sys Active)
+ check_urgent_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth
+ &s->dummy_single[1], // double* frac_urg_bandwidth_mall
+ &mode_lib->ms.support.UrgVactiveBandwidthSupport,
+ &mode_lib->ms.support.PrefetchBandwidthSupported,
+
+ mode_lib->soc.mall_allocated_for_dcn_mbytes,
+ mode_lib->ms.support.non_urg_bandwidth_required,
+ mode_lib->ms.support.urg_vactive_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_required,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
+ DML_LOG_VERBOSE("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
+ mode_lib->ms.support.PrefetchSupported = false;
+ DML_LOG_VERBOSE("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
+ }
+ }
+
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
+ if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
+
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
+ ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
+
+			// If recalc_prefetch_schedule gets set, the prefetch schedule is recalculated with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
+ s->recalc_prefetch_done = 1;
+ s->recalc_prefetch_schedule = 1;
+ }
+#endif
+ } // prefetch schedule ok, do urg bw and flip schedule
+ } while (s->recalc_prefetch_schedule);
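+	// The loop body runs at most twice: recalc_prefetch_done guarantees the
+	// global-prefetch recalculation (with impacted_dst_y_pre) happens only once.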
+
+ // Flip Schedule
+ // Both prefetch schedule and BW okay
+ if (mode_lib->ms.support.PrefetchSupported == true) {
+ mode_lib->ms.BandwidthAvailableForImmediateFlip =
+ get_bandwidth_available_for_immediate_flip(
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
+ mode_lib->ms.support.urg_bandwidth_available);
+
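+		// Sum the per-pipe flip bytes (VM PTEs, DPTE rows, meta rows) across
+		// all planes that request an immediate flip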
+ mode_lib->ms.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip) {
+ s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
+ s->HostVMInefficiencyFactor,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.meta_row_bytes[k]);
+ } else {
+ s->per_pipe_flip_bytes[k] = 0;
+ }
+			mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
+		}
+
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ CalculateFlipSchedule(
+ &mode_lib->scratch,
+ display_cfg->plane_descriptors[k].immediate_flip,
+ 1, // use_lb_flip_bw
+ s->HostVMInefficiencyFactor,
+ s->Tvm_trips_flip[k],
+ s->Tr0_trips_flip[k],
+ s->Tvm_trips_flip_rounded[k],
+ s->Tr0_trips_flip_rounded[k],
+ display_cfg->gpuvm_enable,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.BandwidthAvailableForImmediateFlip,
+ mode_lib->ms.TotImmediateFlipBytes,
+ display_cfg->plane_descriptors[k].pixel_format,
+ (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
+ mode_lib->ms.Tno_bw_flip[k],
+ mode_lib->ms.dpte_row_height[k],
+ mode_lib->ms.dpte_row_height_chroma[k],
+ mode_lib->ms.use_one_row_for_frame_flip[k],
+ mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
+ s->per_pipe_flip_bytes[k],
+ mode_lib->ms.meta_row_bytes[k],
+ s->meta_row_height_luma[k],
+ s->meta_row_height_chroma[k],
+ mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
+
+ /* Output */
+ &mode_lib->ms.dst_y_per_vm_flip[k],
+ &mode_lib->ms.dst_y_per_row_flip[k],
+ &mode_lib->ms.final_flip_bw[k],
+ &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
+ }
+
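+		// Recompute peak bandwidth, this time including the immediate-flip
+		// bandwidth (inc_flip_bw = 1, flip_bw = final_flip_bw from above)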
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 1;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ calculate_immediate_flip_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth_flip
+ &mode_lib->ms.support.ImmediateFlipSupport,
+
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_flip,
+ mode_lib->ms.support.non_urg_bandwidth_required_flip,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+		for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
+	} else { // if prefetch is not supported, assume immediate flip is not supported either
+ mode_lib->ms.support.ImmediateFlipSupport = false;
+ }
+
+ s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
+ s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
+ s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
+ s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
+ s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
+ s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
+ s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
+ s->mSOCParameters.USRRetrainingLatency = 0;
+ s->mSOCParameters.SMNLatency = 0;
+ s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), mode_lib->ms.state_idx);
+ s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, mode_lib->ms.state_idx);
+ s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
+
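+	// Watermarks and MALL/DRAM-speed-change support, using the SoC latency
+	// parameters gathered into mSOCParameters above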
+ CalculateWatermarks_params->display_cfg = display_cfg;
+ CalculateWatermarks_params->USRRetrainingRequired = false;
+ CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
+ CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
+ CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
+ CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
+ CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
+ CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
+ CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
+ CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
+ CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
+ CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
+ CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
+ CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
+ CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
+ CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
+ CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
+ CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
+ CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
+ CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
+ CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
+ CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
+ CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
+ CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
+ CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
+ CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
+ CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
+ CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
+ CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
+
+ // Output
+ CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
+ CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
+ CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
+ CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
+ CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
+ CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
+ CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
+ CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
+ CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
+ CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
+ CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
+ CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
+
+ CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
+
+ calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
+ DML_LOG_VERBOSE("DML::%s: Done prefetch calculation\n", __func__);
+
+}
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
const struct dml2_display_cfg *display_cfg = in_out_params->in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table = in_out_params->min_clk_table;
-#if defined(__DML_VBA_DEBUG__)
- double old_ReadBandwidthLuma;
- double old_ReadBandwidthChroma;
-#endif
double outstanding_latency_us = 0;
- double min_return_bw_for_latency;
struct dml2_core_calcs_mode_support_locals *s = &mode_lib->scratch.dml_core_mode_support_locals;
- struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params *CalculateWatermarks_params = &mode_lib->scratch.CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_params;
struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
- struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
-#endif
- struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
- struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params *calculate_bytes_to_fetch_required_to_hide_latency_params = &mode_lib->scratch.calculate_bytes_to_fetch_required_to_hide_latency_params;
unsigned int k, m, n;
@@ -7374,9 +7935,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.FabricClock = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz / 1000);
mode_lib->ms.MaxDCFCLK = (double)min_clk_table->max_clocks_khz.dcfclk / 1000;
mode_lib->ms.MaxFabricClock = (double)min_clk_table->max_clocks_khz.fclk / 1000;
- mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dispclk / 1000;
+ mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dispclk / 1000;
mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000;
- mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dppclk / 1000;
+ mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dppclk / 1000;
mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config);
mode_lib->ms.dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000);
mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000);
@@ -7384,25 +7945,25 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.active_min_uclk_dpm_index = get_active_min_uclk_dpm_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), &mode_lib->soc.clk_table);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: --- START --- \n", __func__);
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
- dml2_printf("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
- dml2_printf("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
- dml2_printf("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
- dml2_printf("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
- dml2_printf("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
- dml2_printf("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, mode_lib->ms.num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: qos_param_index = %0d\n", __func__, mode_lib->ms.qos_param_index);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, mode_lib->ms.SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->ms.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: MaxDCFCLK = %f\n", __func__, mode_lib->ms.MaxDCFCLK);
+ DML_LOG_VERBOSE("DML::%s: max_dispclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dispclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dscclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dscclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_dppclk_freq_mhz = %f\n", __func__, mode_lib->ms.max_dppclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: MaxFabricClock = %f\n", __func__, mode_lib->ms.MaxFabricClock);
+ DML_LOG_VERBOSE("DML::%s: ip.compressed_buffer_segment_size_in_kbytes = %u\n", __func__, mode_lib->ip.compressed_buffer_segment_size_in_kbytes);
+ DML_LOG_VERBOSE("DML::%s: ip.dcn_mrq_present = %u\n", __func__, mode_lib->ip.dcn_mrq_present);
for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -7504,12 +8065,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
#ifdef __DML_VBA_DEBUG__
- old_ReadBandwidthLuma = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
- dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelInDETY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
#endif
}
@@ -7629,13 +8188,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MaximumSwathWidthLuma[k] = math_min2(s->MaximumSwathWidthSupportLuma, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
mode_lib->ms.MaximumSwathWidthChroma[k] = math_min2(s->MaximumSwathWidthSupportChroma, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportLuma=%u\n", __func__, k, s->MaximumSwathWidthSupportLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferLuma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferLuma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
- dml2_printf("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthSupportChroma=%u\n", __func__, k, s->MaximumSwathWidthSupportChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaximumSwathWidthInLineBufferChroma=%f\n", __func__, k, mode_lib->ms.MaximumSwathWidthInLineBufferChroma);
}
/* Cursor Support Check */
@@ -7672,11 +8231,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.AlignedCPitch[k] > display_cfg->plane_descriptors[k].surface.plane1.pitch) {
mode_lib->ms.support.PitchSupport = false;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
- dml2_printf("DML::%s: k=%u PitchY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
- dml2_printf("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
- dml2_printf("DML::%s: k=%u PitchC = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
- dml2_printf("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedYPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedYPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u AlignedCPitch = %d\n", __func__, k, mode_lib->ms.support.AlignedCPitch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchC = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane1.pitch);
+ DML_LOG_VERBOSE("DML::%s: k=%u PitchSupport = %d\n", __func__, k, mode_lib->ms.support.PitchSupport);
#endif
}
@@ -7708,11 +8267,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->plane_descriptors[k].composition.viewport.plane0.height > display_cfg->plane_descriptors[k].surface.plane0.height) {
mode_lib->ms.support.ViewportExceedsSurface = true;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%u ViewportWidth = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
- dml2_printf("DML::%s: k=%u SurfaceWidthY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
- dml2_printf("DML::%s: k=%u ViewportHeight = %d\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
- dml2_printf("DML::%s: k=%u SurfaceHeightY = %d\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
- dml2_printf("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportWidth = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceWidthY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.width);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportHeight = %ld\n", __func__, k, display_cfg->plane_descriptors[k].composition.viewport.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u SurfaceHeightY = %ld\n", __func__, k, display_cfg->plane_descriptors[k].surface.plane0.height);
+ DML_LOG_VERBOSE("DML::%s: k=%u ViewportExceedsSurface = %d\n", __func__, k, mode_lib->ms.support.ViewportExceedsSurface);
#endif
}
if (dml_is_420(display_cfg->plane_descriptors[k].pixel_format) || display_cfg->plane_descriptors[k].pixel_format == dml2_rgbe_alpha) {
@@ -7894,8 +8453,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.TotalNumberOfActiveDPP = mode_lib->ms.TotalNumberOfActiveDPP + s->NumberOfDPPDSC;
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d RequiresDSC = %d\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
#endif
// ensure the number dsc slices is integer multiple based on ODM mode
@@ -7911,9 +8470,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DSCSlicesODMModeSupported = ((mode_lib->ms.support.NumberOfDSCSlices[k] % 4) == 0);
#if defined(__DML_VBA_DEBUG__)
if (!mode_lib->ms.support.DSCSlicesODMModeSupported) {
- dml2_printf("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
- dml2_printf("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
- dml2_printf("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d Invalid dsc num_slices and ODM mode setting\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d num_slices = %d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.overrides.num_slices);
+ DML_LOG_VERBOSE("DML::%s: k=%d ODMMode = %d\n", __func__, k, mode_lib->ms.ODMMode[k]);
}
#endif
} else {
@@ -7958,7 +8517,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
if (!mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
- dml2_printf("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__);
+ DML_LOG_VERBOSE("WARNING: DML::%s: MPCC is override to disable but viewport is too large to be supported with single pipe!\n", __func__);
}
} else {
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
@@ -7968,7 +8527,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, NoOfDPP = %d\n", __func__, k, mode_lib->ms.NoOfDPP[k]);
#endif
}
@@ -8138,7 +8697,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_rate,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.audio_sample_layout);
- if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_clocks_khz.dtbclk / 1000)) {
+ if (mode_lib->ms.RequiredDTBCLK[k] > ((double)min_clk_table->max_ss_clocks_khz.dtbclk / 1000)) {
mode_lib->ms.support.DTBCLKRequiredMoreThanSupported = true;
}
} else {
@@ -8167,7 +8726,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->DSCFormatFactor = 1;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, RequiresDSC = %u\n", __func__, k, mode_lib->ms.RequiresDSC[k]);
#endif
if (mode_lib->ms.RequiresDSC[k] == true) {
s->PixelClockBackEndFactor = 3.0;
@@ -8185,10 +8744,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
- dml2_printf("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
- dml2_printf("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
- dml2_printf("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelClockBackEnd = %f\n", __func__, k, s->PixelClockBackEnd[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, required_dscclk_freq_mhz = %f\n", __func__, k, mode_lib->ms.required_dscclk_freq_mhz[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCFormatFactor = %u\n", __func__, k, s->DSCFormatFactor);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSCCLKRequiredMoreThanSupported = %u\n", __func__, k, mode_lib->ms.support.DSCCLKRequiredMoreThanSupported);
#endif
}
}
@@ -8423,13 +8982,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.DCCMetaBufferSizeNotExceeded = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
- dml2_printf("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTEBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.PTEBufferSizeNotExceeded[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DCCMetaBufferSizeNotExceeded = %u\n", __func__, k, mode_lib->ms.DCCMetaBufferSizeNotExceeded[k]);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
- dml2_printf("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: PTEBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML::%s: DCCMetaBufferSizeNotExceeded = %u\n", __func__, mode_lib->ms.support.DCCMetaBufferSizeNotExceeded);
#endif
/* VActive bytes to fetch for UCLK P-State */
@@ -8502,7 +9061,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
double line_time_us = (double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
calculate_cursor_req_attributes(
@@ -8531,9 +9090,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.UrgentBurstFactorCursorPre[k] = mode_lib->ms.UrgentBurstFactorCursor[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, Calling CalculateUrgentBurstFactor\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatio=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioChroma=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
#endif
CalculateUrgentBurstFactor(
@@ -8605,7 +9164,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumVStartup = %u\n", __func__, k, s->MaximumVStartup[k]);
#endif
/* Immediate Flip and MALL parameters */
@@ -8654,16 +9213,15 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
(s->SubViewportMALLPStateMethod && s->FullFrameMALLPStateMethod) || s->SubViewportMALLRefreshGreaterThan120Hz;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
- dml2_printf("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
- dml2_printf("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
- dml2_printf("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
- dml2_printf("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
- dml2_printf("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
- dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
- dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLPStateMethod = %u\n", __func__, s->SubViewportMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: PhantomPipeMALLPStateMethod = %u\n", __func__, s->PhantomPipeMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: FullFrameMALLPStateMethod = %u\n", __func__, s->FullFrameMALLPStateMethod);
+ DML_LOG_VERBOSE("DML::%s: SubViewportMALLRefreshGreaterThan120Hz = %u\n", __func__, s->SubViewportMALLRefreshGreaterThan120Hz);
+ DML_LOG_VERBOSE("DML::%s: InvalidCombinationOfMALLUseForPState = %u\n", __func__, mode_lib->ms.support.InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML::%s: in_out_params->min_clk_index = %u\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
#endif
mode_lib->ms.support.OutstandingRequestsSupport = true;
@@ -8703,10 +9261,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
- dml2_printf("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
- dml2_printf("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: avg_non_urgent_latency_us = %f\n", __func__, mode_lib->ms.support.avg_non_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_luma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (luma)\n", __func__, k, outstanding_latency_us);
#endif
}
@@ -8722,8 +9280,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutstandingRequestsUrgencyAvoidance = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
- dml2_printf("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
+ DML_LOG_VERBOSE("DML::%s: k=%d, request_size_bytes_chroma = %d\n", __func__, k, mode_lib->ms.support.request_size_bytes_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, outstanding_latency_us = %f (chroma)\n", __func__, k, outstanding_latency_us);
#endif
}
}
@@ -8869,7 +9427,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
if (mode_lib->ms.NotEnoughUrgentLatencyHiding[k]) {
mode_lib->ms.support.EnoughUrgentLatencyHidingSupport = false;
- dml2_printf("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: k=%u NotEnoughUrgentLatencyHiding set\n", __func__, k);
}
}
@@ -8878,639 +9436,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.avg_bandwidth_support_ok[m][n] && (m == dml2_core_internal_soc_state_sys_active || mode_lib->soc.mall_allocated_for_dcn_mbytes > 0)) {
mode_lib->ms.support.AvgBandwidthSupport = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
+ DML_LOG_VERBOSE("DML::%s: avg_bandwidth_support_ok[%s][%s] not ok\n", __func__, dml2_core_internal_soc_state_type_str(m), dml2_core_internal_bw_type_str(n));
#endif
}
}
}
- /* Prefetch Check */
- {
- mode_lib->ms.TimeCalc = 24 / mode_lib->ms.dcfclk_deepsleep;
-
- calculate_hostvm_inefficiency_factor(
- &s->HostVMInefficiencyFactor,
- &s->HostVMInefficiencyFactorPrefetch,
-
- display_cfg->gpuvm_enable,
- display_cfg->hostvm_enable,
- mode_lib->ip.remote_iommu_outstanding_translations,
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.urg_bandwidth_available_pixel_and_vm[dml2_core_internal_soc_state_sys_active],
- mode_lib->ms.support.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_sys_active]);
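For orientation, here is a minimal sketch of how a host-VM inefficiency factor of this kind can be derived from the two urgent-bandwidth figures passed in above. The formula is an editorial assumption, not the body of calculate_hostvm_inefficiency_factor() itself:

/* Editorial sketch (assumed model): with host VM enabled, PTE traffic
 * competes with pixel traffic, so the factor is taken as the ratio of
 * combined pixel+VM urgent bandwidth to VM-only urgent bandwidth;
 * 1.0 otherwise. */
static double sketch_hostvm_inefficiency_factor(bool hostvm_enable,
		double urg_bw_pixel_and_vm_mbps, double urg_bw_vm_only_mbps)
{
	if (!hostvm_enable || urg_bw_vm_only_mbps <= 0.0)
		return 1.0;
	return urg_bw_pixel_and_vm_mbps / urg_bw_vm_only_mbps;
}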
-
- mode_lib->ms.Total3dlutActive = 0;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut)
- mode_lib->ms.Total3dlutActive = mode_lib->ms.Total3dlutActive + 1;
-
- // Calculate tdlut schedule related terms
- calculate_tdlut_setting_params->dispclk_mhz = mode_lib->ms.RequiredDISPCLK;
- calculate_tdlut_setting_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- calculate_tdlut_setting_params->tdlut_width_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_width_mode;
- calculate_tdlut_setting_params->tdlut_addressing_mode = display_cfg->plane_descriptors[k].tdlut.tdlut_addressing_mode;
- calculate_tdlut_setting_params->cursor_buffer_size = mode_lib->ip.cursor_buffer_size;
- calculate_tdlut_setting_params->gpuvm_enable = display_cfg->gpuvm_enable;
- calculate_tdlut_setting_params->gpuvm_page_size_kbytes = display_cfg->plane_descriptors[k].overrides.gpuvm_min_page_size_kbytes;
- calculate_tdlut_setting_params->tdlut_mpc_width_flag = display_cfg->plane_descriptors[k].tdlut.tdlut_mpc_width_flag;
- calculate_tdlut_setting_params->is_gfx11 = dml_get_gfx_version(display_cfg->plane_descriptors[k].surface.tiling);
-
- // output
- calculate_tdlut_setting_params->tdlut_pte_bytes_per_frame = &s->tdlut_pte_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_frame = &s->tdlut_bytes_per_frame[k];
- calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
- calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
- calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
- calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
- calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
- calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
- }
-
- min_return_bw_for_latency = mode_lib->ms.support.urg_bandwidth_available_min_latency[dml2_core_internal_soc_state_sys_active];
-
- if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn3)
- s->ReorderingBytes = (unsigned int)(mode_lib->soc.clk_table.dram_config.channel_count * math_max3(mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_only_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.urgent_out_of_order_return_per_channel_vm_only_bytes));
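A quick numeric check of the dcn3.x reordering-bytes term above, with illustrative (made-up) per-channel values:

/* Editorial example, assumed values:
 *   channel_count = 8
 *   per-channel out-of-order bytes: pixel_only = 256,
 *                                   pixel_and_vm = 512, vm_only = 128
 *   ReorderingBytes = 8 * max3(256, 512, 128) = 8 * 512 = 4096 bytes
 */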
-
- CalculateExtraLatency(
- display_cfg,
- mode_lib->ip.rob_buffer_size_kbytes,
- mode_lib->soc.qos_parameters.qos_params.dcn32x.loaded_round_trip_latency_fclk_cycles,
- s->ReorderingBytes,
- mode_lib->ms.DCFCLK,
- mode_lib->ms.FabricClock,
- mode_lib->ip.pixel_chunk_size_kbytes,
- min_return_bw_for_latency,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.dpte_group_bytes,
- s->tdlut_bytes_per_group,
- s->HostVMInefficiencyFactor,
- s->HostVMInefficiencyFactorPrefetch,
- mode_lib->soc.hostvm_min_page_size_kbytes,
- mode_lib->soc.qos_parameters.qos_type,
- !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable),
- mode_lib->soc.max_outstanding_reqs,
- mode_lib->ms.support.request_size_bytes_luma,
- mode_lib->ms.support.request_size_bytes_chroma,
- mode_lib->ip.meta_chunk_size_kbytes,
- mode_lib->ip.dchub_arb_to_ret_delay,
- mode_lib->ms.TripToMemory,
- mode_lib->ip.hostvm_mode,
-
- // output
- &mode_lib->ms.ExtraLatency,
- &mode_lib->ms.ExtraLatency_sr,
- &mode_lib->ms.ExtraLatencyPrefetch);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- s->impacted_dst_y_pre[k] = 0;
-
- s->recalc_prefetch_schedule = 0;
- s->recalc_prefetch_done = 0;
- do {
- mode_lib->ms.support.PrefetchSupported = true;
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
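The line-time expression above is h_total divided by the pixel clock in MHz; a worked example with assumed 1080p-class timing values:

/* Editorial example: h_total = 2200, pixel_clock_khz = 148500
 *   line_time = 2200 / (148500 / 1000) = 2200 / 148.5 ~= 14.81 us
 */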
-
- s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
- mode_lib->ms.NoOfDPP[k],
- display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
- display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
- display_cfg->plane_descriptors[k].composition.rotation_angle);
-
- struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
-
- mode_lib->ms.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->ms.UrgLatency,
- mode_lib->ms.TripToMemory,
- !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
-
- myPipe->Dppclk = mode_lib->ms.RequiredDPPCLK[k];
- myPipe->Dispclk = mode_lib->ms.RequiredDISPCLK;
- myPipe->PixelClock = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
- myPipe->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- myPipe->DPPPerSurface = mode_lib->ms.NoOfDPP[k];
- myPipe->ScalerEnabled = display_cfg->plane_descriptors[k].composition.scaler_info.enabled;
- myPipe->VRatio = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- myPipe->VRatioChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- myPipe->VTaps = display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_taps;
- myPipe->VTapsChroma = display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_taps;
- myPipe->RotationAngle = display_cfg->plane_descriptors[k].composition.rotation_angle;
- myPipe->mirrored = display_cfg->plane_descriptors[k].composition.mirrored;
- myPipe->BlockWidth256BytesY = mode_lib->ms.Read256BlockWidthY[k];
- myPipe->BlockHeight256BytesY = mode_lib->ms.Read256BlockHeightY[k];
- myPipe->BlockWidth256BytesC = mode_lib->ms.Read256BlockWidthC[k];
- myPipe->BlockHeight256BytesC = mode_lib->ms.Read256BlockHeightC[k];
- myPipe->InterlaceEnable = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.interlaced;
- myPipe->NumberOfCursors = display_cfg->plane_descriptors[k].cursor.num_cursors;
- myPipe->VBlank = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active;
- myPipe->HTotal = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total;
- myPipe->HActive = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_active;
- myPipe->DCCEnable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- myPipe->ODMMode = mode_lib->ms.ODMMode[k];
- myPipe->SourcePixelFormat = display_cfg->plane_descriptors[k].pixel_format;
- myPipe->BytePerPixelY = mode_lib->ms.BytePerPixelY[k];
- myPipe->BytePerPixelC = mode_lib->ms.BytePerPixelC[k];
- myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
- dml2_printf("DML::%s: MaximumVStartup = %u\n", __func__, s->MaximumVStartup[k]);
-#endif
- CalculatePrefetchSchedule_params->display_cfg = display_cfg;
- CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
- CalculatePrefetchSchedule_params->myPipe = myPipe;
- CalculatePrefetchSchedule_params->DSCDelay = mode_lib->ms.DSCDelay[k];
- CalculatePrefetchSchedule_params->DPPCLKDelaySubtotalPlusCNVCFormater = mode_lib->ip.dppclk_delay_subtotal + mode_lib->ip.dppclk_delay_cnvc_formatter;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCL = mode_lib->ip.dppclk_delay_scl;
- CalculatePrefetchSchedule_params->DPPCLKDelaySCLLBOnly = mode_lib->ip.dppclk_delay_scl_lb_only;
- CalculatePrefetchSchedule_params->DPPCLKDelayCNVCCursor = mode_lib->ip.dppclk_delay_cnvc_cursor;
- CalculatePrefetchSchedule_params->DISPCLKDelaySubtotal = mode_lib->ip.dispclk_delay_subtotal;
- CalculatePrefetchSchedule_params->DPP_RECOUT_WIDTH = (unsigned int)(mode_lib->ms.SwathWidthY[k] / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format;
- CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters;
- CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k];
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes;
- CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable;
- CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled;
- CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required;
- CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes;
- CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch;
- CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc;
- CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k];
- CalculatePrefetchSchedule_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRow[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesY[k];
- CalculatePrefetchSchedule_params->VInitPreFillY = mode_lib->ms.PrefillY[k];
- CalculatePrefetchSchedule_params->MaxNumSwathY = mode_lib->ms.MaxNumSwathY[k];
- CalculatePrefetchSchedule_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesC[k];
- CalculatePrefetchSchedule_params->VInitPreFillC = mode_lib->ms.PrefillC[k];
- CalculatePrefetchSchedule_params->MaxNumSwathC = mode_lib->ms.MaxNumSwathC[k];
- CalculatePrefetchSchedule_params->swath_width_luma_ub = mode_lib->ms.swath_width_luma_ub[k];
- CalculatePrefetchSchedule_params->swath_width_chroma_ub = mode_lib->ms.swath_width_chroma_ub[k];
- CalculatePrefetchSchedule_params->SwathHeightY = mode_lib->ms.SwathHeightY[k];
- CalculatePrefetchSchedule_params->SwathHeightC = mode_lib->ms.SwathHeightC[k];
- CalculatePrefetchSchedule_params->TWait = mode_lib->ms.TWait[k];
- CalculatePrefetchSchedule_params->Ttrip = mode_lib->ms.TripToMemory;
- CalculatePrefetchSchedule_params->Turg = mode_lib->ms.UrgLatency;
- CalculatePrefetchSchedule_params->setup_for_tdlut = display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut;
- CalculatePrefetchSchedule_params->tdlut_pte_bytes_per_frame = s->tdlut_pte_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_bytes_per_frame = s->tdlut_bytes_per_frame[k];
- CalculatePrefetchSchedule_params->tdlut_opt_time = s->tdlut_opt_time[k];
- CalculatePrefetchSchedule_params->tdlut_drain_time = s->tdlut_drain_time[k];
- CalculatePrefetchSchedule_params->num_cursors = (display_cfg->plane_descriptors[k].cursor.cursor_width > 0);
- CalculatePrefetchSchedule_params->cursor_bytes_per_chunk = s->cursor_bytes_per_chunk[k];
- CalculatePrefetchSchedule_params->cursor_bytes_per_line = s->cursor_bytes_per_line[k];
- CalculatePrefetchSchedule_params->dcc_enable = display_cfg->plane_descriptors[k].surface.dcc.enable;
- CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
- CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
- CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
- CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
- CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
-
- // output
- CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
- CalculatePrefetchSchedule_params->DSTYAfterScaler = &s->DSTYAfterScaler[k];
- CalculatePrefetchSchedule_params->dst_y_prefetch = &mode_lib->ms.dst_y_prefetch[k];
- CalculatePrefetchSchedule_params->dst_y_per_vm_vblank = &mode_lib->ms.LinesForVM[k];
- CalculatePrefetchSchedule_params->dst_y_per_row_vblank = &mode_lib->ms.LinesForDPTERow[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchY = &mode_lib->ms.VRatioPreY[k];
- CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
- CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
- CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
- CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
- CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
- CalculatePrefetchSchedule_params->prefetch_vmrow_bw = &mode_lib->ms.prefetch_vmrow_bw[k];
- CalculatePrefetchSchedule_params->Tdmdl_vm = &s->dummy_single[0];
- CalculatePrefetchSchedule_params->Tdmdl = &s->dummy_single[1];
- CalculatePrefetchSchedule_params->TSetup = &s->dummy_single[2];
- CalculatePrefetchSchedule_params->Tvm_trips = &s->Tvm_trips[k];
- CalculatePrefetchSchedule_params->Tr0_trips = &s->Tr0_trips[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip = &s->Tvm_trips_flip[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip = &s->Tr0_trips_flip[k];
- CalculatePrefetchSchedule_params->Tvm_trips_flip_rounded = &s->Tvm_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->Tr0_trips_flip_rounded = &s->Tr0_trips_flip_rounded[k];
- CalculatePrefetchSchedule_params->VUpdateOffsetPix = &s->dummy_integer[0];
- CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
- CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
- CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
- CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
- CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
- CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
- CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
-
- mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
-
- mode_lib->ms.support.PrefetchSupported &= !mode_lib->ms.NoTimeForPrefetch[k];
- dml2_printf("DML::%s: k=%d, dst_y_per_vm_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_vm_vblank);
- dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
- } // for k num_planes
-
- CalculateDCFCLKDeepSleepTdlut(
- display_cfg,
- mode_lib->ms.num_active_planes,
- mode_lib->ms.BytePerPixelY,
- mode_lib->ms.BytePerPixelC,
- mode_lib->ms.SwathWidthY,
- mode_lib->ms.SwathWidthC,
- mode_lib->ms.NoOfDPP,
- mode_lib->ms.PSCL_FACTOR,
- mode_lib->ms.PSCL_FACTOR_CHROMA,
- mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.vactive_sw_bw_l,
- mode_lib->ms.vactive_sw_bw_c,
- mode_lib->soc.return_bus_width_bytes,
- mode_lib->ms.RequiredDISPCLK,
- s->tdlut_bytes_to_deliver,
- s->prefetch_swath_time_us,
-
- /* Output */
- &mode_lib->ms.dcfclk_deepsleep);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.dst_y_prefetch[k] < 2.0
- || mode_lib->ms.LinesForVM[k] >= 32.0
- || mode_lib->ms.LinesForDPTERow[k] >= 16.0
- || mode_lib->ms.NoTimeForPrefetch[k] == true
- || s->DSTYAfterScaler[k] > 8) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, dst_y_prefetch=%f (should not be < 2)\n", __func__, k, mode_lib->ms.dst_y_prefetch[k]);
- dml2_printf("DML::%s: k=%d, LinesForVM=%f (should not be >= 32)\n", __func__, k, mode_lib->ms.LinesForVM[k]);
- dml2_printf("DML::%s: k=%d, LinesForDPTERow=%f (should not be >= 16)\n", __func__, k, mode_lib->ms.LinesForDPTERow[k]);
- dml2_printf("DML::%s: k=%d, DSTYAfterScaler=%d (should be <= 8)\n", __func__, k, s->DSTYAfterScaler[k]);
- dml2_printf("DML::%s: k=%d, NoTimeForPrefetch=%d\n", __func__, k, mode_lib->ms.NoTimeForPrefetch[k]);
- }
- }
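The five conditions in the loop above act as one admissibility predicate per plane. A minimal editorial restatement (this helper does not exist in the patch; the thresholds are taken directly from the checks above):

/* Editorial sketch: the prefetch schedule for a plane is admissible iff */
static bool sketch_prefetch_schedule_ok(double dst_y_prefetch,
		double lines_for_vm, double lines_for_dpte_row,
		bool no_time_for_prefetch, int dst_y_after_scaler)
{
	return dst_y_prefetch >= 2.0 &&		/* at least 2 lines of prefetch */
	       lines_for_vm < 32.0 &&		/* VM fetch fits its budget */
	       lines_for_dpte_row < 16.0 &&	/* PTE-row fetch fits its budget */
	       !no_time_for_prefetch &&
	       dst_y_after_scaler <= 8;		/* scaler output is not too late */
}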
-
- mode_lib->ms.support.DynamicMetadataSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (mode_lib->ms.NoTimeForDynamicMetadata[k] == true) {
- mode_lib->ms.support.DynamicMetadataSupported = false;
- }
- }
-
- mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
- mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
- mode_lib->ms.support.VRatioInPrefetchSupported = false;
- dml2_printf("DML::%s: k=%d VRatioPreY = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d VRatioPreC = %f (should be <= %f)\n", __func__, k, mode_lib->ms.VRatioPreC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioInPrefetchSupported = %u\n", __func__, mode_lib->ms.support.VRatioInPrefetchSupported);
- }
- }
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
-
- // By default, do not recalc prefetch schedule
- s->recalc_prefetch_schedule = 0;
-
- // Only do the urg vs prefetch bandwidth check, the flip schedule check, and the power-saving feature support check IF the prefetch schedule check is ok
- if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- // Calculate Urgent burst factor for prefetch
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
- dml2_printf("DML::%s: k=%d, VRatioPreY=%f\n", __func__, k, mode_lib->ms.VRatioPreY[k]);
- dml2_printf("DML::%s: k=%d, VRatioPreC=%f\n", __func__, k, mode_lib->ms.VRatioPreC[k]);
-#endif
- CalculateUrgentBurstFactor(
- &display_cfg->plane_descriptors[k],
- mode_lib->ms.swath_width_luma_ub[k],
- mode_lib->ms.swath_width_chroma_ub[k],
- mode_lib->ms.SwathHeightY[k],
- mode_lib->ms.SwathHeightC[k],
- s->line_times[k],
- mode_lib->ms.UrgLatency,
- mode_lib->ms.VRatioPreY[k],
- mode_lib->ms.VRatioPreC[k],
- mode_lib->ms.BytePerPixelInDETY[k],
- mode_lib->ms.BytePerPixelInDETC[k],
- mode_lib->ms.DETBufferSizeY[k],
- mode_lib->ms.DETBufferSizeC[k],
- /* Output */
- &mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChromaPre[k],
- &mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
-
- // Calculate urgent bandwidth required, both urg and non urg peak bandwidth
- // assume flip bw is 0 at this point
- for (k = 0; k < mode_lib->ms.num_active_planes; k++)
- mode_lib->ms.final_flip_bw[k] = 0;
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = mode_lib->ms.support.urg_vactive_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = mode_lib->ms.support.urg_bandwidth_required_qual;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = mode_lib->ms.surface_avg_vactive_required_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 0;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- // Check urg peak bandwidth against available urg bw
- // check at SDP and DRAM, for all soc states (SVP prefetch and Sys Active)
- check_urgent_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth
- &s->dummy_single[1], // double* frac_urg_bandwidth_mall
- &mode_lib->ms.support.UrgVactiveBandwidthSupport,
- &mode_lib->ms.support.PrefetchBandwidthSupported,
-
- mode_lib->soc.mall_allocated_for_dcn_mbytes,
- mode_lib->ms.support.non_urg_bandwidth_required,
- mode_lib->ms.support.urg_vactive_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_required,
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.PrefetchBandwidthSupported;
- dml2_printf("DML::%s: PrefetchBandwidthSupported=%0d\n", __func__, mode_lib->ms.support.PrefetchBandwidthSupported);
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]) {
- mode_lib->ms.support.PrefetchSupported = false;
- dml2_printf("DML::%s: k=%d, NotEnoughUrgentLatencyHidingPre=%d\n", __func__, k, mode_lib->ms.NotEnoughUrgentLatencyHidingPre[k]);
- }
- }
-
-#ifdef DML_GLOBAL_PREFETCH_CHECK
- if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
- CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
- CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
- CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
- CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
- CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
- CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
- CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
- CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
- CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
- CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
- CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
- CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
- CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
- CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
- if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
- CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
-
- CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
- ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
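Worked example for the estimate above, with assumed SoC values (the 10 * 1024 MBps floor comes from the code):

/* Editorial example, assumed values:
 *   estimated_urg_bandwidth_required_mbps = 10240 (floored)
 *   return_bus_width_bytes = 64, dcfclk_derate_percent = 100
 *   estimated_dcfclk_mhz = (10240 / 64) / (100 / 100.0) = 160 MHz
 */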
-
- // if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
- CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
- CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
- mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
- s->recalc_prefetch_done = 1;
- s->recalc_prefetch_schedule = 1;
- }
-#endif
- } // prefetch schedule ok, do urg bw and flip schedule
- } while (s->recalc_prefetch_schedule);
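An editorial note on the loop that just closed; the flow below is read off the flags in the removed code:

/*
 * Pass 1: recalc_prefetch_done == 0; per-plane CalculatePrefetchSchedule()
 *         runs with impacted_dst_y_pre[] all zero. If the global
 *         admissibility check runs, it fills impacted_dst_y_pre[] and the
 *         code sets recalc_prefetch_done = 1 and recalc_prefetch_schedule = 1.
 * Pass 2: schedules are recalculated with the impacted Tpre values;
 *         recalc_prefetch_done == 1 skips the admissibility check, so
 *         recalc_prefetch_schedule stays 0 and the loop exits.
 * At most two iterations are possible by construction.
 */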
-
- // Flip Schedule
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
-
- }
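Worked example for the accumulation above, assuming get_pipe_flip_bytes() roughly sums the VM, PTE-row, and meta-row bytes (scaled by the host-VM inefficiency factor) — an editorial assumption about its body:

/* Editorial example, assumed values:
 *   vm_bytes = 4096, DPTEBytesPerRow = 2048, meta_row_bytes = 512,
 *   HostVMInefficiencyFactor = 1.0
 *   per_pipe_flip_bytes ~= 4096 + 2048 + 512 = 6656 bytes
 *   with NoOfDPP = 2, the plane adds 13312 bytes to TotImmediateFlipBytes
 */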
-
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- mode_lib->ip.max_flip_time_lines,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
-
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 1;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
- } else { // if prefetch is not supported, assume iflip is not supported either
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
-
- s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
- s->mSOCParameters.ExtraLatency = mode_lib->ms.ExtraLatency;
- s->mSOCParameters.ExtraLatency_sr = mode_lib->ms.ExtraLatency_sr;
- s->mSOCParameters.WritebackLatency = mode_lib->soc.qos_parameters.writeback.base_latency_us;
- s->mSOCParameters.DRAMClockChangeLatency = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
- s->mSOCParameters.FCLKChangeLatency = mode_lib->soc.power_management_parameters.fclk_change_blackout_us;
- s->mSOCParameters.SRExitTime = mode_lib->soc.power_management_parameters.stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitTime = mode_lib->soc.power_management_parameters.stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.SRExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_exit_latency_us;
- s->mSOCParameters.SREnterPlusExitZ8Time = mode_lib->soc.power_management_parameters.z8_stutter_enter_plus_exit_latency_us;
- s->mSOCParameters.USRRetrainingLatency = 0;
- s->mSOCParameters.SMNLatency = 0;
- s->mSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
- s->mSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
- s->mSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
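The DF response time above converts FCLK cycles to microseconds; a worked example with an assumed cycle count:

/* Editorial example: df_qos_response_time_fclk_cycles = 300 (assumed),
 * FabricClock = 1000 MHz  ->  df_response_time_us = 300 / 1000 = 0.3 us
 */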
- s->mSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
-
- CalculateWatermarks_params->display_cfg = display_cfg;
- CalculateWatermarks_params->USRRetrainingRequired = false;
- CalculateWatermarks_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- CalculateWatermarks_params->MaxLineBufferLines = mode_lib->ip.max_line_buffer_lines;
- CalculateWatermarks_params->LineBufferSize = mode_lib->ip.line_buffer_size_bits;
- CalculateWatermarks_params->WritebackInterfaceBufferSize = mode_lib->ip.writeback_interface_buffer_size_kbytes;
- CalculateWatermarks_params->DCFCLK = mode_lib->ms.DCFCLK;
- CalculateWatermarks_params->SynchronizeTimings = display_cfg->overrides.synchronize_timings;
- CalculateWatermarks_params->SynchronizeDRRDisplaysForUCLKPStateChange = display_cfg->overrides.synchronize_ddr_displays_for_uclk_pstate_change;
- CalculateWatermarks_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes;
- CalculateWatermarks_params->mmSOCParameters = s->mSOCParameters;
- CalculateWatermarks_params->WritebackChunkSize = mode_lib->ip.writeback_chunk_size_kbytes;
- CalculateWatermarks_params->SOCCLK = mode_lib->ms.SOCCLK;
- CalculateWatermarks_params->DCFClkDeepSleep = mode_lib->ms.dcfclk_deepsleep;
- CalculateWatermarks_params->DETBufferSizeY = mode_lib->ms.DETBufferSizeY;
- CalculateWatermarks_params->DETBufferSizeC = mode_lib->ms.DETBufferSizeC;
- CalculateWatermarks_params->SwathHeightY = mode_lib->ms.SwathHeightY;
- CalculateWatermarks_params->SwathHeightC = mode_lib->ms.SwathHeightC;
- CalculateWatermarks_params->SwathWidthY = mode_lib->ms.SwathWidthY;
- CalculateWatermarks_params->SwathWidthC = mode_lib->ms.SwathWidthC;
- CalculateWatermarks_params->DPPPerSurface = mode_lib->ms.NoOfDPP;
- CalculateWatermarks_params->BytePerPixelDETY = mode_lib->ms.BytePerPixelInDETY;
- CalculateWatermarks_params->BytePerPixelDETC = mode_lib->ms.BytePerPixelInDETC;
- CalculateWatermarks_params->DSTXAfterScaler = s->DSTXAfterScaler;
- CalculateWatermarks_params->DSTYAfterScaler = s->DSTYAfterScaler;
- CalculateWatermarks_params->UnboundedRequestEnabled = mode_lib->ms.UnboundedRequestEnabled;
- CalculateWatermarks_params->CompressedBufferSizeInkByte = mode_lib->ms.CompressedBufferSizeInkByte;
- CalculateWatermarks_params->meta_row_height_l = s->meta_row_height_luma;
- CalculateWatermarks_params->meta_row_height_c = s->meta_row_height_chroma;
-
- // Output
- CalculateWatermarks_params->Watermark = &mode_lib->ms.support.watermarks; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = mode_lib->ms.support.DRAMClockChangeSupport;
- CalculateWatermarks_params->global_dram_clock_change_supported = &mode_lib->ms.support.global_dram_clock_change_supported;
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0]; // double *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = mode_lib->ms.SubViewportLinesNeededInMALL; // unsigned int SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = mode_lib->ms.support.FCLKChangeSupport;
- CalculateWatermarks_params->global_fclk_change_supported = &mode_lib->ms.support.global_fclk_change_supported;
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // double *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport;
- CalculateWatermarks_params->g6_temp_read_support = &mode_lib->ms.support.g6_temp_read_support;
- CalculateWatermarks_params->VActiveLatencyHidingMargin = mode_lib->ms.VActiveLatencyHidingMargin;
- CalculateWatermarks_params->VActiveLatencyHidingUs = mode_lib->ms.VActiveLatencyHidingUs;
-
- CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
-
- calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->ms.support.watermarks, s->dummy_integer_array[0]);
- }
- dml2_printf("DML::%s: Done prefetch calculation\n", __func__);
- // End of Prefetch Check
+ dml_core_ms_prefetch_check(mode_lib, display_cfg);
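The net effect of this hunk is that the ~600-line inline prefetch check removed above is hoisted into a single helper. A sketch of the shape that helper presumably takes — the signature is inferred from the call site and the outline from the removed code, so treat both as assumptions:

/* Editorial sketch, outline only: */
static void dml_core_ms_prefetch_check(struct dml2_core_internal_display_mode_lib *mode_lib,
		const struct dml2_display_cfg *display_cfg)
{
	/* 1. host-VM inefficiency factors and per-plane tdlut settings */
	/* 2. extra-latency terms, then the (at most two-pass) prefetch
	 *    schedule loop with the global admissibility check */
	/* 3. flip schedule and immediate-flip bandwidth support */
	/* 4. watermarks and p-state keepout lines */
}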
mode_lib->ms.support.max_urgent_latency_us = s->mSOCParameters.max_urgent_latency_us;
@@ -9546,8 +9478,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.dram_change_vactive_det_fill_delay_us);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
- dml2_printf("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
+ DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
+ DML_LOG_VERBOSE("DML::%s: ROBSupport = %u\n", __func__, mode_lib->ms.support.ROBSupport);
#endif
/*Mode Support, Voltage State and SOC Configuration*/
@@ -9597,17 +9529,17 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&& !mode_lib->ms.support.ExceededMALLSize
&& mode_lib->ms.support.g6_temp_read_support
&& ((!display_cfg->hostvm_enable && !s->ImmediateFlipRequired) || mode_lib->ms.support.ImmediateFlipSupport)) {
- dml2_printf("DML::%s: mode is supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is supported\n", __func__);
mode_lib->ms.support.ModeSupport = true;
} else {
- dml2_printf("DML::%s: mode is NOT supported\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: mode is NOT supported\n", __func__);
mode_lib->ms.support.ModeSupport = false;
}
}
// Since mode_support now works on one particular power state, there is only one state index (index 0).
- dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
- dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
@@ -9623,8 +9555,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.OutputRate[k] = mode_lib->ms.OutputRate[k];
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
- dml2_printf("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, ODMMode = %u\n", __func__, k, mode_lib->ms.support.ODMMode[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DSCEnabled = %u\n", __func__, k, mode_lib->ms.support.DSCEnabled[k]);
#endif
}
@@ -9632,7 +9564,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!mode_lib->ms.support.ModeSupport)
dml2_print_mode_support_info(&mode_lib->ms.support, true);
- dml2_printf("DML::%s: --- DONE --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- DONE --- \n", __func__);
#endif
return mode_lib->ms.support.ModeSupport;
@@ -9642,18 +9574,18 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
{
unsigned int result;
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
result = dml_core_mode_support(in_out_params);
if (result)
*in_out_params->out_evaluation_info = in_out_params->mode_lib->ms.support;
- dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
@@ -9687,19 +9619,19 @@ static void CalculatePixelDeliveryTimes(
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
- dml2_printf("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
- dml2_printf("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
- dml2_printf("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
- dml2_printf("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
- dml2_printf("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
- dml2_printf("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatio = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : HRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioChroma = %f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchY = %f\n", __func__, k, VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : VRatioPrefetchC = %f\n", __func__, k, VRatioPrefetchC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_luma_ub = %u\n", __func__, k, swath_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : swath_width_chroma_ub = %u\n", __func__, k, swath_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT = %f\n", __func__, k, PSCL_THROUGHPUT[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : PSCL_THROUGHPUT_CHROMA = %f\n", __func__, k, PSCL_THROUGHPUT_CHROMA[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DPPPerSurface = %u\n", __func__, k, cfg_support_info->plane_support_info[k].dpps_used);
+ DML_LOG_VERBOSE("DML::%s: k=%u : pixel_clock_mhz = %f\n", __func__, k, pixel_clock_mhz);
+ DML_LOG_VERBOSE("DML::%s: k=%u : Dppclk = %f\n", __func__, k, Dppclk[k]);
#endif
if (display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio <= 1) {
DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * cfg_support_info->plane_support_info[k].dpps_used / display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio / pixel_clock_mhz;
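Worked example for the v_ratio <= 1 branch above, with assumed values:

/* Editorial example: swath_width_luma_ub = 1920, dpps_used = 1,
 * h_ratio = 1.0, pixel_clock_mhz = 148.5
 *   DisplayPipeLineDeliveryTimeLuma = 1920 * 1 / 1.0 / 148.5 ~= 12.93 us
 */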
@@ -9733,10 +9665,10 @@ static void CalculatePixelDeliveryTimes(
}
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeLineDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeLineDeliveryTimeChromaPrefetch[k]);
#endif
}
@@ -9752,12 +9684,12 @@ static void CalculatePixelDeliveryTimes(
DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub_c[k];
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
- dml2_printf("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
- dml2_printf("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLuma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeLumaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeLumaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_l = %d\n", __func__, k, req_per_swath_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChroma = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : DisplayPipeRequestDeliveryTimeChromaPrefetch = %f\n", __func__, k, DisplayPipeRequestDeliveryTimeChromaPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u : req_per_swath_ub_c = %d\n", __func__, k, req_per_swath_ub_c[k]);
#endif
}
}
@@ -9853,14 +9785,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
- dml2_printf("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, DST_Y_PER_META_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_META_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkNominal = %f\n", __func__, k, p->TimePerMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkVBlank = %f\n", __func__, k, p->TimePerMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerMetaChunkFlip = %f\n", __func__, k, p->TimePerMetaChunkFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkNominal = %f\n", __func__, k, p->TimePerChromaMetaChunkNominal[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkVBlank = %f\n", __func__, k, p->TimePerChromaMetaChunkVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, TimePerChromaMetaChunkFlip = %f\n", __func__, k, p->TimePerChromaMetaChunkFlip[k]);
#endif
}
@@ -9881,7 +9813,7 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
else
p->time_per_tdlut_group[k] = 0;
- dml2_printf("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_tdlut_group = %f\n", __func__, k, p->time_per_tdlut_group[k]);
if (p->display_cfg->gpuvm_enable == true) {
if (!dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) {
@@ -9897,14 +9829,14 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_luma_ub <= 2) {
dpte_groups_per_row_luma_ub = dpte_groups_per_row_luma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
- dml2_printf("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
- dml2_printf("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, use_one_row_for_frame = %u\n", __func__, k, p->use_one_row_for_frame[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_bytes = %u\n", __func__, k, p->dpte_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PTERequestSizeY = %u\n", __func__, k, p->PTERequestSizeY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqWidthY = %u\n", __func__, k, p->PixelPTEReqWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, PixelPTEReqHeightY = %u\n", __func__, k, p->PixelPTEReqHeightY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_luma_ub = %u\n", __func__, k, p->dpte_row_width_luma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_luma = %u\n", __func__, k, dpte_group_width_luma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_luma_ub = %u\n", __func__, k, dpte_groups_per_row_luma_ub);
p->time_per_pte_group_nom_luma[k] = p->DST_Y_PER_PTE_ROW_NOM_L[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
p->time_per_pte_group_vblank_luma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_luma_ub;
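Worked example for the nominal luma PTE-group time above, with assumed values:

/* Editorial example: DST_Y_PER_PTE_ROW_NOM_L = 16 lines, h_total = 2200,
 * pixel_clock_mhz = 148.5, dpte_groups_per_row_luma_ub = 4
 *   time_per_pte_group_nom_luma = 16 * 2200 / 148.5 / 4 ~= 59.26 us
 */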
@@ -9928,9 +9860,9 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
if (dpte_groups_per_row_chroma_ub <= 2) {
dpte_groups_per_row_chroma_ub = dpte_groups_per_row_chroma_ub + 1;
}
- dml2_printf("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
- dml2_printf("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
- dml2_printf("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_row_width_chroma_ub = %u\n", __func__, k, p->dpte_row_width_chroma_ub[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_group_width_chroma = %u\n", __func__, k, dpte_group_width_chroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpte_groups_per_row_chroma_ub = %u\n", __func__, k, dpte_groups_per_row_chroma_ub);
p->time_per_pte_group_nom_chroma[k] = p->DST_Y_PER_PTE_ROW_NOM_C[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
p->time_per_pte_group_vblank_chroma[k] = p->dst_y_per_row_vblank[k] * p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / pixel_clock_mhz / dpte_groups_per_row_chroma_ub;
@@ -9945,17 +9877,17 @@ static void CalculateMetaAndPTETimes(struct dml2_core_shared_CalculateMetaAndPTE
p->time_per_pte_group_flip_chroma[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_vblank = %f\n", __func__, k, p->dst_y_per_row_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_row_flip = %f\n", __func__, k, p->dst_y_per_row_flip[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
- dml2_printf("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
- dml2_printf("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_L = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_L[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DST_Y_PER_PTE_ROW_NOM_C = %f\n", __func__, k, p->DST_Y_PER_PTE_ROW_NOM_C[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_luma = %f\n", __func__, k, p->time_per_pte_group_nom_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_luma = %f\n", __func__, k, p->time_per_pte_group_vblank_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_luma = %f\n", __func__, k, p->time_per_pte_group_flip_luma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_nom_chroma = %f\n", __func__, k, p->time_per_pte_group_nom_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_vblank_chroma = %f\n", __func__, k, p->time_per_pte_group_vblank_chroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, time_per_pte_group_flip_chroma = %f\n", __func__, k, p->time_per_pte_group_flip_chroma[k]);
#endif
}
} // CalculateMetaAndPTETimes
@@ -9991,18 +9923,18 @@ static void CalculateVMGroupAndRequestTimes(
double line_time;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
+ DML_LOG_VERBOSE("DML::%s: NumberOfActiveSurfaces = %u\n", __func__, NumberOfActiveSurfaces);
#endif
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
double pixel_clock_mhz = ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
bool dcc_mrq_enable = display_cfg->plane_descriptors[k].surface.dcc.enable && mrq_present;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
- dml2_printf("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
- dml2_printf("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dcc_mrq_enable = %u\n", __func__, k, dcc_mrq_enable);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vm_group_bytes = %u\n", __func__, k, vm_group_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_l = %u\n", __func__, k, dpde0_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dpde0_bytes_per_frame_ub_c = %u\n", __func__, k, dpde0_bytes_per_frame_ub_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_l = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d, meta_pte_bytes_per_frame_ub_c = %d\n", __func__, k, meta_pte_bytes_per_frame_ub_c[k]);
#endif
if (display_cfg->gpuvm_enable) {
@@ -10071,13 +10003,13 @@ static void CalculateVMGroupAndRequestTimes(
else
TimePerVMRequestFlip[k] = 0.0;
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
- dml2_printf("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
- dml2_printf("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %f\n", __func__, k, num_group_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %f\n", __func__, k, num_group_per_lower_vm_stage_flip);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %f\n", __func__, k, num_req_per_lower_vm_stage_pref);
- dml2_printf("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %f\n", __func__, k, num_req_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_vblank = %f\n", __func__, k, dst_y_per_vm_vblank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, dst_y_per_vm_flip = %f\n", __func__, k, dst_y_per_vm_flip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, line_time = %f\n", __func__, k, line_time);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_pref = %d\n", __func__, k, num_group_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_group_per_lower_vm_stage_flip = %d\n", __func__, k, num_group_per_lower_vm_stage_flip);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_pref = %d\n", __func__, k, num_req_per_lower_vm_stage_pref);
+ DML_LOG_VERBOSE("DML::%s: k=%u, num_req_per_lower_vm_stage_flip = %d\n", __func__, k, num_req_per_lower_vm_stage_flip);
if (display_cfg->gpuvm_max_page_table_levels > 2) {
TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
@@ -10094,10 +10026,10 @@ static void CalculateVMGroupAndRequestTimes(
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
- dml2_printf("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupVBlank = %f\n", __func__, k, TimePerVMGroupVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMGroupFlip = %f\n", __func__, k, TimePerVMGroupFlip[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestVBlank = %f\n", __func__, k, TimePerVMRequestVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TimePerVMRequestFlip = %f\n", __func__, k, TimePerVMRequestFlip[k]);
#endif
}
}
@@ -10113,7 +10045,6 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
unsigned int SingleVTotal = 0;
bool SameTiming = true;
bool FoundCriticalSurface = false;
- double LastZ8StutterPeriod = 0;
memset(l, 0, sizeof(struct dml2_core_calcs_CalculateStutterEfficiency_locals));
@@ -10127,9 +10058,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0, l->MaximumEffectiveCompressionLuma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateLuma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane0);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionLuma = %f\n", __func__, k, l->MaximumEffectiveCompressionLuma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceLuma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane0 / l->MaximumEffectiveCompressionLuma;
@@ -10142,9 +10073,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
l->TotalCompressedReadBandwidth = l->TotalCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] / math_min2(p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1, l->MaximumEffectiveCompressionChroma);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
- dml2_printf("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
- dml2_printf("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, p->ReadBandwidthSurfaceChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NetDCCRateChroma = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].surface.dcc.informative.dcc_rate_plane1);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaximumEffectiveCompressionChroma = %f\n", __func__, k, l->MaximumEffectiveCompressionChroma);
#endif
l->TotalZeroSizeRequestReadBandwidth = l->TotalZeroSizeRequestReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1;
l->TotalZeroSizeCompressedReadBandwidth = l->TotalZeroSizeCompressedReadBandwidth + p->ReadBandwidthSurfaceChroma[k] * p->display_cfg->plane_descriptors[k].surface.dcc.informative.fraction_of_zero_size_request_plane1 / l->MaximumEffectiveCompressionChroma;
@@ -10160,19 +10091,19 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->AverageDCCZeroSizeFraction = l->TotalZeroSizeRequestReadBandwidth / p->TotalDataReadBandwidth;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
- dml2_printf("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
- dml2_printf("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
- dml2_printf("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
- dml2_printf("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
+ DML_LOG_VERBOSE("DML::%s: UnboundedRequestEnabled = %u\n", __func__, p->UnboundedRequestEnabled);
+ DML_LOG_VERBOSE("DML::%s: TotalCompressedReadBandwidth = %f\n", __func__, l->TotalCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeRequestReadBandwidth = %f\n", __func__, l->TotalZeroSizeRequestReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalZeroSizeCompressedReadBandwidth = %f\n", __func__, l->TotalZeroSizeCompressedReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionLuma = %f\n", __func__, l->MaximumEffectiveCompressionLuma);
+ DML_LOG_VERBOSE("DML::%s: MaximumEffectiveCompressionChroma = %f\n", __func__, l->MaximumEffectiveCompressionChroma);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCZeroSizeFraction = %f\n", __func__, l->AverageDCCZeroSizeFraction);
- dml2_printf("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
- dml2_printf("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
- dml2_printf("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
- dml2_printf("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpace64B = %u (%f kbytes)\n", __func__, p->CompbufReservedSpace64B, p->CompbufReservedSpace64B * 64 / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: CompbufReservedSpaceZs = %u\n", __func__, p->CompbufReservedSpaceZs);
+ DML_LOG_VERBOSE("DML::%s: CompressedBufferSizeInkByte = %u kbytes\n", __func__, p->CompressedBufferSizeInkByte);
+ DML_LOG_VERBOSE("DML::%s: ROBBufferSizeInKByte = %u kbytes\n", __func__, p->ROBBufferSizeInKByte);
#endif
if (l->AverageDCCZeroSizeFraction == 1) {
l->AverageZeroSizeCompressionRate = l->TotalZeroSizeRequestReadBandwidth / l->TotalZeroSizeCompressedReadBandwidth;
@@ -10189,10 +10120,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
- dml2_printf("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate + 1 / l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: min 3 = %d\n", __func__, (p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64));
+ DML_LOG_VERBOSE("DML::%s: min 4 = %f\n", __func__, (p->ZeroSizeBufferEntries - p->CompbufReservedSpaceZs) * 64 / (l->AverageDCCZeroSizeFraction / l->AverageZeroSizeCompressionRate));
#endif
} else {
l->EffectiveCompressedBufferSize = math_min2((double)p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate,
@@ -10200,16 +10131,16 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
((double)p->ROBBufferSizeInKByte * 1024 - p->CompbufReservedSpace64B * 64) * (p->rob_alloc_compressed ? l->AverageDCCCompressionRate : 1.0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 1 = %f\n", __func__, p->CompressedBufferSizeInkByte * 1024 * l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: min 2 = %f\n", __func__, p->MetaFIFOSizeInKEntries * 1024 * 64 * l->AverageDCCCompressionRate);
#endif
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
- dml2_printf("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
- dml2_printf("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: MetaFIFOSizeInKEntries = %u\n", __func__, p->MetaFIFOSizeInKEntries);
+ DML_LOG_VERBOSE("DML::%s: ZeroSizeBufferEntries = %u\n", __func__, p->ZeroSizeBufferEntries);
+ DML_LOG_VERBOSE("DML::%s: AverageZeroSizeCompressionRate = %f\n", __func__, l->AverageZeroSizeCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
#endif
*p->StutterPeriod = 0;
@@ -10220,15 +10151,15 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->LinesInDETYRoundedDownToSwath = math_floor2(l->LinesInDETY, p->SwathHeightY[k]);
l->DETBufferingTimeY = l->LinesInDETYRoundedDownToSwath * ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) / p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
- dml2_printf("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
- dml2_printf("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
- dml2_printf("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
- dml2_printf("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferSizeY = %u (%u kbytes)\n", __func__, k, p->DETBufferSizeY[k], p->DETBufferSizeY[k] / 1024);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelDETY = %f\n", __func__, k, p->BytePerPixelDETY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthY = %u\n", __func__, k, p->SwathWidthY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, p->ReadBandwidthSurfaceLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETY = %f\n", __func__, k, l->LinesInDETY);
+ DML_LOG_VERBOSE("DML::%s: k=%u, LinesInDETYRoundedDownToSwath = %f\n", __func__, k, l->LinesInDETYRoundedDownToSwath);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VRatio = %f\n", __func__, k, p->display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DETBufferingTimeY = %f\n", __func__, k, l->DETBufferingTimeY);
#endif
if (!FoundCriticalSurface || l->DETBufferingTimeY < *p->StutterPeriod) {
@@ -10248,17 +10179,17 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->SinglePipeCriticalSurface = (p->DPPPerSurface[k] == 1);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
- dml2_printf("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
- dml2_printf("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
- dml2_printf("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
- dml2_printf("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
- dml2_printf("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FoundCriticalSurface = %u\n", __func__, k, FoundCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, StutterPeriod = %f\n", __func__, k, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlankCriticalSurface = %f\n", __func__, k, l->MinTTUVBlankCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, FrameTimeCriticalSurface= %f\n", __func__, k, l->FrameTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActiveTimeCriticalSurface = %f\n", __func__, k, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BytePerPixelYCriticalSurface = %u\n", __func__, k, l->BytePerPixelYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathWidthYCriticalSurface = %f\n", __func__, k, l->SwathWidthYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SwathHeightYCriticalSurface = %f\n", __func__, k, l->SwathHeightYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, BlockWidth256BytesYCriticalSurface = %u\n", __func__, k, l->BlockWidth256BytesYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePlaneCriticalSurface = %u\n", __func__, k, l->SinglePlaneCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: k=%u, SinglePipeCriticalSurface = %u\n", __func__, k, l->SinglePipeCriticalSurface);
#endif
}
}
@@ -10276,14 +10207,14 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = math_min2(*p->StutterPeriod * p->TotalDataReadBandwidth, l->EffectiveCompressedBufferSize);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
- dml2_printf("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
- dml2_printf("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
- dml2_printf("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
- dml2_printf("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
- dml2_printf("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
- dml2_printf("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
- dml2_printf("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
+ DML_LOG_VERBOSE("DML::%s: AverageDCCCompressionRate = %f\n", __func__, l->AverageDCCCompressionRate);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod*TotalDataReadBandwidth = %f (%f kbytes)\n", __func__, *p->StutterPeriod * p->TotalDataReadBandwidth, (*p->StutterPeriod * p->TotalDataReadBandwidth) / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: EffectiveCompressedBufferSize = %f (%f kbytes)\n", __func__, l->EffectiveCompressedBufferSize, l->EffectiveCompressedBufferSize / 1024.0);
+ DML_LOG_VERBOSE("DML::%s: PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer = %f (%f kbytes)\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / 1024);
+ DML_LOG_VERBOSE("DML::%s: ReturnBW = %f\n", __func__, p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: TotalDataReadBandwidth = %f\n", __func__, p->TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: TotalRowReadBandwidth = %f\n", __func__, l->TotalRowReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: DCFCLK = %f\n", __func__, p->DCFCLK);
#endif
l->StutterBurstTime = l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer
@@ -10292,10 +10223,10 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
/ math_min2(p->DCFCLK * 64, p->ReturnBW * (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate)) +
*p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
- dml2_printf("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
- dml2_printf("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: Part 1 = %f\n", __func__, l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer / p->ReturnBW / (p->hw_debug5 ? 1 : l->AverageDCCCompressionRate));
+ DML_LOG_VERBOSE("DML::%s: Part 2 = %f\n", __func__, (*p->StutterPeriod * p->TotalDataReadBandwidth - l->PartOfUncompressedPixelBurstThatFitsInROBAndCompressedBuffer) / (p->DCFCLK * 64));
+ DML_LOG_VERBOSE("DML::%s: Part 3 = %f\n", __func__, *p->StutterPeriod * l->TotalRowReadBandwidth / p->ReturnBW);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
#endif
l->TotalActiveWriteback = 0;
memset(l->stream_visited, 0, DML2_MAX_PLANES * sizeof(bool));
@@ -10324,9 +10255,9 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
if (l->TotalActiveWriteback == 0) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
- dml2_printf("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: SRExitTime = %f\n", __func__, p->SRExitTime);
+ DML_LOG_VERBOSE("DML::%s: SRExitZ8Time = %f\n", __func__, p->SRExitZ8Time);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
#endif
*p->StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitTime + l->StutterBurstTime) / *p->StutterPeriod) * 100;
*p->Z8StutterEfficiencyNotIncludingVBlank = math_max2(0., 1 - (p->SRExitZ8Time + l->StutterBurstTime) / *p->StutterPeriod) * 100;
@@ -10339,11 +10270,11 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
*p->Z8NumberOfStutterBurstsPerFrame = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: VActiveTimeCriticalSurface = %f\n", __func__, l->VActiveTimeCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
if (*p->StutterEfficiencyNotIncludingVBlank > 0) {
@@ -10358,7 +10289,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
if (*p->Z8StutterEfficiencyNotIncludingVBlank > 0) {
- LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
+ //LastZ8StutterPeriod = l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod;
if (!((p->SynchronizeTimings || TotalNumberOfActiveOTG == 1) && SameTiming)) {
*p->Z8StutterEfficiency = *p->Z8StutterEfficiencyNotIncludingVBlank;
} else {
@@ -10370,25 +10301,25 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
- dml2_printf("DML::%s: SameTiming = %u\n", __func__, SameTiming);
- dml2_printf("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
- dml2_printf("DML::%s: LastZ8StutterPeriod = %f\n", __func__, LastZ8StutterPeriod);
- dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
- dml2_printf("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
- dml2_printf("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
- dml2_printf("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
- dml2_printf("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
- dml2_printf("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
+ DML_LOG_VERBOSE("DML::%s: TotalNumberOfActiveOTG = %u\n", __func__, TotalNumberOfActiveOTG);
+ DML_LOG_VERBOSE("DML::%s: SameTiming = %u\n", __func__, SameTiming);
+ DML_LOG_VERBOSE("DML::%s: SynchronizeTimings = %u\n", __func__, p->SynchronizeTimings);
+ DML_LOG_VERBOSE("DML::%s: LastZ8StutterPeriod = %f\n", __func__, *p->Z8StutterEfficiencyNotIncludingVBlank > 0 ? l->VActiveTimeCriticalSurface - (*p->Z8NumberOfStutterBurstsPerFrame - 1) * *p->StutterPeriod : 0);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Z8StutterEnterPlusExitWatermark);
+ DML_LOG_VERBOSE("DML::%s: StutterBurstTime = %f\n", __func__, l->StutterBurstTime);
+ DML_LOG_VERBOSE("DML::%s: StutterPeriod = %f\n", __func__, *p->StutterPeriod);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiency = %f\n", __func__, *p->StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: Z8StutterEfficiency = %f\n", __func__, *p->Z8StutterEfficiency);
+ DML_LOG_VERBOSE("DML::%s: StutterEfficiencyNotIncludingVBlank = %f\n", __func__, *p->StutterEfficiencyNotIncludingVBlank);
+ DML_LOG_VERBOSE("DML::%s: Z8NumberOfStutterBurstsPerFrame = %u\n", __func__, *p->Z8NumberOfStutterBurstsPerFrame);
#endif
*p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = !(!p->UnboundedRequestEnabled && (p->NumberOfActiveSurfaces == 1) && l->SinglePlaneCriticalSurface && l->SinglePipeCriticalSurface);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
- dml2_printf("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
- dml2_printf("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
+ DML_LOG_VERBOSE("DML::%s: DETBufferSizeYCriticalSurface = %u\n", __func__, l->DETBufferSizeYCriticalSurface);
+ DML_LOG_VERBOSE("DML::%s: PixelChunkSizeInKByte = %u\n", __func__, p->PixelChunkSizeInKByte);
+ DML_LOG_VERBOSE("DML::%s: DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE = %u\n", __func__, *p->DCHUBBUB_ARB_CSTATE_MAX_CAP_MODE);
#endif
}
@@ -10422,7 +10353,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
double max_uclk_mhz = 0;
double min_return_latency_in_DCFCLK_cycles = 0;
- dml2_printf("DML::%s: --- START --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: --- START --- \n", __func__);
memset(&mode_lib->scratch, 0, sizeof(struct dml2_core_internal_scratch));
memset(&mode_lib->mp, 0, sizeof(struct dml2_core_internal_mode_program));
@@ -10444,13 +10375,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].odms_used <= 4);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 4 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 2 ||
cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
if (cfg_support_info->stream_support_info[stream_index].odms_used > 1)
- DML2_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
+ DML_ASSERT(cfg_support_info->stream_support_info[stream_index].num_odm_output_segments == 1);
switch (cfg_support_info->stream_support_info[stream_index].odms_used) {
case (4):
@@ -10476,51 +10407,51 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.NoOfDPP[k] = cfg_support_info->plane_support_info[k].dpps_used;
mode_lib->mp.Dppclk[k] = programming->plane_programming[k].min_clocks.dcn4x.dppclk_khz / 1000.0;
- DML2_ASSERT(mode_lib->mp.Dppclk[k] > 0);
+ DML_ASSERT(mode_lib->mp.Dppclk[k] > 0);
}
for (k = 0; k < s->num_active_planes; ++k) {
unsigned int stream_index = display_cfg->plane_descriptors[k].stream_index;
mode_lib->mp.DSCCLK[k] = programming->stream_programming[stream_index].min_clocks.dcn4x.dscclk_khz / 1000.0;
- dml2_printf("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d stream_index=%d, mode_lib->mp.DSCCLK = %f\n", __func__, k, stream_index, mode_lib->mp.DSCCLK[k]);
}
mode_lib->mp.Dispclk = programming->min_clocks.dcn4x.dispclk_khz / 1000.0;
mode_lib->mp.DCFCLKDeepSleep = programming->min_clocks.dcn4x.deepsleep_dcfclk_khz / 1000.0;
- DML2_ASSERT(mode_lib->mp.Dcfclk > 0);
- DML2_ASSERT(mode_lib->mp.FabricClock > 0);
- DML2_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
- DML2_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
- DML2_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
- DML2_ASSERT(mode_lib->mp.Dispclk > 0);
- DML2_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
- DML2_ASSERT(s->SOCCLK > 0);
-
-#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
- dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
- dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
- dml2_printf("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
- dml2_printf("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
- dml2_printf("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
- dml2_printf("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
+ DML_ASSERT(mode_lib->mp.Dcfclk > 0);
+ DML_ASSERT(mode_lib->mp.FabricClock > 0);
+ DML_ASSERT(mode_lib->mp.dram_bw_mbps > 0);
+ DML_ASSERT(mode_lib->mp.uclk_freq_mhz > 0);
+ DML_ASSERT(mode_lib->mp.GlobalDPPCLK > 0);
+ DML_ASSERT(mode_lib->mp.Dispclk > 0);
+ DML_ASSERT(mode_lib->mp.DCFCLKDeepSleep > 0);
+ DML_ASSERT(s->SOCCLK > 0);
+
+#ifdef __DML_VBA_DEBUG__
+ DML_LOG_VERBOSE("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
+ DML_LOG_VERBOSE("DML::%s: FabricClock = %f\n", __func__, mode_lib->mp.FabricClock);
+ DML_LOG_VERBOSE("DML::%s: dram_bw_mbps = %f\n", __func__, mode_lib->mp.dram_bw_mbps);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_mhz = %f\n", __func__, mode_lib->mp.uclk_freq_mhz);
+ DML_LOG_VERBOSE("DML::%s: Dispclk = %f\n", __func__, mode_lib->mp.Dispclk);
for (k = 0; k < s->num_active_planes; ++k) {
- dml2_printf("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
- }
- dml2_printf("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
- dml2_printf("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
- dml2_printf("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
- dml2_printf("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
- dml2_printf("DML::%s: min_clk_table min_fclk_khz = %d\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
- dml2_printf("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
+ DML_LOG_VERBOSE("DML::%s: Dppclk[%0d] = %f\n", __func__, k, mode_lib->mp.Dppclk[k]);
+ }
+ DML_LOG_VERBOSE("DML::%s: GlobalDPPCLK = %f\n", __func__, mode_lib->mp.GlobalDPPCLK);
+ DML_LOG_VERBOSE("DML::%s: DCFCLKDeepSleep = %f\n", __func__, mode_lib->mp.DCFCLKDeepSleep);
+ DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK);
+ DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table min_fclk_khz = %ld\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz);
+ DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config));
for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) {
- dml2_printf("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
- dml2_printf("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]);
+ DML_LOG_VERBOSE("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
}
for (k = 0; k < s->num_active_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ DML_LOG_VERBOSE("DML::%s: plane_%d: reserved_vblank_time_ns = %lu\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -10617,8 +10548,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
}
CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
@@ -11097,7 +11028,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin);
for (k = 0; k < s->num_active_planes; ++k) {
- bool cursor_not_enough_urgent_latency_hiding = 0;
+ bool cursor_not_enough_urgent_latency_hiding = false;
s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
@@ -11173,8 +11104,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.WritebackDelay[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u WritebackDelay = %f\n", __func__, k, mode_lib->mp.WritebackDelay[k]);
#endif
}
@@ -11183,7 +11114,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->immediate_flip_required = s->immediate_flip_required || display_cfg->plane_descriptors[k].immediate_flip;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
#endif
if (s->num_active_planes > 1) {
@@ -11219,12 +11150,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->DestinationLineTimesForPrefetchLessThan2 = false;
s->VRatioPrefetchMoreThanMax = false;
- dml2_printf("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Start one iteration of prefetch schedule evaluation\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
mode_lib->mp.TWait[k] = CalculateTWait(
display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
mode_lib->mp.UrgentLatency,
@@ -11261,7 +11192,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
myPipe->ProgressiveToInterlaceUnitInOPP = mode_lib->ip.ptoi_supported;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calling CalculatePrefetchSchedule for k=%u\n", __func__, k);
#endif
CalculatePrefetchSchedule_params->display_cfg = display_cfg;
CalculatePrefetchSchedule_params->HostVMInefficiencyFactor = s->HostVMInefficiencyFactorPrefetch;
@@ -11356,7 +11287,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.impacted_prefetch_margin_us[k] = 0;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
#endif
mode_lib->mp.VStartupMin[k] = s->MaxVStartupLines[k];
} // for k
@@ -11366,9 +11297,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.NoTimeToPrefetch[k] == true ||
mode_lib->mp.NotEnoughTimeForDynamicMetadata[k] ||
mode_lib->mp.DSTYAfterScaler[k] > 8) {
- dml2_printf("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
- dml2_printf("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
- dml2_printf("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NoTimeToPrefetch = %0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughTimeForDynamicMetadata=%u\n", __func__, k, mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, DSTYAfterScaler=%u (should be <= 0)\n", __func__, k, mode_lib->mp.DSTYAfterScaler[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
if (mode_lib->mp.dst_y_prefetch[k] < 2)
@@ -11377,24 +11308,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
if (mode_lib->mp.VRatioPrefetchY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
mode_lib->mp.VRatioPrefetchC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
s->VRatioPrefetchMoreThanMax = true;
- dml2_printf("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchY=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: k=%d, VRatioPrefetchC=%f (should not be < %f)\n", __func__, k, mode_lib->mp.VRatioPrefetchC[k], __DML2_CALCS_MAX_VRATIO_PRE__);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
}
if (mode_lib->mp.NotEnoughUrgentLatencyHiding[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHiding = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHiding[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
if (s->VRatioPrefetchMoreThanMax == true || s->DestinationLineTimesForPrefetchLessThan2 == true) {
- dml2_printf("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
- dml2_printf("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
+ DML_LOG_VERBOSE("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
+ DML_LOG_VERBOSE("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
mode_lib->mp.PrefetchModeSupported = false;
}
- dml2_printf("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
+ DML_LOG_VERBOSE("DML::%s: Prefetch schedule is %sOK at vstartup = %u\n", __func__,
mode_lib->mp.PrefetchModeSupported ? "" : "NOT ", CalculatePrefetchSchedule_params->VStartup);
// Prefetch schedule OK, now check prefetch bw
@@ -11422,24 +11353,24 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
&mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
- dml2_printf("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u DPPPerSurface=%u\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLuma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChroma=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorLumaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorLumaPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u UrgentBurstFactorChromaPre=%f\n", __func__, k, mode_lib->mp.UrgentBurstFactorChromaPre[k]);
- dml2_printf("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
- dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioPrefetchY=%f\n", __func__, k, mode_lib->mp.VRatioPrefetchY[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
- dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
- dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
- dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
- dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
- dml2_printf("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
- dml2_printf("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWLuma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u RequiredPrefetchPixelDataBWChroma=%f\n", __func__, k, mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%0u prefetch_cursor_bw=%f\n", __func__, k, mode_lib->mp.prefetch_cursor_bw[k]);
#endif
}
@@ -11503,11 +11434,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.PrefetchModeSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for prefetch!\n", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]) {
- dml2_printf("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, NotEnoughUrgentLatencyHidingPre = %u\n", __func__, k, mode_lib->mp.NotEnoughUrgentLatencyHidingPre[k]);
mode_lib->mp.PrefetchModeSupported = false;
}
}
@@ -11533,12 +11464,12 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
mode_lib->mp.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->mp.NoOfDPP[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k = %u\n", __func__, k);
- dml2_printf("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
- dml2_printf("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
- dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
- dml2_printf("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
- dml2_printf("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
+ DML_LOG_VERBOSE("DML::%s: k = %u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: DPPPerSurface = %u\n", __func__, mode_lib->mp.NoOfDPP[k]);
+ DML_LOG_VERBOSE("DML::%s: vm_bytes = %u\n", __func__, mode_lib->mp.vm_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, mode_lib->mp.PixelPTEBytesPerRow[k]);
+ DML_LOG_VERBOSE("DML::%s: meta_row_bytes = %u\n", __func__, mode_lib->mp.meta_row_bytes[k]);
+ DML_LOG_VERBOSE("DML::%s: TotImmediateFlipBytes = %u\n", __func__, mode_lib->mp.TotImmediateFlipBytes);
#endif
}
for (k = 0; k < s->num_active_planes; ++k) {
@@ -11631,13 +11562,13 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.urg_bandwidth_available);
if (!mode_lib->mp.ImmediateFlipSupported)
- dml2_printf("DML::%s: Bandwidth not sufficient for flip!", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bandwidth not sufficient for flip!", __func__);
for (k = 0; k < s->num_active_planes; ++k) {
if (display_cfg->plane_descriptors[k].immediate_flip && mode_lib->mp.ImmediateFlipSupportedForPipe[k] == false) {
mode_lib->mp.ImmediateFlipSupported = false;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Pipe %0d not supporting iflip!\n", __func__, k);
#endif
}
}
@@ -11650,28 +11581,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.PrefetchAndImmediateFlipSupported = (mode_lib->mp.PrefetchModeSupported == true && (!must_support_iflip || mode_lib->mp.ImmediateFlipSupported));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchModeSupported = %u\n", __func__, mode_lib->mp.PrefetchModeSupported);
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
- dml2_printf("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
- dml2_printf("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
- dml2_printf("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: immediate_flip_required[%u] = %u\n", __func__, k, display_cfg->plane_descriptors[k].immediate_flip);
+ DML_LOG_VERBOSE("DML::%s: HostVMEnable = %u\n", __func__, display_cfg->hostvm_enable);
+ DML_LOG_VERBOSE("DML::%s: ImmediateFlipSupported = %u\n", __func__, mode_lib->mp.ImmediateFlipSupported);
+ DML_LOG_VERBOSE("DML::%s: PrefetchAndImmediateFlipSupported = %u\n", __func__, mode_lib->mp.PrefetchAndImmediateFlipSupported);
#endif
- dml2_printf("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: Done one iteration: k=%d, MaxVStartupLines=%u\n", __func__, k, s->MaxVStartupLines[k]);
}
for (k = 0; k < s->num_active_planes; ++k)
- dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
if (!mode_lib->mp.PrefetchAndImmediateFlipSupported) {
- dml2_printf("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Bad, Prefetch and flip scheduling solution NOT found!\n", __func__);
} else {
- dml2_printf("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: Good, Prefetch and flip scheduling solution found\n", __func__);
// DCC Configuration
for (k = 0; k < s->num_active_planes; ++k) {
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
+ DML_LOG_VERBOSE("DML::%s: Calculate DCC configuration for surface k=%u\n", __func__, k);
#endif
CalculateDCCConfiguration(
display_cfg->plane_descriptors[k].surface.dcc.enable,
@@ -11780,8 +11711,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_pstate_keepout_dst_lines(display_cfg, &mode_lib->mp.Watermark, mode_lib->mp.pstate_keepout_dst_lines);
- dml2_printf("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
- dml2_printf("DML::%s: DEBUG PixelClock = %d kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
+ DML_LOG_VERBOSE("DML::%s: DEBUG stream_index = %0d\n", __func__, display_cfg->plane_descriptors[0].stream_index);
+ DML_LOG_VERBOSE("DML::%s: DEBUG PixelClock = %ld kHz\n", __func__, (display_cfg->stream_descriptors[display_cfg->plane_descriptors[0].stream_index].timing.pixel_clock_khz));
//Display Pipeline Delivery Time in Prefetch, Groups
CalculatePixelDeliveryTimes(
@@ -11893,15 +11824,15 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.TCalc + mode_lib->mp.MinTTUVBlank[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f (before vstartup margin)\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
s->Tvstartup_margin = (s->MaxVStartupLines[k] - mode_lib->mp.VStartupMin[k]) * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
mode_lib->mp.MinTTUVBlank[k] = mode_lib->mp.MinTTUVBlank[k] + s->Tvstartup_margin;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
- dml2_printf("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
- dml2_printf("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, Tvstartup_margin = %f\n", __func__, k, s->Tvstartup_margin);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MinTTUVBlank = %f\n", __func__, k, mode_lib->mp.MinTTUVBlank[k]);
#endif
mode_lib->mp.Tdmdl[k] = mode_lib->mp.Tdmdl[k] + s->Tvstartup_margin;
@@ -11920,9 +11851,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->blank_lines_remaining = (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total - display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active) - mode_lib->mp.VStartup[k];
if (s->blank_lines_remaining < 0) {
- dml2_printf("ERROR: Vstartup is larger than vblank!?\n");
+ DML_LOG_VERBOSE("ERROR: Vstartup is larger than vblank!?\n");
s->blank_lines_remaining = 0;
- DML2_ASSERT(0);
+ DML_ASSERT(0);
}
mode_lib->mp.MIN_DST_Y_NEXT_START[k] = s->dlg_vblank_start + s->blank_lines_remaining + s->LSetup;
@@ -11936,18 +11867,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k] = false;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
- dml2_printf("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
- dml2_printf("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
- dml2_printf("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
- dml2_printf("DML::%s: k=%u, HTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
- dml2_printf("DML::%s: k=%u, VTotal = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
- dml2_printf("DML::%s: k=%u, VActive = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
- dml2_printf("DML::%s: k=%u, VFrontPorch = %u\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
- dml2_printf("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
- dml2_printf("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
- dml2_printf("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartup = %u (max)\n", __func__, k, mode_lib->mp.VStartup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VStartupMin = %u (max)\n", __func__, k, mode_lib->mp.VStartupMin[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateOffsetPix = %u\n", __func__, k, mode_lib->mp.VUpdateOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VUpdateWidthPix = %u\n", __func__, k, mode_lib->mp.VUpdateWidthPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VReadyOffsetPix = %u\n", __func__, k, mode_lib->mp.VReadyOffsetPix[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, HTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VTotal = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_total);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VActive = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_active);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VFrontPorch = %lu\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.v_front_porch);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TSetup = %f\n", __func__, k, mode_lib->mp.TSetup[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, MIN_DST_Y_NEXT_START = %f\n", __func__, k, mode_lib->mp.MIN_DST_Y_NEXT_START[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, VREADY_AT_OR_AFTER_VSYNC = %u\n", __func__, k, mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[k]);
#endif
}
@@ -11969,9 +11900,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k];
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
- dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
#endif
}
@@ -12051,28 +11982,28 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
min_return_latency_in_DCFCLK_cycles = (min_return_uclk_cycles / max_uclk_mhz + min_return_fclk_cycles / max_fclk_mhz) * hard_minimum_dcfclk_mhz;
mode_lib->mp.min_return_latency_in_dcfclk = (unsigned int)min_return_latency_in_DCFCLK_cycles;
mode_lib->mp.dcfclk_deep_sleep_hysteresis = (unsigned int)math_max2(32, (double)mode_lib->ip.pixel_chunk_size_kbytes * 1024 * 3 / 4 / 64 - min_return_latency_in_DCFCLK_cycles);
- DML2_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
+ DML_ASSERT(mode_lib->mp.dcfclk_deep_sleep_hysteresis < 256);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
- dml2_printf("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
- dml2_printf("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
- dml2_printf("DML::%s: min_return_uclk_cycles = %d\n", __func__, min_return_uclk_cycles);
- dml2_printf("DML::%s: min_return_fclk_cycles = %d\n", __func__, min_return_fclk_cycles);
- dml2_printf("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
- dml2_printf("DML::%s: --- END --- \n", __func__);
+ DML_LOG_VERBOSE("DML::%s: max_fclk_mhz = %f\n", __func__, max_fclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: max_uclk_mhz = %f\n", __func__, max_uclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: hard_minimum_dcfclk_mhz = %f\n", __func__, hard_minimum_dcfclk_mhz);
+ DML_LOG_VERBOSE("DML::%s: min_return_uclk_cycles = %ld\n", __func__, min_return_uclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_fclk_cycles = %ld\n", __func__, min_return_fclk_cycles);
+ DML_LOG_VERBOSE("DML::%s: min_return_latency_in_DCFCLK_cycles = %f\n", __func__, min_return_latency_in_DCFCLK_cycles);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d \n", __func__, mode_lib->mp.dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: --- END --- \n", __func__);
#endif
return (in_out_params->mode_lib->mp.PrefetchAndImmediateFlipSupported);
}
bool dml2_core_calcs_mode_programming_ex(struct dml2_core_calcs_mode_programming_ex *in_out_params)
{
- dml2_printf("DML::%s: ------------- START ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: ------------- START ----------\n", __func__);
bool result = dml_core_mode_programming(in_out_params);
- dml2_printf("DML::%s: result = %0d\n", __func__, result);
- dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
+ DML_LOG_VERBOSE("DML::%s: result = %0d\n", __func__, result);
+ DML_LOG_VERBOSE("DML::%s: ------------- DONE ----------\n", __func__);
return result;
}
@@ -12130,16 +12061,16 @@ void dml2_core_calcs_get_dpte_row_height(
unsigned int MacroTileHeight = is_plane1 ? MacroTileHeightC : MacroTileHeightY;
unsigned int PTEBufferSizeInRequests = is_plane1 ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
- dml2_printf("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
- dml2_printf("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
- dml2_printf("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
- dml2_printf("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
- dml2_printf("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
- dml2_printf("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
- dml2_printf("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
- dml2_printf("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
+ DML_LOG_VERBOSE("DML: %s: is_plane1 = %u\n", __func__, is_plane1);
+ DML_LOG_VERBOSE("DML: %s: BytePerPixel = %u\n", __func__, BytePerPixel);
+ DML_LOG_VERBOSE("DML: %s: BlockHeight256Bytes = %u\n", __func__, BlockHeight256Bytes);
+ DML_LOG_VERBOSE("DML: %s: BlockWidth256Bytes = %u\n", __func__, BlockWidth256Bytes);
+ DML_LOG_VERBOSE("DML: %s: MacroTileWidth = %u\n", __func__, MacroTileWidth);
+ DML_LOG_VERBOSE("DML: %s: MacroTileHeight = %u\n", __func__, MacroTileHeight);
+ DML_LOG_VERBOSE("DML: %s: PTEBufferSizeInRequests = %u\n", __func__, PTEBufferSizeInRequests);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_luma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma);
+ DML_LOG_VERBOSE("DML: %s: dpte_buffer_size_in_pte_reqs_chroma = %u\n", __func__, mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
+ DML_LOG_VERBOSE("DML: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes);
#endif
unsigned int dummy_integer[21];
@@ -12193,16 +12124,16 @@ void dml2_core_calcs_get_dpte_row_height(
CalculateVMAndRowBytes(&mode_lib->scratch.calculate_vm_and_row_bytes_params);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
+ DML_LOG_VERBOSE("DML: %s: dpte_row_height = %u\n", __func__, *dpte_row_height);
#endif
}
static bool is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
@@ -12220,6 +12151,8 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ wm_regs->sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
@@ -12246,11 +12179,11 @@ void dml2_core_calcs_cursor_dlg_reg(struct dml2_cursor_dlg_regs *cursor_dlg_regs
cursor_dlg_regs->dst_x_offset = (unsigned int) ((dst_x_offset > 0) ? dst_x_offset : 0);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
- dml2_printf("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
- dml2_printf("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
- dml2_printf("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: cursor_x_position=%d\n", __func__, p->cursor_x_position);
+ DML_LOG_VERBOSE("DML_DLG::%s: dlg_refclk_mhz=%f\n", __func__, p->dlg_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: pixel_rate_mhz=%f\n", __func__, p->pixel_rate_mhz);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d\n", __func__, dst_x_offset);
+ DML_LOG_VERBOSE("DML_DLG::%s: dst_x_offset=%d (reg)\n", __func__, cursor_dlg_regs->dst_x_offset);
#endif
cursor_dlg_regs->chunk_hdl_adjust = 3;
@@ -12286,7 +12219,7 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
double stored_swath_c_bytes;
bool is_phantom_pipe;
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] start\n", __func__, pipe_idx);
pixel_chunk_bytes = (unsigned int)(mode_lib->ip.pixel_chunk_size_kbytes * 1024);
min_pixel_chunk_bytes = (unsigned int)(mode_lib->ip.min_pixel_chunk_size_bytes);
@@ -12329,19 +12262,19 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (sw_mode == dml2_sw_linear && display_cfg->gpuvm_enable) {
unsigned int p0_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_l(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p0_pte_row_height_linear = %u\n", __func__, p0_pte_row_height_linear);
#endif
- DML2_ASSERT(p0_pte_row_height_linear >= 8);
+ DML_ASSERT(p0_pte_row_height_linear >= 8);
rq_regs->rq_regs_l.pte_row_height_linear = math_log2_approx(p0_pte_row_height_linear) - 3;
if (dual_plane) {
unsigned int p1_pte_row_height_linear = (unsigned int)(dml_get_dpte_row_height_linear_c(mode_lib, pipe_idx));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
+ DML_LOG_VERBOSE("DML_DLG: %s: p1_pte_row_height_linear = %u\n", __func__, p1_pte_row_height_linear);
#endif
if (sw_mode == dml2_sw_linear) {
- DML2_ASSERT(p1_pte_row_height_linear >= 8);
+ DML_ASSERT(p1_pte_row_height_linear >= 8);
}
rq_regs->rq_regs_c.pte_row_height_linear = math_log2_approx(p1_pte_row_height_linear) - 3;
}
@@ -12375,12 +12308,12 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
if (stored_swath_l_bytes / stored_swath_c_bytes <= 1.5) {
detile_buf_plane1_addr = (unsigned int)(detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
#endif
} else {
detile_buf_plane1_addr = (unsigned int)(dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), 1024, 0) / 1024.0); // 2/3 to luma
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d (1/3 chroma)\n", __func__, detile_buf_plane1_addr);
#endif
}
}
@@ -12388,15 +12321,15 @@ static void rq_dlg_get_rq_reg(struct dml2_display_rq_regs *rq_regs,
rq_regs->plane1_base_address = detile_buf_plane1_addr;
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
- dml2_printf("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
- dml2_printf("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
- dml2_printf("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
- dml2_printf("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
+ DML_LOG_VERBOSE("DML_DLG: %s: is_phantom_pipe = %d\n", __func__, is_phantom_pipe);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_l_bytes = %f\n", __func__, stored_swath_l_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: stored_swath_c_bytes = %f\n", __func__, stored_swath_c_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_size_in_bytes = %d\n", __func__, detile_buf_size_in_bytes);
+ DML_LOG_VERBOSE("DML_DLG: %s: detile_buf_plane1_addr = %d\n", __func__, detile_buf_plane1_addr);
+ DML_LOG_VERBOSE("DML_DLG: %s: plane1_base_address = %d\n", __func__, rq_regs->plane1_base_address);
#endif
- //dml2_printf_rq_regs_st(rq_regs);
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ //DML_LOG_VERBOSE_rq_regs_st(rq_regs);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
static void rq_dlg_get_dlg_reg(
@@ -12411,10 +12344,10 @@ static void rq_dlg_get_dlg_reg(
memset(l, 0, sizeof(struct dml2_core_shared_rq_dlg_get_dlg_reg_locals));
- dml2_printf("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe_idx=%d\n", __func__, pipe_idx);
l->plane_idx = dml_get_plane_idx(mode_lib, pipe_idx);
- DML2_ASSERT(l->plane_idx < DML2_MAX_PLANES);
+ DML_ASSERT(l->plane_idx < DML2_MAX_PLANES);
l->source_format = dml2_444_8;
l->odm_mode = dml2_odm_mode_bypass;
@@ -12444,18 +12377,18 @@ static void rq_dlg_get_dlg_reg(
l->pclk_freq_in_mhz = (double)l->timing->pixel_clock_khz / 1000;
l->ref_freq_to_pix_freq = l->refclk_freq_in_mhz / l->pclk_freq_in_mhz;
- dml2_printf("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
- dml2_printf("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
- dml2_printf("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
- dml2_printf("DML_DLG: %s: soc.refclk_mhz = %3.2f\n", __func__, mode_lib->soc.dchub_refclk_mhz);
- dml2_printf("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
+ DML_LOG_VERBOSE("DML_DLG::%s: plane_idx = %d\n", __func__, l->plane_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: htotal = %d\n", __func__, l->htotal);
+ DML_LOG_VERBOSE("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, l->refclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: dlg_ref_clk_mhz = %3.2f\n", __func__, display_cfg->overrides.hw.dlg_ref_clk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: soc.refclk_mhz = %d\n", __func__, mode_lib->soc.dchub_refclk_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, l->pclk_freq_in_mhz);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: interlaced = %d\n", __func__, l->interlaced);
- DML2_ASSERT(l->refclk_freq_in_mhz != 0);
- DML2_ASSERT(l->pclk_freq_in_mhz != 0);
- DML2_ASSERT(l->ref_freq_to_pix_freq < 4.0);
+ DML_ASSERT(l->refclk_freq_in_mhz != 0);
+ DML_ASSERT(l->pclk_freq_in_mhz != 0);
+ DML_ASSERT(l->ref_freq_to_pix_freq < 4.0);
// Need to figure out which side of odm combine we're in
// Assume the pipe instance under the same plane is in order
@@ -12484,14 +12417,14 @@ static void rq_dlg_get_dlg_reg(
l->pipe_idx_in_combine = pipe_idx - l->first_pipe_idx_in_plane; // DML assumes the pipes in the same plane will have continuous indexing (i.e. plane 0 use pipe 0, 1, and plane 1 uses pipe 2, 3, etc.)
disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double)l->hblank_end + (double)l->pipe_idx_in_combine * (double)l->hactive / (double)l->odm_combine_factor) * l->ref_freq_to_pix_freq);
- dml2_printf("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
- dml2_printf("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
- dml2_printf("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
- dml2_printf("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx = %d\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG: %s: first_pipe_idx_in_plane = %d\n", __func__, l->first_pipe_idx_in_plane);
+ DML_LOG_VERBOSE("DML_DLG: %s: pipe_idx_in_combine = %d\n", __func__, l->pipe_idx_in_combine);
+ DML_LOG_VERBOSE("DML_DLG: %s: odm_combine_factor = %d\n", __func__, l->odm_combine_factor);
}
- dml2_printf("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_h_blank_end = %d\n", __func__, disp_dlg_regs->refcyc_h_blank_end);
- DML2_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)math_pow(2, 13));
disp_dlg_regs->ref_freq_to_pix_freq = (unsigned int)(l->ref_freq_to_pix_freq * math_pow(2, 19));
disp_dlg_regs->refcyc_per_htotal = (unsigned int)(l->ref_freq_to_pix_freq * (double)l->htotal * math_pow(2, 8));
@@ -12500,20 +12433,20 @@ static void rq_dlg_get_dlg_reg(
l->min_ttu_vblank = mode_lib->mp.MinTTUVBlank[mode_lib->mp.pipe_plane[pipe_idx]];
l->min_dst_y_next_start = (unsigned int)(mode_lib->mp.MIN_DST_Y_NEXT_START[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
- dml2_printf("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
- dml2_printf("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, l->min_ttu_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: min_dst_y_next_start = %d\n", __func__, l->min_dst_y_next_start);
+ DML_LOG_VERBOSE("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", __func__, l->ref_freq_to_pix_freq);
l->vready_after_vcount0 = (unsigned int)(mode_lib->mp.VREADY_AT_OR_AFTER_VSYNC[mode_lib->mp.pipe_plane[pipe_idx]]);
disp_dlg_regs->vready_after_vcount0 = l->vready_after_vcount0;
- dml2_printf("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
+ DML_LOG_VERBOSE("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, disp_dlg_regs->vready_after_vcount0);
l->dst_x_after_scaler = (unsigned int)(mode_lib->mp.DSTXAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
l->dst_y_after_scaler = (unsigned int)(mode_lib->mp.DSTYAfterScaler[mode_lib->mp.pipe_plane[pipe_idx]]);
- dml2_printf("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
- dml2_printf("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_x_after_scaler = %d\n", __func__, l->dst_x_after_scaler);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_after_scaler = %d\n", __func__, l->dst_y_after_scaler);
l->dst_y_prefetch = mode_lib->mp.dst_y_prefetch[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_vm_vblank = mode_lib->mp.dst_y_per_vm_vblank[mode_lib->mp.pipe_plane[pipe_idx]];
@@ -12521,28 +12454,28 @@ static void rq_dlg_get_dlg_reg(
l->dst_y_per_vm_flip = mode_lib->mp.dst_y_per_vm_flip[mode_lib->mp.pipe_plane[pipe_idx]];
l->dst_y_per_row_flip = mode_lib->mp.dst_y_per_row_flip[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
- dml2_printf("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, l->dst_y_prefetch);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, l->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, l->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, l->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, l->dst_y_per_row_vblank);
if (l->dst_y_prefetch > 0 && l->dst_y_per_vm_vblank > 0 && l->dst_y_per_row_vblank > 0) {
- DML2_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
+ DML_ASSERT(l->dst_y_prefetch > (l->dst_y_per_vm_vblank + l->dst_y_per_row_vblank));
}
l->vratio_pre_l = mode_lib->mp.VRatioPrefetchY[mode_lib->mp.pipe_plane[pipe_idx]];
l->vratio_pre_c = mode_lib->mp.VRatioPrefetchC[mode_lib->mp.pipe_plane[pipe_idx]];
- dml2_printf("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
- dml2_printf("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_l = %3.2f\n", __func__, l->vratio_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, l->vratio_pre_c);
// Active
l->refcyc_per_line_delivery_pre_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_l = mode_lib->mp.DisplayPipeLineDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", __func__, l->refcyc_per_line_delivery_l);
l->refcyc_per_line_delivery_pre_c = 0.0;
l->refcyc_per_line_delivery_c = 0.0;
@@ -12551,8 +12484,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_line_delivery_pre_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_line_delivery_c = mode_lib->mp.DisplayPipeLineDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", __func__, l->refcyc_per_line_delivery_c);
}
disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int)(mode_lib->mp.Tdmdl_vm[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12561,8 +12494,8 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLumaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_l = mode_lib->mp.DisplayPipeRequestDeliveryTimeLuma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_l);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", __func__, l->refcyc_per_req_delivery_l);
l->refcyc_per_req_delivery_pre_c = 0.0;
l->refcyc_per_req_delivery_c = 0.0;
@@ -12570,16 +12503,16 @@ static void rq_dlg_get_dlg_reg(
l->refcyc_per_req_delivery_pre_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChromaPrefetch[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
l->refcyc_per_req_delivery_c = mode_lib->mp.DisplayPipeRequestDeliveryTimeChroma[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz;
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
- dml2_printf("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_pre_c);
+ DML_LOG_VERBOSE("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", __func__, l->refcyc_per_req_delivery_c);
}
// TTU - Cursor
- DML2_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
+ DML_ASSERT(display_cfg->plane_descriptors[l->plane_idx].cursor.num_cursors <= 1);
// Assign to register structures
disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)l->min_dst_y_next_start * math_pow(2, 2));
- DML2_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
+ DML_ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)math_pow(2, 18));
disp_dlg_regs->dst_y_after_scaler = l->dst_y_after_scaler; // in terms of line
disp_dlg_regs->refcyc_x_after_scaler = (unsigned int)((double)l->dst_x_after_scaler * l->ref_freq_to_pix_freq); // in terms of refclk
@@ -12592,10 +12525,10 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->vratio_prefetch = (unsigned int)(l->vratio_pre_l * math_pow(2, 19));
disp_dlg_regs->vratio_prefetch_c = (unsigned int)(l->vratio_pre_c * math_pow(2, 19));
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
- dml2_printf("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
+ DML_LOG_VERBOSE("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(mode_lib->mp.TimePerVMGroupVBlank[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_group_flip = (unsigned int)(mode_lib->mp.TimePerVMGroupFlip[mode_lib->mp.pipe_plane[pipe_idx]] * l->refclk_freq_in_mhz);
@@ -12662,11 +12595,11 @@ static void rq_dlg_get_dlg_reg(
disp_ttu_regs->qos_ramp_disable_c = 0;
disp_ttu_regs->min_ttu_vblank = (unsigned int)(l->min_ttu_vblank * l->refclk_freq_in_mhz);
- // CHECK for HW registers' range, DML2_ASSERT or clamp
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
- DML2_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
+ // CHECK for HW registers' range, DML_ASSERT or clamp
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_l < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_pre_c < math_pow(2, 13));
+ DML_ASSERT(l->refcyc_per_req_delivery_c < math_pow(2, 13));
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_vm_group_vblank = (unsigned int)(math_pow(2, 23) - 1);
@@ -12680,16 +12613,16 @@ static void rq_dlg_get_dlg_reg(
disp_dlg_regs->refcyc_per_vm_req_flip = (unsigned int)(math_pow(2, 23) - 1);
- DML2_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
- DML2_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
+ DML_ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)math_pow(2, 13));
if (disp_dlg_regs->dst_y_per_pte_row_nom_l >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_L %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_l, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_l = (unsigned int)math_pow(2, 17) - 1;
}
if (l->dual_plane) {
if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)math_pow(2, 17)) {
- dml2_printf("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
+ DML_LOG_VERBOSE("DML_DLG: %s: Warning DST_Y_PER_PTE_ROW_NOM_C %u > register max U15.2 %u, clamp to max\n", __func__, disp_dlg_regs->dst_y_per_pte_row_nom_c, (unsigned int)math_pow(2, 17) - 1);
l->dst_y_per_pte_row_nom_c = (unsigned int)math_pow(2, 17) - 1;
}
}
@@ -12700,20 +12633,20 @@ static void rq_dlg_get_dlg_reg(
if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)math_pow(2, 23))
disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int)(math_pow(2, 23) - 1);
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)math_pow(2, 13));
if (l->dual_plane) {
- DML2_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int)math_pow(2, 13));
}
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
- DML2_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
- DML2_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)math_pow(2, 13));
+ DML_ASSERT(disp_ttu_regs->qos_level_low_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->qos_level_high_wm < (unsigned int)math_pow(2, 14));
+ DML_ASSERT(disp_ttu_regs->min_ttu_vblank < (unsigned int)math_pow(2, 24));
- dml2_printf("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
+ DML_LOG_VERBOSE("DML_DLG::%s: Calculation for pipe[%d] done\n", __func__, pipe_idx);
}
}
@@ -12736,11 +12669,11 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co
arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz);
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
- dml2_printf("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
- dml2_printf("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
- dml2_printf("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
- dml2_printf("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
+ DML_LOG_VERBOSE("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
+ DML_LOG_VERBOSE("DML::%s: sdpif_request_rate_limit = %d\n", __func__, arb_param->sdpif_request_rate_limit);
+ DML_LOG_VERBOSE("DML::%s: compbuf_reserved_space_kbytes = %d\n", __func__, arb_param->compbuf_reserved_space_kbytes);
+ DML_LOG_VERBOSE("DML::%s: allow_sdpif_rate_limit_when_cstate_req = %d\n", __func__, arb_param->allow_sdpif_rate_limit_when_cstate_req);
+ DML_LOG_VERBOSE("DML::%s: dcfclk_deep_sleep_hysteresis = %d\n", __func__, arb_param->dcfclk_deep_sleep_hysteresis);
#endif
}
@@ -13013,10 +12946,10 @@ void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *disp
out->vblank_reserved_time_us = display_cfg->plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
- dml2_printf("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
- dml2_printf("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
- dml2_printf("DML::%s: vblank_reserved_time_us = %f\n", __func__, out->vblank_reserved_time_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_fw_processing_delay_us = %d\n", __func__, mode_lib->ip.subvp_fw_processing_delay_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_pstate_allow_width_us = %d\n", __func__, mode_lib->ip.subvp_pstate_allow_width_us);
+ DML_LOG_VERBOSE("DML::%s: subvp_swath_height_margin_lines = %d\n", __func__, mode_lib->ip.subvp_swath_height_margin_lines);
+ DML_LOG_VERBOSE("DML::%s: vblank_reserved_time_us = %u\n", __func__, out->vblank_reserved_time_us);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 4e502f0a6d20..bdee6ad7bc59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -1078,6 +1078,8 @@ struct dml2_core_calcs_mode_programming_locals {
enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
unsigned int lb_source_lines_l[DML2_MAX_PLANES];
unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
};
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index 2504d9c2ec34..7a220c0141c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -82,7 +82,7 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -145,7 +145,7 @@ bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format)
val = 0;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -208,7 +208,7 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
val = 1;
break;
default:
- DML2_ASSERT(0);
+ DML_ASSERT(0);
break;
}
return val;
@@ -216,104 +216,104 @@ bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only)
{
- dml2_printf("DML: ===================================== \n");
- dml2_printf("DML: DML_MODE_SUPPORT_INFO_ST\n");
+ DML_LOG_VERBOSE("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: DML_MODE_SUPPORT_INFO_ST\n");
if (!fail_only || support->ScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: ScaleRatioAndTapsSupport = %d\n", support->ScaleRatioAndTapsSupport);
if (!fail_only || support->SourceFormatPixelAndScanSupport == 0)
- dml2_printf("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
+ DML_LOG_VERBOSE("DML: support: SourceFormatPixelAndScanSupport = %d\n", support->SourceFormatPixelAndScanSupport);
if (!fail_only || support->ViewportSizeSupport == 0)
- dml2_printf("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
+ DML_LOG_VERBOSE("DML: support: ViewportSizeSupport = %d\n", support->ViewportSizeSupport);
if (!fail_only || support->LinkRateDoesNotMatchDPVersion == 1)
- dml2_printf("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
+ DML_LOG_VERBOSE("DML: support: LinkRateDoesNotMatchDPVersion = %d\n", support->LinkRateDoesNotMatchDPVersion);
if (!fail_only || support->LinkRateForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: LinkRateForMultistreamNotIndicated = %d\n", support->LinkRateForMultistreamNotIndicated);
if (!fail_only || support->BPPForMultistreamNotIndicated == 1)
- dml2_printf("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
+ DML_LOG_VERBOSE("DML: support: BPPForMultistreamNotIndicated = %d\n", support->BPPForMultistreamNotIndicated);
if (!fail_only || support->MultistreamWithHDMIOreDP == 1)
- dml2_printf("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
+ DML_LOG_VERBOSE("DML: support: MultistreamWithHDMIOreDP = %d\n", support->MultistreamWithHDMIOreDP);
if (!fail_only || support->ExceededMultistreamSlots == 1)
- dml2_printf("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
+ DML_LOG_VERBOSE("DML: support: ExceededMultistreamSlots = %d\n", support->ExceededMultistreamSlots);
if (!fail_only || support->MSOOrODMSplitWithNonDPLink == 1)
- dml2_printf("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
+ DML_LOG_VERBOSE("DML: support: MSOOrODMSplitWithNonDPLink = %d\n", support->MSOOrODMSplitWithNonDPLink);
if (!fail_only || support->NotEnoughLanesForMSO == 1)
- dml2_printf("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
+ DML_LOG_VERBOSE("DML: support: NotEnoughLanesForMSO = %d\n", support->NotEnoughLanesForMSO);
if (!fail_only || support->P2IWith420 == 1)
- dml2_printf("DML: support: P2IWith420 = %d\n", support->P2IWith420);
+ DML_LOG_VERBOSE("DML: support: P2IWith420 = %d\n", support->P2IWith420);
if (!fail_only || support->DSC422NativeNotSupported == 1)
- dml2_printf("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
+ DML_LOG_VERBOSE("DML: support: DSC422NativeNotSupported = %d\n", support->DSC422NativeNotSupported);
if (!fail_only || support->DSCSlicesODMModeSupported == 0)
- dml2_printf("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
+ DML_LOG_VERBOSE("DML: support: DSCSlicesODMModeSupported = %d\n", support->DSCSlicesODMModeSupported);
if (!fail_only || support->NotEnoughDSCUnits == 1)
- dml2_printf("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCUnits = %d\n", support->NotEnoughDSCUnits);
if (!fail_only || support->NotEnoughDSCSlices == 1)
- dml2_printf("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
+ DML_LOG_VERBOSE("DML: support: NotEnoughDSCSlices = %d\n", support->NotEnoughDSCSlices);
if (!fail_only || support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe == 1)
- dml2_printf("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe = %d\n", support->ImmediateFlipOrHostVMAndPStateWithMALLFullFrameOrPhantomPipe);
if (!fail_only || support->InvalidCombinationOfMALLUseForPStateAndStaticScreen == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPStateAndStaticScreen = %d\n", support->InvalidCombinationOfMALLUseForPStateAndStaticScreen);
if (!fail_only || support->DSCCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DSCCLKRequiredMoreThanSupported = %d\n", support->DSCCLKRequiredMoreThanSupported);
if (!fail_only || support->PixelsPerLinePerDSCUnitSupport == 0)
- dml2_printf("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
+ DML_LOG_VERBOSE("DML: support: PixelsPerLinePerDSCUnitSupport = %d\n", support->PixelsPerLinePerDSCUnitSupport);
if (!fail_only || support->DTBCLKRequiredMoreThanSupported == 1)
- dml2_printf("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
+ DML_LOG_VERBOSE("DML: support: DTBCLKRequiredMoreThanSupported = %d\n", support->DTBCLKRequiredMoreThanSupported);
if (!fail_only || support->InvalidCombinationOfMALLUseForPState == 1)
- dml2_printf("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
+ DML_LOG_VERBOSE("DML: support: InvalidCombinationOfMALLUseForPState = %d\n", support->InvalidCombinationOfMALLUseForPState);
if (!fail_only || support->ROBSupport == 0)
- dml2_printf("DML: support: ROBSupport = %d\n", support->ROBSupport);
+ DML_LOG_VERBOSE("DML: support: ROBSupport = %d\n", support->ROBSupport);
if (!fail_only || support->OutstandingRequestsSupport == 0)
- dml2_printf("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsSupport = %d\n", support->OutstandingRequestsSupport);
if (!fail_only || support->OutstandingRequestsUrgencyAvoidance == 0)
- dml2_printf("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
+ DML_LOG_VERBOSE("DML: support: OutstandingRequestsUrgencyAvoidance = %d\n", support->OutstandingRequestsUrgencyAvoidance);
if (!fail_only || support->DISPCLK_DPPCLK_Support == 0)
- dml2_printf("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
+ DML_LOG_VERBOSE("DML: support: DISPCLK_DPPCLK_Support = %d\n", support->DISPCLK_DPPCLK_Support);
if (!fail_only || support->TotalAvailablePipesSupport == 0)
- dml2_printf("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
+ DML_LOG_VERBOSE("DML: support: TotalAvailablePipesSupport = %d\n", support->TotalAvailablePipesSupport);
if (!fail_only || support->NumberOfOTGSupport == 0)
- dml2_printf("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfOTGSupport = %d\n", support->NumberOfOTGSupport);
if (!fail_only || support->NumberOfHDMIFRLSupport == 0)
- dml2_printf("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
+ DML_LOG_VERBOSE("DML: support: NumberOfHDMIFRLSupport = %d\n", support->NumberOfHDMIFRLSupport);
if (!fail_only || support->NumberOfDP2p0Support == 0)
- dml2_printf("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
+ DML_LOG_VERBOSE("DML: support: NumberOfDP2p0Support = %d\n", support->NumberOfDP2p0Support);
if (!fail_only || support->EnoughWritebackUnits == 0)
- dml2_printf("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
+ DML_LOG_VERBOSE("DML: support: EnoughWritebackUnits = %d\n", support->EnoughWritebackUnits);
if (!fail_only || support->WritebackScaleRatioAndTapsSupport == 0)
- dml2_printf("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
+ DML_LOG_VERBOSE("DML: support: WritebackScaleRatioAndTapsSupport = %d\n", support->WritebackScaleRatioAndTapsSupport);
if (!fail_only || support->WritebackLatencySupport == 0)
- dml2_printf("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
+ DML_LOG_VERBOSE("DML: support: WritebackLatencySupport = %d\n", support->WritebackLatencySupport);
if (!fail_only || support->CursorSupport == 0)
- dml2_printf("DML: support: CursorSupport = %d\n", support->CursorSupport);
+ DML_LOG_VERBOSE("DML: support: CursorSupport = %d\n", support->CursorSupport);
if (!fail_only || support->PitchSupport == 0)
- dml2_printf("DML: support: PitchSupport = %d\n", support->PitchSupport);
+ DML_LOG_VERBOSE("DML: support: PitchSupport = %d\n", support->PitchSupport);
if (!fail_only || support->ViewportExceedsSurface == 1)
- dml2_printf("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
+ DML_LOG_VERBOSE("DML: support: ViewportExceedsSurface = %d\n", support->ViewportExceedsSurface);
if (!fail_only || support->PrefetchSupported == 0)
- dml2_printf("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: PrefetchSupported = %d\n", support->PrefetchSupported);
if (!fail_only || support->EnoughUrgentLatencyHidingSupport == 0)
- dml2_printf("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
+ DML_LOG_VERBOSE("DML: support: EnoughUrgentLatencyHidingSupport = %d\n", support->EnoughUrgentLatencyHidingSupport);
if (!fail_only || support->AvgBandwidthSupport == 0)
- dml2_printf("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
+ DML_LOG_VERBOSE("DML: support: AvgBandwidthSupport = %d\n", support->AvgBandwidthSupport);
if (!fail_only || support->DynamicMetadataSupported == 0)
- dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
+ DML_LOG_VERBOSE("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
- dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
+ DML_LOG_VERBOSE("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
- dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
+ DML_LOG_VERBOSE("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
- dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
+ DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
- dml2_printf("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
if (!fail_only || support->ImmediateFlipSupport == 0)
- dml2_printf("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
+ DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
- dml2_printf("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
+ DML_LOG_VERBOSE("DML: support: LinkCapacitySupport = %d\n", support->LinkCapacitySupport);
if (!fail_only || support->ModeSupport == 0)
- dml2_printf("DML: support: ModeSupport = %d\n", support->ModeSupport);
- dml2_printf("DML: ===================================== \n");
+ DML_LOG_VERBOSE("DML: support: ModeSupport = %d\n", support->ModeSupport);
+ DML_LOG_VERBOSE("DML: ===================================== \n");
}
const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type)
@@ -358,9 +358,9 @@ void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_di
out_bpp[k] = 0;
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
- dml2_printf("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
- dml2_printf("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
+ DML_LOG_VERBOSE("DML::%s: k=%d bpc=%f\n", __func__, k, bpc);
+ DML_LOG_VERBOSE("DML::%s: k=%d dsc.enable=%d\n", __func__, k, display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.dsc.enable);
+ DML_LOG_VERBOSE("DML::%s: k=%d out_bpp=%f\n", __func__, k, out_bpp[k]);
#endif
}
}
@@ -391,7 +391,7 @@ unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const
}
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
+ DML_LOG_VERBOSE("DML::%s: num_active_pipes = %d\n", __func__, num_active_pipes);
#endif
return num_active_pipes;
}
@@ -452,7 +452,7 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw
else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
return 262144;
else {
- DML2_ASSERT(0);
+ DML_ASSERT(0);
return 256;
};
}
@@ -498,8 +498,8 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
else {
- dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
- DML2_ASSERT(0);
+ DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
+ DML_ASSERT(0);
}
return version;
@@ -511,7 +511,7 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int index = 0;
for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- dml2_printf("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %d\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
+ DML_LOG_VERBOSE("DML::%s: per_uclk_dpm_params[%d].minimum_uclk_khz = %ld\n", __func__, i, per_uclk_dpm_params[i].minimum_uclk_khz);
if (i == 0)
index = 0;
@@ -524,8 +524,8 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
}
}
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %d\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, index);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, index);
#endif
return index;
}
@@ -533,32 +533,32 @@ unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, co
unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table)
{
unsigned int i;
- bool clk_entry_found = 0;
+ bool clk_entry_found = false;
for (i = 0; i < clk_table->uclk.num_clk_values; i++) {
- dml2_printf("DML::%s: clk_table.uclk.clk_values_khz[%d] = %d\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
+ DML_LOG_VERBOSE("DML::%s: clk_table.uclk.clk_values_khz[%d] = %ld\n", __func__, i, clk_table->uclk.clk_values_khz[i]);
if (uclk_freq_khz == clk_table->uclk.clk_values_khz[i]) {
- clk_entry_found = 1;
+ clk_entry_found = true;
break;
}
}
if (!clk_entry_found)
- DML2_ASSERT(clk_entry_found);
+ DML_ASSERT(clk_entry_found);
#if defined(__DML_VBA_DEBUG__)
- dml2_printf("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
- dml2_printf("DML::%s: index = %d\n", __func__, i);
+ DML_LOG_VERBOSE("DML::%s: uclk_freq_khz = %ld\n", __func__, uclk_freq_khz);
+ DML_LOG_VERBOSE("DML::%s: index = %d\n", __func__, i);
#endif
return i;
}
bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
{
- bool ret_val = 0;
+ bool ret_val = false;
if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha))
- ret_val = 1;
+ ret_val = true;
return ret_val;
}
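
dml2_core_utils_get_active_min_uclk_dpm_index() above is a plain linear scan over the UCLK DPM table that asserts when no entry matches. A standalone sketch of the same lookup pattern, with an invented clock table (get_uclk_dpm_index and the kHz values are illustrative, not driver data):

#include <assert.h>
#include <stdio.h>

static unsigned int get_uclk_dpm_index(unsigned long uclk_freq_khz,
				       const unsigned long *clk_values_khz,
				       unsigned int num_clk_values)
{
	unsigned int i;

	for (i = 0; i < num_clk_values; i++) {
		if (uclk_freq_khz == clk_values_khz[i])
			return i;
	}
	/* mirrors the DML_ASSERT(clk_entry_found) in the patch */
	assert(0 && "uclk frequency not present in the clock table");
	return num_clk_values - 1;
}

int main(void)
{
	const unsigned long uclk_khz[] = { 97000, 435000, 625000, 1000000 };

	printf("index = %u\n", get_uclk_dpm_index(625000, uclk_khz, 4)); /* 2 */
	return 0;
}
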
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 15507926f3a4..f486b090bbfc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -754,6 +754,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
@@ -768,6 +770,8 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
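
The lines added above program the Z8 stutter watermarks alongside the existing set, and every wm_regs assignment folds the same unit conversion: DML computes watermarks in microseconds, while the DCHUBBUB registers take display-refclk cycles, so each value is scaled by refclk_freq_in_mhz (cycles per microsecond). A minimal standalone sketch of that conversion, with invented values (the 9.2 us / 100 MHz figures are illustrative, not driver values):

#include <stdio.h>

static unsigned int wm_us_to_refclk_cycles(double watermark_us,
					   double refclk_freq_in_mhz)
{
	/* 1 MHz is 1 cycle per microsecond, so cycles = us * MHz */
	return (unsigned int)(watermark_us * refclk_freq_in_mhz);
}

int main(void)
{
	/* e.g. a 9.2 us Z8 stutter-exit watermark at a 100 MHz refclk */
	printf("sr_exit_z8 = %u refclk cycles\n",
	       wm_us_to_refclk_cycles(9.2, 100.0)); /* 920 */
	return 0;
}
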
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index f4b1a7d02d42..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
@@ -182,6 +182,10 @@ static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_
min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+ min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+ min_table->max_ss_clocks_khz.dtbclk = (unsigned int)((double)min_table->max_clocks_khz.dtbclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+
min_table->max_clocks_khz.dcfclk = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1];
min_table->max_clocks_khz.fclk = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1];
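
The max_ss_clocks_khz entries added above derate each nominal maximum for spread-spectrum clocking: dividing by (1 + dcn_downspread_percent / 100) keeps the modulated clock at or below the nominal maximum. A worked standalone example with invented numbers (the 1200 MHz / 0.5% figures are illustrative):

#include <stdio.h>

static unsigned int max_ss_khz(unsigned int max_khz, double downspread_percent)
{
	return (unsigned int)((double)max_khz /
			      (1.0 + downspread_percent / 100.0));
}

int main(void)
{
	/* a hypothetical 1200 MHz dispclk with 0.5% downspread */
	printf("max_ss dispclk = %u kHz\n", max_ss_khz(1200000, 0.5));
	/* prints 1194029 kHz, i.e. roughly 1194 MHz */
	return 0;
}
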
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index f50662b83296..d88b3e0082dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -659,7 +659,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
switch (i) {
case 1:
- DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -670,7 +670,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 2:
- DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -681,7 +681,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 3:
- DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
@@ -692,7 +692,7 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 4:
- DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+ DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
pmo_dcn4_fams2_expand_base_pstate_strategies(
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
index dc2ce5e77f57..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -761,7 +761,7 @@ bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache
total_mcaches_required--;
}
}
- dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
+ DML_LOG_VERBOSE("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
result = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
deleted file mode 100644
index c506667897c4..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_debug.h"
-
-int dml2_log_internal(const char *format, ...)
-{
- return 0;
-}
-
-int dml2_printf(const char *format, ...)
-{
-#ifdef _DEBUG
-#ifdef _DEBUG_PRINTS
- int result;
- va_list args;
- va_start(args, format);
-
- result = vprintf(format, args);
-
- va_end(args);
-
- return result;
-#else
- return 0;
-#endif
-#else
- return 0;
-#endif
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index bfe6f236d2e4..b226225103c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -5,55 +5,62 @@
#ifndef __DML2_DEBUG_H__
#define __DML2_DEBUG_H__
-#ifndef DML2_ASSERT
-#define DML2_ASSERT(condition) ((void)0)
-#endif
+#include "os_types.h"
+#define DML_ASSERT(condition) ASSERT(condition)
+#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
+#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
-/*
- * DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart.
- * DML_LOG_ERROR - unexpected but recoverable failures inside DML
- * DML_LOG_WARN - unexpected inputs or events to DML
- * DML_LOG_INFO - high level tracing of DML interfaces
- * DML_LOG_DEBUG - detailed tracing of DML internal components
- * DML_LOG_VERBOSE - detailed tracing of DML calculation procedure
- */
-#if !defined(DML_LOG_LEVEL)
-#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
-/* for backward compatibility with old macros */
-#define DML_LOG_LEVEL 5
-#else
-#define DML_LOG_LEVEL 0
-#endif
-#endif
+/* ASSERT with message output */
+#define DML_ASSERT_MSG(condition, fmt, ...) \
+ do { \
+ if (!(condition)) { \
+ DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \
+ DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
+ DML_ASSERT(condition); \
+ } \
+ } while (0)
+
+/* fatal errors for unrecoverable DML states until a full reset */
+#define DML_LOG_LEVEL_FATAL 0
+/* unexpected but recoverable failures inside DML */
+#define DML_LOG_LEVEL_ERROR 1
+/* unexpected inputs or events to DML */
+#define DML_LOG_LEVEL_WARN 2
+/* high level tracing of DML interfaces */
+#define DML_LOG_LEVEL_INFO 3
+/* detailed tracing of DML internal components */
+#define DML_LOG_LEVEL_DEBUG 4
+/* detailed tracing of DML calculation procedure */
+#define DML_LOG_LEVEL_VERBOSE 5
-#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
-#if DML_LOG_LEVEL >= 1
-#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#ifndef DML_LOG_LEVEL
+#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
+#endif /* #ifndef DML_LOG_LEVEL */
+
+#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
+#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] " fmt, ## __VA_ARGS__)
#else
#define DML_LOG_ERROR(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 2
-#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
+#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] " fmt, ## __VA_ARGS__)
#else
#define DML_LOG_WARN(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 3
-#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
+#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] " fmt, ## __VA_ARGS__)
#else
#define DML_LOG_INFO(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 4
-#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
+#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] " fmt, ## __VA_ARGS__)
#else
#define DML_LOG_DEBUG(fmt, ...) ((void)0)
#endif
-#if DML_LOG_LEVEL >= 5
-#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
+#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] " fmt, ## __VA_ARGS__)
#else
#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
#endif
-
-int dml2_log_internal(const char *format, ...);
-int dml2_printf(const char *format, ...);
-
-#endif
+#endif /* __DML2_DEBUG_H__ */
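
The reworked header above gates each DML_LOG_* macro at preprocessing time: anything above DML_LOG_LEVEL expands to ((void)0) and is dropped from the build. A minimal standalone sketch of the same ladder, using printf in place of dm_output_to_console (the LOG_-prefixed names are illustrative):

#include <stdio.h>

#define LOG_LEVEL_ERROR 1
#define LOG_LEVEL_WARN  2
#define LOG_LEVEL_DEBUG 4

#ifndef LOG_LEVEL
#define LOG_LEVEL LOG_LEVEL_WARN /* same default the header picks */
#endif

#if LOG_LEVEL >= LOG_LEVEL_WARN
#define LOG_WARN(fmt, ...) printf("[WARN] " fmt, ## __VA_ARGS__)
#else
#define LOG_WARN(fmt, ...) ((void)0)
#endif

#if LOG_LEVEL >= LOG_LEVEL_DEBUG
#define LOG_DEBUG(fmt, ...) printf("[DEBUG] " fmt, ## __VA_ARGS__)
#else
#define LOG_DEBUG(fmt, ...) ((void)0)
#endif

int main(void)
{
	LOG_WARN("compiled in at the default level\n");
	LOG_DEBUG("compiled out: no code is emitted for this call\n");
	return 0;
}
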
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index d8d01dceacdd..00688b9f1df4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -38,6 +38,12 @@ struct dml2_mcg_min_clock_table {
} max_clocks_khz;
struct {
+ unsigned int dispclk;
+ unsigned int dppclk;
+ unsigned int dtbclk;
+ } max_ss_clocks_khz;
+
+ struct {
unsigned int dprefclk;
unsigned int xtalclk;
unsigned int pcierefclk;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index a966abd40788..5f1b49a50049 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -1082,22 +1082,22 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
if (stream_disp_cfg_index >= disp_cfg_index_max)
continue;
- if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
- scratch.odm_info.odm_factor = 1;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
- scratch.odm_info.odm_factor = 2;
- } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
- scratch.odm_info.odm_factor = 4;
- } else {
- ASSERT(false);
- scratch.odm_info.odm_factor = 1;
- }
-
+ if (ctx->architecture == dml2_architecture_20) {
+ if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_bypass) {
+ scratch.odm_info.odm_factor = 1;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_2to1) {
+ scratch.odm_info.odm_factor = 2;
+ } else if (ODMMode[stream_disp_cfg_index] == dml_odm_mode_combine_4to1) {
+ scratch.odm_info.odm_factor = 4;
+ } else {
+ ASSERT(false);
+ scratch.odm_info.odm_factor = 1;
+ }
+ } else if (ctx->architecture == dml2_architecture_21) {
/* After the DML2.1 update, the ODM interpretation changes and is no longer the same as for DML2.0.
 * This is not an issue with the new resource management logic. This block ensures backward
 * compatibility between legacy pipe management and the updated DML.
 */
- if (ctx->architecture == dml2_architecture_21) {
if (ODMMode[stream_disp_cfg_index] == 1) {
scratch.odm_info.odm_factor = 1;
} else if (ODMMode[stream_disp_cfg_index] == 2) {
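
The restructured block above keys the ODM decode on ctx->architecture: DML2.0 reports an enum (bypass / 2-to-1 / 4-to-1) that must be mapped to a pipe-split factor, while DML2.1 reports the factor directly as 1, 2 or 4. A standalone sketch of the two decodings (enum names are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

enum dml20_odm_mode { ODM_BYPASS, ODM_COMBINE_2TO1, ODM_COMBINE_4TO1 };

static int odm_factor_dml20(enum dml20_odm_mode mode)
{
	switch (mode) {
	case ODM_BYPASS:       return 1;
	case ODM_COMBINE_2TO1: return 2;
	case ODM_COMBINE_4TO1: return 4;
	default:
		assert(0); /* matches the ASSERT(false) fallback */
		return 1;
	}
}

static int odm_factor_dml21(int reported)
{
	/* DML2.1 already reports the combine factor itself */
	return (reported == 1 || reported == 2 || reported == 4) ? reported : 1;
}

int main(void)
{
	printf("dml20: %d, dml21: %d\n",
	       odm_factor_dml20(ODM_COMBINE_2TO1), odm_factor_dml21(4));
	return 0;
}
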
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index ab6baf269801..208630754c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -896,7 +896,7 @@ static void populate_dummy_dml_surface_cfg(struct dml_surface_cfg_st *out, unsig
out->SurfaceWidthC[location] = in->timing.h_addressable;
out->SurfaceHeightC[location] = in->timing.v_addressable;
out->PitchY[location] = ((out->SurfaceWidthY[location] + 127) / 128) * 128;
- out->PitchC[location] = 0;
+ out->PitchC[location] = 1;
out->DCCEnable[location] = false;
out->DCCMetaPitchY[location] = 0;
out->DCCMetaPitchC[location] = 0;
@@ -953,6 +953,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
out->SourcePixelFormat[location] = dml_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
out->SourcePixelFormat[location] = dml_444_64;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index e89571874185..525b7d04bf84 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -663,7 +663,10 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_copy_clocks_to_dc_state(&out_clks, context);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
- memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
+ if (context->streams[0]->sink->link->dc->caps.is_apu)
+ dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.dml_core_ctx);
+ else
+ memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx);
//copy for deciding zstate use
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 785226945699..5100f269368e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -40,6 +40,7 @@ struct dc_sink;
struct dc_stream_state;
struct resource_context;
struct display_stream_compressor;
+struct dc_mcache_params;
// Configuration of the MALL on the SoC
struct dml2_soc_mall_info {
@@ -107,6 +108,7 @@ struct dml2_dc_callbacks {
unsigned int (*get_max_flickerless_instant_vtotal_increase)(
struct dc_stream_state *stream,
bool is_gaming);
+ bool (*allocate_mcache)(struct dc_state *context, const struct dc_mcache_params *mcache_params);
};
struct dml2_dc_svp_callbacks {
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index abf439e743f2..2d70586cef40 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -790,8 +790,7 @@ static bool dpp3_program_blnd_lut(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_blnd_lut(dpp_base, false);
+ dpp3_power_on_blnd_lut(dpp_base, false);
return false;
}
@@ -1204,8 +1203,7 @@ static bool dpp3_program_shaper(struct dpp *dpp_base,
if (params == NULL) {
REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_shaper(dpp_base, false);
+ dpp3_power_on_shaper(dpp_base, false);
return false;
}
@@ -1399,8 +1397,7 @@ static bool dpp3_program_3dlut(struct dpp *dpp_base,
if (params == NULL) {
dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
- if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm)
- dpp3_power_on_hdr3dlut(dpp_base, false);
+ dpp3_power_on_hdr3dlut(dpp_base, false);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index 62b7012cda43..f7a373a3d70a 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -138,7 +138,7 @@ bool dpp35_construct(
dpp->base.funcs = &dcn35_dpp_funcs;
// w/a for cursor memory stuck in LS by programming DISPCLK_R_GATE_DISABLE, limit w/a to some ASIC revs
- if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10)
+ if (dpp->base.ctx->asic_id.hw_internal_rev < 0x40)
dpp->dispclk_r_gate_disable = true;
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 1236e0f9a256..712aff7e17f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
- // DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- cur_rom_en = 1;
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
}
REG_UPDATE_3(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index 75128fd34306..bd1b9aef6d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -57,13 +57,6 @@ static const struct dsc_funcs dcn20_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
-
/* API functions (external or via structure->function_pointer) */
void dsc2_construct(struct dcn20_dsc *dsc,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index 1fb90b52b814..a9c04fc95bd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -457,6 +457,12 @@
type DSCRM_DSC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \
type DSCRM_DSC_FORWARD_EN_STATUS
+enum dsc_bits_per_comp {
+ DSC_BPC_8 = 8,
+ DSC_BPC_10 = 10,
+ DSC_BPC_12 = 12,
+ DSC_BPC_UNKNOWN
+};
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 4893b793fec0..4222679fd4c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -45,12 +45,6 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
#define DC_LOGGER \
dsc->ctx->logger
-enum dsc_bits_per_comp {
- DSC_BPC_8 = 8,
- DSC_BPC_10 = 10,
- DSC_BPC_12 = 12,
- DSC_BPC_UNKNOWN
-};
/* API functions (external or via structure->function_pointer) */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index b099989d9364..942d9f0b6df2 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -411,6 +411,20 @@ enum dc_irq_source dal_irq_get_rx_source(
}
}
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_DCI2C_RR_DDC1 +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
index 3f13a744d07d..01ec451004f7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -62,7 +62,7 @@ static void dal_hw_hpd_destroy(
*ptr = NULL;
}
-static enum gpio_result get_value(
+static enum gpio_result dal_hw_hpd_get_value(
const struct hw_gpio_pin *ptr,
uint32_t *value)
{
@@ -85,7 +85,7 @@ static enum gpio_result get_value(
return dal_hw_gpio_get_value(ptr, value);
}
-static enum gpio_result set_config(
+static enum gpio_result dal_hw_hpd_set_config(
struct hw_gpio_pin *ptr,
const struct gpio_config_data *config_data)
{
@@ -104,9 +104,9 @@ static enum gpio_result set_config(
static const struct hw_gpio_pin_funcs funcs = {
.destroy = dal_hw_hpd_destroy,
.open = dal_hw_gpio_open,
- .get_value = get_value,
+ .get_value = dal_hw_hpd_get_value,
.set_value = dal_hw_gpio_set_value,
- .set_config = set_config,
+ .set_config = dal_hw_hpd_set_config,
.change_mode = dal_hw_gpio_change_mode,
.close = dal_hw_gpio_close,
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
index 2546224b326a..e4496ad203b2 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c
@@ -132,9 +132,9 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
-
+ if (!hubbub1->base.ctx->dc->config.skip_riommu_prefetch_wa) {
+ dcn21_dchvm_init(hubbub);
+ }
return hubbub1->num_vmid;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 5ed195377a6c..baed31611477 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -1032,7 +1032,7 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
- .hubp_clear_tiling = hubp2_clear_tiling,
+ .hubp_clear_tiling = hubp401_clear_tiling,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index 40ecebea1ba0..bee617ca0838 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+HWSS_DCE60 = dce60_hwseq.o
+
+AMD_DAL_HWSS_DCE60 = $(addprefix $(AMDDALPATH)/dc/hwss/dce60/,$(HWSS_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE60)
+endif
+
+###############################################################################
+
+HWSS_DCE80 = dce80_hwseq.o
+
+AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
+
+###############################################################################
+
HWSS_DCE = dce_hwseq.o
AMD_DAL_HWSS_DCE = $(addprefix $(AMDDALPATH)/dc/hwss/dce/,$(HWSS_DCE))
@@ -65,14 +83,6 @@ AMD_DAL_HWSS_DCE120 = $(addprefix $(AMDDALPATH)/dc/hwss/dce120/,$(HWSS_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE120)
-###############################################################################
-
-HWSS_DCE80 = dce80_hwseq.o
-
-AMD_DAL_HWSS_DCE80 = $(addprefix $(AMDDALPATH)/dc/hwss/dce80/,$(HWSS_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 5656d10368ad..38e17b1796e1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -952,8 +952,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t pwrseq_instance = 0;
- unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
- unsigned int post_T7_delay = OLED_POST_T7_DELAY;
+ unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0);
+ unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_POST_T7_DELAY : 0);
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@@ -1069,7 +1069,8 @@ void dce110_edp_backlight_control(
if (!enable) {
/*follow oem panel config's requirement*/
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
- msleep(pre_T11_delay);
+ if (pre_T11_delay)
+ msleep(pre_T11_delay);
}
}
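
The hunk above makes the fixed OLED power-sequencing delays conditional: the pre-T11 and post-T7 waits now apply only when dpcd_sink_ext_caps reports an OLED panel, and a zero total skips msleep() entirely. A reduced sketch of that selection (the constants and msleep_stub are placeholders, not DC values):

#include <stdio.h>

#define OLED_POST_T7_DELAY_MS 100 /* illustrative */
#define OLED_PRE_T11_DELAY_MS 150 /* illustrative */

static void msleep_stub(unsigned int ms)
{
	printf("sleeping %u ms\n", ms);
}

static void backlight_disable(int is_oled, unsigned int extra_pre_t11_ms)
{
	unsigned int pre_t11 = is_oled ? OLED_PRE_T11_DELAY_MS : 0;

	pre_t11 += extra_pre_t11_ms; /* OEM panel-config extra delay */
	if (pre_t11)
		msleep_stub(pre_t11);
}

int main(void)
{
	backlight_disable(1, 10); /* OLED: sleeps 160 ms */
	backlight_disable(0, 0);  /* non-OLED: no sleep at all */
	return 0;
}
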
@@ -1220,8 +1221,11 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
+ if (hws && hws->wa_state.skip_blank_stream)
+ return;
+
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- if (!link->skip_implict_edp_power_control)
+ if (!link->skip_implict_edp_power_control && hws)
hws->funcs.edp_backlight_control(link, false);
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
@@ -2763,12 +2767,12 @@ static void dce110_enable_per_frame_crtc_position_reset(
}
-static void init_pipes(struct dc *dc, struct dc_state *context)
+static void dce110_init_pipes(struct dc *dc, struct dc_state *context)
{
// Do nothing
}
-static void init_hw(struct dc *dc)
+static void dce110_init_hw(struct dc *dc)
{
int i;
struct dc_bios *bp;
@@ -3327,7 +3331,7 @@ void dce110_disable_link_output(struct dc_link *link,
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
- .init_hw = init_hw,
+ .init_hw = dce110_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
.post_unlock_program_front_end = dce110_post_unlock_program_front_end,
@@ -3371,7 +3375,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
};
static const struct hwseq_private_funcs dce110_private_funcs = {
- .init_pipes = init_pipes,
+ .init_pipes = dce110_init_pipes,
.set_input_transfer_func = dce110_set_input_transfer_func,
.set_output_transfer_func = dce110_set_output_transfer_func,
.power_down = dce110_power_down,
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
index 44b56490e152..a08e9f9eec17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
-#include "dce60_hw_sequencer.h"
+#include "dce60_hwseq.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hwseq.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
index f3b2d8b60d5b..f3b2d8b60d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 912f96323ed6..f9ee55998b6b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -94,6 +94,128 @@ static void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
+/*
+ * Delay until we have passed the busy-until point, after which we can
+ * do the necessary locking/programming on consecutive full updates
+ */
+void dcn10_wait_for_pipe_update_if_needed(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only)
+{
+ struct crtc_position position;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int vpos, frame_count;
+ uint32_t vupdate_start, vupdate_end, vblank_start;
+ unsigned int lines_to_vupdate, us_to_vupdate;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ if (pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream)
+ return;
+
+ if (!pipe_ctx->wait_is_required)
+ return;
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ frame_count = tg->funcs->get_frame_count(tg);
+
+ if (frame_count - pipe_ctx->wait_frame_count > 2)
+ return;
+
+ vblank_start = pipe_ctx->pipe_dlg_param.vblank_start;
+
+ if (vpos >= vupdate_start && vupdate_start >= vblank_start)
+ lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
+ else
+ lines_to_vupdate = vupdate_start - vpos;
+
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+
+ if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
+ us_to_vupdate = 0;
+
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+
+ if (is_surface_update_only && us_to_vupdate + us_vupdate > 200) {
+ // surface updates come in at high IRQL
+ pipe_ctx->wait_is_required = true;
+ return;
+ }
+
+ fsleep(us_to_vupdate + us_vupdate);
+
+ // clear the deferred vupdate-wait state
+ pipe_ctx->next_vupdate = 0;
+ pipe_ctx->wait_frame_count = 0;
+ pipe_ctx->wait_is_required = false;
+}
+
+/*
+ * On pipe unlock and programming, indicate the pipe will be busy
+ * until some frame and line (vupdate). This is required for consecutive
+ * full updates: we need to wait for pending updates to latch
+ * before trying to program the next update.
+ */
+void dcn10_set_wait_for_update_needed_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ uint32_t vupdate_start, vupdate_end;
+ struct crtc_position position;
+ unsigned int vpos, cur_frame;
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->stream_res.tg ||
+ !pipe_ctx->stream_res.stream_enc)
+ return;
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ ASSERT(optc1->max_frame_count != 0);
+
+ if (tg->funcs->is_tg_enabled && !tg->funcs->is_tg_enabled(tg))
+ return;
+
+ pipe_ctx->next_vupdate = vupdate_start;
+
+ cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (vpos < vupdate_start) {
+ pipe_ctx->wait_frame_count = cur_frame;
+ } else {
+ if (cur_frame + 1 > optc1->max_frame_count)
+ pipe_ctx->wait_frame_count = cur_frame + 1 - optc1->max_frame_count;
+ else
+ pipe_ctx->wait_frame_count = cur_frame + 1;
+ }
+
+ pipe_ctx->wait_is_required = true;
+}
+
void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
@@ -2664,7 +2786,6 @@ void dcn10_update_visual_confirm_color(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
if (mpc->funcs->set_bg_color) {
- memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
}
}
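
A worked example of the vupdate wait arithmetic in dcn10_wait_for_pipe_update_if_needed() above, using illustrative 1080p60 CEA timing. pix_clk is kept in units of 100 Hz as in the driver, so us_per_line = h_total * 10000 / pix_clk_100hz (the vpos/vupdate_start values here are invented):

#include <stdio.h>

int main(void)
{
	unsigned int h_total = 2200;          /* 1080p60 total line width */
	unsigned int pix_clk_100hz = 1485000; /* 148.5 MHz in 100 Hz units */
	unsigned int vpos = 100, vupdate_start = 1084;

	unsigned int us_per_line = h_total * 10000u / pix_clk_100hz;
	unsigned int lines_to_vupdate = vupdate_start - vpos;
	unsigned int us_to_vupdate = lines_to_vupdate * us_per_line;

	/* about 14 us per line, so about 13.8 ms until the vupdate window */
	printf("us_per_line=%u us_to_vupdate=%u\n", us_per_line, us_to_vupdate);
	return 0;
}
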
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index 42ffd1e1299c..57d30ea225f2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -50,6 +50,13 @@ void dcn10_optimize_bandwidth(
void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dcn10_wait_for_pipe_update_if_needed(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool is_surface_update_only);
+void dcn10_set_wait_for_update_needed_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 846c9c51f2d9..c277df12c817 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -76,6 +76,7 @@ void dcn20_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
+ bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth"
@@ -89,15 +90,15 @@ void dcn20_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
- dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ if (dpp->funcs->dpp_get_gamut_remap) {
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ is_gamut_remap_available = true;
+ }
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld",
+ DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s",
dpp->inst,
(s.dgam_lut_mode == 0) ? "Bypass" :
((s.dgam_lut_mode == 1) ? "sRGB" :
@@ -114,10 +115,17 @@ void dcn20_log_color_state(struct dc *dc,
(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
(s.rgam_lut_mode == 1) ? "RAM A" :
- ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"),
+ ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"));
+
+ if (is_gamut_remap_available) {
+ DTN_INFO(" %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+
(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
- ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
- "SW"),
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+ "SW"),
s.gamut_remap.temperature_matrix[0].value,
s.gamut_remap.temperature_matrix[1].value,
s.gamut_remap.temperature_matrix[2].value,
@@ -130,6 +138,8 @@ void dcn20_log_color_state(struct dc *dc,
s.gamut_remap.temperature_matrix[9].value,
s.gamut_remap.temperature_matrix[10].value,
s.gamut_remap.temperature_matrix[11].value);
+ }
+
DTN_INFO("\n");
}
DTN_INFO("\n");
@@ -2053,7 +2063,7 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
@@ -2482,7 +2492,7 @@ bool dcn20_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
return false;
/* apply updated bandwidth parameters */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index e89ebfda4873..37a239219dfe 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -74,6 +74,7 @@ void dcn30_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
+ bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode"
@@ -88,16 +89,16 @@ void dcn30_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
- dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+ if (dpp->funcs->dpp_get_gamut_remap) {
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ is_gamut_remap_available = true;
+ }
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s"
- " %12s "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld",
+ DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s",
dpp->inst,
s.pre_dgam_mode,
(s.pre_dgam_select == 0) ? "sRGB" :
@@ -121,7 +122,14 @@ void dcn30_log_color_state(struct dc *dc,
(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
(s.rgam_lut_mode == 0) ? "Bypass" :
((s.rgam_lut_mode == 1) ? "RAM A" :
- "RAM B"),
+ "RAM B"));
+
+ if (is_gamut_remap_available) {
+ DTN_INFO(" %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+
(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
"SW"),
@@ -137,6 +145,8 @@ void dcn30_log_color_state(struct dc *dc,
s.gamut_remap.temperature_matrix[9].value,
s.gamut_remap.temperature_matrix[10].value,
s.gamut_remap.temperature_matrix[11].value);
+ }
+
DTN_INFO("\n");
}
DTN_INFO("\n");
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index f38340aa3f15..5ba3999991b0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -526,9 +526,15 @@ static void dcn31_reset_back_end_for_pipe(
link = pipe_ctx->stream->link;
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = false;
+
if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) &&
- (link->connector_signal == SIGNAL_TYPE_EDP))
+ (link->connector_signal == SIGNAL_TYPE_EDP)) {
dc->hwss.blank_stream(pipe_ctx);
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = true;
+ }
pipe_ctx->stream_res.tg->funcs->set_dsc_config(
pipe_ctx->stream_res.tg,
@@ -570,7 +576,8 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.audio = NULL;
}
}
-
+ if (dc->hwseq)
+ dc->hwseq->wa_state.skip_blank_stream = false;
pipe_ctx->stream = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index be26c925fdfa..e68f21fd5f0f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -84,6 +84,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct dsc_config dsc_cfg;
struct dsc_optc_config dsc_optc_cfg = {0};
enum optc_dsc_mode optc_dsc_mode;
+ struct dcn_dsc_state dsc_state = {0};
+
+ if (!dsc) {
+ DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+
+ if (dsc->funcs->dsc_read_state) {
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+ if (!dsc_state.dsc_fw_en) {
+ DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ return;
+ }
+ }
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
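
The early-out added above reads the DSC hardware state before reprogramming and bails out when the block is already disabled, avoiding writes to a powered-down DSC. A reduced sketch of that guard (dsc_state and the print statements are stand-ins for the driver structures):

#include <stdbool.h>
#include <stdio.h>

struct dsc_state { bool dsc_fw_en; };

static bool update_dsc(const struct dsc_state *hw, int tg_inst)
{
	if (!hw->dsc_fw_en) {
		printf("DSC already disabled for tg %d, nothing to do\n", tg_inst);
		return false;
	}
	printf("reprogramming DSC for tg %d\n", tg_inst);
	return true;
}

int main(void)
{
	struct dsc_state off = { .dsc_fw_en = false };
	struct dsc_state on = { .dsc_fw_en = true };

	update_dsc(&off, 0);
	update_dsc(&on, 1);
	return 0;
}
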
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index cd0adf72b223..a0b05b9ef660 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1181,6 +1181,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
+ struct dce_hwseq *hws = stream->ctx->dc->hwseq;
two_pix_per_container = pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
@@ -1201,7 +1202,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if ((odm_combine_factor == 2) || (hws->funcs.is_dp_dig_pixel_rate_div_policy &&
+ hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 922b8d71cf1a..a267f574b619 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -241,11 +241,6 @@ void dcn35_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
- if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
- for (i = 0; i < res_pool->pipe_count; i++)
- res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
- }
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -901,12 +896,18 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dccg *dccg = dc->res_pool->dccg;
+
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
-
+ /* make sure DPPCLK is on */
+ dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
pipe_ctx->stream_res.opp,
@@ -923,6 +924,7 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
}
+ //DC_LOG_DEBUG("%s: dpp_inst(%d) =\n", __func__, dpp->inst);
if (!pipe_ctx->top_pipe
&& pipe_ctx->plane_state
@@ -938,6 +940,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dccg *dccg = dc->res_pool->dccg;
+
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
@@ -955,7 +959,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
-/*to do, need to support both case*/
+ dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);
+
hubp->power_gated = true;
hubp->funcs->hubp_reset(hubp);
@@ -967,6 +972,8 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->top_pipe = NULL;
pipe_ctx->bottom_pipe = NULL;
pipe_ctx->plane_state = NULL;
+ //DC_LOG_DEBUG("%s: dpp_inst(%d)=\n", __func__, dpp->inst);
+
}
void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
@@ -1040,6 +1047,15 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (dc->caps.sequential_ono) {
update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
+ pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+ }
}
}
@@ -1186,6 +1202,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
if (dc->caps.sequential_ono) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
+ update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
+ update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
+ update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (new_pipe->plane_res.hubp &&
+ new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+ }
+ }
+ }
+
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
@@ -1543,7 +1578,7 @@ static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
const struct dc *dc = pipe_ctx->stream->link->dc;
- if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false)
return false;
// Not necessary for MST configurations
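
The sequential-ONO logic added above encodes a cross-instance dependency: when a pipe's DSC instance differs from its HUBP instance, every HUBP/DPP instance must stay powered, so the gating decision is cleared across all pipes rather than only the matching one. A reduced standalone sketch (PIPE_COUNT and the instance numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PIPE_COUNT 4

static void keep_pipes_powered(bool gate_hubp[PIPE_COUNT],
			       int hubp_inst, int dsc_inst)
{
	int j;

	gate_hubp[dsc_inst] = false; /* the DSC's own HUBP must stay on */
	if (hubp_inst != dsc_inst) {
		/* cross-instance dependency: ungate everything */
		for (j = 0; j < PIPE_COUNT; j++)
			gate_hubp[j] = false;
	}
}

int main(void)
{
	bool gate_hubp[PIPE_COUNT] = { true, true, true, true };
	int j;

	keep_pipes_powered(gate_hubp, /*hubp_inst=*/0, /*dsc_inst=*/2);
	for (j = 0; j < PIPE_COUNT; j++)
		printf("HUBP%d gate=%d\n", j, gate_hubp[j]);
	return 0;
}
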
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 6a82a865209c..a3ccf805bd16 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -168,6 +168,8 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn35_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 902a96940a01..58f2be2a326b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -158,10 +158,12 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.set_mcm_luts = dcn32_set_mcm_luts,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
+ .wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
+ .set_wait_for_update_needed_for_pipe = dcn10_set_wait_for_update_needed_for_pipe,
};
void dcn351_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 5489f3d431f6..c4177a9a662f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -396,6 +396,249 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct
}
}
+static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend)
+{
+ struct mpc *mpc = dc->res_pool->mpc;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+
+ if (!pipe_ctx->plane_state)
+ return;
+
+ mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
+ pipe_ctx->plane_state->mcm_location = (bPostBlend) ?
+ MPCC_MOVABLE_CM_LOCATION_AFTER :
+ MPCC_MOVABLE_CM_LOCATION_BEFORE;
+}
+
+static void dc_get_lut_mode(
+ enum dc_cm2_gpu_mem_layout layout,
+ enum hubp_3dlut_fl_mode *mode,
+ enum hubp_3dlut_fl_addressing_mode *addr_mode)
+{
+ switch (layout) {
+ case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
+ *mode = hubp_3dlut_fl_mode_native_1;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
+ *mode = hubp_3dlut_fl_mode_native_2;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
+ *mode = hubp_3dlut_fl_mode_transform;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
+ break;
+ default:
+ *mode = hubp_3dlut_fl_mode_disable;
+ *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ }
+}
+
+static void dc_get_lut_format(
+ enum dc_cm2_gpu_mem_format dc_format,
+ enum hubp_3dlut_fl_format *format)
+{
+ switch (dc_format) {
+ case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
+ *format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
+ break;
+ case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
+ *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
+ break;
+ case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
+ *format = hubp_3dlut_fl_format_float_fp1_5_10;
+ break;
+ }
+}
+
+static void dc_get_lut_xbar(
+ enum dc_cm2_gpu_mem_pixel_component_order order,
+ enum hubp_3dlut_fl_crossbar_bit_slice *cr_r,
+ enum hubp_3dlut_fl_crossbar_bit_slice *y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice *cb_b)
+{
+ switch (order) {
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
+ *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ break;
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA:
+ *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
+}
+
+static void dc_get_lut_width(
+ enum dc_cm2_gpu_mem_size size,
+ enum hubp_3dlut_fl_width *width)
+{
+ switch (size) {
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ *width = hubp_3dlut_fl_width_33;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ *width = hubp_3dlut_fl_width_17;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ *width = hubp_3dlut_fl_width_transformed;
+ break;
+ }
+}
+static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
+{
+ if (mpc->funcs->rmcm.update_3dlut_fast_load_select &&
+ mpc->funcs->rmcm.program_lut_read_write_control &&
+ hubp->funcs->hubp_program_3dlut_fl_addr &&
+ mpc->funcs->rmcm.program_bit_depth &&
+ hubp->funcs->hubp_program_3dlut_fl_mode &&
+ hubp->funcs->hubp_program_3dlut_fl_addressing_mode &&
+ hubp->funcs->hubp_program_3dlut_fl_format &&
+ hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
+ mpc->funcs->rmcm.program_bias_scale &&
+ hubp->funcs->hubp_program_3dlut_fl_crossbar &&
+ hubp->funcs->hubp_program_3dlut_fl_width &&
+ mpc->funcs->rmcm.update_3dlut_fast_load_select &&
+ mpc->funcs->rmcm.populate_lut &&
+ mpc->funcs->rmcm.program_lut_mode &&
+ hubp->funcs->hubp_enable_3dlut_fl &&
+ mpc->funcs->rmcm.enable_3dlut_fl)
+ return true;
+
+ return false;
+}
+
+bool dcn401_program_rmcm_luts(
+ struct hubp *hubp,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_cm2_transfer_func_source lut3d_src,
+ struct dc_cm2_func_luts *mcm_luts,
+ struct mpc *mpc,
+ bool lut_bank_a,
+ int mpcc_id)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ union mcm_lut_params m_lut_params;
+ enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum hubp_3dlut_fl_format format = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
+ enum hubp_3dlut_fl_width width = 0;
+ struct dc *dc = hubp->ctx->dc;
+
+ bool bypass_rmcm_3dlut = false;
+ bool bypass_rmcm_shaper = false;
+
+ dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
+
+ /* 3DLUT */
+ switch (lut3d_src) {
+ case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
+ // TODO: SYSMEM-sourced 3DLUTs are not handled on the RMCM path yet
+ break;
+ case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width);
+ if (!dc_is_rmcm_3dlut_supported(hubp, mpc) ||
+ !mpc->funcs->rmcm.is_config_supported(width))
+ return false;
+
+ //0. disable fast load on the MPC
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF);
+
+ //1. power down the block
+ mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false);
+
+ //2. program RMCM
+ //2a. 3dlut reg programming
+ mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a,
+ (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id);
+
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp,
+ mcm_luts->lut3d_data.gpu_mem_params.addr);
+
+ mpc->funcs->rmcm.program_bit_depth(mpc,
+ mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+
+ // select native or transformed mode
+ dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
+
+ //these calls program the MCM 3DLUT
+ hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
+
+ hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
+
+ //format programming appears to apply only to the MCM
+ dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format);
+ hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
+
+ mpc->funcs->rmcm.program_bias_scale(mpc,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale,
+ mpcc_id);
+ hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale);
+
+ dc_get_lut_xbar(
+ mcm_luts->lut3d_data.gpu_mem_params.component_order,
+ &crossbar_bit_slice_cr_r,
+ &crossbar_bit_slice_y_g,
+ &crossbar_bit_slice_cb_b);
+
+ hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
+ crossbar_bit_slice_cr_r,
+ crossbar_bit_slice_y_g,
+ crossbar_bit_slice_cb_b);
+
+ mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id);
+
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
+
+ //2b. shaper reg programming
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
+
+ if (mcm_luts->shaper->type == TF_TYPE_HWPWL) {
+ m_lut_params.pwl = &mcm_luts->shaper->pwl;
+ } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ ASSERT(false);
+ cm_helper_translate_curve_to_hw_format(
+ dc->ctx,
+ mcm_luts->shaper,
+ &dpp_base->regamma_params, true);
+ m_lut_params.pwl = &dpp_base->regamma_params;
+ }
+ if (m_lut_params.pwl) {
+ mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
+ mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id);
+ } else {
+ //RMCM 3dlut won't work without its shaper
+ return false;
+ }
+
+ //3. Select the hubp connected to this RMCM
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
+ mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id);
+
+ //4. power on the block
+ if (m_lut_params.pwl)
+ mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);
+
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
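dcn401_program_rmcm_luts() returns false when the configuration is unsupported or the required shaper is absent. A hedged caller sketch — the warn-on-failure handling is hypothetical, and the dcn401_populate_mcm_luts() hunk below actually ignores the return value:

    if (mcm_luts->lut3d_data.rmcm_3dlut_enable &&
        !dcn401_program_rmcm_luts(hubp, pipe_ctx, lut3d_src, mcm_luts,
    			      mpc, lut_bank_a, mpcc_id))
    	DC_LOG_WARNING("RMCM 3DLUT programming rejected; LUT left disabled");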
void dcn401_populate_mcm_luts(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_cm2_func_luts mcm_luts,
@@ -407,21 +650,39 @@ void dcn401_populate_mcm_luts(struct dc *dc,
struct mpc *mpc = dc->res_pool->mpc;
union mcm_lut_params m_lut_params;
enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format;
+ enum hubp_3dlut_fl_format format = 0;
enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_width width = 0;
enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
- bool is_17x17x17 = true;
bool rval;
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
+ //MCM - set its location relative to the blender (before/after)
+ //set to post blend (true)
+ dcn401_set_mcm_location_post_blend(
+ dc,
+ pipe_ctx,
+ mcm_luts.lut3d_data.mpc_mcm_post_blend);
+
+ //RMCM - 3dLUT+Shaper
+ if (mcm_luts.lut3d_data.rmcm_3dlut_enable) {
+ dcn401_program_rmcm_luts(
+ hubp,
+ pipe_ctx,
+ lut3d_src,
+ &mcm_luts,
+ mpc,
+ lut_bank_a,
+ mpcc_id);
+ }
+
/* 1D LUT */
if (mcm_luts.lut1d_func) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
@@ -442,7 +703,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
/* Shaper */
- if (mcm_luts.shaper) {
+ if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.shaper->pwl;
@@ -454,11 +715,11 @@ void dcn401_populate_mcm_luts(struct dc *dc,
m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->mcm.populate_lut)
+ mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
}
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id);
}
/* 3DLUT */
@@ -467,6 +728,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (hubp->funcs->hubp_enable_3dlut_fl)
hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
+
if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
if (mpc->funcs->populate_lut)
@@ -476,16 +738,35 @@ void dcn401_populate_mcm_luts(struct dc *dc,
mpcc_id);
}
break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
+ switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
+ case DC_CM2_GPU_MEM_SIZE_333333:
+ width = hubp_3dlut_fl_width_33;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_171717:
+ width = hubp_3dlut_fl_width_17;
+ break;
+ case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ width = hubp_3dlut_fl_width_transformed;
+ break;
+ }
+
+ //check for support
+ if (mpc->funcs->mcm.is_config_supported &&
+ !mpc->funcs->mcm.is_config_supported(width))
+ break;
if (mpc->funcs->program_lut_read_write_control)
mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
if (mpc->funcs->program_lut_mode)
mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_3dlut_size)
- mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id);
+
if (hubp->funcs->hubp_program_3dlut_fl_addr)
hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
+
+ if (mpc->funcs->mcm.program_bit_depth)
+ mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+
switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
mode = hubp_3dlut_fl_mode_native_1;
@@ -512,7 +793,6 @@ void dcn401_populate_mcm_luts(struct dc *dc,
switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- default:
format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
break;
case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
@@ -524,37 +804,37 @@ void dcn401_populate_mcm_luts(struct dc *dc,
}
if (hubp->funcs->hubp_program_3dlut_fl_format)
hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale)
+ if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
+ mpc->funcs->mcm.program_bias_scale) {
+ mpc->funcs->mcm.program_bias_scale(mpc,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
+ mpcc_id);
hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
-
- switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- default:
- crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
+ mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
}
+ //navi 4x has a bug where the red and blue components are swapped; work around it here
+ //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
+ dc_get_lut_xbar(
+ mcm_luts.lut3d_data.gpu_mem_params.component_order,
+ &crossbar_bit_slice_cr_r,
+ &crossbar_bit_slice_y_g,
+ &crossbar_bit_slice_cb_b);
+
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
+ crossbar_bit_slice_cr_r,
crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b,
- crossbar_bit_slice_cr_r);
+ crossbar_bit_slice_cb_b);
+
+ if (mpc->funcs->mcm.program_lut_read_write_control)
+ mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
+
+ if (mpc->funcs->mcm.program_3dlut_size)
+ mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- default:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_width)
- hubp->funcs->hubp_program_3dlut_fl_width(hubp, width);
if (mpc->funcs->update_3dlut_fast_load_select)
mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
@@ -1980,9 +2260,9 @@ void dcn401_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
- if (pipe_ctx->update_flags.raw ||
- (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
- pipe_ctx->stream->update_flags.raw)
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw))
dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
@@ -2081,7 +2361,7 @@ void dcn401_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state) {
if (pipe->plane_state->triplebuffer_flips)
BREAK_TO_DEBUGGER();
@@ -2371,7 +2651,7 @@ bool dcn401_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
return false;
/* apply updated bandwidth parameters */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 781cf0efccc6..ce65b4f6c672 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -109,4 +109,12 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+bool dcn401_program_rmcm_luts(
+ struct hubp *hubp,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_cm2_transfer_func_source lut3d_src,
+ struct dc_cm2_func_luts *mcm_luts,
+ struct mpc *mpc,
+ bool lut_bank_a,
+ int mpcc_id);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index c8b5ed834579..3a0795045bc6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -195,6 +195,8 @@ enum block_sequence_func {
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
+ /* This must be the last value in this enum, add new ones above */
+ HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
struct block_sequence {
@@ -202,6 +204,8 @@ struct block_sequence {
enum block_sequence_func func;
};
+#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
+
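Deriving MAX_HWSS_BLOCK_SEQUENCE_SIZE from the enum's terminal value replaces the old hard-coded block_sequence[100] (see the dc_state hunk further down) and can never underestimate the worst case of every function firing on every pipe. The generic form of the pattern, as a self-contained illustration:

    /* illustration only: sentinel-sized sequence array */
    enum seq_func { SEQ_FIRST, SEQ_SECOND, SEQ_FUNC_COUNT /* keep last */ };
    #define MAX_SEQ_SIZE (SEQ_FUNC_COUNT * 4)  /* 4 standing in for MAX_PIPES */

    struct seq_entry { enum seq_func func; };
    static struct seq_entry sequence[MAX_SEQ_SIZE]; /* grows automatically with the enum */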
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
/* Embedded Display Related */
@@ -534,13 +538,13 @@ void set_drr_and_clear_adjust_pending(
struct drr_params *params);
void hwss_execute_sequence(struct dc *dc,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
int num_steps);
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
- struct block_sequence block_sequence[],
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE],
unsigned int *num_steps,
struct pipe_ctx *pipe_ctx,
struct dc_stream_status *stream_status,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 22a5d4a03c98..1e2d247fbbac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -49,6 +49,7 @@ struct hwseq_wa_state {
bool DEGVIDCN10_253_applied;
bool disallow_self_refresh_during_multi_plane_transition_applied;
unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
+ bool skip_blank_stream;
};
struct pipe_ctx;
@@ -183,6 +184,8 @@ struct hwseq_private_funcs {
struct dc_cm2_func_luts mcm_luts,
bool lut_bank_a);
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
+ void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
+ void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index b5afd8c3103d..f3696143590c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -26,6 +26,8 @@
#ifndef _CORE_STATUS_H_
#define _CORE_STATUS_H_
+#include "dc_hw_types.h"
+
enum dc_status {
DC_OK = 1,
@@ -56,6 +58,7 @@ enum dc_status {
DC_NO_LINK_ENC_RESOURCE = 26,
DC_FAIL_DP_PAYLOAD_ALLOCATION = 27,
DC_FAIL_DP_LINK_BANDWIDTH = 28,
+ DC_FAIL_HW_CURSOR_SUPPORT = 29,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d0021f25f3d8..0cf349cafb3e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -65,6 +65,7 @@ struct resource_pool;
struct dc_state;
struct resource_context;
struct clk_bw_params;
+struct dc_mcache_params;
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
@@ -78,8 +79,7 @@ struct resource_funcs {
/* Create a minimal link encoder object with no dc_link object
* associated with it. */
struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id);
-
- bool (*validate_bandwidth)(
+ enum dc_status (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
@@ -218,6 +218,11 @@ struct resource_funcs {
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
+ unsigned int (*get_max_hw_cursor_size)(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+ bool (*program_mcache_pipe_config)(struct dc_state *context,
+ const struct dc_mcache_params *mcache_params);
};
struct audio_support{
@@ -382,7 +387,9 @@ struct link_resource {
struct link_config {
struct dc_link_settings dp_link_settings;
+ struct dc_tunnel_settings dp_tunnel_settings;
};
+
union pipe_update_flags {
struct {
uint32_t enable : 1;
@@ -480,6 +487,10 @@ struct pipe_ctx {
struct pixel_rate_divider pixel_rate_divider;
/* pixels borrowed from hblank to hactive */
uint8_t hblank_borrow;
+ /* next vupdate */
+ uint32_t next_vupdate;
+ uint32_t wait_frame_count;
+ bool wait_is_required;
};
/* Data used for dynamic link encoder assignment.
@@ -507,7 +518,7 @@ struct resource_context {
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
- /* solely used for build scalar data in dml2 */
+ /* used to build scalar data in dml2 and for edp backlight programming */
struct pipe_ctx temp_pipe;
};
@@ -630,7 +641,7 @@ struct dc_state {
*/
struct bw_context bw_ctx;
- struct block_sequence block_sequence[100];
+ struct block_sequence block_sequence[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
unsigned int block_sequence_steps;
struct dc_dmub_cmd dc_dmub_cmd[10];
unsigned int dmub_cmd_count;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 221645c023b5..bac8febad69a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -199,6 +199,7 @@ enum dentist_divider_range {
CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
CLK_SR_DCN35(CLK5_spll_field_8), \
+ CLK_SR_DCN35(CLK6_spll_field_8), \
SR(DENTIST_DISPCLK_CNTL), \
#define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
@@ -307,7 +308,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK4_ALLOW_DS;
uint32_t CLK1_CLK5_ALLOW_DS;
uint32_t CLK5_spll_field_8;
-
+ uint32_t CLK6_spll_field_8;
};
struct clk_mgr_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 3a89cc0cffc1..6e303b81bfb0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -967,23 +967,6 @@ struct mpc_funcs {
*/
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
- /**
- * @get_3dlut_fast_load_status:
- *
- * Get 3D LUT fast load status and reference them with done, soft_underflow and hard_underflow pointers.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] mpcc_id
- * - [in/out] done
- * - [in/out] soft_underflow
- * - [in/out] hard_underflow
- *
- * Return:
- *
- * void
- */
- void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
/**
* @populate_lut:
@@ -1054,6 +1037,35 @@ struct mpc_funcs {
* void
*/
void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
+
+ struct {
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } mcm;
+
+ struct {
+ void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
+ void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
+ bool lut_bank_a, bool enabled, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
+ bool lut_bank_a, int mpcc_id);
+ void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
+ void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
+ bool (*is_config_supported)(uint32_t width);
+
+ void (*power_on_shaper_3dlut)(struct mpc *mpc, uint32_t mpcc_id, bool power_on);
+ void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ } rmcm;
};
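With the new mcm/rmcm sub-structs, hardware generations that lack a unit simply leave its hooks NULL. The MCM call sites in this patch guard each dispatch individually, while the RMCM path checks all hooks once via dc_is_rmcm_3dlut_supported(); a sketch of the per-call style, using hooks declared above:

    /* sketch: NULL-guarded dispatch through the per-unit sub-structs */
    if (mpc->funcs->mcm.program_bit_depth)
    	mpc->funcs->mcm.program_bit_depth(mpc, bit_depth, mpcc_id);
    if (mpc->funcs->rmcm.power_on_shaper_3dlut)
    	mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);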
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 7f371cbb35cd..0d5a8358a778 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -68,6 +68,7 @@ struct optc {
int pstate_keepout;
struct dc_crtc_timing orginal_patched_timing;
enum signal_type signal;
+ uint32_t max_frame_count;
};
void optc1_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 2948a696ee12..7d16351bba99 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -207,6 +207,9 @@ struct link_service {
bool (*dp_decide_link_settings)(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
+ void (*dp_decide_tunnel_settings)(
+ struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
enum dp_link_encoding (*mst_decide_link_encoding_format)(
const struct dc_link *link);
bool (*edp_decide_link_settings)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index a402df225a76..26cb1459b743 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -508,6 +508,10 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
initial_val, \
n, __VA_ARGS__)
+#define IX_REG_SET_SYNC(index, init_value, f1, v1) \
+ IX_REG_SET_N_SYNC(index, 1, init_value, \
+ FN(reg, f1), v1)
+
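IX_REG_SET_SYNC is the single-field counterpart of the existing IX_REG_SET_2_SYNC, forwarding to IX_REG_SET_N_SYNC with n = 1. A hedged usage sketch — the index and field names below are placeholders, not registers from this patch, and the surrounding reg_helper context macros are assumed to be in scope:

    /* placeholder names, for illustration only */
    IX_REG_SET_SYNC(EXAMPLE_INDEX, 0,
    		EXAMPLE_FIELD, enable ? 1 : 0);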
#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2) \
IX_REG_SET_N_SYNC(index, 2, init_value, \
FN(reg, f1), v1,\
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 9458187b834d..a890f581f4e8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -32,6 +32,7 @@
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM 2
+#define MAX_MCACHES 8
#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
@@ -65,6 +66,13 @@ struct resource_straps {
uint32_t audio_stream_number;
};
+struct dc_mcache_allocations {
+ int global_mcache_ids_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_plane1[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane0[MAX_MCACHES + 1];
+ int global_mcache_ids_mall_plane1[MAX_MCACHES + 1];
+};
+
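Each dc_mcache_allocations array reserves MAX_MCACHES + 1 slots per plane, one beyond the cache count (plausibly a terminator or overflow slot). A hedged initialization sketch, assuming -1 marks an unassigned mcache id:

    /* assumption: -1 denotes an unassigned mcache id */
    static void init_mcache_allocations(struct dc_mcache_allocations *a)
    {
    	int i;

    	for (i = 0; i < MAX_MCACHES + 1; i++) {
    		a->global_mcache_ids_plane0[i] = -1;
    		a->global_mcache_ids_plane1[i] = -1;
    		a->global_mcache_ids_mall_plane0[i] = -1;
    		a->global_mcache_ids_mall_plane1[i] = -1;
    	}
    }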
struct resource_create_funcs {
void (*read_dce_straps)(
struct dc_context *ctx, struct resource_straps *straps);
@@ -628,8 +636,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
-bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
/* Get hw programming parameters container from pipe context
* @pipe_ctx: pipe context
* @dscl_prog_data: struct to hold programmable hw reg values
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 953f4a4dacad..33ce470e4c88 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -37,36 +37,9 @@
#include "ivsrcid/ivsrcid_vislands30.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
index 2c72074310c7..d777b85e70da 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
@@ -46,36 +46,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -391,5 +364,3 @@ struct irq_service *dal_irq_service_dce60_create(
dce60_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 49317934ef4f..3a9163acb49b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -37,36 +37,9 @@
#include "dc_types.h"
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- DC_HPD1_INT_STATUS,
- DC_HPD1_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- DC_HPD1_INT_CONTROL,
- DC_HPD1_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd1_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -303,5 +276,3 @@ struct irq_service *dal_irq_service_dce80_create(
dce80_irq_construct(irq_service, init_data);
return irq_service;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index 9ca28565a9d1..4ce9edd16344 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 916f0c974637..5847af0e66cb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn20(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 1d61d475d36f..6417011d2246 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -80,36 +80,9 @@ static enum dc_irq_source to_dal_irq_source_dcn201(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 42cdfe6c3538..71d2f065140b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -132,36 +132,9 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
return DC_IRQ_SOURCE_INVALID;
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index a443a8abb1ea..2a4080bdcf6b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -139,36 +139,9 @@ static enum dc_irq_source to_dal_irq_source_dcn30(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -447,4 +420,3 @@ struct irq_service *dal_irq_service_dcn30_create(
dcn30_irq_construct(irq_service, init_data);
return irq_service;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
index 8ffc7e2c681a..624f1ac309f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
@@ -126,26 +126,9 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
index 262bb8b74b15..137caffae916 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
@@ -77,26 +77,9 @@ static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_servi
}
}
-static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
index 53e78ae7eecf..921cb167d920 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
@@ -128,36 +128,9 @@ static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_servic
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
index e0563e880432..0118fd6e5db0 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
@@ -130,36 +130,9 @@ static enum dc_irq_source to_dal_irq_source_dcn314(struct irq_service *irq_servi
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
index 2ef22299101a..adebfc888618 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
@@ -135,36 +135,9 @@ static enum dc_irq_source to_dal_irq_source_dcn315(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
index f0ac0aeeac51..e9e315c75d76 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
@@ -129,36 +129,9 @@ static enum dc_irq_source to_dal_irq_source_dcn32(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -191,6 +164,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -259,6 +242,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -270,14 +260,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
-}
-
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -285,6 +267,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -387,21 +383,29 @@ irq_source_info_dcn32[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3),
};
static const struct irq_service_funcs irq_service_funcs_dcn32 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
index ea8c271171bc..79e5e8c137ca 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
@@ -127,36 +127,9 @@ static enum dc_irq_source to_dal_irq_source_dcn35(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
index 7ec8e0de2f01..163b8ee9ebf7 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
@@ -106,36 +106,9 @@ static enum dc_irq_source to_dal_irq_source_dcn351(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
index ea958628f8b8..f716ab0fd30e 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
@@ -105,36 +105,9 @@ static enum dc_irq_source to_dal_irq_source_dcn36(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
index b43c9524b0de..fd9bb1950c20 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
@@ -109,36 +109,9 @@ static enum dc_irq_source to_dal_irq_source_dcn401(
}
}
-static bool hpd_ack(
- struct irq_service *irq_service,
- const struct irq_source_info *info)
-{
- uint32_t addr = info->status_reg;
- uint32_t value = dm_read_reg(irq_service->ctx, addr);
- uint32_t current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE_DELAYED);
-
- dal_irq_service_ack_generic(irq_service, info);
-
- value = dm_read_reg(irq_service->ctx, info->enable_reg);
-
- set_reg_field_value(
- value,
- current_status ? 0 : 1,
- HPD0_DC_HPD_INT_CONTROL,
- DC_HPD_INT_POLARITY);
-
- dm_write_reg(irq_service->ctx, info->enable_reg, value);
-
- return true;
-}
-
static struct irq_source_info_funcs hpd_irq_info_funcs = {
.set = NULL,
- .ack = hpd_ack
+ .ack = hpd0_ack
};
static struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
@@ -171,6 +144,16 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack = NULL
};
+static struct irq_source_info_funcs vline1_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static struct irq_source_info_funcs vline2_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
@@ -239,6 +222,13 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
* of DCE's DC_IRQ_SOURCE_VUPDATEx.
*/
@@ -250,13 +240,6 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vupdate_no_lock_irq_info_funcs\
}
-#define vblank_int_entry(reg_num)\
- [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
- IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
- }
#define vline0_int_entry(reg_num)\
[DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
@@ -264,6 +247,20 @@ static struct irq_source_info_funcs vline0_irq_info_funcs = {
OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
.funcs = &vline0_irq_info_funcs\
}
+#define vline1_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_CLEAR),\
+ .funcs = &vline1_irq_info_funcs\
+ }
+#define vline2_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE2 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_CLEAR),\
+ .funcs = &vline2_irq_info_funcs\
+ }
#define dmub_outbox_int_entry()\
[DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\
IRQ_REG_ENTRY_DMUB(\
@@ -364,21 +361,29 @@ irq_source_info_dcn401[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_no_lock_int_entry(0),
- vupdate_no_lock_int_entry(1),
- vupdate_no_lock_int_entry(2),
- vupdate_no_lock_int_entry(3),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
vblank_int_entry(3),
+ [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
+ dmub_outbox_int_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
vline0_int_entry(0),
vline0_int_entry(1),
vline0_int_entry(2),
vline0_int_entry(3),
- [DC_IRQ_SOURCE_DC5_VLINE1] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_DC6_VLINE1] = dummy_irq_entry(),
- dmub_outbox_int_entry(),
+ vline1_int_entry(0),
+ vline1_int_entry(1),
+ vline1_int_entry(2),
+ vline1_int_entry(3),
+ vline2_int_entry(0),
+ vline2_int_entry(1),
+ vline2_int_entry(2),
+ vline2_int_entry(3),
};
static const struct irq_service_funcs irq_service_funcs_dcn401 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index eca3d7ee7e4e..b595a11c5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -41,6 +41,16 @@
#include "reg_helper.h"
#include "irq_service.h"
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+//DC_HPD1_INT_STATUS
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED_MASK 0x10
+#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED__SHIFT 0x4
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK 0x100
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY__SHIFT 0x8
#define CTX \
@@ -177,3 +187,57 @@ enum dc_irq_source dal_irq_service_to_irq_source(
src_id,
ext_id);
}
+
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index b178f85944cd..bbcef3d2fe33 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -82,4 +82,12 @@ void dal_irq_service_set_generic(
const struct irq_source_info *info,
bool enable);
+bool hpd0_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+bool hpd1_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index 110f656d43ae..a2f7b933bebf 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -161,6 +161,20 @@ enum dc_irq_source {
DC_IRQ_SOURCE_DPCX_TX_PHYE,
DC_IRQ_SOURCE_DPCX_TX_PHYF,
+ DC_IRQ_SOURCE_DC1_VLINE2,
+ DC_IRQ_SOURCE_DC2_VLINE2,
+ DC_IRQ_SOURCE_DC3_VLINE2,
+ DC_IRQ_SOURCE_DC4_VLINE2,
+ DC_IRQ_SOURCE_DC5_VLINE2,
+ DC_IRQ_SOURCE_DC6_VLINE2,
+
+ DC_IRQ_SOURCE_DCI2C_RR_DDC1,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC2,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC3,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC4,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC5,
+ DC_IRQ_SOURCE_DCI2C_RR_DDC6,
+
DAL_IRQ_SOURCES_NUMBER
};
@@ -170,6 +184,8 @@ enum irq_type
IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
IRQ_TYPE_VLINE0 = DC_IRQ_SOURCE_DC1_VLINE0,
+ IRQ_TYPE_VLINE1 = DC_IRQ_SOURCE_DC1_VLINE1,
+ IRQ_TYPE_VLINE2 = DC_IRQ_SOURCE_DC1_VLINE2,
IRQ_TYPE_DCUNDERFLOW = DC_IRQ_SOURCE_DC1UNDERFLOW,
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index cc9191a5c9e6..9655e6fa53a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -611,6 +611,7 @@ static bool detect_dp(struct dc_link *link,
link->dpcd_caps.dongle_type = sink_caps->dongle_type;
link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
link->dpcd_caps.dpcd_rev.raw = 0;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = 0;
}
return true;
@@ -1007,21 +1008,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
- /*
- * If this is DP over USB4 link then we need to:
- * - Enable BW ALLOC support on DPtx if applicable
- */
- if (dc->config.usb4_bw_alloc_support) {
- if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
- /* update with non reduced link cap if bw allocation mode is supported */
- if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
- link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate =
- link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count =
- link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
- }
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) {
+ if (link_dpia_enable_usb4_dp_bw_alloc_mode(link) == false)
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc = false;
}
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 268626e73c54..273a3be6d593 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_stream_state *streams[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
uint8_t count;
int i;
@@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+ /* The subsequent call to dc_commit_updates_for_stream for a full update
+ * will release the current state and swap to a new state. Releasing the
+ * current state results in the stream pointers in the pipe_ctx structs
+ * to be zero'd. Hence, cache all streams prior to dc_commit_updates_for_stream.
+ */
+ for (i = 0; i < count; i++)
+ streams[i] = pipes[i]->stream;
+
for (i = 0; i < count; i++) {
- stream_update.stream = pipes[i]->stream;
+ stream_update.stream = streams[i];
dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
- pipes[i]->stream, &stream_update,
+ streams[i], &stream_update,
state);
}
@@ -2365,7 +2374,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
deallocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -2433,7 +2442,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (link->connector_signal == SIGNAL_TYPE_EDP && dc->debug.psp_disabled_wa) {
/* reset internal save state to default since eDP is off */
enum dp_panel_mode panel_mode = dp_get_panel_mode(pipe_ctx->stream->link);
- /* since current psp not loaded, we need to reset it to default*/
+ /* since current psp not loaded, we need to reset it to default */
link->panel_mode = panel_mode;
}
}
@@ -2611,7 +2620,7 @@ void link_set_dpms_on(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_hblank_reduction_on_rx(pipe_ctx);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
allocate_usb4_bandwidth(pipe_ctx->stream);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index f6b6b19e7481..1a04f4b74585 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -156,6 +156,7 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_get_encoding_format = link_dp_get_encoding_format;
link_srv->dp_should_enable_fec = dp_should_enable_fec;
link_srv->dp_decide_link_settings = link_decide_link_settings;
+ link_srv->dp_decide_tunnel_settings = link_decide_dp_tunnel_settings;
link_srv->mst_decide_link_encoding_format =
mst_decide_link_encoding_format;
link_srv->edp_decide_link_settings = edp_decide_link_settings;
@@ -464,6 +465,7 @@ static bool construct_phy(struct dc_link *link,
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_read_request = DC_IRQ_SOURCE_INVALID;
link->link_status.dpcd_caps = &link->dpcd_caps;
link->dc = init_params->dc;
@@ -514,6 +516,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_HDMI_TYPE_A:
link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ if (link->hpd_gpio)
+ link->irq_source_read_request =
+ dal_irq_get_read_request(link->hpd_gpio);
break;
case CONNECTOR_ID_SINGLE_LINK_DVID:
case CONNECTOR_ID_SINGLE_LINK_DVII:
@@ -653,7 +658,7 @@ static bool construct_phy(struct dc_link *link,
}
/* Look for device tag that matches connector signal,
- * CRT for rgb, LCD for other supported signal tyes
+ * CRT for rgb, LCD for other supported signal types
*/
if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
link->device_tag.dev_id))
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 21ee0d96c9d4..0f965380a9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -158,6 +158,14 @@ uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count)
+{
+ /* Calculate offset for LTTPR closest to DPTX, which is highest in the chain
+ * Offset is 0 for single LTTPR cases as base LTTPR DPCD addresses target LTTPR 1
+ */
+ return DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE * (lttpr_count - 1);
+}
+
uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
{
switch (bw) {
@@ -377,9 +385,15 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
bool dp_is_lttpr_present(struct dc_link *link)
{
/* Some sink devices report invalid LTTPR revision, so don't validate against that cap */
- return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ uint32_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ bool is_lttpr_present = (lttpr_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4);
+
+ if (lttpr_count > 0 && !is_lttpr_present)
+ DC_LOG_ERROR("LTTPR count is nonzero but invalid lane count reported. Assuming no LTTPR present.\n");
+
+ return is_lttpr_present;
}
/* in DP compliance test, DPR-120 may have
@@ -1543,6 +1557,8 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
uint8_t lttpr_dpcd_data[10] = {0};
enum dc_status status;
bool is_lttpr_present;
+ uint32_t lttpr_count;
+ uint32_t closest_lttpr_offset;
/* Logic to determine LTTPR support */
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
@@ -1594,20 +1610,22 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ lttpr_count == 0) {
/* If you see this message consistently, either the host platform has FIXED_VS flag
* incorrectly configured or the sink device is returning an invalid count.
*/
DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.",
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ lttpr_count = 1;
DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = dp_is_lttpr_present(link);
DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
@@ -1615,11 +1633,25 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
if (is_lttpr_present) {
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
- CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), "LTTPR IEEE OUI: ");
+ // Identify closest LTTPR to determine if workarounds required for known embedded LTTPR
+ closest_lttpr_offset = dp_get_closest_lttpr_offset(lttpr_count);
+
+ core_link_read_dpcd(link, (DP_LTTPR_IEEE_OUI + closest_lttpr_offset),
+ link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
+ core_link_read_dpcd(link, (DP_LTTPR_DEVICE_ID + closest_lttpr_offset),
+ link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
- core_link_read_dpcd(link, DP_LTTPR_DEVICE_ID, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
- CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), "LTTPR Device ID: ");
+ if (lttpr_count > 1) {
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+ "Closest LTTPR To Host's IEEE OUI: ");
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+ "Closest LTTPR To Host's LTTPR Device ID: ");
+ } else {
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+ "LTTPR IEEE OUI: ");
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+ "LTTPR Device ID: ");
+ }
}
return status;
@@ -2013,11 +2045,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
/* Read DP tunneling information. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dpcd_get_tunneling_device_data(link);
- if (status != DC_OK)
- dm_error("%s: Read DP tunneling device data failed.\n", __func__);
- }
+ status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ DC_LOG_DP2("%s: Read DP tunneling device data failed.\n", __func__);
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
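
A quick worked example of the closest-LTTPR offset arithmetic added above, assuming the DP-spec per-repeater DPCD stride DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE of 0x50 and a 3-byte OUI field:

/* Sketch: with three repeaters, the one closest to the DPTX is LTTPR 3,
 * so its fields sit two strides above the base (LTTPR 1) addresses.
 */
uint8_t oui[3];
uint32_t off = dp_get_closest_lttpr_offset(3);	/* 0x50 * (3 - 1) = 0xA0 */

core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI + off, oui, sizeof(oui));
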
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 0ce0af3ddbeb..940b147cc5d4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -48,6 +48,9 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
/* Convert PHY repeater count read from DPCD uint8_t. */
uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+/* Calculate embedded LTTPR address offset for vendor-specific behaviour */
+uint32_t dp_get_closest_lttpr_offset(uint8_t lttpr_count);
+
bool dp_is_sink_present(struct dc_link *link);
bool dp_is_lttpr_present(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 0d123e647652..22bfdced64ab 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -62,6 +62,36 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling == false)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) {
+ status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY,
+ dpcd_dp_tun_data, 1);
+
+ if (status != DC_OK)
+ goto err;
+
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = dpcd_dp_tun_data[0];
+ }
+
+ DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) "
+ "DPIA_BW_Alloc_support=%d "
+ "CM_BW_Alloc_support=%d ",
+ __func__, link->link_index,
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id,
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.bits.dpia_num,
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc,
+ link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
+
status = core_link_read_dpcd(
link,
DP_USB4_ROUTER_TOPOLOGY_ID,
@@ -71,13 +101,6 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
if (status != DC_OK)
goto err;
- link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
- dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
- dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
- dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
-
for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
@@ -92,6 +115,7 @@ bool dpia_query_hpd_status(struct dc_link *link)
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.header.payload_bytes = sizeof(cmd.query_hpd.data);
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
@@ -119,3 +143,20 @@ bool dpia_query_hpd_status(struct dc_link *link)
return link->hpd_status;
}
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting)
+{
+ struct dc_link *link = stream->link;
+
+ memset(dp_tunnel_setting, 0, sizeof(*dp_tunnel_setting));
+
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT) || (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
+ dp_tunnel_setting->should_enable_dp_tunneling =
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling;
+
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support)
+ dp_tunnel_setting->should_use_dp_bw_allocation = true;
+ }
+}
+
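
For context, a hedged sketch of how a caller might plumb the decided settings into the pipe, matching the pipe_ctx->link_config.dp_tunnel_settings checks visible in link_dpms.c above (the surrounding setup function is assumed, not shown in this patch):

/* Sketch: decide tunnel settings once per stream, store them on the
 * pipe, and let the DPMS paths gate USB4 BW (de)allocation on them.
 */
link_decide_dp_tunnel_settings(stream, &pipe_ctx->link_config.dp_tunnel_settings);

if (pipe_ctx->link_config.dp_tunnel_settings.should_use_dp_bw_allocation)
	allocate_usb4_bandwidth(pipe_ctx->stream);
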
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 363f45a1a964..a61edfc9ca7a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -38,4 +38,10 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
* Returns true if HPD high.
*/
bool dpia_query_hpd_status(struct dc_link *link);
+
+/* Decide the DP tunneling settings based on the DPCD capabilities
+ */
+void link_decide_dp_tunnel_settings(struct dc_stream_state *stream,
+ struct dc_tunnel_settings *dp_tunnel_setting);
+
#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index a254ead2f7e8..3af7564a84f1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -46,9 +46,10 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA
- && link->hpd_status
- && link->dpia_bw_alloc_config.bw_alloc_enabled);
+ return (link && link->hpd_status
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
+ && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
static void reset_bw_alloc_struct(struct dc_link *link)
@@ -141,7 +142,7 @@ static int get_non_reduced_max_lane_count(struct dc_link *link)
 * granularity, Driver_ID, CM_Group, & populate the BW allocation structs
* for host router and dpia
*/
-static void init_usb4_bw_struct(struct dc_link *link)
+static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
{
reset_bw_alloc_struct(link);
@@ -282,49 +283,26 @@ static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
// ------------------------------------------------------------------
// PUBLIC FUNCTIONS
// ------------------------------------------------------------------
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
{
bool ret = false;
- uint8_t response = 0,
- bw_support_dpia = 0,
- bw_support_cm = 0;
+ uint8_t val;
- if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
- goto out;
+ if (link->hpd_status) {
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- if (core_link_read_dpcd(
- link,
- DP_TUNNELING_CAPABILITIES,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_dpia = (response >> 7) & 1;
-
- if (core_link_read_dpcd(
- link,
- USB4_DRIVER_BW_CAPABILITY,
- &response,
- sizeof(uint8_t)) == DC_OK)
- bw_support_cm = (response >> 7) & 1;
-
- /* Send request acknowledgment to Turn ON DPTX support */
- if (bw_support_cm && bw_support_dpia) {
-
- response = 0x80;
- if (core_link_write_dpcd(
- link,
- DPTX_BW_ALLOCATION_MODE_CONTROL,
- &response,
- sizeof(uint8_t)) != DC_OK) {
- DC_LOG_DEBUG("%s: FAILURE Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
- } else {
- // SUCCESS Enabled DPtx BW Allocation Mode Support
- DC_LOG_DEBUG("%s: SUCCESS Enabling DPtx BW Allocation Mode Support for link(%d)\n",
- __func__, link->link_index);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
+
+ retrieve_usb4_dp_bw_allocation_info(link);
+
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
- ret = true;
- init_usb4_bw_struct(link);
link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
/*
* During DP tunnel creation, CM preallocates BW and reduces estimated BW of other
@@ -332,11 +310,12 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
* to make the CM to release preallocation and update estimated BW correctly for
* all DPIAs per host router
*/
+ // TODO: Zero allocation can be removed once the MSFT CM fix has been released
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
- }
+ } else
+ DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
}
-out:
return ret;
}
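
The enable itself is one DPCD write; a sketch of the bit composition, where bit 7 as the enable matches the raw 0x80 the removed code wrote, and the unmask-IRQ bit value is an assumption rather than something this patch states:

/* Assumed layout of DPTX_BW_ALLOCATION_MODE_CONTROL:
 *   bit 7 (0x80): enable DPTX BW allocation mode (matches old raw 0x80)
 *   bit 6 (0x40): unmask BW allocation IRQ (assumed value)
 */
uint8_t val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;

core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(val));
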
@@ -378,7 +357,8 @@ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
*/
void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
{
- if (link && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dpia_bw_alloc_config.bw_alloc_enabled) {
+ if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ && link->dpia_bw_alloc_config.bw_alloc_enabled) {
//1. Hot Plug
if (link->hpd_status && peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
@@ -401,7 +381,7 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
if (link_dp_is_bw_alloc_available(link))
link_dpia_send_bw_alloc_request(link, req_bw);
else
- DC_LOG_DEBUG("%s: Not able to send the BW Allocation request", __func__);
+ DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
}
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 6df9b946b00f..801965b5f9a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -43,13 +43,13 @@ enum bw_type {
};
/*
- * Enable BW Allocation Mode Support from the DP-Tx side
+ * Enable USB4 DP BW allocation mode
*
* @link: pointer to the dc_link struct instance
*
* return: SUCCESS or FAILURE
*/
-bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link);
+bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link);
/*
* Allocates only what the stream needs for bw, so if:
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 5be00e4ce10b..693477413347 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -229,6 +229,10 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
+ /* Increment desync error counter if a desync error is detected */
+ if (replay_configuration.bits.DESYNC_ERROR_STATUS)
+ link->replay_settings.replay_desync_error_fail_count++;
+
if (link->replay_settings.config.force_disable_desync_error_check)
return;
@@ -240,9 +244,6 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_configuration.raw,
sizeof(replay_configuration.raw));
- /* Update desync error counter */
- link->replay_settings.replay_desync_error_fail_count++;
-
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -351,7 +352,7 @@ enum dc_status dp_read_hpd_rx_irq_data(
irq_data->raw,
DP_SINK_STATUS - DP_SINK_COUNT + 1);
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
retval = core_link_read_dpcd(
link, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
&irq_data->bytes.link_service_irq_esi0.raw, 1);
@@ -520,7 +521,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
dp_trace_link_loss_increment(link);
}
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling) {
if (hpd_irq_dpcd_data.bytes.link_service_irq_esi0.bits.DP_LINK_TUNNELING_IRQ)
dp_handle_tunneling_irq(link);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index ef358afdfb65..2dc1a660e504 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -785,7 +785,6 @@ void override_training_settings(
lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
-
}
enum dc_dp_training_pattern decide_cr_training_pattern(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 5a5d48fadbf2..66d0fb1b9b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -142,6 +142,14 @@ void decide_8b_10b_training_settings(
lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+ /* Some embedded LTTPRs rely on receiving TPS2 before LT to interop reliably with sensitive VGA dongles
+ * This allows these LTTPRs to minimize freq/phase and skew variation during lock and deskew sequences
+ */
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2) {
+ lt_settings->lttpr_early_tps2 = true;
+ }
}
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -173,6 +181,42 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
}
+static void set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t lttpr_count)
+{
+ /* Vendor-specific LTTPR early TPS2 sequence:
+ * 1. Output TPS2
+ * 2. Wait 400us
+ * 3. Set link settings as usual
+ * 4. Write TPS1 to DP_TRAINING_PATTERN_SET_PHY_REPEATERx targeting LTTPR closest to host
+ * 5. Wait 1ms
+ * 6. Begin link training as usual
+ */
+
+ uint32_t closest_lttpr_address_offset = dp_get_closest_lttpr_offset(lttpr_count);
+
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = 1;
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE = 1;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS2. Wait 400us.\n", __func__);
+
+ dp_set_hw_training_pattern(link, link_res, DP_TRAINING_PATTERN_SEQUENCE_2, DPRX);
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+
+ udelay(400);
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + closest_lttpr_address_offset, &dpcd_pattern.raw, 1);
+
+ udelay(1000);
+}
+
enum link_training_result perform_8b_10b_clock_recovery_sequence(
struct dc_link *link,
const struct link_resource *link_res,
@@ -383,7 +427,7 @@ enum link_training_result dp_perform_8b_10b_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t repeater_cnt;
+ uint8_t repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
uint8_t repeater_id;
uint8_t lane = 0;
@@ -391,14 +435,16 @@ enum link_training_result dp_perform_8b_10b_link_training(
start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
/* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
+ if (lt_settings->lttpr_early_tps2)
+ set_link_settings_and_perform_early_tps2_retimer_pre_lt_sequence(link, link_res, lt_settings, repeater_cnt);
+ else
+ dpcd_set_link_settings(link, lt_settings);
if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
*/
- repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
repeater_id--) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 1e4adbc764ea..da74c2b5854f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -524,7 +524,7 @@ bool edp_set_backlight_level(const struct dc_link *link,
struct dc *dc = link->ctx->dc;
uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
uint32_t frame_ramp = backlight_level_params->frame_ramp;
- DC_LOGGER_INIT(link->ctx->logger);
+
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
@@ -1022,6 +1022,9 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
&alpm_config.raw,
sizeof(alpm_config.raw));
}
+
+ link->replay_settings.config.replay_video_conferencing_optimization_enabled = false;
+
return true;
}
@@ -1130,11 +1133,11 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
struct abm *abm = NULL;
for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx.stream;
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
if (stream && stream->link == link) {
- abm = pipe_ctx.stream_res.abm;
+ abm = pipe_ctx->stream_res.abm;
break;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index a0e9e9f0441a..b4cea2b8cb2a 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -370,275 +370,279 @@ void mpc32_program_shaper_luta_settings(
MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-}
-
-
-void mpc32_program_shaper_lutb_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id)
-{
- const struct gamma_curve *curve;
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
-
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
- REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
-
- curve = params->arr_curve_points;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_2_3[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
-
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_4_5[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_6_7[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
- MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_10_11[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_12_13[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_14_15[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_16_17[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_18_19[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_20_21[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_22_23[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_24_25[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_28_29[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_30_31[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMA_REGION_32_33[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+}
+
+
+void mpc32_program_shaper_lutb_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t mpcc_id)
+{
+ const struct gamma_curve *curve;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_START_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
- curve += 2;
- REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_B[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_G[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
+ REG_SET_2(MPCC_MCM_SHAPER_RAMB_END_CNTL_R[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ if (curve) {
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_0_1[mpcc_id], 0,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_2_3[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_4_5[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_6_7[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_8_9[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_10_11[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_12_13[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_14_15[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_16_17[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_18_19[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_20_21[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_22_23[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_24_25[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_26_27[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_28_29[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_30_31[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(MPCC_MCM_SHAPER_RAMB_REGION_32_33[mpcc_id], 0,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPCC_MCM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index ad67197557ca..98cf0cbd59ba 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -47,16 +47,6 @@ void mpc401_update_3dlut_fast_load_select(struct mpc *mpc, int mpcc_id, int hubp
REG_SET(MPCC_MCM_3DLUT_FAST_LOAD_SELECT[mpcc_id], 0, MPCC_MCM_3DLUT_FL_SEL, hubp_idx);
}
-void mpc401_get_3dlut_fast_load_status(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_GET_3(MPCC_MCM_3DLUT_FAST_LOAD_STATUS[mpcc_id],
- MPCC_MCM_3DLUT_FL_DONE, done,
- MPCC_MCM_3DLUT_FL_SOFT_UNDERFLOW, soft_underflow,
- MPCC_MCM_3DLUT_FL_HARD_UNDERFLOW, hard_underflow);
-}
-
void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
@@ -618,7 +608,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.set_bg_color = mpc1_set_bg_color,
.set_movable_cm_location = mpc401_set_movable_cm_location,
.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
- .get_3dlut_fast_load_status = mpc401_get_3dlut_fast_load_status,
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index ce6fbcf14d7a..8e35ebc603a9 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -241,23 +241,9 @@ void mpc401_update_3dlut_fast_load_select(
int mpcc_id,
int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(
- struct mpc *mpc,
- int mpcc_id,
- uint32_t *done,
- uint32_t *soft_underflow,
- uint32_t *hard_underflow);
-
void mpc401_update_3dlut_fast_load_select(
struct mpc *mpc,
int mpcc_id,
int hubp_idx);
-void mpc401_get_3dlut_fast_load_status(
- struct mpc *mpc,
- int mpcc_id,
- uint32_t *done,
- uint32_t *soft_underflow,
- uint32_t *hard_underflow);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index 81857ce6d68d..e7a90a437fff 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
@@ -502,7 +502,7 @@ void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *ref
REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate);
}
-static struct timing_generator_funcs dcn20_tg_funcs = {
+static const struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
index f2415eebdc09..772a8bfb949c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
@@ -129,7 +129,7 @@ static void optc201_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 1;
}
-static struct timing_generator_funcs dcn201_tg_funcs = {
+static const struct timing_generator_funcs dcn201_tg_funcs = {
.validate_timing = optc201_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index 78b58a449fa4..ee4665aa49e9 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
@@ -357,7 +357,7 @@ void optc3_tg_init(struct timing_generator *optc)
optc1_clear_optc_underflow(optc);
}
-static struct timing_generator_funcs dcn30_tg_funcs = {
+static const struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
index 65e9089b7f31..38f85bc2681a 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
@@ -109,7 +109,7 @@ void optc301_setup_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_CLEAR, 1);
}
-static struct timing_generator_funcs dcn30_tg_funcs = {
+static const struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index ef536f37b4ed..4f1830ba619f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -315,7 +315,7 @@ void optc31_read_otg_state(struct timing_generator *optc,
s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
-static struct timing_generator_funcs dcn31_tg_funcs = {
+static const struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 0e603bad0d12..4a2caca37255 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
@@ -192,7 +192,7 @@ static void optc314_set_h_timing_div_manual_mode(struct timing_generator *optc,
}
-static struct timing_generator_funcs dcn314_tg_funcs = {
+static const struct timing_generator_funcs dcn314_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 2cdd19ba634b..b2b226bcd871 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -297,7 +297,7 @@ static void optc32_set_drr(
optc32_setup_manual_trigger(optc);
}
-static struct timing_generator_funcs dcn32_tg_funcs = {
+static const struct timing_generator_funcs dcn32_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index b86fe2b094f8..72bff94cb57d 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -428,7 +428,7 @@ static void optc35_set_long_vtotal(
}
}
-static struct timing_generator_funcs dcn35_tg_funcs = {
+static const struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
@@ -507,6 +507,7 @@ void dcn35_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
+ optc1->max_frame_count = 0xFFFFFF;
dcn35_timing_generator_set_fgcg(
optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index 382ac18e7854..ff79c38287df 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -459,7 +459,7 @@ bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
return true;
}
-static struct timing_generator_funcs dcn401_tg_funcs = {
+static const struct timing_generator_funcs dcn401_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
index b8cddef6b3d2..5b42da8b79c2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -27,6 +27,24 @@
# DCE
###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+RESOURCE_DCE60 = dce60_resource.o
+
+AMD_DAL_RESOURCE_DCE60 = $(addprefix $(AMDDALPATH)/dc/resource/dce60/,$(RESOURCE_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE60)
+endif
+
+###############################################################################
+
+RESOURCE_DCE80 = dce80_resource.o
+
+AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
+
+###############################################################################
+
RESOURCE_DCE100 = dce100_resource.o
AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100))
@@ -57,14 +75,6 @@ AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOUR
AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120)
-###############################################################################
-
-RESOURCE_DCE80 = dce80_resource.o
-
-AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
-
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index e698543ec937..84f73fdb0f95 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -836,7 +836,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce100_validate_bandwidth(
+static enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -858,7 +858,7 @@ static bool dce100_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce100_validate_surface_sets(
@@ -1069,7 +1069,7 @@ static bool dce100_resource_construct(
pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 40;
- dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.i2c_speed_in_khz_hdcp = 40;
dc->caps.max_cursor_size = 128;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dual_link_dvi = true;
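
This is the first of many conversions below from a bool return to enum dc_status; the recurring idiom maps the legacy boolean onto DC_OK or DC_FAIL_BANDWIDTH_VALIDATE. A self-contained sketch of that idiom (enum values and names are stand-ins, not the real dc_status definitions):

#include <stdbool.h>
#include <stdio.h>

enum demo_dc_status {
        DEMO_DC_OK = 1,
        DEMO_DC_FAIL_BANDWIDTH_VALIDATE,
};

static bool legacy_check(int clk_khz)
{
        return clk_khz <= 600000; /* pretend 600 MHz is the budget */
}

/* new-style wrapper: keep the old check, widen the return type */
static enum demo_dc_status validate_bandwidth(int clk_khz)
{
        return legacy_check(clk_khz) ? DEMO_DC_OK : DEMO_DC_FAIL_BANDWIDTH_VALIDATE;
}

int main(void)
{
        printf("%d\n", validate_bandwidth(550000)); /* prints 1 (DEMO_DC_OK) */
        return 0;
}
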
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index 035c6cfdaee5..f3d5baac11bf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -960,7 +960,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static bool dce110_validate_bandwidth(
+static enum dc_status dce110_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -1031,7 +1031,7 @@ static bool dce110_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 480a50967385..4225cae68c10 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -883,7 +883,7 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -952,7 +952,7 @@ bool dce112_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
- return result;
+ return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status resource_map_phy_clock_resources(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 1f57ebc6f9b4..6221d749246d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
@@ -42,7 +42,7 @@ enum dc_status dce112_validate_with_context(
struct dc_state *context,
struct dc_state *old_context);
-bool dce112_validate_bandwidth(
+enum dc_status dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 889f314cac65..d9ffdded5ce1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -48,7 +48,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
-#include "dce60/dce60_hw_sequencer.h"
+#include "dce60/dce60_hwseq.h"
#include "dce100/dce100_resource.h"
#include "dce/dce_panel_cntl.h"
@@ -863,7 +863,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce60_validate_bandwidth(
+static enum dc_status dce60_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -885,7 +885,7 @@ static bool dce60_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce60_validate_surface_sets(
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
index 5d653a76b0b0..5d653a76b0b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 3d5113f010bb..bd5811f97531 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -869,7 +869,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static bool dce80_validate_bandwidth(
+static enum dc_status dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -891,7 +891,7 @@ static bool dce80_validate_bandwidth(
context->bw_ctx.bw.dce.yclk_khz = 0;
}
- return true;
+ return DC_OK;
}
static bool dce80_validate_surface_sets(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index e92f14d50adb..be4ade0853e9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -23,6 +23,7 @@
*
*/
+#include "core_status.h"
#include "dm_services.h"
#include "dc.h"
@@ -1125,7 +1126,7 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
-static bool dcn10_validate_bandwidth(
+static enum dc_status dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -1136,7 +1137,7 @@ static bool dcn10_validate_bandwidth(
voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
DC_FP_END();
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
@@ -1245,6 +1246,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && pool->stream_enc[i]->id ==
+ link->dpia_preferred_eng_id)
+ return pool->stream_enc[i];
}
}
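
The added branch extends the preferred-encoder match, previously applied only to PHY endpoints, to USB4 DPIA links via dpia_preferred_eng_id. A reduced model of the selection loop (endpoint types and IDs are toy values):

#include <stdio.h>

enum ep_type { EP_PHY, EP_USB4_DPIA };

struct link { enum ep_type type; int preferred_eng; int dpia_preferred_eng; };

static int pick_enc(const struct link *l, const int *enc_ids, int n)
{
        for (int i = 0; i < n; i++) {
                if (l->type == EP_PHY && enc_ids[i] == l->preferred_eng)
                        return i;
                /* new: honor the DPIA-preferred engine for USB4 links too */
                if (l->type == EP_USB4_DPIA && enc_ids[i] == l->dpia_preferred_eng)
                        return i;
        }
        return -1; /* fall through to any free encoder */
}

int main(void)
{
        int ids[] = { 0, 1, 2, 3 };
        struct link l = { EP_USB4_DPIA, 0, 2 };

        printf("%d\n", pick_enc(&l, ids, 4)); /* prints 2 */
        return 0;
}
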
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index e4eca3e32c1b..3405be07f5e3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2124,7 +2124,7 @@ validate_out:
return out;
}
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
@@ -2132,14 +2132,14 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index 4cee3fa11a7f..c0e062c7407d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
@@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params(
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 4bd5c2278596..9ab01b65b177 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -923,7 +923,7 @@ validate_out:
* with DC_FP_START()/DC_FP_END(). Use the same approach as for
* dcn20_validate_bandwidth in dcn20_resource.c.
*/
-static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate)
{
bool voltage_supported;
@@ -931,14 +931,14 @@ static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
if (!pipes)
- return false;
+ return DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes);
DC_FP_END();
kfree(pipes);
- return voltage_supported;
+ return voltage_supported ? DC_OK : DC_NOT_SUPPORTED;
}
static void dcn21_destroy_resource_pool(struct resource_pool **pool)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index f01ced015072..f631ae34e320 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -1891,8 +1891,6 @@ static int get_refresh_rate(struct dc_state *context)
/* check if refresh rate at least 120hz */
timing = &context->streams[0]->timing;
- if (timing == NULL)
- return 0;
h_v_total = timing->h_total * timing->v_total;
if (h_v_total == 0)
@@ -2037,7 +2035,7 @@ void dcn30_calculate_wm_and_dlg(
DC_FP_END();
}
-bool dcn30_validate_bandwidth(struct dc *dc,
+enum dc_status dcn30_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -2094,7 +2092,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 8e6b8b7368fd..689d9bdace81 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
@@ -56,7 +56,7 @@ unsigned int dcn30_calc_max_scaled_time(
enum mmhubbub_wbif_mode mode,
unsigned int urgent_watermark);
-bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
+enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate);
bool dcn30_internal_validate_bw(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index dddddbfef85f..51ca0b2959fc 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1758,7 +1758,7 @@ dcn31_set_mcif_arb_params(struct dc *dc,
DC_FP_END();
}
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1813,7 +1813,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
@@ -1954,6 +1954,9 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.disable_hbr_audio_dp2 = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 551ad912f7be..dd82815d7efe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -37,7 +37,7 @@ struct dcn31_resource_pool {
struct resource_pool base;
};
-bool dcn31_validate_bandwidth(struct dc *dc,
+enum dc_status dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
void dcn31_calculate_wm_and_dlg(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 26becc4cb804..8383e2e59be5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1694,7 +1694,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1750,7 +1750,7 @@ validate_out:
BW_VAL_TRACE_FINISH();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn314_res_pool_funcs = {
@@ -1885,6 +1885,9 @@ static bool dcn314_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index 49ffe71018df..f8ba531d6342 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
@@ -39,7 +39,7 @@ struct dcn314_resource_pool {
struct resource_pool base;
};
-bool dcn314_validate_bandwidth(struct dc *dc,
+enum dc_status dcn314_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 944650cb13de..bb0dae0be5b8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -24,6 +24,7 @@
*
*/
+#include "dc_types.h"
#include "dm_services.h"
#include "dc.h"
@@ -1806,19 +1807,56 @@ validate_out:
return out;
}
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
else
- out = dml1_validate(dc, context, fast_validate);
- return out;
+ status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ }
+ }
+
+ if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ else
+ status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
int dcn32_populate_dml_pipes_from_context(
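
The rewritten flow validates once, and only when full validation succeeds with SubVP in use does it test each stream's HW cursor; any conflict marks the stream and triggers exactly one re-validation with SubVP limited. A compact, compilable model of that retry shape (all helpers are stand-ins):

#include <stdbool.h>
#include <stdio.h>

enum status { OK, FAIL_BW, FAIL_CURSOR };

/* stand-ins for dml2_validate() and the cursor capability check */
static bool dml_validate(void)            { return true; }
static bool cursor_fits(bool subvp_limit) { return subvp_limit; }

static enum status validate(void)
{
        bool subvp_limit = false; /* reset the limit up front, as the patch does */
        enum status s = dml_validate() ? OK : FAIL_BW;

        if (s == OK && !cursor_fits(subvp_limit)) {
                subvp_limit = true; /* disable subvp for this stream */
                s = FAIL_CURSOR;
        }
        if (s == FAIL_CURSOR) /* exactly one retry with subvp limited */
                s = dml_validate() ? OK : FAIL_BW;
        return s;
}

int main(void)
{
        printf("final status: %d\n", validate()); /* 0 == OK after the retry */
        return 0;
}
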
@@ -2042,6 +2080,18 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
DC_FP_END();
}
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ bool limit_cur_to_buf;
+
+ limit_cur_to_buf = dc_state_get_stream_subvp_cursor_limit(stream, state) &&
+ !stream->hw_cursor_req;
+
+ return limit_cur_to_buf ? dc->caps.max_buffered_cursor_size : dc->caps.max_cursor_size;
+}
+
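
dcn32_get_max_hw_cursor_size() selects between the two caps set in the construct functions; a toy model of the rule, with max_cursor_size shown at its eventual 256 per the TODO below:

#include <stdbool.h>
#include <stdio.h>

static unsigned int max_hw_cursor_size(bool subvp_limit, bool hw_cursor_req)
{
        const unsigned int max_buffered = 64;   /* dc->caps.max_buffered_cursor_size */
        const unsigned int max_cursor   = 256;  /* dc->caps.max_cursor_size, per the TODO */

        /* clamp to the buffered size only when subvp limits the stream
         * and no hw cursor was explicitly requested */
        return (subvp_limit && !hw_cursor_req) ? max_buffered : max_cursor;
}

int main(void)
{
        printf("%u %u\n", max_hw_cursor_size(true, false),   /* 64 */
                          max_hw_cursor_size(false, false)); /* 256 */
        return 0;
}
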
static struct resource_funcs dcn32_res_pool_funcs = {
.destroy = dcn32_destroy_resource_pool,
.link_enc_create = dcn32_link_encoder_create,
@@ -2067,7 +2117,8 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2151,6 +2202,7 @@ static bool dcn32_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
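
The arithmetic behind the "sqrt(16 * 1024 / 4)" comment: a 16 KiB cursor buffer at 4 bytes per pixel holds 16 * 1024 / 4 = 4096 pixels, and the largest square cursor that fits is sqrt(4096) = 64, hence the 64x64 cap (this assumes a 32-bit cursor format, which the comment leaves implicit).
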
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 1aa4ced29291..d60ed77eda80 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -98,7 +98,7 @@ void dcn32_add_phantom_pipes(struct dc *dc,
unsigned int pipe_cnt,
unsigned int index);
-bool dcn32_validate_bandwidth(struct dc *dc,
+enum dc_status dcn32_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
@@ -188,6 +188,10 @@ void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes);
+unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
+ struct dc_state *state,
+ const struct dc_stream_state *stream);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 38d76434683e..7db1f7a5613f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1624,7 +1624,8 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
- .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1709,6 +1710,7 @@ static bool dcn321_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index ffd2b816cd02..e01aa2f2e13e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1732,7 +1732,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1743,13 +1743,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
@@ -1894,6 +1894,9 @@ static bool dcn35_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASICs to have an equal or higher value, in order to
@@ -1903,7 +1906,7 @@ static bool dcn35_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 98f5bc1b929e..4ebe4e00a4f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1712,7 +1712,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn351_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1723,13 +1723,13 @@ static bool dcn351_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
static struct resource_funcs dcn351_res_pool_funcs = {
@@ -1866,6 +1866,9 @@ static bool dcn351_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASICs to have an equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index b6468573dc33..db36b8f9ce65 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1713,7 +1713,7 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config
}
-static bool dcn35_validate_bandwidth(struct dc *dc,
+static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
@@ -1724,13 +1724,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
fast_validate);
if (fast_validate)
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
- return out;
+ return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1867,6 +1867,9 @@ static bool dcn36_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASICs to have an equal or higher value, in order to
@@ -1876,7 +1879,7 @@ static bool dcn36_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
/* Sequential ONO is based on ASIC. */
- if (dc->ctx->asic_id.hw_internal_rev > 0x10)
+ if (dc->ctx->asic_id.hw_internal_rev >= 0x40)
dc->caps.sequential_ono = true;
/* Use pipe context based otg sync logic */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 7436dfbdf927..f420c4dafa03 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -1642,16 +1642,52 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta
return DC_OK;
}
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
{
- bool out = false;
+ unsigned int i;
+ enum dc_status status = DC_OK;
+ const struct dc_stream_state *stream;
+
+ /* reset cursor limitations on subvp */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) {
+ dc_state_set_stream_cursor_subvp_limit(stream, context, false);
+ }
+ }
+
if (dc->debug.using_dml2)
- out = dml2_validate(dc, context,
+ status = dml2_validate(dc, context,
context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
- fast_validate);
- return out;
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+
+ if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) {
+ /* check new stream configuration still supports cursor if subvp used */
+ for (i = 0; i < context->stream_count; i++) {
+ stream = context->streams[i];
+
+ if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM &&
+ stream->cursor_position.enable &&
+ !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) {
+ /* hw cursor cannot be supported with subvp active, so disable subvp for now */
+ dc_state_set_stream_cursor_subvp_limit(stream, context, true);
+ status = DC_FAIL_HW_CURSOR_SUPPORT;
+ }
+ }
+ }
+
+ if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) {
+ /* attempt to validate again with subvp disabled due to cursor */
+ if (dc->debug.using_dml2)
+ status = dml2_validate(dc, context,
+ context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2,
+ fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+
+ return status;
}
void dcn401_prepare_mcache_programming(struct dc *dc,
@@ -1770,7 +1806,8 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
.get_power_profile = dcn401_get_power_profile,
- .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe
+ .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe,
+ .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1846,8 +1883,9 @@ static bool dcn401_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 95;
dc->caps.i2c_speed_in_khz_hdcp = 95; /*1.4 w/a applied by default*/
- /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
+ /* used to set cursor pitch, so must be a power of 2 (HW actually supports 78x78) */
dc->caps.max_cursor_size = 64;
+ dc->caps.max_buffered_cursor_size = 64;
dc->caps.cursor_not_scaled = true;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
@@ -1900,8 +1938,8 @@ static bool dcn401_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
- dc->caps.color.dpp.hw_3d_lut = 1;
- dc->caps.color.dpp.ogam_ram = 1;
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 0;
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 4c259745d519..dc52a30991af 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -22,7 +22,7 @@ struct resource_pool *dcn401_create_resource_pool(
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state);
-bool dcn401_validate_bandwidth(struct dc *dc,
+enum dc_status dcn401_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 28348734d900..e0008c5f08ad 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -776,7 +776,7 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
* Do not bypass UV at 1:1 for cositing to be applied
*/
if (!enable_isharp) {
- if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one && !spl_in->basic_out.always_scale)
return SCL_MODE_SCALING_420_LUMA_BYPASS;
}
@@ -884,7 +884,7 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
/* Calculate number of tap with adaptive scaling off */
static void spl_get_taps_non_adaptive_scaler(
- struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
+ struct spl_scratch *spl_scratch, const struct spl_taps *in_taps, bool always_scale)
{
bool check_max_downscale = false;
@@ -944,15 +944,15 @@ static void spl_get_taps_non_adaptive_scaler(
spl_fixpt_from_fraction(6, 1));
SPL_ASSERT(check_max_downscale);
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz) && !always_scale)
spl_scratch->scl_data.taps.h_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert) && !always_scale)
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !always_scale)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !always_scale)
spl_scratch->scl_data.taps.v_taps_c = 1;
-
}
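
Threading always_scale through spl_get_taps_non_adaptive_scaler() keeps the scaler engaged at an exact 1:1 ratio when scaling is forced, instead of collapsing to 1 tap. A minimal model of the gate, with IDENTITY_RATIO reduced to an integer compare:

#include <stdbool.h>
#include <stdio.h>

/* ratio is fixed-point in the real code; here a plain int where 1000 means 1.0 */
static unsigned int pick_taps(int ratio, unsigned int taps, bool always_scale)
{
        bool identity = (ratio == 1000);

        /* bypass to 1 tap only if truly 1:1 AND the pipe may skip scaling */
        if (identity && !always_scale)
                return 1;
        return taps;
}

int main(void)
{
        printf("%u %u\n", pick_taps(1000, 4, false),  /* 1: bypass */
                          pick_taps(1000, 4, true));  /* 4: keep the scaler active */
        return 0;
}
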
/* Calculate optimal number of taps */
@@ -965,13 +965,15 @@ static bool spl_get_optimal_number_of_taps(
unsigned int max_taps_y, max_taps_c;
unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- bool skip_easf = false;
+ bool skip_easf = false;
+ bool always_scale = spl_in->basic_out.always_scale;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
+
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
spl_scratch->scl_data.viewport.width > max_downscale_src_width) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -980,7 +982,7 @@ static bool spl_get_optimal_number_of_taps(
/* Disable adaptive scaler and sharpener when integer scaling is enabled */
if (spl_in->scaling_quality.integer_scaling) {
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
*enable_easf_v = false;
*enable_easf_h = false;
*enable_isharp = false;
@@ -996,7 +998,7 @@ static bool spl_get_optimal_number_of_taps(
* taps = 4 for upscaling
*/
if (skip_easf)
- spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale);
else {
if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
@@ -1297,7 +1299,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
- dscl_prog_data->easf_v_sharp_factor = 0;
+ dscl_prog_data->easf_v_sharp_factor = 1;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
/* 2-bit, BF3 chroma mode correction calculation mode */
@@ -1461,7 +1463,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
- dscl_prog_data->easf_h_sharp_factor = 0;
+ dscl_prog_data->easf_h_sharp_factor = 1;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
@@ -1898,3 +1900,4 @@ bool SPL_NAMESPACE(spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out
spl_set_taps_data(dscl_prog_data, data);
return res;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 1c3949b24611..36a284305a70 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -480,6 +480,10 @@ enum sharpness_setting {
SHARPNESS_ZERO,
SHARPNESS_CUSTOM
};
+enum sharpness_range_source {
+ SHARPNESS_RANGE_DCN = 0,
+ SHARPNESS_RANGE_DCN_OVERRIDE
+};
struct spl_sharpness_range {
int sdr_rgb_min;
int sdr_rgb_max;
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
index 52d97918a3bd..ebf0287417e0 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c
@@ -29,8 +29,6 @@ static inline unsigned long long spl_complete_integer_division_u64(
{
unsigned long long result;
- SPL_ASSERT(divisor);
-
result = spl_div64_u64_rem(dividend, divisor, remainder);
return result;
@@ -196,8 +194,6 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
* Good idea to use Newton's method
*/
- SPL_ASSERT(arg.value);
-
return spl_fixpt_from_fraction(
spl_fixpt_one.value,
arg.value);
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 4e0efff92dca..3f3fa1b6a69e 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -51,8 +51,8 @@
* for the cache windows.
*
* The call to dmub_srv_hw_init() programs the DMCUB registers to prepare
- * for command submission. Commands can be queued via dmub_srv_cmd_queue()
- * and executed via dmub_srv_cmd_execute().
+ * for command submission. Commands can be queued via dmub_srv_fb_cmd_queue()
+ * and executed via dmub_srv_fb_cmd_execute().
*
* If the queue is full the dmub_srv_wait_for_idle() call can be used to
* wait until the queue has been cleared.
@@ -142,6 +142,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
+ DMUB_NOTIFICATION_FUSED_IO,
DMUB_NOTIFICATION_MAX
};
@@ -170,6 +171,13 @@ enum dmub_srv_power_state_type {
DMUB_POWER_STATE_D3 = 8
};
+/* enum dmub_inbox_cmd_interface_type - defines default interface for host->dmub commands */
+enum dmub_inbox_cmd_interface_type {
+ DMUB_CMD_INTERFACE_DEFAULT = 0,
+ DMUB_CMD_INTERFACE_FB = 1,
+ DMUB_CMD_INTERFACE_REG = 2,
+};
+
/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
@@ -349,6 +357,21 @@ struct dmub_diagnostic_data {
uint8_t is_cw6_enabled : 1;
};
+struct dmub_srv_inbox {
+ /* generic status */
+ uint64_t num_submitted;
+ uint64_t num_reported;
+ union {
+ /* frame buffer mailbox status */
+ struct dmub_rb rb;
+ /* register mailbox status */
+ struct {
+ bool is_pending;
+ bool is_multi_pending;
+ };
+ };
+};
+
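
The new dmub_srv_inbox overlays the two mailbox flavors: ring-buffer state for the frame-buffer inbox, pending flags for the register inbox, with the submitted/reported counters shared. A user-space sketch of interpreting it per interface type (layout simplified):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum iface { IFACE_FB, IFACE_REG };

struct inbox {
        uint64_t num_submitted;
        uint64_t num_reported;
        union {
                struct { uint32_t rptr, wrpt; } rb;   /* fb mailbox state */
                struct { bool is_pending; } reg;      /* register mailbox state */
        };
};

static bool inbox_idle(const struct inbox *in, enum iface type)
{
        if (in->num_submitted != in->num_reported)
                return false;
        /* interpret the union according to the active interface */
        return type == IFACE_FB ? in->rb.rptr == in->rb.wrpt : !in->reg.is_pending;
}

int main(void)
{
        struct inbox in = { .num_submitted = 3, .num_reported = 3,
                            .rb = { .rptr = 8, .wrpt = 8 } };
        printf("%d\n", inbox_idle(&in, IFACE_FB)); /* 1: idle */
        return 0;
}
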
/**
* struct dmub_srv_base_funcs - Driver specific base callbacks
*/
@@ -422,6 +445,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub);
+ uint32_t (*emul_get_inbox1_wptr)(struct dmub_srv *dmub);
+
void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
bool (*is_supported)(struct dmub_srv *dmub);
@@ -462,18 +487,21 @@ struct dmub_srv_hw_funcs {
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
void (*subvp_save_surf_addr)(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
+
void (*send_reg_inbox0_cmd_msg)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
uint32_t (*read_reg_inbox0_rsp_int_status)(struct dmub_srv *dmub);
void (*read_reg_inbox0_cmd_rsp)(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void (*write_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*clear_reg_inbox0_rsp_int_ack)(struct dmub_srv *dmub);
+ void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
+
uint32_t (*read_reg_outbox0_rdy_int_status)(struct dmub_srv *dmub);
void (*write_reg_outbox0_rdy_int_ack)(struct dmub_srv *dmub);
void (*read_reg_outbox0_msg)(struct dmub_srv *dmub, uint32_t *msg);
void (*write_reg_outbox0_rsp)(struct dmub_srv *dmub, uint32_t *rsp);
uint32_t (*read_reg_outbox0_rsp_int_status)(struct dmub_srv *dmub);
- void (*enable_reg_inbox0_rsp_int)(struct dmub_srv *dmub, bool enable);
void (*enable_reg_outbox0_rdy_int)(struct dmub_srv *dmub, bool enable);
};
@@ -493,6 +521,7 @@ struct dmub_srv_create_params {
enum dmub_asic asic;
uint32_t fw_version;
bool is_virtual;
+ enum dmub_inbox_cmd_interface_type inbox_type;
};
/**
@@ -521,8 +550,9 @@ struct dmub_srv {
const struct dmub_srv_dcn401_regs *regs_dcn401;
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
- struct dmub_rb inbox1_rb;
+ struct dmub_srv_inbox inbox1;
uint32_t inbox1_last_wptr;
+ struct dmub_srv_inbox reg_inbox0;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -542,6 +572,7 @@ struct dmub_srv {
struct dmub_fw_meta_info meta_info;
struct dmub_feature_caps feature_caps;
struct dmub_visual_confirm_color visual_confirm_color;
+ enum dmub_inbox_cmd_interface_type inbox_type;
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
@@ -566,11 +597,8 @@ struct dmub_notification {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
- /**
- * DPIA notification command.
- */
- struct dmub_rb_cmd_dpia_notification dpia_notification;
struct dmub_rb_cmd_hpd_sense_notify_data hpd_sense_notify;
+ struct dmub_cmd_fused_request fused_request;
};
};
@@ -699,19 +727,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
- * dmub_srv_sync_inbox1() - sync sw state with hw state
- * @dmub: the dmub service
- *
- * Sync sw state with hw state when resume from S0i3
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_INVALID - unspecified error
- */
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
-
-/**
- * dmub_srv_cmd_queue() - queues a command to the DMUB
+ * dmub_srv_fb_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
*
@@ -723,11 +739,11 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
* DMUB_STATUS_QUEUE_FULL - no remaining room in queue
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd);
/**
- * dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
+ * dmub_srv_fb_cmd_execute() - Executes a queued sequence to the dmub
* @dmub: the dmub service
*
* Begins execution of queued commands on the dmub.
@@ -736,7 +752,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub);
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub);
/**
* dmub_srv_wait_for_hw_pwr_up() - Waits for firmware hardware power up is completed
@@ -795,6 +811,23 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
uint32_t timeout_us);
/**
+ * dmub_srv_wait_for_pending() - Re-entrant wait for messages currently pending
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Waits until the commands queued prior to this call are complete.
+ * If interfaces remain busy due to additional work being submitted
+ * concurrently, this function will not continue to wait.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us);
+
+/**
* dmub_srv_wait_for_idle() - Waits for the DMUB to be idle
* @dmub: the dmub service
* @timeout_us: the maximum number of microseconds to wait
@@ -892,9 +925,6 @@ enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
union dmub_fw_boot_options *option);
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd);
-
enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
bool skip);
@@ -959,26 +989,6 @@ enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index);
/**
- * dmub_srv_send_reg_inbox0_cmd() - send a dmub command and wait for the command
- * being processed by DMUB.
- * @dmub: The dmub service
- * @cmd: The dmub command being sent. If with_replay is true, the function will
- * update cmd with replied data.
- * @with_reply: true if DMUB reply needs to be copied back to cmd. false if the
- * cmd doesn't need to be replied.
- * @timeout_us: timeout in microseconds.
- *
- * Return:
- * DMUB_STATUS_OK - success
- * DMUB_STATUS_TIMEOUT - DMUB fails to process the command within the timeout
- * interval.
- */
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us);
-
-/**
* dmub_srv_set_power_state() - Track DC power state in dmub_srv
* @dmub: The dmub service
* @power_state: DC power state setting
@@ -990,4 +1000,71 @@ enum dmub_status dmub_srv_send_reg_inbox0_cmd(
*/
void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state);
+/**
+ * dmub_srv_reg_cmd_execute() - Executes provided command to the dmub
+ * @dmub: the dmub service
+ * @cmd: the command packet to be executed
+ *
+ * Executes a single command for the dmub.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd);
+
+
+/**
+ * dmub_srv_cmd_get_response() - Copies return data for command into buffer
+ * @dmub: the dmub service
+ * @cmd_rsp: response buffer
+ *
+ * Copies the return data for a command into the response buffer
+ */
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp);
+
+/**
+ * dmub_srv_sync_inboxes() - Sync inbox state
+ * @dmub: the dmub service
+ *
+ * Sync inbox state
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub);
+
+/**
+ * dmub_srv_wait_for_inbox_free() - Waits for space in the DMUB inbox to free up
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ * @num_free_required: number of free entries required
+ *
+ * Waits until at least the specified number of entries are free in the DMUB inbox.
+ * The maximum wait time is given in microseconds to prevent spinning
+ * forever.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required);
+
+/**
+ * dmub_srv_update_inbox_status() - Updates pending status for inbox & reg inbox0
+ * @dmub: the dmub service
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for buffer to flush timed out
+ * DMUB_STATUS_HW_FAILURE - issue with HW programming
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
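
Taken together, the renamed and added entry points suggest a submission flow that forks on inbox_type. The sketch below is an inferred model, not code from this patch: the dmub calls are reduced to integer stubs so the control flow compiles stand-alone.

#include <stdio.h>

enum iface { IFACE_FB, IFACE_REG }; /* mirrors dmub_inbox_cmd_interface_type */

/* stand-ins for dmub_srv_wait_for_inbox_free(), dmub_srv_reg_cmd_execute(), etc. */
static int wait_for_inbox_free(void) { return 0; }
static int reg_cmd_execute(int cmd)  { printf("reg execute %d\n", cmd); return 0; }
static int wait_for_pending(void)    { return 0; }
static int fb_cmd_queue(int cmd)     { printf("fb queue %d\n", cmd); return 0; }
static int fb_cmd_execute(void)      { return 0; }

static int submit(enum iface type, int cmd)
{
        if (type == IFACE_REG) {
                /* register inbox: ensure a slot, execute directly, wait for completion */
                wait_for_inbox_free();
                reg_cmd_execute(cmd);
                return wait_for_pending();
        }
        /* frame-buffer inbox: queue into the ring, then kick execution */
        fb_cmd_queue(cmd);
        return fb_cmd_execute();
}

int main(void)
{
        submit(IFACE_REG, 1);
        submit(IFACE_FB, 2);
        return 0;
}
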
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 1f5f4e3e49d4..b66bd10cdc9b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,6 +36,9 @@
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
+#ifdef __forceinline
+#undef __forceinline
+#endif
#define __forceinline inline
/**
@@ -547,6 +550,11 @@ union replay_hw_flags {
* @is_alpm_initialized: Indicates whether ALPM is initialized
*/
uint32_t is_alpm_initialized : 1;
+
+ /**
+ * @alpm_mode: Indicates ALPM mode selected
+ */
+ uint32_t alpm_mode : 2;
} bitfields;
uint32_t u32All;
@@ -739,6 +747,14 @@ enum dmub_ips_disable_type {
DMUB_IPS_DISABLE_IPS2_Z10 = 4,
DMUB_IPS_DISABLE_DYNAMIC = 5,
DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6,
+ DMUB_IPS_DISABLE_Z8_RETENTION = 7,
+};
+
+enum dmub_ips_rcg_disable_type {
+ DMUB_IPS_RCG_ENABLE = 0,
+ DMUB_IPS0_RCG_DISABLE = 1,
+ DMUB_IPS1_RCG_DISABLE = 2,
+ DMUB_IPS_RCG_DISABLE = 3
};
#define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -817,11 +833,12 @@ enum dmub_shared_state_feature_id {
*/
union dmub_shared_state_ips_fw_signals {
struct {
- uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
+ uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */
uint32_t detection_required : 1; /**< 1 if detection is required */
- uint32_t reserved_bits : 28; /**< Reversed */
+ uint32_t ips1z8_commit : 1; /**< 1 if in IPS1 Z8 Retention */
+ uint32_t reserved_bits : 27; /**< Reserved */
} bits;
uint32_t all;
};
@@ -836,7 +853,10 @@ union dmub_shared_state_ips_driver_signals {
uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */
- uint32_t reserved_bits : 27; /**< Reversed bits */
+ uint32_t allow_ips0_rcg : 1; /**< 1 if IPS0 RCG is allowed */
+ uint32_t allow_ips1_rcg : 1; /**< 1 if IPS1 RCG is allowed */
+ uint32_t allow_ips1z8 : 1; /**< 1 if IPS1 Z8 Retention is allowed */
+ uint32_t reserved_bits : 24; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -865,7 +885,9 @@ struct dmub_shared_state_ips_fw {
uint32_t ips1_exit_count; /**< Exit counter for IPS1 */
uint32_t ips2_entry_count; /**< Entry counter for IPS2 */
uint32_t ips2_exit_count; /**< Exit counter for IPS2 */
- uint32_t reserved[55]; /**< Reversed, to be updated when adding new fields. */
+ uint32_t ips1_z8ret_entry_count; /**< Entry counter for IPS1 Z8 Retention */
+ uint32_t ips1_z8ret_exit_count; /**< Exit counter for IPS1 Z8 Retention */
+ uint32_t reserved[53]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
@@ -1253,6 +1275,10 @@ enum dmub_gpint_command {
* DESC: Setup debug configs.
*/
DMUB_GPINT__SETUP_DEBUG_MODE = 136,
+ /**
+ * DESC: Initiates IPS wake sequence.
+ */
+ DMUB_GPINT__IPS_DEBUG_WAKE = 137,
};
/**
@@ -2636,7 +2662,11 @@ enum dp_hpd_type {
/**
* DP HPD short pulse
*/
- DP_IRQ
+ DP_IRQ = 1,
+ /**
+ * Failure to acquire DP HPD state
+ */
+ DP_NONE_HPD = 2
};
/**
@@ -3609,6 +3639,12 @@ struct dmub_rb_cmd_psr_set_power_opt {
struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
};
+enum dmub_alpm_mode {
+ ALPM_AUXWAKE = 0,
+ ALPM_AUXLESS = 1,
+ ALPM_UNSUPPORTED = 2,
+};
+
/**
* Definition of Replay Residency GPINT command.
* Bit[0] - Residency mode for Revision 0
@@ -3742,6 +3778,15 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
};
+struct dmub_alpm_auxless_data {
+ uint16_t lfps_setup_ns;
+ uint16_t lfps_period_ns;
+ uint16_t lfps_silence_ns;
+ uint16_t lfps_t1_t2_override_us;
+ short lfps_t1_t2_offset_us;
+ uint8_t lttpr_count;
+};
+
/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -3812,6 +3857,10 @@ struct dmub_cmd_replay_copy_settings_data {
* Use FSM state for Replay power up/down
*/
uint8_t use_phy_fsm;
+ /**
+ * Use for AUX-less ALPM LFPS wake operation
+ */
+ struct dmub_alpm_auxless_data auxless_alpm_data;
};
/**
@@ -4360,6 +4409,11 @@ enum dmub_cmd_abm_type {
* Get the current ACE curve.
*/
DMUB_CMD__ABM_GET_ACE_CURVE = 10,
+
+ /**
+ * Get current histogram data
+ */
+ DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11,
};
struct abm_ace_curve {
@@ -4954,6 +5008,20 @@ enum dmub_abm_ace_curve_type {
};
/**
+ * enum dmub_abm_histogram_type - Histogram type.
+ */
+enum dmub_abm_histogram_type {
+ /**
+ * ACE curve as defined by the SW layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW = 0,
+ /**
+ * ACE curve as defined by the SW to HW translation interface layer.
+ */
+ ABM_HISTOGRAM_TYPE__SW_IF = 1,
+};
+
+/**
* Definition of a DMUB_CMD__ABM_GET_ACE_CURVE command.
*/
struct dmub_rb_cmd_abm_get_ace_curve {
@@ -4989,6 +5057,41 @@ struct dmub_rb_cmd_abm_get_ace_curve {
};
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command.
+ */
+struct dmub_rb_cmd_abm_get_histogram {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Address where Histogram should be copied.
+ */
+ union dmub_addr dest;
+
+ /**
+ * Type of Histogram being queried.
+ */
+ enum dmub_abm_histogram_type histogram_type;
+
+ /**
+ * Indirect buffer length.
+ */
+ uint16_t bytes;
+
+ /**
+ * eDP panel instance.
+ */
+ uint8_t panel_inst;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
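For illustration, a host-side fill of the command above might look like the sketch below. The mock_* types are stand-ins defined here only for self-containment; the real dmub_cmd_header and dmub_addr layouts live elsewhere in this header, and the sub_type value 11 mirrors DMUB_CMD__ABM_GET_HISTOGRAM_DATA from the enum added earlier in this patch.

    #include <stdint.h>
    #include <string.h>

    /* Minimal mock of the command above, for illustration only; the real
     * dmub_cmd_header and dmub_addr layouts live in the driver headers. */
    struct mock_header { uint8_t type, sub_type; uint16_t payload_bytes; };
    union mock_addr { uint64_t quad_part; };

    struct mock_get_histogram {
    	struct mock_header header;
    	union mock_addr dest;       /* where FW copies the histogram */
    	uint32_t histogram_type;    /* ABM_HISTOGRAM_TYPE__SW or __SW_IF */
    	uint16_t bytes;             /* indirect buffer length */
    	uint8_t panel_inst;         /* eDP panel instance */
    	uint8_t pad;                /* pad to 4-byte boundary */
    };

    /* Hypothetical fill; 11 mirrors DMUB_CMD__ABM_GET_HISTOGRAM_DATA. */
    static void fill_get_histogram(struct mock_get_histogram *c,
    			       uint64_t gpu_addr, uint16_t len, uint8_t panel)
    {
    	memset(c, 0, sizeof(*c));
    	c->header.sub_type = 11;
    	c->header.payload_bytes = sizeof(*c) - sizeof(c->header);
    	c->dest.quad_part = gpu_addr;   /* indirect buffer for the data */
    	c->histogram_type = 0;          /* ABM_HISTOGRAM_TYPE__SW */
    	c->bytes = len;
    	c->panel_inst = panel;
    }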
+/**
* Definition of a DMUB_CMD__ABM_SAVE_RESTORE command.
*/
struct dmub_rb_cmd_abm_save_restore {
@@ -5389,7 +5492,8 @@ struct dmub_cmd_fused_request {
struct dmub_cmd_fused_request_location_i2c {
uint8_t is_aux : 1; // False
uint8_t ddc_line : 3;
- uint8_t _reserved0 : 4;
+ uint8_t over_aux : 1;
+ uint8_t _reserved0 : 3;
uint8_t address;
uint8_t offset;
uint8_t length;
@@ -5687,6 +5791,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_get_ace_curve abm_get_ace_curve;
/**
+ * Definition of a DMUB_CMD__ABM_GET_HISTOGRAM command.
+ */
+ struct dmub_rb_cmd_abm_get_histogram abm_get_histogram;
+
+ /**
* Definition of a DMUB_CMD__ABM_SET_EVENT command.
*/
struct dmub_rb_cmd_abm_set_event abm_set_event;
@@ -5934,6 +6043,9 @@ static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* +1 because 1 entry is always unusable */
+ data_count += DMUB_RB_CMD_SIZE;
+
return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE;
}
@@ -5953,6 +6065,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
else
data_count = rb->capacity - (rb->rptr - rb->wrpt);
+ /* -1 because 1 entry is always unusable */
return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
}
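Both helpers above rely on the classic one-slot-reserved ring-buffer convention: with rptr == wrpt meaning empty, a completely full ring would be indistinguishable from an empty one, so one entry is permanently sacrificed. A minimal standalone sketch of the same arithmetic, with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_SIZE 64u        /* stand-in for DMUB_RB_CMD_SIZE */
    #define CAPACITY (CMD_SIZE * 8u)

    /* Free entries when one slot is always kept unusable so that
     * rptr == wrpt can unambiguously mean "empty". */
    static uint32_t rb_num_free(uint32_t rptr, uint32_t wrpt)
    {
    	uint32_t used = (wrpt >= rptr) ? (wrpt - rptr)
    				       : CAPACITY - (rptr - wrpt);
    	used += CMD_SIZE;                 /* the reserved slot */
    	return (CAPACITY - used) / CMD_SIZE;
    }

    static int rb_full(uint32_t rptr, uint32_t wrpt)
    {
    	uint32_t used = (wrpt >= rptr) ? (wrpt - rptr)
    				       : CAPACITY - (rptr - wrpt);
    	return used == CAPACITY - CMD_SIZE;   /* one entry short of wrap */
    }

    int main(void)
    {
    	printf("empty: %u free\n", rb_num_free(0, 0));           /* 7, not 8 */
    	printf("full?  %d\n", rb_full(0, CAPACITY - CMD_SIZE));  /* 1 */
    	return 0;
    }

With eight 64-byte slots, an empty ring reports seven free entries, and the ring counts as full once the write pointer is one slot behind the read pointer.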
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index e67f7c4784eb..2575dbc448f7 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -66,24 +66,20 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
const uint32_t timeout_us = 1 * 1000 * 1000; //1s
const uint32_t poll_delay_us = 1; //1us
uint32_t i = 0;
- uint32_t in_reset, scratch, pwait_mode;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
- break;
-
- udelay(poll_delay_us);
- }
-
for (; i < timeout_us; i++) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
@@ -517,28 +513,69 @@ void dmub_dcn401_send_reg_inbox0_cmd_msg(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd)
{
uint32_t *dwords = (uint32_t *)cmd;
-
+ int32_t payload_size_bytes = cmd->cmd_common.header.payload_bytes;
+ uint32_t msg_index;
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[0]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[1]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[2]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[3]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[4]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[5]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[6]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[7]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[8]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[9]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[10]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[11]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[12]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[13]);
- REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[14]);
+ /* write remaining data based on payload size */
+ for (msg_index = 0; msg_index < 15; msg_index++) {
+ if (payload_size_bytes <= msg_index * 4) {
+ break;
+ }
+
+ switch (msg_index) {
+ case 0:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG0, dwords[msg_index + 1]);
+ break;
+ case 1:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG1, dwords[msg_index + 1]);
+ break;
+ case 2:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG2, dwords[msg_index + 1]);
+ break;
+ case 3:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG3, dwords[msg_index + 1]);
+ break;
+ case 4:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG4, dwords[msg_index + 1]);
+ break;
+ case 5:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG5, dwords[msg_index + 1]);
+ break;
+ case 6:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG6, dwords[msg_index + 1]);
+ break;
+ case 7:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG7, dwords[msg_index + 1]);
+ break;
+ case 8:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG8, dwords[msg_index + 1]);
+ break;
+ case 9:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG9, dwords[msg_index + 1]);
+ break;
+ case 10:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG10, dwords[msg_index + 1]);
+ break;
+ case 11:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG11, dwords[msg_index + 1]);
+ break;
+ case 12:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG12, dwords[msg_index + 1]);
+ break;
+ case 13:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG13, dwords[msg_index + 1]);
+ break;
+ case 14:
+ REG_WRITE(DMCUB_REG_INBOX0_MSG14, dwords[msg_index + 1]);
+ break;
+ }
+ }
+
/* writing to INBOX RDY register will trigger DMUB REG INBOX0 RDY
* interrupt.
*/
- REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[15]);
+ REG_WRITE(DMCUB_REG_INBOX0_RDY, dwords[0]);
}
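The rewritten sender above changes the dword layout: dwords[0] now carries the command header and is written last to the RDY register (which raises the ready interrupt), while dwords[1..15] land in MSG0..MSG14, and only as many MSG registers as the payload needs are touched. A minimal sketch of that mapping, with the register I/O mocked:

    #include <stdint.h>
    #include <stdio.h>

    /* Mocked register write; prints instead of touching hardware. */
    static void reg_write(int msg_index, uint32_t v)
    {
    	printf("MSG%-2d <= 0x%08x\n", msg_index, v);
    }

    static void send_reg_inbox0(const uint32_t dwords[16], int payload_bytes)
    {
    	int i;

    	/* ceil(payload_bytes / 4) registers, capped at MSG0..MSG14 */
    	for (i = 0; i < 15 && payload_bytes > i * 4; i++)
    		reg_write(i, dwords[i + 1]);

    	printf("RDY   <= 0x%08x  (header, triggers the interrupt)\n",
    	       dwords[0]);
    }

    int main(void)
    {
    	uint32_t cmd[16] = { 0xdeadbeef, 1, 2, 3, 4 };

    	send_reg_inbox0(cmd, 10);   /* 10 bytes -> MSG0..MSG2 written */
    	return 0;
    }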
uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub)
@@ -556,30 +593,39 @@ void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
static_assert(sizeof(*cmd) == 64, "DMUB command size mismatch");
- dwords[0] = REG_READ(DMCUB_REG_INBOX0_MSG0);
- dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG1);
- dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG2);
- dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG3);
- dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG4);
- dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG5);
- dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG6);
- dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG7);
- dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG8);
- dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG9);
- dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG10);
- dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG11);
- dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG12);
- dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG13);
- dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG14);
- dwords[15] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[0] = REG_READ(DMCUB_REG_INBOX0_RSP);
+ dwords[1] = REG_READ(DMCUB_REG_INBOX0_MSG0);
+ dwords[2] = REG_READ(DMCUB_REG_INBOX0_MSG1);
+ dwords[3] = REG_READ(DMCUB_REG_INBOX0_MSG2);
+ dwords[4] = REG_READ(DMCUB_REG_INBOX0_MSG3);
+ dwords[5] = REG_READ(DMCUB_REG_INBOX0_MSG4);
+ dwords[6] = REG_READ(DMCUB_REG_INBOX0_MSG5);
+ dwords[7] = REG_READ(DMCUB_REG_INBOX0_MSG6);
+ dwords[8] = REG_READ(DMCUB_REG_INBOX0_MSG7);
+ dwords[9] = REG_READ(DMCUB_REG_INBOX0_MSG8);
+ dwords[10] = REG_READ(DMCUB_REG_INBOX0_MSG9);
+ dwords[11] = REG_READ(DMCUB_REG_INBOX0_MSG10);
+ dwords[12] = REG_READ(DMCUB_REG_INBOX0_MSG11);
+ dwords[13] = REG_READ(DMCUB_REG_INBOX0_MSG12);
+ dwords[14] = REG_READ(DMCUB_REG_INBOX0_MSG13);
+ dwords[15] = REG_READ(DMCUB_REG_INBOX0_MSG14);
}
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 1);
+}
+
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub)
+{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK, 0);
}
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
+{
+ REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
+}
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK, 1);
@@ -604,11 +650,6 @@ uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub)
return status;
}
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable)
-{
- REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN, enable ? 1:0);
-}
-
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable)
{
REG_UPDATE(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN, enable ? 1:0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index c35be52676f6..88c3a44d67d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -277,11 +277,13 @@ uint32_t dmub_dcn401_read_reg_inbox0_rsp_int_status(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_inbox0_cmd_rsp(struct dmub_srv *dmub,
union dmub_rb_cmd *cmd);
void dmub_dcn401_write_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_clear_reg_inbox0_rsp_int_ack(struct dmub_srv *dmub);
+void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
+
void dmub_dcn401_write_reg_outbox0_rdy_int_ack(struct dmub_srv *dmub);
void dmub_dcn401_read_reg_outbox0_msg(struct dmub_srv *dmub, uint32_t *msg);
void dmub_dcn401_write_reg_outbox0_rsp(struct dmub_srv *dmub, uint32_t *msg);
uint32_t dmub_dcn401_read_reg_outbox0_rsp_int_status(struct dmub_srv *dmub);
-void dmub_dcn401_enable_reg_inbox0_rsp_int(struct dmub_srv *dmub, bool enable);
void dmub_dcn401_enable_reg_outbox0_rdy_int(struct dmub_srv *dmub, bool enable);
uint32_t dmub_dcn401_read_reg_outbox0_rdy_int_status(struct dmub_srv *dmub);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ae8133816b43..acca7943a8c8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -157,6 +157,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
+ /* default to specifying no inbox type */
+ enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT;
+
switch (asic) {
case DMUB_ASIC_DCN20:
case DMUB_ASIC_DCN21:
@@ -395,10 +398,15 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn401_get_current_time;
funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data;
+
funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg;
funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status;
funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp;
funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack;
+ funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack;
+ funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
+ default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now
+
funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack;
funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg;
funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp;
@@ -411,6 +419,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
return false;
}
+ /* set default inbox type if not overridden */
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) {
+ if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) {
+ /* use default inbox type as specified by DCN rev */
+ dmub->inbox_type = default_inbox_type;
+ } else if (funcs->send_reg_inbox0_cmd_msg) {
+ /* prefer reg as default inbox type if present */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_REG;
+ } else {
+ /* use fb as fallback */
+ dmub->inbox_type = DMUB_CMD_INTERFACE_FB;
+ }
+ }
+
return true;
}
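The selection logic added above resolves the inbox interface in priority order: an explicit caller choice wins, then the per-ASIC default picked during hw setup, then the register inbox if the hw funcs provide one, and finally the framebuffer inbox. A condensed sketch of the same priority chain, with hypothetical names:

    enum inbox_type { INBOX_DEFAULT, INBOX_REG, INBOX_FB };

    /* Priority used above: an explicit caller choice wins, then the
     * per-ASIC default, then REG if the hw funcs support it, else FB. */
    static enum inbox_type pick_inbox(enum inbox_type requested,
    				  enum inbox_type asic_default,
    				  int has_reg_inbox)
    {
    	if (requested != INBOX_DEFAULT)
    		return requested;
    	if (asic_default != INBOX_DEFAULT)
    		return asic_default;
    	return has_reg_inbox ? INBOX_REG : INBOX_FB;
    }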
@@ -426,6 +448,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
dmub->asic = params->asic;
dmub->fw_version = params->fw_version;
dmub->is_virtual = params->is_virtual;
+ dmub->inbox_type = params->inbox_type;
/* Setup asic dependent hardware funcs. */
if (!dmub_srv_hw_setup(dmub, params->asic)) {
@@ -695,7 +718,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
inbox1.base = cw4.region.base;
inbox1.top = cw4.region.base + DMUB_RB_SIZE;
outbox1.base = inbox1.top;
- outbox1.top = cw4.region.top;
+ outbox1.top = inbox1.top + DMUB_RB_SIZE;
cw5.offset.quad_part = tracebuff_fb->gpu_addr;
cw5.region.base = DMUB_CW5_BASE;
@@ -737,7 +760,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
rb_params.ctx = dmub;
rb_params.base_address = mail_fb->cpu_addr;
rb_params.capacity = DMUB_RB_SIZE;
- dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ dmub_rb_init(&dmub->inbox1.rb, &rb_params);
// Initialize outbox1 ring buffer
rb_params.ctx = dmub;
@@ -768,27 +791,6 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
-{
- if (!dmub->sw_init)
- return DMUB_STATUS_INVALID;
-
- if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
- uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
- uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
-
- if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
- return DMUB_STATUS_HW_FAILURE;
- } else {
- dmub->inbox1_rb.rptr = rptr;
- dmub->inbox1_rb.wrpt = wptr;
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- }
- }
-
- return DMUB_STATUS_OK;
-}
-
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@@ -799,8 +801,13 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
/* mailboxes have been reset in hw, so reset the sw state as well */
dmub->inbox1_last_wptr = 0;
- dmub->inbox1_rb.wrpt = 0;
- dmub->inbox1_rb.rptr = 0;
+ dmub->inbox1.rb.wrpt = 0;
+ dmub->inbox1.rb.rptr = 0;
+ dmub->inbox1.num_reported = 0;
+ dmub->inbox1.num_submitted = 0;
+ dmub->reg_inbox0.num_reported = 0;
+ dmub->reg_inbox0.num_submitted = 0;
+ dmub->reg_inbox0.is_pending = 0;
dmub->outbox0_rb.wrpt = 0;
dmub->outbox0_rb.rptr = 0;
dmub->outbox1_rb.wrpt = 0;
@@ -811,7 +818,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
+enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
@@ -820,18 +827,20 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (dmub->power_state != DMUB_POWER_STATE_D0)
return DMUB_STATUS_POWER_STATE_D3;
- if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
- dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
+ if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
+ dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
}
- if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
+ if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
+ dmub->inbox1.num_submitted++;
return DMUB_STATUS_OK;
+ }
return DMUB_STATUS_QUEUE_FULL;
}
-enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
+enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub)
{
struct dmub_rb flush_rb;
@@ -846,13 +855,13 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- flush_rb = dmub->inbox1_rb;
+ flush_rb = dmub->inbox1.rb;
flush_rb.rptr = dmub->inbox1_last_wptr;
dmub_rb_flush_pending(&flush_rb);
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
- dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
return DMUB_STATUS_OK;
}
@@ -910,26 +919,84 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
+{
+ if (dmub->reg_inbox0.is_pending) {
+ dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (!dmub->reg_inbox0.is_pending) {
+ /* ack the rsp interrupt */
+ if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack)
+ dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+
+ /* only update the reported count if commands aren't being batched */
+ if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) {
+ dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted;
+ }
+ }
+ }
+}
+
+enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
+ uint32_t timeout_us)
+{
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+ struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0;
+ struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1;
+ const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0;
+ const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1;
+
+ if (!dmub->hw_init ||
+ !dmub->hw_funcs.get_inbox1_wptr)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i <= timeout_us; i += polling_interval_us) {
+ scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+ scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
+ dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
+ !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
+
+ if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ /* check current HW state first, but use command submission vs reported as a fallback */
+ if ((dmub_rb_empty(&scratch_inbox1.rb) ||
+ inbox1->num_reported >= scratch_inbox1.num_submitted) &&
+ (!scratch_reg_inbox0.is_pending ||
+ reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted))
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
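dmub_srv_wait_for_pending() above follows the driver's usual bounded-poll shape: snapshot the software counters, re-read the hardware state each iteration, and give up once the timeout budget is spent. A generic standalone sketch of that pattern (udelay() is stubbed here; in the kernel it is the real busy-wait):

    #include <stdint.h>

    static void udelay(uint32_t us) { (void)us; /* real busy-wait in kernel */ }

    typedef int (*cond_fn)(void *ctx);

    /* Bounded poll: re-check the condition every interval until it
     * holds or the timeout budget is exhausted. */
    static int poll_until(cond_fn done, void *ctx, uint32_t timeout_us)
    {
    	const uint32_t interval_us = 1;
    	uint32_t i;

    	for (i = 0; i <= timeout_us; i += interval_us) {
    		if (done(ctx))
    			return 0;   /* DMUB_STATUS_OK analogue */
    		udelay(interval_us);
    	}
    	return -1;                  /* DMUB_STATUS_TIMEOUT analogue */
    }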
+
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
- uint32_t i, rptr;
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
- for (i = 0; i <= timeout_us; ++i) {
- rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
- if (rptr > dmub->inbox1_rb.capacity)
- return DMUB_STATUS_HW_FAILURE;
+ if (status != DMUB_STATUS_OK)
+ return status;
- dmub->inbox1_rb.rptr = rptr;
-
- if (dmub_rb_empty(&dmub->inbox1_rb))
+ /* check for idle */
+ if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
return DMUB_STATUS_OK;
- udelay(1);
+ udelay(polling_interval_us);
}
return DMUB_STATUS_TIMEOUT;
@@ -1040,35 +1107,6 @@ enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
-enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd)
-{
- enum dmub_status status = DMUB_STATUS_OK;
-
- // Queue command
- status = dmub_srv_cmd_queue(dmub, cmd);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Execute command
- status = dmub_srv_cmd_execute(dmub);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Wait for DMUB to process command
- status = dmub_srv_wait_for_idle(dmub, 100000);
-
- if (status != DMUB_STATUS_OK)
- return status;
-
- // Copy data back from ring buffer into command
- dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);
-
- return status;
-}
-
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
void *entry)
{
@@ -1160,47 +1198,162 @@ void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_
}
}
+void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+{
+ if (!dmub || !dmub->hw_init)
+ return;
+
+ dmub->power_state = dmub_srv_power_state;
+}
-enum dmub_status dmub_srv_send_reg_inbox0_cmd(
- struct dmub_srv *dmub,
- union dmub_rb_cmd *cmd,
- bool with_reply, uint32_t timeout_us)
+enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
{
- uint32_t rsp_ready = 0;
- uint32_t i;
+ uint32_t num_pending = 0;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg ||
+ !dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported)
+ num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported;
+ else
+ /* num_submitted wrapped */
+ num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY -
+ (dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted);
+
+ if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY)
+ return DMUB_STATUS_QUEUE_FULL;
+
+ /* clear last rsp ack and send message */
+ dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub);
dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd);
- for (i = 0; i < timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready)
- break;
- udelay(1);
+ dmub->reg_inbox0.num_submitted++;
+ dmub->reg_inbox0.is_pending = true;
+ dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;
+
+ return DMUB_STATUS_OK;
+}
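The queue-full check above compares submitted against reported counts while allowing for 32-bit counter wrap. Worth noting as a simplification: with free-running unsigned counters, the modular subtraction submitted - reported already yields the in-flight count across wrap, as the standalone sketch below demonstrates (this is the general idiom, not the driver's exact branch structure):

    #include <assert.h>
    #include <stdint.h>

    /* Pending commands tracked with free-running 32-bit counters; with
     * unsigned arithmetic, submitted - reported stays correct across
     * the 2^32 wrap as long as fewer than 2^32 commands are in flight. */
    static uint32_t num_pending(uint32_t submitted, uint32_t reported)
    {
    	return submitted - reported;   /* modular subtraction handles wrap */
    }

    int main(void)
    {
    	assert(num_pending(5, 3) == 2);
    	/* submitted wrapped past 2^32 while 4 commands were outstanding */
    	assert(num_pending(2, 0xfffffffeu) == 4);
    	return 0;
    }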
+
+void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
+ union dmub_rb_cmd *cmd_rsp)
+{
+ if (dmub) {
+ if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG &&
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp) {
+ dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp);
+ } else {
+ dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
+ }
}
- if (rsp_ready == 0)
- return DMUB_STATUS_TIMEOUT;
+}
+
+static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->sw_init)
+ return DMUB_STATUS_INVALID;
- if (with_reply)
- dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd);
+ dmub->reg_inbox0.is_pending = 0;
+ dmub->reg_inbox0.is_multi_pending = 0;
- dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
+ return DMUB_STATUS_OK;
+}
- /* wait for rsp int status is cleared to initial state before exit */
- for (; i <= timeout_us; i++) {
- rsp_ready = dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
- if (rsp_ready == 0)
- break;
- udelay(1);
+static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+ uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
+ return DMUB_STATUS_HW_FAILURE;
+ } else {
+ dmub->inbox1.rb.rptr = rptr;
+ dmub->inbox1.rb.wrpt = wptr;
+ dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
+ }
}
- ASSERT(rsp_ready == 0);
return DMUB_STATUS_OK;
}
-void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
+enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub)
{
- if (!dmub || !dmub->hw_init)
- return;
+ enum dmub_status status;
- dmub->power_state = dmub_srv_power_state;
+ status = dmub_srv_sync_reg_inbox0(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ status = dmub_srv_sync_inbox1(dmub);
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
+ uint32_t timeout_us,
+ uint32_t num_free_required)
+{
+ enum dmub_status status;
+ uint32_t i;
+ const uint32_t polling_interval_us = 1;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ for (i = 0; i < timeout_us; i += polling_interval_us) {
+ status = dmub_srv_update_inbox_status(dmub);
+
+ if (status != DMUB_STATUS_OK)
+ return status;
+
+ /* check for space in inbox1 */
+ if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
+ return DMUB_STATUS_OK;
+
+ udelay(polling_interval_us);
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
+{
+ uint32_t rptr;
+
+ if (!dmub->hw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->power_state != DMUB_POWER_STATE_D0)
+ return DMUB_STATUS_POWER_STATE_D3;
+
+ /* update inbox1 state */
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ if (rptr > dmub->inbox1.rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ if (dmub->inbox1.rb.rptr > rptr) {
+ /* rb wrapped */
+ dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ } else {
+ dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
+ }
+ dmub->inbox1.rb.rptr = rptr;
+
+ /* update reg_inbox0 */
+ dmub_srv_update_reg_inbox0_status(dmub);
+
+ return DMUB_STATUS_OK;
}
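The inbox1 accounting above credits num_reported with however many whole entries the hardware read pointer advanced since the last poll, including the case where it wrapped past the end of the ring. The same arithmetic as a standalone sketch with assumed sizes:

    #include <assert.h>
    #include <stdint.h>

    #define CMD_SIZE 64u
    #define CAPACITY (CMD_SIZE * 8u)

    /* Commands consumed since the last poll: the distance the hardware
     * read pointer moved, accounting for one wrap of the ring. */
    static uint32_t consumed(uint32_t old_rptr, uint32_t new_rptr)
    {
    	if (old_rptr > new_rptr)            /* rb wrapped */
    		return (new_rptr + CAPACITY - old_rptr) / CMD_SIZE;
    	return (new_rptr - old_rptr) / CMD_SIZE;
    }

    int main(void)
    {
    	assert(consumed(0, 128) == 2);
    	assert(consumed(448, 64) == 2);     /* wrapped past the end */
    	return 0;
    }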
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index cce887cefc01..567c5b1aeb7a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -95,23 +95,6 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
notify->link_index = cmd.dpia_notification.payload.header.instance;
-
- if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) {
-
- notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw;
- notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw =
- cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw;
-
- if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed)
- notify->result = DPIA_BW_REQ_FAILED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded)
- notify->result = DPIA_BW_REQ_SUCCESS;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed)
- notify->result = DPIA_EST_BW_CHANGED;
- else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed)
- notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
- }
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
@@ -119,6 +102,10 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
&cmd.hpd_sense_notify.data,
sizeof(cmd.hpd_sense_notify.data));
break;
+ case DMUB_OUT_CMD__FUSED_IO:
+ notify->type = DMUB_NOTIFICATION_FUSED_IO;
+ dmub_memcpy(&notify->fused_request, &cmd.fused_io.request, sizeof(cmd.fused_io.request));
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
index 7e3240e73c1f..63813009a3a6 100644
--- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -86,6 +86,9 @@ enum dc_irq_source dal_irq_get_source(
enum dc_irq_source dal_irq_get_rx_source(
const struct gpio *irq);
+enum dc_irq_source dal_irq_get_read_request(
+ const struct gpio *irq);
+
enum gpio_result dal_irq_setup_hpd_filter(
struct gpio *irq,
struct gpio_hpd_config *config);
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 813463ffe15c..cc467031651d 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -424,7 +424,7 @@ struct integrated_info {
/*
* DFS-bypass flag
*/
-/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */
+/* Copy of SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS from atombios.h */
enum {
DFS_BYPASS_ENABLE = 0x10
};
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 1867aac57cf2..da74ed66c8f9 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -89,6 +89,8 @@ struct link_training_settings {
bool enhanced_framing;
enum lttpr_mode lttpr_mode;
+ bool lttpr_early_tps2;
+
/* disallow different lanes to have different lane settings */
bool disallow_per_lane_settings;
/* dpcd lane settings will always use the same hw lane settings
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 55c7d873175f..a37634942b07 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -386,6 +386,7 @@ enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 1d41dd58f6bc..bb8ae80b37f8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -452,21 +452,12 @@ out:
return status;
}
-static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
- event_ctx->unexpected_event = 1;
- goto out;
- }
-
- if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
- &input->lc_init_prepare, &status,
- hdcp, "lc_init_prepare"))
- goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
&input->lc_init_write, &status,
hdcp, "lc_init_write"))
@@ -482,6 +473,48 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
&input->l_prime_read, &status,
hdcp, "l_prime_read"))
goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+
+out:
+ return status;
+}
+
+static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp2 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
+ && hdcp->config.ddc.funcs.atomic_write_poll_read_aux
+ && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init,
+ &input->lc_init_prepare, &status,
+ hdcp, "lc_init_prepare"))
+ goto out;
+
+ status = (use_fw ? locality_check_fw : locality_check_sw)(hdcp, event_ctx, input);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
&input->l_prime_validation, &status,
hdcp, "l_prime_validation"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index c5f6c11de7e5..89ffb89e1932 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -184,17 +184,28 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
- case H2_A2_LOCALITY_CHECK:
+ case H2_A2_LOCALITY_CHECK: {
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
+ && !adjust->hdcp2.force_sw_locality_check;
+
+ /*
+ * 1A-05: consider disconnection after LC init a failure
+ * 1A-13-1: consider invalid l' a failure
+ * 1A-13-2: consider l' timeout a failure
+ */
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_available_poll != PASS ||
- input->l_prime_read != PASS) {
- /*
- * 1A-05: consider disconnection after LC init a failure
- * 1A-13-1: consider invalid l' a failure
- * 1A-13-2: consider l' timeout a failure
- */
+ (!use_fw && input->lc_init_write != PASS) ||
+ (!use_fw && input->l_prime_available_poll != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->l_prime_read != PASS) {
+ if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
+ adjust->hdcp2.force_sw_locality_check = true;
+ callback_in_ms(0, output);
+ break;
+ }
+
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
@@ -205,6 +216,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
+ }
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
@@ -498,14 +510,25 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
break;
- case D2_A2_LOCALITY_CHECK:
+ case D2_A2_LOCALITY_CHECK: {
+ const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_aux
+ && !adjust->hdcp2.force_sw_locality_check;
+
if (hdcp->state.stay_count > 10 ||
input->lc_init_prepare != PASS ||
- input->lc_init_write != PASS ||
- input->l_prime_read != PASS) {
+ (!use_fw && input->lc_init_write != PASS)) {
/* 1A-12: consider invalid l' a failure */
fail_and_restart_in_ms(0, &status, output);
break;
+ } else if (input->l_prime_read != PASS) {
+ if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
+ adjust->hdcp2.force_sw_locality_check = true;
+ callback_in_ms(0, output);
+ break;
+ }
+
+ fail_and_restart_in_ms(0, &status, output);
+ break;
} else if (input->l_prime_validation != PASS) {
callback_in_ms(0, output);
increment_stay_counter(hdcp);
@@ -514,6 +537,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
+ }
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 6e064e6ae949..2e6408579194 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -688,3 +688,76 @@ enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_INVALID_OPERATION;
}
+
+static bool write_stall_read_lc_fw_aux(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+
+ struct mod_hdcp_atomic_op_aux write = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT],
+ hdcp2->lc_init + 1,
+ sizeof(hdcp2->lc_init) - 1,
+ };
+ struct mod_hdcp_atomic_op_aux stall = { 0, NULL, 0, };
+ struct mod_hdcp_atomic_op_aux read = {
+ hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime + 1,
+ sizeof(hdcp2->lc_l_prime) - 1,
+ };
+
+ hdcp2->lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_aux(
+ hdcp->config.ddc.handle,
+ &write,
+ &stall,
+ &read,
+ 16 * 1000,
+ 0
+ );
+}
+
+static bool write_poll_read_lc_fw_i2c(struct mod_hdcp *hdcp)
+{
+ struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2;
+ uint8_t expected_rxstatus[2] = { sizeof(hdcp2->lc_l_prime) };
+
+ hdcp->buf[0] = hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT];
+ memmove(&hdcp->buf[1], hdcp2->lc_init, sizeof(hdcp2->lc_init));
+
+ struct mod_hdcp_atomic_op_i2c write = {
+ HDCP_I2C_ADDR,
+ 0,
+ hdcp->buf,
+ sizeof(hdcp2->lc_init) + 1,
+ };
+ struct mod_hdcp_atomic_op_i2c poll = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS],
+ expected_rxstatus,
+ sizeof(expected_rxstatus),
+ };
+ struct mod_hdcp_atomic_op_i2c read = {
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME],
+ hdcp2->lc_l_prime,
+ sizeof(hdcp2->lc_l_prime),
+ };
+
+ return hdcp->config.ddc.funcs.atomic_write_poll_read_i2c(
+ hdcp->config.ddc.handle,
+ &write,
+ &poll,
+ &read,
+ 20 * 1000,
+ 6
+ );
+}
+
+enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp)
+{
+ const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp);
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
+
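The two helpers above drive the new firmware locality-check path through the atomic_write_poll_read_* DDC ops declared later in this series (see the mod_hdcp.h hunk below). One hypothetical shape for the AUX variant on the DM side is sketched here; the transport stubs are placeholders for self-containment, not the driver's real AUX API:

    #include <stdbool.h>
    #include <stdint.h>

    struct aux_op { uint32_t address; uint8_t *data; uint32_t size; };

    /* Transport stubs standing in for the DM's real AUX channel access. */
    static bool aux_write(void *h, uint32_t a, const uint8_t *d, uint32_t n)
    { (void)h; (void)a; (void)d; (void)n; return true; }
    static bool aux_read(void *h, uint32_t a, uint8_t *d, uint32_t n)
    { (void)h; (void)a; (void)d; (void)n; return true; }
    static void stall_us(uint32_t us) { (void)us; }

    /* Write LC_Init, stall for the spec window (16 ms above), then read
     * L' back -- with no other DDC traffic in between, which is what
     * makes the operation "atomic" from the HDCP transmitter's view. */
    static bool atomic_write_stall_read_aux(void *handle,
    					const struct aux_op *write,
    					const struct aux_op *stall,
    					struct aux_op *read,
    					uint32_t timeout_us,
    					uint8_t poll_mask_msb)
    {
    	(void)stall; (void)poll_mask_msb;   /* unused in the AUX variant */

    	if (!aux_write(handle, write->address, write->data, write->size))
    		return false;
    	stall_us(timeout_us);
    	return aux_read(handle, read->address, read->data, read->size);
    }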
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 8c137d7c032e..e58e7b93810b 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index a4d344a4db9e..c42468bb70ac 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -133,9 +133,22 @@ enum mod_hdcp_display_disable_option {
MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION,
};
+struct mod_hdcp_atomic_op_i2c {
+ uint8_t address;
+ uint8_t offset;
+ uint8_t *data;
+ uint32_t size;
+};
+
+struct mod_hdcp_atomic_op_aux {
+ uint32_t address;
+ uint8_t *data;
+ uint32_t size;
+};
+
struct mod_hdcp_ddc {
void *handle;
- struct {
+ struct mod_hdcp_ddc_funcs {
bool (*read_i2c)(void *handle,
uint32_t address,
uint8_t offset,
@@ -153,6 +166,22 @@ struct mod_hdcp_ddc {
uint32_t address,
const uint8_t *data,
uint32_t size);
+ bool (*atomic_write_poll_read_i2c)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_i2c *write,
+ const struct mod_hdcp_atomic_op_i2c *poll,
+ struct mod_hdcp_atomic_op_i2c *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
+ bool (*atomic_write_poll_read_aux)(
+ void *handle,
+ const struct mod_hdcp_atomic_op_aux *write,
+ const struct mod_hdcp_atomic_op_aux *poll,
+ struct mod_hdcp_atomic_op_aux *read,
+ uint32_t poll_timeout_us,
+ uint8_t poll_mask_msb
+ );
} funcs;
};
@@ -185,7 +214,8 @@ struct mod_hdcp_link_adjustment_hdcp2 {
uint8_t force_type : 2;
uint8_t force_no_stored_km : 1;
uint8_t increase_h_prime_timeout: 1;
- uint8_t reserved : 3;
+ uint8_t force_sw_locality_check : 1;
+ uint8_t reserved : 2;
};
struct mod_hdcp_link_adjustment {
@@ -272,6 +302,10 @@ struct mod_hdcp_display_query {
struct mod_hdcp_config {
struct mod_hdcp_psp psp;
struct mod_hdcp_ddc ddc;
+ struct {
+ uint8_t lc_enable_sw_fallback : 1;
+ uint8_t reserved : 7;
+ } debug;
uint8_t index;
};
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 4c95b885d1d0..11374a2cbab8 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -351,9 +351,10 @@ enum DC_DEBUG_MASK {
DC_DISABLE_HDMI_CEC = 0x10000,
/**
- * @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver.
+ * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted
+ * Memory Clock Switching (FAMS) feature in amdgpu driver.
*/
- DC_DISABLE_SUBVP = 0x20000,
+ DC_DISABLE_SUBVP_FAMS = 0x20000,
/**
* @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves
*/
@@ -366,10 +367,15 @@ enum DC_DEBUG_MASK {
DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
/**
- * @DC_HDCP_LC_ENABLE_SW_FALLBACK If set, upon HDCP Locality Check FW
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW
* path failure, retry using legacy SW path.
*/
DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
+
+ /**
+ * @DC_SKIP_DETECTION_LT: If set, skip detection link training
+ */
+ DC_SKIP_DETECTION_LT = 0x200000,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index bd8085ec54ed..2d6a598a6c25 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -5242,6 +5242,8 @@
#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0x0000000c
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x00000003L
#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x00000000
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE_MASK 0x00000300L
+#define DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT 0x00000008
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE_MASK 0x00000030L
#define DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT 0x00000004
#define DENORM_CONTROL__DENORM_MODE_MASK 0x00000007L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
index 15e5a65cf492..70ee6be94a9b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
@@ -9776,6 +9776,14 @@
#define regDIG0_DIG_BE_CNTL_BASE_IDX 2
#define regDIG0_DIG_BE_EN_CNTL 0x20bd
#define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG0_HDCP_INT_CONTROL 0x20c0
+#define regDIG0_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG0_HDCP_LINK0_STATUS 0x20c1
+#define regDIG0_HDCP_LINK0_STATUS_BASE_IDX 2
+#define regDIG0_HDCP_I2C_CONTROL_0 0x20c2
+#define regDIG0_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG0_HDCP_I2C_CONTROL_1 0x20c3
+#define regDIG0_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG0_TMDS_CNTL 0x20e4
#define regDIG0_TMDS_CNTL_BASE_IDX 2
#define regDIG0_TMDS_CONTROL_CHAR 0x20e5
@@ -10081,6 +10089,12 @@
#define regDIG1_DIG_BE_CNTL_BASE_IDX 2
#define regDIG1_DIG_BE_EN_CNTL 0x21e1
#define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG1_HDCP_INT_CONTROL 0x21e4
+#define regDIG1_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG1_HDCP_I2C_CONTROL_0 0x21e6
+#define regDIG1_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG1_HDCP_I2C_CONTROL_1 0x21e7
+#define regDIG1_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG1_TMDS_CNTL 0x2208
#define regDIG1_TMDS_CNTL_BASE_IDX 2
#define regDIG1_TMDS_CONTROL_CHAR 0x2209
@@ -10386,6 +10400,12 @@
#define regDIG2_DIG_BE_CNTL_BASE_IDX 2
#define regDIG2_DIG_BE_EN_CNTL 0x2305
#define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG2_HDCP_INT_CONTROL 0x2308
+#define regDIG2_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG2_HDCP_I2C_CONTROL_0 0x230a
+#define regDIG2_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG2_HDCP_I2C_CONTROL_1 0x230b
+#define regDIG2_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG2_TMDS_CNTL 0x232c
#define regDIG2_TMDS_CNTL_BASE_IDX 2
#define regDIG2_TMDS_CONTROL_CHAR 0x232d
@@ -10691,6 +10711,12 @@
#define regDIG3_DIG_BE_CNTL_BASE_IDX 2
#define regDIG3_DIG_BE_EN_CNTL 0x2429
#define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2
+#define regDIG3_HDCP_INT_CONTROL 0x242c
+#define regDIG3_HDCP_INT_CONTROL_BASE_IDX 2
+#define regDIG3_HDCP_I2C_CONTROL_0 0x242e
+#define regDIG3_HDCP_I2C_CONTROL_0_BASE_IDX 2
+#define regDIG3_HDCP_I2C_CONTROL_1 0x242f
+#define regDIG3_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG3_TMDS_CNTL 0x2450
#define regDIG3_TMDS_CNTL_BASE_IDX 2
#define regDIG3_TMDS_CONTROL_CHAR 0x2451
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
index 5d9d5fea6e06..e3d841b2e9af 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
@@ -2847,6 +2847,14 @@
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x1
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x2
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x3
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x4
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x5
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x6
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x7
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x8
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x9
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xa
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0xb
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0xc
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0xd
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xe
@@ -2871,6 +2879,14 @@
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000002L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000004L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000008L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000010L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000020L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000040L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000080L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000100L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000200L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000400L
+#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000800L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00001000L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00002000L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00004000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
index c75aee25619e..6f44345277af 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h
@@ -1779,6 +1779,8 @@
#define mmRLC_TTOP_D 0x3105
#define mmRLC_CLEAR_STATE_RESTORE_BASE 0x30C8
#define mmRLC_PG_AO_CU_MASK 0x310B
+#define mmSPI_STATIC_THREAD_MGMT_1 0x2438
+#define mmSPI_STATIC_THREAD_MGMT_2 0x2439
#define mmSPI_STATIC_THREAD_MGMT_3 0x243A
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
index edc8a793a95d..4dd386b98748 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h
@@ -234,6 +234,26 @@
#define mmIH_RB_WPTR_ADDR_HI 0x0F84
#define mmIH_RB_WPTR_ADDR_LO 0x0F85
#define mmIH_STATUS 0x0F88
+
+#define mmDMA_GFX_RB_CNTL 0x3400
+#define mmDMA_GFX_RB_BASE 0x3401
+#define mmDMA_GFX_RB_RPTR 0x3402
+#define mmDMA_GFX_RB_WPTR 0x3403
+#define mmDMA_GFX_RB_RPTR_ADDR_HI 0x3407
+#define mmDMA_GFX_RB_RPTR_ADDR_LO 0x3408
+#define mmDMA_GFX_IB_CNTL 0x3409
+#define mmDMA_GFX_IB_RPTR 0x340a
+#define mmDMA_CNTL 0x340b
+#define mmDMA_STATUS_REG 0x340D
+#define mmDMA_TILING_CONFIG 0x342E
+#define mmDMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
+#define mmDMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
+#define mmDMA_POWER_CNTL 0x342F
+#define mmDMA_CLK_CTRL 0x3430
+#define mmDMA_PG 0x3435
+#define mmDMA_PGFSM_CONFIG 0x3436
+#define mmDMA_PGFSM_WRITE 0x3437
+
#define mmSEM_MAILBOX 0x0F9B
#define mmSEM_MAILBOX_CLIENTCONFIG 0x0F9A
#define mmSEM_MAILBOX_CONTROL 0x0F9C
@@ -269,7 +289,4 @@
#define mmVCE_CONFIG 0x0F94
#define mmXDMA_MSTR_MEM_OVERFLOW_CNTL 0x03F8
-/* from the old sid.h */
-#define mmDMA_TILING_CONFIG 0x342E
-
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
index 1c540fe136cb..9f7fc2428b69 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h
@@ -823,6 +823,43 @@
#define LX3__RESERVED__SHIFT 0x00000000
#define RINGOSC_MASK__MASK_MASK 0x0000ffffL
#define RINGOSC_MASK__MASK__SHIFT 0x00000000
+
+#define DMA_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define DMA_CNTL__TRAP_ENABLE__SHIFT 0x00000000
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE_MASK 0x00000002L
+#define DMA_CNTL__SEM_INCOMPLETE_INT_ENABLE__SHIFT 0x00000001
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define DMA_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x00000002
+#define DMA_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define DMA_CNTL__DATA_SWAP_ENABLE__SHIFT 0x00000003
+#define DMA_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define DMA_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x0000001C
+#define DMA_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_RB_CNTL__RB_SIZE__SHIFT 0x00000001
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define DMA_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x00000009
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0x0000000C
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0x0000000D
+#define DMA_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x00000010
+#define DMA_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define DMA_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x00000000
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define DMA_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x00000004
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE_MASK 0x80000000L
+#define DMA_GFX_IB_CNTL__CMD_VMID_FORCE__SHIFT 0x0000001F
+
+#define DMA_STATUS_REG__IDLE_MASK 0x00000001L
+#define DMA_STATUS_REG__IDLE__SHIFT 0x00000000
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L
+#define DMA_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x00000008
+#define DMA_PG__PG_CNTL_ENABLE_MASK 0x00000001L
+#define DMA_PG__PG_CNTL_ENABLE__SHIFT 0x00000000
+
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0_MASK 0x00000007L
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT0__SHIFT 0x00000000
#define SEM_MAILBOX_CLIENTCONFIG__CP_CLIENT1_MASK 0x00000038L
@@ -1015,6 +1052,10 @@
#define SRBM_STATUS2__VCE_BUSY__SHIFT 0x00000007
#define SRBM_STATUS2__VCE_RQ_PENDING_MASK 0x00000008L
#define SRBM_STATUS2__VCE_RQ_PENDING__SHIFT 0x00000003
+#define SRBM_STATUS2__DMA_BUSY_MASK 0x00000020L
+#define SRBM_STATUS2__DMA_BUSY__SHIFT 0x00000005
+#define SRBM_STATUS2__DMA1_BUSY_MASK 0x00000040L
+#define SRBM_STATUS2__DMA1_BUSY__SHIFT 0x00000006
#define SRBM_STATUS2__XDMA_BUSY_MASK 0x00000100L
#define SRBM_STATUS2__XDMA_BUSY__SHIFT 0x00000008
#define SRBM_STATUS2__XSP_BUSY_MASK 0x00000010L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
index 6b10be61efc3..bdef1f743df7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h
@@ -41,7 +41,49 @@
#define ixLCAC_MC5_CNTL 0x012B
#define ixLCAC_MC5_OVR_SEL 0x012C
#define ixLCAC_MC5_OVR_VAL 0x012D
+
+#define mmCG_SPLL_FUNC_CNTL 0x0180
+#define mmCG_SPLL_FUNC_CNTL_2 0x0181
+#define mmCG_SPLL_FUNC_CNTL_3 0x0182
+#define mmCG_SPLL_FUNC_CNTL_4 0x0183
+#define mmCG_SPLL_STATUS 0x0185
+#define mmSPLL_CNTL_MODE 0x0186
+#define mmCG_SPLL_SPREAD_SPECTRUM 0x0188
+#define mmCG_SPLL_SPREAD_SPECTRUM_2 0x0189
+#define mmCG_SPLL_AUTOSCALE_CNTL 0x018B
+#define mmMPLL_BYPASSCLK_SEL 0x0197
+#define mmCG_CLKPIN_CNTL 0x0198
+#define mmCG_CLKPIN_CNTL_2 0x0199
+#define mmTHM_CLK_CNTL 0x019B
+#define mmMISC_CLK_CNTL 0x019C
+#define mmCG_THERMAL_CTRL 0x01C0
+#define mmCG_THERMAL_STATUS 0x01C1
+#define mmCG_THERMAL_INT 0x01C2
+#define mmCG_MULT_THERMAL_CTRL 0x01C4
+#define mmCG_MULT_THERMAL_STATUS 0x01C5
+#define mmCG_FDO_CTRL0 0x01D5
+#define mmCG_FDO_CTRL1 0x01D6
+#define mmCG_FDO_CTRL2 0x01D7
+#define mmCG_TACH_CTRL 0x01DC
+#define mmCG_TACH_STATUS 0x01DD
+#define mmGENERAL_PWRMGT 0x1E0
+#define mmCG_TPC 0x1E1
+#define mmSCLK_PWRMGT_CNTL 0x1E2
+#define mmTARGET_AND_CURRENT_PROFILE_INDEX 0x01E6
+#define mmCG_FTV 0x01EF
+#define mmCG_FFCT_0 0x01F0
+#define mmCG_BSP 0x01FF
+#define mmCG_AT 0x0200
+#define mmCG_GIT 0x0201
+#define mmCG_SSP 0x0203
+#define mmCG_DISPLAY_GAP_CNTL 0x020A
+#define mmCG_ULV_CONTROL 0x021E
+#define mmCG_ULV_PARAMETER 0x021F
+#define mmSMC_SCRATCH0 0x0221
+#define mmCG_CAC_CTRL 0x022E
+
#define ixSMC_PC_C 0x80000370
+
#define ixTHM_TMON0_DEBUG 0x03F0
#define ixTHM_TMON0_INT_DATA 0x0380
#define ixTHM_TMON0_RDIL0_DATA 0x0300
@@ -110,6 +152,7 @@
#define ixTHM_TMON1_RDIR7_DATA 0x0337
#define ixTHM_TMON1_RDIR8_DATA 0x0338
#define ixTHM_TMON1_RDIR9_DATA 0x0339
+
#define mmGPIOPAD_A 0x05E7
#define mmGPIOPAD_EN 0x05E8
#define mmGPIOPAD_EXTERN_TRIG_CNTL 0x05F1
@@ -127,6 +170,7 @@
#define mmGPIOPAD_STRENGTH 0x05E5
#define mmGPIOPAD_SW_INT_STAT 0x05E4
#define mmGPIOPAD_Y 0x05E9
+
#define mmSMC_IND_ACCESS_CNTL 0x008A
#define mmSMC_IND_DATA_0 0x0081
#define mmSMC_IND_DATA 0x0081
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
index 7d3925b7266e..67d3c7e13a48 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h
@@ -23,10 +23,142 @@
#ifndef SMU_6_0_SH_MASK_H
#define SMU_6_0_SH_MASK_H
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03ffffffL
-#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
-#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003f0L
+#define CG_AT__CG_R_MASK 0x0000FFFFL
+#define CG_AT__CG_R__SHIFT 0x00000000
+#define CG_AT__CG_L_MASK 0xFFFF0000L
+#define CG_AT__CG_L__SHIFT 0x00000010
+
+#define CG_BSP__BSP_MASK 0x0000FFFFL
+#define CG_BSP__BSP__SHIFT 0x00000000
+#define CG_BSP__BSU_MASK 0x000F0000L
+#define CG_BSP__BSU__SHIFT 0x00000010
+
+#define CG_CAC_CTRL__CAC_WINDOW_MASK 0x00FFFFFFL
+#define CG_CAC_CTRL__CAC_WINDOW__SHIFT 0x00000000
+
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x00000002L
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x00000001
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x00000004L
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x00000002
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x00000008L
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x00000003
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x00000100L
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x00000008
+
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK 0x00000003L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT 0x00000000
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK 0x0000000CL
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT 0x00000002
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x0003FFF0L
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x00000004
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x00700000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x00000014
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK 0x03000000L
+#define CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT 0x00000018
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK 0x0C000000L
+#define CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT 0x0000001A
+
+#define CG_FFCT_0__UTC_0_MASK 0x000003FFL
+#define CG_FFCT_0__UTC_0__SHIFT 0x00000000
+#define CG_FFCT_0__DTC_0_MASK 0x000FFC00L
+#define CG_FFCT_0__DTC_0__SHIFT 0x0000000A
+
+#define CG_GIT__CG_GICST_MASK 0x0000FFFFL
+#define CG_GIT__CG_GICST__SHIFT 0x00000000
+#define CG_GIT__CG_GIPOT_MASK 0xFFFF0000L
+#define CG_GIT__CG_GIPOT__SHIFT 0x00000010
+
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x00000001L
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP_MASK 0x00000002L
+#define CG_SPLL_FUNC_CNTL__SPLL_SLEEP__SHIFT 0x00000001
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x00000008L
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x00000003
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x000003F0L
#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x00000004
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x07F00000L
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x00000014
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x000001FFL
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x00800000L
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x00000017
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x04000000L
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x0000001A
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x03FFFFFFL
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x00000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000L
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x0000001C
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x00000002L
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x00000001
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x00000001L
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x00000000
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK 0x0000FFF0L
+#define CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT 0x00000004
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK 0x03FFFFFFL
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT 0x00000000
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK 0x00000200L
+#define CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR__SHIFT 0x00000009
+
+#define CG_SSP__SST_MASK 0x0000FFFFL
+#define CG_SSP__SST__SHIFT 0x00000000
+#define CG_SSP__SSTU_MASK 0x000F0000L
+#define CG_SSP__SSTU__SHIFT 0x00000010
+
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x00000007L
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x00000000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x003FC000L
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0x0000000E
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x0001FE00L
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x00000009
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0x0000FF00L
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x00000008
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0x00FF0000L
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x00000010
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK 0x01000000L
+#define CG_THERMAL_INT__THERM_INT_MASK_HIGH__SHIFT 0x00000018
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK 0x02000000L
+#define CG_THERMAL_INT__THERM_INT_MASK_LOW__SHIFT 0x00000019
+
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0x0FF00000L
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x00000014
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x00000000
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003FE00L
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x00000009
+
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x00000000
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x00000000
+#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x00000000
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0x0000000B
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xFE000000L
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x00000019
+
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x00000007L
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x00000000
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x00000003
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xFFFFFFFFL
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x00000000
+
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x00000001L
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x00000000
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x00000002L
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x00000001
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x00000004L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x00000002
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x00000008L
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x00000003
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x00000040L
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x00000006
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x00000400L
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0x0000000A
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x00800000L
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x00000017
+
#define GPIOPAD_A__GPIO_A_MASK 0x7fffffffL
#define GPIOPAD_A__GPIO_A__SHIFT 0x00000000
#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffffL
@@ -195,6 +327,7 @@
#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x00000000
#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffffL
#define GPIOPAD_Y__GPIO_Y__SHIFT 0x00000000
+
#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x00000001L
#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x00000000
#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x0001fffeL
@@ -243,6 +376,37 @@
#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x00000000
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffffL
#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x00000000
+
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0x0000FF00L
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x00000008
+
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x00000001L
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x00000000
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1_MASK 0x00000002L
+#define SCLK_PWRMGT_CNTL__SCLK_LOW_D1__SHIFT 0x00000001
+#define SCLK_PWRMGT_CNTL__FIR_RESET_MASK 0x00000010L
+#define SCLK_PWRMGT_CNTL__FIR_RESET__SHIFT 0x00000004
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK 0x00000020L
+#define SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL__SHIFT 0x00000005
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK 0x00000040L
+#define SCLK_PWRMGT_CNTL__FIR_TREND_MODE__SHIFT 0x00000006
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN_MASK 0x00000080L
+#define SCLK_PWRMGT_CNTL__DYN_GFX_CLK_OFF_EN__SHIFT 0x00000007
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON_MASK 0x00000100L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_ON__SHIFT 0x00000008
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF_MASK 0x00000200L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_REQUEST_OFF__SHIFT 0x00000009
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF_MASK 0x00000400L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_FORCE_OFF__SHIFT 0x0000000A
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1_MASK 0x00000800L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D1__SHIFT 0x0000000B
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2_MASK 0x00001000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D2__SHIFT 0x0000000C
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3_MASK 0x00002000L
+#define SCLK_PWRMGT_CNTL__GFX_CLK_OFF_ACPI_D3__SHIFT 0x0000000D
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x00004000L
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0x0000000E
+
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x00000001L
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x00000000
#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x00000100L
@@ -285,6 +449,7 @@
#define SMC_RESP_1__SMC_RESP__SHIFT 0x00000000
#define SMC_RESP_2__SMC_RESP_MASK 0xffffffffL
#define SMC_RESP_2__SMC_RESP__SHIFT 0x00000000
+
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0x000ff000L
#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0x0000000c
#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x00000010L
@@ -293,6 +458,8 @@
#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x00000003
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x00000002L
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x00000001
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL_MASK 0x0C000000L
+#define SPLL_CNTL_MODE__SPLL_REFCLK_SEL__SHIFT 0x0000001A
#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000L
#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x0000001c
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x00000001L
@@ -303,10 +470,25 @@
#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x00000002
#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000L
#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x0000001d
+
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0x0f000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x00000018
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000L
#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x0000001c
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK 0x000000F0L
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT 0x00000004
+
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0x000000FFL
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x00000000
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0x0000FF00L
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x00000008
+
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK 0x000000FFL
+#define MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT 0x00000000
+#define MISC_CLK_CNTL__ZCLK_SEL_MASK 0x0000FF00L
+#define MISC_CLK_CNTL__ZCLK_SEL__SHIFT 0x00000008
+
#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x0000001fL
#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x00000000
#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0x0000ffe0L
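
These MASK/__SHIFT pairs follow the usual asic_reg convention: a field is updated by clearing its mask and or-ing in a shifted value. A minimal sketch of that read-modify-write pattern — reg_set_field() is an illustrative helper, not something this patch adds:

#include <stdint.h>

/* Illustrative only: insert 'val' into the field described by
 * (mask, shift), leaving every other bit of 'reg' untouched. */
static inline uint32_t reg_set_field(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t val)
{
	reg &= ~mask;			/* clear the old field contents */
	reg |= (val << shift) & mask;	/* insert the new value, clipped to the field */
	return reg;
}

/* e.g. force the fan controller into PWM mode 1 (bits 13:11 of CG_FDO_CTRL2):
 *   reg = reg_set_field(reg, CG_FDO_CTRL2__FDO_PWM_MODE_MASK,
 *                       CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT, 1);
 */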
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
index 14574112c469..72a118b2af69 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
@@ -1067,7 +1067,13 @@
#define regVCN_FEATURES_BASE_IDX 1
#define regUVD_GPUIOV_STATUS 0x0055
#define regUVD_GPUIOV_STATUS_BASE_IDX 1
+#define regUVD_RAS_VCPU_VCODEC_STATUS 0x0057
+#define regUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1
#define regUVD_SCRATCH15 0x005c
+#define regUVD_RAS_JPEG0_STATUS 0x0059
+#define regUVD_RAS_JPEG0_STATUS_BASE_IDX 1
+#define regUVD_RAS_JPEG1_STATUS 0x005a
+#define regUVD_RAS_JPEG1_STATUS_BASE_IDX 1
#define regUVD_SCRATCH15_BASE_IDX 1
#define regUVD_VERSION 0x005d
#define regUVD_VERSION_BASE_IDX 1
@@ -1147,6 +1153,22 @@
#define regUVD_DPG_LMA_CTL2_BASE_IDX 1
+// addressBlock: uvd_mmsch_dec
+// base address: 0x20d2c
+#define regMMSCH_VF_VMID 0x054b
+#define regMMSCH_VF_VMID_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_LO 0x054c
+#define regMMSCH_VF_CTX_ADDR_LO_BASE_IDX 1
+#define regMMSCH_VF_CTX_ADDR_HI 0x054d
+#define regMMSCH_VF_CTX_ADDR_HI_BASE_IDX 1
+#define regMMSCH_VF_CTX_SIZE 0x054e
+#define regMMSCH_VF_CTX_SIZE_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_HOST 0x0552
+#define regMMSCH_VF_MAILBOX_HOST_BASE_IDX 1
+#define regMMSCH_VF_MAILBOX_RESP 0x0553
+#define regMMSCH_VF_MAILBOX_RESP_BASE_IDX 1
+
+
// addressBlock: uvd_vcn_umsch_dec
// base address: 0x21500
#define regVCN_UMSCH_MES_CNTL 0x0740
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
index 5c119a6b87fb..c78b09d6fbae 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
@@ -5714,6 +5714,22 @@
//UVD_GPUIOV_STATUS
#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0
#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L
+//UVD_RAS_VCPU_VCODEC_STATUS
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L
+
+//UVD_RAS_JPEG0_STATUS
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L
+//UVD_RAS_JPEG1_STATUS
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L
//UVD_SCRATCH15
#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0
#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL
@@ -5929,6 +5945,29 @@
#define UVD_DPG_LMA_CTL2__JPEG_WRITE_PTR_MASK 0x0000FE00L
+// addressBlock: uvd_mmsch_dec
+//MMSCH_VF_VMID
+#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0
+#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5
+#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL
+#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L
+//MMSCH_VF_CTX_ADDR_LO
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6
+#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L
+//MMSCH_VF_CTX_ADDR_HI
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0
+#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL
+//MMSCH_VF_CTX_SIZE
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0
+#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_HOST
+#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX_RESP
+#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0
+#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL
+
+
// addressBlock: uvd_vcn_umsch_dec
//VCN_UMSCH_MES_CNTL
#define VCN_UMSCH_MES_CNTL__PIPE_ID__SHIFT 0x0
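
The POISONED_VF/POISONED_PF layout above packs one RAS poison flag per virtual function into bits 30:0 and the physical function's flag into bit 31. A hedged sketch of decoding such a status word; both helpers are invented for illustration:

#include <stdbool.h>
#include <stdint.h>

static bool vf_is_poisoned(uint32_t status, unsigned int vf_id)
{
	/* bits 30:0 carry one poison flag per VF */
	return vf_id < 31 && (status & (1u << vf_id));
}

static bool pf_is_poisoned(uint32_t status)
{
	/* bit 31 carries the physical-function poison flag */
	return (status & 0x80000000u) != 0;
}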
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index b78360a71bc9..b344acefc606 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4308,7 +4308,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
// exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged
constant across VGA or non VGA adapter,
@@ -6017,7 +6017,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
+#define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
#define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE 0x00010000
@@ -6460,7 +6460,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9
// ulGPUCapInfo
#define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS 0x10
+#define SYS_INFO_V1_9_GPUCAPSINFO_ENABLE_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
#define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE 0x00010000
//ulGPUCapInfo[18]=1 indicate the IOMMU is not available
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0160d65f3f5e..5c86423c2e92 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -183,6 +183,7 @@ enum atom_dgpu_vram_type {
ATOM_DGPU_VRAM_TYPE_HBM2E = 0x61,
ATOM_DGPU_VRAM_TYPE_GDDR6 = 0x70,
ATOM_DGPU_VRAM_TYPE_HBM3 = 0x80,
+ ATOM_DGPU_VRAM_TYPE_HBM3E = 0x81,
};
enum atom_dp_vs_preemph_def{
@@ -1713,7 +1714,7 @@ enum atom_system_vbiosmisc_def{
// gpucapinfo
enum atom_system_gpucapinf_def{
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS = 0x10,
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS = 0x10,
};
//dpphy_override
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
index 3a4670bc4449..b98b7ae551b5 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
@@ -48,6 +48,7 @@
#define GFX_11_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error
#define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
#define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
+#define GFX_11_0_0__SRCID__SDMA_FENCE 67 // 0x43 User fence
#define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h
new file mode 100644
index 000000000000..467897ec2e65
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __IRQSRCS_GFX_12_0_0_H__
+#define __IRQSRCS_GFX_12_0_0_H__
+
+#define GFX_12_0_0__SRCID__UTCL2_FAULT 0 // UTCL2 has encountered a fault or retry scenario
+#define GFX_12_0_0__SRCID__UTCL2_DATA_POISONING 1 // UTCL2 for data poisoning
+#define GFX_12_0_0__SRCID__MEM_ACCES_MON 10 // 0x0A EA memory access monitor interrupt
+#define GFX_12_0_0__SRCID__SDMA_ATOMIC_RTN_DONE 48 // 0x30 SDMA atomic*_rtn ops complete
+#define GFX_12_0_0__SRCID__SDMA_TRAP 49 // 0x31 Trap
+#define GFX_12_0_0__SRCID__SDMA_SRBMWRITE 50 // 0x32 SRBM write Protection
+#define GFX_12_0_0__SRCID__SDMA_CTXEMPTY 51 // 0x33 Context Empty
+#define GFX_12_0_0__SRCID__SDMA_PREEMPT 52 // 0x34 SDMA New Run List
+#define GFX_12_0_0__SRCID__SDMA_IB_PREEMPT 53 // 0x35 SDMA mid-command buffer preempt interrupt
+#define GFX_12_0_0__SRCID__SDMA_DOORBELL_INVALID 54 // 0x36 Doorbell BE invalid
+#define GFX_12_0_0__SRCID__SDMA_QUEUE_HANG 55 // 0x37 Queue hang or Command timeout
+#define GFX_12_0_0__SRCID__SDMA_ATOMIC_TIMEOUT 56 // 0x38 SDMA atomic CMPSWAP loop timeout
+#define GFX_12_0_0__SRCID__SDMA_POLL_TIMEOUT 57 // 0x39 SRBM read poll timeout
+#define GFX_12_0_0__SRCID__SDMA_PAGE_TIMEOUT 58 // 0x3A Page retry timeout after UTCL2 return nack = 1
+#define GFX_12_0_0__SRCID__SDMA_PAGE_NULL 59 // 0x3B Page Null from UTCL2 when nack = 2
+#define GFX_12_0_0__SRCID__SDMA_PAGE_FAULT 60 // 0x3C Page Fault Error from UTCL2 when nack = 3
+#define GFX_12_0_0__SRCID__SDMA_VM_HOLE 61 // 0x3D MC or SEM address in VM hole
+#define GFX_12_0_0__SRCID__SDMA_ECC 62 // 0x3E ECC Error
+#define GFX_12_0_0__SRCID__SDMA_FROZEN 63 // 0x3F SDMA Frozen
+#define GFX_12_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error
+#define GFX_12_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
+#define GFX_12_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
+#define GFX_12_0_0__SRCID__SDMA_FENCE 70 // 0x46 User fence
+#define GFX_12_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
+#define GFX_12_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int
+#define GFX_12_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error
+#define GFX_12_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt
+#define GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR 183 // 0xB7 Bad Opcode Error
+#define GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT 184 // 0xB8 Privileged Register Fault
+#define GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT 185 // 0xB9 Privileged Instr Fault
+#define GFX_12_0_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 // 0xBA Wait Memory Semaphore Fault (Sync Object Fault)
+#define GFX_12_0_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 // 0xBB Context Empty Interrupt
+#define GFX_12_0_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 // 0xBC Context Busy Interrupt
+#define GFX_12_0_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 // 0xC0 CP.ME Wait_Reg_Mem Poll Timeout
+#define GFX_12_0_0__SRCID__CP_SIG_INCOMPLETE 193 // 0xC1 "Surface Probe Fault Signal Incomplete"
+#define GFX_12_0_0__SRCID__CP_PREEMPT_ACK 194 // 0xC2 Preemption Acknowledge
+#define GFX_12_0_0__SRCID__CP_GPF 195 // 0xC3 General Protection Fault (GPF)
+#define GFX_12_0_0__SRCID__CP_GDS_ALLOC_ERROR 196 // 0xC4 GDS Alloc Error
+#define GFX_12_0_0__SRCID__CP_ECC_ERROR 197 // 0xC5 ECC Error
+#define GFX_12_0_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 // 0xC7 Compute query status
+#define GFX_12_0_0__SRCID__CP_VM_DOORBELL 200 // 0xC8 Unattached VM Doorbell Received
+#define GFX_12_0_0__SRCID__CP_FUE_ERROR 201 // 0xC9 ECC FUE Error
+#define GFX_12_0_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 // 0xCA Streaming Perf Monitor Interrupt
+#define GFX_12_0_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 // 0xE8 Read timeout error
+#define GFX_12_0_0__SRCID__GRBM_REG_GUI_IDLE 233 // 0xE9 Register GUI Idle
+#define GFX_12_0_0__SRCID__SQ_INTERRUPT_ID 239 // 0xEF SQ Interrupt (ttrace wrap, errors)
+
+#endif
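
Source IDs like these are combined with an IH client ID when a driver hooks an interrupt. A sketch of how the new GFX_12_0_0__SRCID__SDMA_FENCE entry would typically be registered — amdgpu_irq_add_id() is the existing amdgpu helper, but the function name, the client ID, and the fence_irq source member are stand-ins, not code from this patch:

/* Hedged sketch of hooking the new source ID; SOC21_IH_CLIENTID_GFX is a
 * placeholder for whatever client ID the GFX12 IH block actually uses, and
 * adev->sdma.fence_irq would need a driver-defined amdgpu_irq_src_funcs
 * table behind it. */
static int sdma_fence_irq_init(struct amdgpu_device *adev)
{
	return amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
				 GFX_12_0_0__SRCID__SDMA_FENCE,
				 &adev->sdma.fence_irq);
}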
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 21dc956b5f35..f4d914dc731f 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -128,6 +128,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_CPU_CLK,
AMDGPU_PP_SENSOR_VDDNB,
AMDGPU_PP_SENSOR_VDDGFX,
+ AMDGPU_PP_SENSOR_VDDBOARD,
AMDGPU_PP_SENSOR_UVD_VCLK,
AMDGPU_PP_SENSOR_UVD_DCLK,
AMDGPU_PP_SENSOR_VCE_ECCLK,
@@ -493,6 +494,7 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
int (*set_watermarks_for_clock_ranges)(void *handle,
struct pp_smu_wm_range_sets *ranges);
@@ -1591,4 +1593,27 @@ struct amdgpu_pm_metrics {
uint8_t data[];
};
+struct amdgpu_partition_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ /* Current clocks (MHz) */
+ uint16_t current_gfxclk[MAX_XCC];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+ uint16_t padding;
+
+ /* Utilization Instantaneous (%) */
+ uint32_t gfx_busy_inst[MAX_XCC];
+ uint16_t jpeg_busy[NUM_JPEG_ENG_V1];
+ uint16_t vcn_busy[NUM_VCN];
+ /* Utilization Accumulated (%) */
+ uint64_t gfx_busy_acc[MAX_XCC];
+ /* Total App Clock Counter Accumulated */
+ uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_thm_acc[MAX_XCC];
+ uint64_t gfx_low_utilization_acc[MAX_XCC];
+ uint64_t gfx_below_host_limit_total_acc[MAX_XCC];
+};
+
#endif
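
Consumers of amdgpu_partition_metrics_v1_0 should validate common_header before trusting the layout. A sketch of that check, assuming the usual metrics_table_header fields (structure_size, format_revision, content_revision); the helper name is illustrative:

static bool partition_metrics_is_v1_0(const struct metrics_table_header *h)
{
	/* v1.0 layout: format 1, content 0, and at least this many bytes */
	return h->format_revision == 1 &&
	       h->content_revision == 0 &&
	       h->structure_size >= sizeof(struct amdgpu_partition_metrics_v1_0);
}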
diff --git a/drivers/gpu/drm/amd/include/v11_structs.h b/drivers/gpu/drm/amd/include/v11_structs.h
index f8008270f813..3728389fc3be 100644
--- a/drivers/gpu/drm/amd/include/v11_structs.h
+++ b/drivers/gpu/drm/amd/include/v11_structs.h
@@ -535,8 +535,8 @@ struct v11_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v11_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v11_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
diff --git a/drivers/gpu/drm/amd/include/v12_structs.h b/drivers/gpu/drm/amd/include/v12_structs.h
index 5eabab611b02..03a35f8a65b0 100644
--- a/drivers/gpu/drm/amd/include/v12_structs.h
+++ b/drivers/gpu/drm/amd/include/v12_structs.h
@@ -535,8 +535,8 @@ struct v12_gfx_mqd {
uint32_t reserved_507; // offset: 507 (0x1FB)
uint32_t reserved_508; // offset: 508 (0x1FC)
uint32_t reserved_509; // offset: 509 (0x1FD)
- uint32_t reserved_510; // offset: 510 (0x1FE)
- uint32_t reserved_511; // offset: 511 (0x1FF)
+ uint32_t fence_address_lo; // offset: 510 (0x1FE)
+ uint32_t fence_address_hi; // offset: 511 (0x1FF)
};
struct v12_sdma_mqd {
@@ -1118,8 +1118,8 @@ struct v12_compute_mqd {
uint32_t reserved_443; // offset: 443 (0x1BB)
uint32_t reserved_444; // offset: 444 (0x1BC)
uint32_t reserved_445; // offset: 445 (0x1BD)
- uint32_t reserved_446; // offset: 446 (0x1BE)
- uint32_t reserved_447; // offset: 447 (0x1BF)
+ uint32_t fence_address_lo; // offset: 446 (0x1BE)
+ uint32_t fence_address_hi; // offset: 447 (0x1BF)
uint32_t gws_0_val; // offset: 448 (0x1C0)
uint32_t gws_1_val; // offset: 449 (0x1C1)
uint32_t gws_2_val; // offset: 450 (0x1C2)
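
In both the v11 and v12 MQDs the repurposed words hold one 64-bit user-fence address split across two 32-bit fields. A minimal sketch of filling them with the kernel's lower_32_bits()/upper_32_bits() helpers; mqd and user_fence_gpu_va are illustrative names, not identifiers from this patch:

/* Hedged sketch: program the user-fence GPU address into the MQD. */
u64 fence_addr = user_fence_gpu_va;	/* GPU VA obtained elsewhere */

mqd->fence_address_lo = lower_32_bits(fence_addr);
mqd->fence_address_hi = upper_32_bits(fence_addr);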
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 3533d43ed1e7..5c1cbdc122d2 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -329,6 +329,34 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
return ret;
}
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool support_link_reset = false;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_link_reset = smu_link_reset_is_support(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_link_reset;
+}
+
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_link_reset(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
enum PP_SMC_POWER_PROFILE type,
bool en)
@@ -780,6 +808,21 @@ int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -1654,6 +1697,28 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
}
}
+int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
+{
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu->od_enabled;
+ } else {
+ struct pp_hwmgr *hwmgr;
+
+ /*
+ * DPM on some legacy ASICs doesn't carry the od_enabled member,
+ * as its pp_handle is cast directly from adev.
+ */
+ if (amdgpu_dpm_is_legacy_dpm(adev))
+ return false;
+
+ hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
+
+ return hwmgr->od_enabled;
+ }
+}
+
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const char *buf,
size_t size)
@@ -1976,3 +2041,35 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
return ret;
}
+
+/**
+ * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
+ * partition
+ * @adev: Pointer to the device.
+ * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific XCP, including details such as
+ * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+ void *table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_xcp_metrics)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
+ table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
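
As the kernel-doc above notes, passing table == NULL turns the call into a size query, so callers naturally use a two-step pattern. A hedged sketch, assuming adev and xcp_id are in scope (error handling abbreviated):

ssize_t size;
void *table;

size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);	/* size query */
if (size <= 0)
	return size;

table = kzalloc(size, GFP_KERNEL);
if (!table)
	return -ENOMEM;

size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, table);	/* fill */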
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 922def51685b..edd9895b46c0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1606,7 +1606,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
- unsigned long flags;
int ret = 0;
ret = kstrtol(buf, 0, &throttling_logging_interval);
@@ -1617,18 +1616,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
return -EINVAL;
if (throttling_logging_interval > 0) {
- raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
/*
* Reset the ratelimit timer internals.
* This can effectively restart the timer.
*/
- adev->throttling_logging_rs.interval =
- (throttling_logging_interval - 1) * HZ;
- adev->throttling_logging_rs.begin = 0;
- adev->throttling_logging_rs.printed = 0;
- adev->throttling_logging_rs.missed = 0;
- raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
-
+ ratelimit_state_reset_interval(&adev->throttling_logging_rs,
+ (throttling_logging_interval - 1) * HZ);
atomic_set(&adev->throttling_logging_enabled, 1);
} else {
atomic_set(&adev->throttling_logging_enabled, 0);
@@ -2944,6 +2937,23 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
return sysfs_emit(buf, "%d\n", vddgfx);
}
+static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
+ u32 vddboard;
+ int r;
+
+ /* get the voltage */
+ r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%d\n", vddboard);
+}
+
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -2951,6 +2961,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
return sysfs_emit(buf, "vddgfx\n");
}
+static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "vddboard\n");
+}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3294,6 +3310,8 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0)
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
@@ -3341,6 +3359,8 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
@@ -3492,6 +3512,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0;
+ /* only a few boards support vddboard */
+ if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
+ amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
+ return 0;
+
/* no mclk on APUs other than gc 9,4,3 */
if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
(attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 4c0f7ad14816..768317ee1486 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -416,11 +416,13 @@ int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+int amdgpu_dpm_link_reset(struct amdgpu_device *adev);
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev);
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
@@ -522,6 +524,8 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
long *input, uint32_t size);
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
+ void *table);
/**
* @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
@@ -559,6 +563,7 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
void **addr,
size_t *size);
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev);
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const char *buf,
size_t size);
@@ -607,5 +612,6 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 59fae668dc3f..34e71727b27d 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2594,7 +2594,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
}
if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS)
pi->caps_enable_dfs_bypass = true;
sumo_construct_sclk_voltage_mapping_table(adev,
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 1c25f3023e93..4c0e976004ba 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -30,16 +30,32 @@
#include "amdgpu_atombios.h"
#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
-#include "sid.h"
+#include "atom.h"
+#include "gfx_v6_0.h"
#include "r600_dpm.h"
+#include "sid.h"
#include "si_dpm.h"
-#include "atom.h"
#include "../include/pptable.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <legacy_dpm.h>
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
+#include"gmc/gmc_6_0_d.h"
+#include"gmc/gmc_6_0_sh_mask.h"
+
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
@@ -2193,7 +2209,7 @@ static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
if (xclk == 0)
return 0;
- cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+ cac_window = RREG32(mmCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK;
cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
wintime = (cac_window_size * 100) / xclk;
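
CAC_WINDOW is a 24-bit field whose upper and lower halves multiply into a window size, which is then scaled by 100 and divided by the reference clock. Worked through with illustrative numbers:

/* Illustrative arithmetic only, mirroring si_calculate_cac_wintime():
 *   cac_window      = CG_CAC_CTRL & CG_CAC_CTRL__CAC_WINDOW_MASK   e.g. 0x00080040
 *   upper           = (cac_window & 0xFFFF0000) >> 16              -> 0x0008 (8)
 *   lower           =  cac_window & 0x0000FFFF                     -> 0x0040 (64)
 *   cac_window_size = 8 * 64                                       -> 512
 *   wintime         = (512 * 100) / xclk
 */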
@@ -2489,19 +2505,19 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if (adev->pm.dpm.sq_ramping_threshold == 0)
return -EINVAL;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -2510,14 +2526,17 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
enable_sq_ramping) {
- sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
- sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
- sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
- sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
- sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT;
+ sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT;
+ sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT;
} else {
- sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
- sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK |
+ SQ_POWER_THROTTLE__MIN_POWER_MASK;
+ sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
}
smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
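
The conversion in this hunk is mechanical: the old sid.h helpers were plain shifts behind the new mask names, so the rewritten lines produce a bit-for-bit identical register image. For comparison, the old macro style, reconstructed here for illustration only:

/* Old sid.h style (reconstructed, not part of this patch):
 *   #define MAX_POWER(x)	((x) << MAX_POWER_SHIFT)
 *   sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
 *
 * New explicit style (this patch):
 *   sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER
 *                        << SQ_POWER_THROTTLE__MAX_POWER__SHIFT;
 */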
@@ -2761,9 +2780,9 @@ static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
if (!cac_tables)
return -ENOMEM;
- reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
- reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
- WREG32(CG_CAC_CTRL, reg);
+ reg = RREG32(mmCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK;
+ reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT);
+ WREG32(mmCG_CAC_CTRL, reg);
si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
si_pi->dyn_powertune_data.dc_pwr_value =
@@ -2962,10 +2981,10 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev)
ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
if (ret)
break;
- p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
- fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
- clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
- clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+ p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
+ fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
fb_div &= ~0x00001FFF;
fb_div >>= 1;
@@ -3669,10 +3688,10 @@ static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
- tmp = RREG32(MC_ARB_RAMCFG);
- row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
- column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
- bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10;
+ column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8;
+ bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2;
density = (1 << (row + column - 20 + bank)) * width;
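
The decoded geometry fields feed a power-of-two product. Worked through with illustrative field values:

/* Illustrative arithmetic only:
 *   NOOFROWS field = 2  ->  row    = 2 + 10 = 12
 *   NOOFCOLS field = 2  ->  column = 2 + 8  = 10
 *   NOOFBANK field = 1  ->  bank   = 1 + 2  = 3
 *   width = 32
 *   density = (1 << (12 + 10 - 20 + 3)) * 32 = (1 << 5) * 32 = 1024
 */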
@@ -3756,11 +3775,11 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
}
if (want_thermal_protection) {
- WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK);
if (pi->thermal_protection)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
} else {
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
}
@@ -3785,20 +3804,20 @@ static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
static void si_start_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_stop_dpm(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
}
@@ -3838,7 +3857,7 @@ static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
PPSMC_Msg msg, u32 parameter)
{
- WREG32(SMC_SCRATCH0, parameter);
+ WREG32(mmSMC_SCRATCH0, parameter);
return amdgpu_si_send_msg_to_smc(adev, msg);
}
@@ -4023,12 +4042,12 @@ static void si_read_clock_registers(struct amdgpu_device *adev)
{
struct si_power_info *si_pi = si_get_pi(adev);
- si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
- si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
- si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
- si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
- si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
- si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+ si_pi->clock_registers.cg_spll_func_cntl = RREG32(mmCG_SPLL_FUNC_CNTL);
+ si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(mmCG_SPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(mmCG_SPLL_FUNC_CNTL_3);
+ si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(mmCG_SPLL_FUNC_CNTL_4);
+ si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(mmCG_SPLL_SPREAD_SPECTRUM);
+ si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(mmCG_SPLL_SPREAD_SPECTRUM_2);
si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
@@ -4044,14 +4063,14 @@ static void si_enable_thermal_protection(struct amdgpu_device *adev,
bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
else
- WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
static void si_enable_acpi_power_management(struct amdgpu_device *adev)
{
- WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK);
}
#if 0
@@ -4132,9 +4151,9 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
tmp = 0x1;
if (eg_pi->sclk_deep_sleep) {
- WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
- WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
- ~AUTOSCALE_ON_SS_CLEAR);
+ WREG32_P(mmMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK);
+ WREG32_P(mmCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK,
+ ~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK);
}
}
@@ -4143,18 +4162,18 @@ static void si_program_display_gap(struct amdgpu_device *adev)
u32 tmp, pipe;
int i;
- tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
else
- tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
if (adev->pm.dpm.new_active_crtc_count > 1)
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
else
- tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
@@ -4189,10 +4208,10 @@ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
if (enable) {
if (pi->sclk_ss)
- WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
} else {
- WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
- WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+ WREG32_P(mmCG_SPLL_SPREAD_SPECTRUM, 0, ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
}
}
@@ -4214,15 +4233,15 @@ static void si_setup_bsp(struct amdgpu_device *adev)
&pi->pbsu);
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
- pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+ pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT);
+ pi->psp = (pi->pbsp << CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT);
- WREG32(CG_BSP, pi->dsp);
+ WREG32(mmCG_BSP, pi->dsp);
}
static void si_program_git(struct amdgpu_device *adev)
{
- WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+ WREG32_P(mmCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK);
}
static void si_program_tp(struct amdgpu_device *adev)
@@ -4231,54 +4250,54 @@ static void si_program_tp(struct amdgpu_device *adev)
enum r600_td td = R600_TD_DFLT;
for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
- WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+ WREG32(mmCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT));
if (td == R600_TD_AUTO)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
else
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
if (td == R600_TD_UP)
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
if (td == R600_TD_DOWN)
- WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+ WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
}
static void si_program_tpp(struct amdgpu_device *adev)
{
- WREG32(CG_TPC, R600_TPC_DFLT);
+ WREG32(mmCG_TPC, R600_TPC_DFLT);
}
static void si_program_sstp(struct amdgpu_device *adev)
{
- WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+ WREG32(mmCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT | R600_SST_DFLT << CG_SSP__SST__SHIFT));
}
static void si_enable_display_gap(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+ u32 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL);
- tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT);
- tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
- DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
- WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+ tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK);
+ tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT |
+ R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT);
+ WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
}
static void si_program_vc(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
- WREG32(CG_FTV, pi->vrc);
+ WREG32(mmCG_FTV, pi->vrc);
}
static void si_clear_vc(struct amdgpu_device *adev)
{
- WREG32(CG_FTV, 0);
+ WREG32(mmCG_FTV, 0);
}
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
@@ -4735,7 +4754,7 @@ static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
- u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+ u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT;
if (tmp >= 4)
dram_rows = 16384;
@@ -4909,7 +4928,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
- reg = CG_R(0xffff) | CG_L(0);
+ reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
table->initialState.level.aT = cpu_to_be32(reg);
table->initialState.level.bSP = cpu_to_be32(pi->dsp);
table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
@@ -4935,10 +4954,13 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
table->initialState.level.dpm2.BelowSafeInc = 0;
table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK |
+ SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
+ SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
+ SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5057,8 +5079,8 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
@@ -5102,10 +5124,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
table->ACPIState.level.dpm2.BelowSafeInc = 0;
table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
- reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK;
table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
- reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
@@ -5250,8 +5272,8 @@ static int si_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;
- WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
- WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ WREG32(mmCG_ULV_CONTROL, ulv->cg_ulv_control);
+ WREG32(mmCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
lane_width = amdgpu_get_pcie_lanes(adev);
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
@@ -5294,16 +5316,16 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
do_div(tmp, reference_clock);
fbdiv = (u32) tmp;
- spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
- spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
- spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+ spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK);
+ spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT;
+ spll_func_cntl |= dividers.post_div << CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+ spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
+ spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
if (pi->sclk_ss) {
struct amdgpu_atom_ss ss;
@@ -5314,12 +5336,12 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
- cg_spll_spread_spectrum &= ~CLK_S_MASK;
- cg_spll_spread_spectrum |= CLK_S(clk_s);
- cg_spll_spread_spectrum |= SSEN;
+ cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK;
+ cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
+ cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
- cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
- cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
}
}
@@ -5485,7 +5507,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
if (pi->mclk_stutter_mode_threshold &&
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
- (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+ (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
(adev->pm.dpm.new_active_crtc_count <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5579,7 +5601,7 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
return -EINVAL;
if (state->performance_level_count < 2) {
- a_t = CG_R(0xffff) | CG_L(0);
+ a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
smc_state->levels[0].aT = cpu_to_be32(a_t);
return 0;
}
@@ -5600,13 +5622,13 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
}
- a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
- a_t |= CG_R(t_l * pi->bsp / 20000);
+ a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK;
+ a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT;
smc_state->levels[i].aT = cpu_to_be32(a_t);
high_bsp = (i == state->performance_level_count - 2) ?
pi->pbsp : pi->bsp;
- a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+ a_t = (0xffff) << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT;
smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
}
@@ -6180,9 +6202,9 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev,
static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+ WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
}
static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
@@ -6204,8 +6226,8 @@ static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
{
u32 speed_cntl;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+ speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
return (u16)speed_cntl;
}
@@ -6412,21 +6434,21 @@ static void si_dpm_setup_asic(struct amdgpu_device *adev)
static int si_thermal_enable_alert(struct amdgpu_device *adev,
bool enable)
{
- u32 thermal_int = RREG32(CG_THERMAL_INT);
+ u32 thermal_int = RREG32(mmCG_THERMAL_INT);
if (enable) {
PPSMC_Result result;
- thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK);
+ WREG32(mmCG_THERMAL_INT, thermal_int);
result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
return -EINVAL;
}
} else {
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- WREG32(CG_THERMAL_INT, thermal_int);
+ thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32(mmCG_THERMAL_INT, thermal_int);
}
return 0;
@@ -6447,9 +6469,9 @@ static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
return -EINVAL;
}
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
- WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
- WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK);
+ WREG32_P(mmCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK);
+ WREG32_P(mmCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK);
adev->pm.dpm.thermal.min_temp = low_temp;
adev->pm.dpm.thermal.max_temp = high_temp;
@@ -6463,20 +6485,20 @@ static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
u32 tmp;
if (si_pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
si_pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT;
si_pi->t_min = tmp;
si_pi->fan_ctrl_is_in_default_mode = false;
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(0);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
@@ -6495,7 +6517,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
return 0;
}
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0) {
adev->pm.dpm.fan.ucode_fan_control = false;
@@ -6531,7 +6553,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
reference_clock) / 1600);
fan_table.fdo_max = cpu_to_be16((u16)duty100);
- tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ tmp = (RREG32(mmCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
fan_table.temp_src = (uint8_t)tmp;
ret = amdgpu_si_copy_bytes_to_smc(adev,
@@ -6590,8 +6612,8 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
if (adev->pm.no_fan)
return -ENOENT;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
- duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty = (RREG32(mmCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6621,7 +6643,7 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
if (speed > 255)
return -EINVAL;
- duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
if (duty100 == 0)
return -EINVAL;
@@ -6630,9 +6652,9 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
do_div(tmp64, 255);
duty = (u32)tmp64;
- tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
- tmp |= FDO_STATIC_DUTY(duty);
- WREG32(CG_FDO_CTRL0, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
+ tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
+ WREG32(mmCG_FDO_CTRL0, tmp);
return 0;
}
@@ -6672,8 +6694,8 @@ static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
if (si_pi->fan_is_controlled_by_smc)
return 0;
- tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
+ tmp = RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ *fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
return 0;
}
@@ -6691,7 +6713,7 @@ static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
if (adev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
- tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ tach_period = (RREG32(mmCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
if (tach_period == 0)
return -ENOENT;
@@ -6720,9 +6742,9 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
si_fan_ctrl_stop_smc_fan_control(adev);
tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
- tmp |= TARGET_PERIOD(tach_period);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
+ tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
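[Editor's note: the RPM conversion above is plain integer arithmetic. A worked example, assuming xclk is the reference clock in the units the formula implies and eight tach edges per revolution:]

/* tach_period = 60 * xclk * 10000 / (8 * speed)
 * e.g. xclk = 100, speed = 3000 RPM:
 *   60 * 100 * 10000    = 60,000,000
 *   8 * 3000            = 24,000
 *   60,000,000 / 24,000 = 2,500 reference-clock ticks per tach period
 */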
@@ -6736,13 +6758,13 @@ static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
u32 tmp;
if (!si_pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
- tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
+ tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
- tmp |= TMIN(si_pi->t_min);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
+ tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
si_pi->fan_ctrl_is_in_default_mode = true;
}
}
@@ -6760,14 +6782,14 @@ static void si_thermal_initialize(struct amdgpu_device *adev)
u32 tmp;
if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
- tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
- WREG32(CG_TACH_CTRL, tmp);
+ tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
+ tmp |= (adev->pm.fan_pulses_per_revolution - 1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
+ WREG32(mmCG_TACH_CTRL, tmp);
}
- tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
- tmp |= TACH_PWM_RESP_RATE(0x28);
- WREG32(CG_FDO_CTRL2, tmp);
+ tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
+ tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
+ WREG32(mmCG_FDO_CTRL2, tmp);
}
static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
@@ -7530,8 +7552,8 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
if (current_index >= ps->performance_level_count) {
seq_printf(m, "invalid dpm profile %d\n", current_index);
@@ -7554,14 +7576,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_HIGH;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7571,14 +7593,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int |= THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
- cg_thermal_int &= ~THERM_INT_MASK_LOW;
- WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
+ cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
+ WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int);
break;
default:
break;
@@ -7883,8 +7905,8 @@ static int si_dpm_get_temp(void *handle)
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
- CTF_TEMP_SHIFT;
+ temp = (RREG32(mmCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
if (temp & 0x200)
actual_temp = 255;
@@ -8014,8 +8036,8 @@ static int si_dpm_read_sensor(void *handle, int idx,
struct si_ps *ps = si_get_ps(rps);
uint32_t sclk, mclk;
u32 pl_index =
- (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
- CURRENT_STATE_INDEX_SHIFT;
+ (RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
+ TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
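[Editor's note: one detail worth noting across this file: in WREG32_P(reg, val, mask) the third argument is the set of bits to preserve, which is why every caller passes the complement of a field mask. A user-space model of that masked write, with a plain pointer standing in for the real register accessors:]

#include <stdint.h>

static inline void wreg32_p_model(volatile uint32_t *reg,
				  uint32_t val, uint32_t keep_mask)
{
	uint32_t tmp = *reg;

	tmp &= keep_mask;		/* preserve bits outside the field */
	tmp |= (val & ~keep_mask);	/* write the new field bits */
	*reg = tmp;
}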
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 8f994ffa9cd1..4e65ab9e931c 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -30,6 +30,12 @@
#include "amdgpu_ucode.h"
#include "sislands_smc.h"
+#include "smu/smu_6_0_d.h"
+#include "smu/smu_6_0_sh_mask.h"
+
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+
static int si_set_smc_sram_address(struct amdgpu_device *adev,
u32 smc_address, u32 limit)
{
@@ -38,8 +44,8 @@ static int si_set_smc_sram_address(struct amdgpu_device *adev,
if ((smc_address + 3) > limit)
return -EINVAL;
- WREG32(SMC_IND_INDEX_0, smc_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, smc_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
return 0;
}
@@ -68,7 +74,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
@@ -83,7 +89,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- original_data = RREG32(SMC_IND_DATA_0);
+ original_data = RREG32(mmSMC_IND_DATA_0);
extra_shift = 8 * (4 - byte_count);
while (byte_count > 0) {
@@ -99,7 +105,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
if (ret)
goto done;
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
}
done:
@@ -121,10 +127,10 @@ void amdgpu_si_reset_smc(struct amdgpu_device *adev)
{
u32 tmp;
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
+ RREG32(mmCB_CGTT_SCLK_CTRL);
tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
RST_REG;
@@ -170,16 +176,16 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
- WREG32(SMC_MESSAGE_0, msg);
+ WREG32(mmSMC_MESSAGE_0, msg);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
+ tmp = RREG32(mmSMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
- return (PPSMC_Result)RREG32(SMC_RESP_0);
+ return (PPSMC_Result)RREG32(mmSMC_RESP_0);
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
@@ -225,18 +231,18 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
return -EINVAL;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(SMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
while (ucode_size >= 4) {
/* SMC address space is BE */
data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
- WREG32(SMC_IND_DATA_0, data);
+ WREG32(mmSMC_IND_DATA_0, data);
src += 4;
ucode_size -= 4;
}
- WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return 0;
@@ -251,7 +257,7 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- *value = RREG32(SMC_IND_DATA_0);
+ *value = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
@@ -266,7 +272,7 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
spin_lock_irqsave(&adev->smc_idx_lock, flags);
ret = si_set_smc_sram_address(adev, smc_address, limit);
if (ret == 0)
- WREG32(SMC_IND_DATA_0, value);
+ WREG32(mmSMC_IND_DATA_0, value);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return ret;
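[Editor's note: the si_smc.c conversion leaves the indirect-access protocol itself untouched: one register selects an SMC SRAM address, a second moves data, and an auto-increment bit lets sequential words stream without rewriting the index (hence the spinlock around every sequence). A compile-only sketch of that protocol; the register offsets and accessors are hypothetical stand-ins:]

#include <stdint.h>

extern void     wreg32(uint32_t reg, uint32_t val);
extern uint32_t rreg32(uint32_t reg);

#define mmSMC_IND_INDEX_0	0x80	/* illustrative offsets */
#define mmSMC_IND_DATA_0	0x81

static void smc_write_words(uint32_t sram_addr, const uint32_t *src, int n)
{
	int i;

	wreg32(mmSMC_IND_INDEX_0, sram_addr);	/* select SRAM address */
	for (i = 0; i < n; i++)			/* assumes auto-increment is set */
		wreg32(mmSMC_IND_DATA_0, src[i]);
}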
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 4bd92fd782be..8d40ed0f0e83 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -143,6 +143,10 @@ int atomctrl_initialize_mc_reg_table(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
@@ -180,6 +184,10 @@ int atomctrl_initialize_mc_reg_table_v2_2(
vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *)
smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
+ if (!vram_info) {
+ pr_err("Could not retrieve the VramInfo table!");
+ return -EINVAL;
+ }
if (module_index >= vram_info->ucNumOfVRAMModule) {
pr_err("Invalid VramInfo table.");
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 9d3b33446adc..9b20076e26c0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -394,7 +394,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
}
if (le32_to_cpu(info->ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableDFSBypass);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index 5a010cd38303..baf51cd82a35 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -46,42 +46,6 @@ static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
}
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
- uint32_t data;
- uint32_t addr;
- uint8_t *dest_byte;
- uint8_t i, data_byte[4] = {0};
- uint32_t *pdata = (uint32_t *)&data_byte;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
-
- *dest = PP_SMC_TO_HOST_UL(data);
-
- dest += 1;
- byte_count -= 4;
- addr += 4;
- }
-
- if (byte_count) {
- smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
- *pdata = PP_SMC_TO_HOST_UL(data);
- /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
- dest_byte = (uint8_t *)dest;
- for (i = 0; i < byte_count; i++)
- dest_byte[i] = data_byte[i];
- }
-
- return 0;
-}
-
-
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
index e7303dc8c260..63e428ceaee4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
@@ -53,8 +53,6 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
- uint32_t *dest, uint32_t byte_count, uint32_t limit);
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 46cce1d2aaf3..d79a1d94661a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -3432,15 +3432,15 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
return ret;
}
-bool smu_mode2_reset_is_support(struct smu_context *smu)
+bool smu_link_reset_is_support(struct smu_context *smu)
{
bool ret = false;
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
- ret = smu->ppt_funcs->mode2_reset_is_support(smu);
+ if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
+ ret = smu->ppt_funcs->link_reset_is_support(smu);
return ret;
}
@@ -3475,6 +3475,19 @@ static int smu_mode2_reset(void *handle)
return ret;
}
+int smu_link_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu->ppt_funcs->link_reset)
+ ret = smu->ppt_funcs->link_reset(smu);
+
+ return ret;
+}
+
static int smu_enable_gfx_features(void *handle)
{
struct smu_context *smu = handle;
@@ -3745,6 +3758,19 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
return ret;
}
+static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
+{
+ struct smu_context *smu = handle;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
+ return -EOPNOTSUPP;
+
+ return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
+}
+
static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
.set_fan_control_mode = smu_set_fan_control_mode,
@@ -3803,6 +3829,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_uclk_dpm_states = smu_get_uclk_dpm_states,
.get_dpm_clock_table = smu_get_dpm_clock_table,
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
+ .get_xcp_metrics = smu_sys_get_xcp_metrics,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
@@ -3975,3 +4002,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
+ smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);
+
+ return 0;
+}
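[Editor's note: smu_link_reset() and smu_reset_vcn() follow the guard-and-delegate shape used throughout amdgpu_smu.c: bail out when power management is disabled, then call through the optional ppt_funcs hook. A generic sketch of that shape, with hypothetical names:]

#include <errno.h>

struct ctx {
	int pm_enabled;
	struct {
		int (*do_op)(struct ctx *c);
	} *funcs;
};

static int ctx_do_op(struct ctx *c)
{
	if (!c->pm_enabled)
		return -EOPNOTSUPP;		/* PM not up: refuse the request */

	if (c->funcs && c->funcs->do_op)
		return c->funcs->do_op(c);	/* delegate if implemented */

	return 0;
}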
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index dd6d0e7aa242..9aacc7bc1c69 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -438,9 +438,11 @@ struct mclock_latency_table {
};
enum smu_reset_mode {
- SMU_RESET_MODE_0,
- SMU_RESET_MODE_1,
- SMU_RESET_MODE_2,
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+ SMU_RESET_MODE_3,
+ SMU_RESET_MODE_4,
};
enum smu_baco_state {
@@ -1229,10 +1231,11 @@ struct pptable_funcs {
* @mode1_reset_is_support: Check if GPU supports mode1 reset.
*/
bool (*mode1_reset_is_support)(struct smu_context *smu);
+
/**
- * @mode2_reset_is_support: Check if GPU supports mode2 reset.
+ * @link_reset_is_support: Check if GPU supports link reset.
*/
- bool (*mode2_reset_is_support)(struct smu_context *smu);
+ bool (*link_reset_is_support)(struct smu_context *smu);
/**
* @mode1_reset: Perform mode1 reset.
@@ -1252,6 +1255,13 @@ struct pptable_funcs {
int (*enable_gfx_features)(struct smu_context *smu);
/**
+ * @link_reset: Perform link reset.
+ *
+ * A link (PCIe) reset initiated by the gfx device driver.
+ */
+ int (*link_reset)(struct smu_context *smu);
+
+ /**
* @get_dpm_ultimate_freq: Get the hard frequency range of a clock
* domain in MHz.
*/
@@ -1383,6 +1393,11 @@ struct pptable_funcs {
bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
+ * @dpm_reset_vcn: message SMU to soft-reset a VCN instance.
+ */
+ int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1451,6 +1466,12 @@ struct pptable_funcs {
*/
int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
struct freq_band_range *exclusion_ranges);
+ /**
+ * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
+ * Return: Size of table
+ */
+ ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
+ void *table);
};
typedef enum {
@@ -1601,8 +1622,9 @@ int smu_get_power_limit(void *handle,
enum pp_power_type pp_power_type);
bool smu_mode1_reset_is_support(struct smu_context *smu);
-bool smu_mode2_reset_is_support(struct smu_context *smu);
+bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
+int smu_link_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs;
@@ -1643,6 +1665,7 @@ int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
+int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index d26f35119a12..01790a927930 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -127,7 +127,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x10
+#define SMU_METRICS_TABLE_VERSION 0x11
// Unified metrics table for smu_v13_0_6
typedef struct __attribute__((packed, aligned(4))) {
@@ -459,4 +459,13 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
+#pragma pack(push, 4)
+typedef struct {
+ // Telemetry
+ uint32_t InputTelemetryVoltageInmV;
+ // General info
+ uint32_t pldmVersion[2];
+} StaticMetricsTable_t;
+#pragma pack(pop)
+
#endif
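[Editor's note: StaticMetricsTable_t is shared with the SMU firmware, so its layout is pinned with #pragma pack(push, 4). A self-contained sketch with a size assertion; the 12-byte size follows directly from the three u32 fields above:]

#include <stdint.h>

#pragma pack(push, 4)
typedef struct {
	uint32_t InputTelemetryVoltageInmV;	/* telemetry */
	uint32_t pldmVersion[2];		/* general info */
} StaticMetricsTable_t;
#pragma pack(pop)

_Static_assert(sizeof(StaticMetricsTable_t) == 12,
	       "layout must match the SMU firmware ABI");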
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 288b2576432b..41f268313613 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,7 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_Message_Count 0x4E
+#define PPSMC_MSG_ResetVCN 0x4E
+#define PPSMC_MSG_GetStaticMetricsTable 0x59
+#define PPSMC_Message_Count 0x5A
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index c9dee09395e3..eefdaa0b5df6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -277,6 +277,7 @@
__SMU_DUMMY_MAP(MALLPowerController), \
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
+ __SMU_DUMMY_MAP(ResetVCN), \
__SMU_DUMMY_MAP(GetStaticMetricsTable),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index ed8304d82831..56ae555bb52a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -281,11 +281,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_11_0_dpm_table *single_dpm_table);
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index cd03caffe317..4263798d716b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -112,6 +112,7 @@ struct smu_13_0_dpm_context {
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
uint64_t caps;
+ uint32_t board_volt;
};
enum smu_13_0_power_state {
@@ -162,8 +163,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu);
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en);
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count);
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu);
int smu_v13_0_notify_display_change(struct smu_context *smu);
@@ -183,13 +182,6 @@ int smu_v13_0_disable_thermal_alert(struct smu_context *smu);
int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value);
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
-
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req);
-
uint32_t
smu_v13_0_get_fan_control_mode(struct smu_context *smu);
@@ -226,11 +218,6 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max, bool automatic);
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max);
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level);
@@ -310,14 +297,6 @@ int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
uint32_t *value);
void smu_v13_0_interrupt_work(struct smu_context *smu);
-bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
-int smu_v13_0_12_get_max_metrics_size(void);
-int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
-int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
- MetricsMember_t member,
- uint32_t *value);
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
-extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
-extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+void smu_v13_0_reset_custom_level(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 453952cdc353..9ad46f545d15 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1347,7 +1347,7 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (max_power_limit)
*max_power_limit = power_limit;
- /**
+ /*
* No lower bound is imposed on the limit. Any unreasonable limit set
* will result in frequent throttling.
*/
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 19a25fdc2f5b..115e3fa456bc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -3089,11 +3089,6 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
-static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int sienna_cichlid_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -3229,7 +3224,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
- .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
.mode2_reset = sienna_cichlid_mode2_reset,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 25fabf336a64..78e4186d06cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -2059,45 +2059,6 @@ int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v11_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v11_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 83163d7c7f00..6de653d2ed62 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1270,6 +1270,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+ int r;
/* Disable determinism if switching to another mode */
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
@@ -1282,7 +1283,11 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
return 0;
-
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ r = smu_v13_0_set_performance_level(smu, level);
+ if (!r)
+ smu_v13_0_reset_custom_level(smu);
+ return r;
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1423,7 +1428,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
+ ret = aldebaran_set_soft_freq_limited_range(
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
+ if (ret)
+ return ret;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1976,11 +1985,6 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
return true;
}
-static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
-{
- return true;
-}
-
static int aldebaran_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
@@ -2086,7 +2090,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = aldebaran_get_gpu_metrics,
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
- .mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
.mode1_reset = aldebaran_mode1_reset,
.set_mp1_state = aldebaran_set_mp1_state,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ba5a9012dbd5..1c7235935d14 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -58,6 +58,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
@@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
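[Editor's note: the microcode load now requests a "_kicker" binary when amdgpu_is_kicker_fw() reports a kicker SKU; ucode_prefix is also widened from 15 to 30 bytes, presumably for headroom with longer IP-version strings. The name selection, reduced to a sketch with snprintf standing in for the request helper:]

#include <stdio.h>

static void smc_fw_name(char *buf, size_t len,
			const char *prefix, int is_kicker)
{
	snprintf(buf, len, "amdgpu/%s%s.bin",
		 prefix, is_kicker ? "_kicker" : "");
}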
@@ -709,18 +715,6 @@ int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
-{
- int ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
- if (ret)
- dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");
-
- return ret;
-}
-
int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
struct smu_table *driver_table = &smu->smu_table.driver_table;
@@ -761,18 +755,6 @@ int smu_v13_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
-{
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
-
- return ret;
-}
-
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1073,56 +1055,6 @@ int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-int
-smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request
- *clock_req)
-{
- enum amd_pp_clock_type clk_type = clock_req->clock_type;
- int ret = 0;
- enum smu_clk_type clk_select = 0;
- uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
- smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- switch (clk_type) {
- case amd_pp_dcef_clock:
- clk_select = SMU_DCEFCLK;
- break;
- case amd_pp_disp_clock:
- clk_select = SMU_DISPCLK;
- break;
- case amd_pp_pixel_clock:
- clk_select = SMU_PIXCLK;
- break;
- case amd_pp_phy_clock:
- clk_select = SMU_PHYCLK;
- break;
- case amd_pp_mem_clock:
- clk_select = SMU_UCLK;
- break;
- default:
- dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
- ret = -EINVAL;
- break;
- }
-
- if (ret)
- goto failed;
-
- if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
- return 0;
-
- ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
-
- if (clk_select == SMU_UCLK)
- smu->hard_min_uclk_req_from_dal = clk_freq;
- }
-
-failed:
- return ret;
-}
-
uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1647,45 +1579,6 @@ out:
return ret;
}
-int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
-{
- int ret = 0, clk_id = 0;
- uint32_t param;
-
- if (min <= 0 && max <= 0)
- return -EINVAL;
-
- if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
- return 0;
-
- clk_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_CLK,
- clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param, NULL);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
@@ -2595,3 +2488,13 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
return ret;
}
+
+void smu_v13_0_reset_custom_level(struct smu_context *smu)
+{
+ struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
+
+ pstate_table->uclk_pstate.custom.min = 0;
+ pstate_table->uclk_pstate.custom.max = 0;
+ pstate_table->gfxclk_pstate.custom.min = 0;
+ pstate_table->gfxclk_pstate.custom.max = 0;
+}
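[Editor's note: smu_v13_0_reset_custom_level() centralizes clearing of the user-supplied uclk/gfxclk min/max overrides; the aldebaran and smu_v13_0_6 hunks invoke it after a successful switch back to AUTO and after an OD table restore. The calling pattern, as it appears in the aldebaran hunk:]

r = smu_v13_0_set_performance_level(smu, level);
if (!r)
	smu_v13_0_reset_custom_level(smu);
return r;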
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 238bd71baa6d..e0d356f93ab0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -187,26 +187,6 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
-static int smu_v13_0_12_get_static_metrics_table(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
- struct smu_table *table = &smu_table->driver_table;
- int ret;
-
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
- if (ret) {
- dev_info(smu->adev->dev,
- "Failed to export static metrics table!\n");
- return ret;
- }
-
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
- memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
-
- return 0;
-}
-
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -217,7 +197,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
int ret, i;
if (!pptable->Init) {
- ret = smu_v13_0_12_get_static_metrics_table(smu);
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
if (ret)
return ret;
@@ -342,11 +322,67 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
+{
+ const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
+ struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ MetricsTable_t *metrics;
+ int inst, j, k, idx;
+ u32 inst_mask;
+
+ metrics = (MetricsTable_t *)smu_metrics;
+ xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table;
+ smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instance numbering */
+ inst = GET_INST(VCN, k);
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(metrics->
+ JpegBusy[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ metrics->VclkFrequency[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ metrics->DclkFrequency[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ metrics->SocclkFrequency[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(metrics->UclkFrequency);
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]);
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+ idx++;
+ }
+
+ return sizeof(*xcp_metrics);
+}
+
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
u8 num_jpeg_rings_gpu_metrics;
@@ -354,10 +390,9 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
struct amdgpu_xcp *xcp;
u32 inst_mask;
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t));
+ metrics = (MetricsTable_t *)smu_metrics;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(metrics->MaxSocketTemperature);
@@ -436,13 +471,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] =
SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
+ gpu_metrics->xgmi_write_data_acc[j] =
SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
+ gpu_metrics->xgmi_link_status[j] = ret;
}
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
@@ -474,6 +512,16 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
SMUQ10_ROUND(metrics->GfxBusy[inst]);
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
@@ -484,7 +532,6 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
gpu_metrics->firmware_timestamp = metrics->Timestamp;
*table = (void *)gpu_metrics;
- kfree(metrics);
return sizeof(*gpu_metrics);
}
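[Editor's note: the metrics paths above convert firmware counters out of Q10 fixed point (10 fractional bits) with SMUQ10_ROUND before reporting them. A minimal sketch of that conversion, assuming the usual add-half-then-shift rounding (the kernel macro is assumed to be equivalent):]

#include <stdint.h>

/* Q10: value = raw / 1024; round to the nearest integer. */
static inline uint64_t q10_round(uint64_t raw)
{
	return (raw + (1u << 9)) >> 10;	/* add 0.5 ULP, then shift */
}

/* Example: 2.25 in Q10 is 2304 -> q10_round(2304) == 2;
 *          2.50 in Q10 is 2560 -> q10_round(2560) == 3. */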
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index c478b3be37af..f00ef7f3f355 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,24 +101,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
-#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
-
-enum smu_v13_0_6_caps {
- SMU_CAP(DPM),
- SMU_CAP(DPM_POLICY),
- SMU_CAP(OTHER_END_METRICS),
- SMU_CAP(SET_UCLK_MAX),
- SMU_CAP(PCIE_METRICS),
- SMU_CAP(MCA_DEBUG_MODE),
- SMU_CAP(PER_INST_METRICS),
- SMU_CAP(CTF_LIMIT),
- SMU_CAP(RMA_MSG),
- SMU_CAP(ACA_SYND),
- SMU_CAP(SDMA_RESET),
- SMU_CAP(STATIC_METRICS),
- SMU_CAP(ALL),
-};
-
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -194,6 +176,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
};
// clang-format on
@@ -299,8 +283,8 @@ static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
dpm_context->caps &= ~BIT_ULL(cap);
}
-static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
- enum smu_v13_0_6_caps cap)
+bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -328,6 +312,11 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551600) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
}
static void smu_v13_0_12_init_caps(struct smu_context *smu)
@@ -353,6 +342,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
if (fw_ver >= 0x00561E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+
+ if (fw_ver >= 0x00562500)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,6 +394,17 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
if (fw_ver < 0x00555600)
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if ((pgm == 7 && fw_ver >= 0x7550E00) ||
+ (pgm == 0 && fw_ver >= 0x00557E00))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+ }
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
@@ -525,7 +528,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -747,9 +750,48 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
return pm_metrics->common_header.structure_size;
}
+static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
+ StaticMetricsTable_t *static_metrics)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ if (!static_metrics->InputTelemetryVoltageInmV) {
+ dev_warn(smu->adev->dev, "Invalid board voltage %d\n",
+ static_metrics->InputTelemetryVoltageInmV);
+ }
+
+ dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
+ static_metrics->pldmVersion[0] != 0xFFFFFFFF)
+ smu->adev->firmware.pldm_version =
+ static_metrics->pldmVersion[0];
+}
+
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ struct smu_table *table = &smu_table->driver_table;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export static metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
+
+ return 0;
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
@@ -759,7 +801,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
int ret, i, retry = 100;
uint32_t table_version;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_setup_driver_pptable(smu);
/* Store one-time values in driver PPTable */
@@ -813,6 +856,12 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
pptable->Init = true;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_6_get_static_metrics_table(smu);
+ if (ret)
+ return ret;
+ smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
+ }
}
return 0;
@@ -1142,7 +1191,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
if (ret)
return ret;
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
/* For clocks with multiple instances, only report the first one */
@@ -1616,6 +1666,7 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, void *data,
uint32_t *size)
{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
int ret = 0;
if (amdgpu_ras_intr_triggered())
@@ -1660,6 +1711,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VDDBOARD:
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
+ *(uint32_t *)data = dpm_context->board_volt;
+ *size = 4;
+ break;
+ } else {
+ ret = -EOPNOTSUPP;
+ break;
+ }
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -1927,7 +1987,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
}
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -2140,7 +2200,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
- pstate_table->uclk_pstate.custom.max = 0;
+ smu_v13_0_reset_custom_level(smu);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -2483,11 +2543,131 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
return pcie_gen_to_speed(speed_level + 1);
}
+static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
+ void *table)
+{
+ const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
+ int version = smu_v13_0_6_get_metrics_version(smu);
+ struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct amdgpu_device *adev = smu->adev;
+ int ret, inst, i, j, k, idx;
+ MetricsTableV0_t *metrics_v0;
+ MetricsTableV1_t *metrics_v1;
+ MetricsTableV2_t *metrics_v2;
+ struct amdgpu_xcp *xcp;
+ u32 inst_mask;
+ bool per_inst;
+
+ if (!table)
+ return sizeof(*xcp_metrics);
+
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ if (xcp->id == xcp_id)
+ break;
+ }
+ if (i == adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table;
+ smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+
+ metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_v0)
+ return -ENOMEM;
+
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
+ if (ret) {
+ kfree(metrics_v0);
+ return ret;
+ }
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0);
+ goto out;
+ }
+
+ metrics_v1 = (MetricsTableV1_t *)metrics_v0;
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
+
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < num_jpeg_rings; ++j) {
+ xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(
+ JpegBusy,
+ version)[(inst * num_jpeg_rings) + j]);
+ }
+ xcp_metrics->vcn_busy[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+
+ xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(VclkFrequency, version)[inst]);
+ xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(DclkFrequency, version)[inst]);
+ xcp_metrics->current_socclk[idx] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(SocclkFrequency, version)[inst]);
+
+ idx++;
+ }
+
+ xcp_metrics->current_uclk =
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
+
+ if (per_inst) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ xcp_metrics->current_gfxclk[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency,
+ version)[inst]);
+
+ xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+ xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ xcp_metrics->gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
+ xcp_metrics->gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND(metrics_v0->GfxclkLowUtilizationAcc[inst]);
+ xcp_metrics->gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
+ idx++;
+ }
+ }
+out:
+ kfree(metrics_v0);
+ if (ret < 0)
+ return ret;
+
+ return sizeof(*xcp_metrics);
+}
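
The NULL-table early return above makes this a standard two-phase query: callers first ask for the required buffer size, then call again with an allocation of that size. A hypothetical caller sketch (names invented for illustration; the real consumer is the amdgpu pm layer):

	ssize_t size = smu_v13_0_6_get_xcp_metrics(smu, xcp_id, NULL);
	void *buf = kzalloc(size, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	size = smu_v13_0_6_get_xcp_metrics(smu, xcp_id, buf);
	if (size < 0) /* propagate errors from the filling call */
		goto err_free;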
+
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_7 *gpu_metrics =
- (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_8 *gpu_metrics =
+ (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
@@ -2496,6 +2676,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
+ ssize_t num_bytes;
u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
@@ -2507,13 +2688,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return ret;
}
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
- return smu_v13_0_12_get_gpu_metrics(smu, table);
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
+ kfree(metrics_v0);
+ return num_bytes;
+ }
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
@@ -2623,13 +2808,16 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
- gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
- gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
+ j = amdgpu_xgmi_get_ext_link(adev, i);
+ if (j < 0 || j >= NUM_XGMI_LINKS)
+ continue;
+ gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]);
+ gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND(
+ GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]);
ret = amdgpu_get_xgmi_link_status(adev, i);
if (ret >= 0)
- gpu_metrics->xgmi_link_status[i] = ret;
+ gpu_metrics->xgmi_link_status[j] = ret;
}
gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
@@ -2666,6 +2854,20 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
version)[inst]);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
+ SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
+ SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
+ SMUQ10_ROUND(metrics_v0->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
+ SMUQ10_ROUND(metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
idx++;
}
}
@@ -2844,14 +3046,29 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_link_reset(struct smu_context *smu)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_4, NULL);
+}
+
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
return true;
}
-static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
+static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
{
- return true;
+ struct amdgpu_device *adev = smu->adev;
+
+ /* Link reset is only supported on devices whose ID has low nibble 0x1 */
+ return (adev->pdev->device & 0xF) == 0x1;
}
static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
@@ -2924,6 +3141,19 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ResetVCN, inst_mask, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetVCN event with mask 0x%x\n",
+ inst_mask);
+ return ret;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3584,11 +3814,13 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
.get_pm_metrics = smu_v13_0_6_get_pm_metrics,
+ .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+ .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
+ .link_reset = smu_v13_0_6_link_reset,
.wait_for_event = smu_v13_0_wait_for_event,
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
@@ -3596,6 +3828,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
.reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
+ .dpm_reset_vcn = smu_v13_0_6_reset_vcn,
};
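
The new hooks are only reachable through this function-pointer table. A minimal sketch of how the SMU core might dispatch them, assuming the usual NULL-check pattern (illustrative only, not the actual amdgpu wrapper):

	static int smu_link_reset_sketch(struct smu_context *smu)
	{
		if (!smu->ppt_funcs || !smu->ppt_funcs->link_reset)
			return -EOPNOTSUPP;
		if (smu->ppt_funcs->link_reset_is_support &&
		    !smu->ppt_funcs->link_reset_is_support(smu))
			return -EOPNOTSUPP;
		return smu->ppt_funcs->link_reset(smu);
	}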
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 83745909e564..d38d6d76b1e7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -26,6 +26,7 @@
#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
typedef enum {
/*0*/ METRICS_VERSION_V0 = 0,
@@ -51,6 +52,38 @@ struct PPTable_t {
bool Init;
};
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(STATIC_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(BOARD_VOLTAGE),
+ SMU_CAP(PLDM_VERSION),
+ SMU_CAP(ALL),
+};
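
SMU_CAP() simply token-pastes the short name onto the SMU_13_0_6_CAPS_ prefix (see the #define above), keeping call sites compact:

	/* These two calls are identical after preprocessing: */
	smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS));
	smu_v13_0_6_cap_supported(smu, SMU_13_0_6_CAPS_STATIC_METRICS);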
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
+int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+int smu_v13_0_12_get_max_metrics_size(void);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member, uint32_t *value);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics);
+ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
+ struct amdgpu_xcp *xcp, void *table,
+ void *smu_metrics);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 80eb1a03b3ca..7eaf58fd7f9a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1051,73 +1051,6 @@ int smu_cmn_get_combo_pptable(struct smu_context *smu)
false);
}
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
-{
- struct metrics_table_header *header = (struct metrics_table_header *)table;
- uint16_t structure_size;
-
-#define METRICS_VERSION(a, b) ((a << 16) | b)
-
- switch (METRICS_VERSION(frev, crev)) {
- case METRICS_VERSION(1, 0):
- structure_size = sizeof(struct gpu_metrics_v1_0);
- break;
- case METRICS_VERSION(1, 1):
- structure_size = sizeof(struct gpu_metrics_v1_1);
- break;
- case METRICS_VERSION(1, 2):
- structure_size = sizeof(struct gpu_metrics_v1_2);
- break;
- case METRICS_VERSION(1, 3):
- structure_size = sizeof(struct gpu_metrics_v1_3);
- break;
- case METRICS_VERSION(1, 4):
- structure_size = sizeof(struct gpu_metrics_v1_4);
- break;
- case METRICS_VERSION(1, 5):
- structure_size = sizeof(struct gpu_metrics_v1_5);
- break;
- case METRICS_VERSION(1, 6):
- structure_size = sizeof(struct gpu_metrics_v1_6);
- break;
- case METRICS_VERSION(1, 7):
- structure_size = sizeof(struct gpu_metrics_v1_7);
- break;
- case METRICS_VERSION(1, 8):
- structure_size = sizeof(struct gpu_metrics_v1_8);
- break;
- case METRICS_VERSION(2, 0):
- structure_size = sizeof(struct gpu_metrics_v2_0);
- break;
- case METRICS_VERSION(2, 1):
- structure_size = sizeof(struct gpu_metrics_v2_1);
- break;
- case METRICS_VERSION(2, 2):
- structure_size = sizeof(struct gpu_metrics_v2_2);
- break;
- case METRICS_VERSION(2, 3):
- structure_size = sizeof(struct gpu_metrics_v2_3);
- break;
- case METRICS_VERSION(2, 4):
- structure_size = sizeof(struct gpu_metrics_v2_4);
- break;
- case METRICS_VERSION(3, 0):
- structure_size = sizeof(struct gpu_metrics_v3_0);
- break;
- default:
- return;
- }
-
-#undef METRICS_VERSION
-
- memset(header, 0xFF, structure_size);
-
- header->format_revision = frev;
- header->content_revision = crev;
- header->structure_size = structure_size;
-
-}
-
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index a020277dec3e..7473672abd2a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,6 +40,30 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
+ do { \
+ typecheck(struct gpu_metrics_v##frev##_##crev, \
+ typeof(*(ptr))); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)(ptr); \
+ memset(header, 0xFF, sizeof(*(ptr))); \
+ header->format_revision = frev; \
+ header->content_revision = crev; \
+ header->structure_size = sizeof(*(ptr)); \
+ } while (0)
+
+#define smu_cmn_init_partition_metrics(ptr, frev, crev) \
+ do { \
+ typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \
+ typeof(*(ptr))); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)(ptr); \
+ memset(header, 0xFF, sizeof(*(ptr))); \
+ header->format_revision = frev; \
+ header->content_revision = crev; \
+ header->structure_size = sizeof(*(ptr)); \
+ } while (0)
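
Replacing the old runtime switch with these macros moves the revision/structure pairing to compile time: the typecheck() is meant to reject a (frev, crev) pair that does not match the pointed-to type. A usage sketch:

	struct gpu_metrics_v1_8 metrics;

	smu_cmn_init_soft_gpu_metrics(&metrics, 1, 8); /* types agree */
	smu_cmn_init_soft_gpu_metrics(&metrics, 1, 7); /* intended build failure:
							* gpu_metrics_v1_7 != v1_8 */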
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
@@ -125,8 +149,6 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
int smu_cmn_get_combo_pptable(struct smu_context *smu);
-void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
-
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 34547edf1ee3..87f2e5ee8790 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -159,7 +159,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
}
if (!fourcc_mod_is_vendor(modifier, ARM)) {
- DRM_ERROR("Unknown modifier (not Arm)\n");
+ DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
return false;
}
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da0663542e8a..242fbccdf844 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_AST
tristate "AST server chips"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index 139ab00dee8f..2d3ad7610c2e 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -37,6 +37,7 @@
*/
/* define for signature structure */
+#define AST_HWC_SIGNATURE_SIZE SZ_32
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
#define AST_HWC_SIGNATURE_SizeX 0x04
#define AST_HWC_SIGNATURE_SizeY 0x08
@@ -45,6 +46,21 @@
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+static unsigned long ast_cursor_vram_size(void)
+{
+ return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE;
+}
+
+long ast_cursor_vram_offset(struct ast_device *ast)
+{
+ unsigned long size = ast_cursor_vram_size();
+
+ if (size > ast->vram_size)
+ return -EINVAL;
+
+ return ALIGN_DOWN(ast->vram_size - size, SZ_8);
+}
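
Worked example from the constants above (a sketch, not new driver logic): AST_HWC_SIZE is 64 lines * 128 bytes of pitch = 8192 bytes, plus the 32-byte signature, so:

	/* For the minimum 8 MiB of VRAM:
	 * size = 8192 + 32 = 8224
	 * offset = ALIGN_DOWN(8388608 - 8224, SZ_8) = 8380384
	 * i.e. the cursor occupies roughly the top 8 KiB of VRAM.
	 */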
+
static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height)
{
u32 csum = 0;
@@ -75,7 +91,7 @@ static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, un
static void ast_set_cursor_image(struct ast_device *ast, const u8 *src,
unsigned int width, unsigned int height)
{
- u8 __iomem *dst = ast->cursor_plane.base.vaddr;
+ u8 __iomem *dst = ast_plane_vaddr(&ast->cursor_plane.base);
u32 csum;
csum = ast_cursor_calculate_checksum(src, width, height);
@@ -177,7 +193,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct ast_device *ast = to_ast_device(plane->dev);
struct drm_rect damage;
u64 dst_off = ast_plane->offset;
- u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
+ u8 __iomem *dst = ast_plane_vaddr(ast_plane); /* TODO: Use mapping abstraction properly */
u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
unsigned int offset_x, offset_y;
u16 x, y;
@@ -274,25 +290,16 @@ int ast_cursor_plane_init(struct ast_device *ast)
struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
struct ast_plane *ast_plane = &ast_cursor_plane->base;
struct drm_plane *cursor_plane = &ast_plane->base;
- size_t size;
- void __iomem *vaddr;
- u64 offset;
+ unsigned long size;
+ long offset;
int ret;
- /*
- * Allocate backing storage for cursors. The BOs are permanently
- * pinned to the top end of the VRAM.
- */
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+ size = ast_cursor_vram_size();
+ offset = ast_cursor_vram_offset(ast);
+ if (offset < 0)
+ return offset;
- if (ast->vram_fb_available < size)
- return -ENOMEM;
-
- vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_fb_available - size;
-
- ret = ast_plane_init(dev, ast_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_plane, offset, size,
0x01, &ast_cursor_plane_funcs,
ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR);
@@ -303,7 +310,5 @@ int ast_cursor_plane_init(struct ast_device *ast)
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(cursor_plane);
- ast->vram_fb_available -= size;
-
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d2c2605d2728..2ee402096cd9 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -112,12 +112,9 @@ enum ast_config_mode {
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
-
#define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2)
#define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH)
-#define AST_HWC_SIGNATURE_SIZE 32
-
/*
* Planes
*/
@@ -125,7 +122,6 @@ enum ast_config_mode {
struct ast_plane {
struct drm_plane base;
- void __iomem *vaddr;
u64 offset;
unsigned long size;
};
@@ -183,7 +179,6 @@ struct ast_device {
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
- unsigned long vram_fb_available;
struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
@@ -340,14 +335,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i
__ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
-#define AST_VIDMEM_SIZE_8M 0x00800000
-#define AST_VIDMEM_SIZE_16M 0x01000000
-#define AST_VIDMEM_SIZE_32M 0x02000000
-#define AST_VIDMEM_SIZE_64M 0x04000000
-#define AST_VIDMEM_SIZE_128M 0x08000000
-
-#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-
struct ast_vbios_stdtable {
u8 misc;
u8 seq[4];
@@ -440,6 +427,7 @@ int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
/* ast_cursor.c */
+long ast_cursor_vram_offset(struct ast_device *ast);
int ast_cursor_plane_init(struct ast_device *ast);
/* ast dp501 */
@@ -454,11 +442,12 @@ int ast_astdp_output_init(struct ast_device *ast);
/* ast_mode.c */
int ast_mode_config_init(struct ast_device *ast);
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type);
+void __iomem *ast_plane_vaddr(struct ast_plane *ast);
#endif
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 6dfe6d9777d4..0bc140319464 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -35,36 +35,35 @@
static u32 ast_get_vram_size(struct ast_device *ast)
{
- u8 jreg;
u32 vram_size;
+ u8 vgacr99, vgacraa;
- vram_size = AST_VIDMEM_DEFAULT_SIZE;
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff);
- switch (jreg & 3) {
+ vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa);
+ switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) {
case 0:
- vram_size = AST_VIDMEM_SIZE_8M;
+ vram_size = SZ_8M;
break;
case 1:
- vram_size = AST_VIDMEM_SIZE_16M;
+ vram_size = SZ_16M;
break;
case 2:
- vram_size = AST_VIDMEM_SIZE_32M;
+ vram_size = SZ_32M;
break;
case 3:
- vram_size = AST_VIDMEM_SIZE_64M;
+ vram_size = SZ_64M;
break;
}
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff);
- switch (jreg & 0x03) {
+ vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99);
+ switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) {
case 1:
- vram_size -= 0x100000;
+ vram_size -= SZ_1M;
break;
case 2:
- vram_size -= 0x200000;
+ vram_size -= SZ_2M;
break;
case 3:
- vram_size -= 0x400000;
+ vram_size -= SZ_4M;
break;
}
@@ -93,7 +92,6 @@ int ast_mm_init(struct ast_device *ast)
ast->vram_base = base;
ast->vram_size = vram_size;
- ast->vram_fb_available = vram_size;
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c3b950675485..031980d8f3ab 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -29,7 +29,6 @@
*/
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_atomic.h>
@@ -51,6 +50,26 @@
#define AST_LUT_SIZE 256
+#define AST_PRIMARY_PLANE_MAX_OFFSET (BIT(16) - 1)
+
+static unsigned long ast_fb_vram_offset(void)
+{
+ return 0; // with shmem, the primary plane is always at offset 0
+}
+
+static unsigned long ast_fb_vram_size(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ unsigned long offset = ast_fb_vram_offset(); // framebuffer region starts here
+ long cursor_offset = ast_cursor_vram_offset(ast); // and ends at the cursor area
+
+ if (cursor_offset < 0)
+ cursor_offset = ast->vram_size; // no cursor; it's all ours
+ if (drm_WARN_ON_ONCE(dev, offset > cursor_offset))
+ return 0; // cannot legally happen; signal error
+ return cursor_offset - offset;
+}
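
Together with ast_cursor_vram_offset(), this pins down the whole VRAM layout; a sketch of the resulting partition:

	/*
	 * 0                   cursor_offset            vram_size
	 * |-- primary plane --|-- cursor + signature --|
	 *
	 * When the cursor does not fit, cursor_offset is treated as
	 * vram_size and the framebuffer gets all of VRAM.
	 */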
+
static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
@@ -439,7 +458,7 @@ static void ast_wait_for_vretrace(struct ast_device *ast)
*/
int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
- void __iomem *vaddr, u64 offset, unsigned long size,
+ u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
@@ -448,7 +467,6 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
{
struct drm_plane *plane = &ast_plane->base;
- ast_plane->vaddr = vaddr;
ast_plane->offset = offset;
ast_plane->size = size;
@@ -457,6 +475,13 @@ int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
type, NULL);
}
+void __iomem *ast_plane_vaddr(struct ast_plane *ast_plane)
+{
+ struct ast_device *ast = to_ast_device(ast_plane->base.dev);
+
+ return ast->vram + ast_plane->offset;
+}
+
/*
* Primary plane
*/
@@ -503,7 +528,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane_vaddr(ast_plane));
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
@@ -576,12 +601,12 @@ static int ast_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
{
struct ast_plane *ast_plane = to_ast_plane(plane);
- if (plane->state && plane->state->fb && ast_plane->vaddr) {
+ if (plane->state && plane->state->fb) {
sb->format = plane->state->fb->format;
sb->width = plane->state->fb->width;
sb->height = plane->state->fb->height;
sb->pitch[0] = plane->state->fb->pitches[0];
- iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane->vaddr);
+ iosys_map_set_vaddr_iomem(&sb->map[0], ast_plane_vaddr(ast_plane));
return 0;
}
return -ENODEV;
@@ -608,13 +633,11 @@ static int ast_primary_plane_init(struct ast_device *ast)
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
- void __iomem *vaddr = ast->vram;
- u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
- unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- unsigned long size = ast->vram_fb_available - cursor_size;
+ u64 offset = ast_fb_vram_offset();
+ unsigned long size = ast_fb_vram_size(ast);
int ret;
- ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
+ ret = ast_plane_init(dev, ast_primary_plane, offset, size,
0x01, &ast_primary_plane_funcs,
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY);
@@ -922,9 +945,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
/*
* Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock. Released in atomic_flush().
+ * drm_connector_helper_funcs.get_modes by reading the display
+ * modes. Protect access to registers by acquiring the modeset
+ * lock.
*/
mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail(state);
@@ -938,16 +961,20 @@ static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs =
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
struct ast_device *ast = to_ast_device(dev);
- unsigned long fbsize, fbpages, max_fbpages;
-
- max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
+ unsigned long max_fb_size = ast_fb_vram_size(ast);
+ u64 pitch;
+
+ if (drm_WARN_ON_ONCE(dev, !info))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(info, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > AST_PRIMARY_PLANE_MAX_OFFSET)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > max_fb_size / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
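
The validity check is now pitch-based rather than page-based. A worked example, assuming DRM_FORMAT_XRGB8888 at 4 bytes per pixel:

	/* 1920x1080:
	 * pitch = drm_format_info_min_pitch(info, 0, 1920) = 1920 * 4 = 7680
	 * 7680 <= 65535 (AST_PRIMARY_PLANE_MAX_OFFSET), so the pitch fits;
	 * the mode is MODE_OK only if 7680 <= ast_fb_vram_size() / 1080,
	 * i.e. roughly 7.9 MiB of VRAM is available for the framebuffer.
	 */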
@@ -1018,10 +1045,7 @@ int ast_mode_config_init(struct ast_device *ast)
return ret;
drm_mode_config_reset(dev);
-
- ret = drmm_kms_helper_poll_init(dev);
- if (ret)
- return ret;
+ drmm_kms_helper_poll_init(dev);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 91e85e457bdf..37568cf3822c 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -1075,16 +1075,16 @@ static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *par
switch (param->vram_size) {
default:
- case AST_VIDMEM_SIZE_8M:
+ case SZ_8M:
param->dram_config |= 0x00;
break;
- case AST_VIDMEM_SIZE_16M:
+ case SZ_16M:
param->dram_config |= 0x04;
break;
- case AST_VIDMEM_SIZE_32M:
+ case SZ_32M:
param->dram_config |= 0x08;
break;
- case AST_VIDMEM_SIZE_64M:
+ case SZ_64M:
param->dram_config |= 0x0c;
break;
}
@@ -1446,16 +1446,16 @@ static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *par
switch (param->vram_size) {
default:
- case AST_VIDMEM_SIZE_8M:
+ case SZ_8M:
param->dram_config |= 0x00;
break;
- case AST_VIDMEM_SIZE_16M:
+ case SZ_16M:
param->dram_config |= 0x04;
break;
- case AST_VIDMEM_SIZE_32M:
+ case SZ_32M:
param->dram_config |= 0x08;
break;
- case AST_VIDMEM_SIZE_64M:
+ case SZ_64M:
param->dram_config |= 0x0c;
break;
}
@@ -1635,19 +1635,19 @@ static void ast_post_chip_2300(struct ast_device *ast)
switch (temp & 0x0c) {
default:
case 0x00:
- param.vram_size = AST_VIDMEM_SIZE_8M;
+ param.vram_size = SZ_8M;
break;
case 0x04:
- param.vram_size = AST_VIDMEM_SIZE_16M;
+ param.vram_size = SZ_16M;
break;
case 0x08:
- param.vram_size = AST_VIDMEM_SIZE_32M;
+ param.vram_size = SZ_32M;
break;
case 0x0c:
- param.vram_size = AST_VIDMEM_SIZE_64M;
+ param.vram_size = SZ_64M;
break;
}
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index bb2cc1d8b84e..e15adaf3a80e 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -30,9 +30,11 @@
#define AST_IO_VGACRI (0x54)
#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
#define AST_IO_VGACRA3_DVO_ENABLED BIT(7)
+#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0)
#define AST_IO_VGACRB6_HSYNC_OFF BIT(0)
#define AST_IO_VGACRB6_VSYNC_OFF BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 09a1be234f71..b9e0ca85226a 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -16,6 +16,7 @@ config DRM_AUX_BRIDGE
tristate
depends on DRM_BRIDGE && OF
select AUXILIARY_BUS
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Simple transparent bridge that is used by several non-DRM drivers to
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 050dae338ffe..1257009e850c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -948,13 +948,14 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret = 0;
if (adv->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, adv->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 83d711ee3a2e..f3fe47b12edc 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -143,35 +143,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx6345->aux, anx6345->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
@@ -517,6 +489,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
};
static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
@@ -553,7 +526,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx6345->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
@@ -691,9 +664,10 @@ static int anx6345_i2c_probe(struct i2c_client *client)
struct device *dev;
int i, err;
- anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL);
- if (!anx6345)
- return -ENOMEM;
+ anx6345 = devm_drm_bridge_alloc(&client->dev, struct anx6345, bridge,
+ &anx6345_bridge_funcs);
+ if (IS_ERR(anx6345))
+ return PTR_ERR(anx6345);
mutex_init(&anx6345->lock);
@@ -765,7 +739,6 @@ static int anx6345_i2c_probe(struct i2c_client *client)
/* Look for supported chip ID */
anx6345_poweron(anx6345);
if (anx6345_get_chip_id(anx6345)) {
- anx6345->bridge.funcs = &anx6345_bridge_funcs;
drm_bridge_add(&anx6345->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index f74694bb9c50..a83020d6576f 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -656,35 +656,7 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /*
- * Power up the sink (DP_SET_POWER register is only available on DPCD
- * v1.1 and later).
- */
- if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
- err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
- err);
- return err;
- }
-
- dpcd[0] &= ~DP_SET_POWER_MASK;
- dpcd[0] |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n",
- err);
- return err;
- }
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
+ drm_dp_link_power_up(&anx78xx->aux, anx78xx->dpcd[DP_DPCD_REV]);
/* Possibly enable downspread on the sink */
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
@@ -888,6 +860,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
};
static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
@@ -924,7 +897,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
err = drm_connector_attach_encoder(&anx78xx->connector,
- bridge->encoder);
+ encoder);
if (err) {
DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
goto connector_cleanup;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 071168aa0c3b..505eec6b819b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -838,10 +838,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
int ret;
/* Keep the panel disabled while we configure video */
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel))
- DRM_ERROR("failed to disable the panel\n");
- }
+ drm_panel_disable(dp->plat_data->panel);
ret = analogix_dp_train_link(dp);
if (ret) {
@@ -863,13 +860,7 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
}
/* Safe to enable the panel now */
- if (dp->plat_data->panel) {
- ret = drm_panel_enable(dp->plat_data->panel);
- if (ret) {
- DRM_ERROR("failed to enable the panel\n");
- return ret;
- }
- }
+ drm_panel_enable(dp->plat_data->panel);
/* Check whether panel supports fast training */
ret = analogix_dp_fast_link_train_detection(dp);
@@ -955,67 +946,15 @@ static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
-/*
- * This function is a bit of a catch-all for panel preparation, hopefully
- * simplifying the logic of functions that need to prepare/unprepare the panel
- * below.
- *
- * If @prepare is true, this function will prepare the panel. Conversely, if it
- * is false, the panel will be unprepared.
- *
- * If @is_modeset_prepare is true, the function will disregard the current state
- * of the panel and either prepare/unprepare the panel based on @prepare. Once
- * it finishes, it will update dp->panel_is_modeset to reflect the current state
- * of the panel.
- */
-static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
- bool prepare, bool is_modeset_prepare)
-{
- int ret = 0;
-
- if (!dp->plat_data->panel)
- return 0;
-
- mutex_lock(&dp->panel_lock);
-
- /*
- * Exit early if this is a temporary prepare/unprepare and we're already
- * modeset (since we neither want to prepare twice or unprepare early).
- */
- if (dp->panel_is_modeset && !is_modeset_prepare)
- goto out;
-
- if (prepare)
- ret = drm_panel_prepare(dp->plat_data->panel);
- else
- ret = drm_panel_unprepare(dp->plat_data->panel);
-
- if (ret)
- goto out;
-
- if (is_modeset_prepare)
- dp->panel_is_modeset = prepare;
-
-out:
- mutex_unlock(&dp->panel_lock);
- return ret;
-}
-
static int analogix_dp_get_modes(struct drm_connector *connector)
{
struct analogix_dp_device *dp = to_dp(connector);
const struct drm_edid *drm_edid;
- int ret, num_modes = 0;
+ int num_modes = 0;
if (dp->plat_data->panel) {
num_modes += drm_panel_get_modes(dp->plat_data->panel, connector);
} else {
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return 0;
- }
-
drm_edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
drm_edid_connector_update(&dp->connector, drm_edid);
@@ -1024,10 +963,6 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
num_modes += drm_edid_connector_add_modes(&dp->connector);
drm_edid_free(drm_edid);
}
-
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
}
if (dp->plat_data->get_modes)
@@ -1082,24 +1017,13 @@ analogix_dp_detect(struct drm_connector *connector, bool force)
{
struct analogix_dp_device *dp = to_dp(connector);
enum drm_connector_status status = connector_status_disconnected;
- int ret;
if (dp->plat_data->panel)
return connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, true, false);
- if (ret) {
- DRM_ERROR("Failed to prepare panel (%d)\n", ret);
- return connector_status_disconnected;
- }
-
if (!analogix_dp_detect_hpd(dp))
status = connector_status_connected;
- ret = analogix_dp_prepare_panel(dp, false, false);
- if (ret)
- DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
-
return status;
}
@@ -1113,10 +1037,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
};
static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct analogix_dp_device *dp = bridge->driver_private;
- struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
@@ -1203,7 +1127,6 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- int ret;
crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
@@ -1214,9 +1137,7 @@ static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
- ret = analogix_dp_prepare_panel(dp, true, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_prepare(dp->plat_data->panel);
}
static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
@@ -1296,17 +1217,11 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
{
struct analogix_dp_device *dp = bridge->driver_private;
- int ret;
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
- if (dp->plat_data->panel) {
- if (drm_panel_disable(dp->plat_data->panel)) {
- DRM_ERROR("failed to disable the panel\n");
- return;
- }
- }
+ drm_panel_disable(dp->plat_data->panel);
disable_irq(dp->irq);
@@ -1314,9 +1229,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put_sync(dp->dev);
- ret = analogix_dp_prepare_panel(dp, false, true);
- if (ret)
- DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ drm_panel_unprepare(dp->plat_data->panel);
dp->fast_train_enable = false;
dp->psr_supported = false;
@@ -1505,6 +1418,10 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
video_info->max_link_rate = 0x0A;
video_info->max_lane_count = 0x04;
break;
+ case RK3588_EDP:
+ video_info->max_link_rate = 0x14;
+ video_info->max_lane_count = 0x04;
+ break;
case EXYNOS_DP:
/*
* NOTE: this property parsing code is used for
@@ -1540,6 +1457,26 @@ out:
return ret;
}
+static int analogix_dpaux_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+ int val;
+ int ret;
+
+ if (dp->force_hpd)
+ return 0;
+
+ pm_runtime_get_sync(dp->dev);
+
+ ret = readx_poll_timeout(analogix_dp_get_plug_in_status, dp, val, !val,
+ wait_us / 100, wait_us);
+
+ pm_runtime_mark_last_busy(dp->dev);
+ pm_runtime_put_autosuspend(dp->dev);
+
+ return ret;
+}
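
readx_poll_timeout() here keeps polling the plug-in status until it reads zero (HPD asserted) or wait_us expires. Roughly equivalent open-coded logic, as a sketch under the same assumptions:

	u64 deadline = ktime_get_ns() + wait_us * NSEC_PER_USEC;

	while (analogix_dp_get_plug_in_status(dp)) { /* non-zero: not plugged */
		if (ktime_get_ns() > deadline)
			return -ETIMEDOUT;
		usleep_range(wait_us / 100, wait_us / 50);
	}
	return 0;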
+
struct analogix_dp_device *
analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
@@ -1560,9 +1497,6 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
- mutex_init(&dp->panel_lock);
- dp->panel_is_modeset = false;
-
/*
* platform dp driver needs container_of() on the plat_data to get
* the driver private data, so we need to store the pointer to
@@ -1597,10 +1531,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
}
dp->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(dp->reg_base)) {
- ret = PTR_ERR(dp->reg_base);
- goto err_disable_clk;
- }
+ if (IS_ERR(dp->reg_base))
+ return ERR_CAST(dp->reg_base);
dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
@@ -1612,8 +1544,7 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
if (IS_ERR(dp->hpd_gpiod)) {
dev_err(dev, "error getting HDP GPIO: %ld\n",
PTR_ERR(dp->hpd_gpiod));
- ret = PTR_ERR(dp->hpd_gpiod);
- goto err_disable_clk;
+ return ERR_CAST(dp->hpd_gpiod);
}
if (dp->hpd_gpiod) {
@@ -1625,16 +1556,15 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
* that we can get the current state of the GPIO.
*/
dp->irq = gpiod_to_irq(dp->hpd_gpiod);
- irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN;
} else {
dp->irq = platform_get_irq(pdev, 0);
- irq_flags = 0;
+ irq_flags = IRQF_NO_AUTOEN;
}
if (dp->irq == -ENXIO) {
dev_err(&pdev->dev, "failed to get irq\n");
- ret = -ENODEV;
- goto err_disable_clk;
+ return ERR_PTR(-ENODEV);
}
ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1643,15 +1573,22 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_disable_clk;
+ return ERR_PTR(ret);
}
- disable_irq(dp->irq);
- return dp;
+ dp->aux.name = "DP-AUX";
+ dp->aux.transfer = analogix_dpaux_transfer;
+ dp->aux.wait_hpd_asserted = analogix_dpaux_wait_hpd_asserted;
+ dp->aux.dev = dp->dev;
+ drm_dp_aux_init(&dp->aux);
-err_disable_clk:
- clk_disable_unprepare(dp->clock);
- return ERR_PTR(ret);
+ pm_runtime_use_autosuspend(dp->dev);
+ pm_runtime_set_autosuspend_delay(dp->dev, 100);
+ ret = devm_pm_runtime_enable(dp->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dp;
}
EXPORT_SYMBOL_GPL(analogix_dp_probe);
@@ -1681,6 +1618,7 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
if (dp->plat_data->power_on)
dp->plat_data->power_on(dp->plat_data);
+ phy_set_mode(dp->phy, PHY_MODE_DP);
phy_power_on(dp->phy);
analogix_dp_init_dp(dp);
@@ -1696,25 +1634,12 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_use_autosuspend(dp->dev);
- pm_runtime_set_autosuspend_delay(dp->dev, 100);
- pm_runtime_enable(dp->dev);
- } else {
- ret = analogix_dp_resume(dp);
- if (ret)
- return ret;
- }
-
- dp->aux.name = "DP-AUX";
- dp->aux.transfer = analogix_dpaux_transfer;
- dp->aux.dev = dp->dev;
dp->aux.drm_dev = drm_dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret) {
DRM_ERROR("failed to register AUX (%d)\n", ret);
- goto err_disable_pm_runtime;
+ return ret;
}
ret = analogix_dp_create_bridge(drm_dev, dp);
@@ -1727,13 +1652,6 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
err_unregister_aux:
drm_dp_aux_unregister(&dp->aux);
-err_disable_pm_runtime:
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
return ret;
}
@@ -1744,19 +1662,9 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
analogix_dp_bridge_disable(dp->bridge);
dp->connector.funcs->destroy(&dp->connector);
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
+ drm_panel_unprepare(dp->plat_data->panel);
drm_dp_aux_unregister(&dp->aux);
-
- if (IS_ENABLED(CONFIG_PM)) {
- pm_runtime_dont_use_autosuspend(dp->dev);
- pm_runtime_disable(dp->dev);
- } else {
- analogix_dp_suspend(dp);
- }
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
@@ -1782,6 +1690,20 @@ int analogix_dp_stop_crc(struct drm_connector *connector)
}
EXPORT_SYMBOL_GPL(analogix_dp_stop_crc);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+
+ return dp->plat_data;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_aux_to_plat_data);
+
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp)
+{
+ return &dp->aux;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_get_aux);
+
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Analogix DP Core Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 774d11574b09..2b54120ba4a3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -169,9 +169,6 @@ struct analogix_dp_device {
bool fast_train_enable;
bool psr_supported;
- struct mutex panel_lock;
- bool panel_is_modeset;
-
struct analogix_dp_plat_data *plat_data;
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 3afc73c858c4..38fd8d5014d2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <drm/bridge/analogix_dp.h>
@@ -513,10 +514,24 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
{
u32 reg;
+ int ret;
reg = bwtype;
if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.link_rate =
+ drm_dp_bw_code_to_link_rate(dp->link_train.link_rate) / 100;
+ phy_cfg.dp.set_rate = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
@@ -530,9 +545,22 @@ void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
{
u32 reg;
+ int ret;
reg = count;
writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ phy_cfg.dp.lanes = dp->link_train.lane_count;
+ phy_cfg.dp.set_lanes = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
@@ -546,10 +574,34 @@ void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp)
{
u8 lane;
+ int ret;
for (lane = 0; lane < dp->link_train.lane_count; lane++)
writel(dp->link_train.training_lane[lane],
dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL + 4 * lane);
+
+ if (dp->phy) {
+ union phy_configure_opts phy_cfg = {0};
+
+ for (lane = 0; lane < dp->link_train.lane_count; lane++) {
+ u8 training_lane = dp->link_train.training_lane[lane];
+ u8 vs, pe;
+
+ vs = (training_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ pe = (training_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ phy_cfg.dp.voltage[lane] = vs;
+ phy_cfg.dp.pre[lane] = pe;
+ }
+
+ phy_cfg.dp.set_voltages = true;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(dp->dev, "%s: phy_configure() failed: %d\n", __func__, ret);
+ return;
+ }
+ }
}
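
Each per-lane training byte packs the voltage swing and pre-emphasis levels into separate bit fields (swing in bits 1:0, pre-emphasis in bits 4:3, per the drm_dp.h layout). A worked decode as a sketch:

	/* training_lane = 0x11:
	 * vs = (0x11 & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT = 1
	 * pe = (0x11 & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT = 2
	 * so the PHY is configured for swing level 1, pre-emphasis level 2.
	 */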
u32 analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, u8 lane)
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 0b97b66de577..8a9079c2ed5c 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1257,10 +1257,10 @@ static void anx7625_power_on(struct anx7625_data *ctx)
usleep_range(11000, 12000);
/* Power on pin enable */
- gpiod_set_value(ctx->pdata.gpio_p_on, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 1);
usleep_range(10000, 11000);
/* Power reset pin enable */
- gpiod_set_value(ctx->pdata.gpio_reset, 1);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 1);
usleep_range(10000, 11000);
DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
@@ -1280,9 +1280,9 @@ static void anx7625_power_standby(struct anx7625_data *ctx)
return;
}
- gpiod_set_value(ctx->pdata.gpio_reset, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_reset, 0);
usleep_range(1000, 1100);
- gpiod_set_value(ctx->pdata.gpio_p_on, 0);
+ gpiod_set_value_cansleep(ctx->pdata.gpio_p_on, 0);
usleep_range(1000, 1100);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->pdata.supplies),
@@ -1814,9 +1814,6 @@ static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");
- if (ctx->pdata.panel_bridge)
- return connector_status_connected;
-
return ctx->hpd_status ? connector_status_connected :
connector_status_disconnected;
}
@@ -2141,6 +2138,7 @@ static void hdcp_check_work_func(struct work_struct *work)
}
static int anx7625_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
@@ -2159,7 +2157,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge,
}
if (ctx->pdata.panel_bridge) {
- err = drm_bridge_attach(bridge->encoder,
+ err = drm_bridge_attach(encoder,
ctx->pdata.panel_bridge,
&ctx->bridge, flags);
if (err)
@@ -2474,6 +2472,22 @@ static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge
return anx7625_edid_read(ctx);
}
+static void anx7625_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_get_sync(dev);
+}
+
+static void anx7625_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = ctx->dev;
+
+ pm_runtime_put_sync(dev);
+}
+
static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.attach = anx7625_bridge_attach,
.detach = anx7625_bridge_detach,
@@ -2487,6 +2501,8 @@ static const struct drm_bridge_funcs anx7625_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.detect = anx7625_bridge_detect,
.edid_read = anx7625_bridge_edid_read,
+ .hpd_enable = anx7625_bridge_hpd_enable,
+ .hpd_disable = anx7625_bridge_hpd_disable,
};
static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
@@ -2568,12 +2584,6 @@ static const struct dev_pm_ops anx7625_pm_ops = {
anx7625_runtime_pm_resume, NULL)
};
-static void anx7625_runtime_disable(void *data)
-{
- pm_runtime_dont_use_autosuspend(data);
- pm_runtime_disable(data);
-}
-
static int anx7625_link_bridge(struct drm_dp_aux *aux)
{
struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux);
@@ -2590,9 +2600,8 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
platform->bridge.of_node = dev->of_node;
if (!anx7625_of_panel_on_aux_bus(dev))
platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
- if (!platform->pdata.panel_bridge)
- platform->bridge.ops |= DRM_BRIDGE_OP_HPD |
- DRM_BRIDGE_OP_DETECT;
+ if (!platform->pdata.panel_bridge || !anx7625_of_panel_on_aux_bus(dev))
+ platform->bridge.ops |= DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT;
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
@@ -2707,11 +2716,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
goto free_wq;
}
- pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, true);
- ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
goto free_wq;
@@ -2771,7 +2779,6 @@ static void anx7625_i2c_remove(struct i2c_client *client)
if (platform->hdcp_workqueue) {
cancel_delayed_work(&platform->hdcp_work);
- flush_workqueue(platform->hdcp_workqueue);
destroy_workqueue(platform->hdcp_workqueue);
}
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index 015983c015e5..c179b86d208f 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -86,6 +86,7 @@ struct drm_aux_bridge_data {
};
static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct drm_aux_bridge_data *data;
@@ -95,7 +96,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge,
data = container_of(bridge, struct drm_aux_bridge_data, bridge);
- return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ return drm_bridge_attach(encoder, data->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index 48f297c78ee6..b3f588b71a7d 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -156,6 +156,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta
EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index c7a0247e06ad..b022dd6e6b6e 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -425,6 +425,17 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
+struct cdns_dsi_bridge_state {
+ struct drm_bridge_state base;
+ struct cdns_dsi_cfg dsi_cfg;
+};
+
+static inline struct cdns_dsi_bridge_state *
+to_cdns_dsi_bridge_state(struct drm_bridge_state *bridge_state)
+{
+ return container_of(bridge_state, struct cdns_dsi_bridge_state, base);
+}
+
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
return container_of(input, struct cdns_dsi, input);
@@ -568,15 +579,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
+ int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
if (ret)
return ret;
- phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
- mipi_dsi_pixel_format_to_bpp(output->dev->format),
- nlanes, phy_cfg);
+ ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
+ mipi_dsi_pixel_format_to_bpp(output->dev->format),
+ nlanes, phy_cfg);
+ if (ret)
+ return ret;
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
if (ret)
@@ -605,6 +619,7 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
}
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
@@ -617,7 +632,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ return drm_bridge_attach(encoder, output->bridge, bridge,
flags);
}
@@ -655,7 +670,8 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -675,11 +691,17 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put(dsi->base.dev);
}
-static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
+ dsi->phy_initialized = false;
+ dsi->link_initialized = false;
+ phy_power_off(dsi->dphy);
+ phy_exit(dsi->dphy);
+
pm_runtime_put(dsi->base.dev);
}
@@ -752,32 +774,59 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct cdns_dsi_bridge_state *dsi_state;
+ struct drm_bridge_state *new_bridge_state;
struct drm_display_mode *mode;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
+ struct drm_connector *connector;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
- u32 tmp, reg_wakeup, div;
+ u32 tmp, reg_wakeup, div, status;
int nlanes;
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ new_bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ if (WARN_ON(!new_bridge_state))
+ return;
+
+ dsi_state = to_cdns_dsi_bridge_state(new_bridge_state);
+ dsi_cfg = dsi_state->dsi_cfg;
+
if (dsi->platform_ops && dsi->platform_ops->enable)
dsi->platform_ops->enable(dsi);
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));
-
cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ /*
+ * Now that the DSI link and D-PHY are initialized, wait for
+ * the clock and data lanes to become ready.
+ */
+ tmp = CLK_LANE_RDY;
+ for (int i = 0; i < nlanes; i++)
+ tmp |= DATA_LANE_RDY(i);
+
+ if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
+ (tmp == (status & tmp)), 100, 500000))
+ dev_err(dsi->base.dev,
+ "Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
+
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
dsi->regs + VID_HSIZE1);
writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
@@ -892,7 +941,8 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -904,13 +954,109 @@ static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
cdns_dsi_hs_init(dsi);
}
+static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_output *output = &dsi->output;
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = drm_mipi_dsi_get_input_bus_fmt(output->dev->format);
+ if (!input_fmts[0]) {
+ kfree(input_fmts);
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
+ struct cdns_dsi *dsi = input_to_dsi(input);
+ struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+
+ return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state, *old_dsi_state;
+ struct drm_bridge_state *bridge_state;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ bridge_state = drm_priv_to_bridge_state(bridge->base.state);
+ old_dsi_state = to_cdns_dsi_bridge_state(bridge_state);
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &dsi_state->base);
+
+ memcpy(&dsi_state->dsi_cfg, &old_dsi_state->dsi_cfg,
+ sizeof(dsi_state->dsi_cfg));
+
+ return &dsi_state->base;
+}
+
+static void
+cdns_dsi_bridge_atomic_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = to_cdns_dsi_bridge_state(state);
+
+ kfree(dsi_state);
+}
+
+static struct drm_bridge_state *
+cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
+{
+ struct cdns_dsi_bridge_state *dsi_state;
+
+ dsi_state = kzalloc(sizeof(*dsi_state), GFP_KERNEL);
+ if (!dsi_state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_reset(bridge, &dsi_state->base);
+
+ return &dsi_state->base;
+}
+
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .disable = cdns_dsi_bridge_disable,
- .pre_enable = cdns_dsi_bridge_pre_enable,
- .enable = cdns_dsi_bridge_enable,
- .post_disable = cdns_dsi_bridge_post_disable,
+ .atomic_disable = cdns_dsi_bridge_atomic_disable,
+ .atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
+ .atomic_enable = cdns_dsi_bridge_atomic_enable,
+ .atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
+ .atomic_check = cdns_dsi_bridge_atomic_check,
+ .atomic_reset = cdns_dsi_bridge_atomic_reset,
+ .atomic_duplicate_state = cdns_dsi_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = cdns_dsi_bridge_atomic_destroy_state,
+ .atomic_get_input_bus_fmts = cdns_dsi_bridge_get_input_bus_fmts,
};
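
With the funcs table above, cdns-dsi subclasses drm_bridge_state so the DSI timing configuration validated in atomic_check() travels to atomic_enable() through the atomic state, instead of being recomputed at enable time (the old path reran cdns_dsi_check_conf() under WARN_ON_ONCE). The general shape of the pattern, with hypothetical my_* names:

struct my_bridge_state {
	struct drm_bridge_state base;	/* recovered via container_of() */
	struct my_cfg cfg;		/* filled in atomic_check() */
};

static void my_atomic_enable(struct drm_bridge *bridge,
			     struct drm_atomic_state *state)
{
	struct drm_bridge_state *bs =
		drm_atomic_get_new_bridge_state(state, bridge);
	struct my_bridge_state *ms =
		container_of(bs, struct my_bridge_state, base);

	my_hw_program(bridge, &ms->cfg);	/* hypothetical helper */
}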
static int cdns_dsi_attach(struct mipi_dsi_host *host,
@@ -920,8 +1066,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
struct drm_bridge *bridge;
- struct drm_panel *panel;
- struct device_node *np;
int ret;
/*
@@ -939,26 +1083,10 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
- * this representation, otherwise we use dsidev->dev.of_node which
- * should have been filled by the core.
+ * this representation.
*/
- np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
- dev->channel);
- if (!np)
- np = of_node_get(dev->dev.of_node);
-
- panel = of_drm_find_panel(np);
- if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DSI);
- } else {
- bridge = of_drm_find_bridge(dev->dev.of_node);
- if (!bridge)
- bridge = ERR_PTR(-EINVAL);
- }
-
- of_node_put(np);
-
+ bridge = devm_drm_of_get_bridge(dsi->base.dev, dsi->base.dev->of_node,
+ DSI_OUTPUT_PORT, dev->channel);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
dev_err(host->dev, "failed to add DSI device %s (err = %d)",
@@ -968,7 +1096,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
output->dev = dev;
output->bridge = bridge;
- output->panel = panel;
/*
* The DSI output has been properly configured, we can now safely
@@ -984,12 +1111,9 @@ static int cdns_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dev)
{
struct cdns_dsi *dsi = to_cdns_dsi(host);
- struct cdns_dsi_output *output = &dsi->output;
struct cdns_dsi_input *input = &dsi->input;
drm_bridge_remove(&input->bridge);
- if (output->panel)
- drm_panel_bridge_remove(output->bridge);
return 0;
}
@@ -1152,7 +1276,6 @@ static int __maybe_unused cdns_dsi_suspend(struct device *dev)
clk_disable_unprepare(dsi->dsi_sys_clk);
clk_disable_unprepare(dsi->dsi_p_clk);
reset_control_assert(dsi->dsi_p_rst);
- dsi->link_initialized = false;
return 0;
}
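
The cdns_dsi_attach() rework leans on devm_drm_of_get_bridge(), which wraps the whole "find a panel and wrap it in a panel-bridge, else find a bridge" lookup and manages the panel-bridge lifetime; that is why the drm_panel bookkeeping and the detach-time drm_panel_bridge_remove() can go. Usage sketch; port and endpoint values are the caller's:

struct drm_bridge *bridge;

bridge = devm_drm_of_get_bridge(dev, dev->of_node, port, endpoint);
if (IS_ERR(bridge))
	return PTR_ERR(bridge);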
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
index ca7ea2da635c..5db5dbbbcaad 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -10,7 +10,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <linux/bits.h>
#include <linux/completion.h>
@@ -21,7 +20,6 @@ struct reset_control;
struct cdns_dsi_output {
struct mipi_dsi_device *dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
union phy_configure_opts phy_opts;
};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 81fad14c2cd5..b431e7efd1f0 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -546,76 +546,6 @@ out:
}
/**
- * cdns_mhdp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * cdns_mhdp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-static
-int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
- struct cdns_mhdp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* cdns_mhdp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
@@ -1453,7 +1383,7 @@ static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
- cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);
cdns_mhdp_fill_sink_caps(mhdp, dpcd);
@@ -1500,7 +1430,7 @@ static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
if (mhdp->plugged)
- cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
+ drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);
mhdp->link_up = false;
}
@@ -1726,6 +1656,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
}
static int cdns_mhdp_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
@@ -2305,7 +2236,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
* If everything looks fine, just return, as we don't handle
* DP IRQs.
*/
- if (ret > 0 &&
+ if (!ret &&
drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
goto out;
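
The mhdp8546 hunks replace the driver's private DP_SET_POWER helpers with the shared drm_dp_link_power_up()/drm_dp_link_power_down() from the DisplayPort helper library. The helpers take the DPCD revision so the pre-1.1 no-op check stays centralized, and power-up keeps the roughly 1 ms sink wake-up wait. Usage sketch, assuming <drm/display/drm_dp_helper.h>:

int err;

err = drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);
if (err < 0)
	dev_err(mhdp->dev, "failed to power up DP link: %d\n", err);

/* ... link training, modeset ... */

drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);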
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 81f7c701961f..634c5b030667 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -580,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn)
return ret;
}
-static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int chipone_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct chipone *icn = bridge_to_chipone(bridge);
- return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags);
}
#define MAX_INPUT_SEL_FORMATS 1
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index da17f0978a79..210c45c1efd4 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
}
static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
struct drm_connector *connector = &priv->connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, priv->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
return ret;
@@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+ return drm_connector_attach_encoder(&priv->connector, encoder);
}
static void ch7033_bridge_detach(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 72bc508d4e6e..badd2c7f91a1 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -34,6 +34,7 @@ to_display_connector(struct drm_bridge *bridge)
}
static int display_connector_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
@@ -209,9 +210,10 @@ static int display_connector_probe(struct platform_device *pdev)
const char *label = NULL;
int ret;
- conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
- if (!conn)
- return -ENOMEM;
+ conn = devm_drm_bridge_alloc(&pdev->dev, struct display_connector, bridge,
+ &display_connector_bridge_funcs);
+ if (IS_ERR(conn))
+ return PTR_ERR(conn);
platform_set_drvdata(pdev, conn);
@@ -361,7 +363,6 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
- conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
if (conn->bridge.ddc)
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 26ae1ab5237f..2cb6dfc7a6d3 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -113,11 +113,12 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
}
static int fsl_ldb_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge,
+ return drm_bridge_attach(encoder, fsl_ldb->panel_bridge,
bridge, flags);
}
@@ -180,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
configured_link_freq = clk_get_rate(fsl_ldb->clk);
if (configured_link_freq != requested_link_freq)
- dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
- configured_link_freq,
- requested_link_freq);
+ dev_warn(fsl_ldb->dev,
+ "Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
+ fsl_ldb->clk, configured_link_freq, requested_link_freq);
clk_prepare_enable(fsl_ldb->clk);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
index 9b5bebbe357d..6149ba141a38 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
@@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge)
}
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ldb_channel *ldb_ch = bridge->driver_private;
@@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
- ldb_ch->next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);
@@ -191,8 +190,7 @@ int ldb_find_next_bridge_helper(struct ldb *ldb)
}
EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs)
+void ldb_add_bridge_helper(struct ldb *ldb)
{
struct ldb_channel *ldb_ch;
int i;
@@ -204,7 +202,6 @@ void ldb_add_bridge_helper(struct ldb *ldb,
continue;
ldb_ch->bridge.driver_private = ldb_ch;
- ldb_ch->bridge.funcs = bridge_funcs;
ldb_ch->bridge.of_node = ldb_ch->np;
drm_bridge_add(&ldb_ch->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
index a0a5cde27fbc..de187e326999 100644
--- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
+++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h
@@ -81,15 +81,14 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge);
void ldb_bridge_disable_helper(struct drm_bridge *bridge);
-int ldb_bridge_attach_helper(struct drm_bridge *bridge,
+int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
int ldb_init_helper(struct ldb *ldb);
int ldb_find_next_bridge_helper(struct ldb *ldb);
-void ldb_add_bridge_helper(struct ldb *ldb,
- const struct drm_bridge_funcs *bridge_funcs);
+void ldb_add_bridge_helper(struct ldb *ldb);
void ldb_remove_bridge_helper(struct ldb *ldb);
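
ldb_add_bridge_helper() drops its funcs argument because the channels are now allocated with devm_drm_bridge_alloc(), which takes the containing type, the member path of the embedded drm_bridge, and the funcs table, and wires bridge.funcs at allocation time; the embedded channel arrays become arrays of pointers to the separately allocated containers. Allocation sketch, matching the imx8qm probe hunk below:

imx8qm_ldb->channel[i] =
	devm_drm_bridge_alloc(dev, struct imx8qm_ldb_channel, base.bridge,
			      &imx8qm_ldb_bridge_funcs);
if (IS_ERR(imx8qm_ldb->channel[i]))
	return PTR_ERR(imx8qm_ldb->channel[i]);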
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index 3ebf0b9866de..f072c6ed39ef 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -23,7 +23,8 @@ struct imx_legacy_bridge {
#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base)
static int imx_legacy_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
@@ -76,9 +77,9 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
- ret = devm_drm_bridge_add(dev, &imx_bridge->base);
- if (ret)
- return ERR_PTR(ret);
+ ret = devm_drm_bridge_add(dev, &imx_bridge->base);
+ if (ret)
+ return ERR_PTR(ret);
return &imx_bridge->base;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index a17433a7c755..8a4fd7d77a8d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -40,11 +40,12 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge)
}
static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge);
- return drm_bridge_attach(bridge->encoder, pvi->next_bridge,
+ return drm_bridge_attach(encoder, pvi->next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index 524aac751359..47aa65938e6a 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -47,7 +47,7 @@ struct imx8qm_ldb_channel {
struct imx8qm_ldb {
struct ldb base;
struct device *dev;
- struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qm_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
int active_chno;
@@ -107,7 +107,7 @@ static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
@@ -158,7 +158,7 @@ imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
if (is_split) {
imx8qm_ldb_ch =
- &imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
+ imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
phy_cfg);
ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
@@ -226,13 +226,13 @@ static void imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
}
if (is_split) {
- ret = phy_power_on(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel0 PHY: %d\n",
ret);
- ret = phy_power_on(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_on(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power on channel1 PHY: %d\n",
@@ -261,12 +261,12 @@ static void imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
ldb_bridge_disable_helper(bridge);
if (is_split) {
- ret = phy_power_off(imx8qm_ldb->channel[0].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[0]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel0 PHY: %d\n",
ret);
- ret = phy_power_off(imx8qm_ldb->channel[1].phy);
+ ret = phy_power_off(imx8qm_ldb->channel[1]->phy);
if (ret)
DRM_DEV_ERROR(dev,
"failed to power off channel1 PHY: %d\n",
@@ -412,7 +412,7 @@ static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb)
int i, ret;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (!ldb_ch->is_available)
@@ -448,6 +448,14 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
if (!imx8qm_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qm_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qm_ldb_channel, base.bridge,
+ &imx8qm_ldb_bridge_funcs);
+ if (IS_ERR(imx8qm_ldb->channel[i]))
+ return PTR_ERR(imx8qm_ldb->channel[i]);
+ }
+
imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qm_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qm_ldb->clk_pixel);
@@ -473,7 +481,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qm_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qm_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -499,12 +507,12 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
}
imx8qm_ldb->active_chno = 0;
- imx8qm_ldb_ch = &imx8qm_ldb->channel[0];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[0];
ldb_ch = &imx8qm_ldb_ch->base;
ldb_ch->link_type = pixel_order;
} else {
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
+ imx8qm_ldb_ch = imx8qm_ldb->channel[i];
ldb_ch = &imx8qm_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -525,7 +533,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qm_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
return ret;
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 3cb484773ddf..5d272916e200 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -44,7 +44,7 @@ struct imx8qxp_ldb_channel {
struct imx8qxp_ldb {
struct ldb base;
struct device *dev;
- struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM];
+ struct imx8qxp_ldb_channel *channel[MAX_LDB_CHAN_NUM];
struct clk *clk_pixel;
struct clk *clk_bypass;
struct drm_bridge *companion;
@@ -410,7 +410,7 @@ static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = {
static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct device_node *ep, *remote;
struct device *dev = imx8qxp_ldb->dev;
@@ -456,7 +456,7 @@ imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel *ldb_ch, int link)
static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb)
{
struct imx8qxp_ldb_channel *imx8qxp_ldb_ch =
- &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
+ imx8qxp_ldb->channel[imx8qxp_ldb->active_chno];
struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base;
struct ldb_channel *companion_ldb_ch;
struct device_node *companion;
@@ -586,6 +586,14 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
if (!imx8qxp_ldb)
return -ENOMEM;
+ for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
+ imx8qxp_ldb->channel[i] =
+ devm_drm_bridge_alloc(dev, struct imx8qxp_ldb_channel, base.bridge,
+ &imx8qxp_ldb_bridge_funcs);
+ if (IS_ERR(imx8qxp_ldb->channel[i]))
+ return PTR_ERR(imx8qxp_ldb->channel[i]);
+ }
+
imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel");
if (IS_ERR(imx8qxp_ldb->clk_pixel)) {
ret = PTR_ERR(imx8qxp_ldb->clk_pixel);
@@ -611,7 +619,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
ldb->ctrl_reg = 0xe0;
for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
- ldb->channel[i] = &imx8qxp_ldb->channel[i].base;
+ ldb->channel[i] = &imx8qxp_ldb->channel[i]->base;
ret = ldb_init_helper(ldb);
if (ret)
@@ -627,7 +635,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
}
for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
- imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i];
+ imx8qxp_ldb_ch = imx8qxp_ldb->channel[i];
ldb_ch = &imx8qxp_ldb_ch->base;
if (ldb_ch->is_available) {
@@ -660,9 +668,9 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx8qxp_ldb);
pm_runtime_enable(dev);
- ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs);
+ ldb_add_bridge_helper(ldb);
- return ret;
+ return 0;
}
static void imx8qxp_ldb_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1d9529dc7f2a..1f6fd488e703 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -108,6 +108,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
}
static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pc_channel *ch = bridge->driver_private;
@@ -119,7 +120,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
ch->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index cd6818db0fd3..e092c9ea99b0 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl)
}
static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pixel_link *pl = bridge->driver_private;
@@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
pl->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index 49dd4f96d52c..da138ab51b3b 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi {
#define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge)
static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
@@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
- return drm_bridge_attach(bridge->encoder,
+ return drm_bridge_attach(encoder,
p2d->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index 21152a1c28f7..a3a63a977b0a 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -665,13 +665,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge,
}
static int it6263_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6263 *it = bridge_to_it6263(bridge);
struct drm_connector *connector;
int ret;
- ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, it->next_bridge, bridge,
flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -679,7 +680,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
- connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ connector = drm_bridge_connector_init(bridge->dev, encoder);
if (IS_ERR(connector)) {
ret = PTR_ERR(connector);
dev_err(it->dev, "failed to initialize bridge connector: %d\n",
@@ -687,7 +688,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(connector, bridge->encoder);
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 8a607558ac89..1383d1e21afe 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -771,40 +771,6 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_MODE_ARG(&it6505->video_info));
}
-static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux,
- struct it6505_drm_dp_link *link,
- u8 mode)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < DPCD_V_1_1)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= mode;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- if (mode == DP_SET_POWER_D0) {
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
static void it6505_clear_int(struct it6505 *it6505)
{
it6505_write(it6505, INT_STATUS_01, 0xFF);
@@ -2578,8 +2544,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
}
it6505->auto_train_retry = AUTO_TRAIN_RETRY;
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
@@ -2910,8 +2875,7 @@ static enum drm_connector_status it6505_detect(struct it6505 *it6505)
}
if (it6505->hpd_state) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT);
it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count);
DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d",
@@ -3124,6 +3088,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
}
static int it6505_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
@@ -3233,8 +3198,7 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
it6505_int_mask_enable(it6505);
it6505_video_reset(it6505);
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D0);
+ drm_dp_link_power_up(&it6505->aux, it6505->link.revision);
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -3246,8 +3210,7 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "start");
if (it6505->powered) {
- it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
- DP_SET_POWER_D3);
+ drm_dp_link_power_down(&it6505->aux, it6505->link.revision);
it6505_video_disable(it6505);
}
}
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index b9f90f32145d..7b110ae53291 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -586,6 +586,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
}
static int it66121_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
@@ -594,7 +595,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 52da204f5740..3e49d855b364 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -543,12 +543,13 @@ exit:
}
static int lt8912_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt8912 *lt = bridge_to_lt8912(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge,
+ ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 0fc5ea18fe6a..9b2dac9bd63c 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
}
static int lt9211_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9211 *ctx = bridge_to_lt9211(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 026803034231..a35a8b8ca89c 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -740,11 +740,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
}
static int lt9611_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
+ return drm_bridge_attach(encoder, lt9611->next_bridge,
bridge, flags);
}
@@ -1130,7 +1131,7 @@ static int lt9611_probe(struct i2c_client *client)
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
- DRM_BRIDGE_OP_HDMI;
+ DRM_BRIDGE_OP_HDMI | DRM_BRIDGE_OP_HDMI_AUDIO;
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
lt9611->bridge.vendor = "Lontium";
lt9611->bridge.product = "LT9611";
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4c3ff1fdc69..766da2cb45a7 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
}
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ return drm_bridge_attach(encoder, lt9611uxc->next_bridge,
bridge, flags);
}
@@ -774,9 +775,9 @@ static int lt9611uxc_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL);
- if (!lt9611uxc)
- return -ENOMEM;
+ lt9611uxc = devm_drm_bridge_alloc(dev, struct lt9611uxc, bridge, &lt9611uxc_bridge_funcs);
+ if (IS_ERR(lt9611uxc))
+ return PTR_ERR(lt9611uxc);
lt9611uxc->dev = dev;
lt9611uxc->client = client;
@@ -855,7 +856,6 @@ retry:
i2c_set_clientdata(client, lt9611uxc);
- lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs;
lt9611uxc->bridge.of_node = client->dev.of_node;
lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
if (lt9611uxc->hpd_supported)
@@ -880,7 +880,11 @@ retry:
}
}
- return lt9611uxc_audio_init(dev, lt9611uxc);
+ ret = lt9611uxc_audio_init(dev, lt9611uxc);
+ if (ret)
+ goto err_remove_bridge;
+
+ return 0;
err_remove_bridge:
free_irq(client->irq, lt9611uxc);
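
The lt9611uxc probe fix stops returning lt9611uxc_audio_init()'s result directly, so a failure now unwinds through err_remove_bridge instead of leaking the IRQ and the registered bridge. The corrected tail, with the label body elided past what the hunk shows:

ret = lt9611uxc_audio_init(dev, lt9611uxc);
if (ret)
	goto err_remove_bridge;

return 0;

err_remove_bridge:
	free_irq(client->irq, lt9611uxc);
	/* ... remaining unwind elided ... */
	return ret;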
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 389af0233fcd..1646e454e0b0 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
}
static int lvds_codec_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
- return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
+ return drm_bridge_attach(encoder, lvds_codec->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index a47aabf134fd..15a5a1f644fc 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -190,6 +190,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
}
static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct i2c_client *stdp4028_i2c
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index 53dd140a1b8d..1d4ae0097df8 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds)
}
static int mchp_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mchp_lvds *lvds = bridge_to_lvds(bridge);
- return drm_bridge_attach(bridge->encoder, lvds->panel_bridge,
+ return drm_bridge_attach(encoder, lvds->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index d04c62a0cb9f..55912ae11f46 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -910,6 +910,7 @@ static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
}
static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
@@ -919,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
- return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, panel_bridge, bridge, flags);
}
static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 27261b2ac9c8..25d7c415478b 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -214,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
};
static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
/* Let this driver create connector if requested */
- ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge,
bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -239,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
drm_connector_attach_encoder(&ptn_bridge->connector,
- bridge->encoder);
+ encoder);
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 258c85c83a28..79b009ab9396 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -58,6 +58,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
};
static int panel_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
@@ -81,7 +82,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_panel_bridge_set_orientation(connector, bridge);
drm_connector_attach_encoder(&panel_bridge->connector,
- bridge->encoder);
+ encoder);
if (bridge->dev->registered) {
if (connector->funcs->reset)
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 13ada42a5514..8726fefc5c65 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -418,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
}
static int ps8622_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index a42138b33258..2422ff68c104 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -494,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -518,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the panel-bridge to the dsi bridge */
- ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
if (ret)
goto err_bridge_attach;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 54de6ed2fae8..0014c497e3fe 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1640,11 +1640,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge,
}
static int samsung_dsim_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge, bridge,
flags);
}
@@ -1935,9 +1936,9 @@ int samsung_dsim_probe(struct platform_device *pdev)
struct samsung_dsim *dsi;
int ret, i;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
@@ -2007,7 +2008,6 @@ int samsung_dsim_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- dsi->bridge.funcs = &samsung_dsim_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 914a2609a685..6de61d9fe064 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -416,6 +416,7 @@ out:
}
static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
@@ -424,7 +425,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, sii902x->next_bridge,
+ return drm_bridge_attach(encoder, sii902x->next_bridge,
bridge, flags);
drm_connector_helper_add(&sii902x->connector,
@@ -452,7 +453,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sii902x->connector, encoder);
return 0;
}
@@ -1138,6 +1139,7 @@ static int sii902x_init(struct sii902x *sii902x)
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ sii902x->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
if (sii902x->i2c->irq > 0)
sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 28a2e1ee04b2..3af650dc92a1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
}
static int sii8620_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index ab0b0e36e97a..70db5b99e5bb 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = {
};
static int simple_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- drm_connector_attach_encoder(&sbridge->connector, bridge->encoder);
+ drm_connector_attach_encoder(&sbridge->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 6166f197e37b..5e5f8c2f95be 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -1077,6 +1077,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_HPD;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0890add5f707..8791408dd1ff 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -22,8 +22,8 @@
#include <media/cec-notifier.h>
-#include <uapi/linux/media-bus-format.h>
-#include <uapi/linux/videodev2.h>
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -2889,12 +2889,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
}
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ return drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
return dw_hdmi_connector_create(hdmi);
@@ -3332,9 +3333,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
u8 config0;
u8 config3;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi, bridge, &dw_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return hdmi;
hdmi->plat_data = plat_data;
hdmi->dev = dev;
@@ -3494,7 +3495,6 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
}
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
hdmi->bridge.interlace_allowed = true;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 2b6e70a49f43..b08ada920a50 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -1072,15 +1072,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->panel_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index 5fd7a459efdd..c76f5f2e74d1 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -870,15 +870,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+ encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 49c76027f831..edf01476f2ef 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -202,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge,
}
static int tc358762_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358762 *ctx = bridge_to_tc358762(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3d3d135b4348..3f76c890fad9 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
}
static int tc358764_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 39e2d3a7a27d..7e5449fb86a3 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1795,6 +1795,7 @@ static const struct drm_connector_funcs tc_connector_funcs = {
};
static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1807,6 +1808,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
}
static int tc_edp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index ec79b0dd0e2c..063f217a17b6 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
};
static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge,
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ return drm_bridge_attach(encoder, priv->output.bridge, bridge,
flags);
}
@@ -580,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void tc358768_bridge_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -602,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge)
dev_warn(priv->dev, "Software disable failed: %d\n", ret);
}
-static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
@@ -682,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
return (u32)div_u64(m, n);
}
-static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
struct mipi_dsi_device *dsi_dev = priv->output.dev;
unsigned long mode_flags = dsi_dev->mode_flags;
u32 val, val2, lptxcnt, hact, data_type;
s32 raw_val;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
const struct drm_display_mode *mode;
u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
u32 dsiclk, hsbyteclk;
@@ -719,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
return;
}
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->adjusted_mode;
ret = tc358768_setup_pll(priv, mode);
if (ret) {
dev_err(dev, "PLL setup failed: %d\n", ret);
@@ -1076,14 +1086,12 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
tc358768_write(priv, TC358768_DSI_CONFW, val);
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
-static void tc358768_bridge_enable(struct drm_bridge *bridge)
+static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc358768_priv *priv = bridge_to_tc358768(bridge);
int ret;
@@ -1100,11 +1108,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge)
tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
ret = tc358768_clear_error(priv);
- if (ret) {
+ if (ret)
dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
- tc358768_bridge_disable(bridge);
- tc358768_bridge_post_disable(bridge);
- }
}
#define MAX_INPUT_SEL_FORMATS 1
@@ -1166,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = {
.attach = tc358768_bridge_attach,
.mode_valid = tc358768_bridge_mode_valid,
.mode_fixup = tc358768_mode_fixup,
- .pre_enable = tc358768_bridge_pre_enable,
- .enable = tc358768_bridge_enable,
- .disable = tc358768_bridge_disable,
- .post_disable = tc358768_bridge_post_disable,
+ .atomic_pre_enable = tc358768_bridge_atomic_pre_enable,
+ .atomic_enable = tc358768_bridge_atomic_enable,
+ .atomic_disable = tc358768_bridge_atomic_disable,
+ .atomic_post_disable = tc358768_bridge_atomic_post_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
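The tc358768 conversion above is the fullest example of the series' second pattern: the legacy pre_enable/enable/disable/post_disable hooks become their atomic_* counterparts, and the adjusted display mode is looked up through the drm_atomic_state argument instead of chasing bridge->encoder->crtc->state from a bridge hook. A sketch of that lookup, matching the chain the patch open-codes (error handling elided):

	#include <drm/drm_atomic.h>
	#include <drm/drm_bridge.h>

	static const struct drm_display_mode *
	sketch_get_adjusted_mode(struct drm_bridge *bridge,
				 struct drm_atomic_state *state)
	{
		/* encoder -> new connector -> connector state -> CRTC state */
		struct drm_connector *connector =
			drm_atomic_get_new_connector_for_encoder(state,
								 bridge->encoder);
		struct drm_connector_state *conn_state =
			drm_atomic_get_new_connector_state(state, connector);
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(state, conn_state->crtc);

		return &crtc_state->adjusted_mode;
	}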
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index c89757bec4e6..1b10e6ee1724 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -286,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
return container_of(b, struct tc_data, bridge);
}
-static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -309,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge)
usleep_range(10, 20);
}
-static void tc_bridge_post_disable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
struct device *dev = &tc->dsi->dev;
@@ -368,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val)
ret, addr);
}
-/* helper function to access bus_formats */
-static struct drm_connector *get_connector(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return connector;
-
- return NULL;
-}
-
-static void tc_bridge_enable(struct drm_bridge *bridge)
+static void tc_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct tc_data *tc = bridge_to_tc(bridge);
u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2;
u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2;
u32 val = 0;
u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay;
- struct drm_display_mode *mode;
- struct drm_connector *connector = get_connector(bridge->encoder);
-
- mode = &bridge->encoder->crtc->state->adjusted_mode;
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
hback_porch = mode->htotal - mode->hsync_end;
hsync_len = mode->hsync_end - mode->hsync_start;
@@ -589,21 +582,25 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
}
static int tc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
+ return drm_bridge_attach(encoder, tc->panel_bridge,
&tc->bridge, flags);
}
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
- .pre_enable = tc_bridge_pre_enable,
- .enable = tc_bridge_enable,
+ .atomic_pre_enable = tc_bridge_atomic_pre_enable,
+ .atomic_enable = tc_bridge_atomic_enable,
.mode_valid = tc_mode_valid,
- .post_disable = tc_bridge_post_disable,
+ .atomic_post_disable = tc_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};
static int tc_attach_host(struct tc_data *tc)
diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c
index 20658258fb51..e636459d9185 100644
--- a/drivers/gpu/drm/bridge/tda998x_drv.c
+++ b/drivers/gpu/drm/bridge/tda998x_drv.c
@@ -751,7 +751,8 @@ tda998x_reset(struct tda998x_priv *priv)
*/
static void tda998x_edid_delay_done(struct timer_list *t)
{
- struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
+ struct tda998x_priv *priv = timer_container_of(priv, t,
+ edid_delay_timer);
priv->edid_delay_active = false;
wake_up(&priv->edid_delay_waitq);
@@ -1365,6 +1366,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
@@ -1780,9 +1782,9 @@ static int tda998x_create(struct device *dev)
u32 video;
int rev_lo, rev_hi, ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tda998x_priv, bridge, &tda998x_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -1947,7 +1949,6 @@ static int tda998x_create(struct device *dev)
tda998x_audio_codec_init(priv, &client->dev);
}
- priv->bridge.funcs = &tda998x_bridge_funcs;
#ifdef CONFIG_OF
priv->bridge.of_node = dev->of_node;
#endif
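tda998x is the first of three drivers in this series (sn65dsi83 and ti-sn65dsi86 follow) converted from devm_kzalloc() plus a manual priv->bridge.funcs assignment to devm_drm_bridge_alloc(), which allocates the containing structure with the embedded drm_bridge already wired to its funcs table and reports failure via ERR_PTR() instead of NULL. A minimal probe sketch, with my_priv and my_bridge_funcs as hypothetical names:

	struct my_priv {
		struct drm_bridge bridge;
	};

	static int my_probe(struct device *dev)
	{
		struct my_priv *priv;

		/* Allocates my_priv and initializes priv->bridge.funcs. */
		priv = devm_drm_bridge_alloc(dev, struct my_priv, bridge,
					     &my_bridge_funcs);
		if (IS_ERR(priv))
			return PTR_ERR(priv);

		dev_set_drvdata(dev, priv);
		/* No priv->bridge.funcs = ... assignment needed anymore. */
		return 0;
	}

Note that the NULL check becomes an IS_ERR()/PTR_ERR() pair, which is exactly the change visible in each converted probe function.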
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index bba10cf9b4f9..e2fc78adebcf 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
}
static int thc63_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
+ return drm_bridge_attach(encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 85f2a0e74a1c..47638d1c96ec 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge,
drm_mode_copy(&dlpc->mode, adjusted_mode);
}
-static int dlpc_attach(struct drm_bridge *bridge,
+static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dlpc *dlpc = bridge_to_dlpc(bridge);
- return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs dlpc_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 95563aa1b450..033c44326552 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -40,7 +40,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_drv.h> /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */
+#include <drm/drm_bridge_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -290,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
}
static int sn65dsi83_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ return drm_bridge_attach(encoder, ctx->panel_bridge,
&ctx->bridge, flags);
}
@@ -370,7 +371,6 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
{
- struct drm_device *dev = sn65dsi83->bridge.dev;
struct drm_modeset_acquire_ctx ctx;
int err;
@@ -385,26 +385,21 @@ static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83)
* Keep the lock during the whole operation to be atomic.
*/
- DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
-
- if (!sn65dsi83->bridge.encoder->crtc) {
- /*
- * No CRTC attached -> No CRTC active outputs to reset
- * This can happen when the SN65DSI83 is reset. Simply do
- * nothing without returning any errors.
- */
- err = 0;
- goto end;
- }
+ drm_modeset_acquire_init(&ctx, 0);
dev_warn(sn65dsi83->dev, "reset the pipe\n");
- err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx);
+retry:
+ err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx);
+ if (err == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
-end:
- DRM_MODESET_LOCK_ALL_END(dev, ctx, err);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
- return err;
+ return 0;
}
static void sn65dsi83_reset_work(struct work_struct *ws)
@@ -946,9 +941,9 @@ static int sn65dsi83_probe(struct i2c_client *client)
struct sn65dsi83 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sn65dsi83, bridge, &sn65dsi83_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
INIT_WORK(&ctx->reset_work, sn65dsi83_reset_work);
@@ -988,7 +983,6 @@ static int sn65dsi83_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sn65dsi83_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
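The sn65dsi83 reset path above drops DRM_MODESET_LOCK_ALL_BEGIN()/END() (and with it the drm_drv.h include) in favor of an explicit acquire context around the new drm_bridge_helper_reset_crtc(), using the standard -EDEADLK backoff idiom. A sketch of that idiom in isolation, assuming only a bridge pointer:

	static void sketch_reset_pipe(struct drm_bridge *bridge)
	{
		struct drm_modeset_acquire_ctx ctx;
		int err;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		err = drm_bridge_helper_reset_crtc(bridge, &ctx);
		if (err == -EDEADLK) {
			/* Lock contention: drop, wait, then retry cleanly. */
			drm_modeset_backoff(&ctx);
			goto retry;
		}

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
	}

The hand-rolled "no CRTC attached" special case disappears as well, its handling now left to the helper.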
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 01d456b955ab..60224f476e1d 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -35,6 +35,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */
#define SN_DEVICE_REV_REG 0x08
#define SN_DPPLL_SRC_REG 0x0A
#define DPPLL_CLK_SRC_DSICLK BIT(0)
@@ -243,11 +244,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
}
-static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
+static struct drm_display_mode *
+get_new_adjusted_display_mode(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector =
+ drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+
+ return &crtc_state->adjusted_mode;
+}
+
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
u32 bit_rate_khz, clk_freq_khz;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
bit_rate_khz = mode->clock *
mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
@@ -274,7 +290,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = {
460800000,
};
-static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
int i;
u32 refclk_rate;
@@ -287,7 +304,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
clk_prepare_enable(pdata->refclk);
} else {
- refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000;
refclk_lut = ti_sn_bridge_dsiclk_lut;
refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
}
@@ -311,12 +328,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
}
-static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
mutex_lock(&pdata->comms_mutex);
/* configure bridge ref_clk */
- ti_sn_bridge_set_refclk_freq(pdata);
+ ti_sn_bridge_set_refclk_freq(pdata, state);
/*
* HPD on this bridge chip is a bit useless. This is an eDP bridge
@@ -376,7 +394,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
* clock so reading early doesn't work.
*/
if (pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, NULL);
return ret;
}
@@ -423,36 +441,8 @@ static int status_show(struct seq_file *s, void *data)
return 0;
}
-
DEFINE_SHOW_ATTRIBUTE(status);
-static void ti_sn65dsi86_debugfs_remove(void *data)
-{
- debugfs_remove_recursive(data);
-}
-
-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
-{
- struct device *dev = pdata->dev;
- struct dentry *debugfs;
- int ret;
-
- debugfs = debugfs_create_dir(dev_name(dev), NULL);
-
- /*
- * We might get an error back if debugfs wasn't enabled in the kernel
- * so let's just silently return upon failure.
- */
- if (IS_ERR_OR_NULL(debugfs))
- return;
-
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
- if (ret)
- return;
-
- debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
-}
-
/* -----------------------------------------------------------------------------
* Auxiliary Devices (*not* AUX)
*/
@@ -732,6 +722,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -748,7 +739,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
* Attach the next bridge.
* We never want the next bridge to *also* create a connector.
*/
- ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
+ ret = drm_bridge_attach(encoder, pdata->next_bridge,
&pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
goto err_initted_aux;
@@ -821,12 +812,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
}
-static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
unsigned int bit_rate_mhz, clk_freq_mhz;
unsigned int val;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* set DSIA clk frequency */
bit_rate_mhz = (mode->clock / 1000) *
@@ -856,12 +848,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state,
+ unsigned int bpp)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
/* Calculate minimum bit rate based on our pixel clock. */
bit_rate_khz = mode->clock * bpp;
@@ -960,10 +954,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata)
return valid_rates;
}
-static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
+static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata,
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ get_new_adjusted_display_mode(&pdata->bridge, state);
u8 hsync_polarity = 0, vsync_polarity = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1105,7 +1100,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
pdata->ln_polrs << LN_POLRS_OFFSET);
/* set dsi clk frequency value */
- ti_sn_bridge_set_dsi_rate(pdata);
+ ti_sn_bridge_set_dsi_rate(pdata, state);
/*
* The SN65DSI86 only supports ASSR Display Authentication method and
@@ -1140,7 +1135,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
/* Train until we run out of rates */
- for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp);
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
dp_rate_idx++) {
if (!(valid_rates & BIT(dp_rate_idx)))
@@ -1156,7 +1151,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
}
/* config video parameters */
- ti_sn_bridge_set_video_timings(pdata);
+ ti_sn_bridge_set_video_timings(pdata, state);
/* enable video stream */
regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
@@ -1171,7 +1166,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(pdata->dev);
if (!pdata->refclk)
- ti_sn65dsi86_enable_comms(pdata);
+ ti_sn65dsi86_enable_comms(pdata, state);
/* td7: min 100 us after enable before DSI data */
usleep_range(100, 110);
@@ -1216,6 +1211,15 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
return drm_edid_read_ddc(connector, &pdata->aux.ddc);
}
+static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ struct dentry *debugfs;
+
+ debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
+ debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+}
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@@ -1229,6 +1233,7 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .debugfs_init = ti_sn65dsi86_debugfs_init,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@@ -1312,7 +1317,6 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
if (ret)
return ret;
- pdata->bridge.funcs = &ti_sn_bridge_funcs;
pdata->bridge.of_node = np;
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
@@ -1894,6 +1898,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
+ u8 id_buf[8];
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
@@ -1901,9 +1906,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return -ENODEV;
}
- pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ pdata = devm_drm_bridge_alloc(dev, struct ti_sn65dsi86, bridge, &ti_sn_bridge_funcs);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
dev_set_drvdata(dev, pdata);
pdata->dev = dev;
@@ -1937,7 +1942,15 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (ret)
return ret;
- ti_sn65dsi86_debugfs_init(pdata);
+ pm_runtime_get_sync(dev);
+ ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf));
+ pm_runtime_put_autosuspend(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read device id\n");
+
+ /* The ID string is stored backwards */
+ if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
+ return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
/*
* Break ourselves up into a collection of aux devices. The only real
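Beyond the shared patterns, ti-sn65dsi86 gains two independent changes: debugfs moves from a driver-managed directory with a devm cleanup action to the drm_bridge_funcs.debugfs_init callback, whose directory lifetime is handled by the DRM core, and probe now verifies the chip identity before registering anything. The ID check reads registers 0x00..0x07 (SN_DEVICE_ID_REGS) under a runtime-PM reference; per the patch's comment the ID bytes are stored backwards, which is why a SN65DSI86 matches the string "68ISD ". A sketch of the check in isolation, assuming a regmap is already set up as in probe:

	u8 id_buf[8];
	int ret;

	pm_runtime_get_sync(dev);	/* chip must be powered to read */
	ret = regmap_bulk_read(regmap, SN_DEVICE_ID_REGS, id_buf,
			       ARRAY_SIZE(id_buf));
	pm_runtime_put_autosuspend(dev);
	if (ret)
		return dev_err_probe(dev, ret, "failed to read device id\n");

	/* ID bytes are stored in reverse order on the wire. */
	if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
		return dev_err_probe(dev, -EOPNOTSUPP,
				     "unsupported device id\n");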
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index 22316382451f..cca75443f012 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -45,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge,
regulator_disable(tdp158->vcc);
}
-static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int tdp158_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
struct tdp158 *tdp158 = bridge->driver_private;
- return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags);
+ return drm_bridge_attach(encoder, tdp158->next, bridge, flags);
}
static const struct drm_bridge_funcs tdp158_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 79ab5da827e1..e15d232ddbac 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
}
static int tfp410_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
- ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
return ret;
@@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge,
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 47b74cb25b14..1c289051a598 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
}
static int tpd12s015_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tpd12s015_device *tpd = to_tpd12s015(bridge);
@@ -46,7 +47,7 @@ static int tpd12s015_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ ret = drm_bridge_attach(encoder, tpd->next_bridge,
bridge, flags);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index a8fca079921b..fddfbd4d2493 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -193,6 +193,8 @@ CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
CONFIG_DRM_ANALOGIX_ANX7625=y
+CONFIG_PHY_MTK_HDMI=y
+CONFIG_PHY_MTK_MIPI_DSI=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build-igt.sh b/drivers/gpu/drm/ci/build-igt.sh
index eddb5f782a5e..caa2f4804ed5 100644
--- a/drivers/gpu/drm/ci/build-igt.sh
+++ b/drivers/gpu/drm/ci/build-igt.sh
@@ -71,4 +71,4 @@ tar -cf artifacts/igt.tar /igt
# Pass needed files to the test stage
S3_ARTIFACT_NAME="igt.tar.gz"
gzip -c artifacts/igt.tar > ${S3_ARTIFACT_NAME}
-ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/${S3_ARTIFACT_NAME}
+s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${KERNEL_ARCH}/
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 19fe01257ab9..6fb74c51abe2 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -98,14 +98,14 @@ done
make ${KERNEL_IMAGE_NAME}
-mkdir -p /lava-files/
+mkdir -p /kernel/
for image in ${KERNEL_IMAGE_NAME}; do
- cp arch/${KERNEL_ARCH}/boot/${image} /lava-files/.
+ cp arch/${KERNEL_ARCH}/boot/${image} /kernel/.
done
if [[ -n ${DEVICE_TREES} ]]; then
make dtbs
- cp ${DEVICE_TREES} /lava-files/.
+ cp ${DEVICE_TREES} /kernel/.
fi
make modules
@@ -121,11 +121,11 @@ if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
-d arch/arm64/boot/Image.lzma \
-C lzma\
-b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
- /lava-files/cheza-kernel
+ /kernel/cheza-kernel
KERNEL_IMAGE_NAME+=" cheza-kernel"
# Make a gzipped copy of the Image for db410c.
- gzip -k /lava-files/Image
+ gzip -k /kernel/Image
KERNEL_IMAGE_NAME+=" Image.gz"
fi
@@ -139,7 +139,7 @@ cp -rfv drivers/gpu/drm/ci/* install/.
. .gitlab-ci/container/container_post_build.sh
if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
- xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /lava-files/vmlinux.xz
+ xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > /kernel/vmlinux.xz
FILES_TO_UPLOAD="$KERNEL_IMAGE_NAME vmlinux.xz"
if [[ -n $DEVICE_TREES ]]; then
@@ -148,13 +148,13 @@ if [[ "$UPLOAD_TO_MINIO" = "1" ]]; then
ls -l "${S3_JWT_FILE}"
for f in $FILES_TO_UPLOAD; do
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" /lava-files/$f \
- https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/$f
+ s3_upload /kernel/$f \
+ https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
done
S3_ARTIFACT_NAME="kernel-files.tar.zst"
tar --zstd -cf $S3_ARTIFACT_NAME install
- ci-fairy s3cp --token-file "${S3_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/${S3_ARTIFACT_NAME}
+ s3_upload ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/
echo "Download vmlinux.xz from https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/vmlinux.xz"
fi
@@ -165,7 +165,7 @@ ln -s common artifacts/install/ci-common
cp .config artifacts/${CI_JOB_NAME}_config
for image in ${KERNEL_IMAGE_NAME}; do
- cp /lava-files/$image artifacts/install/.
+ cp /kernel/$image artifacts/install/.
done
tar -C artifacts -cf artifacts/install.tar install
diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml
index 274f118533a7..8eb56ebcf4aa 100644
--- a/drivers/gpu/drm/ci/build.yml
+++ b/drivers/gpu/drm/ci/build.yml
@@ -67,7 +67,7 @@ testing:arm32:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm.config
@@ -79,7 +79,7 @@ testing:arm64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: arm64.config
@@ -91,7 +91,7 @@ testing:x86_64:
#
# db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel
# becoming too big for their bootloaders.
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT"
+ ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH"
UPLOAD_TO_MINIO: 1
MERGE_FRAGMENT: x86_64.config
@@ -143,6 +143,10 @@ debian-arm64-release:
rules:
- when: never
+debian-arm64-ubsan:
+ rules:
+ - when: never
+
debian-build-testing:
rules:
- when: never
@@ -183,6 +187,10 @@ debian-testing-msan:
rules:
- when: never
+debian-testing-ubsan:
+ rules:
+ - when: never
+
debian-vulkan:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/container.yml b/drivers/gpu/drm/ci/container.yml
index 07dc13ff865d..56c95c2f91ae 100644
--- a/drivers/gpu/drm/ci/container.yml
+++ b/drivers/gpu/drm/ci/container.yml
@@ -24,6 +24,18 @@ alpine/x86_64_build:
rules:
- when: never
+debian/arm32_test-base:
+ rules:
+ - when: never
+
+debian/arm32_test-gl:
+ rules:
+ - when: never
+
+debian/arm32_test-vk:
+ rules:
+ - when: never
+
debian/arm64_test-gl:
rules:
- when: never
@@ -32,6 +44,10 @@ debian/arm64_test-vk:
rules:
- when: never
+debian/baremetal_arm32_test:
+ rules:
+ - when: never
+
debian/ppc64el_build:
rules:
- when: never
@@ -40,6 +56,14 @@ debian/s390x_build:
rules:
- when: never
+debian/x86_32_build:
+ rules:
+ - when: never
+
+debian/x86_64_test-android:
+ rules:
+ - when: never
+
debian/x86_64_test-vk:
rules:
- when: never
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index f04aabe8327c..ba75b3a7eca4 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,11 +1,11 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 7d3062470f3ccc6cb40540e772e902c7e2248024
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha f73132f1215a37ce8ffc711a0136c90649aaf128
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 33adea9ebafd059ac88a5ccfec60536394f36c7c
+ IGT_VERSION: 04bedb9238586b81d4d4ca62b02e584f6cfc77af
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.20.0
@@ -20,8 +20,10 @@ variables:
rm download-git-cache.sh
set +o xtrace
S3_JWT_FILE: /s3_jwt
+ S3_JWT_HEADER_FILE: /s3_jwt_header
S3_JWT_FILE_SCRIPT: |-
echo -n '${S3_JWT}' > '${S3_JWT_FILE}' &&
+ echo -n "Authorization: Bearer ${S3_JWT}" > '${S3_JWT_HEADER_FILE}' &&
unset CI_JOB_JWT S3_JWT # Unsetting vulnerable env variables
S3_HOST: s3.freedesktop.org
# This bucket is used to fetch the kernel image
@@ -143,11 +145,11 @@ stages:
# Pre-merge pipeline
- if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
# Push to a branch on a fork
- - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"
+ - if: &is-fork-push $CI_PIPELINE_SOURCE == "push"
# nightly pipeline
- if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
# pipeline for direct pushes that bypassed the CI
- - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
+ - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"
# Rules applied to every job in the pipeline
@@ -170,29 +172,48 @@ stages:
- !reference [.disable-farm-mr-rules, rules]
# Never run immediately after merging, as we just ran everything
- !reference [.never-post-merge-rules, rules]
- # Build everything in merge pipelines, if any files affecting the pipeline
- # were changed
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- changes: &all_paths
- - drivers/gpu/drm/ci/**/*
when: on_success
# Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- changes:
- *all_paths
when: manual
- # Skip everything for pre-merge and merge pipelines which don't change
- # anything in the build
+ # Build everything after someone bypassed the CI
+ - if: *is-direct-push
+ when: manual
+ # Build everything in scheduled pipelines
+ - if: *is-scheduled-pipeline
+ when: on_success
+ # Allow building everything in fork pipelines, but build nothing unless
+ # manually triggered
+ - when: manual
+
+
+# Repeat of the above but with `when: on_success` replaced with
+# `when: delayed` + `start_in:`, for build-only jobs.
+# Note: make sure the branches in this list are the same as in
+# `.container+build-rules` above.
+.build-only-delayed-rules:
+ rules:
+ - !reference [.common-rules, rules]
+ # Run when re-enabling a disabled farm, but not when disabling it
+ - !reference [.disable-farm-mr-rules, rules]
+ # Never run immediately after merging, as we just ran everything
+ - !reference [.never-post-merge-rules, rules]
+ # Build everything in merge pipelines
- if: *is-merge-attempt
- when: never
+ when: delayed
+ start_in: &build-delay 5 minutes
+ # Same as above, but for pre-merge pipelines
- if: *is-pre-merge
- when: never
+ when: manual
# Build everything after someone bypassed the CI
- if: *is-direct-push
- when: on_success
+ when: manual
# Build everything in scheduled pipelines
- if: *is-scheduled-pipeline
- when: on_success
+ when: delayed
+ start_in: *build-delay
# Allow building everything in fork pipelines, but build nothing unless
# manually triggered
- when: manual
@@ -232,7 +253,7 @@ make git archive:
- tar -cvzf ../$CI_PROJECT_NAME.tar.gz .
# Use id_tokens for JWT auth
- - ci-fairy s3cp --token-file "${S3_JWT_FILE}" ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/$CI_PROJECT_NAME.tar.gz
+ - s3_upload ../$CI_PROJECT_NAME.tar.gz https://$S3_HOST/${S3_GITCACHE_BUCKET}/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/
# Sanity checks of MR settings and commit logs
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 68b042e43b7f..2a0599f12c58 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -85,5 +85,16 @@ deqp-runner junit \
--limit 50 \
--template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml"
+# Check if /proc/lockdep_stats exists
+if [ -f /proc/lockdep_stats ]; then
+ # If debug_locks is 0, lockdep has detected an issue and turned itself off.
+ debug_locks=$(grep 'debug_locks:' /proc/lockdep_stats | awk '{print $2}')
+ if [ "$debug_locks" -eq 0 ] && [ "$ret" -eq 0 ]; then
+ echo "Warning: LOCKDEP issue detected. Please check dmesg logs for more information."
+ cat /proc/lockdep_stats
+ ret=101
+ fi
+fi
+
cd $oldpath
exit $ret
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 20049f3626b2..53fe34b86578 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,5 +1,5 @@
variables:
- CONTAINER_TAG: "20250204-mesa-uprev"
+ CONTAINER_TAG: "20250328-mesa-uprev"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
@@ -20,3 +20,5 @@ variables:
DEBIAN_PYUTILS_TAG: "${CONTAINER_TAG}"
ALPINE_X86_64_LAVA_SSH_TAG: "${CONTAINER_TAG}"
+
+ CONDITIONAL_BUILD_ANGLE_TAG: fec96cc945650c5fe9f7188cabe80d8a
diff --git a/drivers/gpu/drm/ci/lava-submit.sh b/drivers/gpu/drm/ci/lava-submit.sh
index 6e5ac51e8c0a..a1e8b34fb2d4 100755
--- a/drivers/gpu/drm/ci/lava-submit.sh
+++ b/drivers/gpu/drm/ci/lava-submit.sh
@@ -48,12 +48,13 @@ ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
rm -rf results
mkdir -p results/job-rootfs-overlay/
-artifacts/ci-common/generate-env.sh > results/job-rootfs-overlay/set-job-env-vars.sh
+artifacts/ci-common/export-gitlab-job-env-for-dut.sh \
+ > results/job-rootfs-overlay/set-job-env-vars.sh
cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
-ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
+s3_upload job-rootfs-overlay.tar.gz "https://${JOB_ARTIFACTS_BASE}"
# Prepare env vars for upload.
section_switch variables "Environment variables passed through to device:"
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 6a1e059858e5..84a25f0e783b 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -1,6 +1,14 @@
+.allow_failure_lockdep:
+ variables:
+ FF_USE_NEW_BASH_EVAL_STRATEGY: 'true'
+ allow_failure:
+ exit_codes:
+ - 101
+
.lava-test:
extends:
- .container+build-rules
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -69,6 +77,7 @@
extends:
- .baremetal-test-arm64
- .use-debian/baremetal_arm64_test
+ - .allow_failure_lockdep
timeout: "1h30m"
rules:
- !reference [.scheduled_pipeline-rules, rules]
@@ -89,6 +98,28 @@
tags:
- $RUNNER_TAG
+.software-driver:
+ stage: software-driver
+ extends:
+ - .allow_failure_lockdep
+ timeout: "1h30m"
+ rules:
+ - !reference [.scheduled_pipeline-rules, rules]
+ - when: on_success
+ extends:
+ - .test-gl
+ tags:
+ - kvm
+ script:
+ - ln -sf $CI_PROJECT_DIR/install /install
+ - mv install/bzImage /kernel/bzImage
+ - mkdir -p /lib/modules
+ - install/crosvm-runner.sh install/igt_runner.sh
+ needs:
+ - debian/x86_64_test-gl
+ - testing:x86_64
+ - igt:x86_64
+
.msm-sc7180:
extends:
- .lava-igt:arm64
@@ -133,7 +164,7 @@ msm:apq8016:
BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:apq8096:
extends:
@@ -147,7 +178,7 @@ msm:apq8096:
GPU_VERSION: apq8096
RUNNER_TAG: google-freedreno-db820c
script:
- - ./install/bare-metal/fastboot.sh
+ - ./install/bare-metal/fastboot.sh || exit $?
msm:sdm845:
extends:
@@ -161,7 +192,7 @@ msm:sdm845:
GPU_VERSION: sdm845
RUNNER_TAG: google-freedreno-cheza
script:
- - ./install/bare-metal/cros-servo.sh
+ - ./install/bare-metal/cros-servo.sh || exit $?
msm:sm8350-hdk:
extends:
@@ -440,47 +471,16 @@ panfrost:g12b:
- .panfrost-gpu
virtio_gpu:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
CROSVM_GALLIUM_DRIVER: llvmpipe
DRIVER_NAME: virtio_gpu
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - install/crosvm-runner.sh install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
vkms:none:
- stage: software-driver
- timeout: "1h30m"
- rules:
- - !reference [.scheduled_pipeline-rules, rules]
- - when: on_success
+ extends:
+ - .software-driver
variables:
DRIVER_NAME: vkms
GPU_VERSION: none
- extends:
- - .test-gl
- tags:
- - kvm
- script:
- - ln -sf $CI_PROJECT_DIR/install /install
- - mv install/bzImage /lava-files/bzImage
- - mkdir -p /lib/modules
- - ./install/crosvm-runner.sh ./install/igt_runner.sh
- needs:
- - debian/x86_64_test-gl
- - testing:x86_64
- - igt:x86_64
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index 75374085f40f..f44dbce3151a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -14,16 +14,10 @@ amdgpu/amd_plane@mpo-scale-nv12,Fail
amdgpu/amd_plane@mpo-scale-p010,Fail
amdgpu/amd_plane@mpo-scale-rgb,Crash
amdgpu/amd_plane@mpo-swizzle-toggle,Fail
-amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Crash
+amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
-kms_async_flips@alternate-sync-async-flip,Fail
-kms_async_flips@alternate-sync-async-flip-atomic,Fail
-kms_async_flips@test-cursor,Fail
-kms_async_flips@test-cursor-atomic,Fail
-kms_async_flips@test-time-stamp,Fail
-kms_async_flips@test-time-stamp-atomic,Fail
kms_atomic_transition@plane-all-modeset-transition-internal-panels,Fail
kms_atomic_transition@plane-all-transition,Fail
kms_atomic_transition@plane-all-transition-nonblocking,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index 3879c4812a22..902d54027506 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -14,6 +14,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index a29cea4f234c..8e2b5504004e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,22 +1,18 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_flip@dpms-off-confusion-interruptible,Timeout
-kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
@@ -31,12 +27,18 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -44,8 +46,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 2ef1dc35a7fa..922327632eff 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index ee11999e3da1..7353ab11e940 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -1,4 +1,3 @@
-core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -16,6 +15,7 @@ kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
@@ -30,7 +30,6 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_pm_backlight@basic-brightness,Fail
-kms_pm_backlight@brightness-with-dpms,Crash
kms_pm_backlight@fade,Fail
kms_pm_backlight@fade-with-dpms,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
@@ -43,8 +42,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index 4f50e0240ff4..80bf2741866c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 47b3f1d42bb6..6fef7c1e56ea 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -8,8 +8,9 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip@busy-flip,Timeout
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -34,17 +35,22 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
+kms_pipe_stress@stress-xrgb8888-untiled,Fail
+kms_pipe_stress@stress-xrgb8888-ytiled,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
kms_psr2_sf@plane-move-sf-dmg-area,Fail
-kms_psr2_sf@pr-cursor-plane-update-sf,Timeout
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_sf@psr2-cursor-plane-update-sf,Fail
@@ -57,6 +63,7 @@ kms_psr2_sf@psr2-primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
kms_setmode@basic,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -65,6 +72,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index c87ff8b40e99..c393a138b8a6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
@@ -16,7 +17,6 @@ gem_.*
# Hangs the machine and timeout occurs
i915_pm_rc6_residency.*
i915_suspend.*
-xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
i915_pm_rpm.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 843c363b42f5..8adf5f0a6e80 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,39 +1,47 @@
-core_setmaster@master-drop-set-shared-fd,Fail
-core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
gen9_exec_parse@unaligned-access,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
-kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
+kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
+sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index 219ae839323a..2e4ef9f35654 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
index 0e08fff741aa..57453e340040 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -3,12 +3,13 @@ i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
kms_flip@dpms-off-confusion,Fail
+kms_flip@nonexisting-fb,Fail
kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset,Fail
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,UnexpectedImprovement(Skip)
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
@@ -28,7 +29,6 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-blt,Timeout
kms_lease@lease-uevent,Fail
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_rotation_crc@bad-pixel-format,Fail
@@ -37,13 +37,10 @@ kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
+perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
+prime_busy@before-wait,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
index 1a3d87c0ca6e..8dec57da1bb3 100644
--- a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..117098bc95d9 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -21,8 +21,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index dc722d6a774e..e287462a491a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -12,6 +12,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests takes ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 93d42b146df9..462c050a8b2d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,13 +1,14 @@
api_intel_allocator@reopen,Timeout
api_intel_bb@destroy-bb,Timeout
core_hotunplug@hotrebind-lateclose,Timeout
-drm_read@short-buffer-block,Timeout
dumb_buffer@map-valid,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rps@engine-order,Timeout
+i915_pm_rps@waitboost,Fail
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
@@ -16,6 +17,7 @@ perf_pmu@enable-race,Timeout
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
perf_pmu@semaphore-wait-idle,Timeout
+prime_busy@before,Fail
prime_mmap@test_refcounting,Timeout
sriov_basic@enable-vfs-bind-unbind-each-numvfs-all,Timeout
syncobj_basic@illegal-fd-to-handle,Timeout
@@ -26,8 +28,3 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index 938377896841..429dc3c731df 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests take ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 1cb6978c86dc..0f167cfd503c 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,4 +1,4 @@
-core_setmaster@master-drop-set-shared-fd,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -6,10 +6,9 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@test-time-stamp,Timeout
-kms_ccs@crc-sprite-planes-basic-y-tiled-ccs,Timeout
-kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_ccs@ccs-on-another-bo-y-tiled-gen12-rc-ccs-cc,Timeout
+kms_cursor_crc@cursor-suspend,Timeout
+kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -30,13 +29,19 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb101010-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Timeout
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
+kms_plane_scaling@planes-upscale-factor-0-25,Timeout
+kms_pm_backlight@brightness-with-dpms,Crash
+kms_pm_backlight@fade,Crash
+kms_prop_blob@invalid-set-prop-any,Fail
+kms_properties@connector-properties-legacy,Timeout
+kms_universal_plane@disable-primary-vs-flip,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
@@ -45,8 +50,3 @@ sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-xe_module_load@force-load,Fail
-xe_module_load@load,Fail
-xe_module_load@many-reload,Fail
-xe_module_load@reload,Fail
-xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 29bff8922ae1..7e7374ebf3d1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -9,6 +9,7 @@ nouveau_.*
^v3d.*
^vc4.*
^vmwgfx*
+^xe.*
# GEM tests take ~1000 hours, so skip it
gem_.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 4f176c04ec4e..592d7d69e6fc 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -17,8 +17,28 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 2956567c3048..443596d9e662 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -46,3 +46,10 @@ kms_prop_blob@invalid-set-prop
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/d25442b9-0b6b-433c-8e23-997840fad305@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@flip-vs-wf_vblank-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 5a063361d7f2..184d0cccc318 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,12 +1,38 @@
-core_setmaster@master-drop-set-user,Fail
dumb_buffer@create-clear,Crash
kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-varying-size,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@basic-plain-flip,Fail
+kms_flip@dpms-off-confusion,Fail
+kms_flip@dpms-off-confusion-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
index df7e5ce7a036..0c67fec92450 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -18,3 +18,24 @@ kms_cursor_legacy@cursor-vs-flip-atomic-transitions
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
fbdev@write
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/a520d1d6-95b3-4573-b8f2-689f05bc2230@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@basic-flip-vs-modeset
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/ca960a82-00fc-4183-b983-998f7ac2fbb5@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/da578eed-224f-4374-853a-1ff0aa20d03b@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_atomic_transition@plane-toggle-modeset-transition
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index d0db51874aef..b5ee7323a160 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 8198e06344a3..9fd44a4b962a 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -11,6 +11,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 7752adff05c1..72c469021b66 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,8 +1,4 @@
kms_3d,Fail
-kms_cursor_legacy@forked-bo,Fail
-kms_cursor_legacy@forked-move,Fail
-kms_cursor_legacy@single-bo,Fail
-kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index 1674c8e214d6..87724413174c 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 5550be5486ed..a4d2f2a7963a 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index 8910afb6acf2..d270af1cca52 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -32,3 +32,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/73
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-position-covered
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index 478d7c161616..d4b8ba3a54a9 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -28,3 +29,6 @@ kms_cursor_crc@cursor-random-max-size
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# It causes other tests to fail, so skip it.
+kms_invalid_mode@overflow-vrefresh
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index cd3d3b0befe4..cafc802cecea 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -11,3 +11,10 @@ msm/msm_mapping@shadow
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_lease@page-flip-implicit-plane
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/74
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_cursor_crc@cursor-random-128x128
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index ef9318afcd89..022db559cc7d 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 38ec0305c1f4..e32d73c6c98e 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -130,3 +130,10 @@ kms_lease@page-flip-implicit-plane
# IGT Version: 1.28-ga73311079
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/75
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@plain-flip-ts-check-interruptible
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 2ce7f7e23a01..6c86d1953e11 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -18,6 +18,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -35,3 +36,315 @@ kms_content_protection@uevent
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
kms_display_modes@extended-mode-basic
kms_display_modes@mst-extended-mode-negative
+
+# Kernel panic
+msm/msm_recovery@hangcheck
+# DEBUG - Begin test msm/msm_recovery@hangcheck
+# Console: switching to colour dummy device 80x25
+# [ 489.526286] [IGT] msm_recovery: executing
+# [ 489.531926] [IGT] msm_recovery: starting subtest hangcheck
+# [ 492.808574] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 492.820358] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 492.831154] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 493.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 493.844177] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 493.854971] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 494.824633] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 494.836237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 494.847034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 495.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 495.828170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 495.838966] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 496.804643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 496.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 496.827041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 497.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 497.844170] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 497.854963] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 498.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 498.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 498.843024] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 499.816568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 499.828163] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 499.838958] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 500.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 500.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 500.830960] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 501.832570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 501.844175] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 501.854965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 502.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 502.836171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 502.846965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 503.816570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 503.828176] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 503.838969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 504.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 504.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 504.827033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 505.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 505.840247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 505.851043] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 506.820637] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 506.832233] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 506.843026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 507.816567] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 507.828171] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 507.838965] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 508.808568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 508.820173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 508.830969] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 509.832568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 509.844173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 509.854967] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 510.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 510.836162] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 510.846954] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 511.816569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 511.828173] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 511.838968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 512.804641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 512.816246] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 512.827040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 513.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 513.840239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 513.851035] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 514.824568] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 514.836164] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 514.846959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 515.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 515.824235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 515.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 515.912427] rcu: INFO: rcu_preempt self-detected stall on CPU
+# [ 515.918398] rcu: 0-....: (6452 ticks this GP) idle=6afc/1/0x4000000000000000 softirq=12492/12697 fqs=3179
+# [ 515.929296] rcu: (t=6505 jiffies g=36205 q=58 ncpus=8)
+# [ 515.934709] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
+# [ 515.934727] Tainted: [W]=WARN
+# [ 515.934732] Hardware name: Google Cheza (rev3+) (DT)
+# [ 515.934739] pstate: 00400009 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 515.934751] pc : rcu_core+0x59c/0xe68
+# [ 515.934769] lr : rcu_core+0x74/0xe68
+# [ 515.934781] sp : ffff800080003e50
+# [ 515.934785] x29: ffff800080003e50 x28: ffff225d038e9bc0 x27: 0000000000000002
+# [ 515.934805] x26: ffffc171a8ee6108 x25: ffffc171a85bc2c0 x24: ffff60ecd691e000
+# [ 515.934820] x23: ffffc171a85d15c0 x22: ffffc171a8f8d780 x21: ffff225e7eeef5c0
+# [ 515.934835] x20: ffffc171a8ef0e80 x19: ffffc171a85d15d1 x18: ffffc171a9461e70
+# [ 515.934850] x17: ffff60ecd691e000 x16: ffff800080000000 x15: 0000000000000000
+# [ 515.934866] x14: ffffc171a85d0780 x13: 0000000000000400 x12: 0000000000000000
+# [ 515.934880] x11: ffffc171a85ce900 x10: ffffc171a8ef5000 x9 : ffffc171a8ef0000
+# [ 515.934894] x8 : ffff800080003d88 x7 : ffffc171a8ee6100 x6 : ffff800080003de0
+# [ 515.934909] x5 : ffff800080003dc8 x4 : 0000000000000003 x3 : 0000000000000000
+# [ 515.934923] x2 : 0000000000000101 x1 : 0000000000000000 x0 : ffff225d038e9bc0
+# [ 515.934939] Call trace:
+# [ 515.934945] rcu_core+0x59c/0xe68 (P)
+# [ 515.934962] rcu_core_si+0x10/0x1c
+# [ 515.934976] handle_softirqs+0x118/0x4b8
+# [ 515.934994] __do_softirq+0x14/0x20
+# [ 515.935007] ____do_softirq+0x10/0x1c
+# [ 515.935021] call_on_irq_stack+0x24/0x4c
+# [ 515.935034] do_softirq_own_stack+0x1c/0x28
+# [ 515.935048] __irq_exit_rcu+0x174/0x1b4
+# [ 515.935063] irq_exit_rcu+0x10/0x38
+# [ 515.935077] el1_interrupt+0x38/0x64
+# [ 515.935092] el1h_64_irq_handler+0x18/0x24
+# [ 515.935104] el1h_64_irq+0x6c/0x70
+# [ 515.935115] lock_acquire+0x1e0/0x338 (P)
+# [ 515.935129] __mutex_lock+0xa8/0x4b8
+# [ 515.935144] mutex_lock_nested+0x24/0x30
+# [ 515.935159] _find_opp_table_unlocked+0x40/0xfc
+# [ 515.935174] _find_key+0x64/0x16c
+# [ 515.935184] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 515.935197] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 515.935211] __cpufreq_driver_target+0x144/0x29c
+# [ 515.935227] sugov_work+0x58/0x74
+# [ 515.935239] kthread_worker_fn+0xf4/0x324
+# [ 515.935254] kthread+0x12c/0x208
+# [ 515.935266] ret_from_fork+0x10/0x20
+# [ 516.808569] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 516.820174] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 516.830968] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 517.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 517.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 517.851032] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 518.820642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 518.832237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 518.843030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 519.812636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 519.824231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 519.835026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 520.808570] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 520.820165] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 520.830959] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 521.828643] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 521.840238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 521.851033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 522.820636] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 522.832232] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 522.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 523.812639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 523.824239] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 523.835034] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 524.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 524.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 524.827026] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 525.828641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 525.840236] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 525.851031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 526.820641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 526.832244] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 526.843041] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 527.812642] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 527.824242] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 527.835038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 528.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 528.816234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 528.827027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 529.832634] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 529.844231] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 529.855017] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 530.820646] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 530.832270] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 530.843065] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 531.812640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 531.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 531.835030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 532.804640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 532.816237] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 532.827031] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 533.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 533.840243] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 533.851037] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 534.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 534.832245] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 534.843038] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 535.812641] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 535.824238] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 535.835033] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 536.804639] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 536.816235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 536.827030] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 537.828640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 537.840234] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 537.851020] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 538.820640] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 538.832235] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 538.843027] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 539.812644] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: hangcheck detected gpu lockup rb 0!
+# [ 539.824247] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: completed fence: 45605
+# [ 539.835040] msm_dpu ae01000.display-controller: [drm:hangcheck_handler] *ERROR* 6.3.0.2: submitted fence: 45611
+# [ 540.124426] watchdog: BUG: soft lockup - CPU#0 stuck for 49s! [sugov:0:126]
+# [ 540.124439] Modules linked in:
+# [ 540.124448] irq event stamp: 9912389
+# [ 540.124453] hardirqs last enabled at (9912388): [<ffffc171a767a24c>] exit_to_kernel_mode+0x38/0x130
+# [ 540.124473] hardirqs last disabled at (9912389): [<ffffc171a767a368>] el1_interrupt+0x24/0x64
+# [ 540.124486] softirqs last enabled at (9898068): [<ffffc171a62bc290>] handle_softirqs+0x4a0/0x4b8
+# [ 540.124505] softirqs last disabled at (9898071): [<ffffc171a62105b0>] __do_softirq+0x14/0x20
+# [ 540.124525] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W 6.14.0-rc4-gdddf15cff632 #1
+# [ 540.124540] Tainted: [W]=WARN
+# [ 540.124544] Hardware name: Google Cheza (rev3+) (DT)
+# [ 540.124549] pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 540.124560] pc : xhci_urb_enqueue+0xbc/0x32c
+# [ 540.124573] lr : xhci_urb_enqueue+0xb4/0x32c
+# [ 540.124581] sp : ffff800080003c20
+# [ 540.124586] x29: ffff800080003c20 x28: 0000000000000000 x27: ffff225d00b1e6a0
+# [ 540.124602] x26: ffff225d01c3d800 x25: 0000000000000001 x24: 0000000000000006
+# [ 540.124617] x23: ffff225d044dc000 x22: ffff225d044dc000 x21: 0000000000000001
+# [ 540.124632] x20: ffff225d002d7280 x19: ffff225d0573a780 x18: ffff225e7eff0f50
+# [ 540.124647] x17: 000000000000cab0 x16: 0000000000000000 x15: ffff225d0353a000
+# [ 540.124661] x14: 0000000000000000 x13: 0000000000000820 x12: 0000000000000000
+# [ 540.124674] x11: ffff800080003a30 x10: 0000000000000001 x9 : 0000000000000000
+# [ 540.124689] x8 : ffff225d002d7300 x7 : 0000000000000000 x6 : 000000000000003f
+# [ 540.124702] x5 : 00000000ffffffff x4 : 0000000000000920 x3 : 0000000000000080
+# [ 540.124716] x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff225d002d7280
+# [ 540.124731] Call trace:
+# [ 540.124736] xhci_urb_enqueue+0xbc/0x32c (P)
+# [ 540.124751] usb_hcd_submit_urb+0x98/0x7fc
+# [ 540.124766] usb_submit_urb+0x294/0x560
+# [ 540.124780] intr_callback+0x78/0x1fc
+# [ 540.124798] __usb_hcd_giveback_urb+0x68/0x128
+# [ 540.124812] usb_giveback_urb_bh+0xa8/0x140
+# [ 540.124825] process_one_work+0x208/0x5e8
+# [ 540.124840] bh_worker+0x1a8/0x20c
+# [ 540.124853] workqueue_softirq_action+0x78/0x88
+# [ 540.124868] tasklet_hi_action+0x14/0x3c
+# [ 540.124883] handle_softirqs+0x118/0x4b8
+# [ 540.124897] __do_softirq+0x14/0x20
+# [ 540.124908] ____do_softirq+0x10/0x1c
+# [ 540.124922] call_on_irq_stack+0x24/0x4c
+# [ 540.124934] do_softirq_own_stack+0x1c/0x28
+# [ 540.124947] __irq_exit_rcu+0x174/0x1b4
+# [ 540.124961] irq_exit_rcu+0x10/0x38
+# [ 540.124976] el1_interrupt+0x38/0x64
+# [ 540.124987] el1h_64_irq_handler+0x18/0x24
+# [ 540.124998] el1h_64_irq+0x6c/0x70
+# [ 540.125009] lock_acquire+0x1e0/0x338 (P)
+# [ 540.125023] __mutex_lock+0xa8/0x4b8
+# [ 540.125038] mutex_lock_nested+0x24/0x30
+# [ 540.125052] _find_opp_table_unlocked+0x40/0xfc
+# [ 540.125067] _find_key+0x64/0x16c
+# [ 540.125078] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 540.125090] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 540.125105] __cpufreq_driver_target+0x144/0x29c
+# [ 540.125121] sugov_work+0x58/0x74
+# [ 540.125133] kthread_worker_fn+0xf4/0x324
+# [ 540.125148] kthread+0x12c/0x208
+# [ 540.125160] ret_from_fork+0x10/0x20
+# [ 540.125176] Kernel panic - not syncing: softlockup: hung tasks
+# [ 540.423567] CPU: 0 UID: 0 PID: 126 Comm: sugov:0 Tainted: G W L 6.14.0-rc4-gdddf15cff632 #1
+# [ 540.433411] Tainted: [W]=WARN, [L]=SOFTLOCKUP
+# [ 540.437901] Hardware name: Google Cheza (rev3+) (DT)
+# [ 540.443022] Call trace:
+# [ 540.445559] show_stack+0x18/0x24 (C)
+# [ 540.449357] dump_stack_lvl+0x38/0xd0
+# [ 540.453157] dump_stack+0x18/0x24
+# [ 540.456599] panic+0x3bc/0x41c
+# [ 540.459767] watchdog_timer_fn+0x254/0x2e4
+# [ 540.464005] __hrtimer_run_queues+0x3c4/0x440
+# [ 540.468508] hrtimer_interrupt+0xe4/0x244
+# [ 540.472662] arch_timer_handler_phys+0x2c/0x44
+# [ 540.477256] handle_percpu_devid_irq+0x90/0x1f0
+# [ 540.481943] handle_irq_desc+0x40/0x58
+# [ 540.485829] generic_handle_domain_irq+0x1c/0x28
+# [ 540.490604] gic_handle_irq+0x4c/0x11c
+# [ 540.494483] do_interrupt_handler+0x50/0x84
+# [ 540.498811] el1_interrupt+0x34/0x64
+# [ 540.502518] el1h_64_irq_handler+0x18/0x24
+# [ 540.506758] el1h_64_irq+0x6c/0x70
+# [ 540.510279] xhci_urb_enqueue+0xbc/0x32c (P)
+# [ 540.514693] usb_hcd_submit_urb+0x98/0x7fc
+# [ 540.518932] usb_submit_urb+0x294/0x560
+# [ 540.522901] intr_callback+0x78/0x1fc
+# [ 540.526700] __usb_hcd_giveback_urb+0x68/0x128
+# [ 540.531288] usb_giveback_urb_bh+0xa8/0x140
+# [ 540.535614] process_one_work+0x208/0x5e8
+# [ 540.539769] bh_worker+0x1a8/0x20c
+# [ 540.543293] workqueue_softirq_action+0x78/0x88
+# [ 540.547980] tasklet_hi_action+0x14/0x3c
+# [ 540.552038] handle_softirqs+0x118/0x4b8
+# [ 540.556096] __do_softirq+0x14/0x20
+# [ 540.559705] ____do_softirq+0x10/0x1c
+# [ 540.563500] call_on_irq_stack+0x24/0x4c
+# [ 540.567554] do_softirq_own_stack+0x1c/0x28
+# [ 540.571878] __irq_exit_rcu+0x174/0x1b4
+# [ 540.575849] irq_exit_rcu+0x10/0x38
+# [ 540.579462] el1_interrupt+0x38/0x64
+# [ 540.583158] el1h_64_irq_handler+0x18/0x24
+# [ 540.587397] el1h_64_irq+0x6c/0x70
+# [ 540.590918] lock_acquire+0x1e0/0x338 (P)
+# [ 540.595060] __mutex_lock+0xa8/0x4b8
+# [ 540.598760] mutex_lock_nested+0x24/0x30
+# [ 540.602818] _find_opp_table_unlocked+0x40/0xfc
+# [ 540.607503] _find_key+0x64/0x16c
+# [ 540.610940] dev_pm_opp_find_freq_exact+0x4c/0x74
+# [ 540.615798] qcom_cpufreq_hw_target_index+0xe8/0x128
+# [ 540.620924] __cpufreq_driver_target+0x144/0x29c
+# [ 540.625698] sugov_work+0x58/0x74
+# [ 540.629134] kthread_worker_fn+0xf4/0x324
+# [ 540.633278] kthread+0x12c/0x208
+# [ 540.636619] ret_from_fork+0x10/0x20
+# [ 540.640321] SMP: stopping secondary CPUs
+# [ 540.644518] Kernel Offset: 0x417126200000 from 0xffff800080000000
+# [ 540.650848] PHYS_OFFSET: 0xfff0dda400000000
+# [ 540.655170] CPU features: 0x000,00000100,00901250,8200721b
+# [ 540.660829] Memory Limit: none
+# [ 540.663999] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]---
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
index 329770c520d9..9450f2a002fd 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
index 3c7e494857b5..198deea3faa9 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -10,6 +10,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
index feeed89b6c3f..af99ac54c3a5 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -13,6 +13,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Panfrost is not a KMS driver, so skip the KMS tests
kms_.*
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index ba9160d4d8eb..61122ea7f008 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -5,6 +5,5 @@ core_setmaster_vs_auth,Crash
dumb_buffer@create-clear,Crash
fbdev@pan,Crash
kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
-kms_flip@flip-vs-modeset-vs-hang,Crash
kms_prop_blob@invalid-set-prop,Crash
kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index eb16b29dee48..71418ea35a17 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 2803d0d80192..45dd8d493f6e 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -2,7 +2,6 @@ dumb_buffer@create-clear,Crash
kms_atomic_transition@modeset-transition,Fail
kms_atomic_transition@modeset-transition-fencing,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
-kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
kms_color@gamma,Fail
kms_color@legacy-gamma,Fail
kms_cursor_crc@cursor-alpha-opaque,Fail
@@ -55,6 +54,7 @@ kms_flip@plain-flip-ts-check,Fail
kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 348b4ce7eb4b..b467991d4094 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -75,58 +75,72 @@ kms_bw@linear-tiling-2-displays-2160x1440p
# Linux Version: 6.11.0-rc5
kms_flip@flip-vs-expired-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f944dd08-c88c-49ae-aff0-274374550a93@collabora.com/T/#u
# Failure Rate: 40
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-2160x1440p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/afa2d3bf-29f2-488d-8cc9-f30d461444b0@collabora.com/T/#u
# Failure Rate: 80
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane_multiple@tiling-none
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/6fdaa97f-c1a5-4216-831f-dbb7c5f90498@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-1-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/616aa015-9574-4527-9d07-d8d698bbcc3c@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_plane@plane-position-hole-dpms
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/7a1b888f-d7db-4ed7-96cd-3975ace837fb@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-absolute-wf_vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/f17fffb6-abc4-464e-8465-395311b01f6a@collabora.com/T/#u
# Failure Rate: 100
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@flip-vs-blocking-wf-vblank
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/9b590b26-1bf9-4951-b6a3-ef6c67e6a1c6@collabora.com/T/#u
# Failure Rate: 60
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_bw@linear-tiling-2-displays-1920x1080p
-# Board Name: hp-11A-G6-EE-grunt
+# Board Name: rk3399-gru-kevin
# Bug Report: https://lore.kernel.org/dri-devel/059545fa-65b1-4f5c-a13e-4d2898679f51@collabora.com/T/#u
# Failure Rate: 20
# IGT Version: 1.29-g33adea9eb
# Linux Version: 6.13.0-rc2
kms_flip@modeset-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/eece9a80-42f3-41f4-86cc-69d8a51b976a@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@connected-linear-tiling-1-displays-2160x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/63dfd5b7-8a54-44a3-9530-f8dcd77a21d1@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_bw@linear-tiling-1-displays-3840x2160p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index e8e994d92557..b83ec75161b2 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -14,6 +14,7 @@ nouveau_.*
gem_.*
i915_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index c72fee70e739..9749ddb75121 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -157,6 +157,7 @@ kms_flip@plain-flip-ts-check-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
+kms_invalid_mode@overflow-vrefresh,Fail
kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
kms_lease@page-flip-implicit-plane,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index adbcdd0f28d2..28e37185bac0 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -19,6 +19,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index 62428f3c8f31..e3ca6da8cde7 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -88,3 +88,31 @@ kms_flip@flip-vs-expired-vblank
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_pipe_crc_basic@nonblocking-crc-frame-sequence
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/2364a6bf-e6bc-4741-8c78-cea8bdb06e03@collabora.com/T/#u
+# Failure Rate: 20
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_flip@modeset-vs-vblank-race
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/f7d72ed9-a783-46d7-b75d-54072bda32a3@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_pipe_crc_basic@suspend-read-crc
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/98d3ba54-bcb9-41ab-adb1-a18ba61ee2e4@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_plane@plane-panning-bottom-right-suspend
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/b58d15eb-094d-4ac2-aad3-83e518c2f55d@collabora.com/T/#u
+# Failure Rate: 100
+# IGT Version: 1.30-g04bedb923
+# Linux Version: 6.14.0-rc4
+kms_vblank@ts-continuation-dpms-suspend
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index 319789806271..716d2d4e452d 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -1,5 +1,6 @@
# keeps printing vkms_vblank_simulate: vblank timer overrun and never ends
kms_invalid_mode@int-max-clock
+kms_invalid_mode@overflow-vrefresh
# kernel panic seen with kms_cursor_crc tests
kms_cursor_crc.*
@@ -802,6 +803,7 @@ gem_.*
i915_.*
xe_.*
tools_test.*
+kms_dp_link_training.*
# IGT issue. is_joiner_mode() should return false for non-Intel hardware.
# https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/issues/162
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 30c736fc0067..7d2e499ea5de 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -98,6 +98,21 @@ struct drm_bridge_connector {
* HDMI connector infrastructure, if any (see &DRM_BRIDGE_OP_HDMI).
*/
struct drm_bridge *bridge_hdmi;
+ /**
+ * @bridge_hdmi_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * HDMI Audio infrastructure, if any (see &DRM_BRIDGE_OP_HDMI_AUDIO).
+ */
+ struct drm_bridge *bridge_hdmi_audio;
+ /**
+ * @bridge_dp_audio:
+ *
+ * The bridge in the chain that implements necessary support for the
+ * DisplayPort Audio infrastructure, if any (see
+ * &DRM_BRIDGE_OP_DP_AUDIO).
+ */
+ struct drm_bridge *bridge_dp_audio;
};
#define to_drm_bridge_connector(x) \
@@ -433,14 +448,25 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
- if (!bridge->funcs->hdmi_audio_startup)
- return 0;
+ if (!bridge->funcs->dp_audio_startup)
+ return 0;
- return bridge->funcs->hdmi_audio_startup(connector, bridge);
+ return bridge->funcs->dp_audio_startup(connector, bridge);
+ }
+
+ return -EINVAL;
}
static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
@@ -451,11 +477,19 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ }
- return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms);
+ }
+
+ return -EINVAL;
}
static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
@@ -464,11 +498,15 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+ bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ }
- bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+ bridge->funcs->dp_audio_shutdown(connector, bridge);
+ }
}
static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
@@ -478,15 +516,27 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto
to_drm_bridge_connector(connector);
struct drm_bridge *bridge;
- bridge = bridge_connector->bridge_hdmi;
- if (!bridge)
- return -EINVAL;
+ if (bridge_connector->bridge_hdmi_audio) {
+ bridge = bridge_connector->bridge_hdmi_audio;
+
+ if (!bridge->funcs->hdmi_audio_mute_stream)
+ return -ENOTSUPP;
- if (bridge->funcs->hdmi_audio_mute_stream)
return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
enable, direction);
- else
- return -ENOTSUPP;
+ }
+
+ if (bridge_connector->bridge_dp_audio) {
+ bridge = bridge_connector->bridge_dp_audio;
+
+ if (!bridge->funcs->dp_audio_mute_stream)
+ return -ENOTSUPP;
+
+ return bridge->funcs->dp_audio_mute_stream(connector, bridge,
+ enable, direction);
+ }
+
+ return -EINVAL;
}
static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
@@ -576,6 +626,42 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc = bridge->max_bpc;
}
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_AUDIO) {
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_hdmi_audio = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) {
+ if (bridge_connector->bridge_dp_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (bridge_connector->bridge_hdmi_audio)
+ return ERR_PTR(-EBUSY);
+
+ if (!bridge->hdmi_audio_max_i2s_playback_channels &&
+ !bridge->hdmi_audio_spdif_playback)
+ return ERR_PTR(-EINVAL);
+
+ if (!bridge->funcs->dp_audio_prepare ||
+ !bridge->funcs->dp_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ bridge_connector->bridge_dp_audio = bridge;
+ }
+
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
@@ -611,22 +697,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
max_bpc);
if (ret)
return ERR_PTR(ret);
-
- if (bridge->hdmi_audio_max_i2s_playback_channels ||
- bridge->hdmi_audio_spdif_playback) {
- if (!bridge->funcs->hdmi_audio_prepare ||
- !bridge->funcs->hdmi_audio_shutdown)
- return ERR_PTR(-EINVAL);
-
- ret = drm_connector_hdmi_audio_init(connector,
- bridge->hdmi_audio_dev,
- &drm_bridge_connector_hdmi_audio_funcs,
- bridge->hdmi_audio_max_i2s_playback_channels,
- bridge->hdmi_audio_spdif_playback,
- bridge->hdmi_audio_dai_port);
- if (ret)
- return ERR_PTR(ret);
- }
} else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
@@ -635,6 +705,24 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_audio ||
+ bridge_connector->bridge_dp_audio) {
+ struct device *dev;
+
+		if (bridge_connector->bridge_hdmi_audio)
+			bridge = bridge_connector->bridge_hdmi_audio;
+		else
+			bridge = bridge_connector->bridge_dp_audio;
+		dev = bridge->hdmi_audio_dev;
+ ret = drm_connector_hdmi_audio_init(connector, dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
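
For orientation, a minimal sketch of the bridge-driver side that the new
DRM_BRIDGE_OP_DP_AUDIO path above expects. Only the ops flag, the
dp_audio_* callbacks and the hdmi_audio_* fields come from this patch;
the foo_* names and callback bodies are hypothetical.

#include <drm/drm_bridge.h>

static int foo_dp_audio_prepare(struct drm_connector *connector,
				struct drm_bridge *bridge,
				struct hdmi_codec_daifmt *fmt,
				struct hdmi_codec_params *hparms)
{
	/* program stream attributes / audio SDPs for the new format */
	return 0;
}

static void foo_dp_audio_shutdown(struct drm_connector *connector,
				  struct drm_bridge *bridge)
{
	/* stop the audio stream */
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.dp_audio_prepare = foo_dp_audio_prepare,
	.dp_audio_shutdown = foo_dp_audio_shutdown,
	/* ... the usual bridge/atomic callbacks ... */
};

static void foo_bridge_init_audio(struct drm_bridge *bridge,
				  struct device *dev)
{
	bridge->funcs = &foo_bridge_funcs;
	/* prepare/shutdown are mandatory per the checks in init above */
	bridge->ops |= DRM_BRIDGE_OP_DP_AUDIO;
	bridge->hdmi_audio_dev = dev;
	bridge->hdmi_audio_max_i2s_playback_channels = 8;
	bridge->hdmi_audio_dai_port = 0;
}
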
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 56a4965e518c..ed31471bd0e2 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -96,7 +96,7 @@ static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
ssize_t err = 0;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
return (enable && err < 0) ? err : 0;
}
@@ -112,7 +112,7 @@ static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
mask[0] = la_mask & 0xff;
mask[1] = la_mask >> 8;
- err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
}
@@ -123,15 +123,14 @@ static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned int retries = min(5, attempts - 1);
ssize_t err;
- err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
- msg->msg, msg->len);
+ err = drm_dp_dpcd_write_data(aux, DP_CEC_TX_MESSAGE_BUFFER,
+ msg->msg, msg->len);
if (err < 0)
return err;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
- (msg->len - 1) | (retries << 4) |
- DP_CEC_TX_MESSAGE_SEND);
- return err < 0 ? err : 0;
+ return drm_dp_dpcd_write_byte(aux, DP_CEC_TX_MESSAGE_INFO,
+ (msg->len - 1) | (retries << 4) |
+ DP_CEC_TX_MESSAGE_SEND);
}
static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
@@ -144,13 +143,13 @@ static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
return 0;
- err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
- if (err >= 0) {
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val);
+ if (!err) {
if (enable)
val |= DP_CEC_SNOOPING_ENABLE;
else
val &= ~DP_CEC_SNOOPING_ENABLE;
- err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+ err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val);
}
return (enable && err < 0) ? err : 0;
}
@@ -194,7 +193,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
u8 rx_msg_info;
ssize_t err;
- err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+ err = drm_dp_dpcd_read_byte(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
if (err < 0)
return err;
@@ -202,7 +201,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux)
return 0;
msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
- err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+ err = drm_dp_dpcd_read_data(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
if (err < 0)
return err;
@@ -215,7 +214,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
struct cec_adapter *adap = aux->cec.adap;
u8 flags;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
return;
if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
@@ -230,7 +229,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
(DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES);
- drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+ drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
}
/**
@@ -253,13 +252,13 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux)
if (!aux->cec.adap)
goto unlock;
- ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
- &cec_irq);
+ ret = drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+ &cec_irq);
if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
goto unlock;
drm_dp_cec_handle_irq(aux);
- drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+ drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
unlock:
mutex_unlock(&aux->cec.lock);
}
@@ -269,7 +268,7 @@ static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
{
u8 cap = 0;
- if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+ if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) < 0 ||
!(cap & DP_CEC_TUNNELING_CAPABLE))
return false;
if (cec_cap)
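
The mechanical rule behind these hunks: the legacy drm_dp_dpcd_readb()
and drm_dp_dpcd_write() return the number of bytes transferred, so
success was checked as "!= 1" or "!= len", while the new
drm_dp_dpcd_read_byte()/drm_dp_dpcd_write_data() helpers return 0 on
success or a negative error code. A before/after sketch (the foo_*
wrappers are hypothetical):

#include <drm/display/drm_dp_helper.h>

/* Before: length-based success check, error code synthesized. */
static int foo_get_power_old(struct drm_dp_aux *aux, u8 *val)
{
	if (drm_dp_dpcd_readb(aux, DP_SET_POWER, val) != 1)
		return -EIO;

	return 0;
}

/* After: zero-or-negative-errno convention, no -EIO fixup needed. */
static int foo_get_power_new(struct drm_dp_aux *aux, u8 *val)
{
	return drm_dp_dpcd_read_byte(aux, DP_SET_POWER, val);
}
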
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index dbce1c3f4969..f2a6559a2710 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -327,7 +327,7 @@ static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SI
if (offset < DP_RECEIVER_CAP_SIZE) {
rd_interval = dpcd[offset];
} else {
- if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, offset, &rd_interval) < 0) {
drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* arbitrary default delay */
@@ -358,7 +358,7 @@ int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux)
int unit;
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) < 0) {
drm_err(aux->drm_dev, "%s: failed rd interval read\n",
aux->name);
/* default to max */
@@ -704,6 +704,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_set_powered);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_read_data() instead.
*/
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -752,6 +754,8 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
* function returns -EPROTO. Errors from the underlying AUX channel transfer
* function, with the exception of -EBUSY (which causes the transaction to
* be retried), are propagated to the caller.
+ *
+ * In most cases you want to use drm_dp_dpcd_write_data() instead.
*/
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
@@ -774,14 +778,13 @@ EXPORT_SYMBOL(drm_dp_dpcd_write);
* @aux: DisplayPort AUX channel
* @status: buffer to store the link status in (must be at least 6 bytes)
*
- * Returns the number of bytes transferred on success or a negative error
- * code on failure.
+ * Returns a negative error code on failure or 0 on success.
*/
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE])
{
- return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
- DP_LINK_STATUS_SIZE);
+ return drm_dp_dpcd_read_data(aux, DP_LANE0_1_STATUS, status,
+ DP_LINK_STATUS_SIZE);
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
@@ -804,30 +807,20 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
{
int ret;
- if (dp_phy == DP_PHY_DPRX) {
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE);
-
- if (ret < 0)
- return ret;
-
- WARN_ON(ret != DP_LINK_STATUS_SIZE);
-
- return 0;
- }
+ if (dp_phy == DP_PHY_DPRX)
+ return drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS,
+ link_status,
+ DP_LINK_STATUS_SIZE);
- ret = drm_dp_dpcd_read(aux,
- DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
- link_status,
- DP_LINK_STATUS_SIZE - 1);
+ ret = drm_dp_dpcd_read_data(aux,
+ DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy),
+ link_status,
+ DP_LINK_STATUS_SIZE - 1);
if (ret < 0)
return ret;
- WARN_ON(ret != DP_LINK_STATUS_SIZE - 1);
-
/* Convert the LTTPR to the sink PHY link status layout */
memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1],
&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS],
@@ -838,12 +831,81 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @revision: DPCD revision supported on the link
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (revision < DP_DPCD_REV_11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_down);
+
static int read_payload_update_status(struct drm_dp_aux *aux)
{
int ret;
u8 status;
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0)
return ret;
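
A usage sketch for the two helpers added above, assuming a caller that
has already read the receiver capabilities; the foo_* functions are
hypothetical:

#include <drm/display/drm_dp_helper.h>

static int foo_enable_link(struct drm_dp_aux *aux,
			   const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int ret;

	/* wake the sink; a no-op on DPCD < 1.1, handled by the helper */
	ret = drm_dp_link_power_up(aux, dpcd[DP_DPCD_REV]);
	if (ret < 0)
		return ret;

	/* ... link training would go here ... */

	return 0;
}

static void foo_disable_link(struct drm_dp_aux *aux,
			     const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	drm_dp_link_power_down(aux, dpcd[DP_DPCD_REV]);
}
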
@@ -870,21 +932,21 @@ int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
int ret;
int retries = 0;
- drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
+ drm_dp_dpcd_write_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = vcpid;
payload_alloc[1] = start_time_slot;
payload_alloc[2] = time_slot_count;
- ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
+ ret = drm_dp_dpcd_write_data(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
goto fail;
}
retry:
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
goto fail;
@@ -1040,15 +1102,15 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
{
u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
- if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
- if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &link_edid_read) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
aux->name, DP_TEST_REQUEST);
return false;
@@ -1061,23 +1123,23 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
return false;
}
- if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- &auto_test_req, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ auto_test_req) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
/* send back checksum for the last edid extension block data */
- if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
- &real_edid_checksum, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_EDID_CHECKSUM,
+ real_edid_checksum) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_EDID_CHECKSUM);
return false;
}
test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
- if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ if (drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, test_resp) < 0) {
drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
aux->name, DP_TEST_RESPONSE);
return false;
@@ -1114,12 +1176,10 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
return 0;
- ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext,
- sizeof(dpcd_ext));
+ ret = drm_dp_dpcd_read_data(aux, DP_DP13_DPCD_REV, &dpcd_ext,
+ sizeof(dpcd_ext));
if (ret < 0)
return ret;
- if (ret != sizeof(dpcd_ext))
- return -EIO;
if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
drm_dbg_kms(aux->drm_dev,
@@ -1156,10 +1216,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
{
int ret;
- ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
if (ret < 0)
return ret;
- if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0)
+ if (dpcd[DP_DPCD_REV] == 0)
return -EIO;
ret = drm_dp_read_extended_dpcd_caps(aux, dpcd);
@@ -1209,11 +1269,9 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;
- ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
+ ret = drm_dp_dpcd_read_data(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
if (ret < 0)
return ret;
- if (ret != len)
- return -EIO;
drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports);
@@ -1570,7 +1628,7 @@ EXPORT_SYMBOL(drm_dp_downstream_mode);
*/
int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
{
- return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+ return drm_dp_dpcd_read_data(aux, DP_BRANCH_ID, id, 6);
}
EXPORT_SYMBOL(drm_dp_downstream_id);
@@ -1635,13 +1693,13 @@ void drm_dp_downstream_debug(struct seq_file *m,
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+ if (!len)
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
- if (len > 0)
+ len = drm_dp_dpcd_read_data(aux, DP_BRANCH_SW_REV, rev, 2);
+ if (!len)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
if (detailed_cap_info) {
@@ -1779,11 +1837,9 @@ int drm_dp_read_sink_count(struct drm_dp_aux *aux)
u8 count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
+ ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &count);
if (ret < 0)
return ret;
- if (ret != 1)
- return -EIO;
return DP_GET_SINK_COUNT(count);
}
@@ -2081,14 +2137,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
- drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
- /* Send a bare address packet to start the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.buffer = NULL;
- msg.size = 0;
- err = drm_dp_i2c_do_msg(aux, &msg);
+
+ if (!aux->no_zero_sized) {
+ drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+ /* Send a bare address packet to start the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.buffer = NULL;
+ msg.size = 0;
+ err = drm_dp_i2c_do_msg(aux, &msg);
+ }
/*
		 * Reset msg.request in case it got
@@ -2107,6 +2166,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.buffer = msgs[i].buf + j;
msg.size = min(transfer_size, msgs[i].len - j);
+ if (j + msg.size == msgs[i].len && aux->no_zero_sized)
+ msg.request &= ~DP_AUX_I2C_MOT;
err = drm_dp_i2c_drain_msg(aux, &msg);
/*
@@ -2124,15 +2185,17 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
if (err >= 0)
err = num;
- /* Send a bare address packet to close out the transaction.
- * Zero sized messages specify an address only (bare
- * address) transaction.
- */
- msg.request &= ~DP_AUX_I2C_MOT;
- msg.buffer = NULL;
- msg.size = 0;
- (void)drm_dp_i2c_do_msg(aux, &msg);
+ if (!aux->no_zero_sized) {
+ /* Send a bare address packet to close out the transaction.
+ * Zero sized messages specify an address only (bare
+ * address) transaction.
+ */
+ msg.request &= ~DP_AUX_I2C_MOT;
+ msg.buffer = NULL;
+ msg.size = 0;
+ (void)drm_dp_i2c_do_msg(aux, &msg);
+ }
return err;
}
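
How a driver would opt in to the behaviour patched in above — a sketch
assuming an AUX controller that cannot emit zero-sized (bare address)
transactions; everything except the no_zero_sized flag and
drm_dp_aux_init() is hypothetical:

#include <drm/display/drm_dp_helper.h>

static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
				struct drm_dp_aux_msg *msg)
{
	/* hardware-specific transfer; this hardware rejects msg->size == 0 */
	return -EOPNOTSUPP;
}

static void foo_aux_init(struct drm_dp_aux *aux, struct drm_device *drm)
{
	aux->name = "foo-aux";
	aux->drm_dev = drm;
	aux->transfer = foo_aux_transfer;
	/* skip bare address packets; MOT is cleared on the last chunk instead */
	aux->no_zero_sized = true;

	drm_dp_aux_init(aux);
}
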
@@ -2172,13 +2235,13 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
u8 buf, count;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
WARN_ON(!(buf & DP_TEST_SINK_START));
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK_MISC, &buf);
if (ret < 0)
return ret;
@@ -2192,11 +2255,7 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc)
* At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes
* per component (RGB or CrYCb).
*/
- ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_read_data(aux, DP_TEST_CRC_R_CR, crc, 6);
}
static void drm_dp_aux_crc_work(struct work_struct *work)
@@ -2395,11 +2454,11 @@ int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2422,11 +2481,11 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
+ ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START);
if (ret < 0)
return ret;
@@ -2512,11 +2571,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
struct drm_dp_dpcd_ident *ident)
{
- int ret;
-
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
-
- return ret < 0 ? ret : 0;
+ return drm_dp_dpcd_read_data(aux, offset, ident, sizeof(*ident));
}
static void drm_dp_dump_desc(struct drm_dp_aux *aux,
@@ -2774,13 +2829,11 @@ static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
int ret;
for (offset = 0; offset < buf_size; offset += block_size) {
- ret = drm_dp_dpcd_read(aux,
- address + offset,
- &buf[offset], block_size);
+ ret = drm_dp_dpcd_read_data(aux,
+ address + offset,
+ &buf[offset], block_size);
if (ret < 0)
return ret;
-
- WARN_ON(ret != block_size);
}
return 0;
@@ -2995,12 +3048,12 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
int err;
u8 rate, lanes;
- err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LINK_RATE, &rate);
if (err < 0)
return err;
data->link_rate = drm_dp_bw_code_to_link_rate(rate);
- err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
+ err = drm_dp_dpcd_read_byte(aux, DP_TEST_LANE_COUNT, &lanes);
if (err < 0)
return err;
data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;
@@ -3008,22 +3061,22 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
if (lanes & DP_ENHANCED_FRAME_CAP)
data->enhanced_frame_cap = true;
- err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
+ err = drm_dp_dpcd_read_byte(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
if (err < 0)
return err;
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- &data->custom80, sizeof(data->custom80));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ &data->custom80, sizeof(data->custom80));
if (err < 0)
return err;
break;
case DP_PHY_TEST_PATTERN_CP2520:
- err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
- &data->hbr2_reset,
- sizeof(data->hbr2_reset));
+ err = drm_dp_dpcd_read_data(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
+ &data->hbr2_reset,
+ sizeof(data->hbr2_reset));
if (err < 0)
return err;
}
@@ -3050,15 +3103,15 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
DP_LINK_QUAL_PATTERN_11_MASK;
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux, DP_TRAINING_PATTERN_SET,
+ test_pattern);
if (err < 0)
return err;
} else {
for (i = 0; i < data->num_lanes; i++) {
- err = drm_dp_dpcd_writeb(aux,
- DP_LINK_QUAL_LANE0_SET + i,
- test_pattern);
+ err = drm_dp_dpcd_write_byte(aux,
+ DP_LINK_QUAL_LANE0_SET + i,
+ test_pattern);
if (err < 0)
return err;
}
@@ -3265,8 +3318,8 @@ bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_C
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
- &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev,
"Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n");
return false;
@@ -3290,7 +3343,7 @@ bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13)
return false;
- if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) {
+ if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) < 0) {
drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n");
return false;
}
@@ -3421,16 +3474,13 @@ EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
*/
int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
{
- int ret;
u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
DP_PCON_ENABLE_LINK_FRL_MODE;
if (enable_frl_ready_hpd)
buf |= DP_PCON_ENABLE_HPD_READY;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
-
- return ret;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
@@ -3445,7 +3495,7 @@ bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3474,7 +3524,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
@@ -3509,11 +3559,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
return -EINVAL;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
@@ -3539,7 +3585,7 @@ int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
else
buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
+	ret = drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
if (ret < 0)
return ret;
@@ -3555,13 +3601,7 @@ EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
*/
int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
}
EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
@@ -3576,7 +3616,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
int ret;
u8 buf = 0;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
if (ret < 0)
return ret;
if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
@@ -3585,11 +3625,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
return -EINVAL;
}
buf |= DP_PCON_ENABLE_HDMI_LINK;
- ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
@@ -3604,7 +3640,7 @@ bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
if (ret < 0)
return false;
@@ -3629,7 +3665,7 @@ int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
int mode;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
if (ret < 0)
return ret;
@@ -3658,7 +3694,7 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < hdmi->max_lanes; i++) {
- if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
return;
error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
@@ -3793,7 +3829,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
u8 buf;
int ret;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3804,11 +3840,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
buf |= pps_buf_config << 2;
}
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
/**
@@ -3820,13 +3852,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
*/
int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
{
- int ret;
-
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_default);
@@ -3842,15 +3868,11 @@ int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
@@ -3867,21 +3889,17 @@ int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
{
int ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
if (ret < 0)
return ret;
- ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+ ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
if (ret < 0)
return ret;
- ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
}
EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
@@ -3897,7 +3915,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
int ret;
u8 buf;
- ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+ ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
if (ret < 0)
return ret;
@@ -3906,11 +3924,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc)
else
buf &= ~DP_CONVERSION_RGB_YCBCR_MASK;
- ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
}
EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr);
@@ -3942,12 +3956,12 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[0] = level;
}
- ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf));
+ if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -3965,22 +3979,22 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
if (!bl->aux_enable)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (enable)
buf |= DP_EDP_BACKLIGHT_ENABLE;
else
buf &= ~DP_EDP_BACKLIGHT_ENABLE;
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf);
+ if (ret < 0) {
drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
return 0;
@@ -4016,15 +4030,16 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM;
if (bl->pwmgen_bit_count) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
}
if (bl->pwm_freq_pre_divider) {
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider);
- if (ret != 1)
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_FREQ_SET,
+ bl->pwm_freq_pre_divider);
+ if (ret < 0)
drm_dbg_kms(aux->drm_dev,
"%s: Failed to write aux backlight frequency: %d\n",
aux->name, ret);
@@ -4032,8 +4047,8 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli
dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4088,8 +4103,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
if (!bl->aux_set)
return 0;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
@@ -4122,14 +4137,14 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
* - FxP is within 25% of desired value.
* Note: 25% is arbitrary value and may need some tweak.
*/
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
aux->name, ret);
return 0;
}
- ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
aux->name, ret);
return 0;
@@ -4154,8 +4169,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
break;
}
- ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
- if (ret != 1) {
+ ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
@@ -4180,8 +4195,8 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
u8 buf[2];
u8 mode_reg;
- ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n",
aux->name, ret);
return ret < 0 ? ret : -EIO;
@@ -4194,11 +4209,11 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i
if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
int size = 1 + bl->lsb_reg_used;
- ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
- if (ret != size) {
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size);
+ if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n",
aux->name, ret);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
if (bl->lsb_reg_used)
@@ -4343,8 +4358,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
if (!panel || !panel->dev || !aux)
return -EINVAL;
- ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
- EDP_DISPLAY_CTL_CAP_SIZE);
+ ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd,
+ EDP_DISPLAY_CTL_CAP_SIZE);
if (ret < 0)
return ret;
@@ -4385,8 +4400,9 @@ EXPORT_SYMBOL(drm_panel_dp_aux_backlight);
#endif
/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */
-static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
- int symbol_size, bool is_mst)
+static int drm_dp_link_data_symbol_cycles(int lane_count, int pixels,
+ int bpp_x16, int symbol_size,
+ bool is_mst)
{
int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count);
int align = is_mst ? 4 / lane_count : 1;
@@ -4394,22 +4410,42 @@ static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
return ALIGN(cycles, align);
}
-static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count,
- int bpp_x16, int symbol_size, bool is_mst)
+/**
+ * drm_dp_link_symbol_cycles - calculate the link symbol count with/without DSC
+ * @lane_count: DP link lane count
+ * @pixels: number of pixels in a scanline
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
+ * @bpp_x16: bits per pixel in .4 binary fixed point
+ * @symbol_size: DP symbol size
+ * @is_mst: %true for MST and %false for SST
+ *
+ * Calculate the link symbol cycles for both the DSC (@dsc_slice_count != 0)
+ * and non-DSC (@dsc_slice_count == 0) cases and return the count.
+ */
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst)
{
+ int slice_count = dsc_slice_count ? : 1;
int slice_pixels = DIV_ROUND_UP(pixels, slice_count);
- int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels,
- bpp_x16, symbol_size, is_mst);
- int slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
+ int slice_data_cycles = drm_dp_link_data_symbol_cycles(lane_count,
+ slice_pixels,
+ bpp_x16,
+ symbol_size,
+ is_mst);
+ int slice_eoc_cycles = 0;
+
+ if (dsc_slice_count)
+ slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
return slice_count * (slice_data_cycles + slice_eoc_cycles);
}
+EXPORT_SYMBOL(drm_dp_link_symbol_cycles);
/**
* drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream
* @lane_count: DP link lane count
* @hactive: pixel count of the active period in one scanline of the stream
- * @dsc_slice_count: DSC slice count if @flags/DRM_DP_LINK_BW_OVERHEAD_DSC is set
+ * @dsc_slice_count: number of slices for DSC or '0' for non-DSC
* @bpp_x16: bits per pixel in .4 binary fixed point
* @flags: DRM_DP_OVERHEAD_x flags
*
@@ -4423,7 +4459,7 @@ static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_c
* as well as the stream's
* - @hactive timing
* - @bpp_x16 color depth
- * - compression mode (@flags / %DRM_DP_OVERHEAD_DSC).
+ * - compression mode (@dsc_slice_count != 0).
* Note that this overhead doesn't account for the 8b/10b, 128b/132b
* channel coding efficiency, for that see
* @drm_dp_link_bw_channel_coding_efficiency().
@@ -4478,15 +4514,10 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) &&
(flags & DRM_DP_BW_OVERHEAD_FEC));
- if (flags & DRM_DP_BW_OVERHEAD_DSC)
- symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive,
- dsc_slice_count,
- bpp_x16, symbol_size,
- is_mst);
- else
- symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
- bpp_x16, symbol_size,
- is_mst);
+ symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
+ dsc_slice_count,
+ bpp_x16, symbol_size,
+ is_mst);
return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count,
overhead * 16),
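
A worked example of the now-exported helper: with dsc_slice_count == 0
it degenerates to the plain data-symbol computation, while a non-zero
slice count adds the per-slice end-of-chunk symbols. The numbers below
are arbitrary; only the function and its parameter semantics come from
this patch.

#include <drm/display/drm_dp_helper.h>

static int foo_scanline_symbol_cycles(bool dsc_enabled)
{
	int lane_count = 4;
	int hactive = 3840;
	int dsc_slice_count = dsc_enabled ? 4 : 0;	/* 0 selects non-DSC */
	int bpp_x16 = 24 << 4;				/* 24 bpp in .4 fixed point */
	int symbol_size = 8;				/* 8b/10b link */

	return drm_dp_link_symbol_cycles(lane_count, hactive, dsc_slice_count,
					 bpp_x16, symbol_size, false);
}
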
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 3a1f1ffc7b55..a89f38fd3218 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -2192,24 +2192,20 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
guid_copy(&mstb->guid, guid);
if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
+ struct drm_dp_aux *aux;
u8 buf[UUID_SIZE];
export_guid(buf, &mstb->guid);
- if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(mstb->mgr,
- mstb->port_parent,
- DP_GUID, sizeof(buf), buf);
- } else {
- ret = drm_dp_dpcd_write(mstb->mgr->aux,
- DP_GUID, buf, sizeof(buf));
- }
- }
+ if (mstb->port_parent)
+ aux = &mstb->port_parent->aux;
+ else
+ aux = mstb->mgr->aux;
- if (ret < 16 && ret > 0)
- return -EPROTO;
+ ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
+ }
- return ret == 16 ? 0 : ret;
+ return ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2744,14 +2740,13 @@ retry:
do {
tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
- ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
- &msg[offset],
- tosend);
- if (ret != tosend) {
- if (ret == -EIO && retries < 5) {
- retries++;
- goto retry;
- }
+ ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ } else if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
@@ -3624,7 +3619,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
return DRM_DP_SST;
- if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+ if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
return DRM_DP_SST;
if (mstm_cap & DP_MST_CAP)
@@ -3679,10 +3674,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_primary = mstb;
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0)
goto out_unlock;
@@ -3697,7 +3692,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mstb = mgr->mst_primary;
mgr->mst_primary = NULL;
/* this can fail if the device is gone */
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
@@ -3763,8 +3758,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
- drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
@@ -3813,18 +3808,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
goto out_fail;
}
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN |
- DP_UP_REQ_EN |
- DP_UPSTREAM_IS_SRC);
+ ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf));
- if (ret != sizeof(buf)) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3883,8 +3878,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
*mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
@@ -3922,9 +3917,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
- replyblock, len);
- if (ret != len) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret < 0) {
drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
len, ret);
return false;
@@ -4881,9 +4876,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
int i;
for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
- if (drm_dp_dpcd_read(mgr->aux,
- DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
- &buf[i], 16) != 16)
+ if (drm_dp_dpcd_read_data(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) < 0)
return false;
}
return true;
@@ -4972,23 +4967,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
}
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret != 2) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret < 0) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
- ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret != 1) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret < 0) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
- ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+ ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
+ DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret < 0) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -6112,14 +6108,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* DP-to-DP peer device */
if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
		/* Endpoint decompression with DP-to-DP peer device */
@@ -6157,8 +6153,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_dpcd_read(immediate_upstream_aux,
- DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(immediate_upstream_aux,
+ DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
return NULL;
if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
@@ -6180,11 +6176,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* therefore the endpoint needs to be
* both DSC and FEC capable.
*/
- if (drm_dp_dpcd_read(&port->aux,
- DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
return NULL;
- if (drm_dp_dpcd_read(&port->aux,
- DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+ if (drm_dp_dpcd_read_data(&port->aux,
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
return NULL;
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE))
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 90fe07a89260..076edf161048 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -222,7 +222,7 @@ static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *r
while ((len = next_reg_area(&offset))) {
int address = DP_TUNNELING_BASE + offset;
- if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+ if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
return -EIO;
offset += len;
@@ -913,7 +913,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
goto out_err;
if (enable)
@@ -921,7 +921,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
else
val &= ~mask;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
goto out_err;
tunnel->bw_alloc_enabled = enable;
@@ -1039,7 +1039,7 @@ static int clear_bw_req_state(struct drm_dp_aux *aux)
{
u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
- if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+ if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
return -EIO;
return 0;
@@ -1052,7 +1052,7 @@ static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
u8 val;
int err;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
*status_changed = val & status_change_mask;
@@ -1095,7 +1095,7 @@ static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
if (err)
goto out;
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
err = -EIO;
goto out;
}
@@ -1196,13 +1196,13 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
u8 val;
- if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
goto out_err;
val &= mask;
if (val) {
- if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+ if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
goto out_err;
return 1;
@@ -1215,7 +1215,7 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
* Check for estimated BW changes explicitly to account for lost
* BW change notifications.
*/
- if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+ if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
goto out_err;
if (val * tunnel->bw_granularity != tunnel->estimated_bw)
@@ -1300,7 +1300,7 @@ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *a
{
u8 val;
- if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+ if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
return -EIO;
if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
index 05afc9f0bdd6..ae8a0cf595fc 100644
--- a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -103,7 +103,8 @@ static int drm_connector_hdmi_audio_hook_plugged_cb(struct device *dev,
connector->hdmi_audio.plugged_cb = fn;
connector->hdmi_audio.plugged_cb_dev = codec_dev;
- fn(codec_dev, connector->hdmi_audio.last_state);
+ if (fn)
+ fn(codec_dev, connector->hdmi_audio.last_state);
mutex_unlock(&connector->hdmi_audio.lock);
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 74dd4d01dd9b..855cb02b827d 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -256,3 +256,171 @@ drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
return DIV_ROUND_CLOSEST_ULL(clock * bpc, 8);
}
EXPORT_SYMBOL(drm_hdmi_compute_mode_clock);
+
+struct drm_hdmi_acr_n_cts_entry {
+ unsigned int n;
+ unsigned int cts;
+};
+
+struct drm_hdmi_acr_data {
+ unsigned long tmds_clock_khz;
+ struct drm_hdmi_acr_n_cts_entry n_cts_32k,
+ n_cts_44k1,
+ n_cts_48k;
+};
+
+static const struct drm_hdmi_acr_data hdmi_acr_n_cts[] = {
+ {
+ /* "Other" entry */
+ .n_cts_32k = { .n = 4096, },
+ .n_cts_44k1 = { .n = 6272, },
+ .n_cts_48k = { .n = 6144, },
+ }, {
+ .tmds_clock_khz = 25175,
+ .n_cts_32k = { .n = 4576, .cts = 28125, },
+ .n_cts_44k1 = { .n = 7007, .cts = 31250, },
+ .n_cts_48k = { .n = 6864, .cts = 28125, },
+ }, {
+ .tmds_clock_khz = 25200,
+ .n_cts_32k = { .n = 4096, .cts = 25200, },
+ .n_cts_44k1 = { .n = 6272, .cts = 28000, },
+ .n_cts_48k = { .n = 6144, .cts = 25200, },
+ }, {
+ .tmds_clock_khz = 27000,
+ .n_cts_32k = { .n = 4096, .cts = 27000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30000, },
+ .n_cts_48k = { .n = 6144, .cts = 27000, },
+ }, {
+ .tmds_clock_khz = 27027,
+ .n_cts_32k = { .n = 4096, .cts = 27027, },
+ .n_cts_44k1 = { .n = 6272, .cts = 30030, },
+ .n_cts_48k = { .n = 6144, .cts = 27027, },
+ }, {
+ .tmds_clock_khz = 54000,
+ .n_cts_32k = { .n = 4096, .cts = 54000, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60000, },
+ .n_cts_48k = { .n = 6144, .cts = 54000, },
+ }, {
+ .tmds_clock_khz = 54054,
+ .n_cts_32k = { .n = 4096, .cts = 54054, },
+ .n_cts_44k1 = { .n = 6272, .cts = 60060, },
+ .n_cts_48k = { .n = 6144, .cts = 54054, },
+ }, {
+ .tmds_clock_khz = 74176,
+ .n_cts_32k = { .n = 11648, .cts = 210937, }, /* and 210938 */
+ .n_cts_44k1 = { .n = 17836, .cts = 234375, },
+ .n_cts_48k = { .n = 11648, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 74250,
+ .n_cts_32k = { .n = 4096, .cts = 74250, },
+ .n_cts_44k1 = { .n = 6272, .cts = 82500, },
+ .n_cts_48k = { .n = 6144, .cts = 74250, },
+ }, {
+ .tmds_clock_khz = 148352,
+ .n_cts_32k = { .n = 11648, .cts = 421875, },
+ .n_cts_44k1 = { .n = 8918, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 140625, },
+ }, {
+ .tmds_clock_khz = 148500,
+ .n_cts_32k = { .n = 4096, .cts = 148500, },
+ .n_cts_44k1 = { .n = 6272, .cts = 165000, },
+ .n_cts_48k = { .n = 6144, .cts = 148500, },
+ }, {
+ .tmds_clock_khz = 296703,
+ .n_cts_32k = { .n = 5824, .cts = 421875, },
+ .n_cts_44k1 = { .n = 4459, .cts = 234375, },
+ .n_cts_48k = { .n = 5824, .cts = 281250, },
+ }, {
+ .tmds_clock_khz = 297000,
+ .n_cts_32k = { .n = 3072, .cts = 222750, },
+ .n_cts_44k1 = { .n = 4704, .cts = 247500, },
+ .n_cts_48k = { .n = 5120, .cts = 247500, },
+ }, {
+ .tmds_clock_khz = 593407,
+ .n_cts_32k = { .n = 5824, .cts = 843750, },
+ .n_cts_44k1 = { .n = 8918, .cts = 937500, },
+ .n_cts_48k = { .n = 5824, .cts = 562500, },
+ }, {
+ .tmds_clock_khz = 594000,
+ .n_cts_32k = { .n = 3072, .cts = 445500, },
+ .n_cts_44k1 = { .n = 9408, .cts = 990000, },
+ .n_cts_48k = { .n = 6144, .cts = 594000, },
+ },
+};
+
+static int drm_hdmi_acr_find_tmds_entry(unsigned long tmds_clock_khz)
+{
+ int i;
+
+ /* skip the "other" entry */
+ for (i = 1; i < ARRAY_SIZE(hdmi_acr_n_cts); i++) {
+ if (hdmi_acr_n_cts[i].tmds_clock_khz == tmds_clock_khz)
+ return i;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_hdmi_acr_get_n_cts() - get N and CTS values for Audio Clock Regeneration
+ *
+ * @tmds_char_rate: TMDS clock (char rate) as used by the HDMI connector
+ * @sample_rate: audio sample rate
+ * @out_n: a pointer to write the N value
+ * @out_cts: a pointer to write the CTS value
+ *
+ * Get the N and CTS values (either by calculating them or by returning data
+ * from the tables). This follows the HDMI 1.4b Section 7.2 "Audio Sample
+ * Clock Capture and Regeneration".
+ *
+ * Note, @sample_rate corresponds to the Fs value, see sections 7.2.4 - 7.2.6
+ * on how to select Fs for non-L-PCM formats.
+ */
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts)
+{
+ /* be a bit more tolerant, especially for the 1.001 entries */
+ unsigned long tmds_clock_khz = DIV_ROUND_CLOSEST_ULL(tmds_char_rate, 1000);
+ const struct drm_hdmi_acr_n_cts_entry *entry;
+ unsigned int n, cts, mult;
+ int tmds_idx;
+
+ tmds_idx = drm_hdmi_acr_find_tmds_entry(tmds_clock_khz);
+
+ /*
+ * Don't change the order: 192 kHz is divisible by both 48 kHz and
+ * 32 kHz, but it must use the 48 kHz entry.
+ */
+ if (sample_rate % 48000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_48k;
+ mult = sample_rate / 48000;
+ } else if (sample_rate % 44100 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_44k1;
+ mult = sample_rate / 44100;
+ } else if (sample_rate % 32000 == 0) {
+ entry = &hdmi_acr_n_cts[tmds_idx].n_cts_32k;
+ mult = sample_rate / 32000;
+ } else {
+ entry = NULL;
+ }
+
+ if (entry) {
+ n = entry->n * mult;
+ cts = entry->cts;
+ } else {
+ /* Recommended optimal value, HDMI 1.4b, Section 7.2.1 */
+ n = 128 * sample_rate / 1000;
+ cts = 0;
+ }
+
+ if (!cts)
+ cts = DIV_ROUND_CLOSEST_ULL(tmds_char_rate * n,
+ 128 * sample_rate);
+
+ *out_n = n;
+ *out_cts = cts;
+}
+EXPORT_SYMBOL(drm_hdmi_acr_get_n_cts);
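+
+/*
+ * Usage sketch (illustration only, not taken from a real driver): a driver
+ * would typically call drm_hdmi_acr_get_n_cts() while setting up audio and
+ * program the returned values into its ACR registers. The register helper
+ * and register names below are hypothetical:
+ *
+ *     unsigned int n, cts;
+ *
+ *     drm_hdmi_acr_get_n_cts(tmds_char_rate, sample_rate, &n, &cts);
+ *     my_hdmi_write(hdmi, MY_ACR_N, n);
+ *     my_hdmi_write(hdmi, MY_ACR_CTS, cts);
+ */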
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index c205f37da1e1..d9d9948b29e9 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -10,6 +10,298 @@
#include <drm/display/drm_hdmi_state_helper.h>
/**
+ * DOC: hdmi helpers
+ *
+ * These functions provide an implementation of the HDMI specification
+ * in the form of KMS helpers.
+ *
+ * They cover TMDS character rate computation, automatic selection of
+ * output formats, infoframe generation, etc.
+ *
+ * Infoframes Compliance
+ * ~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Drivers using the helpers will expose the various infoframes
+ * generated according to the HDMI specification in debugfs.
+ *
+ * Compliance can then be tested using ``edid-decode`` from the ``v4l-utils`` project
+ * (https://git.linuxtv.org/v4l-utils.git/). A sample run would look like:
+ *
+ * .. code-block:: bash
+ *
+ * # edid-decode \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/avi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdmi \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm \
+ * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/spd \
+ * /sys/class/drm/card1-HDMI-A-1/edid \
+ * -c
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 1e 6d f4 5b 1e ef 06 00
+ * 07 20 01 03 80 2f 34 78 ea 24 05 af 4f 42 ab 25
+ * 0f 50 54 21 08 00 d1 c0 61 40 45 40 01 01 01 01
+ * 01 01 01 01 01 01 98 d0 00 40 a1 40 d4 b0 30 20
+ * 3a 00 d1 0b 12 00 00 1a 00 00 00 fd 00 3b 3d 1e
+ * b2 31 00 0a 20 20 20 20 20 20 00 00 00 fc 00 4c
+ * 47 20 53 44 51 48 44 0a 20 20 20 20 00 00 00 ff
+ * 00 32 30 37 4e 54 52 4c 44 43 34 33 30 0a 01 46
+ *
+ * 02 03 42 72 23 09 07 07 4d 01 03 04 90 12 13 1f
+ * 22 5d 5e 5f 60 61 83 01 00 00 6d 03 0c 00 10 00
+ * b8 3c 20 00 60 01 02 03 67 d8 5d c4 01 78 80 03
+ * e3 0f 00 18 e2 00 6a e3 05 c0 00 e6 06 05 01 52
+ * 52 51 11 5d 00 a0 a0 40 29 b0 30 20 3a 00 d1 0b
+ * 12 00 00 1a 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: GSM
+ * Model: 23540
+ * Serial Number: 454430 (0x0006ef1e)
+ * Made in: week 7 of 2022
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 47 cm x 52 cm
+ * Gamma: 2.20
+ * DPMS levels: Standby Suspend Off
+ * RGB color display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.6835, 0.3105
+ * Green: 0.2587, 0.6679
+ * Blue : 0.1445, 0.0585
+ * White: 0.3134, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * Standard Timings:
+ * DMT 0x52: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz
+ * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz
+ * Detailed Timing Descriptors:
+ * DTD 1: 2560x2880 59.966580 Hz 8:9 185.417 kHz 534.000000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 240 Hpol P
+ * Vfront 3 Vsync 10 Vback 199 Vpol N
+ * Display Range Limits:
+ * Monitor ranges (GTF): 59-61 Hz V, 30-178 kHz H, max dotclock 490 MHz
+ * Display Product Name: 'LG SDQHD'
+ * Display Product Serial Number: '207NTRLDC430'
+ * Extension blocks: 1
+ * Checksum: 0x46
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Basic audio support
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 2
+ * Audio Data Block:
+ * Linear PCM:
+ * Max channels: 2
+ * Supported sample rates (kHz): 48 44.1 32
+ * Supported sample sizes (bits): 24 20 16
+ * Video Data Block:
+ * VIC 1: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * VIC 3: 720x480 59.940060 Hz 16:9 31.469 kHz 27.000000 MHz
+ * VIC 4: 1280x720 60.000000 Hz 16:9 45.000 kHz 74.250000 MHz
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 18: 720x576 50.000000 Hz 16:9 31.250 kHz 27.000000 MHz
+ * VIC 19: 1280x720 50.000000 Hz 16:9 37.500 kHz 74.250000 MHz
+ * VIC 31: 1920x1080 50.000000 Hz 16:9 56.250 kHz 148.500000 MHz
+ * VIC 34: 1920x1080 30.000000 Hz 16:9 33.750 kHz 74.250000 MHz
+ * VIC 93: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * VIC 94: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Speaker Allocation Data Block:
+ * FL/FR - Front Left/Right
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * Supports_AI
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 300 MHz
+ * Extended HDMI video details:
+ * HDMI VICs:
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * HDMI VIC 2: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz
+ * HDMI VIC 3: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 600 MHz
+ * SCDC Present
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz
+ * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: Always Underscanned
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Colorimetry Data Block:
+ * BT2020YCC
+ * BT2020RGB
+ * HDR Static Metadata Data Block:
+ * Electro optical transfer functions:
+ * Traditional gamma - SDR luminance range
+ * SMPTE ST2084
+ * Supported static metadata descriptors:
+ * Static metadata type 1
+ * Desired content max luminance: 82 (295.365 cd/m^2)
+ * Desired content max frame-average luminance: 82 (295.365 cd/m^2)
+ * Desired content min luminance: 81 (0.298 cd/m^2)
+ * Detailed Timing Descriptors:
+ * DTD 2: 2560x2880 29.986961 Hz 8:9 87.592 kHz 238.250000 MHz (465 mm x 523 mm)
+ * Hfront 48 Hsync 32 Hback 80 Hpol P
+ * Vfront 3 Vsync 10 Vback 28 Vpol N
+ * Checksum: 0xc3 Unused space in Extension Block: 43 bytes
+ *
+ * ----------------
+ *
+ * edid-decode 1.29.0-5346
+ * edid-decode SHA: c363e9aa6d70 2025-03-11 11:41:18
+ *
+ * Warnings:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * IT Video Formats are overscanned by default, but normally this should be underscanned.
+ * Video Data Block: VIC 1 and the first DTD are not identical. Is this intended?
+ * Video Data Block: All VICs are in ascending order, and the first (preferred) VIC <= 4, is that intended?
+ * Video Capability Data Block: Set Selectable YCbCr Quantization to avoid interop issues.
+ * Video Capability Data Block: S_PT is equal to S_IT and S_CE, so should be set to 0 instead.
+ * Colorimetry Data Block: Set the sRGB colorimetry bit to avoid interop issues.
+ * Display Product Serial Number is set, so the Serial Number in the Base EDID should be 0.
+ * EDID:
+ * Base EDID: Some timings are out of range of the Monitor Ranges:
+ * Vertical Freq: 24.000 - 60.317 Hz (Monitor: 59.000 - 61.000 Hz)
+ * Horizontal Freq: 31.250 - 185.416 kHz (Monitor: 30.000 - 178.000 kHz)
+ * Maximum Clock: 594.000 MHz (Monitor: 490.000 MHz)
+ *
+ * Failures:
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Video Capability Data Block: IT video formats are always underscanned, but bit 7 of Byte 3 of the CTA-861 Extension header is set to overscanned.
+ * EDID:
+ * CTA-861: Native progressive timings are a mix of several resolutions.
+ *
+ * EDID conformity: FAIL
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 82 02 0d 31 12 28 04 00 00 00 00 00 00 00 00 00
+ * 00
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x31
+ *
+ * AVI InfoFrame
+ * Version: 2
+ * Length: 13
+ * Y: Color Component Sample Format: RGB
+ * A: Active Format Information Present: Yes
+ * B: Bar Data Present: Bar Data not present
+ * S: Scan Information: Composed for an underscanned display
+ * C: Colorimetry: No Data
+ * M: Picture Aspect Ratio: 16:9
+ * R: Active Portion Aspect Ratio: 8
+ * ITC: IT Content: No Data
+ * EC: Extended Colorimetry: xvYCC601
+ * Q: RGB Quantization Range: Limited Range
+ * SC: Non-Uniform Picture Scaling: No Known non-uniform scaling
+ * YQ: YCC Quantization Range: Limited Range
+ * CN: IT Content Type: Graphics
+ * PR: Pixel Data Repetition Count: 0
+ * Line Number of End of Top Bar: 0
+ * Line Number of Start of Bottom Bar: 0
+ * Pixel Number of End of Left Bar: 0
+ * Pixel Number of Start of Right Bar: 0
+ *
+ * ----------------
+ *
+ * AVI InfoFrame conformity: PASS
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 81 01 05 49 03 0c 00 20 01
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x49
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03
+ * Version: 1
+ * Length: 5
+ * HDMI Video Format: HDMI_VIC is present
+ * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ *
+ * ----------------
+ *
+ * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 conformity: PASS
+ *
+ * ================
+ *
+ * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm' was empty.
+ *
+ * ================
+ *
+ * edid-decode InfoFrame (hex):
+ *
+ * 83 01 19 93 42 72 6f 61 64 63 6f 6d 56 69 64 65
+ * 6f 63 6f 72 65 00 00 00 00 00 00 00 09
+ *
+ * ----------------
+ *
+ * HDMI InfoFrame Checksum: 0x93
+ *
+ * Source Product Description InfoFrame
+ * Version: 1
+ * Length: 25
+ * Vendor Name: 'Broadcom'
+ * Product Description: 'Videocore'
+ * Source Information: PC general
+ *
+ * ----------------
+ *
+ * Source Product Description InfoFrame conformity: PASS
+ *
+ * Testing
+ * ~~~~~~~
+ *
+ * The helpers have unit testing and can be tested using kunit with:
+ *
+ * .. code-block:: bash
+ *
+ * $ ./tools/testing/kunit/kunit.py run \
+ * --kunitconfig=drivers/gpu/drm/tests \
+ * drm_atomic_helper_connector_hdmi_*
+ */
+
+/**
* __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources
* @connector: DRM connector
* @new_conn_state: connector state to reset
@@ -816,7 +1108,7 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
* @status: Connection status
*
* This function should be called as a part of the .detect() / .detect_ctx()
- * callbacks, updating the HDMI-specific connector's data.
+ * callbacks for all status changes.
*/
void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
enum drm_connector_status status)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ea2611770f4..0138cf0b8b63 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -933,6 +933,9 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* state). This is especially true in enable hooks because the pipeline has
* changed.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The old connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -967,6 +970,9 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* attached to @encoder vs ones that do (and to inspect their state). This is
* especially true in disable hooks because the pipeline will change.
*
+ * If you don't have access to the atomic state, see
+ * drm_atomic_get_connector_for_encoder().
+ *
* Returns: The new connector connected to @encoder, or NULL if the encoder is
* not connected.
*/
@@ -988,6 +994,59 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
/**
+ * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder
+ * @encoder: The encoder to find the connector of
+ * @ctx: Modeset locking context
+ *
+ * This function finds and returns the connector currently assigned to
+ * an @encoder.
+ *
+ * It is similar to the drm_atomic_get_old_connector_for_encoder() and
+ * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't
+ * require access to the atomic state. If you have access to it, prefer
+ * using these. This helper is typically useful in situations where you
+ * don't have access to the atomic state, like detect, link repair,
+ * threaded interrupt handlers, or hooks from other frameworks (ALSA,
+ * CEC, etc.).
+ *
+ * Returns:
+ * The connector connected to @encoder, or an error pointer otherwise.
+ * When the error is EDEADLK, a deadlock has been detected and the
+ * sequence must be restarted.
+ */
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *out_connector = ERR_PTR(-EINVAL);
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state)
+ continue;
+
+ if (encoder == connector->state->best_encoder) {
+ out_connector = connector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return out_connector;
+}
+EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder);
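+
+/*
+ * Usage sketch (illustration only): because this helper can return
+ * ERR_PTR(-EDEADLK), callers are expected to use the usual modeset-lock
+ * retry sequence, for example:
+ *
+ *     struct drm_modeset_acquire_ctx ctx;
+ *     struct drm_connector *connector;
+ *     int ret;
+ *
+ *     DRM_MODESET_LOCK_ALL_BEGIN(encoder->dev, ctx, 0, ret);
+ *     connector = drm_atomic_get_connector_for_encoder(encoder, &ctx);
+ *     if (IS_ERR(connector))
+ *             ret = PTR_ERR(connector);
+ *     DRM_MODESET_LOCK_ALL_END(encoder->dev, ctx, ret);
+ */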
+
+/**
* drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
* @state: Atomic state
* @encoder: The encoder to fetch the crtc state for
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5302ab324898..ee64ca1b1bec 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3409,6 +3409,9 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all);
* This implies a reset of all active components available between the CRTC and
* connectors.
*
+ * A variant of this function dedicated to bridges exists as
+ * drm_bridge_helper_reset_crtc().
+ *
* NOTE: This relies on resetting &drm_crtc_state.connectors_changed.
* For drivers which optimize out unnecessary modesets this will result in
* a no-op commit, achieving nothing.
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 6e74de833466..6852d73c931c 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -75,6 +75,12 @@
* the currently visible vertical area of the &drm_crtc.
* FB_ID:
* Mode object ID of the &drm_framebuffer this plane should scan out.
+ *
+ * When a KMS client is performing front-buffer rendering, it should set
+ * FB_ID to the same front-buffer FB on each atomic commit. This signals
+ * to the driver that it needs to re-read the same FB again. Otherwise,
+ * drivers which do not employ continuously repeated scanout cycles might
+ * not update the screen.
* CRTC_ID:
* Mode object ID of the &drm_crtc this plane should be connected to.
*
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index fa2794217a90..b4c89ec01998 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -198,10 +199,96 @@
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
+static void __drm_bridge_free(struct kref *kref)
+{
+ struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+
+ kfree(bridge->container);
+}
+
+/**
+ * drm_bridge_get - Acquire a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function increments the bridge's refcount.
+ *
+ * Returns:
+ * Pointer to @bridge.
+ */
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_get(&bridge->refcount);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drm_bridge_get);
+
+/**
+ * drm_bridge_put - Release a bridge reference
+ * @bridge: DRM bridge
+ *
+ * This function decrements the bridge's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_bridge_put(struct drm_bridge *bridge)
+{
+ if (bridge)
+ kref_put(&bridge->refcount, __drm_bridge_free);
+}
+EXPORT_SYMBOL(drm_bridge_put);
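+
+/*
+ * Usage sketch (illustration only): code that stores a bridge pointer
+ * beyond the current call, e.g. in a driver-private struct, should hold
+ * its own reference for as long as the pointer is kept:
+ *
+ *     priv->next_bridge = drm_bridge_get(bridge);
+ *     ...
+ *     drm_bridge_put(priv->next_bridge);
+ */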
+
+/**
+ * drm_bridge_put_void - wrapper around drm_bridge_put() taking a void pointer
+ *
+ * @data: pointer to a &struct drm_bridge, cast to a void pointer
+ *
+ * Wrapper around drm_bridge_put() for use where a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_bridge_put_void(void *data)
+{
+ struct drm_bridge *bridge = (struct drm_bridge *)data;
+
+ drm_bridge_put(bridge);
+}
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs)
+{
+ void *container;
+ struct drm_bridge *bridge;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ bridge = container + offset;
+ bridge->container = container;
+ bridge->funcs = funcs;
+ kref_init(&bridge->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
+ if (err)
+ return ERR_PTR(err);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_bridge_alloc);
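+
+/*
+ * Usage sketch (illustration only): drivers are not expected to call
+ * __devm_drm_bridge_alloc() directly, but to go through the
+ * devm_drm_bridge_alloc() wrapper macro with a driver-private struct
+ * (hypothetical names below) embedding the bridge:
+ *
+ *     struct my_bridge {
+ *             struct drm_bridge bridge;
+ *             ...
+ *     };
+ *
+ *     my = devm_drm_bridge_alloc(dev, struct my_bridge, bridge,
+ *                                &my_bridge_funcs);
+ *     if (IS_ERR(my))
+ *             return PTR_ERR(my);
+ */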
+
/**
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
+ *
+ * The bridge to be added must have been allocated by
+ * devm_drm_bridge_alloc().
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
@@ -280,6 +367,11 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
+static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
+{
+ return bridge->funcs->atomic_reset != NULL;
+}
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
@@ -327,12 +419,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge, flags);
+ ret = bridge->funcs->attach(bridge, encoder, flags);
if (ret < 0)
goto err_reset_bridge;
}
- if (bridge->funcs->atomic_reset) {
+ if (drm_bridge_is_atomic(bridge)) {
struct drm_bridge_state *state;
state = bridge->funcs->atomic_reset(bridge);
@@ -377,7 +469,7 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
- if (bridge->funcs->atomic_reset)
+ if (drm_bridge_is_atomic(bridge))
drm_atomic_private_obj_fini(&bridge->base);
if (bridge->funcs->detach)
@@ -1300,6 +1392,75 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
+ struct drm_bridge *bridge,
+ unsigned int idx)
+{
+ drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+ drm_printf(p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+
+ if (bridge->of_node)
+ drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
+
+ drm_printf(p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(p, " modes");
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ drm_puts(p, " hdmi");
+ drm_puts(p, "\n");
+}
+
+static int allbridges_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ mutex_lock(&bridge_lock);
+
+ list_for_each_entry(bridge, &bridge_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ mutex_unlock(&bridge_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(allbridges);
+
+static int encoder_bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain(encoder, bridge)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
+
+void drm_bridge_debugfs_params(struct dentry *root)
+{
+ debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
+}
+
+void drm_bridge_debugfs_encoder_params(struct dentry *root,
+ struct drm_encoder *encoder)
+{
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
+}
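+
+/*
+ * Usage note (paths assumed from the standard DRM debugfs layout): the
+ * file registered above exposes the global bridge list, while each
+ * encoder keeps its own per-chain list, e.g.:
+ *
+ *     # cat /sys/kernel/debug/dri/bridges
+ *     # cat /sys/kernel/debug/dri/0/encoder-0/bridges
+ */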
+
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
new file mode 100644
index 000000000000..af80d2496194
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_modeset_lock.h>
+
+/**
+ * drm_bridge_helper_reset_crtc - Reset the pipeline feeding a bridge
+ * @bridge: DRM bridge to reset
+ * @ctx: lock acquisition context
+ *
+ * Reset a @bridge pipeline. It will power-cycle all active components
+ * between the CRTC and the connector that the bridge is connected to.
+ *
+ * As it relies on drm_atomic_helper_reset_crtc(), the same limitations
+ * apply.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. If the error
+ * returned is EDEADLK, the whole atomic sequence must be restarted.
+ */
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_connector *connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ connector = drm_atomic_get_connector_for_encoder(encoder, ctx);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ goto out;
+ }
+
+ if (!connector->state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ crtc = connector->state->crtc;
+ ret = drm_atomic_helper_reset_crtc(crtc, ctx);
+
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_helper_reset_crtc);
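+
+/*
+ * Usage sketch (illustration only): a bridge driver repairing a failed
+ * link, e.g. from a threaded interrupt handler, could wrap this helper
+ * in the usual modeset-lock retry sequence:
+ *
+ *     struct drm_modeset_acquire_ctx ctx;
+ *     int ret;
+ *
+ *     DRM_MODESET_LOCK_ALL_BEGIN(bridge->dev, ctx, 0, ret);
+ *     ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ *     DRM_MODESET_LOCK_ALL_END(bridge->dev, ctx, ret);
+ */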
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 549b28a5918c..f1de7faf9fb4 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -174,7 +174,7 @@ EXPORT_SYMBOL(drm_client_release);
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
if (buffer->gem) {
- drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
@@ -252,7 +252,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
drm_gem_lock(gem);
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap_unlocked;
*map_copy = *map;
@@ -278,7 +278,7 @@ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
struct drm_gem_object *gem = buffer->gem;
struct iosys_map *map = &buffer->map;
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unlock(gem);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
@@ -316,7 +316,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
ret = drm_gem_pin_locked(gem);
if (ret)
goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap(gem, map);
+ ret = drm_gem_vmap_locked(gem, map);
if (ret)
goto err_drm_gem_vmap;
@@ -348,7 +348,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
struct iosys_map *map = &buffer->map;
drm_gem_lock(gem);
- drm_gem_vunmap(gem, map);
+ drm_gem_vunmap_locked(gem, map);
drm_gem_unpin_locked(gem);
drm_gem_unlock(gem);
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index aca442c25209..0f9d5ba36c81 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -39,7 +39,7 @@ int drm_client_modeset_create(struct drm_client_dev *client)
unsigned int max_connector_count = 1;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
- unsigned int i = 0;
+ int i = 0;
/* Add terminating zero entry to enable index less iteration */
client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL);
@@ -73,9 +73,10 @@ err_free:
static void drm_client_modeset_release(struct drm_client_dev *client)
{
struct drm_mode_set *modeset;
- unsigned int i;
drm_client_for_each_modeset(modeset, client) {
+ int i;
+
drm_mode_destroy(client->dev, modeset->mode);
modeset->mode = NULL;
modeset->fb = NULL;
@@ -117,10 +118,10 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_get_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -130,10 +131,10 @@ drm_connector_get_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay == connector->tile_h_size &&
@@ -144,10 +145,10 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
return NULL;
}
-static struct drm_display_mode *
+static const struct drm_display_mode *
drm_connector_preferred_mode(struct drm_connector *connector, int width, int height)
{
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (mode->hdisplay > width ||
@@ -159,16 +160,18 @@ drm_connector_preferred_mode(struct drm_connector *connector, int width, int hei
return NULL;
}
-static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_first_mode(struct drm_connector *connector)
{
return list_first_entry_or_null(&connector->modes,
struct drm_display_mode, head);
}
-static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
+static const struct drm_display_mode *
+drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
- struct drm_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode;
+ const struct drm_cmdline_mode *cmdline_mode;
+ const struct drm_display_mode *mode;
bool prefer_non_interlace;
/*
@@ -237,9 +240,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
return enable;
}
-static void drm_client_connectors_enabled(struct drm_connector **connectors,
+static void drm_client_connectors_enabled(struct drm_connector *connectors[],
unsigned int connector_count,
- bool *enabled)
+ bool enabled[])
{
bool any_enabled = false;
struct drm_connector *connector;
@@ -263,16 +266,35 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors,
enabled[i] = drm_connector_enabled(connectors[i], false);
}
+static void mode_replace(struct drm_device *dev,
+ const struct drm_display_mode **dst,
+ const struct drm_display_mode *src)
+{
+ drm_mode_destroy(dev, (struct drm_display_mode *)*dst);
+
+ *dst = src ? drm_mode_duplicate(dev, src) : NULL;
+}
+
+static void modes_destroy(struct drm_device *dev,
+ const struct drm_display_mode *modes[],
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ mode_replace(dev, &modes[i], NULL);
+}
+
static bool drm_client_target_cloned(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
- int count, i, j;
+ int count, i;
bool can_clone = false;
- struct drm_display_mode *dmt_mode, *mode;
+ struct drm_display_mode *dmt_mode;
/* only contemplate cloning in the single crtc case */
if (dev->mode_config.num_crtc > 1)
@@ -291,9 +313,13 @@ static bool drm_client_target_cloned(struct drm_device *dev,
/* check the command line or if nothing common pick 1024x768 */
can_clone = true;
for (i = 0; i < connector_count; i++) {
+ int j;
+
if (!enabled[i])
continue;
- modes[i] = drm_connector_pick_cmdline_mode(connectors[i]);
+
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connectors[i]));
if (!modes[i]) {
can_clone = false;
break;
@@ -323,6 +349,8 @@ static bool drm_client_target_cloned(struct drm_device *dev,
goto fail;
for (i = 0; i < connector_count; i++) {
+ const struct drm_display_mode *mode;
+
if (!enabled[i])
continue;
@@ -332,7 +360,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
- modes[i] = mode;
+ mode_replace(dev, &modes[i], mode);
}
if (!modes[i])
can_clone = false;
@@ -349,19 +377,19 @@ fail:
}
static int drm_client_get_tile_offsets(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
int idx,
int h_idx, int v_idx)
{
- struct drm_connector *connector;
int i;
int hoffset = 0, voffset = 0;
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+
if (!connector->has_tile)
continue;
@@ -384,14 +412,13 @@ static int drm_client_get_tile_offsets(struct drm_device *dev,
}
static bool drm_client_target_preferred(struct drm_device *dev,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const u64 mask = BIT_ULL(connector_count) - 1;
- struct drm_connector *connector;
u64 conn_configured = 0;
int tile_pass = 0;
int num_tiled_conns = 0;
@@ -405,7 +432,9 @@ static bool drm_client_target_preferred(struct drm_device *dev,
retry:
for (i = 0; i < connector_count; i++) {
- connector = connectors[i];
+ struct drm_connector *connector = connectors[i];
+ const char *mode_type;
+
if (conn_configured & BIT_ULL(i))
continue;
@@ -438,20 +467,23 @@ retry:
modes, offsets, i,
connector->tile_h_loc, connector->tile_v_loc);
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
- /* got for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
+
if (!modes[i]) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
- connector->base.id, connector->name,
- connector->tile_group ? connector->tile_group->id : 0);
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred modes, pick one off the list */
- if (!modes[i])
- modes[i] = drm_connector_first_mode(connector);
+
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
+ }
+
/*
* In case of tiled mode if all tiles not present fallback to
* first available non tiled mode.
@@ -466,18 +498,24 @@ retry:
(connector->tile_h_loc == 0 &&
connector->tile_v_loc == 0 &&
!drm_connector_get_tiled_mode(connector))) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
} else {
- modes[i] = drm_connector_get_tiled_mode(connector);
+ mode_type = "tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_get_tiled_mode(connector));
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n",
- connector->base.id, connector->name,
- modes[i] ? modes[i]->name : "none");
+ if (modes[i])
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] found %s mode: %s\n",
+ connector->base.id, connector->name,
+ mode_type, modes[i]->name);
+ else
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] no mode found\n",
+ connector->base.id, connector->name);
+
conn_configured |= BIT_ULL(i);
}
@@ -502,18 +540,17 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
}
static int drm_client_pick_crtcs(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
+ struct drm_crtc *best_crtcs[],
+ const struct drm_display_mode *modes[],
int n, int width, int height)
{
struct drm_device *dev = client->dev;
struct drm_connector *connector;
int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
+ struct drm_crtc **crtcs;
struct drm_mode_set *modeset;
- int o;
if (n == connector_count)
return 0;
@@ -543,7 +580,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
* remaining connectors
*/
drm_client_for_each_modeset(modeset, client) {
- crtc = modeset->crtc;
+ struct drm_crtc *crtc = modeset->crtc;
+ int o;
if (!connector_has_possible_crtc(connector, crtc))
continue;
@@ -577,17 +615,17 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
/* Try to read the BIOS display configuration and use it for the initial config */
static bool drm_client_firmware_config(struct drm_client_dev *client,
- struct drm_connector **connectors,
+ struct drm_connector *connectors[],
unsigned int connector_count,
- struct drm_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_client_offset *offsets,
- bool *enabled, int width, int height)
+ struct drm_crtc *crtcs[],
+ const struct drm_display_mode *modes[],
+ struct drm_client_offset offsets[],
+ bool enabled[], int width, int height)
{
const int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq, mask;
struct drm_device *dev = client->dev;
- int i, j;
+ int i;
bool *save_enabled;
bool fallback = true, ret = true;
int num_connectors_enabled = 0;
@@ -621,11 +659,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
- struct drm_connector *connector;
+ struct drm_connector *connector = connectors[i];
struct drm_encoder *encoder;
- struct drm_crtc *new_crtc;
-
- connector = connectors[i];
+ struct drm_crtc *crtc;
+ const char *mode_type;
+ int j;
if (conn_configured & BIT(i))
continue;
@@ -664,7 +702,7 @@ retry:
num_connectors_enabled++;
- new_crtc = connector->state->crtc;
+ crtc = connector->state->crtc;
/*
* Make sure we're not trying to drive multiple connectors
@@ -672,69 +710,52 @@ retry:
* match the BIOS.
*/
for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- drm_dbg_kms(dev, "fallback: cloned configuration\n");
+ if (crtcs[j] == crtc) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n",
+ connector->base.id, connector->name);
goto bail;
}
}
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n",
- connector->base.id, connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_connector_pick_cmdline_mode(connector);
+ mode_type = "cmdline";
+ mode_replace(dev, &modes[i],
+ drm_connector_pick_cmdline_mode(connector));
- /* try for preferred next */
if (!modes[i]) {
- drm_dbg_kms(dev,
- "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
- connector->base.id, connector->name,
- str_yes_no(connector->has_tile));
- modes[i] = drm_connector_preferred_mode(connector, width, height);
+ mode_type = "preferred";
+ mode_replace(dev, &modes[i],
+ drm_connector_preferred_mode(connector, width, height));
}
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_first_mode(connector);
+ if (!modes[i]) {
+ mode_type = "first";
+ mode_replace(dev, &modes[i],
+ drm_connector_first_mode(connector));
}
/* last resort: use current mode */
if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly.
- */
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n",
- connector->base.id, connector->name);
- modes[i] = &connector->state->crtc->mode;
+ mode_type = "current";
+ mode_replace(dev, &modes[i],
+ &crtc->state->mode);
}
+
/*
* In case of tiled modes, if all tiles are not present
* then fallback to a non tiled mode.
*/
if (connector->has_tile &&
num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n",
- connector->base.id, connector->name);
- modes[i] = drm_connector_fallback_non_tiled_mode(connector);
+ mode_type = "non tiled";
+ mode_replace(dev, &modes[i],
+ drm_connector_fallback_non_tiled_mode(connector));
}
- crtcs[i] = new_crtc;
+ crtcs[i] = crtc;
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n",
+ drm_dbg_kms(dev, "[CONNECTOR::%d:%s] on [CRTC:%d:%s] using %s mode: %s\n",
connector->base.id, connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+ crtc->base.id, crtc->name,
+ mode_type, modes[i]->name);
fallback = false;
conn_configured |= BIT(i);
@@ -799,8 +820,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
- /* points to modes protected by mode_config.mutex */
- struct drm_display_mode **modes;
+ const struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
bool *enabled;
@@ -851,7 +871,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
if (!drm_client_firmware_config(client, connectors, connector_count, crtcs,
modes, offsets, enabled, width, height)) {
- memset(modes, 0, connector_count * sizeof(*modes));
+ modes_destroy(dev, modes, connector_count);
memset(crtcs, 0, connector_count * sizeof(*crtcs));
memset(offsets, 0, connector_count * sizeof(*offsets));
@@ -868,10 +888,12 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
crtcs, modes, 0, width, height);
}
+ mutex_unlock(&dev->mode_config.mutex);
+
drm_client_modeset_release(client);
for (i = 0; i < connector_count; i++) {
- struct drm_display_mode *mode = modes[i];
+ const struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
struct drm_client_offset *offset = &offsets[i];
@@ -902,11 +924,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
- mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
kfree(crtcs);
+ modes_destroy(dev, modes, connector_count);
kfree(modes);
kfree(offsets);
kfree(enabled);
@@ -938,7 +960,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
struct drm_plane *plane = modeset->crtc->primary;
struct drm_cmdline_mode *cmdline;
u64 valid_mask = 0;
- unsigned int i;
+ int i;
if (!modeset->num_connectors)
return false;
@@ -1219,11 +1241,12 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp
struct drm_connector *connector;
struct drm_mode_set *modeset;
struct drm_modeset_acquire_ctx ctx;
- int j;
int ret;
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
drm_client_for_each_modeset(modeset, client) {
+ int j;
+
if (!modeset->crtc->enabled)
continue;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 0955f1c385dd..39497493f74c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -334,7 +334,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6b2178864c7e..3dfd8b34dceb 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -740,40 +740,6 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
-static int bridges_show(struct seq_file *m, void *data)
-{
- struct drm_encoder *encoder = m->private;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
- unsigned int idx = 0;
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs);
- drm_printf(&p, "\ttype: [%d] %s\n",
- bridge->type,
- drm_get_connector_type_name(bridge->type));
-
- if (bridge->of_node)
- drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
-
- drm_printf(&p, "\tops: [0x%x]", bridge->ops);
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- drm_puts(&p, " detect");
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- drm_puts(&p, " edid");
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- drm_puts(&p, " hpd");
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- drm_puts(&p, " modes");
- if (bridge->ops & DRM_BRIDGE_OP_HDMI)
- drm_puts(&p, " hdmi");
- drm_puts(&p, "\n");
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(bridges);
-
void drm_debugfs_encoder_add(struct drm_encoder *encoder)
{
struct drm_minor *minor = encoder->dev->primary;
@@ -789,9 +755,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder)
encoder->debugfs_entry = root;
- /* bridges list */
- debugfs_create_file("bridges", 0444, root, encoder,
- &bridges_fops);
+ drm_bridge_debugfs_encoder_params(root, encoder);
if (encoder->funcs && encoder->funcs->debugfs_init)
encoder->funcs->debugfs_init(encoder, root);
diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h
index aee1b86a73c1..957dd0619f5c 100644
--- a/drivers/gpu/drm/drm_displayid_internal.h
+++ b/drivers/gpu/drm/drm_displayid_internal.h
@@ -66,6 +66,7 @@ struct drm_edid;
#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_TYPE_10_FORMULA_TIMING 0x2a
#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
@@ -114,20 +115,32 @@ struct displayid_tiled_block {
struct displayid_detailed_timings_1 {
u8 pixel_clock[3];
u8 flags;
- u8 hactive[2];
- u8 hblank[2];
- u8 hsync[2];
- u8 hsw[2];
- u8 vactive[2];
- u8 vblank[2];
- u8 vsync[2];
- u8 vsw[2];
+ __le16 hactive;
+ __le16 hblank;
+ __le16 hsync;
+ __le16 hsw;
+ __le16 vactive;
+ __le16 vblank;
+ __le16 vsync;
+ __le16 vsw;
} __packed;
struct displayid_detailed_timing_block {
struct displayid_block base;
struct displayid_detailed_timings_1 timings[];
-};
+} __packed;
+
+struct displayid_formula_timings_9 {
+ u8 flags;
+ __le16 hactive;
+ __le16 vactive;
+ u8 vrefresh;
+} __packed;
+
+struct displayid_formula_timing_block {
+ struct displayid_block base;
+ struct displayid_formula_timings_9 timings[];
+} __packed;
#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index 385eb5e10047..9dc0408fbbea 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -13,85 +13,7 @@
#include <drm/drm_fourcc.h>
#include "drm_draw_internal.h"
-
-/*
- * Conversions from xrgb8888
- */
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
-{
- pix = ((pix & 0x00FF0000) >> 14) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x000000FF) << 22);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
+#include "drm_format_internal.h"
/**
* drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
@@ -106,28 +28,28 @@ u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
{
switch (format) {
case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
+ return drm_pixel_xrgb8888_to_rgb565(color);
case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
+ return drm_pixel_xrgb8888_to_rgba5551(color);
case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
+ return drm_pixel_xrgb8888_to_xrgb1555(color);
case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
+ return drm_pixel_xrgb8888_to_argb1555(color);
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
return color;
case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
+ return drm_pixel_xrgb8888_to_argb8888(color);
case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
+ return drm_pixel_xrgb8888_to_xbgr8888(color);
case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
+ return drm_pixel_xrgb8888_to_abgr8888(color);
case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
+ return drm_pixel_xrgb8888_to_xrgb2101010(color);
case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
+ return drm_pixel_xrgb8888_to_argb2101010(color);
case DRM_FORMAT_ABGR2101010:
- return convert_xrgb8888_to_abgr2101010(color);
+ return drm_pixel_xrgb8888_to_abgr2101010(color);
default:
WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
return 0;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 60e5ac179c15..56dd61f8e05a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -40,6 +40,7 @@
#include <linux/xarray.h>
#include <drm/drm_accel.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_cache.h>
#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
@@ -500,6 +501,25 @@ void drm_dev_unplug(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unplug);
+/**
+ * drm_dev_set_dma_dev - set the DMA device for a DRM device
+ * @dev: DRM device
+ * @dma_dev: DMA device or NULL
+ *
+ * Sets the DMA device of the given DRM device. Only required if
+ * the DMA device is different from the DRM device's parent. After
+ * calling this function, the DRM device holds a reference on
+ * @dma_dev. Pass NULL to clear the DMA device.
+ */
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev)
+{
+ dma_dev = get_device(dma_dev);
+
+ put_device(dev->dma_dev);
+ dev->dma_dev = dma_dev;
+}
+EXPORT_SYMBOL(drm_dev_set_dma_dev);
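+
+/*
+ * Usage sketch (illustration only, hypothetical names): a driver whose
+ * scanout DMA is performed by a device other than the DRM device's
+ * parent would set that device right after allocation:
+ *
+ *     my = devm_drm_dev_alloc(&pdev->dev, &my_driver, struct my_dev, drm);
+ *     if (IS_ERR(my))
+ *             return PTR_ERR(my);
+ *     drm_dev_set_dma_dev(&my->drm, my_dma_dev);
+ */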
+
/*
* Available recovery methods for wedged device. To be sent along with device
* wedged uevent.
@@ -654,6 +674,8 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
{
drm_fs_inode_free(dev->anon_inode);
+ put_device(dev->dma_dev);
+ dev->dma_dev = NULL;
put_device(dev->dev);
/* Prevent use-after-free in drm_managed_release when debugging is
* enabled. Slightly awkward, but can't really be helped. */
@@ -808,36 +830,62 @@ void *__devm_drm_dev_alloc(struct device *parent,
EXPORT_SYMBOL(__devm_drm_dev_alloc);
/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
+ * __drm_dev_alloc - Allocation of a &drm_device instance
* @parent: Parent device object
+ * @driver: DRM driver
+ * @size: the size of the struct which contains struct drm_device
+ * @offset: the offset of the &drm_device within the container.
*
- * This is the deprecated version of devm_drm_dev_alloc(), which does not support
- * subclassing through embedding the struct &drm_device in a driver private
- * structure, and which does not support automatic cleanup through devres.
+ * This should *NOT* be used by any drivers, but is a dedicated interface for the
+ * corresponding Rust abstraction.
*
- * RETURNS:
- * Pointer to new DRM device, or ERR_PTR on failure.
+ * This is the same as devm_drm_dev_alloc(), but without the corresponding
+ * resource management through the parent device. It is not the same as
+ * drm_dev_alloc() either, since the latter is the deprecated version, which
+ * does not support subclassing.
+ *
+ * Returns: A pointer to new DRM device, or an ERR_PTR on failure.
*/
-struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
- struct device *parent)
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset)
{
- struct drm_device *dev;
+ void *container;
+ struct drm_device *drm;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
return ERR_PTR(-ENOMEM);
- ret = drm_dev_init(dev, driver, parent);
+ drm = container + offset;
+ ret = drm_dev_init(drm, driver, parent);
if (ret) {
- kfree(dev);
+ kfree(container);
return ERR_PTR(ret);
}
+ drmm_add_final_kfree(drm, container);
- drmm_add_final_kfree(dev, dev);
+ return container;
+}
+EXPORT_SYMBOL(__drm_dev_alloc);
- return dev;
+/**
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * This is the deprecated version of devm_drm_dev_alloc(), which does not support
+ * subclassing through embedding the struct &drm_device in a driver private
+ * structure, and which does not support automatic cleanup through devres.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or ERR_PTR on failure.
+ */
+struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
+ struct device *parent)
+{
+ return __drm_dev_alloc(parent, driver, sizeof(struct drm_device), 0);
}
EXPORT_SYMBOL(drm_dev_alloc);
@@ -1188,6 +1236,7 @@ static int __init drm_core_init(void)
}
drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ drm_bridge_debugfs_params(drm_debugfs_root);
ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
if (ret < 0)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 13bc4c290b17..74e77742b2bd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -6596,6 +6596,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
+ memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;
@@ -6760,23 +6761,23 @@ out:
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
- struct displayid_detailed_timings_1 *timings,
+ const struct displayid_detailed_timings_1 *timings,
bool type_7)
{
struct drm_display_mode *mode;
- unsigned pixel_clock = (timings->pixel_clock[0] |
- (timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16)) + 1;
- unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
- unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
- unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
- unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
- unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
- unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
- unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
- unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
- bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
- bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+ unsigned int pixel_clock = (timings->pixel_clock[0] |
+ (timings->pixel_clock[1] << 8) |
+ (timings->pixel_clock[2] << 16)) + 1;
+ unsigned int hactive = le16_to_cpu(timings->hactive) + 1;
+ unsigned int hblank = le16_to_cpu(timings->hblank) + 1;
+ unsigned int hsync = (le16_to_cpu(timings->hsync) & 0x7fff) + 1;
+ unsigned int hsync_width = le16_to_cpu(timings->hsw) + 1;
+ unsigned int vactive = le16_to_cpu(timings->vactive) + 1;
+ unsigned int vblank = le16_to_cpu(timings->vblank) + 1;
+ unsigned int vsync = (le16_to_cpu(timings->vsync) & 0x7fff) + 1;
+ unsigned int vsync_width = le16_to_cpu(timings->vsw) + 1;
+ bool hsync_positive = le16_to_cpu(timings->hsync) & (1 << 15);
+ bool vsync_positive = le16_to_cpu(timings->vsync) & (1 << 15);
mode = drm_mode_create(dev);
if (!mode)
@@ -6833,6 +6834,66 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector,
return num_modes;
}
+static struct drm_display_mode *drm_mode_displayid_formula(struct drm_device *dev,
+ const struct displayid_formula_timings_9 *timings,
+ bool type_10)
+{
+ struct drm_display_mode *mode;
+ u16 hactive = le16_to_cpu(timings->hactive) + 1;
+ u16 vactive = le16_to_cpu(timings->vactive) + 1;
+ u8 timing_formula = timings->flags & 0x7;
+
+ /* TODO: support RB-v2 & RB-v3 */
+ if (timing_formula > 1)
+ return NULL;
+
+ /* TODO: support video-optimized refresh rate */
+ if (timings->flags & (1 << 4))
+ drm_dbg_kms(dev, "Fractional vrefresh is not implemented, proceeding with non-video-optimized refresh rate");
+
+ mode = drm_cvt_mode(dev, hactive, vactive, timings->vrefresh + 1, timing_formula == 1, false, false);
+ if (!mode)
+ return NULL;
+
+ /* TODO: interpret S3D flags */
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+static int add_displayid_formula_modes(struct drm_connector *connector,
+ const struct displayid_block *block)
+{
+ const struct displayid_formula_timing_block *formula_block = (struct displayid_formula_timing_block *)block;
+ int num_timings;
+ struct drm_display_mode *newmode;
+ int num_modes = 0;
+ bool type_10 = block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING;
+ int timing_size = 6 + ((formula_block->base.rev & 0x70) >> 4);
+
+ /* extended blocks are not supported yet */
+ if (timing_size != 6)
+ return 0;
+
+ if (block->num_bytes % timing_size)
+ return 0;
+
+ num_timings = block->num_bytes / timing_size;
+ for (int i = 0; i < num_timings; i++) {
+ const struct displayid_formula_timings_9 *timings = &formula_block->timings[i];
+
+ newmode = drm_mode_displayid_formula(connector->dev, timings, type_10);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ num_modes++;
+ }
+ return num_modes;
+}
+
static int add_displayid_detailed_modes(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -6845,6 +6906,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
if (block->tag == DATA_BLOCK_TYPE_1_DETAILED_TIMING ||
block->tag == DATA_BLOCK_2_TYPE_7_DETAILED_TIMING)
num_modes += add_displayid_detailed_1_modes(connector, block);
+ else if (block->tag == DATA_BLOCK_2_TYPE_9_FORMULA_TIMING ||
+ block->tag == DATA_BLOCK_2_TYPE_10_FORMULA_TIMING)
+ num_modes += add_displayid_formula_modes(connector, block);
}
displayid_iter_end(&iter);
@@ -7099,18 +7163,12 @@ EXPORT_SYMBOL(drm_add_edid_modes);
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay)
+ unsigned int hdisplay, unsigned int vdisplay)
{
- int i, count, num_modes = 0;
+ int i, count = ARRAY_SIZE(drm_dmt_modes), num_modes = 0;
struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
- count = ARRAY_SIZE(drm_dmt_modes);
- if (hdisplay < 0)
- hdisplay = 0;
- if (vdisplay < 0)
- vdisplay = 0;
-
for (i = 0; i < count; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index cf2463090d3a..246cf845e2c9 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -993,6 +993,40 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
EXPORT_SYMBOL(drm_show_fdinfo);
/**
+ * drm_file_err - log process name, pid and client_name associated with a drm_file
+ * @file_priv: context of interest for process name and pid
+ * @fmt: printf() like format string
+ *
+ * Helper function for clients that need to log process details such
+ * as the process name and pid along with user logs.
+ */
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...)
+{
+ va_list args;
+ struct va_format vaf;
+ struct pid *pid;
+ struct task_struct *task;
+ struct drm_device *dev = file_priv->minor->dev;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ mutex_lock(&file_priv->client_name_lock);
+ rcu_read_lock();
+ pid = rcu_dereference(file_priv->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+
+ drm_err(dev, "comm: %s pid: %d client: %s ... %pV", task ? task->comm : "Unset",
+ task ? task->pid : 0, file_priv->client_name ?: "Unset", &vaf);
+
+ va_end(args);
+ rcu_read_unlock();
+ mutex_unlock(&file_priv->client_name_lock);
+}
+EXPORT_SYMBOL(drm_file_err);
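
A usage sketch for the new helper: callers pass a printf()-style format, and the process and client details are prepended automatically. The job_id variable here is hypothetical.

	/* annotate a device error with the offending client's identity */
	drm_file_err(file_priv, "job %llu timed out\n", job_id);
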
+
+/**
* mock_drm_getfile - Create a new struct file for the drm device
* @minor: drm minor to wrap (e.g. #drm_device.primary)
* @flags: file creation mode (O_RDWR etc)
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 01d3ab307ac3..d36e6cacc575 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -20,6 +20,8 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
+#include "drm_format_internal.h"
+
/**
* drm_format_conv_state_init - Initialize format-conversion state
* @state: The state to initialize
@@ -244,6 +246,152 @@ static int drm_fb_xfrm(struct iosys_map *dst,
xfrm_line);
}
+#define ALIGN_DOWN_PIXELS(end, n, a) \
+ ((end) - ((n) & ((a) - 1)))
+
+static __always_inline void drm_fb_xfrm_line_32to8(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 8) |
+ (xfrm_pixel(pix[2]) << 16) |
+ (xfrm_pixel(pix[3]) << 24);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixels */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf8++ = xfrm_pixel(le32_to_cpup(sbuf32++));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to16(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le64 *dbuf64 = dbuf;
+ __le32 *dbuf32;
+ __le16 *dbuf16;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+#if defined(CONFIG_64BIT)
+ /* write 4 pixels at once */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 pix[4] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ le32_to_cpup(sbuf32 + 2),
+ le32_to_cpup(sbuf32 + 3),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u64 val64 = ((u64)xfrm_pixel(pix[0])) |
+ ((u64)xfrm_pixel(pix[1]) << 16) |
+ ((u64)xfrm_pixel(pix[2]) << 32) |
+ ((u64)xfrm_pixel(pix[3]) << 48);
+ *dbuf64++ = cpu_to_le64(val64);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+#endif
+
+ /* write 2 pixels at once */
+ dbuf32 = (__le32 __force *)dbuf64;
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 2)) {
+ u32 pix[2] = {
+ le32_to_cpup(sbuf32),
+ le32_to_cpup(sbuf32 + 1),
+ };
+ /* write output bytes in reverse order for little endianness */
+ u32 val32 = xfrm_pixel(pix[0]) |
+ (xfrm_pixel(pix[1]) << 16);
+ *dbuf32++ = cpu_to_le32(val32);
+ sbuf32 += ARRAY_SIZE(pix);
+ }
+
+ /* write trailing pixel */
+ dbuf16 = (__le16 __force *)dbuf32;
+ while (sbuf32 < send32)
+ *dbuf16++ = cpu_to_le16(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
+static __always_inline void drm_fb_xfrm_line_32to24(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ u8 *dbuf8;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ /* write pixels in chunks of 4 */
+ while (sbuf32 < ALIGN_DOWN_PIXELS(send32, pixels, 4)) {
+ u32 val24[4] = {
+ xfrm_pixel(le32_to_cpup(sbuf32)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 1)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 2)),
+ xfrm_pixel(le32_to_cpup(sbuf32 + 3)),
+ };
+ u32 out32[3] = {
+ /* write output bytes in reverse order for little endianness */
+ ((val24[0] & 0x000000ff)) |
+ ((val24[0] & 0x0000ff00)) |
+ ((val24[0] & 0x00ff0000)) |
+ ((val24[1] & 0x000000ff) << 24),
+ ((val24[1] & 0x0000ff00) >> 8) |
+ ((val24[1] & 0x00ff0000) >> 8) |
+ ((val24[2] & 0x000000ff) << 16) |
+ ((val24[2] & 0x0000ff00) << 16),
+ ((val24[2] & 0x00ff0000) >> 16) |
+ ((val24[3] & 0x000000ff) << 8) |
+ ((val24[3] & 0x0000ff00) << 8) |
+ ((val24[3] & 0x00ff0000) << 8),
+ };
+
+ *dbuf32++ = cpu_to_le32(out32[0]);
+ *dbuf32++ = cpu_to_le32(out32[1]);
+ *dbuf32++ = cpu_to_le32(out32[2]);
+ sbuf32 += ARRAY_SIZE(val24);
+ }
+
+ /* write trailing pixel */
+ dbuf8 = (u8 __force *)dbuf32;
+ while (sbuf32 < send32) {
+ u32 val24 = xfrm_pixel(le32_to_cpup(sbuf32++));
+ /* write output in reverse order for little endianness */
+ *dbuf8++ = (val24 & 0x000000ff);
+ *dbuf8++ = (val24 & 0x0000ff00) >> 8;
+ *dbuf8++ = (val24 & 0x00ff0000) >> 16;
+ }
+}
+
+static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf,
+ unsigned int pixels,
+ u32 (*xfrm_pixel)(u32))
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ const __le32 *send32 = sbuf32 + pixels;
+
+ while (sbuf32 < send32)
+ *dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++)));
+}
+
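
A note on how these helpers compose: each drm_fb_xfrm_line_32toN() routine handles the unrolled stores and the little-endian packing, while the per-pixel math is supplied as an xfrm_pixel callback from drm_format_internal.h (introduced below). ALIGN_DOWN_PIXELS() merely stops the unrolled loop before the tail: for pixels = 7 and 4-pixel chunks, 7 & 3 = 3, so the loop ends 3 pixels early and the trailing loop converts the rest. A minimal sketch of wiring a conversion, mirroring the pattern this patch applies throughout:

	static void my_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf,
					       unsigned int pixels)
	{
		/* the line helper unrolls the stores; the pixel helper,
		 * defined in drm_format_internal.h, does the bit math */
		drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels,
					drm_pixel_xrgb8888_to_rgb565);
	}
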
/**
* drm_fb_memcpy - Copy clip buffer
* @dst: Array of destination buffers
@@ -368,17 +516,7 @@ EXPORT_SYMBOL(drm_fb_swab);
static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- dbuf8[x] = ((pix & 0x00e00000) >> 16) |
- ((pix & 0x0000e000) >> 11) |
- ((pix & 0x000000c0) >> 6);
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb332);
}
/**
@@ -417,38 +555,19 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565);
+}
+
+static __always_inline u32 drm_xrgb8888_to_rgb565_swab(u32 pix)
+{
+ return swab16(drm_pixel_xrgb8888_to_rgb565(pix));
}
/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
- dbuf16[x] = cpu_to_le16(swab16(val16));
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_xrgb8888_to_rgb565_swab);
}
/**
@@ -495,19 +614,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb1555);
}
/**
@@ -547,20 +654,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb1555);
}
/**
@@ -600,20 +694,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le16 *dbuf16 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u16 val16;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val16 = ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgba5551);
}
/**
@@ -653,18 +734,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write blue-green-red to output in little endianness */
- *dbuf8++ = (pix & 0x000000FF) >> 0;
- *dbuf8++ = (pix & 0x0000FF00) >> 8;
- *dbuf8++ = (pix & 0x00FF0000) >> 16;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb888);
}
/**
@@ -704,18 +774,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- /* write red-green-blue to output in little endianness */
- *dbuf8++ = (pix & 0x00ff0000) >> 16;
- *dbuf8++ = (pix & 0x0000ff00) >> 8;
- *dbuf8++ = (pix & 0x000000ff) >> 0;
- }
+ drm_fb_xfrm_line_32to24(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgr888);
}
/**
@@ -755,16 +814,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix |= GENMASK(31, 24); /* fill alpha bits */
- dbuf32[x] = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888);
}
/**
@@ -804,19 +854,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
@@ -835,19 +873,7 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- pix = ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
@@ -866,20 +892,7 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- pix = val32 | ((val32 >> 8) & 0x00300C03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010);
}
/**
@@ -920,21 +933,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- __le32 *dbuf32 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u32 pix;
-
- for (x = 0; x < pixels; x++) {
- pix = le32_to_cpu(sbuf32[x]);
- val32 = ((pix & 0x000000ff) << 2) |
- ((pix & 0x0000ff00) << 4) |
- ((pix & 0x00ff0000) << 6);
- pix = GENMASK(31, 30) | /* set alpha bits */
- val32 | ((val32 >> 8) & 0x00300c03);
- *dbuf32++ = cpu_to_le32(pix);
- }
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010);
}
/**
@@ -975,19 +974,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u8 *dbuf8 = dbuf;
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
-
- for (x = 0; x < pixels; x++) {
- u32 pix = le32_to_cpu(sbuf32[x]);
- u8 r = (pix & 0x00ff0000) >> 16;
- u8 g = (pix & 0x0000ff00) >> 8;
- u8 b = pix & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dbuf8++ = (3 * r + 6 * g + b) / 10;
- }
+ drm_fb_xfrm_line_32to8(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_r8_bt601);
}
/**
@@ -1031,36 +1018,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
static void drm_fb_argb8888_to_argb4444_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- unsigned int pixels2 = pixels & ~GENMASK_ULL(0, 0);
- __le32 *dbuf32 = dbuf;
- __le16 *dbuf16 = dbuf + pixels2 * sizeof(*dbuf16);
- const __le32 *sbuf32 = sbuf;
- unsigned int x;
- u32 val32;
- u16 val16;
- u32 pix[2];
-
- for (x = 0; x < pixels2; x += 2, ++dbuf32) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- pix[1] = le32_to_cpu(sbuf32[x + 1]);
- val32 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4) |
- ((pix[1] & 0xf0000000) >> 0) |
- ((pix[1] & 0x00f00000) << 4) |
- ((pix[1] & 0x0000f000) << 8) |
- ((pix[1] & 0x000000f0) << 12);
- *dbuf32 = cpu_to_le32(val32);
- }
- for (; x < pixels; x++) {
- pix[0] = le32_to_cpu(sbuf32[x]);
- val16 = ((pix[0] & 0xf0000000) >> 16) |
- ((pix[0] & 0x00f00000) >> 12) |
- ((pix[0] & 0x0000f000) >> 8) |
- ((pix[0] & 0x000000f0) >> 4);
- dbuf16[x] = cpu_to_le16(val16);
- }
+ drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_argb8888_to_argb4444);
}
/**
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
new file mode 100644
index 000000000000..9f857bfa368d
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+#ifndef DRM_FORMAT_INTERNAL_H
+#define DRM_FORMAT_INTERNAL_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/*
+ * Each pixel-format conversion helper takes a raw pixel in a
+ * specific input format and returns a raw pixel in a specific
+ * output format. All pixels are in little-endian byte order.
+ *
+ * Function names are
+ *
+ * drm_pixel_<input>_to_<output>_<algorithm>()
+ *
+ * where <input> and <output> refer to pixel formats. The
+ * <algorithm> is optional and hints to the method used for the
+ * conversion. Helpers with no algorithm given apply pixel-bit
+ * shifting.
+ *
+ * The argument type is u32. We expect this to be wide enough to
+ * hold all conversion input from 32-bit RGB to any output format.
+ * The Linux kernel should avoid format conversion for anything
+ * but XRGB8888 input data. Converting from other format can still
+ * be acceptable in some cases.
+ *
+ * The return type is u32. It is wide enough to hold all conversion
+ * output from XRGB8888. For output formats wider than 32 bit, a
+ * return type of u64 would be acceptable.
+ */
+
+/*
+ * Conversions from XRGB8888
+ */
+
+static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
+{
+ u32 r = (pix & 0x00ff0000) >> 16;
+ u32 g = (pix & 0x0000ff00) >> 8;
+ u32 b = pix & 0x000000ff;
+
+ /* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ return (3 * r + 6 * g + b) / 10;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
+{
+ return ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000fc00) >> 5) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix)
+{
+ return drm_pixel_xrgb8888_to_rgbx5551(pix) |
+ BIT(0); /* set alpha bit */
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ drm_pixel_xrgb8888_to_xrgb1555(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_rgb888(u32 pix)
+{
+ return pix & GENMASK(23, 0);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_bgr888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ pix;
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0xff000000)) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 16) |
+ ((pix & 0x0000ff00)) |
+ ((pix & 0x000000ff) << 16);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
+{
+ return GENMASK(31, 24) | /* fill alpha bits */
+ drm_pixel_xrgb8888_to_xbgr8888(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xrgb2101010(pix);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00ff0000) >> 14) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x000000ff) << 22);
+ return pix | ((pix >> 8) & 0x00300c03);
+}
+
+static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix)
+{
+ return GENMASK(31, 30) | /* set alpha bits */
+ drm_pixel_xrgb8888_to_xbgr2101010(pix);
+}
+
+/*
+ * Conversion from ARGB8888
+ */
+
+static inline u32 drm_pixel_argb8888_to_argb4444(u32 pix)
+{
+ return ((pix & 0xf0000000) >> 16) |
+ ((pix & 0x00f00000) >> 12) |
+ ((pix & 0x0000f000) >> 8) |
+ ((pix & 0x000000f0) >> 4);
+}
+
+#endif
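
The two 10-bit conversions above widen each 8-bit channel by shifting it to the top of its 10-bit field and replicating the channel's two high bits into the two low bits, which is what the ((pix >> 8) & 0x00300c03) term does; this maps 0x00 to 0x000 and 0xff to 0x3ff exactly. A standalone sanity check of that arithmetic (userspace C, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	/* same math as drm_pixel_xrgb8888_to_xrgb2101010() above */
	static uint32_t widen_xrgb2101010(uint32_t pix)
	{
		pix = ((pix & 0x000000ff) << 2) |
		      ((pix & 0x0000ff00) << 4) |
		      ((pix & 0x00ff0000) << 6);
		return pix | ((pix >> 8) & 0x00300c03);
	}

	int main(void)
	{
		assert(widen_xrgb2101010(0x00ffffff) == 0x3fffffff); /* white stays white */
		assert(widen_xrgb2101010(0x00000000) == 0x00000000); /* black stays black */
		return 0;
	}
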
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c6240bab3fa5..1e659d2660f7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1216,7 +1216,7 @@ void drm_gem_unpin(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
@@ -1233,9 +1233,9 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vmap);
+EXPORT_SYMBOL(drm_gem_vmap_locked);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_assert_held(obj->resv);
@@ -1248,7 +1248,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
/* Always set the mapping to NULL. Callers may rely on this. */
iosys_map_clear(map);
}
-EXPORT_SYMBOL(drm_gem_vunmap);
+EXPORT_SYMBOL(drm_gem_vunmap_locked);
void drm_gem_lock(struct drm_gem_object *obj)
{
@@ -1262,25 +1262,25 @@ void drm_gem_unlock(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_unlock);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_vmap(obj, map);
+ ret = drm_gem_vmap_locked(obj, map);
dma_resv_unlock(obj->resv);
return ret;
}
-EXPORT_SYMBOL(drm_gem_vmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vmap);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_lock(obj->resv, NULL);
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
dma_resv_unlock(obj->resv);
}
-EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
+EXPORT_SYMBOL(drm_gem_vunmap);
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
@@ -1543,10 +1543,10 @@ tail:
EXPORT_SYMBOL(drm_gem_lru_scan);
/**
- * drm_gem_evict - helper to evict backing pages for a GEM object
+ * drm_gem_evict_locked - helper to evict backing pages for a GEM object
* @obj: obj in question
*/
-int drm_gem_evict(struct drm_gem_object *obj)
+int drm_gem_evict_locked(struct drm_gem_object *obj)
{
dma_resv_assert_held(obj->resv);
@@ -1558,4 +1558,4 @@ int drm_gem_evict(struct drm_gem_object *obj)
return 0;
}
-EXPORT_SYMBOL(drm_gem_evict);
+EXPORT_SYMBOL(drm_gem_evict_locked);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 0fbeb686e561..6f72e7a0f427 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -362,7 +362,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
- ret = drm_gem_vmap_unlocked(obj, &map[i]);
+ ret = drm_gem_vmap(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
@@ -384,7 +384,7 @@ err_drm_gem_vunmap:
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
return ret;
}
@@ -411,7 +411,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue;
if (iosys_map_is_null(&map[i]))
continue;
- drm_gem_vunmap_unlocked(obj, &map[i]);
+ drm_gem_vunmap(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index d99dee67353a..aa43265f4f4f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
} else {
dma_resv_lock(shmem->base.resv, NULL);
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -174,9 +174,10 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
}
if (shmem->pages)
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
dma_resv_unlock(shmem->base.resv);
}
@@ -186,21 +187,20 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
-static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct page **pages;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->pages_use_count++ > 0)
+ if (refcount_inc_not_zero(&shmem->pages_use_count))
return 0;
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
PTR_ERR(pages));
- shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -216,38 +216,36 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
shmem->pages = pages;
+ refcount_set(&shmem->pages_use_count, 1);
+
return 0;
}
/*
- * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object
* @shmem: shmem GEM object
*
* This function decreases the use count and puts the backing pages when use drops to zero.
*/
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- return;
-
- if (--shmem->pages_use_count > 0)
- return;
-
+ if (refcount_dec_and_test(&shmem->pages_use_count)) {
#ifdef CONFIG_X86
- if (shmem->map_wc)
- set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
- drm_gem_put_pages(obj, shmem->pages,
- shmem->pages_mark_dirty_on_put,
- shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+ }
}
-EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
@@ -257,7 +255,12 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base));
- ret = drm_gem_shmem_get_pages(shmem);
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ if (!ret)
+ refcount_set(&shmem->pages_pin_count, 1);
return ret;
}
@@ -267,7 +270,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
dma_resv_assert_held(shmem->base.resv);
- drm_gem_shmem_put_pages(shmem);
+ if (refcount_dec_and_test(&shmem->pages_pin_count))
+ drm_gem_shmem_put_pages_locked(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);
@@ -288,6 +292,9 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_inc_not_zero(&shmem->pages_pin_count))
+ return 0;
+
ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
if (ret)
return ret;
@@ -296,7 +303,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_pin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_pin);
/**
* drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
@@ -311,14 +318,17 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
+ if (refcount_dec_not_one(&shmem->pages_pin_count))
+ return;
+
dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_unpin_locked(shmem);
dma_resv_unlock(shmem->base.resv);
}
-EXPORT_SYMBOL(drm_gem_shmem_unpin);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin);
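
The refcount_t conversion in this hunk is what makes the unlocked entry points cheap: a caller pinning an already-pinned object never takes the reservation lock, and only the 0 -> 1 transition does. A sketch of the pattern, with my_obj and my_pin_first() as hypothetical stand-ins:

	static int my_pin(struct my_obj *obj)
	{
		int ret;

		/* fast path: already pinned, no lock needed */
		if (refcount_inc_not_zero(&obj->pin_count))
			return 0;

		/* slow path: lock for the 0 -> 1 transition */
		ret = dma_resv_lock_interruptible(obj->resv, NULL);
		if (ret)
			return ret;
		ret = my_pin_first(obj);	/* acquire backing pages */
		if (!ret)
			refcount_set(&obj->pin_count, 1);
		dma_resv_unlock(obj->resv);

		return ret;
	}
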
/*
- * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing
* store.
@@ -327,47 +337,43 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
* exists for the buffer backing the shmem GEM object. It hides the differences
* between dma-buf imported and natively allocated objects.
*
- * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked().
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->dma_buf, map);
- if (!ret) {
- if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->dma_buf, map);
- return -EIO;
- }
- }
} else {
pgprot_t prot = PAGE_KERNEL;
dma_resv_assert_held(shmem->base.resv);
- if (shmem->vmap_use_count++ > 0) {
+ if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
- goto err_zero_use;
+ return ret;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
- if (!shmem->vaddr)
+ if (!shmem->vaddr) {
ret = -ENOMEM;
- else
+ } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+ refcount_set(&shmem->vmap_use_count, 1);
+ }
}
if (ret) {
@@ -379,28 +385,26 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
err_put_pages:
if (!drm_gem_is_imported(obj))
- drm_gem_shmem_put_pages(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
+ drm_gem_shmem_unpin_locked(shmem);
return ret;
}
-EXPORT_SYMBOL(drm_gem_shmem_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked);
/*
- * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
+ * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
* This function cleans up a kernel virtual address mapping acquired by
- * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
- * zero.
+ * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count
+ * drops to zero.
*
* This function hides the differences between dma-buf imported and natively
* allocated objects.
*/
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map)
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map)
{
struct drm_gem_object *obj = &shmem->base;
@@ -409,19 +413,15 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
} else {
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
+ if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+ vunmap(shmem->vaddr);
+ shmem->vaddr = NULL;
- vunmap(shmem->vaddr);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_unpin_locked(shmem);
+ }
}
-
- shmem->vaddr = NULL;
}
-EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked);
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
@@ -449,7 +449,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
/* Update madvise status, returns true if not purged, else
* false or -errno.
*/
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv)
{
dma_resv_assert_held(shmem->base.resv);
@@ -460,9 +460,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
return (madv >= 0);
}
-EXPORT_SYMBOL(drm_gem_shmem_madvise);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked);
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
@@ -476,7 +476,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
kfree(shmem->sgt);
shmem->sgt = NULL;
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
shmem->madv = -1;
@@ -492,7 +492,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
-EXPORT_SYMBOL(drm_gem_shmem_purge);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
@@ -575,8 +575,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
- shmem->pages_use_count++;
+ drm_WARN_ON_ONCE(obj->dev,
+ !refcount_inc_not_zero(&shmem->pages_use_count));
dma_resv_unlock(shmem->base.resv);
@@ -589,7 +589,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
dma_resv_lock(shmem->base.resv, NULL);
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
drm_gem_vm_close(vma);
@@ -639,7 +639,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return -EINVAL;
dma_resv_lock(shmem->base.resv, NULL);
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
dma_resv_unlock(shmem->base.resv);
if (ret)
@@ -666,11 +666,12 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
if (drm_gem_is_imported(&shmem->base))
return;
- drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
- drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
-EXPORT_SYMBOL(drm_gem_shmem_print_info);
+EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
/**
* drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
@@ -707,7 +708,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
drm_WARN_ON(obj->dev, drm_gem_is_imported(obj));
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
return ERR_PTR(ret);
@@ -729,7 +730,7 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index de424e670995..735bfdf4322f 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1118,6 +1118,10 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
lockdep_assert_held(&gpusvm->notifier_lock);
if (range->flags.has_dma_mapping) {
+ struct drm_gpusvm_range_flags flags = {
+ .__flags = range->flags.__flags,
+ };
+
for (i = 0, j = 0; i < npages; j++) {
struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
@@ -1131,8 +1135,12 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
dev, *addr);
i += 1 << addr->order;
}
- range->flags.has_devmem_pages = false;
- range->flags.has_dma_mapping = false;
+
+ /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+ flags.has_devmem_pages = false;
+ flags.has_dma_mapping = false;
+ WRITE_ONCE(range->flags.__flags, flags.__flags);
+
range->dpagemap = NULL;
}
}
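
The WRITE_ONCE() here is one half of a data-race annotation; a reader doing an opportunistic check without the notifier lock would take the matching READ_ONCE() snapshot, along the lines of the hypothetical sketch below (the diff itself only adds the writer side):

	/* opportunistic check, no notifier lock held */
	struct drm_gpusvm_range_flags flags = {
		.__flags = READ_ONCE(range->flags.__flags),
	};

	if (!flags.has_dma_mapping)
		return false;	/* pages already unmapped, or never mapped */
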
@@ -1330,10 +1338,10 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
unsigned long num_dma_mapped;
unsigned int order = 0;
unsigned long *pfns;
- struct page **pages;
int err = 0;
struct dev_pagemap *pagemap;
struct drm_pagemap *dpagemap;
+ struct drm_gpusvm_range_flags flags;
retry:
hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
@@ -1369,7 +1377,6 @@ retry:
if (err)
goto err_free;
- pages = (struct page **)pfns;
map_pages:
/*
* Perform all dma mappings under the notifier lock to not
@@ -1378,7 +1385,8 @@ map_pages:
*/
drm_gpusvm_notifier_lock(gpusvm);
- if (range->flags.unmapped) {
+ flags.__flags = range->flags.__flags;
+ if (flags.unmapped) {
drm_gpusvm_notifier_unlock(gpusvm);
err = -EFAULT;
goto err_free;
@@ -1444,8 +1452,6 @@ map_pages:
err = -EFAULT;
goto err_unmap;
}
-
- pages[i] = page;
} else {
dma_addr_t addr;
@@ -1454,6 +1460,11 @@ map_pages:
goto err_unmap;
}
+ if (ctx->devmem_only) {
+ err = -EFAULT;
+ goto err_unmap;
+ }
+
addr = dma_map_page(gpusvm->drm->dev,
page, 0,
PAGE_SIZE << order,
@@ -1469,14 +1480,17 @@ map_pages:
}
i += 1 << order;
num_dma_mapped = i;
- range->flags.has_dma_mapping = true;
+ flags.has_dma_mapping = true;
}
if (zdd) {
- range->flags.has_devmem_pages = true;
+ flags.has_devmem_pages = true;
range->dpagemap = dpagemap;
}
+ /* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
+ WRITE_ONCE(range->flags.__flags, flags.__flags);
+
drm_gpusvm_notifier_unlock(gpusvm);
kvfree(pfns);
set_seqno:
@@ -1765,6 +1779,8 @@ int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
goto err_finalize;
/* Upon success bind devmem allocation to range and zdd */
+ devmem_allocation->timeslice_expiration = get_jiffies_64() +
+ msecs_to_jiffies(ctx->timeslice_ms);
zdd->devmem_allocation = devmem_allocation; /* Owns ref */
err_finalize:
@@ -1985,6 +2001,13 @@ static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
void *buf;
int i, err = 0;
+ if (page) {
+ zdd = page->zone_device_data;
+ if (time_before64(get_jiffies_64(),
+ zdd->devmem_allocation->timeslice_expiration))
+ return 0;
+ }
+
start = ALIGN_DOWN(fault_addr, size);
end = ALIGN(fault_addr + 1, size);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b2b6a8e49dda..e44f28fd81d3 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -179,8 +179,8 @@ int drm_gem_pin_locked(struct drm_gem_object *obj);
void drm_gem_unpin_locked(struct drm_gem_object *obj);
int drm_gem_pin(struct drm_gem_object *obj);
void drm_gem_unpin(struct drm_gem_object *obj);
-int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index dfa595556320..e5184a0c2465 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -36,6 +36,8 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>
+#include <linux/media-bus-format.h>
+
#include <video/mipi_display.h>
/**
@@ -871,6 +873,41 @@ ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
EXPORT_SYMBOL(mipi_dsi_generic_read);
/**
+ * drm_mipi_dsi_get_input_bus_fmt() - Get the required MEDIA_BUS_FMT_*-based
+ * input pixel format for a given DSI output pixel format
+ * @dsi_format: pixel format that a DSI host needs to output
+ *
+ * Various DSI hosts can use this function during their
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation to ascertain
+ * the MEDIA_BUS_FMT_* pixel format required as input.
+ *
+ * RETURNS:
+ * a 32-bit MEDIA_BUS_FMT_* value on success or 0 in case of failure.
+ */
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format)
+{
+ switch (dsi_format) {
+ case MIPI_DSI_FMT_RGB888:
+ return MEDIA_BUS_FMT_RGB888_1X24;
+
+ case MIPI_DSI_FMT_RGB666:
+ return MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return MEDIA_BUS_FMT_RGB666_1X18;
+
+ case MIPI_DSI_FMT_RGB565:
+ return MEDIA_BUS_FMT_RGB565_1X16;
+
+ default:
+ /* Unsupported DSI Format */
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_mipi_dsi_get_input_bus_fmt);
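
A sketch of the intended caller, a DSI host bridge's &drm_bridge_funcs.atomic_get_input_bus_fmts hook; my_dsi and bridge_to_my_dsi() are hypothetical, and the single-entry kmalloc'ed array follows the usual bridge contract of returning an allocated format list:

	static u32 *my_dsi_get_input_bus_fmts(struct drm_bridge *bridge,
					      struct drm_bridge_state *bridge_state,
					      struct drm_crtc_state *crtc_state,
					      struct drm_connector_state *conn_state,
					      u32 output_fmt,
					      unsigned int *num_input_fmts)
	{
		struct my_dsi *dsi = bridge_to_my_dsi(bridge);
		u32 *input_fmts;
		u32 fmt;

		*num_input_fmts = 0;

		fmt = drm_mipi_dsi_get_input_bus_fmt(dsi->format);
		if (!fmt)
			return NULL;

		input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
		if (!input_fmts)
			return NULL;

		input_fmts[0] = fmt;
		*num_input_fmts = 1;

		return input_fmts;
	}
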
+
+/**
* mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @data: buffer containing data to be transmitted
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 8642a2fb25a9..b4239fd04e9d 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -383,6 +383,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
prop = drm_property_create(dev,
DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "IN_FORMATS_ASYNC", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.async_modifiers_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
"SIZE_HINTS", 0);
if (!prop)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c627e42a7ce7..650de4da0853 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -74,8 +74,9 @@ EXPORT_SYMBOL(drm_panel_init);
* drm_panel_add - add a panel to the global registry
* @panel: panel to add
*
- * Add a panel to the global registry so that it can be looked up by display
- * drivers.
+ * Add a panel to the global registry so that it can be looked
+ * up by display drivers. The panel to be added must have been
+ * allocated by devm_drm_panel_alloc().
*/
void drm_panel_add(struct drm_panel *panel)
{
@@ -105,21 +106,21 @@ EXPORT_SYMBOL(drm_panel_remove);
*
* Calling this function will enable power and deassert any reset signals to
* the panel. After this has completed it is possible to communicate with any
- * integrated circuitry via a command bus.
- *
- * Return: 0 on success or a negative error code on failure.
+ * integrated circuitry via a command bus. This function cannot fail (as it is
+ * called from the pre_enable call chain). There will always be a call to
+ * drm_panel_unprepare() afterwards.
*/
-int drm_panel_prepare(struct drm_panel *panel)
+void drm_panel_prepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->prepared) {
dev_warn(panel->dev, "Skipping prepare of already prepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -138,11 +139,8 @@ int drm_panel_prepare(struct drm_panel *panel)
follower->funcs->panel_prepared, ret);
}
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_prepare);
@@ -154,16 +152,14 @@ EXPORT_SYMBOL(drm_panel_prepare);
* reset, turn off power supplies, ...). After this function has completed, it
* is usually no longer possible to communicate with the panel until another
* call to drm_panel_prepare().
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_unprepare(struct drm_panel *panel)
+void drm_panel_unprepare(struct drm_panel *panel)
{
struct drm_panel_follower *follower;
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -176,7 +172,7 @@ int drm_panel_unprepare(struct drm_panel *panel)
*/
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
- return 0;
+ return;
}
mutex_lock(&panel->follower_lock);
@@ -195,11 +191,8 @@ int drm_panel_unprepare(struct drm_panel *panel)
}
panel->prepared = false;
- ret = 0;
exit:
mutex_unlock(&panel->follower_lock);
-
- return ret;
}
EXPORT_SYMBOL(drm_panel_unprepare);
@@ -209,26 +202,26 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*
* Calling this function will cause the panel display drivers to be turned on
* and the backlight to be enabled. Content will be visible on screen after
- * this call completes.
- *
- * Return: 0 on success or a negative error code on failure.
+ * this call completes. This function cannot fail (as it is called from the
+ * enable call chain). There will always be a call to drm_panel_disable()
+ * afterwards.
*/
-int drm_panel_enable(struct drm_panel *panel)
+void drm_panel_enable(struct drm_panel *panel)
{
int ret;
if (!panel)
- return -EINVAL;
+ return;
if (panel->enabled) {
dev_warn(panel->dev, "Skipping enable of already enabled panel\n");
- return 0;
+ return;
}
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return ret;
+ return;
}
panel->enabled = true;
@@ -236,8 +229,6 @@ int drm_panel_enable(struct drm_panel *panel)
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -248,15 +239,13 @@ EXPORT_SYMBOL(drm_panel_enable);
* This will typically turn off the panel's backlight or disable the display
* drivers. For smart panels it should still be possible to communicate with
* the integrated circuitry via any command bus after this call.
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_disable(struct drm_panel *panel)
+void drm_panel_disable(struct drm_panel *panel)
{
int ret;
if (!panel)
- return -EINVAL;
+ return;
/*
* If you are seeing the warning below it likely means one of two things:
@@ -269,7 +258,7 @@ int drm_panel_disable(struct drm_panel *panel)
*/
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
- return 0;
+ return;
}
ret = backlight_disable(panel->backlight);
@@ -280,11 +269,9 @@ int drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return ret;
+ return;
}
panel->enabled = false;
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -317,6 +304,93 @@ int drm_panel_get_modes(struct drm_panel *panel,
}
EXPORT_SYMBOL(drm_panel_get_modes);
+static void __drm_panel_free(struct kref *kref)
+{
+ struct drm_panel *panel = container_of(kref, struct drm_panel, refcount);
+
+ kfree(panel->container);
+}
+
+/**
+ * drm_panel_get - Acquire a panel reference
+ * @panel: DRM panel
+ *
+ * This function increments the panel's refcount.
+ * Returns:
+ * Pointer to @panel
+ */
+struct drm_panel *drm_panel_get(struct drm_panel *panel)
+{
+ if (!panel)
+ return panel;
+
+ kref_get(&panel->refcount);
+
+ return panel;
+}
+EXPORT_SYMBOL(drm_panel_get);
+
+/**
+ * drm_panel_put - Release a panel reference
+ * @panel: DRM panel
+ *
+ * This function decrements the panel's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+void drm_panel_put(struct drm_panel *panel)
+{
+ if (panel)
+ kref_put(&panel->refcount, __drm_panel_free);
+}
+EXPORT_SYMBOL(drm_panel_put);
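
A short usage sketch of the new reference counting, assuming a consumer that looks a panel up and wants to keep it alive while in use (whether lookup helpers hand out their own reference is outside this hunk; np is a hypothetical device-tree node):

	struct drm_panel *panel;

	panel = of_drm_find_panel(np);
	if (IS_ERR(panel))
		return PTR_ERR(panel);

	drm_panel_get(panel);	/* hold while in use */
	/* ... use the panel ... */
	drm_panel_put(panel);	/* may free the container */
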
+
+/**
+ * drm_panel_put_void - wrapper to drm_panel_put() taking a void pointer
+ *
+ * @data: pointer to a &struct drm_panel, cast to a void pointer
+ *
+ * Wrapper of drm_panel_put() to be used when a function taking a void
+ * pointer is needed, for example as a devm action.
+ */
+static void drm_panel_put_void(void *data)
+{
+ struct drm_panel *panel = (struct drm_panel *)data;
+
+ drm_panel_put(panel);
+}
+
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type)
+{
+ void *container;
+ struct drm_panel *panel;
+ int err;
+
+ if (!funcs) {
+ dev_warn(dev, "Missing funcs pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ panel = container + offset;
+ panel->container = container;
+ panel->funcs = funcs;
+ kref_init(&panel->refcount);
+
+ err = devm_add_action_or_reset(dev, drm_panel_put_void, panel);
+ if (err)
+ return ERR_PTR(err);
+
+ drm_panel_init(panel, dev, funcs, connector_type);
+
+ return container;
+}
+EXPORT_SYMBOL(__devm_drm_panel_alloc);
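
For panel drivers, the expected entry point is a wrapper macro rather than this function itself; assuming it mirrors devm_drm_dev_alloc() (the macro is not part of this hunk), a probe would look roughly like this, with my_panel and my_panel_funcs hypothetical:

	struct my_panel {
		struct drm_panel panel;		/* embedded, freed via the kref */
		struct gpio_desc *reset_gpio;	/* driver-private state */
	};

	static int my_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct my_panel *ctx;

		ctx = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
					   &my_panel_funcs,
					   DRM_MODE_CONNECTOR_DSI);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		/* ... acquire regulators, GPIOs, etc. ... */

		drm_panel_add(&ctx->panel);
		return 0;
	}
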
+
#ifdef CONFIG_OF
/**
* of_drm_find_panel - look up a panel using a device tree node
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index c554ad8f246b..7ac0fd5391fe 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -517,6 +517,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* ZOTAC Gaming Zone */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ZOTAC"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "G0A1W"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* One Mix 2S (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index b47ea25fdfaa..b4de79583805 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -7,6 +7,7 @@
*/
#include <linux/font.h>
+#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/iosys-map.h>
#include <linux/kdebug.h>
@@ -154,6 +155,90 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
+static void drm_panic_write_pixel16(void *vaddr, unsigned int offset, u16 color)
+{
+ u16 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
+{
+ u8 *p = vaddr + offset;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p++ = color & 0xff;
+ color >>= 8;
+ *p = color & 0xff;
+}
+
+static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
+{
+ u32 *p = vaddr + offset;
+
+ *p = color;
+}
+
+static void drm_panic_write_pixel(void *vaddr, unsigned int offset, u32 color, unsigned int cpp)
+{
+ switch (cpp) {
+ case 2:
+ drm_panic_write_pixel16(vaddr, offset, color);
+ break;
+ case 3:
+ drm_panic_write_pixel24(vaddr, offset, color);
+ break;
+ case 4:
+ drm_panic_write_pixel32(vaddr, offset, color);
+ break;
+ default:
+ pr_debug_once("Can't blit with pixel width %d\n", cpp);
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, const u8 *sbuf8,
+ unsigned int spitch, struct drm_rect *clip,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ unsigned int height = drm_rect_height(clip);
+ unsigned int width = drm_rect_width(clip);
+ void *vaddr = NULL;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = (y + clip->y1) * dpitch + (x + clip->x1) * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (!pages[new_page])
+ continue;
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ if (vaddr)
+ drm_panic_write_pixel(vaddr, offset, fg32, cpp);
+ }
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_blit - convert a monochrome image to a linear framebuffer
* @sb: destination scanout buffer
@@ -177,6 +262,10 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_blit_pixel(sb, clip, sbuf8, spitch, scale, fg_color);
+ if (sb->pages)
+ return drm_panic_blit_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ sbuf8, spitch, clip, scale, fg_color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -209,6 +298,35 @@ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, color);
}
+static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
+ unsigned int cpp, struct drm_rect *clip,
+ u32 color)
+{
+ unsigned int y, x;
+ unsigned int page = ~0;
+ void *vaddr = NULL;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ for (x = clip->x1; x < clip->x2; x++) {
+ unsigned int new_page;
+ unsigned int offset;
+
+ offset = y * dpitch + x * cpp;
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != page) {
+ if (vaddr)
+ kunmap_local(vaddr);
+ page = new_page;
+ vaddr = kmap_local_page_try_from_panic(pages[page]);
+ }
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
+ }
+ }
+ if (vaddr)
+ kunmap_local(vaddr);
+}
+
/*
* drm_panic_fill - Fill a rectangle with a color
* @sb: destination scanout buffer
@@ -225,6 +343,10 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
if (sb->set_pixel)
return drm_panic_fill_pixel(sb, clip, color);
+ if (sb->pages)
+ return drm_panic_fill_page(sb->pages, sb->pitch[0], sb->format->cpp[0],
+ clip, color);
+
map = sb->map[0];
iosys_map_incr(&map, clip->y1 * sb->pitch[0] + clip->x1 * sb->format->cpp[0]);
@@ -709,16 +831,24 @@ static void draw_panic_plane(struct drm_plane *plane, const char *description)
if (!drm_panic_trylock(plane->dev, flags))
return;
+ ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+
+ if (ret || !drm_panic_is_format_supported(sb.format))
+ goto unlock;
+
+ /* One of these should be set, or it can't draw pixels */
+ if (!sb.set_pixel && !sb.pages && iosys_map_is_null(&sb.map[0]))
+ goto unlock;
+
drm_panic_set_description(description);
- ret = plane->helper_private->get_scanout_buffer(plane, &sb);
+ draw_panic_dispatch(&sb);
+ if (plane->helper_private->panic_flush)
+ plane->helper_private->panic_flush(plane);
- if (!ret && drm_panic_is_format_supported(sb.format)) {
- draw_panic_dispatch(&sb);
- if (plane->helper_private->panic_flush)
- plane->helper_private->panic_flush(plane);
- }
drm_panic_clear_description();
+
+unlock:
drm_panic_unlock(plane->dev, flags);
}
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index f2a99681b998..dd55b1cb764d 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -5,7 +5,7 @@
//! It is called from a panic handler, so it shouldn't allocate memory and
//! does all the work on the stack or on the provided buffers. For
//! simplification, it only supports low error correction, and applies the
-//! first mask (checkerboard). It will draw the smallest QRcode that can
+//! first mask (checkerboard). It will draw the smallest QR code that can
//! contain the string passed as parameter. To get the most compact
//! QR code, the start of the URL is encoded as binary, and the
//! compressed kmsg is encoded as numeric.
@@ -315,7 +315,7 @@ impl Segment<'_> {
}
}
- // Returns the size of the length field in bits, depending on QR Version.
+ /// Returns the size of the length field in bits, depending on QR Version.
fn length_bits_count(&self, version: Version) -> usize {
let Version(v) = version;
match self {
@@ -331,7 +331,7 @@ impl Segment<'_> {
}
}
- // Number of characters in the segment.
+ /// Number of characters in the segment.
fn character_count(&self) -> usize {
match self {
Segment::Binary(data) => data.len(),
@@ -366,8 +366,48 @@ impl Segment<'_> {
SegmentIterator {
segment: self,
offset: 0,
- carry: 0,
- carry_len: 0,
+ decfifo: Default::default(),
+ }
+ }
+}
+
+/// Max FIFO size is 17 (max push) + 2 (max digits remaining before a push)
+const MAX_FIFO_SIZE: usize = 19;
+
+/// A simple decimal digit FIFO.
+#[derive(Default)]
+struct DecFifo {
+ decimals: [u8; MAX_FIFO_SIZE],
+ len: usize,
+}
+
+impl DecFifo {
+ fn push(&mut self, data: u64, len: usize) {
+ let mut chunk = data;
+ for i in (0..self.len).rev() {
+ self.decimals[i + len] = self.decimals[i];
+ }
+ for i in 0..len {
+ self.decimals[i] = (chunk % 10) as u8;
+ chunk /= 10;
+ }
+ self.len += len;
+ }
+
+ /// Pop up to 3 decimal digits from the FIFO
+ fn pop3(&mut self) -> Option<(u16, usize)> {
+ if self.len == 0 {
+ None
+ } else {
+ let poplen = 3.min(self.len);
+ self.len -= poplen;
+ let mut out = 0;
+ let mut exp = 1;
+ for i in 0..poplen {
+ out += self.decimals[self.len + i] as u16 * exp;
+ exp *= 10;
+ }
+ Some((out, NUM_CHARS_BITS[poplen]))
}
}
}
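
pop3() returns up to three digits plus the matching bit width from NUM_CHARS_BITS (4, 7 or 10 bits for 1, 2 or 3 digits), which is exactly the QR numeric-mode packing. A hedged C sketch of the same grouping, without the FIFO machinery:

#include <stdio.h>

/* Bits needed to encode 0, 1, 2 or 3 decimal digits in QR numeric mode. */
static const int NUM_CHARS_BITS[4] = {0, 4, 7, 10};

int main(void)
{
	/* "12345" has five digits: one full group of 3, then a group of 2. */
	const unsigned int digits[] = {1, 2, 3, 4, 5};
	const int n = 5;

	for (int i = 0; i < n; ) {
		int len = (n - i) < 3 ? (n - i) : 3;
		unsigned int group = 0;

		for (int k = 0; k < len; k++)
			group = group * 10 + digits[i + k];
		printf("group %u encoded in %d bits\n", group, NUM_CHARS_BITS[len]);
		i += len;
	}
	return 0;	/* prints: group 123 in 10 bits, group 45 in 7 bits */
}
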
@@ -375,8 +415,7 @@ impl Segment<'_> {
struct SegmentIterator<'a> {
segment: &'a Segment<'a>,
offset: usize,
- carry: u64,
- carry_len: usize,
+ decfifo: DecFifo,
}
impl Iterator for SegmentIterator<'_> {
@@ -394,31 +433,17 @@ impl Iterator for SegmentIterator<'_> {
}
}
Segment::Numeric(data) => {
- if self.carry_len < 3 && self.offset < data.len() {
- // If there are less than 3 decimal digits in the carry,
- // take the next 7 bytes of input, and add them to the carry.
+ if self.decfifo.len < 3 && self.offset < data.len() {
+ // If there are less than 3 decimal digits in the fifo,
+ // take the next 7 bytes of input, and push them to the fifo.
let mut buf = [0u8; 8];
let len = 7.min(data.len() - self.offset);
buf[..len].copy_from_slice(&data[self.offset..self.offset + len]);
let chunk = u64::from_le_bytes(buf);
- let pow = u64::pow(10, BYTES_TO_DIGITS[len] as u32);
- self.carry = chunk + self.carry * pow;
+ self.decfifo.push(chunk, BYTES_TO_DIGITS[len]);
self.offset += len;
- self.carry_len += BYTES_TO_DIGITS[len];
- }
- match self.carry_len {
- 0 => None,
- len => {
- // take the next 3 decimal digits of the carry
- // and return 10bits of numeric data.
- let out_len = 3.min(len);
- self.carry_len -= out_len;
- let pow = u64::pow(10, self.carry_len as u32);
- let out = (self.carry / pow) as u16;
- self.carry = self.carry % pow;
- Some((out, NUM_CHARS_BITS[out_len]))
- }
}
+ self.decfifo.pop3()
}
}
}
@@ -569,8 +594,8 @@ struct EncodedMsgIterator<'a> {
impl Iterator for EncodedMsgIterator<'_> {
type Item = u8;
- // Send the bytes in interleaved mode, first byte of first block of group1,
- // then first byte of second block of group1, ...
+ /// Send the bytes in interleaved mode, first byte of first block of group1,
+ /// then first byte of second block of group1, ...
fn next(&mut self) -> Option<Self::Item> {
let em = self.em;
let blocks = em.g1_blocks + em.g2_blocks;
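
The interleaving described above emits codeword i of every block before codeword i+1 of any block. A simplified C sketch, assuming equal-sized blocks (the real iterator also handles group2 blocks being one byte longer):

#include <stdio.h>

#define NBLOCKS 3
#define BLOCK_LEN 4

int main(void)
{
	/* blocks[b][i]: codeword i of block b */
	const unsigned char blocks[NBLOCKS][BLOCK_LEN] = {
		{ 'A', 'B', 'C', 'D' },
		{ 'a', 'b', 'c', 'd' },
		{ '1', '2', '3', '4' },
	};

	/* Byte 0 of each block, then byte 1 of each block, ... */
	for (int i = 0; i < BLOCK_LEN; i++)
		for (int b = 0; b < NBLOCKS; b++)
			putchar(blocks[b][i]);
	putchar('\n');	/* prints Aa1Bb2Cc3Dd4 */
	return 0;
}
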
@@ -638,7 +663,7 @@ impl QrImage<'_> {
self.data.fill(0);
}
- // Set pixel to light color.
+ /// Set pixel to light color.
fn set(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
let mut v = self.data[off];
@@ -646,13 +671,13 @@ impl QrImage<'_> {
self.data[off] = v;
}
- // Invert a module color.
+ /// Invert a module color.
fn xor(&mut self, x: u8, y: u8) {
let off = y as usize * self.stride as usize + x as usize / 8;
self.data[off] ^= 0x80 >> (x % 8);
}
- // Draw a light square at (x, y) top left corner.
+ /// Draw a light square at (x, y) top left corner.
fn draw_square(&mut self, x: u8, y: u8, size: u8) {
for k in 0..size {
self.set(x + k, y);
@@ -784,7 +809,7 @@ impl QrImage<'_> {
vinfo != 0 && ((x >= pos && x < pos + 3 && y < 6) || (y >= pos && y < pos + 3 && x < 6))
}
- // Returns true if the module is reserved (Not usable for data and EC).
+ /// Returns true if the module is reserved (Not usable for data and EC).
fn is_reserved(&self, x: u8, y: u8) -> bool {
self.is_alignment(x, y)
|| self.is_finder(x, y)
@@ -793,13 +818,14 @@ impl QrImage<'_> {
|| self.is_version_info(x, y)
}
- // Last module to draw, at bottom left corner.
+ /// Last module to draw, at bottom left corner.
fn is_last(&self, x: u8, y: u8) -> bool {
x == 0 && y == self.width - 1
}
- // Move to the next module according to QR code order.
- // From bottom right corner, to bottom left corner.
+ /// Move to the next module according to QR code order.
+ ///
+ /// From bottom right corner, to bottom left corner.
fn next(&self, x: u8, y: u8) -> (u8, u8) {
let x_adj = if x <= 6 { x + 1 } else { x };
let column_type = (self.width - x_adj) % 4;
@@ -812,7 +838,7 @@ impl QrImage<'_> {
}
}
- // Find next module that can hold data.
+ /// Find next module that can hold data.
fn next_available(&self, x: u8, y: u8) -> (u8, u8) {
let (mut x, mut y) = self.next(x, y);
while self.is_reserved(x, y) && !self.is_last(x, y) {
@@ -841,7 +867,7 @@ impl QrImage<'_> {
}
}
- // Apply checkerboard mask to all non-reserved modules.
+ /// Apply checkerboard mask to all non-reserved modules.
fn apply_mask(&mut self) {
for x in 0..self.width {
for y in 0..self.width {
@@ -852,7 +878,7 @@ impl QrImage<'_> {
}
}
- // Draw the QR code with the provided data iterator.
+ /// Draw the QR code with the provided data iterator.
fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
// First clear the table, as it may have already some data.
self.clear();
@@ -876,7 +902,7 @@ impl QrImage<'_> {
/// will be encoded as binary segment, otherwise it will be encoded
/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than `data_size`.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
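
For context, a hedged sketch of the C-side call, assuming the prototype declared in drm_panic.h at the time of writing; the URL, buffer sizes, and kernel types in scope are illustrative:

u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len,
			 size_t data_size, u8 *tmp, size_t tmp_size);

static u8 qr_data[4096];	/* >= 4071 bytes, enough for a V40 code */
static u8 qr_tmp[4096];

static void qr_example(size_t kmsg_len)
{
	/* kmsg_len bytes of compressed kmsg are already in qr_data[]; on
	 * success the buffer is overwritten with the bitmap and the code
	 * width (in modules) is returned, 0 if the string did not fit. */
	u8 width = drm_panic_qr_generate("https://example.com/?k=",
					 qr_data, kmsg_len, sizeof(qr_data),
					 qr_tmp, sizeof(qr_tmp));
	if (!width)
		; /* fall back to drawing the plain panic screen */
}
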
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index a28b22fdd7a4..04992dfd4c79 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -141,6 +141,14 @@
* various bugs in this area with inconsistencies between the capability
* flag and per-plane properties.
*
+ * IN_FORMATS_ASYNC:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane for asynchronous flips. The blob is a
+ * struct drm_format_modifier_blob. Userspace cannot change this property.
+ * It is optional; when it is absent, userspace should expect the atomic
+ * ioctl to fail if the format/modifier pair is not supported by that
+ * plane for asynchronous flips.
+ *
* SIZE_HINTS:
* Blob property which contains the set of recommended plane size
 * which can be used for simple "cursor like" use cases (eg. no scaling).
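
The IN_FORMATS_ASYNC blob documented above reuses the IN_FORMATS layout, so existing parsers carry over; a hedged userspace sketch of walking the format/modifier pairs, using the structs from the drm_mode.h uapi:

#include <stdio.h>
#include <drm/drm_mode.h>

static void dump_pairs(const struct drm_format_modifier_blob *blob)
{
	const unsigned int *formats =
		(const void *)((const char *)blob + blob->formats_offset);
	const struct drm_format_modifier *mods =
		(const void *)((const char *)blob + blob->modifiers_offset);

	for (unsigned int i = 0; i < blob->count_modifiers; i++) {
		/* Each entry carries a 64-bit mask over the format list,
		 * starting at index mods[i].offset. */
		for (unsigned int j = 0; j < 64; j++) {
			if (mods[i].offset + j >= blob->count_formats)
				break;
			if (!(mods[i].formats & (1ULL << j)))
				continue;
			printf("format %#x + modifier %#llx\n",
			       formats[mods[i].offset + j],
			       (unsigned long long)mods[i].modifier);
		}
	}
}
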
@@ -185,9 +193,13 @@ modifiers_ptr(struct drm_format_modifier_blob *blob)
return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset);
}
-static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane)
+static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
+ struct drm_plane *plane,
+ bool (*format_mod_supported)
+ (struct drm_plane *plane,
+ u32 format,
+ u64 modifier))
{
- const struct drm_mode_config *config = &dev->mode_config;
struct drm_property_blob *blob;
struct drm_format_modifier *mod;
size_t blob_size, formats_size, modifiers_size;
@@ -213,7 +225,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
- return -1;
+ return NULL;
blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
@@ -229,10 +241,10 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod = modifiers_ptr(blob_data);
for (i = 0; i < plane->modifier_count; i++) {
for (j = 0; j < plane->format_count; j++) {
- if (!plane->funcs->format_mod_supported ||
- plane->funcs->format_mod_supported(plane,
- plane->format_types[j],
- plane->modifiers[i])) {
+ if (!format_mod_supported ||
+ format_mod_supported(plane,
+ plane->format_types[j],
+ plane->modifiers[i])) {
mod->formats |= 1ULL << j;
}
}
@@ -243,10 +255,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
mod++;
}
- drm_object_attach_property(&plane->base, config->modifiers_property,
- blob->base.id);
-
- return 0;
+ return blob;
}
/**
@@ -358,6 +367,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
const char *name, va_list ap)
{
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
static const uint64_t default_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
};
@@ -469,8 +479,24 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_plane_create_hotspot_properties(plane);
}
- if (format_modifier_count)
- create_in_format_blob(dev, plane);
+ if (format_modifier_count) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->modifiers_property,
+ blob->base.id);
+ }
+
+ if (plane->funcs->format_mod_supported_async) {
+ blob = create_in_format_blob(dev, plane,
+ plane->funcs->format_mod_supported_async);
+ if (!IS_ERR(blob))
+ drm_object_attach_property(&plane->base,
+ config->async_modifiers_property,
+ blob->base.id);
+ }
+
return 0;
}
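
On the driver side, filling the new hook is all that is needed to get the async blob attached; a hedged sketch (driver names hypothetical) of a plane that limits async flips to linear buffers:

static bool foo_format_mod_supported_async(struct drm_plane *plane,
					   u32 format, u64 modifier)
{
	/* Hypothetical policy: allow async flips only on linear buffers. */
	return modifier == DRM_FORMAT_MOD_LINEAR;
}

static const struct drm_plane_funcs foo_plane_funcs = {
	/* ...the usual plane hooks... */
	.format_mod_supported_async = foo_format_mod_supported_async,
};
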
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index bdb51c8f262e..d828502268b8 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -707,7 +707,7 @@ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- return drm_gem_vmap(obj, map);
+ return drm_gem_vmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -723,7 +723,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- drm_gem_vunmap(obj, map);
+ drm_gem_vunmap_locked(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
@@ -804,7 +804,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
- .cache_sgt_mapping = true,
.attach = drm_gem_map_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
@@ -998,7 +997,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
- return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
+ return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev));
}
EXPORT_SYMBOL(drm_gem_prime_import);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 7ba16323e7c2..6b3541159c0f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -958,15 +958,16 @@ static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res)
* cleaned up when the DRM device goes away.
*
* See drm_kms_helper_poll_init() for more information.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
*/
-int drmm_kms_helper_poll_init(struct drm_device *dev)
+void drmm_kms_helper_poll_init(struct drm_device *dev)
{
+ int ret;
+
drm_kms_helper_poll_init(dev);
- return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ ret = drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev);
+ if (ret)
+ drm_warn(dev, "Connector status will not be updated, error %d\n", ret);
}
EXPORT_SYMBOL(drmm_kms_helper_poll_init);
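
With the void return, callers no longer have anything to propagate; a hedged probe-path sketch:

static int foo_probe_tail(struct drm_device *drm)
{
	/* A failed drmm_add_action_or_reset() is only logged via
	 * drm_warn(); from the caller's point of view the call can
	 * no longer fail. */
	drmm_kms_helper_poll_init(drm);

	return drm_dev_register(drm, 0);
}
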
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4f2ab8a7b50f..636cd83ca29e 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -741,7 +741,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
- int fd, int handle)
+ int fd, int handle, u64 point)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
@@ -755,14 +755,24 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, fence);
+ if (point) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain)
+ return -ENOMEM;
+
+ drm_syncobj_add_point(syncobj, chain, fence, point);
+ } else {
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
+
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
}
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
- int handle, int *p_fd)
+ int handle, u64 point, int *p_fd)
{
int ret;
struct dma_fence *fence;
@@ -772,7 +782,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, point, 0, &fence);
if (ret)
goto err_put_fd;
@@ -869,6 +879,9 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -876,13 +889,18 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
return drm_syncobj_export_sync_file(file_private, args->handle,
- &args->fd);
+ point, &args->fd);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_handle_to_fd(file_private, args->handle,
&args->fd);
@@ -893,6 +911,9 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
{
struct drm_syncobj_handle *args = data;
+ unsigned int valid_flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE |
+ DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
+ u64 point = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
@@ -900,14 +921,20 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
+ if (args->flags & ~valid_flags)
return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE)
+ point = args->point;
+
if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
return drm_syncobj_import_sync_file_fence(file_private,
args->fd,
- args->handle);
+ args->handle,
+ point);
+
+ if (args->point)
+ return -EINVAL;
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
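
Userspace opts into the timeline semantics with the new flag plus a point value; a hedged sketch (fd and handle hypothetical, and assuming the extended struct drm_syncobj_handle uapi that carries the point field read as args->point above):

/* Export the fence at point 5 of a timeline syncobj as a sync-file fd. */
struct drm_syncobj_handle args = {
	.handle = syncobj_handle,	/* hypothetical timeline syncobj */
	.flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE |
		 DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE,
	.point = 5,
};

if (drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args) == 0)
	sync_file_fd = args.fd;
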
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 78958ddf8485..46f59883183d 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -487,7 +487,8 @@ out:
static void vblank_disable_fn(struct timer_list *t)
{
- struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
+ struct drm_vblank_crtc *vblank = timer_container_of(vblank, t,
+ disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 42e57d142554..917ad527c961 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -39,7 +39,7 @@ int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
@@ -51,7 +51,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (!obj->import_attach) {
+ if (!drm_gem_is_imported(obj)) {
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
mutex_lock(&etnaviv_obj->lock);
@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
- dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
+ dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
lockdep_assert_held(&etnaviv_obj->lock);
- ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+ ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map);
if (ret)
return NULL;
return map.vaddr;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 76a3a3e517d8..71e2e6b9d713 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -35,6 +35,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct drm_gpu_scheduler *sched = sched_job->sched;
struct etnaviv_gpu *gpu = submit->gpu;
u32 dma_addr, primid = 0;
int change;
@@ -89,7 +90,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
- list_add(&sched_job->list, &sched_job->sched->pending_list);
+ spin_lock(&sched->job_list_lock);
+ list_add(&sched_job->list, &sched->pending_list);
+ spin_unlock(&sched->job_list_lock);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b34ec6728337..29a8366513fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -379,11 +379,11 @@ static int exynos_mic_probe(struct platform_device *pdev)
struct resource res;
int ret, i;
- mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
- if (!mic) {
+ mic = devm_drm_bridge_alloc(dev, struct exynos_mic, bridge, &mic_bridge_funcs);
+ if (IS_ERR(mic)) {
DRM_DEV_ERROR(dev,
"mic: Failed to allocate memory for MIC object\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(mic);
goto err;
}
@@ -421,7 +421,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
- mic->bridge.funcs = &mic_bridge_funcs;
mic->bridge.of_node = dev->of_node;
drm_bridge_add(&mic->bridge);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e644e2382d77..e094b8bbc0f1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -159,7 +159,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
static void vidi_fake_vblank_timer(struct timer_list *t)
{
- struct vidi_context *ctx = from_timer(ctx, t, timer);
+ struct vidi_context *ctx = timer_container_of(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 03b076db9381..3bbfc1b56a65 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -260,7 +260,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev;
struct drm_device *drm;
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *base;
struct clk *pix_clk_in;
char pix_clk_name[32];
@@ -278,8 +277,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
return -ENODEV;
fsl_dev->soc = id->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
return ret;
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index aa2ea128aa2f..a2acaa699dd5 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
- depends on DRM && PCI && X86 && MMU && HAS_IOPORT
+ depends on DRM && PCI && X86 && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index 4d78b33eaa82..e6753282e70e 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -730,44 +730,3 @@ out:
return ret;
}
-
-int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn)
-{
- int ret;
- struct psb_mmu_pt *pt;
- uint32_t tmp;
- spinlock_t *lock = &pd->driver->lock;
-
- down_read(&pd->driver->sem);
- pt = psb_mmu_pt_map_lock(pd, virtual);
- if (!pt) {
- uint32_t *v;
-
- spin_lock(lock);
- v = kmap_atomic(pd->p);
- tmp = v[psb_mmu_pd_index(virtual)];
- kunmap_atomic(v);
- spin_unlock(lock);
-
- if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
- !(pd->invalid_pte & PSB_PTE_VALID)) {
- ret = -EINVAL;
- goto out;
- }
- ret = 0;
- *pfn = pd->invalid_pte >> PAGE_SHIFT;
- goto out;
- }
- tmp = pt->v[psb_mmu_pt_index(virtual)];
- if (!(tmp & PSB_PTE_VALID)) {
- ret = -EINVAL;
- } else {
- ret = 0;
- *pfn = tmp >> PAGE_SHIFT;
- }
- psb_mmu_pt_unmap_unlock(pt);
-out:
- up_read(&pd->driver->sem);
- return ret;
-}
diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h
index d4b5720ef08e..e6d39703718c 100644
--- a/drivers/gpu/drm/gma500/mmu.h
+++ b/drivers/gpu/drm/gma500/mmu.h
@@ -71,8 +71,6 @@ extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
uint32_t start_pfn,
unsigned long address,
uint32_t num_pages, int type);
-extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
- unsigned long *pfn);
extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
unsigned long address, uint32_t num_pages,
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index de8ccfe9890f..ea9b41af0867 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -658,10 +658,3 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
};
-
-/* Not used yet */
-const struct gma_clock_funcs mrst_clock_funcs = {
- .clock = mrst_lvds_clock,
- .limit = mrst_limit,
- .pll_is_valid = gma_pll_is_valid,
-};
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 9dc9dcd1b09f..979ea8ecf0d5 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -182,7 +182,6 @@ struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg,
void gma_i2c_destroy(struct gma_i2c_chan *chan);
int psb_intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter);
-extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 8be0ec340de5..45b10f30a2a9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -12,37 +12,6 @@
#include "psb_intel_drv.h"
/**
- * psb_intel_ddc_probe
- * @adapter: Associated I2C adaptor
- */
-bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
-{
- u8 out_buf[] = { 0x0, 0x0 };
- u8 buf[2];
- int ret;
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- ret = i2c_transfer(adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
-}
-
-/**
* psb_intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: Associated I2C adaptor
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index cb405771d6e2..5385a2126e45 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -309,21 +309,6 @@ out:
return ret;
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
-{
- struct gud_device *gdrm = to_gud_device(drm);
-
- if (!gdrm->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
-}
-
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
@@ -376,7 +361,6 @@ static const struct drm_driver gud_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gud_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = "gud",
@@ -434,6 +418,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
size_t max_buffer_size = 0;
struct gud_device *gdrm;
struct drm_device *drm;
+ struct device *dma_dev;
u8 *formats_dev;
u32 *formats;
int ret, i;
@@ -609,17 +594,19 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, gdrm);
- gdrm->dmadev = usb_intf_get_dma_device(intf);
- if (!gdrm->dmadev)
- dev_warn(dev, "buffer sharing not supported");
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ dev_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
ret = drm_dev_register(drm, 0);
- if (ret) {
- put_device(gdrm->dmadev);
+ if (ret)
return ret;
- }
drm_kms_helper_poll_init(drm);
@@ -638,8 +625,6 @@ static void gud_disconnect(struct usb_interface *interface)
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
- put_device(gdrm->dmadev);
- gdrm->dmadev = NULL;
}
static int gud_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 0d148a6f27aa..d6fb25388722 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -16,7 +16,6 @@
struct gud_device {
struct drm_device drm;
struct drm_simple_display_pipe pipe;
- struct device *dmadev;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 77cfcf37ddd2..adadd526641d 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -234,7 +234,7 @@ struct gud_usb_bulk_context {
static void gud_usb_bulk_timeout(struct timer_list *t)
{
- struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);
+ struct gud_usb_bulk_context *ctx = timer_container_of(ctx, t, timer);
usb_sg_cancel(&ctx->sgr);
}
@@ -261,7 +261,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
else if (ctx.sgr.bytes != len)
ret = -EIO;
- destroy_timer_on_stack(&ctx.timer);
+ timer_destroy_on_stack(&ctx.timer);
return ret;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 98d77d74999d..d1f3f5793f34 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,7 +2,6 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI
- depends on MMU
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index 95a4ed599d98..1f65c683282f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
- dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
+ dp/dp_aux.o dp/dp_link.o dp/dp_hw.o dp/dp_serdes.o hibmc_drm_dp.o \
+ hibmc_drm_debugfs.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
index 0a903cce1fa9..8732cd1d8cb6 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
@@ -8,6 +8,7 @@
#include <drm/drm_print.h>
#include "dp_comm.h"
#include "dp_reg.h"
+#include "dp_hw.h"
#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
@@ -124,7 +125,8 @@ static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_ms
/* ret >= 0: ret is size; ret < 0: ret is an error code */
static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
- struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
+ struct hibmc_dp *dp_priv = container_of(aux, struct hibmc_dp, aux);
+ struct hibmc_dp_dev *dp = dp_priv->dp_dev;
u32 aux_cmd;
int ret;
u32 val; /* val will be assigned at the beginning of readl_poll_timeout function */
@@ -151,14 +153,16 @@ static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *
return hibmc_dp_aux_parse_xfer(dp, msg);
}
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
+void hibmc_dp_aux_init(struct hibmc_dp *dp)
{
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
- hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
HIBMC_DP_MIN_PULSE_NUM);
dp->aux.transfer = hibmc_dp_aux_xfer;
- dp->aux.is_remote = 0;
+ dp->aux.name = "HIBMC DRM dp aux";
+ dp->aux.drm_dev = dp->drm_dev;
drm_dp_aux_init(&dp->aux);
+ dp->dp_dev->aux = &dp->aux;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
index 2c52a4476c4d..4add05c7f161 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
@@ -13,6 +13,8 @@
#include <linux/io.h>
#include <drm/display/drm_dp_helper.h>
+#include "dp_hw.h"
+
#define HIBMC_DP_LANE_NUM_MAX 2
struct hibmc_link_status {
@@ -32,12 +34,13 @@ struct hibmc_dp_link {
};
struct hibmc_dp_dev {
- struct drm_dp_aux aux;
+ struct drm_dp_aux *aux;
struct drm_device *dev;
void __iomem *base;
struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
struct hibmc_dp_link link;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ void __iomem *serdes_base;
};
#define dp_field_modify(reg_value, mask, val) \
@@ -57,7 +60,10 @@ struct hibmc_dp_dev {
mutex_unlock(&_dp->lock); \
} while (0)
-void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
+void hibmc_dp_aux_init(struct hibmc_dp *dp);
int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp);
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX]);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
index 74dd9956144e..08f9e1caf7fc 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
@@ -15,5 +15,7 @@
#define HIBMC_DP_CLK_EN 0x7
#define HIBMC_DP_SYNC_EN_MASK 0x3
#define HIBMC_DP_LINK_RATE_CAL 27
+#define HIBMC_DP_SYNC_DELAY(lanes) ((lanes) == 0x2 ? 86 : 46)
+#define HIBMC_DP_INT_ENABLE 0xc
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
index a8d543881c09..8f0daec7d174 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
@@ -72,6 +72,9 @@ static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *m
HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION,
+ HIBMC_DP_SYNC_DELAY(dp->link.cap.lanes));
}
static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
@@ -151,6 +154,7 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
{
struct drm_device *drm_dev = dp->drm_dev;
struct hibmc_dp_dev *dp_dev;
+ int ret;
dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
if (!dp_dev)
@@ -163,10 +167,14 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
dp_dev->dev = drm_dev;
dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
- hibmc_dp_aux_init(dp_dev);
+ hibmc_dp_aux_init(dp);
+
+ ret = hibmc_dp_serdes_init(dp_dev);
+ if (ret)
+ return ret;
dp_dev->link.cap.lanes = 0x2;
- dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
+ dp_dev->link.cap.link_rate = DP_LINK_BW_8_1;
/* hdcp data */
writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
@@ -181,6 +189,36 @@ int hibmc_dp_hw_init(struct hibmc_dp *dp)
return 0;
}
+void hibmc_dp_enable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+}
+
+void hibmc_dp_disable_int(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+}
+
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp->dp_dev, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM, 0x9);
+ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+ writel(HIBMC_DP_INT_ENABLE, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
+ writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
+}
+
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
{
struct hibmc_dp_dev *dp_dev = dp->dp_dev;
@@ -218,3 +256,52 @@ int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
return 0;
}
+
+void hibmc_dp_reset_link(struct hibmc_dp *dp)
+{
+ dp->dp_dev->link.status.clock_recovered = false;
+ dp->dp_dev->link.status.channel_equalized = false;
+}
+
+static const struct hibmc_dp_color_raw g_rgb_raw[] = {
+ {CBAR_COLOR_BAR, 0x000, 0x000, 0x000},
+ {CBAR_WHITE, 0xfff, 0xfff, 0xfff},
+ {CBAR_RED, 0xfff, 0x000, 0x000},
+ {CBAR_ORANGE, 0xfff, 0x800, 0x000},
+ {CBAR_YELLOW, 0xfff, 0xfff, 0x000},
+ {CBAR_GREEN, 0x000, 0xfff, 0x000},
+ {CBAR_CYAN, 0x000, 0x800, 0x800},
+ {CBAR_BLUE, 0x000, 0x000, 0xfff},
+ {CBAR_PURPLE, 0x800, 0x000, 0x800},
+ {CBAR_BLACK, 0x000, 0x000, 0x000},
+};
+
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+ struct hibmc_dp_color_raw raw_data;
+
+ if (cfg->enable) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(9),
+ cfg->self_timing);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(8, 1),
+ cfg->dynamic_rate);
+ if (cfg->pattern == CBAR_COLOR_BAR) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 0);
+ } else {
+ raw_data = g_rgb_raw[cfg->pattern];
+ drm_dbg_dp(dp->drm_dev, "r:%x g:%x b:%x\n", raw_data.r_value,
+ raw_data.g_value, raw_data.b_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(10), 1);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, GENMASK(23, 12),
+ raw_data.r_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(23, 12),
+ raw_data.g_value);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL1, GENMASK(11, 0),
+ raw_data.b_value);
+ }
+ }
+
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_COLOR_BAR_CTRL, BIT(0), cfg->enable);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
index 4dc13b3d9875..665f5b166dfb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
@@ -10,19 +10,55 @@
#include <drm/drm_encoder.h>
#include <drm/drm_connector.h>
#include <drm/drm_print.h>
+#include <drm/display/drm_dp_helper.h>
struct hibmc_dp_dev;
+enum hibmc_dp_cbar_pattern {
+ CBAR_COLOR_BAR,
+ CBAR_WHITE,
+ CBAR_RED,
+ CBAR_ORANGE,
+ CBAR_YELLOW,
+ CBAR_GREEN,
+ CBAR_CYAN,
+ CBAR_BLUE,
+ CBAR_PURPLE,
+ CBAR_BLACK,
+};
+
+struct hibmc_dp_color_raw {
+ enum hibmc_dp_cbar_pattern pattern;
+ u32 r_value;
+ u32 g_value;
+ u32 b_value;
+};
+
+struct hibmc_dp_cbar_cfg {
+ u8 enable;
+ u8 self_timing;
+ u8 dynamic_rate; /* 0:static, 1-255(frame):dynamic */
+ enum hibmc_dp_cbar_pattern pattern;
+};
+
struct hibmc_dp {
struct hibmc_dp_dev *dp_dev;
struct drm_device *drm_dev;
struct drm_encoder encoder;
struct drm_connector connector;
void __iomem *mmio;
+ struct drm_dp_aux aux;
+ struct hibmc_dp_cbar_cfg cfg;
+ u32 irq_status;
};
int hibmc_dp_hw_init(struct hibmc_dp *dp);
int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
+void hibmc_dp_set_cbar(struct hibmc_dp *dp, const struct hibmc_dp_cbar_cfg *cfg);
+void hibmc_dp_reset_link(struct hibmc_dp *dp);
+void hibmc_dp_hpd_cfg(struct hibmc_dp *dp);
+void hibmc_dp_enable_int(struct hibmc_dp *dp);
+void hibmc_dp_disable_int(struct hibmc_dp *dp);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
index f6355c16cc0a..74f7832ea53e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -9,6 +9,22 @@
#define HIBMC_EQ_MAX_RETRY 5
+static inline int hibmc_dp_get_serdes_rate_cfg(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.link_rate) {
+ case DP_LINK_BW_1_62:
+ return DP_SERDES_BW_1_62;
+ case DP_LINK_BW_2_7:
+ return DP_SERDES_BW_2_7;
+ case DP_LINK_BW_5_4:
+ return DP_SERDES_BW_5_4;
+ case DP_LINK_BW_8_1:
+ return DP_SERDES_BW_8_1;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
{
u8 buf[2];
@@ -26,7 +42,7 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set rate and lane count */
buf[0] = dp->link.cap.link_rate;
buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
- ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
@@ -35,17 +51,13 @@ static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
/* set 8b/10b and downspread */
buf[0] = DP_SPREAD_AMP_0_5;
buf[1] = DP_SET_ANSI_8B10B;
- ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
return ret >= 0 ? -EIO : ret;
}
- ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
- if (ret)
- drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
-
- return ret;
+ return 0;
}
static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
@@ -84,7 +96,7 @@ static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
if (ret != sizeof(buf)) {
drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -108,9 +120,13 @@ static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
return ret;
for (i = 0; i < dp->link.cap.lanes; i++)
- train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
return ret >= 0 ? -EIO : ret;
@@ -137,21 +153,29 @@ static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
return false;
}
-static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
+static int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
{
+ int ret;
+
switch (dp->link.cap.link_rate) {
case DP_LINK_BW_2_7:
dp->link.cap.link_rate = DP_LINK_BW_1_62;
- return 0;
+ break;
case DP_LINK_BW_5_4:
dp->link.cap.link_rate = DP_LINK_BW_2_7;
- return 0;
+ break;
case DP_LINK_BW_8_1:
dp->link.cap.link_rate = DP_LINK_BW_5_4;
- return 0;
+ break;
default:
return -EINVAL;
}
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ return hibmc_dp_serdes_rate_switch(ret, dp);
}
static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
@@ -159,6 +183,7 @@ static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
switch (dp->link.cap.lanes) {
case 0x2:
dp->link.cap.lanes--;
+ drm_dbg_dp(dp->dev, "dp link training reduce to 1 lane\n");
break;
case 0x1:
drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
@@ -185,10 +210,10 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
voltage_tries = 1;
for (cr_tries = 0; cr_tries < 80; cr_tries++) {
- drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_clock_recovery_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "Get lane status failed\n");
return ret;
}
@@ -206,7 +231,12 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
}
level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -233,10 +263,10 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
return ret;
for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
- drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp->aux, dp->dpcd);
- ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
- if (ret != DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(dp->aux, lane_status);
+ if (ret) {
drm_err(dp->dev, "get lane status failed\n");
break;
}
@@ -255,7 +285,12 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
}
hibmc_dp_link_get_adjust_train(dp, lane_status);
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+
+ ret = hibmc_dp_serdes_set_tx_cfg(dp, dp->link.train_set);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_write(dp->aux, DP_TRAINING_LANE0_SET,
dp->link.train_set, dp->link.cap.lanes);
if (ret != dp->link.cap.lanes) {
drm_dbg_dp(dp->dev, "Update link training failed\n");
@@ -295,6 +330,21 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
struct hibmc_dp_link *link = &dp->link;
int ret;
+ ret = drm_dp_read_dpcd_caps(dp->aux, dp->dpcd);
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+ dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+ dp->link.cap.lanes = 0x2;
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+ return ret;
+
+ ret = hibmc_dp_serdes_rate_switch(ret, dp);
+ if (ret)
+ return ret;
+
while (true) {
ret = hibmc_dp_link_training_cr_pre(dp);
if (ret)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
index 4a515c726d52..394b1e933c3a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
@@ -5,72 +5,128 @@
#define DP_REG_H
#define HIBMC_DP_AUX_CMD_ADDR 0x50
+
#define HIBMC_DP_AUX_WR_DATA0 0x54
#define HIBMC_DP_AUX_WR_DATA1 0x58
#define HIBMC_DP_AUX_WR_DATA2 0x5c
#define HIBMC_DP_AUX_WR_DATA3 0x60
#define HIBMC_DP_AUX_RD_DATA0 0x64
+
#define HIBMC_DP_AUX_REQ 0x74
+#define HIBMC_DP_CFG_AUX_REQ BIT(0)
+#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
+#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
+#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
+
#define HIBMC_DP_AUX_STATUS 0x78
+#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
+#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
+#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
+#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
+
#define HIBMC_DP_PHYIF_CTRL0 0xa0
+#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
+#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
+#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
+
#define HIBMC_DP_VIDEO_CTRL 0x100
+#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
+#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
+#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
+#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
+#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
+
#define HIBMC_DP_VIDEO_CONFIG0 0x104
+#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG1 0x108
+#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG2 0x10c
+#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_CONFIG3 0x110
+#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_VIDEO_PACKET 0x114
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
+#define HIBMC_DP_CFG_STREAM_SYNC_CALIBRATION GENMASK(31, 20)
+
#define HIBMC_DP_VIDEO_MSA0 0x118
+#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
+
#define HIBMC_DP_VIDEO_MSA1 0x11c
#define HIBMC_DP_VIDEO_MSA2 0x120
+
#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE 0X124
+#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+
+#define HIBMC_DP_COLOR_BAR_CTRL 0x260
+#define HIBMC_DP_COLOR_BAR_CTRL1 0x264
+
#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
+#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
+#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
+
#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
+#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
+
#define HIBMC_DP_HDCP_CFG 0x600
+
#define HIBMC_DP_DPTX_RST_CTRL 0x700
+#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
+
#define HIBMC_DP_DPTX_CLK_CTRL 0x704
+
#define HIBMC_DP_DPTX_GCTL0 0x708
+#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
+
#define HIBMC_DP_INTR_ENABLE 0x720
#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
+
#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
+#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
+
#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
-#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
-#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
-#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
-#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
-#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
-#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
-#define HIBMC_DP_CFG_AUX_REQ BIT(0)
-#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
-#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
-#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
-#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
-#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
-#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
-#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
-#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
-#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
-#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
-#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
-#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
-#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
-#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
-#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
-#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+#define HIBMC_DP_INTSTAT 0x1e0724
+#define HIBMC_DP_INTCLR 0x1e0728
+
+/* dp serdes reg */
+#define HIBMC_DP_HOST_OFFSET 0x10000
+#define HIBMC_DP_LANE0_RATE_OFFSET 0x4
+#define HIBMC_DP_LANE1_RATE_OFFSET 0xc
+#define HIBMC_DP_LANE_STATUS_OFFSET 0x10
+#define HIBMC_DP_PMA_LANE0_OFFSET 0x18
+#define HIBMC_DP_PMA_LANE1_OFFSET 0x1c
+#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
+#define HIBMC_DP_PMA_TXDEEMPH GENMASK(18, 1)
+#define DP_SERDES_DONE 0x3
+
+/* dp serdes TX de-emphasis configuration */
+#define DP_SERDES_VOL0_PRE0 0x280
+#define DP_SERDES_VOL0_PRE1 0x2300
+#define DP_SERDES_VOL0_PRE2 0x53c0
+#define DP_SERDES_VOL0_PRE3 0x8400
+#define DP_SERDES_VOL1_PRE0 0x380
+#define DP_SERDES_VOL1_PRE1 0x3440
+#define DP_SERDES_VOL1_PRE2 0x6480
+#define DP_SERDES_VOL2_PRE0 0x4c1
+#define DP_SERDES_VOL2_PRE1 0x4500
+#define DP_SERDES_VOL3_PRE0 0x600
+#define DP_SERDES_BW_8_1 0x3
+#define DP_SERDES_BW_5_4 0x2
+#define DP_SERDES_BW_2_7 0x1
+#define DP_SERDES_BW_1_62 0x0
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
new file mode 100644
index 000000000000..676059d4c1e6
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_serdes.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_config.h"
+#include "dp_reg.h"
+
+int hibmc_dp_serdes_set_tx_cfg(struct hibmc_dp_dev *dp, u8 train_set[HIBMC_DP_LANE_NUM_MAX])
+{
+ static const u32 serdes_tx_cfg[4][4] = { {DP_SERDES_VOL0_PRE0, DP_SERDES_VOL0_PRE1,
+ DP_SERDES_VOL0_PRE2, DP_SERDES_VOL0_PRE3},
+ {DP_SERDES_VOL1_PRE0, DP_SERDES_VOL1_PRE1,
+ DP_SERDES_VOL1_PRE2}, {DP_SERDES_VOL2_PRE0,
+ DP_SERDES_VOL2_PRE1}, {DP_SERDES_VOL3_PRE0}};
+ int cfg[2];
+ int i;
+
+ for (i = 0; i < HIBMC_DP_LANE_NUM_MAX; i++) {
+ cfg[i] = serdes_tx_cfg[FIELD_GET(DP_TRAIN_VOLTAGE_SWING_MASK, train_set[i])]
+ [FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK, train_set[i])];
+ if (!cfg[i])
+ return -EINVAL;
+
+ /* lane1 offset is 4 */
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, cfg[i]),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET + i * 4);
+ }
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes cfg failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+int hibmc_dp_serdes_rate_switch(u8 rate, struct hibmc_dp_dev *dp)
+{
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE0_RATE_OFFSET);
+ writel(rate, dp->serdes_base + HIBMC_DP_LANE1_RATE_OFFSET);
+
+ usleep_range(300, 500);
+
+ if (readl(dp->serdes_base + HIBMC_DP_LANE_STATUS_OFFSET) != DP_SERDES_DONE) {
+ drm_dbg_dp(dp->dev, "dp serdes rate switching failed\n");
+ return -EAGAIN;
+ }
+
+ if (rate < DP_SERDES_BW_8_1)
+ drm_dbg_dp(dp->dev, "reducing serdes rate to %d\n",
+ rate ? rate * HIBMC_DP_LINK_RATE_CAL * 10 : 162);
+
+ return 0;
+}
+
+int hibmc_dp_serdes_init(struct hibmc_dp_dev *dp)
+{
+ dp->serdes_base = dp->base + HIBMC_DP_HOST_OFFSET;
+
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE0_OFFSET);
+ writel(FIELD_PREP(HIBMC_DP_PMA_TXDEEMPH, DP_SERDES_VOL0_PRE0),
+ dp->serdes_base + HIBMC_DP_PMA_LANE1_OFFSET);
+
+ return hibmc_dp_serdes_rate_switch(DP_SERDES_BW_8_1, dp);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
new file mode 100644
index 000000000000..f585387c3a49
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_debugfs.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
+
+#include "hibmc_drm_drv.h"
+
+#define MAX_BUF_SIZE 12
+
+static ssize_t hibmc_control_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hibmc_drm_private *priv = file_inode(file)->i_private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int ret, idx;
+ u8 buf[MAX_BUF_SIZE];
+
+ if (count >= MAX_BUF_SIZE)
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+ /* Only 4 parameters are allowed; the ranges are as follows:
+ * [0] enable/disable the colorbar feature
+ *     0: enable colorbar, 1: disable colorbar
+ * [1] timing source of the colorbar display
+ *     0: timing follows XDP, 1: internal self timing
+ * [2] movement of the colorbar display
+ *     0: static colorbar image,
+ *     1~255: shift one color to the right every 1~255 frames
+ * [3] color type of the colorbar display
+ *     0~9: color bar, white, red, orange,
+ *     yellow, green, cyan, blue, purple, black
+ */
+ if (sscanf(buf, "%hhu %hhu %hhu %u", &cfg->enable, &cfg->self_timing,
+ &cfg->dynamic_rate, &cfg->pattern) != 4) {
+ return -EINVAL;
+ }
+
+ if (cfg->pattern > 9 || cfg->enable > 1 || cfg->self_timing > 1)
+ return -EINVAL;
+
+ ret = drm_dev_enter(&priv->dev, &idx);
+ if (!ret)
+ return -ENODEV;
+
+ hibmc_dp_set_cbar(&priv->dp, cfg);
+
+ drm_dev_exit(idx);
+
+ return count;
+}
+
+static int hibmc_dp_dbgfs_show(struct seq_file *m, void *arg)
+{
+ struct hibmc_drm_private *priv = m->private;
+ struct hibmc_dp_cbar_cfg *cfg = &priv->dp.cfg;
+ int idx;
+
+ if (!drm_dev_enter(&priv->dev, &idx))
+ return -ENODEV;
+
+ seq_printf(m, "hibmc dp colorbar cfg: %u %u %u %u\n", cfg->enable, cfg->self_timing,
+ cfg->dynamic_rate, cfg->pattern);
+
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static int hibmc_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hibmc_dp_dbgfs_show, inode->i_private);
+}
+
+static const struct file_operations hibmc_dbg_fops = {
+ .owner = THIS_MODULE,
+ .write = hibmc_control_write,
+ .read = seq_read,
+ .open = hibmc_open,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root)
+{
+ struct drm_device *dev = connector->dev;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+
+ /* Create the file in the drm directory so we don't need to remove it manually. */
+ debugfs_create_file("colorbar-cfg", 0200,
+ root, priv, &hibmc_dbg_fops);
+}
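
A hypothetical usage sketch for the new knob; the debugfs path depends on the DRM minor and connector name of the running system, and the field order follows hibmc_control_write() above:

#include <stdio.h>

int main(void)
{
	/* Path is an assumption; locate colorbar-cfg under the
	 * connector's debugfs directory on the target machine.
	 */
	FILE *f = fopen("/sys/kernel/debug/dri/0/DP-1/colorbar-cfg", "w");

	if (!f)
		return 1;
	/* enable, timing follows XDP, static image, pattern 3 */
	fprintf(f, "1 0 0 3");
	fclose(f);
	return 0;
}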
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
index 603d6b198a54..d06832e62e96 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
@@ -13,27 +13,64 @@
#include "hibmc_drm_drv.h"
#include "dp/dp_hw.h"
+#define DP_MASKED_SINK_HPD_PLUG_INT BIT(2)
+
static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
{
+ const struct drm_edid *drm_edid;
int count;
- count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
- connector->dev->mode_config.max_height);
- drm_set_preferred_mode(connector, 1024, 768); // temporary implementation
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
+
+ drm_edid_free(drm_edid);
return count;
}
+static int hibmc_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ mdelay(200);
+
+ return drm_connector_helper_detect_from_ddc(connector, ctx, force);
+}
+
static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
.get_modes = hibmc_dp_connector_get_modes,
+ .detect_ctx = hibmc_dp_detect,
};
+static int hibmc_dp_late_register(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ hibmc_dp_enable_int(dp);
+
+ return drm_dp_aux_register(&dp->aux);
+}
+
+static void hibmc_dp_early_unregister(struct drm_connector *connector)
+{
+ struct hibmc_dp *dp = to_hibmc_dp(connector);
+
+ drm_dp_aux_unregister(&dp->aux);
+
+ hibmc_dp_disable_int(dp);
+}
+
static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = hibmc_dp_late_register,
+ .early_unregister = hibmc_dp_early_unregister,
+ .debugfs_init = hibmc_debugfs_init,
};
static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
@@ -74,6 +111,31 @@ static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
.atomic_disable = hibmc_dp_encoder_disable,
};
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ if (priv->dp.irq_status & DP_MASKED_SINK_HPD_PLUG_INT) {
+ drm_dbg_dp(&priv->dev, "HPD IN isr occur!\n");
+ hibmc_dp_hpd_cfg(&priv->dp);
+ } else {
+ drm_dbg_dp(&priv->dev, "HPD OUT isr occur!\n");
+ hibmc_dp_reset_link(&priv->dp);
+ }
+
+ if (dev->registered)
+ drm_connector_helper_hpd_irq_event(&priv->dp.connector);
+
+ drm_dev_exit(idx);
+
+ return IRQ_HANDLED;
+}
+
int hibmc_dp_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
@@ -103,8 +165,8 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
- ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init_with_ddc(dev, connector, &hibmc_dp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort, &dp->aux.ddc);
if (ret) {
drm_err(dev, "init dp connector failed: %d\n", ret);
return ret;
@@ -114,5 +176,7 @@ int hibmc_dp_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
return 0;
}
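
The detect path above waits out HPD debounce and then lets the core probe the DDC bus reachable through the connector's AUX channel. Reduced to its essence, the helper amounts to something like this sketch (assuming the usual drm_probe_ddc() semantics):

static int demo_detect(struct drm_connector *connector)
{
	/* a DDC response implies a sink is attached */
	if (drm_probe_ddc(connector->ddc))
		return connector_status_connected;

	return connector_status_disconnected;
}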
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index e6de6d5edf6b..768b97f9e74a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -28,12 +28,12 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
-#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
-#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
+#include "dp/dp_reg.h"
DEFINE_DRM_GEM_FOPS(hibmc_fops);
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
+
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
@@ -51,6 +51,22 @@ static irqreturn_t hibmc_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t hibmc_dp_interrupt(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ u32 status;
+
+ status = readl(priv->mmio + HIBMC_DP_INTSTAT);
+ if (status) {
+ priv->dp.irq_status = status;
+ writel(status, priv->mmio + HIBMC_DP_INTCLR);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_HANDLED;
+}
+
static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -121,9 +137,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
- /* if DP existed, init DP */
- if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
- HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
+ /*
+ * If the serdes register reads back as non-zero, the DP block
+ * exists, so initialize it.
+ */
+ ret = readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL);
+ if (ret) {
ret = hibmc_dp_init(priv);
if (ret)
drm_err(dev, "failed to init dp: %d\n", ret);
@@ -250,15 +269,48 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
return 0;
}
-static int hibmc_unload(struct drm_device *dev)
+static void hibmc_unload(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
drm_atomic_helper_shutdown(dev);
+}
- free_irq(pdev->irq, dev);
+static int hibmc_msi_init(struct drm_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ char name[32] = {0};
+ int valid_irq_num;
+ int irq;
+ int ret;
- pci_disable_msi(to_pci_dev(dev->dev));
+ ret = pci_alloc_irq_vectors(pdev, HIBMC_MIN_VECTORS,
+ HIBMC_MAX_VECTORS, PCI_IRQ_MSI);
+ if (ret < 0) {
+ drm_err(dev, "enabling MSI failed: %d\n", ret);
+ return ret;
+ }
+
+ valid_irq_num = ret;
+
+ for (int i = 0; i < valid_irq_num; i++) {
+ snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
+ dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
+
+ irq = pci_irq_vector(pdev, i);
+
+ if (i)
+ /* PCI devices require shared interrupts. */
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ hibmc_dp_interrupt,
+ hibmc_dp_hpd_isr,
+ IRQF_SHARED, name, dev);
+ else
+ ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
+ IRQF_SHARED, name, dev);
+ if (ret) {
+ drm_err(dev, "install irq failed: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
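
The DP vector uses the classic top/bottom-half split: the hard handler only reads and acks the status register, then wakes a thread for the slow HPD work. An illustrative sketch with made-up names (note that a shared-line handler would conventionally return IRQ_NONE when the status is clear, where the handler above returns IRQ_HANDLED):

static irqreturn_t demo_hard_handler(int irq, void *arg)
{
	struct demo_dev *d = arg;		/* illustrative type */
	u32 status = readl(d->mmio + DEMO_INTSTAT);

	if (!status)
		return IRQ_NONE;		/* interrupt was not ours */

	d->irq_status = status;			/* stash for the thread */
	writel(status, d->mmio + DEMO_INTCLR);	/* ack at the source */

	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *arg)
{
	/* slow work (HPD reconfiguration, link reset) in process context */
	return IRQ_HANDLED;
}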
@@ -290,15 +342,10 @@ static int hibmc_load(struct drm_device *dev)
goto err;
}
- ret = pci_enable_msi(pdev);
+ ret = hibmc_msi_init(dev);
if (ret) {
- drm_warn(dev, "enabling MSI failed: %d\n", ret);
- } else {
- /* PCI devices require shared interrupts. */
- ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED,
- dev->driver->name, dev);
- if (ret)
- drm_warn(dev, "install irq failed: %d\n", ret);
+ drm_err(dev, "hibmc msi init failed, ret:%d\n", ret);
+ goto err;
}
/* reset all the states of crtc/plane/encoder/connector */
@@ -374,7 +421,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
static void hibmc_pci_shutdown(struct pci_dev *pdev)
{
- drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+ hibmc_pci_remove(pdev);
}
static const struct pci_device_id hibmc_pci_table[] = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index d982f1e4b958..274feabe7df0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -22,6 +22,9 @@
#include "dp/dp_hw.h"
+#define HIBMC_MIN_VECTORS 1
+#define HIBMC_MAX_VECTORS 2
+
struct hibmc_vdac {
struct drm_device *dev;
struct drm_encoder encoder;
@@ -47,6 +50,11 @@ static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
return container_of(connector, struct hibmc_vdac, connector);
}
+static inline struct hibmc_dp *to_hibmc_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct hibmc_dp, connector);
+}
+
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
{
return container_of(dev, struct hibmc_drm_private, dev);
@@ -64,4 +72,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
int hibmc_dp_init(struct hibmc_drm_private *priv);
+void hibmc_debugfs_init(struct drm_connector *connector, struct dentry *root);
+
+irqreturn_t hibmc_dp_hpd_isr(int irq, void *arg);
+
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 05e19ea4c9f9..e8a527ede854 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -60,6 +60,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
+ .detect_ctx = drm_connector_helper_detect_from_ddc,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
@@ -127,5 +128,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
drm_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 2eea9fb0e76b..e80debdc4176 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -825,7 +825,6 @@ static const struct component_ops dsi_ops = {
static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
- struct resource *res;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
@@ -833,8 +832,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
return PTR_ERR(ctx->pclk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap dsi io region\n");
return PTR_ERR(ctx->base);
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 2eb49177ac42..45c4eb008ad5 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -844,7 +844,6 @@ static struct drm_plane_funcs ade_plane_funcs = {
static void *ade_hw_ctx_alloc(struct platform_device *pdev,
struct drm_crtc *crtc)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct ade_hw_ctx *ctx = NULL;
@@ -856,8 +855,7 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
return ERR_PTR(-ENOMEM);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
return ERR_PTR(-EIO);
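
Both kirin conversions lean on devm_platform_ioremap_resource() folding the two-step lookup into a single call; in the kernel it is essentially this wrapper (sketched from memory of drivers/base/platform.c):

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}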
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ed05b131ed3a..e153686256c9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -52,7 +52,6 @@ i915-y += \
i915-y += \
soc/intel_dram.o \
soc/intel_gmch.o \
- soc/intel_pch.o \
soc/intel_rom.o
# core library code
@@ -247,6 +246,7 @@ i915-y += \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
display/intel_display_reset.o \
+ display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
display/intel_display_wa.o \
@@ -281,6 +281,7 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
+ display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
display/intel_plane_initial.o \
@@ -408,7 +409,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
#
# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
ifdef CONFIG_DRM_I915_WERROR
- cmd_checkdoc = $(srctree)/scripts/kernel-doc -none -Werror $<
+ cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $<
endif
# header test
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 206818f9ad49..f10c0fb8d2c8 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 10ab3cc73e58..49f02aca818b 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index d9c3152d4338..0713b2709412 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,6 +29,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 92d32d6b5bce..80b71bd6a837 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,6 +26,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index b42c717085f3..017b617a8069 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,6 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 280699438526..ed560e3438db 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 55b9e9bfcc4d..e0a98e6fd6d1 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -7,9 +7,11 @@
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -28,7 +30,6 @@
#include "intel_hotplug.h"
#include "intel_pch_display.h"
#include "intel_pps.h"
-#include "vlv_sideband.h"
static const struct dpll g4x_dpll[] = {
{ .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8, },
@@ -60,14 +61,13 @@ static void g4x_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dpll *divisor = NULL;
int i, count = 0;
if (display->platform.g4x) {
divisor = g4x_dpll;
count = ARRAY_SIZE(g4x_dpll);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
} else if (display->platform.cherryview) {
@@ -93,7 +93,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
@@ -141,7 +140,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
- } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ } else if (HAS_PCH_CPT(display) && port != PORT_A) {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe),
@@ -183,7 +182,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct intel_display *display, bool state)
{
- bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
+ bool cur_state = intel_de_read(display, DP_A) & EDP_PLL_ENABLE;
INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -205,12 +204,12 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
- intel_dp->DP &= ~DP_PLL_FREQ_MASK;
+ intel_dp->DP &= ~EDP_PLL_FREQ_MASK;
if (pipe_config->port_clock == 162000)
- intel_dp->DP |= DP_PLL_FREQ_162MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_162MHZ;
else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ intel_dp->DP |= EDP_PLL_FREQ_270MHZ;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -225,7 +224,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
if (display->platform.ironlake)
intel_wait_for_vblank_if_active(display, !crtc->pipe);
- intel_dp->DP |= DP_PLL_ENABLE;
+ intel_dp->DP |= EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -243,7 +242,7 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "disabling eDP PLL\n");
- intel_dp->DP &= ~DP_PLL_ENABLE;
+ intel_dp->DP &= ~EDP_PLL_ENABLE;
intel_de_write(display, DP_A, intel_dp->DP);
intel_de_posting_read(display, DP_A);
@@ -277,7 +276,6 @@ bool g4x_dp_port_enabled(struct intel_display *display,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
u32 val;
@@ -287,13 +285,13 @@ bool g4x_dp_port_enabled(struct intel_display *display,
/* asserts want to know the pipe even if the port is disabled */
if (display->platform.ivybridge && port == PORT_A)
- *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
- else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_IVB, val);
+ else if (HAS_PCH_CPT(display) && port != PORT_A)
ret &= cpt_dp_port_selected(display, port, pipe);
else if (display->platform.cherryview)
- *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK_CHV, val);
else
- *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(DP_PIPE_SEL_MASK, val);
return ret;
}
@@ -338,7 +336,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
enum port port = encoder->port;
@@ -353,7 +350,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ if (HAS_PCH_CPT(display) && port != PORT_A) {
u32 trans_dp = intel_de_read(display,
TRANS_DP_CTL(crtc->pipe));
@@ -389,13 +386,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (display->platform.g4x && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->lane_count =
- ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+ pipe_config->lane_count = REG_FIELD_GET(DP_PORT_WIDTH_MASK, tmp) + 1;
g4x_dp_get_m_n(pipe_config);
if (port == PORT_A) {
- if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(display, DP_A) & EDP_PLL_FREQ_MASK) == EDP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -416,7 +412,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
@@ -429,7 +424,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
+ (HAS_PCH_CPT(display) && port != PORT_A)) {
intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
@@ -448,7 +443,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
* to transcoder A after disabling it to allow the
* matching HDMI port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B && port != PORT_A) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -519,7 +514,7 @@ static void intel_disable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
/*
* Make sure the panel is off before trying to change the mode.
@@ -581,16 +576,10 @@ static void chv_post_disable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
intel_dp_link_down(encoder, old_crtc_state);
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void
@@ -1223,10 +1212,10 @@ static int g4x_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int ret;
- if (HAS_PCH_SPLIT(i915) && encoder->port != PORT_A)
+ if (HAS_PCH_SPLIT(display) && encoder->port != PORT_A)
crtc_state->has_pch_encoder = true;
ret = intel_dp_compute_config(encoder, crtc_state, conn_state);
@@ -1279,7 +1268,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
bool g4x_dp_init(struct intel_display *display,
i915_reg_t output_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -1353,7 +1341,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ (HAS_PCH_CPT(display) && port != PORT_A))
dig_port->dp.set_link_train = cpt_set_link_train;
else
dig_port->dp.set_link_train = g4x_set_link_train;
@@ -1370,7 +1358,7 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->set_signal_levels = g4x_set_signal_levels;
if (display->platform.valleyview || display->platform.cherryview ||
- (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ (HAS_PCH_SPLIT(display) && port != PORT_A)) {
dig_port->dp.preemph_max = intel_dp_preemph_max_3;
dig_port->dp.voltage_max = intel_dp_voltage_max_3;
} else {
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 3dc2c59a3df0..1d252432d729 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -5,8 +5,9 @@
* HDMI support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code).
*/
+#include <drm/drm_print.h>
+
#include "g4x_hdmi.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -22,13 +23,11 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_sdvo.h"
-#include "vlv_sideband.h"
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -37,7 +36,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+ if (!HAS_PCH_SPLIT(display) && crtc_state->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
@@ -52,7 +51,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
if (crtc_state->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (display->platform.cherryview)
hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
@@ -134,9 +133,8 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
@@ -155,7 +153,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 tmp, flags = 0;
int dotclock;
@@ -186,7 +183,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_AUDIO_ENABLE)
pipe_config->has_audio = true;
- if (!HAS_PCH_SPLIT(dev_priv) &&
+ if (!HAS_PCH_SPLIT(display) &&
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -383,7 +380,6 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *dig_port =
hdmi_to_dig_port(intel_hdmi);
@@ -401,7 +397,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -539,15 +535,8 @@ static void chv_hdmi_post_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- vlv_dpio_get(dev_priv);
-
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
-
- vlv_dpio_put(dev_priv);
}
static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
@@ -682,7 +671,6 @@ static bool assert_hdmi_port_valid(struct intel_display *display, enum port port
bool g4x_hdmi_init(struct intel_display *display,
i915_reg_t hdmi_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
struct intel_digital_port *dig_port;
struct intel_encoder *intel_encoder;
@@ -724,7 +712,7 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = g4x_hdmi_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
@@ -745,9 +733,9 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->post_disable = vlv_hdmi_post_disable;
} else {
intel_encoder->pre_enable = intel_hdmi_pre_enable;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_encoder->enable = cpt_enable_hdmi;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
intel_encoder->enable = ibx_enable_hdmi;
else
intel_encoder->enable = g4x_enable_hdmi;
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 674a0e5f0858..4307e2ed03d9 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -10,6 +10,7 @@
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_pcode.h"
@@ -344,10 +345,9 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "Enabled by kernel parameter: %s\n",
str_yes_no(display->params.enable_ips));
@@ -361,7 +361,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
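
The hsw_ips change swaps the i915 runtime-PM calls for display-local wrappers; the bracketing discipline is unchanged. A sketch, assuming intel_display_rpm_get()/intel_display_rpm_put() keep the usual acquire/release pairing:

struct ref_tracker *wakeref;

wakeref = intel_display_rpm_get(display);	/* keep hardware awake */
/* ... read registers, print status ... */
intel_display_rpm_put(display, wakeref);	/* must pass back the same ref */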
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 013295f66d56..a2a6d52be0a5 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -7,9 +7,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
@@ -630,85 +631,85 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
static void
bdw_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
bdw_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ivb_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
ilk_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- spin_lock_irq(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static void
vlv_primary_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&display->irq.lock);
}
static bool i9xx_plane_can_async_flip(u64 modifier)
@@ -820,7 +821,7 @@ unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
- if (intel_plane_can_async_flip(plane, fb->modifier))
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return 256 * 1024;
/* FIXME undocumented so not sure what's actually needed */
@@ -844,7 +845,7 @@ static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
- if (intel_plane_can_async_flip(plane, fb->modifier))
+ if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return 256 * 1024;
if (intel_scanout_needs_vtd_wa(display))
@@ -889,6 +890,7 @@ static const struct drm_plane_funcs i965_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i965_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs i8xx_plane_funcs = {
@@ -898,6 +900,7 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i8xx_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
struct intel_plane *
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 7c80e37c1c5f..77876ef735b7 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -9,6 +9,7 @@
#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
#include "intel_bo.h"
+#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_trace.h"
#include "intel_fb.h"
@@ -81,13 +82,14 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *i915)
+static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
const struct cxsr_latency *latency = &cxsr_latency_table[i];
- bool is_desktop = !IS_MOBILE(i915);
+ bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
i915->is_ddr3 == latency->is_ddr3 &&
@@ -96,15 +98,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *
return latency;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
return NULL;
}
-static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_punit_get(dev_priv);
@@ -120,14 +123,15 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
vlv_punit_put(dev_priv);
}
-static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
+static void chv_set_memory_pm5(struct intel_display *display, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_punit_get(dev_priv);
@@ -145,53 +149,52 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
- struct intel_display *display = &dev_priv->display;
bool was_enabled;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
- } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_PINEVIEW(dev_priv)) {
- val = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ if (display->platform.valleyview || display->platform.cherryview) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ intel_de_write(display, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF_VLV);
+ } else if (display->platform.g4x || display->platform.i965gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.pineview) {
+ val = intel_de_read(display, DSPFW3(display));
was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
if (enable)
val |= PINEVIEW_SELF_REFRESH_EN;
else
val &= ~PINEVIEW_SELF_REFRESH_EN;
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), val);
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW3(dev_priv));
- } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
- was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ intel_de_write(display, DSPFW3(display), val);
+ intel_de_posting_read(display, DSPFW3(display));
+ } else if (display->platform.i945g || display->platform.i945gm) {
+ was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
- intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
- } else if (IS_I915GM(dev_priv)) {
+ intel_de_write(display, FW_BLC_SELF, val);
+ intel_de_posting_read(display, FW_BLC_SELF);
+ } else if (display->platform.i915gm) {
/*
* FIXME can't find a bit like this for 915G, and
* yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
- was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
+ was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
- intel_uncore_write(&dev_priv->uncore, INSTPM, val);
- intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
+ intel_de_write(display, INSTPM, val);
+ intel_de_posting_read(display, INSTPM);
} else {
return false;
}
trace_intel_memory_cxsr(display, was_enabled, enable);
- drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
+ drm_dbg_kms(display->drm, "memory self-refresh is %s (was %s)\n",
str_enabled_disabled(enable),
str_enabled_disabled(was_enabled));
@@ -200,7 +203,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
/**
* intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
+ * @display: display device
* @enable: Allow vs. disallow CxSR
*
* Allow or disallow the system to enter a special CxSR
@@ -235,17 +238,17 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
* the hardware w.r.t. HPLL SR when writing to plane registers.
* Disallowing just CxSR is sufficient.
*/
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
- ret = _intel_set_memory_cxsr(dev_priv, enable);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.wm.vlv.cxsr = enable;
- else if (IS_G4X(dev_priv))
- dev_priv->display.wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(display, enable);
+ if (display->platform.valleyview || display->platform.cherryview)
+ display->wm.vlv.cxsr = enable;
+ else if (display->platform.g4x)
+ display->wm.g4x.cxsr = enable;
+ mutex_unlock(&display->wm.wm_mutex);
return ret;
}
@@ -271,8 +274,8 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
@@ -280,22 +283,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
switch (pipe) {
case PIPE_A:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
break;
case PIPE_B:
- dsparb = intel_uncore_read(&dev_priv->uncore,
- DSPARB(dev_priv));
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
+ dsparb = intel_de_read(display, DSPARB(display));
+ dsparb2 = intel_de_read(display, DSPARB2);
sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
break;
case PIPE_C:
- dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
- dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
+ dsparb2 = intel_de_read(display, DSPARB2);
+ dsparb3 = intel_de_read(display, DSPARB3);
sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
@@ -310,26 +311,26 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_CURSOR] = 63;
}
-static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i9xx_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
if (i9xx_plane == PLANE_B)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i830_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x1ff;
@@ -337,22 +338,22 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
}
-static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
+static int i845_get_fifo_size(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
+ u32 dsparb = intel_de_read(display, DSPARB(display));
int size;
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
+ drm_dbg_kms(display->drm, "FIFO size - (0x%08x) %c: %d\n",
dsparb, plane_name(i9xx_plane), size);
return size;
@@ -537,7 +538,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
/**
* intel_calculate_wm - calculate watermark level
- * @i915: the device
+ * @display: display device
* @pixel_rate: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer
@@ -555,7 +556,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
-static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
+static unsigned int intel_calculate_wm(struct intel_display *display,
int pixel_rate,
const struct intel_watermark_params *wm,
int fifo_size, int cpp,
@@ -573,10 +574,10 @@ static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
wm->guard_size;
- drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
+ drm_dbg_kms(display->drm, "FIFO entries required for mode: %d\n", entries);
wm_size = fifo_size - entries;
- drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
+ drm_dbg_kms(display->drm, "FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > wm->max_wm)
@@ -626,11 +627,11 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
crtc->config->hw.adjusted_mode.crtc_clock;
}
-static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
+static struct intel_crtc *single_enabled_crtc(struct intel_display *display)
{
struct intel_crtc *crtc, *enabled = NULL;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
@@ -641,21 +642,21 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pnv_update_wm(struct drm_i915_private *dev_priv)
+static void pnv_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned int wm;
- latency = pnv_get_cxsr_latency(dev_priv);
+ latency = pnv_get_cxsr_latency(display);
if (!latency) {
- drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
- intel_set_memory_cxsr(dev_priv, false);
+ drm_dbg_kms(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ intel_set_memory_cxsr(display, false);
return;
}
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
@@ -663,47 +664,46 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
int cpp = fb->format->cpp[0];
/* Display SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_wm,
pnv_display_wm.fifo_size,
cpp, latency->display_sr);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ reg = intel_de_read(display, DSPFW1(display));
reg &= ~DSPFW_SR_MASK;
reg |= FW_WM(wm, SR);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
+ intel_de_write(display, DSPFW1(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_wm,
pnv_display_wm.fifo_size,
4, latency->cursor_sr);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_CURSOR_SR_MASK,
- FW_WM(wm, CURSOR_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_CURSOR_SR_MASK, FW_WM(wm, CURSOR_SR));
/* Display HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_display_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
cpp, latency->display_hpll_disable);
- intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
- DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
+ intel_de_rmw(display, DSPFW3(display),
+ DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
/* cursor HPLL off SR */
- wm = intel_calculate_wm(dev_priv, pixel_rate,
+ wm = intel_calculate_wm(display, pixel_rate,
&pnv_cursor_hplloff_wm,
pnv_display_hplloff_wm.fifo_size,
4, latency->cursor_hpll_disable);
- reg = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ reg = intel_de_read(display, DSPFW3(display));
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), reg);
- drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
+ intel_de_write(display, DSPFW3(display), reg);
+ drm_dbg_kms(display->drm, "DSPFW3 register is %x\n", reg);
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
} else {
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
}
@@ -794,53 +794,51 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
return max(0, tlb_miss);
}
-static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_write_wm_values(struct intel_display *display,
const struct g4x_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
- FW_WM(wm->sr.fbc, FBC_SR) |
- FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
- FW_WM(wm->sr.cursor, CURSOR_SR) |
- FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
- FW_WM(wm->hpll.plane, HPLL_SR));
-
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
+
+ intel_de_posting_read(display, DSPFW1(display));
}
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_write_wm_values(struct intel_display *display,
const struct vlv_wm_values *wm)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm);
- intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
- (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ intel_de_write(display, VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
}
/*
@@ -848,72 +846,72 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
* high order bits so that there are no out of bounds values
* present in the registers during the reprogramming.
*/
- intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
- intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
- intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
-
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(wm->sr.cursor, CURSOR_SR));
-
- if (IS_CHERRYVIEW(dev_priv)) {
- intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
- intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPHOWM, 0);
+ intel_de_write(display, DSPHOWM1, 0);
+ intel_de_write(display, DSPFW4, 0);
+ intel_de_write(display, DSPFW5, 0);
+ intel_de_write(display, DSPFW6, 0);
+
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(wm->sr.cursor, CURSOR_SR));
+
+ if (display->platform.cherryview) {
+ intel_de_write(display, DSPFW7_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPFW8_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
+ intel_de_write(display, DSPFW9_CHV,
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
- intel_uncore_write(&dev_priv->uncore, DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
- intel_uncore_write(&dev_priv->uncore, DSPHOWM,
- FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
+ intel_de_write(display, DSPFW7,
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
+ intel_de_write(display, DSPHOWM,
+ FW_WM(wm->sr.plane >> 9, SR_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ intel_de_posting_read(display, DSPFW1(display));
}
#undef FW_WM_VLV
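For context on the DSPHOWM writes above: on VLV/CHV the plane watermark values are wider than the 8-bit DSPFW fields, so the low bits live in the DSPFW registers and the remaining high bits (the ">> 8" and ">> 9" shifts in vlv_write_wm_values()) live in the shared DSPHOWM register, which is why the shared high-order registers are zeroed before reprogramming. A minimal standalone sketch of that split-field packing; the shift/mask values below are made up for illustration, not the real DSPFW_* definitions from i915_reg.h:

	#include <stdio.h>

	/* made-up field layout: 9-bit watermark, low 8 bits in DSPFW,
	 * bit 8 in the shared DSPHOWM register */
	#define WM_LO_SHIFT	0
	#define WM_LO_MASK	(0xffu << WM_LO_SHIFT)
	#define WM_HI_SHIFT	0
	#define WM_HI_MASK	(0x1u << WM_HI_SHIFT)

	int main(void)
	{
		unsigned int wm = 0x1a3;	/* 9-bit value, bit 8 set */
		unsigned int dspfw = 0, dsphowm;

		/* zero the shared high-order register first, as the driver
		 * does, so no stale out-of-range value is visible mid-update */
		dsphowm = 0;

		dspfw |= (wm << WM_LO_SHIFT) & WM_LO_MASK;
		dsphowm |= ((wm >> 8) << WM_HI_SHIFT) & WM_HI_MASK;

		printf("DSPFW=0x%08x DSPHOWM=0x%08x\n", dspfw, dsphowm);
		return 0;
	}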
-static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void g4x_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ display->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ display->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ display->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
+ display->wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -962,11 +960,11 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
+ unsigned int latency = display->wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1017,10 +1015,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1033,13 +1031,13 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
level = max(level, G4X_WM_LEVEL_SR);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
dirty |= raw->fbc != value;
@@ -1056,8 +1054,8 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
bool dirty = false;
int level;
@@ -1069,7 +1067,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
int wm, max_wm;
@@ -1109,7 +1107,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
plane->base.name,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
@@ -1117,7 +1115,7 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
if (plane_id == PLANE_PRIMARY)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FBC watermarks: SR=%d, HPLL=%d\n",
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
@@ -1137,9 +1135,9 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (level >= dev_priv->display.wm.num_levels)
+ if (level >= display->wm.num_levels)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1281,7 +1279,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -1311,7 +1309,7 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
max(optimal->wm.plane[plane_id],
active->wm.plane[plane_id]);
- drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
+ drm_WARN_ON(display->drm, intermediate->wm.plane[plane_id] >
g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
}
@@ -1329,23 +1327,23 @@ static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->hpll.fbc = max(optimal->hpll.fbc,
active->hpll.fbc);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
(intermediate->sr.plane >
g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
intermediate->sr.cursor >
g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
intermediate->hpll_en);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
intermediate->fbc_en && intermediate->cxsr);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
intermediate->fbc_en && intermediate->hpll_en);
@@ -1376,7 +1374,7 @@ static int g4x_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+static void g4x_merge_wm(struct intel_display *display,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
@@ -1386,7 +1384,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->hpll_en = true;
wm->fbc_en = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
if (!crtc->active)
@@ -1408,7 +1406,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
wm->fbc_en = false;
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
enum pipe pipe = crtc->pipe;
@@ -1420,23 +1418,23 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+static void g4x_program_watermarks(struct intel_display *display)
{
- struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *old_wm = &display->wm.g4x;
struct g4x_wm_values new_wm = {};
- g4x_merge_wm(dev_priv, &new_wm);
+ g4x_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- g4x_write_wm_values(dev_priv, &new_wm);
+ g4x_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
*old_wm = new_wm;
}
@@ -1444,30 +1442,30 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
static void g4x_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
- g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ g4x_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
/* latency must be in 0.1us units. */
@@ -1486,18 +1484,18 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
return ret;
}
-static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void vlv_setup_wm_latency(struct intel_display *display)
{
/* all latencies in usec */
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ display->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ if (display->platform.cherryview) {
+ display->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ display->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
}
}
@@ -1505,13 +1503,13 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->display.wm.pri_latency[level] == 0)
+ if (display->wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1532,7 +1530,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->display.wm.pri_latency[level] * 10);
+ display->wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
@@ -1546,8 +1544,8 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1616,11 +1614,11 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
fifo_left -= plane_extra;
}
- drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
+ drm_WARN_ON(display->drm, active_planes != 0 && fifo_left != 0);
/* give it all to the first plane if none are active */
if (active_planes == 0) {
- drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
+ drm_WARN_ON(display->drm, fifo_left != fifo_size);
fifo_state->plane[PLANE_PRIMARY] = fifo_left;
}
@@ -1631,9 +1629,9 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
static void vlv_invalidate_wms(struct intel_crtc *crtc,
struct vlv_wm_state *wm_state, int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1659,10 +1657,10 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
bool dirty = false;
- for (; level < dev_priv->display.wm.num_levels; level++) {
+ for (; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
dirty |= raw->plane[plane_id] != value;
@@ -1675,8 +1673,8 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum plane_id plane_id = plane->id;
int level;
bool dirty = false;
@@ -1686,7 +1684,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
goto out;
}
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
@@ -1703,7 +1701,7 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
plane->base.name,
crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
@@ -1734,8 +1732,8 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
@@ -1745,7 +1743,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int level;
/* initially allow all levels */
- wm_state->num_levels = dev_priv->display.wm.num_levels;
+ wm_state->num_levels = display->wm.num_levels;
/*
* Note that enabling cxsr with no primary/sprite planes
* enabled can wedge the pipe. Hence we only allow cxsr
@@ -1755,7 +1753,7 @@ static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(display) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -1855,6 +1853,7 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
const struct intel_crtc_state *crtc_state =
@@ -1871,8 +1870,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
- drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
+ drm_WARN_ON(display->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(display->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1889,8 +1888,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
switch (crtc->pipe) {
case PIPE_A:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1902,12 +1901,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb = intel_de_read_fw(display, DSPARB(display));
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -1919,12 +1918,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB(display), dsparb);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
- dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
+ dsparb3 = intel_de_read_fw(display, DSPARB3);
+ dsparb2 = intel_de_read_fw(display, DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -1936,14 +1935,14 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
- intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
+ intel_de_write_fw(display, DSPARB3, dsparb3);
+ intel_de_write_fw(display, DSPARB2, dsparb2);
break;
default:
break;
}
- intel_uncore_posting_read_fw(uncore, DSPARB(dev_priv));
+ intel_de_read_fw(display, DSPARB(display));
spin_unlock(&uncore->lock);
}
@@ -2018,16 +2017,16 @@ static int vlv_compute_watermarks(struct intel_atomic_state *state,
return 0;
}
-static void vlv_merge_wm(struct drm_i915_private *dev_priv,
+static void vlv_merge_wm(struct intel_display *display,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->display.wm.num_levels - 1;
+ wm->level = display->wm.num_levels - 1;
wm->cxsr = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
if (!crtc->active)
@@ -2046,7 +2045,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
enum pipe pipe = crtc->pipe;
@@ -2061,35 +2060,35 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
+static void vlv_program_watermarks(struct intel_display *display)
{
- struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
+ struct vlv_wm_values *old_wm = &display->wm.vlv;
struct vlv_wm_values new_wm = {};
- vlv_merge_wm(dev_priv, &new_wm);
+ vlv_merge_wm(display, &new_wm);
if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, false);
+ chv_set_memory_dvfs(display, false);
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, false);
+ chv_set_memory_pm5(display, false);
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, false);
+ _intel_set_memory_cxsr(display, false);
- vlv_write_wm_values(dev_priv, &new_wm);
+ vlv_write_wm_values(display, &new_wm);
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
- _intel_set_memory_cxsr(dev_priv, true);
+ _intel_set_memory_cxsr(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
- chv_set_memory_pm5(dev_priv, true);
+ chv_set_memory_pm5(display, true);
if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
- chv_set_memory_dvfs(dev_priv, true);
+ chv_set_memory_dvfs(display, true);
*old_wm = new_wm;
}
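The ordering in vlv_program_watermarks() above is deliberate: power states the new configuration can no longer sustain (DDR DVFS, PM5, cxsr) are exited before the new values are written, and states it newly allows are entered only afterwards, so the hardware never runs a deeper power-saving mode against watermarks that do not cover it. The is_enabling()/is_disabling() helpers are defined earlier in i9xx_wm.c; a self-contained sketch of the pattern, with the threshold semantics inferred from their usage here:

	#include <stdbool.h>
	#include <stdio.h>

	static bool is_disabling(int old, int new, int threshold)
	{
		return old >= threshold && new < threshold;
	}

	static bool is_enabling(int old, int new, int threshold)
	{
		return old < threshold && new >= threshold;
	}

	int main(void)
	{
		int old_level = 2, new_level = 0;	/* e.g. dropping from PM5 to PM2 */
		int PM5 = 1;				/* invented threshold */

		if (is_disabling(old_level, new_level, PM5))
			printf("exit PM5 before writing watermarks\n");
		/* ... watermark register writes happen here ... */
		if (is_enabling(old_level, new_level, PM5))
			printf("enter PM5 after writing watermarks\n");
		return 0;
	}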
@@ -2097,33 +2096,33 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
static void vlv_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
- vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ vlv_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void i965_update_wm(struct drm_i915_private *dev_priv)
+static void i965_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
int srwm = 1;
@@ -2131,7 +2130,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
bool cxsr_enabled;
/* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
@@ -2152,7 +2151,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d, wm: %d\n",
entries, srwm);
@@ -2167,7 +2166,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
@@ -2175,39 +2174,38 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
} else {
cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
/* 965 has limitations... */
- intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
- FW_WM(srwm, SR) |
- FW_WM(8, CURSORB) |
- FW_WM(8, PLANEB) |
- FW_WM(8, PLANEA));
- intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
- FW_WM(8, CURSORA) |
- FW_WM(8, PLANEC_OLD));
+ intel_de_write(display, DSPFW1(display),
+ FW_WM(srwm, SR) |
+ FW_WM(8, CURSORB) |
+ FW_WM(8, PLANEB) |
+ FW_WM(8, PLANEA));
+ intel_de_write(display, DSPFW2(display),
+ FW_WM(8, CURSORA) |
+ FW_WM(8, PLANEC_OLD));
/* update cursor SR watermark */
- intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
- FW_WM(cursor_sr, CURSOR_SR));
+ intel_de_write(display, DSPFW3(display),
+ FW_WM(cursor_sr, CURSOR_SR));
if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
#undef FW_WM
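The self-refresh math in i965_update_wm() above (and the i9xx/i945 variant further down) follows the "method 2" calculation: estimate how many FIFO bytes the display drains during the self-refresh exit latency, round up to cachelines, and whatever is left of the FIFO becomes the SR watermark. A rough standalone sketch under assumed numbers; the mode timings, 64-byte cacheline and 512-entry FIFO below are illustrative, not values read from hardware:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* rough model of the method-2 step: bytes consumed during
	 * 'latency' (in 100ns units), with pixel_rate in kHz */
	static unsigned int wm_method2(unsigned int pixel_rate, unsigned int htotal,
				       unsigned int width, unsigned int cpp,
				       unsigned int latency)
	{
		unsigned int lines = (latency * pixel_rate) / (htotal * 10000) + 1;

		return lines * width * cpp;
	}

	int main(void)
	{
		/* made-up single-pipe mode: 1920x1080 @ 148.5 MHz, 4 bytes/px,
		 * 12 us SR latency passed as sr_latency_ns / 100 like above */
		unsigned int entries = wm_method2(148500, 2200, 1920, 4, 12000 / 100);
		unsigned int cacheline = 64, fifo = 512;
		int srwm;

		entries = DIV_ROUND_UP(entries, cacheline);
		srwm = (int)fifo - (int)entries;	/* leftover FIFO = SR watermark */
		if (srwm < 0)
			srwm = 1;

		printf("entries=%u srwm=%d\n", entries, srwm);
		return 0;
	}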
-static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
+static struct intel_crtc *intel_crtc_for_plane(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
if (plane->id == PLANE_PRIMARY &&
plane->i9xx_plane == i9xx_plane)
return intel_crtc_for_pipe(display, plane->pipe);
@@ -2216,7 +2214,7 @@ static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
return NULL;
}
-static void i9xx_update_wm(struct drm_i915_private *dev_priv)
+static void i9xx_update_wm(struct intel_display *display)
{
const struct intel_watermark_params *wm_info;
u32 fwater_lo;
@@ -2226,29 +2224,29 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int planea_wm, planeb_wm;
struct intel_crtc *crtc;
- if (IS_I945GM(dev_priv))
+ if (display->platform.i945gm)
wm_info = &i945_wm_info;
- else if (DISPLAY_VER(dev_priv) != 2)
+ else if (DISPLAY_VER(display) != 2)
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_A);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_A);
+ crtc = intel_crtc_for_plane(display, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2257,25 +2255,25 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planea_wm = wm_info->max_wm;
}
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
wm_info = &i830_bc_wm_info;
- if (DISPLAY_VER(dev_priv) == 2)
- fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ if (DISPLAY_VER(display) == 2)
+ fifo_size = i830_get_fifo_size(display, PLANE_B);
else
- fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
- crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
+ fifo_size = i9xx_get_fifo_size(display, PLANE_B);
+ crtc = intel_crtc_for_plane(display, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
cpp = 4;
else
cpp = fb->format->cpp[0];
- planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planeb_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
wm_info, fifo_size, cpp,
pessimal_latency_ns);
} else {
@@ -2284,11 +2282,11 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
planeb_wm = wm_info->max_wm;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
- crtc = single_enabled_crtc(dev_priv);
- if (IS_I915GM(dev_priv) && crtc) {
+ crtc = single_enabled_crtc(display);
+ if (display->platform.i915gm && crtc) {
struct drm_gem_object *obj;
obj = intel_fb_bo(crtc->base.primary->state->fb);
@@ -2304,10 +2302,10 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- intel_set_memory_cxsr(dev_priv, false);
+ intel_set_memory_cxsr(display, false);
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev_priv) && crtc) {
+ if (HAS_FW_BLC(display) && crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *pipe_mode =
@@ -2320,7 +2318,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
int cpp;
int entries;
- if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (display->platform.i915gm || display->platform.i945gm)
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2328,20 +2326,20 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ if (display->platform.i945g || display->platform.i945gm)
+ intel_de_write(display, FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else
- intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
+ intel_de_write(display, FW_BLC_SELF, srwm & 0x3f);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
@@ -2352,34 +2350,34 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
- intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
+ intel_de_write(display, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC2, fwater_hi);
if (crtc)
- intel_set_memory_cxsr(dev_priv, true);
+ intel_set_memory_cxsr(display, true);
}
-static void i845_update_wm(struct drm_i915_private *dev_priv)
+static void i845_update_wm(struct intel_display *display)
{
struct intel_crtc *crtc;
u32 fwater_lo;
int planea_wm;
- crtc = single_enabled_crtc(dev_priv);
+ crtc = single_enabled_crtc(display);
if (crtc == NULL)
return;
- planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
+ planea_wm = intel_calculate_wm(display, crtc->config->pixel_rate,
&i845_wm_info,
- i845_get_fifo_size(dev_priv, PLANE_A),
+ i845_get_fifo_size(display, PLANE_A),
4, pessimal_latency_ns);
- fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
+ fwater_lo = intel_de_read(display, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting FIFO watermarks - A: %d\n", planea_wm);
- intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
+ intel_de_write(display, FW_BLC, fwater_lo);
}
/* latency must be in 0.1us units. */
@@ -2534,24 +2532,24 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
}
static unsigned int
-ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
+ilk_display_fifo_size(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 3072;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
return 768;
else
return 512;
}
static unsigned int
-ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
+ilk_plane_wm_reg_max(struct intel_display *display,
int level, bool is_sprite)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
/* BDW primary/sprite plane watermarks */
return level == 0 ? 255 : 2047;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
/* IVB/HSW primary/sprite plane watermarks */
return level == 0 ? 127 : 1023;
else if (!is_sprite)
@@ -2563,30 +2561,30 @@ ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
}
static unsigned int
-ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
+ilk_cursor_wm_reg_max(struct intel_display *display, int level)
{
- if (DISPLAY_VER(dev_priv) >= 7)
+ if (DISPLAY_VER(display) >= 7)
return level == 0 ? 63 : 255;
else
return level == 0 ? 31 : 63;
}
-static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
+static unsigned int ilk_fbc_wm_reg_max(struct intel_display *display)
{
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
return 31;
else
return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_plane_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
+ unsigned int fifo_size = ilk_display_fifo_size(display);
/* if sprites aren't enabled, sprites get nothing */
if (is_sprite && !config->sprites_enabled)
@@ -2594,14 +2592,14 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_NUM_PIPES(dev_priv);
+ fifo_size /= INTEL_NUM_PIPES(display);
/*
* For some reason the non self refresh
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) < 7)
+ if (DISPLAY_VER(display) < 7)
fifo_size /= 2;
}
@@ -2617,11 +2615,11 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
}
/* clamp to max that the registers can hold */
- return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
+ return min(fifo_size, ilk_plane_wm_reg_max(display, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
+static unsigned int ilk_cursor_wm_max(struct intel_display *display,
int level,
const struct intel_wm_config *config)
{
@@ -2630,32 +2628,32 @@ static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(dev_priv, level);
+ return ilk_cursor_wm_reg_max(display, level);
}
-static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_maximums(struct intel_display *display,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev_priv, level, config);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_max(display, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(display, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(display, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_reg_maximums(struct intel_display *display,
int level,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
- max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
- max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
- max->fbc = ilk_fbc_wm_reg_max(dev_priv);
+ max->pri = ilk_plane_wm_reg_max(display, level, false);
+ max->spr = ilk_plane_wm_reg_max(display, level, true);
+ max->cur = ilk_cursor_wm_reg_max(display, level);
+ max->fbc = ilk_fbc_wm_reg_max(display);
}
-static bool ilk_validate_wm_level(struct drm_i915_private *i915,
+static bool ilk_validate_wm_level(struct intel_display *display,
int level,
const struct ilk_wm_maximums *max,
struct intel_wm_level *result)
@@ -2679,15 +2677,15 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
*/
if (level == 0 && !result->enable) {
if (result->pri_val > max->pri)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Primary WM%d too large %u (max %u)\n",
level, result->pri_val, max->pri);
if (result->spr_val > max->spr)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Sprite WM%d too large %u (max %u)\n",
level, result->spr_val, max->spr);
if (result->cur_val > max->cur)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
@@ -2700,7 +2698,7 @@ static bool ilk_validate_wm_level(struct drm_i915_private *i915,
return ret;
}
-static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_level(struct intel_display *display,
const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
@@ -2709,9 +2707,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->display.wm.pri_latency[level];
- u16 spr_latency = dev_priv->display.wm.spr_latency[level];
- u16 cur_latency = dev_priv->display.wm.cur_latency[level];
+ u16 pri_latency = display->wm.pri_latency[level];
+ u16 spr_latency = display->wm.spr_latency[level];
+ u16 cur_latency = display->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2735,11 +2733,12 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void hsw_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u64 sskpd;
- i915->display.wm.num_levels = 5;
+ display->wm.num_levels = 5;
sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
@@ -2752,11 +2751,12 @@ static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
}
-static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void snb_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 sskpd;
- i915->display.wm.num_levels = 4;
+ display->wm.num_levels = 4;
sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
@@ -2766,11 +2766,12 @@ static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
}
-static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void ilk_read_wm_latency(struct intel_display *display, u16 wm[])
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 mltr;
- i915->display.wm.num_levels = 3;
+ display->wm.num_levels = 3;
mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
@@ -2780,24 +2781,21 @@ static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_spr_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5])
+static void intel_fixup_cur_wm_latency(struct intel_display *display, u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (DISPLAY_VER(dev_priv) == 5)
+ if (DISPLAY_VER(display) == 5)
wm[0] = 13;
}
-static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[5], u16 min)
+static bool ilk_increase_wm_latency(struct intel_display *display, u16 wm[5], u16 min)
{
int level;
@@ -2805,13 +2803,13 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
return false;
wm[0] = max(wm[0], min);
- for (level = 1; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 1; level < display->wm.num_levels; level++)
wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
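The mixed units in ilk_increase_wm_latency() are worth spelling out: WM0 latencies are kept in 0.1 us steps while WM1+ values are stored in 0.5 us steps (see the "0.5us units" comment in ilk_compute_wm_level() above), so raising everything to at least 1.2 us means min = 12 for level 0 but DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us after rounding up, for the LP levels. A tiny worked example with made-up BIOS values:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned short wm[5] = { 5, 2, 2, 2, 2 };	/* invented BIOS values */
		unsigned short min = 12;			/* 1.2 us in 0.1 us units */
		int level, num_levels = 4;

		if (wm[0] < min)
			wm[0] = min;				/* WM0: 0.1 us units */
		for (level = 1; level < num_levels; level++) {
			unsigned short floor = DIV_ROUND_UP(min, 5); /* 0.5 us units */

			if (wm[level] < floor)
				wm[level] = floor;		/* 12/5 -> 3 = 1.5 us */
		}

		printf("WM0=%u (x0.1us) WM1=%u (x0.5us)\n",
		       (unsigned)wm[0], (unsigned)wm[1]);
		return 0;
	}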
-static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_latency_quirk(struct intel_display *display)
{
bool changed;
@@ -2819,21 +2817,21 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(display, display->wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(display, display->wm.cur_latency, 12);
if (!changed)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+static void snb_wm_lp3_irq_quirk(struct intel_display *display)
{
/*
* On some SNB machines (Thinkpad X220 Tablet at least)
@@ -2846,50 +2844,50 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->display.wm.pri_latency[3] == 0 &&
- dev_priv->display.wm.spr_latency[3] == 0 &&
- dev_priv->display.wm.cur_latency[3] == 0)
+ if (display->wm.pri_latency[3] == 0 &&
+ display->wm.spr_latency[3] == 0 &&
+ display->wm.cur_latency[3] == 0)
return;
- dev_priv->display.wm.pri_latency[3] = 0;
- dev_priv->display.wm.spr_latency[3] = 0;
- dev_priv->display.wm.cur_latency[3] = 0;
+ display->wm.pri_latency[3] = 0;
+ display->wm.spr_latency[3] = 0;
+ display->wm.cur_latency[3] = 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
}
-static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
+static void ilk_setup_wm_latency(struct intel_display *display)
{
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- else if (DISPLAY_VER(dev_priv) >= 6)
- snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ if (display->platform.broadwell || display->platform.haswell)
+ hsw_read_wm_latency(display, display->wm.pri_latency);
+ else if (DISPLAY_VER(display) >= 6)
+ snb_read_wm_latency(display, display->wm.pri_latency);
else
- ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ ilk_read_wm_latency(display, display->wm.pri_latency);
- memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
- memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
- sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(display->wm.spr_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
+ memcpy(display->wm.cur_latency, display->wm.pri_latency,
+ sizeof(display->wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
+ intel_fixup_spr_wm_latency(display, display->wm.spr_latency);
+ intel_fixup_cur_wm_latency(display, display->wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
+ intel_print_wm_latency(display, "Primary", display->wm.pri_latency);
+ intel_print_wm_latency(display, "Sprite", display->wm.spr_latency);
+ intel_print_wm_latency(display, "Cursor", display->wm.cur_latency);
- if (DISPLAY_VER(dev_priv) == 6) {
- snb_wm_latency_quirk(dev_priv);
- snb_wm_lp3_irq_quirk(dev_priv);
+ if (DISPLAY_VER(display) == 6) {
+ snb_wm_latency_quirk(display);
+ snb_wm_lp3_irq_quirk(display);
}
}
-static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct intel_display *display,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -2901,11 +2899,11 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(display, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
- if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
- drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
+ if (!ilk_validate_wm_level(display, 0, &max, &pipe_wm->wm[0])) {
+ drm_dbg_kms(display->drm, "LP0 watermark invalid\n");
return false;
}
@@ -2916,7 +2914,7 @@ static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_pipe_wm *pipe_wm;
@@ -2943,10 +2941,10 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
- usable_level = dev_priv->display.wm.num_levels - 1;
+ usable_level = display->wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(display) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2954,18 +2952,18 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
+ ilk_compute_wm_level(display, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
+ if (!ilk_validate_pipe_wm(display, pipe_wm))
return -EINVAL;
- ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+ ilk_compute_wm_reg_maximums(display, 1, &max);
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
+ ilk_compute_wm_level(display, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -2973,7 +2971,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
+ if (!ilk_validate_wm_level(display, level, &max, wm)) {
memset(wm, 0, sizeof(*wm));
break;
}
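The loop above encodes the requirement that watermark levels stay monotonic: each level is computed in order and the first one that fails ilk_validate_wm_level() is zeroed and terminates the loop, leaving every higher level disabled (the whole array was memset to zero beforehand). A small sketch of that pattern with invented numbers and a stand-in validation rule:

	#include <string.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct wm_level { unsigned int pri_val; bool enable; };

	/* stand-ins for the real per-level computation and the
	 * register-maximum check */
	static struct wm_level compute(int level)
	{
		struct wm_level wm = { .pri_val = 40u * level, .enable = true };
		return wm;
	}

	static bool validate(const struct wm_level *wm, unsigned int reg_max)
	{
		return wm->pri_val <= reg_max;
	}

	int main(void)
	{
		struct wm_level wm[4];
		unsigned int reg_max = 100;
		int level;

		memset(wm, 0, sizeof(wm));	/* like the driver's memset */
		wm[0] = compute(0);
		for (level = 1; level < 4; level++) {
			wm[level] = compute(level);
			if (!validate(&wm[level], reg_max)) {
				memset(&wm[level], 0, sizeof(wm[level]));
				break;	/* higher levels stay zeroed/disabled */
			}
		}

		for (level = 0; level < 4; level++)
			printf("WM%d: pri=%u enabled=%d\n",
			       level, wm[level].pri_val, (int)wm[level].enable);
		return 0;
	}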
@@ -2990,7 +2988,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
@@ -3015,7 +3013,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
intermediate->sprites_enabled |= active->sprites_enabled;
intermediate->sprites_scaled |= active->sprites_scaled;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
const struct intel_wm_level *active_wm = &active->wm[level];
@@ -3036,7 +3034,7 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev_priv, intermediate))
+ if (!ilk_validate_pipe_wm(display, intermediate))
return -EINVAL;
/*
@@ -3068,7 +3066,7 @@ static int ilk_compute_watermarks(struct intel_atomic_state *state,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
+static void ilk_merge_wm_level(struct intel_display *display,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3076,7 +3074,7 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
ret_wm->enable = true;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3101,31 +3099,31 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_i915_private *dev_priv,
+static void ilk_wm_merge(struct intel_display *display,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- int level, num_levels = dev_priv->display.wm.num_levels;
+ int level, num_levels = display->wm.num_levels;
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(display) < 7 || display->platform.ivybridge) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
- merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
+ merged->fbc_wm_enabled = DISPLAY_VER(display) >= 6;
/* merge each WM1+ level */
for (level = 1; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev_priv, level, wm);
+ ilk_merge_wm_level(display, level, wm);
if (level > last_enabled_level)
wm->enable = false;
- else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
+ else if (!ilk_validate_wm_level(display, level, max, wm))
/* make sure all following levels get disabled */
last_enabled_level = level - 1;
@@ -3141,8 +3139,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
}
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
- if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
+ if (DISPLAY_VER(display) == 5 && HAS_FBC(display) &&
+ display->params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3158,16 +3156,16 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+static unsigned int ilk_wm_lp_latency(struct intel_display *display,
int level)
{
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (display->platform.haswell || display->platform.broadwell)
return 2 * level;
else
- return dev_priv->display.wm.pri_latency[level];
+ return display->wm.pri_latency[level];
}
-static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_results(struct intel_display *display,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
@@ -3191,14 +3189,14 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+ WM_LP_LATENCY(ilk_wm_lp_latency(display, level)) |
WM_LP_PRIMARY(r->pri_val) |
WM_LP_CURSOR(r->cur_val);
if (r->enable)
results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
else
results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
@@ -3209,19 +3207,19 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
- drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
+ if (DISPLAY_VER(display) < 7 && r->spr_val) {
+ drm_WARN_ON(display->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
}
/* LP0 register values */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (drm_WARN_ON(&dev_priv->drm, !r->enable))
+ if (drm_WARN_ON(display->drm, !r->enable))
continue;
results->wm_pipe[pipe] =
@@ -3236,13 +3234,13 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* case both are at the same level. Prefer r1 in case they're the same.
*/
static struct intel_pipe_wm *
-ilk_find_best_result(struct drm_i915_private *dev_priv,
+ilk_find_best_result(struct intel_display *display,
struct intel_pipe_wm *r1,
struct intel_pipe_wm *r2)
{
int level, level1 = 0, level2 = 0;
- for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 1; level < display->wm.num_levels; level++) {
if (r1->wm[level].enable)
level1 = level;
if (r2->wm[level].enable)
@@ -3268,7 +3266,7 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
-static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
+static unsigned int ilk_compute_wm_dirty(struct intel_display *display,
const struct ilk_wm_values *old,
const struct ilk_wm_values *new)
{
@@ -3276,7 +3274,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
enum pipe pipe;
int wm_lp;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3314,25 +3312,25 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
return dirty;
}
-static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
+static bool _ilk_disable_lp_wm(struct intel_display *display,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
previous->wm_lp[2] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, previous->wm_lp[2]);
changed = true;
}
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
previous->wm_lp[1] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, previous->wm_lp[1]);
changed = true;
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
previous->wm_lp[0] &= ~WM_LP_ENABLE;
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, previous->wm_lp[0]);
changed = true;
}
@@ -3348,73 +3346,73 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
* The spec says we shouldn't write when we don't need, because every write
* causes WMs to be re-evaluated, expending some power.
*/
-static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
+static void ilk_write_wm_values(struct intel_display *display,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *previous = &display->wm.hw;
unsigned int dirty;
- dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
+ dirty = ilk_compute_wm_dirty(display, previous, results);
if (!dirty)
return;
- _ilk_disable_lp_wm(dev_priv, dirty);
+ _ilk_disable_lp_wm(display, dirty);
if (dirty & WM_DIRTY_PIPE(PIPE_A))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
if (dirty & WM_DIRTY_PIPE(PIPE_B))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
if (dirty & WM_DIRTY_PIPE(PIPE_C))
- intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
+ intel_de_write(display, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
if (dirty & WM_DIRTY_DDB) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- WM_MISC_DATA_PARTITION_5_6);
+ if (display->platform.haswell || display->platform.broadwell)
+ intel_de_rmw(display, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ WM_MISC_DATA_PARTITION_5_6);
else
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
- results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
- DISP_DATA_PARTITION_5_6);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
+ results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
+ DISP_DATA_PARTITION_5_6);
}
if (dirty & WM_DIRTY_FBC)
- intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
- results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
+ intel_de_rmw(display, DISP_ARB_CTL, DISP_FBC_WM_DIS,
+ results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
if (dirty & WM_DIRTY_LP(1) &&
previous->wm_lp_spr[0] != results->wm_lp_spr[0])
- intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
+ intel_de_write(display, WM1S_LP_ILK, results->wm_lp_spr[0]);
- if (DISPLAY_VER(dev_priv) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
- intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
+ intel_de_write(display, WM2S_LP_IVB, results->wm_lp_spr[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
- intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
+ intel_de_write(display, WM3S_LP_IVB, results->wm_lp_spr[2]);
}
if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
- intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
+ intel_de_write(display, WM1_LP_ILK, results->wm_lp[0]);
if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
- intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
+ intel_de_write(display, WM2_LP_ILK, results->wm_lp[1]);
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
- intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
+ intel_de_write(display, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->display.wm.hw = *results;
+ display->wm.hw = *results;
}
-bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
+bool ilk_disable_cxsr(struct intel_display *display)
{
- return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
+ return _ilk_disable_lp_wm(display, WM_DIRTY_LP_ALL);
}
-static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
+static void ilk_compute_wm_config(struct intel_display *display,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -3426,7 +3424,7 @@ static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
}
}
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_program_watermarks(struct intel_display *display)
{
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
@@ -3434,18 +3432,18 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev_priv, &config);
+ ilk_compute_wm_config(display, &config);
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
- if (DISPLAY_VER(dev_priv) >= 7 &&
+ if (DISPLAY_VER(display) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(display, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(display, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(display, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -3453,50 +3451,49 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(display, best_lp_wm, partitioning, &results);
- ilk_write_wm_values(dev_priv, &results);
+ ilk_write_wm_values(display, &results);
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ ilk_program_watermarks(display);
+ mutex_unlock(&display->wm.wm_mutex);
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct intel_display *display = to_intel_display(crtc);
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
- hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
+ hw->wm_pipe[pipe] = intel_de_read(display, WM0_PIPE_ILK(pipe));
memset(active, 0, sizeof(*active));
@@ -3523,7 +3520,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
* should be marked as enabled but zeroed,
* which is what we'd compute them to.
*/
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
active->wm[level].enable = true;
}
@@ -3572,7 +3569,7 @@ static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
+void ilk_wm_sanitize(struct intel_display *display)
{
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
@@ -3583,14 +3580,14 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ if (!display->funcs.wm->optimize_watermarks)
return;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 9))
return;
- state = drm_atomic_state_alloc(&dev_priv->drm);
- if (drm_WARN_ON(&dev_priv->drm, !state))
+ state = drm_atomic_state_alloc(display->drm);
+ if (drm_WARN_ON(display->drm, !state))
return;
intel_state = to_intel_atomic_state(state);
@@ -3606,14 +3603,14 @@ retry:
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
intel_state->skip_intermediate_wm = true;
ret = ilk_sanitize_watermarks_add_affected(state);
if (ret)
goto fail;
- ret = intel_atomic_check(&dev_priv->drm, state);
+ ret = intel_atomic_check(display->drm, state);
if (ret)
goto fail;
@@ -3643,7 +3640,7 @@ fail:
* If this actually happens, we'll have to just leave the
* BIOS-programmed watermarks untouched and hope for the best.
*/
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Could not determine valid watermarks for inherited state\n");
drm_atomic_state_put(state);
@@ -3657,18 +3654,18 @@ fail:
#define _FW_WM(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
-static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+static void g4x_read_wm_values(struct intel_display *display,
struct g4x_wm_values *wm)
{
u32 tmp;
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
wm->sr.fbc = _FW_WM(tmp, FBC_SR);
wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
@@ -3676,21 +3673,21 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
-static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+static void vlv_read_wm_values(struct intel_display *display,
struct vlv_wm_values *wm)
{
enum pipe pipe;
u32 tmp;
- for_each_pipe(dev_priv, pipe) {
- tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
+ for_each_pipe(display, pipe) {
+ tmp = intel_de_read(display, VLV_DDL(pipe));
wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
@@ -3702,34 +3699,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
+ tmp = intel_de_read(display, DSPFW1(display));
wm->sr.plane = _FW_WM(tmp, SR);
wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
+ tmp = intel_de_read(display, DSPFW2(display));
wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
+ tmp = intel_de_read(display, DSPFW3(display));
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
- if (IS_CHERRYVIEW(dev_priv)) {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
+ if (display->platform.cherryview) {
+ tmp = intel_de_read(display, DSPFW7_CHV);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
+ tmp = intel_de_read(display, DSPFW8_CHV);
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
+ tmp = intel_de_read(display, DSPFW9_CHV);
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
@@ -3741,11 +3738,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
- tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
+ tmp = intel_de_read(display, DSPFW7);
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
- tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
+ tmp = intel_de_read(display, DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
@@ -3759,16 +3756,16 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void g4x_wm_get_hw_state(struct intel_display *display)
{
- struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
+ struct g4x_wm_values *wm = &display->wm.g4x;
struct intel_crtc *crtc;
- g4x_read_wm_values(dev_priv, wm);
+ g4x_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -3833,7 +3830,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.g4x.optimal = *active;
crtc_state->wm.g4x.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3841,26 +3838,25 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE0]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
- drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
+ drm_dbg_kms(display->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
str_yes_no(wm->fbc_en));
}
-static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+static void g4x_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -3873,7 +3869,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.g4x.raw[level];
@@ -3884,36 +3880,37 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _g4x_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.g4x.intermediate =
crtc_state->wm.g4x.optimal;
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
}
- g4x_program_watermarks(dev_priv);
+ g4x_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
-static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void vlv_wm_get_hw_state(struct intel_display *display)
{
- struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
- vlv_read_wm_values(dev_priv, wm);
+ vlv_read_wm_values(display, wm);
- wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+ wm->cxsr = intel_de_read(display, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -3935,10 +3932,10 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
+ display->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -3948,7 +3945,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
vlv_punit_put(dev_priv);
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -3988,7 +3985,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
crtc_state->wm.vlv.optimal = *active;
crtc_state->wm.vlv.intermediate = *active;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
@@ -3997,20 +3994,19 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
wm->pipe[pipe].plane[PLANE_SPRITE1]);
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
-static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+static void vlv_wm_sanitize(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->display.wm.wm_mutex);
+ mutex_lock(&display->wm.wm_mutex);
- for_each_intel_plane(&dev_priv->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
@@ -4023,7 +4019,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
if (plane_state->uapi.visible)
continue;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
@@ -4031,33 +4027,33 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
}
}
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int ret;
ret = _vlv_compute_pipe_wm(crtc_state);
- drm_WARN_ON(&dev_priv->drm, ret);
+ drm_WARN_ON(display->drm, ret);
crtc_state->wm.vlv.intermediate =
crtc_state->wm.vlv.optimal;
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
}
- vlv_program_watermarks(dev_priv);
+ vlv_program_watermarks(display);
- mutex_unlock(&dev_priv->display.wm.wm_mutex);
+ mutex_unlock(&display->wm.wm_mutex);
}
/*
* FIXME should probably kill this and improve
* the real watermark readout/sanitation instead
*/
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_init_lp_watermarks(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
- intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM3_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM2_LP_ILK, WM_LP_ENABLE, 0);
+ intel_de_rmw(display, WM1_LP_ILK, WM_LP_ENABLE, 0);
/*
* Don't touch WM_LP_SPRITE_ENABLE here.
@@ -4065,37 +4061,37 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
+static void ilk_wm_get_hw_state(struct intel_display *display)
{
- struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
+ struct ilk_wm_values *hw = &display->wm.hw;
struct intel_crtc *crtc;
- ilk_init_lp_watermarks(dev_priv);
+ ilk_init_lp_watermarks(display);
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
- hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
- hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
- hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
+ hw->wm_lp[0] = intel_de_read(display, WM1_LP_ILK);
+ hw->wm_lp[1] = intel_de_read(display, WM2_LP_ILK);
+ hw->wm_lp[2] = intel_de_read(display, WM3_LP_ILK);
- hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
- if (DISPLAY_VER(dev_priv) >= 7) {
- hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
- hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
+ hw->wm_lp_spr[0] = intel_de_read(display, WM1S_LP_ILK);
+ if (DISPLAY_VER(display) >= 7) {
+ hw->wm_lp_spr[1] = intel_de_read(display, WM2S_LP_IVB);
+ hw->wm_lp_spr[2] = intel_de_read(display, WM3S_LP_IVB);
}
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
+ if (display->platform.haswell || display->platform.broadwell)
+ hw->partitioning = (intel_de_read(display, WM_MISC) &
WM_MISC_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
- else if (IS_IVYBRIDGE(dev_priv))
- hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
+ else if (display->platform.ivybridge)
+ hw->partitioning = (intel_de_read(display, DISP_ARB_CTL2) &
DISP_DATA_PARTITION_5_6) ?
INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
hw->enable_fbc_wm =
- !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+ !(intel_de_read(display, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
static const struct intel_wm_funcs ilk_wm_funcs = {
@@ -4145,39 +4141,39 @@ static const struct intel_wm_funcs i845_wm_funcs = {
static const struct intel_wm_funcs nop_funcs = {
};
-void i9xx_wm_init(struct drm_i915_private *dev_priv)
+void i9xx_wm_init(struct intel_display *display)
{
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &ilk_wm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &vlv_wm_funcs;
- } else if (IS_G4X(dev_priv)) {
- g4x_setup_wm_latency(dev_priv);
- dev_priv->display.funcs.wm = &g4x_wm_funcs;
- } else if (IS_PINEVIEW(dev_priv)) {
- if (!pnv_get_cxsr_latency(dev_priv)) {
- drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
+ if (HAS_PCH_SPLIT(display)) {
+ ilk_setup_wm_latency(display);
+ display->funcs.wm = &ilk_wm_funcs;
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ vlv_setup_wm_latency(display);
+ display->funcs.wm = &vlv_wm_funcs;
+ } else if (display->platform.g4x) {
+ g4x_setup_wm_latency(display);
+ display->funcs.wm = &g4x_wm_funcs;
+ } else if (display->platform.pineview) {
+ if (!pnv_get_cxsr_latency(display)) {
+ drm_info(display->drm, "Unknown FSB/MEM, disabling CxSR\n");
/* Disable CxSR and never update its watermark again */
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.funcs.wm = &nop_funcs;
+ intel_set_memory_cxsr(display, false);
+ display->funcs.wm = &nop_funcs;
} else {
- dev_priv->display.funcs.wm = &pnv_wm_funcs;
+ display->funcs.wm = &pnv_wm_funcs;
}
- } else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.funcs.wm = &i965_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
- } else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->display.funcs.wm = &i845_wm_funcs;
+ } else if (DISPLAY_VER(display) == 4) {
+ display->funcs.wm = &i965_wm_funcs;
+ } else if (DISPLAY_VER(display) == 3) {
+ display->funcs.wm = &i9xx_wm_funcs;
+ } else if (DISPLAY_VER(display) == 2) {
+ if (INTEL_NUM_PIPES(display) == 1)
+ display->funcs.wm = &i845_wm_funcs;
else
- dev_priv->display.funcs.wm = &i9xx_wm_funcs;
+ display->funcs.wm = &i9xx_wm_funcs;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->display.funcs.wm = &nop_funcs;
+ display->funcs.wm = &nop_funcs;
}
}
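
The conversion above is mechanical: derive a struct intel_display from
whatever object is at hand via to_intel_display(), reach watermark state
through display->wm instead of dev_priv->display.wm, and use the
intel_de_*() MMIO helpers in place of intel_uncore_*(). A minimal
before/after sketch of the pattern (wm_read_old()/wm_read_new() are
illustrative names, not functions from this patch):

	/* Before: display code funnels through drm_i915_private. */
	static u32 wm_read_old(struct intel_crtc *crtc)
	{
		struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

		return intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(crtc->pipe));
	}

	/* After: only struct intel_display is needed. */
	static u32 wm_read_new(struct intel_crtc *crtc)
	{
		struct intel_display *display = to_intel_display(crtc);

		return intel_de_read(display, WM0_PIPE_ILK(crtc->pipe));
	}
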
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index 06ac37c6c94b..7bb363b2a756 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
#ifdef I915
-bool ilk_disable_cxsr(struct drm_i915_private *i915);
-void ilk_wm_sanitize(struct drm_i915_private *i915);
-bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
-void i9xx_wm_init(struct drm_i915_private *i915);
+bool ilk_disable_cxsr(struct intel_display *display);
+void ilk_wm_sanitize(struct intel_display *display);
+bool intel_set_memory_cxsr(struct intel_display *display, bool enable);
+void i9xx_wm_init(struct intel_display *display);
#else
-static inline bool ilk_disable_cxsr(struct drm_i915_private *i915)
+static inline bool ilk_disable_cxsr(struct intel_display *display)
{
return false;
}
-static inline void ilk_wm_sanitize(struct drm_i915_private *i915)
+static inline void ilk_wm_sanitize(struct intel_display *display)
{
}
-static inline bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable)
+static inline bool intel_set_memory_cxsr(struct intel_display *display, bool enable)
{
return false;
}
-static inline void i9xx_wm_init(struct drm_i915_private *i915)
+static inline void i9xx_wm_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 402b7b2e1829..ca7033251e91 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
@@ -1826,107 +1827,56 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
.transfer = gen11_dsi_host_transfer,
};
-#define ICL_PREPARE_CNT_MAX 0x7
-#define ICL_CLK_ZERO_CNT_MAX 0xf
-#define ICL_TRAIL_CNT_MAX 0x7
-#define ICL_TCLK_PRE_CNT_MAX 0x3
-#define ICL_TCLK_POST_CNT_MAX 0x7
-#define ICL_HS_ZERO_CNT_MAX 0xf
-#define ICL_EXIT_ZERO_CNT_MAX 0x7
-
static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 hs_zero_cnt;
- u32 tclk_pre_cnt;
+ u32 tclk_prepare_esc_clk, tclk_zero_esc_clk, tclk_pre_esc_clk;
+ u32 ths_prepare_esc_clk, ths_zero_esc_clk, ths_exit_esc_clk;
tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
/*
- * prepare cnt in escape clocks
- * this field represents a hexadecimal value with a precision
- * of 1.2 – i.e. the most significant bit is the integer
- * and the least significant 2 bits are fraction bits.
- * so, the field can represent a range of 0.25 to 1.75
+ * The clock and data lane prepare timing parameters are expressed in
+ * units of 1/4 escape clocks, and all the other timing parameters in
+ * escape clocks.
*/
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
- if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
- prepare_cnt);
- prepare_cnt = ICL_PREPARE_CNT_MAX;
- }
+ tclk_prepare_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare * 4, tlpx_ns);
+ tclk_prepare_esc_clk = min(tclk_prepare_esc_clk, 7);
- /* clk zero count in escape clocks */
- clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
- ths_prepare_ns, tlpx_ns);
- if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
- clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
- }
+ tclk_zero_esc_clk = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ mipi_config->tclk_prepare, tlpx_ns);
+ tclk_zero_esc_clk = min(tclk_zero_esc_clk, 15);
- /* trail cnt in escape clocks*/
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
- if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
- trail_cnt);
- trail_cnt = ICL_TRAIL_CNT_MAX;
- }
+ tclk_pre_esc_clk = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ tclk_pre_esc_clk = min(tclk_pre_esc_clk, 3);
- /* tclk pre count in escape clocks */
- tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
- if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
- tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
- }
+ ths_prepare_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare * 4, tlpx_ns);
+ ths_prepare_esc_clk = min(ths_prepare_esc_clk, 7);
- /* hs zero cnt in escape clocks */
- hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
- ths_prepare_ns, tlpx_ns);
- if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
- hs_zero_cnt);
- hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
- }
+ ths_zero_esc_clk = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ mipi_config->ths_prepare, tlpx_ns);
+ ths_zero_esc_clk = min(ths_zero_esc_clk, 15);
- /* hs exit zero cnt in escape clocks */
- exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
- if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(display->drm,
- "exit_zero_cnt out of range (%d)\n",
- exit_zero_cnt);
- exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
- }
+ ths_exit_esc_clk = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ ths_exit_esc_clk = min(ths_exit_esc_clk, 7);
/* clock lane dphy timings */
intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
- CLK_PREPARE(prepare_cnt) |
+ CLK_PREPARE(tclk_prepare_esc_clk) |
CLK_ZERO_OVERRIDE |
- CLK_ZERO(clk_zero_cnt) |
+ CLK_ZERO(tclk_zero_esc_clk) |
CLK_PRE_OVERRIDE |
- CLK_PRE(tclk_pre_cnt) |
- CLK_TRAIL_OVERRIDE |
- CLK_TRAIL(trail_cnt));
+ CLK_PRE(tclk_pre_esc_clk));
/* data lanes dphy timings */
intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
- HS_PREPARE(prepare_cnt) |
+ HS_PREPARE(ths_prepare_esc_clk) |
HS_ZERO_OVERRIDE |
- HS_ZERO(hs_zero_cnt) |
- HS_TRAIL_OVERRIDE |
- HS_TRAIL(trail_cnt) |
+ HS_ZERO(ths_zero_esc_clk) |
HS_EXIT_OVERRIDE |
- HS_EXIT(exit_zero_cnt));
+ HS_EXIT(ths_exit_esc_clk));
intel_dsi_log_params(intel_dsi);
}
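
The rewritten icl_dphy_param_init() above replaces the per-field
*_CNT_MAX defines and out-of-range warnings with a uniform
divide-round-up-then-clamp sequence: the two prepare counts are
programmed in quarter escape clocks, everything else in whole escape
clocks. A worked example of the arithmetic (the input values are made
up for illustration, not taken from any VBT):

	/* Assume an escape clock period of 50 ns and tclk_prepare = 60 ns. */
	u32 tlpx_ns = 50;
	u32 tclk_prepare_ns = 60;

	/* Quarter-escape-clock units: DIV_ROUND_UP(60 * 4, 50) = 5. */
	u32 tclk_prepare_esc_clk = DIV_ROUND_UP(tclk_prepare_ns * 4, tlpx_ns);

	/* The CLK_PREPARE field is 3 bits wide, hence the clamp to 7. */
	tclk_prepare_esc_clk = min(tclk_prepare_esc_clk, 7);
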
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 55f3ae1e68c9..c176bdbc19a3 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -5,12 +5,15 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "intel_alpm.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_psr.h"
#include "intel_psr_regs.h"
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp)
@@ -23,7 +26,7 @@ bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp)
return intel_dp->alpm_dpcd & DP_ALPM_AUX_LESS_CAP;
}
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
+void intel_alpm_init(struct intel_dp *intel_dp)
{
u8 dpcd;
@@ -31,6 +34,7 @@ void intel_alpm_init_dpcd(struct intel_dp *intel_dp)
return;
intel_dp->alpm_dpcd = dpcd;
+ mutex_init(&intel_dp->alpm_parameters.lock);
}
/*
@@ -276,6 +280,14 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
+ if (intel_dp->alpm_parameters.lobf_disable_debug) {
+ drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
+ return;
+ }
+
+ if (intel_dp->alpm_parameters.sink_alpm_error)
+ return;
+
if (!intel_dp_is_edp(intel_dp))
return;
@@ -288,6 +300,10 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (crtc_state->has_psr)
return;
+ if (crtc_state->vrr.vmin != crtc_state->vrr.vmax ||
+ crtc_state->vrr.vmin != crtc_state->vrr.flipline)
+ return;
+
if (!(intel_alpm_aux_wake_supported(intel_dp) ||
intel_alpm_aux_less_wake_supported(intel_dp)))
return;
@@ -316,15 +332,16 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 alpm_ctl;
- if (DISPLAY_VER(display) < 20 ||
- (!intel_dp->psr.sel_update_enabled && !intel_dp_is_edp(intel_dp)))
+ if (DISPLAY_VER(display) < 20 || (!intel_psr_needs_alpm(intel_dp, crtc_state) &&
+ !crtc_state->has_lobf))
return;
+ mutex_lock(&intel_dp->alpm_parameters.lock);
/*
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
*/
- if ((intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) ||
+ if ((crtc_state->has_panel_replay && intel_dp_is_edp(intel_dp)) ||
(crtc_state->has_lobf && intel_alpm_aux_less_wake_supported(intel_dp))) {
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
@@ -353,18 +370,107 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
}
- if (crtc_state->has_lobf)
+ if (crtc_state->has_lobf) {
alpm_ctl |= ALPM_CTL_LOBF_ENABLE;
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n");
+ }
alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
lnl_alpm_configure(intel_dp, crtc_state);
+ intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
+}
+
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) < 20)
+ return;
+
+ if (crtc_state->has_lobf || crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (old_crtc_state->has_lobf) {
+ mutex_lock(&intel_dp->alpm_parameters.lock);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
+ }
+ }
+}
+
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val;
+
+ if (!intel_psr_needs_alpm(intel_dp, crtc_state) && !crtc_state->has_lobf)
+ return;
+
+ val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
+
+ if (crtc_state->has_panel_replay || (crtc_state->has_lobf &&
+ intel_alpm_aux_less_wake_supported(intel_dp)))
+ val |= DP_ALPM_MODE_AUX_LESS;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
+}
+
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+
+ if (crtc_state->has_psr || !crtc_state->has_lobf ||
+ crtc_state->has_lobf == old_crtc_state->has_lobf)
+ return;
+
+ for_each_intel_encoder_mask(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ intel_alpm_enable_sink(intel_dp, crtc_state);
+ intel_alpm_configure(intel_dp, crtc_state);
+ }
+ }
}
static int i915_edp_lobf_info_show(struct seq_file *m, void *data)
@@ -403,6 +509,32 @@ out:
DEFINE_SHOW_ATTRIBUTE(i915_edp_lobf_info);
+static int
+i915_edp_lobf_debug_get(void *data, u64 *val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ *val = intel_dp->alpm_parameters.lobf_disable_debug;
+
+ return 0;
+}
+
+static int
+i915_edp_lobf_debug_set(void *data, u64 val)
+{
+ struct intel_connector *connector = data;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ intel_dp->alpm_parameters.lobf_disable_debug = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_lobf_debug_fops,
+ i915_edp_lobf_debug_get, i915_edp_lobf_debug_set,
+ "%llu\n");
+
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
@@ -412,6 +544,55 @@ void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
return;
+ debugfs_create_file("i915_edp_lobf_debug", 0644, root,
+ connector, &i915_edp_lobf_debug_fops);
+
debugfs_create_file("i915_edp_lobf_info", 0444, root,
connector, &i915_edp_lobf_info_fops);
}
+
+void intel_alpm_disable(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->alpm_parameters.transcoder;
+
+ if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd)
+ return;
+
+ mutex_lock(&intel_dp->alpm_parameters.lock);
+
+ intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
+ ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
+ ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ intel_de_rmw(display,
+ PORT_ALPM_CTL(cpu_transcoder),
+ PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+
+ drm_dbg_kms(display->drm, "Disabling ALPM\n");
+ mutex_unlock(&intel_dp->alpm_parameters.lock);
+}
+
+bool intel_alpm_get_error(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ drm_err(display->drm, "Error reading ALPM status\n");
+ return true;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ drm_dbg_kms(display->drm, "ALPM lock timeout error\n");
+
+ /* Clear the latched error */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ return true;
+ }
+
+ return false;
+}
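
intel_alpm_get_error() reports an error both when the DPCD read itself
fails and when DP_ALPM_LOCK_TIMEOUT_ERROR is latched, clearing the
sink-side status bit before returning so the next poll starts clean. A
plausible caller, sketched under the assumption that sink events are
handled in one place (handle_alpm_sink_event() is hypothetical; only
intel_alpm_get_error() and intel_alpm_disable() come from this patch):

	/* Hypothetical sink-event handler: on a latched ALPM lock
	 * timeout, record the error and turn ALPM off on the source. */
	static void handle_alpm_sink_event(struct intel_dp *intel_dp)
	{
		if (!intel_alpm_get_error(intel_dp))
			return;

		intel_dp->alpm_parameters.sink_alpm_error = true;
		intel_alpm_disable(intel_dp);
	}
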
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index 8c409b10dce6..c9fe21e3e72c 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -12,8 +12,10 @@ struct intel_dp;
struct intel_crtc_state;
struct drm_connector_state;
struct intel_connector;
+struct intel_atomic_state;
+struct intel_crtc;
-void intel_alpm_init_dpcd(struct intel_dp *intel_dp);
+void intel_alpm_init(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
@@ -21,7 +23,15 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state);
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_alpm_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_alpm_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
+void intel_alpm_disable(struct intel_dp *intel_dp);
+bool intel_alpm_get_error(struct intel_dp *intel_dp);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 03dc54c802d3..e83feca5c9c9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -33,16 +33,17 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_tunnel.h"
+#include "intel_fb.h"
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
-#include "intel_fb.h"
#include "skl_universal_plane.h"
/**
@@ -59,17 +60,16 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct drm_property *property,
u64 *val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio)
+ if (property == display->properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->display.properties.broadcast_rgb)
+ else if (property == display->properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
- drm_dbg_atomic(&dev_priv->drm,
+ drm_dbg_atomic(display->drm,
"Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
@@ -92,22 +92,21 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_property *property,
u64 val)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->display.properties.force_audio) {
+ if (property == display->properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->display.properties.broadcast_rgb) {
+ if (property == display->properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
- drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ drm_dbg_atomic(display->drm, "Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 7276179df878..1bcfa5f4fd63 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -174,11 +174,27 @@ bool intel_plane_needs_physical(struct intel_plane *plane)
DISPLAY_INFO(display)->cursor_needs_physical;
}
-bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier)
+bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
+ u64 modifier)
{
+ if (intel_format_info_is_yuv_semiplanar(drm_format_info(format), modifier) ||
+ format == DRM_FORMAT_C8)
+ return false;
+
return plane->can_async_flip && plane->can_async_flip(modifier);
}
+bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
+ u32 format,
+ u64 modifier)
+{
+ if (!plane->funcs->format_mod_supported(plane, format, modifier))
+ return false;
+
+ return intel_plane_can_async_flip(to_intel_plane(plane),
+ format, modifier);
+}
+
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 6efac923dcbc..317320c32285 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -21,7 +21,8 @@ enum plane_id;
struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id);
-bool intel_plane_can_async_flip(struct intel_plane *plane, u64 modifier);
+bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
+ u64 modifier);
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,
unsigned int rate);
@@ -89,5 +90,8 @@ int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
int intel_atomic_check_planes(struct intel_atomic_state *state);
u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
+bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
+ u32 format,
+ u64 modifier);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
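
intel_plane_format_mod_supported_async() layers the async-flip
restriction on top of the plane's ordinary ->format_mod_supported()
hook: YUV semi-planar formats and C8 are rejected outright, then the
per-plane modifier check runs. A sketch of a caller validating a
framebuffer before queueing an async flip (can_async_flip_fb() is a
hypothetical helper; the exported function is from this patch):

	/* Hypothetical pre-flight check for an async page flip. */
	static bool can_async_flip_fb(struct drm_plane *plane,
				      const struct drm_framebuffer *fb)
	{
		return intel_plane_format_mod_supported_async(plane,
							      fb->format->format,
							      fb->modifier);
	}
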
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ea935a5d94c8..55af3a553c58 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -27,9 +27,9 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
@@ -397,6 +397,19 @@ hsw_audio_config_update(struct intel_encoder *encoder,
hsw_hdmi_audio_config_update(encoder, crtc_state);
}
+static void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder trans = crtc_state->cpu_transcoder;
+
+ if (!HAS_DP20(display))
+ return;
+
+ intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
+ enable && crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
+}
+
static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -430,6 +443,8 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
if (needs_wa_14020863754(display))
intel_de_rmw(display, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
+ intel_audio_sdp_split_update(old_crtc_state, false);
+
mutex_unlock(&display->audio.mutex);
}
@@ -555,6 +570,8 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
enable_audio_dsc_wa(encoder, crtc_state);
+ intel_audio_sdp_split_update(crtc_state, true);
+
if (needs_wa_14020863754(display))
intel_de_rmw(display, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
@@ -587,19 +604,17 @@ static void ibx_audio_regs_init(struct intel_display *display,
enum pipe pipe,
struct ibx_audio_regs *regs)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.valleyview || display->platform.cherryview) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else if (HAS_PCH_CPT(i915)) {
+ } else if (HAS_PCH_CPT(display)) {
regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
regs->aud_config = CPT_AUD_CFG(pipe);
regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
- } else if (HAS_PCH_IBX(i915)) {
+ } else if (HAS_PCH_IBX(display)) {
regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
regs->aud_config = IBX_AUD_CFG(pipe);
regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
@@ -683,16 +698,6 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
mutex_unlock(&display->audio.mutex);
}
-void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder trans = crtc_state->cpu_transcoder;
-
- if (HAS_DP20(display))
- intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
- crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
-}
-
bool intel_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -889,12 +894,10 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
*/
void intel_audio_hooks_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.g4x)
display->funcs.audio = &g4x_audio_funcs;
else if (display->platform.valleyview || display->platform.cherryview ||
- HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
+ HAS_PCH_CPT(display) || HAS_PCH_IBX(display))
display->funcs.audio = &ibx_audio_funcs;
else if (display->platform.haswell || DISPLAY_VER(display) >= 8)
display->funcs.audio = &hsw_audio_funcs;
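
The intel_audio.c change folds the previously exported SDP-split helper into a static intel_audio_sdp_split_update() invoked from the codec enable/disable paths, so the bit is now cleared on disable as well. The register update is a plain read-modify-write; the sketch below models the pattern behind intel_de_rmw() with an ordinary variable standing in for the MMIO register, so it is illustrative only (the real helper does locked MMIO access).

#include <stdio.h>

static unsigned int fake_reg; /* stand-in for AUD_DP_2DOT0_CTRL(trans) */

static unsigned int rmw(unsigned int *reg, unsigned int clear, unsigned int set)
{
	unsigned int old = *reg;

	/* clear the mask bits, then OR in the requested value */
	*reg = (old & ~clear) | set;
	return old;
}

int main(void)
{
	const unsigned int AUD_ENABLE_SDP_SPLIT = 1u << 31;
	int enable = 1, sdp_split = 1;

	/* mirrors intel_de_rmw(display, reg, AUD_ENABLE_SDP_SPLIT,
	 * enable && sdp_split ? AUD_ENABLE_SDP_SPLIT : 0) */
	rmw(&fake_reg, AUD_ENABLE_SDP_SPLIT,
	    enable && sdp_split ? AUD_ENABLE_SDP_SPLIT : 0);
	printf("reg = 0x%08x\n", fake_reg);

	rmw(&fake_reg, AUD_ENABLE_SDP_SPLIT, 0); /* the disable path */
	printf("reg = 0x%08x\n", fake_reg);
	return 0;
}
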
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index ad49eefa7182..42cf886f3d24 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -31,6 +31,5 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_audio_init(struct intel_display *display);
void intel_audio_register(struct intel_display *display);
void intel_audio_deinit(struct intel_display *display);
-void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 178dc6c8de80..5827da586003 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -10,12 +10,16 @@
#include <acpi/video.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
@@ -472,7 +476,6 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2;
@@ -485,7 +488,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
intel_de_rmw(display, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY,
panel->backlight.alternate_pwm_increment ?
LPT_PWM_GRANULARITY : 0);
@@ -502,7 +505,7 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
pch_ctl1 |= BLM_PCH_POLARITY;
/* After LPT, override is the default. */
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
intel_de_write(display, BLC_PWM_PCH_CTL1, pch_ctl1);
@@ -901,11 +904,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ with_intel_display_rpm(display) {
u32 hw_level;
drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
@@ -1065,7 +1066,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
u32 mul, clock;
@@ -1074,7 +1075,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
mul = 128;
- if (HAS_PCH_LPT_H(i915))
+ if (HAS_PCH_LPT_H(display))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
@@ -1231,12 +1232,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
alt = intel_de_read(display, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
alt = intel_de_read(display, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
@@ -1260,7 +1260,7 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(i915) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(display) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
@@ -1467,15 +1467,13 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int cnp_num_backlight_controllers(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
@@ -1483,14 +1481,12 @@ static int cnp_num_backlight_controllers(struct intel_display *display)
static bool cnp_backlight_controller_is_valid(struct intel_display *display, int controller)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (controller < 0 || controller >= cnp_num_backlight_controllers(display))
return false;
if (controller == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
@@ -1819,7 +1815,6 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
@@ -1827,14 +1822,14 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (display->platform.geminilake || display->platform.broxton) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT_H) {
- if (HAS_PCH_LPT(i915))
+ } else if (INTEL_PCH_TYPE(display) >= PCH_LPT_H) {
+ if (HAS_PCH_LPT(display))
panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
panel->backlight.pwm_funcs = &pch_pwm_funcs;
} else if (display->platform.valleyview || display->platform.cherryview) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
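
intel_backlight.c drops the open-coded wakeref bookkeeping in favor of with_intel_display_rpm(), a scoped guard that takes a runtime-PM reference on entry and releases it when the block exits. A toy sketch of that guard style follows, assuming a for-loop macro implementation; the names and refcount printing are illustrative, and a break inside the body would skip the release in this simplified version.

#include <stdbool.h>
#include <stdio.h>

struct rpm { int refcount; };

static bool rpm_get(struct rpm *rpm)
{
	rpm->refcount++;
	printf("rpm get (refcount=%d)\n", rpm->refcount);
	return true;
}

static void rpm_put(struct rpm *rpm)
{
	rpm->refcount--;
	printf("rpm put (refcount=%d)\n", rpm->refcount);
}

/* run the body exactly once, bracketed by get/put */
#define with_rpm(rpm) \
	for (bool _once = rpm_get(rpm); _once; rpm_put(rpm), _once = false)

int main(void)
{
	struct rpm rpm = { 0 };

	with_rpm(&rpm) {
		/* hardware access happens here, with the device awake */
		printf("reading backlight registers\n");
	}
	return 0;
}
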
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a8d08d7d82b3..ba7b8938b17c 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -37,6 +37,7 @@
#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
@@ -2244,28 +2245,27 @@ static const u8 adlp_ddc_pin_map[] = {
static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *ddc_pin_map;
int i, n_entries;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL || display->platform.alderlake_p) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL || display->platform.alderlake_p) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
} else if (display->platform.alderlake_s) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
return vbt_pin;
- } else if (display->platform.rocketlake && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ } else if (display->platform.rocketlake && INTEL_PCH_TYPE(display) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) {
+ } else if (HAS_PCH_TGP(display) && DISPLAY_VER(display) == 9) {
ddc_pin_map = gen9bc_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
ddc_pin_map = cnp_ddc_pin_map;
n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
} else {
@@ -2864,8 +2864,6 @@ parse_general_definitions(struct intel_display *display)
static void
init_vbt_defaults(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
display->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
@@ -2882,7 +2880,7 @@ init_vbt_defaults(struct intel_display *display)
* clock for LVDS.
*/
display->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(display,
- !HAS_PCH_SPLIT(i915));
+ !HAS_PCH_SPLIT(display));
drm_dbg_kms(display->drm, "Set default to SSC at %d kHz\n",
display->vbt.lvds_ssc_freq);
}
@@ -3115,7 +3113,6 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
{
struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
- intel_wakeref_t wakeref;
vbt = firmware_get_vbt(display, sizep);
@@ -3126,12 +3123,12 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
* If the OpRegion does not have VBT, look in SPI flash
* through MMIO or PCI mapping
*/
- if (!vbt && IS_DGFX(i915))
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ if (!vbt && display->platform.dgfx)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
if (!vbt)
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
return vbt;
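
map_ddc_pin() above stays a per-PCH table lookup; only the device pointer used to select the table changed. As a hedged sketch of the lookup itself: the table translates VBT DDC pins to hardware pins, zero entries mean "no mapping", and unmapped pins are dropped. The table contents and the warn-and-return-0 fallback below are assumptions based on the driver's upstream behavior, since that branch falls outside the quoted hunk.

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* made-up table: index = VBT pin, value = hardware pin, 0 = no entry */
static const uint8_t example_pin_map[] = { 0, 1, 2, 4, 5, 3 };

static uint8_t map_pin(const uint8_t *map, unsigned int n_entries,
		       uint8_t vbt_pin)
{
	if (vbt_pin < n_entries && map[vbt_pin] != 0)
		return map[vbt_pin];

	/* assumed fallback: warn and ignore pins with no translation */
	fprintf(stderr, "ignoring alien VBT pin %u\n", vbt_pin);
	return 0;
}

int main(void)
{
	printf("vbt pin 3 -> hw pin %u\n",
	       map_pin(example_pin_map, ARRAY_SIZE(example_pin_map), 3));
	printf("vbt pin 9 -> hw pin %u\n",
	       map_pin(example_pin_map, ARRAY_SIZE(example_pin_map), 9));
	return 0;
}
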
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index f9841f0498c6..6cd7a011b8c4 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -24,7 +24,7 @@
/*
* Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
* the VBT from the rest of the driver. Add the parsed, clean data to struct
- * intel_vbt_data within struct drm_i915_private.
+ * intel_vbt_data within struct intel_display.
*/
#ifndef _INTEL_BIOS_H_
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 98b898a1de8f..a5dd2932b852 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -39,14 +39,15 @@ struct intel_qgv_info {
u8 deinterleave;
};
-static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 dclk_ratio, dclk_reference;
u32 val;
- val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
+ val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
if (val & DG1_QCLK_REFERENCE)
dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
@@ -54,18 +55,18 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
- val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
if (val & DG1_GEAR_TYPE)
sp->dclk *= 2;
if (sp->dclk == 0)
return -EINVAL;
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
- val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
+ val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
@@ -74,22 +75,23 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int icl_pcode_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0, val2 = 0;
u16 dclk;
int ret;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
&val, &val2);
if (ret)
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -102,14 +104,15 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
- struct intel_psf_gv_point *points)
+static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
+ struct intel_psf_gv_point *points)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = 0;
int ret;
int i;
- ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -122,10 +125,10 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
-static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+static u16 icl_qgv_points_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -142,49 +145,51 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}
-static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask)
+static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
- return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) &
+ return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
ICL_PCODE_REQ_QGV_PT_MASK);
}
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
u32 points_mask)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
1);
if (ret < 0) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to disable qgv points (0x%x) points: 0x%x\n",
ret, points_mask);
return ret;
}
- dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ?
+ display->sagv.status = is_sagv_enabled(display, points_mask) ?
I915_SAGV_ENABLED : I915_SAGV_DISABLED;
return 0;
}
-static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+static int mtl_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp, int point)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 val, val2;
u16 dclk;
- val = intel_uncore_read(&dev_priv->uncore,
+ val = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
- val2 = intel_uncore_read(&dev_priv->uncore,
+ val2 = intel_uncore_read(&i915->uncore,
MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
@@ -200,29 +205,30 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
}
static int
-intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+intel_read_qgv_point_info(struct intel_display *display,
struct intel_qgv_point *sp,
int point)
{
- if (DISPLAY_VER(dev_priv) >= 14)
- return mtl_read_qgv_point_info(dev_priv, sp, point);
- else if (IS_DG1(dev_priv))
- return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_read_qgv_point_info(display, sp, point);
+ else if (display->platform.dg1)
+ return dg1_mchbar_read_qgv_point_info(display, sp, point);
else
- return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+ return icl_pcode_read_qgv_point_info(display, sp, point);
}
-static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+static int icl_get_qgv_points(struct intel_display *display,
struct intel_qgv_info *qi,
bool is_y_tile)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct dram_info *dram_info = &i915->dram_info;
int i, ret;
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = 4;
@@ -251,7 +257,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
MISSING_CASE(dram_info->type);
return -EINVAL;
}
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -266,7 +272,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->deinterleave = is_y_tile ? 1 : 2;
break;
case INTEL_DRAM_LPDDR4:
- if (IS_ROCKETLAKE(dev_priv)) {
+ if (display->platform.rocketlake) {
qi->t_bl = 8;
qi->max_numchannels = 4;
qi->channel_width = 32;
@@ -285,39 +291,39 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- } else if (DISPLAY_VER(dev_priv) == 11) {
- qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+ } else if (DISPLAY_VER(display) == 11) {
+ qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- ret = intel_read_qgv_point_info(dev_priv, sp, i);
+ ret = intel_read_qgv_point_info(display, sp, i);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i);
+ drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
return ret;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
sp->t_rcd, sp->t_rc);
}
if (qi->num_psf_points > 0) {
- ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+ ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
if (ret) {
- drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+ drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
qi->num_psf_points = 0;
}
for (i = 0; i < qi->num_psf_points; i++)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSF GV %d: CLK=%d \n",
i, qi->psf_points[i].clk);
}
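
A worked example of the QGV clock scaling used in the readout functions above: the raw dclk field counts 16.666 MHz units, so the code multiplies by 16667 and divides by 1000, with 500 added before the up-rounding divide on display 12+ so it behaves like round-to-nearest. The readout value below is arbitrary.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int raw_dclk = 162; /* hypothetical pcode readout */

	/* 162 * 16667 = 2700054; +500, then /1000 -> 2701 (~2700 MHz) */
	printf("dclk = %u MHz\n",
	       DIV_ROUND_UP(16667 * raw_dclk + 500, 1000));
	return 0;
}
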
@@ -405,20 +411,28 @@ static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
/* Other values not used by simplified algorithm */
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static const struct intel_sa_info xe3lpd_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 65, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, i915->dram_info.num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -429,7 +443,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
int clpchgroup;
int j;
@@ -456,7 +470,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * (100 - sa->derating) / 100);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
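
The derating in icl_get_bw_info() keeps (100 - derating)% of each point's raw bandwidth, capped by maxdebw; with the xe3lpd_sa_info added above, derating is 10, so each point keeps 90%. A small self-contained example with made-up figures:

#include <stdio.h>

static unsigned int derate(unsigned int maxdebw, unsigned int bw,
			   unsigned int derating)
{
	unsigned int limited = bw * (100 - derating) / 100;

	return limited < maxdebw ? limited : maxdebw;
}

int main(void)
{
	/* 60000 * 90 / 100 = 54000, then capped by maxdebw = 52000 */
	printf("deratedbw = %u\n", derate(52000, 60000, 10));
	return 0;
}
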
@@ -467,44 +481,45 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ const struct dram_info *dram_info = &i915->dram_info;
bool is_y_tile = true; /* assume y tile may be used */
- int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int num_channels = max_t(u8, 1, dram_info->num_channels);
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i, ret;
- ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ ret = icl_get_qgv_points(display, &qi, is_y_tile);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
- if (DISPLAY_VER(dev_priv) < 14 &&
+ if (DISPLAY_VER(display) < 14 &&
(dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
num_channels *= 2;
qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
- if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
+ if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
- drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
+ if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
+ drm_warn(display->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -521,7 +536,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
@@ -529,7 +544,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
if (i < num_groups - 1) {
- bi_next = &dev_priv->display.bw.max[i + 1];
+ bi_next = &display->bw.max[i + 1];
if (clpchgroup < clperchgroup)
bi_next->num_planes = (ipqdepth - clpchgroup) /
@@ -561,7 +576,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
num_channels *
qi.channel_width, 8);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
i, j, bi->num_planes, bi->deratedbw[j],
bi->peakbw[j]);
@@ -572,7 +587,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BW%d / PSF GV %d: num_planes=%d bw=%u\n",
i, j, bi->num_planes, bi->psf_bw[j]);
}
@@ -584,17 +599,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static void dg2_get_bw_info(struct drm_i915_private *i915)
+static void dg2_get_bw_info(struct intel_display *display)
{
- unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->display.bw.max);
+ unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(display->bw.max);
int i;
/*
@@ -605,7 +620,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->display.bw.max[i];
+ struct intel_bw_info *bi = &display->bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -613,20 +628,21 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}
-static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
+static int xe2_hpd_get_bw_info(struct intel_display *display,
const struct intel_sa_info *sa)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_qgv_info qi = {};
int num_channels = i915->dram_info.num_channels;
int peakbw, maxdebw;
int ret, i;
- ret = icl_get_qgv_points(i915, &qi, true);
+ ret = icl_get_qgv_points(display, &qi, true);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
@@ -638,33 +654,33 @@ static int xe2_hpd_get_bw_info(struct drm_i915_private *i915,
const struct intel_qgv_point *point = &qi.points[i];
int bw = num_channels * (qi.channel_width / 8) * point->dclk;
- i915->display.bw.max[0].deratedbw[i] =
+ display->bw.max[0].deratedbw[i] =
min(maxdebw, (100 - sa->derating) * bw / 100);
- i915->display.bw.max[0].peakbw[i] = bw;
+ display->bw.max[0].peakbw[i] = bw;
- drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
- i, i915->display.bw.max[0].deratedbw[i],
- i915->display.bw.max[0].peakbw[i]);
+ drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
+ i, display->bw.max[0].deratedbw[i],
+ display->bw.max[0].peakbw[i]);
}
/* Bandwidth does not depend on # of planes; set all groups the same */
- i915->display.bw.max[0].num_planes = 1;
- i915->display.bw.max[0].num_qgv_points = qi.num_points;
- for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++)
- memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0],
- sizeof(i915->display.bw.max[0]));
+ display->bw.max[0].num_planes = 1;
+ display->bw.max[0].num_qgv_points = qi.num_points;
+ for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
+ memcpy(&display->bw.max[i], &display->bw.max[0],
+ sizeof(display->bw.max[0]));
/*
* Xe2_HPD should always have exactly two QGV points representing
* battery and plugged-in operation.
*/
- drm_WARN_ON(&i915->drm, qi.num_points != 2);
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ drm_WARN_ON(display->drm, qi.num_points != 2);
+ display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
-static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int icl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -674,9 +690,9 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
+ for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -692,7 +708,7 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
return UINT_MAX;
}
-static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
+static unsigned int tgl_max_bw_index(struct intel_display *display,
int num_planes, int qgv_point)
{
int i;
@@ -702,9 +718,9 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[i];
+ &display->bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -720,57 +736,59 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
return 0;
}
-static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
+static unsigned int adl_psf_bw(struct intel_display *display,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->display.bw.max[0];
+ &display->bw.max[0];
return bi->psf_bw[psf_gv_point];
}
-static unsigned int icl_qgv_bw(struct drm_i915_private *i915,
+static unsigned int icl_qgv_bw(struct intel_display *display,
int num_active_planes, int qgv_point)
{
unsigned int idx;
- if (DISPLAY_VER(i915) >= 12)
- idx = tgl_max_bw_index(i915, num_active_planes, qgv_point);
+ if (DISPLAY_VER(display) >= 12)
+ idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
else
- idx = icl_max_bw_index(i915, num_active_planes, qgv_point);
+ idx = icl_max_bw_index(display, num_active_planes, qgv_point);
- if (idx >= ARRAY_SIZE(i915->display.bw.max))
+ if (idx >= ARRAY_SIZE(display->bw.max))
return 0;
- return i915->display.bw.max[idx].deratedbw[qgv_point];
+ return display->bw.max[idx].deratedbw[qgv_point];
}
-void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+void intel_bw_init_hw(struct intel_display *display)
{
- const struct dram_info *dram_info = &dev_priv->dram_info;
+ const struct dram_info *dram_info = &to_i915(display->drm)->dram_info;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv) &&
+ if (DISPLAY_VER(display) >= 30)
+ tgl_get_bw_info(display, &xe3lpd_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
dram_info->type == INTEL_DRAM_GDDR_ECC)
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_ecc_sa_info);
- else if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
- xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
- else if (DISPLAY_VER(dev_priv) >= 14)
- tgl_get_bw_info(dev_priv, &mtl_sa_info);
- else if (IS_DG2(dev_priv))
- dg2_get_bw_info(dev_priv);
- else if (IS_ALDERLAKE_P(dev_priv))
- tgl_get_bw_info(dev_priv, &adlp_sa_info);
- else if (IS_ALDERLAKE_S(dev_priv))
- tgl_get_bw_info(dev_priv, &adls_sa_info);
- else if (IS_ROCKETLAKE(dev_priv))
- tgl_get_bw_info(dev_priv, &rkl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 12)
- tgl_get_bw_info(dev_priv, &tgl_sa_info);
- else if (DISPLAY_VER(dev_priv) == 11)
- icl_get_bw_info(dev_priv, &icl_sa_info);
+ xe2_hpd_get_bw_info(display, &xe2_hpd_ecc_sa_info);
+ else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
+ xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info);
+ else if (DISPLAY_VER(display) >= 14)
+ tgl_get_bw_info(display, &mtl_sa_info);
+ else if (display->platform.dg2)
+ dg2_get_bw_info(display);
+ else if (display->platform.alderlake_p)
+ tgl_get_bw_info(display, &adlp_sa_info);
+ else if (display->platform.alderlake_s)
+ tgl_get_bw_info(display, &adls_sa_info);
+ else if (display->platform.rocketlake)
+ tgl_get_bw_info(display, &rkl_sa_info);
+ else if (DISPLAY_VER(display) == 12)
+ tgl_get_bw_info(display, &tgl_sa_info);
+ else if (DISPLAY_VER(display) == 11)
+ icl_get_bw_info(display, &icl_sa_info);
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -784,8 +802,8 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -799,7 +817,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
data_rate += crtc_state->data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->data_rate_y[plane_id];
}
@@ -807,39 +825,38 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
}
/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_bw_crtc_min_cdclk(struct intel_display *display,
+ unsigned int data_rate)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
return 0;
- return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
}
-static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_num_active_planes(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int num_active_planes = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
num_active_planes += bw_state->num_active_planes[pipe];
return num_active_planes;
}
-static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int data_rate = 0;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
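
intel_bw_crtc_min_cdclk() now takes the data rate directly, which lets intel_bw_state_changed() recompute it from bw_state->data_rate[] instead of caching min_cdclk[] per pipe. For reference, the two formulas in this hunk worked through with an arbitrary rate in the driver's data-rate units:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long data_rate = 2000000; /* made-up figure */

	/* DISPLAY_VER >= 12 pipe floor: 2000000 * 10 / 512 -> 39063 kHz */
	printf("pipe min cdclk: %llu kHz\n",
	       DIV_ROUND_UP(data_rate * 10, 512));

	/* DISPLAY_VER >= 13 with VT-d active: +5% -> 2100000 */
	printf("vtd-adjusted rate: %llu\n",
	       DIV_ROUND_UP(data_rate * 105, 100));
	return 0;
}
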
@@ -848,10 +865,10 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -859,10 +876,10 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -870,27 +887,27 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
return to_intel_bw_state(bw_state);
}
-static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
+static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
int num_active_planes)
{
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int max_bw_point = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_qgv_points; i++) {
unsigned int max_data_rate =
- icl_qgv_bw(i915, num_active_planes, i);
+ icl_qgv_bw(display, num_active_planes, i);
/*
* We need to know which qgv point gives us
@@ -909,23 +926,23 @@ static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915,
return max_bw_point;
}
-static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915,
+static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
unsigned int qgv_points,
unsigned int psf_points)
{
return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
- ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915);
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}
-static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
+static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
unsigned int max_bw_point_mask = 0;
unsigned int max_bw = 0;
int i;
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate > max_bw) {
max_bw_point_mask = BIT(i);
@@ -938,29 +955,29 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915)
return max_bw_point_mask;
}
-static void icl_force_disable_sagv(struct drm_i915_private *i915,
+static void icl_force_disable_sagv(struct intel_display *display,
struct intel_bw_state *bw_state)
{
- unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0);
- unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915);
+ unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
+ unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
- bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
- drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n",
+ drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
bw_state->qgv_points_mask);
- icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask);
+ icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}
-static int mtl_find_qgv_points(struct drm_i915_private *i915,
+static int mtl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
struct intel_bw_state *new_bw_state)
{
unsigned int best_rate = UINT_MAX;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
unsigned int qgv_peak_bw = 0;
int i;
int ret;
@@ -974,9 +991,9 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
* not enabled. PM Demand code will clamp the value for the register
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
+ if (!intel_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
- drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw.");
+ drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
return 0;
}
@@ -986,27 +1003,27 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
*/
for (i = 0; i < num_qgv_points; i++) {
unsigned int bw_index =
- tgl_max_bw_index(i915, num_active_planes, i);
+ tgl_max_bw_index(display, num_active_planes, i);
unsigned int max_data_rate;
- if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
+ if (bw_index >= ARRAY_SIZE(display->bw.max))
continue;
- max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];
+ max_data_rate = display->bw.max[bw_index].deratedbw[i];
if (max_data_rate < data_rate)
continue;
if (max_data_rate - data_rate < best_rate) {
best_rate = max_data_rate - data_rate;
- qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
+ qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
}
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
i, max_data_rate, data_rate, qgv_peak_bw);
}
- drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
+ drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
qgv_peak_bw, data_rate);
/*
@@ -1014,7 +1031,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
* satisfying the required data rate is found
*/
if (qgv_peak_bw == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
+ drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
@@ -1025,14 +1042,14 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int icl_find_qgv_points(struct drm_i915_private *i915,
+static int icl_find_qgv_points(struct intel_display *display,
unsigned int data_rate,
unsigned int num_active_planes,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
u16 psf_points = 0;
u16 qgv_points = 0;
int i;
@@ -1043,22 +1060,22 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return ret;
for (i = 0; i < num_qgv_points; i++) {
- unsigned int max_data_rate = icl_qgv_bw(i915,
+ unsigned int max_data_rate = icl_qgv_bw(display,
num_active_planes, i);
if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
+ drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
}
for (i = 0; i < num_psf_gv_points; i++) {
- unsigned int max_data_rate = adl_psf_bw(i915, i);
+ unsigned int max_data_rate = adl_psf_bw(display, i);
if (max_data_rate >= data_rate)
psf_points |= BIT(i);
- drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
+ drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
" required %d\n",
i, max_data_rate, data_rate);
}
@@ -1069,14 +1086,14 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* reasons.
*/
if (qgv_points == 0) {
- drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
}
if (num_psf_gv_points > 0 && psf_points == 0) {
- drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
+ drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
" bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes);
return -EINVAL;
@@ -1087,9 +1104,9 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* we can't enable SAGV due to the increased memory latency it may
* cause.
*/
- if (!intel_can_enable_sagv(i915, new_bw_state)) {
- qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes);
- drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n",
+ if (!intel_can_enable_sagv(display, new_bw_state)) {
+ qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
+ drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
}
@@ -1097,7 +1114,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
* We store the ones which need to be masked as that is what PCode
* actually accepts as a parameter.
*/
- new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915,
+ new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points);
/*
@@ -1113,80 +1130,90 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
return 0;
}
-static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
+static int intel_bw_check_qgv_points(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
struct intel_bw_state *new_bw_state)
{
- unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
+ unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
unsigned int num_active_planes =
- intel_bw_num_active_planes(i915, new_bw_state);
+ intel_bw_num_active_planes(display, new_bw_state);
data_rate = DIV_ROUND_UP(data_rate, 1000);
- if (DISPLAY_VER(i915) >= 14)
- return mtl_find_qgv_points(i915, data_rate, num_active_planes,
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_find_qgv_points(display, data_rate, num_active_planes,
new_bw_state);
else
- return icl_find_qgv_points(i915, data_rate, num_active_planes,
+ return icl_find_qgv_points(display, data_rate, num_active_planes,
old_bw_state, new_bw_state);
}
-static bool intel_bw_state_changed(struct drm_i915_private *i915,
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_bw_state_changed(struct intel_display *display,
const struct intel_bw_state *old_bw_state,
const struct intel_bw_state *new_bw_state)
{
enum pipe pipe;
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *old_crtc_bw =
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
&old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_crtc_bw =
+ const struct intel_dbuf_bw *new_dbuf_bw =
&new_bw_state->dbuf_bw[pipe];
- enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
- if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
- old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
- return true;
- }
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
- if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
+ if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
+ intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
return true;
}
return false;
}
-static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
struct intel_crtc *crtc,
enum plane_id plane_id,
const struct skl_ddb_entry *ddb,
unsigned int data_rate)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
enum dbuf_slice slice;
/*
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
- crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
- crtc_bw->active_planes[slice] |= BIT(plane_id);
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
}
}
-static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
enum plane_id plane_id;
- memset(crtc_bw, 0, sizeof(*crtc_bw));
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
if (!crtc_state->hw.active)
return;
@@ -1199,12 +1226,12 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
if (plane_id == PLANE_CURSOR)
continue;
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb[plane_id],
crtc_state->data_rate[plane_id]);
- if (DISPLAY_VER(i915) < 11)
- skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
&crtc_state->wm.skl.plane_ddb_y[plane_id],
crtc_state->data_rate[plane_id]);
}
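
skl_plane_calc_dbuf_bw() and skl_crtc_calc_dbuf_bw() now fill a caller-provided intel_dbuf_bw instead of writing into the bw state, which is what lets intel_bw_calc_min_cdclk() compare old and new per-crtc values without touching global state. A standalone sketch of the per-slice accounting, with simplified types and sizes:

#include <stdint.h>
#include <stdio.h>

#define MAX_SLICES 4
#define BIT(n) (1u << (n))

struct dbuf_bw {
	unsigned int max_bw[MAX_SLICES];
	uint8_t active_planes[MAX_SLICES];
};

static void account_plane(struct dbuf_bw *bw, unsigned int slice_mask,
			  unsigned int plane_id, unsigned int data_rate)
{
	/* each plane gets an equal share, so track the per-slice maximum */
	for (unsigned int s = 0; s < MAX_SLICES; s++) {
		if (!(slice_mask & BIT(s)))
			continue;
		if (data_rate > bw->max_bw[s])
			bw->max_bw[s] = data_rate;
		bw->active_planes[s] |= BIT(plane_id);
	}
}

int main(void)
{
	struct dbuf_bw bw = { 0 };

	account_plane(&bw, BIT(0) | BIT(1), 0, 300000);
	account_plane(&bw, BIT(1), 1, 500000);
	printf("slice1 max_bw=%u planes=0x%x\n",
	       bw.max_bw[1], (unsigned int)bw.active_planes[1]);
	return 0;
}
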
@@ -1212,13 +1239,13 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
/* "Maximum Data Buffer Bandwidth" */
static int
-intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
+intel_bw_dbuf_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
unsigned int total_max_bw = 0;
enum dbuf_slice slice;
- for_each_dbuf_slice(i915, slice) {
+ for_each_dbuf_slice(display, slice) {
int num_active_planes = 0;
unsigned int max_bw = 0;
enum pipe pipe;
@@ -1227,11 +1254,11 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
* The arbiter can only really guarantee an
* equal share of the total bw to each plane.
*/
- for_each_pipe(i915, pipe) {
- const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
- max_bw = max(crtc_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(crtc_bw->active_planes[slice]);
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
}
max_bw *= num_active_planes;
@@ -1241,16 +1268,18 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
return DIV_ROUND_UP(total_max_bw, 64);
}
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
enum pipe pipe;
int min_cdclk;
- min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
+ min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
- for_each_pipe(i915, pipe)
- min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
+ for_each_pipe(display, pipe)
+ min_cdclk = max(min_cdclk,
+ intel_bw_crtc_min_cdclk(display,
+ bw_state->data_rate[pipe]));
return min_cdclk;
}
@@ -1258,42 +1287,49 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_bw_state *new_bw_state = NULL;
const struct intel_bw_state *old_bw_state = NULL;
const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *crtc_state;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
int old_min_cdclk, new_min_cdclk;
struct intel_crtc *crtc;
int i;
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
new_bw_state = intel_atomic_get_bw_state(state);
if (IS_ERR(new_bw_state))
return PTR_ERR(new_bw_state);
old_bw_state = intel_atomic_get_old_bw_state(state);
- skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
-
- new_bw_state->min_cdclk[crtc->pipe] =
- intel_bw_crtc_min_cdclk(crtc_state);
+ new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
}
if (!old_bw_state)
return 0;
- if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
+ if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
int ret = intel_atomic_lock_global_state(&new_bw_state->base);
if (ret)
return ret;
}
- old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
+ old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
+ new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
/*
* No need to check against the cdclk state if
@@ -1321,7 +1357,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
return 0;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
new_min_cdclk, cdclk_state->bw_min_cdclk);
*need_cdclk_calc = true;
@@ -1331,7 +1367,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
int i;
@@ -1365,7 +1401,7 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
*changed = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] data rate %u num active planes %u\n",
crtc->base.base.id, crtc->base.name,
new_bw_state->data_rate[crtc->pipe],
@@ -1375,16 +1411,103 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state)
+static int intel_bw_modeset_checks(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_bw_state *old_bw_state;
+ struct intel_bw_state *new_bw_state;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ int ret;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_bw_state *old_bw_state = NULL;
+ struct intel_bw_state *new_bw_state = NULL;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_can_enable_sagv(old_crtc_state) ==
+ intel_crtc_can_enable_sagv(new_crtc_state))
+ continue;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ if (intel_can_enable_sagv(display, new_bw_state) !=
+ intel_can_enable_sagv(display, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
+{
+ struct intel_display *display = to_intel_display(state);
bool changed = false;
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_bw_state *new_bw_state;
const struct intel_bw_state *old_bw_state;
int ret;
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ if (any_ms) {
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_bw_check_sagv_mask(state);
+ if (ret)
+ return ret;
+
/* FIXME earlier gens need some checks too */
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return 0;
ret = intel_bw_check_data_rate(state, &changed);
@@ -1395,9 +1518,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state = intel_atomic_get_new_bw_state(state);
if (new_bw_state &&
- (intel_can_enable_sagv(i915, old_bw_state) !=
- intel_can_enable_sagv(i915, new_bw_state) ||
- new_bw_state->force_check_qgv))
+ intel_can_enable_sagv(display, old_bw_state) !=
+ intel_can_enable_sagv(display, new_bw_state))
changed = true;
/*
@@ -1407,28 +1529,25 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
if (!changed)
return 0;
- ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
+ ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
if (ret)
return ret;
- new_bw_state->force_check_qgv = false;
-
return 0;
}
static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
intel_bw_crtc_num_active_planes(crtc_state);
- bw_state->force_check_qgv = true;
- drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
bw_state->data_rate[crtc->pipe],
bw_state->num_active_planes[crtc->pipe]);
@@ -1444,6 +1563,7 @@ void intel_bw_update_hw_state(struct intel_display *display)
return;
bw_state->active_pipes = 0;
+ bw_state->pipe_sagv_reject = 0;
for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
@@ -1455,6 +1575,11 @@ void intel_bw_update_hw_state(struct intel_display *display)
if (DISPLAY_VER(display) >= 11)
intel_bw_crtc_update(bw_state, crtc_state);
+
+ skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
+
+ /* SAGV is initially forced off */
+ bw_state->pipe_sagv_reject |= BIT(pipe);
}
}
@@ -1470,6 +1595,7 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
+ memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
}
static struct intel_global_state *
@@ -1495,9 +1621,8 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_destroy_state = intel_bw_destroy_state,
};
-int intel_bw_init(struct drm_i915_private *i915)
+int intel_bw_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -1511,8 +1636,8 @@ int intel_bw_init(struct drm_i915_private *i915)
* Limit this only if we have SAGV. And for Display version 14 onwards
* SAGV is handled through pmdemand requests
*/
- if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13))
- icl_force_disable_sagv(i915, state);
+ if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
+ icl_force_disable_sagv(display, state);
return 0;
}
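
For reference, the reworked intel_bw_calc_min_cdclk() above computes an old and a new per-crtc dbuf bandwidth snapshot and only acquires the global bw state when the two differ. A minimal standalone sketch of that compare-before-locking shape, assuming simplified types and a memcmp standing in for the driver's intel_dbuf_bw_changed():

/*
 * Simplified model of the "compare before locking" pattern: compute an
 * old and a new per-pipe snapshot, and only touch the (serializing)
 * global state when the snapshot changed. Types and the memcmp-based
 * comparison are illustrative assumptions, not the driver's definitions.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_SLICES 4

struct dbuf_bw {
	unsigned int max_bw[MAX_SLICES];
	unsigned char active_planes[MAX_SLICES];
};

static bool dbuf_bw_changed(const struct dbuf_bw *old_bw,
			    const struct dbuf_bw *new_bw)
{
	/* the real helper compares slice by slice; memcmp suffices here */
	return memcmp(old_bw, new_bw, sizeof(*old_bw)) != 0;
}

int main(void)
{
	struct dbuf_bw old_bw = { .max_bw = { 100 }, .active_planes = { 2 } };
	struct dbuf_bw new_bw = old_bw;

	if (!dbuf_bw_changed(&old_bw, &new_bw)) {
		printf("no change: skip the global bw state entirely\n");
		return 0;
	}

	printf("changed: acquire and update the global bw state\n");
	return 0;
}
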
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 3313e4eac4f0..eb2cc883e9c1 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -12,7 +12,6 @@
#include "intel_display_power.h"
#include "intel_global_state.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -49,13 +48,6 @@ struct intel_bw_state {
*/
u16 qgv_points_mask;
- /*
- * Flag to force the QGV comparison in atomic check right after the
- * hw state readout
- */
- bool force_check_qgv;
-
- int min_cdclk[I915_MAX_PIPES];
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
};
@@ -72,14 +64,14 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state);
-void intel_bw_init_hw(struct drm_i915_private *dev_priv);
-int intel_bw_init(struct drm_i915_private *dev_priv);
-int intel_bw_atomic_check(struct intel_atomic_state *state);
-int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+void intel_bw_init_hw(struct intel_display *display);
+int intel_bw_init(struct intel_display *display);
+int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
+int icl_pcode_restrict_qgv_points(struct intel_display *display,
u32 points_mask);
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct drm_i915_private *i915,
+int intel_bw_min_cdclk(struct intel_display *display,
const struct intel_bw_state *bw_state);
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 2a8749a0213e..b1718b491ffd 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1972,9 +1972,7 @@ int intel_mdclk_cdclk_ratio(struct intel_display *display,
static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
intel_mdclk_cdclk_ratio(display, cdclk_config),
cdclk_config->joined_mbus);
}
@@ -2808,7 +2806,6 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
static int intel_compute_min_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
const struct intel_bw_state *bw_state;
@@ -2836,7 +2833,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
bw_state = intel_atomic_get_new_bw_state(state);
if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state);
+ min_cdclk = intel_bw_min_cdclk(display, bw_state);
if (cdclk_state->bw_min_cdclk != min_cdclk) {
int ret;
@@ -3342,6 +3339,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
void intel_cdclk_update_hw_state(struct intel_display *display)
{
+ const struct intel_bw_state *bw_state =
+ to_intel_bw_state(display->bw.obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(display->cdclk.obj.state);
struct intel_crtc *crtc;
@@ -3359,6 +3358,8 @@ void intel_cdclk_update_hw_state(struct intel_display *display)
cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
}
+
+ cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state);
}
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3493,7 +3494,6 @@ static int dg1_rawclk(struct intel_display *display)
static int cnp_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int divider, fraction;
u32 rawclk;
@@ -3513,7 +3513,7 @@ static int cnp_rawclk(struct intel_display *display)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@@ -3552,21 +3552,20 @@ static int i9xx_hrawclk(struct intel_display *display)
*/
u32 intel_read_rawclk(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 freq;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
/*
* MTL always uses a 38.4 MHz rawclk. The bspec tells us
* "RAWCLK_FREQ defaults to the values for 38.4 and does
* not need to be programmed."
*/
freq = 38400;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
freq = dg1_rawclk(display);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_CNP)
freq = cnp_rawclk(display);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
freq = pch_rawclk(display);
else if (display->platform.valleyview || display->platform.cherryview)
freq = vlv_hrawclk(display);
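
The intel_read_rawclk() cascade above depends on the PCH types being declared in ascending order, so the ">=" checks match the newest platform first. A toy model with made-up enum values and stand-in frequencies (only the 38400 kHz MTL value comes from the hunk itself):

#include <stdio.h>

enum pch_type { PCH_NONE, PCH_IBX, PCH_CNP, PCH_ICP, PCH_DG1, PCH_MTL };

static unsigned int read_rawclk(enum pch_type pch)
{
	if (pch >= PCH_MTL)
		return 38400;	/* fixed 38.4 MHz, never programmed */
	if (pch >= PCH_DG1)
		return 38400;	/* stand-in for dg1_rawclk() */
	if (pch >= PCH_CNP)
		return 24000;	/* stand-in for cnp_rawclk() */
	return 125000;		/* stand-in for pch_rawclk() */
}

int main(void)
{
	printf("MTL rawclk = %u kHz\n", read_rawclk(PCH_MTL));
	printf("CNP rawclk = %u kHz\n", read_rawclk(PCH_CNP));
	return 0;
}
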
diff --git a/drivers/gpu/drm/i915/display/intel_cmtg.c b/drivers/gpu/drm/i915/display/intel_cmtg.c
index 07d7f4e8f60f..82606ebae1de 100644
--- a/drivers/gpu/drm/i915/display/intel_cmtg.c
+++ b/drivers/gpu/drm/i915/display/intel_cmtg.c
@@ -9,7 +9,6 @@
#include <drm/drm_device.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_cmtg.h"
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index cfe14162231d..98dddf72c0eb 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,7 +22,9 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
@@ -405,14 +407,13 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state)
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
/* icl+ have dedicated output CSC */
if (DISPLAY_VER(display) >= 11)
return false;
/* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
- if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915))
+ if (DISPLAY_VER(display) < 7 || display->platform.ivybridge)
return false;
return crtc_state->limited_color_range;
@@ -516,7 +517,6 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->hw.ctm) {
@@ -538,7 +538,7 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915));
+ drm_WARN_ON(display->drm, !display->platform.geminilake);
ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity);
} else {
@@ -3983,12 +3983,10 @@ int intel_color_init(struct intel_display *display)
void intel_color_init_hooks(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (HAS_GMCH(display)) {
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
display->funcs.color = &chv_color_funcs;
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
display->funcs.color = &vlv_color_funcs;
else if (DISPLAY_VER(display) >= 4)
display->funcs.color = &i965_color_funcs;
@@ -4005,7 +4003,7 @@ void intel_color_init_hooks(struct intel_display *display)
display->funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(display) == 8)
display->funcs.color = &bdw_color_funcs;
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
display->funcs.color = &hsw_color_funcs;
else if (DISPLAY_VER(display) == 7)
display->funcs.color = &ivb_color_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 17eea244cc83..f5cc38dbe559 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,6 +3,8 @@
* Copyright © 2018 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_combo_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index e42357bd9e80..6c81c9f2fd09 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -31,8 +31,10 @@
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_connector.h"
+#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
@@ -154,13 +156,14 @@ void intel_connector_destroy(struct drm_connector *connector)
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_i915_private *i915 = to_i915(connector->dev);
int ret;
ret = intel_backlight_device_register(intel_connector);
if (ret)
goto err;
- if (i915_inject_probe_failure(to_i915(connector->dev))) {
+ if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
goto err_backlight;
}
@@ -204,10 +207,10 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
+ struct intel_display *display = to_intel_display(connector);
- drm_WARN_ON(dev,
- !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
if (!connector->base.state->crtc)
return INVALID_PIPE;
@@ -264,20 +267,19 @@ static const struct drm_prop_enum_list force_audio_names[] = {
void
intel_attach_force_audio_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.force_audio;
+ prop = display->properties.force_audio;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, 0,
- "audio",
- force_audio_names,
- ARRAY_SIZE(force_audio_names));
+ prop = drm_property_create_enum(display->drm, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
- dev_priv->display.properties.force_audio = prop;
+ display->properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
@@ -291,20 +293,19 @@ static const struct drm_prop_enum_list broadcast_rgb_names[] = {
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_property *prop;
- prop = dev_priv->display.properties.broadcast_rgb;
+ prop = display->properties.broadcast_rgb;
if (prop == NULL) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Broadcast RGB",
- broadcast_rgb_names,
- ARRAY_SIZE(broadcast_rgb_names));
+ prop = drm_property_create_enum(display->drm, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
- dev_priv->display.properties.broadcast_rgb = prop;
+ display->properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
@@ -336,14 +337,14 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector)
void
intel_attach_scaling_mode_property(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
u32 scaling_modes;
scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) |
BIT(DRM_MODE_SCALE_FULLSCREEN);
/* On GMCH platforms borders are only possible on the LVDS port */
- if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (!HAS_GMCH(display) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
scaling_modes |= BIT(DRM_MODE_SCALE_CENTER);
drm_connector_attach_scaling_mode_property(connector, scaling_modes);
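
The property attach helpers converted above all follow the same create-once pattern: lazily create the enum property on first use, cache it under display->properties, and attach the cached object to every connector thereafter. A minimal sketch of that pattern, with a toy struct in place of drm_property:

#include <stdio.h>
#include <stdlib.h>

struct prop { const char *name; };

static struct prop *cached_force_audio;

/* lazily create the property once, then reuse it for every connector */
static struct prop *get_force_audio_prop(void)
{
	if (!cached_force_audio) {
		cached_force_audio = malloc(sizeof(*cached_force_audio));
		if (!cached_force_audio)
			return NULL;
		cached_force_audio->name = "audio";
	}
	return cached_force_audio;
}

int main(void)
{
	struct prop *a = get_force_audio_prop();
	struct prop *b = get_force_audio_prop();

	printf("same property object: %s\n", a == b ? "yes" : "no");
	return 0;
}
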
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 76ffb3f8467c..38b50a779b6b 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -31,9 +31,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_connector.h"
@@ -91,13 +91,12 @@ static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
bool intel_crt_port_enabled(struct intel_display *display,
i915_reg_t adpa_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK, val);
@@ -177,7 +176,6 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
int mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -194,14 +192,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
; /* Those bits don't exist here */
- else if (HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_CPT(display))
adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else
adpa |= ADPA_PIPE_SEL(crtc->pipe);
- if (!HAS_PCH_SPLIT(dev_priv))
+ if (!HAS_PCH_SPLIT(display))
intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
switch (mode) {
@@ -356,7 +354,6 @@ intel_crt_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -368,9 +365,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(display))
max_clock = 180000;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
/*
* 270 MHz due to current DPLL limits,
* DAC limit supposedly 355 MHz.
@@ -387,7 +384,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
- if (HAS_PCH_LPT(dev_priv) &&
+ if (HAS_PCH_LPT(display) &&
ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
return MODE_CLOCK_HIGH;
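
The LPT check above works because ilk_get_lanes_required() is, in effect, a plain bandwidth division: pixel clock times bpp over per-lane throughput. A standalone sketch; the exact 8b/10b overhead accounting is assumed to be folded into the 270000 kHz link rate as the caller passes it:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/* bits of pixel data per second / bits one lane can carry */
	return DIV_ROUND_UP(target_clock * bpp, link_bw * 8);
}

int main(void)
{
	/* 180 MHz dotclock, 270 MHz FDI link, 24 bpp -> exactly 2 lanes */
	printf("lanes = %d\n", get_lanes_required(180000, 270000, 24));
	return 0;
}
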
@@ -438,7 +435,6 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -457,7 +453,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (crtc_state->bw_constrained && crtc_state->pipe_bpp < 24) {
drm_dbg_kms(display->drm,
@@ -482,13 +478,12 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 adpa;
bool ret;
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
- bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
+ bool turn_off_dac = HAS_PCH_SPLIT(display);
u32 save_adpa;
crt->force_hotplug_required = false;
@@ -532,8 +527,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- bool reenable_hpd;
u32 adpa;
bool ret;
u32 save_adpa;
@@ -550,7 +543,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*
* Just disable HPD interrupts here to prevent this
*/
- reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_block(&crt->base);
save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
drm_dbg_kms(display->drm,
@@ -577,8 +570,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
drm_dbg_kms(display->drm,
"valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
- if (reenable_hpd)
- intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+ intel_hpd_clear_and_unblock(&crt->base);
return ret;
}
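
The hunk above replaces the remembered reenable_hpd flag with an unconditional intel_hpd_block()/intel_hpd_clear_and_unblock() bracket around the forced detection cycle. A sketch of why a self-balancing block count is simpler than saving enable state; the refcount and pending-event model here is an assumption for illustration, not the driver's implementation:

#include <assert.h>
#include <stdio.h>

struct hpd_pin {
	int block_count;	/* >0 means HPD events are held off */
	int pending;		/* events seen while blocked */
};

static void hpd_block(struct hpd_pin *pin)
{
	pin->block_count++;
}

static void hpd_clear_and_unblock(struct hpd_pin *pin)
{
	assert(pin->block_count > 0);
	pin->pending = 0;	/* drop events our own forced detect caused */
	pin->block_count--;
}

int main(void)
{
	struct hpd_pin pin = { 0 };

	hpd_block(&pin);
	pin.pending++;		/* forced detection fires a spurious event */
	hpd_clear_and_unblock(&pin);

	printf("blocked=%d pending=%d\n", pin.block_count, pin.pending);
	return 0;
}
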
@@ -586,15 +578,14 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 stat;
bool ret = false;
int i, tries = 0;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return ilk_crt_detect_hotplug(connector);
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return valleyview_crt_detect_hotplug(connector);
/*
@@ -602,14 +593,14 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
* to get a reliable result.
*/
- if (IS_G45(dev_priv))
+ if (display->platform.g45)
tries = 2;
else
tries = 1;
for (i = 0; i < tries ; i++) {
/* turn on the FORCE_DETECT */
- i915_hotplug_interrupt_update(dev_priv,
+ i915_hotplug_interrupt_update(display,
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
@@ -627,7 +618,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, PORT_HOTPLUG_STAT(display),
CRT_HOTPLUG_INT_STATUS);
- i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+ i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 0);
return ret;
}
@@ -880,7 +871,7 @@ intel_crt_detect(struct drm_connector *connector,
wakeref = intel_display_power_get(display, encoder->power_domain);
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
/* We cannot rely on the HPD pin always being correctly wired
* up, for example many KVMs do not pass it through, and so
* only trust an assertion that the monitor is connected.
@@ -904,7 +895,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(display)) {
+ if (HAS_HOTPLUG(display)) {
status = connector_status_disconnected;
goto out;
}
@@ -943,7 +934,6 @@ out:
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
@@ -956,7 +946,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
wakeref = intel_display_power_get(display, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
- if (ret || !IS_G4X(dev_priv))
+ if (ret || !display->platform.g4x)
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
@@ -1015,16 +1005,15 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector;
struct intel_crt *crt;
i915_reg_t adpa_reg;
u8 ddc_pin;
u32 adpa;
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
adpa_reg = PCH_ADPA;
- else if (IS_VALLEYVIEW(dev_priv))
+ else if (display->platform.valleyview)
adpa_reg = VLV_ADPA;
else
adpa_reg = ADPA;
@@ -1072,7 +1061,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
crt->base.pipe_mask = BIT(PIPE_A);
else
crt->base.pipe_mask = ~0;
@@ -1084,7 +1073,7 @@ void intel_crt_init(struct intel_display *display)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- if (I915_HAS_HOTPLUG(display) &&
+ if (HAS_HOTPLUG(display) &&
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
@@ -1112,7 +1101,7 @@ void intel_crt_init(struct intel_display *display)
intel_ddi_buf_trans_init(&crt->base);
} else {
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
crt->base.compute_config = pch_crt_compute_config;
crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt;
@@ -1134,7 +1123,7 @@ void intel_crt_init(struct intel_display *display)
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
- if (HAS_PCH_LPT(dev_priv)) {
+ if (HAS_PCH_LPT(display)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 5b2603ef2ff7..29cfc38f12e0 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -124,7 +124,7 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- crtc->block_dc_for_vblank = intel_psr_needs_block_dc_vblank(crtc_state);
+ crtc->vblank_psr_notify = intel_psr_needs_vblank_notification(crtc_state);
assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
@@ -154,9 +154,9 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
- crtc->block_dc_for_vblank = false;
+ crtc->vblank_psr_notify = false;
- flush_work(&display->irq.vblank_dc_work);
+ flush_work(&display->irq.vblank_notify_work);
}
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
@@ -305,7 +305,6 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
int intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
struct intel_crtc *crtc;
@@ -333,7 +332,7 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe)
for_each_sprite(display, pipe, sprite) {
struct intel_plane *plane;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
plane = skl_universal_plane_create(display, pipe, PLANE_2 + sprite);
else
plane = intel_sprite_plane_create(display, pipe, sprite);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 599ddce96371..0c7f91046996 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -5,9 +5,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_hdmi.h"
#include "intel_vblank.h"
@@ -42,13 +43,13 @@ intel_dump_m_n_config(struct drm_printer *p,
}
static void
-intel_dump_infoframe(struct drm_i915_private *i915,
+intel_dump_infoframe(struct intel_display *display,
const union hdmi_infoframe *frame)
{
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+ hdmi_infoframe_log(KERN_DEBUG, display->drm->dev, frame);
}
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
@@ -136,7 +137,7 @@ static void intel_dump_plane_state(struct drm_printer *p,
}
static void
-ilk_dump_csc(struct drm_i915_private *i915,
+ilk_dump_csc(struct intel_display *display,
struct drm_printer *p,
const char *name,
const struct intel_csc_matrix *csc)
@@ -152,7 +153,7 @@ ilk_dump_csc(struct drm_i915_private *i915,
csc->coeff[3 * i + 1],
csc->coeff[3 * i + 2]);
- if (DISPLAY_VER(i915) < 7)
+ if (DISPLAY_VER(display) < 7)
return;
drm_printf(p, "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
@@ -178,7 +179,6 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
{
struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
struct drm_printer p;
@@ -188,7 +188,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
drm_printf(&p, "[CRTC:%d:%s] enable: %s [%s]\n",
crtc->base.base.id, crtc->base.name,
@@ -262,19 +262,19 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "GCP: 0x%x\n", pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
- intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.avi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
- intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ intel_dump_infoframe(display, &pipe_config->infoframes.spd);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
- intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ intel_dump_infoframe(display, &pipe_config->infoframes.hdmi);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
- intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ intel_dump_infoframe(display, &pipe_config->infoframes.drm);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(DP_SDP_VSC))
drm_dp_vsc_sdp_log(&p, &pipe_config->infoframes.vsc);
@@ -294,8 +294,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->hw.adjusted_mode.crtc_vdisplay,
pipe_config->framestart_delay, pipe_config->msa_timing_delay);
- drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
+ drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d, vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
+ str_yes_no(intel_vrr_is_fixed_rr(pipe_config)),
pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline,
pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end);
@@ -319,14 +320,14 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
drm_printf(&p, "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id,
pipe_config->hw.scaling_filter);
- if (HAS_GMCH(i915))
+ if (HAS_GMCH(display))
drm_printf(&p, "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -343,7 +344,7 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(display, &p, &pipe_config->dpll_hw_state);
- if (IS_CHERRYVIEW(i915))
+ if (display->platform.cherryview)
drm_printf(&p, "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
pipe_config->cgm_mode, pipe_config->gamma_mode,
pipe_config->gamma_enable, pipe_config->csc_enable);
@@ -354,20 +355,20 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pre csc lut: %s%d entries, post csc lut: %d entries\n",
pipe_config->pre_csc_lut && pipe_config->pre_csc_lut ==
- i915->display.color.glk_linear_degamma_lut ? "(linear) " : "",
+ display->color.glk_linear_degamma_lut ? "(linear) " : "",
pipe_config->pre_csc_lut ?
drm_color_lut_size(pipe_config->pre_csc_lut) : 0,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);
- if (DISPLAY_VER(i915) >= 11)
- ilk_dump_csc(i915, &p, "output csc", &pipe_config->output_csc);
+ if (DISPLAY_VER(display) >= 11)
+ ilk_dump_csc(display, &p, "output csc", &pipe_config->output_csc);
- if (!HAS_GMCH(i915))
- ilk_dump_csc(i915, &p, "pipe csc", &pipe_config->csc);
- else if (IS_CHERRYVIEW(i915))
+ if (!HAS_GMCH(display))
+ ilk_dump_csc(display, &p, "pipe csc", &pipe_config->csc);
+ else if (display->platform.cherryview)
vlv_dump_csc(&p, "cgm csc", &pipe_config->csc);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_dump_csc(&p, "wgc csc", &pipe_config->csc);
intel_vdsc_state_dump(&p, 0, pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 3276a5b4a9b0..2fec5ba58373 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -9,10 +9,11 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_cursor.h"
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 22595766eac5..a82b93cbc81d 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -6,8 +6,10 @@
#include <linux/log2.h>
#include <linux/math64.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -2761,9 +2763,9 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
if (!is_dp && is_hdmi_frl(port_clock))
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
- val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA */
@@ -2774,7 +2776,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
}
@@ -3097,10 +3099,7 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- if (DISPLAY_VER(display) >= 30)
- clock = REG_FIELD_GET(XE3_DDI_CLOCK_SELECT_MASK, val);
- else
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
@@ -3168,13 +3167,9 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* clock muxes, gating and SSC
*/
- if (DISPLAY_VER(display) >= 30) {
- mask = XE3_DDI_CLOCK_SELECT_MASK;
- val |= XE3_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
- } else {
- mask = XELPDP_DDI_CLOCK_SELECT_MASK;
- val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
- }
+ mask = XELPDP_DDI_CLOCK_SELECT_MASK(display);
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display,
+ intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
mask |= XELPDP_FORWARD_CLOCK_UNGATE;
val |= XELPDP_FORWARD_CLOCK_UNGATE;
@@ -3287,7 +3282,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK, 0);
+ XELPDP_DDI_CLOCK_SELECT_MASK(display), 0);
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
@@ -3336,7 +3331,7 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
/* 6. Program DDI_CLK_VALFREQ to 0. */
@@ -3365,7 +3360,7 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
* handling is done via the standard shared DPLL framework.
*/
val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 960f7f778fb8..59c22beaf1de 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -192,10 +192,17 @@
#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
-#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
-#define XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
-#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
-#define XE3_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XE3_DDI_CLOCK_SELECT_MASK, val)
+#define _XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define _XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
+#define XELPDP_DDI_CLOCK_SELECT_MASK(display) (DISPLAY_VER(display) >= 30 ? \
+ _XE3_DDI_CLOCK_SELECT_MASK : _XELPDP_DDI_CLOCK_SELECT_MASK)
+#define XELPDP_DDI_CLOCK_SELECT_PREP(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_PREP(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_PREP(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+#define XELPDP_DDI_CLOCK_SELECT_GET(display, val) (DISPLAY_VER(display) >= 30 ? \
+ REG_FIELD_GET(_XE3_DDI_CLOCK_SELECT_MASK, (val)) : \
+ REG_FIELD_GET(_XELPDP_DDI_CLOCK_SELECT_MASK, (val)))
+
#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
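
The new macros above fold the Xe3 (bits 16:12) and pre-Xe3 (bits 15:12) layouts of the same logical field behind one display-version check. A standalone round-trip demo with minimal re-implementations of GENMASK()/FIELD_PREP()/FIELD_GET() semantics (the kernel, of course, provides its own):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define XE3_MASK	GENMASK(16, 12)
#define XELPDP_MASK	GENMASK(15, 12)

static unsigned int clock_select_mask(int display_ver)
{
	return display_ver >= 30 ? XE3_MASK : XELPDP_MASK;
}

static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static unsigned int field_get(unsigned int mask, unsigned int reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	const unsigned int maxpclk = 0x8; /* XELPDP_DDI_CLOCK_SELECT_MAXPCLK */

	for (int ver = 20; ver <= 30; ver += 10) {
		unsigned int mask = clock_select_mask(ver);
		unsigned int reg = field_prep(mask, maxpclk);

		/* both layouts encode and decode the same logical value */
		printf("ver %d: reg=0x%05x decoded=0x%x\n",
		       ver, reg, field_get(mask, reg));
	}
	return 0;
}
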
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index f38c998935b9..d58f8fc37326 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -30,11 +30,13 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_consumer.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "icl_dsi.h"
+#include "intel_alpm.h"
#include "intel_audio.h"
#include "intel_audio_regs.h"
#include "intel_backlight.h"
@@ -78,6 +80,7 @@
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -106,14 +109,14 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
return level;
}
-static bool has_buf_trans_select(struct drm_i915_private *i915)
+static bool has_buf_trans_select(struct intel_display *display)
{
- return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) < 10 && !display->platform.broxton;
}
-static bool has_iboost(struct drm_i915_private *i915)
+static bool has_iboost(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+ return DISPLAY_VER(display) == 9 && !display->platform.broxton;
}
/*
@@ -124,25 +127,25 @@ static bool has_iboost(struct drm_i915_private *i915)
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, i),
trans->entries[i].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, i),
trans->entries[i].hsw.trans2);
}
}
@@ -155,7 +158,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
u32 iboost_bit = 0;
int n_entries;
@@ -163,27 +166,25 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_ddi_buf_trans *trans;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (has_iboost(dev_priv) &&
+ if (has_iboost(display) &&
intel_bios_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_LO(port, 9),
trans->entries[level].hsw.trans1 | iboost_bit);
- intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ intel_de_write(display, DDI_BUF_TRANS_HI(port, 9),
trans->entries[level].hsw.trans2);
}
static i915_reg_t intel_ddi_buf_status_reg(struct intel_display *display, enum port port)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 14)
- return XELPDP_PORT_BUF_CTL1(i915, port);
+ return XELPDP_PORT_BUF_CTL1(display, port);
else
return DDI_BUF_CTL(port);
}
@@ -346,7 +347,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -359,14 +359,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
if (dig_port->ddi_a_4_lanes)
intel_dp->DP |= DDI_A_4_LANES;
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
else
intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
}
- if (IS_ALDERLAKE_P(i915) && intel_encoder_is_tc(encoder)) {
+ if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
@@ -379,8 +379,7 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
}
}
-static int icl_calc_tbt_pll_link(struct intel_display *display,
- enum port port)
+static int icl_calc_tbt_pll_link(struct intel_display *display, enum port port)
{
u32 val = intel_de_read(display, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
@@ -414,15 +413,14 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -445,7 +443,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ drm_WARN_ON(display->drm, crtc_state->limited_color_range &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
@@ -468,7 +466,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- intel_de_write(dev_priv, TRANS_MSA_MISC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_MSA_MISC(display, cpu_transcoder),
temp);
}
@@ -507,8 +505,8 @@ static u32
intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
@@ -516,7 +514,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
temp |= TGL_TRANS_DDI_SELECT_PORT(port);
else
temp |= TRANS_DDI_SELECT_PORT(port);
@@ -578,7 +576,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
@@ -591,11 +589,11 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
@@ -604,7 +602,7 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
- if (IS_DISPLAY_VER(dev_priv, 8, 10) &&
+ if (IS_DISPLAY_VER(display, 8, 10) &&
crtc_state->master_transcoder != INVALID_TRANSCODER) {
u8 master_select =
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
@@ -619,11 +617,10 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
enum transcoder master_transcoder = crtc_state->master_transcoder;
u32 ctl2 = 0;
@@ -635,12 +632,12 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
PORT_SYNC_MODE_MASTER_SELECT(master_select);
}
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
ctl2);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
intel_ddi_transcoder_func_reg_val_get(encoder,
crtc_state));
}
@@ -654,8 +651,7 @@ void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
@@ -663,7 +659,7 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
}
@@ -677,27 +673,26 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder),
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder),
0);
- ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
- if (IS_DISPLAY_VER(dev_priv, 8, 10))
+ if (IS_DISPLAY_VER(display, 8, 10))
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
@@ -706,7 +701,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
ctl);
if (intel_dp_mst_is_slave_trans(crtc_state))
@@ -725,17 +720,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool enable, u32 hdcp_mask)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
int ret = 0;
wakeref = intel_display_power_get_if_enabled(display,
intel_encoder->power_domain);
- if (drm_WARN_ON(dev, !wakeref))
+ if (drm_WARN_ON(display->drm, !wakeref))
return -ENXIO;
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
hdcp_mask, enable ? hdcp_mask : 0);
intel_display_power_put(display, intel_encoder->power_domain, wakeref);
return ret;
@@ -744,7 +737,6 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(intel_connector);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
@@ -765,12 +757,12 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
- ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ ddi_mode = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_MODE_SELECT_MASK;
if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
@@ -804,7 +796,6 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
@@ -819,13 +810,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!wakeref)
return;
- tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ tmp = intel_de_read(display, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP));
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A) {
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -846,7 +837,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- for_each_pipe(dev_priv, p) {
+ for_each_pipe(display, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
@@ -856,7 +847,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!trans_wakeref)
continue;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
} else {
@@ -864,8 +855,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
intel_display_power_put(display, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
@@ -883,12 +874,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@@ -901,12 +892,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* can assume it's SST.
*/
if (hweight8(dp128b132b_pipe_mask) > 1 ||
- intel_dp_mst_encoder_active_links(dig_port))
+ intel_dp_mst_active_streams(intel_dp))
mst_pipe_mask = dp128b132b_pipe_mask;
}
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask);
@@ -914,7 +905,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
*pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
@@ -922,12 +913,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*is_dp_mst = mst_pipe_mask;
out:
- if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) {
- tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
+ if (*pipe_mask && (display->platform.geminilake || display->platform.broxton)) {
+ tmp = intel_de_read(display, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
encoder->base.base.id, encoder->base.name, tmp);
}
@@ -1041,8 +1032,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
@@ -1050,53 +1040,53 @@ void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(display) >= 13)
val = TGL_TRANS_CLK_SEL_PORT(phy);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
else
val = TRANS_CLK_SEL_PORT(encoder->port);
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
if (cpu_transcoder == TRANSCODER_EDP)
return;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
val = TGL_TRANS_CLK_SEL_DISABLED;
else
val = TRANS_CLK_SEL_DISABLED;
- intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_CLK_SEL(cpu_transcoder), val);
}
-static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+static void _skl_ddi_set_iboost(struct intel_display *display,
enum port port, u8 iboost)
{
u32 tmp;
- tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(display, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(display, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int level)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 iboost;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1109,7 +1099,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
iboost = trans->entries[level].hsw.i_boost;
@@ -1117,28 +1107,28 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
/* Make sure that the requested I_boost is valid */
if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
- drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
+ drm_err(display->drm, "Invalid I_boost value %u\n", iboost);
return;
}
- _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
+ _skl_ddi_set_iboost(display, encoder->port, iboost);
if (encoder->port == PORT_A && dig_port->max_lanes == 4)
- _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
+ _skl_ddi_set_iboost(display, PORT_E, iboost);
}
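/*
 * Per the check above, the hardware accepts only the I_boost codes 0x0
 * (disable), 0x1, 0x3 and 0x7. A hypothetical predicate capturing the same
 * rule:
 */
static bool skl_iboost_is_valid(u8 iboost)
{
	return iboost == 0 || iboost == 0x1 || iboost == 0x3 || iboost == 0x7;
}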
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries;
encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
+ if (drm_WARN_ON(display->drm, n_entries < 1))
n_entries = 1;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
@@ -1171,14 +1161,14 @@ static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_stat
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_encoder_to_phy(encoder);
int n_entries, ln;
u32 val;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
@@ -1186,25 +1176,25 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
intel_dp->hobl_active = is_hobl_buf_trans(trans);
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
+ intel_de_rmw(display, ICL_PORT_CL_DW10(phy), val,
intel_dp->hobl_active ? val : 0);
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
COEFF_POLARITY | CURSOR_PROGRAM |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW2_LN(ln, phy),
SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
@@ -1216,7 +1206,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
@@ -1227,7 +1217,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
int level = intel_ddi_level(encoder, crtc_state, ln);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW7_LN(ln, phy),
N_SCALAR_MASK,
N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
}
@@ -1236,7 +1226,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val;
int ln;
@@ -1246,12 +1236,12 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -1261,33 +1251,33 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln < 4; ln++) {
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(ln, phy),
LOADGEN_SELECT,
icl_combo_phy_loadgen_select(crtc_state, ln));
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy),
0, SUS_CLOCK_CONFIG);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
@@ -1296,13 +1286,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
return;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
- intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_LINK_PARAMS(ln, tc_port),
CRI_USE_FS32, 0);
}
@@ -1312,13 +1302,13 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_SWINGCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
}
@@ -1329,7 +1319,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
- intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1338,7 +1328,7 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
- intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DRVCTRL(ln, tc_port),
CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
@@ -1354,21 +1344,21 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
+ intel_de_rmw(display, MG_CLKHUB(ln, tc_port),
CFG_LOW_RATE_LKREN_EN,
crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
- intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_DCC(ln, tc_port),
CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
CFG_AMI_CK_DIV_OVERRIDE_EN,
crtc_state->port_clock > 500000 ?
@@ -1378,9 +1368,9 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX1_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
- intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ intel_de_rmw(display, MG_TX2_PISO_READLOAD(ln, tc_port),
0, CRI_CALCINIT);
}
}
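/*
 * The MG PHY loops above key several overrides off the link rate
 * (port_clock is in kHz): the low-rate link keeper below 3.0 GHz and the
 * AMI clock-divider override above 5.0 GHz. A hypothetical helper making
 * the latter threshold explicit, assuming the field macros from the diff:
 */
static u32 mg_tx_dcc_override(int port_clock)
{
	return port_clock > 500000 ?
		CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | CFG_AMI_CK_DIV_OVERRIDE_EN : 0;
}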
@@ -1490,12 +1480,12 @@ int intel_ddi_level(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int lane)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
int level, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return 0;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -1504,7 +1494,7 @@ int intel_ddi_level(struct intel_encoder *encoder,
level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
lane);
- if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ if (drm_WARN_ON_ONCE(display->drm, level >= n_entries))
level = n_entries - 1;
return level;
@@ -1514,13 +1504,13 @@ static void
hsw_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int level = intel_ddi_level(encoder, crtc_state, 0);
enum port port = encoder->port;
u32 signal_levels;
- if (has_iboost(dev_priv))
+ if (has_iboost(display))
skl_ddi_set_iboost(encoder, crtc_state, level);
/* HDMI ignores the rest */
@@ -1529,46 +1519,46 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
signal_levels = DDI_BUF_TRANS_SELECT(level);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
}
-static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_enable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
+ intel_de_rmw(display, reg, clk_sel_mask, clk_sel);
/*
* "This step and the step before must be
* done with separate register writes."
*/
- intel_de_rmw(i915, reg, clk_off, 0);
+ intel_de_rmw(display, reg, clk_off, 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_disable_clock(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, reg, 0, clk_off);
+ intel_de_rmw(display, reg, 0, clk_off);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
-static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+static bool _icl_ddi_is_clock_enabled(struct intel_display *display, i915_reg_t reg,
u32 clk_off)
{
- return !(intel_de_read(i915, reg) & clk_off);
+ return !(intel_de_read(display, reg) & clk_off);
}
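/*
 * The three shared helpers above encode the ordering contract spelled out in
 * the comment: the PLL select and the clock ungate must be separate MMIO
 * writes, both serialized by display->dpll.lock. The per-platform hooks that
 * follow differ only in which CFGCR register and field layout they pass in;
 * a purely illustrative (hypothetical) consolidation of that mapping,
 * assuming platform flags analogous to the geminilake/broxton ones used
 * elsewhere in this patch:
 */
static i915_reg_t dpclka_cfgcr_reg(struct intel_display *display, enum phy phy)
{
	if (display->platform.dg1)
		return DG1_DPCLKA_CFGCR0(phy);
	if (display->platform.alderlake_s)
		return ADLS_DPCLKA_CFGCR(phy);
	return ICL_DPCLKA_CFGCR0; /* ICL and RKL */
}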
static struct intel_shared_dpll *
@@ -1585,14 +1575,14 @@ _icl_ddi_get_pll(struct intel_display *display, i915_reg_t reg,
static void adls_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_enable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1600,19 +1590,19 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
static void adls_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ _icl_ddi_disable_clock(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ return _icl_ddi_is_clock_enabled(display, ADLS_DPCLKA_CFGCR(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1629,14 +1619,14 @@ static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1644,19 +1634,19 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1673,23 +1663,23 @@ static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
 * If we fail this, something went very wrong: the first 2 PLLs should be
 * used by the first 2 phys and the last 2 PLLs by the last 2 phys
*/
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
(pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
(pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
return;
- _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_enable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1697,19 +1687,19 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ _icl_ddi_disable_clock(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ return _icl_ddi_is_clock_enabled(display, DG1_DPCLKA_CFGCR0(phy),
DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1739,14 +1729,14 @@ static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy = intel_encoder_to_phy(encoder);
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_enable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1754,19 +1744,19 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ _icl_ddi_disable_clock(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ return _icl_ddi_is_clock_enabled(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
}
@@ -1783,39 +1773,39 @@ struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
/*
* "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
* MG does not exist, but the programming is required to ungate DDIC and DDID."
*/
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
icl_ddi_combo_enable_clock(encoder, crtc_state);
}
static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
icl_ddi_combo_disable_clock(encoder);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
@@ -1826,54 +1816,54 @@ static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, DDI_CLK_SEL(port),
+ intel_de_write(display, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ intel_de_rmw(display, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
- intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(display, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum tc_port tc_port = intel_encoder_to_tc(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+ tmp = intel_de_read(display, DDI_CLK_SEL(port));
if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
return false;
- tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
}
@@ -1934,47 +1924,47 @@ static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
static void skl_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- mutex_lock(&i915->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- intel_de_rmw(i915, DPLL_CTRL2,
+ intel_de_rmw(display, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
/*
* FIXME Not sure if the override affects both
* the PLL selection and the CLK_OFF bit.
*/
- return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+ return !(intel_de_read(display, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
}
static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
@@ -2002,30 +1992,30 @@ static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
void hsw_ddi_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum port port = encoder->port;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return;
- intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(display, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
void hsw_ddi_disable_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(display, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+ return intel_de_read(display, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
}
static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
@@ -2081,7 +2071,7 @@ void intel_ddi_disable_clock(struct intel_encoder *encoder)
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 port_mask;
bool ddi_clk_needed;
@@ -2101,7 +2091,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (drm_WARN_ON(&i915->drm, is_mst))
+ if (drm_WARN_ON(display->drm, is_mst))
return;
}
@@ -2116,11 +2106,11 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* Sanity check that we haven't incorrectly registered another
* encoder using any of the ports of this DSI encoder.
*/
- for_each_intel_encoder(&i915->drm, other_encoder) {
+ for_each_intel_encoder(display->drm, other_encoder) {
if (other_encoder == encoder)
continue;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
port_mask & BIT(other_encoder->port)))
return;
}
@@ -2135,7 +2125,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
!encoder->is_clock_enabled(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
encoder->base.base.id, encoder->base.name);
@@ -2255,10 +2245,10 @@ tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state)
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_CTL(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_CTL(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_CTL(encoder->port);
@@ -2267,10 +2257,10 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- return TGL_DP_TP_STATUS(dev_priv,
+ if (DISPLAY_VER(display) >= 12)
+ return TGL_DP_TP_STATUS(display,
tgl_dp_tp_transcoder(crtc_state));
else
return DP_TP_STATUS(encoder->port);
@@ -2445,14 +2435,14 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
if (!crtc_state->fec_enable)
return;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_FEC_ENABLE, 0);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
}
static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
@@ -2474,11 +2464,11 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
* Splitter enable for eDP MSO is limited to certain pipes, on certain
* platforms.
*/
-static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+static u8 intel_ddi_splitter_pipe_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) > 20)
+ if (DISPLAY_VER(display) > 20)
return ~0;
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
return BIT(PIPE_A) | BIT(PIPE_B);
else
return BIT(PIPE_A);
@@ -2487,28 +2477,28 @@ static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(pipe_config);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
- dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+ dss1 = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
if (!pipe_config->splitter.enable)
return;
- if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
+ if (drm_WARN_ON(display->drm, !(intel_ddi_splitter_pipe_mask(display) & BIT(pipe)))) {
pipe_config->splitter.enable = false;
return;
}
switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
default:
- drm_WARN(&i915->drm, true,
+ drm_WARN(display->drm, true,
"Invalid splitter configuration, dss1=0x%08x\n", dss1);
fallthrough;
case SPLITTER_CONFIGURATION_2_SEGMENT:
@@ -2524,12 +2514,12 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 dss1 = 0;
- if (!HAS_MSO(i915))
+ if (!HAS_MSO(display))
return;
if (crtc_state->splitter.enable) {
@@ -2541,7 +2531,7 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
}
- intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ intel_de_rmw(display, ICL_PIPE_DSS_CTL1(pipe),
SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
OVERLAP_PIXELS_MASK, dss1);
}
@@ -2549,27 +2539,27 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
static void
mtl_ddi_enable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
set_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
set_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(dev_priv, reg) & wait_bits, 100)) {
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, 0, set_bits);
+ if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) {
+ drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
}
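/*
 * Both D2D hooks follow the same set-then-poll idiom: flip the
 * enable/disable bit, then poll the status bit with a 100 us budget via
 * wait_for_us(); the disable path later in this patch mirrors it with the
 * bits cleared. A hypothetical shared helper for the enable direction:
 */
static int mtl_d2d_enable_and_wait(struct intel_display *display,
				   i915_reg_t reg, u32 set_bits, u32 wait_bits)
{
	intel_de_rmw(display, reg, 0, set_bits);

	if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100))
		return -ETIMEDOUT;

	return 0;
}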
@@ -2599,13 +2589,13 @@ static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val;
val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, val);
}
@@ -2734,7 +2724,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int ret;
@@ -2778,7 +2767,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
@@ -2882,16 +2871,15 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- if (DISPLAY_VER(dev_priv) < 11)
- drm_WARN_ON(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 11)
+ drm_WARN_ON(display->drm,
is_mst && (port == PORT_A || port == PORT_E));
else
- drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
+ drm_WARN_ON(display->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2908,14 +2896,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
}
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
encoder->set_signal_levels(encoder, crtc_state);
@@ -2931,7 +2919,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(state, intel_dp, crtc_state);
- if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
+ if ((port != PORT_A || DISPLAY_VER(display) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp, crtc_state);
@@ -2979,12 +2967,11 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_enable_clock(encoder, crtc_state);
- drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ drm_WARN_ON(display->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(display,
dig_port->ddi_io_power_domain);
@@ -3022,10 +3009,9 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
@@ -3050,27 +3036,27 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
static void
mtl_ddi_disable_d2d(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
- if (DISPLAY_VER(dev_priv) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- if (DISPLAY_VER(dev_priv) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
reg = DDI_BUF_CTL(port);
clr_bits = XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
wait_bits = XE2LPD_DDI_BUF_D2D_LINK_STATE;
} else {
- reg = XELPDP_PORT_BUF_CTL1(dev_priv, port);
+ reg = XELPDP_PORT_BUF_CTL1(display, port);
clr_bits = XELPDP_PORT_BUF_D2D_LINK_ENABLE;
wait_bits = XELPDP_PORT_BUF_D2D_LINK_STATE;
}
- intel_de_rmw(dev_priv, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(dev_priv, reg) & wait_bits), 100))
- drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
+ intel_de_rmw(display, reg, clr_bits, 0);
+ if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100))
+ drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3089,10 +3075,9 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
if (DISPLAY_VER(display) >= 14)
intel_wait_ddi_buf_idle(display, port);
@@ -3100,7 +3085,7 @@ static void intel_ddi_buf_disable(struct intel_encoder *encoder,
mtl_ddi_disable_d2d(encoder);
if (intel_crtc_has_dp_encoder(crtc_state)) {
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
}
@@ -3118,7 +3103,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
intel_wakeref_t wakeref;
@@ -3135,12 +3119,12 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
*/
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK,
0);
}
@@ -3160,7 +3144,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
* Configure Transcoder Clock select to direct no clock to the
* transcoder"
*/
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_pps_vdd_on(intel_dp);
@@ -3176,8 +3160,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_clock(encoder);
/* De-select Thunderbolt */
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, encoder->port),
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
@@ -3187,7 +3171,6 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
intel_wakeref_t wakeref;
@@ -3195,12 +3178,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_ddi_buf_disable(encoder, old_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
@@ -3220,7 +3203,6 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
@@ -3249,6 +3231,8 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
}
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -3257,7 +3241,7 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -3359,12 +3343,12 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
+ if (port == PORT_A && DISPLAY_VER(display) < 9)
intel_dp_stop_link_train(intel_dp, crtc_state);
drm_connector_update_privacy_screen(conn_state);
@@ -3401,7 +3385,6 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
@@ -3410,11 +3393,11 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
- if (has_buf_trans_select(dev_priv))
+ if (has_buf_trans_select(display))
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
/* e. Enable D2D Link for C10/C20 Phy */
@@ -3423,7 +3406,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
/*
* For some reason these chicken bits have been
 * stuffed into a transcoder register, even though
@@ -3433,7 +3416,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(display, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3442,8 +3425,8 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
udelay(1);
@@ -3454,7 +3437,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
}
intel_ddi_power_up_lanes(encoder, crtc_state);
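/*
 * Condensed sketch of the Display WA #1143 sequence spread over the hunks
 * above: latch the training override on, flush it with a posting read, hold
 * for ~1 us, then clear the override again (port E uses the DDIE_* variants
 * of the same bits). 'reg' is the per-port chicken register from the code
 * above:
 */
val = intel_de_read(display, reg);
val |= DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE;
intel_de_write(display, reg, val);
intel_de_posting_read(display, reg);
udelay(1);
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE);
intel_de_write(display, reg, val);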
@@ -3475,7 +3458,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->ddi_a_4_lanes)
buf_ctl |= DDI_A_4_LANES;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
u32 port_buf = 0;
port_buf |= XELPDP_PORT_WIDTH(crtc_state->lane_count);
@@ -3483,15 +3466,15 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
- intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- } else if (IS_ALDERLAKE_P(dev_priv) && intel_encoder_is_tc(encoder)) {
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ } else if (display->platform.alderlake_p && intel_encoder_is_tc(encoder)) {
+ drm_WARN_ON(display->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -3522,8 +3505,7 @@ static void intel_ddi_enable(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, crtc_state);
- /* Enable/Disable DP2.0 SDP split config before transcoder */
- intel_audio_sdp_split_update(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
/* 128b/132b SST */
if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
@@ -3567,9 +3549,10 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_psr_disable(intel_dp, old_crtc_state);
+ intel_alpm_disable(intel_dp);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
intel_dp_sink_disable_decompression(state,
@@ -3584,12 +3567,12 @@ static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = old_conn_state->connector;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
}
@@ -3653,16 +3636,16 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
- if (DISPLAY_VER(i915) >= 14 || !intel_encoder_is_tc(encoder))
+ if (DISPLAY_VER(display) >= 14 || !intel_encoder_is_tc(encoder))
return;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
intel_crtc_joined_pipe_mask(crtc_state))
intel_update_active_dpll(state, pipe_crtc, encoder);
}
@@ -3678,7 +3661,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_tc_port = intel_encoder_is_tc(encoder);
@@ -3697,7 +3680,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
* Type-C ports. Skip this step for TBT.
*/
intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dpio_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
}
@@ -3765,10 +3748,9 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 dp_tp_ctl;
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
drm_WARN_ON(display->drm, dp_tp_ctl & DP_TP_CTL_ENABLE);
@@ -3781,10 +3763,10 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
- if (IS_ALDERLAKE_P(dev_priv) &&
+ if (display->platform.alderlake_p &&
(intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
adlp_tbt_to_dp_alt_switch_wa(encoder);
@@ -3796,11 +3778,11 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 temp;
- temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ temp = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
@@ -3821,17 +3803,17 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
break;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp);
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), temp);
}
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE);
/*
@@ -3841,28 +3823,26 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
* In this case there is a requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A && DISPLAY_VER(dev_priv) < 12)
+ if (port == PORT_A && DISPLAY_VER(display) < 12)
return;
- if (intel_de_wait_for_set(dev_priv,
+ if (intel_de_wait_for_set(display,
dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_IDLE_DONE, 2))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DP idle patterns\n");
}
-static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+static bool intel_ddi_is_audio_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- struct intel_display *display = &dev_priv->display;
-
if (cpu_transcoder == TRANSCODER_EDP)
return false;
if (!intel_display_power_is_enabled(display, POWER_DOMAIN_AUDIO_MMIO))
return false;
- return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(display, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
@@ -3892,34 +3872,34 @@ static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
-static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+static enum transcoder bdw_transcoder_master_readout(struct intel_display *display,
enum transcoder cpu_transcoder)
{
u32 master_select;
- if (DISPLAY_VER(dev_priv) >= 11) {
- u32 ctl2 = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 11) {
+ u32 ctl2 = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL2(display, cpu_transcoder));
if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
} else {
- u32 ctl = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ u32 ctl = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -3936,15 +3916,14 @@ static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *de
static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum transcoder cpu_transcoder;
crtc_state->master_transcoder =
- bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+ bdw_transcoder_master_readout(display, crtc_state->cpu_transcoder);
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ for_each_cpu_transcoder_masked(display, cpu_transcoder, transcoders) {
enum intel_display_power_domain power_domain;
intel_wakeref_t trans_wakeref;
@@ -3955,14 +3934,14 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
if (!trans_wakeref)
continue;
- if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ if (bdw_transcoder_master_readout(display, cpu_transcoder) ==
crtc_state->cpu_transcoder)
crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
intel_display_power_put(display, power_domain, trans_wakeref);
}
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
crtc_state->master_transcoder != INVALID_TRANSCODER &&
crtc_state->sync_mode_slaves_mask);
}
@@ -4085,11 +4064,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 ddi_func_ctl, ddi_mode, flags = 0;
- ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_func_ctl = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4131,13 +4109,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
/*
* If this is true, we know we're being called from an MST stream
* encoder's ->get_config().
*/
- if (intel_dp_mst_encoder_active_links(dig_port))
+ if (intel_dp_mst_active_streams(intel_dp))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
@@ -4152,11 +4130,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
/* XXX: DSI transcoder paranoia */
- if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(display->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_ddi_read_func_ctl(encoder, pipe_config);
@@ -4164,14 +4142,14 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_mso_get_config(encoder, pipe_config);
pipe_config->has_audio =
- intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+ intel_ddi_is_audio_enabled(display, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP)
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
ddi_dotclock_get(pipe_config);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_get_lane_lat_optim_mask(encoder);
@@ -4192,7 +4170,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
HDMI_INFOFRAME_TYPE_DRM,
&pipe_config->infoframes.drm);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
bdw_get_trans_port_sync_config(pipe_config);
intel_psr_get_config(encoder, pipe_config);
@@ -4285,10 +4263,10 @@ static enum icl_port_dpll_id
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (drm_WARN_ON(&i915->drm, !pll))
+ if (drm_WARN_ON(display->drm, !pll))
return ICL_PORT_DPLL_DEFAULT;
if (icl_ddi_tc_pll_is_tbt(pll))
@@ -4382,11 +4360,11 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
bool fastset = true;
if (intel_encoder_is_tc(encoder)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
@@ -4421,12 +4399,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
- if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ if (HAS_TRANSCODER(display, TRANSCODER_EDP) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4441,13 +4419,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ if (display->platform.haswell && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->pch_pfit.force_thru =
pipe_config->pch_pfit.enabled ||
pipe_config->crc_enabled;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -4498,9 +4476,9 @@ static u8
intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
int tile_group_id)
{
+ struct intel_display *display = to_intel_display(ref_crtc_state);
struct drm_connector *connector;
const struct drm_connector_state *conn_state;
- struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
struct intel_atomic_state *state =
to_intel_atomic_state(ref_crtc_state->uapi.state);
u8 transcoders = 0;
@@ -4510,7 +4488,7 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4542,11 +4520,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
@@ -4618,7 +4596,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_connector *connector;
enum port port = dig_port->base.port;
@@ -4627,7 +4605,7 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain;
else
dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -4643,15 +4621,14 @@ static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
- struct drm_device *dev = dig_port->base.base.dev;
struct drm_privacy_screen *privacy_screen;
- privacy_screen = drm_privacy_screen_get(dev->dev, NULL);
+ privacy_screen = drm_privacy_screen_get(display->drm->dev, NULL);
if (!IS_ERR(privacy_screen)) {
drm_connector_attach_privacy_screen_provider(&connector->base,
privacy_screen);
} else if (PTR_ERR(privacy_screen) != -ENODEV) {
- drm_warn(dev, "Error getting privacy-screen\n");
+ drm_warn(display->drm, "Error getting privacy-screen\n");
}
}
@@ -4662,7 +4639,6 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
struct intel_connector *connector = hdmi->attached_connector;
struct i2c_adapter *ddc = connector->base.ddc;
@@ -4675,7 +4651,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
if (connector->base.status != connector_status_connected)
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -4692,7 +4668,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
@@ -4708,7 +4684,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
ret = drm_scdc_readb(ddc, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- drm_err(&dev_priv->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to read TMDS config: %d\n",
connector->base.base.id, connector->base.name, ret);
return 0;
}
@@ -4733,11 +4709,11 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
static void intel_ddi_link_check(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* TODO: Move checking the HDMI link state here as well. */
- drm_WARN_ON(&i915->drm, !dig_port->dp.attached_connector);
+ drm_WARN_ON(display->drm, !dig_port->dp.attached_connector);
intel_dp_link_check(encoder);
}
@@ -4800,26 +4776,26 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(display, GEN8_DE_PORT_ISR) & bit;
}
static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
@@ -4848,7 +4824,7 @@ static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
if (dig_port->base.port != PORT_A)
return false;
@@ -4859,7 +4835,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
* supported configuration
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
return true;
return false;
@@ -4868,15 +4844,15 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
static int
intel_ddi_max_lanes(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
enum port port = dig_port->base.port;
int max_lanes = 4;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4889,7 +4865,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
* so we use the proper lane count for our calculations.
*/
if (intel_ddi_a_force_4_lanes(dig_port)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Forcing DDI_A_4_LANES for port A\n");
dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
@@ -4898,8 +4874,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
return max_lanes;
}
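
On pre-ICL parts, DDI A and DDI E share one pool of four lanes; the PORT_A/PORT_E branch above encodes who owns them, and DISPLAY_VER >= 11 bypasses the scheme entirely. A compact restatement of that branch (illustrative only):

	static int shared_a_e_lanes(bool ddi_a_4_lanes, bool is_port_a)
	{
		if (ddi_a_4_lanes)
			return is_port_a ? 4 : 0;	/* DDI A owns all four lanes */
		return 2;				/* A and E take two lanes each */
	}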
-static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin xelpd_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_D_XELPD)
return HPD_PORT_D + port - PORT_D_XELPD;
@@ -4909,8 +4884,7 @@ static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin dg1_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4918,8 +4892,7 @@ static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin tgl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return HPD_PORT_TC1 + port - PORT_TC1;
@@ -4927,11 +4900,10 @@ static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin rkl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return tgl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return tgl_hpd_pin(display, port);
if (port >= PORT_TC1)
return HPD_PORT_C + port - PORT_TC1;
@@ -4939,8 +4911,7 @@ static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin icl_hpd_pin(struct intel_display *display, enum port port)
{
if (port >= PORT_C)
return HPD_PORT_TC1 + port - PORT_C;
@@ -4948,31 +4919,30 @@ static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static enum hpd_pin ehl_hpd_pin(struct intel_display *display, enum port port)
{
if (port == PORT_D)
return HPD_PORT_A;
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
-static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+static enum hpd_pin skl_hpd_pin(struct intel_display *display, enum port port)
{
- if (HAS_PCH_TGP(dev_priv))
- return icl_hpd_pin(dev_priv, port);
+ if (HAS_PCH_TGP(display))
+ return icl_hpd_pin(display, port);
return HPD_PORT_A + port - PORT_A;
}
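
All of these helpers are offset arithmetic over consecutive enum blocks: subtract the first port of a group, add the first HPD pin of the matching group. Worked through tgl_hpd_pin() above (the arithmetic relies on the enum values within each group being consecutive):

	/* port >= PORT_TC1: TC block			*/
	/*   tgl_hpd_pin(PORT_TC1) == HPD_PORT_TC1 + 0	*/
	/*   tgl_hpd_pin(PORT_TC3) == HPD_PORT_TC1 + 2	*/
	/* otherwise: combo block			*/
	/*   tgl_hpd_pin(PORT_B)   == HPD_PORT_A + 1	*/

The rkl/ehl/skl variants first defer to the TGL or ICL mapping when HAS_PCH_TGP() holds, then fall back to the same arithmetic.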
-static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+static bool intel_ddi_is_tc(struct intel_display *display, enum port port)
{
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return port >= PORT_TC1;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return port >= PORT_C;
else
return false;
@@ -5015,21 +4985,21 @@ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
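
The two name macros rely on the same consecutive-enum property to turn an index straight into its ASCII digit, for example:

	/* port_tc_name(PORT_TC2) == (PORT_TC2 - PORT_TC1) + '1' == 1 + '1' == '2' */
	/* tc_port_name(TC_PORT_3) == (TC_PORT_3 - TC_PORT_1) + '1' == '3'         */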
-static bool port_strap_detected(struct drm_i915_private *i915, enum port port)
+static bool port_strap_detected(struct intel_display *display, enum port port)
{
/* straps not used on skl+ */
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return true;
switch (port) {
case PORT_A:
- return intel_de_read(i915, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ return intel_de_read(display, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
case PORT_B:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIB_DETECTED;
case PORT_C:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDIC_DETECTED;
case PORT_D:
- return intel_de_read(i915, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
+ return intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_DDID_DETECTED;
case PORT_E:
return true; /* no strap for DDI-E */
default:
@@ -5043,18 +5013,18 @@ static bool need_aux_ch(struct intel_encoder *encoder, bool init_dp)
return init_dp || intel_encoder_is_tc(encoder);
}
-static bool assert_has_icl_dsi(struct drm_i915_private *i915)
+static bool assert_has_icl_dsi(struct intel_display *display)
{
- return !drm_WARN(&i915->drm, !IS_ALDERLAKE_P(i915) &&
- !IS_TIGERLAKE(i915) && DISPLAY_VER(i915) != 11,
+ return !drm_WARN(display->drm, !display->platform.alderlake_p &&
+ !display->platform.tigerlake && DISPLAY_VER(display) != 11,
"Platform does not support DSI\n");
}
-static bool port_in_use(struct drm_i915_private *i915, enum port port)
+static bool port_in_use(struct intel_display *display, enum port port)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
/* FIXME what about second port for dual link DSI? */
if (encoder->port == port)
return true;
@@ -5066,7 +5036,6 @@ static bool port_in_use(struct drm_i915_private *i915, enum port port)
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp;
@@ -5078,8 +5047,8 @@ void intel_ddi_init(struct intel_display *display,
if (port == PORT_NONE)
return;
- if (!port_strap_detected(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!port_strap_detected(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c strap not detected\n", port_name(port));
return;
}
@@ -5087,15 +5056,15 @@ void intel_ddi_init(struct intel_display *display,
if (!assert_port_valid(display, port))
return;
- if (port_in_use(dev_priv, port)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (port_in_use(display, port)) {
+ drm_dbg_kms(display->drm,
"Port %c already claimed\n", port_name(port));
return;
}
if (intel_bios_encoder_supports_dsi(devdata)) {
/* BXT/GLK handled elsewhere, for now at least */
- if (!assert_has_icl_dsi(dev_priv))
+ if (!assert_has_icl_dsi(display))
return;
icl_dsi_init(display, devdata);
@@ -5111,7 +5080,7 @@ void intel_ddi_init(struct intel_display *display,
* outputs.
*/
if (intel_hti_uses_phy(display, phy)) {
- drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
+ drm_dbg_kms(display->drm, "PORT %c / PHY %c reserved by HTI\n",
port_name(port), phy_name(phy));
return;
}
@@ -5128,20 +5097,20 @@ void intel_ddi_init(struct intel_display *display,
*/
init_dp = true;
init_hdmi = false;
- drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ drm_dbg_kms(display->drm, "VBT says port %c has lspcon\n",
port_name(port));
}
if (!init_dp && !init_hdmi) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
port_name(port));
return;
}
if (intel_phy_is_snps(display, phy) &&
- dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
- drm_dbg_kms(&dev_priv->drm,
+ display->snps.phy_failed_calibration & BIT(phy)) {
+ drm_dbg_kms(display->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
}
@@ -5155,26 +5124,26 @@ void intel_ddi_init(struct intel_display *display,
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c",
port_name(port - PORT_D_XELPD + PORT_D),
phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %s%c/PHY %s%c",
port >= PORT_TC1 ? "TC" : "",
port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum tc_port tc_port = intel_port_to_tc(display, port);
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c%s/PHY %s%c",
port_name(port),
@@ -5182,7 +5151,7 @@ void intel_ddi_init(struct intel_display *display,
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
} else {
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS,
"DDI %c/PHY %c", port_name(port), phy_name(phy));
}
@@ -5218,32 +5187,32 @@ void intel_ddi_init(struct intel_display *display,
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = mtl_ddi_get_config;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->enable_clock = intel_mpllb_enable;
encoder->disable_clock = intel_mpllb_disable;
encoder->get_config = dg2_ddi_get_config;
- } else if (IS_ALDERLAKE_S(dev_priv)) {
+ } else if (display->platform.alderlake_s) {
encoder->enable_clock = adls_ddi_enable_clock;
encoder->disable_clock = adls_ddi_disable_clock;
encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
encoder->get_config = adls_ddi_get_config;
- } else if (IS_ROCKETLAKE(dev_priv)) {
+ } else if (display->platform.rocketlake) {
encoder->enable_clock = rkl_ddi_enable_clock;
encoder->disable_clock = rkl_ddi_disable_clock;
encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
encoder->get_config = rkl_ddi_get_config;
- } else if (IS_DG1(dev_priv)) {
+ } else if (display->platform.dg1) {
encoder->enable_clock = dg1_ddi_enable_clock;
encoder->disable_clock = dg1_ddi_disable_clock;
encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
encoder->get_config = dg1_ddi_get_config;
- } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (display->platform.jasperlake || display->platform.elkhartlake) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = jsl_ddi_tc_enable_clock;
encoder->disable_clock = jsl_ddi_tc_disable_clock;
encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
@@ -5255,8 +5224,8 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- if (intel_ddi_is_tc(dev_priv, port)) {
+ } else if (DISPLAY_VER(display) >= 11) {
+ if (intel_ddi_is_tc(display, port)) {
encoder->enable_clock = icl_ddi_tc_enable_clock;
encoder->disable_clock = icl_ddi_tc_disable_clock;
encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
@@ -5268,36 +5237,36 @@ void intel_ddi_init(struct intel_display *display,
encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
encoder->get_config = icl_ddi_combo_get_config;
}
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
/* BXT/GLK have fixed PLL->port mapping */
encoder->get_config = bxt_ddi_get_config;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
encoder->enable_clock = skl_ddi_enable_clock;
encoder->disable_clock = skl_ddi_disable_clock;
encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
encoder->get_config = skl_ddi_get_config;
- } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ } else if (display->platform.broadwell || display->platform.haswell) {
encoder->enable_clock = hsw_ddi_enable_clock;
encoder->disable_clock = hsw_ddi_disable_clock;
encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
- } else if (IS_DG2(dev_priv)) {
+ } else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_combo(encoder))
encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
else
encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
encoder->set_signal_levels = bxt_dpio_phy_set_signal_levels;
} else {
encoder->set_signal_levels = hsw_set_signal_levels;
@@ -5305,29 +5274,29 @@ void intel_ddi_init(struct intel_display *display,
intel_ddi_buf_trans_init(encoder);
- if (DISPLAY_VER(dev_priv) >= 13)
- encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
- else if (IS_DG1(dev_priv))
- encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
- else if (IS_ROCKETLAKE(dev_priv))
- encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) >= 12)
- encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 11)
- encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
- else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
+ if (DISPLAY_VER(display) >= 13)
+ encoder->hpd_pin = xelpd_hpd_pin(display, port);
+ else if (display->platform.dg1)
+ encoder->hpd_pin = dg1_hpd_pin(display, port);
+ else if (display->platform.rocketlake)
+ encoder->hpd_pin = rkl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) >= 12)
+ encoder->hpd_pin = tgl_hpd_pin(display, port);
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ encoder->hpd_pin = ehl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 11)
+ encoder->hpd_pin = icl_hpd_pin(display, port);
+ else if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
+ encoder->hpd_pin = skl_hpd_pin(display, port);
else
encoder->hpd_pin = intel_hpd_pin_default(port);
- ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(display, DDI_BUF_CTL(port));
dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
@@ -5346,7 +5315,7 @@ void intel_ddi_init(struct intel_display *display,
if (!is_legacy && init_hdmi) {
is_legacy = !init_dp;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n",
port_name(port),
str_yes_no(init_dp),
@@ -5363,24 +5332,24 @@ void intel_ddi_init(struct intel_display *display,
goto err;
}
- drm_WARN_ON(&dev_priv->drm, port > PORT_I);
+ drm_WARN_ON(display->drm, port > PORT_I);
dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(display, port);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (intel_encoder_is_tc(encoder))
dig_port->connected = intel_tc_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
dig_port->connected = bdw_digital_port_connected;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
if (port == PORT_A)
dig_port->connected = bdw_digital_port_connected;
else
dig_port->connected = lpt_digital_port_connected;
- } else if (IS_HASWELL(dev_priv)) {
+ } else if (display->platform.haswell) {
if (port == PORT_A)
dig_port->connected = hsw_digital_port_connected;
else
@@ -5396,7 +5365,7 @@ void intel_ddi_init(struct intel_display *display,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
if (dig_port->dp.mso_link_count)
- encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(display);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index b7399e9d11cc..54ce3e4f8fd9 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,7 +6,6 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
-#include "intel_display_conversion.h"
#include "intel_display_core.h"
#include "intel_dmc_wl.h"
#include "intel_dsb.h"
@@ -19,7 +18,7 @@ static inline struct intel_uncore *__to_uncore(struct intel_display *display)
}
static inline u32
-__intel_de_read(struct intel_display *display, i915_reg_t reg)
+intel_de_read(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -31,7 +30,6 @@ __intel_de_read(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
intel_de_read8(struct intel_display *display, i915_reg_t reg)
@@ -66,7 +64,7 @@ intel_de_read64_2x32(struct intel_display *display,
}
static inline void
-__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
+intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
{
intel_dmc_wl_get(display, reg);
@@ -74,10 +72,9 @@ __intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_posting_read(p,...) __intel_de_posting_read(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
{
intel_dmc_wl_get(display, reg);
@@ -85,7 +82,6 @@ __intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
intel_dmc_wl_put(display, reg);
}
-#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
@@ -95,8 +91,7 @@ __intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
}
static inline u32
-__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
- u32 set)
+intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
{
u32 val;
@@ -108,7 +103,6 @@ __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
return val;
}
-#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
static inline int
__intel_de_wait_for_register_nowl(struct intel_display *display,
@@ -181,20 +175,18 @@ intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
}
static inline int
-__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
{
return intel_de_wait(display, reg, mask, mask, timeout);
}
-#define intel_de_wait_for_set(p,...) __intel_de_wait_for_set(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout)
+intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
{
return intel_de_wait(display, reg, mask, 0, timeout);
}
-#define intel_de_wait_for_clear(p,...) __intel_de_wait_for_clear(__to_intel_display(p), __VA_ARGS__)
/*
* Unlocked mmio-accessors, think carefully before using these.
@@ -205,7 +197,7 @@ __intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
* a more localised lock guarding all access to that bank of registers.
*/
static inline u32
-__intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
+intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
{
u32 val;
@@ -214,15 +206,13 @@ __intel_de_read_fw(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read_fw(p,...) __intel_de_read_fw(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
+intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
{
trace_i915_reg_rw(true, reg, val, sizeof(val), true);
intel_uncore_write_fw(__to_uncore(display), reg, val);
}
-#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
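
The macros deleted above existed so intel_de_read()/intel_de_write() call sites could pass either a struct drm_i915_private * or a struct intel_display *, with __to_intel_display() normalizing the argument; with the DDI code fully converted, the inline functions take struct intel_display * directly under their real names. A self-contained sketch of how that kind of macro polymorphism can be built on C11 _Generic — the names and struct layout here are illustrative assumptions, not the kernel's actual intel_display_conversion.h:

	#include <stdio.h>

	struct intel_display { int ver; };
	struct drm_i915_private { struct intel_display display; };

	/* Normalize either pointer type to struct intel_display *. */
	#define __to_intel_display(p)						\
		_Generic((p),							\
			struct drm_i915_private *:				\
				&((struct drm_i915_private *)(p))->display,	\
			struct intel_display *:					\
				(struct intel_display *)(p))

	static int display_ver(struct intel_display *display)
	{
		return display->ver;
	}

	#define DISPLAY_VER(p) display_ver(__to_intel_display(p))

	int main(void)
	{
		struct drm_i915_private i915 = { .display = { .ver = 14 } };

		/* Both spellings worked while the conversion was in flight. */
		printf("%d %d\n", DISPLAY_VER(&i915), DISPLAY_VER(&i915.display));
		return 0;
	}

Dropping the wrappers turns any remaining dev_priv argument at an intel_de_*() call site into a hard compile error, which keeps the conversion in the hunks above honest.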
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3b509c70fb58..6f0a0bc71b06 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -55,6 +55,7 @@
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
+#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
@@ -73,6 +74,7 @@
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -663,7 +665,6 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -696,7 +697,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) &&
- intel_set_memory_cxsr(dev_priv, false))
+ intel_set_memory_cxsr(display, false))
intel_plane_initial_vblank_wait(crtc);
/*
@@ -1043,19 +1044,16 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- intel_psr_post_plane_update(state, crtc);
-
- intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
+ intel_frontbuffer_flip(display, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_fbc_post_update(state, crtc);
@@ -1080,6 +1078,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+
+ intel_alpm_post_plane_update(state, crtc);
+
+ intel_psr_post_plane_update(state, crtc);
}
static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
@@ -1168,13 +1170,15 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_alpm_pre_plane_update(state, crtc);
+ intel_psr_pre_plane_update(state, crtc);
+
if (intel_crtc_vrr_disabling(state, crtc)) {
intel_vrr_disable(old_crtc_state);
intel_crtc_update_active_timings(old_crtc_state, false);
@@ -1185,8 +1189,6 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_drrs_deactivate(old_crtc_state);
- intel_psr_pre_plane_update(state, crtc);
-
if (hsw_ips_pre_update(state, crtc))
intel_crtc_wait_for_next_vblank(crtc);
@@ -1222,7 +1224,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* wait-for-vblank between disabling the plane and the pipe.
*/
if (HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(display, false))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1233,7 +1235,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* WaCxSRDisabledForSpriteScaling:ivb
*/
if (!HAS_GMCH(display) && old_crtc_state->hw.active &&
- new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
+ new_crtc_state->disable_cxsr && ilk_disable_cxsr(display))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1257,7 +1259,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*/
if (!intel_initial_watermarks(state, crtc))
if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
}
/*
@@ -1282,7 +1284,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
unsigned int update_mask = new_crtc_state->update_planes;
@@ -1304,7 +1306,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
fb_bits |= plane->frontbuffer_bit;
}
- intel_frontbuffer_flip(dev_priv, fb_bits);
+ intel_frontbuffer_flip(display, fb_bits);
}
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
@@ -1511,7 +1513,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -1563,7 +1564,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
intel_wait_for_pipe_scanline_moving(crtc);
/*
@@ -1662,13 +1663,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_pll_enable(state, crtc);
- for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
- const struct intel_crtc_state *pipe_crtc_state =
- intel_atomic_get_new_crtc_state(state, pipe_crtc);
-
- if (pipe_crtc_state->shared_dpll)
- intel_enable_shared_dpll(pipe_crtc_state);
- }
+ if (new_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -1779,8 +1775,6 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, true);
intel_set_pch_fifo_underrun_reporting(display, pipe, true);
-
- intel_disable_shared_dpll(old_crtc_state);
}
static void hsw_crtc_disable(struct intel_atomic_state *state,
@@ -1799,12 +1793,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
- const struct intel_crtc_state *old_pipe_crtc_state =
- intel_atomic_get_old_crtc_state(state, pipe_crtc);
-
- intel_disable_shared_dpll(old_pipe_crtc_state);
- }
+ intel_disable_shared_dpll(old_crtc_state);
intel_encoders_post_pll_disable(state, crtc);
@@ -2083,7 +2072,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (drm_WARN_ON(display->drm, crtc->active))
@@ -2107,7 +2095,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_color_modeset(new_crtc_state);
if (!intel_initial_watermarks(state, crtc))
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -2123,7 +2111,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
@@ -2147,9 +2134,9 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (display->platform.cherryview)
- chv_disable_pll(dev_priv, pipe);
+ chv_disable_pll(display, pipe);
else if (display->platform.valleyview)
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
else
i9xx_disable_pll(old_crtc_state);
}
@@ -2160,7 +2147,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(display, pipe, false);
if (!display->funcs.wm->initial_watermarks)
- intel_update_watermarks(dev_priv);
+ intel_update_watermarks(display);
/* clock the pipe down to 640x480@60 to potentially save power */
if (display->platform.i830)
@@ -2343,7 +2330,6 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
intel_joiner_compute_pipe_src(crtc_state);
@@ -2362,7 +2348,7 @@ static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(i915)) {
+ intel_is_dual_link_lvds(display)) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
crtc->base.base.id, crtc->base.name);
@@ -2420,14 +2406,6 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static bool intel_crtc_needs_wa_14015401596(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
- IS_DISPLAY_VER(display, 13, 14);
-}
-
static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2436,9 +2414,7 @@ static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
if (!HAS_DSB(display))
return 0;
- /* Wa_14015401596 */
- if (intel_crtc_needs_wa_14015401596(crtc_state))
- vblank_delay = max(vblank_delay, 1);
+ vblank_delay = max(vblank_delay, intel_psr_min_vblank_delay(crtc_state));
return vblank_delay;
}
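
The open-coded Wa_14015401596 check removed above (VRR possible plus PSR on display versions 13–14 forced at least one line of vblank delay) moves behind intel_psr_min_vblank_delay(), so the PSR code now owns the decision. A plausible reduction of what the callee must return for the old behaviour to be preserved, derived from the deleted predicate — an assumption for illustration, not the actual intel_psr.c body:

	/* Hypothetical sketch: >= 1 when the workaround applies, else 0. */
	static int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
	{
		struct intel_display *display = to_intel_display(crtc_state);

		/* Wa_14015401596 */
		if (intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
		    IS_DISPLAY_VER(display, 13, 14))
			return 1;

		return 0;
	}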
@@ -2550,15 +2526,13 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
void intel_panel_sanitize_ssc(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
bool bios_lvds_use_ssc = intel_de_read(display,
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
@@ -2639,6 +2613,15 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
PIPE_LINK_N2(display, transcoder));
}
+static bool
+transcoder_has_vrr(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ return HAS_VRR(display) && !transcoder_is_dsi(cpu_transcoder);
+}
+
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -2703,6 +2686,15 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
+ /*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not required. Since support for these bits is
+ * going to be deprecated in upcoming platforms, avoid writing them on
+ * platforms that do not use the legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
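
TRANS_VTOTAL holds both fields as value-minus-one, which is why the write above is VACTIVE(crtc_vdisplay - 1) | VTOTAL(crtc_vtotal - 1); forcing crtc_vtotal to 1 when the VRR timing generator always owns vertical timing therefore parks the Vtotal field at 0 instead of a real vertical total. A worked sketch of the packing (the field layout here is assumed for illustration, not taken from the register spec):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative layout: Vactive in bits 31:16, Vtotal in bits 15:0. */
	#define VACTIVE(x)	((uint32_t)(x) << 16)
	#define VTOTAL(x)	((uint32_t)(x) << 0)

	int main(void)
	{
		int crtc_vdisplay = 1080, crtc_vtotal = 1111;
		int always_vrr_tg = 1;		/* intel_vrr_always_use_vrr_tg() */

		if (always_vrr_tg)
			crtc_vtotal = 1;	/* Vtotal field becomes 0 */

		uint32_t val = VACTIVE(crtc_vdisplay - 1) | VTOTAL(crtc_vtotal - 1);

		printf("TRANS_VTOTAL = 0x%08x\n", val);	/* 0x04370000 */
		return 0;
	}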
@@ -2722,6 +2714,19 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
intel_de_write(display, TRANS_VTOTAL(display, pipe),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ if (DISPLAY_VER(display) >= 30) {
+ /*
+ * Address issues for resolutions with high refresh rate that
+ * have small Hblank, specifically where Hblank is smaller than
+ * one MTP. Simulations indicate this will address the
+ * jitter issues that currently cause a BS (blanking start) symbol to
+ * be immediately followed by BE (blanking end), which DPRX devices
+ * are unable to handle.
+ * https://groups.vesa.org/wg/DP/document/20494
+ */
+ intel_de_write(display, DP_MIN_HBLANK_CTL(cpu_transcoder),
+ crtc_state->min_hblank);
+ }
}
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
@@ -2764,12 +2769,24 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
VBLANK_START(crtc_vblank_start - 1) |
VBLANK_END(crtc_vblank_end - 1));
/*
+ * For platforms that always use the VRR Timing Generator, the
+ * VTOTAL.Vtotal bits are not required. Since support for these bits is
+ * going to be deprecated in upcoming platforms, avoid writing them on
+ * platforms that do not use the legacy Timing Generator.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_vtotal = 1;
+
+ /*
* The double buffer latch point for TRANS_VTOTAL
* is the transcoder's undelayed vblank.
*/
intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(crtc_vdisplay - 1) |
VTOTAL(crtc_vtotal - 1));
+
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+ intel_vrr_transcoder_enable(crtc_state);
}
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
@@ -2853,6 +2870,10 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vdisplay +
intel_de_read(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
+
+ if (DISPLAY_VER(display) >= 30)
+ pipe_config->min_hblank = intel_de_read(display,
+ DP_MIN_HBLANK_CTL(cpu_transcoder));
}
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
@@ -3835,7 +3856,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_display_power_domain_set *power_domain_set)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder;
enum port port;
u32 tmp;
@@ -3857,7 +3877,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
* registers/MIPI[BXT]. We can break out here early, since we
* need the same DSI PLL to be enabled for both DSI ports.
*/
- if (!bxt_dsi_pll_is_enabled(dev_priv))
+ if (!bxt_dsi_pll_is_enabled(display))
break;
/* XXX: this works for video mode only */
@@ -3920,7 +3940,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- if (HAS_VRR(display) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ if (transcoder_has_vrr(pipe_config))
intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -4147,8 +4167,6 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
int linetime_wm;
@@ -4161,7 +4179,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
/* Display WA #1135: BXT:ALL GLK:ALL */
if ((display->platform.geminilake || display->platform.broxton) &&
- skl_watermark_ipc_enabled(dev_priv))
+ skl_watermark_ipc_enabled(display))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -5206,6 +5224,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+ PIPE_CONF_CHECK_I(min_hblank);
+
if (HAS_DOUBLE_BUFFERED_M_N(display)) {
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -5387,8 +5407,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
- PIPE_CONF_CHECK_I(vrr.pipeline_full);
- PIPE_CONF_CHECK_I(vrr.guardband);
PIPE_CONF_CHECK_I(vrr.vsync_start);
PIPE_CONF_CHECK_I(vrr.vsync_end);
PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
@@ -5396,6 +5414,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(cmrr.enable);
}
+ if (!fastset || intel_vrr_always_use_vrr_tg(display)) {
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+ PIPE_CONF_CHECK_I(vrr.guardband);
+ }
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
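
For context, the PIPE_CONF_CHECK_*() helpers used throughout this function (including the newly added PIPE_CONF_CHECK_I(min_hblank) and the now fastset-conditional vrr.pipeline_full/vrr.guardband checks) compare one field of the read-back hardware state against the computed software state and flag any mismatch. A simplified, self-contained sketch of the idea; the real macros also route through the driver's mismatch printer and handle fastset exceptions:

#include <stdbool.h>
#include <stdio.h>

struct crtc_state_sketch { int min_hblank; int lane_count; };

/* Hypothetical stand-in for PIPE_CONF_CHECK_I(): compare one integer
 * field, log a mismatch, and fail the overall check. */
#define CHECK_I(field) do {						\
	if (cur->field != new->field) {					\
		fprintf(stderr, "mismatch in " #field " (%d != %d)\n",	\
			cur->field, new->field);			\
		ok = false;						\
	}								\
} while (0)

static bool states_match(const struct crtc_state_sketch *cur,
			 const struct crtc_state_sketch *new)
{
	bool ok = true;

	CHECK_I(min_hblank);
	CHECK_I(lane_count);
	return ok;
}

int main(void)
{
	struct crtc_state_sketch a = { 460, 4 }, b = { 0, 4 };

	return states_match(&a, &b) ? 0 : 1;
}
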
@@ -6007,22 +6030,16 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!plane->async_flip)
continue;
- if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->modifier)) {
+ if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format->format,
+ new_plane_state->hw.fb->modifier)) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
+ "[PLANE:%d:%s] pixel format %p4cc / modifier 0x%llx does not support async flip\n",
plane->base.base.id, plane->base.name,
+ &new_plane_state->hw.fb->format->format,
new_plane_state->hw.fb->modifier);
return -EINVAL;
}
- if (intel_format_info_is_yuv_semiplanar(new_plane_state->hw.fb->format,
- new_plane_state->hw.fb->modifier)) {
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] Planar formats do not support async flips\n",
- plane->base.base.id, plane->base.name);
- return -EINVAL;
- }
-
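
The deleted YUV-semiplanar rejection is not lost: with the pixel format now passed into intel_plane_can_async_flip(), per-format capability data can express it directly, and planar formats are simply never listed as async-capable. A hypothetical sketch of such a format + modifier lookup (table contents and names are illustrative, not i915's actual tables):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct async_flip_cap {
	uint32_t fourcc;	/* e.g. DRM_FORMAT_XRGB8888 */
	uint64_t modifier;	/* e.g. a linear or tiled layout */
};

/* Hypothetical capability table: planar YUV formats are simply absent. */
static const struct async_flip_cap async_flip_caps[] = {
	{ 0x34325258 /* 'XR24' */, 0 /* LINEAR */ },
};

static bool can_async_flip(uint32_t fourcc, uint64_t modifier)
{
	for (size_t i = 0; i < sizeof(async_flip_caps) / sizeof(async_flip_caps[0]); i++)
		if (async_flip_caps[i].fourcc == fourcc &&
		    async_flip_caps[i].modifier == modifier)
			return true;

	return false;
}
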
/*
* We turn the first async flip request into a sync flip
* so that we can reconfigure the plane (eg. change modifier).
@@ -6429,7 +6446,7 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- ret = intel_bw_atomic_check(state);
+ ret = intel_bw_atomic_check(state, any_ms);
if (ret)
goto fail;
@@ -6533,7 +6550,6 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
{
struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
* Update pipe size and adjust fitter if needed: the reason for this is
@@ -6549,7 +6565,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(display) >= 9) {
if (new_crtc_state->pch_pfit.enabled)
skl_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
if (new_crtc_state->pch_pfit.enabled)
ilk_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
@@ -6650,6 +6666,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(pipe_crtc_state, false);
}
+ intel_psr_notify_pipe_change(state, crtc, true);
+
display->funcs.display->crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
@@ -6769,6 +6787,8 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
intel_crtc_joined_pipe_mask(old_crtc_state))
intel_crtc_disable_pipe_crc(pipe_crtc);
+ intel_psr_notify_pipe_change(state, crtc, false);
+
display->funcs.display->crtc_disable(state, crtc);
for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
@@ -7231,7 +7251,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
@@ -7445,7 +7465,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* toggling overhead at and above 60 FPS.
*/
intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -7517,10 +7537,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
{
struct intel_display *display = to_intel_display(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_display_rpm_get(display);
/*
* The intel_legacy_cursor_update() fast path takes care
@@ -7554,7 +7573,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_dbg_atomic(display->drm,
"Preparing state failed with %i\n", ret);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7564,7 +7583,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_atomic_helper_unprepare_planes(dev, &state->base);
- intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ intel_display_rpm_put(display, state->wakeref);
return ret;
}
@@ -7626,15 +7645,13 @@ static bool ilk_has_edp_a(struct intel_display *display)
static bool intel_ddi_crt_present(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9)
return false;
if (display->platform.haswell_ult || display->platform.broadwell_ult)
return false;
- if (HAS_PCH_LPT_H(dev_priv) &&
+ if (HAS_PCH_LPT_H(display) &&
intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
@@ -7656,7 +7673,6 @@ bool assert_port_valid(struct intel_display *display, enum port port)
void intel_setup_outputs(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
@@ -7672,8 +7688,8 @@ void intel_setup_outputs(struct intel_display *display)
intel_bios_for_each_encoder(display, intel_ddi_init);
if (display->platform.geminilake || display->platform.broxton)
- vlv_dsi_init(dev_priv);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ vlv_dsi_init(display);
+ } else if (HAS_PCH_SPLIT(display)) {
int found;
/*
@@ -7681,7 +7697,7 @@ void intel_setup_outputs(struct intel_display *display)
* to prevent the registration of both eDP and LVDS and the
* incorrect sharing of the PPS.
*/
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
@@ -7756,15 +7772,15 @@ void intel_setup_outputs(struct intel_display *display)
g4x_hdmi_init(display, CHV_HDMID, PORT_D);
}
- vlv_dsi_init(dev_priv);
+ vlv_dsi_init(display);
} else if (display->platform.pineview) {
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
} else if (IS_DISPLAY_VER(display, 3, 4)) {
bool found = false;
if (display->platform.mobile)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
@@ -7806,10 +7822,10 @@ void intel_setup_outputs(struct intel_display *display)
intel_tv_init(display);
} else if (DISPLAY_VER(display) == 2) {
if (display->platform.i85x)
- intel_lvds_init(dev_priv);
+ intel_lvds_init(display);
intel_crt_init(display);
- intel_dvo_init(dev_priv);
+ intel_dvo_init(display);
}
for_each_intel_encoder(display->drm, encoder) {
@@ -7819,7 +7835,7 @@ void intel_setup_outputs(struct intel_display *display)
intel_encoder_possible_clones(encoder);
}
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
drm_helper_move_panel_connectors_to_head(display->drm);
}
@@ -8041,13 +8057,11 @@ static const struct intel_display_funcs i9xx_display_funcs = {
*/
void intel_init_display_hooks(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 9) {
display->funcs.display = &skl_display_funcs;
} else if (HAS_DDI(display)) {
display->funcs.display = &ddi_display_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(display)) {
display->funcs.display = &pch_split_display_funcs;
} else if (display->platform.cherryview ||
display->platform.valleyview) {
@@ -8083,6 +8097,9 @@ retry:
goto out;
}
+ if (!crtc_state->hw.active)
+ crtc_state->inherited = false;
+
if (crtc_state->hw.active) {
struct intel_encoder *encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index eeb7ae3eaea8..b4937e102360 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -21,17 +21,15 @@
#include "intel_display_limits.h"
#include "intel_display_params.h"
#include "intel_display_power.h"
+#include "intel_dmc_wl.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
#include "intel_global_state.h"
#include "intel_gmbus.h"
#include "intel_opregion.h"
-#include "intel_dmc_wl.h"
+#include "intel_pch.h"
#include "intel_wm_types.h"
-struct task_struct;
-
-struct drm_i915_private;
struct drm_property;
struct drm_property_blob;
struct i915_audio_component;
@@ -52,6 +50,7 @@ struct intel_hotplug_funcs;
struct intel_initial_plane_config;
struct intel_opregion;
struct intel_overlay;
+struct task_struct;
/* Number of SAGV/QGV points; BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8
@@ -80,7 +79,7 @@ struct intel_display_funcs {
/* functions used for watermark calcs for display. */
struct intel_wm_funcs {
/* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
+ void (*update_wm)(struct intel_display *display);
int (*compute_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
@@ -90,8 +89,8 @@ struct intel_wm_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
- void (*get_hw_state)(struct drm_i915_private *i915);
- void (*sanitize)(struct drm_i915_private *i915);
+ void (*get_hw_state)(struct intel_display *display);
+ void (*sanitize)(struct intel_display *display);
};
struct intel_audio_state {
@@ -160,6 +159,7 @@ struct intel_hotplug {
struct {
unsigned long last_jiffies;
int count;
+ int blocked_count;
enum {
HPD_ENABLED = 0,
HPD_DISABLED = 1,
@@ -170,8 +170,8 @@ struct intel_hotplug {
u32 retry_bits;
struct delayed_work reenable_work;
- u32 long_port_mask;
- u32 short_port_mask;
+ u32 long_hpd_pin_mask;
+ u32 short_hpd_pin_mask;
struct work_struct dig_port_work;
struct work_struct poll_init_work;
@@ -179,7 +179,7 @@ struct intel_hotplug {
/*
* Queuing of hotplug_work, reenable_work and poll_init_work is
- * enabled. Protected by drm_i915_private::irq_lock.
+ * enabled. Protected by intel_display::irq::lock.
*/
bool detection_work_enabled;
@@ -288,6 +288,9 @@ struct intel_display {
/* Platform (and subplatform, if any) identification */
struct intel_display_platforms platform;
+ /* Intel PCH: where the south display engine lives */
+ enum intel_pch pch_type;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -425,7 +428,7 @@ struct intel_display {
* reused when sending messages to the GSC CS.
* This is only populated post-Meteorlake.
*/
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct intel_hdcp_gsc_context *gsc_context;
/* Mutex to protect the above hdcp related values. */
struct mutex hdcp_mutex;
} hdcp;
@@ -453,6 +456,9 @@ struct intel_display {
} ips;
struct {
+ /* protects the irq masks */
+ spinlock_t lock;
+
/*
* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -465,9 +471,9 @@ struct intel_display {
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
- int vblank_wa_num_pipes;
+ int vblank_enable_count;
- struct work_struct vblank_dc_work;
+ struct work_struct vblank_notify_work;
u32 de_irq_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
@@ -574,6 +580,8 @@ struct intel_display {
struct intel_vbt_data vbt;
struct intel_dmc_wl wl;
struct intel_wm wm;
+
+ struct work_struct psr_dc5_dc6_wa_work;
};
#endif /* __INTEL_DISPLAY_CORE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index fdedf65bee53..8d0a1779dd19 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -7,11 +7,12 @@
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i9xx_wm_regs.h"
@@ -24,6 +25,7 @@
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
@@ -54,6 +56,8 @@ static int intel_display_caps(struct seq_file *m, void *data)
struct intel_display *display = node_to_intel_display(m->private);
struct drm_printer p = drm_seq_file_printer(m);
+ drm_printf(&p, "PCH type: %d\n", INTEL_PCH_TYPE(display));
+
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
intel_display_params_dump(&display->params, display->drm->driver->name, &p);
@@ -81,7 +85,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
bool sr_enabled = false;
@@ -89,7 +92,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (DISPLAY_VER(display) >= 9)
/* no global SR status; inspect per-plane WM */;
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
sr_enabled = intel_de_read(display, WM1_LP_ILK) & WM_LP_ENABLE;
else if (display->platform.i965gm || display->platform.g4x ||
display->platform.i945g || display->platform.i945gm)
@@ -554,6 +557,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n",
DRM_RECT_ARG(&crtc_state->pipe_src),
str_yes_no(crtc_state->dither), crtc_state->pipe_bpp);
+ seq_printf(m, "\tport_clock=%d, lane_count=%d\n",
+ crtc_state->port_clock, crtc_state->lane_count);
intel_scaler_info(m, crtc);
@@ -580,13 +585,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
drm_modeset_lock_all(display->drm);
@@ -605,18 +609,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_modeset_unlock_all(display->drm);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_display_capabilities(struct seq_file *m, void *unused)
-{
- struct intel_display *display = node_to_intel_display(m->private);
- struct drm_printer p = drm_seq_file_printer(m);
-
- intel_display_device_info_print(DISPLAY_INFO(display),
- DISPLAY_RUNTIME_INFO(display), &p);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -690,14 +683,11 @@ static bool
intel_lpsp_power_well_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- intel_wakeref_t wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(display,
- power_well_id);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ is_enabled = intel_display_power_well_is_enabled(display,
+ power_well_id);
return is_enabled;
}
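
The with_intel_display_rpm() used above is a scoped guard replacing explicit get/put pairs. A sketch of its assumed shape, following the for-loop idiom of the kernel's other with_*() helpers — this is not the header's actual definition, and note that an early return or goto out of the body would skip the put:

/*
 * Assumed shape only: acquire a display runtime-PM wakeref, run the
 * statement (or block) that follows exactly once, then release the
 * wakeref when the body completes normally.
 */
#define with_intel_display_rpm(display)					\
	for (struct ref_tracker *wf__ = intel_display_rpm_get(display);	\
	     wf__;							\
	     intel_display_rpm_put((display), wf__), wf__ = NULL)
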
@@ -820,7 +810,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
- {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
@@ -829,7 +818,6 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
void intel_display_debugfs_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
@@ -844,10 +832,10 @@ void intel_display_debugfs_register(struct intel_display *display)
intel_dmc_debugfs_register(display);
intel_dp_test_debugfs_register(display);
intel_fbc_debugfs_register(display);
- intel_hpd_debugfs_register(i915);
+ intel_hpd_debugfs_register(display);
intel_opregion_debugfs_register(display);
intel_psr_debugfs_register(display);
- intel_wm_debugfs_register(i915);
+ intel_wm_debugfs_register(display);
intel_display_debugfs_params(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 738ae522c8f4..90d714598664 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -3,11 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/intel/pciids.h>
-#include <drm/drm_color_mgmt.h>
#include <linux/pci.h>
-#include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <drm/intel/pciids.h>
+
#include "i915_reg.h"
#include "intel_cx0_phy_regs.h"
#include "intel_de.h"
@@ -1711,7 +1713,6 @@ void intel_display_device_remove(struct intel_display *display)
static void __intel_display_device_info_runtime_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(display);
enum pipe pipe;
@@ -1775,7 +1776,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
goto display_fused_off;
}
- if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(display)) {
u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
u32 sfuse_strap = intel_de_read(display, SFUSE_STRAP);
@@ -1790,7 +1791,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
*/
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (HAS_PCH_CPT(i915) &&
+ (HAS_PCH_CPT(display) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(display->drm,
"Display fused off, disabling\n");
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 7a3bb77c7af7..87c666792c0d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -143,9 +143,11 @@ struct intel_display_platforms {
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13)
#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
@@ -156,9 +158,9 @@ struct intel_display_platforms {
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
-#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
#define HAS_DSC_3ENGINES(__display) (DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display))
@@ -167,9 +169,10 @@ struct intel_display_platforms {
#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
-#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
@@ -190,10 +193,7 @@ struct intel_display_platforms {
((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
-#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
-#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
-#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 31740a677dd8..411fe7b918a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -54,6 +54,7 @@
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_vga.h"
#include "intel_wm.h"
@@ -82,7 +83,6 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
void intel_display_driver_init_hw(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
if (!HAS_DISPLAY(display))
@@ -94,7 +94,7 @@ void intel_display_driver_init_hw(struct intel_display *display)
intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
- intel_display_wa_apply(i915);
+ intel_display_wa_apply(display);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
@@ -181,7 +181,8 @@ static void intel_plane_possible_crtcs_init(struct intel_display *display)
void intel_display_driver_early_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_pch_detect(display);
if (!HAS_DISPLAY(display))
return;
@@ -193,12 +194,12 @@ void intel_display_driver_early_probe(struct intel_display *display)
mutex_init(&display->pps.mutex);
mutex_init(&display->hdcp.hdcp_mutex);
- intel_display_irq_init(i915);
+ intel_display_irq_init(display);
intel_dkl_phy_init(display);
intel_color_init_hooks(display);
intel_init_cdclk_hooks(display);
intel_audio_hooks_init(display);
- intel_dpll_init_clock_hook(i915);
+ intel_dpll_init_clock_hook(display);
intel_init_display_hooks(display);
intel_fdi_init_hook(display);
intel_dmc_wl_init(display);
@@ -226,6 +227,8 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_bios;
+ intel_psr_dc5_dc6_wa_init(display);
+
/* FIXME: completely on the wrong abstraction layer */
ret = intel_power_domains_init(display);
if (ret < 0)
@@ -241,31 +244,45 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
intel_dmc_init(display);
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ if (!display->wq.modeset) {
+ ret = -ENOMEM;
+ goto cleanup_vga_client_pw_domain_dmc;
+ }
+
display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ if (!display->wq.flip) {
+ ret = -ENOMEM;
+ goto cleanup_wq_modeset;
+ }
+
display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
+ if (!display->wq.cleanup) {
+ ret = -ENOMEM;
+ goto cleanup_wq_flip;
+ }
intel_mode_config_init(display);
ret = intel_cdclk_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
ret = intel_color_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
- ret = intel_dbuf_init(i915);
+ ret = intel_dbuf_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
- ret = intel_bw_init(i915);
+ ret = intel_bw_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
ret = intel_pmdemand_init(display);
if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_wq_cleanup;
intel_init_quirks(display);
@@ -273,6 +290,12 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
return 0;
+cleanup_wq_cleanup:
+ destroy_workqueue(display->wq.cleanup);
+cleanup_wq_flip:
+ destroy_workqueue(display->wq.flip);
+cleanup_wq_modeset:
+ destroy_workqueue(display->wq.modeset);
cleanup_vga_client_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
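
The new workqueue error handling above follows the kernel's standard goto-unwind pattern: each allocation that can fail gets a label, and the labels release resources in reverse order of acquisition so every failure path tears down exactly what was set up before it. A self-contained sketch with stand-in types:

#include <errno.h>

struct wq;					/* stand-in for struct workqueue_struct */
extern struct wq *wq_alloc(const char *name);	/* stand-in for alloc_workqueue() */
extern void wq_destroy(struct wq *wq);		/* stand-in for destroy_workqueue() */

struct disp_wqs { struct wq *modeset, *flip, *cleanup; };

static int probe_wqs(struct disp_wqs *d)
{
	int ret;

	d->modeset = wq_alloc("modeset");
	if (!d->modeset)
		return -ENOMEM;

	d->flip = wq_alloc("flip");
	if (!d->flip) {
		ret = -ENOMEM;
		goto err_modeset;
	}

	d->cleanup = wq_alloc("cleanup");
	if (!d->cleanup) {
		ret = -ENOMEM;
		goto err_flip;
	}

	return 0;

err_flip:
	wq_destroy(d->flip);
err_modeset:
	wq_destroy(d->modeset);
	return ret;
}
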
@@ -315,11 +338,9 @@ static void set_display_access(struct intel_display *display,
*/
void intel_display_driver_enable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
set_display_access(display, true, NULL);
- intel_hpd_enable_detection_work(i915);
+ intel_hpd_enable_detection_work(display);
}
/**
@@ -341,9 +362,7 @@ void intel_display_driver_enable_user_access(struct intel_display *display)
*/
void intel_display_driver_disable_user_access(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- intel_hpd_disable_detection_work(i915);
+ intel_hpd_disable_detection_work(display);
set_display_access(display, false, current);
}
@@ -422,14 +441,13 @@ bool intel_display_driver_check_access(struct intel_display *display)
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
int ret;
if (!HAS_DISPLAY(display))
return 0;
- intel_wm_init(i915);
+ intel_wm_init(display);
intel_panel_sanitize_ssc(display);
@@ -460,8 +478,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_hti_init(display);
- /* Just disable it once at startup */
- intel_vga_disable(display);
intel_setup_outputs(display);
ret = intel_dp_tunnel_mgr_init(display);
@@ -471,7 +487,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_display_driver_disable_user_access(display);
drm_modeset_lock_all(display->drm);
- intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
+ intel_modeset_setup_hw_state(display, display->drm->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(display);
drm_modeset_unlock_all(display->drm);
@@ -483,7 +499,7 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(display))
- ilk_wm_sanitize(i915);
+ ilk_wm_sanitize(display);
return 0;
@@ -498,7 +514,6 @@ err_mode_config:
/* part #3: call after gem init */
int intel_display_driver_probe(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (!HAS_DISPLAY(display))
@@ -524,16 +539,15 @@ int intel_display_driver_probe(struct intel_display *display)
intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(i915);
+ intel_hpd_init(display);
- skl_watermark_ipc_init(i915);
+ skl_watermark_ipc_init(display);
return 0;
}
void intel_display_driver_register(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
"i915 display info:");
@@ -558,9 +572,9 @@ void intel_display_driver_register(struct intel_display *display)
* fbdev->async_cookie.
*/
drm_kms_helper_poll_init(display->drm);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
- intel_fbdev_setup(i915);
+ intel_fbdev_setup(display);
intel_display_device_info_print(DISPLAY_INFO(display),
DISPLAY_RUNTIME_INFO(display), &p);
@@ -600,7 +614,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display)
* Due to the hpd irq storm handling, the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(i915);
+ intel_hpd_poll_fini(display);
intel_unregister_dsm_handler();
@@ -695,13 +709,11 @@ __intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
- intel_modeset_setup_hw_state(i915, ctx);
- intel_vga_redisable(display);
+ intel_modeset_setup_hw_state(display, ctx);
if (!state)
return 0;
@@ -733,7 +745,6 @@ __intel_display_driver_resume(struct intel_display *display,
void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
@@ -761,7 +772,7 @@ void intel_display_driver_resume(struct intel_display *display)
if (!ret)
ret = __intel_display_driver_resume(display, state, &ctx);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index aa23bb817805..3e73832e5e81 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -5,7 +5,6 @@
#include <drm/drm_vblank.h>
-#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -14,6 +13,8 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_rpm.h"
+#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc_wl.h"
@@ -115,9 +116,8 @@ static void intel_pipe_fault_irq_handler(struct intel_display *display,
}
static void
-intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_handle_vblank(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
drm_crtc_handle_vblank(&crtc->base);
@@ -125,59 +125,59 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
/**
* ilk_update_display_irq - update DEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
new_val = dev_priv->irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != dev_priv->irq_mask &&
- !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
+ !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
dev_priv->irq_mask = new_val;
intel_de_write(display, DEIMR, dev_priv->irq_mask);
intel_de_posting_read(display, DEIMR);
}
}
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_enable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, bits);
+ ilk_update_display_irq(display, bits, bits);
}
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
+void ilk_disable_display_irq(struct intel_display *display, u32 bits)
{
- ilk_update_display_irq(i915, bits, 0);
+ ilk_update_display_irq(display, bits, 0);
}
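
The update rule in ilk_update_display_irq() deserves a worked example: within interrupt_mask, a bit ends up set in IMR (masked off) unless enabled_irq_mask selects it, while bits outside interrupt_mask keep their old value. A self-contained sketch of the same arithmetic:

#include <assert.h>
#include <stdint.h>

/* Same update rule as ilk_update_display_irq(): within interrupt_mask,
 * a bit ends up set in IMR (masked off) unless enabled_irq_mask selects
 * it; bits outside interrupt_mask are untouched. */
static uint32_t imr_update(uint32_t old, uint32_t interrupt_mask,
			   uint32_t enabled_irq_mask)
{
	uint32_t new_val = old;

	new_val &= ~interrupt_mask;
	new_val |= ~enabled_irq_mask & interrupt_mask;
	return new_val;
}

int main(void)
{
	/* Update bits 0-1: enable bit 0, disable bit 1; leave bit 2 alone. */
	uint32_t imr = imr_update(0x7, 0x3, 0x1);

	assert(imr == 0x6);	/* bit 0 unmasked, bits 1 and 2 still masked */
	return 0;
}
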
/**
* bdw_update_port_irq - update DE port interrupt
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+void bdw_update_port_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
u32 old_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
@@ -194,93 +194,92 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv,
/**
* bdw_update_pipe_irq - update DE pipe interrupt
- * @dev_priv: driver private
+ * @display: display device
* @pipe: pipe whose interrupt to update
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+static void bdw_update_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = dev_priv->display.irq.de_irq_mask[pipe];
+ new_val = display->irq.de_irq_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) {
- dev_priv->display.irq.de_irq_mask[pipe] = new_val;
+ if (new_val != display->irq.de_irq_mask[pipe]) {
+ display->irq.de_irq_mask[pipe] = new_val;
intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
-void bdw_enable_pipe_irq(struct drm_i915_private *i915,
+void bdw_enable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, bits);
+ bdw_update_pipe_irq(display, pipe, bits, bits);
}
-void bdw_disable_pipe_irq(struct drm_i915_private *i915,
+void bdw_disable_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 bits)
{
- bdw_update_pipe_irq(i915, pipe, bits, 0);
+ bdw_update_pipe_irq(display, pipe, bits, 0);
}
/**
* ibx_display_interrupt_update - update SDEIMR
- * @dev_priv: driver private
+ * @display: display device
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
intel_de_write(display, SDEIMR, sdeimr);
intel_de_posting_read(display, SDEIMR);
}
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, bits);
+ ibx_display_interrupt_update(display, bits, bits);
}
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits)
{
- ibx_display_interrupt_update(i915, bits, 0);
+ ibx_display_interrupt_update(display, bits, 0);
}
u32 i915_pipestat_enable_mask(struct intel_display *display,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 status_mask = display->irq.pipestat_irq_mask[pipe];
u32 enable_mask = status_mask << 16;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if (DISPLAY_VER(display) < 5)
goto out;
@@ -318,48 +317,48 @@ out:
return enable_mask;
}
-void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+void i915_enable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask;
+ display->irq.pipestat_irq_mask[pipe] |= status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
intel_de_posting_read(display, reg);
}
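
i915_pipestat_enable_mask() and the enable/disable helpers above rely on PIPESTAT's layout: an event's status bit sits in the low 16 bits and its enable bit sits 16 positions higher, so a single register write can enable an event and acknowledge its sticky (write-1-to-clear) status at once. A worked sketch of that relationship:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t status_mask = 1u << 2;		  /* some event's status bit */
	uint32_t enable_mask = status_mask << 16; /* its enable bit, 16 up */

	/* Mirrors intel_de_write(display, reg, enable_mask | status_mask):
	 * one write enables the event and acks any latched status. */
	uint32_t value = enable_mask | status_mask;

	assert((value & 0xffff) == status_mask);
	assert((value >> 16) == status_mask);
	return 0;
}
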
-void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+void i915_disable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
- struct intel_display *display = &dev_priv->display;
- i915_reg_t reg = PIPESTAT(dev_priv, pipe);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
- drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
"pipe %c: status_mask=0x%x\n",
pipe_name(pipe), status_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0)
+ if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
- dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask;
+ display->irq.pipestat_irq_mask[pipe] &= ~status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
intel_de_write(display, reg, enable_mask | status_mask);
@@ -368,49 +367,41 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_I85X(i915))
+ if (display->platform.i85x)
return true;
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
return true;
- return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
+ return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile;
}
-/**
- * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev_priv: i915 device private
- */
-void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
+/* enable ASLE pipestat for OpRegion */
+static void i915_enable_asle_pipestat(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (!intel_opregion_asle_present(display))
return;
if (!i915_has_legacy_blc_interrupt(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (DISPLAY_VER(dev_priv) >= 4)
- i915_enable_pipestat(dev_priv, PIPE_A,
+ i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
+ if (DISPLAY_VER(display) >= 4)
+ i915_enable_pipestat(display, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
@@ -427,7 +418,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* don't trust that one either.
*/
if (pipe_crc->skipped <= 0 ||
- (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+ (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -440,20 +431,19 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+display_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4) {}
#endif
-static void flip_done_handler(struct drm_i915_private *i915,
+static void flip_done_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
- spin_lock(&i915->drm.event_lock);
+ spin_lock(&display->drm->event_lock);
if (crtc->flip_done_event) {
trace_intel_crtc_flip_done(crtc);
@@ -461,25 +451,21 @@ static void flip_done_handler(struct drm_i915_private *i915,
crtc->flip_done_event = NULL;
}
- spin_unlock(&i915->drm.event_lock);
+ spin_unlock(&display->drm->event_lock);
}
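
For reference (the hunk elides the middle of flip_done_handler()), the standard DRM idiom for completing a pending flip event is drm_crtc_send_vblank_event(), which must be called with dev->event_lock held — the lock the spin_lock conversion above takes. A sketch of the generic idiom, not necessarily the elided i915 code:

#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_vblank.h>

/* Generic DRM pattern (sketch): complete a pending pageflip event under
 * dev->event_lock, as drm_crtc_send_vblank_event() requires. The
 * double-pointer parameter stands in for a driver's own event field. */
static void complete_flip_event(struct drm_device *dev, struct drm_crtc *crtc,
				struct drm_pending_vblank_event **event)
{
	spin_lock(&dev->event_lock);
	if (*event) {
		drm_crtc_send_vblank_event(crtc, *event);
		*event = NULL;
	}
	spin_unlock(&dev->event_lock);
}
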
-static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void hsw_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_HSW(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void ivb_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
-
- display_pipe_crc_irq_handler(dev_priv, pipe,
+ display_pipe_crc_irq_handler(display, pipe,
intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)),
intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)),
@@ -487,58 +473,55 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_irq_handler(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 res1, res2;
- if (DISPLAY_VER(dev_priv) >= 3)
- res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 3)
+ res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe));
else
res1 = 0;
- if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
- res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe));
+ if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
+ res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev_priv, pipe,
- intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)),
- intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)),
+ display_pipe_crc_irq_handler(display, pipe,
+ intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)),
+ intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)),
res1, res2);
}
-static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+static void i9xx_pipestat_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
intel_de_write(display,
- PIPESTAT(dev_priv, pipe),
+ PIPESTAT(display, pipe),
PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS);
- dev_priv->display.irq.pipestat_irq_mask[pipe] = 0;
+ display->irq.pipestat_irq_mask[pipe] = 0;
}
}
-void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+void i9xx_pipestat_irq_ack(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->display.irq.vlv_display_irqs_enabled) {
- spin_unlock(&dev_priv->irq_lock);
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled) {
+ spin_unlock(&display->irq.lock);
return;
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
i915_reg_t reg;
u32 status_mask, enable_mask, iir_bit = 0;
@@ -566,12 +549,12 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe];
+ status_mask |= display->irq.pipestat_irq_mask[pipe];
if (!status_mask)
continue;
- reg = PIPESTAT(dev_priv, pipe);
+ reg = PIPESTAT(display, pipe);
pipe_stats[pipe] = intel_de_read(display, reg) & status_mask;
enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -589,25 +572,24 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
intel_de_write(display, reg, enable_mask);
}
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
-void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i915_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -617,22 +599,21 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(display);
}
-void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void i965_pipestat_irq_handler(struct intel_display *display,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
bool blc_event = false;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -645,21 +626,20 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+void valleyview_pipestat_irq_handler(struct intel_display *display,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
@@ -669,18 +649,17 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_gmbus_irq_handler(display);
}
-static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void ibx_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ drm_dbg(display->drm, "PCH audio power change on port %d\n",
port_name(port));
}
@@ -691,26 +670,26 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
+ drm_dbg(display->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
+ drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
@@ -753,14 +732,13 @@ static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = {
{}
};
-static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
+static void ivb_err_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (err_int & ERR_INT_INVALID_GTT_PTE)
drm_err_ratelimited(display->drm, "Invalid GTT PTE\n");
@@ -768,17 +746,17 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
if (err_int & ERR_INT_INVALID_PTE_DATA)
drm_err_ratelimited(display->drm, "Invalid PTE data\n");
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev_priv))
- ivb_pipe_crc_irq_handler(dev_priv, pipe);
+ if (display->platform.ivybridge)
+ ivb_pipe_crc_irq_handler(display, pipe);
else
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
}
fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe);
@@ -790,34 +768,32 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
intel_de_write(display, GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
+static void cpt_serr_int_handler(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 serr_int = intel_de_read(display, SERR_INT);
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- drm_err(&dev_priv->drm, "PCH poison interrupt\n");
+ drm_err(display->drm, "PCH poison interrupt\n");
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
intel_pch_fifo_underrun_irq_handler(display, pipe);
intel_de_write(display, SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void cpt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ibx_hpd_irq_handler(display, hotplug_trigger);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ drm_dbg(display->drm, "PCH audio power change on port %c\n",
port_name(port));
}
@@ -828,20 +804,20 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
+ drm_dbg(display->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
+ drm_dbg(display->drm, "Audio CP change interrupt\n");
if (pch_iir & SDE_FDI_MASK_CPT) {
- for_each_pipe(dev_priv, pipe)
- drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ for_each_pipe(display, pipe)
+ drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
intel_de_read(display, FDI_RX_IIR(pipe)));
}
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev_priv);
+ cpt_serr_int_handler(display);
}
static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe)
@@ -894,14 +870,13 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display)
}
}
-void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
intel_dp_aux_irq_handler(display);
@@ -910,58 +885,57 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
intel_opregion_asle_intr(display);
if (de_iir & DE_POISON)
- drm_err(&dev_priv->drm, "Poison interrupt\n");
+ drm_err(display->drm, "Poison interrupt\n");
if (de_iir & DE_GTT_FAULT)
ilk_gtt_fault_irq_handler(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(display, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+ i9xx_pipe_crc_irq_handler(display, pipe);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- if (HAS_PCH_CPT(dev_priv))
- cpt_irq_handler(dev_priv, pch_iir);
+ if (HAS_PCH_CPT(display))
+ cpt_irq_handler(display, pch_iir);
else
- ibx_irq_handler(dev_priv, pch_iir);
+ ibx_irq_handler(display, pch_iir);
/* should clear PCH hotplug event before clearing CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
- if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
- gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
+ if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT)
+ ilk_display_rps_irq_handler(display);
}
-void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev_priv);
+ ivb_err_int_handler(display);
if (de_iir & DE_EDP_PSR_INT_HSW) {
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 psr_iir;
@@ -977,35 +951,35 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(display);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
- flip_done_handler(dev_priv, pipe);
+ flip_done_handler(display, pipe);
}
/* check event from PCH */
- if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
+ if (!HAS_PCH_NOP(display) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = intel_de_read(display, SDEIIR);
- cpt_irq_handler(dev_priv, pch_iir);
+ cpt_irq_handler(display, pch_iir);
/* clear PCH hotplug event before clearing CPU irq */
intel_de_write(display, SDEIIR, pch_iir);
}
}
-static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return 0;
- else if (DISPLAY_VER(dev_priv) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB;
- else if (DISPLAY_VER(dev_priv) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1015,7 +989,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC2 |
TGL_DE_PORT_AUX_USBC3 |
TGL_DE_PORT_AUX_USBC4;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -1027,12 +1001,12 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
TGL_DE_PORT_AUX_USBC6;
mask = GEN8_AUX_CHANNEL_A;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (DISPLAY_VER(dev_priv) == 11) {
+ if (DISPLAY_VER(display) == 11) {
mask |= ICL_AUX_CHANNEL_F;
mask |= ICL_AUX_CHANNEL_E;
}
@@ -1040,10 +1014,8 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
return mask;
}
-static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+static u32 gen8_de_pipe_fault_mask(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
@@ -1195,15 +1167,14 @@ gen8_pipe_fault_handlers(struct intel_display *display)
return bdw_pipe_fault_handlers;
}
-static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
+static void intel_pmdemand_irq_handler(struct intel_display *display)
{
- wake_up_all(&dev_priv->display.pmdemand.waitqueue);
+ wake_up_all(&display->pmdemand.waitqueue);
}
static void
-gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+gen8_de_misc_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &dev_priv->display;
bool found = false;
if (HAS_DBUF_OVERLAP_DETECTION(display)) {
@@ -1213,20 +1184,20 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
}
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
if (iir & (XELPDP_PMDEMAND_RSP |
XELPDP_PMDEMAND_RSPTOUT_ERR)) {
if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Error waiting for Punit PM Demand Response\n");
- intel_pmdemand_irq_handler(dev_priv);
+ intel_pmdemand_irq_handler(display);
found = true;
}
if (iir & XELPDP_RM_TIMEOUT) {
u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE);
- drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val);
+ drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val);
found = true;
}
} else if (iir & GEN8_DE_MISC_GSE) {
@@ -1239,12 +1210,12 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 psr_iir;
i915_reg_t iir_reg;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (DISPLAY_VER(dev_priv) >= 12)
- iir_reg = TRANS_PSR_IIR(dev_priv,
- intel_dp->psr.transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ iir_reg = TRANS_PSR_IIR(display,
+ intel_dp->psr.transcoder);
else
iir_reg = EDP_PSR_IIR;
@@ -1256,19 +1227,18 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_psr_irq_handler(intel_dp, psr_iir);
/* platforms prior to GEN12 have only one eDP PSR */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
break;
}
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
+ drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
-static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
+static void gen11_dsi_te_interrupt_handler(struct intel_display *display,
u32 te_trigger)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe = INVALID_PIPE;
enum transcoder dsi_trans;
enum port port;
@@ -1278,7 +1248,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
* In case of dual link, TE comes from DSI_1;
* this is to check if dual link is enabled
*/
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0));
val &= PORT_SYNC_MODE_ENABLE;
/*
@@ -1294,12 +1264,12 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
val = val & OP_MODE_MASK;
if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
- drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
+ drm_err(display->drm, "DSI trancoder not configured in command mode\n");
return;
}
/* Get PIPE for handling VBLANK event */
- val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans));
switch (val & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
pipe = PIPE_A;
@@ -1311,28 +1281,27 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
pipe = PIPE_C;
break;
default:
- drm_err(&dev_priv->drm, "Invalid PIPE\n");
+ drm_err(display->drm, "Invalid PIPE\n");
return;
}
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
/* clear TE in dsi IIR */
port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0);
}
-static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
+static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_PLANE1_FLIP_DONE;
else
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
-static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
+static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir)
{
- struct intel_display *display = &i915->display;
u32 pica_ier = 0;
*pica_iir = 0;
@@ -1346,7 +1315,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
* their flags both in the PICA and SDE IIR.
*/
if (*pch_iir & SDE_PICAINTERRUPT) {
- drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL);
+ drm_WARN_ON(display->drm, INTEL_PCH_TYPE(display) < PCH_MTL);
pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0);
*pica_iir = intel_de_read(display, PICAINTERRUPT_IIR);
@@ -1359,32 +1328,31 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
intel_de_write(display, PICAINTERRUPT_IER, pica_ier);
}
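On MTL+ the south display events are latched in both the PICA and SDE IIR, so the handler has to ack them as a pair. A minimal sketch of the ordering used above, with reg_read()/reg_write()/reg_rmw() standing in for the real MMIO accessors (hypothetical names):

/* Mask PICA delivery while both IIRs are read and acked, so an event
 * arriving mid-sequence cannot be cleared in one register yet stay
 * latched in the other; then restore the saved enable mask. */
u32 pica_ier = reg_rmw(PICAINTERRUPT_IER, ~0u, 0);	/* save + mask */
u32 pica_iir = reg_read(PICAINTERRUPT_IIR);
reg_write(PICAINTERRUPT_IIR, pica_iir);			/* ack PICA copy */
reg_write(SDEIIR, pch_iir);				/* ack SDE copy */
reg_write(PICAINTERRUPT_IER, pica_ier);			/* unmask */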
-void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl)
{
- struct intel_display *display = &dev_priv->display;
u32 iir;
enum pipe pipe;
- drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
+ drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display));
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = intel_de_read(display, GEN8_DE_MISC_IIR);
if (iir) {
intel_de_write(display, GEN8_DE_MISC_IIR, iir);
- gen8_de_misc_irq_handler(dev_priv, iir);
+ gen8_de_misc_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE MISC)!\n");
}
}
- if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+ if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
iir = intel_de_read(display, GEN11_DE_HPD_IIR);
if (iir) {
intel_de_write(display, GEN11_DE_HPD_IIR, iir);
- gen11_hpd_irq_handler(dev_priv, iir);
+ gen11_hpd_irq_handler(display, iir);
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied, (DE HPD)!\n");
}
}
@@ -1396,52 +1364,52 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PORT_IIR, iir);
- if (iir & gen8_de_port_aux_mask(dev_priv)) {
+ if (iir & gen8_de_port_aux_mask(display)) {
intel_dp_aux_irq_handler(display);
found = true;
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
+ bxt_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
- } else if (IS_BROADWELL(dev_priv)) {
+ } else if (display->platform.broadwell) {
u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
if (hotplug_trigger) {
- ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
+ ilk_hpd_irq_handler(display, hotplug_trigger);
found = true;
}
}
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
(iir & BXT_DE_PORT_GMBUS)) {
intel_gmbus_irq_handler(display);
found = true;
}
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
if (te_trigger) {
- gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
+ gen11_dsi_te_interrupt_handler(display, te_trigger);
found = true;
}
}
if (!found)
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"Unexpected DE Port interrupt\n");
} else {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PORT)!\n");
}
}
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
u32 fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
@@ -1449,7 +1417,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- drm_err_ratelimited(&dev_priv->drm,
+ drm_err_ratelimited(display->drm,
"The master control interrupt lied (DE PIPE)!\n");
continue;
}
@@ -1457,36 +1425,36 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- intel_handle_vblank(dev_priv, pipe);
+ intel_handle_vblank(display, pipe);
- if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
- flip_done_handler(dev_priv, pipe);
+ if (iir & gen8_de_pipe_flip_done_mask(display))
+ flip_done_handler(display, pipe);
- if (HAS_DSB(dev_priv)) {
+ if (HAS_DSB(display)) {
if (iir & GEN12_DSB_INT(INTEL_DSB_0))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_0);
if (iir & GEN12_DSB_INT(INTEL_DSB_1))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_1);
if (iir & GEN12_DSB_INT(INTEL_DSB_2))
- intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2);
+ intel_dsb_irq_handler(display, pipe, INTEL_DSB_2);
}
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev_priv, pipe);
+ hsw_pipe_crc_irq_handler(display, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(display, pipe);
- fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
+ fault_errors = iir & gen8_de_pipe_fault_mask(display);
if (fault_errors)
intel_pipe_fault_irq_handler(display,
gen8_pipe_fault_handlers(display),
pipe, fault_errors);
}
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
+ if (HAS_PCH_SPLIT(display) && !HAS_PCH_NOP(display) &&
master_ctl & GEN8_DE_PCH_IRQ) {
u32 pica_iir;
@@ -1495,31 +1463,30 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
+ gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir);
if (iir) {
if (pica_iir)
- xelpdp_pica_irq_handler(dev_priv, pica_iir);
+ xelpdp_pica_irq_handler(display, pica_iir);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- spt_irq_handler(dev_priv, iir);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_handler(display, iir);
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ spt_irq_handler(display, iir);
else
- cpt_irq_handler(dev_priv, iir);
+ cpt_irq_handler(display, iir);
} else {
/*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"The master control interrupt lied (SDE)!\n");
}
}
}
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
{
- struct intel_display *display = &i915->display;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -1532,20 +1499,17 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
return iir;
}
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir)
{
- struct intel_display *display = &i915->display;
-
if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(display);
}
-void gen11_display_irq_handler(struct drm_i915_private *i915)
+void gen11_display_irq_handler(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 disp_ctl;
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_block(display);
/*
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
@@ -1553,16 +1517,15 @@ void gen11_display_irq_handler(struct drm_i915_private *i915)
disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- gen8_de_irq_handler(i915, disp_ctl);
+ gen8_de_irq_handler(display, disp_ctl);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_display_rpm_assert_unblock(display);
}
-static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
/*
* Vblank/CRC interrupts fail to wake the device up from C2+.
@@ -1570,117 +1533,116 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
* the problem. There is a small power cost so we do this
* only when vblank/CRC interrupts are actually enabled.
*/
- if (i915->display.irq.vblank_enabled++ == 0)
+ if (display->irq.vblank_enabled++ == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
+static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- lockdep_assert_held(&i915->drm.vblank_time_lock);
+ lockdep_assert_held(&display->drm->vblank_time_lock);
- if (--i915->display.irq.vblank_enabled == 0)
+ if (--display->irq.vblank_enabled == 0)
intel_de_write(display, SCPD0,
_MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
{
- spin_lock_irq(&i915->drm.vblank_time_lock);
+ spin_lock_irq(&display->drm->vblank_time_lock);
if (enable)
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
else
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
- spin_unlock_irq(&i915->drm.vblank_time_lock);
+ spin_unlock_irq(&display->drm->vblank_time_lock);
}
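The SCPD0 writes above rely on the i915 masked-bit register convention. A minimal sketch of that idiom, assuming reg_write() as a stand-in MMIO accessor:

/* The upper 16 bits select which of the lower 16 bits take effect, so
 * a single write can flip one bit without a read-modify-write cycle. */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

static void cstate_gate_wa(bool enable)
{
	reg_write(SCPD0, enable ?
		  MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE) :
		  MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}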
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
- i915gm_irq_cstate_wa_enable(i915);
+ i915gm_irq_cstate_wa_enable(display);
return i8xx_enable_vblank(crtc);
}
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
i8xx_disable_vblank(crtc);
- i915gm_irq_cstate_wa_disable(i915);
+ i915gm_irq_cstate_wa_disable(display);
}
int i965_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_enable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
return 0;
}
void i965_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ i915_disable_pipestat(display, pipe,
PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
int ilk_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_enable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_enable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even though there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(crtc);
return 0;
@@ -1688,15 +1650,15 @@ int ilk_enable_vblank(struct drm_crtc *crtc)
void ilk_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ u32 bit = DISPLAY_VER(display) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ ilk_disable_display_irq(display, bit);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
}
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
@@ -1722,44 +1684,36 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
return true;
}
-static void intel_display_vblank_dc_work(struct work_struct *work)
+static void intel_display_vblank_notify_work(struct work_struct *work)
{
struct intel_display *display =
- container_of(work, typeof(*display), irq.vblank_dc_work);
- int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
+ container_of(work, typeof(*display), irq.vblank_notify_work);
+ int vblank_enable_count = READ_ONCE(display->irq.vblank_enable_count);
- /*
- * NOTE: intel_display_power_set_target_dc_state is used only by PSR
- * code for DC3CO handling. DC3CO target state is currently disabled in
- * PSR code. If DC3CO is taken into use we need take that into account
- * here as well.
- */
- intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
- DC_STATE_EN_UPTO_DC6);
+ intel_psr_notify_vblank_enable_disable(display, vblank_enable_count);
}
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, true))
return 0;
- if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && display->irq.vblank_enable_count++ == 0)
+ schedule_work(&display->irq.vblank_notify_work);
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
/* Even if there is no DMC, frame counter can get stuck when
* PSR is active as no frames are generated, so check only for PSR.
*/
- if (HAS_PSR(dev_priv))
+ if (HAS_PSR(display))
drm_crtc_vblank_restore(&crtc->base);
return 0;
@@ -1769,19 +1723,18 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(crtc, false))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ spin_lock_irqsave(&display->irq.lock, irqflags);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK);
+ spin_unlock_irqrestore(&display->irq.lock, irqflags);
- if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
- schedule_work(&display->irq.vblank_dc_work);
+ if (crtc->vblank_psr_notify && --display->irq.vblank_enable_count == 0)
+ schedule_work(&display->irq.vblank_notify_work);
}
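Both bdw vblank paths above share one bookkeeping rule: the PSR notify work is scheduled only on the 0 -> 1 and 1 -> 0 edges of the per-display enable count, never once per pipe. Distilled into a sketch (helper names hypothetical; the real code additionally gates the count on crtc->vblank_psr_notify):

/* Edge-triggered notification: intermediate counts stay silent. */
static void vblank_notify_get(struct intel_display *display)
{
	if (display->irq.vblank_enable_count++ == 0)
		schedule_work(&display->irq.vblank_notify_work);
}

static void vblank_notify_put(struct intel_display *display)
{
	if (--display->irq.vblank_enable_count == 0)
		schedule_work(&display->irq.vblank_notify_work);
}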
static u32 vlv_dpinvgtt_pipe_fault_mask(enum pipe pipe)
@@ -1892,11 +1845,11 @@ void vlv_display_error_irq_handler(struct intel_display *display,
vlv_page_table_error_irq_handler(display, dpinvgtt);
}
-static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
@@ -1904,31 +1857,60 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(to_intel_uncore(display->drm),
VLV_ERROR_REGS);
- i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
- i9xx_pipestat_irq_reset(dev_priv);
+ i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_reset(struct intel_display *display)
{
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- _vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void i9xx_display_irq_reset(struct drm_i915_private *i915)
+void i9xx_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (I915_HAS_HOTPLUG(i915)) {
- i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
- intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0);
+ if (HAS_HOTPLUG(display)) {
+ i915_hotplug_interrupt_update(display, 0xffffffff, 0);
+ intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
}
- i9xx_pipestat_irq_reset(i915);
+ i9xx_pipestat_irq_reset(display);
+}
+
+void i915_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
+}
+
+void i965_display_irq_postinstall(struct intel_display *display)
+{
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy.
+ */
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&display->irq.lock);
+
+ i915_enable_asle_pipestat(display);
}
static u32 vlv_error_mask(void)
@@ -1937,17 +1919,14 @@ static u32 vlv_error_mask(void)
return VLV_ERROR_PAGE_TABLE;
}
-void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
intel_de_write(display, DPINVGTT,
DPINVGTT_STATUS_MASK_CHV |
DPINVGTT_EN_MASK_CHV);
@@ -1961,9 +1940,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+ i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(display, pipe)
+ i915_enable_pipestat(display, pipe, pipestat_mask);
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -1972,53 +1951,76 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
I915_LPE_PIPE_B_INTERRUPT |
I915_MASTER_ERROR_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
-void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
+void vlv_display_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ spin_lock_irq(&display->irq.lock);
+ if (display->irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_postinstall(display);
+ spin_unlock_irq(&display->irq.lock);
+}
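The VLV reset/postinstall refactor follows a consistent split: an underscore-prefixed helper that assumes display->irq.lock is held, plus a public wrapper that takes the lock and checks vlv_display_irqs_enabled. For readability, here is the enable path as it reads after this patch, reconstructed from the hunks further down with diff markers stripped:

spin_lock_irq(&display->irq.lock);
if (!display->irq.vlv_display_irqs_enabled) {
	display->irq.vlv_display_irqs_enabled = true;
	if (intel_irqs_enabled(dev_priv)) {
		_vlv_display_irq_reset(display);	/* lock held */
		_vlv_display_irq_postinstall(display);	/* lock held */
	}
}
spin_unlock_irq(&display->irq.lock);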
+
+void ibx_display_irq_reset(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (HAS_PCH_NOP(i915))
+ return;
+
+ gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
+
+ if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
+ intel_de_write(display, SERR_INT, 0xffffffff);
+}
+
+void gen8_display_irq_reset(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
+
+ if (HAS_PCH_SPLIT(i915))
+ ibx_display_irq_reset(display);
}
-void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
+void gen11_display_irq_reset(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0);
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2026,10 +2028,10 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
continue;
intel_de_write(display,
- TRANS_PSR_IMR(dev_priv, trans),
+ TRANS_PSR_IMR(display, trans),
0xffffffff);
intel_de_write(display,
- TRANS_PSR_IIR(dev_priv, trans),
+ TRANS_PSR_IIR(display, trans),
0xffffffff);
}
} else {
@@ -2037,7 +2039,7 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
}
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
@@ -2045,55 +2047,55 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
else
intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
}
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_post_enable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ display->irq.de_irq_mask[pipe],
+ ~display->irq.de_irq_mask[pipe] | extra_ier);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
+void gen8_irq_power_well_pre_disable(struct intel_display *display,
u8 pipe_mask)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
return;
}
- for_each_pipe_masked(dev_priv, pipe, pipe_mask)
+ for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -2110,17 +2112,16 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
* to avoid races with the irq handler, assuming we have MSI. Shared legacy
* interrupts could still race.
*/
-static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
+static void ibx_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask;
- if (HAS_PCH_NOP(dev_priv))
+ if (HAS_PCH_NOP(display))
return;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
- else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
+ else if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
else
mask = SDE_GMBUS_CPT;
@@ -2128,40 +2129,50 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_enable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
+
+ if (display->irq.vlv_display_irqs_enabled)
+ goto out;
- dev_priv->display.irq.vlv_display_irqs_enabled = true;
+ display->irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- _vlv_display_irq_reset(dev_priv);
- vlv_display_irq_postinstall(dev_priv);
+ _vlv_display_irq_reset(display);
+ _vlv_display_irq_postinstall(display);
}
+
+out:
+ spin_unlock_irq(&display->irq.lock);
}
-void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
+void valleyview_disable_display_irqs(struct intel_display *display)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- if (!dev_priv->display.irq.vlv_display_irqs_enabled)
- return;
+ spin_lock_irq(&display->irq.lock);
+
+ if (!display->irq.vlv_display_irqs_enabled)
+ goto out;
- dev_priv->display.irq.vlv_display_irqs_enabled = false;
+ display->irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- _vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(display);
+out:
+ spin_unlock_irq(&display->irq.lock);
}
-void ilk_de_irq_postinstall(struct drm_i915_private *i915)
+void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
u32 display_mask, extra_mask;
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
@@ -2182,59 +2193,57 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
DE_DP_A_HOTPLUG);
}
- if (IS_HASWELL(i915)) {
+ if (display->platform.haswell) {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
- if (IS_IRONLAKE_M(i915))
+ if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
i915->irq_mask = ~display_mask;
- ibx_irq_postinstall(i915);
+ ibx_irq_postinstall(display);
intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
display_mask | extra_mask);
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915);
-static void icp_irq_postinstall(struct drm_i915_private *i915);
+static void mtp_irq_postinstall(struct intel_display *display);
+static void icp_irq_postinstall(struct intel_display *display);
-void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen8_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+ u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) |
GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
- u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
+ u32 de_port_masked = gen8_de_port_aux_mask(display);
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VER(dev_priv) >= 14)
- mtp_irq_postinstall(dev_priv);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev_priv);
- else if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev_priv);
+ if (DISPLAY_VER(display) >= 14)
+ mtp_irq_postinstall(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_irq_postinstall(display);
+ else if (HAS_PCH_SPLIT(display))
+ ibx_irq_postinstall(display);
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_masked |= BXT_DE_PORT_GMBUS;
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
enum port port;
if (intel_bios_is_dsi_present(display, &port))
@@ -2244,25 +2253,25 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_DBUF_OVERLAP_DETECTION(display))
de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
- if (HAS_DSB(dev_priv))
+ if (HAS_DSB(display))
de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
de_pipe_enables = de_pipe_masked |
GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
- gen8_de_pipe_flip_done_mask(dev_priv);
+ gen8_de_pipe_flip_done_mask(display);
de_port_enables = de_port_masked;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
- else if (IS_BROADWELL(dev_priv))
+ else if (display->platform.broadwell)
de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
enum transcoder trans;
- for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
+ for_each_cpu_transcoder_masked(display, trans, trans_mask) {
enum intel_display_power_domain domain;
domain = POWER_DOMAIN_TRANSCODER(trans);
@@ -2270,19 +2279,19 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
continue;
intel_display_irq_regs_assert_irr_is_zero(display,
- TRANS_PSR_IIR(dev_priv, trans));
+ TRANS_PSR_IIR(display, trans));
}
} else {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
}
- for_each_pipe(dev_priv, pipe) {
- dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ for_each_pipe(display, pipe) {
+ display->irq.de_irq_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- dev_priv->display.irq.de_irq_mask[pipe],
+ display->irq.de_irq_mask[pipe],
de_pipe_enables);
}
@@ -2291,7 +2300,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
de_misc_masked);
- if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
+ if (IS_DISPLAY_VER(display, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
@@ -2301,9 +2310,8 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-static void mtp_irq_postinstall(struct drm_i915_private *i915)
+static void mtp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
@@ -2315,43 +2323,68 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915)
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
-static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
+static void icp_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
u32 mask = SDE_GMBUS_ICP;
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
-void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
+void gen11_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void dg1_de_irq_postinstall(struct drm_i915_private *i915)
+void dg1_de_irq_postinstall(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- gen8_de_irq_postinstall(i915);
+ gen8_de_irq_postinstall(display);
intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
}
-void intel_display_irq_init(struct drm_i915_private *i915)
+void intel_display_irq_init(struct intel_display *display)
{
- i915->drm.vblank_disable_immediate = true;
+ spin_lock_init(&display->irq.lock);
- intel_hotplug_irq_init(i915);
+ display->drm->vblank_disable_immediate = true;
+
+ intel_hotplug_irq_init(display);
+
+ INIT_WORK(&display->irq.vblank_notify_work,
+ intel_display_vblank_notify_work);
+}
+
+struct intel_display_irq_snapshot {
+ u32 derrmr;
+};
+
+struct intel_display_irq_snapshot *
+intel_display_irq_snapshot_capture(struct intel_display *display)
+{
+ struct intel_display_irq_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
+
+ if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
+ snapshot->derrmr = intel_de_read(display, DERRMR);
+
+ return snapshot;
+}
+
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
- INIT_WORK(&i915->display.irq.vblank_dc_work,
- intel_display_vblank_dc_work);
+ drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index d9867cd0a220..c66db3851da4 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -12,28 +12,29 @@
enum pipe;
struct drm_crtc;
-struct drm_i915_private;
+struct drm_printer;
struct intel_display;
+struct intel_display_irq_snapshot;
-void valleyview_enable_display_irqs(struct drm_i915_private *i915);
-void valleyview_disable_display_irqs(struct drm_i915_private *i915);
+void valleyview_enable_display_irqs(struct intel_display *display);
+void valleyview_disable_display_irqs(struct intel_display *display);
-void ilk_update_display_irq(struct drm_i915_private *i915,
+void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits);
-void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits);
+void ilk_enable_display_irq(struct intel_display *display, u32 bits);
+void ilk_disable_display_irq(struct intel_display *display, u32 bits);
-void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask);
-void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
-void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask);
+void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
+void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits);
-void ibx_display_interrupt_update(struct drm_i915_private *i915,
+void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask);
-void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits);
-void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
+void ibx_enable_display_interrupt(struct intel_display *display, u32 bits);
+void ibx_disable_display_interrupt(struct intel_display *display, u32 bits);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask);
-void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask);
+void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask);
+void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
@@ -46,41 +47,46 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir);
-void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl);
-void gen11_display_irq_handler(struct drm_i915_private *i915);
+void ivb_display_irq_handler(struct intel_display *display, u32 de_iir);
+void ilk_display_irq_handler(struct intel_display *display, u32 de_iir);
+void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
+void gen11_display_irq_handler(struct intel_display *display);
-u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl);
-void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir);
+u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
+void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
-void i9xx_display_irq_reset(struct drm_i915_private *i915);
-void vlv_display_irq_reset(struct drm_i915_private *i915);
-void gen8_display_irq_reset(struct drm_i915_private *i915);
-void gen11_display_irq_reset(struct drm_i915_private *i915);
+void i9xx_display_irq_reset(struct intel_display *display);
+void ibx_display_irq_reset(struct intel_display *display);
+void vlv_display_irq_reset(struct intel_display *display);
+void gen8_display_irq_reset(struct intel_display *display);
+void gen11_display_irq_reset(struct intel_display *display);
-void vlv_display_irq_postinstall(struct drm_i915_private *i915);
-void ilk_de_irq_postinstall(struct drm_i915_private *i915);
-void gen8_de_irq_postinstall(struct drm_i915_private *i915);
-void gen11_de_irq_postinstall(struct drm_i915_private *i915);
-void dg1_de_irq_postinstall(struct drm_i915_private *i915);
+void i915_display_irq_postinstall(struct intel_display *display);
+void i965_display_irq_postinstall(struct intel_display *display);
+void vlv_display_irq_postinstall(struct intel_display *display);
+void ilk_de_irq_postinstall(struct intel_display *display);
+void gen8_de_irq_postinstall(struct intel_display *display);
+void gen11_de_irq_postinstall(struct intel_display *display);
+void dg1_de_irq_postinstall(struct intel_display *display);
u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe);
-void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
-void i915_enable_asle_pipestat(struct drm_i915_private *i915);
+void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
+void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask);
-void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
-void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
+void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
+void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]);
void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt);
void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt);
-void intel_display_irq_init(struct drm_i915_private *i915);
+void intel_display_irq_init(struct intel_display *display);
-void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
+void i915gm_irq_cstate_wa(struct intel_display *display, bool enable);
+
+struct intel_display_irq_snapshot *intel_display_irq_snapshot_capture(struct intel_display *display);
+void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *snapshot, struct drm_printer *p);
#endif /* __INTEL_DISPLAY_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index f7171e6932dc..16356523816f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -16,6 +16,7 @@
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
@@ -204,7 +205,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display,
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(display->drm->dev))
+ if (intel_display_rpm_suspended(display))
return false;
is_enabled = true;
@@ -322,6 +323,35 @@ unlock:
mutex_unlock(&power_domains->lock);
}
+/**
+ * intel_display_power_get_current_dc_state - Get the current DC state
+ * @display: display device
+ *
+ * Return the DC state currently in effect: DC_STATE_DISABLE while the
+ * "DC off" power well is enabled, otherwise the target_dc_state that the
+ * "DC off" power well will program once it gets disabled.
+ */
+u32 intel_display_power_get_current_dc_state(struct intel_display *display)
+{
+ struct i915_power_well *power_well;
+ struct i915_power_domains *power_domains = &display->power.domains;
+ u32 current_dc_state = DC_STATE_DISABLE;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
+
+ if (drm_WARN_ON(display->drm, !power_well))
+ goto unlock;
+
+ current_dc_state = intel_power_well_is_enabled(display, power_well) ?
+ DC_STATE_DISABLE : power_domains->target_dc_state;
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+
+ return current_dc_state;
+}
+
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
@@ -455,7 +485,6 @@ static bool
intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -473,8 +502,8 @@ intel_display_power_grab_async_put_ref(struct intel_display *display,
goto out_verify;
cancel_async_put_work(power_domains, false);
- intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
- fetch_and_zero(&power_domains->async_put_wakeref));
+ intel_display_rpm_put_raw(display,
+ fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
verify_async_put_domains_state(power_domains);
@@ -512,9 +541,10 @@ __intel_display_power_get_domain(struct intel_display *display,
intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ struct ref_tracker *wakeref;
+
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(display, domain);
@@ -539,12 +569,11 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get_if_in_use(display);
if (!wakeref)
return NULL;
@@ -560,7 +589,7 @@ intel_display_power_get_if_enabled(struct intel_display *display,
mutex_unlock(&power_domains->lock);
if (!is_enabled) {
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
wakeref = NULL;
}
@@ -623,12 +652,10 @@ release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_display *display = container_of(power_domains,
struct intel_display,
power.domains);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
- wakeref = intel_runtime_pm_get_noresume(rpm);
+ wakeref = intel_display_rpm_get_noresume(display);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
@@ -636,7 +663,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
__intel_display_power_put_domain(display, domain);
}
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static void
@@ -644,11 +671,10 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct intel_display *display = container_of(work, struct intel_display,
power.domains.async_put_work.work);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
- intel_wakeref_t old_work_wakeref = NULL;
+ struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL;
+
+ new_work_wakeref = intel_display_rpm_get_raw(display);
mutex_lock(&power_domains->lock);
@@ -688,9 +714,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
- intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ intel_display_rpm_put_raw(display, old_work_wakeref);
if (new_work_wakeref)
- intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+ intel_display_rpm_put_raw(display, new_work_wakeref);
}
/**
@@ -711,10 +737,10 @@ void __intel_display_power_put_async(struct intel_display *display,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+ struct ref_tracker *work_wakeref;
+
+ work_wakeref = intel_display_rpm_get_raw(display);
delay_ms = delay_ms >= 0 ? delay_ms : 100;
@@ -746,9 +772,9 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(rpm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
- intel_runtime_pm_put(rpm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -765,7 +791,6 @@ out_verify:
*/
void intel_display_power_flush_work(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -786,7 +811,7 @@ out_verify:
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+ intel_display_rpm_put_raw(display, work_wakeref);
}
/**
@@ -824,10 +849,8 @@ void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
#else
/**
@@ -846,10 +869,8 @@ void intel_display_power_put(struct intel_display *display,
void intel_display_power_put_unchecked(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
__intel_display_power_put(display, domain);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_display_rpm_put_unchecked(display);
}
#endif
@@ -1373,26 +1394,24 @@ static void hsw_restore_lcpll(struct intel_display *display)
*/
static void hsw_enable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
drm_dbg_kms(display->drm, "Enabling package C8+\n");
- if (HAS_PCH_LPT_LP(dev_priv))
+ if (HAS_PCH_LPT_LP(display))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
hsw_disable_lcpll(display, true, true);
}
static void hsw_disable_pc8(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
drm_dbg_kms(display->drm, "Disabling package C8+\n");
hsw_restore_lcpll(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
@@ -1423,14 +1442,13 @@ static void intel_pch_reset_handshake(struct intel_display *display,
static void skl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1632,20 +1650,19 @@ static void tgl_bw_buddy_init(struct intel_display *display)
static void icl_display_core_init(struct intel_display *display,
bool resume)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
- INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1)
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
if (!HAS_DISPLAY(display))
return;
@@ -1916,7 +1933,6 @@ static void intel_power_domains_verify_state(struct intel_display *display);
*/
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
power_domains->initializing = true;
@@ -1940,9 +1956,9 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
assert_isp_power_gated(display);
} else if (display->platform.broadwell || display->platform.haswell) {
hsw_assert_cdclk(display);
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
} else if (display->platform.ivybridge) {
- intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
}
/*
@@ -1979,7 +1995,6 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
void intel_power_domains_driver_remove(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
@@ -1993,7 +2008,7 @@ void intel_power_domains_driver_remove(struct intel_display *display)
intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
/**
@@ -2238,8 +2253,6 @@ static void intel_power_domains_verify_state(struct intel_display *display)
void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
intel_power_domains_suspend(display, s2idle);
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
@@ -2250,14 +2263,12 @@ void intel_display_power_suspend_late(struct intel_display *display, bool s2idle
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
display->platform.broxton) {
gen9_sanitize_dc_state(display);
@@ -2267,8 +2278,8 @@ void intel_display_power_resume_early(struct intel_display *display)
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
- intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
intel_power_domains_resume(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 1b53d67f9b60..f8813b0e16df 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -183,6 +183,7 @@ void intel_display_power_suspend(struct intel_display *display);
void intel_display_power_resume(struct intel_display *display);
void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
+u32 intel_display_power_get_current_dc_state(struct intel_display *display);
bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index e80e1fd611ca..ab1163744bc5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1696,6 +1696,7 @@ I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off,
XE3LPD_PW_C_POWER_DOMAINS,
XE3LPD_PW_D_POWER_DOMAINS,
POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
POWER_DOMAIN_INIT);
static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 8ec87ffd87d2..b104bce0e14d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -13,6 +13,7 @@
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
@@ -24,6 +25,7 @@
#include "intel_hotplug.h"
#include "intel_pcode.h"
#include "intel_pps.h"
+#include "intel_psr.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "skl_watermark.h"
@@ -186,22 +188,18 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (has_vga)
intel_vga_reset_io_mem(display);
if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_post_enable(display, irq_pipe_mask);
}
static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+ gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
}
#define ICL_AUX_PW_TO_PHY(pw_idx) \
@@ -752,8 +750,9 @@ void gen9_sanitize_dc_state(struct intel_display *display)
void gen9_set_dc_state(struct intel_display *display, u32 state)
{
struct i915_power_domains *power_domains = &display->power.domains;
- u32 val;
+ bool dc6_was_enabled, enable_dc6;
u32 mask;
+ u32 val;
if (!HAS_DISPLAY(display))
return;
@@ -762,6 +761,9 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
state & ~power_domains->allowed_dc_mask))
state &= power_domains->allowed_dc_mask;
+ if (!power_domains->initializing)
+ intel_psr_notify_dc5_dc6(display);
+
val = intel_de_read(display, DC_STATE_EN);
mask = gen9_dc_mask(display);
drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
@@ -772,11 +774,19 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
power_domains->dc_state, val & mask);
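+ /*
+ * Bracket the DC_STATE_EN update with DC6-allowed count tracking:
+ * start counting when DC6 becomes enabled, accumulate once it is
+ * disabled again (see intel_dmc_update_dc6_allowed_count()).
+ */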
+ enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ if (!dc6_was_enabled && enable_dc6)
+ intel_dmc_update_dc6_allowed_count(display, true);
+
val &= ~mask;
val |= state;
gen9_write_dc_state(display, val);
+ if (!enable_dc6 && dc6_was_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
power_domains->dc_state = val & mask;
}
@@ -816,7 +826,8 @@ static void assert_can_enable_dc5(struct intel_display *display)
(intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_display_rpm_held(display);
assert_dmc_loaded(display);
}
@@ -1201,7 +1212,6 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
static void vlv_display_power_well_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1225,9 +1235,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
vlv_init_display_clock_gating(display);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(display);
/*
* During driver initialization/resume we can avoid restoring the
@@ -1236,8 +1244,8 @@ static void vlv_display_power_well_init(struct intel_display *display)
if (display->power.domains.initializing)
return;
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
/* Re-enable the ADPA, if we have one */
for_each_intel_encoder(display->drm, encoder) {
@@ -1245,7 +1253,7 @@ static void vlv_display_power_well_init(struct intel_display *display)
intel_crt_reset(&encoder->base);
}
- intel_vga_redisable_power_on(display);
+ intel_vga_disable(display);
intel_pps_unlock_regs_wa(display);
}
@@ -1254,9 +1262,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(display);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
@@ -1265,7 +1271,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display)
/* Prevent us from re-enabling polling by accident in late suspend */
if (!display->drm->dev->power.is_suspended)
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
}
static void vlv_display_power_well_enable(struct intel_display *display,
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 1f2798404f2c..1dbd3e841df3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -107,14 +107,14 @@ void intel_display_reset_finish(struct intel_display *display, bool test_only)
intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_cx0_pll_power_save_wa(display);
- intel_hpd_init(i915);
+ intel_hpd_init(display);
ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_poll_disable(i915);
+ intel_hpd_poll_disable(display);
}
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
new file mode 100644
index 000000000000..48da67dd0136
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "i915_drv.h"
+#include "intel_display_rpm.h"
+#include "intel_runtime_pm.h"
+
+static struct intel_runtime_pm *display_to_rpm(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ return &i915->runtime_pm;
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_runtime_pm_get_raw(display_to_rpm(display));
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put_raw(display_to_rpm(display), wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return intel_runtime_pm_get(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return intel_runtime_pm_get_if_in_use(display_to_rpm(display));
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ return intel_runtime_pm_get_noresume(display_to_rpm(display));
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put(display_to_rpm(display), wakeref);
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ intel_runtime_pm_put_unchecked(display_to_rpm(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ return intel_runtime_pm_suspended(display_to_rpm(display));
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ assert_rpm_wakelock_held(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ disable_rpm_wakeref_asserts(display_to_rpm(display));
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ enable_rpm_wakeref_asserts(display_to_rpm(display));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.h b/drivers/gpu/drm/i915/display/intel_display_rpm.h
new file mode 100644
index 000000000000..6ef48515f84b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_RPM__
+#define __INTEL_DISPLAY_RPM__
+
+#include <linux/types.h>
+
+struct intel_display;
+struct ref_tracker;
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display);
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref);
+
+#define __with_intel_display_rpm(__display, __wakeref) \
+ for (struct ref_tracker *(__wakeref) = intel_display_rpm_get(__display); (__wakeref); \
+ intel_display_rpm_put((__display), (__wakeref)), (__wakeref) = NULL)
+
+#define with_intel_display_rpm(__display) \
+ __with_intel_display_rpm((__display), __UNIQUE_ID(wakeref))
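+
+/*
+ * Minimal usage sketch (hypothetical caller): hold a display runtime PM
+ * wakeref for the duration of the block, dropping it automatically on exit:
+ *
+ *	with_intel_display_rpm(display) {
+ *		intel_de_write(display, reg, val);
+ *	}
+ */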
+
+/* Only for special cases. */
+bool intel_display_rpm_suspended(struct intel_display *display);
+
+void assert_display_rpm_held(struct intel_display *display);
+void intel_display_rpm_assert_block(struct intel_display *display);
+void intel_display_rpm_assert_unblock(struct intel_display *display);
+
+/* Only for display power implementation. */
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display);
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref);
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display);
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display);
+void intel_display_rpm_put_unchecked(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_RPM__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index 4074a1879828..678b24115951 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -8,6 +8,8 @@
#include "gt/intel_rps.h"
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
@@ -81,3 +83,24 @@ void intel_display_rps_mark_interactive(struct intel_display *display,
intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
state->rps_interactive = interactive;
}
+
+void ilk_display_rps_enable(struct intel_display *display)
+{
+ spin_lock(&display->irq.lock);
+ ilk_enable_display_irq(display, DE_PCU_EVENT);
+ spin_unlock(&display->irq.lock);
+}
+
+void ilk_display_rps_disable(struct intel_display *display)
+{
+ spin_lock(&display->irq.lock);
+ ilk_disable_display_irq(display, DE_PCU_EVENT);
+ spin_unlock(&display->irq.lock);
+}
+
+void ilk_display_rps_irq_handler(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ gen5_rps_irq_handler(&to_gt(i915)->rps);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h
index 556891edb2dd..183d154f2c7c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.h
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.h
@@ -13,10 +13,34 @@ struct drm_crtc;
struct intel_atomic_state;
struct intel_display;
+#ifdef I915
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence);
void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive);
+void ilk_display_rps_enable(struct intel_display *display);
+void ilk_display_rps_disable(struct intel_display *display);
+void ilk_display_rps_irq_handler(struct intel_display *display);
+#else
+static inline void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+}
+static inline void intel_display_rps_mark_interactive(struct intel_display *display,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+}
+static inline void ilk_display_rps_enable(struct intel_display *display)
+{
+}
+static inline void ilk_display_rps_disable(struct intel_display *display)
+{
+}
+static inline void ilk_display_rps_irq_handler(struct intel_display *display)
+{
+}
+#endif
#endif /* __INTEL_DISPLAY_RPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
index 25ba043cbb65..66087302fdbc 100644
--- a/drivers/gpu/drm/i915/display/intel_display_snapshot.c
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -7,6 +7,7 @@
#include "intel_display_core.h"
#include "intel_display_device.h"
+#include "intel_display_irq.h"
#include "intel_display_params.h"
#include "intel_display_snapshot.h"
#include "intel_dmc.h"
@@ -20,6 +21,7 @@ struct intel_display_snapshot {
struct intel_display_params params;
struct intel_overlay_snapshot *overlay;
struct intel_dmc_snapshot *dmc;
+ struct intel_display_irq_snapshot *irq;
};
struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display)
@@ -38,6 +40,7 @@ struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_displ
intel_display_params_copy(&snapshot->params);
+ snapshot->irq = intel_display_irq_snapshot_capture(display);
snapshot->overlay = intel_overlay_snapshot_capture(display);
snapshot->dmc = intel_dmc_snapshot_capture(display);
@@ -57,6 +60,7 @@ void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot,
intel_display_device_info_print(&snapshot->info, &snapshot->runtime_info, p);
intel_display_params_dump(&snapshot->params, display->drm->driver->name, p);
+ intel_display_irq_snapshot_print(snapshot->irq, p);
intel_overlay_snapshot_print(snapshot->overlay, p);
intel_dmc_snapshot_print(snapshot->dmc, p);
}
@@ -68,6 +72,7 @@ void intel_display_snapshot_free(struct intel_display_snapshot *snapshot)
intel_display_params_free(&snapshot->params);
+ kfree(snapshot->irq);
kfree(snapshot->overlay);
kfree(snapshot->dmc);
kfree(snapshot);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 99a6fd2900b9..d6d0440dcee9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -581,7 +581,7 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct __intel_global_objs_state *global_objs;
int num_global_objs;
@@ -1114,6 +1114,7 @@ struct intel_crtc_state {
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
+ u8 active_non_psr_pipes;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1387,7 +1388,7 @@ struct intel_crtc {
/* armed event for DSB based updates */
struct drm_pending_vblank_event *dsb_event;
- /* Access to these should be protected by dev_priv->irq_lock. */
+ /* Access to these should be protected by display->irq.lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@@ -1439,7 +1440,7 @@ struct intel_crtc {
struct intel_pipe_crc pipe_crc;
#endif
- bool block_dc_for_vblank;
+ bool vblank_psr_notify;
};
struct intel_plane_error {
@@ -1620,7 +1621,7 @@ struct intel_psr {
bool sink_support;
bool source_support;
bool enabled;
- bool paused;
+ int pause_counter;
enum pipe pipe;
enum transcoder transcoder;
bool active;
@@ -1650,6 +1651,8 @@ struct intel_psr {
u8 entry_setup_frames;
bool link_ok;
+
+ u8 active_non_psr_pipes;
};
struct intel_dp {
@@ -1658,7 +1661,6 @@ struct intel_dp {
int link_rate;
u8 lane_count;
u8 sink_count;
- bool link_trained;
bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1683,6 +1685,7 @@ struct intel_dp {
int common_rates[DP_MAX_SUPPORTED_RATES];
struct {
/* TODO: move the rest of link specific fields to here */
+ bool active;
/* common rate,lane_count configs in bw order */
int num_configs;
#define INTEL_DP_MAX_LANE_COUNT 4
@@ -1739,7 +1742,7 @@ struct intel_dp {
struct {
struct intel_dp_mst_encoder *stream_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mgr;
- int active_links;
+ int active_streams;
} mst;
u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
@@ -1805,12 +1808,16 @@ struct intel_dp {
struct {
u8 io_wake_lines;
u8 fast_wake_lines;
+ enum transcoder transcoder;
+ struct mutex lock;
/* LNL and beyond */
u8 check_entry_lines;
u8 aux_less_wake_lines;
u8 silence_period_sym_clocks;
u8 lfps_half_cycle_num_of_syms;
+ bool lobf_disable_debug;
+ bool sink_alpm_error;
} alpm_parameters;
u8 alpm_dpcd;
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index e5a8022db664..da429c332914 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -3,38 +3,38 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_core.h"
#include "intel_display_wa.h"
-static void gen11_display_wa_apply(struct drm_i915_private *i915)
+static void gen11_display_wa_apply(struct intel_display *display)
{
/* Wa_14010594013 */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, ICL_DELAY_PMRSP);
}
-static void xe_d_display_wa_apply(struct drm_i915_private *i915)
+static void xe_d_display_wa_apply(struct intel_display *display)
{
/* Wa_14013723622 */
- intel_de_rmw(i915, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
+ intel_de_rmw(display, CLKREQ_POLICY, CLKREQ_POLICY_MEM_UP_OVRD, 0);
}
-static void adlp_display_wa_apply(struct drm_i915_private *i915)
+static void adlp_display_wa_apply(struct intel_display *display)
{
/* Wa_22011091694:adlp */
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);
/* Bspec/49189 Initialize Sequence */
- intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
-void intel_display_wa_apply(struct drm_i915_private *i915)
+void intel_display_wa_apply(struct intel_display *display)
{
- if (IS_ALDERLAKE_P(i915))
- adlp_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 12)
- xe_d_display_wa_apply(i915);
- else if (DISPLAY_VER(i915) == 11)
- gen11_display_wa_apply(i915);
+ if (display->platform.alderlake_p)
+ adlp_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 12)
+ xe_d_display_wa_apply(display);
+ else if (DISPLAY_VER(display) == 11)
+ gen11_display_wa_apply(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index be644ab6ae00..babd9d16603d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -8,14 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
-void intel_display_wa_apply(struct drm_i915_private *i915);
+void intel_display_wa_apply(struct intel_display *display);
#ifdef I915
-static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; }
+static inline bool intel_display_needs_wa_16023588340(struct intel_display *display)
+{
+ return false;
+}
#else
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915);
+bool intel_display_needs_wa_16023588340(struct intel_display *display);
#endif
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index 0813fb9b5823..dad7192132ad 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index fa6944e55d95..b58189d24e7e 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -28,6 +28,8 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
+#include "intel_display_power_well.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_step.h"
@@ -57,6 +59,10 @@ struct intel_dmc {
const char *fw_path;
u32 max_fw_size; /* bytes */
u32 version;
+ struct {
+ u32 dc5_start;
+ u32 count;
+ } dc6_allowed;
struct dmc_fw_info {
u32 mmio_count;
i915_reg_t mmioaddr[20];
@@ -167,7 +173,6 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
static const char *dmc_firmware_default(struct intel_display *display, u32 *size)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const char *fw_path = NULL;
u32 max_fw_size = 0;
@@ -183,39 +188,39 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
} else if (DISPLAY_VERx100(display) == 1400) {
fw_path = MTL_DMC_PATH;
max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
- } else if (IS_DG2(i915)) {
+ } else if (display->platform.dg2) {
fw_path = DG2_DMC_PATH;
max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
fw_path = ADLP_DMC_PATH;
max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
- } else if (IS_ALDERLAKE_S(i915)) {
+ } else if (display->platform.alderlake_s) {
fw_path = ADLS_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_DG1(i915)) {
+ } else if (display->platform.dg1) {
fw_path = DG1_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.rocketlake) {
fw_path = RKL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (IS_TIGERLAKE(i915)) {
+ } else if (display->platform.tigerlake) {
fw_path = TGL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VER(display) == 11) {
fw_path = ICL_DMC_PATH;
max_fw_size = ICL_DMC_MAX_FW_SIZE;
- } else if (IS_GEMINILAKE(i915)) {
+ } else if (display->platform.geminilake) {
fw_path = GLK_DMC_PATH;
max_fw_size = GLK_DMC_MAX_FW_SIZE;
- } else if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915)) {
+ } else if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake) {
fw_path = KBL_DMC_PATH;
max_fw_size = KBL_DMC_MAX_FW_SIZE;
- } else if (IS_SKYLAKE(i915)) {
+ } else if (display->platform.skylake) {
fw_path = SKL_DMC_PATH;
max_fw_size = SKL_DMC_MAX_FW_SIZE;
- } else if (IS_BROXTON(i915)) {
+ } else if (display->platform.broxton) {
fw_path = BXT_DMC_PATH;
max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
@@ -511,6 +516,54 @@ void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe)
intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
+/**
+ * intel_dmc_block_pkgc() - block PKG C-state
+ * @display: display instance
+ * @pipe: pipe whose register is used for blocking
+ * @block: block/unblock
+ *
+ * This interface is targeted at Wa_16025596647 usage, i.e. setting/clearing
+ * the PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in the PIPEDMC_BLOCK_PKGC_SW
+ * register.
+ */
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block)
+{
+ intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
+ PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
+}
+
+/**
+ * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
+ * C-state exit
+ * @display: display instance
+ * @pipe: pipe whose event control register is programmed
+ * @enable: enable/disable
+ *
+ * This interface is targeted at Wa_16025596647 usage, i.e. starting the
+ * package C exit at the start of the undelayed vblank.
+ */
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_VBLANK_A);
+ else
+ val = REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE) |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1);
+
+ intel_de_write(display, MTL_PIPEDMC_EVT_CTL_4(pipe), val);
+}
+
static bool is_dmc_evt_ctl_reg(struct intel_display *display,
enum intel_dmc_id dmc_id, i915_reg_t reg)
{
@@ -535,8 +588,6 @@ static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!is_dmc_evt_ctl_reg(display, dmc_id, reg))
return false;
@@ -545,12 +596,12 @@ static bool disable_dmc_evt(struct intel_display *display,
return true;
/* also disable the flip queue event on the main DMC on TGL */
- if (IS_TIGERLAKE(i915) &&
+ if (display->platform.tigerlake &&
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
return true;
/* also disable the HRR event on the main DMC on TGL/ADLS */
- if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
+ if ((display->platform.tigerlake || display->platform.alderlake_s) &&
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
return true;
@@ -582,7 +633,6 @@ static u32 dmc_mmiodata(struct intel_display *display,
*/
void intel_dmc_load_program(struct intel_display *display)
{
- struct drm_i915_private *i915 __maybe_unused = to_i915(display->drm);
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_dmc *dmc = display_to_dmc(display);
enum intel_dmc_id dmc_id;
@@ -595,7 +645,7 @@ void intel_dmc_load_program(struct intel_display *display)
disable_all_event_handlers(display);
- assert_rpm_wakelock_held(&i915->runtime_pm);
+ assert_display_rpm_held(display);
preempt_disable();
@@ -1006,9 +1056,7 @@ static void intel_dmc_runtime_pm_put(struct intel_display *display)
static const char *dmc_fallback_path(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(i915))
+ if (display->platform.alderlake_p)
return ADLP_DMC_FALLBACK_PATH;
return NULL;
@@ -1232,18 +1280,56 @@ void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct
DMC_VERSION_MINOR(snapshot->version));
}
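+
+/*
+ * The DMC only exposes a DC5 entry counter, so the DC6-allowed count is
+ * tracked indirectly: when DC6 becomes allowed the current DC5 count is
+ * latched as a baseline (start_tracking), and when DC6 is disallowed again
+ * the delta since that baseline is accumulated into dc6_allowed.count.
+ */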
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display,
+ bool start_tracking)
+{
+ struct intel_dmc *dmc = display_to_dmc(display);
+ u32 dc5_cur_count;
+
+ if (DISPLAY_VER(dmc->display) < 14)
+ return;
+
+ dc5_cur_count = intel_de_read(dmc->display, DG1_DMC_DEBUG_DC5_COUNT);
+
+ if (!start_tracking)
+ dmc->dc6_allowed.count += dc5_cur_count - dmc->dc6_allowed.dc5_start;
+
+ dmc->dc6_allowed.dc5_start = dc5_cur_count;
+}
+
+static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *count)
+{
+ struct i915_power_domains *power_domains = &display->power.domains;
+ struct intel_dmc *dmc = display_to_dmc(display);
+ bool dc6_enabled;
+
+ if (DISPLAY_VER(display) < 14)
+ return false;
+
+ mutex_lock(&power_domains->lock);
+ dc6_enabled = intel_de_read(display, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6;
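+ /* fold in the DC5 entries counted while DC6 has been allowed */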
+ if (dc6_enabled)
+ intel_dmc_update_dc6_allowed_count(display, false);
+
+ *count = dmc->dc6_allowed.count;
+ mutex_unlock(&power_domains->lock);
+
+ return true;
+}
+
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_display *display = m->private;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc = display_to_dmc(display);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
+ u32 dc6_allowed_count;
if (!HAS_DMC(display))
return -ENODEV;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
@@ -1254,7 +1340,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "Pipe A fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
- str_yes_no(IS_ALDERLAKE_P(i915) ||
+ str_yes_no(display->platform.alderlake_p ||
DISPLAY_VER(display) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEB)));
@@ -1268,7 +1354,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
if (DISPLAY_VER(display) >= 12) {
i915_reg_t dc3co_reg;
- if (IS_DGFX(i915) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.dgfx || DISPLAY_VER(display) >= 14) {
dc3co_reg = DG1_DMC_DEBUG3;
dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
} else {
@@ -1280,14 +1366,18 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
seq_printf(m, "DC3CO count: %d\n",
intel_de_read(display, dc3co_reg));
} else {
- dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
+ dc5_reg = display->platform.broxton ? BXT_DMC_DC3_DC5_COUNT :
SKL_DMC_DC3_DC5_COUNT;
- if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
+ if (!display->platform.geminilake && !display->platform.broxton)
dc6_reg = SKL_DMC_DC5_DC6_COUNT;
}
seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg));
- if (i915_mmio_reg_valid(dc6_reg))
+
+ if (intel_dmc_get_dc6_allowed_count(display, &dc6_allowed_count))
+ seq_printf(m, "DC5 -> DC6 allowed count: %d\n",
+ dc6_allowed_count);
+ else if (i915_mmio_reg_valid(dc6_reg))
seq_printf(m, "DC5 -> DC6 count: %d\n",
intel_de_read(display, dc6_reg));
@@ -1299,7 +1389,7 @@ out:
intel_de_read(display, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL));
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 44cecef98e73..bd1c459b0075 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -18,6 +18,10 @@ void intel_dmc_load_program(struct intel_display *display);
void intel_dmc_disable_program(struct intel_display *display);
void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe);
void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe);
+void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
+ bool block);
+void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
+ enum pipe pipe, bool enable);
void intel_dmc_fini(struct intel_display *display);
void intel_dmc_suspend(struct intel_display *display);
void intel_dmc_resume(struct intel_display *display);
@@ -26,6 +30,7 @@ void intel_dmc_debugfs_register(struct intel_display *display);
struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display);
void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p);
+void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking);
void assert_dmc_loaded(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 1bf446f96a10..e16ea3f16ed8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -21,6 +21,20 @@
#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
+#define _MTL_PIPEDMC_EVT_CTL_4_A 0x5f044
+#define _MTL_PIPEDMC_EVT_CTL_4_B 0x5f444
+#define MTL_PIPEDMC_EVT_CTL_4(pipe) _MMIO_PIPE(pipe, \
+ _MTL_PIPEDMC_EVT_CTL_4_A, \
+ _MTL_PIPEDMC_EVT_CTL_4_B)
+
+#define PIPEDMC_BLOCK_PKGC_SW_A 0x5f1d0
+#define PIPEDMC_BLOCK_PKGC_SW_B 0x5f5d0
+#define PIPEDMC_BLOCK_PKGC_SW(pipe) _MMIO_PIPE(pipe, \
+ PIPEDMC_BLOCK_PKGC_SW_A, \
+ PIPEDMC_BLOCK_PKGC_SW_B)
+#define PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS BIT(31)
+#define PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_UNTIL_NEXT_FRAMESTART BIT(15)
+
#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 392c3653d0d7..640c43bf62d4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -45,12 +45,13 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -58,10 +59,12 @@
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -87,12 +90,10 @@
#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_runtime_pm.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-#include "intel_crtc_state_dump.h"
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
@@ -2523,6 +2524,7 @@ intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -2576,7 +2578,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_test_compute_config(intel_dp, crtc_state, limits);
return intel_dp_compute_config_link_bpp_limits(intel_dp,
- intel_dp->attached_connector,
+ connector,
crtc_state,
dsc,
limits);
@@ -2637,7 +2639,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_compute_config_limits(intel_dp, pipe_config,
+ !intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
respect_downstream_limits,
false,
&limits);
@@ -2671,7 +2673,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
+ if (!intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
respect_downstream_limits,
true,
&limits))
@@ -3104,6 +3106,76 @@ intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
}
}
+int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
+ /*
+ * The minimum symbol cycle count is 3 (BS, VBID, BE) for 128b/132b and
+ * 5 (BS, VBID, MVID, MAUD, BE) for 8b/10b.
+ */
+ int min_sym_cycles = intel_dp_is_uhbr(crtc_state) ? 3 : 5;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
+ int min_hblank;
+ int max_lane_count = 4;
+ int hactive_sym_cycles, htotal_sym_cycles;
+ int dsc_slices = 0;
+ int link_bpp_x16;
+
+ if (DISPLAY_VER(display) < 30)
+ return 0;
+
+ /* MIN_HBLANK should be set only for 8b/10b MST or for 128b/132b SST/MST */
+ if (!is_mst && !intel_dp_is_uhbr(crtc_state))
+ return 0;
+
+ if (crtc_state->dsc.compression_enable) {
+ dsc_slices = intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ num_joined_pipes);
+ if (!dsc_slices) {
+ drm_dbg(display->drm, "failed to calculate dsc slice count\n");
+ return -EINVAL;
+ }
+ }
+
+ if (crtc_state->dsc.compression_enable)
+ link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
+ else
+ link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
+ crtc_state->pipe_bpp));
+
+ /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
+ hactive_sym_cycles = drm_dp_link_symbol_cycles(max_lane_count,
+ adjusted_mode->hdisplay,
+ dsc_slices,
+ link_bpp_x16,
+ symbol_size, is_mst);
+ htotal_sym_cycles = adjusted_mode->htotal * hactive_sym_cycles /
+ adjusted_mode->hdisplay;
+
+ min_hblank = htotal_sym_cycles - hactive_sym_cycles;
+ /* minimum Hblank calculation: https://groups.vesa.org/wg/DP/document/20494 */
+ min_hblank = max(min_hblank, min_sym_cycles);
+
+ /*
+ * adjust the BlankingStart/BlankingEnd framing control from
+ * the calculated value
+ */
+ min_hblank = min_hblank - 2;
+
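+ /* cap the value programmed into the MIN_HBLANK field at 10 symbol cycles */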
+ min_hblank = min(10, min_hblank);
+ crtc_state->min_hblank = min_hblank;
+
+ return 0;
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -3203,6 +3275,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m_n);
}
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
@@ -3223,7 +3299,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
- intel_dp->link_trained = false;
+ intel_dp->link.active = false;
intel_dp->needs_modeset_retry = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
@@ -3587,7 +3663,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
if (crtc_state) {
intel_dp_reset_link_params(intel_dp);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
- intel_dp->link_trained = true;
+ intel_dp->link.active = true;
}
}
@@ -4456,6 +4532,23 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /*
+ * Display WA for HSD #13013007775: mtl/arl/lnl
+ * Read the sink count and link service IRQ registers in separate
+ * transactions to prevent disconnecting the sink on a TBT link
+ * inadvertently.
+ */
+ if (IS_DISPLAY_VER(display, 14, 20) && !display->platform.battlemage) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 3) != 3)
+ return false;
+
+ /* DP_SINK_COUNT_ESI + 3 == DP_LINK_SERVICE_IRQ_VECTOR_ESI0 */
+ return drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
+ &esi[3]) == 1;
+ }
+
return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
}
@@ -5005,8 +5098,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(display->drm, intel_dp->mst.active_links < 0);
-
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
@@ -5021,7 +5112,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp->mst.active_links > 0 && link_ok &&
+ if (intel_dp_mst_active_streams(intel_dp) > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
if (!intel_dp_mst_link_status(intel_dp))
link_ok = false;
@@ -5082,7 +5173,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
u8 link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->link_trained)
+ if (!intel_dp->link.active)
return false;
/*
@@ -5394,6 +5485,11 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
intel_psr_short_pulse(intel_dp);
+ if (intel_alpm_get_error(intel_dp)) {
+ intel_alpm_disable(intel_dp);
+ intel_dp->alpm_parameters.sink_alpm_error = true;
+ }
+
if (intel_dp_test_short_pulse(intel_dp))
reprobe_needed = true;
@@ -5829,20 +5925,21 @@ out_vdd_off:
}
static void
-intel_dp_force(struct drm_connector *connector)
+intel_dp_force(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_driver_check_access(display))
return;
intel_dp_unset_edid(intel_dp);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return;
intel_dp_set_edid(intel_dp);
@@ -5881,24 +5978,25 @@ static int intel_dp_get_modes(struct drm_connector *_connector)
}
static int
-intel_dp_connector_register(struct drm_connector *connector)
+intel_dp_connector_register(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret)
return ret;
drm_dbg_kms(display->drm, "registering %s bus for %s\n",
- intel_dp->aux.name, connector->kdev->kobj.name);
+ intel_dp->aux.name, connector->base.kdev->kobj.name);
- intel_dp->aux.dev = connector->kdev;
+ intel_dp->aux.dev = connector->base.kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
- drm_dp_cec_register_connector(&intel_dp->aux, connector);
+ drm_dp_cec_register_connector(&intel_dp->aux, &connector->base);
if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
return ret;
@@ -5909,20 +6007,21 @@ intel_dp_connector_register(struct drm_connector *connector)
*/
if (intel_lspcon_init(dig_port)) {
if (intel_lspcon_detect_hdr_capability(dig_port))
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
}
return ret;
}
static void
-intel_dp_connector_unregister(struct drm_connector *connector)
+intel_dp_connector_unregister(struct drm_connector *_connector)
{
- struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
drm_dp_cec_unregister_connector(&intel_dp->aux);
drm_dp_aux_unregister(&intel_dp->aux);
- intel_connector_unregister(connector);
+ intel_connector_unregister(&connector->base);
}
void intel_dp_connector_sync_state(struct intel_connector *connector,
@@ -5983,21 +6082,21 @@ static int intel_modeset_tile_group(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct drm_connector_list_iter conn_iter;
- struct drm_connector *connector;
+ struct intel_connector *connector;
int ret = 0;
drm_connector_list_iter_begin(display->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
+ for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- if (!connector->has_tile ||
- connector->tile_group->id != tile_group_id)
+ if (!connector->base.has_tile ||
+ connector->base.tile_group->id != tile_group_id)
continue;
conn_state = drm_atomic_get_connector_state(&state->base,
- connector);
+ &connector->base);
if (IS_ERR(conn_state)) {
ret = PTR_ERR(conn_state);
break;
@@ -6061,10 +6160,11 @@ static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
}
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
- struct drm_connector *connector)
+ struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
const struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(&state->base, connector);
+ drm_atomic_get_old_connector_state(&state->base, &connector->base);
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
u8 transcoders;
@@ -6086,17 +6186,18 @@ static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
transcoders);
}
-static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+static int intel_dp_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *_state)
{
- struct intel_display *display = to_intel_display(conn->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
- struct intel_connector *intel_conn = to_intel_connector(conn);
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(_state, &connector->base);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
int ret;
- ret = intel_digital_connector_atomic_check(conn, &state->base);
+ ret = intel_digital_connector_atomic_check(&connector->base, &state->base);
if (ret)
return ret;
@@ -6106,12 +6207,12 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
}
- if (!intel_connector_needs_modeset(state, conn))
+ if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
ret = intel_dp_tunnel_atomic_check_state(state,
intel_dp,
- intel_conn);
+ connector);
if (ret)
return ret;
@@ -6122,26 +6223,26 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (DISPLAY_VER(display) < 9)
return 0;
- if (conn->has_tile) {
- ret = intel_modeset_tile_group(state, conn->tile_group->id);
+ if (connector->base.has_tile) {
+ ret = intel_modeset_tile_group(state, connector->base.tile_group->id);
if (ret)
return ret;
}
- return intel_modeset_synced_crtcs(state, conn);
+ return intel_modeset_synced_crtcs(state, &connector->base);
}
-static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
+static void intel_dp_oob_hotplug_event(struct drm_connector *_connector,
enum drm_connector_status hpd_state)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
bool hpd_high = hpd_state == connector_status_connected;
unsigned int hpd_pin = encoder->hpd_pin;
bool need_work = false;
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
if (hpd_high != test_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state)) {
display->hotplug.event_bits |= BIT(hpd_pin);
@@ -6150,10 +6251,10 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
hpd_high);
need_work = true;
}
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
if (need_work)
- intel_hpd_schedule_detection(i915);
+ intel_hpd_schedule_detection(display);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -6180,13 +6281,12 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd ||
- intel_runtime_pm_suspended(&i915->runtime_pm) ||
+ intel_display_rpm_suspended(display) ||
!intel_pps_have_panel_power_or_vdd(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
@@ -6283,36 +6383,37 @@ intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
}
static void
-intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->base.port;
if (!intel_dp_is_edp(intel_dp))
- drm_connector_attach_dp_subconnector_property(connector);
+ drm_connector_attach_dp_subconnector_property(&connector->base);
if (!display->platform.g4x && port != PORT_A)
- intel_attach_force_audio_property(connector);
+ intel_attach_force_audio_property(&connector->base);
- intel_attach_broadcast_rgb_property(connector);
+ intel_attach_broadcast_rgb_property(&connector->base);
if (HAS_GMCH(display))
- drm_connector_attach_max_bpc_property(connector, 6, 10);
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 10);
else if (DISPLAY_VER(display) >= 5)
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+ drm_connector_attach_max_bpc_property(&connector->base, 6, 12);
/* Register HDMI colorspace for case of lspcon */
if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
- drm_connector_attach_content_type_property(connector);
- intel_attach_hdmi_colorspace_property(connector);
+ drm_connector_attach_content_type_property(&connector->base);
+ intel_attach_hdmi_colorspace_property(&connector->base);
} else {
- intel_attach_dp_colorspace_property(connector);
+ intel_attach_dp_colorspace_property(&connector->base);
}
if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
if (HAS_VRR(display))
- drm_connector_attach_vrr_capable_property(connector);
+ drm_connector_attach_vrr_capable_property(&connector->base);
}
static void
@@ -6347,7 +6448,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
@@ -6362,9 +6462,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(dev_priv)) {
+ if (intel_get_lvds_encoder(display)) {
drm_WARN_ON(display->drm,
- !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ !(HAS_PCH_IBX(display) || HAS_PCH_CPT(display)));
drm_info(display->drm,
"LVDS was detected, not registering eDP\n");
@@ -6395,7 +6495,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
*/
intel_hpd_enable_detection(encoder);
- intel_alpm_init_dpcd(intel_dp);
+ intel_alpm_init(intel_dp);
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp, connector);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 9189db4c2594..742ae26ac4a9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -194,6 +194,7 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -208,5 +209,7 @@ bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state);
int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
int intel_dp_dsc_min_src_input_bpc(void);
+int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index ec27bbd70bcf..bf8e8e0cc19c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -3,8 +3,10 @@
* Copyright © 2020-2021 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -111,10 +113,9 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(display)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -177,12 +178,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 aux_clock_divider)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 timeout;
/* Max timeout value on G4x-BDW: 1.6ms */
- if (IS_BROADWELL(i915))
+ if (display->platform.broadwell)
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -247,7 +247,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
intel_wakeref_t aux_wakeref;
- intel_wakeref_t pps_wakeref;
+ intel_wakeref_t pps_wakeref = NULL;
int i, ret, recv_bytes;
int try, clock = 0;
u32 status;
@@ -272,7 +272,20 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
aux_domain = intel_aux_power_domain(dig_port);
aux_wakeref = intel_display_power_get(display, aux_domain);
- pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * The PPS state needs to be locked for:
+ * - eDP on all platforms, since AUX transfers on eDP need VDD power
+ * (either forced or via panel power) which depends on the PPS
+ * state.
+ * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV),
+ * since changing the PPS state (via a parallel modeset for
+ * instance) may interfere with the AUX transfers on a non-eDP
+ * output as well.
+ */
+ if (intel_dp_is_edp(intel_dp) ||
+ display->platform.valleyview || display->platform.cherryview)
+ pps_wakeref = intel_pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -430,7 +443,9 @@ out:
if (vdd)
intel_pps_vdd_off_unlocked(intel_dp, false);
- intel_pps_unlock(intel_dp, pps_wakeref);
+ if (pps_wakeref)
+ intel_pps_unlock(intel_dp, pps_wakeref);
+
intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
intel_digital_port_unlock(encoder);
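
Putting the two hunks together, the AUX path now takes the PPS lock only under the conditions spelled out in the comment and releases it only if taken, with a NULL wakeref doubling as the "not taken" marker. A condensed sketch using the same driver helpers, eliding the transfer itself:

static int demo_aux_xfer(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	intel_wakeref_t pps_wakeref = NULL;

	if (intel_dp_is_edp(intel_dp) ||
	    display->platform.valleyview || display->platform.cherryview)
		pps_wakeref = intel_pps_lock(intel_dp);

	/* ... perform the AUX transfer ... */

	if (pps_wakeref)
		intel_pps_unlock(intel_dp, pps_wakeref);

	return 0;
}
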
@@ -771,7 +786,6 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -786,10 +800,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
} else if (DISPLAY_VER(display) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (HAS_PCH_SPLIT(display)) {
intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ } else if (display->platform.valleyview || display->platform.cherryview) {
intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
} else {
@@ -799,9 +813,9 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
+ else if (display->platform.broadwell || display->platform.haswell)
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(i915))
+ else if (HAS_PCH_SPLIT(display))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 8173de8aec63..271b27c9de51 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -36,7 +36,6 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
@@ -149,7 +148,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(display->drm,
- "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d.\n",
connector->base.base.id, connector->base.name,
INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
return false;
@@ -663,7 +662,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_panel *panel = &connector->panel;
- if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE)) {
+ if ((intel_dp->edp_dpcd[3] & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) &&
+ (intel_dp->edp_dpcd[3] & DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE)) {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX Luminance Based Backlight Control Supported!\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2966f5b39392..a479b63112ea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -56,6 +56,8 @@
lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)
+#define MAX_SEQ_TRAIN_FAILURES 2
+
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
@@ -164,7 +166,7 @@ static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_
* resetting its internal state when the mode is changed from
* non-transparent to transparent.
*/
- if (intel_dp->link_trained) {
+ if (intel_dp->link.active) {
if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
goto out_reset_lttpr_count;
@@ -711,8 +713,21 @@ void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, b
static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ /*
+ * Currently, we set the MSA ignore bit based on vrr.in_range.
+ * We can't really read that out during driver load since we don't have
+ * the connector information read in yet. So if we do end up doing a
+ * modeset during initial_commit() we'll clear the MSA ignore bit.
+ * GOP likely wouldn't have set this bit so after the initial commit,
+ * if there are no modesets and we enable VRR mode seamlessly
+ * (without a full modeset), the MSA ignore bit might never get set.
+ *
+ * #TODO: Implement readout of vrr.in_range.
+ * We need fastset support for setting the MSA ignore bit in DPCD,
+ * especially on the first real commit when clearing the inherited flag.
+ */
intel_dp_link_training_set_mode(intel_dp,
- crtc_state->port_clock, crtc_state->vrr.flipline);
+ crtc_state->port_clock, crtc_state->vrr.in_range);
}
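
At the DPCD level the "MSA ignore" decision boils down to the MSA-timing-parameter-ignore bit in DP_DOWNSPREAD_CTRL, which intel_dp_link_training_set_mode() programs. A hedged sketch of just that write using the generic drm_dp helpers; the driver's actual write also carries the downspread amplitude configuration:

static int demo_set_msa_ignore(struct drm_dp_aux *aux, bool vrr_in_range)
{
	u8 val = 0;

	if (vrr_in_range)
		val |= DP_MSA_TIMING_PAR_IGNORE_EN;

	/* downspread amplitude bits would be OR'ed in here as well */
	if (drm_dp_dpcd_writeb(aux, DP_DOWNSPREAD_CTRL, val) < 0)
		return -EIO;

	return 0;
}
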
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
@@ -1110,7 +1125,10 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_dp->link_trained = true;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp->link.active = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
@@ -1120,6 +1138,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
}
+
+ intel_hpd_unblock(encoder);
+
+ if (!display->hotplug.ignore_long_hpd &&
+ intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) {
+ int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000;
+
+ intel_encoder_link_check_queue_work(encoder, delay_ms);
+ }
}
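
The new tail of intel_dp_stop_link_train() schedules a follow-up link check: two seconds after a clean training pass, immediately after a failed one, and not at all once MAX_SEQ_TRAIN_FAILURES sequential failures have accumulated. A small sketch of that rule with a stand-in queue callback:

#define DEMO_MAX_SEQ_TRAIN_FAILURES 2

static void demo_queue_link_check(int seq_train_failures,
				  void (*queue)(int delay_ms))
{
	if (seq_train_failures >= DEMO_MAX_SEQ_TRAIN_FAILURES)
		return;	/* fall back instead of re-checking */

	/* settle time after success, immediate retry after failure */
	queue(seq_train_failures ? 0 : 2000);
}
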
static bool
@@ -1602,7 +1629,11 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
* non-transparent mode. During an earlier LTTPR detection this
* could've been prevented by an active link.
*/
- int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+ int lttpr_count;
+
+ intel_hpd_block(encoder);
+
+ lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
/* Still continue with enabling the port and link training. */
@@ -1620,7 +1651,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
} else if (passed) {
intel_dp->link.seq_train_failures = 0;
- intel_encoder_link_check_queue_work(encoder, 2000);
return;
}
@@ -1643,10 +1673,8 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
return;
}
- if (intel_dp->link.seq_train_failures < 2) {
- intel_encoder_link_check_queue_work(encoder, 0);
+ if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
return;
- }
if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
return;
@@ -1693,7 +1721,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_rate = intel_dp->link_rate;
force_rate = intel_dp->link.force_rate;
@@ -1791,7 +1819,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
if (err)
return err;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
current_lane_count = intel_dp->lane_count;
force_lane_count = intel_dp->link.force_lane_count;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 6dc2d31ccb5a..e1501b13f08f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -27,10 +27,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -51,7 +52,9 @@
#include "intel_link_bw.h"
#include "intel_pfit.h"
#include "intel_psr.h"
+#include "intel_step.h"
#include "intel_vdsc.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
/*
@@ -104,6 +107,34 @@ static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
return &dig_port->dp;
}
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp)
+{
+ return intel_dp->mst.active_streams;
+}
+
+static bool intel_dp_mst_dec_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams - 1);
+
+ if (drm_WARN_ON(display->drm, intel_dp->mst.active_streams == 0))
+ return true;
+
+ return --intel_dp->mst.active_streams == 0;
+}
+
+static bool intel_dp_mst_inc_active_streams(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ drm_dbg_kms(display->drm, "active MST streams %d -> %d\n",
+ intel_dp->mst.active_streams, intel_dp->mst.active_streams + 1);
+
+ return intel_dp->mst.active_streams++ == 0;
+}
+
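
The increment helper returns true exactly for the first active stream and the decrement helper exactly for the last, so callers can gate one-time link setup and teardown without re-reading the counter. An illustrative caller shape (not the driver's actual code):

static void demo_stream_pre_enable(struct intel_dp *intel_dp)
{
	bool first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);

	if (first_mst_stream)
		; /* power up the sink, program link parameters, ... */
}

static void demo_stream_post_disable(struct intel_dp *intel_dp)
{
	bool last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);

	if (last_mst_stream)
		; /* power down the sink, release the link */
}
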
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
@@ -210,26 +241,6 @@ static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connec
num_joined_pipes);
}
-static void intel_dp_mst_compute_min_hblank(struct intel_crtc_state *crtc_state,
- int bpp_x16)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int symbol_size = intel_dp_is_uhbr(crtc_state) ? 32 : 8;
- int hblank;
-
- if (DISPLAY_VER(display) < 20)
- return;
-
- /* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
- hblank = DIV_ROUND_UP((DIV_ROUND_UP
- (adjusted_mode->htotal - adjusted_mode->hdisplay, 4) * bpp_x16),
- symbol_size);
-
- crtc_state->min_hblank = hblank;
-}
-
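
For a feel of the numbers, here is a 1920-wide mode plugged into the expression exactly as written above (the computation itself moves to intel_dp_compute_min_hblank() in intel_dp.c); note the programming path below clamps the result to at most 0x10:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int htotal = 2200, hdisplay = 1920;
	int bpp_x16 = 24 * 16;	/* 24 bpp in .4 fixed point */
	int symbol_size = 8;	/* 8b/10b; 32 for 128b/132b (UHBR) */
	int hblank;

	/* DIV_ROUND_UP(2200 - 1920, 4) = 70; 70 * 384 = 26880;
	 * DIV_ROUND_UP(26880, 8) = 3360, later capped at 0x10
	 */
	hblank = DIV_ROUND_UP(DIV_ROUND_UP(htotal - hdisplay, 4) * bpp_x16,
			      symbol_size);

	printf("min_hblank = %d\n", hblank);
	return 0;
}
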
int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
@@ -300,8 +311,6 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
- intel_dp_mst_compute_min_hblank(crtc_state, link_bpp_x16);
-
intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
@@ -590,12 +599,13 @@ adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
+ struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
+ if (!intel_dp_compute_config_limits(intel_dp, connector,
+ crtc_state, false, dsc,
limits))
return false;
@@ -710,6 +720,12 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
intel_ddi_compute_min_voltage_level(pipe_config);
@@ -990,25 +1006,17 @@ static void mst_stream_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- enum transcoder trans = old_crtc_state->cpu_transcoder;
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
- if (intel_dp->mst.active_links == 1)
- intel_dp->link_trained = false;
+ if (intel_dp_mst_active_streams(intel_dp) == 1)
+ intel_dp->link.active = false;
intel_hdcp_disable(intel_mst->connector);
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
-
- if (DISPLAY_VER(display) >= 20)
- intel_de_write(display, DP_MIN_HBLANK_CTL(trans), 0);
}
static void mst_stream_post_disable(struct intel_atomic_state *state,
@@ -1034,8 +1042,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
bool last_mst_stream;
int i;
- intel_dp->mst.active_links--;
- last_mst_stream = intel_dp->mst.active_links == 0;
+ last_mst_stream = intel_dp_mst_dec_active_streams(intel_dp);
+
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
@@ -1062,6 +1070,8 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
drm_dp_remove_payload_part2(&intel_dp->mst.mgr, new_mst_state,
old_payload, new_payload);
+ intel_vrr_transcoder_disable(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
@@ -1104,8 +1114,6 @@ static void mst_stream_post_disable(struct intel_atomic_state *state,
primary_encoder->post_disable(state, primary_encoder,
old_crtc_state, NULL);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
}
static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
@@ -1116,7 +1124,7 @@ static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0 &&
+ if (intel_dp_mst_active_streams(intel_dp) == 0 &&
primary_encoder->post_pll_disable)
primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
@@ -1129,7 +1137,7 @@ static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- if (intel_dp->mst.active_links == 0)
+ if (intel_dp_mst_active_streams(intel_dp) == 0)
primary_encoder->pre_pll_enable(state, primary_encoder,
pipe_config, NULL);
else
@@ -1189,13 +1197,11 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
*/
connector->encoder = encoder;
intel_mst->connector = connector;
- first_mst_stream = intel_dp->mst.active_links == 0;
+
+ first_mst_stream = intel_dp_mst_inc_active_streams(intel_dp);
drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
if (first_mst_stream)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -1210,8 +1216,6 @@ static void mst_stream_pre_enable(struct intel_atomic_state *state,
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
- intel_dp->mst.active_links++;
-
ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->mst.port));
if (ret < 0)
@@ -1279,9 +1283,9 @@ static void mst_stream_enable(struct intel_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst.mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
- bool first_mst_stream = intel_dp->mst.active_links == 1;
+ bool first_mst_stream = intel_dp_mst_active_streams(intel_dp) == 1;
struct intel_crtc *pipe_crtc;
- int ret, i, min_hblank;
+ int ret, i;
drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
@@ -1296,41 +1300,17 @@ static void mst_stream_enable(struct intel_atomic_state *state,
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
- if (DISPLAY_VER(display) >= 20) {
- /*
- * adjust the BlankingStart/BlankingEnd framing control from
- * the calculated value
- */
- min_hblank = pipe_config->min_hblank - 2;
-
- /* Maximum value to be programmed is limited to 0x10 */
- min_hblank = min(0x10, min_hblank);
-
- /*
- * Minimum hblank accepted for 128b/132b would be 5 and for
- * 8b/10b would be 3 symbol count
- */
- if (intel_dp_is_uhbr(pipe_config))
- min_hblank = max(min_hblank, 5);
- else
- min_hblank = max(min_hblank, 3);
-
- intel_de_write(display, DP_MIN_HBLANK_CTL(trans),
- min_hblank);
- }
-
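
The block removed here carried the clamping rules that now live with the common min-hblank computation: subtract 2 for the BlankingStart/BlankingEnd framing, cap at the 0x10 register maximum, and floor at 5 symbols for 128b/132b or 3 for 8b/10b. As a standalone sketch:

static int demo_clamp_min_hblank(int computed, bool is_uhbr)
{
	int min_hblank = computed - 2;

	if (min_hblank > 0x10)
		min_hblank = 0x10;

	if (is_uhbr && min_hblank < 5)
		min_hblank = 5;
	else if (!is_uhbr && min_hblank < 3)
		min_hblank = 3;

	return min_hblank;
}
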
enable_bs_jitter_was(pipe_config);
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ intel_vrr_transcoder_enable(pipe_config);
+
intel_ddi_clear_act_sent(encoder, pipe_config);
intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- drm_dbg_kms(display->drm, "active links %d\n",
- intel_dp->mst.active_links);
-
intel_ddi_wait_for_act_sent(encoder, pipe_config);
drm_dp_check_act_status(&intel_dp->mst.mgr);
@@ -1348,8 +1328,6 @@ static void mst_stream_enable(struct intel_atomic_state *state,
FECSTALL_DIS_DPTSTREAM_DPTTG,
pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
- intel_audio_sdp_split_update(pipe_config);
-
intel_enable_transcoder(pipe_config);
for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) {
@@ -1870,12 +1848,6 @@ mst_stream_encoders_create(struct intel_digital_port *dig_port)
}
int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
-{
- return dig_port->dp.mst.active_links;
-}
-
-int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
struct intel_display *display = to_intel_display(dig_port);
@@ -2101,7 +2073,7 @@ void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp)
u8 rate_select;
u8 link_bw;
- if (intel_dp->link_trained)
+ if (intel_dp->link.active)
return;
if (intel_mst_probed_link_params_valid(intel_dp, link_rate, lane_count))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index c1bbfeb02ca9..ab09b487c6bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -18,7 +18,7 @@ struct intel_link_bw_limits;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
-int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
+int intel_dp_mst_active_streams(struct intel_dp *intel_dp);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 429f89543789..69f242139420 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -222,9 +222,7 @@ static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
static const struct bxt_dpio_phy_info *
bxt_get_phy_list(struct intel_display *display, int *count)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
*count = ARRAY_SIZE(glk_dpio_phy_info);
return glk_dpio_phy_info;
} else {
@@ -808,9 +806,9 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
vlv_dpio_put(dev_priv);
}
-void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- bool reset)
+static void __chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -853,6 +851,17 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
}
}
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ vlv_dpio_get(i915);
+ __chv_data_lane_soft_reset(encoder, crtc_state, reset);
+ vlv_dpio_put(i915);
+}
+
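
This is the usual locked/unlocked split: the double-underscore variant assumes the DPIO lock is already held, the public wrapper takes and drops it, and callers already under vlv_dpio_get() (chv_phy_pre_pll_enable() and chv_phy_pre_encoder_enable() here) call the __ variant directly. A generic sketch of the shape with a stand-in mutex:

static DEFINE_MUTEX(demo_lock);

static void __demo_soft_reset(bool reset)
{
	lockdep_assert_held(&demo_lock);
	/* ... poke the lane reset bits ... */
	(void)reset;
}

static void demo_soft_reset(bool reset)
{
	mutex_lock(&demo_lock);
	__demo_soft_reset(reset);
	mutex_unlock(&demo_lock);
}
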
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -880,7 +889,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
vlv_dpio_get(dev_priv);
/* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, crtc_state, true);
+ __chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
@@ -1008,7 +1017,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
}
/* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, crtc_state, false);
+ __chv_data_lane_soft_reset(encoder, crtc_state, false);
vlv_dpio_put(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 08a30e5aafce..a9e9b98d0bf9 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -373,14 +373,14 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
if ((hw_state->dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return i915->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(i915))
+ return display->vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(display))
return 120000;
- else if (DISPLAY_VER(i915) != 2)
+ else if (DISPLAY_VER(display) != 2)
return 96000;
else
return 48000;
@@ -389,27 +389,27 @@ static int i9xx_pll_refclk(const struct intel_crtc_state *crtc_state)
void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
struct intel_dpll_hw_state *dpll_hw_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
- if (DISPLAY_VER(dev_priv) >= 4) {
+ if (DISPLAY_VER(display) >= 4) {
u32 tmp;
/* No way to read it out on pipes B and C */
- if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
+ if (display->platform.cherryview && crtc->pipe != PIPE_A)
+ tmp = display->state.chv_dpll_md[crtc->pipe];
else
- tmp = intel_de_read(dev_priv,
- DPLL_MD(dev_priv, crtc->pipe));
+ tmp = intel_de_read(display,
+ DPLL_MD(display, crtc->pipe));
hw_state->dpll_md = tmp;
}
- hw_state->dpll = intel_de_read(dev_priv, DPLL(dev_priv, crtc->pipe));
+ hw_state->dpll = intel_de_read(display, DPLL(display, crtc->pipe));
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- hw_state->fp0 = intel_de_read(dev_priv, FP0(crtc->pipe));
- hw_state->fp1 = intel_de_read(dev_priv, FP1(crtc->pipe));
+ if (!display->platform.valleyview && !display->platform.cherryview) {
+ hw_state->fp0 = intel_de_read(display, FP0(crtc->pipe));
+ hw_state->fp1 = intel_de_read(display, FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
hw_state->dpll &= ~(DPLL_LOCK_VLV |
@@ -421,8 +421,8 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
u32 dpll = hw_state->dpll;
u32 fp;
@@ -436,7 +436,7 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
fp = hw_state->fp1;
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
@@ -444,8 +444,8 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (DISPLAY_VER(dev_priv) != 2) {
- if (IS_PINEVIEW(dev_priv))
+ if (DISPLAY_VER(display) != 2) {
+ if (display->platform.pineview)
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
@@ -462,23 +462,23 @@ void i9xx_crtc_clock_get(struct intel_crtc_state *crtc_state)
7 : 14;
break;
default:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return;
}
- if (IS_PINEVIEW(dev_priv))
+ if (display->platform.pineview)
port_clock = pnv_calc_dpll_params(refclk, &clock);
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
enum pipe lvds_pipe;
- if (IS_I85X(dev_priv) &&
- intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ if (display->platform.i85x &&
+ intel_lvds_port_enabled(display, LVDS, &lvds_pipe) &&
lvds_pipe == crtc->pipe) {
- u32 lvds = intel_de_read(dev_priv, LVDS);
+ u32 lvds = intel_de_read(display, LVDS);
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -577,7 +577,7 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state)
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
-static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct intel_display *display,
const struct intel_limit *limit,
const struct dpll *clock)
{
@@ -590,14 +590,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
return false;
- if (!IS_PINEVIEW(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (!display->platform.pineview &&
+ !display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake)
if (clock->m1 <= clock->m2)
return false;
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ if (!display->platform.valleyview && !display->platform.cherryview &&
+ !display->platform.broxton && !display->platform.geminilake) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -620,7 +620,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -628,7 +628,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@@ -656,7 +656,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -677,7 +677,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -714,7 +714,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int err = target;
@@ -733,7 +733,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int this_err;
pnv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -770,7 +770,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
int max_n;
bool found = false;
@@ -794,7 +794,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int this_err;
i9xx_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
@@ -817,7 +817,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
* Check if the calculated PLL configuration is more optimal compared to the
* best configuration and error found so far. Return the calculated error.
*/
-static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+static bool vlv_PLL_is_optimal(struct intel_display *display, int target_freq,
const struct dpll *calculated_clock,
const struct dpll *best_clock,
unsigned int best_error_ppm,
@@ -827,13 +827,13 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* For CHV ignore the error and consider only the P value.
* Prefer a bigger P value based on HW requirements.
*/
- if (IS_CHERRYVIEW(to_i915(dev))) {
+ if (display->platform.cherryview) {
*error_ppm = 0;
return calculated_clock->p > best_clock->p;
}
- if (drm_WARN_ON_ONCE(dev, !target_freq))
+ if (drm_WARN_ON_ONCE(display->drm, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
@@ -864,8 +864,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
struct dpll clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
@@ -889,12 +888,12 @@ vlv_find_best_dpll(const struct intel_limit *limit,
vlv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev),
+ if (!intel_pll_is_valid(display,
limit,
&clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target,
+ if (!vlv_PLL_is_optimal(display, target,
&clock,
best_clock,
bestppm, &ppm))
@@ -922,8 +921,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
const struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
+ struct intel_display *display = to_intel_display(crtc_state);
unsigned int best_error_ppm;
struct dpll clock;
u64 m2;
@@ -958,10 +956,10 @@ chv_find_best_dpll(const struct intel_limit *limit,
chv_calc_dpll_params(refclk, &clock);
- if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ if (!intel_pll_is_valid(display, limit, &clock))
continue;
- if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ if (!vlv_PLL_is_optimal(display, target, &clock, best_clock,
best_error_ppm, &error_ppm))
continue;
@@ -1005,8 +1003,6 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1016,8 +1012,8 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ if (display->platform.i945g || display->platform.i945gm ||
+ display->platform.g33 || display->platform.pineview) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -1030,10 +1026,10 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_G4X(dev_priv)) {
+ if (display->platform.g4x) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- } else if (IS_PINEVIEW(dev_priv)) {
+ } else if (display->platform.pineview) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
WARN_ON(reduced_clock->p1 != clock->p1);
} else {
@@ -1057,7 +1053,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
}
WARN_ON(reduced_clock->p2 != clock->p2);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
if (crtc_state->sdvo_tv_clock)
@@ -1075,11 +1071,10 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
- if (IS_PINEVIEW(dev_priv)) {
+ if (display->platform.pineview) {
hw_state->fp0 = pnv_dpll_compute_fp(clock);
hw_state->fp1 = pnv_dpll_compute_fp(reduced_clock);
} else {
@@ -1089,7 +1084,7 @@ static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
hw_state->dpll = i9xx_dpll(crtc_state, clock, reduced_clock);
- if (DISPLAY_VER(dev_priv) >= 4)
+ if (DISPLAY_VER(display) >= 4)
hw_state->dpll_md = i965_dpll_md(crtc_state);
}
@@ -1098,8 +1093,6 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE | DPLL_VGA_MODE_DIS;
@@ -1129,7 +1122,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
* both DPLLS. The spec says we should disable the DVO 2X clock
* when not needed, but this seems to work fine in practice.
*/
- if (IS_I830(dev_priv) ||
+ if (display->platform.i830 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@@ -1157,14 +1150,14 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
int ret;
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1186,13 +1179,13 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (DISPLAY_VER(dev_priv) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1241,12 +1234,10 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
+ ((intel_panel_use_ssc(display) && display->vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(display) && intel_is_dual_link_lvds(display))))
return 25;
if (crtc_state->sdvo_tv_clock)
@@ -1276,8 +1267,6 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *reduced_clock)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
dpll = DPLL_VCO_ENABLE;
@@ -1311,7 +1300,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
* clear if it's a win or loss power wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ if (INTEL_NUM_PIPES(display) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -1362,7 +1351,6 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1375,13 +1363,13 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
- dev_priv->display.vbt.lvds_ssc_freq);
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ display->vbt.lvds_ssc_freq);
+ refclk = display->vbt.lvds_ssc_freq;
}
- if (intel_is_dual_link_lvds(dev_priv)) {
+ if (intel_is_dual_link_lvds(display)) {
if (refclk == 100000)
limit = &ilk_limits_dual_lvds_100m;
else
@@ -1539,7 +1527,6 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1547,13 +1534,13 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
- if (intel_is_dual_link_lvds(dev_priv))
+ if (intel_is_dual_link_lvds(display))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@@ -1589,7 +1576,6 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1597,8 +1583,8 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1628,7 +1614,6 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1636,8 +1621,8 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1669,7 +1654,6 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
@@ -1677,8 +1661,8 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(display)) {
- refclk = dev_priv->display.vbt.lvds_ssc_freq;
- drm_dbg_kms(&dev_priv->drm,
+ refclk = display->vbt.lvds_ssc_freq;
+ drm_dbg_kms(display->drm,
"using SSC reference clock of %d kHz\n",
refclk);
}
@@ -1751,12 +1735,12 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -1764,9 +1748,9 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
if (!crtc_state->hw.enable)
return 0;
- ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
+ ret = display->funcs.dpll->crtc_compute_clock(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1777,23 +1761,23 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
- drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
+ drm_WARN_ON(display->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(display->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
if (!crtc_state->hw.enable || crtc_state->shared_dpll)
return 0;
- if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
+ if (!display->funcs.dpll->crtc_get_shared_dpll)
return 0;
- ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
+ ret = display->funcs.dpll->crtc_get_shared_dpll(state, crtc);
if (ret) {
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -1802,43 +1786,42 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
}
void
-intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 14)
- dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
- else if (IS_DG2(dev_priv))
- dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
- else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
- else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &chv_dpll_funcs;
- else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
- else if (IS_G4X(dev_priv))
- dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
- else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
- else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
+intel_dpll_init_clock_hook(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ display->funcs.dpll = &mtl_dpll_funcs;
+ else if (display->platform.dg2)
+ display->funcs.dpll = &dg2_dpll_funcs;
+ else if (DISPLAY_VER(display) >= 9 || HAS_DDI(display))
+ display->funcs.dpll = &hsw_dpll_funcs;
+ else if (HAS_PCH_SPLIT(display))
+ display->funcs.dpll = &ilk_dpll_funcs;
+ else if (display->platform.cherryview)
+ display->funcs.dpll = &chv_dpll_funcs;
+ else if (display->platform.valleyview)
+ display->funcs.dpll = &vlv_dpll_funcs;
+ else if (display->platform.g4x)
+ display->funcs.dpll = &g4x_dpll_funcs;
+ else if (display->platform.pineview)
+ display->funcs.dpll = &pnv_dpll_funcs;
+ else if (DISPLAY_VER(display) != 2)
+ display->funcs.dpll = &i9xx_dpll_funcs;
else
- dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
+ display->funcs.dpll = &i8xx_dpll_funcs;
}
-static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct intel_display *display)
{
- if (IS_I830(dev_priv))
+ if (display->platform.i830)
return false;
- return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+ return display->platform.pineview || display->platform.mobile;
}
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
int i;
@@ -1846,27 +1829,27 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(display, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- if (i9xx_has_pps(dev_priv))
+ if (i9xx_has_pps(display))
assert_pps_unlocked(display, pipe);
- intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
- intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
+ intel_de_write(display, FP0(pipe), hw_state->fp0);
+ intel_de_write(display, FP1(pipe), hw_state->fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ if (DISPLAY_VER(display) >= 4) {
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
} else {
/* The pixel multiplier can only be updated once the
@@ -1874,20 +1857,21 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150); /* wait for warmup */
}
}
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+static void vlv_pllb_recal_opamp(struct intel_display *display,
enum dpio_phy phy, enum dpio_channel ch)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
/*
@@ -1916,6 +1900,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct dpll *clock = &crtc_state->dpll;
@@ -1930,7 +1915,7 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, phy, ch);
+ vlv_pllb_recal_opamp(display, phy, ch);
/* Set up Tx target for periodic Rcomp update */
vlv_dpio_write(dev_priv, phy, VLV_PCS_DW17_BCAST, 0x0100000f);
@@ -2003,24 +1988,23 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2030,7 +2014,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2038,8 +2022,8 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
_vlv_enable_pll(crtc_state);
}
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe), hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_write(display, DPLL_MD(display, pipe), hw_state->dpll_md);
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
@@ -2133,6 +2117,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2156,18 +2141,17 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
udelay(1);
/* Enable PLL */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), hw_state->dpll);
+ intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(dev_priv, pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+ if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
enum pipe pipe = crtc->pipe;
@@ -2177,7 +2161,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_pps_unlocked(display, pipe);
/* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
hw_state->dpll & ~DPLL_VCO_ENABLE);
if (hw_state->dpll & DPLL_VCO_ENABLE) {
@@ -2192,29 +2176,29 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(dev_priv, PIPE_B),
+ intel_de_write(display, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(display, DPLL_MD(display, PIPE_B),
hw_state->dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->display.state.chv_dpll_md[pipe] = hw_state->dpll_md;
+ intel_de_write(display, CBR4_VLV, 0);
+ display->state.chv_dpll_md[pipe] = hw_state->dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) &
+ drm_WARN_ON(display->drm,
+ (intel_de_read(display, DPLL(display, PIPE_B)) &
DPLL_VGA_MODE_DIS) == 0);
} else {
- intel_de_write(dev_priv, DPLL_MD(dev_priv, pipe),
+ intel_de_write(display, DPLL_MD(display, pipe),
hw_state->dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL_MD(display, pipe));
}
}
/**
* vlv_force_pll_on - forcibly enable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to enable
* @dpll: PLL configuration
*
@@ -2222,10 +2206,9 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_crtc_state *crtc_state;
@@ -2238,7 +2221,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
crtc_state->dpll = *dpll;
crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
chv_compute_dpll(crtc_state);
chv_enable_pll(crtc_state);
} else {
@@ -2251,9 +2234,8 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
return 0;
}
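
For context, a sketch of how the force-on/off pair is meant to be used now that both take struct intel_display. The caller and the divider values are hypothetical; only the two signatures come from this patch:

static void example_force_pll_usage(struct intel_display *display)
{
	/* divider values are made up for illustration */
	const struct dpll clock = {
		.n = 1, .m1 = 2, .m2 = 0x1a, .p1 = 2, .p2 = 2,
	};

	if (vlv_force_pll_on(display, PIPE_A, &clock))
		return;

	/* ... do the work that needed a running PLL ... */

	vlv_force_pll_off(display, PIPE_A);
}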
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
/* Make sure the pipe isn't still relying on us */
@@ -2268,9 +2250,9 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_posting_read(display, DPLL(display, pipe));
}
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+void chv_disable_pll(struct intel_display *display, enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
@@ -2316,18 +2298,18 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
/**
* vlv_force_pll_off - forcibly disable just the PLL
- * @dev_priv: i915 private structure
+ * @display: display device
* @pipe: pipe PLL to disable
*
 * Disable the PLL for @pipe, undoing a previous vlv_force_pll_on(). To be
 * used in cases where the PLL was kept enabled even though @pipe itself
 * was not going to be enabled.
*/
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe)
{
- if (IS_CHERRYVIEW(dev_priv))
- chv_disable_pll(dev_priv, pipe);
+ if (display->platform.cherryview)
+ chv_disable_pll(display, pipe);
else
- vlv_disable_pll(dev_priv, pipe);
+ vlv_disable_pll(display, pipe);
}
/* Only for pre-ILK configs */
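
Note on the pattern above: every helper in intel_dpll.c now derives struct intel_display from whatever object it is handed (crtc_state, atomic state, encoder) and uses that for MMIO access, logging, and platform checks, rather than detouring through drm_i915_private. A minimal sketch of the converted shape -- the helper name is hypothetical, the calls are the ones used throughout this file:

static void example_program_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;

	/* display-scoped MMIO helpers replace the dev_priv variants */
	intel_de_write(display, DPLL(display, pipe),
		       crtc_state->dpll_hw_state.i9xx.dpll);
	intel_de_posting_read(display, DPLL(display, pipe));

	/* logging goes through display->drm instead of &dev_priv->drm */
	drm_dbg_kms(display->drm, "[CRTC:%d:%s] PLL programmed\n",
		    crtc->base.base.id, crtc->base.name);
}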
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 21d06cbd2ce7..280e90a57c87 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -8,16 +8,15 @@
#include <linux/types.h>
+enum pipe;
struct dpll;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_display;
struct intel_dpll_hw_state;
-enum pipe;
-void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+void intel_dpll_init_clock_hook(struct intel_display *display);
int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -29,14 +28,14 @@ void i9xx_dpll_get_hw_state(struct intel_crtc *crtc,
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
void chv_compute_dpll(struct intel_crtc_state *crtc_state);
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+int vlv_force_pll_on(struct intel_display *display, enum pipe pipe,
const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_force_pll_off(struct intel_display *display, enum pipe pipe);
void chv_enable_pll(const struct intel_crtc_state *crtc_state);
-void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void chv_disable_pll(struct intel_display *display, enum pipe pipe);
void vlv_enable_pll(const struct intel_crtc_state *crtc_state);
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_disable_pll(struct intel_display *display, enum pipe pipe);
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state);
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c825a507b905..9da051a3f455 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -24,9 +24,11 @@
#include <linux/math.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "bxt_dpio_phy_regs.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -38,6 +40,7 @@
#include "intel_hti.h"
#include "intel_mg_phy_regs.h"
#include "intel_pch_refclk.h"
+#include "intel_step.h"
#include "intel_tc.h"
/**
@@ -257,7 +260,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
unsigned int old_mask;
if (drm_WARN_ON(display->drm, !pll))
@@ -303,7 +306,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int pipe_mask = intel_crtc_joined_pipe_mask(crtc_state);
/* PCH only available on ILK+ */
if (DISPLAY_VER(display) < 5)
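
The switch from BIT(crtc->pipe) to intel_crtc_joined_pipe_mask() in the two hunks above matters for joined (bigjoiner) configurations, where one CRTC state drives several pipes and the shared DPLL refcount has to cover all of them. A sketch of what the wider mask buys, with a hypothetical helper name:

static bool pipes_added_by_joining(const struct intel_crtc_state *crtc_state)
{
	unsigned int primary = BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
	unsigned int joined = intel_crtc_joined_pipe_mask(crtc_state);

	/* non-zero whenever bigjoiner pulls secondary pipes into the set */
	return (joined & ~primary) != 0;
}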
@@ -609,13 +612,12 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
- if (HAS_PCH_IBX(i915)) {
+ if (HAS_PCH_IBX(display)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
id = (enum intel_dpll_id) crtc->pipe;
pll = intel_get_shared_dpll_by_id(display, id);
@@ -715,7 +717,6 @@ static void hsw_ddi_spll_enable(struct intel_display *display,
static void hsw_ddi_wrpll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
@@ -726,13 +727,12 @@ static void hsw_ddi_wrpll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static void hsw_ddi_spll_disable(struct intel_display *display,
struct intel_shared_dpll *pll)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum intel_dpll_id id = pll->info->id;
intel_de_rmw(display, SPLL_CTL, SPLL_PLL_ENABLE, 0);
@@ -743,7 +743,7 @@ static void hsw_ddi_spll_disable(struct intel_display *display,
* that depend on it have been shut down.
*/
if (display->dpll.pch_ssc_use & BIT(id))
- intel_init_pch_refclk(i915);
+ intel_init_pch_refclk(display);
}
static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
@@ -2606,10 +2606,8 @@ ehl_combo_pll_div_frac_wa_needed(struct intel_display *display)
{
return ((display->platform.elkhartlake &&
IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER)) ||
- display->platform.tigerlake ||
- display->platform.alderlake_s ||
- display->platform.alderlake_p) &&
- display->dpll.ref_clks.nssc == 38400;
+ DISPLAY_VER(display) >= 12) &&
+ display->dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
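
The rewritten predicate above assumes that every display version 12+ platform with a 38.4 MHz non-SSC reference needs the fractional divider workaround, which subsumes the tigerlake/alderlake_s/alderlake_p list it replaces. Restated as a standalone sketch (hypothetical helper name):

static bool div_frac_wa_needed_sketch(struct intel_display *display)
{
	bool ehl_b0_plus = display->platform.elkhartlake &&
		IS_DISPLAY_STEP(display, STEP_B0, STEP_FOREVER);

	return (ehl_b0_plus || DISPLAY_VER(display) >= 12) &&
		display->dpll.ref_clks.nssc == 38400;
}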
@@ -4309,7 +4307,6 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
*/
void intel_shared_dpll_init(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
@@ -4339,7 +4336,7 @@ void intel_shared_dpll_init(struct intel_display *display)
dpll_mgr = &skl_pll_mgr;
else if (HAS_DDI(display))
dpll_mgr = &hsw_pll_mgr;
- else if (HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915))
+ else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr)
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 0d8ebe38226e..43bd97e4f589 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,6 +9,7 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -127,7 +128,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
struct drm_i915_private *i915 = vm->i915;
struct intel_display *display = &i915->display;
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_vma *vma;
void __iomem *iomem;
struct i915_gem_ww_ctx ww;
@@ -137,7 +138,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
if (i915_gem_object_is_stolen(dpt->obj))
pin_flags |= PIN_MAPPABLE;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
for_i915_gem_ww(&ww, err, true) {
@@ -169,7 +170,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
dpt->obj->mm.dirty = true;
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return err ? ERR_PTR(err) : vma;
}
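
The runtime-PM change above is another recurring pattern in this series: the GT-level intel_wakeref_t plus intel_runtime_pm_get()/put() pair becomes a display-scoped reference tracked as struct ref_tracker *. A minimal sketch of the guard, with the guarded work elided:

static void example_display_rpm_section(struct intel_display *display)
{
	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);

	/* ... touch display hardware here ... */

	intel_display_rpm_put(display, wakeref);
}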
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index d2dede0a5229..ce5aa0ca0fa5 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,7 +3,6 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -12,9 +11,9 @@
void intel_dpt_configure(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (DISPLAY_VER(i915) == 14) {
+ if (DISPLAY_VER(display) == 14) {
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
@@ -22,15 +21,15 @@ void intel_dpt_configure(struct intel_crtc *crtc)
if (plane_id == PLANE_CURSOR)
continue;
- intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+ intel_de_rmw(display, PLANE_CHICKEN(pipe, plane_id),
PLANE_CHICKEN_DISABLE_DPT,
- i915->display.params.enable_dpt ? 0 :
+ display->params.enable_dpt ? 0 :
PLANE_CHICKEN_DISABLE_DPT);
}
- } else if (DISPLAY_VER(i915) == 13) {
- intel_de_rmw(i915, CHICKEN_MISC_2,
+ } else if (DISPLAY_VER(display) == 13) {
+ intel_de_rmw(display, CHICKEN_MISC_2,
CHICKEN_MISC_DISABLE_DPT,
- i915->display.params.enable_dpt ? 0 :
+ display->params.enable_dpt ? 0 :
CHICKEN_MISC_DISABLE_DPT);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 9fc4003d1579..481488d1fe67 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,13 +4,15 @@
*
*/
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
#include "intel_dsb_buffer.h"
@@ -142,10 +144,10 @@ static int dsb_vtotal(struct intel_atomic_state *state,
static int dsb_dewake_scanline_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- unsigned int latency = skl_watermark_max_latency(i915, 0);
+ unsigned int latency = skl_watermark_max_latency(display, 0);
return intel_mode_vdisplay(&crtc_state->hw.adjusted_mode) -
intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, latency);
@@ -795,22 +797,22 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
enum intel_dsb_id dsb_id,
unsigned int max_cmds)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- intel_wakeref_t wakeref;
+ struct intel_display *display = to_intel_display(state);
+ struct ref_tracker *wakeref;
struct intel_dsb *dsb;
unsigned int size;
- if (!HAS_DSB(i915))
+ if (!HAS_DSB(display))
return NULL;
- if (!i915->display.params.enable_dsb)
+ if (!display->params.enable_dsb)
return NULL;
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
@@ -818,7 +820,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
goto out_put_rpm;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
dsb->id = dsb_id;
dsb->crtc = crtc;
@@ -831,10 +833,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
return dsb;
out_put_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
kfree(dsb);
out:
- drm_info_once(&i915->drm,
+ drm_info_once(display->drm,
"[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n",
crtc->base.base.id, crtc->base.name, dsb_id);
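
One detail worth spelling out from intel_dsb_prepare(): the command buffer budgets roughly one quadword (8 bytes) per instruction and rounds up to whole cachelines. A worked example of that sizing rule, assuming 64-byte cachelines (the code uses CACHELINE_BYTES):

static unsigned int example_dsb_size(unsigned int max_cmds)
{
	/* ~8 bytes per command, rounded up to full 64-byte cachelines */
	return ALIGN(max_cmds * 8, 64);
}

/* example_dsb_size(100) == 832: 800 bytes rounded up to 13 cachelines */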
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 403151175a87..a8f012119165 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -4,8 +4,9 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_dsi.h"
#include "intel_panel.h"
@@ -116,14 +117,14 @@ struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
enum drm_panel_orientation
intel_dsi_get_panel_orientation(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
enum drm_panel_orientation orientation;
orientation = connector->panel.vbt.dsi.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
- orientation = dev_priv->display.vbt.orientation;
+ orientation = display->vbt.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
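
intel_dsi_get_panel_orientation() above is a straight fallback chain: the panel-specific VBT value wins, then the global VBT value. A condensed sketch; the final DRM_MODE_PANEL_ORIENTATION_NORMAL fallback is assumed, since the diff cuts off before it:

static enum drm_panel_orientation
orientation_sketch(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);

	if (connector->panel.vbt.dsi.orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
		return connector->panel.vbt.dsi.orientation;

	if (display->vbt.orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
		return display->vbt.orientation;

	return DRM_MODE_PANEL_ORIENTATION_NORMAL;
}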
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 049443245310..b3c453bf7d5c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -24,9 +24,10 @@
*/
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"
@@ -162,7 +163,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_panel *panel = &connector->panel;
if (panel->vbt.backlight.brightness_precision_bits > 8)
@@ -172,7 +173,7 @@ static int dcs_setup_backlight(struct intel_connector *connector,
panel->backlight.level = panel->backlight.max;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using DCS for backlight control\n",
connector->base.base.id, connector->base.name);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 7b2ffd14ae6e..29c920983413 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -31,16 +31,16 @@
#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
-
#include <linux/unaligned.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -102,13 +102,13 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
flags = *data++;
type = *data++;
@@ -120,12 +120,12 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
- if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ if (drm_WARN_ON(display->drm, !intel_dsi->dsi_hosts[port]))
goto out;
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
- drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ drm_dbg_kms(display->drm, "no dsi device for port %c\n",
port_name(port));
goto out;
}
@@ -150,8 +150,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- drm_dbg(&dev_priv->drm,
- "Generic Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -163,15 +162,14 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- drm_dbg(&dev_priv->drm,
- "DCS Read not yet implemented or used\n");
+ drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
break;
}
- if (DISPLAY_VER(dev_priv) < 11)
+ if (DISPLAY_VER(display) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -182,10 +180,10 @@ out:
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
u32 delay = *((const u32 *) data);
- drm_dbg_kms(&i915->drm, "%d usecs\n", delay);
+ drm_dbg_kms(display->drm, "%d usecs\n", delay);
usleep_range(delay, delay + 10);
data += 4;
@@ -196,7 +194,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
const char *con_id, u8 idx, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this table is a quick ugly hack. */
static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
@@ -204,10 +202,10 @@ static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
if (gpio_desc) {
gpiod_set_value(gpio_desc, value);
} else {
- gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+ gpio_desc = devm_gpiod_get_index(display->drm->dev, con_id, idx,
value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
if (IS_ERR(gpio_desc)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"GPIO index %u request failed (%pe)\n",
gpio_index, gpio_desc);
return;
@@ -242,16 +240,16 @@ static void soc_opaque_gpio_set_value(struct intel_connector *connector,
static void vlv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
if (connector->panel.vbt.dsi.seq_version < 3) {
if (gpio_source == 1) {
- drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
+ drm_dbg_kms(display->drm, "SC gpio not supported\n");
return;
}
if (gpio_source > 1) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
@@ -264,7 +262,7 @@ static void vlv_gpio_set_value(struct intel_connector *connector,
static void chv_gpio_set_value(struct intel_connector *connector,
u8 gpio_source, u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
@@ -284,13 +282,13 @@ static void chv_gpio_set_value(struct intel_connector *connector,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"invalid gpio index %u for GPIO N\n",
gpio_index);
return;
@@ -320,13 +318,12 @@ enum {
MIPI_VIO_EN_2,
};
-static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+static void icl_native_gpio_set_value(struct intel_display *display,
int gpio, bool value)
{
- struct intel_display *display = &dev_priv->display;
int index;
- if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+ if (drm_WARN_ON(display->drm, DISPLAY_VER(display) == 11 && gpio >= MIPI_RESET_2))
return;
switch (gpio) {
@@ -343,25 +340,25 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
* The locking protects against concurrent SHOTPLUG_CTL_DDI
* modifications in irq setup and handling.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+ spin_lock_irq(&display->irq.lock);
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
break;
case MIPI_AVDD_EN_1:
case MIPI_AVDD_EN_2:
index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), PANEL_POWER_ON,
+ intel_de_rmw(display, PP_CONTROL(display, index), PANEL_POWER_ON,
value ? PANEL_POWER_ON : 0);
break;
case MIPI_BKLT_EN_1:
case MIPI_BKLT_EN_2:
index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), EDP_BLC_ENABLE,
+ intel_de_rmw(display, PP_CONTROL(display, index), EDP_BLC_ENABLE,
value ? EDP_BLC_ENABLE : 0);
break;
case MIPI_AVEE_EN_1:
@@ -389,13 +386,12 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source = 0, gpio_index = 0, gpio_number;
bool value;
int size;
- bool native = DISPLAY_VER(i915) >= 11;
+ bool native = DISPLAY_VER(display) >= 11;
if (connector->panel.vbt.dsi.seq_version >= 3) {
size = 3;
@@ -416,16 +412,16 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
gpio_source = (data[1] >> 1) & 3;
}
- drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ drm_dbg_kms(display->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
if (native)
- icl_native_gpio_set_value(i915, gpio_number, value);
- else if (DISPLAY_VER(i915) >= 9)
+ icl_native_gpio_set_value(display, gpio_number, value);
+ else if (DISPLAY_VER(display) >= 9)
bxt_gpio_set_value(connector, gpio_index, value);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
- else if (IS_CHERRYVIEW(i915))
+ else if (display->platform.cherryview)
chv_gpio_set_value(connector, gpio_source, gpio_number, value);
return data + size;
@@ -463,8 +459,8 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
const u16 target_addr)
{
- struct drm_device *drm_dev = intel_dsi->base.base.dev;
- struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct acpi_device *adev = ACPI_COMPANION(display->drm->dev);
struct i2c_adapter_lookup lookup = {
.target_addr = target_addr,
.intel_dsi = intel_dsi,
@@ -484,7 +480,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
@@ -494,7 +490,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
u8 payload_size = *(data + 6);
u8 *payload_data;
- drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
+ drm_dbg_kms(display->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n",
vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7);
if (intel_dsi->i2c_bus_num < 0) {
@@ -504,7 +500,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
if (!adapter) {
- drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
+ drm_err(display->drm, "Cannot find a valid i2c bus for xfer\n");
goto err_bus;
}
@@ -522,7 +518,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
ret = i2c_transfer(adapter, &msg, 1);
if (ret < 0)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to xfer payload of size (%u) to reg (%u)\n",
payload_size, reg_offset);
@@ -535,16 +531,16 @@ err_bus:
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
- drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
+ drm_dbg_kms(display->drm, "Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
#ifdef CONFIG_PMIC_OPREGION
u32 value, mask, reg_address;
u16 i2c_address;
@@ -560,9 +556,9 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
reg_address,
value, mask);
if (ret)
- drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
+ drm_err(display->drm, "%s failed, error: %d\n", __func__, ret);
#else
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
#endif
@@ -612,12 +608,12 @@ static const char *sequence_name(enum mipi_seq seq_id)
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
return;
@@ -625,9 +621,9 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
if (!data)
return;
- drm_WARN_ON(&dev_priv->drm, *data != seq_id);
+ drm_WARN_ON(display->drm, *data != seq_id);
- drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ drm_dbg_kms(display->drm, "Starting MIPI sequence %d - %s\n",
seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
@@ -657,19 +653,19 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
/* Consistency check if we have size. */
if (operation_size && data != next) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unsupported MIPI operation byte %u\n",
operation_byte);
return;
@@ -695,54 +691,44 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
-
- drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
- drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
- intel_dsi->pixel_overlap);
- drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
- drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- drm_dbg_kms(&i915->drm, "Video mode format %s\n",
- intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode == BURST_MODE ?
- "burst" : "<unknown>");
- drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
- intel_dsi->burst_mode_ratio);
- drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
- drm_dbg_kms(&i915->drm, "Eot %s\n",
- str_enabled_disabled(intel_dsi->eotp_pkt));
- drm_dbg_kms(&i915->drm, "Clockstop %s\n",
- str_enabled_disabled(!intel_dsi->clock_stop));
- drm_dbg_kms(&i915->drm, "Mode %s\n",
- intel_dsi->operation_mode ? "command" : "video");
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
+ "DSI parameters:");
+
+ drm_printf(&p, "Pclk %d\n", intel_dsi->pclk);
+ drm_printf(&p, "Pixel overlap %d\n", intel_dsi->pixel_overlap);
+ drm_printf(&p, "Lane count %d\n", intel_dsi->lane_count);
+ drm_printf(&p, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_printf(&p, "Video mode format %s\n",
+ intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode == BURST_MODE ?
+ "burst" : "<unknown>");
+ drm_printf(&p, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
+ drm_printf(&p, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_printf(&p, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt));
+ drm_printf(&p, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop));
+ drm_printf(&p, "Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- drm_dbg_kms(&i915->drm,
- "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ drm_printf(&p, "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
- drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
- drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
- drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
- drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
- intel_dsi->lp_rx_timeout);
- drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
- intel_dsi->turn_arnd_val);
- drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
- drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
- intel_dsi->hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
- intel_dsi->clk_lp_to_hs_count);
- drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
- intel_dsi->clk_hs_to_lp_count);
- drm_dbg_kms(&i915->drm, "BTA %s\n",
- str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ drm_printf(&p, "Dual link: NONE\n");
+ drm_printf(&p, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_printf(&p, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_printf(&p, "LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ drm_printf(&p, "Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ drm_printf(&p, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_printf(&p, "HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ drm_printf(&p, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_printf(&p, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_printf(&p, "LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ drm_printf(&p, "HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ drm_printf(&p, "BTA %s\n",
+ str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
@@ -764,8 +750,7 @@ static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
@@ -773,7 +758,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u16 burst_mode_ratio;
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -819,7 +804,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u32 bitrate;
if (mipi_config->target_burst_mode_freq == 0) {
- drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+ drm_err(display->drm, "Burst mode target is not set\n");
return false;
}
@@ -836,7 +821,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
- drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
+ drm_err(display->drm, "Burst mode freq is less than computed\n");
return false;
}
@@ -900,8 +885,7 @@ static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
@@ -911,13 +895,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
struct pinctrl *pinctrl;
int ret;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
mipi_config->pwm_blc == PPS_BLC_PMIC) {
gpiod_lookup_table = &pmic_panel_gpio_table;
want_panel_gpio = true;
}
- if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ if (display->platform.valleyview && mipi_config->pwm_blc == PPS_BLC_SOC) {
gpiod_lookup_table = &soc_panel_gpio_table;
want_panel_gpio = true;
want_backlight_gpio = true;
@@ -926,12 +910,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
ARRAY_SIZE(soc_pwm_pinctrl_map));
if (ret)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to register pwm0 pinmux mapping\n");
- pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ pinctrl = devm_pinctrl_get_select(display->drm->dev, "soc_pwm0");
if (IS_ERR(pinctrl))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to set pinmux to PWM\n");
}
@@ -939,9 +923,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
gpiod_add_lookup_table(gpiod_lookup_table);
if (want_panel_gpio) {
- intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
+ intel_dsi->gpio_panel = devm_gpiod_get(display->drm->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
@@ -949,9 +933,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (want_backlight_gpio) {
intel_dsi->gpio_backlight =
- devm_gpiod_get(dev->dev, "backlight", flags);
+ devm_gpiod_get(display->drm->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
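
The intel_dsi_log_params() rewrite replaces a long run of drm_dbg_kms() calls with a drm_printer, so the "DSI parameters:" prefix is set up once and each line becomes a plain drm_printf(). A minimal sketch of the same pattern for any multi-line debug dump:

static void example_dump_params(struct intel_display *display,
				int pclk, int lane_count)
{
	struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
					       "DSI parameters:");

	drm_printf(&p, "Pclk %d\n", pclk);
	drm_printf(&p, "Lane count %d\n", lane_count);
}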
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c16fb34b737d..b61520353c92 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -31,10 +31,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
@@ -129,13 +130,13 @@ static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (!(tmp & DVO_ENABLE))
return false;
@@ -146,11 +147,11 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
*pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp);
@@ -160,13 +161,13 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = intel_de_read(i915, DVO(port));
+ tmp = intel_de_read(display, DVO(port));
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -186,14 +187,14 @@ static void intel_disable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), DVO_ENABLE, 0);
+ intel_de_posting_read(display, DVO(port));
}
static void intel_enable_dvo(struct intel_atomic_state *state,
@@ -201,7 +202,7 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum port port = encoder->port;
@@ -209,8 +210,8 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE);
- intel_de_posting_read(i915, DVO(port));
+ intel_de_rmw(display, DVO(port), 0, DVO_ENABLE);
+ intel_de_posting_read(display, DVO(port));
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -288,7 +289,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port = encoder->port;
@@ -296,7 +297,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
u32 dvo_val;
/* Save the active data order, since I don't know what it should be set to. */
- dvo_val = intel_de_read(i915, DVO(port)) &
+ dvo_val = intel_de_read(display, DVO(port)) &
(DVO_DEDICATED_INT_ENABLE |
DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
@@ -309,10 +310,10 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- intel_de_write(i915, DVO_SRCDIM(port),
+ intel_de_write(display, DVO_SRCDIM(port),
DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) |
DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay));
- intel_de_write(i915, DVO(port), dvo_val);
+ intel_de_write(display, DVO(port), dvo_val);
}
static enum drm_connector_status
@@ -320,10 +321,9 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
{
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
@@ -414,11 +414,10 @@ static int intel_dvo_connector_type(const struct intel_dvo_device *dvo)
}
}
-static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
+static bool intel_dvo_init_dev(struct intel_display *display,
struct intel_dvo *intel_dvo,
const struct intel_dvo_device *dvo)
{
- struct intel_display *display = &dev_priv->display;
struct i2c_adapter *i2c;
u32 dpll[I915_MAX_PIPES];
enum pipe pipe;
@@ -458,15 +457,15 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* the clock enabled before we attempt to initialize
* the device.
*/
- for_each_pipe(dev_priv, pipe)
- dpll[pipe] = intel_de_rmw(dev_priv, DPLL(dev_priv, pipe), 0,
+ for_each_pipe(display, pipe)
+ dpll[pipe] = intel_de_rmw(display, DPLL(display, pipe), 0,
DPLL_DVO_2X_MODE);
ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
- for_each_pipe(dev_priv, pipe) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll[pipe]);
+ for_each_pipe(display, pipe) {
+ intel_de_write(display, DPLL(display, pipe), dpll[pipe]);
}
intel_gmbus_force_bit(i2c, false);
@@ -474,14 +473,14 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
return ret;
}
-static bool intel_dvo_probe(struct drm_i915_private *i915,
+static bool intel_dvo_probe(struct intel_display *display,
struct intel_dvo *intel_dvo)
{
int i;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- if (intel_dvo_init_dev(i915, intel_dvo,
+ if (intel_dvo_init_dev(display, intel_dvo,
&intel_dvo_devices[i]))
return true;
}
@@ -489,9 +488,8 @@ static bool intel_dvo_probe(struct drm_i915_private *i915,
return false;
}
-void intel_dvo_init(struct drm_i915_private *i915)
+void intel_dvo_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_connector *connector;
struct intel_encoder *encoder;
struct intel_dvo *intel_dvo;
@@ -518,7 +516,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->pre_enable = intel_dvo_pre_enable;
connector->get_hw_state = intel_dvo_connector_get_hw_state;
- if (!intel_dvo_probe(i915, intel_dvo)) {
+ if (!intel_dvo_probe(display, intel_dvo)) {
kfree(intel_dvo);
intel_connector_free(connector);
return;
@@ -535,12 +533,12 @@ void intel_dvo_init(struct drm_i915_private *i915)
encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
BIT(INTEL_OUTPUT_DVO);
- drm_encoder_init(&i915->drm, &encoder->base,
+ drm_encoder_init(display->drm, &encoder->base,
&intel_dvo_enc_funcs,
intel_dvo_encoder_type(&intel_dvo->dev),
"DVO %c", port_name(encoder->port));
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] detected %s\n",
encoder->base.base.id, encoder->base.name,
intel_dvo->dev.name);
@@ -549,7 +547,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
DRM_CONNECTOR_POLL_DISCONNECT;
connector->base.polled = connector->polled;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_dvo_connector_funcs,
intel_dvo_connector_type(&intel_dvo->dev),
intel_gmbus_get_adapter(display, GMBUS_PIN_DPC));
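The conversion above is the template for the rest of this series: derive struct intel_display from the nearest DRM object and use display->drm where &i915->drm was used before. A minimal sketch of the idiom, using only helpers visible in this diff (the function name is illustrative):

	static void example_detect(struct drm_connector *_connector)
	{
		struct intel_display *display = to_intel_display(_connector->dev);
		struct intel_connector *connector = to_intel_connector(_connector);

		/* device-scoped logging now goes through the display struct */
		drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
			    connector->base.base.id, connector->base.name);
	}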
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h
index bf7a356422ab..83776552fc87 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_DVO_H__
#define __INTEL_DVO_H__
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-void intel_dvo_init(struct drm_i915_private *dev_priv);
+void intel_dvo_init(struct intel_display *display);
#else
-static inline void intel_dvo_init(struct drm_i915_private *dev_priv)
+static inline void intel_dvo_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 2b0e0f220442..05393bd60c98 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -560,11 +560,11 @@ static bool plane_has_modifier(struct intel_display *display,
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
- (GRAPHICS_VER(i915) < 20 || !IS_DGFX(i915)))
+ (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx))
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
- (GRAPHICS_VER(i915) < 20 || IS_DGFX(i915)))
+ (GRAPHICS_VER(i915) < 20 || display->platform.dgfx))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 30ac9b089ad6..c648ab8a93d7 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -12,6 +12,7 @@
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -117,7 +118,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
unsigned int pinctl;
@@ -136,7 +137,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
atomic_inc(&display->restore.pending_fb_pin);
@@ -215,7 +216,7 @@ err:
vma = ERR_PTR(ret);
atomic_dec(&display->restore.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return vma;
}
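As the comment in intel_fb_pin_to_ggtt() notes, only the pin/unpin/fence work needs a runtime-PM reference. The new display-local wrappers keep that bracketing but hand back a struct ref_tracker * instead of an intel_wakeref_t; a minimal sketch of the pattern:

	struct ref_tracker *wakeref;

	wakeref = intel_display_rpm_get(display);
	/* ... pin the vma, set up the fence ... */
	intel_display_rpm_put(display, wakeref);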
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b6978135e8ad..bed2bba20b55 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -55,6 +55,7 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
+#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_display_wa.h"
@@ -251,9 +252,12 @@ static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_s
* Gen9 hw miscalculates cfb stride for linear as
* PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
* we always need to use the override there.
+ *
+ * wa_14022269668: for BMG, always program FBC_STRIDE before enabling FBC
*/
if (stride != stride_aligned ||
- (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
+ (DISPLAY_VER(display) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR) ||
+ display->platform.battlemage)
return stride_aligned * 4 / 64;
return 0;
@@ -519,6 +523,20 @@ static void ilk_fbc_activate(struct intel_fbc *fbc)
DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}
+static void fbc_compressor_clkgate_disable_wa(struct intel_fbc *fbc,
+ bool disable)
+{
+ struct intel_display *display = fbc->display;
+
+ if (display->platform.dg2)
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_4, DG2_DPFC_GATING_DIS,
+ disable ? DG2_DPFC_GATING_DIS : 0);
+ else if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPE_CLKGATE_DIS2(fbc->id),
+ MTL_DPFC_GATING_DIS,
+ disable ? MTL_DPFC_GATING_DIS : 0);
+}
+
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
@@ -532,6 +550,10 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+
+ /* wa_18038517565 Enable DPFC clock gating after FBC disable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, false);
}
}
@@ -921,6 +943,10 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+
+ /* wa_18038517565 Disable DPFC clock gating before FBC enable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, true);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
@@ -1436,7 +1462,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_needs_wa_16023588340(i915)) {
+ if (intel_display_needs_wa_16023588340(display)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1464,14 +1490,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*
- * In Xe3, PSR2 selective fetch and FBC dirty rect feature cannot
- * coexist. So if PSR2 selective fetch is supported then mark that
- * FBC is not supported.
- * TODO: Need a logic to decide between PSR2 and FBC Dirty rect
+ * TODO: Implement logic to select between PSR2 selective fetch and
+ * FBC, based on Bspec 68881, on xe2lpd onwards.
+ *
+ * As we still see some strange underruns on those platforms while
+ * disabling PSR2, keep FBC disabled while selective update is on,
+ * until the selection logic is implemented.
*/
- if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) &&
- crtc_state->has_sel_update && !crtc_state->has_panel_replay) {
- plane_state->no_fbc_reason = "PSR2 enabled";
+ if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) {
+ plane_state->no_fbc_reason = "Selective update enabled";
return 0;
}
@@ -2120,13 +2147,12 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_fbc *fbc = m->private;
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_plane *plane;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
drm_modeset_lock_all(display->drm);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&fbc->lock);
if (fbc->active) {
@@ -2151,7 +2177,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
drm_modeset_unlock_all(display->drm);
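wa_18038517565 requires DPFC clock gating to stay off for as long as FBC is enabled on DG2 and display version 14+, so the new helper is called twice with opposite arguments around the FBC enable/disable window; the pairing, condensed from the hunks above:

	/* before enabling FBC: disable DPFC clock gating */
	fbc_compressor_clkgate_disable_wa(fbc, true);
	/* ... FBC enabled and active ... */
	/* after disabling FBC: let DPFC clock gating resume */
	fbc_compressor_clkgate_disable_wa(fbc, false);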
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index adc19d5607de..2dc4029d71ed 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -47,9 +47,10 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
+#include "intel_display_core.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
@@ -65,9 +66,9 @@ struct intel_fbdev {
static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper)
{
- struct drm_i915_private *i915 = to_i915(fb_helper->client.dev);
+ struct intel_display *display = to_intel_display(fb_helper->client.dev);
- return i915->display.fbdev.fbdev;
+ return display->fbdev.fbdev;
}
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
@@ -209,11 +210,10 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_display *display = to_intel_display(helper->dev);
struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *fb = ifbdev->fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
@@ -226,7 +226,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (fb &&
(sizes->fb_width > fb->base.width ||
sizes->fb_height > fb->base.height)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"BIOS fb too small (%dx%d), we require (%dx%d),"
" releasing it\n",
fb->base.width, fb->base.height,
@@ -234,20 +234,20 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
drm_framebuffer_put(&fb->base);
fb = NULL;
}
- if (!fb || drm_WARN_ON(dev, !intel_fb_bo(&fb->base))) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) {
+ drm_dbg_kms(display->drm,
"no BIOS fb, allocating a new one\n");
fb = intel_fbdev_fb_alloc(helper, sizes);
if (IS_ERR(fb))
return PTR_ERR(fb);
} else {
- drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
+ drm_dbg_kms(display->drm, "re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = fb->base.width;
sizes->fb_height = fb->base.height;
}
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -265,7 +265,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_info(helper);
if (IS_ERR(info)) {
- drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
+ drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info);
ret = PTR_ERR(info);
goto out_unpin;
}
@@ -277,11 +277,11 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
obj = intel_fb_bo(&fb->base);
- ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
+ ret = intel_fbdev_fb_fill_info(display, info, obj, vma);
if (ret)
goto out_unpin;
- drm_fb_helper_fill_info(info, dev->fb_helper, sizes);
+ drm_fb_helper_fill_info(info, display->drm->fb_helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -292,21 +292,22 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+ drm_dbg_kms(display->drm, "allocated %dx%d fb: 0x%08x\n",
fb->base.width, fb->base.height,
i915_ggtt_offset(vma));
ifbdev->fb = fb;
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
out_unpin:
intel_fb_unpin_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
+
return ret;
}
@@ -319,16 +320,15 @@ out_unlock:
* Note we only support a single fb shared across pipes for boot (mostly for
* fbcon), so we just find the biggest and use that.
*/
-static bool intel_fbdev_init_bios(struct drm_device *dev,
+static bool intel_fbdev_init_bios(struct intel_display *display,
struct intel_fbdev *ifbdev)
{
- struct drm_i915_private *i915 = to_i915(dev);
struct intel_framebuffer *fb = NULL;
struct intel_crtc *crtc;
unsigned int max_size = 0;
/* Find the largest fb */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -338,21 +338,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct drm_gem_object *obj = intel_fb_bo(plane_state->uapi.fb);
if (!crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active, skipping\n",
crtc->base.base.id, crtc->base.name);
continue;
}
if (!obj) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] no fb, skipping\n",
plane->base.base.id, plane->base.name);
continue;
}
if (obj->size > max_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"found possible fb from [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fb = to_intel_framebuffer(plane_state->uapi.fb);
@@ -361,13 +361,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
if (!fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"no active fbs found, not using BIOS config\n");
goto out;
}
/* Now make sure all the pipes will fit into it */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -375,13 +375,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
unsigned int cur_size;
if (!crtc_state->uapi.active) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] not active, skipping\n",
crtc->base.base.id, crtc->base.name);
continue;
}
- drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
+ drm_dbg_kms(display->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
plane->base.base.id, plane->base.name);
/*
@@ -392,7 +392,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
plane->base.base.id, plane->base.name,
cur_size, fb->base.pitches[0]);
@@ -403,7 +403,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
crtc->base.base.id, crtc->base.name,
crtc_state->uapi.adjusted_mode.crtc_hdisplay,
@@ -412,7 +412,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size);
if (cur_size > max_size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
plane->base.base.id, plane->base.name,
cur_size, max_size);
@@ -420,14 +420,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"fb big enough [PLANE:%d:%s] (%d >= %d)\n",
plane->base.base.id, plane->base.name,
max_size, cur_size);
}
if (!fb) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS fb not suitable for all pipes, not using\n");
goto out;
}
@@ -437,7 +437,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
drm_framebuffer_get(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
@@ -448,13 +448,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
if (!crtc_state->uapi.active)
continue;
- drm_WARN(dev, !plane_state->uapi.fb,
+ drm_WARN(display->drm, !plane_state->uapi.fb,
"re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
}
- drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
+ drm_dbg_kms(display->drm, "using BIOS fb for initial console\n");
return true;
out:
@@ -479,26 +479,25 @@ static unsigned int intel_fbdev_color_mode(const struct drm_format_info *info)
}
}
-void intel_fbdev_setup(struct drm_i915_private *i915)
+void intel_fbdev_setup(struct intel_display *display)
{
- struct drm_device *dev = &i915->drm;
struct intel_fbdev *ifbdev;
unsigned int preferred_bpp = 0;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- ifbdev = drmm_kzalloc(dev, sizeof(*ifbdev), GFP_KERNEL);
+ ifbdev = drmm_kzalloc(display->drm, sizeof(*ifbdev), GFP_KERNEL);
if (!ifbdev)
return;
- i915->display.fbdev.fbdev = ifbdev;
- if (intel_fbdev_init_bios(dev, ifbdev))
+ display->fbdev.fbdev = ifbdev;
+ if (intel_fbdev_init_bios(display, ifbdev))
preferred_bpp = intel_fbdev_color_mode(ifbdev->fb->base.format);
if (!preferred_bpp)
preferred_bpp = 32;
- drm_client_setup_with_color_mode(dev, preferred_bpp);
+ drm_client_setup_with_color_mode(display->drm, preferred_bpp);
}
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
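Condensed, the probe path above makes a three-way decision about the framebuffer inherited from the BIOS; a sketch with error handling elided:

	if (fb && (sizes->fb_width > fb->base.width ||
		   sizes->fb_height > fb->base.height)) {
		/* BIOS fb too small for the requested mode: drop it */
		drm_framebuffer_put(&fb->base);
		fb = NULL;
	}
	if (!fb) {
		fb = intel_fbdev_fb_alloc(helper, sizes);	/* fresh fb */
	} else {
		/* reuse it, clamping the request to what the BIOS fb offers */
		sizes->fb_width = fb->base.width;
		sizes->fb_height = fb->base.height;
	}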
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index 89bad3a2b01a..a15e3e222a0c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -10,7 +10,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
-struct drm_i915_private;
+struct intel_display;
struct intel_fbdev;
struct intel_framebuffer;
@@ -19,14 +19,14 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = intel_fbdev_driver_fbdev_probe
-void intel_fbdev_setup(struct drm_i915_private *dev_priv);
+void intel_fbdev_setup(struct intel_display *display);
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev);
#else
#define INTEL_FBDEV_DRIVER_OPS \
.fbdev_probe = NULL
-static inline void intel_fbdev_setup(struct drm_i915_private *dev_priv)
+static inline void intel_fbdev_setup(struct intel_display *display)
{
}
static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 4991c35a2632..5f4cb3328265 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -15,9 +15,9 @@
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_display *display = to_intel_display(helper->dev);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size;
@@ -50,14 +50,14 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
*
* Also skip stolen on MTL as Wa_22018444074 mitigation.
*/
- if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
+ if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
}
if (IS_ERR(obj)) {
- drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj);
return ERR_PTR(-ENOMEM);
}
@@ -67,9 +67,10 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
return to_intel_framebuffer(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
void __iomem *vaddr;
@@ -101,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
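The allocation strategy in the comments above, condensed: prefer stolen memory only when the framebuffer clearly fits (and never on MTL, per Wa_22018444074); shmem is the fallback either way. A sketch, where the initial error value is an assumption not shown in the hunk:

	obj = ERR_PTR(-ENODEV);
	if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size)
		obj = i915_gem_object_create_stolen(dev_priv, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_shmem(dev_priv, size);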
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index e502ae375fc0..cb7957272715 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -9,13 +9,13 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_gem_object;
-struct drm_i915_private;
struct fb_info;
struct i915_vma;
+struct intel_display;
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 40deee0769ae..169bbe154b5c 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -6,15 +6,16 @@
#include <linux/string_helpers.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
-#include "intel_dp.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"
@@ -464,7 +465,6 @@ static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_st
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -483,7 +483,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
@@ -607,7 +607,6 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -647,7 +646,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
@@ -698,7 +697,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
@@ -1077,7 +1076,6 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc)
void ilk_fdi_disable(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -1096,7 +1094,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
intel_de_write(display, FDI_RX_CHICKEN(pipe),
FDI_RX_PHASE_SYNC_POINTER_OVR);
@@ -1106,7 +1104,7 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
reg = FDI_RX_CTL(pipe);
temp = intel_de_read(display, reg);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
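The HAS_PCH_CPT() checks converted above all guard the same split: CPT PCHs use different link-training bitfields than IBX. A sketch of one such selection; the non-CPT constants are not shown in these hunks and are assumed from i915_reg.h:

	temp = intel_de_read(display, FDI_RX_CTL(pipe));
	if (HAS_PCH_CPT(display)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(display, FDI_RX_CTL(pipe), temp);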
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 7a8fbff39be0..2a787897b2d3 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -25,7 +25,8 @@
*
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
@@ -57,11 +58,10 @@
static bool ivb_can_enable_err_int(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
enum pipe pipe;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
@@ -75,11 +75,10 @@ static bool ivb_can_enable_err_int(struct intel_display *display)
static bool cpt_can_enable_serr_int(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
struct intel_crtc *crtc;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
for_each_pipe(display, pipe) {
crtc = intel_crtc_for_pipe(display, pipe);
@@ -94,11 +93,10 @@ static bool cpt_can_enable_serr_int(struct intel_display *display)
static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(display, crtc->pipe);
u32 enable_mask;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((intel_de_read(display, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
@@ -115,10 +113,9 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t reg = PIPESTAT(display, pipe);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if (enable) {
u32 enable_mask = i915_pipestat_enable_mask(display, pipe);
@@ -136,24 +133,22 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display,
static void ilk_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pipe == PIPE_A) ?
DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
- ilk_enable_display_irq(dev_priv, bit);
+ ilk_enable_display_irq(display, bit);
else
- ilk_disable_display_irq(dev_priv, bit);
+ ilk_disable_display_irq(display, bit);
}
static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 err_int = intel_de_read(display, GEN7_ERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
@@ -169,7 +164,6 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable,
bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
if (enable) {
intel_de_write(display, GEN7_ERR_INT,
ERR_INT_FIFO_UNDERRUN(pipe));
@@ -177,9 +171,9 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
if (!ivb_can_enable_err_int(display))
return;
- ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_enable_display_irq(display, DE_ERR_INT_IVB);
} else {
- ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ ilk_disable_display_irq(display, DE_ERR_INT_IVB);
if (old &&
intel_de_read(display, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
@@ -193,36 +187,32 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display,
static void bdw_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable)
- bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
else
- bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
static void ibx_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bit = (pch_transcoder == PIPE_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
- ibx_enable_display_interrupt(dev_priv, bit);
+ ibx_enable_display_interrupt(display, bit);
else
- ibx_disable_display_interrupt(dev_priv, bit);
+ ibx_disable_display_interrupt(display, bit);
}
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
u32 serr_int = intel_de_read(display, SERR_INT);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
@@ -240,8 +230,6 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (enable) {
intel_de_write(display, SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
@@ -249,9 +237,9 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
if (!cpt_can_enable_serr_int(display))
return;
- ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_enable_display_interrupt(display, SDE_ERROR_CPT);
} else {
- ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ ibx_disable_display_interrupt(display, SDE_ERROR_CPT);
if (old && intel_de_read(display, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
@@ -265,11 +253,10 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display,
static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
bool old;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
@@ -305,13 +292,12 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *displa
bool intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned long flags;
bool ret;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(display, pipe, enable);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return ret;
}
@@ -334,7 +320,6 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
enum pipe pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pch_transcoder);
unsigned long flags;
bool old;
@@ -348,12 +333,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
* crtc on LPT won't cause issues.
*/
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
old = !crtc->pch_fifo_underrun_disabled;
crtc->pch_fifo_underrun_disabled = !enable;
- if (HAS_PCH_IBX(dev_priv))
+ if (HAS_PCH_IBX(display))
ibx_set_fifo_underrun_reporting(display,
pch_transcoder,
enable);
@@ -362,7 +347,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct intel_display *display,
pch_transcoder,
enable, old);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return old;
}
@@ -429,10 +414,9 @@ void intel_pch_fifo_underrun_irq_handler(struct intel_display *display,
*/
void intel_check_cpu_fifo_underruns(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
for_each_intel_crtc(display->drm, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
@@ -444,7 +428,7 @@ void intel_check_cpu_fifo_underruns(struct intel_display *display)
ivb_check_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
/**
@@ -457,28 +441,25 @@ void intel_check_cpu_fifo_underruns(struct intel_display *display)
*/
void intel_check_pch_fifo_underruns(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
for_each_intel_crtc(display->drm, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
cpt_check_pch_fifo_underruns(crtc);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
void intel_init_fifo_underrun_reporting(struct intel_display *display,
struct intel_crtc *crtc,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
crtc->cpu_fifo_underrun_disabled = !enable;
/*
@@ -490,6 +471,6 @@ void intel_init_fifo_underrun_reporting(struct intel_display *display,
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
- if (intel_has_pch_trancoder(i915, crtc->pipe))
+ if (intel_has_pch_trancoder(display, crtc->pipe))
crtc->pch_fifo_underrun_disabled = !enable;
}
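After this conversion, all FIFO-underrun bookkeeping is serialized on display->irq.lock rather than dev_priv->irq_lock; assembled from the hunks above, the public entry point now reads approximately:

	bool intel_set_cpu_fifo_underrun_reporting(struct intel_display *display,
						   enum pipe pipe, bool enable)
	{
		unsigned long flags;
		bool ret;

		/* the display irq lock guards the per-crtc underrun state */
		spin_lock_irqsave(&display->irq.lock, flags);
		ret = __intel_set_cpu_fifo_underrun_reporting(display, pipe, enable);
		spin_unlock_irqrestore(&display->irq.lock, flags);

		return ret;
	}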
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index ba2f88ca6117..43be5377ddc1 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -58,7 +58,6 @@
#include <drm/drm_gem.h>
#include "i915_active.h"
-#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_trace.h"
@@ -72,7 +71,7 @@
/**
* frontbuffer_flush - flush frontbuffer
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -82,16 +81,14 @@
*
* Can be called without any locks held.
*/
-static void frontbuffer_flush(struct drm_i915_private *i915,
+static void frontbuffer_flush(struct intel_display *display,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct intel_display *display = &i915->display;
-
/* Delay flushing when rings are still busy. */
- spin_lock(&i915->display.fb_tracking.lock);
- frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
+ frontbuffer_bits &= ~display->fb_tracking.busy_bits;
+ spin_unlock(&display->fb_tracking.lock);
if (!frontbuffer_bits)
return;
@@ -107,7 +104,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -117,19 +114,19 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_prepare(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
- i915->display.fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&display->fb_tracking.lock);
+ display->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -137,22 +134,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_complete(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= i915->display.fb_tracking.flip_bits;
- i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= display->fb_tracking.flip_bits;
+ display->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
if (frontbuffer_bits)
- frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
- * @i915: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
@@ -161,15 +158,15 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip(struct drm_i915_private *i915,
+void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
- frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
void __intel_fb_invalidate(struct intel_frontbuffer *front,
@@ -198,7 +195,6 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct intel_display *display = to_intel_display(front->obj->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -209,7 +205,7 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
}
if (frontbuffer_bits)
- frontbuffer_flush(i915, frontbuffer_bits, origin);
+ frontbuffer_flush(display, frontbuffer_bits, origin);
}
static void intel_frontbuffer_flush_work(struct work_struct *work)
@@ -280,7 +276,7 @@ static void frontbuffer_release(struct kref *ref)
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct intel_display *display = to_intel_display(obj->dev);
struct intel_frontbuffer *front, *cur;
front = intel_bo_get_frontbuffer(obj);
@@ -300,9 +296,9 @@ intel_frontbuffer_get(struct drm_gem_object *obj)
I915_ACTIVE_RETIRE_SLEEPS);
INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
cur = intel_bo_set_frontbuffer(obj, front);
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_unlock(&display->fb_tracking.lock);
if (cur != front)
kfree(front);
return cur;
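The three flip helpers form a small state machine over fb_tracking: prepare marks bits as in-flight, complete flushes whatever was not cancelled, and the synchronous variant does both at once. Expected call order, per the kernel-doc above:

	/* asynchronous (scheduled flip) path */
	intel_frontbuffer_flip_prepare(display, frontbuffer_bits);
	/* ... hardware latches the flip ... */
	intel_frontbuffer_flip_complete(display, frontbuffer_bits);

	/* synchronous plane update path */
	intel_frontbuffer_flip(display, frontbuffer_bits);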
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 6237780a9f68..2fee12eaf9b6 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -31,7 +31,7 @@
#include "i915_active_types.h"
struct drm_gem_object;
-struct drm_i915_private;
+struct intel_display;
enum fb_op_origin {
ORIGIN_CPU = 0,
@@ -68,11 +68,11 @@ struct intel_frontbuffer {
GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_prepare(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+void intel_frontbuffer_flip_complete(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *i915,
+void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits);
void intel_frontbuffer_put(struct intel_frontbuffer *front);
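Each pipe owns INTEL_FRONTBUFFER_BITS_PER_PIPE consecutive tracking bits, so the GENMASK macro above (INTEL_FRONTBUFFER_ALL_MASK in the tree) expands mechanically. Assuming the tree's value of 8 bits per pipe (illustrative here), pipe B (pipe == 1) gives:

	INTEL_FRONTBUFFER_ALL_MASK(PIPE_B)
		== GENMASK(8 * 2 - 1, 8 * 1)
		== GENMASK(15, 8)
		== 0x0000ff00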
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 8a49e2bb37fa..000a898c9480 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -3,10 +3,13 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/pci.h>
#include <linux/string.h>
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "intel_atomic.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_global_state.h"
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index abf457e68ee9..d55cc77650b7 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -152,32 +152,31 @@ static const struct gmbus_pin gmbus_pins_mtp[] = {
static const struct gmbus_pin *get_gmbus_pin(struct intel_display *display,
unsigned int pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct gmbus_pin *pins;
size_t size;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL) {
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL) {
pins = gmbus_pins_mtp;
size = ARRAY_SIZE(gmbus_pins_mtp);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG2) {
pins = gmbus_pins_dg2;
size = ARRAY_SIZE(gmbus_pins_dg2);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ } else if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
- } else if (HAS_PCH_CNP(i915)) {
+ } else if (HAS_PCH_CNP(display)) {
pins = gmbus_pins_cnp;
size = ARRAY_SIZE(gmbus_pins_cnp);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
pins = gmbus_pins_bxt;
size = ARRAY_SIZE(gmbus_pins_bxt);
} else if (DISPLAY_VER(display) == 9) {
pins = gmbus_pins_skl;
size = ARRAY_SIZE(gmbus_pins_skl);
- } else if (IS_BROADWELL(i915)) {
+ } else if (display->platform.broadwell) {
pins = gmbus_pins_bdw;
size = ARRAY_SIZE(gmbus_pins_bdw);
} else {
@@ -240,11 +239,10 @@ static void bxt_gmbus_clock_gating(struct intel_display *display,
static u32 get_reserved(struct intel_gmbus *bus)
{
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(i915) && !IS_I845G(i915))
+ if (!display->platform.i830 && !display->platform.i845g)
reserved = intel_de_read_notrace(display, bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
@@ -314,11 +312,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_gmbus_reset(display);
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
set_data(bus, 1);
@@ -332,12 +329,11 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
set_data(bus, 1);
set_clock(bus, 1);
- if (IS_PINEVIEW(i915))
+ if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
}
@@ -630,14 +626,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
int i = 0, inc, try = 0;
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_gmbus_clock_gating(display, false);
- else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
pch_gmbus_clock_gating(display, false);
retry:
@@ -748,9 +743,9 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_gmbus_clock_gating(display, true);
- else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
pch_gmbus_clock_gating(display, true);
return ret;
@@ -873,12 +868,11 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
*/
int intel_gmbus_setup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
unsigned int pin;
int ret;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
display->gmbus.mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH(display))
/*
@@ -925,7 +919,7 @@ int intel_gmbus_setup(struct intel_display *display)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(i915))
+ if (display->platform.i830)
bus->force_bit = 1;
intel_gpio_setup(bus, GPIO(display, gmbus_pin->gpio));
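Display WA #0868 requires GMBUS clock gating to be off for the duration of a transfer on the affected platforms, hence the mirrored disable/enable calls inside do_gmbus_xfer(); the bracketing, condensed from the hunks above:

	/* Display WA #0868: skl,bxt,kbl,cfl,glk */
	if (display->platform.geminilake || display->platform.broxton)
		bxt_gmbus_clock_gating(display, false);
	else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
		pch_gmbus_clock_gating(display, false);

	/* ... the actual transfer, with retry/timeout handling ... */

	if (display->platform.geminilake || display->platform.broxton)
		bxt_gmbus_clock_gating(display, true);
	else if (HAS_PCH_SPT(display) || HAS_PCH_CNP(display))
		pch_gmbus_clock_gating(display, true);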
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 1bf424a822f3..3e3038f4ee1f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -22,13 +22,18 @@
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dp_mst.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
+#include "intel_hdcp_gsc_message.h"
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_pcode.h"
+#define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
+
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
@@ -136,7 +141,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
data->k++;
/* if there is only one active stream */
- if (dig_port->dp.mst.active_links <= 1)
+ if (intel_dp_mst_active_streams(&dig_port->dp) <= 1)
break;
}
drm_connector_list_iter_end(&conn_iter);
@@ -248,8 +253,8 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
return false;
/* If MTL+ make sure gsc is loaded and proxy is setup */
- if (intel_hdcp_gsc_cs_required(display)) {
- if (!intel_hdcp_gsc_check_status(display))
+ if (USE_HDCP_GSC(display)) {
+ if (!intel_hdcp_gsc_check_status(display->drm))
return false;
}
@@ -334,9 +339,7 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
static bool hdcp_key_loadable(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum i915_power_well_id id;
- intel_wakeref_t wakeref;
bool enabled = false;
/*
@@ -349,7 +352,7 @@ static bool hdcp_key_loadable(struct intel_display *display)
id = SKL_DISP_PW_1;
/* PG1 (power well #1) needs to be enabled */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_display_rpm(display)
enabled = intel_display_power_well_is_enabled(display, id);
/*
@@ -2339,7 +2342,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
static bool is_hdcp2_supported(struct intel_display *display)
{
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
return true;
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
@@ -2363,7 +2366,7 @@ void intel_hdcp_component_init(struct intel_display *display)
display->hdcp.comp_added = true;
mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
ret = intel_hdcp_gsc_init(display);
else
ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
@@ -2638,7 +2641,7 @@ void intel_hdcp_component_fini(struct intel_display *display)
display->hdcp.comp_added = false;
mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(display))
+ if (USE_HDCP_GSC(display))
intel_hdcp_gsc_fini(display);
else
component_del(display->drm->dev, &i915_hdcp_ops);
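With intel_hdcp_gsc_cs_required() gone, the version check lives in the local USE_HDCP_GSC() macro defined earlier in this file, and every GSC-vs-MEI decision reduces to the same branch:

	if (USE_HDCP_GSC(display))
		ret = intel_hdcp_gsc_init(display);	/* MTL+: GSC firmware path */
	else
		ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
					  I915_COMPONENT_HDCP);	/* MEI component path */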
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 55965844d829..6a22862d6be1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -11,27 +11,22 @@
#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct drm_i915_private *i915;
struct i915_vma *vma;
void *hdcp_cmd_in;
void *hdcp_cmd_out;
};
-bool intel_hdcp_gsc_cs_required(struct intel_display *display)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
- return DISPLAY_VER(display) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct intel_display *display)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
- drm_dbg_kms(display->drm,
+ drm_dbg_kms(&i915->drm,
"GSC components required for HDCP2.2 are not ready\n");
return false;
}
@@ -41,7 +36,7 @@ bool intel_hdcp_gsc_check_status(struct intel_display *display)
/*This function helps allocate memory for the command that we will send to gsc cs */
static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
- struct intel_hdcp_gsc_message *hdcp_message)
+ struct intel_hdcp_gsc_context *gsc_context)
{
struct intel_gt *gt = i915->media_gt;
struct drm_i915_gem_object *obj = NULL;
@@ -78,9 +73,10 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
memset(cmd_in, 0, obj->base.size);
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
- hdcp_message->vma = vma;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->vma = vma;
+ gsc_context->i915 = i915;
return 0;
@@ -91,80 +87,37 @@ out_unpin:
return err;
}
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function is called
*/
- display->hdcp.hdcp_message = hdcp_message;
- ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
-
- if (ret)
- drm_err(display->drm, "Could not initialize hdcp_message\n");
-
- return ret;
-}
-
-static void intel_hdcp_gsc_free_message(struct intel_display *display)
-{
- struct intel_hdcp_gsc_message *hdcp_message =
- display->hdcp.hdcp_message;
+ ret = intel_hdcp_gsc_initialize_message(i915, gsc_context);
+ if (ret) {
+ drm_err(&i915->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
+ }
- hdcp_message->hdcp_cmd_in = NULL;
- hdcp_message->hdcp_cmd_out = NULL;
- i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP);
- kfree(hdcp_message);
+ return gsc_context;
}
-int intel_hdcp_gsc_init(struct intel_display *display)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(struct i915_hdcp_arbiter), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (!gsc_context)
+ return;
- mutex_lock(&display->hdcp.hdcp_mutex);
- display->hdcp.arbiter = data;
- display->hdcp.arbiter->hdcp_dev = display->drm->dev;
- display->hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(display);
- mutex_unlock(&display->hdcp.hdcp_mutex);
-
- return ret;
-}
-
-void intel_hdcp_gsc_fini(struct intel_display *display)
-{
- intel_hdcp_gsc_free_message(display);
- kfree(display->hdcp.arbiter);
+ i915_vma_unpin_and_release(&gsc_context->vma, I915_VMA_RELEASE_MAP);
+ kfree(gsc_context);
}
static int intel_gsc_send_sync(struct drm_i915_private *i915,
@@ -211,18 +164,18 @@ static int intel_gsc_send_sync(struct drm_i915_private *i915,
/*
* This function can now be used for sending requests and will also handle
* receipt of reply messages, hence no separate function for message retrieval
- * is required. We will initialize intel_hdcp_gsc_message structure then add
+ * is required. We will initialize intel_hdcp_gsc_context structure then add
* gsc cs memory header as stated in specs after which the normal HDCP payload
* will follow
*/
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct drm_i915_private *i915 = gsc_context->i915;
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_mtl_header *header_in, *header_out;
const size_t max_msg_size = PAGE_SIZE - sizeof(*header_in);
- struct intel_hdcp_gsc_message *hdcp_message;
u64 addr_in, addr_out, host_session_id;
u32 reply_size, msg_size_in, msg_size_out;
int ret, tries = 0;
@@ -235,10 +188,9 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
msg_size_in = msg_in_len + sizeof(*header_in);
msg_size_out = msg_out_len + sizeof(*header_out);
- hdcp_message = i915->display.hdcp.hdcp_message;
- header_in = hdcp_message->hdcp_cmd_in;
- header_out = hdcp_message->hdcp_cmd_out;
- addr_in = i915_ggtt_offset(hdcp_message->vma);
+ header_in = gsc_context->hdcp_cmd_in;
+ header_out = gsc_context->hdcp_cmd_out;
+ addr_in = i915_ggtt_offset(gsc_context->vma);
addr_out = addr_in + PAGE_SIZE;
memset(header_in, 0, msg_size_in);
@@ -246,7 +198,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
get_random_bytes(&host_session_id, sizeof(u64));
intel_gsc_uc_heci_cmd_emit_mtl_header(header_in, HECI_MEADDRESS_HDCP,
msg_size_in, host_session_id);
- memcpy(hdcp_message->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
+ memcpy(gsc_context->hdcp_cmd_in + sizeof(*header_in), msg_in, msg_in_len);
/*
* Keep sending request in case the pending bit is set no need to add
@@ -280,7 +232,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
reply_size, (u32)msg_out_len);
}
- memcpy(msg_out, hdcp_message->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
+ memcpy(msg_out, gsc_context->hdcp_cmd_out + sizeof(*header_out), msg_out_len);
err:
return ret;
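For orientation, the send path above packs the request and the reply into a single GGTT allocation: the request MTL header sits at addr_in and the GSC writes its reply one page later at addr_out. A sketch of the layout implied by the code (only PAGE_SIZE and the header arithmetic come from the diff; the rest is illustrative):

/*
 *  addr_in                                 addr_out = addr_in + PAGE_SIZE
 *  v                                       v
 *  +------------+-----------------------+------------+--------------------+
 *  | MTL header | HDCP request payload  | MTL header | HDCP reply payload |
 *  +------------+-----------------------+------------+--------------------+
 *
 *  msg_size_in  = msg_in_len  + sizeof(*header_in)   (capped at PAGE_SIZE)
 *  msg_size_out = msg_out_len + sizeof(*header_out)  (capped at PAGE_SIZE)
 */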
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index 5695a5e4f609..9305c14aaffe 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -6,19 +6,17 @@
#ifndef __INTEL_HDCP_GSC_H__
#define __INTEL_HDCP_GSC_H__
-#include <linux/err.h>
#include <linux/types.h>
-struct drm_i915_private;
-struct intel_display;
-struct intel_hdcp_gsc_message;
+struct drm_device;
+struct intel_hdcp_gsc_context;
-bool intel_hdcp_gsc_cs_required(struct intel_display *display);
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len);
-int intel_hdcp_gsc_init(struct intel_display *display);
-void intel_hdcp_gsc_fini(struct intel_display *display);
-bool intel_hdcp_gsc_check_status(struct intel_display *display);
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len);
+bool intel_hdcp_gsc_check_status(struct drm_device *drm);
+
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm);
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context);
#endif /* __INTEL_HDCP_GSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
index 129104fa9b16..98967bb148e3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
@@ -4,20 +4,23 @@
*/
#include <linux/err.h>
+
+#include <drm/drm_print.h>
#include <drm/intel/i915_hdcp_interface.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
-int
+static int
intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data)
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = {};
struct wired_cmd_initiate_hdcp2_session_out session_init_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ake_data)
@@ -28,7 +31,7 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
@@ -41,9 +44,9 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &session_init_in,
sizeof(session_init_in),
- (u8 *)&session_init_out,
+ &session_init_out,
sizeof(session_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -64,7 +67,7 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_ake_send_cert *rx_cert,
@@ -75,8 +78,8 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = {};
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
@@ -87,7 +90,7 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
@@ -103,9 +106,9 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_rxcert_in,
sizeof(verify_rxcert_in),
- (u8 *)&verify_rxcert_out,
+ &verify_rxcert_out,
sizeof(verify_rxcert_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
@@ -134,14 +137,14 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_hprime *rx_hprime)
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = {};
struct wired_cmd_ake_send_hprime_out send_hprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_hprime)
@@ -152,7 +155,7 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
@@ -166,9 +169,9 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &send_hprime_in,
sizeof(send_hprime_in),
- (u8 *)&send_hprime_out,
+ &send_hprime_out,
sizeof(send_hprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -184,14 +187,14 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int
+static int
intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = {};
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !pairing_info)
@@ -202,7 +205,7 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
@@ -217,9 +220,9 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &pairing_info_in,
sizeof(pairing_info_in),
- (u8 *)&pairing_info_out,
+ &pairing_info_out,
sizeof(pairing_info_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -236,15 +239,15 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
return 0;
}
-int
+static int
intel_hdcp_gsc_initiate_locality_check(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_lc_init *lc_init_data)
{
struct wired_cmd_init_locality_check_in lc_init_in = {};
struct wired_cmd_init_locality_check_out lc_init_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !lc_init_data)
@@ -255,7 +258,7 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
@@ -266,8 +269,8 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
- (u8 *)&lc_init_out, sizeof(lc_init_out));
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &lc_init_in, sizeof(lc_init_in),
+ &lc_init_out, sizeof(lc_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@@ -285,14 +288,14 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_lc_send_lprime *rx_lprime)
{
struct wired_cmd_validate_locality_in verify_lprime_in = {};
struct wired_cmd_validate_locality_out verify_lprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_lprime)
@@ -303,7 +306,7 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
@@ -318,9 +321,9 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_lprime_in,
sizeof(verify_lprime_in),
- (u8 *)&verify_lprime_out,
+ &verify_lprime_out,
sizeof(verify_lprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -337,14 +340,15 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
return 0;
}
-int intel_hdcp_gsc_get_session_key(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data)
+static int
+intel_hdcp_gsc_get_session_key(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data)
{
struct wired_cmd_get_session_key_in get_skey_in = {};
struct wired_cmd_get_session_key_out get_skey_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ske_data)
@@ -355,7 +359,7 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
@@ -366,8 +370,8 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
- (u8 *)&get_skey_out, sizeof(get_skey_out));
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &get_skey_in, sizeof(get_skey_in),
+ &get_skey_out, sizeof(get_skey_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@@ -387,7 +391,7 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
struct hdcp_port_data *data,
struct hdcp2_rep_send_receiverid_list
@@ -397,8 +401,8 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
{
struct wired_cmd_verify_repeater_in verify_repeater_in = {};
struct wired_cmd_verify_repeater_out verify_repeater_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
@@ -409,7 +413,7 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
@@ -430,9 +434,9 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
HDCP_2_2_RECEIVER_IDS_MAX_LEN);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_repeater_in,
sizeof(verify_repeater_in),
- (u8 *)&verify_repeater_out,
+ &verify_repeater_out,
sizeof(verify_repeater_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -453,14 +457,15 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
return 0;
}
-int intel_hdcp_gsc_verify_mprime(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready)
+static int
+intel_hdcp_gsc_verify_mprime(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready)
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
size_t cmd_size;
@@ -472,7 +477,7 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
@@ -499,8 +504,8 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
verify_mprime_in->k = cpu_to_be16(data->k);
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size,
- (u8 *)&verify_mprime_out,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, verify_mprime_in, cmd_size,
+ &verify_mprime_out,
sizeof(verify_mprime_out));
kfree(verify_mprime_in);
if (byte < 0) {
@@ -518,13 +523,13 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
return 0;
}
-int intel_hdcp_gsc_enable_authentication(struct device *dev,
- struct hdcp_port_data *data)
+static int intel_hdcp_gsc_enable_authentication(struct device *dev,
+ struct hdcp_port_data *data)
{
struct wired_cmd_enable_auth_in enable_auth_in = {};
struct wired_cmd_enable_auth_out enable_auth_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
@@ -535,7 +540,7 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
@@ -547,9 +552,9 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &enable_auth_in,
sizeof(enable_auth_in),
- (u8 *)&enable_auth_out,
+ &enable_auth_out,
sizeof(enable_auth_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -565,13 +570,13 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
return 0;
}
-int
+static int
intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = {};
struct wired_cmd_close_session_out session_close_out = {};
+ struct intel_hdcp_gsc_context *gsc_context;
struct intel_display *display;
- struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
@@ -582,7 +587,7 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
- i915 = to_i915(display->drm);
+ gsc_context = display->hdcp.gsc_context;
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
@@ -594,9 +599,9 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
session_close_in.port.physical_port = (u8)data->hdcp_ddi;
session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
- byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in,
+ byte = intel_hdcp_gsc_msg_send(gsc_context, &session_close_in,
sizeof(session_close_in),
- (u8 *)&session_close_out,
+ &session_close_out,
sizeof(session_close_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@@ -611,3 +616,57 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
return 0;
}
+
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+ .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
+ .verify_receiver_cert_prepare_km =
+ intel_hdcp_gsc_verify_receiver_cert_prepare_km,
+ .verify_hprime = intel_hdcp_gsc_verify_hprime,
+ .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
+ .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
+ .verify_lprime = intel_hdcp_gsc_verify_lprime,
+ .get_session_key = intel_hdcp_gsc_get_session_key,
+ .repeater_check_flow_prepare_ack =
+ intel_hdcp_gsc_repeater_check_flow_prepare_ack,
+ .verify_mprime = intel_hdcp_gsc_verify_mprime,
+ .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
+ .close_hdcp_session = intel_hdcp_gsc_close_session,
+};
+
+int intel_hdcp_gsc_init(struct intel_display *display)
+{
+ struct intel_hdcp_gsc_context *gsc_context;
+ struct i915_hdcp_arbiter *arbiter;
+ int ret = 0;
+
+ arbiter = kzalloc(sizeof(*arbiter), GFP_KERNEL);
+ if (!arbiter)
+ return -ENOMEM;
+
+ mutex_lock(&display->hdcp.hdcp_mutex);
+
+ gsc_context = intel_hdcp_gsc_context_alloc(display->drm);
+ if (IS_ERR(gsc_context)) {
+ ret = PTR_ERR(gsc_context);
+ kfree(arbiter);
+ goto out;
+ }
+
+ display->hdcp.arbiter = arbiter;
+ display->hdcp.arbiter->hdcp_dev = display->drm->dev;
+ display->hdcp.arbiter->ops = &gsc_hdcp_ops;
+ display->hdcp.gsc_context = gsc_context;
+
+out:
+ mutex_unlock(&display->hdcp.hdcp_mutex);
+
+ return ret;
+}
+
+void intel_hdcp_gsc_fini(struct intel_display *display)
+{
+ intel_hdcp_gsc_context_free(display->hdcp.gsc_context);
+ display->hdcp.gsc_context = NULL;
+ kfree(display->hdcp.arbiter);
+ display->hdcp.arbiter = NULL;
+}
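With every entry point in this file now static, the HDCP core reaches these handlers only through the gsc_hdcp_ops table installed by intel_hdcp_gsc_init(). A sketch of what dispatch looks like from the arbiter side (the wrapper below is hypothetical; only the ops layout and the hdcp_dev wiring come from this patch):

static int hdcp2_initiate_session(struct intel_display *display,
				  struct hdcp_port_data *data,
				  struct hdcp2_ake_init *ake_data)
{
	struct i915_hdcp_arbiter *arbiter = display->hdcp.arbiter;

	if (!arbiter || !arbiter->ops->initiate_hdcp2_session)
		return -EOPNOTSUPP;

	/* hdcp_dev was set to display->drm->dev in intel_hdcp_gsc_init() */
	return arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev,
						    data, ake_data);
}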
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
index 2d597f27e931..9f54157a4a3e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
@@ -6,68 +6,9 @@
#ifndef __INTEL_HDCP_GSC_MESSAGE_H__
#define __INTEL_HDCP_GSC_MESSAGE_H__
-#include <linux/types.h>
-
-struct device;
-struct drm_i915_private;
-struct hdcp_port_data;
-struct hdcp2_ake_init;
-struct hdcp2_ake_send_cert;
-struct hdcp2_ake_no_stored_km;
-struct hdcp2_ake_send_hprime;
-struct hdcp2_ake_send_pairing_info;
-struct hdcp2_lc_init;
-struct hdcp2_lc_send_lprime;
-struct hdcp2_ske_send_eks;
-struct hdcp2_rep_send_receiverid_list;
-struct hdcp2_rep_send_ack;
-struct hdcp2_rep_stream_ready;
struct intel_display;
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len);
-bool intel_hdcp_gsc_check_status(struct intel_display *display);
-int
-intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_init *ake_data);
-int
-intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_cert *rx_cert,
- bool *km_stored,
- struct hdcp2_ake_no_stored_km
- *ek_pub_km,
- size_t *msg_sz);
-int
-intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_send_hprime *rx_hprime);
-int
-intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_ake_send_pairing_info *pairing_info);
-int
-intel_hdcp_gsc_initiate_locality_check(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_lc_init *lc_init_data);
-int
-intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
- struct hdcp2_lc_send_lprime *rx_lprime);
-int intel_hdcp_gsc_get_session_key(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data);
-int
-intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_send_receiverid_list
- *rep_topology,
- struct hdcp2_rep_send_ack
- *rep_send_ack);
-int intel_hdcp_gsc_verify_mprime(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready);
-int intel_hdcp_gsc_enable_authentication(struct device *dev,
- struct hdcp_port_data *data);
-int
-intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data);
+int intel_hdcp_gsc_init(struct intel_display *display);
+void intel_hdcp_gsc_fini(struct intel_display *display);
#endif /* __INTEL_HDCP_GSC_MESSAGE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 33b8d5229db0..98033471902c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -38,14 +38,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/intel/intel_lpe_audio.h>
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -64,6 +65,7 @@
#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_snps_phy.h"
+#include "intel_vrr.h"
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
@@ -714,7 +716,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
int ret;
if (!crtc_state->has_infoframe)
@@ -723,7 +725,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
- ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, &connector->base,
adjusted_mode);
if (ret)
return false;
@@ -742,7 +744,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
- drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ drm_hdmi_avi_infoframe_quant_range(frame, &connector->base,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
@@ -768,7 +770,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
@@ -778,7 +780,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- if (IS_DGFX(i915))
+ if (display->platform.dgfx)
ret = hdmi_spd_infoframe_init(frame, "Intel", "Discrete gfx");
else
ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
@@ -978,7 +980,6 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -988,9 +989,9 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
if (HAS_DDI(display))
reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return false;
@@ -1004,7 +1005,6 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -1014,9 +1014,9 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
if (HAS_DDI(display))
reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.valleyview || display->platform.cherryview)
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(display))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return;
@@ -1028,9 +1028,9 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+ if (display->platform.g4x || !crtc_state->has_infoframe)
return;
crtc_state->infoframes.enable |=
@@ -1538,7 +1538,6 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
if (!enable)
@@ -1557,7 +1556,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
* WA: To fix incorrect positioning of the window of
* opportunity and enc_en signalling in KABYLAKE.
*/
- if (IS_KABYLAKE(dev_priv) && enable)
+ if (display->platform.kabylake && enable)
return kbl_repositioning_enc_en_signal(connector,
cpu_transcoder);
@@ -1569,7 +1568,6 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
@@ -1582,15 +1580,15 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
if (ret)
return false;
- intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
+ intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ if (wait_for((intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
- port)));
+ intel_de_read(display, HDCP_STATUS(display, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1813,14 +1811,13 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_tmds_clock, vbt_max_tmds_clock;
- if (DISPLAY_VER(display) >= 13 || IS_ALDERLAKE_S(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || display->platform.alderlake_s)
max_tmds_clock = 600000;
else if (DISPLAY_VER(display) >= 10)
max_tmds_clock = 594000;
- else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv))
+ else if (DISPLAY_VER(display) >= 8 || display->platform.haswell)
max_tmds_clock = 300000;
else if (DISPLAY_VER(display) >= 5)
max_tmds_clock = 225000;
@@ -1879,7 +1876,6 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
bool has_hdmi_sink)
{
struct intel_display *display = to_intel_display(hdmi);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
@@ -1889,16 +1885,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* GLK DPLL can't generate 446-480 MHz */
- if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+ if (display->platform.geminilake && clock > 446666 && clock < 480000)
return MODE_CLOCK_RANGE;
/* BXT/GLK DPLL can't generate 223-240 MHz */
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
- if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
+ if (display->platform.cherryview && clock > 216000 && clock < 240000)
return MODE_CLOCK_RANGE;
/* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
@@ -1942,11 +1938,12 @@ static bool intel_hdmi_source_bpc_possible(struct intel_display *display, int bp
}
}
-static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
+static bool intel_hdmi_sink_bpc_possible(struct drm_connector *_connector,
int bpc, bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- const struct drm_display_info *info = &connector->display_info;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ const struct drm_display_info *info = &connector->base.display_info;
const struct drm_hdmi_info *hdmi = &info->hdmi;
switch (bpc) {
@@ -1975,12 +1972,13 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
}
static enum drm_mode_status
-intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
+intel_hdmi_mode_clock_valid(struct drm_connector *_connector, int clock,
bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum drm_mode_status status = MODE_OK;
int bpc;
@@ -1995,7 +1993,8 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
if (!intel_hdmi_source_bpc_possible(display, bpc))
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format))
+ if (!intel_hdmi_sink_bpc_possible(&connector->base, bpc, has_hdmi_sink,
+ sink_format))
continue;
status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink);
@@ -2010,15 +2009,16 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
}
static enum drm_mode_status
-intel_hdmi_mode_valid(struct drm_connector *connector,
+intel_hdmi_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *mode)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum drm_mode_status status;
int clock = mode->clock;
- int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
- bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
+ int max_dotclk = display->cdclk.max_dotclk_freq;
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->base.state);
bool ycbcr_420_only;
enum intel_output_format sink_format;
@@ -2047,22 +2047,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (clock > 600000)
return MODE_CLOCK_HIGH;
- ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
+ ycbcr_420_only = drm_mode_is_420_only(&connector->base.display_info, mode);
if (ycbcr_420_only)
sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
else
sink_format = INTEL_OUTPUT_FORMAT_RGB;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
+ status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK) {
if (ycbcr_420_only ||
- !connector->ycbcr_420_allowed ||
- !drm_mode_is_420_also(&connector->display_info, mode))
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(&connector->base.display_info, mode))
return status;
sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
+ status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink,
+ sink_format);
if (status != MODE_OK)
return status;
}
@@ -2073,16 +2074,16 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
int bpc, bool has_hdmi_sink)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_connector_state *connector_state;
- struct drm_connector *connector;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_digital_connector_state *connector_state;
+ struct intel_connector *connector;
int i;
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->uapi.crtc)
+ for_each_new_intel_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->base.crtc != crtc_state->uapi.crtc)
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink,
+ if (!intel_hdmi_sink_bpc_possible(&connector->base, bpc, has_hdmi_sink,
crtc_state->sink_format))
return false;
}
@@ -2210,7 +2211,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_connector *connector = conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2218,7 +2219,7 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
return false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- return connector->display_info.has_audio;
+ return connector->base.display_info.has_audio;
else
return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
@@ -2322,14 +2323,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
- struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_scdc *scdc = &connector->base.display_info.hdmi.scdc;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (!connector->interlace_allowed &&
+ if (!connector->base.interlace_allowed &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return -EINVAL;
@@ -2384,6 +2385,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
+ intel_vrr_compute_config(pipe_config, conn_state);
+
intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
conn_state);
@@ -2422,25 +2425,26 @@ void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder)
}
static void
-intel_hdmi_unset_edid(struct drm_connector *connector)
+intel_hdmi_unset_edid(struct drm_connector *_connector)
{
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
- drm_edid_free(to_intel_connector(connector)->detect_edid);
- to_intel_connector(connector)->detect_edid = NULL;
+ drm_edid_free(connector->detect_edid);
+ connector->detect_edid = NULL;
}
static void
-intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
- struct i2c_adapter *ddc = connector->ddc;
+ struct i2c_adapter *ddc = connector->base.ddc;
enum drm_dp_dual_mode_type type;
type = drm_dp_dual_mode_detect(display->drm, ddc);
@@ -2455,7 +2459,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
* if the port is a dual mode capable DP port.
*/
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
- if (!connector->force &&
+ if (!connector->base.force &&
intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
drm_dbg_kms(display->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
@@ -2478,7 +2482,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
hdmi->dp_dual_mode.max_tmds_clock);
/* Older VBTs are often buggy and can't be trusted :( Play it safe. */
- if ((DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 8 || display->platform.haswell) &&
!intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
drm_dbg_kms(display->drm,
"Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
@@ -2487,34 +2491,35 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
}
static bool
-intel_hdmi_set_edid(struct drm_connector *connector)
+intel_hdmi_set_edid(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct i2c_adapter *ddc = connector->ddc;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct i2c_adapter *ddc = connector->base.ddc;
intel_wakeref_t wakeref;
const struct drm_edid *drm_edid;
bool connected = false;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
- drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid = drm_edid_read_ddc(&connector->base, ddc);
if (!drm_edid && !intel_gmbus_is_forced_bit(ddc)) {
drm_dbg_kms(display->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(ddc, true);
- drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid = drm_edid_read_ddc(&connector->base, ddc);
intel_gmbus_force_bit(ddc, false);
}
/* Below we depend on display info having been updated */
- drm_edid_connector_update(connector, drm_edid);
+ drm_edid_connector_update(&connector->base, drm_edid);
- to_intel_connector(connector)->detect_edid = drm_edid;
+ connector->detect_edid = drm_edid;
if (drm_edid_is_digital(drm_edid)) {
- intel_hdmi_dp_dual_mode_detect(connector);
+ intel_hdmi_dp_dual_mode_detect(&connector->base);
connected = true;
}
@@ -2522,28 +2527,29 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_display_power_put(display, POWER_DOMAIN_GMBUS, wakeref);
cec_notifier_set_phys_addr(intel_hdmi->cec_notifier,
- connector->display_info.source_physical_address);
+ connector->base.display_info.source_physical_address);
return connected;
}
static enum drm_connector_status
-intel_hdmi_detect(struct drm_connector *connector, bool force)
+intel_hdmi_detect(struct drm_connector *_connector, bool force)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
enum drm_connector_status status = connector_status_disconnected;
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_device_enabled(display))
return connector_status_disconnected;
if (!intel_display_driver_check_access(display))
- return connector->status;
+ return connector->base.status;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
@@ -2551,9 +2557,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
!intel_digital_port_connected(encoder))
goto out;
- intel_hdmi_unset_edid(connector);
+ intel_hdmi_unset_edid(&connector->base);
- if (intel_hdmi_set_edid(connector))
+ if (intel_hdmi_set_edid(&connector->base))
status = connector_status_connected;
out:
@@ -2566,49 +2572,54 @@ out:
}
static void
-intel_hdmi_force(struct drm_connector *connector)
+intel_hdmi_force(struct drm_connector *_connector)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
if (!intel_display_driver_check_access(display))
return;
- intel_hdmi_unset_edid(connector);
+ intel_hdmi_unset_edid(&connector->base);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return;
- intel_hdmi_set_edid(connector);
+ intel_hdmi_set_edid(&connector->base);
}
-static int intel_hdmi_get_modes(struct drm_connector *connector)
+static int intel_hdmi_get_modes(struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
+
/* drm_edid_connector_update() done in ->detect() or ->force() */
- return drm_edid_connector_add_modes(connector);
+ return drm_edid_connector_add_modes(&connector->base);
}
static int
-intel_hdmi_connector_register(struct drm_connector *connector)
+intel_hdmi_connector_register(struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
int ret;
- ret = intel_connector_register(connector);
+ ret = intel_connector_register(&connector->base);
if (ret)
return ret;
return ret;
}
-static void intel_hdmi_connector_unregister(struct drm_connector *connector)
+static void intel_hdmi_connector_unregister(struct drm_connector *_connector)
{
- struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
cec_notifier_conn_unregister(n);
- intel_connector_unregister(connector);
+ intel_connector_unregister(&connector->base);
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2624,15 +2635,16 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_hdmi_connector_atomic_check(struct drm_connector *connector,
+static int intel_hdmi_connector_atomic_check(struct drm_connector *_connector,
struct drm_atomic_state *state)
{
- struct intel_display *display = to_intel_display(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_display *display = to_intel_display(connector);
if (HAS_DDI(display))
- return intel_digital_connector_atomic_check(connector, state);
+ return intel_digital_connector_atomic_check(&connector->base, state);
else
- return g4x_hdmi_connector_atomic_check(connector, state);
+ return g4x_hdmi_connector_atomic_check(&connector->base, state);
}
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
@@ -2642,22 +2654,23 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
};
static void
-intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *_connector)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(intel_hdmi);
- intel_attach_force_audio_property(connector);
- intel_attach_broadcast_rgb_property(connector);
- intel_attach_aspect_ratio_property(connector);
+ intel_attach_force_audio_property(&connector->base);
+ intel_attach_broadcast_rgb_property(&connector->base);
+ intel_attach_aspect_ratio_property(&connector->base);
- intel_attach_hdmi_colorspace_property(connector);
- drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(&connector->base);
+ drm_connector_attach_content_type_property(&connector->base);
if (DISPLAY_VER(display) >= 10)
- drm_connector_attach_hdr_output_metadata_property(connector);
+ drm_connector_attach_hdr_output_metadata_property(&connector->base);
if (!HAS_GMCH(display))
- drm_connector_attach_max_bpc_property(connector, 8, 12);
+ drm_connector_attach_max_bpc_property(&connector->base, 8, 12);
}
/*
@@ -2679,25 +2692,26 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
* True on success, false on failure.
*/
bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
- struct drm_connector *connector,
+ struct drm_connector *_connector,
bool high_tmds_clock_ratio,
bool scrambling)
{
+ struct intel_connector *connector = to_intel_connector(_connector);
struct intel_display *display = to_intel_display(encoder);
struct drm_scrambling *sink_scrambling =
- &connector->display_info.hdmi.scdc.scrambling;
+ &connector->base.display_info.hdmi.scdc.scrambling;
if (!sink_scrambling->supported)
return true;
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
- connector->base.id, connector->name,
+ connector->base.base.id, connector->base.name,
str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
- return drm_scdc_set_high_tmds_clock_ratio(connector, high_tmds_clock_ratio) &&
- drm_scdc_set_scrambling(connector, scrambling);
+ return drm_scdc_set_high_tmds_clock_ratio(&connector->base, high_tmds_clock_ratio) &&
+ drm_scdc_set_scrambling(&connector->base, scrambling);
}
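High TMDS clock ratio and scrambling normally travel together: per the HDMI 2.0 rule, character rates above 340 MHz require both, and below that both are usually off. A sketch of how a modeset path might drive the helper above (hypothetical caller; only the helper's signature comes from this file):

bool ratio_40 = crtc_state->port_clock > 340000;	/* kHz */

if (!intel_hdmi_handle_sink_scrambling(encoder, &connector->base,
				       ratio_40, ratio_40))
	drm_err(display->drm, "failed to set sink scrambling/TMDS ratio\n");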
static u8 chv_encoder_to_ddc_pin(struct intel_encoder *encoder)
@@ -2808,7 +2822,7 @@ static u8 mcc_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
WARN_ON(encoder->port == PORT_C);
@@ -2819,7 +2833,7 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
@@ -2828,7 +2842,6 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
drm_WARN_ON(display->drm, encoder->port == PORT_A);
@@ -2839,7 +2852,7 @@ static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
* combo outputs. With CMP, the traditional DDI A-D pins are used for
* all outputs.
*/
- if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C)
+ if (INTEL_PCH_TYPE(display) >= PCH_TGP && phy >= PHY_C)
return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
return GMBUS_PIN_1_BXT + phy;
@@ -2892,27 +2905,26 @@ static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 ddc_pin;
- if (IS_ALDERLAKE_S(dev_priv))
+ if (display->platform.alderlake_s)
ddc_pin = adls_encoder_to_ddc_pin(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
ddc_pin = dg1_encoder_to_ddc_pin(encoder);
- else if (IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.rocketlake)
ddc_pin = rkl_encoder_to_ddc_pin(encoder);
- else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(dev_priv))
+ else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(display))
ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
- else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
- HAS_PCH_TGP(dev_priv))
+ else if ((display->platform.jasperlake || display->platform.elkhartlake) &&
+ HAS_PCH_TGP(display))
ddc_pin = mcc_encoder_to_ddc_pin(encoder);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
ddc_pin = icl_encoder_to_ddc_pin(encoder);
- else if (HAS_PCH_CNP(dev_priv))
+ else if (HAS_PCH_CNP(display))
ddc_pin = cnp_encoder_to_ddc_pin(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
ddc_pin = bxt_encoder_to_ddc_pin(encoder);
- else if (IS_CHERRYVIEW(dev_priv))
+ else if (display->platform.cherryview)
ddc_pin = chv_encoder_to_ddc_pin(encoder);
else
ddc_pin = g4x_encoder_to_ddc_pin(encoder);
@@ -2986,15 +2998,13 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
void intel_infoframe_init(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
dig_port->write_infoframe = vlv_write_infoframe;
dig_port->read_infoframe = vlv_read_infoframe;
dig_port->set_infoframes = vlv_set_infoframes;
dig_port->infoframes_enabled = vlv_infoframes_enabled;
- } else if (IS_G4X(dev_priv)) {
+ } else if (display->platform.g4x) {
dig_port->write_infoframe = g4x_write_infoframe;
dig_port->read_infoframe = g4x_read_infoframe;
dig_port->set_infoframes = g4x_set_infoframes;
@@ -3011,7 +3021,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->set_infoframes = hsw_set_infoframes;
dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
- } else if (HAS_PCH_IBX(dev_priv)) {
+ } else if (HAS_PCH_IBX(display)) {
dig_port->write_infoframe = ibx_write_infoframe;
dig_port->read_infoframe = ibx_read_infoframe;
dig_port->set_infoframes = ibx_set_infoframes;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 00d7b1ccf190..fc5d8928c37e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -30,6 +30,7 @@
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -118,7 +119,7 @@ intel_connector_hpd_pin(struct intel_connector *connector)
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
- * @dev_priv: private driver data pointer
+ * @display: display device
* @pin: the pin to gather stats on
* @long_hpd: whether the HPD IRQ was long or short
*
@@ -127,13 +128,13 @@ intel_connector_hpd_pin(struct intel_connector *connector)
* responsible for further action.
*
* The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
+ * stored in @display->hotplug.hpd_storm_threshold which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
* short IRQs count as +1. If this threshold is exceeded, it's considered an
* IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
*
* By default, most systems will only count long IRQs towards
- * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
+ * &display->hotplug.hpd_storm_threshold. However, some older systems also
* suffer from short IRQ storms and must also track these. Because short IRQ
* storms are naturally caused by sideband interactions with DP MST devices,
* short IRQ detection is only enabled for systems without DP MST support.
@@ -145,10 +146,10 @@ intel_connector_hpd_pin(struct intel_connector *connector)
*
* Return true if an IRQ storm was detected on @pin.
*/
-static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+static bool intel_hpd_irq_storm_detect(struct intel_display *display,
enum hpd_pin pin, bool long_hpd)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int increment = long_hpd ? 10 : 1;
@@ -156,7 +157,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
bool storm = false;
if (!threshold ||
- (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
+ (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
return false;
if (!time_in_range(jiffies, start, end)) {
@@ -167,11 +168,11 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Received HPD interrupt on PIN %d - cnt: %d\n",
pin,
hpd->stats[pin].count);
@@ -180,56 +181,62 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
return storm;
}
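Stripped of locking and driver types, the storm heuristic documented above is a weighted counter inside a sliding window: long IRQs add 10, short ones add 1, and crossing the threshold flags a storm. A self-contained sketch under those documented weights (the struct and function names here are invented for illustration):

struct hpd_pin_stats {
	unsigned long last_jiffies;	/* start of the current window */
	int count;			/* weighted IRQ count in window */
};

static bool hpd_storm_detect(struct hpd_pin_stats *stats, bool long_hpd,
			     int threshold, unsigned long period)
{
	unsigned long end = stats->last_jiffies + period;

	if (!time_in_range(jiffies, stats->last_jiffies, end)) {
		/* Window expired: start a new one and reset the count. */
		stats->last_jiffies = jiffies;
		stats->count = 0;
	}

	stats->count += long_hpd ? 10 : 1;	/* long IRQs weigh 10x */

	return stats->count > threshold;
}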
-static bool detection_work_enabled(struct drm_i915_private *i915)
+static bool detection_work_enabled(struct intel_display *display)
{
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- return i915->display.hotplug.detection_work_enabled;
+ return display->hotplug.detection_work_enabled;
}
static bool
-mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!detection_work_enabled(i915))
+ lockdep_assert_held(&display->irq.lock);
+
+ if (!detection_work_enabled(display))
return false;
return mod_delayed_work(i915->unordered_wq, work, delay);
}
static bool
-queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ lockdep_assert_held(&display->irq.lock);
- if (!detection_work_enabled(i915))
+ if (!detection_work_enabled(display))
return false;
return queue_delayed_work(i915->unordered_wq, work, delay);
}
static bool
-queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
+queue_detection_work(struct intel_display *display, struct work_struct *work)
{
- lockdep_assert_held(&i915->irq_lock);
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!detection_work_enabled(i915))
+ lockdep_assert_held(&display->irq.lock);
+
+ if (!detection_work_enabled(display))
return false;
return queue_work(i915->unordered_wq, work);
}
static void
-intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
+intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
bool hpd_disabled = false;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -238,15 +245,15 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
connector->base.name);
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
+ display->hotplug.stats[pin].state = HPD_DISABLED;
connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
@@ -255,36 +262,35 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.reenable_work,
+ drm_kms_helper_poll_reschedule(display->drm);
+ mod_delayed_detection_work(display,
+ &display->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- display.hotplug.reenable_work.work);
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.reenable_work.work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
+ display->hotplug.stats[pin].state != HPD_DISABLED)
continue;
if (connector->base.polled != connector->polled)
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Reenabling HPD on connector %s\n",
connector->base.name);
connector->base.polled = connector->polled;
@@ -292,15 +298,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
for_each_hpd_pin(pin) {
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
+ display->hotplug.stats[pin].state = HPD_ENABLED;
}
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
}
static enum intel_hotplug_state
@@ -349,32 +355,72 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
+static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(display->drm, encoder) {
+ if (encoder->hpd_pin != pin)
+ continue;
+
+ if (intel_encoder_has_hpd_pulse(encoder))
+ return true;
+ }
+
+ return false;
+}
+
+static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
+{
+ lockdep_assert_held(&display->irq.lock);
+
+ return display->hotplug.stats[pin].blocked_count;
+}
+
+static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
+{
+ enum hpd_pin pin;
+ u32 hpd_pin_mask = 0;
+
+ for_each_hpd_pin(pin) {
+ if (hpd_pin_is_blocked(display, pin))
+ hpd_pin_mask |= BIT(pin);
+ }
+
+ return hpd_pin_mask;
+}
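
These two helpers are the gate used by both bottom halves below: pins with a non-zero blocked_count are masked out of the pending bits, and whatever survives is what the worker consumes. A sketch of that filtering, with the pin count and bit values chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define NUM_PINS 24	/* illustrative; the driver uses HPD_NUM_PINS */

static uint32_t blocked_mask(const int blocked_count[NUM_PINS])
{
	uint32_t mask = 0;

	for (int pin = 0; pin < NUM_PINS; pin++)
		if (blocked_count[pin])
			mask |= 1u << pin;
	return mask;
}

int main(void)
{
	int blocked_count[NUM_PINS] = { 0 };
	uint32_t event_bits = 0x0b;	/* pins 0, 1, 3 pending */

	blocked_count[1] = 1;	/* pin 1 blocked, e.g. via intel_hpd_block() */

	uint32_t blocked = blocked_mask(blocked_count);
	uint32_t to_handle = event_bits & ~blocked;

	event_bits &= ~to_handle;	/* pin 1 stays latched for later */
	printf("handle=0x%02x latched=0x%02x\n",
	       (unsigned)to_handle, (unsigned)event_bits);
	return 0;
}
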
+
static void i915_digport_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
- u32 long_port_mask, short_port_mask;
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.dig_port_work);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ u32 long_hpd_pin_mask, short_hpd_pin_mask;
struct intel_encoder *encoder;
+ u32 blocked_hpd_pin_mask;
u32 old_bits = 0;
- spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->display.hotplug.long_port_mask;
- dev_priv->display.hotplug.long_port_mask = 0;
- short_port_mask = dev_priv->display.hotplug.short_port_mask;
- dev_priv->display.hotplug.short_port_mask = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
+ short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
+ hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;
+
+ spin_unlock_irq(&display->irq.lock);
+
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_digital_port *dig_port;
- enum port port = encoder->port;
+ enum hpd_pin pin = encoder->hpd_pin;
bool long_hpd, short_hpd;
enum irqreturn ret;
if (!intel_encoder_has_hpd_pulse(encoder))
continue;
- long_hpd = long_port_mask & BIT(port);
- short_hpd = short_port_mask & BIT(port);
+ long_hpd = long_hpd_pin_mask & BIT(pin);
+ short_hpd = short_hpd_pin_mask & BIT(pin);
if (!long_hpd && !short_hpd)
continue;
@@ -384,16 +430,16 @@ static void i915_digport_work_func(struct work_struct *work)
ret = dig_port->hpd_pulse(dig_port, long_hpd);
if (ret == IRQ_NONE) {
/* fall back to old school hpd */
- old_bits |= BIT(encoder->hpd_pin);
+ old_bits |= BIT(pin);
}
}
if (old_bits) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.event_bits |= old_bits;
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.event_bits |= old_bits;
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
+ spin_unlock_irq(&display->irq.lock);
}
}
@@ -406,13 +452,17 @@ static void i915_digport_work_func(struct work_struct *work)
*/
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ struct intel_encoder *encoder = &dig_port->base;
+
+ spin_lock_irq(&display->irq.lock);
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
- spin_unlock_irq(&i915->irq_lock);
+ hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
+ if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
- queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
+ spin_unlock_irq(&display->irq.lock);
}
/*
@@ -420,9 +470,9 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
*/
static void i915_hotplug_work_func(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.hotplug_work.work);
+ struct intel_display *display =
+ container_of(work, struct intel_display, hotplug.hotplug_work.work);
+ struct intel_hotplug *hotplug = &display->hotplug;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
u32 changed = 0, retry = 0;
@@ -430,30 +480,32 @@ static void i915_hotplug_work_func(struct work_struct *work)
u32 hpd_retry_bits;
struct drm_connector *first_changed_connector = NULL;
int changed_connectors = 0;
+ u32 blocked_hpd_pin_mask;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
+ mutex_lock(&display->drm->mode_config.mutex);
+ drm_dbg_kms(display->drm, "running encoder hotplug functions\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- hpd_event_bits = dev_priv->display.hotplug.event_bits;
- dev_priv->display.hotplug.event_bits = 0;
- hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
- dev_priv->display.hotplug.retry_bits = 0;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
+ hotplug->event_bits &= ~hpd_event_bits;
+ hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
+ hotplug->retry_bits &= ~hpd_retry_bits;
/* Enable polling for connectors which had HPD IRQ storms */
- intel_hpd_irq_storm_switch_to_polling(dev_priv);
+ intel_hpd_irq_storm_switch_to_polling(display);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
	/* Skip calling encoder hotplug handlers if the ignore long HPD flag is set */
- if (dev_priv->display.hotplug.ignore_long_hpd) {
- drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ if (display->hotplug.ignore_long_hpd) {
+ drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
+ mutex_unlock(&display->drm->mode_config.mutex);
return;
}
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
u32 hpd_bit;
@@ -472,7 +524,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
else
connector->hotplug_retries++;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Connector %s (pin %i) received hotplug event. (retry %d)\n",
connector->base.name, pin,
connector->hotplug_retries);
@@ -495,12 +547,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (changed_connectors == 1)
drm_kms_helper_connector_hotplug_event(first_changed_connector);
else if (changed_connectors > 0)
- drm_kms_helper_hotplug_event(&dev_priv->drm);
+ drm_kms_helper_hotplug_event(display->drm);
if (first_changed_connector)
drm_connector_put(first_changed_connector);
@@ -508,20 +560,20 @@ static void i915_hotplug_work_func(struct work_struct *work)
/* Remove shared HPD pins that have changed */
retry &= ~changed;
if (retry) {
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.retry_bits |= retry;
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.retry_bits |= retry;
- mod_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work,
+ mod_delayed_detection_work(display,
+ &display->hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
}
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev_priv: drm_i915_private
+ * @display: display device
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -535,7 +587,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask)
{
struct intel_encoder *encoder;
@@ -548,7 +600,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!pin_mask)
return;
- spin_lock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
/*
* Determine whether ->hpd_pulse() exists for each pin, and
@@ -556,8 +608,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
*/
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- enum port port = encoder->port;
+ for_each_intel_encoder(display->drm, encoder) {
bool long_hpd;
pin = encoder->hpd_pin;
@@ -569,18 +620,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"digital hpd on [ENCODER:%d:%s] - %s\n",
encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
- queue_dig = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_dig = true;
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.long_port_mask |= BIT(port);
+ display->hotplug.long_hpd_pin_mask |= BIT(pin);
} else {
short_hpd_pulse_mask |= BIT(pin);
- dev_priv->display.hotplug.short_port_mask |= BIT(port);
+ display->hotplug.short_hpd_pin_mask |= BIT(pin);
}
}
@@ -591,20 +644,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
+ if (display->hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
			 * hotplug bits themselves. So only WARN about unexpected
* interrupts on saner platforms.
*/
- drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
"Received HPD interrupt on pin %d although disabled\n",
pin);
continue;
}
- if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
+ if (display->hotplug.stats[pin].state != HPD_ENABLED)
continue;
/*
@@ -615,13 +668,15 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
- dev_priv->display.hotplug.event_bits |= BIT(pin);
+ display->hotplug.event_bits |= BIT(pin);
long_hpd = true;
- queue_hp = true;
+
+ if (!hpd_pin_is_blocked(display, pin))
+ queue_hp = true;
}
- if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
- dev_priv->display.hotplug.event_bits &= ~BIT(pin);
+ if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
+ display->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
queue_hp = true;
}
@@ -632,7 +687,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* happens later in our hotplug work.
*/
if (storm_detected)
- intel_hpd_irq_setup(dev_priv);
+ intel_hpd_irq_setup(display);
/*
* Our hotplug handler can grab modeset locks (by calling down into the
@@ -641,17 +696,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* deadlock.
*/
if (queue_dig)
- queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
+ queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_detection_work(dev_priv,
- &dev_priv->display.hotplug.hotplug_work, 0);
+ queue_delayed_detection_work(display,
+ &display->hotplug.hotplug_work, 0);
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&display->irq.lock);
}
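
A simplified model of the per-pin dispatch the handler performs: blocked pins keep their IRQ latched without waking a worker, DP encoders with an ->hpd_pulse() hook are serviced by the dig-port worker, and everything else is latched into event_bits for the hotplug worker. The pin layout here is invented for the demo and glosses over storm detection:

#include <stdbool.h>
#include <stdio.h>

struct pin {
	bool has_pulse;	/* DP encoder with ->hpd_pulse()? */
	bool blocked;	/* intel_hpd_block() currently active? */
};

int main(void)
{
	struct pin pins[3] = {
		{ .has_pulse = true  },			/* DP port */
		{ .has_pulse = false },			/* HDMI-only pin */
		{ .has_pulse = true, .blocked = true },	/* DP, blocked */
	};
	bool queue_dig = false, queue_hp = false;

	for (int i = 0; i < 3; i++) {
		if (pins[i].blocked)
			continue;	/* IRQ stays latched, no worker queued */
		if (pins[i].has_pulse)
			queue_dig = true;	/* -> dig_port_work */
		else
			queue_hp = true;	/* -> hotplug_work */
	}
	printf("queue_dig=%d queue_hp=%d\n", queue_dig, queue_hp);
	return 0;
}
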
/**
* intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables the hotplug support. It requires that interrupts have
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
@@ -663,40 +718,40 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
*
* Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
*/
-void intel_hpd_init(struct drm_i915_private *dev_priv)
+void intel_hpd_init(struct intel_display *display)
{
int i;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
for_each_hpd_pin(i) {
- dev_priv->display.hotplug.stats[i].count = 0;
- dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
+ display->hotplug.stats[i].count = 0;
+ display->hotplug.stats[i].state = HPD_ENABLED;
}
/*
	 * Interrupt setup is already guaranteed to be single-threaded; this is
* just to make the assert_spin_locked checks happy.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- intel_hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ intel_hpd_irq_setup(display);
+ spin_unlock_irq(&display->irq.lock);
}
-static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
+static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *first_changed_connector = NULL;
int changed = 0;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- if (!i915->drm.mode_config.poll_enabled)
+ if (!display->drm->mode_config.poll_enabled)
goto out;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
continue;
@@ -714,7 +769,7 @@ static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
out:
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!changed)
return;
@@ -722,25 +777,23 @@ out:
if (changed == 1)
drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
else
- drm_kms_helper_hotplug_event(&i915->drm);
+ drm_kms_helper_hotplug_event(display->drm);
drm_connector_put(&first_changed_connector->base);
}
static void i915_hpd_poll_init_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.hotplug.poll_init_work);
- struct intel_display *display = &dev_priv->display;
+ struct intel_display *display =
+ container_of(work, typeof(*display), hotplug.poll_init_work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
intel_wakeref_t wakeref;
bool enabled;
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
+ enabled = READ_ONCE(display->hotplug.poll_enabled);
/*
* Prevent taking a power reference from this sequence of
* i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
@@ -750,14 +803,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (!enabled) {
wakeref = intel_display_power_get(display,
POWER_DOMAIN_DISPLAY_CORE);
- drm_WARN_ON(&dev_priv->drm,
- READ_ONCE(dev_priv->display.hotplug.poll_enabled));
- cancel_work(&dev_priv->display.hotplug.poll_init_work);
+ drm_WARN_ON(display->drm,
+ READ_ONCE(display->hotplug.poll_enabled));
+ cancel_work(&display->hotplug.poll_init_work);
}
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
enum hpd_pin pin;
@@ -765,7 +818,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
if (pin == HPD_NONE)
continue;
- if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ if (display->hotplug.stats[pin].state == HPD_DISABLED)
continue;
connector->base.polled = connector->polled;
@@ -776,19 +829,19 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
drm_connector_list_iter_end(&conn_iter);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
if (enabled)
- drm_kms_helper_poll_reschedule(&dev_priv->drm);
+ drm_kms_helper_poll_reschedule(display->drm);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/*
* We might have missed any hotplugs that happened while we were
* in the middle of disabling polling
*/
if (!enabled) {
- i915_hpd_poll_detect_connectors(dev_priv);
+ i915_hpd_poll_detect_connectors(display);
intel_display_power_put(display,
POWER_DOMAIN_DISPLAY_CORE,
@@ -798,7 +851,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
/**
* intel_hpd_poll_enable - enable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function enables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -812,15 +865,12 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*
* Also see: intel_hpd_init() and intel_hpd_poll_disable().
*/
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_enable(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- if (!HAS_DISPLAY(dev_priv) ||
- !intel_display_device_enabled(display))
+ if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
+ WRITE_ONCE(display->hotplug.poll_enabled, true);
/*
* We might already be holding dev->mode_config.mutex, so do this in a
@@ -828,15 +878,15 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
+ spin_unlock_irq(&display->irq.lock);
}
/**
* intel_hpd_poll_disable - disable polling for connectors with hpd
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* This function disables polling for all connectors which support HPD.
* Under certain conditions HPD may not be functional. On most Intel GPUs,
@@ -853,26 +903,26 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
*
* Also see: intel_hpd_init() and intel_hpd_poll_enable().
*/
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_disable(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ WRITE_ONCE(display->hotplug.poll_enabled, false);
- spin_lock_irq(&dev_priv->irq_lock);
- queue_detection_work(dev_priv,
- &dev_priv->display.hotplug.poll_init_work);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ queue_detection_work(display,
+ &display->hotplug.poll_init_work);
+ spin_unlock_irq(&display->irq.lock);
}
-void intel_hpd_poll_fini(struct drm_i915_private *i915)
+void intel_hpd_poll_fini(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
intel_connector_cancel_modeset_retry_work(connector);
intel_hdcp_cancel_works(connector);
@@ -880,157 +930,261 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_hpd_init_early(struct drm_i915_private *i915)
+void intel_hpd_init_early(struct intel_display *display)
{
- INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
+ INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&display->hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
- i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
- i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
+ display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
}
-static bool cancel_all_detection_work(struct drm_i915_private *i915)
+static bool cancel_all_detection_work(struct intel_display *display)
{
bool was_pending = false;
- if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
+ if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
was_pending = true;
- if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
+ if (cancel_work_sync(&display->hotplug.poll_init_work))
was_pending = true;
- if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
+ if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
was_pending = true;
return was_pending;
}
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+void intel_hpd_cancel_work(struct intel_display *display)
{
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+
+ drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
- dev_priv->display.hotplug.long_port_mask = 0;
- dev_priv->display.hotplug.short_port_mask = 0;
- dev_priv->display.hotplug.event_bits = 0;
- dev_priv->display.hotplug.retry_bits = 0;
+ display->hotplug.long_hpd_pin_mask = 0;
+ display->hotplug.short_hpd_pin_mask = 0;
+ display->hotplug.event_bits = 0;
+ display->hotplug.retry_bits = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
- cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_work_sync(&display->hotplug.dig_port_work);
/*
* All other work triggered by hotplug events should be canceled by
* now.
*/
- if (cancel_all_detection_work(dev_priv))
- drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
+ if (cancel_all_detection_work(display))
+ drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
}
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static void queue_work_for_missed_irqs(struct intel_display *display)
{
- bool ret = false;
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool queue_hp_work = false;
+ u32 blocked_hpd_pin_mask;
+ enum hpd_pin pin;
- if (pin == HPD_NONE)
- return false;
+ lockdep_assert_held(&display->irq.lock);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
- dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
- ret = true;
+ blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
+ if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
+ queue_hp_work = true;
+
+ for_each_hpd_pin(pin) {
+ switch (display->hotplug.stats[pin].state) {
+ case HPD_MARK_DISABLED:
+ queue_hp_work = true;
+ break;
+ case HPD_DISABLED:
+ case HPD_ENABLED:
+ break;
+ default:
+ MISSING_CASE(display->hotplug.stats[pin].state);
+ }
}
- spin_unlock_irq(&dev_priv->irq_lock);
- return ret;
+ if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
+ queue_work(hotplug->dp_wq, &hotplug->dig_port_work);
+
+ if (queue_hp_work)
+ queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
}
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
- if (pin == HPD_NONE)
+ struct intel_hotplug *hotplug = &display->hotplug;
+
+ lockdep_assert_held(&display->irq.lock);
+
+ hotplug->stats[pin].blocked_count++;
+
+ return hotplug->stats[pin].blocked_count == 1;
+}
+
+static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
+{
+ struct intel_hotplug *hotplug = &display->hotplug;
+
+ lockdep_assert_held(&display->irq.lock);
+
+ if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
+ return true;
+
+ hotplug->stats[pin].blocked_count--;
+
+ return hotplug->stats[pin].blocked_count == 0;
+}
+
+/**
+ * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to block the HPD handling for
+ *
+ * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
+ *
+ * On return:
+ *
+ * - It's guaranteed that the blocked encoders' HPD pulse handler
+ * (via intel_digital_port::hpd_pulse()) is not running.
+ * - The hotplug event handling (via intel_encoder::hotplug()) of an
+ *   HPD IRQ pending at the time this function is called may still be
+ *   running.
+ * - Detection on the encoder's connector (via
+ * drm_connector_helper_funcs::detect_ctx(),
+ * drm_connector_funcs::detect()) remains allowed, for instance as part of
+ * userspace connector probing, or DRM core's connector polling.
+ *
+ * The call must be followed by calling intel_hpd_unblock(), or
+ * intel_hpd_clear_and_unblock().
+ *
+ * Note that the handling of HPD IRQs for another encoder using the same HPD
+ * pin as that of @encoder will also be blocked.
+ */
+void intel_hpd_block(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ bool do_flush = false;
+
+ if (encoder->hpd_pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&display->irq.lock);
+
+ if (block_hpd_pin(display, encoder->hpd_pin))
+ do_flush = true;
+
+ spin_unlock_irq(&display->irq.lock);
+
+ if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
+ flush_work(&hotplug->dig_port_work);
+}
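
The block/unblock pair is refcounted per pin, so only the transitions to and from zero have side effects: the first block flushes the pulse worker, the last unblock replays latched IRQs. A standalone model of that counting, not the driver code itself:

#include <stdbool.h>
#include <stdio.h>

static int blocked_count;

static bool block(void)
{
	return ++blocked_count == 1;	/* first blocker flushes */
}

static bool unblock(void)
{
	return blocked_count && --blocked_count == 0;	/* last one replays */
}

int main(void)
{
	printf("first block flushes:  %d\n", block());	/* 1 */
	printf("nested block flushes: %d\n", block());	/* 0 */
	printf("inner unblock replays: %d\n", unblock());/* 0 */
	printf("outer unblock replays: %d\n", unblock());/* 1 */
	return 0;
}
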
+
+/**
+ * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be handled for @encoder and for any
+ * other encoder sharing the same HPD pin.
+ */
+void intel_hpd_unblock(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (encoder->hpd_pin == HPD_NONE)
return;
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+
+ if (unblock_hpd_pin(display, encoder->hpd_pin))
+ queue_work_for_missed_irqs(display);
+
+ spin_unlock_irq(&display->irq.lock);
}
-static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
+/**
+ * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
+ * @encoder: Encoder to unblock the HPD handling for
+ *
+ * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
+ * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
+ * HPD pin while it was blocked will be cleared, handling only new IRQs.
+ */
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
{
- bool queue_work = false;
- enum hpd_pin pin;
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_hotplug *hotplug = &display->hotplug;
+ enum hpd_pin pin = encoder->hpd_pin;
- lockdep_assert_held(&i915->irq_lock);
+ if (pin == HPD_NONE)
+ return;
- if (i915->display.hotplug.event_bits ||
- i915->display.hotplug.retry_bits)
- queue_work = true;
+ spin_lock_irq(&display->irq.lock);
- for_each_hpd_pin(pin) {
- switch (i915->display.hotplug.stats[pin].state) {
- case HPD_MARK_DISABLED:
- queue_work = true;
- break;
- case HPD_ENABLED:
- break;
- default:
- MISSING_CASE(i915->display.hotplug.stats[pin].state);
- }
+ if (unblock_hpd_pin(display, pin)) {
+ hotplug->event_bits &= ~BIT(pin);
+ hotplug->retry_bits &= ~BIT(pin);
+ hotplug->short_hpd_pin_mask &= ~BIT(pin);
+ hotplug->long_hpd_pin_mask &= ~BIT(pin);
}
- if (queue_work)
- queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+ spin_unlock_irq(&display->irq.lock);
}
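
The two unblock variants differ only in what happens to IRQs latched while the pin was blocked; a sketch of the contrast, with an illustrative pin number:

#include <stdint.h>
#include <stdio.h>

#define PIN 2

int main(void)
{
	/* IRQs latched on the pin while it was blocked. */
	uint32_t event_bits = 1u << PIN, short_mask = 1u << PIN;

	/* intel_hpd_unblock() leaves them in place, so the workers
	 * queued for missed IRQs will process them. */
	printf("unblock: event=0x%x short=0x%x\n",
	       (unsigned)event_bits, (unsigned)short_mask);

	/* intel_hpd_clear_and_unblock() drops them; only IRQs raised
	 * from now on are handled. */
	event_bits &= ~(1u << PIN);
	short_mask &= ~(1u << PIN);
	printf("clear_and_unblock: event=0x%x short=0x%x\n",
	       (unsigned)event_bits, (unsigned)short_mask);
	return 0;
}
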
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_enable_detection_work(struct intel_display *display)
{
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = true;
- queue_work_for_missed_irqs(i915);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.detection_work_enabled = true;
+ queue_work_for_missed_irqs(display);
+ spin_unlock_irq(&display->irq.lock);
}
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
+void intel_hpd_disable_detection_work(struct intel_display *display)
{
- spin_lock_irq(&i915->irq_lock);
- i915->display.hotplug.detection_work_enabled = false;
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ display->hotplug.detection_work_enabled = false;
+ spin_unlock_irq(&display->irq.lock);
- cancel_all_detection_work(i915);
+ cancel_all_detection_work(display);
}
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
+bool intel_hpd_schedule_detection(struct intel_display *display)
{
unsigned long flags;
bool ret;
- spin_lock_irqsave(&i915->irq_lock, flags);
- ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
- spin_unlock_irqrestore(&i915->irq_lock, flags);
+ spin_lock_irqsave(&display->irq.lock, flags);
+ ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
+ spin_unlock_irqrestore(&display->irq.lock, flags);
return ret;
}
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_hotplug *hotplug = &display->hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->display.hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
+ flush_work(&display->hotplug.dig_port_work);
+ flush_delayed_work(&display->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -1044,8 +1198,8 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct intel_hotplug *hotplug = &display->hotplug;
unsigned int new_threshold;
int i;
char *newline;
@@ -1070,21 +1224,21 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
return -EINVAL;
if (new_threshold > 0)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Setting HPD storm detection threshold to %d\n",
new_threshold);
else
- drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+ drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
hotplug->hpd_storm_threshold = new_threshold;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
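
From user space the threshold can be poked through the debugfs file this handler backs (registered further down in intel_hpd_debugfs_register()). A hedged sketch; the path is an assumption, since minor->debugfs_root typically lands under /sys/kernel/debug/dri/<minor>, and the write requires root:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_hpd_storm_ctl";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* "0" disables storm detection; any positive value sets the
	 * per-period threshold that long (+10) / short (+1) IRQs count
	 * against. */
	fputs("10\n", f);
	fclose(f);
	return 0;
}
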
@@ -1105,10 +1259,10 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
+ str_yes_no(display->hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1125,8 +1279,8 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ struct intel_display *display = m->private;
+ struct intel_hotplug *hotplug = &display->hotplug;
char *newline;
char tmp[16];
int i;
@@ -1147,22 +1301,22 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
/* Reset to the "default" state for this system */
if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
+ new_state = !HAS_DP_MST(display);
else if (kstrtobool(tmp, &new_state) != 0)
return -EINVAL;
- drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
new_state ? "En" : "Dis");
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
hotplug->hpd_short_storm_enabled = new_state;
/* Reset the HPD storm stats so we don't accidentally trigger a storm */
for_each_hpd_pin(i)
hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+ flush_delayed_work(&display->hotplug.reenable_work);
return len;
}
@@ -1176,14 +1330,14 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
.write = i915_hpd_short_storm_ctl_write,
};
-void intel_hpd_debugfs_register(struct drm_i915_private *i915)
+void intel_hpd_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_storm_ctl_fops);
+ display, &i915_hpd_storm_ctl_fops);
debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
- i915, &i915_hpd_short_storm_ctl_fops);
+ display, &i915_hpd_short_storm_ctl_fops);
debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
- &i915->display.hotplug.ignore_long_hpd);
+ &display->hotplug.ignore_long_hpd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index d6986902b054..edc41c9d3d65 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,30 +8,31 @@
#include <linux/types.h>
-struct drm_i915_private;
+enum port;
struct intel_connector;
struct intel_digital_port;
+struct intel_display;
struct intel_encoder;
-enum port;
-void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
-void intel_hpd_poll_fini(struct drm_i915_private *i915);
+void intel_hpd_poll_enable(struct intel_display *display);
+void intel_hpd_poll_disable(struct intel_display *display);
+void intel_hpd_poll_fini(struct intel_display *display);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
-void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+void intel_hpd_irq_handler(struct intel_display *display,
u32 pin_mask, u32 long_mask);
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
-void intel_hpd_init(struct drm_i915_private *dev_priv);
-void intel_hpd_init_early(struct drm_i915_private *i915);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+void intel_hpd_init(struct intel_display *display);
+void intel_hpd_init_early(struct intel_display *display);
+void intel_hpd_cancel_work(struct intel_display *display);
enum hpd_pin intel_hpd_pin_default(enum port port);
-bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
-void intel_hpd_debugfs_register(struct drm_i915_private *i915);
+void intel_hpd_block(struct intel_encoder *encoder);
+void intel_hpd_unblock(struct intel_encoder *encoder);
+void intel_hpd_clear_and_unblock(struct intel_encoder *encoder);
+void intel_hpd_debugfs_register(struct intel_display *display);
-void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
-void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
-bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
+void intel_hpd_enable_detection_work(struct intel_display *display);
+void intel_hpd_disable_detection_work(struct intel_display *display);
+bool intel_hpd_schedule_detection(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 2137ac7b882a..c024b42369c8 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -3,8 +3,10 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -131,68 +133,67 @@ static const u32 hpd_mtp[HPD_NUM_PINS] = {
[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
};
-static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
+static void intel_hpd_init_pins(struct intel_display *display)
{
- struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ struct intel_hotplug *hpd = &display->hotplug;
- if (HAS_GMCH(dev_priv)) {
- if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv))
+ if (HAS_GMCH(display)) {
+ if (display->platform.g4x || display->platform.valleyview ||
+ display->platform.cherryview)
hpd->hpd = hpd_status_g4x;
else
hpd->hpd = hpd_status_i915;
return;
}
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
hpd->hpd = hpd_xelpdp;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
hpd->hpd = hpd_gen11;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
hpd->hpd = hpd_bxt;
- else if (DISPLAY_VER(dev_priv) == 9)
+ else if (DISPLAY_VER(display) == 9)
hpd->hpd = NULL; /* no north HPD on SKL */
- else if (DISPLAY_VER(dev_priv) >= 8)
+ else if (DISPLAY_VER(display) >= 8)
hpd->hpd = hpd_bdw;
- else if (DISPLAY_VER(dev_priv) >= 7)
+ else if (DISPLAY_VER(display) >= 7)
hpd->hpd = hpd_ivb;
else
hpd->hpd = hpd_ilk;
- if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
- (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
+ if ((INTEL_PCH_TYPE(display) < PCH_DG1) &&
+ (!HAS_PCH_SPLIT(display) || HAS_PCH_NOP(display)))
return;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
hpd->pch_hpd = hpd_mtp;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ else if (INTEL_PCH_TYPE(display) >= PCH_DG1)
hpd->pch_hpd = hpd_sde_dg1;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
- else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
+ else if (HAS_PCH_CNP(display) || HAS_PCH_SPT(display))
hpd->pch_hpd = hpd_spt;
- else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
+ else if (HAS_PCH_LPT(display) || HAS_PCH_CPT(display))
hpd->pch_hpd = hpd_cpt;
- else if (HAS_PCH_IBX(dev_priv))
+ else if (HAS_PCH_IBX(display))
hpd->pch_hpd = hpd_ibx;
else
- MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
+ MISSING_CASE(INTEL_PCH_TYPE(display));
}
/* For display hotplug interrupt */
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits)
{
- lockdep_assert_held(&dev_priv->irq_lock);
- drm_WARN_ON(&dev_priv->drm, bits & ~mask);
+ lockdep_assert_held(&display->irq.lock);
+ drm_WARN_ON(display->drm, bits & ~mask);
- intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN(dev_priv), mask,
- bits);
+ intel_de_rmw(display, PORT_HOTPLUG_EN(display), mask, bits);
}
/**
* i915_hotplug_interrupt_update - update hotplug interrupt enable
- * @dev_priv: driver private
+ * @display: display device
* @mask: bits to update
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
@@ -202,13 +203,13 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* held already, this function acquires the lock itself. A non-locking
* version is also available.
*/
-void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask,
u32 bits)
{
- spin_lock_irq(&dev_priv->irq_lock);
- i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ i915_hotplug_interrupt_update_locked(display, mask, bits);
+ spin_unlock_irq(&display->irq.lock);
}
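
What the helper performs through intel_de_rmw() is a masked update: new = (old & ~mask) | bits, with the drm_WARN_ON() above guarding against enable bits that fall outside the mask. A small model, with a made-up register value:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t hotplug_update(uint32_t old, uint32_t mask, uint32_t bits)
{
	assert(!(bits & ~mask));	/* mirrors the drm_WARN_ON() */
	return (old & ~mask) | bits;
}

int main(void)
{
	uint32_t reg = 0xf0;

	/* Enable bit 1, disable bit 2; leave everything else alone. */
	reg = hotplug_update(reg, 0x06, 0x02);
	printf("PORT_HOTPLUG_EN = 0x%02x\n", (unsigned)reg);	/* 0xf2 */
	return 0;
}
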
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
@@ -339,7 +340,7 @@ static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
*
* Note that the caller is expected to zero out the masks initially.
*/
-static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
+static void intel_get_hpd_pins(struct intel_display *display,
u32 *pin_mask, u32 *long_mask,
u32 hotplug_trigger, u32 dig_hotplug_reg,
const u32 hpd[HPD_NUM_PINS],
@@ -359,37 +360,37 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
*long_mask |= BIT(pin);
}
- drm_dbg(&dev_priv->drm,
- "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+ drm_dbg_kms(display->drm,
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
-static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_enabled_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
- if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+ for_each_intel_encoder(display->drm, encoder)
+ if (display->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
}
-static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
+static u32 intel_hpd_hotplug_irqs(struct intel_display *display,
const u32 hpd[HPD_NUM_PINS])
{
struct intel_encoder *encoder;
u32 hotplug_irqs = 0;
- for_each_intel_encoder(&dev_priv->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug_irqs |= hpd[encoder->hpd_pin];
return hotplug_irqs;
}
-static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_mask(struct intel_display *display,
hotplug_mask_func hotplug_mask)
{
enum hpd_pin pin;
@@ -401,25 +402,25 @@ static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
return hotplug;
}
-static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
+static u32 intel_hpd_hotplug_enables(struct intel_display *display,
hotplug_enables_func hotplug_enables)
{
struct intel_encoder *encoder;
u32 hotplug = 0;
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
hotplug |= hotplug_enables(encoder);
return hotplug;
}
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
+u32 i9xx_hpd_irq_ack(struct intel_display *display)
{
u32 hotplug_status = 0, hotplug_status_mask;
int i;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
else
@@ -435,53 +436,51 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
* bits can itself generate a new hotplug interrupt :(
*/
for (i = 0; i < 10; i++) {
- u32 tmp = intel_uncore_read(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv)) & hotplug_status_mask;
+ u32 tmp = intel_de_read(display,
+ PORT_HOTPLUG_STAT(display)) & hotplug_status_mask;
if (tmp == 0)
return hotplug_status;
hotplug_status |= tmp;
- intel_uncore_write(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv),
- hotplug_status);
+ intel_de_write(display, PORT_HOTPLUG_STAT(display),
+ hotplug_status);
}
- drm_WARN_ONCE(&dev_priv->drm, 1,
+ drm_WARN_ONCE(display->drm, 1,
"PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT(dev_priv)));
+ intel_de_read(display, PORT_HOTPLUG_STAT(display)));
return hotplug_status;
}
-void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status)
{
- struct intel_display *display = &dev_priv->display;
u32 pin_mask = 0, long_mask = 0;
u32 hotplug_trigger;
- if (IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview)
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
else
hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
if (hotplug_trigger) {
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, hotplug_trigger,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
- if ((IS_G4X(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.g4x ||
+ display->platform.valleyview || display->platform.cherryview) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
intel_dp_aux_irq_handler(display);
}
-void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
@@ -491,7 +490,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
* zero. Not acking leads to "The master control interrupt lied (SDE)!"
* errors.
*/
- dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
+ dig_hotplug_reg = intel_de_read(display, PCH_PORT_HOTPLUG);
if (!hotplug_trigger) {
u32 mask = PORTA_HOTPLUG_STATUS_MASK |
PORTD_HOTPLUG_STATUS_MASK |
@@ -500,63 +499,61 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
dig_hotplug_reg &= ~mask;
}
- intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
+ intel_de_write(display, PCH_PORT_HOTPLUG, dig_hotplug_reg);
if (!hotplug_trigger)
return;
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir)
{
- struct intel_display *display = &i915->display;
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
u32 pin_mask = 0, long_mask = 0;
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
trigger_aux |= iir & XE2LPD_AUX_DDI_MASK;
for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
u32 val;
- if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger))
+ if (!(display->hotplug.hpd[pin] & hotplug_trigger))
continue;
pin_mask |= BIT(pin);
- val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin));
- intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val);
+ val = intel_de_read(display, XELPDP_PORT_HOTPLUG_CTL(pin));
+ intel_de_write(display, XELPDP_PORT_HOTPLUG_CTL(pin), val);
if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT))
long_mask |= BIT(pin);
}
if (pin_mask) {
- drm_dbg(&i915->drm,
- "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, pin_mask, long_mask);
+ drm_dbg_kms(display->drm,
+ "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, pin_mask, long_mask);
- intel_hpd_irq_handler(i915, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
if (trigger_aux)
intel_dp_aux_irq_handler(display);
if (!pin_mask && !trigger_aux)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Unexpected DE HPD/AUX interrupt 0x%08x\n", iir);
}
-void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void icp_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
u32 pin_mask = 0, long_mask = 0;
@@ -565,37 +562,36 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
u32 dig_hotplug_reg;
/* Locking due to DSI native GPIO sequences */
- spin_lock(&dev_priv->irq_lock);
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
- spin_unlock(&dev_priv->irq_lock);
+ spin_lock(&display->irq.lock);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_DDI, 0, 0);
+ spin_unlock(&display->irq.lock);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
if (tc_hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_TC, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
icp_tc_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_ICP)
intel_gmbus_irq_handler(display);
}
-void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+void spt_irq_handler(struct intel_display *display, u32 pch_iir)
{
- struct intel_display *display = &dev_priv->display;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -604,61 +600,61 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (hotplug_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
if (hotplug2_trigger) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG2, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.pch_hpd,
+ display->hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
intel_gmbus_irq_handler(display);
}
-void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger)
{
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
}
-void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir)
{
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
@@ -667,29 +663,29 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (trigger_tc) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (trigger_tbt) {
u32 dig_hotplug_reg;
- dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
+ dig_hotplug_reg = intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, 0, 0);
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ intel_get_hpd_pins(display, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
- dev_priv->display.hotplug.hpd,
+ display->hotplug.hpd,
gen11_port_hotplug_long_detect);
}
if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+ intel_hpd_irq_handler(display, pin_mask, long_mask);
else
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
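
Every handler above has the same shape: read-and-ack the dig_hotplug register, let intel_get_hpd_pins() translate the trigger bits into pin masks through a platform-specific long-pulse predicate, then pass pin_mask/long_mask to intel_hpd_irq_handler(). A rough userspace model of that accumulation step follows; the pin tables and bit positions are invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { HPD_PORT_TC1, HPD_PORT_TC2, HPD_NUM_PINS };

    /* invented layouts: trigger bit per pin, long-pulse bit per pin */
    static const uint32_t trigger_bit[HPD_NUM_PINS] = { 1u << 0,  1u << 1 };
    static const uint32_t long_bit[HPD_NUM_PINS]    = { 1u << 16, 1u << 17 };

    static bool long_detect(int pin, uint32_t dig_hotplug_reg)
    {
        return dig_hotplug_reg & long_bit[pin];
    }

    static void get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
                             uint32_t trigger, uint32_t dig_hotplug_reg)
    {
        for (int pin = 0; pin < HPD_NUM_PINS; pin++) {
            if (!(trigger & trigger_bit[pin]))
                continue;

            *pin_mask |= 1u << pin;             /* pin fired */
            if (long_detect(pin, dig_hotplug_reg))
                *long_mask |= 1u << pin;        /* ...with a long pulse */
        }
    }

    int main(void)
    {
        uint32_t pin_mask = 0, long_mask = 0;

        /* both pins triggered, only TC2 latched a long pulse */
        get_hpd_pins(&pin_mask, &long_mask, 0x3, 1u << 17);
        printf("pins 0x%x long 0x%x\n", pin_mask, long_mask); /* 0x3, 0x2 */
        return 0;
    }
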
@@ -711,7 +707,7 @@ static u32 ibx_hotplug_mask(enum hpd_pin hpd_pin)
static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
switch (encoder->hpd_pin) {
case HPD_PORT_A:
@@ -719,7 +715,7 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
- return HAS_PCH_LPT_LP(i915) ?
+ return HAS_PCH_LPT_LP(display) ?
PORTA_HOTPLUG_ENABLE : 0;
case HPD_PORT_B:
return PORTB_HOTPLUG_ENABLE |
@@ -735,37 +731,37 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec).
* The pulse duration bits are reserved on LPT+.
*/
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, ibx_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ibx_hotplug_enables));
}
static void ibx_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- ibx_hotplug_mask(encoder->hpd_pin),
- ibx_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ ibx_hotplug_mask(encoder->hpd_pin),
+ ibx_hotplug_enables(encoder));
}
-static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- ibx_hpd_detection_setup(dev_priv);
+ ibx_hpd_detection_setup(display);
}
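
All of the *_hpd_irq_setup() variants begin by deriving two masks from one per-pin table such as display->hotplug.pch_hpd: hotplug_irqs collects the interrupt bit of every pin the hardware exposes, while enabled_irqs covers only the pins that actually have an encoder attached. A small sketch of that derivation, under invented bit assignments:

    #include <stdint.h>
    #include <stdio.h>

    enum { HPD_PORT_A, HPD_PORT_B, HPD_PORT_C, HPD_NUM_PINS };

    /* per-pin interrupt bit table, like display->hotplug.pch_hpd */
    static const uint32_t hpd_bits[HPD_NUM_PINS] = { 1u << 24, 1u << 21, 1u << 22 };

    int main(void)
    {
        /* pins that actually have an encoder on this board */
        uint32_t present = (1u << HPD_PORT_B) | (1u << HPD_PORT_C);
        uint32_t hotplug_irqs = 0, enabled_irqs = 0;

        for (int pin = 0; pin < HPD_NUM_PINS; pin++) {
            hotplug_irqs |= hpd_bits[pin];          /* every pin the hw has */
            if (present & (1u << pin))
                enabled_irqs |= hpd_bits[pin];      /* only wired-up pins */
        }

        printf("hotplug 0x%08x enabled 0x%08x\n", hotplug_irqs, enabled_irqs);
        return 0;
    }
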
static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -806,36 +802,36 @@ static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
return icp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, icp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_ddi_hotplug_enables));
}
static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI,
- icp_ddi_hotplug_mask(encoder->hpd_pin),
- icp_ddi_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ icp_ddi_hotplug_mask(encoder->hpd_pin),
+ icp_ddi_hotplug_enables(encoder));
}
-static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, icp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, icp_tc_hotplug_enables));
}
static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC,
- icp_tc_hotplug_mask(encoder->hpd_pin),
- icp_tc_hotplug_enables(encoder));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ icp_tc_hotplug_mask(encoder->hpd_pin),
+ icp_tc_hotplug_enables(encoder));
}
static void icp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -844,23 +840,23 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder)
icp_tc_hpd_enable_detection(encoder);
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* We reduce the value to 250us to be able to detect SHPD when an external display
* is connected. This is also expected of us as stated in DP1.4a Table 3-4.
*/
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- icp_ddi_hpd_detection_setup(dev_priv);
- icp_tc_hpd_detection_setup(dev_priv);
+ icp_ddi_hpd_detection_setup(display);
+ icp_tc_hpd_detection_setup(display);
}
static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin)
@@ -883,88 +879,88 @@ static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
return gen11_hotplug_mask(encoder->hpd_pin);
}
-static void dg1_hpd_invert(struct drm_i915_private *i915)
+static void dg1_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
INVERT_DDIC_HPD |
INVERT_DDID_HPD);
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void dg1_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- dg1_hpd_invert(i915);
+ dg1_hpd_invert(display);
icp_hpd_enable_detection(encoder);
}
-static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void dg1_hpd_irq_setup(struct intel_display *display)
{
- dg1_hpd_invert(dev_priv);
- icp_hpd_irq_setup(dev_priv);
+ dg1_hpd_invert(display);
+ icp_hpd_irq_setup(display);
}
-static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
-static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void gen11_tbt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
- intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ intel_hpd_hotplug_mask(display, gen11_hotplug_mask),
+ intel_hpd_hotplug_enables(display, gen11_hotplug_enables));
}
static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL,
- gen11_hotplug_mask(encoder->hpd_pin),
- gen11_hotplug_enables(encoder));
+ intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
}
static void gen11_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
gen11_tc_hpd_enable_detection(encoder);
gen11_tbt_hpd_enable_detection(encoder);
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
icp_hpd_enable_detection(encoder);
}
-static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void gen11_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
- ~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
+ intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs,
+ ~enabled_irqs & hotplug_irqs);
+ intel_de_posting_read(display, GEN11_DE_HPD_IMR);
- gen11_tc_hpd_detection_setup(dev_priv);
- gen11_tbt_hpd_detection_setup(dev_priv);
+ gen11_tc_hpd_detection_setup(display);
+ gen11_tbt_hpd_detection_setup(display);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ icp_hpd_irq_setup(display);
}
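
The IMR update above unmasks exactly the enabled pins: the rmw clears every hotplug mask bit (a set bit in IMR means "masked") and then sets ~enabled_irqs & hotplug_irqs, i.e. the hotplug bits that should stay masked. The bit arithmetic, checked in plain C with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hotplug_irqs = 0x000000f0; /* all HPD IMR bits on this platform */
        uint32_t enabled_irqs = 0x00000030; /* pins that have an encoder */
        uint32_t imr = 0xffffffff;          /* reset state: everything masked */

        /* rmw(reg, clear=hotplug_irqs, set=~enabled_irqs & hotplug_irqs) */
        imr = (imr & ~hotplug_irqs) | (~enabled_irqs & hotplug_irqs);

        /* 0x30 now unmasked, 0xc0 stays masked, other bits untouched */
        printf("IMR 0x%08x\n", imr);        /* prints 0xffffffcf */
        return 0;
    }
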
static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1001,39 +997,39 @@ static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder)
return mtp_tc_hotplug_mask(encoder->hpd_pin);
}
-static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_ddi_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
- intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(display, mtp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_ddi_hotplug_enables));
}
static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_ddi_hotplug_mask(encoder->hpd_pin),
mtp_ddi_hotplug_enables(encoder));
}
-static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915)
+static void mtp_tc_hpd_detection_setup(struct intel_display *display)
{
- intel_de_rmw(i915, SHOTPLUG_CTL_TC,
- intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask),
- intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables));
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(display, mtp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(display, mtp_tc_hotplug_enables));
}
static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_DDI,
mtp_tc_hotplug_mask(encoder->hpd_pin),
mtp_tc_hotplug_enables(encoder));
}
-static void mtp_hpd_invert(struct drm_i915_private *i915)
+static void mtp_hpd_invert(struct intel_display *display)
{
u32 val = (INVERT_DDIA_HPD |
INVERT_DDIB_HPD |
@@ -1044,49 +1040,49 @@ static void mtp_hpd_invert(struct drm_i915_private *i915)
INVERT_TC4_HPD |
INVERT_DDID_HPD_MTP |
INVERT_DDIE_HPD);
- intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val);
+ intel_de_rmw(display, SOUTH_CHICKEN1, 0, val);
}
static void mtp_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- mtp_hpd_invert(i915);
+ mtp_hpd_invert(display);
mtp_ddi_hpd_enable_detection(encoder);
mtp_tc_hpd_enable_detection(encoder);
}
-static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
+static void mtp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
/*
* Use 250us here to align with the DP1.4a(Table 3-4) spec as to what the
* SHPD_FILTER_CNT value should be.
*/
- intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
- mtp_hpd_invert(i915);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ mtp_hpd_invert(display);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
-static void xe2lpd_sde_hpd_irq_setup(struct drm_i915_private *i915)
+static void xe2lpd_sde_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- mtp_ddi_hpd_detection_setup(i915);
- mtp_tc_hpd_detection_setup(i915);
+ mtp_ddi_hpd_detection_setup(display);
+ mtp_tc_hpd_detection_setup(display);
}
static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
@@ -1094,7 +1090,7 @@ static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4;
}
-static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
+static void _xelpdp_pica_hpd_detection_setup(struct intel_display *display,
enum hpd_pin hpd_pin, bool enable)
{
u32 mask = XELPDP_TBT_HOTPLUG_ENABLE |
@@ -1103,18 +1099,18 @@ static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
if (!is_xelpdp_pica_hpd_pin(hpd_pin))
return;
- intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
+ intel_de_rmw(display, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
mask, enable ? mask : 0);
}
static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true);
+ _xelpdp_pica_hpd_detection_setup(display, encoder->hpd_pin, true);
}
-static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
+static void xelpdp_pica_hpd_detection_setup(struct intel_display *display)
{
struct intel_encoder *encoder;
u32 available_pins = 0;
@@ -1122,11 +1118,11 @@ static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
enum hpd_pin pin;
BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
available_pins |= BIT(encoder->hpd_pin);
for_each_hpd_pin(pin)
- _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin));
+ _xelpdp_pica_hpd_detection_setup(display, pin, available_pins & BIT(pin));
}
static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
@@ -1135,23 +1131,23 @@ static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
mtp_hpd_enable_detection(encoder);
}
-static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
+static void xelpdp_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs,
+ intel_de_rmw(display, PICAINTERRUPT_IMR, hotplug_irqs,
~enabled_irqs & hotplug_irqs);
- intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR);
+ intel_de_posting_read(display, PICAINTERRUPT_IMR);
- xelpdp_pica_hpd_detection_setup(i915);
+ xelpdp_pica_hpd_detection_setup(display);
- if (INTEL_PCH_TYPE(i915) >= PCH_LNL)
- xe2lpd_sde_hpd_irq_setup(i915);
- else if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
- mtp_hpd_irq_setup(i915);
+ if (INTEL_PCH_TYPE(display) >= PCH_LNL)
+ xe2lpd_sde_hpd_irq_setup(display);
+ else if (INTEL_PCH_TYPE(display) >= PCH_MTL)
+ mtp_hpd_irq_setup(display);
}
static u32 spt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1190,57 +1186,57 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
return spt_hotplug2_mask(encoder->hpd_pin);
}
-static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct intel_display *display)
{
/* Display WA #1179 WaHardHangonHotPlug: cnp */
- if (HAS_PCH_CNP(dev_priv)) {
- intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ if (HAS_PCH_CNP(display)) {
+ intel_de_rmw(display, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
/* Enable digital hotplug on the PCH */
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, spt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug_enables));
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2,
- intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask),
- intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ intel_hpd_hotplug_mask(display, spt_hotplug2_mask),
+ intel_hpd_hotplug_enables(display, spt_hotplug2_enables));
}
static void spt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/* Display WA #1179 WaHardHangonHotPlug: cnp */
- if (HAS_PCH_CNP(i915)) {
- intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1,
- CHASSIS_CLK_REQ_DURATION_MASK,
- CHASSIS_CLK_REQ_DURATION(0xf));
+ if (HAS_PCH_CNP(display)) {
+ intel_de_rmw(display, SOUTH_CHICKEN1,
+ CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
}
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- spt_hotplug_mask(encoder->hpd_pin),
- spt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ spt_hotplug_mask(encoder->hpd_pin),
+ spt_hotplug_enables(encoder));
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2,
- spt_hotplug2_mask(encoder->hpd_pin),
- spt_hotplug2_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG2,
+ spt_hotplug2_mask(encoder->hpd_pin),
+ spt_hotplug2_enables(encoder));
}
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+ if (INTEL_PCH_TYPE(display) >= PCH_CNP)
+ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs);
- spt_hpd_detection_setup(dev_priv);
+ spt_hpd_detection_setup(display);
}
static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1265,44 +1261,44 @@ static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_detection_setup(struct intel_display *display)
{
/*
* Enable digital hotplug on the CPU, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
* The pulse duration bits are reserved on HSW+.
*/
- intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ intel_hpd_hotplug_mask(display, ilk_hotplug_mask),
+ intel_hpd_hotplug_enables(display, ilk_hotplug_enables));
}
static void ilk_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- ilk_hotplug_mask(encoder->hpd_pin),
- ilk_hotplug_enables(encoder));
+ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL,
+ ilk_hotplug_mask(encoder->hpd_pin),
+ ilk_hotplug_enables(encoder));
ibx_hpd_enable_detection(encoder);
}
-static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ilk_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- if (DISPLAY_VER(dev_priv) >= 8)
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ if (DISPLAY_VER(display) >= 8)
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
else
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ ilk_update_display_irq(display, hotplug_irqs, enabled_irqs);
- ilk_hpd_detection_setup(dev_priv);
+ ilk_hpd_detection_setup(display);
- ibx_hpd_irq_setup(dev_priv);
+ ibx_hpd_irq_setup(display);
}
static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin)
@@ -1344,80 +1340,80 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
}
}
-static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_detection_setup(struct intel_display *display)
{
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask),
- intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ intel_hpd_hotplug_mask(display, bxt_hotplug_mask),
+ intel_hpd_hotplug_enables(display, bxt_hotplug_enables));
}
static void bxt_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
- bxt_hotplug_mask(encoder->hpd_pin),
- bxt_hotplug_enables(encoder));
+ intel_de_rmw(display, PCH_PORT_HOTPLUG,
+ bxt_hotplug_mask(encoder->hpd_pin),
+ bxt_hotplug_enables(encoder));
}
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void bxt_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd);
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ bdw_update_port_irq(display, hotplug_irqs, enabled_irqs);
- bxt_hpd_detection_setup(dev_priv);
+ bxt_hpd_detection_setup(display);
}
-static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
+static void g45_hpd_peg_band_gap_wa(struct intel_display *display)
{
/*
* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
*/
- intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
+ intel_de_rmw(display, PEG_BAND_GAP_DATA, 0xf, 0xd);
}
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
- if (IS_G45(i915))
- g45_hpd_peg_band_gap_wa(i915);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* HPD sense and interrupt enable are one and the same */
- i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
+ i915_hotplug_interrupt_update(display, hotplug_en, hotplug_en);
}
-static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void i915_hpd_irq_setup(struct intel_display *display)
{
u32 hotplug_en;
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&display->irq.lock);
/*
* Note HDMI and DP share hotplug bits. Enable bits are the same for all
* generations.
*/
- hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
+ hotplug_en = intel_hpd_enabled_irqs(display, hpd_mask_i915);
/*
* Programming the CRT detection parameters tends to generate a spurious
* hotplug event about three seconds later. So just do it once.
*/
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- if (IS_G45(dev_priv))
- g45_hpd_peg_band_gap_wa(dev_priv);
+ if (display->platform.g45)
+ g45_hpd_peg_band_gap_wa(display);
/* Ignore TV since it's buggy */
- i915_hotplug_interrupt_update_locked(dev_priv,
+ i915_hotplug_interrupt_update_locked(display,
HOTPLUG_INT_EN_MASK |
CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
CRT_HOTPLUG_ACTIVATION_PERIOD_64,
@@ -1426,7 +1422,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
struct intel_hotplug_funcs {
/* Enable HPD sense and interrupts for all present encoders */
- void (*hpd_irq_setup)(struct drm_i915_private *i915);
+ void (*hpd_irq_setup)(struct intel_display *display);
/* Enable HPD sense for a single encoder */
void (*hpd_enable_detection)(struct intel_encoder *encoder);
};
@@ -1449,47 +1445,47 @@ HPD_FUNCS(ilk);
void intel_hpd_enable_detection(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_enable_detection(encoder);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_enable_detection(encoder);
}
-void intel_hpd_irq_setup(struct drm_i915_private *i915)
+void intel_hpd_irq_setup(struct intel_display *display)
{
- if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
- !i915->display.irq.vlv_display_irqs_enabled)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ !display->irq.vlv_display_irqs_enabled)
return;
- if (i915->display.funcs.hotplug)
- i915->display.funcs.hotplug->hpd_irq_setup(i915);
+ if (display->funcs.hotplug)
+ display->funcs.hotplug->hpd_irq_setup(display);
}
-void intel_hotplug_irq_init(struct drm_i915_private *i915)
+void intel_hotplug_irq_init(struct intel_display *display)
{
- intel_hpd_init_pins(i915);
+ intel_hpd_init_pins(display);
- intel_hpd_init_early(i915);
+ intel_hpd_init_early(display);
- if (HAS_GMCH(i915)) {
- if (I915_HAS_HOTPLUG(i915))
- i915->display.funcs.hotplug = &i915_hpd_funcs;
+ if (HAS_GMCH(display)) {
+ if (HAS_HOTPLUG(display))
+ display->funcs.hotplug = &i915_hpd_funcs;
} else {
- if (HAS_PCH_DG2(i915))
- i915->display.funcs.hotplug = &icp_hpd_funcs;
- else if (HAS_PCH_DG1(i915))
- i915->display.funcs.hotplug = &dg1_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 14)
- i915->display.funcs.hotplug = &xelpdp_hpd_funcs;
- else if (DISPLAY_VER(i915) >= 11)
- i915->display.funcs.hotplug = &gen11_hpd_funcs;
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->display.funcs.hotplug = &bxt_hpd_funcs;
- else if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
- i915->display.funcs.hotplug = &icp_hpd_funcs;
- else if (INTEL_PCH_TYPE(i915) >= PCH_SPT)
- i915->display.funcs.hotplug = &spt_hpd_funcs;
+ if (HAS_PCH_DG2(display))
+ display->funcs.hotplug = &icp_hpd_funcs;
+ else if (HAS_PCH_DG1(display))
+ display->funcs.hotplug = &dg1_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 14)
+ display->funcs.hotplug = &xelpdp_hpd_funcs;
+ else if (DISPLAY_VER(display) >= 11)
+ display->funcs.hotplug = &gen11_hpd_funcs;
+ else if (display->platform.geminilake || display->platform.broxton)
+ display->funcs.hotplug = &bxt_hpd_funcs;
+ else if (INTEL_PCH_TYPE(display) >= PCH_ICP)
+ display->funcs.hotplug = &icp_hpd_funcs;
+ else if (INTEL_PCH_TYPE(display) >= PCH_SPT)
+ display->funcs.hotplug = &spt_hpd_funcs;
else
- i915->display.funcs.hotplug = &ilk_hpd_funcs;
+ display->funcs.hotplug = &ilk_hpd_funcs;
}
}
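
intel_hotplug_irq_init() resolves the hotplug vtable once at init time, testing the most specific platform first and falling back to the generic ILK functions, so later calls only ever go through display->funcs.hotplug. A stripped-down model of the same dispatch pattern (the types and numeric platform codes are invented):

    #include <stdio.h>

    struct hotplug_funcs {
        void (*hpd_irq_setup)(void);
    };

    static void icp_hpd_irq_setup(void) { puts("icp setup"); }
    static void spt_hpd_irq_setup(void) { puts("spt setup"); }
    static void ilk_hpd_irq_setup(void) { puts("ilk setup"); }

    static const struct hotplug_funcs icp_funcs = { icp_hpd_irq_setup };
    static const struct hotplug_funcs spt_funcs = { spt_hpd_irq_setup };
    static const struct hotplug_funcs ilk_funcs = { ilk_hpd_irq_setup };

    int main(void)
    {
        int pch_type = 3; /* invented: 3 = ICP-class, 2 = SPT-class, else older */
        const struct hotplug_funcs *funcs;

        /* most specific match first, generic fallback last */
        if (pch_type >= 3)
            funcs = &icp_funcs;
        else if (pch_type >= 2)
            funcs = &spt_funcs;
        else
            funcs = &ilk_funcs;

        funcs->hpd_irq_setup(); /* later code only sees the vtable */
        return 0;
    }
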
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
index e4db752df096..9063bb02a2e9 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h
@@ -8,28 +8,28 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_display;
struct intel_encoder;
-u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915);
+u32 i9xx_hpd_irq_ack(struct intel_display *display);
-void i9xx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_status);
-void ibx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void ilk_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir);
-void bxt_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger);
-void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir);
-void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
-void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir);
+void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status);
+void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void gen11_hpd_irq_handler(struct intel_display *display, u32 iir);
+void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger);
+void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir);
+void icp_irq_handler(struct intel_display *display, u32 pch_iir);
+void spt_irq_handler(struct intel_display *display, u32 pch_iir);
-void i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update_locked(struct intel_display *display,
u32 mask, u32 bits);
-void i915_hotplug_interrupt_update(struct drm_i915_private *i915,
+void i915_hotplug_interrupt_update(struct intel_display *display,
u32 mask, u32 bits);
void intel_hpd_enable_detection(struct intel_encoder *encoder);
-void intel_hpd_irq_setup(struct drm_i915_private *i915);
+void intel_hpd_irq_setup(struct intel_display *display);
-void intel_hotplug_irq_init(struct drm_i915_private *i915);
+void intel_hotplug_irq_init(struct intel_display *display);
#endif /* __INTEL_HOTPLUG_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index fb6b84f6a81d..dc454420c134 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display.h"
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index 86cc03a4413c..aad52d0d83e1 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_crtc.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 59551c8414c2..666148a14522 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -179,7 +179,7 @@ static int lpe_audio_irq_init(struct intel_display *display)
handle_simple_irq,
"hdmi_lpe_audio_irq_handler");
- return irq_set_chip_data(irq, dev_priv);
+ return 0;
}
static bool lpe_audio_detect(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 63c1afa30b05..f94b7eeae20f 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,6 +27,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "i915_reg.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 19f52d1659fa..8ce7c630da52 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,9 +37,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
@@ -84,15 +84,15 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base);
}
-bool intel_lvds_port_enabled(struct drm_i915_private *i915,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
u32 val;
- val = intel_de_read(i915, lvds_reg);
+ val = intel_de_read(display, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(i915))
+ if (HAS_PCH_CPT(display))
*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
else
*pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
@@ -104,7 +104,6 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
bool ret;
@@ -113,7 +112,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
if (!wakeref)
return false;
- ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe);
+ ret = intel_lvds_port_enabled(display, lvds_encoder->reg, pipe);
intel_display_power_put(display, encoder->power_domain, wakeref);
@@ -123,13 +122,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
u32 tmp, flags = 0;
crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS);
- tmp = intel_de_read(dev_priv, lvds_encoder->reg);
+ tmp = intel_de_read(display, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@@ -141,13 +140,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.flags |= flags;
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
crtc_state->gmch_pfit.lvds_border_bits =
tmp & LVDS_BORDER_ENABLE;
/* gen2/3 store dither state in pfit control, needs to match */
- if (DISPLAY_VER(dev_priv) < 4) {
- tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
+ if (DISPLAY_VER(display) < 4) {
+ tmp = intel_de_read(display, PFIT_CONTROL(display));
crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE;
}
@@ -155,24 +154,24 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
-static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_get_hw_state(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- pps->powerdown_on_reset = intel_de_read(dev_priv,
- PP_CONTROL(dev_priv, 0)) & PANEL_POWER_RESET;
+ pps->powerdown_on_reset = intel_de_read(display,
+ PP_CONTROL(display, 0)) & PANEL_POWER_RESET;
- val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_ON_DELAYS(display, 0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0));
+ val = intel_de_read(display, PP_OFF_DELAYS(display, 0));
pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
- val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0));
+ val = intel_de_read(display, PP_DIVISOR(display, 0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
@@ -185,12 +184,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->delays.power_cycle = val * 1000;
- if (DISPLAY_VER(dev_priv) < 5 &&
+ if (DISPLAY_VER(display) < 5 &&
pps->delays.power_up == 0 &&
pps->delays.backlight_on == 0 &&
pps->delays.power_down == 0 &&
pps->delays.backlight_off == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Panel power timings uninitialized, "
"setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
@@ -201,7 +200,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->delays.backlight_off = 200 * 10;
}
- drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
+ drm_dbg(display->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
"divider %d port %d powerdown_on_reset %d\n",
pps->delays.power_up, pps->delays.power_down,
pps->delays.power_cycle, pps->delays.backlight_on,
@@ -209,28 +208,28 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->port, pps->powerdown_on_reset);
}
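
One detail worth keeping in mind when reading the PPS conversion above: most delay fields are tracked in 100 us units, but the register's power-cycle field counts 100 ms steps, hence the val * 1000 conversion, and the 40 ms / 200 ms defaults become 400 and 2000 in the same units. A quick arithmetic check with an example field value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t field = 6;                     /* register field: 6 x 100 ms */
        uint32_t power_cycle = field * 1000;    /* 100 ms = 1000 x 100 us */

        printf("%u x100ms = %u x100us (%u ms)\n",
               field, power_cycle, power_cycle / 10); /* 6000 x100us = 600 ms */

        /* the 40 ms / 200 ms defaults in the same 100 us units */
        printf("T2 = %u, T5 = %u\n", 40 * 10, 200 * 10);
        return 0;
    }
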
-static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+static void intel_lvds_pps_init_hw(struct intel_display *display,
struct intel_lvds_pps *pps)
{
u32 val;
- val = intel_de_read(dev_priv, PP_CONTROL(dev_priv, 0));
- drm_WARN_ON(&dev_priv->drm,
+ val = intel_de_read(display, PP_CONTROL(display, 0));
+ drm_WARN_ON(display->drm,
(val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
if (pps->powerdown_on_reset)
val |= PANEL_POWER_RESET;
- intel_de_write(dev_priv, PP_CONTROL(dev_priv, 0), val);
+ intel_de_write(display, PP_CONTROL(display, 0), val);
- intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_ON_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) |
REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on));
- intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0),
+ intel_de_write(display, PP_OFF_DELAYS(display, 0),
REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) |
REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off));
- intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0),
+ intel_de_write(display, PP_DIVISOR(display, 0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1));
@@ -243,25 +242,24 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
{
struct intel_display *display = to_intel_display(state);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
assert_fdi_rx_pll_disabled(display, pipe);
assert_shared_dpll_disabled(display, crtc_state->shared_dpll);
} else {
assert_pll_disabled(display, pipe);
}
- intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_init_hw(display, &lvds_encoder->init_pps);
temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(i915)) {
+ if (HAS_PCH_CPT(display)) {
temp &= ~LVDS_PIPE_SEL_MASK_CPT;
temp |= LVDS_PIPE_SEL_CPT(pipe);
} else {
@@ -296,7 +294,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the TRANSCONF reg.
*/
- if (DISPLAY_VER(i915) == 4) {
+ if (DISPLAY_VER(display) == 4) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
@@ -312,7 +310,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- intel_de_write(i915, lvds_encoder->reg, temp);
+ intel_de_write(display, lvds_encoder->reg, temp);
}
/*
@@ -323,16 +321,16 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN);
+ intel_de_rmw(display, lvds_encoder->reg, 0, LVDS_PORT_EN);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), 0, PANEL_POWER_ON);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON);
+ intel_de_posting_read(display, lvds_encoder->reg);
- if (intel_de_wait_for_set(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000))
+ drm_err(display->drm,
"timed out waiting for panel to power on\n");
intel_backlight_enable(crtc_state, conn_state);
@@ -343,16 +341,16 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, 0), PANEL_POWER_ON, 0);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_ON, 1000))
- drm_err(&dev_priv->drm,
+ intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0);
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000))
+ drm_err(display->drm,
"timed out waiting for panel to power off\n");
- intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0);
- intel_de_posting_read(dev_priv, lvds_encoder->reg);
+ intel_de_rmw(display, lvds_encoder->reg, LVDS_PORT_EN, 0);
+ intel_de_posting_read(display, lvds_encoder->reg);
}
static void gmch_disable_lvds(struct intel_atomic_state *state,
@@ -384,10 +382,10 @@ static void pch_post_disable_lvds(struct intel_atomic_state *state,
static void intel_lvds_shutdown(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_clear(dev_priv, PP_STATUS(dev_priv, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
+ drm_err(display->drm,
"timed out waiting for panel power cycle delay\n");
}
@@ -420,7 +418,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct intel_connector *connector = lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@@ -429,12 +427,12 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
int ret;
/* Should never happen!! */
- if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) {
- drm_err(&i915->drm, "Can't support LVDS on pipe A\n");
+ if (DISPLAY_VER(display) < 4 && crtc->pipe == 0) {
+ drm_err(display->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
@@ -447,7 +445,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"forcing display bpp (was %d) to LVDS (%d)\n",
crtc_state->pipe_bpp, lvds_bpp);
crtc_state->pipe_bpp = lvds_bpp;
@@ -775,11 +773,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS)
return encoder;
}
@@ -787,24 +785,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915)
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_i915_private *i915)
+bool intel_is_dual_link_lvds(struct intel_display *display)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(i915);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(display);
return encoder && to_lvds_encoder(encoder)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
- struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev);
+ struct intel_display *display = to_intel_display(&lvds_encoder->base);
struct intel_connector *connector = lvds_encoder->attached_connector;
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
unsigned int val;
/* use the module option value if specified */
- if (i915->display.params.lvds_channel_mode > 0)
- return i915->display.params.lvds_channel_mode == 2;
+ if (display->params.lvds_channel_mode > 0)
+ return display->params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
@@ -819,8 +817,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = intel_de_read(i915, lvds_encoder->reg);
- if (HAS_PCH_CPT(i915))
+ val = intel_de_read(display, lvds_encoder->reg);
+ if (HAS_PCH_CPT(display))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
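
compute_is_dual_link_lvds() works through three sources in order: the lvds_channel_mode module parameter, the fixed mode's pixel clock (single-channel LVDS tops out at 112 MHz), and finally the LVDS register state left behind by the BIOS. The clock rule in isolation, as a runnable check with example mode clocks:

    #include <stdbool.h>
    #include <stdio.h>

    /* single-channel LVDS is limited to a 112 MHz pixel clock */
    static bool needs_dual_link(int clock_khz)
    {
        return clock_khz > 112999;
    }

    int main(void)
    {
        printf("154000 kHz (1920x1200@60): %s link\n",
               needs_dual_link(154000) ? "dual" : "single");
        printf(" 71000 kHz (1280x800@60): %s link\n",
               needs_dual_link(71000) ? "dual" : "single");
        return 0;
    }
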
@@ -837,14 +835,13 @@ static void intel_lvds_add_properties(struct drm_connector *connector)
/**
* intel_lvds_init - setup LVDS connectors on this device
- * @i915: i915 device
+ * @display: display device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_i915_private *i915)
+void intel_lvds_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_lvds_encoder *lvds_encoder;
struct intel_connector *connector;
const struct drm_edid *drm_edid;
@@ -855,25 +852,25 @@ void intel_lvds_init(struct drm_i915_private *i915)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support,
+ drm_WARN(display->drm, !display->vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!i915->display.vbt.int_lvds_support) {
- drm_dbg_kms(&i915->drm,
+ if (!display->vbt.int_lvds_support) {
+ drm_dbg_kms(display->drm,
"Internal LVDS support disabled by VBT\n");
return;
}
- if (HAS_PCH_SPLIT(i915))
+ if (HAS_PCH_SPLIT(display))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
- lvds = intel_de_read(i915, lvds_reg);
+ lvds = intel_de_read(display, lvds_reg);
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
}
@@ -881,11 +878,11 @@ void intel_lvds_init(struct drm_i915_private *i915)
ddc_pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(display, &ddc_pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT\n");
return;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"LVDS is not present in VBT, but enabled anyway\n");
}
@@ -902,18 +899,18 @@ void intel_lvds_init(struct drm_i915_private *i915)
lvds_encoder->attached_connector = connector;
encoder = &lvds_encoder->base;
- drm_connector_init_with_ddc(&i915->drm, &connector->base,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
intel_gmbus_get_adapter(display, ddc_pin));
- drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
encoder->enable = intel_enable_lvds;
encoder->pre_enable = intel_pre_enable_lvds;
encoder->compute_config = intel_lvds_compute_config;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
encoder->disable = pch_disable_lvds;
encoder->post_disable = pch_post_disable_lvds;
} else {
@@ -931,7 +928,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
encoder->port = PORT_NONE;
encoder->cloneable = 0;
- if (DISPLAY_VER(i915) < 4)
+ if (DISPLAY_VER(display) < 4)
encoder->pipe_mask = BIT(PIPE_B);
else
encoder->pipe_mask = ~0;
@@ -943,7 +940,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_lvds_add_properties(&connector->base);
- intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps);
+ intel_lvds_pps_get_hw_state(display, &lvds_encoder->init_pps);
lvds_encoder->init_lvds_val = lvds;
/*
@@ -958,7 +955,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
drm_edid = drm_edid_read_switcheroo(&connector->base, connector->base.ddc);
else
@@ -991,7 +988,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
if (!intel_panel_preferred_fixed_mode(connector))
intel_panel_add_encoder_fixed_mode(connector, encoder);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/* If we still don't have a mode after all that, give up. */
if (!intel_panel_preferred_fixed_mode(connector))
@@ -1002,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
intel_backlight_setup(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n",
+ drm_dbg_kms(display->drm, "detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -1010,7 +1007,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
return;
failed:
- drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n");
+ drm_dbg_kms(display->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(&connector->base);
drm_encoder_cleanup(&encoder->base);
kfree(lvds_encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h
index 7ad5fa9c0434..a6db1706a97c 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.h
+++ b/drivers/gpu/drm/i915/display/intel_lvds.h
@@ -11,28 +11,28 @@
#include "i915_reg_defs.h"
enum pipe;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
-bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+void intel_lvds_init(struct intel_display *display);
+struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display);
+bool intel_is_dual_link_lvds(struct intel_display *display);
#else
-static inline bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool intel_lvds_port_enabled(struct intel_display *display,
i915_reg_t lvds_reg, enum pipe *pipe)
{
return false;
}
-static inline void intel_lvds_init(struct drm_i915_private *dev_priv)
+static inline void intel_lvds_init(struct intel_display *display)
{
}
-static inline struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+static inline struct intel_encoder *intel_get_lvds_encoder(struct intel_display *display)
{
return NULL;
}
-static inline bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+static inline bool intel_is_dual_link_lvds(struct intel_display *display)
{
return false;
}
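
The header keeps i915's usual compile-out convention: when built without I915, every entry point collapses to a static inline stub returning a safe default, so callers never need #ifdef guards of their own. A minimal sketch of the pattern, with hypothetical "foo" names that are not part of the i915 API:

struct foo_device;

#ifdef CONFIG_FOO
int foo_init(struct foo_device *fdev);
#else
/* feature compiled out: the stub keeps callers ifdef-free */
static inline int foo_init(struct foo_device *fdev)
{
	return 0;
}
#endif
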
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 312b21b1ab59..0325b0c9506d 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -6,11 +6,11 @@
* state.
*/
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
@@ -31,13 +31,14 @@
#include "intel_pmdemand.h"
#include "intel_tc.h"
#include "intel_vblank.h"
+#include "intel_vga.h"
#include "intel_wm.h"
#include "skl_watermark.h"
static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
@@ -48,7 +49,7 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
if (!crtc_state->hw.active)
return;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -56,9 +57,9 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
intel_plane_disable_noatomic(crtc, plane);
}
- state = drm_atomic_state_alloc(&i915->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"failed to disable [CRTC:%d:%s], out of memory",
crtc->base.base.id, crtc->base.name);
return;
@@ -68,7 +69,7 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
to_intel_atomic_state(state)->internal = true;
/* Everything's already locked, -EDEADLK can't happen. */
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc,
BIT(pipe) |
intel_crtc_joiner_secondary_pipes(crtc_state)) {
struct intel_crtc_state *temp_crtc_state =
@@ -77,14 +78,14 @@ static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc,
ret = drm_atomic_add_affected_connectors(state, &temp_crtc->base);
- drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
+ drm_WARN_ON(display->drm, IS_ERR(temp_crtc_state) || ret);
}
- i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
+ display->funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.base.id, crtc->base.name);
@@ -118,13 +119,12 @@ static void set_encoder_for_connector(struct intel_connector *connector,
static void reset_encoder_connector_state(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.encoder != &encoder->base)
continue;
@@ -143,10 +143,10 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
static void reset_crtc_encoder_state(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_encoder *encoder;
- for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) {
+ for_each_encoder_on_crtc(display->drm, &crtc->base, encoder) {
reset_encoder_connector_state(encoder);
encoder->base.crtc = NULL;
}
@@ -155,9 +155,8 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc)
static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -169,7 +168,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
reset_crtc_encoder_state(crtc);
intel_fbc_disable(crtc);
- intel_update_watermarks(i915);
+ intel_update_watermarks(display);
intel_display_power_put_all_in_set(display, &crtc->enabled_power_domains);
@@ -184,13 +183,13 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
* Return all the pipes using a transcoder in @transcoder_mask.
* For joiner configs return only the joiner primary.
*/
-static u8 get_transcoder_pipes(struct drm_i915_private *i915,
+static u8 get_transcoder_pipes(struct intel_display *display,
u8 transcoder_mask)
{
struct intel_crtc *temp_crtc;
u8 pipes = 0;
- for_each_intel_crtc(&i915->drm, temp_crtc) {
+ for_each_intel_crtc(display->drm, temp_crtc) {
struct intel_crtc_state *temp_crtc_state =
to_intel_crtc_state(temp_crtc->base.state);
@@ -215,7 +214,6 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
u8 *master_pipe_mask, u8 *slave_pipes_mask)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_crtc *master_crtc;
@@ -234,20 +232,20 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
else
master_transcoder = crtc_state->master_transcoder;
- *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder));
- drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask));
+ *master_pipe_mask = get_transcoder_pipes(display, BIT(master_transcoder));
+ drm_WARN_ON(display->drm, !is_power_of_2(*master_pipe_mask));
master_crtc = intel_crtc_for_pipe(display, ffs(*master_pipe_mask) - 1);
master_crtc_state = to_intel_crtc_state(master_crtc->base.state);
- *slave_pipes_mask = get_transcoder_pipes(i915, master_crtc_state->sync_mode_slaves_mask);
+ *slave_pipes_mask = get_transcoder_pipes(display, master_crtc_state->sync_mode_slaves_mask);
}
-static u8 get_joiner_secondary_pipes(struct drm_i915_private *i915, u8 primary_pipes_mask)
+static u8 get_joiner_secondary_pipes(struct intel_display *display, u8 primary_pipes_mask)
{
struct intel_crtc *primary_crtc;
u8 pipes = 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, primary_crtc, primary_pipes_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, primary_crtc, primary_pipes_mask) {
struct intel_crtc_state *primary_crtc_state =
to_intel_crtc_state(primary_crtc->base.state);
@@ -260,45 +258,45 @@ static u8 get_joiner_secondary_pipes(struct drm_i915_private *i915, u8 primary_p
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc *temp_crtc;
u8 portsync_master_mask;
u8 portsync_slaves_mask;
u8 joiner_secondaries_mask;
- struct intel_crtc *temp_crtc;
/* TODO: Add support for MST */
get_portsync_pipes(crtc, &portsync_master_mask, &portsync_slaves_mask);
- joiner_secondaries_mask = get_joiner_secondary_pipes(i915,
+ joiner_secondaries_mask = get_joiner_secondary_pipes(display,
portsync_master_mask |
portsync_slaves_mask);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
portsync_master_mask & portsync_slaves_mask ||
portsync_master_mask & joiner_secondaries_mask ||
portsync_slaves_mask & joiner_secondaries_mask);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, joiner_secondaries_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, joiner_secondaries_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_slaves_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, portsync_slaves_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_master_mask)
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc, portsync_master_mask)
intel_crtc_disable_noatomic_begin(temp_crtc, ctx);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, temp_crtc,
joiner_secondaries_mask |
portsync_slaves_mask |
portsync_master_mask)
intel_crtc_disable_noatomic_complete(temp_crtc);
}
-static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915)
+static void intel_modeset_update_connector_atomic_state(struct intel_display *display)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state = connector->base.state;
struct intel_encoder *encoder =
@@ -320,7 +318,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_is_joiner_secondary(crtc_state))
return;
@@ -333,7 +331,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
- if (DISPLAY_INFO(i915)->color.degamma_lut_size) {
+ if (DISPLAY_INFO(display)->color.degamma_lut_size) {
/* assume 1:1 mapping */
drm_property_replace_blob(&crtc_state->hw.degamma_lut,
crtc_state->pre_csc_lut);
@@ -348,7 +346,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
* to gamma_lut as that is the only valid source of LUTs
* in the uapi.
*/
- drm_WARN_ON(&i915->drm, crtc_state->post_csc_lut &&
+ drm_WARN_ON(display->drm, crtc_state->post_csc_lut &&
crtc_state->pre_csc_lut);
drm_property_replace_blob(&crtc_state->hw.degamma_lut,
@@ -367,15 +365,14 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
}
static void
-intel_sanitize_plane_mapping(struct drm_i915_private *i915)
+intel_sanitize_plane_mapping(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
- if (DISPLAY_VER(i915) >= 4)
+ if (DISPLAY_VER(display) >= 4)
return;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_crtc *plane_crtc;
@@ -387,7 +384,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *i915)
if (pipe == crtc->pipe)
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
plane->base.base.id, plane->base.name);
@@ -424,12 +421,12 @@ static bool intel_crtc_needs_link_reset(struct intel_crtc *crtc)
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
struct intel_connector *found_connector = NULL;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (&encoder->base == connector->base.encoder) {
found_connector = connector;
@@ -467,7 +464,7 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state
static bool intel_sanitize_crtc(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
bool needs_link_reset;
@@ -475,7 +472,7 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
struct intel_plane *plane;
/* Disable everything but the primary plane */
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -516,7 +513,7 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
return true;
}
-static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
+static void intel_sanitize_all_crtcs(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *crtc;
@@ -531,7 +528,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
for (;;) {
u32 old_mask = crtcs_forced_off;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
u32 crtc_mask = drm_crtc_mask(&crtc->base);
if (crtcs_forced_off & crtc_mask)
@@ -544,7 +541,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
break;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -554,7 +551,7 @@ static void intel_sanitize_all_crtcs(struct drm_i915_private *i915,
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/*
* Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
@@ -566,7 +563,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* without several WARNs, but for now let's take the easy
* road.
*/
- return IS_SANDYBRIDGE(i915) &&
+ return display->platform.sandybridge &&
crtc_state->hw.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
@@ -575,13 +572,12 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc_state *crtc_state = crtc ?
to_intel_crtc_state(crtc->base.state) : NULL;
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
/*
* We need to check both for a crtc link (meaning that the encoder is
@@ -592,7 +588,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS has misprogrammed the hardware. Disabling pipe %c\n",
pipe_name(crtc->pipe));
has_active_crtc = false;
@@ -600,7 +596,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -617,7 +613,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (crtc_state) {
struct drm_encoder *best_encoder;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
@@ -651,18 +647,17 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
/* FIXME read out full plane state for all planes */
-static void readout_plane_state(struct drm_i915_private *i915)
+static void readout_plane_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
- for_each_intel_plane(&i915->drm, plane) {
+ for_each_intel_plane(display->drm, plane) {
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct intel_crtc_state *crtc_state;
@@ -676,13 +671,13 @@ static void readout_plane_state(struct drm_i915_private *i915)
intel_set_plane_visible(crtc_state, plane_state, visible);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
plane->base.base.id, plane->base.name,
str_enabled_disabled(visible), pipe_name(pipe));
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -690,18 +685,17 @@ static void readout_plane_state(struct drm_i915_private *i915)
}
}
-static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
+static void intel_modeset_readout_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_pmdemand_state *pmdemand_state =
- to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
+ to_intel_pmdemand_state(display->pmdemand.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -716,15 +710,15 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
crtc->base.enabled = crtc_state->hw.enable;
crtc->active = crtc_state->hw.active;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
str_enabled_disabled(crtc_state->hw.active));
}
- readout_plane_state(i915);
+ readout_plane_state(display);
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_crtc_state *crtc_state = NULL;
pipe = 0;
@@ -743,7 +737,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* on readout the encoder should be linked to the joiner primary */
WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));
- for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
+ for_each_intel_crtc_in_pipe_mask(display->drm, secondary_crtc,
intel_crtc_joiner_secondary_pipes(crtc_state)) {
struct intel_crtc_state *secondary_crtc_state;
@@ -766,7 +760,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
if (encoder->sync_state)
encoder->sync_state(encoder, crtc_state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
encoder->base.base.id, encoder->base.name,
str_enabled_disabled(encoder->base.crtc),
@@ -775,7 +769,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
intel_dpll_readout_hw_state(display);
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct intel_crtc_state *crtc_state = NULL;
@@ -809,37 +803,37 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
if (connector->sync_state)
connector->sync_state(connector, crtc_state);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
str_enabled_disabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
- if (crtc_state->hw.active) {
- /*
- * The initial mode needs to be set in order to keep
- * the atomic core happy. It wants a valid mode if the
- * crtc's enabled, so we do the above call.
- *
- * But we don't set all the derived state fully, hence
- * set a flag to indicate that a full recalculation is
- * needed on the next commit.
- */
- crtc_state->inherited = true;
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc's enabled, so we do the above call.
+ *
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
+ */
+ crtc_state->inherited = true;
+ if (crtc_state->hw.active) {
intel_crtc_update_active_timings(crtc_state,
crtc_state->vrr.enable);
intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -855,14 +849,14 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
* use plane->min_cdclk() :(
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
- if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10)
+ if (crtc_state->double_wide || DISPLAY_VER(display) >= 10)
crtc_state->min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
crtc_state->min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_cdclk %d kHz\n",
plane->base.base.id, plane->base.name,
crtc_state->min_cdclk[plane->id]);
@@ -874,7 +868,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
/* TODO move here (or even earlier?) on all platforms */
if (DISPLAY_VER(display) >= 9)
- intel_wm_get_hw_state(i915);
+ intel_wm_get_hw_state(display);
intel_bw_update_hw_state(display);
intel_cdclk_update_hw_state(display);
@@ -883,11 +877,11 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
}
static void
-get_encoder_power_domains(struct drm_i915_private *i915)
+get_encoder_power_domains(struct intel_display *display)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_crtc_state *crtc_state;
if (!encoder->get_power_domains)
@@ -905,49 +899,51 @@ get_encoder_power_domains(struct drm_i915_private *i915)
}
}
-static void intel_early_display_was(struct drm_i915_private *i915)
+static void intel_early_display_was(struct intel_display *display)
{
/*
* Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
- if (IS_DISPLAY_VER(i915, 10, 12))
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
+ if (IS_DISPLAY_VER(display, 10, 12))
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
- if (IS_HASWELL(i915))
- intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
+ if (display->platform.haswell)
+ intel_de_rmw(display, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES);
- if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ if (display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) {
/* Display WA #1142:kbl,cfl,cml */
- intel_de_rmw(i915, CHICKEN_PAR1_1,
+ intel_de_rmw(display, CHICKEN_PAR1_1,
KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
- intel_de_rmw(i915, CHICKEN_MISC_2,
+ intel_de_rmw(display, CHICKEN_MISC_2,
KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
KBL_ARB_FILL_SPARE_14);
}
}
-void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+void intel_modeset_setup_hw_state(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx)
{
- struct intel_display *display = &i915->display;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
- intel_early_display_was(i915);
- intel_modeset_readout_hw_state(i915);
+ intel_early_display_was(display);
+ intel_vga_disable(display);
+
+ intel_modeset_readout_hw_state(display);
/* HW state is read out, now we need to sanitize this mess. */
- get_encoder_power_domains(i915);
+ get_encoder_power_domains(display);
- intel_pch_sanitize(i915);
+ intel_pch_sanitize(display);
intel_cmtg_sanitize(display);
@@ -955,7 +951,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
*/
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -969,35 +965,35 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
}
}
- intel_fbc_sanitize(&i915->display);
+ intel_fbc_sanitize(display);
- intel_sanitize_plane_mapping(i915);
+ intel_sanitize_plane_mapping(display);
- for_each_intel_encoder(&i915->drm, encoder)
+ for_each_intel_encoder(display->drm, encoder)
intel_sanitize_encoder(encoder);
/*
* Sanitizing CRTCs needs their connector atomic state to be
* up-to-date, so ensure that already here.
*/
- intel_modeset_update_connector_atomic_state(i915);
+ intel_modeset_update_connector_atomic_state(display);
- intel_sanitize_all_crtcs(i915, ctx);
+ intel_sanitize_all_crtcs(display, ctx);
intel_dpll_sanitize_state(display);
/* TODO move earlier on all platforms */
if (DISPLAY_VER(display) < 9)
- intel_wm_get_hw_state(i915);
- intel_wm_sanitize(i915);
+ intel_wm_get_hw_state(display);
+ intel_wm_sanitize(display);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_power_domain_mask put_domains;
intel_modeset_get_crtc_power_domains(crtc_state, &put_domains);
- if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ if (drm_WARN_ON(display->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
intel_modeset_put_crtc_power_domains(crtc, &put_domains);
}
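
The mechanical core of the conversion repeats in every hunk above: each function derives struct intel_display directly from the object it already holds via to_intel_display(), then reaches the drm_device as display->drm instead of &i915->drm (note display->drm is a pointer, hence display->drm->mode_config where the old code had i915->drm.mode_config). A hedged before/after sketch; bar_old() and bar_new() are made-up helpers, not from the tree:

/* Before: display code pulls in the core driver struct. */
static void bar_old(struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	drm_dbg_kms(&i915->drm, "[CRTC:%d]\n", crtc->base.base.id);
}

/* After: only the display struct is needed. */
static void bar_new(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	drm_dbg_kms(display->drm, "[CRTC:%d]\n", crtc->base.base.id);
}
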
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.h b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
index 3beff67b33d0..f5e6f3ae9572 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.h
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
@@ -6,10 +6,10 @@
#ifndef __INTEL_MODESET_SETUP_H__
#define __INTEL_MODESET_SETUP_H__
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct intel_display;
-void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+void intel_modeset_setup_hw_state(struct intel_display *display,
struct drm_modeset_acquire_ctx *ctx);
#endif /* __INTEL_MODESET_SETUP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index a008412fdd04..766a9983665a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -6,13 +6,14 @@
*/
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cx0_phy.h"
#include "intel_display.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_modeset_verify.h"
@@ -28,9 +29,8 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
if (connector->get_hw_state(connector)) {
@@ -91,7 +91,6 @@ verify_connector_state(struct intel_atomic_state *state,
static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (crtc_state->has_pch_encoder) {
int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(display, crtc_state),
@@ -103,7 +102,7 @@ static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_s
* Yell if the encoder disagrees. Allow for slight
* rounding differences.
*/
- drm_WARN(&i915->drm, abs(fdi_dotclock - dotclock) > 1,
+ drm_WARN(display->drm, abs(fdi_dotclock - dotclock) > 1,
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
fdi_dotclock, dotclock);
}
@@ -113,17 +112,16 @@ static void
verify_encoder_state(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_encoder *encoder;
struct drm_connector *connector;
const struct drm_connector_state *old_conn_state, *new_conn_state;
int i;
- for_each_intel_encoder(&i915->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
bool enabled = false, found = false;
enum pipe pipe;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s]\n",
encoder->base.base.id,
encoder->base.name);
@@ -166,7 +164,6 @@ verify_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_crtc_state *sw_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_state *hw_crtc_state;
@@ -185,7 +182,7 @@ verify_crtc_state(struct intel_atomic_state *state,
intel_crtc_get_pipe_config(hw_crtc_state);
/* we keep both pipes enabled on 830 */
- if (IS_I830(i915) && hw_crtc_state->hw.active)
+ if (display->platform.i830 && hw_crtc_state->hw.active)
hw_crtc_state->hw.active = sw_crtc_state->hw.active;
INTEL_DISPLAY_STATE_WARN(display,
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index aff9a3455c1b..12308495afa5 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -291,7 +291,6 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *frontbuffer = NULL;
@@ -307,7 +306,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(i915, INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip_prepare(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
if (vma)
@@ -359,14 +358,13 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
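
The overlay hunks preserve a strict pairing: intel_frontbuffer_flip_prepare() fires when the new vma is latched, and the matching intel_frontbuffer_flip_complete() only once the old vma is released. A condensed sketch of that protocol using the fields from the hunks above; locking, reference counting and error handling are omitted:

static void flip_prepare(struct intel_overlay *overlay, struct i915_vma *vma)
{
	intel_frontbuffer_flip_prepare(overlay->display,
				       INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
	overlay->old_vma = overlay->vma;	/* remember what to release */
	overlay->vma = vma;			/* latch the new buffer */
}

static void release_old_vma(struct intel_overlay *overlay)
{
	struct i915_vma *vma = fetch_and_zero(&overlay->old_vma);

	intel_frontbuffer_flip_complete(overlay->display,
					INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}
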
diff --git a/drivers/gpu/drm/i915/display/intel_pch.c b/drivers/gpu/drm/i915/display/intel_pch.c
new file mode 100644
index 000000000000..469e8a3cfb49
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Intel Corporation.
+ */
+
+#include <drm/drm_print.h>
+
+#include "i915_utils.h"
+#include "intel_display_core.h"
+#include "intel_pch.h"
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
+#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
+#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
+#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
+/*
+ * Check for platforms where the south display is on the same PCI device or SoC
+ * die as the north display. The PCH (if it even exists) is not involved in
+ * display. Return a fake PCH type for south display handling on these
+ * platforms, without actually detecting the PCH, and PCH_NONE otherwise.
+ */
+static enum intel_pch intel_pch_fake_for_south_display(struct intel_display *display)
+{
+ enum intel_pch pch_type = PCH_NONE;
+
+ if (DISPLAY_VER(display) >= 20)
+ pch_type = PCH_LNL;
+ else if (display->platform.battlemage || display->platform.meteorlake)
+ pch_type = PCH_MTL;
+ else if (display->platform.dg2)
+ pch_type = PCH_DG2;
+ else if (display->platform.dg1)
+ pch_type = PCH_DG1;
+
+ return pch_type;
+}
+
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct intel_display *display, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Ibex Peak PCH\n");
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) != 5);
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found CougarPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ DISPLAY_VER(display) != 6 &&
+ !display->platform.ivybridge);
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found PantherPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ DISPLAY_VER(display) != 6 &&
+ !display->platform.ivybridge);
+ /* PPT is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found LynxPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ display->platform.haswell_ult ||
+ display->platform.broadwell_ult);
+ return PCH_LPT_H;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found LynxPoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell_ult &&
+ !display->platform.broadwell_ult);
+ return PCH_LPT_LP;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found WildcatPoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ display->platform.haswell_ult ||
+ display->platform.broadwell_ult);
+ /* WPT is LPT compatible */
+ return PCH_LPT_H;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found WildcatPoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell &&
+ !display->platform.broadwell);
+ drm_WARN_ON(display->drm,
+ !display->platform.haswell_ult &&
+ !display->platform.broadwell_ult);
+ /* WPT is LPT compatible */
+ return PCH_LPT_LP;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found SunrisePoint PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake);
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found SunrisePoint LP PCH\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Kaby Lake PCH (KBP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ /* KBP is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Cannon Lake PCH (CNP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm,
+ "Found Cannon Lake LP PCH (CNP-LP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Comet Lake PCH (CMP)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake &&
+ !display->platform.rocketlake);
+ /* CMP is CNP compatible */
+ return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Comet Lake V PCH (CMP-V)\n");
+ drm_WARN_ON(display->drm,
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ /* CMP-V is based on KBP, which is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Ice Lake PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.icelake);
+ return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Mule Creek Canyon PCH\n");
+ drm_WARN_ON(display->drm, !(display->platform.jasperlake ||
+ display->platform.elkhartlake));
+ /* MCC is TGP compatible */
+ return PCH_TGP;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Tiger Lake LP PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.tigerlake &&
+ !display->platform.rocketlake &&
+ !display->platform.skylake &&
+ !display->platform.kabylake &&
+ !display->platform.coffeelake &&
+ !display->platform.cometlake);
+ return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Jasper Lake PCH\n");
+ drm_WARN_ON(display->drm, !(display->platform.jasperlake ||
+ display->platform.elkhartlake));
+ /* JSP is ICP compatible */
+ return PCH_ICP;
+ case INTEL_PCH_ADP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
+ case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
+ drm_dbg_kms(display->drm, "Found Alder Lake PCH\n");
+ drm_WARN_ON(display->drm, !display->platform.alderlake_s &&
+ !display->platform.alderlake_p);
+ return PCH_ADP;
+ default:
+ return PCH_NONE;
+ }
+}
+
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
+{
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static void
+intel_virt_detect_pch(const struct intel_display *display,
+ unsigned short *pch_id, enum intel_pch *pch_type)
+{
+ unsigned short id = 0;
+
+ /*
+ * In a virtualized passthrough environment we can end up in a
+ * setup where the ISA bridge cannot be passed through.
+ * In this case, a south bridge can be emulated and we have to
+ * make an educated guess as to which PCH is really there.
+ */
+
+ if (display->platform.alderlake_s || display->platform.alderlake_p)
+ id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
+ else if (display->platform.tigerlake || display->platform.rocketlake)
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (display->platform.icelake)
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (display->platform.coffeelake ||
+ display->platform.cometlake)
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (display->platform.kabylake || display->platform.skylake)
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (display->platform.haswell_ult ||
+ display->platform.broadwell_ult)
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (display->platform.haswell || display->platform.broadwell)
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (DISPLAY_VER(display) == 6 || display->platform.ivybridge)
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (DISPLAY_VER(display) == 5)
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+
+ if (id)
+ drm_dbg_kms(display->drm, "Assuming PCH ID %04x\n", id);
+ else
+ drm_dbg_kms(display->drm, "Assuming no PCH\n");
+
+ *pch_type = intel_pch_type(display, id);
+
+ /* Sanity check virtual PCH id */
+ if (drm_WARN_ON(display->drm,
+ id && *pch_type == PCH_NONE))
+ id = 0;
+
+ *pch_id = id;
+}
+
+void intel_pch_detect(struct intel_display *display)
+{
+ struct pci_dev *pch = NULL;
+ unsigned short id;
+ enum intel_pch pch_type;
+
+ pch_type = intel_pch_fake_for_south_display(display);
+ if (pch_type != PCH_NONE) {
+ display->pch_type = pch_type;
+ drm_dbg_kms(display->drm,
+ "PCH not involved in display, using fake PCH type %d for south display\n",
+ pch_type);
+ return;
+ }
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough easier for the VMM, which then
+ * only needs to expose the ISA bridge for the driver to identify
+ * the real hardware underneath. This is a requirement from the
+ * virtualization team.
+ *
+ * In some virtualized environments (e.g. XEN), there can be an
+ * irrelevant ISA bridge in the system. To work reliably, we should
+ * scan through all the ISA bridge devices and check for the first
+ * match, instead of only checking the first one.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ pch_type = intel_pch_type(display, id);
+ if (pch_type != PCH_NONE) {
+ display->pch_type = pch_type;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ intel_virt_detect_pch(display, &id, &pch_type);
+ display->pch_type = pch_type;
+ break;
+ }
+ }
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && !HAS_DISPLAY(display)) {
+ drm_dbg_kms(display->drm,
+ "Display disabled, reverting to NOP PCH\n");
+ display->pch_type = PCH_NOP;
+ } else if (!pch) {
+ if (i915_run_as_guest() && HAS_DISPLAY(display)) {
+ intel_virt_detect_pch(display, &id, &pch_type);
+ display->pch_type = pch_type;
+ } else {
+ drm_dbg_kms(display->drm, "No PCH found.\n");
+ }
+ }
+
+ pci_dev_put(pch);
+}
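
Tying the new file together: intel_pch_detect() first short-circuits platforms whose south display shares the PCI device or SoC die (the fake PCH types), then walks every Intel ISA bridge, masks the PCI device id down to its family with INTEL_PCH_DEVICE_ID_MASK (0xff80), and maps the result through intel_pch_type(). The mask is why the table can list one id per family: QEMU's q35 bridge, for example, reports 0x2918, and 0x2918 & 0xff80 == 0x2900, which hits INTEL_PCH_QEMU_DEVICE_ID_TYPE. A standalone userspace sketch of just the masking step:

#include <stdio.h>

int main(void)
{
	unsigned short device = 0x2918;		/* qemu q35 ISA bridge */
	unsigned short id = device & 0xff80;	/* INTEL_PCH_DEVICE_ID_MASK */

	printf("masked PCH id: 0x%04x\n", id);	/* prints 0x2900 */
	return 0;
}
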
diff --git a/drivers/gpu/drm/i915/display/intel_pch.h b/drivers/gpu/drm/i915/display/intel_pch.h
new file mode 100644
index 000000000000..cf4dab1b98bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Intel Corporation.
+ */
+
+#ifndef __INTEL_PCH__
+#define __INTEL_PCH__
+
+#include "intel_display_conversion.h"
+
+struct intel_display;
+
+/*
+ * Sorted by south display engine compatibility.
+ * If the new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
+enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
+ PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
+ PCH_ICP, /* Ice Lake/Jasper Lake PCH */
+ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
+ PCH_ADP, /* Alder Lake PCH */
+
+ /* Fake PCHs, functionality handled on the same PCI dev */
+ PCH_DG1 = 1024,
+ PCH_DG2,
+ PCH_MTL,
+ PCH_LNL,
+};
+
+#define INTEL_PCH_TYPE(_display) (__to_intel_display(_display)->pch_type)
+#define HAS_PCH_DG2(display) (INTEL_PCH_TYPE(display) == PCH_DG2)
+#define HAS_PCH_ADP(display) (INTEL_PCH_TYPE(display) == PCH_ADP)
+#define HAS_PCH_DG1(display) (INTEL_PCH_TYPE(display) == PCH_DG1)
+#define HAS_PCH_TGP(display) (INTEL_PCH_TYPE(display) == PCH_TGP)
+#define HAS_PCH_ICP(display) (INTEL_PCH_TYPE(display) == PCH_ICP)
+#define HAS_PCH_CNP(display) (INTEL_PCH_TYPE(display) == PCH_CNP)
+#define HAS_PCH_SPT(display) (INTEL_PCH_TYPE(display) == PCH_SPT)
+#define HAS_PCH_LPT_H(display) (INTEL_PCH_TYPE(display) == PCH_LPT_H)
+#define HAS_PCH_LPT_LP(display) (INTEL_PCH_TYPE(display) == PCH_LPT_LP)
+#define HAS_PCH_LPT(display) (INTEL_PCH_TYPE(display) == PCH_LPT_H || \
+ INTEL_PCH_TYPE(display) == PCH_LPT_LP)
+#define HAS_PCH_CPT(display) (INTEL_PCH_TYPE(display) == PCH_CPT)
+#define HAS_PCH_IBX(display) (INTEL_PCH_TYPE(display) == PCH_IBX)
+#define HAS_PCH_NOP(display) (INTEL_PCH_TYPE(display) == PCH_NOP)
+#define HAS_PCH_SPLIT(display) (INTEL_PCH_TYPE(display) != PCH_NONE)
+
+void intel_pch_detect(struct intel_display *display);
+
+#endif /* __INTEL_PCH__ */
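
Since the enum is deliberately ordered by south display engine compatibility, PCH checks work both through the equality macros above and, elsewhere in the driver, as range comparisons. A hedged usage sketch, assuming a valid struct intel_display *display:

if (HAS_PCH_SPLIT(display)) {
	/* some real or fake PCH type was detected */
}

if (INTEL_PCH_TYPE(display) >= PCH_ICP) {
	/* Ice Lake PCH or anything newer in compatibility order */
}
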
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 99f6d6f53fa7..1743ebf551cb 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -3,8 +3,9 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "g4x_dp.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
@@ -20,28 +21,27 @@
#include "intel_pps.h"
#include "intel_sdvo.h"
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
- return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
- (HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A);
+ return HAS_PCH_IBX(display) || HAS_PCH_CPT(display) ||
+ (HAS_PCH_LPT_H(display) && pch_transcoder == PIPE_A);
}
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- if (HAS_PCH_LPT(i915))
+ if (HAS_PCH_LPT(display))
return PIPE_A;
else
return crtc->pipe;
}
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_dp_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
@@ -52,16 +52,15 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
port_name(port), pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ HAS_PCH_IBX(display) && !state && port_pipe == PIPE_B,
"IBX PCH DP %c still using transcoder B\n",
port_name(port));
}
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_hdmi_disabled(struct intel_display *display,
enum pipe pipe, enum port port,
i915_reg_t hdmi_reg)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
@@ -72,20 +71,19 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
port_name(port), pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ HAS_PCH_IBX(display) && !state && port_pipe == PIPE_B,
"IBX PCH HDMI %c still using transcoder B\n",
port_name(port));
}
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_ports_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
- assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+ assert_pch_dp_disabled(display, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(display, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(display, pipe, PORT_D, PCH_DP_D);
INTEL_DISPLAY_STATE_WARN(display,
intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe,
@@ -93,20 +91,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
INTEL_DISPLAY_STATE_WARN(display,
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe,
+ intel_lvds_port_enabled(display, PCH_LVDS, &port_pipe) && port_pipe == pipe,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+ assert_pch_hdmi_disabled(display, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(display, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(display, pipe, PORT_D, PCH_HDMID);
}
-static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+static void assert_pch_transcoder_disabled(struct intel_display *display,
enum pipe pipe)
{
- struct intel_display *display = &dev_priv->display;
u32 val;
bool enabled;
@@ -117,45 +114,45 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_hdmi_port(struct intel_display *display,
enum port port, i915_reg_t hdmi_reg)
{
- u32 val = intel_de_read(dev_priv, hdmi_reg);
+ u32 val = intel_de_read(display, hdmi_reg);
if (val & SDVO_ENABLE ||
(val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for HDMI %c\n",
port_name(port));
val &= ~SDVO_PIPE_SEL_MASK;
val |= SDVO_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, hdmi_reg, val);
+ intel_de_write(display, hdmi_reg, val);
}
-static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+static void ibx_sanitize_pch_dp_port(struct intel_display *display,
enum port port, i915_reg_t dp_reg)
{
- u32 val = intel_de_read(dev_priv, dp_reg);
+ u32 val = intel_de_read(display, dp_reg);
if (val & DP_PORT_EN ||
(val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sanitizing transcoder select for DP %c\n",
port_name(port));
val &= ~DP_PIPE_SEL_MASK;
val |= DP_PIPE_SEL(PIPE_A);
- intel_de_write(dev_priv, dp_reg, val);
+ intel_de_write(display, dp_reg, val);
}
-static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+static void ibx_sanitize_pch_ports(struct intel_display *display)
{
/*
* The BIOS may select transcoder B on some of the PCH
@@ -168,14 +165,14 @@ static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
* (see. intel_dp_link_down(), intel_disable_hdmi(),
* intel_disable_sdvo()).
*/
- ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
- ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+ ibx_sanitize_pch_dp_port(display, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(display, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(display, PORT_D, PCH_DP_D);
/* PCH SDVOB multiplex with HDMIB */
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
- ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+ ibx_sanitize_pch_hdmi_port(display, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(display, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(display, PORT_D, PCH_HDMID);
}
static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
@@ -225,32 +222,30 @@ void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder)));
-
- intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_HTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_HBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_HSYNC(display, cpu_transcoder)));
+
+ intel_de_write(display, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(display, TRANS_VTOTAL(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(display, TRANS_VBLANK(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNC(display, cpu_transcoder)));
+ intel_de_write(display, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(display, TRANS_VSYNCSHIFT(display, cpu_transcoder)));
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 val, pipeconf_val;
@@ -262,7 +257,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_fdi_tx_enabled(display, pipe);
assert_fdi_rx_enabled(display, pipe);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
reg = TRANS_CHICKEN2(pipe);
val = intel_de_read(display, reg);
/*
@@ -280,7 +275,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val = intel_de_read(display, reg);
pipeconf_val = intel_de_read(display, TRANSCONF(display, pipe));
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
/* Configure frame start delay to match the CPU */
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
@@ -299,7 +294,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) {
- if (HAS_PCH_IBX(dev_priv) &&
+ if (HAS_PCH_IBX(display) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
else
@@ -317,7 +312,6 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -326,18 +320,18 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
assert_fdi_rx_disabled(display, pipe);
/* Ports must be off as well */
- assert_pch_ports_disabled(dev_priv, pipe);
+ assert_pch_ports_disabled(display, pipe);
reg = PCH_TRANSCONF(pipe);
- intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0);
+ intel_de_rmw(display, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50))
+ drm_err(display->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
/* Workaround: Clear the timing override chicken bit again. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
+ intel_de_rmw(display, TRANS_CHICKEN2(pipe),
TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
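ilk_disable_pch_transcoder() leans on intel_de_rmw(); its read-modify-write semantics are equivalent to the following open-coded sketch (the real helper also returns the old register value, which the callers here ignore):

static u32 example_rmw(struct intel_display *display, i915_reg_t reg,
		       u32 clear, u32 set)
{
	/* clear the bits in @clear, then set the bits in @set */
	u32 old = intel_de_read(display, reg);

	intel_de_write(display, reg, (old & ~clear) | set);

	return old;
}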
@@ -366,14 +360,13 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state,
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
u32 temp;
- assert_pch_transcoder_disabled(dev_priv, pipe);
+ assert_pch_transcoder_disabled(display, pipe);
/* For PCH output, training FDI link */
intel_fdi_link_train(crtc, crtc_state);
@@ -382,7 +375,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
* We need to program the right clock selection
* before writing the pixel multiplier into the DPLL.
*/
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
u32 sel;
temp = intel_de_read(display, PCH_DPLL_SEL);
@@ -418,7 +411,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev_priv) &&
+ if (HAS_PCH_CPT(display) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -459,23 +452,27 @@ void ilk_pch_disable(struct intel_atomic_state *state,
void ilk_pch_post_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
ilk_disable_pch_transcoder(crtc);
- if (HAS_PCH_CPT(dev_priv)) {
+ if (HAS_PCH_CPT(display)) {
/* disable TRANS_DP_CTL */
- intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe),
+ intel_de_rmw(display, TRANS_DP_CTL(pipe),
TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK,
TRANS_DP_PORT_SEL_NONE);
/* disable DPLL_SEL */
- intel_de_rmw(dev_priv, PCH_DPLL_SEL,
+ intel_de_rmw(display, PCH_DPLL_SEL,
TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0);
}
ilk_fdi_pll_disable(crtc);
+
+ intel_disable_shared_dpll(old_crtc_state);
}
static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
@@ -497,9 +494,8 @@ static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum pipe pipe = crtc->pipe;
enum intel_dpll_id pll_id;
@@ -518,7 +514,7 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
/*
* The pipe->pch transcoder and pch transcoder->pll
* mapping is fixed.
@@ -550,8 +546,6 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val, pipeconf_val;
@@ -559,49 +553,49 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
assert_fdi_tx_enabled(display, (enum pipe)cpu_transcoder);
assert_fdi_rx_enabled(display, PIPE_A);
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(display, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(display, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ pipeconf_val = intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder));
if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK)
val |= TRANS_INTERLACE_INTERLACED;
else
val |= TRANS_INTERLACE_PROGRESSIVE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ intel_de_write(display, LPT_TRANSCONF, val);
+ if (intel_de_wait_for_set(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
+ drm_err(display->drm, "Failed to enable PCH transcoder\n");
}
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+static void lpt_disable_pch_transcoder(struct intel_display *display)
{
- intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0);
+ intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ if (intel_de_wait_for_clear(display, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
+ drm_err(display->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
+ intel_de_rmw(display, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0);
}
void lpt_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+ assert_pch_transcoder_disabled(display, PIPE_A);
lpt_program_iclkip(crtc_state);
@@ -614,36 +608,36 @@ void lpt_pch_enable(struct intel_atomic_state *state,
void lpt_pch_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- lpt_disable_pch_transcoder(dev_priv);
+ lpt_disable_pch_transcoder(display);
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
}
void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
+ if ((intel_de_read(display, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
return;
crtc_state->has_pch_encoder = true;
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ tmp = intel_de_read(display, FDI_RX_CTL(PIPE_A));
crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
&crtc_state->fdi_m_n);
- crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+ crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(display);
}
-void intel_pch_sanitize(struct drm_i915_private *i915)
+void intel_pch_sanitize(struct intel_display *display)
{
- if (HAS_PCH_IBX(i915))
- ibx_sanitize_pch_ports(i915);
+ if (HAS_PCH_IBX(display))
+ ibx_sanitize_pch_ports(display);
}
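Note how each function derives the display pointer from whatever object it already has: the diff uses to_intel_display() on a crtc, a crtc_state, an atomic state and (later in this series) an intel_dp alike. A sketch of the idiom, assuming the usual i915 types:

static void example_entry_points(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* all of these resolve to the same display instance */
	drm_WARN_ON(display->drm, display != to_intel_display(state));
	drm_WARN_ON(display->drm, display != to_intel_display(crtc_state));
}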
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h
index 35f8288af3d1..cd6b3ed05887 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.h
@@ -9,14 +9,14 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_link_m_n;
#ifdef I915
-bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -41,9 +41,9 @@ void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
-void intel_pch_sanitize(struct drm_i915_private *i915);
+void intel_pch_sanitize(struct intel_display *display);
#else
-static inline bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+static inline bool intel_has_pch_trancoder(struct intel_display *display,
enum pipe pch_transcoder)
{
return false;
@@ -90,7 +90,7 @@ static inline void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
}
-static inline void intel_pch_sanitize(struct drm_i915_private *i915)
+static inline void intel_pch_sanitize(struct intel_display *display)
{
}
#endif
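The header keeps its existing convention: real prototypes under #ifdef I915, empty static inline stubs otherwise, now simply taking struct intel_display. The pattern, with a hypothetical function name:

#ifdef I915
void example_pch_helper(struct intel_display *display);
#else
static inline void example_pch_helper(struct intel_display *display)
{
}
#endif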
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 33467de3d115..693b90e3dfc3 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -11,27 +11,28 @@
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
-static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_reset_mphy(struct intel_display *display)
{
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
+ intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset assert timeout\n");
- intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
+ intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+ drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
-static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+static void lpt_fdi_program_mphy(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
- lpt_fdi_reset_mphy(dev_priv);
+ lpt_fdi_reset_mphy(display);
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
@@ -103,11 +104,12 @@ static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 temp;
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_GATE);
intel_sbi_lock(dev_priv);
@@ -175,24 +177,25 @@ int lpt_iclkip(const struct intel_crtc_state *crtc_state)
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
struct iclkip_params p;
u32 temp;
- lpt_disable_iclkip(dev_priv);
+ lpt_disable_iclkip(display);
lpt_compute_iclkip(&p, clock);
- drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
+ drm_WARN_ON(display->drm, lpt_iclkip_freq(&p) != clock);
/* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
+ drm_WARN_ON(display->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
@@ -224,15 +227,16 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
/* Wait for initialization time */
udelay(24);
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+ intel_de_write(display, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+int lpt_get_iclkip(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct iclkip_params p;
u32 temp;
- if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ if ((intel_de_read(display, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
iclkip_params_init(&p);
@@ -268,15 +272,16 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+static void lpt_enable_clkout_dp(struct intel_display *display,
bool with_spread, bool with_fdi)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
- if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ if (drm_WARN(display->drm, with_fdi && !with_spread,
"FDI requires downspread\n"))
with_spread = true;
- if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ if (drm_WARN(display->drm, HAS_PCH_LPT_LP(display) &&
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
@@ -295,10 +300,10 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
if (with_fdi)
- lpt_fdi_program_mphy(dev_priv);
+ lpt_fdi_program_mphy(display);
}
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -307,13 +312,14 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
}
/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+void lpt_disable_clkout_dp(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, tmp;
intel_sbi_lock(dev_priv);
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ reg = HAS_PCH_LPT_LP(display) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -364,15 +370,16 @@ static const u16 sscdivintphase[] = {
* < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
* change in clock period = -(steps / 10) * 5.787 ps
*/
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+static void lpt_bend_clkout_dp(struct intel_display *display, int steps)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
int idx = BEND_IDX(steps);
- if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
+ if (drm_WARN_ON(display->drm, steps % 5 != 0))
return;
- if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(display->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
intel_sbi_lock(dev_priv);
@@ -393,10 +400,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
#undef BEND_IDX
-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+static bool spll_uses_pch_ssc(struct intel_display *display)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
@@ -405,18 +412,17 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
- if (IS_BROADWELL(dev_priv) &&
+ if (display->platform.broadwell &&
(ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
return true;
return false;
}
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
- enum intel_dpll_id id)
+static bool wrpll_uses_pch_ssc(struct intel_display *display, enum intel_dpll_id id)
{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 ctl = intel_de_read(display, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
@@ -424,7 +430,7 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
return true;
- if ((IS_BROADWELL(dev_priv) || IS_HASWELL_ULT(dev_priv)) &&
+ if ((display->platform.broadwell || display->platform.haswell_ult) &&
(ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
(fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
return true;
@@ -432,12 +438,12 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
return false;
}
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void lpt_init_pch_refclk(struct intel_display *display)
{
struct intel_encoder *encoder;
bool has_fdi = false;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_fdi = true;
@@ -462,37 +468,36 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
- dev_priv->display.dpll.pch_ssc_use = 0;
+ display->dpll.pch_ssc_use = 0;
- if (spll_uses_pch_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ if (spll_uses_pch_ssc(display)) {
+ drm_dbg_kms(display->drm, "SPLL using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL1)) {
+ drm_dbg_kms(display->drm, "WRPLL1 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ if (wrpll_uses_pch_ssc(display, DPLL_ID_WRPLL2)) {
+ drm_dbg_kms(display->drm, "WRPLL2 using PCH SSC\n");
+ display->dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (dev_priv->display.dpll.pch_ssc_use)
+ if (display->dpll.pch_ssc_use)
return;
if (has_fdi) {
- lpt_bend_clkout_dp(dev_priv, 0);
- lpt_enable_clkout_dp(dev_priv, true, true);
+ lpt_bend_clkout_dp(display, 0);
+ lpt_enable_clkout_dp(display, true, true);
} else {
- lpt_disable_clkout_dp(dev_priv);
+ lpt_disable_clkout_dp(display);
}
}
-static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
+static void ilk_init_pch_refclk(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
int i;
@@ -521,7 +526,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
has_ck505 = display->vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
@@ -607,7 +612,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
+ drm_dbg_kms(display->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else {
val &= ~DREF_SSC1_ENABLE;
@@ -623,7 +628,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(display) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else {
@@ -670,10 +675,10 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/*
* Initialize reference clocks when the driver loads
*/
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+void intel_init_pch_refclk(struct intel_display *display)
{
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ilk_init_pch_refclk(dev_priv);
- else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev_priv);
+ if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display))
+ ilk_init_pch_refclk(display);
+ else if (HAS_PCH_LPT(display))
+ lpt_init_pch_refclk(display);
}
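intel_pch_refclk.c cannot drop dev_priv entirely yet: the sideband helpers (intel_sbi_read/write/lock) still take struct drm_i915_private, so the converted functions bridge with to_i915(display->drm). A sketch of that interim pattern (helper name hypothetical; intel_sbi_unlock() assumed as the counterpart of the intel_sbi_lock() seen above):

static u32 example_sbi_peek(struct intel_display *display, u16 reg)
{
	/* display for everything else, dev_priv only for sideband */
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 tmp;

	intel_sbi_lock(i915);
	tmp = intel_sbi_read(i915, reg, SBI_ICLK);
	intel_sbi_unlock(i915);

	return tmp;
}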
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
index ae3403c0ced8..25cc53c568bc 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -8,25 +8,25 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
#ifdef I915
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct intel_display *display);
+int lpt_get_iclkip(struct intel_display *display);
int lpt_iclkip(const struct intel_crtc_state *crtc_state);
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+void intel_init_pch_refclk(struct intel_display *display);
+void lpt_disable_clkout_dp(struct intel_display *display);
#else
static inline void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
}
-static inline void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_iclkip(struct intel_display *display)
{
}
-static inline int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+static inline int lpt_get_iclkip(struct intel_display *display)
{
return 0;
}
@@ -34,10 +34,10 @@ static inline int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+static inline void intel_init_pch_refclk(struct intel_display *display)
{
}
-static inline void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+static inline void lpt_disable_clkout_dp(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 10e26c3db946..6182f484b5bd 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -75,7 +75,7 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+static void i9xx_pipe_crc_auto_source(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source)
{
@@ -85,8 +85,8 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
- drm_modeset_lock_all(&dev_priv->drm);
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ drm_modeset_lock_all(display->drm);
+ for_each_intel_encoder(display->drm, encoder) {
if (!encoder->base.crtc)
continue;
@@ -113,7 +113,7 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- drm_WARN(&dev_priv->drm, 1, "nonexisting DP port %c\n",
+ drm_WARN(display->drm, 1, "nonexisting DP port %c\n",
port_name(dig_port->base.port));
break;
}
@@ -122,10 +122,10 @@ static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
break;
}
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int vlv_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -133,7 +133,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
@@ -148,7 +148,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev_priv))
+ if (!display->platform.cherryview)
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
need_stable_symbols = true;
@@ -170,7 +170,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -186,26 +186,26 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
default:
return -EINVAL;
}
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
return 0;
}
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int i9xx_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ i9xx_pipe_crc_auto_source(display, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
break;
case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev_priv))
+ if (!SUPPORTS_TV(display))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
@@ -229,10 +229,10 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+static void vlv_undo_pipe_scramble_reset(struct intel_display *display,
enum pipe pipe)
{
- u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X(dev_priv));
+ u32 tmp = intel_de_read(display, PORT_DFT2_G4X(display));
switch (pipe) {
case PIPE_A:
@@ -249,7 +249,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
- intel_de_write(dev_priv, PORT_DFT2_G4X(dev_priv), tmp);
+ intel_de_write(display, PORT_DFT2_G4X(display), tmp);
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -281,18 +281,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
static void
intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
- i915gm_irq_cstate_wa(dev_priv, enable);
+ if (display->platform.i945gm || display->platform.i915gm)
+ i915gm_irq_cstate_wa(display, enable);
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(&dev_priv->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@@ -311,7 +311,7 @@ retry:
pipe_config->uapi.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
- if (IS_HASWELL(dev_priv) &&
+ if (display->platform.haswell &&
pipe_config->hw.active && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
pipe_config->uapi.mode_changed = true;
@@ -327,13 +327,13 @@ put_state:
drm_atomic_state_put(state);
unlock:
- drm_WARN(&dev_priv->drm, ret,
+ drm_WARN(display->drm, ret,
"Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int ivb_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -361,7 +361,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int skl_pipe_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source,
u32 *val)
@@ -404,22 +404,22 @@ static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
-static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+static int get_new_crc_ctl_reg(struct intel_display *display,
enum pipe pipe,
enum intel_pipe_crc_source *source, u32 *val)
{
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
return i8xx_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_pipe_crc_ctl_reg(display, pipe, source, val);
+ else if (display->platform.ironlake || display->platform.sandybridge)
return ilk_pipe_crc_ctl_reg(source, val);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_pipe_crc_ctl_reg(display, pipe, source, val);
else
- return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ return skl_pipe_crc_ctl_reg(display, pipe, source, val);
}
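Platform checks migrate the same way throughout the file: IS_VALLEYVIEW()/IS_CHERRYVIEW()-style macros on dev_priv become display->platform bitfields, and DISPLAY_VER() takes the display pointer. A one-line predicate in the new style, mirroring the dispatch ladder above:

static bool example_uses_vlv_crc_path(struct intel_display *display)
{
	return display->platform.valleyview || display->platform.cherryview;
}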
static int
@@ -447,7 +447,7 @@ void intel_crtc_crc_init(struct intel_crtc *crtc)
spin_lock_init(&pipe_crc->lock);
}
-static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i8xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -459,7 +459,7 @@ static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+static int i9xx_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -472,7 +472,7 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+static int vlv_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -487,7 +487,7 @@ static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ilk_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -501,7 +501,7 @@ static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+static int ivb_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -515,7 +515,7 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
}
}
-static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+static int skl_crc_source_valid(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
switch (source) {
@@ -535,21 +535,21 @@ static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
}
static int
-intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+intel_is_valid_crc_source(struct intel_display *display,
const enum intel_pipe_crc_source source)
{
- if (DISPLAY_VER(dev_priv) == 2)
- return i8xx_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 5)
- return i9xx_crc_source_valid(dev_priv, source);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_crc_source_valid(dev_priv, source);
- else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
- return ilk_crc_source_valid(dev_priv, source);
- else if (DISPLAY_VER(dev_priv) < 9)
- return ivb_crc_source_valid(dev_priv, source);
+ if (DISPLAY_VER(display) == 2)
+ return i8xx_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 5)
+ return i9xx_crc_source_valid(display, source);
+ else if (display->platform.valleyview || display->platform.cherryview)
+ return vlv_crc_source_valid(display, source);
+ else if (display->platform.ironlake || display->platform.sandybridge)
+ return ilk_crc_source_valid(display, source);
+ else if (DISPLAY_VER(display) < 9)
+ return ivb_crc_source_valid(display, source);
else
- return skl_crc_source_valid(dev_priv, source);
+ return skl_crc_source_valid(display, source);
}
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -562,16 +562,16 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
- intel_is_valid_crc_source(dev_priv, source) == 0) {
+ intel_is_valid_crc_source(display, source) == 0) {
*values_cnt = 5;
return 0;
}
@@ -583,7 +583,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
@@ -594,14 +593,14 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ drm_dbg_kms(display->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(pipe);
wakeref = intel_display_power_get_if_enabled(display, power_domain);
if (!wakeref) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -610,17 +609,17 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
if (enable)
intel_crtc_crc_setup_workarounds(crtc, true);
- ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ ret = get_new_crc_ctl_reg(display, pipe, &source, &val);
if (ret != 0)
goto out;
pipe_crc->source = source;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
if (!source) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ if (display->platform.valleyview || display->platform.cherryview)
+ vlv_undo_pipe_scramble_reset(display, pipe);
}
pipe_crc->skipped = 0;
@@ -636,7 +635,7 @@ out:
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
u32 val = 0;
@@ -644,19 +643,20 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
if (!crtc->base.crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0)
+ if (get_new_crc_ctl_reg(display, pipe, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), val);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), val);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
}
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
@@ -665,7 +665,7 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- intel_de_write(dev_priv, PIPE_CRC_CTL(dev_priv, pipe), 0);
- intel_de_posting_read(dev_priv, PIPE_CRC_CTL(dev_priv, pipe));
+ intel_de_write(display, PIPE_CRC_CTL(display, pipe), 0);
+ intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
intel_synchronize_irq(dev_priv);
}
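The write/posting-read pairs above are the usual MMIO flush idiom: reading the register back forces the posted write out to the hardware before the code proceeds (here, before synchronizing IRQs). In sketch form:

static void example_write_flushed(struct intel_display *display,
				  i915_reg_t reg, u32 val)
{
	intel_de_write(display, reg, val);
	/* readback guarantees the write has landed before we continue */
	intel_de_posting_read(display, reg);
}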
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index b1675b46e06c..c00d9184c586 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -52,44 +52,57 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
return false;
}
+static enum intel_memory_type
+initial_plane_memory_type(struct intel_display *display)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (display->platform.dgfx)
+ return INTEL_MEMORY_LOCAL;
+ else if (HAS_LMEMBAR_SMEM_STOLEN(i915))
+ return INTEL_MEMORY_STOLEN_LOCAL;
+ else
+ return INTEL_MEMORY_STOLEN_SYSTEM;
+}
+
static bool
-initial_plane_phys_lmem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
+initial_plane_phys(struct intel_display *display,
+ struct intel_initial_plane_config *plane_config)
{
struct drm_i915_private *i915 = to_i915(display->drm);
- gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_memory_region *mem;
+ enum intel_memory_type mem_type;
+ bool is_present, is_local;
dma_addr_t dma_addr;
- gen8_pte_t pte;
u32 base;
+ mem_type = initial_plane_memory_type(display);
+ mem = intel_memory_region_by_type(i915, mem_type);
+ if (!mem) {
+ drm_dbg_kms(display->drm,
+ "Initial plane memory region (type %s) not initialized\n",
+ intel_memory_type_str(mem_type));
+ return false;
+ }
+
base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
- gte += base / I915_GTT_PAGE_SIZE;
+ dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local);
- pte = ioread64(gte);
- if (!(pte & GEN12_GGTT_PTE_LM)) {
+ if (!is_present) {
drm_err(display->drm,
- "Initial plane programming missing PTE_LM bit\n");
+ "Initial plane FB PTE not present\n");
return false;
}
- dma_addr = pte & GEN12_GGTT_PTE_ADDR_MASK;
-
- if (IS_DGFX(i915))
- mem = i915->mm.regions[INTEL_REGION_LMEM_0];
- else
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
+ if (intel_memory_type_is_local(mem->type) != is_local) {
+ drm_err(display->drm,
+ "Initial plane FB PTE unsuitable for %s\n",
+ mem->region.name);
return false;
}
- /*
- * On lmem we don't currently expect this to
- * ever be placed in the stolen portion.
- */
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
drm_err(display->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
@@ -107,42 +120,6 @@ initial_plane_phys_lmem(struct intel_display *display,
return true;
}
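initial_plane_phys() now queries the GGTT through intel_ggtt_read_entry() instead of reading GEN12 PTE bits by hand; the helper hides the PTE encoding and reports presence and local-memory placement. A usage sketch under the signature visible above:

static dma_addr_t example_ggtt_lookup(struct i915_ggtt *ggtt, u32 offset)
{
	bool is_present, is_local;
	dma_addr_t dma_addr;

	dma_addr = intel_ggtt_read_entry(&ggtt->vm, offset,
					 &is_present, &is_local);
	if (!is_present)
		return 0; /* nothing mapped at this offset */

	/* is_local distinguishes VRAM from system memory placement */
	return dma_addr;
}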
-static bool
-initial_plane_phys_smem(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct intel_memory_region *mem;
- u32 base;
-
- base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
-
- mem = i915->mm.stolen_region;
- if (!mem) {
- drm_dbg_kms(display->drm,
- "Initial plane memory region not initialized\n");
- return false;
- }
-
- /* FIXME get and validate the dma_addr from the PTE */
- plane_config->phys_base = base;
- plane_config->mem = mem;
-
- return true;
-}
-
-static bool
-initial_plane_phys(struct intel_display *display,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
- return initial_plane_phys_lmem(display, plane_config);
- else
- return initial_plane_phys_smem(display, plane_config);
-}
-
static struct i915_vma *
initial_plane_vma(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index 63301a01906c..1253376c7654 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -5,6 +5,8 @@
#include <linux/bitops.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
@@ -476,13 +478,34 @@ static bool intel_pmdemand_req_complete(struct intel_display *display)
XELPDP_PMDEMAND_REQ_ENABLE);
}
-static void intel_pmdemand_wait(struct intel_display *display)
+static void intel_pmdemand_poll(struct intel_display *display)
{
- if (!wait_event_timeout(display->pmdemand.waitqueue,
- intel_pmdemand_req_complete(display),
- msecs_to_jiffies_timeout(10)))
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ int ret;
+
+ ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 0,
+ 50, timeout_ms, &status);
+
+ if (ret == -ETIMEDOUT)
drm_err(display->drm,
- "timed out waiting for Punit PM Demand Response\n");
+ "timed out waiting for Punit PM Demand Response within %ums (status 0x%08x)\n",
+ timeout_ms, status);
+}
+
+static void intel_pmdemand_wait(struct intel_display *display)
+{
+ /* Wa_14024400148: use the polling method on LNL */
+ if (DISPLAY_VER(display) == 20) {
+ intel_pmdemand_poll(display);
+ } else {
+ if (!wait_event_timeout(display->pmdemand.waitqueue,
+ intel_pmdemand_req_complete(display),
+ msecs_to_jiffies_timeout(10)))
+ drm_err(display->drm,
+ "timed out waiting for Punit PM Demand Response\n");
+ }
}
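Wa_14024400148 makes display version 20 (LNL) poll the request bit with intel_de_wait_custom() instead of sleeping on the interrupt-driven waitqueue. Going by the call above, the helper takes the mask, the expected value, a busy-wait budget in microseconds and a sleep timeout in milliseconds, and returns the final register value through the last argument; a condensed sketch:

static void example_poll_pmdemand_ack(struct intel_display *display)
{
	u32 status;
	int ret;

	/* wait for XELPDP_PMDEMAND_REQ_ENABLE to clear on acknowledge */
	ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
				   XELPDP_PMDEMAND_REQ_ENABLE, 0,
				   50, 10, &status);
	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"PM demand ack timed out (status 0x%08x)\n", status);
}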
/* Required to be programmed during Display Init Sequences. */
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 617ce4993172..05e1e5c7e8b7 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -91,7 +91,6 @@ static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -134,7 +133,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = display->platform.cherryview &&
!chv_phy_powergate_ch(display, phy, ch, true);
- if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(display))) {
+ if (vlv_force_pll_on(display, pipe, vlv_get_dpll(display))) {
drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
@@ -158,7 +157,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
intel_de_posting_read(display, intel_dp->output_reg);
if (!pll_enabled) {
- vlv_force_pll_off(dev_priv, pipe);
+ vlv_force_pll_off(display, pipe);
if (release_cl_override)
chv_phy_powergate_ch(display, phy, ch, false);
@@ -351,21 +350,19 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
static int intel_num_pps(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (display->platform.valleyview || display->platform.cherryview)
return 2;
if (display->platform.geminilake || display->platform.broxton)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ if (INTEL_PCH_TYPE(display) >= PCH_MTL)
return 2;
- if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ if (INTEL_PCH_TYPE(display) >= PCH_DG1)
return 1;
- if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(display) >= PCH_ICP)
return 2;
return 1;
@@ -374,11 +371,10 @@ static int intel_num_pps(struct intel_display *display)
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp->pps.pps_idx == 1 &&
- INTEL_PCH_TYPE(i915) >= PCH_ICP &&
- INTEL_PCH_TYPE(i915) <= PCH_ADP)
+ INTEL_PCH_TYPE(display) >= PCH_ICP &&
+ INTEL_PCH_TYPE(display) <= PCH_ADP)
return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
@@ -500,7 +496,6 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int pps_idx;
memset(regs, 0, sizeof(*regs));
@@ -519,7 +514,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
if (display->platform.geminilake || display->platform.broxton ||
- INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ INTEL_PCH_TYPE(display) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
regs->pp_div = PP_DIVISOR(display, pps_idx);
@@ -744,11 +739,11 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return false;
+ lockdep_assert_held(&display->pps.mutex);
+
cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
intel_dp->pps.want_panel_vdd = true;
@@ -925,11 +920,11 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&display->pps.mutex);
-
if (!intel_dp_is_edp(intel_dp))
return;
+ lockdep_assert_held(&display->pps.mutex);
+
INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
"[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
@@ -1592,7 +1587,6 @@ static void pps_init_delays(struct intel_dp *intel_dp)
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp_on, pp_off, port_sel = 0;
int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
@@ -1639,7 +1633,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
* power sequencer any more. */
if (display->platform.valleyview || display->platform.cherryview) {
port_sel = PANEL_PORT_SELECT_VLV(port);
- } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ } else if (HAS_PCH_IBX(display) || HAS_PCH_CPT(display)) {
switch (port) {
case PORT_A:
port_sel = PANEL_PORT_SELECT_DPA;
@@ -1792,9 +1786,7 @@ void intel_pps_unlock_regs_wa(struct intel_display *display)
void intel_pps_setup(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton)
+ if (HAS_PCH_SPLIT(display) || display->platform.geminilake || display->platform.broxton)
display->pps.mmio_base = PCH_PPS_BASE;
else if (display->platform.valleyview || display->platform.cherryview)
display->pps.mmio_base = VLV_PPS_BASE;
@@ -1837,7 +1829,6 @@ void intel_pps_connector_debugfs_add(struct intel_connector *connector)
void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = INVALID_PIPE;
@@ -1846,7 +1837,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
if (drm_WARN_ON(display->drm, HAS_DDI(display)))
return;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
u32 port_sel;
pp_reg = PP_CONTROL(display, 0);
@@ -1855,7 +1846,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, PCH_LVDS, &panel_pipe);
break;
case PANEL_PORT_SELECT_DPA:
g4x_dp_port_enabled(display, DP_A, PORT_A, &panel_pipe);
@@ -1883,7 +1874,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
- intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+ intel_lvds_port_enabled(display, LVDS, &panel_pipe);
}
val = intel_de_read(display, pp_reg);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4e938bad808c..430ad4ef7146 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_vblank.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -36,7 +37,9 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_frontbuffer.h"
@@ -45,6 +48,7 @@
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "intel_vblank.h"
+#include "intel_vrr.h"
#include "skl_universal_plane.h"
/**
@@ -463,8 +467,8 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
if (DISPLAY_VER(display) >= 9) {
u32 val;
- val = intel_de_rmw(dev_priv,
- PSR_EVENT(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ PSR_EVENT(display, cpu_transcoder),
0, 0);
psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
@@ -689,7 +693,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 aux_clock_divider, aux_ctl;
/* write DP_SET_POWER=D0 */
@@ -704,7 +707,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- intel_de_write(dev_priv,
+ intel_de_write(display,
psr_aux_data_reg(display, cpu_transcoder, i >> 2),
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
@@ -794,31 +797,10 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}
-static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- u8 val;
-
- /*
- * eDP Panel Replay uses always ALPM
- * PSR2 uses ALPM but PSR1 doesn't
- */
- if (!intel_dp_is_edp(intel_dp) || (!crtc_state->has_panel_replay &&
- !crtc_state->has_sel_update))
- return;
-
- val = DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE;
-
- if (crtc_state->has_panel_replay)
- val |= DP_ALPM_MODE_AUX_LESS;
-
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
-}
-
static void intel_psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- intel_psr_enable_sink_alpm(intel_dp, crtc_state);
+ intel_alpm_enable_sink(intel_dp, crtc_state);
crtc_state->has_panel_replay ?
_panel_replay_enable_sink(intel_dp, crtc_state) :
@@ -839,7 +821,6 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val = 0;
if (DISPLAY_VER(display) >= 11)
@@ -873,7 +854,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
* WA 0479: hsw,bdw
* "Do not skip both TP1 and TP2/TP3"
*/
- if (DISPLAY_VER(dev_priv) < 9 &&
+ if (DISPLAY_VER(display) < 9 &&
connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_100us;
@@ -906,10 +887,21 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
return idle_frames;
}
+static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ u32 current_dc_state = intel_display_power_get_current_dc_state(display);
+ struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+
+ return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
+ current_dc_state != DC_STATE_EN_UPTO_DC6) ||
+ intel_dp->psr.active_non_psr_pipes ||
+ READ_ONCE(vblank->enabled);
+}
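is_dc5_dc6_blocked() gates the Wa_16025596647 handling in the PSR1/PSR2 activation paths below: the special handling is applied while DC5/DC6 entry is blocked, i.e. when the target DC state is not DC5/DC6, other non-PSR pipes are still active, or a vblank interrupt is enabled. A sketch of the combined gate, copied from the conditions in the hunks below (display version 20 is LNL; the other check is display 30.00 at stepping A0):

static bool example_needs_wa_16025596647(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	return (DISPLAY_VER(display) == 20 ||
		IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	       is_dc5_dc6_blocked(intel_dp);
}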
+
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
@@ -919,7 +911,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(display) < 20)
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
if (intel_dp->psr.link_standby)
@@ -935,6 +927,14 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
+
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ is_dc5_dc6_blocked(intel_dp))
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ true);
}
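The DISPLAY_VER 20 / PTL-A0 stepping gate for Wa_16025596647 recurs in
several hunks of this patch; a hypothetical helper (not part of the patch)
makes the repeated check explicit:

	/* Sketch only: the Wa_16025596647 platform gate used above and below. */
	static bool intel_psr_needs_wa_16025596647(struct intel_display *display)
	{
		return DISPLAY_VER(display) == 20 ||
		       IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0);
	}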
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -1013,14 +1013,21 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
+ u8 idle_frames;
- val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ is_dc5_dc6_blocked(intel_dp))
+ idle_frames = 0;
+ else
+ idle_frames = psr_compute_idle_frames(intel_dp);
+ val |= EDP_PSR2_IDLE_FRAMES(idle_frames);
- if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
val |= EDP_SU_TRACK_ENABLE;
if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
@@ -1038,7 +1045,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1103,9 +1110,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
else if (DISPLAY_VER(display) >= 12)
return cpu_transcoder == TRANSCODER_A;
@@ -1183,10 +1188,9 @@ dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -1197,7 +1201,6 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
struct i915_power_domains *power_domains = &display->power.domains;
u32 exit_scanlines;
@@ -1223,7 +1226,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
return;
/*
@@ -1264,7 +1267,6 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
@@ -1286,7 +1288,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -1412,7 +1414,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
@@ -1421,20 +1422,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
/* JSL and EHL only support eDP 1.3 */
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ if (display->platform.jasperlake || display->platform.elkhartlake) {
drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
return false;
}
/* Wa_16011181250 */
- if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
- IS_DG2(dev_priv)) {
+ if (display->platform.rocketlake || display->platform.alderlake_s ||
+ display->platform.dg2) {
drm_dbg_kms(display->drm,
"PSR2 is defeatured for this platform\n");
return false;
}
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not completely functional in this stepping\n");
return false;
@@ -1453,7 +1454,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
+ (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
drm_dbg_kms(display->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1486,7 +1487,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
drm_dbg_kms(display->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
@@ -1604,6 +1605,12 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -1634,12 +1641,6 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
if (!alpm_config_valid(intel_dp, crtc_state, true))
return false;
- if (crtc_state->crc_enabled) {
- drm_dbg_kms(display->drm,
- "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
- return false;
- }
-
return true;
}
@@ -1658,6 +1659,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc;
+ u8 active_pipes = 0;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm, "PSR disabled by flag\n");
@@ -1711,6 +1715,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm,
"PSR disabled to workaround PSR FSM hang issue\n");
}
+
+ /* Rest is for Wa_16025596647 */
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ /* Not needed by Panel Replay */
+ if (crtc_state->has_panel_replay)
+ return;
+
+ /* We ignore possible secondary PSR/Panel Replay capable eDP */
+ for_each_intel_crtc(display->drm, crtc)
+ active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
+
+ active_pipes = intel_calc_active_pipes(state, active_pipes);
+
+ crtc_state->active_non_psr_pipes = active_pipes &
+ ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}
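To illustrate the mask arithmetic above with a standalone sketch (not driver
code): with pipes A..C active and the PSR panel on pipe A,
active_non_psr_pipes ends up with only the bits for pipes B and C set.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t active_pipes = 0x07;	/* pipes A, B and C active */
		uint8_t psr_pipe_bit = 1u << 0;	/* PSR panel drives pipe A */
		uint8_t non_psr = active_pipes & ~psr_pipe_bit;

		printf("%#x\n", non_psr);	/* prints 0x6: pipes B and C */
		return 0;
	}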
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1827,7 +1849,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask = 0;
@@ -1866,7 +1887,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* As a workaround leave LPSP unmasked to prevent PSR entry
* when external displays are active.
*/
- if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
+ if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
mask |= EDP_PSR_DEBUG_MASK_LPSP;
if (DISPLAY_VER(display) < 20)
@@ -1880,7 +1901,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
/* allow PSR with sprite enabled */
- if (IS_HASWELL(dev_priv))
+ if (display->platform.haswell)
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
}
@@ -1903,9 +1924,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
- if (intel_dp_is_edp(intel_dp))
- intel_alpm_configure(intel_dp, crtc_state);
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1925,7 +1943,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
*/
if (!intel_dp->psr.panel_replay_enabled &&
(IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(dev_priv)))
+ display->platform.alderlake_p))
intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
@@ -1936,10 +1954,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
+
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ !intel_dp->psr.panel_replay_enabled)
+ intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
+
+ intel_alpm_configure(intel_dp, crtc_state);
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1995,6 +2021,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
+ intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2006,8 +2033,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.sel_update_enabled ? "2" : "1");
/*
- * Enabling here only for PSR. Panel Replay enable bit is already
- * written at this point. See
+ * Enabling the sink is done here only for PSR; the Panel Replay enable
+ * bit is already written at this point. Sink ALPM is enabled here for
+ * both PSR and Panel Replay. See
* intel_psr_panel_replay_enable_sink. Modifiers/options:
* - Selective Update
* - Region Early Transport
@@ -2024,7 +2052,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_psr_enable_source(intel_dp, crtc_state);
intel_dp->psr.enabled = true;
- intel_dp->psr.paused = false;
+ intel_dp->psr.pause_counter = 0;
/*
* Link_ok is sticky and set here on PSR enable. We can assume link
@@ -2070,6 +2098,12 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
+ if (DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ false);
+
val = intel_de_rmw(display,
psr_ctl_reg(display, cpu_transcoder),
EDP_PSR_ENABLE, 0);
@@ -2104,7 +2138,6 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
lockdep_assert_held(&intel_dp->psr.lock);
@@ -2136,7 +2169,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(display,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (display->platform.alderlake_p)
intel_de_rmw(display, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
@@ -2144,16 +2177,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (intel_dp_is_edp(intel_dp))
intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);
- /* Panel Replay on eDP is always using ALPM aux less. */
- if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
- ALPM_CTL_ALPM_ENABLE |
- ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
-
- intel_de_rmw(display,
- PORT_ALPM_CTL(cpu_transcoder),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
- }
+ if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
+ intel_alpm_disable(intel_dp);
/* Disable PSR on Sink */
if (!intel_dp->psr.panel_replay_enabled) {
@@ -2164,12 +2189,19 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
DP_RECEIVER_ALPM_CONFIG, 0);
}
+ /* Wa_16025596647 */
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ !intel_dp->psr.panel_replay_enabled)
+ intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);
+
intel_dp->psr.enabled = false;
intel_dp->psr.panel_replay_enabled = false;
intel_dp->psr.sel_update_enabled = false;
intel_dp->psr.psr2_sel_fetch_enabled = false;
intel_dp->psr.su_region_et_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ intel_dp->psr.active_non_psr_pipes = 0;
}
/**
@@ -2210,7 +2242,6 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2223,12 +2254,10 @@ void intel_psr_pause(struct intel_dp *intel_dp)
return;
}
- /* If we ever hit this, we will need to add refcount to pause/resume */
- drm_WARN_ON(display->drm, psr->paused);
-
- intel_psr_exit(intel_dp);
- intel_psr_wait_exit_locked(intel_dp);
- psr->paused = true;
+ if (intel_dp->psr.pause_counter++ == 0) {
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+ }
mutex_unlock(&psr->lock);
@@ -2244,6 +2273,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
*/
void intel_psr_resume(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2251,28 +2281,36 @@ void intel_psr_resume(struct intel_dp *intel_dp)
mutex_lock(&psr->lock);
- if (!psr->paused)
- goto unlock;
+ if (!psr->enabled)
+ goto out;
- psr->paused = false;
- intel_psr_activate(intel_dp);
+ if (!psr->pause_counter) {
+ drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
+ goto out;
+ }
-unlock:
+ if (--intel_dp->psr.pause_counter == 0)
+ intel_psr_activate(intel_dp);
+
+out:
mutex_unlock(&psr->lock);
}
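With the counter replacing the old boolean, nested pause/resume pairs are
legal as long as they stay balanced; a sketch of the calling pattern this
enables:

	intel_psr_pause(intel_dp);	/* counter 0 -> 1, PSR deactivated */
	intel_psr_pause(intel_dp);	/* counter 1 -> 2, no HW access */
	intel_psr_resume(intel_dp);	/* counter 2 -> 1, still paused */
	intel_psr_resume(intel_dp);	/* counter 1 -> 0, PSR reactivated */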
/**
- * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed
+ * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
+ * notification.
* @crtc_state: CRTC status
*
* We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
* prevent it in case of Panel Replay. Panel Replay switches main link off on
* DC entry. This means vblank interrupts are not fired and is a problem if
- * user-space is polling for vblank events.
+ * user-space is polling for vblank events. Wa_16025596647 also needs to
+ * know when vblank is enabled/disabled.
*/
-bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
+bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
@@ -2283,8 +2321,15 @@ bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp_is_edp(intel_dp) &&
- CAN_PANEL_REPLAY(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
+ continue;
+
+ if (CAN_PANEL_REPLAY(intel_dp))
+ return true;
+
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ CAN_PSR(intel_dp))
return true;
}
@@ -2312,37 +2357,76 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
CURSURFLIVE(display, crtc->pipe), 0);
}
-static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
+/**
+ * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
+ * @crtc_state: the crtc state
+ *
+ * Return minimum vblank delay needed by PSR.
+ */
+int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->has_psr)
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
+ return 1;
+
+ /* Rest is for SRD_STATUS needed on LunarLake and onwards */
+ if (DISPLAY_VER(display) < 20)
+ return 0;
+
+ /*
+ * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
+ *
+ * To deterministically capture the transition of the state machine
+ * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
+ * one line after the non-delayed V. Blank.
+ *
+ * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
+ * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
+ * - TRANS_VTOTAL[ Vertical Active ])
+ *
+ * SRD_STATUS is used only by PSR1 on PantherLake.
+ * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
+ */
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
+ if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
+ crtc_state->has_sel_update))
+ return 0;
+ else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
+ intel_crtc_has_type(crtc_state,
+ INTEL_OUTPUT_EDP)))
+ return 0;
+ else
+ return 1;
+}
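A caller computing transcoder timings would presumably clamp its vblank
delay against this minimum (hypothetical call site, not part of this patch):

	vblank_delay = max(vblank_delay,
			   intel_psr_min_vblank_delay(crtc_state));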
+
+static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
+{
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
PSR2_MAN_TRK_CTL_ENABLE;
}
static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
+ return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
@@ -2405,8 +2489,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = man_trk_ctl_enable_bit_get(display);
/* SF partial frame enable has to be set even on full update */
@@ -2420,7 +2502,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
@@ -2474,13 +2556,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
- (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
+ (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
y_alignment = vdsc_cfg->slice_height;
else
y_alignment = crtc_state->su_y_granularity;
@@ -2601,12 +2682,11 @@ static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/* Wa_14014971492 */
if (!crtc_state->has_panel_replay &&
((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
+ display->platform.alderlake_p || display->platform.tigerlake)) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2807,7 +2887,6 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
@@ -2839,7 +2918,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
new_crtc_state->has_sel_update != psr->sel_update_enabled ||
new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
- (DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled))
+ (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
intel_psr_disable_locked(intel_dp);
else if (new_crtc_state->wm_level_disabled)
/* Wa_14015648006 */
@@ -3322,7 +3401,7 @@ void intel_psr_flush(struct intel_display *display,
* we have to ensure that the PSR is not activated until
* intel_psr_resume() is called.
*/
- if (intel_dp->psr.paused)
+ if (intel_dp->psr.pause_counter)
goto unlock;
if (origin == ORIGIN_FLIP ||
@@ -3419,29 +3498,14 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
static void psr_alpm_check(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
- struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_psr *psr = &intel_dp->psr;
- u8 val;
- int r;
if (!psr->sel_update_enabled)
return;
- r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
- if (r != 1) {
- drm_err(display->drm, "Error reading ALPM status\n");
- return;
- }
-
- if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ if (intel_alpm_get_error(intel_dp)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(display->drm,
- "ALPM lock timeout error, disabling PSR\n");
-
- /* Clearing error */
- drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
}
}
@@ -3626,6 +3690,168 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
}
}
+/* Wa_16025596647 */
+static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ bool dc5_dc6_blocked;
+
+ if (!intel_dp->psr.active)
+ return;
+
+ dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
+
+ if (intel_dp->psr.sel_update_enabled)
+ psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
+ psr_compute_idle_frames(intel_dp));
+ else
+ intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
+ intel_dp->psr.pipe,
+ dc5_dc6_blocked);
+}
+
+static void psr_dc5_dc6_wa_work(struct work_struct *work)
+{
+ struct intel_display *display = container_of(work, typeof(*display),
+ psr_dc5_dc6_wa_work);
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+/**
+ * intel_psr_notify_dc5_dc6 - Notify PSR about DC5/DC6 being enabled/disabled
+ * @display: intel display struct
+ *
+ * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
+ * psr_dc5_dc6_wa_work used for applying/removing the workaround.
+ */
+void intel_psr_notify_dc5_dc6(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ schedule_work(&display->psr_dc5_dc6_wa_work);
+}
+
+/**
+ * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
+ * @display: intel display struct
+ *
+ * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
+ * psr_dc5_dc6_wa_work used for applying the workaround.
+ */
+void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
+}
+
+/**
+ * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
+ * @state: intel atomic state
+ * @crtc: intel crtc
+ * @enable: enable/disable
+ *
+ * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply
+ * or remove the workaround when a pipe is getting enabled/disabled.
+ */
+void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, bool enable)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_encoder *encoder;
+
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u8 active_non_psr_pipes;
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
+ goto unlock;
+
+ active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;
+
+ if (enable)
+ active_non_psr_pipes |= BIT(crtc->pipe);
+ else
+ active_non_psr_pipes &= ~BIT(crtc->pipe);
+
+ if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
+ goto unlock;
+
+ if ((enable && intel_dp->psr.active_non_psr_pipes) ||
+ (!enable && !intel_dp->psr.active_non_psr_pipes)) {
+ intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
+ goto unlock;
+ }
+
+ intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
+
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+/**
+ * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
+ * @display: intel display struct
+ * @enable: enable/disable
+ *
+ * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply
+ * or remove the workaround when vblank is getting enabled/disabled.
+ */
+void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
+ bool enable)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (intel_dp->psr.panel_replay_enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ break;
+ }
+
+ if (intel_dp->psr.enabled)
+ intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ return;
+ }
+
+ /*
+ * NOTE: intel_display_power_set_target_dc_state is used
+ * only by PSR code for DC3CO handling. DC3CO target
+ * state is currently disabled in PSR code. If DC3CO
+ * is taken into use we need to take that into account here
+ * as well.
+ */
+ intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
+ DC_STATE_EN_UPTO_DC6);
+}
+
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
@@ -3634,8 +3860,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
const char *status = "unknown";
u32 val, status_val;
- if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
- intel_dp->psr.panel_replay_enabled)) {
+ if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
+ (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
static const char * const live_status[] = {
"IDLE",
"CAPTURE",
@@ -3728,10 +3954,9 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
- intel_wakeref_t wakeref;
+ struct ref_tracker *wakeref;
bool enabled;
u32 val, psr2_ctl;
@@ -3740,7 +3965,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_display_rpm_get(display);
mutex_lock(&psr->lock);
intel_psr_print_mode(intel_dp, m);
@@ -3822,7 +4047,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
unlock:
mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_display_rpm_put(display, wakeref);
return 0;
}
@@ -3853,9 +4078,7 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct intel_display *display = data;
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- intel_wakeref_t wakeref;
int ret = -ENODEV;
if (!HAS_PSR(display))
@@ -3866,12 +4089,9 @@ i915_edp_psr_debug_set(void *data, u64 val)
drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
// TODO: split to each transcoder's PSR debug state
- ret = intel_psr_debug_set(intel_dp, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ with_intel_display_rpm(display)
+ ret = intel_psr_debug_set(intel_dp, val);
}
return ret;
@@ -4004,3 +4224,13 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
+
+bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * eDP Panel Replay always uses ALPM.
+ * PSR2 uses ALPM but PSR1 doesn't.
+ */
+ return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
+ crtc_state->has_panel_replay);
+}
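The new helper is presumably consumed by the ALPM code (e.g. by
intel_alpm_enable_sink(), an assumed call site) to bail out early:

	/* Sketch: skip sink ALPM programming when the PSR mode doesn't use it. */
	if (!intel_psr_needs_alpm(intel_dp, crtc_state))
		return;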
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index a43a374cff55..73c3fa40844b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -59,7 +59,13 @@ void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
-bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state);
+bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state);
+void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
+ struct intel_crtc *crtc, bool enable);
+void intel_psr_notify_dc5_dc6(struct intel_display *display);
+void intel_psr_dc5_dc6_wa_init(struct intel_display *display);
+void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
+ bool enable);
bool intel_psr_link_ok(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
@@ -67,7 +73,9 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
+bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 795e6b9cc575..248136456048 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -325,8 +325,8 @@
#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(20, 16)
#define PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(12, 8)
-#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION_MASK, val)
#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK REG_GENMASK(4, 0)
-#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION_MASK, val)
+#define PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(val) REG_FIELD_PREP(PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION_MASK, val)
#endif /* __INTEL_PSR_REGS_H__ */
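The two register hunks above fix copy-paste bugs: the FIRST/LAST duration
values were packed with the HALF_CYCLE mask and so landed in the wrong bit
field. With the fix each value goes into its own field (values arbitrary):

	u32 val = PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(0x10) |	   /* bits 20:16 */
		  PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(0x8) | /* bits 12:8 */
		  PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(0x4);   /* bits 4:0 */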
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 6e2d9929b4d7..8a38df2c0283 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -36,9 +36,9 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -214,18 +214,17 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
struct intel_display *display = to_intel_display(&intel_sdvo->base);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 bval = val, cval = val;
int i;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_de_write(display, intel_sdvo->sdvo_reg, val);
intel_de_posting_read(display, intel_sdvo->sdvo_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- if (HAS_PCH_IBX(dev_priv)) {
+ if (HAS_PCH_IBX(display)) {
intel_de_write(display, intel_sdvo->sdvo_reg, val);
intel_de_posting_read(display, intel_sdvo->sdvo_reg);
}
@@ -1360,14 +1359,13 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *mode = &pipe_config->hw.mode;
- if (HAS_PCH_SPLIT(i915)) {
+ if (HAS_PCH_SPLIT(display)) {
pipe_config->has_pch_encoder = true;
if (!intel_fdi_compute_pipe_bpp(pipe_config))
return -EINVAL;
@@ -1527,7 +1525,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_encoder);
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
@@ -1634,7 +1631,7 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
@@ -1670,13 +1667,12 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
bool intel_sdvo_port_enabled(struct intel_display *display,
i915_reg_t sdvo_reg, enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
val = intel_de_read(display, sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
- if (HAS_PCH_CPT(dev_priv))
+ if (HAS_PCH_CPT(display))
*pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
else if (display->platform.cherryview)
*pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
@@ -1841,7 +1837,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
@@ -1861,7 +1856,7 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ if (HAS_PCH_IBX(display) && crtc->pipe == PIPE_B) {
/*
* We get CPU/PCH FIFO underruns on the other pipe when
* doing the workaround. Sweep them under the rug.
@@ -2036,7 +2031,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
struct intel_display *display = to_intel_display(&intel_sdvo->base);
u16 hotplug;
- if (!I915_HAS_HOTPLUG(display))
+ if (!HAS_HOTPLUG(display))
return 0;
/*
@@ -3367,9 +3362,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
static bool is_sdvo_port_valid(struct intel_display *display, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (HAS_PCH_SPLIT(dev_priv))
+ if (HAS_PCH_SPLIT(display))
return port == PORT_B;
else
return port == PORT_B || port == PORT_C;
@@ -3384,7 +3377,6 @@ static bool assert_sdvo_port_valid(struct intel_display *display, enum port port
bool intel_sdvo_init(struct intel_display *display,
i915_reg_t sdvo_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
@@ -3427,7 +3419,7 @@ bool intel_sdvo_init(struct intel_display *display,
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(display)) {
intel_encoder->disable = pch_disable_sdvo;
intel_encoder->post_disable = pch_post_disable_sdvo;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index c6321dafef4f..74bb3bedf30f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -41,12 +41,12 @@ static s64 interp(s64 x, s64 x1, s64 x2, s64 y1, s64 y2)
{
s64 dydx;
- dydx = DIV_ROUND_UP_ULL((y2 - y1) * 100000, (x2 - x1));
+ dydx = DIV64_U64_ROUND_UP((y2 - y1) * 100000, (x2 - x1));
- return (y1 + DIV_ROUND_UP_ULL(dydx * (x - x1), 100000));
+ return (y1 + DIV64_U64_ROUND_UP(dydx * (x - x1), 100000));
}
-static void get_ana_cp_int_prop(u32 vco_clk,
+static void get_ana_cp_int_prop(u64 vco_clk,
u32 refclk_postscalar,
int mpll_ana_v2i,
int c, int a,
@@ -115,16 +115,16 @@ static void get_ana_cp_int_prop(u32 vco_clk,
CURVE0_MULTIPLIER));
scaled_interpolated_sqrt =
- int_sqrt(DIV_ROUND_UP_ULL(interpolated_product, vco_div_refclk_float) *
+ int_sqrt(DIV64_U64_ROUND_UP(interpolated_product, vco_div_refclk_float) *
DIV_ROUND_DOWN_ULL(1000000000000ULL, 55));
/* Scale vco_div_refclk for ana_cp_int */
scaled_vco_div_refclk2 = DIV_ROUND_UP_ULL(vco_div_refclk_float, 1000000);
- adjusted_vco_clk2 = 1460281 * DIV_ROUND_UP_ULL(scaled_interpolated_sqrt *
+ adjusted_vco_clk2 = 1460281 * DIV64_U64_ROUND_UP(scaled_interpolated_sqrt *
scaled_vco_div_refclk2,
curve_1_interpolated);
- *ana_cp_prop = DIV_ROUND_UP_ULL(adjusted_vco_clk2, curve_2_scaled2);
+ *ana_cp_prop = DIV64_U64_ROUND_UP(adjusted_vco_clk2, curve_2_scaled2);
*ana_cp_prop = max(1, min(*ana_cp_prop, 127));
}
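The switch from DIV_ROUND_UP_ULL to DIV64_U64_ROUND_UP matters because the
former only takes a 32-bit divisor; with vco_clk widened to u64 the divisor
expressions here can exceed 32 bits and would be truncated. A standalone
illustration of round-up division with a full 64-bit divisor:

	#include <stdint.h>
	#include <stdio.h>

	/* Same semantics as the kernel's DIV64_U64_ROUND_UP. */
	static uint64_t div64_u64_round_up(uint64_t n, uint64_t d)
	{
		return (n + d - 1) / d;
	}

	int main(void)
	{
		uint64_t n = 1ull << 40;
		uint64_t d = (1ull << 33) + 3;	/* divisor wider than 32 bits */

		printf("%llu\n", (unsigned long long)div64_u64_round_up(n, d));
		return 0;
	}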
@@ -165,10 +165,10 @@ static void compute_hdmi_tmds_pll(u64 pixel_clock, u32 refclk,
/* Select appropriate v2i point */
if (datarate <= INTEL_SNPS_PHY_HDMI_9999MHZ) {
mpll_ana_v2i = 2;
- tx_clk_div = ilog2(DIV_ROUND_DOWN_ULL(INTEL_SNPS_PHY_HDMI_9999MHZ, datarate));
+ tx_clk_div = ilog2(div64_u64(INTEL_SNPS_PHY_HDMI_9999MHZ, datarate));
} else {
mpll_ana_v2i = 3;
- tx_clk_div = ilog2(DIV_ROUND_DOWN_ULL(INTEL_SNPS_PHY_HDMI_16GHZ, datarate));
+ tx_clk_div = ilog2(div64_u64(INTEL_SNPS_PHY_HDMI_16GHZ, datarate));
}
vco_clk = (datarate << tx_clk_div) >> 1;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b9acd9fe160c..2b53ac9f4935 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,8 @@
#include <linux/math.h>
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 1ad6c8a94b3d..fd92e6b89b43 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -36,9 +36,10 @@
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "i9xx_plane.h"
#include "intel_atomic_plane.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
index 1d0b84b464c1..4981cc34da05 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -3,21 +3,21 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_sprite_uapi.h"
-static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+static bool has_dst_key_in_primary_plane(struct intel_display *display)
{
- return DISPLAY_VER(dev_priv) >= 9;
+ return DISPLAY_VER(display) >= 9;
}
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
*key = *set;
@@ -34,7 +34,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
* On SKL+ we want dst key enabled on
* the primary and not on the sprite.
*/
- if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ if (DISPLAY_VER(display) >= 9 && plane->id != PLANE_PRIMARY &&
set->flags & I915_SET_COLORKEY_DESTINATION)
key->flags = 0;
}
@@ -43,7 +43,6 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct intel_display *display = to_intel_display(dev);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
@@ -61,7 +60,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -74,7 +73,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* Also multiple planes can't do destination keying on the same
* pipe simultaneously.
*/
- if (DISPLAY_VER(dev_priv) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
to_intel_plane(plane)->id >= PLANE_3 &&
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
@@ -99,7 +98,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
* On some platforms we have to configure
* the dst colorkey on the primary plane.
*/
- if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ if (!ret && has_dst_key_in_primary_plane(display)) {
struct intel_crtc *crtc =
intel_crtc_for_pipe(display,
to_intel_plane(plane)->pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index b8d14ed8a56e..c1014e74791f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -3,8 +3,10 @@
* Copyright © 2019 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -92,11 +94,6 @@ static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
return dig_port->tc;
}
-static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
-{
- return to_i915(tc->dig_port->base.base.dev);
-}
-
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
enum tc_port_mode mode)
{
@@ -219,10 +216,11 @@ __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain doma
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
+ struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
- drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
+ drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
#endif
__tc_cold_unblock(tc, domain, wakeref);
}
@@ -266,13 +264,13 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 lane_mask;
- lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
- drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
@@ -281,13 +279,13 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 pin_mask;
- pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
+ pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
- drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+ drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
@@ -297,13 +295,12 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
struct intel_display *display = to_intel_display(dig_port);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
intel_wakeref_t wakeref;
u32 val, pin_assignment;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
pin_assignment =
REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
@@ -369,7 +366,7 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
@@ -377,10 +374,10 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
assert_tc_cold_blocked(tc);
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 20)
return lnl_tc_port_get_max_lane_count(dig_port);
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return mtl_tc_port_get_max_lane_count(dig_port);
return intel_tc_port_get_max_lane_count(dig_port);
@@ -389,20 +386,20 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool lane_reversal = dig_port->lane_reversal;
u32 val;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
lane_reversal && tc->mode != TC_PORT_LEGACY);
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);
switch (required_lanes) {
@@ -423,16 +420,16 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
MISSING_CASE(required_lanes);
}
- intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
u32 live_status_mask)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 valid_hpd_mask;
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
if (hweight32(live_status_mask) != 1)
return;
@@ -447,7 +444,7 @@ static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
return;
/* If live status mismatches the VBT flag, trust the live status. */
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
tc->port_name, live_status_mask, valid_hpd_mask);
@@ -490,21 +487,20 @@ icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
- u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
intel_wakeref_t wakeref;
u32 fia_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
- fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
- pch_isr = intel_de_read(i915, SDEISR);
+ fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ pch_isr = intel_de_read(display, SDEISR);
}
if (fia_isr == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, nothing connected\n",
tc->port_name);
return mask;
@@ -531,14 +527,14 @@ static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -550,14 +546,14 @@ static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, can't %s ownership\n",
tc->port_name, take ? "take" : "release");
@@ -568,21 +564,21 @@ static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
if (take)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
- intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
+ intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
return true;
}
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
+ val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assume not owned\n",
tc->port_name);
return false;
@@ -619,30 +615,30 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
int max_lanes;
max_lanes = intel_tc_port_max_lane_count(dig_port);
if (tc->mode == TC_PORT_LEGACY) {
- drm_WARN_ON(&i915->drm, max_lanes != 4);
+ drm_WARN_ON(display->drm, max_lanes != 4);
return true;
}
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);
/*
* Now we have to re-check the live state, in case the port recently
* became disconnected. Not necessary for legacy mode.
*/
if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
tc->port_name);
return false;
}
if (max_lanes < required_lanes) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY max lanes %d < required lanes %d\n",
tc->port_name,
max_lanes, required_lanes);
@@ -655,7 +651,7 @@ static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
tc->lock_wakeref = tc_cold_block(tc);
@@ -664,8 +660,8 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
if ((!tc_phy_is_ready(tc) ||
!icl_tc_phy_take_ownership(tc, true)) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
tc->port_name,
str_yes_no(tc_phy_is_ready(tc)));
goto out_unblock_tc_cold;
@@ -733,14 +729,13 @@ tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
intel_wakeref_t wakeref;
u32 val;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
- val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
+ val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, val == 0xffffffff);
+ drm_WARN_ON(display->drm, val == 0xffffffff);
tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
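
tc_phy_load_fia_params() itself is outside this hunk; as a minimal sketch of the assumed layout, a modular FIA serves two TC ports per instance, while the non-modular design routes every port through FIA1:

	/* sketch, assuming the modular-FIA mapping described above */
	if (modular_fia) {
		tc->phy_fia = tc_port / 2;	/* two TC ports per FIA */
		tc->phy_fia_idx = tc_port % 2;
	} else {
		tc->phy_fia = FIA1;
		tc->phy_fia_idx = tc_port;
	}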
@@ -775,19 +770,18 @@ adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 cpu_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
@@ -810,15 +804,15 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
*/
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
u32 val;
assert_display_core_power_enabled(tc);
- val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY in TCCOLD, assuming not ready\n",
tc->port_name);
return false;
@@ -830,12 +824,12 @@ static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
assert_tc_port_power_enabled(tc);
- intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
return true;
@@ -843,13 +837,13 @@ static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
u32 val;
assert_tc_port_power_enabled(tc);
- val = intel_de_read(i915, DDI_BUF_CTL(port));
+ val = intel_de_read(display, DDI_BUF_CTL(port));
return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -872,7 +866,6 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
@@ -885,15 +878,15 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
port_wakeref = intel_display_power_get(display, port_power_domain);
if (!adlp_tc_phy_take_ownership(tc, true) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
tc->port_name);
goto out_put_port_power;
}
if (!tc_phy_is_ready(tc) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
+ drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
tc->port_name);
goto out_release_phy;
}
@@ -965,19 +958,18 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct drm_i915_private *i915 = tc_to_i915(tc);
struct intel_digital_port *dig_port = tc->dig_port;
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
- u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
- u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 pica_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
- pch_isr = intel_de_read(i915, SDEISR);
+ pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
+ pch_isr = intel_de_read(display, SDEISR);
}
if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
@@ -994,22 +986,22 @@ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TCSS_POWER_STATE;
+ return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
			    tc->port_name,
			    str_enabled_disabled(enabled));
@@ -1069,7 +1061,7 @@ static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool ena
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
__xelpdp_tc_phy_enable_tcss_power(tc, enable);
@@ -1082,7 +1074,7 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl
return true;
out_disable:
- if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
+ if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
return false;
if (!enable)
@@ -1096,35 +1088,35 @@ out_disable:
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, reg);
+ val = intel_de_read(display, reg);
if (take)
val |= XELPDP_TC_PHY_OWNERSHIP;
else
val &= ~XELPDP_TC_PHY_OWNERSHIP;
- intel_de_write(i915, reg, val);
+ intel_de_write(display, reg, val);
}
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
assert_tc_cold_blocked(tc);
- return intel_de_read(i915, reg) & XELPDP_TC_PHY_OWNERSHIP;
+ return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
intel_wakeref_t tc_cold_wref;
enum intel_display_power_domain domain;
@@ -1134,7 +1126,7 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
if (tc->mode != TC_PORT_DISCONNECTED)
tc->lock_wakeref = tc_cold_block(tc);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
(tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
!xelpdp_tc_phy_tcss_power_is_enabled(tc));
@@ -1207,13 +1199,13 @@ tc_phy_cold_off_domain(struct intel_tc_port *tc)
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 mask;
mask = tc->phy_ops->hpd_live_status(tc);
/* The sink can be connected only in a single mode. */
- drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
+ drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);
return mask;
}
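
The warning above relies on the live-status mask being one-hot across the TC_PORT_* modes. A minimal sketch of mapping such a mask back to a mode, assuming the enum ordering (hypothetical helper name; the driver's own lookup is not part of this hunk):

	static enum tc_port_mode sketch_hpd_live_mode(u32 live_status_mask)
	{
		/* fls() is 1-based and the mask is one-hot */
		return live_status_mask ? fls(live_status_mask) - 1 :
					  TC_PORT_DISCONNECTED;
	}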
@@ -1236,9 +1228,9 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc)
static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
bool phy_is_ready, bool phy_is_owned)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
+ drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
return phy_is_ready && phy_is_owned;
}
@@ -1246,8 +1238,7 @@ static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
static bool tc_phy_is_connected(struct intel_tc_port *tc,
enum icl_port_dpll_id port_pll_type)
{
- struct intel_encoder *encoder = &tc->dig_port->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(tc->dig_port);
bool phy_is_ready = tc_phy_is_ready(tc);
bool phy_is_owned = tc_phy_is_owned(tc);
bool is_connected;
@@ -1257,7 +1248,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
else
is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
tc->port_name,
str_yes_no(is_connected),
@@ -1270,10 +1261,10 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
if (wait_for(tc_phy_is_ready(tc), 500)) {
- drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
+ drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
return false;
@@ -1343,7 +1334,7 @@ get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
bool phy_is_ready;
bool phy_is_owned;
@@ -1363,11 +1354,11 @@ tc_phy_get_current_mode(struct intel_tc_port *tc)
if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
} else {
- drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
+ drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
tc->port_name,
tc_port_mode_name(mode),
@@ -1407,7 +1398,7 @@ tc_phy_get_target_mode(struct intel_tc_port *tc)
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
u32 live_status_mask = tc_phy_hpd_live_status(tc);
bool connected;
@@ -1421,7 +1412,7 @@ static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
connected = tc->phy_ops->connect(tc, required_lanes);
}
- drm_WARN_ON(&i915->drm, !connected);
+ drm_WARN_ON(display->drm, !connected);
}
static void tc_phy_disconnect(struct intel_tc_port *tc)
@@ -1491,12 +1482,12 @@ static void __intel_tc_port_put_link(struct intel_tc_port *tc)
static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
assert_tc_port_power_enabled(tc);
- return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
DDI_BUF_CTL_ENABLE;
}
@@ -1509,15 +1500,15 @@ static bool tc_port_is_enabled(struct intel_tc_port *tc)
*/
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
bool update_mode = false;
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->lock_wakeref);
- drm_WARN_ON(&i915->drm, tc->link_refcount);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->lock_wakeref);
+ drm_WARN_ON(display->drm, tc->link_refcount);
tc_phy_get_hw_state(tc);
/*
@@ -1540,8 +1531,8 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
if (!tc_port_is_enabled(tc)) {
update_mode = true;
} else if (tc->mode == TC_PORT_DISCONNECTED) {
- drm_WARN_ON(&i915->drm, !tc->legacy_port);
- drm_err(&i915->drm,
+ drm_WARN_ON(display->drm, !tc->legacy_port);
+ drm_err(display->drm,
"Port %s: PHY disconnected on enabled port, connecting it\n",
tc->port_name);
update_mode = true;
@@ -1556,28 +1547,28 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
mutex_unlock(&tc->lock);
}
-static bool tc_port_has_active_links(struct intel_tc_port *tc,
- const struct intel_crtc_state *crtc_state)
+static bool tc_port_has_active_streams(struct intel_tc_port *tc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
- int active_links = 0;
+ int active_streams = 0;
if (dig_port->dp.is_mst) {
/* TODO: get the PLL type for MST, once HW readout is done for it. */
- active_links = intel_dp_mst_encoder_active_links(dig_port);
+ active_streams = intel_dp_mst_active_streams(&dig_port->dp);
} else if (crtc_state && crtc_state->hw.active) {
pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
- active_links = 1;
+ active_streams = 1;
}
- if (active_links && !tc_phy_is_connected(tc, pll_type))
- drm_err(&i915->drm,
- "Port %s: PHY disconnected with %d active link(s)\n",
- tc->port_name, active_links);
+ if (active_streams && !tc_phy_is_connected(tc, pll_type))
+ drm_err(display->drm,
+ "Port %s: PHY disconnected with %d active stream(s)\n",
+ tc->port_name, active_streams);
- return active_links;
+ return active_streams;
}
/**
@@ -1595,13 +1586,13 @@ static bool tc_port_has_active_links(struct intel_tc_port *tc,
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
mutex_lock(&tc->lock);
- drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
- if (!tc_port_has_active_links(tc, crtc_state)) {
+ drm_WARN_ON(display->drm, tc->link_refcount != 1);
+ if (!tc_port_has_active_streams(tc, crtc_state)) {
/*
* TBT-alt is the default mode in any case where PHY ownership is not
* held (regardless of the sink's connected live state), so
@@ -1610,7 +1601,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
if (tc->init_mode != TC_PORT_TBT_ALT &&
tc->init_mode != TC_PORT_DISCONNECTED)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
tc->port_name,
tc_port_mode_name(tc->init_mode));
@@ -1618,7 +1609,7 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
__intel_tc_port_put_link(tc);
}
- drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
tc->port_name,
tc_port_mode_name(tc->mode));
@@ -1637,12 +1628,12 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
*/
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
u32 mask = ~0;
- drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
if (tc->mode != TC_PORT_DISCONNECTED)
mask = BIT(tc->mode);
@@ -1677,14 +1668,14 @@ static int reset_link_commit(struct intel_tc_port *tc,
struct intel_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
struct intel_crtc *crtc;
u8 pipe_mask;
int ret;
- ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
if (ret)
return ret;
@@ -1695,7 +1686,7 @@ static int reset_link_commit(struct intel_tc_port *tc,
if (!pipe_mask)
return 0;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -1713,13 +1704,13 @@ static int reset_link_commit(struct intel_tc_port *tc,
static int reset_link(struct intel_tc_port *tc)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *_state;
struct intel_atomic_state *state;
int ret;
- _state = drm_atomic_state_alloc(&i915->drm);
+ _state = drm_atomic_state_alloc(display->drm);
if (!_state)
return -ENOMEM;
@@ -1738,21 +1729,21 @@ static void intel_tc_port_link_reset_work(struct work_struct *work)
{
struct intel_tc_port *tc =
container_of(work, struct intel_tc_port, link_reset_work.work);
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
int ret;
if (!__intel_tc_port_link_needs_reset(tc))
return;
- mutex_lock(&i915->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Port %s: TypeC DP-alt sink disconnected, resetting link\n",
tc->port_name);
ret = reset_link(tc);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- mutex_unlock(&i915->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
}
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
@@ -1780,7 +1771,7 @@ void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
static void __intel_tc_port_lock(struct intel_tc_port *tc,
int required_lanes)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
mutex_lock(&tc->lock);
@@ -1790,9 +1781,8 @@ static void __intel_tc_port_lock(struct intel_tc_port *tc,
intel_tc_port_update_mode(tc, required_lanes,
false);
- drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
- drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
- !tc_phy_is_owned(tc));
+ drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
@@ -1885,12 +1875,12 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc;
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
+ if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
return -EINVAL;
tc = kzalloc(sizeof(*tc), GFP_KERNEL);
@@ -1900,11 +1890,11 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc = tc;
tc->dig_port = dig_port;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
tc->phy_ops = &xelpdp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
tc->phy_ops = &adlp_tc_phy_ops;
- else if (DISPLAY_VER(i915) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
tc->phy_ops = &tgl_tc_phy_ops;
else
tc->phy_ops = &icl_tc_phy_ops;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 5dbe857ea85b..acf0b3733908 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,15 +33,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_display_irq.h"
#include "intel_display_driver.h"
+#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
@@ -1585,19 +1585,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
{
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
/* Disable TV interrupts around load detect or we'll recurse */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
- i915_disable_pipestat(dev_priv, 0,
+ spin_lock_irq(&display->irq.lock);
+ i915_disable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
save_tv_dac = tv_dac = intel_de_read(display, TV_DAC);
@@ -1668,11 +1666,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, 0,
+ spin_lock_irq(&display->irq.lock);
+ i915_enable_pipestat(display, 0,
PIPE_HOTPLUG_INTERRUPT_STATUS |
PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&display->irq.lock);
}
return type;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 7b240ce681a0..139fa5deba80 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -224,12 +224,13 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
*/
if (DISPLAY_VER(display) >= 20 || display->platform.battlemage)
return 1;
- else if (DISPLAY_VER(display) == 2)
- return -1;
- else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return 2;
- else
+ else if (DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell)
+ return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ? 2 : 1;
+ else if (DISPLAY_VER(display) >= 3)
return 1;
+ else
+ return -1;
}
/*
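
A short sketch of how the offset chosen above is consumed: the vblank code adds it to the raw hardware scanline counter and wraps at vtotal, so a negative offset (display version 2) just shifts the wrap point (helper name hypothetical; the pattern matches scanline conversions used elsewhere in the driver):

	/* sketch: raw HW scanline -> logical scanline */
	static int sketch_logical_scanline(int hw_scanline, int scanline_offset,
					   int vtotal)
	{
		/* + vtotal keeps the modulo non-negative for offset == -1 */
		return (hw_scanline + scanline_offset + vtotal) % vtotal;
	}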
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 3ed64c17bdff..8e799e225af1 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -9,6 +9,7 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "i915_utils.h"
#include "intel_crtc.h"
@@ -259,6 +260,15 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config
return 0;
}
+static bool is_dsi_dsc_1_1(struct intel_crtc_state *crtc_state)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+
+ return vdsc_cfg->dsc_version_major == 1 &&
+ vdsc_cfg->dsc_version_minor == 1 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI);
+}
+
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(pipe_config);
@@ -317,8 +327,19 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
* From XE_LPD onwards we support compression bpps in steps of 1
* up to uncompressed bpp-1, hence add calculations for all the rc
* parameters
+ *
+ * We don't want to calculate all rc parameters when the panel
+ * is MIPI DSI and it's using DSC 1.1, because some DSI panel
+ * vendors have hardcoded PPS params in the VBT, causing the
+ * parameters sent from the source (which are derived through
+ * interpolation) to differ from the params the panel expects.
+ * This causes noise in the display.
+ * Furthermore, for DSI panels we are currently using a
+ * bits_per_pixel (compressed bpp) hardcoded from the VBT (unlike
+ * other encoders, where we find the optimum compressed bpp), so we
+ * don't need to rely on interpolation, as we can get the required
+ * rc parameters from the tables.
*/
- if (DISPLAY_VER(display) >= 13) {
+ if (DISPLAY_VER(display) >= 13 && !is_dsi_dsc_1_1(pipe_config)) {
calculate_rc_params(vdsc_cfg);
} else {
if ((compressed_bpp == 8 ||
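
In effect, the hunk above selects between the two RC-parameter sources like this (sketch; use_fixed_rc_tables() is a hypothetical stand-in for the legacy table-based path in the else branch):

	if (DISPLAY_VER(display) >= 13 && !is_dsi_dsc_1_1(pipe_config))
		calculate_rc_params(vdsc_cfg);	/* interpolated values */
	else
		use_fixed_rc_tables(vdsc_cfg);	/* pre-computed DSC tables */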
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 684b5d1bc87c..05d140c8032d 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -4,15 +4,20 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include <video/vga.h>
+
#include "soc/intel_gmch.h"
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
+#include "intel_display.h"
#include "intel_vga.h"
+#include "intel_vga_regs.h"
static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
{
@@ -24,16 +29,42 @@ static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
return VGACNTRL;
}
+static bool has_vga_pipe_sel(struct intel_display *display)
+{
+ if (display->platform.i845g ||
+ display->platform.i865g)
+ return false;
+
+ if (display->platform.valleyview ||
+ display->platform.cherryview)
+ return true;
+
+ return DISPLAY_VER(display) < 7;
+}
+
/* Disable the VGA plane that we never use */
void intel_vga_disable(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
+ enum pipe pipe;
+ u32 tmp;
u8 sr1;
- if (intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)
+ tmp = intel_de_read(display, vga_reg);
+ if (tmp & VGA_DISP_DISABLE)
return;
+ if (display->platform.cherryview)
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
+ else if (has_vga_pipe_sel(display))
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);
+ else
+ pipe = PIPE_A;
+
+ drm_dbg_kms(display->drm, "Disabling VGA plane on pipe %c\n",
+ pipe_name(pipe));
+
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(0x01, VGA_SEQ_I);
@@ -46,39 +77,6 @@ void intel_vga_disable(struct intel_display *display)
intel_de_posting_read(display, vga_reg);
}
-void intel_vga_redisable_power_on(struct intel_display *display)
-{
- i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
-
- if (!(intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)) {
- drm_dbg_kms(display->drm,
- "Something enabled VGA plane, disabling it\n");
- intel_vga_disable(display);
- }
-}
-
-void intel_vga_redisable(struct intel_display *display)
-{
- intel_wakeref_t wakeref;
-
- /*
- * This function can be called both from intel_modeset_setup_hw_state or
- * at a very early point in our resume sequence, where the power well
- * structures are not yet restored. Since this function is at a very
- * paranoid "someone might have enabled VGA while we were not looking"
- * level, just check if the power well is enabled instead of trying to
- * follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses.
- */
- wakeref = intel_display_power_get_if_enabled(display, POWER_DOMAIN_VGA);
- if (!wakeref)
- return;
-
- intel_vga_redisable_power_on(display);
-
- intel_display_power_put(display, POWER_DOMAIN_VGA, wakeref);
-}
-
void intel_vga_reset_io_mem(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
index 824dfc32a199..16d699f3b641 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.h
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -10,8 +10,6 @@ struct intel_display;
void intel_vga_reset_io_mem(struct intel_display *display);
void intel_vga_disable(struct intel_display *display);
-void intel_vga_redisable(struct intel_display *display);
-void intel_vga_redisable_power_on(struct intel_display *display);
int intel_vga_register(struct intel_display *display);
void intel_vga_unregister(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_vga_regs.h b/drivers/gpu/drm/i915/display/intel_vga_regs.h
new file mode 100644
index 000000000000..cbacced1a69f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga_regs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_REGS_H__
+#define __INTEL_VGA_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define VGACNTRL _MMIO(0x71400)
+#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
+#define CPU_VGACNTRL _MMIO(0x41000)
+#define VGA_DISP_DISABLE REG_BIT(31)
+#define VGA_2X_MODE REG_BIT(30) /* pre-ilk */
+#define VGA_PIPE_SEL_MASK REG_BIT(29) /* pre-ivb */
+#define VGA_PIPE_SEL(pipe) REG_FIELD_PREP(VGA_PIPE_SEL_MASK, (pipe))
+#define VGA_PIPE_SEL_MASK_CHV REG_GENMASK(29, 28) /* chv */
+#define VGA_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(VGA_PIPE_SEL_MASK_CHV, (pipe))
+#define VGA_BORDER_ENABLE REG_BIT(26)
+#define VGA_PIPE_CSC_ENABLE REG_BIT(24) /* ilk+ */
+#define VGA_CENTERING_ENABLE_MASK REG_GENMASK(25, 24) /* pre-ilk */
+#define VGA_PALETTE_READ_SEL REG_BIT(23) /* pre-ivb */
+#define VGA_PALETTE_A_WRITE_DISABLE REG_BIT(22) /* pre-ivb */
+#define VGA_PALETTE_B_WRITE_DISABLE REG_BIT(21) /* pre-ivb */
+#define VGA_LEGACY_8BIT_PALETTE_ENABLE REG_BIT(20)
+#define VGA_PALETTE_BYPASS REG_BIT(19)
+#define VGA_NINE_DOT_DISABLE REG_BIT(18)
+#define VGA_PALETTE_READ_SEL_HI_CHV REG_BIT(15) /* chv */
+#define VGA_PALETTE_C_WRITE_DISABLE_CHV REG_BIT(14) /* chv */
+#define VGA_ACTIVE_THROTTLING_MASK REG_GENMASK(15, 12) /* ilk+ */
+#define VGA_BLANK_THROTTLING_MASK REG_GENMASK(11, 8) /* ilk+ */
+#define VGA_BLINK_DUTY_CYCLE_MASK REG_GENMASK(7, 6)
+#define VGA_VSYNC_BLINK_RATE_MASK REG_GENMASK(5, 0)
+
+#endif /* __INTEL_VGA_REGS_H__ */
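
A brief usage sketch of the new pipe-select fields, mirroring the intel_vga_disable() hunk above (CHV carries a two-bit field, older platforms a single bit):

	i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
	u32 tmp = intel_de_read(display, vga_reg);
	enum pipe pipe;

	if (display->platform.cherryview)
		pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
	else
		pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);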
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index cac49319026d..c6565baf815a 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -4,6 +4,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -32,6 +34,8 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
fallthrough;
case DRM_MODE_CONNECTOR_DisplayPort:
+ if (connector->mst.dp)
+ return false;
intel_dp = intel_attached_dp(connector);
if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
@@ -182,7 +186,8 @@ is_cmrr_frac_required(struct intel_crtc_state *crtc_state)
int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- if (!HAS_CMRR(display))
+ /* Avoid CMRR for now till we have VRR with fixed timings working */
+ if (!HAS_CMRR(display) || true)
return false;
actual_refresh_k =
@@ -222,6 +227,121 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
return vtotal;
}
+static
+void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->cmrr.enable = true;
+ /*
+ * TODO: Compute precise target refresh rate to determine
+ * if video_mode_required should be true. Currently set to
+ * false due to uncertainty about the precise target
+ * refresh rate.
+ */
+ crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
+ crtc_state->vrr.vmin = crtc_state->vrr.vmax;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+static
+void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->vrr.enable = true;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+/*
+ * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
+ * Vtotal value.
+ */
+static
+int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal;
+
+ if (DISPLAY_VER(display) >= 13)
+ return crtc_vtotal;
+ else
+ return crtc_vtotal -
+ intel_vrr_real_vblank_delay(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+static
+int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return intel_vrr_fixed_rr_vtotal(crtc_state) -
+ intel_vrr_flipline_offset(display);
+}
+
+static
+int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_fixed_rr_vtotal(crtc_state);
+}
+
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmin(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ intel_vrr_fixed_rr_vmax(crtc_state) - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ intel_vrr_fixed_rr_flipline(crtc_state) - 1);
+}
+
+static
+void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
+{
+ /*
+ * For fixed rr, vmin = vmax = flipline.
+ * vmin is already set to crtc_vtotal; set vmax and flipline to the same.
+ */
+ crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
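+/*
+ * Sketch of the invariant established above: with
+ * vmin == vmax == flipline == crtc_vtotal the VRR timing generator
+ * degenerates into a fixed refresh rate, which is what
+ * intel_vrr_is_fixed_rr() checks again on state readout.
+ */
+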
+static
+int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state)
+{
+ /*
+ * To make fixed rr and vrr work seamlessly, the guardband/pipeline full
+ * should be set such that it satisfies both the fixed and variable
+ * timings.
+ * For this, set vmin to crtc_vtotal. With this we never need to
+ * change anything to do with the guardband.
+ */
+ return crtc_state->hw.adjusted_mode.crtc_vtotal;
+}
+
+static
+int intel_vrr_compute_vmax(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmax;
+
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ return vmax;
+}
+
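+/*
+ * Worked example for intel_vrr_compute_vmax() (hypothetical panel numbers):
+ * crtc_clock = 333250, crtc_htotal = 2200, min_vfreq = 48 gives
+ * vmax = 333250 * 1000 / (2200 * 48) = 3155 lines, i.e. vtotal is
+ * stretched until the refresh rate reaches the panel's 48 Hz floor;
+ * max_t() then keeps vmax from dropping below crtc_vtotal.
+ */
+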
void
intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -232,14 +352,9 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct intel_dp *intel_dp = intel_attached_dp(connector);
bool is_edp = intel_dp_is_edp(intel_dp);
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
- /*
- * FIXME all joined pipes share the same transcoder.
- * Need to account for that during VRR toggle/push/etc.
- */
- if (crtc_state->joiner_pipes)
+ if (!HAS_VRR(display))
return;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -247,28 +362,40 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->vrr.in_range =
intel_vrr_is_in_range(connector, drm_mode_vrefresh(adjusted_mode));
- if (!crtc_state->vrr.in_range)
- return;
-
- if (HAS_LRR(display))
- crtc_state->update_lrr = true;
- vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
- adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
- vmax = adjusted_mode->crtc_clock * 1000 /
- (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
+ /*
+ * Allow fixed refresh rate with VRR Timing Generator.
+ * For now set vrr.in_range to false, to allow fixed_rr but skip actual
+ * VRR and LRR.
+ * #TODO For actual VRR with joiner, we need to figure out how to
+ * correctly sequence transcoder level stuff vs. pipe level stuff
+ * in the commit.
+ */
+ if (crtc_state->joiner_pipes)
+ crtc_state->vrr.in_range = false;
- vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
- vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+ vmin = intel_vrr_compute_vmin(crtc_state);
- if (vmin >= vmax)
- return;
+ if (crtc_state->vrr.in_range) {
+ if (HAS_LRR(display))
+ crtc_state->update_lrr = true;
+ vmax = intel_vrr_compute_vmax(connector, adjusted_mode);
+ } else {
+ vmax = vmin;
+ }
crtc_state->vrr.vmin = vmin;
crtc_state->vrr.vmax = vmax;
crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+ if (crtc_state->uapi.vrr_enabled && vmin < vmax)
+ intel_vrr_compute_vrr_timings(crtc_state);
+ else if (is_cmrr_frac_required(crtc_state) && is_edp)
+ intel_vrr_compute_cmrr_timings(crtc_state);
+ else
+ intel_vrr_compute_fixed_rr_timings(crtc_state);
+
/*
* flipline determines the min vblank length the hardware will
* generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
@@ -276,29 +403,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
- /*
- * When panel is VRR capable and userspace has
- * not enabled adaptive sync mode then Fixed Average
- * Vtotal mode should be enabled.
- */
- if (crtc_state->uapi.vrr_enabled) {
- crtc_state->vrr.enable = true;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- } else if (is_cmrr_frac_required(crtc_state) && is_edp) {
- crtc_state->vrr.enable = true;
- crtc_state->cmrr.enable = true;
- /*
- * TODO: Compute precise target refresh rate to determine
- * if video_mode_required should be true. Currently set to
- * false due to uncertainty about the precise target
- * refresh Rate.
- */
- crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
- crtc_state->vrr.vmin = crtc_state->vrr.vmax;
- crtc_state->vrr.flipline = crtc_state->vrr.vmin;
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
- }
-
if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
@@ -340,7 +444,10 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- if (DISPLAY_VER(display) >= 13)
+ if (DISPLAY_VER(display) >= 14)
+ return VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else if (DISPLAY_VER(display) >= 13)
return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
else
@@ -380,14 +487,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
lower_32_bits(crtc_state->cmrr.cmrr_n));
}
- intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- crtc_state->vrr.vmin - 1);
- intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- crtc_state->vrr.vmax - 1);
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(crtc_state));
- intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- crtc_state->vrr.flipline - 1);
+ intel_vrr_set_fixed_rr_timings(crtc_state);
+
+ if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable)
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
if (HAS_AS_SDP(display))
intel_de_write(display,
@@ -461,6 +565,17 @@ bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
return intel_de_read(display, TRANS_PUSH(display, cpu_transcoder)) & TRANS_PUSH_SEND;
}
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
+{
+ if (!HAS_VRR(display))
+ return false;
+
+ if (DISPLAY_VER(display) >= 30)
+ return true;
+
+ return false;
+}
+
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -469,16 +584,25 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->vrr.enable)
return;
+ intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
+ crtc_state->vrr.vmin - 1);
+ intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
+ crtc_state->vrr.vmax - 1);
+ intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
+ crtc_state->vrr.flipline - 1);
+
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
TRANS_PUSH_EN);
- if (crtc_state->cmrr.enable) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
- trans_vrr_ctl(crtc_state));
- } else {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ if (crtc_state->cmrr.enable) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
+ trans_vrr_ctl(crtc_state));
+ } else {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ }
}
}
@@ -490,24 +614,77 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->vrr.enable)
return;
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
+ intel_de_wait_for_clear(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+ }
+
+ intel_vrr_set_fixed_rr_timings(old_crtc_state);
+}
+
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ if (!intel_vrr_always_use_vrr_tg(display)) {
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(crtc_state));
+ return;
+ }
+
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN);
+
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+}
+
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!HAS_VRR(display))
+ return;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
+
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+
+ intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.flipline &&
+ crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
+ crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+}
+
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 trans_vrr_ctl, trans_vrr_vsync;
+ bool vrr_enable;
trans_vrr_ctl = intel_de_read(display,
TRANS_VRR_CTL(display, cpu_transcoder));
- crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
if (HAS_CMRR(display))
crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE);
@@ -536,6 +713,16 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
+ /*
+ * For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
+ * bits are not filled. Since for these platforms TRANS_VRR_VMIN is always
+ * filled with crtc_vtotal, use TRANS_VRR_VMIN to get the vtotal for
+ * adjusted_mode.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->hw.adjusted_mode.crtc_vtotal =
+ intel_vrr_vmin_vtotal(crtc_state);
+
if (HAS_AS_SDP(display)) {
trans_vrr_vsync =
intel_de_read(display,
@@ -547,6 +734,18 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
}
}
+ vrr_enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->vrr.enable = vrr_enable && !intel_vrr_is_fixed_rr(crtc_state);
+ else
+ crtc_state->vrr.enable = vrr_enable;
+
+ /*
+ * #TODO: For both VRR and CMRR the flag I915_MODE_FLAG_VRR is set in mode_flags.
+ * Since CMRR is currently disabled, set this flag for VRR for now.
+ * Need to keep this in mind while re-enabling CMRR.
+ */
if (crtc_state->vrr.enable)
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 514822577e8a..38bf9996b883 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -13,6 +13,7 @@ struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_dsb;
+struct intel_display;
bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
@@ -35,5 +36,10 @@ int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
+void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index f00f4cfc58e5..bba82e888db2 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,15 +5,18 @@
#include <linux/debugfs.h>
-#include "i915_drv.h"
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
#include "i9xx_wm.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
- * @i915: i915 device
+ * @display: display device
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
@@ -44,10 +47,10 @@
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
-void intel_update_watermarks(struct drm_i915_private *i915)
+void intel_update_watermarks(struct intel_display *display)
{
- if (i915->display.funcs.wm->update_wm)
- i915->display.funcs.wm->update_wm(i915);
+ if (display->funcs.wm->update_wm)
+ display->funcs.wm->update_wm(display);
}
int intel_wm_compute(struct intel_atomic_state *state,
@@ -64,10 +67,10 @@ int intel_wm_compute(struct intel_atomic_state *state,
bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->initial_watermarks) {
- i915->display.funcs.wm->initial_watermarks(state, crtc);
+ if (display->funcs.wm->initial_watermarks) {
+ display->funcs.wm->initial_watermarks(state, crtc);
return true;
}
@@ -77,41 +80,41 @@ bool intel_initial_watermarks(struct intel_atomic_state *state,
void intel_atomic_update_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->atomic_update_watermarks)
- i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
+ if (display->funcs.wm->atomic_update_watermarks)
+ display->funcs.wm->atomic_update_watermarks(state, crtc);
}
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->optimize_watermarks)
- i915->display.funcs.wm->optimize_watermarks(state, crtc);
+ if (display->funcs.wm->optimize_watermarks)
+ display->funcs.wm->optimize_watermarks(state, crtc);
}
int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- if (i915->display.funcs.wm->compute_global_watermarks)
- return i915->display.funcs.wm->compute_global_watermarks(state);
+ if (display->funcs.wm->compute_global_watermarks)
+ return display->funcs.wm->compute_global_watermarks(state);
return 0;
}
-void intel_wm_get_hw_state(struct drm_i915_private *i915)
+void intel_wm_get_hw_state(struct intel_display *display)
{
- if (i915->display.funcs.wm->get_hw_state)
- return i915->display.funcs.wm->get_hw_state(i915);
+ if (display->funcs.wm->get_hw_state)
+ return display->funcs.wm->get_hw_state(display);
}
-void intel_wm_sanitize(struct drm_i915_private *i915)
+void intel_wm_sanitize(struct intel_display *display)
{
- if (i915->display.funcs.wm->sanitize)
- return i915->display.funcs.wm->sanitize(i915);
+ if (display->funcs.wm->sanitize)
+ return display->funcs.wm->sanitize(display);
}
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
@@ -137,16 +140,16 @@ bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
return plane_state->uapi.visible;
}
-void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[])
{
int level;
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
if (latency == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency not provided\n",
name, level);
continue;
@@ -156,43 +159,43 @@ void intel_print_wm_latency(struct drm_i915_private *dev_priv,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
latency *= 10;
else if (level > 0)
latency *= 5;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s WM%d latency %u (%u.%u usec)\n", name, level,
wm[level], latency / 10, latency % 10);
}
}
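	/*
	 * Worked example of the unit handling above: a raw WM2 value of 12
	 * on a pre-gen9 platform is in 0.5 us units, so 12 * 5 = 60 tenths
	 * of a microsecond, printed as "6.0 usec"; on gen9+ the same raw
	 * value is already in microseconds: 12 * 10 = 120 -> "12.0 usec".
	 */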
-void intel_wm_init(struct drm_i915_private *i915)
+void intel_wm_init(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 9)
- skl_wm_init(i915);
+ if (DISPLAY_VER(display) >= 9)
+ skl_wm_init(display);
else
- i9xx_wm_init(i915);
+ i9xx_wm_init(display);
}
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
int level;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
unsigned int latency = wm[level];
/*
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (DISPLAY_VER(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) >= 9 ||
+ display->platform.valleyview ||
+ display->platform.cherryview ||
+ display->platform.g4x)
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -201,18 +204,18 @@ static void wm_latency_show(struct seq_file *m, const u16 wm[8])
level, wm[level], latency / 10, latency % 10);
}
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
}
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
wm_latency_show(m, latencies);
@@ -221,13 +224,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
wm_latency_show(m, latencies);
@@ -236,13 +239,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
const u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
wm_latency_show(m, latencies);
@@ -251,39 +254,39 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ if (DISPLAY_VER(display) < 5 && !display->platform.g4x)
return -ENODEV;
- return single_open(file, pri_wm_latency_show, dev_priv);
+ return single_open(file, pri_wm_latency_show, display);
}
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, spr_wm_latency_show, dev_priv);
+ return single_open(file, spr_wm_latency_show, display);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct intel_display *display = inode->i_private;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
return -ENODEV;
- return single_open(file, cur_wm_latency_show, dev_priv);
+ return single_open(file, cur_wm_latency_show, display);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp, u16 wm[8])
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 new[8] = {};
int level;
int ret;
@@ -300,15 +303,15 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
&new[0], &new[1], &new[2], &new[3],
&new[4], &new[5], &new[6], &new[7]);
- if (ret != dev_priv->display.wm.num_levels)
+ if (ret != display->wm.num_levels)
return -EINVAL;
- drm_modeset_lock_all(&dev_priv->drm);
+ drm_modeset_lock_all(display->drm);
- for (level = 0; level < dev_priv->display.wm.num_levels; level++)
+ for (level = 0; level < display->wm.num_levels; level++)
wm[level] = new[level];
- drm_modeset_unlock_all(&dev_priv->drm);
+ drm_modeset_unlock_all(display->drm);
return len;
}
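
The write path above validates via sscanf()'s return value: input is rejected unless it converts exactly num_levels fields, and only then is the table updated under the modeset lock. A userspace model of the parse step (the copy_from_user() into a local buffer is elided; names hypothetical):

#include <stdio.h>

/* Model of the parse-and-validate step in wm_latency_write():
 * reject the input unless it yields exactly num_levels values. */
static int parse_latencies(const char *buf, unsigned short wm[8],
			   int num_levels)
{
	unsigned short new[8] = {0};
	int ret, level;

	ret = sscanf(buf, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -1;	/* -EINVAL in the driver */

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];
	return 0;
}

int main(void)
{
	unsigned short wm[8] = {0};

	printf("%d\n", parse_latencies("2 4 6 8 10 12 14 16", wm, 8)); /* 0: accepted */
	printf("%d\n", parse_latencies("2 4 6", wm, 8));               /* -1: too few */
	return 0;
}
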
@@ -317,13 +320,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.pri_latency;
+ latencies = display->wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -332,13 +335,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.spr_latency;
+ latencies = display->wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -347,13 +350,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
u16 *latencies;
- if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->display.wm.skl_latency;
+ if (DISPLAY_VER(display) >= 9)
+ latencies = display->wm.skl_latency;
else
- latencies = dev_priv->display.wm.cur_latency;
+ latencies = display->wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -385,18 +388,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
.write = cur_wm_latency_write
};
-void intel_wm_debugfs_register(struct drm_i915_private *i915)
+void intel_wm_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_pri_wm_latency_fops);
+ display, &i915_pri_wm_latency_fops);
debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_spr_wm_latency_fops);
+ display, &i915_spr_wm_latency_fops);
debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
- i915, &i915_cur_wm_latency_fops);
+ display, &i915_cur_wm_latency_fops);
- skl_watermark_debugfs_register(i915);
+ skl_watermark_debugfs_register(display);
}
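
The three debugfs files registered above all share the same plumbing, which is what makes the dev_priv -> display substitution mechanical: the cookie passed to debugfs_create_file() is opaque to debugfs, reappears as inode->i_private in open(), is handed to single_open(), and comes back as seq_file->private in the show callback. A minimal out-of-tree sketch of that round trip, assuming made-up names (demo_state, demo_wm_latency) rather than anything from the patch:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct demo_state { int num_levels; };

static struct demo_state demo = { .num_levels = 8 };
static struct dentry *demo_file;

static int demo_show(struct seq_file *m, void *unused)
{
	struct demo_state *state = m->private;	/* set by single_open() */

	seq_printf(m, "levels: %d\n", state->num_levels);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is the pointer given to debugfs_create_file() */
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	demo_file = debugfs_create_file("demo_wm_latency", 0444, NULL,
					&demo, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_file);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
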
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
index 7d3a447054b3..9ad4e9eae5ca 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.h
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -8,13 +8,13 @@
#include <linux/types.h>
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_plane_state;
-void intel_update_watermarks(struct drm_i915_private *i915);
+void intel_update_watermarks(struct intel_display *display);
int intel_wm_compute(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool intel_initial_watermarks(struct intel_atomic_state *state,
@@ -24,13 +24,13 @@ void intel_atomic_update_watermarks(struct intel_atomic_state *state,
void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_compute_global_watermarks(struct intel_atomic_state *state);
-void intel_wm_get_hw_state(struct drm_i915_private *i915);
-void intel_wm_sanitize(struct drm_i915_private *i915);
+void intel_wm_get_hw_state(struct intel_display *display);
+void intel_wm_sanitize(struct intel_display *display);
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_print_wm_latency(struct drm_i915_private *i915,
+void intel_print_wm_latency(struct intel_display *display,
const char *name, const u16 wm[]);
-void intel_wm_init(struct drm_i915_private *i915);
-void intel_wm_debugfs_register(struct drm_i915_private *i915);
+void intel_wm_init(struct intel_display *display);
+void intel_wm_debugfs_register(struct intel_display *display);
#endif /* __INTEL_WM_H__ */
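
The header hunk above swaps one opaque forward declaration for another: since intel_wm.h only ever passes pointers, it never needs the full definition of struct intel_display, and the i915-private headers stay out of display code. A self-contained C sketch of the pattern (names hypothetical, not from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Interface layer: only a forward declaration is visible, like
 * "struct intel_display;" in intel_wm.h -- callers pass pointers
 * around without ever seeing the layout. */
struct widget;
static void widget_print(struct widget *w);

/* Implementation layer: the full definition lives here. */
struct widget { int level; bool dirty; };

static void widget_print(struct widget *w)
{
	printf("level=%d dirty=%d\n", w->level, w->dirty);
}

int main(void)
{
	struct widget w = { .level = 3, .dirty = true };

	widget_print(&w);
	return 0;
}
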
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index ee81220a7c88..c855426544cf 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -3,8 +3,10 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 70e550539bb2..c7b336359a5e 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -601,7 +601,7 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
* Figure out what's going on here...
*/
if (display->platform.alderlake_p &&
- intel_plane_can_async_flip(plane, fb->modifier))
+ intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
return mult * 16 * 1024;
switch (fb->modifier) {
@@ -2666,6 +2666,7 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = skl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs icl_plane_funcs = {
@@ -2675,6 +2676,7 @@ static const struct drm_plane_funcs icl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = icl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs tgl_plane_funcs = {
@@ -2684,28 +2686,29 @@ static const struct drm_plane_funcs tgl_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = tgl_plane_format_mod_supported,
+ .format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static void
skl_plane_enable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_enable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&display->irq.lock);
}
static void
skl_plane_disable_flip_done(struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
- spin_lock_irq(&i915->irq_lock);
- bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&display->irq.lock);
+ bdw_disable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&display->irq.lock);
}
static bool skl_plane_has_rc_ccs(struct intel_display *display,
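
The flip-done helpers above follow the stock pattern for mutating an interrupt mask shared with the IRQ handler: take the lock with local interrupts off, flip the bit, drop the lock; only the lock's home moved from i915->irq_lock to display->irq.lock. A minimal sketch of the pattern, assuming a hypothetical demo_irq_mask rather than the driver's real register layout:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_irq_lock);
static u32 demo_irq_mask;

/* The mask is shared with the interrupt handler, so it must be
 * updated with local IRQs off and the lock held. */
static void demo_enable_irq(u32 bit)
{
	spin_lock_irq(&demo_irq_lock);
	demo_irq_mask |= bit;
	/* a real driver would also write the new mask to hardware here */
	spin_unlock_irq(&demo_irq_lock);
}

static void demo_disable_irq(u32 bit)
{
	spin_lock_irq(&demo_irq_lock);
	demo_irq_mask &= ~bit;
	spin_unlock_irq(&demo_irq_lock);
}
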
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 621e97943542..8080f777910a 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -19,6 +19,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power.h"
+#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fixed.h"
@@ -34,7 +35,7 @@
*/
#define DSB_EXE_TIME 100
-static void skl_sagv_disable(struct drm_i915_private *i915);
+static void skl_sagv_disable(struct intel_display *display);
/* Stores plane specific WM parameters */
struct skl_wm_params {
@@ -69,23 +70,21 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display)
* FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+static bool skl_needs_memory_bw_wa(struct intel_display *display)
{
- return DISPLAY_VER(i915) == 9;
+ return DISPLAY_VER(display) == 9;
}
bool
-intel_has_sagv(struct drm_i915_private *i915)
+intel_has_sagv(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
-intel_sagv_block_time(struct drm_i915_private *i915)
+intel_sagv_block_time(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (DISPLAY_VER(display) >= 14) {
u32 val;
@@ -115,10 +114,8 @@ intel_sagv_block_time(struct drm_i915_private *i915)
}
}
-static void intel_sagv_init(struct drm_i915_private *i915)
+static void intel_sagv_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (!HAS_SAGV(display))
display->sagv.status = I915_SAGV_NOT_CONTROLLED;
@@ -127,14 +124,14 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* For icl+ this was already determined by intel_bw_init_hw().
*/
if (DISPLAY_VER(display) < 11)
- skl_sagv_disable(i915);
+ skl_sagv_disable(display);
drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- display->sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(display);
drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
+ str_yes_no(intel_has_sagv(display)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
@@ -142,7 +139,7 @@ static void intel_sagv_init(struct drm_i915_private *i915)
display->sagv.block_time_us))
display->sagv.block_time_us = 0;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
display->sagv.block_time_us = 0;
}
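
intel_sagv_init() above resolves the initial SAGV status as a small state machine: no SAGV support means NOT_CONTROLLED, pre-icl parts get an explicit disable because the current state cannot be queried from pcode, and pre-release SKL parts that answer -ENXIO are demoted to NOT_CONTROLLED. A userspace model of those transitions, with the pcode write faked:

#include <errno.h>
#include <stdio.h>

enum sagv_status {
	SAGV_UNKNOWN,
	SAGV_DISABLED,
	SAGV_ENABLED,
	SAGV_NOT_CONTROLLED,
};

/* Stand-in for the pcode write: 0 = ok, -ENXIO = "no SAGV on this part" */
static int fake_pcode_disable(int pre_release_skl)
{
	return pre_release_skl ? -ENXIO : 0;
}

static enum sagv_status sagv_init(int has_sagv, int pre_release_skl)
{
	if (!has_sagv)
		return SAGV_NOT_CONTROLLED;

	/* pre-icl: force a known state, since it can't be queried */
	if (fake_pcode_disable(pre_release_skl) == -ENXIO)
		return SAGV_NOT_CONTROLLED;	/* pre-release machines */

	return SAGV_DISABLED;
}

int main(void)
{
	printf("%d %d %d\n",
	       sagv_init(0, 0),		/* 3: not controlled (no SAGV) */
	       sagv_init(1, 1),		/* 3: not controlled (-ENXIO) */
	       sagv_init(1, 0));	/* 1: disabled, now a known state */
	return 0;
}
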
@@ -157,17 +154,18 @@ static void intel_sagv_init(struct drm_i915_private *i915)
* - All planes can enable watermarks for latencies >= SAGV engine block time
* - We're not using an interlaced display configuration
*/
-static void skl_sagv_enable(struct drm_i915_private *i915)
+static void skl_sagv_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ if (display->sagv.status == I915_SAGV_ENABLED)
return;
- drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
+ drm_dbg_kms(display->drm, "Enabling SAGV\n");
ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
@@ -177,29 +175,30 @@ static void skl_sagv_enable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to enable SAGV\n");
+ drm_err(display->drm, "Failed to enable SAGV\n");
return;
}
- i915->display.sagv.status = I915_SAGV_ENABLED;
+ display->sagv.status = I915_SAGV_ENABLED;
}
-static void skl_sagv_disable(struct drm_i915_private *i915)
+static void skl_sagv_disable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ if (display->sagv.status == I915_SAGV_DISABLED)
return;
- drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
@@ -209,47 +208,47 @@ static void skl_sagv_disable(struct drm_i915_private *i915)
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
*/
- if (IS_SKYLAKE(i915) && ret == -ENXIO) {
- drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ if (display->platform.skylake && ret == -ENXIO) {
+ drm_dbg(display->drm, "No SAGV found on system, ignoring\n");
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
return;
} else if (ret < 0) {
- drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ drm_err(display->drm, "Failed to disable SAGV (%d)\n", ret);
return;
}
- i915->display.sagv.status = I915_SAGV_DISABLED;
+ display->sagv.status = I915_SAGV_DISABLED;
}
static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (!intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_disable(i915);
+ if (!intel_can_enable_sagv(display, new_bw_state))
+ skl_sagv_disable(display);
}
static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state =
intel_atomic_get_new_bw_state(state);
if (!new_bw_state)
return;
- if (intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_enable(i915);
+ if (intel_can_enable_sagv(display, new_bw_state))
+ skl_sagv_enable(display);
}
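
The pre/post split above exists so the hardware never runs with SAGV enabled while a configuration that cannot tolerate it is active: disabling happens before the plane update, enabling only after it. A sketch of that ordering with hypothetical helpers:

#include <stdbool.h>
#include <stdio.h>

static bool sagv_on = true;

static void apply_plane_update(bool new_can_sagv)
{
	/* pre: drop SAGV before switching to a config that forbids it */
	if (!new_can_sagv && sagv_on) {
		sagv_on = false;
		printf("SAGV disabled before update\n");
	}

	printf("planes updated\n");

	/* post: only re-enable once the new config is fully in place */
	if (new_can_sagv && !sagv_on) {
		sagv_on = true;
		printf("SAGV enabled after update\n");
	}
}

int main(void)
{
	apply_plane_update(false);	/* disable happens first */
	apply_plane_update(true);	/* enable happens last */
	return 0;
}
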
static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state =
intel_atomic_get_old_bw_state(state);
const struct intel_bw_state *new_bw_state =
@@ -267,7 +266,7 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_bw_state->base.changed);
- drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
old_mask, new_mask);
/*
@@ -276,12 +275,12 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
* time. Also masking should be done before updating the configuration
* and unmasking afterwards.
*/
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ icl_pcode_restrict_qgv_points(display, new_mask);
}
static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state =
intel_atomic_get_old_bw_state(state);
const struct intel_bw_state *new_bw_state =
@@ -299,7 +298,7 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_bw_state->base.changed);
- drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
old_mask, new_mask);
/*
@@ -308,12 +307,12 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
* time. Also masking should be done before updating the configuration
* and unmasking afterwards.
*/
- icl_pcode_restrict_qgv_points(i915, new_mask);
+ icl_pcode_restrict_qgv_points(display, new_mask);
}
void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -322,10 +321,10 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_pre_plane_update(state);
else
skl_sagv_pre_plane_update(state);
@@ -333,7 +332,7 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
void intel_sagv_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
/*
* Just return if we can't control SAGV or don't have it.
@@ -342,10 +341,10 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
* disabled in a BIOS, we are not even allowed to send a PCode request,
* as it will throw an error. So have to check it here.
*/
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
icl_sagv_post_plane_update(state);
else
skl_sagv_post_plane_update(state);
@@ -353,12 +352,12 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
int max_level = INT_MAX;
- if (!intel_has_sagv(i915))
+ if (!intel_has_sagv(display))
return false;
if (!crtc_state->hw.active)
@@ -377,7 +376,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
continue;
/* Find the highest enabled wm level for this plane */
- for (level = i915->display.wm.num_levels - 1;
+ for (level = display->wm.num_levels - 1;
!wm->wm[level].enable; --level)
{ }
@@ -423,104 +422,37 @@ static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
return true;
}
-static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (!i915->display.params.enable_sagv)
+ if (!display->params.enable_sagv)
return false;
- if (DISPLAY_VER(i915) >= 12)
+ /*
+ * SAGV is initially forced off because its current
+ * state can't be queried from pcode. Allow SAGV to
+ * be enabled upon the first real commit.
+ */
+ if (crtc_state->inherited)
+ return false;
+
+ if (DISPLAY_VER(display) >= 12)
return tgl_crtc_can_enable_sagv(crtc_state);
else
return skl_crtc_can_enable_sagv(crtc_state);
}
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
+bool intel_can_enable_sagv(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
return false;
return bw_state->pipe_sagv_reject == 0;
}
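
The now-exported predicate is pure: each crtc that cannot run with SAGV sets its bit in pipe_sagv_reject, and pre-gen11 hardware additionally requires at most one active pipe, hence the power-of-2 test on the active-pipe mask. A userspace model:

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int x)
{
	return x && !(x & (x - 1));
}

/* Mirror of intel_can_enable_sagv(): allowed only when no pipe vetoed
 * it, and on pre-gen11 only with a single (power-of-2 mask) active pipe. */
static bool can_enable_sagv(int ver, unsigned int active_pipes,
			    unsigned int pipe_sagv_reject)
{
	if (ver < 11 && active_pipes && !is_power_of_2(active_pipes))
		return false;

	return pipe_sagv_reject == 0;
}

int main(void)
{
	printf("%d\n", can_enable_sagv(9, 0x3, 0x0));	/* 0: two pipes, pre-gen11 */
	printf("%d\n", can_enable_sagv(12, 0x3, 0x0));	/* 1: gen12 allows multi-pipe */
	printf("%d\n", can_enable_sagv(12, 0x3, 0x2));	/* 0: pipe B vetoed SAGV */
	return 0;
}
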
-static int intel_compute_sagv_mask(struct intel_atomic_state *state)
-{
- struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- int ret;
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case).
- *
- * drm_atomic_check_only() gets upset if we pull more crtcs
- * into the state, so we have to calculate this based on the
- * individual intel_crtc_can_enable_sagv() rather than
- * the overall intel_can_enable_sagv(). Otherwise the
- * crtcs not included in the commit would not switch to the
- * SAGV watermarks when we are about to enable SAGV, and that
- * would lead to underruns. This does mean extra power draw
- * when only a subset of the crtcs are blocking SAGV as the
- * other crtcs can't be allowed to use the more optimal
- * normal (ie. non-SAGV) watermarks.
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
- DISPLAY_VER(i915) >= 12 &&
- intel_crtc_can_enable_sagv(new_crtc_state);
-
- if (intel_crtc_can_enable_sagv(new_crtc_state))
- new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
- else
- new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
- }
-
- if (!new_bw_state)
- return 0;
-
- new_bw_state->active_pipes =
- intel_calc_active_pipes(state, old_bw_state->active_pipes);
-
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- if (intel_can_enable_sagv(i915, new_bw_state) !=
- intel_can_enable_sagv(i915, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
u16 start, u16 end)
{
@@ -530,17 +462,17 @@ static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
return end;
}
-static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+static int intel_dbuf_slice_size(struct intel_display *display)
{
- return DISPLAY_INFO(i915)->dbuf.size /
- hweight8(DISPLAY_INFO(i915)->dbuf.slice_mask);
+ return DISPLAY_INFO(display)->dbuf.size /
+ hweight8(DISPLAY_INFO(display)->dbuf.slice_mask);
}
static void
-skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+skl_ddb_entry_for_slices(struct intel_display *display, u8 slice_mask,
struct skl_ddb_entry *ddb)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
if (!slice_mask) {
ddb->start = 0;
@@ -552,10 +484,10 @@ skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
ddb->end = fls(slice_mask) * slice_size;
WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > DISPLAY_INFO(i915)->dbuf.size);
+ WARN_ON(ddb->end > DISPLAY_INFO(display)->dbuf.size);
}
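
The slice-to-range math above: each slice spans dbuf.size divided by the platform's total slice count, and a contiguous slice mask maps to the block range [(ffs(mask) - 1) * slice_size, fls(mask) * slice_size). A worked model with a hypothetical 2048-block, 4-slice layout:

#include <stdio.h>

static int fls32(unsigned int x)	/* highest set bit, 1-based; 0 -> 0 */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* Hypothetical platform: 2048 blocks of DBUF across 4 slices */
	const unsigned int dbuf_size = 2048;
	const unsigned int platform_slices = 0xf;
	const int slice_size = dbuf_size / __builtin_popcount(platform_slices);

	/* Entry for slices S2+S3 (mask 0b0110), as in skl_ddb_entry_for_slices() */
	unsigned int mask = 0x6;
	int start = (__builtin_ffs(mask) - 1) * slice_size;
	int end = fls32(mask) * slice_size;

	printf("slice_size=%d range=[%d, %d)\n", slice_size, start, end);
	/* prints: slice_size=512 range=[512, 1536) */
	return 0;
}
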
-static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+static unsigned int mbus_ddb_offset(struct intel_display *display, u8 slice_mask)
{
struct skl_ddb_entry ddb;
@@ -564,15 +496,15 @@ static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask
else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
slice_mask = BIT(DBUF_S3);
- skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+ skl_ddb_entry_for_slices(display, slice_mask, &ddb);
return ddb.start;
}
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry)
{
- int slice_size = intel_dbuf_slice_size(i915);
+ int slice_size = intel_dbuf_slice_size(display);
enum dbuf_slice start_slice, end_slice;
u8 slice_mask = 0;
@@ -618,15 +550,14 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
unsigned int *weight_end,
unsigned int *weight_total)
{
- struct drm_i915_private *i915 =
- to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
enum pipe pipe;
*weight_start = 0;
*weight_end = 0;
*weight_total = 0;
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
int weight = dbuf_state->weight[pipe];
/*
@@ -652,7 +583,7 @@ static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
unsigned int weight_total, weight_start, weight_end;
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
@@ -674,8 +605,8 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
dbuf_slice_mask = new_dbuf_state->slices[pipe];
- skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
- mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ skl_ddb_entry_for_slices(display, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(display, dbuf_slice_mask);
ddb_range_size = skl_ddb_entry_size(&ddb_slices);
intel_crtc_dbuf_weights(new_dbuf_state, pipe,
@@ -709,7 +640,7 @@ out:
crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
crtc->base.base.id, crtc->base.name,
old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
@@ -734,10 +665,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */);
-static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
+static unsigned int skl_wm_latency(struct intel_display *display, int level,
const struct skl_wm_params *wp)
{
- unsigned int latency = i915->display.wm.skl_latency[level];
+ unsigned int latency = display->wm.skl_latency[level];
if (latency == 0)
return 0;
@@ -746,11 +677,11 @@ static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level,
* WaIncreaseLatencyIPCEnabled: kbl,cfl
* Display WA #1141: kbl,cfl
*/
- if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
- skl_watermark_ipc_enabled(i915))
+ if ((display->platform.kabylake || display->platform.coffeelake ||
+ display->platform.cometlake) && skl_watermark_ipc_enabled(display))
latency += 4;
- if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled)
+ if (skl_needs_memory_bw_wa(display) && wp && wp->x_tiled)
latency += 15;
return latency;
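
skl_wm_latency() layers two workarounds on the raw table value: +4 us on KBL/CFL/CML with IPC enabled (Display WA #1141) and +15 us for X-tiled planes under the gen9 memory-bandwidth WA, while a zero entry stays zero to mark the level disabled. Worked numbers:

#include <stdbool.h>
#include <stdio.h>

static unsigned int wm_latency(unsigned int raw, bool ipc_wa_platform,
			       bool ipc_enabled, bool gen9_bw_wa, bool x_tiled)
{
	unsigned int latency = raw;

	if (latency == 0)
		return 0;	/* zero marks the level disabled */

	if (ipc_wa_platform && ipc_enabled)
		latency += 4;	/* WaIncreaseLatencyIPCEnabled / WA #1141 */

	if (gen9_bw_wa && x_tiled)
		latency += 15;	/* memory bandwidth WA */

	return latency;
}

int main(void)
{
	printf("%u\n", wm_latency(7, true, true, false, false));	/* 11 */
	printf("%u\n", wm_latency(7, false, false, true, true));	/* 22 */
	printf("%u\n", wm_latency(0, true, true, true, true));		/* 0 */
	return 0;
}
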
@@ -760,8 +691,8 @@ static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
@@ -772,10 +703,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0, 0);
- drm_WARN_ON(&i915->drm, ret);
+ drm_WARN_ON(display->drm, ret);
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- unsigned int latency = skl_wm_latency(i915, level, &wp);
+ for (level = 0; level < display->wm.num_levels; level++) {
+ unsigned int latency = skl_wm_latency(display, level, &wp);
skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
if (wm.min_ddb_alloc == U16_MAX)
@@ -797,14 +728,13 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
}
static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+skl_ddb_get_hw_plane_state(struct intel_display *display,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
struct skl_ddb_entry *ddb_y,
u16 *min_ddb, u16 *interim_ddb)
{
- struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
@@ -837,7 +767,6 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
u16 *min_ddb, u16 *interim_ddb)
{
struct intel_display *display = to_intel_display(crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -849,7 +778,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
return;
for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(i915, pipe,
+ skl_ddb_get_hw_plane_state(display, pipe,
plane_id,
&ddb[plane_id],
&ddb_y[plane_id],
@@ -1367,16 +1296,16 @@ static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbu
static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- if (IS_DG2(i915))
+ if (display->platform.dg2)
return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 12)
+ else if (DISPLAY_VER(display) == 12)
return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
* For anything else just return one slice for now.
@@ -1416,8 +1345,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum plane_id plane_id;
u64 data_rate = 0;
@@ -1427,7 +1356,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
data_rate += crtc_state->rel_data_rate[plane_id];
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->rel_data_rate_y[plane_id];
}
@@ -1489,7 +1418,7 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
}
}
-static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level,
+static bool skl_need_wm_copy_wa(struct intel_display *display, int level,
const struct skl_plane_wm *wm)
{
/*
@@ -1543,7 +1472,6 @@ static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_dbuf_state *dbuf_state =
@@ -1585,7 +1513,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* Find the highest watermark level for which we can satisfy the block
* requirement of active planes.
*/
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
@@ -1596,7 +1524,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
@@ -1615,9 +1543,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
if (level < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Requested display configuration exceeds system DDB limitations");
- drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ drm_dbg_kms(display->drm, "minimum required %d/%d\n",
blocks, iter.size);
return -EINVAL;
}
@@ -1645,7 +1573,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
if (plane_id == PLANE_CURSOR)
continue;
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
crtc_state->rel_data_rate_y[plane_id]);
@@ -1661,7 +1589,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
*interim_ddb = wm->sagv.wm0.min_ddb_alloc;
}
}
- drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+ drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0);
/*
* When we calculated watermark values we didn't know how high
@@ -1669,7 +1597,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* all levels as "enabled." Go back now and disable the ones
* that aren't actually possible.
*/
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb[plane_id];
@@ -1678,7 +1606,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id))
skl_check_nv12_wm_level(&wm->wm[level],
&wm->uv_wm[level],
@@ -1686,7 +1614,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
else
skl_check_wm_level(&wm->wm[level], ddb);
- if (skl_need_wm_copy_wa(i915, level, wm)) {
+ if (skl_need_wm_copy_wa(display, level, wm)) {
wm->wm[level].blocks = wm->wm[level - 1].blocks;
wm->wm[level].lines = wm->wm[level - 1].lines;
wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines;
@@ -1708,7 +1636,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (DISPLAY_VER(i915) < 11 &&
+ if (DISPLAY_VER(display) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
skl_check_wm_level(&wm->trans_wm, ddb_y);
} else {
@@ -1734,7 +1662,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+skl_wm_method1(struct intel_display *display, u32 pixel_rate,
u8 cpp, u32 latency, u32 dbuf_block_size)
{
u32 wm_intermediate_val;
@@ -1746,7 +1674,7 @@ skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
wm_intermediate_val = latency * pixel_rate * cpp;
ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
ret = add_fixed16_u32(ret, 1);
return ret;
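
Method 1 above is the bandwidth-based watermark: latency * pixel_rate * cpp scaled down by 1000 * dbuf_block_size, evaluated in 16.16 fixed point with one extra block added on gen10+. A plain-double model of the arithmetic (units as in the source: latency in us, pixel rate in kHz, cpp and block size in bytes; the fixed-point helpers are replaced by floating point):

#include <stdio.h>

/* Plain-double model of skl_wm_method1(): blocks of DBUF needed to
 * absorb "latency" worth of pixel data. */
static double wm_method1(unsigned int pixel_rate, unsigned int cpp,
			 unsigned int latency, unsigned int block_size,
			 int display_ver)
{
	double blocks;

	if (latency == 0)
		return 0.0;	/* the driver special-cases zero latency */

	blocks = (double)latency * pixel_rate * cpp / (1000.0 * block_size);
	if (display_ver >= 10)
		blocks += 1.0;	/* gen10+ adds one extra block */

	return blocks;
}

int main(void)
{
	/* 594 MHz pixel clock, 4 bytes/px, 7 us latency, 512-byte blocks */
	printf("%.2f blocks\n", wm_method1(594000, 4, 7, 512, 12)); /* 33.48 */
	return 0;
}
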
@@ -1772,7 +1700,7 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
@@ -1782,7 +1710,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
pixel_rate = crtc_state->pixel_rate;
- if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ if (drm_WARN_ON(display->drm, pixel_rate == 0))
return u32_to_fixed16(0);
crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
@@ -1798,15 +1726,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane, unsigned int pan_x)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar format has two planes */
if (color_plane == 1 &&
!intel_format_info_is_yuv_semiplanar(format, modifier)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Non planar format have single plane\n");
return -EINVAL;
}
@@ -1824,7 +1750,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->cpp = format->cpp[color_plane];
wp->plane_pixel_rate = plane_pixel_rate;
- if (DISPLAY_VER(i915) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
@@ -1849,7 +1775,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines = 4;
}
- if (skl_needs_memory_bw_wa(i915))
+ if (skl_needs_memory_bw_wa(display))
wp->y_min_scanlines *= 2;
wp->plane_bytes_per_line = wp->width * wp->cpp;
@@ -1860,7 +1786,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(display) >= 30)
interm_pbpl += (pan_x != 0);
- else if (DISPLAY_VER(i915) >= 10)
+ else if (DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
@@ -1869,7 +1795,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
- if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ if (!wp->x_tiled || DISPLAY_VER(display) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -1906,18 +1832,18 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
plane_state->uapi.src.x1);
}
-static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+static bool skl_wm_has_lines(struct intel_display *display, int level)
{
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 10)
return true;
/* The number of lines is ignored for the level 0 watermark. */
return level > 0;
}
-static int skl_wm_max_lines(struct drm_i915_private *i915)
+static int skl_wm_max_lines(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 255;
else
return 31;
@@ -1938,7 +1864,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 blocks, lines, min_ddb_alloc = 0;
@@ -1950,7 +1876,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
return;
}
- method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ method1 = skl_wm_method1(display, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
crtc_state->hw.pipe_mode.crtc_htotal,
@@ -1965,7 +1891,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
selected_result = min_fixed16(method1, method2);
else
selected_result = method2;
@@ -1975,7 +1901,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
blocks = fixed16_to_u32_round_up(selected_result);
- if (DISPLAY_VER(i915) < 30)
+ if (DISPLAY_VER(display) < 30)
blocks++;
/*
@@ -1994,13 +1920,13 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
* channels' impact on the level 0 memory latency and the relevant
* wm calculations.
*/
- if (skl_wm_has_lines(i915, level))
+ if (skl_wm_has_lines(display, level))
blocks = max(blocks,
fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- if (DISPLAY_VER(i915) == 9) {
+ if (DISPLAY_VER(display) == 9) {
/* Display WA #1125: skl,bxt,kbl */
if (level == 0 && wp->rc_surface)
blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
@@ -2025,7 +1951,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (DISPLAY_VER(i915) >= 11) {
+ if (DISPLAY_VER(display) >= 11) {
if (wp->y_tiled) {
int extra_lines;
@@ -2042,10 +1968,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- if (!skl_wm_has_lines(i915, level))
+ if (!skl_wm_has_lines(display, level))
lines = 0;
- if (lines > skl_wm_max_lines(i915)) {
+ if (lines > skl_wm_max_lines(display)) {
/* reject it */
result->min_ddb_alloc = U16_MAX;
return;
@@ -2064,8 +1990,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
result->enable = true;
result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
- if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
- result->can_sagv = latency >= i915->display.sagv.block_time_us;
+ if (DISPLAY_VER(display) < 12 && display->sagv.block_time_us)
+ result->can_sagv = latency >= display->sagv.block_time_us;
}
static void
@@ -2074,13 +2000,13 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *result_prev = &levels[0];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
struct skl_wm_level *result = &levels[level];
- unsigned int latency = skl_wm_latency(i915, level, wm_params);
+ unsigned int latency = skl_wm_latency(display, level, wm_params);
skl_compute_plane_wm(crtc_state, plane, level, latency,
wm_params, result_prev, result);
@@ -2094,21 +2020,21 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_plane_wm *plane_wm)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
struct skl_wm_level *levels = plane_wm->wm;
unsigned int latency = 0;
- if (i915->display.sagv.block_time_us)
- latency = i915->display.sagv.block_time_us +
- skl_wm_latency(i915, 0, wm_params);
+ if (display->sagv.block_time_us)
+ latency = display->sagv.block_time_us +
+ skl_wm_latency(display, 0, wm_params);
skl_compute_plane_wm(crtc_state, plane, 0, latency,
wm_params, &levels[0],
sagv_wm);
}
-static void skl_compute_transition_wm(struct drm_i915_private *i915,
+static void skl_compute_transition_wm(struct intel_display *display,
struct skl_wm_level *trans_wm,
const struct skl_wm_level *wm0,
const struct skl_wm_params *wp)
@@ -2117,23 +2043,23 @@ static void skl_compute_transition_wm(struct drm_i915_private *i915,
u16 wm0_blocks, trans_offset, blocks;
/* Transition WMs don't make any sense if IPC is disabled */
- if (!skl_watermark_ipc_enabled(i915))
+ if (!skl_watermark_ipc_enabled(display))
return;
/*
* WaDisableTWM:skl,kbl,cfl,bxt
* Transition WMs are not recommended by the HW team for GEN9
*/
- if (DISPLAY_VER(i915) == 9)
+ if (DISPLAY_VER(display) == 9)
return;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
trans_min = 4;
else
trans_min = 14;
/* Display WA #1140: glk,cnl */
- if (DISPLAY_VER(i915) == 10)
+ if (DISPLAY_VER(display) == 10)
trans_amount = 0;
else
trans_amount = 10; /* This is a configurable amount */
@@ -2175,8 +2101,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct intel_plane *plane, int color_plane)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
struct skl_wm_params wm_params;
int ret;
@@ -2188,13 +2113,13 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
- skl_compute_transition_wm(i915, &wm->trans_wm,
+ skl_compute_transition_wm(display, &wm->trans_wm,
&wm->wm[0], &wm_params);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
- skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ skl_compute_transition_wm(display, &wm->sagv.trans_wm,
&wm->sagv.wm0, &wm_params);
}
@@ -2254,8 +2179,8 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
int ret;
@@ -2269,9 +2194,9 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->hw.fb;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!intel_wm_plane_visible(crtc_state, plane_state));
- drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ drm_WARN_ON(display->drm, !fb->format->is_yuv ||
fb->format->num_planes == 1);
ret = skl_build_plane_wm_single(crtc_state, plane_state,
@@ -2411,15 +2336,14 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
int wm0_lines)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
int level;
- for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+ for (level = display->wm.num_levels - 1; level >= 0; level--) {
int latency;
/* FIXME should we care about the latency w/a's? */
- latency = skl_wm_latency(i915, level, NULL);
+ latency = skl_wm_latency(display, level, NULL);
if (latency == 0)
continue;
@@ -2436,8 +2360,8 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
int wm0_lines, level;
if (!crtc_state->hw.active)
@@ -2453,9 +2377,9 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
* PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_*
* based on whether we're limited by the vblank duration.
*/
- crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1;
+ crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
- for (level++; level < i915->display.wm.num_levels; level++) {
+ for (level++; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2471,10 +2395,10 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
}
}
- if (DISPLAY_VER(i915) >= 12 &&
- i915->display.sagv.block_time_us &&
+ if (DISPLAY_VER(display) >= 12 &&
+ display->sagv.block_time_us &&
skl_is_vblank_too_short(crtc_state, wm0_lines,
- i915->display.sagv.block_time_us)) {
+ display->sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -2492,7 +2416,7 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
static int skl_build_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *plane_state;
@@ -2508,7 +2432,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state,
if (plane->pipe != crtc->pipe)
continue;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
else
ret = skl_build_plane_wm(crtc_state, plane_state);
@@ -2531,11 +2455,10 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1,
l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
-static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+static bool skl_plane_wm_equals(struct intel_display *display,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
- struct intel_display *display = &i915->display;
int level;
for (level = 0; level < display->wm.num_levels; level++) {
@@ -2590,14 +2513,14 @@ static int
skl_ddb_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2608,7 +2531,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change DDB during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
@@ -2627,7 +2550,7 @@ skl_ddb_add_affected_planes(struct intel_atomic_state *state,
static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ struct intel_display *display = to_intel_display(dbuf_state->base.state->base.dev);
u8 enabled_slices;
enum pipe pipe;
@@ -2637,7 +2560,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
*/
enabled_slices = BIT(DBUF_S1);
- for_each_pipe(i915, pipe)
+ for_each_pipe(display, pipe)
enabled_slices |= dbuf_state->slices[pipe];
return enabled_slices;
@@ -2647,7 +2570,6 @@ static int
skl_compute_ddb(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
struct intel_crtc_state *new_crtc_state;
@@ -2686,7 +2608,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
}
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
@@ -2709,11 +2631,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
if (ret)
return ret;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
old_dbuf_state->enabled_slices,
new_dbuf_state->enabled_slices,
- DISPLAY_INFO(i915)->dbuf.slice_mask,
+ DISPLAY_INFO(display)->dbuf.slice_mask,
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus));
}
@@ -2731,7 +2653,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
ret = skl_crtc_allocate_ddb(state, crtc);
if (ret)
return ret;
@@ -2758,7 +2680,7 @@ static char enast(bool enable)
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_plane *plane;
@@ -2775,7 +2697,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
old_pipe_wm = &old_crtc_state->wm.skl.optimal;
new_pipe_wm = &new_crtc_state->wm.skl.optimal;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@@ -2785,24 +2707,24 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
plane->base.base.id, plane->base.name,
old->start, old->end, new->start, new->end,
skl_ddb_entry_size(old), skl_ddb_entry_size(new));
}
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_plane_wm *old_wm, *new_wm;
old_wm = &old_pipe_wm->planes[plane_id];
new_wm = &new_pipe_wm->planes[plane_id];
- if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ if (skl_plane_wm_equals(display, old_wm, new_wm))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
" -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
plane->base.base.id, plane->base.name,
@@ -2821,7 +2743,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->sagv.wm0.enable),
enast(new_wm->sagv.trans_wm.enable));
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
" -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
plane->base.base.id, plane->base.name,
@@ -2848,7 +2770,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
@@ -2867,7 +2789,7 @@ skl_print_wm_changes(struct intel_atomic_state *state)
new_wm->sagv.wm0.blocks,
new_wm->sagv.trans_wm.blocks);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
" -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
plane->base.base.id, plane->base.name,
@@ -2945,14 +2867,14 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
@@ -2971,7 +2893,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
continue;
if (new_crtc_state->do_async_flip) {
- drm_dbg_kms(&i915->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
+ drm_dbg_kms(display->drm, "[PLANE:%d:%s] Can't change watermarks during async flip\n",
plane->base.base.id, plane->base.name);
return -EINVAL;
}
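
The hunks above show the conversion pattern this patch applies throughout: derive a struct intel_display from the object at hand and use its drm pointer directly, instead of going through drm_i915_private. A minimal side-by-side sketch (illustrative only, using just the helpers visible in this diff):

/* Before: i915-private lookup; note &i915->drm (drm is embedded). */
static void sketch_before(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	drm_dbg_kms(&i915->drm, "...\n");
}

/* After: display lookup; display->drm is already a pointer. */
static void sketch_after(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);

	drm_dbg_kms(display->drm, "...\n");
}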
@@ -3002,7 +2924,6 @@ void
intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
struct intel_crtc_state *new_crtc_state;
u32 latency = LNL_PKG_C_LATENCY_MASK;
@@ -3028,7 +2949,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state)
added_wake_time = DSB_EXE_TIME +
display->sagv.block_time_us;
- latency = skl_watermark_max_latency(i915, 1);
+ latency = skl_watermark_max_latency(display, 1);
/* Wa_22020432604 */
if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
@@ -3055,6 +2976,7 @@ intel_program_dpkgc_latency(struct intel_atomic_state *state)
static int
skl_compute_wm(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
@@ -3069,16 +2991,35 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- ret = intel_compute_sagv_mask(state);
- if (ret)
- return ret;
-
/*
* skl_compute_ddb() will have adjusted the final watermarks
* based on how much ddb is available. Now we can actually
* check if the final watermarks changed.
*/
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * the bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case).
+ *
+ * drm_atomic_check_only() gets upset if we pull more crtcs
+ * into the state, so we have to calculate this based on the
+ * individual intel_crtc_can_enable_sagv() rather than
+ * the overall intel_can_enable_sagv(). Otherwise the
+ * crtcs not included in the commit would not switch to the
+ * SAGV watermarks when we are about to enable SAGV, and that
+ * would lead to underruns. This does mean extra power draw
+ * when only a subset of the crtcs are blocking SAGV as the
+ * other crtcs can't be allowed to use the more optimal
+ * normal (i.e. non-SAGV) watermarks.
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
+ DISPLAY_VER(display) >= 12 &&
+ intel_crtc_can_enable_sagv(new_crtc_state);
+
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
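
Condensed, the per-crtc selection the new comment describes is a single predicate; a sketch, with helper names exactly as in the hunk above:

/* Sketch: SW-selected SAGV watermarks exist only on display 12+,
 * and only where the HW cannot pick them itself. */
static bool sketch_use_sagv_wm(struct intel_display *display,
			       const struct intel_crtc_state *crtc_state)
{
	return !HAS_HW_SAGV_WM(display) &&
		DISPLAY_VER(display) >= 12 &&
		intel_crtc_can_enable_sagv(crtc_state);
}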
@@ -3149,11 +3090,10 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
}
}
-static void skl_wm_get_hw_state(struct drm_i915_private *i915)
+static void skl_wm_get_hw_state(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct intel_crtc *crtc;
if (HAS_MBUS_JOINING(display))
@@ -3193,7 +3133,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
if (!crtc_state->hw.active)
continue;
- skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ skl_ddb_get_hw_plane_state(display, crtc->pipe,
plane_id, ddb, ddb_y,
min_ddb, interim_ddb);
@@ -3209,13 +3149,13 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
*/
slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
dbuf_state->joined_mbus);
- mbus_offset = mbus_ddb_offset(i915, slices);
+ mbus_offset = mbus_ddb_offset(display, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
/* The slices actually used by the planes on the pipe */
dbuf_state->slices[pipe] =
- skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+ skl_ddb_dbuf_slice_mask(display, &crtc_state->wm.skl.ddb);
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
@@ -3228,49 +3168,52 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+bool skl_watermark_ipc_enabled(struct intel_display *display)
{
- return i915->display.wm.ipc_enabled;
+ return display->wm.ipc_enabled;
}
-void skl_watermark_ipc_update(struct drm_i915_private *i915)
+void skl_watermark_ipc_update(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
- skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+ intel_de_rmw(display, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(display) ? DISP_IPC_ENABLE : 0);
}
-static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+static bool skl_watermark_ipc_can_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
/* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(i915))
+ if (display->platform.skylake)
return false;
/* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(i915) ||
- IS_COFFEELAKE(i915) ||
- IS_COMETLAKE(i915))
+ if (display->platform.kabylake ||
+ display->platform.coffeelake ||
+ display->platform.cometlake)
return i915->dram_info.symmetric_memory;
return true;
}
-void skl_watermark_ipc_init(struct drm_i915_private *i915)
+void skl_watermark_ipc_init(struct intel_display *display)
{
- if (!HAS_IPC(i915))
+ if (!HAS_IPC(display))
return;
- i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+ display->wm.ipc_enabled = skl_watermark_ipc_can_enable(display);
- skl_watermark_ipc_update(i915);
+ skl_watermark_ipc_update(display);
}
static void
-adjust_wm_latency(struct drm_i915_private *i915,
+adjust_wm_latency(struct intel_display *display,
u16 wm[], int num_levels, int read_latency)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
int i, level;
@@ -3311,31 +3254,32 @@ adjust_wm_latency(struct drm_i915_private *i915,
wm[0] += 1;
}
-static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
+ int num_levels = display->wm.num_levels;
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
+ val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
+ val = intel_de_read(display, MTL_LATENCY_LP2_LP3);
wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
+ val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- adjust_wm_latency(i915, wm, num_levels, 6);
+ adjust_wm_latency(display, wm, num_levels, 6);
}
-static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
{
- int num_levels = i915->display.wm.num_levels;
- int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
- int mult = IS_DG2(i915) ? 2 : 1;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ int num_levels = display->wm.num_levels;
+ int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
+ int mult = display->platform.dg2 ? 2 : 1;
u32 val;
int ret;
@@ -3343,7 +3287,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
val = 0; /* data0 to be programmed to 0 for first set */
ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3356,7 +3300,7 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
val = 1; /* data0 to be programmed to 1 for second set */
ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
- drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
}
@@ -3365,24 +3309,22 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
- adjust_wm_latency(i915, wm, num_levels, read_latency);
+ adjust_wm_latency(display, wm, num_levels, read_latency);
}
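
Each of the two mailbox reads above returns four packed latency levels; as the GEN9_MEM_LATENCY_LEVEL_*_MASK usage suggests, every level is an 8-bit field. A sketch of the unpacking under that assumption:

/* Sketch (assumes 8-bit fields, as the masks above imply): one u32
 * from snb_pcode_read() carries four latency levels. */
static void sketch_unpack_latency(u32 val, u16 wm[4], int mult)
{
	int i;

	for (i = 0; i < 4; i++)
		wm[i] = ((val >> (8 * i)) & 0xff) * mult; /* level i in byte i */
}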
-static void skl_setup_wm_latency(struct drm_i915_private *i915)
+static void skl_setup_wm_latency(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (HAS_HW_SAGV_WM(display))
display->wm.num_levels = 6;
else
display->wm.num_levels = 8;
if (DISPLAY_VER(display) >= 14)
- mtl_read_wm_latency(i915, display->wm.skl_latency);
+ mtl_read_wm_latency(display, display->wm.skl_latency);
else
- skl_read_wm_latency(i915, display->wm.skl_latency);
+ skl_read_wm_latency(display, display->wm.skl_latency);
- intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
+ intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3410,19 +3352,18 @@ static const struct intel_global_state_funcs intel_dbuf_funcs = {
struct intel_dbuf_state *
intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *dbuf_state;
- dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ dbuf_state = intel_atomic_get_global_obj_state(state, &display->dbuf.obj);
if (IS_ERR(dbuf_state))
return ERR_CAST(dbuf_state);
return to_intel_dbuf_state(dbuf_state);
}
-int intel_dbuf_init(struct drm_i915_private *i915)
+int intel_dbuf_init(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
@@ -3457,34 +3398,34 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
u32 val = 0;
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= MBUS_DBOX_I_CREDIT(2);
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
}
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
- else if (IS_ALDERLAKE_P(i915))
+ else if (display->platform.alderlake_p)
/* Wa_22010947358:adl-p */
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
else
val |= MBUS_DBOX_A_CREDIT(2);
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
val |= MBUS_DBOX_B_CREDIT(0xA);
- } else if (IS_ALDERLAKE_P(i915)) {
+ } else if (display->platform.alderlake_p) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
val |= MBUS_DBOX_BW_CREDIT(2);
val |= MBUS_DBOX_B_CREDIT(12);
} else {
@@ -3492,7 +3433,7 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
val |= MBUS_DBOX_B_CREDIT(8);
}
- if (DISPLAY_VERx100(i915) == 1400) {
+ if (DISPLAY_VERx100(display) == 1400) {
if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
val |= MBUS_DBOX_BW_8CREDITS_MTL;
else
@@ -3502,22 +3443,22 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
return val;
}
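
For a concrete reading of the branches above, a worked example with the values taken straight from the version-14 paths:

/*
 * Worked example (sketch, derived from the branches above): a
 * version-14 display with joined mbus ends up with
 *
 *	val = MBUS_DBOX_I_CREDIT(2) |
 *	      MBUS_DBOX_B2B_TRANSACTIONS_MAX(16) |
 *	      MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1) |
 *	      MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN |
 *	      MBUS_DBOX_A_CREDIT(12) |
 *	      MBUS_DBOX_B_CREDIT(0xA);
 *
 * plus, on DISPLAY_VERx100 == 1400, one of the two MTL BW credit
 * bits depending on whether the pipe has a dbuf bank to itself.
 */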
-static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+static void pipe_mbus_dbox_ctl_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_crtc *crtc;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(display, PIPE_MBUS_DBOX_CTL(crtc->pipe),
pipe_mbus_dbox_ctl(crtc, dbuf_state));
}
static void intel_mbus_dbox_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
return;
new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
@@ -3527,7 +3468,7 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
return;
- pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
+ pipe_mbus_dbox_ctl_update(display, new_dbuf_state);
}
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -3544,10 +3485,9 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
return intel_atomic_lock_global_state(&dbuf_state->base);
}
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus)
{
- struct intel_display *display = &i915->display;
enum dbuf_slice slice;
if (!HAS_MBUS_JOINING(display))
@@ -3571,7 +3511,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
@@ -3586,7 +3526,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state
mdclk_cdclk_ratio = new_dbuf_state->mdclk_cdclk_ratio;
}
- intel_dbuf_mdclk_cdclk_ratio_update(i915, mdclk_cdclk_ratio,
+ intel_dbuf_mdclk_cdclk_ratio_update(display, mdclk_cdclk_ratio,
new_dbuf_state->joined_mbus);
}
@@ -3594,13 +3534,12 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
- drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
+ drm_WARN_ON(display->drm, !dbuf_state->joined_mbus);
+ drm_WARN_ON(display->drm, !is_power_of_2(dbuf_state->active_pipes));
crtc = intel_crtc_for_pipe(display, pipe);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -3611,7 +3550,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
return INVALID_PIPE;
}
-static void mbus_ctl_join_update(struct drm_i915_private *i915,
+static void mbus_ctl_join_update(struct intel_display *display,
const struct intel_dbuf_state *dbuf_state,
enum pipe pipe)
{
@@ -3627,7 +3566,7 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
else
mbus_ctl |= MBUS_JOIN_PIPE_SELECT_NONE;
- intel_de_rmw(i915, MBUS_CTL,
+ intel_de_rmw(display, MBUS_CTL,
MBUS_HASHING_MODE_MASK | MBUS_JOIN |
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
}
@@ -3635,18 +3574,18 @@ static void mbus_ctl_join_update(struct drm_i915_private *i915,
static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *old_dbuf_state =
intel_atomic_get_old_dbuf_state(state);
const struct intel_dbuf_state *new_dbuf_state =
intel_atomic_get_new_dbuf_state(state);
- drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ drm_dbg_kms(display->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
str_yes_no(old_dbuf_state->joined_mbus),
str_yes_no(new_dbuf_state->joined_mbus),
pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
- mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+ mbus_ctl_join_update(display, new_dbuf_state, pipe);
}
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
@@ -3751,9 +3690,8 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(display, new_slices);
}
-static void skl_mbus_sanitize(struct drm_i915_private *i915)
+static void skl_mbus_sanitize(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(display->dbuf.obj.state);
@@ -3768,28 +3706,28 @@ static void skl_mbus_sanitize(struct drm_i915_private *i915)
dbuf_state->active_pipes);
dbuf_state->joined_mbus = false;
- intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ intel_dbuf_mdclk_cdclk_ratio_update(display,
dbuf_state->mdclk_cdclk_ratio,
dbuf_state->joined_mbus);
- pipe_mbus_dbox_ctl_update(i915, dbuf_state);
- mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+ pipe_mbus_dbox_ctl_update(display, dbuf_state);
+ mbus_ctl_join_update(display, dbuf_state, INVALID_PIPE);
}
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+static bool skl_dbuf_is_misconfigured(struct intel_display *display)
{
const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ to_intel_dbuf_state(display->dbuf.obj.state);
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
entries[crtc->pipe] = crtc_state->wm.skl.ddb;
}
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
u8 slices;
@@ -3807,7 +3745,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
return false;
}
-static void skl_dbuf_sanitize(struct drm_i915_private *i915)
+static void skl_dbuf_sanitize(struct intel_display *display)
{
struct intel_crtc *crtc;
@@ -3822,12 +3760,12 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
* all the planes so that skl_commit_modeset_enables() can
* simply ignore them.
*/
- if (!skl_dbuf_is_misconfigured(i915))
+ if (!skl_dbuf_is_misconfigured(display))
return;
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+ drm_dbg_kms(display->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -3837,16 +3775,16 @@ static void skl_dbuf_sanitize(struct drm_i915_private *i915)
if (plane_state->uapi.visible)
intel_plane_disable_noatomic(crtc, plane);
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+ drm_WARN_ON(display->drm, crtc_state->active_planes != 0);
memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
}
}
-static void skl_wm_sanitize(struct drm_i915_private *i915)
+static void skl_wm_sanitize(struct intel_display *display)
{
- skl_mbus_sanitize(i915);
- skl_dbuf_sanitize(i915);
+ skl_mbus_sanitize(display);
+ skl_dbuf_sanitize(display);
}
void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3897,7 +3835,6 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct skl_hw_state {
@@ -3912,7 +3849,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
u8 hw_enabled_slices;
int level;
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ if (DISPLAY_VER(display) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -3925,26 +3862,26 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
hw_enabled_slices = intel_enabled_dbuf_slices_mask(display);
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
+ if (DISPLAY_VER(display) >= 11 &&
+ hw_enabled_slices != display->dbuf.enabled_slices)
+ drm_err(display->drm,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
+ display->dbuf.enabled_slices,
hw_enabled_slices);
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
const struct skl_wm_level *hw_wm_level, *sw_wm_level;
/* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
hw_wm_level = &hw->wm.planes[plane->id].wm[level];
sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
continue;
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name, level,
sw_wm_level->enable,
@@ -3959,7 +3896,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3975,7 +3912,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -3991,7 +3928,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
if (HAS_HW_SAGV_WM(display) &&
!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
plane->base.base.id, plane->base.name,
sw_wm_level->enable,
@@ -4007,7 +3944,7 @@ void intel_wm_state_verify(struct intel_atomic_state *state,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
plane->base.base.id, plane->base.name,
sw_ddb_entry->start, sw_ddb_entry->end,
@@ -4024,29 +3961,29 @@ static const struct intel_wm_funcs skl_wm_funcs = {
.sanitize = skl_wm_sanitize,
};
-void skl_wm_init(struct drm_i915_private *i915)
+void skl_wm_init(struct intel_display *display)
{
- intel_sagv_init(i915);
+ intel_sagv_init(display);
- skl_setup_wm_latency(i915);
+ skl_setup_wm_latency(display);
- i915->display.funcs.wm = &skl_wm_funcs;
+ display->funcs.wm = &skl_wm_funcs;
}
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
seq_printf(m, "Isochronous Priority Control: %s\n",
- str_yes_no(skl_watermark_ipc_enabled(i915)));
+ str_yes_no(skl_watermark_ipc_enabled(display)));
return 0;
}
static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *i915 = inode->i_private;
+ struct intel_display *display = inode->i_private;
- return single_open(file, skl_watermark_ipc_status_show, i915);
+ return single_open(file, skl_watermark_ipc_status_show, display);
}
static ssize_t skl_watermark_ipc_status_write(struct file *file,
@@ -4054,8 +3991,7 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_i915_private *i915 = m->private;
- intel_wakeref_t wakeref;
+ struct intel_display *display = m->private;
bool enable;
int ret;
@@ -4063,12 +3999,12 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file,
if (ret < 0)
return ret;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- if (!skl_watermark_ipc_enabled(i915) && enable)
- drm_info(&i915->drm,
+ with_intel_display_rpm(display) {
+ if (!skl_watermark_ipc_enabled(display) && enable)
+ drm_info(display->drm,
"Enabling IPC: WM will be proper only after next commit\n");
- i915->display.wm.ipc_enabled = enable;
- skl_watermark_ipc_update(i915);
+ display->wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(display);
}
return len;
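
The debugfs write path above also swaps the runtime-pm idiom; side by side (sketch, both forms appear in this hunk):

/* Before: explicit wakeref threaded through the guard. */
intel_wakeref_t wakeref;

with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
	/* ... body ... */
}

/* After: display-scoped helper, no local wakeref to declare. */
with_intel_display_rpm(display) {
	/* ... body ... */
}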
@@ -4085,7 +4021,7 @@ static const struct file_operations skl_watermark_ipc_status_fops = {
static int intel_sagv_status_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
static const char * const sagv_status[] = {
[I915_SAGV_UNKNOWN] = "unknown",
[I915_SAGV_DISABLED] = "disabled",
@@ -4093,37 +4029,36 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
[I915_SAGV_NOT_CONTROLLED] = "not controlled",
};
- seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
+ seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(display)));
seq_printf(m, "SAGV modparam: %s\n",
- str_enabled_disabled(i915->display.params.enable_sagv));
- seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
- seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
+ str_enabled_disabled(display->params.enable_sagv));
+ seq_printf(m, "SAGV status: %s\n", sagv_status[display->sagv.status]);
+ seq_printf(m, "SAGV block time: %d usec\n", display->sagv.block_time_us);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915)
+void skl_watermark_debugfs_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
struct drm_minor *minor = display->drm->primary;
if (HAS_IPC(display))
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
- &skl_watermark_ipc_status_fops);
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root,
+ display, &skl_watermark_ipc_status_fops);
if (HAS_SAGV(display))
- debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
- &intel_sagv_status_fops);
+ debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root,
+ display, &intel_sagv_status_fops);
}
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
+unsigned int skl_watermark_max_latency(struct intel_display *display, int initial_wm_level)
{
int level;
- for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
- unsigned int latency = skl_wm_latency(i915, level, NULL);
+ for (level = display->wm.num_levels - 1; level >= initial_wm_level; level--) {
+ unsigned int latency = skl_wm_latency(display, level, NULL);
if (latency)
return latency;
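
As used by intel_program_dpkgc_latency() earlier in this patch, the loop scans from the deepest level down to initial_wm_level and returns the first non-zero latency; a usage sketch:

/* Usage sketch, matching the caller converted above: skip level 0
 * and take the deepest level with a non-zero latency. */
u32 latency = skl_watermark_max_latency(display, 1);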
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index d9cff6c54310..95b0b599d5c3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -12,7 +12,6 @@
#include "intel_global_state.h"
#include "intel_wm_types.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_bw_state;
struct intel_crtc;
@@ -27,11 +26,12 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-bool intel_can_enable_sagv(struct drm_i915_private *i915,
+bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state);
+bool intel_can_enable_sagv(struct intel_display *display,
const struct intel_bw_state *bw_state);
-bool intel_has_sagv(struct drm_i915_private *i915);
+bool intel_has_sagv(struct intel_display *display);
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+u32 skl_ddb_dbuf_slice_mask(struct intel_display *display,
const struct skl_ddb_entry *entry);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
@@ -45,14 +45,14 @@ void skl_wm_crtc_disable_noatomic(struct intel_crtc *crtc);
void skl_wm_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane);
-void skl_watermark_ipc_init(struct drm_i915_private *i915);
-void skl_watermark_ipc_update(struct drm_i915_private *i915);
-bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
-void skl_watermark_debugfs_register(struct drm_i915_private *i915);
+void skl_watermark_ipc_init(struct intel_display *display);
+void skl_watermark_ipc_update(struct intel_display *display);
+bool skl_watermark_ipc_enabled(struct intel_display *display);
+void skl_watermark_debugfs_register(struct intel_display *display);
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915,
+unsigned int skl_watermark_max_latency(struct intel_display *display,
int initial_wm_level);
-void skl_wm_init(struct drm_i915_private *i915);
+void skl_wm_init(struct intel_display *display);
const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id,
@@ -86,13 +86,13 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
#define intel_atomic_get_new_dbuf_state(state) \
to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
-int intel_dbuf_init(struct drm_i915_private *i915);
+int intel_dbuf_init(struct intel_display *display);
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
int ratio);
void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
+void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
int ratio, bool joined_mbus);
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index af717df83197..21c1e10caf68 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -251,8 +251,10 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
return 0;
}
-static void band_gap_reset(struct drm_i915_private *dev_priv)
+static void band_gap_reset(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
vlv_flisdsi_get(dev_priv);
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
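
Where a converted function still has to call not-yet-converted interfaces (the vlv_flisdsi_* sideband helpers here; i915->dram_info and snb_pcode_read() in the watermark code above), it derives the legacy pointer locally. A sketch of that bridge:

static void sketch_bridge(struct intel_display *display)
{
	/* Legacy interfaces still take drm_i915_private ... */
	struct drm_i915_private *i915 = to_i915(display->drm);

	/* ... so converted code reaches it via display->drm. */
}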
@@ -269,13 +271,13 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -298,7 +300,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->pipe_bpp = 18;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/* Enable Frame time stamp based scanline reporting */
pipe_config->mode_flags |=
I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
@@ -468,7 +470,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
vlv_flisdsi_put(dev_priv);
/* bandgap reset is needed after every power gate */
- band_gap_reset(dev_priv);
+ band_gap_reset(display);
for_each_dsi_port(port, intel_dsi->ports) {
@@ -495,11 +497,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_device_ready(encoder);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_device_ready(encoder);
else
vlv_dsi_device_ready(encoder);
@@ -559,23 +561,22 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
-static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
+static i915_reg_t port_ctrl_reg(struct intel_display *display, enum port port)
{
- return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
+ return display->platform.geminilake || display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port);
}
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = display->platform.broxton ?
BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A);
intel_de_write(display, MIPI_DEVICE_READY(display, port),
@@ -594,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
+ if ((display->platform.broxton || port == PORT_A) &&
intel_de_wait_for_clear(display, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(display->drm, "DSI LP not going Low\n");
@@ -612,7 +613,6 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -620,7 +620,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp = intel_dsi->pixel_overlap;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(display, MIPI_CTRL(display, port),
BXT_PIXEL_OVERLAP_CNT_MASK,
@@ -633,7 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
u32 temp;
temp = intel_de_read(display, port_ctrl);
@@ -644,7 +644,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
- if (IS_BROXTON(dev_priv))
+ if (display->platform.broxton)
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
temp |= crtc->pipe ?
@@ -664,12 +664,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0);
@@ -730,7 +729,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
bool glk_cold_boot = false;
@@ -745,7 +743,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
bxt_dsi_pll_enable(encoder, pipe_config);
} else {
@@ -753,7 +751,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
vlv_dsi_pll_enable(encoder, pipe_config);
}
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Add MIPI IO reset programming for modeset */
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL);
@@ -762,13 +760,13 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
- if (!IS_GEMINILAKE(dev_priv))
+ if (!display->platform.geminilake)
intel_dsi_prepare(encoder, pipe_config);
/* Give the panel time to power-on and then deassert its reset */
@@ -776,7 +774,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
msleep(intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
glk_cold_boot = glk_dsi_enable_io(encoder);
/* Prepare port in cold boot(s3/s4) scenario */
@@ -788,7 +786,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_device_ready(encoder);
/* Prepare port in normal boot scenario */
- if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
+ if (display->platform.geminilake && !glk_cold_boot)
intel_dsi_prepare(encoder, pipe_config);
/* Send initialization commands in LP mode */
@@ -836,11 +834,11 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
@@ -860,9 +858,9 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
glk_dsi_clear_device_ready(encoder);
else
vlv_dsi_clear_device_ready(encoder);
@@ -874,13 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
drm_dbg_kms(display->drm, "\n");
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
intel_crtc_vblank_off(old_crtc_state);
skl_scaler_disable(old_crtc_state);
@@ -907,7 +904,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
/* Transition to LP-00 */
intel_dsi_clear_device_ready(encoder);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
/* Power down DSI regulator to save power */
intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL,
@@ -917,12 +914,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0);
}
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_pll_disable(encoder);
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(display, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
@@ -939,7 +936,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
enum port port;
@@ -957,13 +953,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- !bxt_dsi_pll_is_enabled(dev_priv))
+ if ((display->platform.geminilake || display->platform.broxton) &&
+ !bxt_dsi_pll_is_enabled(display))
goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ i915_reg_t port_ctrl = port_ctrl_reg(display, port);
bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE;
/*
@@ -971,10 +967,10 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if ((display->platform.valleyview || display->platform.cherryview) &&
port == PORT_C)
enabled = intel_de_read(display,
- TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
+ TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
@@ -989,7 +985,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY))
continue;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
u32 tmp = intel_de_read(display, MIPI_CTRL(display, port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -1060,7 +1056,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
intel_de_read(display,
- BXT_MIPI_TRANS_VTOTAL(port));
+ BXT_MIPI_TRANS_VTOTAL(port)) + 1;
hactive = adjusted_mode->crtc_hdisplay;
hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port));
@@ -1177,15 +1173,15 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 pclk;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = bxt_dsi_get_pclk(encoder, pipe_config);
} else {
@@ -1218,7 +1214,6 @@ static void set_dsi_timings(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1253,7 +1248,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1265,7 +1260,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port),
adjusted_mode->crtc_vdisplay);
intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port),
- adjusted_mode->crtc_vtotal);
+ adjusted_mode->crtc_vtotal - 1);
}
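
Buried in the mechanical conversion, the paired VTOTAL changes here and in bxt_dsi_get_pipe_config() above are a functional fix; the invariant they establish (assuming the register holds a zero-based line count, as the -1/+1 pairing implies):

/*
 * Sketch of the invariant (assumption: zero-based register field):
 *
 *	program:  BXT_MIPI_TRANS_VTOTAL(port) <- crtc_vtotal - 1
 *	readout:  crtc_vtotal <- BXT_MIPI_TRANS_VTOTAL(port) + 1
 *
 * so software state and hardware readout of vtotal now agree.
 */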
intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port),
@@ -1307,7 +1302,6 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1327,7 +1321,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
@@ -1342,7 +1336,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
intel_de_write(display, MIPI_CTRL(display, port),
tmp | READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
enum pipe pipe = crtc->pipe;
intel_de_rmw(display, MIPI_CTRL(display, port),
@@ -1377,7 +1371,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (display->platform.geminilake || display->platform.broxton) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi))
tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1424,7 +1418,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_INIT_COUNT(display, port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ if ((display->platform.geminilake || display->platform.broxton) &&
!intel_dsi->dual_link) {
/*
* BXT spec says write MIPI_INIT_COUNT for
@@ -1461,7 +1455,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
intel_de_write(display, MIPI_LP_BYTECLK(display, port),
intel_dsi->lp_byte_clk);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (display->platform.geminilake) {
intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port),
intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
@@ -1513,18 +1507,17 @@ static void intel_dsi_prepare(struct intel_encoder *encoder,
static void intel_dsi_unprepare(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return;
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
@@ -1596,8 +1589,8 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct intel_connector *connector = intel_dsi->attached_connector;
+ struct intel_display *display = to_intel_display(connector);
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns, extra_byte_count, tlpx_ui;
u32 ui_num, ui_den;
@@ -1645,7 +1638,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* For GEMINILAKE, dphy_param_reg will be programmed in terms of
* HS byte clock count; for other platforms, in HS ddr clock count
*/
- mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+ mul = display->platform.geminilake ? 8 : 2;
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
@@ -1653,7 +1646,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ drm_dbg_kms(display->drm, "prepare count too high %u\n",
prepare_cnt);
prepare_cnt = PREPARE_CNT_MAX;
}
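
With mul selected above (8 on Geminilake for the HS byte clock, 2 elsewhere for the HS ddr clock), the count is a plain ns-to-clock conversion. A worked example with hypothetical numbers:

/*
 * Hypothetical example (illustration only): ui_num/ui_den = 2/1 ns
 * per UI, ths_prepare_ns = 65, non-GLK platform (mul = 2):
 *
 *	prepare_cnt = DIV_ROUND_UP(65 * 1, 2 * 2) = 17;
 *
 * values above PREPARE_CNT_MAX are clamped, as the check above shows.
 */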
@@ -1674,7 +1667,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
exit_zero_cnt += 1;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ drm_dbg_kms(display->drm, "exit zero count too high %u\n",
exit_zero_cnt);
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
}
@@ -1685,7 +1678,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* ui_den, ui_num * mul);
if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ drm_dbg_kms(display->drm, "clock zero count too high %u\n",
clk_zero_cnt);
clk_zero_cnt = CLK_ZERO_CNT_MAX;
}
@@ -1695,7 +1688,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (trail_cnt > TRAIL_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ drm_dbg_kms(display->drm, "trail count too high %u\n",
trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
}
@@ -1761,7 +1754,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
@@ -1770,7 +1763,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* On Valleyview some DSI panels lose (v|h)sync when the clock is lower
* than 320000KHz.
*/
- if (IS_VALLEYVIEW(dev_priv))
+ if (display->platform.valleyview)
return 320000;
/*
@@ -1778,7 +1771,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
* picture gets unstable, despite that values are
* correct for DSI PLL and DE PLL.
*/
- if (IS_GEMINILAKE(dev_priv))
+ if (display->platform.geminilake)
return 158400;
return 0;
@@ -1903,9 +1896,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = {
{ }
};
-void vlv_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -1914,16 +1906,16 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
enum port port;
enum pipe pipe;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
if (!intel_bios_is_dsi_present(display, &port))
return;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
+ if (display->platform.geminilake || display->platform.broxton)
+ display->dsi.mmio_base = BXT_MIPI_BASE;
else
- dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
+ display->dsi.mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
@@ -1938,12 +1930,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
encoder = &intel_dsi->base;
intel_dsi->attached_connector = connector;
- drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &intel_dsi_funcs,
DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
encoder->compute_config = intel_dsi_compute_config;
encoder->pre_enable = intel_dsi_pre_enable;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->enable = bxt_dsi_enable;
encoder->disable = intel_dsi_disable;
encoder->post_disable = intel_dsi_post_disable;
@@ -1963,7 +1955,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
encoder->pipe_mask = ~0;
else if (port == PORT_A)
encoder->pipe_mask = BIT(PIPE_A);
@@ -1979,10 +1971,10 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
@@ -1998,18 +1990,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- drm_dbg_kms(&dev_priv->drm, "no device found\n");
+ drm_dbg_kms(display->drm, "no device found\n");
goto err;
}
/* Use clock read-back from current hw-state for fastboot */
current_mode = intel_encoder_current_mode(encoder);
if (current_mode) {
- drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ drm_dbg_kms(display->drm, "Calculated pclk %d GOP %d\n",
intel_dsi->pclk, current_mode->clock);
if (intel_fuzzy_clock_check(intel_dsi->pclk,
current_mode->clock)) {
- drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
+ drm_dbg_kms(display->drm, "Using GOP pclk\n");
intel_dsi->pclk = current_mode->clock;
}
@@ -2021,7 +2013,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi_vbt_gpio_init(intel_dsi,
intel_dsi_get_hw_state(encoder, &pipe));
- drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs,
+ drm_connector_init(display->drm, &connector->base, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs);
@@ -2030,12 +2022,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(connector, encoder);
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!intel_panel_preferred_fixed_mode(connector)) {
- drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
+ drm_dbg_kms(display->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
index 277bacfbc551..ff349b5876c2 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -7,14 +7,14 @@
#define __VLV_DSI_H__
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsi;
#ifdef I915
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state);
-void vlv_dsi_init(struct drm_i915_private *dev_priv);
+void vlv_dsi_init(struct intel_display *display);
#else
static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
@@ -23,7 +23,7 @@ static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
return 0;
}
-static inline void vlv_dsi_init(struct drm_i915_private *dev_priv)
+static inline void vlv_dsi_init(struct intel_display *display)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 2ed47e7d1051..7ce924a5ef90 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -57,7 +57,7 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
return dsi_clk_khz;
}
-static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+static int dsi_calc_mnp(struct intel_display *display,
struct intel_crtc_state *config,
int target_dsi_clk)
{
@@ -68,11 +68,11 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
- drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
+ drm_err(display->drm, "DSI CLK Out of Range\n");
return -ECHRNG;
}
- if (IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.cherryview) {
ref_clk = 100000;
n = 4;
m_min = 70;
@@ -116,13 +116,13 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
static int vlv_dsi_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
- int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int refclk = display->platform.cherryview ? 100000 : 25000;
int i;
pll_ctl = config->dsi_pll.ctrl;
@@ -147,7 +147,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
p--;
if (!p) {
- drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ drm_err(display->drm, "wrong P1 divisor\n");
return 0;
}
@@ -157,7 +157,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
}
if (i == ARRAY_SIZE(lfsr_converts)) {
- drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ drm_err(display->drm, "wrong m_seed programmed\n");
return 0;
}
@@ -175,16 +175,16 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pclk, dsi_clk, ret;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
+ ret = dsi_calc_mnp(display, config, dsi_clk);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
+ drm_dbg_kms(display->drm, "dsi_calc_mnp failed\n");
return ret;
}
@@ -196,7 +196,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
- drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ drm_dbg_kms(display->drm, "dsi pll div %08x, ctrl %08x\n",
config->dsi_pll.div, config->dsi_pll.ctrl);
pclk = vlv_dsi_pclk(encoder, config);
@@ -213,9 +213,10 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
@@ -235,20 +236,21 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DSI_PLL_LOCK, 20)) {
vlv_cck_put(dev_priv);
- drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
+ drm_err(display->drm, "DSI PLL lock failed\n");
return;
}
vlv_cck_put(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
@@ -260,14 +262,14 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
vlv_cck_put(dev_priv);
}
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
bool enabled;
u32 val;
u32 mask;
mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
- val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(display, BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;
if (!enabled)
@@ -281,17 +283,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
- val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
- if (IS_GEMINILAKE(dev_priv)) {
+ val = intel_de_read(display, BXT_DSI_PLL_CTL);
+ if (display->platform.geminilake) {
if (!(val & BXT_DSIA_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- drm_dbg(&dev_priv->drm,
- "Invalid PLL divider (%08x)\n", val);
+ drm_dbg_kms(display->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
@@ -301,29 +303,30 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
void bxt_dsi_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0);
/*
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timeout waiting for PLL lock deassertion\n");
}
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pll_ctl, pll_div;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
@@ -352,14 +355,14 @@ static int bxt_dsi_pclk(struct intel_encoder *encoder,
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 pclk;
- config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
+ config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL);
pclk = bxt_dsi_pclk(encoder, config);
- drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
+ drm_dbg_kms(display->drm, "Calculated pclk=%u\n", pclk);
return pclk;
}
@@ -375,10 +378,9 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
-static void glk_dsi_program_esc_clock(struct drm_device *dev,
- const struct intel_crtc_state *config)
+static void glk_dsi_program_esc_clock(struct intel_display *display,
+ const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dsi_rate = 0;
u32 pll_ratio = 0;
u32 ddr_clk = 0;
@@ -415,17 +417,16 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
txesc2_div = min_t(u32, div2_value, 10);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV1,
(1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
- intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ intel_de_write(display, MIPIO_TXESC_CLK_DIV2,
(1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+static void bxt_dsi_program_clocks(struct intel_display *display, enum port port,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
u32 dsi_rate = 0;
u32 pll_ratio = 0;
@@ -436,7 +437,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
u32 mipi_8by3_divider;
/* Clear old configurations */
- tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -472,13 +473,13 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
- intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp);
}
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
@@ -494,7 +495,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
} else {
@@ -503,11 +504,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Can't get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
- drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
+ drm_dbg_kms(display->drm, "DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and select MIPIC and MIPIA PLL output as 8x
@@ -519,7 +520,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
/* Per the hardware team's recommendation,
* program PVD ratio = 1 if dsi ratio <= 50
*/
- if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
+ if (display->platform.broxton && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
pclk = bxt_dsi_pclk(encoder, config);
@@ -536,46 +537,45 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
/* Configure PLL values */
- intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
- intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
+ intel_de_write(display, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(display, BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
for_each_dsi_port(port, intel_dsi->ports)
- bxt_dsi_program_clocks(encoder->base.dev, port, config);
+ bxt_dsi_program_clocks(display, port, config);
} else {
- glk_dsi_program_esc_clock(encoder->base.dev, config);
+ glk_dsi_program_esc_clock(display, config);
}
/* Enable DSI PLL */
- intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
+ intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
- if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for DSI PLL to lock\n");
return;
}
- drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+ drm_dbg_kms(display->drm, "DSI PLL locked\n");
}
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
/* Clear old configurations */
- if (IS_BROXTON(dev_priv)) {
+ if (display->platform.broxton) {
tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
index f975660fa609..f26e31a7dd69 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -9,7 +9,6 @@
#include <linux/types.h>
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
struct intel_display;
struct intel_encoder;
@@ -33,11 +32,11 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
#ifdef I915
-bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+bool bxt_dsi_pll_is_enabled(struct intel_display *display);
void assert_dsi_pll_enabled(struct intel_display *display);
void assert_dsi_pll_disabled(struct intel_display *display);
#else
-static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+static inline bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
return false;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index ddda468241ef..6e4d0ce3952f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 7d97ea2a653e..c4854c5b4e0f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
index e6c382973129..9d7ee1579900 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ab1af978911b..15835952352e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2011-2012 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index e5b0f66ea1fe..6e682a6a0574 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 67ac2586a0f3..0267c924634b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 9473050ac842..05e440643aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright 2012 Red Hat Inc
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 75a143d996e0..7a0cc51923b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7796c4119ef5..ca7e9216934a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008,2010 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index ea7561ae6e13..232b984f60b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 28d6526e32ab..8044d34707b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 388f90784d8a..f566191d843b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -48,8 +48,7 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
i915_gem_object_evictable(obj))
assert_object_held(obj);
#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+ return mr && intel_memory_type_is_local(mr->type);
}
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3dabb857960..f6d37dff320d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 196417fd0f5c..946fb9825eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 356530b599ce..1f38e367c60b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2017 Intel Corporation
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index a5f34542135c..c34f41605b46 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 68413c05c812..64600aa8227f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8780aa243105..7f83f8bdc8fb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ef85c6dc9fd5..f9e7cab140f8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 900c08337942..f0857c5c96df 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index bedf1e95941a..bd5bd2c5e7f9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2019 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index ae3343c81a64..19a3eb82dc6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
@@ -305,36 +304,20 @@ void __shmem_writeback(size_t size, struct address_space *mapping)
.range_end = LLONG_MAX,
.for_reclaim = 1,
};
- unsigned long i;
+ struct folio *folio = NULL;
+ int error = 0;
/*
* Leave mmappings intact (GTT will have been revoked on unbinding,
- * leaving only CPU mmapings around) and add those pages to the LRU
+ * leaving only CPU mmappings around) and add those folios to the LRU
* instead of invoking writeback so they are aged and paged out
* as normal.
*/
-
- /* Begin writeback on each dirty page */
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- struct page *page;
-
- page = find_lock_page(mapping, i);
- if (!page)
- continue;
-
- if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
- int ret;
-
- SetPageReclaim(page);
- ret = mapping->a_ops->writepage(page, &wbc);
- if (!PageWriteback(page))
- ClearPageReclaim(page);
- if (!ret)
- goto put;
- }
- unlock_page(page);
-put:
- put_page(page);
+ while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
+ if (folio_mapped(folio))
+ folio_redirty_for_writepage(&wbc, folio);
+ else
+ error = shmem_writeout(folio, &wbc);
}
}
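The __shmem_writeback() rewrite above swaps the hand-rolled find_lock_page() loop for writeback_iter(), which hands back each dirty folio locked and threads the error code through the loop state. The same loop as in the hunk, restated with explanatory comments:

    struct folio *folio = NULL;
    int error = 0;

    /* writeback_iter() resumes from (folio, error), returns NULL when done */
    while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
            if (folio_mapped(folio))
                    /* still CPU-mmapped: keep it dirty, let the LRU age it */
                    folio_redirty_for_writepage(&wbc, folio);
            else
                    /* unmapped dirty folio: write it out via shmem */
                    error = shmem_writeout(folio, &wbc);
    }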
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index aec41f0f098f..b81e67504bbe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2015 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 9d958a6f377e..3380151edfc1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008-2012 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index af85d0c28168..8814cbcde5b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2014-2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 5ac23ff3feff..5a296ba3758a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2008 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 09b68713ab32..307a18eede72 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2012-2014 Intel Corporation
*
- * Based on amdgpu_mn, which bears the following notice:
+ * Based on amdgpu_mn, which bears the following notice:
*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 1f55e62044a4..7127e90c1a8f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2016 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 46b9a17d6abc..65d84a93c525 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
index 5d835e44c4f6..16d4333c9a4e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.h
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 804f74084bd4..9c3f17e51885 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1837,6 +1837,8 @@ static int igt_mmap_revoke(void *arg)
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
+ int ret;
+ bool unuse_mm = false;
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
SUBTEST(igt_smoke_tiling),
@@ -1848,5 +1850,15 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_mmap_gpu),
};
- return i915_live_subtests(tests, i915);
+ if (!current->mm) {
+ kthread_use_mm(current->active_mm);
+ unuse_mm = true;
+ }
+
+ ret = i915_live_subtests(tests, i915);
+
+ if (unuse_mm)
+ kthread_unuse_mm(current->active_mm);
+
+ return ret;
}
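The wrapper added above accounts for selftests running from a kernel thread, which has no mm of its own; the mmap paths under test need one. The general shape of the pattern, with do_mmap_dependent_work() as a hypothetical stand-in for the subtests:

    bool borrowed_mm = false;
    int ret;

    if (!current->mm) {
            /* kthreads have no mm: borrow the active one for mmap paths */
            kthread_use_mm(current->active_mm);
            borrowed_mm = true;
    }

    ret = do_mmap_dependent_work();     /* hypothetical */

    if (borrowed_mm)
            kthread_unuse_mm(current->active_mm);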
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index f6c59f20832f..46a5aa4ab9c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -289,6 +289,14 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen8_ggtt_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN8_PAGE_PRESENT;
+ *is_local = pte & GEN12_GGTT_PTE_LM;
+
+ return pte & GEN12_GGTT_PTE_ADDR_MASK;
+}
+
static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
{
struct intel_gt *gt = ggtt->vm.gt;
@@ -435,6 +443,11 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
writeq(pte, addr);
}
+static gen8_pte_t gen8_get_pte(void __iomem *addr)
+{
+ return readq(addr);
+}
+
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -450,6 +463,16 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
+}
+
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
dma_addr_t addr, u64 offset,
unsigned int pat_index, u32 flags)
@@ -605,6 +628,17 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset,
+ bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ return vm->pte_decode(ioread32(pte), is_present, is_local);
+}
+
/*
* Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands
@@ -769,6 +803,14 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ return ggtt->vm.read_entry(vm, offset, is_present, is_local);
+}
+
/*
* Reserve the top of the GuC address space for firmware images. Addresses
* beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
@@ -1245,6 +1287,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen8_ggtt_read_entry;
/*
* Serialize GTT updates with aperture access on BXT if VT-d is on,
@@ -1291,6 +1334,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+ ggtt->vm.pte_decode = gen8_ggtt_pte_decode;
+
return ggtt_probe_common(ggtt, size);
}
@@ -1390,6 +1435,14 @@ static u64 iris_pte_encode(dma_addr_t addr,
return pte;
}
+static dma_addr_t gen6_pte_decode(u64 pte, bool *is_present, bool *is_local)
+{
+ *is_present = pte & GEN6_PTE_VALID;
+ *is_local = false;
+
+ return ((pte & 0xff0) << 28) | (pte & ~0xfff);
+}
+
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -1428,6 +1481,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.read_entry = gen6_ggtt_read_entry;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -1443,6 +1497,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
else
ggtt->vm.pte_encode = snb_pte_encode;
+ ggtt->vm.pte_decode = gen6_pte_decode;
+
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
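The new pte_decode hooks are inverses of the existing pte_encode callbacks. For gen6, address bits 39:32 live in PTE bits 11:4, which gen6_pte_decode() recovers with the (pte & 0xff0) << 28 shift. A self-contained sanity sketch of that round trip; the pack side is reconstructed from the decode, not copied from the driver, and the valid bit is assumed to be bit 0:

    /* pack a 40-bit page address into a gen6-style PTE */
    static u64 gen6_pack(u64 addr)
    {
            return (addr & 0xfffff000ULL)       /* addr[31:12] -> pte[31:12] */
                 | ((addr >> 28) & 0xff0)       /* addr[39:32] -> pte[11:4] */
                 | 1;                           /* GEN6_PTE_VALID, assumed bit 0 */
    }

    /* mirrors gen6_pte_decode() above, minus the present/local flags */
    static u64 gen6_unpack(u64 pte)
    {
            return ((pte & 0xff0) << 28) | (pte & ~0xfffULL);
    }

    /* gen6_unpack(gen6_pack(a)) == (a & 0xfffffff000ULL) for any 40-bit a */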
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index 59eed0a0ce90..c5f5f0bdfb2c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -27,6 +27,13 @@ static void gmch_ggtt_insert_page(struct i915_address_space *vm,
intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
+static dma_addr_t gmch_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local)
+{
+ return intel_gmch_gtt_read_entry(offset >> PAGE_SHIFT,
+ is_present, is_local);
+}
+
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
@@ -103,6 +110,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
ggtt->vm.clear_range = gmch_ggtt_clear_range;
ggtt->vm.scratch_range = gmch_ggtt_clear_range;
+ ggtt->vm.read_entry = gmch_ggtt_read_entry;
ggtt->vm.cleanup = gmch_ggtt_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 30b128b1fde7..afbc5c769308 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -176,7 +176,6 @@ static void clear_vm_list(struct list_head *list)
i915_vma_destroy_locked(vma);
i915_gem_object_put(obj);
}
-
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 0a36ea751b63..9d3a3ad567a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -312,6 +312,7 @@ struct i915_address_space {
u64 (*pte_encode)(dma_addr_t addr,
unsigned int pat_index,
u32 flags); /* Create a valid PTE */
+ dma_addr_t (*pte_decode)(u64 pte, bool *is_present, bool *is_local);
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)
@@ -340,6 +341,8 @@ struct i915_address_space {
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags);
+ dma_addr_t (*read_entry)(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
void (*cleanup)(struct i915_address_space *vm);
void (*foreach)(struct i915_address_space *vm,
@@ -590,6 +593,9 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm,
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res);
+dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
+ u64 offset, bool *is_present, bool *is_local);
+
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
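Together, the read_entry vfunc and pte_decode give callers a way to inspect a live GGTT slot. A hedged usage sketch; the offset and the printout are illustrative, not taken from the series:

    bool is_present, is_local;
    dma_addr_t addr;

    /* read back and decode the PTE at GGTT offset 0 */
    addr = intel_ggtt_read_entry(&ggtt->vm, 0, &is_present, &is_local);
    if (is_present)
            pr_info("GGTT[0] -> %pad (%s)\n", &addr,
                    is_local ? "local" : "system");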
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 51847a846002..c481b56fa67d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -751,7 +751,6 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
-
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
/*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index cf41d325712e..5dd8121f4b15 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -314,7 +314,6 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
};
static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
-
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
/* WB - L3 */
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 6e9977b2d180..a876a34455f1 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -365,7 +365,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
ENGINE_READ_FW(engine, RING_HEAD),
ENGINE_READ_FW(engine, RING_TAIL),
ENGINE_READ_FW(engine, RING_START));
- if (!stop_ring(engine)) {
+ /*
+ * Sometimes the engine head fails to clear to zero even after it is
+ * written. Use wait_for_atomic() with a 20ms timeout so the engine can
+ * resume from the correct RING_HEAD. Different timeout values were
+ * tried, and 20ms worked best in testing.
+ */
+ if (wait_for_atomic(stop_ring(engine), 20)) {
drm_err(&engine->i915->drm,
"failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 71ee01d9ef64..eb89948cc112 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -8,7 +8,7 @@
#include <drm/intel/i915_drm.h>
#include "display/intel_display.h"
-#include "display/intel_display_irq.h"
+#include "display/intel_display_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -74,7 +74,7 @@ static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
static void rps_timer(struct timer_list *t)
{
- struct intel_rps *rps = from_timer(rps, t, timer);
+ struct intel_rps *rps = timer_container_of(rps, t, timer);
struct intel_gt *gt = rps_to_gt(rps);
struct intel_engine_cs *engine;
ktime_t dt, last, timestamp;
@@ -550,6 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore)
static bool gen5_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = &i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fstart, vstart;
u32 rgvmodectl;
@@ -607,9 +608,7 @@ static bool gen5_rps_enable(struct intel_rps *rps)
rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
rps->ips.last_time2 = ktime_get_raw_ns();
- spin_lock(&i915->irq_lock);
- ilk_enable_display_irq(i915, DE_PCU_EVENT);
- spin_unlock(&i915->irq_lock);
+ ilk_display_rps_enable(display);
spin_unlock_irq(&mchdev_lock);
@@ -621,14 +620,13 @@ static bool gen5_rps_enable(struct intel_rps *rps)
static void gen5_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_display *display = &i915->display;
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
- spin_lock(&i915->irq_lock);
- ilk_disable_display_irq(i915, DE_PCU_EVENT);
- spin_unlock(&i915->irq_lock);
+ ilk_display_rps_disable(display);
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 5135b90a2a40..ece445109305 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -57,7 +57,7 @@ struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
- * i915->irq_lock
+ * gt->irq_lock
*/
struct timer_list timer;
struct work_struct work;
diff --git a/drivers/gpu/drm/i915/gt/intel_wopcm.h b/drivers/gpu/drm/i915/gt/intel_wopcm.h
index 17d6aa86008a..d2038b6de5e7 100644
--- a/drivers/gpu/drm/i915/gt/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/gt/intel_wopcm.h
@@ -1,6 +1,5 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2017-2018 Intel Corporation
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 116683ebe074..b37e400f74e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -156,7 +156,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
struct i915_wa *list;
- list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*list),
GFP_KERNEL);
if (!list) {
drm_err(&i915->drm, "No space for workaround init!\n");
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 64315b714743..79741f043f03 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -108,7 +108,7 @@ static void advance(struct i915_request *request)
static void hw_delay_complete(struct timer_list *t)
{
- struct mock_engine *engine = from_timer(engine, t, hw_delay);
+ struct mock_engine *engine = timer_container_of(engine, t, hw_delay);
struct i915_request *request;
unsigned long flags;
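Several hunks in this series (rps_timer(), hw_delay_complete(), spinner_kill()) move from from_timer() to timer_container_of(), the renamed helper that recovers the enclosing object from the struct timer_list handed to a timer callback. A minimal sketch around a hypothetical structure:

    struct my_state {
            struct timer_list timer;
            unsigned long expirations;
    };

    static void my_timer_fn(struct timer_list *t)
    {
            /* same container_of() semantics as from_timer(), new spelling */
            struct my_state *st = timer_container_of(st, t, timer);

            st->expirations++;
    }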
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 22e750108c5f..23f04f6f8fba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
#include "gem/i915_gem_internal.h"
+#include "i915_drv.h"
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
@@ -859,6 +860,14 @@ static int live_lrc_timestamp(void *arg)
};
/*
+ * This test was designed to isolate a hardware bug. The bug was
+ * found and fixed in later generations, but the test now pollutes
+ * our CI on earlier ones.
+ */
+ if (GRAPHICS_VER(gt->i915) == 12)
+ return 0;
+
+ /*
* We want to verify that the timestamp is saved and restored across
* context switches and is monotonic.
*
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 401bee030dbc..54bc447efce0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -537,7 +537,7 @@ struct spinner_timer {
static void spinner_kill(struct timer_list *timer)
{
- struct spinner_timer *st = from_timer(st, timer, timer);
+ struct spinner_timer *st = timer_container_of(st, timer, timer);
igt_spinner_end(&st->spin);
pr_info("%s\n", __func__);
@@ -661,7 +661,7 @@ static int live_emit_pte_full_ring(void *arg)
out_rq:
i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
timer_delete_sync(&st.timer);
- destroy_timer_on_stack(&st.timer);
+ timer_destroy_on_stack(&st.timer);
out_unpin:
intel_context_unpin(ce);
out_put:
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 908483ab0bc8..41716ed454b7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -33,15 +33,22 @@ int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
struct intel_rc6 *rc6 = &gt->rc6;
- u64 rc0_power, rc6_power;
+ struct intel_rps *rps = &gt->rps;
intel_wakeref_t wakeref;
+ u64 rc0_sample_energy[2];
+ u64 rc6_sample_energy[2];
+ u64 sleep_time = 1000;
+ u32 rc0_freq = 0;
+ u32 rc6_freq = 0;
+ u64 rc0_power;
+ u64 rc6_power;
bool has_power;
+ u64 threshold;
ktime_t dt;
u64 res[2];
int err = 0;
- u32 rc0_freq = 0;
- u32 rc6_freq = 0;
- struct intel_rps *rps = &gt->rps;
+ u64 diff;
+
/*
* Our claim is that we can "encourage" the GPU to enter rc6 at will.
@@ -60,14 +67,15 @@ int live_rc6_manual(void *arg)
/* Force RC6 off for starters */
__intel_rc6_disable(rc6);
- msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+ /* wakeup is not immediate, takes about 100us on icl */
+ usleep_range(1000, 2000);
res[0] = rc6_residency(rc6);
dt = ktime_get();
- rc0_power = librapl_energy_uJ();
- msleep(1000);
- rc0_power = librapl_energy_uJ() - rc0_power;
+ rc0_sample_energy[0] = librapl_energy_uJ();
+ msleep(sleep_time);
+ rc0_sample_energy[1] = librapl_energy_uJ() - rc0_sample_energy[0];
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
rc0_freq = intel_rps_read_actual_frequency_fw(rps);
@@ -79,11 +87,12 @@ int live_rc6_manual(void *arg)
}
if (has_power) {
- rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
+ rc0_power = div64_u64(NSEC_PER_SEC * rc0_sample_energy[1],
ktime_to_ns(dt));
+
if (!rc0_power) {
if (rc0_freq)
- pr_debug("No power measured while in RC0! GPU Freq: %u in RC0\n",
+ pr_debug("No power measured while in RC0! GPU Freq: %uMHz in RC0\n",
rc0_freq);
else
pr_err("No power and freq measured while in RC0\n");
@@ -98,10 +107,10 @@ int live_rc6_manual(void *arg)
res[0] = rc6_residency(rc6);
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
- rc6_power = librapl_energy_uJ();
- msleep(1000);
+ rc6_sample_energy[0] = librapl_energy_uJ();
+ msleep(sleep_time);
rc6_freq = intel_rps_read_actual_frequency_fw(rps);
- rc6_power = librapl_energy_uJ() - rc6_power;
+ rc6_sample_energy[1] = librapl_energy_uJ() - rc6_sample_energy[0];
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
@@ -113,13 +122,24 @@ int live_rc6_manual(void *arg)
}
if (has_power) {
- rc6_power = div64_u64(NSEC_PER_SEC * rc6_power,
+ rc6_power = div64_u64(NSEC_PER_SEC * rc6_sample_energy[1],
ktime_to_ns(dt));
- pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+ pr_info("GPU consumed %lluuW in RC0 and %lluuW in RC6\n",
rc0_power, rc6_power);
+
if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6! GPU Freq: %u in RC6 and %u in RC0\n",
- rc6_freq, rc0_freq);
+ pr_err("GPU leaked energy while in RC6!\n"
+ "GPU Freq: %uMHz in RC6 and %uMHz in RC0\n"
+ "RC0 energy before & after sleep respectively: %lluuJ %lluuJ\n"
+ "RC6 energy before & after sleep respectively: %lluuJ %lluuJ\n",
+ rc6_freq, rc0_freq, rc0_sample_energy[0], rc0_sample_energy[1],
+ rc6_sample_energy[0], rc6_sample_energy[1]);
+
+ diff = res[1] - res[0];
+ threshold = (9 * NSEC_PER_MSEC * sleep_time) / 10;
+ if (diff < threshold)
+ pr_err("Did not enter RC6 properly, RC6 start residency=%lluns, RC6 end residency=%lluns\n",
+ res[0], res[1]);
err = -EINVAL;
goto out_unlock;
}
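The added residency check turns a silent failure mode into a diagnosable one. Residency counters are in nanoseconds while sleep_time is in milliseconds, so the threshold works out to 90% of the sleep window; restated with the test's own values:

    /* sleep_time = 1000 (ms); NSEC_PER_MSEC = 1000000 */
    threshold = (9 * NSEC_PER_MSEC * sleep_time) / 10;  /* 900,000,000 ns */

    /* diff = res[1] - res[0]: less than 900 ms of RC6 across a 1000 ms
     * sleep means the GPU never settled into RC6 at all, as opposed to
     * entering RC6 but leaking power there. */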
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 3941f2d6fa47..69ed946a39e5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -143,7 +143,7 @@ pte_tlbinv(struct intel_context *ce,
if (ce->engine->class == OTHER_CLASS)
msleep(200);
else
- msleep(10);
+ usleep_range(10000, 20000);
if (va == vb) {
if (!i915_request_completed(rq)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 5dc0ccd07636..d550eb6edfb8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -230,7 +230,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build);
- return -EINVAL;
+ return -EINVAL;
}
if (min_ver.major) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f8cb7c630d5b..127316d2c8aa 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -633,7 +633,7 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
atomic_inc(&guc->outstanding_submission_g2h);
ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
- if (ret)
+ if (ret && g2h_len_dw)
atomic_dec(&guc->outstanding_submission_g2h);
return ret;
@@ -3443,18 +3443,29 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
* GuC is active, let's destroy this context, but at this point we can still be racing
* with suspend, so we undo everything if the H2G fails in deregister_context so
* that GuC reset will find this context during clean up.
+ *
+ * There is a race condition where the reset code could have altered
+ * this context's state and done a wakeref put before we try to
+ * deregister it here. So check if the context is still set to be
+ * destroyed before undoing earlier changes, to avoid two wakeref puts
+ * on the same context.
*/
ret = deregister_context(ce, ce->guc_id.id);
if (ret) {
+ bool pending_destroyed;
spin_lock_irqsave(&ce->guc_state.lock, flags);
- set_context_registered(ce);
- clr_context_destroyed(ce);
+ pending_destroyed = context_destroyed(ce);
+ if (pending_destroyed) {
+ set_context_registered(ce);
+ clr_context_destroyed(ce);
+ }
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* As gt-pm is awake at function entry, intel_wakeref_put_async merely
* decrements the wakeref immediately, but per the function's usage spec
* it must be called after the unlock.
*/
- intel_wakeref_put_async(&gt->wakeref);
+ if (pending_destroyed)
+ intel_wakeref_put_async(&gt->wakeref);
}
return ret;
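The guc_lrc_desc_unpin() fix is the re-check-under-lock rollback guard: before compensating for a failed H2G, confirm under ce->guc_state.lock that a racing path (here, reset) has not already unwound the state, so the wakeref is put exactly once. The generic shape, with hypothetical names:

    bool still_pending;

    spin_lock_irqsave(&obj->lock, flags);
    still_pending = obj_marked_destroyed(obj);  /* hypothetical accessor */
    if (still_pending)
            obj_undo_destroy(obj);              /* roll back our own change */
    spin_unlock_irqrestore(&obj->lock, flags);

    /* drop the reference only if the destroy was still ours to undo */
    if (still_pending)
            put_ref_async(obj);                 /* hypothetical */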
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index eedd1865bb98..62d14f82256f 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -46,6 +46,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
+ intel_wakeref_t wakeref;
int ret;
if (high_gm) {
@@ -63,12 +64,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&gt->ggtt->vm.mutex);
- mmio_hw_access_pre(gt);
+ wakeref = mmio_hw_access_pre(gt);
ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
@@ -226,7 +227,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(uncore->rpm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index baccbf1761b7..673534f061ef 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -91,16 +91,17 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
.diff = 0,
};
struct diff_mmio *node, *next;
+ intel_wakeref_t wakeref;
INIT_LIST_HEAD(&param.diff_mmio_list);
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2fa7ca19ba5d..ae9b0ded3651 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -220,9 +220,11 @@ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gt);
intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(gt);
+ mmio_hw_access_post(gt, wakeref);
}
static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 01d890999f25..1d10c16e6465 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -570,14 +570,15 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct intel_gt *gt)
+static inline intel_wakeref_t mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(gt->uncore->rpm);
+ return intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct intel_gt *gt)
+static inline void mmio_hw_access_post(struct intel_gt *gt,
+ intel_wakeref_t wakeref)
{
- intel_runtime_pm_put_unchecked(gt->uncore->rpm);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
/**
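The GVT hunks convert mmio_hw_access_pre()/post() from the unchecked runtime-pm helpers to the cookie-tracked pair: intel_runtime_pm_get() returns an intel_wakeref_t that the matching put consumes, so the tracking code can flag unbalanced or mismatched puts. Every caller therefore grows a local cookie:

    intel_wakeref_t wakeref;

    wakeref = mmio_hw_access_pre(gvt->gt);  /* intel_runtime_pm_get() */
    /* ... MMIO accesses that need the device awake ... */
    mmio_hw_access_post(gvt->gt, wakeref);  /* intel_runtime_pm_put(cookie) */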
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 4efee6797873..1344e6d20a34 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -56,6 +56,7 @@
#include "display/intel_pps_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sprite_regs.h"
+#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
@@ -264,6 +265,7 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
+ intel_wakeref_t wakeref;
int ret;
ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
@@ -271,10 +273,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(gvt->gt);
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
return 0;
}
@@ -513,7 +515,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
switch (wrpll_ctl & WRPLL_REF_MASK) {
case WRPLL_REF_PCH_SSC:
- refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
+ refclk = 135000;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -544,7 +546,7 @@ out:
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
u32 dp_br = 0;
- int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
+ int refclk = 100000;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {};
@@ -1975,10 +1977,12 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
vgpu == gvt->scheduler.engine_owner[engine->id] ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
vgpu_vreg(vgpu, offset) =
intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -3209,10 +3213,12 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
int i, id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
@@ -3233,8 +3239,10 @@ void intel_gvt_restore_mmio(struct intel_gvt *gvt)
int id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
- mmio_hw_access_pre(gvt->gt);
+ intel_wakeref_t wakeref;
+
+ wakeref = mmio_hw_access_pre(gvt->gt);
intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
- mmio_hw_access_post(gvt->gt);
+ mmio_hw_access_post(gvt->gt, wakeref);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 9f97f743aa71..6c2d68e88266 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -447,6 +447,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
if (!vgpu_data->active)
return;
@@ -465,7 +466,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
for_each_engine(engine, vgpu->gvt->gt, id) {
if (scheduler->engine_owner[engine->id] == vgpu) {
@@ -474,6 +475,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0d9e263913ff..967c0501e91e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,8 +66,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
-
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index ce3cc93ea211..273bc43468a0 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -62,7 +62,6 @@
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
#include "display/intel_sprite_uapi.h"
-#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -235,7 +234,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_uncore_mmio_debug_init_early(dev_priv);
- spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
intel_sbi_init(dev_priv);
@@ -263,9 +261,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
i915_gem_init_early(dev_priv);
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev_priv);
-
intel_irq_init(dev_priv);
intel_display_driver_early_probe(display);
intel_clock_gating_hooks_init(dev_priv);
@@ -578,7 +573,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
intel_dram_detect(dev_priv);
- intel_bw_init_hw(dev_priv);
+ intel_bw_init_hw(display);
return 0;
@@ -622,11 +617,12 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
* Perform any steps necessary to make the driver available via kernel
* internal or userspace interfaces.
*/
-static void i915_driver_register(struct drm_i915_private *dev_priv)
+static int i915_driver_register(struct drm_i915_private *dev_priv)
{
struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
+ int ret;
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
@@ -634,10 +630,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
- if (drm_dev_register(&dev_priv->drm, 0)) {
- drm_err(&dev_priv->drm,
- "Failed to register driver for userspace access!\n");
- return;
+ ret = drm_dev_register(&dev_priv->drm, 0);
+ if (ret) {
+ i915_probe_error(dev_priv,
+ "Failed to register driver for userspace access!\n");
+ drm_dev_unregister(&dev_priv->drm);
+ i915_pmu_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
+ return ret;
}
i915_debugfs_register(dev_priv);
@@ -660,6 +660,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
if (i915_switcheroo_register(dev_priv))
drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
+
+ return 0;
}
/**
@@ -834,7 +836,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_gem;
- i915_driver_register(i915);
+ ret = i915_driver_register(i915);
+ if (ret)
+ goto out_cleanup_gem;
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -845,6 +849,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_cleanup_gem:
+ intel_pxp_fini(i915);
i915_gem_suspend(i915);
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
@@ -981,7 +986,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_dp_mst_suspend(display);
intel_irq_suspend(i915);
- intel_hpd_cancel_work(i915);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(i915))
intel_display_driver_suspend_access(display);
@@ -1064,7 +1069,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
if (HAS_DISPLAY(dev_priv))
intel_display_driver_suspend_access(display);
@@ -1195,13 +1200,11 @@ static int i915_drm_resume(struct drm_device *dev)
i9xx_display_sr_restore(display);
- intel_vga_redisable(display);
-
intel_gmbus_reset(display);
intel_pps_unlock_regs_wa(display);
- intel_init_pch_refclk(dev_priv);
+ intel_init_pch_refclk(display);
/*
* Interrupts have to be enabled before any batches are run. If not the
@@ -1227,7 +1230,7 @@ static int i915_drm_resume(struct drm_device *dev)
if (HAS_DISPLAY(dev_priv))
intel_display_driver_resume_access(display);
- intel_hpd_init(dev_priv);
+ intel_hpd_init(display);
intel_display_driver_resume(display);
@@ -1235,7 +1238,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -1575,7 +1578,7 @@ static int intel_runtime_suspend(struct device *kdev)
assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- intel_hpd_poll_enable(dev_priv);
+ intel_hpd_poll_enable(display);
drm_dbg(&dev_priv->drm, "Device suspended\n");
return 0;
@@ -1633,11 +1636,11 @@ static int intel_runtime_resume(struct device *kdev)
* everyone else do it here.
*/
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
}
- skl_watermark_ipc_update(dev_priv);
+ skl_watermark_ipc_update(display);
enable_rpm_wakeref_asserts(rpm);
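
The i915_driver_register() change above makes registration fallible: it now returns an int, and a drm_dev_register() failure backs out the registrations already made before propagating the error to i915_driver_probe(). A compact sketch of that register-then-unwind flow, with hypothetical stand-in names:

#include <stdio.h>

static int fail_dev_register = 1;       /* toggle to simulate failure */

static void gem_register(void)   { puts("gem registered"); }
static void gem_unregister(void) { puts("gem unregistered"); }
static int  dev_register(void)   { return fail_dev_register ? -5 : 0; }

static int driver_register(void)
{
        int ret;

        gem_register();

        ret = dev_register();
        if (ret) {
                gem_unregister();       /* back out what was already done */
                return ret;
        }

        puts("driver registered");
        return 0;
}

int main(void)
{
        printf("ret=%d\n", driver_register());
        return 0;
}
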
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 4b67ad9a61cd..1e95ecb2a163 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -15,7 +15,6 @@ struct drm_printer;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_TIMESTAMP 1695980603
extern const struct dev_pm_ops i915_pm_ops;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54538b6f85df..d0e1980dcba2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,8 +49,6 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
-#include "soc/intel_pch.h"
-
#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
@@ -224,8 +222,6 @@ struct drm_i915_private {
};
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
- /* protects the irq masks */
- spinlock_t irq_lock;
bool irqs_enabled;
/* LPT/WPT IOSF sideband protection */
@@ -272,9 +268,6 @@ struct drm_i915_private {
/* pm private clock gating functions */
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
- /* PCH chipset type */
- enum intel_pch pch_type;
-
unsigned long gem_quirks;
struct i915_gem_mm mm;
@@ -306,6 +299,7 @@ struct drm_i915_private {
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
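
The new __INTEL_DRAM_TYPE_MAX sentinel exists so the DRAM-type string table can be size-checked at compile time, which the BUILD_BUG_ON() added in soc/intel_dram.c later in this patch relies on. A runnable sketch of the idiom, using _Static_assert in place of BUILD_BUG_ON and toy enum values:

#include <stdio.h>

enum dram_type { DRAM_DDR4, DRAM_DDR5, DRAM_GDDR, __DRAM_TYPE_MAX };

static const char * const dram_str[] = {
        [DRAM_DDR4] = "DDR4",
        [DRAM_DDR5] = "DDR5",
        [DRAM_GDDR] = "GDDR",
};

/* adding an enum entry without a string now breaks the build */
_Static_assert(sizeof(dram_str) / sizeof(dram_str[0]) == __DRAM_TYPE_MAX,
               "string table out of sync with enum");

int main(void)
{
        printf("%s\n", dram_str[DRAM_DDR5]);
        return 0;
}
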
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a6613eed3398..4f785cdbd155 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -726,13 +726,6 @@ static void err_print_gt_info(struct drm_i915_error_state_buf *m,
intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}
-static void err_print_gt_display(struct drm_i915_error_state_buf *m,
- struct intel_gt_coredump *gt)
-{
- err_printf(m, "IER: 0x%08x\n", gt->ier);
- err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
-}
-
static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
struct intel_gt_coredump *gt)
{
@@ -878,7 +871,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
print_guc_capture = true;
- err_print_gt_display(m, error->gt);
err_print_gt_global_nonguc(m, error->gt);
err_print_gt_fences(m, error->gt);
@@ -1767,27 +1759,6 @@ gt_record_uc(struct intel_gt_coredump *gt,
return error_uc;
}
-/* Capture display registers. */
-static void gt_record_display_regs(struct intel_gt_coredump *gt)
-{
- struct intel_uncore *uncore = gt->_gt->uncore;
- struct drm_i915_private *i915 = uncore->i915;
-
- if (DISPLAY_VER(i915) >= 6 && DISPLAY_VER(i915) < 20)
- gt->derrmr = intel_uncore_read(uncore, DERRMR);
-
- if (GRAPHICS_VER(i915) >= 8)
- gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- else if (IS_VALLEYVIEW(i915))
- gt->ier = intel_uncore_read(uncore, VLV_IER);
- else if (HAS_PCH_SPLIT(i915))
- gt->ier = intel_uncore_read(uncore, DEIER);
- else if (GRAPHICS_VER(i915) == 2)
- gt->ier = intel_uncore_read16(uncore, GEN2_IER);
- else
- gt->ier = intel_uncore_read(uncore, GEN2_IER);
-}
-
/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
@@ -1821,9 +1792,12 @@ static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
gt->gtier[i] =
intel_uncore_read(uncore, GEN8_GT_IER(i));
gt->ngtier = 4;
- } else if (HAS_PCH_SPLIT(i915)) {
+ } else if (GRAPHICS_VER(i915) >= 5) {
gt->gtier[0] = intel_uncore_read(uncore, GTIER);
gt->ngtier = 1;
+ } else {
+ gt->gtier[0] = intel_uncore_read(uncore, GEN2_IER);
+ gt->ngtier = 1;
}
gt->eir = intel_uncore_read(uncore, EIR);
@@ -2043,7 +2017,6 @@ intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
gc->_gt = gt;
gc->awake = intel_gt_pm_is_awake(gt);
- gt_record_display_regs(gc);
gt_record_global_nonguc_regs(gc);
/*
@@ -2160,7 +2133,6 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump
void i915_error_state_store(struct i915_gpu_coredump *error)
{
struct drm_i915_private *i915;
- static bool warned;
if (IS_ERR_OR_NULL(error))
return;
@@ -2174,16 +2146,8 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
i915_gpu_coredump_get(error);
- if (!xchg(&warned, true) &&
- ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
- pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
- pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
- pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
- pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- i915->drm.primary->index);
- }
+ drm_info(&i915->drm, "GPU error state saved to /sys/class/drm/card%d/error\n",
+ i915->drm.primary->index);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 749e1c55613e..182324979278 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -146,7 +146,6 @@ struct intel_gt_coredump {
/* Generic register state */
u32 eir;
u32 pgtbl_er;
- u32 ier;
u32 gtier[6], ngtier;
u32 forcewake;
u32 error; /* gen6+ */
@@ -164,8 +163,6 @@ struct intel_gt_coredump {
u32 clock_frequency;
u32 clock_period_ns;
- /* Display related */
- u32 derrmr;
u32 sfc_done[I915_MAX_SFC]; /* gen12 */
u32 nfence;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 37ca4a35daf2..95042879bec4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -277,14 +277,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
@@ -306,12 +306,12 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -367,14 +367,14 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_ack(display, &eir, &dpinvgtt);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
@@ -392,12 +392,12 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
if (iir & I915_MASTER_ERROR_INTERRUPT)
vlv_display_error_irq_handler(display, eir, dpinvgtt);
- valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
+ valleyview_pipestat_irq_handler(display, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -418,6 +418,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = &i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -458,9 +459,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (de_iir) {
raw_reg_write(regs, DEIIR, de_iir);
if (DISPLAY_VER(i915) >= 7)
- ivb_display_irq_handler(i915, de_iir);
+ ivb_display_irq_handler(display, de_iir);
else
- ilk_display_irq_handler(i915, de_iir);
+ ilk_display_irq_handler(display, de_iir);
ret = IRQ_HANDLED;
}
@@ -506,6 +507,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
u32 master_ctl;
@@ -524,7 +526,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- gen8_de_irq_handler(dev_priv, master_ctl);
+ gen8_de_irq_handler(display, master_ctl);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
@@ -556,6 +558,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct intel_display *display = &i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
@@ -575,13 +578,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -613,6 +616,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs)
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
+ struct intel_display *display = &i915->display;
struct intel_gt *gt = to_gt(i915);
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 master_tile_ctl, master_ctl;
@@ -641,36 +645,22 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
gen11_gt_irq_handler(gt, master_ctl);
if (master_ctl & GEN11_DISPLAY_IRQ)
- gen11_display_irq_handler(i915);
+ gen11_display_irq_handler(display);
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl);
dg1_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(display, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
return IRQ_HANDLED;
}
-static void ibx_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- if (HAS_PCH_NOP(dev_priv))
- return;
-
- gen2_irq_reset(uncore, SDE_IRQ_REGS);
-
- if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
- intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
-}
-
-/* drm_dma.h hooks
-*/
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen2_irq_reset(uncore, DE_IRQ_REGS);
@@ -686,45 +676,43 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
- ibx_irq_reset(dev_priv);
+ ibx_display_irq_reset(display);
}
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
gen5_gt_irq_reset(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_reset(display);
}
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
gen8_master_intr_disable(intel_uncore_regs(uncore));
gen8_gt_irq_reset(to_gt(dev_priv));
- gen8_display_irq_reset(dev_priv);
+ gen8_display_irq_reset(display);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
-
- if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_reset(dev_priv);
-
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -732,6 +720,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_gt *gt;
unsigned int i;
@@ -741,7 +730,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i)
gen11_gt_irq_reset(gt);
- gen11_display_irq_reset(dev_priv);
+ gen11_display_irq_reset(display);
gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
@@ -751,6 +740,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
@@ -760,25 +750,25 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_reset(display);
}
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- ilk_de_irq_postinstall(dev_priv);
+ ilk_de_irq_postinstall(display);
}
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen5_gt_irq_postinstall(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_postinstall(display);
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
@@ -786,20 +776,23 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- gen8_de_irq_postinstall(dev_priv);
+ gen8_de_irq_postinstall(display);
gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
gen11_gt_irq_postinstall(gt);
- gen11_de_irq_postinstall(dev_priv);
+ gen11_de_irq_postinstall(display);
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
@@ -809,6 +802,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
struct intel_gt *gt;
@@ -819,7 +813,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
- dg1_de_irq_postinstall(dev_priv);
+ dg1_de_irq_postinstall(display);
dg1_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
@@ -827,11 +821,11 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
gen8_gt_irq_postinstall(to_gt(dev_priv));
- spin_lock_irq(&dev_priv->irq_lock);
- vlv_display_irq_postinstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ vlv_display_irq_postinstall(display);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
@@ -900,9 +894,10 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -911,6 +906,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -932,26 +928,20 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_ASLE_INTERRUPT;
}
- if (I915_HAS_HOTPLUG(dev_priv)) {
+ if (HAS_HOTPLUG(dev_priv)) {
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
}
gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- i915_enable_asle_pipestat(dev_priv);
+ i915_display_irq_postinstall(display);
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -972,13 +962,13 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (I915_HAS_HOTPLUG(dev_priv) &&
+ if (HAS_HOTPLUG(dev_priv) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -992,9 +982,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i915_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, ret);
@@ -1006,9 +996,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
- i9xx_display_irq_reset(dev_priv);
+ i9xx_display_irq_reset(display);
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
@@ -1036,6 +1027,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915)
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -1061,20 +1053,13 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- i915_enable_asle_pipestat(dev_priv);
+ i965_display_irq_postinstall(display);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ struct intel_display *display = &dev_priv->display;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1096,11 +1081,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
if (iir & I915_DISPLAY_PORT_INTERRUPT)
- hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
* signalled in IIR */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(display, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
@@ -1119,9 +1104,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
+ i9xx_hpd_irq_handler(display, hotplug_status);
- i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ i965_pipestat_irq_handler(display, iir, pipe_stats);
} while (0);
pmu_irq_stats(dev_priv, IRQ_HANDLED);
@@ -1280,6 +1265,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
@@ -1289,7 +1275,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
free_irq(irq, dev_priv);
- intel_hpd_cancel_work(dev_priv);
+ intel_hpd_cancel_work(display);
dev_priv->irqs_enabled = false;
}
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 76e2801619f0..c33bd3d83069 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -100,7 +100,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
r.mm = vma->vm_mm;
r.pfn = pfn;
r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
@@ -140,7 +140,7 @@ int remap_io_sg(struct vm_area_struct *vma,
};
int err;
- /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
while (offset >= r.sgt.max >> PAGE_SHIFT) {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index de0b413600a1..1658f1246c6f 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1666,6 +1666,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
struct i915_perf *perf = stream->perf;
struct intel_gt *gt = stream->engine->gt;
struct i915_perf_group *g = stream->engine->oa_group;
+ int m;
if (WARN_ON(stream != g->exclusive_stream))
return;
@@ -1690,10 +1691,9 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_configs(stream);
free_noa_wait(stream);
- if (perf->spurious_report_rs.missed) {
- gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n",
- perf->spurious_report_rs.missed);
- }
+ m = ratelimit_state_get_miss(&perf->spurious_report_rs);
+ if (m)
+ gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n", m);
}
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
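
The i915_perf.c hunk stops reading the ratelimit state's fields directly and uses ratelimit_state_get_miss() to fetch the suppressed-report count for the teardown message. A user-space analog of that miss accounting, assuming a read-and-clear getter; the struct and function names below are toy stand-ins, not the kernel ratelimit API:

#include <stdio.h>

struct ratelimit { int budget; int missed; };

static int ratelimit_try(struct ratelimit *rs)
{
        if (rs->budget > 0) { rs->budget--; return 1; }
        rs->missed++;                   /* event suppressed */
        return 0;
}

static int ratelimit_get_miss(struct ratelimit *rs)
{
        int m = rs->missed;
        rs->missed = 0;                 /* read-and-clear */
        return m;
}

int main(void)
{
        struct ratelimit rs = { .budget = 2 };

        for (int i = 0; i < 5; i++)
                if (ratelimit_try(&rs))
                        printf("report %d emitted\n", i);

        int m = ratelimit_get_miss(&rs);
        if (m)
                printf("%d reports suppressed due to ratelimiting\n", m);
        return 0;
}
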
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index e5a188ce3185..990bfaba3ce4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -112,7 +112,7 @@ static u32 config_mask(const u64 config)
{
unsigned int bit = config_bit(config);
- if (__builtin_constant_p(config))
+ if (__builtin_constant_p(bit))
BUILD_BUG_ON(bit >
BITS_PER_TYPE(typeof_member(struct i915_pmu,
enable)) - 1);
@@ -121,7 +121,7 @@ static u32 config_mask(const u64 config)
BITS_PER_TYPE(typeof_member(struct i915_pmu,
enable)) - 1);
- return BIT(config_bit(config));
+ return BIT(bit);
}
static bool is_engine_event(struct perf_event *event)
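
Why the config_mask() hunk tests __builtin_constant_p(bit) rather than (config): the compile-time bounds check is only meaningful when the derived bit index itself folds to a constant, and returning BIT(bit) reuses that value instead of calling config_bit() a second time. A toy demonstration of the gating, with assert() standing in for BUILD_BUG_ON():

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static inline unsigned int config_bit(unsigned long long config)
{
        return (unsigned int)config & 0x1f;     /* toy mapping */
}

static inline unsigned int config_mask(unsigned long long config)
{
        unsigned int bit = config_bit(config);

        if (__builtin_constant_p(bit))          /* only check foldable values */
                assert(bit <= 31);

        return BIT(bit);                        /* reuse, don't recompute */
}

int main(void)
{
        printf("0x%x\n", config_mask(7));       /* -> 0x80 */
        return 0;
}
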
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c5064eebe063..2e4190da3e0d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -866,6 +866,7 @@
#define FP_M2_DIV_MASK 0x0000003f
#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
+
#define DPLL_TEST _MMIO(0x606c)
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
@@ -877,11 +878,13 @@
#define DPLLA_TEST_N_BYPASS (1 << 3)
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
#define D_STATE _MMIO(0x6104)
#define DSTATE_GFX_RESET_I830 (1 << 6)
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
+
#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
@@ -1050,7 +1053,6 @@
/*
* Overlay regs
*/
-
#define OVADD _MMIO(0x30000)
#define DOVSTA _MMIO(0x30008)
#define OC_BUF (0x3 << 20)
@@ -1077,6 +1079,7 @@
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
+#define DG2_DPFC_GATING_DIS REG_BIT(31)
#define GEN9_CLKGATE_DIS_5 _MMIO(0x46540)
#define DPCE_GATING_DIS REG_BIT(17)
@@ -1105,7 +1108,6 @@
/*
* Display engine regs
*/
-
/* Pipe/transcoder A timing regs */
#define _TRANS_HTOTAL_A 0x60000
#define _TRANS_HTOTAL_B 0x61000
@@ -1396,88 +1398,50 @@
#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
-#define DP_PORT_EN (1 << 31)
-#define DP_PIPE_SEL_SHIFT 30
-#define DP_PIPE_SEL_MASK (1 << 30)
-#define DP_PIPE_SEL(pipe) ((pipe) << 30)
-#define DP_PIPE_SEL_SHIFT_IVB 29
-#define DP_PIPE_SEL_MASK_IVB (3 << 29)
-#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define DP_PORT_EN REG_BIT(31)
+#define DP_PIPE_SEL_MASK REG_GENMASK(30, 30)
+#define DP_PIPE_SEL(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK, (pipe))
+#define DP_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29)
+#define DP_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_IVB, (pipe))
#define DP_PIPE_SEL_SHIFT_CHV 16
-#define DP_PIPE_SEL_MASK_CHV (3 << 16)
-#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16)
-
-/* Link training mode - select a suitable mode for each stage */
-#define DP_LINK_TRAIN_PAT_1 (0 << 28)
-#define DP_LINK_TRAIN_PAT_2 (1 << 28)
-#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
-#define DP_LINK_TRAIN_OFF (3 << 28)
-#define DP_LINK_TRAIN_MASK (3 << 28)
-#define DP_LINK_TRAIN_SHIFT 28
-
-/* CPT Link training mode */
-#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
-#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
-#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
-#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
-#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
-#define DP_LINK_TRAIN_SHIFT_CPT 8
-
-/* Signal voltages. These are mostly controlled by the other end */
-#define DP_VOLTAGE_0_4 (0 << 25)
-#define DP_VOLTAGE_0_6 (1 << 25)
-#define DP_VOLTAGE_0_8 (2 << 25)
-#define DP_VOLTAGE_1_2 (3 << 25)
-#define DP_VOLTAGE_MASK (7 << 25)
-#define DP_VOLTAGE_SHIFT 25
-
-/* Signal pre-emphasis levels, like voltages, the other end tells us what
- * they want
- */
-#define DP_PRE_EMPHASIS_0 (0 << 22)
-#define DP_PRE_EMPHASIS_3_5 (1 << 22)
-#define DP_PRE_EMPHASIS_6 (2 << 22)
-#define DP_PRE_EMPHASIS_9_5 (3 << 22)
-#define DP_PRE_EMPHASIS_MASK (7 << 22)
-#define DP_PRE_EMPHASIS_SHIFT 22
-
-/* How many wires to use. I guess 3 was too hard */
-#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
-#define DP_PORT_WIDTH_MASK (7 << 19)
-#define DP_PORT_WIDTH_SHIFT 19
-
-/* Mystic DPCD version 1.1 special mode */
-#define DP_ENHANCED_FRAMING (1 << 18)
-
-/* eDP */
-#define DP_PLL_FREQ_270MHZ (0 << 16)
-#define DP_PLL_FREQ_162MHZ (1 << 16)
-#define DP_PLL_FREQ_MASK (3 << 16)
-
-/* locked once port is enabled */
-#define DP_PORT_REVERSAL (1 << 15)
-
-/* eDP */
-#define DP_PLL_ENABLE (1 << 14)
-
-/* sends the clock on lane 15 of the PEG for debug */
-#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
-
-#define DP_SCRAMBLING_DISABLE (1 << 12)
-#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
-
-/* limit RGB values to avoid confusing TVs */
-#define DP_COLOR_RANGE_16_235 (1 << 8)
-
-/* Turn on the audio link */
-#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
-
-/* vs and hs sync polarity */
-#define DP_SYNC_VS_HIGH (1 << 4)
-#define DP_SYNC_HS_HIGH (1 << 3)
-
-/* A fantasy */
-#define DP_DETECTED (1 << 2)
+#define DP_PIPE_SEL_MASK_CHV REG_GENMASK(17, 16)
+#define DP_PIPE_SEL_CHV(pipe) REG_FIELD_PREP(DP_PIPE_SEL_MASK_CHV, (pipe))
+#define DP_LINK_TRAIN_MASK REG_GENMASK(29, 28)
+#define DP_LINK_TRAIN_PAT_1 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 0)
+#define DP_LINK_TRAIN_PAT_2 REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 1)
+#define DP_LINK_TRAIN_PAT_IDLE REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 2)
+#define DP_LINK_TRAIN_OFF REG_FIELD_PREP(DP_LINK_TRAIN_MASK, 3)
+#define DP_LINK_TRAIN_MASK_CPT REG_GENMASK(10, 8)
+#define DP_LINK_TRAIN_PAT_1_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 0)
+#define DP_LINK_TRAIN_PAT_2_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 1)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 2)
+#define DP_LINK_TRAIN_OFF_CPT REG_FIELD_PREP(DP_LINK_TRAIN_MASK_CPT, 3)
+#define DP_VOLTAGE_MASK REG_GENMASK(27, 25)
+#define DP_VOLTAGE_0_4 REG_FIELD_PREP(DP_VOLTAGE_MASK, 0)
+#define DP_VOLTAGE_0_6 REG_FIELD_PREP(DP_VOLTAGE_MASK, 1)
+#define DP_VOLTAGE_0_8 REG_FIELD_PREP(DP_VOLTAGE_MASK, 2)
+#define DP_VOLTAGE_1_2 REG_FIELD_PREP(DP_VOLTAGE_MASK, 3)
+#define DP_PRE_EMPHASIS_MASK REG_GENMASK(24, 22)
+#define DP_PRE_EMPHASIS_0 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 0)
+#define DP_PRE_EMPHASIS_3_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 1)
+#define DP_PRE_EMPHASIS_6 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 2)
+#define DP_PRE_EMPHASIS_9_5 REG_FIELD_PREP(DP_PRE_EMPHASIS_MASK, 3)
+#define DP_PORT_WIDTH_MASK REG_GENMASK(21, 19)
+#define DP_PORT_WIDTH(width) REG_FIELD_PREP(DP_PORT_WIDTH_MASK, (width) - 1)
+#define DP_ENHANCED_FRAMING REG_BIT(18)
+#define EDP_PLL_FREQ_MASK REG_GENMASK(17, 16)
+#define EDP_PLL_FREQ_270MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 0)
+#define EDP_PLL_FREQ_162MHZ REG_FIELD_PREP(EDP_PLL_FREQ_MASK, 1)
+#define DP_PORT_REVERSAL REG_BIT(15)
+#define EDP_PLL_ENABLE REG_BIT(14)
+#define DP_CLOCK_OUTPUT_ENABLE REG_BIT(13)
+#define DP_SCRAMBLING_DISABLE REG_BIT(12)
+#define DP_SCRAMBLING_DISABLE_ILK REG_BIT(7)
+#define DP_COLOR_RANGE_16_235 REG_BIT(8)
+#define DP_AUDIO_OUTPUT_ENABLE REG_BIT(6)
+#define DP_SYNC_VS_HIGH REG_BIT(4)
+#define DP_SYNC_HS_HIGH REG_BIT(3)
+#define DP_DETECTED REG_BIT(2)
/*
* Computing GMCH M and N values for the Display Port link
@@ -1811,18 +1775,6 @@
#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
-/* VBIOS regs */
-#define VGACNTRL _MMIO(0x71400)
-# define VGA_DISP_DISABLE (1 << 31)
-# define VGA_2X_MODE (1 << 30)
-# define VGA_PIPE_B_SELECT (1 << 29)
-
-#define VLV_VGACNTRL _MMIO(VLV_DISPLAY_BASE + 0x71400)
-
-/* Ironlake */
-
-#define CPU_VGACNTRL _MMIO(0x41000)
-
#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
@@ -2783,7 +2735,6 @@
* functionality covered in PCH_PORT_HOTPLUG is split into
* SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
*/
-
#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
#define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
@@ -2863,7 +2814,6 @@
#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))
/* transcoder */
-
#define _PCH_TRANS_HTOTAL_A 0xe0000
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
@@ -3794,7 +3744,6 @@ enum skl_power_gate {
/*
* SKL Clocks
*/
-
/* CDCLK_CTL */
#define CDCLK_CTL _MMIO(0x46000)
#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
@@ -4242,6 +4191,11 @@ enum skl_power_gate {
#define MTL_CLKGATE_DIS_TRANS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _MTL_CLKGATE_DIS_TRANS_A)
#define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7)
+#define _MTL_PIPE_CLKGATE_DIS2_A 0x60114
+#define _MTL_PIPE_CLKGATE_DIS2_B 0x61114
+#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
+#define MTL_DPFC_GATING_DIS REG_BIT(6)
+
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
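
The DP register conversion above replaces open-coded shifts with REG_GENMASK()/REG_FIELD_PREP(), so every field carries its mask and the shift is implied. A runnable sketch of the same math in plain C, where the macros below and __builtin_ctz() are user-space stand-ins for the kernel's GENMASK_U32()/FIELD_PREP(); DP_PORT_WIDTH(w) packs (w - 1) into bits 21:19 exactly as the old (((width) - 1) << 19) did:

#include <stdio.h>
#include <stdint.h>

#define GENMASK_U32(h, l) \
        ((uint32_t)(~0u << (l)) & (uint32_t)(~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
        (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg) \
        (((reg) & (mask)) >> __builtin_ctz(mask))

#define DP_PORT_WIDTH_MASK  GENMASK_U32(21, 19)
#define DP_PORT_WIDTH(w)    FIELD_PREP(DP_PORT_WIDTH_MASK, (w) - 1)

int main(void)
{
        uint32_t reg = DP_PORT_WIDTH(4);        /* 4 lanes -> field value 3 */
        printf("reg=0x%08x lanes=%u\n", reg,
               (unsigned)FIELD_GET(DP_PORT_WIDTH_MASK, reg) + 1);
        return 0;
}
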
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index 94a8f902689e..bfe98cb9a038 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -9,76 +9,19 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
-/**
- * REG_BIT() - Prepare a u32 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u32, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT(__n) \
- ((u32)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 31))))
-
-/**
- * REG_BIT8() - Prepare a u8 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u8, with compile time checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT8(__n) \
- ((u8)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 7))))
-
-/**
- * REG_GENMASK() - Prepare a continuous u32 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u32, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK(__high, __low) \
- ((u32)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
-
-/**
- * REG_GENMASK64() - Prepare a continuous u64 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK_ULL() to force u64, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
+/*
+ * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
+ * implementations, for compatibility reasons with previous implementation.
*/
-#define REG_GENMASK64(__high, __low) \
- ((u64)(GENMASK_ULL(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
+#define REG_GENMASK(high, low) GENMASK_U32(high, low)
+#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
+#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
+#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
-/**
- * REG_GENMASK8() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u8, with compile time checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK8(__high, __low) \
- ((u8)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
+#define REG_BIT(n) BIT_U32(n)
+#define REG_BIT64(n) BIT_U64(n)
+#define REG_BIT16(n) BIT_U16(n)
+#define REG_BIT8(n) BIT_U8(n)
/*
* Local integer constant expression version of is_power_of_2().
@@ -143,35 +86,6 @@
*/
#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
-/**
- * REG_BIT16() - Prepare a u16 bit value
- * @__n: 0-based bit number
- *
- * Local wrapper for BIT() to force u16, with compile time
- * checks.
- *
- * @return: Value with bit @__n set.
- */
-#define REG_BIT16(__n) \
- ((u16)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
- ((__n) < 0 || (__n) > 15))))
-
-/**
- * REG_GENMASK16() - Prepare a continuous u8 bitmask
- * @__high: 0-based high bit
- * @__low: 0-based low bit
- *
- * Local wrapper for GENMASK() to force u16, with compile time
- * checks.
- *
- * @return: Continuous bitmask from @__high to @__low, inclusive.
- */
-#define REG_GENMASK16(__high, __low) \
- ((u16)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
- __is_constexpr(__low) && \
- ((__low) < 0 || (__high) > 15 || (__low) > (__high)))))
/**
* REG_FIELD_PREP16() - Prepare a u16 bitfield value
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 1d4cc91c0e40..a4902ee08b6e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -427,7 +427,8 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
static void timer_i915_sw_fence_wake(struct timer_list *t)
{
- struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
+ struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
+ timer);
struct i915_sw_fence *fence;
fence = xchg(&cb->base.fence, NULL);
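
The hunk above adopts timer_container_of(), the upstream rename of from_timer(); the semantics are unchanged: recover the enclosing structure from a pointer to its embedded timer. A runnable user-space illustration of the underlying container_of pattern, with toy struct names:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { int expires; };
struct timed_fence { int fence; struct timer timer; };

static void timer_cb(struct timer *t)
{
        /* walk back from the member to the containing object */
        struct timed_fence *tf = container_of(t, struct timed_fence, timer);

        printf("fence=%d\n", tf->fence);
}

int main(void)
{
        struct timed_fence tf = { .fence = 42 };
        timer_cb(&tf.timer);
        return 0;
}
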
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 609214231ffc..f7fb40cfdb70 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,8 +40,6 @@
struct drm_i915_private;
struct timer_list;
-#define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html"
-
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 76d84cbb8361..d581a9d2c063 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -21,6 +21,7 @@
#include "display/intel_pfit_regs.h"
#include "display/intel_psr_regs.h"
#include "display/intel_sprite_regs.h"
+#include "display/intel_vga_regs.h"
#include "display/skl_universal_plane_regs.h"
#include "display/skl_watermark_regs.h"
#include "display/vlv_dsi_pll_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d40ee1b42110..59bd603e6deb 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -171,6 +171,17 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
return NULL;
}
+bool intel_memory_type_is_local(enum intel_memory_type mem_type)
+{
+ switch (mem_type) {
+ case INTEL_MEMORY_LOCAL:
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* intel_memory_region_reserve - Reserve a memory range
* @mem: The region for which we want to reserve a range.
@@ -216,7 +227,7 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
-static const char *region_type_str(u16 type)
+const char *intel_memory_type_str(enum intel_memory_type type)
{
switch (type) {
case INTEL_MEMORY_SYSTEM:
@@ -260,7 +271,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->instance = instance;
snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
- region_type_str(type), instance);
+ intel_memory_type_str(type), instance);
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
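
A thin sketch of the predicate this hunk exports, mirroring intel_memory_type_is_local() in shape only; the enum values below are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum mem_type { MEM_SYSTEM, MEM_LOCAL, MEM_STOLEN_LOCAL };

static bool mem_type_is_local(enum mem_type t)
{
        switch (t) {
        case MEM_LOCAL:
        case MEM_STOLEN_LOCAL:
                return true;            /* device-local memory */
        default:
                return false;
        }
}

int main(void)
{
        printf("%d\n", mem_type_is_local(MEM_STOLEN_LOCAL));
        return 0;
}
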
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 5973b6fe13cf..b3b75be9ced5 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -85,6 +85,8 @@ struct intel_memory_region {
void *region_private;
};
+bool intel_memory_type_is_local(enum intel_memory_type mem_type);
+
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance);
@@ -107,6 +109,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915);
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
enum intel_memory_type mem_type);
+const char *intel_memory_type_str(enum intel_memory_type type);
__printf(2, 3) void
intel_memory_region_set_name(struct intel_memory_region *mem,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 07e81be4d392..51561b190b93 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -135,7 +135,7 @@ int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
static void wakeref_auto_timeout(struct timer_list *t)
{
- struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
+ struct intel_wakeref_auto *wf = timer_container_of(wf, t, timer);
intel_wakeref_t wakeref;
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 48836ef52d40..a2894a56e18f 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,8 +7,6 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
-#include <drm/drm_print.h>
-
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
@@ -16,11 +14,13 @@
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/ref_tracker.h>
-#include <linux/slab.h>
-#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+struct drm_printer;
+struct intel_runtime_pm;
+struct intel_wakeref;
+
typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_REFTRACK_DEAD_COUNT 16
@@ -32,9 +32,6 @@ typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-struct intel_runtime_pm;
-struct intel_wakeref;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index d5ecc68155da..d79e4defb71d 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -55,7 +55,7 @@ void onstack_fence_fini(struct i915_sw_fence *fence)
static void timed_fence_wake(struct timer_list *t)
{
- struct timed_fence *tf = from_timer(tf, t, timer);
+ struct timed_fence *tf = timer_container_of(tf, t, timer);
i915_sw_fence_commit(&tf->fence);
}
@@ -77,7 +77,7 @@ void timed_fence_fini(struct timed_fence *tf)
if (timer_delete_sync(&tf->timer))
i915_sw_fence_commit(&tf->fence);
- destroy_timer_on_stack(&tf->timer);
+ timer_destroy_on_stack(&tf->timer);
i915_sw_fence_fini(&tf->fence);
}
diff --git a/drivers/gpu/drm/i915/selftests/librapl.c b/drivers/gpu/drm/i915/selftests/librapl.c
index eb03b5b28bad..25b8726b9dff 100644
--- a/drivers/gpu/drm/i915/selftests/librapl.c
+++ b/drivers/gpu/drm/i915/selftests/librapl.c
@@ -22,12 +22,12 @@ u64 librapl_energy_uJ(void)
unsigned long long power;
u32 units;
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+ if (rdmsrq_safe(MSR_RAPL_POWER_UNIT, &power))
return 0;
units = (power & 0x1f00) >> 8;
- if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+ if (rdmsrq_safe(MSR_PP1_ENERGY_STATUS, &power))
return 0;
return (1000000 * power) >> units; /* convert to uJ */
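
The librapl.c hunk only renames rdmsrl_safe() to rdmsrq_safe(); the RAPL arithmetic is unchanged. As the code shows, bits 12:8 of MSR_RAPL_POWER_UNIT give the Energy Status Unit (ESU), one raw count equals 1/2^ESU joules, so microjoules are (raw * 1000000) >> ESU. A worked example with a sample (hypothetical) MSR value:

#include <stdio.h>

int main(void)
{
        unsigned long long power_unit = 0xa0e03;          /* sample MSR value */
        unsigned int units = (power_unit & 0x1f00) >> 8;  /* ESU = 14 here */
        unsigned long long raw = 1 << 14;                 /* one joule's worth */

        /* prints: ESU=14 energy=1000000 uJ */
        printf("ESU=%u energy=%llu uJ\n", units, (1000000ULL * raw) >> units);
        return 0;
}
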
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index f60eedb0e92c..eee5c4f45a43 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -33,8 +33,14 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
DRAM_TYPE_STR(LPDDR4),
+ DRAM_TYPE_STR(DDR5),
+ DRAM_TYPE_STR(LPDDR5),
+ DRAM_TYPE_STR(GDDR),
+ DRAM_TYPE_STR(GDDR_ECC),
};
+ BUILD_BUG_ON(ARRAY_SIZE(str) != __INTEL_DRAM_TYPE_MAX);
+
if (type >= ARRAY_SIZE(str))
type = INTEL_DRAM_UNKNOWN;
@@ -444,8 +450,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
int ret;
dram_info->type = skl_get_dram_type(i915);
- drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
- intel_dram_type_str(dram_info->type));
ret = skl_dram_get_channels_info(i915);
if (ret)
@@ -560,10 +564,9 @@ static int bxt_get_dram_info(struct drm_i915_private *i915)
dram_info->type != type);
drm_dbg_kms(&i915->drm,
- "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
+ "CH%u DIMM size: %u Gb, width: X%u, ranks: %u\n",
i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
+ dimm.size, dimm.width, dimm.ranks);
if (valid_ranks == 0)
valid_ranks = dimm.ranks;
@@ -730,6 +733,10 @@ void intel_dram_detect(struct drm_i915_private *i915)
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
+
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
if (ret)
return;
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
deleted file mode 100644
index 82dc7fbd1a3e..000000000000
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ /dev/null
@@ -1,316 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright 2019 Intel Corporation.
- */
-
-#include "i915_drv.h"
-#include "i915_utils.h"
-#include "intel_pch.h"
-
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
-#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
-#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
-#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
-#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
-#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
-#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
-#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
-#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
-#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
-#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
-#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-
-/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
-static enum intel_pch
-intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
-{
- switch (id) {
- case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) != 5);
- return PCH_IBX;
- case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- return PCH_CPT;
- case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- GRAPHICS_VER(dev_priv) != 6 && !IS_IVYBRIDGE(dev_priv));
- /* PPT is CPT compatible */
- return PCH_CPT;
- case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT_H;
- case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- return PCH_LPT_LP;
- case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv));
- /* WPT is LPT compatible */
- return PCH_LPT_H;
- case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- drm_WARN_ON(&dev_priv->drm,
- !IS_HASWELL_ULT(dev_priv) && !IS_BROADWELL_ULT(dev_priv));
- /* WPT is LPT compatible */
- return PCH_LPT_LP;
- case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- /* KBP is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm,
- "Found Cannon Lake LP PCH (CNP-LP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CMP_DEVICE_ID_TYPE:
- case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv));
- /* CMP is CNP compatible */
- return PCH_CNP;
- case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- drm_WARN_ON(&dev_priv->drm,
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- /* CMP-V is based on KBP, which is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- return PCH_ICP;
- case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
- IS_ELKHARTLAKE(dev_priv)));
- /* MCC is TGP compatible */
- return PCH_TGP;
- case INTEL_PCH_TGP_DEVICE_ID_TYPE:
- case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
- !IS_ROCKETLAKE(dev_priv) &&
- !IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv) &&
- !IS_COMETLAKE(dev_priv));
- return PCH_TGP;
- case INTEL_PCH_JSP_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !(IS_JASPERLAKE(dev_priv) ||
- IS_ELKHARTLAKE(dev_priv)));
- /* JSP is ICP compatible */
- return PCH_ICP;
- case INTEL_PCH_ADP_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP2_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP3_DEVICE_ID_TYPE:
- case INTEL_PCH_ADP4_DEVICE_ID_TYPE:
- drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n");
- drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
- !IS_ALDERLAKE_P(dev_priv));
- return PCH_ADP;
- default:
- return PCH_NONE;
- }
-}
-
-static bool intel_is_virt_pch(unsigned short id,
- unsigned short svendor, unsigned short sdevice)
-{
- return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- sdevice == PCI_SUBDEVICE_ID_QEMU));
-}
-
-static void
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
- unsigned short *pch_id, enum intel_pch *pch_type)
-{
- unsigned short id = 0;
-
- /*
- * In a virtualized passthrough environment we can be in a
- * setup where the ISA bridge is not able to be passed through.
- * In this case, a south bridge can be emulated and we have to
- * make an educated guess as to which PCH is really there.
- */
-
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
- id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
- else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
- id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
- id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
- id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (GRAPHICS_VER(dev_priv) == 6 || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (GRAPHICS_VER(dev_priv) == 5)
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-
- if (id)
- drm_dbg_kms(&dev_priv->drm, "Assuming PCH ID %04x\n", id);
- else
- drm_dbg_kms(&dev_priv->drm, "Assuming no PCH\n");
-
- *pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (drm_WARN_ON(&dev_priv->drm,
- id && *pch_type == PCH_NONE))
- id = 0;
-
- *pch_id = id;
-}
-
-void intel_detect_pch(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pch = NULL;
- unsigned short id;
- enum intel_pch pch_type;
-
- /*
- * South display engine on the same PCI device: just assign the fake
- * PCH.
- */
- if (DISPLAY_VER(dev_priv) >= 20) {
- dev_priv->pch_type = PCH_LNL;
- return;
- } else if (IS_BATTLEMAGE(dev_priv) || IS_METEORLAKE(dev_priv)) {
- /*
- * Both north display and south display are on the SoC die.
- * The real PCH (if it even exists) is uninvolved in display.
- */
- dev_priv->pch_type = PCH_MTL;
- return;
- } else if (IS_DG2(dev_priv)) {
- dev_priv->pch_type = PCH_DG2;
- return;
- } else if (IS_DG1(dev_priv)) {
- dev_priv->pch_type = PCH_DG1;
- return;
- }
-
- /*
- * The reason to probe ISA bridge instead of Dev31:Fun0 is to
- * make graphics device passthrough work easy for VMM, that only
- * need to expose ISA bridge to let driver know the real hardware
- * underneath. This is a requirement from virtualization team.
- *
- * In some virtualized environments (e.g. XEN), there is irrelevant
- * ISA bridge in the system. To work reliably, we should scan through
- * all the ISA bridge devices and check for the first match, instead
- * of only checking the first one.
- */
- while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- if (pch->vendor != PCI_VENDOR_ID_INTEL)
- continue;
-
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-
- pch_type = intel_pch_type(dev_priv, id);
- if (pch_type != PCH_NONE) {
- dev_priv->pch_type = pch_type;
- break;
- } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
- pch->subsystem_device)) {
- intel_virt_detect_pch(dev_priv, &id, &pch_type);
- dev_priv->pch_type = pch_type;
- break;
- }
- }
-
- /*
- * Use PCH_NOP (PCH but no South Display) for PCH platforms without
- * display.
- */
- if (pch && !HAS_DISPLAY(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "Display disabled, reverting to NOP PCH\n");
- dev_priv->pch_type = PCH_NOP;
- } else if (!pch) {
- if (i915_run_as_guest() && HAS_DISPLAY(dev_priv)) {
- intel_virt_detect_pch(dev_priv, &id, &pch_type);
- dev_priv->pch_type = pch_type;
- } else {
- drm_dbg_kms(&dev_priv->drm, "No PCH found.\n");
- }
- }
-
- pci_dev_put(pch);
-}
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
deleted file mode 100644
index 635aea7a5539..000000000000
--- a/drivers/gpu/drm/i915/soc/intel_pch.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2019 Intel Corporation.
- */
-
-#ifndef __INTEL_PCH__
-#define __INTEL_PCH__
-
-struct drm_i915_private;
-
-/*
- * Sorted by south display engine compatibility.
- * If the new PCH comes with a south display engine that is not
- * inherited from the latest item, please do not add it to the
- * end. Instead, add it right after its "parent" PCH.
- */
-enum intel_pch {
- PCH_NOP = -1, /* PCH without south display */
- PCH_NONE = 0, /* No PCH present */
- PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
- PCH_LPT_H, /* Lynxpoint/Wildcatpoint H PCH */
- PCH_LPT_LP, /* Lynxpoint/Wildcatpoint LP PCH */
- PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
- PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake/Jasper Lake PCH */
- PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
- PCH_ADP, /* Alder Lake PCH */
-
- /* Fake PCHs, functionality handled on the same PCI dev */
- PCH_DG1 = 1024,
- PCH_DG2,
- PCH_MTL,
- PCH_LNL,
-};
-
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
-#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
-#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
-#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT_H(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H)
-#define HAS_PCH_LPT_LP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT_H || \
- INTEL_PCH_TYPE(dev_priv) == PCH_LPT_LP)
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-
-void intel_detect_pch(struct drm_i915_private *dev_priv);
-
-#endif /* __INTEL_PCH__ */
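
For context on what this removal unplugs: the rest of i915 consumed this header almost exclusively through the HAS_PCH_* macros. A minimal, hypothetical usage sketch (helper names invented for illustration, not taken from this patch):

	/* During driver init, after intel_detect_pch(dev_priv): */
	if (HAS_PCH_NOP(dev_priv))
		return;					/* PCH present, but no south display */
	if (HAS_PCH_LPT(dev_priv))
		setup_lpt_south_display(dev_priv);	/* hypothetical helper */
	else if (HAS_PCH_SPLIT(dev_priv))
		setup_generic_pch_display(dev_priv);	/* hypothetical helper */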
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index 3d9d4d40fb80..7cca66f00a38 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -12,8 +12,10 @@ powervr-y := \
pvr_fw.o \
pvr_fw_meta.o \
pvr_fw_mips.o \
+ pvr_fw_riscv.o \
pvr_fw_startstop.o \
pvr_fw_trace.o \
+ pvr_fw_util.o \
pvr_gem.o \
pvr_hwrt.o \
pvr_job.o \
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
index 6b77c9b4bde8..c7ce7daaa87a 100644
--- a/drivers/gpu/drm/imagination/pvr_debugfs.c
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -28,9 +28,8 @@ pvr_debugfs_init(struct drm_minor *minor)
struct drm_device *drm_dev = minor->dev;
struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
struct dentry *root = minor->debugfs_root;
- size_t i;
- for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+ for (size_t i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
struct dentry *dir;
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 1704c0268589..8b9ba4983c4c 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/types.h>
@@ -120,6 +121,21 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
return 0;
}
+static int pvr_device_reset_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct reset_control *reset;
+
+ reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
+ "failed to get gpu reset line\n");
+
+ pvr_dev->reset = reset;
+
+ return 0;
+}
+
/**
* pvr_device_process_active_queues() - Process all queue related events.
* @pvr_dev: PowerVR device to check
@@ -146,9 +162,61 @@ static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
mutex_unlock(&pvr_dev->queues.lock);
}
+static bool pvr_device_safety_irq_pending(struct pvr_device *pvr_dev)
+{
+ u32 events;
+
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
+
+ return (events & ROGUE_CR_EVENT_STATUS_SAFETY_EN) != 0;
+}
+
+static void pvr_device_safety_irq_clear(struct pvr_device *pvr_dev)
+{
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_EVENT_CLEAR,
+ ROGUE_CR_EVENT_CLEAR_SAFETY_EN);
+}
+
+static void pvr_device_handle_safety_events(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u32 events;
+
+ WARN_ON_ONCE(!pvr_dev->has_safety_events);
+
+ events = pvr_cr_read32(pvr_dev, ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);
+
+ /* Handle only these events on the host and leave the rest to the FW. */
+ events &= ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN |
+ ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN;
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE, events);
+
+ if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN) {
+ u32 fault_fw = pvr_cr_read32(pvr_dev, ROGUE_CR_FAULT_FW_STATUS);
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FAULT_FW_CLEAR, fault_fw);
+
+ drm_info(drm_dev, "Safety event: FW fault (mask=0x%08x)\n", fault_fw);
+ }
+
+ if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN) {
+ /*
+		 * The watchdog timer is disabled by the driver, so this event
+		 * should never fire.
+ */
+ drm_info(drm_dev, "Safety event: Watchdog timeout\n");
+ }
+}
+
static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
{
struct pvr_device *pvr_dev = data;
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
irqreturn_t ret = IRQ_NONE;
/* We are in the threaded handler, we can keep dequeuing events until we
@@ -164,30 +232,76 @@ static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
pvr_device_process_active_queues(pvr_dev);
}
- pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);
+ pm_runtime_mark_last_busy(drm_dev->dev);
ret = IRQ_HANDLED;
}
- /* Unmask FW irqs before returning, so new interrupts can be received. */
- pvr_fw_irq_enable(pvr_dev);
+ if (pvr_dev->has_safety_events) {
+ int err;
+
+ /*
+ * Ensure the GPU is powered on since some safety events (such
+ * as ECC faults) can happen outside of job submissions, which
+ * are otherwise the only time a power reference is held.
+ */
+ err = pvr_power_get(pvr_dev);
+ if (err) {
+ drm_err_ratelimited(drm_dev,
+ "%s: could not take power reference (%d)\n",
+ __func__, err);
+ return ret;
+ }
+
+ while (pvr_device_safety_irq_pending(pvr_dev)) {
+ pvr_device_safety_irq_clear(pvr_dev);
+ pvr_device_handle_safety_events(pvr_dev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ pvr_power_put(pvr_dev);
+ }
+
return ret;
}
static irqreturn_t pvr_device_irq_handler(int irq, void *data)
{
struct pvr_device *pvr_dev = data;
+ bool safety_irq_pending = false;
+
+ if (pvr_dev->has_safety_events)
+ safety_irq_pending = pvr_device_safety_irq_pending(pvr_dev);
- if (!pvr_fw_irq_pending(pvr_dev))
+ if (!pvr_fw_irq_pending(pvr_dev) && !safety_irq_pending)
return IRQ_NONE; /* Spurious IRQ - ignore. */
- /* Mask the FW interrupts before waking up the thread. Will be unmasked
- * when the thread handler is done processing events.
- */
- pvr_fw_irq_disable(pvr_dev);
return IRQ_WAKE_THREAD;
}
+static void pvr_device_safety_irq_init(struct pvr_device *pvr_dev)
+{
+ u32 num_ecc_rams = 0;
+
+ /*
+ * Safety events are an optional feature of the RogueXE platform. They
+ * are only enabled if at least one of ECC memory or the watchdog timer
+	 * is present in HW. While safety events can be generated by other
+	 * systems, that will never happen if the above-mentioned hardware is
+ * not present.
+ */
+ if (!PVR_HAS_FEATURE(pvr_dev, roguexe)) {
+ pvr_dev->has_safety_events = false;
+ return;
+ }
+
+ PVR_FEATURE_VALUE(pvr_dev, ecc_rams, &num_ecc_rams);
+
+ pvr_dev->has_safety_events =
+ num_ecc_rams > 0 || PVR_HAS_FEATURE(pvr_dev, watchdog_timer);
+}
+
/**
* pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
* @pvr_dev: Target PowerVR device.
@@ -205,17 +319,25 @@ pvr_device_irq_init(struct pvr_device *pvr_dev)
init_waitqueue_head(&pvr_dev->kccb.rtn_q);
+ pvr_device_safety_irq_init(pvr_dev);
+
pvr_dev->irq = platform_get_irq(plat_dev, 0);
if (pvr_dev->irq < 0)
return pvr_dev->irq;
/* Clear any pending events before requesting the IRQ line. */
pvr_fw_irq_clear(pvr_dev);
- pvr_fw_irq_enable(pvr_dev);
+ if (pvr_dev->has_safety_events)
+ pvr_device_safety_irq_clear(pvr_dev);
+
+ /*
+	 * The ONESHOT flag keeps the IRQ line masked while the threaded
+	 * handler is running.
+ */
return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
pvr_device_irq_thread_handler,
- IRQF_SHARED, "gpu", pvr_dev);
+ IRQF_SHARED | IRQF_ONESHOT, "gpu", pvr_dev);
}
/**
@@ -509,6 +631,11 @@ pvr_device_init(struct pvr_device *pvr_dev)
if (err)
return err;
+ /* Get the reset line for the GPU */
+ err = pvr_device_reset_init(pvr_dev);
+ if (err)
+ return err;
+
/* Explicitly power the GPU so we can access control registers before the FW is booted. */
err = pm_runtime_resume_and_get(dev);
if (err)
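
The interrupt rework above hands masking over to the core IRQ layer instead of toggling FW IRQ enable registers by hand. A condensed sketch of the resulting pattern (simplified from the code above; irq_pending() and handle_events() are stand-ins, not real driver symbols):

	#include <linux/interrupt.h>

	static irqreturn_t gpu_irq(int irq, void *data)
	{
		/* Cheap check only; IRQF_ONESHOT keeps the line masked. */
		return irq_pending(data) ? IRQ_WAKE_THREAD : IRQ_NONE;
	}

	static irqreturn_t gpu_irq_thread(int irq, void *data)
	{
		/* Drain events; the core unmasks the line when we return. */
		while (irq_pending(data))
			handle_events(data);
		return IRQ_HANDLED;
	}

	err = request_threaded_irq(irq, gpu_irq, gpu_irq_thread,
				   IRQF_SHARED | IRQF_ONESHOT, "gpu", dev);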
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index 6d0dfacb677b..7cb01c38d2a9 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -18,6 +18,7 @@
#include <linux/bits.h>
#include <linux/compiler_attributes.h>
#include <linux/compiler_types.h>
+#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -131,6 +132,22 @@ struct pvr_device {
*/
struct clk *mem_clk;
+ struct pvr_device_power {
+ struct device **domain_devs;
+ struct device_link **domain_links;
+
+ u32 domain_count;
+ } power;
+
+ /**
+ * @reset: Optional reset line.
+ *
+	 * Some platforms provide a reset line that must be de-asserted after the power-up
+	 * procedure and asserted again as part of the power-down procedure.
+ */
+ struct reset_control *reset;
+
/** @irq: IRQ number. */
int irq;
@@ -300,6 +317,9 @@ struct pvr_device {
* struct pvr_file.
*/
spinlock_t ctx_list_lock;
+
+ /** @has_safety_events: Whether this device can raise safety events. */
+ bool has_safety_events;
};
/**
@@ -728,8 +748,22 @@ pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
__union_size, __member_size); \
})
-#define PVR_FW_PROCESSOR_TYPE_META 0
-#define PVR_FW_PROCESSOR_TYPE_MIPS 1
-#define PVR_FW_PROCESSOR_TYPE_RISCV 2
+/*
+ * These utility functions should more properly be placed in pvr_fw.h, but that
+ * would cause a dependency cycle between that header and this one. Since
+ * they're primarily used in pvr_device.c, let's keep them here for now.
+ */
+
+static __always_inline bool
+pvr_fw_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_dev->fw_dev.defs->irq_pending(pvr_dev);
+}
+
+static __always_inline void
+pvr_fw_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_dev->fw_dev.defs->irq_clear(pvr_dev);
+}
#endif /* PVR_DEVICE_H */
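
These static inlines replace the old register-description macros with an indirect call through the per-processor vtable; in use, the dispatch looks like this (assuming fw_dev.defs has been populated by pvr_fw_init()):

	if (pvr_fw_irq_pending(pvr_dev)) {	/* -> defs->irq_pending(pvr_dev) */
		pvr_fw_irq_clear(pvr_dev);	/* -> defs->irq_clear(pvr_dev) */
		/* ...dequeue and process firmware events... */
	}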
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 0639502137b4..b058ec183bb3 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -44,6 +44,7 @@
* This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
*
* * AXE-1-16M (found in Texas Instruments AM62)
+ * * BXS-4-64 MC1 (found in Texas Instruments J721S2/AM68)
*/
/**
@@ -1411,6 +1412,10 @@ pvr_probe(struct platform_device *plat_dev)
platform_set_drvdata(plat_dev, drm_dev);
+ err = pvr_power_domains_init(pvr_dev);
+ if (err)
+ return err;
+
init_rwsem(&pvr_dev->reset_sem);
pvr_context_device_init(pvr_dev);
@@ -1450,6 +1455,8 @@ err_watchdog_fini:
err_context_fini:
pvr_context_device_fini(pvr_dev);
+ pvr_power_domains_fini(pvr_dev);
+
return err;
}
@@ -1470,9 +1477,17 @@ static void pvr_remove(struct platform_device *plat_dev)
pvr_watchdog_fini(pvr_dev);
pvr_queue_device_fini(pvr_dev);
pvr_context_device_fini(pvr_dev);
+ pvr_power_domains_fini(pvr_dev);
}
static const struct of_device_id dt_match[] = {
+ { .compatible = "img,img-rogue", .data = NULL },
+
+ /*
+	 * This legacy compatible string was introduced before the more generic
+	 * "img,img-rogue" was added. Keep it here for backwards compatibility, but
+	 * never use "img,img-axe" in new devicetrees.
+ */
{ .compatible = "img,img-axe", .data = NULL },
{}
};
@@ -1498,3 +1513,4 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
+MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
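
The new MODULE_FIRMWARE entry follows the driver's BVNC-keyed firmware naming, here matching the BXS-4-64 MC1 core added above. A sketch of how such a name is derived (illustrative only; see the firmware request path in this file for the real construction):

	/* gpu_id fields are the core's B.V.N.C version numbers. */
	snprintf(name, sizeof(name), "powervr/rogue_%u.%u.%u.%u_v1.fw",
		 gpu_id.b, gpu_id.v, gpu_id.n, gpu_id.c);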
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
index 5e51bc980751..5228e214491c 100644
--- a/drivers/gpu/drm/imagination/pvr_free_list.c
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -237,11 +237,10 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
u64 dma_pfn = dma_addr >>
ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
- u32 dma_addr_offset;
BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
- for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
+ for (u32 dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
WARN_ON_ONCE(dma_pfn >> 32);
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index d09c4c684116..b2f8cba77346 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -50,9 +50,8 @@ pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == id)
return &layout_entries[entry];
}
@@ -65,9 +64,8 @@ pvr_fw_find_private_data(struct pvr_device *pvr_dev)
{
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
- u32 entry;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
if (layout_entries[entry].id == META_PRIVATE_DATA ||
layout_entries[entry].id == MIPS_PRIVATE_DATA ||
layout_entries[entry].id == RISCV_PRIVATE_DATA)
@@ -97,7 +95,6 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
const u8 *fw = firmware->data;
u32 fw_offset = firmware->size - SZ_4K;
u32 layout_table_size;
- u32 entry;
if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
return -EINVAL;
@@ -144,7 +141,7 @@ pvr_fw_validate(struct pvr_device *pvr_dev)
return -EINVAL;
layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
- for (entry = 0; entry < header->layout_entry_num; entry++) {
+ for (u32 entry = 0; entry < header->layout_entry_num; entry++) {
u32 start_addr = layout_entries[entry].base_addr;
u32 end_addr = start_addr + layout_entries[entry].alloc_size;
@@ -233,13 +230,12 @@ pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u32 end_addr = addr + size;
- int entry = 0;
/* Ensure requested range is not zero, and size is not causing addr to overflow. */
if (end_addr <= addr)
return -EINVAL;
- for (entry = 0; entry < num_layout_entries; entry++) {
+ for (int entry = 0; entry < num_layout_entries; entry++) {
u32 entry_start_addr = layout_entries[entry].base_addr;
u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;
@@ -441,6 +437,9 @@ fw_runtime_cfg_init(void *cpu_ptr, void *priv)
runtime_cfg->active_pm_latency_persistant = true;
WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
&runtime_cfg->default_dusts_num_init) != 0);
+
+ /* Keep watchdog timer disabled. */
+ runtime_cfg->wdg_period_us = 0;
}
static void
@@ -663,7 +662,7 @@ pvr_fw_process(struct pvr_device *pvr_dev)
return PTR_ERR(fw_code_ptr);
}
- if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
+ if (pvr_dev->fw_dev.defs->has_fixed_data_addr) {
u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
fw_data_ptr =
@@ -939,18 +938,22 @@ pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
int
pvr_fw_init(struct pvr_device *pvr_dev)
{
+ static const struct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = {
+ [PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta,
+ [PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips,
+ [PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv,
+ };
+
u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
int err;
- if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
- fw_dev->defs = &pvr_fw_defs_meta;
- else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
- fw_dev->defs = &pvr_fw_defs_mips;
- else
+ if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT)
return -EINVAL;
+ fw_dev->defs = fw_defs[fw_dev->processor_type];
+
err = fw_dev->defs->init(pvr_dev);
if (err)
return err;
@@ -1456,6 +1459,15 @@ void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset,
*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
}
+u64
+pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ return fw_dev->fw_heap_info.gpu_addr + fw_obj->fw_addr_offset;
+}
+
/*
* pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
* structures
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
index b7966bd574a9..1404dd492d7c 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.h
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -167,47 +167,30 @@ struct pvr_fw_defs {
int (*wrapper_init)(struct pvr_device *pvr_dev);
/**
- * @has_fixed_data_addr:
+ * @irq_pending: Check interrupt status register for pending interrupts.
*
- * Called to check if firmware fixed data must be loaded at the address given by the
- * firmware layout table.
+ * @pvr_dev: Target PowerVR device.
*
* This function is mandatory.
+ */
+ bool (*irq_pending)(struct pvr_device *pvr_dev);
+
+ /**
+ * @irq_clear: Clear pending interrupts.
*
- * Returns:
- * * %true if firmware fixed data must be loaded at the address given by the firmware
- * layout table.
- * * %false otherwise.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function is mandatory.
*/
- bool (*has_fixed_data_addr)(void);
+ void (*irq_clear)(struct pvr_device *pvr_dev);
/**
- * @irq: FW Interrupt information.
+	 * @has_fixed_data_addr: Specifies whether the firmware fixed data must be loaded at the
+ * address given by the firmware layout table.
*
- * Those are processor dependent, and should be initialized by the
- * processor backend in pvr_fw_funcs::init().
+ * This value is mandatory.
*/
- struct {
- /** @enable_reg: FW interrupt enable register. */
- u32 enable_reg;
-
- /** @status_reg: FW interrupt status register. */
- u32 status_reg;
-
- /**
- * @clear_reg: FW interrupt clear register.
- *
- * If @status_reg == @clear_reg, we clear by write a bit to zero,
- * otherwise we clear by writing a bit to one.
- */
- u32 clear_reg;
-
- /** @event_mask: Bitmask of events to listen for. */
- u32 event_mask;
-
- /** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */
- u32 clear_mask;
- } irq;
+ bool has_fixed_data_addr;
};
/**
@@ -400,26 +383,16 @@ struct pvr_fw_device {
} fw_objs;
};
-#define pvr_fw_irq_read_reg(pvr_dev, name) \
- pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg)
-
-#define pvr_fw_irq_write_reg(pvr_dev, name, value) \
- pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value)
-
-#define pvr_fw_irq_pending(pvr_dev) \
- (pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask)
-
-#define pvr_fw_irq_clear(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask)
-
-#define pvr_fw_irq_enable(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask)
-
-#define pvr_fw_irq_disable(pvr_dev) \
- pvr_fw_irq_write_reg(pvr_dev, enable, 0)
+enum pvr_fw_processor_type {
+ PVR_FW_PROCESSOR_TYPE_META = 0,
+ PVR_FW_PROCESSOR_TYPE_MIPS,
+ PVR_FW_PROCESSOR_TYPE_RISCV,
+ PVR_FW_PROCESSOR_TYPE_COUNT,
+};
extern const struct pvr_fw_defs pvr_fw_defs_meta;
extern const struct pvr_fw_defs pvr_fw_defs_mips;
+extern const struct pvr_fw_defs pvr_fw_defs_riscv;
int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev);
int pvr_fw_init(struct pvr_device *pvr_dev);
@@ -506,4 +479,18 @@ pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
}
+u64
+pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj);
+
+static __always_inline size_t
+pvr_fw_obj_get_object_size(struct pvr_fw_object *fw_obj)
+{
+ return pvr_gem_object_size(fw_obj->gem);
+}
+
+/* Util functions defined in pvr_fw_util.c. These are intended for use in pvr_fw_<arch>.c files. */
+int
+pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr);
+
#endif /* PVR_FW_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index 6d13864851fc..60db3668ad3c 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -370,13 +370,12 @@ configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
u64 seg_out_addr_top;
- u32 i;
seg_out_addr_top =
ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
ROGUE_FW_SEGMMU_META_BIFDM_ID);
- for (i = 0; i < num_layout_entries; i++) {
+ for (u32 i = 0; i < num_layout_entries; i++) {
/*
* FW code is using the bootloader segment which is already
* configured on boot. FW coremem code and data don't use the
@@ -534,9 +533,17 @@ pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
}
static bool
-pvr_meta_has_fixed_data_addr(void)
+pvr_meta_irq_pending(struct pvr_device *pvr_dev)
{
- return false;
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS) &
+ ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
+}
+
+static void
+pvr_meta_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVIRQSTATUS,
+ ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK);
}
const struct pvr_fw_defs pvr_fw_defs_meta = {
@@ -546,12 +553,7 @@ const struct pvr_fw_defs pvr_fw_defs_meta = {
.vm_unmap = pvr_meta_vm_unmap,
.get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
.wrapper_init = pvr_meta_wrapper_init,
- .has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
- .irq = {
- .enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
- .status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
- .clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
- .event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
- .clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
- },
+ .irq_pending = pvr_meta_irq_pending,
+ .irq_clear = pvr_meta_irq_clear,
+ .has_fixed_data_addr = false,
};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
index 0bed0257e2ab..6914fc46db50 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -8,7 +8,6 @@
#include "pvr_rogue_mips.h"
#include "pvr_vm_mips.h"
-#include <linux/elf.h>
#include <linux/err.h>
#include <linux/types.h>
@@ -16,60 +15,6 @@
#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
-/**
- * process_elf_command_stream() - Process ELF firmware image and populate
- * firmware sections
- * @pvr_dev: Device pointer.
- * @fw: Pointer to firmware image.
- * @fw_code_ptr: Pointer to FW code section.
- * @fw_data_ptr: Pointer to FW data section.
- * @fw_core_code_ptr: Pointer to FW coremem code section.
- * @fw_core_data_ptr: Pointer to FW coremem data section.
- *
- * Returns :
- * * 0 on success, or
- * * -EINVAL on any error in ELF command stream.
- */
-static int
-process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
- u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
-{
- struct elf32_hdr *header = (struct elf32_hdr *)fw;
- struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
- struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 entry;
- int err;
-
- for (entry = 0; entry < header->e_phnum; entry++, program_header++) {
- void *write_addr;
-
- /* Only consider loadable entries in the ELF segment table */
- if (program_header->p_type != PT_LOAD)
- continue;
-
- err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
- program_header->p_memsz, fw_code_ptr, fw_data_ptr,
- fw_core_code_ptr, fw_core_data_ptr, &write_addr);
- if (err) {
- drm_err(drm_dev,
- "Addr 0x%x (size: %d) not found in any firmware segment",
- program_header->p_vaddr, program_header->p_memsz);
- return err;
- }
-
- /* Write to FW allocation only if available */
- if (write_addr) {
- memcpy(write_addr, fw + program_header->p_offset,
- program_header->p_filesz);
-
- memset((u8 *)write_addr + program_header->p_filesz, 0,
- program_header->p_memsz - program_header->p_filesz);
- }
- }
-
- return 0;
-}
-
static int
pvr_mips_init(struct pvr_device *pvr_dev)
{
@@ -97,11 +42,10 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
const struct pvr_fw_layout_entry *stack_entry;
struct rogue_mipsfw_boot_data *boot_data;
dma_addr_t dma_addr;
- u32 page_nr;
int err;
- err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
- fw_core_data_ptr);
+ err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr);
if (err)
return err;
@@ -132,7 +76,7 @@ pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
boot_data->reg_base = pvr_dev->regs_resource->start;
- for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
+ for (u32 page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
/* Firmware expects 4k pages, but host page size might be different. */
u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT;
u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK;
@@ -228,9 +172,17 @@ pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
}
static bool
-pvr_mips_has_fixed_data_addr(void)
+pvr_mips_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS) &
+ ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
+}
+
+static void
+pvr_mips_irq_clear(struct pvr_device *pvr_dev)
{
- return true;
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
+ ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
}
const struct pvr_fw_defs pvr_fw_defs_mips = {
@@ -241,12 +193,7 @@ const struct pvr_fw_defs pvr_fw_defs_mips = {
.vm_unmap = pvr_vm_mips_unmap,
.get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
.wrapper_init = pvr_mips_wrapper_init,
- .has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
- .irq = {
- .enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE,
- .status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS,
- .clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
- .event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
- .clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
- },
+ .irq_pending = pvr_mips_irq_pending,
+ .irq_clear = pvr_mips_irq_clear,
+ .has_fixed_data_addr = true,
};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_riscv.c b/drivers/gpu/drm/imagination/pvr_fw_riscv.c
new file mode 100644
index 000000000000..fc13d483be9a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_riscv.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_riscv.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_RISCV_SHIFT 25 /* 32 MB */
+#define ROGUE_FW_HEAP_RISCV_SIZE (1u << ROGUE_FW_HEAP_RISCV_SHIFT)
+
+static int
+pvr_riscv_wrapper_init(struct pvr_device *pvr_dev)
+{
+ const u64 common_opts =
+ ((u64)(ROGUE_FW_HEAP_RISCV_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
+ << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) |
+ ((u64)MMU_CONTEXT_MAPPING_FWPRIV
+ << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT);
+
+ u64 code_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.code_obj);
+ u64 data_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.data_obj);
+
+ /* This condition allows us to OR the addresses into the register directly. */
+ static_assert(ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT ==
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT);
+
+ WARN_ON(code_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
+ WARN_ON(data_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
+
+ pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_CODE),
+ code_addr | common_opts | ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN);
+
+ pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_DATA),
+ data_addr | common_opts |
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN);
+
+ /* Garten IDLE bit controlled by RISC-V. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG,
+ ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+ return 0;
+}
+
+struct rogue_riscv_fw_boot_data {
+ u64 coremem_code_dev_vaddr;
+ u64 coremem_data_dev_vaddr;
+ u32 coremem_code_fw_addr;
+ u32 coremem_data_fw_addr;
+ u32 coremem_code_size;
+ u32 coremem_data_size;
+ u32 flags;
+ u32 reserved;
+};
+
+static int
+pvr_riscv_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+ u32 core_code_alloc_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+ struct rogue_riscv_fw_boot_data *boot_data;
+ int err;
+
+ err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr);
+ if (err)
+ goto err_out;
+
+ boot_data = (struct rogue_riscv_fw_boot_data *)fw_data_ptr;
+
+ if (fw_mem->core_code_obj) {
+ boot_data->coremem_code_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_code_obj);
+ pvr_fw_object_get_fw_addr(fw_mem->core_code_obj, &boot_data->coremem_code_fw_addr);
+ boot_data->coremem_code_size = pvr_fw_obj_get_object_size(fw_mem->core_code_obj);
+ }
+
+ if (fw_mem->core_data_obj) {
+ boot_data->coremem_data_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_data_obj);
+ pvr_fw_object_get_fw_addr(fw_mem->core_data_obj, &boot_data->coremem_data_fw_addr);
+ boot_data->coremem_data_size = pvr_fw_obj_get_object_size(fw_mem->core_data_obj);
+ }
+
+ return 0;
+
+err_out:
+ return err;
+}
+
+static int
+pvr_riscv_init(struct pvr_device *pvr_dev)
+{
+ pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_RISCV_SHIFT, 0);
+
+ return 0;
+}
+
+static u32
+pvr_riscv_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+ u32 fw_addr = fw_obj->fw_addr_offset + offset;
+
+ /* RISC-V cacheability is determined by address. */
+ if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+ fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_UNCACHED_DATA);
+ else
+ fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_CACHED_DATA);
+
+ return fw_addr;
+}
+
+static int
+pvr_riscv_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
+ pvr_gem_object_size(pvr_obj));
+}
+
+static void
+pvr_riscv_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
+}
+
+static bool
+pvr_riscv_irq_pending(struct pvr_device *pvr_dev)
+{
+ return pvr_cr_read32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_STATUS) &
+ ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN;
+}
+
+static void
+pvr_riscv_irq_clear(struct pvr_device *pvr_dev)
+{
+ pvr_cr_write32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_CLEAR,
+ ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
+}
+
+const struct pvr_fw_defs pvr_fw_defs_riscv = {
+ .init = pvr_riscv_init,
+ .fw_process = pvr_riscv_fw_process,
+ .vm_map = pvr_riscv_vm_map,
+ .vm_unmap = pvr_riscv_vm_unmap,
+ .get_fw_addr_with_offset = pvr_riscv_get_fw_addr_with_offset,
+ .wrapper_init = pvr_riscv_wrapper_init,
+ .irq_pending = pvr_riscv_irq_pending,
+ .irq_clear = pvr_riscv_irq_clear,
+ .has_fixed_data_addr = false,
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
index 36cec227cfe3..dcbb9903e791 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_startstop.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
@@ -49,6 +49,14 @@ rogue_bif_init(struct pvr_device *pvr_dev)
pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
pc_addr);
+
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
+ pc_addr = (((u64)pc_dma_addr >> ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) &
+ ~ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK;
+
+ pvr_cr_write64(pvr_dev, FWCORE_MEM_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV), pc_addr);
+ }
}
static int
@@ -114,6 +122,9 @@ pvr_fw_start(struct pvr_device *pvr_dev)
(void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */
}
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV)
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 0);
+
/* Set Rogue in soft-reset. */
pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
if (has_reset2)
@@ -167,6 +178,12 @@ pvr_fw_start(struct pvr_device *pvr_dev)
/* ... and afterwards. */
udelay(3);
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_RISCV) {
+ /* Boot the FW. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT, 1);
+ udelay(3);
+ }
+
return 0;
err_reset:
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 5dbb636d7d4f..a1098b521485 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -21,7 +21,6 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
{
struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
struct pvr_fw_trace *fw_trace = priv;
- u32 thread_nr;
tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
tracebuf_ctrl->tracebuf_flags = 0;
@@ -31,7 +30,7 @@ tracebuf_ctrl_init(void *cpu_ptr, void *priv)
else
tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -48,10 +47,9 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- u32 thread_nr;
int err;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
trace_buffer->buf =
@@ -88,7 +86,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
ARRAY_SIZE(fw_trace->buffers));
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct rogue_fwif_tracebuf_space *tracebuf_space =
&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
@@ -99,7 +97,7 @@ int pvr_fw_trace_init(struct pvr_device *pvr_dev)
return 0;
err_free_buf:
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
if (trace_buffer->buf)
@@ -112,9 +110,8 @@ err_free_buf:
void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
@@ -122,8 +119,6 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
}
-#if defined(CONFIG_DEBUG_FS)
-
/**
* update_logtype() - Send KCCB command to trigger FW to update logtype
* @pvr_dev: Target PowerVR device
@@ -184,9 +179,7 @@ struct pvr_fw_trace_seq_data {
static u32 find_sfid(u32 id)
{
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
+ for (u32 i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
if (stid_fmts[i].id == id)
return i;
}
@@ -285,12 +278,11 @@ static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
{
struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
- u32 i;
/* Reset trace index, then advance to *pos. */
fw_trace_get_first(trace_seq_data);
- for (i = 0; i < *pos; i++) {
+ for (u32 i = 0; i < *pos; i++) {
if (!fw_trace_get_next(trace_seq_data))
return NULL;
}
@@ -447,7 +439,7 @@ static const struct file_operations pvr_fw_trace_fops = {
void
pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
{
- if (old_mask != new_mask)
+ if (IS_ENABLED(CONFIG_DEBUG_FS) && old_mask != new_mask)
update_logtype(pvr_dev, new_mask);
}
@@ -455,12 +447,14 @@ void
pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
{
struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
- u32 thread_nr;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
"The filename buffer is only large enough for a single-digit thread count");
- for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
+ for (u32 thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
char filename[8];
snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
@@ -469,4 +463,3 @@ pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
&pvr_fw_trace_fops);
}
}
-#endif
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h
index 0074d2b18da0..1d0ef937427a 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.h
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h
@@ -65,7 +65,6 @@ struct pvr_fw_trace {
int pvr_fw_trace_init(struct pvr_device *pvr_dev);
void pvr_fw_trace_fini(struct pvr_device *pvr_dev);
-#if defined(CONFIG_DEBUG_FS)
/* Forward declaration from <linux/dcache.h>. */
struct dentry;
@@ -73,6 +72,5 @@ void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask,
u32 new_mask);
void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
-#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* PVR_FW_TRACE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_util.c b/drivers/gpu/drm/imagination/pvr_fw_util.c
new file mode 100644
index 000000000000..377fe72d86b8
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_util.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include <linux/elf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/**
+ * pvr_fw_process_elf_command_stream() - Process ELF firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in ELF command stream.
+ */
+int
+pvr_fw_process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr,
+ u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+ struct elf32_hdr *header = (struct elf32_hdr *)fw;
+ struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ int err;
+
+ for (u32 entry = 0; entry < header->e_phnum; entry++, program_header++) {
+ void *write_addr;
+
+ /* Only consider loadable entries in the ELF segment table */
+ if (program_header->p_type != PT_LOAD)
+ continue;
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
+ program_header->p_memsz, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ program_header->p_vaddr, program_header->p_memsz);
+ return err;
+ }
+
+ /* Write to FW allocation only if available */
+ if (write_addr) {
+ memcpy(write_addr, fw + program_header->p_offset,
+ program_header->p_filesz);
+
+ memset((u8 *)write_addr + program_header->p_filesz, 0,
+ program_header->p_memsz - program_header->p_filesz);
+ }
+ }
+
+ return 0;
+}
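
For reference, the program-header fields the loop consumes, and their roles here (semantics as defined by the ELF specification via <linux/elf.h>):

	/*
	 * p_vaddr  - load address, resolved via pvr_fw_find_mmu_segment()
	 * p_offset - offset of the segment bytes within the FW image
	 * p_filesz - bytes present in the image (copied)
	 * p_memsz  - total segment size; the tail beyond p_filesz is zeroed
	 */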
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index 6a8c81fe8c1e..a66cf082af24 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -19,6 +19,7 @@
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/property.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
@@ -76,8 +77,6 @@ pvr_gem_object_flags_validate(u64 flags)
DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
};
- int i;
-
/*
* Check for bits set in undefined regions. Reserved regions refer to
* options that can only be set by the kernel. These are explicitly
@@ -91,7 +90,7 @@ pvr_gem_object_flags_validate(u64 flags)
* Check for all combinations of flags marked as invalid in the array
* above.
*/
- for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
+ for (int i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
u64 combo = invalid_combinations[i];
if ((flags & combo) == combo)
@@ -203,7 +202,7 @@ pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
dma_resv_lock(obj->resv, NULL);
- err = drm_gem_shmem_vmap(shmem_obj, &map);
+ err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
if (err)
goto err_unlock;
@@ -257,7 +256,7 @@ pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
}
- drm_gem_shmem_vunmap(shmem_obj, &map);
+ drm_gem_shmem_vunmap_locked(shmem_obj, &map);
dma_resv_unlock(obj->resv);
}
@@ -336,6 +335,7 @@ struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t
struct pvr_gem_object *
pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
struct drm_gem_shmem_object *shmem_obj;
struct pvr_gem_object *pvr_obj;
struct sg_table *sgt;
@@ -345,7 +345,10 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
if (size == 0 || !pvr_gem_object_flags_validate(flags))
return ERR_PTR(-EINVAL);
- shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size);
+ if (device_get_dma_attr(drm_dev->dev) == DEV_DMA_COHERENT)
+ flags |= PVR_BO_CPU_CACHED;
+
+ shmem_obj = drm_gem_shmem_create(drm_dev, size);
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
@@ -360,8 +363,7 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
goto err_shmem_object_free;
}
- dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt,
- DMA_BIDIRECTIONAL);
+ dma_sync_sgtable_for_device(drm_dev->dev, sgt, DMA_BIDIRECTIONAL);
/*
* Do this last because pvr_gem_object_zero() requires a fully
diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h
index e0e5ea509a2e..c99f30cc6208 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.h
+++ b/drivers/gpu/drm/imagination/pvr_gem.h
@@ -44,8 +44,10 @@ struct pvr_file;
* Bits not defined anywhere are "undefined".
*
* CPU mapping options
- * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this
- * flag to override this behaviour and map the object cached.
+ * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set
+ * this flag to override this behaviour and map the object cached. If the dma_coherent
+ * property is present in devicetree, all allocations will be mapped as if this flag were
+ * set; callers do not need to account for this at allocation time.
*
* Firmware options
* :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard
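
Given the dma-coherent rule documented above, callers need no flag gymnastics of their own; a minimal allocation sketch (hypothetical call site):

	/* On a dma-coherent platform this ends up CPU-cached either way. */
	pvr_obj = pvr_gem_object_create(pvr_dev, obj_size,
					DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS);
	if (IS_ERR(pvr_obj))
		return PTR_ERR(pvr_obj);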
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
index 54f88d6c01e5..dc0c25fa1847 100644
--- a/drivers/gpu/drm/imagination/pvr_hwrt.c
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -44,13 +44,12 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
{
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
int err;
- int i;
hwrt->pvr_dev = pvr_dev;
hwrt->max_rts = args->layers;
/* Get pointers to the free lists */
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]);
if (!hwrt->free_lists[i]) {
err = -EINVAL;
@@ -67,7 +66,7 @@ hwrt_init_kernel_structure(struct pvr_file *pvr_file,
return 0;
err_put_free_lists:
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -78,9 +77,7 @@ err_put_free_lists:
static void
hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ for (int i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
pvr_free_list_put(hwrt->free_lists[i]);
hwrt->free_lists[i] = NULL;
}
@@ -363,13 +360,12 @@ hwrt_data_init_fw_structure(struct pvr_file *pvr_file,
struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
struct pvr_device *pvr_dev = pvr_file->pvr_dev;
struct rogue_fwif_rta_ctl *rta_ctl;
- int free_list_i;
int err;
pvr_fw_object_get_fw_addr(hwrt->common_fw_obj,
&hwrt_data->data.hwrt_data_common_fw_addr);
- for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
+ for (int free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj,
&hwrt_data->data.freelists_fw_addr[free_list_i]);
}
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
index 4fe70610ed94..450d476d183f 100644
--- a/drivers/gpu/drm/imagination/pvr_mmu.c
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/kmemleak.h>
#include <linux/minmax.h>
+#include <linux/property.h>
#include <linux/sizes.h>
#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
@@ -259,6 +260,7 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
struct device *dev = from_pvr_device(pvr_dev)->dev;
struct page *raw_page;
+ pgprot_t prot;
int err;
dma_addr_t dma_addr;
@@ -268,7 +270,11 @@ pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
if (!raw_page)
return -ENOMEM;
- host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ prot = PAGE_KERNEL;
+ if (device_get_dma_attr(dev) != DEV_DMA_COHERENT)
+ prot = pgprot_writecombine(prot);
+
+ host_ptr = vmap(&raw_page, 1, VM_MAP, prot);
if (!host_ptr) {
err = -ENOMEM;
goto err_free_page;
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index ba7816fd28ec..41f5d89e78b8 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -10,11 +10,15 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -252,6 +256,8 @@ pvr_power_device_suspend(struct device *dev)
clk_disable_unprepare(pvr_dev->sys_clk);
clk_disable_unprepare(pvr_dev->core_clk);
+ err = reset_control_assert(pvr_dev->reset);
+
err_drm_dev_exit:
drm_dev_exit(idx);
@@ -282,16 +288,33 @@ pvr_power_device_resume(struct device *dev)
if (err)
goto err_sys_clk_disable;
+ /*
+ * According to the hardware manual, a delay of at least 32 clock
+ * cycles is required between de-asserting the clkgen reset and
+ * de-asserting the GPU reset. Assuming a worst-case scenario with
+	 * a very low GPU clock frequency, a delay of 1 microsecond is
+ * sufficient to ensure this requirement is met across all
+ * feasible GPU clock speeds.
+ */
+ udelay(1);
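+	/*
+	 * A sanity check on the figure above (illustrative numbers): the
+	 * required gap is 32 / f_clk, so udelay(1) covers any clock of
+	 * 32 MHz or faster; at 1 GHz the hardware only needs 32 ns.
+	 */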
+
+ err = reset_control_deassert(pvr_dev->reset);
+ if (err)
+ goto err_mem_clk_disable;
+
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_enable(pvr_dev);
if (err)
- goto err_mem_clk_disable;
+ goto err_reset_assert;
}
drm_dev_exit(idx);
return 0;
+err_reset_assert:
+ reset_control_assert(pvr_dev->reset);
+
err_mem_clk_disable:
clk_disable_unprepare(pvr_dev->mem_clk);
@@ -431,3 +454,114 @@ pvr_watchdog_fini(struct pvr_device *pvr_dev)
{
cancel_delayed_work_sync(&pvr_dev->watchdog.work);
}
+
+int pvr_power_domains_init(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ struct device_link **domain_links __free(kfree) = NULL;
+ struct device **domain_devs __free(kfree) = NULL;
+ int domain_count;
+ int link_count;
+
+ char dev_name[2] = "a";
+ int err;
+ int i;
+
+ domain_count = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (domain_count < 0)
+ return domain_count;
+
+ if (domain_count <= 1)
+ return 0;
+
+ link_count = domain_count + (domain_count - 1);
+
+ domain_devs = kcalloc(domain_count, sizeof(*domain_devs), GFP_KERNEL);
+ if (!domain_devs)
+ return -ENOMEM;
+
+ domain_links = kcalloc(link_count, sizeof(*domain_links), GFP_KERNEL);
+ if (!domain_links)
+ return -ENOMEM;
+
+ for (i = 0; i < domain_count; i++) {
+ struct device *domain_dev;
+
+ dev_name[0] = 'a' + i;
+ domain_dev = dev_pm_domain_attach_by_name(dev, dev_name);
+ if (IS_ERR_OR_NULL(domain_dev)) {
+ err = domain_dev ? PTR_ERR(domain_dev) : -ENODEV;
+ goto err_detach;
+ }
+
+ domain_devs[i] = domain_dev;
+ }
+
+ for (i = 0; i < domain_count; i++) {
+ struct device_link *link;
+
+ link = device_link_add(dev, domain_devs[i], DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ err = -ENODEV;
+ goto err_unlink;
+ }
+
+ domain_links[i] = link;
+ }
+
+ for (i = domain_count; i < link_count; i++) {
+ struct device_link *link;
+
+ link = device_link_add(domain_devs[i - domain_count + 1],
+ domain_devs[i - domain_count],
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
+ if (!link) {
+ err = -ENODEV;
+ goto err_unlink;
+ }
+
+ domain_links[i] = link;
+ }
+
+ pvr_dev->power = (struct pvr_device_power){
+ .domain_devs = no_free_ptr(domain_devs),
+ .domain_links = no_free_ptr(domain_links),
+ .domain_count = domain_count,
+ };
+
+ return 0;
+
+err_unlink:
+ while (--i >= 0)
+ device_link_del(domain_links[i]);
+
+ i = domain_count;
+
+err_detach:
+ while (--i >= 0)
+ dev_pm_domain_detach(domain_devs[i], true);
+
+ return err;
+}
+
+void pvr_power_domains_fini(struct pvr_device *pvr_dev)
+{
+ const int domain_count = pvr_dev->power.domain_count;
+
+ int i = domain_count + (domain_count - 1);
+
+ while (--i >= 0)
+ device_link_del(pvr_dev->power.domain_links[i]);
+
+ i = domain_count;
+
+ while (--i >= 0)
+ dev_pm_domain_detach(pvr_dev->power.domain_devs[i], true);
+
+ kfree(pvr_dev->power.domain_links);
+ kfree(pvr_dev->power.domain_devs);
+
+ pvr_dev->power = (struct pvr_device_power){ 0 };
+}
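
pvr_power_domains_init() models a GPU fed by several power domains: each domain is attached as a virtual device, the GPU gets a runtime-PM consumer link to every domain, and neighbouring domains are chained so they resume in order - hence link_count = domain_count + (domain_count - 1). A reduced sketch of the linking step under those assumptions (link_domains is illustrative, not driver code):

#include <linux/device.h>

static int link_domains(struct device *gpu, struct device **pd, int n)
{
	u32 flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;

	/* n consumer links: resuming the GPU resumes every domain. */
	for (int i = 0; i < n; i++)
		if (!device_link_add(gpu, pd[i], flags))
			return -ENODEV;

	/* n - 1 chaining links: domain i powers up before domain i + 1. */
	for (int i = 1; i < n; i++)
		if (!device_link_add(pd[i], pd[i - 1], flags))
			return -ENODEV;

	return 0;
}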
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
index 9a9312dcb2da..ada85674a7ca 100644
--- a/drivers/gpu/drm/imagination/pvr_power.h
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -38,4 +38,7 @@ pvr_power_put(struct pvr_device *pvr_dev)
return pm_runtime_put(drm_dev->dev);
}
+int pvr_power_domains_init(struct pvr_device *pvr_dev);
+void pvr_power_domains_fini(struct pvr_device *pvr_dev);
+
#endif /* PVR_POWER_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
index 2a90d02796d3..790c97f80a2a 100644
--- a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
@@ -827,6 +827,120 @@
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
+/* Register ROGUE_CR_EVENT_CLEAR */
+#define ROGUE_CR_EVENT_CLEAR 0x0138U
+#define ROGUE_CR_EVENT_CLEAR__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
+#define ROGUE_CR_EVENT_CLEAR__SIGNALS__MASKFULL 0x00000000E007FFFFULL
+#define ROGUE_CR_EVENT_CLEAR_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT 31U
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN 0x80000000U
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT 30U
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN 0x40000000U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT 28U
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN 0x10000000U
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT 27U
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN 0x08000000U
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT 26U
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN 0x04000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT 25U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN 0x02000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT 24U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN 0x01000000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT 23U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN 0x00800000U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT 22U
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN 0x00400000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT 21U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN 0x00200000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT 20U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN 0x00100000U
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_SHIFT 20U
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SAFETY_EN 0x00100000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT 19U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN 0x00080000U
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT 19U
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_CLEAR_SLAVE_REQ_EN 0x00080000U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT 17U
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SHG_FINISHED_EN 0x00020000U
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT 16U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN 0x00010000U
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT 15U
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
+#define ROGUE_CR_EVENT_CLEAR_USC_TRIGGER_EN 0x00008000U
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT 14U
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_EVENT_CLEAR_ZLS_FINISHED_EN 0x00004000U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_SHIFT 13U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_EVENT_CLEAR_GPIO_ACK_EN 0x00002000U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_SHIFT 12U
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_EVENT_CLEAR_GPIO_REQ_EN 0x00001000U
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_SHIFT 11U
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_EVENT_CLEAR_POWER_ABORT_EN 0x00000800U
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT 10U
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_EVENT_CLEAR_POWER_COMPLETE_EN 0x00000400U
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT 9U
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN 0x00000200U
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT 8U
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN 0x00000100U
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT 7U
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN 0x00000080U
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT 6U
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_EVENT_CLEAR_TA_TERMINATE_EN 0x00000040U
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_SHIFT 5U
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_EVENT_CLEAR_TA_FINISHED_EN 0x00000020U
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT 4U
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN 0x00000010U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT 3U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN 0x00000008U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT 2U
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN 0x00000004U
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT 1U
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_EVENT_CLEAR_KERNEL_FINISHED_EN 0x00000002U
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT 0U
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_EVENT_CLEAR_TLA_COMPLETE_EN 0x00000001U
+
/* Register ROGUE_CR_TIMER */
#define ROGUE_CR_TIMER 0x0160U
#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
@@ -6031,25 +6145,6 @@
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
-/* Register ROGUE_CR_ECC_RAM_ERR_INJ */
-#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU
-#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U
-
/* Register ROGUE_CR_ECC_RAM_INIT_KICK */
#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U
#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL
@@ -6163,6 +6258,26 @@
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+/* Register ROGUE_CR_FAULT_FW_STATUS */
+#define ROGUE_CR_FAULT_FW_STATUS 0xF3B0U
+#define ROGUE_CR_FAULT_FW_STATUS_MASKFULL 0x0000000000010001ULL
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT 16U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_CORRECT_EN 0x00010000U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT 0U
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FAULT_FW_STATUS_CPU_DETECT_EN 0x00000001U
+
+/* Register ROGUE_CR_FAULT_FW_CLEAR */
+#define ROGUE_CR_FAULT_FW_CLEAR 0xF3B8U
+#define ROGUE_CR_FAULT_FW_CLEAR_MASKFULL 0x0000000000010001ULL
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT 16U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN 0x00010000U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT 0U
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FAULT_FW_CLEAR_CPU_DETECT_EN 0x00000001U
+
/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U
#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_riscv.h b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h
new file mode 100644
index 000000000000..9a070e24fa6a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_riscv.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2024 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_RISCV_H
+#define PVR_ROGUE_RISCV_H
+
+#include "pvr_rogue_cr_defs.h"
+
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define ROGUE_RISCVFW_REGION_SIZE SZ_256M
+#define ROGUE_RISCVFW_REGION_SHIFT __ffs(ROGUE_RISCVFW_REGION_SIZE)
+
+enum rogue_riscvfw_region {
+ ROGUE_RISCV_REGION__RESERVED_0 = 0,
+ ROGUE_RISCV_REGION__RESERVED_1,
+ ROGUE_RISCV_REGION_SOCIF,
+ ROGUE_RISCV_REGION__RESERVED_3,
+ ROGUE_RISCV_REGION__RESERVED_4,
+ ROGUE_RISCV_REGION_BOOTLDR_DATA,
+ ROGUE_RISCV_REGION_SHARED_CACHED_DATA,
+ ROGUE_RISCV_REGION__RESERVED_7,
+ ROGUE_RISCV_REGION_COREMEM,
+ ROGUE_RISCV_REGION__RESERVED_9,
+ ROGUE_RISCV_REGION__RESERVED_A,
+ ROGUE_RISCV_REGION__RESERVED_B,
+ ROGUE_RISCV_REGION_BOOTLDR_CODE,
+ ROGUE_RISCV_REGION_SHARED_UNCACHED_DATA,
+ ROGUE_RISCV_REGION__RESERVED_E,
+ ROGUE_RISCV_REGION__RESERVED_F,
+
+ ROGUE_RISCV_REGION__COUNT,
+};
+
+#define ROGUE_RISCVFW_REGION_BASE(r) ((u32)(ROGUE_RISCV_REGION_##r) << ROGUE_RISCVFW_REGION_SHIFT)
+#define ROGUE_RISCVFW_REGION_REMAP_CR(r) \
+ (ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 + (u32)(ROGUE_RISCV_REGION_##r) * 8U)
+
+#endif /* PVR_ROGUE_RISCV_H */
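
Each region occupies one SZ_256M slot of the RISC-V firmware address map, so ROGUE_RISCVFW_REGION_SHIFT resolves to 28 and a region's base is simply its enum index shifted up. Two values that follow directly from the definitions above:

/*
 * ROGUE_RISCVFW_REGION_BASE(SOCIF)        == 2  << 28 == 0x20000000
 * ROGUE_RISCVFW_REGION_BASE(BOOTLDR_CODE) == 12 << 28 == 0xc0000000
 */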
diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c
index 975336a4facf..679aa618b7a9 100644
--- a/drivers/gpu/drm/imagination/pvr_stream.c
+++ b/drivers/gpu/drm/imagination/pvr_stream.c
@@ -67,9 +67,8 @@ pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *st
u8 *dest, u32 dest_size, u32 *stream_offset_out)
{
int err = 0;
- u32 i;
- for (i = 0; i < nr_entries; i++) {
+ for (u32 i = 0; i < nr_entries; i++) {
if (stream_def[i].offset >= dest_size) {
err = -EINVAL;
break;
@@ -131,7 +130,6 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
u32 ext_header;
int err = 0;
- u32 i;
/* Copy "must have" mask from device. We clear this as we process the stream. */
memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type],
@@ -159,7 +157,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
musthave_masks[type] &= ~data;
- for (i = 0; i < header->ext_streams_num; i++) {
+ for (u32 i = 0; i < header->ext_streams_num; i++) {
const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i];
if (!(ext_header & ext_def->header_mask))
@@ -181,7 +179,7 @@ pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
* Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks
* for this command was not present.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (musthave_masks[i])
return -EINVAL;
}
@@ -245,13 +243,11 @@ pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs
if (err)
return err;
} else {
- u32 i;
-
/*
* If we don't have an extension stream then there must not be any "must have"
* quirks for this command.
*/
- for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ for (u32 i = 0; i < cmd_defs->ext_nr_headers; i++) {
if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i])
return -EINVAL;
}
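
The pvr_stream.c hunks are a mechanical cleanup enabled by the kernel's move to the gnu11 dialect: the loop counter moves into the for statement itself, shrinking its scope to the loop body. Illustrative before/after, with process() as a stand-in:

	u32 i;				/* before: visible to the whole function */

	for (i = 0; i < n; i++)
		process(i);

	for (u32 i = 0; i < n; i++)	/* after: scoped to the loop */
		process(i);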
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 94af854547d6..5847a1c92bea 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -100,10 +100,9 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
{
struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
- int page_nr;
vunmap(mips_data->pt);
- for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
+ for (int page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 9e66eb77b1eb..6d8325c76697 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -162,11 +162,12 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
}
static int imx_pd_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
- return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags);
+ return drm_bridge_attach(encoder, imxpd->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 20b93fff0239..f851e9ffdb28 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -791,11 +791,12 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
}
static int ingenic_drm_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
- struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder);
+ struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(encoder);
- return drm_bridge_attach(bridge->encoder, ib->next_bridge,
+ return drm_bridge_attach(encoder, ib->next_bridge,
&ib->bridge, flags);
}
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 9bb997dbb4b9..5deec673c11e 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
mapping_set_unevictable(mapping);
}
@@ -195,7 +195,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
if (bo->heap_size)
return -EINVAL;
- return drm_gem_shmem_vmap(&bo->base, map);
+ return drm_gem_shmem_vmap_locked(&bo->base, map);
}
static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 825135a26aa4..7934098e651b 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
kvfree(et);
goto out;
@@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
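
In the lima hunks, the shmem object's pages_use_count is now a refcount_t, so it must be initialised with refcount_set() rather than plain assignment; refcount_t saturates and warns on overflow and underflow where a bare integer would silently wrap. A minimal sketch of the pattern on a hypothetical object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
	refcount_t users;
};

static struct demo_obj *demo_obj_create(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->users, 1);	/* first reference */
	return obj;
}

static void demo_obj_put(struct demo_obj *obj)
{
	/* Warns on underflow, unlike a plain integer decrement. */
	if (refcount_dec_and_test(&obj->users))
		kfree(obj);
}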
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index 552edfec7afb..d739d51cf54c 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -2,7 +2,7 @@
config DRM_LOONGSON
tristate "DRM support for Loongson Graphics"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on LOONGARCH || MIPS || COMPILE_TEST
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 395449a72f0a..a3423459dd7a 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1048,6 +1048,7 @@ void mcde_dsi_disable(struct drm_bridge *bridge)
}
static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
@@ -1059,7 +1060,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
+ return drm_bridge_attach(encoder, d->bridge_out, bridge, flags);
}
static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
@@ -1137,7 +1138,6 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
d->bridge_out = bridge;
/* Create a bridge for this DSI channel */
- d->bridge.funcs = &mcde_dsi_bridge_funcs;
d->bridge.of_node = dev->of_node;
drm_bridge_add(&d->bridge);
@@ -1173,9 +1173,9 @@ static int mcde_dsi_probe(struct platform_device *pdev)
u32 dsi_id;
int ret;
- d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
+ d = devm_drm_bridge_alloc(dev, struct mcde_dsi, bridge, &mcde_dsi_bridge_funcs);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
d->dev = dev;
platform_set_drvdata(pdev, d);
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 32a2ed6c0cfe..43afd0a26d14 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -21,10 +21,8 @@ mediatek-drm-y := mtk_crtc.o \
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
-mediatek-drm-hdmi-objs := mtk_cec.o \
- mtk_hdmi.o \
- mtk_hdmi_ddc.o
-
-obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_cec.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi_ddc.o
obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index b42c0d87eba3..c7be530ca041 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include "mtk_cec.h"
-#include "mtk_hdmi.h"
#include "mtk_drm_drv.h"
#define TR_CONFIG 0x00
@@ -102,6 +101,7 @@ void mtk_cec_set_hpd_event(struct device *dev,
cec->hpd_event = hpd_event;
spin_unlock_irqrestore(&cec->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(mtk_cec_set_hpd_event, "DRM_MTK_HDMI_V1");
bool mtk_cec_hpd_high(struct device *dev)
{
@@ -112,6 +112,7 @@ bool mtk_cec_hpd_high(struct device *dev)
return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
}
+EXPORT_SYMBOL_NS_GPL(mtk_cec_hpd_high, "DRM_MTK_HDMI_V1");
static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
{
@@ -247,3 +248,7 @@ struct platform_driver mtk_cec_driver = {
.of_match_table = mtk_cec_of_ids,
},
};
+module_platform_driver(mtk_cec_driver);
+
+MODULE_DESCRIPTION("MediaTek HDMI CEC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index fa0e95dd29a0..fe97bb97e004 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -492,11 +492,6 @@ static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
{ /* sentinel */ }
};
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node,
enum mtk_ovl_adaptor_comp_type *ctype)
{
@@ -567,7 +562,7 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
- drm_of_component_match_add(dev, match, compare_of, node);
+ drm_of_component_match_add(dev, match, component_compare_of, node);
dev_dbg(dev, "Adding component match for %pOF\n", node);
}
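
The driver-local compare_of() duplicated a generic helper, so the match callback is switched to component_compare_of() from <linux/component.h>. The generic helper performs the same of_node comparison the removed function did, i.e. it is equivalent to:

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}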
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index fed3307d3374..58279ddaab3c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2287,6 +2287,7 @@ static void mtk_dp_poweroff(struct mtk_dp *mtk_dp)
}
static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
@@ -2310,7 +2311,7 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
goto err_aux_register;
if (mtk_dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge,
+ ret = drm_bridge_attach(encoder, mtk_dp->next_bridge,
&mtk_dp->bridge, flags);
if (ret) {
drm_warn(mtk_dp->drm_dev,
@@ -2568,7 +2569,7 @@ static const struct drm_bridge_funcs mtk_dp_bridge_funcs = {
static void mtk_dp_debounce_timer(struct timer_list *t)
{
- struct mtk_dp *mtk_dp = from_timer(mtk_dp, t, debounce_timer);
+ struct mtk_dp *mtk_dp = timer_container_of(mtk_dp, t, debounce_timer);
mtk_dp->need_debounce = true;
}
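
from_timer() has been renamed to timer_container_of(); both recover the structure embedding the timer from the timer_list pointer handed to the callback. A minimal sketch with a hypothetical struct:

#include <linux/timer.h>

struct demo {
	struct timer_list timer;
	bool fired;
};

static void demo_timer_fn(struct timer_list *t)
{
	/* Recover the struct demo that embeds this timer_list. */
	struct demo *d = timer_container_of(d, t, timer);

	d->fired = true;
}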
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0fd13e6dd3f1..6fb85bc6487a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -59,7 +59,8 @@ enum mtk_dpi_out_channel_swap {
enum mtk_dpi_out_color_format {
MTK_DPI_COLOR_FORMAT_RGB,
- MTK_DPI_COLOR_FORMAT_YCBCR_422
+ MTK_DPI_COLOR_FORMAT_YCBCR_422,
+ MTK_DPI_COLOR_FORMAT_YCBCR_444
};
struct mtk_dpi {
@@ -450,9 +451,16 @@ static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
enum mtk_dpi_out_color_format format)
{
- mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+ mtk_dpi_config_channel_swap(dpi, dpi->channel_swap);
- if (format == MTK_DPI_COLOR_FORMAT_YCBCR_422) {
+ switch (format) {
+ case MTK_DPI_COLOR_FORMAT_YCBCR_444:
+ mtk_dpi_config_yuv422_enable(dpi, false);
+ mtk_dpi_config_csc_enable(dpi, true);
+ if (dpi->conf->swap_input_support)
+ mtk_dpi_config_swap_input(dpi, false);
+ break;
+ case MTK_DPI_COLOR_FORMAT_YCBCR_422:
mtk_dpi_config_yuv422_enable(dpi, true);
mtk_dpi_config_csc_enable(dpi, true);
@@ -463,11 +471,14 @@ static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
mtk_dpi_mask(dpi, DPI_MATRIX_SET, dpi->mode.hdisplay <= 720 ?
MATRIX_SEL_RGB_TO_BT601 : MATRIX_SEL_RGB_TO_JPEG,
INT_MATRIX_SEL_MASK);
- } else {
+ break;
+ default:
+ case MTK_DPI_COLOR_FORMAT_RGB:
mtk_dpi_config_yuv422_enable(dpi, false);
mtk_dpi_config_csc_enable(dpi, false);
if (dpi->conf->swap_input_support)
mtk_dpi_config_swap_input(dpi, false);
+ break;
}
}
@@ -734,6 +745,65 @@ static u32 *mtk_dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static unsigned int mtk_dpi_bus_fmt_bit_num(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ return MTK_DPI_OUT_BIT_NUM_8BITS;
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_OUT_BIT_NUM_10BITS;
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_OUT_BIT_NUM_12BITS;
+ }
+}
+
+static unsigned int mtk_dpi_bus_fmt_channel_swap(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_OUT_CHANNEL_SWAP_BGR;
+ }
+}
+
+static unsigned int mtk_dpi_bus_fmt_color_format(unsigned int out_bus_format)
+{
+ switch (out_bus_format) {
+ default:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ return MTK_DPI_COLOR_FORMAT_RGB;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ return MTK_DPI_COLOR_FORMAT_YCBCR_422;
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ return MTK_DPI_COLOR_FORMAT_YCBCR_444;
+ }
+}
+
static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -753,18 +823,16 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge,
bridge_state->output_bus_cfg.format);
dpi->output_fmt = out_bus_format;
- dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
- dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ dpi->bit_num = mtk_dpi_bus_fmt_bit_num(out_bus_format);
+ dpi->channel_swap = mtk_dpi_bus_fmt_channel_swap(out_bus_format);
dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
- if (out_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
- dpi->color_format = MTK_DPI_COLOR_FORMAT_YCBCR_422;
- else
- dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
+ dpi->color_format = mtk_dpi_bus_fmt_color_format(out_bus_format);
return 0;
}
static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
@@ -783,7 +851,7 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
"Failed to get bridge\n");
}
- return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
+ return drm_bridge_attach(encoder, dpi->next_bridge,
&dpi->bridge, flags);
}
@@ -1026,9 +1094,29 @@ static const u32 mt8183_output_fmts[] = {
MEDIA_BUS_FMT_RGB888_2X12_BE,
};
-static const u32 mt8195_output_fmts[] = {
+static const u32 mt8195_dpi_output_fmts[] = {
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB101010_1X30,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
+};
+
+static const u32 mt8195_dp_intf_output_fmts[] = {
+ MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE,
+ MEDIA_BUS_FMT_RGB888_2X12_BE,
+ MEDIA_BUS_FMT_RGB101010_1X30,
MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUV10_1X30,
};
static const struct mtk_dpi_factor dpi_factor_mt2701[] = {
@@ -1141,8 +1229,8 @@ static const struct mtk_dpi_conf mt8192_conf = {
static const struct mtk_dpi_conf mt8195_conf = {
.max_clock_khz = 594000,
- .output_fmts = mt8183_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .output_fmts = mt8195_dpi_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_dpi_output_fmts),
.pixels_per_iter = 1,
.is_ck_de_pol = true,
.swap_input_support = true,
@@ -1161,8 +1249,8 @@ static const struct mtk_dpi_conf mt8195_dpintf_conf = {
.dpi_factor = dpi_factor_mt8195_dp_intf,
.num_dpi_factor = ARRAY_SIZE(dpi_factor_mt8195_dp_intf),
.max_clock_khz = 600000,
- .output_fmts = mt8195_output_fmts,
- .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts),
+ .output_fmts = mt8195_dp_intf_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8195_dp_intf_output_fmts),
.pixels_per_iter = 4,
.dimension_mask = DPINTF_HPW_MASK,
.hvsize_mask = DPINTF_HSIZE_MASK,
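
mtk_dpi's atomic_check now derives the bit depth, channel order and color format from the negotiated output bus format instead of hard-coding 8-bit RGB. Two mappings that follow from the three helpers above:

/*
 * MEDIA_BUS_FMT_YUV10_1X30  -> 10 bits, BGR channel order, YCbCr 4:4:4
 * MEDIA_BUS_FMT_YUYV12_1X24 -> 12 bits, RGB channel order, YCbCr 4:2:2
 */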
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 74158b9d6503..7c0c12dde488 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -470,7 +470,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = drmm_mode_config_init(drm);
if (ret)
- goto put_mutex_dev;
+ return ret;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -488,8 +488,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
for (i = 0; i < private->data->mmsys_dev_num; i++) {
drm->dev_private = private->all_drm_private[i];
ret = component_bind_all(private->all_drm_private[i]->dev, drm);
- if (ret)
- goto put_mutex_dev;
+ if (ret) {
+ while (--i >= 0)
+ component_unbind_all(private->all_drm_private[i]->dev, drm);
+ return ret;
+ }
}
/*
@@ -582,9 +585,6 @@ static int mtk_drm_kms_init(struct drm_device *drm)
err_component_unbind:
for (i = 0; i < private->data->mmsys_dev_num; i++)
component_unbind_all(private->all_drm_private[i]->dev, drm);
-put_mutex_dev:
- for (i = 0; i < private->data->mmsys_dev_num; i++)
- put_device(private->all_drm_private[i]->mutex_dev);
return ret;
}
@@ -655,8 +655,10 @@ static int mtk_drm_bind(struct device *dev)
return 0;
drm = drm_dev_alloc(&mtk_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
+ goto err_put_dev;
+ }
private->drm_master = true;
drm->dev_private = private;
@@ -682,18 +684,31 @@ err_free:
drm_dev_put(drm);
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
+err_put_dev:
+ for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ /* For device_find_child in mtk_drm_get_all_priv() */
+ put_device(private->all_drm_private[i]->dev);
+ }
+ put_device(private->mutex_dev);
return ret;
}
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
+ int i;
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
+
+ for (i = 0; i < private->data->mmsys_dev_num; i++) {
+ /* For device_find_child in mtk_drm_get_all_priv() */
+ put_device(private->all_drm_private[i]->dev);
+ }
+ put_device(private->mutex_dev);
}
private->mtk_drm_bound = false;
private->drm_master = false;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d1f407fb7eb1..4fe1f38a3c4b 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -807,12 +807,13 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_dsi *dsi = bridge_to_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge,
&dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 06e4fac152b7..8803cd4a8bc9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -31,7 +31,6 @@
#include <drm/drm_probe_helper.h>
#include "mtk_cec.h"
-#include "mtk_hdmi.h"
#include "mtk_hdmi_regs.h"
#define NCTS_BYTES 7
@@ -165,7 +164,7 @@ struct mtk_hdmi {
bool dvi_mode;
struct regmap *sys_regmap;
unsigned int sys_offset;
- void __iomem *regs;
+ struct regmap *regs;
struct platform_device *audio_pdev;
struct hdmi_audio_param aud_param;
bool audio_enable;
@@ -181,50 +180,10 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
return container_of(b, struct mtk_hdmi, bridge);
}
-static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
-{
- return readl(hdmi->regs + offset);
-}
-
-static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
-{
- writel(val, hdmi->regs + offset);
-}
-
-static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp &= ~bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp |= bits;
- writel(tmp, reg);
-}
-
-static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
-{
- void __iomem *reg = hdmi->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, reg);
-}
-
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
- mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
- VIDEO_SOURCE_SEL);
+ regmap_update_bits(hdmi->regs, VIDEO_SOURCE_SEL,
+ VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH);
}
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
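
From here on, the open-coded MMIO helpers are replaced by regmap calls. Note the argument order: the old mtk_hdmi_mask(hdmi, offset, val, mask) put the value before the mask, while regmap_update_bits(map, reg, mask, val) takes the mask first - an easy place to introduce an accidental swap. The correspondence, with illustrative names:

/* mtk_hdmi_mask(hdmi, REG, VAL, MASK) becomes: */
regmap_update_bits(hdmi->regs, REG, MASK, VAL);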
@@ -259,12 +218,12 @@ static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+ regmap_set_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+ regmap_clear_bits(hdmi->regs, GRL_AUDIO_CFG, AUDIO_ZERO);
}
static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
@@ -273,25 +232,25 @@ static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
HDMI_RST, HDMI_RST);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
HDMI_RST, 0);
- mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
+ regmap_clear_bits(hdmi->regs, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
ANLG_ON, ANLG_ON);
}
static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
{
- mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
- CFG2_NOTICE_EN);
+ regmap_update_bits(hdmi->regs, GRL_CFG2, CFG2_NOTICE_EN,
+ enable_notice ? CFG2_NOTICE_EN : 0);
}
static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
{
- mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
+ regmap_write(hdmi->regs, GRL_INT_MASK, int_mask);
}
static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
+ regmap_update_bits(hdmi->regs, GRL_CFG1, CFG1_DVI, enable ? CFG1_DVI : 0);
}
static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
@@ -337,22 +296,22 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
dev_err(hdmi->dev, "Unknown infoframe type %d\n", frame_type);
return;
}
- mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
- mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
+ regmap_clear_bits(hdmi->regs, ctrl_reg, ctrl_frame_en);
+ regmap_write(hdmi->regs, GRL_INFOFRM_TYPE, frame_type);
+ regmap_write(hdmi->regs, GRL_INFOFRM_VER, frame_ver);
+ regmap_write(hdmi->regs, GRL_INFOFRM_LNG, frame_len);
- mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
+ regmap_write(hdmi->regs, GRL_IFM_PORT, checksum);
for (i = 0; i < frame_len; i++)
- mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
+ regmap_write(hdmi->regs, GRL_IFM_PORT, frame_data[i]);
- mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
+ regmap_set_bits(hdmi->regs, ctrl_reg, ctrl_frame_en);
}
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
- AUDIO_PACKET_OFF);
+ regmap_update_bits(hdmi->regs, AUDIO_PACKET_OFF,
+ GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
@@ -373,44 +332,44 @@ static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ regmap_clear_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE);
usleep_range(2000, 4000);
- mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ regmap_set_bits(hdmi->regs, GRL_CFG4, CTRL_AVMUTE);
}
static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
{
- mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
- CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_EN);
usleep_range(2000, 4000);
- mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
- CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ regmap_update_bits(hdmi->regs, GRL_CFG4, CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_SET);
}
static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
{
- mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
- CTS_CTRL_SOFT);
+ regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, CTS_CTRL_SOFT,
+ on ? 0 : CTS_CTRL_SOFT);
}
static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
bool enable)
{
- mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
- NCTS_WRI_ANYTIME);
+ regmap_update_bits(hdmi->regs, GRL_CTS_CTRL, NCTS_WRI_ANYTIME,
+ enable ? NCTS_WRI_ANYTIME : 0);
}
static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
- mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
+ regmap_clear_bits(hdmi->regs, GRL_CFG4, CFG4_MHL_MODE);
if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
mode->clock == 74250 &&
mode->vdisplay == 1080)
- mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL);
else
- mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ regmap_set_bits(hdmi->regs, GRL_CFG2, CFG2_MHL_DE_SEL);
}
static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
@@ -438,7 +397,7 @@ static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
swap_bit = LFE_CC_SWAP;
break;
}
- mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
+ regmap_update_bits(hdmi->regs, GRL_CH_SWAP, 0xff, swap_bit);
}
static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
@@ -459,7 +418,7 @@ static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
break;
}
- mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
+ regmap_update_bits(hdmi->regs, GRL_AOUT_CFG, AOUT_BNUM_SEL_MASK, val);
}
static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
@@ -467,7 +426,7 @@ static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG0);
+ regmap_read(hdmi->regs, GRL_CFG0, &val);
val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
switch (i2s_fmt) {
@@ -491,7 +450,7 @@ static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
break;
}
- mtk_hdmi_write(hdmi, GRL_CFG0, val);
+ regmap_write(hdmi->regs, GRL_CFG0, val);
}
static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
@@ -500,14 +459,14 @@ static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
u8 val;
/* Disable high bitrate, set DST packet normal/double */
- mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
+ regmap_clear_bits(hdmi->regs, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
if (dst)
val = DST_NORMAL_DOUBLE | SACD_DST;
else
val = 0;
- mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
+ regmap_update_bits(hdmi->regs, GRL_AUDIO_CFG, mask, val);
}
static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
@@ -548,10 +507,10 @@ static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
i2s_uv = I2S_UV_CH_EN(0);
}
- mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
- mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
- mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
- mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
+ regmap_write(hdmi->regs, GRL_CH_SW0, ch_switch & 0xff);
+ regmap_write(hdmi->regs, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
+ regmap_write(hdmi->regs, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
+ regmap_write(hdmi->regs, GRL_I2S_UV, i2s_uv);
}
static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
@@ -559,7 +518,7 @@ static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG1);
+ regmap_read(hdmi->regs, GRL_CFG1, &val);
if (input_type == HDMI_AUD_INPUT_I2S &&
(val & CFG1_SPDIF) == CFG1_SPDIF) {
val &= ~CFG1_SPDIF;
@@ -567,7 +526,7 @@ static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
(val & CFG1_SPDIF) == 0) {
val |= CFG1_SPDIF;
}
- mtk_hdmi_write(hdmi, GRL_CFG1, val);
+ regmap_write(hdmi->regs, GRL_CFG1, val);
}
static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
@@ -576,13 +535,13 @@ static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
int i;
for (i = 0; i < 5; i++) {
- mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
- mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
- mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, channel_status[i]);
+ regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, channel_status[i]);
}
for (; i < 24; i++) {
- mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
- mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
+ regmap_write(hdmi->regs, GRL_L_STATUS_0 + i * 4, 0);
+ regmap_write(hdmi->regs, GRL_R_STATUS_0 + i * 4, 0);
}
}
@@ -590,13 +549,13 @@ static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ regmap_read(hdmi->regs, GRL_MIX_CTRL, &val);
if (val & MIX_CTRL_SRC_EN) {
val &= ~MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
usleep_range(255, 512);
val |= MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
}
}
@@ -604,10 +563,10 @@ static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ regmap_read(hdmi->regs, GRL_MIX_CTRL, &val);
val &= ~MIX_CTRL_SRC_EN;
- mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
- mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
+ regmap_write(hdmi->regs, GRL_MIX_CTRL, val);
+ regmap_write(hdmi->regs, GRL_SHIFT_L1, 0x00);
}
static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
@@ -615,7 +574,7 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
{
u32 val;
- val = mtk_hdmi_read(hdmi, GRL_CFG5);
+ regmap_read(hdmi->regs, GRL_CFG5, &val);
val &= CFG5_CD_RATIO_MASK;
switch (mclk) {
@@ -638,7 +597,7 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
val |= CFG5_FS256;
break;
}
- mtk_hdmi_write(hdmi, GRL_CFG5, val);
+ regmap_write(hdmi->regs, GRL_CFG5, val);
}
struct hdmi_acr_n {
@@ -716,15 +675,22 @@ static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
128 * audio_sample_rate);
}
+static void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock,
+ unsigned int *n, unsigned int *cts)
+{
+ *n = hdmi_recommended_n(sample_rate, clock);
+ *cts = hdmi_expected_cts(sample_rate, clock, *n);
+}
+
static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
unsigned int cts)
{
unsigned char val[NCTS_BYTES];
int i;
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
- mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
+ regmap_write(hdmi->regs, GRL_NCTS, 0);
memset(val, 0, sizeof(val));
val[0] = (cts >> 24) & 0xff;
@@ -737,7 +703,7 @@ static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
val[6] = n & 0xff;
for (i = 0; i < NCTS_BYTES; i++)
- mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
+ regmap_write(hdmi->regs, GRL_NCTS, val[i]);
}
static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
@@ -746,14 +712,12 @@ static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
{
unsigned int n, cts;
- n = hdmi_recommended_n(sample_rate, clock);
- cts = hdmi_expected_cts(sample_rate, clock, n);
+ mtk_hdmi_get_ncts(sample_rate, clock, &n, &cts);
dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
__func__, sample_rate, clock, n, cts);
- mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
- AUDIO_I2S_NCTS_SEL);
+ regmap_update_bits(hdmi->regs, DUMMY_304, AUDIO_I2S_NCTS_SEL, AUDIO_I2S_NCTS_SEL_64);
do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
}
@@ -873,7 +837,7 @@ static void mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
bool dst;
mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
- mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
+ regmap_set_bits(hdmi->regs, GRL_MIX_CTRL, MIX_CTRL_FLAT);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
@@ -905,7 +869,7 @@ static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
mtk_hdmi_hw_ncts_enable(hdmi, false);
mtk_hdmi_hw_aud_src_disable(hdmi);
- mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
+ regmap_clear_bits(hdmi->regs, GRL_CFG2, CFG2_ACLK_INV);
if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
switch (sample_rate) {
@@ -1061,20 +1025,6 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
return 0;
}
-static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
-{
- struct hdmi_audio_param *aud_param = &hdmi->aud_param;
-
- aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
- aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
- aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
-
- return 0;
-}
-
static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
{
mtk_hdmi_hw_send_aud_packet(hdmi, true);
@@ -1087,20 +1037,6 @@ static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
hdmi->audio_enable = false;
}
-static int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
- struct hdmi_audio_param *param)
-{
- if (!hdmi->audio_enable) {
- dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
- return -EINVAL;
- }
- dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
- param->aud_codec, param->aud_input_type,
- param->aud_input_chan_type, param->codec_params.sample_rate);
- memcpy(&hdmi->aud_param, param, sizeof(*param));
- return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
-}
-
static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
@@ -1269,6 +1205,7 @@ static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridg
}
static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1281,7 +1218,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
}
if (hdmi->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
+ ret = drm_bridge_attach(encoder, hdmi->next_bridge,
bridge, flags);
if (ret)
return ret;
@@ -1407,30 +1344,20 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
.edid_read = mtk_hdmi_bridge_edid_read,
};
-static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
- struct platform_device *pdev)
+static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *cec_np, *remote, *i2c_np;
struct platform_device *cec_pdev;
- struct regmap *regmap;
+ struct device_node *cec_np;
int ret;
ret = mtk_hdmi_get_all_clk(hdmi, np);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clocks: %d\n", ret);
-
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
/* The CEC module handles HDMI hotplug detection */
cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
- if (!cec_np) {
- dev_err(dev, "Failed to find CEC node\n");
- return -EINVAL;
- }
+ if (!cec_np)
+ return dev_err_probe(dev, -EINVAL, "Failed to find CEC node\n");
cec_pdev = of_find_device_by_node(cec_np);
if (!cec_pdev) {
@@ -1440,82 +1367,77 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
return -EPROBE_DEFER;
}
of_node_put(cec_np);
- hdmi->cec_dev = &cec_pdev->dev;
/*
* The mediatek,syscon-hdmi property contains a phandle link to the
* MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
* registers it contains.
*/
- regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
- ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
- &hdmi->sys_offset);
- if (IS_ERR(regmap))
- ret = PTR_ERR(regmap);
- if (ret) {
- dev_err(dev,
- "Failed to get system configuration registers: %d\n",
- ret);
- goto put_device;
- }
- hdmi->sys_regmap = regmap;
+ hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi",
+ 1, &hdmi->sys_offset);
+ if (IS_ERR(hdmi->sys_regmap))
+ return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap),
+ "Failed to get system configuration registers\n");
- hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hdmi->regs)) {
- ret = PTR_ERR(hdmi->regs);
- goto put_device;
- }
+ hdmi->cec_dev = &cec_pdev->dev;
+ return 0;
+}
+
+static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *remote, *i2c_np;
+ int ret;
+
+ ret = mtk_hdmi_get_all_clk(hdmi, np);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ hdmi->regs = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
remote = of_graph_get_remote_node(np, 1, 0);
- if (!remote) {
- ret = -EINVAL;
- goto put_device;
- }
+ if (!remote)
+ return -EINVAL;
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
- ret = -EPROBE_DEFER;
- goto put_device;
+ return -EPROBE_DEFER;
}
}
i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- if (!i2c_np) {
- dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
- remote);
- of_node_put(remote);
- ret = -EINVAL;
- goto put_device;
- }
of_node_put(remote);
+ if (!i2c_np)
+ return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n");
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
of_node_put(i2c_np);
- if (!hdmi->ddc_adpt) {
- dev_err(dev, "Failed to get ddc i2c adapter by node\n");
- ret = -EINVAL;
- goto put_device;
- }
+ if (!hdmi->ddc_adpt)
+ return dev_err_probe(dev, -EINVAL, "Failed to get ddc i2c adapter by node\n");
+
+ ret = mtk_hdmi_get_cec_dev(hdmi, dev, np);
+ if (ret)
+ return ret;
return 0;
-put_device:
- put_device(hdmi->cec_dev);
- return ret;
}
/*
* HDMI audio codec callbacks
*/
-static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+static int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
- struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- struct hdmi_audio_param hdmi_params;
+ struct hdmi_audio_param aud_params = { 0 };
unsigned int chan = params->cea.channels;
dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
@@ -1526,16 +1448,16 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (chan) {
case 2:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
break;
case 4:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
break;
case 6:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
break;
case 8:
- hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
break;
default:
dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
@@ -1559,27 +1481,45 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (daifmt->fmt) {
case HDMI_I2S:
- hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
- hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
case HDMI_SPDIF:
- hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
break;
default:
dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
daifmt->fmt);
return -EINVAL;
}
+ memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params));
+ memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params));
+
+ dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
+ aud_params.aud_codec, aud_params.aud_input_type,
+ aud_params.aud_input_chan_type, aud_params.codec_params.sample_rate);
- memcpy(&hdmi_params.codec_params, params,
- sizeof(hdmi_params.codec_params));
+ return 0;
+}
+
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
+ if (!hdmi->audio_enable) {
+		dev_err(hdmi->dev, "hdmi audio is disabled!\n");
+ return -EINVAL;
+ }
+
+ mtk_hdmi_audio_params(hdmi, daifmt, params);
+ mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
return 0;
}
@@ -1625,17 +1565,22 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
return 0;
}
-static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
+static void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
- struct mtk_hdmi *hdmi = data;
-
mutex_lock(&hdmi->update_plugged_status_lock);
hdmi->plugged_cb = fn;
hdmi->codec_dev = codec_dev;
mutex_unlock(&hdmi->update_plugged_status_lock);
+}
+
+static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+	struct mtk_hdmi *hdmi = data;
+
+ mtk_hdmi_audio_set_plugged_cb(hdmi, fn, codec_dev);
mtk_hdmi_update_plugged_status(hdmi);
return 0;
@@ -1658,6 +1603,7 @@ static void mtk_hdmi_unregister_audio_driver(void *data)
static int mtk_hdmi_register_audio_driver(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ struct hdmi_audio_param *aud_param = &hdmi->aud_param;
struct hdmi_codec_pdata codec_data = {
.ops = &mtk_hdmi_audio_codec_ops,
.max_i2s_channels = 2,
@@ -1667,6 +1613,13 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
};
int ret;
+ aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+
hdmi->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
@@ -1708,11 +1661,6 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
mutex_init(&hdmi->update_plugged_status_lock);
platform_set_drvdata(pdev, hdmi);
- ret = mtk_hdmi_output_init(hdmi);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to initialize hdmi output\n");
-
ret = mtk_hdmi_register_audio_driver(dev);
if (ret)
return dev_err_probe(dev, ret,
@@ -1789,28 +1737,9 @@ static struct platform_driver mtk_hdmi_driver = {
.pm = &mtk_hdmi_pm_ops,
},
};
-
-static struct platform_driver * const mtk_hdmi_drivers[] = {
- &mtk_hdmi_ddc_driver,
- &mtk_cec_driver,
- &mtk_hdmi_driver,
-};
-
-static int __init mtk_hdmitx_init(void)
-{
- return platform_register_drivers(mtk_hdmi_drivers,
- ARRAY_SIZE(mtk_hdmi_drivers));
-}
-
-static void __exit mtk_hdmitx_exit(void)
-{
- platform_unregister_drivers(mtk_hdmi_drivers,
- ARRAY_SIZE(mtk_hdmi_drivers));
-}
-
-module_init(mtk_hdmitx_init);
-module_exit(mtk_hdmitx_exit);
+module_platform_driver(mtk_hdmi_driver);
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI Driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DRM_MTK_HDMI_V1");
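
Aside: the probe rework in mtk_hdmi.c above replaces the put_device() goto ladder with early returns through dev_err_probe(), which logs the failure and hands the error code straight back (and stays quiet for -EPROBE_DEFER). A minimal userspace sketch of that pattern; dev_err_probe() here is a local stand-in with the same "log, then return the error" contract, and the device name and lookup helper are made up for illustration:

#include <stdio.h>

#define EINVAL 22

/* Userspace stand-in for the kernel helper: log, then hand back err */
static int dev_err_probe(const char *dev, int err, const char *msg)
{
	fprintf(stderr, "%s: %s (err %d)\n", dev, msg, err);
	return err;
}

static int find_ddc_node(void) { return 0; }	/* pretend the lookup fails */

static int probe(void)
{
	/* Early return: no cleanup label needed on this path */
	if (!find_ddc_node())
		return dev_err_probe("mtk-hdmi", -EINVAL,
				     "No ddc-i2c-bus in connector");
	return 0;
}

int main(void)
{
	return probe() ? 1 : 0;
}
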
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
deleted file mode 100644
index 472bf141c92b..000000000000
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014 MediaTek Inc.
- * Author: Jie Qiu <jie.qiu@mediatek.com>
- */
-#ifndef _MTK_HDMI_CTRL_H
-#define _MTK_HDMI_CTRL_H
-
-struct platform_driver;
-
-extern struct platform_driver mtk_cec_driver;
-extern struct platform_driver mtk_hdmi_ddc_driver;
-
-#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 07db68067844..6358e1af69b4 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -20,7 +20,6 @@
#include <linux/of_platform.h>
#include "mtk_drm_drv.h"
-#include "mtk_hdmi.h"
#define SIF1_CLOK (288)
#define DDC_DDCMCTL0 (0x0)
@@ -337,6 +336,7 @@ struct platform_driver mtk_hdmi_ddc_driver = {
.of_match_table = mtk_hdmi_ddc_match,
},
};
+module_platform_driver(mtk_hdmi_ddc_driver);
MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index e79f7c3ce32e..c9678dc68fa1 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -83,12 +83,13 @@ meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
}
static int meson_encoder_cvbs_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
- return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge,
+ return drm_bridge_attach(encoder, meson_encoder_cvbs->next_bridge,
&meson_encoder_cvbs->bridge, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index fe204437bd65..3db518e5f95d 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -33,11 +33,12 @@ struct meson_encoder_dsi {
container_of(x, struct meson_encoder_dsi, bridge)
static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_dsi->next_bridge,
&encoder_dsi->bridge, flags);
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 7752d8ac85f0..ab08d690d882 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -49,11 +49,12 @@ struct meson_encoder_hdmi {
container_of(x, struct meson_encoder_hdmi, bridge)
static int meson_encoder_hdmi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
- return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge,
+ return drm_bridge_attach(encoder, encoder_hdmi->next_bridge,
&encoder_hdmi->bridge, flags);
}
@@ -75,7 +76,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
unsigned long long venc_freq;
unsigned long long hdmi_freq;
- vclk_freq = mode->clock * 1000;
+ vclk_freq = mode->clock * 1000ULL;
/* For 420, pixel clock is half unlike venc clock */
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
@@ -108,7 +109,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
venc_freq /= 2;
dev_dbg(priv->dev,
- "vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
+ "phy:%lluHz vclk=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
@@ -123,7 +124,7 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
- unsigned long long clock = mode->clock * 1000;
+ unsigned long long clock = mode->clock * 1000ULL;
unsigned long long phy_freq;
unsigned long long vclk_freq;
unsigned long long venc_freq;
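
Aside: the "* 1000ULL" changes in meson_encoder_hdmi.c force the kHz-to-Hz conversion into 64-bit arithmetic. drm_display_mode.clock is a 32-bit int holding kHz, so "mode->clock * 1000" is evaluated as a 32-bit multiply before being widened for the assignment, and would wrap (signed overflow) for pixel clocks above INT_MAX Hz; the ULL suffix promotes the multiply to 64 bits first. A small sketch of the failure mode, with a deliberately out-of-range hypothetical clock:

#include <stdio.h>

int main(void)
{
	int clock_khz = 2500000;	/* hypothetical 2.5 GHz clock, in kHz */

	/* 32-bit multiply overflows before the widening assignment */
	unsigned long long bad = clock_khz * 1000;
	/* 1000ULL makes the multiply 64-bit from the start */
	unsigned long long good = clock_khz * 1000ULL;

	printf("bad  = %llu Hz\n", bad);	/* wrapped garbage */
	printf("good = %llu Hz\n", good);	/* 2500000000 Hz */
	return 0;
}
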
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 3325580d885d..dfe0c28a0f05 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -110,10 +110,7 @@
#define HDMI_PLL_LOCK BIT(31)
#define HDMI_PLL_LOCK_G12A (3 << 30)
-#define PIXEL_FREQ_1000_1001(_freq) \
- DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
-#define PHY_FREQ_1000_1001(_freq) \
- (PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)
+#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
/* VID PLL Dividers */
enum {
@@ -772,6 +769,36 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
pll_freq);
}
+static bool meson_vclk_freqs_are_matching_param(unsigned int idx,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq)
+{
+ DRM_DEBUG_DRIVER("i = %d vclk_freq = %lluHz alt = %lluHz\n",
+ idx, params[idx].vclk_freq,
+ FREQ_1000_1001(params[idx].vclk_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
+ idx, params[idx].phy_freq,
+ FREQ_1000_1001(params[idx].phy_freq));
+
+ /* Match strict frequency */
+ if (phy_freq == params[idx].phy_freq &&
+ vclk_freq == params[idx].vclk_freq)
+ return true;
+
+ /* Match 1000/1001 variant: vclk deviation has to be less than 1kHz
+ * (drm EDID is defined in 1kHz steps, so everything smaller must be
+ * rounding error) and the PHY freq deviation has to be less than
+ * 10kHz (as the TMDS clock is 10 times the pixel clock, so anything
+ * smaller must be rounding error as well).
+ */
+ if (abs(vclk_freq - FREQ_1000_1001(params[idx].vclk_freq)) < 1000 &&
+ abs(phy_freq - FREQ_1000_1001(params[idx].phy_freq)) < 10000)
+ return true;
+
+ /* no match */
+ return false;
+}
+
enum drm_mode_status
meson_vclk_vic_supported_freq(struct meson_drm *priv,
unsigned long long phy_freq,
@@ -790,19 +817,7 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv,
}
for (i = 0 ; params[i].pixel_freq ; ++i) {
- DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n",
- i, params[i].pixel_freq,
- PIXEL_FREQ_1000_1001(params[i].pixel_freq));
- DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
- i, params[i].phy_freq,
- PHY_FREQ_1000_1001(params[i].phy_freq));
- /* Match strict frequency */
- if (phy_freq == params[i].phy_freq &&
- vclk_freq == params[i].vclk_freq)
- return MODE_OK;
- /* Match 1000/1001 variant */
- if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) &&
- vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq))
+ if (meson_vclk_freqs_are_matching_param(i, phy_freq, vclk_freq))
return MODE_OK;
}
@@ -1075,10 +1090,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
- if ((phy_freq == params[freq].phy_freq ||
- phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) &&
- (vclk_freq == params[freq].vclk_freq ||
- vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (meson_vclk_freqs_are_matching_param(freq, phy_freq,
+ vclk_freq)) {
if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
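
Aside: the matcher factored out above accepts near-miss frequencies because CEA modes come in exact and 1000/1001 ("NTSC pull-down") variants, DRM mode clocks are quantized to 1 kHz, and the PHY/TMDS clock is 10x the pixel clock, hence the 1 kHz and 10 kHz tolerances. A standalone sketch of the same arithmetic; DIV_ROUND_CLOSEST_ULL is re-implemented locally and the frequencies are the standard 1080p values:

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's DIV_ROUND_CLOSEST_ULL() */
static unsigned long long div_round_closest_ull(unsigned long long a,
						unsigned long long b)
{
	return (a + b / 2) / b;
}

#define FREQ_1000_1001(f) div_round_closest_ull((f) * 1000ULL, 1001ULL)

int main(void)
{
	/* Table entry: 148.5 MHz pixel clock, 1.485 GHz PHY clock */
	unsigned long long vclk = 148500000ULL, phy = 1485000000ULL;
	/* A 59.94 Hz request arrives kHz-rounded: 148352 kHz and 10x that */
	unsigned long long req_vclk = 148352000ULL;
	unsigned long long req_phy = req_vclk * 10;

	printf("alt vclk = %llu Hz, alt phy = %llu Hz\n",
	       FREQ_1000_1001(vclk), FREQ_1000_1001(phy));

	/* Same tolerances as meson_vclk_freqs_are_matching_param() */
	if (llabs((long long)(req_vclk - FREQ_1000_1001(vclk))) < 1000 &&
	    llabs((long long)(req_phy - FREQ_1000_1001(phy))) < 10000)
		printf("matched as 1000/1001 variant of 148.5 MHz\n");
	return 0;
}
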
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 412dcbea0e2d..a962ae564a75 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_MGAG200
tristate "Matrox G200"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/mgag200/mgag200_ddc.c b/drivers/gpu/drm/mgag200/mgag200_ddc.c
index 6d81ea8931e8..c31673eaa554 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ddc.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ddc.c
@@ -26,7 +26,6 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 974bc7c0ea76..7f127e2ae442 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -104,6 +104,7 @@ config DRM_MSM_DPU
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
depends on DRM_MSM
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select RATIONAL
default y
help
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5df20cbeafb8..7a2ada6e2d74 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -48,7 +48,6 @@ msm-display-$(CONFIG_DRM_MSM_MDP4) += \
disp/mdp4/mdp4_dsi_encoder.o \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
- disp/mdp4/mdp4_lvds_connector.o \
disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
index 9ddb7b31fd98..5ddd015f930d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
@@ -45,8 +45,3 @@ static const struct adreno_info a2xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a2xx);
-
-MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
-MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
-MODULE_FIRMWARE("qcom/yamato_pfp.fw");
-MODULE_FIRMWARE("qcom/yamato_pm4.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index 39641551eeb6..4280f71e472a 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -71,10 +71,6 @@ static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
return 0;
}
-static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
-{
-}
-
static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
@@ -90,7 +86,6 @@ static const struct msm_mmu_funcs funcs = {
.map = a2xx_gpummu_map,
.unmap = a2xx_gpummu_unmap,
.destroy = a2xx_gpummu_destroy,
- .resume_translation = a2xx_gpummu_resume_translation,
};
struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 2eb6c3e93748..1498e6532f62 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -85,8 +85,3 @@ static const struct adreno_info a3xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a3xx);
-
-MODULE_FIRMWARE("qcom/a300_pm4.fw");
-MODULE_FIRMWARE("qcom/a300_pfp.fw");
-MODULE_FIRMWARE("qcom/a330_pm4.fw");
-MODULE_FIRMWARE("qcom/a330_pfp.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
index 93519f807f87..09f9f228b75e 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
@@ -45,6 +45,3 @@ static const struct adreno_info a4xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a4xx);
-
-MODULE_FIRMWARE("qcom/a420_pm4.fw");
-MODULE_FIRMWARE("qcom/a420_pfp.fw");
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
index 633f31539162..b48a636d8237 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
@@ -150,12 +150,3 @@ static const struct adreno_info a5xx_gpus[] = {
}
};
DECLARE_ADRENO_GPULIST(a5xx);
-
-MODULE_FIRMWARE("qcom/a530_pm4.fw");
-MODULE_FIRMWARE("qcom/a530_pfp.fw");
-MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
-MODULE_FIRMWARE("qcom/a530_zap.mdt");
-MODULE_FIRMWARE("qcom/a530_zap.b00");
-MODULE_FIRMWARE("qcom/a530_zap.b01");
-MODULE_FIRMWARE("qcom/a530_zap.b02");
-MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 650e5bac225f..60aef0796236 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -131,6 +131,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
ring->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 36f72c43eae8..b5f9d40687d5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -79,7 +79,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
static void a5xx_preempt_timer(struct timer_list *t)
{
- struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
+ struct a5xx_gpu *a5xx_gpu = timer_container_of(a5xx_gpu, t,
+ preempt_timer);
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 53e2ff4406d8..70f7ad806c34 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -681,6 +681,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
},
.gmem = (SZ_128K + SZ_4K),
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a610_zap.mdt",
@@ -713,6 +714,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -743,7 +745,8 @@ static const struct adreno_info a6xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mbn",
.a6xx = &(const struct a6xx_info) {
@@ -769,7 +772,8 @@ static const struct adreno_info a6xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
@@ -791,6 +795,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -815,6 +820,7 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
@@ -838,8 +844,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -874,7 +881,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 137, 1 },
@@ -907,7 +913,6 @@ static const struct adreno_info a6xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@@ -920,8 +925,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -939,8 +945,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_1M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -973,7 +980,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 1, 1 },
@@ -1000,7 +1006,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020000,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06060300),
.family = ADRENO_6XX_GEN4,
@@ -1019,7 +1024,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00300200,
},
- .address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
@@ -1039,7 +1043,6 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00200200,
},
- .address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
@@ -1056,8 +1059,9 @@ static const struct adreno_info a6xx_gpus[] = {
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_2M,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
@@ -1085,22 +1089,10 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00800200,
},
- .address_space_size = SZ_16G,
}
};
DECLARE_ADRENO_GPULIST(a6xx);
-MODULE_FIRMWARE("qcom/a615_zap.mbn");
-MODULE_FIRMWARE("qcom/a619_gmu.bin");
-MODULE_FIRMWARE("qcom/a630_sqe.fw");
-MODULE_FIRMWARE("qcom/a630_gmu.bin");
-MODULE_FIRMWARE("qcom/a630_zap.mbn");
-MODULE_FIRMWARE("qcom/a640_gmu.bin");
-MODULE_FIRMWARE("qcom/a650_gmu.bin");
-MODULE_FIRMWARE("qcom/a650_sqe.fw");
-MODULE_FIRMWARE("qcom/a660_gmu.bin");
-MODULE_FIRMWARE("qcom/a660_sqe.fw");
-
static const struct adreno_reglist a702_hwcg[] = {
{ REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222 },
{ REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220 },
@@ -1395,7 +1387,6 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_cgc_mode = 0x00020000,
},
- .address_space_size = SZ_16G,
.preempt_record_size = 2860 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
@@ -1429,7 +1420,6 @@ static const struct adreno_info a7xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
.preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */
@@ -1451,7 +1441,6 @@ static const struct adreno_info a7xx_gpus[] = {
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
},
- .address_space_size = SZ_256G,
.preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
@@ -1484,7 +1473,6 @@ static const struct adreno_info a7xx_gpus[] = {
{ /* sentinel */ },
},
},
- .address_space_size = SZ_16G,
.preempt_record_size = 3572 * SZ_1K,
}
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c8711938a5f4..38c0f8ef85c3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1064,14 +1064,6 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
- /* Notify AOSS about the ACD state (unimplemented for now => disable it) */
- if (!IS_ERR(gmu->qmp)) {
- ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
- 0 /* Hardcode ACD to be disabled for now */);
- if (ret)
- dev_err(gmu->dev, "failed to send GPU ACD state\n");
- }
-
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
@@ -1671,6 +1663,75 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
return a6xx_gmu_rpmh_votes_init(gmu);
}
+static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct a6xx_hfi_acd_table *cmd = &gmu->acd_table;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret, i, cmd_idx = 0;
+ extern bool disable_acd;
+
+ /* Skip ACD probe if requested via module param */
+ if (disable_acd) {
+ DRM_DEV_ERROR(gmu->dev, "Skipping GPU ACD probe\n");
+ return 0;
+ }
+
+ cmd->version = 1;
+ cmd->stride = 1;
+ cmd->enable_by_level = 0;
+
+ /* Skip freq = 0 and parse acd-level for rest of the OPPs */
+ for (i = 1; i < gmu->nr_gpu_freqs; i++) {
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ unsigned long freq;
+ u32 val;
+
+ freq = gmu->gpu_freqs[i];
+ opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
+ np = dev_pm_opp_get_of_node(opp);
+
+ ret = of_property_read_u32(np, "qcom,opp-acd-level", &val);
+ of_node_put(np);
+ dev_pm_opp_put(opp);
+ if (ret == -EINVAL)
+ continue;
+ else if (ret) {
+			DRM_DEV_ERROR(gmu->dev, "Unable to read ACD level for freq %lu\n", freq);
+ return ret;
+ }
+
+ cmd->enable_by_level |= BIT(i);
+ cmd->data[cmd_idx++] = val;
+ }
+
+ cmd->num_levels = cmd_idx;
+
+ /* It is a problem if qmp node is unavailable when ACD is required */
+ if (cmd->enable_by_level && IS_ERR_OR_NULL(gmu->qmp)) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to send ACD state to AOSS\n");
+ return -EINVAL;
+ }
+
+ /* Otherwise, nothing to do if qmp is unavailable */
+ if (IS_ERR_OR_NULL(gmu->qmp))
+ return 0;
+
+ /*
+	 * Notify AOSS about the ACD state. AOSS is supposed to assume that
+	 * ACD is disabled on system reset, so it is harmless if we fail to
+	 * notify the 'OFF' state.
+ */
+ ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}", !!cmd->enable_by_level);
+ if (ret && cmd->enable_by_level) {
+ DRM_DEV_ERROR(gmu->dev, "Failed to send ACD state to AOSS\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
@@ -1989,10 +2050,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto detach_cxpd;
}
+ /* Other errors are handled during GPU ACD probe */
gmu->qmp = qmp_get(gmu->dev);
- if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
- ret = PTR_ERR(gmu->qmp);
- goto remove_device_link;
+ if (PTR_ERR_OR_ZERO(gmu->qmp) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto detach_gxpd;
}
init_completion(&gmu->pd_gate);
@@ -2008,6 +2070,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
+ ret = a6xx_gmu_acd_probe(gmu);
+ if (ret)
+ goto detach_gxpd;
+
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
@@ -2018,7 +2084,13 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
-remove_device_link:
+detach_gxpd:
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ dev_pm_domain_detach(gmu->gxpd, false);
+
+ if (!IS_ERR_OR_NULL(gmu->qmp))
+ qmp_put(gmu->qmp);
+
device_link_del(link);
detach_cxpd:
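
Aside: a6xx_gmu_acd_probe() above builds the HFI ACD table by walking the GPU OPPs: level 0 (freq == 0) is skipped, each OPP that carries a qcom,opp-acd-level property sets the matching bit in enable_by_level, and the property values are packed densely into data[]. A userspace sketch of that packing, with made-up frequencies/levels standing in for the device tree (-1 models an absent property):

#include <stdio.h>
#include <stdint.h>

#define MAX_ACD_STRIDE 2

struct acd_table {
	uint32_t enable_by_level;
	uint32_t num_levels;
	uint32_t data[16 * MAX_ACD_STRIDE];
};

int main(void)
{
	/* Hypothetical OPP levels; -1 means "no qcom,opp-acd-level" */
	long acd_level[] = { -1 /* freq 0 */, -1, 0xa02, 0xa02, 0xc01 };
	int nr = sizeof(acd_level) / sizeof(acd_level[0]);
	struct acd_table t = { 0 };
	int i, cmd_idx = 0;

	for (i = 1; i < nr; i++) {	/* skip freq = 0, as the driver does */
		if (acd_level[i] < 0)
			continue;	/* property absent: leave bit clear */
		t.enable_by_level |= 1u << i;
		t.data[cmd_idx++] = (uint32_t)acd_level[i];
	}
	t.num_levels = cmd_idx;

	/* Prints: enable_by_level = 0x1c, num_levels = 3 */
	printf("enable_by_level = 0x%x, num_levels = %u\n",
	       t.enable_by_level, t.num_levels);
	return 0;
}
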
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 0c888b326cfb..b2d4489b4024 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -93,6 +93,7 @@ struct a6xx_gmu {
int nr_gpu_freqs;
unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+ struct a6xx_hfi_acd_table acd_table;
int nr_gpu_bws;
unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 242d02d48c0c..491fde0083a2 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -130,6 +130,20 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno - 1);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+ /* Reset state used to synchronize BR and BV */
+ OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1);
+ OUT_RING(ring,
+ CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS |
+ CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE |
+ CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER |
+ CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BR);
}
if (!sysprof) {
@@ -212,6 +226,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
a6xx_set_pagetable(a6xx_gpu, ring, submit);
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -335,6 +351,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
/*
* Toggle concurrent binning for pagetable switch and set the thread to
* BR since only it can execute the pagetable switch packets.
@@ -655,7 +673,6 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_7c3(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.amsbc = 1;
- gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
gpu->ubwc_config.macrotile_mode = 1;
}
@@ -2268,7 +2285,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL,
+ "gpu", ADRENO_VM_START,
adreno_private_address_space_size(gpu));
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 0989aee3dd2c..8e69b1e84657 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -100,16 +100,14 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return 0;
}
-static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
- u32 *payload, u32 payload_size)
+static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seqnum)
{
- struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
- u32 val;
int ret;
+ u32 val;
/* Wait for a response */
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
- val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);
if (ret) {
DRM_DEV_ERROR(gmu->dev,
@@ -122,6 +120,19 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
+ return 0;
+}
+
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ int ret;
+
+ ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
+ if (ret)
+ return ret;
+
for (;;) {
struct a6xx_hfi_msg_response resp;
@@ -129,12 +140,18 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
sizeof(resp) >> 2);
- /* If the queue is empty our response never made it */
+		/*
+		 * If the queue is empty, the response to our packet may have
+		 * been preceded by responses that we missed. Wait for another
+		 * interrupt before giving up.
+		 */
if (!ret) {
- DRM_DEV_ERROR(gmu->dev,
- "The HFI response queue is unexpectedly empty\n");
-
- return -ENOENT;
+ ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
+ return ret;
+ }
+ continue;
}
if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
@@ -748,6 +765,38 @@ send:
NULL, 0);
}
+#define HFI_FEATURE_ACD 12
+
+static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
+ struct a6xx_hfi_msg_feature_ctrl msg = {
+ .feature = HFI_FEATURE_ACD,
+ .enable = 1,
+ .data = 0,
+ };
+ int ret;
+
+ if (!acd_table->enable_by_level)
+ return 0;
+
+ /* Enable ACD feature at GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
+ return ret;
+ }
+
+ /* Send ACD table to GMU */
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0);
+ if (ret) {
+		DRM_DEV_ERROR(gmu->dev, "Unable to send ACD table (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_test msg = { 0 };
@@ -845,6 +894,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
if (ret)
return ret;
+ ret = a6xx_hfi_enable_acd(gmu);
+ if (ret)
+ return ret;
+
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
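
Aside: the a6xx_hfi_wait_for_ack() change above turns "response queue empty" from an immediate -ENOENT into a re-wait: an empty queue can simply mean earlier responses were drained before ours landed, so the code waits for another MSGQ interrupt and re-reads. A sketch of that control flow with the hardware stubbed out; wait_for_irq() and queue_read() are local stand-ins, not kernel APIs:

#include <stdio.h>

/* Stubs: the first interrupt fires before our response is queued */
static int irqs_left = 2;
static int wait_for_irq(void) { return irqs_left-- > 0 ? 0 : -110; /* -ETIMEDOUT */ }
static int queue_read(void) { return irqs_left == 0 ? 1 : 0; /* msg on 2nd pass */ }

int main(void)
{
	if (wait_for_irq())
		return 1;

	for (;;) {
		if (!queue_read()) {
			/* Empty queue: wait again instead of failing */
			if (wait_for_irq()) {
				fprintf(stderr, "response queue unexpectedly empty\n");
				return 1;
			}
			continue;
		}
		printf("got response\n");
		break;
	}
	return 0;
}
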
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 52ba4a07d7b9..653ef720e2da 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -151,12 +151,33 @@ struct a6xx_hfi_msg_test {
u32 header;
};
+#define HFI_H2F_MSG_ACD 7
+#define MAX_ACD_STRIDE 2
+
+struct a6xx_hfi_acd_table {
+ u32 header;
+ u32 version;
+ u32 enable_by_level;
+ u32 stride;
+ u32 num_levels;
+ u32 data[16 * MAX_ACD_STRIDE];
+};
+
#define HFI_H2F_MSG_START 10
struct a6xx_hfi_msg_start {
u32 header;
};
+#define HFI_H2F_FEATURE_CTRL 11
+
+struct a6xx_hfi_msg_feature_ctrl {
+ u32 header;
+ u32 feature;
+ u32 enable;
+ u32 data;
+};
+
#define HFI_H2F_MSG_CORE_FW_START 14
struct a6xx_hfi_msg_core_fw_start {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 9b5e27d2373c..3b17fd2dba89 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -87,7 +87,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
static void a6xx_preempt_timer(struct timer_list *t)
{
- struct a6xx_gpu *a6xx_gpu = from_timer(a6xx_gpu, t, preempt_timer);
+ struct a6xx_gpu *a6xx_gpu = timer_container_of(a6xx_gpu, t,
+ preempt_timer);
struct msm_gpu *gpu = &a6xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 236b25c094cd..16e7ac444efd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -24,6 +24,10 @@ int enable_preemption = -1;
MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
module_param(enable_preemption, int, 0600);
+bool disable_acd;
+MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
+module_param_unsafe(disable_acd, bool, 0400);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
@@ -133,9 +137,8 @@ err_disable_rpm:
return NULL;
}
-static int find_chipid(struct device *dev, uint32_t *chipid)
+static int find_chipid(struct device_node *node, uint32_t *chipid)
{
- struct device_node *node = dev->of_node;
const char *compat;
int ret;
@@ -169,15 +172,36 @@ static int find_chipid(struct device *dev, uint32_t *chipid)
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", chipid);
if (ret) {
- DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
+ DRM_ERROR("%pOF: could not parse qcom,chipid: %d\n",
+ node, ret);
return ret;
}
- dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+ pr_warn("%pOF: Using legacy qcom,chipid binding!\n", node);
return 0;
}
+bool adreno_has_gpu(struct device_node *node)
+{
+ const struct adreno_info *info;
+ uint32_t chip_id;
+ int ret;
+
+ ret = find_chipid(node, &chip_id);
+ if (ret)
+ return false;
+
+ info = adreno_info(chip_id);
+ if (!info) {
+ pr_warn("%pOF: Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
+ node, ADRENO_CHIPID_ARGS(chip_id));
+ return false;
+ }
+
+ return true;
+}
+
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
@@ -187,19 +211,18 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
struct msm_gpu *gpu;
int ret;
- ret = find_chipid(dev, &config.chip_id);
- if (ret)
+ ret = find_chipid(dev->of_node, &config.chip_id);
+ /* We shouldn't have gotten this far if we can't parse the chip_id */
+ if (WARN_ON(ret))
return ret;
dev->platform_data = &config;
priv->gpu_pdev = to_platform_device(dev);
info = adreno_info(config.chip_id);
- if (!info) {
- dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
- ADRENO_CHIPID_ARGS(config.chip_id));
+ /* We shouldn't have gotten this far if we don't recognize the GPU: */
+ if (WARN_ON(!info))
return -ENXIO;
- }
config.info = info;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 26db1f4b5fb9..86bff915c3e7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -236,34 +236,77 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
u64 adreno_private_address_space_size(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+ const struct io_pgtable_cfg *ttbr1_cfg;
if (address_space_size)
return address_space_size;
- if (adreno_gpu->info->address_space_size)
- return adreno_gpu->info->address_space_size;
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_4GB_VA)
+ return SZ_4G;
- return SZ_4G;
+ if (!adreno_smmu || !adreno_smmu->get_ttbr1_cfg)
+ return SZ_4G;
+
+ ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+
+ /*
+ * Userspace VM is actually using TTBR0, but both are the same size,
+ * with b48 (sign bit) selecting which TTBRn to use. So if IAS is
+ * 48, the total (kernel+user) address space size is effectively
+	 * 49 bits. But what userspace is in control of is the lower 48.
+ */
+ return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
+}
+
+void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ unsigned long flags;
+
+ /*
+ * Wait until the cooldown period has passed and we would actually
+ * collect a crashdump to re-enable stall-on-fault.
+ */
+ spin_lock_irqsave(&priv->fault_stall_lock, flags);
+ if (!priv->stall_enabled &&
+ ktime_after(ktime_get(), priv->stall_reenable_time) &&
+ !READ_ONCE(gpu->crashstate)) {
+ priv->stall_enabled = true;
+
+ gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, true);
+ }
+ spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
}
#define ARM_SMMU_FSR_TF BIT(1)
#define ARM_SMMU_FSR_PF BIT(3)
#define ARM_SMMU_FSR_EF BIT(4)
+#define ARM_SMMU_FSR_SS BIT(30)
int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4])
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
const char *type = "UNKNOWN";
- bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+ bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
+ !READ_ONCE(gpu->crashstate);
+ unsigned long irq_flags;
/*
- * If we aren't going to be resuming later from fault_worker, then do
- * it now.
+ * In case there is a subsequent storm of pagefaults, disable
+ * stall-on-fault for at least half a second.
*/
- if (!do_devcoredump) {
- gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+ spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+ if (priv->stall_enabled) {
+ priv->stall_enabled = false;
+
+ gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, false);
}
+ priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
+ spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
/*
* Print a default message if we couldn't get the data from the
@@ -291,16 +334,18 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
scratch[0], scratch[1], scratch[2], scratch[3]);
if (do_devcoredump) {
+ struct msm_gpu_fault_info fault_info = {};
+
/* Turn off the hangcheck timer to keep it from bothering us */
timer_delete(&gpu->hangcheck_timer);
- gpu->fault_info.ttbr0 = info->ttbr0;
- gpu->fault_info.iova = iova;
- gpu->fault_info.flags = flags;
- gpu->fault_info.type = type;
- gpu->fault_info.block = block;
+ fault_info.ttbr0 = info->ttbr0;
+ fault_info.iova = iova;
+ fault_info.flags = flags;
+ fault_info.type = type;
+ fault_info.block = block;
- kthread_queue_work(gpu->worker, &gpu->fault_work);
+ msm_gpu_fault_crashstate_capture(gpu, &fault_info);
}
return 0;
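
Aside: with info->address_space_size gone, adreno_private_address_space_size() above either clamps to 4 GiB (ADRENO_QUIRK_4GB_VA) or derives the span from the SMMU's TTBR1 input address size as BIT(ias) - ADRENO_VM_START. Worked numbers for the common ias = 48 case; the IAS value is an example, the real one comes from the io-pgtable config:

#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))
#define ADRENO_VM_START	0x100000000ULL	/* 4 GiB, keeps the VA above 32 bits */

int main(void)
{
	unsigned int ias = 48;	/* example TTBR1 input address size */
	unsigned long long size = BIT_ULL(ias) - ADRENO_VM_START;

	/* 2^48 - 2^32 = 281470681743360 bytes (just under 256 TiB) */
	printf("private VA size = %llu bytes\n", size);
	return 0;
}
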
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 92caba3584da..bc063594a359 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -57,6 +57,7 @@ enum adreno_family {
#define ADRENO_QUIRK_HAS_HW_APRIV BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
#define ADRENO_QUIRK_PREEMPTION BIT(5)
+#define ADRENO_QUIRK_4GB_VA BIT(6)
/* Helper for formating the chip_id in the way that userspace tools like
* crashdec expect.
@@ -104,7 +105,6 @@ struct adreno_info {
union {
const struct a6xx_info *a6xx;
};
- u64 address_space_size;
/**
* @speedbins: Optional table of fuse to speedbin mappings
*
@@ -578,6 +578,8 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
adreno_is_a740_family(gpu);
}
+/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
+#define ADRENO_VM_START 0x100000000ULL
u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
@@ -634,6 +636,8 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4]);
+void adreno_check_and_reenable_stall(struct adreno_gpu *gpu);
+
int adreno_read_speedbin(struct device *dev, u32 *speedbin);
/*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index 6ac97c378056..ffc4d4257ae5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -27,17 +27,16 @@ static const struct dpu_mdp_cfg sm8650_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8650_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1000,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1000,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index ad60089f18ea..39027a21c6fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -100,14 +100,12 @@ static const struct dpu_pingpong_cfg msm8937_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index a1cf89a0a42d..8d1b43ea1663 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -93,7 +93,6 @@ static const struct dpu_pingpong_cfg msm8917_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index eea9b80e2287..16c12499b24b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -100,14 +100,12 @@ static const struct dpu_pingpong_cfg msm8953_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_MASK,
.sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index ae18a354e5d2..91f514d28ac6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -181,15 +181,15 @@ static const struct dpu_pingpong_cfg msm8996_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_MSM8996_TE2_MASK,
- .sblk = &msm8996_pp_sblk_te,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_MSM8996_TE2_MASK,
- .sblk = &msm8996_pp_sblk_te,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 746474679ef5..413cd59dc0c4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -170,15 +170,15 @@ static const struct dpu_pingpong_cfg msm8998_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index bb89da0a481d..b2eb7ca699e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -141,15 +141,15 @@ static const struct dpu_pingpong_cfg sdm660_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index 7caf876ca3e3..85e121ad84a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -115,14 +115,14 @@ static const struct dpu_pingpong_cfg sdm630_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
- .features = PINGPONG_SDM845_MASK,
+ .features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index ab7b4822ca63..49363d7d5b93 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -194,15 +194,15 @@ static const struct dpu_pingpong_cfg sdm845_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
- .features = PINGPONG_SDM845_TE2_MASK,
- .sblk = &sdm845_pp_sblk_te,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
.intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
}, {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 979527d98fbc..08d38e1d420c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -37,17 +37,16 @@ static const struct dpu_mdp_cfg sm8150_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index d76b8992a6c1..d6f8b1030c68 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -41,12 +41,12 @@ static const struct dpu_ctl_cfg sc8180x_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -91,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -99,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -107,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -115,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -123,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -131,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
index 83db11339b29..71ba48b05656 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h
@@ -38,12 +38,12 @@ static const struct dpu_ctl_cfg sm7150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -72,7 +72,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -80,7 +80,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -88,7 +88,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -96,7 +96,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -104,7 +104,7 @@ static const struct dpu_sspp_cfg sm7150_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index d3d3a34d0b45..fcfb3774f7a1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -69,7 +69,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 47e01c3c242f..a86fdb33ebdd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sm8250_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8250_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
- .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 040c94c0bb66..842fcc5887fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -51,7 +51,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -59,7 +59,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -67,7 +67,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 43f64a005f5a..c5fd89dd7c89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -38,7 +38,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -46,7 +46,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 397278ba999b..a234bb289d24 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -59,7 +59,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -67,7 +67,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -75,7 +75,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -83,7 +83,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index 3cbb2fe8aba2..53f3be28f6f6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -46,7 +46,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index a06c8634d2d7..3a3bc8e429be 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -39,7 +39,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_NO_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -47,7 +47,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_NO_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 0c860e804cab..90e86063a372 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sm8350_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8350_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x1e8,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index fcee1c3665f8..139f11321fea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sc8280xp_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -74,7 +73,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
@@ -82,7 +81,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
@@ -90,7 +89,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
@@ -98,7 +97,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x2ac,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
@@ -106,7 +105,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x2ac,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
@@ -114,7 +113,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x2ac,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
@@ -122,7 +121,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x2ac,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
@@ -130,7 +129,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x2ac,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 19b2ee8bbd5f..461294143a90 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -36,17 +36,16 @@ static const struct dpu_mdp_cfg sm8450_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8450_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 4d96ce71746f..c248b3b55c41 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -35,17 +35,16 @@ static const struct dpu_mdp_cfg sa8775p_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sa8775p_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x204,
- .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .features = CTL_SC7280_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 24f988465bf6..59c7fdf28e89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -27,17 +27,16 @@ static const struct dpu_mdp_cfg sm8550_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg sm8550_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
@@ -66,70 +65,70 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x344,
- .features = VIG_SDM845_MASK,
+ .features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x344,
- .features = DMA_SDM845_MASK,
+ .features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_12", .id = SSPP_DMA4,
.base = 0x2c000, .len = 0x344,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 14,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_13", .id = SSPP_DMA5,
.base = 0x2e000, .len = 0x344,
- .features = DMA_CURSOR_SDM845_MASK,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 15,
.type = SSPP_TYPE_DMA,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
new file mode 100644
index 000000000000..5667d055fbd1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
@@ -0,0 +1,433 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_9_1_SAR2130P_H
+#define _DPU_9_1_SAR2130P_H
+
+static const struct dpu_caps sar2130p_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sar2130p_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg sar2130p_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sar2130p_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sar2130p_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sar2130p_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sar2130p_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sar2130p_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains two hard-slice
+ * DSC encoders, so both share the same base address but have their
+ * own distinct sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sar2130p_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sar2130p_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg sar2130p_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sar2130p_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 0, .wr_enable = 0},
+ {.rd_enable = 0, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sar2130p_mdss_ver = {
+ .core_major_ver = 9,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sar2130p_cfg = {
+ .mdss_ver = &sar2130p_mdss_ver,
+ .caps = &sar2130p_dpu_caps,
+ .mdp = &sar2130p_mdp,
+ .cdm = &dpu_cdm_5_x,
+ .ctl_count = ARRAY_SIZE(sar2130p_ctl),
+ .ctl = sar2130p_ctl,
+ .sspp_count = ARRAY_SIZE(sar2130p_sspp),
+ .sspp = sar2130p_sspp,
+ .mixer_count = ARRAY_SIZE(sar2130p_lm),
+ .mixer = sar2130p_lm,
+ .dspp_count = ARRAY_SIZE(sar2130p_dspp),
+ .dspp = sar2130p_dspp,
+ .pingpong_count = ARRAY_SIZE(sar2130p_pp),
+ .pingpong = sar2130p_pp,
+ .dsc_count = ARRAY_SIZE(sar2130p_dsc),
+ .dsc = sar2130p_dsc,
+ .merge_3d_count = ARRAY_SIZE(sar2130p_merge_3d),
+ .merge_3d = sar2130p_merge_3d,
+ .wb_count = ARRAY_SIZE(sar2130p_wb),
+ .wb = sar2130p_wb,
+ .intf_count = ARRAY_SIZE(sar2130p_intf),
+ .intf = sar2130p_intf,
+ .vbif_count = ARRAY_SIZE(sm8550_vbif),
+ .vbif = sm8550_vbif,
+ .perf = &sar2130p_perf_data,
+};
+
+#endif
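As the NOTE inside sar2130p_dsc points out, each display compression engine exposes two DSC hard slices at the same base address, distinguished only by their sub-block offsets. A minimal C model of that addressing scheme (the offsets below are illustrative stand-ins, not the real dsc_sblk_0/dsc_sblk_1 values):

	#include <stdio.h>

	struct dsc_sblk {
		unsigned int enc_off;	/* per-slice sub-block offset */
	};

	/* Two hard slices share one DCE base; only the sub-block differs. */
	static unsigned int dsc_reg_base(unsigned int dce_base,
					 const struct dsc_sblk *sblk)
	{
		return dce_base + sblk->enc_off;
	}

	int main(void)
	{
		const struct dsc_sblk slice0 = { 0x100 };	/* stand-in */
		const struct dsc_sblk slice1 = { 0x200 };	/* stand-in */

		printf("dce_0_0 regs @ %#x\n", dsc_reg_base(0x80000, &slice0));
		printf("dce_0_1 regs @ %#x\n", dsc_reg_base(0x80000, &slice1));
		return 0;
	}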
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 6417baa84f82..52cc10aec1f9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -26,17 +26,16 @@ static const struct dpu_mdp_cfg x1e80100_mdp = {
},
};
-/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
static const struct dpu_ctl_cfg x1e80100_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x15000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x16000, .len = 0x290,
- .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .features = CTL_SM8550_MASK,
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 0714936d8835..a4b0fe0d9899 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -445,9 +445,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
uint32_t lm_idx;
bool bg_alpha_enable = false;
- DECLARE_BITMAP(fetch_active, SSPP_MAX);
+ DECLARE_BITMAP(active_fetch, SSPP_MAX);
- memset(fetch_active, 0, sizeof(fetch_active));
+ memset(active_fetch, 0, sizeof(active_fetch));
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -464,7 +464,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
- set_bit(pstate->pipe.sspp->idx, fetch_active);
+ set_bit(pstate->pipe.sspp->idx, active_fetch);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -472,7 +472,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
&pstate->pipe, 0, stage_cfg);
if (pstate->r_pipe.sspp) {
- set_bit(pstate->r_pipe.sspp->idx, fetch_active);
+ set_bit(pstate->r_pipe.sspp->idx, active_fetch);
_dpu_crtc_blend_setup_pipe(crtc, plane,
mixer, cstate->num_mixers,
pstate->stage,
@@ -492,8 +492,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
}
}
- if (ctl->ops.set_active_pipes)
- ctl->ops.set_active_pipes(ctl, fetch_active);
+ if (ctl->ops.set_active_fetch_pipes)
+ ctl->ops.set_active_fetch_pipes(ctl, active_fetch);
_dpu_crtc_program_lm_output_roi(crtc);
}
@@ -519,6 +519,8 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
+ if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
+ mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL);
}
/* initialize stage cfg */
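The dpu_crtc.c hunks above also show the programming pattern behind the renamed op: every staged SSPP is collected into a bitmap during mixer setup, the whole set is handed to set_active_fetch_pipes() in one call, and NULL is passed on the reset path to clear it. A self-contained sketch of that flow, with a plain unsigned long standing in for the kernel's DECLARE_BITMAP/set_bit helpers:

	#include <stdio.h>

	/* Stand-in for the kernel bitmap helper used in the hunk above. */
	static void set_bit_ul(int nr, unsigned long *map)
	{
		*map |= 1UL << nr;
	}

	/* Stand-in for ctl->ops.set_active_fetch_pipes(); NULL clears it. */
	static void set_active_fetch_pipes(const unsigned long *pipes)
	{
		printf("CTL_FETCH_PIPE_ACTIVE <- %#lx\n", pipes ? *pipes : 0UL);
	}

	int main(void)
	{
		unsigned long active_fetch = 0;
		int staged[] = { 0, 8 };	/* e.g. a VIG pipe and a DMA pipe */

		for (unsigned int i = 0; i < sizeof(staged) / sizeof(staged[0]); i++)
			set_bit_ul(staged[i], &active_fetch);

		set_active_fetch_pipes(&active_fetch);	/* program staged pipes */
		set_active_fetch_pipes(NULL);		/* reset path passes NULL */
		return 0;
	}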
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 862e9e6bf0a5..c0ed110a7d30 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1246,7 +1246,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
return;
}
- phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+ /* Use first (and only) CTL if active CTLs are supported */
+ if (num_ctl == 1)
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[0]);
+ else
+ phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
if (!phys->hw_ctl) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
@@ -2190,6 +2194,9 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
/* clear all blendstages */
if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+
+ if (ctl->ops.set_active_fetch_pipes)
+ ctl->ops.set_active_fetch_pipes(ctl, NULL);
}
}
@@ -2686,8 +2693,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
- struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
- frame_done_timer);
+ struct dpu_encoder_virt *dpu_enc = timer_container_of(dpu_enc, t,
+ frame_done_timer);
struct drm_encoder *drm_enc = &dpu_enc->base;
u32 event;
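The mode_set change above makes CTL assignment depend on how many CTLs were reserved: with a single (ACTIVE) CTL every phys encoder shares hw_ctl[0], while legacy allocations keep the one-CTL-per-index lookup. A compilable sketch of just that selection rule (the struct and names are stand-ins):

	#include <stdio.h>

	struct ctl { int id; };

	static struct ctl *assign_ctl(struct ctl **hw_ctl, int num_ctl, int i)
	{
		if (num_ctl == 1)		/* ACTIVE CTL: shared by all phys */
			return hw_ctl[0];
		return i < num_ctl ? hw_ctl[i] : NULL;	/* legacy: one per index */
	}

	int main(void)
	{
		struct ctl c0 = { 0 }, c1 = { 1 };
		struct ctl *ctls[] = { &c0, &c1 };

		printf("active: phys1 -> ctl %d\n", assign_ctl(ctls, 1, 1)->id);
		printf("legacy: phys1 -> ctl %d\n", assign_ctl(ctls, 2, 1)->id);
		return 0;
	}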
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index da9994a79ca2..a0ba55ab3c89 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -60,6 +60,8 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
return;
intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ intf_cfg.intf_master = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index abd6600046cb..1c468ca5d692 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -94,17 +94,21 @@ static void drm_mode_to_intf_timing_params(
timing->vsync_polarity = 0;
}
- /* for DP/EDP, Shift timings to align it to bottom right */
- if (phys_enc->hw_intf->cap->type == INTF_DP) {
+ timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+ timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
+
+ /*
+	 * For DP/eDP, shift timings to align them to the bottom right.
+	 * wide_bus_en is set for everything except SDM845, where these
+	 * porch changes cause DisplayPort failures and HDMI tearing.
+ */
+ if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
timing->h_back_porch += timing->h_front_porch;
timing->h_front_porch = 0;
timing->v_back_porch += timing->v_front_porch;
timing->v_front_porch = 0;
}
- timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
- timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
-
/*
* for DP, divide the horizontal parameters by 2 when
* widebus is enabled
@@ -298,6 +302,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
if (phys_enc->hw_cdm)
intf_cfg.cdm = phys_enc->hw_cdm->idx;
intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ intf_cfg.intf_master = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
@@ -372,7 +378,8 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg)
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return phys_enc->split_role != ENC_ROLE_SOLO;
+ return !(phys_enc->hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG)) &&
+ phys_enc->split_role != ENC_ROLE_SOLO;
}
static void dpu_encoder_phys_vid_atomic_mode_set(
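needs_single_flush() is likewise narrowed: synchronized flushing is only required when two legacy CTLs drive a split display, since an ACTIVE-CFG CTL handles both interfaces by itself. Reduced to a standalone predicate (the bools stand in for the feature-bit and split-role tests):

	#include <stdbool.h>
	#include <stdio.h>

	/* Single flush only when two legacy CTLs must be kicked together;
	 * an ACTIVE-CFG CTL already drives both interfaces alone. */
	static bool needs_single_flush(bool ctl_active_cfg, bool role_solo)
	{
		return !ctl_active_cfg && !role_solo;
	}

	int main(void)
	{
		printf("legacy split: %d\n", needs_single_flush(false, false));
		printf("active split: %d\n", needs_single_flush(true, false));
		return 0;
	}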
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 64265ca4656a..c878fe196aeb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -34,11 +34,11 @@
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
-#define VIG_SDM845_MASK \
+#define VIG_SDM845_MASK_NO_SDMA \
(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
- (VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
@@ -54,24 +54,24 @@
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
- (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+ (VIG_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_INLINE_ROTATION))
#define VIG_SC7280_MASK_SDMA \
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-#define DMA_SDM845_MASK \
+#define DMA_SDM845_MASK_NO_SDMA \
(BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
-#define DMA_CURSOR_SDM845_MASK \
- (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+#define DMA_CURSOR_SDM845_MASK_NO_SDMA \
+ (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_CURSOR))
#define DMA_SDM845_MASK_SDMA \
- (DMA_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (DMA_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_SDM845_MASK_SDMA \
- (DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+ (DMA_CURSOR_SDM845_MASK_NO_SDMA | BIT(DPU_SSPP_SMART_DMA_V2))
#define DMA_CURSOR_MSM8996_MASK \
(DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR))
@@ -98,15 +98,9 @@
#define PINGPONG_MSM8996_MASK \
(BIT(DPU_PINGPONG_DSC))
-#define PINGPONG_MSM8996_TE2_MASK \
- (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2))
-
#define PINGPONG_SDM845_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
-#define PINGPONG_SDM845_TE2_MASK \
- (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
-
#define PINGPONG_SM8150_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
@@ -376,8 +370,6 @@ static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
* MIXER sub blocks config
*************************************************************/
-/* MSM8998 */
-
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
@@ -387,8 +379,6 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
},
};
-/* SDM845 */
-
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
@@ -398,8 +388,6 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-/* SC7180 */
-
static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
@@ -408,8 +396,6 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
},
};
-/* QCM2290 */
-
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
.maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
@@ -434,22 +420,11 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
-static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = {
- .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
- .version = 0x1},
-};
static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = {
/* No dither block */
};
-static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
- .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
- .version = 0x1},
- .dither = {.name = "dither", .base = 0x30e0,
- .len = 0x20, .version = 0x10000},
-};
-
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
@@ -759,7 +734,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_8_4_sa8775p.h"
#include "catalog/dpu_9_0_sm8550.h"
-
+#include "catalog/dpu_9_1_sar2130p.h"
#include "catalog/dpu_9_2_x1e80100.h"
#include "catalog/dpu_10_0_sm8650.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 4cea19e1a203..01dd6e65f777 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -115,7 +115,6 @@ enum {
/**
* PINGPONG sub-blocks
- * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
* @DPU_PINGPONG_SPLIT PP block supports split fifo
* @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
* @DPU_PINGPONG_DITHER Dither blocks
@@ -123,8 +122,7 @@ enum {
* @DPU_PINGPONG_MAX
*/
enum {
- DPU_PINGPONG_TE2 = 0x1,
- DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SPLIT = 0x1,
DPU_PINGPONG_SLAVE,
DPU_PINGPONG_DITHER,
DPU_PINGPONG_DSC,
@@ -404,8 +402,6 @@ struct dpu_dspp_sub_blks {
};
struct dpu_pingpong_sub_blks {
- struct dpu_pp_blk te;
- struct dpu_pp_blk te2;
struct dpu_pp_blk dither;
};
@@ -841,6 +837,7 @@ extern const struct dpu_mdss_cfg dpu_msm8937_cfg;
extern const struct dpu_mdss_cfg dpu_msm8953_cfg;
extern const struct dpu_mdss_cfg dpu_msm8996_cfg;
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
+extern const struct dpu_mdss_cfg dpu_sar2130p_cfg;
extern const struct dpu_mdss_cfg dpu_sdm630_cfg;
extern const struct dpu_mdss_cfg dpu_sdm660_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 411a7cf088eb..573e42b06ad0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -261,6 +261,12 @@ static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
case LM_5:
ctx->pending_flush_mask |= BIT(20);
break;
+ case LM_6:
+ ctx->pending_flush_mask |= BIT(21);
+ break;
+ case LM_7:
+ ctx->pending_flush_mask |= BIT(27);
+ break;
default:
break;
}
@@ -563,6 +569,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 wb_active = 0;
u32 cwb_active = 0;
u32 mode_sel = 0;
+ u32 merge_3d_active = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
* per VM. Explicitly disable it until VM support is
@@ -578,6 +585,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
+ merge_3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
@@ -591,15 +599,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->dsc)
dsc_active |= cfg->dsc;
+ if (cfg->merge_3d)
+ merge_3d_active |= BIT(cfg->merge_3d - MERGE_3D_0);
+
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
- if (cfg->merge_3d)
- DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
- BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->intf_master)
+ DPU_REG_WRITE(c, CTL_INTF_MASTER, BIT(cfg->intf_master - INTF_0));
if (cfg->cdm)
DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
@@ -643,6 +654,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 intf_master = 0;
u32 wb_active = 0;
u32 cwb_active = 0;
u32 merge3d_active = 0;
@@ -666,10 +678,21 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
dpu_hw_ctl_clear_all_blendstages(ctx);
+ if (ctx->ops.set_active_fetch_pipes)
+ ctx->ops.set_active_fetch_pipes(ctx, NULL);
+
if (cfg->intf) {
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
intf_active &= ~BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+
+ intf_master = DPU_REG_READ(c, CTL_INTF_MASTER);
+
+ /* Unset this intf as master, if it is the current master */
+ if (intf_master == BIT(cfg->intf - INTF_0)) {
+ DPU_DEBUG_DRIVER("Unsetting INTF_%d master\n", cfg->intf - INTF_0);
+ DPU_REG_WRITE(c, CTL_INTF_MASTER, 0);
+ }
}
if (cfg->cwb) {
@@ -697,8 +720,8 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
}
}
-static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
- unsigned long *fetch_active)
+static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx,
+ unsigned long *fetch_active)
{
int i;
u32 val = 0;
@@ -761,7 +784,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
- ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
+ ops->set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
};
/**
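The intf_cfg_v1 hunk converts CTL_MERGE_3D_ACTIVE from a conditional overwrite into the same read-modify-write accumulation already used for the INTF/WB/CWB/DSC active registers, and adds the CTL_INTF_MASTER write for the master interface. A toy model of that pattern (the register file and helpers are stand-ins for DPU_REG_READ/DPU_REG_WRITE):

	#include <stdio.h>

	#define BIT(n) (1u << (n))

	/* Toy register file standing in for the CTL block's MMIO. */
	static unsigned int regs[4];
	enum { CTL_INTF_ACTIVE, CTL_MERGE_3D_ACTIVE, CTL_INTF_MASTER };

	/* Read-modify-write: accumulate this path's blocks instead of
	 * overwriting, so two encoders sharing one CTL don't clobber
	 * each other's bits. */
	static void intf_cfg_v1(int intf, int merge_3d, int is_master)
	{
		unsigned int intf_active = regs[CTL_INTF_ACTIVE];
		unsigned int m3d_active = regs[CTL_MERGE_3D_ACTIVE];

		if (intf >= 0)
			intf_active |= BIT(intf);
		if (merge_3d >= 0)
			m3d_active |= BIT(merge_3d);

		regs[CTL_INTF_ACTIVE] = intf_active;
		regs[CTL_MERGE_3D_ACTIVE] = m3d_active;
		if (is_master && intf >= 0)
			regs[CTL_INTF_MASTER] = BIT(intf);
	}

	int main(void)
	{
		intf_cfg_v1(1, 0, 1);	/* first DSI interface as master */
		intf_cfg_v1(2, 0, 0);	/* second DSI joins the same CTL */
		printf("INTF_ACTIVE=%#x MERGE_3D_ACTIVE=%#x INTF_MASTER=%#x\n",
		       regs[CTL_INTF_ACTIVE], regs[CTL_MERGE_3D_ACTIVE],
		       regs[CTL_INTF_MASTER]);
		return 0;
	}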
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 080a9550a0cc..feb09590bc8f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -36,6 +36,7 @@ struct dpu_hw_stage_cfg {
/**
* struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
* @intf : Interface id
+ * @intf_master: Master interface id in the dual pipe topology
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
@@ -46,6 +47,7 @@ struct dpu_hw_stage_cfg {
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
+ enum dpu_intf intf_master;
enum dpu_wb wb;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
@@ -254,7 +256,7 @@ struct dpu_hw_ctl_ops {
void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
- void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 8d820cd1b554..175639c8bfbb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -125,6 +125,7 @@ enum dpu_lm {
LM_4,
LM_5,
LM_6,
+ LM_7,
LM_MAX
};
@@ -169,6 +170,8 @@ enum dpu_dsc {
DSC_3,
DSC_4,
DSC_5,
+ DSC_6,
+ DSC_7,
DSC_MAX
};
@@ -185,6 +188,8 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
+ PINGPONG_6,
+ PINGPONG_7,
PINGPONG_CWB_0,
PINGPONG_CWB_1,
PINGPONG_CWB_2,
@@ -199,6 +204,7 @@ enum dpu_merge_3d {
MERGE_3D_2,
MERGE_3D_3,
MERGE_3D_4,
+ MERGE_3D_5,
MERGE_3D_MAX
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 3305ad0623ca..1fd82b6747e9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1512,6 +1512,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
{ .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, },
+ { .compatible = "qcom,sar2130p-dpu", .data = &dpu_sar2130p_cfg, },
{ .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
{ .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index e03d6091f736..421138bc3cb7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -915,10 +915,9 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return 0;
}
-static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
- struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct msm_format *fmt,
- uint32_t max_linewidth)
+static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt)
{
if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect))
@@ -930,10 +929,6 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
if (MSM_FORMAT_IS_YUV(fmt))
return false;
- if (MSM_FORMAT_IS_UBWC(fmt) &&
- drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
- return false;
-
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
@@ -941,6 +936,27 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
return true;
}
+static int dpu_plane_is_parallel_capable(struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ if (MSM_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
+ return false;
+
+ return true;
+}
+
+static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ return dpu_plane_is_multirect_capable(sspp, pipe_cfg, fmt) &&
+ dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth);
+}
+
static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct drm_atomic_state *state,
const struct drm_crtc_state *crtc_state)
@@ -1002,6 +1018,69 @@ static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dp
return true;
}
+static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
+ struct dpu_plane_state *prev_adjacent_pstate,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe *prev_pipe = &prev_adjacent_pstate->pipe;
+ struct dpu_sw_pipe_cfg *prev_pipe_cfg = &prev_adjacent_pstate->pipe_cfg;
+ const struct msm_format *prev_fmt = msm_framebuffer_format(prev_adjacent_pstate->base.fb);
+ u16 max_tile_height = 1;
+
+ if (prev_adjacent_pstate->r_pipe.sspp != NULL ||
+ prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE)
+ return false;
+
+ if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) ||
+ !dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt))
+ return false;
+
+ if (MSM_FORMAT_IS_UBWC(fmt))
+ max_tile_height = max(max_tile_height, fmt->tile_height);
+
+ if (MSM_FORMAT_IS_UBWC(prev_fmt))
+ max_tile_height = max(max_tile_height, prev_fmt->tile_height);
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->sspp = NULL;
+
+ if (dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth) &&
+ dpu_plane_is_parallel_capable(prev_pipe_cfg, prev_fmt, max_linewidth) &&
+ (pipe_cfg->dst_rect.x1 >= prev_pipe_cfg->dst_rect.x2 ||
+ prev_pipe_cfg->dst_rect.x1 >= pipe_cfg->dst_rect.x2)) {
+ pipe->sspp = prev_pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_1;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ prev_pipe->multirect_index = DPU_SSPP_RECT_0;
+ prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ return true;
+ }
+
+ if (pipe_cfg->dst_rect.y1 >= prev_pipe_cfg->dst_rect.y2 + 2 * max_tile_height ||
+ prev_pipe_cfg->dst_rect.y1 >= pipe_cfg->dst_rect.y2 + 2 * max_tile_height) {
+ pipe->sspp = prev_pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_1;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+
+ prev_pipe->multirect_index = DPU_SSPP_RECT_0;
+ prev_pipe->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+
+ return true;
+ }
+
+ return false;
+}
+
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -1102,13 +1181,14 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
struct dpu_global_state *global_state,
struct drm_atomic_state *state,
- struct drm_plane_state *plane_state)
+ struct drm_plane_state *plane_state,
+ struct drm_plane_state *prev_adjacent_plane_state)
{
const struct drm_crtc_state *crtc_state = NULL;
struct drm_plane *plane = plane_state->plane;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_rm_sspp_requirements reqs;
- struct dpu_plane_state *pstate;
+ struct dpu_plane_state *pstate, *prev_adjacent_pstate;
struct dpu_sw_pipe *pipe;
struct dpu_sw_pipe *r_pipe;
struct dpu_sw_pipe_cfg *pipe_cfg;
@@ -1120,6 +1200,8 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
plane_state->crtc);
pstate = to_dpu_plane_state(plane_state);
+ prev_adjacent_pstate = prev_adjacent_plane_state ?
+ to_dpu_plane_state(prev_adjacent_plane_state) : NULL;
pipe = &pstate->pipe;
r_pipe = &pstate->r_pipe;
pipe_cfg = &pstate->pipe_cfg;
@@ -1138,24 +1220,42 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
- pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!pipe->sspp)
- return -ENODEV;
+ if (drm_rect_width(&r_pipe_cfg->src_rect) == 0) {
+ if (!prev_adjacent_pstate ||
+ !dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate, fmt,
+ dpu_kms->catalog->caps->max_linewidth)) {
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
+ return -ENODEV;
- if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
- pipe->sspp,
- msm_framebuffer_format(plane_state->fb),
- dpu_kms->catalog->caps->max_linewidth)) {
- /* multirect is not possible, use two SSPP blocks */
- r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!r_pipe->sspp)
+ r_pipe->sspp = NULL;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
+ } else {
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
return -ENODEV;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(plane_state->fb),
+ dpu_kms->catalog->caps->max_linewidth)) {
+ /* multirect is not possible, use two SSPP blocks */
+ r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!r_pipe->sspp)
+ return -ENODEV;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
}
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
@@ -1168,6 +1268,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
unsigned int num_planes)
{
unsigned int i;
+ struct drm_plane_state *prev_adjacent_plane_state = NULL;
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
@@ -1177,9 +1278,12 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
continue;
int ret = dpu_plane_virtual_assign_resources(crtc, global_state,
- state, plane_state);
+ state, plane_state,
+ prev_adjacent_plane_state);
if (ret)
- return ret;
+ break;
+
+ prev_adjacent_plane_state = plane_state;
}
return 0;
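Note that prev_adjacent_plane_state only advances after a successful assignment, so sharing is attempted strictly between neighbouring entries of states[]; the first failure breaks out of the walk.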
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 3efbba425ca6..2e296f79cba1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -53,6 +53,8 @@ int dpu_rm_init(struct drm_device *dev,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+ rm->has_legacy_ctls = (cat->mdss_ver->core_major_ver < 5);
+
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_hw_mixer *hw;
@@ -434,20 +436,19 @@ static int _dpu_rm_reserve_ctls(
int i = 0, j, num_ctls;
bool needs_split_display;
- /*
- * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
- * control path.
- *
- * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
- * writeback and real-time encoders must be driven by the same control
- * path
- */
- if (top->cwb_enabled)
- num_ctls = 1;
- else
+ if (rm->has_legacy_ctls) {
+ /*
+ * TODO: check if there is a need for special handling if
+ * DPU < 5.0 gets CWB support.
+ */
num_ctls = top->num_intf;
- needs_split_display = _dpu_rm_needs_split_display(top);
+ needs_split_display = _dpu_rm_needs_split_display(top);
+ } else {
+ /* use single CTL */
+ num_ctls = 1;
+ needs_split_display = false;
+ }
for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
const struct dpu_hw_ctl *ctl;
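For context: CTL blocks on DPU >= 5.0 have the ACTIVE mechanism that lets a single CTL drive several interfaces, so the non-legacy branch hardcodes one CTL and never requests split display; pre-5.0 hardware keeps the old one-CTL-per-interface reservation. The flag is computed once in dpu_rm_init() above from core_major_ver < 5.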
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index a19dbdb1b6f4..aa62966056d4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -24,6 +24,7 @@ struct dpu_global_state;
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
* @cdm_blk: cdm hardware resource
+ * @has_legacy_ctls: DPU uses pre-ACTIVE CTL blocks.
*/
struct dpu_rm {
struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
@@ -37,6 +38,7 @@ struct dpu_rm {
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE];
struct dpu_hw_blk *cdm_blk;
+ bool has_legacy_ctls;
};
struct dpu_rm_sspp_requirements {
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index c469e66cfc11..7e942c1337b3 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -6,6 +6,8 @@
#include <linux/delay.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_vblank.h>
#include "msm_drv.h"
@@ -189,7 +191,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct device_node *panel_node;
+ struct drm_bridge *next_bridge;
int dsi_id;
int ret;
@@ -199,27 +201,43 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
* bail out early if there is no panel node (no need to
* initialize LCDC encoder and LVDS connector)
*/
- panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
- if (!panel_node)
- return 0;
+ next_bridge = devm_drm_of_get_bridge(dev->dev, dev->dev->of_node, 0, 0);
+ if (IS_ERR(next_bridge)) {
+ ret = PTR_ERR(next_bridge);
+ if (ret == -ENODEV)
+ return 0;
+ return ret;
+ }
- encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ encoder = mdp4_lcdc_encoder_init(dev);
if (IS_ERR(encoder)) {
DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
- of_node_put(panel_node);
return PTR_ERR(encoder);
}
/* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
encoder->possible_crtcs = 1 << DMA_P;
- connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ ret = drm_bridge_attach(encoder, next_bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach LVDS panel/bridge: %d\n", ret);
+
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(connector)) {
DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
- of_node_put(panel_node);
return PTR_ERR(connector);
}
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach LVDS connector: %d\n", ret);
+
+ return ret;
+ }
+
break;
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
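The LVDS path now follows the standard panel-bridge recipe instead of a hand-rolled connector. In generic form (a sketch with error handling trimmed, not the driver code verbatim):

    /* Generic shape of the conversion above (sketch). */
    bridge = devm_drm_of_get_bridge(dev, np, 0, 0); /* panel/bridge from DT graph */
    if (IS_ERR(bridge))
            return PTR_ERR(bridge); /* -ENODEV: nothing wired up, skip output */

    ret = drm_bridge_attach(encoder, bridge, NULL,
                            DRM_BRIDGE_ATTACH_NO_CONNECTOR);
    if (ret)
            return ret;

    connector = drm_bridge_connector_init(drm, encoder); /* managed connector */
    if (IS_ERR(connector))
            return PTR_ERR(connector);

    return drm_connector_attach_encoder(connector, encoder);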
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index 94b1ba92785f..f9d988076337 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -191,12 +191,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
-long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
-struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct device_node *panel_node);
-
-struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct device_node *panel_node, struct drm_encoder *encoder);
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev);
#ifdef CONFIG_DRM_MSM_DSI
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
@@ -207,13 +202,6 @@ static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
}
#endif
-#ifdef CONFIG_COMMON_CLK
-struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
-#else
-static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
+struct clk *mpd4_get_lcdc_clock(struct drm_device *dev);
#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 8bbc7fb881d5..06a307c1272d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -14,7 +14,6 @@
struct mdp4_lcdc_encoder {
struct drm_encoder base;
- struct device_node *panel_node;
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
@@ -262,19 +261,12 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
return;
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
- panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (!IS_ERR(panel)) {
- drm_panel_disable(panel);
- drm_panel_unprepare(panel);
- }
-
/*
* Wait for a vsync so we know the ENABLE=0 latched before
* the (connector) source of the vsync's gets disabled,
@@ -300,7 +292,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
to_mdp4_lcdc_encoder(encoder);
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
- struct drm_panel *panel;
uint32_t config;
int ret;
@@ -335,12 +326,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
if (ret)
DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
- panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (!IS_ERR(panel)) {
- drm_panel_prepare(panel);
- drm_panel_enable(panel);
- }
-
setup_phy(encoder);
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
@@ -348,22 +333,34 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
mdp4_lcdc_encoder->enabled = true;
}
+static enum drm_mode_status
+mdp4_lcdc_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, requested);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
.mode_set = mdp4_lcdc_encoder_mode_set,
.disable = mdp4_lcdc_encoder_disable,
.enable = mdp4_lcdc_encoder_enable,
+ .mode_valid = mdp4_lcdc_encoder_mode_valid,
};
-long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
-{
- struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
- to_mdp4_lcdc_encoder(encoder);
- return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate);
-}
-
/* initialize encoder */
-struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
- struct device_node *panel_node)
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
@@ -374,14 +371,11 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
if (IS_ERR(mdp4_lcdc_encoder))
return ERR_CAST(mdp4_lcdc_encoder);
- mdp4_lcdc_encoder->panel_node = panel_node;
-
encoder = &mdp4_lcdc_encoder->base;
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
- /* TODO: do we need different pll in other cases? */
- mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
+ mdp4_lcdc_encoder->lcdc_clk = mpd4_get_lcdc_clock(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
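With the connector object gone, the exact-rate test lives in the encoder's .mode_valid (shown above) and is exercised through the bridge-connector probe path instead: clk_round_rate() reports the rate the clock would actually deliver, so any mode the LCDC clock cannot synthesize exactly (mode->clock is in kHz, hence the factor of 1000) is rejected with MODE_CLOCK_RANGE.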
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
deleted file mode 100644
index 52e728181b52..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2014 Red Hat
- * Author: Rob Clark <robdclark@gmail.com>
- * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
- */
-
-#include "mdp4_kms.h"
-
-struct mdp4_lvds_connector {
- struct drm_connector base;
- struct drm_encoder *encoder;
- struct device_node *panel_node;
- struct drm_panel *panel;
-};
-#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
-
-static enum drm_connector_status mdp4_lvds_connector_detect(
- struct drm_connector *connector, bool force)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
-
- if (!mdp4_lvds_connector->panel) {
- mdp4_lvds_connector->panel =
- of_drm_find_panel(mdp4_lvds_connector->panel_node);
- if (IS_ERR(mdp4_lvds_connector->panel))
- mdp4_lvds_connector->panel = NULL;
- }
-
- return mdp4_lvds_connector->panel ?
- connector_status_connected :
- connector_status_disconnected;
-}
-
-static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
-
- drm_connector_cleanup(connector);
-
- kfree(mdp4_lvds_connector);
-}
-
-static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- struct drm_panel *panel = mdp4_lvds_connector->panel;
- int ret = 0;
-
- if (panel)
- ret = drm_panel_get_modes(panel, connector);
-
- return ret;
-}
-
-static enum drm_mode_status
-mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- struct drm_encoder *encoder = mdp4_lvds_connector->encoder;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = mdp4_lcdc_round_pixclk(encoder, requested);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
-
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- return MODE_OK;
-}
-
-static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
- .detect = mdp4_lvds_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = mdp4_lvds_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
- .get_modes = mdp4_lvds_connector_get_modes,
- .mode_valid = mdp4_lvds_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
- struct device_node *panel_node, struct drm_encoder *encoder)
-{
- struct drm_connector *connector = NULL;
- struct mdp4_lvds_connector *mdp4_lvds_connector;
-
- mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
- if (!mdp4_lvds_connector)
- return ERR_PTR(-ENOMEM);
-
- mdp4_lvds_connector->encoder = encoder;
- mdp4_lvds_connector->panel_node = panel_node;
-
- connector = &mdp4_lvds_connector->base;
-
- drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
-
- connector->polled = 0;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, encoder);
-
- return connector;
-}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index ab8c0c187fb2..fa2c29470510 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -122,40 +122,59 @@ static const struct clk_ops mpd4_lvds_pll_ops = {
.set_rate = mpd4_lvds_pll_set_rate,
};
-static const char *mpd4_lvds_pll_parents[] = {
- "pxo",
+static const struct clk_parent_data mpd4_lvds_pll_parents[] = {
+ { .fw_name = "pxo", .name = "pxo", },
};
static struct clk_init_data pll_init = {
.name = "mpd4_lvds_pll",
.ops = &mpd4_lvds_pll_ops,
- .parent_names = mpd4_lvds_pll_parents,
+ .parent_data = mpd4_lvds_pll_parents,
.num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
};
-struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
+static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
{
struct mdp4_lvds_pll *lvds_pll;
- struct clk *clk;
int ret;
lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL);
- if (!lvds_pll) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!lvds_pll)
+ return ERR_PTR(-ENOMEM);
lvds_pll->dev = dev;
lvds_pll->pll_hw.init = &pll_init;
- clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto fail;
+ ret = devm_clk_hw_register(dev->dev, &lvds_pll->pll_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_of_clk_add_hw_provider(dev->dev, of_clk_hw_simple_get, &lvds_pll->pll_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &lvds_pll->pll_hw;
+}
+
+struct clk *mpd4_get_lcdc_clock(struct drm_device *dev)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+
+ /* TODO: do we need different pll in other cases? */
+ hw = mpd4_lvds_pll_init(dev);
+ if (IS_ERR(hw)) {
+ DRM_DEV_ERROR(dev->dev, "failed to register LVDS PLL\n");
+ return ERR_CAST(hw);
}
- return clk;
+ clk = devm_clk_get(dev->dev, "lcdc_clk");
+ if (clk == ERR_PTR(-ENOENT)) {
+ drm_warn(dev, "can't get LCDC clock, using PLL directly\n");
-fail:
- return ERR_PTR(ret);
+ return devm_clk_hw_get_clk(dev->dev, hw, "lcdc_clk");
+ }
+
+ return clk;
}
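Two things happen above: the LVDS PLL is exported as a DT clock provider (devm_of_clk_add_hw_provider() with of_clk_hw_simple_get), and consumers now prefer a board-described "lcdc_clk", keeping the PLL as fallback. The fallback idiom in isolation, as a sketch:

    /* Prefer an externally described clock; devm_clk_get() yields -ENOENT
     * when the DT names no "lcdc_clk", in which case mint a struct clk
     * handle for the PLL registered just above. */
    clk = devm_clk_get(dev->dev, "lcdc_clk");
    if (clk == ERR_PTR(-ENOENT))
            clk = devm_clk_hw_get_clk(dev->dev, hw, "lcdc_clk");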
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 70fdc9fe228a..f8bfb908f9b4 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -13,13 +13,13 @@
#include "dp_catalog.h"
#include "dp_audio.h"
+#include "dp_drm.h"
#include "dp_panel.h"
#include "dp_reg.h"
#include "dp_display.h"
#include "dp_utils.h"
struct msm_dp_audio_private {
- struct platform_device *audio_pdev;
struct platform_device *pdev;
struct drm_device *drm_dev;
struct msm_dp_catalog *catalog;
@@ -160,24 +160,11 @@ static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable)
msm_dp_catalog_audio_enable(catalog, enable);
}
-static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev)
+static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_display)
{
struct msm_dp_audio *msm_dp_audio;
- struct msm_dp *msm_dp_display;
-
- if (!pdev) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- DRM_ERROR("invalid input\n");
- return ERR_PTR(-ENODEV);
- }
msm_dp_audio = msm_dp_display->msm_dp_audio;
-
if (!msm_dp_audio) {
DRM_ERROR("invalid msm_dp_audio data\n");
return ERR_PTR(-EINVAL);
@@ -186,68 +173,16 @@ static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device
return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
}
-static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data,
- hdmi_codec_plugged_cb fn,
- struct device *codec_dev)
-{
-
- struct platform_device *pdev;
- struct msm_dp *msm_dp_display;
-
- pdev = to_platform_device(dev);
- if (!pdev) {
- pr_err("invalid input\n");
- return -ENODEV;
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- pr_err("invalid input\n");
- return -ENODEV;
- }
-
- return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev);
-}
-
-static int msm_dp_audio_get_eld(struct device *dev,
- void *data, uint8_t *buf, size_t len)
-{
- struct platform_device *pdev;
- struct msm_dp *msm_dp_display;
-
- pdev = to_platform_device(dev);
-
- if (!pdev) {
- DRM_ERROR("invalid input\n");
- return -ENODEV;
- }
-
- msm_dp_display = platform_get_drvdata(pdev);
- if (!msm_dp_display) {
- DRM_ERROR("invalid input\n");
- return -ENODEV;
- }
-
- mutex_lock(&msm_dp_display->connector->eld_mutex);
- memcpy(buf, msm_dp_display->connector->eld,
- min(sizeof(msm_dp_display->connector->eld), len));
- mutex_unlock(&msm_dp_display->connector->eld_mutex);
-
- return 0;
-}
-
-int msm_dp_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
+int msm_dp_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
{
int rc = 0;
struct msm_dp_audio_private *audio;
- struct platform_device *pdev;
struct msm_dp *msm_dp_display;
- pdev = to_platform_device(dev);
- msm_dp_display = platform_get_drvdata(pdev);
+ msm_dp_display = to_dp_bridge(bridge)->msm_dp_display;
/*
* there could be cases where sound card can be opened even
@@ -262,7 +197,7 @@ int msm_dp_audio_hw_params(struct device *dev,
goto end;
}
- audio = msm_dp_audio_get_data(pdev);
+ audio = msm_dp_audio_get_data(msm_dp_display);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -281,15 +216,14 @@ end:
return rc;
}
-static void msm_dp_audio_shutdown(struct device *dev, void *data)
+void msm_dp_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
struct msm_dp_audio_private *audio;
- struct platform_device *pdev;
struct msm_dp *msm_dp_display;
- pdev = to_platform_device(dev);
- msm_dp_display = platform_get_drvdata(pdev);
- audio = msm_dp_audio_get_data(pdev);
+ msm_dp_display = to_dp_bridge(bridge)->msm_dp_display;
+ audio = msm_dp_audio_get_data(msm_dp_display);
if (IS_ERR(audio)) {
DRM_ERROR("failed to get audio data\n");
return;
@@ -311,47 +245,6 @@ static void msm_dp_audio_shutdown(struct device *dev, void *data)
msm_dp_display_signal_audio_complete(msm_dp_display);
}
-static const struct hdmi_codec_ops msm_dp_audio_codec_ops = {
- .hw_params = msm_dp_audio_hw_params,
- .audio_shutdown = msm_dp_audio_shutdown,
- .get_eld = msm_dp_audio_get_eld,
- .hook_plugged_cb = msm_dp_audio_hook_plugged_cb,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &msm_dp_audio_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio)
-{
- struct msm_dp_audio_private *audio_priv;
-
- audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
-
- if (audio_priv->audio_pdev) {
- platform_device_unregister(audio_priv->audio_pdev);
- audio_priv->audio_pdev = NULL;
- }
-}
-
-int msm_dp_register_audio_driver(struct device *dev,
- struct msm_dp_audio *msm_dp_audio)
-{
- struct msm_dp_audio_private *audio_priv;
-
- audio_priv = container_of(msm_dp_audio,
- struct msm_dp_audio_private, msm_dp_audio);
-
- audio_priv->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
-}
-
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
struct msm_dp_catalog *catalog)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index beea34cbab77..58fc14693e48 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -36,23 +36,6 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
struct msm_dp_catalog *catalog);
/**
- * msm_dp_register_audio_driver()
- *
- * Registers DP device with hdmi_codec interface.
- *
- * @dev: DP device instance.
- * @msm_dp_audio: an instance of msm_dp_audio module.
- *
- *
- * Returns the error code in case of failure, otherwise
- * zero on success.
- */
-int msm_dp_register_audio_driver(struct device *dev,
- struct msm_dp_audio *msm_dp_audio);
-
-void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio);
-
-/**
* msm_dp_audio_put()
*
* Cleans the msm_dp_audio instance.
@@ -61,10 +44,12 @@ void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm
*/
void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio);
-int msm_dp_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params);
+int msm_dp_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void msm_dp_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge);
#endif /* _DP_AUDIO_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index d8633a596f8d..a50bfafbb4ea 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1034,10 +1034,12 @@ static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl,
return 0;
}
-static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_update_phy_vx_px(struct msm_dp_ctrl_private *ctrl,
+ enum drm_dp_phy dp_phy)
{
struct msm_dp_link *link = ctrl->link;
- int ret = 0, lane, lane_cnt;
+ int lane, lane_cnt, reg;
+ int ret = 0;
u8 buf[4];
u32 max_level_reached = 0;
u32 voltage_swing_level = link->phy_params.v_level;
@@ -1075,8 +1077,13 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n",
voltage_swing_level | pre_emphasis_level);
- ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
- buf, lane_cnt);
+
+ if (dp_phy == DP_PHY_DPRX)
+ reg = DP_TRAINING_LANE0_SET;
+ else
+ reg = DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
+
+ ret = drm_dp_dpcd_write(ctrl->aux, reg, buf, lane_cnt);
if (ret == lane_cnt)
ret = 0;
@@ -1084,9 +1091,10 @@ static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
}
static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
- u8 pattern)
+ u8 pattern, enum drm_dp_phy dp_phy)
{
u8 buf;
+ int reg;
int ret = 0;
drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern);
@@ -1096,31 +1104,26 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
if (pattern && pattern != DP_TRAINING_PATTERN_4)
buf |= DP_LINK_SCRAMBLING_DISABLE;
- ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
- return ret == 1;
-}
-
-static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl,
- u8 *link_status)
-{
- int ret = 0, len;
-
- len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
- if (len != DP_LINK_STATUS_SIZE) {
- DRM_ERROR("DP link status read failed, err: %d\n", len);
- ret = -EINVAL;
- }
+ if (dp_phy == DP_PHY_DPRX)
+ reg = DP_TRAINING_PATTERN_SET;
+ else
+ reg = DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
- return ret;
+ ret = drm_dp_dpcd_writeb(ctrl->aux, reg, buf);
+ return ret == 1;
}
static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
- int *training_step)
+ int *training_step, enum drm_dp_phy dp_phy)
{
+ int delay_us;
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 4;
+ delay_us = drm_dp_read_clock_recovery_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_1;
@@ -1129,18 +1132,19 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
if (ret)
return ret;
msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE);
+ DP_LINK_SCRAMBLING_DISABLE, dp_phy);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_reset_phy_params_vx_px(ctrl->link);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
for (tries = 0; tries < maximum_retries; tries++) {
- drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
+ fsleep(delay_us);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status);
if (ret)
return ret;
@@ -1161,7 +1165,7 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
}
msm_dp_link_adjust_levels(ctrl->link, link_status);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
}
@@ -1213,21 +1217,31 @@ static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl)
return 0;
}
-static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl,
+ enum drm_dp_phy dp_phy)
{
- msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
- drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+ int delay_us;
+
+ msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE, dp_phy);
+
+ delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+ fsleep(delay_us);
}
static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
- int *training_step)
+ int *training_step, enum drm_dp_phy dp_phy)
{
+ int delay_us;
int tries = 0, ret = 0;
u8 pattern;
u32 state_ctrl_bit;
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
+ delay_us = drm_dp_read_channel_eq_delay(ctrl->aux,
+ ctrl->panel->dpcd, dp_phy, false);
+
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_2;
@@ -1247,12 +1261,12 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
if (ret)
return ret;
- msm_dp_ctrl_train_pattern_set(ctrl, pattern);
+ msm_dp_ctrl_train_pattern_set(ctrl, pattern, dp_phy);
for (tries = 0; tries <= maximum_retries; tries++) {
- drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+ fsleep(delay_us);
- ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
+ ret = drm_dp_dpcd_read_phy_link_status(ctrl->aux, dp_phy, link_status);
if (ret)
return ret;
@@ -1262,7 +1276,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
}
msm_dp_link_adjust_levels(ctrl->link, link_status);
- ret = msm_dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_phy_vx_px(ctrl, dp_phy);
if (ret)
return ret;
@@ -1271,9 +1285,32 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
+static int msm_dp_ctrl_link_train_1_2(struct msm_dp_ctrl_private *ctrl,
+ int *training_step, enum drm_dp_phy dp_phy)
+{
+ int ret;
+
+ ret = msm_dp_ctrl_link_train_1(ctrl, training_step, dp_phy);
+ if (ret) {
+ DRM_ERROR("link training #1 on phy %d failed. ret=%d\n", dp_phy, ret);
+ return ret;
+ }
+ drm_dbg_dp(ctrl->drm_dev, "link training #1 on phy %d successful\n", dp_phy);
+
+ ret = msm_dp_ctrl_link_train_2(ctrl, training_step, dp_phy);
+ if (ret) {
+ DRM_ERROR("link training #2 on phy %d failed. ret=%d\n", dp_phy, ret);
+ return ret;
+ }
+ drm_dbg_dp(ctrl->drm_dev, "link training #2 on phy %d successful\n", dp_phy);
+
+ return 0;
+}
+
static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
+ int i;
int ret = 0;
const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
@@ -1286,8 +1323,6 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
link_info.rate = ctrl->link->link_params.rate;
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
- msm_dp_link_reset_phy_params_vx_px(ctrl->link);
-
msm_dp_aux_link_configure(ctrl->aux, &link_info);
if (drm_dp_max_downspread(dpcd))
@@ -1302,24 +1337,27 @@ static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
&assr, 1);
}
- ret = msm_dp_ctrl_link_train_1(ctrl, training_step);
+ for (i = ctrl->link->lttpr_count - 1; i >= 0; i--) {
+ enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
+
+ ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, dp_phy);
+ msm_dp_ctrl_clear_training_pattern(ctrl, dp_phy);
+
+ if (ret)
+ break;
+ }
+
if (ret) {
- DRM_ERROR("link training #1 failed. ret=%d\n", ret);
+ DRM_ERROR("link training of LTTPR(s) failed. ret=%d\n", ret);
goto end;
}
- /* print success info as this is a result of user initiated action */
- drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
-
- ret = msm_dp_ctrl_link_train_2(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train_1_2(ctrl, training_step, DP_PHY_DPRX);
if (ret) {
- DRM_ERROR("link training #2 failed. ret=%d\n", ret);
+ DRM_ERROR("link training on sink failed. ret=%d\n", ret);
goto end;
}
- /* print success info as this is a result of user initiated action */
- drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
-
end:
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
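The training sequence is now per-PHY: every DPCD write picks the repeater's register bank (the ..._PHY_REPEATER(dp_phy) variants) instead of the base addresses, each LTTPR goes through clock recovery and channel equalization and has its pattern cleared before the next hop, and the sink (DP_PHY_DPRX) is trained last. Schematically, with an illustrative helper name:

    /* Sketch of the order above; train_one_phy() stands in for the
     * msm_dp_ctrl_link_train_1_2() + clear-pattern pair. */
    for (i = lttpr_count - 1; i >= 0; i--) {
            ret = train_one_phy(ctrl, DP_PHY_LTTPR(i)); /* CR, then EQ */
            if (ret)
                    return ret;
    }
    return train_one_phy(ctrl, DP_PHY_DPRX);    /* finally, the sink itself */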
@@ -1636,7 +1674,7 @@ static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl)
if (ret)
goto end;
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
@@ -1660,7 +1698,7 @@ static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
return false;
}
msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
- msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_ctrl_update_phy_vx_px(ctrl, DP_PHY_DPRX);
msm_dp_link_send_test_response(ctrl->link);
pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
@@ -1805,7 +1843,7 @@ static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl)
u8 link_status[DP_LINK_STATUS_SIZE];
int num_lanes = ctrl->link->link_params.num_lanes;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
return drm_dp_channel_eq_ok(link_status, num_lanes);
}
@@ -1863,7 +1901,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
@@ -1888,7 +1926,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- msm_dp_ctrl_read_link_status(ctrl, link_status);
+ drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
if (!drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes))
@@ -1902,7 +1940,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
}
/* stop link training before start re training */
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
}
rc = msm_dp_ctrl_reinitialize_mainlink(ctrl);
@@ -1926,7 +1964,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
* link training failed
* end txing train pattern here
*/
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
msm_dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
@@ -1997,7 +2035,7 @@ int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train
msm_dp_ctrl_link_retrain(ctrl);
/* stop txing train pattern to end link training */
- msm_dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl, DP_PHY_DPRX);
/*
* Set up transfer unit values and set controller state to send
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index bbc47d86ae9e..a48e6db4f156 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/string_choices.h>
#include <drm/display/drm_dp_aux_bus.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/drm_edid.h>
#include "msm_drv.h"
@@ -127,6 +128,11 @@ static const struct msm_dp_desc msm_dp_desc_sa8775p[] = {
{}
};
+static const struct msm_dp_desc msm_dp_desc_sdm845[] = {
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
+ {}
+};
+
static const struct msm_dp_desc msm_dp_desc_sc7180[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
@@ -179,7 +185,7 @@ static const struct of_device_id msm_dp_dt_match[] = {
{ .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x },
{ .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp },
{ .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp },
- { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sdm845 },
{ .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 },
{ .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 },
{ .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 },
@@ -288,13 +294,6 @@ static int msm_dp_display_bind(struct device *dev, struct device *master,
goto end;
}
-
- rc = msm_dp_register_audio_driver(dev, dp->audio);
- if (rc) {
- DRM_ERROR("Audio registration Dp failed\n");
- goto end;
- }
-
rc = msm_dp_hpd_event_thread_start(dp);
if (rc) {
DRM_ERROR("Event thread create failed\n");
@@ -316,7 +315,6 @@ static void msm_dp_display_unbind(struct device *dev, struct device *master,
of_dp_aux_depopulate_bus(dp->aux);
- msm_dp_unregister_audio_driver(dev, dp->audio);
msm_dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
@@ -367,17 +365,21 @@ static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *d
return 0;
}
-static void msm_dp_display_lttpr_init(struct msm_dp_display_private *dp)
+static int msm_dp_display_lttpr_init(struct msm_dp_display_private *dp, u8 *dpcd)
{
- u8 lttpr_caps[DP_LTTPR_COMMON_CAP_SIZE];
- int rc;
+ int rc, lttpr_count;
- if (drm_dp_read_lttpr_common_caps(dp->aux, dp->panel->dpcd, lttpr_caps))
- return;
+ if (drm_dp_read_lttpr_common_caps(dp->aux, dpcd, dp->link->lttpr_common_caps))
+ return 0;
- rc = drm_dp_lttpr_init(dp->aux, drm_dp_lttpr_count(lttpr_caps));
- if (rc)
+ lttpr_count = drm_dp_lttpr_count(dp->link->lttpr_common_caps);
+ rc = drm_dp_lttpr_init(dp->aux, lttpr_count);
+ if (rc) {
DRM_ERROR("failed to set LTTPRs transparency mode, rc=%d\n", rc);
+ return 0;
+ }
+
+ return lttpr_count;
}
static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
@@ -385,12 +387,17 @@ static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
struct drm_connector *connector = dp->msm_dp_display.connector;
const struct drm_display_info *info = &connector->display_info;
int rc = 0;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
- rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
+ rc = drm_dp_read_dpcd_caps(dp->aux, dpcd);
if (rc)
goto end;
- msm_dp_display_lttpr_init(dp);
+ dp->link->lttpr_count = msm_dp_display_lttpr_init(dp, dpcd);
+
+ rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
+ if (rc)
+ goto end;
msm_dp_link_process_request(dp->link);
@@ -626,9 +633,9 @@ static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display,
struct msm_dp_display_private, msm_dp_display);
/* notify audio subsystem only if sink supports audio */
- if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev &&
- dp->audio_supported)
- msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged);
+ if (dp->audio_supported)
+ drm_connector_hdmi_audio_plugged_notify(msm_dp_display->connector,
+ plugged);
}
static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
@@ -907,19 +914,6 @@ static int msm_dp_display_disable(struct msm_dp_display_private *dp)
return 0;
}
-int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
- hdmi_codec_plugged_cb fn, struct device *codec_dev)
-{
- bool plugged;
-
- msm_dp_display->plugged_cb = fn;
- msm_dp_display->codec_dev = codec_dev;
- plugged = msm_dp_display->link_ready;
- msm_dp_display_handle_plugged_change(msm_dp_display, plugged);
-
- return 0;
-}
-
/**
* msm_dp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index ecbc2d92f546..cc6e2cab36e9 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -7,7 +7,6 @@
#define _DP_DISPLAY_H_
#include "dp_panel.h"
-#include <sound/hdmi-codec.h>
#include "disp/msm_disp_snapshot.h"
#define DP_MAX_PIXEL_CLK_KHZ 675000
@@ -15,7 +14,6 @@
struct msm_dp {
struct drm_device *drm_dev;
struct platform_device *pdev;
- struct device *codec_dev;
struct drm_connector *connector;
struct drm_bridge *next_bridge;
bool link_ready;
@@ -25,14 +23,10 @@ struct msm_dp {
bool is_edp;
bool internal_hpd;
- hdmi_codec_plugged_cb plugged_cb;
-
struct msm_dp_audio *msm_dp_audio;
bool psr_supported;
};
-int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
- hdmi_codec_plugged_cb fn, struct device *codec_dev);
int msm_dp_display_get_modes(struct msm_dp *msm_dp_display);
bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display);
int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index cca57e56c906..f222d7ccaa88 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -12,6 +12,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
+#include "dp_audio.h"
#include "dp_drm.h"
/**
@@ -114,6 +115,9 @@ static const struct drm_bridge_funcs msm_dp_bridge_ops = {
.hpd_disable = msm_dp_bridge_hpd_disable,
.hpd_notify = msm_dp_bridge_hpd_notify,
.debugfs_init = msm_dp_bridge_debugfs_init,
+
+ .dp_audio_prepare = msm_dp_audio_prepare,
+ .dp_audio_shutdown = msm_dp_audio_shutdown,
};
static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
@@ -296,14 +300,15 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
struct msm_dp_bridge *msm_dp_bridge;
struct drm_bridge *bridge;
- msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL);
- if (!msm_dp_bridge)
- return -ENOMEM;
+ msm_dp_bridge = devm_drm_bridge_alloc(dev->dev, struct msm_dp_bridge, bridge,
+ msm_dp_display->is_edp ? &msm_edp_bridge_ops :
+ &msm_dp_bridge_ops);
+ if (IS_ERR(msm_dp_bridge))
+ return PTR_ERR(msm_dp_bridge);
msm_dp_bridge->msm_dp_display = msm_dp_display;
bridge = &msm_dp_bridge->bridge;
- bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops;
bridge->type = msm_dp_display->connector_type;
bridge->ycbcr_420_allowed = yuv_supported;
@@ -320,9 +325,13 @@ int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
*/
if (!msm_dp_display->is_edp) {
bridge->ops =
+ DRM_BRIDGE_OP_DP_AUDIO |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_MODES;
+ bridge->hdmi_audio_dev = &msm_dp_display->pdev->dev;
+ bridge->hdmi_audio_max_i2s_playback_channels = 8;
+ bridge->hdmi_audio_dai_port = -1;
}
rc = devm_drm_bridge_add(dev->dev, bridge);
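With DRM_BRIDGE_OP_DP_AUDIO set, the bridge-connector core takes over what dp_audio.c used to do by hand: it creates the hdmi-codec device from hdmi_audio_dev, the 8-channel I2S limit and the DAI port set above, and dispatches ALSA hw_params/shutdown into .dp_audio_prepare/.dp_audio_shutdown, which is why the driver-local platform_device_register_data() plumbing could be deleted.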
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 1a1fbb2d7d4f..92a9077959b3 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -714,21 +714,21 @@ end:
static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link)
{
- int len;
+ int ret;
link->prev_sink_count = link->msm_dp_link.sink_count;
- len = drm_dp_read_sink_count(link->aux);
- if (len < 0) {
+ ret = drm_dp_read_sink_count(link->aux);
+ if (ret < 0) {
DRM_ERROR("DP parse sink count failed\n");
- return len;
+ return ret;
}
- link->msm_dp_link.sink_count = len;
+ link->msm_dp_link.sink_count = ret;
- len = drm_dp_dpcd_read_link_status(link->aux,
- link->link_status);
- if (len < DP_LINK_STATUS_SIZE) {
+ ret = drm_dp_dpcd_read_link_status(link->aux,
+ link->link_status);
+ if (ret < 0) {
DRM_ERROR("DP link status read failed\n");
- return len;
+ return ret;
}
return msm_dp_link_parse_request(link);
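The len < DP_LINK_STATUS_SIZE comparison is gone because drm_dp_dpcd_read_link_status() now returns zero or a negative errno rather than a byte count; renaming len to ret makes the new contract explicit.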
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 8db5d5698a97..ba47c6d19fbf 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -7,6 +7,7 @@
#define _DP_LINK_H_
#include "dp_aux.h"
+#include <drm/display/drm_dp_helper.h>
#define DS_PORT_STATUS_CHANGED 0x200
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
@@ -60,6 +61,9 @@ struct msm_dp_link_phy_params {
};
struct msm_dp_link {
+ u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
+ int lttpr_count;
+
u32 sink_request;
u32 test_response;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 92415bf8aa16..4e8ab75c771b 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -47,7 +47,7 @@ static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
{
- int rc;
+ int rc, max_lttpr_lanes, max_lttpr_rate;
struct msm_dp_panel_private *panel;
struct msm_dp_link_info *link_info;
u8 *dpcd, major, minor;
@@ -75,6 +75,16 @@ static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
if (link_info->rate > msm_dp_panel->max_dp_link_rate)
link_info->rate = msm_dp_panel->max_dp_link_rate;
+ /* Limit data lanes from LTTPR capabilities, if any */
+ max_lttpr_lanes = drm_dp_lttpr_max_lane_count(panel->link->lttpr_common_caps);
+ if (max_lttpr_lanes && max_lttpr_lanes < link_info->num_lanes)
+ link_info->num_lanes = max_lttpr_lanes;
+
+ /* Limit link rate from LTTPR capabilities, if any */
+ max_lttpr_rate = drm_dp_lttpr_max_link_rate(panel->link->lttpr_common_caps);
+ if (max_lttpr_rate && max_lttpr_rate < link_info->rate)
+ link_info->rate = max_lttpr_rate;
+
drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
drm_dbg_dp(panel->drm_dev, "lane_count=%d\n", link_info->num_lanes);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 7754dcec33d0..7675558ae2e5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -221,6 +221,22 @@ static const struct msm_dsi_config sc7280_dsi_cfg = {
},
};
+static const struct regulator_bulk_data sa8775p_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 8300 }, /* 1.2 V */
+ { .supply = "refgen" },
+};
+
+static const struct msm_dsi_config sa8775p_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sa8775p_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sa8775p_dsi_regulators),
+ .bus_clk_names = dsi_v2_4_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
+ .io_start = {
+ { 0xae94000, 0xae96000 },
+ },
+};
+
static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
.link_clk_set_rate = dsi_link_clk_set_rate_v2,
.link_clk_enable = dsi_link_clk_enable_v2,
@@ -294,6 +310,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
&sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_1,
+ &sa8775p_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_6_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 120cb65164c1..65b0705fac0e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -27,6 +27,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
+#define MSM_DSI_6G_VER_MINOR_V2_5_1 0x20050001
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4fabb01345aa..ca400924d4ee 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -434,12 +434,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
}
static int dsi_mgr_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge,
+ return drm_bridge_attach(encoder, msm_dsi->next_bridge,
bridge, flags);
}
@@ -461,15 +462,14 @@ int msm_dsi_manager_connector_init(struct msm_dsi *msm_dsi,
struct drm_connector *connector;
int ret;
- dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
- sizeof(*dsi_bridge), GFP_KERNEL);
- if (!dsi_bridge)
- return -ENOMEM;
+ dsi_bridge = devm_drm_bridge_alloc(msm_dsi->dev->dev, struct dsi_bridge, base,
+ &dsi_mgr_bridge_funcs);
+ if (IS_ERR(dsi_bridge))
+ return PTR_ERR(dsi_bridge);
dsi_bridge->id = msm_dsi->id;
bridge = &dsi_bridge->base;
- bridge->funcs = &dsi_mgr_bridge_funcs;
ret = devm_drm_bridge_add(msm_dsi->dev->dev, bridge);
if (ret)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index c0bcc6828963..5973d7325699 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -581,6 +581,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_7nm_cfgs },
{ .compatible = "qcom,dsi-phy-7nm-8150",
.data = &dsi_phy_7nm_8150_cfgs },
+ { .compatible = "qcom,sa8775p-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_8775p_cfgs },
+ { .compatible = "qcom,sar2130p-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_sar2130p_cfgs },
{ .compatible = "qcom,sc7280-dsi-phy-7nm",
.data = &dsi_phy_7nm_7280_cfgs },
{ .compatible = "qcom,sm6375-dsi-phy-7nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 1925418d9999..7ea608f620fe 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -59,6 +59,8 @@ extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 9812b4d69197..af2e30f3f842 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -704,6 +704,13 @@ static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy);
+ /*
+ * Store also proper vco_current_rate, because its value will be used in
+ * dsi_10nm_pll_restore_state().
+ */
+ if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE))
+ pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate;
+
return 0;
}
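Without this, a display handed over from the bootloader would have a running PLL but a zero vco_current_rate, since nothing has called set_rate() yet, and the later restore would program garbage; the recalc seeds the cached rate from live hardware state, with the PHY's min_pll_rate standing in when the readback computes to zero.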
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index a92decbee5b5..c19890358b74 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -1147,6 +1147,10 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
+static const struct regulator_bulk_data dsi_phy_7nm_48000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 48000 },
+};
+
static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 98000 },
};
@@ -1289,6 +1293,52 @@ const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs = {
.quirks = DSI_PHY_7NM_QUIRK_V4_3,
};
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8775p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_48000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_48000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_sar2130p_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_97800uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
+
const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_98400uA_regulators,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 248541ff4492..2fd388b892dc 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -8,6 +8,7 @@
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge_connector.h>
@@ -199,12 +200,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = msm_hdmi_hpd_enable(hdmi->bridge);
- if (ret < 0) {
- DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- goto fail;
- }
-
return 0;
fail:
@@ -220,28 +215,24 @@ fail:
* The hdmi device:
*/
-#define HDMI_CFG(item, entry) \
- .item ## _names = item ##_names_ ## entry, \
- .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
-
-static const char *hpd_reg_names_8960[] = {"core-vdda"};
-static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
+static const char * const pwr_reg_names_8960[] = {"core-vdda"};
+static const char * const pwr_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static const struct hdmi_platform_config hdmi_tx_8960_config = {
- HDMI_CFG(hpd_reg, 8960),
- HDMI_CFG(hpd_clk, 8960),
+ .pwr_reg_names = pwr_reg_names_8960,
+ .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8960),
+ .pwr_clk_names = pwr_clk_names_8960,
+ .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8960),
};
-static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
-static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
-static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
-static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
+static const char * const pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
+static const char * const pwr_clk_names_8x74[] = {"iface", "core", "mdp_core", "alt_iface"};
static const struct hdmi_platform_config hdmi_tx_8974_config = {
- HDMI_CFG(pwr_reg, 8x74),
- HDMI_CFG(pwr_clk, 8x74),
- HDMI_CFG(hpd_clk, 8x74),
- .hpd_freq = hpd_clk_freq_8x74,
+ .pwr_reg_names = pwr_reg_names_8x74,
+ .pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names_8x74),
+ .pwr_clk_names = pwr_clk_names_8x74,
+ .pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names_8x74),
};
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
@@ -264,9 +255,6 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct msm_drm_private *priv = dev_get_drvdata(master);
if (priv->hdmi) {
- if (priv->hdmi->bridge)
- msm_hdmi_hpd_disable(priv->hdmi);
-
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
@@ -296,6 +284,7 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
hdmi->pdev = pdev;
hdmi->config = config;
spin_lock_init(&hdmi->reg_lock);
+ mutex_init(&hdmi->state_mutex);
ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge);
if (ret && ret != -ENODEV)
@@ -322,20 +311,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (hdmi->irq < 0)
return hdmi->irq;
- hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
- config->hpd_reg_cnt,
- sizeof(hdmi->hpd_regs[0]),
- GFP_KERNEL);
- if (!hdmi->hpd_regs)
- return -ENOMEM;
-
- for (i = 0; i < config->hpd_reg_cnt; i++)
- hdmi->hpd_regs[i].supply = config->hpd_reg_names[i];
-
- ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to get hpd regulators\n");
-
hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
config->pwr_reg_cnt,
sizeof(hdmi->pwr_regs[0]),
@@ -350,25 +325,6 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get pwr regulators\n");
- hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
- config->hpd_clk_cnt,
- sizeof(hdmi->hpd_clks[0]),
- GFP_KERNEL);
- if (!hdmi->hpd_clks)
- return -ENOMEM;
-
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- struct clk *clk;
-
- clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "failed to get hpd clk: %s\n",
- config->hpd_clk_names[i]);
-
- hdmi->hpd_clks[i] = clk;
- }
-
hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
config->pwr_clk_cnt,
sizeof(hdmi->pwr_clks[0]),
@@ -376,17 +332,17 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
if (!hdmi->pwr_clks)
return -ENOMEM;
- for (i = 0; i < config->pwr_clk_cnt; i++) {
- struct clk *clk;
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ hdmi->pwr_clks[i].id = config->pwr_clk_names[i];
- clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "failed to get pwr clk: %s\n",
- config->pwr_clk_names[i]);
+ ret = devm_clk_bulk_get(&pdev->dev, config->pwr_clk_cnt, hdmi->pwr_clks);
+ if (ret)
+ return ret;
- hdmi->pwr_clks[i] = clk;
- }
+ hdmi->extp_clk = devm_clk_get_optional(&pdev->dev, "extp");
+ if (IS_ERR(hdmi->extp_clk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->extp_clk),
+ "failed to get extp clock\n");
hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
/* This will catch e.g. -EPROBE_DEFER */
@@ -432,6 +388,48 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev)
msm_hdmi_put_phy(hdmi);
}
+static int msm_hdmi_runtime_suspend(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ const struct hdmi_platform_config *config = hdmi->config;
+
+ clk_bulk_disable_unprepare(config->pwr_clk_cnt, hdmi->pwr_clks);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
+
+ return 0;
+}
+
+static int msm_hdmi_runtime_resume(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ const struct hdmi_platform_config *config = hdmi->config;
+ int ret;
+
+ ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ goto fail;
+
+ ret = clk_bulk_prepare_enable(config->pwr_clk_cnt, hdmi->pwr_clks);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ pinctrl_pm_select_sleep_state(dev);
+
+ return ret;
+}
+
+DEFINE_RUNTIME_DEV_PM_OPS(msm_hdmi_pm_ops, msm_hdmi_runtime_suspend, msm_hdmi_runtime_resume, NULL);
+
static const struct of_device_id msm_hdmi_dt_match[] = {
{ .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
@@ -449,6 +447,7 @@ static struct platform_driver msm_hdmi_driver = {
.driver = {
.name = "hdmi_msm",
.of_match_table = msm_hdmi_dt_match,
+ .pm = &msm_hdmi_pm_ops,
},
};
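Not part of the patch, for orientation: the probe path above stops grabbing HPD regulators and clocks because the new runtime-PM callbacks now own the regulators, the pinctrl state, and the bulk clocks. A minimal sketch of that shape, with placeholder foo_* names and drvdata assumed to be set in probe:

/* Minimal runtime-PM sketch; foo_* names are placeholders. */
#include <linux/clk.h>
#include <linux/pm_runtime.h>

struct foo {
	int num_clks;
	struct clk_bulk_data *clks;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(foo->num_clks, foo->clks);
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	return clk_bulk_prepare_enable(foo->num_clks, foo->clks);
}

static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
				 foo_runtime_resume, NULL);

The driver struct then points .pm at the ops (optionally via pm_ptr() so they can be discarded when CONFIG_PM is off), and consumers bracket hardware access with pm_runtime_resume_and_get()/pm_runtime_put(), as the converted call sites below do.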
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index a5f481c39277..d5e572d10d6a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -41,16 +41,17 @@ struct hdmi {
/* video state: */
bool power_on;
+ bool hpd_enabled;
+ struct mutex state_mutex; /* protects power_on and hpd_enabled */
unsigned long int pixclock;
void __iomem *mmio;
void __iomem *qfprom_mmio;
phys_addr_t mmio_phy_addr;
- struct regulator_bulk_data *hpd_regs;
struct regulator_bulk_data *pwr_regs;
- struct clk **hpd_clks;
- struct clk **pwr_clks;
+ struct clk_bulk_data *pwr_clks;
+ struct clk *extp_clk;
struct gpio_desc *hpd_gpiod;
@@ -83,21 +84,12 @@ struct hdmi {
/* platform config data (ie. from DT, or pdata) */
struct hdmi_platform_config {
- /* regulators that need to be on for hpd: */
- const char **hpd_reg_names;
- int hpd_reg_cnt;
-
/* regulators that need to be on for screen pwr: */
- const char **pwr_reg_names;
+ const char * const *pwr_reg_names;
int pwr_reg_cnt;
- /* clks that need to be on for hpd: */
- const char **hpd_clk_names;
- const long unsigned *hpd_freq;
- int hpd_clk_cnt;
-
- /* clks that need to be on for screen pwr (ie pixel clk): */
- const char **pwr_clk_names;
+ /* clks that need to be on: */
+ const char * const *pwr_clk_names;
int pwr_clk_cnt;
};
@@ -224,8 +216,8 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi);
void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
enum drm_connector_status msm_hdmi_bridge_detect(
struct drm_bridge *bridge);
-int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
-void msm_hdmi_hpd_disable(struct hdmi *hdmi);
+void msm_hdmi_hpd_enable(struct drm_bridge *bridge);
+void msm_hdmi_hpd_disable(struct drm_bridge *bridge);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 8bb975e82c17..b9ec14ef2c20 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <linux/hdmi.h>
@@ -12,71 +13,9 @@
#include "hdmi.h"
-/* Supported HDMI Audio sample rates */
-#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
-#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
-#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
-#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
-#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
-#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
-#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
-#define MSM_HDMI_SAMPLE_RATE_MAX 7
-
-
-struct hdmi_msm_audio_acr {
- uint32_t n; /* N parameter for clock regeneration */
- uint32_t cts; /* CTS parameter for clock regeneration */
-};
-
-struct hdmi_msm_audio_arcs {
- unsigned long int pixclock;
- struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
-};
-
-#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
-
-/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
-/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
-static const struct hdmi_msm_audio_arcs acr_lut[] = {
- /* 25.200MHz */
- HDMI_MSM_AUDIO_ARCS(25200, {
- {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
- {12288, 25200}, {25088, 28000}, {24576, 25200} }),
- /* 27.000MHz */
- HDMI_MSM_AUDIO_ARCS(27000, {
- {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
- {12288, 27000}, {25088, 30000}, {24576, 27000} }),
- /* 27.027MHz */
- HDMI_MSM_AUDIO_ARCS(27030, {
- {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
- {12288, 27027}, {25088, 30030}, {24576, 27027} }),
- /* 74.250MHz */
- HDMI_MSM_AUDIO_ARCS(74250, {
- {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
- {12288, 74250}, {25088, 82500}, {24576, 74250} }),
- /* 148.500MHz */
- HDMI_MSM_AUDIO_ARCS(148500, {
- {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
- {12288, 148500}, {25088, 165000}, {24576, 148500} }),
-};
-
-static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
- const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
- if (arcs->pixclock == pixclock)
- return arcs;
- }
-
- return NULL;
-}
-
int msm_hdmi_audio_update(struct hdmi *hdmi)
{
struct hdmi_audio *audio = &hdmi->audio;
- const struct hdmi_msm_audio_arcs *arcs = NULL;
bool enabled = audio->enabled;
uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
uint32_t audio_config;
@@ -94,15 +33,6 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
enabled = false;
}
- if (enabled) {
- arcs = get_arcs(hdmi->pixclock);
- if (!arcs) {
- DBG("disabling audio: unsupported pixclock: %lu",
- hdmi->pixclock);
- enabled = false;
- }
- }
-
/* Read first before writing */
acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
@@ -116,15 +46,12 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
uint32_t n, cts, multiplier;
enum hdmi_acr_cts select;
- n = arcs->lut[audio->rate].n;
- cts = arcs->lut[audio->rate].cts;
+ drm_hdmi_acr_get_n_cts(hdmi->pixclock, audio->rate, &n, &cts);
- if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
+ if (audio->rate == 192000 || audio->rate == 176400) {
multiplier = 4;
n >>= 2; /* divide N by 4 and use multiplier */
- } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
+ } else if (audio->rate == 96000 || audio->rate == 88200) {
multiplier = 2;
n >>= 1; /* divide N by 2 and use multiplier */
} else {
@@ -137,13 +64,11 @@ int msm_hdmi_audio_update(struct hdmi *hdmi)
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
- if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
+ if (audio->rate == 48000 || audio->rate == 96000 ||
+ audio->rate == 192000)
select = ACR_48;
- else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
- (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
+ else if (audio->rate == 44100 || audio->rate == 88200 ||
+ audio->rate == 176400)
select = ACR_44;
else /* default to 32k */
select = ACR_32;
@@ -204,7 +129,6 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- unsigned int rate;
int ret;
drm_dbg_driver(bridge->dev, "%u Hz, %d bit, %d channels\n",
@@ -214,25 +138,12 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
switch (params->sample_rate) {
case 32000:
- rate = MSM_HDMI_SAMPLE_RATE_32KHZ;
- break;
case 44100:
- rate = MSM_HDMI_SAMPLE_RATE_44_1KHZ;
- break;
case 48000:
- rate = MSM_HDMI_SAMPLE_RATE_48KHZ;
- break;
case 88200:
- rate = MSM_HDMI_SAMPLE_RATE_88_2KHZ;
- break;
case 96000:
- rate = MSM_HDMI_SAMPLE_RATE_96KHZ;
- break;
case 176400:
- rate = MSM_HDMI_SAMPLE_RATE_176_4KHZ;
- break;
case 192000:
- rate = MSM_HDMI_SAMPLE_RATE_192KHZ;
break;
default:
drm_err(bridge->dev, "rate[%d] not supported!\n",
@@ -245,7 +156,7 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector,
if (ret)
return ret;
- hdmi->audio.rate = rate;
+ hdmi->audio.rate = params->sample_rate;
hdmi->audio.channels = params->cea.channels;
hdmi->audio.enabled = true;
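Annotation, not part of the patch: the deleted acr_lut encoded the HDMI-spec relation CTS = f_TMDS * N / (128 * fs) for the recommended N values, which is what the drm_hdmi_acr_get_n_cts() helper now computes. A simplified sketch of that relationship; for the pixel clocks in the removed table the division is exact and reproduces every entry:

/* Sketch only: recommended-N table from the HDMI spec plus the defining
 * relation CTS = f_TMDS * N / (128 * fs). */
#include <linux/math64.h>
#include <linux/types.h>

static u32 recommended_n(unsigned int fs)
{
	switch (fs) {
	case 32000:  return 4096;
	case 44100:  return 6272;
	case 88200:  return 12544;
	case 176400: return 25088;
	case 48000:  return 6144;
	case 96000:  return 12288;
	case 192000: return 24576;
	default:     return 0;
	}
}

static u32 cts_for(unsigned long tmds_rate, unsigned int fs)
{
	return div_u64((u64)tmds_rate * recommended_n(fs), 128 * fs);
}

For example, cts_for(25200000, 44100) = 25200000 * 6272 / 5644800 = 28000, matching the removed {6272, 28000} LUT entry.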
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 1456354c8af4..53a7ce8cc7bc 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -18,52 +18,34 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
-
- pm_runtime_get_sync(&hdmi->pdev->dev);
+ int ret;
- ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret);
+ pm_runtime_resume_and_get(&hdmi->pdev->dev);
- if (config->pwr_clk_cnt > 0) {
+ if (hdmi->extp_clk) {
DBG("pixclock: %lu", hdmi->pixclock);
- ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
- config->pwr_clk_names[0], ret);
- }
- }
+ ret = clk_set_rate(hdmi->extp_clk, hdmi->pixclock);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to set extp clk rate: %d\n", ret);
- for (i = 0; i < config->pwr_clk_cnt; i++) {
- ret = clk_prepare_enable(hdmi->pwr_clks[i]);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
- config->pwr_clk_names[i], ret);
- }
+ ret = clk_prepare_enable(hdmi->extp_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable extp clk: %d\n", ret);
}
}
static void power_off(struct drm_bridge *bridge)
{
- struct drm_device *dev = bridge->dev;
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
- int i, ret;
/* TODO do we need to wait for final vblank somewhere before
* cutting the clocks?
*/
mdelay(16 + 4);
- for (i = 0; i < config->pwr_clk_cnt; i++)
- clk_disable_unprepare(hdmi->pwr_clks[i]);
-
- ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
+ if (hdmi->extp_clk)
+ clk_disable_unprepare(hdmi->extp_clk);
pm_runtime_put(&hdmi->pdev->dev);
}
@@ -320,13 +302,16 @@ static void msm_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
msm_hdmi_set_timings(hdmi, &crtc_state->adjusted_mode);
+ mutex_lock(&hdmi->state_mutex);
if (!hdmi->power_on) {
msm_hdmi_phy_resource_enable(phy);
msm_hdmi_power_on(bridge);
hdmi->power_on = true;
- if (connector->display_info.is_hdmi)
- msm_hdmi_audio_update(hdmi);
}
+ mutex_unlock(&hdmi->state_mutex);
+
+ if (connector->display_info.is_hdmi)
+ msm_hdmi_audio_update(hdmi);
drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
@@ -349,7 +334,10 @@ static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
msm_hdmi_hdcp_off(hdmi->hdcp_ctrl);
DBG("power down");
- msm_hdmi_set_mode(hdmi, false);
+
+ /* Keep the HDMI enabled if the HPD is enabled */
+ mutex_lock(&hdmi->state_mutex);
+ msm_hdmi_set_mode(hdmi, hdmi->hpd_enabled);
msm_hdmi_phy_powerdown(phy);
@@ -360,6 +348,7 @@ static void msm_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
msm_hdmi_audio_update(hdmi);
msm_hdmi_phy_resource_disable(phy);
}
+ mutex_unlock(&hdmi->state_mutex);
}
static void msm_hdmi_set_timings(struct hdmi *hdmi,
@@ -411,9 +400,6 @@ static void msm_hdmi_set_timings(struct hdmi *hdmi,
frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
DBG("frame_ctrl=%08x", frame_ctrl);
hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
-
- if (hdmi->connector->display_info.is_hdmi)
- msm_hdmi_audio_update(hdmi);
}
static const struct drm_edid *msm_hdmi_bridge_edid_read(struct drm_bridge *bridge,
@@ -440,7 +426,6 @@ static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct dr
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
struct msm_drm_private *priv = bridge->dev->dev_private;
struct msm_kms *kms = priv->kms;
long actual;
@@ -453,8 +438,8 @@ static enum drm_mode_status msm_hdmi_bridge_tmds_char_rate_valid(const struct dr
actual = kms->funcs->round_pixclk(kms,
tmds_rate,
hdmi_bridge->hdmi->encoder);
- else if (config->pwr_clk_cnt > 0)
- actual = clk_round_rate(hdmi->pwr_clks[0], tmds_rate);
+ else if (hdmi->extp_clk)
+ actual = clk_round_rate(hdmi->extp_clk, tmds_rate);
else
actual = tmds_rate;
@@ -474,6 +459,8 @@ static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
.atomic_post_disable = msm_hdmi_bridge_atomic_post_disable,
.edid_read = msm_hdmi_bridge_edid_read,
.detect = msm_hdmi_bridge_detect,
+ .hpd_enable = msm_hdmi_hpd_enable,
+ .hpd_disable = msm_hdmi_hpd_disable,
.hdmi_tmds_char_rate_valid = msm_hdmi_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = msm_hdmi_bridge_clear_infoframe,
.hdmi_write_infoframe = msm_hdmi_bridge_write_infoframe,
@@ -498,16 +485,15 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
struct hdmi_bridge *hdmi_bridge;
int ret;
- hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
- sizeof(*hdmi_bridge), GFP_KERNEL);
- if (!hdmi_bridge)
- return -ENOMEM;
+ hdmi_bridge = devm_drm_bridge_alloc(hdmi->dev->dev, struct hdmi_bridge, base,
+ &msm_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi_bridge))
+ return PTR_ERR(hdmi_bridge);
hdmi_bridge->hdmi = hdmi;
INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
bridge = &hdmi_bridge->base;
- bridge->funcs = &msm_hdmi_bridge_funcs;
bridge->ddc = hdmi->i2c;
bridge->type = DRM_MODE_CONNECTOR_HDMIA;
bridge->vendor = "Qualcomm";
@@ -515,6 +501,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge->ops = DRM_BRIDGE_OP_HPD |
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
DRM_BRIDGE_OP_EDID;
bridge->hdmi_audio_max_i2s_playback_channels = 8;
bridge->hdmi_audio_dev = &hdmi->pdev->dev;
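Annotation: devm_drm_bridge_alloc() replaces the devm_kzalloc() plus manual funcs assignment as part of the move to refcounted bridges, where the funcs pointer must be in place from allocation. Generic shape as a sketch, with hypothetical my_* names:

/* Sketch: allocate the wrapper struct, initialize the embedded
 * drm_bridge ("base" is the member name inside the wrapper) and set
 * its funcs in one step; returns ERR_PTR() on failure, not NULL. */
wrapper = devm_drm_bridge_alloc(dev, struct my_wrapper, base,
				&my_bridge_funcs);
if (IS_ERR(wrapper))
	return PTR_ERR(wrapper);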
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
index 9ce0ffa35417..407e6c449ee0 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -60,68 +60,30 @@ static void msm_hdmi_phy_reset(struct hdmi *hdmi)
}
}
-static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
-{
- const struct hdmi_platform_config *config = hdmi->config;
- struct device *dev = &hdmi->pdev->dev;
- int i, ret;
-
- if (enable) {
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- if (config->hpd_freq && config->hpd_freq[i]) {
- ret = clk_set_rate(hdmi->hpd_clks[i],
- config->hpd_freq[i]);
- if (ret)
- dev_warn(dev,
- "failed to set clk %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
-
- ret = clk_prepare_enable(hdmi->hpd_clks[i]);
- if (ret) {
- DRM_DEV_ERROR(dev,
- "failed to enable hpd clk: %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
- }
- } else {
- for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
- clk_disable_unprepare(hdmi->hpd_clks[i]);
- }
-}
-
-int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
+void msm_hdmi_hpd_enable(struct drm_bridge *bridge)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
- const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
int ret;
unsigned long flags;
- ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret);
- goto fail;
- }
-
- ret = pinctrl_pm_select_default_state(dev);
- if (ret) {
- DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
- goto fail;
- }
-
if (hdmi->hpd_gpiod)
gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1);
- pm_runtime_get_sync(dev);
- enable_hpd_clocks(hdmi, true);
+ ret = pm_runtime_resume_and_get(dev);
+ if (WARN_ON(ret))
+ return;
+ mutex_lock(&hdmi->state_mutex);
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_reset(hdmi);
msm_hdmi_set_mode(hdmi, true);
+ hdmi->hpd_enabled = true;
+ mutex_unlock(&hdmi->state_mutex);
+
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
/* enable HPD events: */
@@ -140,34 +102,23 @@ int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
-
- return 0;
-
-fail:
- return ret;
}
-void msm_hdmi_hpd_disable(struct hdmi *hdmi)
+void msm_hdmi_hpd_disable(struct drm_bridge *bridge)
{
- const struct hdmi_platform_config *config = hdmi->config;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
struct device *dev = &hdmi->pdev->dev;
- int ret;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
- msm_hdmi_set_mode(hdmi, false);
+ mutex_lock(&hdmi->state_mutex);
+ hdmi->hpd_enabled = false;
+ msm_hdmi_set_mode(hdmi, hdmi->power_on);
+ mutex_unlock(&hdmi->state_mutex);
- enable_hpd_clocks(hdmi, false);
pm_runtime_put(dev);
-
- ret = pinctrl_pm_select_sleep_state(dev);
- if (ret)
- dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
-
- ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs);
- if (ret)
- dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
@@ -202,14 +153,16 @@ void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
- uint32_t hpd_int_status;
+ u32 hpd_int_status = 0;
+ int ret;
- pm_runtime_get_sync(&hdmi->pdev->dev);
- enable_hpd_clocks(hdmi, true);
+ ret = pm_runtime_resume_and_get(&hdmi->pdev->dev);
+ if (ret)
+ goto out;
hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
- enable_hpd_clocks(hdmi, false);
+out:
pm_runtime_put(&hdmi->pdev->dev);
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
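Annotation on the two msm_hdmi_set_mode() call sites in this file and in hdmi_bridge.c (post_disable passes hpd_enabled, hpd_disable passes power_on): both encode the same invariant, each passing the flag that survives because the other is being cleared. The equivalent rule as a sketch, with a hypothetical helper name:

/* The controller stays enabled while either the display pipe or HPD
 * sensing still needs it; callers hold state_mutex. */
static void msm_hdmi_update_mode_locked(struct hdmi *hdmi)
{
	lockdep_assert_held(&hdmi->state_mutex);
	msm_hdmi_set_mode(hdmi, hdmi->power_on || hdmi->hpd_enabled);
}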
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 7aa500d24240..ebefea4fb408 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -107,11 +107,15 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
if (num == 0)
return num;
+ ret = pm_runtime_resume_and_get(&hdmi->pdev->dev);
+ if (ret)
+ return ret;
+
init_ddc(hdmi_i2c);
ret = ddc_clear_irq(hdmi_i2c);
if (ret)
- return ret;
+ goto fail;
for (i = 0; i < num; i++) {
struct i2c_msg *p = &msgs[i];
@@ -169,7 +173,7 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
- return ret;
+ goto fail;
}
ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
@@ -202,7 +206,13 @@ static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
}
}
+ pm_runtime_put(&hdmi->pdev->dev);
+
return i;
+
+fail:
+ pm_runtime_put(&hdmi->pdev->dev);
+ return ret;
}
static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 03120c54ced6..667573f1db7c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -58,7 +58,11 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
struct device *dev = &phy->pdev->dev;
int i, ret = 0;
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "runtime resume failed: %d\n", ret);
+ return ret;
+ }
ret = regulator_bulk_enable(cfg->num_regs, phy->regs);
if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 7ab607252d18..6af72162cda4 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -208,6 +208,35 @@ DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
shrink_get, shrink_set,
"0x%08llx\n");
+/*
+ * Return the number of microseconds to wait until stall-on-fault is
+ * re-enabled. A value of 0 means stall-on-fault is already enabled, or
+ * will be re-enabled on the next submit (unless there's a leftover
+ * devcoredump). This is useful for kernel tests that intentionally
+ * produce a fault and check the devcoredump: they can poll this file to
+ * wait out the cooldown period.
+ */
+
+static int
+stall_reenable_time_get(void *data, u64 *val)
+{
+ struct msm_drm_private *priv = data;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+
+ if (priv->stall_enabled)
+ *val = 0;
+ else
+ *val = max(ktime_us_delta(priv->stall_reenable_time, ktime_get()), 0);
+
+ spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(stall_reenable_time_fops,
+ stall_reenable_time_get, NULL,
+ "%lld\n");
static int msm_gem_show(struct seq_file *m, void *arg)
{
@@ -319,6 +348,9 @@ static void msm_debugfs_gpu_init(struct drm_minor *minor)
debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
&priv->disable_err_irq);
+ debugfs_create_file("stall_reenable_time_us", 0400, minor->debugfs_root,
+ priv, &stall_reenable_time_fops);
+
gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c3588dc9e537..d007687c2446 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -245,6 +245,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
+ /* Initialize stall-on-fault */
+ spin_lock_init(&priv->fault_stall_lock);
+ priv->stall_enabled = true;
+
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&priv->lru.lock);
@@ -671,7 +675,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
break;
case MSM_INFO_GET_FLAGS:
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = -EINVAL;
break;
}
@@ -926,7 +930,7 @@ static const struct drm_driver msm_driver = {
* is no external component that we need to add since LVDS is within MDP4
* itself.
*/
-static int add_components_mdp(struct device *master_dev,
+static int add_mdp_components(struct device *master_dev,
struct component_match **matchptr)
{
struct device_node *np = master_dev->of_node;
@@ -1030,7 +1034,7 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- if (of_device_is_available(np))
+ if (of_device_is_available(np) && adreno_has_gpu(np))
drm_of_component_match_add(dev, matchptr, component_compare_of, np);
of_node_put(np);
@@ -1071,7 +1075,7 @@ int msm_drv_probe(struct device *master_dev,
/* Add mdp components if we have KMS. */
if (kms_init) {
- ret = add_components_mdp(master_dev, &match);
+ ret = add_mdp_components(master_dev, &match);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a65077855201..c8afb1ea6040 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -222,6 +222,29 @@ struct msm_drm_private {
* the sw hangcheck mechanism.
*/
bool disable_err_irq;
+
+ /**
+ * @fault_stall_lock:
+ *
+ * Serialize changes to stall-on-fault state.
+ */
+ spinlock_t fault_stall_lock;
+
+ /**
+ * @stall_reenable_time:
+ *
+ * If stall_enabled is false, when to reenable stall-on-fault.
+ * Protected by @fault_stall_lock.
+ */
+ ktime_t stall_reenable_time;
+
+ /**
+ * @stall_enabled:
+ *
+ * Whether stall-on-fault is currently enabled. Protected by
+ * @fault_stall_lock.
+ */
+ bool stall_enabled;
};
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ebc9ba66efb8..2995e80fec3b 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -735,7 +735,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
msm_gem_assert_locked(obj);
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return ERR_PTR(-ENODEV);
pages = msm_gem_get_pages_locked(obj, madv);
@@ -1074,7 +1074,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_iova_spaces(obj, true);
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 85f0257e83da..ba5c4ff76292 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -224,7 +224,7 @@ msm_gem_assert_locked(struct drm_gem_object *obj)
/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
- return msm_obj->base.import_attach || msm_obj->pin_count;
+ return drm_gem_is_imported(&msm_obj->base) || msm_obj->pin_count;
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ee267490c935..2e37913d5a6a 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -50,7 +50,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
struct page **pages;
int ret = 0;
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return 0;
pages = msm_gem_pin_pages_locked(obj);
@@ -62,7 +62,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj)
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
return;
msm_gem_unpin_pages_locked(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e9aa2cc38ef..d4f71bb54e84 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref)
container_of(kref, struct msm_gem_submit, ref);
unsigned i;
+ /*
+ * In error paths, we could unref the submit without calling
+ * drm_sched_entity_push_job(), so msm_job_free() will never
+ * get called. Since drm_sched_job_cleanup() will NULL out
+ * s_fence, we can use that to detect this case.
+ */
+ if (submit->base.s_fence)
+ drm_sched_job_cleanup(&submit->base);
+
if (submit->fence_id) {
spin_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
@@ -649,6 +658,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_ringbuffer *ring;
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
+ struct sync_file *sync_file = NULL;
int out_fence_fd = -1;
unsigned i;
int ret;
@@ -858,7 +868,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- struct sync_file *sync_file = sync_file_create(submit->user_fence);
+ sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
} else {
@@ -892,8 +902,11 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0))
+ if (ret && (out_fence_fd >= 0)) {
put_unused_fd(out_fence_fd);
+ if (sync_file)
+ fput(sync_file->file);
+ }
if (!IS_ERR_OR_NULL(submit)) {
msm_gem_submit_put(submit);
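Annotation on the leak fix above: until fd_install() ties them together, the reserved fd number and the struct file are two independent resources that must be unwound separately; the old error path dropped the fd but leaked the sync_file. The generic pattern as a sketch (some_later_step_fails is a placeholder):

int fd = get_unused_fd_flags(O_CLOEXEC);

if (fd < 0)
	return fd;

file = sync_file_create(fence);
if (!file) {
	put_unused_fd(fd);	/* releases only the fd number */
	return -ENOMEM;
}

if (some_later_step_fails) {
	put_unused_fd(fd);	/* fd was never installed */
	fput(file->file);	/* drop the file reference too */
	return ret;
}

fd_install(fd, file->file);	/* cannot fail; fd now owns the file */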
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c380d9d9f5af..3947f7ba1421 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -257,7 +257,8 @@ out:
}
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
struct msm_gpu_state *state;
@@ -276,7 +277,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
/* Fill in the additional crash state information */
state->comm = kstrdup(comm, GFP_KERNEL);
state->cmd = kstrdup(cmd, GFP_KERNEL);
- state->fault_info = gpu->fault_info;
+ if (fault_info)
+ state->fault_info = *fault_info;
if (submit) {
int i;
@@ -308,7 +310,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
}
#endif
@@ -405,7 +408,7 @@ static void recover_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);
kfree(cmd);
kfree(comm);
@@ -459,9 +462,8 @@ out_unlock:
msm_gpu_retire(gpu);
}
-static void fault_worker(struct kthread_work *work)
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
@@ -484,16 +486,13 @@ static void fault_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
resume_smmu:
- memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
- gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-
mutex_unlock(&gpu->lock);
}
@@ -521,7 +520,7 @@ static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
+ struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
uint32_t fence = ring->memptrs->fence;
@@ -882,7 +881,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
- kthread_init_work(&gpu->fault_work, fault_worker);
priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
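Annotation: with fault_work and gpu->fault_info gone, the fault path is expected to call the new hook directly. A hedged sketch of the presumed caller shape (the actual call site lives in the adreno fault handling, outside this hunk; field names follow struct msm_gpu_fault_info):

/* Hypothetical caller sketch: capture crash state synchronously from
 * the fault path instead of scheduling fault_work. */
struct msm_gpu_fault_info fault_info = {
	.iova  = iova,
	.flags = flags,
	.type  = "UNKNOWN",
	.block = "unknown",
};

msm_gpu_fault_crashstate_capture(gpu, &fault_info);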
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index e25009150579..5bf7cd985b9c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -253,12 +253,6 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
struct timer_list hangcheck_timer;
- /* Fault info for most recent iova fault: */
- struct msm_gpu_fault_info fault_info;
-
- /* work for handling GPU ioval faults: */
- struct kthread_work fault_work;
-
/* work for handling GPU recovery: */
struct kthread_work recover_work;
@@ -668,6 +662,7 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
+bool adreno_has_gpu(struct device_node *node);
void __init adreno_register(void);
void __exit adreno_unregister(void);
@@ -705,6 +700,8 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->lock);
}
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info);
+
/*
* Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
* support expanded privileges
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 6970b0f7f457..2e1d5c343272 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -156,6 +156,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
priv->gpu_devfreq_config.downdifferential = 10;
mutex_init(&df->lock);
+ df->suspended = true;
ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
DEV_PM_QOS_MIN_FREQUENCY, 0);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index fd73dcd3f30e..739ce2c283a4 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -345,7 +345,6 @@ static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
- struct msm_mmu *mmu = &iommu->base;
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
struct adreno_smmu_fault_info info, *ptr = NULL;
@@ -359,9 +358,6 @@ static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev
pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
- if (mmu->funcs->resume_translation)
- mmu->funcs->resume_translation(mmu);
-
return 0;
}
@@ -376,12 +372,12 @@ static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *de
return -ENOSYS;
}
-static void msm_iommu_resume_translation(struct msm_mmu *mmu)
+static void msm_iommu_set_stall(struct msm_mmu *mmu, bool enable)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
- if (adreno_smmu->resume_translation)
- adreno_smmu->resume_translation(adreno_smmu->cookie, true);
+ if (adreno_smmu->set_stall)
+ adreno_smmu->set_stall(adreno_smmu->cookie, enable);
}
static void msm_iommu_detach(struct msm_mmu *mmu)
@@ -431,7 +427,7 @@ static const struct msm_mmu_funcs funcs = {
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
- .resume_translation = msm_iommu_resume_translation,
+ .set_stall = msm_iommu_set_stall,
};
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
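Annotation: set_stall generalizes the old one-shot resume_translation into an on/off toggle, which is what lets the core keep stalling disabled for a cooldown window after a fault. A hypothetical caller sketch (the 500 ms value is illustrative only):

/* After a fault, stop stalling for a cooldown window, under the new
 * fault_stall_lock. */
unsigned long flags;

spin_lock_irqsave(&priv->fault_stall_lock, flags);
if (priv->stall_enabled) {
	priv->stall_enabled = false;
	priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
	mmu->funcs->set_stall(mmu, false);
}
spin_unlock_irqrestore(&priv->fault_stall_lock, flags);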
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index dcb49fd30402..709979fcfab6 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -150,7 +150,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
dev = msm_mdss->dev;
- domain = irq_domain_add_linear(dev->of_node, 32,
+ domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
&msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
@@ -592,6 +592,16 @@ static const struct msm_mdss_data sa8775p_data = {
.reg_bus_bw = 74000,
};
+static const struct msm_mdss_data sar2130p_data = {
+ .ubwc_enc_version = UBWC_3_0, /* 4.0.2 in hw */
+ .ubwc_dec_version = UBWC_4_3,
+ .ubwc_swizzle = 6,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 0,
+ .macrotile_mode = 1,
+ .reg_bus_bw = 74000,
+};
+
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
@@ -738,6 +748,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
{ .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
+ { .compatible = "qcom,sar2130p-mdss", .data = &sar2130p_data },
{ .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index daf91529e02b..0c694907140d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -15,7 +15,7 @@ struct msm_mmu_funcs {
size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
- void (*resume_translation)(struct msm_mmu *mmu);
+ void (*set_stall)(struct msm_mmu *mmu, bool enable);
};
enum msm_mmu_type {
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index c5651c39ac2a..89dce15eed3b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -93,7 +93,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
}
msm_gem_object_set_name(ring->bo, "ring%d", id);
- args.name = to_msm_bo(ring->bo)->name,
+ args.name = to_msm_bo(ring->bo)->name;
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 5a6ae9fc3194..462713401622 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -2255,7 +2255,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<reg32 offset="0" name="0">
<bitfield name="CLEAR_ON_CHIP_TS" pos="0" type="boolean"/>
<bitfield name="CLEAR_RESOURCE_TABLE" pos="1" type="boolean"/>
- <bitfield name="CLEAR_GLOBAL_LOCAL_TS" pos="2" type="boolean"/>
+ <bitfield name="CLEAR_BV_BR_COUNTER" pos="2" type="boolean"/>
+ <bitfield name="RESET_GLOBAL_LOCAL_TS" pos="3" type="boolean"/>
</reg32>
</domain>
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index 3926485bb197..a409404627c7 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -11,6 +11,7 @@ import collections
import argparse
import time
import datetime
+import re
class Error(Exception):
def __init__(self, message):
@@ -877,13 +878,14 @@ The rules-ng-ng source files this header was generated from are:
""")
maxlen = 0
for filepath in p.xml_files:
- maxlen = max(maxlen, len(filepath))
+ new_filepath = re.sub("^.+drivers","drivers",filepath)
+ maxlen = max(maxlen, len(new_filepath))
for filepath in p.xml_files:
- pad = " " * (maxlen - len(filepath))
+ pad = " " * (maxlen - len(new_filepath))
filesize = str(os.path.getsize(filepath))
filesize = " " * (7 - len(filesize)) + filesize
filetime = time.ctime(os.path.getmtime(filepath))
- print("- " + filepath + pad + " (" + filesize + " bytes, from " + filetime + ")")
+ print("- " + new_filepath + pad + " (" + filesize + " bytes, from <stripped>)")
if p.copyright_year:
current_year = str(datetime.date.today().year)
print()
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 8ee00f59ca82..fcb2a7517377 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -134,7 +134,6 @@ static int lcdif_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct lcdif_drm_private *lcdif;
- struct resource *res;
int ret;
lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL);
@@ -144,8 +143,7 @@ static int lcdif_load(struct drm_device *drm)
lcdif->drm = drm;
drm->dev_private = lcdif;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lcdif->base = devm_ioremap_resource(drm->dev, res);
+ lcdif->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lcdif->base))
return PTR_ERR(lcdif->base);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 59020862cf65..c183b1112bc4 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -8,6 +8,7 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
+#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -215,7 +216,6 @@ static int mxsfb_load(struct drm_device *drm,
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct mxsfb_drm_private *mxsfb;
- struct resource *res;
int ret;
mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
@@ -226,8 +226,7 @@ static int mxsfb_load(struct drm_device *drm,
drm->dev_private = mxsfb;
mxsfb->devdata = devdata;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxsfb->base = devm_ioremap_resource(drm->dev, res);
+ mxsfb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxsfb->base))
return PTR_ERR(mxsfb->base);
@@ -361,6 +360,15 @@ static int mxsfb_probe(struct platform_device *pdev)
if (ret)
goto err_free;
+ /*
+ * Remove early framebuffers (i.e. simplefb). The framebuffer can be
+ * located anywhere in RAM.
+ */
+ ret = aperture_remove_all_conflicting_devices(mxsfb_driver.name);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't kick out existing framebuffers\n");
+
ret = drm_dev_register(drm, 0);
if (ret)
goto err_unload;
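Annotation: the catch-all variant is needed here precisely because, as the new comment says, a simple-framebuffer may sit anywhere in RAM. For contrast, a sketch of both forms ("my-drm-driver" is a placeholder name):

/* Range-based: evict generic fb drivers from a known aperture. */
ret = aperture_remove_conflicting_devices(mem->start,
					  resource_size(mem),
					  "my-drm-driver");

/* Catch-all: evict them everywhere, as used above, since the simplefb
 * buffer location is not known to this driver. */
ret = aperture_remove_all_conflicting_devices("my-drm-driver");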
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 7b863355c5c6..385d24530d1e 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -2,6 +2,7 @@
ccflags-y += -I $(src)/include
ccflags-y += -I $(src)/include/nvkm
ccflags-y += -I $(src)/nvkm
+ccflags-y += -I $(src)/nvkm/subdev/gsp
ccflags-y += -I $(src)
# NVKM - HW resource manager
@@ -68,5 +69,6 @@ nouveau-y += nv17_fence.o
nouveau-y += nv50_fence.o
nouveau-y += nv84_fence.o
nouveau-y += nvc0_fence.o
+nouveau-y += gv100_fence.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 7b3e979c51ec..d1587639ebb0 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_NOUVEAU
tristate "Nouveau (NVIDIA) cards"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select IOMMU_API
select FW_LOADER
select FW_CACHE if PM_SLEEP
@@ -94,7 +94,6 @@ config DRM_NOUVEAU_SVM
bool "(EXPERIMENTAL) Enable SVM (Shared Virtual Memory) support"
depends on DEVICE_PRIVATE
depends on DRM_NOUVEAU
- depends on MMU
depends on STAGING
select HMM_MIRROR
select MMU_NOTIFIER
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 67146f1e8482..c063756eaea3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -768,9 +768,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
disp->image[nv_crtc->index] = NULL;
}
- nouveau_bo_unmap(nv_crtc->cursor.nvbo);
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_fini(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin_del(&nv_crtc->cursor.nvbo);
nvif_event_dtor(&nv_crtc->vblank);
nvif_head_dtor(&nv_crtc->head);
kfree(nv_crtc);
@@ -1303,6 +1301,7 @@ nv04_crtc_vblank_handler(struct nvif_event *event, void *repv, u32 repc)
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
+ struct nouveau_cli *cli = &nouveau_drm(dev)->client;
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_crtc *nv_crtc;
struct drm_plane *primary;
@@ -1336,20 +1335,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
- &nv_crtc->cursor.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
- NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
- if (ret)
- nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- }
- if (ret)
- nouveau_bo_fini(nv_crtc->cursor.nvbo);
- }
+ ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_VRAM, 64 * 64 * 4, &nv_crtc->cursor.nvbo);
+ if (ret)
+ return ret;
nv04_cursor_init(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 28be2912ff74..d5049dee4b8c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -9,11 +9,13 @@ nouveau-y += dispnv50/core907d.o
nouveau-y += dispnv50/core917d.o
nouveau-y += dispnv50/corec37d.o
nouveau-y += dispnv50/corec57d.o
+nouveau-y += dispnv50/coreca7d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o
nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o
+nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcca7d.o
nouveau-y += dispnv50/dac507d.o
nouveau-y += dispnv50/dac907d.o
@@ -31,6 +33,7 @@ nouveau-y += dispnv50/head907d.o
nouveau-y += dispnv50/head917d.o
nouveau-y += dispnv50/headc37d.o
nouveau-y += dispnv50/headc57d.o
+nouveau-y += dispnv50/headca7d.o
nouveau-y += dispnv50/wimm.o
nouveau-y += dispnv50/wimmc37b.o
@@ -39,6 +42,7 @@ nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
nouveau-y += dispnv50/wndwc67e.o
+nouveau-y += dispnv50/wndwca7e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index f045515696cb..c6331bf97582 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { GB202_DISP_CORE_CHANNEL_DMA, 0, coreca7d_new },
{ AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index f75088186fba..aa07a3ad5dfd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -7,7 +7,10 @@
struct nv50_core {
const struct nv50_core_func *func;
+ struct nv50_disp *disp;
+
struct nv50_dmac chan;
+
bool assign_windows;
};
@@ -18,6 +21,7 @@ struct nv50_core_func {
int (*init)(struct nv50_core *);
void (*ntfy_init)(struct nouveau_bo *, u32 offset);
int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
+ u32 caps_class;
int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
int (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -70,4 +74,6 @@ int corec37d_wndw_owner(struct nv50_core *);
extern const struct nv50_outp_func sorc37d;
int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
+
+int coreca7d_new(struct nouveau_drm *, s32, struct nv50_core **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index ce2cb78bbdd3..4b947b67a844 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -165,6 +165,7 @@ core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
return -ENOMEM;
core->func = func;
+ core->disp = disp;
ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 7f637b8830be..83eec2f091f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -105,7 +105,7 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
int ret;
ret = nvif_object_ctor(&disp->disp->object, "dispCaps", 0,
- GV100_DISP_CAPS, NULL, 0, &disp->caps);
+ disp->core->func->caps_class, NULL, 0, &disp->caps);
if (ret) {
NV_ERROR(drm,
"Failed to init notifier caps region: %d\n",
@@ -162,6 +162,7 @@ corec37d = {
.init = corec37d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
+ .caps_class = GV100_DISP_CAPS,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 421d0d57e1d8..39be576eadcb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -22,6 +22,7 @@
#include "core.h"
#include "head.h"
+#include <nvif/class.h>
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57d.h>
@@ -63,6 +64,7 @@ corec57d = {
.init = corec57d_init,
.ntfy_init = corec37d_ntfy_init,
.caps_init = corec37d_caps_init,
+ .caps_class = GV100_DISP_CAPS,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c
new file mode 100644
index 000000000000..171727be400e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/coreca7d.c
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "core.h"
+#include "head.h"
+
+#include <nvif/class.h>
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+#include <nouveau_bo.h>
+
+static int
+coreca7d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
+{
+ const u64 ntfy_addr = core->disp->sync->offset + NV50_DISP_CORE_NTFY;
+ const u32 ntfy_hi = upper_32_bits(ntfy_addr);
+ const u32 ntfy_lo = lower_32_bits(ntfy_addr);
+ struct nvif_push *push = &core->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5 + (ntfy ? 5 + 2 : 0));
+ if (ret)
+ return ret;
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVCA7D, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi,
+
+ SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVVAL(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) |
+ NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+ }
+
+ PUSH_MTHD(push, NVCA7D, SET_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_CURS],
+ SET_WINDOW_INTERLOCK_FLAGS, interlock[NV50_DISP_INTERLOCK_WNDW]);
+
+ PUSH_MTHD(push, NVCA7D, UPDATE,
+ NVDEF(NVCA7D, UPDATE, RELEASE_ELV, TRUE) |
+ NVDEF(NVCA7D, UPDATE, SPECIAL_HANDLING, NONE) |
+ NVDEF(NVCA7D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+
+ if (ntfy) {
+ PUSH_MTHD(push, NVCA7D, SET_NOTIFIER_CONTROL,
+ NVDEF(NVCA7D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+ }
+
+ return PUSH_KICK(push);
+}
+
+static int
+coreca7d_init(struct nv50_core *core)
+{
+ struct nvif_push *push = &core->chan.push;
+ const u32 windows = 8, heads = 4;
+ int ret, i;
+
+ ret = PUSH_WAIT(push, windows * 6 + heads * 6);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < windows; i++) {
+ PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(i),
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED1BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED2BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED4BPP, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, RGB_PACKED8BPP, TRUE),
+
+ WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(i), 0x00000000);
+
+ PUSH_MTHD(push, NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS(i),
+ NVVAL(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, MAX_PIXELS_FETCHED_PER_LINE, 0x7fff) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, ILUT_ALLOWED, TRUE) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, INPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVCA7D, WINDOW_SET_WINDOW_USAGE_BOUNDS, UPSCALING_ALLOWED, FALSE),
+
+ WINDOW_SET_PHYSICAL(i), BIT(i));
+ }
+
+ for (i = 0; i < heads; i++) {
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS(i),
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, CURSOR, USAGE_W256_H256) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OLUT_ALLOWED, TRUE) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, OUTPUT_SCALER_TAPS, TAPS_2) |
+ NVDEF(NVCA7D, HEAD_SET_HEAD_USAGE_BOUNDS, UPSCALING_ALLOWED, TRUE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_TILE_MASK(i), BIT(i));
+
+ PUSH_MTHD(push, NVCA7D, TILE_SET_TILE_SIZE(i), 0);
+ }
+
+ core->assign_windows = true;
+ return PUSH_KICK(push);
+}
+
+static const struct nv50_core_func
+coreca7d = {
+ .init = coreca7d_init,
+ .ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
+ .caps_class = GB202_DISP_CAPS,
+ .ntfy_wait_done = corec37d_ntfy_wait_done,
+ .update = coreca7d_update,
+ .wndw.owner = corec37d_wndw_owner,
+ .head = &headca7d,
+ .sor = &sorc37d,
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ .crc = &crcca7d,
+#endif
+};
+
+int
+coreca7d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&coreca7d, drm, oclass, pcore);
+}
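Annotation on the PUSH_WAIT() budgets in this new file: each PUSH_MTHD() emits one header dword plus one dword per method value. A sketch of the accounting behind the constants above:

/* Dword cost of one PUSH_MTHD() carrying n consecutive method values. */
static inline u32 push_mthd_cost(u32 n)
{
	return 1 + n;
}

/*
 * coreca7d_update: interlock pair (3) + UPDATE (2)            =  5
 *   with notifier: + address pair (3) + 2 * control (2 each)  = +7
 * coreca7d_init:   per window, two 2-value pairs: 2 * 3       =  6
 *                  per head, three 1-value methods: 3 * 2     =  6
 */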
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index 5936b6b3b15d..deb6af40ef32 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -509,6 +509,10 @@ nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
if (ret)
return ret;
+ /* No CTXDMAs on Blackwell. */
+ if (core->chan.base.user.oclass >= GB202_DISP_CORE_CHANNEL_DMA)
+ return 0;
+
ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
NV50_DISP_HANDLE_CRC_CTX(head, idx),
NV_DMA_IN_MEMORY,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h
index 4823f1fde2dd..75a2009e8193 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h
@@ -94,6 +94,7 @@ void nv50_crc_atomic_clr(struct nv50_head *);
extern const struct nv50_crc_func crc907d;
extern const struct nv50_crc_func crcc37d;
extern const struct nv50_crc_func crcc57d;
+extern const struct nv50_crc_func crcca7d;
#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
struct nv50_crc {};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c
new file mode 100644
index 000000000000..912f59aebe87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcca7d.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "crcc37d.h"
+#include "core.h"
+#include "head.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+static int
+crcca7d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, ctx ? 3 : 2);
+ if (ret)
+ return ret;
+
+ if (ctx) {
+ const u32 crc_hi = upper_32_bits(ctx->mem.addr);
+ const u32 crc_lo = lower_32_bits(ctx->mem.addr);
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CRC(i), crc_hi,
+
+ HEAD_SET_SURFACE_ADDRESS_LO_CRC(i),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ADDRESS_LO, crc_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, ENABLE));
+ } else {
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC(i),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CRC, ENABLE, DISABLE));
+ }
+
+ return 0;
+}
+
+static int
+crcca7d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
+ struct nv50_crc_notifier_ctx *ctx)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int primary_crc, ret;
+
+ if (!source) {
+ ret = PUSH_WAIT(push, 1);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i), 0);
+
+ return crcca7d_set_ctx(head, NULL);
+ }
+
+ switch (source) {
+ case NV50_CRC_SOURCE_TYPE_SOR:
+ primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(or);
+ break;
+ case NV50_CRC_SOURCE_TYPE_SF:
+ primary_crc = NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = crcca7d_set_ctx(head, ctx);
+ if (ret)
+ return ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CRC_CONTROL(i),
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
+ NVVAL(NVCA7D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, primary_crc) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) |
+ NVDEF(NVCA7D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE));
+
+ return 0;
+}
+
+const struct nv50_crc_func
+crcca7d = {
+ .set_src = crcca7d_set_src,
+ .set_ctx = crcca7d_set_ctx,
+ .get_entry = crcc37d_get_entry,
+ .ctx_finished = crcc37d_ctx_finished,
+ .flip_threshold = CRCC37D_FLIP_THRESHOLD,
+ .num_entries = CRCC37D_MAX_ENTRIES,
+ .notifier_len = sizeof(struct crcc37d_notifier),
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index 31d8b2e4791d..557bd05240fa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { GB202_DISP_CURSOR, 0, cursc37a_new },
{ GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 504cb3f2054b..e5d37eee4301 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -279,6 +279,16 @@ nv50_dmac_create(struct nouveau_drm *drm,
if (syncbuf < 0)
return 0;
+ /* No CTXDMAs on Blackwell. */
+ if (disp->oclass >= GB202_DISP) {
+ /* "handle != NULL_HANDLE" is used to determine enable status
+ * in a number of places, so fill in some fake object handles.
+ */
+ dmac->sync.handle = NV50_DISP_HANDLE_SYNCBUF;
+ dmac->vram.handle = NV50_DISP_HANDLE_VRAM;
+ return 0;
+ }
+
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
@@ -775,10 +785,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
union hdmi_infoframe infoframe = { 0 };
const u8 rekey = 56; /* binary driver, and tegra, constant */
u32 max_ac_packet;
- struct {
- struct nvif_outp_infoframe_v0 infoframe;
- u8 data[17];
- } args = { 0 };
+ DEFINE_RAW_FLEX(struct nvif_outp_infoframe_v0, args, data, 17);
+ const u8 data_len = __member_size(args->data);
int ret, size;
max_ac_packet = mode->htotal - mode->hdisplay;
@@ -815,29 +823,29 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
return;
/* AVI InfoFrame. */
- args.infoframe.version = 0;
- args.infoframe.head = nv_crtc->index;
+ args->version = 0;
+ args->head = nv_crtc->index;
if (!drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, &nv_connector->base, mode)) {
drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode,
HDMI_QUANTIZATION_RANGE_FULL);
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
} else {
size = 0;
}
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, args, size);
/* Vendor InfoFrame. */
- memset(&args.data, 0, sizeof(args.data));
+ memset(args->data, 0, data_len);
if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
&nv_connector->base, mode))
- size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data));
+ size = hdmi_infoframe_pack(&infoframe, args->data, data_len);
else
size = 0;
- nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
+ nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, args, size);
nv_encoder->hdmi.enabled = true;
}
@@ -2810,10 +2818,7 @@ nv50_display_destroy(struct drm_device *dev)
nvif_object_dtor(&disp->caps);
nv50_core_del(&disp->core);
- nouveau_bo_unmap(disp->sync);
- if (disp->sync)
- nouveau_bo_unpin(disp->sync);
- nouveau_bo_fini(disp->sync);
+ nouveau_bo_unpin_del(&disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
@@ -2845,20 +2850,7 @@ nv50_display_create(struct drm_device *dev)
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &disp->sync);
- if (!ret) {
- ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
- if (!ret) {
- ret = nouveau_bo_map(disp->sync);
- if (ret)
- nouveau_bo_unpin(disp->sync);
- }
- if (ret)
- nouveau_bo_fini(disp->sync);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &disp->sync);
if (ret)
goto out;
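
For reference, the DEFINE_RAW_FLEX() conversion above swaps an open-coded struct-plus-array for the helper from <linux/overflow.h>: it reserves correctly sized on-stack storage for a structure ending in a flexible array and yields a typed pointer to it, and __member_size() recovers the trailing array's byte count. A minimal sketch of the pattern with a hypothetical struct, assuming the usual kernel headers:

    #include <linux/overflow.h>  /* DEFINE_RAW_FLEX() */
    #include <linux/string.h>    /* memset(), fortify helpers */
    #include <linux/types.h>

    struct msg {                 /* hypothetical flex-array struct */
            u8 version;
            u8 payload[];        /* flexible array member */
    };

    static void msg_demo(void)
    {
            /* On-stack storage for struct msg plus 16 payload bytes;
             * "m" is a struct msg * aimed at that storage.
             */
            DEFINE_RAW_FLEX(struct msg, m, payload, 16);

            m->version = 0;
            memset(m->payload, 0, __member_size(m->payload));  /* == 16 */
    }
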
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d7c74cc43ba5..3dd742b4f823 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -577,6 +577,7 @@ nv50_head_create(struct drm_device *dev, int index)
return ERR_PTR(-ENOMEM);
head->func = disp->core->func->head;
+ head->disp = disp;
head->base.index = index;
if (disp->disp->object.oclass < GF110_DISP)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index e9d17037ffcf..8bd2fcb1eff5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -13,6 +13,8 @@
struct nv50_head {
const struct nv50_head_func *func;
+ struct nv50_disp *disp;
+
struct nouveau_crtc base;
struct nv50_crc crc;
struct nv50_lut olut;
@@ -98,4 +100,7 @@ int headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);
extern const struct nv50_head_func headc57d;
+bool headc57d_olut(struct nv50_head *, struct nv50_head_atom *, int size);
+
+extern const struct nv50_head_func headca7d;
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index fde4087e7691..3f8ba495de8f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -182,7 +182,7 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
-static bool
+bool
headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
{
if (size != 0 && size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headca7d.c b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c
new file mode 100644
index 000000000000..eeaeb15aa664
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headca7d.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7d.h>
+
+static int
+headca7d_display_id(struct nv50_head *head, u32 display_id)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_DISPLAY_ID(i, 0), display_id);
+
+ return 0;
+}
+
+static int
+headca7d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ u8 depth;
+ int ret;
+
+ switch (asyh->or.depth) {
+ case 6:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444;
+ break;
+ case 5:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
+ break;
+ case 2:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444;
+ break;
+ case 0:
+ depth = NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE(i),
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, CRC_MODE, asyh->or.crc_raster) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, HSYNC_POLARITY, asyh->or.nhsync) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, VSYNC_POLARITY, asyh->or.nvsync) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, PIXEL_DEPTH, depth) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, COLOR_SPACE_OVERRIDE, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_OUTPUT_RESOURCE, EXT_PACKET_WIN, NONE));
+
+ return 0;
+}
+
+static int
+headca7d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PROCAMP(i),
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, COLOR_SPACE, RGB) |
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, CHROMA_LPF, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_PROCAMP, DYNAMIC_RANGE, VESA));
+
+ return 0;
+}
+
+static int
+headca7d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_DITHER_CONTROL(i),
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, ENABLE, asyh->dither.enable) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, BITS, asyh->dither.bits) |
+ NVDEF(NVCA7D, HEAD_SET_DITHER_CONTROL, OFFSET_ENABLE, DISABLE) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, MODE, asyh->dither.mode) |
+ NVVAL(NVCA7D, HEAD_SET_DITHER_CONTROL, PHASE, 0));
+
+ return 0;
+}
+
+static int
+headca7d_curs_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, DISABLE) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, A8R8G8B8));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+headca7d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const u32 curs_hi = upper_32_bits(asyh->curs.offset);
+ const u32 curs_lo = lower_32_bits(asyh->curs.offset);
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 7);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(i, 0), curs_hi);
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(i, 0),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ADDRESS_LO, curs_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_CURSOR, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL_CURSOR(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0),
+
+ HEAD_SET_CONTROL_CURSOR_COMPOSITION(i),
+ NVVAL(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, K1, 0xff) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, CURSOR_COLOR_FACTOR_SELECT,
+ K1) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, VIEWPORT_COLOR_FACTOR_SELECT,
+ NEG_K1_TIMES_SRC) |
+ NVDEF(NVCA7D, HEAD_SET_CONTROL_CURSOR_COMPOSITION, MODE, BLEND));
+
+ return 0;
+}
+
+static int
+headca7d_olut_clr(struct nv50_head *head)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i),
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+headca7d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const u32 olut_hi = upper_32_bits(asyh->olut.offset);
+ const u32 olut_lo = lower_32_bits(asyh->olut.offset);
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_SURFACE_ADDRESS_HI_OLUT(i), olut_hi,
+
+ HEAD_SET_SURFACE_ADDRESS_LO_OLUT(i),
+ NVVAL(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ADDRESS_LO, olut_lo >> 4) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7D, HEAD_SET_SURFACE_ADDRESS_LO_OLUT, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_OLUT_CONTROL(i),
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, INTERPOLATE, asyh->olut.output_mode) |
+ NVDEF(NVCA7D, HEAD_SET_OLUT_CONTROL, MIRROR, DISABLE) |
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, MODE, asyh->olut.mode) |
+ NVVAL(NVCA7D, HEAD_SET_OLUT_CONTROL, SIZE, asyh->olut.size),
+
+ HEAD_SET_OLUT_FP_NORM_SCALE(i), 0xffffffff);
+
+ return 0;
+}
+
+static int
+headca7d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ struct nv50_head_mode *m = &asyh->mode;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 11);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_RASTER_SIZE(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, WIDTH, m->h.active) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SIZE, HEIGHT, m->v.active),
+
+ HEAD_SET_RASTER_SYNC_END(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, X, m->h.synce) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_SYNC_END, Y, m->v.synce),
+
+ HEAD_SET_RASTER_BLANK_END(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, X, m->h.blanke) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_END, Y, m->v.blanke),
+
+ HEAD_SET_RASTER_BLANK_START(i),
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, X, m->h.blanks) |
+ NVVAL(NVCA7D, HEAD_SET_RASTER_BLANK_START, Y, m->v.blanks));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_CONTROL(i),
+ NVDEF(NVCA7D, HEAD_SET_CONTROL, STRUCTURE, PROGRESSIVE));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY(i),
+ NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY, HERTZ, m->clock * 1000));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(i),
+ NVVAL(NVCA7D, HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, HERTZ, m->clock * 1000));
+
+ return 0;
+}
+
+static int
+headca7d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nvif_push *push = &head->disp->core->chan.push;
+ const int i = head->base.index;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN(i),
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, WIDTH, asyh->view.iW) |
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_IN, HEIGHT, asyh->view.iH));
+
+ PUSH_MTHD(push, NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT(i),
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, WIDTH, asyh->view.oW) |
+ NVVAL(NVCA7D, HEAD_SET_VIEWPORT_SIZE_OUT, HEIGHT, asyh->view.oH));
+ return 0;
+}
+
+const struct nv50_head_func
+headca7d = {
+ .view = headca7d_view,
+ .mode = headca7d_mode,
+ .olut = headc57d_olut,
+ .ilut_check = head907d_ilut_check,
+ .olut_identity = true,
+ .olut_size = 1024,
+ .olut_set = headca7d_olut_set,
+ .olut_clr = headca7d_olut_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = headc37d_curs_format,
+ .curs_set = headca7d_curs_set,
+ .curs_clr = headca7d_curs_clr,
+ .dither = headca7d_dither,
+ .procamp = headca7d_procamp,
+ .or = headca7d_or,
+ .static_wndw_map = headc37d_static_wndw_map,
+ .display_id = headca7d_display_id,
+};
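
All of the SET_SURFACE_ADDRESS_* programming above follows one convention: the HI method takes the full upper 32 bits of the address, while the LO method's ADDRESS_LO field occupies bits 31:4, so the lower word is shifted right by 4 and surfaces must be 16-byte aligned. A standalone sketch of the round trip, independent of the NVVAL()/NVDEF() macros:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t addr = 0x12345678abc0ull;        /* 16-byte-aligned example */
            uint32_t hi = (uint32_t)(addr >> 32);
            uint32_t lo_field = (uint32_t)addr >> 4;  /* ADDRESS_LO, bits 31:4 */

            /* Hardware-side reconstruction of the original address. */
            uint64_t decoded = ((uint64_t)hi << 32) | ((uint64_t)lo_field << 4);

            assert(decoded == addr);
            return 0;
    }
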
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index 566fbddfc8d7..53c9ab6c138b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { GB202_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index f6be426dd525..11d5b923d6e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -556,14 +556,24 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
return ret;
if (wndw->ctxdma.parent) {
- ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
- if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(nvbo);
- return PTR_ERR(ctxdma);
+ if (wndw->wndw.base.user.oclass < GB202_DISP_WINDOW_CHANNEL_DMA) {
+ ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(nvbo);
+ return PTR_ERR(ctxdma);
+ }
+
+ if (asyw->visible)
+ asyw->image.handle[0] = ctxdma->object.handle;
+ } else {
+ /* No CTXDMAs on Blackwell. */
+ if (asyw->visible) {
+ /* "handle != NULL_HANDLE" is used to determine enable status
+ * in a number of places, so fill in a fake object handle.
+ */
+ asyw->image.handle[0] = NV50_DISP_HANDLE_WNDW_CTX(0);
+ }
}
-
- if (asyw->visible)
- asyw->image.handle[0] = ctxdma->object.handle;
}
ret = drm_gem_plane_helper_prepare_fb(plane, state);
@@ -901,6 +911,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { GB202_DISP_WINDOW_CHANNEL_DMA, 0, wndwca7e_new },
{ GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 76a6ae5d5652..90d100514bef 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -134,6 +134,9 @@ int wndwc57e_csc_clr(struct nv50_wndw *);
int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+int wndwca7e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
+
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 50a7b97d37a2..554c4f91f8be 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -25,6 +25,7 @@
#include <drm/drm_atomic_helper.h>
#include <nouveau_bo.h>
+#include <nvif/class.h>
#include <nvif/if0014.h>
#include <nvif/pushc37b.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
new file mode 100644
index 000000000000..0d8e9a9d1a57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc97b.h>
+
+#include <nvhw/class/clca7e.h>
+
+#include <nouveau_bo.h>
+
+static int
+wndwca7e_image_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 4);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, 0) |
+ NVDEF(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, NON_TEARING));
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0),
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ const u32 iso0_hi = upper_32_bits(asyw->image.offset[0]);
+ const u32 iso0_lo = lower_32_bits(asyw->image.offset[0]);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret, kind;
+
+ if (asyw->image.kind)
+ kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR;
+ else
+ kind = NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH;
+
+ ret = PUSH_WAIT(push, 17);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ISO(0), iso0_hi);
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ISO(0),
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ADDRESS_LO, iso0_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, TARGET, PHYSICAL_NVM) |
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, KIND, kind) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ISO, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_PRESENT_CONTROL,
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+ NVVAL(NVCA7E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+ NVDEF(NVCA7E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE,
+ NVVAL(NVCA7E, SET_SIZE, WIDTH, asyw->image.w) |
+ NVVAL(NVCA7E, SET_SIZE, HEIGHT, asyw->image.h),
+
+ SET_STORAGE,
+ NVVAL(NVCA7E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+ SET_PARAMS,
+ NVVAL(NVCA7E, SET_PARAMS, FORMAT, asyw->image.format) |
+ NVDEF(NVCA7E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+ NVDEF(NVCA7E, SET_PARAMS, SWAP_UV, DISABLE) |
+ NVDEF(NVCA7E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
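+ /* Exactly one of blocks[0] (blocklinear) or pitch[0] (pitch) is non-zero. */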
+ SET_PLANAR_STORAGE(0),
+ NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+ NVVAL(NVCA7E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+ PUSH_MTHD(push, NVCA7E, SET_POINT_IN(0),
+ NVVAL(NVCA7E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+ NVVAL(NVCA7E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE_IN,
+ NVVAL(NVCA7E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+ NVVAL(NVCA7E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+ PUSH_MTHD(push, NVCA7E, SET_SIZE_OUT,
+ NVVAL(NVCA7E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+ NVVAL(NVCA7E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+
+ return 0;
+}
+
+static int
+wndwca7e_ilut_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT,
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ const u32 ilut_hi = upper_32_bits(asyw->xlut.i.offset);
+ const u32 ilut_lo = lower_32_bits(asyw->xlut.i.offset);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_ILUT, ilut_hi,
+
+ SET_SURFACE_ADDRESS_LO_ILUT,
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ADDRESS_LO, ilut_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_ILUT, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_ILUT_CONTROL,
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, SIZE, asyw->xlut.i.size) |
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, MODE, asyw->xlut.i.mode) |
+ NVVAL(NVCA7E, SET_ILUT_CONTROL, INTERPOLATE, asyw->xlut.i.output_mode));
+
+ return 0;
+}
+
+static int
+wndwca7e_ntfy_clr(struct nv50_wndw *wndw)
+{
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 2);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, DISABLE));
+
+ return 0;
+}
+
+static int
+wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
+ const u64 ntfy_addr = disp->sync->offset + asyw->ntfy.offset;
+ const u32 ntfy_hi = upper_32_bits(ntfy_addr);
+ const u32 ntfy_lo = lower_32_bits(ntfy_addr);
+ struct nvif_push *push = &wndw->wndw.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 5);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVCA7E, SET_SURFACE_ADDRESS_HI_NOTIFIER, ntfy_hi,
+
+ SET_SURFACE_ADDRESS_LO_NOTIFIER,
+ NVVAL(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ADDRESS_LO, ntfy_lo >> 4) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, TARGET, PHYSICAL_NVM) |
+ NVDEF(NVCA7E, SET_SURFACE_ADDRESS_LO_NOTIFIER, ENABLE, ENABLE));
+
+ PUSH_MTHD(push, NVCA7E, SET_NOTIFIER_CONTROL,
+ NVVAL(NVCA7E, SET_NOTIFIER_CONTROL, MODE, asyw->ntfy.awaken));
+
+ return 0;
+}
+
+static const struct nv50_wndw_func
+wndwca7e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .ntfy_set = wndwca7e_ntfy_set,
+ .ntfy_clr = wndwca7e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .ilut_size = 1024,
+ .xlut_set = wndwca7e_ilut_set,
+ .xlut_clr = wndwca7e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwca7e_image_set,
+ .image_clr = wndwca7e_image_clr,
+ .blend_set = wndwc37e_blend_set,
+ .update = wndwc37e_update,
+};
+
+int
+wndwca7e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwca7e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
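
The PUSH_WAIT() budgets in this file count pushbuffer dwords: one header per PUSH_MTHD() plus one dword per value, with consecutive methods inside a single PUSH_MTHD() sharing a header. For wndwca7e_image_set(), the reservation of 17 breaks down as follows (a sketch of the accounting under that layout):

    SET_SURFACE_ADDRESS_HI_ISO(0)                   1 + 1 = 2
    SET_SURFACE_ADDRESS_LO_ISO(0)                   1 + 1 = 2
    SET_PRESENT_CONTROL                             1 + 1 = 2
    SET_SIZE .. SET_PLANAR_STORAGE(0) (4 methods)   1 + 4 = 5
    SET_POINT_IN(0)                                 1 + 1 = 2
    SET_SIZE_IN                                     1 + 1 = 2
    SET_SIZE_OUT                                    1 + 1 = 2
                                                    ---------
                                                           17
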
diff --git a/drivers/gpu/drm/nouveau/gv100_fence.c b/drivers/gpu/drm/nouveau/gv100_fence.c
new file mode 100644
index 000000000000..cccdeca72002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/gv100_fence.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include "nv50_display.h"
+
+#include <nvif/push906f.h>
+
+#include <nvhw/class/clc36f.h>
+
+static int
+gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+ struct nvif_push *push = &chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 8);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual),
+ SEM_ADDR_HI, upper_32_bits(virtual),
+ SEM_PAYLOAD_LO, sequence);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, EN) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+
+ PUSH_MTHD(push, NVC36F, NON_STALL_INTERRUPT, 0);
+
+ PUSH_KICK(push);
+ return 0;
+}
+
+static int
+gv100_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+ struct nvif_push *push = &chan->chan.push;
+ int ret;
+
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(virtual),
+ SEM_ADDR_HI, upper_32_bits(virtual),
+ SEM_PAYLOAD_LO, sequence);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, ACQ_CIRC_GEQ) |
+ NVDEF(NVC36F, SEM_EXECUTE, ACQUIRE_SWITCH_TSG, EN) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT));
+
+ PUSH_KICK(push);
+ return 0;
+}
+
+static int
+gv100_fence_context_new(struct nouveau_channel *chan)
+{
+ struct nv84_fence_chan *fctx;
+ int ret;
+
+ ret = nv84_fence_context_new(chan);
+ if (ret)
+ return ret;
+
+ fctx = chan->fence;
+ fctx->base.emit32 = gv100_fence_emit32;
+ fctx->base.sync32 = gv100_fence_sync32;
+ return 0;
+}
+
+int
+gv100_fence_create(struct nouveau_drm *drm)
+{
+ struct nv84_fence_priv *priv;
+ int ret;
+
+ ret = nv84_fence_create(drm);
+ if (ret)
+ return ret;
+
+ priv = drm->fence;
+ priv->base.context_new = gv100_fence_context_new;
+ return 0;
+}
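
gv100_fence_emit32() releases the semaphore with the fence sequence number, and gv100_fence_sync32() stalls the channel until the semaphore value is circularly greater than or equal to the target, which tolerates 32-bit sequence wrap-around. A plausible CPU-side model of that ACQ_CIRC_GEQ comparison (an assumption about the hardware semantics, not taken from this patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Circular "greater or equal": true when the signed 32-bit distance
     * from target to value is non-negative, so sequence counters may wrap.
     */
    static bool sem_acq_circ_geq(uint32_t value, uint32_t target)
    {
            return (int32_t)(value - target) >= 0;
    }

    /* e.g. sem_acq_circ_geq(0x00000002, 0xfffffffe) is true: the counter
     * wrapped, yet 0x2 is "after" 0xfffffffe.
     */
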
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
new file mode 100644
index 000000000000..8735dda4c8a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clc36f_h_
+#define _clc36f_h_
+
+#define NVC36F_NON_STALL_INTERRUPT (0x00000020)
+#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0
+#define NVC36F_SEM_ADDR_LO (0x0000005c)
+#define NVC36F_SEM_ADDR_LO_OFFSET 31:2
+#define NVC36F_SEM_ADDR_HI (0x00000060)
+#define NVC36F_SEM_ADDR_HI_OFFSET 7:0
+#define NVC36F_SEM_PAYLOAD_LO (0x00000064)
+#define NVC36F_SEM_PAYLOAD_LO_PAYLOAD 31:0
+#define NVC36F_SEM_PAYLOAD_HI (0x00000068)
+#define NVC36F_SEM_PAYLOAD_HI_PAYLOAD 31:0
+#define NVC36F_SEM_EXECUTE (0x0000006c)
+#define NVC36F_SEM_EXECUTE_OPERATION 2:0
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000
+#define NVC36F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004
+#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005
+#define NVC36F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI 20:20
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE 24:24
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000
+#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000
+#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001
+#define NVC36F_SEM_EXECUTE_REDUCTION 30:27
+#define NVC36F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000
+#define NVC36F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001
+#define NVC36F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002
+#define NVC36F_SEM_EXECUTE_REDUCTION_IAND 0x00000003
+#define NVC36F_SEM_EXECUTE_REDUCTION_IOR 0x00000004
+#define NVC36F_SEM_EXECUTE_REDUCTION_IADD 0x00000005
+#define NVC36F_SEM_EXECUTE_REDUCTION_INC 0x00000006
+#define NVC36F_SEM_EXECUTE_REDUCTION_DEC 0x00000007
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT 31:31
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000
+#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001
+
+#endif
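
These generated headers describe each field as a HI:LO bit range; the NVDEF()/NVVAL() macros used by the callers pack named values into those ranges. A hand-expanded sketch of the SEM_EXECUTE word that gv100_fence_emit32() builds, showing the resulting encoding rather than the macro internals:

    #include <stdint.h>

    /* Field positions from above: OPERATION 2:0, RELEASE_WFI 20:20,
     * PAYLOAD_SIZE 24:24, RELEASE_TIMESTAMP 25:25.
     */
    static uint32_t sem_execute_release(void)
    {
            uint32_t v = 0;

            v |= 0x1u << 0;   /* OPERATION = RELEASE */
            v |= 0x1u << 20;  /* RELEASE_WFI = EN */
            v |= 0x0u << 24;  /* PAYLOAD_SIZE = 32BIT */
            v |= 0x0u << 25;  /* RELEASE_TIMESTAMP = DIS */

            return v;         /* 0x00100001 */
    }
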
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h
new file mode 100644
index 000000000000..092aebe9551c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc97b.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clc97b_h_
+#define _clc97b_h_
+
+// dma opcode instructions
+#define NVC97B_DMA
+#define NVC97B_DMA_OPCODE 31:29
+#define NVC97B_DMA_OPCODE_METHOD 0x00000000
+#define NVC97B_DMA_OPCODE_JUMP 0x00000001
+#define NVC97B_DMA_OPCODE_NONINC_METHOD 0x00000002
+#define NVC97B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003
+#define NVC97B_DMA_METHOD_COUNT 27:18
+#define NVC97B_DMA_METHOD_OFFSET 15:2
+#define NVC97B_DMA_DATA 31:0
+#define NVC97B_DMA_DATA_NOP 0x00000000
+#define NVC97B_DMA_JUMP_OFFSET 15:2
+#define NVC97B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0
+
+#endif // _clc97b_h_
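
The opcode layout above is enough to walk a pushbuffer by hand: bits 31:29 select the opcode, and for METHOD/NONINC_METHOD words bits 27:18 carry the dword count while bits 15:2 carry the starting method offset. A small decoding sketch under those definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Extract bits hi..lo of v (valid for field widths below 32). */
    static inline uint32_t bits(uint32_t v, int hi, int lo)
    {
            return (v >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    static void decode(uint32_t word)
    {
            uint32_t opcode = bits(word, 31, 29);           /* NVC97B_DMA_OPCODE */

            if (opcode == 0x0 || opcode == 0x2) {           /* METHOD / NONINC_METHOD */
                    uint32_t count = bits(word, 27, 18);    /* _METHOD_COUNT */
                    uint32_t mthd = bits(word, 15, 2) << 2; /* byte offset */

                    printf("mthd 0x%04x, %u dword(s)\n", mthd, count);
            }
    }
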
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h
new file mode 100644
index 000000000000..0fec6fc21d44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7d.h
@@ -0,0 +1,868 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clca7d_h_
+#define _clca7d_h_
+
+// class methods
+#define NVCA7D_UPDATE (0x00000200)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING 21:20
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002)
+#define NVCA7D_UPDATE_SPECIAL_HANDLING_REASON 19:12
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS 24:24
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000)
+#define NVCA7D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001)
+#define NVCA7D_UPDATE_RELEASE_ELV 0:0
+#define NVCA7D_UPDATE_RELEASE_ELV_FALSE (0x00000000)
+#define NVCA7D_UPDATE_RELEASE_ELV_TRUE (0x00000001)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN 8:4
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN 28:28
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN_FALSE (0x00000000)
+#define NVCA7D_UPDATE_FORCE_FULLSCREEN_TRUE (0x00000001)
+#define NVCA7D_SET_NOTIFIER_CONTROL (0x0000020C)
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVCA7D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY 12:12
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000)
+#define NVCA7D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS (0x00000218)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000)
+#define NVCA7D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000)
+#define NVCA7D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001)
+#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000260)
+#define NVCA7D_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000264)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+
+#define NVCA7D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK 7:0
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040)
+#define NVCA7D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL 11:8
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C)
+#define NVCA7D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F)
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001)
+#define NVCA7D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002)
+
+#define NVCA7D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER 3:0
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i))
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007)
+#define NVCA7D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F)
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE 8:8
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_HIDE_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS 9:9
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_DISABLE_PHYSICAL_FLIPS_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME 10:10
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_CONTROL_ALLOW_SUPERFRAME_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT 26:25
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH_BLOCKLINEAR (0x00000000)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_PITCH (0x00000001)
+#define NVCA7D_WINDOW_SET_WINDOW_USAGE_BOUNDS_LAYOUT_BLOCKLINEAR (0x00000002)
+#define NVCA7D_WINDOW_SET_PHYSICAL(a) (0x00001014 + (a)*0x00000080)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW 31:0
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_NONE (0x00000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW0 (0x00000001)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW1 (0x00000002)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW2 (0x00000004)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW3 (0x00000008)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW4 (0x00000010)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW5 (0x00000020)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW6 (0x00000040)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW7 (0x00000080)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW8 (0x00000100)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW9 (0x00000200)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW10 (0x00000400)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW11 (0x00000800)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW12 (0x00001000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW13 (0x00002000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW14 (0x00004000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW15 (0x00008000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW16 (0x00010000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW17 (0x00020000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW18 (0x00040000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW19 (0x00080000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW20 (0x00100000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW21 (0x00200000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW22 (0x00400000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW23 (0x00800000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW24 (0x01000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW25 (0x02000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW26 (0x04000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW27 (0x08000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW28 (0x10000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW29 (0x20000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW30 (0x40000000)
+#define NVCA7D_WINDOW_SET_PHYSICAL_WINDOW_WINDOW31 (0x80000000)
+
+#define NVCA7D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002)
+#define NVCA7D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000)
+#define NVCA7D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F)
+#define NVCA7D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE 1:0
+#define NVCA7D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER 3:3
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE 11:10
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN 8:4
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_LOCKOUT_WINDOW 15:12
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE 23:22
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_NO_LOCK (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_FRAME_LOCK (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_MODE_RASTER_LOCK (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN 20:16
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN 28:24
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i))
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E)
+#define NVCA7D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE 30:30
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SINK_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE 31:31
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_SOURCE_STEREO_LOCK_MODE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS 5:4
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE 10:8
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005)
+#define NVCA7D_HEAD_SET_DITHER_CONTROL_PHASE 13:12
+#define NVCA7D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_DISPLAY_ID_CODE 31:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED 5:5
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_LTM_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_HEAD_USAGE_BOUNDS_ELV_START 31:17
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0
+#define NVCA7D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16
+#define NVCA7D_HEAD_SET_TILE_MASK(a) (0x00002060 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE 7:0
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE0 (0x00000001)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE1 (0x00000002)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE2 (0x00000004)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE3 (0x00000008)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE4 (0x00000010)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE5 (0x00000020)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE6 (0x00000040)
+#define NVCA7D_HEAD_SET_TILE_MASK_TILE_TILE7 (0x00000080)
+#define NVCA7D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_SIZE_WIDTH 15:0
+#define NVCA7D_HEAD_SET_RASTER_SIZE_HEIGHT 31:16
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_SYNC_END_Y 30:16
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_BLANK_END_Y 30:16
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START_X 14:0
+#define NVCA7D_HEAD_SET_RASTER_BLANK_START_Y 30:16
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC(a) (0x00002150 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CRC_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC(a) (0x00002154 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CRC_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT(a) (0x00002158 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_OLUT_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT(a) (0x0000215C + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_OLUT_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR(a,b) (0x00002170 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_HI_CURSOR_ADDRESS_HI 31:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR(a,b) (0x00002178 + (a)*0x00000800 + (b)*0x00000004)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ADDRESS_LO 31:4
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET 3:2
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_IOVA (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND 1:1
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_PITCH (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_KIND_BLOCKLINEAR (0x00000001)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE 0:0
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_SURFACE_ADDRESS_LO_CURSOR_ENABLE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i))
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057)
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9
+#define NVCA7D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE 3:2
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SIZE 18:8
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND 4:4
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_DISABLE (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_DIRECT_ROUND_ENABLE (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_LEVEL 25:20
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS 5:5
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_3BITS (0x00000000)
+#define NVCA7D_HEAD_SET_OLUT_CONTROL_SEGMENT_SIZE_BITS_SIZE_4BITS (0x00000001)
+#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000800)
+#define NVCA7D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0
+
+#define NVCA7D_TILE_SET_TILE_SIZE(a) (0x00006000 + (a)*0x00000200)
+#define NVCA7D_TILE_SET_TILE_SIZE_START 14:0
+#define NVCA7D_TILE_SET_TILE_SIZE_WIDTH 30:16
+
+#endif // _clca7d_h
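
The bare "hi:lo" pairs throughout these class headers look odd but are deliberately valid C: wrapped in a conditional expression, (1 ? 31:16) yields the high bit and (0 ? 31:16) the low bit, which is how DRF-style accessors recover field positions from these macros. Below is a minimal self-contained sketch of that idiom; the two field macros are copied from clca7d.h above, while the DRF_* helpers are illustrative stand-ins for nouveau's own macros in include/nvhw/drf.h, not part of this patch.

    #include <stdint.h>
    #include <stdio.h>

    #define NVCA7D_HEAD_SET_RASTER_SIZE_WIDTH  15:0   /* copied from clca7d.h above */
    #define NVCA7D_HEAD_SET_RASTER_SIZE_HEIGHT 31:16  /* copied from clca7d.h above */

    /* The conditional-expression trick: "f" expands to "hi:lo". */
    #define DRF_LO(f)         (0 ? f)
    #define DRF_HI(f)         (1 ? f)
    #define DRF_MASK(f)       (~0u >> (31 - DRF_HI(f) + DRF_LO(f)))
    #define DRF_NUM(f, v)     (((v) & DRF_MASK(f)) << DRF_LO(f))
    #define DRF_VAL(f, data)  (((data) >> DRF_LO(f)) & DRF_MASK(f))

    int main(void)
    {
            /* Pack one RASTER_SIZE method payload: WIDTH in 15:0, HEIGHT in 31:16. */
            uint32_t data = DRF_NUM(NVCA7D_HEAD_SET_RASTER_SIZE_WIDTH, 3840) |
                            DRF_NUM(NVCA7D_HEAD_SET_RASTER_SIZE_HEIGHT, 2160);

            printf("0x%08x w=%u h=%u\n", data,
                   DRF_VAL(NVCA7D_HEAD_SET_RASTER_SIZE_WIDTH, data),
                   DRF_VAL(NVCA7D_HEAD_SET_RASTER_SIZE_HEIGHT, data));
            return 0;
    }

The same trick also explains the parameterised offset macros: a head index strides by 0x800 in the core-channel methods above, so e.g. NVCA7D_HEAD_SET_RASTER_SIZE(1) evaluates to 0x2864.
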
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h
new file mode 100644
index 000000000000..ebfb2e48a4f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clca7e.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef _clca7e_h_
+#define _clca7e_h_
+
+// class methods
+#define NVCA7E_SET_NOTIFIER_CONTROL (0x00000220)
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE 0:0
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000)
+#define NVCA7E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001)
+#define NVCA7E_SET_SIZE (0x00000224)
+#define NVCA7E_SET_SIZE_WIDTH 15:0
+#define NVCA7E_SET_SIZE_HEIGHT 31:16
+#define NVCA7E_SET_STORAGE (0x00000228)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT 3:0
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004)
+#define NVCA7E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005)
+#define NVCA7E_SET_PARAMS (0x0000022C)
+#define NVCA7E_SET_PARAMS_FORMAT 7:0
+#define NVCA7E_SET_PARAMS_FORMAT_I8 (0x0000001E)
+#define NVCA7E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F)
+#define NVCA7E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8)
+#define NVCA7E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9)
+#define NVCA7E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E)
+#define NVCA7E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF)
+#define NVCA7E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6)
+#define NVCA7E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5)
+#define NVCA7E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9)
+#define NVCA7E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF)
+#define NVCA7E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1)
+#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023)
+#define NVCA7E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6)
+#define NVCA7E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028)
+#define NVCA7E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A)
+#define NVCA7E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056)
+#define NVCA7E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076)
+#define NVCA7E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078)
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000)
+#define NVCA7E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001)
+#define NVCA7E_SET_PARAMS_SWAP_UV 19:19
+#define NVCA7E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000)
+#define NVCA7E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001)
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE 22:22
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000)
+#define NVCA7E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001)
+#define NVCA7E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004)
+#define NVCA7E_SET_PLANAR_STORAGE_PITCH 12:0
+#define NVCA7E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004)
+#define NVCA7E_SET_POINT_IN_X 15:0
+#define NVCA7E_SET_POINT_IN_Y 31:16
+#define NVCA7E_SET_SIZE_IN (0x00000298)
+#define NVCA7E_SET_SIZE_IN_WIDTH 15:0
+#define NVCA7E_SET_SIZE_IN_HEIGHT 31:16
+#define NVCA7E_SET_SIZE_OUT (0x000002A4)
+#define NVCA7E_SET_SIZE_OUT_WIDTH 15:0
+#define NVCA7E_SET_SIZE_OUT_HEIGHT 31:16
+#define NVCA7E_SET_PRESENT_CONTROL (0x00000308)
+#define NVCA7E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE 13:12
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001)
+#define NVCA7E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002)
+#define NVCA7E_SET_ILUT_CONTROL (0x00000440)
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE 0:0
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR 1:1
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MODE 3:2
+#define NVCA7E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000)
+#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001)
+#define NVCA7E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002)
+#define NVCA7E_SET_ILUT_CONTROL_SIZE 18:8
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER (0x00000650)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_NOTIFIER_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER (0x00000654)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_NOTIFIER_ENABLE_ENABLE (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO(b) (0x00000658 + (b)*0x00000004)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ISO_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO(b) (0x00000670 + (b)*0x00000004)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND 1:1
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_PITCH (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_KIND_BLOCKLINEAR (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ISO_ENABLE_ENABLE (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT (0x00000688)
+#define NVCA7E_SET_SURFACE_ADDRESS_HI_ILUT_ADDRESS_HI 31:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT (0x0000068C)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ADDRESS_LO 31:4
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET 3:2
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_IOVA (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_NVM (0x00000001)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI (0x00000002)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_TARGET_PHYSICAL_PCI_COHERENT (0x00000003)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE 0:0
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_DISABLE (0x00000000)
+#define NVCA7E_SET_SURFACE_ADDRESS_LO_ILUT_ENABLE_ENABLE (0x00000001)
+
+#endif // _clca7e_h
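
Every SET_SURFACE_ADDRESS_HI_*/LO_* pair above splits one 64-bit surface address across two methods: HI carries address bits 63:32, while LO carries bits 31:4 and reuses the four low bits for TARGET, KIND and ENABLE, which is why the surface must be at least 16-byte aligned. A hedged sketch of packing the ISO pair for an IOVA-mapped blocklinear surface follows; in the driver these words would be emitted through nouveau's channel push machinery rather than assembled by hand like this.

    #include <stdint.h>

    /* Field values copied from clca7e.h above. */
    #define ISO_TARGET_IOVA      0x0 /* SET_SURFACE_ADDRESS_LO_ISO_TARGET 3:2 */
    #define ISO_KIND_BLOCKLINEAR 0x1 /* SET_SURFACE_ADDRESS_LO_ISO_KIND   1:1 */
    #define ISO_ENABLE_ENABLE    0x1 /* SET_SURFACE_ADDRESS_LO_ISO_ENABLE 0:0 */

    static void pack_iso_address(uint64_t iova, uint32_t *hi, uint32_t *lo)
    {
            /* ADDRESS_LO occupies bits 31:4 of the low word, so the
             * address must be 16-byte aligned; bits 3:0 carry flags. */
            *hi = (uint32_t)(iova >> 32);
            *lo = ((uint32_t)iova & 0xfffffff0) |
                  (ISO_TARGET_IOVA      << 2) |
                  (ISO_KIND_BLOCKLINEAR << 1) |
                  (ISO_ENABLE_ENABLE    << 0);
    }
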
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h
new file mode 100644
index 000000000000..c9d74bd95e0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb100/dev_hshub_base.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb100_dev_hshub_base_h__
+#define __gb100_dev_hshub_base_h__
+
+#define NV_PFB_HSHUB0 0x00870fff:0x00870000
+
+#define NV_PFB_HSHUB 0x00000FFF:0x00000000 /* RW--D */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00000E50 /* RW-4R */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00000E54 /* RW-4R */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO 0x000006C0 /* RW-4R */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xFFFFFF00 /* ----V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI 0x000006C4 /* RW-4R */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+
+#endif // __gb100_dev_hshub_base_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h
new file mode 100644
index 000000000000..4d0bb8e14298
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb10b/dev_fbhub.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb10b_dev_fb_h__
+#define __gb10b_dev_fb_h__
+
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO 0x008a1d58 /* RW-4R */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR_MASK 0xffffff00 /* RW--V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI 0x008a1d5c /* RW-4R */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_INIT 0x00000000 /* RWI-V */
+#define NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000fffff /* RW--V */
+
+#endif // __gb10b_dev_fb_h__
+
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h
new file mode 100644
index 000000000000..b09f04b31738
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_ce.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb202_dev_ce_h__
+#define __gb202_dev_ce_h__
+
+#define NV_CE_GRCE_MASK 0x001040d8 /* C--4R */
+#define NV_CE_GRCE_MASK_VALUE 9:0 /* C--VF */
+#define NV_CE_GRCE_MASK_VALUE_INIT 0x00f /* C---V */
+
+#endif // __gb202_dev_ce_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h
new file mode 100644
index 000000000000..ed359cb528fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gb202/dev_therm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gb202_dev_therm_h__
+#define __gb202_dev_therm_h__
+
+#define NV_THERM_I2CS_SCRATCH 0x00ad00bc /* RW-4R */
+#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */
+#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000
+
+#endif // __gb202_dev_therm_h__
+
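
The dev_therm.h aliases overload the I2CS scratch register as an FSP boot-complete handshake: STATUS spans the full 31:0 field, and the register reads STATUS_SUCCESS (0xFF) once FSP firmware has finished booting. A minimal sketch of the corresponding check; read32() is a hypothetical MMIO accessor standing in for the driver's real register-read helper.

    #include <stdbool.h>
    #include <stdint.h>

    #define NV_THERM_I2CS_SCRATCH            0x00ad00bc /* from dev_therm.h above */
    #define FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000ff

    extern uint32_t read32(uint32_t addr); /* hypothetical MMIO read */

    static bool fsp_boot_complete(void)
    {
            /* STATUS is bits 31:0, so the whole register must equal SUCCESS. */
            return read32(NV_THERM_I2CS_SCRATCH) == FSP_BOOT_COMPLETE_STATUS_SUCCESS;
    }
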
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h
new file mode 100644
index 000000000000..52171b412aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_falcon_v4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_falcon_v4_h__
+#define __gh100_dev_falcon_v4_h__
+
+#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */
+#define NV_PFALCON_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */
+#define NV_PFALCON_FALCON_MAILBOX0_DATA_INIT 0x00000000 /* RWI-V */
+#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */
+#define NV_PFALCON_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */
+#define NV_PFALCON_FALCON_MAILBOX1_DATA_INIT 0x00000000 /* RWI-V */
+
+#define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* R--4R */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN 13:13 /* R--VF */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_LOCK 0x00000001 /* R---V */
+#define NV_PFALCON_FALCON_HWCFG2_RISCV_BR_PRIV_LOCKDOWN_UNLOCK 0x00000000 /* R---V */
+
+#endif // __gh100_dev_falcon_v4_h__
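
These falcon registers are offsets relative to each engine's falcon base; HWCFG2 bit 13 reports whether the RISC-V boot ROM has engaged priv lockdown (LOCK = 1, UNLOCK = 0). A short sketch of testing it for a falcon at a given priv base, with read32() again a hypothetical MMIO accessor:

    #include <stdbool.h>
    #include <stdint.h>

    #define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* from dev_falcon_v4.h above */

    extern uint32_t read32(uint32_t addr); /* hypothetical MMIO read */

    static bool riscv_priv_locked(uint32_t falcon_base)
    {
            /* RISCV_BR_PRIV_LOCKDOWN is field 13:13; LOCK is 1. */
            return (read32(falcon_base + NV_PFALCON_FALCON_HWCFG2) >> 13) & 1;
    }
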
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h
new file mode 100644
index 000000000000..819f09465952
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_fb_h_
+#define __gh100_dev_fb_h_
+
+#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT 8 /* */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO 0x00100A34 /* RW-4R */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI 0x00100A38 /* RW-4R */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR 31:0 /* RWIVF */
+#define NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI_ADR_MASK 0x000FFFFF /* ----V */
+
+#endif // __gh100_dev_fb_h_
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h
new file mode 100644
index 000000000000..e9507242cae5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_fsp_pri.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_fsp_pri_h__
+#define __gh100_dev_fsp_pri_h__
+
+#define NV_PFSP 0x8F3FFF:0x8F0000 /* RW--D */
+
+#define NV_PFSP_MSGQ_HEAD(i) (0x008F2c80+(i)*8) /* RW-4A */
+#define NV_PFSP_MSGQ_HEAD__SIZE_1 8 /* */
+#define NV_PFSP_MSGQ_HEAD_VAL 31:0 /* RWIUF */
+#define NV_PFSP_MSGQ_HEAD_VAL_INIT 0x00000000 /* RWI-V */
+#define NV_PFSP_MSGQ_TAIL(i) (0x008F2c84+(i)*8) /* RW-4A */
+#define NV_PFSP_MSGQ_TAIL__SIZE_1 8 /* */
+#define NV_PFSP_MSGQ_TAIL_VAL 31:0 /* RWIUF */
+#define NV_PFSP_MSGQ_TAIL_VAL_INIT 0x00000000 /* RWI-V */
+
+#define NV_PFSP_QUEUE_HEAD(i) (0x008F2c00+(i)*8) /* RW-4A */
+#define NV_PFSP_QUEUE_HEAD__SIZE_1 8 /* */
+#define NV_PFSP_QUEUE_HEAD_ADDRESS 31:0 /* RWIVF */
+#define NV_PFSP_QUEUE_HEAD_ADDRESS_INIT 0x00000000 /* RWI-V */
+#define NV_PFSP_QUEUE_TAIL(i) (0x008F2c04+(i)*8) /* RW-4A */
+#define NV_PFSP_QUEUE_TAIL__SIZE_1 8 /* */
+#define NV_PFSP_QUEUE_TAIL_ADDRESS 31:0 /* RWIVF */
+#define NV_PFSP_QUEUE_TAIL_ADDRESS_INIT 0x00000000 /* RWI-V */
+
+#endif // __gh100_dev_fsp_pri_h__
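
The QUEUE and MSGQ head/tail pairs describe eight ring buffers each, strided 8 bytes apart per index, for the two directions of FSP communication. Under the usual head/tail ring convention, which is an assumption here rather than something this header states, a ring is empty when the two pointers match:

    #include <stdbool.h>
    #include <stdint.h>

    /* Offsets copied from dev_fsp_pri.h above. */
    #define NV_PFSP_MSGQ_HEAD(i)  (0x008F2c80 + (i)*8)
    #define NV_PFSP_MSGQ_TAIL(i)  (0x008F2c84 + (i)*8)

    extern uint32_t read32(uint32_t addr); /* hypothetical MMIO read */

    static bool fsp_msgq_empty(int i)
    {
            /* Equal pointers mean empty regardless of which side
             * (driver or FSP) advances head versus tail. */
            return read32(NV_PFSP_MSGQ_HEAD(i)) == read32(NV_PFSP_MSGQ_TAIL(i));
    }
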
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h
new file mode 100644
index 000000000000..6707e0e3b96b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_mmu.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_mmu_h__
+#define __gh100_dev_mmu_h__
+
+#define NV_MMU_PTE /* ----G */
+#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */
+#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
+#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_PTE_KIND (1*32+7):(1*32+4) /* RWXVF */
+#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */
+#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x6 /* R---V */
+#define NV_MMU_PTE_KIND_Z16 0x1 /* R---V */
+#define NV_MMU_PTE_KIND_S8 0x2 /* R---V */
+#define NV_MMU_PTE_KIND_S8Z24 0x3 /* R---V */
+#define NV_MMU_PTE_KIND_ZF32_X24S8 0x4 /* R---V */
+#define NV_MMU_PTE_KIND_Z24S8 0x5 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x8 /* R---V */
+#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x9 /* R---V */
+#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0xA /* R---V */
+#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0xB /* R---V */
+#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0xC /* R---V */
+#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0xD /* R---V */
+#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0xE /* R---V */
+#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xF /* R---V */
+
+#define NV_MMU_VER3_PDE /* ----G */
+#define NV_MMU_VER3_PDE_IS_PTE 0:0 /* RWXVF */
+#define NV_MMU_VER3_PDE_IS_PTE_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PDE_IS_PTE_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PDE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_PDE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PDE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE 2:1 /* RWXVF */
+#define NV_MMU_VER3_PDE_APERTURE_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF 5:3 /* RWXVF */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_PCF_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PDE_ADDRESS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PDE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_PDE__SIZE 8
+
+#define NV_MMU_VER3_DUAL_PDE /* ----G */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE 0:0 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG 5:3 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_BIG_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG 51:8 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL 69:67 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED__OR__INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_ALLOWED 0x00000000 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED__OR__SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_ALLOWED 0x00000001 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED__OR__INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_CACHED_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_INVALID_ATS_NOT_ALLOWED 0x00000002 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED__OR__SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_PCF_SMALL_SPARSE_ATS_NOT_ALLOWED 0x00000003 /* RW--V */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SMALL 115:76 /* RWXVF */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */
+#define NV_MMU_VER3_DUAL_PDE__SIZE 16
+
+#define NV_MMU_VER3_PTE /* ----G */
+#define NV_MMU_VER3_PTE_VALID 0:0 /* RWXVF */
+#define NV_MMU_VER3_PTE_VALID_TRUE 0x1 /* RW--V */
+#define NV_MMU_VER3_PTE_VALID_FALSE 0x0 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE 2:1 /* RWXVF */
+#define NV_MMU_VER3_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF 7:3 /* RWXVF */
+#define NV_MMU_VER3_PTE_PCF_INVALID 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_SPARSE 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_MAPPING_NOWHERE 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_NO_VALID_4KB_PAGE 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACE 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACE 0x00000004 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE 0x00000005 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACE 0x00000006 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE 0x00000007 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE 0x00000008 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE 0x00000009 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE 0x0000000A /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE 0x0000000B /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE 0x0000000C /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000D /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE 0x0000000E /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE 0x0000000F /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD 0x00000010 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD 0x00000011 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD 0x00000012 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD 0x00000013 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD 0x00000014 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD 0x00000015 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD 0x00000016 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD 0x00000017 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD 0x00000018 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD 0x00000019 /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACD 0x0000001A /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACD 0x0000001B /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD 0x0000001C /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001D /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACD 0x0000001E /* RW--V */
+#define NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACD 0x0000001F /* RW--V */
+#define NV_MMU_VER3_PTE_KIND 11:8 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_SYS 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_PEER 51:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_ADDRESS_VID 39:12 /* RWXVF */
+#define NV_MMU_VER3_PTE_PEER_ID 63:(64-3) /* RWXVF */
+#define NV_MMU_VER3_PTE_PEER_ID_0 0x00000000 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_1 0x00000001 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_2 0x00000002 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_3 0x00000003 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_4 0x00000004 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_5 0x00000005 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_6 0x00000006 /* RW--V */
+#define NV_MMU_VER3_PTE_PEER_ID_7 0x00000007 /* RW--V */
+#define NV_MMU_VER3_PTE_ADDRESS_SHIFT 0x0000000c /* */
+#define NV_MMU_VER3_PTE__SIZE 8
+
+#endif // __gh100_dev_mmu_h__
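
The H:L tokens in the defines above are bit ranges (high:low). As a minimal, self-contained sketch of how they compose into a 64-bit VER3 PTE — using the shift amounts implied by the ranges shown, not nouveau's actual NVVAL/NVDEF packing helpers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pte = 0;

	pte |= 0x1ULL << 0;      /* VALID (0:0) = TRUE */
	pte |= 0x2ULL << 1;      /* APERTURE (2:1) = SYSTEM_COHERENT_MEMORY */
	pte |= 0x1ULL << 3;      /* PCF (7:3) = REGULAR_RW_ATOMIC_UNCACHED_ACE */
	pte |= 0x12345ULL << 12; /* ADDRESS (51:12) = physical address >> 12 */

	printf("pte = 0x%016llx\n", (unsigned long long)pte);
	return 0;
}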
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h
new file mode 100644
index 000000000000..8ff4663168d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_riscv_pri.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_riscv_pri_h__
+#define __gh100_dev_riscv_pri_h__
+
+#define NV_PRISCV_RISCV_CPUCTL 0x00000388 /* RW-4R */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED 4:4 /* R-IVF */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_INIT 0x00000001 /* R-I-V */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */
+#define NV_PRISCV_RISCV_CPUCTL_HALTED_FALSE 0x00000000 /* R---V */
+
+#endif // __gh100_dev_riscv_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h
new file mode 100644
index 000000000000..49b4816cb00b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_therm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_therm_h__
+#define __gh100_dev_therm_h__
+
+#define NV_THERM_I2CS_SCRATCH 0x000200bc /* RW-4R */
+#define NV_THERM_I2CS_SCRATCH_DATA 31:0 /* RWIVF */
+#define NV_THERM_I2CS_SCRATCH_DATA_INIT 0x00000000 /* RWI-V */
+
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE NV_THERM_I2CS_SCRATCH
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS 31:0
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS 0x000000FF
+#define NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_FAILED 0x00000000
+
+#endif // __gh100_dev_therm_h__
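
NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE aliases the scratch register so the driver can poll it for FSP boot status. A hedged sketch of such a poll — the helper name is hypothetical; nvkm_rd32() is nouveau's existing register-read primitive:

static bool example_fsp_boot_complete(struct nvkm_device *device)
{
	return nvkm_rd32(device, NV_THERM_I2CS_SCRATCH) ==
	       NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS;
}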
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h
new file mode 100644
index 000000000000..12b49e9894a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/dev_xtl_ep_pri.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_dev_xtl_ep_pri_h__
+#define __gh100_dev_xtl_ep_pri_h__
+
+#define NV_EP_PCFGM 0x92FFF:0x92000 /* RW--D */
+
+#endif // __gh100_dev_xtl_ep_pri_h__
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h
new file mode 100644
index 000000000000..1a891bd33fa3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvhw/ref/gh100/pri_nv_xal_ep.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __gh100_pri_nv_xal_ep_h__
+#define __gh100_pri_nv_xal_ep_h__
+
+#define NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT 0x000010
+#define NV_XAL_EP_BAR0_WINDOW_BASE 21:0
+#define NV_XAL_EP_BAR0_WINDOW 0x0010fd40
+
+#endif // __gh100_pri_nv_xal_ep_h__
+
diff --git a/drivers/gpu/drm/nouveau/include/nvif/chan.h b/drivers/gpu/drm/nouveau/include/nvif/chan.h
new file mode 100644
index 000000000000..c329a29068d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/chan.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVIF_CHAN_H__
+#define __NVIF_CHAN_H__
+#include "push.h"
+
+struct nvif_chan {
+ const struct nvif_chan_func {
+ struct {
+ u32 (*read_get)(struct nvif_chan *);
+ } push;
+
+ struct {
+ u32 (*read_get)(struct nvif_chan *);
+ void (*push)(struct nvif_chan *, bool main, u64 addr, u32 size,
+ bool no_prefetch);
+ void (*kick)(struct nvif_chan *);
+ int (*post)(struct nvif_chan *, u32 gpptr, u32 pbptr);
+ u32 post_size;
+ } gpfifo;
+
+ struct {
+ int (*release)(struct nvif_chan *, u64 addr, u32 data);
+ } sem;
+ } *func;
+
+ struct {
+ struct nvif_map map;
+ } userd;
+
+ struct {
+ struct nvif_map map;
+ u32 cur;
+ u32 max;
+ int free;
+ } gpfifo;
+
+ struct {
+ struct nvif_map map;
+ u64 addr;
+ } sema;
+
+ struct nvif_push push;
+
+ struct nvif_user *usermode;
+ u32 doorbell_token;
+};
+
+int nvif_chan_dma_wait(struct nvif_chan *, u32 push_nr);
+
+void nvif_chan_gpfifo_ctor(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, struct nvif_chan *);
+int nvif_chan_gpfifo_wait(struct nvif_chan *, u32 gpfifo_nr, u32 push_nr);
+void nvif_chan_gpfifo_push(struct nvif_chan *, u64 addr, u32 size, bool no_prefetch);
+int nvif_chan_gpfifo_post(struct nvif_chan *);
+
+void nvif_chan506f_gpfifo_push(struct nvif_chan *, bool main, u64 addr, u32 size, bool no_prefetch);
+void nvif_chan506f_gpfifo_kick(struct nvif_chan *);
+
+int nvif_chan906f_ctor_(const struct nvif_chan_func *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_chan *);
+u32 nvif_chan906f_read_get(struct nvif_chan *);
+u32 nvif_chan906f_gpfifo_read_get(struct nvif_chan *);
+int nvif_chan906f_gpfifo_post(struct nvif_chan *, u32 gpptr, u32 pbptr);
+
+int nvif_chan506f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size);
+int nvif_chan906f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr);
+int nvif_chanc36f_ctor(struct nvif_chan *, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_user *usermode, u32 doorbell_token);
+#endif
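
The prototypes above imply a submission flow: reserve ring space, queue a pushbuf segment, then post. A sketch under that assumption — the exact meaning of the gpfifo_nr/push_nr arguments is inferred from the names, not shown in this header:

static int example_submit(struct nvif_chan *chan, u64 addr, u32 size)
{
	int ret;

	/* Reserve one GPFIFO entry plus room for the post-sync methods. */
	ret = nvif_chan_gpfifo_wait(chan, 1, chan->func->gpfifo.post_size);
	if (ret)
		return ret;

	/* Queue the pushbuf segment for HW fetch... */
	nvif_chan_gpfifo_push(chan, addr, size, false);

	/* ...and ring the doorbell / release the semaphore. */
	return nvif_chan_gpfifo_post(chan);
}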
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index ea937fa7bc55..ea8267e0d8da 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -29,6 +29,8 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_TURING 0x0c
#define NV_DEVICE_INFO_V0_AMPERE 0x0d
#define NV_DEVICE_INFO_V0_ADA 0x0e
+#define NV_DEVICE_INFO_V0_HOPPER 0x0f
+#define NV_DEVICE_INFO_V0_BLACKWELL 0x10
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 824e052dcc25..ff6823cb2cd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -57,12 +57,15 @@
#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
+#define BLACKWELL_INLINE_TO_MEMORY_A 0x0000cd40
#define NV04_DISP /* cl0046.h */ 0x00000046
#define VOLTA_USERMODE_A 0x0000c361
#define TURING_USERMODE_A 0x0000c461
#define AMPERE_USERMODE_A 0x0000c561
+#define HOPPER_USERMODE_A 0x0000c661
+#define BLACKWELL_USERMODE_A 0x0000c761
#define MAXWELL_FAULT_BUFFER_A /* clb069.h */ 0x0000b069
#define VOLTA_FAULT_BUFFER_A /* clb069.h */ 0x0000c369
@@ -85,6 +88,9 @@
#define TURING_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c46f
#define AMPERE_CHANNEL_GPFIFO_A /* if0020.h */ 0x0000c56f
#define AMPERE_CHANNEL_GPFIFO_B /* if0020.h */ 0x0000c76f
+#define HOPPER_CHANNEL_GPFIFO_A 0x0000c86f
+#define BLACKWELL_CHANNEL_GPFIFO_A 0x0000c96f
+#define BLACKWELL_CHANNEL_GPFIFO_B 0x0000ca6f
#define NV50_DISP /* if0010.h */ 0x00005070
#define G82_DISP /* if0010.h */ 0x00008270
@@ -102,8 +108,10 @@
#define TU102_DISP /* if0010.h */ 0x0000c570
#define GA102_DISP /* if0010.h */ 0x0000c670
#define AD102_DISP /* if0010.h */ 0x0000c770
+#define GB202_DISP 0x0000ca70
#define GV100_DISP_CAPS 0x0000c373
+#define GB202_DISP_CAPS 0x0000ca73
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -118,6 +126,7 @@
#define GV100_DISP_CURSOR /* if0014.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* if0014.h */ 0x0000c57a
#define GA102_DISP_CURSOR /* if0014.h */ 0x0000c67a
+#define GB202_DISP_CURSOR 0x0000ca7a
#define NV50_DISP_OVERLAY /* if0014.h */ 0x0000507b
#define G82_DISP_OVERLAY /* if0014.h */ 0x0000827b
@@ -128,6 +137,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c57b
#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* if0014.h */ 0x0000c67b
+#define GB202_DISP_WINDOW_IMM_CHANNEL_DMA 0x0000ca7b
#define NV50_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* if0014.h */ 0x0000827c
@@ -153,6 +163,7 @@
#define TU102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c57d
#define GA102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c67d
#define AD102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c77d
+#define GB202_DISP_CORE_CHANNEL_DMA 0x0000ca7d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000827e
@@ -164,6 +175,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c57e
#define GA102_DISP_WINDOW_CHANNEL_DMA /* if0014.h */ 0x0000c67e
+#define GB202_DISP_WINDOW_CHANNEL_DMA 0x0000ca7e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
@@ -189,16 +201,25 @@
#define TURING_A /* cl9097.h */ 0x0000c597
+#define AMPERE_A 0x0000c697
#define AMPERE_B /* cl9097.h */ 0x0000c797
#define ADA_A /* cl9097.h */ 0x0000c997
+#define HOPPER_A 0x0000cb97
+
+#define BLACKWELL_A 0x0000cd97
+#define BLACKWELL_B 0x0000ce97
+
#define NV74_BSP 0x000074b0
+#define NVB8B0_VIDEO_DECODER 0x0000b8b0
#define NVC4B0_VIDEO_DECODER 0x0000c4b0
#define NVC6B0_VIDEO_DECODER 0x0000c6b0
#define NVC7B0_VIDEO_DECODER 0x0000c7b0
#define NVC9B0_VIDEO_DECODER 0x0000c9b0
+#define NVCDB0_VIDEO_DECODER 0x0000cdb0
+#define NVCFB0_VIDEO_DECODER 0x0000cfb0
#define GT212_MSVLD 0x000085b1
#define IGT21A_MSVLD 0x000086b1
@@ -227,10 +248,14 @@
#define TURING_DMA_COPY_A 0x0000c5b5
#define AMPERE_DMA_COPY_A 0x0000c6b5
#define AMPERE_DMA_COPY_B 0x0000c7b5
+#define HOPPER_DMA_COPY_A 0x0000c8b5
+#define BLACKWELL_DMA_COPY_A 0x0000c9b5
+#define BLACKWELL_DMA_COPY_B 0x0000cab5
#define NVC4B7_VIDEO_ENCODER 0x0000c4b7
#define NVC7B7_VIDEO_ENCODER 0x0000c7b7
#define NVC9B7_VIDEO_ENCODER 0x0000c9b7
+#define NVCFB7_VIDEO_ENCODER 0x0000cfb7
#define FERMI_DECOMPRESS 0x000090b8
@@ -246,15 +271,25 @@
#define PASCAL_COMPUTE_B 0x0000c1c0
#define VOLTA_COMPUTE_A 0x0000c3c0
#define TURING_COMPUTE_A 0x0000c5c0
+#define AMPERE_COMPUTE_A 0x0000c6c0
#define AMPERE_COMPUTE_B 0x0000c7c0
#define ADA_COMPUTE_A 0x0000c9c0
+#define HOPPER_COMPUTE_A 0x0000cbc0
+#define BLACKWELL_COMPUTE_A 0x0000cdc0
+#define BLACKWELL_COMPUTE_B 0x0000cec0
#define NV74_CIPHER 0x000074c1
+#define NVB8D1_VIDEO_NVJPG 0x0000b8d1
#define NVC4D1_VIDEO_NVJPG 0x0000c4d1
#define NVC9D1_VIDEO_NVJPG 0x0000c9d1
+#define NVCDD1_VIDEO_NVJPG 0x0000cdd1
+#define NVCFD1_VIDEO_NVJPG 0x0000cfd1
+#define NVB8FA_VIDEO_OFA 0x0000b8fa
#define NVC6FA_VIDEO_OFA 0x0000c6fa
#define NVC7FA_VIDEO_OFA 0x0000c7fa
#define NVC9FA_VIDEO_OFA 0x0000c9fa
+#define NVCDFA_VIDEO_OFA 0x0000cdfa
+#define NVCFFA_VIDEO_OFA 0x0000cffa
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 8d205b6af46a..1b32dc701f61 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -16,7 +16,7 @@ struct nvif_object {
u32 handle;
s32 oclass;
void *priv; /*XXX: hack */
- struct {
+ struct nvif_map {
void __iomem *ptr;
u64 size;
} map;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h
index 6d3a8a3d2087..a493fababe3c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push.h
@@ -31,6 +31,12 @@ struct nvif_push {
void (*kick)(struct nvif_push *push);
struct nvif_mem mem;
+ u64 addr;
+
+ struct {
+ u32 get;
+ u32 max;
+ } hw;
u32 *bgn;
u32 *cur;
@@ -41,7 +47,7 @@ struct nvif_push {
static inline __must_check int
PUSH_WAIT(struct nvif_push *push, u32 size)
{
- if (push->cur + size >= push->end) {
+ if (push->cur + size > push->end) {
int ret = push->wait(push, size);
if (ret)
return ret;
@@ -55,7 +61,11 @@ PUSH_WAIT(struct nvif_push *push, u32 size)
static inline int
PUSH_KICK(struct nvif_push *push)
{
- push->kick(push);
+ if (push->cur != push->bgn) {
+ push->kick(push);
+ push->bgn = push->cur;
+ }
+
return 0;
}
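
Two behavioural fixes in this hunk: PUSH_WAIT's off-by-one meant a request that exactly filled the remaining pushbuf space still triggered a wait/wrap, and PUSH_KICK used to kick even when nothing had been written since the last kick. A self-contained demonstration of the boundary condition:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t end = 256; /* pushbuf capacity, in dwords */
	uint32_t cur = 248;       /* exactly 8 dwords remain */
	uint32_t size = 8;

	assert(cur + size >= end);   /* old check: spurious wait */
	assert(!(cur + size > end)); /* new check: the request fits */
	return 0;
}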
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push906f.h b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
index cc2866bc8b0a..79df71de98d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push906f.h
@@ -7,6 +7,7 @@
#ifndef PUSH906F_SUBC
// Host methods
#define PUSH906F_SUBC_NV906F 0
+#define PUSH906F_SUBC_NVC36F 0
// Twod
#define PUSH906F_SUBC_NV902D 3
diff --git a/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h
new file mode 100644
index 000000000000..c8d6b6319134
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/pushc97b.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_PUSHC97B_H__
+#define __NVIF_PUSHC97B_H__
+#include <nvif/push.h>
+
+#include <nvhw/class/clc97b.h>
+
+#define PUSH_HDR(p,m,c) do { \
+ PUSH_ASSERT(!((m) & ~DRF_SMASK(NVC97B_DMA_METHOD_OFFSET)), "mthd"); \
+ PUSH_ASSERT(!((c) & ~DRF_MASK(NVC97B_DMA_METHOD_COUNT)), "size"); \
+ PUSH_DATA__((p), NVDEF(NVC97B, DMA, OPCODE, METHOD) | \
+ NVVAL(NVC97B, DMA, METHOD_COUNT, (c)) | \
+ NVVAL(NVC97B, DMA, METHOD_OFFSET, (m) >> 2), \
+ " mthd 0x%04x size %d - %s", (u32)(m), (u32)(c), __func__); \
+} while(0)
+
+#define PUSH_MTHD_HDR(p,s,m,c) PUSH_HDR(p,m,c)
+#define PUSH_MTHD_INC 4:4
+#endif
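
PUSH_HDR validates the method offset and count against the NVC97B DMA field masks, then emits a single header dword. An illustrative use, not taken from this patch — it assumes the usual nvif PUSH_DATA primitive, and the method offset is a placeholder:

static void example_emit(struct nvif_push *push)
{
	PUSH_HDR(push, 0x0200 /* hypothetical method */, 1);
	PUSH_DATA(push, 0x12345678);
}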
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 46afb877a296..99579e7b9376 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -46,7 +46,10 @@ struct nvkm_device {
GV100 = 0x140,
TU100 = 0x160,
GA100 = 0x170,
+ GH100 = 0x180,
AD100 = 0x190,
+ GB10x = 0x1a0,
+ GB20x = 0x1b0,
} card_type;
u32 chipset;
u8 chiprev;
@@ -77,6 +80,13 @@ struct nvkm_device {
struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst);
+enum nvkm_bar_id {
+ NVKM_BAR_INVALID = 0,
+ NVKM_BAR0_PRI,
+ NVKM_BAR1_FB,
+ NVKM_BAR2_INST,
+};
+
struct nvkm_device_func {
struct nvkm_device_pci *(*pci)(struct nvkm_device *);
struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
@@ -85,8 +95,8 @@ struct nvkm_device_func {
int (*init)(struct nvkm_device *);
void (*fini)(struct nvkm_device *, bool suspend);
int (*irq)(struct nvkm_device *);
- resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
- resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+ resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id);
+ resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id);
bool cpu_coherent;
};
@@ -124,6 +134,9 @@ struct nvkm_device *nvkm_device_find(u64 name);
_temp; \
})
+#define NVKM_RD32_(p,o,dr) nvkm_rd32((p), (o) + (dr))
+#define NVKM_RD32(p,A...) DRF_RV(NVKM_RD32_, (p), 0, ##A)
+
void nvkm_device_del(struct nvkm_device **);
struct nvkm_device_oclass {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
index 9d2a1abf64f9..d92ffd17b729 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: MIT */
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FSP , struct nvkm_fsp , fsp)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP , struct nvkm_gsp , gsp)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP , struct nvkm_top , top)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VFN , struct nvkm_vfn , vfn)
@@ -29,7 +30,7 @@ NVKM_LAYOUT_INST(NVKM_SUBDEV_IOCTRL , struct nvkm_subdev , ioctrl, 3)
NVKM_LAYOUT_ONCE(NVKM_SUBDEV_FLA , struct nvkm_subdev , fla)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_BSP , struct nvkm_engine , bsp)
-NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 10)
+NVKM_LAYOUT_INST(NVKM_ENGINE_CE , struct nvkm_engine , ce, 20)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_CIPHER , struct nvkm_engine , cipher)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_DISP , struct nvkm_disp , disp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_DMAOBJ , struct nvkm_dma , dma)
@@ -43,9 +44,9 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC , struct nvkm_engine , mspdec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP , struct nvkm_engine , msppp)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD , struct nvkm_engine , msvld)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8)
-NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 4)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
+NVKM_LAYOUT_INST(NVKM_ENGINE_OFA , struct nvkm_engine , ofa, 2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 3e8db8280e2a..7903d7470d19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -87,5 +87,4 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
-int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index be508f65b280..96c16cfccf16 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -78,9 +78,6 @@ struct nvkm_fifo {
struct {
struct nvkm_memory *mem;
struct nvkm_vma *bar1;
-
- struct mutex mutex;
- struct list_head list;
} userd;
struct {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 8145796ffc61..a2333cfe6955 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -55,5 +55,4 @@ int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
int ga102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
-int ad102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 8d2e170883e1..ca83caa55157 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -13,7 +13,5 @@ struct nvkm_nvdec {
int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int tu102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
-int ga100_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
int ga102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
-int ad102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 018c58fc32ba..1f6eef13f872 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -13,6 +13,4 @@ struct nvkm_nvenc {
int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
int tu102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
-int ga102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
-int ad102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
deleted file mode 100644
index 80b7933a789e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_NVJPG_H__
-#define __NVKM_NVJPG_H__
-#include <core/engine.h>
-
-int ga100_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ad102_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
deleted file mode 100644
index e72e2115333b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_OFA_H__
-#define __NVKM_OFA_H__
-#include <core/engine.h>
-
-int ga100_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ga102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-int ad102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 5b798a1a313d..e0d777a933e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -102,6 +102,9 @@ int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gh100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gb100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int gb202_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h
new file mode 100644
index 000000000000..8a3dbb1cbb46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fsp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_FSP_H__
+#define __NVKM_FSP_H__
+#include <core/subdev.h>
+#include <core/falcon.h>
+
+struct nvkm_fsp {
+ const struct nvkm_fsp_func *func;
+ struct nvkm_subdev subdev;
+
+ struct nvkm_falcon falcon;
+};
+
+bool nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *, u32 hash_size, u32 pkey_size, u32 sig_size);
+int nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+
+int gh100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+int gb100_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+int gb202_fsp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 746e126c3ecf..226c7ec56b8e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -17,6 +17,9 @@ struct nvkm_gsp_mem {
dma_addr_t addr;
};
+int nvkm_gsp_mem_ctor(struct nvkm_gsp *, size_t size, struct nvkm_gsp_mem *);
+void nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *);
+
struct nvkm_gsp_radix3 {
struct nvkm_gsp_mem lvl0;
struct nvkm_gsp_mem lvl1;
@@ -31,6 +34,29 @@ typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
struct nvkm_gsp_event;
typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
+/**
+ * DOC: GSP message handling policy
+ *
+ * When a GSP RPC command is sent, its reply message can be handled in
+ * several ways, depending on the requirements of the caller and the
+ * nature of the command:
+ *
+ * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, return to the caller
+ * immediately after the GSP RPC command is issued, without waiting.
+ *
+ * NVKM_GSP_RPC_REPLY_RECV - If specified, wait for and receive the entire
+ * GSP RPC reply message after the command is issued.
+ *
+ * NVKM_GSP_RPC_REPLY_POLL - If specified, wait for the expected reply and
+ * discard it before returning to the caller.
+ *
+ */
+enum nvkm_gsp_rpc_reply_policy {
+ NVKM_GSP_RPC_REPLY_NOWAIT = 0,
+ NVKM_GSP_RPC_REPLY_RECV,
+ NVKM_GSP_RPC_REPLY_POLL,
+};
+
struct nvkm_gsp {
const struct nvkm_gsp_func *func;
struct nvkm_subdev subdev;
@@ -42,6 +68,9 @@ struct nvkm_gsp {
const struct firmware *load;
const struct firmware *unload;
} booter;
+
+ const struct firmware *fmc;
+
const struct firmware *bl;
const struct firmware *rm;
} fws;
@@ -89,6 +118,15 @@ struct nvkm_gsp {
struct {
struct nvkm_gsp_mem fw;
+ u8 *hash;
+ u8 *pkey;
+ u8 *sig;
+
+ struct nvkm_gsp_mem args;
+ } fmc;
+
+ struct {
+ struct nvkm_gsp_mem fw;
u32 code_offset;
u32 data_offset;
u32 manifest_offset;
@@ -107,6 +145,7 @@ struct nvkm_gsp {
struct sg_table sgt;
struct nvkm_gsp_radix3 radix3;
struct nvkm_gsp_mem meta;
+ struct sg_table fbsr;
} sr;
struct {
@@ -186,31 +225,7 @@ struct nvkm_gsp {
u8 tpcs;
} gr;
- const struct nvkm_gsp_rm {
- void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
- void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
- void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
-
- void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
- int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc);
- void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
-
- void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
- void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv);
- void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
-
- int (*rm_free)(struct nvkm_gsp_object *);
-
- int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *);
- void (*client_dtor)(struct nvkm_gsp_client *);
-
- int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
- void (*device_dtor)(struct nvkm_gsp_device *);
-
- int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
- nvkm_gsp_event_func, struct nvkm_gsp_event *);
- void (*event_dtor)(struct nvkm_gsp_event *);
- } *rm;
+ struct nvkm_rm *rm;
struct {
struct mutex mutex;
@@ -248,16 +263,19 @@ nvkm_gsp_rm(struct nvkm_gsp *gsp)
return gsp && (gsp->fws.rm || gsp->fw.img);
}
+#include <rm/rm.h>
+
static inline void *
nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
- return gsp->rm->rpc_get(gsp, fn, argc);
+ return gsp->rm->api->rpc->get(gsp, fn, argc);
}
static inline void *
-nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc)
{
- return gsp->rm->rpc_push(gsp, argv, wait, repc);
+ return gsp->rm->api->rpc->push(gsp, argv, policy, repc);
}
static inline void *
@@ -268,13 +286,14 @@ nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
if (IS_ERR_OR_NULL(argv))
return argv;
- return nvkm_gsp_rpc_push(gsp, argv, true, argc);
+ return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc);
}
static inline int
-nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy)
{
- void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
+ void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0);
if (IS_ERR(repv))
return PTR_ERR(repv);
@@ -285,19 +304,19 @@ nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
static inline void
nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
{
- gsp->rm->rpc_done(gsp, repv);
+ gsp->rm->api->rpc->done(gsp, repv);
}
static inline void *
nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
{
- return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
+ return object->client->gsp->rm->api->ctrl->get(object, cmd, argc);
}
static inline int
nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
- return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
+ return object->client->gsp->rm->api->ctrl->push(object, argv, repc);
}
static inline void *
@@ -328,7 +347,7 @@ nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
static inline void
nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
- object->client->gsp->rm->rm_ctrl_done(object, repv);
+ object->client->gsp->rm->api->ctrl->done(object, repv);
}
static inline void *
@@ -343,7 +362,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
object->parent = parent;
object->handle = handle;
- argv = gsp->rm->rm_alloc_get(object, oclass, argc);
+ argv = gsp->rm->api->alloc->get(object, oclass, argc);
if (IS_ERR_OR_NULL(argv)) {
object->client = NULL;
return argv;
@@ -355,7 +374,7 @@ nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u3
static inline void *
nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = object->client->gsp->rm->rm_alloc_push(object, argv);
+ void *repv = object->client->gsp->rm->api->alloc->push(object, argv);
if (IS_ERR(repv))
object->client = NULL;
@@ -377,7 +396,7 @@ nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
static inline void
nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
- object->client->gsp->rm->rm_alloc_done(object, repv);
+ object->client->gsp->rm->api->alloc->done(object, repv);
}
static inline int
@@ -395,39 +414,29 @@ nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 ar
static inline int
nvkm_gsp_rm_free(struct nvkm_gsp_object *object)
{
- if (object->client)
- return object->client->gsp->rm->rm_free(object);
+ if (object->client) {
+ int ret = object->client->gsp->rm->api->alloc->free(object);
+ object->client = NULL;
+ return ret;
+ }
return 0;
}
-static inline int
-nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- if (WARN_ON(!gsp->rm))
- return -ENOSYS;
-
- return gsp->rm->client_ctor(gsp, client);
-}
-
-static inline void
-nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- if (client->gsp)
- client->gsp->rm->client_dtor(client);
-}
+int nvkm_gsp_client_ctor(struct nvkm_gsp *, struct nvkm_gsp_client *);
+void nvkm_gsp_client_dtor(struct nvkm_gsp_client *);
static inline int
nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
{
- return client->gsp->rm->device_ctor(client, device);
+ return client->gsp->rm->api->device->ctor(client, device);
}
static inline void
nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
{
if (device->object.client)
- device->object.client->gsp->rm->device_dtor(device);
+ device->object.client->gsp->rm->api->device->dtor(device);
}
static inline int
@@ -459,7 +468,9 @@ static inline int
nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
- return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event);
+ struct nvkm_rm *rm = device->object.client->gsp->rm;
+
+ return rm->api->device->event.ctor(device, handle, id, func, event);
}
static inline void
@@ -468,7 +479,7 @@ nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
struct nvkm_gsp_device *device = event->device;
if (device)
- device->object.client->gsp->rm->event_dtor(event);
+ device->object.client->gsp->rm->api->device->event.dtor(event);
}
int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
@@ -479,5 +490,8 @@ int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_
int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gh100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gb100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int gb202_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
#endif
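
The reply-policy enum replaces the old bool "wait" argument throughout these inlines. A hedged sketch of a caller choosing a policy through them — the RPC function code and payload size are placeholders:

static int example_rpc_nowait(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
	void *argv = nvkm_gsp_rpc_get(gsp, fn, argc);

	if (IS_ERR_OR_NULL(argv))
		return argv ? PTR_ERR(argv) : -ENOMEM;

	/* Fire-and-forget: return as soon as the command is issued. */
	return nvkm_gsp_rpc_wr(gsp, argv, NVKM_GSP_RPC_REPLY_NOWAIT);
}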
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index e10cbd9203ec..db835cf7b8ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -24,11 +24,6 @@ struct nvkm_instmem {
struct nvkm_ramht *ramht;
struct nvkm_memory *ramro;
struct nvkm_memory *ramfc;
-
- struct {
- struct sg_table fbsr;
- bool fbsr_valid;
- } rm;
};
u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
@@ -41,4 +36,5 @@ int nv04_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nv
int nv40_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
int nv50_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
int gk20a_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+int gh100_instmem_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 935b1cacd528..abcb0dbcde70 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -8,7 +8,7 @@ struct nvkm_vma {
struct list_head head;
struct rb_node tree;
u64 addr;
- u64 size:50;
+ u64 size;
bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
@@ -73,6 +73,7 @@ struct nvkm_vmm {
struct nvkm_gsp_object object;
struct nvkm_vma *rsvd;
+ bool external;
} rm;
};
@@ -165,4 +166,5 @@ int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
+int gh100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index 3c103101d5fc..112b674ed9c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -50,6 +50,7 @@ int gf100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gf106_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
int gk104_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
int gp100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
+int gh100_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
deleted file mode 100644
index 7a3fc023072d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0000_h__
-#define __src_common_sdk_nvidia_inc_class_cl0000_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV0000_ALLOC_PARAMETERS {
- NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
- NvU32 processID;
- char processName[NV_PROC_NAME_MAX_LENGTH];
-} NV0000_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
deleted file mode 100644
index e4de36d63666..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__
-#define __src_common_sdk_nvidia_inc_class_cl0005_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0005_ALLOC_PARAMETERS {
- NvHandle hParentClient;
- NvHandle hSrcResource;
-
- NvV32 hClass;
- NvV32 notifyIndex;
- NV_DECLARE_ALIGNED(NvP64 data, 8);
-} NV0005_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
deleted file mode 100644
index 8868118e47d6..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl0080_h__
-#define __src_common_sdk_nvidia_inc_class_cl0080_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV0080_ALLOC_PARAMETERS {
- NvU32 deviceId;
- NvHandle hClientShare;
- NvHandle hTargetClient;
- NvHandle hTargetDevice;
- NvV32 flags;
- NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
- NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
- NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
- NvV32 vaMode;
-} NV0080_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
deleted file mode 100644
index 9040ea5608a0..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl2080_h__
-#define __src_common_sdk_nvidia_inc_class_cl2080_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
-
-typedef struct NV2080_ALLOC_PARAMETERS {
- NvU32 subDeviceId;
-} NV2080_ALLOC_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
deleted file mode 100644
index ba659d6477d3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
-#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_NOTIFIERS_HOTPLUG (1)
-
-#define NV2080_NOTIFIERS_DP_IRQ (7)
-
-#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
-#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
-
-#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
-
-#define NV2080_ENGINE_TYPE_BSP (0x00000013)
-#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
-
-#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
-#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
-
-#define NV2080_ENGINE_TYPE_SW (0x00000022)
-
-#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
-
-#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
-#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
-
-#define NV2080_ENGINE_TYPE_OFA (0x00000033)
-
-typedef struct {
- NvU32 plugDisplayMask;
- NvU32 unplugDisplayMask;
-} Nv2080HotplugNotification;
-
-typedef struct Nv2080DpIrqNotificationRec {
- NvU32 displayId;
-} Nv2080DpIrqNotification;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
deleted file mode 100644
index 9eb780a1ac72..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl84a0_h__
-#define __src_common_sdk_nvidia_inc_class_cl84a0_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
-
-#define NV01_MEMORY_LIST_FBMEM (0x00000082)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
deleted file mode 100644
index f1d21776e395..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_cl90f1_h__
-#define __src_common_sdk_nvidia_inc_class_cl90f1_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define FERMI_VASPACE_A (0x000090f1)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
deleted file mode 100644
index b8f32576cfaa..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
-#define __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NVC0B5_ALLOCATION_PARAMETERS {
- NvU32 version;
- NvU32 engineType;
-} NVC0B5_ALLOCATION_PARAMETERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
deleted file mode 100644
index 58b3ba7badf1..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
- NvBool bDscSupported;
- NvU32 encoderColorFormatMask;
- NvU32 lineBufferSizeKB;
- NvU32 rateBufferSizeKB;
- NvU32 bitsPerPixelPrecision;
- NvU32 maxNumHztSlices;
- NvU32 lineBufferBitDepth;
-} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
deleted file mode 100644
index 596f2ea8344e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 flags;
- NvU32 flags2;
-} NV0073_CTRL_DFP_GET_INFO_PARAMS;
-
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
-#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
-#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
-#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
-#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
-#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
-#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
-
-#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
-
-typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 numELDSize;
- NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
- NvU32 maxFreqSupported;
- NvU32 ctrl;
- NvU32 deviceEntry;
-} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
-
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
-#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
-
-#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvBool enable;
-} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
-
-typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
-
-typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
- NvU32 displayMask;
- NvU32 sorType;
-} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
-
-#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
-
-typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU8 sorExcludeMask;
- NvU32 slaveDisplayId;
- NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
- NvBool bIs2Head1Or;
- NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
- NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
- NvU8 reservedSorMask;
- NvU32 flags;
-} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
-
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
-#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
-
-#endif
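Throughout these headers a bare high:low pair (e.g. NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0) names a bit range inside a 32-bit word, consumed by the RM's DRF-style macros. A self-contained sketch of reading such fields from the flags word returned by NV0073_CTRL_CMD_DFP_GET_INFO; FLAGS_FIELD is a local stand-in, not an SDK macro, and the flags value is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Extract bits hi..lo (inclusive) from val, mirroring the high:low notation. */
#define FLAGS_FIELD(val, hi, lo) \
        (((val) >> (lo)) & ((1u << ((hi) - (lo) + 1)) - 1))

#define SIGNAL_DISPLAYPORT 0x3u /* NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT */

int main(void)
{
        uint32_t flags = 0x00000103; /* hypothetical DFP_GET_INFO result */

        if (FLAGS_FIELD(flags, 2, 0) == SIGNAL_DISPLAYPORT) /* _FLAGS_SIGNAL 2:0 */
                printf("DFP signal type: DisplayPort\n");
        if (FLAGS_FIELD(flags, 8, 8))                       /* _FLAGS_HDMI_CAPABLE 8:8 */
                printf("sink is HDMI capable\n");
        return 0;
}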
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
deleted file mode 100644
index bae4b1997736..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
+++ /dev/null
@@ -1,335 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
-
-typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvBool bAddrOnly;
- NvU32 cmd;
- NvU32 addr;
- NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
- NvU32 size;
- NvU32 replyType;
- NvU32 retryTimeMs;
-} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
-
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
-#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
-
-#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 cmd;
- NvU32 data;
- NvU32 err;
- NvU32 retryTimeMs;
- NvU32 eightLaneDpcdBaseAddr;
-} NV0073_CTRL_DP_CTRL_PARAMS;
-
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_UNUSED 3:3
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
-#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
-
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
-#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
-#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
-
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
-#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
-#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
-#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
-#define NV0073_CTRL_DP_DATA_TARGET 22:19
-#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
-#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
-
-#define NV0073_CTRL_MAX_LANES 8U
-
-typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 numLanes;
- NvU32 data[NV0073_CTRL_MAX_LANES];
-} NV0073_CTRL_DP_LANE_DATA_PARAMS;
-
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
-#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
-#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
-
-#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 mute;
-} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 preferredDisplayId;
-
- NvBool force;
- NvBool useBFM;
-
- NvU32 displayIdAssigned;
- NvU32 allDisplayMask;
-} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
-} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 head;
- NvU32 sorIndex;
- NvU32 dpLink;
-
- NvBool bEnableOverride;
- NvBool bMST;
- NvU32 singleHeadMultistreamMode;
- NvU32 hBlankSym;
- NvU32 vBlankSym;
- NvU32 colorFormat;
- NvBool bEnableTwoHeadOneOr;
-
- struct {
- NvU32 slotStart;
- NvU32 slotEnd;
- NvU32 PBN;
- NvU32 Timeslice;
- NvBool sendACT; // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT instead
- NvU32 singleHeadMSTPipeline;
- NvBool bEnableAudioOverRightPanel;
- } MST;
-
- struct {
- NvBool bEnhancedFraming;
- NvU32 tuSize;
- NvU32 waterMark;
- NvU32 actualPclkHz; // deprecated - use MvidWarParams instead
- NvU32 linkClkFreqHz; // deprecated - use MvidWarParams instead
- NvBool bEnableAudioOverRightPanel;
- struct {
- NvU32 activeCnt;
- NvU32 activeFrac;
- NvU32 activePolarity;
- NvBool mvidWarEnabled;
- struct {
- NvU32 actualPclkHz;
- NvU32 linkClkFreqHz;
- } MvidWarParams;
- } Legacy;
- } SST;
-} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
- NvU32 subDeviceInstance;
-} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
-
-typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 sorIndex;
- NvU32 maxLinkRate;
- NvU32 dpVersionsSupported;
- NvU32 UHBRSupported;
- NvBool bIsMultistreamSupported;
- NvBool bIsSCEnabled;
- NvBool bHasIncreasedWatermarkLimits;
- NvBool bIsPC2Disabled;
- NvBool isSingleHeadMSTSupported;
- NvBool bFECSupported;
- NvBool bIsTrainPhyRepeater;
- NvBool bOverrideLinkBw;
- NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
-} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
-
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
-#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
-
-#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
-
-typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
- // In
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
-
- // Out
- NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
- NvU8 linkBwCount;
-} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
-
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
-
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
-#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
-
-#endif
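A hedged sketch of how a caller might fill NV0073_CTRL_DP_AUXCH_CTRL_PARAMS for a one-byte native-AUX DPCD read. The typedefs and struct are re-declared so the fragment compiles alone, and the dispatch call is only indicated in a comment, since the real control path is driver-specific:

#include <stdio.h>
#include <string.h>

typedef unsigned int  NvU32;
typedef unsigned char NvU8;
typedef unsigned char NvBool;

#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U

typedef struct {
        NvU32  subDeviceInstance;
        NvU32  displayId;
        NvBool bAddrOnly;
        NvU32  cmd;
        NvU32  addr;
        NvU8   data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
        NvU32  size;
        NvU32  replyType;
        NvU32  retryTimeMs;
} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;

/* cmd word: CMD_TYPE_AUX in bit 3:3, REQ_TYPE_READ in bits 1:0. */
#define AUXCH_CMD_AUX_READ ((1u << 3) | 1u)

int main(void)
{
        NV0073_CTRL_DP_AUXCH_CTRL_PARAMS p;

        memset(&p, 0, sizeof(p));
        p.displayId = 0x100;   /* hypothetical display mask bit */
        p.cmd       = AUXCH_CMD_AUX_READ;
        p.addr      = 0x00000; /* DPCD offset 0 (DPCD revision) */
        p.size      = 1;       /* request one byte */

        /* The real driver would now hand &p to its RM control dispatch for
         * NV0073_CTRL_CMD_DP_AUXCH_CTRL; that call is omitted here. */
        printf("AUX read: addr=0x%05x size=%u cmd=0x%x\n", p.addr, p.size, p.cmd);
        return 0;
}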
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
deleted file mode 100644
index 954958dcf834..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
+++ /dev/null
@@ -1,216 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 bufferSize;
- NvU32 flags;
- NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
-} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_MAX_CONNECTORS 4U
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 flags;
- NvU32 DDCPartners;
- NvU32 count;
- struct {
- NvU32 index;
- NvU32 type;
- NvU32 location;
- } data[NV0073_CTRL_MAX_CONNECTORS];
- NvU32 platform;
-} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
- NvU8 subDeviceInstance;
- NvU32 displayId;
- NvU8 enable;
-} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
- NvU8 subDeviceInstance;
- NvU32 displayId;
- NvU8 mute;
-} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 headMask;
-} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 transmitControl;
- NvU32 packetSize;
- NvU32 targetHead;
- NvBool bUsePsrHeadforSdp;
- NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
-} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
-
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
-#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
-
-#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 index;
- NvU32 type;
- NvU32 protocol;
- NvU32 ditherType;
- NvU32 ditherAlgo;
- NvU32 location;
- NvU32 rootPortId;
- NvU32 dcbIndex;
- NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
- NvBool bIsLitByVbios;
- NvBool bIsDispDynamic;
-} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
-
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
-
-#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
-
-#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
-
-#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 brightness;
- NvBool bUncalibrated;
-} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayId;
- NvU32 caps;
-} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
-
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
-#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
-
-#endif
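The converse of field extraction: packing the caps word for NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS from the high:low ranges above. PACK_FIELD is a local illustrative helper, not part of the SDK, and the chosen capability values are hypothetical:

#include <stdio.h>

/* Place val into bits hi..lo (inclusive) of a 32-bit word. */
#define PACK_FIELD(val, hi, lo) \
        (((unsigned)(val) & ((1u << ((hi) - (lo) + 1)) - 1)) << (lo))

int main(void)
{
        unsigned caps = 0;

        caps |= PACK_FIELD(1, 0, 0); /* GT_340MHZ_CLOCK_SUPPORTED_TRUE      (0:0) */
        caps |= PACK_FIELD(1, 2, 2); /* SCDC_SUPPORTED_TRUE                 (2:2) */
        caps |= PACK_FIELD(4, 5, 3); /* MAX_FRL_RATE_SUPPORTED_4LANES_8G    (5:3) */

        printf("caps = 0x%08x\n", caps); /* prints 0x00000025 */
        return 0;
}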
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
deleted file mode 100644
index d69cef3c01fd..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 flags;
- NvU32 numHeads;
-} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 displayMask;
- NvU32 displayMaskDDC;
-} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 flags;
- NvU32 displayMask;
- NvU32 retryTimeMs;
-} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
-
-#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
-
-typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
- NvU32 subDeviceInstance;
- NvU32 head;
- NvU32 flags;
- NvU32 displayId;
-} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
-
-#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
deleted file mode 100644
index 3db099e62364..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
- NvU32 totalVFs;
- NvU32 firstVfOffset;
- NvU32 vfFeatureMask;
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
- NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
- NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
- NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
- NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
- NvBool b64bitBar0;
- NvBool b64bitBar1;
- NvBool b64bitBar2;
- NvBool bSriovEnabled;
- NvBool bSriovHeavyEnabled;
- NvBool bEmulateVFBar0TlbInvalidationRegister;
- NvBool bClientRmAllocatedCtxBuffer;
-} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
deleted file mode 100644
index ed01df925573..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
deleted file mode 100644
index b5b7631de99b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
- NvU32 BoardID;
- char chipSKU[4];
- char chipSKUMod[2];
- char project[5];
- char projectSKU[5];
- char CDP[6];
- char projectSKUMod[2];
- NvU32 businessCycle;
-} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
deleted file mode 100644
index fe912d2bd183..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
- NvU32 size;
-} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
-
-#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
deleted file mode 100644
index 87bc4ff92ce1..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
- NvU32 event;
- NvU32 action;
- NvBool bNotifyState;
- NvU32 info32;
- NvU16 info16;
-} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
-
-#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
-
-#endif
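
A sketch of arming a repeating notification with the params above; rm_ctrl() is a hypothetical transport for the control call, and the types are the ones this header defines:

    /* Ask RM to keep notifying for 'event_id' until told otherwise. */
    NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS p = {
        .event        = event_id,
        .action       = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT,
        .bNotifyState = 0,
    };
    rm_ctrl(NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, &p, sizeof(p));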
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
deleted file mode 100644
index 68c81f9f803c..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
-
-typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
-
-typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
- NV_DECLARE_ALIGNED(NvU64 base, 8);
- NV_DECLARE_ALIGNED(NvU64 limit, 8);
- NV_DECLARE_ALIGNED(NvU64 reserved, 8);
- NvU32 performance;
- NvBool supportCompressed;
- NvBool supportISO;
- NvBool bProtected;
- NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
-} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
-
-#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
-
-typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
- NvU32 numFBRegions;
- NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
-} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
deleted file mode 100644
index bc0f63699b06..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
-#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
-
-typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
- NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
- NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
- NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
- NvU32 numPbdmas;
- char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
-} NV2080_CTRL_FIFO_DEVICE_ENTRY;
-
-typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
- NvU32 baseIndex;
- NvU32 numEntries;
- NvBool bMore;
- // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
- NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
-} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
-
-#endif
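
The baseIndex/numEntries/bMore trio above implies a paged query: each call fills at most MAX_ENTRIES entries and sets bMore while more remain. A sketch of the resulting loop, again assuming a hypothetical rm_ctrl() transport and consumer:

    NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS p = {0};

    do {
        if (rm_ctrl(NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, &p, sizeof(p)))
            break;                            /* control call failed */
        for (NvU32 i = 0; i < p.numEntries; i++)
            handle_engine(&p.entries[i]);     /* hypothetical consumer */
        p.baseIndex += p.numEntries;          /* advance to the next page */
    } while (p.bMore);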
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
deleted file mode 100644
index 29d7a1052142..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
-
-#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
-
-#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
-
-typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
- NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
- NvU32 physAttr;
- NvU16 bufferId;
- NvU8 bInitialize;
- NvU8 bNonmapped;
-} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
-
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
-#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
-
-#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
-
-#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
- NvU32 engineType;
- NvHandle hClient;
- NvU32 ChID;
- NvHandle hChanClient;
- NvHandle hObject;
- NvHandle hVirtMemory;
- NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
- NvU32 entryCount;
- // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
- NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
-} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
- NvU32 gpcMask;
-} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
- NvU32 gpcId;
- NvU32 tpcMask;
-} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
-
-typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
- NvU32 gpcId;
- NvU32 zcullMask;
-} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
-
-#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
-
-typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
- NvU32 index;
- NvU32 flags;
- NvU32 length;
- NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
-} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
-
-#endif
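
Because entryCount indexes the fixed promoteEntry[] array, callers building the request above need a bounds check before appending. A sketch using this header's types (the field choices are illustrative):

    /* Append one context buffer to a promote request; returns -1 when full. */
    static int promote_add(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *p,
                           NvU16 bufferId, NvU64 gpuVirtAddr, NvU64 size)
    {
        if (p->entryCount >= NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES)
            return -1;
        p->promoteEntry[p->entryCount++] =
            (NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY){
                .gpuVirtAddr = gpuVirtAddr,
                .size        = size,
                .bufferId    = bufferId,
                .bInitialize = 1,
            };
        return 0;
    }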
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
deleted file mode 100644
index 59f8895bc5d7..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
- NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
-} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
deleted file mode 100644
index e11b2dbe5288..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
- NvU32 feHwSysCap;
- NvU32 windowPresentMask;
- NvBool bFbRemapperEnabled;
- NvU32 numHeads;
- NvBool bPrimaryVga;
- NvU32 i2cPort;
- NvU32 internalDispActiveMask;
-} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
-
-#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
-
-#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
-
-typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
- NvU32 size;
- NvU32 alignment;
-} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
-
-typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
- NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
-} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
-
-typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
- NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
-} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
- NvU32 engDesc;
- NvU32 ctxAttr;
- NvU32 ctxBufferSize;
- NvU32 addrSpaceList;
- NvU32 registerBase;
-} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
-#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
-
-#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
- NvU32 numConstructedFalcons;
- NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
-} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
- NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
- NvU32 instMemAddrSpace;
- NvU32 instMemCpuCacheAttr;
-} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
- NvU32 addressSpace;
- NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
- NV_DECLARE_ALIGNED(NvU64 limit, 8);
- NvU32 cacheSnoop;
- NvU32 hclass;
- NvU32 channelInstance;
- NvBool valid;
-} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
-
-typedef enum NV2080_INTR_CATEGORY {
- NV2080_INTR_CATEGORY_DEFAULT = 0,
- NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
- NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
- NV2080_INTR_CATEGORY_RUNLIST = 3,
- NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
- NV2080_INTR_CATEGORY_UVM_OWNED = 5,
- NV2080_INTR_CATEGORY_UVM_SHARED = 6,
- NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
-} NV2080_INTR_CATEGORY;
-
-typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
- NvU8 subtreeStart;
- NvU8 subtreeEnd;
-} NV2080_INTR_CATEGORY_SUBTREE_MAP;
-
-typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
- NvU16 engineIdx;
- NvU32 pmcIntrMask;
- NvU32 vectorStall;
- NvU32 vectorNonStall;
-} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
-
-typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
- NvU32 tableLen;
- NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
- NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
-} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
- NvU32 fbsrType;
- NvU32 numRegions;
- NvHandle hClient;
- NvHandle hSysMem;
- NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
- NvBool bEnteringGcoffState;
-} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
-
-typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
- NvU32 fbsrType;
- NvHandle hClient;
- NvHandle hVidMem;
- NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
- NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
- NV_DECLARE_ALIGNED(NvU64 size, 8);
-} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
-
-#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
-
-#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
-
-typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
- NvU32 status;
- NvU16 backLightDataSize;
- NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
-} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
-
-#endif
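
The interrupt-table control above returns tableLen valid entries in table[]; a sketch of consuming the result (rm_ctrl() is hypothetical, the types come from this header):

    NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS p = {0};

    if (!rm_ctrl(NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, &p, sizeof(p))) {
        for (NvU32 i = 0; i < p.tableLen; i++)
            pr_info("engine %u: stall vec %u, nonstall vec %u\n",
                    p.table[i].engineIdx, p.table[i].vectorStall,
                    p.table[i].vectorNonStall);
    }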
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
deleted file mode 100644
index 977e59818533..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GMMU_FMT_MAX_LEVELS 6U
-
-#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
-
-typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
- /*!
- * [in] GPU sub-device handle - this API only supports unicast.
- * Pass 0 to use subDeviceId instead.
- */
- NvHandle hSubDevice;
-
- /*!
- * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
- */
- NvU32 subDeviceId;
-
- /*!
- * [in] Page size (VA coverage) of the level to reserve.
- * This need not be a leaf (page table) page size - it can be
- * the coverage of an arbitrary level (including root page directory).
- */
- NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
-
- /*!
- * [in] First GPU virtual address of the range to reserve.
- * This must be aligned to pageSize.
- */
- NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
-
- /*!
- * [in] Last GPU virtual address of the range to reserve.
- * This (+1) must be aligned to pageSize.
- */
- NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
-
- /*!
- * [in] Number of PDE levels to copy.
- */
- NvU32 numLevelsToCopy;
-
- /*!
- * [in] Per-level information.
- */
- struct {
- /*!
- * Physical address of this page level instance.
- */
- NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
-
- /*!
- * Size in bytes allocated for this level instance.
- */
- NV_DECLARE_ALIGNED(NvU64 size, 8);
-
- /*!
- * Aperture in which this page level instance resides.
- */
- NvU32 aperture;
-
- /*!
- * Page shift corresponding to the level
- */
- NvU8 pageShift;
- } levels[GMMU_FMT_MAX_LEVELS];
-} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
-
-#endif
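
The alignment rules spelled out above (virtAddrLo aligned to pageSize, virtAddrHi + 1 likewise) suggest helpers like the following; a standalone sketch assuming pageSize is a power of two:

    #include <stdint.h>

    static inline uint64_t align_down(uint64_t v, uint64_t a) { return v & ~(a - 1); }
    static inline uint64_t align_up(uint64_t v, uint64_t a)   { return (v + a - 1) & ~(a - 1); }

    /* Widen [start, end] to a pageSize-aligned inclusive range suitable for
     * the virtAddrLo/virtAddrHi fields above. */
    static void va_range(uint64_t start, uint64_t end, uint64_t page_size,
                         uint64_t *lo, uint64_t *hi)
    {
        *lo = align_down(start, page_size);
        *hi = align_up(end + 1, page_size) - 1;
    }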
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
deleted file mode 100644
index 684045796232..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
-
-typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
- NvBool bEnable;
- NvBool bSkipSubmit;
-} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
-
-#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
-
-typedef struct NVA06F_CTRL_BIND_PARAMS {
- NvU32 engineType;
-} NVA06F_CTRL_BIND_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
deleted file mode 100644
index 5c5a004a8031..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_nvlimits_h__
-#define __src_common_sdk_nvidia_inc_nvlimits_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV_MAX_SUBDEVICES 8
-
-#define NV_PROC_NAME_MAX_LENGTH 100U
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
deleted file mode 100644
index 51b5591c603e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef __src_common_sdk_nvidia_inc_nvos_h__
-#define __src_common_sdk_nvidia_inc_nvos_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVOS02_FLAGS_PHYSICALITY 7:4
-#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
-#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
-#define NVOS02_FLAGS_LOCATION 11:8
-#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
-#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
-#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
-#define NVOS02_FLAGS_COHERENCY 15:12
-#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
-#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
-#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
-#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
-#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
-#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
-#define NVOS02_FLAGS_ALLOC 17:16
-#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
-#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
-#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
-#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
-
-#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
-#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
-#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
-#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
-
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
-
-#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
-#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
-
-#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
-#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
-#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
-
-#define NVOS02_FLAGS_MAPPING 31:30
-#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
-#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
-#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
-
-#define NV01_EVENT_CLIENT_RM (0x04000000)
-
-typedef struct
-{
- NvV32 channelInstance; // One of the n channel instances of a given channel type.
- // Note that the core channel has only one instance
- // while all others have two (one per head).
- NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
- NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
- NvU32 offset; // Initial offset for put/get, usually zero.
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
-
- NvU32 flags;
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
-#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
-
-} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvV32 channelInstance; // One of the n channel instances of a given channel type.
- // All PIO channels have two instances (one per head).
- NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
- NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
-} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances;
- NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
-} NV_BSP_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
- NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
-} NV_MSENC_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
- NvU32 engineInstance;
-} NV_NVJPG_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 size;
- NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
-} NV_OFA_ALLOCATION_PARAMETERS;
-
-typedef struct
-{
- NvU32 index;
- NvV32 flags;
- NvU64 vaSize NV_ALIGN_BYTES(8);
- NvU64 vaStartInternal NV_ALIGN_BYTES(8);
- NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
- NvU32 bigPageSize;
- NvU64 vaBase NV_ALIGN_BYTES(8);
-} NV_VASPACE_ALLOCATION_PARAMETERS;
-
-#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //<! Create new VASpace, by default
-
-#endif
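
The HI:LO tokens above (e.g. NVOS02_FLAGS_LOCATION 11:8) only become meaningful when spliced into a conditional expression, where (0 ? 11:8) yields the base bit and (1 ? 11:8) the extent; NVIDIA's nvmisc.h builds its DRF_* field macros on this trick. A standalone sketch of the idea, with illustrative macro names rather than the SDK's:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAGS_LOCATION        11:8        /* copied from the header above */
    #define FLAGS_LOCATION_VIDMEM 0x00000002

    #define FIELD_BASE(f)   (0 ? f)           /* 0 ? 11:8 -> 8 */
    #define FIELD_EXTENT(f) (1 ? f)           /* 1 ? 11:8 -> 11 */
    #define FIELD_MASK(f) \
        ((0xffffffffu >> (31 - FIELD_EXTENT(f))) & ~((1u << FIELD_BASE(f)) - 1u))
    #define FIELD_NUM(f, v) (((v) << FIELD_BASE(f)) & FIELD_MASK(f))

    int main(void)
    {
        uint32_t flags = FIELD_NUM(FLAGS_LOCATION, FLAGS_LOCATION_VIDMEM);
        printf("flags = 0x%08x\n", flags); /* 0x00000200: bits 11:8 = 2 */
        return 0;
    }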
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
deleted file mode 100644
index 0e32e71e123f..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef __src_common_shared_msgq_inc_msgq_msgq_priv_h__
-#define __src_common_shared_msgq_inc_msgq_msgq_priv_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * msgqTxHeader -- TX queue data structure
- * @version: the version of this structure, must be 0
- * @size: the size of the entire queue, including this header
- * @msgSize: the padded size of a queue element; 16 is the minimum
- * @msgCount: the number of elements in this queue
- * @writePtr: head index of this queue
- * @flags: 1 = swap the RX pointers
- * @rxHdrOff: offset of readPtr in this structure
- * @entryOff: offset of beginning of queue (msgqRxHeader), relative to
- * beginning of this structure
- *
- * The command queue is a queue of RPCs that are sent from the driver to the
- * GSP. The status queue is a queue of messages/responses from GSP-RM to the
- * driver. Although the driver allocates memory for both queues, the command
- * queue is owned by the driver and the status queue is owned by GSP-RM. In
- * addition, the headers of the two queues must not share the same 4K page.
- *
- * Each queue is prefixed with this data structure. The idea is that a queue
- * and its header are written to only by their owner. That is, only the
- * driver writes to the command queue and command queue header, and only the
- * GSP writes to the status (receive) queue and its header.
- *
- * This is enforced by the concept of "swapping" the RX pointers. This is
- * why the 'flags' field must be set to 1. 'rxHdrOff' is how the GSP knows
- * where the tail pointer of its status queue is.
- *
- * When the driver writes a new RPC to the command queue, it updates writePtr.
- * When it reads a new message from the status queue, it updates readPtr. In
- * this way, the GSP knows when a new command is in the queue (it polls
- * writePtr) and it knows how much free space is in the status queue (it
- * checks readPtr). The driver never cares about how much free space is in
- * the status queue.
- *
- * As usual, producers write to the head pointer, and consumers read from the
- * tail pointer. When head == tail, the queue is empty.
- *
- * So to summarize:
- * command.writePtr = head of command queue
- * command.readPtr = tail of status queue
- * status.writePtr = head of status queue
- * status.readPtr = tail of command queue
- */
-typedef struct
-{
- NvU32 version; // queue version
- NvU32 size; // bytes, page aligned
- NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
- NvU32 msgCount; // number of entries in queue
- NvU32 writePtr; // message id of next slot
- NvU32 flags; // if set it means "i want to swap RX"
- NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
- NvU32 entryOff; // Offset of entries from start of backing store.
-} msgqTxHeader;
-
-/**
- * msgqRxHeader - RX queue data structure
- * @readPtr: tail index of the other queue
- *
- * Although this is a separate struct, it could easily be merged into
- * msgqTxHeader. msgqTxHeader.rxHdrOff is simply the offset of readPtr
- * from the beginning of msgqTxHeader.
- */
-typedef struct
-{
- NvU32 readPtr; // message id of last message read
-} msgqRxHeader;
-
-#endif
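
Given the convention documented above (head == tail means empty), occupancy and free space follow the usual ring arithmetic, with one slot kept unused so a full queue stays distinguishable from an empty one. A standalone sketch:

    #include <stdint.h>

    /* writePtr/readPtr are slot indices in [0, msgCount). */
    static uint32_t msgq_used(uint32_t writePtr, uint32_t readPtr, uint32_t msgCount)
    {
        return (writePtr + msgCount - readPtr) % msgCount;
    }

    static uint32_t msgq_free(uint32_t writePtr, uint32_t readPtr, uint32_t msgCount)
    {
        return msgCount - 1 - msgq_used(writePtr, readPtr, msgCount);
    }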
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
deleted file mode 100644
index 83cf1b2c15a3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef __src_common_uproc_os_common_include_libos_init_args_h__
-#define __src_common_uproc_os_common_include_libos_init_args_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef NvU64 LibosAddress;
-
-typedef enum {
- LIBOS_MEMORY_REGION_NONE,
- LIBOS_MEMORY_REGION_CONTIGUOUS,
- LIBOS_MEMORY_REGION_RADIX3
-} LibosMemoryRegionKind;
-
-typedef enum {
- LIBOS_MEMORY_REGION_LOC_NONE,
- LIBOS_MEMORY_REGION_LOC_SYSMEM,
- LIBOS_MEMORY_REGION_LOC_FB
-} LibosMemoryRegionLoc;
-
-typedef struct
-{
- LibosAddress id8; // Id tag.
- LibosAddress pa; // Physical address.
- LibosAddress size; // Size of memory area.
- NvU8 kind; // See LibosMemoryRegionKind above.
- NvU8 loc; // See LibosMemoryRegionLoc above.
-} LibosMemoryRegionInitArgument;
-
-#endif
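
A hypothetical initializer for one region descriptor, following the trailing comments in the struct above; every value is a placeholder:

    LibosMemoryRegionInitArgument arg = {
        .id8  = 0x0123456789abcdefULL,  /* arbitrary 8-byte id tag */
        .pa   = 0x100000000ULL,         /* placeholder physical address */
        .size = 0x100000,               /* 1 MiB region */
        .kind = LIBOS_MEMORY_REGION_CONTIGUOUS,
        .loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM,
    };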
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
deleted file mode 100644
index 73213bdfcbda..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
-#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
-#define GSP_FW_SR_META_REVISION 2
-
-typedef struct
-{
- //
- // Magic
- // Use for verification by Booter
- //
- NvU64 magic; // = GSP_FW_SR_META_MAGIC;
-
- //
- // Revision number
- // Bumped up when we change this interface so it is not backward compatible.
- // Bumped up when we revoke GSP-RM ucode
- //
- NvU64 revision; // = GSP_FW_SR_META_REVISION;
-
- //
- // ---- Members regarding data in SYSMEM ----------------------------
- // Consumed by Booter for DMA
- //
- NvU64 sysmemAddrOfSuspendResumeData;
- NvU64 sizeOfSuspendResumeData;
-
- // ---- Members for crypto ops across S/R ---------------------------
-
- //
- // HMAC over the entire GspFwSRMeta structure (including padding)
- // with the hmac field itself zeroed.
- //
- NvU8 hmac[32];
-
- // Hash over GspFwWprMeta structure
- NvU8 wprMetaHash[32];
-
- // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
- NvU8 heapFreeListHash[32];
-
- // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
- NvU8 dataHash[32];
-
- //
- // Pad structure to exactly 256 bytes (1 DMA chunk).
- // Padding initialized to zero.
- //
- NvU32 padding[24];
-
-} GspFwSRMeta;
-
-#endif
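
The trailing comment pins GspFwSRMeta at exactly 256 bytes (one DMA chunk), and the fields do add up: four 8-byte words, four 32-byte hashes, and 96 bytes of padding. A compile-time check makes the invariant explicit:

    #include <assert.h>

    /* 32 + 128 + 96 == 256; compilation fails if a field is ever resized. */
    static_assert(sizeof(GspFwSRMeta) == 256, "GspFwSRMeta must be one DMA chunk");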
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
deleted file mode 100644
index a2e141e4b459..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
-#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct
-{
- // Magic
- // BL to use for verification (i.e. Booter locked it in WPR2)
- NvU64 magic; // = 0xdc3aae21371a60b3;
-
- // Revision number of Booter-BL-Sequencer handoff interface
- // Bumped up when we change this interface so it is not backward compatible.
- // Bumped up when we revoke GSP-RM ucode
- NvU64 revision; // = 1;
-
- // ---- Members regarding data in SYSMEM ----------------------------
- // Consumed by Booter for DMA
-
- NvU64 sysmemAddrOfRadix3Elf;
- NvU64 sizeOfRadix3Elf;
-
- NvU64 sysmemAddrOfBootloader;
- NvU64 sizeOfBootloader;
-
- // Offsets inside bootloader image needed by Booter
- NvU64 bootloaderCodeOffset;
- NvU64 bootloaderDataOffset;
- NvU64 bootloaderManifestOffset;
-
- union
- {
- // Used only at initial boot
- struct
- {
- NvU64 sysmemAddrOfSignature;
- NvU64 sizeOfSignature;
- };
-
- //
- // Used at suspend/resume to read GspFwHeapFreeList
- // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
- //
- struct
- {
- NvU32 gspFwHeapFreeListWprOffset;
- NvU32 unused0;
- NvU64 unused1;
- };
- };
-
- // ---- Members describing FB layout --------------------------------
- NvU64 gspFwRsvdStart;
-
- NvU64 nonWprHeapOffset;
- NvU64 nonWprHeapSize;
-
- NvU64 gspFwWprStart;
-
- // GSP-RM to use to setup heap.
- NvU64 gspFwHeapOffset;
- NvU64 gspFwHeapSize;
-
- // BL to use to find ELF for jump
- NvU64 gspFwOffset;
- // Size is sizeOfRadix3Elf above.
-
- NvU64 bootBinOffset;
- // Size is sizeOfBootloader above.
-
- NvU64 frtsOffset;
- NvU64 frtsSize;
-
- NvU64 gspFwWprEnd;
-
- // GSP-RM to use for fbRegionInfo?
- NvU64 fbSize;
-
- // ---- Other members -----------------------------------------------
-
- // GSP-RM to use for fbRegionInfo?
- NvU64 vgaWorkspaceOffset;
- NvU64 vgaWorkspaceSize;
-
- // Boot count. Used to determine whether to load the firmware image.
- NvU64 bootCount;
-
- // TODO: the partitionRpc* fields below do not really belong in this
- // structure. The values are patched in by the partition bootstrapper
- // when GSP-RM is booted in a partition, and this structure was a
- // convenient place for the bootstrapper to access them. These should
- // be moved to a different comm. mechanism between the bootstrapper
- // and the GSP-RM tasks.
-
- union
- {
- struct
- {
- // Shared partition RPC memory (physical address)
- NvU64 partitionRpcAddr;
-
- // Offsets relative to partitionRpcAddr
- NvU16 partitionRpcRequestOffset;
- NvU16 partitionRpcReplyOffset;
-
- // Code section and dataSection offset and size.
- NvU32 elfCodeOffset;
- NvU32 elfDataOffset;
- NvU32 elfCodeSize;
- NvU32 elfDataSize;
-
- // Used during GSP-RM resume to check for revocation
- NvU32 lsUcodeVersion;
- };
-
- struct
- {
- // Pad for the partitionRpc* fields, plus 4 bytes
- NvU32 partitionRpcPadding[4];
-
- // CrashCat (contiguous) buffer size/location - occupies same bytes as the
- // elf(Code|Data)(Offset|Size) fields above.
- // TODO: move to GSP_FMC_INIT_PARAMS
- NvU64 sysmemAddrOfCrashReportQueue;
- NvU32 sizeOfCrashReportQueue;
-
- // Pad for the lsUcodeVersion field
- NvU32 lsUcodeVersionPadding[1];
- };
- };
-
- // Number of VF partitions allocating sub-heaps from the WPR heap
- // Used during boot to ensure the heap is adequately sized
- NvU8 gspFwHeapVfPartitionCount;
-
- // Pad structure to exactly 256 bytes. Can replace padding with additional
- // fields without incrementing revision. Padding initialized to 0.
- NvU8 padding[7];
-
- // BL to use for verification (i.e. Booter says OK to boot)
- NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
-} GspFwWprMeta;
-
-#define GSP_FW_WPR_META_REVISION 1
-#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
-
-#endif
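
The magic and revision fields carry the handoff checks described in the comments above; a sketch of the consumer-side validation:

    #include <stdbool.h>

    /* Reject a WPR meta whose magic or interface revision does not match. */
    static bool wpr_meta_valid(const GspFwWprMeta *meta)
    {
        return meta->magic == GSP_FW_WPR_META_MAGIC &&
               meta->revision == GSP_FW_WPR_META_REVISION;
    }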
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
deleted file mode 100644
index 4eff473e8990..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
-#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct {
- //
- // Version 1
- // Version 2
- // Version 3 = for Partition boot
- // Version 4 = for eb riscv boot
- // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
- //
- NvU32 version; // structure version
- NvU32 bootloaderOffset;
- NvU32 bootloaderSize;
- NvU32 bootloaderParamOffset;
- NvU32 bootloaderParamSize;
- NvU32 riscvElfOffset;
- NvU32 riscvElfSize;
- NvU32 appVersion; // Changelist number associated with the image
- //
- // Manifest contains information about Monitor and it is
- // input to BR
- //
- NvU32 manifestOffset;
- NvU32 manifestSize;
- //
- // Monitor Data offset within RISCV image and size
- //
- NvU32 monitorDataOffset;
- NvU32 monitorDataSize;
- //
- // Monitor Code offset within RISCV image and size
- //
- NvU32 monitorCodeOffset;
- NvU32 monitorCodeSize;
- NvU32 bIsMonitorEnabled;
- //
- // Swbrom Code offset within RISCV image and size
- //
- NvU32 swbromCodeOffset;
- NvU32 swbromCodeSize;
- //
- // Swbrom Data offset within RISCV image and size
- //
- NvU32 swbromDataOffset;
- NvU32 swbromDataSize;
- //
- // Total size of FB carveout (image and reserved space).
- //
- NvU32 fbReservedSize;
- //
- // Indicates whether the entire RISC-V image is signed as "code" in code section.
- //
- NvU32 bSignedAsCode;
-} RM_RISCV_UCODE_DESC;
-
-#endif
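
All offsets in the descriptor above are relative to the start of a single firmware blob; a sketch that resolves one section slice (the bootloader) from those fields, assuming 'fw' points at the image base:

    #include <stdint.h>

    struct fw_slice { const uint8_t *data; uint32_t size; };

    static struct fw_slice riscv_bootloader(const uint8_t *fw,
                                            const RM_RISCV_UCODE_DESC *desc)
    {
        struct fw_slice s = {
            .data = fw + desc->bootloaderOffset,
            .size = desc->bootloaderSize,
        };
        return s;
    }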
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
deleted file mode 100644
index 341ab0dbeaf2..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
-#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum GSP_SEQ_BUF_OPCODE
-{
- GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
- GSP_SEQ_BUF_OPCODE_REG_MODIFY,
- GSP_SEQ_BUF_OPCODE_REG_POLL,
- GSP_SEQ_BUF_OPCODE_DELAY_US,
- GSP_SEQ_BUF_OPCODE_REG_STORE,
- GSP_SEQ_BUF_OPCODE_CORE_RESET,
- GSP_SEQ_BUF_OPCODE_CORE_START,
- GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
- GSP_SEQ_BUF_OPCODE_CORE_RESUME,
-} GSP_SEQ_BUF_OPCODE;
-
-#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
- ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
- (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
- /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
- /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
- 0)
-
-typedef struct
-{
- NvU32 addr;
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 mask;
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 mask;
- NvU32 val;
- NvU32 timeout;
- NvU32 error;
-} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
-
-typedef struct
-{
- NvU32 val;
-} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
-
-typedef struct
-{
- NvU32 addr;
- NvU32 index;
-} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
-
-typedef struct GSP_SEQUENCER_BUFFER_CMD
-{
- GSP_SEQ_BUF_OPCODE opCode;
- union
- {
- GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
- GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
- GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
- GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
- GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
- } payload;
-} GSP_SEQUENCER_BUFFER_CMD;
-
-#endif
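
A sketch of how a CPU-side interpreter would walk a sequencer command buffer using the payload-size macro above: each command is one opcode dword followed by a per-opcode payload, and the CORE_* opcodes carry no payload (the macro falls through to 0). The dispatch hook is a hypothetical placeholder, not a nouveau function.

static void gsp_seq_walk(const NvU32 *buf, NvU32 len_dwords)
{
	NvU32 i = 0;

	while (i < len_dwords) {
		GSP_SEQ_BUF_OPCODE op = (GSP_SEQ_BUF_OPCODE)buf[i++];
		NvU32 payload = GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(op);

		/* dispatch(op, &buf[i], payload); -- hypothetical hook */
		i += payload;
	}
}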
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
deleted file mode 100644
index 3144e9beac61..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_generated_g_allclasses_h__
-#define __src_nvidia_generated_g_allclasses_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
-
-#define NV04_DISPLAY_COMMON (0x00000073)
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
deleted file mode 100644
index 6b8921138c7d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __src_nvidia_generated_g_chipset_nvoc_h__
-#define __src_nvidia_generated_g_chipset_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct
-{
- NvU16 deviceID; // deviceID
- NvU16 vendorID; // vendorID
- NvU16 subdeviceID; // subsystem deviceID
- NvU16 subvendorID; // subsystem vendorID
- NvU8 revisionID; // revision ID
-} BUSINFO;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
deleted file mode 100644
index a5128f00225b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_nvidia_generated_g_fbsr_nvoc_h__
-#define __src_nvidia_generated_g_fbsr_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
deleted file mode 100644
index 5641a21cacca..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef __src_nvidia_generated_g_gpu_nvoc_h__
-#define __src_nvidia_generated_g_gpu_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- COMPUTE_BRANDING_TYPE_NONE,
- COMPUTE_BRANDING_TYPE_TESLA,
-} COMPUTE_BRANDING_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
deleted file mode 100644
index b5ad55f854dc..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __src_nvidia_generated_g_kernel_channel_nvoc_h__
-#define __src_nvidia_generated_g_kernel_channel_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum {
- /*!
- * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
- * kernel CPU-RM clients.
- */
- ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
- /*! @brief Error notifier is explicitly not set.
- *
- * The corresponding hErrorContext or hEccErrorContext must be
- * NV01_NULL_OBJECT.
- */
- ERROR_NOTIFIER_TYPE_NONE,
- /*! @brief Error notifier is a ContextDma */
- ERROR_NOTIFIER_TYPE_CTXDMA,
- /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
- ERROR_NOTIFIER_TYPE_MEMORY
-} ErrorNotifierType;
-
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
-#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
-
-#endif
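
The N:M suffixes above are NVIDIA's high:low bit-range notation, normally consumed through the SDK's DRF macros. Packed by hand, with the shifts and masks read straight off the ranges 1:0, 3:2 and 5:4, a kernel-privileged channel with no error notifiers would look roughly like:

NvU32 flags = 0;

flags |= (NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL & 0x3) << 0;	/* 1:0 */
flags |= ((NvU32)ERROR_NOTIFIER_TYPE_NONE & 0x3) << 2;				/* 3:2 */
flags |= ((NvU32)ERROR_NOTIFIER_TYPE_NONE & 0x3) << 4;				/* 5:4, ECC */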
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
deleted file mode 100644
index 946954ac5b3d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
+++ /dev/null
@@ -1,119 +0,0 @@
-#ifndef __src_nvidia_generated_g_kernel_fifo_nvoc_h__
-#define __src_nvidia_generated_g_kernel_fifo_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- /* *************************************************************************
- * Bug 3820969
- * THINK BEFORE CHANGING ENUM ORDER HERE.
- * VGPU-guest uses this same ordering. Because this enum is not versioned,
- * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
- * ************************************************************************/
-
- // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
- ENGINE_INFO_TYPE_ENG_DESC = 0,
-
- // HW engine ID
- ENGINE_INFO_TYPE_FIFO_TAG,
-
- // RM_ENGINE_TYPE_*
- ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
-
- //
- // runlist id (meaning varies by GPU)
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST,
-
- // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
- ENGINE_INFO_TYPE_MMU_FAULT_ID,
-
- // ROBUST_CHANNEL_*
- ENGINE_INFO_TYPE_RC_MASK,
-
- // Reset Bit Position. On Ampere, only valid if not _INVALID
- ENGINE_INFO_TYPE_RESET,
-
- // Interrupt Bit Position
- ENGINE_INFO_TYPE_INTR,
-
- // log2(MC_ENGINE_*)
- ENGINE_INFO_TYPE_MC,
-
- // The DEV_TYPE_ENUM for this engine
- ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
-
- // The particular instance of this engine type
- ENGINE_INFO_TYPE_INSTANCE_ID,
-
- //
- // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
-
- //
- // If this entry is a host-driven engine.
- // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
- //
- ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
-
- //
- // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
-
- //
- // The base address for this engine's NV_CHRAM registers. Valid only on
- // Ampere+
- //
- // Valid only for Esched-driven engines
- //
- ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
-
- // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
- ENGINE_INFO_TYPE_KERNEL_RM_MAX,
- // Used for iterating the engine info table by the index passed.
- ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
-
- // Size of FIFO_ENGINE_LIST.engineData
- ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
-
- // Input-only parameter for kfifoEngineInfoXlate.
- ENGINE_INFO_TYPE_PBDMA_ID
-
- /* *************************************************************************
- * Bug 3820969
- * THINK BEFORE CHANGING ENUM ORDER HERE.
- * VGPU-guest uses this same ordering. Because this enum is not versioned,
- * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
- * ************************************************************************/
-} ENGINE_INFO_TYPE;
-
-#endif
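
ENGINE_INFO_TYPE doubles as an index space: the KERNEL_RM_MAX / INVALID / ENGINE_DATA_ARRAY_SIZE aliases at the tail let one constant bound a per-engine data table, as in this hypothetical stand-in for the FIFO_ENGINE_LIST entry that the comment mentions:

typedef struct {
	NvU32 engineData[ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE];
} fifo_engine_entry;	/* hypothetical; one NvU32 per valid info type */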
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
deleted file mode 100644
index daabaee41c87..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__
-#define __src_nvidia_generated_g_mem_desc_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define ADDR_SYSMEM 1 // System memory (PCI)
-#define ADDR_FBMEM 2 // Frame buffer memory space
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
deleted file mode 100644
index 10121218f4d3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef __src_nvidia_generated_g_os_nvoc_h__
-#define __src_nvidia_generated_g_os_nvoc_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct PACKED_REGISTRY_ENTRY
-{
- NvU32 nameOffset;
- NvU8 type;
- NvU32 data;
- NvU32 length;
-} PACKED_REGISTRY_ENTRY;
-
-typedef struct PACKED_REGISTRY_TABLE
-{
- NvU32 size;
- NvU32 numEntries;
- PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
-} PACKED_REGISTRY_TABLE;
-
-#endif
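
The layout implied by PACKED_REGISTRY_TABLE is a header, numEntries fixed-size entries, then the NUL-terminated key strings that each entry's nameOffset refers to. Assuming offsets are relative to the start of the table (the RM consumer fixes the exact base), the wire size for n entries whose keys total s bytes reduces to:

static size_t packed_registry_bytes(NvU32 n, size_t s)
{
	/* sizeof(PACKED_REGISTRY_TABLE) covers only the header, since
	 * entries[] is a flexible array member */
	return sizeof(PACKED_REGISTRY_TABLE) +
	       n * sizeof(PACKED_REGISTRY_ENTRY) + s;
}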
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
deleted file mode 100644
index 8d925e24faea..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef __src_nvidia_generated_g_rpc_structures_h__
-#define __src_nvidia_generated_g_rpc_structures_h__
-#include <nvrm/535.113.01/nvidia/generated/g_sdk-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct rpc_alloc_memory_v13_01
-{
- NvHandle hClient;
- NvHandle hDevice;
- NvHandle hMemory;
- NvU32 hClass;
- NvU32 flags;
- NvU32 pteAdjust;
- NvU32 format;
- NvU64 length NV_ALIGN_BYTES(8);
- NvU32 pageCount;
- struct pte_desc pteDesc;
-} rpc_alloc_memory_v13_01;
-
-typedef struct rpc_free_v03_00
-{
- NVOS00_PARAMETERS_v03_00 params;
-} rpc_free_v03_00;
-
-typedef struct rpc_unloading_guest_driver_v1F_07
-{
- NvBool bInPMTransition;
- NvBool bGc6Entering;
- NvU32 newLevel;
-} rpc_unloading_guest_driver_v1F_07;
-
-typedef struct rpc_update_bar_pde_v15_00
-{
- UpdateBarPde_v15_00 info;
-} rpc_update_bar_pde_v15_00;
-
-typedef struct rpc_gsp_rm_alloc_v03_00
-{
- NvHandle hClient;
- NvHandle hParent;
- NvHandle hObject;
- NvU32 hClass;
- NvU32 status;
- NvU32 paramsSize;
- NvU32 flags;
- NvU8 reserved[4];
- NvU8 params[];
-} rpc_gsp_rm_alloc_v03_00;
-
-typedef struct rpc_gsp_rm_control_v03_00
-{
- NvHandle hClient;
- NvHandle hObject;
- NvU32 cmd;
- NvU32 status;
- NvU32 paramsSize;
- NvU32 flags;
- NvU8 params[];
-} rpc_gsp_rm_control_v03_00;
-
-typedef struct rpc_run_cpu_sequencer_v17_00
-{
- NvU32 bufferSizeDWord;
- NvU32 cmdIndex;
- NvU32 regSaveArea[8];
- NvU32 commandBuffer[];
-} rpc_run_cpu_sequencer_v17_00;
-
-typedef struct rpc_post_event_v17_00
-{
- NvHandle hClient;
- NvHandle hEvent;
- NvU32 notifyIndex;
- NvU32 data;
- NvU16 info16;
- NvU32 status;
- NvU32 eventDataSize;
- NvBool bNotifyList;
- NvU8 eventData[];
-} rpc_post_event_v17_00;
-
-typedef struct rpc_rc_triggered_v17_02
-{
- NvU32 nv2080EngineType;
- NvU32 chid;
- NvU32 exceptType;
- NvU32 scope;
- NvU16 partitionAttributionId;
-} rpc_rc_triggered_v17_02;
-
-typedef struct rpc_os_error_log_v17_00
-{
- NvU32 exceptType;
- NvU32 runlistId;
- NvU32 chid;
- char errString[0x100];
-} rpc_os_error_log_v17_00;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
deleted file mode 100644
index e9fed4140468..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __src_nvidia_generated_g_sdk_structures_h__
-#define __src_nvidia_generated_g_sdk_structures_h__
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct NVOS00_PARAMETERS_v03_00
-{
- NvHandle hRoot;
- NvHandle hObjectParent;
- NvHandle hObjectOld;
- NvV32 status;
-} NVOS00_PARAMETERS_v03_00;
-
-typedef struct UpdateBarPde_v15_00
-{
- NV_RPC_UPDATE_PDE_BAR_TYPE barType;
- NvU64 entryValue NV_ALIGN_BYTES(8);
- NvU64 entryLevelShift NV_ALIGN_BYTES(8);
-} UpdateBarPde_v15_00;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
deleted file mode 100644
index af50b11ec3b4..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
-#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct DOD_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 acpiIdListLen;
- NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
-} DOD_METHOD_DATA;
-
-typedef struct JT_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 jtCaps;
- NvU16 jtRevId;
- NvBool bSBIOSCaps;
-} JT_METHOD_DATA;
-
-typedef struct MUX_METHOD_DATA_ELEMENT
-{
- NvU32 acpiId;
- NvU32 mode;
- NV_STATUS status;
-} MUX_METHOD_DATA_ELEMENT;
-
-typedef struct MUX_METHOD_DATA
-{
- NvU32 tableLen;
- MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
- MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
-} MUX_METHOD_DATA;
-
-typedef struct CAPS_METHOD_DATA
-{
- NV_STATUS status;
- NvU32 optimusCaps;
-} CAPS_METHOD_DATA;
-
-typedef struct ACPI_METHOD_DATA
-{
- NvBool bValid;
- DOD_METHOD_DATA dodMethodData;
- JT_METHOD_DATA jtMethodData;
- MUX_METHOD_DATA muxMethodData;
- CAPS_METHOD_DATA capsMethodData;
-} ACPI_METHOD_DATA;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
deleted file mode 100644
index e3160c60036d..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
-#define __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef enum
-{
- RM_ENGINE_TYPE_NULL = (0x00000000),
- RM_ENGINE_TYPE_GR0 = (0x00000001),
- RM_ENGINE_TYPE_GR1 = (0x00000002),
- RM_ENGINE_TYPE_GR2 = (0x00000003),
- RM_ENGINE_TYPE_GR3 = (0x00000004),
- RM_ENGINE_TYPE_GR4 = (0x00000005),
- RM_ENGINE_TYPE_GR5 = (0x00000006),
- RM_ENGINE_TYPE_GR6 = (0x00000007),
- RM_ENGINE_TYPE_GR7 = (0x00000008),
- RM_ENGINE_TYPE_COPY0 = (0x00000009),
- RM_ENGINE_TYPE_COPY1 = (0x0000000a),
- RM_ENGINE_TYPE_COPY2 = (0x0000000b),
- RM_ENGINE_TYPE_COPY3 = (0x0000000c),
- RM_ENGINE_TYPE_COPY4 = (0x0000000d),
- RM_ENGINE_TYPE_COPY5 = (0x0000000e),
- RM_ENGINE_TYPE_COPY6 = (0x0000000f),
- RM_ENGINE_TYPE_COPY7 = (0x00000010),
- RM_ENGINE_TYPE_COPY8 = (0x00000011),
- RM_ENGINE_TYPE_COPY9 = (0x00000012),
- RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
- RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
- RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
- RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
- RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
- RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
- RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
- RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
- RM_ENGINE_TYPE_NVENC0 = (0x00000025),
- RM_ENGINE_TYPE_NVENC1 = (0x00000026),
- RM_ENGINE_TYPE_NVENC2 = (0x00000027),
- RM_ENGINE_TYPE_VP = (0x00000028),
- RM_ENGINE_TYPE_ME = (0x00000029),
- RM_ENGINE_TYPE_PPP = (0x0000002a),
- RM_ENGINE_TYPE_MPEG = (0x0000002b),
- RM_ENGINE_TYPE_SW = (0x0000002c),
- RM_ENGINE_TYPE_TSEC = (0x0000002d),
- RM_ENGINE_TYPE_VIC = (0x0000002e),
- RM_ENGINE_TYPE_MP = (0x0000002f),
- RM_ENGINE_TYPE_SEC2 = (0x00000030),
- RM_ENGINE_TYPE_HOST = (0x00000031),
- RM_ENGINE_TYPE_DPU = (0x00000032),
- RM_ENGINE_TYPE_PMU = (0x00000033),
- RM_ENGINE_TYPE_FBFLCN = (0x00000034),
- RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
- RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
- RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
- RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
- RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
- RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
- RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
- RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
- RM_ENGINE_TYPE_OFA = (0x0000003d),
- RM_ENGINE_TYPE_LAST = (0x0000003e),
-} RM_ENGINE_TYPE;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
deleted file mode 100644
index 3abec59f0cc4..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
-
-#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
-
-#endif
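
Worked out, the two parameters above are plain arithmetic: 96 KiB of heap per GiB of framebuffer, plus a fixed 48 KiB x 2048 = 96 MiB client-allocation reserve. A hypothetical helper combining just these two terms (the full WPR heap formula has additional base and margin terms not shown here):

static NvU64 gsp_heap_extra(NvU64 fb_size_bytes)
{
	NvU64 fb_gb = fb_size_bytes >> 30;	/* whole GiB of FB */

	return fb_gb * GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB +
	       GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE;
}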
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
deleted file mode 100644
index 4033a6f85a76..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct {
- RmPhysAddr sharedMemPhysAddr;
- NvU32 pageTableEntryCount;
- NvLength cmdQueueOffset;
- NvLength statQueueOffset;
- NvLength locklessCmdQueueOffset;
- NvLength locklessStatQueueOffset;
-} MESSAGE_QUEUE_INIT_ARGUMENTS;
-
-typedef struct {
- NvU32 oldLevel;
- NvU32 flags;
- NvBool bInPMTransition;
-} GSP_SR_INIT_ARGUMENTS;
-
-typedef struct
-{
- MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
- GSP_SR_INIT_ARGUMENTS srInitArguments;
- NvU32 gpuInstance;
-
- struct
- {
- NvU64 pa;
- NvU64 size;
- } profilerArgs;
-} GSP_ARGUMENTS_CACHED;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
deleted file mode 100644
index eeab25a5e290..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
+++ /dev/null
@@ -1,174 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
-#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h>
-#include <nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-typedef struct GSP_VF_INFO
-{
- NvU32 totalVFs;
- NvU32 firstVFOffset;
- NvU64 FirstVFBar0Address;
- NvU64 FirstVFBar1Address;
- NvU64 FirstVFBar2Address;
- NvBool b64bitBar0;
- NvBool b64bitBar1;
- NvBool b64bitBar2;
-} GSP_VF_INFO;
-
-typedef struct GspSMInfo_t
-{
- NvU32 version;
- NvU32 regBankCount;
- NvU32 regBankRegCount;
- NvU32 maxWarpsPerSM;
- NvU32 maxThreadsPerWarp;
- NvU32 geomGsObufEntries;
- NvU32 geomXbufEntries;
- NvU32 maxSPPerSM;
- NvU32 rtCoreCount;
-} GspSMInfo;
-
-typedef struct GspStaticConfigInfo_t
-{
- NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
- NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
- NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
- NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
- NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
- NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
- NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
- COMPUTE_BRANDING_TYPE computeBranding;
-
- NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
- NvU32 sriovMaxGfid;
-
- NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
-
- GspSMInfo SM_info;
-
- NvBool poisonFuseEnabled;
-
- NvU64 fb_length;
- NvU32 fbio_mask;
- NvU32 fb_bus_width;
- NvU32 fb_ram_type;
- NvU32 fbp_mask;
- NvU32 l2_cache_size;
-
- NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
- NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
-
- NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
- NvBool bGpuInternalSku;
- NvBool bIsQuadroGeneric;
- NvBool bIsQuadroAd;
- NvBool bIsNvidiaNvs;
- NvBool bIsVgx;
- NvBool bGeforceSmb;
- NvBool bIsTitan;
- NvBool bIsTesla;
- NvBool bIsMobile;
- NvBool bIsGc6Rtd3Allowed;
- NvBool bIsGcOffRtd3Allowed;
- NvBool bIsGcoffLegacyAllowed;
-
- NvU64 bar1PdeBase;
- NvU64 bar2PdeBase;
-
- NvBool bVbiosValid;
- NvU32 vbiosSubVendor;
- NvU32 vbiosSubDevice;
-
- NvBool bPageRetirementSupported;
-
- NvBool bSplitVasBetweenServerClientRm;
-
- NvBool bClRootportNeedsNosnoopWAR;
-
- VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
- VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
- NvU64 displaylessMaxPixels;
-
- // Client handle for internal RMAPI control.
- NvHandle hInternalClient;
-
- // Device handle for internal RMAPI control.
- NvHandle hInternalDevice;
-
- // Subdevice handle for internal RMAPI control.
- NvHandle hInternalSubdevice;
-
- NvBool bSelfHostedMode;
- NvBool bAtsSupported;
-
- NvBool bIsGpuUefi;
-} GspStaticConfigInfo;
-
-typedef struct GspSystemInfo
-{
- NvU64 gpuPhysAddr;
- NvU64 gpuPhysFbAddr;
- NvU64 gpuPhysInstAddr;
- NvU64 nvDomainBusDeviceFunc;
- NvU64 simAccessBufPhysAddr;
- NvU64 pcieAtomicsOpMask;
- NvU64 consoleMemSize;
- NvU64 maxUserVa;
- NvU32 pciConfigMirrorBase;
- NvU32 pciConfigMirrorSize;
- NvU8 oorArch;
- NvU64 clPdbProperties;
- NvU32 Chipset;
- NvBool bGpuBehindBridge;
- NvBool bMnocAvailable;
- NvBool bUpstreamL0sUnsupported;
- NvBool bUpstreamL1Unsupported;
- NvBool bUpstreamL1PorSupported;
- NvBool bUpstreamL1PorMobileOnly;
- NvU8 upstreamAddressValid;
- BUSINFO FHBBusInfo;
- BUSINFO chipsetIDInfo;
- ACPI_METHOD_DATA acpiMethodData;
- NvU32 hypervisorType;
- NvBool bIsPassthru;
- NvU64 sysTimerOffsetNs;
- GSP_VF_INFO gspVFInfo;
-} GspSystemInfo;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
deleted file mode 100644
index bd5e01f9814b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
-#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define MC_ENGINE_IDX_DISP 2
-
-#define MC_ENGINE_IDX_CE0 15
-
-#define MC_ENGINE_IDX_CE9 24
-
-#define MC_ENGINE_IDX_MSENC 38
-
-#define MC_ENGINE_IDX_MSENC2 40
-
-#define MC_ENGINE_IDX_GSP 49
-#define MC_ENGINE_IDX_NVJPG 50
-#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
-#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
-
-#define MC_ENGINE_IDX_NVJPEG7 57
-
-#define MC_ENGINE_IDX_BSP 64
-#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
-#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
-
-#define MC_ENGINE_IDX_NVDEC7 71
-
-#define MC_ENGINE_IDX_OFA0 80
-
-#define MC_ENGINE_IDX_GR 82
-#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
deleted file mode 100644
index 366447a368bf..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_gpu_nvbitmask_h__
-#define __src_nvidia_inc_kernel_gpu_nvbitmask_h__
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NVGPU_ENGINE_CAPS_MASK_BITS 32
-#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
-
-#endif
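
With RM_ENGINE_TYPE_LAST = 0x3e (62), the expression evaluates to (62 - 1) / 32 + 1 = 2, i.e. two 32-bit words hold one capability bit per engine type:

_Static_assert(NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX == 2,
	       "two NvU32 words cover engine types 0..61");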
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
deleted file mode 100644
index 4a850dad4776..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__
-#define __src_nvidia_inc_kernel_os_nv_memory_type_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define NV_MEMORY_WRITECOMBINED 2
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
deleted file mode 100644
index f14b23852456..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
-#define __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#define MAX_GPC_COUNT 32
-
-typedef enum
-{
- NV_RPC_UPDATE_PDE_BAR_1,
- NV_RPC_UPDATE_PDE_BAR_2,
- NV_RPC_UPDATE_PDE_BAR_INVALID,
-} NV_RPC_UPDATE_PDE_BAR_TYPE;
-
-typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
-{
- NvU32 headIndex;
- NvU32 maxHResolution;
- NvU32 maxVResolution;
-} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
-
-typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
-{
- NvU32 numHeads;
- NvU32 maxNumHeads;
-} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
deleted file mode 100644
index 7801af232dff..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
-#define __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-struct pte_desc
-{
- NvU32 idr:2;
- NvU32 reserved1:14;
- NvU32 length:16;
- union {
- NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
- NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
- } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
index e6833df1ccc7..af11648ad9c8 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
@@ -21,4 +21,6 @@ typedef NvU64 NvLength;
typedef NvU64 RmPhysAddr;
typedef NvU32 NV_STATUS;
+
+typedef union {} rpc_generic_union;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2a0617e5fe2a..a3ba07fc48a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -315,7 +315,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
break;
}
case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
- getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, NVKM_BAR1_FB);
break;
case NOUVEAU_GETPARAM_VRAM_USED: {
struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
@@ -416,7 +416,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
*/
if (nouveau_cli_uvmm(cli)) {
ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
- chan->chan->dma.ib_max);
+ chan->chan->chan.gpfifo.max);
if (ret)
goto done;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index d47442125fa1..9aae26eb7d8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -42,7 +42,7 @@
#include "nouveau_acpi.h"
static struct ida bl_ida;
-#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
+#define BL_NAME_SIZE 24 // 12 for name + 11 for digits + 1 for '\0'
static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
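
The bumped BL_NAME_SIZE accounts for the widest value ida_alloc() could hand back: "nv_backlight" is 12 bytes, a 32-bit int prints as at most 11 characters, and one byte is needed for the terminator. A minimal standalone sketch checking that arithmetic (a hypothetical userspace program, not kernel code):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char name[24]; /* BL_NAME_SIZE */

	/* worst case for "%d" is INT_MIN: 11 characters */
	int n = snprintf(name, sizeof(name), "nv_backlight%d", INT_MIN);

	printf("%d\n", n); /* prints 23; 23 chars + 1 NUL byte = 24 */
	return 0;
}
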
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2016c1e7242f..b96f0555ca14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -401,6 +401,83 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
return 0;
}
+void
+nouveau_bo_unpin_del(struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo = *pnvbo;
+
+ if (!nvbo)
+ return;
+
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ nouveau_bo_fini(nvbo);
+
+ *pnvbo = NULL;
+}
+
+int
+nouveau_bo_new_pin(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(nvbo, domain, false);
+ if (ret) {
+ nouveau_bo_fini(nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_pin(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map_gpu(struct nouveau_cli *cli, u32 domain, u32 size,
+ struct nouveau_bo **pnvbo, struct nouveau_vma **pvma)
+{
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_map(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_vma_new(nvbo, vmm, pvma);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
@@ -923,6 +1000,9 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xcab5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1204,7 +1284,7 @@ retry:
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = (reg->start << PAGE_SHIFT) +
- device->func->resource_addr(device, 1);
+ device->func->resource_addr(device, NVKM_BAR1_FB);
reg->bus.is_iomem = true;
/* Some BARs do not support being ioremapped WC */
@@ -1295,7 +1375,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_device *device = nvxx_device(drm);
- u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
+ u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT;
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
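
The four helpers added above collapse the alloc/pin/map error ladder that several callers open-coded. A hedged sketch of a typical caller using them (example_alloc is hypothetical; the nouveau_bo_* calls are the ones introduced in this hunk):

static int example_alloc(struct nouveau_cli *cli, struct nouveau_bo **pbo)
{
	int ret;

	/* allocate, pin and CPU-map a page of GART in one call */
	ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE, pbo);
	if (ret)
		return ret;

	/* ... use (*pbo)->kmap.virtual ... */

	/* single unwind: unmap, unpin, drop the ref, NULL the pointer */
	nouveau_bo_unpin_del(pbo);
	return 0;
}
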
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 596a63a50a20..d59fd12268b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -9,6 +9,7 @@ struct nouveau_channel;
struct nouveau_cli;
struct nouveau_drm;
struct nouveau_fence;
+struct nouveau_vma;
struct nouveau_bo {
struct ttm_buffer_object bo;
@@ -89,6 +90,12 @@ void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
+int nouveau_bo_new_pin(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **);
+int nouveau_bo_new_map(struct nouveau_cli *, u32 domain, u32 size, struct nouveau_bo **);
+int nouveau_bo_new_map_gpu(struct nouveau_cli *, u32 domain, u32 size,
+ struct nouveau_bo **, struct nouveau_vma **);
+void nouveau_bo_unpin_del(struct nouveau_bo **);
+
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index cd659b9fd1d9..b1e92b1f7a26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -103,12 +103,11 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_event_dtor(&chan->kill);
nvif_object_dtor(&chan->user);
nvif_mem_dtor(&chan->mem_userd);
+ nouveau_vma_del(&chan->sema.vma);
+ nouveau_bo_unpin_del(&chan->sema.bo);
nvif_object_dtor(&chan->push.ctxdma);
nouveau_vma_del(&chan->push.vma);
- nouveau_bo_unmap(chan->push.buffer);
- if (chan->push.buffer && chan->push.buffer->bo.pin_count)
- nouveau_bo_unpin(chan->push.buffer);
- nouveau_bo_fini(chan->push.buffer);
+ nouveau_bo_unpin_del(&chan->push.buffer);
kfree(chan);
}
*pchan = NULL;
@@ -163,14 +162,7 @@ nouveau_channel_prep(struct nouveau_cli *cli,
if (nouveau_vram_pushbuf)
target = NOUVEAU_GEM_DOMAIN_VRAM;
- ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
- &chan->push.buffer);
- if (ret == 0) {
- ret = nouveau_bo_pin(chan->push.buffer, target, false);
- if (ret == 0)
- ret = nouveau_bo_map(chan->push.buffer);
- }
-
+ ret = nouveau_bo_new_map(cli, target, size, &chan->push.buffer);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -199,8 +191,10 @@ nouveau_channel_prep(struct nouveau_cli *cli,
chan->push.addr = chan->push.vma->addr;
- if (device->info.family >= NV_DEVICE_INFO_V0_FERMI)
- return 0;
+ if (device->info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ return nouveau_bo_new_map_gpu(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE,
+ &chan->sema.bo, &chan->sema.vma);
+ }
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
@@ -209,13 +203,15 @@ nouveau_channel_prep(struct nouveau_cli *cli,
} else
if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
+ struct nvkm_device *nvkm_device = nvxx_device(drm);
+
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
* nfi why this exists, it came from the -nv ddx.
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
+ args.start = nvkm_device->func->resource_addr(nvkm_device, NVKM_BAR1_FB);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -253,27 +249,27 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
const struct nvif_mclass hosts[] = {
- { AMPERE_CHANNEL_GPFIFO_B, 0 },
- { AMPERE_CHANNEL_GPFIFO_A, 0 },
- { TURING_CHANNEL_GPFIFO_A, 0 },
- { VOLTA_CHANNEL_GPFIFO_A, 0 },
- { PASCAL_CHANNEL_GPFIFO_A, 0 },
- { MAXWELL_CHANNEL_GPFIFO_A, 0 },
- { KEPLER_CHANNEL_GPFIFO_B, 0 },
- { KEPLER_CHANNEL_GPFIFO_A, 0 },
- { FERMI_CHANNEL_GPFIFO , 0 },
- { G82_CHANNEL_GPFIFO , 0 },
- { NV50_CHANNEL_GPFIFO , 0 },
- { NV40_CHANNEL_DMA , 0 },
- { NV17_CHANNEL_DMA , 0 },
- { NV10_CHANNEL_DMA , 0 },
- { NV03_CHANNEL_DMA , 0 },
+ { BLACKWELL_CHANNEL_GPFIFO_B, 0 },
+ { BLACKWELL_CHANNEL_GPFIFO_A, 0 },
+ { HOPPER_CHANNEL_GPFIFO_A, 0 },
+ { AMPERE_CHANNEL_GPFIFO_B, 0 },
+ { AMPERE_CHANNEL_GPFIFO_A, 0 },
+ { TURING_CHANNEL_GPFIFO_A, 0 },
+ { VOLTA_CHANNEL_GPFIFO_A, 0 },
+ { PASCAL_CHANNEL_GPFIFO_A, 0 },
+ { MAXWELL_CHANNEL_GPFIFO_A, 0 },
+ { KEPLER_CHANNEL_GPFIFO_B, 0 },
+ { KEPLER_CHANNEL_GPFIFO_A, 0 },
+ { FERMI_CHANNEL_GPFIFO , 0 },
+ { G82_CHANNEL_GPFIFO , 0 },
+ { NV50_CHANNEL_GPFIFO , 0 },
+ { NV40_CHANNEL_DMA , 0 },
+ { NV17_CHANNEL_DMA , 0 },
+ { NV10_CHANNEL_DMA , 0 },
+ { NV03_CHANNEL_DMA , 0 },
{}
};
- struct {
- struct nvif_chan_v0 chan;
- char name[TASK_COMM_LEN+16];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_chan_v0, args, name, TASK_COMM_LEN + 16);
struct nvif_device *device = &cli->device;
struct nouveau_channel *chan;
const u64 plength = 0x10000;
@@ -298,28 +294,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
return ret;
/* create channel object */
- args.chan.version = 0;
- args.chan.namelen = sizeof(args.name);
- args.chan.runlist = __ffs64(runm);
- args.chan.runq = 0;
- args.chan.priv = priv;
- args.chan.devm = BIT(0);
+ args->version = 0;
+ args->namelen = __member_size(args->name);
+ args->runlist = __ffs64(runm);
+ args->runq = 0;
+ args->priv = priv;
+ args->devm = BIT(0);
if (hosts[cid].oclass < NV50_CHANNEL_GPFIFO) {
- args.chan.vmm = 0;
- args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
- args.chan.offset = chan->push.addr;
- args.chan.length = 0;
+ args->vmm = 0;
+ args->ctxdma = nvif_handle(&chan->push.ctxdma);
+ args->offset = chan->push.addr;
+ args->length = 0;
} else {
- args.chan.vmm = nvif_handle(&chan->vmm->vmm.object);
+ args->vmm = nvif_handle(&chan->vmm->vmm.object);
if (hosts[cid].oclass < FERMI_CHANNEL_GPFIFO)
- args.chan.ctxdma = nvif_handle(&chan->push.ctxdma);
+ args->ctxdma = nvif_handle(&chan->push.ctxdma);
else
- args.chan.ctxdma = 0;
- args.chan.offset = ioffset + chan->push.addr;
- args.chan.length = ilength;
+ args->ctxdma = 0;
+ args->offset = ioffset + chan->push.addr;
+ args->length = ilength;
}
- args.chan.huserd = 0;
- args.chan.ouserd = 0;
+ args->huserd = 0;
+ args->ouserd = 0;
/* allocate userd */
if (hosts[cid].oclass >= VOLTA_CHANNEL_GPFIFO_A) {
@@ -329,27 +325,28 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
if (ret)
return ret;
- args.chan.huserd = nvif_handle(&chan->mem_userd.object);
- args.chan.ouserd = 0;
+ args->huserd = nvif_handle(&chan->mem_userd.object);
+ args->ouserd = 0;
chan->userd = &chan->mem_userd.object;
} else {
chan->userd = &chan->user;
}
- snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));
+ snprintf(args->name, __member_size(args->name), "%s[%d]",
+ current->comm, task_pid_nr(current));
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
- &args, sizeof(args), &chan->user);
+ args, __struct_size(args), &chan->user);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
- chan->runlist = args.chan.runlist;
- chan->chid = args.chan.chid;
- chan->inst = args.chan.inst;
- chan->token = args.chan.token;
+ chan->runlist = args->runlist;
+ chan->chid = args->chid;
+ chan->inst = args->inst;
+ chan->token = args->token;
return 0;
}
@@ -367,17 +364,17 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
return ret;
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_KILLED;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_KILLED;
ret = nvif_event_ctor(&chan->user, "abi16ChanKilled", chan->chid,
nouveau_channel_killed, false,
- &args.base, sizeof(args), &chan->kill);
+ args, __struct_size(args), &chan->kill);
if (ret == 0)
ret = nvif_event_allow(&chan->kill);
if (ret) {
@@ -433,25 +430,33 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
/* initialise dma tracking parameters */
- switch (chan->user.oclass) {
- case NV03_CHANNEL_DMA:
- case NV10_CHANNEL_DMA:
- case NV17_CHANNEL_DMA:
- case NV40_CHANNEL_DMA:
+ if (chan->user.oclass < NV50_CHANNEL_GPFIFO) {
chan->user_put = 0x40;
chan->user_get = 0x44;
chan->dma.max = (0x10000 / 4) - 2;
- break;
- default:
- chan->user_put = 0x40;
- chan->user_get = 0x44;
- chan->user_get_hi = 0x60;
- chan->dma.ib_base = 0x10000 / 4;
- chan->dma.ib_max = NV50_DMA_IB_MAX;
- chan->dma.ib_put = 0;
- chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
- chan->dma.max = chan->dma.ib_base;
- break;
+ } else
+ if (chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
+ ret = nvif_chan506f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000);
+ if (ret)
+ return ret;
+ } else
+ if (chan->user.oclass < VOLTA_CHANNEL_GPFIFO_A) {
+ ret = nvif_chan906f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000,
+ chan->sema.bo->kmap.virtual, chan->sema.vma->addr);
+ if (ret)
+ return ret;
+ } else {
+ ret = nvif_chanc36f_ctor(&chan->chan, chan->userd->map.ptr,
+ (u8*)chan->push.buffer->kmap.virtual + 0x10000, 0x2000,
+ chan->push.buffer->kmap.virtual, chan->push.addr, 0x10000,
+ chan->sema.bo->kmap.virtual, chan->sema.vma->addr,
+ &drm->client.device.user, chan->token);
+ if (ret)
+ return ret;
}
chan->dma.put = 0;
@@ -520,46 +525,44 @@ nouveau_channels_fini(struct nouveau_drm *drm)
int
nouveau_channels_init(struct nouveau_drm *drm)
{
- struct {
- struct nv_device_info_v1 m;
- struct {
- struct nv_device_info_v1_data channels;
- struct nv_device_info_v1_data runlists;
- } v;
- } args = {
- .m.version = 1,
- .m.count = sizeof(args.v) / sizeof(args.v.channels),
- .v.channels.mthd = NV_DEVICE_HOST_CHANNELS,
- .v.runlists.mthd = NV_DEVICE_HOST_RUNLISTS,
- };
+ DEFINE_RAW_FLEX(struct nv_device_info_v1, args, data, 2);
+ struct nv_device_info_v1_data *channels = &args->data[0];
+ struct nv_device_info_v1_data *runlists = &args->data[1];
struct nvif_object *device = &drm->client.device.object;
int ret, i;
- ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
+ args->version = 1;
+ args->count = __member_size(args->data) / sizeof(*args->data);
+ channels->mthd = NV_DEVICE_HOST_CHANNELS;
+ runlists->mthd = NV_DEVICE_HOST_RUNLISTS;
+
+ ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
+ __struct_size(args));
if (ret ||
- args.v.runlists.mthd == NV_DEVICE_INFO_INVALID || !args.v.runlists.data ||
- args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+ runlists->mthd == NV_DEVICE_INFO_INVALID || !runlists->data ||
+ channels->mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
- drm->chan_nr = drm->chan_total = args.v.channels.data;
- drm->runl_nr = fls64(args.v.runlists.data);
+ drm->chan_nr = drm->chan_total = channels->data;
+ drm->runl_nr = fls64(runlists->data);
drm->runl = kcalloc(drm->runl_nr, sizeof(*drm->runl), GFP_KERNEL);
if (!drm->runl)
return -ENOMEM;
if (drm->chan_nr == 0) {
for (i = 0; i < drm->runl_nr; i++) {
- if (!(args.v.runlists.data & BIT(i)))
+ if (!(runlists->data & BIT(i)))
continue;
- args.v.channels.mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
- args.v.channels.data = i;
+ channels->mthd = NV_DEVICE_HOST_RUNLIST_CHANNELS;
+ channels->data = i;
- ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, &args, sizeof(args));
- if (ret || args.v.channels.mthd == NV_DEVICE_INFO_INVALID)
+ ret = nvif_object_mthd(device, NV_DEVICE_V0_INFO, args,
+ __struct_size(args));
+ if (ret || channels->mthd == NV_DEVICE_INFO_INVALID)
return -ENODEV;
- drm->runl[i].chan_nr = args.v.channels.data;
+ drm->runl[i].chan_nr = channels->data;
drm->runl[i].chan_id_base = drm->chan_total;
drm->runl[i].context_base = dma_fence_context_alloc(drm->runl[i].chan_nr);
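
The conversions in this file replace on-stack wrapper structs with DEFINE_RAW_FLEX(), which sizes stack storage for a struct ending in a flexible array plus a fixed number of trailing elements. A hedged sketch of the idiom in isolation (struct msg is hypothetical; the macro and the __member_size()/__struct_size() helpers are the kernel ones used above):

#include <linux/overflow.h>	/* DEFINE_RAW_FLEX() */
#include <linux/string.h>	/* __struct_size(), __member_size() */

struct msg {
	u32 version;
	u32 count;
	u32 data[];		/* flexible array member */
};

static void example(void)
{
	/* stack storage for struct msg plus 4 trailing u32s;
	 * 'm' is a struct msg * pointing at that storage
	 */
	DEFINE_RAW_FLEX(struct msg, m, data, 4);

	m->version = 0;
	m->count = __member_size(m->data) / sizeof(*m->data);	/* 4 */

	/* __struct_size(m) is the full allocation:
	 * sizeof(struct msg) + 4 * sizeof(u32)
	 */
}
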
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 016f668c0bc1..561877725aac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -3,13 +3,11 @@
#define __NOUVEAU_CHAN_H__
#include <nvif/object.h>
#include <nvif/event.h>
-#include <nvif/push.h>
+#include <nvif/chan.h>
struct nvif_device;
struct nouveau_channel {
- struct {
- struct nvif_push push;
- } chan;
+ struct nvif_chan chan;
struct nouveau_cli *cli;
struct nouveau_vmm *vmm;
@@ -41,15 +39,15 @@ struct nouveau_channel {
int free;
int cur;
int put;
- int ib_base;
- int ib_max;
- int ib_free;
- int ib_put;
} dma;
- u32 user_get_hi;
u32 user_get;
u32 user_put;
+ struct {
+ struct nouveau_bo *bo;
+ struct nouveau_vma *vma;
+ } sema;
+
struct nvif_object user;
struct nvif_object blit;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1b10c6c12f46..63621b1510f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1401,6 +1401,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
nv_connector->aux.name = connector->name;
+ if (disp->disp.object.oclass >= GB202_DISP)
+ nv_connector->aux.no_zero_sized = true;
drm_dp_aux_init(&nv_connector->aux);
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index a1f329ef0641..017a803121d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -43,8 +43,6 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
uint64_t val;
val = nvif_rd32(chan->userd, chan->user_get);
- if (chan->user_get_hi)
- val |= (uint64_t)nvif_rd32(chan->userd, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -68,111 +66,12 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
return (val - chan->push.addr) >> 2;
}
-void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
- bool no_prefetch)
-{
- struct nvif_user *user = &chan->cli->drm->client.device.user;
- struct nouveau_bo *pb = chan->push.buffer;
- int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
-
- BUG_ON(chan->dma.ib_free < 1);
- WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
-
- nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
- nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
- (no_prefetch ? (1 << 31) : 0));
-
- chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
-
- mb();
- /* Flush writes. */
- nouveau_bo_rd32(pb, 0);
-
- nvif_wr32(chan->userd, 0x8c, chan->dma.ib_put);
- if (user->func && user->func->doorbell)
- user->func->doorbell(user, chan->token);
- chan->dma.ib_free--;
-}
-
-static int
-nv50_dma_push_wait(struct nouveau_channel *chan, int count)
-{
- uint32_t cnt = 0, prev_get = 0;
-
- while (chan->dma.ib_free < count) {
- uint32_t get = nvif_rd32(chan->userd, 0x88);
- if (get != prev_get) {
- prev_get = get;
- cnt = 0;
- }
-
- if ((++cnt & 0xff) == 0) {
- udelay(1);
- if (cnt > 100000)
- return -EBUSY;
- }
-
- chan->dma.ib_free = get - chan->dma.ib_put;
- if (chan->dma.ib_free <= 0)
- chan->dma.ib_free += chan->dma.ib_max;
- }
-
- return 0;
-}
-
-static int
-nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
-{
- uint64_t prev_get = 0;
- int ret, cnt = 0;
-
- ret = nv50_dma_push_wait(chan, slots + 1);
- if (unlikely(ret))
- return ret;
-
- while (chan->dma.free < count) {
- int get = READ_GET(chan, &prev_get, &cnt);
- if (unlikely(get < 0)) {
- if (get == -EINVAL)
- continue;
-
- return get;
- }
-
- if (get <= chan->dma.cur) {
- chan->dma.free = chan->dma.max - chan->dma.cur;
- if (chan->dma.free >= count)
- break;
-
- FIRE_RING(chan);
- do {
- get = READ_GET(chan, &prev_get, &cnt);
- if (unlikely(get < 0)) {
- if (get == -EINVAL)
- continue;
- return get;
- }
- } while (get == 0);
- chan->dma.cur = 0;
- chan->dma.put = 0;
- }
-
- chan->dma.free = get - chan->dma.cur - 1;
- }
-
- return 0;
-}
-
int
-nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
uint64_t prev_get = 0;
int cnt = 0, get;
- if (chan->dma.ib_max)
- return nv50_dma_wait(chan, slots, size);
-
while (chan->dma.free < size) {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c52cda82353e..0e27b76d1e1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -30,9 +30,7 @@
#include "nouveau_bo.h"
#include "nouveau_chan.h"
-int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
- bool no_prefetch);
+int nouveau_dma_wait(struct nouveau_channel *, int size);
/*
* There's a hw race condition where you can't jump to your PUT offset,
@@ -67,7 +65,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
{
int ret;
- ret = nouveau_dma_wait(chan, 1, size);
+ ret = nouveau_dma_wait(chan, size);
if (ret)
return ret;
@@ -94,12 +92,7 @@ FIRE_RING(struct nouveau_channel *chan)
return;
chan->accel_done = true;
- if (chan->dma.ib_max) {
- nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
- (chan->dma.cur - chan->dma.put) << 2, false);
- } else {
- WRITE_PUT(chan->dma.cur);
- }
+ WRITE_PUT(chan->dma.cur);
chan->dma.put = chan->dma.cur;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61d0f411ef84..ca4932a150e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -256,20 +256,15 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
chunk->pagemap.owner = drm->dev;
- ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
- &chunk->bo);
+ ret = nouveau_bo_new_pin(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, DMEM_CHUNK_SIZE,
+ &chunk->bo);
if (ret)
goto out_release;
- ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (ret)
- goto out_bo_free;
-
ptr = memremap_pages(&chunk->pagemap, numa_node_id());
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
- goto out_bo_unpin;
+ goto out_bo_free;
}
mutex_lock(&drm->dmem->mutex);
@@ -292,10 +287,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
return 0;
-out_bo_unpin:
- nouveau_bo_unpin(chunk->bo);
out_bo_free:
- nouveau_bo_fini(chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
@@ -426,8 +419,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
- nouveau_bo_unpin(chunk->bo);
- nouveau_bo_fini(chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e154d08857c5..0c82a63cd49d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -503,11 +503,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
case PASCAL_CHANNEL_GPFIFO_A:
+ ret = nvc0_fence_create(drm);
+ break;
case VOLTA_CHANNEL_GPFIFO_A:
case TURING_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_A:
case AMPERE_CHANNEL_GPFIFO_B:
- ret = nvc0_fence_create(drm);
+ case HOPPER_CHANNEL_GPFIFO_A:
+ case BLACKWELL_CHANNEL_GPFIFO_A:
+ case BLACKWELL_CHANNEL_GPFIFO_B:
+ ret = gv100_fence_create(drm);
break;
default:
break;
@@ -1079,6 +1084,10 @@ nouveau_pmops_freeze(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_suspend(drm, false);
}
@@ -1087,6 +1096,10 @@ nouveau_pmops_thaw(struct device *dev)
{
struct nouveau_drm *drm = dev_get_drvdata(dev);
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ return 0;
+
return nouveau_do_resume(drm, false);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index a0b5f1b16e8b..41b7c608c905 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -10,6 +10,8 @@
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"
+#include <nvif/class.h>
+
/**
* DOC: Overview
*
@@ -131,7 +133,7 @@ nouveau_exec_job_run(struct nouveau_job *job)
struct nouveau_fence *fence = exec_job->fence;
int i, ret;
- ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
+ ret = nvif_chan_gpfifo_wait(&chan->chan, exec_job->push.count + 1, 16);
if (ret) {
NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
return ERR_PTR(ret);
@@ -141,9 +143,11 @@ nouveau_exec_job_run(struct nouveau_job *job)
struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
- nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
+ nvif_chan_gpfifo_push(&chan->chan, p->va, p->va_len, no_prefetch);
}
+ nvif_chan_gpfifo_post(&chan->chan);
+
ret = nouveau_fence_emit(fence);
if (ret) {
nouveau_fence_unref(&exec_job->fence);
@@ -375,10 +379,10 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (unlikely(atomic_read(&chan->killed)))
return nouveau_abi16_put(abi16, -ENODEV);
- if (!chan->dma.ib_max)
+ if (chan->user.oclass < NV50_CHANNEL_GPFIFO)
return nouveau_abi16_put(abi16, -ENOSYS);
- push_max = nouveau_exec_push_max_from_ib_max(chan->dma.ib_max);
+ push_max = nouveau_exec_push_max_from_ib_max(chan->chan.gpfifo.max);
if (unlikely(req->push_count > push_max)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->push_count, push_max);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index edddfc036c6d..6ded8c2b6d3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -184,10 +184,10 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
struct nouveau_cli *cli = chan->cli;
struct nouveau_drm *drm = cli->drm;
struct nouveau_fence_priv *priv = (void*)drm->fence;
- struct {
- struct nvif_event_v0 base;
- struct nvif_chan_event_v0 host;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_chan_event_v0));
+ struct nvif_chan_event_v0 *host =
+ (struct nvif_chan_event_v0 *)args->data;
int ret;
INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
@@ -207,12 +207,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
if (!priv->uevent)
return;
- args.host.version = 0;
- args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
+ host->version = 0;
+ host->type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;
ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
nouveau_fence_wait_uevent_handler, false,
- &args.base, sizeof(args), &fctx->event);
+ args, __struct_size(args), &fctx->event);
WARN_ON(ret);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 8bc065acfe35..6a983dd9f7b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -83,6 +83,7 @@ void nv17_fence_resume(struct nouveau_drm *drm);
int nv50_fence_create(struct nouveau_drm *);
int nv84_fence_create(struct nouveau_drm *);
int nvc0_fence_create(struct nouveau_drm *);
+int gv100_fence_create(struct nouveau_drm *);
struct nv84_fence_chan {
struct nouveau_fence_chan base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 67e3c99de73a..690e10fbf0bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -850,8 +850,8 @@ revalidate:
}
}
- if (chan->dma.ib_max) {
- ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
+ if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
+ ret = nvif_chan_gpfifo_wait(&chan->chan, req->nr_push + 1, 16);
if (ret) {
NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
goto out;
@@ -864,8 +864,10 @@ revalidate:
u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
- nv50_dma_push(chan, addr, length, no_prefetch);
+ nvif_chan_gpfifo_push(&chan->chan, addr, length, no_prefetch);
}
+
+ nvif_chan_gpfifo_post(&chan->chan);
} else
if (drm->client.device.info.chipset >= 0x25) {
ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
@@ -958,7 +960,7 @@ out_prevalid:
u_free(push);
out_next:
- if (chan->dma.ib_max) {
+ if (chan->user.oclass >= NV50_CHANNEL_GPFIFO) {
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index e12e2596ed84..6fa387da0637 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -720,10 +720,7 @@ nouveau_svm_fault(struct work_struct *work)
struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]);
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
- struct {
- struct nouveau_pfnmap_args i;
- u64 phys[1];
- } args;
+ DEFINE_RAW_FLEX(struct nouveau_pfnmap_args, args, p.phys, 1);
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
@@ -772,11 +769,11 @@ nouveau_svm_fault(struct work_struct *work)
mutex_unlock(&svm->mutex);
/* Process list of faults. */
- args.i.i.version = 0;
- args.i.i.type = NVIF_IOCTL_V0_MTHD;
- args.i.m.version = 0;
- args.i.m.method = NVIF_VMM_V0_PFNMAP;
- args.i.p.version = 0;
+ args->i.version = 0;
+ args->i.type = NVIF_IOCTL_V0_MTHD;
+ args->m.version = 0;
+ args->m.method = NVIF_VMM_V0_PFNMAP;
+ args->p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
struct svm_notifier notifier;
@@ -802,9 +799,9 @@ nouveau_svm_fault(struct work_struct *work)
* fault window, determining required pages and access
* permissions based on pending faults.
*/
- args.i.p.addr = start;
- args.i.p.page = PAGE_SHIFT;
- args.i.p.size = PAGE_SIZE;
+ args->p.addr = start;
+ args->p.page = PAGE_SHIFT;
+ args->p.size = PAGE_SIZE;
/*
* Determine required permissions based on GPU fault
* access flags.
@@ -832,16 +829,16 @@ nouveau_svm_fault(struct work_struct *work)
notifier.svmm = svmm;
if (atomic)
- ret = nouveau_atomic_range_fault(svmm, svm->drm,
- &args.i, sizeof(args),
+ ret = nouveau_atomic_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
&notifier);
else
- ret = nouveau_range_fault(svmm, svm->drm, &args.i,
- sizeof(args), hmm_flags,
- &notifier);
+ ret = nouveau_range_fault(svmm, svm->drm, args,
+ __struct_size(args),
+ hmm_flags, &notifier);
mmput(mm);
- limit = args.i.p.addr + args.i.p.size;
+ limit = args->p.addr + args->p.size;
for (fn = fi; ++fn < buffer->fault_nr; ) {
/* It's okay to skip over duplicate addresses from the
* same SVMM as faults are ordered by access type such
@@ -855,14 +852,14 @@ nouveau_svm_fault(struct work_struct *work)
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
(buffer->fault[fi]->access == FAULT_ACCESS_READ &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
(buffer->fault[fi]->access != FAULT_ACCESS_READ &&
buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
- !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
+ !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e244927eb5d4..7d2436e5d50d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -312,8 +312,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->client.device.info.ram_user;
- arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
ret = nouveau_ttm_init_vram(drm);
if (ret) {
@@ -321,8 +321,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
/* GART init */
if (!drm->agp.bridge) {
@@ -357,7 +357,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
- arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
- device->func->resource_size(device, 1));
+ arch_io_free_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+ device->func->resource_size(device, NVKM_BAR1_FB));
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 8c73f40e3bda..40ee95340814 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -85,10 +85,8 @@ void
nv10_fence_destroy(struct nouveau_drm *drm)
{
struct nv10_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_fini(priv->bo);
+
+ nouveau_bo_unpin_del(&priv->bo);
drm->fence = NULL;
kfree(priv);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index d09bfd11369f..1b0c0aa3c305 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -130,20 +130,7 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
if (ret) {
nv10_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 62e28dddf87c..e1f0e8adf313 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -81,20 +81,7 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
- NOUVEAU_GEM_DOMAIN_VRAM,
- 0, 0x0000, NULL, NULL, &priv->bo);
- if (!ret) {
- ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (!ret) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
-
+ ret = nouveau_bo_new_map(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, PAGE_SIZE, &priv->bo);
if (ret) {
nv10_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index aa7dd0c5d917..1765b2cedaf9 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -185,10 +185,8 @@ static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo);
- if (priv->bo)
- nouveau_bo_unpin(priv->bo);
- nouveau_bo_fini(priv->bo);
+
+ nouveau_bo_unpin_del(&priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -222,19 +220,8 @@ nv84_fence_create(struct nouveau_drm *drm)
* will lose CPU/GPU coherency!
*/
NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
- ret = nouveau_bo_new(&drm->client, 16 * drm->chan_total, 0,
- domain, 0, 0, NULL, NULL, &priv->bo);
- if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo, domain, false);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo);
- if (ret)
- nouveau_bo_unpin(priv->bo);
- }
- if (ret)
- nouveau_bo_fini(priv->bo);
- }
+ ret = nouveau_bo_new_map(&drm->client, domain, 16 * drm->chan_total, &priv->bo);
if (ret)
nv84_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index b7963a39dd91..198889c20ce1 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -14,6 +14,12 @@ nvif-y += nvif/outp.o
nvif-y += nvif/timer.o
nvif-y += nvif/vmm.o
+# Channel classes
+nvif-y += nvif/chan.o
+nvif-y += nvif/chan506f.o
+nvif-y += nvif/chan906f.o
+nvif-y += nvif/chanc36f.o
+
# Usermode classes
nvif-y += nvif/user.o
nvif-y += nvif/userc361.o
diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c
new file mode 100644
index 000000000000..baa10227d51a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+
+static void
+nvif_chan_gpfifo_push_kick(struct nvif_push *push)
+{
+ struct nvif_chan *chan = container_of(push, typeof(*chan), push);
+ u32 put = push->bgn - (u32 *)chan->push.mem.object.map.ptr;
+ u32 cnt;
+
+ if (chan->func->gpfifo.post) {
+ if (push->end - push->cur < chan->func->gpfifo.post_size)
+ push->end = push->cur + chan->func->gpfifo.post_size;
+
+ WARN_ON(nvif_chan_gpfifo_post(chan));
+ }
+
+ cnt = push->cur - push->bgn;
+
+ chan->func->gpfifo.push(chan, true, chan->push.addr + (put << 2), cnt << 2, false);
+ chan->func->gpfifo.kick(chan);
+}
+
+static int
+nvif_chan_gpfifo_push_wait(struct nvif_push *push, u32 push_nr)
+{
+ struct nvif_chan *chan = container_of(push, typeof(*chan), push);
+
+ return nvif_chan_gpfifo_wait(chan, 1, push_nr);
+}
+
+int
+nvif_chan_gpfifo_post(struct nvif_chan *chan)
+{
+ const u32 *map = chan->push.mem.object.map.ptr;
+ const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
+ const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+
+ return chan->func->gpfifo.post(chan, gpptr, pbptr);
+}
+
+void
+nvif_chan_gpfifo_push(struct nvif_chan *chan, u64 addr, u32 size, bool no_prefetch)
+{
+ chan->func->gpfifo.push(chan, false, addr, size, no_prefetch);
+}
+
+int
+nvif_chan_gpfifo_wait(struct nvif_chan *chan, u32 gpfifo_nr, u32 push_nr)
+{
+ struct nvif_push *push = &chan->push;
+ int ret = 0, time = 1000000;
+
+ if (gpfifo_nr) {
+ /* Account for pushbuf space needed by nvif_chan_gpfifo_post(),
+ * if used after pushing userspace GPFIFO entries.
+ */
+ if (chan->func->gpfifo.post)
+ push_nr += chan->func->gpfifo.post_size;
+ }
+
+ /* Account for the GPFIFO entry needed to submit pushbuf. */
+ if (push_nr)
+ gpfifo_nr++;
+
+ /* Wait for space in main push buffer. */
+ if (push->cur + push_nr > push->end) {
+ ret = nvif_chan_dma_wait(chan, push_nr);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for GPFIFO space. */
+ while (chan->gpfifo.free < gpfifo_nr) {
+ chan->gpfifo.free = chan->func->gpfifo.read_get(chan) - chan->gpfifo.cur - 1;
+ if (chan->gpfifo.free < 0)
+ chan->gpfifo.free += chan->gpfifo.max + 1;
+
+ if (chan->gpfifo.free < gpfifo_nr) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ }
+
+ return 0;
+}
+
+void
+nvif_chan_gpfifo_ctor(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, struct nvif_chan *chan)
+{
+ chan->func = func;
+
+ chan->userd.map.ptr = userd;
+
+ chan->gpfifo.map.ptr = gpfifo;
+ chan->gpfifo.max = (gpfifo_size >> 3) - 1;
+ chan->gpfifo.free = chan->gpfifo.max;
+
+ chan->push.mem.object.map.ptr = push;
+ chan->push.wait = nvif_chan_gpfifo_push_wait;
+ chan->push.kick = nvif_chan_gpfifo_push_kick;
+ chan->push.addr = push_addr;
+ chan->push.hw.max = push_size >> 2;
+ chan->push.bgn = chan->push.cur = chan->push.end = push;
+}
+
+int
+nvif_chan_dma_wait(struct nvif_chan *chan, u32 nr)
+{
+ struct nvif_push *push = &chan->push;
+ u32 cur = push->cur - (u32 *)push->mem.object.map.ptr;
+ u32 free, time = 1000000;
+
+ nr += chan->func->gpfifo.post_size;
+
+ do {
+ u32 get = chan->func->push.read_get(chan);
+
+ if (get <= cur) {
+ free = push->hw.max - cur;
+ if (free >= nr)
+ break;
+
+ PUSH_KICK(push);
+
+ while (get == 0) {
+ get = chan->func->push.read_get(chan);
+ if (get == 0) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ }
+
+ cur = 0;
+ }
+
+ free = get - cur - 1;
+
+ if (free < nr) {
+ if (!time--)
+ return -ETIMEDOUT;
+ udelay(1);
+ }
+ } while (free < nr);
+
+ push->bgn = (u32 *)push->mem.object.map.ptr + cur;
+ push->cur = push->bgn;
+ push->end = push->bgn + free - chan->func->gpfifo.post_size;
+ return 0;
+}
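
nvif_chan_gpfifo_wait() above computes free GPFIFO entries as the distance from PUT back to GET, modulo the ring size, keeping one slot empty so a full ring never looks identical to an empty one. A hedged standalone model of that computation (gpfifo_free is hypothetical; max mirrors chan->gpfifo.max, i.e. entry count minus one):

static int gpfifo_free(int get, int cur, int max)
{
	int free = get - cur - 1;	/* slots between PUT and GET */

	if (free < 0)			/* GET has wrapped behind PUT */
		free += max + 1;	/* max + 1 == total ring entries */

	return free;
}
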
diff --git a/drivers/gpu/drm/nouveau/nvif/chan506f.c b/drivers/gpu/drm/nouveau/nvif/chan506f.c
new file mode 100644
index 000000000000..d3900887c4a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan506f.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+
+void
+nvif_chan506f_gpfifo_kick(struct nvif_chan *chan)
+{
+ wmb();
+ nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
+}
+
+void
+nvif_chan506f_gpfifo_push(struct nvif_chan *chan, bool main, u64 addr, u32 size, bool no_prefetch)
+{
+ u32 gpptr = chan->gpfifo.cur << 3;
+
+ if (WARN_ON(!chan->gpfifo.free))
+ return;
+
+ nvif_wr32(&chan->gpfifo, gpptr + 0, lower_32_bits(addr));
+ nvif_wr32(&chan->gpfifo, gpptr + 4, upper_32_bits(addr) |
+ (main ? 0 : BIT(9)) |
+ (size >> 2) << 10 |
+ (no_prefetch ? BIT(31) : 0));
+
+ chan->gpfifo.cur = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+ chan->gpfifo.free--;
+ if (!chan->gpfifo.free)
+ chan->push.end = chan->push.cur;
+}
+
+static u32
+nvif_chan506f_gpfifo_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->userd, 0x88);
+}
+
+static u32
+nvif_chan506f_read_get(struct nvif_chan *chan)
+{
+ u32 tlgetlo = nvif_rd32(&chan->userd, 0x58);
+ u32 tlgethi = nvif_rd32(&chan->userd, 0x5c);
+ struct nvif_push *push = &chan->push;
+
+ /* Update cached GET pointer if TOP_LEVEL_GET is valid. */
+ if (tlgethi & BIT(31)) {
+ u64 tlget = ((u64)(tlgethi & 0xff) << 32) | tlgetlo;
+
+ push->hw.get = (tlget - push->addr) >> 2;
+ }
+
+ return push->hw.get;
+}
+
+static const struct nvif_chan_func
+nvif_chan506f = {
+ .push.read_get = nvif_chan506f_read_get,
+ .gpfifo.read_get = nvif_chan506f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chan506f_gpfifo_kick,
+};
+
+int
+nvif_chan506f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size)
+{
+ nvif_chan_gpfifo_ctor(&nvif_chan506f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, chan);
+ return 0;
+}
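
Each GPFIFO entry written by nvif_chan506f_gpfifo_push() is two 32-bit words: the low word carries the push-buffer address, while the high word packs the address top bits with a non-main (subroutine) flag, the length in words, and a no-prefetch bit. A hedged sketch of that packing (pack_gp_entry is hypothetical; the bit layout is the one the function above writes):

static void pack_gp_entry(u32 *ent, u64 addr, u32 bytes,
			  bool main, bool no_prefetch)
{
	ent[0] = lower_32_bits(addr);
	ent[1] = upper_32_bits(addr) |
		 (main ? 0 : BIT(9)) |		/* subroutine (non-main) segment */
		 (bytes >> 2) << 10 |		/* length in 32-bit words */
		 (no_prefetch ? BIT(31) : 0);	/* inline data, skip prefetch */
}
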
diff --git a/drivers/gpu/drm/nouveau/nvif/chan906f.c b/drivers/gpu/drm/nouveau/nvif/chan906f.c
new file mode 100644
index 000000000000..c9cfb85179b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chan906f.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+#include <nvif/user.h>
+#include <nvif/push906f.h>
+
+#include <nvhw/class/cl906f.h>
+
+/* Limits GPFIFO size to 1MiB, and "main" push buffer size to 64KiB. */
+#define NVIF_CHAN906F_PBPTR_BITS 15
+#define NVIF_CHAN906F_PBPTR_MASK ((1 << NVIF_CHAN906F_PBPTR_BITS) - 1)
+
+#define NVIF_CHAN906F_GPPTR_SHIFT NVIF_CHAN906F_PBPTR_BITS
+#define NVIF_CHAN906F_GPPTR_BITS (32 - NVIF_CHAN906F_PBPTR_BITS)
+#define NVIF_CHAN906F_GPPTR_MASK ((1 << NVIF_CHAN906F_GPPTR_BITS) - 1)
+
+#define NVIF_CHAN906F_SEM_RELEASE_SIZE 5
+
+static int
+nvif_chan906f_sem_release(struct nvif_chan *chan, u64 addr, u32 data)
+{
+ struct nvif_push *push = &chan->push;
+ int ret;
+
+ ret = PUSH_WAIT(push, NVIF_CHAN906F_SEM_RELEASE_SIZE);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NV906F, SEMAPHOREA,
+ NVVAL(NV906F, SEMAPHOREA, OFFSET_UPPER, upper_32_bits(addr)),
+
+ SEMAPHOREB, lower_32_bits(addr),
+
+ SEMAPHOREC, data,
+
+ SEMAPHORED,
+ NVDEF(NV906F, SEMAPHORED, OPERATION, RELEASE) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_WFI, DIS) |
+ NVDEF(NV906F, SEMAPHORED, RELEASE_SIZE, 16BYTE));
+
+ return 0;
+}
+
+int
+nvif_chan906f_gpfifo_post(struct nvif_chan *chan, u32 gpptr, u32 pbptr)
+{
+ return chan->func->sem.release(chan, chan->sema.addr,
+ (gpptr << NVIF_CHAN906F_GPPTR_SHIFT) | pbptr);
+}
+
+u32
+nvif_chan906f_gpfifo_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->sema, 0) >> NVIF_CHAN906F_GPPTR_SHIFT;
+}
+
+u32
+nvif_chan906f_read_get(struct nvif_chan *chan)
+{
+ return nvif_rd32(&chan->sema, 0) & NVIF_CHAN906F_PBPTR_MASK;
+}
+
+static const struct nvif_chan_func
+nvif_chan906f = {
+ .push.read_get = nvif_chan906f_read_get,
+ .gpfifo.read_get = nvif_chan906f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chan506f_gpfifo_kick,
+ .gpfifo.post = nvif_chan906f_gpfifo_post,
+ .gpfifo.post_size = NVIF_CHAN906F_SEM_RELEASE_SIZE,
+ .sem.release = nvif_chan906f_sem_release,
+};
+
+int
+nvif_chan906f_ctor_(const struct nvif_chan_func *func, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_chan *chan)
+{
+ nvif_chan_gpfifo_ctor(func, userd, gpfifo, gpfifo_size, push, push_addr, push_size, chan);
+ chan->sema.map.ptr = sema;
+ chan->sema.addr = sema_addr;
+ return 0;
+}
+
+int
+nvif_chan906f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr)
+{
+ return nvif_chan906f_ctor_(&nvif_chan906f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, sema, sema_addr, chan);
+}
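
chan906f tracks GET through a semaphore the GPU releases: a single 32-bit payload carries both ring pointers, the GPFIFO pointer in the top 17 bits and the push-buffer pointer in the bottom 15. A hedged model of that encoding (pack_get/unpack_get are hypothetical; the widths match the NVIF_CHAN906F_* constants above):

#define PBPTR_BITS 15
#define PBPTR_MASK ((1 << PBPTR_BITS) - 1)

static u32 pack_get(u32 gpptr, u32 pbptr)
{
	return (gpptr << PBPTR_BITS) | (pbptr & PBPTR_MASK);
}

static void unpack_get(u32 sema, u32 *gpptr, u32 *pbptr)
{
	*gpptr = sema >> PBPTR_BITS;	/* GPFIFO read pointer */
	*pbptr = sema & PBPTR_MASK;	/* push-buffer read pointer */
}
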
diff --git a/drivers/gpu/drm/nouveau/nvif/chanc36f.c b/drivers/gpu/drm/nouveau/nvif/chanc36f.c
new file mode 100644
index 000000000000..ca02b939c3fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/chanc36f.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <nvif/chan.h>
+#include <nvif/user.h>
+
+#include <nvif/push906f.h>
+#include <nvhw/class/clc36f.h>
+
+static void
+nvif_chanc36f_gpfifo_kick(struct nvif_chan *chan)
+{
+ struct nvif_user *usermode = chan->usermode;
+
+ nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
+
+ wmb(); /* ensure CPU writes are flushed to BAR1 */
+ nvif_rd32(&chan->userd, 0); /* ensure BAR1 writes are flushed to vidmem */
+
+ usermode->func->doorbell(usermode, chan->doorbell_token);
+}
+
+#define NVIF_CHANC36F_SEM_RELEASE_SIZE 6
+
+static int
+nvif_chanc36f_sem_release(struct nvif_chan *chan, u64 addr, u32 data)
+{
+ struct nvif_push *push = &chan->push;
+ int ret;
+
+ ret = PUSH_WAIT(push, NVIF_CHANC36F_SEM_RELEASE_SIZE);
+ if (ret)
+ return ret;
+
+ PUSH_MTHD(push, NVC36F, SEM_ADDR_LO, lower_32_bits(addr),
+
+ SEM_ADDR_HI, upper_32_bits(addr),
+
+ SEM_PAYLOAD_LO, data);
+
+ PUSH_MTHD(push, NVC36F, SEM_EXECUTE,
+ NVDEF(NVC36F, SEM_EXECUTE, OPERATION, RELEASE) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_WFI, DIS) |
+ NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
+ NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+
+ return 0;
+}
+
+static const struct nvif_chan_func
+nvif_chanc36f = {
+ .push.read_get = nvif_chan906f_read_get,
+ .gpfifo.read_get = nvif_chan906f_gpfifo_read_get,
+ .gpfifo.push = nvif_chan506f_gpfifo_push,
+ .gpfifo.kick = nvif_chanc36f_gpfifo_kick,
+ .gpfifo.post = nvif_chan906f_gpfifo_post,
+ .gpfifo.post_size = NVIF_CHANC36F_SEM_RELEASE_SIZE,
+ .sem.release = nvif_chanc36f_sem_release,
+};
+
+int
+nvif_chanc36f_ctor(struct nvif_chan *chan, void *userd, void *gpfifo, u32 gpfifo_size,
+ void *push, u64 push_addr, u32 push_size, void *sema, u64 sema_addr,
+ struct nvif_user *usermode, u32 doorbell_token)
+{
+ int ret;
+
+ ret = nvif_chan906f_ctor_(&nvif_chanc36f, userd, gpfifo, gpfifo_size,
+ push, push_addr, push_size, sema, sema_addr, chan);
+ if (ret)
+ return ret;
+
+ chan->usermode = usermode;
+ chan->doorbell_token = doorbell_token;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/conn.c b/drivers/gpu/drm/nouveau/nvif/conn.c
index 9ee18cb99264..5a1a83c62a2a 100644
--- a/drivers/gpu/drm/nouveau/nvif/conn.c
+++ b/drivers/gpu/drm/nouveau/nvif/conn.c
@@ -30,17 +30,17 @@ int
nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func func, u8 types,
struct nvif_event *event)
{
- struct {
- struct nvif_event_v0 base;
- struct nvif_conn_event_v0 conn;
- } args;
+ DEFINE_RAW_FLEX(struct nvif_event_v0, args, data,
+ sizeof(struct nvif_conn_event_v0));
+ struct nvif_conn_event_v0 *args_conn =
+ (struct nvif_conn_event_v0 *)args->data;
int ret;
- args.conn.version = 0;
- args.conn.types = types;
+ args_conn->version = 0;
+ args_conn->types = types;
ret = nvif_event_ctor_(&conn->object, name ?: "nvifConnHpd", nvif_conn_id(conn),
- func, true, &args.base, sizeof(args), false, event);
+ func, true, args, __struct_size(args), false, event);
NVIF_DEBUG(&conn->object, "[NEW EVENT:HPD types:%02x]", types);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 14da22fa3b5b..fa42146252da 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -36,6 +36,7 @@ int
nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { GB202_DISP, 0 },
{ AD102_DISP, 0 },
{ GA102_DISP, 0 },
{ TU102_DISP, 0 },
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index 6daeb7f0b09b..32f6c5eb92af 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -195,20 +195,17 @@ nvif_outp_dp_aux_pwr(struct nvif_outp *outp, bool enable)
int
nvif_outp_hda_eld(struct nvif_outp *outp, int head, void *data, u32 size)
{
- struct {
- struct nvif_outp_hda_eld_v0 mthd;
- u8 data[128];
- } args;
+ DEFINE_RAW_FLEX(struct nvif_outp_hda_eld_v0, mthd, data, 128);
int ret;
- if (WARN_ON(size > ARRAY_SIZE(args.data)))
+ if (WARN_ON(size > __member_size(mthd->data)))
return -EINVAL;
- args.mthd.version = 0;
- args.mthd.head = head;
+ mthd->version = 0;
+ mthd->head = head;
- memcpy(args.data, data, size);
- ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, &args, sizeof(args.mthd) + size);
+ memcpy(mthd->data, data, size);
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDA_ELD, mthd, sizeof(*mthd) + size);
NVIF_ERRON(ret, &outp->object, "[HDA_ELD head:%d size:%d]", head, size);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/user.c b/drivers/gpu/drm/nouveau/nvif/user.c
index b648a5e036af..53f03fa1c9c2 100644
--- a/drivers/gpu/drm/nouveau/nvif/user.c
+++ b/drivers/gpu/drm/nouveau/nvif/user.c
@@ -41,9 +41,11 @@ nvif_user_ctor(struct nvif_device *device, const char *name)
int version;
const struct nvif_user_func *func;
} users[] = {
- { AMPERE_USERMODE_A, -1, &nvif_userc361 },
- { TURING_USERMODE_A, -1, &nvif_userc361 },
- { VOLTA_USERMODE_A, -1, &nvif_userc361 },
+ { BLACKWELL_USERMODE_A, -1, &nvif_userc361 },
+ { HOPPER_USERMODE_A, -1, &nvif_userc361 },
+ { AMPERE_USERMODE_A, -1, &nvif_userc361 },
+ { TURING_USERMODE_A, -1, &nvif_userc361 },
+ { VOLTA_USERMODE_A, -1, &nvif_userc361 },
{}
};
int cid, ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 2e48b0816670..ddcf8782d6b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -17,8 +17,6 @@ include $(src)/nvkm/engine/msppp/Kbuild
include $(src)/nvkm/engine/msvld/Kbuild
include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
-include $(src)/nvkm/engine/nvjpg/Kbuild
-include $(src)/nvkm/engine/ofa/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sec2/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 165d61fc5d6c..9754bac65df7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -10,5 +10,4 @@ nvkm-y += nvkm/engine/ce/gv100.o
nvkm-y += nvkm/engine/ce/tu102.o
nvkm-y += nvkm/engine/ce/ga100.o
nvkm-y += nvkm/engine/ce/ga102.o
-
-nvkm-y += nvkm/engine/ce/r535.o
+nvkm-y += nvkm/engine/ce/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
index 9427a592bd16..1c0c60138706 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
@@ -90,7 +90,7 @@ ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
index ce56ede7c2e9..9359c5e7aa3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
@@ -44,7 +44,7 @@ ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
new file mode 100644
index 000000000000..37c3c619c71b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gb202.c
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_ce.h>
+
+u32
+gb202_ce_grce_mask(struct nvkm_device *device)
+{
+ u32 data = nvkm_rd32(device, NV_CE_GRCE_MASK);
+
+ return NVVAL_GET(data, NV_CE, GRCE_MASK, VALUE);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 806a76a72249..34fd2657134b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -16,4 +16,6 @@ int ga100_ce_oneinit(struct nvkm_engine *);
int ga100_ce_init(struct nvkm_engine *);
int ga100_ce_fini(struct nvkm_engine *, bool);
int ga100_ce_nonstall(struct nvkm_engine *);
+
+u32 gb202_ce_grce_mask(struct nvkm_device *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
deleted file mode 100644
index bd0d435dbbd3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
-
-struct r535_ce_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ce_obj_dtor(struct nvkm_object *object)
-{
- struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_ce_obj = {
- .dtor = r535_ce_obj_dtor,
-};
-
-static int
-r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_ce_obj *obj;
- NVC0B5_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->version = 1;
- args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ce_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_ce_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_ce_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
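
The deleted file is one instance of an idiom every r535 engine backend shared: duplicate the hardware engine's class table, substitute an RM-object constructor for each class, and free the heap-allocated function table from the dtor. A self-contained sketch of that clone-and-swap step, with invented types standing in for nvkm's:

#include <stdlib.h>

struct ex_class {
	int oclass;
	int (*ctor)(void);
};

struct ex_func {
	void (*dtor)(struct ex_func *);
	struct ex_class sclass[];	/* flexible, zero-terminated */
};

static struct ex_func *
ex_clone_with_rm_ctor(const struct ex_func *hw, int nclass,
		      int (*rm_ctor)(void), void (*rm_dtor)(struct ex_func *))
{
	struct ex_func *rm = calloc(1, sizeof(*rm) +
				       (nclass + 1) * sizeof(rm->sclass[0]));

	if (!rm)
		return NULL;
	rm->dtor = rm_dtor;			/* must free this table */
	for (int i = 0; i < nclass; i++) {
		rm->sclass[i] = hw->sclass[i];	/* keep oclass/version info */
		rm->sclass[i].ctor = rm_ctor;	/* allocate via GSP-RM */
	}
	return rm;
}

The nvdec and gr deletions later in this patch drop the same pattern.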
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
index 7c8647dcb349..67d0545cf902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
@@ -40,7 +40,7 @@ tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_engine **pengine)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+ return -ENODEV;
return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 9093d89b16f3..3375a59ebf1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2529,9 +2529,6 @@ nv170_chipset = {
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x000003ff, ga100_ce_new },
.fifo = { 0x00000001, ga100_fifo_new },
- .nvdec = { 0x0000001f, ga100_nvdec_new },
- .nvjpg = { 0x00000001, ga100_nvjpg_new },
- .ofa = { 0x00000001, ga100_ofa_new },
};
static const struct nvkm_device_chip
@@ -2561,8 +2558,6 @@ nv172_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2593,8 +2588,6 @@ nv173_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2625,8 +2618,6 @@ nv174_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2657,8 +2648,6 @@ nv176_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2689,12 +2678,26 @@ nv177_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
.gr = { 0x00000001, ga102_gr_new },
.nvdec = { 0x00000003, ga102_nvdec_new },
- .nvenc = { 0x00000001, ga102_nvenc_new },
- .ofa = { 0x00000001, ga102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
+nv180_chipset = {
+ .name = "GH100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fault = { 0x00000001, tu102_fault_new },
+ .fb = { 0x00000001, gh100_fb_new },
+ .fsp = { 0x00000001, gh100_fsp_new },
+ .gsp = { 0x00000001, gh100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv192_chipset = {
.name = "AD102",
.bar = { 0x00000001, tu102_bar_new },
@@ -2709,14 +2712,9 @@ nv192_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2735,14 +2733,9 @@ nv193_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2761,14 +2754,9 @@ nv194_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2787,14 +2775,9 @@ nv196_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2813,17 +2796,122 @@ nv197_chipset = {
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x0000001f, ga102_ce_new },
- .disp = { 0x00000001, ad102_disp_new },
+ .disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
- .gr = { 0x00000001, ad102_gr_new },
- .nvdec = { 0x0000000f, ad102_nvdec_new },
- .nvenc = { 0x00000007, ad102_nvenc_new },
- .nvjpg = { 0x0000000f, ad102_nvjpg_new },
- .ofa = { 0x00000001, ad102_ofa_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
+static const struct nvkm_device_chip
+nv1a0_chipset = {
+ .name = "GB100",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb100_fb_new },
+ .fsp = { 0x00000001, gb100_fsp_new },
+ .gsp = { 0x00000001, gb100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1a2_chipset = {
+ .name = "GB102",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb100_fb_new },
+ .fsp = { 0x00000001, gb100_fsp_new },
+ .gsp = { 0x00000001, gb100_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b2_chipset = {
+ .name = "GB202",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b3_chipset = {
+ .name = "GB203",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b5_chipset = {
+ .name = "GB205",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b6_chipset = {
+ .name = "GB206",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
+nv1b7_chipset = {
+ .name = "GB207",
+ .bar = { 0x00000001, tu102_bar_new },
+ .fb = { 0x00000001, gb202_fb_new },
+ .fsp = { 0x00000001, gb202_fsp_new },
+ .gsp = { 0x00000001, gb202_gsp_new },
+ .imem = { 0x00000001, gh100_instmem_new },
+ .mmu = { 0x00000001, gh100_mmu_new },
+ .pci = { 0x00000001, gh100_pci_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .vfn = { 0x00000001, ga100_vfn_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
struct nvkm_subdev *
nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
@@ -3065,8 +3153,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
device->debug = nvkm_dbgopt(device->dbgopt, "device");
INIT_LIST_HEAD(&device->subdev);
- mmio_base = device->func->resource_addr(device, 0);
- mmio_size = device->func->resource_size(device, 0);
+ mmio_base = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ mmio_size = device->func->resource_size(device, NVKM_BAR0_PRI);
device->pri = ioremap(mmio_base, mmio_size);
if (device->pri == NULL) {
@@ -3139,7 +3227,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
case 0x170: device->card_type = GA100; break;
+ case 0x180: device->card_type = GH100; break;
case 0x190: device->card_type = AD100; break;
+ case 0x1a0: device->card_type = GB10x; break;
+ case 0x1b0: device->card_type = GB20x; break;
default:
break;
}
@@ -3242,11 +3333,19 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
+ case 0x180: device->chip = &nv180_chipset; break;
case 0x192: device->chip = &nv192_chipset; break;
case 0x193: device->chip = &nv193_chipset; break;
case 0x194: device->chip = &nv194_chipset; break;
case 0x196: device->chip = &nv196_chipset; break;
case 0x197: device->chip = &nv197_chipset; break;
+ case 0x1a0: device->chip = &nv1a0_chipset; break;
+ case 0x1a2: device->chip = &nv1a2_chipset; break;
+ case 0x1b2: device->chip = &nv1b2_chipset; break;
+ case 0x1b3: device->chip = &nv1b3_chipset; break;
+ case 0x1b5: device->chip = &nv1b5_chipset; break;
+ case 0x1b6: device->chip = &nv1b6_chipset; break;
+ case 0x1b7: device->chip = &nv1b7_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
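
The new chipsets slot into the existing two-level decode: one switch derives the card family from the chipset value's upper bits, a second selects the exact per-chipset table. A hedged sketch of the family step, assuming the masking the visible cases imply:

enum ex_family { EX_GH100, EX_GB10x, EX_GB20x, EX_UNKNOWN };

static enum ex_family
ex_card_family(unsigned chipset)
{
	switch (chipset & 0x1f0) {	/* assumed mask; fits 0x180/0x1a0/0x1b0 */
	case 0x180: return EX_GH100;
	case 0x1a0: return EX_GB10x;
	case 0x1b0: return EX_GB20x;
	default:    return EX_UNKNOWN;
	}
}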
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 3ff6436007fa..8f0261a0d618 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1560,18 +1560,42 @@ nvkm_device_pci(struct nvkm_device *device)
return container_of(device, struct nvkm_device_pci, device);
}
+static int
+nvkm_device_pci_resource_idx(struct nvkm_device_pci *pdev, enum nvkm_bar_id bar)
+{
+ int idx = 0;
+
+ if (bar == NVKM_BAR0_PRI)
+ return idx;
+
+ idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+ if (bar == NVKM_BAR1_FB)
+ return idx;
+
+ idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+ if (bar == NVKM_BAR2_INST)
+ return idx;
+
+ WARN_ON(1);
+ return -1;
+}
+
static resource_size_t
-nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
- return pci_resource_start(pdev->pdev, bar);
+ int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+ return idx >= 0 ? pci_resource_start(pdev->pdev, idx) : 0;
}
static resource_size_t
-nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_pci *pdev = nvkm_device_pci(device);
- return pci_resource_len(pdev->pdev, bar);
+ int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+ return idx >= 0 ? pci_resource_len(pdev->pdev, idx) : 0;
}
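
The index walk above exists because a 64-bit memory BAR consumes two consecutive BAR slots in PCI config space, so the resource index of the FB and INST apertures depends on the width of the BARs before them. A runnable illustration with BAR0 and BAR1 both assumed 64-bit, a common layout on recent NVIDIA GPUs:

#include <stdio.h>

#define EX_MEM_64 0x1	/* stand-in for IORESOURCE_MEM_64 */

int main(void)
{
	unsigned flags[6] = { EX_MEM_64, 0, EX_MEM_64, 0, 0, 0 };
	int idx = 0;

	printf("BAR0_PRI  -> resource %d\n", idx);	/* 0 */
	idx += (flags[idx] & EX_MEM_64) ? 2 : 1;
	printf("BAR1_FB   -> resource %d\n", idx);	/* 2 */
	idx += (flags[idx] & EX_MEM_64) ? 2 : 1;
	printf("BAR2_INST -> resource %d\n", idx);	/* 4 */
	return 0;
}

The tegra variant later in the patch needs no walk; platform resources are already dense, so the enum maps straight to indices 0 and 1.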
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index e42b18820a95..75ee7506d443 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -11,6 +11,7 @@
#include <subdev/devinit.h>
#include <subdev/fault.h>
#include <subdev/fb.h>
+#include <subdev/fsp.h>
#include <subdev/fuse.h>
#include <subdev/gpio.h>
#include <subdev/gsp.h>
@@ -43,8 +44,6 @@
#include <engine/msvld.h>
#include <engine/nvenc.h>
#include <engine/nvdec.h>
-#include <engine/nvjpg.h>
-#include <engine/ofa.h>
#include <engine/sec.h>
#include <engine/sec2.h>
#include <engine/sw.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 78a83f904bbd..114e50ca1827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -186,21 +186,31 @@ nvkm_device_tegra(struct nvkm_device *device)
}
static struct resource *
-nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
- return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+ int idx;
+
+ switch (bar) {
+ case NVKM_BAR0_PRI: idx = 0; break;
+ case NVKM_BAR1_FB : idx = 1; break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return platform_get_resource(tdev->pdev, IORESOURCE_MEM, idx);
}
static resource_size_t
-nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? res->start : 0;
}
static resource_size_t
-nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
{
struct resource *res = nvkm_device_tegra_resource(device, bar);
return res ? resource_size(res) : 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index d7f75b3a43c8..58191b7a0494 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -148,6 +148,9 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
+ case GH100: args->v0.family = NV_DEVICE_INFO_V0_HOPPER; break;
+ case GB10x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
+ case GB20x: args->v0.family = NV_DEVICE_INFO_V0_BLACKWELL; break;
default:
args->v0.family = 0;
break;
@@ -209,8 +212,8 @@ nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = device->func->resource_addr(device, 0);
- *size = device->func->resource_size(device, 0);
+ *addr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ *size = device->func->resource_size(device, NVKM_BAR0_PRI);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e346e924fee8..e1aecd3fe96c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -27,9 +27,6 @@ nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
nvkm-y += nvkm/engine/disp/ga102.o
-nvkm-y += nvkm/engine/disp/ad102.o
-
-nvkm-y += nvkm/engine/disp/r535.o
nvkm-y += nvkm/engine/disp/udisp.o
nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
deleted file mode 100644
index 7f300a79aa29..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-#include "chan.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_disp_func
-ad102_disp = {
- .uevent = &gv100_disp_chan_uevent,
- .ramht_size = 0x2000,
- .root = { 0, 0,AD102_DISP },
- .user = {
- {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
- {{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
- {{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
- {{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
- {{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
- {}
- },
-};
-
-int
-ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_disp **pdisp)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
index 4e43ee383c34..9b84e357d354 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
@@ -49,7 +49,7 @@ nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
struct nvkm_device *device = chan->disp->engine.subdev.device;
- const u64 base = device->func->resource_addr(device, 0);
+ const u64 base = device->func->resource_addr(device, NVKM_BAR0_PRI);
*type = NVKM_OBJECT_MAP_IO;
*addr = base + chan->func->user(chan, size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index cfa3698d3a2f..614921166fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -805,7 +805,7 @@ gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
struct gv100_disp_caps *caps = gv100_disp_caps(object);
struct nvkm_device *device = caps->disp->engine.subdev.device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = 0x640000 + device->func->resource_addr(device, 0);
+ *addr = 0x640000 + device->func->resource_addr(device, NVKM_BAR0_PRI);
*size = 0x1000;
return 0;
}
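
Both disp hunks only swap the literal 0 for NVKM_BAR0_PRI; the mapping arithmetic is unchanged. As a sketch, assuming bar0 holds the value resource_addr() returns:

static void
ex_caps_window(unsigned long long bar0,
	       unsigned long long *addr, unsigned long long *size)
{
	*addr = bar0 + 0x640000;	/* fixed window offset, per the hunk */
	*size = 0x1000;			/* one 4KiB page */
}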
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index aff92848abfe..376e9c3bcb1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -25,8 +25,7 @@ nvkm-y += nvkm/engine/fifo/gv100.o
nvkm-y += nvkm/engine/fifo/tu102.o
nvkm-y += nvkm/engine/fifo/ga100.o
nvkm-y += nvkm/engine/fifo/ga102.o
-
-nvkm-y += nvkm/engine/fifo/r535.o
+nvkm-y += nvkm/engine/fifo/gb202.o
nvkm-y += nvkm/engine/fifo/ucgrp.o
nvkm-y += nvkm/engine/fifo/uchan.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 22443fe4a39f..fdffa0391b31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -303,7 +303,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
}
/* Allocate USERD + BAR1 polling area. */
- if (fifo->func->chan.func->userd->bar == 1) {
+ if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) {
struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *
@@ -349,8 +349,6 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_chid_unref(&fifo->cgid);
nvkm_chid_unref(&fifo->chid);
- mutex_destroy(&fifo->userd.mutex);
-
nvkm_event_fini(&fifo->nonstall.event);
mutex_destroy(&fifo->mutex);
@@ -391,8 +389,5 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
spin_lock_init(&fifo->lock);
mutex_init(&fifo->mutex);
- INIT_LIST_HEAD(&fifo->userd.list);
- mutex_init(&fifo->userd.mutex);
-
return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 7d4716dcd512..4e09985424b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -275,11 +275,7 @@ nvkm_chan_del(struct nvkm_chan **pchan)
nvkm_gpuobj_del(&chan->ramfc);
if (chan->cgrp) {
- if (!chan->func->id_put)
- nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
- else
- chan->func->id_put(chan);
-
+ nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
nvkm_cgrp_unref(&chan->cgrp);
}
@@ -359,14 +355,14 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
/* Validate arguments against class requirements. */
if ((runq && runq >= runl->func->runqs) ||
(!func->inst->vmm != !vmm) ||
- ((func->userd->bar < 0) == !userd) ||
+ (!func->userd->bar == !userd) ||
(!func->ramfc->ctxdma != !dmaobj) ||
((func->ramfc->devm < devm) && devm != BIT(0)) ||
(!func->ramfc->priv && priv)) {
RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
"push:%d:%p devm:%08x:%08x priv:%d:%d",
runl->func->runqs, runq, func->inst->vmm, vmm,
- func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
+ func->userd->bar, userd, func->ramfc->ctxdma, dmaobj,
func->ramfc->devm, devm, func->ramfc->priv, priv);
return -EINVAL;
}
@@ -441,30 +437,26 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
}
/* Allocate channel ID. */
- if (!chan->func->id_get) {
- chan->id = nvkm_chid_get(runl->chid, chan);
- if (chan->id >= 0) {
- if (func->userd->bar < 0) {
- if (ouserd + chan->func->userd->size >=
- nvkm_memory_size(userd)) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- ret = nvkm_memory_kmap(userd, &chan->userd.mem);
- if (ret) {
- RUNL_DEBUG(runl, "userd %d", ret);
- return ret;
- }
-
- chan->userd.base = ouserd;
- } else {
- chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
- chan->userd.base = chan->id * chan->func->userd->size;
+ chan->id = nvkm_chid_get(runl->chid, chan);
+ if (chan->id >= 0) {
+ if (!func->userd->bar) {
+ if (ouserd + chan->func->userd->size >=
+ nvkm_memory_size(userd)) {
+ RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+ if (ret) {
+ RUNL_DEBUG(runl, "userd %d", ret);
+ return ret;
}
+
+ chan->userd.base = ouserd;
+ } else {
+ chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+ chan->userd.base = chan->id * chan->func->userd->size;
}
- } else {
- chan->id = chan->func->id_get(chan, userd, ouserd);
}
if (chan->id < 0) {
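
With id_get()/id_put() gone, channel IDs always come from the runlist allocator, and USERD placement is decided by the bar field alone: an unset bar means the client supplied the backing memory (bounds-checked against ouserd), otherwise the channel receives an id-indexed slice of the fifo-wide slab. A hedged sketch of that placement decision, with invented names:

static unsigned long long
ex_userd_base(int chan_id, int bar_valid, unsigned long long ouserd,
	      unsigned userd_size)
{
	if (!bar_valid)		/* client-supplied memory, kmap'd by caller */
		return ouserd;
	return (unsigned long long)chan_id * userd_size;	/* slab slice */
}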
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 013682a709d5..445db5dfd1e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,9 +17,6 @@ struct nvkm_cctx {
};
struct nvkm_chan_func {
- int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd);
- void (*id_put)(struct nvkm_chan *);
-
const struct nvkm_chan_func_inst {
u32 size;
bool zero;
@@ -27,7 +24,7 @@ struct nvkm_chan_func {
} *inst;
const struct nvkm_chan_func_userd {
- int bar;
+ enum nvkm_bar_id bar;
u32 base;
u32 size;
void (*clear)(struct nvkm_chan *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
new file mode 100644
index 000000000000..b469e8afeb0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gb202.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "runl.h"
+
+u32
+gb202_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return BIT(30) | (chan->cgrp->runl->id << 16) | chan->id;
+}
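
Comparing this with tu102_chan_doorbell_handle() further down gives the full handle layout: channel id in the low bits, runlist id from bit 16, and bit 30 set only on GB20x. A combined sketch under that reading:

static unsigned
ex_doorbell_handle(int runl_id, int chan_id, int is_gb20x)
{
	unsigned handle = ((unsigned)runl_id << 16) | (unsigned)chan_id;

	if (is_gb20x)
		handle |= 1u << 30;	/* the only GB202 difference */
	return handle;
}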
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 6c94451d0faa..e4a4fad2eafc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -133,7 +133,7 @@ gf100_chan_userd_clear(struct nvkm_chan *chan)
static const struct nvkm_chan_func_userd
gf100_chan_userd = {
- .bar = 1,
+ .bar = NVKM_BAR1_FB,
.size = 0x1000,
.clear = gf100_chan_userd_clear,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index d8a4d773a58c..5655eda52a7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -113,7 +113,7 @@ gk104_chan_ramfc = {
const struct nvkm_chan_func_userd
gk104_chan_userd = {
- .bar = 1,
+ .bar = NVKM_BAR1_FB,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
@@ -745,7 +745,7 @@ gk104_fifo_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
- if (fifo->func->chan.func->userd->bar == 1)
+ if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB)
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 33066c8cdc64..d7f046c03cfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -70,7 +70,6 @@ gv100_chan_ramfc = {
const struct nvkm_chan_func_userd
gv100_chan_userd = {
- .bar = -1,
.size = 0x200,
.clear = gf100_chan_userd_clear,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 674faf002b20..c4b8e567d86f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -154,7 +154,7 @@ nv04_chan_ramfc = {
const struct nvkm_chan_func_userd
nv04_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0x800000,
.size = 0x010000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index e50a94b6d7f8..084ca5561ee1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -93,7 +93,7 @@ nv40_chan_ramfc = {
static const struct nvkm_chan_func_userd
nv40_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0xc00000,
.size = 0x001000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 954b5f3a7d57..7bf77661157d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -124,7 +124,7 @@ nv50_chan_ramfc = {
const struct nvkm_chan_func_userd
nv50_chan_userd = {
- .bar = 0,
+ .bar = NVKM_BAR0_PRI,
.base = 0xc00000,
.size = 0x002000,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index a0f3277605a5..5e81ae195329 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -6,6 +6,7 @@
#include <core/enum.h>
struct nvkm_cctx;
struct nvkm_cgrp;
+struct nvkm_chan;
struct nvkm_engn;
struct nvkm_memory;
struct nvkm_runl;
@@ -195,6 +196,7 @@ extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc;
void tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *, u32 info);
extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault;
+u32 tu102_chan_doorbell_handle(struct nvkm_chan *);
int ga100_fifo_runl_ctor(struct nvkm_fifo *);
int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
@@ -206,6 +208,8 @@ extern const struct nvkm_engn_func ga100_engn_ce;
extern const struct nvkm_cgrp_func ga100_cgrp;
extern const struct nvkm_chan_func ga100_chan;
+u32 gb202_chan_doorbell_handle(struct nvkm_chan *);
+
int nvkm_uchan_new(struct nvkm_fifo *, struct nvkm_cgrp *, const struct nvkm_oclass *,
void *argv, u32 argc, struct nvkm_object **);
int nvkm_ucgrp_new(struct nvkm_fifo *, const struct nvkm_oclass *, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index 1d39a6840a40..c5a03298e88c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -31,7 +31,7 @@
#include <nvif/class.h>
-static u32
+u32
tu102_chan_doorbell_handle(struct nvkm_chan *chan)
{
return (chan->cgrp->runl->id << 16) | chan->id;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
index 9e56bcc166ed..52420a1edca5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
@@ -258,7 +258,7 @@ nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
- if (chan->func->userd->bar < 0)
+ if (!chan->func->userd->bar)
return -ENOSYS;
*type = NVKM_OBJECT_MAP_IO;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 1555f8c40b4f..b5418f05ccd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -41,9 +41,6 @@ nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/gv100.o
nvkm-y += nvkm/engine/gr/tu102.o
nvkm-y += nvkm/engine/gr/ga102.o
-nvkm-y += nvkm/engine/gr/ad102.o
-
-nvkm-y += nvkm/engine/gr/r535.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
index d285c597aff9..2b51f1d0c281 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
@@ -352,7 +352,7 @@ int
ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&ga102_gr, device, type, inst, pgr);
+ return -ENODEV;
return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index b0e0c9305034..54f686ba39ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -445,6 +445,4 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gr **);
-int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 02a8c62a0a32..13407fafe947 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -297,7 +297,7 @@ nv20_gr_init(struct nvkm_gr *base)
nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
/* begin RAM config */
- vramsz = device->func->resource_size(device, 1) - 1;
+ vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index a5e1f02791b4..b609b0150ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -386,7 +386,7 @@ nv40_gr_init(struct nvkm_gr *base)
}
/* begin RAM config */
- vramsz = device->func->resource_size(device, 1) - 1;
+ vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
switch (device->chipset) {
case 0x40:
nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
deleted file mode 100644
index f4bed3eb1ec2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "gf100.h"
-
-#include <core/memory.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu/vmm.h>
-#include <engine/fifo/priv.h>
-
-#include <nvif/if900d.h>
-
-#include <nvhw/drf.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-
-#define r535_gr(p) container_of((p), struct r535_gr, base)
-
-#define R515_GR_MAX_CTXBUFS 9
-
-struct r535_gr {
- struct nvkm_gr base;
-
- struct {
- u16 bufferId;
- u32 size;
- u8 page;
- u8 align;
- bool global;
- bool init;
- bool ro;
- } ctxbuf[R515_GR_MAX_CTXBUFS];
- int ctxbuf_nr;
-
- struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_chan {
- struct nvkm_object object;
- struct r535_gr *gr;
-
- struct nvkm_vmm *vmm;
- struct nvkm_chan *chan;
-
- struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
- struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
-};
-
-struct r535_gr_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_gr_obj_dtor(struct nvkm_object *object)
-{
- struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_gr_obj = {
- .dtor = r535_gr_obj_dtor,
-};
-
-static int
-r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
- struct r535_gr_obj *obj;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
- &obj->rm);
-}
-
-static void *
-r535_gr_chan_dtor(struct nvkm_object *object)
-{
- struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
- struct r535_gr *gr = grc->gr;
-
- for (int i = 0; i < gr->ctxbuf_nr; i++) {
- nvkm_vmm_put(grc->vmm, &grc->vma[i]);
- nvkm_memory_unref(&grc->mem[i]);
- }
-
- nvkm_vmm_unref(&grc->vmm);
- return grc;
-}
-
-static const struct nvkm_object_func
-r535_gr_chan = {
- .dtor = r535_gr_chan_dtor,
-};
-
-static int
-r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
- struct nvkm_memory **pmem, struct nvkm_vma **pvma,
- struct nvkm_gsp_object *chan)
-{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
- NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
- if (WARN_ON(IS_ERR(ctrl)))
- return PTR_ERR(ctrl);
-
- ctrl->engineType = 1;
- ctrl->hChanClient = vmm->rm.client.object.handle;
- ctrl->hObject = chan->handle;
-
- for (int i = 0; i < gr->ctxbuf_nr; i++) {
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
- &ctrl->promoteEntry[ctrl->entryCount];
- const bool alloc = golden || !gr->ctxbuf[i].global;
- int ret;
-
- entry->bufferId = gr->ctxbuf[i].bufferId;
- entry->bInitialize = gr->ctxbuf[i].init && alloc;
-
- if (alloc) {
- ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
- NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
- gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
- gr->ctxbuf[i].init, &pmem[i]);
- if (WARN_ON(ret))
- return ret;
-
- if (gr->ctxbuf[i].bufferId ==
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
- entry->bNonmapped = 1;
- } else {
- if (gr->ctxbuf[i].bufferId ==
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
- continue;
-
- pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
- }
-
- if (!entry->bNonmapped) {
- struct gf100_vmm_map_v0 args = {
- .priv = 1,
- .ro = gr->ctxbuf[i].ro,
- };
-
- mutex_lock(&vmm->mutex.vmm);
- ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
- nvkm_memory_size(pmem[i]), &pvma[i]);
- mutex_unlock(&vmm->mutex.vmm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
- if (ret)
- return ret;
-
- entry->gpuVirtAddr = pvma[i]->addr;
- }
-
- if (entry->bInitialize) {
- entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
- entry->size = gr->ctxbuf[i].size;
- entry->physAttr = 4;
- }
-
- nvkm_debug(subdev,
- "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
- entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
- entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
-
- ctrl->entryCount++;
- }
-
- return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
-}
-
-static int
-r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
- struct nvkm_object **pobject)
-{
- struct r535_gr *gr = r535_gr(base);
- struct r535_gr_chan *grc;
- int ret;
-
- if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
- grc->gr = gr;
- grc->vmm = nvkm_vmm_ref(chan->vmm);
- grc->chan = chan;
- *pobject = &grc->object;
-
- ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static u64
-r535_gr_units(struct nvkm_gr *gr)
-{
- struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
-
- return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
-}
-
-static int
-r535_gr_oneinit(struct nvkm_gr *base)
-{
- NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
- struct r535_gr *gr = container_of(base, typeof(*gr), base);
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_gsp *gsp = device->gsp;
- struct nvkm_mmu *mmu = device->mmu;
- struct {
- struct nvkm_memory *inst;
- struct nvkm_vmm *vmm;
- struct nvkm_gsp_object chan;
- struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
- } golden = {};
- int ret;
-
- /* Allocate a channel to use for golden context init. */
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
- if (ret)
- goto done;
-
- ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
- if (ret)
- goto done;
-
- ret = mmu->func->promote_vmm(golden.vmm);
- if (ret)
- goto done;
-
- {
- NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
-
- args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
- device->fifo->func->chan.user.oclass,
- sizeof(*args), &golden.chan);
- if (IS_ERR(args)) {
- ret = PTR_ERR(args);
- goto done;
- }
-
- args->gpFifoOffset = 0;
- args->gpFifoEntries = 0x1000 / 8;
- args->flags =
- NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
- NVDEF(NVOS04, FLAGS, VPR, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
- NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
- NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
- NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
- NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
- NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
- NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
- NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
- NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
- NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
- NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
- NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
- NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
- args->hVASpace = golden.vmm->rm.object.handle;
- args->engineType = 1;
- args->instanceMem.base = nvkm_memory_addr(golden.inst);
- args->instanceMem.size = 0x1000;
- args->instanceMem.addressSpace = 2;
- args->instanceMem.cacheAttrib = 1;
- args->ramfcMem.base = nvkm_memory_addr(golden.inst);
- args->ramfcMem.size = 0x200;
- args->ramfcMem.addressSpace = 2;
- args->ramfcMem.cacheAttrib = 1;
- args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
- args->userdMem.size = 0x200;
- args->userdMem.addressSpace = 2;
- args->userdMem.cacheAttrib = 1;
- args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
- args->mthdbufMem.size = 0x5000;
- args->mthdbufMem.addressSpace = 2;
- args->mthdbufMem.cacheAttrib = 1;
- args->internalFlags =
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
- NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
-
- ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
- if (ret)
- goto done;
- }
-
- /* Fetch context buffer info from RM and allocate each of them here to use
- * during golden context init (or later as a global context buffer).
- *
- * Also build the information that'll be used to create channel contexts.
- */
- info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
- NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
- sizeof(*info));
- if (WARN_ON(IS_ERR(info))) {
- ret = PTR_ERR(info);
- goto done;
- }
-
- for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
- static const struct {
- u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
- u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
- bool global;
- bool init;
- bool ro;
- } map[] = {
-#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
- .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
- .global = (G), .init = (I), .ro = (R) }
-#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
- /* global init ro */
- _A( GRAPHICS, MAIN, false, true, false),
- _B( PATCH, false, true, false),
- _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
- _B( PAGEPOOL, true, false, false),
- _B( ATTRIBUTE_CB, true, false, false),
- _B( RTV_CB_GLOBAL, true, false, false),
- _B( FECS_EVENT, true, true, false),
- _B( PRIV_ACCESS_MAP, true, true, true),
-#undef _B
-#undef _A
- };
- u32 size = info->engineContextBuffersInfo[0].engine[i].size;
- u8 align, page;
- int id;
-
- for (id = 0; id < ARRAY_SIZE(map); id++) {
- if (map[id].id0 == i)
- break;
- }
-
- nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
- size, (id < ARRAY_SIZE(map)) ? "*" : "");
- if (id >= ARRAY_SIZE(map))
- continue;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
- size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
-
- if (size >= 1 << 21) page = 21;
- else if (size >= 1 << 16) page = 16;
- else page = 12;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
- align = order_base_2(size);
- else
- align = page;
-
- if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
- continue;
-
- gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
- gr->ctxbuf[gr->ctxbuf_nr].size = size;
- gr->ctxbuf[gr->ctxbuf_nr].page = page;
- gr->ctxbuf[gr->ctxbuf_nr].align = align;
- gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
- gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
- gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
- gr->ctxbuf_nr++;
-
- if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
- if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
- continue;
-
- gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
- gr->ctxbuf[gr->ctxbuf_nr].bufferId =
- NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
- gr->ctxbuf_nr++;
- }
- }
-
- nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
-
- /* Promote golden context to RM. */
- ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
- if (ret)
- goto done;
-
- /* Allocate 3D class on channel to trigger golden context init in RM. */
- {
- int i;
-
- for (i = 0; gr->base.func->sclass[i].ctor; i++) {
- if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
- struct nvkm_gsp_object threed;
-
- ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
- gr->base.func->sclass[i].oclass, 0,
- &threed);
- if (ret)
- goto done;
-
- nvkm_gsp_rm_free(&threed);
- break;
- }
- }
-
- if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
- ret = -EINVAL;
- goto done;
- }
- }
-
-done:
- nvkm_gsp_rm_free(&golden.chan);
- for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
- nvkm_vmm_put(golden.vmm, &golden.vma[i]);
- nvkm_vmm_unref(&golden.vmm);
- nvkm_memory_unref(&golden.inst);
- return ret;
-
-}
-
-static void *
-r535_gr_dtor(struct nvkm_gr *base)
-{
- struct r535_gr *gr = r535_gr(base);
-
- while (gr->ctxbuf_nr)
- nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
-
- kfree(gr->base.func);
- return gr;
-}
-
-int
-r535_gr_new(const struct gf100_gr_func *hw,
- struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
- struct nvkm_gr_func *rm;
- struct r535_gr *gr;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_gr_dtor;
- rm->oneinit = r535_gr_oneinit;
- rm->units = r535_gr_units;
- rm->chan_new = r535_gr_chan_new;
-
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_gr_obj_ctor;
- }
-
- if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- *pgr = &gr->base;
-
- return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
-}
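
Among the logic lost with gr/r535.c is the context-buffer sizing policy: buffers of 2MiB and up use 21-bit pages, 64KiB and up use 16-bit, everything else 4KiB, and the attribute CB is additionally aligned to its own size via order_base_2(). A self-contained restatement, modeling order_base_2() with a loop:

static void
ex_ctxbuf_layout(unsigned size, int is_attribute_cb,
		 unsigned char *page, unsigned char *align)
{
	if (size >= 1u << 21)
		*page = 21;		/* 2MiB pages */
	else if (size >= 1u << 16)
		*page = 16;		/* 64KiB pages */
	else
		*page = 12;		/* 4KiB pages */

	if (is_attribute_cb) {
		unsigned char o = 0;	/* order_base_2(size) */

		while ((1u << o) < size)
			o++;
		*align = o;
	} else {
		*align = *page;
	}
}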
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index b7a458e9040a..bda8054c6b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -219,7 +219,7 @@ int
tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&tu102_gr, device, type, inst, pgr);
+ return -ENODEV;
return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index 2b0e923cb755..37b0cdc760c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -2,8 +2,4 @@
nvkm-y += nvkm/engine/nvdec/base.o
nvkm-y += nvkm/engine/nvdec/gm107.o
nvkm-y += nvkm/engine/nvdec/tu102.o
-nvkm-y += nvkm/engine/nvdec/ga100.o
nvkm-y += nvkm/engine/nvdec/ga102.o
-nvkm-y += nvkm/engine/nvdec/ad102.o
-
-nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
index 022a9c824304..eea6368adae2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
@@ -23,16 +23,6 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvdec_gsp = {
- .sclass = {
- { -1, -1, NVC7B0_VIDEO_DECODER },
- {}
- }
-};
-
static const struct nvkm_falcon_func
ga102_nvdec_flcn = {
.disable = gm200_flcn_disable,
@@ -67,7 +57,7 @@ ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst
struct nvkm_nvdec **pnvdec)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec);
+ return -ENODEV;
return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index f506ae83bfd7..f8d43e913093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -20,7 +20,4 @@ extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[];
int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **);
-
-int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
deleted file mode 100644
index 75a24f3e6617..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvdec_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvdec_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvdec_obj = {
- .dtor = r535_nvdec_obj_dtor,
-};
-
-static int
-r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvdec_obj *obj;
- NV_BSP_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvdec_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
-
- kfree(nvdec->engine.func);
- return nvdec;
-}
-
-int
-r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
-{
- struct nvkm_engine_func *rm;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvdec_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvdec_obj_ctor;
- }
-
- if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
-}
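Note: the four r535_{nvdec,nvenc,nvjpg,ofa}_new() constructors removed by this series are near-identical. Their shared shape, condensed into one hypothetical helper (not part of the patch):

        static struct nvkm_engine_func *
        r535_engine_func_dup(const struct nvkm_engine_func *hw,
                             void *(*dtor)(struct nvkm_engine *),
                             int (*obj_ctor)(const struct nvkm_oclass *, void *, u32,
                                             struct nvkm_object **))
        {
                struct nvkm_engine_func *rm;
                int nclass;

                /* Count the HW class list; it is terminated by oclass == 0. */
                for (nclass = 0; hw->sclass[nclass].oclass; nclass++)
                        ;

                rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]),
                             GFP_KERNEL);
                if (!rm)
                        return NULL;

                /* Clone the class list, swapping each ctor for the RM object
                 * ctor; the engine dtor kfree()s this table again.
                 */
                rm->dtor = dtor;
                for (int i = 0; i < nclass; i++) {
                        rm->sclass[i].minver = hw->sclass[i].minver;
                        rm->sclass[i].maxver = hw->sclass[i].maxver;
                        rm->sclass[i].oclass = hw->sclass[i].oclass;
                        rm->sclass[i].ctor = obj_ctor;
                }
                return rm;
        }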
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
index 808c8e010b9e..fe95b6e22f21 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
@@ -23,22 +23,12 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvdec = {
- .sclass = {
- { -1, -1, NVC4B0_VIDEO_DECODER },
- {}
- }
-};
-
int
tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvdec **pnvdec)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec);
+ return -ENODEV;
return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
index 2c1495b730f3..6dcb20d1d156 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
@@ -2,7 +2,3 @@
nvkm-y += nvkm/engine/nvenc/base.o
nvkm-y += nvkm/engine/nvenc/gm107.o
nvkm-y += nvkm/engine/nvenc/tu102.o
-nvkm-y += nvkm/engine/nvenc/ga102.o
-nvkm-y += nvkm/engine/nvenc/ad102.o
-
-nvkm-y += nvkm/engine/nvenc/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
deleted file mode 100644
index 6463ab8e5871..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_nvenc = {
- .sclass = {
- { -1, -1, NVC7B7_VIDEO_ENCODER },
- {}
- }
-};
-
-int
-ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvenc **pnvenc)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
index 7917affc6505..b097e3f2867b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
@@ -18,7 +18,4 @@ extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[];
int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
int, struct nvkm_nvenc **pnvenc);
-
-int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_nvenc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
deleted file mode 100644
index c8a2a9196ce5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvenc_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvenc_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvenc_obj = {
- .dtor = r535_nvenc_obj_dtor,
-};
-
-static int
-r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvenc_obj *obj;
- NV_MSENC_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvenc_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
-
- kfree(nvenc->engine.func);
- return nvenc;
-}
-
-int
-r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
-{
- struct nvkm_engine_func *rm;
- int nclass;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvenc_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvenc_obj_ctor;
- }
-
- if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
- kfree(rm);
- return -ENOMEM;
- }
-
- return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
index 933864423bb3..8a436b398749 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
@@ -23,22 +23,12 @@
#include <subdev/gsp.h>
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-tu102_nvenc = {
- .sclass = {
- { -1, -1, NVC4B7_VIDEO_ENCODER },
- {}
- }
-};
-
int
tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_nvenc **pnvenc)
{
if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc);
+ return -ENODEV;
return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
deleted file mode 100644
index 1408f664add6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/nvjpg/ga100.o
-nvkm-y += nvkm/engine/nvjpg/ad102.o
-
-nvkm-y += nvkm/engine/nvjpg/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
deleted file mode 100644
index 62705dc6494c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ad102_nvjpg = {
- .sclass = {
- { -1, -1, NVC9D1_VIDEO_NVJPG },
- {}
- }
-};
-
-int
-ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
deleted file mode 100644
index f550eb07da5a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga100_nvjpg = {
- .sclass = {
- { -1, -1, NVC4D1_VIDEO_NVJPG },
- {}
- }
-};
-
-int
-ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
deleted file mode 100644
index 1e80cf70033a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_NVJPG_PRIV_H__
-#define __NVKM_NVJPG_PRIV_H__
-#include <engine/nvjpg.h>
-
-int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
deleted file mode 100644
index 1babddc4eb80..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_nvjpg_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_nvjpg_obj_dtor(struct nvkm_object *object)
-{
- struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_nvjpg_obj = {
- .dtor = r535_nvjpg_obj_dtor,
-};
-
-static int
-r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_nvjpg_obj *obj;
- NV_NVJPG_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
- args->engineInstance = oclass->engine->subdev.inst;
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_nvjpg_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_nvjpg_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
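Note: two cleanup styles coexist in the deleted constructors. nvdec/nvenc allocate the engine object themselves and call nvkm_engine_ctor(), relying on the engine dtor to free the cloned func table; nvjpg/ofa (above and below) instead go through nvkm_engine_new_() and free the clone on failure:

        /* nvjpg/ofa flavour, as deleted above. */
        ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
        if (ret)
                kfree(rm);      /* engine dtor never ran; free the clone here */
        return ret;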
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
deleted file mode 100644
index 99f1713d7e51..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/ofa/ga100.o
-nvkm-y += nvkm/engine/ofa/ga102.o
-nvkm-y += nvkm/engine/ofa/ad102.o
-
-nvkm-y += nvkm/engine/ofa/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
deleted file mode 100644
index ef474f61a1b5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga100_ofa = {
- .sclass = {
- { -1, -1, NVC6FA_VIDEO_OFA },
- {}
- }
-};
-
-int
-ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga100_ofa, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
deleted file mode 100644
index bea255529993..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <subdev/gsp.h>
-
-#include <nvif/class.h>
-
-static const struct nvkm_engine_func
-ga102_ofa = {
- .sclass = {
- { -1, -1, NVC7FA_VIDEO_OFA },
- {}
- }
-};
-
-int
-ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_engine **pengine)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_ofa_new(&ga102_ofa, device, type, inst, pengine);
-
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
deleted file mode 100644
index caf29e6bddb4..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_OFA_PRIV_H__
-#define __NVKM_OFA_PRIV_H__
-#include <engine/ofa.h>
-
-int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *,
- enum nvkm_subdev_type, int, struct nvkm_engine **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
deleted file mode 100644
index 438dc692eefe..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2023 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "priv.h"
-
-#include <core/object.h>
-#include <subdev/gsp.h>
-#include <subdev/mmu.h>
-#include <engine/fifo.h>
-
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-
-struct r535_ofa_obj {
- struct nvkm_object object;
- struct nvkm_gsp_object rm;
-};
-
-static void *
-r535_ofa_obj_dtor(struct nvkm_object *object)
-{
- struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
-
- nvkm_gsp_rm_free(&obj->rm);
- return obj;
-}
-
-static const struct nvkm_object_func
-r535_ofa_obj = {
- .dtor = r535_ofa_obj_dtor,
-};
-
-static int
-r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
- struct nvkm_object **pobject)
-{
- struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
- struct r535_ofa_obj *obj;
- NV_OFA_ALLOCATION_PARAMETERS *args;
-
- if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
- return -ENOMEM;
-
- nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
- *pobject = &obj->object;
-
- args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
- sizeof(*args), &obj->rm);
- if (WARN_ON(IS_ERR(args)))
- return PTR_ERR(args);
-
- args->size = sizeof(*args);
-
- return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
-}
-
-static void *
-r535_ofa_dtor(struct nvkm_engine *engine)
-{
- kfree(engine->func);
- return engine;
-}
-
-int
-r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
-{
- struct nvkm_engine_func *rm;
- int nclass, ret;
-
- for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
-
- if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
- return -ENOMEM;
-
- rm->dtor = r535_ofa_dtor;
- for (int i = 0; i < nclass; i++) {
- rm->sclass[i].minver = hw->sclass[i].minver;
- rm->sclass[i].maxver = hw->sclass[i].maxver;
- rm->sclass[i].oclass = hw->sclass[i].oclass;
- rm->sclass[i].ctor = r535_ofa_obj_ctor;
- }
-
- ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
- if (ret)
- kfree(rm);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 4c2f6fc4ef58..c19ea4ea9bd3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -9,6 +9,7 @@ include $(src)/nvkm/subdev/fault/Kbuild
include $(src)/nvkm/subdev/fb/Kbuild
include $(src)/nvkm/subdev/fuse/Kbuild
include $(src)/nvkm/subdev/gpio/Kbuild
+include $(src)/nvkm/subdev/fsp/Kbuild
include $(src)/nvkm/subdev/gsp/Kbuild
include $(src)/nvkm/subdev/i2c/Kbuild
include $(src)/nvkm/subdev/iccsense/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 9754c6872543..8faee3317a74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -7,5 +7,3 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
nvkm-y += nvkm/subdev/bar/tu102.o
-
-nvkm-y += nvkm/subdev/bar/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 51070b7dda85..e5e60915029c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -82,7 +82,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base)
static int
gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
- struct lock_class_key *key, int bar_nr)
+ struct lock_class_key *key, enum nvkm_bar_id bar_id)
{
struct nvkm_device *device = bar->base.subdev.device;
resource_size_t bar_len;
@@ -93,14 +93,14 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
if (ret)
return ret;
- bar_len = device->func->resource_size(device, bar_nr);
+ bar_len = device->func->resource_size(device, bar_id);
if (!bar_len)
return -ENOMEM;
- if (bar_nr == 3 && bar->bar2_halve)
+ if (bar_id == NVKM_BAR2_INST && bar->bar2_halve)
bar_len >>= 1;
ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
- (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
+ (bar_id == NVKM_BAR2_INST) ? "bar2" : "bar1", &bar_vm->vmm);
if (ret)
return ret;
@@ -110,7 +110,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
/*
* Bootstrap page table lookup.
*/
- if (bar_nr == 3) {
+ if (bar_id == NVKM_BAR2_INST) {
ret = nvkm_vmm_boot(bar_vm->vmm);
if (ret)
return ret;
@@ -129,7 +129,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
if (bar->base.func->bar2.init) {
- ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, NVKM_BAR2_INST);
if (ret)
return ret;
@@ -138,7 +138,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
}
/* BAR1 */
- ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, NVKM_BAR1_FB);
if (ret)
return ret;
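Note: the bare 1/3 arguments encoded PCI BAR indices. The series replaces them with enum nvkm_bar_id; its assumed shape (it is defined in core device headers outside this hunk) is:

        enum nvkm_bar_id {
                NVKM_BAR_INVALID = 0,
                NVKM_BAR0_PRI,          /* register aperture */
                NVKM_BAR1_FB,           /* framebuffer aperture, was "1" */
                NVKM_BAR2_INST,         /* instance memory aperture, was "3" */
        };

so call sites read, e.g., device->func->resource_size(device, NVKM_BAR1_FB).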
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 27d8a1be43e4..6a881becb02c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -127,7 +127,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
start = 0x0100000000ULL;
- size = device->func->resource_size(device, 3);
+ size = device->func->resource_size(device, NVKM_BAR2_INST);
if (!size)
return -ENOMEM;
limit = start + size;
@@ -167,7 +167,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR1 */
start = 0x0000000000ULL;
- size = device->func->resource_size(device, 1);
+ size = device->func->resource_size(device, NVKM_BAR1_FB);
if (!size)
return -ENOMEM;
limit = start + size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 6c5bbff12eb4..b918e22df5a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -47,8 +47,8 @@
static inline struct io_mapping *
fbmem_init(struct nvkm_device *dev)
{
- return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
- dev->func->resource_size(dev, 1));
+ return io_mapping_create_wc(dev->func->resource_addr(dev, NVKM_BAR1_FB),
+ dev->func->resource_size(dev, NVKM_BAR1_FB));
}
static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
index c123e5893d76..cd2fbc0472d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
@@ -50,7 +50,7 @@ nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
struct nvkm_device *device = buffer->fault->subdev.device;
*type = NVKM_OBJECT_MAP_IO;
- *addr = device->func->resource_addr(device, 3) + buffer->addr;
+ *addr = device->func->resource_addr(device, NVKM_BAR2_INST) + buffer->addr;
*size = nvkm_memory_size(buffer->mem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d1611ad3bf81..8d8a5382d1b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -35,6 +35,9 @@ nvkm-y += nvkm/subdev/fb/gv100.o
nvkm-y += nvkm/subdev/fb/tu102.o
nvkm-y += nvkm/subdev/fb/ga100.o
nvkm-y += nvkm/subdev/fb/ga102.o
+nvkm-y += nvkm/subdev/fb/gh100.o
+nvkm-y += nvkm/subdev/fb/gb100.o
+nvkm-y += nvkm/subdev/fb/gb202.o
nvkm-y += nvkm/subdev/fb/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 25f82b372bca..2819780050d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -25,7 +25,7 @@
#include <subdev/gsp.h>
#include <engine/nvdec.h>
-static u64
+u64
ga102_fb_vidmem_size(struct nvkm_fb *fb)
{
return (u64)nvkm_rd32(fb->subdev.device, 0x1183a4) << 20;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
new file mode 100644
index 000000000000..1c78c8853617
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb100/dev_hshub_base.h>
+
+static void
+gb100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ const u32 addr_hi = upper_32_bits(fb->sysmem.flush_page_addr);
+ const u32 addr_lo = lower_32_bits(fb->sysmem.flush_page_addr);
+ const u32 hshub = DRF_LO(NV_PFB_HSHUB0);
+ struct nvkm_device *device = fb->subdev.device;
+
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
+ nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
+}
+
+static const struct nvkm_fb_func
+gb100_fb = {
+ .sysmem.flush_page_init = gb100_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gb100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gb100_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
new file mode 100644
index 000000000000..848505026d02
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb10b/dev_fbhub.h>
+
+static void
+gb202_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ const u64 addr = fb->sysmem.flush_page_addr;
+
+ nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
+ nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
+}
+
+static const struct nvkm_fb_func
+gb202_fb = {
+ .sysmem.flush_page_init = gb202_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gb202_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gb202_fb, device, type, inst, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
new file mode 100644
index 000000000000..2d8c51f882d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_fb.h>
+
+static void
+gh100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+ const u64 addr = fb->sysmem.flush_page_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT;
+ struct nvkm_device *device = fb->subdev.device;
+
+ nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
+ nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
+}
+
+static const struct nvkm_fb_func
+gh100_fb = {
+ .sysmem.flush_page_init = gh100_fb_sysmem_flush_page_init,
+ .vidmem.size = ga102_fb_vidmem_size,
+};
+
+int
+gh100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return r535_fb_new(&gh100_fb, device, type, inst, pfb);
+}
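Note: the three new flush_page_init hooks share one shape: split a 64-bit sysmem page address across a HI/LO register pair. gh100 pre-shifts the address by NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT before the split, and gb100 mirrors the write to both the PCIE and EG_PCIE pairs of the HSHUB. A distilled sketch (hypothetical helper, not in the patch):

        static void
        example_flush_page_write(struct nvkm_device *device, u32 reg_hi,
                                 u32 reg_lo, u64 addr)
        {
                nvkm_wr32(device, reg_hi, upper_32_bits(addr));
                nvkm_wr32(device, reg_lo, lower_32_bits(addr));
        }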
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 35c55dfba23d..ebe996503ab2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -98,4 +98,6 @@ int gp102_fb_vpr_scrub(struct nvkm_fb *);
int gv100_fb_init_page(struct nvkm_fb *);
bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
+
+u64 ga102_fb_vidmem_size(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild
new file mode 100644
index 000000000000..1a9ded3a86f8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/Kbuild
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/fsp/base.o
+nvkm-y += nvkm/subdev/fsp/gh100.o
+nvkm-y += nvkm/subdev/fsp/gb100.o
+nvkm-y += nvkm/subdev/fsp/gb202.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c
new file mode 100644
index 000000000000..e366a980baa9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/base.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+int
+nvkm_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig)
+{
+ return fsp->func->cot.boot_gsp_fmc(fsp, args_addr, rsvd_size, resume,
+ img_addr, hash, pkey, sig);
+}
+
+bool
+nvkm_fsp_verify_gsp_fmc(struct nvkm_fsp *fsp, u32 hash_size, u32 pkey_size, u32 sig_size)
+{
+ return hash_size == fsp->func->cot.size_hash &&
+ pkey_size == fsp->func->cot.size_pkey &&
+ sig_size == fsp->func->cot.size_sig;
+}
+
+static int
+nvkm_fsp_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fsp *fsp = nvkm_fsp(subdev);
+
+ return fsp->func->wait_secure_boot(fsp);
+}
+
+static void *
+nvkm_fsp_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_fsp *fsp = nvkm_fsp(subdev);
+
+ nvkm_falcon_dtor(&fsp->falcon);
+ return fsp;
+}
+
+static const struct nvkm_falcon_func
+nvkm_fsp_flcn = {
+ .emem_pio = &gp102_flcn_emem_pio,
+};
+
+static const struct nvkm_subdev_func
+nvkm_fsp = {
+ .dtor = nvkm_fsp_dtor,
+ .preinit = nvkm_fsp_preinit,
+};
+
+int
+nvkm_fsp_new_(const struct nvkm_fsp_func *func,
+ struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fsp **pfsp)
+{
+ struct nvkm_fsp *fsp;
+
+ fsp = *pfsp = kzalloc(sizeof(*fsp), GFP_KERNEL);
+ if (!fsp)
+ return -ENOMEM;
+
+ fsp->func = func;
+ nvkm_subdev_ctor(&nvkm_fsp, device, type, inst, &fsp->subdev);
+
+ return nvkm_falcon_ctor(&nvkm_fsp_flcn, &fsp->subdev, "fsp", 0x8f2000, &fsp->falcon);
+}
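Note: base.c gives GSP code two entry points: check that the firmware's signature block matches this chip's chain-of-trust sizes, then hand the image to the FSP. A hedged caller sketch; the struct and field names here are illustrative, not from the patch:

        struct example_fmc {
                u64 img_addr;
                u32 hash_size, pkey_size, sig_size;
                const u8 *hash, *pkey, *sig;
        };

        static int
        example_boot_fmc(struct nvkm_fsp *fsp, const struct example_fmc *fmc,
                         u64 args_addr, u32 rsvd_size, bool resume)
        {
                /* Reject an image whose signature layout doesn't match the
                 * sizes this chip's cot descriptor expects.
                 */
                if (!nvkm_fsp_verify_gsp_fmc(fsp, fmc->hash_size,
                                             fmc->pkey_size, fmc->sig_size))
                        return -EINVAL;

                return nvkm_fsp_boot_gsp_fmc(fsp, args_addr, rsvd_size, resume,
                                             fmc->img_addr, fmc->hash,
                                             fmc->pkey, fmc->sig);
        }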
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c
new file mode 100644
index 000000000000..e06636bf54b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb100.c
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_fsp_func
+gb100_fsp = {
+ .wait_secure_boot = gh100_fsp_wait_secure_boot,
+ .cot = {
+ .version = 2,
+ .size_hash = 48,
+ .size_pkey = 97,
+ .size_sig = 96,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gb100_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gb100_fsp, device, type, inst, pfsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c
new file mode 100644
index 000000000000..3438aac6383e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gb202.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gb202/dev_therm.h>
+
+static int
+gb202_fsp_wait_secure_boot(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ unsigned int timeout_ms = 4000;
+
+ do {
+ u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);
+
+ if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout_ms--);
+
+ return -ETIMEDOUT;
+}
+
+static const struct nvkm_fsp_func
+gb202_fsp = {
+ .wait_secure_boot = gb202_fsp_wait_secure_boot,
+ .cot = {
+ .version = 2,
+ .size_hash = 48,
+ .size_pkey = 97,
+ .size_sig = 96,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gb202_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gb202_fsp, device, type, inst, pfsp);
+}
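Note: this wait loop duplicates gh100_fsp_wait_secure_boot() below, differing only in which dev_therm header supplies the register. The bound is also approximate: each pass sleeps 1-2ms, so 4000 iterations take between roughly 4s and 8s. The distilled shape (hypothetical helper, not in the patch):

        static int
        example_poll(struct nvkm_fsp *fsp, bool (*done)(struct nvkm_fsp *),
                     unsigned int tries)
        {
                do {
                        if (done(fsp))
                                return 0;

                        usleep_range(1000, 2000);
                } while (tries--);

                return -ETIMEDOUT;
        }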
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c
new file mode 100644
index 000000000000..2815be4bf5de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/gh100.c
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_fsp_pri.h>
+#include <nvhw/ref/gh100/dev_therm.h>
+
+#include <nvrm/nvtypes.h>
+
+#define MCTP_HEADER_VERSION 3:0
+#define MCTP_HEADER_RSVD 7:4
+
+#define MCTP_HEADER_DEID 15:8
+#define MCTP_HEADER_SEID 23:16
+
+#define MCTP_HEADER_TAG 26:24
+#define MCTP_HEADER_TO 27:27
+#define MCTP_HEADER_SEQ 29:28
+#define MCTP_HEADER_EOM 30:30
+#define MCTP_HEADER_SOM 31:31
+
+#define MCTP_MSG_HEADER_TYPE 6:0
+#define MCTP_MSG_HEADER_IC 7:7
+
+#define MCTP_MSG_HEADER_VENDOR_ID 23:8
+#define MCTP_MSG_HEADER_NVDM_TYPE 31:24
+
+#define MCTP_MSG_HEADER_TYPE_VENDOR_PCI 0x7e
+#define MCTP_MSG_HEADER_VENDOR_ID_NV 0x10de
+
+#define NVDM_TYPE_COT 0x14
+#define NVDM_TYPE_FSP_RESPONSE 0x15
+
+#pragma pack(1)
+typedef struct nvdm_payload_cot
+{
+ NvU16 version;
+ NvU16 size;
+ NvU64 gspFmcSysmemOffset;
+ NvU64 frtsSysmemOffset;
+ NvU32 frtsSysmemSize;
+
+ // Note this is an offset from the end of FB
+ NvU64 frtsVidmemOffset;
+ NvU32 frtsVidmemSize;
+
+ // Authentication related fields
+ NvU32 hash384[12];
+ NvU32 publicKey[96];
+ NvU32 signature[96];
+
+ NvU64 gspBootArgsSysmemOffset;
+} NVDM_PAYLOAD_COT;
+#pragma pack()
+
+#pragma pack(1)
+typedef struct
+{
+ NvU32 taskId;
+ NvU32 commandNvdmType;
+ NvU32 errorCode;
+} NVDM_PAYLOAD_COMMAND_RESPONSE;
+#pragma pack()
+
+static u32
+gh100_fsp_poll(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ u32 head, tail;
+
+ head = nvkm_rd32(device, NV_PFSP_MSGQ_HEAD(0));
+ tail = nvkm_rd32(device, NV_PFSP_MSGQ_TAIL(0));
+
+ if (head == tail)
+ return 0;
+
+ return (tail - head) + sizeof(u32); /* TAIL points at last DWORD written. */
+}
+
+static int
+gh100_fsp_recv(struct nvkm_fsp *fsp, u8 *packet, u32 max_packet_size)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ u32 packet_size;
+ int ret;
+
+ packet_size = gh100_fsp_poll(fsp);
+ if (!packet_size || WARN_ON(packet_size % 4 || packet_size > max_packet_size))
+ return -EINVAL;
+
+ ret = nvkm_falcon_pio_rd(&fsp->falcon, 0, EMEM, 0, packet, 0, packet_size);
+ if (ret)
+ return ret;
+
+ nvkm_wr32(device, NV_PFSP_MSGQ_TAIL(0), 0);
+ nvkm_wr32(device, NV_PFSP_MSGQ_HEAD(0), 0);
+
+ return packet_size;
+}
+
+static int
+gh100_fsp_wait(struct nvkm_fsp *fsp)
+{
+ int time = 1000;
+
+ do {
+ if (gh100_fsp_poll(fsp))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time--);
+
+ return -ETIMEDOUT;
+}
+
+static int
+gh100_fsp_send(struct nvkm_fsp *fsp, const u8 *packet, u32 packet_size)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ int time = 1000, ret;
+
+ if (WARN_ON(packet_size % sizeof(u32)))
+ return -EINVAL;
+
+ /* Ensure any previously sent message has been consumed. */
+ do {
+ u32 head = nvkm_rd32(device, NV_PFSP_QUEUE_HEAD(0));
+ u32 tail = nvkm_rd32(device, NV_PFSP_QUEUE_TAIL(0));
+
+ if (tail == head)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (time--);
+
+ if (time < 0)
+ return -ETIMEDOUT;
+
+ /* Write message to EMEM. */
+ ret = nvkm_falcon_pio_wr(&fsp->falcon, packet, 0, 0, EMEM, 0, packet_size, 0, false);
+ if (ret)
+ return ret;
+
+ /* Update queue pointers - TAIL points at last DWORD written. */
+ nvkm_wr32(device, NV_PFSP_QUEUE_TAIL(0), packet_size - sizeof(u32));
+ nvkm_wr32(device, NV_PFSP_QUEUE_HEAD(0), 0);
+ return 0;
+}
+
+static int
+gh100_fsp_send_sync(struct nvkm_fsp *fsp, u8 nvdm_type, const u8 *packet, u32 packet_size)
+{
+ struct nvkm_subdev *subdev = &fsp->subdev;
+ struct {
+ u32 mctp_header;
+ u32 nvdm_header;
+ NVDM_PAYLOAD_COMMAND_RESPONSE response;
+ } reply;
+ int ret;
+
+ ret = gh100_fsp_send(fsp, packet, packet_size);
+ if (ret)
+ return ret;
+
+ ret = gh100_fsp_wait(fsp);
+ if (ret)
+ return ret;
+
+ ret = gh100_fsp_recv(fsp, (u8 *)&reply, sizeof(reply));
+ if (ret < 0)
+ return ret;
+
+ if (NVVAL_TEST(reply.mctp_header, MCTP, HEADER, SOM, !=, 1) ||
+ NVVAL_TEST(reply.mctp_header, MCTP, HEADER, EOM, !=, 1)) {
+ nvkm_error(subdev, "unexpected MCTP header in reply: 0x%08x\n", reply.mctp_header);
+ return -EIO;
+ }
+
+ if (NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, TYPE, !=, VENDOR_PCI) ||
+ NVDEF_TEST(reply.nvdm_header, MCTP, MSG_HEADER, VENDOR_ID, !=, NV) ||
+ NVVAL_TEST(reply.nvdm_header, MCTP, MSG_HEADER, NVDM_TYPE, !=, NVDM_TYPE_FSP_RESPONSE)) {
+ nvkm_error(subdev, "unexpected NVDM header in reply: 0x%08x\n", reply.nvdm_header);
+ return -EIO;
+ }
+
+ if (reply.response.commandNvdmType != nvdm_type) {
+ nvkm_error(subdev, "expected NVDM type 0x%02x in reply, got 0x%02x\n",
+ nvdm_type, reply.response.commandNvdmType);
+ return -EIO;
+ }
+
+ if (reply.response.errorCode) {
+ nvkm_error(subdev, "NVDM command 0x%02x failed with error 0x%08x\n",
+ nvdm_type, reply.response.errorCode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *fsp, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig)
+{
+ struct {
+ u32 mctp_header;
+ u32 nvdm_header;
+ NVDM_PAYLOAD_COT cot;
+ } msg = {};
+
+ msg.mctp_header = NVVAL(MCTP, HEADER, SOM, 1) |
+ NVVAL(MCTP, HEADER, EOM, 1) |
+ NVVAL(MCTP, HEADER, SEID, 0) |
+ NVVAL(MCTP, HEADER, SEQ, 0);
+
+ msg.nvdm_header = NVDEF(MCTP, MSG_HEADER, TYPE, VENDOR_PCI) |
+ NVDEF(MCTP, MSG_HEADER, VENDOR_ID, NV) |
+ NVVAL(MCTP, MSG_HEADER, NVDM_TYPE, NVDM_TYPE_COT);
+
+ msg.cot.version = fsp->func->cot.version;
+ msg.cot.size = sizeof(msg.cot);
+ msg.cot.gspFmcSysmemOffset = img_addr;
+ if (!resume) {
+ msg.cot.frtsVidmemOffset = ALIGN(rsvd_size, 0x200000);
+ msg.cot.frtsVidmemSize = 0x100000;
+ }
+
+ memcpy(msg.cot.hash384, hash, fsp->func->cot.size_hash);
+ memcpy(msg.cot.publicKey, pkey, fsp->func->cot.size_pkey);
+ memcpy(msg.cot.signature, sig, fsp->func->cot.size_sig);
+
+ msg.cot.gspBootArgsSysmemOffset = args_addr;
+
+ return gh100_fsp_send_sync(fsp, NVDM_TYPE_COT, (const u8 *)&msg, sizeof(msg));
+}
+
+int
+gh100_fsp_wait_secure_boot(struct nvkm_fsp *fsp)
+{
+ struct nvkm_device *device = fsp->subdev.device;
+ unsigned int timeout_ms = 4000;
+
+ do {
+ u32 status = NVKM_RD32(device, NV_THERM, I2CS_SCRATCH, FSP_BOOT_COMPLETE_STATUS);
+
+ if (status == NV_THERM_I2CS_SCRATCH_FSP_BOOT_COMPLETE_STATUS_SUCCESS)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout_ms--);
+
+ return -ETIMEDOUT;
+}
+
+static const struct nvkm_fsp_func
+gh100_fsp = {
+ .wait_secure_boot = gh100_fsp_wait_secure_boot,
+ .cot = {
+ .version = 1,
+ .size_hash = 48,
+ .size_pkey = 384,
+ .size_sig = 384,
+ .boot_gsp_fmc = gh100_fsp_boot_gsp_fmc,
+ },
+};
+
+int
+gh100_fsp_new(struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fsp **pfsp)
+{
+ return nvkm_fsp_new_(&gh100_fsp, device, type, inst, pfsp);
+}
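Note: a worked example of the headers gh100_fsp_boot_gsp_fmc() builds from the field definitions at the top of this file. A single-packet message sets SOM (bit 31) and EOM (bit 30) with every other MCTP field zero, and the NVDM header packs type/vendor/NVDM-type into one dword:

        u32 mctp = NVVAL(MCTP, HEADER, SOM, 1) |        /* 1u << 31 */
                   NVVAL(MCTP, HEADER, EOM, 1) |        /* 1u << 30 */
                   NVVAL(MCTP, HEADER, SEID, 0) |
                   NVVAL(MCTP, HEADER, SEQ, 0);         /* == 0xc0000000 */

        u32 nvdm = NVDEF(MCTP, MSG_HEADER, TYPE, VENDOR_PCI) |        /* 0x7e << 0 */
                   NVDEF(MCTP, MSG_HEADER, VENDOR_ID, NV) |           /* 0x10de << 8 */
                   NVVAL(MCTP, MSG_HEADER, NVDM_TYPE, NVDM_TYPE_COT); /* 0x14 << 24 */
                                                        /* == 0x1410de7e */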
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h
new file mode 100644
index 000000000000..f0b2c605c33d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fsp/priv.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_FSP_PRIV_H__
+#define __NVKM_FSP_PRIV_H__
+#define nvkm_fsp(p) container_of((p), struct nvkm_fsp, subdev)
+#include <subdev/fsp.h>
+
+struct nvkm_fsp_func {
+ int (*wait_secure_boot)(struct nvkm_fsp *);
+
+ struct {
+ u32 version;
+ u32 size_hash;
+ u32 size_pkey;
+ u32 size_sig;
+ int (*boot_gsp_fmc)(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+ } cot;
+};
+
+int nvkm_fsp_new_(const struct nvkm_fsp_func *,
+ struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fsp **);
+
+int gh100_fsp_wait_secure_boot(struct nvkm_fsp *);
+int gh100_fsp_boot_gsp_fmc(struct nvkm_fsp *, u64 args_addr, u32 rsvd_size, bool resume,
+ u64 img_addr, const u8 *hash, const u8 *pkey, const u8 *sig);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
index 16bf2f1bb780..e9c948b67bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
@@ -7,6 +7,9 @@ nvkm-y += nvkm/subdev/gsp/tu102.o
nvkm-y += nvkm/subdev/gsp/tu116.o
nvkm-y += nvkm/subdev/gsp/ga100.o
nvkm-y += nvkm/subdev/gsp/ga102.o
+nvkm-y += nvkm/subdev/gsp/gh100.o
nvkm-y += nvkm/subdev/gsp/ad102.o
+nvkm-y += nvkm/subdev/gsp/gb100.o
+nvkm-y += nvkm/subdev/gsp/gb202.o
-nvkm-y += nvkm/subdev/gsp/r535.o
+include $(src)/nvkm/subdev/gsp/rm/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index c849c6299c52..eb765da0876e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -22,30 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-ad102_gsp_r535_113_01 = {
+ad102_gsp = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ad10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ad10x_gpu,
};
static struct nvkm_gsp_fwif
ad102_gsps[] = {
- { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true },
+ { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true },
+ { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true },
{}
};
@@ -55,3 +52,15 @@ ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ad102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ad107, 570.144);
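Note: the NVKM_GSP_FIRMWARE_BOOTER() lines register the firmware files each chip/version pair needs. The macro is defined outside this hunk; it presumably expands to MODULE_FIRMWARE() declarations along these lines (an assumption, shown for orientation only):

        #define NVKM_GSP_FIRMWARE_BOOTER(chip, vers)                          \
        MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin");        \
        MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin");      \
        MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin");         \
        MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")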
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index da1bebb896f7..d23243a83a4c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -52,7 +52,7 @@ nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_gsp *gsp = nvkm_gsp(subdev);
- if (!gsp->func->fini)
+ if (!gsp->func->fini || !gsp->running)
return 0;
return gsp->func->fini(gsp, suspend);
@@ -80,6 +80,21 @@ nvkm_gsp_oneinit(struct nvkm_subdev *subdev)
return gsp->func->oneinit(gsp);
}
+void
+nvkm_gsp_dtor_fws(struct nvkm_gsp *gsp)
+{
+ nvkm_firmware_put(gsp->fws.fmc);
+ gsp->fws.fmc = NULL;
+ nvkm_firmware_put(gsp->fws.bl);
+ gsp->fws.bl = NULL;
+ nvkm_firmware_put(gsp->fws.booter.unload);
+ gsp->fws.booter.unload = NULL;
+ nvkm_firmware_put(gsp->fws.booter.load);
+ gsp->fws.booter.load = NULL;
+ nvkm_firmware_put(gsp->fws.rm);
+ gsp->fws.rm = NULL;
+}
+
static void *
nvkm_gsp_dtor(struct nvkm_subdev *subdev)
{
@@ -89,6 +104,7 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev)
gsp->func->dtor(gsp);
nvkm_falcon_dtor(&gsp->falcon);
+ kfree(gsp->rm);
return gsp;
}
@@ -101,6 +117,16 @@ nvkm_gsp = {
};
int
+nvkm_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
+ const struct firmware **pfw)
+{
+ char fwname[64];
+
+ snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
+ return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
+}
+
+int
nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_gsp **pgsp)
{
@@ -116,7 +142,19 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
return PTR_ERR(fwif);
gsp->func = fwif->func;
- gsp->rm = gsp->func->rm;
+
+ if (fwif->rm) {
+ nvkm_info(&gsp->subdev, "RM version: %s\n", fwif->ver);
+
+ gsp->rm = kzalloc(sizeof(*gsp->rm), GFP_KERNEL);
+ if (!gsp->rm)
+ return -ENOMEM;
+
+ gsp->rm->device = device;
+ gsp->rm->gpu = fwif->func->rm.gpu;
+ gsp->rm->wpr = fwif->rm->wpr;
+ gsp->rm->api = fwif->rm->api;
+ }
return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
&gsp->falcon);
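Note: nvkm_gsp_load_fw() centralizes the "gsp/<name>-<version>" naming used by the firmware declarations throughout this series. A usage sketch; the name/version strings are illustrative:

        static int
        example_load(struct nvkm_gsp *gsp)
        {
                const struct firmware *fw;
                int ret;

                /* Requests "gsp/gsp-570.144" under the driver's usual
                 * firmware search path via nvkm_firmware_get().
                 */
                ret = nvkm_gsp_load_fw(gsp, "gsp", "570.144", &fw);
                if (ret)
                        return ret;

                /* ...consume fw->data / fw->size... */
                nvkm_firmware_put(fw);
                return 0;
        }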
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
index 223f68b532ef..d201e8697226 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
@@ -39,29 +39,27 @@ ga100_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga100_gsp_r535_113_01 = {
+ga100_gsp = {
.flcn = &ga100_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga100_gpu,
};
static struct nvkm_gsp_fwif
ga100_gsps[] = {
- { 0, r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga100_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &ga100_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -72,3 +70,6 @@ ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
index 4c4b4168a266..917f7e2f6c46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
@@ -150,25 +150,21 @@ ga102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-ga102_gsp_r535_113_01 = {
+ga102_gsp_r535 = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ga10x",
- .wpr_heap.os_carveout_size = 20 << 20,
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 84 << 20,
-
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = ga102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &ga1xx_gpu,
};
static const struct nvkm_gsp_func
@@ -178,7 +174,8 @@ ga102_gsp = {
static struct nvkm_gsp_fwif
ga102_gsps[] = {
- { 0, r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &ga102_gsp_r535, &r570_rm_ga102, "570.144" },
+ { 0, tu102_gsp_load, &ga102_gsp_r535, &r535_rm_ga102, "535.113.01" },
{ -1, gv100_gsp_nofw, &ga102_gsp },
{}
};
@@ -189,3 +186,15 @@ ga102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(ga102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(ga102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga103, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga106, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(ga107, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
new file mode 100644
index 000000000000..12a3f2c1ed82
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb10x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb10x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb100_gsps[] = {
+ { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true },
+ {}
+};
+
+int
+gb100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb100, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb102, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
new file mode 100644
index 000000000000..c1d718172ddf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+gb202_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gb20x",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gb20x_gpu,
+};
+
+static struct nvkm_gsp_fwif
+gb202_gsps[] = {
+ { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true },
+ {}
+};
+
+int
+gb202_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gb202_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gb202, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb203, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb205, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb206, 570.144);
+NVKM_GSP_FIRMWARE_FMC(gb207, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
new file mode 100644
index 000000000000..ce31e8248807
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <linux/elf.h>
+#include <linux/crc32.h>
+
+#include <subdev/fb.h>
+#include <subdev/fsp.h>
+
+#include <rm/r570/nvrm/gsp.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_falcon_v4.h>
+#include <nvhw/ref/gh100/dev_riscv_pri.h>
+
+int
+gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ struct nvkm_falcon *falcon = &gsp->falcon;
+ int ret, time = 4000;
+
+ /* Shutdown RM. */
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ /* Wait for RISC-V to halt. */
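+	/* Poll budget: 4000 iterations of 1-2ms sleeps bounds the wait at roughly 4-8s. */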
+ do {
+ u32 data = nvkm_falcon_rd32(falcon, falcon->addr2 + NV_PRISCV_RISCV_CPUCTL);
+
+ if (NVVAL_GET(data, NV_PRISCV, RISCV_CPUCTL, HALTED))
+ return 0;
+
+ usleep_range(1000, 2000);
+	} while (time--);
+
+ return -ETIMEDOUT;
+}
+
+static bool
+gh100_gsp_lockdown_released(struct nvkm_gsp *gsp, u32 *mbox0)
+{
+ u32 data;
+
+ /* Wait for GSP access via BAR0 to be allowed. */
+ *mbox0 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX0);
+
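+	/* 0xbadf41xx here appears to be the PRI error pattern returned while
+	 * lockdown still blocks BAR0 reads, i.e. keep waiting.
+	 */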
+ if (*mbox0 && (*mbox0 & 0xffffff00) == 0xbadf4100)
+ return false;
+
+ /* Check if an error code has been reported. */
+ if (*mbox0) {
+ u32 mbox1 = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_MAILBOX1);
+
+		/* Any value other than the GSP_FMC_BOOT_PARAMS address is an error. */
+ if ((((u64)mbox1 << 32) | *mbox0) != gsp->fmc.args.addr)
+ return true;
+ }
+
+ /* Check if lockdown has been released. */
+ data = nvkm_falcon_rd32(&gsp->falcon, NV_PFALCON_FALCON_HWCFG2);
+ return !NVVAL_GET(data, NV_PFALCON, FALCON_HWCFG2, RISCV_BR_PRIV_LOCKDOWN);
+}
+
+int
+gh100_gsp_init(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ const bool resume = gsp->sr.meta.data != NULL;
+ struct nvkm_gsp_mem *meta;
+ GSP_FMC_BOOT_PARAMS *args;
+ int ret, time = 4000;
+ u32 rsvd_size;
+ u32 mbox0;
+
+ if (!resume) {
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*args), &gsp->fmc.args);
+ if (ret)
+ return ret;
+
+ meta = &gsp->wpr_meta;
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+ meta = &gsp->sr.meta;
+ }
+
+ args = gsp->fmc.args.data;
+
+ args->bootGspRmParams.gspRmDescOffset = meta->addr;
+ args->bootGspRmParams.gspRmDescSize = meta->size;
+ args->bootGspRmParams.target = GSP_DMA_TARGET_COHERENT_SYSTEM;
+ args->bootGspRmParams.bIsGspRmBoot = 1;
+
+ args->gspRmParams.target = GSP_DMA_TARGET_NONCOHERENT_SYSTEM;
+ args->gspRmParams.bootArgsOffset = gsp->libos.addr;
+
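+	/* The FB reservation starts at the non-WPR heap; chips with a PMU
+	 * reservation add it on top, aligned up to 2MiB.
+	 */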
+ rsvd_size = gsp->fb.heap.size;
+ if (gsp->rm->wpr->rsvd_size_pmu)
+ rsvd_size = ALIGN(rsvd_size + gsp->rm->wpr->rsvd_size_pmu, 0x200000);
+
+ ret = nvkm_fsp_boot_gsp_fmc(device->fsp, gsp->fmc.args.addr, rsvd_size, resume,
+ gsp->fmc.fw.addr, gsp->fmc.hash, gsp->fmc.pkey, gsp->fmc.sig);
+ if (ret)
+ return ret;
+
+ do {
+ if (gh100_gsp_lockdown_released(gsp, &mbox0))
+ break;
+
+ usleep_range(1000, 2000);
+	} while (time--);
+
+ if (time < 0) {
+ nvkm_error(subdev, "GSP-FMC boot timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mbox0) {
+ nvkm_error(subdev, "GSP-FMC boot failed (mbox: 0x%08x)\n", mbox0);
+ return -EIO;
+ }
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+gh100_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ gsp->fb.bios.vga_workspace.size = 128 * 1024;
+ gsp->fb.heap.size = gsp->rm->wpr->heap_size_non_wpr;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sizeOfRadix3Elf = gsp->fw.len;
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwHeapSize = tu102_gsp_wpr_heap_size(gsp);
+ meta->frtsSize = 0x100000;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->pmuReservedSize = gsp->rm->wpr->rsvd_size_pmu;
+ return 0;
+}
+
+/* The sh_flags value for the binary blobs in the ELF image */
+#define FMC_SHF_FLAGS (SHF_MASKPROC | SHF_MASKOS | SHF_OS_NONCONFORMING | SHF_ALLOC)
+
+#define ELF_HDR_SIZE ((u8)sizeof(struct elf32_hdr))
+#define ELF_SHDR_SIZE ((u8)sizeof(struct elf32_shdr))
+
+/* The FMC ELF header must be exactly this */
+static const u8 elf_header[] = {
+ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 1, 0, 0, 0, /* e_type, e_machine, e_version */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* e_entry, e_phoff */
+
+ ELF_HDR_SIZE, 0, 0, 0, 0, 0, 0, 0, /* e_shoff, e_flags */
+ ELF_HDR_SIZE, 0, 0, 0, /* e_ehsize, e_phentsize */
+ 0, 0, ELF_SHDR_SIZE, 0, /* e_phnum, e_shentsize */
+
+ 6, 0, 1, 0, /* e_shnum, e_shstrndx */
+};
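+
+/*
+ * e_shnum = 6 and e_shstrndx = 1: the null section, the string table, and
+ * the four blobs ("hash", "signature", "publickey", "image") fetched in
+ * gh100_gsp_oneinit() below.
+ */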
+
+/**
+ * elf_validate_sections - validate each section in the FMC ELF image
+ * @elf: ELF image
+ * @length: size of the entire ELF image
+ *
+ * Return: true if the section table and every section check out.
+ */
+static bool
+elf_validate_sections(const void *elf, size_t length)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+
+ /* The offset of the first section */
+ Elf32_Off section_begin = ehdr->e_shoff + ehdr->e_shnum * ehdr->e_shentsize;
+
+ if (section_begin > length)
+ return false;
+
+ /* The first section header is the null section, so skip it */
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (i == ehdr->e_shstrndx) {
+ if (shdr[i].sh_type != SHT_STRTAB)
+ return false;
+ if (shdr[i].sh_flags != SHF_STRINGS)
+ return false;
+ } else {
+ if (shdr[i].sh_type != SHT_PROGBITS)
+ return false;
+ if (shdr[i].sh_flags != FMC_SHF_FLAGS)
+ return false;
+ }
+
+ /* Ensure that each section is inside the image */
+ if (shdr[i].sh_offset < section_begin ||
+ (u64)shdr[i].sh_offset + shdr[i].sh_size > length)
+ return false;
+
+ /* Non-zero sh_info is a CRC */
+ if (shdr[i].sh_info) {
+ /* The kernel's CRC32 needs a pre- and post-xor to match standard CRCs */
+ u32 crc32 = crc32_le(~0, elf + shdr[i].sh_offset, shdr[i].sh_size) ^ ~0;
+
+ if (shdr[i].sh_info != crc32)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * elf_section - return a pointer to the data for a given section
+ * @elf: ELF image
+ * @name: section name to search for
+ * @len: pointer to returned length of found section
+ *
+ * Return: pointer to the section data, or NULL if the section is not found.
+ */
+static const void *
+elf_section(const void *elf, const char *name, unsigned int *len)
+{
+ const struct elf32_hdr *ehdr = elf;
+ const struct elf32_shdr *shdr = elf + ehdr->e_shoff;
+ const char *names = elf + shdr[ehdr->e_shstrndx].sh_offset;
+
+ for (unsigned int i = 1; i < ehdr->e_shnum; i++) {
+ if (!strcmp(&names[shdr[i].sh_name], name)) {
+ *len = shdr[i].sh_size;
+ return elf + shdr[i].sh_offset;
+ }
+ }
+
+ return NULL;
+}
+
+int
+gh100_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fsp *fsp = device->fsp;
+ const void *fw = gsp->fws.fmc->data;
+ const void *hash, *sig, *pkey, *img;
+ unsigned int img_len = 0, hash_len = 0, pkey_len = 0, sig_len = 0;
+ int ret;
+
+ if (gsp->fws.fmc->size < ELF_HDR_SIZE ||
+ memcmp(fw, elf_header, sizeof(elf_header)) ||
+ !elf_validate_sections(fw, gsp->fws.fmc->size)) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ hash = elf_section(fw, "hash", &hash_len);
+ sig = elf_section(fw, "signature", &sig_len);
+ pkey = elf_section(fw, "publickey", &pkey_len);
+ img = elf_section(fw, "image", &img_len);
+
+ if (!hash || !sig || !pkey || !img) {
+ nvkm_error(subdev, "fmc firmware image is invalid\n");
+ return -ENODATA;
+ }
+
+ if (!nvkm_fsp_verify_gsp_fmc(fsp, hash_len, pkey_len, sig_len))
+ return -EINVAL;
+
+ /* Load GSP-FMC FW into memory. */
+ ret = nvkm_gsp_mem_ctor(gsp, img_len, &gsp->fmc.fw);
+ if (ret)
+ return ret;
+
+ memcpy(gsp->fmc.fw.data, img, img_len);
+
+ gsp->fmc.hash = kmemdup(hash, hash_len, GFP_KERNEL);
+ gsp->fmc.pkey = kmemdup(pkey, pkey_len, GFP_KERNEL);
+ gsp->fmc.sig = kmemdup(sig, sig_len, GFP_KERNEL);
+ if (!gsp->fmc.hash || !gsp->fmc.pkey || !gsp->fmc.sig)
+ return -ENOMEM;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ return gh100_gsp_wpr_meta_init(gsp);
+}
+
+static const struct nvkm_gsp_func
+gh100_gsp = {
+ .flcn = &ga102_gsp_flcn,
+
+ .sig_section = ".fwsignature_gh100",
+
+ .dtor = r535_gsp_dtor,
+ .oneinit = gh100_gsp_oneinit,
+ .init = gh100_gsp_init,
+ .fini = gh100_gsp_fini,
+
+ .rm.gpu = &gh100_gpu,
+};
+
+int
+gh100_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "fmc", fwif->ver, &gsp->fws.fmc);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
+static struct nvkm_gsp_fwif
+gh100_gsps[] = {
+ { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true },
+ {}
+};
+
+int
+gh100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_gsp **pgsp)
+{
+ return nvkm_gsp_new_(gh100_gsps, device, type, inst, pgsp);
+}
+
+NVKM_GSP_FIRMWARE_FMC(gh100, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 9f4a62375a27..4f14e85fc69e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_GSP_PRIV_H__
#define __NVKM_GSP_PRIV_H__
#include <subdev/gsp.h>
+#include <rm/gpu.h>
enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
@@ -11,12 +12,32 @@ struct nvkm_gsp_fwif {
int version;
int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
const struct nvkm_gsp_func *func;
+ const struct nvkm_rm_impl *rm;
const char *ver;
bool enable;
};
+int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver,
+ const struct firmware **);
+void nvkm_gsp_dtor_fws(struct nvkm_gsp *);
+
int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
-int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+int tu102_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+int tu102_gsp_load_rm(struct nvkm_gsp *, const struct nvkm_gsp_fwif *);
+
+int gh100_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
+#define NVKM_GSP_FIRMWARE_BOOTER(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
+
+#define NVKM_GSP_FIRMWARE_FMC(chip,vers) \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/fmc-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-"#vers".bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-"#vers".bin")
struct nvkm_gsp_func {
const struct nvkm_falcon_func *flcn;
@@ -25,12 +46,6 @@ struct nvkm_gsp_func {
char *sig_section;
struct {
- u32 os_carveout_size;
- u32 base_size;
- u64 min_size;
- } wpr_heap;
-
- struct {
int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
@@ -41,7 +56,9 @@ struct nvkm_gsp_func {
int (*fini)(struct nvkm_gsp *, bool suspend);
int (*reset)(struct nvkm_gsp *);
- const struct nvkm_gsp_rm *rm;
+ struct {
+ const struct nvkm_rm_gpu *gpu;
+ } rm;
};
extern const struct nvkm_falcon_func tu102_gsp_flcn;
@@ -49,7 +66,10 @@ extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
+int tu102_gsp_init(struct nvkm_gsp *);
+int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
int tu102_gsp_reset(struct nvkm_gsp *);
+u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *);
extern const struct nvkm_falcon_func ga102_gsp_flcn;
extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
@@ -57,11 +77,14 @@ int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int ga102_gsp_reset(struct nvkm_gsp *);
+int gh100_gsp_oneinit(struct nvkm_gsp *);
+int gh100_gsp_init(struct nvkm_gsp *);
+int gh100_gsp_fini(struct nvkm_gsp *, bool suspend);
+
void r535_gsp_dtor(struct nvkm_gsp *);
int r535_gsp_oneinit(struct nvkm_gsp *);
int r535_gsp_init(struct nvkm_gsp *);
int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
-extern const struct nvkm_gsp_rm r535_gsp_rm;
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
new file mode 100644
index 000000000000..04037394a2da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/Kbuild
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+nvkm-y += nvkm/subdev/gsp/rm/client.o
+nvkm-y += nvkm/subdev/gsp/rm/engine.o
+nvkm-y += nvkm/subdev/gsp/rm/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/nvenc.o
+
+nvkm-y += nvkm/subdev/gsp/rm/tu1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ga100.o
+nvkm-y += nvkm/subdev/gsp/rm/ga1xx.o
+nvkm-y += nvkm/subdev/gsp/rm/ad10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gh100.o
+nvkm-y += nvkm/subdev/gsp/rm/gb10x.o
+nvkm-y += nvkm/subdev/gsp/rm/gb20x.o
+
+include $(src)/nvkm/subdev/gsp/rm/r535/Kbuild
+include $(src)/nvkm/subdev/gsp/rm/r570/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
new file mode 100644
index 000000000000..e1ce6355c35f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ad10x.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ad10x_gpu = {
+ .disp.class = {
+ .root = AD102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = AD102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = ADA_A,
+ .compute = ADA_COMPUTE_A,
+ },
+ .nvdec.class = NVC9B0_VIDEO_DECODER,
+ .nvenc.class = NVC9B7_VIDEO_ENCODER,
+ .ofa.class = NVC9FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
new file mode 100644
index 000000000000..72d3e3ca84c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/client.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "rm.h"
+
+void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+ const unsigned int id = client->object.handle - NVKM_RM_CLIENT(0);
+ struct nvkm_gsp *gsp = client->gsp;
+
+ if (!gsp)
+ return;
+
+ if (client->object.client)
+ nvkm_gsp_rm_free(&client->object);
+
+ mutex_lock(&gsp->client_id.mutex);
+ idr_remove(&gsp->client_id.idr, id);
+ mutex_unlock(&gsp->client_id.mutex);
+
+ client->gsp = NULL;
+}
+
+int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+ int id, ret;
+
+ if (WARN_ON(!gsp->rm))
+ return -ENOSYS;
+
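+	/* Client IDs come from gsp->client_id.idr and are folded into the RM
+	 * handle via NVKM_RM_CLIENT().
+	 */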
+ mutex_lock(&gsp->client_id.mutex);
+ id = idr_alloc(&gsp->client_id.idr, client, 0, NVKM_RM_CLIENT_MASK + 1, GFP_KERNEL);
+ mutex_unlock(&gsp->client_id.mutex);
+ if (id < 0)
+ return id;
+
+ client->gsp = gsp;
+ client->object.client = client;
+ INIT_LIST_HEAD(&client->events);
+
+ ret = gsp->rm->api->client->ctor(client, NVKM_RM_CLIENT(id));
+ if (ret)
+ nvkm_gsp_client_dtor(client);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
new file mode 100644
index 000000000000..3b0e83b2f57f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include "gpu.h"
+
+#include <core/object.h>
+#include <engine/fifo/chan.h>
+
+struct nvkm_rm_engine {
+ struct nvkm_engine engine;
+
+ struct nvkm_engine_func func;
+};
+
+struct nvkm_rm_engine_obj {
+ struct nvkm_object object;
+ struct nvkm_gsp_object rm;
+};
+
+static void*
+nvkm_rm_engine_obj_dtor(struct nvkm_object *object)
+{
+ struct nvkm_rm_engine_obj *obj = container_of(object, typeof(*obj), object);
+
+ nvkm_gsp_rm_free(&obj->rm);
+ return obj;
+}
+
+static const struct nvkm_object_func
+nvkm_rm_engine_obj = {
+ .dtor = nvkm_rm_engine_obj_dtor,
+};
+
+int
+nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_rm *rm = chan->client->gsp->rm;
+ const int inst = oclass->engine->subdev.inst;
+ const u32 class = oclass->base.oclass;
+ const u32 handle = oclass->handle;
+ struct nvkm_rm_engine_obj *obj;
+ int ret;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
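+	/* Dispatch to the per-API allocator for the engine type; GR takes no
+	 * engine-instance argument and is a plain RM alloc on the channel.
+	 */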
+ switch (oclass->engine->subdev.type) {
+ case NVKM_ENGINE_CE:
+ ret = rm->api->ce->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_GR:
+ ret = nvkm_gsp_rm_alloc(chan, handle, class, 0, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVDEC:
+ ret = rm->api->nvdec->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVENC:
+ ret = rm->api->nvenc->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_NVJPG:
+ ret = rm->api->nvjpg->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ case NVKM_ENGINE_OFA:
+ ret = rm->api->ofa->alloc(chan, handle, class, inst, &obj->rm);
+ break;
+ default:
+ ret = -EINVAL;
+ WARN_ON(1);
+ break;
+ }
+
+ if (ret) {
+ kfree(obj);
+ return ret;
+ }
+
+ nvkm_object_ctor(&nvkm_rm_engine_obj, oclass, &obj->object);
+ *pobject = &obj->object;
+ return 0;
+}
+
+static int
+nvkm_rm_engine_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+
+ return nvkm_rm_engine_obj_new(&chan->rm.object, chan->id, oclass, pobject);
+}
+
+static void *
+nvkm_rm_engine_dtor(struct nvkm_engine *engine)
+{
+ kfree(engine->func);
+ return engine;
+}
+
+int
+nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *rm,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *engine)
+{
+ struct nvkm_engine_func *func;
+
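+	/* nclass + 1: keep one zeroed sclass entry as the list terminator. */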
+ func = kzalloc(struct_size(func, sclass, nclass + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = dtor;
+
+ for (int i = 0; i < nclass; i++) {
+ func->sclass[i].oclass = class[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_engine_obj_ctor;
+ }
+
+ nvkm_engine_ctor(func, rm->device, type, inst, true, engine);
+ return 0;
+}
+
+static int
+nvkm_rm_engine_new_(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst, u32 class,
+ struct nvkm_engine **pengine)
+{
+ struct nvkm_engine *engine;
+ int ret;
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_engine_dtor, rm, type, inst, &class, 1, engine);
+ if (ret) {
+ kfree(engine);
+ return ret;
+ }
+
+ *pengine = engine;
+ return 0;
+}
+
+int
+nvkm_rm_engine_new(struct nvkm_rm *rm, enum nvkm_subdev_type type, int inst)
+{
+ const struct nvkm_rm_gpu *gpu = rm->gpu;
+ struct nvkm_device *device = rm->device;
+
+ switch (type) {
+ case NVKM_ENGINE_CE:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ce)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ce.class, &device->ce[inst]);
+ case NVKM_ENGINE_GR:
+ if (inst != 0)
+ return -ENODEV; /* MiG not supported, just ignore. */
+
+ return nvkm_rm_gr_new(rm);
+ case NVKM_ENGINE_NVDEC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvdec)))
+ return -EINVAL;
+
+ return nvkm_rm_nvdec_new(rm, inst);
+ case NVKM_ENGINE_NVENC:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvenc)))
+ return -EINVAL;
+
+ return nvkm_rm_nvenc_new(rm, inst);
+ case NVKM_ENGINE_NVJPG:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->nvjpg)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->nvjpg.class, &device->nvjpg[inst]);
+ case NVKM_ENGINE_OFA:
+ if (WARN_ON(inst >= ARRAY_SIZE(device->ofa)))
+ return -EINVAL;
+
+ return nvkm_rm_engine_new_(rm, type, inst, gpu->ofa.class, &device->ofa[inst]);
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
new file mode 100644
index 000000000000..5b8c9c3901d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/engine.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_ENGINE_H__
+#define __NVKM_RM_ENGINE_H__
+#include "gpu.h"
+
+int nvkm_rm_engine_ctor(void *(*dtor)(struct nvkm_engine *), struct nvkm_rm *,
+ enum nvkm_subdev_type type, int inst,
+ const u32 *class, int nclass, struct nvkm_engine *);
+int nvkm_rm_engine_new(struct nvkm_rm *, enum nvkm_subdev_type, int inst);
+
+int nvkm_rm_engine_obj_new(struct nvkm_gsp_object *chan, int chid, const struct nvkm_oclass *,
+ struct nvkm_object **);
+
+int nvkm_rm_gr_new(struct nvkm_rm *);
+int nvkm_rm_nvdec_new(struct nvkm_rm *, int inst);
+int nvkm_rm_nvenc_new(struct nvkm_rm *, int inst);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
new file mode 100644
index 000000000000..a48c6134075d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga100_gpu = {
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_A,
+ .compute = AMPERE_COMPUTE_A,
+ },
+ .nvdec.class = NVC6B0_VIDEO_DECODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
new file mode 100644
index 000000000000..50536ad7f85d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/ga1xx.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+ga1xx_gpu = {
+ .disp.class = {
+ .root = GA102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = GA102_DISP_CORE_CHANNEL_DMA,
+ .wndw = GA102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GA102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GA102_DISP_CURSOR,
+ },
+
+ .usermode.class = AMPERE_USERMODE_A,
+
+ .fifo.chan = {
+ .class = AMPERE_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = AMPERE_DMA_COPY_B,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = AMPERE_B,
+ .compute = AMPERE_COMPUTE_B,
+ },
+ .nvdec.class = NVC7B0_VIDEO_DECODER,
+ .nvenc.class = NVC7B7_VIDEO_ENCODER,
+ .ofa.class = NVC7FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
new file mode 100644
index 000000000000..2f517dcd721a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb10x.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb10x_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = BLACKWELL_DMA_COPY_A,
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_A,
+ .compute = BLACKWELL_COMPUTE_A,
+ },
+ .nvdec.class = NVCDB0_VIDEO_DECODER,
+ .nvjpg.class = NVCDD1_VIDEO_NVJPG,
+ .ofa.class = NVCDFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
new file mode 100644
index 000000000000..950471d9996e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gb20x.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/ce/priv.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gb20x_gpu = {
+ .disp.class = {
+ .root = GB202_DISP,
+ .caps = GB202_DISP_CAPS,
+ .core = GB202_DISP_CORE_CHANNEL_DMA,
+ .wndw = GB202_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = GB202_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = GB202_DISP_CURSOR,
+ },
+
+ .usermode.class = BLACKWELL_USERMODE_A,
+
+ .fifo.chan = {
+ .class = BLACKWELL_CHANNEL_GPFIFO_B,
+ .doorbell_handle = gb202_chan_doorbell_handle,
+ },
+
+ .ce = {
+ .class = BLACKWELL_DMA_COPY_B,
+ .grce_mask = gb202_ce_grce_mask,
+ },
+ .gr.class = {
+ .i2m = BLACKWELL_INLINE_TO_MEMORY_A,
+ .twod = FERMI_TWOD_A,
+ .threed = BLACKWELL_B,
+ .compute = BLACKWELL_COMPUTE_B,
+ },
+ .nvdec.class = NVCFB0_VIDEO_DECODER,
+ .nvenc.class = NVCFB7_VIDEO_ENCODER,
+ .nvjpg.class = NVCFD1_VIDEO_NVJPG,
+ .ofa.class = NVCFFA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
new file mode 100644
index 000000000000..49e2c54e1aa8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+gh100_gpu = {
+ .usermode.class = HOPPER_USERMODE_A,
+
+ .fifo.chan = {
+ .class = HOPPER_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = HOPPER_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = HOPPER_A,
+ .compute = HOPPER_COMPUTE_A,
+ },
+ .nvdec.class = NVB8B0_VIDEO_DECODER,
+ .nvjpg.class = NVB8D1_VIDEO_NVJPG,
+ .ofa.class = NVB8FA_VIDEO_OFA,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
new file mode 100644
index 000000000000..46a6325641b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gpu.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_GPU_H__
+#define __NVKM_RM_GPU_H__
+#include "rm.h"
+
+struct nvkm_rm_gpu {
+ struct {
+ struct {
+ u32 root;
+ u32 caps;
+ u32 core;
+ u32 wndw;
+ u32 wimm;
+ u32 curs;
+ } class;
+ } disp;
+
+ struct {
+ u32 class;
+ } usermode;
+
+ struct {
+ struct {
+ u32 class;
+ u32 (*doorbell_handle)(struct nvkm_chan *);
+ } chan;
+ } fifo;
+
+ struct {
+ u32 class;
+ u32 (*grce_mask)(struct nvkm_device *);
+ } ce;
+
+ struct {
+ struct {
+ u32 i2m;
+ u32 twod;
+ u32 threed;
+ u32 compute;
+ } class;
+ } gr;
+
+ struct {
+ u32 class;
+ } nvdec;
+
+ struct {
+ u32 class;
+ } nvenc;
+
+ struct {
+ u32 class;
+ } nvjpg;
+
+ struct {
+ u32 class;
+ } ofa;
+};
+
+extern const struct nvkm_rm_gpu tu1xx_gpu;
+extern const struct nvkm_rm_gpu ga100_gpu;
+extern const struct nvkm_rm_gpu ga1xx_gpu;
+extern const struct nvkm_rm_gpu ad10x_gpu;
+extern const struct nvkm_rm_gpu gh100_gpu;
+extern const struct nvkm_rm_gpu gb10x_gpu;
+extern const struct nvkm_rm_gpu gb20x_gpu;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
new file mode 100644
index 000000000000..f40b8fcc2bcb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.c
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gr.h"
+
+#include <engine/fifo.h>
+#include <engine/gr/priv.h>
+
+static int
+nvkm_rm_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+
+ return nvkm_rm_engine_obj_new(&chan->chan->rm.object, chan->chan->id, oclass, pobject);
+}
+
+static int
+nvkm_rm_gr_fini(struct nvkm_gr *base, bool suspend)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+
+ if (rm->api->gr->scrubber.fini)
+ rm->api->gr->scrubber.fini(gr);
+
+ return 0;
+}
+
+static int
+nvkm_rm_gr_init(struct nvkm_gr *base)
+{
+ struct nvkm_rm *rm = base->engine.subdev.device->gsp->rm;
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ int ret;
+
+ if (rm->api->gr->scrubber.init) {
+ ret = rm->api->gr->scrubber.init(gr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nvkm_rm_gr_new(struct nvkm_rm *rm)
+{
+ const u32 classes[] = {
+ rm->gpu->gr.class.i2m,
+ rm->gpu->gr.class.twod,
+ rm->gpu->gr.class.threed,
+ rm->gpu->gr.class.compute,
+ };
+ struct nvkm_gr_func *func;
+ struct r535_gr *gr;
+
+ func = kzalloc(struct_size(func, sclass, ARRAY_SIZE(classes) + 1), GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
+ func->dtor = r535_gr_dtor;
+ func->oneinit = r535_gr_oneinit;
+ func->init = nvkm_rm_gr_init;
+ func->fini = nvkm_rm_gr_fini;
+ func->units = r535_gr_units;
+ func->chan_new = r535_gr_chan_new;
+
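+	/* Mirror the per-GPU class numbers into this engine's sclass list. */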
+ for (int i = 0; i < ARRAY_SIZE(classes); i++) {
+ func->sclass[i].oclass = classes[i];
+ func->sclass[i].minver = -1;
+ func->sclass[i].maxver = 0;
+ func->sclass[i].ctor = nvkm_rm_gr_obj_ctor;
+ }
+
+ gr = kzalloc(sizeof(*gr), GFP_KERNEL);
+ if (!gr) {
+ kfree(func);
+ return -ENOMEM;
+ }
+
+ nvkm_gr_ctor(func, rm->device, NVKM_ENGINE_GR, 0, true, &gr->base);
+ gr->scrubber.chid = -1;
+ rm->device->gr = &gr->base;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
new file mode 100644
index 000000000000..24980f23aab9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/gr.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_RM_GR_H__
+#define __NVKM_RM_GR_H__
+#include "engine.h"
+
+#include <core/object.h>
+#include <engine/gr.h>
+
+#define R515_GR_MAX_CTXBUFS 9
+
+struct r535_gr_chan {
+ struct nvkm_object object;
+ struct r535_gr *gr;
+
+ struct nvkm_vmm *vmm;
+ struct nvkm_chan *chan;
+
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr {
+ struct nvkm_gr base;
+
+ struct {
+ u16 bufferId;
+ u32 size;
+ u8 page;
+ u8 align;
+ bool global;
+ bool init;
+ bool ro;
+ } ctxbuf[R515_GR_MAX_CTXBUFS];
+ int ctxbuf_nr;
+
+ struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
+
+ struct {
+ int chid;
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_gsp_object threed;
+ struct {
+ struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } ctxbuf;
+ bool enabled;
+ } scrubber;
+};
+
+struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+void r535_gr_get_ctxbuf_info(struct r535_gr *, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
new file mode 100644
index 000000000000..3bdb5ad320d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/handles.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_HANDLES_H__
+#define __NVKM_RM_HANDLES_H__
+
+/* RMAPI handles for various objects allocated from GSP-RM with RM_ALLOC. */
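+/* The values are arbitrary host-side tags; only NVKM_RM_CLIENT() and
+ * NVKM_RM_CHAN() encode an index in their low bits.
+ */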
+
+#define NVKM_RM_CLIENT(id) (0xc1d00000 | (id))
+#define NVKM_RM_CLIENT_MASK 0x0000ffff
+#define NVKM_RM_DEVICE 0xde1d0000
+#define NVKM_RM_SUBDEVICE 0x5d1d0000
+#define NVKM_RM_DISP 0x00730000
+#define NVKM_RM_VASPACE 0x90f10000
+#define NVKM_RM_CHAN(chid) (0xf1f00000 | (chid))
+#define NVKM_RM_THREED 0x97000000
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
new file mode 100644
index 000000000000..d9fbfc377864
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvdec.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvdec.h>
+
+static void *
+nvkm_rm_nvdec_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvdec, engine);
+}
+
+int
+nvkm_rm_nvdec_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvdec *nvdec;
+ int ret;
+
+ nvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL);
+ if (!nvdec)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvdec_dtor, rm, NVKM_ENGINE_NVDEC, inst,
+ &rm->gpu->nvdec.class, 1, &nvdec->engine);
+ if (ret) {
+ kfree(nvdec);
+ return ret;
+ }
+
+ rm->device->nvdec[inst] = nvdec;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
new file mode 100644
index 000000000000..6dfa7b789e07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/nvenc.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "engine.h"
+#include <engine/nvenc.h>
+
+static void *
+nvkm_rm_nvenc_dtor(struct nvkm_engine *engine)
+{
+ return container_of(engine, struct nvkm_nvenc, engine);
+}
+
+int
+nvkm_rm_nvenc_new(struct nvkm_rm *rm, int inst)
+{
+ struct nvkm_nvenc *nvenc;
+ int ret;
+
+ nvenc = kzalloc(sizeof(*nvenc), GFP_KERNEL);
+ if (!nvenc)
+ return -ENOMEM;
+
+ ret = nvkm_rm_engine_ctor(nvkm_rm_nvenc_dtor, rm, NVKM_ENGINE_NVENC, inst,
+ &rm->gpu->nvenc.class, 1, &nvenc->engine);
+ if (ret) {
+ kfree(nvenc);
+ return ret;
+ }
+
+ rm->device->nvenc[inst] = nvenc;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
new file mode 100644
index 000000000000..a5f6b2abfd33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/Kbuild
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/rpc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ctrl.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/alloc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/device.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/bar.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/vmm.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/disp.o
+
+nvkm-y += nvkm/subdev/gsp/rm/r535/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ce.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvdec.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvenc.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/nvjpg.o
+nvkm-y += nvkm/subdev/gsp/rm/r535/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
new file mode 100644
index 000000000000..46e3a29f2ad7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/alloc.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/alloc.h"
+#include "nvrm/rpcfn.h"
+
+static int
+r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_free_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
+ client->object.handle, object->handle);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
+ if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+ return -EIO;
+
+ rpc->params.hRoot = client->object.handle;
+ rpc->params.hObjectParent = 0;
+ rpc->params.hObjectOld = object->handle;
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
+}
+
+static void
+r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ void *ret = NULL;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc));
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->status) {
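+		/* Suppress logging for -EAGAIN/-EBUSY, which callers may retry. */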
+ ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
+ if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
+ u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_alloc_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
+ client->object.handle, object->parent->handle,
+ object->handle);
+
+ nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
+ params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hParent = object->parent->handle;
+ rpc->hObject = object->handle;
+ rpc->hClass = oclass;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_alloc
+r535_alloc = {
+ .get = r535_gsp_rpc_rm_alloc_get,
+ .push = r535_gsp_rpc_rm_alloc_push,
+ .done = r535_gsp_rpc_rm_alloc_done,
+ .free = r535_gsp_rpc_rm_free,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
index 3a30bea30e36..d06bf95b9a4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/bar.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include <subdev/bar/gf100.h>
#include <core/mm.h>
#include <subdev/fb.h>
@@ -27,14 +27,20 @@
#include <subdev/instmem.h>
#include <subdev/mmu/vmm.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+#include "nvrm/bar.h"
+#include "nvrm/rpcfn.h"
static void
r535_bar_flush(struct nvkm_bar *bar)
{
+	/* Use NV_PFLUSH in the resume path - on R570, writes must be flushed
+	 * before the BAR2 page tables have been restored.
+	 */
+ if (unlikely(!bar->bar2)) {
+ g84_bar_flush(bar);
+ return;
+ }
+
ioread32_native(bar->flushBAR2);
}
@@ -44,7 +50,7 @@ r535_bar_bar2_wait(struct nvkm_bar *base)
}
static int
-r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe)
{
rpc_update_bar_pde_v15_00 *rpc;
@@ -53,21 +59,22 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
return -EIO;
rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
- rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
- rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+ rpc->info.entryValue = pdbe;
+ rpc->info.entryLevelShift = page_shift;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
static void
r535_bar_bar2_fini(struct nvkm_bar *bar)
{
+ struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
struct nvkm_gsp *gsp = bar->subdev.device->gsp;
bar->flushBAR2 = bar->flushBAR2PhysMode;
nvkm_done(bar->flushFBZero);
- WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, 0));
}
static void
@@ -76,8 +83,18 @@ r535_bar_bar2_init(struct nvkm_bar *bar)
struct nvkm_device *device = bar->subdev.device;
struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
struct nvkm_gsp *gsp = device->gsp;
-
- WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
+ struct nvkm_memory *pdb = vmm->pd->pt[0]->memory;
+ u32 pdb_offset = vmm->pd->pt[0]->base;
+ u32 pdbe_lo, pdbe_hi;
+ u64 pdbe;
+
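+	/* Read back the live BAR2 root PDE (lo/hi words) rather than
+	 * recomputing it, so GSP-RM mirrors exactly what the vmm wrote.
+	 */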
+ nvkm_kmap(pdb);
+ pdbe_lo = nvkm_ro32(pdb, pdb_offset + 0);
+ pdbe_hi = nvkm_ro32(pdb, pdb_offset + 4);
+ pdbe = ((u64)pdbe_hi << 32) | pdbe_lo;
+ nvkm_done(pdb);
+
+ WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, pdbe));
vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
if (!bar->flushFBZero) {
@@ -174,7 +191,7 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
}
*pbar = bar;
- bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+ bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
new file mode 100644
index 000000000000..2d1ce9db2dcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ce.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ce.h"
+#include "nvrm/engine.h"
+
+static int
+r535_ce_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ce)
+{
+ NVC0B5_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ce);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->version = 1;
+ args->engineType = NV2080_ENGINE_TYPE_COPY0 + inst;
+
+ return nvkm_gsp_rm_alloc_wr(ce, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_ce = {
+ .alloc = r535_ce_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
index 1b4619ff9e8e..ec71f683e609 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/client.c
@@ -19,26 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/rm.h>
-#include <subdev/gsp.h>
+#include "nvrm/client.h"
-#include <nvif/class.h>
+static int
+r535_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
-static const struct nvkm_engine_func
-ad102_nvenc = {
- .sclass = {
- { -1, -1, NVC9B7_VIDEO_ENCODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
-int
-ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvenc **pnvenc)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc);
+ args->hClient = client->object.handle;
+ args->processID = ~0;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
}
+
+const struct nvkm_rm_api_client
+r535_client = {
+ .ctor = r535_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
new file mode 100644
index 000000000000..70b9ee911c5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ctrl.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/ctrl.h"
+#include "nvrm/rpcfn.h"
+
+static void
+r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
+
+ if (!params)
+ return;
+ nvkm_gsp_rpc_done(object->client->gsp, rpc);
+}
+
+static int
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
+{
+ rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
+ struct nvkm_gsp *gsp = object->client->gsp;
+ int ret = 0;
+
+ rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc);
+ if (IS_ERR_OR_NULL(rpc)) {
+ *params = NULL;
+ return PTR_ERR(rpc);
+ }
+
+ if (rpc->status) {
+ ret = r535_rpc_status_to_errno(rpc->status);
+ if (ret != -EAGAIN && ret != -EBUSY)
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
+ }
+
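+	/* With repc != 0, ownership of the RPC buffer passes to the caller,
+	 * which must complete it via the done() hook.
+	 */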
+ if (repc)
+ *params = rpc->params;
+ else
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ return ret;
+}
+
+static void *
+r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
+{
+ struct nvkm_gsp_client *client = object->client;
+ struct nvkm_gsp *gsp = client->gsp;
+ rpc_gsp_rm_control_v03_00 *rpc;
+
+ nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
+ client->object.handle, object->handle, cmd, params_size);
+
+ rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+ sizeof(*rpc) + params_size);
+ if (IS_ERR(rpc))
+ return rpc;
+
+ rpc->hClient = client->object.handle;
+ rpc->hObject = object->handle;
+ rpc->cmd = cmd;
+ rpc->status = 0;
+ rpc->paramsSize = params_size;
+ return rpc->params;
+}
+
+const struct nvkm_rm_api_ctrl
+r535_ctrl = {
+ .get = r535_gsp_rpc_rm_ctrl_get,
+ .push = r535_gsp_rpc_rm_ctrl_push,
+ .done = r535_gsp_rpc_rm_ctrl_done,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
new file mode 100644
index 000000000000..f830e12a8f6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/device.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/device.h"
+#include "nvrm/event.h"
+
+static void
+r535_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+
+ mutex_lock(&gsp->client_id.mutex);
+ if (event->func) {
+ list_del(&event->head);
+ event->func = NULL;
+ }
+ mutex_unlock(&gsp->client_id.mutex);
+
+ nvkm_gsp_rm_free(&event->object);
+ event->device = NULL;
+}
+
+static int
+r535_gsp_device_event_get(struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_device *device = event->device;
+ NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
+ NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->event = event->id;
+ ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
+ return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
+}
+
+static int
+r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+ nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+ struct nvkm_gsp_client *client = device->object.client;
+ struct nvkm_gsp *gsp = client->gsp;
+ NV0005_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
+ NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
+ &event->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hParentClient = client->object.handle;
+ args->hSrcResource = 0;
+ args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
+ args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
+ args->data = NULL;
+
+ ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
+ if (ret)
+ return ret;
+
+ event->device = device;
+ event->id = id;
+
+ ret = r535_gsp_device_event_get(event);
+ if (ret) {
+ nvkm_gsp_event_dtor(event);
+ return ret;
+ }
+
+ mutex_lock(&gsp->client_id.mutex);
+ event->func = func;
+ list_add(&event->head, &client->events);
+ mutex_unlock(&gsp->client_id.mutex);
+ return 0;
+}
+
+static void
+r535_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+ nvkm_gsp_rm_free(&device->subdevice);
+ nvkm_gsp_rm_free(&device->object);
+}
+
+static int
+r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
+{
+ NV2080_ALLOC_PARAMETERS *args;
+
+ return nvkm_gsp_rm_alloc(&device->object, NVKM_RM_SUBDEVICE, NV20_SUBDEVICE_0,
+ sizeof(*args), &device->subdevice);
+}
+
+static int
+r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+ NV0080_ALLOC_PARAMETERS *args;
+ int ret;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, NVKM_RM_DEVICE, NV01_DEVICE_0, sizeof(*args),
+ &device->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClientShare = client->object.handle;
+
+ ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_subdevice_ctor(device);
+ if (ret)
+ nvkm_gsp_rm_free(&device->object);
+
+ return ret;
+}
+
+const struct nvkm_rm_api_device
+r535_device = {
+ .ctor = r535_gsp_device_ctor,
+ .dtor = r535_gsp_device_dtor,
+ .event.ctor = r535_gsp_device_event_ctor,
+ .event.dtor = r535_gsp_event_dtor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
index 99110ab2f44d..7e9e2d3564da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
@@ -19,13 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-#include "chan.h"
-#include "conn.h"
-#include "dp.h"
-#include "head.h"
-#include "ior.h"
-#include "outp.h"
+#include <engine/disp/priv.h>
+#include <engine/disp/chan.h>
+#include <engine/disp/conn.h>
+#include <engine/disp/dp.h>
+#include <engine/disp/head.h>
+#include <engine/disp/ior.h>
+#include <engine/disp/outp.h>
#include <core/ramht.h>
#include <subdev/bios.h>
@@ -34,19 +34,11 @@
#include <subdev/mmu.h>
#include <subdev/vfn.h>
+#include <rm/gpu.h>
+
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
+#include "nvrm/disp.h"
#include <linux/acpi.h>
@@ -78,9 +70,9 @@ r535_chan_fini(struct nvkm_disp_chan *chan)
}
static int
-r535_chan_push(struct nvkm_disp_chan *chan)
+r535_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
{
- struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -89,8 +81,8 @@ r535_chan_push(struct nvkm_disp_chan *chan)
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- if (chan->memory) {
- switch (nvkm_memory_target(chan->memory)) {
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
case NVKM_MEM_TARGET_NCOH:
ctrl->addressSpace = ADDR_SYSMEM;
ctrl->cacheSnoop = 0;
@@ -107,13 +99,13 @@ r535_chan_push(struct nvkm_disp_chan *chan)
return -EINVAL;
}
- ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
- ctrl->limit = nvkm_memory_size(chan->memory) - 1;
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
}
- ctrl->hclass = chan->object.oclass;
- ctrl->channelInstance = chan->head;
- ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
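+ /* Cursor channels (oclass low byte 0x7a) are PIO and have no pushbuffer to validate. */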
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
}
@@ -121,10 +113,11 @@ r535_chan_push(struct nvkm_disp_chan *chan)
static int
r535_curs_init(struct nvkm_disp_chan *chan)
{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
int ret;
- ret = r535_chan_push(chan);
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, NULL);
if (ret)
return ret;
@@ -172,25 +165,34 @@ r535_dmac_fini(struct nvkm_disp_chan *chan)
}
static int
-r535_dmac_init(struct nvkm_disp_chan *chan)
+r535_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
{
NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
- int ret;
-
- ret = r535_chan_push(chan);
- if (ret)
- return ret;
- args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
- (chan->object.oclass << 16) | chan->head,
- chan->object.oclass, sizeof(*args), &chan->rm.object);
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
if (IS_ERR(args))
return PTR_ERR(args);
- args->channelInstance = chan->head;
- args->offset = chan->suspend_put;
+ args->channelInstance = inst;
+ args->offset = put_offset;
- return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+ const struct nvkm_rm_api *rmapi = chan->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->chan.set_pushbuf(chan->disp, chan->object.oclass, chan->head, chan->memory);
+ if (ret)
+ return ret;
+
+ return rmapi->disp->chan.dmac_alloc(chan->disp, chan->object.oclass, chan->head,
+ chan->suspend_put, &chan->rm.object);
}
static int
@@ -260,47 +262,47 @@ r535_core = {
};
static int
-r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+r535_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval)
{
- struct nvkm_disp *disp = sor->disp;
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
- sizeof(*ctrl));
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl->displayId = BIT(sor->asy.outp->index);
- ctrl->brightness = lvl;
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
- return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
}
static int
-r535_sor_bl_get(struct nvkm_ior *sor)
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
{
struct nvkm_disp *disp = sor->disp;
- NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
- int ret, lvl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
- sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
- ctrl->displayId = BIT(sor->asy.outp->index);
+ return rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, true, &lvl);
+}
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
- }
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+ struct nvkm_disp *disp = sor->disp;
+ const struct nvkm_rm_api *rmapi = disp->engine.subdev.device->gsp->rm->api;
+ int lvl, ret = rmapi->disp->bl_ctrl(disp, sor->asy.outp->index, false, &lvl);
- lvl = ctrl->brightness;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return lvl;
+ return (ret == 0) ? lvl : ret;
}
static const struct nvkm_ior_func_bl
@@ -730,7 +732,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
}
static int
-r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
+r535_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid)
{
NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
int ret;
@@ -763,7 +765,9 @@ r535_outp_inherit(struct nvkm_outp *outp)
int ret;
list_for_each_entry(head, &disp->heads, head) {
- ret = r535_disp_head_displayid(disp, head->id, &displayid);
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
+
+ ret = rmapi->disp->get_active(disp, head->id, &displayid);
if (WARN_ON(ret))
return NULL;
@@ -858,10 +862,9 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
}
static int
-r535_outp_detect(struct nvkm_outp *outp)
+r535_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id)
{
NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
- struct nvkm_disp *disp = outp->disp;
int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
@@ -870,23 +873,29 @@ r535_outp_detect(struct nvkm_outp *outp)
return PTR_ERR(ctrl);
ctrl->subDeviceInstance = 0;
- ctrl->displayMask = BIT(outp->index);
+ ctrl->displayMask = BIT(display_id);
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
- }
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
- if (ctrl->displayMask & BIT(outp->index)) {
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+ const struct nvkm_rm_api *rmapi = outp->disp->rm.objcom.client->gsp->rm->api;
+ int ret;
+
+ ret = rmapi->disp->get_connect_state(outp->disp, outp->index);
+ if (ret == 1) {
ret = r535_outp_dfp_get_info(outp);
if (ret == 0)
ret = 1;
- } else {
- ret = 0;
}
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
@@ -1029,15 +1038,11 @@ r535_dp_train(struct nvkm_outp *outp, bool retrain)
}
static int
-r535_dp_rates(struct nvkm_outp *outp)
+r535_dp_set_indexed_link_rates(struct nvkm_outp *outp)
{
NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
- if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
- !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
- return 0;
-
if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
return -EINVAL;
@@ -1054,6 +1059,18 @@ r535_dp_rates(struct nvkm_outp *outp)
}
static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+ struct nvkm_rm *rm = outp->disp->rm.objcom.client->gsp->rm;
+
+ if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+ !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+ return 0;
+
+ return rm->api->disp->dp.set_indexed_link_rates(outp);
+}
+
+static int
r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
{
struct nvkm_disp *disp = outp->disp;
@@ -1151,6 +1168,49 @@ r535_dp = {
};
static int
+r535_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
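+ /* Convert RM's max link rate to a DPCD link-bandwidth code (units of 270Mbps per lane). */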
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
{
NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
@@ -1194,6 +1254,7 @@ r535_tmds = {
static int
r535_outp_new(struct nvkm_disp *disp, u32 id)
{
+ const struct nvkm_rm_api *rmapi = disp->rm.objcom.client->gsp->rm->api;
NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
enum nvkm_ior_proto proto;
struct dcb_output dcbE = {};
@@ -1278,43 +1339,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
if (ret)
return ret;
} else {
- NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
bool mst, wm;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
- NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->sorIndex = ~0;
-
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ ret = rmapi->disp->dp.get_caps(disp, &dcbE.dpconf.link_bw, &mst, &wm);
+ if (ret)
return ret;
- }
-
- switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
- dcbE.dpconf.link_bw = 0x06;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
- dcbE.dpconf.link_bw = 0x0a;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
- dcbE.dpconf.link_bw = 0x14;
- break;
- case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
- dcbE.dpconf.link_bw = 0x1e;
- break;
- default:
- dcbE.dpconf.link_bw = 0x00;
- break;
- }
-
- mst = ctrl->bIsMultistreamSupported;
- wm = ctrl->bHasIncreasedWatermarkLimits;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
if (WARN_ON(!dcbE.dpconf.link_bw))
return -EINVAL;
@@ -1441,11 +1470,47 @@ r535_disp_init(struct nvkm_disp *disp)
}
static int
+r535_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r535_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
r535_disp_oneinit(struct nvkm_disp *disp)
{
struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_gsp *gsp = device->gsp;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+ unsigned long mask;
int ret, i;
/* RAMIN. */
@@ -1476,24 +1541,14 @@ r535_disp_oneinit(struct nvkm_disp *disp)
if (ret)
return ret;
- ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
+ ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, NVKM_RM_DISP, NV04_DISPLAY_COMMON, 0,
&disp->rm.objcom);
if (ret)
return ret;
- {
- NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
- NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
- sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- disp->wndw.mask = ctrl->windowPresentMask;
- disp->wndw.nr = fls(disp->wndw.mask);
- nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
- }
+ ret = rmapi->disp->get_static_info(disp);
+ if (ret)
+ return ret;
/* */
{
@@ -1622,25 +1677,14 @@ r535_disp_oneinit(struct nvkm_disp *disp)
return ret;
}
- /* */
- {
- NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
- unsigned long mask;
- int i;
-
- ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
- NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- mask = ctrl->displayMask;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ ret = rmapi->disp->get_supported(disp, &mask);
+ if (ret)
+ return ret;
- for_each_set_bit(i, &mask, 32) {
- ret = r535_outp_new(disp, i);
- if (ret)
- return ret;
- }
+ for_each_set_bit(i, &mask, 32) {
+ ret = r535_outp_new(disp, i);
+ if (ret)
+ return ret;
}
ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
@@ -1686,6 +1730,7 @@ int
r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_disp_func *rm;
int ret;
@@ -1701,20 +1746,26 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
rm->sor.new = r535_sor_new;
rm->ramht_size = hw->ramht_size;
- rm->root = hw->root;
+ rm->root.oclass = gpu->disp.class.root;
- for (int i = 0; hw->user[i].ctor; i++) {
- switch (hw->user[i].base.oclass & 0xff) {
- case 0x73: rm->user[i] = hw->user[i]; break;
- case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
- case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
- case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
- case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
- default:
- WARN_ON(1);
- continue;
- }
- }
+ rm->user[0].base.oclass = gpu->disp.class.caps;
+ rm->user[0].ctor = gv100_disp_caps_new;
+
+ rm->user[1].base.oclass = gpu->disp.class.core;
+ rm->user[1].ctor = nvkm_disp_core_new;
+ rm->user[1].chan = &r535_core;
+
+ rm->user[2].base.oclass = gpu->disp.class.wndw;
+ rm->user[2].ctor = nvkm_disp_wndw_new;
+ rm->user[2].chan = &r535_wndw;
+
+ rm->user[3].base.oclass = gpu->disp.class.wimm;
+ rm->user[3].ctor = nvkm_disp_wndw_new;
+ rm->user[3].chan = &r535_wimm;
+
+ rm->user[4].base.oclass = gpu->disp.class.curs;
+ rm->user[4].ctor = nvkm_disp_chan_new;
+ rm->user[4].chan = &r535_curs;
ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
if (ret)
@@ -1723,3 +1774,20 @@ r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
mutex_init(&(*pdisp)->super.mutex); //XXX
return ret;
}
+
+const struct nvkm_rm_api_disp
+r535_disp = {
+ .get_static_info = r535_disp_get_static_info,
+ .get_supported = r535_disp_get_supported,
+ .get_connect_state = r535_disp_get_connect_state,
+ .get_active = r535_disp_get_active,
+ .bl_ctrl = r535_bl_ctrl,
+ .dp = {
+ .get_caps = r535_dp_get_caps,
+ .set_indexed_link_rates = r535_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r535_disp_chan_set_pushbuf,
+ .dmac_alloc = r535_dmac_alloc,
+ }
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
index 5f3c9c02a4c0..150e22fde2ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
@@ -19,19 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-
+#include <subdev/instmem/priv.h>
#include <subdev/gsp.h>
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/fbsr.h"
+#include "nvrm/rpcfn.h"
struct fbsr_item {
const char *type;
@@ -54,9 +48,9 @@ struct fbsr {
u64 sys_offset;
};
-static int
-fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
- u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+int
+r535_fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+ u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
{
struct nvkm_gsp_client *client = device->object.client;
struct nvkm_gsp *gsp = client->gsp;
@@ -105,7 +99,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target
rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
}
- ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
+ ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
if (ret)
return ret;
@@ -123,8 +117,8 @@ fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
struct nvkm_gsp_object memlist;
int ret;
- ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
- item->addr, item->size, NULL, &memlist);
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+ item->addr, item->size, NULL, &memlist);
if (ret)
return ret;
@@ -161,8 +155,8 @@ fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
struct nvkm_gsp_object memlist;
int ret;
- ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
- 0, fbsr->size, sgt, &memlist);
+ ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+ 0, fbsr->size, sgt, &memlist);
if (ret)
return ret;
@@ -206,22 +200,19 @@ fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
}
-static void
-r535_instmem_resume(struct nvkm_instmem *imem)
+void
+r535_fbsr_resume(struct nvkm_gsp *gsp)
{
/* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
- if (imem->rm.fbsr_valid) {
- nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
- imem->rm.fbsr_valid = false;
- }
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.fbsr);
}
static int
-r535_instmem_suspend(struct nvkm_instmem *imem)
+r535_fbsr_suspend(struct nvkm_gsp *gsp)
{
- struct nvkm_subdev *subdev = &imem->subdev;
+ struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_instmem *imem = device->imem;
struct nvkm_instobj *iobj;
struct fbsr fbsr = {};
struct fbsr_item *item, *temp;
@@ -262,7 +253,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
fbsr.size += gsp->fb.bios.vga_workspace.size;
nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
- ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
+ ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &gsp->sr.fbsr);
if (ret)
goto done;
@@ -271,7 +262,7 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
if (ret)
goto done_sgt;
- ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
+ ret = fbsr_init(&fbsr, &gsp->sr.fbsr, items_size);
if (WARN_ON(ret))
goto done_sgt;
@@ -282,12 +273,10 @@ r535_instmem_suspend(struct nvkm_instmem *imem)
goto done_sgt;
}
- imem->rm.fbsr_valid = true;
-
/* Cleanup everything except the sysmem backup, which will be removed after resume. */
done_sgt:
if (ret) /* ... unless we failed already. */
- nvkm_gsp_sg_free(device, &imem->rm.fbsr);
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
done:
list_for_each_entry_safe(item, temp, &fbsr.items, head) {
list_del(&item->head);
@@ -299,6 +288,12 @@ done:
return ret;
}
+const struct nvkm_rm_api_fbsr
+r535_fbsr = {
+ .suspend = r535_fbsr_suspend,
+ .resume = r535_fbsr_resume,
+};
+
static void *
r535_instmem_dtor(struct nvkm_instmem *imem)
{
@@ -319,11 +314,10 @@ r535_instmem_new(const struct nvkm_instmem_func *hw,
rm->dtor = r535_instmem_dtor;
rm->fini = hw->fini;
- rm->suspend = r535_instmem_suspend;
- rm->resume = r535_instmem_resume;
rm->memory_new = hw->memory_new;
rm->memory_wrap = hw->memory_wrap;
rm->zero = false;
+ rm->set_bar0_window_addr = hw->set_bar0_window_addr;
ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
index 3454c7d29502..1ac5628c5140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
@@ -19,11 +19,11 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
-#include "cgrp.h"
-#include "chan.h"
-#include "chid.h"
-#include "runl.h"
+#include <engine/fifo/priv.h>
+#include <engine/fifo/cgrp.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/chid.h>
+#include <engine/fifo/runl.h>
#include <core/gpuobj.h>
#include <subdev/gsp.h>
@@ -31,24 +31,19 @@
#include <subdev/vfn.h>
#include <engine/gr.h>
+#include <rm/engine.h>
+
#include <nvhw/drf.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
static u32
r535_chan_doorbell_handle(struct nvkm_chan *chan)
{
- return (chan->cgrp->runl->id << 16) | chan->id;
+ struct nvkm_gsp *gsp = chan->rm.object.client->gsp;
+
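+ /* The doorbell handle layout is GPU-specific, so defer to the per-GPU hook. */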
+ return gsp->rm->gpu->fifo.chan.doorbell_handle(chan);
}
static void
@@ -77,50 +72,29 @@ r535_chan_ramfc_clear(struct nvkm_chan *chan)
#define CHID_PER_USERD 8
static int
-r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+r535_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
{
- struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
- struct nvkm_engn *engn;
- struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
- const int userd_p = chan->id / CHID_PER_USERD;
- const int userd_i = chan->id % CHID_PER_USERD;
- u32 eT = ~0;
- int ret;
-
- if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
- ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
- if (ret)
- return ret;
- }
-
- nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
- eT = engn->id;
- break;
- }
-
- if (WARN_ON(eT == ~0))
- return -EINVAL;
- chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
- fifo->rm.mthdbuf_size,
- &chan->rm.mthdbuf.addr, GFP_KERNEL);
- if (!chan->rm.mthdbuf.ptr)
- return -ENOMEM;
-
- args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
- fifo->func->chan.user.oclass, sizeof(*args),
- &chan->rm.object);
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
if (WARN_ON(IS_ERR(args)))
return PTR_ERR(args);
- args->gpFifoOffset = offset;
- args->gpFifoEntries = length / 8;
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
- args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
if (!priv)
args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
else
@@ -143,25 +117,25 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
- args->hVASpace = chan->vmm->rm.object.handle;
- args->engineType = eT;
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
- args->instanceMem.base = chan->inst->addr;
- args->instanceMem.size = chan->inst->size;
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
args->instanceMem.addressSpace = 2;
args->instanceMem.cacheAttrib = 1;
- args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ args->userdMem.base = userd_addr;
args->userdMem.size = fifo->func->chan.func->userd->size;
args->userdMem.addressSpace = 2;
args->userdMem.cacheAttrib = 1;
- args->ramfcMem.base = chan->inst->addr + 0;
+ args->ramfcMem.base = inst_addr;
args->ramfcMem.size = 0x200;
args->ramfcMem.addressSpace = 2;
args->ramfcMem.cacheAttrib = 1;
- args->mthdbufMem.base = chan->rm.mthdbuf.addr;
+ args->mthdbufMem.base = mthdbuf_addr;
args->mthdbufMem.size = fifo->rm.mthdbuf_size;
args->mthdbufMem.addressSpace = 1;
args->mthdbufMem.cacheAttrib = 0;
@@ -173,7 +147,44 @@ r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm,
args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
- ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_engn *engn;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ const struct nvkm_rm_api *rmapi = device->gsp->rm->api;
+ u32 eT = ~0;
+ int ret;
+
+ if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+ ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+ if (ret)
+ return ret;
+ }
+
+ nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+ eT = engn->id;
+ break;
+ }
+
+ if (WARN_ON(eT == ~0))
+ return -EINVAL;
+
+ chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+ fifo->rm.mthdbuf_size,
+ &chan->rm.mthdbuf.addr, GFP_KERNEL);
+ if (!chan->rm.mthdbuf.ptr)
+ return -ENOMEM;
+
+ ret = rmapi->fifo->chan.alloc(&chan->vmm->rm.device, NVKM_RM_CHAN(chan->id),
+ eT, chan->runq, priv, chan->id, chan->inst->addr,
+ nvkm_memory_addr(chan->userd.mem) + chan->userd.base,
+ chan->rm.mthdbuf.addr, chan->vmm, offset, length,
+ &chan->rm.object);
if (ret)
return ret;
@@ -215,123 +226,8 @@ r535_chan_ramfc = {
.priv = true,
};
-struct r535_chan_userd {
- struct nvkm_memory *mem;
- struct nvkm_memory *map;
- int chid;
- u32 used;
-
- struct list_head head;
-} *userd;
-
-static void
-r535_chan_id_put(struct nvkm_chan *chan)
-{
- struct nvkm_runl *runl = chan->cgrp->runl;
- struct nvkm_fifo *fifo = runl->fifo;
- struct r535_chan_userd *userd;
-
- mutex_lock(&fifo->userd.mutex);
- list_for_each_entry(userd, &fifo->userd.list, head) {
- if (userd->map == chan->userd.mem) {
- u32 chid = chan->userd.base / chan->func->userd->size;
-
- userd->used &= ~BIT(chid);
- if (!userd->used) {
- nvkm_memory_unref(&userd->map);
- nvkm_memory_unref(&userd->mem);
- nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
- list_del(&userd->head);
- kfree(userd);
- }
-
- break;
- }
- }
- mutex_unlock(&fifo->userd.mutex);
-
-}
-
-static int
-r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
-{
- const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
- struct nvkm_runl *runl = chan->cgrp->runl;
- struct nvkm_fifo *fifo = runl->fifo;
- struct r535_chan_userd *userd;
- u32 chid;
- int ret;
-
- if (ouserd + chan->func->userd->size >= userd_size ||
- (ouserd & (chan->func->userd->size - 1))) {
- RUNL_DEBUG(runl, "ouserd %llx", ouserd);
- return -EINVAL;
- }
-
- chid = div_u64(ouserd, chan->func->userd->size);
-
- list_for_each_entry(userd, &fifo->userd.list, head) {
- if (userd->mem == muserd) {
- if (userd->used & BIT(chid))
- return -EBUSY;
- break;
- }
- }
-
- if (&userd->head == &fifo->userd.list) {
- if (nvkm_memory_size(muserd) < userd_size) {
- RUNL_DEBUG(runl, "userd too small");
- return -EINVAL;
- }
-
- userd = kzalloc(sizeof(*userd), GFP_KERNEL);
- if (!userd)
- return -ENOMEM;
-
- userd->chid = nvkm_chid_get(runl->chid, chan);
- if (userd->chid < 0) {
- ret = userd->chid;
- kfree(userd);
- return ret;
- }
-
- userd->mem = nvkm_memory_ref(muserd);
-
- ret = nvkm_memory_kmap(userd->mem, &userd->map);
- if (ret) {
- nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
- kfree(userd);
- return ret;
- }
-
-
- list_add(&userd->head, &fifo->userd.list);
- }
-
- userd->used |= BIT(chid);
-
- chan->userd.mem = nvkm_memory_ref(userd->map);
- chan->userd.base = ouserd;
-
- return (userd->chid * CHID_PER_USERD) + chid;
-}
-
-static int
-r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
-{
- struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
- int ret;
-
- mutex_lock(&fifo->userd.mutex);
- ret = r535_chan_id_get_locked(chan, muserd, ouserd);
- mutex_unlock(&fifo->userd.mutex);
- return ret;
-}
-
static const struct nvkm_chan_func
r535_chan = {
- .id_get = r535_chan_id_get,
- .id_put = r535_chan_id_put,
.inst = &gf100_chan_inst,
.userd = &gv100_chan_userd,
.ramfc = &r535_chan_ramfc,
@@ -340,10 +236,6 @@ r535_chan = {
.doorbell_handle = r535_chan_doorbell_handle,
};
-static const struct nvkm_cgrp_func
-r535_cgrp = {
-};
-
static int
r535_engn_nonstall(struct nvkm_engn *engn)
{
@@ -356,7 +248,7 @@ r535_engn_nonstall(struct nvkm_engn *engn)
}
static const struct nvkm_engn_func
-r535_ce = {
+r535_engn_ce = {
.nonstall = r535_engn_nonstall,
};
@@ -376,7 +268,7 @@ r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *c
}
static const struct nvkm_engn_func
-r535_gr = {
+r535_engn_gr = {
.nonstall = r535_engn_nonstall,
.ctor2 = r535_gr_ctor,
};
@@ -449,57 +341,86 @@ r535_runl = {
.allow = r535_runl_allow,
};
-static int
-r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
+void
+r535_fifo_rc_chid(struct nvkm_fifo *fifo, int chid)
{
- switch (type) {
- case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
- case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
- case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
- case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
- case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
- case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
- case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
- case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
- default:
- break;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
+ if (!chan) {
+ nvkm_error(&fifo->engine.subdev, "rc: chid %d not found!\n", chid);
+ return;
}
- WARN_ON(1);
- return -EINVAL;
+ nvkm_chan_error(chan, false);
+ nvkm_chan_put(&chan, flags);
}
static int
-r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
+r535_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc: engn:%08x chid:%d type:%d scope:%d part:%d\n",
+ msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+ msg->partitionAttributionId);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r535_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
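+ /* Expands to "case RM_ENGINE_TYPE_<RM>:"; sets the NVKM type and NV2080 equivalent, and returns the engine instance. */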
+
switch (rm) {
- case RM_ENGINE_TYPE_GR0:
- *ptype = NVKM_ENGINE_GR;
- return 0;
- case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
- *ptype = NVKM_ENGINE_CE;
- return rm - RM_ENGINE_TYPE_COPY0;
- case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
- *ptype = NVKM_ENGINE_NVDEC;
- return rm - RM_ENGINE_TYPE_NVDEC0;
- case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
- *ptype = NVKM_ENGINE_NVENC;
- return rm - RM_ENGINE_TYPE_NVENC0;
- case RM_ENGINE_TYPE_SW:
- *ptype = NVKM_ENGINE_SW;
- return 0;
- case RM_ENGINE_TYPE_SEC2:
- *ptype = NVKM_ENGINE_SEC2;
- return 0;
- case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
- *ptype = NVKM_ENGINE_NVJPG;
- return rm - RM_ENGINE_TYPE_NVJPEG0;
- case RM_ENGINE_TYPE_OFA:
- *ptype = NVKM_ENGINE_OFA;
- return 0;
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA, OFA, 0);
default:
return -EINVAL;
}
+#undef RM_ENGINE_TYPE
}
static int
@@ -536,16 +457,19 @@ static int
r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
struct nvkm_subdev *subdev = &fifo->engine.subdev;
- struct nvkm_gsp *gsp = subdev->device->gsp;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
struct nvkm_runl *runl;
struct nvkm_engn *engn;
- u32 cgids = 2048;
u32 chids = 2048;
+ u32 first = rm->api->fifo->rsvd_chids;
+ u32 count = chids - first;
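+ /* The first rsvd_chids IDs are reserved for RM's own channels; nvkm allocates from the remainder. */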
int ret;
NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
- if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
- (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
+ if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->cgid)) ||
+ (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, first, count, &fifo->chid)))
return ret;
ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
@@ -576,25 +500,43 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
if (!runl)
continue;
- inst = r535_fifo_engn_type(rmid, &type);
+ inst = rm->api->fifo->xlat_rm_engine_type(rmid, &type, &nv2080);
if (inst < 0) {
nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
nvkm_runl_del(runl);
continue;
}
- nv2080 = r535_fifo_2080_type(type, inst);
- if (nv2080 < 0) {
+ /* Skip SW engine - there's currently no support for NV SW classes. */
+ if (type == NVKM_ENGINE_SW)
+ continue;
+
+ /* Skip lone GRCEs (ones not paired with GR on a runlist), as they
+ * don't appear to function as async copy engines.
+ */
+ if (type == NVKM_ENGINE_CE &&
+ rm->gpu->ce.grce_mask &&
+ (rm->gpu->ce.grce_mask(device) & BIT(inst)) &&
+ !nvkm_runl_find_engn(engn, runl, engn->engine->subdev.type == NVKM_ENGINE_GR)) {
+ RUNL_DEBUG(runl, "skip LCE %d - GRCE without GR", inst);
+ nvkm_runl_del(runl);
+ continue;
+ }
+
+ ret = nvkm_rm_engine_new(gsp->rm, type, inst);
+ if (ret) {
nvkm_runl_del(runl);
continue;
}
+ engn = NULL;
+
switch (type) {
case NVKM_ENGINE_CE:
- engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_ce, type, inst);
break;
case NVKM_ENGINE_GR:
- engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
+ engn = nvkm_runl_add(runl, nv2080, &r535_engn_gr, type, inst);
break;
case NVKM_ENGINE_NVDEC:
case NVKM_ENGINE_NVENC:
@@ -633,7 +575,7 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
}
- return r535_fifo_ectx_size(fifo);
+ return rm->api->fifo->ectx_size(fifo);
}
static void
@@ -646,6 +588,7 @@ int
r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_fifo_func *rm;
if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
@@ -654,12 +597,20 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
rm->dtor = r535_fifo_dtor;
rm->runl_ctor = r535_fifo_runl_ctor;
rm->runl = &r535_runl;
- rm->cgrp = hw->cgrp;
- rm->cgrp.func = &r535_cgrp;
- rm->chan = hw->chan;
+ rm->chan.user.oclass = gpu->fifo.chan.class;
rm->chan.func = &r535_chan;
rm->nonstall = &ga100_fifo_nonstall;
rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}
+
+const struct nvkm_rm_api_fifo
+r535_fifo = {
+ .xlat_rm_engine_type = r535_fifo_xlat_rm_engine_type,
+ .ectx_size = r535_fifo_ectx_size,
+ .rc_triggered = r535_fifo_rc_triggered,
+ .chan = {
+ .alloc = r535_chan_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
new file mode 100644
index 000000000000..ddb57d5e73d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gr.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/gr.h>
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+#include <engine/gr/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/vmm.h"
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+ struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+ struct r535_gr *gr = grc->gr;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+ nvkm_memory_unref(&grc->mem[i]);
+ }
+
+ nvkm_vmm_unref(&grc->vmm);
+ return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+ .dtor = r535_gr_chan_dtor,
+};
+
+int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+ struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
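+ /* engineType 1 == NV2080_ENGINE_TYPE_GRAPHICS. */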
+ ctrl->engineType = 1;
+ ctrl->hChanClient = vmm->rm.client.object.handle;
+ ctrl->hObject = chan->handle;
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+ &ctrl->promoteEntry[ctrl->entryCount];
+ const bool alloc = golden || !gr->ctxbuf[i].global;
+ int ret;
+
+ entry->bufferId = gr->ctxbuf[i].bufferId;
+ entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+ if (alloc) {
+ ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+ NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+ gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+ gr->ctxbuf[i].init, &pmem[i]);
+ if (WARN_ON(ret))
+ return ret;
+
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+ entry->bNonmapped = 1;
+ } else {
+ if (gr->ctxbuf[i].bufferId ==
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+ continue;
+
+ pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+ }
+
+ if (!entry->bNonmapped) {
+ struct gf100_vmm_map_v0 args = {
+ .priv = 1,
+ .ro = gr->ctxbuf[i].ro,
+ };
+
+ mutex_lock(&vmm->mutex.vmm);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+ nvkm_memory_size(pmem[i]), &pvma[i]);
+ mutex_unlock(&vmm->mutex.vmm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+ if (ret)
+ return ret;
+
+ entry->gpuVirtAddr = pvma[i]->addr;
+ }
+
+ if (entry->bInitialize) {
+ entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+ entry->size = gr->ctxbuf[i].size;
+ entry->physAttr = 4;
+ }
+
+ nvkm_debug(subdev,
+ "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+ entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+ entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+ ctrl->entryCount++;
+ }
+
+ return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+ struct nvkm_object **pobject)
+{
+ struct r535_gr *gr = r535_gr(base);
+ struct r535_gr_chan *grc;
+ int ret;
+
+ if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+ grc->gr = gr;
+ grc->vmm = nvkm_vmm_ref(chan->vmm);
+ grc->chan = chan;
+ *pobject = &grc->object;
+
+ ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+ struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+ return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
+void
+r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i,
+ struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ static const struct {
+ u32 id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+ u32 id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+ bool global;
+ bool init;
+ bool ro;
+ } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+ .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+ .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+ /* global init ro */
+ _A( GRAPHICS, MAIN, false, true, false),
+ _B( PATCH, false, true, false),
+ _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB, true, false, false),
+ _B( PAGEPOOL, true, false, false),
+ _B( ATTRIBUTE_CB, true, false, false),
+ _B( RTV_CB_GLOBAL, true, false, false),
+ _B( FECS_EVENT, true, true, false),
+ _B( PRIV_ACCESS_MAP, true, true, true),
+#undef _B
+#undef _A
+ };
+ u32 size = info->size;
+ u8 align, page;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(map); id++) {
+ if (map[id].id0 == i)
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+ size, (id < ARRAY_SIZE(map)) ? "*" : "");
+ if (id >= ARRAY_SIZE(map))
+ return;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+ size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
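+ /* Use the largest page size the buffer can fill: 2MiB, 64KiB, else 4KiB. */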
+ if (size >= 1 << 21) page = 21;
+ else if (size >= 1 << 16) page = 16;
+ else page = 12;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+ align = order_base_2(size);
+ else
+ align = page;
+
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+ gr->ctxbuf[gr->ctxbuf_nr].size = size;
+ gr->ctxbuf[gr->ctxbuf_nr].page = page;
+ gr->ctxbuf[gr->ctxbuf_nr].align = align;
+ gr->ctxbuf[gr->ctxbuf_nr].global = map[id].global;
+ gr->ctxbuf[gr->ctxbuf_nr].init = map[id].init;
+ gr->ctxbuf[gr->ctxbuf_nr].ro = map[id].ro;
+ gr->ctxbuf_nr++;
+
+ if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+ if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+ return;
+
+ gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+ gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+ NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+ gr->ctxbuf_nr++;
+ }
+}
+
+static int
+r535_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = container_of(base, typeof(*gr), base);
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ struct {
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
+ struct nvkm_gsp_object chan;
+ struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+ } golden = {};
+ struct nvkm_gsp_object threed;
+ int ret;
+
+ /* Allocate a channel to use for golden context init. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(golden.vmm, NVKM_RM_VASPACE, false);
+ if (ret)
+ goto done;
+
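+ /* The 0x12000 instance block is carved into inst (+0), userd (+0x1000) and the method buffer (+0x2000). */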
+ ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0),
+ 1, 0, true, rm->api->fifo->rsvd_chids,
+ nvkm_memory_addr(golden.inst),
+ nvkm_memory_addr(golden.inst) + 0x1000,
+ nvkm_memory_addr(golden.inst) + 0x2000,
+ golden.vmm, 0, 0x1000, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Fetch context buffer info from RM and allocate each of them here to use
+ * during golden context init (or later as a global context buffer).
+ *
+ * Also build the information that'll be used to create channel contexts.
+ */
+ ret = rm->api->gr->get_ctxbufs_info(gr);
+ if (ret)
+ goto done;
+
+ /* Promote golden context to RM. */
+ ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+ if (ret)
+ goto done;
+
+ /* Allocate 3D class on channel to trigger golden context init in RM. */
+ ret = nvkm_gsp_rm_alloc(&golden.chan, NVKM_RM_THREED, rm->gpu->gr.class.threed, 0, &threed);
+ if (ret)
+ goto done;
+
+ /* There's no need to keep the golden channel around, as RM caches the context. */
+ nvkm_gsp_rm_free(&threed);
+done:
+ nvkm_gsp_rm_free(&golden.chan);
+ for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+ nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+ nvkm_vmm_unref(&golden.vmm);
+ nvkm_memory_unref(&golden.inst);
+ return ret;
+}
+
+void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+ struct r535_gr *gr = r535_gr(base);
+
+ while (gr->ctxbuf_nr)
+ nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+ kfree(gr->base.func);
+ return gr;
+}
+
+const struct nvkm_rm_api_gr
+r535_gr = {
+ .get_ctxbufs_info = r535_gr_get_ctxbufs_info,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index db2602e88006..baf42339f93e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -19,9 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <rm/rpc.h>
+
#include "priv.h"
#include <core/pci.h>
+#include <subdev/pci/priv.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
@@ -30,29 +33,11 @@
#include <nvfw/fw.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
-#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
-#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
-#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
-#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
-#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
-#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
-#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
-#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+#include "nvrm/event.h"
+#include "nvrm/fifo.h"
#include <linux/acpi.h>
#include <linux/ctype.h>
@@ -60,990 +45,6 @@
extern struct dentry *nouveau_debugfs_root;
-#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
-#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
-
-/**
- * DOC: GSP message queue element
- *
- * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
- *
- * The GSP command queue and status queue are message queues for the
- * communication between software and GSP. The software submits the GSP
- * RPC via the GSP command queue, GSP writes the status of the submitted
- * RPC in the status queue.
- *
- * A GSP message queue element consists of three parts:
- *
- * - message element header (struct r535_gsp_msg), which mostly maintains
- * the metadata for queuing the element.
- *
- * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
- * of the RPC. E.g., the RPC function number.
- *
- * - The payload, where the RPC message stays. E.g. the params of a
- * specific RPC function. Some RPC functions also have their headers
- * in the payload. E.g. rm_alloc, rm_control.
- *
- * The memory layout of a GSP message element can be illustrated below::
- *
- * +------------------------+
- * | Message Element Header |
- * | (r535_gsp_msg) |
- * | |
- * | (r535_gsp_msg.data) |
- * | | |
- * |----------V-------------|
- * | GSP RPC Header |
- * | (nvfw_gsp_rpc) |
- * | |
- * | (nvfw_gsp_rpc.data) |
- * | | |
- * |----------V-------------|
- * | Payload |
- * | |
- * | header(optional) |
- * | params |
- * +------------------------+
- *
- * The max size of a message queue element is 16 pages (including the
- * headers). When a GSP message to be sent is larger than 16 pages, the
- * message should be split into multiple elements and sent accordingly.
- *
- * In the bunch of the split elements, the first element has the expected
- * function number, while the rest of the elements are sent with the
- * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
- *
- * GSP consumes the elements from the cmdq and always writes the result
- * back to the msgq. The result is also formed as split elements.
- *
- * Terminology:
- *
- * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
- * payload)
- * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
- * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
- * - gsp_rpc_len: size of (GSP RPC header + payload)
- * - params_size: size of params in the payload
- * - payload_size: size of (header if exists + params) in the payload
- */
-
-struct r535_gsp_msg {
- u8 auth_tag_buffer[16];
- u8 aad_buffer[16];
- u32 checksum;
- u32 sequence;
- u32 elem_count;
- u32 pad;
- u8 data[];
-};
-
-struct nvfw_gsp_rpc {
- u32 header_version;
- u32 signature;
- u32 length;
- u32 function;
- u32 rpc_result;
- u32 rpc_result_private;
- u32 sequence;
- union {
- u32 spare;
- u32 cpuRmGfid;
- };
- u8 data[];
-};
-
-#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
-
-#define to_gsp_hdr(p, header) \
- container_of((void *)p, typeof(*header), data)
-
-#define to_payload_hdr(p, header) \
- container_of((void *)p, typeof(*header), params)
-
-static int
-r535_rpc_status_to_errno(uint32_t rpc_status)
-{
- switch (rpc_status) {
- case 0x55: /* NV_ERR_NOT_READY */
- case 0x66: /* NV_ERR_TIMEOUT_RETRY */
- return -EBUSY;
- case 0x51: /* NV_ERR_NO_MEMORY */
- return -ENOMEM;
- default:
- return -EINVAL;
- }
-}
-
-static int
-r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
-{
- u32 size, rptr = *gsp->msgq.rptr;
- int used;
-
- size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
- GSP_PAGE_SIZE);
- if (WARN_ON(!size || size >= gsp->msgq.cnt))
- return -EINVAL;
-
- do {
- u32 wptr = *gsp->msgq.wptr;
-
- used = wptr + gsp->msgq.cnt - rptr;
- if (used >= gsp->msgq.cnt)
- used -= gsp->msgq.cnt;
- if (used >= size)
- break;
-
- usleep_range(1, 2);
- } while (--(*ptime));
-
- if (WARN_ON(!*ptime))
- return -ETIMEDOUT;
-
- return used;
-}
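
The wait loop above uses the standard ring-buffer occupancy formula. Isolated for clarity as a sketch (assuming, as in the code, that rptr and wptr count pages modulo msgq.cnt):

/* Pages currently readable in the message queue ring. */
static inline u32 msgq_pages_used(u32 wptr, u32 rptr, u32 cnt)
{
	u32 used = wptr + cnt - rptr;

	return used >= cnt ? used - cnt : used;
}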
-
-static struct r535_gsp_msg *
-r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
-{
- u32 rptr = *gsp->msgq.rptr;
-
- /* Skip the first page, which is the message queue info */
- return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
- rptr * GSP_PAGE_SIZE);
-}
-
-/**
- * DOC: Receive a GSP message queue element
- *
- * Receiving a GSP message queue element from the message queue consists of
- * the following steps:
- *
- * - Peek the element from the queue: r535_gsp_msgq_peek().
- *   Peek the first page of the element to determine the total size of
- *   the message before allocating memory for it.
- *
- * - Allocate memory for the message.
- * Once the total size of the message is determined from the GSP message
- * queue element, the caller of r535_gsp_msgq_recv() allocates the
- * required memory.
- *
- * - Receive the message: r535_gsp_msgq_recv().
- *   Copy the message into the allocated memory and advance the read
- *   pointer. If the message is a large GSP message, r535_gsp_msgq_recv()
- *   calls r535_gsp_msgq_recv_one_elem() repeatedly to receive the
- *   continuation parts, assembling their payloads into the buffer
- *   returned for the complete large GSP message.
- *
- * - Free the allocated memory: r535_gsp_msg_done().
- *   The caller is responsible for freeing the memory allocated for the
- *   GSP message pages after they have been processed.
- */
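
Put together, the receive path looks like the following caller-side sketch. It is illustrative only; r535_gsp_msg_recv() further below is the real implementation of this sequence:

static struct nvfw_gsp_rpc *
gsp_recv_sketch(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
{
	struct nvfw_gsp_rpc *rpc;

	/* 1. Peek the RPC header to learn the message's total size. */
	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), retries);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	/* 2./3. Allocate a buffer and copy the message into it, chasing
	 * CONTINUATION_RECORD elements for large messages. */
	rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, retries);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	/* ... process the message ... */

	/* 4. Free the buffer once the message has been handled. */
	r535_gsp_msg_done(gsp, rpc);
	return NULL;
}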
-static void *
-r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- int ret;
-
- ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- return mqe->data;
-}
-
-struct r535_gsp_msg_info {
- int *retries;
- u32 gsp_rpc_len;
- void *gsp_rpc_buf;
- bool continuation;
-};
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
-
-static void *
-r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
- struct r535_gsp_msg_info *info)
-{
- u8 *buf = info->gsp_rpc_buf;
- u32 rptr = *gsp->msgq.rptr;
- struct r535_gsp_msg *mqe;
- u32 size, expected, len;
- int ret;
-
- expected = info->gsp_rpc_len;
-
- ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
- if (ret < 0)
- return ERR_PTR(ret);
-
- mqe = r535_gsp_msgq_get_entry(gsp);
-
- if (info->continuation) {
- struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
- nvkm_error(&gsp->subdev,
- "Not a continuation of a large RPC\n");
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- return ERR_PTR(-EIO);
- }
- }
-
- size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
-
- len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
- len = min_t(u32, expected, len);
-
- if (info->continuation)
- memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
- len - sizeof(struct nvfw_gsp_rpc));
- else
- memcpy(buf, mqe->data, len);
-
- expected -= len;
-
- if (expected) {
- mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
- memcpy(buf + len, mqe, expected);
- }
-
- rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
-
- mb();
- (*gsp->msgq.rptr) = rptr;
- return buf;
-}
-
-static void *
-r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
-{
- struct r535_gsp_msg *mqe;
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
- struct nvfw_gsp_rpc *rpc;
- struct r535_gsp_msg_info info = {0};
- u32 expected = gsp_rpc_len;
- void *buf;
-
- mqe = r535_gsp_msgq_get_entry(gsp);
- rpc = (struct nvfw_gsp_rpc *)mqe->data;
-
- if (WARN_ON(rpc->length > max_rpc_size))
- return NULL;
-
- buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- info.gsp_rpc_buf = buf;
- info.retries = retries;
- info.gsp_rpc_len = rpc->length;
-
- buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR(buf)) {
- kvfree(info.gsp_rpc_buf);
- info.gsp_rpc_buf = NULL;
- return buf;
- }
-
- if (expected <= max_rpc_size)
- return buf;
-
- info.gsp_rpc_buf += info.gsp_rpc_len;
- expected -= info.gsp_rpc_len;
-
- while (expected) {
- u32 size;
-
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- info.gsp_rpc_len = rpc->length;
- info.continuation = true;
-
- rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
- if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
- return rpc;
- }
-
- size = info.gsp_rpc_len - sizeof(*rpc);
- expected -= size;
- info.gsp_rpc_buf += size;
- }
-
- rpc = buf;
- rpc->length = gsp_rpc_len;
- return buf;
-}
-
-static int
-r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
-{
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- struct r535_gsp_msg *cqe;
- u32 gsp_rpc_len = msg->checksum;
- u64 *ptr = (void *)msg;
- u64 *end;
- u64 csum = 0;
- int free, time = 1000000;
- u32 wptr, size, step, len;
- u32 off = 0;
-
- len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
-
- end = (u64 *)((char *)ptr + len);
- msg->pad = 0;
- msg->checksum = 0;
- msg->sequence = gsp->cmdq.seq++;
- msg->elem_count = DIV_ROUND_UP(len, 0x1000);
-
- while (ptr < end)
- csum ^= *ptr++;
-
- msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
-
- wptr = *gsp->cmdq.wptr;
- do {
- do {
- free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
- if (free >= gsp->cmdq.cnt)
- free -= gsp->cmdq.cnt;
- if (free >= 1)
- break;
-
- usleep_range(1, 2);
- } while (--time);
-
- if (WARN_ON(!time)) {
- kvfree(msg);
- return -ETIMEDOUT;
- }
-
- cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
- step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
- size = min_t(u32, len, step * GSP_PAGE_SIZE);
-
- memcpy(cqe, (u8 *)msg + off, size);
-
- wptr += DIV_ROUND_UP(size, 0x1000);
- if (wptr == gsp->cmdq.cnt)
- wptr = 0;
-
- off += size;
- len -= size;
- } while (len);
-
- nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
- wmb();
- (*gsp->cmdq.wptr) = wptr;
- mb();
-
- nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
-
- kvfree(msg);
- return 0;
-}
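
The checksum written above is simply the XOR of every 64-bit word of the page-aligned message (with the checksum field itself zeroed before summing), folded into 32 bits. As a standalone sketch of the same scheme:

/* XOR-of-u64 checksum over a buffer padded to a multiple of 8 bytes,
 * folded into 32 bits as in r535_gsp_cmdq_push() above. */
static u32 gsp_msg_checksum(const void *buf, size_t len)
{
	const u64 *ptr = buf;
	const u64 *end = (const u64 *)((const u8 *)buf + len);
	u64 csum = 0;

	while (ptr < end)
		csum ^= *ptr++;

	return upper_32_bits(csum) ^ lower_32_bits(csum);
}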
-
-static void *
-r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
-{
- struct r535_gsp_msg *msg;
- u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
-
- size = ALIGN(size, GSP_MSG_MIN_SIZE);
- msg = kvzalloc(size, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
-
- msg->checksum = gsp_rpc_len;
- return msg->data;
-}
-
-static void
-r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
-{
- kvfree(msg);
-}
-
-static void
-r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
-{
- if (gsp->subdev.debug >= lvl) {
- nvkm_printk__(&gsp->subdev, lvl, info,
- "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
- msg->function, msg->length, msg->length - sizeof(*msg),
- msg->rpc_result, msg->rpc_result_private);
- print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
- msg->data, msg->length - sizeof(*msg), true);
- }
-}
-
-static struct nvfw_gsp_rpc *
-r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvfw_gsp_rpc *rpc;
- int retries = 4000000, i;
-
-retry:
- rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->rpc_result) {
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EINVAL);
- }
-
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
-
- if (fn && rpc->function == fn) {
- if (gsp_rpc_len) {
- if (rpc->length < gsp_rpc_len) {
- nvkm_error(subdev, "rpc len %d < %d\n",
- rpc->length, gsp_rpc_len);
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
- r535_gsp_msg_done(gsp, rpc);
- return ERR_PTR(-EIO);
- }
-
- return rpc;
- }
-
- r535_gsp_msg_done(gsp, rpc);
- return NULL;
- }
-
- for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
- struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
-
- if (ntfy->fn == rpc->function) {
- if (ntfy->func)
- ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
- rpc->length - sizeof(*rpc));
- break;
- }
- }
-
- if (i == gsp->msgq.ntfy_nr)
- r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
-
- r535_gsp_msg_done(gsp, rpc);
- if (fn)
- goto retry;
-
- if (*gsp->msgq.rptr != *gsp->msgq.wptr)
- goto retry;
-
- return NULL;
-}
-
-static int
-r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
-{
- int ret = 0;
-
- mutex_lock(&gsp->msgq.mutex);
- if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
- ret = -ENOSPC;
- } else {
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
- gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
- gsp->msgq.ntfy_nr++;
- }
- mutex_unlock(&gsp->msgq.mutex);
- return ret;
-}
-
-static int
-r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
-{
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- repv = r535_gsp_msg_recv(gsp, fn, 0);
- mutex_unlock(&gsp->cmdq.mutex);
- if (IS_ERR(repv))
- return PTR_ERR(repv);
-
- return 0;
-}
-
-static void *
-r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct nvfw_gsp_rpc *msg;
- u32 fn = rpc->function;
- void *repv = NULL;
- int ret;
-
- if (gsp->subdev.debug >= NV_DBG_TRACE) {
- nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
- rpc->length, rpc->length - sizeof(*rpc));
- print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
- rpc->data, rpc->length - sizeof(*rpc), true);
- }
-
- ret = r535_gsp_cmdq_push(gsp, rpc);
- if (ret)
- return ERR_PTR(ret);
-
- if (wait) {
- msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
- if (!IS_ERR_OR_NULL(msg))
- repv = msg->data;
- else
- repv = msg;
- }
-
- return repv;
-}
-
-static void
-r535_gsp_event_dtor(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
-
- mutex_lock(&gsp->client_id.mutex);
- if (event->func) {
- list_del(&event->head);
- event->func = NULL;
- }
- mutex_unlock(&gsp->client_id.mutex);
-
- nvkm_gsp_rm_free(&event->object);
- event->device = NULL;
-}
-
-static int
-r535_gsp_device_event_get(struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_device *device = event->device;
- NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
- NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
-
- ctrl->event = event->id;
- ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
- return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
-}
-
-static int
-r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
- nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
-{
- struct nvkm_gsp_client *client = device->object.client;
- struct nvkm_gsp *gsp = client->gsp;
- NV0005_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
- NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
- &event->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hParentClient = client->object.handle;
- args->hSrcResource = 0;
- args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
- args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
- args->data = NULL;
-
- ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
- if (ret)
- return ret;
-
- event->device = device;
- event->id = id;
-
- ret = r535_gsp_device_event_get(event);
- if (ret) {
- nvkm_gsp_event_dtor(event);
- return ret;
- }
-
- mutex_lock(&gsp->client_id.mutex);
- event->func = func;
- list_add(&event->head, &client->events);
- mutex_unlock(&gsp->client_id.mutex);
- return 0;
-}
-
-static void
-r535_gsp_device_dtor(struct nvkm_gsp_device *device)
-{
- nvkm_gsp_rm_free(&device->subdevice);
- nvkm_gsp_rm_free(&device->object);
-}
-
-static int
-r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
-{
- NV2080_ALLOC_PARAMETERS *args;
-
- return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
- &device->subdevice);
-}
-
-static int
-r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
-{
- NV0080_ALLOC_PARAMETERS *args;
- int ret;
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
- &device->object);
- if (IS_ERR(args))
- return PTR_ERR(args);
-
- args->hClientShare = client->object.handle;
-
- ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
- if (ret)
- return ret;
-
- ret = r535_gsp_subdevice_ctor(device);
- if (ret)
- nvkm_gsp_rm_free(&device->object);
-
- return ret;
-}
-
-static void
-r535_gsp_client_dtor(struct nvkm_gsp_client *client)
-{
- struct nvkm_gsp *gsp = client->gsp;
-
- nvkm_gsp_rm_free(&client->object);
-
- mutex_lock(&gsp->client_id.mutex);
- idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
- mutex_unlock(&gsp->client_id.mutex);
-
- client->gsp = NULL;
-}
-
-static int
-r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
-{
- NV0000_ALLOC_PARAMETERS *args;
- int ret;
-
- mutex_lock(&gsp->client_id.mutex);
- ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
- mutex_unlock(&gsp->client_id.mutex);
- if (ret < 0)
- return ret;
-
- client->gsp = gsp;
- client->object.client = client;
- INIT_LIST_HEAD(&client->events);
-
- args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
- &client->object);
- if (IS_ERR(args)) {
- r535_gsp_client_dtor(client);
- return ret;
- }
-
- args->hClient = client->object.handle;
- args->processID = ~0;
-
- ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
- if (ret) {
- r535_gsp_client_dtor(client);
- return ret;
- }
-
- return 0;
-}
-
-static int
-r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_free_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
- client->object.handle, object->handle);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
- if (WARN_ON(IS_ERR_OR_NULL(rpc)))
- return -EIO;
-
- rpc->params.hRoot = client->object.handle;
- rpc->params.hObjectParent = 0;
- rpc->params.hObjectOld = object->handle;
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
-}
-
-static void
-r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_alloc_v03_00 *rpc = to_payload_hdr(params, rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- void *ret = NULL;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc));
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
-
- if (rpc->status) {
- ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
- if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
- nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- }
-
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass,
- u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_alloc_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x\n",
- client->object.handle, object->parent->handle,
- object->handle);
-
- nvkm_debug(&gsp->subdev, "cls:0x%08x params_size:%d\n", oclass,
- params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hParent = object->parent->handle;
- rpc->hObject = object->handle;
- rpc->hClass = oclass;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
-
-static void
-r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *params)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr(params, rpc);
-
- if (!params)
- return;
- nvkm_gsp_rpc_done(object->client->gsp, rpc);
-}
-
-static int
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 repc)
-{
- rpc_gsp_rm_control_v03_00 *rpc = to_payload_hdr((*params), rpc);
- struct nvkm_gsp *gsp = object->client->gsp;
- int ret = 0;
-
- rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
- if (IS_ERR_OR_NULL(rpc)) {
- *params = NULL;
- return PTR_ERR(rpc);
- }
-
- if (rpc->status) {
- ret = r535_rpc_status_to_errno(rpc->status);
- if (ret != -EAGAIN && ret != -EBUSY)
- nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
- object->client->object.handle, object->handle, rpc->cmd, rpc->status);
- }
-
- if (repc)
- *params = rpc->params;
- else
- nvkm_gsp_rpc_done(gsp, rpc);
-
- return ret;
-}
-
-static void *
-r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 params_size)
-{
- struct nvkm_gsp_client *client = object->client;
- struct nvkm_gsp *gsp = client->gsp;
- rpc_gsp_rm_control_v03_00 *rpc;
-
- nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x params_size:%d\n",
- client->object.handle, object->handle, cmd, params_size);
-
- rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
- sizeof(*rpc) + params_size);
- if (IS_ERR(rpc))
- return rpc;
-
- rpc->hClient = client->object.handle;
- rpc->hObject = object->handle;
- rpc->cmd = cmd;
- rpc->status = 0;
- rpc->paramsSize = params_size;
- return rpc->params;
-}
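
The three control hooks above are consumed through the nvkm_gsp_rm_ctrl_get()/_push()/_done() wrappers seen elsewhere in this file, following a get/push/done lifecycle. A hedged sketch of a typical caller (gsp_ctrl_sketch is an illustrative name):

/* Sketch: a typical RM control round trip using the hooks above. */
static int
gsp_ctrl_sketch(struct nvkm_gsp_object *object, u32 cmd, u32 size)
{
	void *params;
	int ret;

	params = nvkm_gsp_rm_ctrl_get(object, cmd, size);
	if (IS_ERR(params))
		return PTR_ERR(params);

	/* ... fill in the command parameters ... */

	ret = nvkm_gsp_rm_ctrl_push(object, &params, size);
	if (ret)
		return ret;

	/* ... read any results back from *params ... */

	nvkm_gsp_rm_ctrl_done(object, params);
	return 0;
}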
-
-static void
-r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
-{
- struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
-
- r535_gsp_msg_done(gsp, rpc);
-}
-
-static void *
-r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
-{
- struct nvfw_gsp_rpc *rpc;
-
- rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
- sizeof(u64)));
- if (IS_ERR(rpc))
- return ERR_CAST(rpc);
-
- rpc->header_version = 0x03000000;
- rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
- rpc->function = fn;
- rpc->rpc_result = 0xffffffff;
- rpc->rpc_result_private = 0xffffffff;
- rpc->length = sizeof(*rpc) + payload_size;
- return rpc->data;
-}
-
-static void *
-r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait,
- u32 gsp_rpc_len)
-{
- struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
- struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
- const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
- const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
- u32 payload_size = rpc->length - sizeof(*rpc);
- void *repv;
-
- mutex_lock(&gsp->cmdq.mutex);
- if (payload_size > max_payload_size) {
- const u32 fn = rpc->function;
- u32 remain_payload_size = payload_size;
-
- /* Adjust length, and send initial RPC. */
- rpc->length = sizeof(*rpc) + max_payload_size;
- msg->checksum = rpc->length;
-
- repv = r535_gsp_rpc_send(gsp, payload, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += max_payload_size;
- remain_payload_size -= max_payload_size;
-
- /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
- while (remain_payload_size) {
- u32 size = min(remain_payload_size,
- max_payload_size);
- void *next;
-
- next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
- if (IS_ERR(next)) {
- repv = next;
- goto done;
- }
-
- memcpy(next, payload, size);
-
- repv = r535_gsp_rpc_send(gsp, next, false, 0);
- if (IS_ERR(repv))
- goto done;
-
- payload += size;
- remain_payload_size -= size;
- }
-
- /* Wait for reply. */
- rpc = r535_gsp_msg_recv(gsp, fn, payload_size +
- sizeof(*rpc));
- if (!IS_ERR_OR_NULL(rpc)) {
- if (wait) {
- repv = rpc->data;
- } else {
- nvkm_gsp_rpc_done(gsp, rpc);
- repv = NULL;
- }
- } else {
- repv = wait ? rpc : NULL;
- }
- } else {
- repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len);
- }
-
-done:
- mutex_unlock(&gsp->cmdq.mutex);
- return repv;
-}
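
For oversized payloads, the loop above issues one initial send plus one CONTINUATION_RECORD per remaining max_payload_size chunk. The count, stated as a sketch:

/* Sketch: number of cmdq sends needed for a given RPC payload size. */
static u32 gsp_rpc_send_count(u32 payload_size, u32 max_payload_size)
{
	if (payload_size <= max_payload_size)
		return 1;

	return 1 + DIV_ROUND_UP(payload_size - max_payload_size,
				max_payload_size);
}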
-
-const struct nvkm_gsp_rm
-r535_gsp_rm = {
- .rpc_get = r535_gsp_rpc_get,
- .rpc_push = r535_gsp_rpc_push,
- .rpc_done = r535_gsp_rpc_done,
-
- .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
- .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
- .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,
-
- .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
- .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
- .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,
-
- .rm_free = r535_gsp_rpc_rm_free,
-
- .client_ctor = r535_gsp_client_ctor,
- .client_dtor = r535_gsp_client_dtor,
-
- .device_ctor = r535_gsp_device_ctor,
- .device_dtor = r535_gsp_device_dtor,
-
- .event_ctor = r535_gsp_device_event_ctor,
- .event_dtor = r535_gsp_event_dtor,
-};
-
static void
r535_gsp_msgq_work(struct work_struct *work)
{
@@ -1086,10 +87,52 @@ r535_gsp_intr(struct nvkm_inth *inth)
return IRQ_HANDLED;
}
+static bool
+r535_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_MSENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = 0;
+ return true;
+ default:
+ return false;
+ }
+}
+
static int
r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
{
NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret = 0;
ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
@@ -1112,42 +155,8 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
- switch (ctrl->table[i].engineIdx) {
- case MC_ENGINE_IDX_GSP:
- type = NVKM_SUBDEV_GSP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_DISP:
- type = NVKM_ENGINE_DISP;
- inst = 0;
- break;
- case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
- type = NVKM_ENGINE_CE;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
- break;
- case MC_ENGINE_IDX_GR0:
- type = NVKM_ENGINE_GR;
- inst = 0;
- break;
- case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
- type = NVKM_ENGINE_NVDEC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
- break;
- case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
- type = NVKM_ENGINE_NVENC;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
- break;
- case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
- type = NVKM_ENGINE_NVJPG;
- inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
- break;
- case MC_ENGINE_IDX_OFA0:
- type = NVKM_ENGINE_OFA;
- inst = 0;
- break;
- default:
+ if (!rmapi->gsp->xlat_mc_engine_idx(ctrl->table[i].engineIdx, &type, &inst))
continue;
- }
if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
ret = -ENOSPC;
@@ -1165,35 +174,14 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
return ret;
}
-static int
-r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+void
+r535_gsp_get_static_info_fb(struct nvkm_gsp *gsp,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *info)
{
- GspStaticConfigInfo *rpc;
int last_usable = -1;
- rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
- if (IS_ERR(rpc))
- return PTR_ERR(rpc);
-
- gsp->internal.client.object.client = &gsp->internal.client;
- gsp->internal.client.object.parent = NULL;
- gsp->internal.client.object.handle = rpc->hInternalClient;
- gsp->internal.client.gsp = gsp;
-
- gsp->internal.device.object.client = &gsp->internal.client;
- gsp->internal.device.object.parent = &gsp->internal.client.object;
- gsp->internal.device.object.handle = rpc->hInternalDevice;
-
- gsp->internal.device.subdevice.client = &gsp->internal.client;
- gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
- gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
-
- gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
- gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
-
- for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
- NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
- &rpc->fbRegionInfoParams.fbRegion[i];
+ for (int i = 0; i < info->numFBRegions; i++) {
+ const NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = &info->fbRegion[i];
nvkm_debug(&gsp->subdev, "fb region %d: "
"%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
@@ -1215,10 +203,38 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
}
if (last_usable >= 0) {
- u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+ u32 rsvd_base = info->fbRegion[last_usable].limit + 1;
gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
}
+}
+
+static int
+r535_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
@@ -1231,7 +247,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
return 0;
}
-static void
+void
nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{
if (mem->data) {
@@ -1260,7 +276,7 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
* so we take a device reference to ensure its lifetime. The reference is
* dropped in the destructor.
*/
-static int
+int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
@@ -1277,9 +293,10 @@ static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
int ret;
- ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+ ret = rmapi->gsp->get_static_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -1327,7 +344,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
}
- return nvkm_gsp_rpc_wr(gsp, rpc, true);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}
enum registry_type {
@@ -1684,7 +701,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
build_registry(gsp, rpc);
- return nvkm_gsp_rpc_wr(gsp, rpc, false);
+ return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
fail:
clean_registry(gsp);
@@ -1692,7 +709,7 @@ fail:
}
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-static void
+void
r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
{
const guid_t NVOP_DSM_GUID =
@@ -1726,7 +743,7 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
kfree(argv4.buffer.pointer);
}
-static void
+void
r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
{
const guid_t JT_DSM_GUID =
@@ -1818,7 +835,7 @@ r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux
}
}
-static void
+void
r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
{
acpi_status status;
@@ -1871,7 +888,7 @@ r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
}
static int
-r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+r535_gsp_set_system_info(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
@@ -1884,16 +901,16 @@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
if (IS_ERR(info))
return PTR_ERR(info);
- info->gpuPhysAddr = device->func->resource_addr(device, 0);
- info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
- info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
info->maxUserVa = TASK_SIZE;
- info->pciConfigMirrorBase = 0x088000;
- info->pciConfigMirrorSize = 0x001000;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
r535_gsp_acpi_info(gsp, &info->acpiMethodData);
- return nvkm_gsp_rpc_wr(gsp, info, false);
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
}
static int
@@ -1911,33 +928,6 @@ r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
-{
- rpc_rc_triggered_v17_02 *msg = repv;
- struct nvkm_gsp *gsp = priv;
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_chan *chan;
- unsigned long flags;
-
- if (WARN_ON(repc < sizeof(*msg)))
- return -EINVAL;
-
- nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
- msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
- msg->partitionAttributionId);
-
- chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
- if (!chan) {
- nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
- return 0;
- }
-
- nvkm_chan_error(chan, false);
- nvkm_chan_put(&chan, flags);
- return 0;
-}
-
-static int
r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
{
struct nvkm_gsp *gsp = priv;
@@ -2130,97 +1120,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
}
static int
-r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- struct nvkm_device *device = subdev->device;
- u32 wpr2_hi;
- int ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (!wpr2_hi) {
- nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
- return 0;
- }
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (WARN_ON(ret))
- return ret;
-
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- if (WARN_ON(wpr2_hi))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
-{
- int ret;
-
- ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
- if (ret)
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
-
- if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
- return -EIO;
-
- return 0;
-}
-
-static int
-r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
-{
- GspFwWprMeta *meta;
- int ret;
-
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
- if (ret)
- return ret;
-
- meta = gsp->wpr_meta.data;
-
- meta->magic = GSP_FW_WPR_META_MAGIC;
- meta->revision = GSP_FW_WPR_META_REVISION;
-
- meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
- meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
-
- meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
- meta->sizeOfBootloader = gsp->boot.fw.size;
- meta->bootloaderCodeOffset = gsp->boot.code_offset;
- meta->bootloaderDataOffset = gsp->boot.data_offset;
- meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
-
- meta->sysmemAddrOfSignature = gsp->sig.addr;
- meta->sizeOfSignature = gsp->sig.size;
-
- meta->gspFwRsvdStart = gsp->fb.heap.addr;
- meta->nonWprHeapOffset = gsp->fb.heap.addr;
- meta->nonWprHeapSize = gsp->fb.heap.size;
- meta->gspFwWprStart = gsp->fb.wpr2.addr;
- meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
- meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
- meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
- meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
- meta->frtsOffset = gsp->fb.wpr2.frts.addr;
- meta->frtsSize = gsp->fb.wpr2.frts.size;
- meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
- meta->fbSize = gsp->fb.size;
- meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
- meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
- meta->bootCount = 0;
- meta->partitionRpcAddr = 0;
- meta->partitionRpcRequestOffset = 0;
- meta->partitionRpcReplyOffset = 0;
- meta->verified = 0;
- return 0;
-}
-
-static int
r535_gsp_shared_init(struct nvkm_gsp *gsp)
{
struct {
@@ -2271,23 +1170,11 @@ r535_gsp_shared_init(struct nvkm_gsp *gsp)
return 0;
}
-static int
-r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+static void
+r535_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
{
- GSP_ARGUMENTS_CACHED *args;
- int ret;
-
- if (!resume) {
- ret = r535_gsp_shared_init(gsp);
- if (ret)
- return ret;
+ GSP_ARGUMENTS_CACHED *args = gsp->rmargs.data;
- ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
- if (ret)
- return ret;
- }
-
- args = gsp->rmargs.data;
args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
args->messageQueueInitArguments.cmdQueueOffset =
@@ -2304,7 +1191,24 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
args->srInitArguments.flags = 0;
args->srInitArguments.bInPMTransition = 1;
}
+}
+static int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+ int ret;
+
+ if (!resume) {
+ ret = r535_gsp_shared_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+ if (ret)
+ return ret;
+ }
+
+ gsp->rm->api->gsp->set_rmargs(gsp, resume);
return 0;
}
@@ -2797,18 +1701,22 @@ lvl1_fail:
return ret;
}
+static u32
+r535_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ return meta->gspFwWprEnd - meta->gspFwWprStart;
+}
+
int
r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
{
- u32 mbox0 = 0xff, mbox1 = 0xff;
+ struct nvkm_rm *rm = gsp->rm;
int ret;
- if (!gsp->running)
- return 0;
-
if (suspend) {
- GspFwWprMeta *meta = gsp->wpr_meta.data;
- u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+ u32 len = rm->api->gsp->sr_data_size(gsp);
GspFwSRMeta *sr;
ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
@@ -2829,8 +1737,13 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
sr->sizeOfSuspendResumeData = len;
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ ret = rm->api->fbsr->suspend(gsp);
+ if (ret) {
+ nvkm_gsp_mem_dtor(&gsp->sr.meta);
+ nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+ nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+ return ret;
+ }
}
ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
@@ -2838,18 +1751,10 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
return ret;
nvkm_msec(gsp->subdev.device, 2000,
- if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
+ if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000)
break;
);
- nvkm_falcon_reset(&gsp->falcon);
-
- ret = nvkm_gsp_fwsec_sb(gsp);
- WARN_ON(ret);
-
- ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
- WARN_ON(ret);
-
gsp->running = false;
return 0;
}
@@ -2857,23 +1762,12 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
int
r535_gsp_init(struct nvkm_gsp *gsp)
{
- u32 mbox0, mbox1;
int ret;
- if (!gsp->sr.meta.data) {
- mbox0 = lower_32_bits(gsp->wpr_meta.addr);
- mbox1 = upper_32_bits(gsp->wpr_meta.addr);
- } else {
- r535_gsp_rmargs_init(gsp, true);
-
- mbox0 = lower_32_bits(gsp->sr.meta.addr);
- mbox1 = upper_32_bits(gsp->sr.meta.addr);
- }
+ nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
- /* Execute booter to handle (eventually...) booting GSP-RM. */
- ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
- if (WARN_ON(ret))
- goto done;
+ if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+ return -EIO;
ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
if (ret)
@@ -2883,6 +1777,8 @@ r535_gsp_init(struct nvkm_gsp *gsp)
done:
if (gsp->sr.meta.data) {
+ gsp->rm->api->fbsr->resume(gsp);
+
nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
@@ -2944,19 +1840,6 @@ r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u
return -ENOENT;
}
-static void
-r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
-{
- nvkm_firmware_put(gsp->fws.bl);
- gsp->fws.bl = NULL;
- nvkm_firmware_put(gsp->fws.booter.unload);
- gsp->fws.booter.unload = NULL;
- nvkm_firmware_put(gsp->fws.booter.load);
- gsp->fws.booter.load = NULL;
- nvkm_firmware_put(gsp->fws.rm);
- gsp->fws.rm = NULL;
-}
-
#ifdef CONFIG_DEBUG_FS
struct r535_gsp_log {
@@ -3190,10 +2073,16 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_falcon_fw_dtor(&gsp->booter.unload);
nvkm_falcon_fw_dtor(&gsp->booter.load);
+ nvkm_gsp_mem_dtor(&gsp->fmc.args);
+ kfree(gsp->fmc.sig);
+ kfree(gsp->fmc.pkey);
+ kfree(gsp->fmc.hash);
+ nvkm_gsp_mem_dtor(&gsp->fmc.fw);
+
mutex_destroy(&gsp->msgq.mutex);
mutex_destroy(&gsp->cmdq.mutex);
- r535_gsp_dtor_fws(gsp);
+ nvkm_gsp_dtor_fws(gsp);
nvkm_gsp_mem_dtor(&gsp->rmargs);
nvkm_gsp_mem_dtor(&gsp->wpr_meta);
@@ -3206,10 +2095,17 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
nvkm_gsp_mem_dtor(&gsp->logrm);
}
+static void
+r535_gsp_drop_send_user_shared_data(struct nvkm_gsp *gsp)
+{
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+}
+
int
r535_gsp_oneinit(struct nvkm_gsp *gsp)
{
struct nvkm_device *device = gsp->subdev.device;
+ const struct nvkm_rm_api *rmapi = gsp->rm->api;
const u8 *data;
u64 size;
int ret;
@@ -3217,16 +2113,6 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
mutex_init(&gsp->cmdq.mutex);
mutex_init(&gsp->msgq.mutex);
- ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
- &device->sec2->falcon, &gsp->booter.load);
- if (ret)
- return ret;
-
- ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
- &device->sec2->falcon, &gsp->booter.unload);
- if (ret)
- return ret;
-
/* Load GSP firmware from ELF image into DMA-accessible memory. */
ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
if (ret)
@@ -3255,65 +2141,29 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
r535_gsp_msg_run_cpu_sequencer, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
- r535_gsp_msg_rc_triggered, gsp);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, rmapi->fifo->rc_triggered, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
- r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
+ if (rmapi->gsp->drop_send_user_shared_data)
+ rmapi->gsp->drop_send_user_shared_data(gsp);
+ if (rmapi->gsp->drop_post_nocat_record)
+ rmapi->gsp->drop_post_nocat_record(gsp);
+
ret = r535_gsp_rm_boot_ctor(gsp);
if (ret)
return ret;
/* Release FW images - we've copied them to DMA buffers now. */
- r535_gsp_dtor_fws(gsp);
-
- /* Calculate FB layout. */
- gsp->fb.wpr2.frts.size = 0x100000;
- gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
-
- gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
- gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
-
- gsp->fb.wpr2.elf.size = gsp->fw.len;
- gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
-
- {
- u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
-
- gsp->fb.wpr2.heap.size =
- gsp->func->wpr_heap.os_carveout_size +
- gsp->func->wpr_heap.base_size +
- ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
- ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
-
- gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
- }
-
- gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
- gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
-
- gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
- gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
-
- gsp->fb.heap.size = 0x100000;
- gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
-
- ret = nvkm_gsp_fwsec_frts(gsp);
- if (WARN_ON(ret))
- return ret;
+ nvkm_gsp_dtor_fws(gsp);
ret = r535_gsp_libos_init(gsp);
if (WARN_ON(ret))
return ret;
- ret = r535_gsp_wpr_meta_init(gsp);
- if (WARN_ON(ret))
- return ret;
-
- ret = r535_gsp_rpc_set_system_info(gsp);
+ ret = rmapi->gsp->set_system_info(gsp);
if (WARN_ON(ret))
return ret;
@@ -3321,76 +2171,17 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
if (WARN_ON(ret))
return ret;
- /* Reset GSP into RISC-V mode. */
- ret = gsp->func->reset(gsp);
- if (WARN_ON(ret))
- return ret;
-
- nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
- nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
-
mutex_init(&gsp->client_id.mutex);
idr_init(&gsp->client_id.idr);
return 0;
}
-static int
-r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
- const struct firmware **pfw)
-{
- char fwname[64];
-
- snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
- return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
-}
-
-int
-r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
-{
- struct nvkm_subdev *subdev = &gsp->subdev;
- int ret;
- bool enable_gsp = fwif->enable;
-
-#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
- enable_gsp = true;
-#endif
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
- return -EINVAL;
-
- if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
- (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
- (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
- r535_gsp_dtor_fws(gsp);
- return ret;
- }
-
- return 0;
-}
-
-#define NVKM_GSP_FIRMWARE(chip) \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \
-MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")
-
-NVKM_GSP_FIRMWARE(tu102);
-NVKM_GSP_FIRMWARE(tu104);
-NVKM_GSP_FIRMWARE(tu106);
-
-NVKM_GSP_FIRMWARE(tu116);
-NVKM_GSP_FIRMWARE(tu117);
-
-NVKM_GSP_FIRMWARE(ga100);
-
-NVKM_GSP_FIRMWARE(ga102);
-NVKM_GSP_FIRMWARE(ga103);
-NVKM_GSP_FIRMWARE(ga104);
-NVKM_GSP_FIRMWARE(ga106);
-NVKM_GSP_FIRMWARE(ga107);
-
-NVKM_GSP_FIRMWARE(ad102);
-NVKM_GSP_FIRMWARE(ad103);
-NVKM_GSP_FIRMWARE(ad104);
-NVKM_GSP_FIRMWARE(ad106);
-NVKM_GSP_FIRMWARE(ad107);
+const struct nvkm_rm_api_gsp
+r535_gsp = {
+ .set_rmargs = r535_gsp_set_rmargs,
+ .set_system_info = r535_gsp_set_system_info,
+ .get_static_info = r535_gsp_get_static_info,
+ .xlat_mc_engine_idx = r535_gsp_xlat_mc_engine_idx,
+ .drop_send_user_shared_data = r535_gsp_drop_send_user_shared_data,
+ .sr_data_size = r535_gsp_sr_data_size,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
index 7bfa6240d283..a8c42ec0367b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvdec.c
@@ -19,28 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "gf100.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/nvdec.h"
-#include <nvif/class.h>
+static int
+r535_nvdec_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvdec)
+{
+ NV_BSP_ALLOCATION_PARAMETERS *args;
-static const struct gf100_gr_func
-ad102_gr = {
- .sclass = {
- { -1, -1, FERMI_TWOD_A },
- { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
- { -1, -1, ADA_A },
- { -1, -1, ADA_COMPUTE_A },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvdec);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_gr_new(&ad102_gr, device, type, inst, pgr);
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(nvdec, args);
}
+
+const struct nvkm_rm_api_engine
+r535_nvdec = {
+ .alloc = r535_nvdec_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
index 932934227b9c..acb3ce8bb9de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvenc.c
@@ -19,26 +19,27 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/nvenc.h"
-#include <nvif/class.h>
+static int
+r535_nvenc_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvenc)
+{
+ NV_MSENC_ALLOCATION_PARAMETERS *args;
-static const struct nvkm_engine_func
-ga100_nvdec = {
- .sclass = {
- { -1, -1, NVC6B0_VIDEO_DECODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvenc);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec);
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(nvenc, args);
}
+
+const struct nvkm_rm_api_engine
+r535_nvenc = {
+ .alloc = r535_nvenc_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
new file mode 100644
index 000000000000..fbc4080ad8d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvjpg.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/nvjpg.h"
+
+static int
+r535_nvjpg_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *nvjpg)
+{
+ NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), nvjpg);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(nvjpg, args);
+}
+
+const struct nvkm_rm_api_engine
+r535_nvjpg = {
+ .alloc = r535_nvjpg_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
new file mode 100644
index 000000000000..cbc7e611fbda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/alloc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ALLOC_H__
+#define __NVRM_ALLOC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+ NvHandle hClient;
+ NvHandle hParent;
+ NvHandle hObject;
+ NvU32 hClass;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 reserved[4];
+ NvU8 params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+ NvHandle hRoot;
+ NvHandle hObjectParent;
+ NvHandle hObjectOld;
+ NvV32 status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct rpc_free_v03_00
+{
+ NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
+#endif
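
These excerpted structures match the fields populated by r535_gsp_rpc_rm_alloc_get() and r535_gsp_rpc_rm_free() earlier in this patch; in particular, the wire size of an alloc RPC is the fixed header plus the class-specific parameters carried in the flexible params[] array. A trivial sketch of that sizing:

/* Sketch: wire size of an RM_ALLOC RPC carrying params_size bytes,
 * matching the sizeof(*rpc) + params_size allocation in the driver. */
static u32 rm_alloc_rpc_size(u32 params_size)
{
	return sizeof(rpc_gsp_rm_alloc_v03_00) + params_size;
}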
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
new file mode 100644
index 000000000000..60b0b08491ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/bar.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_BAR_H__
+#define __NVRM_BAR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef enum
+{
+ NV_RPC_UPDATE_PDE_BAR_1,
+ NV_RPC_UPDATE_PDE_BAR_2,
+ NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct UpdateBarPde_v15_00
+{
+ NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+ NvU64 entryValue NV_ALIGN_BYTES(8);
+ NvU64 entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
+
+typedef struct rpc_update_bar_pde_v15_00
+{
+ UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
new file mode 100644
index 000000000000..90b0325203d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ce.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CE_H__
+#define __NVRM_CE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct NVC0B5_ALLOCATION_PARAMETERS {
+ NvU32 version;
+ NvU32 engineType;
+} NVC0B5_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
new file mode 100644
index 000000000000..df0e63c0cb6b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/client.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
new file mode 100644
index 000000000000..77f10acd82c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ctrl.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CTRL_H__
+#define __NVRM_CTRL_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+ NvHandle hClient;
+ NvHandle hObject;
+ NvU32 cmd;
+ NvU32 status;
+ NvU32 paramsSize;
+ NvU32 flags;
+ NvU8 params[];
+} rpc_gsp_rm_control_v03_00;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
new file mode 100644
index 000000000000..3933b9ad61ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DEVICE_H__
+#define __NVRM_DEVICE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_DEVICE_0 (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0080_ALLOC_PARAMETERS {
+ NvU32 deviceId;
+ NvHandle hClientShare;
+ NvHandle hTargetClient;
+ NvHandle hTargetDevice;
+ NvV32 flags;
+ NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
+ NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
+ NvV32 vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#define NV20_SUBDEVICE_0 (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV2080_ALLOC_PARAMETERS {
+ NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
new file mode 100644
index 000000000000..7b7539639540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/disp.h
@@ -0,0 +1,741 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+ NvU32 instMemAddrSpace;
+ NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV_MEMORY_WRITECOMBINED 2
+
+#define NV04_DISPLAY_COMMON (0x00000073)
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvBool bPrimaryVga;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+ NvU32 status;
+ NvU16 backLightDataSize;
+ NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+ NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 index;
+ NvU32 type;
+ NvU32 protocol;
+ NvU32 ditherType;
+ NvU32 ditherAlgo;
+ NvU32 location;
+ NvU32 rootPortId;
+ NvU32 dcbIndex;
+ NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+ NvBool bIsLitByVbios;
+ NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupported;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
+
+#define NV2080_NOTIFIERS_HOTPLUG (1)
+
+typedef struct {
+ NvU32 plugDisplayMask;
+ NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+#define NV2080_NOTIFIERS_DP_IRQ (7)
+
+typedef struct Nv2080DpIrqNotificationRec {
+ NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+ NvU32 displayMask;
+ NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 sorExcludeMask;
+ NvU32 slaveDisplayId;
+ NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+ NvBool bIs2Head1Or;
+ NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+ NvU8 reservedSorMask;
+ NvU32 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numELDSize;
+ NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+ NvU32 maxFreqSupported;
+ NvU32 ctrl;
+ NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U)
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 bufferSize;
+ NvU32 flags;
+ NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 transmitControl;
+ NvU32 packetSize;
+ NvU32 targetHead;
+ NvBool bUsePsrHeadforSdp;
+ NvU8 aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+ NvU8 subDeviceInstance;
+ NvU32 displayId;
+ NvU8 mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool bAddrOnly;
+ NvU32 cmd;
+ NvU32 addr;
+ NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+ NvU32 size;
+ NvU32 replyType;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U)
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED 3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET 22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U)
+
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_LANES 8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 numLanes;
+ NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 preferredDisplayId;
+
+ NvBool force;
+ NvBool useBFM;
+
+ NvU32 displayIdAssigned;
+ NvU32 allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvU32 actualPclkHz; // deprecated - use MvidWarParams
+ NvU32 linkClkFreqHz; // deprecated - use MvidWarParams
+ NvBool bEnableAudioOverRightPanel;
+ struct {
+ NvU32 activeCnt;
+ NvU32 activeFrac;
+ NvU32 activePolarity;
+ NvBool mvidWarEnabled;
+ struct {
+ NvU32 actualPclkHz;
+ NvU32 linkClkFreqHz;
+ } MvidWarParams;
+ } Legacy;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1) // System memory (PCI)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // All PIO channels have two instances (one per head).
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+#endif
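A note on the field defines used throughout this header: entries such as NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0 follow NVIDIA's DRF convention, where the macro body is a literal high:low bit range and a ternary trick splits it, (1 ? 2:0) evaluating to the high bit and (0 ? 2:0) to the low bit. A self-contained getter in that style (not part of the patch; nouveau itself uses the NVVAL()/NVDEF() helpers from <nvhw/drf.h>, so the macro names below are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Split a "high:low" range token with the classic ternary trick. */
    #define DRF_HI(r)     (1 ? r)                    /* value before the ':' */
    #define DRF_LO(r)     (0 ? r)                    /* value after the ':'  */
    #define DRF_MASK(r)   ((~0u >> (31 - DRF_HI(r))) & (~0u << DRF_LO(r)))
    #define DRF_GET(v, r) (((v) & DRF_MASK(r)) >> DRF_LO(r))

    #define NV0073_CTRL_DFP_FLAGS_SIGNAL             2:0
    #define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT 0x00000003u

    int main(void)
    {
            uint32_t flags = 0x0000000bu;  /* hypothetical DFP_GET_INFO flags word */

            if (DRF_GET(flags, NV0073_CTRL_DFP_FLAGS_SIGNAL) ==
                NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT)
                    printf("signal type: DisplayPort\n");
            return 0;
    }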
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
new file mode 100644
index 000000000000..b26dfc8f8087
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/engine.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE9
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_MSENC 38
+#define MC_ENGINE_IDX_MSENC1 39
+#define MC_ENGINE_IDX_MSENC2 40
+#define MC_ENGINE_IDX_C2C 41
+#define MC_ENGINE_IDX_LTC 42
+#define MC_ENGINE_IDX_FBHUB 43
+#define MC_ENGINE_IDX_HDACODEC 44
+#define MC_ENGINE_IDX_GMMU 45
+#define MC_ENGINE_IDX_SEC2 46
+#define MC_ENGINE_IDX_FSP 47
+#define MC_ENGINE_IDX_NVLINK 48
+#define MC_ENGINE_IDX_GSP 49
+#define MC_ENGINE_IDX_NVJPG 50
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 51
+#define MC_ENGINE_IDX_NVJPEG2 52
+#define MC_ENGINE_IDX_NVJPEG3 53
+#define MC_ENGINE_IDX_NVJPEG4 54
+#define MC_ENGINE_IDX_NVJPEG5 55
+#define MC_ENGINE_IDX_NVJPEG6 56
+#define MC_ENGINE_IDX_NVJPEG7 57
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 58
+#define MC_ENGINE_IDX_ACCESS_CNTR 59
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 60
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 61
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_INFO_FAULT 63
+#define MC_ENGINE_IDX_BSP 64
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 65
+#define MC_ENGINE_IDX_NVDEC2 66
+#define MC_ENGINE_IDX_NVDEC3 67
+#define MC_ENGINE_IDX_NVDEC4 68
+#define MC_ENGINE_IDX_NVDEC5 69
+#define MC_ENGINE_IDX_NVDEC6 70
+#define MC_ENGINE_IDX_NVDEC7 71
+#define MC_ENGINE_IDX_CPU_DOORBELL 72
+#define MC_ENGINE_IDX_PRIV_DOORBELL 73
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 74
+#define MC_ENGINE_IDX_BLG 75
+#define MC_ENGINE_IDX_PERFMON 76
+#define MC_ENGINE_IDX_BUF_RESET 77
+#define MC_ENGINE_IDX_XBAR 78
+#define MC_ENGINE_IDX_ZPW 79
+#define MC_ENGINE_IDX_OFA0 80
+#define MC_ENGINE_IDX_TEGRA 81
+#define MC_ENGINE_IDX_GR 82
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 83
+#define MC_ENGINE_IDX_GR2 84
+#define MC_ENGINE_IDX_GR3 85
+#define MC_ENGINE_IDX_GR4 86
+#define MC_ENGINE_IDX_GR5 87
+#define MC_ENGINE_IDX_GR6 88
+#define MC_ENGINE_IDX_GR7 89
+#define MC_ENGINE_IDX_ESCHED 90
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 154
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 155
+#define MC_ENGINE_IDX_GR2_FECS_LOG 156
+#define MC_ENGINE_IDX_GR3_FECS_LOG 157
+#define MC_ENGINE_IDX_GR4_FECS_LOG 158
+#define MC_ENGINE_IDX_GR5_FECS_LOG 159
+#define MC_ENGINE_IDX_GR6_FECS_LOG 160
+#define MC_ENGINE_IDX_GR7_FECS_LOG 161
+#define MC_ENGINE_IDX_TMR_SWRL 162
+#define MC_ENGINE_IDX_DISP_GSP 163
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 164
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 165
+#define MC_ENGINE_IDX_PXUC 166
+#define MC_ENGINE_IDX_MAX 167 // This must be kept as the max bit if we need to add more engines
+#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF
+#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x))
+#define MC_ENGINE_IDX_GRn_FECS_LOG(x) (MC_ENGINE_IDX_GR0_FECS_LOG + (x))
+#define MC_ENGINE_IDX_CE(x) (MC_ENGINE_IDX_CE0 + (x))
+#define MC_ENGINE_IDX_MSENCn(x) (MC_ENGINE_IDX_MSENC + (x))
+#define MC_ENGINE_IDX_NVDECn(x) (MC_ENGINE_IDX_NVDEC + (x))
+#define MC_ENGINE_IDX_NVJPEGn(x) (MC_ENGINE_IDX_NVJPEG + (x))
+#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x))
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ RM_ENGINE_TYPE_VP = (0x00000028),
+ RM_ENGINE_TYPE_ME = (0x00000029),
+ RM_ENGINE_TYPE_PPP = (0x0000002a),
+ RM_ENGINE_TYPE_MPEG = (0x0000002b),
+ RM_ENGINE_TYPE_SW = (0x0000002c),
+ RM_ENGINE_TYPE_TSEC = (0x0000002d),
+ RM_ENGINE_TYPE_VIC = (0x0000002e),
+ RM_ENGINE_TYPE_MP = (0x0000002f),
+ RM_ENGINE_TYPE_SEC2 = (0x00000030),
+ RM_ENGINE_TYPE_HOST = (0x00000031),
+ RM_ENGINE_TYPE_DPU = (0x00000032),
+ RM_ENGINE_TYPE_PMU = (0x00000033),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000034),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003c),
+ RM_ENGINE_TYPE_OFA = (0x0000003d),
+ RM_ENGINE_TYPE_LAST = (0x0000003e),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_LAST (0x0000003e)
+#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff)
+#define NV2080_ENGINE_TYPE_COPY_SIZE 10
+#define NV2080_ENGINE_TYPE_NVENC_SIZE 3
+#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 8
+#define NV2080_ENGINE_TYPE_NVDEC_SIZE 8
+#define NV2080_ENGINE_TYPE_GR_SIZE 8
+#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i))
+#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) <= NV2080_ENGINE_TYPE_COPY9))
+#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0)
+#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0)
+#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE)))
+#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0)
+#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i))
+#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE)))
+#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0)
+#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i))
+#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE)))
+#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0)
+#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST)))
+#endif
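The NV2080_ENGINE_TYPE_* range macros at the end of the header come in classifier/index pairs: the IS_*() macro tests membership and the *_IDX() macro recovers the per-class instance number. A small usage sketch restating just the copy-engine subset (not part of the patch):

    #include <stdio.h>

    #define NV2080_ENGINE_TYPE_COPY0       (0x00000009)
    #define NV2080_ENGINE_TYPE_COPY9       (0x00000012)
    #define NV2080_ENGINE_TYPE_COPY(i)     (NV2080_ENGINE_TYPE_COPY0 + (i))
    #define NV2080_ENGINE_TYPE_IS_COPY(i)  (((i) >= NV2080_ENGINE_TYPE_COPY0) && \
                                            ((i) <= NV2080_ENGINE_TYPE_COPY9))
    #define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0)

    int main(void)
    {
            unsigned int engine = NV2080_ENGINE_TYPE_COPY(3);     /* CE3 */

            if (NV2080_ENGINE_TYPE_IS_COPY(engine))
                    printf("copy engine, index %u\n",
                           NV2080_ENGINE_TYPE_COPY_IDX(engine));  /* prints 3 */
            return 0;
    }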
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
new file mode 100644
index 000000000000..057f7220c225
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/event.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_EVENT_H__
+#define __NVRM_EVENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+ NvHandle hParentClient;
+ NvHandle hSrcResource;
+
+ NvV32 hClass;
+ NvV32 notifyIndex;
+ NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#define NV01_EVENT_CLIENT_RM (0x04000000)
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+ NvU32 event;
+ NvU32 action;
+ NvBool bNotifyState;
+ NvU32 info32;
+ NvU16 info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
+
+typedef struct rpc_post_event_v17_00
+{
+ NvHandle hClient;
+ NvHandle hEvent;
+ NvU32 notifyIndex;
+ NvU32 data;
+ NvU16 info16;
+ NvU32 status;
+ NvU32 eventDataSize;
+ NvBool bNotifyList;
+ NvU8 eventData[];
+} rpc_post_event_v17_00;
+#endif
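Like the control RPC, rpc_post_event_v17_00 ends in a flexible array, so a receiver should confirm that eventDataSize fits inside the buffer it actually received before reading eventData[]. A hedged validation sketch (not part of the patch; post_event_valid() is a hypothetical helper and the typedefs stand in for <nvrm/nvtypes.h>):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint8_t  NvU8;
    typedef uint8_t  NvBool;
    typedef uint16_t NvU16;
    typedef uint32_t NvU32;
    typedef NvU32    NvHandle;

    typedef struct rpc_post_event_v17_00 {
            NvHandle hClient;
            NvHandle hEvent;
            NvU32 notifyIndex;
            NvU32 data;
            NvU16 info16;
            NvU32 status;
            NvU32 eventDataSize;
            NvBool bNotifyList;
            NvU8 eventData[];
    } rpc_post_event_v17_00;

    /* True if the claimed payload length fits inside the received buffer. */
    static bool post_event_valid(const void *buf, size_t len)
    {
            const rpc_post_event_v17_00 *ev = buf;

            return len >= sizeof(*ev) &&
                   len - sizeof(*ev) >= ev->eventDataSize;
    }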
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
new file mode 100644
index 000000000000..28786ef013a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fbsr.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV01_MEMORY_LIST_FBMEM (0x00000082)
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NVOS02_FLAGS_PHYSICALITY 7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001)
+#define NVOS02_FLAGS_LOCATION 11:8
+#define NVOS02_FLAGS_LOCATION_PCI (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002)
+#define NVOS02_FLAGS_COHERENCY 15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005)
+#define NVOS02_FLAGS_ALLOC 17:16
+#define NVOS02_FLAGS_ALLOC_NONE (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001)
+#define NVOS02_FLAGS_KERNEL_MAPPING 19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001)
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION 26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED (0x00000002)
+#define NVOS02_FLAGS_MAPPING 31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
+
+struct pte_desc
+{
+ NvU32 idr:2;
+ NvU32 reserved1:14;
+ NvU32 length:16;
+ union {
+ NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
+ NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
+ } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
+};
+
+typedef struct rpc_alloc_memory_v13_01
+{
+ NvHandle hClient;
+ NvHandle hDevice;
+ NvHandle hMemory;
+ NvU32 hClass;
+ NvU32 flags;
+ NvU32 pteAdjust;
+ NvU32 format;
+ NvU64 length NV_ALIGN_BYTES(8);
+ NvU32 pageCount;
+ struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
+
+#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest.
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvU32 fbsrType;
+ NvU32 numRegions;
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+ NvBool bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+ NvU32 fbsrType;
+ NvHandle hClient;
+ NvHandle hVidMem;
+ NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
+#endif
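Structurally, pte_desc packs a 2-bit level tag and a 16-bit entry count into a single 32-bit word, followed by 8-byte-aligned PTE/PDE words in a flexible array; rpc_alloc_memory then embeds it as its trailing member. A sketch of the size arithmetic (not part of the patch; a GCC alignment attribute stands in for NV_ALIGN_BYTES(8)):

    #include <stddef.h>
    #include <stdint.h>

    struct pte_desc {
            uint32_t idr:2;         /* 0 = PTEs follow, >0 = PDEs */
            uint32_t reserved1:14;
            uint32_t length:16;     /* number of pte_pde[] entries */
            union {
                    uint64_t pte;
                    uint64_t pde;
            } pte_pde[] __attribute__((aligned(8)));  /* NV_ALIGN_BYTES(8) upstream */
    };

    /* Bytes occupied by a pte_desc describing npages mappings. */
    static size_t pte_desc_size(uint16_t npages)
    {
            return sizeof(struct pte_desc) +
                   (size_t)npages * sizeof(((struct pte_desc *)0)->pte_pde[0]);
    }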
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
new file mode 100644
index 000000000000..325fdd8b6090
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/fifo.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+ NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+ NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+ NvU32 numPbdmas;
+ char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+ NvU32 baseIndex;
+ NvU32 numEntries;
+ NvBool bMore;
+ // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+ NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
+typedef enum
+{
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+
+ // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
+ ENGINE_INFO_TYPE_ENG_DESC = 0,
+
+ // HW engine ID
+ ENGINE_INFO_TYPE_FIFO_TAG,
+
+ // RM_ENGINE_TYPE_*
+ ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
+
+ //
+ // runlist id (meaning varies by GPU)
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST,
+
+ // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
+ ENGINE_INFO_TYPE_MMU_FAULT_ID,
+
+ // ROBUST_CHANNEL_*
+ ENGINE_INFO_TYPE_RC_MASK,
+
+ // Reset Bit Position. On Ampere, only valid if not _INVALID
+ ENGINE_INFO_TYPE_RESET,
+
+ // Interrupt Bit Position
+ ENGINE_INFO_TYPE_INTR,
+
+ // log2(MC_ENGINE_*)
+ ENGINE_INFO_TYPE_MC,
+
+ // The DEV_TYPE_ENUM for this engine
+ ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
+
+ // The particular instance of this engine type
+ ENGINE_INFO_TYPE_INSTANCE_ID,
+
+ //
+ // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
+
+ //
+ // If this entry is a host-driven engine.
+ // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
+ //
+ ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
+
+ //
+ // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
+
+ //
+ // The base address for this engine's NV_CHRAM registers. Valid only on
+ // Ampere+
+ //
+ // Valid only for Esched-driven engines
+ //
+ ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
+
+ // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
+ ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+ // Used for iterating the engine info table by the index passed.
+ ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+
+ // Size of FIFO_ENGINE_LIST.engineData
+ ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
+
+ // Input-only parameter for kfifoEngineInfoXlate.
+ ENGINE_INFO_TYPE_PBDMA_ID
+
+ /* *************************************************************************
+ * Bug 3820969
+ * THINK BEFORE CHANGING ENUM ORDER HERE.
+ * VGPU-guest uses this same ordering. Because this enum is not versioned,
+ * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+ * ************************************************************************/
+} ENGINE_INFO_TYPE;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+ NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+#define NV_MAX_SUBDEVICES 8
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 addressSpace;
+ NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of a RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // Nonce used CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
+#define NVOS04_FLAGS_VPR 2:2
+#define NVOS04_FLAGS_VPR_FALSE 0x00000000
+#define NVOS04_FLAGS_VPR_TRUE 0x00000001
+#define NVOS04_FLAGS_CC_SECURE 2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
+#define NVOS04_FLAGS_MAP_CHANNEL 30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
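+
+/*
+ * Packing sketch: each unsuffixed NVOS04_FLAGS_* define above names a
+ * "HI:LO" bit range within the 32-bit flags word, and the suffixed
+ * defines are values for that range. With nouveau's <nvhw/drf.h>
+ * helpers this is typically written as (illustrative):
+ *
+ *   args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ *   args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
+ */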
+
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+ NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+ NvBool bEnable;
+ NvBool bSkipSubmit;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16U
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+ NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 physAttr;
+ NvU16 bufferId;
+ NvU8 bInitialize;
+ NvU8 bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+ NvHandle hVirtMemory;
+ NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 entryCount;
+ // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+} rpc_rc_triggered_v17_02;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
index 6acb3f73242d..82c5ec727bb4 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gr.h
@@ -1,30 +1,31 @@
-#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
-#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
@@ -54,4 +55,19 @@
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019)
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12U
+
+#include "fifo.h"
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
new file mode 100644
index 000000000000..b6683a5bf870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/gsp.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[4];
+ char chipSKUMod[2];
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+typedef enum
+{
+ COMPUTE_BRANDING_TYPE_NONE,
+ COMPUTE_BRANDING_TYPE_TESLA,
+} COMPUTE_BRANDING_TYPE;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+typedef struct GspSMInfo_t
+{
+ NvU32 version;
+ NvU32 regBankCount;
+ NvU32 regBankRegCount;
+ NvU32 maxWarpsPerSM;
+ NvU32 maxThreadsPerWarp;
+ NvU32 geomGsObufEntries;
+ NvU32 geomXbufEntries;
+ NvU32 maxSPPerSM;
+ NvU32 rtCoreCount;
+} GspSMInfo;
+
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+ NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+ COMPUTE_BRANDING_TYPE computeBranding;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ GspSMInfo SM_info;
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU32 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU32 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+ NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+} GspStaticConfigInfo;
+
+typedef struct rpc_unloading_guest_driver_v1F_07
+{
+ NvBool bInPMTransition;
+ NvBool bGc6Entering;
+ NvU32 newLevel;
+} rpc_unloading_guest_driver_v1F_07;
+
+typedef struct PACKED_REGISTRY_ENTRY
+{
+ NvU32 nameOffset;
+ NvU8 type;
+ NvU32 data;
+ NvU32 length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct PACKED_REGISTRY_TABLE
+{
+ NvU32 size;
+ NvU32 numEntries;
+ PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
+} PACKED_REGISTRY_TABLE;
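+
+/*
+ * Sizing sketch (illustrative): a packed registry blob is the table
+ * header, numEntries entry records, then a string/data pool that
+ * nameOffset (and, for non-DWORD entry types, data) index into:
+ *
+ *   size = sizeof(PACKED_REGISTRY_TABLE) +
+ *          num * sizeof(PACKED_REGISTRY_ENTRY) + pool_bytes;
+ */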
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bMnocAvailable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+} rpc_os_error_log_v17_00;
+
+typedef struct rpc_run_cpu_sequencer_v17_00
+{
+ NvU32 bufferSizeDWord;
+ NvU32 cmdIndex;
+ NvU32 regSaveArea[8];
+ NvU32 commandBuffer[];
+} rpc_run_cpu_sequencer_v17_00;
+
+typedef enum GSP_SEQ_BUF_OPCODE
+{
+ GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
+ GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ GSP_SEQ_BUF_OPCODE_REG_POLL,
+ GSP_SEQ_BUF_OPCODE_DELAY_US,
+ GSP_SEQ_BUF_OPCODE_REG_STORE,
+ GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ GSP_SEQ_BUF_OPCODE_CORE_START,
+ GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+ GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+} GSP_SEQ_BUF_OPCODE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 mask;
+ NvU32 val;
+ NvU32 timeout;
+ NvU32 error;
+} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
+
+typedef struct
+{
+ NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
+
+typedef struct
+{
+ NvU32 addr;
+ NvU32 index;
+} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
+
+typedef struct GSP_SEQUENCER_BUFFER_CMD
+{
+ GSP_SEQ_BUF_OPCODE opCode;
+ union
+ {
+ GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+ GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+ GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+ GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+ GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+ } payload;
+} GSP_SEQUENCER_BUFFER_CMD;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of Booter-BL-Sequencer handoff interface
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to setup heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different comm. mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code section and dataSection offset and size.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[7];
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
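+
+/*
+ * Validation sketch (illustrative): a consumer reading GspFwWprMeta
+ * back from FB would check the handoff constants before trusting it:
+ *
+ *   if (meta->magic != GSP_FW_WPR_META_MAGIC ||
+ *       meta->revision != GSP_FW_WPR_META_REVISION)
+ *       return -EINVAL;
+ */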
+
+typedef struct
+{
+ NvU32 version; // queue version
+ NvU32 size; // bytes, page aligned
+ NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
+ NvU32 msgCount; // number of entries in queue
+ NvU32 writePtr; // message id of next slot
+ NvU32 flags; // if set it means "I want to swap RX"
+ NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
+ NvU32 entryOff; // Offset of entries from start of backing store.
+} msgqTxHeader;
+
+typedef struct
+{
+ NvU32 readPtr; // message id of last message read
+} msgqRxHeader;
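+
+/*
+ * Occupancy sketch (assumption: writePtr and readPtr are element
+ * indices that wrap at msgCount):
+ *
+ *   used = (tx->writePtr + tx->msgCount - rx->readPtr) % tx->msgCount;
+ *   free = tx->msgCount - 1 - used;
+ *
+ * keeping one slot empty is a common ring convention to tell a full
+ * queue from an empty one.
+ */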
+
+typedef struct {
+ RmPhysAddr sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+ NvLength locklessCmdQueueOffset;
+ NvLength locklessStatQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef NvU64 LibosAddress;
+
+typedef struct
+{
+ LibosAddress id8; // Id tag.
+ LibosAddress pa; // Physical address.
+ LibosAddress size; // Size of memory area.
+ NvU8 kind; // See LibosMemoryRegionKind below.
+ NvU8 loc; // See LibosMemoryRegionLoc below.
+} LibosMemoryRegionInitArgument;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_NONE,
+ LIBOS_MEMORY_REGION_CONTIGUOUS,
+ LIBOS_MEMORY_REGION_RADIX3
+} LibosMemoryRegionKind;
+
+typedef enum {
+ LIBOS_MEMORY_REGION_LOC_NONE,
+ LIBOS_MEMORY_REGION_LOC_SYSMEM,
+ LIBOS_MEMORY_REGION_LOC_FB
+} LibosMemoryRegionLoc;
+
+typedef struct
+{
+ //
+ // Magic
+ // Use for verification by Booter
+ //
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped up when we change this interface so it is not backward compatible.
+ // Bumped up when we revoke GSP-RM ucode
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ //
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+ //
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ // ---- Members for crypto ops across S/R ---------------------------
+
+ //
+ // HMAC over the entire GspFwSRMeta structure (including padding)
+ // with the hmac field itself zeroed.
+ //
+ NvU8 hmac[32];
+
+ // Hash over GspFwWprMeta structure
+ NvU8 wprMetaHash[32];
+
+ // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
+ NvU8 heapFreeListHash[32];
+
+ // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
+ NvU8 dataHash[32];
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[24];
+
+} GspFwSRMeta;
+
+#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
+
+#define GSP_FW_SR_META_REVISION 2
+
+#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
+ ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
+ (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_START */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
+ /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
+ 0)
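+
+/*
+ * Walk sketch (illustrative): a sequencer buffer is a stream of opcode
+ * DWORDs, each followed by that opcode's payload DWORDs, so a consumer
+ * of rpc_run_cpu_sequencer_v17_00 can step through it as:
+ *
+ *   NvU32 i = 0;
+ *   while (i < rpc->bufferSizeDWord) {
+ *       GSP_SEQ_BUF_OPCODE op = rpc->commandBuffer[i++];
+ *       ...dispatch op, payload at &rpc->commandBuffer[i]...
+ *       i += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(op);
+ *   }
+ */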
+
+typedef struct {
+ //
+ // Version 1
+ // Version 2
+ // Version 3 = for Partition boot
+ // Version 4 = for eb riscv boot
+ // Version 5 = Support signing entire RISC-V image as "code" in code section for Hopper and later.
+ //
+ NvU32 version; // structure version
+ NvU32 bootloaderOffset;
+ NvU32 bootloaderSize;
+ NvU32 bootloaderParamOffset;
+ NvU32 bootloaderParamSize;
+ NvU32 riscvElfOffset;
+ NvU32 riscvElfSize;
+ NvU32 appVersion; // Changelist number associated with the image
+ //
+ // Manifest contains information about Monitor and it is
+ // input to BR
+ //
+ NvU32 manifestOffset;
+ NvU32 manifestSize;
+ //
+ // Monitor Data offset within RISCV image and size
+ //
+ NvU32 monitorDataOffset;
+ NvU32 monitorDataSize;
+ //
+ // Monitor Code offset within RISCV image and size
+ //
+ NvU32 monitorCodeOffset;
+ NvU32 monitorCodeSize;
+ NvU32 bIsMonitorEnabled;
+ //
+ // Swbrom Code offset within RISCV image and size
+ //
+ NvU32 swbromCodeOffset;
+ NvU32 swbromCodeSize;
+ //
+ // Swbrom Data offset within RISCV image and size
+ //
+ NvU32 swbromDataOffset;
+ NvU32 swbromDataSize;
+ //
+ // Total size of FB carveout (image and reserved space).
+ //
+ NvU32 fbReservedSize;
+ //
+ // Indicates whether the entire RISC-V image is signed as "code" in code section.
+ //
+ NvU32 bSignedAsCode;
+} RM_RISCV_UCODE_DESC;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+ NvU16 engineIdx;
+ NvU32 pmcIntrMask;
+ NvU32 vectorStall;
+ NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+ NvU8 subtreeStart;
+ NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
+
+typedef enum NV2080_INTR_CATEGORY {
+ NV2080_INTR_CATEGORY_DEFAULT = 0,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+ NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+ NV2080_INTR_CATEGORY_RUNLIST = 3,
+ NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+ NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+ NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+ NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+ NvU32 tableLen;
+ NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+ NV2080_INTR_CATEGORY_SUBTREE_MAP subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
+
+#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
+
+typedef union rpc_message_rpc_union_field_v03_00
+{
+ NvU32 spare;
+ NvU32 cpuRmGfid;
+} rpc_message_rpc_union_field_v03_00;
+
+typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v;
+
+typedef struct rpc_message_header_v03_00
+{
+ NvU32 header_version;
+ NvU32 signature;
+ NvU32 length;
+ NvU32 function;
+ NvU32 rpc_result;
+ NvU32 rpc_result_private;
+ NvU32 sequence;
+ rpc_message_rpc_union_field_v u;
+ rpc_generic_union rpc_message_data[];
+} rpc_message_header_v03_00;
+
+typedef rpc_message_header_v03_00 rpc_message_header_v;
+
+typedef struct GSP_MSG_QUEUE_ELEMENT
+{
+ NvU8 authTagBuffer[16]; // Authentication tag buffer.
+ NvU8 aadBuffer[16]; // AAD buffer.
+ NvU32 checkSum; // Set to value needed to make checksum always zero.
+ NvU32 seqNum; // Sequence number maintained by the message queue.
+ NvU32 elemCount; // Number of message queue elements this message has.
+ NV_DECLARE_ALIGNED(rpc_message_header_v rpc, 8);
+} GSP_MSG_QUEUE_ELEMENT;
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3 (20 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB (84u)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
new file mode 100644
index 000000000000..642c13aec325
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/msgfn.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef E
+# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT = 0x1000) // 0x1000
+ E(GSP_INIT_DONE) // 0x1001
+ E(GSP_RUN_CPU_SEQUENCER) // 0x1002
+ E(POST_EVENT) // 0x1003
+ E(RC_TRIGGERED) // 0x1004
+ E(MMU_FAULT_QUEUED) // 0x1005
+ E(OS_ERROR_LOG) // 0x1006
+ E(RG_LINE_INTR) // 0x1007
+ E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
+ E(SIM_READ) // 0x1009
+ E(SIM_WRITE) // 0x100a
+ E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
+ E(UCODE_LIBOS_PRINT) // 0x100c
+ E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
+ E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
+ E(VGPU_CONFIG) // 0x1010
+ E(DISPLAY_MODESET) // 0x1011
+ E(EXTDEV_INTR_SERVICE) // 0x1012
+ E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
+ E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
+ E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
+ E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
+ E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
+ E(TIMED_SEMAPHORE_RELEASE) // 0x1018
+ E(NVLINK_IS_GPU_DEGRADED) // 0x1019
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
+ E(GSP_SEND_USER_SHARED_DATA) // 0x101b
+ E(NVLINK_FAULT_UP) // 0x101c
+ E(GSP_LOCKDOWN_NOTICE) // 0x101d
+ E(MIG_CI_CONFIG_UPDATE) // 0x101e
+ E(NUM_EVENTS) // END
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
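+
+/*
+ * Expansion sketch (illustrative): because the list is written in terms
+ * of a user-definable E() macro, a consumer that defines E() before the
+ * first include of this header can re-expand it, e.g. to build a
+ * hypothetical event-name table:
+ *
+ *   #define E(RPC) #RPC,
+ *   static const char *event_names[] = {
+ *   #include "msgfn.h"
+ *   };
+ *   #undef E
+ */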
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
new file mode 100644
index 000000000000..3a04e702677f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvdec.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVDEC_H__
+#define __NVRM_NVDEC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVDEC?
+ NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
new file mode 100644
index 000000000000..203c1d5304d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvenc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVENC_H__
+#define __NVRM_NVENC_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC?
+ NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
new file mode 100644
index 000000000000..71fc53889ec7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/nvjpg.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_NVJPG_H__
+#define __NVRM_NVJPG_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG?
+ NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
new file mode 100644
index 000000000000..49d81c7673d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/ofa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
index 73c57f235f6a..2a037acc6b1e 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/rpcfn.h
@@ -1,5 +1,10 @@
-#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
-#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
@@ -209,54 +214,12 @@ enum {
X(RM, CTRL_SET_HS_CREDITS) // 198
X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199
X(RM, INVALIDATE_TLB) // 200
+ X(RM, RESERVED_201) // 201
+ X(RM, ECC_NOTIFIER_WRITE_ACK) // 202
X(RM, NUM_FUNCTIONS) //END
#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
};
# undef X
# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
#endif
-
-#ifndef E
-# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
-# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-enum {
-#endif
- E(FIRST_EVENT = 0x1000) // 0x1000
- E(GSP_INIT_DONE) // 0x1001
- E(GSP_RUN_CPU_SEQUENCER) // 0x1002
- E(POST_EVENT) // 0x1003
- E(RC_TRIGGERED) // 0x1004
- E(MMU_FAULT_QUEUED) // 0x1005
- E(OS_ERROR_LOG) // 0x1006
- E(RG_LINE_INTR) // 0x1007
- E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
- E(SIM_READ) // 0x1009
- E(SIM_WRITE) // 0x100a
- E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
- E(UCODE_LIBOS_PRINT) // 0x100c
- E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
- E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
- E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
- E(VGPU_CONFIG) // 0x1010
- E(DISPLAY_MODESET) // 0x1011
- E(EXTDEV_INTR_SERVICE) // 0x1012
- E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
- E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
- E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
- E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
- E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
- E(TIMED_SEMAPHORE_RELEASE) // 0x1018
- E(NVLINK_IS_GPU_DEGRADED) // 0x1019
- E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
- E(GSP_SEND_USER_SHARED_DATA) // 0x101b
- E(NVLINK_FAULT_UP) // 0x101c
- E(GSP_LOCKDOWN_NOTICE) // 0x101d
- E(MIG_CI_CONFIG_UPDATE) // 0x101e
- E(NUM_EVENTS) // END
-#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-};
-# undef E
-# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
-#endif
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
new file mode 100644
index 000000000000..f6ec04efd119
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/nvrm/vmm.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_VMM_H__
+#define __NVRM_VMM_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#define FERMI_VASPACE_A (0x000090f1)
+
+typedef struct
+{
+ NvU32 index;
+ NvV32 flags;
+ NvU64 vaSize NV_ALIGN_BYTES(8);
+ NvU64 vaStartInternal NV_ALIGN_BYTES(8);
+ NvU64 vaLimitInternal NV_ALIGN_BYTES(8);
+ NvU32 bigPageSize;
+ NvU64 vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 //!< Create new VASpace, by default
+
+#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED BIT(3)
+
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_START 0x100000000ULL // 4GB
+#define SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE 0x20000000ULL // 512MB
+
+#define GMMU_FMT_MAX_LEVELS 6U
+
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+ /*!
+ * [in] GPU sub-device handle - this API only supports unicast.
+ * Pass 0 to use subDeviceId instead.
+ */
+ NvHandle hSubDevice;
+
+ /*!
+ * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+ */
+ NvU32 subDeviceId;
+
+ /*!
+ * [in] Page size (VA coverage) of the level to reserve.
+ * This need not be a leaf (page table) page size - it can be
+ * the coverage of an arbitrary level (including root page directory).
+ */
+ NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+ /*!
+ * [in] First GPU virtual address of the range to reserve.
+ * This must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+ /*!
+ * [in] Last GPU virtual address of the range to reserve.
+ * This (+1) must be aligned to pageSize.
+ */
+ NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+ /*!
+ * [in] Number of PDE levels to copy.
+ */
+ NvU32 numLevelsToCopy;
+
+ /*!
+ * [in] Per-level information.
+ */
+ struct {
+ /*!
+ * Physical address of this page level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+ /*!
+ * Size in bytes allocated for this level instance.
+ */
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+ /*!
+ * Aperture in which this page level instance resides.
+ */
+ NvU32 aperture;
+
+ /*!
+ * Page shift corresponding to the level
+ */
+ NvU8 pageShift;
+ } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
+
+#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+ NvU32 numEntries;
+ NvU32 flags;
+ NvHandle hVASpace;
+ NvU32 chId;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+ NvU32 pasid;
+} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS;
+
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE 1:0
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH (0x00000002U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES 2:2
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS 3:3
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY 4:4
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE (0x00000001U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE 5:5
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE (0x00000000U)
+#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE (0x00000001U)
+
+#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS {
+ NvHandle hVASpace;
+ NvU32 subDeviceId; // ID+1, 0 for BC
+} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
index d72b3aae9a2b..2156808cba4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ofa.c
@@ -19,26 +19,26 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "priv.h"
+#include <rm/engine.h>
-#include <subdev/gsp.h>
+#include "nvrm/ofa.h"
-#include <nvif/class.h>
+static int
+r535_ofa_alloc(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
-static const struct nvkm_engine_func
-ad102_nvdec = {
- .sclass = {
- { -1, -1, NVC9B0_VIDEO_DECODER },
- {}
- }
-};
+ args = nvkm_gsp_rm_alloc_get(chan, handle, class, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
-int
-ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
- struct nvkm_nvdec **pnvdec)
-{
- if (nvkm_gsp_rm(device->gsp))
- return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec);
+ args->size = sizeof(*args);
- return -ENODEV;
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
}
+
+const struct nvkm_rm_api_engine
+r535_ofa = {
+ .alloc = r535_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
new file mode 100644
index 000000000000..a4190676e1ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rm.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r535_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_api
+r535_api = {
+ .gsp = &r535_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r535_client,
+ .device = &r535_device,
+ .fbsr = &r535_fbsr,
+ .disp = &r535_disp,
+ .fifo = &r535_fifo,
+ .ce = &r535_ce,
+ .gr = &r535_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r535_ofa,
+};
+
+const struct nvkm_rm_impl
+r535_rm_tu102 = {
+ .wpr = &r535_wpr_libos2,
+ .api = &r535_api,
+};
+
+const struct nvkm_rm_impl
+r535_rm_ga102 = {
+ .wpr = &r535_wpr_libos3,
+ .api = &r535_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
new file mode 100644
index 000000000000..9d06ff722fea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <rm/rpc.h>
+
+#include "nvrm/rpcfn.h"
+
+#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
+/**
+ * DOC: GSP message queue element
+ *
+ * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
+ *
+ * The GSP command queue and status queue are message queues for the
+ * communication between software and GSP. The software submits the GSP
+ * RPC via the GSP command queue, and GSP writes the status of the
+ * submitted RPC to the status queue.
+ *
+ * A GSP message queue element consists of three parts:
+ *
+ * - message element header (struct r535_gsp_msg), which mostly maintains
+ * the metadata for queuing the element.
+ *
+ * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
+ * of the RPC. E.g., the RPC function number.
+ *
+ * - The payload, where the RPC message stays. E.g. the params of a
+ * specific RPC function. Some RPC functions also have their headers
+ * in the payload. E.g. rm_alloc, rm_control.
+ *
+ * The memory layout of a GSP message element can be illustrated below::
+ *
+ * +------------------------+
+ * | Message Element Header |
+ * | (r535_gsp_msg) |
+ * | |
+ * | (r535_gsp_msg.data) |
+ * | | |
+ * |----------V-------------|
+ * | GSP RPC Header |
+ * | (nvfw_gsp_rpc) |
+ * | |
+ * | (nvfw_gsp_rpc.data) |
+ * | | |
+ * |----------V-------------|
+ * | Payload |
+ * | |
+ * | header(optional) |
+ * | params |
+ * +------------------------+
+ *
+ * The max size of a message queue element is 16 pages (including the
+ * headers). When a GSP message to be sent is larger than 16 pages, the
+ * message should be split into multiple elements and sent accordingly.
+ *
+ * Among the split elements, the first element carries the expected
+ * function number, while the remaining elements are sent with the
+ * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
+ *
+ * GSP consumes the elements from the cmdq and always writes the result
+ * back to the msgq. A large result is likewise split into multiple elements.
+ *
+ * Terminology:
+ *
+ * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
+ * payload)
+ * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
+ * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
+ * - gsp_rpc_len: size of (GSP RPC header + payload)
+ * - params_size: size of params in the payload
+ * - payload_size: size of (header if exists + params) in the payload
+ */
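+
+/*
+ * For example: with 4KiB pages, a single element holds at most
+ * GSP_MSG_MAX_SIZE (64KiB) including both headers, so a ~100KiB RPC is
+ * sent as one element carrying the real function number followed by a
+ * CONTINUATION_RECORD element carrying the remaining payload.
+ */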
+
+struct r535_gsp_msg {
+ u8 auth_tag_buffer[16];
+ u8 aad_buffer[16];
+ u32 checksum;
+ u32 sequence;
+ u32 elem_count;
+ u32 pad;
+ u8 data[];
+};
+
+struct nvfw_gsp_rpc {
+ u32 header_version;
+ u32 signature;
+ u32 length;
+ u32 function;
+ u32 rpc_result;
+ u32 rpc_result_private;
+ u32 sequence;
+ union {
+ u32 spare;
+ u32 cpuRmGfid;
+ };
+ u8 data[];
+};
+
+#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+
+#define to_gsp_hdr(p, header) \
+ container_of((void *)p, typeof(*header), data)
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
+
+int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+ switch (rpc_status) {
+ case 0x55: /* NV_ERR_NOT_READY */
+ case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+ return -EBUSY;
+ case 0x51: /* NV_ERR_NO_MEMORY */
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
+{
+ u32 size, rptr = *gsp->msgq.rptr;
+ int used;
+
+ size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
+ GSP_PAGE_SIZE);
+ if (WARN_ON(!size || size >= gsp->msgq.cnt))
+ return -EINVAL;
+
+ do {
+ u32 wptr = *gsp->msgq.wptr;
+
+ used = wptr + gsp->msgq.cnt - rptr;
+ if (used >= gsp->msgq.cnt)
+ used -= gsp->msgq.cnt;
+ if (used >= size)
+ break;
+
+ usleep_range(1, 2);
+ } while (--(*ptime));
+
+ if (WARN_ON(!*ptime))
+ return -ETIMEDOUT;
+
+ return used;
+}
+
+static struct r535_gsp_msg *
+r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
+{
+ u32 rptr = *gsp->msgq.rptr;
+
+ /* Skip the first page, which is the message queue info */
+ return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
+ rptr * GSP_PAGE_SIZE);
+}
+
+/**
+ * DOC: Receive a GSP message queue element
+ *
+ * Receiving a GSP message queue element from the message queue consists of
+ * the following steps:
+ *
+ * - Peek the element from the queue: r535_gsp_msgq_peek().
+ * Peek the first page of the element to determine the total size of the
+ * message before allocating the proper memory.
+ *
+ * - Allocate memory for the message.
+ * Once the total size of the message is determined from the GSP message
+ * queue element, the caller of r535_gsp_msgq_recv() allocates the
+ * required memory.
+ *
+ * - Receive the message: r535_gsp_msgq_recv().
+ * Copy the message into the allocated memory. Advance the read pointer.
+ * If the message is a large GSP message, r535_gsp_msgq_recv() calls
+ * r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
+ * until the complete message is received.
+ * r535_gsp_msgq_recv() assembles the payloads of the continuation
+ * parts into the buffer returned for the large GSP message.
+ *
+ * - Free the allocated memory: r535_gsp_msg_done().
+ * The user is responsible for freeing the memory allocated for the GSP
+ * message pages after they have been processed.
+ */
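+/*
+ * A minimal sketch of the flow above (see r535_gsp_msg_recv() below for
+ * the full implementation, including notify handling and retries):
+ *
+ * rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ * rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ * ...consume rpc->data...
+ * r535_gsp_msg_done(gsp, rpc);
+ */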
+static void *
+r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ int ret;
+
+ ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ return mqe->data;
+}
+
+struct r535_gsp_msg_info {
+ int *retries;
+ u32 gsp_rpc_len;
+ void *gsp_rpc_buf;
+ bool continuation;
+};
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);
+
+static void *
+r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
+ struct r535_gsp_msg_info *info)
+{
+ u8 *buf = info->gsp_rpc_buf;
+ u32 rptr = *gsp->msgq.rptr;
+ struct r535_gsp_msg *mqe;
+ u32 size, expected, len;
+ int ret;
+
+ expected = info->gsp_rpc_len;
+
+ ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+
+ if (info->continuation) {
+ struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
+ nvkm_error(&gsp->subdev,
+ "Not a continuation of a large RPC\n");
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ return ERR_PTR(-EIO);
+ }
+ }
+
+ size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
+ len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
+ len = min_t(u32, expected, len);
+
+ if (info->continuation)
+ memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
+ len - sizeof(struct nvfw_gsp_rpc));
+ else
+ memcpy(buf, mqe->data, len);
+
+ expected -= len;
+
+ if (expected) {
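+ /* The remainder wrapped around to the start of the message queue. */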
+ mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
+ memcpy(buf + len, mqe, expected);
+ }
+
+ rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
+ mb();
+ (*gsp->msgq.rptr) = rptr;
+ return buf;
+}
+
+static void *
+r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
+{
+ struct r535_gsp_msg *mqe;
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
+ struct nvfw_gsp_rpc *rpc;
+ struct r535_gsp_msg_info info = {0};
+ u32 expected = gsp_rpc_len;
+ void *buf;
+
+ mqe = r535_gsp_msgq_get_entry(gsp);
+ rpc = (struct nvfw_gsp_rpc *)mqe->data;
+
+ if (WARN_ON(rpc->length > max_rpc_size))
+ return NULL;
+
+ buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ info.gsp_rpc_buf = buf;
+ info.retries = retries;
+ info.gsp_rpc_len = rpc->length;
+
+ buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR(buf)) {
+ kvfree(info.gsp_rpc_buf);
+ info.gsp_rpc_buf = NULL;
+ return buf;
+ }
+
+ if (expected <= max_rpc_size)
+ return buf;
+
+ info.gsp_rpc_buf += info.gsp_rpc_len;
+ expected -= info.gsp_rpc_len;
+
+ while (expected) {
+ u32 size;
+
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kvfree(buf);
+ return rpc;
+ }
+
+ info.gsp_rpc_len = rpc->length;
+ info.continuation = true;
+
+ rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
+ if (IS_ERR_OR_NULL(rpc)) {
+ kvfree(buf);
+ return rpc;
+ }
+
+ size = info.gsp_rpc_len - sizeof(*rpc);
+ expected -= size;
+ info.gsp_rpc_buf += size;
+ }
+
+ rpc = buf;
+ rpc->length = gsp_rpc_len;
+ return buf;
+}
+
+static int
+r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
+{
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ struct r535_gsp_msg *cqe;
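+ /* r535_gsp_cmdq_get() stashed the RPC length in the checksum field. */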
+ u32 gsp_rpc_len = msg->checksum;
+ u64 *ptr = (void *)msg;
+ u64 *end;
+ u64 csum = 0;
+ int free, time = 1000000;
+ u32 wptr, size, step, len;
+ u32 off = 0;
+
+ len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);
+
+ end = (u64 *)((char *)ptr + len);
+ msg->pad = 0;
+ msg->checksum = 0;
+ msg->sequence = gsp->cmdq.seq++;
+ msg->elem_count = DIV_ROUND_UP(len, 0x1000);
+
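+ /* XOR together every 64-bit word of the message, then fold to 32 bits. */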
+ while (ptr < end)
+ csum ^= *ptr++;
+
+ msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);
+
+ wptr = *gsp->cmdq.wptr;
+ do {
+ do {
+ free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
+ if (free >= gsp->cmdq.cnt)
+ free -= gsp->cmdq.cnt;
+ if (free >= 1)
+ break;
+
+ usleep_range(1, 2);
+ } while (--time);
+
+ if (WARN_ON(!time)) {
+ kvfree(msg);
+ return -ETIMEDOUT;
+ }
+
+ cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
+ step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+ size = min_t(u32, len, step * GSP_PAGE_SIZE);
+
+ memcpy(cqe, (u8 *)msg + off, size);
+
+ wptr += DIV_ROUND_UP(size, 0x1000);
+ if (wptr == gsp->cmdq.cnt)
+ wptr = 0;
+
+ off += size;
+ len -= size;
+ } while (len);
+
+ nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
+ wmb();
+ (*gsp->cmdq.wptr) = wptr;
+ mb();
+
+ nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);
+
+ kvfree(msg);
+ return 0;
+}
+
+static void *
+r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
+{
+ struct r535_gsp_msg *msg;
+ u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;
+
+ size = ALIGN(size, GSP_MSG_MIN_SIZE);
+ msg = kvzalloc(size, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ msg->checksum = gsp_rpc_len;
+ return msg->data;
+}
+
+static void
+r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
+{
+ kvfree(msg);
+}
+
+static void
+r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
+{
+ if (gsp->subdev.debug >= lvl) {
+ nvkm_printk__(&gsp->subdev, lvl, info,
+ "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
+ msg->function, msg->length, msg->length - sizeof(*msg),
+ msg->rpc_result, msg->rpc_result_private);
+ print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+ msg->data, msg->length - sizeof(*msg), true);
+ }
+}
+
+struct nvfw_gsp_rpc *
+r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvfw_gsp_rpc *rpc;
+ int retries = 4000000, i;
+
+retry:
+ rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
+ if (IS_ERR_OR_NULL(rpc))
+ return rpc;
+
+ if (rpc->rpc_result) {
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EINVAL);
+ }
+
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);
+
+ if (fn && rpc->function == fn) {
+ if (gsp_rpc_len) {
+ if (rpc->length < gsp_rpc_len) {
+ nvkm_error(subdev, "rpc len %d < %d\n",
+ rpc->length, gsp_rpc_len);
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
+ r535_gsp_msg_done(gsp, rpc);
+ return ERR_PTR(-EIO);
+ }
+
+ return rpc;
+ }
+
+ r535_gsp_msg_done(gsp, rpc);
+ return NULL;
+ }
+
+ for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
+ struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
+
+ if (ntfy->fn == rpc->function) {
+ if (ntfy->func)
+ ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
+ rpc->length - sizeof(*rpc));
+ break;
+ }
+ }
+
+ if (i == gsp->msgq.ntfy_nr)
+ r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);
+
+ r535_gsp_msg_done(gsp, rpc);
+ if (fn)
+ goto retry;
+
+ if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+ goto retry;
+
+ return NULL;
+}
+
+int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+ int ret = 0;
+
+ mutex_lock(&gsp->msgq.mutex);
+ if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
+ ret = -ENOSPC;
+ } else {
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
+ gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
+ gsp->msgq.ntfy_nr++;
+ }
+ mutex_unlock(&gsp->msgq.mutex);
+ return ret;
+}
+
+int
+r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
+{
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ mutex_unlock(&gsp->cmdq.mutex);
+ if (IS_ERR(repv))
+ return PTR_ERR(repv);
+
+ return 0;
+}
+
+static void *
+r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn,
+ enum nvkm_gsp_rpc_reply_policy policy,
+ u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *reply;
+ void *repv = NULL;
+
+ switch (policy) {
+ case NVKM_GSP_RPC_REPLY_NOWAIT:
+ break;
+ case NVKM_GSP_RPC_REPLY_RECV:
+ reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
+ if (!IS_ERR_OR_NULL(reply))
+ repv = reply->data;
+ else
+ repv = reply;
+ break;
+ case NVKM_GSP_RPC_REPLY_POLL:
+ repv = r535_gsp_msg_recv(gsp, fn, 0);
+ break;
+ }
+
+ return repv;
+}
+
+static void *
+r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ u32 fn = rpc->function;
+ int ret;
+
+ if (gsp->subdev.debug >= NV_DBG_TRACE) {
+ nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
+ rpc->length, rpc->length - sizeof(*rpc));
+ print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
+ rpc->data, rpc->length - sizeof(*rpc), true);
+ }
+
+ ret = r535_gsp_cmdq_push(gsp, rpc);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len);
+}
+
+static void
+r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+ struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
+
+ r535_gsp_msg_done(gsp, rpc);
+}
+
+static void *
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
+{
+ struct nvfw_gsp_rpc *rpc;
+
+ rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
+ sizeof(u64)));
+ if (IS_ERR(rpc))
+ return ERR_CAST(rpc);
+
+ rpc->header_version = 0x03000000;
+ rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
+ rpc->function = fn;
+ rpc->rpc_result = 0xffffffff;
+ rpc->rpc_result_private = 0xffffffff;
+ rpc->length = sizeof(*rpc) + payload_size;
+ return rpc->data;
+}
+
+static void *
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
+{
+ struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
+ struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
+ const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
+ const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
+ u32 payload_size = rpc->length - sizeof(*rpc);
+ void *repv;
+
+ mutex_lock(&gsp->cmdq.mutex);
+ if (payload_size > max_payload_size) {
+ const u32 fn = rpc->function;
+ u32 remain_payload_size = payload_size;
+ void *next;
+
+ /* Send initial RPC. */
+ next = r535_gsp_rpc_get(gsp, fn, max_payload_size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
+
+ memcpy(next, payload, max_payload_size);
+
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += max_payload_size;
+ remain_payload_size -= max_payload_size;
+
+ /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
+ while (remain_payload_size) {
+ u32 size = min(remain_payload_size,
+ max_payload_size);
+
+ next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
+
+ memcpy(next, payload, size);
+
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ if (IS_ERR(repv))
+ goto done;
+
+ payload += size;
+ remain_payload_size -= size;
+ }
+
+ /* Wait for reply. */
+ repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
+ sizeof(*rpc));
+ if (!IS_ERR(repv))
+ kvfree(msg);
+ } else {
+ repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
+ }
+
+done:
+ mutex_unlock(&gsp->cmdq.mutex);
+ return repv;
+}
+
+const struct nvkm_rm_api_rpc
+r535_rpc = {
+ .get = r535_gsp_rpc_get,
+ .push = r535_gsp_rpc_push,
+ .done = r535_gsp_rpc_done,
+};
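+
+/*
+ * A typical caller pairs these entry points roughly as follows (a sketch,
+ * assuming the nvkm_gsp_rpc_* wrappers dispatch here via rm->api->rpc):
+ *
+ * args = nvkm_gsp_rpc_get(gsp, fn, sizeof(*args));
+ * ...fill *args...
+ * repv = nvkm_gsp_rpc_push(gsp, args, NVKM_GSP_RPC_REPLY_RECV, sizeof(*args));
+ * if (!IS_ERR_OR_NULL(repv))
+ *	nvkm_gsp_rpc_done(gsp, repv);
+ */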
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
index d3e95453f25d..f25ea610cd99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
@@ -19,15 +19,38 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "vmm.h"
+#include <subdev/mmu/vmm.h>
-#include <nvrm/nvtypes.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvhw/drf.h>
+#include "nvrm/vmm.h"
-static int
-r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+void
+r535_mmu_vaspace_del(struct nvkm_vmm *vmm)
+{
+ if (vmm->rm.external) {
+ NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (!IS_ERR(ctrl)) {
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ WARN_ON(nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl));
+ }
+
+ vmm->rm.external = false;
+ }
+
+ nvkm_gsp_rm_free(&vmm->rm.object);
+ nvkm_gsp_device_dtor(&vmm->rm.device);
+ nvkm_gsp_client_dtor(&vmm->rm.client);
+
+ nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+}
+
+int
+r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external)
{
NV_VASPACE_ALLOCATION_PARAMETERS *args;
int ret;
@@ -37,58 +60,103 @@ r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
if (ret)
return ret;
- args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
+ args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, handle, FERMI_VASPACE_A,
sizeof(*args), &vmm->rm.object);
if (IS_ERR(args))
return PTR_ERR(args);
args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+ if (external)
+ args->flags = NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED;
ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
if (ret)
return ret;
- {
+ if (!external) {
NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+ u8 page_shift = 29; /* 512MiB */
+ const u64 page_size = BIT_ULL(page_shift);
+ const struct nvkm_vmm_page *page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vmm_pt *pd = vmm->pd;
+
+ for (page = vmm->func->page; page->shift; page++) {
+ if (page->shift == page_shift)
+ break;
+ }
+
+ if (WARN_ON(!page->shift))
+ return -EINVAL;
mutex_lock(&vmm->mutex.vmm);
- ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
+ ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size,
&vmm->rm.rsvd);
mutex_unlock(&vmm->mutex.vmm);
if (ret)
return ret;
+ /* Some parts of RM expect the server-reserved area to be in a specific location. */
+ if (WARN_ON(vmm->rm.rsvd->addr != SPLIT_VAS_SERVER_RM_MANAGED_VA_START ||
+ vmm->rm.rsvd->size != SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE))
+ return -EINVAL;
+
ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl->pageSize = 0x20000000;
+ ctrl->pageSize = page_size;
ctrl->virtAddrLo = vmm->rm.rsvd->addr;
ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
- ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
- ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
- ctrl->levels[0].size = 0x20;
- ctrl->levels[0].aperture = 1;
- ctrl->levels[0].pageShift = 0x2f;
- ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
- ctrl->levels[1].size = 0x1000;
- ctrl->levels[1].aperture = 1;
- ctrl->levels[1].pageShift = 0x26;
- if (vmm->pd->pde[0]->pde[0]) {
- ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
- ctrl->levels[2].size = 0x1000;
- ctrl->levels[2].aperture = 1;
- ctrl->levels[2].pageShift = 0x1d;
+
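+ /* Count the levels covering the reserved range, accumulating the top-level page shift. */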
+ for (desc = page->desc; desc->bits; desc++) {
+ ctrl->numLevelsToCopy++;
+ page_shift += desc->bits;
+ }
+ desc--;
+
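+ /* Fill levels[] from the top of the page-table hierarchy down, following pde[0]. */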
+ for (int i = 0; i < ctrl->numLevelsToCopy; i++, desc--) {
+ page_shift -= desc->bits;
+
+ ctrl->levels[i].physAddress = pd->pt[0]->addr;
+ ctrl->levels[i].size = BIT_ULL(desc->bits) * desc->size;
+ ctrl->levels[i].aperture = 1;
+ ctrl->levels[i].pageShift = page_shift;
+
+ pd = pd->pde[0];
}
ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+ } else {
+ NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
+ NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->physAddress = vmm->pd->pt[0]->addr;
+ ctrl->numEntries = 1 << vmm->func->page[0].desc->bits;
+ ctrl->flags = NVDEF(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY, FLAGS, APERTURE, VIDMEM);
+ ctrl->hVASpace = vmm->rm.object.handle;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl);
+ if (ret == 0)
+ vmm->rm.external = true;
}
return ret;
}
+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+ return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE, true);
+}
+
static void
r535_mmu_dtor(struct nvkm_mmu *mmu)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
new file mode 100644
index 000000000000..5db0e7009e1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/Kbuild
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/subdev/gsp/rm/r570/rm.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gsp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/client.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fbsr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/disp.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/fifo.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/gr.o
+nvkm-y += nvkm/subdev/gsp/rm/r570/ofa.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
new file mode 100644
index 000000000000..87e6240662ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/client.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/client.h"
+
+static int
+r570_gsp_client_ctor(struct nvkm_gsp_client *client, u32 handle)
+{
+ NV0000_ALLOC_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&client->object, handle, NV01_ROOT, sizeof(*args),
+ &client->object);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->hClient = client->object.handle;
+ args->processID = ~0;
+
+ return nvkm_gsp_rm_alloc_wr(&client->object, args);
+}
+
+const struct nvkm_rm_api_client
+r570_client = {
+ .ctor = r570_gsp_client_ctor,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
new file mode 100644
index 000000000000..a96e31c2d80b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/disp.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <engine/disp.h>
+#include <engine/disp/outp.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/disp.h"
+
+static int
+r570_dmac_alloc(struct nvkm_disp *disp, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *dmac)
+{
+ NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&disp->rm.object, (oclass << 16) | inst, oclass,
+ sizeof(*args), dmac);
+ if (IS_ERR(args))
+ return PTR_ERR(args);
+
+ args->channelInstance = inst;
+ args->offset = put_offset;
+ args->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_alloc_wr(dmac, args);
+}
+
+static int
+r570_disp_chan_set_pushbuf(struct nvkm_disp *disp, s32 oclass, int inst, struct nvkm_memory *memory)
+{
+ struct nvkm_gsp *gsp = disp->rm.objcom.client->gsp;
+ NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ if (memory) {
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_NCOH:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 0;
+ ctrl->pbTargetAperture = PHYS_PCI;
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ ctrl->addressSpace = ADDR_SYSMEM;
+ ctrl->cacheSnoop = 1;
+ ctrl->pbTargetAperture = PHYS_PCI_COHERENT;
+ break;
+ case NVKM_MEM_TARGET_VRAM:
+ ctrl->addressSpace = ADDR_FBMEM;
+ ctrl->pbTargetAperture = PHYS_NVM;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ctrl->physicalAddr = nvkm_memory_addr(memory);
+ ctrl->limit = nvkm_memory_size(memory) - 1;
+ }
+
+ ctrl->hclass = oclass;
+ ctrl->channelInstance = inst;
+ ctrl->valid = ((oclass & 0xff) != 0x7a) ? 1 : 0;
+ ctrl->subDeviceId = BIT(0);
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static int
+r570_dp_set_indexed_link_rates(struct nvkm_outp *outp)
+{
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+ struct nvkm_disp *disp = outp->disp;
+
+ if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+ return -EINVAL;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(outp->index);
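+ /* Convert the sink's rates back to DPCD 200kHz link-rate table units. */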
+ for (int i = 0; i < outp->dp.rates; i++)
+ ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+ return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static int
+r570_dp_get_caps(struct nvkm_disp *disp, int *plink_bw, bool *pmst, bool *pwm)
+{
+ NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->sorIndex = ~0;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
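+ /* Translate to DPCD LINK_BW_SET values: 0x06=1.62G, 0x0a=2.7G, 0x14=5.4G, 0x1e=8.1G. */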
+ switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+ *plink_bw = 0x06;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+ *plink_bw = 0x0a;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+ *plink_bw = 0x14;
+ break;
+ case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+ *plink_bw = 0x1e;
+ break;
+ default:
+ *plink_bw = 0x00;
+ break;
+ }
+
+ *pmst = ctrl->bIsMultistreamSupported;
+ *pwm = ctrl->bHasIncreasedWatermarkLimits;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_bl_ctrl(struct nvkm_disp *disp, unsigned display_id, bool set, int *pval)
+{
+ u32 cmd = set ? NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS :
+ NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS;
+ NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, cmd, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->displayId = BIT(display_id);
+ ctrl->brightness = *pval;
+ ctrl->brightnessType = NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pval = ctrl->brightness;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_active(struct nvkm_disp *disp, unsigned head, u32 *displayid)
+{
+ NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->head = head;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+
+ *displayid = ctrl->displayId;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_connect_state(struct nvkm_disp *disp, unsigned display_id)
+{
+ NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayMask = BIT(display_id);
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == 0 && (ctrl->displayMask & BIT(display_id)))
+ ret = 1;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+}
+
+static int
+r570_disp_get_supported(struct nvkm_disp *disp, unsigned long *pmask)
+{
+ NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+ NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->displayMask;
+
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return 0;
+}
+
+static int
+r570_disp_get_static_info(struct nvkm_disp *disp)
+{
+ NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = disp->engine.subdev.device->gsp;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ disp->wndw.mask = ctrl->windowPresentMask;
+ disp->wndw.nr = fls(disp->wndw.mask);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+const struct nvkm_rm_api_disp
+r570_disp = {
+ .get_static_info = r570_disp_get_static_info,
+ .get_supported = r570_disp_get_supported,
+ .get_connect_state = r570_disp_get_connect_state,
+ .get_active = r570_disp_get_active,
+ .bl_ctrl = r570_bl_ctrl,
+ .dp = {
+ .get_caps = r570_dp_get_caps,
+ .set_indexed_link_rates = r570_dp_set_indexed_link_rates,
+ },
+ .chan = {
+ .set_pushbuf = r570_disp_chan_set_pushbuf,
+ .dmac_alloc = r570_dmac_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
new file mode 100644
index 000000000000..2945d5b4e570
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/instmem/priv.h>
+#include <subdev/bar.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+
+#include "nvrm/fbsr.h"
+#include "nvrm/fifo.h"
+
+static int
+r570_fbsr_suspend_channels(struct nvkm_gsp *gsp, bool suspend)
+{
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bDisableActiveChannels = suspend;
+
+ return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+static void
+r570_fbsr_resume(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ struct nvkm_vmm *vmm;
+
+ /* Restore BAR2 page tables via BAR0 window, and re-enable BAR2. */
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ device->bar->bar2 = true;
+
+ vmm = nvkm_bar_bar2_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Restore remaining BAR2 allocations (including BAR1 page tables) via BAR2. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ vmm = nvkm_bar_bar1_vmm(device);
+ vmm->func->flush(vmm, 0);
+
+ /* Resume channel scheduling. */
+ r570_fbsr_suspend_channels(device->gsp, false);
+
+ /* Finish cleaning up. */
+ r535_fbsr_resume(gsp);
+}
+
+static int
+r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size)
+{
+ NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+ struct nvkm_gsp_object memlist;
+ int ret;
+
+ ret = r535_fbsr_memlist(&gsp->internal.device, 0xcaf00003, NVKM_MEM_TARGET_HOST,
+ 0, size, sgt, &memlist);
+ if (ret)
+ return ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->hClient = gsp->internal.client.object.handle;
+ ctrl->hSysMem = memlist.handle;
+ ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr;
+ ctrl->bEnteringGcoffState = 1;
+
+ ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+ if (ret)
+ return ret;
+
+ nvkm_gsp_rm_free(&memlist);
+ return 0;
+}
+
+static int
+r570_fbsr_suspend(struct nvkm_gsp *gsp)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_instobj *iobj;
+ u64 size;
+ int ret;
+
+ /* Stop channel scheduling. */
+ r570_fbsr_suspend_channels(gsp, true);
+
+ /* Save BAR2 allocations to system memory. */
+ list_for_each_entry(iobj, &imem->list, head) {
+ if (iobj->preserve) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+ }
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable BAR2 access. */
+ device->bar->bar2 = false;
+
+ /* Allocate system memory to hold RM's VRAM allocations across suspend. */
+ size = gsp->fb.heap.size;
+ size += gsp->fb.rsvd_size;
+ size += gsp->fb.bios.vga_workspace.size;
+ nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", size);
+
+ ret = nvkm_gsp_sg(device, size, &gsp->sr.fbsr);
+ if (ret)
+ return ret;
+
+ /* Initialise FBSR on RM. */
+ ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size);
+ if (ret) {
+ nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct nvkm_rm_api_fbsr
+r570_fbsr = {
+ .suspend = r570_fbsr_suspend,
+ .resume = r570_fbsr_resume,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
new file mode 100644
index 000000000000..79132805cfcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fifo.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo/priv.h>
+#include <engine/fifo/chan.h>
+#include <engine/fifo/runl.h>
+
+#include "nvhw/drf.h"
+
+#include "nvrm/fifo.h"
+#include "nvrm/engine.h"
+
+#define CHID_PER_USERD 8
+
+static int
+r570_chan_alloc(struct nvkm_gsp_device *device, u32 handle, u32 nv2080_engine_type, u8 runq,
+ bool priv, int chid, u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *vmm, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *chan)
+{
+ struct nvkm_gsp *gsp = device->object.client->gsp;
+ struct nvkm_fifo *fifo = gsp->subdev.device->fifo;
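+ /* Each USERD page holds CHID_PER_USERD channels: split chid into page and index. */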
+ const int userd_p = chid / CHID_PER_USERD;
+ const int userd_i = chid % CHID_PER_USERD;
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(&device->object, handle,
+ fifo->func->chan.user.oclass, sizeof(*args), chan);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->gpFifoOffset = gpfifo_offset;
+ args->gpFifoEntries = gpfifo_length / 8;
+
+ args->flags = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+ args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, runq);
+ if (!priv)
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+ else
+ args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+ args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+ args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+ args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+ args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+ args->hVASpace = vmm->rm.object.handle;
+ args->engineType = nv2080_engine_type;
+
+ args->instanceMem.base = inst_addr;
+ args->instanceMem.size = fifo->func->chan.func->inst->size;
+ args->instanceMem.addressSpace = 2;
+ args->instanceMem.cacheAttrib = 1;
+
+ args->userdMem.base = userd_addr;
+ args->userdMem.size = fifo->func->chan.func->userd->size;
+ args->userdMem.addressSpace = 2;
+ args->userdMem.cacheAttrib = 1;
+
+ args->ramfcMem.base = inst_addr;
+ args->ramfcMem.size = 0x200;
+ args->ramfcMem.addressSpace = 2;
+ args->ramfcMem.cacheAttrib = 1;
+
+ args->mthdbufMem.base = mthdbuf_addr;
+ args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+ args->mthdbufMem.addressSpace = 1;
+ args->mthdbufMem.cacheAttrib = 0;
+
+ if (!priv)
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+ else
+ args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+ args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+ return nvkm_gsp_rm_alloc_wr(chan, args);
+}
+
+static int
+r570_fifo_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+ rpc_rc_triggered_v17_02 *msg = repv;
+ struct nvkm_gsp *gsp = priv;
+
+ if (WARN_ON(repc < sizeof(*msg)))
+ return -EINVAL;
+
+ nvkm_error(&gsp->subdev, "rc engn:%08x chid:%d gfid:%d level:%d type:%d scope:%d part:%d "
+ "fault_addr:%08x%08x fault_type:%08x\n",
+ msg->nv2080EngineType, msg->chid, msg->gfid, msg->exceptLevel, msg->exceptType,
+ msg->scope, msg->partitionAttributionId,
+ msg->mmuFaultAddrHi, msg->mmuFaultAddrLo, msg->mmuFaultType);
+
+ r535_fifo_rc_chid(gsp->subdev.device->fifo, msg->chid);
+ return 0;
+}
+
+static int
+r570_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+ NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+ struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO,
+ sizeof(*ctrl));
+ if (WARN_ON(IS_ERR(ctrl)))
+ return PTR_ERR(ctrl);
+
+ for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+ engn->rm.size =
+ ctrl->constructedFalconsTable[i].ctxBufferSize;
+ break;
+ }
+ }
+ }
+ }
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_fifo_xlat_rm_engine_type(u32 rm, enum nvkm_subdev_type *ptype, int *p2080)
+{
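+ /* Expands to: case RM_ENGINE_TYPE_<RM>: set NVKM type + NV2080 id, return instance. */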
+#define RM_ENGINE_TYPE(RM,NVKM,INST) \
+ RM_ENGINE_TYPE_##RM: \
+ *ptype = NVKM_ENGINE_##NVKM; \
+ *p2080 = NV2080_ENGINE_TYPE_##RM; \
+ return INST
+
+ switch (rm) {
+ case RM_ENGINE_TYPE( GR0, GR, 0);
+ case RM_ENGINE_TYPE( COPY0, CE, 0);
+ case RM_ENGINE_TYPE( COPY1, CE, 1);
+ case RM_ENGINE_TYPE( COPY2, CE, 2);
+ case RM_ENGINE_TYPE( COPY3, CE, 3);
+ case RM_ENGINE_TYPE( COPY4, CE, 4);
+ case RM_ENGINE_TYPE( COPY5, CE, 5);
+ case RM_ENGINE_TYPE( COPY6, CE, 6);
+ case RM_ENGINE_TYPE( COPY7, CE, 7);
+ case RM_ENGINE_TYPE( COPY8, CE, 8);
+ case RM_ENGINE_TYPE( COPY9, CE, 9);
+ case RM_ENGINE_TYPE( COPY10, CE, 10);
+ case RM_ENGINE_TYPE( COPY11, CE, 11);
+ case RM_ENGINE_TYPE( COPY12, CE, 12);
+ case RM_ENGINE_TYPE( COPY13, CE, 13);
+ case RM_ENGINE_TYPE( COPY14, CE, 14);
+ case RM_ENGINE_TYPE( COPY15, CE, 15);
+ case RM_ENGINE_TYPE( COPY16, CE, 16);
+ case RM_ENGINE_TYPE( COPY17, CE, 17);
+ case RM_ENGINE_TYPE( COPY18, CE, 18);
+ case RM_ENGINE_TYPE( COPY19, CE, 19);
+ case RM_ENGINE_TYPE( NVDEC0, NVDEC, 0);
+ case RM_ENGINE_TYPE( NVDEC1, NVDEC, 1);
+ case RM_ENGINE_TYPE( NVDEC2, NVDEC, 2);
+ case RM_ENGINE_TYPE( NVDEC3, NVDEC, 3);
+ case RM_ENGINE_TYPE( NVDEC4, NVDEC, 4);
+ case RM_ENGINE_TYPE( NVDEC5, NVDEC, 5);
+ case RM_ENGINE_TYPE( NVDEC6, NVDEC, 6);
+ case RM_ENGINE_TYPE( NVDEC7, NVDEC, 7);
+ case RM_ENGINE_TYPE( NVENC0, NVENC, 0);
+ case RM_ENGINE_TYPE( NVENC1, NVENC, 1);
+ case RM_ENGINE_TYPE( NVENC2, NVENC, 2);
+ case RM_ENGINE_TYPE( NVENC3, NVENC, 3);
+ case RM_ENGINE_TYPE(NVJPEG0, NVJPG, 0);
+ case RM_ENGINE_TYPE(NVJPEG1, NVJPG, 1);
+ case RM_ENGINE_TYPE(NVJPEG2, NVJPG, 2);
+ case RM_ENGINE_TYPE(NVJPEG3, NVJPG, 3);
+ case RM_ENGINE_TYPE(NVJPEG4, NVJPG, 4);
+ case RM_ENGINE_TYPE(NVJPEG5, NVJPG, 5);
+ case RM_ENGINE_TYPE(NVJPEG6, NVJPG, 6);
+ case RM_ENGINE_TYPE(NVJPEG7, NVJPG, 7);
+ case RM_ENGINE_TYPE( SW, SW, 0);
+ case RM_ENGINE_TYPE( SEC2, SEC2, 0);
+ case RM_ENGINE_TYPE( OFA0, OFA, 0);
+ case RM_ENGINE_TYPE( OFA1, OFA, 1);
+ default:
+ return -EINVAL;
+ }
+#undef RM_ENGINE_TYPE
+}
+
+const struct nvkm_rm_api_fifo
+r570_fifo = {
+ .xlat_rm_engine_type = r570_fifo_xlat_rm_engine_type,
+ .ectx_size = r570_fifo_ectx_size,
+ .rsvd_chids = 1,
+ .rc_triggered = r570_fifo_rc_triggered,
+ .chan = {
+ .alloc = r570_chan_alloc,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
new file mode 100644
index 000000000000..b6cced9b8aa1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gr.c
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/gr.h>
+
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+#include <engine/fifo/chid.h>
+#include <engine/gr/priv.h>
+
+#include "nvrm/gr.h"
+#include "nvrm/engine.h"
+
+int
+r570_gr_tpc_mask(struct nvkm_gsp *gsp, int gpc, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *ctrl;
+ int ret;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->gpcId = gpc;
+
+ ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+ if (ret)
+ return ret;
+
+ *pmask = ctrl->tpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+int
+r570_gr_gpc_mask(struct nvkm_gsp *gsp, u32 *pmask)
+{
+ NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ *pmask = ctrl->gpcMask;
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return 0;
+}
+
+static int
+r570_gr_scrubber_ctrl(struct r535_gr *gr, bool teardown)
+{
+ NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS *ctrl;
+
+ ctrl = nvkm_gsp_rm_ctrl_get(&gr->scrubber.vmm->rm.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->bTeardown = teardown;
+
+ return nvkm_gsp_rm_ctrl_wr(&gr->scrubber.vmm->rm.device.subdevice, ctrl);
+}
+
+static void
+r570_gr_scrubber_fini(struct r535_gr *gr)
+{
+ /* Teardown scrubber channel on RM. */
+ if (gr->scrubber.enabled) {
+ WARN_ON(r570_gr_scrubber_ctrl(gr, true));
+ gr->scrubber.enabled = false;
+ }
+
+ /* Free scrubber channel. */
+ nvkm_gsp_rm_free(&gr->scrubber.threed);
+ nvkm_gsp_rm_free(&gr->scrubber.chan);
+
+ for (int i = 0; i < gr->ctxbuf_nr; i++) {
+ nvkm_vmm_put(gr->scrubber.vmm, &gr->scrubber.ctxbuf.vma[i]);
+ nvkm_memory_unref(&gr->scrubber.ctxbuf.mem[i]);
+ }
+
+ nvkm_vmm_unref(&gr->scrubber.vmm);
+ nvkm_memory_unref(&gr->scrubber.inst);
+}
+
+static int
+r570_gr_scrubber_init(struct r535_gr *gr)
+{
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_gsp *gsp = device->gsp;
+ struct nvkm_rm *rm = gsp->rm;
+ int ret;
+
+ /* Scrubber channel only required on TU10x. */
+ switch (device->chipset) {
+ case 0x162:
+ case 0x164:
+ case 0x166:
+ break;
+ default:
+ return 0;
+ }
+
+ if (gr->scrubber.chid < 0) {
+ gr->scrubber.chid = nvkm_chid_get(device->fifo->chid, NULL);
+ if (gr->scrubber.chid < 0)
+ return gr->scrubber.chid;
+ }
+
+ /* Allocate scrubber channel. */
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x2000 + device->fifo->rm.mthdbuf_size, 0, true,
+ &gr->scrubber.inst);
+ if (ret)
+ goto done;
+
+ ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grScrubberVmm",
+ &gr->scrubber.vmm);
+ if (ret)
+ goto done;
+
+ ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS, false);
+ if (ret)
+ goto done;
+
+ ret = rm->api->fifo->chan.alloc(&gr->scrubber.vmm->rm.device, KGRAPHICS_SCRUBBER_HANDLE_CHANNEL,
+ NV2080_ENGINE_TYPE_GR0, 0, false, gr->scrubber.chid,
+ nvkm_memory_addr(gr->scrubber.inst),
+ nvkm_memory_addr(gr->scrubber.inst) + 0x1000,
+ nvkm_memory_addr(gr->scrubber.inst) + 0x2000,
+ gr->scrubber.vmm, 0, 0x1000, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = r535_gr_promote_ctx(gr, false, gr->scrubber.vmm, gr->scrubber.ctxbuf.mem,
+ gr->scrubber.ctxbuf.vma, &gr->scrubber.chan);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_rm_alloc(&gr->scrubber.chan, KGRAPHICS_SCRUBBER_HANDLE_3DOBJ,
+ rm->gpu->gr.class.threed, 0, &gr->scrubber.threed);
+ if (ret)
+ goto done;
+
+ /* Initialise scrubber channel on RM. */
+ ret = r570_gr_scrubber_ctrl(gr, false);
+ if (ret)
+ goto done;
+
+ gr->scrubber.enabled = true;
+
+done:
+ if (ret)
+ r570_gr_scrubber_fini(gr);
+
+ return ret;
+}
+
+static int
+r570_gr_get_ctxbufs_info(struct r535_gr *gr)
+{
+ NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_gsp *gsp = subdev->device->gsp;
+
+ info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+ NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+ sizeof(*info));
+ if (WARN_ON(IS_ERR(info)))
+ return PTR_ERR(info);
+
+ for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
+ r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);
+
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+ return 0;
+}
+
+const struct nvkm_rm_api_gr
+r570_gr = {
+ .get_ctxbufs_info = r570_gr_get_ctxbufs_info,
+ .scrubber.init = r570_gr_scrubber_init,
+ .scrubber.fini = r570_gr_scrubber_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
new file mode 100644
index 000000000000..9d2fa4e66d59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+#include <rm/rpc.h>
+
+#include <asm-generic/video.h>
+
+#include "nvrm/gsp.h"
+#include "nvrm/rpcfn.h"
+#include "nvrm/msgfn.h"
+
+#include <core/pci.h>
+#include <subdev/pci/priv.h>
+
+static u32
+r570_gsp_sr_data_size(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
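+ /* Save everything between the end of the non-WPR heap and the end of FRTS. */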
+
+ return (meta->frtsOffset + meta->frtsSize) -
+ (meta->nonWprHeapOffset + meta->nonWprHeapSize);
+}
+
+static void
+r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp)
+{
+ if (gsp->subdev.debug < NV_DBG_DEBUG) {
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, NULL, NULL);
+ }
+}
+
+static bool
+r570_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
+{
+ switch (mc_engine_idx) {
+ case MC_ENGINE_IDX_GSP:
+ *ptype = NVKM_SUBDEV_GSP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_DISP:
+ *ptype = NVKM_ENGINE_DISP;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE19:
+ *ptype = NVKM_ENGINE_CE;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
+ return true;
+ case MC_ENGINE_IDX_GR0:
+ *ptype = NVKM_ENGINE_GR;
+ *pinst = 0;
+ return true;
+ case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+ *ptype = NVKM_ENGINE_NVDEC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
+ return true;
+ case MC_ENGINE_IDX_NVENC ... MC_ENGINE_IDX_NVENC3:
+ *ptype = NVKM_ENGINE_NVENC;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVENC;
+ return true;
+ case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+ *ptype = NVKM_ENGINE_NVJPG;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
+ return true;
+ case MC_ENGINE_IDX_OFA0 ... MC_ENGINE_IDX_OFA1:
+ *ptype = NVKM_ENGINE_OFA;
+ *pinst = mc_engine_idx - MC_ENGINE_IDX_OFA0;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+r570_gsp_get_static_info(struct nvkm_gsp *gsp)
+{
+ GspStaticConfigInfo *rpc;
+ u32 gpc_mask;
+ u32 tpc_mask;
+ int ret;
+
+ rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+ if (IS_ERR(rpc))
+ return PTR_ERR(rpc);
+
+ gsp->internal.client.object.client = &gsp->internal.client;
+ gsp->internal.client.object.parent = NULL;
+ gsp->internal.client.object.handle = rpc->hInternalClient;
+ gsp->internal.client.gsp = gsp;
+ INIT_LIST_HEAD(&gsp->internal.client.events);
+
+ gsp->internal.device.object.client = &gsp->internal.client;
+ gsp->internal.device.object.parent = &gsp->internal.client.object;
+ gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+ gsp->internal.device.subdevice.client = &gsp->internal.client;
+ gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+ gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+ gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+ gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+ r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);
+
+ if (gsp->rm->wpr->offset_set_by_acr) {
+ GspFwWprMeta *meta = gsp->wpr_meta.data;
+
+ meta->nonWprHeapOffset = rpc->fwWprLayoutOffset.nonWprHeapOffset;
+ meta->frtsOffset = rpc->fwWprLayoutOffset.frtsOffset;
+ }
+
+ nvkm_gsp_rpc_done(gsp, rpc);
+
+ ret = r570_gr_gpc_mask(gsp, &gpc_mask);
+ if (ret)
+ return ret;
+
+ for (int gpc = 0; gpc < 32; gpc++) {
+ if (gpc_mask & BIT(gpc)) {
+ ret = r570_gr_tpc_mask(gsp, gpc, &tpc_mask);
+ if (ret)
+ return ret;
+
+ gsp->gr.tpcs += hweight32(tpc_mask);
+ gsp->gr.gpcs++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+r570_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+ if (!handle)
+ return;
+
+ acpi->bValid = 1;
+
+ r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+ r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+ r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+static int
+r570_gsp_set_system_info(struct nvkm_gsp *gsp)
+{
+ struct nvkm_device *device = gsp->subdev.device;
+ struct pci_dev *pdev = container_of(device, struct nvkm_device_pci, device)->pdev;
+ GspSystemInfo *info;
+
+ if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+ return -ENOSYS;
+
+ info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+ info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+ info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
+ info->nvDomainBusDeviceFunc = pci_dev_id(pdev);
+ info->maxUserVa = TASK_SIZE;
+ info->pciConfigMirrorBase = device->pci->func->cfg.addr;
+ info->pciConfigMirrorSize = device->pci->func->cfg.size;
+ info->PCIDeviceID = (pdev->device << 16) | pdev->vendor;
+ info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor;
+ info->PCIRevisionID = pdev->revision;
+ r570_gsp_acpi_info(gsp, &info->acpiMethodData);
+ info->bIsPrimary = video_is_primary_device(device->dev);
+ info->bPreserveVideoMemoryAllocations = false;
+
+ return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+}
+
+static void
+r570_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
+{
+ GSP_ARGUMENTS_CACHED *args;
+
+ args = gsp->rmargs.data;
+ args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+ args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+ args->messageQueueInitArguments.cmdQueueOffset =
+ (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+ args->messageQueueInitArguments.statQueueOffset =
+ (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
+ if (!resume) {
+ args->srInitArguments.oldLevel = 0;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 0;
+ } else {
+ args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+ args->srInitArguments.flags = 0;
+ args->srInitArguments.bInPMTransition = 1;
+ }
+
+ args->bDmemStack = 1;
+}
+
+const struct nvkm_rm_api_gsp
+r570_gsp = {
+ .set_rmargs = r570_gsp_set_rmargs,
+ .set_system_info = r570_gsp_set_system_info,
+ .get_static_info = r570_gsp_get_static_info,
+ .xlat_mc_engine_idx = r570_gsp_xlat_mc_engine_idx,
+ .drop_post_nocat_record = r570_gsp_drop_post_nocat_record,
+ .sr_data_size = r570_gsp_sr_data_size,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
new file mode 100644
index 000000000000..e8714e0abc37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/client.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_CLIENT_H__
+#define __NVRM_CLIENT_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV01_ROOT (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+ NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+ NvU32 processID;
+ char processName[NV_PROC_NAME_MAX_LENGTH];
+ NV_DECLARE_ALIGNED(NvP64 pOsPidInfo, 8);
+} NV0000_ALLOC_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
new file mode 100644
index 000000000000..06e972835d77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/disp.h
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_DISP_H__
+#define __NVRM_DISP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+ NvU32 feHwSysCap;
+ NvU32 windowPresentMask;
+ NvBool bFbRemapperEnabled;
+ NvU32 numHeads;
+ NvU32 i2cPort;
+ NvU32 internalDispActiveMask;
+ NvU32 embeddedDisplayPortMask;
+ NvBool bExternalMuxSupported;
+ NvBool bInternalMuxSupported;
+ NvU32 numDispChannels;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730107U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayMask;
+ NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_MAX_CONNECTORS 4U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 DDCPartners;
+ NvU32 count;
+ struct {
+ NvU32 index;
+ NvU32 type;
+ NvU32 location;
+ } data[NV0073_CTRL_MAX_CONNECTORS];
+ NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+ NvBool bDscSupported;
+ NvU32 encoderColorFormatMask;
+ NvU32 lineBufferSizeKB;
+ NvU32 rateBufferSizeKB;
+ NvU32 bitsPerPixelPrecision;
+ NvU32 maxNumHztSlices;
+ NvU32 lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 sorIndex;
+ NvU32 maxLinkRate;
+ NvU32 dpVersionsSupported;
+ NvU32 UHBRSupportedByGpu;
+ NvU32 minPClkForCompressed;
+ NvBool bIsMultistreamSupported;
+ NvBool bIsSCEnabled;
+ NvBool bHasIncreasedWatermarkLimits;
+ NvBool bIsPC2Disabled;
+ NvBool isSingleHeadMSTSupported;
+ NvBool bFECSupported;
+ NvBool bIsTrainPhyRepeater;
+ NvBool bOverrideLinkBw;
+ NvBool bUseRgFlushSequence;
+ NvBool bSupportDPDownSpread;
+ NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP2_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0 0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR10_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5 1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR13_5_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0 2:2
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_NO (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_UHBR_SUPPORTED_UHBR20_0_YES (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U)
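+
+/*
+ * Editor's sketch, not part of the RM headers: the "high:low" defines
+ * above name bit ranges inside the NvU32 fields of
+ * NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS. Decoded by hand (nouveau itself
+ * would normally go through its DRF helpers), extracting the link-rate
+ * field looks like this; the helper name is illustrative only:
+ */
+static inline NvU32
+nvrm_dp_caps_max_link_rate(const NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *p)
+{
+	/* MAX_LINK_RATE is bits 2:0, yielding one of the _MAX_LINK_RATE_* values. */
+	return p->maxLinkRate & 0x7;
+}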
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 flags;
+ NvU32 displayMask;
+ NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 flags;
+ NvU32 UHBRSupportedByDfp;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE 5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR 13:13
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_TYPE_C_TO_DP_CONNECTOR_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK 21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS 0:0
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_10_0GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS 1:1
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_13_5GBPS_TRUE (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS 2:2
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_FALSE (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS2_DP_UHBR_SUPPORTED_20_0GBPS_TRUE (0x00000001U)
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x73010cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 flags;
+ NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 brightness;
+ NvBool bUncalibrated;
+ NvU8 brightnessType;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+ // In
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+ // Out
+ NvU16 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+ NvU8 linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 cmd;
+ NvU32 data;
+ NvU32 err;
+ NvU32 retryTimeMs;
+ NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 head;
+ NvU32 sorIndex;
+ NvU32 dpLink;
+
+ NvBool bEnableOverride;
+ NvBool bMST;
+ NvU32 singleHeadMultistreamMode;
+ NvU32 hBlankSym;
+ NvU32 vBlankSym;
+ NvU32 colorFormat;
+ NvBool bEnableTwoHeadOneOr;
+
+ struct {
+ NvU32 slotStart;
+ NvU32 slotEnd;
+ NvU32 PBN;
+ NvU32 Timeslice;
+ NvBool sendACT; // deprecated - use NV0073_CTRL_CMD_DP_SEND_ACT
+ NvU32 singleHeadMSTPipeline;
+ NvBool bEnableAudioOverRightPanel;
+ } MST;
+
+ struct {
+ NvBool bEnhancedFraming;
+ NvU32 tuSize;
+ NvU32 waterMark;
+ NvBool bEnableAudioOverRightPanel;
+ } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+ NvU32 subDeviceInstance;
+ NvU32 displayId;
+ NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+ NvU32 addressSpace;
+ NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NvU32 cacheSnoop;
+ NvU32 hclass;
+ NvU32 channelInstance;
+ NvBool valid;
+ NvU32 pbTargetAperture;
+ NvU32 channelPBSize;
+ NvU32 subDeviceId;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define ADDR_SYSMEM (1)
+
+#define ADDR_FBMEM 2 // Frame buffer memory space
+
+typedef enum
+{
+ PB_SIZE_4KB = 0,
+ PB_SIZE_8KB,
+ PB_SIZE_16KB,
+ PB_SIZE_32KB,
+ PB_SIZE_64KB
+} ChannelPBSize;
+
+typedef struct
+{
+ NvV32 channelInstance; // One of the n channel instances of a given channel type.
+ // Note that the core channel has only one instance
+ // while all others have two (one per head).
+ NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
+ NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+ NvU32 offset; // Initial offset for put/get, usually zero.
+ NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+ NvU32 flags;
+ ChannelPBSize channelPBSize; // Size of Push Buffer requested by client (allowed values in enum)
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
+
+ NvU32 subDeviceId; // One-hot encoded subDeviceId (i.e. SDM) that will be used to address the channel in the pushbuffer stream (via SSDM method)
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
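+
+/*
+ * Editor's note, not part of the RM headers: mind the inverted polarity
+ * of CONNECT_PB_AT_GRAB above (_YES is 0x0, _NO is 0x1), so a
+ * zero-initialized flags word already requests connect-at-grab.
+ */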
+
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT100 1
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_PERCENT1000 2
+#define NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_TYPE_NITS 3
+
+typedef enum
+{
+ IOVA,
+ PHYS_NVM,
+ PHYS_PCI,
+ PHYS_PCI_COHERENT
+} PBTARGETAPERTURE;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
new file mode 100644
index 000000000000..7997050a4f29
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/engine.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_ENGINE_H__
+#define __NVRM_ENGINE_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define MC_ENGINE_IDX_NULL 0 // This must be 0
+#define MC_ENGINE_IDX_TMR 1
+#define MC_ENGINE_IDX_DISP 2
+#define MC_ENGINE_IDX_FB 3
+#define MC_ENGINE_IDX_FIFO 4
+#define MC_ENGINE_IDX_VIDEO 5
+#define MC_ENGINE_IDX_MD 6
+#define MC_ENGINE_IDX_BUS 7
+#define MC_ENGINE_IDX_PMGR 8
+#define MC_ENGINE_IDX_VP2 9
+#define MC_ENGINE_IDX_CIPHER 10
+#define MC_ENGINE_IDX_BIF 11
+#define MC_ENGINE_IDX_PPP 12
+#define MC_ENGINE_IDX_PRIVRING 13
+#define MC_ENGINE_IDX_PMU 14
+#define MC_ENGINE_IDX_CE0 15
+#define MC_ENGINE_IDX_CE1 16
+#define MC_ENGINE_IDX_CE2 17
+#define MC_ENGINE_IDX_CE3 18
+#define MC_ENGINE_IDX_CE4 19
+#define MC_ENGINE_IDX_CE5 20
+#define MC_ENGINE_IDX_CE6 21
+#define MC_ENGINE_IDX_CE7 22
+#define MC_ENGINE_IDX_CE8 23
+#define MC_ENGINE_IDX_CE9 24
+#define MC_ENGINE_IDX_CE10 25
+#define MC_ENGINE_IDX_CE11 26
+#define MC_ENGINE_IDX_CE12 27
+#define MC_ENGINE_IDX_CE13 28
+#define MC_ENGINE_IDX_CE14 29
+#define MC_ENGINE_IDX_CE15 30
+#define MC_ENGINE_IDX_CE16 31
+#define MC_ENGINE_IDX_CE17 32
+#define MC_ENGINE_IDX_CE18 33
+#define MC_ENGINE_IDX_CE19 34
+#define MC_ENGINE_IDX_CE_MAX MC_ENGINE_IDX_CE19
+#define MC_ENGINE_IDX_VIC 35
+#define MC_ENGINE_IDX_ISOHUB 36
+#define MC_ENGINE_IDX_VGPU 37
+#define MC_ENGINE_IDX_NVENC 38
+#define MC_ENGINE_IDX_NVENC1 39
+#define MC_ENGINE_IDX_NVENC2 40
+#define MC_ENGINE_IDX_NVENC3 41
+#define MC_ENGINE_IDX_C2C 42
+#define MC_ENGINE_IDX_LTC 43
+#define MC_ENGINE_IDX_FBHUB 44
+#define MC_ENGINE_IDX_HDACODEC 45
+#define MC_ENGINE_IDX_GMMU 46
+#define MC_ENGINE_IDX_SEC2 47
+#define MC_ENGINE_IDX_FSP 48
+#define MC_ENGINE_IDX_NVLINK 49
+#define MC_ENGINE_IDX_GSP 50
+#define MC_ENGINE_IDX_NVJPG 51
+#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG
+#define MC_ENGINE_IDX_NVJPEG1 52
+#define MC_ENGINE_IDX_NVJPEG2 53
+#define MC_ENGINE_IDX_NVJPEG3 54
+#define MC_ENGINE_IDX_NVJPEG4 55
+#define MC_ENGINE_IDX_NVJPEG5 56
+#define MC_ENGINE_IDX_NVJPEG6 57
+#define MC_ENGINE_IDX_NVJPEG7 58
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT 59
+#define MC_ENGINE_IDX_ACCESS_CNTR 60
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 61
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 62
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 63
+#define MC_ENGINE_IDX_INFO_FAULT 64
+#define MC_ENGINE_IDX_BSP 65
+#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC
+#define MC_ENGINE_IDX_NVDEC1 66
+#define MC_ENGINE_IDX_NVDEC2 67
+#define MC_ENGINE_IDX_NVDEC3 68
+#define MC_ENGINE_IDX_NVDEC4 69
+#define MC_ENGINE_IDX_NVDEC5 70
+#define MC_ENGINE_IDX_NVDEC6 71
+#define MC_ENGINE_IDX_NVDEC7 72
+#define MC_ENGINE_IDX_CPU_DOORBELL 73
+#define MC_ENGINE_IDX_PRIV_DOORBELL 74
+#define MC_ENGINE_IDX_MMU_ECC_ERROR 75
+#define MC_ENGINE_IDX_BLG 76
+#define MC_ENGINE_IDX_PERFMON 77
+#define MC_ENGINE_IDX_BUF_RESET 78
+#define MC_ENGINE_IDX_XBAR 79
+#define MC_ENGINE_IDX_ZPW 80
+#define MC_ENGINE_IDX_OFA0 81
+#define MC_ENGINE_IDX_OFA1 82
+#define MC_ENGINE_IDX_TEGRA 83
+#define MC_ENGINE_IDX_GR 84
+#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR
+#define MC_ENGINE_IDX_GR1 85
+#define MC_ENGINE_IDX_GR2 86
+#define MC_ENGINE_IDX_GR3 87
+#define MC_ENGINE_IDX_GR4 88
+#define MC_ENGINE_IDX_GR5 89
+#define MC_ENGINE_IDX_GR6 90
+#define MC_ENGINE_IDX_GR7 91
+#define MC_ENGINE_IDX_ESCHED 92
+#define MC_ENGINE_IDX_ESCHED__SIZE 64
+#define MC_ENGINE_IDX_GR_FECS_LOG 156
+#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG
+#define MC_ENGINE_IDX_GR1_FECS_LOG 157
+#define MC_ENGINE_IDX_GR2_FECS_LOG 158
+#define MC_ENGINE_IDX_GR3_FECS_LOG 159
+#define MC_ENGINE_IDX_GR4_FECS_LOG 160
+#define MC_ENGINE_IDX_GR5_FECS_LOG 161
+#define MC_ENGINE_IDX_GR6_FECS_LOG 162
+#define MC_ENGINE_IDX_GR7_FECS_LOG 163
+#define MC_ENGINE_IDX_TMR_SWRL 164
+#define MC_ENGINE_IDX_DISP_GSP 165
+#define MC_ENGINE_IDX_REPLAYABLE_FAULT_CPU 166
+#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_CPU 167
+#define MC_ENGINE_IDX_PXUC 168
+#define MC_ENGINE_IDX_SYSLTC 169
+#define MC_ENGINE_IDX_LRCC 170
+#define MC_ENGINE_IDX_GSPLITE 171
+#define MC_ENGINE_IDX_GSPLITE0 MC_ENGINE_IDX_GSPLITE
+#define MC_ENGINE_IDX_GSPLITE1 172
+#define MC_ENGINE_IDX_GSPLITE2 173
+#define MC_ENGINE_IDX_GSPLITE3 174
+#define MC_ENGINE_IDX_GSPLITE_MAX MC_ENGINE_IDX_GSPLITE3
+#define MC_ENGINE_IDX_DPAUX 175
+#define MC_ENGINE_IDX_DISP_LOW 176
+#define MC_ENGINE_IDX_MAX 177
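+
+/*
+ * Editor's sketch, not part of the RM headers: consecutive blocks in
+ * the table above (CE0..CE19, NVDEC0..NVDEC7, GR0..GR7) let an engine
+ * instance be recovered by subtraction, which is the kind of mapping a
+ * translation hook such as r570_gsp_xlat_mc_engine_idx performs. The
+ * helper below is illustrative only:
+ */
+static inline int
+nvrm_mc_engine_idx_to_ce_inst(NvU32 mcEngineIdx)
+{
+	if (mcEngineIdx >= MC_ENGINE_IDX_CE0 && mcEngineIdx <= MC_ENGINE_IDX_CE_MAX)
+		return mcEngineIdx - MC_ENGINE_IDX_CE0;
+	return -1; /* not a copy engine */
+}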
+
+typedef enum
+{
+ RM_ENGINE_TYPE_NULL = (0x00000000),
+ RM_ENGINE_TYPE_GR0 = (0x00000001),
+ RM_ENGINE_TYPE_GR1 = (0x00000002),
+ RM_ENGINE_TYPE_GR2 = (0x00000003),
+ RM_ENGINE_TYPE_GR3 = (0x00000004),
+ RM_ENGINE_TYPE_GR4 = (0x00000005),
+ RM_ENGINE_TYPE_GR5 = (0x00000006),
+ RM_ENGINE_TYPE_GR6 = (0x00000007),
+ RM_ENGINE_TYPE_GR7 = (0x00000008),
+ RM_ENGINE_TYPE_COPY0 = (0x00000009),
+ RM_ENGINE_TYPE_COPY1 = (0x0000000a),
+ RM_ENGINE_TYPE_COPY2 = (0x0000000b),
+ RM_ENGINE_TYPE_COPY3 = (0x0000000c),
+ RM_ENGINE_TYPE_COPY4 = (0x0000000d),
+ RM_ENGINE_TYPE_COPY5 = (0x0000000e),
+ RM_ENGINE_TYPE_COPY6 = (0x0000000f),
+ RM_ENGINE_TYPE_COPY7 = (0x00000010),
+ RM_ENGINE_TYPE_COPY8 = (0x00000011),
+ RM_ENGINE_TYPE_COPY9 = (0x00000012),
+ RM_ENGINE_TYPE_COPY10 = (0x00000013),
+ RM_ENGINE_TYPE_COPY11 = (0x00000014),
+ RM_ENGINE_TYPE_COPY12 = (0x00000015),
+ RM_ENGINE_TYPE_COPY13 = (0x00000016),
+ RM_ENGINE_TYPE_COPY14 = (0x00000017),
+ RM_ENGINE_TYPE_COPY15 = (0x00000018),
+ RM_ENGINE_TYPE_COPY16 = (0x00000019),
+ RM_ENGINE_TYPE_COPY17 = (0x0000001a),
+ RM_ENGINE_TYPE_COPY18 = (0x0000001b),
+ RM_ENGINE_TYPE_COPY19 = (0x0000001c),
+ RM_ENGINE_TYPE_NVDEC0 = (0x0000001d),
+ RM_ENGINE_TYPE_NVDEC1 = (0x0000001e),
+ RM_ENGINE_TYPE_NVDEC2 = (0x0000001f),
+ RM_ENGINE_TYPE_NVDEC3 = (0x00000020),
+ RM_ENGINE_TYPE_NVDEC4 = (0x00000021),
+ RM_ENGINE_TYPE_NVDEC5 = (0x00000022),
+ RM_ENGINE_TYPE_NVDEC6 = (0x00000023),
+ RM_ENGINE_TYPE_NVDEC7 = (0x00000024),
+ RM_ENGINE_TYPE_NVENC0 = (0x00000025),
+ RM_ENGINE_TYPE_NVENC1 = (0x00000026),
+ RM_ENGINE_TYPE_NVENC2 = (0x00000027),
+ // Bug 4175886 - Use this new value for all chips once GB20X is released
+ RM_ENGINE_TYPE_NVENC3 = (0x00000028),
+ RM_ENGINE_TYPE_VP = (0x00000029),
+ RM_ENGINE_TYPE_ME = (0x0000002a),
+ RM_ENGINE_TYPE_PPP = (0x0000002b),
+ RM_ENGINE_TYPE_MPEG = (0x0000002c),
+ RM_ENGINE_TYPE_SW = (0x0000002d),
+ RM_ENGINE_TYPE_TSEC = (0x0000002e),
+ RM_ENGINE_TYPE_VIC = (0x0000002f),
+ RM_ENGINE_TYPE_MP = (0x00000030),
+ RM_ENGINE_TYPE_SEC2 = (0x00000031),
+ RM_ENGINE_TYPE_HOST = (0x00000032),
+ RM_ENGINE_TYPE_DPU = (0x00000033),
+ RM_ENGINE_TYPE_PMU = (0x00000034),
+ RM_ENGINE_TYPE_FBFLCN = (0x00000035),
+ RM_ENGINE_TYPE_NVJPEG0 = (0x00000036),
+ RM_ENGINE_TYPE_NVJPEG1 = (0x00000037),
+ RM_ENGINE_TYPE_NVJPEG2 = (0x00000038),
+ RM_ENGINE_TYPE_NVJPEG3 = (0x00000039),
+ RM_ENGINE_TYPE_NVJPEG4 = (0x0000003a),
+ RM_ENGINE_TYPE_NVJPEG5 = (0x0000003b),
+ RM_ENGINE_TYPE_NVJPEG6 = (0x0000003c),
+ RM_ENGINE_TYPE_NVJPEG7 = (0x0000003d),
+ RM_ENGINE_TYPE_OFA0 = (0x0000003e),
+ RM_ENGINE_TYPE_OFA1 = (0x0000003f),
+ RM_ENGINE_TYPE_RESERVED40 = (0x00000040),
+ RM_ENGINE_TYPE_RESERVED41 = (0x00000041),
+ RM_ENGINE_TYPE_RESERVED42 = (0x00000042),
+ RM_ENGINE_TYPE_RESERVED43 = (0x00000043),
+ RM_ENGINE_TYPE_RESERVED44 = (0x00000044),
+ RM_ENGINE_TYPE_RESERVED45 = (0x00000045),
+ RM_ENGINE_TYPE_RESERVED46 = (0x00000046),
+ RM_ENGINE_TYPE_RESERVED47 = (0x00000047),
+ RM_ENGINE_TYPE_RESERVED48 = (0x00000048),
+ RM_ENGINE_TYPE_RESERVED49 = (0x00000049),
+ RM_ENGINE_TYPE_RESERVED4a = (0x0000004a),
+ RM_ENGINE_TYPE_RESERVED4b = (0x0000004b),
+ RM_ENGINE_TYPE_RESERVED4c = (0x0000004c),
+ RM_ENGINE_TYPE_RESERVED4d = (0x0000004d),
+ RM_ENGINE_TYPE_RESERVED4e = (0x0000004e),
+ RM_ENGINE_TYPE_RESERVED4f = (0x0000004f),
+ RM_ENGINE_TYPE_RESERVED50 = (0x00000050),
+ RM_ENGINE_TYPE_RESERVED51 = (0x00000051),
+ RM_ENGINE_TYPE_RESERVED52 = (0x00000052),
+ RM_ENGINE_TYPE_RESERVED53 = (0x00000053),
+ RM_ENGINE_TYPE_LAST = (0x00000054),
+} RM_ENGINE_TYPE;
+
+#define NV2080_ENGINE_TYPE_NULL (0x00000000)
+#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS
+#define NV2080_ENGINE_TYPE_GR1 (0x00000002)
+#define NV2080_ENGINE_TYPE_GR2 (0x00000003)
+#define NV2080_ENGINE_TYPE_GR3 (0x00000004)
+#define NV2080_ENGINE_TYPE_GR4 (0x00000005)
+#define NV2080_ENGINE_TYPE_GR5 (0x00000006)
+#define NV2080_ENGINE_TYPE_GR6 (0x00000007)
+#define NV2080_ENGINE_TYPE_GR7 (0x00000008)
+#define NV2080_ENGINE_TYPE_COPY0 (0x00000009)
+#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a)
+#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b)
+#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c)
+#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d)
+#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e)
+#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f)
+#define NV2080_ENGINE_TYPE_COPY7 (0x00000010)
+#define NV2080_ENGINE_TYPE_COPY8 (0x00000011)
+#define NV2080_ENGINE_TYPE_COPY9 (0x00000012)
+#define NV2080_ENGINE_TYPE_BSP (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP
+#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014)
+#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015)
+#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016)
+#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017)
+#define NV2080_ENGINE_TYPE_NVDEC5 (0x00000018)
+#define NV2080_ENGINE_TYPE_NVDEC6 (0x00000019)
+#define NV2080_ENGINE_TYPE_NVDEC7 (0x0000001a)
+#define NV2080_ENGINE_TYPE_MSENC (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */
+#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c)
+#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d)
+#define NV2080_ENGINE_TYPE_VP (0x0000001e)
+#define NV2080_ENGINE_TYPE_ME (0x0000001f)
+#define NV2080_ENGINE_TYPE_PPP (0x00000020)
+#define NV2080_ENGINE_TYPE_MPEG (0x00000021)
+#define NV2080_ENGINE_TYPE_SW (0x00000022)
+#define NV2080_ENGINE_TYPE_CIPHER (0x00000023)
+#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER
+#define NV2080_ENGINE_TYPE_VIC (0x00000024)
+#define NV2080_ENGINE_TYPE_MP (0x00000025)
+#define NV2080_ENGINE_TYPE_SEC2 (0x00000026)
+#define NV2080_ENGINE_TYPE_HOST (0x00000027)
+#define NV2080_ENGINE_TYPE_DPU (0x00000028)
+#define NV2080_ENGINE_TYPE_PMU (0x00000029)
+#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a)
+#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG
+#define NV2080_ENGINE_TYPE_NVJPEG1 (0x0000002c)
+#define NV2080_ENGINE_TYPE_NVJPEG2 (0x0000002d)
+#define NV2080_ENGINE_TYPE_NVJPEG3 (0x0000002e)
+#define NV2080_ENGINE_TYPE_NVJPEG4 (0x0000002f)
+#define NV2080_ENGINE_TYPE_NVJPEG5 (0x00000030)
+#define NV2080_ENGINE_TYPE_NVJPEG6 (0x00000031)
+#define NV2080_ENGINE_TYPE_NVJPEG7 (0x00000032)
+#define NV2080_ENGINE_TYPE_OFA (0x00000033)
+#define NV2080_ENGINE_TYPE_OFA0 NV2080_ENGINE_TYPE_OFA
+#define NV2080_ENGINE_TYPE_COPY10 (0x00000034)
+#define NV2080_ENGINE_TYPE_COPY11 (0x00000035)
+#define NV2080_ENGINE_TYPE_COPY12 (0x00000036)
+#define NV2080_ENGINE_TYPE_COPY13 (0x00000037)
+#define NV2080_ENGINE_TYPE_COPY14 (0x00000038)
+#define NV2080_ENGINE_TYPE_COPY15 (0x00000039)
+#define NV2080_ENGINE_TYPE_COPY16 (0x0000003a)
+#define NV2080_ENGINE_TYPE_COPY17 (0x0000003b)
+#define NV2080_ENGINE_TYPE_COPY18 (0x0000003c)
+#define NV2080_ENGINE_TYPE_COPY19 (0x0000003d)
+#define NV2080_ENGINE_TYPE_OFA1 (0x0000003e)
+#define NV2080_ENGINE_TYPE_NVENC3 (0x0000003f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY0 (0x00000040)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY1 (0x00000041)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY2 (0x00000042)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY3 (0x00000043)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY4 (0x00000044)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY5 (0x00000045)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY6 (0x00000046)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY7 (0x00000047)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY8 (0x00000048)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY9 (0x00000049)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY10 (0x0000004a)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY11 (0x0000004b)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY12 (0x0000004c)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY13 (0x0000004d)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY14 (0x0000004e)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY15 (0x0000004f)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY16 (0x00000050)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY17 (0x00000051)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY18 (0x00000052)
+#define NV2080_ENGINE_TYPE_COMP_DECOMP_COPY19 (0x00000053)
+#define NV2080_ENGINE_TYPE_LAST (0x00000054)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
new file mode 100644
index 000000000000..8af432375f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fbsr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FBSR_H__
+#define __NVRM_FBSR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+ NvHandle hClient;
+ NvHandle hSysMem;
+ NvBool bEnteringGcoffState;
+ NV_DECLARE_ALIGNED(NvU64 sysmemAddrOfSuspendResumeData, 8);
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
index 7157c7757698..2b002ca64e0f 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/fifo.h
@@ -1,31 +1,14 @@
-#ifndef __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
-#define __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
-#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
-
-/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_FIFO_H__
+#define __NVRM_FIFO_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV_MAX_SUBDEVICES 8
typedef struct NV_MEMORY_DESC_PARAMS {
NV_DECLARE_ALIGNED(NvU64 base, 8);
@@ -34,137 +17,197 @@ typedef struct NV_MEMORY_DESC_PARAMS {
NvU32 cacheAttrib;
} NV_MEMORY_DESC_PARAMS;
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
+
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+ NvHandle hObjectError; // error context DMA
+ NvHandle hObjectBuffer; // no longer used
+ NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
+ NvU32 gpFifoEntries; // number of GP FIFO entries
+
+ NvU32 flags;
+
+
+ NvHandle hContextShare; // context share handle
+ NvHandle hVASpace; // VASpace for the channel
+
+ // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+ NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+ // offset to beginning of UserD within hUserdMemory[x]
+ NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+ // engine type (NV2080_ENGINE_TYPE_*) with which this channel is associated
+ NvU32 engineType;
+ // Channel identifier that is unique for the duration of an RM session
+ NvU32 cid;
+ // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+ NvU32 subDeviceId;
+ NvHandle hObjectEccError; // ECC error context DMA
+
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+ NvHandle hPhysChannelGroup; // reserved
+ NvU32 internalFlags; // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+ NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+ NvU32 ProcessID; // reserved
+ NvU32 SubProcessID; // reserved
+
+ // IV used for CPU-side encryption / GPU-side decryption.
+ NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // IV used for CPU-side decryption / GPU-side encryption.
+ NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
+ // Nonce used for CPU-side signing / GPU-side signature verification.
+ NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
+ NvU32 tpcConfigID; // TPC Configuration Id as supported by DTD-PG Feature
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
#define NVOS04_FLAGS_CHANNEL_TYPE 1:0
#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000
#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE
#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE
-
#define NVOS04_FLAGS_VPR 2:2
#define NVOS04_FLAGS_VPR_FALSE 0x00000000
#define NVOS04_FLAGS_VPR_TRUE 0x00000001
-
#define NVOS04_FLAGS_CC_SECURE 2:2
#define NVOS04_FLAGS_CC_SECURE_FALSE 0x00000000
#define NVOS04_FLAGS_CC_SECURE_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001
-
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000
#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001
-
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000
#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12
-
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001
-
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001
-
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000
#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001
-
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001
#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002
-
#define NVOS04_FLAGS_MAP_CHANNEL 30:30
#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000
#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001
-
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000
#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001
-#define CC_CHAN_ALLOC_IV_SIZE_DWORD 3U
-#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
-
-typedef struct NV_CHANNEL_ALLOC_PARAMS {
-
- NvHandle hObjectError; // error context DMA
- NvHandle hObjectBuffer; // no longer used
- NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8); // offset to beginning of GP FIFO
- NvU32 gpFifoEntries; // number of GP FIFO entries
-
- NvU32 flags;
-
-
- NvHandle hContextShare; // context share handle
- NvHandle hVASpace; // VASpace for the channel
-
- // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
- NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
-
- // offset to beginning of UserD within hUserdMemory[x]
- NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
-
- // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
- NvU32 engineType;
- // Channel identifier that is unique for the duration of a RM session
- NvU32 cid;
- // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
- NvU32 subDeviceId;
- NvHandle hObjectEccError; // ECC error context DMA
-
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
-
- NvHandle hPhysChannelGroup; // reserved
- NvU32 internalFlags; // reserved
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
- NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
- NvU32 ProcessID; // reserved
- NvU32 SubProcessID; // reserved
- // IV used for CPU-side encryption / GPU-side decryption.
- NvU32 encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
- // IV used for CPU-side decryption / GPU-side encryption.
- NvU32 decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD]; // reserved
- // Nonce used CPU-side signing / GPU-side signature verification.
- NvU32 hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD]; // reserved
-} NV_CHANNEL_ALLOC_PARAMS;
-
-typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
-
+typedef enum {
+ /*!
+ * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+ * kernel CPU-RM clients.
+ */
+ ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+ /*! @brief Error notifier is explicitly not set.
+ *
+ * The corresponding hErrorContext or hEccErrorContext must be
+ * NV01_NULL_OBJECT.
+ */
+ ERROR_NOTIFIER_TYPE_NONE,
+ /*! @brief Error notifier is a ContextDma */
+ ERROR_NOTIFIER_TYPE_CTXDMA,
+ /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+ ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED 6:6
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_GSP_OWNED_YES 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED 7:7
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_NO 0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_UVM_OWNED_YES 0x1
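+
+/*
+ * Editor's sketch, not part of the RM headers: internalFlags in
+ * NV_CHANNEL_ALLOC_PARAMS packs the fields above using the same
+ * "high:low" convention as the NVOS04 flags. Encoding kernel privilege
+ * with a memory-backed error notifier by hand (the helper name is
+ * illustrative only):
+ */
+static inline NvU32
+nvrm_chan_internal_flags_kernel_memory(void)
+{
+	/* PRIVILEGE is bits 1:0, ERROR_NOTIFIER_TYPE is bits 3:2. */
+	return (NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL << 0) |
+	       ((NvU32)ERROR_NOTIFIER_TYPE_MEMORY << 2);
+}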
+
+typedef struct rpc_rc_triggered_v17_02
+{
+ NvU32 nv2080EngineType;
+ NvU32 chid;
+ NvU32 gfid;
+ NvU32 exceptLevel;
+ NvU32 exceptType;
+ NvU32 scope;
+ NvU16 partitionAttributionId;
+ NvU32 mmuFaultAddrLo;
+ NvU32 mmuFaultAddrHi;
+ NvU32 mmuFaultType;
+ NvBool bCallbackNeeded;
+ NvU32 rcJournalBufferSize;
+ NvU8 rcJournalBuffer[];
+} rpc_rc_triggered_v17_02;
+
+#define NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS 0x40
+
+typedef struct NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO {
+ NvU32 engDesc;
+ NvU32 ctxAttr;
+ NvU32 ctxBufferSize;
+ NvU32 addrSpaceList;
+ NvU32 registerBase;
+} NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO;
+
+#define NV2080_CTRL_CMD_GPU_GET_CONSTRUCTED_FALCON_INFO (0x208001b0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+ NvU32 numConstructedFalcons;
+ NV2080_CTRL_GPU_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_GPU_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_GPU_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS {
+ NvBool bDisableActiveChannels;
+} NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS_MESSAGE_ID" */
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
new file mode 100644
index 000000000000..feed1dabd9d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GR_H__
+#define __NVRM_GR_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x1a
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+ NvU32 size;
+ NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+ NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+ NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SETUP (0x00000019)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x0000001a)
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+ NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+ NvU32 gpcId;
+ NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+#define KGRAPHICS_SCRUBBER_HANDLE_VAS 0xdada0042
+#define KGRAPHICS_SCRUBBER_HANDLE_CHANNEL (KGRAPHICS_SCRUBBER_HANDLE_VAS + 3)
+#define KGRAPHICS_SCRUBBER_HANDLE_3DOBJ (KGRAPHICS_SCRUBBER_HANDLE_VAS + 4)
+
+typedef struct NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS {
+ NvBool bTeardown;
+} NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x20800a46) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
new file mode 100644
index 000000000000..b6075021e74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/gsp.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_GSP_H__
+#define __NVRM_GSP_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ NV_DECLARE_ALIGNED(NvU64 base, 8);
+ NV_DECLARE_ALIGNED(NvU64 limit, 8);
+ NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+ NvU32 performance;
+ NvBool supportCompressed;
+ NvBool supportISO;
+ NvBool bProtected;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ NvU32 numFBRegions;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23
+
+#define NV2080_GPU_MAX_GID_LENGTH (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ NvU32 index;
+ NvU32 flags;
+ NvU32 length;
+ NvU8 data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ NvU32 BoardID;
+ char chipSKU[9];
+ char chipSKUMod[5];
+ NvU32 skuConfigVersion;
+ char project[5];
+ char projectSKU[5];
+ char CDP[6];
+ char projectSKUMod[2];
+ NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+#define MAX_GPC_COUNT 32
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ NvU32 totalVFs;
+ NvU32 firstVfOffset;
+ NvU32 vfFeatureMask;
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+ NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+ NvBool bSriovEnabled;
+ NvBool bSriovHeavyEnabled;
+ NvBool bEmulateVFBar0TlbInvalidationRegister;
+ NvBool bClientRmAllocatedCtxBuffer;
+ NvBool bNonPowerOf2ChannelCountSupported;
+ NvBool bVfResizableBAR1Supported;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#include "engine.h"
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS 32
+
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040U)
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS
+{
+ NvU32 numHeads;
+ NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS
+{
+ NvU32 headIndex;
+ NvU32 maxHResolution;
+ NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+#define MAX_GROUP_COUNT 2
+
+typedef struct
+{
+ NvU32 ecidLow;
+ NvU32 ecidHigh;
+ NvU32 ecidExtended;
+} EcidManufacturingInfo;
+
+typedef struct
+{
+ NvU64 nonWprHeapOffset;
+ NvU64 frtsOffset;
+} FW_WPR_LAYOUT_OFFSET;
+
+typedef struct GspStaticConfigInfo_t
+{
+ NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+ NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+ NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+ NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+
+ NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+ NvU32 sriovMaxGfid;
+
+ NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+ NvBool poisonFuseEnabled;
+
+ NvU64 fb_length;
+ NvU64 fbio_mask;
+ NvU32 fb_bus_width;
+ NvU32 fb_ram_type;
+ NvU64 fbp_mask;
+ NvU32 l2_cache_size;
+
+ NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+ NvBool bGpuInternalSku;
+ NvBool bIsQuadroGeneric;
+ NvBool bIsQuadroAd;
+ NvBool bIsNvidiaNvs;
+ NvBool bIsVgx;
+ NvBool bGeforceSmb;
+ NvBool bIsTitan;
+ NvBool bIsTesla;
+ NvBool bIsMobile;
+ NvBool bIsGc6Rtd3Allowed;
+ NvBool bIsGc8Rtd3Allowed;
+ NvBool bIsGcOffRtd3Allowed;
+ NvBool bIsGcoffLegacyAllowed;
+ NvBool bIsMigSupported;
+
+ /* "Total Board Power" refers to power requirement of GPU,
+ * while in GC6 state. Majority of this power will be used
+ * to keep V-RAM active to preserve its content.
+ * Some energy maybe consumed by Always-on components on GPU chip.
+ * This power will be provided by 3.3v voltage rail.
+ */
+ NvU16 RTD3GC6TotalBoardPower;
+
+ /* PERST# (i.e. PCI Express Reset) is a sideband signal
+ * generated by the PCIe host to indicate to the PCIe devices
+ * that the power rails and the reference clock are stable.
+ * The endpoint device typically uses this signal as a global reset.
+ */
+ NvU16 RTD3GC6PerstDelay;
+
+ NvU64 bar1PdeBase;
+ NvU64 bar2PdeBase;
+
+ NvBool bVbiosValid;
+ NvU32 vbiosSubVendor;
+ NvU32 vbiosSubDevice;
+
+ NvBool bPageRetirementSupported;
+
+ NvBool bSplitVasBetweenServerClientRm;
+
+ NvBool bClRootportNeedsNosnoopWAR;
+
+ VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+ VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+ NvU64 displaylessMaxPixels;
+
+ // Client handle for internal RMAPI control.
+ NvHandle hInternalClient;
+
+ // Device handle for internal RMAPI control.
+ NvHandle hInternalDevice;
+
+ // Subdevice handle for internal RMAPI control.
+ NvHandle hInternalSubdevice;
+
+ NvBool bSelfHostedMode;
+ NvBool bAtsSupported;
+
+ NvBool bIsGpuUefi;
+ NvBool bIsEfiInit;
+
+ EcidManufacturingInfo ecidInfo[MAX_GROUP_COUNT];
+
+ FW_WPR_LAYOUT_OFFSET fwWprLayoutOffset;
+} GspStaticConfigInfo;
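+
+/*
+ * Editor's sketch, not part of the RM headers: engineCaps above is
+ * sized by NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX, which suggests one bit per
+ * RM_ENGINE_TYPE split into 32-bit words. Testing a single engine's
+ * bit under that assumption (the helper name is illustrative only):
+ */
+static inline NvBool
+nvrm_engine_caps_test(const NvU32 *engineCaps, RM_ENGINE_TYPE type)
+{
+	return (engineCaps[type / NVGPU_ENGINE_CAPS_MASK_BITS] >>
+		(type % NVGPU_ENGINE_CAPS_MASK_BITS)) & 1;
+}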
+
+typedef struct
+{
+ NvU16 deviceID; // deviceID
+ NvU16 vendorID; // vendorID
+ NvU16 subdeviceID; // subsystem deviceID
+ NvU16 subvendorID; // subsystem vendorID
+ NvU8 revisionID; // revision ID
+} BUSINFO;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct DOD_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 acpiIdListLen;
+ NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 jtCaps;
+ NvU16 jtRevId;
+ NvBool bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+ NvU32 acpiId;
+ NvU32 mode;
+ NV_STATUS status;
+} MUX_METHOD_DATA_ELEMENT;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
+
+typedef struct MUX_METHOD_DATA
+{
+ NvU32 tableLen;
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+ MUX_METHOD_DATA_ELEMENT acpiIdMuxStateTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+ NV_STATUS status;
+ NvU32 optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+ NvBool bValid;
+ DOD_METHOD_DATA dodMethodData;
+ JT_METHOD_DATA jtMethodData;
+ MUX_METHOD_DATA muxMethodData;
+ CAPS_METHOD_DATA capsMethodData;
+} ACPI_METHOD_DATA;
+
+typedef struct GSP_VF_INFO
+{
+ NvU32 totalVFs;
+ NvU32 firstVFOffset;
+ NvU64 FirstVFBar0Address;
+ NvU64 FirstVFBar1Address;
+ NvU64 FirstVFBar2Address;
+ NvBool b64bitBar0;
+ NvBool b64bitBar1;
+ NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct
+{
+ // Link capabilities
+ NvU32 linkCap;
+} GSP_PCIE_CONFIG_REG;
+
+typedef struct GspSystemInfo
+{
+ NvU64 gpuPhysAddr;
+ NvU64 gpuPhysFbAddr;
+ NvU64 gpuPhysInstAddr;
+ NvU64 gpuPhysIoAddr;
+ NvU64 nvDomainBusDeviceFunc;
+ NvU64 simAccessBufPhysAddr;
+ NvU64 notifyOpSharedSurfacePhysAddr;
+ NvU64 pcieAtomicsOpMask;
+ NvU64 consoleMemSize;
+ NvU64 maxUserVa;
+ NvU32 pciConfigMirrorBase;
+ NvU32 pciConfigMirrorSize;
+ NvU32 PCIDeviceID;
+ NvU32 PCISubDeviceID;
+ NvU32 PCIRevisionID;
+ NvU32 pcieAtomicsCplDeviceCapMask;
+ NvU8 oorArch;
+ NvU64 clPdbProperties;
+ NvU32 Chipset;
+ NvBool bGpuBehindBridge;
+ NvBool bFlrSupported;
+ NvBool b64bBar0Supported;
+ NvBool bMnocAvailable;
+ NvU32 chipsetL1ssEnable;
+ NvBool bUpstreamL0sUnsupported;
+ NvBool bUpstreamL1Unsupported;
+ NvBool bUpstreamL1PorSupported;
+ NvBool bUpstreamL1PorMobileOnly;
+ NvBool bSystemHasMux;
+ NvU8 upstreamAddressValid;
+ BUSINFO FHBBusInfo;
+ BUSINFO chipsetIDInfo;
+ ACPI_METHOD_DATA acpiMethodData;
+ NvU32 hypervisorType;
+ NvBool bIsPassthru;
+ NvU64 sysTimerOffsetNs;
+ GSP_VF_INFO gspVFInfo;
+ NvBool bIsPrimary;
+ NvBool isGridBuild;
+ GSP_PCIE_CONFIG_REG pcieConfigReg;
+ NvU32 gridBuildCsp;
+ NvBool bPreserveVideoMemoryAllocations;
+ NvBool bTdrEventSupported;
+ NvBool bFeatureStretchVblankCapable;
+ NvBool bEnableDynamicGranularityPageArrays;
+ NvBool bClockBoostSupported;
+ NvBool bRouteDispIntrsToCPU;
+ NvU64 hostPageSize;
+} GspSystemInfo;
+
+typedef struct rpc_os_error_log_v17_00
+{
+ NvU32 exceptType;
+ NvU32 runlistId;
+ NvU32 chid;
+ char errString[0x100];
+ NvU32 preemptiveRemovalPreviousXid;
+} rpc_os_error_log_v17_00;
+
+typedef struct
+{
+ // Magic
+ // BL to use for verification (i.e. Booter locked it in WPR2)
+ NvU64 magic; // = 0xdc3aae21371a60b3;
+
+ // Revision number of the Booter-BL-Sequencer handoff interface.
+ // Bumped whenever this interface changes in a way that is not backward compatible.
+ // Also bumped when GSP-RM ucode is revoked.
+ NvU64 revision; // = 1;
+
+ // ---- Members regarding data in SYSMEM ----------------------------
+ // Consumed by Booter for DMA
+
+ NvU64 sysmemAddrOfRadix3Elf;
+ NvU64 sizeOfRadix3Elf;
+
+ NvU64 sysmemAddrOfBootloader;
+ NvU64 sizeOfBootloader;
+
+ // Offsets inside bootloader image needed by Booter
+ NvU64 bootloaderCodeOffset;
+ NvU64 bootloaderDataOffset;
+ NvU64 bootloaderManifestOffset;
+
+ union
+ {
+ // Used only at initial boot
+ struct
+ {
+ NvU64 sysmemAddrOfSignature;
+ NvU64 sizeOfSignature;
+ };
+
+ //
+ // Used at suspend/resume to read GspFwHeapFreeList
+ // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+ //
+ struct
+ {
+ NvU32 gspFwHeapFreeListWprOffset;
+ NvU32 unused0;
+ NvU64 unused1;
+ };
+ };
+
+ // ---- Members describing FB layout --------------------------------
+ NvU64 gspFwRsvdStart;
+
+ NvU64 nonWprHeapOffset;
+ NvU64 nonWprHeapSize;
+
+ NvU64 gspFwWprStart;
+
+ // GSP-RM to use to setup heap.
+ NvU64 gspFwHeapOffset;
+ NvU64 gspFwHeapSize;
+
+ // BL to use to find ELF for jump
+ NvU64 gspFwOffset;
+ // Size is sizeOfRadix3Elf above.
+
+ NvU64 bootBinOffset;
+ // Size is sizeOfBootloader above.
+
+ NvU64 frtsOffset;
+ NvU64 frtsSize;
+
+ NvU64 gspFwWprEnd;
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 fbSize;
+
+ // ---- Other members -----------------------------------------------
+
+ // GSP-RM to use for fbRegionInfo?
+ NvU64 vgaWorkspaceOffset;
+ NvU64 vgaWorkspaceSize;
+
+ // Boot count. Used to determine whether to load the firmware image.
+ NvU64 bootCount;
+
+ // This union is organized the way it is to start at an 8-byte boundary and achieve natural
+ // packing of the internal struct fields.
+ union
+ {
+ struct
+ {
+ // TODO: the partitionRpc* fields below do not really belong in this
+ // structure. The values are patched in by the partition bootstrapper
+ // when GSP-RM is booted in a partition, and this structure was a
+ // convenient place for the bootstrapper to access them. These should
+ // be moved to a different communication mechanism between the bootstrapper
+ // and the GSP-RM tasks.
+
+ // Shared partition RPC memory (physical address)
+ NvU64 partitionRpcAddr;
+
+ // Offsets relative to partitionRpcAddr
+ NvU16 partitionRpcRequestOffset;
+ NvU16 partitionRpcReplyOffset;
+
+ // Code and data section offsets and sizes.
+ NvU32 elfCodeOffset;
+ NvU32 elfDataOffset;
+ NvU32 elfCodeSize;
+ NvU32 elfDataSize;
+
+ // Used during GSP-RM resume to check for revocation
+ NvU32 lsUcodeVersion;
+ };
+
+ struct
+ {
+ // Pad for the partitionRpc* fields, plus 4 bytes
+ NvU32 partitionRpcPadding[4];
+
+ // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+ // elf(Code|Data)(Offset|Size) fields above.
+ // TODO: move to GSP_FMC_INIT_PARAMS
+ NvU64 sysmemAddrOfCrashReportQueue;
+ NvU32 sizeOfCrashReportQueue;
+
+ // Pad for the lsUcodeVersion field
+ NvU32 lsUcodeVersionPadding[1];
+ };
+ };
+
+ // Number of VF partitions allocating sub-heaps from the WPR heap
+ // Used during boot to ensure the heap is adequately sized
+ NvU8 gspFwHeapVfPartitionCount;
+
+ // Flags to help decide GSP-FW flow.
+ NvU8 flags;
+
+ // Pad structure to exactly 256 bytes. Can replace padding with additional
+ // fields without incrementing revision. Padding initialized to 0.
+ NvU8 padding[2];
+
+ //
+ // Starts at gspFwWprEnd+frtsSize because FRTS is positioned
+ // to end where this allocation starts (when RM requests FSP to create
+ // FRTS).
+ //
+ NvU32 pmuReservedSize;
+
+ // BL to use for verification (i.e. Booter says OK to boot)
+ NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
+
+#define GSP_FW_WPR_META_REVISION 1
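+
+/*
+ * Illustrative sketch only (not a definition from the RM headers): a consumer
+ * of the handoff structure would typically verify both constants above before
+ * trusting any other field, e.g.
+ *
+ *	if (meta->magic != GSP_FW_WPR_META_MAGIC ||
+ *	    meta->revision != GSP_FW_WPR_META_REVISION)
+ *		return -EINVAL;
+ */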
+
+typedef struct {
+ NvU64 sharedMemPhysAddr;
+ NvU32 pageTableEntryCount;
+ NvLength cmdQueueOffset;
+ NvLength statQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+ NvU32 oldLevel;
+ NvU32 flags;
+ NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+ MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
+ GSP_SR_INIT_ARGUMENTS srInitArguments;
+ NvU32 gpuInstance;
+ NvBool bDmemStack;
+
+ struct
+ {
+ NvU64 pa;
+ NvU64 size;
+ } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
+
+typedef struct
+{
+ // Magic for verification by secure ucode
+ NvU64 magic; // = GSP_FW_SR_META_MAGIC;
+
+ //
+ // Revision number
+ // Bumped whenever this interface changes in a way that is not backward compatible.
+ //
+ NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
+
+ // Members regarding data in SYSMEM
+ NvU64 sysmemAddrOfSuspendResumeData;
+ NvU64 sizeOfSuspendResumeData;
+
+ //
+ // Internal members for use by secure ucode
+ // Must be exactly GSP_FW_SR_META_INTERNAL_SIZE bytes.
+ //
+ NvU32 internal[32];
+
+ // Same as flags of GspFwWprMeta
+ NvU32 flags;
+
+ // Subrevision number used by secure ucode
+ NvU32 subrevision;
+
+ //
+ // Pad structure to exactly 256 bytes (1 DMA chunk).
+ // Padding initialized to zero.
+ //
+ NvU32 padding[22];
+} GspFwSRMeta;
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2 (0 << 20) // No FB heap usage
+
+#define GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL (22 << 20)
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X (8 << 20) // Turing thru Ada
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB (64u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA (12u)
+
+#define BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA (70u)
+
+#define GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB \
+ (88u + (BULLSEYE_ROOT_HEAP_ALLOC_RM_DATA_SECTION_SIZE_DELTA) + \
+ (BULLSEYE_ROOT_HEAP_ALLOC_BAREMETAL_LIBOS_HEAP_SIZE_DELTA))
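+/* i.e. 88 + 12 + 70 = 170u MB minimum with both Bullseye deltas applied. */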
+
+typedef struct GSP_FMC_INIT_PARAMS
+{
+ // CC initialization "registry keys"
+ NvU32 regkeys;
+} GSP_FMC_INIT_PARAMS;
+
+typedef enum {
+ GSP_DMA_TARGET_LOCAL_FB,
+ GSP_DMA_TARGET_COHERENT_SYSTEM,
+ GSP_DMA_TARGET_NONCOHERENT_SYSTEM,
+ GSP_DMA_TARGET_COUNT
+} GSP_DMA_TARGET;
+
+typedef struct GSP_ACR_BOOT_GSP_RM_PARAMS
+{
+ // Physical memory aperture through which gspRmDescPa is accessed
+ GSP_DMA_TARGET target;
+ // Size in bytes of the GSP-RM descriptor structure
+ NvU32 gspRmDescSize;
+ // Physical offset in the target aperture of the GSP-RM descriptor structure
+ NvU64 gspRmDescOffset;
+ // Physical offset in FB to set the start of the WPR containing GSP-RM
+ NvU64 wprCarveoutOffset;
+ // Size in bytes of the WPR containing GSP-RM
+ NvU32 wprCarveoutSize;
+ // Whether to boot GSP-RM or GSP-Proxy through ACR
+ NvBool bIsGspRmBoot;
+} GSP_ACR_BOOT_GSP_RM_PARAMS;
+
+typedef struct GSP_RM_PARAMS
+{
+ // Physical memory aperture through which bootArgsOffset is accessed
+ GSP_DMA_TARGET target;
+ // Physical offset in the memory aperture that will be passed to GSP-RM
+ NvU64 bootArgsOffset;
+} GSP_RM_PARAMS;
+
+typedef struct GSP_SPDM_PARAMS
+{
+ // Physical memory aperture through which all addresses are accessed
+ GSP_DMA_TARGET target;
+
+ // Physical offset in the memory aperture where SPDM payload is stored
+ NvU64 payloadBufferOffset;
+
+ // Size of the above payload buffer
+ NvU32 payloadBufferSize;
+} GSP_SPDM_PARAMS;
+
+typedef struct GSP_FMC_BOOT_PARAMS
+{
+ GSP_FMC_INIT_PARAMS initParams;
+ GSP_ACR_BOOT_GSP_RM_PARAMS bootGspRmParams;
+ GSP_RM_PARAMS gspRmParams;
+ GSP_SPDM_PARAMS gspSpdmParams;
+} GSP_FMC_BOOT_PARAMS;
+
+#define GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100 (14 << 20) // Hopper+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
new file mode 100644
index 000000000000..e06643f57695
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/msgfn.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_MSGFN_H__
+#define __NVRM_MSGFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#ifndef E
+# define E(RPC, VAL) NV_VGPU_MSG_EVENT_##RPC = VAL,
+# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ E(FIRST_EVENT, 0x1000)
+ E(GSP_INIT_DONE, 0x1001)
+ E(GSP_RUN_CPU_SEQUENCER, 0x1002)
+ E(POST_EVENT, 0x1003)
+ E(RC_TRIGGERED, 0x1004)
+ E(MMU_FAULT_QUEUED, 0x1005)
+ E(OS_ERROR_LOG, 0x1006)
+ E(RG_LINE_INTR, 0x1007)
+ E(GPUACCT_PERFMON_UTIL_SAMPLES, 0x1008)
+ E(SIM_READ, 0x1009)
+ E(SIM_WRITE, 0x100a)
+ E(SEMAPHORE_SCHEDULE_CALLBACK, 0x100b)
+ E(UCODE_LIBOS_PRINT, 0x100c)
+ E(VGPU_GSP_PLUGIN_TRIGGERED, 0x100d)
+ E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK, 0x100e)
+ E(PERF_BRIDGELESS_INFO_UPDATE, 0x100f)
+ E(VGPU_CONFIG, 0x1010)
+ E(DISPLAY_MODESET, 0x1011)
+ E(EXTDEV_INTR_SERVICE, 0x1012)
+ E(NVLINK_INBAND_RECEIVED_DATA_256, 0x1013)
+ E(NVLINK_INBAND_RECEIVED_DATA_512, 0x1014)
+ E(NVLINK_INBAND_RECEIVED_DATA_1024, 0x1015)
+ E(NVLINK_INBAND_RECEIVED_DATA_2048, 0x1016)
+ E(NVLINK_INBAND_RECEIVED_DATA_4096, 0x1017)
+ E(TIMED_SEMAPHORE_RELEASE, 0x1018)
+ E(NVLINK_IS_GPU_DEGRADED, 0x1019)
+ E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK, 0x101a)
+ E(NVLINK_FAULT_UP, 0x101b)
+ E(GSP_LOCKDOWN_NOTICE, 0x101c)
+ E(MIG_CI_CONFIG_UPDATE, 0x101d)
+ E(UPDATE_GSP_TRACE, 0x101e)
+ E(NVLINK_FATAL_ERROR_RECOVERY, 0x101f)
+ E(GSP_POST_NOCAT_RECORD, 0x1020)
+ E(FECS_ERROR, 0x1021)
+ E(RECOVERY_ACTION, 0x1022)
+ E(NUM_EVENTS, 0x1023)
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef E
+# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
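+
+/*
+ * The E() list above is an X-macro: a user can define E() before including
+ * this header to expand the table into another form.  Hypothetical sketch
+ * (names are illustrative, not from this patch):
+ *
+ *	#define E(RPC, VAL) [VAL - 0x1000] = #RPC,
+ *	static const char *const event_names[] = {
+ *	#include "msgfn.h"
+ *	};
+ *	#undef E
+ */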
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
new file mode 100644
index 000000000000..fcaef7f553a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/ofa.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_OFA_H__
+#define __NVRM_OFA_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+typedef struct
+{
+ NvU32 size;
+ NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA?
+ NvU32 engineInstance;
+} NV_OFA_ALLOCATION_PARAMETERS;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
new file mode 100644
index 000000000000..2d67b598c58b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/nvrm/rpcfn.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __NVRM_RPCFN_H__
+#define __NVRM_RPCFN_H__
+#include <nvrm/nvtypes.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/570.144 */
+
+#ifndef X
+# define X(UNIT, RPC, VAL) NV_VGPU_MSG_FUNCTION_##RPC = VAL,
+# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+ X(RM, NOP, 0)
+ X(RM, SET_GUEST_SYSTEM_INFO, 1)
+ X(RM, ALLOC_ROOT, 2)
+ X(RM, ALLOC_DEVICE, 3) // deprecated
+ X(RM, ALLOC_MEMORY, 4)
+ X(RM, ALLOC_CTX_DMA, 5)
+ X(RM, ALLOC_CHANNEL_DMA, 6)
+ X(RM, MAP_MEMORY, 7)
+ X(RM, BIND_CTX_DMA, 8) // deprecated
+ X(RM, ALLOC_OBJECT, 9)
+ X(RM, FREE, 10)
+ X(RM, LOG, 11)
+ X(RM, ALLOC_VIDMEM, 12)
+ X(RM, UNMAP_MEMORY, 13)
+ X(RM, MAP_MEMORY_DMA, 14)
+ X(RM, UNMAP_MEMORY_DMA, 15)
+ X(RM, GET_EDID, 16) // deprecated
+ X(RM, ALLOC_DISP_CHANNEL, 17)
+ X(RM, ALLOC_DISP_OBJECT, 18)
+ X(RM, ALLOC_SUBDEVICE, 19)
+ X(RM, ALLOC_DYNAMIC_MEMORY, 20)
+ X(RM, DUP_OBJECT, 21)
+ X(RM, IDLE_CHANNELS, 22)
+ X(RM, ALLOC_EVENT, 23)
+ X(RM, SEND_EVENT, 24) // deprecated
+ X(RM, REMAPPER_CONTROL, 25) // deprecated
+ X(RM, DMA_CONTROL, 26) // deprecated
+ X(RM, DMA_FILL_PTE_MEM, 27)
+ X(RM, MANAGE_HW_RESOURCE, 28)
+ X(RM, BIND_ARBITRARY_CTX_DMA, 29) // deprecated
+ X(RM, CREATE_FB_SEGMENT, 30)
+ X(RM, DESTROY_FB_SEGMENT, 31)
+ X(RM, ALLOC_SHARE_DEVICE, 32)
+ X(RM, DEFERRED_API_CONTROL, 33)
+ X(RM, REMOVE_DEFERRED_API, 34)
+ X(RM, SIM_ESCAPE_READ, 35)
+ X(RM, SIM_ESCAPE_WRITE, 36)
+ X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA, 37)
+ X(RM, FREE_VIDMEM_VIRT, 38)
+ X(RM, PERF_GET_PSTATE_INFO, 39) // deprecated
+ X(RM, PERF_GET_PERFMON_SAMPLE, 40)
+ X(RM, PERF_GET_VIRTUAL_PSTATE_INFO, 41) // deprecated
+ X(RM, PERF_GET_LEVEL_INFO, 42)
+ X(RM, MAP_SEMA_MEMORY, 43)
+ X(RM, UNMAP_SEMA_MEMORY, 44)
+ X(RM, SET_SURFACE_PROPERTIES, 45)
+ X(RM, CLEANUP_SURFACE, 46)
+ X(RM, UNLOADING_GUEST_DRIVER, 47)
+ X(RM, TDR_SET_TIMEOUT_STATE, 48)
+ X(RM, SWITCH_TO_VGA, 49)
+ X(RM, GPU_EXEC_REG_OPS, 50)
+ X(RM, GET_STATIC_INFO, 51)
+ X(RM, ALLOC_VIRTMEM, 52)
+ X(RM, UPDATE_PDE_2, 53)
+ X(RM, SET_PAGE_DIRECTORY, 54)
+ X(RM, GET_STATIC_PSTATE_INFO, 55)
+ X(RM, TRANSLATE_GUEST_GPU_PTES, 56)
+ X(RM, RESERVED_57, 57)
+ X(RM, RESET_CURRENT_GR_CONTEXT, 58)
+ X(RM, SET_SEMA_MEM_VALIDATION_STATE, 59)
+ X(RM, GET_ENGINE_UTILIZATION, 60)
+ X(RM, UPDATE_GPU_PDES, 61)
+ X(RM, GET_ENCODER_CAPACITY, 62)
+ X(RM, VGPU_PF_REG_READ32, 63) // deprecated
+ X(RM, SET_GUEST_SYSTEM_INFO_EXT, 64)
+ X(GSP, GET_GSP_STATIC_INFO, 65)
+ X(RM, RMFS_INIT, 66) // deprecated
+ X(RM, RMFS_CLOSE_QUEUE, 67) // deprecated
+ X(RM, RMFS_CLEANUP, 68) // deprecated
+ X(RM, RMFS_TEST, 69) // deprecated
+ X(RM, UPDATE_BAR_PDE, 70)
+ X(RM, CONTINUATION_RECORD, 71)
+ X(RM, GSP_SET_SYSTEM_INFO, 72)
+ X(RM, SET_REGISTRY, 73)
+ X(GSP, GSP_INIT_POST_OBJGPU, 74) // deprecated
+ X(RM, SUBDEV_EVENT_SET_NOTIFICATION, 75) // deprecated
+ X(GSP, GSP_RM_CONTROL, 76)
+ X(RM, GET_STATIC_INFO2, 77)
+ X(RM, DUMP_PROTOBUF_COMPONENT, 78)
+ X(RM, UNSET_PAGE_DIRECTORY, 79)
+ X(RM, GET_CONSOLIDATED_STATIC_INFO, 80) // deprecated
+ X(RM, GMMU_REGISTER_FAULT_BUFFER, 81) // deprecated
+ X(RM, GMMU_UNREGISTER_FAULT_BUFFER, 82) // deprecated
+ X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, 83) // deprecated
+ X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, 84) // deprecated
+ X(RM, CTRL_SET_VGPU_FB_USAGE, 85)
+ X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO, 86)
+ X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO, 87)
+ X(RM, CTRL_RESET_CHANNEL, 88)
+ X(RM, CTRL_RESET_ISOLATED_CHANNEL, 89)
+ X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT, 90)
+ X(RM, CTRL_CLK_GET_EXTENDED_INFO, 91)
+ X(RM, CTRL_PERF_BOOST, 92)
+ X(RM, CTRL_PERF_VPSTATES_GET_CONTROL, 93)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE, 94)
+ X(RM, CTRL_SET_ZBC_COLOR_CLEAR, 95)
+ X(RM, CTRL_SET_ZBC_DEPTH_CLEAR, 96)
+ X(RM, CTRL_GPFIFO_SCHEDULE, 97)
+ X(RM, CTRL_SET_TIMESLICE, 98)
+ X(RM, CTRL_PREEMPT, 99)
+ X(RM, CTRL_FIFO_DISABLE_CHANNELS, 100)
+ X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL, 101)
+ X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL, 102)
+ X(GSP, GSP_RM_ALLOC, 103)
+ X(RM, CTRL_GET_P2P_CAPS_V2, 104)
+ X(RM, CTRL_CIPHER_AES_ENCRYPT, 105)
+ X(RM, CTRL_CIPHER_SESSION_KEY, 106)
+ X(RM, CTRL_CIPHER_SESSION_KEY_STATUS, 107)
+ X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES, 108)
+ X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES, 109)
+ X(RM, CTRL_DBG_SET_EXCEPTION_MASK, 110)
+ X(RM, CTRL_GPU_PROMOTE_CTX, 111)
+ X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND, 112)
+ X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE, 113)
+ X(RM, CTRL_GR_CTXSW_ZCULL_BIND, 114)
+ X(RM, CTRL_GPU_INITIALIZE_CTX, 115)
+ X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES, 116)
+ X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT, 117)
+ X(RM, CTRL_GET_LATEST_ECC_ADDRESSES, 118)
+ X(RM, CTRL_MC_SERVICE_INTERRUPTS, 119)
+ X(RM, CTRL_DMA_SET_DEFAULT_VASPACE, 120)
+ X(RM, CTRL_GET_CE_PCE_MASK, 121)
+ X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY, 122)
+ X(RM, CTRL_GET_NVLINK_PEER_ID_MASK, 123) // deprecated
+ X(RM, CTRL_GET_NVLINK_STATUS, 124)
+ X(RM, CTRL_GET_P2P_CAPS, 125)
+ X(RM, CTRL_GET_P2P_CAPS_MATRIX, 126)
+ X(RM, RESERVED_0, 127)
+ X(RM, CTRL_RESERVE_PM_AREA_SMPC, 128)
+ X(RM, CTRL_RESERVE_HWPM_LEGACY, 129)
+ X(RM, CTRL_B0CC_EXEC_REG_OPS, 130)
+ X(RM, CTRL_BIND_PM_RESOURCES, 131)
+ X(RM, CTRL_DBG_SUSPEND_CONTEXT, 132)
+ X(RM, CTRL_DBG_RESUME_CONTEXT, 133)
+ X(RM, CTRL_DBG_EXEC_REG_OPS, 134)
+ X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG, 135)
+ X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE, 136)
+ X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE, 137)
+ X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG, 138)
+ X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE, 139)
+ X(RM, CTRL_ALLOC_PMA_STREAM, 140)
+ X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT, 141)
+ X(RM, CTRL_FB_GET_INFO_V2, 142)
+ X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES, 143)
+ X(RM, CTRL_GR_GET_CTX_BUFFER_INFO, 144)
+ X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES, 145)
+ X(RM, CTRL_GPU_EVICT_CTX, 146)
+ X(RM, CTRL_FB_GET_FS_INFO, 147)
+ X(RM, CTRL_GRMGR_GET_GR_FS_INFO, 148)
+ X(RM, CTRL_STOP_CHANNEL, 149)
+ X(RM, CTRL_GR_PC_SAMPLING_MODE, 150)
+ X(RM, CTRL_PERF_RATED_TDP_GET_STATUS, 151)
+ X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL, 152)
+ X(RM, CTRL_FREE_PMA_STREAM, 153)
+ X(RM, CTRL_TIMER_SET_GR_TICK_FREQ, 154)
+ X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, 155)
+ X(RM, GET_CONSOLIDATED_GR_STATIC_INFO, 156)
+ X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP, 157)
+ X(RM, CTRL_GR_GET_TPC_PARTITION_MODE, 158)
+ X(RM, CTRL_GR_SET_TPC_PARTITION_MODE, 159)
+ X(UVM, UVM_PAGING_CHANNEL_ALLOCATE, 160)
+ X(UVM, UVM_PAGING_CHANNEL_DESTROY, 161)
+ X(UVM, UVM_PAGING_CHANNEL_MAP, 162)
+ X(UVM, UVM_PAGING_CHANNEL_UNMAP, 163)
+ X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM, 164)
+ X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES, 165)
+ X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION, 166)
+ X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL, 167)
+ X(RM, DCE_RM_INIT, 168)
+ X(RM, REGISTER_VIRTUAL_EVENT_BUFFER, 169)
+ X(RM, CTRL_EVENT_BUFFER_UPDATE_GET, 170)
+ X(RM, GET_PLCABLE_ADDRESS_KIND, 171)
+ X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2, 172)
+ X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM, 173)
+ X(RM, CTRL_GET_MMU_DEBUG_MODE, 174)
+ X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, 175)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE, 176)
+ X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO, 177)
+ X(RM, DISABLE_CHANNELS, 178)
+ X(RM, CTRL_FABRIC_MEMORY_DESCRIBE, 179)
+ X(RM, CTRL_FABRIC_MEM_STATS, 180)
+ X(RM, SAVE_HIBERNATION_DATA, 181)
+ X(RM, RESTORE_HIBERNATION_DATA, 182)
+ X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, 183)
+ X(RM, CTRL_EXEC_PARTITIONS_CREATE, 184)
+ X(RM, CTRL_EXEC_PARTITIONS_DELETE, 185)
+ X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN, 186)
+ X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX, 187)
+ X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION, 188)
+ X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, 189)
+ X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER, 190)
+ X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS, 191)
+ X(RM, CTRL_BUS_SET_P2P_MAPPING, 192)
+ X(RM, CTRL_BUS_UNSET_P2P_MAPPING, 193)
+ X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK, 194)
+ X(RM, CTRL_GPU_MIGRATABLE_OPS, 195)
+ X(RM, CTRL_GET_TOTAL_HS_CREDITS, 196)
+ X(RM, CTRL_GET_HS_CREDITS, 197)
+ X(RM, CTRL_SET_HS_CREDITS, 198)
+ X(RM, CTRL_PM_AREA_PC_SAMPLER, 199)
+ X(RM, INVALIDATE_TLB, 200)
+ X(RM, CTRL_GPU_QUERY_ECC_STATUS, 201) // deprecated
+ X(RM, ECC_NOTIFIER_WRITE_ACK, 202)
+ X(RM, CTRL_DBG_GET_MODE_MMU_DEBUG, 203)
+ X(RM, RM_API_CONTROL, 204)
+ X(RM, CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE, 205)
+ X(RM, CTRL_NVLINK_GET_INBAND_RECEIVED_DATA, 206)
+ X(RM, GET_STATIC_DATA, 207)
+ X(RM, RESERVED_208, 208)
+ X(RM, CTRL_GPU_GET_INFO_V2, 209)
+ X(RM, GET_BRAND_CAPS, 210)
+ X(RM, CTRL_CMD_NVLINK_INBAND_SEND_DATA, 211)
+ X(RM, UPDATE_GPM_GUEST_BUFFER_INFO, 212)
+ X(RM, CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE, 213)
+ X(RM, CTRL_SET_ZBC_STENCIL_CLEAR, 214)
+ X(RM, CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS, 215)
+ X(RM, CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS, 216)
+ X(RM, CTRL_DBG_SET_MODE_MMU_GCC_DEBUG, 217)
+ X(RM, CTRL_DBG_GET_MODE_MMU_GCC_DEBUG, 218)
+ X(RM, CTRL_RESERVE_HES, 219)
+ X(RM, CTRL_RELEASE_HES, 220)
+ X(RM, CTRL_RESERVE_CCU_PROF, 221)
+ X(RM, CTRL_RELEASE_CCU_PROF, 222)
+ X(RM, RESERVED, 223)
+ X(RM, CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL, 224)
+ X(RM, CTRL_CMD_GET_HS_CREDITS_MAPPING, 225)
+ X(RM, CTRL_EXEC_PARTITIONS_EXPORT, 226)
+ X(RM, NUM_FUNCTIONS, 227)
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+# undef X
+# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
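+
+/*
+ * As with the event list in msgfn.h, X() is an X-macro.  Included with X()
+ * left undefined, this header simply defines the NV_VGPU_MSG_FUNCTION_*
+ * enum, e.g. NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL == 76.
+ */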
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
new file mode 100644
index 000000000000..6fb3083edde3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/ofa.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/engine.h>
+
+#include "nvrm/ofa.h"
+
+static int
+r570_ofa_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, int inst,
+ struct nvkm_gsp_object *ofa)
+{
+ NV_OFA_ALLOCATION_PARAMETERS *args;
+
+ args = nvkm_gsp_rm_alloc_get(parent, handle, oclass, sizeof(*args), ofa);
+ if (WARN_ON(IS_ERR(args)))
+ return PTR_ERR(args);
+
+ args->size = sizeof(*args);
+ args->engineInstance = inst;
+
+ return nvkm_gsp_rm_alloc_wr(ofa, args);
+}
+
+const struct nvkm_rm_api_engine
+r570_ofa = {
+ .alloc = r570_ofa_alloc,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
new file mode 100644
index 000000000000..498658d0c60c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/rm.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <rm/rm.h>
+
+#include "nvrm/gsp.h"
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos2 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gh100 = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb10x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x200000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_wpr
+r570_wpr_libos3_gb20x = {
+ .os_carveout_size = GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL,
+ .base_size = GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100,
+ .heap_size_min = GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ .heap_size_non_wpr = 0x220000,
+ .rsvd_size_pmu = ALIGN(0x0800000 + 0x1000000 + 0x0001000, 0x20000),
+ .offset_set_by_acr = true,
+};
+
+static const struct nvkm_rm_api
+r570_api = {
+ .gsp = &r570_gsp,
+ .rpc = &r535_rpc,
+ .ctrl = &r535_ctrl,
+ .alloc = &r535_alloc,
+ .client = &r570_client,
+ .device = &r535_device,
+ .fbsr = &r570_fbsr,
+ .disp = &r570_disp,
+ .fifo = &r570_fifo,
+ .ce = &r535_ce,
+ .gr = &r570_gr,
+ .nvdec = &r535_nvdec,
+ .nvenc = &r535_nvenc,
+ .nvjpg = &r535_nvjpg,
+ .ofa = &r570_ofa,
+};
+
+const struct nvkm_rm_impl
+r570_rm_tu102 = {
+ .wpr = &r570_wpr_libos2,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_ga102 = {
+ .wpr = &r570_wpr_libos3,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gh100 = {
+ .wpr = &r570_wpr_libos3_gh100,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb10x = {
+ .wpr = &r570_wpr_libos3_gb10x,
+ .api = &r570_api,
+};
+
+const struct nvkm_rm_impl
+r570_rm_gb20x = {
+ .wpr = &r570_wpr_libos3_gb20x,
+ .api = &r570_api,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
new file mode 100644
index 000000000000..393ea775941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include <subdev/gsp.h>
+#ifndef __NVKM_RM_H__
+#define __NVKM_RM_H__
+#include "handles.h"
+struct nvkm_outp;
+struct r535_gr;
+
+struct nvkm_rm_impl {
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm {
+ struct nvkm_device *device;
+ const struct nvkm_rm_gpu *gpu;
+ const struct nvkm_rm_wpr *wpr;
+ const struct nvkm_rm_api *api;
+};
+
+struct nvkm_rm_wpr {
+ u32 os_carveout_size;
+ u32 base_size;
+ u64 heap_size_min;
+ u32 heap_size_non_wpr;
+ u32 rsvd_size_pmu;
+ bool offset_set_by_acr;
+};
+
+struct nvkm_rm_api {
+ const struct nvkm_rm_api_gsp {
+ void (*set_rmargs)(struct nvkm_gsp *, bool resume);
+ int (*set_system_info)(struct nvkm_gsp *);
+ int (*get_static_info)(struct nvkm_gsp *);
+ bool (*xlat_mc_engine_idx)(u32 mc_engine_idx, enum nvkm_subdev_type *, int *inst);
+ void (*drop_send_user_shared_data)(struct nvkm_gsp *);
+ void (*drop_post_nocat_record)(struct nvkm_gsp *);
+ u32 (*sr_data_size)(struct nvkm_gsp *);
+ } *gsp;
+
+ const struct nvkm_rm_api_rpc {
+ void *(*get)(struct nvkm_gsp *, u32 fn, u32 argc);
+ void *(*push)(struct nvkm_gsp *gsp, void *argv,
+ enum nvkm_gsp_rpc_reply_policy policy, u32 repc);
+ void (*done)(struct nvkm_gsp *gsp, void *repv);
+ } *rpc;
+
+ const struct nvkm_rm_api_ctrl {
+ void *(*get)(struct nvkm_gsp_object *, u32 cmd, u32 params_size);
+ int (*push)(struct nvkm_gsp_object *, void **params, u32 repc);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+ } *ctrl;
+
+ const struct nvkm_rm_api_alloc {
+ void *(*get)(struct nvkm_gsp_object *, u32 oclass, u32 params_size);
+ void *(*push)(struct nvkm_gsp_object *, void *params);
+ void (*done)(struct nvkm_gsp_object *, void *params);
+
+ int (*free)(struct nvkm_gsp_object *);
+ } *alloc;
+
+ const struct nvkm_rm_api_client {
+ int (*ctor)(struct nvkm_gsp_client *, u32 handle);
+ } *client;
+
+ const struct nvkm_rm_api_device {
+ int (*ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+ void (*dtor)(struct nvkm_gsp_device *);
+
+ struct {
+ int (*ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
+ nvkm_gsp_event_func, struct nvkm_gsp_event *);
+ void (*dtor)(struct nvkm_gsp_event *);
+ } event;
+ } *device;
+
+ const struct nvkm_rm_api_fbsr {
+ int (*suspend)(struct nvkm_gsp *);
+ void (*resume)(struct nvkm_gsp *);
+ } *fbsr;
+
+ const struct nvkm_rm_api_disp {
+ int (*get_static_info)(struct nvkm_disp *);
+ int (*get_supported)(struct nvkm_disp *, unsigned long *display_mask);
+ int (*get_connect_state)(struct nvkm_disp *, unsigned display_id);
+ int (*get_active)(struct nvkm_disp *, unsigned head, u32 *display_id);
+
+ int (*bl_ctrl)(struct nvkm_disp *, unsigned display_id, bool set, int *val);
+
+ struct {
+ int (*get_caps)(struct nvkm_disp *, int *link_bw, bool *mst, bool *wm);
+ int (*set_indexed_link_rates)(struct nvkm_outp *);
+ } dp;
+
+ struct {
+ int (*set_pushbuf)(struct nvkm_disp *, s32 oclass, int inst,
+ struct nvkm_memory *);
+ int (*dmac_alloc)(struct nvkm_disp *, u32 oclass, int inst, u32 put_offset,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *disp;
+
+ const struct nvkm_rm_api_fifo {
+ int (*xlat_rm_engine_type)(u32 rm_engine_type,
+ enum nvkm_subdev_type *, int *nv2080_type);
+ int (*ectx_size)(struct nvkm_fifo *);
+ unsigned rsvd_chids;
+ int (*rc_triggered)(void *priv, u32 fn, void *repv, u32 repc);
+ struct {
+ int (*alloc)(struct nvkm_gsp_device *, u32 handle,
+ u32 nv2080_engine_type, u8 runq, bool priv, int chid,
+ u64 inst_addr, u64 userd_addr, u64 mthdbuf_addr,
+ struct nvkm_vmm *, u64 gpfifo_offset, u32 gpfifo_length,
+ struct nvkm_gsp_object *);
+ } chan;
+ } *fifo;
+
+ const struct nvkm_rm_api_engine {
+ int (*alloc)(struct nvkm_gsp_object *chan, u32 handle, u32 class, int inst,
+ struct nvkm_gsp_object *);
+ } *ce, *nvdec, *nvenc, *nvjpg, *ofa;
+
+ const struct nvkm_rm_api_gr {
+ int (*get_ctxbufs_info)(struct r535_gr *);
+ struct {
+ int (*init)(struct r535_gr *);
+ void (*fini)(struct r535_gr *);
+ } scrubber;
+ } *gr;
+};
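+
+/*
+ * Callers reach version-specific behaviour by indirecting through these
+ * tables rather than branching on the firmware version, e.g. (sketch):
+ *
+ *	gsp->rm->api->gsp->set_rmargs(gsp, true);
+ */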
+
+extern const struct nvkm_rm_impl r535_rm_tu102;
+extern const struct nvkm_rm_impl r535_rm_ga102;
+extern const struct nvkm_rm_api_gsp r535_gsp;
+typedef struct DOD_METHOD_DATA DOD_METHOD_DATA;
+typedef struct JT_METHOD_DATA JT_METHOD_DATA;
+typedef struct CAPS_METHOD_DATA CAPS_METHOD_DATA;
+void r535_gsp_acpi_dod(acpi_handle, DOD_METHOD_DATA *);
+void r535_gsp_acpi_jt(acpi_handle, JT_METHOD_DATA *);
+void r535_gsp_acpi_caps(acpi_handle, CAPS_METHOD_DATA *);
+struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+void r535_gsp_get_static_info_fb(struct nvkm_gsp *,
+ const struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *);
+extern const struct nvkm_rm_api_rpc r535_rpc;
+extern const struct nvkm_rm_api_ctrl r535_ctrl;
+extern const struct nvkm_rm_api_alloc r535_alloc;
+extern const struct nvkm_rm_api_client r535_client;
+void r535_gsp_client_dtor(struct nvkm_gsp_client *);
+extern const struct nvkm_rm_api_device r535_device;
+int r535_mmu_vaspace_new(struct nvkm_vmm *, u32 handle, bool external);
+void r535_mmu_vaspace_del(struct nvkm_vmm *);
+extern const struct nvkm_rm_api_fbsr r535_fbsr;
+void r535_fbsr_resume(struct nvkm_gsp *);
+int r535_fbsr_memlist(struct nvkm_gsp_device *, u32 handle, enum nvkm_memory_target,
+ u64 phys, u64 size, struct sg_table *, struct nvkm_gsp_object *);
+extern const struct nvkm_rm_api_disp r535_disp;
+extern const struct nvkm_rm_api_fifo r535_fifo;
+void r535_fifo_rc_chid(struct nvkm_fifo *, int chid);
+extern const struct nvkm_rm_api_engine r535_ce;
+extern const struct nvkm_rm_api_gr r535_gr;
+void *r535_gr_dtor(struct nvkm_gr *);
+int r535_gr_oneinit(struct nvkm_gr *);
+u64 r535_gr_units(struct nvkm_gr *);
+int r535_gr_chan_new(struct nvkm_gr *, struct nvkm_chan *, const struct nvkm_oclass *,
+ struct nvkm_object **);
+int r535_gr_promote_ctx(struct r535_gr *, bool golden, struct nvkm_vmm *,
+ struct nvkm_memory **pctxbuf_mem, struct nvkm_vma **pctxbuf_vma,
+ struct nvkm_gsp_object *chan);
+extern const struct nvkm_rm_api_engine r535_nvdec;
+extern const struct nvkm_rm_api_engine r535_nvenc;
+extern const struct nvkm_rm_api_engine r535_nvjpg;
+extern const struct nvkm_rm_api_engine r535_ofa;
+
+extern const struct nvkm_rm_impl r570_rm_tu102;
+extern const struct nvkm_rm_impl r570_rm_ga102;
+extern const struct nvkm_rm_impl r570_rm_gh100;
+extern const struct nvkm_rm_impl r570_rm_gb10x;
+extern const struct nvkm_rm_impl r570_rm_gb20x;
+extern const struct nvkm_rm_api_gsp r570_gsp;
+extern const struct nvkm_rm_api_client r570_client;
+extern const struct nvkm_rm_api_fbsr r570_fbsr;
+extern const struct nvkm_rm_api_disp r570_disp;
+extern const struct nvkm_rm_api_fifo r570_fifo;
+extern const struct nvkm_rm_api_gr r570_gr;
+int r570_gr_gpc_mask(struct nvkm_gsp *, u32 *mask);
+int r570_gr_tpc_mask(struct nvkm_gsp *, int gpc, u32 *mask);
+extern const struct nvkm_rm_api_engine r570_ofa;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
new file mode 100644
index 000000000000..4431e33b3304
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rpc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#ifndef __NVKM_RM_RPC_H__
+#define __NVKM_RM_RPC_H__
+#include "rm.h"
+
+#define to_payload_hdr(p, header) \
+ container_of((void *)p, typeof(*header), params)
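+
+/*
+ * Given a pointer to the params[] member of a payload header, to_payload_hdr()
+ * recovers the enclosing header via container_of(); the "header" argument only
+ * supplies the type through typeof() and is never dereferenced.
+ */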
+
+int r535_gsp_rpc_poll(struct nvkm_gsp *, u32 fn);
+
+struct nvfw_gsp_rpc *r535_gsp_msg_recv(struct nvkm_gsp *, int fn, u32 gsp_rpc_len);
+int r535_gsp_msg_ntfy_add(struct nvkm_gsp *, u32 fn, nvkm_gsp_msg_ntfy_func, void *priv);
+
+int r535_rpc_status_to_errno(uint32_t rpc_status);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
new file mode 100644
index 000000000000..423502f870db
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/tu1xx.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "gpu.h"
+
+#include <engine/fifo/priv.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_rm_gpu
+tu1xx_gpu = {
+ .disp.class = {
+ .root = TU102_DISP,
+ .caps = GV100_DISP_CAPS,
+ .core = TU102_DISP_CORE_CHANNEL_DMA,
+ .wndw = TU102_DISP_WINDOW_CHANNEL_DMA,
+ .wimm = TU102_DISP_WINDOW_IMM_CHANNEL_DMA,
+ .curs = TU102_DISP_CURSOR,
+ },
+
+ .usermode.class = TURING_USERMODE_A,
+
+ .fifo.chan = {
+ .class = TURING_CHANNEL_GPFIFO_A,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+ },
+
+ .ce.class = TURING_DMA_COPY_A,
+ .gr.class = {
+ .i2m = KEPLER_INLINE_TO_MEMORY_B,
+ .twod = FERMI_TWOD_A,
+ .threed = TURING_A,
+ .compute = TURING_COMPUTE_A,
+ },
+ .nvdec.class = NVC4B0_VIDEO_DECODER,
+ .nvenc.class = NVC4B7_VIDEO_ENCODER,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 59c5f2b9172a..58e233bc53b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -22,11 +22,45 @@
#include "priv.h"
#include <subdev/fb.h>
+#include <engine/sec2.h>
+
+#include <rm/r535/nvrm/gsp.h>
#include <nvfw/flcn.h>
#include <nvfw/fw.h>
#include <nvfw/hs.h>
+static int
+tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 wpr2_hi;
+ int ret;
+
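+ /*
+ * A nonzero WPR2 upper bound means the write-protected region is still
+ * configured and booter-unload must run to tear it down.  (0x1fa828 is
+ * assumed here to be the WPR2 address-high register.)
+ */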
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (!wpr2_hi) {
+ nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
+ return 0;
+ }
+
+ ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+ if (WARN_ON(ret))
+ return ret;
+
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ if (WARN_ON(wpr2_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+tu102_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+ return nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+}
+
int
tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
@@ -114,6 +148,118 @@ tu102_gsp_reset(struct nvkm_gsp *gsp)
return gsp->falcon.func->reset_eng(&gsp->falcon);
}
+int
+tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+ u32 mbox0 = 0xff, mbox1 = 0xff;
+ int ret;
+
+ ret = r535_gsp_fini(gsp, suspend);
+ if (ret && suspend)
+ return ret;
+
+ nvkm_falcon_reset(&gsp->falcon);
+
+ ret = nvkm_gsp_fwsec_sb(gsp);
+ WARN_ON(ret);
+
+ if (suspend) {
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ ret = tu102_gsp_booter_unload(gsp, mbox0, mbox1);
+ WARN_ON(ret);
+ return 0;
+}
+
+int
+tu102_gsp_init(struct nvkm_gsp *gsp)
+{
+ u32 mbox0, mbox1;
+ int ret;
+
+ if (!gsp->sr.meta.data) {
+ mbox0 = lower_32_bits(gsp->wpr_meta.addr);
+ mbox1 = upper_32_bits(gsp->wpr_meta.addr);
+ } else {
+ gsp->rm->api->gsp->set_rmargs(gsp, true);
+
+ mbox0 = lower_32_bits(gsp->sr.meta.addr);
+ mbox1 = upper_32_bits(gsp->sr.meta.addr);
+ }
+
+ /* Execute booter to handle (eventually...) booting GSP-RM. */
+ ret = tu102_gsp_booter_load(gsp, mbox0, mbox1);
+ if (WARN_ON(ret))
+ return ret;
+
+ return r535_gsp_init(gsp);
+}
+
+static int
+tu102_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+ GspFwWprMeta *meta;
+ int ret;
+
+ ret = nvkm_gsp_mem_ctor(gsp, sizeof(*meta), &gsp->wpr_meta);
+ if (ret)
+ return ret;
+
+ meta = gsp->wpr_meta.data;
+
+ meta->magic = GSP_FW_WPR_META_MAGIC;
+ meta->revision = GSP_FW_WPR_META_REVISION;
+
+ meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
+ meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+ meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+ meta->sizeOfBootloader = gsp->boot.fw.size;
+ meta->bootloaderCodeOffset = gsp->boot.code_offset;
+ meta->bootloaderDataOffset = gsp->boot.data_offset;
+ meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+ meta->sysmemAddrOfSignature = gsp->sig.addr;
+ meta->sizeOfSignature = gsp->sig.size;
+
+ meta->gspFwRsvdStart = gsp->fb.heap.addr;
+ meta->nonWprHeapOffset = gsp->fb.heap.addr;
+ meta->nonWprHeapSize = gsp->fb.heap.size;
+ meta->gspFwWprStart = gsp->fb.wpr2.addr;
+ meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+ meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+ meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+ meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+ meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+ meta->frtsSize = gsp->fb.wpr2.frts.size;
+ meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+ meta->fbSize = gsp->fb.size;
+ meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+ meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+ meta->bootCount = 0;
+ meta->partitionRpcAddr = 0;
+ meta->partitionRpcRequestOffset = 0;
+ meta->partitionRpcReplyOffset = 0;
+ meta->verified = 0;
+ return 0;
+}
+
+u64
+tu102_gsp_wpr_heap_size(struct nvkm_gsp *gsp)
+{
+ u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+ u64 heap_size;
+
+ heap_size = gsp->rm->wpr->os_carveout_size +
+ gsp->rm->wpr->base_size +
+ ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+ ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+ return max(heap_size, gsp->rm->wpr->heap_size_min);
+}
+
static u64
tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
{
@@ -136,14 +282,67 @@ tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
int
tu102_gsp_oneinit(struct nvkm_gsp *gsp)
{
- gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+ struct nvkm_device *device = gsp->subdev.device;
+ int ret;
+
+ gsp->fb.size = nvkm_fb_vidmem_size(device);
gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
- return r535_gsp_oneinit(gsp);
+ ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
+ &device->sec2->falcon, &gsp->booter.load);
+ if (ret)
+ return ret;
+
+ ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
+ &device->sec2->falcon, &gsp->booter.unload);
+ if (ret)
+ return ret;
+
+ ret = r535_gsp_oneinit(gsp);
+ if (ret)
+ return ret;
+
+ /* Calculate FB layout. */
+ gsp->fb.wpr2.frts.size = 0x100000;
+ gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+ gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+ gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+ gsp->fb.wpr2.elf.size = gsp->fw.len;
+ gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+ gsp->fb.wpr2.heap.size = tu102_gsp_wpr_heap_size(gsp);
+
+ gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+ gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+ gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+ gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+ gsp->fb.heap.size = 0x100000;
+ gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
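+
+ /*
+ * Resulting layout, from the top of FB downward: VGA workspace, FRTS,
+ * boot binary, GSP-RM ELF, WPR heap, WPR meta (at gsp->fb.wpr2.addr),
+ * then a small non-WPR heap directly below WPR2.
+ */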
+
+ ret = tu102_gsp_wpr_meta_init(gsp);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_fwsec_frts(gsp);
+ if (WARN_ON(ret))
+ return ret;
+
+ /* Reset GSP into RISC-V mode. */
+ ret = gsp->func->reset(gsp);
+ if (ret)
+ return ret;
+
+ nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+ nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+ return 0;
}
const struct nvkm_falcon_func
@@ -163,29 +362,73 @@ tu102_gsp_flcn = {
};
static const struct nvkm_gsp_func
-tu102_gsp_r535_113_01 = {
+tu102_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu10x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
+int
+tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ bool enable_gsp = fwif->enable;
+ int ret;
+
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
+ return -EINVAL;
+
+ ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+tu102_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+ int ret;
+
+ ret = tu102_gsp_load_rm(gsp, fwif);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load);
+ if (ret)
+ goto done;
+
+ ret = nvkm_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload);
+
+done:
+ if (ret)
+ nvkm_gsp_dtor_fws(gsp);
+
+ return ret;
+}
+
static struct nvkm_gsp_fwif
tu102_gsps[] = {
- { 0, r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu102_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu102_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -196,3 +439,11 @@ tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu102, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu104, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu106, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
index 04fbd9ed28b1..97eb046c25d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
@@ -22,29 +22,27 @@
#include "priv.h"
static const struct nvkm_gsp_func
-tu116_gsp_r535_113_01 = {
+tu116_gsp = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu11x",
- .wpr_heap.base_size = 8 << 20,
- .wpr_heap.min_size = 64 << 20,
-
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
- .init = r535_gsp_init,
- .fini = r535_gsp_fini,
+ .init = tu102_gsp_init,
+ .fini = tu102_gsp_fini,
.reset = tu102_gsp_reset,
- .rm = &r535_gsp_rm,
+ .rm.gpu = &tu1xx_gpu,
};
static struct nvkm_gsp_fwif
tu116_gsps[] = {
- { 0, r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" },
+ { 1, tu102_gsp_load, &tu116_gsp, &r570_rm_tu102, "570.144" },
+ { 0, tu102_gsp_load, &tu116_gsp, &r535_rm_tu102, "535.113.01" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};
@@ -55,3 +53,9 @@ tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
{
return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp);
}
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 535.113.01);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 535.113.01);
+
+NVKM_GSP_FIRMWARE_BOOTER(tu116, 570.144);
+NVKM_GSP_FIRMWARE_BOOTER(tu117, 570.144);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
index 553d540f2736..fa7a2862dd1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
@@ -4,5 +4,4 @@ nvkm-y += nvkm/subdev/instmem/nv04.o
nvkm-y += nvkm/subdev/instmem/nv40.o
nvkm-y += nvkm/subdev/instmem/nv50.o
nvkm-y += nvkm/subdev/instmem/gk20a.o
-
-nvkm-y += nvkm/subdev/instmem/r535.o
+nvkm-y += nvkm/subdev/instmem/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index a2cd3330efc6..2f55bab8e132 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -182,9 +182,11 @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
int ret;
if (suspend) {
- ret = imem->func->suspend(imem);
- if (ret)
- return ret;
+ if (imem->func->suspend) {
+ ret = imem->func->suspend(imem);
+ if (ret)
+ return ret;
+ }
imem->suspend = true;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c
new file mode 100644
index 000000000000..8d8dd5f8a6c7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gh100.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/ref/gh100/pri_nv_xal_ep.h>
+
+static void
+gh100_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, NV_XAL_EP_BAR0_WINDOW, addr >> NV_XAL_EP_BAR0_WINDOW_BASE_SHIFT);
+}
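+
+/*
+ * The BAR0 window is a movable aperture into FB: accesses at the aperture
+ * (see nv50_instobj_rd32_slow()) hit whatever FB address the window
+ * register currently selects.
+ */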
+
+static const struct nvkm_instmem_func
+gh100_instmem = {
+ .fini = nv50_instmem_fini,
+ .memory_new = nv50_instobj_new,
+ .memory_wrap = nv50_instobj_wrap,
+ .set_bar0_window_addr = gh100_instmem_set_bar0_window_addr,
+};
+
+int
+gh100_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_instmem **pimem)
+{
+ return r535_instmem_new(&gh100_instmem, device, type, inst, pimem);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index 6b462f960922..2544b9f0ec85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -239,7 +239,6 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
struct nvkm_instmem **pimem)
{
struct nv40_instmem *imem;
- int bar;
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
@@ -247,13 +246,8 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
*pimem = &imem->base;
/* map bar */
- if (device->func->resource_size(device, 2))
- bar = 2;
- else
- bar = 3;
-
- imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
- device->func->resource_size(device, bar));
+ imem->iomem = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST),
+ device->func->resource_size(device, NVKM_BAR2_INST));
if (!imem->iomem) {
nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index dd5b5a17ece0..4ca6fb30743d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -65,7 +65,7 @@ nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
nvkm_wr32(device, 0x700000 + addr, data);
@@ -85,7 +85,7 @@ nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
spin_lock_irqsave(&imem->base.lock, flags);
if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
+ imem->base.func->set_bar0_window_addr(device, base);
imem->addr = base;
}
data = nvkm_rd32(device, 0x700000 + addr);
@@ -172,7 +172,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
/* Make the mapping visible to the host. */
iobj->bar = bar;
- iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+ iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) +
(u32)iobj->bar->addr, size);
if (!iobj->map) {
nvkm_warn(subdev, "PRAMIN ioremap failed\n");
@@ -353,7 +353,7 @@ nv50_instobj_func = {
.map = nv50_instobj_map,
};
-static int
+int
nv50_instobj_wrap(struct nvkm_instmem *base,
struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
@@ -373,7 +373,7 @@ nv50_instobj_wrap(struct nvkm_instmem *base,
return 0;
}
-static int
+int
nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
@@ -395,6 +395,12 @@ nv50_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
*****************************************************************************/
static void
+nv50_instmem_set_bar0_window_addr(struct nvkm_device *device, u64 addr)
+{
+ nvkm_wr32(device, 0x001700, addr >> 16);
+}
+
+void
nv50_instmem_fini(struct nvkm_instmem *base)
{
nv50_instmem(base)->addr = ~0ULL;
@@ -415,6 +421,7 @@ nv50_instmem = {
.memory_new = nv50_instobj_new,
.memory_wrap = nv50_instobj_wrap,
.zero = false,
+ .set_bar0_window_addr = nv50_instmem_set_bar0_window_addr,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 4c14c96fb60a..87bbdd786eaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -16,10 +16,16 @@ struct nvkm_instmem_func {
bool zero, struct nvkm_memory **);
int (*memory_wrap)(struct nvkm_instmem *, struct nvkm_memory *, struct nvkm_memory **);
bool zero;
+ void (*set_bar0_window_addr)(struct nvkm_device *, u64 addr);
};
int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem **);
+void nv50_instmem_fini(struct nvkm_instmem *);
+int nv50_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+ struct nvkm_memory **);
+int nv50_instobj_wrap(struct nvkm_instmem *, struct nvkm_memory *vram,
+ struct nvkm_memory **bar2);
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
enum nvkm_subdev_type, int, struct nvkm_instmem *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 7ba35ea59c06..ea4848931540 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -15,8 +15,7 @@ nvkm-y += nvkm/subdev/mmu/gp100.o
nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/gv100.o
nvkm-y += nvkm/subdev/mmu/tu102.o
-
-nvkm-y += nvkm/subdev/mmu/r535.o
+nvkm-y += nvkm/subdev/mmu/gh100.o
nvkm-y += nvkm/subdev/mmu/mem.o
nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -38,6 +37,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgp100.o
nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
nvkm-y += nvkm/subdev/mmu/vmmgv100.o
nvkm-y += nvkm/subdev/mmu/vmmtu102.o
+nvkm-y += nvkm/subdev/mmu/vmmgh100.o
nvkm-y += nvkm/subdev/mmu/umem.o
nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c
new file mode 100644
index 000000000000..2918fb32cc91
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gh100.c
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gh100_mmu = {
+ .dma_bits = 52,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gh100_vmm_new },
+ .kind = tu102_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gh100_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_mmu **pmmu)
+{
+ return r535_mmu_new(&gh100_mmu, device, type, inst, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
index d9c9bee45222..160a5749a29f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
@@ -60,7 +60,7 @@ gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if (ret)
return ret;
- *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
*psize = (*pvma)->size;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
index 79a3b0cc9f5b..1e3db52de6cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
@@ -41,7 +41,7 @@ nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
return ret;
- *paddr = device->func->resource_addr(device, 1) + addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + addr;
*psize = nvkm_memory_size(memory);
*pvma = ERR_PTR(-ENODEV);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
index 46759b89fc1f..33b2321e9d87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
@@ -57,7 +57,7 @@ nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
if (ret)
return ret;
- *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
*psize = (*pvma)->size;
return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index e9ca6537778c..90efef8f0b54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -53,6 +53,8 @@ const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count, u8 *invalid);
const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+const u8 *tu102_mmu_kind(struct nvkm_mmu *, int *, u8 *);
+
struct nvkm_mmu_pt {
union {
struct nvkm_mmu_ptc *ptc;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
index df662ce4a4b0..7acff3642e20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
@@ -28,7 +28,7 @@
#include <nvif/class.h>
-static const u8 *
+const u8 *
tu102_mmu_kind(struct nvkm_mmu *mmu, int *count, u8 *invalid)
{
static const u8
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 9c97800fe037..f95c58b67633 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#define NVKM_VMM_LEVELS_MAX 5
+#define NVKM_VMM_LEVELS_MAX 6
#include "vmm.h"
#include <subdev/fb.h>
@@ -1030,12 +1030,8 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
struct nvkm_vma *vma;
struct rb_node *node;
- if (vmm->rm.client.gsp) {
- nvkm_gsp_rm_free(&vmm->rm.object);
- nvkm_gsp_device_dtor(&vmm->rm.device);
- nvkm_gsp_client_dtor(&vmm->rm.client);
- nvkm_vmm_put(vmm, &vmm->rm.rsvd);
- }
+ if (vmm->rm.client.gsp)
+ r535_mmu_vaspace_del(vmm);
if (0)
nvkm_vmm_dump(vmm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index f9bc30cdb2b3..4586a425dbe4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -143,6 +143,8 @@ struct nvkm_vmm_func {
int (*aper)(enum nvkm_memory_target);
int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
struct nvkm_vmm_map *);
+ int (*valid2)(struct nvkm_vmm *, bool ro, bool priv, u8 kind, u8 comp,
+ struct nvkm_vmm_map *);
void (*flush)(struct nvkm_vmm *, int depth);
int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
@@ -254,6 +256,8 @@ void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);
int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void tu102_vmm_flush(struct nvkm_vmm *, int depth);
+
int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
@@ -296,6 +300,9 @@ int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
+int gh100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
#define VMM_PRINT(l,v,p,f,a...) do { \
struct nvkm_vmm *_vmm = (v); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
new file mode 100644
index 000000000000..5614df3432da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgh100.c
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_mmu.h>
+
+static inline void
+gh100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = addr | map->type;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+ data += map->next;
+ }
+}
+
+static void
+gh100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = *map->dma++ | map->type;
+
+ VMM_WO064(pt, vmm, ptei++ * NV_MMU_VER3_PTE__SIZE, data);
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes,
+ struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pgt_pte);
+}
+
+static void
+gh100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, SPARSE);
+
+ VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .mem = gh100_vmm_pgt_mem,
+ .dma = gh100_vmm_pgt_dma,
+ .sgl = gh100_vmm_pgt_sgl,
+};
+
+static void
+gh100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_PTE, PCF, NO_VALID_4KB_PAGE);
+
+ VMM_FO064(pt, vmm, ptei * NV_MMU_VER3_PTE__SIZE, data, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_lpt = {
+ .invalid = gh100_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .mem = gh100_vmm_pgt_mem,
+};
+
+static inline void
+gh100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = addr | map->type;
+
+ while (ptes--) {
+ VMM_WO128(pt, vmm, ptei++ * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL);
+ data += map->next;
+ }
+}
+
+static void
+gh100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gh100_vmm_pd0_pte);
+}
+
+static inline bool
+gh100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, VIDEO_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_NOT_ALLOWED);
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_COHERENT_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_UNCACHED_ATS_ALLOWED);
+ break;
+ case NVKM_MEM_TARGET_NCOH:
+ *data |= NVDEF(NV_MMU, VER3_PDE, APERTURE, SYSTEM_NON_COHERENT_MEMORY);
+ *data |= NVDEF(NV_MMU, VER3_PDE, PCF, VALID_CACHED_ATS_ALLOWED);
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ *data |= pt->addr;
+ return true;
+}
+
+static void
+gh100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data[2] = {};
+
+ if (pgt->pt[0] && !gh100_vmm_pde(pgt->pt[0], &data[0]))
+ return;
+ if (pgt->pt[1] && !gh100_vmm_pde(pgt->pt[1], &data[1]))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO128(pd, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data[0], data[1]);
+ nvkm_done(pd->memory);
+}
+
+static void
+gh100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ const u64 data = NVDEF(NV_MMU, VER3_DUAL_PDE, PCF_BIG, SPARSE_ATS_ALLOWED);
+
+ VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, data, 0ULL, pdes);
+}
+
+static void
+gh100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ VMM_FO128(pt, vmm, pdei * NV_MMU_VER3_DUAL_PDE__SIZE, 0ULL, 0ULL, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd0 = {
+ .unmap = gh100_vmm_pd0_unmap,
+ .sparse = gh100_vmm_pd0_sparse,
+ .pde = gh100_vmm_pd0_pde,
+ .mem = gh100_vmm_pd0_mem,
+};
+
+static void
+gh100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data = 0;
+
+ if (!gh100_vmm_pde(pgt->pt[0], &data))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * NV_MMU_VER3_PDE__SIZE, data);
+ nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gh100_vmm_desc_pd1 = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gh100_vmm_pgt_sparse,
+ .pde = gh100_vmm_pd1_pde,
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_16[] = {
+ { LPT, 5, 8, 0x0100, &gh100_vmm_desc_lpt },
+ { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gh100_vmm_desc_12[] = {
+ { SPT, 9, 8, 0x1000, &gh100_vmm_desc_spt },
+ { PGD, 8, 16, 0x1000, &gh100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ { PGD, 1, 8, 0x1000, &gh100_vmm_desc_pd1 },
+ {}
+};
+
+static int
+gh100_vmm_valid(struct nvkm_vmm *vmm, bool ro, bool priv, u8 kind, u8 comp,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const bool vol = target == NVKM_MEM_TARGET_HOST;
+ const struct nvkm_vmm_page *page = map->page;
+ u8 kind_inv, pcf;
+ int kindn, aper;
+ const u8 *kindm;
+
+ map->next = 1ULL << page->shift;
+ map->type = 0;
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
+ if (kind >= kindn || kindm[kind] == kind_inv) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (priv) {
+ if (ro) {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RO_ATOMIC_CACHED_ACD;
+ } else {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD;
+ }
+ } else {
+ if (ro) {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD;
+ } else {
+ if (vol)
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD;
+ else
+ pcf = NV_MMU_VER3_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD;
+ }
+ }
+
+ map->type |= NVDEF(NV_MMU, VER3_PTE, VALID, TRUE);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, APERTURE, aper);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, PCF, pcf);
+ map->type |= NVVAL(NV_MMU, VER3_PTE, KIND, kind);
+ return 0;
+}
+
+static const struct nvkm_vmm_func
+gh100_vmm = {
+ .join = gv100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .valid2 = gh100_vmm_valid,
+ .flush = tu102_vmm_flush,
+ .page = {
+ { 56, &gh100_vmm_desc_16[5], NVKM_VMM_PAGE_Sxxx },
+ { 47, &gh100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gh100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gh100_vmm_desc_16[2], NVKM_VMM_PAGE_SVxC },
+ { 21, &gh100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gh100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gh100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gh100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gp100_vmm_new_(&gh100_vmm, mmu, managed, addr, size,
+ argv, argc, key, name, pvmm);
+}
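
gh100_vmm_valid() above chooses one of eight PCF encodings from the three booleans priv, ro and vol. The nested if/else is equivalent to a lookup keyed on three flag bits; a standalone sketch of that equivalence, with placeholder values standing in for the NV_MMU_VER3_PTE_PCF_* constants that actually come from dev_mmu.h:

    #include <stdio.h>

    /* Placeholders for the eight NV_MMU_VER3_PTE_PCF_* encodings. */
    enum { REG_RW_C, REG_RW_U, REG_RO_C, REG_RO_U,
           PRIV_RW_C, PRIV_RW_U, PRIV_RO_C, PRIV_RO_U };

    static int pcf(int priv, int ro, int vol)
    {
        static const int table[2][2][2] = {
            [0][0][0] = REG_RW_C,  [0][0][1] = REG_RW_U,
            [0][1][0] = REG_RO_C,  [0][1][1] = REG_RO_U,
            [1][0][0] = PRIV_RW_C, [1][0][1] = PRIV_RW_U,
            [1][1][0] = PRIV_RO_C, [1][1][1] = PRIV_RO_U,
        };
        return table[!!priv][!!ro][!!vol];
    }

    int main(void)
    {
        printf("%d\n", pcf(0, 1, 0)); /* regular, read-only, cached */
        return 0;
    }
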
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index bddac77f48f0..851fd847a2a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -436,6 +436,9 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return ret;
}
+ if (vmm->func->valid2)
+ return vmm->func->valid2(vmm, ro, priv, kind, 0, map);
+
aper = vmm->func->aper(target);
if (WARN_ON(aper < 0))
return aper;
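
The hunk above splits gp100_vmm_valid() into common argument parsing and an optional per-generation encoder: when a VMM supplies ->valid2 (only gh100 in this series), the parsed ro/priv/kind values are forwarded to it, with the comp slot hard-wired to 0. Condensed from gh100_vmm_valid(), this is the shape such a hook fills in (the function name here is hypothetical):

    /* Sketch of a ->valid2 implementation: encode the PTE template into
     * map->type and the per-page address stride into map->next, or
     * return a negative errno on an invalid combination. */
    static int example_vmm_valid2(struct nvkm_vmm *vmm, bool ro, bool priv,
                                  u8 kind, u8 comp, struct nvkm_vmm_map *map)
    {
        map->next = 1ULL << map->page->shift; /* PTE address stride     */
        map->type = 0;                        /* VALID/APERTURE/PCF/KIND */
        return 0;
    }
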
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index 8379e72d77ab..4b30eab40bba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 174bdf995271..a14ea0f7b1c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/pci/gf100.o
nvkm-y += nvkm/subdev/pci/gf106.o
nvkm-y += nvkm/subdev/pci/gk104.o
nvkm-y += nvkm/subdev/pci/gp100.o
+nvkm-y += nvkm/subdev/pci/gh100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 5a0de45d36ce..6867934256a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -39,26 +39,26 @@ nvkm_pci_msi_rearm(struct nvkm_device *device)
u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
- return pci->func->rd32(pci, addr);
+ return nvkm_rd32(pci->subdev.device, pci->func->cfg.addr + addr);
}
void
nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
- pci->func->wr08(pci, addr, data);
+ nvkm_wr08(pci->subdev.device, pci->func->cfg.addr + addr, data);
}
void
nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
- pci->func->wr32(pci, addr, data);
+ nvkm_wr32(pci->subdev.device, pci->func->cfg.addr + addr, data);
}
u32
nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
{
- u32 data = pci->func->rd32(pci, addr);
- pci->func->wr32(pci, addr, (data & ~mask) | value);
+ u32 data = nvkm_pci_rd32(pci, addr);
+ nvkm_pci_wr32(pci, addr, (data & ~mask) | value);
return data;
}
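
The base.c rewrite collapses the per-chip rd32/wr08/wr32 hooks into a data-driven (addr, size) window plus one generic accessor, so every chip below only has to state where its config space lives. A toy standalone model of the refactor (the mmio array and values are illustrative, not real hardware layout):

    #include <stdint.h>
    #include <stdio.h>

    struct pci_func {
        struct { uint32_t addr; uint16_t size; } cfg;
    };

    static uint32_t mmio[0x100000 / 4]; /* fake register file */

    /* One shared accessor replaces per-chip function pointers. */
    static uint32_t pci_rd32(const struct pci_func *f, uint16_t addr)
    {
        return mmio[(f->cfg.addr + addr) / 4];
    }

    int main(void)
    {
        static const struct pci_func nv40 = { .cfg = { 0x088000, 0x1000 } };
        static const struct pci_func nv04 = { .cfg = { 0x001800, 0x1000 } };

        mmio[(0x088000 + 0x4) / 4] = 0xdeadbeef;
        printf("%08x %08x\n", pci_rd32(&nv40, 0x4), pci_rd32(&nv04, 0x4));
        return 0;
    }
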
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
index 5b29aacedef3..5308f6539a3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g84.c
@@ -132,10 +132,9 @@ g84_pcie_init(struct nvkm_pci *pci)
static const struct nvkm_pci_func
g84_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
index a9e0674009c6..8ae7aa02e675 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g92.c
@@ -33,10 +33,9 @@ g92_pcie_version_supported(struct nvkm_pci *pci)
static const struct nvkm_pci_func
g92_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv46_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
index 7bacd0693283..df745d0690ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/g94.c
@@ -25,10 +25,9 @@
static const struct nvkm_pci_func
g94_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = g84_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
index 099906092fe1..6ce941df87b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -78,10 +78,9 @@ gf100_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
static const struct nvkm_pci_func
gf100_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = gf100_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
index bcde609ba866..712ca7e0959a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf106.c
@@ -25,10 +25,9 @@
static const struct nvkm_pci_func
gf106_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gf100_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
new file mode 100644
index 000000000000..42da92d7a5fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gh100.c
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ */
+#include "priv.h"
+
+#include <nvhw/drf.h>
+#include <nvhw/ref/gh100/dev_xtl_ep_pri.h>
+
+static void
+gh100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ /* Handled by top-level intr ACK. */
+}
+
+static const struct nvkm_pci_func
+gh100_pci = {
+ .cfg = {
+ .addr = DRF_LO(NV_EP_PCFGM),
+ .size = DRF_HI(NV_EP_PCFGM) - DRF_LO(NV_EP_PCFGM) + 1,
+ },
+ .msi_rearm = gh100_pci_msi_rearm,
+};
+
+int
+gh100_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&gh100_pci, device, type, inst, ppci);
+}
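
Unlike the fixed 0x088000/0x1000 windows of the older chips, gh100 derives its config window from the NV_EP_PCFGM register range. Assuming DRF_LO()/DRF_HI() expand to the low and high byte offsets of that range, the .size expression is just an inclusive-range length; with hypothetical bounds:

    #include <stdio.h>

    /* Hypothetical bounds for NV_EP_PCFGM, only to show the size math;
     * the real values come from dev_xtl_ep_pri.h via DRF_LO()/DRF_HI(). */
    #define PCFGM_LO 0x8f0000u
    #define PCFGM_HI 0x8fffffu

    int main(void)
    {
        printf("addr=%#x size=%#x\n", PCFGM_LO, PCFGM_HI - PCFGM_LO + 1);
        return 0;
    }
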
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
index 6be87ecffc89..ec6d0a7de995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gk104.c
@@ -204,10 +204,9 @@ gk104_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
static const struct nvkm_pci_func
gk104_pci_func = {
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
+
.init = g84_pci_init,
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
.msi_rearm = nv40_pci_msi_rearm,
.pcie.init = gk104_pcie_init,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
index a5fafda0014d..4204316a544f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -31,9 +31,7 @@ gp100_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
gp100_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = gp100_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
index 9ab64194b185..b8a3f6850fa7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -23,32 +23,9 @@
*/
#include "priv.h"
-static u32
-nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
- struct nvkm_device *device = pci->subdev.device;
- return nvkm_rd32(device, 0x001800 + addr);
-}
-
-static void
-nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr08(device, 0x001800 + addr, data);
-}
-
-static void
-nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr32(device, 0x001800 + addr, data);
-}
-
static const struct nvkm_pci_func
nv04_pci_func = {
- .rd32 = nv04_pci_rd32,
- .wr08 = nv04_pci_wr08,
- .wr32 = nv04_pci_wr32,
+ .cfg = { .addr = 0x001800, .size = 0x1000 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
index 6a3c31cf0200..1971dbbdeb2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -23,27 +23,6 @@
*/
#include "priv.h"
-u32
-nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
-{
- struct nvkm_device *device = pci->subdev.device;
- return nvkm_rd32(device, 0x088000 + addr);
-}
-
-void
-nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr08(device, 0x088000 + addr, data);
-}
-
-void
-nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
-{
- struct nvkm_device *device = pci->subdev.device;
- nvkm_wr32(device, 0x088000 + addr, data);
-}
-
void
nv40_pci_msi_rearm(struct nvkm_pci *pci)
{
@@ -52,9 +31,7 @@ nv40_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
nv40_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = nv40_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
index 9cad17f178ec..0093eabac9ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv46.c
@@ -38,9 +38,7 @@ nv46_pci_msi_rearm(struct nvkm_pci *pci)
static const struct nvkm_pci_func
nv46_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
.msi_rearm = nv46_pci_msi_rearm,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
index 741e34bf307c..b445081bb80e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -25,9 +25,7 @@
static const struct nvkm_pci_func
nv4c_pci_func = {
- .rd32 = nv40_pci_rd32,
- .wr08 = nv40_pci_wr08,
- .wr32 = nv40_pci_wr32,
+ .cfg = { .addr = 0x088000, .size = 0x1000 },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 9b7583532962..988eeee1471c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -8,10 +8,12 @@ int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *, enum nvkm_
struct nvkm_pci **);
struct nvkm_pci_func {
+ struct {
+ u32 addr;
+ u16 size;
+ } cfg;
+
void (*init)(struct nvkm_pci *);
- u32 (*rd32)(struct nvkm_pci *, u16 addr);
- void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
- void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
void (*msi_rearm)(struct nvkm_pci *);
struct {
@@ -27,9 +29,6 @@ struct nvkm_pci_func {
} pcie;
};
-u32 nv40_pci_rd32(struct nvkm_pci *, u16);
-void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
-void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
void nv40_pci_msi_rearm(struct nvkm_pci *);
void nv46_pci_msi_rearm(struct nvkm_pci *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
index dce337306cab..d294844d9eae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
@@ -21,6 +21,8 @@
*/
#include "priv.h"
+#include <rm/gpu.h>
+
static void
r535_vfn_dtor(struct nvkm_vfn *vfn)
{
@@ -32,6 +34,7 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr,
struct nvkm_vfn **pvfn)
{
+ const struct nvkm_rm_gpu *gpu = device->gsp->rm->gpu;
struct nvkm_vfn_func *rm;
int ret;
@@ -39,8 +42,12 @@ r535_vfn_new(const struct nvkm_vfn_func *hw,
return -ENOMEM;
rm->dtor = r535_vfn_dtor;
- rm->intr = hw->intr;
- rm->user = hw->user;
+ rm->intr = &tu102_vfn_intr;
+ rm->user.addr = 0x030000;
+ rm->user.size = 0x010000;
+ rm->user.base.minver = -1;
+ rm->user.base.maxver = -1;
+ rm->user.base.oclass = gpu->usermode.class;
ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
index c5460a14c541..4e64d8843373 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/uvfn.c
@@ -36,7 +36,7 @@ nvkm_uvfn_map(struct nvkm_object *object, void *argv, u32 argc,
struct nvkm_vfn *vfn = nvkm_uvfn(object)->vfn;
struct nvkm_device *device = vfn->subdev.device;
- *addr = device->func->resource_addr(device, 0) + vfn->addr.user;
+ *addr = device->func->resource_addr(device, NVKM_BAR0_PRI) + vfn->addr.user;
*size = vfn->func->user.size;
*type = NVKM_OBJECT_MAP_IO;
return 0;
diff --git a/drivers/gpu/drm/nova/Kconfig b/drivers/gpu/drm/nova/Kconfig
new file mode 100644
index 000000000000..cca6a3fea879
--- /dev/null
+++ b/drivers/gpu/drm/nova/Kconfig
@@ -0,0 +1,14 @@
+config DRM_NOVA
+ tristate "Nova DRM driver"
+ depends on DRM=y
+ depends on PCI
+ depends on RUST
+ select AUXILIARY_BUS
+ default n
+ help
+ Choose this if you want to build the Nova DRM driver for Nvidia
+ GSP-based GPUs.
+
+ This driver is a work in progress and may not be functional.
+
+ If M is selected, the module will be called nova.
diff --git a/drivers/gpu/drm/nova/Makefile b/drivers/gpu/drm/nova/Makefile
new file mode 100644
index 000000000000..42019bff3173
--- /dev/null
+++ b/drivers/gpu/drm/nova/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_NOVA) += nova.o
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
new file mode 100644
index 000000000000..b28b2e05cc15
--- /dev/null
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, types::ARef};
+
+use crate::file::File;
+use crate::gem::NovaObject;
+
+pub(crate) struct NovaDriver {
+ #[expect(unused)]
+ drm: ARef<drm::Device<Self>>,
+}
+
+/// Convenience type alias for the DRM device type for this driver
+pub(crate) type NovaDevice = drm::Device<NovaDriver>;
+
+#[pin_data]
+pub(crate) struct NovaData {
+ pub(crate) adev: ARef<auxiliary::Device>,
+}
+
+const INFO: drm::DriverInfo = drm::DriverInfo {
+ major: 0,
+ minor: 0,
+ patchlevel: 0,
+ name: c_str!("nova"),
+ desc: c_str!("Nvidia Graphics"),
+};
+
+const NOVA_CORE_MODULE_NAME: &CStr = c_str!("NovaCore");
+const AUXILIARY_NAME: &CStr = c_str!("nova-drm");
+
+kernel::auxiliary_device_table!(
+ AUX_TABLE,
+ MODULE_AUX_TABLE,
+ <NovaDriver as auxiliary::Driver>::IdInfo,
+ [(
+ auxiliary::DeviceId::new(NOVA_CORE_MODULE_NAME, AUXILIARY_NAME),
+ ()
+ )]
+);
+
+impl auxiliary::Driver for NovaDriver {
+ type IdInfo = ();
+ const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
+
+ fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ let data = try_pin_init!(NovaData { adev: adev.into() });
+
+ let drm = drm::Device::<Self>::new(adev.as_ref(), data)?;
+ drm::Registration::new_foreign_owned(&drm, adev.as_ref(), 0)?;
+
+ Ok(KBox::new(Self { drm }, GFP_KERNEL)?.into())
+ }
+}
+
+#[vtable]
+impl drm::Driver for NovaDriver {
+ type Data = NovaData;
+ type File = File;
+ type Object = gem::Object<NovaObject>;
+
+ const INFO: drm::DriverInfo = INFO;
+
+ kernel::declare_drm_ioctls! {
+ (NOVA_GETPARAM, drm_nova_getparam, ioctl::RENDER_ALLOW, File::get_param),
+ (NOVA_GEM_CREATE, drm_nova_gem_create, ioctl::AUTH | ioctl::RENDER_ALLOW, File::gem_create),
+ (NOVA_GEM_INFO, drm_nova_gem_info, ioctl::AUTH | ioctl::RENDER_ALLOW, File::gem_info),
+ }
+}
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
new file mode 100644
index 000000000000..7e59a34b830d
--- /dev/null
+++ b/drivers/gpu/drm/nova/file.rs
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::driver::{NovaDevice, NovaDriver};
+use crate::gem::NovaObject;
+use crate::uapi::{GemCreate, GemInfo, Getparam};
+use kernel::{
+ alloc::flags::*,
+ drm::{self, gem::BaseObject},
+ pci,
+ prelude::*,
+ types::Opaque,
+ uapi,
+};
+
+pub(crate) struct File;
+
+impl drm::file::DriverFile for File {
+ type Driver = NovaDriver;
+
+ fn open(_dev: &NovaDevice) -> Result<Pin<KBox<Self>>> {
+ Ok(KBox::new(Self, GFP_KERNEL)?.into())
+ }
+}
+
+impl File {
+ /// IOCTL: get_param: Query GPU / driver metadata.
+ pub(crate) fn get_param(
+ dev: &NovaDevice,
+ getparam: &Opaque<uapi::drm_nova_getparam>,
+ _file: &drm::File<File>,
+ ) -> Result<u32> {
+ let adev = &dev.adev;
+ let parent = adev.parent().ok_or(ENOENT)?;
+ let pdev: &pci::Device = parent.try_into()?;
+ let getparam: &Getparam = getparam.into();
+
+ let value = match getparam.param() as u32 {
+ uapi::NOVA_GETPARAM_VRAM_BAR_SIZE => pdev.resource_len(1)?,
+ _ => return Err(EINVAL),
+ };
+
+ getparam.set_value(value);
+
+ Ok(0)
+ }
+
+ /// IOCTL: gem_create: Create a new DRM GEM object.
+ pub(crate) fn gem_create(
+ dev: &NovaDevice,
+ req: &Opaque<uapi::drm_nova_gem_create>,
+ file: &drm::File<File>,
+ ) -> Result<u32> {
+ let req: &GemCreate = req.into();
+ let obj = NovaObject::new(dev, req.size().try_into()?)?;
+
+ req.set_handle(obj.create_handle(file)?);
+
+ Ok(0)
+ }
+
+ /// IOCTL: gem_info: Query GEM metadata.
+ pub(crate) fn gem_info(
+ _dev: &NovaDevice,
+ req: &Opaque<uapi::drm_nova_gem_info>,
+ file: &drm::File<File>,
+ ) -> Result<u32> {
+ let req: &GemInfo = req.into();
+ let bo = NovaObject::lookup_handle(file, req.handle())?;
+
+ req.set_size(bo.size().try_into()?);
+
+ Ok(0)
+ }
+}
diff --git a/drivers/gpu/drm/nova/gem.rs b/drivers/gpu/drm/nova/gem.rs
new file mode 100644
index 000000000000..33b62d21400c
--- /dev/null
+++ b/drivers/gpu/drm/nova/gem.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ drm,
+ drm::{gem, gem::BaseObject},
+ prelude::*,
+ types::ARef,
+};
+
+use crate::{
+ driver::{NovaDevice, NovaDriver},
+ file::File,
+};
+
+/// GEM Object inner driver data
+#[pin_data]
+pub(crate) struct NovaObject {}
+
+impl gem::BaseDriverObject<gem::Object<NovaObject>> for NovaObject {
+ fn new(_dev: &NovaDevice, _size: usize) -> impl PinInit<Self, Error> {
+ try_pin_init!(NovaObject {})
+ }
+}
+
+impl gem::DriverObject for NovaObject {
+ type Driver = NovaDriver;
+}
+
+impl NovaObject {
+ /// Create a new DRM GEM object.
+ pub(crate) fn new(dev: &NovaDevice, size: usize) -> Result<ARef<gem::Object<Self>>> {
+ let aligned_size = size.next_multiple_of(1 << 12);
+
+ if size == 0 || size > aligned_size {
+ return Err(EINVAL);
+ }
+
+ gem::Object::new(dev, aligned_size)
+ }
+
+ /// Look up a GEM object handle for a `File` and return an `ARef` to it.
+ #[inline]
+ pub(crate) fn lookup_handle(
+ file: &drm::File<File>,
+ handle: u32,
+ ) -> Result<ARef<gem::Object<Self>>> {
+ gem::Object::lookup_handle(file, handle)
+ }
+}
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
new file mode 100644
index 000000000000..902876aa14d1
--- /dev/null
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Nova DRM Driver
+
+mod driver;
+mod file;
+mod gem;
+mod uapi;
+
+use crate::driver::NovaDriver;
+
+kernel::module_auxiliary_driver! {
+ type: NovaDriver,
+ name: "Nova",
+ author: "Danilo Krummrich",
+ description: "Nova GPU driver",
+ license: "GPL v2",
+}
diff --git a/drivers/gpu/drm/nova/uapi.rs b/drivers/gpu/drm/nova/uapi.rs
new file mode 100644
index 000000000000..eb228a58d423
--- /dev/null
+++ b/drivers/gpu/drm/nova/uapi.rs
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::uapi;
+
+// TODO Work out some common infrastructure to avoid boilerplate code for uAPI abstractions.
+
+macro_rules! define_uapi_abstraction {
+ ($name:ident <= $inner:ty) => {
+ #[repr(transparent)]
+ pub struct $name(::kernel::types::Opaque<$inner>);
+
+ impl ::core::convert::From<&::kernel::types::Opaque<$inner>> for &$name {
+ fn from(value: &::kernel::types::Opaque<$inner>) -> Self {
+ // SAFETY: `Self` is a transparent wrapper of `$inner`.
+ unsafe { ::core::mem::transmute(value) }
+ }
+ }
+ };
+}
+
+define_uapi_abstraction!(Getparam <= uapi::drm_nova_getparam);
+
+impl Getparam {
+ pub fn param(&self) -> u64 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
+ unsafe { (*self.0.get()).param }
+ }
+
+ pub fn set_value(&self, v: u64) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
+ unsafe { (*self.0.get()).value = v };
+ }
+}
+
+define_uapi_abstraction!(GemCreate <= uapi::drm_nova_gem_create);
+
+impl GemCreate {
+ pub fn size(&self) -> u64 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
+ unsafe { (*self.0.get()).size }
+ }
+
+ pub fn set_handle(&self, handle: u32) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
+ unsafe { (*self.0.get()).handle = handle };
+ }
+}
+
+define_uapi_abstraction!(GemInfo <= uapi::drm_nova_gem_info);
+
+impl GemInfo {
+ pub fn handle(&self) -> u32 {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
+ unsafe { (*self.0.get()).handle }
+ }
+
+ pub fn set_size(&self, size: u64) {
+ // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
+ unsafe { (*self.0.get()).size = size };
+ }
+}
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index b17e77f700dd..6eff97a09160 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -420,6 +420,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
*/
static int dpi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
@@ -429,7 +430,7 @@ static int dpi_bridge_attach(struct drm_bridge *bridge,
dpi_init_pll(dpi);
- return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ return drm_bridge_attach(encoder, dpi->output.next_bridge,
bridge, flags);
}
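
All of the omapdrm bridge hunks in this run apply the same DRM core API change: the encoder is now passed to the .attach callback by the core instead of being read back from bridge->encoder, which is being phased out. A toy model of the new calling convention (the types are stand-ins, not the real DRM structs):

    #include <stdio.h>

    struct encoder { const char *name; };
    struct bridge  { struct encoder *encoder; /* legacy back-pointer */ };

    /* New shape: the core hands the encoder in explicitly. */
    static int bridge_attach(struct bridge *b, struct encoder *enc)
    {
        (void)b; /* no need to dereference b->encoder any more */
        printf("attaching to %s\n", enc->name);
        return 0;
    }

    int main(void)
    {
        struct encoder enc = { "encoder-0" };
        struct bridge br = { .encoder = NULL };

        return bridge_attach(&br, &enc);
    }
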
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 9b9cc593790c..91ee63bfe0bc 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4617,6 +4617,7 @@ static const struct component_ops dsi_component_ops = {
*/
static int dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct dsi_data *dsi = drm_bridge_to_dsi(bridge);
@@ -4624,7 +4625,7 @@ static int dsi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge,
+ return drm_bridge_attach(encoder, dsi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index e1ac447221ee..a3b22952fdc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -314,6 +314,7 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
*/
static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -321,7 +322,7 @@ static int hdmi4_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index fa9904e4c218..0c98444d39a9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -312,6 +312,7 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
*/
static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
@@ -319,7 +320,7 @@ static int hdmi5_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ return drm_bridge_attach(encoder, hdmi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index f9ae358e8e52..e78826e4b560 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -128,6 +128,7 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
*/
static int sdi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
@@ -135,7 +136,7 @@ static int sdi_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ return drm_bridge_attach(encoder, sdi->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index aaeef603682c..50349518eda1 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -538,6 +538,7 @@ static int venc_get_clocks(struct venc_device *venc)
*/
static int venc_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct venc_device *venc = drm_bridge_to_venc(bridge);
@@ -545,7 +546,7 @@ static int venc_bridge_attach(struct drm_bridge *bridge,
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
return -EINVAL;
- return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ return drm_bridge_attach(encoder, venc->output.next_bridge,
bridge, flags);
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index e059b06e0239..cfebb08e8a62 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -67,6 +67,15 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_BOE_TD4320
+ tristate "BOE TD4320 DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the BOE TD4320 1080x2340
+ video mode panel found in Xiaomi Redmi Note 7 smartphones.
+
config DRM_PANEL_BOE_TH101MB31UIG002_28A
tristate "Boe TH101MB31UIG002-28A panel"
depends on OF
@@ -154,6 +163,17 @@ config DRM_PANEL_LVDS
handling of power supplies or control signals. It implements automatic
backlight handling if the panel is attached to a backlight controller.
+config DRM_PANEL_HIMAX_HX8279
+ tristate "Himax HX8279-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Himax HX8279 controller, such as the Startek KD070FHFID078
+ 7.0" 1200x1920 IPS LCD panel, which uses a MIPI-DSI interface,
+ and others.
+
config DRM_PANEL_HIMAX_HX83102
tristate "Himax HX83102-based panels"
depends on OF
@@ -497,6 +517,18 @@ config DRM_PANEL_NOVATEK_NT36672E
LCD panel module. The panel has a resolution of 1080x2408 and uses 24 bit
RGB per pixel.
+config DRM_PANEL_NOVATEK_NT37801
+ tristate "Novatek NT37801/NT37810 AMOLED DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DSC_HELPER
+ select DRM_DISPLAY_HELPER
+ help
+ Say Y here if you want to enable support for the Novatek NT37801 (or
+ NT37810) AMOLED DSI Video Mode LCD panel module with 1440x3200
+ resolution.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -996,6 +1028,15 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_G2647FB105
+ tristate "Visionox G2647FB105"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Visionox
+ G2647FB105 (2340x1080@60Hz) AMOLED DSI cmd mode panel.
+
config DRM_PANEL_VISIONOX_R66451
tristate "Visionox R66451"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 1bb8ae46b59b..714cbac830e3 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.
obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TD4320) += panel-boe-td4320.o
obj-$(CONFIG_DRM_PANEL_BOE_TH101MB31UIG002_28A) += panel-boe-th101mb31ig002-28a.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_LL2) += panel-boe-tv101wum-ll2.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
@@ -16,6 +17,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
@@ -49,6 +51,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36523) += panel-novatek-nt36523.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672E) += panel-novatek-nt36672e.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT37801) += panel-novatek-nt37801.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
@@ -101,6 +104,7 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_G2647FB105) += panel-visionox-g2647fb105.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM692E5) += panel-visionox-rm692e5.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
index 4692c36fe217..87fb0fd29658 100644
--- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
+++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c
@@ -279,9 +279,10 @@ static int y030xx067a_probe(struct spi_device *spi)
struct y030xx067a *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct y030xx067a, panel,
+ &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -306,9 +307,6 @@ static int y030xx067a_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &y030xx067a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
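
This conversion, and the long run of panel conversions that follows, share one recipe: devm_kzalloc() plus a later drm_panel_init() become a single devm_drm_panel_alloc(), which allocates the driver struct around an embedded, already-initialised drm_panel and reports failure via ERR_PTR() instead of NULL. Condensed before/after, using the names from this hunk:

    /* Before: allocate, then initialise the embedded panel separately. */
    priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;
    /* ... later in probe ... */
    drm_panel_init(&priv->panel, dev, &y030xx067a_funcs,
                   DRM_MODE_CONNECTOR_DPI);

    /* After: one call; the panel is initialised up front. */
    priv = devm_drm_panel_alloc(dev, struct y030xx067a, panel,
                                &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI);
    if (IS_ERR(priv))
        return PTR_ERR(priv);
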
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 503ecea72c5e..ea5119018df4 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -306,9 +306,11 @@ static int versatile_panel_probe(struct platform_device *pdev)
return PTR_ERR(map);
}
- vpanel = devm_kzalloc(dev, sizeof(*vpanel), GFP_KERNEL);
- if (!vpanel)
- return -ENOMEM;
+ vpanel = devm_drm_panel_alloc(dev, struct versatile_panel, panel,
+ &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(vpanel))
+ return PTR_ERR(vpanel);
ret = regmap_read(map, SYS_CLCD, &val);
if (ret) {
@@ -348,9 +350,6 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&vpanel->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index b05a663c134c..db006576d704 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -224,9 +224,11 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
struct tm5p5_nt35596 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct tm5p5_nt35596, panel,
+ &tm5p5_nt35596_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd";
ctx->supplies[1].supply = "vddio";
@@ -253,9 +255,6 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight)) {
ret = PTR_ERR(ctx->panel.backlight);
diff --git a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
index 77604d6a4e72..6e52bf6830e1 100644
--- a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
+++ b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
@@ -200,9 +200,10 @@ static int a030jtn01_probe(struct spi_device *spi)
spi->mode |= SPI_MODE_3 | SPI_3WIRE;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct a030jtn01, panel,
+ &a030jtn01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -223,9 +224,6 @@ static int a030jtn01_probe(struct spi_device *spi)
if (IS_ERR(priv->reset_gpio))
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO");
- drm_panel_init(&priv->panel, dev, &a030jtn01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index 7e66db4a88bb..84c21c62a43e 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -55,77 +55,56 @@ static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe)
static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
{
struct mipi_dsi_device *dsi = boe->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
- mipi_dsi_dcs_write_seq(dsi, 0xf8,
- 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc0,
- 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
- 0x2a, 0x31, 0x39, 0x20, 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
- 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
- 0x5c, 0x5c, 0x5c);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
-
- msleep(30);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(50);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x00, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8,
+ 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0,
+ 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
+ 0x2a, 0x31, 0x39, 0x20, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
+ 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
+ 0x5c, 0x5c, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
+
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ return dsi_ctx.accum_err;
}
-static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
+static void boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe)
{
struct mipi_dsi_device *dsi = boe->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
/* OFF commands sent in HS mode */
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- usleep_range(1000, 2000);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
-
- return 0;
}
static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
{
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
- struct device *dev = &boe->dsi->dev;
int ret;
/*
@@ -157,13 +136,14 @@ static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
ret = boe_bf060y8m_aj0_on(boe);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(boe->reset_gpio, 1);
- return ret;
+ goto err_on;
}
return 0;
+err_on:
+ regulator_disable(boe->vregs[BF060Y8M_VREG_VCI].consumer);
err_vci:
regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer);
err_vddio:
@@ -178,15 +158,11 @@ err_elvss:
static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel)
{
struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel);
- struct device *dev = &boe->dsi->dev;
- int ret;
- ret = boe_bf060y8m_aj0_off(boe);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ boe_bf060y8m_aj0_off(boe);
gpiod_set_value_cansleep(boe->reset_gpio, 1);
- ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
+ regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
return 0;
}
@@ -234,13 +210,11 @@ static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl)
@@ -350,9 +324,11 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
struct boe_bf060y8m_aj0 *boe;
int ret;
- boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL);
- if (!boe)
- return -ENOMEM;
+ boe = devm_drm_panel_alloc(dev, struct boe_bf060y8m_aj0, panel,
+ &boe_bf060y8m_aj0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(boe))
+ return PTR_ERR(boe);
ret = boe_bf060y8m_aj0_init_vregs(boe, dev);
if (ret)
@@ -374,9 +350,6 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
boe->panel.prepare_prev_first = true;
boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
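
The bf060y8m conversion above is representative of the multi-context DSI helpers used throughout this series: each *_multi call (and each mipi_dsi_msleep()/mipi_dsi_usleep_range() delay) becomes a no-op once an error has been recorded in accum_err, so a command sequence needs a single check at the end instead of one per write. The pattern, reduced to a sketch:

    /* Minimal shape of a multi-context init sequence. */
    static int panel_on(struct mipi_dsi_device *dsi)
    {
        struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

        mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
        mipi_dsi_msleep(&dsi_ctx, 30);  /* skipped if an error is pending */
        mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

        return dsi_ctx.accum_err;       /* first error seen, or 0 */
    }
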
diff --git a/drivers/gpu/drm/panel/panel-boe-td4320.c b/drivers/gpu/drm/panel/panel-boe-td4320.c
new file mode 100644
index 000000000000..1956daa2c71b
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-td4320.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Barnabas Czeman <barnabas.czeman@mainlining.org>
+// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+// Copyright (c) 2013, The Linux Foundation. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_td4320 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct regulator_bulk_data boe_td4320_supplies[] = {
+ { .supply = "iovcc" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct boe_td4320 *to_boe_td4320(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_td4320, panel);
+}
+
+static void boe_td4320_reset(struct boe_td4320 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(30);
+}
+
+static int boe_td4320_on(struct boe_td4320 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x04);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd6, 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb8,
+ 0x19, 0x55, 0x00, 0xbe, 0x00, 0x00,
+ 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb9,
+ 0x4d, 0x55, 0x05, 0xe6, 0x00, 0x02,
+ 0x03);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xba,
+ 0x9b, 0x5b, 0x07, 0xe6, 0x00, 0x13,
+ 0x00);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf9,
+ 0x44, 0x3f, 0x00, 0x8d, 0xbf);
+ mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xce,
+ 0x5d, 0x00, 0x0f, 0x1f, 0x2f, 0x3f,
+ 0x4f, 0x5f, 0x6f, 0x7f, 0x8f, 0x9f,
+ 0xaf, 0xbf, 0xcf, 0xdf, 0xef, 0xff,
+ 0x04, 0x00, 0x02, 0x02, 0x42, 0x01,
+ 0x69, 0x5a, 0x40, 0x40, 0x00, 0x00,
+ 0x04, 0xfa, 0x00);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x00b8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 96);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int boe_td4320_off(struct boe_td4320 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int boe_td4320_prepare(struct drm_panel *panel)
+{
+ struct boe_td4320 *ctx = to_boe_td4320(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ boe_td4320_reset(ctx);
+
+ ret = boe_td4320_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int boe_td4320_unprepare(struct drm_panel *panel)
+{
+ struct boe_td4320 *ctx = to_boe_td4320(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = boe_td4320_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(boe_td4320_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode boe_td4320_mode = {
+ .clock = (1080 + 86 + 2 + 100) * (2340 + 4 + 4 + 60) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 86,
+ .hsync_end = 1080 + 86 + 2,
+ .htotal = 1080 + 86 + 2 + 100,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 4,
+ .vsync_end = 2340 + 4 + 4,
+ .vtotal = 2340 + 4 + 4 + 60,
+ .width_mm = 67,
+ .height_mm = 145,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int boe_td4320_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &boe_td4320_mode);
+}
+
+static const struct drm_panel_funcs boe_td4320_panel_funcs = {
+ .prepare = boe_td4320_prepare,
+ .unprepare = boe_td4320_unprepare,
+ .get_modes = boe_td4320_get_modes,
+};
+
+static int boe_td4320_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct boe_td4320 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct boe_td4320, panel,
+ &boe_td4320_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(boe_td4320_supplies),
+ boe_td4320_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void boe_td4320_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_td4320 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id boe_td4320_of_match[] = {
+ { .compatible = "boe,td4320" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_td4320_of_match);
+
+static struct mipi_dsi_driver boe_td4320_driver = {
+ .probe = boe_td4320_probe,
+ .remove = boe_td4320_remove,
+ .driver = {
+ .name = "panel-boe-td4320",
+ .of_match_table = boe_td4320_of_match,
+ },
+};
+module_mipi_dsi_driver(boe_td4320_driver);
+
+MODULE_AUTHOR("Barnabas Czeman <barnabas.czeman@mainlining.org>");
+MODULE_DESCRIPTION("DRM driver for boe td4320 fhdplus video mode dsi panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
index 0b87f1e6ecae..f33d4f855929 100644
--- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -349,9 +349,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
const struct panel_desc *desc;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct boe_th101mb31ig002, panel,
+ &boe_th101mb31ig002_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -383,9 +385,6 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, ret,
"Failed to get orientation\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &boe_th101mb31ig002_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
index 50e4a5341bc6..20b6e11a7d84 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-ll2.c
@@ -166,9 +166,11 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
struct boe_tv101wum_ll2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct boe_tv101wum_ll2, panel,
+ &boe_tv101wum_ll2_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(boe_tv101wum_ll2_supplies),
@@ -190,8 +192,6 @@ static int boe_tv101wum_ll2_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_VIDEO_HSE;
- drm_panel_init(&ctx->panel, dev, &boe_tv101wum_ll2_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index 6b3f4d664d2a..ae6e9ffc46cb 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -511,9 +511,10 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dev_dbg(dev, "probe\n");
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ ddata = devm_drm_panel_alloc(dev, struct panel_drv_data, panel,
+ &dsicm_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ddata))
+ return PTR_ERR(ddata);
mipi_dsi_set_drvdata(dsi, ddata);
ddata->dsi = dsi;
@@ -530,9 +531,6 @@ static int dsicm_probe(struct mipi_dsi_device *dsi)
dsicm_hw_reset(ddata);
- drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (ddata->use_dsi_backlight) {
struct backlight_properties props = { 0 };
props.max_brightness = 255;
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index 0bfed0ec0bbc..fb9f9f42be4f 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -163,9 +163,11 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
struct ebbg_ft8719 *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ebbg_ft8719, panel,
+ &ebbg_ft8719_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
ctx->supplies[i].supply = regulator_names[i];
@@ -196,9 +198,6 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &ebbg_ft8719_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 52028c8f8988..90e8c154a978 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -839,9 +839,10 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
struct device_node *ddc;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct panel_edp, base,
+ &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->prepared_time = 0;
panel->desc = desc;
@@ -886,8 +887,6 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
dev_set_drvdata(dev, panel);
- drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
goto err_finished_ddc_init;
@@ -1763,6 +1762,13 @@ static const struct panel_delay delay_80_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_80_500_e80_p2e200 = {
+ .hpd_absent = 80,
+ .unprepare = 500,
+ .enable = 80,
+ .prepare_to_enable = 200,
+};
+
static const struct panel_delay delay_100_500_e200 = {
.hpd_absent = 100,
.unprepare = 500,
@@ -1878,6 +1884,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1938,6 +1945,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, &delay_200_500_e80, "NE140WUM-N6S"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1973,6 +1981,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c
new file mode 100644
index 000000000000..fb302d1f91b9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c
@@ -0,0 +1,1296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Himax HX8279 DriverIC panels driver
+ *
+ * Copyright (c) 2025 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+/* Page selection */
+#define HX8279_REG_PAGE 0xb0
+ #define HX8279_PAGE_SEL GENMASK(3, 0)
+
+/* Page 0 - Driver/Module Configuration */
+#define HX8279_P0_VGHS 0xbf
+#define HX8279_P0_VGLS 0xc0
+#define HX8279_P0_VGPHS 0xc2
+#define HX8279_P0_VGNHS 0xc4
+ #define HX8279_P0_VG_SEL GENMASK(4, 0)
+ #define HX8279_VGH_MIN_MV 8700
+ #define HX8279_VGH_STEP_MV 300
+ #define HX8279_VGL_MIN_MV 6700
+ #define HX8279_VGL_STEP_MV 300
+ #define HX8279_VGPNH_MIN_MV 4000
+ #define HX8279_VGPNX_STEP_MV 50
+ #define HX8279_VGH_VOLT_SEL(x) (((x) - HX8279_VGH_MIN_MV) / HX8279_VGH_STEP_MV)
+ #define HX8279_VGL_VOLT_SEL(x) (((x) - HX8279_VGL_MIN_MV) / HX8279_VGL_STEP_MV)
+ #define HX8279_VGPN_VOLT_SEL(x) (((x) - HX8279_VGPNH_MIN_MV) / HX8279_VGPNX_STEP_MV)
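+ /*
+ * Example, with values from the descriptors below: vgh_mv = 16500
+ * gives HX8279_VGH_VOLT_SEL(16500) = (16500 - 8700) / 300 = 26.
+ */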
+
+/* Page 1 - Gate driver On Array (GOA) Mux */
+#define HX8279_P1_REG_GOA_L 0xc0
+#define HX8279_P1_REG_GOUTL(x) (HX8279_P1_REG_GOA_L + (x))
+#define HX8279_P1_REG_GOA_R 0xd4
+#define HX8279_P1_REG_GOUTR(x) (HX8279_P1_REG_GOA_R + (x))
+ #define HX8279_GOUT_STB GENMASK(7, 6)
+ #define HX8279_GOUT_SEL GENMASK(5, 0)
+
+/* Page 2 - Analog Gamma Configuration */
+#define HX8279_P2_REG_ANALOG_GAMMA 0xc0
+ #define HX8279_P2_REG_GAMMA_T_PVP(x) (HX8279_P2_REG_ANALOG_GAMMA + (x)) /* 0..16 */
+ #define HX8279_P2_REG_GAMMA_T_PVN(x) (HX8279_P2_REG_GAMMA_T_PVP(17) + (x)) /* 0..16 */
+
+/* Page 3 - Gate driver On Array (GOA) Configuration */
+#define HX8279_P3_REG_UNKNOWN_BA 0xba
+#define HX8279_P3_REG_GOA_CKV_FALL_PREC 0xbc
+#define HX8279_P3_REG_GOA_TIMING_ODD 0xc2
+ #define HX8279_P3_REG_GOA_TO(x) (HX8279_P3_REG_GOA_TIMING_ODD + (x)) /* GOA_T0..5 */
+#define HX8279_P3_REG_GOA_STVL 0xc8
+ #define HX8279_P3_GOA_STV_LEAD GENMASK(4, 0)
+#define HX8279_P3_REG_GOA_CKVL 0xc9
+ #define HX8279_P3_GOA_CKV_LEAD GENMASK(4, 0)
+#define HX8279_P3_REG_GOA_CKVD 0xca
+ #define HX8279_P3_GOA_CKV_NONOVERLAP BIT(7)
+ #define HX8279_P3_GOA_CKV_RESERVED BIT(6)
+ #define HX8279_P3_GOA_CKV_DUMMY GENMASK(5, 0)
+#define HX8279_P3_REG_GOA_CKV_RISE_PREC 0xcb
+#define HX8279_P3_REG_GOA_CLR1_W_ADJ 0xd2
+#define HX8279_P3_REG_GOA_CLR234_W_ADJ 0xd3
+#define HX8279_P3_REG_GOA_CLR1_CFG 0xd4
+#define HX8279_P3_REG_GOA_CLR_CFG(x) (HX8279_P3_REG_GOA_CLR1_CFG + (x)) /* CLR1..4 */
+ #define HX8279_P3_GOA_CLR_CFG_POLARITY BIT(7)
+ #define HX8279_P3_GOA_CLR_CFG_STARTPOS GENMASK(6, 0)
+#define HX8279_P3_REG_GOA_TIMING_EVEN 0xdd
+ #define HX8279_P3_REG_GOA_TE(x) (HX8279_P3_REG_GOA_TIMING_EVEN + (x))
+#define HX8279_P3_REG_UNKNOWN_E4 0xe4
+#define HX8279_P3_REG_UNKNOWN_E5 0xe5
+
+/* Page 5 - MIPI */
+#define HX8279_P5_REG_TIMING 0xb3
+ #define HX8279_P5_TIMING_THS_SETTLE GENMASK(7, 5)
+ #define HX8279_P5_TIMING_LHS_SETTLE BIT(4)
+ #define HX8279_P5_TIMING_TLPX GENMASK(3, 0)
+#define HX8279_P5_REG_UNKNOWN_B8 0xb8
+#define HX8279_P5_REG_UNKNOWN_BC 0xbc
+#define HX8279_P5_REG_UNKNOWN_D6 0xd6
+
+/* Page 6 - Engineer */
+#define HX8279_P6_REG_ENGINEER_PWD 0xb8
+#define HX8279_P6_REG_INHOUSE_FUNC 0xc0
+ #define HX8279_P6_ENG_UNLOCK_WORD 0xa5
+#define HX8279_P6_REG_GAMMA_CHOPPER 0xbc
+ #define HX8279_P6_GAMMA_POCGM_CTL GENMASK(6, 4)
+ #define HX8279_P6_GAMMA_POGCMD_CTL GENMASK(2, 0)
+#define HX8279_P6_REG_VOLT_ADJ 0xc7
+ /* For VCCIFS and VCCS - 0: 1450, 1: 1500, 2: 1550, 3: 1600 mV */
+ #define HX8279_P6_VOLT_ADJ_VCCIFS GENMASK(3, 2)
+ #define HX8279_P6_VOLT_ADJ_VCCS GENMASK(1, 0)
+#define HX8279_P6_REG_DLY_TIME_ADJ 0xd5
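+ /*
+ * Example, from a descriptor below: volt_adj =
+ * FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCIFS, 3) |
+ * FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCS, 3) = 0x0f, i.e. both
+ * VCCIFS and VCCS at the maximum step (1600).
+ */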
+
+/* Page 7...12 - Digital Gamma Adjustment */
+#define HX8279_PG_DIGITAL_GAMMA 0xb1
+#define HX8279_DGAMMA_DGMA1_HI GENMASK(7, 6)
+#define HX8279_DGAMMA_DGMA2_HI GENMASK(5, 4)
+#define HX8279_DGAMMA_DGMA3_HI GENMASK(3, 2)
+#define HX8279_DGAMMA_DGMA4_HI GENMASK(1, 0)
+#define HX8279_PG_DGAMMA_NUM_LO_BYTES 24
+#define HX8279_PG_DGAMMA_NUM_HI_BYTES 6
+
+struct hx8279 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+ struct regulator_bulk_data vregs[2];
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+ const struct hx8279_panel_desc *desc;
+ u8 last_page;
+ bool skip_voltage_config;
+ bool skip_goa_config;
+ bool skip_goa_timing;
+ bool skip_goa_even_timing;
+ bool skip_mipi_timing;
+};
+
+struct hx8279_panel_mode {
+ const struct drm_display_mode mode;
+ u8 bpc;
+ bool is_video_mode;
+};
+
+/**
+ * struct hx8279_goa_mux - Gate driver On Array Muxer
+ * @gout_l: Mux GOA signal to GOUT Left pin
+ * @gout_r: Mux GOA signal to GOUT Right pin
+ */
+struct hx8279_goa_mux {
+ u8 gout_l[20];
+ u8 gout_r[20];
+};
+
+/**
+ * struct hx8279_analog_gamma - Analog Gamma Adjustment
+ * @pos: Positive gamma op's input voltage, adjusted by VGP(H/L)
+ * @neg: Negative gamma op's input voltage, adjusted by VGN(H/L)
+ *
+ * Analog Gamma correction is performed with 17+17 reference voltages,
+ * changed with resistor streams, and defined with 17 register values
+ * for positive and 17 for negative.
+ *
+ * Each register holds a resistance value, in units of 8.5 ohms, for
+ * one of the following gamma levels:
+ * 0, 8, 16, 28, 40, 56, 80, 128, 176, 200, 216, 228, 240, 248, 252, 255.
+ */
+struct hx8279_analog_gamma {
+ u8 pos[17];
+ u8 neg[17];
+};
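+
+/*
+ * For illustration: an entry of 0x2c (44) in one of the arrays above
+ * selects 44 * 8.5 = 374 ohms for that gamma tap.
+ */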
+
+/**
+ * struct hx8279_digital_gamma - Digital Gamma Adjustment
+ * @r: Adjustment for red component
+ * @g: Adjustment for green component
+ * @b: Adjustment for blue component
+ *
+ * The layout of this structure follows the register layout to simplify
+ * both the handling and the declaration of those values in the driver.
+ * Gamma correction is internally done with a 24-segment piecewise
+ * linear interpolation; the segments are defined with 24 ten-bit
+ * values, of which:
+ * - The LOW eight bits for the first 24 registers start at the first
+ * register (at 0xb1) of the Digital Gamma Adjustment page;
+ * - The HIGH two bits for each of the 24 registers are contained
+ * in the last six registers;
+ * - The last six registers each contain four groups of two-bit HI
+ * values for the first 24 registers, in an inverted fashion: the
+ * first two bits relate to the last register of a set of four.
+ *
+ * The 24 segments refer to the following gamma levels:
+ * 0, 1, 3, 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160,
+ * 192, 208, 224, 232, 240, 244, 248, 252, 254, 255
+ */
+struct hx8279_digital_gamma {
+ u8 r[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+ u8 g[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+ u8 b[HX8279_PG_DGAMMA_NUM_LO_BYTES + HX8279_PG_DGAMMA_NUM_HI_BYTES];
+};
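+
+/*
+ * For illustration, reconstructing the first 10-bit value of a
+ * component from the layout described above: with the low byte at
+ * r[0] and the first HI byte at r[24],
+ *
+ * val0 = (FIELD_GET(HX8279_DGAMMA_DGMA1_HI, r[24]) << 8) | r[0];
+ *
+ * and r[1]..r[3] take DGMA2_HI..DGMA4_HI from the same byte (see
+ * hx8279_check_dig_gamma() for the full decoding loop).
+ */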
+
+struct hx8279_panel_desc {
+ const struct mipi_dsi_device_info dsi_info;
+ const struct hx8279_panel_mode *mode_data;
+ u8 num_lanes;
+ u8 num_modes;
+
+ /* Page 0 */
+ unsigned int vgh_mv;
+ unsigned int vgl_mv;
+ unsigned int vgph_mv;
+ unsigned int vgnh_mv;
+
+ /* Page 1 */
+ const struct hx8279_goa_mux *gmux;
+
+ /* Page 2 */
+ const struct hx8279_analog_gamma *agamma;
+
+ /* Page 3 */
+ u8 goa_unk_ba;
+ u8 goa_odd_timing[6];
+ u8 goa_even_timing[6];
+ u8 goa_stv_lead_time_ck;
+ u8 goa_ckv_lead_time_ck;
+ u8 goa_ckv_dummy_vblank_num;
+ u8 goa_ckv_rise_precharge;
+ u8 goa_ckv_fall_precharge;
+ bool goa_ckv_non_overlap_ctl;
+ u8 goa_clr1_width_adj;
+ u8 goa_clr234_width_adj;
+ s8 goa_clr_polarity[4];
+ int goa_clr_start_pos[4];
+ u8 goa_unk_e4;
+ u8 goa_unk_e5;
+
+ /* Page 5 */
+ u8 bta_tlpx;
+ bool lhs_settle_time_by_osc25;
+ u8 ths_settle_time;
+ u8 timing_unk_b8;
+ u8 timing_unk_bc;
+ u8 timing_unk_d6;
+
+ /* Page 6 */
+ u8 gamma_ctl;
+ u8 volt_adj;
+ u8 src_delay_time_adj_ck;
+
+ /* Page 7..12 */
+ const struct hx8279_digital_gamma *dgamma;
+};
+
+static inline struct hx8279 *to_hx8279(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx8279, panel);
+}
+
+static void hx8279_set_page(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx, u8 page)
+{
+ const u8 cmd_set_page[] = { HX8279_REG_PAGE, page };
+
+ if (hx->last_page == page)
+ return;
+
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_page, ARRAY_SIZE(cmd_set_page));
+ if (!dsi_ctx->accum_err)
+ hx->last_page = page;
+}
+
+static void hx8279_set_module_config(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_voltage[2];
+
+ if (hx->skip_voltage_config)
+ return;
+
+ /* Page 0 - Driver/Module Configuration */
+ hx8279_set_page(hx, dsi_ctx, 0);
+
+ if (desc->vgh_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGHS;
+ cmd_set_voltage[1] = HX8279_VGH_VOLT_SEL(desc->vgh_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgl_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGLS;
+ cmd_set_voltage[1] = HX8279_VGL_VOLT_SEL(desc->vgl_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgph_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGPHS;
+ cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgph_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+
+ if (desc->vgnh_mv) {
+ cmd_set_voltage[0] = HX8279_P0_VGNHS;
+ cmd_set_voltage[1] = HX8279_VGPN_VOLT_SEL(desc->vgnh_mv);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_voltage,
+ ARRAY_SIZE(cmd_set_voltage));
+ }
+}
+
+static void hx8279_set_gmux(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_goa_mux *gmux = hx->desc->gmux;
+ u8 cmd_set_gmux[2];
+ int i;
+
+ if (!gmux)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 1);
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) {
+ cmd_set_gmux[0] = HX8279_P1_REG_GOUTL(i);
+ cmd_set_gmux[1] = gmux->gout_l[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux,
+ ARRAY_SIZE(cmd_set_gmux));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) {
+ cmd_set_gmux[0] = HX8279_P1_REG_GOUTR(i);
+ cmd_set_gmux[1] = gmux->gout_r[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gmux,
+ ARRAY_SIZE(cmd_set_gmux));
+ }
+}
+
+static void hx8279_set_analog_gamma(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_analog_gamma *agamma = hx->desc->agamma;
+ u8 cmd_set_ana_gamma[2];
+ int i;
+
+ if (!agamma)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 2);
+
+ for (i = 0; i < ARRAY_SIZE(agamma->pos); i++) {
+ cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVP(i);
+ cmd_set_ana_gamma[1] = agamma->pos[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma,
+ ARRAY_SIZE(cmd_set_ana_gamma));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(agamma->neg); i++) {
+ cmd_set_ana_gamma[0] = HX8279_P2_REG_GAMMA_T_PVN(i);
+ cmd_set_ana_gamma[1] = agamma->neg[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_ana_gamma,
+ ARRAY_SIZE(cmd_set_ana_gamma));
+ }
+}
+
+static void hx8279_set_goa_timing(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_goa_t[2];
+ int i;
+
+ if (hx->skip_goa_timing)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 3);
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) {
+ cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TO(i);
+ cmd_set_goa_t[1] = desc->goa_odd_timing[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t,
+ ARRAY_SIZE(cmd_set_goa_t));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) {
+ cmd_set_goa_t[0] = HX8279_P3_REG_GOA_TE(i);
+ cmd_set_goa_t[1] = desc->goa_even_timing[i];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa_t,
+ ARRAY_SIZE(cmd_set_goa_t));
+ }
+}
+
+static void hx8279_set_goa_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_goa[2];
+ int i;
+
+ if (hx->skip_goa_config)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 3);
+
+ if (desc->goa_unk_ba) {
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_BA;
+ cmd_set_goa[1] = desc->goa_unk_ba;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_stv_lead_time_ck) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_STVL;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_STV_LEAD,
+ desc->goa_stv_lead_time_ck);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_ckv_lead_time_ck) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVL;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_LEAD,
+ desc->goa_ckv_lead_time_ck);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_ckv_dummy_vblank_num) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKVD;
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CKV_DUMMY,
+ desc->goa_ckv_dummy_vblank_num);
+ cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CKV_NONOVERLAP,
+ desc->goa_ckv_non_overlap_ctl);
+ /* RESERVED must always be set */
+ cmd_set_goa[1] |= HX8279_P3_GOA_CKV_RESERVED;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ /*
+ * If either of the two is non-zero, we want to write both of
+ * them; the register default is zero in any case.
+ */
+ if (desc->goa_ckv_rise_precharge || desc->goa_ckv_fall_precharge) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_RISE_PREC;
+ cmd_set_goa[1] = desc->goa_ckv_rise_precharge;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CKV_FALL_PREC;
+ cmd_set_goa[1] = desc->goa_ckv_fall_precharge;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_clr1_width_adj) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR1_W_ADJ;
+ cmd_set_goa[1] = desc->goa_clr1_width_adj;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_clr234_width_adj) {
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR234_W_ADJ;
+ cmd_set_goa[1] = desc->goa_clr234_width_adj;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ /* Polarity and Start Position arrays are of the same size */
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) {
+ if (desc->goa_clr_polarity[i] < 0 || desc->goa_clr_start_pos[i] < 0)
+ continue;
+
+ cmd_set_goa[0] = HX8279_P3_REG_GOA_CLR_CFG(i);
+ cmd_set_goa[1] = FIELD_PREP(HX8279_P3_GOA_CLR_CFG_STARTPOS,
+ desc->goa_clr_start_pos[i]);
+ cmd_set_goa[1] |= FIELD_PREP(HX8279_P3_GOA_CLR_CFG_POLARITY,
+ desc->goa_clr_polarity[i]);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ if (desc->goa_unk_e4) {
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E4;
+ cmd_set_goa[1] = desc->goa_unk_e4;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+ }
+
+ cmd_set_goa[0] = HX8279_P3_REG_UNKNOWN_E5;
+ cmd_set_goa[1] = desc->goa_unk_e5;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_goa,
+ ARRAY_SIZE(cmd_set_goa));
+}
+
+static void hx8279_set_mipi_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ u8 cmd_set_mipi[2];
+
+ if (hx->skip_mipi_timing)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 5);
+
+ if (desc->bta_tlpx || desc->ths_settle_time || desc->lhs_settle_time_by_osc25) {
+ cmd_set_mipi[0] = HX8279_P5_REG_TIMING;
+ cmd_set_mipi[1] = FIELD_PREP(HX8279_P5_TIMING_TLPX, desc->bta_tlpx);
+ cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_THS_SETTLE,
+ desc->ths_settle_time);
+ cmd_set_mipi[1] |= FIELD_PREP(HX8279_P5_TIMING_LHS_SETTLE,
+ desc->lhs_settle_time_by_osc25);
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_b8) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_B8;
+ cmd_set_mipi[1] = desc->timing_unk_b8;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_bc) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_BC;
+ cmd_set_mipi[1] = desc->timing_unk_bc;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+
+ if (desc->timing_unk_d6) {
+ cmd_set_mipi[0] = HX8279_P5_REG_UNKNOWN_D6;
+ cmd_set_mipi[1] = desc->timing_unk_d6;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_mipi,
+ ARRAY_SIZE(cmd_set_mipi));
+ }
+}
+
+static void hx8279_set_adv_cfg(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ const u8 cmd_set_dly[] = { HX8279_P6_REG_DLY_TIME_ADJ, desc->src_delay_time_adj_ck };
+ const u8 cmd_set_gamma[] = { HX8279_P6_REG_GAMMA_CHOPPER, desc->gamma_ctl };
+ const u8 cmd_set_volt_adj[] = { HX8279_P6_REG_VOLT_ADJ, desc->volt_adj };
+ u8 cmd_set_eng[] = { HX8279_P6_REG_ENGINEER_PWD, HX8279_P6_ENG_UNLOCK_WORD };
+
+ if (!desc->gamma_ctl && !desc->src_delay_time_adj_ck && !desc->volt_adj)
+ return;
+
+ hx8279_set_page(hx, dsi_ctx, 6);
+
+ /* Unlock ENG settings: write same word to both ENGINEER_PWD and INHOUSE_FUNC */
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ /* Set Gamma Chopper and Gamma buffer Chopper control */
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_gamma, ARRAY_SIZE(cmd_set_gamma));
+
+ /* Set Source delay time adjustment (CKV falling to Source off) */
+ if (desc->src_delay_time_adj_ck)
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dly,
+ ARRAY_SIZE(cmd_set_dly));
+
+ /* Set voltage adjustment */
+ if (desc->volt_adj)
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_volt_adj,
+ ARRAY_SIZE(cmd_set_volt_adj));
+
+ /* Lock ENG settings again */
+ cmd_set_eng[0] = HX8279_P6_REG_ENGINEER_PWD;
+ cmd_set_eng[1] = 0;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+
+ cmd_set_eng[0] = HX8279_P6_REG_INHOUSE_FUNC;
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_eng, ARRAY_SIZE(cmd_set_eng));
+}
+
+static void hx8279_set_digital_gamma(struct hx8279 *hx,
+ struct mipi_dsi_multi_context *dsi_ctx)
+{
+ const struct hx8279_digital_gamma *dgamma = hx->desc->dgamma;
+ u8 cmd_set_dig_gamma[2];
+ int i, j;
+
+ if (!dgamma)
+ return;
+
+ /*
+ * Pages 7..9 are for RGB Positive, 10..12 are for RGB Negative:
+ * The first iteration sets all positive component registers,
+ * the second one sets all negatives.
+ */
+ for (i = 0; i < 2; i++) {
+ u8 pg_neg = i * 3;
+
+ hx8279_set_page(hx, dsi_ctx, 7 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->r); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->r[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+
+ hx8279_set_page(hx, dsi_ctx, 8 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->g); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->g[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+
+ hx8279_set_page(hx, dsi_ctx, 9 + pg_neg);
+
+ for (j = 0; j < ARRAY_SIZE(dgamma->b); j++) {
+ cmd_set_dig_gamma[0] = HX8279_PG_DIGITAL_GAMMA + j;
+ cmd_set_dig_gamma[1] = dgamma->b[j];
+ mipi_dsi_generic_write_multi(dsi_ctx, cmd_set_dig_gamma,
+ ARRAY_SIZE(cmd_set_dig_gamma));
+ }
+ }
+}
+
+static int hx8279_on(struct hx8279 *hx)
+{
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ /* Page 5 */
+ hx8279_set_mipi_cfg(hx, &dsi_ctx);
+
+ /* Page 1 */
+ hx8279_set_gmux(hx, &dsi_ctx);
+
+ /* Page 2 */
+ hx8279_set_analog_gamma(hx, &dsi_ctx);
+
+ /* Page 3 */
+ hx8279_set_goa_cfg(hx, &dsi_ctx);
+ hx8279_set_goa_timing(hx, &dsi_ctx);
+
+ /* Page 0 - Driver/Module Configuration */
+ hx8279_set_module_config(hx, &dsi_ctx);
+
+ /* Page 6 */
+ hx8279_set_adv_cfg(hx, &dsi_ctx);
+
+ /* Pages 7..12 */
+ hx8279_set_digital_gamma(hx, &dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static void hx8279_power_off(struct hx8279 *hx)
+{
+ gpiod_set_value_cansleep(hx->reset_gpio, 0);
+ usleep_range(100, 500);
+ gpiod_set_value_cansleep(hx->enable_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(hx->vregs), hx->vregs);
+}
+
+static int hx8279_disable(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+
+ return 0;
+}
+
+static int hx8279_enable(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return 0;
+}
+
+static int hx8279_prepare(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hx->vregs), hx->vregs);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(hx->enable_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(hx->reset_gpio, 1);
+ usleep_range(6000, 7000);
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ if (hx->dsi[1])
+ hx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = hx8279_on(hx);
+ if (ret) {
+ hx8279_power_off(hx);
+ return ret;
+ }
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 130);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_unprepare(struct drm_panel *panel)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ struct mipi_dsi_device *dsi = hx->dsi[0];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 130);
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ if (hx->dsi[1])
+ hx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ hx8279_power_off(hx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int hx8279_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct hx8279 *hx = to_hx8279(panel);
+ int i;
+
+ for (i = 0; i < hx->desc->num_modes; i++) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &hx->desc->mode_data[i].mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+ if (hx->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = hx->desc->mode_data[0].bpc;
+ connector->display_info.height_mm = hx->desc->mode_data[0].mode.height_mm;
+ connector->display_info.width_mm = hx->desc->mode_data[0].mode.width_mm;
+
+ return hx->desc->num_modes;
+}
+
+static const struct drm_panel_funcs hx8279_panel_funcs = {
+ .disable = hx8279_disable,
+ .unprepare = hx8279_unprepare,
+ .prepare = hx8279_prepare,
+ .enable = hx8279_enable,
+ .get_modes = hx8279_get_modes,
+};
+
+static int hx8279_init_vregs(struct hx8279 *hx, struct device *dev)
+{
+ int ret;
+
+ hx->vregs[0].supply = "vdd";
+ hx->vregs[1].supply = "iovcc";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hx->vregs),
+ hx->vregs);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_is_supported_voltage(hx->vregs[0].consumer,
+ 3000000, 5000000);
+ if (!ret)
+ return -EINVAL;
+
+ ret = regulator_is_supported_voltage(hx->vregs[1].consumer,
+ 1700000, 1900000);
+ if (!ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hx8279_check_gmux_config(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ const struct hx8279_goa_mux *gmux = desc->gmux;
+ int i;
+
+ /* No gmux defined means we simply skip the GOA mux configuration */
+ if (!gmux)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_l); i++) {
+ if (gmux->gout_l[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value found in gout_l[%d]\n", i);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gmux->gout_r); i++) {
+ if (gmux->gout_r[i] > (HX8279_GOUT_STB | HX8279_GOUT_SEL))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value found in gout_r[%d]\n", i);
+ }
+
+ return 0;
+}
+
+static int hx8279_check_goa_config(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ bool goa_odd_valid, goa_even_valid;
+ int i, num_zero, num_clr = 0;
+
+ /* Up to 4 zero values is a valid configuration. Check them all. */
+ num_zero = 1;
+ for (i = 0; i < ARRAY_SIZE(desc->goa_odd_timing); i++) {
+ if (desc->goa_odd_timing[i])
+ num_zero++;
+ }
+
+ goa_odd_valid = (num_zero != ARRAY_SIZE(desc->goa_odd_timing));
+
+ /* Up to 3 zeroes is a valid config. Check them all. */
+ num_zero = 1;
+ for (i = 0; i < ARRAY_SIZE(desc->goa_even_timing); i++) {
+ if (desc->goa_even_timing[i])
+ num_zero++;
+ }
+
+ goa_even_valid = (num_zero != ARRAY_SIZE(desc->goa_even_timing));
+
+ /* Programming one without the other would make no sense! */
+ if (goa_odd_valid != goa_even_valid)
+ return -EINVAL;
+
+ /* We know that both are either true or false now, check just one */
+ if (!goa_odd_valid)
+ hx->skip_goa_timing = true;
+
+ if (!desc->goa_unk_ba && !desc->goa_stv_lead_time_ck &&
+ !desc->goa_ckv_lead_time_ck && !desc->goa_ckv_dummy_vblank_num &&
+ !desc->goa_ckv_rise_precharge && !desc->goa_ckv_fall_precharge &&
+ !desc->goa_clr1_width_adj && !desc->goa_clr234_width_adj &&
+ !desc->goa_unk_e4 && !desc->goa_unk_e5) {
+ hx->skip_goa_config = true;
+ return 0;
+ }
+
+ if ((desc->goa_stv_lead_time_ck > HX8279_P3_GOA_STV_LEAD) ||
+ (desc->goa_ckv_lead_time_ck > HX8279_P3_GOA_CKV_LEAD) ||
+ (desc->goa_ckv_dummy_vblank_num > HX8279_P3_GOA_CKV_DUMMY))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid lead timings in GOA config\n");
+
+ /*
+ * Don't perform zero check for polarity and start position, as
+ * both pol=0 and start=0 are valid configuration values.
+ */
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_start_pos); i++) {
+ if (desc->goa_clr_start_pos[i] < 0)
+ continue;
+ else if (desc->goa_clr_start_pos[i] > HX8279_P3_GOA_CLR_CFG_STARTPOS)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid start position for CLR%d\n", i + 1);
+ else
+ num_clr++;
+ }
+ if (!num_clr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(desc->goa_clr_polarity); i++) {
+ if (num_clr < 0)
+ return -EINVAL;
+
+ if (desc->goa_clr_polarity[i] < 0)
+ continue;
+ else if (desc->goa_clr_polarity[i] > 1)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid polarity for CLR%d\n", i + 1);
+ else
+ num_clr--;
+ }
+
+ return 0;
+}
+
+static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u8 *component)
+{
+ u8 gamma_high_bits[4];
+ u16 prev_val = 0;
+ int i, j, k, x;
+
+ /*
+ * The gamma values are 10 bits long and shall be non-decreasing,
+ * so as to form a digital gamma correction reference curve.
+ *
+ * As for the register format: the first 24 bytes each contain the
+ * lowest 8 bits of one gamma level reference, and the last
+ * 6 bytes contain the high two bits of 4 registers at a time, where
+ * the first two bits are relative to the last register, and the last
+ * two are relative to the first register.
+ *
+ * Put another way, these are the first four LOW values:
+ * DGMA1_LO = 0xb1, DGMA2_LO = 0xb2, DGMA3_LO = 0xb3, DGMA4_LO = 0xb4
+ *
+ * The high values for those four are at DGMA1_4_HI = 0xc9;
+ * ...and DGMA1_4_HI's data contains the following bits:
+ * [1:0] = DGMA4_HI, [3:2] = DGMA3_HI, [5:4] = DGMA2_HI, [7:6] = DGMA1_HI
+ */
+ for (i = 0; i < HX8279_PG_DGAMMA_NUM_HI_BYTES; i++) {
+ k = HX8279_PG_DGAMMA_NUM_LO_BYTES + i;
+ j = i * 4;
+ x = 0;
+
+ gamma_high_bits[0] = FIELD_GET(HX8279_DGAMMA_DGMA1_HI, component[k]);
+ gamma_high_bits[1] = FIELD_GET(HX8279_DGAMMA_DGMA2_HI, component[k]);
+ gamma_high_bits[2] = FIELD_GET(HX8279_DGAMMA_DGMA3_HI, component[k]);
+ gamma_high_bits[3] = FIELD_GET(HX8279_DGAMMA_DGMA4_HI, component[k]);
+
+ do {
+ u16 cur_val = component[j] | (gamma_high_bits[x] << 8);
+
+ if (cur_val < prev_val)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid dgamma values: %u < %u!\n",
+ cur_val, prev_val);
+ prev_val = cur_val;
+ j++;
+ x++;
+ } while (x < 4);
+ }
+
+ return 0;
+}
+
+static int hx8279_check_params(struct hx8279 *hx, struct device *dev)
+{
+ const struct hx8279_panel_desc *desc = hx->desc;
+ int ret;
+
+ /* Voltages config validation */
+ if (!desc->vgh_mv && !desc->vgl_mv && !desc->vgph_mv && !desc->vgnh_mv)
+ hx->skip_voltage_config = true;
+ else if ((desc->vgh_mv && desc->vgh_mv < HX8279_VGH_MIN_MV) ||
+ (desc->vgl_mv && desc->vgl_mv < HX8279_VGL_MIN_MV) ||
+ (desc->vgph_mv && desc->vgph_mv < HX8279_VGPNH_MIN_MV) ||
+ (desc->vgnh_mv && desc->vgnh_mv < HX8279_VGPNH_MIN_MV))
+ return -EINVAL;
+
+ /* GOA Muxing validation */
+ ret = hx8279_check_gmux_config(hx, dev);
+ if (ret)
+ return ret;
+
+ /* GOA Configuration validation */
+ ret = hx8279_check_goa_config(hx, dev);
+ if (ret)
+ return ret;
+
+ /* MIPI Configuration validation */
+ if (!desc->bta_tlpx && !desc->lhs_settle_time_by_osc25 &&
+ !desc->ths_settle_time && !desc->timing_unk_b8 &&
+ !desc->timing_unk_bc && !desc->timing_unk_d6)
+ hx->skip_mipi_timing = true;
+
+ /* ENG/Gamma Configuration validation */
+ if (desc->gamma_ctl > (HX8279_P6_GAMMA_POCGM_CTL | HX8279_P6_GAMMA_POGCMD_CTL))
+ return -EINVAL;
+
+ /* Digital Gamma values validation */
+ if (desc->dgamma) {
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->r);
+ if (ret)
+ return ret;
+
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->g);
+ if (ret)
+ return ret;
+
+ ret = hx8279_check_dig_gamma(hx, dev, desc->dgamma->b);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hx8279_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_r;
+ struct hx8279 *hx;
+ int i, ret;
+
+ hx = devm_drm_panel_alloc(dev, struct hx8279, panel,
+ &hx8279_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(hx))
+ return PTR_ERR(hx);
+
+ ret = hx8279_init_vregs(hx, dev);
+ if (ret)
+ return ret;
+
+ hx->desc = device_get_match_data(dev);
+ if (!hx->desc)
+ return -ENODEV;
+
+ /*
+ * In some DriverICs some or all fields may be OTP: perform a
+ * basic configuration check before writing, to help avoid
+ * irreparable mistakes.
+ *
+ * Please note that this is not perfect and only checks whether
+ * the values are plausible; values that are wrong for a specific
+ * display, but still plausible for the DriverIC configuration,
+ * will be accepted.
+ */
+ ret = hx8279_check_params(hx, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Invalid DriverIC configuration\n");
+
+ /* The enable line may be always tied to VCCIO, so this is optional */
+ hx->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_ASIS);
+ if (IS_ERR(hx->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(hx->enable_gpio),
+ "Failed to get enable GPIO\n");
+
+ hx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(hx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(hx->reset_gpio),
+ "Failed to get reset GPIO\n");
+
+ /* If the panel is connected on two DSIs then DSI0 left, DSI1 right */
+ dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (dsi_r) {
+ const struct mipi_dsi_device_info *info = &hx->desc->dsi_info;
+ struct mipi_dsi_host *dsi_r_host;
+
+ dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ of_node_put(dsi_r);
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Cannot get secondary DSI host\n");
+
+ hx->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, info);
+ if (IS_ERR(hx->dsi[1]))
+ return dev_err_probe(dev, PTR_ERR(hx->dsi[1]),
+ "Cannot get secondary DSI node\n");
+ mipi_dsi_set_drvdata(hx->dsi[1], hx);
+ }
+
+ hx->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, hx);
+
+ ret = drm_panel_of_backlight(&hx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&hx->panel);
+
+ for (i = 0; i < 2; i++) {
+ if (!hx->dsi[i])
+ continue;
+
+ hx->dsi[i]->lanes = hx->desc->num_lanes;
+ hx->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+
+ hx->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_LPM;
+
+ if (hx->desc->mode_data[0].is_video_mode)
+ hx->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ ret = devm_mipi_dsi_attach(dev, hx->dsi[i]);
+ if (ret < 0) {
+ drm_panel_remove(&hx->panel);
+ return dev_err_probe(dev, ret,
+ "Cannot attach to DSI%d host.\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static void hx8279_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx8279 *hx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&hx->panel);
+}
+
+static const struct hx8279_panel_mode aoly_sl101pm1794fog_v15_modes[] = {
+ {
+ .mode = {
+ .clock = 159420,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 60,
+ .htotal = 1200 + 80 + 60 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .width_mm = 136,
+ .height_mm = 217,
+ .type = DRM_MODE_TYPE_DRIVER
+ },
+ .bpc = 8,
+ .is_video_mode = true,
+ },
+};
+
+static const struct hx8279_panel_mode startek_kd070fhfid078_modes[] = {
+ {
+ .mode = {
+ .clock = 156458,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 50,
+ .hsync_end = 1200 + 50 + 24,
+ .htotal = 1200 + 50 + 24 + 66,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 14,
+ .vsync_end = 1920 + 14 + 2,
+ .vtotal = 1920 + 14 + 2 + 10,
+ .width_mm = 95,
+ .height_mm = 151,
+ .type = DRM_MODE_TYPE_DRIVER
+ },
+ .bpc = 8,
+ .is_video_mode = true,
+ },
+};
+
+static const struct hx8279_goa_mux aoly_sl101pm1794fog_v15_gmux = {
+ .gout_l = { 0x5, 0x5, 0xb, 0xb, 0x9, 0x9, 0x16, 0x16, 0xe, 0xe,
+ 0x7, 0x7, 0x26, 0x26, 0x15, 0x15, 0x1, 0x1, 0x3, 0x3 },
+ .gout_r = { 0x6, 0x6, 0xc, 0xc, 0xa, 0xa, 0x16, 0x16, 0xe, 0xe,
+ 0x8, 0x8, 0x26, 0x26, 0x15, 0x15, 0x2, 0x2, 0x4, 0x4 },
+};
+
+static const struct hx8279_analog_gamma aoly_sl101pm1794fog_v15_ana_gamma = {
+ .pos = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x1c, 0x2c, 0x33, 0x31,
+ 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 },
+ .neg = { 0x0, 0xd, 0x17, 0x26, 0x31, 0x3f, 0x3f, 0x3f, 0x3f,
+ 0x37, 0x37, 0x37, 0x39, 0x2e, 0x2f, 0x2f, 0x7 },
+};
+
+static const struct hx8279_digital_gamma aoly_sl101pm1794fog_v15_dig_gamma = {
+ .r = { 0x0, 0x5, 0x10, 0x22, 0x36, 0x4a, 0x6c, 0x9a, 0xd7, 0x17,
+ 0x92, 0x15, 0x18, 0x8c, 0x0, 0x3a, 0x72, 0x8c, 0xa5, 0xb1,
+ 0xbe, 0xca, 0xd1, 0xd4, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+ .g = { 0x4, 0x5, 0x11, 0x24, 0x39, 0x4e, 0x72, 0xa3, 0xe1, 0x25,
+ 0xa8, 0x2e, 0x32, 0xad, 0x28, 0x63, 0x9b, 0xb5, 0xcf, 0xdb,
+ 0xe8, 0xf5, 0xfa, 0xfc, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+ .b = { 0x4, 0x4, 0xf, 0x22, 0x37, 0x4d, 0x71, 0xa2, 0xe1, 0x26,
+ 0xa9, 0x2f, 0x33, 0xac, 0x24, 0x5d, 0x94, 0xac, 0xc5, 0xd1,
+ 0xdc, 0xe8, 0xed, 0xf0, 0x0, 0x0, 0x16, 0xaf, 0xff, 0xff },
+};
+
+static const struct hx8279_panel_desc aoly_sl101pm1794fog_v15 = {
+ .dsi_info = {
+ .type = "L101PM1794FOG-V15",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = aoly_sl101pm1794fog_v15_modes,
+ .num_modes = ARRAY_SIZE(aoly_sl101pm1794fog_v15_modes),
+ .num_lanes = 4,
+
+ /* Driver/Module Configuration: LC Matrix voltages */
+ .vgh_mv = 16500,
+ .vgl_mv = 11200,
+ .vgph_mv = 4600,
+ .vgnh_mv = 4600,
+
+ /* Analog Gamma correction */
+ .agamma = &aoly_sl101pm1794fog_v15_ana_gamma,
+
+ /* Gate driver On Array (GOA) Muxing */
+ .gmux = &aoly_sl101pm1794fog_v15_gmux,
+
+ /* Gate driver On Array (GOA) Mux Config */
+ .goa_unk_ba = 0xf0,
+ .goa_odd_timing = { 0, 0, 0, 42, 0, 0 },
+ .goa_even_timing = { 1, 42, 0, 0 },
+ .goa_stv_lead_time_ck = 11,
+ .goa_ckv_lead_time_ck = 7,
+ .goa_ckv_dummy_vblank_num = 3,
+ .goa_ckv_rise_precharge = 1,
+ .goa_clr1_width_adj = 0,
+ .goa_clr234_width_adj = 0,
+ .goa_clr_polarity = { 1, 0, 0, 0 },
+ .goa_clr_start_pos = { 8, 9, 3, 4 },
+ .goa_unk_e4 = 0xc0,
+ .goa_unk_e5 = 0x0d,
+
+ /* MIPI Configuration */
+ .bta_tlpx = 2,
+ .lhs_settle_time_by_osc25 = true,
+ .ths_settle_time = 2,
+ .timing_unk_b8 = 0xa5,
+ .timing_unk_bc = 0x20,
+ .timing_unk_d6 = 0x7f,
+
+ /* ENG/Gamma Configuration */
+ .gamma_ctl = 0,
+ .volt_adj = FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCIFS, 3) |
+ FIELD_PREP_CONST(HX8279_P6_VOLT_ADJ_VCCS, 3),
+ .src_delay_time_adj_ck = 50,
+
+ /* Digital Gamma Adjustment */
+ .dgamma = &aoly_sl101pm1794fog_v15_dig_gamma,
+};
+
+static const struct hx8279_goa_mux startek_kd070fhfid078_gmux = {
+ .gout_l = { 0xd, 0xd, 0x6, 0x6, 0x8, 0x8, 0xa, 0xa, 0xc, 0xc,
+ 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x4, 0x4, 0x0, 0x0 },
+ .gout_r = { 0xd, 0xd, 0x5, 0x5, 0x7, 0x7, 0x9, 0x9, 0xb, 0xb,
+ 0x0, 0x0, 0xe, 0xe, 0x1, 0x1, 0x3, 0x3, 0x0, 0x0 },
+};
+
+static const struct hx8279_panel_desc startek_kd070fhfid078 = {
+ .dsi_info = {
+ .type = "KD070FHFID078",
+ .channel = 0,
+ .node = NULL,
+ },
+ .mode_data = startek_kd070fhfid078_modes,
+ .num_modes = ARRAY_SIZE(startek_kd070fhfid078_modes),
+ .num_lanes = 4,
+
+ /* Driver/Module Configuration: LC Matrix voltages */
+ .vgh_mv = 18000,
+ .vgl_mv = 12100,
+ .vgph_mv = 5500,
+ .vgnh_mv = 5500,
+
+ /* Gate driver On Array (GOA) Mux Config */
+ .gmux = &startek_kd070fhfid078_gmux,
+
+ /* Gate driver On Array (GOA) Configuration */
+ .goa_unk_ba = 0xf0,
+ .goa_stv_lead_time_ck = 7,
+ .goa_ckv_lead_time_ck = 3,
+ .goa_ckv_dummy_vblank_num = 1,
+ .goa_ckv_rise_precharge = 0,
+ .goa_ckv_fall_precharge = 0,
+ .goa_clr1_width_adj = 1,
+ .goa_clr234_width_adj = 5,
+ .goa_clr_polarity = { 0, 1, -1, -1 },
+ .goa_clr_start_pos = { 5, 10, -1, -1 },
+ .goa_unk_e4 = 0xc0,
+ .goa_unk_e5 = 0x00,
+
+ /* MIPI Configuration */
+ .bta_tlpx = 2,
+ .lhs_settle_time_by_osc25 = true,
+ .ths_settle_time = 2,
+ .timing_unk_b8 = 0x7f,
+ .timing_unk_bc = 0x20,
+ .timing_unk_d6 = 0x7f,
+
+ /* ENG/Gamma Configuration */
+ .gamma_ctl = FIELD_PREP_CONST(HX8279_P6_GAMMA_POCGM_CTL, 1) |
+ FIELD_PREP_CONST(HX8279_P6_GAMMA_POGCMD_CTL, 1),
+ .src_delay_time_adj_ck = 72,
+};
+
+static const struct of_device_id hx8279_of_match[] = {
+ { .compatible = "aoly,sl101pm1794fog-v15", .data = &aoly_sl101pm1794fog_v15 },
+ { .compatible = "startek,kd070fhfid078", .data = &startek_kd070fhfid078 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx8279_of_match);
+
+static struct mipi_dsi_driver hx8279_driver = {
+ .probe = hx8279_probe,
+ .remove = hx8279_remove,
+ .driver = {
+ .name = "panel-himax-hx8279",
+ .of_match_table = hx8279_of_match,
+ },
+};
+module_mipi_dsi_driver(hx8279_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Himax HX8279 DriverIC panels driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index 92b03a2f65a3..ff994bf0e3cc 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -80,7 +80,7 @@ struct hx8394_panel_desc {
unsigned int lanes;
unsigned long mode_flags;
enum mipi_dsi_pixel_format format;
- int (*init_sequence)(struct hx8394 *ctx);
+ void (*init_sequence)(struct mipi_dsi_multi_context *dsi_ctx);
};
static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
@@ -88,98 +88,94 @@ static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
return container_of(panel, struct hx8394, panel);
}
-static int hsd060bhw4_init_sequence(struct hx8394 *ctx)
+static void hsd060bhw4_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x78, 0x0c, 0x07);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x78, 0x0c, 0x07);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
- 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
- 0x7c);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
+ 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
+ 0x7c);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
- 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
- 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
- 0x00, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
+ 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
+ 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
+ 0x00, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
- 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
- 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
- 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
- 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
- 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
- 0x4a, 0x4c, 0x4b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
+ 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
+ 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
+ 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
+ 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
+ 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
+ 0x4a, 0x4c, 0x4b, 0x7f);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x7d, 0x7d);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7d, 0x7d);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0xed);
}
static const struct drm_display_mode hsd060bhw4_mode = {
@@ -205,114 +201,110 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
.init_sequence = hsd060bhw4_init_sequence,
};
-static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+static void powkiddy_x55_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
- 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
- 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+ 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+ 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
- 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
- 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+ 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+ 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
- 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
- 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+ 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
- 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
- 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
- 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+ 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+ 0x18, 0x18, 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
- 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
- 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
- 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
- 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+ 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+ 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+ 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+ 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
- 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN5,
+ 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
/* Unknown command, not listed in the HX8394-F datasheet */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode powkiddy_x55_mode = {
@@ -339,131 +331,127 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = {
.init_sequence = powkiddy_x55_init_sequence,
};
-static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx)
+static void mchp_ac40t08a_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
	/* DCS commands do not seem to be sent correctly without this delay */
- msleep(20);
+ mipi_dsi_msleep(dsi_ctx, 20);
/* 5.19.8 SETEXTC: Set extension command (B9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
- 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
- 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
- 0x71, 0x71, 0x57, 0x47);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
+ 0x71, 0x71, 0x57, 0x47);
/* 5.19.3 SETDISP: Set display related register (B2h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
- 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
- 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
- 0x01, 0x0c, 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86);
/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
- 0x6e, 0x6e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
- 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
- 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
- 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
- 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
- 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
- 0x07, 0x0c, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
+ 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
+ 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
+ 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
+ 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
/* 5.19.20 Set GIP Option1 (D5h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
- 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
+ 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.21 Set GIP Option2 (D6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
- 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
- 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
- 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
- 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
- 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
+ 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18);
/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
- 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
- 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
- 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
- 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
- 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
- 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
- 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
- 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
- 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
- 0x6b, 0x72, 0x7f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
+ 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
+ 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
+ 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
+ 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
+ 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
+ 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
+ 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
+ 0x6b, 0x72, 0x7f, 0x7f);
/* Unknown command, not listed in the HX8394-F datasheet (C0H) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
- 0x1f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x73);
	/* Set CABC control (C9h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC,
- 0x76, 0x00, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCABC,
+ 0x76, 0x00, 0x30);
/* 5.19.17 SETPANEL (CCh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
- 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x0b);
/* Unknown command, not listed in the HX8394-F datasheet (D4h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN3,
+ 0x02);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
	/* Unknown command, not listed in the HX8394-F datasheet (D8h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
/* 5.19.2 SETPOWER: Set power (B1h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x00);
/* 5.19.11 Set register bank (BDh) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
- 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
/* Unknown command, not listed in the HX8394-F datasheet (C6h) */
- mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
- 0xed);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xed);
}
static const struct drm_display_mode mchp_ac40t08a_mode = {
@@ -493,35 +481,31 @@ static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
int ret;
- ret = ctx->desc->init_sequence(ctx);
- if (ret) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->init_sequence(&dsi_ctx);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
/* Panel is operational 120 msec after reset */
msleep(120);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
goto sleep_in;
- }
return 0;
sleep_in:
+ ret = dsi_ctx.accum_err;
+ dsi_ctx.accum_err = 0;
+
	/* This will probably fail, but let's try an orderly power-off anyway. */
- if (!mipi_dsi_dcs_enter_sleep_mode(dsi))
- msleep(50);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
return ret;
}
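
Worth noting in hx8394_enable(): because the *_multi helpers short-circuit on a sticky error, the sleep_in recovery path must stash and clear accum_err before the best-effort enter-sleep call, otherwise that call would be skipped too. The same idiom in isolation, assuming the multi-context semantics described earlier; panel_try_recover() is hypothetical:

static int panel_try_recover(struct mipi_dsi_multi_context *ctx)
{
	int ret = ctx->accum_err;	/* preserve the original failure */

	ctx->accum_err = 0;		/* let best-effort cleanup run */
	mipi_dsi_dcs_enter_sleep_mode_multi(ctx);
	mipi_dsi_msleep(ctx, 50);

	return ret;	/* report the first error, not the cleanup's */
}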
@@ -530,17 +514,12 @@ static int hx8394_disable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- msleep(50); /* about 3 frames */
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50); /* about 3 frames */
- return 0;
+ return dsi_ctx.accum_err;
}
static int hx8394_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index 5d115ecd5dd4..b6429795e8f5 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -413,15 +413,10 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
static void panel_nv3051d_shutdown(struct mipi_dsi_device *dsi)
{
struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
+ drm_panel_unprepare(&ctx->panel);
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
+ drm_panel_disable(&ctx->panel);
}
static void panel_nv3051d_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 04f1d2676c78..116d67bfa114 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -23,10 +23,12 @@
#define DSI_NUM_MIN 1
-#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...) \
- do { \
- mipi_dsi_dcs_write_seq(dsi0, cmd, seq); \
- mipi_dsi_dcs_write_seq(dsi1, cmd, seq); \
+#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \
+ do { \
+ dsi_ctx.dsi = dsi0; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
+ dsi_ctx.dsi = dsi1; \
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
} while (0)
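
The rewritten dual-link wrapper simply re-points dsi_ctx.dsi at each host in turn, so both links share one accumulator and a failure on either side mutes the remaining writes on both. A hedged usage sketch; example_dual_write() is hypothetical:

static int example_dual_write(struct mipi_dsi_device *dsi0,
			      struct mipi_dsi_device *dsi1)
{
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };

	/* The macro assigns dsi_ctx.dsi itself, hence pass-by-name. */
	mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
	mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);

	return dsi_ctx.accum_err;
}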
struct panel_info {
@@ -67,868 +69,829 @@ static int elish_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd2, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}
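
Unlike the hx8394 hooks, these nt36523 init sequences keep their int prototype and surface the accumulated error via return dsi_ctx.accum_err, so existing callers continue to work unchanged. A sketch of such a caller, assuming the driver's panel descriptor exposes an init_sequence hook as the surrounding code suggests; panel_prepare_example() is hypothetical:

static int panel_prepare_example(struct panel_info *pinfo)
{
	int ret = pinfo->desc->init_sequence(pinfo);

	if (ret)
		dev_err(&pinfo->dsi[0]->dev,
			"init sequence failed: %d\n", ret);

	return ret;
}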
static int elish_csot_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi0 = pinfo->dsi[0];
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11);
- msleep(70);
- mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29);
-
- return 0;
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
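+ /* DCS 0x11 (exit sleep) and 0x29 (display on), with a 70 ms wake delay */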
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_msleep(&dsi_ctx, 70);
+ mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+
+ return dsi_ctx.accum_err;
}
static int j606f_boe_init_sequence(struct panel_info *pinfo)
{
struct mipi_dsi_device *dsi = pinfo->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0xd9);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x63);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0x95, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, 0x96, 0xeb);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x75, 0xa2);
- mipi_dsi_dcs_write_seq(dsi, 0x77, 0xb3);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00,
- 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01,
- 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03,
- 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03,
- 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00,
- 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01,
- 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03,
- 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03,
- 0xf5, 0x03, 0xf7);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x23);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x77);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x01, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x03, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x07, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x08, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x09, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x10, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x21, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x0d);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x32);
- mipi_dsi_dcs_write_seq(dsi, 0x37, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x38, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x9a);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x42);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x43, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x47, 0x66);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x9a);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x4c, 0x91);
- mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x43);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 18);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x52, 0x34);
- mipi_dsi_dcs_write_seq(dsi, 0x55, 0x82, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x21);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x3c);
- mipi_dsi_dcs_write_seq(dsi, 0x82, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0x97, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xb6,
- 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
- 0x05, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x93, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x94, 0x5f);
- mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x55);
- mipi_dsi_dcs_write_seq(dsi, 0xda, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x90);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x05, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x3f, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_GET_SCANLINE, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0xba);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xf1, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0x70, 0x30);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_START, 0xf3);
- mipi_dsi_dcs_write_seq(dsi, 0xa3, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa4, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xa5, 0xff);
- mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0xa1);
- mipi_dsi_dcs_write_seq(dsi, 0x0a, 0xf2);
- mipi_dsi_dcs_write_seq(dsi, 0x04, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0x06, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x12, 0x50);
- mipi_dsi_dcs_write_seq(dsi, 0x13, 0x51);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x65);
- mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x17, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x86);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0xbb);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7b);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x05);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0xc3);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x78);
- mipi_dsi_dcs_write_seq(dsi, 0x35, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x82);
- mipi_dsi_dcs_write_seq(dsi, 0xca, 0x4e);
- mipi_dsi_dcs_write_seq(dsi, 0xcb, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
- mipi_dsi_dcs_write_seq(dsi, 0xaa, 0x47);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x56, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x58, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0x59, 0x53);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x01);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0x60, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1d);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x63, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x64, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0x65, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0x66, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x67, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x78, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2f);
- mipi_dsi_dcs_write_seq(dsi, 0x23, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x28, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xe0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x14, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x16, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x08);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x5d);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x5d);
- mipi_dsi_dcs_write_seq(dsi, 0x4b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x91, 0x44);
- mipi_dsi_dcs_write_seq(dsi, 0x92, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xdc, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22);
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x20, 0x60);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x27, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x33, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x34, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x49, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x61, 0x70);
- mipi_dsi_dcs_write_seq(dsi, 0x62, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x02, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x0c);
- mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x0a);
- mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
- mipi_dsi_dcs_write_seq(dsi, 0x32, 0x8d);
-
- ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x75);
- if (ret < 0) {
- dev_err(dev, "Failed to set pixel format: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0x18, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x02);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10);
- usleep_range(10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01);
-
- ret = mipi_dsi_dcs_set_display_brightness(dsi, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to set display brightness: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0x68, 0x05, 0x01);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(100);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(30);
-
- return 0;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0xd9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x63);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0xeb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xa2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d,
+ 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e,
+ 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08,
+ 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
+ 0x03, 0xfd, 0x03, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
+ 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
+ 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
+ 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
+ 0x03, 0xf5, 0x03, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x9a);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x42);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x66);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x9a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x91);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x43);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 18);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x34);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x82, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x21);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6,
+ 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ 0x05, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xda, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x90);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_VSYNC_TIMING, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_GET_SCANLINE, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0xf3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd6, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xa1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0xf2);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x65);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x86);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0xc3);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc9, 0x82);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x27);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xe0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x08);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x5d);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x5d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x60);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x8d);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x75);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x02);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01);
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x05, 0x01);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 30);
+
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode elish_boe_modes[] = {
@@ -1063,18 +1026,18 @@ static int nt36523_prepare(struct drm_panel *panel)
static int nt36523_disable(struct drm_panel *panel)
{
struct panel_info *pinfo = to_panel_info(panel);
- int i, ret;
+ int i;
for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
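+ /* one context per link, so a failure on one DSI does not gate the other */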
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
}
for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) {
- ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]);
- if (ret < 0)
- dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", ret);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i] };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
msleep(70);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt37801.c b/drivers/gpu/drm/panel/panel-novatek-nt37801.c
new file mode 100644
index 000000000000..d6a37d7e0cc6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt37801.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2024 Linaro Limited
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/mipi_display.h>
+
+struct novatek_nt37801 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_config dsc;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data novatek_nt37801_supplies[] = {
+ { .supply = "vddio" },
+ { .supply = "vci" },
+ { .supply = "vdd" },
+};
+
+static inline struct novatek_nt37801 *to_novatek_nt37801(struct drm_panel *panel)
+{
+ return container_of(panel, struct novatek_nt37801, panel);
+}
+
+static void novatek_nt37801_reset(struct novatek_nt37801 *ctx)
+{
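+ /* Pulse the reset line, allowing at least 10 ms settle after each edge */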
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 21000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 21000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 21000);
+}
+
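+/*
+ * Vendor register pages are selected by sending NT37801_DCS_SWITCH_PAGE
+ * followed by the 0x55 0xaa 0x52 0x08 unlock bytes and the page index.
+ */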
+#define NT37801_DCS_SWITCH_PAGE 0xf0
+
+#define novatek_nt37801_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi((dsi_ctx), NT37801_DCS_SWITCH_PAGE, \
+ 0x55, 0xaa, 0x52, 0x08, (page))
+
+static int novatek_nt37801_on(struct novatek_nt37801 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
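+ /*
+ * The *_multi() helpers latch the first error in dsi_ctx.accum_err and
+ * become no-ops afterwards, so the long sequence below needs no
+ * per-command checks; the accumulated error is returned at the end.
+ */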
+ novatek_nt37801_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc5, 0x0b, 0x0b, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf5, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x1b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x18);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf8, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0x00);
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x059f);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0c7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x03, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91,
+ 0x89, 0x28, 0x00, 0x28, 0xc2, 0x00, 0x02,
+ 0x68, 0x04, 0x6c, 0x00, 0x0a, 0x02, 0x77,
+ 0x01, 0xe9, 0x10, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb,
+ 0x00, 0x01, 0x00, 0x11, 0x33, 0x33, 0x33,
+ 0x55, 0x57, 0xd0, 0x00, 0x00, 0x44, 0x56,
+ 0x77, 0x78, 0x9a, 0xbc, 0xdd, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xdc);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x00);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x00, 0x18, 0x00, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51,
+ 0x07, 0xff, 0x07, 0xff, 0x0f, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x9c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_MEMORY_START);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x00);
+
+ novatek_nt37801_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x55, 0x01, 0xff, 0x03);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int novatek_nt37801_off(struct novatek_nt37801 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
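+ /* Tear-down commands are sent in HS mode */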
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int novatek_nt37801_prepare(struct drm_panel *panel)
+{
+ struct novatek_nt37801 *ctx = to_novatek_nt37801(panel);
+ struct device *dev = &ctx->dsi->dev;
+ struct drm_dsc_picture_parameter_set pps;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ novatek_nt37801_reset(ctx);
+
+ ret = novatek_nt37801_on(ctx);
+ if (ret < 0)
+ goto err;
+
+ drm_dsc_pps_payload_pack(&pps, &ctx->dsc);
+
+ ret = mipi_dsi_picture_parameter_set(ctx->dsi, &pps);
+ if (ret < 0) {
+ dev_err(dev, "failed to transmit PPS: %d\n", ret);
+ goto err;
+ }
+
+ ret = mipi_dsi_compression_mode(ctx->dsi, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable compression mode: %d\n", ret);
+ goto err;
+ }
+
+ msleep(28);
+
+ return 0;
+
+err:
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+
+ return ret;
+}
+
+static int novatek_nt37801_unprepare(struct drm_panel *panel)
+{
+ struct novatek_nt37801 *ctx = to_novatek_nt37801(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = novatek_nt37801_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(novatek_nt37801_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode novatek_nt37801_mode = {
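+ /* htotal (1484) * vtotal (3240) * 120 Hz refresh, in kHz */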
+ .clock = (1440 + 20 + 4 + 20) * (3200 + 20 + 2 + 18) * 120 / 1000,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 20,
+ .hsync_end = 1440 + 20 + 4,
+ .htotal = 1440 + 20 + 4 + 20,
+ .vdisplay = 3200,
+ .vsync_start = 3200 + 20,
+ .vsync_end = 3200 + 20 + 2,
+ .vtotal = 3200 + 20 + 2 + 18,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int novatek_nt37801_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &novatek_nt37801_mode);
+}
+
+static const struct drm_panel_funcs novatek_nt37801_panel_funcs = {
+ .prepare = novatek_nt37801_prepare,
+ .unprepare = novatek_nt37801_unprepare,
+ .get_modes = novatek_nt37801_get_modes,
+};
+
+static int novatek_nt37801_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops novatek_nt37801_bl_ops = {
+ .update_status = novatek_nt37801_bl_update_status,
+};
+
+static struct backlight_device *
+novatek_nt37801_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &novatek_nt37801_bl_ops, &props);
+}
+
+static int novatek_nt37801_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct novatek_nt37801 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct novatek_nt37801, panel,
+ &novatek_nt37801_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(novatek_nt37801_supplies),
+ novatek_nt37801_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->panel.prepare_prev_first = true;
+ ctx->panel.backlight = novatek_nt37801_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ /* This panel only supports DSC; unconditionally enable it */
+ dsi->dsc = &ctx->dsc;
+ ctx->dsc.dsc_version_major = 1;
+ ctx->dsc.dsc_version_minor = 1;
+ ctx->dsc.slice_height = 40;
+ ctx->dsc.slice_width = 720;
+ ctx->dsc.slice_count = 1440 / ctx->dsc.slice_width;
+ ctx->dsc.bits_per_component = 8;
+ ctx->dsc.bits_per_pixel = 8 << 4; /* 4 fractional bits */
+ ctx->dsc.block_pred_enable = true;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void novatek_nt37801_remove(struct mipi_dsi_device *dsi)
+{
+ struct novatek_nt37801 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id novatek_nt37801_of_match[] = {
+ { .compatible = "novatek,nt37801" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, novatek_nt37801_of_match);
+
+static struct mipi_dsi_driver novatek_nt37801_driver = {
+ .probe = novatek_nt37801_probe,
+ .remove = novatek_nt37801_remove,
+ .driver = {
+ .name = "panel-novatek-nt37801",
+ .of_match_table = novatek_nt37801_of_match,
+ },
+};
+module_mipi_dsi_driver(novatek_nt37801_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("Panel driver for the Novatek NT37801/NT37810 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index f23d8832a1ad..93f11e2e9398 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -34,8 +34,8 @@ struct s6d7aa0 {
struct s6d7aa0_panel_desc {
unsigned int panel_type;
- int (*init_func)(struct s6d7aa0 *ctx);
- int (*off_func)(struct s6d7aa0 *ctx);
+ void (*init_func)(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx);
+ void (*off_func)(struct mipi_dsi_multi_context *dsi_ctx);
const struct drm_display_mode *drm_mode;
unsigned long mode_flags;
u32 bus_flags;
@@ -62,93 +62,61 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx)
msleep(50);
}
-static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock)
+static void s6d7aa0_lock(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx, bool lock)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
if (lock) {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0x5a, 0x5a);
} else {
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a);
if (ctx->desc->use_passwd3)
- mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0xa5, 0xa5);
}
-
- return 0;
}
static int s6d7aa0_on(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->init_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
- gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- return ret;
- }
+ ctx->desc->init_func(ctx, &dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
-static int s6d7aa0_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_off(struct s6d7aa0 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = ctx->desc->off_func(ctx);
- if (ret < 0) {
- dev_err(dev, "Panel-specific off function failed: %d\n", ret);
- return ret;
- }
+ ctx->desc->off_func(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(64);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 64);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ mipi_dsi_msleep(&dsi_ctx, 120);
}
static int s6d7aa0_prepare(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
s6d7aa0_reset(ctx);
ret = s6d7aa0_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -159,12 +127,8 @@ static int s6d7aa0_prepare(struct drm_panel *panel)
static int s6d7aa0_disable(struct drm_panel *panel)
{
struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
- ret = s6d7aa0_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ s6d7aa0_off(ctx);
return 0;
}
@@ -185,13 +149,11 @@ static int s6d7aa0_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
u16 brightness = backlight_get_brightness(bl);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness);
- return 0;
+ return dsi_ctx.accum_err;
}
static int s6d7aa0_bl_get_brightness(struct backlight_device *bl)
@@ -228,65 +190,39 @@ s6d7aa0_create_backlight(struct mipi_dsi_device *dsi)
/* Initialization code and structures for LSL080AL02 panel */
-static int s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- usleep_range(20000, 25000);
+ s6d7aa0_lock(ctx, dsi_ctx, false);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_write_seq(dsi, MCS_OTP_RELOAD, 0x00, 0x10);
- usleep_range(1000, 1500);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_OTP_RELOAD, 0x00, 0x10);
+ mipi_dsi_usleep_range(dsi_ctx, 1000, 1500);
/* SEQ_B6_PARAM_8_R01 */
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x10);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0x10);
/* BL_CTL_ON */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x28);
-
- usleep_range(5000, 6000);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x28);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
+ mipi_dsi_usleep_range(dsi_ctx, 5000, 6000);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x04);
- msleep(120);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_msleep(dsi_ctx, 120);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, true);
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al02_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al02_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
/* BL_CTL_OFF */
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x20);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x20);
}
static const struct drm_display_mode s6d7aa0_lsl080al02_mode = {
@@ -317,79 +253,51 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
/* Initialization code and structures for LSL080AL03 panel */
-static int s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- usleep_range(20000, 25000);
+ mipi_dsi_usleep_range(dsi_ctx, 20000, 25000);
- ret = s6d7aa0_lock(ctx, false);
- if (ret < 0) {
- dev_err(dev, "Failed to unlock registers: %d\n", ret);
- return ret;
- }
+ s6d7aa0_lock(ctx, dsi_ctx, false);
if (ctx->desc->panel_type == S6D7AA0_PANEL_LSL080AL03) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0xc7, 0x00, 0x29);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x78);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0xc7, 0x00, 0x29);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x78);
} else if (ctx->desc->panel_type == S6D7AA0_PANEL_LTL101AT01) {
- mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0x0b);
- mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23,
- 0x09);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21,
- 0x80, 0x68);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x08);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23,
+ 0x09);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21,
+ 0x80, 0x68);
}
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x51);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x02, 0x08, 0x08);
-
- usleep_range(10000, 11000);
-
- mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x80, 0x80, 0x30);
- mipi_dsi_dcs_write_seq(dsi, 0xcd,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
- 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
- mipi_dsi_dcs_write_seq(dsi, 0xce,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x03);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x51);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xf2, 0x02, 0x08, 0x08);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
-
- ret = s6d7aa0_lock(ctx, true);
- if (ret < 0) {
- dev_err(dev, "Failed to lock registers: %d\n", ret);
- return ret;
- }
+ mipi_dsi_usleep_range(dsi_ctx, 10000, 11000);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x80, 0x80, 0x30);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcd,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e,
+ 0x2e, 0x2e, 0x2e, 0x2e, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x03);
- return 0;
+ mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx);
+ s6d7aa0_lock(ctx, dsi_ctx, true);
+ mipi_dsi_dcs_set_display_on_multi(dsi_ctx);
}
-static int s6d7aa0_lsl080al03_off(struct s6d7aa0 *ctx)
+static void s6d7aa0_lsl080al03_off(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
-
- mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x22, 0x00);
}
static const struct drm_display_mode s6d7aa0_lsl080al03_mode = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 04ce925b3d9d..d92ae6b6100f 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2020 Caleb Connolly <caleb@connolly.tech>
+/* Copyright (c) 2020 Casey Connolly <casey.connolly@linaro.org>
* Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
@@ -22,7 +22,6 @@ struct sofef00_panel {
struct mipi_dsi_device *dsi;
struct regulator *supply;
struct gpio_desc *reset_gpio;
- const struct drm_display_mode *mode;
};
static inline
@@ -44,66 +43,44 @@ static void sofef00_panel_reset(struct sofef00_panel *ctx)
static int sofef00_panel_on(struct sofef00_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- usleep_range(10000, 11000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sofef00_panel_off(struct sofef00_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(40);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 40);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(160);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 160);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sofef00_panel_prepare(struct drm_panel *panel)
@@ -122,7 +99,6 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
ret = sofef00_panel_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
return ret;
}
@@ -133,13 +109,8 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
static int sofef00_panel_unprepare(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
- struct device *dev = &ctx->dsi->dev;
- int ret;
-
- ret = sofef00_panel_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ sofef00_panel_off(ctx);
regulator_disable(ctx->supply);
return 0;
@@ -159,26 +130,11 @@ static const struct drm_display_mode enchilada_panel_mode = {
.height_mm = 145,
};
-static const struct drm_display_mode fajita_panel_mode = {
- .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000,
- .hdisplay = 1080,
- .hsync_start = 1080 + 72,
- .hsync_end = 1080 + 72 + 16,
- .htotal = 1080 + 72 + 16 + 36,
- .vdisplay = 2340,
- .vsync_start = 2340 + 32,
- .vsync_end = 2340 + 32 + 4,
- .vtotal = 2340 + 32 + 4 + 18,
- .width_mm = 68,
- .height_mm = 145,
-};
-
static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
{
struct drm_display_mode *mode;
- struct sofef00_panel *ctx = to_sofef00_panel(panel);
- mode = drm_mode_duplicate(connector->dev, ctx->mode);
+ mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode);
if (!mode)
return -ENOMEM;
@@ -239,13 +195,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->mode = of_device_get_match_data(dev);
-
- if (!ctx->mode) {
- dev_err(dev, "Missing device mode\n");
- return -ENODEV;
- }
-
ctx->supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(ctx->supply))
return dev_err_probe(dev, PTR_ERR(ctx->supply),
@@ -295,14 +244,7 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id sofef00_panel_of_match[] = {
- { // OnePlus 6 / enchilada
- .compatible = "samsung,sofef00",
- .data = &enchilada_panel_mode,
- },
- { // OnePlus 6T / fajita
- .compatible = "samsung,s6e3fc2x01",
- .data = &fajita_panel_mode,
- },
+ { .compatible = "samsung,sofef00" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);
@@ -318,6 +260,6 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
module_mipi_dsi_driver(sofef00_panel_driver);
-MODULE_AUTHOR("Caleb Connolly <caleb@connolly.tech>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 729cbb0d8403..36abfa2e65e9 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -36,60 +36,49 @@ static inline struct sharp_nt_panel *to_sharp_nt_panel(struct drm_panel *panel)
static int sharp_nt_panel_init(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
/* Novatek two-lane operation */
- ret = mipi_dsi_dcs_write(dsi, 0xae, (u8[]){ 0x03 }, 1);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xae, 0x03);
/* Set both MCU and RGB I/F to 24bpp */
- ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
- (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx,
+ MIPI_DCS_PIXEL_FMT_24BIT |
+ (MIPI_DCS_PIXEL_FMT_24BIT << 4));
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_on(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_off(struct sharp_nt_panel *sharp_nt)
{
struct mipi_dsi_device *dsi = sharp_nt->dsi;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- return ret;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return 0;
+ return dsi_ctx.accum_err;
}
static int sharp_nt_panel_unprepare(struct drm_panel *panel)
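The sharp_nt_panel_init() conversion above keeps the combined pixel-format
byte. For reference, MIPI_DCS_PIXEL_FMT_24BIT is 7, the DBI (MCU I/F)
field occupies bits 2:0 and the DPI (RGB I/F) field bits 6:4, so the
parameter packs to 0x77. A standalone arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned int fmt24 = 7;	/* MIPI_DCS_PIXEL_FMT_24BIT */

	/* DBI in bits 2:0, DPI in bits 6:4 -> 0x77 for 24bpp on both */
	printf("COLMOD parameter: 0x%02x\n", fmt24 | (fmt24 << 4));
	return 0;
}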
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 33a37539de57..0a3b26bb4d73 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -579,9 +579,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
u32 bus_flags;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
+ &panel_simple_funcs, desc->connector_type);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->desc = desc;
@@ -694,8 +695,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);
-
err = drm_panel_of_backlight(&panel->base);
if (err) {
dev_err_probe(dev, err, "Could not find backlight\n");
@@ -2199,13 +2198,14 @@ static const struct display_timing evervision_vgg644804_timing = {
static const struct panel_desc evervision_vgg644804 = {
.timings = &evervision_vgg644804_timing,
.num_timings = 1,
- .bpc = 8,
+ .bpc = 6,
.size = {
.width = 115,
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing evervision_vgg804821_timing = {
@@ -3528,6 +3528,30 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode nlt_nl13676bc25_03f_mode = {
+ .clock = 75400,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 14,
+ .hsync_end = 1366 + 14 + 56,
+ .htotal = 1366 + 14 + 56 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 1,
+ .vsync_end = 768 + 1 + 3,
+ .vtotal = 768 + 1 + 3 + 22,
+};
+
+static const struct panel_desc nlt_nl13676bc25_03f = {
+ .modes = &nlt_nl13676bc25_03f_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 363,
+ .height = 215,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing nlt_nl192108ac18_02d_timing = {
.pixelclock = { 130000000, 148350000, 163000000 },
.hactive = { 1920, 1920, 1920 },
@@ -3797,6 +3821,32 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode powertip_ph128800t004_zza01_mode = {
+ .clock = 71150,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 9,
+ .vsync_end = 800 + 9 + 8,
+ .vtotal = 800 + 9 + 8 + 6,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc powertip_ph128800t004_zza01 = {
+ .modes = &powertip_ph128800t004_zza01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = {
.clock = 66500,
.hdisplay = 1280,
@@ -4394,10 +4444,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
};
/*
- * The datasheet computes total blanking as back porch + front porch, not
- * including sync pulse width. This is for both H and V. To make the total
- * blanking and period correct, subtract the pulse width from the front
- * porch.
+ * The TM070JDHG34-00 datasheet computes total blanking as back porch +
+ * front porch, not including sync pulse width. This is for both H and
+ * V. To make the total blanking and period correct, subtract the pulse
+ * width from the front porch.
*
* This works well for the Min and Typ values, but for Max values the sync
* pulse width is higher than back porch + front porch, so work around that
@@ -4406,6 +4456,10 @@ static const struct panel_desc tianma_tm070jvhg33 = {
*
* Exact datasheet values are added as a comment where they differ from the
* ones implemented for the above reason.
+ *
+ * The P0700WXF1MBAA datasheet is even less detailed, listing only the
+ * period and total blanking time, but the resulting values are the same
+ * as for the TM070JDHG34-00.
*/
static const struct display_timing tianma_tm070jdhg34_00_timing = {
.pixelclock = { 68400000, 71900000, 78100000 },
@@ -4428,6 +4482,30 @@ static const struct panel_desc tianma_tm070jdhg34_00 = {
.width = 150, /* 149.76 */
.height = 94, /* 93.60 */
},
+ .delay = {
+ .prepare = 15, /* Tp1 */
+ .enable = 150, /* Tp2 */
+ .disable = 150, /* Tp4 */
+ .unprepare = 120, /* Tp3 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct panel_desc tianma_p0700wxf1mbaa = {
+ .timings = &tianma_tm070jdhg34_00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 150, /* 149.76 */
+ .height = 94, /* 93.60 */
+ },
+ .delay = {
+ .prepare = 18, /* Tr + Tp1 */
+ .enable = 152, /* Tp2 + Tp5 */
+ .disable = 152, /* Tp6 + Tp4 */
+ .unprepare = 120, /* Tp3 */
+ },
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -5122,6 +5200,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
+ .compatible = "nlt,nl13676bc25-03f",
+ .data = &nlt_nl13676bc25_03f,
+ }, {
.compatible = "nlt,nl192108ac18-02d",
.data = &nlt_nl192108ac18_02d,
}, {
@@ -5155,6 +5236,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
+ .compatible = "powertip,ph128800t004-zza01",
+ .data = &powertip_ph128800t004_zza01,
+ }, {
.compatible = "powertip,ph128800t006-zhc01",
.data = &powertip_ph128800t006_zhc01,
}, {
@@ -5215,6 +5299,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
+ .compatible = "tianma,p0700wxf1mbaa",
+ .data = &tianma_p0700wxf1mbaa,
+ }, {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
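The tianma comment above describes subtracting the sync pulse width from
the front porch so that htotal still equals hactive plus the datasheet's
total blanking. A standalone check with hypothetical numbers (not taken
from either datasheet):

#include <stdio.h>

int main(void)
{
	/* hypothetical datasheet values: blanking excludes the sync pulse */
	unsigned int hactive = 1280, blanking = 160, back = 80, sync = 24;
	unsigned int front = blanking - back - sync;	/* adjusted porch */
	unsigned int htotal = hactive + front + sync + back;

	/* htotal == hactive + blanking, so the line period stays correct */
	printf("htotal = %u (expected %u)\n", htotal, hactive + blanking);
	return 0;
}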
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index 17349825543f..b148e6cba9bd 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -106,53 +106,34 @@ static int r63353_panel_power_off(struct r63353_panel *rpanel)
static int r63353_panel_activate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int i, ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ int i;
- ret = mipi_dsi_dcs_soft_reset(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_soft_reset_multi(&dsi_ctx);
- usleep_range(15000, 17000);
+ mipi_dsi_usleep_range(&dsi_ctx, 15000, 17000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
for (i = 0; i < rpanel->pdata->init_length; i++) {
const struct r63353_instr *instr = &rpanel->pdata->init[i];
- ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len);
- if (ret < 0)
- goto fail;
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, instr->data,
+ instr->len);
}
- msleep(120);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_msleep(&dsi_ctx, 120);
- usleep_range(5000, 10000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display ON (%d)\n", ret);
- goto fail;
- }
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- return 0;
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
-fail:
- gpiod_set_value(rpanel->reset_gpio, 0);
+ if (dsi_ctx.accum_err)
+ gpiod_set_value(rpanel->reset_gpio, 0);
- return ret;
+ return dsi_ctx.accum_err;
}
static int r63353_panel_prepare(struct drm_panel *panel)
@@ -178,27 +159,16 @@ static int r63353_panel_prepare(struct drm_panel *panel)
return 0;
}
-static int r63353_panel_deactivate(struct r63353_panel *rpanel)
+static void r63353_panel_deactivate(struct r63353_panel *rpanel)
{
struct mipi_dsi_device *dsi = rpanel->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display OFF (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- usleep_range(5000, 10000);
+ mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
-
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
}
static int r63353_panel_unprepare(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 4dbf8b88f264..11d460d2ea19 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,11 +86,7 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-/*
- * noinline_for_stack so we don't get multiple copies of tx_buf
- * on the stack in case of gcc-plugin-structleak
- */
-static int noinline_for_stack
+static int
jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
diff --git a/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
new file mode 100644
index 000000000000..413849f7b4de
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-g2647fb105.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Alexander Baransky <sanyapilot496@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct visionox_g2647fb105 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data visionox_g2647fb105_supplies[] = {
+ { .supply = "vdd3p3" },
+ { .supply = "vddio" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline
+struct visionox_g2647fb105 *to_visionox_g2647fb105(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_g2647fb105, panel);
+}
+
+static void visionox_g2647fb105_reset(struct visionox_g2647fb105 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int visionox_g2647fb105_on(struct visionox_g2647fb105 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x32);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbe, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbf, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xdd);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_g2647fb105_off(struct visionox_g2647fb105 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static int visionox_g2647fb105_prepare(struct drm_panel *panel)
+{
+ struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ visionox_g2647fb105_reset(ctx);
+
+ ret = visionox_g2647fb105_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int visionox_g2647fb105_unprepare(struct drm_panel *panel)
+{
+ struct visionox_g2647fb105 *ctx = to_visionox_g2647fb105(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = visionox_g2647fb105_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(visionox_g2647fb105_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode visionox_g2647fb105_mode = {
+ .clock = (1080 + 28 + 4 + 36) * (2340 + 8 + 4 + 4) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 28,
+ .hsync_end = 1080 + 28 + 4,
+ .htotal = 1080 + 28 + 4 + 36,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 8,
+ .vsync_end = 2340 + 8 + 4,
+ .vtotal = 2340 + 8 + 4 + 4,
+ .width_mm = 69,
+ .height_mm = 149,
+};
+
+static int visionox_g2647fb105_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &visionox_g2647fb105_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_g2647fb105_panel_funcs = {
+ .prepare = visionox_g2647fb105_prepare,
+ .unprepare = visionox_g2647fb105_unprepare,
+ .get_modes = visionox_g2647fb105_get_modes,
+};
+
+static int visionox_g2647fb105_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops visionox_g2647fb105_bl_ops = {
+ .update_status = visionox_g2647fb105_bl_update_status,
+};
+
+static struct backlight_device *
+visionox_g2647fb105_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 1023,
+ .max_brightness = 2047,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_g2647fb105_bl_ops, &props);
+}
+
+static int visionox_g2647fb105_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_g2647fb105 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(visionox_g2647fb105_supplies),
+ visionox_g2647fb105_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_g2647fb105_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = visionox_g2647fb105_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void visionox_g2647fb105_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_g2647fb105 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_g2647fb105_of_match[] = {
+ { .compatible = "visionox,g2647fb105" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_g2647fb105_of_match);
+
+static struct mipi_dsi_driver visionox_g2647fb105_driver = {
+ .probe = visionox_g2647fb105_probe,
+ .remove = visionox_g2647fb105_remove,
+ .driver = {
+ .name = "panel-visionox-g2647fb105",
+ .of_match_table = visionox_g2647fb105_of_match,
+ },
+};
+module_mipi_dsi_driver(visionox_g2647fb105_driver);
+
+MODULE_AUTHOR("Alexander Baransky <sanyapilot496@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Visionox G2647FB105 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index a45e4addcc19..5d35076b2e6d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -209,10 +209,20 @@ int panfrost_device_init(struct panfrost_device *pfdev)
spin_lock_init(&pfdev->cycle_counter.lock);
+ err = panfrost_pm_domain_init(pfdev);
+ if (err)
+ return err;
+
+ err = panfrost_reset_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "reset init failed %d\n", err);
+ goto out_pm_domain;
+ }
+
err = panfrost_clk_init(pfdev);
if (err) {
dev_err(pfdev->dev, "clk init failed %d\n", err);
- return err;
+ goto out_reset;
}
err = panfrost_devfreq_init(pfdev);
@@ -229,25 +239,15 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto out_devfreq;
}
- err = panfrost_reset_init(pfdev);
- if (err) {
- dev_err(pfdev->dev, "reset init failed %d\n", err);
- goto out_regulator;
- }
-
- err = panfrost_pm_domain_init(pfdev);
- if (err)
- goto out_reset;
-
pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
- goto out_pm_domain;
+ goto out_regulator;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto out_pm_domain;
+ goto out_regulator;
err = panfrost_mmu_init(pfdev);
if (err)
@@ -268,16 +268,16 @@ out_mmu:
panfrost_mmu_fini(pfdev);
out_gpu:
panfrost_gpu_fini(pfdev);
-out_pm_domain:
- panfrost_pm_domain_fini(pfdev);
-out_reset:
- panfrost_reset_fini(pfdev);
out_regulator:
panfrost_regulator_fini(pfdev);
out_devfreq:
panfrost_devfreq_fini(pfdev);
out_clk:
panfrost_clk_fini(pfdev);
+out_reset:
+ panfrost_reset_fini(pfdev);
+out_pm_domain:
+ panfrost_pm_domain_fini(pfdev);
return err;
}
@@ -287,11 +287,11 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
- panfrost_pm_domain_fini(pfdev);
- panfrost_reset_fini(pfdev);
panfrost_devfreq_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
+ panfrost_reset_fini(pfdev);
+ panfrost_pm_domain_fini(pfdev);
}
#define PANFROST_EXCEPTION(id) \
@@ -406,11 +406,36 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
static int panfrost_device_runtime_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+ ret = reset_control_deassert(pfdev->rstc);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(pfdev->clock);
+ if (ret)
+ goto err_clk;
+
+ if (pfdev->bus_clock) {
+ ret = clk_enable(pfdev->bus_clock);
+ if (ret)
+ goto err_bus_clk;
+ }
+ }
panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
return 0;
+
+err_bus_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+ clk_disable(pfdev->clock);
+err_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT))
+ reset_control_assert(pfdev->rstc);
+ return ret;
}
static int panfrost_device_runtime_suspend(struct device *dev)
@@ -426,6 +451,14 @@ static int panfrost_device_runtime_suspend(struct device *dev)
panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
+ if (pfdev->comp->pm_features & BIT(GPU_PM_RT)) {
+ if (pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+
+ clk_disable(pfdev->clock);
+ reset_control_assert(pfdev->rstc);
+ }
+
return 0;
}
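The panfrost_device_init() reorder above exists so that teardown runs in
strict reverse order of acquisition, with each goto label unwinding only
what has already succeeded; panfrost_device_fini() mirrors the same order.
A minimal sketch of the pattern with illustrative names (not the driver's
actual helpers):

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int device_init(void)
{
	int err;

	err = acquire("pm domains");
	if (err)
		return err;

	err = acquire("reset");
	if (err)
		goto out_pm_domain;

	err = acquire("clocks");
	if (err)
		goto out_reset;

	return 0;

out_reset:
	release("reset");
out_pm_domain:
	release("pm domains");
	return err;
}

int main(void) { return device_init(); }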
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index cffcb0ac7c11..dcff70f905cd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -36,10 +36,21 @@ enum panfrost_drv_comp_bits {
* enum panfrost_gpu_pm - Supported kernel power management features
* @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
* @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
+ * @GPU_PM_RT: Allow disabling clocks and asserting the reset control during
+ * system runtime suspend
*/
enum panfrost_gpu_pm {
GPU_PM_CLK_DIS,
GPU_PM_VREG_OFF,
+ GPU_PM_RT
+};
+
+/**
+ * enum panfrost_gpu_quirks - GPU optional quirks
+ * @GPU_QUIRK_FORCE_AARCH64_PGTABLE: Use AARCH64_4K page table format
+ */
+enum panfrost_gpu_quirks {
+ GPU_QUIRK_FORCE_AARCH64_PGTABLE,
};
struct panfrost_features {
@@ -95,6 +106,9 @@ struct panfrost_compatible {
/* Allowed PM features */
u8 pm_features;
+
+ /* GPU configuration quirks */
+ u8 gpu_quirks;
};
struct panfrost_device {
@@ -162,6 +176,11 @@ struct panfrost_mmu {
int as;
atomic_t as_count;
struct list_head list;
+ struct {
+ u64 transtab;
+ u64 memattr;
+ u64 transcfg;
+ } cfg;
};
struct panfrost_engine_usage {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 0f3935556ac7..f1ec3b02f15a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -476,7 +476,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
}
}
- args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);
+ args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv);
if (args->retained) {
if (args->madv == PANFROST_MADV_DONTNEED)
@@ -776,6 +776,13 @@ static const struct panfrost_compatible default_data = {
.pm_domain_names = NULL,
};
+static const struct panfrost_compatible allwinner_h616_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies) - 1,
+ .supply_names = default_supplies,
+ .num_pm_domains = 1,
+ .pm_features = BIT(GPU_PM_RT),
+};
+
static const struct panfrost_compatible amlogic_data = {
.num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
@@ -824,6 +831,7 @@ static const struct panfrost_compatible mediatek_mt8188_data = {
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
@@ -835,6 +843,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = {
.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
.pm_domain_names = mediatek_mt8192_pm_domains,
.pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
+ .gpu_quirks = BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE),
};
static const struct of_device_id dt_match[] = {
@@ -859,6 +868,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data },
{ .compatible = "mediatek,mt8188-mali", .data = &mediatek_mt8188_data },
{ .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data },
+ { .compatible = "allwinner,sun50i-h616-mali", .data = &allwinner_h616_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 47751302f1bc..4042afe2fbf4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
goto dump_header;
}
- ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
+ ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
@@ -228,7 +228,7 @@ void panfrost_core_dump(struct panfrost_job *job)
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
- drm_gem_vunmap_unlocked(&bo->base.base, &map);
+ drm_gem_vunmap(&bo->base.base, &map);
iter.hdr->bomap.valid = 1;
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
index 7ed0cd3ea2d4..52f9d69f6db9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_features.h
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -54,6 +54,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g72 (\
@@ -64,6 +65,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g51 hw_features_g72
@@ -77,6 +79,7 @@ enum panfrost_hw_feature {
BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
BIT_ULL(HW_FEATURE_COHERENCY_REG))
#define hw_features_g76 (\
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 8e0ff3efede7..963f04ba2de6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.base.import_attach || bo->base.pages)
+ if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
if (bo->base.madv == PANFROST_MADV_DONTNEED)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 3d9f51bd48b6..02b60ea1433a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -51,7 +51,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
goto unlock_mappings;
panfrost_gem_teardown_mappings_locked(bo);
- drm_gem_shmem_purge(&bo->base);
+ drm_gem_shmem_purge_locked(&bo->base);
ret = true;
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index b91019cd5acb..f6b91c052cfb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -26,6 +26,48 @@
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
+static u64 mair_to_memattr(u64 mair, bool coherent)
+{
+ u64 memattr = 0;
+ u32 i;
+
+ for (i = 0; i < 8; i++) {
+ u8 in_attr = mair >> (8 * i), out_attr;
+ u8 outer = in_attr >> 4, inner = in_attr & 0xf;
+
+ /* For caching to be enabled, the inner and outer caching policies
+ * must both be write-back. If either one is write-through or
+ * non-cacheable, we fall back to non-cacheable. Device memory is
+ * also translated to non-cacheable.
+ */
+ if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
+ AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
+ } else {
+ out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
+ AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ /* Use SH_MIDGARD_INNER mode when the device isn't coherent,
+ * so SH_IS, which is used when IOMMU_CACHE is set, maps
+ * to Mali's internal-shareable mode. As per the Mali
+ * Spec, inner and outer-shareable modes aren't allowed
+ * for WB memory when coherency is disabled.
+ * Use SH_CPU_INNER mode when coherency is enabled, so
+ * that SH_IS actually maps to the standard definition of
+ * inner-shareable.
+ */
+ if (!coherent)
+ out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
+ else
+ out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
+ }
+
+ memattr |= (u64)out_attr << (8 * i);
+ }
+
+ return memattr;
+}
+
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
@@ -124,9 +166,9 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev,
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as_nr = mmu->as;
- struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
- u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
- u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
+ u64 transtab = mmu->cfg.transtab;
+ u64 memattr = mmu->cfg.memattr;
+ u64 transcfg = mmu->cfg.transcfg;
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
@@ -139,6 +181,9 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -152,9 +197,67 @@ static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
+ mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
+ mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);
+
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
+static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+
+ /* TODO: The following fields are duplicated between the MMU and Page
+ * Table config structs. Ideally, they should be kept in one place.
+ */
+ mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
+ mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
+ mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
+
+ return 0;
+}
+
+static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
+{
+ struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
+ return -EINVAL;
+
+ mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+
+ mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
+ pgtbl_cfg->coherent_walk);
+
+ mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
+ AS_TRANSCFG_PTW_RA |
+ AS_TRANSCFG_ADRMODE_AARCH64_4K |
+ AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias);
+ if (pgtbl_cfg->coherent_walk)
+ mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;
+
+ return 0;
+}
+
+static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
+ enum io_pgtable_fmt fmt)
+{
+ struct panfrost_device *pfdev = mmu->pfdev;
+
+ switch (fmt) {
+ case ARM_64_LPAE_S1:
+ return mmu_cfg_init_aarch64_4k(mmu);
+ case ARM_MALI_LPAE:
+ return mmu_cfg_init_mali_lpae(mmu);
+ default:
+ /* This should never happen */
+ drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
+ return -EINVAL;
+ }
+}
+
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as;
@@ -327,7 +430,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
struct drm_gem_object *obj = &shmem->base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
- int prot = IOMMU_READ | IOMMU_WRITE;
+ int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
if (WARN_ON(mapping->active))
return 0;
@@ -489,7 +592,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_unlock;
}
bo->base.pages = pages;
- bo->base.pages_use_count = 1;
+ refcount_set(&bo->base.pages_use_count, 1);
} else {
pages = bo->base.pages;
if (pages[page_offset]) {
@@ -528,7 +631,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
mmu_map_sg(pfdev, bomapping->mmu, addr,
- IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
bomapping->active = true;
bo->heap_rss_size += SZ_2M;
@@ -615,7 +718,22 @@ static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
+ u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
+ u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
struct panfrost_mmu *mmu;
+ enum io_pgtable_fmt fmt;
+ int ret;
+
+ if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
+ if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
+ dev_err_once(pfdev->dev,
+ "AARCH64_4K page table not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+ fmt = ARM_64_LPAE_S1;
+ } else {
+ fmt = ARM_MALI_LPAE;
+ }
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
@@ -633,23 +751,33 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = SZ_4K | SZ_2M,
- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .ias = va_bits,
+ .oas = pa_bits,
.coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
- mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
- mmu);
+ mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
if (!mmu->pgtbl_ops) {
- kfree(mmu);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto err_free_mmu;
}
+ ret = panfrost_mmu_cfg_init(mmu, fmt);
+ if (ret)
+ goto err_free_io_pgtable;
+
kref_init(&mmu->refcount);
return mmu;
+
+err_free_io_pgtable:
+ free_io_pgtable_ops(mmu->pgtbl_ops);
+
+err_free_mmu:
+ kfree(mmu);
+ return ERR_PTR(ret);
}
static const char *access_type_name(struct panfrost_device *pfdev,
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index ba9b6e2b2636..52befead08c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo;
}
- ret = drm_gem_vmap_unlocked(&bo->base, &map);
+ ret = drm_gem_vmap(&bo->base, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
@@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0;
err_vunmap:
- drm_gem_vunmap_unlocked(&bo->base, &map);
+ drm_gem_vunmap(&bo->base, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
@@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
+ drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index c7bba476ab3f..2b8f1617b836 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -16,6 +16,8 @@
#define GROUPS_L2_COHERENT BIT(0) /* Cores groups are l2 coherent */
#define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */
+#define GPU_MMU_FEATURES_VA_BITS(x) ((x) & GENMASK(7, 0))
+#define GPU_MMU_FEATURES_PA_BITS(x) (((x) >> 8) & GENMASK(7, 0))
#define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */
#define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */
@@ -299,6 +301,17 @@
#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */
#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
+#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
+ ((w) ? BIT(0) : 0) | \
+ ((r) ? BIT(1) : 0))
+#define AS_MEMATTR_AARCH64_SH_MIDGARD_INNER (0 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER (1 << 4)
+#define AS_MEMATTR_AARCH64_SH_CPU_INNER_SHADER_COH (2 << 4)
+#define AS_MEMATTR_AARCH64_SHARED (0 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
+#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
+#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */
#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */
#define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */
@@ -309,6 +322,24 @@
/* Additional Bifrost AS registers */
#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */
#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */
+#define AS_TRANSCFG_ADRMODE_LEGACY (0 << 0)
+#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
+#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
+#define AS_TRANSCFG_ADRMODE_AARCH64_64K (8 << 0)
+#define AS_TRANSCFG_INA_BITS(x) ((x) << 6)
+#define AS_TRANSCFG_OUTA_BITS(x) ((x) << 14)
+#define AS_TRANSCFG_SL_CONCAT BIT(22)
+#define AS_TRANSCFG_PTW_MEMATTR_NC (1 << 24)
+#define AS_TRANSCFG_PTW_MEMATTR_WB (2 << 24)
+#define AS_TRANSCFG_PTW_SH_NS (0 << 28)
+#define AS_TRANSCFG_PTW_SH_OS (2 << 28)
+#define AS_TRANSCFG_PTW_SH_IS (3 << 28)
+#define AS_TRANSCFG_PTW_RA BIT(30)
+#define AS_TRANSCFG_DISABLE_HIER_AP BIT(33)
+#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
+#define AS_TRANSCFG_WXN BIT(35)
+#define AS_TRANSCFG_XREADABLE BIT(36)
#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
@@ -324,6 +355,11 @@
#define AS_TRANSTAB_LPAE_READ_INNER BIT(2)
#define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4)
+/*
+ * Begin AARCH64_4K MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_AARCH64_4K_ADDR_MASK 0xfffffffffffffff0
+
#define AS_STATUS_AS_ACTIVE 0x01
#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index a9da1d1eeb70..f0b2da5b2b96 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -171,10 +171,6 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
- ret = panthor_gpu_coherency_init(ptdev);
- if (ret)
- return ret;
-
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
if (ret)
@@ -184,6 +180,11 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
return ret;
+#ifdef CONFIG_DEBUG_FS
+ drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
+ INIT_LIST_HEAD(&ptdev->gems.node);
+#endif
+
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
p = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!p)
@@ -247,6 +248,10 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
goto err_rpm_put;
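+ /* Coherency init reads a GPU register, so it can only run once the
+ * device has been powered up and the GPU block brought up.
+ */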
+ ret = panthor_gpu_coherency_init(ptdev);
+ if (ret)
+ goto err_unplug_gpu;
+
ret = panthor_mmu_init(ptdev);
if (ret)
goto err_unplug_gpu;
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index da6574021664..465d3ab1b79e 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -205,6 +205,17 @@ struct panthor_device {
/** @fast_rate: Maximum device clock frequency. Set by DVFS */
unsigned long fast_rate;
+
+#ifdef CONFIG_DEBUG_FS
+ /** @gems: Device-wide list of GEM objects owned by at least one file. */
+ struct {
+ /** @gems.lock: Protects the device-wide list of GEM objects. */
+ struct mutex lock;
+
+ /** @gems.node: Used to keep track of all the device's GEM objects */
+ struct list_head node;
+ } gems;
+#endif
};
struct panthor_gpu_usage {
@@ -383,8 +394,6 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da
if (!status) \
break; \
\
- gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status); \
- \
__handler(ptdev, status); \
ret = IRQ_HANDLED; \
} \
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 06fe46e32073..6200cad22563 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -940,6 +940,7 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
struct drm_file *file)
{
struct drm_panthor_bo_mmap_offset *args = data;
+ struct panthor_gem_object *bo;
struct drm_gem_object *obj;
int ret;
@@ -950,6 +951,12 @@ static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
if (!obj)
return -ENOENT;
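+ /* BOs created with NO_MMAP must never be CPU-mapped, so refuse to hand
+ * out an mmap offset for them.
+ */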
+ bo = to_panthor_bo(obj);
+ if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) {
+ ret = -EPERM;
+ goto out;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
@@ -1331,6 +1338,46 @@ static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
return 0;
}
+static int panthor_ioctl_bo_set_label(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panthor_bo_set_label *args = data;
+ struct drm_gem_object *obj;
+ const char *label = NULL;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->label) {
+ label = strndup_user((const char __user *)(uintptr_t)args->label,
+ PANTHOR_BO_LABEL_MAXLEN);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
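+ /* strndup_user() returns -EINVAL when the string exceeds
+ * PANTHOR_BO_LABEL_MAXLEN; translate that to -E2BIG for userspace.
+ */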
+ if (ret == -EINVAL)
+ ret = -E2BIG;
+ goto err_put_obj;
+ }
+ }
+
+ /*
+ * We treat passing a label of length 0 and passing a NULL label
+ * differently, because even though they might seem conceptually
+ * similar, future uses of the BO label might expect a different
+ * behaviour in each case.
+ */
+ panthor_gem_bo_set_label(obj, label);
+
+err_put_obj:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
@@ -1400,6 +1447,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
};
static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1496,9 +1544,34 @@ static const struct file_operations panthor_drm_driver_fops = {
};
#ifdef CONFIG_DEBUG_FS
+static int panthor_gems_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
+
+ panthor_gem_debugfs_print_bos(ptdev, m);
+
+ return 0;
+}
+
+static struct drm_info_list panthor_debugfs_list[] = {
+ {"gems", panthor_gems_show, 0, NULL},
+};
+
+static int panthor_gems_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panthor_debugfs_list,
+ ARRAY_SIZE(panthor_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+
static void panthor_debugfs_init(struct drm_minor *minor)
{
panthor_mmu_debugfs_init(minor);
+ panthor_gems_debugfs_init(minor);
}
#endif
@@ -1509,6 +1582,7 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
* - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
+ * - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1522,7 +1596,7 @@ static const struct drm_driver panthor_drm_driver = {
.name = "panthor",
.desc = "Panthor DRM driver",
.major = 1,
- .minor = 3,
+ .minor = 4,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 0f52766a3120..7bc38e635329 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -449,7 +449,8 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Queue FW interface");
if (IS_ERR(mem))
return mem;
@@ -481,7 +482,8 @@ panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "FW suspend buffer");
}
static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
@@ -601,7 +603,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
section_size,
DRM_PANTHOR_BO_NO_MMAP,
- vm_map_flags, va);
+ vm_map_flags, va, "FW section");
if (IS_ERR(section->mem))
return PTR_ERR(section->mem);
@@ -1008,6 +1010,8 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
static void panthor_job_irq_handler(struct panthor_device *ptdev, u32 status)
{
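+ /* Acknowledge the IRQ before handling it, so events that fire while we
+ * are processing this one are not lost.
+ */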
+ gpu_write(ptdev, JOB_INT_CLEAR, status);
+
if (!ptdev->fw->booted && (status & JOB_INT_GLOBAL_IF))
ptdev->fw->booted = true;
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 8244a4e6c2a2..7c00fd77758b 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -2,6 +2,7 @@
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
+#include <linux/cleanup.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -10,14 +11,64 @@
#include <drm/panthor_drm.h>
#include "panthor_device.h"
+#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"
+#ifdef CONFIG_DEBUG_FS
+static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
+ struct panthor_gem_object *bo)
+{
+ INIT_LIST_HEAD(&bo->debugfs.node);
+
+ bo->debugfs.creator.tgid = current->group_leader->pid;
+ get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+ mutex_lock(&ptdev->gems.lock);
+ list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
+ mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
+{
+ struct panthor_device *ptdev = container_of(bo->base.base.dev,
+ struct panthor_device, base);
+
+ if (list_empty(&bo->debugfs.node))
+ return;
+
+ mutex_lock(&ptdev->gems.lock);
+ list_del_init(&bo->debugfs.node);
+ mutex_unlock(&ptdev->gems.lock);
+}
+
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
+{
+ bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+}
+#else
+static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev,
+ struct panthor_gem_object *bo)
+{}
+static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
+static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
+#endif
+
static void panthor_gem_free_object(struct drm_gem_object *obj)
{
struct panthor_gem_object *bo = to_panthor_bo(obj);
struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;
+ panthor_gem_debugfs_bo_rm(bo);
+
+ /*
+ * Label might have been allocated with kstrdup_const(),
+ * we need to take that into account when freeing the memory
+ */
+ kfree_const(bo->label.str);
+
+ mutex_destroy(&bo->label.lock);
+
drm_gem_free_mmap_offset(&bo->base.base);
mutex_destroy(&bo->gpuva_list_lock);
drm_gem_shmem_free(&bo->base);
@@ -67,17 +118,19 @@ out_free_bo:
* @gpu_va: GPU address assigned when mapping to the VM.
* If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
* automatically allocated.
+ * @name: Descriptive label of the BO's contents
*
* Return: A valid pointer in case of success, an ERR_PTR() otherwise.
*/
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va)
+ u64 gpu_va, const char *name)
{
struct drm_gem_shmem_object *obj;
struct panthor_kernel_bo *kbo;
struct panthor_gem_object *bo;
+ u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL;
int ret;
if (drm_WARN_ON(&ptdev->base, !vm))
@@ -97,6 +150,12 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ if (vm == panthor_fw_vm(ptdev))
+ debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;
+
+ panthor_gem_kernel_bo_set_label(kbo, name);
+ panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags);
+
/* The system and GPU MMU page size might differ, which becomes a
* problem for FW sections that need to be mapped at explicit address
* since our PAGE_SIZE alignment might cover a VA range that's
@@ -129,17 +188,6 @@ err_free_bo:
return ERR_PTR(ret);
}
-static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- struct panthor_gem_object *bo = to_panthor_bo(obj);
-
- /* Don't allow mmap on objects that have the NO_MMAP flag set. */
- if (bo->flags & DRM_PANTHOR_BO_NO_MMAP)
- return -EINVAL;
-
- return drm_gem_shmem_object_mmap(obj, vma);
-}
-
static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
@@ -155,7 +203,7 @@ static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
struct panthor_gem_object *bo = to_panthor_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.base.import_attach || bo->base.pages)
+ if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
return res;
@@ -169,7 +217,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = {
.get_sg_table = drm_gem_shmem_object_get_sg_table,
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
- .mmap = panthor_gem_mmap,
+ .mmap = drm_gem_shmem_object_mmap,
.status = panthor_gem_status,
.export = panthor_gem_prime_export,
.vm_ops = &drm_gem_shmem_vm_ops,
@@ -196,6 +244,9 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
obj->base.map_wc = !ptdev->coherent;
mutex_init(&obj->gpuva_list_lock);
drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+ mutex_init(&obj->label.lock);
+
+ panthor_gem_debugfs_bo_add(ptdev, obj);
return &obj->base.base;
}
@@ -245,5 +296,153 @@ panthor_gem_create_with_handle(struct drm_file *file,
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
+ /*
+ * No explicit flags are needed in the call below, since the
+ * function internally sets the INITIALIZED bit for us.
+ */
+ panthor_gem_debugfs_set_usage_flags(bo, 0);
+
return ret;
}
+
+void
+panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+ const char *old_label;
+
+ scoped_guard(mutex, &bo->label.lock) {
+ old_label = bo->label.str;
+ bo->label.str = label;
+ }
+
+ kfree_const(old_label);
+}
+
+void
+panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
+{
+ const char *str;
+
+ /* We should never attempt labelling a UM-exposed GEM object */
+ if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0))
+ return;
+
+ if (!label)
+ return;
+
+ str = kstrdup_const(label, GFP_KERNEL);
+ if (!str) {
+ /* Failing to allocate memory for a label isn't a fatal condition */
+ drm_warn(bo->obj->dev, "Not enough memory to allocate BO label");
+ return;
+ }
+
+ panthor_gem_bo_set_label(bo->obj, str);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+ size_t size;
+ size_t resident;
+ size_t reclaimable;
+};
+
+static void panthor_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+ int len;
+ int i;
+
+ static const char * const gem_state_flags_names[] = {
+ [PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported",
+ [PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported",
+ };
+
+ static const char * const gem_usage_flags_names[] = {
+ [PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel",
+ [PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped",
+ };
+
+ seq_puts(m, "GEM state flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+ if (!gem_state_flags_names[i])
+ continue;
+ seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i],
+ (u32)BIT(i), (i < len - 1) ? ", " : "\n");
+ }
+
+ seq_puts(m, "GEM usage flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) {
+ if (!gem_usage_flags_names[i])
+ continue;
+ seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i],
+ (u32)BIT(i), (i < len - 1) ? ", " : "\n\n");
+ }
+}
+
+static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
+ struct seq_file *m,
+ struct gem_size_totals *totals)
+{
+ unsigned int refcount = kref_read(&bo->base.base.refcount);
+ char creator_info[32] = {};
+ size_t resident_size;
+ u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED;
+ u32 gem_state_flags = 0;
+
+ /* Skip BOs being destroyed. */
+ if (!refcount)
+ return;
+
+ resident_size = bo->base.pages ? bo->base.base.size : 0;
+
+ snprintf(creator_info, sizeof(creator_info),
+ "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+ seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+ creator_info,
+ bo->base.base.name,
+ refcount,
+ bo->base.base.size,
+ resident_size,
+ drm_vma_node_start(&bo->base.base.vma_node));
+
+ if (bo->base.base.import_attach)
+ gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+ if (bo->base.base.dma_buf)
+ gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+ seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags);
+
+ scoped_guard(mutex, &bo->label.lock) {
+ seq_printf(m, "%s\n", bo->label.str ? : "");
+ }
+
+ totals->size += bo->base.base.size;
+ totals->resident += resident_size;
+ if (bo->base.madv > 0)
+ totals->reclaimable += resident_size;
+}
+
+void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
+ struct seq_file *m)
+{
+ struct gem_size_totals totals = {0};
+ struct panthor_gem_object *bo;
+
+ panthor_gem_debugfs_print_flag_names(m);
+
+ seq_puts(m, "created-by global-name refcount size resident-size file-offset state usage label\n");
+ seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+ scoped_guard(mutex, &ptdev->gems.lock) {
+ list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
+ if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED)
+ panthor_gem_debugfs_bo_print(bo, m, &totals);
+ }
+ }
+
+ seq_puts(m, "==============================================================================================================================================\n");
+ seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+ totals.size, totals.resident, totals.reclaimable);
+}
+#endif
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 5749ef2ebe03..4dd732dcd59f 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -13,6 +13,56 @@
struct panthor_vm;
+#define PANTHOR_BO_LABEL_MAXLEN 4096
+
+enum panthor_debugfs_gem_state_flags {
+ PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT = 0,
+ PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT = 1,
+
+ /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */
+ PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */
+ PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT),
+};
+
+enum panthor_debugfs_gem_usage_flags {
+ PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT = 0,
+ PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT = 1,
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL: BO is for kernel use only. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL = BIT(PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT),
+
+ /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED: BO is ready for DebugFS display. */
+ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED = BIT(31),
+};
+
+/**
+ * struct panthor_gem_debugfs - GEM object's DebugFS list information
+ */
+struct panthor_gem_debugfs {
+ /**
+ * @node: Node used to insert the object in the device-wide list of
+ * GEM objects, to display information about it through a DebugFS file.
+ */
+ struct list_head node;
+
+ /** @creator: Information about the UM process which created the GEM. */
+ struct {
+ /** @creator.process_name: Group leader name in owning thread's process */
+ char process_name[TASK_COMM_LEN];
+
+ /** @creator.tgid: PID of the thread's group leader within its process */
+ pid_t tgid;
+ } creator;
+
+ /** @flags: Combination of panthor_debugfs_gem_usage_flags flags */
+ u32 flags;
+};
+
/**
* struct panthor_gem_object - Driver specific GEM object.
*/
@@ -46,6 +96,24 @@ struct panthor_gem_object {
/** @flags: Combination of drm_panthor_bo_flags flags. */
u32 flags;
+
+ /**
+ * @label: BO tagging fields. The label can be assigned within the
+ * driver itself or through a specific IOCTL.
+ */
+ struct {
+ /**
+ * @label.str: Pointer to a NULL-terminated string.
+ */
+ const char *str;
+
+ /** @label.lock: Protects access to the @label.str field. */
+ struct mutex lock;
+ } label;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panthor_gem_debugfs debugfs;
+#endif
};
/**
@@ -91,6 +159,9 @@ panthor_gem_create_with_handle(struct drm_file *file,
struct panthor_vm *exclusive_vm,
u64 *size, u32 flags, uint32_t *handle);
+void panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label);
+void panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label);
+
static inline u64
panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
{
@@ -112,7 +183,7 @@ panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo)
if (bo->kmap)
return 0;
- ret = drm_gem_vmap_unlocked(bo->obj, &map);
+ ret = drm_gem_vmap(bo->obj, &map);
if (ret)
return ret;
@@ -126,7 +197,7 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
if (bo->kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap);
- drm_gem_vunmap_unlocked(bo->obj, &map);
+ drm_gem_vunmap(bo->obj, &map);
bo->kmap = NULL;
}
}
@@ -134,8 +205,13 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va);
+ u64 gpu_va, const char *name);
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
+#ifdef CONFIG_DEBUG_FS
+void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
+ struct seq_file *m);
+#endif
+
#endif /* __PANTHOR_GEM_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 671049020afa..32d678a0114e 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -150,6 +150,8 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev)
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
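+ /* As in the other IRQ handlers, clear the pending bits first so new
+ * events raised while we process this one still latch.
+ */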
+ gpu_write(ptdev, GPU_INT_CLEAR, status);
+
if (status & GPU_IRQ_FAULT) {
u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index 3bdf61c14264..d236e9ceade4 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -151,7 +151,8 @@ static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Tiler heap chunk");
if (IS_ERR(chunk->bo)) {
ret = PTR_ERR(chunk->bo);
goto err_free_chunk;
@@ -555,7 +556,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
pool->gpu_contexts = panthor_kernel_bo_create(ptdev, vm, bosize,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Heap pool");
if (IS_ERR(pool->gpu_contexts)) {
ret = PTR_ERR(pool->gpu_contexts);
goto err_destroy_pool;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 12a02e28f50f..6ca9a2642a4e 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -781,6 +781,7 @@ out_enable_as:
if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
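+ /* The fault handler masked this AS's interrupts; restore them in the
+ * cached mask too, so the MMU_INT_MASK write below re-enables fault
+ * reporting.
+ */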
+ ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
}
@@ -1103,7 +1104,7 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
/* If the vm_bo object was destroyed, release the pin reference that
* was held by this object.
*/
- if (unpin && !bo->base.base.import_attach)
+ if (unpin && !drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
drm_gpuvm_put(vm);
@@ -1234,7 +1235,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
if (ret)
goto err_cleanup;
- if (!bo->base.base.import_attach) {
+ if (!drm_gem_is_imported(&bo->base.base)) {
/* Pre-reserve the BO pages, so the map operation doesn't have to
* allocate.
*/
@@ -1245,7 +1246,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
- if (!bo->base.base.import_attach)
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
ret = PTR_ERR(sgt);
@@ -1256,7 +1257,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
if (!preallocated_vm_bo) {
- if (!bo->base.base.import_attach)
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
ret = -ENOMEM;
@@ -1282,7 +1283,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
* which will be released in panthor_vm_bo_put().
*/
if (preallocated_vm_bo != op_ctx->map.vm_bo &&
- !bo->base.base.import_attach)
+ !drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
op_ctx->map.bo_offset = offset;
@@ -1709,11 +1710,17 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
access_type, access_type_name(ptdev, fault_status),
source_id);
+ /* We don't handle VM faults at the moment, so let's just clear the
+ * interrupt and let the writer/reader crash.
+ * Note that COMPLETED irqs are never cleared, but this is fine
+ * because they are always masked.
+ */
+ gpu_write(ptdev, MMU_INT_CLEAR, mask);
+
/* Ignore MMU interrupts on this AS until it's been
* re-enabled.
*/
ptdev->mmu->irq.mask = new_int_mask;
- gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
if (ptdev->mmu->as.slots[as].vm)
ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index b7b3b3add166..a7a323dc5cf9 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -133,8 +133,8 @@
#define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name)
#define GPU_COHERENCY_PROTOCOL 0x304
-#define GPU_COHERENCY_ACE 0
-#define GPU_COHERENCY_ACE_LITE 1
+#define GPU_COHERENCY_ACE_LITE 0
+#define GPU_COHERENCY_ACE 1
#define GPU_COHERENCY_NONE 31
#define MCU_CONTROL 0x700
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 4d31d1967716..43ee57728de5 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -840,7 +840,7 @@ panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
if (queue->syncwait.kmap) {
struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
- drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
+ drm_gem_vunmap(queue->syncwait.obj, &map);
queue->syncwait.kmap = NULL;
}
@@ -866,7 +866,7 @@ panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue
goto err_put_syncwait_obj;
queue->syncwait.obj = &bo->base.base;
- ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
+ ret = drm_gem_vmap(queue->syncwait.obj, &map);
if (drm_WARN_ON(&ptdev->base, ret))
goto err_put_syncwait_obj;
@@ -3332,7 +3332,8 @@ group_create_queue(struct panthor_group *group,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "CS ring buffer");
if (IS_ERR(queue->ringbuf)) {
ret = PTR_ERR(queue->ringbuf);
goto err_free_queue;
@@ -3362,7 +3363,8 @@ group_create_queue(struct panthor_group *group,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group job stats");
if (IS_ERR(queue->profiling.slots)) {
ret = PTR_ERR(queue->profiling.slots);
@@ -3493,7 +3495,8 @@ int panthor_group_create(struct panthor_file *pfile,
DRM_PANTHOR_BO_NO_MMAP,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
- PANTHOR_VM_KERNEL_AUTO_VA);
+ PANTHOR_VM_KERNEL_AUTO_VA,
+ "Group sync objects");
if (IS_ERR(group->syncobjs)) {
ret = PTR_ERR(group->syncobjs);
goto err_put_group;
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 1e4b28d03f4d..5f460b296c0c 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -501,7 +501,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
* if we find it, it will take precedence. This is on the Integrator/AP
* which only has this option for PL110 graphics.
*/
- if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
&clcd_id);
if (np)
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 69427eb8bed2..d8f24bcae34b 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_QXL
tristate "QXL virtual GPU"
- depends on DRM && PCI && MMU && HAS_IOPORT
+ depends on DRM && PCI && HAS_IOPORT
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index f51bace9555d..c479f0c0dd5c 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -2,7 +2,7 @@
config DRM_RADEON
tristate "ATI Radeon"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on AGP || !AGP
select FW_LOADER
select DRM_CLIENT_SELECTION
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 2db40789235c..1afa70566985 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -3825,8 +3825,7 @@ typedef struct _ATOM_DPCD_INFO
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
-// (in offset to start of memory address) is KB aligned instead of byte aligend.
-/***********************************************************************************/
+// (in offset to start of memory address) is KB aligned instead of byte aligned.
// Note3:
/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter,
for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have:
@@ -5072,7 +5071,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
+#define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fa78824931cc..3f3c360dce4b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -501,8 +501,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
- <= 0)
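+ /* drm_dp_dpcd_read_link_status() now returns 0 on success and a negative
+ * errno on failure rather than a byte count, hence the "< 0" check.
+ */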
+ if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -678,7 +678,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -741,7 +741,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 11a492f21157..51a3e0fc2f56 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8548,7 +8548,7 @@ int cik_suspend(struct radeon_device *rdev)
*/
int cik_init(struct radeon_device *rdev)
{
- struct radeon_ring *ring;
+ struct radeon_ring *ring, *ring_cp1, *ring_cp2;
int r;
/* Read BIOS */
@@ -8623,19 +8623,22 @@ int cik_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+ ring_cp1 = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ ring_cp2 = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ ring_cp1->ring_obj = NULL;
+ ring_cp2->ring_obj = NULL;
+ ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;
+ ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS;
+
+ r600_ring_init(rdev, ring_cp1, 1024 * 1024);
+ r = radeon_doorbell_get(rdev, &ring_cp1->doorbell_index);
if (r)
return r;
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 1024 * 1024);
- r = radeon_doorbell_get(rdev, &ring->doorbell_index);
+ r600_ring_init(rdev, ring_cp2, 1024 * 1024);
+ r = radeon_doorbell_get(rdev, &ring_cp2->doorbell_index);
if (r)
- return r;
+ goto out;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring->ring_obj = NULL;
@@ -8653,12 +8656,16 @@ int cik_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
- return r;
+ goto out;
rdev->accel_working = true;
r = cik_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
+ radeon_doorbell_free(rdev, ring_cp1->doorbell_index);
+ radeon_doorbell_free(rdev, ring_cp2->doorbell_index);
+ ring_cp1->doorbell_index = RADEON_MAX_DOORBELLS;
+ ring_cp2->doorbell_index = RADEON_MAX_DOORBELLS;
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_irq_fini(rdev);
@@ -8678,10 +8685,16 @@ int cik_init(struct radeon_device *rdev)
*/
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
return 0;
+
+out:
+ radeon_doorbell_free(rdev, ring_cp1->doorbell_index);
+ radeon_doorbell_free(rdev, ring_cp2->doorbell_index);
+ return r;
}
/**
@@ -8695,6 +8708,7 @@ int cik_init(struct radeon_device *rdev)
*/
void cik_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
radeon_pm_fini(rdev);
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
@@ -8708,6 +8722,10 @@ void cik_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ radeon_doorbell_free(rdev, ring->doorbell_index);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ radeon_doorbell_free(rdev, ring->doorbell_index);
radeon_uvd_fini(rdev);
radeon_vce_fini(rdev);
cik_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 55dbf450bd9c..4aa050385284 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2329,7 +2329,7 @@ static int kv_parse_sys_info_table(struct radeon_device *rdev)
le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
}
if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
- SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS)
pi->caps_enable_dfs_bypass = true;
sumo_construct_sclk_voltage_mapping_table(rdev,
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 661f374f5f27..9758f3a9df75 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,28 +290,6 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
return result;
}
-/*
- * write the audio workaround status to the hardware
- */
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset = dig->afmt->offset;
- bool hdmi_audio_workaround = false; /* FIXME */
- u32 value;
-
- if (!hdmi_audio_workaround ||
- r600_hdmi_is_audio_buffer_filled(encoder))
- value = 0; /* disable workaround */
- else
- value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
- WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
- value, ~HDMI0_AUDIO_TEST_EN);
-}
-
void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
struct radeon_crtc *crtc, unsigned int clock)
{
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8605c074d9f7..63c47585afbc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -394,9 +394,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, l
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_any(struct radeon_device *rdev,
- struct radeon_fence **fences,
- bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 8f5e07834fcc..9e697f10f9ca 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -401,7 +401,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
size_t size);
void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
u32 r600_get_xclk(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 64b26bfeafc9..b8e6202f1d5b 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -409,7 +409,6 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
* radeon_cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
- * @backoff: indicator to backoff the reservation
*
* If error is set then unvalidate the buffer, otherwise just free memory
* used by parsing context.
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 8ff4f18b51a9..5b5b54e876d4 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -575,48 +575,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
}
/**
- * radeon_fence_wait_any - wait for a fence to signal on any ring
- *
- * @rdev: radeon device pointer
- * @fences: radeon fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics). Fence
- * array is indexed by ring id. @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int radeon_fence_wait_any(struct radeon_device *rdev,
- struct radeon_fence **fences,
- bool intr)
-{
- uint64_t seq[RADEON_NUM_RINGS];
- unsigned int i, num_rings = 0;
- long r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- seq[i] = 0;
-
- if (!fences[i])
- continue;
-
- seq[i] = fences[i]->seq;
- ++num_rings;
- }
-
- /* nothing to wait for ? */
- if (num_rings == 0)
- return -ENOENT;
-
- r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-/**
* radeon_fence_wait_next - wait for the next fence to signal
*
* @rdev: radeon device pointer
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 65a911ddd509..f9267b026f8d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1109,7 +1109,7 @@
#define MIN_POWER_SHIFT 0
#define MAX_POWER(x) ((x) << 16)
#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
+#define MAX_POWER_SHIFT 16
#define SQ_POWER_THROTTLE2 0x8e5c
#define MAX_POWER_DELTA(x) ((x) << 0)
#define MAX_POWER_DELTA_MASK (0x3fff << 0)
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
index 79b67c406bd6..93ba115d654f 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
@@ -32,11 +32,6 @@ struct rcar_cmm {
} lut;
};
-static inline int rcar_cmm_read(struct rcar_cmm *rcmm, u32 reg)
-{
- return ioread32(rcmm->base + reg);
-}
-
static inline void rcar_cmm_write(struct rcar_cmm *rcmm, u32 reg, u32 data)
{
iowrite32(data, rcmm->base + reg);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 70d8ad065bfa..4c8fe83dd610 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -705,7 +705,7 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
ret = of_parse_phandle_with_fixed_args(np, vsps_prop_name,
cells, i, &args);
if (ret < 0)
- goto error;
+ goto done;
/*
* Add the VSP to the list or update the corresponding existing
@@ -743,13 +743,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsp->dev = rcdu;
ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
- if (ret < 0)
- goto error;
+ if (ret)
+ goto done;
}
- return 0;
-
-error:
+done:
for (i = 0; i < ARRAY_SIZE(vsps); ++i)
of_node_put(vsps[i].np);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index 380a855b832a..a9145253294f 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -634,6 +634,7 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
}
static int rcar_lvds_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -641,7 +642,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
if (!lvds->next_bridge)
return 0;
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index d1e626068065..7ab8be46c7f6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -799,11 +799,12 @@ static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
*/
static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index 7c1817240846..e57536fd6f4d 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -14,10 +14,15 @@ config DRM_RZG2L_DU
Choose this option if you have an RZ/G2L alike chipset.
If M is selected the module will be called rzg2l-du-drm.
-config DRM_RZG2L_MIPI_DSI
- tristate "RZ/G2L MIPI DSI Encoder Support"
- depends on DRM && DRM_BRIDGE && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select DRM_MIPI_DSI
+config DRM_RZG2L_USE_MIPI_DSI
+ bool "RZ/G2L MIPI DSI Encoder Support"
+ depends on DRM_BRIDGE && OF
+ depends on DRM_RZG2L_DU || COMPILE_TEST
+ default DRM_RZG2L_DU
help
Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders.
+
+config DRM_RZG2L_MIPI_DSI
+ def_tristate DRM_RZG2L_DU
+ depends on DRM_RZG2L_USE_MIPI_DSI
+ select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index cbd9b9841267..5e40f0c1e7b0 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -79,7 +79,7 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops);
static const struct drm_driver rzg2l_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .dumb_create = rzg2l_du_dumb_create,
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(rzg2l_du_dumb_create),
DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &rzg2l_du_fops,
.name = "rzg2l-du",
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index 90c6269ccd29..55a97691e9b2 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -36,23 +36,129 @@
static const struct rzg2l_du_format_info rzg2l_du_format_infos[] = {
{
- .fourcc = DRM_FORMAT_XRGB8888,
- .v4l2 = V4L2_PIX_FMT_XBGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_RGB332,
+ .v4l2 = V4L2_PIX_FMT_RGB332,
.planes = 1,
.hsub = 1,
}, {
- .fourcc = DRM_FORMAT_ARGB8888,
- .v4l2 = V4L2_PIX_FMT_ABGR32,
- .bpp = 32,
+ .fourcc = DRM_FORMAT_ARGB4444,
+ .v4l2 = V4L2_PIX_FMT_ARGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB4444,
+ .v4l2 = V4L2_PIX_FMT_XRGB444,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB1555,
+ .v4l2 = V4L2_PIX_FMT_ARGB555,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB1555,
+ .v4l2 = V4L2_PIX_FMT_XRGB555,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGB565,
+ .v4l2 = V4L2_PIX_FMT_RGB565,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGR888,
+ .v4l2 = V4L2_PIX_FMT_RGB24,
.planes = 1,
.hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
- .bpp = 24,
.planes = 1,
.hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRA8888,
+ .v4l2 = V4L2_PIX_FMT_ARGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRX8888,
+ .v4l2 = V4L2_PIX_FMT_XRGB32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_UYVY,
+ .v4l2 = V4L2_PIX_FMT_UYVY,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUYV,
+ .v4l2 = V4L2_PIX_FMT_YUYV,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVYU,
+ .v4l2 = V4L2_PIX_FMT_YVYU,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV12,
+ .v4l2 = V4L2_PIX_FMT_NV12M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV21,
+ .v4l2 = V4L2_PIX_FMT_NV21M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV16,
+ .v4l2 = V4L2_PIX_FMT_NV16M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_NV61,
+ .v4l2 = V4L2_PIX_FMT_NV61M,
+ .planes = 2,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV420,
+ .v4l2 = V4L2_PIX_FMT_YUV420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU420,
+ .v4l2 = V4L2_PIX_FMT_YVU420M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV422,
+ .v4l2 = V4L2_PIX_FMT_YUV422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YVU422,
+ .v4l2 = V4L2_PIX_FMT_YVU422M,
+ .planes = 3,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_YUV444,
+ .v4l2 = V4L2_PIX_FMT_YUV444M,
+ .planes = 3,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_YVU444,
+ .v4l2 = V4L2_PIX_FMT_YVU444M,
+ .planes = 3,
+ .hsub = 1,
}
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
index 876e97cfbf45..e2c599f115c6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.h
@@ -23,7 +23,6 @@ struct sg_table;
struct rzg2l_du_format_info {
u32 fourcc;
u32 v4l2;
- unsigned int bpp;
unsigned int planes;
unsigned int hsub;
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
index 8643ff2eec46..040d4e4aff00 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_vsp.c
@@ -340,6 +340,15 @@ int rzg2l_du_vsp_init(struct rzg2l_du_vsp *vsp, struct device_node *np,
drm_plane_helper_add(&plane->plane,
&rzg2l_du_vsp_plane_helper_funcs);
+
+ drm_plane_create_alpha_property(&plane->plane);
+ drm_plane_create_zpos_property(&plane->plane, i, 0,
+ num_planes - 1);
+
+ drm_plane_create_blend_mode_property(&plane->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
}
return 0;
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index 4550c6d84796..dc6ab012cdb6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -479,7 +479,7 @@ static int rzg2l_mipi_dsi_start_video(struct rzg2l_mipi_dsi *dsi)
u32 status;
int ret;
- /* Configuration for Blanking sequence and start video input*/
+ /* Configuration for Blanking sequence and start video input */
vich1set0r = VICH1SET0R_HFPNOLP | VICH1SET0R_HBPNOLP |
VICH1SET0R_HSANOLP | VICH1SET0R_VSTART;
rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, vich1set0r);
@@ -523,11 +523,12 @@ err:
*/
static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge,
+ return drm_bridge_attach(encoder, dsi->next_bridge, bridge,
flags);
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 26c4410b2407..ab525668939a 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,12 +2,14 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
+ depends on OF
select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index a8265a1bf9ff..d30f0983a53a 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -21,6 +21,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -51,11 +52,13 @@ struct rockchip_grf_reg_field {
/**
* struct rockchip_dp_chip_data - split out the GRF settings of the various chips
* @lcdc_sel: grf register field of lcdc_sel
+ * @edp_mode: grf register field of edp_mode
* @chip_type: specific chip type
* @reg: register base address
*/
struct rockchip_dp_chip_data {
const struct rockchip_grf_reg_field lcdc_sel;
+ const struct rockchip_grf_reg_field edp_mode;
u32 chip_type;
u32 reg;
};
@@ -70,6 +73,7 @@ struct rockchip_dp_device {
struct clk *grfclk;
struct regmap *grf;
struct reset_control *rst;
+ struct reset_control *apbrst;
const struct rockchip_dp_chip_data *data;
@@ -115,6 +119,10 @@ static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
usleep_range(10, 20);
reset_control_deassert(dp->rst);
+ reset_control_assert(dp->apbrst);
+ usleep_range(10, 20);
+ reset_control_deassert(dp->apbrst);
+
return 0;
}
@@ -136,12 +144,21 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
return ret;
}
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 1);
+ if (ret != 0)
+ DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret);
+
return ret;
}
static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
{
struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
+ int ret;
+
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->edp_mode, 0);
+ if (ret != 0)
+ DRM_DEV_ERROR(dp->dev, "failed to set edp mode %d\n", ret);
clk_disable_unprepare(dp->pclk);
@@ -205,6 +222,10 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
struct rockchip_dp_device *dp = encoder_to_dp(encoder);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
+ struct of_endpoint endpoint;
+ struct device_node *remote_port, *remote_port_parent;
+ char name[32];
+ u32 port_id;
int ret;
crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
@@ -222,13 +243,27 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
return;
}
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ ret = drm_of_encoder_active_endpoint(dp->dev->of_node, encoder, &endpoint);
if (ret < 0)
return;
- DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+ remote_port_parent = of_graph_get_remote_port_parent(endpoint.local_node);
+ if (remote_port_parent) {
+ if (of_get_child_by_name(remote_port_parent, "ports")) {
+ remote_port = of_graph_get_remote_port(endpoint.local_node);
+ of_property_read_u32(remote_port, "reg", &port_id);
+ of_node_put(remote_port);
+ snprintf(name, sizeof(name), "%s vp%d", remote_port_parent->full_name, port_id);
+ } else {
+ snprintf(name, sizeof(name), "%s %s",
+ remote_port_parent->full_name, endpoint.id ? "vopl" : "vopb");
+ }
+ of_node_put(remote_port_parent);
+
+ DRM_DEV_DEBUG(dp->dev, "%s output to dp\n", name);
+ }
- ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, ret);
+ ret = rockchip_grf_field_write(dp->grf, &dp->data->lcdc_sel, endpoint.id);
if (ret != 0)
DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
@@ -322,6 +357,12 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
return PTR_ERR(dp->rst);
}
+ dp->apbrst = devm_reset_control_get_optional(dev, "apb");
+ if (IS_ERR(dp->apbrst)) {
+ DRM_DEV_ERROR(dev, "failed to get apb reset control\n");
+ return PTR_ERR(dp->apbrst);
+ }
+
return 0;
}
@@ -392,11 +433,28 @@ static const struct component_ops rockchip_dp_component_ops = {
.unbind = rockchip_dp_unbind,
};
+static int rockchip_dp_link_panel(struct drm_dp_aux *aux)
+{
+ struct analogix_dp_plat_data *plat_data = analogix_dp_aux_to_plat_data(aux);
+ struct rockchip_dp_device *dp = pdata_encoder_to_dp(plat_data);
+ int ret;
+
+ /*
+ * If drm_of_find_panel_or_bridge() returns -ENODEV, no valid panel or bridge
+ * node was found. Carry on anyway, to support driver-free bridges and DP
+ * mode applications.
+ */
+ ret = drm_of_find_panel_or_bridge(dp->dev->of_node, 1, 0, &plat_data->panel, NULL);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return component_add(dp->dev, &rockchip_dp_component_ops);
+}
+
static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct rockchip_dp_chip_data *dp_data;
- struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
struct resource *res;
int i;
@@ -406,10 +464,6 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (!dp_data)
return -ENODEV;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret < 0 && ret != -ENODEV)
- return ret;
-
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -432,7 +486,6 @@ static int rockchip_dp_probe(struct platform_device *pdev)
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
- dp->plat_data.panel = panel;
dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
dp->plat_data.power_off = rockchip_dp_powerdown;
@@ -448,9 +501,20 @@ static int rockchip_dp_probe(struct platform_device *pdev)
if (IS_ERR(dp->adp))
return PTR_ERR(dp->adp);
- ret = component_add(dev, &rockchip_dp_component_ops);
- if (ret)
- return ret;
+ ret = devm_of_dp_aux_populate_bus(analogix_dp_get_aux(dp->adp), rockchip_dp_link_panel);
+ if (ret) {
+ /*
+ * If devm_of_dp_aux_populate_bus() returns -ENODEV, done_probing() will not
+ * be called because there are no EP devices. In that case, call
+ * rockchip_dp_link_panel() directly to support the other valid DT
+ * configurations.
+ *
+ * NOTE: devm_of_dp_aux_populate_bus() may also return -EPROBE_DEFER.
+ */
+ if (ret != -ENODEV)
+ return dev_err_probe(dp->dev, ret, "failed to populate aux bus\n");
+
+ return rockchip_dp_link_panel(analogix_dp_get_aux(dp->adp));
+ }
return 0;
}
@@ -501,9 +565,24 @@ static const struct rockchip_dp_chip_data rk3288_dp[] = {
{ /* sentinel */ }
};
+static const struct rockchip_dp_chip_data rk3588_edp[] = {
+ {
+ .edp_mode = GRF_REG_FIELD(0x0000, 0, 0),
+ .chip_type = RK3588_EDP,
+ .reg = 0xfdec0000,
+ },
+ {
+ .edp_mode = GRF_REG_FIELD(0x0004, 0, 0),
+ .chip_type = RK3588_EDP,
+ .reg = 0xfded0000,
+ },
+ { /* sentinel */ }
+};
+
static const struct of_device_id rockchip_dp_dt_ids[] = {
{.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
{.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
+ {.compatible = "rockchip,rk3588-edp", .data = &rk3588_edp },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
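
Note: the probe rework above is an instance of the generic DP AUX bus pattern
from <drm/display/drm_dp_aux_bus.h>: populate the AUX bus, let the panel probe
as an AUX endpoint, and defer final registration to the done_probing()
callback. A minimal sketch of the pattern (the my_* names are hypothetical;
the core calls are the real API):

	static int my_done_probing(struct drm_dp_aux *aux)
	{
		/* The AUX-bus endpoint (panel) has probed; finish registration. */
		return component_add(aux->dev, &my_component_ops);
	}

	ret = devm_of_dp_aux_populate_bus(aux, my_done_probing);
	if (ret == -ENODEV)	/* no aux-bus child node in DT */
		ret = my_done_probing(aux);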
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 483ecfeaebb0..db4b4038e51d 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,10 +10,12 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
+#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -29,8 +31,19 @@
#include "inno_hdmi.h"
+#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
+
#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U
+#define RK3036_GRF_SOC_CON2 0x148
+#define RK3036_HDMI_PHSYNC BIT(4)
+#define RK3036_HDMI_PVSYNC BIT(5)
+
+enum inno_hdmi_dev_type {
+ RK3036_HDMI,
+ RK3128_HDMI,
+};
+
struct inno_hdmi_phy_config {
unsigned long pixelclock;
u8 pre_emphasis;
@@ -38,6 +51,7 @@ struct inno_hdmi_phy_config {
};
struct inno_hdmi_variant {
+ enum inno_hdmi_dev_type dev_type;
struct inno_hdmi_phy_config *phy_configs;
struct inno_hdmi_phy_config *default_phy_config;
};
@@ -58,6 +72,7 @@ struct inno_hdmi {
struct clk *pclk;
struct clk *refclk;
void __iomem *regs;
+ struct regmap *grf;
struct drm_connector connector;
struct rockchip_encoder encoder;
@@ -374,7 +389,15 @@ static int inno_hdmi_config_video_csc(struct inno_hdmi *hdmi)
static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
struct drm_display_mode *mode)
{
- int value;
+ int value, psync;
+
+ if (hdmi->variant->dev_type == RK3036_HDMI) {
+ psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? RK3036_HDMI_PHSYNC : 0;
+ value = HIWORD_UPDATE(psync, RK3036_HDMI_PHSYNC);
+ psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? RK3036_HDMI_PVSYNC : 0;
+ value |= HIWORD_UPDATE(psync, RK3036_HDMI_PVSYNC);
+ regmap_write(hdmi->grf, RK3036_GRF_SOC_CON2, value);
+ }
/* Set detail external video timing polarity and interlace mode */
value = v_EXTERANL_VIDEO(1);
@@ -885,32 +908,34 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->regs);
hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
- if (IS_ERR(hdmi->pclk)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI pclk clk\n");
- return PTR_ERR(hdmi->pclk);
- }
+ if (IS_ERR(hdmi->pclk))
+ return dev_err_probe(dev, PTR_ERR(hdmi->pclk), "Unable to get HDMI pclk\n");
ret = clk_prepare_enable(hdmi->pclk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Cannot enable HDMI pclk clock: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot enable HDMI pclk: %d\n", ret);
hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref");
if (IS_ERR(hdmi->refclk)) {
- DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI reference clock\n");
- ret = PTR_ERR(hdmi->refclk);
+ ret = dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n");
goto err_disable_pclk;
}
ret = clk_prepare_enable(hdmi->refclk);
if (ret) {
- DRM_DEV_ERROR(hdmi->dev,
- "Cannot enable HDMI reference clock: %d\n", ret);
+ ret = dev_err_probe(dev, ret, "Cannot enable HDMI refclk: %d\n", ret);
goto err_disable_pclk;
}
+ if (hdmi->variant->dev_type == RK3036_HDMI) {
+ hdmi->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(hdmi->grf)) {
+ ret = dev_err_probe(dev, PTR_ERR(hdmi->grf),
+ "Unable to get rockchip,grf\n");
+ goto err_disable_clk;
+ }
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
@@ -995,11 +1020,13 @@ static void inno_hdmi_remove(struct platform_device *pdev)
}
static const struct inno_hdmi_variant rk3036_inno_hdmi_variant = {
+ .dev_type = RK3036_HDMI,
.phy_configs = rk3036_hdmi_phy_configs,
.default_phy_config = &rk3036_hdmi_phy_configs[1],
};
static const struct inno_hdmi_variant rk3128_inno_hdmi_variant = {
+ .dev_type = RK3128_HDMI,
.phy_configs = rk3128_hdmi_phy_configs,
.default_phy_config = &rk3128_hdmi_phy_configs[1],
};
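
Note: HIWORD_UPDATE() above implements the usual Rockchip GRF write-mask
convention: the upper 16 bits of the written word act as a per-bit write
enable for the lower 16, so single bits can be changed without a
read-modify-write. Worked examples, derived directly from the macro and
RK3036_HDMI_PHSYNC = BIT(4):

	HIWORD_UPDATE(RK3036_HDMI_PHSYNC, RK3036_HDMI_PHSYNC)	/* set bit 4 */
		== BIT(4) | BIT(4 + 16) == 0x00100010
	HIWORD_UPDATE(0, RK3036_HDMI_PHSYNC)			/* clear bit 4 */
		== BIT(4 + 16) == 0x00100000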
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index f7a460190313..e7875b52f298 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -5,6 +5,9 @@
*/
#include <drm/drm_atomic.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -46,27 +49,20 @@ struct rk3066_hdmi {
struct clk *hclk;
void __iomem *regs;
- struct drm_connector connector;
+ struct drm_bridge bridge;
+ struct drm_connector *connector;
struct rockchip_encoder encoder;
struct rk3066_hdmi_i2c *i2c;
- struct i2c_adapter *ddc;
unsigned int tmdsclk;
struct hdmi_data_info hdmi_data;
};
-static struct rk3066_hdmi *encoder_to_rk3066_hdmi(struct drm_encoder *encoder)
+static struct rk3066_hdmi *bridge_to_rk3066_hdmi(struct drm_bridge *bridge)
{
- struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
-
- return container_of(rkencoder, struct rk3066_hdmi, encoder);
-}
-
-static struct rk3066_hdmi *connector_to_rk3066_hdmi(struct drm_connector *connector)
-{
- return container_of(connector, struct rk3066_hdmi, connector);
+ return container_of(bridge, struct rk3066_hdmi, bridge);
}
static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
@@ -161,57 +157,40 @@ static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
hdmi->tmdsclk = DEFAULT_PLLA_RATE;
}
-static int
-rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
- union hdmi_infoframe *frame, u32 frame_index,
- u32 mask, u32 disable, u32 enable)
+static int rk3066_hdmi_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- if (mask)
- hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);
-
- hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);
-
- if (setup_rc >= 0) {
- u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
- ssize_t rc, i;
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
- rc = hdmi_infoframe_pack(frame, packed_frame,
- sizeof(packed_frame));
- if (rc < 0)
- return rc;
-
- for (i = 0; i < rc; i++)
- hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
- packed_frame[i]);
-
- if (mask)
- hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(bridge->dev, "Unsupported infoframe type: %u\n", type);
+ return 0;
}
- return setup_rc;
+ hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, HDMI_INFOFRAME_AVI);
+
+ return 0;
}
-static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
- struct drm_display_mode *mode)
+static int
+rk3066_hdmi_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
{
- union hdmi_infoframe frame;
- int rc;
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
+ ssize_t i;
- rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &hdmi->connector, mode);
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ drm_err(bridge->dev, "Unsupported infoframe type: %u\n", type);
+ return 0;
+ }
- if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
- else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
- else
- frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ rk3066_hdmi_bridge_clear_infoframe(bridge, type);
- frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
- frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;
+ for (i = 0; i < len; i++)
+ hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4, buffer[i]);
- return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
- HDMI_INFOFRAME_AVI, 0, 0, 0);
+ return 0;
}
static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
@@ -324,9 +303,27 @@ static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
}
static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
- struct drm_display_mode *mode)
+ struct drm_atomic_state *state)
{
- struct drm_display_info *display = &hdmi->connector.display_info;
+ struct drm_bridge *bridge = &hdmi->bridge;
+ struct drm_connector *connector;
+ struct drm_display_info *display;
+ struct drm_display_mode *mode;
+ struct drm_connector_state *new_conn_state;
+ struct drm_crtc_state *new_crtc_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+
+ new_conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!new_conn_state))
+ return -EINVAL;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ if (WARN_ON(!new_crtc_state))
+ return -EINVAL;
+
+ display = &connector->display_info;
+ mode = &new_crtc_state->adjusted_mode;
hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
@@ -363,7 +360,7 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
if (display->is_hdmi) {
hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
HDMI_VIDEO_MODE_HDMI);
- rk3066_hdmi_config_avi(hdmi, mode);
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
} else {
hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
}
@@ -386,15 +383,15 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
return 0;
}
-static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void rk3066_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
int mux, val;
- conn_state = drm_atomic_get_new_connector_state(state, &hdmi->connector);
+ conn_state = drm_atomic_get_new_connector_state(state, hdmi->connector);
if (WARN_ON(!conn_state))
return;
@@ -402,7 +399,7 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
if (WARN_ON(!crtc_state))
return;
- mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
+ mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, &hdmi->encoder.encoder);
if (mux)
val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
else
@@ -413,13 +410,13 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
(mux) ? "1" : "0");
- rk3066_hdmi_setup(hdmi, &crtc_state->adjusted_mode);
+ rk3066_hdmi_setup(hdmi, state);
}
-static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void rk3066_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");
@@ -450,39 +447,34 @@ rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
static const
struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
.atomic_check = rk3066_hdmi_encoder_atomic_check,
- .atomic_enable = rk3066_hdmi_encoder_enable,
- .atomic_disable = rk3066_hdmi_encoder_disable,
};
static enum drm_connector_status
-rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
+rk3066_hdmi_bridge_detect(struct drm_bridge *bridge)
{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
return (hdmi_readb(hdmi, HDMI_HPG_MENS_STA) & HDMI_HPG_IN_STATUS_HIGH) ?
connector_status_connected : connector_status_disconnected;
}
-static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
+static const struct drm_edid *
+rk3066_hdmi_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
+ struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge);
const struct drm_edid *drm_edid;
- int ret = 0;
-
- if (!hdmi->ddc)
- return 0;
- drm_edid = drm_edid_read_ddc(connector, hdmi->ddc);
- drm_edid_connector_update(connector, drm_edid);
- ret = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
+ drm_edid = drm_edid_read_ddc(connector, bridge->ddc);
+ if (!drm_edid)
+ dev_dbg(hdmi->dev, "failed to get edid\n");
- return ret;
+ return drm_edid;
}
static enum drm_mode_status
-rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+rk3066_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
{
u32 vic = drm_match_cea_mode(mode);
@@ -492,82 +484,19 @@ rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
-static struct drm_encoder *
-rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct rk3066_hdmi *hdmi = connector_to_rk3066_hdmi(connector);
-
- return &hdmi->encoder.encoder;
-}
-
-static int
-rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- if (maxX > 1920)
- maxX = 1920;
- if (maxY > 1080)
- maxY = 1080;
-
- return drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-}
-
-static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
- .fill_modes = rk3066_hdmi_probe_single_connector_modes,
- .detect = rk3066_hdmi_connector_detect,
- .destroy = rk3066_hdmi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const
-struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
- .get_modes = rk3066_hdmi_connector_get_modes,
- .mode_valid = rk3066_hdmi_connector_mode_valid,
- .best_encoder = rk3066_hdmi_connector_best_encoder,
+static const struct drm_bridge_funcs rk3066_hdmi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = rk3066_hdmi_bridge_atomic_enable,
+ .atomic_disable = rk3066_hdmi_bridge_atomic_disable,
+ .detect = rk3066_hdmi_bridge_detect,
+ .edid_read = rk3066_hdmi_bridge_edid_read,
+ .hdmi_clear_infoframe = rk3066_hdmi_bridge_clear_infoframe,
+ .hdmi_write_infoframe = rk3066_hdmi_bridge_write_infoframe,
+ .mode_valid = rk3066_hdmi_bridge_mode_valid,
};
-static int
-rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
-{
- struct drm_encoder *encoder = &hdmi->encoder.encoder;
- struct device *dev = hdmi->dev;
-
- encoder->possible_crtcs =
- drm_of_find_possible_crtcs(drm, dev->of_node);
-
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (encoder->possible_crtcs == 0)
- return -EPROBE_DEFER;
-
- drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
-
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_helper_add(&hdmi->connector,
- &rk3066_hdmi_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, &hdmi->connector,
- &rk3066_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- hdmi->ddc);
-
- drm_connector_attach_encoder(&hdmi->connector, encoder);
-
- return 0;
-}
static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
{
@@ -597,7 +526,7 @@ static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
{
struct rk3066_hdmi *hdmi = dev_id;
- drm_helper_hpd_irq_event(hdmi->connector.dev);
+ drm_helper_hpd_irq_event(hdmi->connector->dev);
return IRQ_HANDLED;
}
@@ -720,7 +649,7 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
strscpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
- ret = i2c_add_adapter(adap);
+ ret = devm_i2c_add_adapter(hdmi->dev, adap);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
adap->name);
@@ -735,6 +664,66 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
return adap;
}
+static int
+rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder.encoder;
+ struct device *dev = hdmi->dev;
+ int ret;
+
+ encoder->possible_crtcs =
+ drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+
+ hdmi->bridge.driver_private = hdmi;
+ hdmi->bridge.funcs = &rk3066_hdmi_bridge_funcs;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HPD;
+ hdmi->bridge.of_node = hdmi->dev->of_node;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ hdmi->bridge.vendor = "Rockchip";
+ hdmi->bridge.product = "RK3066 HDMI";
+
+ hdmi->bridge.ddc = rk3066_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->bridge.ddc))
+ return PTR_ERR(hdmi->bridge.ddc);
+
+ ret = devm_drm_bridge_add(dev, &hdmi->bridge);
+ if (ret)
+ return ret;
+
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ hdmi->connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(hdmi->connector)) {
+ ret = PTR_ERR(hdmi->connector);
+ dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_attach_encoder(hdmi->connector, encoder);
+
+ return 0;
+}
+
static int rk3066_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -781,13 +770,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
/* internal hclk = hdmi_hclk / 25 */
hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);
- hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
- if (IS_ERR(hdmi->ddc)) {
- ret = PTR_ERR(hdmi->ddc);
- hdmi->ddc = NULL;
- goto err_disable_hclk;
- }
-
rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
usleep_range(999, 1000);
hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
@@ -798,7 +780,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
ret = rk3066_hdmi_register(drm, hdmi);
if (ret)
- goto err_disable_i2c;
+ goto err_disable_hclk;
dev_set_drvdata(dev, hdmi);
@@ -813,10 +795,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
return 0;
err_cleanup_hdmi:
- hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
-err_disable_i2c:
- i2c_put_adapter(hdmi->ddc);
err_disable_hclk:
clk_disable_unprepare(hdmi->hclk);
@@ -828,10 +807,8 @@ static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
{
struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);
- hdmi->connector.funcs->destroy(&hdmi->connector);
hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder);
- i2c_put_adapter(hdmi->ddc);
clk_disable_unprepare(hdmi->hclk);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index e3596e2b557d..ba6b0528d1e5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -733,11 +733,10 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
WARN_ON(vop->event);
- if (crtc->state->self_refresh_active)
+ if (crtc->state->self_refresh_active) {
rockchip_drm_set_win_enabled(crtc, false);
-
- if (crtc->state->self_refresh_active)
goto out;
+ }
mutex_lock(&vop->vop_lock);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 680bedbb770e..fc3ecb9fcd95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -710,6 +710,7 @@ enum dst_factor_mode {
#define VOP2_COLOR_KEY_MASK BIT(31)
+#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL GENMASK(31, 30)
#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD BIT(28)
#define RK3568_OVL_CTRL__YUV_MODE(vp) BIT(vp)
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 0a2840cbe8e2..32c4ed685739 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -2070,7 +2070,10 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
- ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL;
+ ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id);
+
if (vcstate->yuv_overlay)
ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
else
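
Note: with RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL defined as GENMASK(31, 30)
above, the FIELD_PREP() simply places the video-port id in bits 31:30.
Illustrative arithmetic:

	FIELD_PREP(GENMASK(31, 30), 0) == 0x00000000	/* VP0 */
	FIELD_PREP(GENMASK(31, 30), 2) == 0x80000000	/* VP2: 2 << 30 */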
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 4e2099d86517..d1f788763318 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -906,21 +906,21 @@ static const struct vop_data rk3366_vop = {
static const struct vop_output rk3399_output = {
.dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
- .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
- .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
- .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
- .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 31),
.dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
+ .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
- .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
- .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
- .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
- .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
- .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
+ .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
+ .mipi_dual_channel_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 3),
};
static const struct vop_common rk3399_common = {
@@ -975,23 +975,23 @@ static const struct vop_win_phy rk3399_win0_data = {
.data_formats = formats_win_full_10,
.nformats = ARRAY_SIZE(formats_win_full_10),
.format_modifiers = format_modifiers_win_full_afbc,
- .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
- .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
- .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4),
- .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
- .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
- .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
- .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
- .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
- .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
- .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
- .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
- .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
+ .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1),
+ .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4),
+ .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12),
+ .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15),
+ .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0),
};
static const struct vop_win_phy rk3399_win1_data = {
@@ -999,23 +999,23 @@ static const struct vop_win_phy rk3399_win1_data = {
.data_formats = formats_win_full_10,
.nformats = ARRAY_SIZE(formats_win_full_10),
.format_modifiers = format_modifiers_win_full,
- .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
- .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
- .fmt_10 = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 4),
- .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
- .uv_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 15),
- .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
- .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
- .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
- .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
- .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
- .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
- .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
+ .enable = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3399_WIN0_CTRL0, 0x7, 1),
+ .fmt_10 = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 4),
+ .rb_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 12),
+ .uv_swap = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 15),
+ .x_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3399_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3399_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3399_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3399_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3399_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3399_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3399_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3399_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3399_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3399_WIN0_CTRL2, 0xff, 0),
};
/*
diff --git a/drivers/gpu/drm/scheduler/.kunitconfig b/drivers/gpu/drm/scheduler/.kunitconfig
new file mode 100644
index 000000000000..cece53609fcf
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/.kunitconfig
@@ -0,0 +1,12 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_SCHED_KUNIT_TEST=y
+CONFIG_EXPERT=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCKDEP=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_LIST=y
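
Note: with this fragment in place, the suite can be run through the standard
KUnit wrapper, for example (illustrative invocation; see the in-tree kunit.py
documentation for the exact flags):

	./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/scheduler/.kunitconfig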
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index 53863621829f..6e13e4c63e9d 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -23,3 +23,5 @@
gpu-sched-y := sched_main.o sched_fence.o sched_entity.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index bd39db7bb240..e671aa241720 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -176,6 +176,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
+ drm_sched_fence_scheduled(job->s_fence, NULL);
drm_sched_fence_finished(job->s_fence, -ESRCH);
WARN_ON(job->s_fence->parent);
job->sched->ops->free_job(job);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index bfea608a7106..829579c41c6b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -828,11 +828,15 @@ EXPORT_SYMBOL(drm_sched_job_init);
*
* This arms a scheduler job for execution. Specifically it initializes the
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
- * or other places that need to track the completion of this job.
+ * or other places that need to track the completion of this job. It also
+ * initializes sequence numbers, which are fundamental for fence ordering.
*
* Refer to drm_sched_entity_push_job() documentation for locking
* considerations.
*
+ * Once this function was called, you *must* submit @job with
+ * drm_sched_entity_push_job().
+ *
* This can only be called if drm_sched_job_init() succeeded.
*/
void drm_sched_job_arm(struct drm_sched_job *job)
@@ -1015,13 +1019,14 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency);
* Cleans up the resources allocated with drm_sched_job_init().
*
* Drivers should call this from their error unwind code if @job is aborted
- * before it was submitted to an entity with drm_sched_entity_push_job().
+ * before drm_sched_job_arm() is called.
*
- * Since calling drm_sched_job_arm() causes the job's fences to be initialized,
- * it is up to the driver to ensure that fences that were exposed to external
- * parties get signaled. drm_sched_job_cleanup() does not ensure this.
+ * drm_sched_job_arm() is a point of no return since it initializes the fences
+ * and their sequence number etc. Once that function has been called, you *must*
+ * submit it with drm_sched_entity_push_job() and cannot simply abort it by
+ * calling drm_sched_job_cleanup().
*
- * This function must also be called in &struct drm_sched_backend_ops.free_job
+ * This function should be called in the &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
@@ -1029,10 +1034,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
unsigned long index;
if (kref_read(&job->s_fence->finished.refcount)) {
- /* drm_sched_job_arm() has been called */
+ /* The job has been processed by the scheduler, i.e.,
+ * drm_sched_job_arm() and drm_sched_entity_push_job() have
+ * been called.
+ */
dma_fence_put(&job->s_fence->finished);
} else {
- /* aborted job before arming */
+ /* The job was aborted before it has been committed to be run;
+ * notably, drm_sched_job_arm() has not been called.
+ */
drm_sched_fence_free(job->s_fence);
}
@@ -1220,20 +1230,23 @@ static void drm_sched_run_job_work(struct work_struct *w)
drm_sched_job_begin(sched_job);
trace_drm_run_job(sched_job, entity);
+ /*
+ * The run_job() callback must by definition return a fence whose
+ * refcount has been incremented for the scheduler already.
+ */
fence = sched->ops->run_job(sched_job);
complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence, fence);
if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
-
r = dma_fence_add_callback(fence, &sched_job->cb,
drm_sched_job_done_cb);
if (r == -ENOENT)
drm_sched_job_done(sched_job, fence->error);
else if (r)
DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+
+ dma_fence_put(fence);
} else {
drm_sched_job_done(sched_job, IS_ERR(fence) ?
PTR_ERR(fence) : 0);
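
Note: the comment updates above pin down the job lifecycle contract:
drm_sched_job_cleanup() is a valid abort path only before drm_sched_job_arm(),
and once armed a job must be pushed. A minimal driver-side sketch under those
rules (setup_resources() is a hypothetical driver step; the four scheduler
calls are the real API):

	ret = drm_sched_job_init(job, entity, 1, owner);
	if (ret)
		return ret;

	ret = setup_resources(job);
	if (ret) {
		drm_sched_job_cleanup(job);	/* legal: job not armed yet */
		return ret;
	}

	drm_sched_job_arm(job);			/* point of no return */
	drm_sched_entity_push_job(job);		/* must follow arming */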
diff --git a/drivers/gpu/drm/scheduler/tests/Makefile b/drivers/gpu/drm/scheduler/tests/Makefile
new file mode 100644
index 000000000000..5bf707bad373
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+drm-sched-tests-y := \
+ mock_scheduler.o \
+ tests_basic.o
+
+obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += drm-sched-tests.o
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
new file mode 100644
index 000000000000..f999c8859cf7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include "sched_tests.h"
+
+/*
+ * Here we implement the mock "GPU" (or the scheduler backend) which is used by
+ * the DRM scheduler unit tests in order to exercise the core functionality.
+ *
+ * Test cases are implemented in a separate file.
+ */
+
+/**
+ * drm_mock_sched_entity_new - Create a new mock scheduler entity
+ *
+ * @test: KUnit test owning the entity
+ * @priority: Scheduling priority
+ * @sched: Mock scheduler on which the entity can be scheduled
+ *
+ * Returns: New mock scheduler entity with allocation managed by the test
+ */
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_gpu_scheduler *drm_sched;
+ int ret;
+
+ entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ drm_sched = &sched->base;
+ ret = drm_sched_entity_init(&entity->base,
+ priority,
+ &drm_sched, 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ entity->test = test;
+
+ return entity;
+}
+
+/**
+ * drm_mock_sched_entity_free - Destroys a mock scheduler entity
+ *
+ * @entity: Entity to destroy
+ *
+ * To be used from the test cases once done with the entity.
+ */
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity)
+{
+ drm_sched_entity_destroy(&entity->base);
+}
+
+static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+
+ lockdep_assert_held(&sched->lock);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_DONE;
+ list_move_tail(&job->link, &sched->done_list);
+ dma_fence_signal(&job->hw_fence);
+ complete(&job->done);
+}
+
+static enum hrtimer_restart
+drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer)
+{
+ struct drm_mock_sched_job *job =
+ container_of(hrtimer, typeof(*job), timer);
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(job->base.sched);
+ struct drm_mock_sched_job *next;
+ ktime_t now = ktime_get();
+ unsigned long flags;
+ LIST_HEAD(signal);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (!job->duration_us)
+ break;
+
+ if (ktime_before(now, job->finish_at))
+ break;
+
+ sched->hw_timeline.cur_seqno = job->hw_fence.seqno;
+ drm_mock_sched_job_complete(job);
+ }
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * drm_mock_sched_job_new - Create a new mock scheduler job
+ *
+ * @test: KUnit test owning the job
+ * @entity: Scheduler entity of the job
+ *
+ * Returns: New mock scheduler job with allocation managed by the test
+ */
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity)
+{
+ struct drm_mock_sched_job *job;
+ int ret;
+
+ job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, job);
+
+ ret = drm_sched_job_init(&job->base,
+ &entity->base,
+ 1,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ job->test = test;
+
+ init_completion(&job->done);
+ spin_lock_init(&job->lock);
+ INIT_LIST_HEAD(&job->link);
+ hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+ return job;
+}
+
+static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence)
+{
+ return "drm_mock_sched";
+}
+
+static const char *
+drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ return (const char *)job->base.sched->name;
+}
+
+static void drm_mock_sched_hw_fence_release(struct dma_fence *fence)
+{
+ struct drm_mock_sched_job *job =
+ container_of(fence, typeof(*job), hw_fence);
+
+ hrtimer_cancel(&job->timer);
+
+ /* Containing job is freed by the kunit framework */
+}
+
+static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = {
+ .get_driver_name = drm_mock_sched_hw_fence_driver_name,
+ .get_timeline_name = drm_mock_sched_hw_fence_timeline_name,
+ .release = drm_mock_sched_hw_fence_release,
+};
+
+static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ dma_fence_init(&job->hw_fence,
+ &drm_mock_sched_hw_fence_ops,
+ &job->lock,
+ sched->hw_timeline.context,
+ atomic_inc_return(&sched->hw_timeline.next_seqno));
+
+ dma_fence_get(&job->hw_fence); /* Reference for the job_list */
+
+ spin_lock_irq(&sched->lock);
+ if (job->duration_us) {
+ ktime_t prev_finish_at = 0;
+
+ if (!list_empty(&sched->job_list)) {
+ struct drm_mock_sched_job *prev =
+ list_last_entry(&sched->job_list, typeof(*prev),
+ link);
+
+ prev_finish_at = prev->finish_at;
+ }
+
+ if (!prev_finish_at)
+ prev_finish_at = ktime_get();
+
+ job->finish_at = ktime_add_us(prev_finish_at, job->duration_us);
+ }
+ list_add_tail(&job->link, &sched->job_list);
+ if (job->finish_at)
+ hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS);
+ spin_unlock_irq(&sched->lock);
+
+ return &job->hw_fence;
+}
+
+static enum drm_gpu_sched_stat
+mock_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+
+ job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT;
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void mock_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct drm_mock_scheduler *sched =
+ drm_sched_to_mock_sched(sched_job->sched);
+ struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job);
+ unsigned long flags;
+
+ /* Remove from the scheduler done list. */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_del(&job->link);
+ spin_unlock_irqrestore(&sched->lock, flags);
+ dma_fence_put(&job->hw_fence);
+
+ drm_sched_job_cleanup(sched_job);
+
+ /* Mock job itself is freed by the kunit framework. */
+}
+
+static const struct drm_sched_backend_ops drm_mock_scheduler_ops = {
+ .run_job = mock_sched_run_job,
+ .timedout_job = mock_sched_timedout_job,
+ .free_job = mock_sched_free_job
+};
+
+/**
+ * drm_mock_sched_new - Create a new mock scheduler
+ *
+ * @test: KUnit test owning the job
+ * @timeout: Job timeout to set
+ *
+ * Returns: New mock scheduler with allocation managed by the test
+ */
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout)
+{
+ struct drm_sched_init_args args = {
+ .ops = &drm_mock_scheduler_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = U32_MAX,
+ .hang_limit = 1,
+ .timeout = timeout,
+ .name = "drm-mock-scheduler",
+ };
+ struct drm_mock_scheduler *sched;
+ int ret;
+
+ sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sched);
+
+ ret = drm_sched_init(&sched->base, &args);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sched->test = test;
+ sched->hw_timeline.context = dma_fence_context_alloc(1);
+ atomic_set(&sched->hw_timeline.next_seqno, 0);
+ INIT_LIST_HEAD(&sched->job_list);
+ INIT_LIST_HEAD(&sched->done_list);
+ spin_lock_init(&sched->lock);
+
+ return sched;
+}
+
+/**
+ * drm_mock_sched_fini - Destroys a mock scheduler
+ *
+ * @sched: Scheduler to destroy
+ *
+ * To be used from the test cases once done with the scheduler.
+ */
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ drm_sched_wqueue_stop(&sched->base);
+
+ /* Force complete all unfinished jobs. */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->job_list, link)
+ list_move_tail(&job->link, &list);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ list_for_each_entry(job, &list, link)
+ hrtimer_cancel(&job->timer);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &list, link)
+ drm_mock_sched_job_complete(job);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ /*
+ * Free completed jobs and jobs not yet processed by the DRM scheduler
+ * free worker.
+ */
+ spin_lock_irqsave(&sched->lock, flags);
+ list_for_each_entry_safe(job, next, &sched->done_list, link)
+ list_move_tail(&job->link, &list);
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ list_for_each_entry_safe(job, next, &list, link)
+ mock_sched_free_job(&job->base);
+
+ drm_sched_fini(&sched->base);
+}
+
+/**
+ * drm_mock_sched_advance - Advances the mock scheduler timeline
+ *
+ * @sched: Scheduler timeline to advance
+ * @num: By how many jobs to advance
+ *
+ * Advancing the scheduler timeline by a number of seqnos will trigger
+ * signalling of the hardware fences and unlinking the jobs from the internal
+ * scheduler tracking.
+ *
+ * This can be used from test cases which want complete control of the simulated
+ * job execution timing. For example, a job submitted with no set duration
+ * will never complete before the test case advances the timeline by one.
+ */
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num)
+{
+ struct drm_mock_sched_job *job, *next;
+ unsigned int found = 0;
+ unsigned long flags;
+ LIST_HEAD(signal);
+
+ spin_lock_irqsave(&sched->lock, flags);
+ if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num <
+ sched->hw_timeline.cur_seqno))
+ goto unlock;
+ sched->hw_timeline.cur_seqno += num;
+ list_for_each_entry_safe(job, next, &sched->job_list, link) {
+ if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno)
+ break;
+
+ drm_mock_sched_job_complete(job);
+ found++;
+ }
+unlock:
+ spin_unlock_irqrestore(&sched->lock, flags);
+
+ return found;
+}
+
+MODULE_DESCRIPTION("DRM mock scheduler and tests");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
new file mode 100644
index 000000000000..27caf8285fb7
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _SCHED_TESTS_H_
+#define _SCHED_TESTS_H_
+
+#include <kunit/test.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/dma-fence.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include <drm/gpu_scheduler.h>
+
+/*
+ * DOC: Mock DRM scheduler data structures
+ *
+ * drm_mock_* data structures are used to implement a mock "GPU".
+ *
+ * They subclass the core DRM scheduler objects and add their data on top, which
+ * enables tracking the submitted jobs and simulating their execution with the
+ * attributes as specified by the test case.
+ */
+
+/**
+ * struct drm_mock_scheduler - implements a trivial mock GPU execution engine
+ *
+ * @base: DRM scheduler base class
+ * @test: Backpointer to the owning kunit test case
+ * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list
+ * @job_list: List of jobs submitted to the mock GPU
+ * @done_list: List of jobs completed by the mock GPU
+ * @hw_timeline: Simulated hardware timeline with a @context, @next_seqno and
+ * @cur_seqno, used to implement a struct dma_fence signaling
+ * simulated job completion.
+ *
+ * Trivial mock GPU execution engine tracks submitted jobs and enables
+ * completing them strictly in submission order.
+ */
+struct drm_mock_scheduler {
+ struct drm_gpu_scheduler base;
+
+ struct kunit *test;
+
+ spinlock_t lock;
+ struct list_head job_list;
+ struct list_head done_list;
+
+ struct {
+ u64 context;
+ atomic_t next_seqno;
+ unsigned int cur_seqno;
+ } hw_timeline;
+};
+
+/**
+ * struct drm_mock_sched_entity - implements a mock GPU sched entity
+ *
+ * @base: DRM scheduler entity base class
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched entity is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_entity {
+ struct drm_sched_entity base;
+
+ struct kunit *test;
+};
+
+/**
+ * struct drm_mock_sched_job - implements a mock GPU job
+ *
+ * @base: DRM sched job base class
+ * @done: Completion signalled when the job finishes.
+ * @flags: Flags designating job state.
+ * @link: List head element used by the drm_mock_scheduler for job tracking
+ * @timer: Timer used for simulating job execution duration
+ * @duration_us: Simulated job duration in microseconds, or zero if in manual
+ * timeline advance mode
+ * @finish_at: Absolute time when jobs with a set duration will complete
+ * @lock: Lock used for @hw_fence
+ * @hw_fence: Fence returned to the DRM scheduler as the hardware fence
+ * @test: Backpointer to the owning kunit test case
+ *
+ * Mock GPU sched job is used by the test cases to submit jobs to the mock
+ * scheduler.
+ */
+struct drm_mock_sched_job {
+ struct drm_sched_job base;
+
+ struct completion done;
+
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+ unsigned long flags;
+
+ struct list_head link;
+ struct hrtimer timer;
+
+ unsigned int duration_us;
+ ktime_t finish_at;
+
+ spinlock_t lock;
+ struct dma_fence hw_fence;
+
+ struct kunit *test;
+};
+
+static inline struct drm_mock_scheduler *
+drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched)
+{
+ return container_of(sched, struct drm_mock_scheduler, base);
+}
+
+static inline struct drm_mock_sched_entity *
+drm_sched_entity_to_mock_entity(struct drm_sched_entity *sched_entity)
+{
+ return container_of(sched_entity, struct drm_mock_sched_entity, base);
+}
+
+static inline struct drm_mock_sched_job *
+drm_sched_job_to_mock_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct drm_mock_sched_job, base);
+}
+
+struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test,
+ long timeout);
+void drm_mock_sched_fini(struct drm_mock_scheduler *sched);
+unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched,
+ unsigned int num);
+
+struct drm_mock_sched_entity *
+drm_mock_sched_entity_new(struct kunit *test,
+ enum drm_sched_priority priority,
+ struct drm_mock_scheduler *sched);
+void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity);
+
+struct drm_mock_sched_job *
+drm_mock_sched_job_new(struct kunit *test,
+ struct drm_mock_sched_entity *entity);
+
+/**
+ * drm_mock_sched_job_submit - Arm and submit a job in one go
+ *
+ * @job: Job to arm and submit
+ */
+static inline void drm_mock_sched_job_submit(struct drm_mock_sched_job *job)
+{
+ drm_sched_job_arm(&job->base);
+ drm_sched_entity_push_job(&job->base);
+}
+
+/**
+ * drm_mock_sched_job_set_duration_us - Set a job duration
+ *
+ * @job: Job to set the duration for
+ * @duration_us: Duration in micro seconds
+ *
+ * Jobs with duration set will be automatically completed by the mock scheduler
+ * as the timeline progresses, unless a job without a set duration is
+ * encountered in the timeline, in which case calling drm_mock_sched_advance()
+ * will be required to bump the timeline.
+ */
+static inline void
+drm_mock_sched_job_set_duration_us(struct drm_mock_sched_job *job,
+ unsigned int duration_us)
+{
+ job->duration_us = duration_us;
+}
+
+/**
+ * drm_mock_sched_job_is_finished - Check if a job is finished
+ *
+ * @job: Job to check
+ *
+ * Returns: true if finished
+ */
+static inline bool
+drm_mock_sched_job_is_finished(struct drm_mock_sched_job *job)
+{
+ return job->flags & DRM_MOCK_SCHED_JOB_DONE;
+}
+
+/**
+ * drm_mock_sched_job_wait_finished - Wait until a job is finished
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if finished within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_finished(struct drm_mock_sched_job *job, long timeout)
+{
+ if (job->flags & DRM_MOCK_SCHED_JOB_DONE)
+ return true;
+
+ return wait_for_completion_timeout(&job->done, timeout) != 0;
+}
+
+/**
+ * drm_mock_sched_job_wait_scheduled - Wait until a job is scheduled
+ *
+ * @job: Job to wait for
+ * @timeout: Wait time in jiffies
+ *
+ * Returns: true if scheduled within the timeout provided, otherwise false
+ */
+static inline bool
+drm_mock_sched_job_wait_scheduled(struct drm_mock_sched_job *job, long timeout)
+{
+ KUNIT_ASSERT_EQ(job->test, job->flags & DRM_MOCK_SCHED_JOB_DONE, 0);
+
+ return dma_fence_wait_timeout(&job->base.s_fence->scheduled,
+ false,
+ timeout) != 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
new file mode 100644
index 000000000000..7230057e0594
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Valve Corporation */
+
+#include <linux/delay.h>
+
+#include "sched_tests.h"
+
+/*
+ * DRM scheduler basic tests should check the basic functional correctness of
+ * the scheduler, including some very light smoke testing. More targeted tests,
+ * for example focusing on testing specific bugs and other more complicated test
+ * scenarios, should be implemented in separate source units.
+ */
+
+static int drm_sched_basic_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ return 0;
+}
+
+static void drm_sched_basic_exit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+
+ drm_mock_sched_fini(sched);
+}
+
+static int drm_sched_timeout_init(struct kunit *test)
+{
+ test->priv = drm_mock_sched_new(test, HZ);
+
+ return 0;
+}
+
+static void drm_sched_basic_submit(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ unsigned int i;
+ bool done;
+
+ /*
+ * Submit one job to the scheduler and verify that it gets scheduled
+ * and completed only when the mock hw backend processes it.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+struct drm_sched_basic_params {
+ const char *description;
+ unsigned int queue_depth;
+ unsigned int num_entities;
+ unsigned int job_us;
+ bool dep_chain;
+};
+
+static const struct drm_sched_basic_params drm_sched_basic_cases[] = {
+ {
+ .description = "A queue of jobs in a single entity",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ },
+ {
+ .description = "A chain of dependent jobs across multiple entities",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 1,
+ .dep_chain = true,
+ },
+ {
+ .description = "Multiple independent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ },
+ {
+ .description = "Multiple inter-dependent job queues",
+ .queue_depth = 100,
+ .job_us = 1000,
+ .num_entities = 4,
+ .dep_chain = true,
+ },
+};
+
+static void
+drm_sched_basic_desc(const struct drm_sched_basic_params *params, char *desc)
+{
+ strscpy(desc, params->description, KUNIT_PARAM_DESC_SIZE);
+}
+
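+/*
+ * KUNIT_ARRAY_PARAM() generates drm_sched_basic_gen_params(), which is
+ * referenced by KUNIT_CASE_PARAM() in the test case list below.
+ */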
+KUNIT_ARRAY_PARAM(drm_sched_basic, drm_sched_basic_cases, drm_sched_basic_desc);
+
+static void drm_sched_basic_test(struct kunit *test)
+{
+ const struct drm_sched_basic_params *params = test->param_value;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job, *prev = NULL;
+ struct drm_mock_sched_entity **entity;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ entity = kunit_kcalloc(test, params->num_entities, sizeof(*entity),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, entity);
+
+ for (i = 0; i < params->num_entities; i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < params->queue_depth; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= params->num_entities;
+ drm_mock_sched_job_set_duration_us(job, params->job_us);
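+		/* drm_sched_job_add_dependency() consumes a fence reference, hence the get */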
+ if (params->dep_chain && prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < params->num_entities; i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_basic_entity_cleanup(struct kunit *test)
+{
+ struct drm_mock_sched_job *job, *mid, *prev = NULL;
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity[4];
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ bool done;
+
+ /*
+ * Submit a queue of jobs across different entities with an explicit
+ * chain of dependencies between them and trigger entity cleanup while
+ * the queue is still being processed.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ if (prev)
+ drm_sched_job_add_dependency(&job->base,
+ dma_fence_get(&prev->base.s_fence->finished));
+ drm_mock_sched_job_submit(job);
+ if (i == qd / 2)
+ mid = job;
+ prev = job;
+ }
+
+ done = drm_mock_sched_job_wait_finished(mid, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ /* Exit with half of the queue still pending to be executed. */
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_basic_tests[] = {
+ KUNIT_CASE(drm_sched_basic_submit),
+ KUNIT_CASE_PARAM(drm_sched_basic_test, drm_sched_basic_gen_params),
+ KUNIT_CASE(drm_sched_basic_entity_cleanup),
+ {}
+};
+
+static struct kunit_suite drm_sched_basic = {
+ .name = "drm_sched_basic_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_basic_tests,
+};
+
+static void drm_sched_basic_timeout(struct kunit *test)
+{
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_sched_job *job;
+ bool done;
+
+ /*
+ * Submit a single job against a scheduler with the timeout configured
+ * and verify that the timeout handling will run if the backend fails
+ * to complete it in time.
+ */
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+ job = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job);
+
+ done = drm_mock_sched_job_wait_scheduled(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ / 2);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ 0);
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ KUNIT_ASSERT_EQ(test,
+ job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT,
+ DRM_MOCK_SCHED_JOB_TIMEDOUT);
+
+ drm_mock_sched_entity_free(entity);
+}
+
+static struct kunit_case drm_sched_timeout_tests[] = {
+ KUNIT_CASE(drm_sched_basic_timeout),
+ {}
+};
+
+static struct kunit_suite drm_sched_timeout = {
+ .name = "drm_sched_basic_timeout_tests",
+ .init = drm_sched_timeout_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_timeout_tests,
+};
+
+static void drm_sched_priorities(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 100;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+ bool done;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities.
+ */
+
+ BUILD_BUG_ON(DRM_SCHED_PRIORITY_KERNEL > DRM_SCHED_PRIORITY_LOW);
+ BUILD_BUG_ON(ARRAY_SIZE(entity) != DRM_SCHED_PRIORITY_COUNT);
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ done = drm_mock_sched_job_wait_finished(job, HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static void drm_sched_change_priority(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT];
+ struct drm_mock_scheduler *sched = test->priv;
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+ unsigned int i, cur_ent = 0;
+ enum drm_sched_priority p;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * priorities and while waiting for them to complete, periodically keep
+ * changing their priorities.
+ *
+ * We set up the queue-depth (qd) and job duration so the priority
+ * changing loop has some time to interact with submissions to the
+ * backend and job completions as they progress.
+ */
+
+ for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
+ entity[p] = drm_mock_sched_entity_new(test, p, sched);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ drm_sched_entity_set_priority(&entity[cur_ent]->base,
+ (entity[cur_ent]->base.priority + 1) %
+ DRM_SCHED_PRIORITY_COUNT);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ usleep_range(200, 500);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+}
+
+static struct kunit_case drm_sched_priority_tests[] = {
+ KUNIT_CASE(drm_sched_priorities),
+ KUNIT_CASE(drm_sched_change_priority),
+ {}
+};
+
+static struct kunit_suite drm_sched_priority = {
+ .name = "drm_sched_basic_priority_tests",
+ .init = drm_sched_basic_init,
+ .exit = drm_sched_basic_exit,
+ .test_cases = drm_sched_priority_tests,
+};
+
+static void drm_sched_test_modify_sched(struct kunit *test)
+{
+ unsigned int i, cur_ent = 0, cur_sched = 0;
+ struct drm_mock_sched_entity *entity[13];
+ struct drm_mock_scheduler *sched[3];
+ struct drm_mock_sched_job *job;
+ const unsigned int qd = 1000;
+
+ /*
+ * Submit a bunch of jobs against entities configured with different
+ * schedulers and while waiting for them to complete, periodically keep
+ * changing schedulers associated with each entity.
+ *
+ * We set up the queue-depth (qd) and job duration so the sched modify
+ * loop has some time to interact with submissions to the backend and
+ * job completions as they progress.
+ *
+ * For the number of schedulers and entities we use primes in order to
+ * perturb the entity->sched assignments with less of a regular pattern.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ entity[i] = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched[i % ARRAY_SIZE(sched)]);
+
+ for (i = 0; i < qd; i++) {
+ job = drm_mock_sched_job_new(test, entity[cur_ent++]);
+ cur_ent %= ARRAY_SIZE(entity);
+ drm_mock_sched_job_set_duration_us(job, 1000);
+ drm_mock_sched_job_submit(job);
+ }
+
+ do {
+ struct drm_gpu_scheduler *modify;
+
+ usleep_range(200, 500);
+ cur_ent++;
+ cur_ent %= ARRAY_SIZE(entity);
+ cur_sched++;
+ cur_sched %= ARRAY_SIZE(sched);
+ modify = &sched[cur_sched]->base;
+ drm_sched_entity_modify_sched(&entity[cur_ent]->base, &modify,
+ 1);
+ } while (!drm_mock_sched_job_is_finished(job));
+
+ for (i = 0; i < ARRAY_SIZE(entity); i++)
+ drm_mock_sched_entity_free(entity[i]);
+
+ for (i = 0; i < ARRAY_SIZE(sched); i++)
+ drm_mock_sched_fini(sched[i]);
+}
+
+static struct kunit_case drm_sched_modify_sched_tests[] = {
+ KUNIT_CASE(drm_sched_test_modify_sched),
+ {}
+};
+
+static struct kunit_suite drm_sched_modify_sched = {
+ .name = "drm_sched_basic_modify_sched_tests",
+ .test_cases = drm_sched_modify_sched_tests,
+};
+
+static void drm_sched_test_credits(struct kunit *test)
+{
+ struct drm_mock_sched_entity *entity;
+ struct drm_mock_scheduler *sched;
+ struct drm_mock_sched_job *job[2];
+ bool done;
+ int i;
+
+ /*
+ * Check that the configured credit limit is respected.
+ */
+
+ sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT);
+ sched->base.credit_limit = 1;
+
+ entity = drm_mock_sched_entity_new(test,
+ DRM_SCHED_PRIORITY_NORMAL,
+ sched);
+
+ job[0] = drm_mock_sched_job_new(test, entity);
+ job[1] = drm_mock_sched_job_new(test, entity);
+
+ drm_mock_sched_job_submit(job[0]);
+ drm_mock_sched_job_submit(job[1]);
+
+ done = drm_mock_sched_job_wait_scheduled(job[0], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_FALSE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_scheduled(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ i = drm_mock_sched_advance(sched, 1);
+ KUNIT_ASSERT_EQ(test, i, 1);
+
+ done = drm_mock_sched_job_wait_finished(job[1], HZ);
+ KUNIT_ASSERT_TRUE(test, done);
+
+ drm_mock_sched_entity_free(entity);
+ drm_mock_sched_fini(sched);
+}
+
+static struct kunit_case drm_sched_credits_tests[] = {
+ KUNIT_CASE(drm_sched_test_credits),
+ {}
+};
+
+static struct kunit_suite drm_sched_credits = {
+ .name = "drm_sched_basic_credits_tests",
+ .test_cases = drm_sched_credits_tests,
+};
+
+kunit_test_suites(&drm_sched_basic,
+ &drm_sched_timeout,
+ &drm_sched_priority,
+ &drm_sched_modify_sched,
+ &drm_sched_credits);
diff --git a/drivers/gpu/drm/sitronix/Kconfig b/drivers/gpu/drm/sitronix/Kconfig
new file mode 100644
index 000000000000..741d1bb4b83f
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/Kconfig
@@ -0,0 +1,52 @@
+config DRM_ST7571_I2C
+ tristate "DRM support for Sitronix ST7571 display panels (I2C)"
+ depends on DRM && I2C && MMU
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for Sitronix ST7571 panels controlled over I2C.
+
+	  If M is selected the module will be called st7571-i2c.
+
+config TINYDRM_ST7586
+ tristate
+ default n
+
+config DRM_ST7586
+ tristate "DRM support for Sitronix ST7586 display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_MIPI_DBI
+ default TINYDRM_ST7586
+ help
+ DRM driver for the following Sitronix ST7586 panels:
+ * LEGO MINDSTORMS EV3
+
+ If M is selected the module will be called st7586.
+
+config TINYDRM_ST7735R
+ tristate
+ default n
+
+config DRM_ST7735R
+ tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
+ default TINYDRM_ST7735R
+ help
+ DRM driver for Sitronix ST7715R/ST7735R with one of the following
+ LCDs:
+ * Jianda JD-T18003-T01 1.8" 128x160 TFT
+ * Okaya RH128128T 1.44" 128x128 TFT
+
+ If M is selected the module will be called st7735r.
+
diff --git a/drivers/gpu/drm/sitronix/Makefile b/drivers/gpu/drm/sitronix/Makefile
new file mode 100644
index 000000000000..bd139e5a6995
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_DRM_ST7571_I2C) += st7571-i2c.o
+obj-$(CONFIG_DRM_ST7586) += st7586.o
+obj-$(CONFIG_DRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
new file mode 100644
index 000000000000..eec846892962
--- /dev/null
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Sitronix ST7571, a 4 level gray scale dot matrix LCD controller
+ *
+ * Copyright (C) 2025 Marcus Folkesson <marcus.folkesson@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_module.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+
+#define ST7571_COMMAND_MODE (0x00)
+#define ST7571_DATA_MODE (0x40)
+
+/* Normal mode command set */
+#define ST7571_DISPLAY_OFF (0xae)
+#define ST7571_DISPLAY_ON (0xaf)
+#define ST7571_OSC_ON (0xab)
+#define ST7571_SET_COLUMN_LSB(c) (0x00 | FIELD_PREP(GENMASK(3, 0), (c)))
+#define ST7571_SET_COLUMN_MSB(c) (0x10 | FIELD_PREP(GENMASK(2, 0), (c) >> 4))
+#define ST7571_SET_COM0_LSB(x) (FIELD_PREP(GENMASK(6, 0), (x)))
+#define ST7571_SET_COM0_MSB (0x44)
+#define ST7571_SET_COM_SCAN_DIR(d) (0xc0 | FIELD_PREP(GENMASK(3, 3), (d)))
+#define ST7571_SET_CONTRAST_LSB(c) (FIELD_PREP(GENMASK(5, 0), (c)))
+#define ST7571_SET_CONTRAST_MSB (0x81)
+#define ST7571_SET_DISPLAY_DUTY_LSB(d) (FIELD_PREP(GENMASK(7, 0), (d)))
+#define ST7571_SET_DISPLAY_DUTY_MSB (0x48)
+#define ST7571_SET_ENTIRE_DISPLAY_ON(p) (0xa4 | FIELD_PREP(GENMASK(0, 0), (p)))
+#define ST7571_SET_LCD_BIAS(b) (0x50 | FIELD_PREP(GENMASK(2, 0), (b)))
+#define ST7571_SET_MODE_LSB(m) (FIELD_PREP(GENMASK(7, 2), (m)))
+#define ST7571_SET_MODE_MSB (0x38)
+#define ST7571_SET_PAGE(p) (0xb0 | FIELD_PREP(GENMASK(3, 0), (p)))
+#define ST7571_SET_POWER(p) (0x28 | FIELD_PREP(GENMASK(2, 0), (p)))
+#define ST7571_SET_REGULATOR_REG(r) (0x20 | FIELD_PREP(GENMASK(2, 0), (r)))
+#define ST7571_SET_REVERSE(r) (0xa6 | FIELD_PREP(GENMASK(0, 0), (r)))
+#define ST7571_SET_SEG_SCAN_DIR(d) (0xa0 | FIELD_PREP(GENMASK(0, 0), (d)))
+#define ST7571_SET_START_LINE_LSB(l) (FIELD_PREP(GENMASK(6, 0), (l)))
+#define ST7571_SET_START_LINE_MSB (0x40)
+
+/* Extension command set 3 */
+#define ST7571_COMMAND_SET_3 (0x7b)
+#define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c)))
+#define ST7571_COMMAND_SET_NORMAL (0x00)
+
+#define ST7571_PAGE_HEIGHT 8
+
+#define DRIVER_NAME "st7571"
+#define DRIVER_DESC "ST7571 DRM driver"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+enum st7571_color_mode {
+ ST7571_COLOR_MODE_GRAY = 0,
+ ST7571_COLOR_MODE_BLACKWHITE = 1,
+};
+
+struct st7571_device;
+
+struct st7571_panel_constraints {
+ u32 min_nlines;
+ u32 max_nlines;
+ u32 min_ncols;
+ u32 max_ncols;
+ bool support_grayscale;
+};
+
+struct st7571_panel_data {
+ int (*init)(struct st7571_device *st7571);
+ struct st7571_panel_constraints constraints;
+};
+
+struct st7571_panel_format {
+ void (*prepare_buffer)(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state);
+ int (*update_rect)(struct drm_framebuffer *fb, struct drm_rect *rect);
+ enum st7571_color_mode mode;
+ const u8 nformats;
+ const u32 formats[];
+};
+
+struct st7571_device {
+ struct drm_device dev;
+
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct drm_display_mode mode;
+
+ const struct st7571_panel_format *pformat;
+ const struct st7571_panel_data *pdata;
+ struct i2c_client *client;
+ struct gpio_desc *reset;
+ struct regmap *regmap;
+
+ /*
+ * Depending on the hardware design, the acknowledge signal may be hard to
+ * recognize as a valid logic "0" level.
+	 * Therefore, ignore NAK if possible to stay compatible with most hardware designs
+ * and off-the-shelf panels out there.
+ *
+	 * From section 6.4, MICROPROCESSOR INTERFACE, of the datasheet:
+ *
+ * "By connecting SDA_OUT to SDA_IN externally, the SDA line becomes fully
+ * I2C interface compatible.
+ * Separating acknowledge-output from serial data
+ * input is advantageous for chip-on-glass (COG) applications. In COG
+ * applications, the ITO resistance and the pull-up resistor will form a
+ * voltage divider, which affects acknowledge-signal level. Larger ITO
+ * resistance will raise the acknowledged-signal level and system cannot
+ * recognize this level as a valid logic “0” level. By separating SDA_IN from
+ * SDA_OUT, the IC can be used in a mode that ignores the acknowledge-bit.
+ * For applications which check acknowledge-bit, it is necessary to minimize
+ * the ITO resistance of the SDA_OUT trace to guarantee a valid low level."
+ *
+ */
+ bool ignore_nak;
+
+ bool grayscale;
+ u32 height_mm;
+ u32 width_mm;
+ u32 startline;
+ u32 nlines;
+ u32 ncols;
+ u32 bpp;
+
+ /* Intermediate buffer in LCD friendly format */
+ u8 *hwbuf;
+
+ /* Row of (transformed) pixels ready to be written to the display */
+ u8 *row;
+};
+
+static inline struct st7571_device *drm_to_st7571(struct drm_device *dev)
+{
+ return container_of(dev, struct st7571_device, dev);
+}
+
+static int st7571_regmap_write(void *context, const void *data, size_t count)
+{
+ struct i2c_client *client = context;
+ struct st7571_device *st7571 = i2c_get_clientdata(client);
+ int ret;
+
+ struct i2c_msg msg = {
+ .addr = st7571->client->addr,
+ .flags = st7571->ignore_nak ? I2C_M_IGNORE_NAK : 0,
+ .len = count,
+ .buf = (u8 *)data
+ };
+
+ ret = i2c_transfer(st7571->client->adapter, &msg, 1);
+
+ /*
+ * Unfortunately, there is no way to check if the transfer failed because of
+ * a NAK or something else as I2C bus drivers use different return values for NAK.
+ *
+ * However, if the transfer fails and ignore_nak is set, we know it is an error.
+ */
+ if (ret < 0 && st7571->ignore_nak)
+ return ret;
+
+ return 0;
+}
+
+/* The st7571 driver does not read registers, but regmap expects a .read callback */
+static int st7571_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static int st7571_send_command_list(struct st7571_device *st7571,
+ const u8 *cmd_list, size_t len)
+{
+ int ret;
+
+ for (int i = 0; i < len; i++) {
+ ret = regmap_write(st7571->regmap, ST7571_COMMAND_MODE, cmd_list[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+	return 0;
+}
+
+static inline u8 st7571_transform_xy(const char *p, int x, int y)
+{
+ int xrest = x % 8;
+ u8 result = 0;
+
+ /*
+ * Transforms an (x, y) pixel coordinate into a vertical 8-bit
+ * column from the framebuffer. It calculates the corresponding byte in the
+ * framebuffer, extracts the bit at the given x position across 8 consecutive
+ * rows, and packs those bits into a single byte.
+ *
+ * Return an 8-bit value representing a vertical column of pixels.
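+	 *
+	 * For example, (x, y) = (10, 17) packs bit 2 (10 % 8) of the bytes at
+	 * offsets (16..23) * 16 + 1 into one result byte. The hardcoded row
+	 * stride of 16 bytes corresponds to 128 pixels at 1 bpp.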
+ */
+ x = x / 8;
+ y = (y / 8) * 8;
+
+ for (int i = 0; i < 8; i++) {
+ int row_idx = y + i;
+ u8 byte = p[row_idx * 16 + x];
+ u8 bit = (byte >> xrest) & 1;
+
+ result |= (bit << i);
+ }
+
+ return result;
+}
+
+static int st7571_set_position(struct st7571_device *st7571, int x, int y)
+{
+ u8 cmd_list[] = {
+ ST7571_SET_COLUMN_LSB(x),
+ ST7571_SET_COLUMN_MSB(x),
+ ST7571_SET_PAGE(y / ST7571_PAGE_HEIGHT),
+ };
+
+ return st7571_send_command_list(st7571, cmd_list, ARRAY_SIZE(cmd_list));
+}
+
+static int st7571_fb_clear_screen(struct st7571_device *st7571)
+{
+ u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp;
+ char pixelvalue = 0x00;
+
+ for (int i = 0; i < npixels; i++)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1);
+
+ return 0;
+}
+
+static void st7571_prepare_buffer_monochrome(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+ u32 size;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ iosys_map_set_vaddr(&dst, st7571->hwbuf);
+
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ break;
+
+ case DRM_FORMAT_R1:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+ }
+}
+
+static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571,
+ const struct iosys_map *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ u32 size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ unsigned int dst_pitch;
+ struct iosys_map dst;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888: /* Only support XRGB8888 in monochrome mode */
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ iosys_map_set_vaddr(&dst, st7571->hwbuf);
+
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ break;
+
+ case DRM_FORMAT_R1:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 8;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+
+ case DRM_FORMAT_R2:
+ size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 4;
+ memcpy(st7571->hwbuf, vmap->vaddr, size);
+ break;
+	}
+}
+
+static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct drm_rect *rect)
+{
+ struct st7571_device *st7571 = drm_to_st7571(fb->dev);
+ char *row = st7571->row;
+
+ /* Align y to display page boundaries */
+ rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
+
+ for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
+ for (int x = rect->x1; x < rect->x2; x++)
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+
+ st7571_set_position(st7571, rect->x1, y);
+
+		/* TODO: Investigate why we can't write multiple bytes at once */
+ for (int x = rect->x1; x < rect->x2; x++)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+ }
+
+ return 0;
+}
+
+static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct drm_rect *rect)
+{
+ struct st7571_device *st7571 = drm_to_st7571(fb->dev);
+ u32 format = fb->format->format;
+ char *row = st7571->row;
+ int x1;
+ int x2;
+
+ /* Align y to display page boundaries */
+ rect->y1 = round_down(rect->y1, ST7571_PAGE_HEIGHT);
+ rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+		/* Treated as monochrome (R1) */
+ fallthrough;
+ case DRM_FORMAT_R1:
+ x1 = rect->x1;
+ x2 = rect->x2;
+ break;
+ case DRM_FORMAT_R2:
+ x1 = rect->x1 * 2;
+ x2 = rect->x2 * 2;
+ break;
+ }
+
+ for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
+ for (int x = x1; x < x2; x++)
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+
+ st7571_set_position(st7571, rect->x1, y);
+
+		/* TODO: Investigate why we can't write multiple bytes at once */
+ for (int x = x1; x < x2; x++) {
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+
+ /*
+ * As the display supports grayscale, all pixels must be written as two bits
+ * even if the format is monochrome.
+ *
+ * The bit values maps to the following grayscale:
+ * 0 0 = White
+ * 0 1 = Light gray
+ * 1 0 = Dark gray
+ * 1 1 = Black
+ *
+ * For monochrome formats, write the same value twice to get
+ * either a black or white pixel.
+ */
+ if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888)
+ regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int st7571_connector_get_modes(struct drm_connector *conn)
+{
+ struct st7571_device *st7571 = drm_to_st7571(conn->dev);
+
+ return drm_connector_helper_get_modes_fixed(conn, &st7571->mode);
+}
+
+static const struct drm_connector_helper_funcs st7571_connector_helper_funcs = {
+ .get_modes = st7571_connector_get_modes,
+};
+
+static const struct st7571_panel_format st7571_monochrome = {
+ .prepare_buffer = st7571_prepare_buffer_monochrome,
+ .update_rect = st7571_fb_update_rect_monochrome,
+ .mode = ST7571_COLOR_MODE_BLACKWHITE,
+ .formats = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_R1,
+ },
+ .nformats = 2,
+};
+
+static const struct st7571_panel_format st7571_grayscale = {
+ .prepare_buffer = st7571_prepare_buffer_grayscale,
+ .update_rect = st7571_fb_update_rect_grayscale,
+ .mode = ST7571_COLOR_MODE_GRAY,
+ .formats = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_R1,
+ DRM_FORMAT_R2,
+ },
+ .nformats = 3,
+};
+
+static const u64 st7571_primary_plane_fmtmods[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int st7571_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
+static void st7571_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_device *dev = plane->dev;
+ struct drm_rect damage;
+ struct st7571_device *st7571 = drm_to_st7571(plane->dev);
+ int ret, idx;
+
+ if (!fb)
+ return; /* no framebuffer; plane is disabled */
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ st7571->pformat->prepare_buffer(st7571,
+ &shadow_plane_state->data[0],
+ fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
+
+ st7571->pformat->update_rect(fb, &damage);
+ }
+
+ drm_dev_exit(idx);
+
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+
+static void st7571_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct st7571_device *st7571 = drm_to_st7571(plane->dev);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ st7571_fb_clear_screen(st7571);
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs st7571_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = st7571_primary_plane_helper_atomic_check,
+ .atomic_update = st7571_primary_plane_helper_atomic_update,
+ .atomic_disable = st7571_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs st7571_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+/*
+ * CRTC
+ */
+
+static enum drm_mode_status st7571_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct st7571_device *st7571 = drm_to_st7571(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &st7571->mode);
+}
+
+static const struct drm_crtc_helper_funcs st7571_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .mode_valid = st7571_crtc_mode_valid,
+};
+
+static const struct drm_crtc_funcs st7571_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+/*
+ * Encoder
+ */
+
+static void st7571_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct st7571_device *st7571 = drm_to_st7571(drm);
+ u8 command = ST7571_DISPLAY_ON;
+ int ret;
+
+ ret = st7571->pdata->init(st7571);
+ if (ret)
+ return;
+
+ st7571_send_command_list(st7571, &command, 1);
+}
+
+static void st7571_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct st7571_device *st7571 = drm_to_st7571(drm);
+ u8 command = ST7571_DISPLAY_OFF;
+
+ st7571_send_command_list(st7571, &command, 1);
+}
+
+static const struct drm_encoder_funcs st7571_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = {
+	.atomic_enable = st7571_encoder_atomic_enable,
+	.atomic_disable = st7571_encoder_atomic_disable,
+};
+
+/*
+ * Connector
+ */
+
+static const struct drm_connector_funcs st7571_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_mode_config_funcs st7571_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_display_mode st7571_mode(struct st7571_device *st7571)
+{
+ struct drm_display_mode mode = {
+ DRM_SIMPLE_MODE(st7571->ncols, st7571->nlines,
+ st7571->width_mm, st7571->height_mm),
+ };
+
+ return mode;
+}
+
+static int st7571_mode_config_init(struct st7571_device *st7571)
+{
+ struct drm_device *dev = &st7571->dev;
+ const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
+ dev->mode_config.min_width = constraints->min_ncols;
+ dev->mode_config.min_height = constraints->min_nlines;
+ dev->mode_config.max_width = constraints->max_ncols;
+ dev->mode_config.max_height = constraints->max_nlines;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.funcs = &st7571_mode_config_funcs;
+
+ return 0;
+}
+
+static int st7571_plane_init(struct st7571_device *st7571,
+ const struct st7571_panel_format *pformat)
+{
+ struct drm_plane *primary_plane = &st7571->primary_plane;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &st7571_primary_plane_funcs,
+ pformat->formats,
+ pformat->nformats,
+ st7571_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(primary_plane, &st7571_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ return 0;
+}
+
+static int st7571_crtc_init(struct st7571_device *st7571)
+{
+ struct drm_plane *primary_plane = &st7571->primary_plane;
+ struct drm_crtc *crtc = &st7571->crtc;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &st7571_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &st7571_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int st7571_encoder_init(struct st7571_device *st7571)
+{
+ struct drm_encoder *encoder = &st7571->encoder;
+ struct drm_crtc *crtc = &st7571->crtc;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_encoder_init(dev, encoder, &st7571_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &st7571_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ return 0;
+}
+
+static int st7571_connector_init(struct st7571_device *st7571)
+{
+ struct drm_connector *connector = &st7571->connector;
+ struct drm_encoder *encoder = &st7571->encoder;
+ struct drm_device *dev = &st7571->dev;
+ int ret;
+
+ ret = drm_connector_init(dev, connector, &st7571_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(connector, &st7571_connector_helper_funcs);
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+DEFINE_DRM_GEM_FOPS(st7571_fops);
+
+static const struct drm_driver st7571_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &st7571_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+};
+
+static const struct regmap_bus st7571_regmap_bus = {
+ .read = st7571_regmap_read,
+ .write = st7571_regmap_write,
+};
+
+static const struct regmap_config st7571_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_write = true,
+};
+
+static int st7571_validate_parameters(struct st7571_device *st7571)
+{
+ struct device *dev = st7571->dev.dev;
+ const struct st7571_panel_constraints *constraints = &st7571->pdata->constraints;
+
+ if (st7571->width_mm == 0) {
+ dev_err(dev, "Invalid panel width\n");
+ return -EINVAL;
+ }
+
+ if (st7571->height_mm == 0) {
+ dev_err(dev, "Invalid panel height\n");
+ return -EINVAL;
+ }
+
+ if (st7571->nlines < constraints->min_nlines ||
+ st7571->nlines > constraints->max_nlines) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->startline + st7571->nlines > constraints->max_nlines) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->ncols < constraints->min_ncols ||
+ st7571->ncols > constraints->max_ncols) {
+ dev_err(dev, "Invalid timing configuration.\n");
+ return -EINVAL;
+ }
+
+ if (st7571->grayscale && !constraints->support_grayscale) {
+ dev_err(dev, "Grayscale not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int st7571_parse_dt(struct st7571_device *st7571)
+{
+ struct device *dev = &st7571->client->dev;
+ struct device_node *np = dev->of_node;
+ struct display_timing dt;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &dt);
+ if (ret) {
+ dev_err(dev, "Failed to get display timing from DT\n");
+ return ret;
+ }
+
+ of_property_read_u32(np, "width-mm", &st7571->width_mm);
+ of_property_read_u32(np, "height-mm", &st7571->height_mm);
+ st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale");
+
+ if (st7571->grayscale) {
+ st7571->pformat = &st7571_grayscale;
+ st7571->bpp = 2;
+ } else {
+ st7571->pformat = &st7571_monochrome;
+ st7571->bpp = 1;
+ }
+
+ st7571->startline = dt.vfront_porch.typ;
+ st7571->nlines = dt.vactive.typ;
+ st7571->ncols = dt.hactive.typ;
+
+ st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(st7571->reset))
+ return PTR_ERR(st7571->reset);
+
+ return 0;
+}
+
+static void st7571_reset(struct st7571_device *st7571)
+{
+ gpiod_set_value_cansleep(st7571->reset, 1);
+ fsleep(20);
+ gpiod_set_value_cansleep(st7571->reset, 0);
+}
+
+static int st7571_lcd_init(struct st7571_device *st7571)
+{
+ /*
+ * Most of the initialization sequence is taken directly from the
+	 * reference initialization code in the ST7571 datasheet.
+ */
+ u8 commands[] = {
+ ST7571_DISPLAY_OFF,
+
+ ST7571_SET_MODE_MSB,
+ ST7571_SET_MODE_LSB(0x2e),
+
+ ST7571_SET_SEG_SCAN_DIR(0),
+ ST7571_SET_COM_SCAN_DIR(1),
+
+ ST7571_SET_COM0_MSB,
+ ST7571_SET_COM0_LSB(0x00),
+
+ ST7571_SET_START_LINE_MSB,
+ ST7571_SET_START_LINE_LSB(st7571->startline),
+
+ ST7571_OSC_ON,
+ ST7571_SET_REGULATOR_REG(5),
+ ST7571_SET_CONTRAST_MSB,
+ ST7571_SET_CONTRAST_LSB(0x33),
+ ST7571_SET_LCD_BIAS(0x04),
+ ST7571_SET_DISPLAY_DUTY_MSB,
+ ST7571_SET_DISPLAY_DUTY_LSB(st7571->nlines),
+
+ ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */
+ ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
+ ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
+
+ ST7571_COMMAND_SET_3,
+ ST7571_SET_COLOR_MODE(st7571->pformat->mode),
+ ST7571_COMMAND_SET_NORMAL,
+
+ ST7571_SET_REVERSE(0),
+ ST7571_SET_ENTIRE_DISPLAY_ON(0),
+ };
+
+ /* Perform a reset before initializing the controller */
+ st7571_reset(st7571);
+
+ return st7571_send_command_list(st7571, commands, ARRAY_SIZE(commands));
+}
+
+static int st7571_probe(struct i2c_client *client)
+{
+ struct st7571_device *st7571;
+ struct drm_device *dev;
+ int ret;
+
+ st7571 = devm_drm_dev_alloc(&client->dev, &st7571_driver,
+ struct st7571_device, dev);
+ if (IS_ERR(st7571))
+ return PTR_ERR(st7571);
+
+ dev = &st7571->dev;
+ st7571->client = client;
+ i2c_set_clientdata(client, st7571);
+ st7571->pdata = device_get_match_data(&client->dev);
+
+ ret = st7571_parse_dt(st7571);
+ if (ret)
+ return ret;
+
+ ret = st7571_validate_parameters(st7571);
+ if (ret)
+ return ret;
+
+ st7571->mode = st7571_mode(st7571);
+
+ /*
+ * The hardware design could make it hard to detect a NAK on the I2C bus.
+	 * If the adapter supports protocol mangling, set the I2C_M_IGNORE_NAK
+	 * flag at the expense of possible cruft in the logs.
+ */
+ if (i2c_check_functionality(client->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ st7571->ignore_nak = true;
+
+ st7571->regmap = devm_regmap_init(&client->dev, &st7571_regmap_bus,
+ client, &st7571_regmap_config);
+ if (IS_ERR(st7571->regmap)) {
+ return dev_err_probe(&client->dev, PTR_ERR(st7571->regmap),
+ "Failed to initialize regmap\n");
+ }
+
+ st7571->hwbuf = devm_kzalloc(&client->dev,
+ (st7571->nlines * st7571->ncols * st7571->bpp) / 8,
+ GFP_KERNEL);
+ if (!st7571->hwbuf)
+ return -ENOMEM;
+
+ st7571->row = devm_kzalloc(&client->dev,
+ (st7571->ncols * st7571->bpp),
+ GFP_KERNEL);
+ if (!st7571->row)
+ return -ENOMEM;
+
+ ret = st7571_mode_config_init(st7571);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize mode config\n");
+
+ ret = st7571_plane_init(st7571, st7571->pformat);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize primary plane\n");
+
+ ret = st7571_crtc_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize CRTC\n");
+
+ ret = st7571_encoder_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize encoder\n");
+
+ ret = st7571_connector_init(st7571);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to initialize connector\n");
+
+ drm_mode_config_reset(dev);
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register DRM device\n");
+
+ drm_client_setup(dev, NULL);
+ return 0;
+}
+
+static void st7571_remove(struct i2c_client *client)
+{
+ struct st7571_device *st7571 = i2c_get_clientdata(client);
+
+ drm_dev_unplug(&st7571->dev);
+}
+
+static const struct st7571_panel_data st7571_config = {
+ .init = st7571_lcd_init,
+ .constraints = {
+ .min_nlines = 1,
+ .max_nlines = 128,
+ .min_ncols = 128,
+ .max_ncols = 128,
+ .support_grayscale = true,
+ },
+};
+
+static const struct of_device_id st7571_of_match[] = {
+ { .compatible = "sitronix,st7571", .data = &st7571_config },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st7571_of_match);
+
+static const struct i2c_device_id st7571_id[] = {
+ { "st7571", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, st7571_id);
+
+static struct i2c_driver st7571_i2c_driver = {
+ .driver = {
+ .name = "st7571",
+ .of_match_table = st7571_of_match,
+ },
+ .probe = st7571_probe,
+ .remove = st7571_remove,
+ .id_table = st7571_id,
+};
+
+module_i2c_driver(st7571_i2c_driver);
+
+MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>");
+MODULE_DESCRIPTION("DRM Driver for Sitronix ST7571 LCD controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/sitronix/st7586.c
index a29672d84ede..a29672d84ede 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/sitronix/st7586.c
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/sitronix/st7735r.c
index 1d60f6e5b3bc..1d60f6e5b3bc 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/sitronix/st7735r.c
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index dd2006d51c7a..eec43d1a5595 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -974,7 +974,7 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
{
- unsigned int columns = DIV_ROUND_UP(ssd130x->height, SSD132X_SEGMENT_WIDTH);
+ unsigned int columns = DIV_ROUND_UP(ssd130x->width, SSD132X_SEGMENT_WIDTH);
unsigned int height = ssd130x->height;
memset(data_array, 0, columns * height);
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index cb2816985305..a3447622a33c 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -784,19 +784,12 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
{
struct platform_device *pdev = to_platform_device(dev);
struct dpu_context *ctx = &dpu->ctx;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
dev_err(dev, "failed to map dpu registers\n");
- return -EFAULT;
+ return PTR_ERR(ctx->base);
}
ctx->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index 8fc26479bb6b..23b0e1dc547a 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -901,18 +901,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi,
{
struct platform_device *pdev = to_platform_device(dev);
struct dsi_context *ctx = &dsi->ctx;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O resource\n");
- return -EINVAL;
- }
-
- ctx->base = devm_ioremap(dev, res->start, resource_size(res));
- if (!ctx->base) {
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base)) {
drm_err(dsi->drm, "failed to map dsi host registers\n");
- return -ENXIO;
+ return PTR_ERR(ctx->base);
}
ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config);
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 063f82d23d80..8c529b0cca8b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -177,7 +177,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct device_node *vtg_np;
struct sti_compositor *compo;
- struct resource *res;
unsigned int i;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
@@ -194,17 +193,10 @@ static int sti_compositor_probe(struct platform_device *pdev)
memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
sizeof(struct sti_compositor_data));
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- compo->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (compo->regs == NULL) {
+ compo->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(compo->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(compo->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 4dcddd02629b..74a1eef4674e 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -511,7 +511,6 @@ static int sti_dvo_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_dvo *dvo;
- struct resource *res;
struct device_node *np = dev->of_node;
DRM_INFO("%s\n", __func__);
@@ -523,16 +522,9 @@ static int sti_dvo_probe(struct platform_device *pdev)
}
dvo->dev = pdev->dev;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg");
- if (!res) {
- DRM_ERROR("Invalid dvo resource\n");
- return -ENOMEM;
- }
- dvo->regs = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!dvo->regs)
- return -ENOMEM;
+ dvo->regs = devm_platform_ioremap_resource_byname(pdev, "dvo-reg");
+ if (IS_ERR(dvo->regs))
+ return PTR_ERR(dvo->regs);
dvo->clk_pix = devm_clk_get(dev, "dvo_pix");
if (IS_ERR(dvo->clk_pix)) {
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 14fdc00d2ba0..d202b6c1eb8f 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -693,7 +693,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
connector->hda = hda;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return -ENOMEM;
@@ -750,16 +750,9 @@ static int sti_hda_probe(struct platform_device *pdev)
return -ENOMEM;
hda->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
- if (!res) {
- DRM_ERROR("Invalid hda resource\n");
- return -ENOMEM;
- }
- hda->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hda->regs)
- return -ENOMEM;
+ hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg");
+ if (IS_ERR(hda->regs))
+ return PTR_ERR(hda->regs);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 164a34d793d8..37b8d619066e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1380,7 +1380,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_hdmi *hdmi;
struct device_node *np = dev->of_node;
- struct resource *res;
struct device_node *ddc;
int ret;
@@ -1399,17 +1398,9 @@ static int sti_hdmi_probe(struct platform_device *pdev)
}
hdmi->dev = pdev->dev;
-
- /* Get resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
- if (!res) {
- DRM_ERROR("Invalid hdmi resource\n");
- ret = -ENOMEM;
- goto release_adapter;
- }
- hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hdmi->regs) {
- ret = -ENOMEM;
+ hdmi->regs = devm_platform_ioremap_resource_byname(pdev, "hdmi-reg");
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
goto release_adapter;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 0f658709c9d0..03684062309b 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1356,7 +1356,6 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *vtg_np;
struct sti_hqvdp *hqvdp;
- struct resource *res;
DRM_DEBUG_DRIVER("\n");
@@ -1367,17 +1366,10 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
}
hqvdp->dev = dev;
-
- /* Get Memory resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENXIO;
- }
- hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!hqvdp->regs) {
+ hqvdp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hqvdp->regs)) {
DRM_ERROR("Register mapping failed\n");
- return -ENXIO;
+ return PTR_ERR(hqvdp->regs);
}
/* Get clock resources */
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index af6c06f448c4..6a464b035de8 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -838,7 +838,6 @@ static int sti_tvout_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct sti_tvout *tvout;
- struct resource *res;
DRM_INFO("%s\n", __func__);
@@ -850,16 +849,9 @@ static int sti_tvout_probe(struct platform_device *pdev)
return -ENOMEM;
tvout->dev = dev;
-
- /* get memory resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
- if (!res) {
- DRM_ERROR("Invalid glue resource\n");
- return -ENOMEM;
- }
- tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!tvout->regs)
- return -ENOMEM;
+ tvout->regs = devm_platform_ioremap_resource_byname(pdev, "tvout-reg");
+ if (IS_ERR(tvout->regs))
+ return PTR_ERR(tvout->regs);
/* get reset resources */
tvout->reset = devm_reset_control_get(dev, "tvout");
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 5ba469b711b5..ee81691b3203 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -380,23 +380,15 @@ static int vtg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sti_vtg *vtg;
- struct resource *res;
int ret;
vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
if (!vtg)
return -ENOMEM;
-
- /* Get Memory ressources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Get memory resource failed\n");
- return -ENOMEM;
- }
- vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!vtg->regs) {
+ vtg->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vtg->regs)) {
DRM_ERROR("failed to remap I/O memory\n");
- return -ENOMEM;
+ return PTR_ERR(vtg->regs);
}
vtg->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 4613e8e3b8fd..a3ae9a93ce66 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -934,28 +934,27 @@ static const struct drm_connector_funcs lvds_conn_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int lvds_attach(struct drm_bridge *bridge,
+static int lvds_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct stm_lvds *lvds = bridge_to_stm_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
- struct drm_encoder *encoder = bridge->encoder;
int ret;
- if (!bridge->encoder) {
+ if (!encoder) {
drm_err(bridge->dev, "Parent encoder object not found\n");
return -ENODEV;
}
/* Set the encoder type as caller does not know it */
- bridge->encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
/* No cloning support */
- bridge->encoder->possible_clones = 0;
+ encoder->possible_clones = 0;
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
- return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
+ return drm_bridge_attach(encoder, lvds->next_bridge,
bridge, flags);
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
diff --git a/drivers/gpu/drm/sysfb/Kconfig b/drivers/gpu/drm/sysfb/Kconfig
new file mode 100644
index 000000000000..9c9884c7efc6
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Kconfig
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "Drivers for system framebuffers"
+ depends on DRM
+
+config DRM_SYSFB_HELPER
+ tristate
+ depends on DRM
+
+config DRM_EFIDRM
+ tristate "EFI framebuffer driver"
+ depends on DRM && MMU && EFI && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for EFI framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via EFI interfaces.
+
+config DRM_OFDRM
+ tristate "Open Firmware display driver"
+ depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for Open Firmware framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the Open Firmware before the kernel boots. Scanout buffer, size,
+ and display format must be provided via device tree.
+
+config DRM_SIMPLEDRM
+ tristate "Simple framebuffer driver"
+ depends on DRM && MMU
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ help
+ DRM driver for simple platform-provided framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via device tree,
+ UEFI, VESA, etc.
+
+ On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
+ to use UEFI and VESA framebuffers.
+
+config DRM_VESADRM
+ tristate "VESA framebuffer driver"
+ depends on DRM && MMU && X86 && (!SYSFB_SIMPLEFB || COMPILE_TEST)
+ select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SYSFB_HELPER
+ select SYSFB
+ help
+ DRM driver for VESA framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via VBE interfaces.
+
+endmenu
diff --git a/drivers/gpu/drm/sysfb/Makefile b/drivers/gpu/drm/sysfb/Makefile
new file mode 100644
index 000000000000..a156c496413d
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+drm_sysfb_helper-y := \
+ drm_sysfb.o \
+ drm_sysfb_modeset.o
+drm_sysfb_helper-$(CONFIG_SCREEN_INFO) += drm_sysfb_screen_info.o
+obj-$(CONFIG_DRM_SYSFB_HELPER) += drm_sysfb_helper.o
+
+obj-$(CONFIG_DRM_EFIDRM) += efidrm.o
+obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
+obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
+obj-$(CONFIG_DRM_VESADRM) += vesadrm.o
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb.c b/drivers/gpu/drm/sysfb/drm_sysfb.c
new file mode 100644
index 000000000000..308f82153b15
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+
+#include <drm/drm_print.h>
+
+#include "drm_sysfb_helper.h"
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (value > min(max, INT_MAX)) {
+ drm_warn(dev, "%s of %llu exceeds maximum of %u\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+EXPORT_SYMBOL(drm_sysfb_get_validated_int);
+
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max)
+{
+ if (!value) {
+ drm_warn(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ }
+ return drm_sysfb_get_validated_int(dev, name, value, max);
+}
+EXPORT_SYMBOL(drm_sysfb_get_validated_int0);
+
+MODULE_DESCRIPTION("Helpers for DRM sysfb drivers");
+MODULE_LICENSE("GPL");
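As a usage sketch, not part of the patch: a driver probe path would run firmware-reported dimensions through these helpers before trusting them. The function name and the U16_MAX bound below are hypothetical; only the two drm_sysfb_get_validated_int0() calls reflect the API above.

static int example_validate_fb(struct drm_device *dev, u64 fb_width, u64 fb_height)
{
	int width, height;

	/* Both helpers log via drm_warn() and return -EINVAL on bad input. */
	width = drm_sysfb_get_validated_int0(dev, "width", fb_width, U16_MAX);
	if (width < 0)
		return width;
	height = drm_sysfb_get_validated_int0(dev, "height", fb_height, U16_MAX);
	if (height < 0)
		return height;

	return 0;
}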
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
new file mode 100644
index 000000000000..cb08a88242cc
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef DRM_SYSFB_HELPER_H
+#define DRM_SYSFB_HELPER_H
+
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
+
+#include <video/pixel_format.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modes.h>
+
+struct drm_format_info;
+struct drm_scanout_buffer;
+struct screen_info;
+
+/*
+ * Input parsing
+ */
+
+struct drm_sysfb_format {
+ struct pixel_format pixel;
+ u32 fourcc;
+};
+
+int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
+ u64 value, u32 max);
+int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
+ u64 value, u32 max);
+
+#if defined(CONFIG_SCREEN_INFO)
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si);
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si);
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res);
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size);
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size);
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+ const struct drm_sysfb_format *formats,
+ size_t nformats,
+ const struct screen_info *si);
+#endif
+
+/*
+ * Display modes
+ */
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm);
+
+/*
+ * Device
+ */
+
+struct drm_sysfb_device {
+ struct drm_device dev;
+
+ const u8 *edid; /* can be NULL */
+
+ /* hardware settings */
+ struct drm_display_mode fb_mode;
+ const struct drm_format_info *fb_format;
+ unsigned int fb_pitch;
+ unsigned int fb_gamma_lut_size;
+
+ /* hardware-framebuffer kernel address */
+ struct iosys_map fb_addr;
+};
+
+static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *dev)
+{
+ return container_of(dev, struct drm_sysfb_device, dev);
+}
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state);
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+#define DRM_SYSFB_PLANE_NFORMATS(_num_native) \
+ ((_num_native) + 1)
+
+#define DRM_SYSFB_PLANE_FORMAT_MODIFIERS \
+ DRM_FORMAT_MOD_LINEAR, \
+ DRM_FORMAT_MOD_INVALID
+
+#define DRM_SYSFB_PLANE_HELPER_FUNCS \
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .atomic_check = drm_sysfb_plane_helper_atomic_check, \
+ .atomic_update = drm_sysfb_plane_helper_atomic_update, \
+ .atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
+ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer
+
+#define DRM_SYSFB_PLANE_FUNCS \
+ .update_plane = drm_atomic_helper_update_plane, \
+ .disable_plane = drm_atomic_helper_disable_plane, \
+ DRM_GEM_SHADOW_PLANE_FUNCS
+
+/*
+ * CRTC
+ */
+
+struct drm_sysfb_crtc_state {
+ struct drm_crtc_state base;
+
+ /* Primary-plane format; required for color mgmt. */
+ const struct drm_format_info *format;
+};
+
+static inline struct drm_sysfb_crtc_state *
+to_drm_sysfb_crtc_state(struct drm_crtc_state *base)
+{
+ return container_of(base, struct drm_sysfb_crtc_state, base);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+
+#define DRM_SYSFB_CRTC_HELPER_FUNCS \
+ .mode_valid = drm_sysfb_crtc_helper_mode_valid, \
+ .atomic_check = drm_sysfb_crtc_helper_atomic_check
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+
+#define DRM_SYSFB_CRTC_FUNCS \
+ .reset = drm_sysfb_crtc_reset, \
+ .set_config = drm_atomic_helper_set_config, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = drm_sysfb_crtc_atomic_duplicate_state, \
+ .atomic_destroy_state = drm_sysfb_crtc_atomic_destroy_state
+
+/*
+ * Connector
+ */
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector);
+
+#define DRM_SYSFB_CONNECTOR_HELPER_FUNCS \
+ .get_modes = drm_sysfb_connector_helper_get_modes
+
+#define DRM_SYSFB_CONNECTOR_FUNCS \
+ .reset = drm_atomic_helper_connector_reset, \
+ .fill_modes = drm_helper_probe_single_connector_modes, \
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+
+/*
+ * Mode config
+ */
+
+#define DRM_SYSFB_MODE_CONFIG_FUNCS \
+ .fb_create = drm_gem_fb_create_with_dirty, \
+ .atomic_check = drm_atomic_helper_check, \
+ .atomic_commit = drm_atomic_helper_commit
+
+#endif
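A minimal sketch of how this header is consumed, assuming a made-up driver foodrm (efidrm below is the in-tree example): the driver embeds struct drm_sysfb_device and pulls the shared hooks into its funcs tables through the macros, adding only its own cleanup callbacks.

struct foodrm_device {
	struct drm_sysfb_device sysfb;	/* common sysfb state, incl. drm_device */
	struct drm_plane primary_plane;
	struct drm_crtc crtc;
};

static const struct drm_plane_helper_funcs foodrm_plane_helper_funcs = {
	DRM_SYSFB_PLANE_HELPER_FUNCS,	/* shadow-plane and sysfb atomic hooks */
};

static const struct drm_crtc_funcs foodrm_crtc_funcs = {
	DRM_SYSFB_CRTC_FUNCS,
	.destroy = drm_crtc_cleanup,	/* cleanup stays driver-specific */
};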
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
new file mode 100644
index 000000000000..ffaa2522ab96
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "drm_sysfb_helper.h"
+
+struct drm_display_mode drm_sysfb_mode(unsigned int width,
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm)
+{
+ /*
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
+ */
+ if (!width_mm)
+ width_mm = DRM_MODE_RES_MM(width, 96ul);
+ if (!height_mm)
+ height_mm = DRM_MODE_RES_MM(height, 96ul);
+
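+	/*
+	 * DRM_MODE_INIT() expands to struct initializers, so the mode is
+	 * built in a block scope of its own once width_mm and height_mm
+	 * have been fixed up.
+	 */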
+ {
+ const struct drm_display_mode mode = {
+ DRM_MODE_INIT(60, width, height, width_mm, height_mm)
+ };
+
+ return mode;
+ }
+}
+EXPORT_SYMBOL(drm_sysfb_mode);
+
+/*
+ * Plane
+ */
+
+int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (new_fb->format != sysfb->fb_format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ sysfb->fb_pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+ new_sysfb_crtc_state->format = new_fb->format;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check);
+
+void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_rect dst_clip = plane_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
+ drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
+ &damage, &shadow_plane_state->fmtcnv_state);
+ }
+
+ drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_update);
+
+void drm_sysfb_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct iosys_map dst = sysfb->fb_addr;
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
+ unsigned int dst_pitch = sysfb->fb_pitch;
+ const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_rect dst_clip;
+ unsigned long lines, linepixels, i;
+ int idx;
+
+ drm_rect_init(&dst_clip,
+ plane_state->src_x >> 16, plane_state->src_y >> 16,
+ plane_state->src_w >> 16, plane_state->src_h >> 16);
+
+ lines = drm_rect_height(&dst_clip);
+ linepixels = drm_rect_width(&dst_clip);
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ /* Clear buffer to black if disabled */
+ dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
+ for (i = 0; i < lines; ++i) {
+ memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
+ dst_vmap += dst_pitch;
+ }
+
+ drm_dev_exit(idx);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_disable);
+
+int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+
+ sb->width = sysfb->fb_mode.hdisplay;
+ sb->height = sysfb->fb_mode.vdisplay;
+ sb->format = sysfb->fb_format;
+ sb->pitch[0] = sysfb->fb_pitch;
+ sb->map[0] = sysfb->fb_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer);
+
+/*
+ * CRTC
+ */
+
+static void drm_sysfb_crtc_state_destroy(struct drm_sysfb_crtc_state *sysfb_crtc_state)
+{
+ __drm_atomic_helper_crtc_destroy_state(&sysfb_crtc_state->base);
+
+ kfree(sysfb_crtc_state);
+}
+
+enum drm_mode_status drm_sysfb_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_mode_valid);
+
+int drm_sysfb_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
+
+ if (!new_crtc_state->enable)
+ return 0;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
+ if (ret)
+ return ret;
+
+ if (new_crtc_state->color_mgmt_changed) {
+ const size_t gamma_lut_length =
+ sysfb->fb_gamma_lut_size * sizeof(struct drm_color_lut);
+ const struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
+
+ if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
+ drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check);
+
+void drm_sysfb_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (crtc->state)
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state));
+
+ sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL);
+ if (sysfb_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_reset);
+
+struct drm_crtc_state *drm_sysfb_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+
+ if (drm_WARN_ON(dev, !crtc_state))
+ return NULL;
+
+ new_sysfb_crtc_state = kzalloc(sizeof(*new_sysfb_crtc_state), GFP_KERNEL);
+ if (!new_sysfb_crtc_state)
+ return NULL;
+
+ sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new_sysfb_crtc_state->base);
+ new_sysfb_crtc_state->format = sysfb_crtc_state->format;
+
+ return &new_sysfb_crtc_state->base;
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_duplicate_state);
+
+void drm_sysfb_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+ drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc_state));
+}
+EXPORT_SYMBOL(drm_sysfb_crtc_atomic_destroy_state);
+
+/*
+ * Connector
+ */
+
+static int drm_sysfb_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct drm_sysfb_device *sysfb = data;
+ const u8 *edid = sysfb->edid;
+ size_t off = block * EDID_LENGTH;
+ size_t end = off + len;
+
+ if (!edid)
+ return -EINVAL;
+ if (end > EDID_LENGTH)
+ return -EINVAL;
+ memcpy(buf, &edid[off], len);
+
+	/*
+	 * EDID extension blocks are not available here, and advertising
+	 * them would upset the DRM helpers. Clear the extension-count
+	 * field and keep the block checksum valid by adding the old
+	 * count into the checksum byte: the sum of all 128 bytes then
+	 * remains 0 (mod 256).
+	 */
+ buf[127] += buf[126];
+ buf[126] = 0;
+
+ return 0;
+}
+
+int drm_sysfb_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(connector->dev);
+ const struct drm_edid *drm_edid;
+
+ if (sysfb->edid) {
+ drm_edid = drm_edid_read_custom(connector, drm_sysfb_get_edid_block, sysfb);
+ drm_edid_connector_update(connector, drm_edid);
+ drm_edid_free(drm_edid);
+ }
+
+ /* Return the fixed mode even with EDID */
+ return drm_connector_helper_get_modes_fixed(connector, &sysfb->fb_mode);
+}
+EXPORT_SYMBOL(drm_sysfb_connector_helper_get_modes);
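To illustrate the subclassed CRTC state, a sketch with a made-up driver name (ofdrm's atomic_flush later in this patch is the real user): a driver that must program per-format hardware reads the format captured during atomic_check back out of its drm_sysfb_crtc_state.

static void foodrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct drm_sysfb_crtc_state *sysfb_state = to_drm_sysfb_crtc_state(crtc_state);

	if (crtc_state->enable && crtc_state->color_mgmt_changed) {
		const struct drm_format_info *format = sysfb_state->format;

		/* program gamma/palette hardware for this format */
	}
}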
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
new file mode 100644
index 000000000000..0b3fb874a51f
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/screen_info.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+
+#include "drm_sysfb_helper.h"
+
+static s64 drm_sysfb_get_validated_size0(struct drm_device *dev, const char *name,
+ u64 value, u64 max)
+{
+ if (!value) {
+ drm_warn(dev, "%s of 0 not allowed\n", name);
+ return -EINVAL;
+ } else if (value > min(max, S64_MAX)) {
+ drm_warn(dev, "%s of %llu exceeds maximum of %llu\n", name, value, max);
+ return -EINVAL;
+ }
+ return value;
+}
+
+int drm_sysfb_get_width_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return drm_sysfb_get_validated_int0(dev, "width", si->lfb_width, U16_MAX);
+}
+EXPORT_SYMBOL(drm_sysfb_get_width_si);
+
+int drm_sysfb_get_height_si(struct drm_device *dev, const struct screen_info *si)
+{
+ return drm_sysfb_get_validated_int0(dev, "height", si->lfb_height, U16_MAX);
+}
+EXPORT_SYMBOL(drm_sysfb_get_height_si);
+
+struct resource *drm_sysfb_get_memory_si(struct drm_device *dev,
+ const struct screen_info *si,
+ struct resource *res)
+{
+ ssize_t num;
+
+ num = screen_info_resources(si, res, 1);
+ if (!num) {
+ drm_warn(dev, "memory resource not found\n");
+ return NULL;
+ }
+
+ return res;
+}
+EXPORT_SYMBOL(drm_sysfb_get_memory_si);
+
+int drm_sysfb_get_stride_si(struct drm_device *dev, const struct screen_info *si,
+ const struct drm_format_info *format,
+ unsigned int width, unsigned int height, u64 size)
+{
+ u64 lfb_linelength = si->lfb_linelength;
+
+ if (!lfb_linelength)
+ lfb_linelength = drm_format_info_min_pitch(format, 0, width);
+
+ return drm_sysfb_get_validated_int0(dev, "stride", lfb_linelength, div64_u64(size, height));
+}
+EXPORT_SYMBOL(drm_sysfb_get_stride_si);
+
+u64 drm_sysfb_get_visible_size_si(struct drm_device *dev, const struct screen_info *si,
+ unsigned int height, unsigned int stride, u64 size)
+{
+ u64 vsize = PAGE_ALIGN(height * stride);
+
+ return drm_sysfb_get_validated_size0(dev, "visible size", vsize, size);
+}
+EXPORT_SYMBOL(drm_sysfb_get_visible_size_si);
+
+const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
+ const struct drm_sysfb_format *formats,
+ size_t nformats,
+ const struct screen_info *si)
+{
+ const struct drm_format_info *format = NULL;
+ u32 bits_per_pixel;
+ size_t i;
+
+ bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+
+ for (i = 0; i < nformats; ++i) {
+ const struct pixel_format *f = &formats[i].pixel;
+
+ if (bits_per_pixel == f->bits_per_pixel &&
+ si->red_size == f->red.length &&
+ si->red_pos == f->red.offset &&
+ si->green_size == f->green.length &&
+ si->green_pos == f->green.offset &&
+ si->blue_size == f->blue.length &&
+ si->blue_pos == f->blue.offset) {
+ format = drm_format_info(formats[i].fourcc);
+ break;
+ }
+ }
+
+ if (!format)
+ drm_warn(dev, "No compatible color format found\n");
+
+ return format;
+}
+EXPORT_SYMBOL(drm_sysfb_get_format_si);
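A worked example of the stride fallback, with hypothetical values: if si->lfb_linelength is 0 for a 1024-pixel-wide XRGB8888 framebuffer, drm_sysfb_get_stride_si() falls back to drm_format_info_min_pitch(format, 0, 1024) = 1024 * 4 = 4096 bytes, and that value is then range-checked against size / height as the upper bound.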
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
new file mode 100644
index 000000000000..46912924636a
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/efi.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "efidrm"
+#define DRIVER_DESC "DRM driver for EFI platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static const struct drm_format_info *efidrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct drm_sysfb_format formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ { PIXEL_FORMAT_XRGB2101010, DRM_FORMAT_XRGB2101010, },
+ };
+
+ return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
+}
+
+static u64 efidrm_get_mem_flags(struct drm_device *dev, resource_size_t start,
+ resource_size_t len)
+{
+ u64 attribute = EFI_MEMORY_UC | EFI_MEMORY_WC |
+ EFI_MEMORY_WT | EFI_MEMORY_WB;
+ u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
+ resource_size_t end = start + len;
+ efi_memory_desc_t md;
+ u64 md_end;
+
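+	/*
+	 * Default to write-combined or uncached mappings unless the EFI
+	 * memory map has a descriptor that covers the whole framebuffer.
+	 */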
+ if (!efi_enabled(EFI_MEMMAP) || efi_mem_desc_lookup(start, &md))
+ goto out;
+
+ md_end = md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT);
+ if (end > md_end)
+ goto out;
+
+ attribute &= md.attribute;
+ if (attribute) {
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= attribute;
+ }
+
+out:
+ return mem_flags;
+}
+
+/*
+ * EFI device
+ */
+
+struct efidrm_device {
+ struct drm_sysfb_device sysfb;
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+/*
+ * Modesetting
+ */
+
+static const u64 efidrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs efidrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs efidrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static const struct drm_crtc_helper_funcs efidrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs efidrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs efidrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs efidrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs efidrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs efidrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize, mem_flags;
+ struct resource resbuf;
+ struct resource *res;
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base = NULL;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_EFI)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * EFI DRM driver
+ */
+
+ efi = devm_drm_dev_alloc(&pdev->dev, drv, struct efidrm_device, sysfb.dev);
+ if (IS_ERR(efi))
+ return ERR_CAST(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = efidrm_get_format_si(dev, si);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ width = drm_sysfb_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = drm_sysfb_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = drm_sysfb_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+#ifdef CONFIG_X86
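+	/*
+	 * On x86, edid_info carries the firmware-provided EDID block. A
+	 * return value of 8 from drm_edid_header_is_valid() means all 8
+	 * header bytes matched.
+	 */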
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ mem_flags = efidrm_get_mem_flags(dev, res->start, vsize);
+
+ if (mem_flags & EFI_MEMORY_WC)
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_UC)
+ screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ else if (mem_flags & EFI_MEMORY_WT)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WT);
+ else if (mem_flags & EFI_MEMORY_WB)
+ screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem),
+ MEMREMAP_WB);
+ else
+ drm_err(dev, "invalid mem_flags: 0x%llx\n", mem_flags);
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &efidrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ efi->formats, ARRAY_SIZE(efi->formats));
+
+ primary_plane = &efi->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs,
+ efi->formats, nformats,
+ efidrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &efidrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &efi->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &efidrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &efidrm_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &efi->encoder;
+ ret = drm_encoder_init(dev, encoder, &efidrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &efi->connector;
+ ret = drm_connector_init(dev, connector, &efidrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &efidrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return efi;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(efidrm_fops);
+
+static struct drm_driver efidrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &efidrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int efidrm_probe(struct platform_device *pdev)
+{
+ struct efidrm_device *efi;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ efi = efidrm_device_create(&efidrm_driver, pdev);
+ if (IS_ERR(efi))
+ return PTR_ERR(efi);
+ sysfb = &efi->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void efidrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver efidrm_platform_driver = {
+ .driver = {
+ .name = "efi-framebuffer",
+ },
+ .probe = efidrm_probe,
+ .remove = efidrm_remove,
+};
+
+module_platform_driver(efidrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index 13491c0e704a..fddfe8bea9f7 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -12,6 +12,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
@@ -21,7 +22,8 @@
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+
+#include "drm_sysfb_helper.h"
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
@@ -76,20 +78,12 @@ enum ofdrm_model {
static int display_get_validated_int(struct drm_device *dev, const char *name, uint32_t value)
{
- if (value > INT_MAX) {
- drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
- return -EINVAL;
- }
- return (int)value;
+ return drm_sysfb_get_validated_int(dev, name, value, INT_MAX);
}
static int display_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value)
{
- if (!value) {
- drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
- return -EINVAL;
- }
- return display_get_validated_int(dev, name, value);
+ return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX);
}
static const struct drm_format_info *display_get_validated_format(struct drm_device *dev,
@@ -226,6 +220,16 @@ static u64 display_get_address_of(struct drm_device *dev, struct device_node *of
return address;
}
+static const u8 *display_get_edid_of(struct drm_device *dev, struct device_node *of_node,
+ u8 buf[EDID_LENGTH])
+{
+ int ret = of_property_read_u8_array(of_node, "EDID", buf, EDID_LENGTH);
+
+ if (ret)
+ return NULL;
+ return buf;
+}
+
static bool is_avivo(u32 vendor, u32 device)
{
/* This will match most R5xx */
@@ -290,22 +294,17 @@ struct ofdrm_device_funcs {
};
struct ofdrm_device {
- struct drm_device dev;
- struct platform_device *pdev;
+ struct drm_sysfb_device sysfb;
const struct ofdrm_device_funcs *funcs;
- /* firmware-buffer settings */
- struct iosys_map screen_base;
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
/* colormap */
void __iomem *cmap_base;
+ u8 edid[EDID_LENGTH];
+
/* modesetting */
- uint32_t formats[8];
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
@@ -314,7 +313,7 @@ struct ofdrm_device {
static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev)
{
- return container_of(dev, struct ofdrm_device, dev);
+ return container_of(to_drm_sysfb_device(dev), struct ofdrm_device, sysfb);
}
/*
@@ -354,7 +353,7 @@ static void ofdrm_pci_release(void *data)
static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct pci_dev *pcidev;
@@ -397,7 +396,7 @@ static int ofdrm_device_init_pci(struct ofdrm_device *odev)
static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
struct resource *fb_res)
{
- struct platform_device *pdev = to_platform_device(odev->dev.dev);
+ struct platform_device *pdev = to_platform_device(odev->sysfb.dev.dev);
struct resource *res, *max_res = NULL;
u32 i;
@@ -423,7 +422,7 @@ static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node,
int bar_no, unsigned long offset, unsigned long size)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
const __be32 *addr_p;
u64 max_size, address;
unsigned int flags;
@@ -456,7 +455,7 @@ static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -618,7 +617,7 @@ static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
cpu_to_be32(0x00),
};
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
u64 address;
void __iomem *cmap_base;
@@ -648,7 +647,7 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index
static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
const struct drm_format_info *format)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
int i;
switch (format->format) {
@@ -687,7 +686,7 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
- struct drm_device *dev = &odev->dev;
+ struct drm_device *dev = &odev->sysfb.dev;
int i;
switch (format->format) {
@@ -731,205 +730,27 @@ static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
* Modesetting
*/
-struct ofdrm_crtc_state {
- struct drm_crtc_state base;
-
- /* Primary-plane format; required for color mgmt. */
- const struct drm_format_info *format;
+static const u64 ofdrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base)
-{
- return container_of(base, struct ofdrm_crtc_state, base);
-}
-
-static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
-{
- __drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base);
- kfree(ofdrm_crtc_state);
-}
-
-static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *new_state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != odev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- odev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
- new_ofdrm_crtc_state->format = new_fb->format;
-
- return 0;
-}
-
-static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct iosys_map dst = odev->screen_base;
- struct drm_rect dst_clip = plane_state->dst;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
- drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void ofdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
- struct iosys_map dst = odev->screen_base;
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
- unsigned int dst_pitch = odev->pitch;
- const struct drm_format_info *dst_format = odev->format;
- struct drm_rect dst_clip;
- unsigned long lines, linepixels, i;
- int idx;
-
- drm_rect_init(&dst_clip,
- plane_state->src_x >> 16, plane_state->src_y >> 16,
- plane_state->src_w >> 16, plane_state->src_h >> 16);
-
- lines = drm_rect_height(&dst_clip);
- linepixels = drm_rect_width(&dst_clip);
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear buffer to black if disabled */
- dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
- for (i = 0; i < lines; ++i) {
- memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
- dst_vmap += dst_pitch;
- }
-
- drm_dev_exit(idx);
-}
-
static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ofdrm_primary_plane_helper_atomic_check,
- .atomic_update = ofdrm_primary_plane_helper_atomic_update,
- .atomic_disable = ofdrm_primary_plane_helper_atomic_disable,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs ofdrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode);
-}
-
-static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *new_state)
-{
- static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut);
-
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
- int ret;
-
- if (!new_crtc_state->enable)
- return 0;
-
- ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
- if (ret)
- return ret;
-
- if (new_crtc_state->color_mgmt_changed) {
- struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
-
- if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
- drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
- const struct drm_format_info *format = ofdrm_crtc_state->format;
+ const struct drm_format_info *format = sysfb_crtc_state->format;
if (crtc_state->gamma_lut)
ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
@@ -938,91 +759,31 @@ static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_ato
}
}
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = {
- .mode_valid = ofdrm_crtc_helper_mode_valid,
- .atomic_check = ofdrm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
.atomic_flush = ofdrm_crtc_helper_atomic_flush,
};
-static void ofdrm_crtc_reset(struct drm_crtc *crtc)
-{
- struct ofdrm_crtc_state *ofdrm_crtc_state =
- kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL);
-
- if (crtc->state)
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state));
-
- if (ofdrm_crtc_state)
- __drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base);
- else
- __drm_atomic_helper_crtc_reset(crtc, NULL);
-}
-
-static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_crtc_state *crtc_state = crtc->state;
- struct ofdrm_crtc_state *new_ofdrm_crtc_state;
- struct ofdrm_crtc_state *ofdrm_crtc_state;
-
- if (drm_WARN_ON(dev, !crtc_state))
- return NULL;
-
- new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL);
- if (!new_ofdrm_crtc_state)
- return NULL;
-
- ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
-
- __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base);
- new_ofdrm_crtc_state->format = ofdrm_crtc_state->format;
-
- return &new_ofdrm_crtc_state->base;
-}
-
-static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
-{
- ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state));
-}
-
static const struct drm_crtc_funcs ofdrm_crtc_funcs = {
- .reset = ofdrm_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state,
- .atomic_destroy_state = ofdrm_crtc_atomic_destroy_state,
};
-static int ofdrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &odev->mode);
-}
+static const struct drm_encoder_funcs ofdrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = {
- .get_modes = ofdrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs ofdrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
/*
@@ -1072,32 +833,19 @@ static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = {
.cmap_write = ofdrm_qemu_cmap_write,
};
-static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height)
-{
- /*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height,
- DRM_MODE_RES_MM(width, 96ul),
- DRM_MODE_RES_MM(height, 96ul))
- };
-
- return mode;
-}
-
static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
struct device_node *of_node = pdev->dev.of_node;
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
enum ofdrm_model model;
bool big_endian;
int width, height, depth, linebytes;
const struct drm_format_info *format;
u64 address;
+ const u8 *edid;
resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize;
struct resource *res, *mem;
void __iomem *screen_base;
@@ -1109,10 +857,11 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev);
+ odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, sysfb.dev);
if (IS_ERR(odev))
return ERR_CAST(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, dev);
ret = ofdrm_device_init_pci(odev);
@@ -1246,16 +995,22 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
}
}
+ /* EDID is optional */
+ edid = display_get_edid_of(dev, of_node, odev->edid);
+
/*
* Firmware framebuffer
*/
- iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base);
- odev->mode = ofdrm_mode(width, height);
- odev->format = format;
- odev->pitch = linebytes;
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = linebytes;
+ if (odev->cmap_base)
+ sysfb->fb_gamma_lut_size = OFDRM_GAMMA_LUT_SIZE;
+ sysfb->edid = edid;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n",
&format->format, width, height, linebytes);
@@ -1302,15 +1057,16 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs);
- if (odev->cmap_base) {
- drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE);
- drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE);
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
}
/* Encoder */
encoder = &odev->encoder;
- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
+ ret = drm_encoder_init(dev, encoder, &ofdrm_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ERR_PTR(ret);
encoder->possible_crtcs = drm_crtc_mask(crtc);
@@ -1326,6 +1082,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
drm_connector_set_panel_orientation_with_quirk(connector,
DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
width, height);
+ if (edid)
+ drm_connector_attach_edid_property(connector);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
@@ -1360,19 +1118,21 @@ static struct drm_driver ofdrm_driver = {
static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
if (IS_ERR(odev))
return PTR_ERR(odev);
- dev = &odev->dev;
+ sysfb = &odev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, odev->format);
+ drm_client_setup(dev, sysfb->fb_format);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 5d9ab8adf800..a1c3119330de 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -14,7 +14,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -26,9 +25,10 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_panic.h>
#include <drm/drm_probe_helper.h>
+#include "drm_sysfb_helper.h"
+
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
#define DRIVER_MAJOR 1
@@ -42,24 +42,14 @@ static int
simplefb_get_validated_int(struct drm_device *dev, const char *name,
uint32_t value)
{
- if (value > INT_MAX) {
- drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
- name, value);
- return -EINVAL;
- }
- return (int)value;
+ return drm_sysfb_get_validated_int(dev, name, value, INT_MAX);
}
static int
simplefb_get_validated_int0(struct drm_device *dev, const char *name,
uint32_t value)
{
- if (!value) {
- drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
- name, value);
- return -EINVAL;
- }
- return simplefb_get_validated_int(dev, name, value);
+ return drm_sysfb_get_validated_int0(dev, name, value, INT_MAX);
}
static const struct drm_format_info *
@@ -217,7 +207,7 @@ simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
*/
struct simpledrm_device {
- struct drm_device dev;
+ struct drm_sysfb_device sysfb;
/* clocks */
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
@@ -236,28 +226,14 @@ struct simpledrm_device {
struct device_link **pwr_dom_links;
#endif
- /* simplefb settings */
- struct drm_display_mode mode;
- const struct drm_format_info *format;
- unsigned int pitch;
-
- /* memory management */
- struct iosys_map screen_base;
-
/* modesetting */
- uint32_t formats[8];
- size_t nformats;
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
};
-static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
-{
- return container_of(dev, struct simpledrm_device, dev);
-}
-
/*
* Hardware
*/
@@ -284,7 +260,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
static void simpledrm_device_release_clocks(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->clk_count; ++i) {
@@ -297,7 +273,7 @@ static void simpledrm_device_release_clocks(void *res)
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct clk *clock;
@@ -382,7 +358,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
static void simpledrm_device_release_regulators(void *res)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ struct simpledrm_device *sdev = res;
unsigned int i;
for (i = 0; i < sdev->regulator_count; ++i) {
@@ -395,7 +371,7 @@ static void simpledrm_device_release_regulators(void *res)
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct property *prop;
@@ -516,7 +492,7 @@ static void simpledrm_device_detach_genpd(void *res)
static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
{
- struct device *dev = sdev->dev.dev;
+ struct device *dev = sdev->sysfb.dev.dev;
int i;
sdev->pwr_dom_count = of_count_phandle_with_args(dev->of_node, "power-domains",
@@ -548,7 +524,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
simpledrm_device_detach_genpd(sdev);
return ret;
}
- drm_warn(&sdev->dev,
+ drm_warn(&sdev->sysfb.dev,
"pm_domain_attach_by_id(%u) failed: %d\n", i, ret);
continue;
}
@@ -559,7 +535,7 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!sdev->pwr_dom_links[i])
- drm_warn(&sdev->dev, "failed to link power-domain %d\n", i);
+ drm_warn(&sdev->sysfb.dev, "failed to link power-domain %d\n", i);
}
return devm_add_action_or_reset(dev, simpledrm_device_detach_genpd, sdev);
@@ -575,210 +551,56 @@ static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
* Modesetting
*/
-static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const u64 simpledrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
-static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_shadow_plane_state *new_shadow_plane_state =
- to_drm_shadow_plane_state(new_plane_state);
- struct drm_framebuffer *new_fb = new_plane_state->fb;
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int ret;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
- if (ret)
- return ret;
- else if (!new_plane_state->visible)
- return 0;
-
- if (new_fb->format != sdev->format) {
- void *buf;
-
- /* format conversion necessary; reserve buffer */
- buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
- sdev->pitch, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect damage;
- int ret, idx;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return;
-
- if (!drm_dev_enter(dev, &idx))
- goto out_drm_gem_fb_end_cpu_access;
-
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct drm_rect dst_clip = plane_state->dst;
- struct iosys_map dst = sdev->screen_base;
-
- if (!drm_rect_intersect(&dst_clip, &damage))
- continue;
-
- iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
- drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
- fb, &damage, &shadow_plane_state->fmtcnv_state);
- }
-
- drm_dev_exit(idx);
-out_drm_gem_fb_end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-}
-
-static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_device *dev = plane->dev;
- struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
- int idx;
-
- if (!drm_dev_enter(dev, &idx))
- return;
-
- /* Clear screen to black if disabled */
- iosys_map_memset(&sdev->screen_base, 0, 0, sdev->pitch * sdev->mode.vdisplay);
-
- drm_dev_exit(idx);
-}
-
-static int simpledrm_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
- struct drm_scanout_buffer *sb)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(plane->dev);
-
- sb->width = sdev->mode.hdisplay;
- sb->height = sdev->mode.vdisplay;
- sb->format = sdev->format;
- sb->pitch[0] = sdev->pitch;
- sb->map[0] = sdev->screen_base;
-
- return 0;
-}
-
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = simpledrm_primary_plane_helper_atomic_check,
- .atomic_update = simpledrm_primary_plane_helper_atomic_update,
- .atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
- .get_scanout_buffer = simpledrm_primary_plane_helper_get_scanout_buffer,
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
+ DRM_SYSFB_PLANE_FUNCS,
.destroy = drm_plane_cleanup,
- DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev);
-
- return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
-}
-
-/*
- * The CRTC is always enabled. Screen updates are performed by
- * the primary plane's atomic_update function. Disabling clears
- * the screen in the primary plane's atomic_disable function.
- */
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
- .mode_valid = simpledrm_crtc_helper_mode_valid,
- .atomic_check = drm_crtc_helper_atomic_check,
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
+ DRM_SYSFB_CRTC_FUNCS,
.destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_encoder_funcs simpledrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
-
- return drm_connector_helper_get_modes_fixed(connector, &sdev->mode);
-}
-
static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
- .get_modes = simpledrm_connector_helper_get_modes,
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
};
static const struct drm_connector_funcs simpledrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
+ DRM_SYSFB_CONNECTOR_FUNCS,
.destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
};
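
The DRM_SYSFB_* initializers spliced in above come from drm_sysfb_helper.h and replace each driver's hand-rolled plane/CRTC/connector callbacks with shared ones. A minimal standalone sketch of that macro-splice idiom — the names (toy_ops, COMMON_TOY_OPS) are invented for the demo, not the real helper contents:

#include <stdio.h>

struct toy_ops {
	int (*check)(void);
	int (*update)(void);
	void (*destroy)(void);
};

static int common_check(void) { return 0; }
static int common_update(void) { return 1; }
static void driver_destroy(void) { puts("driver cleanup"); }

/* Shared header provides the common designated initializers... */
#define COMMON_TOY_OPS \
	.check = common_check, \
	.update = common_update

/* ...and each driver splices them in, keeping driver-specific fields,
 * just like ".destroy = drm_plane_cleanup" remains per-driver above. */
static const struct toy_ops driver_ops = {
	COMMON_TOY_OPS,
	.destroy = driver_destroy,
};

int main(void)
{
	printf("check=%d update=%d\n", driver_ops.check(), driver_ops.update());
	driver_ops.destroy();
	return 0;
}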
/*
* Init / Cleanup
*/
-static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height,
- unsigned int width_mm,
- unsigned int height_mm)
-{
- const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height, width_mm, height_mm)
- };
-
- return mode;
-}
-
static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int width, height, stride;
int width_mm = 0, height_mm = 0;
@@ -793,10 +615,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
size_t nformats;
int ret;
- sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev);
+ sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, sysfb.dev);
if (IS_ERR(sdev))
return ERR_CAST(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
platform_set_drvdata(pdev, sdev);
/*
@@ -858,20 +681,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
return ERR_PTR(-EINVAL);
}
- /*
- * Assume a monitor resolution of 96 dpi if physical dimensions
- * are not specified to get a somewhat reasonable screen size.
- */
- if (!width_mm)
- width_mm = DRM_MODE_RES_MM(width, 96ul);
- if (!height_mm)
- height_mm = DRM_MODE_RES_MM(height, 96ul);
-
- sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
- sdev->format = format;
- sdev->pitch = stride;
+ sysfb->fb_mode = drm_sysfb_mode(width, height, width_mm, height_mm);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
- drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode));
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sysfb->fb_mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
&format->format, width, height, stride);
@@ -895,7 +709,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (IS_ERR(screen_base))
return screen_base;
- iosys_map_set_vaddr(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr(&sysfb->fb_addr, screen_base);
} else {
void __iomem *screen_base;
@@ -928,7 +742,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (!screen_base)
return ERR_PTR(-ENOMEM);
- iosys_map_set_vaddr_iomem(&sdev->screen_base, screen_base);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
}
/*
@@ -1027,19 +841,21 @@ static struct drm_driver simpledrm_driver = {
static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
+ struct drm_sysfb_device *sysfb;
struct drm_device *dev;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
if (IS_ERR(sdev))
return PTR_ERR(sdev);
- dev = &sdev->dev;
+ sysfb = &sdev->sysfb;
+ dev = &sysfb->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
- drm_client_setup(dev, sdev->format);
+ drm_client_setup(dev, sdev->sysfb.fb_format);
return 0;
}
@@ -1047,7 +863,7 @@ static int simpledrm_probe(struct platform_device *pdev)
static void simpledrm_remove(struct platform_device *pdev)
{
struct simpledrm_device *sdev = platform_get_drvdata(pdev);
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = &sdev->sysfb.dev;
drm_dev_unplug(dev);
}
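
Throughout the conversion above, the framebuffer address moves from sdev->screen_base into sysfb->fb_addr, still held in a struct iosys_map so a single blit path serves both system-RAM and I/O-memory framebuffers. A kernel-style sketch of that abstraction, using only the documented iosys_map accessors (helper names are hypothetical):

#include <linux/iosys-map.h>

/* Point the map at whichever kind of memory the platform handed us. */
static void fb_setup(struct iosys_map *fb, bool is_iomem,
		     void *sysmem, void __iomem *iomem)
{
	if (is_iomem)
		iosys_map_set_vaddr_iomem(fb, iomem); /* e.g. from devm_ioremap_wc() */
	else
		iosys_map_set_vaddr(fb, sysmem);      /* e.g. from devm_memremap() */
}

/* Callers no longer pick between memset() and memset_io() themselves. */
static void fb_clear(struct iosys_map *fb, size_t len)
{
	iosys_map_memset(fb, 0, 0, len);
}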
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
new file mode 100644
index 000000000000..4d62c78e7d1e
--- /dev/null
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/aperture.h>
+#include <linux/ioport.h>
+#include <linux/limits.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/edid.h>
+#include <video/pixel_format.h>
+#include <video/vga.h>
+
+#include "drm_sysfb_helper.h"
+
+#define DRIVER_NAME "vesadrm"
+#define DRIVER_DESC "DRM driver for VESA platform devices"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define VESADRM_GAMMA_LUT_SIZE 256
+
+static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *dev,
+ const struct screen_info *si)
+{
+ static const struct drm_sysfb_format formats[] = {
+ { PIXEL_FORMAT_XRGB1555, DRM_FORMAT_XRGB1555, },
+ { PIXEL_FORMAT_RGB565, DRM_FORMAT_RGB565, },
+ { PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
+ { PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
+ { PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ };
+
+ return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
+}
+
+/*
+ * VESA device
+ */
+
+struct vesadrm_device {
+ struct drm_sysfb_device sysfb;
+
+#if defined(CONFIG_X86_32)
+ /* VESA Protected Mode interface */
+ struct {
+ const u8 *PrimaryPalette;
+ } pmi;
+#endif
+
+ void (*cmap_write)(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue);
+
+ /* modesetting */
+ u32 formats[DRM_SYSFB_PLANE_NFORMATS(1)];
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
+{
+ return container_of(to_drm_sysfb_device(dev), struct vesadrm_device, sysfb);
+}
+
+/*
+ * Palette
+ */
+
+static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u8 i8, r8, g8, b8;
+
+ if (index > 255)
+ return;
+
+ i8 = index;
+ r8 = red >> 8;
+ g8 = green >> 8;
+ b8 = blue >> 8;
+
+ outb_p(i8, VGA_PEL_IW);
+ outb_p(r8, VGA_PEL_D);
+ outb_p(g8, VGA_PEL_D);
+ outb_p(b8, VGA_PEL_D);
+}
+
+#if defined(CONFIG_X86_32)
+static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ u32 i32 = index;
+ struct {
+ u8 b8;
+ u8 g8;
+ u8 r8;
+ u8 x8;
+ } PaletteEntry = {
+ blue >> 8,
+ green >> 8,
+ red >> 8,
+ 0x00,
+ };
+
+ if (index > 255)
+ return;
+
+ __asm__ __volatile__ (
+ "call *(%%esi)"
+ : /* no return value */
+ : "a" (0x4f09),
+ "b" (0),
+ "c" (1),
+ "d" (i32),
+ "D" (&PaletteEntry),
+ "S" (&vesa->pmi.PrimaryPalette));
+}
+#endif
+
+static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ size_t i;
+ u16 r16, g16, b16;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ for (i = 0; i < 32; ++i) {
+ r16 = i * 8 + i / 4;
+ r16 |= (r16 << 8) | r16;
+ vesa->cmap_write(vesa, i, r16, r16, r16);
+ }
+ break;
+ case DRM_FORMAT_RGB565:
+ for (i = 0; i < 32; ++i) {
+ r16 = i * 8 + i / 4;
+ r16 |= (r16 << 8) | r16;
+ g16 = i * 4 + i / 16;
+ g16 |= (g16 << 8) | g16;
+ b16 = r16;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ for (i = 32; i < 64; ++i) {
+ g16 = i * 4 + i / 16;
+ g16 |= (g16 << 8) | g16;
+ vesa->cmap_write(vesa, i, 0, g16, 0);
+ }
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < 256; ++i) {
+ r16 = (i << 8) | i;
+ vesa->cmap_write(vesa, i, r16, r16, r16);
+ }
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
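
The ramp arithmetic above is dense: for a 5-bit channel, i * 8 + i / 4 equals (i << 3) | (i >> 2), which replicates the top bits so the 32 steps span 0..255 exactly, and or-ing an 8-bit value with itself shifted left by 8 widens it to 16 bits. A standalone check of both identities:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int i;

	for (i = 0; i < 32; ++i) {
		unsigned int v8 = i * 8 + i / 4;	/* 5 -> 8 bits */
		unsigned int v16 = (v8 << 8) | v8;	/* 8 -> 16 bits */

		assert(v8 == ((i << 3) | (i >> 2)));	/* same bit replication */
		assert(v8 <= 255);
		if (i == 0 || i == 31)
			printf("i=%2u -> %3u -> 0x%04x\n", i, v8, v16);
	}
	return 0;
}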
+
+static void vesadrm_set_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ size_t i;
+ u16 r16, g16, b16;
+
+ switch (format->format) {
+ case DRM_FORMAT_XRGB1555:
+ for (i = 0; i < 32; ++i) {
+ r16 = lut[i * 8 + i / 4].red;
+ g16 = lut[i * 8 + i / 4].green;
+ b16 = lut[i * 8 + i / 4].blue;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ break;
+ case DRM_FORMAT_RGB565:
+ for (i = 0; i < 32; ++i) {
+ r16 = lut[i * 8 + i / 4].red;
+ g16 = lut[i * 4 + i / 16].green;
+ b16 = lut[i * 8 + i / 4].blue;
+ vesa->cmap_write(vesa, i, r16, g16, b16);
+ }
+ for (i = 32; i < 64; ++i)
+ vesa->cmap_write(vesa, i, 0, lut[i * 4 + i / 16].green, 0);
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ for (i = 0; i < 256; ++i)
+ vesa->cmap_write(vesa, i, lut[i].red, lut[i].green, lut[i].blue);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+/*
+ * Modesetting
+ */
+
+static const u64 vesadrm_primary_plane_format_modifiers[] = {
+ DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
+};
+
+static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
+ DRM_SYSFB_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs vesadrm_primary_plane_funcs = {
+ DRM_SYSFB_PLANE_FUNCS,
+ .destroy = drm_plane_cleanup,
+};
+
+static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ if (sysfb_crtc_state->format == sysfb->fb_format) {
+ if (crtc_state->gamma_lut)
+ vesadrm_set_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ } else {
+ vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ }
+ }
+}
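
The flush handler above condenses to one decision: reload the user-supplied LUT only when the scanned-out format is the device's native one; otherwise (shadow-plane format conversion active, LUT entries laid out for a different format) fall back to a linear ramp. The same logic as a pure function, names invented, for illustration only:

enum gamma_action { GAMMA_KEEP, GAMMA_LOAD_LUT, GAMMA_LOAD_LINEAR };

static enum gamma_action pick_gamma_action(bool enabled, bool color_mgmt_changed,
					   bool native_format, bool have_lut)
{
	if (!enabled || !color_mgmt_changed)
		return GAMMA_KEEP;
	if (native_format && have_lut)
		return GAMMA_LOAD_LUT;
	return GAMMA_LOAD_LINEAR;
}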
+
+static const struct drm_crtc_helper_funcs vesadrm_crtc_helper_funcs = {
+ DRM_SYSFB_CRTC_HELPER_FUNCS,
+ .atomic_flush = vesadrm_crtc_helper_atomic_flush,
+};
+
+static const struct drm_crtc_funcs vesadrm_crtc_funcs = {
+ DRM_SYSFB_CRTC_FUNCS,
+ .destroy = drm_crtc_cleanup,
+};
+
+static const struct drm_encoder_funcs vesadrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_helper_funcs vesadrm_connector_helper_funcs = {
+ DRM_SYSFB_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs vesadrm_connector_funcs = {
+ DRM_SYSFB_CONNECTOR_FUNCS,
+ .destroy = drm_connector_cleanup,
+};
+
+static const struct drm_mode_config_funcs vesadrm_mode_config_funcs = {
+ DRM_SYSFB_MODE_CONFIG_FUNCS,
+};
+
+/*
+ * Init / Cleanup
+ */
+
+static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
+{
+ const struct screen_info *si;
+ const struct drm_format_info *format;
+ int width, height, stride;
+ u64 vsize;
+ struct resource resbuf;
+ struct resource *res;
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ struct resource *mem = NULL;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned long max_width, max_height;
+ size_t nformats;
+ int ret;
+
+ si = dev_get_platdata(&pdev->dev);
+ if (!si)
+ return ERR_PTR(-ENODEV);
+ if (screen_info_video_type(si) != VIDEO_TYPE_VLFB)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * VESA DRM driver
+ */
+
+ vesa = devm_drm_dev_alloc(&pdev->dev, drv, struct vesadrm_device, sysfb.dev);
+ if (IS_ERR(vesa))
+ return ERR_CAST(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Hardware settings
+ */
+
+ format = vesadrm_get_format_si(dev, si);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+ width = drm_sysfb_get_width_si(dev, si);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = drm_sysfb_get_height_si(dev, si);
+ if (height < 0)
+ return ERR_PTR(height);
+ res = drm_sysfb_get_memory_si(dev, si, &resbuf);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+ stride = drm_sysfb_get_stride_si(dev, si, format, width, height, resource_size(res));
+ if (stride < 0)
+ return ERR_PTR(stride);
+ vsize = drm_sysfb_get_visible_size_si(dev, si, height, stride, resource_size(res));
+ if (!vsize)
+ return ERR_PTR(-EINVAL);
+
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
+ &format->format, width, height, stride);
+
+ if (!__screen_info_vbe_mode_nonvga(si)) {
+ vesa->cmap_write = vesadrm_vga_cmap_write;
+#if defined(CONFIG_X86_32)
+ } else {
+ phys_addr_t pmi_base = __screen_info_vesapm_info_base(si);
+ const u16 *pmi_addr = phys_to_virt(pmi_base);
+
+ vesa->pmi.PrimaryPalette = (u8 *)pmi_addr + pmi_addr[2];
+ vesa->cmap_write = vesadrm_pmi_cmap_write;
+#endif
+ }
+
+#ifdef CONFIG_X86
+ if (drm_edid_header_is_valid(edid_info.dummy) == 8)
+ sysfb->edid = edid_info.dummy;
+#endif
+ sysfb->fb_mode = drm_sysfb_mode(width, height, 0, 0);
+ sysfb->fb_format = format;
+ sysfb->fb_pitch = stride;
+ if (vesa->cmap_write)
+ sysfb->fb_gamma_lut_size = VESADRM_GAMMA_LUT_SIZE;
+
+ /*
+ * Memory management
+ */
+
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start, vsize);
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, vsize, drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base);
+
+ /*
+ * Modesetting
+ */
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
+
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->depth;
+ dev->mode_config.funcs = &vesadrm_mode_config_funcs;
+
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ vesa->formats, ARRAY_SIZE(vesa->formats));
+
+ primary_plane = &vesa->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs,
+ vesa->formats, nformats,
+ vesadrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_plane_helper_add(primary_plane, &vesadrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &vesa->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &vesadrm_crtc_funcs, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_crtc_helper_add(crtc, &vesadrm_crtc_helper_funcs);
+
+ if (sysfb->fb_gamma_lut_size) {
+ ret = drm_mode_crtc_set_gamma_size(crtc, sysfb->fb_gamma_lut_size);
+ if (!ret)
+ drm_crtc_enable_color_mgmt(crtc, 0, false, sysfb->fb_gamma_lut_size);
+ }
+
+ /* Encoder */
+
+ encoder = &vesa->encoder;
+ ret = drm_encoder_init(dev, encoder, &vesadrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &vesa->connector;
+ ret = drm_connector_init(dev, connector, &vesadrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ERR_PTR(ret);
+ drm_connector_helper_add(connector, &vesadrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+ if (sysfb->edid)
+ drm_connector_attach_edid_property(connector);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
+ return vesa;
+}
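
The x86 EDID probe inside the function above accepts the firmware-provided block only when drm_edid_header_is_valid() scores all 8 bytes of the fixed EDID header (00 FF FF FF FF FF FF 00) as matching. A standalone illustration of that scoring check, with a hypothetical helper name:

#include <stdio.h>

static int edid_header_valid_bytes(const unsigned char *raw)
{
	static const unsigned char header[8] =
		{ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	int i, score = 0;

	for (i = 0; i < 8; i++)
		if (raw[i] == header[i])
			score++;
	return score;	/* 8 means a perfect header match */
}

int main(void)
{
	unsigned char good[8] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	unsigned char bad[8]  = { 0 };

	printf("good=%d bad=%d\n", edid_header_valid_bytes(good),
	       edid_header_valid_bytes(bad));
	return 0;
}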
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(vesadrm_fops);
+
+static struct drm_driver vesadrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &vesadrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int vesadrm_probe(struct platform_device *pdev)
+{
+ struct vesadrm_device *vesa;
+ struct drm_sysfb_device *sysfb;
+ struct drm_device *dev;
+ int ret;
+
+ vesa = vesadrm_device_create(&vesadrm_driver, pdev);
+ if (IS_ERR(vesa))
+ return PTR_ERR(vesa);
+ sysfb = &vesa->sysfb;
+ dev = &sysfb->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(dev, sysfb->fb_format);
+
+ return 0;
+}
+
+static void vesadrm_remove(struct platform_device *pdev)
+{
+ struct drm_device *dev = platform_get_drvdata(pdev);
+
+ drm_dev_unplug(dev);
+}
+
+static struct platform_driver vesadrm_platform_driver = {
+ .driver = {
+ .name = "vesa-framebuffer",
+ },
+ .probe = vesadrm_probe,
+ .remove = vesadrm_remove,
+};
+
+module_platform_driver(vesadrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 798507a8ae56..59d5c1ba145a 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1321,10 +1321,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
if (wgrp->dc == dc->pipe) {
for (j = 0; j < wgrp->num_windows; j++) {
unsigned int index = wgrp->windows[j];
+ enum drm_plane_type type;
+
+ if (primary)
+ type = DRM_PLANE_TYPE_OVERLAY;
+ else
+ type = DRM_PLANE_TYPE_PRIMARY;
plane = tegra_shared_plane_create(drm, dc,
wgrp->index,
- index);
+ index, type);
if (IS_ERR(plane))
return plane;
@@ -1332,10 +1338,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
* Choose the first shared plane owned by this
* head as the primary plane.
*/
- if (!primary) {
- plane->type = DRM_PLANE_TYPE_PRIMARY;
+ if (!primary)
primary = plane;
- }
}
}
}
@@ -1389,7 +1393,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
tegra_crtc_atomic_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *
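
The tegra_crtc_reset() fix above matters because, when the state allocation fails, taking &state->base of a NULL pointer is undefined behaviour — and on typical implementations it yields a non-NULL garbage pointer the helper cannot detect, whereas __drm_atomic_helper_crtc_reset() handles a plain NULL gracefully. A standalone note on the pitfall (structure names invented):

#include <stdio.h>
#include <stddef.h>

struct base { int dummy; };
struct derived { long before; struct base base; };

int main(void)
{
	/*
	 * With a NULL 'derived' pointer, &p->base would in practice
	 * evaluate to NULL + offsetof(struct derived, base): a bogus
	 * non-NULL pointer indistinguishable from a valid one. Hence
	 * the explicit "state ? &state->base : NULL" split in the patch.
	 */
	printf("&((struct derived *)0)->base would be offset %zu, not NULL\n",
	       offsetof(struct derived, base));
	return 0;
}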
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
index 08fbd8f151a1..990e744b0923 100644
--- a/drivers/gpu/drm/tegra/dp.c
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -256,73 +256,6 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
}
/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
index cb12ed0c54e7..695060cafac0 100644
--- a/drivers/gpu/drm/tegra/dp.h
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -164,8 +164,6 @@ int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
void drm_dp_link_update_rates(struct drm_dp_link *link);
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_choose(struct drm_dp_link *link,
const struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2cd8dcb959c0..e5297ac5c0fc 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -501,14 +501,9 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
- if (PTR_ERR(dpaux->vdd) != -ENODEV) {
- if (PTR_ERR(dpaux->vdd) != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get VDD supply: %ld\n",
- PTR_ERR(dpaux->vdd));
-
- return PTR_ERR(dpaux->vdd);
- }
+ if (PTR_ERR(dpaux->vdd) != -ENODEV)
+ return dev_err_probe(&pdev->dev, PTR_ERR(dpaux->vdd),
+ "failed to get VDD supply\n");
dpaux->vdd = NULL;
}
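
dev_err_probe(), adopted above, returns the error it is given; it logs at error level for real failures but, for -EPROBE_DEFER, only records the deferral reason (visible in /sys/kernel/debug/devices_deferred) — exactly the special case the removed branch open-coded. A sketch of the resulting shape, with a hypothetical get_vdd() wrapper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int get_vdd(struct device *dev, struct regulator **out)
{
	struct regulator *vdd = devm_regulator_get_optional(dev, "vdd");

	if (IS_ERR(vdd)) {
		/* Logs for real errors, stays quiet for -EPROBE_DEFER. */
		if (PTR_ERR(vdd) != -ENODEV)
			return dev_err_probe(dev, PTR_ERR(vdd),
					     "failed to get VDD supply\n");
		vdd = NULL;	/* the supply is genuinely optional */
	}

	*out = vdd;
	return 0;
}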
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 9bb077558167..b5089b772267 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1564,7 +1564,6 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
static int tegra_dsi_probe(struct platform_device *pdev)
{
struct tegra_dsi *dsi;
- struct resource *regs;
int err;
dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
@@ -1636,8 +1635,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto remove;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs)) {
err = PTR_ERR(dsi->regs);
goto remove;
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index c0d85463eb1a..17f616bbcb45 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -30,6 +30,14 @@ int falcon_wait_idle(struct falcon *falcon)
(value == 0), 10, 100000);
}
+static int falcon_dma_wait_not_full(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
+ !(value & FALCON_DMATRFCMD_FULL), 10, 100000);
+}
+
static int falcon_dma_wait_idle(struct falcon *falcon)
{
u32 value;
@@ -44,6 +52,7 @@ static int falcon_copy_chunk(struct falcon *falcon,
enum falcon_memory target)
{
u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
+ int err;
if (target == FALCON_MEMORY_IMEM)
cmd |= FALCON_DMATRFCMD_IMEM;
@@ -56,11 +65,15 @@ static int falcon_copy_chunk(struct falcon *falcon,
*/
cmd |= FALCON_DMATRFCMD_DMACTX(1);
+ err = falcon_dma_wait_not_full(falcon);
+ if (err < 0)
+ return err;
+
falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
- return falcon_dma_wait_idle(falcon);
+ return 0;
}
static void falcon_copy_firmware_image(struct falcon *falcon,
@@ -191,6 +204,11 @@ int falcon_boot(struct falcon *falcon)
falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
offset, FALCON_MEMORY_IMEM);
+ /* wait for DMA to complete */
+ err = falcon_dma_wait_idle(falcon);
+ if (err < 0)
+ return err;
+
/* setup falcon interrupts */
falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
FALCON_IRQMSET_SWGEN1 |
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 1955cf11a8a6..902bb7e4fd0f 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -47,6 +47,7 @@
#define FALCON_DMATRFMOFFS 0x00001114
#define FALCON_DMATRFCMD 0x00001118
+#define FALCON_DMATRFCMD_FULL (1 << 0)
#define FALCON_DMATRFCMD_IDLE (1 << 1)
#define FALCON_DMATRFCMD_IMEM (1 << 4)
#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
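
The falcon change above stops draining the DMA engine after every 256-byte chunk: it polls the new FULL bit before queueing each transfer, and falcon_boot() waits for IDLE once after the last chunk, so copies stay pipelined. A sketch of the producer side under those assumed register semantics (helper name hypothetical, constants as defined in falcon.h above):

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define FALCON_DMATRFCMD	0x00001118	/* from falcon.h above */
#define FALCON_DMATRFCMD_FULL	(1 << 0)

static int falcon_queue_dma(void __iomem *regs, u32 cmd)
{
	u32 value;
	int err;

	/* Wait only while the transfer queue cannot take another request. */
	err = readl_poll_timeout(regs + FALCON_DMATRFCMD, value,
				 !(value & FALCON_DMATRFCMD_FULL), 10, 100000);
	if (err)
		return err;

	writel(cmd, regs + FALCON_DMATRFCMD);
	return 0;	/* caller waits for IDLE once, after the last chunk */
}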
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ace3e5a805cf..dbc1394f96b8 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -17,7 +17,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
-#include <drm/tegra_drm.h>
#include "drm.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index fa6140fc37fb..8f779f23dc09 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -755,9 +755,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
- unsigned int index)
+ unsigned int index,
+ enum drm_plane_type type)
{
- enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
struct tegra_shared_plane *plane;
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 23c4b2115ed1..a66f18c4facc 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -80,7 +80,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
- unsigned int index);
+ unsigned int index,
+ enum drm_plane_type type);
int tegra_display_hub_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 1e8ec50b759e..ff5a749710db 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -200,6 +200,11 @@ static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
.atomic_check = tegra_rgb_encoder_atomic_check,
};
+static void tegra_dc_of_node_put(void *data)
+{
+ of_node_put(data);
+}
+
int tegra_dc_rgb_probe(struct tegra_dc *dc)
{
struct device_node *np;
@@ -207,7 +212,14 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
int err;
np = of_get_child_by_name(dc->dev->of_node, "rgb");
- if (!np || !of_device_is_available(np))
+ if (!np)
+ return -ENODEV;
+
+ err = devm_add_action_or_reset(dc->dev, tegra_dc_of_node_put, np);
+ if (err < 0)
+ return err;
+
+ if (!of_device_is_available(np))
return -ENODEV;
rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
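
devm_add_action_or_reset(), as used above, closes a subtle leak: registering of_node_put() right after of_get_child_by_name() means every later error return (and eventual unbind) drops the reference, and if registration itself fails the _or_reset variant runs the cleanup immediately. The general shape, with hypothetical names:

#include <linux/device.h>
#include <linux/of.h>

static void put_node(void *data)
{
	of_node_put(data);
}

static int hold_child_node(struct device *dev, struct device_node **out)
{
	struct device_node *np = of_get_child_by_name(dev->of_node, "rgb");
	int err;

	if (!np)
		return -ENODEV;

	/* From here on, np is released on unbind or on any error below. */
	err = devm_add_action_or_reset(dev, put_node, np);
	if (err)
		return err;

	*out = np;
	return 0;
}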
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index f98f70eda906..21f3dfdcc5c9 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2666,7 +2666,7 @@ static void tegra_sor_dp_disable(struct drm_encoder *encoder)
* the AUX transactions would just be timing out.
*/
if (output->connector.status != connector_status_disconnected) {
- err = drm_dp_link_power_down(sor->aux, &sor->link);
+ err = drm_dp_link_power_down(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power down link: %d\n",
err);
@@ -2882,7 +2882,7 @@ static void tegra_sor_dp_enable(struct drm_encoder *encoder)
else
dev_dbg(sor->dev, "link training succeeded\n");
- err = drm_dp_link_power_up(sor->aux, &sor->link);
+ err = drm_dp_link_power_up(sor->aux, sor->link.revision);
if (err < 0)
dev_err(sor->dev, "failed to power up DP link: %d\n", err);
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 0109bcf7faa5..3afd6587df08 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -4,7 +4,9 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
drm_kunit_helpers.o
obj-$(CONFIG_DRM_KUNIT_TEST) += \
+ drm_atomic_test.o \
drm_atomic_state_test.o \
+ drm_bridge_test.o \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
drm_connector_test.o \
diff --git a/drivers/gpu/drm/tests/drm_atomic_test.c b/drivers/gpu/drm/tests/drm_atomic_test.c
new file mode 100644
index 000000000000..ea91bec6569e
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_atomic_test.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_atomic functions
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+struct drm_atomic_test_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static const struct drm_connector_helper_funcs drm_atomic_init_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs drm_atomic_init_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static struct drm_atomic_test_priv *create_device(struct kunit *test)
+{
+ struct drm_atomic_test_priv *priv;
+ struct drm_connector *connector;
+ struct drm_encoder *enc;
+ struct drm_device *drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_atomic_test_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(plane))
+ return ERR_CAST(plane);
+ priv->plane = plane;
+
+ crtc = drm_kunit_helper_create_crtc(test, drm,
+ plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(crtc))
+ return ERR_CAST(crtc);
+ priv->crtc = crtc;
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(crtc);
+
+ connector = &priv->connector;
+ ret = drmm_connector_init(drm, connector,
+ &drm_atomic_init_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(connector, &drm_atomic_init_connector_helper_funcs);
+
+ drm_connector_attach_encoder(connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+static void drm_test_drm_atomic_get_connector_for_encoder(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_test_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_connector *curr_connector;
+ int ret;
+
+ priv = create_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm,
+ priv->crtc, &priv->connector,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn:
+ curr_connector = drm_atomic_get_connector_for_encoder(&priv->encoder,
+ &ctx);
+ if (PTR_ERR(curr_connector) == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_conn;
+ }
+ KUNIT_EXPECT_PTR_EQ(test, curr_connector, &priv->connector);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static struct kunit_case drm_atomic_get_connector_for_encoder_tests[] = {
+ KUNIT_CASE(drm_test_drm_atomic_get_connector_for_encoder),
+ { }
+};
+
+
+static struct kunit_suite drm_atomic_get_connector_for_encoder_test_suite = {
+ .name = "drm_test_atomic_get_connector_for_encoder",
+ .test_cases = drm_atomic_get_connector_for_encoder_tests,
+};
+
+kunit_test_suite(drm_atomic_get_connector_for_encoder_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_atomic functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c
new file mode 100644
index 000000000000..ff88ec2e911c
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_bridge_test.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_bridge functions
+ */
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_bridge_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+struct drm_bridge_init_priv {
+ struct drm_device drm;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder encoder;
+ struct drm_bridge bridge;
+ struct drm_connector *connector;
+ unsigned int enable_count;
+ unsigned int disable_count;
+};
+
+static void drm_test_bridge_enable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_disable(struct drm_bridge *bridge)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
+ .enable = drm_test_bridge_enable,
+ .disable = drm_test_bridge_disable,
+};
+
+static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->enable_count++;
+}
+
+static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_init_priv *priv =
+ container_of(bridge, struct drm_bridge_init_priv, bridge);
+
+ priv->disable_count++;
+}
+
+static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = {
+ .atomic_enable = drm_test_bridge_atomic_enable,
+ .atomic_disable = drm_test_bridge_atomic_disable,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+KUNIT_DEFINE_ACTION_WRAPPER(drm_bridge_remove_wrapper,
+ drm_bridge_remove,
+ struct drm_bridge *);
+
+static int drm_kunit_bridge_add(struct kunit *test,
+ struct drm_bridge *bridge)
+{
+ drm_bridge_add(bridge);
+
+ return kunit_add_action_or_reset(test,
+ drm_bridge_remove_wrapper,
+ bridge);
+}
+
+static struct drm_bridge_init_priv *
+drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_encoder *enc;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct drm_bridge_init_priv, drm,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ drm = &priv->drm;
+ priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
+ NULL,
+ NULL,
+ NULL, 0,
+ NULL);
+ if (IS_ERR(priv->plane))
+ return ERR_CAST(priv->plane);
+
+ priv->crtc = drm_kunit_helper_create_crtc(test, drm,
+ priv->plane, NULL,
+ NULL,
+ NULL);
+ if (IS_ERR(priv->crtc))
+ return ERR_CAST(priv->crtc);
+
+ enc = &priv->encoder;
+ ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ enc->possible_crtcs = drm_crtc_mask(priv->crtc);
+
+ bridge = &priv->bridge;
+ bridge->type = DRM_MODE_CONNECTOR_VIRTUAL;
+ bridge->funcs = funcs;
+
+ ret = drm_kunit_bridge_add(test, bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_bridge_attach(enc, bridge, NULL, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv->connector = drm_bridge_connector_init(drm, enc);
+ if (IS_ERR(priv->connector))
+ return ERR_CAST(priv->connector);
+
+ drm_connector_attach_encoder(priv->connector, enc);
+
+ drm_mode_config_reset(drm);
+
+ return priv;
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns the last committed
+ * state for an atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge_state *curr_bridge_state;
+ struct drm_bridge_state *bridge_state;
+ struct drm_atomic_state *state;
+ struct drm_bridge *bridge;
+ struct drm_device *drm;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &priv->drm;
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_commit:
+ bridge = &priv->bridge;
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state);
+
+ ret = drm_atomic_commit(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_state:
+ ret = drm_modeset_lock(&bridge->base.lock, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_state;
+ }
+
+ curr_bridge_state = drm_bridge_get_current_state(bridge);
+ KUNIT_EXPECT_PTR_EQ(test, curr_bridge_state, bridge_state);
+
+ drm_modeset_unlock(&bridge->base.lock);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that drm_bridge_get_current_state() returns NULL for a
+ * non-atomic bridge.
+ */
+static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+ struct drm_bridge *bridge;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ /*
+ * NOTE: Strictly speaking, we should take the bridge->base.lock
+ * before calling that function. However, bridge->base is only
+ * initialized if the bridge is atomic, while we explicitly
+ * initialize one that isn't there.
+ *
+ * In order to avoid unnecessary warnings, let's skip the
+ * locking. The function would return NULL in all cases anyway,
+ * so we don't really have any concurrency to worry about.
+ */
+ bridge = &priv->bridge;
+ KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge));
+}
+
+static struct kunit_case drm_bridge_get_current_state_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_get_current_state_legacy),
+ { }
+};
+
+
+static struct kunit_suite drm_bridge_get_current_state_test_suite = {
+ .name = "drm_test_bridge_get_current_state",
+ .test_cases = drm_bridge_get_current_state_tests,
+};
+
+/*
+ * Test that an atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+}
+
+/*
+ * Test that calling drm_bridge_helper_reset_crtc() on a disabled atomic
+ * bridge will fail and not call the enable / disable callbacks
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 0);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 0);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 0);
+}
+
+/*
+ * Test that a non-atomic bridge is properly power-cycled when calling
+ * drm_bridge_helper_reset_crtc().
+ */
+static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_bridge_init_priv *priv;
+ struct drm_display_mode *mode;
+ struct drm_bridge *bridge;
+ int ret;
+
+ priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_commit:
+ ret = drm_kunit_helper_enable_crtc_connector(test,
+ &priv->drm, priv->crtc,
+ priv->connector,
+ mode,
+ &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_commit;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ bridge = &priv->bridge;
+ KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_reset:
+ ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry_reset;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+}
+
+static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic_disabled),
+ KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_legacy),
+ { }
+};
+
+static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = {
+ .name = "drm_test_bridge_helper_reset_crtc",
+ .test_cases = drm_bridge_helper_reset_crtc_tests,
+};
+
+kunit_test_suites(
+ &drm_bridge_get_current_state_test_suite,
+ &drm_bridge_helper_reset_crtc_test_suite,
+);
+
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_DESCRIPTION("Kunit test for drm_bridge functions");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index b2fdb1a774fe..3f44fe5e92e4 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -88,7 +88,8 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
struct drm_device *drm = priv->drm;
struct drm_connector *connector = &priv->connector;
struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- struct drm_display_mode *expected_mode, *mode;
+ struct drm_display_mode *expected_mode;
+ const struct drm_display_mode *mode;
const char *cmdline = "1920x1080@60";
int ret;
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index 925fbc2cda70..68f2c3162354 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -134,7 +134,7 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -142,14 +142,14 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test)
ret = drm_gem_shmem_pin(shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
drm_gem_shmem_unpin(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
}
/*
@@ -168,24 +168,24 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_vmap(shmem, &map);
+ ret = drm_gem_shmem_vmap_locked(shmem, &map);
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);
iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
for (i = 0; i < TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
- drm_gem_shmem_vunmap(shmem, &map);
+ drm_gem_shmem_vunmap_locked(shmem, &map);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
}
/*
@@ -254,7 +254,7 @@ static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
sgt = drm_gem_shmem_get_pages_sgt(shmem);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
- KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
for_each_sgtable_sg(sgt, sg, si) {
@@ -284,17 +284,17 @@ static void drm_gem_shmem_test_madvise(struct kunit *test)
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, 1);
/* Set madv to a negative value */
- ret = drm_gem_shmem_madvise(shmem, -1);
+ ret = drm_gem_shmem_madvise_locked(shmem, -1);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
/* Check that madv cannot be set back to a positive value */
- ret = drm_gem_shmem_madvise(shmem, 0);
+ ret = drm_gem_shmem_madvise_locked(shmem, 0);
KUNIT_EXPECT_FALSE(test, ret);
KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}
@@ -322,7 +322,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_FALSE(test, ret);
- ret = drm_gem_shmem_madvise(shmem, 1);
+ ret = drm_gem_shmem_madvise_locked(shmem, 1);
KUNIT_EXPECT_TRUE(test, ret);
/* The scatter/gather table will be freed by drm_gem_shmem_free */
@@ -332,7 +332,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test)
ret = drm_gem_shmem_is_purgeable(shmem);
KUNIT_EXPECT_TRUE(test, ret);
- drm_gem_shmem_purge(shmem);
+ drm_gem_shmem_purge_locked(shmem);
KUNIT_EXPECT_NULL(test, shmem->pages);
KUNIT_EXPECT_NULL(test, shmem->sgt);
KUNIT_EXPECT_EQ(test, shmem->madv, -1);
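
The updated assertions above track the conversion of shmem->pages_use_count and shmem->vmap_use_count from plain counters to refcount_t, read via refcount_read(). A generic sketch of the get/put pattern such a conversion enables — not the actual drm_gem_shmem code, and it assumes the caller holds the object lock across the 0<->1 transitions:

#include <linux/refcount.h>

struct obj {
	refcount_t pages_use_count;
};

static void obj_get_pages(struct obj *o)
{
	if (!refcount_inc_not_zero(&o->pages_use_count)) {
		/* first user: allocate pages, then publish the count */
		refcount_set(&o->pages_use_count, 1);
	}
}

static void obj_put_pages(struct obj *o)
{
	if (refcount_dec_and_test(&o->pages_use_count)) {
		/* last user: free the pages */
	}
}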
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index e97efd3af9ed..7ffd666753b1 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -55,49 +55,6 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
return preferred;
}
-static int light_up_connector(struct kunit *test,
- struct drm_device *drm,
- struct drm_crtc *crtc,
- struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
- int ret;
-
- state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
-
-retry:
- conn_state = drm_atomic_get_connector_state(state, connector);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
-
- ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- ret = drm_modeset_backoff(ctx);
- if (!ret)
- goto retry;
- }
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
-
- ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
-
- crtc_state->enable = true;
- crtc_state->active = true;
-
- ret = drm_atomic_commit(state);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- return 0;
-}
-
static int set_connector_edid(struct kunit *test, struct drm_connector *connector,
const char *edid, size_t edid_len)
{
@@ -298,7 +255,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -364,7 +324,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -432,7 +395,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -489,7 +455,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -547,7 +516,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -606,7 +578,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -666,7 +641,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -725,7 +703,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
KUNIT_ASSERT_NOT_NULL(test, mode);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -789,7 +770,10 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -865,7 +849,10 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
@@ -941,7 +928,10 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -988,7 +978,10 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1037,7 +1030,10 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1086,7 +1082,10 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1134,7 +1133,10 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
/* You shouldn't be doing that at home. */
@@ -1208,7 +1210,10 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1282,7 +1287,10 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1347,7 +1355,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, mode, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ mode,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1414,7 +1425,10 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1483,7 +1497,10 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1543,7 +1560,10 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1605,7 +1625,10 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
conn_state = conn->state;
@@ -1645,7 +1668,10 @@ static void drm_test_check_disable_connector(struct kunit *test)
drm = &priv->drm;
crtc = priv->crtc;
- ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx);
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm,
+ crtc, conn,
+ preferred,
+ &ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 6f6616cf4966..5f7257840d8e 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -2,6 +2,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
@@ -271,6 +272,66 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+/**
+ * drm_kunit_helper_enable_crtc_connector - Enables a CRTC -> Connector output
+ * @test: The test context object
+ * @drm: The DRM device the CRTC and connector belong to
+ * @crtc: The CRTC to enable
+ * @connector: The Connector to enable
+ * @mode: The display mode to configure the CRTC with
+ * @ctx: Locking context
+ *
+ * This function creates an atomic update to enable the route from @crtc
+ * to @connector, with the given @mode.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure. If the error
+ * returned is -EDEADLK, the entire atomic sequence must be restarted.
+ */
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ if (ret)
+ return ret;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
+ if (ret)
+ return ret;
+
+ crtc_state->enable = true;
+ crtc_state->active = true;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_enable_crtc_connector);
+
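Typical call pattern for the new helper, mirroring the converted tests (sketch; the KUnit assertion handles failure):

	drm_modeset_acquire_init(&ctx, 0);
	ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
						     preferred, &ctx);
	KUNIT_ASSERT_EQ(test, ret, 0);	/* -EDEADLK means: restart the sequence */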
static void kunit_action_drm_mode_destroy(void *ptr)
{
struct drm_display_mode *mode = ptr;
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 17a86bed8054..95b4aeff2775 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -34,11 +34,12 @@ static inline struct tidss_encoder
}
static int tidss_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
- return drm_bridge_attach(bridge->encoder, t_enc->next_bridge,
+ return drm_bridge_attach(encoder, t_enc->next_bridge,
bridge, flags);
}
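A sketch of the callback shape this signature change assumes: the core now hands the encoder to the bridge's attach hook instead of drivers reading bridge->encoder (the example_* names are hypothetical):

static int example_bridge_attach(struct drm_bridge *bridge,
				 struct drm_encoder *encoder,
				 enum drm_bridge_attach_flags flags)
{
	struct example_encoder *enc = bridge_to_example_encoder(bridge);

	/* chain to the downstream bridge with the encoder supplied by the core */
	return drm_bridge_attach(encoder, enc->next_bridge, bridge, flags);
}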
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 54c84c9801c1..06e54694a7f2 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,6 +3,7 @@
config DRM_APPLETBDRM
tristate "DRM support for Apple Touch Bars"
depends on DRM && USB && MMU
+ depends on X86 || COMPILE_TEST
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
@@ -37,7 +38,7 @@ config DRM_BOCHS
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
@@ -65,20 +66,6 @@ config DRM_GM12U320
This is a KMS driver for projectors which use the GM12U320 chipset
for video transfer over USB2/3, such as the Acer C120 mini projector.
-config DRM_OFDRM
- tristate "Open Firmware display driver"
- depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for Open Firmware framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the Open Firmware before the kernel boots. Scanout buffer, size,
- and display format must be provided via device tree.
-
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
@@ -95,24 +82,6 @@ config DRM_PANEL_MIPI_DBI
https://github.com/notro/panel-mipi-dbi/wiki.
To compile this driver as a module, choose M here.
-config DRM_SIMPLEDRM
- tristate "Simple framebuffer driver"
- depends on DRM && MMU
- select APERTURE_HELPERS
- select DRM_CLIENT_SELECTION
- select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
- help
- DRM driver for simple platform-provided framebuffers.
-
- This driver assumes that the display hardware has been initialized
- by the firmware or bootloader before the kernel boots. Scanout
- buffer, size, and display format must be provided via device tree,
- UEFI, VESA, etc.
-
- On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
- to use UEFI and VESA framebuffers.
-
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
@@ -230,32 +199,3 @@ config TINYDRM_SHARP_MEMORY
* 4.40" Sharp Memory LCD (LS044Q7DH01)
If M is selected the module will be called sharp_memory.
-
-config TINYDRM_ST7586
- tristate "DRM support for Sitronix ST7586 display panels"
- depends on DRM && SPI
- select DRM_CLIENT_SELECTION
- select DRM_KMS_HELPER
- select DRM_GEM_DMA_HELPER
- select DRM_MIPI_DBI
- help
- DRM driver for the following Sitronix ST7586 panels:
- * LEGO MINDSTORMS EV3
-
- If M is selected the module will be called st7586.
-
-config TINYDRM_ST7735R
- tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
- depends on DRM && SPI
- select DRM_CLIENT_SELECTION
- select DRM_KMS_HELPER
- select DRM_GEM_DMA_HELPER
- select DRM_MIPI_DBI
- select BACKLIGHT_CLASS_DEVICE
- help
- DRM driver for Sitronix ST7715R/ST7735R with one of the following
- LCDs:
- * Jianda JD-T18003-T01 1.8" 128x160 TFT
- * Okaya RH128128T 1.44" 128x128 TFT
-
- If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 0a3a7837a58b..4a9ff61ec254 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -5,9 +5,7 @@ obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
-obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
-obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
@@ -16,5 +14,3 @@ obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_SHARP_MEMORY) += sharp-memory.o
-obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
-obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c
index 4370ba22dd88..751b05753c94 100644
--- a/drivers/gpu/drm/tiny/appletbdrm.c
+++ b/drivers/gpu/drm/tiny/appletbdrm.c
@@ -45,7 +45,7 @@
#define APPLETBDRM_BULK_MSG_TIMEOUT 1000
#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm)
-#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev))
+#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface((adev)->drm.dev))
struct appletbdrm_msg_request_header {
__le16 unk_00;
@@ -123,8 +123,6 @@ struct appletbdrm_fb_request_response {
} __packed;
struct appletbdrm_device {
- struct device *dmadev;
-
unsigned int in_ep;
unsigned int out_ep;
@@ -214,7 +212,7 @@ retry:
}
if (response->msg != expected_response) {
- drm_err(drm, "Unexpected response from device (expected %p4cc found %p4cc)\n",
+ drm_err(drm, "Unexpected response from device (expected %p4cl found %p4cl)\n",
&expected_response, &response->msg);
return -EIO;
}
@@ -288,7 +286,7 @@ static int appletbdrm_get_information(struct appletbdrm_device *adev)
}
if (pixel_format != APPLETBDRM_PIXEL_FORMAT) {
- drm_err(drm, "Encountered unknown pixel format (%p4cc)\n", &pixel_format);
+ drm_err(drm, "Encountered unknown pixel format (%p4cl)\n", &pixel_format);
ret = -EINVAL;
goto free_info;
}
@@ -612,22 +610,10 @@ static const struct drm_encoder_funcs appletbdrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct appletbdrm_device *adev = drm_to_adev(dev);
-
- if (!adev->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops);
static const struct drm_driver appletbdrm_drm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = appletbdrm_driver_gem_prime_import,
.name = "appletbdrm",
.desc = "Apple Touch Bar DRM Driver",
.major = 1,
@@ -747,6 +733,7 @@ static int appletbdrm_probe(struct usb_interface *intf,
struct device *dev = &intf->dev;
struct appletbdrm_device *adev;
struct drm_device *drm = NULL;
+ struct device *dma_dev;
int ret;
ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL);
@@ -761,12 +748,19 @@ static int appletbdrm_probe(struct usb_interface *intf,
adev->in_ep = bulk_in->bEndpointAddress;
adev->out_ep = bulk_out->bEndpointAddress;
- adev->dmadev = dev;
drm = &adev->drm;
usb_set_intfdata(intf, adev);
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ drm_warn(drm, "buffer sharing not supported"); /* not an error */
+ }
+
ret = appletbdrm_get_information(adev);
if (ret) {
drm_err(drm, "Failed to get display information\n");
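One subtlety in the new probe path: put_device() directly follows drm_dev_set_dma_dev(). The sketch below spells out the reference flow; that drm_dev_set_dma_dev() holds its own reference is an assumption here:

	dma_dev = usb_intf_get_dma_device(intf);	/* returns a referenced device, or NULL */
	if (dma_dev) {
		drm_dev_set_dma_dev(drm, dma_dev);	/* assumed to take its own reference */
		put_device(dma_dev);			/* drop ours; PRIME import still works */
	} else {
		drm_warn(drm, "buffer sharing not supported"); /* not an error */
	}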
diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 52ec1e4ea9e5..97a93adc5669 100644
--- a/drivers/gpu/drm/tiny/cirrus-qemu.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -70,20 +70,6 @@ struct cirrus_device {
#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
-struct cirrus_primary_plane_state {
- struct drm_shadow_plane_state base;
-
- /* HW scanout buffer */
- const struct drm_format_info *format;
- unsigned int pitch;
-};
-
-static inline struct cirrus_primary_plane_state *
-to_cirrus_primary_plane_state(struct drm_plane_state *plane_state)
-{
- return container_of(plane_state, struct cirrus_primary_plane_state, base.base);
-};
-
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -144,37 +130,6 @@ static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
}
-static const struct drm_format_info *cirrus_convert_to(struct drm_framebuffer *fb)
-{
- if (fb->format->format == DRM_FORMAT_XRGB8888 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
- if (fb->width * 3 <= CIRRUS_MAX_PITCH)
- /* convert from XR24 to RG24 */
- return drm_format_info(DRM_FORMAT_RGB888);
- else
- /* convert from XR24 to RG16 */
- return drm_format_info(DRM_FORMAT_RGB565);
- }
- return NULL;
-}
-
-static const struct drm_format_info *cirrus_format(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return format;
- return fb->format;
-}
-
-static int cirrus_pitch(struct drm_framebuffer *fb)
-{
- const struct drm_format_info *format = cirrus_convert_to(fb);
-
- if (format)
- return drm_format_info_min_pitch(format, 0, fb->width);
- return fb->pitches[0];
-}
-
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
u32 addr;
@@ -318,7 +273,6 @@ static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
/* Enable extended blanking and pitch bits, and enable full memory */
cr1b = 0x22;
cr1b |= (pitch >> 7) & 0x10;
- cr1b |= (pitch >> 6) & 0x40;
wreg_crt(cirrus, 0x1b, cr1b);
cirrus_set_start_address(cirrus, 0);
@@ -342,13 +296,10 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *new_primary_plane_state =
- to_cirrus_primary_plane_state(new_plane_state);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
- unsigned int pitch;
if (new_crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
@@ -362,17 +313,12 @@ static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
- pitch = cirrus_pitch(fb);
-
/* validate size constraints */
- if (pitch > CIRRUS_MAX_PITCH)
+ if (fb->pitches[0] > CIRRUS_MAX_PITCH)
return -EINVAL;
- else if (pitch * fb->height > CIRRUS_VRAM_SIZE)
+ else if (fb->pitches[0] > CIRRUS_VRAM_SIZE / fb->height)
return -EINVAL;
- new_primary_plane_state->format = cirrus_format(fb);
- new_primary_plane_state->pitch = pitch;
-
return 0;
}
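The size check now divides instead of multiplying, presumably to rule out integer overflow for oversized framebuffers. A short sketch with worked numbers (the 4 MiB VRAM figure is an assumption):

	/*
	 * fb->pitches[0] * fb->height can overflow the multiplication's type,
	 * while fb->pitches[0] > CIRRUS_VRAM_SIZE / fb->height cannot.
	 * E.g. assuming 4 MiB of VRAM and a 5120-byte pitch (1280 px * 4 bytes),
	 * the tallest valid framebuffer is 4194304 / 5120 = 819 lines.
	 */
	if (fb->pitches[0] > CIRRUS_VRAM_SIZE / fb->height)
		return -EINVAL;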
@@ -381,15 +327,10 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
{
struct cirrus_device *cirrus = to_cirrus(plane->dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- const struct drm_format_info *format = primary_plane_state->format;
- unsigned int pitch = primary_plane_state->pitch;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct cirrus_primary_plane_state *old_primary_plane_state =
- to_cirrus_primary_plane_state(old_plane_state);
+ struct drm_framebuffer *old_fb = old_plane_state->fb;
struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram);
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
@@ -401,18 +342,17 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(&cirrus->dev, &idx))
return;
- if (old_primary_plane_state->format != format)
- cirrus_format_set(cirrus, format);
- if (old_primary_plane_state->pitch != pitch)
- cirrus_pitch_set(cirrus, pitch);
+ if (!old_fb || old_fb->format != fb->format)
+ cirrus_format_set(cirrus, fb->format);
+ if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
+ cirrus_pitch_set(cirrus, fb->pitches[0]);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
- unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
+ unsigned int offset = drm_fb_clip_offset(fb->pitches[0], fb->format, &damage);
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
- drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
+ drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage);
}
drm_dev_exit(idx);
@@ -424,62 +364,11 @@ static const struct drm_plane_helper_funcs cirrus_primary_plane_helper_funcs = {
.atomic_update = cirrus_primary_plane_helper_atomic_update,
};
-static struct drm_plane_state *
-cirrus_primary_plane_atomic_duplicate_state(struct drm_plane *plane)
-{
- struct drm_plane_state *plane_state = plane->state;
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
- struct cirrus_primary_plane_state *new_primary_plane_state;
- struct drm_shadow_plane_state *new_shadow_plane_state;
-
- if (!plane_state)
- return NULL;
-
- new_primary_plane_state = kzalloc(sizeof(*new_primary_plane_state), GFP_KERNEL);
- if (!new_primary_plane_state)
- return NULL;
- new_shadow_plane_state = &new_primary_plane_state->base;
-
- __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
- new_primary_plane_state->format = primary_plane_state->format;
- new_primary_plane_state->pitch = primary_plane_state->pitch;
-
- return &new_shadow_plane_state->base;
-}
-
-static void cirrus_primary_plane_atomic_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
-{
- struct cirrus_primary_plane_state *primary_plane_state =
- to_cirrus_primary_plane_state(plane_state);
-
- __drm_gem_destroy_shadow_plane_state(&primary_plane_state->base);
- kfree(primary_plane_state);
-}
-
-static void cirrus_reset_primary_plane(struct drm_plane *plane)
-{
- struct cirrus_primary_plane_state *primary_plane_state;
-
- if (plane->state) {
- cirrus_primary_plane_atomic_destroy_state(plane, plane->state);
- plane->state = NULL; /* must be set to NULL here */
- }
-
- primary_plane_state = kzalloc(sizeof(*primary_plane_state), GFP_KERNEL);
- if (!primary_plane_state)
- return;
- __drm_gem_reset_shadow_plane(plane, &primary_plane_state->base);
-}
-
static const struct drm_plane_funcs cirrus_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = cirrus_reset_primary_plane,
- .atomic_duplicate_state = cirrus_primary_plane_atomic_duplicate_state,
- .atomic_destroy_state = cirrus_primary_plane_atomic_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int cirrus_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -614,9 +503,17 @@ static enum drm_mode_status cirrus_mode_config_mode_valid(struct drm_device *dev
const struct drm_display_mode *mode)
{
const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
- uint64_t pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ u64 pitch;
- if (pitch * mode->vdisplay > CIRRUS_VRAM_SIZE)
+ if (drm_WARN_ON_ONCE(dev, !format))
+ return MODE_ERROR; /* driver bug */
+
+ pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (pitch > CIRRUS_MAX_PITCH)
+ return MODE_BAD_WIDTH; /* maximum programmable pitch */
+ if (pitch > CIRRUS_VRAM_SIZE / mode->vdisplay)
return MODE_MEM;
return MODE_OK;
@@ -681,7 +578,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, DRIVER_NAME);
+ ret = pcim_request_all_regions(pdev, DRIVER_NAME);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 41e9bfb2e2ff..fb0004166f4a 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -86,7 +86,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
struct gm12u320_device {
struct drm_device dev;
- struct device *dmadev;
struct drm_simple_display_pipe pipe;
struct drm_connector conn;
unsigned char *cmd_buf;
@@ -602,22 +601,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
-
- if (!gm12u320->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static const struct drm_driver gm12u320_drm_driver = {
@@ -630,7 +613,6 @@ static const struct drm_driver gm12u320_drm_driver = {
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = gm12u320_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
};
@@ -645,6 +627,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
{
struct gm12u320_device *gm12u320;
struct drm_device *dev;
+ struct device *dma_dev;
int ret;
/*
@@ -660,16 +643,20 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
return PTR_ERR(gm12u320);
dev = &gm12u320->dev;
- gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!gm12u320->dmadev)
+ dma_dev = usb_intf_get_dma_device(interface);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
ret = drmm_mode_config_init(dev);
if (ret)
- goto err_put_device;
+ return ret;
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
@@ -679,15 +666,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- goto err_put_device;
+ return ret;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -697,31 +684,24 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- goto err_put_device;
+ return ret;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_put_device;
+ return ret;
drm_client_setup(dev, NULL);
return 0;
-
-err_put_device:
- put_device(gm12u320->dmadev);
- return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = to_gm12u320(dev);
- put_device(gm12u320->dmadev);
- gm12u320->dmadev = NULL;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 0460ecaef4bd..23914a9f7fd3 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -390,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_client_setup(drm, NULL);
+ if (bpp == 16)
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
+ else
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index f8f20d2f6174..6c77550c51af 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -175,7 +175,7 @@ struct signal_timer {
static void signal_for_ttm_bo_reserve(struct timer_list *t)
{
- struct signal_timer *s_timer = from_timer(s_timer, t, timer);
+ struct signal_timer *s_timer = timer_container_of(s_timer, t, timer);
struct task_struct *task = s_timer->ctx->task;
do_send_sig_info(SIGTERM, SEND_SIG_PRIV, task, PIDTYPE_PID);
@@ -201,7 +201,7 @@ static int threaded_ttm_bo_reserve(void *arg)
err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
timer_delete_sync(&s_timer.timer);
- destroy_timer_on_stack(&s_timer.timer);
+ timer_destroy_on_stack(&s_timer.timer);
ww_acquire_fini(&ctx);
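For orientation, a sketch of the renamed on-stack timer helpers used above (the callback body mirrors the test; the lifecycle lines are a sketch):

static void example_timer_fn(struct timer_list *t)
{
	/* timer_container_of() replaces from_timer(); same semantics */
	struct signal_timer *s_timer = timer_container_of(s_timer, t, timer);

	do_send_sig_info(SIGTERM, SEND_SIG_PRIV, s_timer->ctx->task, PIDTYPE_PID);
}

	timer_setup_on_stack(&s_timer.timer, example_timer_fn, 0);
	/* ... arm the timer, run the reservation ... */
	timer_delete_sync(&s_timer.timer);
	timer_destroy_on_stack(&s_timer.timer);	/* replaces destroy_timer_on_stack() */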
@@ -340,7 +340,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+ KUNIT_ASSERT_NOT_NULL(test, resv);
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index 9e2d72c447ee..ffaab68bd5dd 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -120,13 +120,13 @@ ttm_backup_backup_page(struct file *backup, struct page *page,
.for_reclaim = 1,
};
folio_set_reclaim(to_folio);
- ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
+ ret = shmem_writeout(to_folio, &wbc);
if (!folio_test_writeback(to_folio))
folio_clear_reclaim(to_folio);
/*
- * If writepage succeeds, it unlocks the folio.
- * writepage() errors are otherwise dropped, since writepage()
- * is only best effort here.
+	 * If writeout succeeds, it unlocks the folio. Errors
+	 * are otherwise dropped, since writeout is only best
+	 * effort here.
*/
if (ret)
folio_unlock(to_folio);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5bf3c969907c..08a23ab037cb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -235,7 +235,7 @@ static void ttm_bo_delayed_delete(struct work_struct *work)
bo = container_of(work, typeof(*bo), delayed_delete);
- dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_cleanup_memtype_use(bo);
@@ -270,7 +270,7 @@ static void ttm_bo_release(struct kref *kref)
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
- if (!dma_resv_test_signaled(bo->base.resv,
+ if (!dma_resv_test_signaled(&bo->base._resv,
DMA_RESV_USAGE_BOOKKEEP) ||
(want_init_on_free() && (bo->ttm != NULL)) ||
bo->type == ttm_bo_type_sg ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a194db83421d..bdfa6ecfef05 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -220,7 +220,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
- .force_alloc = true
};
ttm = bo->ttm;
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7e5a60c55813..769b0ca9be47 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -548,7 +548,6 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = true
};
struct dma_fence *fence;
int ret;
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 05b3a152cc33..1922988625eb 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -22,13 +22,14 @@ static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
if (ret)
return ret;
- udl_sync_pending_urbs(dev);
+ udl_sync_pending_urbs(udl);
return 0;
}
@@ -49,22 +50,6 @@ static int udl_usb_reset_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
-/*
- * FIXME: Dma-buf sharing requires DMA support by the importing device.
- * This function is a workaround to make USB devices work as well.
- * See todo.rst for how to fix the issue in the dma-buf framework.
- */
-static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct udl_device *udl = to_udl(dev);
-
- if (!udl->dmadev)
- return ERR_PTR(-ENODEV);
-
- return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
-}
-
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static const struct drm_driver driver = {
@@ -73,7 +58,6 @@ static const struct drm_driver driver = {
/* GEM hooks */
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .gem_prime_import = udl_driver_gem_prime_import,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
@@ -126,10 +110,10 @@ static int udl_usb_probe(struct usb_interface *interface,
static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
- drm_kms_helper_poll_fini(dev);
- udl_drop_usb(dev);
drm_dev_unplug(dev);
+ udl_drop_usb(udl);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index be00dc1d87a1..145bb95ccc48 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -50,18 +50,14 @@ struct urb_list {
struct udl_device {
struct drm_device drm;
- struct device *dev;
- struct device *dmadev;
+
+ unsigned long sku_pixel_limit;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
- struct mutex gem_lock;
-
- int sku_pixel_limit;
-
struct urb_list urbs;
};
@@ -73,22 +69,22 @@ static inline struct usb_device *udl_to_usb_device(struct udl_device *udl)
}
/* modeset */
-int udl_modeset_init(struct drm_device *dev);
+int udl_modeset_init(struct udl_device *udl);
struct drm_connector *udl_connector_init(struct drm_device *dev);
-struct urb *udl_get_urb(struct drm_device *dev);
+struct urb *udl_get_urb(struct udl_device *udl);
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
-void udl_sync_pending_urbs(struct drm_device *dev);
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len);
+void udl_sync_pending_urbs(struct udl_device *udl);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
-int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
+int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width);
-int udl_drop_usb(struct drm_device *dev);
+int udl_drop_usb(struct udl_device *udl);
int udl_select_std_channel(struct udl_device *udl);
#endif
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 3ebe2ce55dfd..bc58991a6f14 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -8,6 +8,8 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
+#include <linux/unaligned.h>
+
#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -23,72 +25,99 @@
#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
+#define UDL_SKU_PIXEL_LIMIT_DEFAULT 2080000
+
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
+/*
+ * Try to make sense of whatever we parse: on malformed input, return
+ * @end to stop parsing rather than failing hard.
+ */
+static const u8 *udl_parse_key_value_pair(struct udl_device *udl, const u8 *pos, const u8 *end)
+{
+ u16 key;
+ u8 len;
+
+ /* read key */
+ if (pos >= end - 2)
+ return end;
+ key = get_unaligned_le16(pos);
+ pos += 2;
+
+ /* read value length */
+ if (pos >= end - 1)
+ return end;
+ len = *pos++;
+
+ /* read value */
+ if (pos >= end - len)
+ return end;
+ switch (key) {
+ case 0x0200: { /* maximum number of pixels */
+ unsigned int sku_pixel_limit;
+
+ if (len < sizeof(__le32))
+ break;
+ sku_pixel_limit = get_unaligned_le32(pos);
+ if (sku_pixel_limit >= 16 * UDL_SKU_PIXEL_LIMIT_DEFAULT)
+ break; /* almost 100 MiB, so probably bogus */
+ udl->sku_pixel_limit = sku_pixel_limit;
+ break;
+ }
+ default:
+ break;
+ }
+ pos += len;
+
+ return pos;
+}
+
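The records walked above form a simple key/length/value layout; a hypothetical record, with all multi-byte fields little endian:

	/*
	 *   00 02         key   = 0x0200 (maximum number of pixels)
	 *   04            len   = 4
	 *   00 a4 1f 00   value = 2073600, i.e. a 1920x1080 SKU limit
	 */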
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
+ struct drm_device *dev = &udl->drm;
struct usb_device *udev = udl_to_usb_device(udl);
- char *desc;
- char *buf;
- char *desc_end;
-
- u8 total_len = 0;
+ bool detected = false;
+ void *buf;
+ int ret;
+ unsigned int len;
+ const u8 *desc;
+ const u8 *desc_end;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
- return false;
+ return -ENOMEM;
+
+ ret = usb_get_descriptor(udev, 0x5f, /* vendor specific */
+ 0, buf, MAX_VENDOR_DESCRIPTOR_SIZE);
+ if (ret < 0)
+ goto out;
+ len = ret;
+
+ if (len < 5)
+ goto out;
+
desc = buf;
+ desc_end = desc + len;
- total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
- 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
- if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%11ph\n",
- total_len, desc);
-
- if ((desc[0] != total_len) || /* descriptor length */
- (desc[1] != 0x5f) || /* vendor descriptor type */
- (desc[2] != 0x01) || /* version (2 bytes) */
- (desc[3] != 0x00) ||
- (desc[4] != total_len - 2)) /* length after type */
- goto unrecognized;
-
- desc_end = desc + total_len;
- desc += 5; /* the fixed header we've already parsed */
-
- while (desc < desc_end) {
- u8 length;
- u16 key;
-
- key = le16_to_cpu(*((u16 *) desc));
- desc += sizeof(u16);
- length = *desc;
- desc++;
-
- switch (key) {
- case 0x0200: { /* max_area */
- u32 max_area;
- max_area = le32_to_cpu(*((u32 *)desc));
- DRM_DEBUG("DL chip limited to %d pixel modes\n",
- max_area);
- udl->sku_pixel_limit = max_area;
- break;
- }
- default:
- break;
- }
- desc += length;
- }
- }
+ if ((desc[0] != len) || /* descriptor length */
+ (desc[1] != 0x5f) || /* vendor descriptor type */
+ (desc[2] != 0x01) || /* version (2 bytes) */
+ (desc[3] != 0x00) ||
+ (desc[4] != len - 2)) /* length after type */
+ goto out;
+ desc += 5;
- goto success;
+ detected = true;
-unrecognized:
- /* allow udlfb to load for now even if firmware unrecognized */
- DRM_ERROR("Unrecognized vendor firmware descriptor\n");
+ while (desc < desc_end)
+ desc = udl_parse_key_value_pair(udl, desc, desc_end);
-success:
+out:
+ if (!detected)
+ drm_warn(dev, "Unrecognized vendor firmware descriptor\n");
kfree(buf);
- return true;
+
+ return 0;
}
/*
@@ -145,9 +174,8 @@ void udl_urb_completion(struct urb *urb)
wake_up(&udl->urbs.sleep);
}
-static void udl_free_urb_list(struct drm_device *dev)
+static void udl_free_urb_list(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
struct urb_node *unode;
struct urb *urb;
@@ -172,9 +200,8 @@ static void udl_free_urb_list(struct drm_device *dev)
wake_up_all(&udl->urbs.sleep);
}
-static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
+static int udl_alloc_urb_list(struct udl_device *udl, int count, size_t size)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
struct urb_node *unode;
char *buf;
@@ -210,7 +237,7 @@ retry:
usb_free_urb(urb);
if (size > PAGE_SIZE) {
size /= 2;
- udl_free_urb_list(dev);
+ udl_free_urb_list(udl);
goto retry;
}
break;
@@ -259,9 +286,8 @@ static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
}
#define GET_URB_TIMEOUT HZ
-struct urb *udl_get_urb(struct drm_device *dev)
+struct urb *udl_get_urb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
spin_lock_irq(&udl->urbs.lock);
@@ -270,9 +296,8 @@ struct urb *udl_get_urb(struct drm_device *dev)
return urb;
}
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len)
{
- struct udl_device *udl = to_udl(dev);
int ret;
if (WARN_ON(len > udl->urbs.size)) {
@@ -290,9 +315,9 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
}
/* wait until all pending URBs have been processed */
-void udl_sync_pending_urbs(struct drm_device *dev)
+void udl_sync_pending_urbs(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
+ struct drm_device *dev = &udl->drm;
spin_lock_irq(&udl->urbs.lock);
/* 2 seconds as a sane timeout */
@@ -308,53 +333,55 @@ int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
+ struct device *dma_dev;
DRM_DEBUG("\n");
- udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!udl->dmadev)
+ dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
- mutex_init(&udl->gem_lock);
+ /*
+	 * Not all devices provide a vendor descriptor with device
+	 * information. Initialize to a default value seen on real-world
+	 * devices; it is just enough for a Full HD (1920x1080) display.
+ */
+ udl->sku_pixel_limit = UDL_SKU_PIXEL_LIMIT_DEFAULT;
- if (!udl_parse_vendor_descriptor(udl)) {
- ret = -ENODEV;
- DRM_ERROR("firmware not recognized. Assume incompatible device\n");
+ ret = udl_parse_vendor_descriptor(udl);
+ if (ret)
goto err;
- }
if (udl_select_std_channel(udl))
DRM_ERROR("Selecting channel failed\n");
- if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ if (!udl_alloc_urb_list(udl, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
DRM_ERROR("udl_alloc_urb_list failed\n");
+ ret = -ENOMEM;
goto err;
}
DRM_DEBUG("\n");
- ret = udl_modeset_init(dev);
+ ret = udl_modeset_init(udl);
if (ret)
goto err;
- drm_kms_helper_poll_init(dev);
-
return 0;
err:
if (udl->urbs.count)
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
+ udl_free_urb_list(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
-int udl_drop_usb(struct drm_device *dev)
+int udl_drop_usb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
-
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
- udl->dmadev = NULL;
+ udl_free_urb_list(udl);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index bbb04f98886a..231e829bd709 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -205,6 +205,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
struct drm_device *dev = fb->dev;
+ struct udl_device *udl = to_udl(dev);
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
int i, ret;
char *cmd;
@@ -216,7 +217,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
return -ENOMEM;
cmd = urb->transfer_buffer;
@@ -226,7 +227,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
const int byte_offset = line_offset + (clip->x1 << log_bpp);
const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
const int byte_width = drm_rect_width(clip) << log_bpp;
- ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
+ ret = udl_render_hline(udl, log_bpp, &urb, (char *)vaddr,
&cmd, byte_offset, dev_byte_offset,
byte_width);
if (ret)
@@ -239,7 +240,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
*cmd++ = UDL_MSG_BULK;
len = cmd - (char *)urb->transfer_buffer;
- ret = udl_submit_urb(dev, urb, len);
+ ret = udl_submit_urb(udl, urb, len);
} else {
udl_urb_completion(urb);
}
@@ -330,6 +331,7 @@ static const struct drm_plane_funcs udl_primary_plane_funcs = {
static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = to_udl(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *mode = &crtc_state->mode;
struct urb *urb;
@@ -339,7 +341,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
if (!drm_dev_enter(dev, &idx))
return;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
goto out;
@@ -355,7 +357,7 @@ static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atom
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
- udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+ udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
@@ -364,6 +366,7 @@ out:
static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
char *buf;
int idx;
@@ -371,7 +374,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
if (!drm_dev_enter(dev, &idx))
return;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
goto out;
@@ -381,7 +384,7 @@ static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
- udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
+ udl_submit_urb(udl, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
@@ -476,9 +479,9 @@ static const struct drm_mode_config_funcs udl_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int udl_modeset_init(struct drm_device *dev)
+int udl_modeset_init(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
+ struct drm_device *dev = &udl->drm;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
@@ -535,6 +538,7 @@ int udl_modeset_init(struct drm_device *dev)
return ret;
drm_mode_config_reset(dev);
+ drmm_kms_helper_poll_init(dev);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 62224992988f..7d670b3a5293 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -170,7 +170,7 @@ static void udl_compress_hline16(
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
-int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
+int udl_render_hline(struct udl_device *udl, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width)
@@ -199,10 +199,10 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
- int ret = udl_submit_urb(dev, urb, len);
+ int ret = udl_submit_urb(udl, urb, len);
if (ret)
return ret;
- urb = udl_get_urb(dev);
+ urb = udl_get_urb(udl);
if (!urb)
return -EAGAIN;
*urb_ptr = urb;
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 76816f2551c1..7e789e181af0 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -21,74 +21,74 @@ struct v3d_reg_def {
};
static const struct v3d_reg_def v3d_hub_reg_defs[] = {
- REGDEF(33, 42, V3D_HUB_AXICFG),
- REGDEF(33, 71, V3D_HUB_UIFCFG),
- REGDEF(33, 71, V3D_HUB_IDENT0),
- REGDEF(33, 71, V3D_HUB_IDENT1),
- REGDEF(33, 71, V3D_HUB_IDENT2),
- REGDEF(33, 71, V3D_HUB_IDENT3),
- REGDEF(33, 71, V3D_HUB_INT_STS),
- REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
-
- REGDEF(33, 71, V3D_MMU_CTL),
- REGDEF(33, 71, V3D_MMU_VIO_ADDR),
- REGDEF(33, 71, V3D_MMU_VIO_ID),
- REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
-
- REGDEF(71, 71, V3D_GMP_STATUS(71)),
- REGDEF(71, 71, V3D_GMP_CFG(71)),
- REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO),
+
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_STATUS(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
- REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN),
+ REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK),
};
static const struct v3d_reg_def v3d_core_reg_defs[] = {
- REGDEF(33, 71, V3D_CTL_IDENT0),
- REGDEF(33, 71, V3D_CTL_IDENT1),
- REGDEF(33, 71, V3D_CTL_IDENT2),
- REGDEF(33, 71, V3D_CTL_MISCCFG),
- REGDEF(33, 71, V3D_CTL_INT_STS),
- REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
- REGDEF(33, 71, V3D_CLE_CT0CS),
- REGDEF(33, 71, V3D_CLE_CT0CA),
- REGDEF(33, 71, V3D_CLE_CT0EA),
- REGDEF(33, 71, V3D_CLE_CT1CS),
- REGDEF(33, 71, V3D_CLE_CT1CA),
- REGDEF(33, 71, V3D_CLE_CT1EA),
-
- REGDEF(33, 71, V3D_PTB_BPCA),
- REGDEF(33, 71, V3D_PTB_BPCS),
-
- REGDEF(33, 42, V3D_GMP_STATUS(33)),
- REGDEF(33, 42, V3D_GMP_CFG(33)),
- REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
-
- REGDEF(33, 71, V3D_ERR_FDBGO),
- REGDEF(33, 71, V3D_ERR_FDBGB),
- REGDEF(33, 71, V3D_ERR_FDBGS),
- REGDEF(33, 71, V3D_ERR_STAT),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)),
+ REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_VIO_ADDR(33)),
+
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS),
+ REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT),
};
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
- REGDEF(41, 71, V3D_CSD_STATUS),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
- REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)),
- REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)),
- REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
+ REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)),
+ REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)),
+ REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, "TSY: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
}
@@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
seq_printf(m, " QPUs: %d\n", nslc * qups);
seq_printf(m, " Semaphores: %d\n",
V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
- if (v3d->ver <= 42) {
+ if (v3d->ver <= V3D_GEN_42) {
seq_printf(m, " BCG int: %d\n",
(ident2 & V3D_IDENT2_BCG_INT) != 0);
}
- if (v3d->ver < 40) {
+ if (v3d->ver < V3D_GEN_41) {
seq_printf(m, " Override TMU: %d\n",
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
@@ -234,7 +234,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int core = 0;
int measure_ms = 1000;
- if (v3d->ver >= 40) {
+ if (v3d->ver >= V3D_GEN_41) {
int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
V3D_SET_FIELD_VER(cycle_count_reg,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 852015214e97..5e997ae8bc9c 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched/clock.h>
@@ -92,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
args->value = 1;
return 0;
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
- args->value = (v3d->ver >= 40);
+ args->value = (v3d->ver >= V3D_GEN_41);
return 0;
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
@@ -254,14 +255,44 @@ static const struct drm_driver v3d_drm_driver = {
};
static const struct of_device_id v3d_of_match[] = {
- { .compatible = "brcm,2711-v3d" },
- { .compatible = "brcm,2712-v3d" },
- { .compatible = "brcm,7268-v3d" },
- { .compatible = "brcm,7278-v3d" },
+ { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 },
+ { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 },
+ { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 },
+ { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 },
{},
};
MODULE_DEVICE_TABLE(of, v3d_of_match);
+static void
+v3d_idle_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) {
+ DRM_ERROR("Failed to power up SMS\n");
+ }
+
+ v3d_reset_sms(v3d);
+}
+
+static void
+v3d_power_off_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF);
+
+ if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS),
+ V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) {
+ DRM_ERROR("Failed to power off SMS\n");
+ }
+}
+
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
@@ -274,6 +305,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct v3d_dev *v3d;
+ enum v3d_gen gen;
int ret;
u32 mmu_debug;
u32 ident1, ident3;
@@ -287,6 +319,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm);
+ gen = (uintptr_t)of_device_get_match_data(dev);
+ v3d->ver = gen;
+
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
return ret;
@@ -295,6 +330,12 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (v3d->ver >= V3D_GEN_71) {
+ ret = map_regs(v3d, &v3d->sms_regs, "sms");
+ if (ret)
+ return ret;
+ }
+
v3d->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(v3d->clk))
return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
@@ -305,6 +346,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
return ret;
}
+ v3d_idle_sms(v3d);
+
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
@@ -316,6 +359,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ident1 = V3D_READ(V3D_HUB_IDENT1);
v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 +
V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV));
+ /* Make sure that the V3D tech version retrieved from the HW is equal
+ * to the one advertised by the device tree.
+ */
+ WARN_ON(v3d->ver != gen);
+
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
@@ -340,7 +388,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
}
}
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
goto clk_disable;
@@ -400,6 +448,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
+ v3d_power_off_sms(v3d);
+
clk_disable_unprepare(v3d->clk);
}
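
The probe path now derives the generation from the OF match table instead of trusting the hardware ident alone, keeping the readback only as a WARN_ON cross-check. A userspace sketch of the uintptr_t round-trip through the .data pointer, assuming a simplified match table:

/*
 * Sketch of the of_device_get_match_data() idiom: a small integer is
 * smuggled through a void * in the match table and cast back via
 * uintptr_t on lookup. Table contents are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct of_match { const char *compatible; const void *data; };

static const struct of_match match_table[] = {
	{ "brcm,2711-v3d", (void *)(uintptr_t)42 },
	{ "brcm,2712-v3d", (void *)(uintptr_t)71 },
	{ NULL, NULL },
};

static uintptr_t match_data(const char *compatible)
{
	for (const struct of_match *m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return (uintptr_t)m->data;
	return 0;
}

int main(void)
{
	printf("gen = %u\n", (unsigned)match_data("brcm,2712-v3d")); /* 71 */
	return 0;
}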
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9deaefa0f95b..b51f0b648a08 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -94,11 +94,18 @@ struct v3d_perfmon {
u64 values[] __counted_by(ncounters);
};
+enum v3d_gen {
+ V3D_GEN_33 = 33,
+ V3D_GEN_41 = 41,
+ V3D_GEN_42 = 42,
+ V3D_GEN_71 = 71,
+};
+
struct v3d_dev {
struct drm_device drm;
/* Short representation (e.g. 33, 41) of the V3D tech version */
- int ver;
+ enum v3d_gen ver;
/* Short representation (e.g. 5, 6) of the V3D tech revision */
int rev;
@@ -111,6 +118,7 @@ struct v3d_dev {
void __iomem *core_regs[3];
void __iomem *bridge_regs;
void __iomem *gca_regs;
+ void __iomem *sms_regs;
struct clk *clk;
struct reset_control *reset;
@@ -199,7 +207,7 @@ to_v3d_dev(struct drm_device *dev)
static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
- return v3d->ver >= 41;
+ return v3d->ver >= V3D_GEN_41;
}
#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
@@ -261,6 +269,15 @@ to_v3d_fence(struct dma_fence *fence)
#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)
+#define V3D_SMS_IDLE 0x0
+#define V3D_SMS_ISOLATING_FOR_RESET 0xa
+#define V3D_SMS_RESETTING 0xb
+#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc
+#define V3D_SMS_POWER_OFF_STATE 0xd
+
+#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset))
+#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset))
+
#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
@@ -539,6 +556,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset_sms(struct v3d_dev *v3d);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b1e681630ded..d7d16da78db3 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- if (v3d->ver < 40)
+ if (v3d->ver < V3D_GEN_41)
V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
@@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core)
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
- if (v3d->ver >= 41)
+ if (v3d->ver >= V3D_GEN_41)
return;
V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
@@ -105,6 +105,22 @@ v3d_reset_v3d(struct v3d_dev *v3d)
}
void
+v3d_reset_sms(struct v3d_dev *v3d)
+{
+ if (v3d->ver < V3D_GEN_71)
+ return;
+
+ V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE));
+
+ if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) &&
+ !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS),
+ V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) {
+ DRM_ERROR("Failed to wait for SMS reset\n");
+ }
+}
+
+void
v3d_reset(struct v3d_dev *v3d)
{
struct drm_device *dev = &v3d->drm;
@@ -119,6 +135,7 @@ v3d_reset(struct v3d_dev *v3d)
v3d_idle_axi(v3d, 0);
v3d_idle_gca(v3d);
+ v3d_reset_sms(v3d);
v3d_reset_v3d(v3d);
v3d_mmu_set_page_table(v3d);
@@ -132,13 +149,13 @@ v3d_reset(struct v3d_dev *v3d)
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
- if (v3d->ver < 41) {
+ if (v3d->ver < V3D_GEN_41) {
u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
- if (v3d->ver < 33) {
+ if (v3d->ver < V3D_GEN_33) {
V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
}
@@ -151,7 +168,7 @@ v3d_flush_l3(struct v3d_dev *v3d)
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
- if (v3d->ver > 32)
+ if (v3d->ver >= V3D_GEN_33)
return;
V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
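
v3d_reset_sms() above leans on the wait_for() poll-until-timeout idiom. Below is a self-contained userspace sketch of that idiom, assuming a millisecond monotonic clock; the kernel macro also sleeps between polls, and like it this sketch uses a GNU statement expression.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Evaluates to false on success, true if the timeout expired first. */
#define wait_for(cond, timeout_ms) ({				\
	long __end = now_ms() + (timeout_ms);			\
	bool __timed_out = false;				\
	while (!(cond)) {					\
		if (now_ms() > __end) {				\
			__timed_out = !(cond);			\
			break;					\
		}						\
	}							\
	__timed_out;						\
})

int main(void)
{
	volatile int state = 1;	/* stands in for the SMS state register */

	if (wait_for(state == 1, 100))
		fprintf(stderr, "Failed to wait for SMS reset\n");
	return 0;
}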
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 72b6a119412f..2cca5d3a26a2 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -143,7 +143,7 @@ v3d_irq(int irq, void *arg)
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
- if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
+ if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV))
dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
@@ -186,27 +186,59 @@ v3d_hub_irq(int irq, void *arg)
u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
(v3d->va_width - 32));
- static const char *const v3d41_axi_ids[] = {
- "L2T",
- "PTB",
- "PSE",
- "TLB",
- "CLE",
- "TFU",
- "MMU",
- "GMP",
+ static const struct {
+ u32 begin;
+ u32 end;
+ const char *client;
+ } v3d41_axi_ids[] = {
+ {0x00, 0x20, "L2T"},
+ {0x20, 0x21, "PTB"},
+ {0x40, 0x41, "PSE"},
+ {0x60, 0x80, "TLB"},
+ {0x80, 0x88, "CLE"},
+ {0xA0, 0xA1, "TFU"},
+ {0xC0, 0xE0, "MMU"},
+ {0xE0, 0xE1, "GMP"},
+ }, v3d71_axi_ids[] = {
+ {0x00, 0x30, "L2T"},
+ {0x30, 0x38, "CLE"},
+ {0x38, 0x39, "PTB"},
+ {0x39, 0x3A, "PSE"},
+ {0x3A, 0x3B, "CSD"},
+ {0x40, 0x60, "TLB"},
+ {0x60, 0x70, "MMU"},
+ {0x7C, 0x7E, "TFU"},
+ {0x7F, 0x80, "GMP"},
};
const char *client = "?";
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL));
- if (v3d->ver >= 41) {
- axi_id = axi_id >> 5;
- if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
- client = v3d41_axi_ids[axi_id];
+ if (v3d->ver >= V3D_GEN_71) {
+ size_t i;
+
+ axi_id = axi_id & 0x7F;
+ for (i = 0; i < ARRAY_SIZE(v3d71_axi_ids); i++) {
+ if (axi_id >= v3d71_axi_ids[i].begin &&
+ axi_id < v3d71_axi_ids[i].end) {
+ client = v3d71_axi_ids[i].client;
+ break;
+ }
+ }
+ } else if (v3d->ver >= V3D_GEN_41) {
+ size_t i;
+
+ axi_id = axi_id & 0xFF;
+ for (i = 0; i < ARRAY_SIZE(v3d41_axi_ids); i++) {
+ if (axi_id >= v3d41_axi_ids[i].begin &&
+ axi_id < v3d41_axi_ids[i].end) {
+ client = v3d41_axi_ids[i].client;
+ break;
+ }
+ }
}
- dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+ dev_err(v3d->drm.dev, "MMU error from client %s (0x%x) at 0x%llx%s%s%s\n",
client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
@@ -217,7 +249,7 @@ v3d_hub_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
- if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
+ if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
dev_err(v3d->drm.dev, "GMP Violation\n");
status = IRQ_HANDLED;
}
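
The hub IRQ handler now decodes the violating AXI ID through per-client half-open [begin, end) ranges instead of a dense array indexed by axi_id >> 5, which is what lets the 7.1 table coexist with the 4.1 one. A compilable sketch of that lookup over a subset of the 7.1 ranges:

#include <stdint.h>
#include <stdio.h>

struct axi_range { uint32_t begin, end; const char *client; };

static const struct axi_range v3d71_axi_ids[] = {
	{0x00, 0x30, "L2T"},
	{0x30, 0x38, "CLE"},
	{0x60, 0x70, "MMU"},
};

static const char *axi_client(uint32_t axi_id)
{
	axi_id &= 0x7F;	/* the 7.1 ID field is 7 bits wide */
	for (size_t i = 0; i < sizeof(v3d71_axi_ids) / sizeof(v3d71_axi_ids[0]); i++)
		if (axi_id >= v3d71_axi_ids[i].begin &&
		    axi_id < v3d71_axi_ids[i].end)
			return v3d71_axi_ids[i].client;
	return "?";
}

int main(void)
{
	printf("%s\n", axi_client(0x32));	/* CLE */
	printf("%s\n", axi_client(0x55));	/* ? (unmapped) */
	return 0;
}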
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index 3ebda2fa46fc..9a3fe5255874 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -200,10 +200,10 @@ void v3d_perfmon_init(struct v3d_dev *v3d)
const struct v3d_perf_counter_desc *counters = NULL;
unsigned int max = 0;
- if (v3d->ver >= 71) {
+ if (v3d->ver >= V3D_GEN_71) {
counters = v3d_v71_performance_counters;
max = ARRAY_SIZE(v3d_v71_performance_counters);
- } else if (v3d->ver >= 42) {
+ } else if (v3d->ver >= V3D_GEN_42) {
counters = v3d_v42_performance_counters;
max = ARRAY_SIZE(v3d_v42_performance_counters);
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6da3c69082bd..c1870265eaee 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -515,4 +515,30 @@
# define V3D_ERR_VPAERGS BIT(1)
# define V3D_ERR_VPAEABB BIT(0)
+#define V3D_SMS_REE_CS 0x00000
+#define V3D_SMS_TEE_CS 0x00400
+# define V3D_SMS_INTERRUPT BIT(31)
+# define V3D_SMS_POWER_OFF BIT(30)
+# define V3D_SMS_CLEAR_POWER_OFF BIT(29)
+# define V3D_SMS_LOCK BIT(28)
+# define V3D_SMS_CLEAR_LOCK BIT(27)
+# define V3D_SMS_SVP_MODE_EXIT BIT(26)
+# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25)
+# define V3D_SMS_SVP_MODE_ENTER BIT(24)
+# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23)
+# define V3D_SMS_THEIR_MODE_EXIT BIT(22)
+# define V3D_SMS_THEIR_MODE_ENTER BIT(21)
+# define V3D_SMS_OUR_MODE_EXIT BIT(20)
+# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19)
+# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10)
+# define V3D_SMS_SEQ_PC_SHIFT 10
+# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8)
+# define V3D_SMS_HUBCORE_STATUS_SHIFT 8
+# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6)
+# define V3D_SMS_NEW_MODE_SHIFT 6
+# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4)
+# define V3D_SMS_OLD_MODE_SHIFT 4
+# define V3D_SMS_STATE_MASK V3D_MASK(3, 0)
+# define V3D_SMS_STATE_SHIFT 0
+
#endif /* V3D_REGS_H */
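
The new V3D_SMS_* definitions follow the driver's MASK/SHIFT naming so that V3D_GET_FIELD() can extract fields by token pasting. A simplified, compilable reimplementation of that extraction; V3D_MASK() and V3D_GET_FIELD() are the driver's real macros, but the bodies below are illustrative and only valid for fields below bit 31.

#include <stdint.h>
#include <stdio.h>

#define V3D_MASK(high, low) (((1U << ((high) - (low) + 1)) - 1) << (low))

#define V3D_SMS_STATE_MASK	V3D_MASK(3, 0)
#define V3D_SMS_STATE_SHIFT	0

#define V3D_GET_FIELD(word, field) \
	(((word) & field##_MASK) >> field##_SHIFT)

int main(void)
{
	uint32_t tee_cs = 0xd;	/* pretend register readback */

	/* 0xd is V3D_SMS_POWER_OFF_STATE in the table above */
	printf("SMS state: 0x%x\n", V3D_GET_FIELD(tee_cs, V3D_SMS_STATE));
	return 0;
}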
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index eb35482f6fb5..42df9d3567e7 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -199,7 +199,6 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
struct v3d_dev *v3d = job->v3d;
struct v3d_file_priv *file = job->file->driver_priv;
struct v3d_stats *global_stats = &v3d->queue[queue].stats;
- struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
unsigned long flags;
@@ -209,7 +208,12 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
else
preempt_disable();
- v3d_stats_update(local_stats, now);
+ /* Don't update the local stats if the file context has already closed */
+ if (file)
+ v3d_stats_update(&file->stats[queue], now);
+ else
+ drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n");
+
v3d_stats_update(global_stats, now);
if (IS_ENABLED(CONFIG_LOCKDEP))
@@ -357,11 +361,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
- if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
+ if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
@@ -412,7 +416,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
*
* XXX: Set the CFG7 register
*/
- if (v3d->ver >= 71)
+ if (v3d->ver >= V3D_GEN_71)
V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);
/* CFG0 write kicks off the job. */
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
index e70d7c3076ac..577d9a956369 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
@@ -61,6 +61,19 @@ static const struct drm_display_mode default_mode = {
DRM_SIMPLE_MODE(640, 480, 64, 48)
};
+/**
+ * vc4_mock_atomic_add_output() - Enables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to enable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds an output CRTC and connector to a state, and enables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_add_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -75,30 +88,49 @@ int vc4_mock_atomic_add_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
- KUNIT_EXPECT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
crtc_state->active = true;
return 0;
}
+/**
+ * vc4_mock_atomic_del_output() - Disables an output in a state
+ * @test: The test context object
+ * @state: Atomic state to disable the output in.
+ * @type: Type of the output encoder
+ *
+ * Adds an output CRTC and connector to a state, and disables them.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure. If the error is
+ * EDEADLK, the entire atomic sequence must be restarted. All other
+ * errors are fatal.
+ */
int vc4_mock_atomic_del_output(struct kunit *test,
struct drm_atomic_state *state,
enum vc4_encoder_type type)
@@ -113,26 +145,32 @@ int vc4_mock_atomic_del_output(struct kunit *test,
int ret;
encoder = vc4_find_encoder_by_type(drm, type);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+ if (!encoder)
+ return -ENODEV;
crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+ if (!crtc)
+ return -ENODEV;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
crtc_state->active = false;
ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
output = encoder_to_vc4_dummy_output(encoder);
conn = &output->connector;
conn_state = drm_atomic_get_connector_state(state, conn);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
- KUNIT_ASSERT_EQ(test, ret, 0);
+ if (ret)
+ return ret;
return 0;
}
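
The rework above trades KUnit assertions for error returns, and that depends on the kernel's IS_ERR()/PTR_ERR() convention: an errno is encoded in the pointer itself, so callers can propagate it (notably -EDEADLK) instead of aborting the test. A userspace sketch of that convention with a simplified IS_ERR() bound:

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* The top MAX_ERRNO addresses are reserved for error codes. */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *get_state(int fail)
{
	static int state;

	return fail ? ERR_PTR(-12) : &state;	/* -12 is -ENOMEM */
}

int main(void)
{
	void *s = get_state(1);

	if (IS_ERR(s)) {
		printf("propagating %ld\n", PTR_ERR(s));
		return 1;
	}
	return 0;
}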
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 992e8f5c5c6e..d1f694029169 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -20,7 +20,6 @@
struct pv_muxing_priv {
struct vc4_dev *vc4;
- struct drm_atomic_state *state;
};
static bool check_fifo_conflict(struct kunit *test,
@@ -677,18 +676,41 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
KUNIT_EXPECT_TRUE(test,
@@ -700,33 +722,61 @@ static void drm_vc4_test_pv_muxing(struct kunit *test)
KUNIT_EXPECT_TRUE(test, check_channel_for_encoder(test, state, enc_type,
params->check_fn));
}
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
const struct pv_muxing_priv *priv = test->priv;
- struct drm_atomic_state *state = priv->state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
unsigned int i;
int ret;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ vc4 = priv->vc4;
+ drm = &vc4->base;
+
+retry:
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
for (i = 0; i < params->nencoders; i++) {
enum vc4_encoder_type enc_type = params->encoders[i];
ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
}
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
static int vc4_pv_muxing_test_init(struct kunit *test)
{
const struct pv_muxing_param *params = test->param_value;
- struct drm_modeset_acquire_ctx ctx;
struct pv_muxing_priv *priv;
- struct drm_device *drm;
struct vc4_dev *vc4;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
@@ -737,15 +787,6 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
priv->vc4 = vc4;
- drm_modeset_acquire_init(&ctx, 0);
-
- drm = &vc4->base;
- priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
return 0;
}
@@ -800,13 +841,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -823,13 +877,26 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *tes
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -874,16 +941,35 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -908,13 +994,26 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_hvs_state = vc4_hvs_get_new_global_state(state);
@@ -968,25 +1067,50 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct ku
drm_modeset_acquire_init(&ctx, 0);
drm = &vc4->base;
+retry_first:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_first;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
-
ret = drm_atomic_helper_swap_state(state, false);
KUNIT_ASSERT_EQ(test, ret, 0);
+retry_second:
state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_second;
+ }
KUNIT_ASSERT_EQ(test, ret, 0);
new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
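
Every test above now follows the same shape: on -EDEADLK, clear the partial atomic state, call drm_modeset_backoff() to drop all held locks, and restart the acquire sequence from the top. A small userspace analogue of that backoff-and-retry discipline, with pthread trylock standing in for drm_modeset_lock(); everything here is illustrative.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static int acquire_both(void)
{
	pthread_mutex_lock(&lock_a);
	if (pthread_mutex_trylock(&lock_b)) {
		pthread_mutex_unlock(&lock_a);	/* back off: drop all locks */
		return -EDEADLK;		/* caller must restart */
	}
	return 0;
}

int main(void)
{
	int ret;

retry:
	ret = acquire_both();
	if (ret == -EDEADLK)
		goto retry;	/* the tests above do exactly this dance */

	printf("acquired\n");
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return 0;
}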
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 7125773889f1..4aaa587be3a5 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -679,7 +679,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo)
static void vc4_bo_cache_time_timer(struct timer_list *t)
{
- struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
+ struct vc4_dev *vc4 = timer_container_of(vc4, t, bo_cache.time_timer);
schedule_work(&vc4->bo_cache.time_work);
}
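
timer_container_of() is the timer-flavoured spelling of container_of(): recover the enclosing object from a pointer to an embedded member. A compilable sketch of the underlying pointer arithmetic; the kernel macros additionally type-check the member, and the struct names here are stand-ins.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int expires; };

struct bo_cache {
	const char *name;
	struct timer_list time_timer;	/* embedded member */
};

static void timer_fn(struct timer_list *t)
{
	/* Walk back from the member to the containing structure. */
	struct bo_cache *cache = container_of(t, struct bo_cache, time_timer);

	printf("timer fired for %s\n", cache->name);
}

int main(void)
{
	struct bo_cache cache = { .name = "bo_cache" };

	timer_fn(&cache.time_timer);
	return 0;
}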
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 779b22efe27b..458e5d987964 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -552,8 +552,6 @@ struct vc4_dsi {
struct vc4_encoder encoder;
struct mipi_dsi_host dsi_host;
- struct kref kref;
-
struct platform_device *pdev;
struct drm_bridge *out_bridge;
@@ -1160,12 +1158,13 @@ static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
}
static int vc4_dsi_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
/* Attach the panel or bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge,
+ return drm_bridge_attach(encoder, dsi->out_bridge,
&dsi->bridge, flags);
}
@@ -1621,29 +1620,11 @@ static void vc4_dsi_dma_chan_release(void *ptr)
dsi->reg_dma_chan = NULL;
}
-static void vc4_dsi_release(struct kref *kref)
-{
- struct vc4_dsi *dsi =
- container_of(kref, struct vc4_dsi, kref);
-
- kfree(dsi);
-}
-
-static void vc4_dsi_get(struct vc4_dsi *dsi)
-{
- kref_get(&dsi->kref);
-}
-
-static void vc4_dsi_put(struct vc4_dsi *dsi)
-{
- kref_put(&dsi->kref, &vc4_dsi_release);
-}
-
static void vc4_dsi_release_action(struct drm_device *drm, void *ptr)
{
struct vc4_dsi *dsi = ptr;
- vc4_dsi_put(dsi);
+ drm_bridge_put(&dsi->bridge);
}
static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
@@ -1654,7 +1635,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder = &dsi->encoder.base;
int ret;
- vc4_dsi_get(dsi);
+ drm_bridge_get(&dsi->bridge);
ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi);
if (ret)
@@ -1809,15 +1790,12 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct vc4_dsi, bridge, &vc4_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dev_set_drvdata(dev, dsi);
- kref_init(&dsi->kref);
-
dsi->pdev = pdev;
- dsi->bridge.funcs = &vc4_dsi_bridge_funcs;
#ifdef CONFIG_OF
dsi->bridge.of_node = dev->of_node;
#endif
@@ -1835,7 +1813,6 @@ static void vc4_dsi_dev_remove(struct platform_device *pdev)
struct vc4_dsi *dsi = dev_get_drvdata(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
- vc4_dsi_put(dsi);
}
struct platform_driver vc4_dsi_driver = {
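
The DSI rework drops the driver-private kref now that devm_drm_bridge_alloc() ties the object's lifetime to the bridge core's own refcount (drm_bridge_get()/drm_bridge_put()). For reference, a userspace sketch of the kref pattern being removed, with a simplified atomic counter:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k) { atomic_fetch_add(&k->refcount, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	/* The thread that drops the last reference runs the release. */
	if (atomic_fetch_sub(&k->refcount, 1) == 1)
		release(k);
}

struct dsi_like { struct kref kref; };

static void release_dsi(struct kref *k)
{
	struct dsi_like *dsi = container_of(k, struct dsi_like, kref);

	puts("released");
	free(dsi);
}

int main(void)
{
	struct dsi_like *dsi = malloc(sizeof(*dsi));

	if (!dsi)
		return 1;
	kref_init(&dsi->kref);			/* probe holds the first ref */
	kref_get(&dsi->kref);			/* bind takes another */
	kref_put(&dsi->kref, release_dsi);	/* unbind drops it */
	kref_put(&dsi->kref, release_dsi);	/* final put frees */
	return 0;
}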
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 8125f87edc60..255e5817618e 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -326,7 +326,7 @@ vc4_reset_work(struct work_struct *work)
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
- struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
+ struct vc4_dev *vc4 = timer_container_of(vc4, t, hangcheck.timer);
struct drm_device *dev = &vc4->base;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 37238a12baa5..163d092bd973 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -51,6 +51,7 @@
#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/hdmi-codec.h>
+#include <sound/jack.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -372,13 +373,13 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
* the lock for now.
*/
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
if (status == connector_status_disconnected) {
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return;
}
- drm_atomic_helper_connector_hdmi_hotplug(connector, status);
-
cec_s_phys_addr(vc4_hdmi->cec_adap,
connector->display_info.source_physical_address, false);
@@ -559,12 +560,6 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
- ret = drm_connector_hdmi_audio_init(connector, dev->dev,
- &vc4_hdmi_audio_funcs,
- 8, false, -1);
- if (ret)
- return ret;
-
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -2175,6 +2170,22 @@ static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
.shutdown = vc4_hdmi_audio_shutdown,
};
+static int vc4_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct vc4_hdmi *vc4_hdmi = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component;
+ int ret;
+
+ ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT,
+ &vc4_hdmi->hdmi_jack);
+ if (ret) {
+ dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_component_set_jack(component, &vc4_hdmi->hdmi_jack, NULL);
+}
+
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2274,6 +2285,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
return ret;
}
+ ret = drm_connector_hdmi_audio_init(&vc4_hdmi->connector, dev,
+ &vc4_hdmi_audio_funcs, 8, false,
+ -1);
+ if (ret)
+ return ret;
+
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
@@ -2288,6 +2305,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dai_link->cpus->dai_name = dev_name(dev);
dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
+ dai_link->init = vc4_hdmi_codec_init;
card->dai_link = dai_link;
card->num_links = 1;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index e3d989ca302b..a31157c99bee 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -4,6 +4,7 @@
#include <drm/drm_connector.h>
#include <media/cec.h>
#include <sound/dmaengine_pcm.h>
+#include <sound/hdmi-codec.h>
#include <sound/soc.h>
#include "vc4_drv.h"
@@ -211,6 +212,12 @@ struct vc4_hdmi {
* KMS hooks. Protected by @mutex.
*/
enum hdmi_colorspace output_format;
+
+ /**
+ * @hdmi_jack: Represents the connection state of the HDMI plug, for
+ * ALSA jack detection.
+ */
+ struct snd_soc_jack hdmi_jack;
};
#define connector_to_vc4_hdmi(_connector) \
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c5e84d3494d2..056d344c5411 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -2080,7 +2080,7 @@ static int vc6_plane_mode_set(struct drm_plane *plane,
/* HPPF plane 1 */
vc4_dlist_write(vc4_state, kernel);
/* VPPF plane 1 */
- vc4_dlist_write(vc4_state, kernel);
+ vc4_dlist_write(vc4_state, kernel);
}
}
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 37bb1fb58cf9..fd76730fd38c 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -53,30 +53,15 @@ static void vgem_fence_release(struct dma_fence *base)
dma_fence_free(&fence->base);
}
-static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
-{
- snprintf(str, size, "%llu", fence->seqno);
-}
-
-static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
- int size)
-{
- snprintf(str, size, "%llu",
- dma_fence_is_signaled(fence) ? fence->seqno : 0);
-}
-
static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
.release = vgem_fence_release,
-
- .fence_value_str = vgem_fence_value_str,
- .timeline_value_str = vgem_fence_timeline_value_str,
};
static void vgem_fence_timeout(struct timer_list *t)
{
- struct vgem_fence *fence = from_timer(fence, t, timer);
+ struct vgem_fence *fence = timer_container_of(fence, t, timer);
dma_fence_signal(&fence->base);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index f28357dbde35..44c1d8ef3c4d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f)
return false;
}
-static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
-{
- snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
-}
-
-static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
- int size)
-{
- struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
-
- snprintf(str, size, "%llu",
- (u64)atomic64_read(&fence->drv->last_fence_id));
-}
-
static const struct dma_fence_ops virtio_gpu_fence_ops = {
.get_driver_name = virtio_gpu_get_driver_name,
.get_timeline_name = virtio_gpu_get_timeline_name,
.signaled = virtio_gpu_fence_signaled,
- .fence_value_str = virtio_gpu_fence_value_str,
- .timeline_value_str = virtio_gpu_timeline_value_str,
};
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 87e584add042..698ea7adb951 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -366,7 +366,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
obj = new_state->fb->obj[0];
- if (bo->dumb || obj->import_attach) {
+ if (bo->dumb || drm_gem_is_imported(obj)) {
vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
vgdev->fence_drv.context,
0);
@@ -374,7 +374,7 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return -ENOMEM;
}
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
if (ret)
goto err_fence;
@@ -417,7 +417,7 @@ static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
}
obj = state->fb->obj[0];
- if (obj->import_attach)
+ if (drm_gem_is_imported(obj))
virtio_gpu_cleanup_imported_obj(obj);
}
@@ -508,11 +508,19 @@ static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
- /* Only support mapped shmem bo */
- if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
+ if (virtio_gpu_is_vram(bo) || drm_gem_is_imported(&bo->base.base))
return -ENODEV;
- iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ if (bo->base.vaddr) {
+ iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+ } else {
+ struct drm_gem_shmem_object *shmem = &bo->base;
+
+ if (!shmem->pages)
+ return -ENODEV;
+ /* map scanout buffer later */
+ sb->pages = shmem->pages;
+ }
sb->format = plane->state->fb->format;
sb->height = plane->state->fb->height;
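
The scanout-buffer hook above no longer insists on a mapped shmem BO: it uses the kernel virtual address when one exists and otherwise exposes the page array for the panic handler to map later. A sketch of that fallback with stand-in types; the real code fills a struct drm_scanout_buffer.

#include <errno.h>
#include <stdio.h>

struct shmem_bo {
	void *vaddr;	/* non-NULL if already vmapped */
	void **pages;	/* backing pages, may also be NULL */
};

struct scanout_buffer {
	void *map;	/* direct CPU mapping */
	void **pages;	/* to be mapped later by the consumer */
};

static int get_scanout_buffer(struct shmem_bo *bo, struct scanout_buffer *sb)
{
	if (bo->vaddr) {
		sb->map = bo->vaddr;	/* fast path: already mapped */
	} else {
		if (!bo->pages)
			return -ENODEV;	/* nothing to map */
		sb->pages = bo->pages;	/* map scanout buffer later */
	}
	return 0;
}

int main(void)
{
	void *pages[1] = { 0 };
	struct shmem_bo bo = { .vaddr = NULL, .pages = pages };
	struct scanout_buffer sb = { 0 };

	printf("ret=%d pages=%p\n", get_scanout_buffer(&bo, &sb),
	       (void *)sb.pages);
	return 0;
}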
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 4de2a63ccd18..1118a0250279 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -75,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
- .cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
.map_dma_buf = virtgpu_gem_map_dma_buf,
@@ -205,16 +204,15 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
- struct dma_buf_attachment *attach = obj->import_attach;
- if (attach) {
- struct dma_buf *dmabuf = attach->dmabuf;
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dmabuf = obj->dma_buf;
dma_resv_lock(dmabuf->resv, NULL);
virtgpu_dma_buf_unmap(bo);
dma_resv_unlock(dmabuf->resv);
- dma_buf_detach(dmabuf, attach);
+ dma_buf_detach(dmabuf, obj->import_attach);
dma_buf_put(dmabuf);
}
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
index 9def079f685b..3c02f928ffe6 100644
--- a/drivers/gpu/drm/vkms/Kconfig
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -14,3 +14,18 @@ config DRM_VKMS
a VKMS.
If M is selected the module will be called vkms.
+
+config DRM_VKMS_KUNIT_TEST
+ tristate "KUnit tests for VKMS" if !KUNIT_ALL_TESTS
+ depends on DRM_VKMS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for VKMS. This option is not useful for
+ distributions or general kernels, but only for kernel
+ developers working on VKMS.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 1b28a6a32948..d657865e573f 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -6,6 +6,9 @@ vkms-y := \
vkms_formats.o \
vkms_crtc.o \
vkms_composer.o \
- vkms_writeback.o
+ vkms_writeback.o \
+ vkms_connector.o \
+ vkms_config.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/vkms/tests/.kunitconfig b/drivers/gpu/drm/vkms/tests/.kunitconfig
new file mode 100644
index 000000000000..6a2d87068edc
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_VKMS=y
+CONFIG_DRM_VKMS_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
new file mode 100644
index 000000000000..9ded37b67a46
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
new file mode 100644
index 000000000000..ff4566cf9925
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
@@ -0,0 +1,951 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include "../vkms_config.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static size_t vkms_config_get_num_planes(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_encoders(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ count++;
+
+ return count;
+}
+
+static size_t vkms_config_get_num_connectors(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+ size_t count = 0;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ count++;
+
+ return count;
+}
+
+static struct vkms_config_plane *get_first_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ return plane_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_crtc *get_first_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ return crtc_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_encoder *get_first_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ return encoder_cfg;
+
+ return NULL;
+}
+
+static struct vkms_config_connector *get_first_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ return connector_cfg;
+
+ return NULL;
+}
+
+struct default_config_case {
+ bool enable_cursor;
+ bool enable_writeback;
+ bool enable_overlay;
+};
+
+static void vkms_config_test_empty_config(struct kunit *test)
+{
+ struct vkms_config *config;
+ const char *dev_name = "test";
+
+ config = vkms_config_create(dev_name);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* The dev_name string and the config have different lifetimes */
+ dev_name = NULL;
+ KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test");
+
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 0);
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static struct default_config_case default_config_cases[] = {
+ { false, false, false },
+ { true, false, false },
+ { true, true, false },
+ { true, false, true },
+ { false, true, false },
+ { false, true, true },
+ { false, false, true },
+ { true, true, true },
+};
+
+KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL);
+
+static void vkms_config_test_default_config(struct kunit *test)
+{
+ const struct default_config_case *params = test->param_value;
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int n_primaries = 0;
+ int n_cursors = 0;
+ int n_overlays = 0;
+
+ config = vkms_config_default_create(params->enable_cursor,
+ params->enable_writeback,
+ params->enable_overlay);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Planes */
+ vkms_config_for_each_plane(config, plane_cfg) {
+ switch (vkms_config_plane_get_type(plane_cfg)) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ n_primaries++;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ n_cursors++;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ n_overlays++;
+ break;
+ default:
+ KUNIT_FAIL_AND_ABORT(test, "Unknown plane type");
+ }
+ }
+ KUNIT_EXPECT_EQ(test, n_primaries, 1);
+ KUNIT_EXPECT_EQ(test, n_cursors, params->enable_cursor ? 1 : 0);
+ KUNIT_EXPECT_EQ(test, n_overlays, params->enable_overlay ? 8 : 0);
+
+ /* CRTCs */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+
+ crtc_cfg = get_first_crtc(config);
+ KUNIT_EXPECT_EQ(test, vkms_config_crtc_get_writeback(crtc_cfg),
+ params->enable_writeback);
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ int n_possible_crtcs = 0;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ KUNIT_EXPECT_PTR_EQ(test, crtc_cfg, possible_crtc);
+ n_possible_crtcs++;
+ }
+ KUNIT_EXPECT_EQ(test, n_possible_crtcs, 1);
+ }
+
+ /* Encoders */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 1);
+
+ /* Connectors */
+ KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 1);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_planes(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ int n_planes = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ n_planes++;
+ KUNIT_ASSERT_EQ(test, n_planes, 0);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+ n_planes = 0;
+
+ plane_cfg2 = vkms_config_create_plane(config);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 2);
+ n_planes = 0;
+
+ vkms_config_destroy_plane(plane_cfg1);
+ vkms_config_for_each_plane(config, plane_cfg) {
+ n_planes++;
+ if (plane_cfg != plane_cfg2)
+ KUNIT_FAIL(test, "Unexpected plane");
+ }
+ KUNIT_ASSERT_EQ(test, n_planes, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 0);
+ vkms_config_for_each_crtc(config, crtc_cfg)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (crtc_cfg != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected CRTC");
+ }
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ int n_encoders = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ n_encoders++;
+ KUNIT_ASSERT_EQ(test, n_encoders, 0);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_destroy_encoder(config, encoder_cfg2);
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ n_encoders++;
+ if (encoder_cfg != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_get_connectors(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int n_connectors = 0;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ n_connectors++;
+ KUNIT_ASSERT_EQ(test, n_connectors, 0);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ connector_cfg2 = vkms_config_create_connector(config);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1 &&
+ connector_cfg != connector_cfg2)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 2);
+ n_connectors = 0;
+
+ vkms_config_destroy_connector(connector_cfg2);
+ vkms_config_for_each_connector(config, connector_cfg) {
+ n_connectors++;
+ if (connector_cfg != connector_cfg1)
+ KUNIT_FAIL(test, "Unexpected connector");
+ }
+ KUNIT_ASSERT_EQ(test, n_connectors, 1);
+ n_connectors = 0;
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_plane_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No planes */
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many planes */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_plane(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_type(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ vkms_config_destroy_plane(plane_cfg);
+
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: No primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple primary planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Multiple cursor planes */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: One primary and one cursor plane */
+ vkms_config_destroy_plane(plane_cfg);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without primary plane */
+ crtc_cfg = vkms_config_create_crtc(config);
+ encoder_cfg = vkms_config_create_encoder(config);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with a primary plane */
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg = get_first_plane(config);
+ crtc_cfg = get_first_crtc(config);
+
+ /* Invalid: Primary plane without a possible CRTC */
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_crtc_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_crtc *crtc_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No CRTCs */
+ crtc_cfg = get_first_crtc(config);
+ vkms_config_destroy_crtc(config, crtc_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many CRTCs */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_crtc(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_encoder_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No encoders */
+ encoder_cfg = get_first_encoder(config);
+ vkms_config_destroy_encoder(config, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many encoders */
+ for (n = 0; n <= 32; n++)
+ vkms_config_create_encoder(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg;
+ int err;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ crtc_cfg1 = get_first_crtc(config);
+
+ /* Invalid: Encoder without a possible CRTC */
+ encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: Second CRTC with shared encoder */
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ plane_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Second CRTC without encoders */
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg2);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Valid: First CRTC with 2 possible encoders */
+ vkms_config_destroy_plane(plane_cfg);
+ vkms_config_destroy_crtc(config, crtc_cfg2);
+ KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_invalid_connector_number(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ /* Invalid: No connectors */
+ connector_cfg = get_first_connector(config);
+ vkms_config_destroy_connector(connector_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ /* Invalid: Too many connectors */
+ for (n = 0; n <= 32; n++)
+ connector_cfg = vkms_config_create_connector(config);
+
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_valid_connector_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ config = vkms_config_default_create(false, false, false);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg = get_first_encoder(config);
+ connector_cfg = get_first_connector(config);
+
+ /* Invalid: Connector without a possible encoder */
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+ KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_attach_different_configs(struct kunit *test)
+{
+ struct vkms_config *config1, *config2;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ int err;
+
+ config1 = vkms_config_create("test1");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config1);
+
+ config2 = vkms_config_create("test2");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config2);
+
+ plane_cfg1 = vkms_config_create_plane(config1);
+ crtc_cfg1 = vkms_config_create_crtc(config1);
+ encoder_cfg1 = vkms_config_create_encoder(config1);
+ connector_cfg1 = vkms_config_create_connector(config1);
+
+ plane_cfg2 = vkms_config_create_plane(config2);
+ crtc_cfg2 = vkms_config_create_crtc(config2);
+ encoder_cfg2 = vkms_config_create_encoder(config2);
+ connector_cfg2 = vkms_config_create_connector(config2);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
+
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_NE(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg1);
+ KUNIT_EXPECT_NE(test, err, 0);
+
+ vkms_config_destroy(config1);
+ vkms_config_destroy(config2);
+}
+
+static void vkms_config_test_plane_attach_crtc(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *overlay_cfg;
+ struct vkms_config_plane *primary_cfg;
+ struct vkms_config_plane *cursor_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ overlay_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY);
+ primary_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY);
+ cursor_cfg = vkms_config_create_plane(config);
+ vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+
+ /* No primary or cursor planes */
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Overlay plane, but no primary or cursor planes */
+ err = vkms_config_plane_attach_crtc(overlay_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary plane, attaching it twice must fail */
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg);
+ KUNIT_EXPECT_NE(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ /* Primary and cursor planes */
+ err = vkms_config_plane_attach_crtc(cursor_cfg, crtc_cfg);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_primary_plane(config, crtc_cfg),
+ primary_cfg);
+ KUNIT_EXPECT_PTR_EQ(test,
+ vkms_config_crtc_cursor_plane(config, crtc_cfg),
+ cursor_cfg);
+
+ /* Detach the overlay and primary planes and destroy the cursor plane */
+ vkms_config_plane_detach_crtc(overlay_cfg, crtc_cfg);
+ vkms_config_plane_detach_crtc(primary_cfg, crtc_cfg);
+ vkms_config_destroy_plane(cursor_cfg);
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
+ KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg));
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg1, *plane_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ plane_cfg1 = vkms_config_create_plane(config);
+ plane_cfg2 = vkms_config_create_plane(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and 2 */
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Plane 1 attached to CRTC 1 and plane 2 to CRTC 2 */
+ vkms_config_plane_detach_crtc(plane_cfg1, crtc_cfg2);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2;
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ int n_crtcs = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+ crtc_cfg1 = vkms_config_create_crtc(config);
+ crtc_cfg2 = vkms_config_create_crtc(config);
+
+ /* No possible CRTCs */
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and 2 */
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 2);
+ n_crtcs = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+
+ /* Encoder 1 attached to CRTC 1 and encoder 2 to CRTC 2 */
+ vkms_config_encoder_detach_crtc(encoder_cfg1, crtc_cfg2);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+ n_crtcs = 0;
+
+ err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) {
+ n_crtcs++;
+ if (possible_crtc != crtc_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible CRTC");
+ }
+ KUNIT_ASSERT_EQ(test, n_crtcs, 1);
+
+ vkms_config_destroy(config);
+}
+
+static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg1, *connector_cfg2;
+ struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2;
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ int n_encoders = 0;
+ int err;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ connector_cfg1 = vkms_config_create_connector(config);
+ connector_cfg2 = vkms_config_create_connector(config);
+ encoder_cfg1 = vkms_config_create_encoder(config);
+ encoder_cfg2 = vkms_config_create_encoder(config);
+
+ /* No possible encoders */
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoders 1 and 2 */
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg1);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1 &&
+ possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 2);
+ n_encoders = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+
+ /* Connector 1 attached to encoder 1 and connector 2 to encoder 2 */
+ vkms_config_connector_detach_encoder(connector_cfg1, encoder_cfg2);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg1)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+ n_encoders = 0;
+
+ err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg2);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx,
+ possible_encoder) {
+ n_encoders++;
+ if (possible_encoder != encoder_cfg2)
+ KUNIT_FAIL(test, "Unexpected possible encoder");
+ }
+ KUNIT_ASSERT_EQ(test, n_encoders, 1);
+
+ vkms_config_destroy(config);
+}
+
+static struct kunit_case vkms_config_test_cases[] = {
+ KUNIT_CASE(vkms_config_test_empty_config),
+ KUNIT_CASE_PARAM(vkms_config_test_default_config,
+ default_config_gen_params),
+ KUNIT_CASE(vkms_config_test_get_planes),
+ KUNIT_CASE(vkms_config_test_get_crtcs),
+ KUNIT_CASE(vkms_config_test_get_encoders),
+ KUNIT_CASE(vkms_config_test_get_connectors),
+ KUNIT_CASE(vkms_config_test_invalid_plane_number),
+ KUNIT_CASE(vkms_config_test_valid_plane_type),
+ KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_crtc_number),
+ KUNIT_CASE(vkms_config_test_invalid_encoder_number),
+ KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_invalid_connector_number),
+ KUNIT_CASE(vkms_config_test_valid_connector_possible_encoders),
+ KUNIT_CASE(vkms_config_test_attach_different_configs),
+ KUNIT_CASE(vkms_config_test_plane_attach_crtc),
+ KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs),
+ KUNIT_CASE(vkms_config_test_connector_get_possible_encoders),
+ {}
+};
+
+static struct kunit_suite vkms_config_test_suite = {
+ .name = "vkms-config",
+ .test_cases = vkms_config_test_cases,
+};
+
+kunit_test_suite(vkms_config_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit test for vkms config utility");
diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c
new file mode 100644
index 000000000000..a1df5659b0fb
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/slab.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_debugfs.h>
+#include <kunit/visibility.h>
+
+#include "vkms_config.h"
+
+struct vkms_config *vkms_config_create(const char *dev_name)
+{
+ struct vkms_config *config;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return ERR_PTR(-ENOMEM);
+
+ config->dev_name = kstrdup_const(dev_name, GFP_KERNEL);
+ if (!config->dev_name) {
+ kfree(config);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&config->planes);
+ INIT_LIST_HEAD(&config->crtcs);
+ INIT_LIST_HEAD(&config->encoders);
+ INIT_LIST_HEAD(&config->connectors);
+
+ return config;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create);
+
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay)
+{
+ struct vkms_config *config;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+ int n;
+
+ config = vkms_config_create(DEFAULT_DEVICE_NAME);
+ if (IS_ERR(config))
+ return config;
+
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+
+ crtc_cfg = vkms_config_create_crtc(config);
+ if (IS_ERR(crtc_cfg))
+ goto err_alloc;
+ vkms_config_crtc_set_writeback(crtc_cfg, enable_writeback);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+
+ if (enable_overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg,
+ DRM_PLANE_TYPE_OVERLAY);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+ }
+
+ if (enable_cursor) {
+ plane_cfg = vkms_config_create_plane(config);
+ if (IS_ERR(plane_cfg))
+ goto err_alloc;
+
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+
+ if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
+ goto err_alloc;
+ }
+
+ encoder_cfg = vkms_config_create_encoder(config);
+ if (IS_ERR(encoder_cfg))
+ goto err_alloc;
+
+ if (vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg))
+ goto err_alloc;
+
+ connector_cfg = vkms_config_create_connector(config);
+ if (IS_ERR(connector_cfg))
+ goto err_alloc;
+
+ if (vkms_config_connector_attach_encoder(connector_cfg, encoder_cfg))
+ goto err_alloc;
+
+ return config;
+
+err_alloc:
+ vkms_config_destroy(config);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create);
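+
+/*
+ * Note: the configuration built above always contains one primary plane,
+ * one CRTC, one encoder and one connector attached in a single chain, plus
+ * the optional cursor plane and NUM_OVERLAY_PLANES overlay planes.
+ */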
+
+void vkms_config_destroy(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg, *plane_tmp;
+ struct vkms_config_crtc *crtc_cfg, *crtc_tmp;
+ struct vkms_config_encoder *encoder_cfg, *encoder_tmp;
+ struct vkms_config_connector *connector_cfg, *connector_tmp;
+
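+ /* The destroy helpers unlink their entries, hence the _safe iterators */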
+ list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link)
+ vkms_config_destroy_plane(plane_cfg);
+
+ list_for_each_entry_safe(crtc_cfg, crtc_tmp, &config->crtcs, link)
+ vkms_config_destroy_crtc(config, crtc_cfg);
+
+ list_for_each_entry_safe(encoder_cfg, encoder_tmp, &config->encoders, link)
+ vkms_config_destroy_encoder(config, encoder_cfg);
+
+ list_for_each_entry_safe(connector_cfg, connector_tmp, &config->connectors, link)
+ vkms_config_destroy_connector(connector_cfg);
+
+ kfree_const(config->dev_name);
+ kfree(config);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy);
+
+static bool valid_plane_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_planes;
+
+ n_planes = list_count_nodes((struct list_head *)&config->planes);
+ if (n_planes <= 0 || n_planes >= 32) {
+ drm_info(dev, "The number of planes must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_planes_for_crtc(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+ bool has_primary_plane = false;
+ bool has_cursor_plane = false;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc != crtc_cfg)
+ continue;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ if (has_primary_plane) {
+ drm_info(dev, "Multiple primary planes\n");
+ return false;
+ }
+
+ has_primary_plane = true;
+ } else if (type == DRM_PLANE_TYPE_CURSOR) {
+ if (has_cursor_plane) {
+ drm_info(dev, "Multiple cursor planes\n");
+ return false;
+ }
+
+ has_cursor_plane = true;
+ }
+ }
+ }
+
+ if (!has_primary_plane) {
+ drm_info(dev, "Primary plane not found\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_plane_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_plane *plane_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ if (xa_empty(&plane_cfg->possible_crtcs)) {
+ drm_info(dev, "All planes must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_crtc_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_crtcs;
+
+ n_crtcs = list_count_nodes((struct list_head *)&config->crtcs);
+ if (n_crtcs <= 0 || n_crtcs >= 32) {
+ drm_info(dev, "The number of CRTCs must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_encoders;
+
+ n_encoders = list_count_nodes((struct list_head *)&config->encoders);
+ if (n_encoders <= 0 || n_encoders >= 32) {
+ drm_info(dev, "The number of encoders must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_encoder_possible_crtcs(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ if (xa_empty(&encoder_cfg->possible_crtcs)) {
+ drm_info(dev, "All encoders must have at least one possible CRTC\n");
+ return false;
+ }
+ }
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ bool crtc_has_encoder = false;
+
+ vkms_config_for_each_encoder(config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg,
+ idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ crtc_has_encoder = true;
+ }
+ }
+
+ if (!crtc_has_encoder) {
+ drm_info(dev, "All CRTCs must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool valid_connector_number(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ size_t n_connectors;
+
+ n_connectors = list_count_nodes((struct list_head *)&config->connectors);
+ if (n_connectors <= 0 || n_connectors >= 32) {
+ drm_info(dev, "The number of connectors must be between 1 and 31\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool valid_connector_possible_encoders(const struct vkms_config *config)
+{
+ struct drm_device *dev = config->dev ? &config->dev->drm : NULL;
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg) {
+ if (xa_empty(&connector_cfg->possible_encoders)) {
+ drm_info(dev,
+ "All connectors must have at least one possible encoder\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool vkms_config_is_valid(const struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ if (!valid_plane_number(config))
+ return false;
+
+ if (!valid_crtc_number(config))
+ return false;
+
+ if (!valid_encoder_number(config))
+ return false;
+
+ if (!valid_connector_number(config))
+ return false;
+
+ if (!valid_plane_possible_crtcs(config))
+ return false;
+
+ vkms_config_for_each_crtc(config, crtc_cfg) {
+ if (!valid_planes_for_crtc(config, crtc_cfg))
+ return false;
+ }
+
+ if (!valid_encoder_possible_crtcs(config))
+ return false;
+
+ if (!valid_connector_possible_encoders(config))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid);
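+
+/*
+ * A minimal valid configuration can be built by hand as follows (sketch
+ * only; the __must_check return values are elided here and must be checked
+ * in real code):
+ *
+ * config = vkms_config_create("example");
+ * plane_cfg = vkms_config_create_plane(config);
+ * vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
+ * crtc_cfg = vkms_config_create_crtc(config);
+ * vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
+ * encoder_cfg = vkms_config_create_encoder(config);
+ * vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
+ * connector_cfg = vkms_config_create_connector(config);
+ * vkms_config_connector_attach_encoder(connector_cfg, encoder_cfg);
+ *
+ * After this, vkms_config_is_valid(config) returns true.
+ */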
+
+static int vkms_config_show(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ const char *dev_name;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
+
+ dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config);
+ seq_printf(m, "dev_name=%s\n", dev_name);
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ seq_puts(m, "plane:\n");
+ seq_printf(m, "\ttype=%d\n",
+ vkms_config_plane_get_type(plane_cfg));
+ }
+
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ seq_puts(m, "crtc:\n");
+ seq_printf(m, "\twriteback=%d\n",
+ vkms_config_crtc_get_writeback(crtc_cfg));
+ }
+
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg)
+ seq_puts(m, "encoder\n");
+
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg)
+ seq_puts(m, "connector\n");
+
+ return 0;
+}
+
+static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
+ { "vkms_config", vkms_config_show, 0 },
+};
+
+void vkms_config_register_debugfs(struct vkms_device *vkms_device)
+{
+ drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
+ ARRAY_SIZE(vkms_config_debugfs_list));
+}
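+
+/*
+ * For the default configuration, the debugfs file written by
+ * vkms_config_show() looks roughly like this (type 1 being
+ * DRM_PLANE_TYPE_PRIMARY):
+ *
+ * dev_name=vkms
+ * plane:
+ *     type=1
+ * crtc:
+ *     writeback=0
+ * encoder
+ * connector
+ */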
+
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config)
+{
+ struct vkms_config_plane *plane_cfg;
+
+ plane_cfg = kzalloc(sizeof(*plane_cfg), GFP_KERNEL);
+ if (!plane_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ plane_cfg->config = config;
+ vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
+ xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&plane_cfg->link, &config->planes);
+
+ return plane_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_plane);
+
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg)
+{
+ xa_destroy(&plane_cfg->possible_crtcs);
+ list_del(&plane_cfg->link);
+ kfree(plane_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane);
+
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (plane_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
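+ /* A plane may be attached to a given CRTC only once */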
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
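+ /* Store the CRTC at the lowest free index of the possible_crtcs xarray */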
+ return xa_alloc(&plane_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_attach_crtc);
+
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&plane_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_detach_crtc);
+
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config)
+{
+ struct vkms_config_crtc *crtc_cfg;
+
+ crtc_cfg = kzalloc(sizeof(*crtc_cfg), GFP_KERNEL);
+ if (!crtc_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_cfg->config = config;
+ vkms_config_crtc_set_writeback(crtc_cfg, false);
+
+ list_add_tail(&crtc_cfg->link, &config->crtcs);
+
+ return crtc_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_crtc);
+
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+
+ vkms_config_for_each_plane(config, plane_cfg)
+ vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg);
+
+ vkms_config_for_each_encoder(config, encoder_cfg)
+ vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg);
+
+ list_del(&crtc_cfg->link);
+ kfree(crtc_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_crtc);
+
+/**
+ * vkms_config_crtc_get_plane() - Return the first plane of a given type
+ * attached to a CRTC
+ * @config: Configuration containing the CRTC and the planes
+ * @crtc_cfg: Only find planes attached to this CRTC
+ * @type: Plane type to search for
+ *
+ * Returns:
+ * The first plane found attached to @crtc_cfg with the type @type.
+ */
+static struct vkms_config_plane *vkms_config_crtc_get_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg,
+ enum drm_plane_type type)
+{
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *possible_crtc;
+ enum drm_plane_type current_type;
+ unsigned long idx = 0;
+
+ vkms_config_for_each_plane(config, plane_cfg) {
+ current_type = vkms_config_plane_get_type(plane_cfg);
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg && current_type == type)
+ return plane_cfg;
+ }
+ }
+
+ return NULL;
+}
+
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_PRIMARY);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_primary_plane);
+
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_CURSOR);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_cursor_plane);
+
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config)
+{
+ struct vkms_config_encoder *encoder_cfg;
+
+ encoder_cfg = kzalloc(sizeof(*encoder_cfg), GFP_KERNEL);
+ if (!encoder_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ encoder_cfg->config = config;
+ xa_init_flags(&encoder_cfg->possible_crtcs, XA_FLAGS_ALLOC);
+
+ list_add_tail(&encoder_cfg->link, &config->encoders);
+
+ return encoder_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder);
+
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_config_for_each_connector(config, connector_cfg)
+ vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg);
+
+ xa_destroy(&encoder_cfg->possible_crtcs);
+ list_del(&encoder_cfg->link);
+ kfree(encoder_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_encoder);
+
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+ u32 crtc_idx = 0;
+
+ if (encoder_cfg->config != crtc_cfg->config)
+ return -EINVAL;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&encoder_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
+ xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_attach_crtc);
+
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg)
+{
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ if (possible_crtc == crtc_cfg)
+ xa_erase(&encoder_cfg->possible_crtcs, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_detach_crtc);
+
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config)
+{
+ struct vkms_config_connector *connector_cfg;
+
+ connector_cfg = kzalloc(sizeof(*connector_cfg), GFP_KERNEL);
+ if (!connector_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ connector_cfg->config = config;
+ xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC);
+
+ list_add_tail(&connector_cfg->link, &config->connectors);
+
+ return connector_cfg;
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_connector);
+
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg)
+{
+ xa_destroy(&connector_cfg->possible_encoders);
+ list_del(&connector_cfg->link);
+ kfree(connector_cfg);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_connector);
+
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+ u32 encoder_idx = 0;
+
+ if (connector_cfg->config != encoder_cfg->config)
+ return -EINVAL;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ return -EEXIST;
+ }
+
+ return xa_alloc(&connector_cfg->possible_encoders, &encoder_idx,
+ encoder_cfg, xa_limit_32b, GFP_KERNEL);
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_attach_encoder);
+
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg)
+{
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg, idx,
+ possible_encoder) {
+ if (possible_encoder == encoder_cfg)
+ xa_erase(&connector_cfg->possible_encoders, idx);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_detach_encoder);
diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h
new file mode 100644
index 000000000000..0118e3f99706
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_config.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONFIG_H_
+#define _VKMS_CONFIG_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_config - General configuration for VKMS driver
+ *
+ * @dev_name: Name of the device
+ * @planes: List of planes configured for the device
+ * @crtcs: List of CRTCs configured for the device
+ * @encoders: List of encoders configured for the device
+ * @connectors: List of connectors configured for the device
+ * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
+ */
+struct vkms_config {
+ const char *dev_name;
+ struct list_head planes;
+ struct list_head crtcs;
+ struct list_head encoders;
+ struct list_head connectors;
+ struct vkms_device *dev;
+};
+
+/**
+ * struct vkms_config_plane
+ *
+ * @link: Link to the other planes in vkms_config
+ * @config: The vkms_config this plane belongs to
+ * @type: Type of the plane. The creator of the configuration must ensure
+ * that at least one primary plane is present.
+ * @possible_crtcs: Array of CRTCs that can be used with this plane
+ * @plane: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS plane during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_plane {
+ struct list_head link;
+ struct vkms_config *config;
+
+ enum drm_plane_type type;
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct vkms_plane *plane;
+};
+
+/**
+ * struct vkms_config_crtc
+ *
+ * @link: Link to the other CRTCs in vkms_config
+ * @config: The vkms_config this CRTC belongs to
+ * @writeback: If true, a writeback buffer can be attached to the CRTC
+ * @crtc: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS CRTC during
+ * device creation. This pointer is not managed by the configuration and
+ * must be managed by other means.
+ */
+struct vkms_config_crtc {
+ struct list_head link;
+ struct vkms_config *config;
+
+ bool writeback;
+
+ /* Internal usage */
+ struct vkms_output *crtc;
+};
+
+/**
+ * struct vkms_config_encoder
+ *
+ * @link: Link to the other encoders in vkms_config
+ * @config: The vkms_config this encoder belongs to
+ * @possible_crtcs: Array of CRTCs that can be used with this encoder
+ * @encoder: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS encoder
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_encoder {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_crtcs;
+
+ /* Internal usage */
+ struct drm_encoder *encoder;
+};
+
+/**
+ * struct vkms_config_connector
+ *
+ * @link: Link to the other connectors in vkms_config
+ * @config: The vkms_config this connector belongs to
+ * @possible_encoders: Array of encoders that can be used with this connector
+ * @connector: Internal usage. This pointer should never be considered as valid.
+ * It can be used to store a temporary reference to a VKMS connector
+ * during device creation. This pointer is not managed by the
+ * configuration and must be managed by other means.
+ */
+struct vkms_config_connector {
+ struct list_head link;
+ struct vkms_config *config;
+
+ struct xarray possible_encoders;
+
+ /* Internal usage */
+ struct vkms_connector *connector;
+};
+
+/**
+ * vkms_config_for_each_plane - Iterate over the vkms_config planes
+ * @config: &struct vkms_config pointer
+ * @plane_cfg: &struct vkms_config_plane pointer used as cursor
+ */
+#define vkms_config_for_each_plane(config, plane_cfg) \
+ list_for_each_entry((plane_cfg), &(config)->planes, link)
+
+/**
+ * vkms_config_for_each_crtc - Iterate over the vkms_config CRTCs
+ * @config: &struct vkms_config pointer
+ * @crtc_cfg: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_for_each_crtc(config, crtc_cfg) \
+ list_for_each_entry((crtc_cfg), &(config)->crtcs, link)
+
+/**
+ * vkms_config_for_each_encoder - Iterate over the vkms_config encoders
+ * @config: &struct vkms_config pointer
+ * @encoder_cfg: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_for_each_encoder(config, encoder_cfg) \
+ list_for_each_entry((encoder_cfg), &(config)->encoders, link)
+
+/**
+ * vkms_config_for_each_connector - Iterate over the vkms_config connectors
+ * @config: &struct vkms_config pointer
+ * @connector_cfg: &struct vkms_config_connector pointer used as cursor
+ */
+#define vkms_config_for_each_connector(config, connector_cfg) \
+ list_for_each_entry((connector_cfg), &(config)->connectors, link)
+
+/**
+ * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane
+ * possible CRTCs
+ * @plane_cfg: &struct vkms_config_plane pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) \
+ xa_for_each(&(plane_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_encoder_for_each_possible_crtc - Iterate over the
+ * vkms_config_encoder possible CRTCs
+ * @encoder_cfg: &struct vkms_config_encoder pointer
+ * @idx: Index of the cursor
+ * @possible_crtc: &struct vkms_config_crtc pointer used as cursor
+ */
+#define vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) \
+ xa_for_each(&(encoder_cfg)->possible_crtcs, idx, (possible_crtc))
+
+/**
+ * vkms_config_connector_for_each_possible_encoder - Iterate over the
+ * vkms_config_connector possible encoders
+ * @connector_cfg: &struct vkms_config_connector pointer
+ * @idx: Index of the cursor
+ * @possible_encoder: &struct vkms_config_encoder pointer used as cursor
+ */
+#define vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, possible_encoder) \
+ xa_for_each(&(connector_cfg)->possible_encoders, idx, (possible_encoder))
+
+/**
+ * vkms_config_create() - Create a new VKMS configuration
+ * @dev_name: Name of the device
+ *
+ * Returns:
+ * The new vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_create(const char *dev_name);
+
+/**
+ * vkms_config_default_create() - Create the configuration for the default device
+ * @enable_cursor: Whether to create a cursor plane
+ * @enable_writeback: Whether to create a writeback connector
+ * @enable_overlay: Whether to create overlay planes
+ *
+ * Returns:
+ * The default vkms_config or an error. Call vkms_config_destroy() to free the
+ * returned configuration.
+ */
+struct vkms_config *vkms_config_default_create(bool enable_cursor,
+ bool enable_writeback,
+ bool enable_overlay);
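+
+/*
+ * Usage sketch (illustrative; error handling abbreviated):
+ *
+ * struct vkms_config *config;
+ *
+ * config = vkms_config_default_create(true, false, true);
+ * if (IS_ERR(config))
+ *     return PTR_ERR(config);
+ * ...
+ * vkms_config_destroy(config);
+ */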
+
+/**
+ * vkms_config_destroy() - Free a VKMS configuration
+ * @config: vkms_config to free
+ */
+void vkms_config_destroy(struct vkms_config *config);
+
+/**
+ * vkms_config_get_device_name() - Return the name of the device
+ * @config: Configuration to get the device name from
+ *
+ * Returns:
+ * The device name. Only valid while @config is valid.
+ */
+static inline const char *
+vkms_config_get_device_name(struct vkms_config *config)
+{
+ return config->dev_name;
+}
+
+/**
+ * vkms_config_get_num_crtcs() - Return the number of CRTCs in the configuration
+ * @config: Configuration to get the number of CRTCs from
+ */
+static inline size_t vkms_config_get_num_crtcs(struct vkms_config *config)
+{
+ return list_count_nodes(&config->crtcs);
+}
+
+/**
+ * vkms_config_is_valid() - Validate a configuration
+ * @config: Configuration to validate
+ *
+ * Returns:
+ * Whether the configuration is valid or not.
+ * For example, a configuration without primary planes is not valid.
+ */
+bool vkms_config_is_valid(const struct vkms_config *config);
+
+/**
+ * vkms_config_register_debugfs() - Register a debugfs file to show the device's
+ * configuration
+ * @vkms_device: Device to register
+ */
+void vkms_config_register_debugfs(struct vkms_device *vkms_device);
+
+/**
+ * vkms_config_create_plane() - Add a new plane configuration
+ * @config: Configuration to add the plane to
+ *
+ * Returns:
+ * The new plane configuration or an error. Call vkms_config_destroy_plane() to
+ * free the returned plane configuration.
+ */
+struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_plane() - Remove and free a plane configuration
+ * @plane_cfg: Plane configuration to destroy
+ */
+void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg);
+
+/**
+ * vkms_config_plane_get_type() - Return the plane type
+ * @plane_cfg: Plane to get the type from
+ */
+static inline enum drm_plane_type
+vkms_config_plane_get_type(struct vkms_config_plane *plane_cfg)
+{
+ return plane_cfg->type;
+}
+
+/**
+ * vkms_config_plane_set_type() - Set the plane type
+ * @plane_cfg: Plane to set the type to
+ * @type: New plane type
+ */
+static inline void
+vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg,
+ enum drm_plane_type type)
+{
+ plane_cfg->type = type;
+}
+
+/**
+ * vkms_config_plane_attach_crtc - Attach a plane to a CRTC
+ * @plane_cfg: Plane to attach
+ * @crtc_cfg: CRTC to attach @plane_cfg to
+ *
+ * Returns:
+ * 0 on success, -EINVAL if the plane and the CRTC belong to different
+ * configurations, -EEXIST if they are already attached, or a negative error
+ * code if storing the new attachment fails.
+ */
+int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_plane_detach_crtc - Detach a plane from a CRTC
+ * @plane_cfg: Plane to detach
+ * @crtc_cfg: CRTC to detach @plane_cfg from
+ */
+void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_crtc() - Add a new CRTC configuration
+ * @config: Configuration to add the CRTC to
+ *
+ * Returns:
+ * The new CRTC configuration or an error. Call vkms_config_destroy_crtc() to
+ * free the returned CRTC configuration.
+ */
+struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_crtc() - Remove and free a CRTC configuration
+ * @config: Configuration to remove the CRTC from
+ * @crtc_cfg: CRTC configuration to destroy
+ */
+void vkms_config_destroy_crtc(struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_get_writeback() - Return whether a writeback connector
+ * will be created
+ * @crtc_cfg: CRTC with or without a writeback connector
+ */
+static inline bool
+vkms_config_crtc_get_writeback(struct vkms_config_crtc *crtc_cfg)
+{
+ return crtc_cfg->writeback;
+}
+
+/**
+ * vkms_config_crtc_set_writeback() - Set whether a writeback connector will
+ * be created
+ * @crtc_cfg: Target CRTC
+ * @writeback: Enable or disable the writeback connector
+ */
+static inline void
+vkms_config_crtc_set_writeback(struct vkms_config_crtc *crtc_cfg,
+ bool writeback)
+{
+ crtc_cfg->writeback = writeback;
+}
+
+/**
+ * vkms_config_crtc_primary_plane() - Return the primary plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple primary planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The primary plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_crtc_cursor_plane() - Return the cursor plane for a CRTC
+ * @config: Configuration containing the CRTC
+ * @crtc_cfg: Target CRTC
+ *
+ * Note that, if multiple cursor planes are found, the first one is returned.
+ * In this case, the configuration will be invalid. See vkms_config_is_valid().
+ *
+ * Returns:
+ * The cursor plane or NULL if none is assigned yet.
+ */
+struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_encoder() - Add a new encoder configuration
+ * @config: Configuration to add the encoder to
+ *
+ * Returns:
+ * The new encoder configuration or an error. Call vkms_config_destroy_encoder()
+ * to free the returned encoder configuration.
+ */
+struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_encoder() - Remove and free an encoder configuration
+ * @config: Configuration to remove the encoder from
+ * @encoder_cfg: Encoder configuration to destroy
+ */
+void vkms_config_destroy_encoder(struct vkms_config *config,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_encoder_attach_crtc - Attach an encoder to a CRTC
+ * @encoder_cfg: Encoder to attach
+ * @crtc_cfg: CRTC to attach @encoder_cfg to
+ *
+ * Returns:
+ * 0 on success, -EINVAL if the encoder and the CRTC belong to different
+ * configurations, -EEXIST if they are already attached, or a negative error
+ * code if storing the new attachment fails.
+ */
+int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_encoder_detach_crtc - Detach an encoder from a CRTC
+ * @encoder_cfg: Encoder to detach
+ * @crtc_cfg: CRTC to detach @encoder_cfg from
+ */
+void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg,
+ struct vkms_config_crtc *crtc_cfg);
+
+/**
+ * vkms_config_create_connector() - Add a new connector configuration
+ * @config: Configuration to add the connector to
+ *
+ * Returns:
+ * The new connector configuration or an error. Call
+ * vkms_config_destroy_connector() to free the returned connector configuration.
+ */
+struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config);
+
+/**
+ * vkms_config_destroy_connector() - Remove and free a connector configuration
+ * @connector_cfg: Connector configuration to destroy
+ */
+void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg);
+
+/**
+ * vkms_config_connector_attach_encoder - Attach a connector to an encoder
+ * @connector_cfg: Connector to attach
+ * @encoder_cfg: Encoder to attach @connector_cfg to
+ *
+ * Returns:
+ * 0 on success, -EINVAL if the connector and the encoder belong to different
+ * configurations, -EEXIST if they are already attached, or a negative error
+ * code if storing the new attachment fails.
+ */
+int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+/**
+ * vkms_config_connector_detach_encoder - Detach a connector from an encoder
+ * @connector_cfg: Connector to detach
+ * @encoder_cfg: Encoder to detach @connector_cfg from
+ */
+void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
+ struct vkms_config_encoder *encoder_cfg);
+
+#endif /* _VKMS_CONFIG_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c
new file mode 100644
index 000000000000..48b10cba322a
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+
+#include "vkms_connector.h"
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ /* Use the default modes list from DRM */
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return count;
+}
+
+static struct drm_encoder *vkms_conn_best_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
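+ /* Arbitrarily pick the first possible encoder; VKMS does not rank them */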
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+ .best_encoder = vkms_conn_best_encoder,
+};
+
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+ struct vkms_connector *connector;
+ int ret;
+
+ connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drmm_connector_init(dev, &connector->base, &vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(&connector->base, &vkms_conn_helper_funcs);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h
new file mode 100644
index 000000000000..c9149c1b7af0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_connector.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_CONNECTOR_H_
+#define _VKMS_CONNECTOR_H_
+
+#include "vkms_drv.h"
+
+/**
+ * struct vkms_connector - VKMS custom type wrapping around the DRM connector
+ *
+ * @base: Base DRM connector
+ */
+struct vkms_connector {
+ struct drm_connector base;
+};
+
+/**
+ * vkms_connector_init() - Initialize a connector
+ * @vkmsdev: VKMS device containing the connector
+ *
+ * Returns:
+ * The connector or an error on failure.
+ */
+struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev);
+
+#endif /* _VKMS_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 12034ec12029..8c9898b9055d 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -194,7 +194,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i++;
}
- vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
+ vkms_state->active_planes = kcalloc(i, sizeof(*vkms_state->active_planes), GFP_KERNEL);
if (!vkms_state->active_planes)
return -ENOMEM;
vkms_state->num_active_planes = i;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index b6de91134a22..a24d1655f7b8 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -27,11 +27,9 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
+#include "vkms_config.h"
#include "vkms_drv.h"
-#include <drm/drm_print.h>
-#include <drm/drm_debugfs.h>
-
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
#define DRIVER_MAJOR 1
@@ -81,23 +79,6 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
}
-static int vkms_config_show(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
-
- seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
- seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor);
- seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay);
-
- return 0;
-}
-
-static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
- { "vkms_config", vkms_config_show, 0 },
-};
-
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.fops = &vkms_driver_fops,
@@ -170,8 +151,10 @@ static int vkms_create(struct vkms_config *config)
int ret;
struct platform_device *pdev;
struct vkms_device *vkms_device;
+ const char *dev_name;
- pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ dev_name = vkms_config_get_device_name(config);
+ pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -198,7 +181,8 @@ static int vkms_create(struct vkms_config *config)
goto out_devres;
}
- ret = drm_vblank_init(&vkms_device->drm, 1);
+ ret = drm_vblank_init(&vkms_device->drm,
+ vkms_config_get_num_crtcs(config));
if (ret) {
DRM_ERROR("Failed to vblank\n");
goto out_devres;
@@ -208,8 +192,7 @@ static int vkms_create(struct vkms_config *config)
if (ret)
goto out_devres;
- drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
- ARRAY_SIZE(vkms_config_debugfs_list));
+ vkms_config_register_debugfs(vkms_device);
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
@@ -231,17 +214,13 @@ static int __init vkms_init(void)
int ret;
struct vkms_config *config;
- config = kmalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- config->cursor = enable_cursor;
- config->writeback = enable_writeback;
- config->overlay = enable_overlay;
+ config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay);
+ if (IS_ERR(config))
+ return PTR_ERR(config);
ret = vkms_create(config);
if (ret) {
- kfree(config);
+ vkms_config_destroy(config);
return ret;
}
@@ -275,7 +254,7 @@ static void __exit vkms_exit(void)
return;
vkms_destroy(default_config);
- kfree(default_config);
+ vkms_config_destroy(default_config);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index abbb652be2b5..a74a7fc3a056 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -12,6 +12,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_writeback.h>
+#define DEFAULT_DEVICE_NAME "vkms"
+
#define XRES_MIN 10
#define YRES_MIN 10
@@ -189,20 +191,7 @@ struct vkms_output {
spinlock_t composer_lock;
};
-/**
- * struct vkms_config - General configuration for VKMS driver
- *
- * @writeback: If true, a writeback buffer can be attached to the CRTC
- * @cursor: If true, a cursor plane is created in the VKMS device
- * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device
- * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
- */
-struct vkms_config {
- bool writeback;
- bool cursor;
- bool overlay;
- struct vkms_device *dev;
-};
+struct vkms_config;
/**
* struct vkms_device - Description of a VKMS device
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 22f0d678af3a..8d7ca0cdd79f 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,121 +1,111 @@
// SPDX-License-Identifier: GPL-2.0+
+#include "vkms_config.h"
+#include "vkms_connector.h"
#include "vkms_drv.h"
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
-#include <drm/drm_probe_helper.h>
-
-static const struct drm_connector_funcs vkms_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int vkms_conn_get_modes(struct drm_connector *connector)
-{
- int count;
-
- /* Use the default modes list from DRM */
- count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
- drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
-
- return count;
-}
-
-static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
- .get_modes = vkms_conn_get_modes,
-};
int vkms_output_init(struct vkms_device *vkmsdev)
{
struct drm_device *dev = &vkmsdev->drm;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct vkms_output *output;
- struct vkms_plane *primary, *overlay, *cursor = NULL;
+ struct vkms_config_plane *plane_cfg;
+ struct vkms_config_crtc *crtc_cfg;
+ struct vkms_config_encoder *encoder_cfg;
+ struct vkms_config_connector *connector_cfg;
int ret;
int writeback;
- unsigned int n;
-
- /*
- * Initialize used plane. One primary plane is required to perform the composition.
- *
- * The overlay and cursor planes are not mandatory, but can be used to perform complex
- * composition.
- */
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
- if (IS_ERR(primary))
- return PTR_ERR(primary);
-
- if (vkmsdev->config->cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
- if (IS_ERR(cursor))
- return PTR_ERR(cursor);
- }
- output = vkms_crtc_init(dev, &primary->base,
- cursor ? &cursor->base : NULL);
- if (IS_ERR(output)) {
- DRM_ERROR("Failed to allocate CRTC\n");
- return PTR_ERR(output);
- }
+ if (!vkms_config_is_valid(vkmsdev->config))
+ return -EINVAL;
- if (vkmsdev->config->overlay) {
- for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(overlay)) {
- DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
- return PTR_ERR(overlay);
- }
- overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc);
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ enum drm_plane_type type;
+
+ type = vkms_config_plane_get_type(plane_cfg);
+
+ plane_cfg->plane = vkms_plane_init(vkmsdev, type);
+ if (IS_ERR(plane_cfg->plane)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
+ return PTR_ERR(plane_cfg->plane);
}
}
- connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
- if (!connector) {
- DRM_ERROR("Failed to allocate connector\n");
- return -ENOMEM;
- }
+ vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) {
+ struct vkms_config_plane *primary, *cursor;
- ret = drmm_connector_init(dev, connector, &vkms_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init connector\n");
- return ret;
- }
+ primary = vkms_config_crtc_primary_plane(vkmsdev->config, crtc_cfg);
+ cursor = vkms_config_crtc_cursor_plane(vkmsdev->config, crtc_cfg);
- drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+ crtc_cfg->crtc = vkms_crtc_init(dev, &primary->plane->base,
+ cursor ? &cursor->plane->base : NULL);
+ if (IS_ERR(crtc_cfg->crtc)) {
+ DRM_ERROR("Failed to allocate CRTC\n");
+ return PTR_ERR(crtc_cfg->crtc);
+ }
- encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder) {
- DRM_ERROR("Failed to allocate encoder\n");
- return -ENOMEM;
+ /* Initialize the writeback component */
+ if (vkms_config_crtc_get_writeback(crtc_cfg)) {
+ writeback = vkms_enable_writeback_connector(vkmsdev, crtc_cfg->crtc);
+ if (writeback)
+ DRM_ERROR("Failed to init writeback connector\n");
+ }
}
- ret = drmm_encoder_init(dev, encoder, NULL,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret) {
- DRM_ERROR("Failed to init encoder\n");
- return ret;
+
+ vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) {
+ plane_cfg->plane->base.possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- encoder->possible_crtcs = drm_crtc_mask(&output->crtc);
- /* Attach the encoder and the connector */
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("Failed to attach connector to encoder\n");
- return ret;
+ vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) {
+ struct vkms_config_crtc *possible_crtc;
+ unsigned long idx = 0;
+
+ encoder_cfg->encoder = drmm_kzalloc(dev, sizeof(*encoder_cfg->encoder), GFP_KERNEL);
+ if (!encoder_cfg->encoder) {
+ DRM_ERROR("Failed to allocate encoder\n");
+ return -ENOMEM;
+ }
+ ret = drmm_encoder_init(dev, encoder_cfg->encoder, NULL,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ return ret;
+ }
+
+ vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
+ encoder_cfg->encoder->possible_crtcs |=
+ drm_crtc_mask(&possible_crtc->crtc->crtc);
+ }
}
- /* Initialize the writeback component */
- if (vkmsdev->config->writeback) {
- writeback = vkms_enable_writeback_connector(vkmsdev, output);
- if (writeback)
- DRM_ERROR("Failed to init writeback connector\n");
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg) {
+ struct vkms_config_encoder *possible_encoder;
+ unsigned long idx = 0;
+
+ connector_cfg->connector = vkms_connector_init(vkmsdev);
+ if (IS_ERR(connector_cfg->connector)) {
+ DRM_ERROR("Failed to init connector\n");
+ return PTR_ERR(connector_cfg->connector);
+ }
+
+ vkms_config_connector_for_each_possible_encoder(connector_cfg,
+ idx,
+ possible_encoder) {
+ ret = drm_connector_attach_encoder(&connector_cfg->connector->base,
+ possible_encoder->encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ return ret;
+ }
+ }
}
drm_mode_config_reset(dev);
- return ret;
+ return 0;
}
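
The rewrite above derives the whole VKMS output pipeline from a vkms_config description instead of hard-coding one primary plane, one encoder and one connector. A hedged sketch of the recurring pattern, written as a hypothetical helper (the iterator and drm_crtc_mask() are the ones used above; error handling elided):

	static u32 vkms_collect_possible_crtcs(struct vkms_config_plane *plane_cfg)
	{
		struct vkms_config_crtc *possible_crtc;
		unsigned long idx = 0;
		u32 mask = 0;

		/* Accumulate a CRTC bitmask from the config's possible-CRTC list. */
		vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc)
			mask |= drm_crtc_mask(&possible_crtc->crtc->crtc);

		return mask;
	}

The same accumulation repeats for encoders; connectors are instead attached to each of their possible encoders with drm_connector_attach_encoder().
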
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6c3c2922ae8b..aab646b91ca9 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI
depends on (X86 && HYPERVISOR_GUEST) || ARM64
select DRM_CLIENT_SELECTION
select DRM_TTM
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 46a4ab688a7f..b168fd7fe9b3 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \
- vmwgfx_gem.o vmwgfx_vkms.o
+ vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 64bd7d74854e..fa5841fda659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -429,7 +429,7 @@ static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
void *ptr = NULL;
int ret;
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
if (ret) {
drm_dbg_driver(&vmw->drm,
@@ -447,7 +447,7 @@ out:
static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
{
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_vunmap(bo->tbo.base.dma_buf, map);
else
vmw_bo_unmap(bo);
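
The blit changes switch from peeking at import_attach to the drm_gem_is_imported() helper. A minimal sketch of the guard pattern, assuming only the calls used above (vmap_any() is a hypothetical name):

	static int vmap_any(struct drm_gem_object *obj, struct iosys_map *map)
	{
		/* Imported objects must be mapped through the exporter's dma-buf. */
		if (drm_gem_is_imported(obj))
			return dma_buf_vmap(obj->dma_buf, map);

		/* Native objects take the driver's own mapping path instead. */
		return -EOPNOTSUPP; /* placeholder for the driver-specific vmap */
	}
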
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9b5b8c1f063b..f031a312c783 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -36,8 +36,7 @@ static void vmw_bo_release(struct vmw_bo *vbo)
{
struct vmw_resource *res;
- WARN_ON(vbo->tbo.base.funcs &&
- kref_read(&vbo->tbo.base.refcount) != 0);
+ WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
xa_destroy(&vbo->detached_resources);
@@ -51,11 +50,13 @@ static void vmw_bo_release(struct vmw_bo *vbo)
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void)vmw_resource_reserve(res, false, true);
vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
res->guest_memory_bo = NULL;
res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL,
+ vmw_resource_unreserve(res, true, false, false, NULL,
0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@@ -73,9 +74,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
- WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
+ WARN_ON(vbo->dirty);
kfree(vbo);
}
@@ -467,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw,
if (unlikely(ret != 0))
goto out_error;
+ (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs;
return ret;
out_error:
*p_bo = NULL;
@@ -848,9 +850,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
- xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+ return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
}
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
@@ -887,3 +889,9 @@ out:
surf = vmw_res_to_srf(res);
return surf;
}
+
+s32 vmw_bo_mobid(struct vmw_bo *vbo)
+{
+ WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB);
+ return (s32)vbo->tbo.resource->start;
+}
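
vmw_bo_add_detached_resource() now reports xarray store failures instead of dropping them: xa_store() returns either the previous entry or an xa_err()-encoded pointer, and xa_err() converts that into 0 or a negative errno. A short sketch of the idiom:

	/* Store ptr at index and propagate allocation failures to the caller. */
	int err = xa_err(xa_store(&xa, index, ptr, GFP_KERNEL));
	if (err)
		return err;	/* typically -ENOMEM; nothing was stored */

The header change below updates the prototype so callers can act on the error.
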
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 11e330c7c7f5..cf84a163bfcb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
@@ -204,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)
*buf = NULL;
if (tmp_buf)
- ttm_bo_put(&tmp_buf->tbo);
+ drm_gem_object_put(&tmp_buf->tbo.base);
}
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
- ttm_bo_get(&buf->tbo);
+ drm_gem_object_get(&buf->tbo.base);
return buf;
}
@@ -233,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
return container_of((gobj), struct vmw_bo, tbo.base);
}
+s32 vmw_bo_mobid(struct vmw_bo *vbo);
+
#endif // VMWGFX_BO_H
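
With GEM object funcs now set for every vmw_bo (see the vmw_bo_create() hunk above), the reference helpers pin the GEM object rather than taking raw TTM references, so a vmw_bo's lifetime follows drm_gem_object_get()/drm_gem_object_put(). Caller-visible usage is unchanged:

	/* Sketch: take and drop a vmw_bo reference through the GEM refcount. */
	struct vmw_bo *ref = vmw_bo_reference(bo);	/* drm_gem_object_get() */
	/* ... use ref ... */
	vmw_bo_unreference(&ref);			/* drm_gem_object_put(), ref = NULL */
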
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index a7c07692262b..98331c4c0335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
new file mode 100644
index 000000000000..718832b08d96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+#include "vmwgfx_cursor_plane.h"
+
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmw_surface_cache.h"
+
+#include "drm/drm_atomic.h"
+#include "drm/drm_atomic_helper.h"
+#include "drm/drm_plane.h"
+#include <asm/page.h>
+
+#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
+#define VMW_CURSOR_SNOOP_WIDTH 64
+#define VMW_CURSOR_SNOOP_HEIGHT 64
+
+struct vmw_svga_fifo_cmd_define_cursor {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+};
+
+/**
+ * vmw_send_define_cursor_cmd - queue a define cursor command
+ * @dev_priv: the private driver struct
+ * @image: buffer which holds the cursor image
+ * @width: width of the mouse cursor image
+ * @height: height of the mouse cursor image
+ * @hotspotX: the horizontal position of mouse hotspot
+ * @hotspotY: the vertical position of mouse hotspot
+ */
+static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct vmw_svga_fifo_cmd_define_cursor *cmd;
+ const u32 image_size = width * height * sizeof(*image);
+ const u32 cmd_size = sizeof(*cmd) + image_size;
+
+ /*
+ * Try to reserve fifocmd space and swallow any failures;
+ * such reservations cannot be left unconsumed for long
+	 * at the risk of clogging other fifocmd users, so
+	 * we treat reservations separately from the way we treat
+ * other fallible KMS-atomic resources at prepare_fb
+ */
+ cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
+
+ if (unlikely(!cmd))
+ return;
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
+
+ vmw_cmd_commit_flush(dev_priv, cmd_size);
+}
+
+static void
+vmw_cursor_plane_update_legacy(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+ s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+
+ if (WARN_ON(!surface || !surface->snooper.image))
+ return;
+
+ if (vps->cursor.legacy.id != surface->snooper.id) {
+ vmw_send_define_cursor_cmd(vmw, surface->snooper.image,
+ vps->base.crtc_w, vps->base.crtc_h,
+ hotspot_x, hotspot_y);
+ vps->cursor.legacy.id = surface->snooper.id;
+ }
+}
+
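+/*
+ * Pick the update mechanism for this cursor: surfaces with a snooper image
+ * take the legacy SVGA_CMD_DEFINE_ALPHA_CURSOR path, otherwise a cursor MOB
+ * is used when the device advertises SVGA_CAP2_CURSOR_MOB.
+ */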
+static enum vmw_cursor_update_type
+vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
+{
+ struct vmw_surface *surface = vmw_user_object_surface(&vps->uo);
+
+ if (surface && surface->snooper.image)
+ return VMW_CURSOR_UPDATE_LEGACY;
+
+ if (vmw->has_mob) {
+ if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
+ return VMW_CURSOR_UPDATE_MOB;
+ }
+
+ return VMW_CURSOR_UPDATE_NONE;
+}
+
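+/*
+ * Write the cursor image into the pinned cursor MOB (an SVGAGBCursorHeader
+ * followed by the ARGB pixels) and point SVGA_REG_CURSOR_MOBID at it.
+ */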
+static void vmw_cursor_update_mob(struct vmw_private *vmw,
+ struct vmw_plane_state *vps)
+{
+ SVGAGBCursorHeader *header;
+ SVGAGBAlphaCursorHeader *alpha_header;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
+ u32 *image = vmw_bo_map_and_cache(bo);
+ const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image);
+
+ header = vmw_bo_map_and_cache(vps->cursor.mob);
+ alpha_header = &header->header.alphaHeader;
+
+ memset(header, 0, sizeof(*header));
+
+ header->type = SVGA_ALPHA_CURSOR;
+ header->sizeInBytes = image_size;
+
+ alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x;
+ alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y;
+ alpha_header->width = vps->base.crtc_w;
+ alpha_header->height = vps->base.crtc_h;
+
+ memcpy(header + 1, image, image_size);
+ vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob));
+
+ vmw_bo_unmap(bo);
+ vmw_bo_unmap(vps->cursor.mob);
+}
+
+static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
+ u32 w, u32 h)
+{
+ switch (update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_NONE:
+ return 0;
+ case VMW_CURSOR_UPDATE_MOB:
+ return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
+ }
+ return 0;
+}
+
+static void vmw_cursor_mob_destroy(struct vmw_bo **vbo)
+{
+ if (!(*vbo))
+ return;
+
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
+}
+
+/**
+ * vmw_cursor_mob_unmap - Unmaps the cursor mob.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+static int
+vmw_cursor_mob_unmap(struct vmw_plane_state *vps)
+{
+ int ret = 0;
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo || !vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
+ if (likely(ret == 0)) {
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
+ }
+
+ return ret;
+}
+
+static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ u32 i;
+
+ if (!vps->cursor.mob)
+ return;
+
+ vmw_cursor_mob_unmap(vps);
+
+ /* Look for a free slot to return this mob to the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (!vcp->cursor_mobs[i]) {
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Cache is full: See if this mob is bigger than an existing mob. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.mob->tbo.base.size) {
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+ vcp->cursor_mobs[i] = vps->cursor.mob;
+ vps->cursor.mob = NULL;
+ return;
+ }
+ }
+
+ /* Destroy it if it's not worth caching. */
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+}
+
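+/*
+ * Find or create a MOB big enough for the current cursor: prefer a cached
+ * mob from the plane, otherwise allocate a new one and fence its creation
+ * so the MOB is guaranteed to exist before it is used.
+ */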
+static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp,
+ struct vmw_plane_state *vps)
+{
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ u32 i;
+ u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (!dev_priv->has_mob ||
+ (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
+ return -EINVAL;
+
+ mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
+ cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
+
+ if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
+ vps->base.crtc_h > cursor_max_dim)
+ return -EINVAL;
+
+ if (vps->cursor.mob) {
+ if (vps->cursor.mob->tbo.base.size >= size)
+ return 0;
+ vmw_cursor_mob_put(vcp, vps);
+ }
+
+ /* Look for an unused mob in the cache. */
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
+ if (vcp->cursor_mobs[i] &&
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
+ vps->cursor.mob = vcp->cursor_mobs[i];
+ vcp->cursor_mobs[i] = NULL;
+ return 0;
+ }
+ }
+ /* Create a new mob if we can't find an existing one. */
+ ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB,
+ &vps->cursor.mob);
+
+ if (ret != 0)
+ return ret;
+
+	/* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL);
+ if (ret != 0)
+ goto teardown;
+
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
+ ttm_bo_unreserve(&vps->cursor.mob->tbo);
+
+ return 0;
+
+teardown:
+ vmw_cursor_mob_destroy(&vps->cursor.mob);
+ return ret;
+}
+
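+/*
+ * Program the cursor position and visibility through whichever interface
+ * the device supports: the CURSOR4 registers, the cursor bypass 3 FIFO
+ * registers, or the legacy cursor registers.
+ */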
+static void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ const u32 svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
+ : SVGA_CURSOR_ON_HIDE;
+ u32 count;
+
+ spin_lock(&dev_priv->cursor_lock);
+ if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
+ vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
+ } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
+ }
+ spin_unlock(&dev_priv->cursor_lock);
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ u32 box_count;
+ void *virtual;
+ bool is_iomem;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int i, ret;
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed, nothing to copy */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->d != 1 || box_count != 1 ||
+ box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
+		/* TODO handle non-page-aligned offsets */
+		/* TODO handle more dst & src != 0 */
+		/* TODO handle more than one copy */
+ DRM_ERROR("Can't snoop dma request for cursor!\n");
+ DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+ box->srcx, box->srcy, box->srcz,
+ box->x, box->y, box->z,
+ box->w, box->h, box->d, box_count,
+ cmd->dma.guest.ptr.offset);
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+
+ if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
+ memcpy(srf->snooper.image, virtual,
+ VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
+ } else {
+ /* Image is unsigned pointer. */
+ for (i = 0; i < box->h; i++)
+ memcpy(srf->snooper.image + i * image_pitch,
+ virtual + i * cmd->dma.guest.pitch,
+ box->w * desc->pitchBytesPerBlock);
+ }
+ srf->snooper.id++;
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ u32 i;
+
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
+ vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]);
+
+ drm_plane_cleanup(plane);
+}
+
+/**
+ * vmw_cursor_mob_map - Maps the cursor mob.
+ *
+ * @vps: state of the cursor plane
+ *
+ * Returns 0 on success
+ */
+static int
+vmw_cursor_mob_map(struct vmw_plane_state *vps)
+{
+ int ret;
+ u32 size = vmw_cursor_mob_size(vps->cursor.update_type,
+ vps->base.crtc_w, vps->base.crtc_h);
+ struct vmw_bo *vbo = vps->cursor.mob;
+
+ if (!vbo)
+ return -EINVAL;
+
+ if (vbo->tbo.base.size < size)
+ return -EINVAL;
+
+ if (vbo->map.virtual)
+ return 0;
+
+ ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL);
+ if (unlikely(ret != 0))
+ return -ENOMEM;
+
+ vmw_bo_map_and_cache(vbo);
+
+ ttm_bo_unreserve(&vbo->tbo);
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_cleanup_fb - Unpins the plane surface
+ *
+ * @plane: cursor plane
+ * @old_state: contains the state to clean up
+ *
+ * Unmaps all cursor bo mappings and unpins the cursor surface
+ */
+void
+vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
+
+ vmw_cursor_mob_unmap(vps);
+ vmw_cursor_mob_put(vcp, vps);
+
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
+}
+
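+/*
+ * Report whether the cursor backing buffer changed. A different BO always
+ * counts as changed; for the same BO the dirty tracker is scanned and any
+ * dirty content is transferred to the surface before reporting.
+ */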
+static bool
+vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo);
+ struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo);
+ struct vmw_surface *surf;
+ bool dirty = false;
+ int ret;
+
+ if (new_bo != old_bo)
+ return true;
+
+ if (new_bo) {
+ if (!old_bo) {
+ return true;
+ } else if (new_bo->dirty) {
+ vmw_bo_dirty_scan(new_bo);
+ dirty = vmw_bo_is_dirty(new_bo);
+ if (dirty) {
+ surf = vmw_user_object_surface(&new_vps->uo);
+ if (surf)
+ vmw_bo_dirty_transfer_to_res(&surf->res);
+ else
+ vmw_bo_dirty_clear(new_bo);
+ }
+ return dirty;
+ } else if (new_bo != old_bo) {
+ /*
+			 * Currently unused because the check at the top exits
+			 * right away. In most cases a different buffer means
+			 * different contents. For the few percent of cases
+			 * where that's not true, the cost of doing the memcmp
+			 * on all the others seems to outweigh the benefits.
+			 * Keep the conditional so it can be trivially
+			 * validated by removing the initial
+			 * if (new_bo != old_bo) check at the start.
+ */
+ void *old_image;
+ void *new_image;
+ bool changed = false;
+ struct ww_acquire_ctx ctx;
+ const u32 size = new_vps->base.crtc_w *
+ new_vps->base.crtc_h * sizeof(u32);
+
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ return true;
+ }
+
+ old_image = vmw_bo_map_and_cache(old_bo);
+ new_image = vmw_bo_map_and_cache(new_bo);
+
+ if (old_image && new_image && old_image != new_image)
+ changed = memcmp(old_image, new_image, size) !=
+ 0;
+
+ ttm_bo_unreserve(&new_bo->tbo);
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
+
+ return changed;
+ }
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+vmw_cursor_plane_changed(struct vmw_plane_state *new_vps,
+ struct vmw_plane_state *old_vps)
+{
+ if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
+ old_vps->base.crtc_h != new_vps->base.crtc_h)
+ return true;
+
+ if (old_vps->base.hotspot_x != new_vps->base.hotspot_x ||
+ old_vps->base.hotspot_y != new_vps->base.hotspot_y)
+ return true;
+
+ if (old_vps->cursor.legacy.hotspot_x !=
+ new_vps->cursor.legacy.hotspot_x ||
+ old_vps->cursor.legacy.hotspot_y !=
+ new_vps->cursor.legacy.hotspot_y)
+ return true;
+
+ if (old_vps->base.fb != new_vps->base.fb)
+ return true;
+
+ return false;
+}
+
+/**
+ * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it
+ *
+ * @plane: display plane
+ * @new_state: info on the new plane state, including the FB
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ struct vmw_bo *bo = NULL;
+ struct vmw_surface *surface;
+ int ret = 0;
+
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
+ }
+
+ if (fb) {
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
+ } else {
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
+ }
+ vmw_user_object_ref(&vps->uo);
+ }
+
+ vps->cursor.update_type = vmw_cursor_update_type(vmw, vps);
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || vps->cursor.legacy.id == surface->snooper.id)
+ vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
+ break;
+ case VMW_CURSOR_UPDATE_MOB: {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = { false, false };
+
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
+ return -ENOMEM;
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0)
+ return -ENOMEM;
+
+ /*
+ * vmw_bo_pin_reserved also validates, so to skip
+ * the extra validation use ttm_bo_pin directly
+ */
+ if (!bo->tbo.pin_count)
+ ttm_bo_pin(&bo->tbo);
+
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w *
+ new_state->crtc_h *
+ sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+ }
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ if (!vmw_cursor_plane_changed(vps, old_vps) &&
+ !vmw_cursor_buffer_changed(vps, old_vps)) {
+ vps->cursor.update_type =
+ VMW_CURSOR_UPDATE_NONE;
+ } else {
+ vmw_cursor_mob_get(vcp, vps);
+ vmw_cursor_mob_map(vps);
+ }
+ }
+ }
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cursor_plane_atomic_check - check if the new state is okay
+ *
+ * @plane: cursor plane
+ * @state: info on the new plane state
+ *
+ * This is a chance to fail if the new cursor state does not fit
+ * our requirements.
+ *
+ * Returns 0 on success
+ */
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct vmw_private *vmw = vmw_priv(plane->dev);
+ int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ enum vmw_cursor_update_type update_type;
+ struct drm_framebuffer *fb = new_state->fb;
+
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, true,
+ true);
+ if (ret)
+ return ret;
+
+ /* Turning off */
+ if (!fb)
+ return 0;
+
+ update_type = vmw_cursor_update_type(vmw, vps);
+ if (update_type == VMW_CURSOR_UPDATE_LEGACY) {
+ if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH ||
+ new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) {
+ drm_warn(&vmw->drm,
+ "Invalid cursor dimensions (%d, %d)\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return -EINVAL;
+ }
+ surface = vmw_user_object_surface(&vps->uo);
+ if (!surface || !surface->snooper.image) {
+ drm_warn(&vmw->drm,
+ "surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
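+/*
+ * Commit the new cursor state: push the image through the legacy or MOB
+ * path chosen at prepare_fb time, then update the cursor position for all
+ * update types.
+ */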
+void
+vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ struct vmw_private *dev_priv = vmw_priv(plane->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ s32 hotspot_x, hotspot_y, cursor_x, cursor_y;
+
+ /*
+ * Hide the cursor if the new bo is null
+ */
+ if (vmw_user_object_is_null(&vps->uo)) {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return;
+ }
+
+ switch (vps->cursor.update_type) {
+ case VMW_CURSOR_UPDATE_LEGACY:
+ vmw_cursor_plane_update_legacy(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_MOB:
+ vmw_cursor_update_mob(dev_priv, vps);
+ break;
+ case VMW_CURSOR_UPDATE_NONE:
+ /* do nothing */
+ break;
+ }
+
+ /*
+ * For all update types update the cursor position
+ */
+ cursor_x = new_state->crtc_x + du->set_gui_x;
+ cursor_y = new_state->crtc_y + du->set_gui_y;
+
+ hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x;
+ hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y;
+
+ vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x,
+ cursor_y + hotspot_y);
+}
+
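+/*
+ * Legacy ioctl letting user space override the cursor hotspot, either for
+ * a single CRTC or for all of them (DRM_VMW_CURSOR_BYPASS_ALL).
+ */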
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct vmw_plane_state *vps;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ du = vmw_crtc_to_du(crtc);
+ vps = vmw_plane_state_to_vps(du->cursor.base.state);
+ vps->cursor.legacy.hotspot_x = arg->xhot;
+ vps->cursor.legacy.hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
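+/*
+ * Allocate a snooper image for non-atomic clients that create a scanout
+ * surface with the legacy cursor dimensions and format. Returns NULL when
+ * the surface does not qualify and an ERR_PTR on allocation failure.
+ */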
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata)
+{
+ if (!file_priv->atomic && metadata->scanout &&
+ metadata->num_sizes == 1 &&
+ metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
+ metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
+ metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
+ const struct SVGA3dSurfaceDesc *desc =
+ vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
+ const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
+ VMW_CURSOR_SNOOP_HEIGHT *
+ desc->pitchBytesPerBlock;
+ void *image = kzalloc(cursor_size_bytes, GFP_KERNEL);
+
+ if (!image) {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ return image;
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
new file mode 100644
index 000000000000..40694925a70e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_CURSOR_PLANE_H
+#define VMWGFX_CURSOR_PLANE_H
+
+#include "device_include/svga3d_cmd.h"
+#include "drm/drm_file.h"
+#include "drm/drm_fourcc.h"
+#include "drm/drm_plane.h"
+
+#include <linux/types.h>
+
+struct SVGA3dCmdHeader;
+struct ttm_buffer_object;
+struct vmw_bo;
+struct vmw_cursor;
+struct vmw_private;
+struct vmw_surface;
+struct vmw_user_object;
+
+#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
+
+static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+enum vmw_cursor_update_type {
+ VMW_CURSOR_UPDATE_NONE = 0,
+ VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_MOB,
+};
+
+struct vmw_cursor_plane_state {
+ enum vmw_cursor_update_type update_type;
+ bool changed;
+ bool surface_changed;
+ struct vmw_bo *mob;
+ struct {
+ s32 hotspot_x;
+ s32 hotspot_y;
+ u32 id;
+ } legacy;
+};
+
+/**
+ * struct vmw_cursor_plane - Derived class for cursor plane object
+ *
+ * @base: DRM plane object
+ * @cursor_mobs: Cursor mobs available for re-use
+ */
+struct vmw_cursor_plane {
+ struct drm_plane base;
+
+ struct vmw_bo *cursor_mobs[3];
+};
+
+struct vmw_surface_metadata;
+void *vmw_cursor_snooper_create(struct drm_file *file_priv,
+ struct vmw_surface_metadata *metadata);
+void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header,
+ struct vmw_surface *srf,
+ struct ttm_buffer_object *bo);
+
+void vmw_cursor_plane_destroy(struct drm_plane *plane);
+
+int vmw_cursor_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void vmw_cursor_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+#endif /* VMWGFX_CURSOR_PLANE_H */
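
The header only declares the plane hooks; wiring them into the DRM plane tables happens in the display-unit code. A hedged sketch of what that wiring typically looks like (the table names here are hypothetical, and vmwgfx's real tables also carry its own plane-state duplicate/destroy callbacks):

	static const struct drm_plane_funcs cursor_funcs = {
		.update_plane = drm_atomic_helper_update_plane,
		.disable_plane = drm_atomic_helper_disable_plane,
		.destroy = vmw_cursor_plane_destroy,
		.reset = drm_atomic_helper_plane_reset,
		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	};

	static const struct drm_plane_helper_funcs cursor_helper_funcs = {
		.atomic_check = vmw_cursor_plane_atomic_check,
		.atomic_update = vmw_cursor_plane_atomic_update,
		.prepare_fb = vmw_cursor_plane_prepare_fb,
		.cleanup_fb = vmw_cursor_plane_cleanup_fb,
	};
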
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0f32471c8533..0695a342b1ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,31 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-
#include "vmwgfx_drv.h"
#include "vmwgfx_bo.h"
@@ -1324,9 +1304,6 @@ static void vmw_master_set(struct drm_device *dev,
static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
-
- vmw_kms_legacy_hotspot_clear(dev_priv);
}
bool vmwgfx_supported(struct vmw_private *vmw)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5275ef632d4b..594af8eb04c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,29 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef _VMWGFX_DRV_H_
@@ -58,7 +38,7 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 20
+#define VMWGFX_DRIVER_MINOR 21
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_NUM_DISPLAY_UNITS 8
@@ -100,10 +80,6 @@
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12
-#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
-#define VMW_CURSOR_SNOOP_WIDTH 64
-#define VMW_CURSOR_SNOOP_HEIGHT 64
-
#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
@@ -201,7 +177,7 @@ enum vmw_cmdbuf_res_type {
struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
- size_t age;
+ size_t id;
uint32_t *image;
};
@@ -846,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
* GEM related functionality - vmwgfx_gem.c
*/
struct vmw_bo_params;
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo);
+extern const struct drm_gem_object_funcs vmw_gem_object_funcs;
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -1050,7 +1024,6 @@ int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
@@ -1067,7 +1040,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
@@ -1393,8 +1365,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
+bool vmw_bo_is_dirty(struct vmw_bo *vbo);
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
+void vmw_bo_dirty_clear(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2e52d73eba48..e831e324e737 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,29 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
+
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
@@ -4086,6 +4068,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return 0;
}
+/*
+ * DMA fence callback to remove a seqno_waiter
+ */
+struct seqno_waiter_rm_context {
+ struct dma_fence_cb base;
+ struct vmw_private *dev_priv;
+};
+
+static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct seqno_waiter_rm_context *ctx =
+ container_of(cb, struct seqno_waiter_rm_context, base);
+
+ vmw_seqno_waiter_remove(ctx->dev_priv);
+ kfree(ctx);
+}
+
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands, void *kernel_commands,
@@ -4266,6 +4265,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
+			struct seqno_waiter_rm_context *ctx =
+				kmalloc(sizeof(*ctx), GFP_KERNEL);
+
+			/* kmalloc() can fail; skip the waiter bookkeeping then. */
+			if (ctx) {
+				ctx->dev_priv = dev_priv;
+				vmw_seqno_waiter_add(dev_priv);
+				if (dma_fence_add_callback(&fence->base, &ctx->base,
+							   seqno_waiter_rm_cb) < 0) {
+					vmw_seqno_waiter_remove(dev_priv);
+					kfree(ctx);
+				}
+			}
}
}
@@ -4512,8 +4520,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out;
- vmw_kms_cursor_post_execbuf(dev_priv);
-
out:
if (in_fence)
dma_fence_put(in_fence);
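
The seqno-waiter hunk above keeps a device waiter registered for as long as the installed fence is unsignaled, dropping it again from a dma_fence callback. A hedged sketch of the idiom with a hypothetical counter standing in for vmw_seqno_waiter_add()/remove(); note that dma_fence_add_callback() returns -ENOENT when the fence has already signaled, in which case the callback never runs and the cleanup must happen inline, exactly as the patch does:

	#include <linux/atomic.h>
	#include <linux/dma-fence.h>
	#include <linux/slab.h>

	struct waiter_cb {
		struct dma_fence_cb base;
		atomic_t *waiters;	/* hypothetical waiter counter */
	};

	static void waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
	{
		struct waiter_cb *w = container_of(cb, struct waiter_cb, base);

		atomic_dec(w->waiters);	/* fence signaled: drop the waiter */
		kfree(w);
	}

	static void keep_waiter_until_signaled(struct dma_fence *fence,
					       atomic_t *waiters)
	{
		struct waiter_cb *w = kmalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return;
		w->waiters = waiters;
		atomic_inc(waiters);
		/* -ENOENT: already signaled, callback not installed; undo inline. */
		if (dma_fence_add_callback(fence, &w->base, waiter_rm_cb) < 0) {
			atomic_dec(waiters);
			kfree(w);
		}
	}
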
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ed5015ced392..c55382167c1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -84,11 +84,11 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
int ret;
- if (obj->import_attach) {
- ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj)) {
+ ret = dma_buf_vmap(obj->dma_buf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ dma_buf_vunmap(obj->dma_buf, map);
return -EIO;
}
}
@@ -101,8 +101,8 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- if (obj->import_attach)
- dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ if (drm_gem_is_imported(obj))
+ dma_buf_vunmap(obj->dma_buf, map);
else
drm_gem_ttm_vunmap(obj, map);
}
@@ -111,7 +111,7 @@ static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
/*
* Reset both vm_ops and vm_private_data, so we don't end up with
* vm_ops pointing to our implementation if the dma-buf backend
@@ -140,7 +140,7 @@ static const struct vm_operations_struct vmw_vm_ops = {
.close = ttm_bo_vm_close,
};
-static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
+const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
.open = vmw_gem_object_open,
.close = vmw_gem_object_close,
@@ -154,20 +154,6 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vm_ops = &vmw_vm_ops,
};
-int vmw_gem_object_create(struct vmw_private *vmw,
- struct vmw_bo_params *params,
- struct vmw_bo **p_vbo)
-{
- int ret = vmw_bo_create(vmw, params, p_vbo);
-
- if (ret != 0)
- goto out_no_bo;
-
- (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-out_no_bo:
- return ret;
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
@@ -183,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1912ac1cde6d..05b1c54a070c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,33 +1,15 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
+
#include "vmwgfx_kms.h"
#include "vmwgfx_bo.h"
+#include "vmwgfx_resource_priv.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
@@ -59,474 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du)
drm_connector_cleanup(&du->connector);
}
-/*
- * Display Unit Cursor functions
- */
-
-static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY);
-
-struct vmw_svga_fifo_cmd_define_cursor {
- u32 cmd;
- SVGAFifoCmdDefineAlphaCursor cursor;
-};
-
-/**
- * vmw_send_define_cursor_cmd - queue a define cursor command
- * @dev_priv: the private driver struct
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- struct vmw_svga_fifo_cmd_define_cursor *cmd;
- const u32 image_size = width * height * sizeof(*image);
- const u32 cmd_size = sizeof(*cmd) + image_size;
-
- /* Try to reserve fifocmd space and swallow any failures;
- such reservations cannot be left unconsumed for long
- under the risk of clogging other fifocmd users, so
- we treat reservations separtely from the way we treat
- other fallible KMS-atomic resources at prepare_fb */
- cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
-
- if (unlikely(!cmd))
- return;
-
- memset(cmd, 0, sizeof(*cmd));
-
- memcpy(&cmd[1], image, image_size);
-
- cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
- cmd->cursor.id = 0;
- cmd->cursor.width = width;
- cmd->cursor.height = height;
- cmd->cursor.hotspotX = hotspotX;
- cmd->cursor.hotspotY = hotspotY;
-
- vmw_cmd_commit_flush(dev_priv, cmd_size);
-}
-
-/**
- * vmw_cursor_update_image - update the cursor image on the provided plane
- * @dev_priv: the private driver struct
- * @vps: the plane state of the cursor plane
- * @image: buffer which holds the cursor image
- * @width: width of the mouse cursor image
- * @height: height of the mouse cursor image
- * @hotspotX: the horizontal position of mouse hotspot
- * @hotspotY: the vertical position of mouse hotspot
- */
-static void vmw_cursor_update_image(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- if (vps->cursor.bo)
- vmw_cursor_update_mob(dev_priv, vps, image,
- vps->base.crtc_w, vps->base.crtc_h,
- hotspotX, hotspotY);
-
- else
- vmw_send_define_cursor_cmd(dev_priv, image, width, height,
- hotspotX, hotspotY);
-}
-
-
-/**
- * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
- *
- * Called from inside vmw_du_cursor_plane_atomic_update to actually
- * make the cursor-image live.
- *
- * @dev_priv: device to work with
- * @vps: the plane state of the cursor plane
- * @image: cursor source data to fill the MOB with
- * @width: source data width
- * @height: source data height
- * @hotspotX: cursor hotspot x
- * @hotspotY: cursor hotspot Y
- */
-static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
- struct vmw_plane_state *vps,
- u32 *image, u32 width, u32 height,
- u32 hotspotX, u32 hotspotY)
-{
- SVGAGBCursorHeader *header;
- SVGAGBAlphaCursorHeader *alpha_header;
- const u32 image_size = width * height * sizeof(*image);
-
- header = vmw_bo_map_and_cache(vps->cursor.bo);
- alpha_header = &header->header.alphaHeader;
-
- memset(header, 0, sizeof(*header));
-
- header->type = SVGA_ALPHA_CURSOR;
- header->sizeInBytes = image_size;
-
- alpha_header->hotspotX = hotspotX;
- alpha_header->hotspotY = hotspotY;
- alpha_header->width = width;
- alpha_header->height = height;
-
- memcpy(header + 1, image, image_size);
- vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->tbo.resource->start);
-}
-
-
-static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
-{
- return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
-}
-
-/**
- * vmw_du_cursor_plane_acquire_image -- Acquire the image data
- * @vps: cursor plane state
- */
-static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
-{
- struct vmw_surface *surf;
-
- if (vmw_user_object_is_null(&vps->uo))
- return NULL;
-
- surf = vmw_user_object_surface(&vps->uo);
- if (surf && !vmw_user_object_is_mapped(&vps->uo))
- return surf->snooper.image;
-
- return vmw_user_object_map(&vps->uo);
-}
-
-static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
- struct vmw_plane_state *new_vps)
-{
- void *old_image;
- void *new_image;
- u32 size;
- bool changed;
-
- if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
- old_vps->base.crtc_h != new_vps->base.crtc_h)
- return true;
-
- if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
- old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
- return true;
-
- size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
-
- old_image = vmw_du_cursor_plane_acquire_image(old_vps);
- new_image = vmw_du_cursor_plane_acquire_image(new_vps);
-
- changed = false;
- if (old_image && new_image && old_image != new_image)
- changed = memcmp(old_image, new_image, size) != 0;
-
- return changed;
-}
-
-static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
-{
- if (!(*vbo))
- return;
-
- ttm_bo_unpin(&(*vbo)->tbo);
- vmw_bo_unreference(vbo);
-}
-
-static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- u32 i;
-
- if (!vps->cursor.bo)
- return;
-
- vmw_du_cursor_plane_unmap_cm(vps);
-
- /* Look for a free slot to return this mob to the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (!vcp->cursor_mobs[i]) {
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Cache is full: See if this mob is bigger than an existing mob. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->tbo.base.size <
- vps->cursor.bo->tbo.base.size) {
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
- vcp->cursor_mobs[i] = vps->cursor.bo;
- vps->cursor.bo = NULL;
- return;
- }
- }
-
- /* Destroy it if it's not worth caching. */
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
-}
-
-static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
- struct vmw_plane_state *vps)
-{
- struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- u32 i;
- u32 cursor_max_dim, mob_max_size;
- struct vmw_fence_obj *fence = NULL;
- int ret;
-
- if (!dev_priv->has_mob ||
- (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
- return -EINVAL;
-
- mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
-
- if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
- vps->base.crtc_h > cursor_max_dim)
- return -EINVAL;
-
- if (vps->cursor.bo) {
- if (vps->cursor.bo->tbo.base.size >= size)
- return 0;
- vmw_du_put_cursor_mob(vcp, vps);
- }
-
- /* Look for an unused mob in the cache. */
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->tbo.base.size >= size) {
- vps->cursor.bo = vcp->cursor_mobs[i];
- vcp->cursor_mobs[i] = NULL;
- return 0;
- }
- }
- /* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_and_populate(dev_priv, size,
- VMW_BO_DOMAIN_MOB,
- &vps->cursor.bo);
-
- if (ret != 0)
- return ret;
-
- /* Fence the mob creation so we are guarateed to have the mob */
- ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
- if (ret != 0)
- goto teardown;
-
- ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret != 0) {
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- goto teardown;
- }
-
- dma_fence_wait(&fence->base, false);
- dma_fence_put(&fence->base);
-
- ttm_bo_unreserve(&vps->cursor.bo->tbo);
- return 0;
-
-teardown:
- vmw_du_destroy_cursor_mob(&vps->cursor.bo);
- return ret;
-}
-
-
-static void vmw_cursor_update_position(struct vmw_private *dev_priv,
- bool show, int x, int y)
-{
- const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
- : SVGA_CURSOR_ON_HIDE;
- uint32_t count;
-
- spin_lock(&dev_priv->cursor_lock);
- if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
- vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
- vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
- } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
- } else {
- vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
- vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
- vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
- }
- spin_unlock(&dev_priv->cursor_lock);
-}
-
-void vmw_kms_cursor_snoop(struct vmw_surface *srf,
- struct ttm_object_file *tfile,
- struct ttm_buffer_object *bo,
- SVGA3dCmdHeader *header)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- SVGA3dCopyBox *box;
- unsigned box_count;
- void *virtual;
- bool is_iomem;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
- int i, ret;
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
-
- cmd = container_of(header, struct vmw_dma_cmd, header);
-
- /* No snooper installed, nothing to copy */
- if (!srf->snooper.image)
- return;
-
- if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
- DRM_ERROR("face and mipmap for cursors should never != 0\n");
- return;
- }
-
- if (cmd->header.size < 64) {
- DRM_ERROR("at least one full copy box must be given\n");
- return;
- }
-
- box = (SVGA3dCopyBox *)&cmd[1];
- box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
- sizeof(SVGA3dCopyBox);
-
- if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
- box->x != 0 || box->y != 0 || box->z != 0 ||
- box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
- box->d != 1 || box_count != 1 ||
- box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
-		/* TODO handle non-page-aligned offsets */
-		/* TODO handle dst & src offsets != 0 */
-		/* TODO handle more than one copy box */
- DRM_ERROR("Can't snoop dma request for cursor!\n");
- DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
- box->srcx, box->srcy, box->srcz,
- box->x, box->y, box->z,
- box->w, box->h, box->d, box_count,
- cmd->dma.guest.ptr.offset);
- return;
- }
-
- kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
- kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(bo, true, false, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
-
- if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
- memcpy(srf->snooper.image, virtual,
- VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
- } else {
-		/* Width or pitch differs: copy the image one row at a time. */
- for (i = 0; i < box->h; i++)
- memcpy(srf->snooper.image + i * image_pitch,
- virtual + i * cmd->dma.guest.pitch,
- box->w * desc->pitchBytesPerBlock);
- }
-
- srf->snooper.age++;
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(bo);
-}
-
-/**
- * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
- *
- * @dev_priv: Pointer to the device private struct.
- *
- * Clears all legacy hotspots.
- */
-void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- drm_modeset_lock_all(dev);
- drm_for_each_crtc(crtc, dev) {
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = 0;
- du->hotspot_y = 0;
- }
- drm_modeset_unlock_all(dev);
-}
-
-void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
-
- mutex_lock(&dev->mode_config.mutex);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- if (!du->cursor_surface ||
- du->cursor_age == du->cursor_surface->snooper.age ||
- !du->cursor_surface->snooper.image)
- continue;
-
- du->cursor_age = du->cursor_surface->snooper.age;
- vmw_send_define_cursor_cmd(dev_priv,
- du->cursor_surface->snooper.image,
- VMW_CURSOR_SNOOP_WIDTH,
- VMW_CURSOR_SNOOP_HEIGHT,
- du->hotspot_x + du->core_hotspot_x,
- du->hotspot_y + du->core_hotspot_y);
- }
-
- mutex_unlock(&dev->mode_config.mutex);
-}
-
-
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- u32 i;
-
- vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
-
- for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
- vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
-
- drm_plane_cleanup(plane);
-}
-
void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
@@ -575,262 +89,6 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
/**
- * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
- *
- * @vps: plane_state
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
-{
- int ret;
- u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo;
-
- if (!vps->cursor.bo)
- return -EINVAL;
-
- bo = &vps->cursor.bo->tbo;
-
- if (bo->base.size < size)
- return -EINVAL;
-
- if (vps->cursor.bo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- vmw_bo_map_and_cache(vps->cursor.bo);
-
- ttm_bo_unreserve(bo);
-
- if (unlikely(ret != 0))
- return -ENOMEM;
-
- return 0;
-}
-
-
-/**
- * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
- *
- * @vps: state of the cursor plane
- *
- * Returns 0 on success
- */
-
-static int
-vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
-{
- int ret = 0;
- struct vmw_bo *vbo = vps->cursor.bo;
-
- if (!vbo || !vbo->map.virtual)
- return 0;
-
- ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
- if (likely(ret == 0)) {
- vmw_bo_unmap(vbo);
- ttm_bo_unreserve(&vbo->tbo);
- }
-
- return ret;
-}
-
-
-/**
- * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
- *
- * @plane: cursor plane
- * @old_state: contains the state to clean up
- *
- * Unmaps all cursor bo mappings and unpins the cursor surface.
- */
-void
-vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
-
- if (!vmw_user_object_is_null(&vps->uo))
- vmw_user_object_unmap(&vps->uo);
-
- vmw_du_cursor_plane_unmap_cm(vps);
- vmw_du_put_cursor_mob(vcp, vps);
-
- vmw_du_plane_unpin_surf(vps);
- vmw_user_object_unref(&vps->uo);
-}
-
-
-/**
- * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
- *
- * @plane: display plane
- * @new_state: info on the new plane state, including the FB
- *
- * Returns 0 on success
- */
-int
-vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_framebuffer *fb = new_state->fb;
- struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_bo *bo = NULL;
- int ret = 0;
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_user_object_unmap(&vps->uo);
- vmw_user_object_unref(&vps->uo);
- }
-
- if (fb) {
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
- vps->uo.surface = NULL;
- } else {
- memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
- }
- vmw_user_object_ref(&vps->uo);
- }
-
- bo = vmw_user_object_buffer(&vps->uo);
- if (bo) {
- struct ttm_operation_ctx ctx = {false, false};
-
- ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
- if (ret != 0)
- return -ENOMEM;
-
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret != 0)
- return -ENOMEM;
-
- vmw_bo_pin_reserved(bo, true);
- if (vmw_framebuffer_to_vfb(fb)->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
-
- (void)vmw_bo_map_and_cache_size(bo, size);
- } else {
- vmw_bo_map_and_cache(bo);
- }
- ttm_bo_unreserve(&bo->tbo);
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- vmw_du_get_cursor_mob(vcp, vps);
- vmw_du_cursor_plane_map_cm(vps);
- }
-
- return 0;
-}
-
-
-void
-vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
- struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
- struct vmw_bo *old_bo = NULL;
- struct vmw_bo *new_bo = NULL;
- struct ww_acquire_ctx ctx;
- s32 hotspot_x, hotspot_y;
- int ret;
-
- hotspot_x = du->hotspot_x + new_state->hotspot_x;
- hotspot_y = du->hotspot_y + new_state->hotspot_y;
-
- du->cursor_surface = vmw_user_object_surface(&vps->uo);
-
- if (vmw_user_object_is_null(&vps->uo)) {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- return;
- }
-
- vps->cursor.hotspot_x = hotspot_x;
- vps->cursor.hotspot_y = hotspot_y;
-
- if (du->cursor_surface)
- du->cursor_age = du->cursor_surface->snooper.age;
-
- ww_acquire_init(&ctx, &reservation_ww_class);
-
- if (!vmw_user_object_is_null(&old_vps->uo)) {
- old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
- if (ret != 0)
- return;
- }
-
- if (!vmw_user_object_is_null(&vps->uo)) {
- new_bo = vmw_user_object_buffer(&vps->uo);
- if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
- if (ret != 0) {
- if (old_bo) {
- ttm_bo_unreserve(&old_bo->tbo);
- ww_acquire_fini(&ctx);
- }
- return;
- }
- } else {
- new_bo = NULL;
- }
- }
- if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
- /*
- * If it hasn't changed, avoid making the device do extra
- * work by keeping the old cursor active.
- */
- struct vmw_cursor_plane_state tmp = old_vps->cursor;
- old_vps->cursor = vps->cursor;
- vps->cursor = tmp;
- } else {
- void *image = vmw_du_cursor_plane_acquire_image(vps);
- if (image)
- vmw_cursor_update_image(dev_priv, vps, image,
- new_state->crtc_w,
- new_state->crtc_h,
- hotspot_x, hotspot_y);
- }
-
- if (new_bo)
- ttm_bo_unreserve(&new_bo->tbo);
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
-
- ww_acquire_fini(&ctx);
-
- du->cursor_x = new_state->crtc_x + du->set_gui_x;
- du->cursor_y = new_state->crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
-
- du->core_hotspot_x = hotspot_x - du->hotspot_x;
- du->core_hotspot_y = hotspot_y - du->hotspot_y;
-}
-
-
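
As an illustration (not part of the patch): the atomic_update removed above reserves up to two buffer objects, the old and new cursor BOs, under a single ww_acquire_ctx so the pair can be locked consistently. A minimal sketch of that pattern, stripped of driver specifics and with -EDEADLK backoff omitted for brevity (function name hypothetical):

	static int reserve_bo_pair(struct ttm_buffer_object *a,
				   struct ttm_buffer_object *b)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &reservation_ww_class);

		ret = ttm_bo_reserve(a, false, false, &ctx);
		if (ret)
			goto out_fini;

		if (b && b != a) {
			ret = ttm_bo_reserve(b, false, false, &ctx);
			if (ret) {
				ttm_bo_unreserve(a);
				goto out_fini;
			}
		}

		/* ... both BOs are reserved: do the work ... */

		if (b && b != a)
			ttm_bo_unreserve(b);
		ttm_bo_unreserve(a);
	out_fini:
		ww_acquire_fini(&ctx);
		return ret;
	}
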
-/**
* vmw_du_primary_plane_atomic_check - check if the new state is okay
*
* @plane: display plane
@@ -873,66 +131,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
return ret;
}
-
-/**
- * vmw_du_cursor_plane_atomic_check - check if the new state is okay
- *
- * @plane: cursor plane
- * @state: info on the new plane state
- *
- * This is a chance to fail if the new cursor state does not fit
- * our requirements.
- *
- * Returns 0 on success
- */
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0;
- struct drm_crtc_state *crtc_state = NULL;
- struct vmw_surface *surface = NULL;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (new_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
- new_state->crtc);
-
- ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
- /* Turning off */
- if (!fb)
- return 0;
-
- /* A lot of the code assumes this */
- if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
- DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
- new_state->crtc_w, new_state->crtc_h);
- return -EINVAL;
- }
-
- if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
-
- WARN_ON(!surface);
-
- if (!surface ||
- (!surface->snooper.image && !surface->res.guest_memory_bo)) {
- DRM_ERROR("surface not suitable for cursor\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1076,7 +274,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
vps->cpp = 0;
- memset(&vps->cursor, 0, sizeof(vps->cursor));
+ vps->cursor.mob = NULL;
/* Each ref counted resource needs to be acquired again */
vmw_user_object_ref(&vps->uo);
@@ -1221,7 +419,20 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
+ if (bo) {
+ vmw_bo_dirty_release(bo);
+ /*
+		 * bo->dirty is reference counted, so if it is NULL the
+		 * surface wasn't coherent to begin with and we have to
+		 * free the dirty tracker in the vmw_resource instead.
+ */
+ if (!bo->dirty && surf && surf->res.dirty)
+ surf->res.func->dirty_free(&surf->res);
+ }
drm_framebuffer_cleanup(framebuffer);
vmw_user_object_unref(&vfbs->uo);
@@ -1375,6 +586,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
+ vmw_bo_dirty_release(vfbd->buffer);
drm_framebuffer_cleanup(framebuffer);
vmw_bo_unreference(&vfbd->buffer);
@@ -1505,6 +717,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_user_object uo = {0};
+ struct vmw_bo *bo;
+ struct vmw_surface *surface;
int ret;
/* returns either a bo or surface */
@@ -1534,6 +748,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
err_out:
+ bo = vmw_user_object_buffer(&uo);
+ surface = vmw_user_object_surface(&uo);
/* vmw_user_object_lookup takes one ref so does new_fb */
vmw_user_object_unref(&uo);
@@ -1542,6 +758,14 @@ err_out:
return ERR_PTR(ret);
}
+ ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ ret = vmw_bo_dirty_add(bo);
+ if (!ret && surface && surface->res.func->dirty_alloc) {
+ surface->res.coherent = true;
+ ret = surface->res.func->dirty_alloc(&surface->res);
+ }
+ ttm_bo_unreserve(&bo->tbo);
+
return &vfb->base;
}
@@ -1974,44 +1198,6 @@ int vmw_kms_close(struct vmw_private *dev_priv)
return ret;
}
-int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vmw_cursor_bypass_arg *arg = data;
- struct vmw_display_unit *du;
- struct drm_crtc *crtc;
- int ret = 0;
-
- mutex_lock(&dev->mode_config.mutex);
- if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- du = vmw_crtc_to_du(crtc);
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
- }
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
- }
-
- crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- du = vmw_crtc_to_du(crtc);
-
- du->hotspot_x = arg->xhot;
- du->hotspot_y = arg->yhot;
-
-out:
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 4eab581883e2..511e29cdb987 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,40 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
+#include "vmwgfx_cursor_plane.h"
+#include "vmwgfx_drv.h"
+
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
-#include "vmwgfx_drv.h"
-
/**
* struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
* @plane: Plane which is being updated.
@@ -235,16 +216,11 @@ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
};
-static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
-
#define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base)
#define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base)
#define vmw_connector_state_to_vcs(x) \
container_of(x, struct vmw_connector_state, base)
-#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base)
/**
* Derived class for crtc state object
@@ -255,11 +231,6 @@ struct vmw_crtc_state {
struct drm_crtc_state base;
};
-struct vmw_cursor_plane_state {
- struct vmw_bo *bo;
- s32 hotspot_x;
- s32 hotspot_y;
-};
/**
* Derived class for plane state object
@@ -283,7 +254,6 @@ struct vmw_plane_state {
/* For CPU Blit */
unsigned int cpp;
- bool surf_mapped;
struct vmw_cursor_plane_state cursor;
};
@@ -317,17 +287,6 @@ struct vmw_connector_state {
int gui_y;
};
-/**
- * Derived class for cursor plane object
- *
- * @base DRM plane object
- * @cursor.cursor_mobs Cursor mobs available for re-use
- */
-struct vmw_cursor_plane {
- struct drm_plane base;
-
- struct vmw_bo *cursor_mobs[3];
-};
/**
* Base class display unit.
@@ -343,17 +302,6 @@ struct vmw_display_unit {
struct drm_plane primary;
struct vmw_cursor_plane cursor;
- struct vmw_surface *cursor_surface;
- size_t cursor_age;
-
- int cursor_x;
- int cursor_y;
-
- int hotspot_x;
- int hotspot_y;
- s32 core_hotspot_x;
- s32 core_hotspot_y;
-
unsigned unit;
/*
@@ -403,8 +351,6 @@ struct vmw_display_unit {
*/
void vmw_du_init(struct vmw_display_unit *du);
void vmw_du_cleanup(struct vmw_display_unit *du);
-void vmw_du_crtc_save(struct drm_crtc *crtc);
-void vmw_du_crtc_restore(struct drm_crtc *crtc);
int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
@@ -460,19 +406,10 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
-void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state);
-int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state);
-void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state);
-int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
void vmw_du_plane_reset(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f0b429525467..c23c9195f0dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -372,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -383,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 7055cbefc768..d8204d4265d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -282,8 +282,7 @@ out_no_setup:
}
vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
- ttm_bo_put(&batch->otable_bo->tbo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 74ff2812d66a..7de20e56082c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,27 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
#include "vmwgfx_bo.h"
@@ -71,6 +52,11 @@ struct vmw_bo_dirty {
unsigned long bitmap[];
};
+bool vmw_bo_is_dirty(struct vmw_bo *vbo)
+{
+ return vbo->dirty && (vbo->dirty->start < vbo->dirty->end);
+}
+
/**
* vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
* @vbo: The buffer object to scan
@@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
dirty->end = res_start;
}
+void vmw_bo_dirty_clear(struct vmw_bo *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = 0;
+ unsigned long res_end = vbo->tbo.base.size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = max(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
/**
* vmw_bo_dirty_clear_res - Clear a resource's dirty region from
* its backing mob.
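
For reference, the loop in the new vmw_bo_dirty_clear() above is the standard find_next_bit()/find_next_zero_bit() walk over runs of set bits. A self-contained sketch of the same idiom (illustrative only, not part of the patch):

	/* Clear every run of set bits in [first, last). */
	static void clear_dirty_runs(unsigned long *bitmap,
				     unsigned long first, unsigned long last)
	{
		unsigned long cur = first;

		while (cur < last) {
			/* First set bit at or after cur; >= last if none remain. */
			unsigned long start = find_next_bit(bitmap, last, cur);
			unsigned long end;

			if (start >= last)
				break;

			/* End (exclusive) of this run of set bits. */
			end = find_next_zero_bit(bitmap, last, start + 1);
			bitmap_clear(bitmap, start, end - start);
			cur = end + 1;	/* bit 'end' is known to be clear */
		}
	}
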
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a73af8a355fb..388011696941 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
- kref_get(&res->kref);
+ vmw_resource_reference(res);
*p_res = res;
ret = 0;
@@ -347,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
return 0;
}
- ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -531,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->guest_memory_bo->tbo);
val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
+ drm_gem_object_get(&val_buf->bo->base);
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -557,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
out_no_validate:
ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
if (guest_memory_dirty)
vmw_user_bo_unref(&res->guest_memory_bo);
@@ -619,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
list_add_tail(&val_buf->head, &val_list);
ttm_eu_backoff_reservation(ticket, &val_list);
- ttm_bo_put(val_buf->bo);
+ drm_gem_object_put(&val_buf->bo->base);
val_buf->bo = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 32029d80b72b..5f5f5a94301f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -445,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -764,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = {
static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -775,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f5d2ed1b0a72..20aab725e53a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1482,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = {
static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = vmw_du_cursor_plane_destroy,
+ .destroy = vmw_cursor_plane_destroy,
.reset = vmw_du_plane_reset,
.atomic_duplicate_state = vmw_du_plane_duplicate_state,
.atomic_destroy_state = vmw_du_plane_destroy_state,
@@ -1494,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = {
*/
static const struct
drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = {
- .atomic_check = vmw_du_cursor_plane_atomic_check,
- .atomic_update = vmw_du_cursor_plane_atomic_update,
- .prepare_fb = vmw_du_cursor_plane_prepare_fb,
- .cleanup_fb = vmw_du_cursor_plane_cleanup_fb,
+ .atomic_check = vmw_cursor_plane_atomic_check,
+ .atomic_update = vmw_cursor_plane_atomic_update,
+ .prepare_fb = vmw_cursor_plane_prepare_fb,
+ .cleanup_fb = vmw_cursor_plane_cleanup_fb,
};
static const struct
@@ -1584,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&cursor->base);
ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5721c74da3e0..7e281c3c6bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,32 +1,13 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
**************************************************************************/
#include "vmwgfx_bo.h"
+#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -658,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_user_surface *user_srf =
container_of(srf, struct vmw_user_surface, srf);
- WARN_ON_ONCE(res->dirty);
+ WARN_ON(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -689,8 +670,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
* Dumb buffers own the resource and they'll unref the
* resource themselves
*/
- if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
- return;
+ WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);
vmw_resource_unreference(&res);
}
@@ -818,25 +798,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (!file_priv->atomic &&
- metadata->scanout &&
- metadata->num_sizes == 1 &&
- metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
- metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
- metadata->format == VMW_CURSOR_SNOOP_FORMAT) {
- const struct SVGA3dSurfaceDesc *desc =
- vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
- const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH *
- VMW_CURSOR_SNOOP_HEIGHT *
- desc->pitchBytesPerBlock;
- srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL);
- if (!srf->snooper.image) {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_no_copy;
- }
- } else {
- srf->snooper.image = NULL;
+
+ srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
+ if (IS_ERR(srf->snooper.image)) {
+ ret = PTR_ERR(srf->snooper.image);
+ goto out_no_copy;
}
if (drm_is_primary_client(file_priv))
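
The inline snooper allocation removed above moves behind vmw_cursor_snooper_create(), whose body lives in the new vmwgfx_cursor_plane.c and is not shown in this hunk. Inferred from the removed logic and the call site, a plausible shape is (a sketch only; the real implementation may differ):

	void *vmw_cursor_snooper_create(struct drm_file *file_priv,
					struct vmw_surface_metadata *metadata)
	{
		const struct SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
		void *image;

		/* Only legacy (non-atomic) snoop-sized cursor surfaces get one. */
		if (file_priv->atomic || !metadata->scanout ||
		    metadata->num_sizes != 1 ||
		    metadata->sizes[0].width != VMW_CURSOR_SNOOP_WIDTH ||
		    metadata->sizes[0].height != VMW_CURSOR_SNOOP_HEIGHT ||
		    metadata->format != VMW_CURSOR_SNOOP_FORMAT)
			return NULL;

		image = kzalloc(VMW_CURSOR_SNOOP_WIDTH * VMW_CURSOR_SNOOP_HEIGHT *
				desc->pitchBytesPerBlock, GFP_KERNEL);
		return image ?: ERR_PTR(-ENOMEM);
	}
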
@@ -864,14 +830,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
.pin = false
};
- ret = vmw_gem_object_create(dev_priv,
- &params,
- &res->guest_memory_bo);
+ ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1670,6 +1639,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
+ if (res->guest_memory_bo) {
+ ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
VMW_RES_SURFACE,
@@ -1684,7 +1661,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
- vmw_bo_add_detached_resource(res->guest_memory_bo, res);
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
@@ -2358,12 +2334,19 @@ int vmw_dumb_create(struct drm_file *file_priv,
vbo = res->guest_memory_bo;
vbo->is_dumb = true;
vbo->dumb_surface = vmw_res_to_srf(res);
-
+ drm_gem_object_put(&vbo->tbo.base);
+ /*
+	 * Unset the user surface dtor since this is not actually exposed
+	 * to userspace. The surface is owned via the dumb buffer's GEM handle.
+ */
+ struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
+ struct vmw_user_surface, srf);
+ usurf->prime.base.refcount_release = NULL;
err:
if (res)
vmw_resource_unreference(&res);
- if (ret)
- ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7625b3f71e0..7ee93e7191c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -262,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
- if (!val_buf->bo)
- return -ESRCH;
+ vmw_bo_reference(vbo);
+ val_buf->bo = &vbo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
}
@@ -656,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
struct vmw_validation_res_node *val;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- ttm_bo_put(entry->base.bo);
+ drm_gem_object_put(&entry->base.bo->base);
entry->base.bo = NULL;
}
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 5c2f459a2925..fcc2677a4229 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
+ depends on DRM && PCI && (m || (y && KUNIT=y))
+ depends on INTEL_VSEC || !INTEL_VSEC
+ depends on X86_PLATFORM_DEVICES || !(X86 && ACPI)
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -27,7 +29,6 @@ config DRM_XE
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if X86 && ACPI
- select X86_PLATFORM_DEVICES if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
select IOSF_MBI
@@ -39,7 +40,6 @@ config DRM_XE
select DRM_TTM_HELPER
select DRM_EXEC
select DRM_GPUVM
- select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
@@ -74,9 +74,22 @@ config DRM_XE_DP_TUNNEL
If in doubt say "Y".
+config DRM_XE_GPUSVM
+ bool "Enable CPU to GPU address mirroring"
+ depends on DRM_XE
+ depends on !UML
+ depends on DEVICE_PRIVATE
+ default y
+ select DRM_GPUSVM
+ help
+ Enable this option if you want support for CPU to GPU address
+ mirroring.
+
+	  If in doubt say "Y".
+
config DRM_XE_DEVMEM_MIRROR
bool "Enable device memory mirror"
- depends on DRM_XE
+ depends on DRM_XE_GPUSVM
select GET_FREE_REGION
default y
help
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 9699b08585f7..e4bf484d4121 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -125,12 +125,13 @@ xe-y += xe_bb.o \
xe_wopcm.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
-xe-$(CONFIG_DRM_GPUSVM) += xe_svm.o
+xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o
+xe-$(CONFIG_CONFIGFS_FS) += xe_configfs.o
# graphics virtualization (SR-IOV) support
xe-y += \
@@ -185,7 +186,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/intel_fbdev_fb.o \
display/xe_display.o \
display/xe_display_misc.o \
- display/xe_display_rps.o \
+ display/xe_display_rpm.o \
display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
@@ -196,7 +197,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-soc/intel_dram.o \
- i915-soc/intel_pch.o \
i915-soc/intel_rom.o
# Display code shared with i915
@@ -271,6 +271,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_panel.o \
i915-display/intel_pfit.o \
i915-display/intel_pmdemand.o \
+ i915-display/intel_pch.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
i915-display/intel_qp_tables.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index ec516e838ee8..448afb86e05c 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -141,6 +141,7 @@ enum xe_guc_action {
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
+ XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index d633f1c739e4..7de8f827281f 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -367,6 +367,7 @@ enum xe_guc_klv_ids {
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b,
};
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h b/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
deleted file mode 100644
index 21fec9cc837c..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RPS_H__
-#define __INTEL_RPS_H__
-
-#define gen5_rps_irq_handler(x) ({})
-
-#endif /* __INTEL_RPS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index dfec5108d2c3..9b7572e06f34 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -13,7 +13,6 @@
#include <drm/drm_drv.h>
#include "i915_utils.h"
-#include "intel_runtime_pm.h"
#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
@@ -22,28 +21,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
+/* compat platform checks only for soc/ usage */
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
-#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
-#define IS_I830(dev_priv) (dev_priv && 0)
-#define IS_I845G(dev_priv) (dev_priv && 0)
-#define IS_I85X(dev_priv) (dev_priv && 0)
-#define IS_I865G(dev_priv) (dev_priv && 0)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
-#define IS_I945G(dev_priv) (dev_priv && 0)
-#define IS_I945GM(dev_priv) (dev_priv && 0)
-#define IS_I965G(dev_priv) (dev_priv && 0)
-#define IS_I965GM(dev_priv) (dev_priv && 0)
-#define IS_G45(dev_priv) (dev_priv && 0)
-#define IS_GM45(dev_priv) (dev_priv && 0)
-#define IS_G4X(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
-#define IS_G33(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE_M(dev_priv) (dev_priv && 0)
-#define IS_SANDYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
-#define IS_IVB_GT1(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
@@ -71,39 +54,10 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
-#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)
#define IS_MOBILE(xe) (xe && 0)
-#define IS_TIGERLAKE_UY(xe) (xe && 0)
-#define IS_COMETLAKE_ULX(xe) (xe && 0)
-#define IS_COFFEELAKE_ULX(xe) (xe && 0)
-#define IS_KABYLAKE_ULX(xe) (xe && 0)
-#define IS_SKYLAKE_ULX(xe) (xe && 0)
-#define IS_HASWELL_ULX(xe) (xe && 0)
-#define IS_COMETLAKE_ULT(xe) (xe && 0)
-#define IS_COFFEELAKE_ULT(xe) (xe && 0)
-#define IS_KABYLAKE_ULT(xe) (xe && 0)
-#define IS_SKYLAKE_ULT(xe) (xe && 0)
-
-#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
-#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
-#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
-#define IS_RAPTORLAKE_U(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU)
-#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
-
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#ifdef CONFIG_ARM64
-/*
- * arm64 indirectly includes linux/rtc.h,
- * which defines a irq_lock, so include it
- * here before #define-ing it
- */
-#include <linux/rtc.h>
-#endif
-
-#define irq_lock irq.lock
-
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
deleted file mode 100644
index 274042bff1be..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RUNTIME_PM_H__
-#define __INTEL_RUNTIME_PM_H__
-
-#include "intel_wakeref.h"
-#include "xe_device_types.h"
-#include "xe_pm.h"
-
-#define intel_runtime_pm xe_runtime_pm
-
-static inline void disable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline void enable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline bool
-intel_runtime_pm_suspended(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return pm_runtime_suspended(xe->drm.dev);
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_get_noresume(xe);
-
- return INTEL_WAKEREF_DEF;
-}
-
-static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_put(xe);
-}
-
-static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref)
-{
- if (wakeref)
- intel_runtime_pm_put_unchecked(pm);
-}
-
-#define intel_runtime_pm_get_raw intel_runtime_pm_get
-#define intel_runtime_pm_put_raw intel_runtime_pm_put
-#define assert_rpm_wakelock_held(x) do { } while (0)
-#define assert_rpm_raw_wakeref_held(x) do { } while (0)
-
-#define with_intel_runtime_pm(rpm, wf) \
- for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
deleted file mode 100644
index 9c46556d33a4..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../../i915/soc/intel_pch.h"
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 3a1e505ff182..e8191562d122 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -45,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -56,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
}
if (IS_ERR(obj)) {
@@ -79,11 +79,11 @@ err:
return ERR_CAST(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
- struct drm_gem_object *_obj, struct i915_vma *vma)
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+ struct drm_gem_object *_obj, struct i915_vma *vma)
{
struct xe_bo *obj = gem_to_xe_bo(_obj);
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
if (obj->flags & XE_BO_FLAG_STOLEN)
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 0b0aca7a25af..68f064f33d4b 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -133,9 +133,6 @@ int xe_display_init_early(struct xe_device *xe)
/* Fake uncore lock */
spin_lock_init(&xe->uncore.lock);
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(xe);
-
intel_display_driver_early_probe(display);
/* Early display init.. */
@@ -147,7 +144,7 @@ int xe_display_init_early(struct xe_device *xe)
*/
intel_dram_detect(xe);
- intel_bw_init_hw(xe);
+ intel_bw_init_hw(display);
intel_display_device_info_runtime_init(display);
@@ -173,7 +170,7 @@ static void xe_display_fini(void *arg)
struct xe_device *xe = arg;
struct intel_display *display = &xe->display;
- intel_hpd_poll_fini(xe);
+ intel_hpd_poll_fini(display);
intel_hdcp_component_fini(display);
intel_audio_deinit(display);
intel_display_driver_remove(display);
@@ -220,11 +217,13 @@ void xe_display_unregister(struct xe_device *xe)
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (master_ctl & DISPLAY_IRQ)
- gen11_display_irq_handler(xe);
+ gen11_display_irq_handler(display);
}
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
@@ -240,19 +239,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
void xe_display_irq_reset(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- gen11_display_irq_reset(xe);
+ gen11_display_irq_reset(display);
}
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (gt->info.id == XE_GT0)
- gen11_de_irq_postinstall(xe);
+ gen11_de_irq_postinstall(display);
}
static bool suspend_to_idle(void)
@@ -305,7 +308,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
intel_dmc_suspend(display);
if (has_display(xe))
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
static void xe_display_disable_d3cold(struct xe_device *xe)
@@ -322,10 +325,10 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
intel_display_driver_init_hw(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -355,7 +358,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe)) {
intel_display_driver_suspend_access(display);
@@ -385,7 +388,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
intel_dp_mst_suspend(display);
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
if (has_display(xe))
intel_display_driver_suspend_access(display);
@@ -400,6 +403,8 @@ void xe_display_pm_shutdown(struct xe_device *xe)
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -408,7 +413,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
return;
}
- intel_hpd_poll_enable(xe);
+ intel_hpd_poll_enable(display);
}
void xe_display_pm_suspend_late(struct xe_device *xe)
@@ -482,7 +487,7 @@ void xe_display_pm_resume(struct xe_device *xe)
if (has_display(xe))
intel_display_driver_resume_access(display);
- intel_hpd_init(xe);
+ intel_hpd_init(display);
if (has_display(xe)) {
intel_display_driver_resume(display);
@@ -491,7 +496,7 @@ void xe_display_pm_resume(struct xe_device *xe)
}
if (has_display(xe))
- intel_hpd_poll_disable(xe);
+ intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -502,6 +507,8 @@ void xe_display_pm_resume(struct xe_device *xe)
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
@@ -510,9 +517,9 @@ void xe_display_pm_runtime_resume(struct xe_device *xe)
return;
}
- intel_hpd_init(xe);
- intel_hpd_poll_disable(xe);
- skl_watermark_ipc_update(xe);
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
+ skl_watermark_ipc_update(display);
}
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
new file mode 100644
index 000000000000..1955153aadba
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "intel_display_rpm.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+
+static struct xe_device *display_to_xe(struct intel_display *display)
+{
+ return container_of(display, struct xe_device, display);
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_display_rpm_get(display);
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_display_rpm_put(display, wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ xe_pm_runtime_get_noresume(display_to_xe(display));
+
+ return INTEL_WAKEREF_DEF;
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ if (wakeref)
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ struct xe_device *xe = display_to_xe(display);
+
+ return pm_runtime_suspended(xe->drm.dev);
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ /* FIXME */
+}
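
A typical caller pairs the wrappers above around display hardware access; a minimal usage sketch (illustrative, not part of the patch):

	static void example_touch_display_hw(struct intel_display *display)
	{
		struct ref_tracker *wakeref;

		wakeref = intel_display_rpm_get(display);
		if (!wakeref)
			return;	/* device could not be resumed */

		/* ... safe to touch display registers here ... */

		intel_display_rpm_put(display, wakeref);
	}
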
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
deleted file mode 100644
index fa616f9688a5..000000000000
--- a/drivers/gpu/drm/xe/display/xe_display_rps.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "intel_display_rps.h"
-
-void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
-}
-
-void intel_display_rps_mark_interactive(struct intel_display *display,
- struct intel_atomic_state *state,
- bool interactive)
-{
-}
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68e3d1959ad6..2933ca97d673 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -10,7 +10,9 @@
#include <generated/xe_wa_oob.h>
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
- return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ return XE_WA(xe_root_mmio_gt(xe), 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 7c02323e9531..b35a6f201d4a 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -9,7 +9,6 @@
#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
@@ -22,7 +21,8 @@
#define HECI_MEADDRESS_HDCP 18
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct xe_device *xe;
struct xe_bo *hdcp_bo;
u64 hdcp_cmd_in;
u64 hdcp_cmd_out;
@@ -30,14 +30,9 @@ struct intel_hdcp_gsc_message {
#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
-bool intel_hdcp_gsc_cs_required(struct intel_display *display)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
- return DISPLAY_VER(display) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct intel_display *display)
-{
- struct xe_device *xe = to_xe_device(display->drm);
+ struct xe_device *xe = to_xe_device(drm);
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
@@ -69,10 +64,9 @@ out:
}
 /* This function helps allocate memory for the command that we will send to the GSC CS */
-static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
- struct intel_hdcp_gsc_message *hdcp_message)
+static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
+ struct intel_hdcp_gsc_context *gsc_context)
{
- struct xe_device *xe = to_xe_device(display->drm);
struct xe_bo *bo = NULL;
u64 cmd_in, cmd_out;
int ret = 0;
@@ -84,7 +78,7 @@ static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
- drm_err(display->drm, "Failed to allocate bo for HDCP streaming command!\n");
+ drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
ret = PTR_ERR(bo);
goto out;
}
@@ -93,104 +87,60 @@ static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
cmd_out = cmd_in + PAGE_SIZE;
xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
- hdcp_message->hdcp_bo = bo;
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
+ gsc_context->hdcp_bo = bo;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->xe = xe;
+
out:
return ret;
}
-static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct xe_device *xe = to_xe_device(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here as it is already
 * going to be taken before this function is called
*/
- ret = intel_hdcp_gsc_initialize_message(display, hdcp_message);
+ ret = intel_hdcp_gsc_initialize_message(xe, gsc_context);
if (ret) {
- drm_err(display->drm, "Could not initialize hdcp_message\n");
- kfree(hdcp_message);
- return ret;
+ drm_err(&xe->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
}
- display->hdcp.hdcp_message = hdcp_message;
- return ret;
-}
-
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-int intel_hdcp_gsc_init(struct intel_display *display)
-{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- mutex_lock(&display->hdcp.hdcp_mutex);
- display->hdcp.arbiter = data;
- display->hdcp.arbiter->hdcp_dev = display->drm->dev;
- display->hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(display);
- if (ret)
- kfree(data);
-
- mutex_unlock(&display->hdcp.hdcp_mutex);
-
- return ret;
+ return gsc_context;
}
-void intel_hdcp_gsc_fini(struct intel_display *display)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct intel_hdcp_gsc_message *hdcp_message =
- display->hdcp.hdcp_message;
- struct i915_hdcp_arbiter *arb = display->hdcp.arbiter;
-
- if (hdcp_message) {
- xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
- kfree(hdcp_message);
- display->hdcp.hdcp_message = NULL;
- }
+ if (!gsc_context)
+ return;
- kfree(arb);
- display->hdcp.arbiter = NULL;
+ xe_bo_unpin_map_no_vm(gsc_context->hdcp_bo);
+ kfree(gsc_context);
}
static int xe_gsc_send_sync(struct xe_device *xe,
- struct intel_hdcp_gsc_message *hdcp_message,
+ struct intel_hdcp_gsc_context *gsc_context,
u32 msg_size_in, u32 msg_size_out,
u32 addr_out_off)
{
- struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
- struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+ struct xe_gt *gt = gsc_context->hdcp_bo->tile->media_gt;
+ struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
struct xe_gsc *gsc = &gt->uc.gsc;
int ret;
- ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
- hdcp_message->hdcp_cmd_out, msg_size_out);
+ ret = xe_gsc_pkt_submit_kernel(gsc, gsc_context->hdcp_cmd_in, msg_size_in,
+ gsc_context->hdcp_cmd_out, msg_size_out);
if (ret) {
drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
return ret;
@@ -205,12 +155,12 @@ static int xe_gsc_send_sync(struct xe_device *xe,
return ret;
}
-ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct xe_device *xe = gsc_context->xe;
const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
- struct intel_hdcp_gsc_message *hdcp_message;
u64 host_session_id;
u32 msg_size_in, msg_size_out;
u32 addr_out_off, addr_in_wr_off = 0;
@@ -223,15 +173,14 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
- hdcp_message = xe->display.hdcp.hdcp_message;
addr_out_off = PAGE_SIZE;
host_session_id = xe_gsc_create_host_session_id();
xe_pm_runtime_get_noresume(xe);
- addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+ addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
addr_in_wr_off, HECI_MEADDRESS_HDCP,
host_session_id, msg_in_len);
- xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+ xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,
msg_in, msg_in_len);
/*
* Keep sending request in case the pending bit is set no need to add
@@ -240,7 +189,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
* 20 times each message 50 ms apart
*/
do {
- ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+ ret = xe_gsc_send_sync(xe, gsc_context, msg_size_in, msg_size_out,
addr_out_off);
/* Only try again if gsc says so */
@@ -254,7 +203,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
if (ret)
goto out;
- xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+ xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
addr_out_off + HDCP_GSC_HEADER_SIZE,
msg_out_len);
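
A hedged caller sketch (not part of the patch; the function name is made up) of the new context lifecycle, now that the hdcp arbiter plumbing is gone: allocate a context, exchange one message, free it again.

static int example_hdcp_roundtrip(struct drm_device *drm,
				  void *req, size_t req_len,
				  void *reply, size_t reply_len)
{
	struct intel_hdcp_gsc_context *gsc_context;
	ssize_t ret;

	gsc_context = intel_hdcp_gsc_context_alloc(drm);
	if (IS_ERR(gsc_context))
		return PTR_ERR(gsc_context);

	/* Blocks until the GSC answers or the retry budget is exhausted */
	ret = intel_hdcp_gsc_msg_send(gsc_context, req, req_len,
				      reply, reply_len);

	intel_hdcp_gsc_context_free(gsc_context);

	return ret < 0 ? ret : 0;
}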
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 4ca0cb571194..6502b8274173 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -83,7 +83,7 @@ initial_plane_bo(struct xe_device *xe,
if (plane_config->size == 0)
return NULL;
- flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
+ flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
diff --git a/drivers/gpu/drm/xe/instructions/xe_alu_commands.h b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
new file mode 100644
index 000000000000..2987b10d3e16
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_ALU_COMMANDS_H_
+#define _XE_ALU_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+/* Instruction Opcodes */
+#define CS_ALU_OPCODE_NOOP 0x000
+#define CS_ALU_OPCODE_FENCE_RD 0x001
+#define CS_ALU_OPCODE_FENCE_WR 0x002
+#define CS_ALU_OPCODE_LOAD 0x080
+#define CS_ALU_OPCODE_LOADINV 0x480
+#define CS_ALU_OPCODE_LOAD0 0x081
+#define CS_ALU_OPCODE_LOAD1 0x481
+#define CS_ALU_OPCODE_LOADIND 0x082
+#define CS_ALU_OPCODE_ADD 0x100
+#define CS_ALU_OPCODE_SUB 0x101
+#define CS_ALU_OPCODE_AND 0x102
+#define CS_ALU_OPCODE_OR 0x103
+#define CS_ALU_OPCODE_XOR 0x104
+#define CS_ALU_OPCODE_SHL 0x105
+#define CS_ALU_OPCODE_SHR 0x106
+#define CS_ALU_OPCODE_SAR 0x107
+#define CS_ALU_OPCODE_STORE 0x180
+#define CS_ALU_OPCODE_STOREINV 0x580
+#define CS_ALU_OPCODE_STOREIND 0x181
+
+/* Instruction Operands */
+#define CS_ALU_OPERAND_REG(n) REG_FIELD_PREP(GENMASK(3, 0), (n))
+#define CS_ALU_OPERAND_REG0 0x0
+#define CS_ALU_OPERAND_REG1 0x1
+#define CS_ALU_OPERAND_REG2 0x2
+#define CS_ALU_OPERAND_REG3 0x3
+#define CS_ALU_OPERAND_REG4 0x4
+#define CS_ALU_OPERAND_REG5 0x5
+#define CS_ALU_OPERAND_REG6 0x6
+#define CS_ALU_OPERAND_REG7 0x7
+#define CS_ALU_OPERAND_REG8 0x8
+#define CS_ALU_OPERAND_REG9 0x9
+#define CS_ALU_OPERAND_REG10 0xa
+#define CS_ALU_OPERAND_REG11 0xb
+#define CS_ALU_OPERAND_REG12 0xc
+#define CS_ALU_OPERAND_REG13 0xd
+#define CS_ALU_OPERAND_REG14 0xe
+#define CS_ALU_OPERAND_REG15 0xf
+#define CS_ALU_OPERAND_SRCA 0x20
+#define CS_ALU_OPERAND_SRCB 0x21
+#define CS_ALU_OPERAND_ACCU 0x31
+#define CS_ALU_OPERAND_ZF 0x32
+#define CS_ALU_OPERAND_CF 0x33
+#define CS_ALU_OPERAND_NA 0 /* N/A operand */
+
+/* Command Streamer ALU Instructions */
+#define CS_ALU_INSTR(opcode, op1, op2) (REG_FIELD_PREP(GENMASK(31, 20), (opcode)) | \
+ REG_FIELD_PREP(GENMASK(19, 10), (op1)) | \
+ REG_FIELD_PREP(GENMASK(9, 0), (op2)))
+
+#define __CS_ALU_INSTR(opcode, op1, op2) CS_ALU_INSTR(CS_ALU_OPCODE_##opcode, \
+ CS_ALU_OPERAND_##op1, \
+ CS_ALU_OPERAND_##op2)
+
+#define CS_ALU_INSTR_NOOP __CS_ALU_INSTR(NOOP, NA, NA)
+#define CS_ALU_INSTR_LOAD(op1, op2) __CS_ALU_INSTR(LOAD, op1, op2)
+#define CS_ALU_INSTR_LOADINV(op1, op2) __CS_ALU_INSTR(LOADINV, op1, op2)
+#define CS_ALU_INSTR_LOAD0(op1) __CS_ALU_INSTR(LOAD0, op1, NA)
+#define CS_ALU_INSTR_LOAD1(op1) __CS_ALU_INSTR(LOAD1, op1, NA)
+#define CS_ALU_INSTR_ADD __CS_ALU_INSTR(ADD, NA, NA)
+#define CS_ALU_INSTR_SUB __CS_ALU_INSTR(SUB, NA, NA)
+#define CS_ALU_INSTR_AND __CS_ALU_INSTR(AND, NA, NA)
+#define CS_ALU_INSTR_OR __CS_ALU_INSTR(OR, NA, NA)
+#define CS_ALU_INSTR_XOR __CS_ALU_INSTR(XOR, NA, NA)
+#define CS_ALU_INSTR_STORE(op1, op2) __CS_ALU_INSTR(STORE, op1, op2)
+#define CS_ALU_INSTR_STOREINV(op1, op2) __CS_ALU_INSTR(STOREINV, op1, op2)
+
+#endif
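
Together with MI_MATH() from xe_mi_commands.h (also added in this series), these macros encode ALU programs one dword at a time. A hedged sketch, assuming a caller-provided dword buffer, of a program computing GPR2 = GPR0 + GPR1:

static u32 *emit_gpr2_eq_gpr0_plus_gpr1(u32 *cs)
{
	*cs++ = MI_MATH(4);			/* four ALU instruction dwords follow */
	*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);	/* SRCA <- GPR0 */
	*cs++ = CS_ALU_INSTR_LOAD(SRCB, REG1);	/* SRCB <- GPR1 */
	*cs++ = CS_ALU_INSTR_ADD;		/* ACCU <- SRCA + SRCB */
	*cs++ = CS_ALU_INSTR_STORE(REG2, ACCU);	/* GPR2 <- ACCU */

	return cs;
}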
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
index 31d28a67ef6a..457881af8af9 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
@@ -137,6 +137,7 @@
#define CMD_3DSTATE_CLIP_MESH GFXPIPE_3D_CMD(0x0, 0x81)
#define CMD_3DSTATE_SBE_MESH GFXPIPE_3D_CMD(0x0, 0x82)
#define CMD_3DSTATE_CPSIZE_CONTROL_BUFFER GFXPIPE_3D_CMD(0x0, 0x83)
+#define CMD_3DSTATE_COARSE_PIXEL GFXPIPE_3D_CMD(0x0, 0x89)
#define CMD_3DSTATE_DRAWING_RECTANGLE GFXPIPE_3D_CMD(0x1, 0x0)
#define CMD_3DSTATE_CHROMA_KEY GFXPIPE_3D_CMD(0x1, 0x4)
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 167fb0f742de..e3f5e8bb3ebc 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -32,6 +32,7 @@
#define MI_BATCH_BUFFER_END __MI_INSTR(0xA)
#define MI_TOPOLOGY_FILTER __MI_INSTR(0xD)
#define MI_FORCE_WAKEUP __MI_INSTR(0x1D)
+#define MI_MATH(n) (__MI_INSTR(0x1A) | XE_INSTR_NUM_DW((n) + 1))
#define MI_STORE_DATA_IMM __MI_INSTR(0x20)
#define MI_SDI_GGTT REG_BIT(22)
@@ -47,6 +48,10 @@
#define MI_LRI_FORCE_POSTED REG_BIT(12)
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)
+#define MI_STORE_REGISTER_MEM (__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4))
+#define MI_SRM_USE_GGTT REG_BIT(22)
+#define MI_SRM_ADD_CS_OFFSET REG_BIT(19)
+
#define MI_FLUSH_DW __MI_INSTR(0x26)
#define MI_FLUSH_DW_PROTECTED_MEM_EN REG_BIT(22)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
@@ -61,6 +66,10 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
+#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3))
+#define MI_LRR_DST_CS_MMIO REG_BIT(19)
+#define MI_LRR_SRC_CS_MMIO REG_BIT(18)
+
#define MI_COPY_MEM_MEM (__MI_INSTR(0x2e) | XE_INSTR_NUM_DW(5))
#define MI_COPY_MEM_MEM_SRC_GGTT REG_BIT(22)
#define MI_COPY_MEM_MEM_DST_GGTT REG_BIT(21)
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index fb8ec317b6ee..7ade41e2b7b3 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -43,6 +43,10 @@
#define XEHPC_BCS8_RING_BASE 0x3ee000
#define GSCCS_RING_BASE 0x11a000
+#define ENGINE_ID(base) XE_REG((base) + 0x8c)
+#define ENGINE_INSTANCE_ID REG_GENMASK(9, 4)
+#define ENGINE_CLASS_ID REG_GENMASK(2, 0)
+
#define RING_TAIL(base) XE_REG((base) + 0x30)
#define TAIL_ADDR REG_GENMASK(20, 3)
@@ -154,6 +158,7 @@
#define STOP_RING REG_BIT(8)
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
+#define RING_CTX_TIMESTAMP_UDW(base) XE_REG((base) + 0x3ac)
#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc)
#define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4)
@@ -188,6 +193,10 @@
#define PREEMPT_GPGPU_LEVEL_MASK PREEMPT_GPGPU_LEVEL(1, 1)
#define PREEMPT_3D_OBJECT_LEVEL REG_BIT(0)
+#define CS_GPR_DATA(base, n) XE_REG((base) + 0x600 + (n) * 4)
+#define CS_GPR_REG(base, n) CS_GPR_DATA((base), (n) * 2)
+#define CS_GPR_REG_UDW(base, n) CS_GPR_DATA((base), (n) * 2 + 1)
+
#define VDBOX_CGCTL3F08(base) XE_REG((base) + 0x3f08)
#define CG3DDISHRS_CLKGATE_DIS REG_BIT(5)
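
The new CS_GPR_* helpers pair with the MI_STORE_REGISTER_MEM encoding added to xe_mi_commands.h above. A hedged sketch (buffer handling and the GGTT destination are stand-ins) that spills the 64-bit GPR0 of an engine to memory:

static u32 *emit_gpr0_to_ggtt(u32 *cs, u32 base, u64 ggtt_addr)
{
	/* Lower 32 bits of GPR0 */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT;
	*cs++ = CS_GPR_REG(base, 0).addr;
	*cs++ = lower_32_bits(ggtt_addr);
	*cs++ = upper_32_bits(ggtt_addr);

	/* Upper 32 bits of GPR0 */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT;
	*cs++ = CS_GPR_REG_UDW(base, 0).addr;
	*cs++ = lower_32_bits(ggtt_addr + 4);
	*cs++ = upper_32_bits(ggtt_addr + 4);

	return cs;
}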
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index da1f198ac107..5cd5ab8529c5 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -62,7 +62,6 @@
#define LE_SSE_MASK REG_GENMASK(18, 17)
#define LE_SSE(value) REG_FIELD_PREP(LE_SSE_MASK, value)
#define LE_COS_MASK REG_GENMASK(16, 15)
-#define LE_COS(value) REG_FIELD_PREP(LE_COS_MASK)
#define LE_SCF_MASK REG_BIT(14)
#define LE_SCF(value) REG_FIELD_PREP(LE_SCF_MASK, value)
#define LE_PFM_MASK REG_GENMASK(13, 11)
@@ -157,6 +156,7 @@
#define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108)
#define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED)
+#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12)
#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
#define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED)
@@ -392,6 +392,18 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define LSN_VC_REG2 XE_REG_MCR(0xb0c8)
+#define LSN_LNI_WGT_MASK REG_GENMASK(31, 28)
+#define LSN_LNI_WGT(value) REG_FIELD_PREP(LSN_LNI_WGT_MASK, value)
+#define LSN_LNE_WGT_MASK REG_GENMASK(27, 24)
+#define LSN_LNE_WGT(value) REG_FIELD_PREP(LSN_LNE_WGT_MASK, value)
+#define LSN_DIM_X_WGT_MASK REG_GENMASK(23, 20)
+#define LSN_DIM_X_WGT(value) REG_FIELD_PREP(LSN_DIM_X_WGT_MASK, value)
+#define LSN_DIM_Y_WGT_MASK REG_GENMASK(19, 16)
+#define LSN_DIM_Y_WGT(value) REG_FIELD_PREP(LSN_DIM_Y_WGT_MASK, value)
+#define LSN_DIM_Z_WGT_MASK REG_GENMASK(15, 12)
+#define LSN_DIM_Z_WGT(value) REG_FIELD_PREP(LSN_DIM_Z_WGT_MASK, value)
+
#define L3SQCREG2 XE_REG_MCR(0xb104)
#define COMPMEMRD256BOVRFETCHEN REG_BIT(20)
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 57944f90bbf6..994af591a2e8 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -11,7 +11,9 @@
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
+#define CTX_BB_PER_CTX_PTR (0x12 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
+#define CTX_TIMESTAMP_UDW (0x24 + 1)
#define CTX_INDIRECT_RING_STATE (0x26 + 1)
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
index f5e5234857c1..5394a1373a6b 100644
--- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -38,10 +38,10 @@
#define TEMP_MASK REG_GENMASK(7, 0)
#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
-#define PKG_PWR_LIM_1 REG_GENMASK(14, 0)
-#define PKG_PWR_LIM_1_EN REG_BIT(15)
-#define PKG_PWR_LIM_1_TIME REG_GENMASK(23, 17)
-#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23, 22)
-#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21, 17)
+#define PWR_LIM_VAL REG_GENMASK(14, 0)
+#define PWR_LIM_EN REG_BIT(15)
+#define PWR_LIM_TIME REG_GENMASK(23, 17)
+#define PWR_LIM_TIME_X REG_GENMASK(23, 22)
+#define PWR_LIM_TIME_Y REG_GENMASK(21, 17)
#endif /* _XE_MCHBAR_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index 8846eb9ce2a4..c556a04670ee 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -18,13 +18,12 @@
#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
-#define BMG_PACKAGE_POWER_SKU XE_REG(0x138098)
-#define BMG_PACKAGE_POWER_SKU_UNIT XE_REG(0x1380dc)
#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
+#define BMG_FAN_1_SPEED XE_REG(0x138140)
+#define BMG_FAN_2_SPEED XE_REG(0x138170)
+#define BMG_FAN_3_SPEED XE_REG(0x1381a0)
#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
-#define BMG_PACKAGE_RAPL_LIMIT XE_REG(0x138440)
#define BMG_PLATFORM_ENERGY_STATUS XE_REG(0x138458)
-#define BMG_PLATFORM_POWER_LIMIT XE_REG(0x138460)
#endif /* _XE_PCODE_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 9fde67ca989f..378dcd0fb414 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo, true);
+ ret = xe_bo_evict(bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -252,7 +252,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for_each_gt(__gt, xe, id)
xe_gt_sanitize(__gt);
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
/*
* Snapshotting the CTB and copying back a potentially old
* version seems risky, depending on what might have been
@@ -273,7 +273,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
goto cleanup_all;
}
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err) {
KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
goto cleanup_all;
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index cedd3e88a6fb..c53f67ce4b0a 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -65,7 +65,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* the exporter and the importer should be the same bo.
*/
swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported, true);
+ ret = xe_bo_evict(exported);
swap(exported->ttm.base.dma_buf, dmabuf);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index d5fe0ea889ad..4a65e3103f77 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -202,8 +202,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -211,8 +210,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -222,8 +220,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(tiny));
@@ -512,7 +509,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
- ret = xe_bo_evict(vram_bo, true);
+ ret = xe_bo_evict(vram_bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 64f9c936eea0..7aa2c17825da 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -55,6 +55,8 @@ static struct ttm_placement sys_placement = {
.placement = &sys_placement_flags,
};
+static struct ttm_placement purge_placement;
+
static const struct ttm_place tt_placement_flags[] = {
{
.fpfn = 0,
@@ -189,11 +191,18 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
static bool force_contiguous(u32 bo_flags)
{
+ if (bo_flags & XE_BO_FLAG_STOLEN)
+ return true; /* users expect this */
+ else if (bo_flags & XE_BO_FLAG_PINNED &&
+ !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
+ return true; /* needs vmap */
+
/*
 * For eviction / restore on suspend / resume, objects pinned in VRAM
 * must be contiguous; also, only contiguous BOs support xe_bo_vmap.
*/
- return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
+ bo_flags & XE_BO_FLAG_PINNED;
}
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
@@ -281,6 +290,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
static void xe_evict_flags(struct ttm_buffer_object *tbo,
struct ttm_placement *placement)
{
+ struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct xe_bo *bo;
if (!xe_bo_is_xe_bo(tbo)) {
@@ -290,7 +301,7 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
- *placement = sys_placement;
+ *placement = device_unplugged ? purge_placement : sys_placement;
return;
}
@@ -300,6 +311,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
+ if (device_unplugged && !tbo->base.dma_buf) {
+ *placement = purge_placement;
+ return;
+ }
+
/*
* For xe, sg bos that are evicted to system just triggers a
* rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -657,11 +673,20 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
ttm);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct sg_table *sg;
xe_assert(xe, attach);
xe_assert(xe, ttm_bo->ttm);
+ if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
+ ttm_bo->sg) {
+ dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ }
+
if (new_res->mem_type == XE_PL_SYSTEM)
goto out;
@@ -816,21 +841,6 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out;
}
- /* Reject BO eviction if BO is bound to current VM. */
- if (evict && ctx->resv) {
- struct drm_gpuvm_bo *vm_bo;
-
- drm_gem_for_each_gpuvm_bo(vm_bo, &bo->ttm.base) {
- struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
-
- if (xe_vm_resv(vm) == ctx->resv &&
- xe_vm_in_preempt_fence_mode(vm)) {
- ret = -EBUSY;
- goto out;
- }
- }
- }
-
/*
* Failed multi-hop where the old_mem is still marked as
* TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
@@ -898,79 +908,44 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_pm_runtime_get_noresume(xe);
}
- if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
- /*
- * Kernel memory that is pinned should only be moved on suspend
- * / resume, some of the pinned memory is required for the
- * device to resume / use the GPU to move other evicted memory
- * (user memory) around. This likely could be optimized a bit
- * further where we find the minimum set of pinned memory
- * required for resume but for simplity doing a memcpy for all
- * pinned memory.
- */
- ret = xe_bo_vmap(bo);
- if (!ret) {
- ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
-
- /* Create a new VMAP once kernel BO back in VRAM */
- if (!ret && resource_is_vram(new_mem)) {
- struct xe_vram_region *vram = res_to_mem_region(new_mem);
- void __iomem *new_addr = vram->mapping +
- (new_mem->start << PAGE_SHIFT);
-
- if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
- ret = -EINVAL;
- xe_pm_runtime_put(xe);
- goto out;
- }
+ if (move_lacks_source) {
+ u32 flags = 0;
- xe_assert(xe, new_mem->start ==
- bo->placements->fpfn);
+ if (mem_type_is_vram(new_mem->mem_type))
+ flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
+ else if (handle_system_ccs)
+ flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
- iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
- }
- }
+ fence = xe_migrate_clear(migrate, bo, new_mem, flags);
} else {
- if (move_lacks_source) {
- u32 flags = 0;
-
- if (mem_type_is_vram(new_mem->mem_type))
- flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
- else if (handle_system_ccs)
- flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
-
- fence = xe_migrate_clear(migrate, bo, new_mem, flags);
- }
- else
- fence = xe_migrate_copy(migrate, bo, bo, old_mem,
- new_mem, handle_system_ccs);
- if (IS_ERR(fence)) {
- ret = PTR_ERR(fence);
- xe_pm_runtime_put(xe);
- goto out;
- }
- if (!move_lacks_source) {
- ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
- true, new_mem);
- if (ret) {
- dma_fence_wait(fence, false);
- ttm_bo_move_null(ttm_bo, new_mem);
- ret = 0;
- }
- } else {
- /*
- * ttm_bo_move_accel_cleanup() may blow up if
- * bo->resource == NULL, so just attach the
- * fence and set the new resource.
- */
- dma_resv_add_fence(ttm_bo->base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
+ fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
+ handle_system_ccs);
+ }
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ xe_pm_runtime_put(xe);
+ goto out;
+ }
+ if (!move_lacks_source) {
+ ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
+ new_mem);
+ if (ret) {
+ dma_fence_wait(fence, false);
ttm_bo_move_null(ttm_bo, new_mem);
+ ret = 0;
}
-
- dma_fence_put(fence);
+ } else {
+ /*
+ * ttm_bo_move_accel_cleanup() may blow up if
+ * bo->resource == NULL, so just attach the
+ * fence and set the new resource.
+ */
+ dma_resv_add_fence(ttm_bo->base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ ttm_bo_move_null(ttm_bo, new_mem);
}
+ dma_fence_put(fence);
xe_pm_runtime_put(xe);
out:
@@ -1023,6 +998,25 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
return lret;
}
+static bool
+xe_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ if (!ttm_bo_eviction_valuable(bo, place))
+ return false;
+
+ if (!xe_bo_is_xe_bo(bo))
+ return true;
+
+ drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) {
+ if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm)))
+ return false;
+ }
+
+ return true;
+}
+
/**
* xe_bo_shrink() - Try to shrink an xe bo.
* @ctx: The struct ttm_operation_ctx used for shrinking.
@@ -1057,7 +1051,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
(flags.purge && !xe_tt->purgeable))
return -EBUSY;
- if (!ttm_bo_eviction_valuable(bo, &place))
+ if (!xe_bo_eviction_valuable(bo, &place))
return -EBUSY;
if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
@@ -1095,6 +1089,80 @@ out_unref:
}
/**
+ * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
+ * up in system memory.
+ * @bo: The buffer object to prepare.
+ *
+ * On successful completion, the object backup pages are allocated. Expectation
+ * is that this is called from the PM notifier, prior to suspend/hibernation.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup;
+ int ret = 0;
+
+ xe_bo_lock(bo, false);
+
+ xe_assert(xe, !bo->backup_obj);
+
+ /*
+ * Since this is called from the PM notifier we might have raced with
+ * someone unpinning this between dropping the pinned list lock and
+ * grabbing the above bo lock.
+ */
+ if (!xe_bo_is_pinned(bo))
+ goto out_unlock_bo;
+
+ if (!xe_bo_is_vram(bo))
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ ttm_bo_pin(&backup->ttm);
+ bo->backup_obj = backup;
+
+out_unlock_bo:
+ xe_bo_unlock(bo);
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
+ * @bo: The buffer object to undo the prepare for.
+ *
+ * Always returns 0. The backup object is removed, if still present. Expectation
+ * is that this is called from the PM notifier when undoing the prepare step.
+ *
+ * Return: Always returns 0.
+ */
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
+{
+ xe_bo_lock(bo, false);
+ if (bo->backup_obj) {
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
+ xe_bo_unlock(bo);
+
+ return 0;
+}
+
+/**
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
*
@@ -1107,59 +1175,99 @@ out_unref:
*/
int xe_bo_evict_pinned(struct xe_bo *bo)
{
- struct ttm_place place = {
- .mem_type = XE_PL_TT,
- };
- struct ttm_placement placement = {
- .placement = &place,
- .num_placement = 1,
- };
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .gfp_retry_mayfail = true,
- };
- struct ttm_resource *new_mem;
- int ret;
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool backup_created = false;
+ bool unmap = false;
+ int ret = 0;
- xe_bo_assert_held(bo);
+ xe_bo_lock(bo, false);
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ if (WARN_ON(!bo->ttm.resource)) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (WARN_ON(!xe_bo_is_pinned(bo))) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
if (!xe_bo_is_vram(bo))
- return 0;
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ if (!backup) {
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ backup_created = true;
+ }
- ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
- if (!bo->ttm.ttm) {
- bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
- if (!bo->ttm.ttm) {
- ret = -ENOMEM;
- goto err_res_free;
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
+
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
+
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
+
+ fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
+ backup->ttm.resource, false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_backup;
}
- }
- ret = ttm_bo_populate(&bo->ttm, &ctx);
- if (ret)
- goto err_res_free;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_backup;
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
+ bo->size);
+ }
- return 0;
+ if (!bo->backup_obj)
+ bo->backup_obj = backup;
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+out_backup:
+ xe_bo_vunmap(backup);
+ if (ret && backup_created)
+ xe_bo_put(backup);
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
@@ -1180,50 +1288,109 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
.interruptible = false,
.gfp_retry_mayfail = false,
};
- struct ttm_resource *new_mem;
- struct ttm_place *place = &bo->placements[0];
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool unmap = false;
int ret;
- xe_bo_assert_held(bo);
+ if (!backup)
+ return 0;
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ xe_bo_lock(bo, false);
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (!xe_bo_is_pinned(backup)) {
+ ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
+ if (ret)
+ goto out_unlock_bo;
+ }
- if (WARN_ON(xe_bo_is_vram(bo)))
- return -EINVAL;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
- if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
- return -EINVAL;
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
- if (!mem_type_is_vram(place->mem_type))
- return 0;
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = ttm_bo_populate(&bo->ttm, &ctx);
- if (ret)
- goto err_res_free;
+ fence = xe_migrate_copy(migrate, backup, bo,
+ backup->ttm.resource, bo->ttm.resource,
+ false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_unlock_bo;
+ }
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_unlock_bo;
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
- return 0;
+ xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
+ bo->size);
+ }
+
+ bo->backup_obj = NULL;
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+out_backup:
+ xe_bo_vunmap(backup);
+ if (!bo->backup_obj) {
+ if (xe_bo_is_pinned(backup))
+ ttm_bo_unpin(&backup->ttm);
+ xe_bo_put(backup);
+ }
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
+{
+ struct ttm_buffer_object *ttm_bo = &bo->ttm;
+ struct ttm_tt *tt = ttm_bo->ttm;
+
+ if (tt) {
+ struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);
+
+ if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
+ dma_buf_unmap_attachment(ttm_bo->base.import_attach,
+ ttm_bo->sg,
+ DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ xe_tt->sg = NULL;
+ } else if (xe_tt->sg) {
+ dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(xe_tt->sg);
+ xe_tt->sg = NULL;
+ }
+ }
+
+ return 0;
+}
+
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
unsigned long page_offset)
{
@@ -1371,6 +1538,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
struct xe_res_cursor cursor;
struct xe_vram_region *vram;
int bytes_left = len;
+ int err = 0;
xe_bo_assert_held(bo);
xe_device_assert_mem_access(xe);
@@ -1378,9 +1546,14 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
if (!mem_type_is_vram(ttm_bo->resource->mem_type))
return -EIO;
- /* FIXME: Use GPU for non-visible VRAM */
- if (!xe_ttm_resource_visible(ttm_bo->resource))
- return -EIO;
+ if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) {
+ struct xe_migrate *migrate =
+ mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
+
+ err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
+ write);
+ goto out;
+ }
vram = res_to_mem_region(ttm_bo->resource);
xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
@@ -1404,7 +1577,8 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
xe_res_next(&cursor, PAGE_SIZE);
} while (bytes_left);
- return len;
+out:
+ return err ?: len;
}
const struct ttm_device_funcs xe_ttm_funcs = {
@@ -1418,7 +1592,7 @@ const struct ttm_device_funcs xe_ttm_funcs = {
.io_mem_pfn = xe_ttm_io_mem_pfn,
.access_memory = xe_ttm_access_memory,
.release_notify = xe_ttm_bo_release_notify,
- .eviction_valuable = ttm_bo_eviction_valuable,
+ .eviction_valuable = xe_bo_eviction_valuable,
.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
.swap_notify = xe_ttm_bo_swap_notify,
};
@@ -1448,6 +1622,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
+ if (bo->parent_obj)
+ xe_bo_put(bo->parent_obj);
+
mutex_lock(&xe->mem_access.vram_userfault.lock);
if (!list_empty(&bo->vram_userfault_link))
list_del(&bo->vram_userfault_link);
@@ -1947,7 +2124,7 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
alignment);
if (IS_ERR(bo))
return bo;
@@ -2049,7 +2226,8 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
struct xe_bo *bo;
u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
- dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
+ dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, !(*src)->vmap.is_iomem);
@@ -2073,10 +2251,16 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
- if (res->mem_type == XE_PL_STOLEN)
+ switch (res->mem_type) {
+ case XE_PL_STOLEN:
return xe_ttm_stolen_gpu_offset(xe);
-
- return res_to_mem_region(res)->dpa_base;
+ case XE_PL_TT:
+ case XE_PL_SYSTEM:
+ return 0;
+ default:
+ return res_to_mem_region(res)->dpa_base;
+ }
+ return 0;
}
/**
@@ -2102,12 +2286,9 @@ int xe_bo_pin_external(struct xe_bo *bo)
if (err)
return err;
- if (xe_bo_is_vram(bo)) {
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- }
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
+ spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
@@ -2149,25 +2330,12 @@ int xe_bo_pin(struct xe_bo *bo)
if (err)
return err;
- /*
- * For pinned objects on DGFX, which are also in vram, we expect
- * these to be in contiguous VRAM memory. Required eviction / restore
- * during suspend / resume (force restore to same physical address).
- */
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- if (mem_type_is_vram(place->mem_type)) {
- xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
-
- place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
- vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
- place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
- }
- }
-
if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
+ else
+ list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
spin_unlock(&xe->pinned.lock);
}
@@ -2231,6 +2399,13 @@ void xe_bo_unpin(struct xe_bo *bo)
xe_assert(xe, !list_empty(&bo->pinned_link));
list_del_init(&bo->pinned_link);
spin_unlock(&xe->pinned.lock);
+
+ if (bo->backup_obj) {
+ if (xe_bo_is_pinned(bo->backup_obj))
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
}
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
@@ -2260,6 +2435,8 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
.no_wait_gpu = false,
.gfp_retry_mayfail = true,
};
+ struct pin_cookie cookie;
+ int ret;
if (vm) {
lockdep_assert_held(&vm->lock);
@@ -2269,8 +2446,12 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
+ cookie = xe_vm_set_validating(vm, allow_res_evict);
trace_xe_bo_validate(bo);
- return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
+ ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
+ xe_vm_clear_validating(vm, allow_res_evict, cookie);
+
+ return ret;
}
bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
@@ -2386,7 +2567,7 @@ typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
u64 value);
static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
- [DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_set_pxp_type,
+ [DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE] = gem_create_set_pxp_type,
};
static int gem_create_user_ext_set_property(struct xe_device *xe,
@@ -2398,7 +2579,7 @@ static int gem_create_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -2435,7 +2616,7 @@ static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -2759,19 +2940,17 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
/**
* xe_bo_evict - Evict an object to evict placement
* @bo: The buffer object to migrate.
- * @force_alloc: Set force_alloc in ttm_operation_ctx
*
* On successful completion, the object memory will be moved to evict
* placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
+int xe_bo_evict(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = force_alloc,
.gfp_retry_mayfail = true,
};
struct ttm_placement placement;
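
The notifier prepare/unprepare helpers added to xe_bo.c above are meant to run from a PM notifier, so backup pages are allocated while allocations may still block. A hedged sketch of such a notifier (the pm_notifier member and the *_all_pinned wrappers from xe_bo_evict.c later in this series are assumptions of this example):

static int example_xe_pm_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	/* Assumes xe_device carries its notifier_block as "pm_notifier" */
	struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		if (xe_bo_notifier_prepare_all_pinned(xe))
			return NOTIFY_BAD;
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		/* Runs on both success and failure of the transition */
		xe_bo_notifier_unprepare_all_pinned(xe);
		break;
	}

	return NOTIFY_DONE;
}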
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index ec3e4446d027..02ada1fb8a23 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,20 +39,23 @@
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
-#define XE_BO_FLAG_GGTT0 BIT(18)
-#define XE_BO_FLAG_GGTT1 BIT(19)
-#define XE_BO_FLAG_GGTT2 BIT(20)
-#define XE_BO_FLAG_GGTT3 BIT(21)
-#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
- XE_BO_FLAG_GGTT1 | \
- XE_BO_FLAG_GGTT2 | \
- XE_BO_FLAG_GGTT3)
-#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(22)
+#define XE_BO_FLAG_PINNED_NORESTORE BIT(18)
+#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
+#define XE_BO_FLAG_GGTT0 BIT(20)
+#define XE_BO_FLAG_GGTT1 BIT(21)
+#define XE_BO_FLAG_GGTT2 BIT(22)
+#define XE_BO_FLAG_GGTT3 BIT(23)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
 /* this one is triggered internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
+ XE_BO_FLAG_GGTT1 | \
+ XE_BO_FLAG_GGTT2 | \
+ XE_BO_FLAG_GGTT3)
+
#define XE_BO_FLAG_GGTTx(tile) \
(XE_BO_FLAG_GGTT0 << (tile)->id)
@@ -271,11 +274,15 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
+int xe_bo_evict(struct xe_bo *bo);
int xe_bo_evict_pinned(struct xe_bo *bo);
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo);
+
extern const struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 6a40eedd9db1..ed3746d32b27 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -10,28 +10,103 @@
#include "xe_ggtt.h"
#include "xe_tile.h"
+typedef int (*xe_pinned_fn)(struct xe_bo *bo);
+
+static int xe_bo_apply_to_pinned(struct xe_device *xe,
+ struct list_head *pinned_list,
+ struct list_head *new_list,
+ const xe_pinned_fn pinned_fn)
+{
+ LIST_HEAD(still_in_list);
+ struct xe_bo *bo;
+ int ret = 0;
+
+ spin_lock(&xe->pinned.lock);
+ while (!ret) {
+ bo = list_first_entry_or_null(pinned_list, typeof(*bo),
+ pinned_link);
+ if (!bo)
+ break;
+ xe_bo_get(bo);
+ list_move_tail(&bo->pinned_link, &still_in_list);
+ spin_unlock(&xe->pinned.lock);
+
+ ret = pinned_fn(bo);
+ if (ret && pinned_list != new_list) {
+ spin_lock(&xe->pinned.lock);
+ /*
+ * We might no longer be pinned, since the PM notifier can
+ * call this. If the pinned link is now empty, keep it
+ * that way.
+ */
+ if (!list_empty(&bo->pinned_link))
+ list_move(&bo->pinned_link, pinned_list);
+ spin_unlock(&xe->pinned.lock);
+ }
+ xe_bo_put(bo);
+ spin_lock(&xe->pinned.lock);
+ }
+ list_splice_tail(&still_in_list, new_list);
+ spin_unlock(&xe->pinned.lock);
+
+ return ret;
+}
+
/**
- * xe_bo_evict_all - evict all BOs from VRAM
+ * xe_bo_notifier_prepare_all_pinned() - Pre-allocate the backing pages for all
+ * pinned VRAM objects which need to be saved.
+ * @xe: xe device
+ *
+ * Should be called from PM notifier when preparing for s3/s4.
*
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe)
+{
+ int ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_all_pinned() - Remove the backing pages for all
+ * pinned VRAM objects which have been restored.
* @xe: xe device
*
- * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
- * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
- * All eviction magic done via TTM calls.
+ * Should be called from PM notifier after exiting s3/s4 (either on success or
+ * failure).
+ */
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe)
+{
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+}
+
+/**
+ * xe_bo_evict_all_user - evict all non-pinned user BOs from VRAM
+ * @xe: xe device
*
- * Evict == move VRAM BOs to temporary (typically system) memory.
+ * Evict non-pinned user BOs (via GPU).
*
- * This function should be called before the device goes into a suspend state
- * where the VRAM loses power.
+ * Evict == move VRAM BOs to temporary (typically system) memory.
*/
-int xe_bo_evict_all(struct xe_device *xe)
+int xe_bo_evict_all_user(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
u32 mem_type;
- u8 id;
int ret;
/* User memory */
@@ -57,34 +132,38 @@ int xe_bo_evict_all(struct xe_device *xe)
}
}
- /* Pinned user memory in VRAM */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &still_in_list);
- spin_unlock(&xe->pinned.lock);
+ return 0;
+}
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
- }
+/**
+ * xe_bo_evict_all - evict all BOs from VRAM
+ * @xe: xe device
+ *
+ * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
+ * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
+ * All eviction magic done via TTM calls.
+ *
+ * Evict == move VRAM BOs to temporary (typically system) memory.
+ *
+ * This function should be called before the device goes into a suspend state
+ * where the VRAM loses power.
+ */
+int xe_bo_evict_all(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u8 id;
+ int ret;
- spin_lock(&xe->pinned.lock);
- }
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
+ ret = xe_bo_evict_all_user(xe);
+ if (ret)
+ return ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
+
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
/*
* Wait for all user BO to be evicted as those evictions depend on the
@@ -93,32 +172,49 @@ int xe_bo_evict_all(struct xe_device *xe)
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
- spin_unlock(&xe->pinned.lock);
+ if (ret)
+ return ret;
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret)
- return ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.evicted,
+ xe_bo_evict_pinned);
+}
- spin_lock(&xe->pinned.lock);
+static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ ret = xe_bo_restore_pinned(bo);
+ if (ret)
+ return ret;
+
+ if (bo->flags & XE_BO_FLAG_GGTT) {
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, xe_bo_device(bo), id) {
+ if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
+ continue;
+
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo(tile->mem.ggtt, bo);
+ mutex_unlock(&tile->mem.ggtt->lock);
+ }
}
- spin_unlock(&xe->pinned.lock);
+
+ /*
+ * We expect validate to trigger a move to VRAM and our move code
+ * should setup the iosys map.
+ */
+ xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) ||
+ !iosys_map_is_null(&bo->vmap));
return 0;
}
/**
- * xe_bo_restore_kernel - restore kernel BOs to VRAM
+ * xe_bo_restore_early - restore early phase kernel BOs to VRAM
*
* @xe: xe device
*
@@ -128,111 +224,130 @@ int xe_bo_evict_all(struct xe_device *xe)
* This function should be called early, before trying to init the GT, on device
* resume.
*/
-int xe_bo_restore_kernel(struct xe_device *xe)
+int xe_bo_restore_early(struct xe_device *xe)
{
- struct xe_bo *bo;
- int ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.evicted,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
+}
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.evicted,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
+/**
+ * xe_bo_restore_late - restore pinned late phase BOs
+ *
+ * @xe: xe device
+ *
+ * Move pinned user and kernel BOs which can use blitter from temporary
+ * (typically system) memory to VRAM. All moves done via TTM calls.
+ *
+ * This function should be called late, after GT init, on device resume.
+ */
+int xe_bo_restore_late(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ int ret, id;
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- if (ret) {
- xe_bo_put(bo);
- return ret;
- }
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.evicted,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
- if (bo->flags & XE_BO_FLAG_GGTT) {
- struct xe_tile *tile;
- u8 id;
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- for_each_tile(tile, xe, id) {
- if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
- continue;
+ if (ret)
+ return ret;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
- }
- }
+ if (!IS_DGFX(xe))
+ return 0;
- /*
- * We expect validate to trigger a move to VRAM and our move code
- * should setup the iosys map.
- */
- xe_assert(xe, !iosys_map_is_null(&bo->vmap));
+ /* Pinned user memory in VRAM should be validated on resume */
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_restore_pinned);
- xe_bo_put(bo);
+ /* Wait for restore to complete */
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- }
- spin_unlock(&xe->pinned.lock);
+ return ret;
+}
- return 0;
+static void xe_bo_pci_dev_remove_pinned(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_dma_unmap_pinned);
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
}
/**
- * xe_bo_restore_user - restore pinned user BOs to VRAM
- *
- * @xe: xe device
+ * xe_bo_pci_dev_remove_all() - Handle bos when the pci_device is about to be removed
+ * @xe: The xe device.
*
- * Move pinned user BOs from temporary (typically system) memory to VRAM via
- * CPU. All moves done via TTM calls.
+ * On pci_device removal we need to drop all dma mappings and move
+ * the data of exported bos out to system. This includes SVM bos and
+ * exported dma-buf bos. This is done by evicting all bos, but
+ * the evict placement in xe_evict_flags() is chosen such that all
+ * bos except those mentioned are purged, and thus their memory
+ * is released.
*
- * This function should be called late, after GT init, on device resume.
+ * For pinned bos, we unmap their dma mappings.
*/
-int xe_bo_restore_user(struct xe_device *xe)
+void xe_bo_pci_dev_remove_all(struct xe_device *xe)
{
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
- u8 id;
- int ret;
+ unsigned int mem_type;
- if (!IS_DGFX(xe))
- return 0;
+ /*
+ * Move pagemap bos and exported dma-buf to system, and
+ * purge everything else.
+ */
+ for (mem_type = XE_PL_VRAM1; mem_type >= XE_PL_TT; --mem_type) {
+ struct ttm_resource_manager *man =
+ ttm_manager_type(&xe->ttm, mem_type);
- /* Pinned user memory in VRAM should be validated on resume */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- list_move_tail(&bo->pinned_link, &still_in_list);
- xe_bo_get(bo);
- spin_unlock(&xe->pinned.lock);
+ if (man) {
+ int ret = ttm_resource_manager_evict_all(&xe->ttm, man);
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
+ drm_WARN_ON(&xe->drm, ret);
}
-
- spin_lock(&xe->pinned.lock);
}
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- /* Wait for restore to complete */
- for_each_tile(tile, xe, id)
- xe_tile_migrate_wait(tile);
+ xe_bo_pci_dev_remove_pinned(xe);
+}
- return 0;
+static void xe_bo_pinned_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+}
+
+/**
+ * xe_bo_pinned_init() - Initialize pinned bo tracking
+ * @xe: The xe device.
+ *
+ * Initializes the lists and locks required for pinned bo
+ * tracking and registers a callback to dma-unmap
+ * any remaining pinned bos on pci device removal.
+ *
+ * Return: %0 on success, negative error code on error.
+ */
+int xe_bo_pinned_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->pinned.lock);
+ INIT_LIST_HEAD(&xe->pinned.early.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.early.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.late.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.external);
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_bo_pinned_fini, xe);
}
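
xe_bo_pinned_init() ties teardown to the device via devm_add_action_or_reset(), so the dma-unmap pass in xe_bo_pinned_fini() runs automatically on driver unbind, or immediately if registration fails. A minimal sketch of that pattern, under those semantics (struct my_state and the setup/teardown helpers are hypothetical)::

	static void my_fini(void *arg)
	{
		struct my_state *st = arg;

		teardown_lists(st);	/* hypothetical cleanup */
	}

	static int my_init(struct device *dev, struct my_state *st)
	{
		setup_lists(st);	/* hypothetical setup */

		/*
		 * Registers my_fini(st) to run on driver unbind; on registration
		 * failure it is called immediately and the error is returned.
		 */
		return devm_add_action_or_reset(dev, my_fini, st);
	}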
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.h b/drivers/gpu/drm/xe/xe_bo_evict.h
index 746894798852..e8385cb7f5e9 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.h
+++ b/drivers/gpu/drm/xe/xe_bo_evict.h
@@ -9,7 +9,13 @@
struct xe_device;
int xe_bo_evict_all(struct xe_device *xe);
-int xe_bo_restore_kernel(struct xe_device *xe);
-int xe_bo_restore_user(struct xe_device *xe);
+int xe_bo_evict_all_user(struct xe_device *xe);
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe);
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe);
+int xe_bo_restore_early(struct xe_device *xe);
+int xe_bo_restore_late(struct xe_device *xe);
+void xe_bo_pci_dev_remove_all(struct xe_device *xe);
+
+int xe_bo_pinned_init(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 15a92e3d4898..eb5e83c5f233 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -28,6 +28,10 @@ struct xe_vm;
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
+ /** @backup_obj: The backup object when pinned and suspended (vram only) */
+ struct xe_bo *backup_obj;
+ /** @parent_obj: Ref to the parent bo if this is a backup_obj */
+ struct xe_bo *parent_obj;
/** @size: Size of this buffer object */
size_t size;
/** @flags: flags for this buffer object */
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
new file mode 100644
index 000000000000..cb9f175c89a1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/configfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "xe_configfs.h"
+#include "xe_module.h"
+
+/**
+ * DOC: Xe Configfs
+ *
+ * Overview
+ * =========
+ *
+ * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
+ * configfs subsystem called ``'xe'`` that creates a directory in the mounted
+ * configfs directory. The user can create devices under this directory and
+ * configure them as necessary. See Documentation/filesystems/configfs.rst for
+ * more information about how configfs works.
+ *
+ * Create devices
+ * ===============
+ *
+ * In order to create a device, the user has to create a directory inside ``'xe'``::
+ *
+ * mkdir /sys/kernel/config/xe/0000:03:00.0/
+ *
+ * Every device created is populated by the driver with entries that can be
+ * used to configure it::
+ *
+ * /sys/kernel/config/xe/
+ * .. 0000:03:00.0/
+ * ... survivability_mode
+ *
+ * Configure Attributes
+ * ====================
+ *
+ * Survivability mode:
+ * -------------------
+ *
+ * Enable survivability mode on supported cards. This setting only takes
+ * effect when probing the device. Example to enable it::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
+ *
+ * Remove devices
+ * ==============
+ *
+ * The created device directories can be removed using ``rmdir``::
+ *
+ * rmdir /sys/kernel/config/xe/0000:03:00.0/
+ */
+
+struct xe_config_device {
+ struct config_group group;
+
+ bool survivability_mode;
+
+ /* protects attributes */
+ struct mutex lock;
+};
+
+static struct xe_config_device *to_xe_config_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct xe_config_device, group);
+}
+
+static ssize_t survivability_mode_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ return sprintf(page, "%d\n", dev->survivability_mode);
+}
+
+static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ bool survivability_mode;
+ int ret;
+
+ ret = kstrtobool(page, &survivability_mode);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = survivability_mode;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
+CONFIGFS_ATTR(, survivability_mode);
+
+static struct configfs_attribute *xe_config_device_attrs[] = {
+ &attr_survivability_mode,
+ NULL,
+};
+
+static void xe_config_device_release(struct config_item *item)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+}
+
+static struct configfs_item_operations xe_config_device_ops = {
+ .release = xe_config_device_release,
+};
+
+static const struct config_item_type xe_config_device_type = {
+ .ct_item_ops = &xe_config_device_ops,
+ .ct_attrs = xe_config_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *xe_config_make_device_group(struct config_group *group,
+ const char *name)
+{
+ unsigned int domain, bus, slot, function;
+ struct xe_config_device *dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
+ if (ret != 4)
+ return ERR_PTR(-EINVAL);
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ if (!pdev)
+ return ERR_PTR(-EINVAL);
+
+ /* The lookup only validates the address; drop the acquired reference. */
+ pci_dev_put(pdev);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&dev->group, name, &xe_config_device_type);
+
+ mutex_init(&dev->lock);
+
+ return &dev->group;
+}
+
+static struct configfs_group_operations xe_config_device_group_ops = {
+ .make_group = xe_config_make_device_group,
+};
+
+static const struct config_item_type xe_configfs_type = {
+ .ct_group_ops = &xe_config_device_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem xe_configfs = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "xe",
+ .ci_type = &xe_configfs_type,
+ },
+ },
+};
+
+static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
+{
+ struct config_item *item;
+ char name[64];
+
+ snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&xe_configfs.su_mutex);
+ item = config_group_find_item(&xe_configfs.su_group, name);
+ mutex_unlock(&xe_configfs.su_mutex);
+
+ if (!item)
+ return NULL;
+
+ return to_xe_config_device(item);
+}
+
+/**
+ * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and return
+ * the survivability mode attribute.
+ *
+ * Return: survivability mode if the config group is found, false otherwise
+ */
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ bool mode;
+
+ if (!dev)
+ return false;
+
+ mode = dev->survivability_mode;
+ config_item_put(&dev->group.cg_item);
+
+ return mode;
+}
+
+/**
+ * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and clear its
+ * survivability mode attribute.
+ */
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = 0;
+ mutex_unlock(&dev->lock);
+
+ config_item_put(&dev->group.cg_item);
+}
+
+int __init xe_configfs_init(void)
+{
+ struct config_group *root = &xe_configfs.su_group;
+ int ret;
+
+ config_group_init(root);
+ mutex_init(&xe_configfs.su_mutex);
+ ret = configfs_register_subsystem(&xe_configfs);
+ if (ret) {
+ pr_err("Error %d while registering %s subsystem\n",
+ ret, root->cg_item.ci_namebuf);
+ return ret;
+ }
+
+ return 0;
+}
+
+void __exit xe_configfs_exit(void)
+{
+ configfs_unregister_subsystem(&xe_configfs);
+}
+
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
new file mode 100644
index 000000000000..d7d041ec2611
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef _XE_CONFIGFS_H_
+#define _XE_CONFIGFS_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+int xe_configfs_init(void);
+void xe_configfs_exit(void);
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+#else
+static inline int xe_configfs_init(void) { return 0; }
+static inline void xe_configfs_exit(void) { }
+static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
+static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
+#endif
+
+#endif
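
The stubs above let callers use the API unconditionally. A sketch of probe-time consumption, assuming hypothetical early_probe()/full_probe() helpers rather than the real xe probe path::

	#include "xe_configfs.h"

	static int early_probe(struct pci_dev *pdev)
	{
		/* Honour a survivability request staged via configfs before bind. */
		if (xe_configfs_get_survivability_mode(pdev))
			return enter_survivability_mode(pdev);	/* hypothetical */

		return full_probe(pdev);			/* hypothetical */
	}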
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 81b9d9bb3f57..7a8af2311318 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -80,7 +80,8 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
-static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count,
+ ssize_t start,
struct xe_devcoredump *coredump)
{
struct xe_device *xe;
@@ -94,7 +95,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
ss = &coredump->snapshot;
iter.data = buffer;
- iter.start = 0;
+ iter.start = start;
iter.remain = count;
p = drm_coredump_printer(&iter);
@@ -168,12 +169,16 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
ss->vm = NULL;
}
+#define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G)
+
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
struct xe_devcoredump_snapshot *ss;
ssize_t byte_copied;
+ u32 chunk_offset;
+ ssize_t new_chunk_position;
if (!coredump)
return -ENODEV;
@@ -183,6 +188,9 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ xe_pm_runtime_get(gt_to_xe(ss->gt));
+
mutex_lock(&coredump->lock);
if (!ss->read.buffer) {
@@ -195,12 +203,29 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
return 0;
}
+ new_chunk_position = div_u64_rem(offset,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ &chunk_offset);
+
+ if (offset >= ss->read.chunk_position + XE_DEVCOREDUMP_CHUNK_MAX ||
+ offset < ss->read.chunk_position) {
+ ss->read.chunk_position = new_chunk_position *
+ XE_DEVCOREDUMP_CHUNK_MAX;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ ss->read.chunk_position, coredump);
+ }
+
byte_copied = count < ss->read.size - offset ? count :
ss->read.size - offset;
- memcpy(buffer, ss->read.buffer + offset, byte_copied);
+ memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied);
mutex_unlock(&coredump->lock);
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ xe_pm_runtime_put(gt_to_xe(ss->gt));
+
return byte_copied;
}
@@ -254,17 +279,32 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
- xe_pm_runtime_put(xe);
+ ss->read.chunk_position = 0;
/* Calculate devcoredump size */
- ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
-
- ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
- if (!ss->read.buffer)
- return;
+ ss->read.size = __xe_devcoredump_read(NULL, LONG_MAX, 0, coredump);
+
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) {
+ ss->read.buffer = kvmalloc(XE_DEVCOREDUMP_CHUNK_MAX,
+ GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ 0, coredump);
+ } else {
+ ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer, ss->read.size, 0,
+ coredump);
+ xe_devcoredump_snapshot_free(ss);
+ }
- __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
- xe_devcoredump_snapshot_free(ss);
+put_pm:
+ xe_pm_runtime_put(xe);
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
@@ -425,7 +465,7 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffi
if (offset & 3)
drm_printf(p, "Offset not word aligned: %zu", offset);
- line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
+ line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_ATOMIC);
if (!line_buff) {
drm_printf(p, "Failed to allocate line buffer\n");
return;
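
The chunking math above maps a devcoredump file offset to a (chunk base, intra-chunk offset) pair, so only one 1.5 GiB chunk needs to be rendered at a time. A standalone sketch of the same computation, assuming the kernel's div_u64_rem() helper::

	#include <linux/math64.h>
	#include <linux/sizes.h>

	/* Matches XE_DEVCOREDUMP_CHUNK_MAX above: 1.5 GiB per rendered chunk. */
	#define CHUNK_MAX (SZ_512M + SZ_1G)

	static u64 locate_chunk(u64 offset, u32 *chunk_offset)
	{
		/* Index of the chunk containing @offset, plus the offset within it. */
		u64 chunk_index = div_u64_rem(offset, CHUNK_MAX, chunk_offset);

		/* Base file offset of the chunk; rendering restarts from here. */
		return chunk_index * CHUNK_MAX;
	}

For offset = CHUNK_MAX + 4096 this yields a chunk base of CHUNK_MAX and an intra-chunk offset of 4096, which is why the read path copies from ss->read.buffer + chunk_offset rather than + offset.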
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 1a1d16a96b2d..a174385a6d83 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -66,6 +66,8 @@ struct xe_devcoredump_snapshot {
struct {
/** @read.size: size of devcoredump in human readable format */
ssize_t size;
+ /** @read.chunk_position: position of devcoredump chunk */
+ ssize_t chunk_position;
/** @read.buffer: buffer of devcoredump in human readable format */
char *buffer;
} read;
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 00191227bc95..c02c4c4e9412 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -23,8 +23,10 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
+#include "xe_bo_evict.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
+#include "xe_device_sysfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
@@ -467,10 +469,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- spin_lock_init(&xe->pinned.lock);
- INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
- INIT_LIST_HEAD(&xe->pinned.external_vram);
- INIT_LIST_HEAD(&xe->pinned.evicted);
+ err = xe_bo_pinned_init(xe);
+ if (err)
+ goto err;
xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
WQ_MEM_RECLAIM);
@@ -505,7 +506,15 @@ ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
static bool xe_driver_flr_disabled(struct xe_device *xe)
{
- return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
+ if (IS_SRIOV_VF(xe))
+ return true;
+
+ if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
+ drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
+ return true;
+ }
+
+ return false;
}
/*
@@ -523,7 +532,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe)
*/
static void __xe_driver_flr(struct xe_device *xe)
{
- const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
+ const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
int ret;
@@ -569,10 +578,8 @@ static void __xe_driver_flr(struct xe_device *xe)
static void xe_driver_flr(struct xe_device *xe)
{
- if (xe_driver_flr_disabled(xe)) {
- drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
+ if (xe_driver_flr_disabled(xe))
return;
- }
__xe_driver_flr(xe);
}
@@ -706,7 +713,7 @@ int xe_device_probe_early(struct xe_device *xe)
sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
- if (err) {
+ if (err || xe_survivability_mode_is_requested(xe)) {
int save_err = err;
/*
@@ -729,6 +736,7 @@ int xe_device_probe_early(struct xe_device *xe)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
static int probe_has_flat_ccs(struct xe_device *xe)
{
@@ -908,6 +916,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err_unregister_display;
+ err = xe_device_sysfs_init(xe);
+ if (err)
+ goto err_unregister_display;
+
xe_debugfs_register(xe);
err = xe_hwmon_register(xe);
@@ -932,6 +944,8 @@ void xe_device_remove(struct xe_device *xe)
xe_display_unregister(xe);
drm_dev_unplug(&xe->drm);
+
+ xe_bo_pci_dev_remove_all(xe);
}
void xe_device_shutdown(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 7efbd4c52791..b9440f8c781e 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -3,14 +3,16 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
-#include <drm/drm_managed.h>
-
#include "xe_device.h"
#include "xe_device_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_pcode_api.h"
+#include "xe_pcode.h"
#include "xe_pm.h"
/**
@@ -63,11 +65,94 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+/**
+ * DOC: PCIe Gen5 Limitations
+ *
+ * Default link speed of discrete GPUs is determined by configuration parameters
+ * stored in their flash memory, which are subject to override through user
+ * initiated firmware updates. It has been observed that devices configured with
+ * PCIe Gen5 as their default link speed can come across link quality issues due
+ * to host or motherboard limitations and may have to auto-downgrade their link
+ * to PCIe Gen4 speed when faced with unstable link at Gen5, which makes
+ * firmware updates rather risky on such setups. It is required to ensure that
+ * the device is capable of auto-downgrading its link to PCIe Gen4 speed before
+ * pushing the firmware image with PCIe Gen5 as default configuration. This can
+ * be done by reading ``auto_link_downgrade_capable`` sysfs entry, which will
+ * denote if the device is capable of auto-downgrading its link to PCIe Gen4
+ * speed with boolean output value of ``0`` or ``1``, meaning `incapable` or
+ * `capable` respectively.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
+ *
+ * Pushing a firmware image with PCIe Gen5 as the default configuration onto an
+ * auto link downgrade incapable device that then faces link instability due to
+ * host or motherboard limitations can result in the driver failing to bind to
+ * the device, making further firmware updates impossible, with RMA as the only
+ * remaining resort.
+ *
+ * Link downgrade status of auto link downgrade capable devices is available
+ * through the ``auto_link_downgrade_status`` sysfs entry with a boolean output
+ * value of ``0`` or ``1``, where ``0`` means no auto-downgrading was required
+ * during link training (which is the optimal scenario) and ``1`` means the
+ * device has auto-downgraded its link to PCIe Gen4 speed due to an unstable
+ * Gen5 link.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
+ */
+
+static ssize_t
+auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ u32 cap, val;
+
+ xe_pm_runtime_get(xe);
+ val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
+ xe_pm_runtime_put(xe);
+
+ cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
+ return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE);
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable);
+
+static ssize_t
+auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ /* default the auto_link_downgrade status to 0 */
+ u32 val = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_pcode_read(xe_device_get_root_tile(xe),
+ PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
+ &val, NULL);
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
+
+static const struct attribute *auto_link_downgrade_attrs[] = {
+ &dev_attr_auto_link_downgrade_capable.attr,
+ &dev_attr_auto_link_downgrade_status.attr,
+ NULL
+};
+
static void xe_device_sysfs_fini(void *arg)
{
struct xe_device *xe = arg;
- sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (xe->d3cold.capable)
+ sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+
+ if (xe->info.platform == XE_BATTLEMAGE)
+ sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
}
int xe_device_sysfs_init(struct xe_device *xe)
@@ -75,9 +160,17 @@ int xe_device_sysfs_init(struct xe_device *xe)
struct device *dev = xe->drm.dev;
int ret;
- ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (ret)
- return ret;
+ if (xe->d3cold.capable) {
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (ret)
+ return ret;
+ }
+
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
+ if (ret)
+ return ret;
+ }
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 9f8667ebba85..6383a1c0d478 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -31,7 +31,6 @@
#endif
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-#include "soc/intel_pch.h"
#include "intel_display_core.h"
#include "intel_display_device.h"
#endif
@@ -107,6 +106,9 @@ struct xe_vram_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
/** @pagemap: Used to remap device memory as ZONE_DEVICE */
struct dev_pagemap pagemap;
/**
@@ -120,8 +122,7 @@ struct xe_vram_region {
* This is generated when remap device memory as ZONE_DEVICE
*/
resource_size_t hpa_base;
- /** @ttm: VRAM TTM manager */
- struct xe_ttm_vram_mgr ttm;
+#endif
};
/**
@@ -314,6 +315,8 @@ struct xe_device {
u8 has_atomic_enable_pte_bit:1;
/** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
u8 has_device_atomics_on_smem:1;
+ /** @info.has_fan_control: Device supports fan control */
+ u8 has_fan_control:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
/** @info.has_heci_cscfi: device has heci cscfi */
@@ -322,6 +325,10 @@ struct xe_device {
u8 has_heci_gscfi:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
+ /** @info.has_mbx_power_limits: Device has support to manage power limits using
+ * pcode mailbox commands.
+ */
+ u8 has_mbx_power_limits:1;
/** @info.has_pxp: Device has PXP support */
u8 has_pxp:1;
/** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
@@ -330,8 +337,12 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
+ /** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
+ u8 has_64bit_timestamp:1;
/** @info.is_dgfx: is discrete device */
u8 is_dgfx:1;
+ /** @info.needs_scratch: needs scratch page for oob prefetch to work */
+ u8 needs_scratch:1;
/**
* @info.probe_display: Probe display hardware. If set to
* false, the driver will behave as if there is no display
@@ -418,12 +429,22 @@ struct xe_device {
struct {
/** @pinned.lock: protected pinned BO list state */
spinlock_t lock;
- /** @pinned.kernel_bo_present: pinned kernel BO that are present */
- struct list_head kernel_bo_present;
- /** @pinned.evicted: pinned BO that have been evicted */
- struct list_head evicted;
- /** @pinned.external_vram: pinned external BO in vram*/
- struct list_head external_vram;
+ /** @pinned.early: early pinned lists */
+ struct {
+ /** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.early.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ } early;
+ /** @pinned.late: late pinned lists */
+ struct {
+ /** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.late.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ /** @pinned.external: pinned external and dma-buf. */
+ struct list_head external;
+ } late;
} pinned;
/** @ufence_wq: user fence wait queue */
@@ -506,6 +527,9 @@ struct xe_device {
struct mutex lock;
} d3cold;
+ /** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
+ struct notifier_block pm_notifier;
+
/** @pmt: Support the PMT driver callback interface */
struct {
/** @pmt.lock: protect access for telemetry data */
@@ -570,7 +594,6 @@ struct xe_device {
* migrating to the right sub-structs
*/
struct intel_display display;
- enum intel_pch pch_type;
struct dram_info {
bool wm_lv_0_adjust_needed;
@@ -586,6 +609,7 @@ struct xe_device {
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index f7a20264ea33..346f857f3837 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -233,7 +233,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->importer_priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
- XE_WARN_ON(xe_bo_evict(bo, false));
+ XE_WARN_ON(xe_bo_evict(bo));
}
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index e2bb156c71fb..96732613b4b7 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -283,7 +283,7 @@ static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -313,7 +313,7 @@ static int xe_eu_stall_user_extensions(struct xe_device *xe, u64 extension,
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index b75adfc99fb7..44364c042ad7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -176,8 +176,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (xe_exec_queue_is_parallel(q)) {
- err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
- q->width);
+ err = copy_from_user(addresses, addresses_user, sizeof(u64) *
+ q->width);
if (err) {
err = -EFAULT;
goto err_syncs;
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 606922d9dd73..fee22358cc09 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -114,7 +114,6 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
- struct xe_vm *vm = q->vm;
int i, err;
u32 flags = 0;
@@ -132,32 +131,20 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
flags |= XE_LRC_CREATE_RUNALONE;
}
- if (vm) {
- err = xe_vm_lock(vm, true);
- if (err)
- return err;
- }
-
for (i = 0; i < q->width; ++i) {
q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
if (IS_ERR(q->lrc[i])) {
err = PTR_ERR(q->lrc[i]);
- goto err_unlock;
+ goto err_lrc;
}
}
- if (vm)
- xe_vm_unlock(vm);
-
err = q->ops->init(q);
if (err)
goto err_lrc;
return 0;
-err_unlock:
- if (vm)
- xe_vm_unlock(vm);
err_lrc:
for (i = i - 1; i >= 0; --i)
xe_lrc_put(q->lrc[i]);
@@ -479,7 +466,7 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -518,7 +505,7 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -618,9 +605,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
return -EINVAL;
- err = __copy_from_user(eci, user_eci,
- sizeof(struct drm_xe_engine_class_instance) *
- len);
+ err = copy_from_user(eci, user_eci,
+ sizeof(struct drm_xe_engine_class_instance) * len);
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -830,7 +816,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_lrc *lrc;
- u32 old_ts, new_ts;
+ u64 old_ts, new_ts;
int idx;
/*
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index 4f6784e5abf8..8a5cba22b586 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -49,9 +49,6 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
fw->gt = gt;
spin_lock_init(&fw->lock);
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (xe->info.graphics_verx100 >= 1270) {
init_domain(fw, XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
@@ -67,9 +64,6 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
int i, j;
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (!xe_gt_is_media_type(gt))
init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 5fcb2b4c2c13..7062115909f2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -365,7 +365,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
* scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
- flags = XE_BO_FLAG_PINNED;
+ flags = 0;
if (ggtt->flags & XE_GGTT_FLAGS_64K)
flags |= XE_BO_FLAG_SYSTEM;
else
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index c250ea773491..308061f0cf37 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -51,7 +51,15 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
- drm_sched_resubmit_jobs(&sched->base);
+ struct drm_sched_job *s_job;
+
+ list_for_each_entry(s_job, &sched->base.pending_list, list) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct dma_fence *hw_fence = s_fence->parent;
+
+ if (hw_fence && !dma_fence_is_signaled(hw_fence))
+ sched->base.ops->run_job(s_job);
+ }
}
static inline bool
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 66198cf2662c..6c4cb9576fb6 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -12,8 +12,10 @@
#include <generated/xe_wa_oob.h>
+#include "instructions/xe_alu_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
+#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -116,7 +118,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}
- xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
+ xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
@@ -176,15 +178,6 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
return 0;
}
-/*
- * Convert back from encoded value to type-safe, only to be used when reg.mcr
- * is true
- */
-static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
-{
- return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
-}
-
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
@@ -194,6 +187,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
struct xe_bb *bb;
struct dma_fence *fence;
long timeout;
+ int count_rmw = 0;
int count = 0;
if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
@@ -206,30 +200,32 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
if (IS_ERR(bb))
return PTR_ERR(bb);
- xa_for_each(&sr->xa, idx, entry)
- ++count;
+ /* count RMW registers as those will be handled separately */
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ ++count;
+ else
+ ++count_rmw;
+ }
- if (count) {
+ if (count || count_rmw)
xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
+ if (count) {
+ /* emit single LRI with all non RMW regs */
+
bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
xa_for_each(&sr->xa, idx, entry) {
struct xe_reg reg = entry->reg;
- struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
u32 val;
- /*
- * Skip reading the register if it's not really needed
- */
if (reg.masked)
val = entry->clr_bits << 16;
- else if (entry->clr_bits + 1)
- val = (reg.mcr ?
- xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
- xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
- else
+ else if (entry->clr_bits == ~0)
val = 0;
+ else
+ continue;
val |= entry->set_bits;
@@ -239,6 +235,52 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
}
}
+ if (count_rmw) {
+ /* emit MI_MATH for each RMW reg */
+
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ continue;
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ bb->cs[bb->len++] = entry->reg.addr;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
+ bb->cs[bb->len++] = entry->clr_bits;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
+ bb->cs[bb->len++] = entry->set_bits;
+
+ bb->cs[bb->len++] = MI_MATH(8);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1);
+ bb->cs[bb->len++] = CS_ALU_INSTR_AND;
+ bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2);
+ bb->cs[bb->len++] = CS_ALU_INSTR_OR;
+ bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+ bb->cs[bb->len++] = entry->reg.addr;
+
+ xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
+ entry->reg.addr, entry->clr_bits, entry->set_bits);
+ }
+
+ /* reset used GPR */
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
+ bb->cs[bb->len++] = 0;
+ }
+
xe_lrc_emit_hwe_state_instructions(q, bb);
job = xe_bb_create_job(q, bb);
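
The MI_MATH block above performs a read-modify-write entirely on the command streamer: the register value is loaded into GPR0, the clear and set masks into GPR1/GPR2, and the ALU computes the new value before it is written back. As plain C (a sketch of the computation only, not the emitted commands), each RMW entry amounts to::

	static u32 rmw(u32 old, u32 clr_bits, u32 set_bits)
	{
		/* LOAD(SRCA, REG0); LOADINV(SRCB, REG1); AND; STORE(REG0, ACCU) */
		u32 val = old & ~clr_bits;

		/* LOAD(SRCA, REG0); LOAD(SRCB, REG2); OR; STORE(REG0, ACCU) */
		return val | set_bits;
	}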
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index f7005a3643e6..119a55bb7580 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -300,20 +300,20 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+/*
+ * Only for GT debugfs files which can be used safely on the VF as well:
+ * - without access to the GT privileged registers
+ * - without access to the PF specific data
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
{"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
{"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync},
{"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
- {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
- {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
{"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
- {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
- {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
{"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
@@ -323,6 +323,15 @@ static const struct drm_info_list debugfs_list[] = {
{"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+ {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
+ {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
+ {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
+ {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+};
+
void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -346,10 +355,15 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
*/
root->d_inode->i_private = gt;
- drm_debugfs_create_files(debugfs_list,
- ARRAY_SIZE(debugfs_list),
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
root, minor);
+ if (!IS_SRIOV_VF(xe))
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ root, minor);
+
xe_uc_debugfs_register(&gt->uc, root);
if (IS_SRIOV_PF(xe))
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 604bdc7c8173..60d9354e7dbf 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -32,13 +32,18 @@
* Xe's Freq provides a sysfs API for frequency management:
*
* device/tile#/gt#/freq0/<item>_freq *read-only* files:
+ *
* - act_freq: The actual resolved frequency decided by PCODE.
* - cur_freq: The current one requested by GuC PC to the PCODE.
* - rpn_freq: The Render Performance (RP) N level, which is the minimal one.
+ * - rpa_freq: The Render Performance (RP) A level, which is the achievable one.
+ * Calculated by PCODE at runtime based on multiple running conditions.
+ * - rpe_freq: The Render Performance (RP) E level, which is the efficient one.
+ * Calculated by PCODE at runtime based on multiple running conditions.
* - rp0_freq: The Render Performance (RP) 0 level, which is the maximum one.
*
* device/tile#/gt#/freq0/<item>_freq *read-write* files:
+ *
* - min_freq: Min frequency request.
* - max_freq: Max frequency request.
* If max <= min, then freq_min becomes a fixed frequency request.
@@ -56,9 +61,10 @@ dev_to_xe(struct device *dev)
return gt_to_xe(kobj_to_gt(dev->kobj.parent));
}
-static ssize_t act_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t act_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -68,11 +74,12 @@ static ssize_t act_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(act_freq);
+static struct kobj_attribute attr_act_freq = __ATTR_RO(act_freq);
-static ssize_t cur_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cur_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -85,11 +92,12 @@ static ssize_t cur_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(cur_freq);
+static struct kobj_attribute attr_cur_freq = __ATTR_RO(cur_freq);
-static ssize_t rp0_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rp0_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -99,11 +107,12 @@ static ssize_t rp0_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rp0_freq);
+static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq);
-static ssize_t rpe_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpe_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -113,11 +122,12 @@ static ssize_t rpe_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpe_freq);
+static struct kobj_attribute attr_rpe_freq = __ATTR_RO(rpe_freq);
-static ssize_t rpa_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpa_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -127,20 +137,22 @@ static ssize_t rpa_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpa_freq);
+static struct kobj_attribute attr_rpa_freq = __ATTR_RO(rpa_freq);
-static ssize_t rpn_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpn_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpn_freq(pc));
}
-static DEVICE_ATTR_RO(rpn_freq);
+static struct kobj_attribute attr_rpn_freq = __ATTR_RO(rpn_freq);
-static ssize_t min_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -154,9 +166,10 @@ static ssize_t min_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t min_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -173,11 +186,12 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(min_freq);
+static struct kobj_attribute attr_min_freq = __ATTR_RW(min_freq);
-static ssize_t max_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -191,9 +205,10 @@ static ssize_t max_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t max_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -210,17 +225,17 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(max_freq);
+static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq);
static const struct attribute *freq_attrs[] = {
- &dev_attr_act_freq.attr,
- &dev_attr_cur_freq.attr,
- &dev_attr_rp0_freq.attr,
- &dev_attr_rpa_freq.attr,
- &dev_attr_rpe_freq.attr,
- &dev_attr_rpn_freq.attr,
- &dev_attr_min_freq.attr,
- &dev_attr_max_freq.attr,
+ &attr_act_freq.attr,
+ &attr_cur_freq.attr,
+ &attr_rp0_freq.attr,
+ &attr_rpa_freq.attr,
+ &attr_rpe_freq.attr,
+ &attr_rpn_freq.attr,
+ &attr_min_freq.attr,
+ &attr_max_freq.attr,
NULL
};
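
The attribute conversions in this file (and in xe_gt_idle.c and xe_gt_throttle.c further below) all follow the same mechanical pattern: a device_attribute show()/store() becomes a kobj_attribute one, and the device is recovered with kobj_to_dev(). A minimal sketch of the read-only case, assuming a hypothetical attribute::

	/* 'example' and read_example() are hypothetical, for illustration only. */
	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	{
		struct device *dev = kobj_to_dev(kobj);	/* was the show() parameter */

		return sysfs_emit(buf, "%u\n", read_example(dev));
	}
	static struct kobj_attribute attr_example = __ATTR_RO(example);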
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index fbbace7b0b12..c11206410a4d 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -249,9 +249,10 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static ssize_t name_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
ssize_t ret;
@@ -262,11 +263,12 @@ static ssize_t name_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR_RO(name);
+static struct kobj_attribute name_attr = __ATTR_RO(name);
-static ssize_t idle_status_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t idle_status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
enum xe_gt_idle_state state;
@@ -277,6 +279,7 @@ static ssize_t idle_status_show(struct device *dev,
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
+static struct kobj_attribute idle_status_attr = __ATTR_RO(idle_status);
u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
{
@@ -291,10 +294,11 @@ u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
return residency;
}
-static DEVICE_ATTR_RO(idle_status);
-static ssize_t idle_residency_ms_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+
+static ssize_t idle_residency_ms_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
u64 residency;
@@ -305,12 +309,12 @@ static ssize_t idle_residency_ms_show(struct device *dev,
return sysfs_emit(buff, "%llu\n", residency);
}
-static DEVICE_ATTR_RO(idle_residency_ms);
+static struct kobj_attribute idle_residency_attr = __ATTR_RO(idle_residency_ms);
static const struct attribute *gt_idle_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_idle_status.attr,
- &dev_attr_idle_residency_ms.attr,
+ &name_attr.attr,
+ &idle_status_attr.attr,
+ &idle_residency_attr.attr,
NULL,
};
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 605aad3554e7..d4d9730f0d2c 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -345,7 +345,8 @@ fallback:
* Some older platforms don't have tables or don't have complete tables.
* Newer platforms should always have the required info.
*/
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000)
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000 &&
+ !gt_to_xe(gt)->info.force_execlist)
xe_gt_err(gt, "Slice/Subslice counts missing from hwconfig table; using typical fallback values\n");
if (gt_to_xe(gt)->info.platform == XE_PVC)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 0c22b3a36655..10622ca471a2 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -240,7 +240,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
atomic = access_is_atomic(pf->access_type);
if (xe_vma_is_cpu_addr_mirror(vma))
- err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt),
+ err = xe_svm_handle_pagefault(vm, vma, gt,
pf->page_addr, atomic);
else
err = handle_vma_pagefault(gt, vma, atomic);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 10be109bf357..2420a548cacc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1444,15 +1444,23 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED);
+ bo = xe_bo_create_locked(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
+ err = xe_bo_pin(bo);
+ xe_bo_unlock(bo);
+ if (unlikely(err)) {
+ xe_bo_put(bo);
+ return err;
+ }
+
config->lmem_obj = bo;
if (xe_device_has_lmtt(xe)) {
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index b2521dd6ec42..0fe47f41b63c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -51,27 +51,18 @@ static unsigned int extract_vfid(struct dentry *d)
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
- * │   │   ├── ggtt_available
- * │   │   ├── ggtt_provisioned
* │   │   ├── contexts_provisioned
* │   │   ├── doorbells_provisioned
* │   │   ├── runtime_registers
* │   │   ├── negotiated_versions
* │   │   ├── adverse_events
+ * ├── gt1
+ * │   ├── pf
+ * │   │   ├── ...
*/
static const struct drm_info_list pf_info[] = {
{
- "ggtt_available",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_available_ggtt,
- },
- {
- "ggtt_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_ggtt,
- },
- {
"contexts_provisioned",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_config_print_ctxs,
@@ -82,11 +73,6 @@ static const struct drm_info_list pf_info[] = {
.data = xe_gt_sriov_pf_config_print_dbs,
},
{
- "lmem_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_lmem,
- },
- {
"runtime_registers",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_service_print_runtime,
@@ -107,6 +93,42 @@ static const struct drm_info_list pf_info[] = {
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
+ * │   │   ├── ggtt_available
+ * │   │   ├── ggtt_provisioned
+ */
+
+static const struct drm_info_list pf_ggtt_info[] = {
+ {
+ "ggtt_available",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_available_ggtt,
+ },
+ {
+ "ggtt_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_ggtt,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── lmem_provisioned
+ */
+
+static const struct drm_info_list pf_lmem_info[] = {
+ {
+ "lmem_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_lmem,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
* │   │   ├── reset_engine
* │   │   ├── sample_period
* │   │   ├── sched_if_idle
@@ -532,6 +554,16 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pfdentry->d_inode->i_private = gt;
drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
+ if (!xe_gt_is_media_type(gt)) {
+ drm_debugfs_create_files(pf_ggtt_info,
+ ARRAY_SIZE(pf_ggtt_info),
+ pfdentry, minor);
+ if (IS_DGFX(gt_to_xe(gt)))
+ drm_debugfs_create_files(pf_lmem_info,
+ ARRAY_SIZE(pf_lmem_info),
+ pfdentry, minor);
+ }
+
pf_add_policy_attrs(gt, pfdentry);
pf_add_config_attrs(gt, pfdentry, PFID);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 4efde5f46b43..821cfcc34e6b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -112,7 +112,6 @@ static const struct xe_reg tgl_runtime_regs[] = {
XELP_GT_SLICE_ENABLE, /* _MMIO(0x9138) */
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -124,7 +123,6 @@ static const struct xe_reg ats_m_runtime_regs[] = {
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -136,7 +134,6 @@ static const struct xe_reg pvc_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xA26C) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -150,7 +147,6 @@ static const struct xe_reg ver_1270_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -167,7 +163,6 @@ static const struct xe_reg ver_2000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
@@ -185,7 +180,6 @@ static const struct xe_reg ver_3000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 6155ea354432..30f942671c2b 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -27,6 +27,7 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
}
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
+ "svm_pagefault_count",
"tlb_inval_count",
"vma_pagefault_count",
"vma_pagefault_kb",
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index d556771f99d6..be3244d7133c 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -7,6 +7,7 @@
#define _XE_GT_STATS_TYPES_H_
enum xe_gt_stats_id {
+ XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT,
XE_GT_STATS_ID_TLB_INVAL,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 8db78d616b6f..aa962c783cdf 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -114,115 +114,115 @@ static u32 read_reason_vr_tdc(struct xe_gt *gt)
return tdc;
}
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool status = !!read_status(gt);
return sysfs_emit(buff, "%u\n", status);
}
-static DEVICE_ATTR_RO(status);
+static struct kobj_attribute attr_status = __ATTR_RO(status);
-static ssize_t reason_pl1_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl1_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl1 = !!read_reason_pl1(gt);
return sysfs_emit(buff, "%u\n", pl1);
}
-static DEVICE_ATTR_RO(reason_pl1);
+static struct kobj_attribute attr_reason_pl1 = __ATTR_RO(reason_pl1);
-static ssize_t reason_pl2_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl2_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl2 = !!read_reason_pl2(gt);
return sysfs_emit(buff, "%u\n", pl2);
}
-static DEVICE_ATTR_RO(reason_pl2);
+static struct kobj_attribute attr_reason_pl2 = __ATTR_RO(reason_pl2);
-static ssize_t reason_pl4_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl4_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl4 = !!read_reason_pl4(gt);
return sysfs_emit(buff, "%u\n", pl4);
}
-static DEVICE_ATTR_RO(reason_pl4);
+static struct kobj_attribute attr_reason_pl4 = __ATTR_RO(reason_pl4);
-static ssize_t reason_thermal_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_thermal_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermal = !!read_reason_thermal(gt);
return sysfs_emit(buff, "%u\n", thermal);
}
-static DEVICE_ATTR_RO(reason_thermal);
+static struct kobj_attribute attr_reason_thermal = __ATTR_RO(reason_thermal);
-static ssize_t reason_prochot_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_prochot_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool prochot = !!read_reason_prochot(gt);
return sysfs_emit(buff, "%u\n", prochot);
}
-static DEVICE_ATTR_RO(reason_prochot);
+static struct kobj_attribute attr_reason_prochot = __ATTR_RO(reason_prochot);
-static ssize_t reason_ratl_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_ratl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool ratl = !!read_reason_ratl(gt);
return sysfs_emit(buff, "%u\n", ratl);
}
-static DEVICE_ATTR_RO(reason_ratl);
+static struct kobj_attribute attr_reason_ratl = __ATTR_RO(reason_ratl);
-static ssize_t reason_vr_thermalert_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_thermalert_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermalert = !!read_reason_vr_thermalert(gt);
return sysfs_emit(buff, "%u\n", thermalert);
}
-static DEVICE_ATTR_RO(reason_vr_thermalert);
+static struct kobj_attribute attr_reason_vr_thermalert = __ATTR_RO(reason_vr_thermalert);
-static ssize_t reason_vr_tdc_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_tdc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool tdc = !!read_reason_vr_tdc(gt);
return sysfs_emit(buff, "%u\n", tdc);
}
-static DEVICE_ATTR_RO(reason_vr_tdc);
+static struct kobj_attribute attr_reason_vr_tdc = __ATTR_RO(reason_vr_tdc);
static struct attribute *throttle_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_reason_pl1.attr,
- &dev_attr_reason_pl2.attr,
- &dev_attr_reason_pl4.attr,
- &dev_attr_reason_thermal.attr,
- &dev_attr_reason_prochot.attr,
- &dev_attr_reason_ratl.attr,
- &dev_attr_reason_vr_thermalert.attr,
- &dev_attr_reason_vr_tdc.attr,
+ &attr_status.attr,
+ &attr_reason_pl1.attr,
+ &attr_reason_pl2.attr,
+ &attr_reason_pl4.attr,
+ &attr_reason_thermal.attr,
+ &attr_reason_prochot.attr,
+ &attr_reason_ratl.attr,
+ &attr_reason_vr_thermalert.attr,
+ &attr_reason_vr_tdc.attr,
NULL
};
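The conversion above replaces DEVICE_ATTR_RO() with __ATTR_RO() so the throttle files hang off a bare kobject instead of a struct device, while kobj_to_dev() recovers the device inside each show() callback. A hedged sketch of how such an attribute array is typically exposed - the group name and registration helper are illustrative, not the driver's actual code:

	static const struct attribute_group throttle_group_sketch = {
		.name = "throttle",
		.attrs = throttle_attrs,
	};

	/* illustrative: publish the files under <parent>/throttle/ */
	static int example_register_throttle(struct kobject *parent)
	{
		return sysfs_create_group(parent, &throttle_group_sketch);
	}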
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 084cbdeba8ea..e1362e608146 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -138,6 +138,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
int pending_seqno;
/*
+	 * We can get here before the CTs are even initialized if we're wedging
+	 * very early, in which case there are no pending fences and we can bail
+	 * immediately.
+ */
+ if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
+ return;
+
+ /*
* CT channel is already disabled at this point. No new TLB requests can
* appear.
*/
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index bc5714a5b36b..bac5471a1a78 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -483,7 +483,8 @@ static int guc_g2g_alloc(struct xe_guc *guc)
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_ALL |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1393,6 +1394,7 @@ proto:
/* Use data from the GuC response as our return value */
return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
+ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO);
int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
@@ -1508,30 +1510,32 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
xe_uc_fw_print(&guc->fw, p);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
- return;
+ if (!IS_SRIOV_VF(gt_to_xe(gt))) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
+
+ status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ REG_FIELD_GET(GS_BOOTROM_MASK, status));
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ REG_FIELD_GET(GS_UKERNEL_MASK, status));
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ REG_FIELD_GET(GS_MIA_MASK, status));
+ drm_printf(p, "\tLog level = %d\n",
+ xe_guc_log_get_level(&guc->log));
+
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ }
- status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
-
- drm_printf(p, "\nGuC status 0x%08x:\n", status);
- drm_printf(p, "\tBootrom status = 0x%x\n",
- REG_FIELD_GET(GS_BOOTROM_MASK, status));
- drm_printf(p, "\tuKernel status = 0x%x\n",
- REG_FIELD_GET(GS_UKERNEL_MASK, status));
- drm_printf(p, "\tMIA Core status = 0x%x\n",
- REG_FIELD_GET(GS_MIA_MASK, status));
- drm_printf(p, "\tLog level = %d\n",
- xe_guc_log_get_level(&guc->log));
-
- drm_puts(p, "\nScratch registers:\n");
- for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
- drm_printf(p, "\t%2d: \t0x%x\n",
- i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
drm_puts(p, "\n");
xe_guc_ct_print(&guc->ct, p, false);
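The new ALLOW_ERROR_INJECTION() annotation opts xe_guc_mmio_send_recv() into the kernel's function error injection framework, letting test setups force this send path to fail. The pattern in a self-contained hedged sketch (the helper name is made up; ERRNO means injected values are negative errnos):

	#include <linux/error-injection.h>

	/* hypothetical fallible step: returns 0 or a negative errno */
	static int example_fallible_step(void)
	{
		return 0;
	}
	ALLOW_ERROR_INJECTION(example_fallible_step, ERRNO);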
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 7031542a70ce..44c1fa2fe7c8 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -376,6 +376,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
&offset, &remain);
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
+ guc_waklv_enable_simple(ads,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
+ &offset, &remain);
+
size = guc_ads_waklv_size(ads) - remain;
if (!size)
return;
@@ -414,7 +419,8 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -710,8 +716,8 @@ static int guc_capture_prep_lists(struct xe_guc_ads *ads)
}
if (ads->capture_size != PAGE_ALIGN(total_size))
- xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n",
- ads->capture_size, PAGE_ALIGN(total_size));
+ xe_gt_dbg(gt, "Updated ADS capture size %d (was %d)\n",
+ PAGE_ALIGN(total_size), ads->capture_size);
return PAGE_ALIGN(total_size);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index 9095618648bc..859a3ba91be5 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -105,49 +105,49 @@ struct __guc_capture_parsed_output {
* 3. Incorrect order will trigger XE_WARN.
*/
#define COMMON_XELP_BASE_GLOBAL \
- { FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"}
+ { FORCEWAKE_GT, REG_32BIT, 0, 0, 0, "FORCEWAKE_GT"}
#define COMMON_BASE_ENGINE_INSTANCE \
- { RING_HWSTAM(0), REG_32BIT, 0, 0, "HWSTAM"}, \
- { RING_HWS_PGA(0), REG_32BIT, 0, 0, "RING_HWS_PGA"}, \
- { RING_HEAD(0), REG_32BIT, 0, 0, "RING_HEAD"}, \
- { RING_TAIL(0), REG_32BIT, 0, 0, "RING_TAIL"}, \
- { RING_CTL(0), REG_32BIT, 0, 0, "RING_CTL"}, \
- { RING_MI_MODE(0), REG_32BIT, 0, 0, "RING_MI_MODE"}, \
- { RING_MODE(0), REG_32BIT, 0, 0, "RING_MODE"}, \
- { RING_ESR(0), REG_32BIT, 0, 0, "RING_ESR"}, \
- { RING_EMR(0), REG_32BIT, 0, 0, "RING_EMR"}, \
- { RING_EIR(0), REG_32BIT, 0, 0, "RING_EIR"}, \
- { RING_IMR(0), REG_32BIT, 0, 0, "RING_IMR"}, \
- { RING_IPEHR(0), REG_32BIT, 0, 0, "IPEHR"}, \
- { RING_INSTDONE(0), REG_32BIT, 0, 0, "RING_INSTDONE"}, \
- { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, "INDIRECT_RING_STATE"}, \
- { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, "ACTHD"}, \
- { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_BBADDR"}, \
- { RING_START(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_START"}, \
- { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_DMA_FADD"}, \
- { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_STATUS"}, \
- { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
- { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
+ { RING_HWSTAM(0), REG_32BIT, 0, 0, 0, "HWSTAM"}, \
+ { RING_HWS_PGA(0), REG_32BIT, 0, 0, 0, "RING_HWS_PGA"}, \
+ { RING_HEAD(0), REG_32BIT, 0, 0, 0, "RING_HEAD"}, \
+ { RING_TAIL(0), REG_32BIT, 0, 0, 0, "RING_TAIL"}, \
+ { RING_CTL(0), REG_32BIT, 0, 0, 0, "RING_CTL"}, \
+ { RING_MI_MODE(0), REG_32BIT, 0, 0, 0, "RING_MI_MODE"}, \
+ { RING_MODE(0), REG_32BIT, 0, 0, 0, "RING_MODE"}, \
+ { RING_ESR(0), REG_32BIT, 0, 0, 0, "RING_ESR"}, \
+ { RING_EMR(0), REG_32BIT, 0, 0, 0, "RING_EMR"}, \
+ { RING_EIR(0), REG_32BIT, 0, 0, 0, "RING_EIR"}, \
+ { RING_IMR(0), REG_32BIT, 0, 0, 0, "RING_IMR"}, \
+ { RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \
+ { RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \
+ { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \
+ { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \
+ { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_BBADDR"}, \
+ { RING_START(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_START"}, \
+ { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_DMA_FADD"}, \
+ { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_STATUS"}, \
+ { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
#define COMMON_XELP_RC_CLASS \
- { RCU_MODE, REG_32BIT, 0, 0, "RCU_MODE"}
+ { RCU_MODE, REG_32BIT, 0, 0, 0, "RCU_MODE"}
#define COMMON_XELP_RC_CLASS_INSTDONE \
- { SC_INSTDONE, REG_32BIT, 0, 0, "SC_INSTDONE"}, \
- { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA"}, \
- { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA2"}
+ { SC_INSTDONE, REG_32BIT, 0, 0, 0, "SC_INSTDONE"}, \
+ { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA"}, \
+ { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA2"}
#define XELP_VEC_CLASS_REGS \
- { SFC_DONE(0), 0, 0, 0, "SFC_DONE[0]"}, \
- { SFC_DONE(1), 0, 0, 0, "SFC_DONE[1]"}, \
- { SFC_DONE(2), 0, 0, 0, "SFC_DONE[2]"}, \
- { SFC_DONE(3), 0, 0, 0, "SFC_DONE[3]"}
+ { SFC_DONE(0), 0, 0, 0, 0, "SFC_DONE[0]"}, \
+ { SFC_DONE(1), 0, 0, 0, 0, "SFC_DONE[1]"}, \
+ { SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \
+ { SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"}
/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
@@ -352,7 +352,7 @@ static const struct __ext_steer_reg xehpg_extregs[] = {
static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
const struct __ext_steer_reg *extlist,
- int slice_id, int subslice_id)
+ u32 dss_id, u16 slice_id, u16 subslice_id)
{
if (!ext || !extlist)
return;
@@ -361,6 +361,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->dss_id = dss_id;
ext->regname = extlist->name;
}
@@ -397,7 +398,7 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
u16 slice, subslice;
- int iter, i, total = 0;
+ int dss, i, total = 0;
const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists;
const struct __guc_mmio_reg_descr_group *list;
struct __guc_mmio_reg_descr_group *extlists;
@@ -454,15 +455,15 @@ static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
/* For steering registers, the list is generated at run-time */
extarray = (struct __guc_mmio_reg_descr *)extlists[0].list;
- for_each_dss_steering(iter, gt, slice, subslice) {
+ for_each_dss_steering(dss, gt, slice, subslice) {
for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
- __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ __fill_ext_reg(extarray, &xe_extregs[i], dss, slice, subslice);
++extarray;
}
if (has_xehpg_extregs)
for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
- __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+ __fill_ext_reg(extarray, &xehpg_extregs[i], dss, slice, subslice);
++extarray;
}
}
@@ -1672,18 +1673,16 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
{
struct xe_gt *gt = snapshot->hwe->gt;
struct xe_device *xe = gt_to_xe(gt);
- struct xe_guc *guc = &gt->uc.guc;
struct xe_devcoredump *devcoredump = &xe->devcoredump;
struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
struct gcap_reg_list_info *reginfo = NULL;
u32 i, last_value = 0;
- bool is_ext, low32_ready = false;
+ bool low32_ready = false;
if (!list || !list->list || list->num_regs == 0)
return;
XE_WARN_ON(!devcore_snapshot->matched_node);
- is_ext = list == guc->capture->extlists;
reginfo = &devcore_snapshot->matched_node->reginfo[type];
/*
@@ -1749,17 +1748,12 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
*/
XE_WARN_ON(low32_ready);
- if (is_ext) {
- int dss, group, instance;
-
- group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
- instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
- dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
-
- drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
- } else {
+ if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, reg_desc->flags))
+ drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname,
+ reg_desc->dss_id, value);
+ else
drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
- }
+
break;
}
}
diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h
index ca2d390ccbee..6cb439115597 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h
@@ -39,6 +39,8 @@ struct __guc_mmio_reg_descr {
u32 flags;
/** @mask: The mask to apply */
u32 mask;
+ /** @dss_id: Cached index for steered registers */
+ u32 dss_id;
/** @regname: Name of the register */
const char *regname;
};
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 72ad576fc18e..d0ac48d8f4f7 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -238,7 +238,8 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -513,6 +514,9 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
*/
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
+ if (!xe_guc_ct_initialized(ct))
+ return;
+
xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
stop_g2h_handler(ct);
}
@@ -759,7 +763,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u16 seqno;
int ret;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
xe_gt_assert(gt, !g2h_len || !g2h_fence);
xe_gt_assert(gt, !num_g2h || !g2h_fence);
xe_gt_assert(gt, !g2h_len || num_g2h);
@@ -1088,6 +1092,7 @@ int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
+ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 *response_buffer)
@@ -1342,7 +1347,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
u32 action;
u32 *hxg;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
lockdep_assert_held(&ct->fast_lock);
if (ct->state == XE_GUC_CT_STATE_DISABLED)
@@ -1828,10 +1833,10 @@ static void ct_dead_print(struct xe_dead_ct *dead)
return;
}
- drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
/* Can't generate a genuine core dump at this point, so just do the good bits */
drm_puts(&lp, "**** Xe Device Coredump ****\n");
+ drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
xe_device_snapshot_print(xe, &lp);
drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 82c4ae458dda..582aac106469 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -22,6 +22,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
+static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
+{
+ return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
+}
+
static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
{
return ct->state == XE_GUC_CT_STATE_ENABLED;
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index c569ff456e74..0b102ab46c4d 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -17,101 +17,130 @@
#include "xe_macros.h"
#include "xe_pm.h"
-static struct xe_guc *node_to_guc(struct drm_info_node *node)
-{
- return node->info_ent->data;
-}
-
-static int guc_info(struct seq_file *m, void *data)
+/*
+ * guc_debugfs_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_guc specific, in a similar way to how &xe_gt specific
+ * files are handled using &xe_gt_debugfs_simple_show.
+ *
+ * It is assumed that those debugfs files will be created in a directory
+ * entry whose grandparent struct dentry has d_inode->i_private pointing
+ * to the &xe_gt.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0 # dent->d_parent->d_parent (d_inode->i_private == gt)
+ * │   ├── uc # dent->d_parent
+ * │   │   ├── guc_info # dent
+ * │   │   ├── guc_...
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function that will actually print the file content::
+ *
+ * int (*print)(struct xe_guc *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int foo(struct xe_guc *guc, struct drm_printer *p)
+ * {
+ * drm_printf(p, "enabled %d\n", guc->submission_state.enabled);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list bar[] = {
+ * { name = "foo", .show = guc_debugfs_show, .data = foo },
+ * };
+ *
+ * parent = debugfs_create_dir("uc", gtdir);
+ * drm_debugfs_create_files(bar, ARRAY_SIZE(bar), parent, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int guc_debugfs_show(struct seq_file *m, void *data)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct dentry *grandparent = parent->d_parent;
+ struct xe_gt *gt = grandparent->d_inode->i_private;
+ struct xe_device *xe = gt_to_xe(gt);
+ int (*print)(struct xe_guc *, struct drm_printer *) = node->info_ent->data;
+ int ret;
xe_pm_runtime_get(xe);
- xe_guc_print_info(guc, &p);
+ ret = print(&gt->uc.guc, &p);
xe_pm_runtime_put(xe);
- return 0;
+ return ret;
}
-static int guc_log(struct seq_file *m, void *data)
+static int guc_log(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_log_print(&guc->log, &p);
- xe_pm_runtime_put(xe);
-
+ xe_guc_log_print(&guc->log, p);
return 0;
}
-static int guc_log_dmesg(struct seq_file *m, void *data)
+static int guc_log_dmesg(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
-
- xe_pm_runtime_get(xe);
xe_guc_log_print_dmesg(&guc->log);
- xe_pm_runtime_put(xe);
-
return 0;
}
-static int guc_ctb(struct seq_file *m, void *data)
+static int guc_ctb(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_ct_print(&guc->ct, &p, true);
- xe_pm_runtime_put(xe);
-
+ xe_guc_ct_print(&guc->ct, p, true);
return 0;
}
-static int guc_pc(struct seq_file *m, void *data)
+static int guc_pc(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
-
- xe_pm_runtime_get(xe);
- xe_guc_pc_print(&guc->pc, &p);
- xe_pm_runtime_put(xe);
-
+ xe_guc_pc_print(&guc->pc, p);
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"guc_info", guc_info, 0},
- {"guc_log", guc_log, 0},
- {"guc_log_dmesg", guc_log_dmesg, 0},
- {"guc_ctb", guc_ctb, 0},
- {"guc_pc", guc_pc, 0},
+/*
+ * only for GuC debugfs files which can be safely used on the VF as well:
+ * - without access to the GuC privileged registers
+ * - without access to the PF specific GuC objects
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "guc_info", .show = guc_debugfs_show, .data = xe_guc_print_info },
+ { "guc_ctb", .show = guc_debugfs_show, .data = guc_ctb },
+};
+
+/* For GuC debugfs files that require the SLPC support */
+static const struct drm_info_list slpc_debugfs_list[] = {
+ { "guc_pc", .show = guc_debugfs_show, .data = guc_pc },
+};
+
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ { "guc_log", .show = guc_debugfs_show, .data = guc_log },
+ { "guc_log_dmesg", .show = guc_debugfs_show, .data = guc_log_dmesg },
};
void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent)
{
- struct drm_minor *minor = guc_to_xe(guc)->drm.primary;
- struct drm_info_list *local;
- int i;
-
-#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
- local = drmm_kmalloc(&guc_to_xe(guc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
- if (!local)
- return;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_minor *minor = xe->drm.primary;
- memcpy(local, debugfs_list, DEBUGFS_SIZE);
-#undef DEBUGFS_SIZE
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
+ parent, minor);
- for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
- local[i].data = guc;
+ if (!IS_SRIOV_VF(xe)) {
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ parent, minor);
- drm_debugfs_create_files(local,
- ARRAY_SIZE(debugfs_list),
- parent, minor);
+ if (!xe->info.skip_guc_pc)
+ drm_debugfs_create_files(slpc_debugfs_list,
+ ARRAY_SIZE(slpc_debugfs_list),
+ parent, minor);
+ }
}
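Following the kernel-doc example above, extending the VF-safe set is one printer plus one list entry - a hedged sketch, with guc_enabled() being hypothetical:

	static int guc_enabled(struct xe_guc *guc, struct drm_printer *p)
	{
		drm_printf(p, "enabled %d\n", guc->submission_state.enabled);
		return 0;
	}

	static const struct drm_info_list more_vf_safe_list[] = {
		{ "guc_enabled", .show = guc_debugfs_show, .data = guc_enabled },
	};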
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 2a457dcf31d5..0fb48f8f05d8 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -17,36 +17,61 @@
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_mmio.h"
+#include "xe_sriov_pf_helpers.h"
#include "xe_trace_guc.h"
#define TOTAL_QUANTA 0x8000
-static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ struct engine_activity_buffer *buffer;
u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
size_t offset;
- offset = offsetof(struct guc_engine_activity_data,
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_data) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ offset += offsetof(struct guc_engine_activity_data,
engine_activity[guc_class][hwe->logical_instance]);
return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
}
-static struct iosys_map engine_metadata_map(struct xe_guc *guc)
+static struct iosys_map engine_metadata_map(struct xe_guc *guc,
+ unsigned int index)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ struct engine_activity_buffer *buffer;
+ size_t offset;
- return buffer->metadata_bo->vmap;
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_metadata) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ return IOSYS_MAP_INIT_OFFSET(&buffer->metadata_bo->vmap, offset);
}
static int allocate_engine_activity_group(struct xe_guc *guc)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
struct xe_device *xe = guc_to_xe(guc);
- u32 num_activity_group = 1; /* Will be modified for VF */
+ u32 num_activity_group;
+
+ /*
+	 * One activity group is allocated per VF, plus an additional one for the PF
+ */
+ num_activity_group = IS_SRIOV_PF(xe) ? xe_sriov_pf_get_totalvfs(xe) + 1 : 1;
engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
sizeof(struct engine_activity_group), GFP_KERNEL);
@@ -60,10 +85,11 @@ static int allocate_engine_activity_group(struct xe_guc *guc)
}
static int allocate_engine_activity_buffers(struct xe_guc *guc,
- struct engine_activity_buffer *buffer)
+ struct engine_activity_buffer *buffer,
+ int count)
{
- u32 metadata_size = sizeof(struct guc_engine_activity_metadata);
- u32 size = sizeof(struct guc_engine_activity_data);
+ u32 metadata_size = sizeof(struct guc_engine_activity_metadata) * count;
+ u32 size = sizeof(struct guc_engine_activity_data) * count;
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo, *metadata_bo;
@@ -118,10 +144,11 @@ static bool is_engine_activity_supported(struct xe_guc *guc)
return true;
}
-static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe)
+static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe,
+ unsigned int index)
{
struct xe_guc *guc = &hwe->gt->uc.guc;
- struct engine_activity_group *eag = &guc->engine_activity.eag[0];
+ struct engine_activity_group *eag = &guc->engine_activity.eag[index];
u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
return &eag->engine[guc_class][hwe->logical_instance];
@@ -138,9 +165,10 @@ static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
#define read_metadata_record(xe_, map_, field_) \
xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)
-static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
{
- struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
struct guc_engine_activity *cached_activity = &ea->activity;
struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
@@ -151,8 +179,8 @@ static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
u64 active_ticks, gpm_ts;
u16 change_num;
- activity_map = engine_activity_map(guc, hwe);
- metadata_map = engine_metadata_map(guc);
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);
/* GuC has not initialized activity data yet, return 0 */
@@ -194,9 +222,9 @@ update:
return ea->total + ea->active;
}
-static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, unsigned int index)
{
- struct engine_activity *ea = hw_engine_to_engine_activity(hwe);
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
struct guc_engine_activity *cached_activity = &ea->activity;
struct iosys_map activity_map, metadata_map;
@@ -205,8 +233,8 @@ static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
u64 numerator;
u16 quanta_ratio;
- activity_map = engine_activity_map(guc, hwe);
- metadata_map = engine_metadata_map(guc);
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
if (!cached_metadata->guc_tsc_frequency_hz)
cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
@@ -245,12 +273,39 @@ static int enable_engine_activity_stats(struct xe_guc *guc)
return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}
-static void engine_activity_set_cpu_ts(struct xe_guc *guc)
+static int enable_function_engine_activity_stats(struct xe_guc *guc, bool enable)
{
struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
- struct engine_activity_group *eag = &engine_activity->eag[0];
+ u32 metadata_ggtt_addr = 0, ggtt_addr = 0, num_functions = 0;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ u32 action[6];
+ int len = 0;
+
+ if (enable) {
+ metadata_ggtt_addr = xe_bo_ggtt_addr(buffer->metadata_bo);
+ ggtt_addr = xe_bo_ggtt_addr(buffer->activity_bo);
+ num_functions = engine_activity->num_functions;
+ }
+
+ action[len++] = XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER;
+ action[len++] = num_functions;
+ action[len++] = metadata_ggtt_addr;
+ action[len++] = 0;
+ action[len++] = ggtt_addr;
+ action[len++] = 0;
+
+ /* Blocking here to ensure the buffers are ready before reading them */
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static void engine_activity_set_cpu_ts(struct xe_guc *guc, unsigned int index)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_group *eag = &engine_activity->eag[index];
int i, j;
+ xe_gt_assert(guc_to_gt(guc), index < engine_activity->num_activity_group);
+
for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
eag->engine[i][j].last_cpu_ts = ktime_get();
@@ -265,34 +320,107 @@ static u32 gpm_timestamp_shift(struct xe_gt *gt)
return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
+static bool is_function_valid(struct xe_guc *guc, unsigned int fn_id)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+
+ if (!IS_SRIOV_PF(xe) && fn_id)
+ return false;
+
+ if (engine_activity->num_functions && fn_id >= engine_activity->num_functions)
+ return false;
+
+ return true;
+}
+
+static int engine_activity_disable_function_stats(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret;
+
+ if (!engine_activity->num_functions)
+ return 0;
+
+ ret = enable_function_engine_activity_stats(guc, false);
+ if (ret)
+ return ret;
+
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+
+ return 0;
+}
+
+static int engine_activity_enable_function_stats(struct xe_guc *guc, int num_vfs)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret, i;
+
+ if (!num_vfs)
+ return 0;
+
+ /* This includes 1 PF and num_vfs */
+ engine_activity->num_functions = num_vfs + 1;
+
+ ret = allocate_engine_activity_buffers(guc, buffer, engine_activity->num_functions);
+ if (ret)
+ return ret;
+
+ ret = enable_function_engine_activity_stats(guc, true);
+ if (ret) {
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+ return ret;
+ }
+
+	/* skip the PF as it was already set up */
+ for (i = 1; i < engine_activity->num_functions; i++)
+ engine_activity_set_cpu_ts(guc, i);
+
+ return 0;
+}
+
/**
* xe_guc_engine_activity_active_ticks - Get engine active ticks
* @guc: The GuC object
* @hwe: The hw_engine object
+ * @fn_id: function id to report on
*
* Return: accumulated ticks @hwe was active since engine activity stats were enabled.
*/
-u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
{
if (!xe_guc_engine_activity_supported(guc))
return 0;
- return get_engine_active_ticks(guc, hwe);
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_active_ticks(guc, hwe, fn_id);
}
/**
* xe_guc_engine_activity_total_ticks - Get engine total ticks
* @guc: The GuC object
* @hwe: The hw_engine object
+ * @fn_id: function id to report on
*
* Return: accumulated quanta of ticks allocated for the engine
*/
-u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe)
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
{
if (!xe_guc_engine_activity_supported(guc))
return 0;
- return get_engine_total_ticks(guc, hwe);
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_total_ticks(guc, hwe, fn_id);
}
/**
@@ -311,6 +439,25 @@ bool xe_guc_engine_activity_supported(struct xe_guc *guc)
}
/**
+ * xe_guc_engine_activity_function_stats - Enable/Disable per-function engine activity stats
+ * @guc: The GuC object
+ * @num_vfs: number of vfs
+ * @enable: true to enable, false otherwise
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ if (enable)
+ return engine_activity_enable_function_stats(guc, num_vfs);
+
+ return engine_activity_disable_function_stats(guc);
+}
+
+/**
* xe_guc_engine_activity_enable_stats - Enable engine activity stats
* @guc: The GuC object
*
@@ -327,7 +474,7 @@ void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
if (ret)
xe_gt_err(guc_to_gt(guc), "failed to enable activity stats%d\n", ret);
else
- engine_activity_set_cpu_ts(guc);
+ engine_activity_set_cpu_ts(guc, 0);
}
static void engine_activity_fini(void *arg)
@@ -360,7 +507,7 @@ int xe_guc_engine_activity_init(struct xe_guc *guc)
return ret;
}
- ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer);
+ ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer, 1);
if (ret) {
xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
return ret;
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.h b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
index a042d4cb404c..b32926c2d208 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.h
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
@@ -14,6 +14,9 @@ struct xe_guc;
int xe_guc_engine_activity_init(struct xe_guc *guc);
bool xe_guc_engine_activity_supported(struct xe_guc *guc);
void xe_guc_engine_activity_enable_stats(struct xe_guc *guc);
-u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
-u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe);
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable);
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
#endif
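With the fn_id parameter threaded through, a per-function utilization figure follows directly from the two counters - a hedged sketch, assuming total ticks advance once stats are enabled:

	#include <linux/math64.h>

	/* illustrative: percent busy of @hwe as seen by function @fn_id */
	static u64 example_engine_busy_pct(struct xe_guc *guc,
					   struct xe_hw_engine *hwe,
					   unsigned int fn_id)
	{
		u64 active = xe_guc_engine_activity_active_ticks(guc, hwe, fn_id);
		u64 total = xe_guc_engine_activity_total_ticks(guc, hwe, fn_id);

		return total ? div64_u64(active * 100, total) : 0;
	}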
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
index 5cdd034b6b70..48f69ddefa36 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
@@ -79,14 +79,24 @@ struct xe_guc_engine_activity {
/** @num_activity_group: number of activity groups */
u32 num_activity_group;
+ /** @num_functions: number of functions */
+ u32 num_functions;
+
/** @supported: indicates support for engine activity stats */
bool supported;
- /** @eag: holds the device level engine activity data */
+ /**
+ * @eag: holds the device level engine activity data in native mode.
+	 * In SRIOV mode, points to an array whose entries hold the engine
+	 * activity data for the PF and VFs
+ */
struct engine_activity_group *eag;
/** @device_buffer: buffer object for global engine activity */
struct engine_activity_buffer device_buffer;
+
+ /** @function_buffer: buffer object for per-function engine activity */
+ struct engine_activity_buffer function_buffer;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 80514a446ba2..38039c411387 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -260,7 +260,8 @@ int xe_guc_log_init(struct xe_guc_log *log)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 43b1192ba61c..3beaaa7b25c1 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -462,6 +462,21 @@ static u32 get_cur_freq(struct xe_gt *gt)
}
/**
+ * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
+ * @pc: The GuC PC
+ *
+ * Returns: the requested frequency for that GT instance
+ */
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ return get_cur_freq(gt);
+}
+
+/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
* @freq: A pointer to a u32 where the freq value will be returned
@@ -1053,7 +1068,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
goto out;
}
- memset(pc->bo->vmap.vaddr, 0, size);
+ xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
slpc_shared_data_write(pc, header.size, size);
earlier = ktime_get();
@@ -1170,7 +1185,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
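xe_guc_pc_get_cur_freq_fw() only asserts that forcewake is held, so callers take it themselves - a hedged sketch of the expected calling pattern, mirroring the xe_force_wake_get()/xe_force_wake_put() pairing visible in the xe_guc.c hunk above:

	/* illustrative: read the requested frequency with GT forcewake held */
	static u32 example_read_cur_freq(struct xe_gt *gt)
	{
		unsigned int fw_ref;
		u32 freq = 0;

		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (fw_ref) {
			freq = xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc);
			xe_force_wake_put(gt_to_fw(gt), fw_ref);
		}

		return freq;
	}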
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 39102b79602f..0a2664d5c811 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -22,6 +22,7 @@ void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 31bc2022bfc2..9567f6700cf2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -229,6 +229,17 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
static void guc_submit_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ ret = wait_event_timeout(guc->submission_state.fini_wq,
+ xa_empty(&guc->submission_state.exec_queue_lookup),
+ HZ * 5);
+
+ drain_workqueue(xe->destroy_wq);
+
+ xe_gt_assert(gt, ret);
xa_destroy(&guc->submission_state.exec_queue_lookup);
}
@@ -300,6 +311,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
primelockdep(guc);
+ guc->submission_state.initialized = true;
+
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
@@ -834,6 +847,13 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+ /*
+	 * If the device is being wedged even before submission_state is
+ * initialized, there's nothing to do here.
+ */
+ if (!guc->submission_state.initialized)
+ return;
+
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
@@ -941,7 +961,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
return xe_sched_invalidate_job(job, 2);
}
- ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
+ ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0]));
ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
/*
@@ -1170,9 +1190,12 @@ trigger_reset:
process_name = q->vm->xef->process_name;
pid = q->vm->xef->pid;
}
- xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
- xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
- q->guc->id, q->flags, process_name, pid);
+
+ if (!exec_queue_killed(q))
+ xe_gt_notice(guc_to_gt(guc),
+ "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags, process_name, pid);
trace_xe_sched_job_timedout(job);
@@ -1739,6 +1762,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
+ if (!guc->submission_state.initialized)
+ return 0;
+
/*
* Using an atomic here rather than submission_state.lock as this
* function can be called while holding the CT lock (engine reset
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 63bac64429a5..1fde7614fcc5 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -89,6 +89,11 @@ struct xe_guc {
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
bool enabled;
+ /**
+	 * @submission_state.initialized: set once the submission state has
+	 * been initialized - before that, not even the lock is valid
+ */
+ bool initialized;
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index a440442b4d72..640950172088 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -605,6 +605,7 @@ err_object:
kobject_put(kobj);
return err;
}
+ALLOW_ERROR_INJECTION(xe_add_hw_engine_class_defaults, ERRNO); /* See xe_pci_probe() */
static void hw_engine_class_sysfs_fini(void *arg)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 48d80ffdf7bb..74f31639b37f 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -5,6 +5,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/units.h>
@@ -27,6 +28,7 @@ enum xe_hwmon_reg {
REG_PKG_POWER_SKU_UNIT,
REG_GT_PERF_STATUS,
REG_PKG_ENERGY_STATUS,
+ REG_FAN_SPEED,
};
enum xe_hwmon_reg_operation {
@@ -42,6 +44,21 @@ enum xe_hwmon_channel {
CHANNEL_MAX,
};
+enum xe_fan_channel {
+ FAN_1,
+ FAN_2,
+ FAN_3,
+ FAN_MAX,
+};
+
+/*
+ * For platforms that support mailbox commands for power limits,
+ * REG_PKG_POWER_SKU_UNIT is not supported, so the SKU units below are used
+ * instead.
+ */
+#define PWR_UNIT 0x3
+#define ENERGY_UNIT 0xe
+#define TIME_UNIT 0xa
+
/*
* SF_* - scale factors for particular quantities according to hwmon spec.
*/
@@ -51,6 +68,18 @@ enum xe_hwmon_channel {
#define SF_ENERGY 1000000 /* microjoules */
#define SF_TIME 1000 /* milliseconds */
+/*
+ * PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute.
+ */
+#define PL1_HWMON_ATTR hwmon_power_max
+
+#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "Invalid")
+
+/*
+ * Timeout for power limit write mailbox command.
+ */
+#define PL_WRITE_MBX_TIMEOUT_MS (1)
+
/**
* struct xe_hwmon_energy_info - to accumulate energy
*/
@@ -62,6 +91,16 @@ struct xe_hwmon_energy_info {
};
/**
+ * struct xe_hwmon_fan_info - to cache previous fan reading
+ */
+struct xe_hwmon_fan_info {
+ /** @reg_val_prev: previous fan reg val */
+ u32 reg_val_prev;
+ /** @time_prev: previous timestamp */
+ u64 time_prev;
+};
+
+/**
* struct xe_hwmon - xe hwmon data structure
*/
struct xe_hwmon {
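The cached reg_val_prev/time_prev pair is what turns a raw fan pulse counter into an RPM reading: speed is the pulse delta over the elapsed time. A hedged sketch only - the helper and the two-pulses-per-revolution assumption are illustrative, not the driver's code:

	/* illustrative: RPM from the pulse delta, assuming 2 pulses per rev */
	static u64 example_fan_rpm(struct xe_hwmon_fan_info *fi, u32 reg_val,
				   u64 time_now_ms)
	{
		u64 pulses = reg_val - fi->reg_val_prev;
		u64 delta_ms = time_now_ms - fi->time_prev;
		u64 rpm = delta_ms ? div64_u64(pulses * 30000, delta_ms) : 0;

		fi->reg_val_prev = reg_val;
		fi->time_prev = time_now_ms;

		return rpm;
	}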
@@ -79,8 +118,82 @@ struct xe_hwmon {
int scl_shift_time;
/** @ei: Energy info for energyN_input */
struct xe_hwmon_energy_info ei[CHANNEL_MAX];
+ /** @fi: Fan info for fanN_input */
+ struct xe_hwmon_fan_info fi[FAN_MAX];
+	/** @boot_power_limit_read: whether the boot power limits have been read */
+ bool boot_power_limit_read;
+ /** @pl1_on_boot: power limit PL1 on boot */
+ u32 pl1_on_boot[CHANNEL_MAX];
};
+static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
+ u32 *uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+ u32 val0 = 0, val1 = 0;
+ int ret = 0;
+
+ ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ READ_PSYSGPU_POWER_LIMIT :
+ READ_PACKAGE_POWER_LIMIT,
+ hwmon->boot_power_limit_read ?
+ READ_PL_FROM_PCODE : READ_PL_FROM_FW),
+ &val0, &val1);
+
+ if (ret) {
+ drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+ *uval = 0;
+ return ret;
+ }
+
+ /* return the value only if limit is enabled */
+ if (attr == PL1_HWMON_ATTR)
+ *uval = (val0 & PWR_LIM_EN) ? val0 : 0;
+ else if (attr == hwmon_power_label)
+ *uval = (val0 & PWR_LIM_EN) ? 1 : 0;
+ else
+ *uval = 0;
+
+ return ret;
+}
+
+static int xe_hwmon_pcode_write_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
+ u32 uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+ u32 val0, val1;
+ int ret = 0;
+
+ ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ READ_PSYSGPU_POWER_LIMIT :
+ READ_PACKAGE_POWER_LIMIT,
+ hwmon->boot_power_limit_read ?
+ READ_PL_FROM_PCODE : READ_PL_FROM_FW),
+ &val0, &val1);
+
+ if (ret)
+ drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+
+ if (attr == PL1_HWMON_ATTR)
+ val0 = uval;
+ else
+ return -EIO;
+
+ ret = xe_pcode_write64_timeout(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ WRITE_PSYSGPU_POWER_LIMIT :
+ WRITE_PACKAGE_POWER_LIMIT, 0),
+ val0, val1, PL_WRITE_MBX_TIMEOUT_MS);
+ if (ret)
+ drm_dbg(&hwmon->xe->drm, "write failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+ return ret;
+}
+
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
int channel)
{
@@ -101,29 +214,19 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
}
break;
case REG_PKG_RAPL_LIMIT:
- if (xe->info.platform == XE_BATTLEMAGE) {
- if (channel == CHANNEL_PKG)
- return BMG_PACKAGE_RAPL_LIMIT;
- else
- return BMG_PLATFORM_POWER_LIMIT;
- } else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
return PVC_GT0_PACKAGE_RAPL_LIMIT;
- } else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
return PCU_CR_PACKAGE_RAPL_LIMIT;
- }
break;
case REG_PKG_POWER_SKU:
- if (xe->info.platform == XE_BATTLEMAGE)
- return BMG_PACKAGE_POWER_SKU;
- else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
return PVC_GT0_PACKAGE_POWER_SKU;
else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
return PCU_CR_PACKAGE_POWER_SKU;
break;
case REG_PKG_POWER_SKU_UNIT:
- if (xe->info.platform == XE_BATTLEMAGE)
- return BMG_PACKAGE_POWER_SKU_UNIT;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
else if (xe->info.platform == XE_DG2)
return PCU_CR_PACKAGE_POWER_SKU_UNIT;
@@ -144,6 +247,14 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return PCU_CR_PACKAGE_ENERGY_STATUS;
}
break;
+ case REG_FAN_SPEED:
+ if (channel == FAN_1)
+ return BMG_FAN_1_SPEED;
+ else if (channel == FAN_2)
+ return BMG_FAN_2_SPEED;
+ else if (channel == FAN_3)
+ return BMG_FAN_3_SPEED;
+ break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
break;
@@ -152,7 +263,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return XE_REG(0);
}
-#define PL1_DISABLE 0
+#define PL_DISABLE 0
/*
* HW allows arbitrary PL1 limits to be set but silently clamps these values to
@@ -160,67 +271,83 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
* same pattern for sysfs, allow arbitrary PL1 limits to be set but display
* clamped values when read.
*/
-static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
u64 reg_val, min, max;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
- pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ mutex_lock(&hwmon->hwmon_lock);
- /*
- * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible.
- * So not checking it again here.
- */
- if (!xe_reg_is_valid(pkg_power_sku)) {
- drm_warn(&xe->drm, "pkg_power_sku invalid\n");
- *value = 0;
- return;
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val);
+ } else {
+ rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+
+ /*
+ * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible.
+ * So not checking it again here.
+ */
+ if (!xe_reg_is_valid(pkg_power_sku)) {
+ drm_warn(&xe->drm, "pkg_power_sku invalid\n");
+ *value = 0;
+ goto unlock;
+ }
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
}
- mutex_lock(&hwmon->hwmon_lock);
-
- reg_val = xe_mmio_read32(mmio, rapl_limit);
- /* Check if PL1 limit is disabled */
- if (!(reg_val & PKG_PWR_LIM_1_EN)) {
- *value = PL1_DISABLE;
+ /* Check if PL limits are disabled. */
+ if (!(reg_val & PWR_LIM_EN)) {
+ *value = PL_DISABLE;
+ drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
+ PWR_ATTR_TO_STR(attr), channel, reg_val);
goto unlock;
}
- reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
+ reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
- reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
- min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
- min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
- max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
- max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
-
- if (min && max)
- *value = clamp_t(u64, *value, min, max);
+ /* For platforms with mailbox power limit support clamping would be done by pcode. */
+ if (!hwmon->xe->info.has_mbx_power_limits) {
+ reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
+ min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
+ max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
+ min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
+ max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
+ if (min && max)
+ *value = clamp_t(u64, *value, min, max);
+ }
unlock:
mutex_unlock(&hwmon->hwmon_lock);
}
-static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
+static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channel, long value)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
int ret = 0;
- u64 reg_val;
+ u32 reg_val;
struct xe_reg rapl_limit;
+ mutex_lock(&hwmon->hwmon_lock);
+
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
- mutex_lock(&hwmon->hwmon_lock);
+ /* Disable Power Limit and verify, as limit cannot be disabled on all platforms. */
+ if (value == PL_DISABLE) {
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ xe_hwmon_pcode_write_power_limit(hwmon, attr, channel, 0);
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
+ } else {
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN, 0);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
+ }
- /* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
- if (value == PL1_DISABLE) {
- reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
- reg_val = xe_mmio_read32(mmio, rapl_limit);
- if (reg_val & PKG_PWR_LIM_1_EN) {
- drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
+ if (reg_val & PWR_LIM_EN) {
+ drm_warn(&hwmon->xe->drm, "Power limit disable is not supported!\n");
ret = -EOPNOTSUPP;
}
goto unlock;
@@ -228,26 +355,50 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
- reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
- reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+ reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
+ /*
+	 * Clamp the power limit to the card-firmware default as the maximum, as
+	 * an additional protection on top of the pcode clamp.
+ */
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ if (reg_val > REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel])) {
+ reg_val = REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel]);
+ drm_dbg(&hwmon->xe->drm, "Clamping power limit to firmware default 0x%x\n",
+ reg_val);
+ }
+ }
+
+ if (hwmon->xe->info.has_mbx_power_limits)
+ ret = xe_hwmon_pcode_write_power_limit(hwmon, attr, channel, reg_val);
+ else
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN | PWR_LIM_VAL,
+ reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
return ret;
}
-static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, u32 attr, int channel,
+ long *value)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
- u64 reg_val;
+ u32 reg_val;
+
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ /* PL1 is rated max if supported. */
+ xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, channel, &reg_val);
+ } else {
+ /*
+ * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
+ * for this register can be skipped.
+ * See xe_hwmon_power_is_visible.
+ */
+ struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+
+ reg_val = xe_mmio_read32(mmio, reg);
+ }
- /*
- * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
- * for this register can be skipped.
- * See xe_hwmon_power_is_visible.
- */
- reg_val = xe_mmio_read32(mmio, reg);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
@@ -301,23 +452,35 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
- int sensor_index = to_sensor_dev_attr(attr)->index;
+ int channel = to_sensor_dev_attr(attr)->index;
+ u32 power_attr = PL1_HWMON_ATTR;
+ int ret = 0;
xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
+ if (ret) {
+ drm_err(&hwmon->xe->drm,
+ "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
+ channel, power_attr, r, ret);
+ r = 0;
+ }
+ } else {
+ r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
+ }
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(hwmon->xe);
- x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
- y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
+ x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
+ y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
/*
- * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
+ * tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
* = (4 | x) << (y - 2)
*
* Here (y - 2) ensures a 1.x fixed point representation of 1.x
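For concreteness, a worked decode of this encoding (illustrative values only, assuming SF_TIME is the millisecond scale factor and the default scl_shift_time of 0xa noted in the store path below):

/*
 * x = 0, y = 0x12 (the HW maximum mentioned below)
 * tau4 = (4 | x) << y = 4 << 0x12 = 1048576
 * out  = mul_u64_u32_shr(tau4, SF_TIME, scl_shift_time + x_w)
 *      = 1048576 * 1000 >> (0xa + 2) = 256000 ms, i.e. 256 seconds
 */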
@@ -344,14 +507,15 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
u64 tau4, r, max_win;
unsigned long val;
int ret;
- int sensor_index = to_sensor_dev_attr(attr)->index;
+ int channel = to_sensor_dev_attr(attr)->index;
+ u32 power_attr = PL1_HWMON_ATTR;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
/*
- * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
+ * Max HW supported tau in '(1 + (x / 4)) * power(2,y)' format, x = 0, y = 0x12.
* The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
*
* The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
@@ -371,11 +535,13 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
- if (val > max_win)
+ if (val > max_win) {
+ drm_warn(&hwmon->xe->drm, "power_interval invalid val 0x%lx\n", val);
return -EINVAL;
+ }
/* val in hw units */
- val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
+ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;
/*
* Convert val to 1.x * power(2,y)
@@ -390,14 +556,21 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
x = (val - (1ul << y)) << x_w >> y;
}
- rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
+ rxy = REG_FIELD_PREP(PWR_LIM_TIME_X, x) |
+ REG_FIELD_PREP(PWR_LIM_TIME_Y, y);
xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
- PKG_PWR_LIM_1_TIME, rxy);
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
+ r = (r & ~PWR_LIM_TIME) | rxy;
+ xe_hwmon_pcode_write_power_limit(hwmon, power_attr, channel, r);
+ } else {
+ r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel),
+ PWR_LIM_TIME, rxy);
+ }
mutex_unlock(&hwmon->hwmon_lock);
@@ -406,6 +579,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
return count;
}
+/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
xe_hwmon_power_max_interval_show,
xe_hwmon_power_max_interval_store, CHANNEL_CARD);
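The store path above inverts that encoding. A worked encode, illustrative values only (y is assumed to be the MSB position of val; its computation falls outside the hunk shown):

/*
 * Writing 1000 ms:
 * val = DIV_ROUND_CLOSEST_ULL(1000 << 0xa, SF_TIME) + 1 = 1025 hw units
 * y   = 10 (MSB of 1025), x = (1025 - (1ul << 10)) << 2 >> 10 = 0
 * so rxy encodes tau = (1 + 0/4) * power(2,10) units, i.e. 1 second.
 */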
@@ -426,10 +600,19 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
+ int channel = index ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = PL1_HWMON_ATTR;
+ u32 uval;
xe_pm_runtime_get(hwmon->xe);
- ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
+ ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
+ } else {
+ ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ channel)) ? attr->mode : 0;
+ }
xe_pm_runtime_put(hwmon->xe);
@@ -449,11 +632,12 @@ static const struct attribute_group *hwmon_groups[] = {
static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
- HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT,
+ HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL),
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
NULL
};
@@ -480,6 +664,19 @@ static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
(uval & POWER_SETUP_I1_DATA_MASK));
}
+static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
+ /* Quirk for platforms that don't report the correct fan count. */
+ if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) {
+ *uval = 2;
+ return 0;
+ }
+
+ return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL);
+}
+
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
long *value, u32 scale_factor)
{
@@ -561,19 +758,27 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
switch (attr) {
case hwmon_power_max:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
+ return (uval) ? 0664 : 0;
+ } else {
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
channel)) ? 0664 : 0;
+ }
case hwmon_power_rated_max:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
- channel)) ? 0444 : 0;
+ if (hwmon->xe->info.has_mbx_power_limits)
+ return 0;
+ else
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
+ channel)) ? 0444 : 0;
case hwmon_power_crit:
- if (channel == CHANNEL_PKG)
- return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
- !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
- break;
case hwmon_power_label:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
- channel)) ? 0444 : 0;
+ if (channel == CHANNEL_CARD) {
+ xe_hwmon_pcode_read_i1(hwmon, &uval);
+ return (uval & POWER_SETUP_I1_WATTS) ? (attr == hwmon_power_label) ?
+ 0444 : 0644 : 0;
+ }
+ break;
default:
return 0;
}
@@ -585,10 +790,10 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_power_max:
- xe_hwmon_power_max_read(hwmon, channel, val);
+ xe_hwmon_power_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_rated_max:
- xe_hwmon_power_rated_max_read(hwmon, channel, val);
+ xe_hwmon_power_rated_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_crit:
return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
@@ -602,7 +807,7 @@ xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
case hwmon_power_max:
- return xe_hwmon_power_max_write(hwmon, channel, val);
+ return xe_hwmon_power_max_write(hwmon, attr, channel, val);
case hwmon_power_crit:
return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
default:
@@ -706,6 +911,75 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
}
static umode_t
+xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
+{
+ u32 uval;
+
+ if (!hwmon->xe->info.has_fan_control)
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval))
+ return 0;
+
+ return channel < uval ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ struct xe_hwmon_fan_info *fi = &hwmon->fi[channel];
+ u64 rotations, time_now, time;
+ u32 reg_val;
+ int ret = 0;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel));
+ time_now = get_jiffies_64();
+
+ /*
+ * HW register value is accumulated count of pulses from PWM fan with the scale
+ * of 2 pulses per rotation.
+ */
+ rotations = (reg_val - fi->reg_val_prev) / 2;
+
+ time = jiffies_delta_to_msecs(time_now - fi->time_prev);
+ if (unlikely(!time)) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ /*
+ * Calculate fan speed in RPM by time-averaging two subsequent pulse-count readings.
+ * RPM = number of rotations * msecs per minute / time in msecs
+ */
+ *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
+
+ fi->reg_val_prev = reg_val;
+ fi->time_prev = time_now;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
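A worked example of the averaging above, with illustrative numbers only:

/*
 * If the pulse counter advanced by 70 since the previous read and 500 ms
 * elapsed, rotations = 70 / 2 = 35 and
 * *val = DIV_ROUND_UP_ULL(35 * (MSEC_PER_SEC * 60), 500) = 4200 RPM.
 */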
+
+static int
+xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ return xe_hwmon_fan_input_read(hwmon, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -730,6 +1004,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
case hwmon_energy:
ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_is_visible(hwmon, attr, channel);
+ break;
default:
ret = 0;
break;
@@ -765,6 +1042,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_energy:
ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_read(hwmon, attr, channel, val);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -842,23 +1122,47 @@ static void
xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- long energy;
+ long energy, fan_speed;
u64 val_sku_unit = 0;
int channel;
struct xe_reg pkg_power_sku_unit;
- /*
- * The contents of register PKG_POWER_SKU_UNIT do not change,
- * so read it once and store the shift values.
- */
- pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
- if (xe_reg_is_valid(pkg_power_sku_unit)) {
- val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
- hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
- hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
- hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ /* Check if the card firmware supports mailbox power limit commands. */
+ if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD,
+ &hwmon->pl1_on_boot[CHANNEL_CARD]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
+ &hwmon->pl1_on_boot[CHANNEL_PKG])) {
+ drm_warn(&hwmon->xe->drm,
+ "Failed to read power limits, check card firmware !\n");
+ } else {
+ drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
+ /* Write the default limits back so subsequent reads come from pcode. */
+ xe_hwmon_pcode_write_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_CARD,
+ hwmon->pl1_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_write_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_PKG,
+ hwmon->pl1_on_boot[CHANNEL_PKG]);
+ hwmon->scl_shift_power = PWR_UNIT;
+ hwmon->scl_shift_energy = ENERGY_UNIT;
+ hwmon->scl_shift_time = TIME_UNIT;
+ hwmon->boot_power_limit_read = true;
+ }
+ } else {
+ drm_info(&hwmon->xe->drm, "Using register for power limits\n");
+ /*
+ * The contents of register PKG_POWER_SKU_UNIT do not change,
+ * so read it once and store the shift values.
+ */
+ pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
+ if (xe_reg_is_valid(pkg_power_sku_unit)) {
+ val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
+ hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
+ hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
+ hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
+ }
}
-
/*
* Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
* first value of the energy register read
@@ -866,6 +1170,11 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
for (channel = 0; channel < CHANNEL_MAX; channel++)
if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
xe_hwmon_energy_get(hwmon, channel, &energy);
+
+ /* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. */
+ for (channel = 0; channel < FAN_MAX; channel++)
+ if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel))
+ xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
static void xe_hwmon_mutex_destroy(void *arg)
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 89393dcb53d9..63db66df064b 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_NEEDS_64K);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index df3ceddede07..bf7c3981897d 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -24,6 +24,7 @@
#include "xe_hw_fence.h"
#include "xe_map.h"
#include "xe_memirq.h"
+#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_trace_lrc.h"
#include "xe_vm.h"
@@ -37,6 +38,7 @@
#define LRC_ENGINE_CLASS GENMASK_ULL(63, 61)
#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
+#define LRC_PPHWSP_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
static struct xe_device *
@@ -50,19 +52,22 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
struct xe_device *xe = gt_to_xe(gt);
size_t size;
+ /* Per-process HW status page (PPHWSP) */
+ size = LRC_PPHWSP_SIZE;
+
+ /* Engine context image */
switch (class) {
case XE_ENGINE_CLASS_RENDER:
if (GRAPHICS_VER(xe) >= 20)
- size = 4 * SZ_4K;
+ size += 3 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
case XE_ENGINE_CLASS_COMPUTE:
- /* 14 pages since graphics_ver == 11 */
if (GRAPHICS_VER(xe) >= 20)
- size = 3 * SZ_4K;
+ size += 2 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
default:
WARN(1, "Unknown engine class: %d", class);
@@ -71,7 +76,7 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
case XE_ENGINE_CLASS_OTHER:
- size = 2 * SZ_4K;
+ size += 1 * SZ_4K;
}
/* Add indirect ring state page */
@@ -650,7 +655,7 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
-#define LRC_PPHWSP_SIZE SZ_4K
+#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -684,7 +689,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{
- /* The start seqno is stored in the driver-defined portion of PPHWSP */
+ /* This is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
}
@@ -694,11 +699,21 @@ static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET;
}
+static inline u32 __xe_lrc_engine_id_offset(struct xe_lrc *lrc)
+{
+ return xe_lrc_pphwsp_offset(lrc) + LRC_ENGINE_ID_PPHWSP_OFFSET;
+}
+
static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc)
{
return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP * sizeof(u32);
}
+static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc)
+{
+ return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP_UDW * sizeof(u32);
+}
+
static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
{
/* Indirect ring state page is at the very end of LRC */
@@ -726,8 +741,10 @@ DECL_MAP_ADDR_HELPERS(regs)
DECL_MAP_ADDR_HELPERS(start_seqno)
DECL_MAP_ADDR_HELPERS(ctx_job_timestamp)
DECL_MAP_ADDR_HELPERS(ctx_timestamp)
+DECL_MAP_ADDR_HELPERS(ctx_timestamp_udw)
DECL_MAP_ADDR_HELPERS(parallel)
DECL_MAP_ADDR_HELPERS(indirect_ring)
+DECL_MAP_ADDR_HELPERS(engine_id)
#undef DECL_MAP_ADDR_HELPERS
@@ -743,18 +760,37 @@ u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc)
}
/**
+ * xe_lrc_ctx_timestamp_udw_ggtt_addr() - Get ctx timestamp udw GGTT address
+ * @lrc: Pointer to the lrc.
+ *
+ * Returns: ctx timestamp udw GGTT address
+ */
+u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc)
+{
+ return __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
+}
+
+/**
* xe_lrc_ctx_timestamp() - Read ctx timestamp value
* @lrc: Pointer to the lrc.
*
* Returns: ctx timestamp value
*/
-u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
+u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
+ u32 ldw, udw = 0;
map = __xe_lrc_ctx_timestamp_map(lrc);
- return xe_map_read32(xe, &map);
+ ldw = xe_map_read32(xe, &map);
+
+ if (xe->info.has_64bit_timestamp) {
+ map = __xe_lrc_ctx_timestamp_udw_map(lrc);
+ udw = xe_map_read32(xe, &map);
+ }
+
+ return (u64)udw << 32 | ldw;
}
/**
@@ -864,7 +900,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
- u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
+ u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
@@ -873,10 +909,80 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
static void xe_lrc_finish(struct xe_lrc *lrc)
{
xe_hw_fence_ctx_finish(&lrc->fence_ctx);
- xe_bo_lock(lrc->bo, false);
- xe_bo_unpin(lrc->bo);
- xe_bo_unlock(lrc->bo);
- xe_bo_put(lrc->bo);
+ xe_bo_unpin_map_no_vm(lrc->bo);
+ xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo);
+}
+
+/*
+ * xe_lrc_setup_utilization() - Set up the WA BB to assist in calculating active
+ * context run ticks.
+ * @lrc: Pointer to the lrc.
+ *
+ * Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the
+ * context, but only gets updated when the context switches out. In order to
+ * check how long a context has been active before it switches out, two things
+ * are required:
+ *
+ * (1) Determine if the context is running:
+ * To do so, we program the WA BB to set an initial value for CTX_TIMESTAMP in
+ * the LRC. The value chosen is 1 since 0 is the initial value when the LRC is
+ * initialized. During a query, we just check for this value to determine if the
+ * context is active. If the context switched out, it would overwrite this
+ * location with the actual CTX_TIMESTAMP MMIO value. Note that WA BB runs as
+ * the last part of context restore, so reusing this LRC location will not
+ * clobber anything.
+ *
+ * (2) Calculate the time that the context has been active for:
+ * The CTX_TIMESTAMP ticks only when the context is active. If a context is
+ * active, we just use the CTX_TIMESTAMP MMIO as the new value of utilization.
+ * While doing so, we need to read the CTX_TIMESTAMP MMIO for the specific
+ * engine instance. Since we do not know which instance the context is running
+ * on until it is scheduled, we also read the ENGINE_ID MMIO in the WA BB and
+ * store it in the PPHWSP.
+ */
+#define CONTEXT_ACTIVE 1ULL
+static int xe_lrc_setup_utilization(struct xe_lrc *lrc)
+{
+ u32 *cmd, *buf = NULL;
+
+ if (lrc->bb_per_ctx_bo->vmap.is_iomem) {
+ buf = kmalloc(lrc->bb_per_ctx_bo->size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ cmd = buf;
+ } else {
+ cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
+ }
+
+ *cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ *cmd++ = ENGINE_ID(0).addr;
+ *cmd++ = __xe_lrc_engine_id_ggtt_addr(lrc);
+ *cmd++ = 0;
+
+ *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ *cmd++ = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+ *cmd++ = 0;
+ *cmd++ = lower_32_bits(CONTEXT_ACTIVE);
+
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp) {
+ *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ *cmd++ = __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
+ *cmd++ = 0;
+ *cmd++ = upper_32_bits(CONTEXT_ACTIVE);
+ }
+
+ *cmd++ = MI_BATCH_BUFFER_END;
+
+ if (buf) {
+ xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bb_per_ctx_bo->vmap, 0,
+ buf, (cmd - buf) * sizeof(*cmd));
+ kfree(buf);
+ }
+
+ xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
+ xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1);
+
+ return 0;
}
#define PVC_CTX_ASID (0x2e + 1)
@@ -893,31 +999,42 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
void *init_data = NULL;
u32 arb_enable;
u32 lrc_size;
+ u32 bo_flags;
int err;
kref_init(&lrc->refcount);
+ lrc->gt = gt;
lrc->flags = 0;
lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE;
+ if (vm && vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
/*
* FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
* via VM bind calls.
*/
- lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size,
+ lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, lrc_size,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
+ lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
+ ttm_bo_type_kernel,
+ bo_flags);
+ if (IS_ERR(lrc->bb_per_ctx_bo)) {
+ err = PTR_ERR(lrc->bb_per_ctx_bo);
+ goto err_lrc_finish;
+ }
+
lrc->size = lrc_size;
- lrc->tile = gt_to_tile(hwe->gt);
lrc->ring.size = ring_size;
lrc->ring.tail = 0;
- lrc->ctx_timestamp = 0;
xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
hwe->fence_irq, hwe->name);
@@ -990,7 +1107,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
_MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+ lrc->ctx_timestamp = 0;
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
+ xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0);
if (xe->info.has_asid && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
@@ -1019,6 +1139,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
+ err = xe_lrc_setup_utilization(lrc);
+ if (err)
+ goto err_lrc_finish;
+
return 0;
err_lrc_finish:
@@ -1238,6 +1362,21 @@ struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc)
return __xe_lrc_parallel_map(lrc);
}
+/**
+ * xe_lrc_engine_id() - Read engine id value
+ * @lrc: Pointer to the lrc.
+ *
+ * Returns: engine id value
+ */
+static u32 xe_lrc_engine_id(struct xe_lrc *lrc)
+{
+ struct xe_device *xe = lrc_to_xe(lrc);
+ struct iosys_map map;
+
+ map = __xe_lrc_engine_id_map(lrc);
+ return xe_map_read32(xe, &map);
+}
+
static int instr_dw(u32 cmd_header)
{
/* GFXPIPE "SINGLE_DW" opcodes are a single dword */
@@ -1445,6 +1584,7 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH3D(3DSTATE_CLIP_MESH);
MATCH3D(3DSTATE_SBE_MESH);
MATCH3D(3DSTATE_CPSIZE_CONTROL_BUFFER);
+ MATCH3D(3DSTATE_COARSE_PIXEL);
MATCH3D(3DSTATE_DRAWING_RECTANGLE);
MATCH3D(3DSTATE_CHROMA_KEY);
@@ -1668,9 +1808,6 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
if (!snapshot)
return NULL;
- if (lrc->bo->vm)
- xe_vm_get(lrc->bo->vm);
-
snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
snapshot->ring_addr = __xe_lrc_ring_ggtt_addr(lrc);
snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc);
@@ -1684,7 +1821,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
snapshot->lrc_snapshot = NULL;
- snapshot->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
return snapshot;
}
@@ -1692,14 +1829,12 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
{
struct xe_bo *bo;
- struct xe_vm *vm;
struct iosys_map src;
if (!snapshot)
return;
bo = snapshot->lrc_bo;
- vm = bo->vm;
snapshot->lrc_bo = NULL;
snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
@@ -1719,8 +1854,6 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
xe_bo_unlock(bo);
put_bo:
xe_bo_put(bo);
- if (vm)
- xe_vm_put(vm);
}
void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
@@ -1773,33 +1906,80 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
return;
kvfree(snapshot->lrc_snapshot);
- if (snapshot->lrc_bo) {
- struct xe_vm *vm;
-
- vm = snapshot->lrc_bo->vm;
+ if (snapshot->lrc_bo)
xe_bo_put(snapshot->lrc_bo);
- if (vm)
- xe_vm_put(vm);
- }
+
kfree(snapshot);
}
+static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts)
+{
+ u16 class = REG_FIELD_GET(ENGINE_CLASS_ID, engine_id);
+ u16 instance = REG_FIELD_GET(ENGINE_INSTANCE_ID, engine_id);
+ struct xe_hw_engine *hwe;
+ u64 val;
+
+ hwe = xe_gt_hw_engine(lrc->gt, class, instance, false);
+ if (xe_gt_WARN_ONCE(lrc->gt, !hwe || xe_hw_engine_is_reserved(hwe),
+ "Unexpected engine class:instance %d:%d for context utilization\n",
+ class, instance))
+ return -1;
+
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
+ val = xe_mmio_read64_2x32(&hwe->gt->mmio,
+ RING_CTX_TIMESTAMP(hwe->mmio_base));
+ else
+ val = xe_mmio_read32(&hwe->gt->mmio,
+ RING_CTX_TIMESTAMP(hwe->mmio_base));
+
+ *reg_ctx_ts = val;
+
+ return 0;
+}
+
/**
* xe_lrc_update_timestamp() - Update ctx timestamp
* @lrc: Pointer to the lrc.
* @old_ts: Old timestamp value
*
* Populate @old_ts current saved ctx timestamp, read new ctx timestamp and
- * update saved value.
+ * update saved value. With support for active contexts, the calculation may be
+ * slightly racy, so follow a read-again logic to ensure that the context is
+ * still active before returning the right timestamp.
*
* Returns: New ctx timestamp value
*/
-u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts)
{
+ u64 lrc_ts, reg_ts;
+ u32 engine_id;
+
*old_ts = lrc->ctx_timestamp;
- lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ lrc_ts = xe_lrc_ctx_timestamp(lrc);
+ /* CTX_TIMESTAMP mmio read is invalid on VF, so return the LRC value */
+ if (IS_SRIOV_VF(lrc_to_xe(lrc))) {
+ lrc->ctx_timestamp = lrc_ts;
+ goto done;
+ }
+
+ if (lrc_ts == CONTEXT_ACTIVE) {
+ engine_id = xe_lrc_engine_id(lrc);
+ if (!get_ctx_timestamp(lrc, engine_id, &reg_ts))
+ lrc->ctx_timestamp = reg_ts;
+
+ /* read lrc again to ensure context is still active */
+ lrc_ts = xe_lrc_ctx_timestamp(lrc);
+ }
+
+ /*
+ * If context switched out, just use the lrc_ts. Note that this needs to
+ * be a separate if condition.
+ */
+ if (lrc_ts != CONTEXT_ACTIVE)
+ lrc->ctx_timestamp = lrc_ts;
+done:
trace_xe_lrc_update_timestamp(lrc, *old_ts);
return lrc->ctx_timestamp;
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index 0b40f349ab95..eb6e8de8c939 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -120,7 +120,8 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot);
u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc);
-u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
+u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc);
+u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
@@ -136,6 +137,6 @@ u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
*
* Returns the current LRC timestamp
*/
-u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts);
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts);
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 71ecb453f811..ae24cf6f8dd9 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -25,8 +25,8 @@ struct xe_lrc {
/** @size: size of lrc including any indirect ring state page */
u32 size;
- /** @tile: tile which this LRC belongs to */
- struct xe_tile *tile;
+ /** @gt: gt which this LRC belongs to */
+ struct xe_gt *gt;
/** @flags: LRC flags */
#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1
@@ -52,7 +52,10 @@ struct xe_lrc {
struct xe_hw_fence_ctx fence_ctx;
/** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */
- u32 ctx_timestamp;
+ u64 ctx_timestamp;
+
+ /** @bb_per_ctx_bo: buffer object for per context batch wa buffer */
+ struct xe_bo *bb_per_ctx_bo;
};
struct xe_lrc_snapshot;
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 404fa2a456d5..49c45ec3e83c 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -86,7 +86,7 @@ static const char *guc_name(struct xe_guc *guc)
* This object needs to be 4KiB aligned.
*
* - _`Interrupt Source Report Page`: this is the equivalent of the
- * GEN11_GT_INTR_DWx registers, with each bit in those registers being
+ * GT_INTR_DWx registers, with each bit in those registers being
* mapped to a byte here. The offsets are the same, just bytes instead
* of bits. This object needs to be cacheline aligned.
*
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 5a3e89022c38..8f8e9fdfb2a8 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -97,7 +97,7 @@ struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
return tile->migrate->q;
}
-static void xe_migrate_fini(struct drm_device *dev, void *arg)
+static void xe_migrate_fini(void *arg)
{
struct xe_migrate *m = arg;
@@ -209,7 +209,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED |
XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -401,7 +400,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
struct xe_vm *vm;
int err;
- m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
+ m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
@@ -455,7 +454,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
might_lock(&m->job_mutex);
fs_reclaim_release(GFP_KERNEL);
- err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
+ err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
if (err)
return ERR_PTR(err);
@@ -670,6 +669,7 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
u32 mocs = 0;
u32 tile_y = 0;
+ xe_gt_assert(gt, !(pitch & 3));
xe_gt_assert(gt, size / pitch <= S16_MAX);
xe_gt_assert(gt, pitch / 4 <= S16_MAX);
xe_gt_assert(gt, pitch <= U16_MAX);
@@ -779,10 +779,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool dst_is_pltt = dst->mem_type == XE_PL_TT;
bool src_is_vram = mem_type_is_vram(src->mem_type);
bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+ bool type_device = src_bo->ttm.type == ttm_bo_type_device;
+ bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
- bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
@@ -839,6 +841,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
avail_pts, avail_pts);
if (copy_system_ccs) {
+ xe_assert(xe, type_device);
ccs_size = xe_device_ccs_bytes(xe, src_L0);
batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
@@ -849,7 +852,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -878,7 +881,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- if (xe_migrate_needs_ccs_emit(xe))
+ if (needs_ccs_emit)
flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
IS_DGFX(xe) ? src_is_vram : src_is_pltt,
dst_L0_ofs,
@@ -1601,55 +1604,63 @@ enum xe_migrate_copy_dir {
XE_MIGRATE_COPY_TO_SRAM,
};
+#define XE_CACHELINE_BYTES 64ull
+#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
+
static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
- unsigned long npages,
+ unsigned long len,
+ unsigned long sram_offset,
dma_addr_t *sram_addr, u64 vram_addr,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
+ bool use_usm_batch = xe->info.has_usm;
struct dma_fence *fence = NULL;
u32 batch_size = 2;
u64 src_L0_ofs, dst_L0_ofs;
- u64 round_update_size;
struct xe_sched_job *job;
struct xe_bb *bb;
u32 update_idx, pt_slot = 0;
+ unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
+ unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
+ PAGE_SIZE : 4;
int err;
- if (npages * PAGE_SIZE > MAX_PREEMPTDISABLE_TRANSFER)
- return ERR_PTR(-EINVAL);
+ if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
+ (sram_offset | vram_addr) & XE_CACHELINE_MASK))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
- round_update_size = npages * PAGE_SIZE;
- batch_size += pte_update_cmd_size(round_update_size);
+ batch_size += pte_update_cmd_size(len);
batch_size += EMIT_COPY_DW;
- bb = xe_bb_new(gt, batch_size, true);
+ bb = xe_bb_new(gt, batch_size, use_usm_batch);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
return ERR_PTR(err);
}
build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
- sram_addr, round_update_size);
+ sram_addr, len + sram_offset);
if (dir == XE_MIGRATE_COPY_TO_VRAM) {
- src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
} else {
src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
- dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0);
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, round_update_size,
- XE_PAGE_SIZE);
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
job = xe_bb_create_migration_job(m->q, bb,
- xe_migrate_batch_base(m, true),
+ xe_migrate_batch_base(m, use_usm_batch),
update_idx);
if (IS_ERR(job)) {
err = PTR_ERR(job);
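To illustrate the pitch selection above (illustrative values only):

/*
 * A page-aligned 8 KiB transfer (len = 8192, len & ~PAGE_MASK == 0) copies
 * with pitch = PAGE_SIZE, while a 192-byte cacheline-aligned transfer falls
 * back to pitch = 4, the minimum allowed by the new !(pitch & 3) assertion
 * in emit_copy().
 */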
@@ -1694,7 +1705,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
dma_addr_t *src_addr,
u64 dst_addr)
{
- return xe_migrate_vram(m, npages, src_addr, dst_addr,
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
XE_MIGRATE_COPY_TO_VRAM);
}
@@ -1715,10 +1726,193 @@ struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
u64 src_addr,
dma_addr_t *dst_addr)
{
- return xe_migrate_vram(m, npages, dst_addr, src_addr,
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
XE_MIGRATE_COPY_TO_SRAM);
}
+static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
+ int len, int write)
+{
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i])
+ break;
+
+ dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+ kfree(dma_addr);
+}
+
+static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
+ void *buf, int len, int write)
+{
+ dma_addr_t *dma_addr;
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
+ if (!dma_addr)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < npages; ++i) {
+ dma_addr_t addr;
+ struct page *page;
+
+ if (is_vmalloc_addr(buf))
+ page = vmalloc_to_page(buf);
+ else
+ page = virt_to_page(buf);
+
+ addr = dma_map_page(xe->drm.dev,
+ page, 0, PAGE_SIZE,
+ write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(xe->drm.dev, addr))
+ goto err_fault;
+
+ dma_addr[i] = addr;
+ buf += PAGE_SIZE;
+ }
+
+ return dma_addr;
+
+err_fault:
+ xe_migrate_dma_unmap(xe, dma_addr, len, write);
+ return ERR_PTR(-EFAULT);
+}
+
+/**
+ * xe_migrate_access_memory - Access memory of a BO via GPU
+ *
+ * @m: The migration context.
+ * @bo: buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Access memory of a BO via GPU either reading in or writing from a passed in
+ * pointer. Pointer is dma mapped for GPU access and GPU commands are issued to
+ * read to or write from pointer.
+ *
+ * Returns:
+ * 0 if successful, negative error code on failure.
+ */
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_tile *tile = m->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_res_cursor cursor;
+ struct dma_fence *fence = NULL;
+ dma_addr_t *dma_addr;
+ unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
+ int bytes_left = len, current_page = 0;
+ void *orig_buf = buf;
+
+ xe_bo_assert_held(bo);
+
+ /* Use bounce buffer for small access and unaligned access */
+ if (len & XE_CACHELINE_MASK ||
+ ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) {
+ int buf_offset = 0;
+
+ /*
+ * Less than ideal for large unaligned access but this should be
+ * fairly rare; this can be fixed up if it becomes common.
+ */
+ do {
+ u8 bounce[XE_CACHELINE_BYTES];
+ void *ptr = (void *)bounce;
+ int err;
+ int copy_bytes = min_t(int, bytes_left,
+ XE_CACHELINE_BYTES -
+ (offset & XE_CACHELINE_MASK));
+ int ptr_offset = offset & XE_CACHELINE_MASK;
+
+ err = xe_migrate_access_memory(m, bo,
+ offset &
+ ~XE_CACHELINE_MASK,
+ (void *)ptr,
+ sizeof(bounce), 0);
+ if (err)
+ return err;
+
+ if (write) {
+ memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+
+ err = xe_migrate_access_memory(m, bo,
+ offset & ~XE_CACHELINE_MASK,
+ (void *)ptr,
+ sizeof(bounce), write); /* write the bounce buffer back */
+ if (err)
+ return err;
+ } else {
+ memcpy(buf + buf_offset, ptr + ptr_offset,
+ copy_bytes);
+ }
+
+ bytes_left -= copy_bytes;
+ buf_offset += copy_bytes;
+ offset += copy_bytes;
+ } while (bytes_left);
+
+ return 0;
+ }
+
+ dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
+ if (IS_ERR(dma_addr))
+ return PTR_ERR(dma_addr);
+
+ xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor);
+
+ do {
+ struct dma_fence *__fence;
+ u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
+ cursor.start;
+ int current_bytes;
+
+ if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
+ current_bytes = min_t(int, bytes_left,
+ MAX_PREEMPTDISABLE_TRANSFER);
+ else
+ current_bytes = min_t(int, bytes_left, cursor.size);
+
+ if (fence)
+ dma_fence_put(fence);
+
+ __fence = xe_migrate_vram(m, current_bytes,
+ (unsigned long)buf & ~PAGE_MASK,
+ dma_addr + current_page,
+ vram_addr, write ?
+ XE_MIGRATE_COPY_TO_VRAM :
+ XE_MIGRATE_COPY_TO_SRAM);
+ if (IS_ERR(__fence)) {
+ if (fence)
+ dma_fence_wait(fence, false);
+ fence = __fence;
+ goto out_err;
+ }
+ fence = __fence;
+
+ buf += current_bytes;
+ offset += current_bytes;
+ current_page = (int)(buf - orig_buf) / PAGE_SIZE;
+ bytes_left -= current_bytes;
+ if (bytes_left)
+ xe_res_next(&cursor, current_bytes);
+ } while (bytes_left);
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+out_err:
+ xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
+ return IS_ERR(fence) ? PTR_ERR(fence) : 0;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
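A minimal usage sketch for the new helper (illustrative only; assumes bo and its tile's migrate context are in scope and that the caller holds the BO's reservation, as xe_bo_assert_held() documents):

u8 data[256];	/* length kept a multiple of XE_CACHELINE_BYTES */
int err;

xe_bo_lock(bo, false);
err = xe_migrate_access_memory(bo->tile->migrate, bo, 0, data,
			       sizeof(data), 0 /* read */);
xe_bo_unlock(bo);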
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 6ff9a963425c..fb9839c1bae0 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -112,6 +112,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write);
+
#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0)
#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1)
#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 70a36e777546..7357458bc0d2 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -75,12 +75,12 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
* is fine as it's going to the root tile's mmio, that's
* guaranteed to be initialized earlier in xe_mmio_probe_early()
*/
- mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
+ mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
if (tile_count < xe->info.tile_count) {
drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
- xe->info.tile_count, tile_count);
+ xe->info.tile_count, tile_count);
xe->info.tile_count = tile_count;
/*
@@ -128,7 +128,7 @@ int xe_mmio_probe_early(struct xe_device *xe)
*/
xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
- if (xe->mmio.regs == NULL) {
+ if (!xe->mmio.regs) {
drm_err(&xe->drm, "failed to map registers\n");
return -EIO;
}
@@ -138,6 +138,7 @@ int xe_mmio_probe_early(struct xe_device *xe)
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
+ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */
/**
* xe_mmio_init() - Initialize an MMIO instance
@@ -204,8 +205,9 @@ void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
- if (!reg.vf && mmio->sriov_vf_gt)
- xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg, val);
else
writel(val, mmio->regs + addr);
}
@@ -218,8 +220,9 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
/* Wa_15015404425 */
mmio_flush_pending_writes(mmio);
- if (!reg.vf && mmio->sriov_vf_gt)
- val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg);
else
val = readl(mmio->regs + addr);
@@ -309,8 +312,8 @@ u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
return (u64)udw << 32 | ldw;
}
-static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic, bool expect_match)
+static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us);
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 31dade91a089..0c737413fcb6 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -775,22 +775,23 @@ void xe_mocs_init(struct xe_gt *gt)
void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
+ enum xe_force_wake_domains domain;
struct xe_mocs_info table;
unsigned int fw_ref, flags;
flags = get_mocs_settings(xe, &table);
+ domain = flags & HAS_LNCF_MOCS ? XE_FORCEWAKE_ALL : XE_FW_GT;
xe_pm_runtime_get_noresume(xe);
- fw_ref = xe_force_wake_get(gt_to_fw(gt),
- flags & HAS_LNCF_MOCS ?
- XE_FORCEWAKE_ALL : XE_FW_GT);
- if (!fw_ref)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), domain);
+
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain))
goto err_fw;
table.ops->dump(&table, flags, gt, p);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
err_fw:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
}
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 9f4632e39a1a..e4742e27e2cd 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -11,6 +11,7 @@
#include <drm/drm_module.h>
#include "xe_drv.h"
+#include "xe_configfs.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
#include "xe_pm.h"
@@ -29,17 +30,14 @@ struct xe_modparam xe_modparam = {
module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2");
-module_param_named(always_migrate_to_vram, xe_modparam.always_migrate_to_vram, bool, 0444);
-MODULE_PARM_DESC(always_migrate_to_vram, "Always migrate to VRAM on GPU fault");
-
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
-module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
-MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
+module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size");
module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
@@ -89,6 +87,10 @@ static const struct init_funcs init_funcs[] = {
.init = xe_check_nomodeset,
},
{
+ .init = xe_configfs_init,
+ .exit = xe_configfs_exit,
+ },
+ {
.init = xe_hw_fence_module_init,
.exit = xe_hw_fence_module_exit,
},
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 84339e509c80..5a3bfea8b7b4 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -12,7 +12,6 @@
struct xe_modparam {
bool force_execlist;
bool probe_display;
- bool always_migrate_to_vram;
u32 force_vram_bar_size;
int guc_log_level;
char *guc_firmware_path;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 7ffc98f67e69..fb842fa0552e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1301,7 +1301,7 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_fr
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -1338,7 +1338,7 @@ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from fro
if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -2221,6 +2221,7 @@ addr_err:
kfree(oa_regs);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_oa_alloc_regs, ERRNO);
static ssize_t show_dynamic_id(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -2280,7 +2281,7 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi
return -EACCES;
}
- err = __copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
+ err = copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 818f023166d5..ac4beaed58ff 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -62,11 +62,14 @@ struct xe_device_desc {
u8 is_dgfx:1;
u8 has_display:1;
+ u8 has_fan_control:1;
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
u8 has_llc:1;
+ u8 has_mbx_power_limits:1;
u8 has_pxp:1;
u8 has_sriov:1;
+ u8 needs_scratch:1;
u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
u8 skip_pcode:1;
@@ -140,6 +143,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_indirect_ring_state = 1, \
.has_range_tlb_invalidation = 1, \
.has_usm = 1, \
+ .has_64bit_timestamp = 1, \
.va_bits = 48, \
.vm_max_level = 4, \
.hw_engine_mask = \
@@ -302,6 +306,8 @@ static const struct xe_device_desc dg2_desc = {
DG2_FEATURES,
.has_display = true,
+ .has_fan_control = true,
+ .has_mbx_power_limits = false,
};
static const __maybe_unused struct xe_device_desc pvc_desc = {
@@ -313,6 +319,7 @@ static const __maybe_unused struct xe_device_desc pvc_desc = {
.has_heci_gscfi = 1,
.max_remote_tiles = 1,
.require_force_probe = true,
+ .has_mbx_power_limits = false,
};
static const struct xe_device_desc mtl_desc = {
@@ -329,6 +336,7 @@ static const struct xe_device_desc lnl_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_pxp = true,
+ .needs_scratch = true,
};
static const struct xe_device_desc bmg_desc = {
@@ -336,7 +344,10 @@ static const struct xe_device_desc bmg_desc = {
PLATFORM(BATTLEMAGE),
.dma_mask_size = 46,
.has_display = true,
+ .has_fan_control = true,
+ .has_mbx_power_limits = true,
.has_heci_cscfi = 1,
+ .needs_scratch = true,
};
static const struct xe_device_desc ptl_desc = {
@@ -345,6 +356,7 @@ static const struct xe_device_desc ptl_desc = {
.has_display = true,
.has_sriov = true,
.require_force_probe = true,
+ .needs_scratch = true,
};
#undef PLATFORM
@@ -575,6 +587,8 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.dma_mask_size = desc->dma_mask_size;
xe->info.is_dgfx = desc->is_dgfx;
+ xe->info.has_fan_control = desc->has_fan_control;
+ xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
@@ -583,6 +597,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
+ xe->info.needs_scratch = desc->needs_scratch;
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
@@ -668,6 +683,7 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
xe->info.has_usm = graphics_desc->has_usm;
+ xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
for_each_remote_tile(tile, xe, id) {
int err;
@@ -733,7 +749,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
return;
xe_device_remove(xe);
- xe_pm_runtime_fini(xe);
+ xe_pm_fini(xe);
}
/*
@@ -803,18 +819,17 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
err = xe_device_probe_early(xe);
- if (err) {
- /*
- * In Boot Survivability mode, no drm card is exposed and driver
- * is loaded with bare minimum to allow for firmware to be
- * flashed through mei. If early probe failed, but it managed to
- * enable survivability mode, return success.
- */
- if (xe_survivability_mode_is_enabled(xe))
- return 0;
+ /*
+ * In Boot Survivability mode, no drm card is exposed and driver
+ * is loaded with bare minimum to allow for firmware to be
+ * flashed through mei. Return success if survivability mode is
+ * enabled due to pcode failure or configfs being set.
+ */
+ if (xe_survivability_mode_is_enabled(xe))
+ return 0;
+ if (err)
return err;
- }
err = xe_info_init(xe, desc);
if (err)
@@ -920,6 +935,7 @@ static int xe_pci_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index 09ee8a06fe2e..8813efdcafbb 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -7,6 +7,8 @@
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_engine_activity.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
@@ -111,6 +113,20 @@ static void pf_link_vfs(struct xe_device *xe, int num_vfs)
}
}
+static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
+ if (ret)
+ xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
+ }
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -145,6 +161,9 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
+
+ pf_engine_activity_stats(xe, num_vfs, true);
+
return num_vfs;
failed:
@@ -168,6 +187,8 @@ static int pf_disable_vfs(struct xe_device *xe)
if (!num_vfs)
return 0;
+ pf_engine_activity_stats(xe, num_vfs, false);
+
pci_disable_sriov(pdev);
pf_reset_vfs(xe, num_vfs);
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index e9b9bbc138d3..ca6b10d35573 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -21,6 +21,7 @@ struct xe_graphics_desc {
u8 has_indirect_ring_state:1;
u8 has_range_tlb_invalidation:1;
u8 has_usm:1;
+ u8 has_64bit_timestamp:1;
};
struct xe_media_desc {
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 9333ce776a6e..9189117fe825 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/error-injection.h>
#include <drm/drm_managed.h>
@@ -108,6 +109,17 @@ int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout
return err;
}
+int xe_pcode_write64_timeout(struct xe_tile *tile, u32 mbox, u32 data0, u32 data1, int timeout)
+{
+ int err;
+
+ mutex_lock(&tile->pcode.lock);
+ err = pcode_mailbox_rw(tile, mbox, &data0, &data1, timeout, false, false);
+ mutex_unlock(&tile->pcode.lock);
+
+ return err;
+}
+
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
int err;
@@ -323,3 +335,4 @@ int xe_pcode_probe_early(struct xe_device *xe)
{
return xe_pcode_ready(xe, false);
}
+ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
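A minimal hedged sketch of a caller for the new 64-bit write helper; the mailbox command and payload names below are placeholders, not real pcode commands:

/* Illustrative only: send a two-dword payload to a pcode mailbox.
 * EXAMPLE_MBOX_CMD and the limit values are placeholders. */
static int example_write_power_limits(struct xe_tile *tile, u32 lo, u32 hi)
{
	/* 250 ms timeout, in line with other pcode writes in the driver */
	return xe_pcode_write64_timeout(tile, EXAMPLE_MBOX_CMD, lo, hi, 250);
}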
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index ba33991d72a7..de38f44f3201 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -18,6 +18,9 @@ int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1);
int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 val,
int timeout_ms);
+int xe_pcode_write64_timeout(struct xe_tile *tile, u32 mbox, u32 data0,
+ u32 data1, int timeout);
+
#define xe_pcode_write(tile, mbox, val) \
xe_pcode_write_timeout(tile, mbox, val, 1)
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 2bae9afdbd35..0befdea77db1 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -34,6 +34,7 @@
#define DGFX_PCODE_STATUS 0x7E
#define DGFX_GET_INIT_STATUS 0x0
#define DGFX_INIT_STATUS_COMPLETE 0x1
+#define DGFX_LINK_DOWNGRADE_STATUS REG_BIT(31)
#define PCODE_POWER_SETUP 0x7C
#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
@@ -42,6 +43,13 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define READ_PSYSGPU_POWER_LIMIT 0x6
+#define WRITE_PSYSGPU_POWER_LIMIT 0x7
+#define READ_PACKAGE_POWER_LIMIT 0x8
+#define WRITE_PACKAGE_POWER_LIMIT 0x9
+#define READ_PL_FROM_FW 0x1
+#define READ_PL_FROM_PCODE 0x0
+
#define PCODE_FREQUENCY_CONFIG 0x6e
/* Frequency Config Sub Commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
@@ -49,6 +57,9 @@
/* Domain IDs (param2) */
#define PCODE_MBOX_DOMAIN_HBM 0x2
+#define FAN_SPEED_CONTROL 0x7D
+#define FSC_READ_NUM_FANS 0x4
+
#define PCODE_SCRATCH(x) XE_REG(0x138320 + ((x) * 4))
/* PCODE_SCRATCH0 */
#define AUXINFO_REG_OFFSET REG_GENMASK(17, 15)
@@ -63,6 +74,10 @@
/* Auxiliary info bits */
#define AUXINFO_HISTORY_OFFSET REG_GENMASK(31, 29)
+#define BMG_PCIE_CAP XE_REG(0x138340)
+#define LINK_DOWNGRADE REG_GENMASK(1, 0)
+#define DOWNGRADE_CAPABLE 2
+
struct pcode_err_decode {
int errno;
const char *str;
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 7b6b754ad6eb..ff749edc005b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -16,7 +16,6 @@
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
-#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
@@ -188,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory required for the
* GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto err;
@@ -199,7 +198,7 @@ int xe_pm_resume(struct xe_device *xe)
xe_display_pm_resume(xe);
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto err;
@@ -273,6 +272,7 @@ int xe_pm_init_early(struct xe_device *xe)
if (err)
return err;
+ xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
@@ -286,6 +286,42 @@ static u32 vram_threshold_value(struct xe_device *xe)
return DEFAULT_VRAM_THRESHOLD;
}
+static int xe_pm_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
+ int err = 0;
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ xe_pm_runtime_get(xe);
+ err = xe_bo_evict_all_user(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ err = xe_bo_notifier_prepare_all_pinned(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ xe_bo_notifier_unprepare_all_pinned(xe);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ if (err)
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
+
/**
* xe_pm_init - Initialize Xe Power Management
* @xe: xe device instance
@@ -299,33 +335,31 @@ int xe_pm_init(struct xe_device *xe)
u32 vram_threshold;
int err;
+ xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
+ err = register_pm_notifier(&xe->pm_notifier);
+ if (err)
+ return err;
+
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
return 0;
- xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
-
if (xe->d3cold.capable) {
- err = xe_device_sysfs_init(xe);
- if (err)
- return err;
-
vram_threshold = vram_threshold_value(xe);
err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
- return err;
+ goto err_unregister;
}
xe_pm_runtime_init(xe);
-
return 0;
+
+err_unregister:
+ unregister_pm_notifier(&xe->pm_notifier);
+ return err;
}
-/**
- * xe_pm_runtime_fini - Finalize Runtime PM
- * @xe: xe device instance
- */
-void xe_pm_runtime_fini(struct xe_device *xe)
+static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -333,6 +367,18 @@ void xe_pm_runtime_fini(struct xe_device *xe)
pm_runtime_forbid(dev);
}
+/**
+ * xe_pm_fini - Finalize PM
+ * @xe: xe device instance
+ */
+void xe_pm_fini(struct xe_device *xe)
+{
+ if (xe_device_uc_enabled(xe))
+ xe_pm_runtime_fini(xe);
+
+ unregister_pm_notifier(&xe->pm_notifier);
+}
+
static void xe_pm_write_callback_task(struct xe_device *xe,
struct task_struct *task)
{
@@ -484,7 +530,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory
* required for the GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto out;
}
@@ -497,7 +543,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_display_pm_runtime_resume(xe);
if (xe->d3cold.allowed) {
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto out;
}
@@ -641,7 +687,7 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
return dev->power.runtime_status == RPM_SUSPENDING ||
dev->power.runtime_status == RPM_RESUMING ||
- pm_suspend_target_state != PM_SUSPEND_ON;
+ pm_suspend_in_progress();
#else
return false;
#endif
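For reference, the notifier added above follows the generic kernel PM notifier pattern; a minimal hedged sketch with illustrative names:

#include <linux/suspend.h>

/* Illustrative shape of a PM notifier like xe_pm_notifier_callback():
 * do preparatory work before suspend, undo it afterwards; returning
 * NOTIFY_BAD from the *_PREPARE phase aborts the transition. */
static int example_pm_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
		/* e.g. evict user buffers while allocations still work */
		break;
	case PM_POST_SUSPEND:
		/* undo the prepare step */
		break;
	}
	return NOTIFY_DONE;
}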
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 998d1ed64556..59678b310e55 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -17,7 +17,7 @@ int xe_pm_resume(struct xe_device *xe);
int xe_pm_init_early(struct xe_device *xe);
int xe_pm_init(struct xe_device *xe);
-void xe_pm_runtime_fini(struct xe_device *xe);
+void xe_pm_fini(struct xe_device *xe);
bool xe_pm_runtime_suspended(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index 4f62a6e515d6..69df0e3520a5 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -10,9 +10,11 @@
#include "xe_force_wake.h"
#include "xe_gt_idle.h"
#include "xe_guc_engine_activity.h"
+#include "xe_guc_pc.h"
#include "xe_hw_engine.h"
#include "xe_pm.h"
#include "xe_pmu.h"
+#include "xe_sriov_pf_helpers.h"
/**
* DOC: Xe PMU (Performance Monitoring Unit)
@@ -32,9 +34,10 @@
* gt[60:63] Selects gt for the event
* engine_class[20:27] Selects engine-class for event
* engine_instance[12:19] Selects the engine-instance for the event
+ * function[44:59] Selects the function of the event (SRIOV enabled)
*
* For engine specific events (engine-*), gt, engine_class and engine_instance parameters must be
- * set as populated by DRM_XE_DEVICE_QUERY_ENGINES.
+ * set as populated by DRM_XE_DEVICE_QUERY_ENGINES, along with function if SRIOV is enabled.
*
* For gt specific events (gt-*), the gt parameter must be passed. All other parameters will be 0.
*
@@ -49,6 +52,7 @@
*/
#define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60)
+#define XE_PMU_EVENT_FUNCTION_MASK GENMASK_ULL(59, 44)
#define XE_PMU_EVENT_ENGINE_CLASS_MASK GENMASK_ULL(27, 20)
#define XE_PMU_EVENT_ENGINE_INSTANCE_MASK GENMASK_ULL(19, 12)
#define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
@@ -58,6 +62,11 @@ static unsigned int config_to_event_id(u64 config)
return FIELD_GET(XE_PMU_EVENT_ID_MASK, config);
}
+static unsigned int config_to_function_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_FUNCTION_MASK, config);
+}
+
static unsigned int config_to_engine_class(u64 config)
{
return FIELD_GET(XE_PMU_EVENT_ENGINE_CLASS_MASK, config);
@@ -76,6 +85,8 @@ static unsigned int config_to_gt_id(u64 config)
#define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
#define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
#define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
+#define XE_PMU_EVENT_GT_ACTUAL_FREQUENCY 0x04
+#define XE_PMU_EVENT_GT_REQUESTED_FREQUENCY 0x05
static struct xe_gt *event_to_gt(struct perf_event *event)
{
@@ -111,6 +122,14 @@ static bool is_engine_event(u64 config)
event_id == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
}
+static bool is_gt_frequency_event(struct perf_event *event)
+{
+ u32 id = config_to_event_id(event->attr.config);
+
+ return id == XE_PMU_EVENT_GT_ACTUAL_FREQUENCY ||
+ id == XE_PMU_EVENT_GT_REQUESTED_FREQUENCY;
+}
+
static bool event_gt_forcewake(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
@@ -118,7 +137,7 @@ static bool event_gt_forcewake(struct perf_event *event)
struct xe_gt *gt;
unsigned int *fw_ref;
- if (!is_engine_event(config))
+ if (!is_engine_event(config) && !is_gt_frequency_event(event))
return true;
gt = xe_device_get_gt(xe, config_to_gt_id(config));
@@ -151,7 +170,7 @@ static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
static bool event_param_valid(struct perf_event *event)
{
struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
- unsigned int engine_class, engine_instance;
+ unsigned int engine_class, engine_instance, function_id;
u64 config = event->attr.config;
struct xe_gt *gt;
@@ -161,16 +180,28 @@ static bool event_param_valid(struct perf_event *event)
engine_class = config_to_engine_class(config);
engine_instance = config_to_engine_instance(config);
+ function_id = config_to_function_id(config);
switch (config_to_event_id(config)) {
case XE_PMU_EVENT_GT_C6_RESIDENCY:
- if (engine_class || engine_instance)
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ if (engine_class || engine_instance || function_id)
return false;
break;
case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
if (!event_to_hwe(event))
return false;
+
+ /* Valid function IDs are PF(0) through totalvfs when SRIOV is enabled */
+ if (IS_SRIOV_PF(xe)) {
+ if (function_id > xe_sriov_pf_get_totalvfs(xe))
+ return false;
+ } else if (function_id) {
+ return false;
+ }
+
break;
}
@@ -242,13 +273,17 @@ static int xe_pmu_event_init(struct perf_event *event)
static u64 read_engine_events(struct xe_gt *gt, struct perf_event *event)
{
struct xe_hw_engine *hwe;
- u64 val = 0;
+ unsigned int function_id;
+ u64 config, val = 0;
+
+ config = event->attr.config;
+ function_id = config_to_function_id(config);
hwe = event_to_hwe(event);
- if (config_to_event_id(event->attr.config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
- val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe);
+ if (config_to_event_id(config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
+ val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe, function_id);
else
- val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe);
+ val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe, function_id);
return val;
}
@@ -266,6 +301,10 @@ static u64 __xe_pmu_event_read(struct perf_event *event)
case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
return read_engine_events(gt, event);
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ return xe_guc_pc_get_act_freq(&gt->uc.guc.pc);
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ return xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc);
}
return 0;
@@ -281,7 +320,14 @@ static void xe_pmu_event_update(struct perf_event *event)
new = __xe_pmu_event_read(event);
} while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
- local64_add(new - prev, &event->count);
+ /*
+ * GT frequency is not a monotonically increasing counter, so add the
+ * instantaneous value instead.
+ */
+ if (is_gt_frequency_event(event))
+ local64_add(new, &event->count);
+ else
+ local64_add(new - prev, &event->count);
}
static void xe_pmu_event_read(struct perf_event *event)
@@ -351,6 +397,7 @@ static void xe_pmu_event_del(struct perf_event *event, int flags)
}
PMU_FORMAT_ATTR(gt, "config:60-63");
+PMU_FORMAT_ATTR(function, "config:44-59");
PMU_FORMAT_ATTR(engine_class, "config:20-27");
PMU_FORMAT_ATTR(engine_instance, "config:12-19");
PMU_FORMAT_ATTR(event, "config:0-11");
@@ -359,6 +406,7 @@ static struct attribute *pmu_format_attrs[] = {
&format_attr_event.attr,
&format_attr_engine_class.attr,
&format_attr_engine_instance.attr,
+ &format_attr_function.attr,
&format_attr_gt.attr,
NULL,
};
@@ -419,6 +467,10 @@ static ssize_t event_attr_show(struct device *dev,
XE_EVENT_ATTR_SIMPLE(gt-c6-residency, gt_c6_residency, XE_PMU_EVENT_GT_C6_RESIDENCY, "ms");
XE_EVENT_ATTR_NOUNIT(engine-active-ticks, engine_active_ticks, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
XE_EVENT_ATTR_NOUNIT(engine-total-ticks, engine_total_ticks, XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
+XE_EVENT_ATTR_SIMPLE(gt-actual-frequency, gt_actual_frequency,
+ XE_PMU_EVENT_GT_ACTUAL_FREQUENCY, "MHz");
+XE_EVENT_ATTR_SIMPLE(gt-requested-frequency, gt_requested_frequency,
+ XE_PMU_EVENT_GT_REQUESTED_FREQUENCY, "MHz");
static struct attribute *pmu_empty_event_attrs[] = {
/* Empty - all events are added as groups with .attr_update() */
@@ -434,6 +486,8 @@ static const struct attribute_group *pmu_events_attr_update[] = {
&pmu_group_gt_c6_residency,
&pmu_group_engine_active_ticks,
&pmu_group_engine_total_ticks,
+ &pmu_group_gt_actual_frequency,
+ &pmu_group_gt_requested_frequency,
NULL,
};
@@ -442,8 +496,11 @@ static void set_supported_events(struct xe_pmu *pmu)
struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
struct xe_gt *gt = xe_device_get_gt(xe, 0);
- if (!xe->info.skip_guc_pc)
+ if (!xe->info.skip_guc_pc) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_ACTUAL_FREQUENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY);
+ }
if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
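A hedged sketch of how the new function field composes into a raw event config, using FIELD_PREP with the masks defined above (field values are illustrative):

#include <linux/bitfield.h>

/* Engine-active-ticks for VF 1 on gt 0; engine class/instance as
 * reported by DRM_XE_DEVICE_QUERY_ENGINES (values illustrative): */
u64 config = FIELD_PREP(XE_PMU_EVENT_GT_MASK, 0) |
	     FIELD_PREP(XE_PMU_EVENT_FUNCTION_MASK, 1) |
	     FIELD_PREP(XE_PMU_EVENT_ENGINE_CLASS_MASK, engine_class) |
	     FIELD_PREP(XE_PMU_EVENT_ENGINE_INSTANCE_MASK, engine_instance) |
	     FIELD_PREP(XE_PMU_EVENT_ID_MASK, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);

This mirrors what the sysfs format attributes expose to the perf tool via the gt, function, engine_class, engine_instance and event fields.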
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ffaf0d02dc7d..b04756a97cdc 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -103,6 +103,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
struct xe_pt *pt;
struct xe_bo *bo;
+ u32 bo_flags;
int err;
if (level) {
@@ -115,14 +116,16 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
if (!pt)
return ERR_PTR(-ENOMEM);
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+ XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
+ if (vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_NO_RESV_EVICT |
- XE_BO_FLAG_PAGETABLE);
+ bo_flags);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -269,8 +272,11 @@ struct xe_pt_update {
bool preexisting;
};
+/**
+ * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk.
+ */
struct xe_pt_stage_bind_walk {
- /** base: The base class. */
+ /** @base: The base class. */
struct xe_pt_walk base;
/* Input parameters for the walk */
@@ -278,15 +284,19 @@ struct xe_pt_stage_bind_walk {
struct xe_vm *vm;
/** @tile: The tile we're building for. */
struct xe_tile *tile;
- /** @default_pte: PTE flag only template. No address is associated */
- u64 default_pte;
+ /** @default_vram_pte: PTE flag only template for VRAM. No address is associated */
+ u64 default_vram_pte;
+ /** @default_system_pte: PTE flag only template for System. No address is associated */
+ u64 default_system_pte;
/** @dma_offset: DMA offset to add to the PTE. */
u64 dma_offset;
/**
- * @needs_64k: This address range enforces 64K alignment and
- * granularity.
+ * @needs_64K: This address range enforces 64K alignment and
+ * granularity on VRAM.
*/
bool needs_64K;
+ /** @clear_pt: clear page table entries during the bind walk */
+ bool clear_pt;
/**
* @vma: VMA being mapped
*/
@@ -299,6 +309,7 @@ struct xe_pt_stage_bind_walk {
u64 va_curs_start;
/* Output */
+ /** @wupd: Walk output data for page-table updates. */
struct xe_walk_update {
/** @wupd.entries: Caller provided storage. */
struct xe_vm_pgtable_update *entries;
@@ -316,7 +327,7 @@ struct xe_pt_stage_bind_walk {
u64 l0_end_addr;
/** @addr_64K: The start address of the current 64K chunk. */
u64 addr_64K;
- /** @found_64: Whether @add_64K actually points to a 64K chunk. */
+ /** @found_64K: Whether @add_64K actually points to a 64K chunk. */
bool found_64K;
};
@@ -436,6 +447,10 @@ static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
if (xe_vma_is_null(xe_walk->vma))
return true;
+ /* If we are clearing the page table, there are no DMA addresses */
+ if (xe_walk->clear_pt)
+ return true;
+
/* Is the DMA address huge PTE size aligned? */
size = next - addr;
dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
@@ -515,24 +530,35 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
struct xe_res_cursor *curs = xe_walk->curs;
bool is_null = xe_vma_is_null(xe_walk->vma);
+ bool is_vram = is_null ? false : xe_res_is_vram(curs);
XE_WARN_ON(xe_walk->va_curs_start != addr);
- pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
- xe_res_dma(curs) + xe_walk->dma_offset,
- xe_walk->vma, pat_index, level);
- pte |= xe_walk->default_pte;
+ if (xe_walk->clear_pt) {
+ pte = 0;
+ } else {
+ pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
+ xe_res_dma(curs) +
+ xe_walk->dma_offset,
+ xe_walk->vma,
+ pat_index, level);
+ if (!is_null)
+ pte |= is_vram ? xe_walk->default_vram_pte :
+ xe_walk->default_system_pte;
- /*
- * Set the XE_PTE_PS64 hint if possible, otherwise if
- * this device *requires* 64K PTE size for VRAM, fail.
- */
- if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
- xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
- pte |= XE_PTE_PS64;
- } else if (XE_WARN_ON(xe_walk->needs_64K)) {
- return -EINVAL;
+ /*
+ * Set the XE_PTE_PS64 hint if possible, otherwise if
+ * this device *requires* 64K PTE size for VRAM, fail.
+ */
+ if (level == 0 && !xe_parent->is_compact) {
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |=
+ XE_VMA_PTE_64K;
+ pte |= XE_PTE_PS64;
+ } else if (XE_WARN_ON(xe_walk->needs_64K &&
+ is_vram)) {
+ return -EINVAL;
+ }
}
}
@@ -540,7 +566,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (unlikely(ret))
return ret;
- if (!is_null)
+ if (!is_null && !xe_walk->clear_pt)
xe_res_next(curs, next - addr);
xe_walk->va_curs_start = next;
xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
@@ -603,6 +629,44 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
.pt_entry = xe_pt_stage_bind_entry,
};
+/*
+ * Default atomic expectations for different allocation scenarios are as follows:
+ *
+ * 1. Traditional API: When the VM is not in LR mode:
+ * - Device atomics are expected to function with all allocations.
+ *
+ * 2. Compute/SVM API: When the VM is in LR mode:
+ * - Device atomics are the default behavior when the bo is placed in a single region.
+ * - In all other cases device atomics will be disabled with AE=0 until an application
+ * requests otherwise using an ioctl such as madvise.
+ */
+static bool xe_atomic_for_vram(struct xe_vm *vm)
+{
+ return true;
+}
+
+static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+{
+ struct xe_device *xe = vm->xe;
+
+ if (!xe->info.has_device_atomics_on_smem)
+ return false;
+
+ /*
+ * If a SMEM+LMEM allocation is backed by SMEM, a device
+ * atomic will cause a gpu page fault and the allocation then
+ * gets migrated to LMEM, so bind such allocations with
+ * device atomics enabled.
+ *
+ * TODO: Revisit this. Perhaps add something like a
+ * fault_on_atomics_in_system UAPI flag.
+ * Note that this also prohibits GPU atomics in LR mode for
+ * userptr and system memory on DGFX.
+ */
+ return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
+ (bo && xe_bo_has_single_placement(bo))));
+}
+
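Summarizing the AE (atomics-enable) defaults these two helpers produce, as a derived sketch:

/*
 * Derived summary, for VMAs with XE_VMA_ATOMIC_PTE_BIT set:
 *  - VRAM-backed PTEs: AE is always set (xe_atomic_for_vram()).
 *  - System-memory PTEs: AE is set only when the platform supports
 *    device atomics on SMEM, and on discrete parts in LR mode
 *    additionally only when the BO has a single placement.
 */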
/**
* xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
* range.
@@ -612,6 +676,7 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* @entries: Storage for the update entries used for connecting the tree to
* the main tree at commit time.
* @num_entries: On output contains the number of @entries used.
+ * @clear_pt: Clear the page table entries.
*
* This function builds a disconnected page-table tree for a given address
* range. The tree is connected to the main vm tree for the gpu using
@@ -625,13 +690,13 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_svm_range *range,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool clear_pt)
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_bo *bo = xe_vma_bo(vma);
- bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
- (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
struct xe_res_cursor curs;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_pt_stage_bind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_bind_ops,
@@ -639,34 +704,31 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.max_level = XE_PT_HIGHEST_LEVEL,
.staging = true,
},
- .vm = xe_vma_vm(vma),
+ .vm = vm,
.tile = tile,
.curs = &curs,
.va_curs_start = range ? range->base.itree.start :
xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
+ .clear_pt = clear_pt,
};
- struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ struct xe_pt *pt = vm->pt_root[tile->id];
int ret;
if (range) {
/* Move this entire thing to xe_svm.c? */
- xe_svm_notifier_lock(xe_vma_vm(vma));
+ xe_svm_notifier_lock(vm);
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "BIND PREPARE - RETRY");
- xe_svm_notifier_unlock(xe_vma_vm(vma));
+ xe_svm_notifier_unlock(vm);
return -EAGAIN;
}
if (xe_svm_range_has_dma_mapping(range)) {
xe_res_first_dma(range->base.dma_addr, 0,
range->base.itree.last + 1 - range->base.itree.start,
&curs);
- is_devmem = xe_res_is_vram(&curs);
- if (is_devmem)
- xe_svm_range_debug(range, "BIND PREPARE - DMA VRAM");
- else
- xe_svm_range_debug(range, "BIND PREPARE - DMA");
+ xe_svm_range_debug(range, "BIND PREPARE - MIXED");
} else {
xe_assert(xe, false);
}
@@ -674,54 +736,21 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
* Note, when unlocking the resource cursor dma addresses may become
* stale, but the bind will be aborted anyway at commit time.
*/
- xe_svm_notifier_unlock(xe_vma_vm(vma));
+ xe_svm_notifier_unlock(vm);
}
- xe_walk.needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem;
+ xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K);
+ if (clear_pt)
+ goto walk_pt;
- /**
- * Default atomic expectations for different allocation scenarios are as follows:
- *
- * 1. Traditional API: When the VM is not in LR mode:
- * - Device atomics are expected to function with all allocations.
- *
- * 2. Compute/SVM API: When the VM is in LR mode:
- * - Device atomics are the default behavior when the bo is placed in a single region.
- * - In all other cases device atomics will be disabled with AE=0 until an application
- * request differently using a ioctl like madvise.
- */
if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
- if (xe_vm_in_lr_mode(xe_vma_vm(vma))) {
- if (bo && xe_bo_has_single_placement(bo))
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- /**
- * If a SMEM+LMEM allocation is backed by SMEM, a device
- * atomics will cause a gpu page fault and which then
- * gets migrated to LMEM, bind such allocations with
- * device atomics enabled.
- */
- else if (is_devmem)
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- } else {
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- }
-
- /**
- * Unset AE if the platform(PVC) doesn't support it on an
- * allocation
- */
- if (!xe->info.has_device_atomics_on_smem && !is_devmem)
- xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE;
- }
-
- if (is_devmem) {
- xe_walk.default_pte |= XE_PPGTT_PTE_DM;
- xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
+ xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ XE_USM_PPGTT_PTE_AE : 0;
}
- if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
- xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
-
+ xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM;
+ xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
if (!range)
xe_bo_assert_held(bo);
@@ -739,6 +768,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
curs.size = xe_vma_size(vma);
}
+walk_pt:
ret = xe_pt_walk_range(&pt->base, pt->level,
range ? range->base.itree.start : xe_vma_start(vma),
range ? range->base.itree.last + 1 : xe_vma_end(vma),
@@ -1103,12 +1133,14 @@ static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_svm_range *range,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool invalidate_on_bind)
{
int err;
*num_entries = 0;
- err = xe_pt_stage_bind(tile, vma, range, entries, num_entries);
+ err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
+ invalidate_on_bind);
if (!err)
xe_tile_assert(tile, *num_entries);
@@ -1420,6 +1452,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
return err;
}
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
{
struct xe_vm *vm = pt_update->vops->vm;
@@ -1453,6 +1486,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
return 0;
}
+#endif
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
@@ -1791,7 +1825,7 @@ static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma)
+ struct xe_vma *vma, bool invalidate_on_bind)
{
u32 current_op = pt_update_ops->current_op;
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
@@ -1813,7 +1847,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
return err;
err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
- &pt_op->num_entries);
+ &pt_op->num_entries, invalidate_on_bind);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
@@ -1835,11 +1869,11 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
* If !rebind, and scratch enabled VMs, there is a chance the scratch
* PTE is already cached in the TLB so it needs to be invalidated.
* On !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
+ * LR, in particular on user-space batch buffer chaining, it needs to
+ * be done here.
*/
if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
- xe_vm_in_preempt_fence_mode(vm)))
+ xe_vm_in_lr_mode(vm)))
pt_update_ops->needs_invalidation = true;
else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
/* We bump also if batch_invalidate_tlb is true */
@@ -1875,7 +1909,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
pt_op->rebind = BIT(tile->id) & range->tile_present;
err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
- &pt_op->num_entries);
+ &pt_op->num_entries, false);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
@@ -1987,11 +2021,13 @@ static int op_prepare(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
+ !op->map.invalidate_on_bind) ||
op->map.is_cpu_addr_mirror)
break;
- err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
+ op->map.invalidate_on_bind);
pt_update_ops->wait_vm_kernel = true;
break;
case DRM_GPUVA_OP_REMAP:
@@ -2005,12 +2041,12 @@ static int op_prepare(struct xe_vm *vm,
if (!err && op->remap.prev) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.prev);
+ op->remap.prev, false);
pt_update_ops->wait_vm_bookkeep = true;
}
if (!err && op->remap.next) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.next);
+ op->remap.next, false);
pt_update_ops->wait_vm_bookkeep = true;
}
break;
@@ -2032,7 +2068,7 @@ static int op_prepare(struct xe_vm *vm,
if (xe_vma_is_cpu_addr_mirror(vma))
break;
- err = bind_op_prepare(vm, tile, pt_update_ops, vma);
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
pt_update_ops->wait_vm_kernel = true;
break;
}
@@ -2115,7 +2151,7 @@ ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence,
- struct dma_fence *fence2)
+ struct dma_fence *fence2, bool invalidate_on_bind)
{
xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
@@ -2132,6 +2168,8 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
}
vma->tile_present |= BIT(tile->id);
vma->tile_staged &= ~BIT(tile->id);
+ if (invalidate_on_bind)
+ vma->tile_invalidated |= BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
lockdep_assert_held_read(&vm->userptr.notifier_lock);
to_userptr_vma(vma)->userptr.initial_bind = true;
@@ -2193,7 +2231,7 @@ static void op_commit(struct xe_vm *vm,
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
- fence2);
+ fence2, op->map.invalidate_on_bind);
break;
case DRM_GPUVA_OP_REMAP:
{
@@ -2206,10 +2244,10 @@ static void op_commit(struct xe_vm *vm,
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
- fence, fence2);
+ fence, fence2, false);
if (op->remap.next)
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
- fence, fence2);
+ fence, fence2, false);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -2227,16 +2265,24 @@ static void op_commit(struct xe_vm *vm,
if (!xe_vma_is_cpu_addr_mirror(vma))
bind_op_commit(vm, tile, pt_update_ops, vma, fence,
- fence2);
+ fence2, false);
break;
}
case DRM_GPUVA_OP_DRIVER:
{
+ /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+
if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
- op->map_range.range->tile_present |= BIT(tile->id);
- op->map_range.range->tile_invalidated &= ~BIT(tile->id);
+ WRITE_ONCE(op->map_range.range->tile_present,
+ op->map_range.range->tile_present |
+ BIT(tile->id));
+ WRITE_ONCE(op->map_range.range->tile_invalidated,
+ op->map_range.range->tile_invalidated &
+ ~BIT(tile->id));
} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
- op->unmap_range.range->tile_present &= ~BIT(tile->id);
+ WRITE_ONCE(op->unmap_range.range->tile_present,
+ op->unmap_range.range->tile_present &
+ ~BIT(tile->id));
}
break;
}
@@ -2257,11 +2303,15 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
.pre_commit = xe_pt_userptr_pre_commit,
};
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
.populate = xe_vm_populate_pgtable,
.clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_svm_pre_commit,
};
+#else
+static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+#endif
/**
* xe_pt_update_ops_run() - Run PT update operations
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index 454ea7dc08ac..b5bc15f436fa 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -541,10 +541,14 @@ int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
*/
xe_pm_runtime_get(pxp->xe);
- if (!pxp_prerequisites_done(pxp)) {
- ret = -EBUSY;
+ /* get_readiness_status() returns 0 for in-progress and 1 for done */
+ ret = xe_pxp_get_readiness_status(pxp);
+ if (ret <= 0) {
+ if (!ret)
+ ret = -EBUSY;
goto out;
}
+ ret = 0;
wait_for_idle:
/*
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 5e65830dad25..2dbf4066d86f 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -340,7 +340,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
- if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_GPUSVM))
+ if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 9475e3f74958..fc8447a838c4 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -173,6 +173,9 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
if (xa_empty(&sr->xa))
return;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index a7582b097ae6..bc1689db4cd7 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -234,13 +234,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job)
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
{
- dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
- MI_COPY_MEM_MEM_DST_GGTT;
+ dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ dw[i++] = RING_CTX_TIMESTAMP(0).addr;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
- dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
- dw[i++] = 0;
- dw[i++] = MI_NOOP;
return i;
}
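A hedged reading of the dwords now emitted: the memory-to-memory copy of the saved context timestamp is replaced by a direct register snapshot, per the code above:

/*
 * dw0: MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET
 * dw1: RING_CTX_TIMESTAMP(0).addr  -- source register, CS-relative
 * dw2: GGTT address of the job timestamp slot
 * dw3: 0                           -- upper address dword
 */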
diff --git a/drivers/gpu/drm/xe/xe_ring_ops_types.h b/drivers/gpu/drm/xe/xe_ring_ops_types.h
index 1ae56e2ee7b4..d7e3e150a9a5 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops_types.h
+++ b/drivers/gpu/drm/xe/xe_ring_ops_types.h
@@ -8,7 +8,7 @@
struct xe_sched_job;
-#define MAX_JOB_SIZE_DW 48
+#define MAX_JOB_SIZE_DW 58
#define MAX_JOB_SIZE_BYTES (MAX_JOB_SIZE_DW * 4)
/**
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 13bb62d3e615..29e694bb1219 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -258,9 +258,6 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
rtp_get_context(ctx, &hwe, &gt, &xe);
- if (IS_SRIOV_VF(xe))
- return;
-
xe_assert(xe, entries);
for (entry = entries; entry - entries < n_entries; entry++) {
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index f8fe61e25518..1d43e183ca21 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -60,7 +60,8 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
size / SZ_1K, bo);
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
index 8184390f9c7b..86d47aaf0358 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.c
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -227,7 +227,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
if (!shrinker)
return ERR_PTR(-ENOMEM);
- shrinker->shrink = shrinker_alloc(0, "xe system shrinker");
+ shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
if (!shrinker->shrink) {
kfree(shrinker);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index cb813b337fd3..1f710b3fc599 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/sysfs.h>
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_heci_gsc.h"
@@ -28,20 +29,32 @@
* This is implemented by loading the driver with bare minimum (no drm card) to allow the firmware
* to be flashed through mei and collect telemetry. The driver's probe flow is modified
* such that it enters survivability mode when pcode initialization is incomplete and boot status
- * denotes a failure. The driver then populates the survivability_mode PCI sysfs indicating
- * survivability mode and provides additional information required for debug
+ * denotes a failure.
*
- * KMD exposes below admin-only readable sysfs in survivability mode
+ * Survivability mode can also be entered manually using the survivability mode attribute available
+ * through configfs, which is beneficial in several use cases. It can be used to address scenarios
+ * where pcode does not detect failure or for validation purposes. It can also be used in
+ * In-Field-Repair (IFR) to repair a single card without impacting the other cards in a node.
*
- * device/survivability_mode: The presence of this file indicates that the card is in survivability
- * mode. Also, provides additional information on why the driver entered
- * survivability mode.
+ * Use the command below to enable survivability mode manually::
*
- * Capability Information - Provides boot status
- * Postcode Information - Provides information about the failure
- * Overflow Information - Provides history of previous failures
- * Auxiliary Information - Certain failures may have information in
- * addition to postcode information
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ *
+ * Refer to :ref:`xe_configfs` for more details on how to use configfs.
+ *
+ * Survivability mode is indicated by the below admin-only readable sysfs which provides additional
+ * debug information::
+ *
+ * /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * Capability Information:
+ * Provides boot status
+ * Postcode Information:
+ * Provides information about the failure
+ * Overflow Information:
+ * Provides history of previous failures
+ * Auxiliary Information:
+ * Certain failures may have information in addition to postcode information
*/
static u32 aux_history_offset(u32 reg_value)
@@ -133,6 +146,7 @@ static void xe_survivability_mode_fini(void *arg)
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct device *dev = &pdev->dev;
+ xe_configfs_clear_survivability_mode(pdev);
sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
}
@@ -186,24 +200,41 @@ bool xe_survivability_mode_is_enabled(struct xe_device *xe)
return xe->survivability.mode;
}
-/*
- * survivability_mode_requested - check if it's possible to enable
- * survivability mode and that was requested by firmware
+/**
+ * xe_survivability_mode_is_requested - check if survivability mode was requested
+ * by firmware or userspace and can be enabled
+ * @xe: xe device instance
*
- * This function reads the boot status from Pcode.
+ * This function reads configfs and boot status from Pcode.
*
* Return: true if platform support is available and boot status indicates
- * failure, false otherwise.
+ * failure or if survivability mode is requested, false otherwise.
*/
-static bool survivability_mode_requested(struct xe_device *xe)
+bool xe_survivability_mode_is_requested(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u32 data;
+ bool survivability_mode;
- if (!IS_DGFX(xe) || xe->info.platform < XE_BATTLEMAGE || IS_SRIOV_VF(xe))
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
return false;
+ survivability_mode = xe_configfs_get_survivability_mode(pdev);
+
+ if (xe->info.platform < XE_BATTLEMAGE) {
+ if (survivability_mode) {
+ dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n");
+ xe_configfs_clear_survivability_mode(pdev);
+ }
+ return false;
+ }
+
+ /* Enable survivability mode if set via configfs */
+ if (survivability_mode)
+ return true;
+
data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
@@ -226,7 +257,7 @@ int xe_survivability_mode_enable(struct xe_device *xe)
struct xe_survivability_info *info;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- if (!survivability_mode_requested(xe))
+ if (!xe_survivability_mode_is_requested(xe))
return 0;
survivability->size = MAX_SCRATCH_MMIO;
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index d7e64885570d..02231c2bf008 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -12,5 +12,6 @@ struct xe_device;
int xe_survivability_mode_enable(struct xe_device *xe);
bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+bool xe_survivability_mode_is_requested(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
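A hedged sketch of the decision flow the new helper implements, derived from the hunks above:

/*
 * xe_survivability_mode_is_requested() (sketch):
 *  - bail out on integrated or VF devices;
 *  - read the userspace request via xe_configfs_get_survivability_mode();
 *  - on pre-BATTLEMAGE cards, reject and clear a stray configfs request;
 *  - otherwise honor the configfs request, or fall back to the
 *    PCODE_SCRATCH(0) boot status reported by firmware.
 */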
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 24c578e1170e..f0b167b3fb6a 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -4,6 +4,7 @@
*/
#include "xe_bo.h"
+#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
@@ -15,8 +16,17 @@
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
- /* Not reliable without notifier lock */
- return range->base.flags.has_devmem_pages;
+ /*
+ * Advisory-only check of whether the range is currently backed by
+ * VRAM memory.
+ */
+
+ struct drm_gpusvm_range_flags flags = {
+ /* Pairs with WRITE_ONCE in drm_gpusvm.c */
+ .__flags = READ_ONCE(range->base.flags.__flags),
+ };
+
+ return flags.has_devmem_pages;
}
static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
@@ -339,6 +349,8 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
up_write(&vm->lock);
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+
static struct xe_vram_region *page_to_vr(struct page *page)
{
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
@@ -577,6 +589,8 @@ static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
.copy_to_ram = xe_svm_copy_to_ram,
};
+#endif
+
static const struct drm_gpusvm_ops gpusvm_ops = {
.range_alloc = xe_svm_range_alloc,
.range_free = xe_svm_range_free,
@@ -645,11 +659,19 @@ void xe_svm_fini(struct xe_vm *vm)
}
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
- struct xe_tile *tile)
+ struct xe_tile *tile,
+ bool devmem_only)
{
- return (range->tile_present & ~range->tile_invalidated) & BIT(tile->id);
+ /*
+ * Advisory-only check of whether the range currently has a valid
+ * mapping; READ_ONCE pairs with WRITE_ONCE in xe_pt.c
+ */
+ return ((READ_ONCE(range->tile_present) &
+ ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
+ (!devmem_only || xe_svm_range_in_vram(range));
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
{
return &tile->mem.vram;
@@ -711,12 +733,50 @@ unlock:
return err;
}
+#else
+static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static bool supports_4K_migration(struct xe_device *xe)
+{
+ if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ return false;
+
+ return true;
+}
+
+static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
+ struct xe_vma *vma)
+{
+ struct xe_vm *vm = range_to_vm(&range->base);
+ u64 range_size = xe_svm_range_size(range);
+
+ if (!range->base.flags.migrate_devmem)
+ return false;
+
+ if (xe_svm_range_in_vram(range)) {
+ drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+ return false;
+ }
+
+ if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
+ return false;
+ }
+
+ return true;
+}
/**
* xe_svm_handle_pagefault() - SVM handle page fault
* @vm: The VM.
* @vma: The CPU address mirror VMA.
- * @tile: The tile upon the fault occurred.
+ * @gt: The gt upon which the fault occurred.
* @fault_addr: The GPU fault address.
* @atomic: The fault atomic access bit.
*
@@ -726,7 +786,7 @@ unlock:
* Return: 0 on success, negative error code on error.
*/
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic)
{
struct drm_gpusvm_ctx ctx = {
@@ -735,17 +795,25 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
.check_pages_threshold = IS_DGFX(vm->xe) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
+ .devmem_only = atomic && IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0,
};
struct xe_svm_range *range;
struct drm_gpusvm_range *r;
struct drm_exec exec;
struct dma_fence *fence;
+ int migrate_try_count = ctx.devmem_only ? 3 : 1;
+ struct xe_tile *tile = gt_to_tile(gt);
ktime_t end = 0;
int err;
lockdep_assert_held_write(&vm->lock);
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
+
retry:
/* Always process UNMAPs first so the view of SVM ranges is current */
err = xe_svm_garbage_collector(vm);
@@ -758,24 +826,31 @@ retry:
if (IS_ERR(r))
return PTR_ERR(r);
+ if (ctx.devmem_only && !r->flags.migrate_devmem)
+ return -EACCES;
+
range = to_xe_range(r);
- if (xe_svm_range_is_valid(range, tile))
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
return 0;
range_debug(range, "PAGE FAULT");
- /* XXX: Add migration policy, for now migrate range once */
- if (!range->skip_migrate && range->base.flags.migrate_devmem &&
- xe_svm_range_size(range) >= SZ_64K) {
- range->skip_migrate = true;
-
+ if (--migrate_try_count >= 0 &&
+ xe_svm_range_needs_migrate_to_vram(range, vma)) {
err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
- drm_dbg(&vm->xe->drm,
- "VRAM allocation failed, falling back to "
- "retrying fault, asid=%u, errno=%pe\n",
- vm->usm.asid, ERR_PTR(err));
- goto retry;
+ if (migrate_try_count || !ctx.devmem_only) {
+ drm_dbg(&vm->xe->drm,
+ "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ goto retry;
+ } else {
+ drm_err(&vm->xe->drm,
+ "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ return err;
+ }
}
}
@@ -783,15 +858,23 @@ retry:
err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
/* Corner where CPU mappings have changed */
if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
- if (err == -EOPNOTSUPP) {
- range_debug(range, "PAGE FAULT - EVICT PAGES");
- drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ if (migrate_try_count > 0 || !ctx.devmem_only) {
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm,
+ &range->base);
+ }
+ drm_dbg(&vm->xe->drm,
+ "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ range_debug(range, "PAGE FAULT - RETRY PAGES");
+ goto retry;
+ } else {
+ drm_err(&vm->xe->drm,
+ "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
}
- drm_dbg(&vm->xe->drm,
- "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
- vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
- range_debug(range, "PAGE FAULT - RETRY PAGES");
- goto retry;
}
if (err) {
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
@@ -815,6 +898,7 @@ retry_bind:
drm_exec_fini(&exec);
err = PTR_ERR(fence);
if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
range_debug(range, "PAGE FAULT - RETRY BIND");
goto retry;
}
@@ -825,9 +909,6 @@ retry_bind:
}
drm_exec_fini(&exec);
- if (xe_modparam.always_migrate_to_vram)
- range->skip_migrate = false;
-
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -866,6 +947,7 @@ int xe_svm_bo_evict(struct xe_bo *bo)
}
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+
static struct drm_pagemap_device_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
struct device *dev,
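The fault-retry policy added above can be read as a small backoff loop; a hedged sketch, with a hypothetical helper standing in for one fault-handling attempt:

/* Illustrative only: up to three migration attempts for devmem-only
 * (atomic) faults, doubling the 5 ms migration timeslice on every
 * retry so a range just migrated to VRAM is not bounced straight
 * back to system memory. */
int tries = devmem_only ? 3 : 1;
u32 timeslice_ms = devmem_only ? 5 : 0;

while (try_fault_once(vm, range, timeslice_ms)) {	/* hypothetical helper */
	if (--tries < 0)
		return -EAGAIN;	/* give up; errno illustrative */
	timeslice_ms <<= 1;	/* 5 -> 10 -> 20 ms */
}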
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index be306fe7aaa4..30fc78b85b30 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -6,16 +6,19 @@
#ifndef _XE_SVM_H_
#define _XE_SVM_H_
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+
#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>
#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
struct xe_bo;
-struct xe_vram_region;
+struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
+struct xe_vram_region;
/** struct xe_svm_range - SVM range */
struct xe_svm_range {
@@ -36,14 +39,8 @@ struct xe_svm_range {
* range. Protected by GPU SVM notifier lock.
*/
u8 tile_invalidated;
- /**
- * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
- * locking.
- */
- u8 skip_migrate :1;
};
-#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
* xe_svm_range_pages_valid() - SVM range pages valid
* @range: SVM range
@@ -64,7 +61,7 @@ void xe_svm_fini(struct xe_vm *vm);
void xe_svm_close(struct xe_vm *vm);
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic);
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
@@ -73,9 +70,51 @@ int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+/**
+ * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
+ * @range: SVM range
+ *
+ * Return: True if SVM range has a DMA mapping, False otherwise
+ */
+static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+{
+ lockdep_assert_held(&range->base.gpusvm->notifier_lock);
+ return range->base.flags.has_dma_mapping;
+}
+
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
void xe_svm_flush(struct xe_vm *vm);
#else
+#include <linux/interval_tree.h>
+
+struct drm_pagemap_device_addr;
+struct xe_bo;
+struct xe_gt;
+struct xe_vm;
+struct xe_vma;
+struct xe_tile;
+struct xe_vram_region;
+
+#define XE_INTERCONNECT_VRAM 1
+
+struct xe_svm_range {
+ struct {
+ struct interval_tree_node itree;
+ const struct drm_pagemap_device_addr *dma_addr;
+ } base;
+ u32 tile_present;
+ u32 tile_invalidated;
+};
+
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
return false;
@@ -105,7 +144,7 @@ void xe_svm_close(struct xe_vm *vm)
static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_tile *tile, u64 fault_addr,
+ struct xe_gt *gt, u64 fault_addr,
bool atomic)
{
return 0;
@@ -128,31 +167,19 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
-static inline void xe_svm_flush(struct xe_vm *vm)
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+#define xe_svm_range_has_dma_mapping(...) false
+
+static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}
-#endif
-
-/**
- * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
- * @range: SVM range
- *
- * Return: True if SVM range has a DMA mapping, False otherwise
- */
-static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
- lockdep_assert_held(&range->base.gpusvm->notifier_lock);
- return range->base.flags.has_dma_mapping;
}
-#define xe_svm_assert_in_notifier(vm__) \
- lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
-
-#define xe_svm_notifier_lock(vm__) \
- drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
-
-#define xe_svm_notifier_unlock(vm__) \
- drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
-
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
+#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h
index 5c669a0b2180..d525cbee1e34 100644
--- a/drivers/gpu/drm/xe/xe_trace_lrc.h
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.h
@@ -19,12 +19,12 @@
#define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
TRACE_EVENT(xe_lrc_update_timestamp,
- TP_PROTO(struct xe_lrc *lrc, uint32_t old),
+ TP_PROTO(struct xe_lrc *lrc, uint64_t old),
TP_ARGS(lrc, old),
TP_STRUCT__entry(
__field(struct xe_lrc *, lrc)
- __field(u32, old)
- __field(u32, new)
+ __field(u64, old)
+ __field(u64, new)
__string(name, lrc->fence_ctx.name)
__string(device_id, __dev_name_lrc(lrc))
),
@@ -36,7 +36,7 @@ TRACE_EVENT(xe_lrc_update_timestamp,
__assign_str(name);
__assign_str(device_id);
),
- TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s",
+ TP_printk("lrc=:%p lrc->name=%s old=%llu new=%llu device_id:%s",
__entry->lrc, __get_str(name),
__entry->old, __entry->new,
__get_str(device_id))
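Widening the trace fields to u64 tracks the move to a 64-bit context timestamp (see has_64bit_timestamp above); the practical difference is wrap handling, sketched here:

/* A 32-bit counter relies on modular arithmetic and stays correct
 * across at most one wrap: */
u32 delta32 = new32 - old32;	/* wraps implicitly mod 2^32 */

/* A 64-bit timestamp will not wrap in any realistic uptime, so plain
 * subtraction is always safe: */
u64 delta64 = new64 - old64;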
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index fb0eda3d5682..2741849bbf4d 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -92,6 +92,8 @@
struct uc_fw_entry {
enum xe_platform platform;
+ enum xe_gt_type gt_type;
+
struct {
const char *path;
u16 major;
@@ -106,32 +108,37 @@ struct fw_blobs_by_type {
u32 count;
};
-#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \
- fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2))
+/*
+ * Add an "ANY" define just to convey the meaning it's given here.
+ */
+#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
+
+#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
+ fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \
+ fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \
+ fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
- fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
- fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
- fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
- fw_def(DG1, no_ver(i915, huc, dg1)) \
- fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
- fw_def(ALDERLAKE_S, no_ver(i915, huc, tgl)) \
- fw_def(ROCKETLAKE, no_ver(i915, huc, tgl)) \
- fw_def(TIGERLAKE, no_ver(i915, huc, tgl))
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \
+ fw_def(DG1, GT_TYPE_ANY, no_ver(i915, huc, dg1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl))
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, gsc, lnl, 104, 1, 0)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
@@ -159,12 +166,13 @@ struct fw_blobs_by_type {
a, b, c }
/* All blobs need to be declared via MODULE_FIRMWARE() */
-#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
+#define XE_UC_MODULE_FIRMWARE(platform__, gt_type__, fw_filename) \
MODULE_FIRMWARE(fw_filename);
-#define XE_UC_FW_ENTRY(platform__, entry__) \
+#define XE_UC_FW_ENTRY(platform__, gt_type__, entry__) \
{ \
.platform = XE_ ## platform__, \
+ .gt_type = XE_ ## gt_type__, \
entry__, \
},
@@ -222,30 +230,38 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
[XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) },
[XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) },
};
- static const struct uc_fw_entry *entries;
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
enum xe_platform p = xe->info.platform;
+ const struct uc_fw_entry *entries;
u32 count;
int i;
- xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, gt->info.type != XE_GT_TYPE_UNINITIALIZED);
+
entries = blobs_all[uc_fw->type].entries;
count = blobs_all[uc_fw->type].count;
for (i = 0; i < count && p <= entries[i].platform; i++) {
- if (p == entries[i].platform) {
- uc_fw->path = entries[i].path;
- uc_fw->versions.wanted.major = entries[i].major;
- uc_fw->versions.wanted.minor = entries[i].minor;
- uc_fw->versions.wanted.patch = entries[i].patch;
- uc_fw->full_ver_required = entries[i].full_ver_required;
-
- if (uc_fw->type == XE_UC_FW_TYPE_GSC)
- uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
- else
- uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
-
- break;
- }
+ if (p != entries[i].platform)
+ continue;
+
+ if (entries[i].gt_type != XE_GT_TYPE_ANY &&
+ entries[i].gt_type != gt->info.type)
+ continue;
+
+ uc_fw->path = entries[i].path;
+ uc_fw->versions.wanted.major = entries[i].major;
+ uc_fw->versions.wanted.minor = entries[i].minor;
+ uc_fw->versions.wanted.patch = entries[i].patch;
+ uc_fw->full_ver_required = entries[i].full_ver_required;
+
+ if (uc_fw->type == XE_UC_FW_TYPE_GSC)
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ else
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
+
+ break;
}
}
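The reworked uc_fw_auto_select() above now matches entries on two keys: the platform must match exactly, while the GT type matches either exactly or via the XE_GT_TYPE_ANY wildcard. A minimal stand-alone sketch of that selection rule (table layout and names are illustrative, not the driver's):

    #include <stdio.h>

    enum gt_type { GT_ANY = 0, GT_PRIMARY, GT_MEDIA };

    struct fw_entry {
            int platform;
            enum gt_type gt;
            const char *path;
    };

    /* First entry wins: exact platform, GT equal or wildcard. */
    static const char *select_fw(const struct fw_entry *e, int n,
                                 int platform, enum gt_type gt)
    {
            for (int i = 0; i < n; i++) {
                    if (e[i].platform != platform)
                            continue;
                    if (e[i].gt != GT_ANY && e[i].gt != gt)
                            continue;
                    return e[i].path;
            }
            return NULL;
    }

    int main(void)
    {
            static const struct fw_entry tbl[] = {
                    { 1, GT_MEDIA, "guc_media.bin" },
                    { 1, GT_ANY,   "guc.bin" },
            };

            puts(select_fw(tbl, 2, 1, GT_PRIMARY)); /* guc.bin */
            return 0;
    }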
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 367c84b90e9e..861577746929 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1678,13 +1678,21 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
* scheduler drops all the references of it, hence protecting the VM
* for this case is necessary.
*/
- if (flags & XE_VM_FLAG_LR_MODE)
+ if (flags & XE_VM_FLAG_LR_MODE) {
+ INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
xe_pm_runtime_get_noresume(xe);
+ }
+
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_no_resv;
+ }
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
err = -ENOMEM;
- goto err_no_resv;
+ goto err_svm_fini;
}
drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
@@ -1724,10 +1732,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->batch_invalidate_tlb = true;
}
- if (vm->flags & XE_VM_FLAG_LR_MODE) {
- INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+ if (vm->flags & XE_VM_FLAG_LR_MODE)
vm->batch_invalidate_tlb = false;
- }
/* Fill pt_root after allocating scratch tables */
for_each_tile(tile, xe, id) {
@@ -1757,12 +1763,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
}
}
- if (flags & XE_VM_FLAG_FAULT_MODE) {
- err = xe_svm_init(vm);
- if (err)
- goto err_close;
- }
-
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
@@ -1776,6 +1776,11 @@ err_close:
xe_vm_close_and_put(vm);
return ERR_PTR(err);
+err_svm_fini:
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ vm->size = 0; /* close the vm */
+ xe_svm_fini(vm);
+ }
err_no_resv:
mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
@@ -2049,7 +2054,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
- args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
+ !xe->info.needs_scratch))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
@@ -2201,6 +2207,20 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
}
#endif
+static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
+{
+ if (!xe_vm_in_fault_mode(vm))
+ return false;
+
+ if (!xe_vm_has_scratch(vm))
+ return false;
+
+ if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
+ return false;
+
+ return true;
+}
+
/*
* Create operations list from IOCTL arguments, setup operations fields so parse
* and commit steps are decoupled from IOCTL arguments. This step can fail.
@@ -2273,6 +2293,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
+ op->map.invalidate_on_bind =
+ __xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
}
@@ -2472,8 +2494,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return PTR_ERR(vma);
op->map.vma = vma;
- if ((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror)
+ if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
+ !op->map.is_cpu_addr_mirror) ||
+ op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask);
break;
@@ -2726,9 +2749,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- err = vma_lock_and_validate(exec, op->map.vma,
- !xe_vm_in_fault_mode(vm) ||
- op->map.immediate);
+ if (!op->map.invalidate_on_bind)
+ err = vma_lock_and_validate(exec, op->map.vma,
+ !xe_vm_in_fault_mode(vm) ||
+ op->map.immediate);
break;
case DRM_GPUVA_OP_REMAP:
err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
@@ -3082,9 +3106,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (!*bind_ops)
return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
- err = __copy_from_user(*bind_ops, bind_user,
- sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds);
+ err = copy_from_user(*bind_ops, bind_user,
+ sizeof(struct drm_xe_vm_bind_op) *
+ args->num_binds);
if (XE_IOCTL_DBG(xe, err)) {
err = -EFAULT;
goto free_bind_ops;
@@ -3109,7 +3133,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
(!xe_vm_in_fault_mode(vm) ||
- !IS_ENABLED(CONFIG_DRM_GPUSVM)))) {
+ !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3243,7 +3267,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
- return -EINVAL;
+ return -EINVAL;
}
}
@@ -3251,7 +3275,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
if (bo->cpu_caching) {
if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
- return -EINVAL;
+ return -EINVAL;
}
} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
/*
@@ -3260,7 +3284,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
* how it was mapped on the CPU. Just assume it is
* potentially cached on CPU side.
*/
- return -EINVAL;
+ return -EINVAL;
}
/* If a BO is protected it can only be mapped if the key is still valid */
@@ -3846,6 +3870,9 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
}
drm_puts(p, "\n");
+
+ if (drm_coredump_printer_is_full(p))
+ return;
}
}
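The xe_vm_create() hunks above move xe_svm_init() ahead of the reservation-object allocation, so failures after that point must unwind it through the new err_svm_fini label. A stand-alone sketch of the goto-unwind ordering this preserves (helper names are illustrative):

    #include <stdio.h>

    static int init_a(void) { return 0; }    /* the step moved earlier */
    static void fini_a(void) { }
    static int init_b(void) { return -12; }  /* force the failure path */

    static int create(void)
    {
            int err;

            err = init_a();
            if (err)
                    return err;

            err = init_b();
            if (err)
                    goto err_fini_a;   /* a later failure must undo A */

            return 0;

    err_fini_a:
            fini_a();
            return err;
    }

    int main(void)
    {
            printf("%d\n", create());  /* -12 */
            return 0;
    }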
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 0ef811fc2bde..494af6bdc646 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -301,6 +301,75 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+/**
+ * xe_vm_set_validating() - Register this task as currently making bos resident
+ * @vm: Pointer to the vm or NULL.
+ * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
+ * validating.
+ *
+ * Register this task as currently making bos resident for the vm. Intended
+ * to avoid eviction by the same task of shared bos bound to the vm.
+ * Call with the vm's resv lock held.
+ *
+ * Return: A pin cookie that should be used for xe_vm_clear_validating().
+ */
+static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
+ bool allow_res_evict)
+{
+ struct pin_cookie cookie = {};
+
+ if (vm && !allow_res_evict) {
+ xe_vm_assert_held(vm);
+ cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
+ /* Pairs with READ_ONCE in xe_vm_is_validating() */
+ WRITE_ONCE(vm->validating, current);
+ }
+
+ return cookie;
+}
+
+/**
+ * xe_vm_clear_validating() - Unregister this task as currently making bos resident
+ * @vm: Pointer to the vm or NULL
+ * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
+ * value as for xe_vm_set_validating().
+ * @cookie: Cookie obtained from xe_vm_set_validating().
+ *
+ * Unregister this task as currently making bos resident for the vm. Intended
+ * to avoid eviction by the same task of shared bos bound to the vm.
+ * Call with the vm's resv lock held.
+ */
+static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
+ struct pin_cookie cookie)
+{
+ if (vm && !allow_res_evict) {
+ lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
+ /* Pairs with READ_ONCE in xe_vm_is_validating() */
+ WRITE_ONCE(vm->validating, NULL);
+ }
+}
+
+/**
+ * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
+ * by the current task.
+ * @vm: Pointer to the vm.
+ *
+ * If this function returns %true, we should be in a vm resv locked region, since
+ * the current process is the same task that called xe_vm_set_validating().
+ * The function asserts that that's indeed the case.
+ *
+ * Return: %true if the task is currently making bos resident, %false otherwise.
+ */
+static inline bool xe_vm_is_validating(struct xe_vm *vm)
+{
+ /* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */
+ if (READ_ONCE(vm->validating) == current) {
+ xe_vm_assert_held(vm);
+ return true;
+ }
+ return false;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
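Taken together, the three helpers above bracket a validation section under the vm's resv lock. A hedged sketch of the intended call pattern (make_bos_resident() is an illustrative placeholder, not a driver function):

    struct pin_cookie cookie;
    int err;

    /* resv already locked by the caller */
    cookie = xe_vm_set_validating(vm, allow_res_evict);
    err = make_bos_resident(vm);    /* eviction paths can now consult
                                     * xe_vm_is_validating() */
    xe_vm_clear_validating(vm, allow_res_evict, cookie);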
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 84fa41b9fa20..1979e9bdbdf3 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -310,6 +310,14 @@ struct xe_vm {
* protected by the vm resv.
*/
u64 tlb_flush_seqno;
+ /**
+ * @validating: The task that is currently making bos resident for this vm.
+ * Protected by the VM's resv for writing. Opportunistic reading can be done
+ * using READ_ONCE. Note: This is a workaround for the
+ * TTM eviction_valuable() callback not being passed a struct
+ * ttm_operation_ctx. Future work might want to address this.
+ */
+ struct task_struct *validating;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
@@ -330,6 +338,8 @@ struct xe_vma_op_map {
bool is_cpu_addr_mirror;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
+ /** @invalidate_on_bind: invalidate the VMA before bind */
+ bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
};
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index b1f81dca610d..e421a74fb87c 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -49,7 +49,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)
*/
static void resize_vram_bar(struct xe_device *xe)
{
- u64 force_vram_bar_size = xe_modparam.force_vram_bar_size;
+ int force_vram_bar_size = xe_modparam.force_vram_bar_size;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_bus *root = pdev->bus;
resource_size_t current_size;
@@ -66,6 +66,9 @@ static void resize_vram_bar(struct xe_device *xe)
if (!bar_size_mask)
return;
+ if (force_vram_bar_size < 0)
+ return;
+
/* set to a specific size? */
if (force_vram_bar_size) {
u32 bar_size_bit;
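The u64-to-int change above matters because force_vram_bar_size uses -1 to mean "leave the BAR alone": stored in an unsigned type, the new `< 0` early return could never fire. A small stand-alone illustration of the signedness pitfall:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long u = (unsigned long long)-1;
            long long s = -1;

            /* An unsigned value is never negative. Prints: 0 1 */
            printf("%d %d\n", u < 0, s < 0);
            return 0;
    }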
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 24f644c0a673..67196baa4249 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -230,6 +230,18 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("16025250150"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_ACTIONS(SET(LSN_VC_REG2,
+ LSN_LNI_WGT(1) |
+ LSN_LNE_WGT(1) |
+ LSN_DIM_X_WGT(1) |
+ LSN_DIM_Y_WGT(1) |
+ LSN_DIM_Z_WGT(1)))
+ },
+
/* Xe2_HPM */
{ XE_RTP_NAME("16021867713"),
@@ -815,6 +827,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
+ { XE_RTP_NAME("22021007897"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
+ },
/* Xe3_LPG */
{ XE_RTP_NAME("14021490052"),
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 9b9e176992a8..9efc5accd43d 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -57,3 +57,5 @@ no_media_l3 MEDIA_VERSION(3000)
GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
MEDIA_VERSION_RANGE(1301, 3000)
+16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
+ MEDIA_VERSION_RANGE(1300, 3000)
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index dbecca9bdd54..cfabf5e2a0bb 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -22,6 +22,7 @@ config DRM_ZYNQMP_DPSUB_AUDIO
bool "ZynqMP DisplayPort Audio Support"
depends on DRM_ZYNQMP_DPSUB
depends on SND && SND_SOC
+ depends on SND_SOC=y || DRM_ZYNQMP_DPSUB=m
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Choose this option to enable DisplayPort audio support in the ZynqMP
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index a6a4a871f197..238cbb49963e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1481,6 +1481,7 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp,
*/
static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
@@ -1494,7 +1495,7 @@ static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge,
}
if (dp->next_bridge) {
- ret = drm_bridge_attach(bridge->encoder, dp->next_bridge,
+ ret = drm_bridge_attach(encoder, dp->next_bridge,
bridge, flags);
if (ret < 0)
goto error;
@@ -2466,10 +2467,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
dp->reset = devm_reset_control_get(dp->dev, NULL);
if (IS_ERR(dp->reset)) {
- if (PTR_ERR(dp->reset) != -EPROBE_DEFER)
- dev_err(dp->dev, "failed to get reset: %ld\n",
- PTR_ERR(dp->reset));
- ret = PTR_ERR(dp->reset);
+ ret = dev_err_probe(dp->dev, PTR_ERR(dp->reset),
+ "failed to get reset\n");
goto err_free;
}
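dev_err_probe() folds the deferral special case into one call: it returns the error it is given, logs with dev_err() only for real failures, and for -EPROBE_DEFER records the message as the deferred-probe reason instead of printing it. The shape of the conversion, abstracted from the hunk above:

    /* before: open-coded deferral check */
    if (PTR_ERR(rst) != -EPROBE_DEFER)
            dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst));
    ret = PTR_ERR(rst);

    /* after: one line, same behaviour plus a recorded deferral reason */
    ret = dev_err_probe(dev, PTR_ERR(rst), "failed to get reset\n");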
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
index f07ff4eb3a6d..1a46a046103f 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -45,7 +45,6 @@ struct zynqmp_dpsub_audio {
struct {
struct snd_soc_dai_link_component cpu;
- struct snd_soc_dai_link_component codec;
struct snd_soc_dai_link_component platform;
} components[ZYNQMP_NUM_PCMS];
@@ -403,10 +402,8 @@ int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
link->num_cpus = 1;
link->cpus[0].dai_name = audio->dai_name;
- link->codecs = &audio->components[i].codec;
+ link->codecs = &snd_soc_dummy_dlc;
link->num_codecs = 1;
- link->codecs[0].name = "snd-soc-dummy";
- link->codecs[0].dai_name = "snd-soc-dummy-dai";
link->platforms = &audio->components[i].platform;
link->num_platforms = 1;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 8e09d6d328d2..344cc9e741c1 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -41,7 +41,6 @@ static int host1x_subdev_add(struct host1x_device *device,
struct device_node *np)
{
struct host1x_subdev *subdev;
- struct device_node *child;
int err;
subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
@@ -56,13 +55,12 @@ static int host1x_subdev_add(struct host1x_device *device,
mutex_unlock(&device->subdevs_lock);
/* recursively add children */
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (of_match_node(driver->subdevs, child) &&
of_device_is_available(child)) {
err = host1x_subdev_add(device, driver, child);
if (err < 0) {
/* XXX cleanup? */
- of_node_put(child);
return err;
}
}
@@ -90,17 +88,14 @@ static void host1x_subdev_del(struct host1x_subdev *subdev)
static int host1x_device_parse_dt(struct host1x_device *device,
struct host1x_driver *driver)
{
- struct device_node *np;
int err;
- for_each_child_of_node(device->dev.parent->of_node, np) {
+ for_each_child_of_node_scoped(device->dev.parent->of_node, np) {
if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
err = host1x_subdev_add(device, driver, np);
- if (err < 0) {
- of_node_put(np);
+ if (err < 0)
return err;
- }
}
}
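for_each_child_of_node_scoped() ties the child's reference to the loop scope, so every early exit drops it automatically; that is why both hunks above can delete their of_node_put() calls. A sketch of the before/after idiom (do_thing() is illustrative):

    /* manual reference management */
    for_each_child_of_node(parent, child) {
            if (do_thing(child) < 0) {
                    of_node_put(child);   /* easy to forget */
                    return -EINVAL;
            }
    }

    /* scoped variant: the reference is put on every exit path */
    for_each_child_of_node_scoped(parent, child) {
            if (do_thing(child) < 0)
                    return -EINVAL;
    }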
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 407ed9b9cf64..ba2e572567c0 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -247,8 +247,6 @@ static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
CDMA_EVENT_PUSH_BUFFER_SPACE);
- host1x_hw_cdma_flush(host1x, cdma);
-
/* If somebody has managed to already start waiting, yield */
if (cdma->event != CDMA_EVENT_NONE) {
mutex_unlock(&cdma->lock);
@@ -591,7 +589,6 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
*/
void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
{
- struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
u32 slots_free = cdma->slots_free;
@@ -599,11 +596,9 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
op1, op2);
- if (slots_free == 0) {
- host1x_hw_cdma_flush(host1x, cdma);
+ if (slots_free == 0)
slots_free = host1x_cdma_wait_locked(cdma,
CDMA_EVENT_PUSH_BUFFER_SPACE);
- }
cdma->slots_free = slots_free - 1;
cdma->slots_used++;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index fa77e4e64f12..333f36e0a715 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1008,7 +1008,7 @@ int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
int virq;
- virq = irq_linear_revmap(ipu->domain, irq);
+ virq = irq_find_mapping(ipu->domain, irq);
if (!virq)
virq = irq_create_mapping(ipu->domain, irq);
@@ -1169,8 +1169,8 @@ static int ipu_irq_init(struct ipu_soc *ipu)
};
int ret, i;
- ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
- &irq_generic_chip_ops, ipu);
+ ipu->domain = irq_domain_create_linear(of_fwnode_handle(ipu->dev->of_node), IPU_NUM_IRQS,
+ &irq_generic_chip_ops, ipu);
if (!ipu->domain) {
dev_err(ipu->dev, "failed to add irq domain\n");
return -ENODEV;
@@ -1219,7 +1219,7 @@ static void ipu_irq_exit(struct ipu_soc *ipu)
/* TODO: remove irq_domain_generic_chips */
for (i = 0; i < IPU_NUM_IRQS; i++) {
- irq = irq_linear_revmap(ipu->domain, i);
+ irq = irq_find_mapping(ipu->domain, i);
if (irq)
irq_dispose_mapping(irq);
}
diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
index ad0c06756516..8726d80d6ba4 100644
--- a/drivers/gpu/nova-core/Kconfig
+++ b/drivers/gpu/nova-core/Kconfig
@@ -3,6 +3,7 @@ config NOVA_CORE
depends on PCI
depends on RUST
depends on RUST_FW_LOADER_ABSTRACTIONS
+ select AUXILIARY_BUS
default n
help
Choose this if you want to build the Nova Core driver for Nvidia
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index a08fb6599267..8c86101c26cb 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{bindings, c_str, device::Core, pci, prelude::*};
+use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*};
use crate::gpu::Gpu;
@@ -8,6 +8,7 @@ use crate::gpu::Gpu;
pub(crate) struct NovaCore {
#[pin]
pub(crate) gpu: Gpu,
+ _reg: auxiliary::Registration,
}
const BAR0_SIZE: usize = 8;
@@ -38,6 +39,12 @@ impl pci::Driver for NovaCore {
let this = KBox::pin_init(
try_pin_init!(Self {
gpu <- Gpu::new(pdev, bar)?,
+ _reg: auxiliary::Registration::new(
+ pdev.as_ref(),
+ c_str!("nova-drm"),
+ 0, // TODO: Once it lands, use XArray; for now we don't use the ID.
+ crate::MODULE_NAME
+ )?,
}),
GFP_KERNEL,
)?;
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
index 6e6361c59ca1..4b8a38358a4f 100644
--- a/drivers/gpu/nova-core/firmware.rs
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -1,13 +1,49 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::gpu;
+//! Contains structures and functions dedicated to the parsing, building and patching of firmware
+//! images to be loaded into a given execution unit.
+
+use kernel::device;
use kernel::firmware;
+use kernel::prelude::*;
+use kernel::str::CString;
+
+use crate::gpu;
+use crate::gpu::Chipset;
+
+pub(crate) const FIRMWARE_VERSION: &str = "535.113.01";
+
+/// Structure encapsulating the firmware blobs required for the GPU to operate.
+#[expect(dead_code)]
+pub(crate) struct Firmware {
+ booter_load: firmware::Firmware,
+ booter_unload: firmware::Firmware,
+ bootloader: firmware::Firmware,
+ gsp: firmware::Firmware,
+}
+
+impl Firmware {
+ pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
+ let mut chip_name = CString::try_from_fmt(fmt!("{}", chipset))?;
+ chip_name.make_ascii_lowercase();
+
+ let request = |name_| {
+ CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
+ .and_then(|path| firmware::Firmware::request(&path, dev))
+ };
+
+ Ok(Firmware {
+ booter_load: request("booter_load")?,
+ booter_unload: request("booter_unload")?,
+ bootloader: request("bootloader")?,
+ gsp: request("gsp")?,
+ })
+ }
+}
pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
impl<const N: usize> ModInfoBuilder<N> {
- const VERSION: &'static str = "535.113.01";
-
const fn make_entry_file(self, chipset: &str, fw: &str) -> Self {
ModInfoBuilder(
self.0
@@ -17,7 +53,7 @@ impl<const N: usize> ModInfoBuilder<N> {
.push("/gsp/")
.push(fw)
.push("-")
- .push(Self::VERSION)
+ .push(FIRMWARE_VERSION)
.push(".bin"),
)
}
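The relocated request closure builds firmware paths of the form nvidia/<chip>/gsp/<name>-<version>.bin from the lower-cased chipset name. A stand-alone C sketch of the same formatting rule (values are examples only):

    #include <stdio.h>

    int main(void)
    {
            char path[64];

            snprintf(path, sizeof(path), "nvidia/%s/gsp/%s-%s.bin",
                     "ga102", "bootloader", "535.113.01");
            puts(path); /* nvidia/ga102/gsp/bootloader-535.113.01.bin */
            return 0;
    }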
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index ab0e5a72a059..60b86f370284 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{
- device, devres::Devres, error::code::*, firmware, fmt, pci, prelude::*, str::CString,
-};
+use kernel::{device, devres::Devres, error::code::*, pci, prelude::*};
use crate::driver::Bar0;
+use crate::firmware::{Firmware, FIRMWARE_VERSION};
use crate::regs;
use crate::util;
use core::fmt;
@@ -13,7 +12,7 @@ macro_rules! define_chipset {
({ $($variant:ident = $value:expr),* $(,)* }) =>
{
/// Enum representation of the GPU chipset.
- #[derive(fmt::Debug)]
+ #[derive(fmt::Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub(crate) enum Chipset {
$($variant = $value),*,
}
@@ -54,6 +53,7 @@ define_chipset!({
TU117 = 0x167,
TU116 = 0x168,
// Ampere
+ GA100 = 0x170,
GA102 = 0x172,
GA103 = 0x173,
GA104 = 0x174,
@@ -73,7 +73,7 @@ impl Chipset {
Self::TU102 | Self::TU104 | Self::TU106 | Self::TU117 | Self::TU116 => {
Architecture::Turing
}
- Self::GA102 | Self::GA103 | Self::GA104 | Self::GA106 | Self::GA107 => {
+ Self::GA100 | Self::GA102 | Self::GA103 | Self::GA104 | Self::GA106 | Self::GA107 => {
Architecture::Ampere
}
Self::AD102 | Self::AD103 | Self::AD104 | Self::AD106 | Self::AD107 => {
@@ -100,9 +100,22 @@ impl fmt::Display for Chipset {
/// Enum representation of the GPU generation.
#[derive(fmt::Debug)]
pub(crate) enum Architecture {
- Turing,
- Ampere,
- Ada,
+ Turing = 0x16,
+ Ampere = 0x17,
+ Ada = 0x19,
+}
+
+impl TryFrom<u8> for Architecture {
+ type Error = Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0x16 => Ok(Self::Turing),
+ 0x17 => Ok(Self::Ampere),
+ 0x19 => Ok(Self::Ada),
+ _ => Err(ENODEV),
+ }
+ }
}
pub(crate) struct Revision {
@@ -111,10 +124,10 @@ pub(crate) struct Revision {
}
impl Revision {
- fn from_boot0(boot0: regs::Boot0) -> Self {
+ fn from_boot0(boot0: regs::NV_PMC_BOOT_0) -> Self {
Self {
- major: boot0.major_rev(),
- minor: boot0.minor_rev(),
+ major: boot0.major_revision(),
+ minor: boot0.minor_revision(),
}
}
}
@@ -133,45 +146,16 @@ pub(crate) struct Spec {
}
impl Spec {
- fn new(bar: &Devres<Bar0>) -> Result<Spec> {
- let bar = bar.try_access().ok_or(ENXIO)?;
- let boot0 = regs::Boot0::read(&bar);
+ fn new(bar: &Bar0) -> Result<Spec> {
+ let boot0 = regs::NV_PMC_BOOT_0::read(bar);
Ok(Self {
- chipset: boot0.chipset().try_into()?,
+ chipset: boot0.chipset()?,
revision: Revision::from_boot0(boot0),
})
}
}
-/// Structure encapsulating the firmware blobs required for the GPU to operate.
-#[expect(dead_code)]
-pub(crate) struct Firmware {
- booter_load: firmware::Firmware,
- booter_unload: firmware::Firmware,
- bootloader: firmware::Firmware,
- gsp: firmware::Firmware,
-}
-
-impl Firmware {
- fn new(dev: &device::Device, spec: &Spec, ver: &str) -> Result<Firmware> {
- let mut chip_name = CString::try_from_fmt(fmt!("{}", spec.chipset))?;
- chip_name.make_ascii_lowercase();
-
- let request = |name_| {
- CString::try_from_fmt(fmt!("nvidia/{}/gsp/{}-{}.bin", &*chip_name, name_, ver))
- .and_then(|path| firmware::Firmware::request(&path, dev))
- };
-
- Ok(Firmware {
- booter_load: request("booter_load")?,
- booter_unload: request("booter_unload")?,
- bootloader: request("bootloader")?,
- gsp: request("gsp")?,
- })
- }
-}
-
/// Structure holding the resources required to operate the GPU.
#[pin_data]
pub(crate) struct Gpu {
@@ -182,9 +166,13 @@ pub(crate) struct Gpu {
}
impl Gpu {
- pub(crate) fn new(pdev: &pci::Device, bar: Devres<Bar0>) -> Result<impl PinInit<Self>> {
- let spec = Spec::new(&bar)?;
- let fw = Firmware::new(pdev.as_ref(), &spec, "535.113.01")?;
+ pub(crate) fn new(
+ pdev: &pci::Device<device::Bound>,
+ devres_bar: Devres<Bar0>,
+ ) -> Result<impl PinInit<Self>> {
+ let bar = devres_bar.access(pdev.as_ref())?;
+ let spec = Spec::new(bar)?;
+ let fw = Firmware::new(pdev.as_ref(), spec.chipset, FIRMWARE_VERSION)?;
dev_info!(
pdev.as_ref(),
@@ -194,6 +182,10 @@ impl Gpu {
spec.revision
);
- Ok(pin_init!(Self { spec, bar, fw }))
+ Ok(pin_init!(Self {
+ spec,
+ bar: devres_bar,
+ fw
+ }))
}
}
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index a91cd924054b..618632f0abcc 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -8,6 +8,8 @@ mod gpu;
mod regs;
mod util;
+pub(crate) const MODULE_NAME: &kernel::str::CStr = <LocalModule as kernel::ModuleMetadata>::NAME;
+
kernel::module_pci_driver! {
type: driver::NovaCore,
name: "NovaCore",
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index b1a25b86ef17..5a1273230306 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -1,55 +1,39 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::driver::Bar0;
-
-// TODO
-//
-// Create register definitions via generic macros. See task "Generic register
-// abstraction" in Documentation/gpu/nova/core/todo.rst.
-
-const BOOT0_OFFSET: usize = 0x00000000;
-
-// 3:0 - chipset minor revision
-const BOOT0_MINOR_REV_SHIFT: u8 = 0;
-const BOOT0_MINOR_REV_MASK: u32 = 0x0000000f;
-
-// 7:4 - chipset major revision
-const BOOT0_MAJOR_REV_SHIFT: u8 = 4;
-const BOOT0_MAJOR_REV_MASK: u32 = 0x000000f0;
-
-// 23:20 - chipset implementation Identifier (depends on architecture)
-const BOOT0_IMPL_SHIFT: u8 = 20;
-const BOOT0_IMPL_MASK: u32 = 0x00f00000;
-
-// 28:24 - chipset architecture identifier
-const BOOT0_ARCH_MASK: u32 = 0x1f000000;
-
-// 28:20 - chipset identifier (virtual register field combining BOOT0_IMPL and
-// BOOT0_ARCH)
-const BOOT0_CHIPSET_SHIFT: u8 = BOOT0_IMPL_SHIFT;
-const BOOT0_CHIPSET_MASK: u32 = BOOT0_IMPL_MASK | BOOT0_ARCH_MASK;
-
-#[derive(Copy, Clone)]
-pub(crate) struct Boot0(u32);
-
-impl Boot0 {
- #[inline]
- pub(crate) fn read(bar: &Bar0) -> Self {
- Self(bar.read32(BOOT0_OFFSET))
- }
-
- #[inline]
- pub(crate) fn chipset(&self) -> u32 {
- (self.0 & BOOT0_CHIPSET_MASK) >> BOOT0_CHIPSET_SHIFT
- }
-
- #[inline]
- pub(crate) fn minor_rev(&self) -> u8 {
- ((self.0 & BOOT0_MINOR_REV_MASK) >> BOOT0_MINOR_REV_SHIFT) as u8
+// Required to retain the original register names used by OpenRM, which are all capital snake case
+// but are mapped to types.
+#![allow(non_camel_case_types)]
+
+#[macro_use]
+mod macros;
+
+use crate::gpu::{Architecture, Chipset};
+use kernel::prelude::*;
+
+/* PMC */
+
+register!(NV_PMC_BOOT_0 @ 0x00000000, "Basic revision information about the GPU" {
+ 3:0 minor_revision as u8, "Minor revision of the chip";
+ 7:4 major_revision as u8, "Major revision of the chip";
+ 8:8 architecture_1 as u8, "MSB of the architecture";
+ 23:20 implementation as u8, "Implementation version of the architecture";
+ 28:24 architecture_0 as u8, "Lower bits of the architecture";
+});
+
+impl NV_PMC_BOOT_0 {
+ /// Combines `architecture_0` and `architecture_1` to obtain the architecture of the chip.
+ pub(crate) fn architecture(self) -> Result<Architecture> {
+ Architecture::try_from(
+ self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0.len()),
+ )
}
- #[inline]
- pub(crate) fn major_rev(&self) -> u8 {
- ((self.0 & BOOT0_MAJOR_REV_MASK) >> BOOT0_MAJOR_REV_SHIFT) as u8
+ /// Combines `architecture` and `implementation` to obtain a code unique to the chipset.
+ pub(crate) fn chipset(self) -> Result<Chipset> {
+ self.architecture()
+ .map(|arch| {
+ ((arch as u32) << Self::IMPLEMENTATION.len()) | self.implementation() as u32
+ })
+ .and_then(Chipset::try_from)
}
}
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
new file mode 100644
index 000000000000..7ecc70efb3cd
--- /dev/null
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Macro to define register layout and accessors.
+//!
+//! A single register typically includes several fields, which are accessed through a combination
+//! of bit-shift and mask operations that introduce a class of potential mistakes, notably because
+//! not all possible field values are necessarily valid.
+//!
+//! The macro in this module allows defining, using an intuitive and readable syntax, a dedicated
+//! type for each register with its own field accessors that can return an error if a field's
+//! value is invalid.
+
+/// Defines a dedicated type for a register with an absolute offset, along with getter and
+/// setter methods for its fields and methods to read and write it from an `Io` region.
+///
+/// Example:
+///
+/// ```no_run
+/// register!(BOOT_0 @ 0x00000100, "Basic revision information about the GPU" {
+/// 3:0 minor_revision as u8, "Minor revision of the chip";
+/// 7:4 major_revision as u8, "Major revision of the chip";
+/// 28:20 chipset as u32 ?=> Chipset, "Chipset model";
+/// });
+/// ```
+///
+/// This defines a `BOOT_0` type which can be read or written from offset `0x100` of an `Io`
+/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 least
+/// significant bits of the register. Each field can be accessed and modified using accessor
+/// methods:
+///
+/// ```no_run
+/// // Read from the register's defined offset (0x100).
+/// let boot0 = BOOT_0::read(&bar);
+/// pr_info!("chip revision: {}.{}", boot0.major_revision(), boot0.minor_revision());
+///
+/// // `Chipset::try_from` will be called with the value of the field and returns an error if the
+/// // value is invalid.
+/// let chipset = boot0.chipset()?;
+///
+/// // Update some fields and write the value back.
+/// boot0.set_major_revision(3).set_minor_revision(10).write(&bar);
+///
+/// // Or just read and update the register in a single step:
+/// BOOT_0::alter(&bar, |r| r.set_major_revision(3).set_minor_revision(10));
+/// ```
+///
+/// Fields can be defined as follows:
+///
+/// - `as <type>` simply returns the field value casted as the requested integer type, typically
+/// `u32`, `u16`, `u8` or `bool`. Note that `bool` fields must have a range of 1 bit.
+/// - `as <type> => <into_type>` calls `<into_type>`'s `From::<<type>>` implementation and returns
+/// the result.
+/// - `as <type> ?=> <try_into_type>` calls `<try_into_type>`'s `TryFrom::<<type>>` implementation
+/// and returns the result. This is useful on fields for which not all values are valid.
+///
+/// The documentation strings are optional. If present, they will be added to the type's
+/// definition, or to the field getter and setter methods they are attached to.
+///
+/// Putting a `+` before the address of the register makes it relative to a base: the `read` and
+/// `write` methods take a `base` argument that is added to the specified address before access,
+/// and `try_read` and `try_write` methods are also created, allowing access with offsets unknown
+/// at compile-time:
+///
+/// ```no_run
+/// register!(CPU_CTL @ +0x0000010, "CPU core control" {
+/// 0:0 start as bool, "Start the CPU core";
+/// });
+///
+/// // Flip the `start` switch for the CPU core whose base address is `CPU_BASE`.
+/// let cpuctl = CPU_CTL::read(&bar, CPU_BASE);
+/// pr_info!("CPU CTL: {:?}", cpuctl);
+/// cpuctl.set_start(true).write(&bar, CPU_BASE);
+/// ```
+macro_rules! register {
+ // Creates a register at a fixed offset of the MMIO space.
+ (
+ $name:ident @ $offset:literal $(, $comment:literal)? {
+ $($fields:tt)*
+ }
+ ) => {
+ register!(@common $name $(, $comment)?);
+ register!(@field_accessors $name { $($fields)* });
+ register!(@io $name @ $offset);
+ };
+
+ // Creates a register at a relative offset from a base address.
+ (
+ $name:ident @ + $offset:literal $(, $comment:literal)? {
+ $($fields:tt)*
+ }
+ ) => {
+ register!(@common $name $(, $comment)?);
+ register!(@field_accessors $name { $($fields)* });
+ register!(@io $name @ + $offset);
+ };
+
+ // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, `BitOr`,
+ // and conversion to regular `u32`).
+ (@common $name:ident $(, $comment:literal)?) => {
+ $(
+ #[doc=$comment]
+ )?
+ #[repr(transparent)]
+ #[derive(Clone, Copy, Default)]
+ pub(crate) struct $name(u32);
+
+ // TODO: display the raw hex value, then the value of all the fields. This requires
+ // matching the fields, which will complicate the syntax considerably...
+ impl ::core::fmt::Debug for $name {
+ fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
+ f.debug_tuple(stringify!($name))
+ .field(&format_args!("0x{0:x}", &self.0))
+ .finish()
+ }
+ }
+
+ impl core::ops::BitOr for $name {
+ type Output = Self;
+
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+ }
+
+ impl ::core::convert::From<$name> for u32 {
+ fn from(reg: $name) -> u32 {
+ reg.0
+ }
+ }
+ };
+
+ // Defines all the field getter/setter methods for `$name`.
+ (
+ @field_accessors $name:ident {
+ $($hi:tt:$lo:tt $field:ident as $type:tt
+ $(?=> $try_into_type:ty)?
+ $(=> $into_type:ty)?
+ $(, $comment:literal)?
+ ;
+ )*
+ }
+ ) => {
+ $(
+ register!(@check_field_bounds $hi:$lo $field as $type);
+ )*
+
+ #[allow(dead_code)]
+ impl $name {
+ $(
+ register!(@field_accessor $name $hi:$lo $field as $type
+ $(?=> $try_into_type)?
+ $(=> $into_type)?
+ $(, $comment)?
+ ;
+ );
+ )*
+ }
+ };
+
+ // Boolean fields must have `$hi == $lo`.
+ (@check_field_bounds $hi:tt:$lo:tt $field:ident as bool) => {
+ #[allow(clippy::eq_op)]
+ const _: () = {
+ kernel::build_assert!(
+ $hi == $lo,
+ concat!("boolean field `", stringify!($field), "` covers more than one bit")
+ );
+ };
+ };
+
+ // Non-boolean fields must have `$hi >= $lo`.
+ (@check_field_bounds $hi:tt:$lo:tt $field:ident as $type:tt) => {
+ #[allow(clippy::eq_op)]
+ const _: () = {
+ kernel::build_assert!(
+ $hi >= $lo,
+ concat!("field `", stringify!($field), "`'s MSB is smaller than its LSB")
+ );
+ };
+ };
+
+ // Catches fields defined as `bool` and converts them into a boolean value.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as bool => $into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(
+ @leaf_accessor $name $hi:$lo $field as bool
+ { |f| <$into_type>::from(if f != 0 { true } else { false }) }
+ $into_type => $into_type $(, $comment)?;
+ );
+ };
+
+ // Shortcut for fields defined as `bool` without the `=>` syntax.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as bool $(, $comment:literal)?;
+ ) => {
+ register!(@field_accessor $name $hi:$lo $field as bool => bool $(, $comment)?;);
+ };
+
+ // Catches the `?=>` syntax for non-boolean fields.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt ?=> $try_into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(@leaf_accessor $name $hi:$lo $field as $type
+ { |f| <$try_into_type>::try_from(f as $type) } $try_into_type =>
+ ::core::result::Result<
+ $try_into_type,
+ <$try_into_type as ::core::convert::TryFrom<$type>>::Error
+ >
+ $(, $comment)?;);
+ };
+
+ // Catches the `=>` syntax for non-boolean fields.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt => $into_type:ty
+ $(, $comment:literal)?;
+ ) => {
+ register!(@leaf_accessor $name $hi:$lo $field as $type
+ { |f| <$into_type>::from(f as $type) } $into_type => $into_type $(, $comment)?;);
+ };
+
+ // Shortcut for fields defined as non-`bool` without the `=>` or `?=>` syntax.
+ (
+ @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt
+ $(, $comment:literal)?;
+ ) => {
+ register!(@field_accessor $name $hi:$lo $field as $type => $type $(, $comment)?;);
+ };
+
+ // Generates the accessor methods for a single field.
+ (
+ @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:ty
+ { $process:expr } $to_type:ty => $res_type:ty $(, $comment:literal)?;
+ ) => {
+ kernel::macros::paste!(
+ const [<$field:upper>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
+ const [<$field:upper _MASK>]: u32 = ((((1 << $hi) - 1) << 1) + 1) - ((1 << $lo) - 1);
+ const [<$field:upper _SHIFT>]: u32 = Self::[<$field:upper _MASK>].trailing_zeros();
+ );
+
+ $(
+ #[doc="Returns the value of this field:"]
+ #[doc=$comment]
+ )?
+ #[inline]
+ pub(crate) fn $field(self) -> $res_type {
+ kernel::macros::paste!(
+ const MASK: u32 = $name::[<$field:upper _MASK>];
+ const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
+ );
+ let field = ((self.0 & MASK) >> SHIFT);
+
+ $process(field)
+ }
+
+ kernel::macros::paste!(
+ $(
+ #[doc="Sets the value of this field:"]
+ #[doc=$comment]
+ )?
+ #[inline]
+ pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self {
+ const MASK: u32 = $name::[<$field:upper _MASK>];
+ const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
+ let value = ((value as u32) << SHIFT) & MASK;
+ self.0 = (self.0 & !MASK) | value;
+
+ self
+ }
+ );
+ };
+
+ // Creates the IO accessors for a fixed offset register.
+ (@io $name:ident @ $offset:literal) => {
+ #[allow(dead_code)]
+ impl $name {
+ #[inline]
+ pub(crate) fn read<const SIZE: usize, T>(io: &T) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ Self(io.read32($offset))
+ }
+
+ #[inline]
+ pub(crate) fn write<const SIZE: usize, T>(self, io: &T) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.write32(self.0, $offset)
+ }
+
+ #[inline]
+ pub(crate) fn alter<const SIZE: usize, T, F>(
+ io: &T,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io));
+ reg.write(io);
+ }
+ }
+ };
+
+ // Creates the IO accessors for a relative offset register.
+ (@io $name:ident @ + $offset:literal) => {
+ #[allow(dead_code)]
+ impl $name {
+ #[inline]
+ pub(crate) fn read<const SIZE: usize, T>(
+ io: &T,
+ base: usize,
+ ) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ Self(io.read32(base + $offset))
+ }
+
+ #[inline]
+ pub(crate) fn write<const SIZE: usize, T>(
+ self,
+ io: &T,
+ base: usize,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.write32(self.0, base + $offset)
+ }
+
+ #[inline]
+ pub(crate) fn alter<const SIZE: usize, T, F>(
+ io: &T,
+ base: usize,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io, base));
+ reg.write(io, base);
+ }
+
+ #[inline]
+ pub(crate) fn try_read<const SIZE: usize, T>(
+ io: &T,
+ base: usize,
+ ) -> ::kernel::error::Result<Self> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.try_read32(base + $offset).map(Self)
+ }
+
+ #[inline]
+ pub(crate) fn try_write<const SIZE: usize, T>(
+ self,
+ io: &T,
+ base: usize,
+ ) -> ::kernel::error::Result<()> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.try_write32(self.0, base + $offset)
+ }
+
+ #[inline]
+ pub(crate) fn try_alter<const SIZE: usize, T, F>(
+ io: &T,
+ base: usize,
+ f: F,
+ ) -> ::kernel::error::Result<()> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::try_read(io, base)?);
+ reg.try_write(io, base)
+ }
+ }
+ };
+}
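For every hi:lo field the macro derives a mask and shift (the @leaf_accessor arm) and wraps them in a getter/setter pair. A stand-alone C rendering of what that expansion amounts to for a 7:4 field, using the same mask formula:

    #include <stdio.h>
    #include <stdint.h>

    #define FIELD_HI 7
    #define FIELD_LO 4
    /* ((((1 << hi) - 1) << 1) + 1) - ((1 << lo) - 1), as in the macro */
    #define FIELD_MASK (((((1u << FIELD_HI) - 1) << 1) + 1) - ((1u << FIELD_LO) - 1))
    #define FIELD_SHIFT 4u  /* trailing_zeros(FIELD_MASK) */

    static uint8_t get_field(uint32_t reg)
    {
            return (reg & FIELD_MASK) >> FIELD_SHIFT;
    }

    static uint32_t set_field(uint32_t reg, uint8_t v)
    {
            return (reg & ~FIELD_MASK) | (((uint32_t)v << FIELD_SHIFT) & FIELD_MASK);
    }

    int main(void)
    {
            uint32_t r = set_field(0, 0xA);

            printf("0x%08x -> %u\n", r, get_field(r)); /* 0x000000a0 -> 10 */
            return 0;
    }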
diff --git a/drivers/greybus/operation.c b/drivers/greybus/operation.c
index f6beeebf974c..54ccc434a1f7 100644
--- a/drivers/greybus/operation.c
+++ b/drivers/greybus/operation.c
@@ -295,7 +295,8 @@ static void gb_operation_work(struct work_struct *work)
static void gb_operation_timeout(struct timer_list *t)
{
- struct gb_operation *operation = from_timer(operation, t, timer);
+ struct gb_operation *operation = timer_container_of(operation, t,
+ timer);
if (gb_operation_result_set(operation, -ETIMEDOUT)) {
/*
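timer_container_of() is the successor spelling of from_timer(): both recover the structure embedding the timer_list that the core hands to the callback, i.e. a typed container_of(). A sketch of the idiom (struct and names illustrative):

    #include <linux/timer.h>

    struct my_dev {
            int pending;
            struct timer_list timer;
    };

    static void my_timeout(struct timer_list *t)
    {
            /* expands to container_of(t, struct my_dev, timer) */
            struct my_dev *dev = timer_container_of(dev, t, timer);

            dev->pending = 0;
    }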
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a503252702b7..43859fc75747 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -151,6 +151,7 @@ config HID_APPLEIR
config HID_APPLETB_BL
tristate "Apple Touch Bar Backlight"
depends on BACKLIGHT_CLASS_DEVICE
+ depends on X86 || COMPILE_TEST
help
Say Y here if you want support for the backlight of Touch Bars on x86
MacBook Pros.
@@ -163,6 +164,7 @@ config HID_APPLETB_KBD
depends on USB_HID
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on X86 || COMPILE_TEST
select INPUT_SPARSEKMAP
select HID_APPLETB_BL
help
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 25f0ebfcbd5f..0a9b44ce4904 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -83,6 +83,9 @@ static int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
case ALS_IDX:
privdata->dev_en.is_als_present = false;
break;
+ case SRA_IDX:
+ privdata->dev_en.is_sra_present = false;
+ break;
}
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
@@ -134,9 +137,6 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
- if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX)
- break;
-
if (cl_data->sensor_idx[i] == SRA_IDX) {
info.sensor_idx = cl_data->sensor_idx[i];
writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
@@ -145,8 +145,10 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
(privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
cl_data->sensor_sts[i] = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
- if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ cl_data->is_any_sensor_enabled = true;
privdata->dev_en.is_sra_present = true;
+ }
continue;
}
@@ -238,6 +240,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
cleanup:
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] == SRA_IDX)
+ continue;
devm_kfree(dev, cl_data->feature_report[i]);
devm_kfree(dev, in_data->input_report[i]);
devm_kfree(dev, cl_data->report_descr[i]);
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 2e96ec6a3073..9a06f9b0e4ef 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -38,6 +38,9 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
struct hid_bpf_ops *e;
int ret;
+ if (unlikely(hdev->bpf.destroyed))
+ return ERR_PTR(-ENODEV);
+
if (type >= HID_REPORT_TYPES)
return ERR_PTR(-EINVAL);
@@ -93,6 +96,9 @@ int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;
+ if (unlikely(hdev->bpf.destroyed))
+ return -ENODEV;
+
if (rtype >= HID_REPORT_TYPES)
return -EINVAL;
@@ -130,6 +136,9 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;
+ if (unlikely(hdev->bpf.destroyed))
+ return -ENODEV;
+
idx = srcu_read_lock(&hdev->bpf.srcu);
list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
srcu_read_lock_held(&hdev->bpf.srcu)) {
diff --git a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
index 1a0aeea6a081..a754710fc90b 100644
--- a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
+++ b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
@@ -157,6 +157,7 @@ static const __u8 fixed_rdesc_vendor[] = {
ReportCount(5) // padding
Input(Const)
// Byte 4 in report - just exists so we get to be a tablet pad
+ UsagePage_Digitizers
Usage_Dig_BarrelSwitch // BTN_STYLUS
ReportCount(1)
ReportSize(1)
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index ed34f5cd5a91..0639b1f43d88 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -614,7 +614,7 @@ static int apple_fetch_battery(struct hid_device *hdev)
static void apple_battery_timer_tick(struct timer_list *t)
{
- struct apple_sc *asc = from_timer(asc, t, battery_timer);
+ struct apple_sc *asc = timer_container_of(asc, t, battery_timer);
struct hid_device *hdev = asc->hdev;
if (apple_fetch_battery(hdev) == 0) {
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index bb7db9ae41c2..5e8ced7bc05a 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -167,7 +167,7 @@ static void battery_flat(struct appleir *appleir)
static void key_up_tick(struct timer_list *t)
{
- struct appleir *appleir = from_timer(appleir, t, key_up_timer);
+ struct appleir *appleir = timer_container_of(appleir, t, key_up_timer);
struct hid_device *hid = appleir->hid;
unsigned long flags;
diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c
index 029ccbaa1d12..6f251b284018 100644
--- a/drivers/hid/hid-appletb-kbd.c
+++ b/drivers/hid/hid-appletb-kbd.c
@@ -166,13 +166,14 @@ static int appletb_tb_key_to_slot(unsigned int code)
static void appletb_inactivity_timer(struct timer_list *t)
{
- struct appletb_kbd *kbd = from_timer(kbd, t, inactivity_timer);
+ struct appletb_kbd *kbd = timer_container_of(kbd, t, inactivity_timer);
if (kbd->backlight_dev && appletb_tb_autodim) {
if (!kbd->has_dimmed) {
backlight_device_set_brightness(kbd->backlight_dev, 1);
kbd->has_dimmed = true;
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_idle_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_idle_timeout));
} else if (!kbd->has_turned_off) {
backlight_device_set_brightness(kbd->backlight_dev, 0);
kbd->has_turned_off = true;
@@ -188,7 +189,8 @@ static void reset_inactivity_timer(struct appletb_kbd *kbd)
kbd->has_dimmed = false;
kbd->has_turned_off = false;
}
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_dim_timeout));
}
}
@@ -407,7 +409,8 @@ static int appletb_kbd_probe(struct hid_device *hdev, const struct hid_device_id
} else {
backlight_device_set_brightness(kbd->backlight_dev, 2);
timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0);
- mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ mod_timer(&kbd->inactivity_timer,
+ jiffies + secs_to_jiffies(appletb_tb_dim_timeout));
}
kbd->inp_handler.event = appletb_kbd_inp_event;
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 46e3e42f9eb5..4b45e31f0bab 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -52,6 +52,10 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define FEATURE_KBD_LED_REPORT_ID1 0x5d
#define FEATURE_KBD_LED_REPORT_ID2 0x5e
+#define ROG_ALLY_REPORT_SIZE 64
+#define ROG_ALLY_X_MIN_MCU 313
+#define ROG_ALLY_MIN_MCU 319
+
#define SUPPORT_KBD_BACKLIGHT BIT(0)
#define MAX_TOUCH_MAJOR 8
@@ -84,6 +88,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_MEDION_E1239T BIT(10)
#define QUIRK_ROG_NKEY_KEYBOARD BIT(11)
#define QUIRK_ROG_CLAYMORE_II_KEYBOARD BIT(12)
+#define QUIRK_ROG_ALLY_XPAD BIT(13)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -534,9 +539,102 @@ static bool asus_kbd_wmi_led_control_present(struct hid_device *hdev)
return !!(value & ASUS_WMI_DSTS_PRESENCE_BIT);
}
+/*
+ * We don't care about any part of the string except the version section.
+ * Example strings: FGA80100.RC72LA.312_T01, FGA80100.RC71LS.318_T01
+ * The bytes "5a 05 03 31 00 1a 13" and possibly more come before the version
+ * string, and there may be additional bytes after the version string such as
+ * "75 00 74 00 65 00" or a postfix such as "_T01"
+ */
+static int mcu_parse_version_string(const u8 *response, size_t response_size)
+{
+ const u8 *end = response + response_size;
+ const u8 *p = response;
+ int dots, err, version;
+ char buf[4];
+
+ dots = 0;
+ while (p < end && dots < 2) {
+ if (*p++ == '.')
+ dots++;
+ }
+
+ if (dots != 2 || p >= end || (p + 3) >= end)
+ return -EINVAL;
+
+ memcpy(buf, p, 3);
+ buf[3] = '\0';
+
+ err = kstrtoint(buf, 10, &version);
+ if (err || version < 0)
+ return -EINVAL;
+
+ return version;
+}
+
+static int mcu_request_version(struct hid_device *hdev)
+{
+ u8 *response __free(kfree) = kzalloc(ROG_ALLY_REPORT_SIZE, GFP_KERNEL);
+ const u8 request[] = { 0x5a, 0x05, 0x03, 0x31, 0x00, 0x20 };
+ int ret;
+
+ if (!response)
+ return -ENOMEM;
+
+ ret = asus_kbd_set_report(hdev, request, sizeof(request));
+ if (ret < 0)
+ return ret;
+
+ ret = hid_hw_raw_request(hdev, FEATURE_REPORT_ID, response,
+ ROG_ALLY_REPORT_SIZE, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0)
+ return ret;
+
+ ret = mcu_parse_version_string(response, ROG_ALLY_REPORT_SIZE);
+ if (ret < 0) {
+ pr_err("Failed to parse MCU version: %d\n", ret);
+ print_hex_dump(KERN_ERR, "MCU: ", DUMP_PREFIX_NONE,
+ 16, 1, response, ROG_ALLY_REPORT_SIZE, false);
+ }
+
+ return ret;
+}
+
+static void validate_mcu_fw_version(struct hid_device *hdev, int idProduct)
+{
+ int min_version, version;
+
+ version = mcu_request_version(hdev);
+ if (version < 0)
+ return;
+
+ switch (idProduct) {
+ case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY:
+ min_version = ROG_ALLY_MIN_MCU;
+ break;
+ case USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X:
+ min_version = ROG_ALLY_X_MIN_MCU;
+ break;
+ default:
+ min_version = 0;
+ }
+
+ if (version < min_version) {
+ hid_warn(hdev,
+ "The MCU firmware version must be %d or greater to avoid issues with suspend.\n",
+ min_version);
+ } else {
+ set_ally_mcu_hack(ASUS_WMI_ALLY_MCU_HACK_DISABLED);
+ set_ally_mcu_powersave(true);
+ }
+}
+
static int asus_kbd_register_leds(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+ struct usb_interface *intf;
+ struct usb_device *udev;
unsigned char kbd_func;
int ret;
@@ -560,6 +658,14 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
if (ret < 0)
return ret;
}
+
+ if (drvdata->quirks & QUIRK_ROG_ALLY_XPAD) {
+ intf = to_usb_interface(hdev->dev.parent);
+ udev = interface_to_usbdev(intf);
+ validate_mcu_fw_version(hdev,
+ le16_to_cpu(udev->descriptor.idProduct));
+ }
+
} else {
/* Initialize keyboard */
ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
@@ -1280,10 +1386,10 @@ static const struct hid_device_id asus_devices[] = {
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY),
- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X),
- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD),
QUIRK_ROG_CLAYMORE_II_KEYBOARD },
@@ -1327,4 +1433,5 @@ static struct hid_driver asus_driver = {
};
module_hid_driver(asus_driver);
+MODULE_IMPORT_NS("ASUS_WMI");
MODULE_LICENSE("GPL");
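As a worked example of mcu_parse_version_string() above: the parser advances past the second '.', then reads exactly three digits, so "FGA80100.RC72LA.312_T01" yields 312. A stand-alone re-creation of the same logic:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_version(const unsigned char *s, size_t n)
    {
            const unsigned char *end = s + n, *p = s;
            int dots = 0;
            char buf[4];

            while (p < end && dots < 2)
                    if (*p++ == '.')
                            dots++;

            if (dots != 2 || p >= end || p + 3 >= end)
                    return -1;

            memcpy(buf, p, 3);
            buf[3] = '\0';
            return atoi(buf);
    }

    int main(void)
    {
            const unsigned char s[] = "FGA80100.RC72LA.312_T01";

            printf("%d\n", parse_version(s, sizeof(s))); /* 312 */
            return 0;
    }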
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4741ff626771..b348d0464314 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2396,6 +2396,9 @@ int hid_hw_open(struct hid_device *hdev)
ret = hdev->ll_driver->open(hdev);
if (ret)
hdev->ll_open_count--;
+
+ if (hdev->driver->on_hid_hw_open)
+ hdev->driver->on_hid_hw_open(hdev);
}
mutex_unlock(&hdev->ll_open_lock);
@@ -2415,8 +2418,12 @@ EXPORT_SYMBOL_GPL(hid_hw_open);
void hid_hw_close(struct hid_device *hdev)
{
mutex_lock(&hdev->ll_open_lock);
- if (!--hdev->ll_open_count)
+ if (!--hdev->ll_open_count) {
hdev->ll_driver->close(hdev);
+
+ if (hdev->driver->on_hid_hw_close)
+ hdev->driver->on_hid_hw_close(hdev);
+ }
mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);
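The new optional on_hid_hw_open/on_hid_hw_close driver callbacks run under ll_open_lock on the transition to the first opener and away from the last one. A hedged sketch of a driver wiring them up (names and bodies illustrative):

    static void my_on_open(struct hid_device *hdev)
    {
            /* first opener: wake the device, start polling, ... */
    }

    static void my_on_close(struct hid_device *hdev)
    {
            /* last opener gone: stop polling, allow low-power state */
    }

    static struct hid_driver my_driver = {
            .name            = "my-hid",
            .on_hid_hw_open  = my_on_open,
            .on_hid_hw_close = my_on_close,
            /* .probe, .id_table, ... as usual */
    };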
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index afbd67aa9719..fee134a7eba3 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -507,7 +507,7 @@ static void corsair_void_status_work_handler(struct work_struct *work)
struct delayed_work *delayed_work;
int battery_ret;
- delayed_work = container_of(work, struct delayed_work, work);
+ delayed_work = to_delayed_work(work);
drvdata = container_of(delayed_work, struct corsair_void_drvdata,
delayed_status_work);
@@ -525,7 +525,7 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
struct delayed_work *delayed_work;
int firmware_ret;
- delayed_work = container_of(work, struct delayed_work, work);
+ delayed_work = to_delayed_work(work);
drvdata = container_of(delayed_work, struct corsair_void_drvdata,
delayed_firmware_work);
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f4c8d981aa0a..234fa82eab07 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -17,11 +17,13 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/nls.h>
#include <linux/string_choices.h>
#include <linux/usb/ch9.h>
@@ -185,7 +187,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -194,7 +196,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
if (ret >= 0)
ret = -EIO;
- goto exit;
+ return ret;
}
buf[1] &= ~BIT(offset);
@@ -207,25 +209,19 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
hid_err(hdev, "error setting GPIO config: %d\n", ret);
if (ret >= 0)
ret = -EIO;
- goto exit;
+ return ret;
}
- ret = 0;
-
-exit:
- mutex_unlock(&dev->lock);
- return ret;
+ return 0;
}
-static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
+ unsigned int offset, int value)
{
- struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
-
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? CP2112_GPIO_ALL_GPIO_MASK : 0;
buf[2] = BIT(offset);
@@ -236,7 +232,17 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
- mutex_unlock(&dev->lock);
+ return ret;
+}
+
+static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+
+ guard(mutex)(&dev->lock);
+
+ return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -246,23 +252,17 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_GET_LENGTH) {
hid_err(hdev, "error requesting GPIO values: %d\n", ret);
- ret = ret < 0 ? ret : -EIO;
- goto exit;
+ return ret < 0 ? ret : -EIO;
}
- ret = buf[1];
-
-exit:
- mutex_unlock(&dev->lock);
-
- return ret;
+ return buf[1];
}
static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -284,14 +284,14 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
u8 *buf = dev->in_out_buffer;
int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- goto fail;
+ return ret < 0 ? ret : -EIO;
}
buf[1] |= 1 << offset;
@@ -302,22 +302,16 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- goto fail;
+ return ret;
}
- mutex_unlock(&dev->lock);
-
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
- cp2112_gpio_set(chip, offset, value);
+ cp2112_gpio_set_unlocked(dev, offset, value);
return 0;
-
-fail:
- mutex_unlock(&dev->lock);
- return ret < 0 ? ret : -EIO;
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1205,7 +1199,11 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!dev->in_out_buffer)
return -ENOMEM;
- mutex_init(&dev->lock);
+ ret = devm_mutex_init(&hdev->dev, &dev->lock);
+ if (ret) {
+ hid_err(hdev, "mutex init failed\n");
+ return ret;
+ }
ret = hid_parse(hdev);
if (ret) {
@@ -1290,7 +1288,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.label = "cp2112_gpio";
dev->gc.direction_input = cp2112_gpio_direction_input;
dev->gc.direction_output = cp2112_gpio_direction_output;
- dev->gc.set = cp2112_gpio_set;
+ dev->gc.set_rv = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
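
guard(mutex)(&dev->lock), from <linux/cleanup.h>, takes the mutex and releases it automatically when the enclosing scope is left; that is what lets the error paths above return directly instead of jumping to an unlock label. The shape of the conversion, sketched with stand-in names:

struct dev_ctx { struct mutex lock; };	/* stand-in type */
int do_io(struct dev_ctx *ctx);		/* stand-in helper */

/* Before: every exit path must unlock, hence the goto-exit pattern. */
int op_old(struct dev_ctx *ctx)
{
	int ret;

	mutex_lock(&ctx->lock);
	ret = do_io(ctx);
	mutex_unlock(&ctx->lock);
	return ret;
}

/* After: the guard unlocks on every return path automatically. */
int op_new(struct dev_ctx *ctx)
{
	guard(mutex)(&ctx->lock);
	return do_io(ctx);
}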
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 0fb210e40a41..9eafff0b6ea4 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -192,7 +192,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
input_device->report_desc_size = le16_to_cpu(
- desc->desc[0].wDescriptorLength);
+ desc->rpt_desc.wDescriptorLength);
if (input_device->report_desc_size == 0) {
input_device->dev_info_status = -EINVAL;
goto cleanup;
@@ -210,7 +210,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
- le16_to_cpu(desc->desc[0].wDescriptorLength));
+ le16_to_cpu(desc->rpt_desc.wDescriptorLength));
/* Send the ack */
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 288a2b864cc4..e3fb4e2fe911 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -41,6 +41,10 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
+#define USB_VENDOR_ID_ADATA_XPG 0x125f
+#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505
+#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506
+
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155
@@ -92,6 +96,7 @@
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
+#define USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC 0x0323
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC 0x0324
diff --git a/drivers/hid/hid-kysona.c b/drivers/hid/hid-kysona.c
index d4c0406b3323..09bfe30d02cb 100644
--- a/drivers/hid/hid-kysona.c
+++ b/drivers/hid/hid-kysona.c
@@ -14,6 +14,7 @@
#define BATTERY_TIMEOUT_MS 5000
+#define ONLINE_REPORT_ID 3
#define BATTERY_REPORT_ID 4
struct kysona_drvdata {
@@ -80,11 +81,46 @@ static int kysona_battery_get_property(struct power_supply *psy,
return ret;
}
+static const char kysona_online_request[] = {
+ 0x08, ONLINE_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a
+};
+
static const char kysona_battery_request[] = {
0x08, BATTERY_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49
};
+static int kysona_m600_fetch_online(struct hid_device *hdev)
+{
+ u8 *write_buf;
+ int ret;
+
+ /* Request online information */
+ write_buf = kmemdup(kysona_online_request, sizeof(kysona_online_request), GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, kysona_online_request[0],
+ write_buf, sizeof(kysona_online_request),
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ if (ret < (int)sizeof(kysona_online_request)) {
+ hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
+ ret = -ENODATA;
+ }
+ kfree(write_buf);
+ return ret;
+}
+
+static void kysona_fetch_online(struct hid_device *hdev)
+{
+ int ret = kysona_m600_fetch_online(hdev);
+
+ if (ret < 0)
+ hid_dbg(hdev,
+ "Online query failed (err: %d)\n", ret);
+}
+
static int kysona_m600_fetch_battery(struct hid_device *hdev)
{
u8 *write_buf;
@@ -121,6 +157,7 @@ static void kysona_battery_timer_tick(struct work_struct *work)
struct kysona_drvdata, battery_work.work);
struct hid_device *hdev = drv_data->hdev;
+ kysona_fetch_online(hdev);
kysona_fetch_battery(hdev);
schedule_delayed_work(&drv_data->battery_work,
msecs_to_jiffies(BATTERY_TIMEOUT_MS));
@@ -160,6 +197,7 @@ static int kysona_battery_probe(struct hid_device *hdev)
power_supply_powers(drv_data->battery, &hdev->dev);
INIT_DELAYED_WORK(&drv_data->battery_work, kysona_battery_timer_tick);
+ kysona_fetch_online(hdev);
kysona_fetch_battery(hdev);
schedule_delayed_work(&drv_data->battery_work,
msecs_to_jiffies(BATTERY_TIMEOUT_MS));
@@ -206,12 +244,16 @@ static int kysona_raw_event(struct hid_device *hdev,
{
struct kysona_drvdata *drv_data = hid_get_drvdata(hdev);
- if (drv_data->battery && size == sizeof(kysona_battery_request) &&
+ if (size == sizeof(kysona_online_request) &&
+ data[0] == 8 && data[1] == ONLINE_REPORT_ID) {
+ drv_data->online = data[6];
+ }
+
+ if (size == sizeof(kysona_battery_request) &&
data[0] == 8 && data[1] == BATTERY_REPORT_ID) {
drv_data->battery_capacity = data[6];
drv_data->battery_charging = data[7];
drv_data->battery_voltage = (data[8] << 8) | data[9];
- drv_data->online = true;
}
return 0;
diff --git a/drivers/hid/hid-letsketch.c b/drivers/hid/hid-letsketch.c
index 8602c63ed9c6..11e21f988723 100644
--- a/drivers/hid/hid-letsketch.c
+++ b/drivers/hid/hid-letsketch.c
@@ -155,7 +155,8 @@ static int letsketch_setup_input_tablet_pad(struct letsketch_data *data)
static void letsketch_inrange_timeout(struct timer_list *t)
{
- struct letsketch_data *data = from_timer(data, t, inrange_timer);
+ struct letsketch_data *data = timer_container_of(data, t,
+ inrange_timer);
struct input_dev *input = data->input_tablet;
input_report_key(input, BTN_TOOL_PEN, 0);
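
timer_container_of() is the renamed from_timer(): identical arguments and expansion, recovering the structure that embeds the timer_list. The conversion seen throughout this diff is mechanical, e.g. (stand-in structure):

struct my_state {
	struct timer_list timer;
	int pending;
};

static void my_timeout(struct timer_list *t)
{
	struct my_state *st = timer_container_of(st, t, timer);

	st->pending = 0;
}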
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index c0a138f21ca4..445623dd1bd6 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -823,7 +823,7 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
if (entry->wdata.alternate_modes & BIT(i)) {
/* Print tag and full name */
- count += scnprintf(buf + count, PAGE_SIZE - count, "%s: %s",
+ count += sysfs_emit_at(buf, count, "%s: %s",
lg4ff_alternate_modes[i].tag,
!lg4ff_alternate_modes[i].product_id ? entry->wdata.real_name : lg4ff_alternate_modes[i].name);
if (count >= PAGE_SIZE - 1)
@@ -832,9 +832,9 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr
/* Mark the currently active mode with an asterisk */
if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id ||
(lg4ff_alternate_modes[i].product_id == 0 && entry->wdata.product_id == entry->wdata.real_product_id))
- count += scnprintf(buf + count, PAGE_SIZE - count, " *\n");
+ count += sysfs_emit_at(buf, count, " *\n");
else
- count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+ count += sysfs_emit_at(buf, count, "\n");
if (count >= PAGE_SIZE - 1)
return count;
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index adfa329e917b..36f034ac605d 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -218,7 +218,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
int pressure = 0;
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
x = (tdata[1] << 28 | tdata[0] << 20) >> 20;
y = -((tdata[2] << 24 | tdata[1] << 16) >> 20);
@@ -370,7 +371,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
if (report_undeciphered) {
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
else if (input->id.product !=
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
@@ -497,7 +499,8 @@ static int magicmouse_raw_event(struct hid_device *hdev,
}
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
@@ -519,7 +522,8 @@ static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
- if (msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ if ((msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) &&
field->report->id == MOUSE2_REPORT_ID) {
/*
* magic_mouse_raw_event has done all the work. Skip hidinput.
@@ -540,7 +544,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_KEY, input->evbit);
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);
if (emulate_3button)
@@ -625,7 +630,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
- input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC) {
input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
@@ -741,19 +747,25 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev)
int ret;
int feature_size;
- if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
- if (hdev->vendor == BT_VENDOR_ID_APPLE) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2:
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC:
+ switch (hdev->vendor) {
+ case BT_VENDOR_ID_APPLE:
feature_size = sizeof(feature_mt_trackpad2_bt);
feature = feature_mt_trackpad2_bt;
- } else { /* USB_VENDOR_ID_APPLE */
+ break;
+ default: /* USB_VENDOR_ID_APPLE */
feature_size = sizeof(feature_mt_trackpad2_usb);
feature = feature_mt_trackpad2_usb;
}
- } else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2:
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC:
feature_size = sizeof(feature_mt_mouse2);
feature = feature_mt_mouse2;
- } else {
+ break;
+ default:
feature_size = sizeof(feature_mt);
feature = feature_mt;
}
@@ -787,6 +799,7 @@ static int magicmouse_fetch_battery(struct hid_device *hdev)
if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
(hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC &&
hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
return -1;
@@ -809,7 +822,7 @@ static int magicmouse_fetch_battery(struct hid_device *hdev)
static void magicmouse_battery_timer_tick(struct timer_list *t)
{
- struct magicmouse_sc *msc = from_timer(msc, t, battery_timer);
+ struct magicmouse_sc *msc = timer_container_of(msc, t, battery_timer);
struct hid_device *hdev = msc->hdev;
if (magicmouse_fetch_battery(hdev) == 0) {
@@ -857,6 +870,7 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->vendor == USB_VENDOR_ID_APPLE &&
(id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
hdev->type != HID_TYPE_USBMOUSE)))
@@ -868,21 +882,27 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_stop_hw;
}
- if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
- report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
- report = hid_register_report(hdev, HID_INPUT_REPORT,
- MOUSE2_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
- id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
- if (id->vendor == BT_VENDOR_ID_APPLE)
+ switch (id->product) {
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE:
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE_REPORT_ID, 0);
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2:
+ case USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC:
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE2_REPORT_ID, 0);
+ break;
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2:
+ case USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC:
+ switch (id->vendor) {
+ case BT_VENDOR_ID_APPLE:
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_BT_REPORT_ID, 0);
- else /* USB_VENDOR_ID_APPLE */
+ break;
+ default:
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_USB_REPORT_ID, 0);
- } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ }
+ break;
+ default: /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID, 0);
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -909,7 +929,8 @@ static int magicmouse_probe(struct hid_device *hdev,
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
- if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
+ if (ret == -EIO && (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC)) {
schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
}
@@ -945,6 +966,7 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
if (hdev->vendor == USB_VENDOR_ID_APPLE &&
(hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC ||
hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
*rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
@@ -971,6 +993,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICMOUSE2_USBC), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
index bf57f7f6caa0..e6ea0a2140eb 100644
--- a/drivers/hid/hid-mcp2200.c
+++ b/drivers/hid/hid-mcp2200.c
@@ -127,8 +127,8 @@ static int mcp_cmd_read_all(struct mcp2200 *mcp)
return mcp->status;
}
-static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct mcp2200 *mcp = gpiochip_get_data(gc);
u8 value;
@@ -150,16 +150,20 @@ static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
if (status == sizeof(struct mcp_set_clear_outputs))
mcp->gpio_val = value;
+ else
+ status = -EIO;
mutex_unlock(&mcp->lock);
+
+ return status;
}
-static void mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
+static int mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
{
unsigned long mask = 1 << gpio_nr;
unsigned long bmap_value = value << gpio_nr;
- mcp_set_multiple(gc, &mask, &bmap_value);
+ return mcp_set_multiple(gc, &mask, &bmap_value);
}
static int mcp_get_multiple(struct gpio_chip *gc, unsigned long *mask,
@@ -263,9 +267,10 @@ static int mcp_direction_output(struct gpio_chip *gc, unsigned int gpio_nr,
bmap_value = value << gpio_nr;
ret = mcp_set_direction(gc, gpio_nr, MCP2200_DIR_OUT);
- if (!ret)
- mcp_set_multiple(gc, &mask, &bmap_value);
- return ret;
+ if (ret)
+ return ret;
+
+ return mcp_set_multiple(gc, &mask, &bmap_value);
}
static const struct gpio_chip template_chip = {
@@ -274,8 +279,8 @@ static const struct gpio_chip template_chip = {
.get_direction = mcp_get_direction,
.direction_input = mcp_direction_input,
.direction_output = mcp_direction_output,
- .set = mcp_set,
- .set_multiple = mcp_set_multiple,
+ .set_rv = mcp_set,
+ .set_multiple_rv = mcp_set_multiple,
.get = mcp_get,
.get_multiple = mcp_get_multiple,
.base = -1,
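
.set_rv and .set_multiple_rv are the int-returning variants of the gpio_chip setters, so an error from the HID/USB transport finally propagates to gpiolib callers instead of being dropped. The conversion pattern, with a stand-in transport helper:

int write_hw(struct gpio_chip *gc, unsigned int off, int val);	/* stand-in */

/* Old-style .set callback: any error is silently lost. */
static void ex_set(struct gpio_chip *gc, unsigned int off, int val)
{
	write_hw(gc, off, val);
}

/* New-style .set_rv callback: return 0 or a negative errno. */
static int ex_set_rv(struct gpio_chip *gc, unsigned int off, int val)
{
	return write_hw(gc, off, val);
}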
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 0f93c22a479f..6c0ac14f11a6 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -624,10 +624,10 @@ static int mcp_gpio_get(struct gpio_chip *gc,
return ret;
}
-static void mcp_gpio_set(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int mcp_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mcp2221 *mcp = gpiochip_get_data(gc);
+ int ret;
memset(mcp->txbuf, 0, 18);
mcp->txbuf[0] = MCP2221_GPIO_SET;
@@ -638,8 +638,10 @@ static void mcp_gpio_set(struct gpio_chip *gc,
mcp->txbuf[mcp->gp_idx] = !!value;
mutex_lock(&mcp->lock);
- mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 18);
mutex_unlock(&mcp->lock);
+
+ return ret;
}
static int mcp_gpio_dir_set(struct mcp2221 *mcp,
@@ -1206,7 +1208,7 @@ static int mcp2221_probe(struct hid_device *hdev,
mcp->gc->direction_input = mcp_gpio_direction_input;
mcp->gc->direction_output = mcp_gpio_direction_output;
mcp->gc->get_direction = mcp_gpio_get_direction;
- mcp->gc->set = mcp_gpio_set;
+ mcp->gc->set_rv = mcp_gpio_set;
mcp->gc->get = mcp_gpio_get;
mcp->gc->ngpio = MCP_NGPIO;
mcp->gc->base = -1;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7ac8e16e6158..b41001e02da7 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1745,7 +1745,7 @@ static void mt_release_contacts(struct hid_device *hid)
static void mt_expired_timeout(struct timer_list *t)
{
- struct mt_device *td = from_timer(td, t, release_timer);
+ struct mt_device *td = timer_container_of(td, t, release_timer);
struct hid_device *hdev = td->hdev;
/*
@@ -1887,6 +1887,16 @@ static void mt_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static void mt_on_hid_hw_open(struct hid_device *hdev)
+{
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
+}
+
+static void mt_on_hid_hw_close(struct hid_device *hdev)
+{
+ mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_NONE);
+}
+
/*
* This list contains only:
* - VID/PID of products not working with the default multitouch handling
@@ -2354,5 +2364,7 @@ static struct hid_driver mt_driver = {
.suspend = pm_ptr(mt_suspend),
.reset_resume = pm_ptr(mt_reset_resume),
.resume = pm_ptr(mt_resume),
+ .on_hid_hw_open = mt_on_hid_hw_open,
+ .on_hid_hw_close = mt_on_hid_hw_close,
};
module_hid_driver(mt_driver);
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index c6b922c2adba..74bddb2c3e82 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -227,7 +227,7 @@ drop_note:
static void pcmidi_sustained_note_release(struct timer_list *t)
{
- struct pcmidi_sustain *pms = from_timer(pms, t, timer);
+ struct pcmidi_sustain *pms = timer_container_of(pms, t, timer);
pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
pms->in_use = 0;
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 646171598e41..7fefeb413ec3 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -27,6 +27,8 @@
static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },
@@ -1061,7 +1063,7 @@ bool hid_ignore(struct hid_device *hdev)
}
if (hdev->type == HID_TYPE_USBMOUSE &&
- hid_match_id(hdev, hid_mouse_ignore_list))
+ hdev->quirks & HID_QUIRK_IGNORE_MOUSE)
return true;
return !!hid_match_id(hdev, hid_ignore_list);
@@ -1265,6 +1267,9 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
if (hid_match_id(hdev, hid_ignore_list))
quirks |= HID_QUIRK_IGNORE;
+ if (hid_match_id(hdev, hid_mouse_ignore_list))
+ quirks |= HID_QUIRK_IGNORE_MOUSE;
+
if (hid_match_id(hdev, hid_have_special_driver))
quirks |= HID_QUIRK_HAVE_SPECIAL_DRIVER;
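
Taken together, the two hid-quirks hunks split the old per-call list lookup into a probe-time match and a runtime flag test; the flag lives in hdev->quirks like any other HID_QUIRK_* bit. The resulting flow, condensed from the hunks above:

/* quirk computation time, hid_gets_squirk(): */
if (hid_match_id(hdev, hid_mouse_ignore_list))
	quirks |= HID_QUIRK_IGNORE_MOUSE;

/* ignore-decision time, hid_ignore(): */
if (hdev->type == HID_TYPE_USBMOUSE &&
    (hdev->quirks & HID_QUIRK_IGNORE_MOUSE))
	return true;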
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index a2be652b7bbd..b966e4044238 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -545,7 +545,7 @@ static void ghl_magic_poke_cb(struct urb *urb)
static void ghl_magic_poke(struct timer_list *t)
{
int ret;
- struct sony_sc *sc = from_timer(sc, t, ghl_poke_timer);
+ struct sony_sc *sc = timer_container_of(sc, t, ghl_poke_timer);
ret = usb_submit_urb(sc->ghl_urb, GFP_ATOMIC);
if (ret < 0)
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index dfd9d22ed559..949d307c66a8 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -1150,11 +1150,9 @@ static void steam_client_ll_close(struct hid_device *hdev)
struct steam_device *steam = hdev->driver_data;
unsigned long flags;
- bool connected;
spin_lock_irqsave(&steam->lock, flags);
steam->client_opened--;
- connected = steam->connected && !steam->client_opened;
spin_unlock_irqrestore(&steam->lock, flags);
schedule_work(&steam->unregister_work);
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 3b81468a1df2..0bf70664c35e 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
u8 ep_addr[2] = {b_ep, 0};
if (!usb_check_int_endpoints(usbif, ep_addr)) {
+ kfree(send_buf);
hid_err(hdev, "Unexpected non-int endpoint\n");
return;
}
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index a367df6ea01f..af98398d9247 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -32,8 +32,8 @@
*/
static void uclogic_inrange_timeout(struct timer_list *t)
{
- struct uclogic_drvdata *drvdata = from_timer(drvdata, t,
- inrange_timer);
+ struct uclogic_drvdata *drvdata = timer_container_of(drvdata, t,
+ inrange_timer);
struct input_dev *input = drvdata->pen_input;
if (input == NULL)
@@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev,
suffix = "System Control";
break;
}
- }
-
- if (suffix)
+ } else {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
+ if (!hi->input->name)
+ return -ENOMEM;
+ }
return 0;
}
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 8080083121d3..5b5fc460a4c5 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1240,7 +1240,7 @@ static void wiimote_schedule(struct wiimote_data *wdata)
static void wiimote_init_timeout(struct timer_list *t)
{
- struct wiimote_data *wdata = from_timer(wdata, t, timer);
+ struct wiimote_data *wdata = timer_container_of(wdata, t, timer);
wiimote_schedule(wdata);
}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index fa51155ebe39..8a8c4a46f927 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -82,15 +82,10 @@ static int quicki2c_acpi_get_dsd_property(struct acpi_device *adev, acpi_string
{
acpi_handle handle = acpi_device_handle(adev);
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object obj = { .type = type };
- struct acpi_object_list arg_list = {
- .count = 1,
- .pointer = &obj,
- };
union acpi_object *ret_obj;
acpi_status status;
- status = acpi_evaluate_object(handle, dsd_method_name, &arg_list, &buffer);
+ status = acpi_evaluate_object(handle, dsd_method_name, NULL, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle,
"Can't evaluate %s method: %d\n", dsd_method_name, status);
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index 4fc78b5a04b5..c105df7f6c87 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -1121,7 +1121,7 @@ EXPORT_SYMBOL_NS_GPL(thc_port_select, "INTEL_THC");
static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
{
- int frequency[] = {
+ static const int frequency[] = {
THC_SPI_FREQUENCY_7M,
THC_SPI_FREQUENCY_15M,
THC_SPI_FREQUENCY_17M,
@@ -1130,7 +1130,7 @@ static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
THC_SPI_FREQUENCY_31M,
THC_SPI_FREQUENCY_41M,
};
- u8 frequency_div[] = {
+ static const u8 frequency_div[] = {
THC_SPI_FRQ_DIV_2,
THC_SPI_FRQ_DIV_1,
THC_SPI_FRQ_DIV_7,
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 7d9297fad90e..aac0051a2cf6 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -106,7 +106,7 @@ static int hid_start_in(struct hid_device *hid)
/* I/O retry timer routine */
static void hid_retry_timeout(struct timer_list *t)
{
- struct usbhid_device *usbhid = from_timer(usbhid, t, io_retry);
+ struct usbhid_device *usbhid = timer_container_of(usbhid, t, io_retry);
struct hid_device *hid = usbhid->hid;
dev_dbg(&usbhid->intf->dev, "retrying intr urb\n");
@@ -984,12 +984,11 @@ static int usbhid_parse(struct hid_device *hid)
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
+ struct hid_class_descriptor *hcdesc;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
- int ret, n;
- int num_descriptors;
- size_t offset = offsetof(struct hid_descriptor, desc);
+ int ret;
quirks = hid_lookup_quirk(hid);
@@ -1011,20 +1010,19 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
- if (hdesc->bLength < sizeof(struct hid_descriptor)) {
- dbg_hid("hid descriptor is too short\n");
+ if (!hdesc->bNumDescriptors ||
+ hdesc->bLength != sizeof(*hdesc) +
+ (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
+ dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
+ hdesc->bLength, hdesc->bNumDescriptors);
return -EINVAL;
}
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- num_descriptors = min_t(int, hdesc->bNumDescriptors,
- (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
-
- for (n = 0; n < num_descriptors; n++)
- if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
- rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
+ if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT)
+ rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength);
if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
dbg_hid("weird size of report descriptor (%u)\n", rsize);
@@ -1052,6 +1050,11 @@ static int usbhid_parse(struct hid_device *hid)
goto err;
}
+ if (hdesc->bNumDescriptors > 1)
+ hid_warn(intf,
+ "%u unsupported optional hid class descriptors\n",
+ (int)(hdesc->bNumDescriptors - 1));
+
hid->quirks |= quirks;
return 0;
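
The new check enforces exact length consistency now that struct hid_descriptor carries one mandatory rpt_desc plus optional trailing class descriptors, replacing the old loop that clamped bNumDescriptors to whatever fit in bLength. With the usual packed layout (6 fixed bytes plus 3 bytes per class descriptor) the invariant works out as in this sketch:

/* bLength must be sizeof(*hdesc) + (bNumDescriptors - 1) * 3, i.e.:
 *   bNumDescriptors = 1  ->  bLength = 9   (the common case)
 *   bNumDescriptors = 2  ->  bLength = 12
 */
static bool hid_desc_len_ok(__u8 blen, __u8 ndesc)
{
	return ndesc && blen == 6 + 3 * ndesc;
}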
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 1556d4287fa5..eaf099b2efdb 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -70,10 +70,16 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
{
while (!kfifo_is_empty(fifo)) {
int size = kfifo_peek_len(fifo);
- u8 *buf = kzalloc(size, GFP_KERNEL);
+ u8 *buf;
unsigned int count;
int err;
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf) {
+ kfifo_skip(fifo);
+ continue;
+ }
+
count = kfifo_out(fifo, buf, size);
if (count != size) {
// Hard to say what is the "right" action in this
@@ -81,6 +87,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
// to flush seems reasonable enough, however.
hid_warn(hdev, "%s: removed fifo entry with unexpected size\n",
__func__);
+ kfree(buf);
continue;
}
err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
@@ -2361,6 +2368,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
unsigned int connect_mask = HID_CONNECT_HIDRAW;
features->pktlen = wacom_compute_pktlen(hdev);
+ if (!features->pktlen)
+ return -ENODEV;
if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL))
return -ENOMEM;
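
The allocation-failure path added above preserves the drain-loop invariant: every iteration must consume the head entry, via kfifo_out() on success or kfifo_skip() on failure, otherwise the loop would spin forever on the same entry. The whole pattern, condensed (process() is a stand-in consumer):

void process(u8 *buf, int size);	/* stand-in */

static void drain(struct kfifo *fifo)
{
	while (!kfifo_is_empty(fifo)) {
		int size = kfifo_peek_len(fifo);
		u8 *buf = kzalloc(size, GFP_KERNEL);

		if (!buf) {
			kfifo_skip(fifo);	/* consume head, keep going */
			continue;
		}
		if (kfifo_out(fifo, buf, size) == size)
			process(buf, size);
		kfree(buf);
	}
}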
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 5107a676e24f..955b39d22524 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -63,7 +63,7 @@ static void wacom_force_proxout(struct wacom_wac *wacom_wac)
void wacom_idleprox_timeout(struct timer_list *list)
{
- struct wacom *wacom = from_timer(wacom, list, idleprox_timer);
+ struct wacom *wacom = timer_container_of(wacom, list, idleprox_timer);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
if (!wacom_wac->hid_data.sense_state) {
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index cbc5e72857de..d10a01f3eb9e 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -453,7 +453,7 @@ static void ssip_error(struct hsi_client *cl)
static void ssip_keep_alive(struct timer_list *t)
{
- struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive);
+ struct ssi_protocol *ssi = timer_container_of(ssi, t, keep_alive);
struct hsi_client *cl = ssi->cl;
dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
@@ -480,7 +480,7 @@ static void ssip_keep_alive(struct timer_list *t)
static void ssip_rx_wd(struct timer_list *t)
{
- struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd);
+ struct ssi_protocol *ssi = timer_container_of(ssi, t, rx_wd);
struct hsi_client *cl = ssi->cl;
dev_err(&cl->device, "Watchdog triggered\n");
@@ -489,7 +489,7 @@ static void ssip_rx_wd(struct timer_list *t)
static void ssip_tx_wd(struct timer_list *t)
{
- struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd);
+ struct ssi_protocol *ssi = timer_container_of(ssi, t, tx_wd);
struct hsi_client *cl = ssi->cl;
dev_err(&cl->device, "Watchdog triggered\n");
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 6c1416167bd2..1cd188b73b74 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -5,17 +5,18 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on (X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \
- || (ACPI && ARM64 && !CPU_BIG_ENDIAN)
+ || (ARM64 && !CPU_BIG_ENDIAN)
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
+ select SYSFB if !HYPERV_VTL_MODE
help
Select this option to run Linux as a Hyper-V client operating
system.
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
- depends on X86_64 && HYPERV
+ depends on (X86_64 || ARM64) && HYPERV
depends on SMP
default n
help
@@ -31,7 +32,7 @@ config HYPERV_VTL_MODE
Select this option to build a Linux kernel to run at a VTL other than
the normal VTL0, which currently is only VTL2. This option
- initializes the x86 platform for VTL2, and adds the ability to boot
+ initializes the kernel to run in VTL2, and adds the ability to boot
secondary CPUs directly into 64-bit context as required for VTLs other
than 0. A kernel built with this option must run at VTL2, and will
not run as a normal guest.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index fb8cd8469328..35f26fa1ffe7 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -1077,68 +1077,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
EXPORT_SYMBOL(vmbus_sendpacket);
/*
- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
- * packets using a GPADL Direct packet type. This interface allows you
- * to control notifying the host. This will be useful for sending
- * batched data. Also the sender can control the send flags
- * explicitly.
- */
-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount, void *buffer, u32 bufferlen,
- u64 requestid)
-{
- int i;
- struct vmbus_channel_packet_page_buffer desc;
- u32 descsize;
- u32 packetlen;
- u32 packetlen_aligned;
- struct kvec bufferlist[3];
- u64 aligned_data = 0;
-
- if (pagecount > MAX_PAGE_BUFFER_COUNT)
- return -EINVAL;
-
- /*
- * Adjust the size down since vmbus_channel_packet_page_buffer is the
- * largest size we support
- */
- descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
- ((MAX_PAGE_BUFFER_COUNT - pagecount) *
- sizeof(struct hv_page_buffer));
- packetlen = descsize + bufferlen;
- packetlen_aligned = ALIGN(packetlen, sizeof(u64));
-
- /* Setup the descriptor */
- desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
- desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
- desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
- desc.length8 = (u16)(packetlen_aligned >> 3);
- desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
- desc.reserved = 0;
- desc.rangecount = pagecount;
-
- for (i = 0; i < pagecount; i++) {
- desc.range[i].len = pagebuffers[i].len;
- desc.range[i].offset = pagebuffers[i].offset;
- desc.range[i].pfn = pagebuffers[i].pfn;
- }
-
- bufferlist[0].iov_base = &desc;
- bufferlist[0].iov_len = descsize;
- bufferlist[1].iov_base = buffer;
- bufferlist[1].iov_len = bufferlen;
- bufferlist[2].iov_base = &aligned_data;
- bufferlist[2].iov_len = (packetlen_aligned - packetlen);
-
- return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
-}
-EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
-
-/*
- * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
+ * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets
* using a GPADL Direct packet type.
- * The buffer includes the vmbus descriptor.
+ * The desc argument must include space for the VMBus descriptor. The
+ * rangecount field must already be set.
*/
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *desc,
@@ -1160,7 +1102,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
- desc->rangecount = 1;
bufferlist[0].iov_base = desc;
bufferlist[0].iov_len = desc_size;
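
With vmbus_sendpacket_pagebuffer() gone, callers that sent single-page ranges move to vmbus_sendpacket_mpb_desc() and now own both the descriptor sizing and the rangecount field. A minimal caller-side sketch (one range covering one Hyper-V page; not taken from an in-tree caller, and assuming the usual vmbus_packet_mpb_array/hv_mpb_array layouts):

u32 desc_size = sizeof(struct vmbus_packet_mpb_array) + sizeof(u64);
struct vmbus_packet_mpb_array *desc = kzalloc(desc_size, GFP_ATOMIC);
int ret;

if (!desc)
	return -ENOMEM;

desc->rangecount = 1;				/* now the caller's job */
desc->range.offset = 0;
desc->range.len = HV_HYP_PAGE_SIZE;
desc->range.pfn_array[0] = page_to_hvpfn(pg);	/* pg: a page the caller owns */

ret = vmbus_sendpacket_mpb_desc(channel, desc, desc_size,
				buffer, buflen, requestid);
kfree(desc);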
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 8351360bba16..be490c598785 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -207,10 +207,19 @@ int vmbus_connect(void)
mutex_init(&vmbus_connection.channel_mutex);
/*
+ * The following Hyper-V interrupt and monitor pages can be used by
+ * UIO for mapping to user-space, so they should always be allocated on
+ * system page boundaries. The system page size must be >= the Hyper-V
+ * page size.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
+
+ /*
* Setup the vmbus event connection for channel interrupt
* abstraction stuff
*/
- vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
+ vmbus_connection.int_page =
+ (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;
@@ -225,8 +234,8 @@ int vmbus_connect(void)
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
- vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
+ vmbus_connection.monitor_pages[0] = (void *)__get_free_page(GFP_KERNEL);
+ vmbus_connection.monitor_pages[1] = (void *)__get_free_page(GFP_KERNEL);
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
@@ -342,21 +351,23 @@ void vmbus_disconnect(void)
destroy_workqueue(vmbus_connection.work_queue);
if (vmbus_connection.int_page) {
- hv_free_hyperv_page(vmbus_connection.int_page);
+ free_page((unsigned long)vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}
if (vmbus_connection.monitor_pages[0]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[0], 1))
- hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+ free_page((unsigned long)
+ vmbus_connection.monitor_pages[0]);
vmbus_connection.monitor_pages[0] = NULL;
}
if (vmbus_connection.monitor_pages[1]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[1], 1))
- hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+ free_page((unsigned long)
+ vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[1] = NULL;
}
}
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 59792e00cecf..49898d10faff 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -105,45 +105,6 @@ void __init hv_common_free(void)
hv_synic_eventring_tail = NULL;
}
-/*
- * Functions for allocating and freeing memory with size and
- * alignment HV_HYP_PAGE_SIZE. These functions are needed because
- * the guest page size may not be the same as the Hyper-V page
- * size. We depend upon kmalloc() aligning power-of-two size
- * allocations to the allocation size boundary, so that the
- * allocated memory appears to Hyper-V as a page of the size
- * it expects.
- */
-
-void *hv_alloc_hyperv_page(void)
-{
- BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);
-
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL);
- else
- return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
-
-void *hv_alloc_hyperv_zeroed_page(void)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- else
- return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
-
-void hv_free_hyperv_page(void *addr)
-{
- if (PAGE_SIZE == HV_HYP_PAGE_SIZE)
- free_page((unsigned long)addr);
- else
- kfree(addr);
-}
-EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
-
static void *hv_panic_page;
/*
@@ -272,7 +233,7 @@ static void hv_kmsg_dump_unregister(void)
atomic_notifier_chain_unregister(&panic_notifier_list,
&hyperv_panic_report_block);
- hv_free_hyperv_page(hv_panic_page);
+ kfree(hv_panic_page);
hv_panic_page = NULL;
}
@@ -280,7 +241,7 @@ static void hv_kmsg_dump_register(void)
{
int ret;
- hv_panic_page = hv_alloc_hyperv_zeroed_page();
+ hv_panic_page = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hv_panic_page) {
pr_err("Hyper-V: panic message page memory allocation failed\n");
return;
@@ -289,7 +250,7 @@ static void hv_kmsg_dump_register(void)
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret) {
pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
- hv_free_hyperv_page(hv_panic_page);
+ kfree(hv_panic_page);
hv_panic_page = NULL;
}
}
@@ -317,6 +278,37 @@ void __init hv_get_partition_id(void)
pr_err("Hyper-V: failed to get partition ID: %#x\n",
hv_result(status));
}
+#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
+u8 __init get_vtl(void)
+{
+ u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
+ struct hv_input_get_vp_registers *input;
+ struct hv_output_get_vp_registers *output;
+ unsigned long flags;
+ u64 ret;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, struct_size(input, names, 1));
+ input->partition_id = HV_PARTITION_ID_SELF;
+ input->vp_index = HV_VP_INDEX_SELF;
+ input->input_vtl.as_uint8 = 0;
+ input->names[0] = HV_REGISTER_VSM_VP_STATUS;
+
+ ret = hv_do_hypercall(control, input, output);
+ if (hv_result_success(ret)) {
+ ret = output->values[0].reg8 & HV_VTL_MASK;
+ } else {
+ pr_err("Failed to get VTL(error: %lld) exiting...\n", ret);
+ BUG();
+ }
+
+ local_irq_restore(flags);
+ return ret;
+}
+#endif
int __init hv_common_init(void)
{
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index e3d51a316316..33b524b4eb5e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,7 +45,8 @@ struct vmbus_dynid {
struct hv_vmbus_device_id id;
};
-static struct device *hv_dev;
+/* VMBus Root Device */
+static struct device *vmbus_root_device;
static int hyperv_cpuhp_online;
@@ -80,9 +81,15 @@ static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);
+struct device *hv_get_vmbus_root_device(void)
+{
+ return vmbus_root_device;
+}
+EXPORT_SYMBOL_GPL(hv_get_vmbus_root_device);
+
static int vmbus_exists(void)
{
- if (hv_dev == NULL)
+ if (vmbus_root_device == NULL)
return -ENODEV;
return 0;
@@ -707,7 +714,30 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(const struct hv_driver *
return id;
}
-/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices
+ *
+ * This function can race with vmbus_device_register(). This function is
+ * typically running on a user thread in response to writing to the "new_id"
+ * sysfs entry for a driver. vmbus_device_register() is running on a
+ * workqueue thread in response to the Hyper-V host offering a device to the
+ * guest. This function calls driver_attach(), which looks for an existing
+ * device matching the new id, and attaches the driver to which the new id
+ * has been assigned. vmbus_device_register() calls device_register(), which
+ * looks for a driver that matches the device being registered. If both
+ * operations are running simultaneously, the device driver probe function runs
+ * on whichever thread establishes the linkage between the driver and device.
+ *
+ * In most cases, it doesn't matter which thread runs the driver probe
+ * function. But if vmbus_device_register() does not find a matching driver,
+ * it proceeds to create the "channels" subdirectory and numbered per-channel
+ * subdirectory in sysfs. While that multi-step creation is in progress, this
+ * function could run the driver probe function. If the probe function checks
+ * for, or operates on, entries in the "channels" subdirectory, including by
+ * calling hv_create_ring_sysfs(), the operation may or may not succeed
+ * depending on the race. The race can't create a kernel failure in VMBus
+ * or device subsystem code, but probe functions in VMBus drivers doing such
+ * operations must be prepared for the failure case.
+ */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
struct vmbus_dynid *dynid;
@@ -861,7 +891,7 @@ static int vmbus_dma_configure(struct device *child_device)
* On x86/x64 coherence is assumed and these calls have no effect.
*/
hv_setup_dma_ops(child_device,
- device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
+ device_get_dma_attr(vmbus_root_device) == DEV_DMA_COHERENT);
return 0;
}
@@ -1841,7 +1871,7 @@ static struct attribute *vmbus_chan_attrs[] = {
NULL
};
-static struct bin_attribute *vmbus_chan_bin_attrs[] = {
+static const struct bin_attribute *vmbus_chan_bin_attrs[] = {
&chan_attr_ring_buffer,
NULL
};
@@ -1921,7 +1951,8 @@ static const struct kobj_type vmbus_chan_ktype = {
* ring for userspace to use.
* Note: Race conditions can happen with userspace and it is not encouraged to create new
* use-cases for this. This was added to maintain backward compatibility, while solving
- * one of the race conditions in uio_hv_generic while creating sysfs.
+ * one of the race conditions in uio_hv_generic while creating sysfs. See comments with
+ * vmbus_add_dynid() and vmbus_device_register().
*
* Returns 0 on success or error code on failure.
*/
@@ -2037,7 +2068,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
&child_device_obj->channel->offermsg.offer.if_instance);
child_device_obj->device.bus = &hv_bus;
- child_device_obj->device.parent = hv_dev;
+ child_device_obj->device.parent = vmbus_root_device;
child_device_obj->device.release = vmbus_device_release;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
@@ -2055,6 +2086,20 @@ int vmbus_device_register(struct hv_device *child_device_obj)
return ret;
}
+ /*
+ * If device_register() found a driver to assign to the device, the
+ * driver's probe function has already run at this point. If that
+ * probe function accesses or operates on the "channels" subdirectory
+ * in sysfs, those operations will have failed because the "channels"
+ * subdirectory doesn't exist until the code below runs. Or if the
+ * probe function creates a /dev entry, a user space program could
+ * find and open the /dev entry, and then create a race by accessing
+ * the "channels" subdirectory while the creation steps are in progress
+ * here. The race can't result in a kernel failure, but the user space
+ * program may get an error in accessing "channels" or its
+ * subdirectories. See also comments with vmbus_add_dynid() about a
+ * related race condition.
+ */
child_device_obj->channels_kset = kset_create_and_add("channels",
NULL, kobj);
if (!child_device_obj->channels_kset) {
@@ -2412,7 +2457,7 @@ static int vmbus_acpi_add(struct platform_device *pdev)
struct acpi_device *ancestor;
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
- hv_dev = &device->dev;
+ vmbus_root_device = &device->dev;
/*
* Older versions of Hyper-V for ARM64 fail to include the _CCA
@@ -2465,6 +2510,31 @@ static int vmbus_acpi_add(struct platform_device *pdev)
}
#endif
+static int vmbus_set_irq(struct platform_device *pdev)
+{
+ struct irq_data *data;
+ int irq;
+ irq_hw_number_t hwirq;
+
+ irq = platform_get_irq(pdev, 0);
+ /* platform_get_irq() may not return 0. */
+ if (irq < 0)
+ return irq;
+
+ data = irq_get_irq_data(irq);
+ if (!data) {
+ pr_err("No interrupt data for VMBus virq %d\n", irq);
+ return -ENODEV;
+ }
+ hwirq = irqd_to_hwirq(data);
+
+ vmbus_irq = irq;
+ vmbus_interrupt = hwirq;
+ pr_debug("VMBus virq %d, hwirq %d\n", vmbus_irq, vmbus_interrupt);
+
+ return 0;
+}
+
static int vmbus_device_add(struct platform_device *pdev)
{
struct resource **cur_res = &hyperv_mmio;
@@ -2473,12 +2543,17 @@ static int vmbus_device_add(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;
- hv_dev = &pdev->dev;
+ vmbus_root_device = &pdev->dev;
ret = of_range_parser_init(&parser, np);
if (ret)
return ret;
+ if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR))
+ ret = vmbus_set_irq(pdev);
+ if (ret)
+ return ret;
+
for_each_of_range(&parser, &range) {
struct resource *res;
@@ -2786,7 +2861,7 @@ static int __init hv_acpi_init(void)
if (ret)
return ret;
- if (!hv_dev) {
+ if (!vmbus_root_device) {
ret = -ENODEV;
goto cleanup;
}
@@ -2817,7 +2892,7 @@ static int __init hv_acpi_init(void)
cleanup:
platform_driver_unregister(&vmbus_platform_driver);
- hv_dev = NULL;
+ vmbus_root_device = NULL;
return ret;
}
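
For driver authors, the practical consequence of the two race comments above is that probe must treat work against the "channels" sysfs subdirectory as best-effort. A sketch (hypothetical driver; assumes the hv_create_ring_sysfs() interface referenced above, with a stand-in mmap callback):

static int ex_mmap_rb(struct vmbus_channel *chan, struct vm_area_struct *vma)
{
	return -EOPNOTSUPP;	/* a real driver would map the ring here */
}

static int ex_probe(struct hv_device *dev, const struct hv_vmbus_device_id *id)
{
	int ret = hv_create_ring_sysfs(dev->channel, ex_mmap_rb);

	if (ret)	/* may lose the race with "channels" creation */
		dev_warn(&dev->device,
			 "ring sysfs unavailable (%d), continuing\n", ret);
	return 0;
}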
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f91f713b0105..079620dd4286 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -335,6 +335,26 @@ config SENSORS_K10TEMP
This driver can also be built as a module. If so, the module
will be called k10temp.
+config SENSORS_KBATT
+ tristate "KEBA battery controller support"
+ depends on KEBA_CP500
+ help
+ This driver supports the battery monitoring controller found in
+ KEBA system FPGA devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called kbatt.
+
+config SENSORS_KFAN
+ tristate "KEBA fan controller support"
+ depends on KEBA_CP500
+ help
+ This driver supports the fan controller found in KEBA system
+ FPGA devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called kfan.
+
config SENSORS_FAM15H_POWER
tristate "AMD Family 15h processor power"
depends on X86 && PCI && CPU_SUP_AMD
@@ -1308,6 +1328,15 @@ config SENSORS_MAX31790
This driver can also be built as a module. If so, the module
will be called max31790.
+config SENSORS_MAX77705
+ tristate "MAX77705 current and voltage sensor"
+ depends on MFD_MAX77705
+ help
+ If you say yes here you get support for MAX77705 sensors connected via I2C.
+
+ This driver can also be built as a module. If so, the module
+ will be called max77705-hwmon.
+
config SENSORS_MC34VR500
tristate "NXP MC34VR500 hardware monitoring driver"
depends on I2C
@@ -1795,17 +1824,6 @@ config SENSORS_NZXT_SMART2
source "drivers/hwmon/occ/Kconfig"
-config SENSORS_OXP
- tristate "OneXPlayer EC fan control"
- depends on ACPI_EC
- depends on X86
- help
- If you say yes here you get support for fan readings and control over
- OneXPlayer handheld devices. Only OneXPlayer mini AMD handheld variant
- boards are supported.
-
- Can also be built as a module. In that case it will be called oxp-sensors.
-
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
@@ -1887,16 +1905,6 @@ config SENSORS_SBTSI
This driver can also be built as a module. If so, the module will
be called sbtsi_temp.
-config SENSORS_SBRMI
- tristate "Emulated SB-RMI sensor"
- depends on I2C
- help
- If you say yes here you get support for emulated RMI
- sensors on AMD SoCs with APML interface connected to a BMC device.
-
- This driver can also be built as a module. If so, the module will
- be called sbrmi.
-
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 766c652ef22b..48e5866c0c9a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -110,6 +110,8 @@ obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
+obj-$(CONFIG_SENSORS_KBATT) += kbatt.o
+obj-$(CONFIG_SENSORS_KFAN) += kfan.o
obj-$(CONFIG_SENSORS_LAN966X) += lan966x-hwmon.o
obj-$(CONFIG_SENSORS_LENOVO_EC) += lenovo-ec-sensors.o
obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
@@ -161,6 +163,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_MAX31827) += max31827.o
+obj-$(CONFIG_SENSORS_MAX77705) += max77705-hwmon.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MC34VR500) += mc34vr500.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
@@ -183,7 +186,6 @@ obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_NZXT_KRAKEN2) += nzxt-kraken2.o
obj-$(CONFIG_SENSORS_NZXT_KRAKEN3) += nzxt-kraken3.o
obj-$(CONFIG_SENSORS_NZXT_SMART2) += nzxt-smart2.o
-obj-$(CONFIG_SENSORS_OXP) += oxp-sensors.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
index 312ef3e98754..d1c55e2eb479 100644
--- a/drivers/hwmon/aht10.c
+++ b/drivers/hwmon/aht10.c
@@ -94,7 +94,7 @@ struct aht10_data {
unsigned int meas_size;
};
-/**
+/*
* aht10_init() - Initialize an AHT10/AHT20 chip
* @data: the data associated with this AHT10/AHT20 chip
* Return: 0 if successful, 1 if not
@@ -124,7 +124,7 @@ static int aht10_init(struct aht10_data *data)
return 0;
}
-/**
+/*
* aht10_polltime_expired() - check if the minimum poll interval has
* expired
* @data: the data containing the time to compare
@@ -140,7 +140,7 @@ static int aht10_polltime_expired(struct aht10_data *data)
DECLARE_CRC8_TABLE(crc8_table);
-/**
+/*
* crc8_check() - check crc of the sensor's measurements
* @raw_data: data frame received from sensor(including crc as the last byte)
* @count: size of the data frame
@@ -155,7 +155,7 @@ static int crc8_check(u8 *raw_data, int count)
return crc8(crc8_table, raw_data, count, CRC8_INIT_VALUE);
}
-/**
+/*
* aht10_read_values() - read and parse the raw data from the AHT10/AHT20
* @data: the struct aht10_data to use for the lock
* Return: 0 if successful, 1 if not
@@ -214,7 +214,7 @@ static int aht10_read_values(struct aht10_data *data)
return 0;
}
-/**
+/*
* aht10_interval_write() - store the given minimum poll interval.
* Return: 0 on success, -EINVAL if a value lower than the
* AHT10_MIN_POLL_INTERVAL is given
@@ -226,7 +226,7 @@ static ssize_t aht10_interval_write(struct aht10_data *data,
return 0;
}
-/**
+/*
* aht10_interval_read() - read the minimum poll interval
* in milliseconds
*/
@@ -237,7 +237,7 @@ static ssize_t aht10_interval_read(struct aht10_data *data,
return 0;
}
-/**
+/*
* aht10_temperature1_read() - read the temperature in millidegrees
*/
static int aht10_temperature1_read(struct aht10_data *data, long *val)
@@ -252,7 +252,7 @@ static int aht10_temperature1_read(struct aht10_data *data, long *val)
return 0;
}
-/**
+/*
* aht10_humidity1_read() - read the relative humidity in millipercent
*/
static int aht10_humidity1_read(struct aht10_data *data, long *val)
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 1e3c6acd8974..13a789cc85d2 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -23,9 +23,12 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
+#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <dt-bindings/pwm/pwm.h>
+
/*
* Addresses to scan.
*/
@@ -37,7 +40,7 @@ static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e,
* Insmod parameters
*/
-static int pwminv; /*Inverted PWM output. */
+static int pwminv = -1; /*Inverted PWM output. */
module_param(pwminv, int, 0444);
static int init = 1; /*Power-on initialization.*/
@@ -845,9 +848,43 @@ static int amc6821_detect(struct i2c_client *client, struct i2c_board_info *info
return 0;
}
-static int amc6821_init_client(struct amc6821_data *data)
+static enum pwm_polarity amc6821_pwm_polarity(struct i2c_client *client)
+{
+ enum pwm_polarity polarity = PWM_POLARITY_NORMAL;
+ struct of_phandle_args args;
+ struct device_node *fan_np;
+
+ /*
+	 * For backward compatibility, the pwminv module parameter always
+	 * takes precedence over any other device description.
+ */
+ if (pwminv == 0)
+ return PWM_POLARITY_NORMAL;
+ if (pwminv > 0)
+ return PWM_POLARITY_INVERSED;
+
+ fan_np = of_get_child_by_name(client->dev.of_node, "fan");
+ if (!fan_np)
+ return PWM_POLARITY_NORMAL;
+
+ if (of_parse_phandle_with_args(fan_np, "pwms", "#pwm-cells", 0, &args))
+ goto out;
+ of_node_put(args.np);
+
+ if (args.args_count != 2)
+ goto out;
+
+ if (args.args[1] & PWM_POLARITY_INVERTED)
+ polarity = PWM_POLARITY_INVERSED;
+out:
+ of_node_put(fan_np);
+ return polarity;
+}
+
+static int amc6821_init_client(struct i2c_client *client, struct amc6821_data *data)
{
struct regmap *regmap = data->regmap;
+ u32 regval;
int err;
if (init) {
@@ -864,11 +901,14 @@ static int amc6821_init_client(struct amc6821_data *data)
if (err)
return err;
+ regval = AMC6821_CONF1_START;
+ if (amc6821_pwm_polarity(client) == PWM_POLARITY_INVERSED)
+ regval |= AMC6821_CONF1_PWMINV;
+
err = regmap_update_bits(regmap, AMC6821_REG_CONF1,
AMC6821_CONF1_THERMOVIE | AMC6821_CONF1_FANIE |
AMC6821_CONF1_START | AMC6821_CONF1_PWMINV,
- AMC6821_CONF1_START |
- (pwminv ? AMC6821_CONF1_PWMINV : 0));
+ regval);
if (err)
return err;
}
@@ -916,7 +956,7 @@ static int amc6821_probe(struct i2c_client *client)
"Failed to initialize regmap\n");
data->regmap = regmap;
- err = amc6821_init_client(data);
+ err = amc6821_init_client(client, data);
if (err)
return err;
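
The polarity lookup added above relies on the layout of a two-cell PWM specifier: the second cell carries flag bits, and PWM_POLARITY_INVERTED (bit 0, from dt-bindings/pwm/pwm.h) marks an inverted output. A minimal sketch of that mapping, assuming a two-cell specifier (the helper name is illustrative, not driver code):

static enum pwm_polarity polarity_from_flags(u32 flags_cell)
{
	/* bit 0 of the second "pwms" cell selects inverted polarity */
	return (flags_cell & PWM_POLARITY_INVERTED) ?
		PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
}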
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 006ced5ab6e6..e0a95197c71b 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -169,7 +169,11 @@ enum board_family {
family_intel_600_series
};
-/* All the known sensors for ASUS EC controllers */
+/*
+ * All the known sensors for ASUS EC controllers. These arrays have to be sorted
+ * by the full ((bank << 8) + index) register index (see asus_ec_block_read() as
+ * to why).
+ */
static const struct ec_sensor_info sensors_family_amd_400[] = {
[ec_sensor_temp_chipset] =
EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
@@ -183,10 +187,10 @@ static const struct ec_sensor_info sensors_family_amd_400[] = {
EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
[ec_sensor_in_cpu_core] =
EC_SENSOR("CPU Core", hwmon_in, 2, 0x00, 0xa2),
- [ec_sensor_fan_cpu_opt] =
- EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
[ec_sensor_fan_vrm_hs] =
EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
[ec_sensor_fan_chipset] =
/* no chipset fans in this generation */
EC_SENSOR("Chipset", hwmon_fan, 0, 0x00, 0x00),
@@ -194,10 +198,10 @@ static const struct ec_sensor_info sensors_family_amd_400[] = {
EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xb4),
[ec_sensor_curr_cpu] =
EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
- [ec_sensor_temp_water_in] =
- EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x0d),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x0b),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x0d),
};
static const struct ec_sensor_info sensors_family_amd_500[] = {
@@ -239,19 +243,20 @@ static const struct ec_sensor_info sensors_family_amd_500[] = {
static const struct ec_sensor_info sensors_family_amd_600[] = {
[ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x30),
- [ec_sensor_temp_cpu_package] = EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_temp_cpu_package] =
+ EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
[ec_sensor_temp_mb] =
EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x32),
[ec_sensor_temp_vrm] =
EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33),
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x36),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
[ec_sensor_temp_water_in] =
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
- [ec_sensor_fan_cpu_opt] =
- EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
static const struct ec_sensor_info sensors_family_intel_300[] = {
@@ -278,6 +283,14 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
[ec_sensor_temp_t_sensor] =
EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
[ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
+ [ec_sensor_fan_water_flow] =
+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbe),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [ec_sensor_temp_water_out] =
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+ [ec_sensor_temp_water_block_in] =
+ EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02),
};
/* Shortcuts for common combinations */
@@ -300,6 +313,15 @@ struct ec_board_info {
enum board_family family;
};
+static const struct ec_board_info board_info_maximus_vi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_intel_300_series,
+};
+
static const struct ec_board_info board_info_prime_x470_pro = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -402,6 +424,13 @@ static const struct ec_board_info board_info_maximus_xi_hero = {
.family = family_intel_300_series,
};
+static const struct ec_board_info board_info_maximus_z690_formula = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
static const struct ec_board_info board_info_crosshair_viii_impact = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -507,6 +536,8 @@ static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
}
static const struct dmi_system_id dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("MAXIMUS VI HERO",
+ &board_info_maximus_vi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
&board_info_prime_x470_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
@@ -537,6 +568,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
&board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS Z690 FORMULA",
+ &board_info_maximus_z690_formula),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
&board_info_crosshair_viii_impact),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
@@ -933,6 +966,10 @@ static int asus_ec_hwmon_read_string(struct device *dev,
{
struct ec_sensors_data *state = dev_get_drvdata(dev);
int sensor_index = find_ec_sensor_index(state, type, channel);
+
+ if (sensor_index < 0)
+ return sensor_index;
+
*str = get_sensor_info(state, sensor_index)->label;
return 0;
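
The sort order demanded by the table comment above reduces to a single combined key per sensor register. A hedged illustration (the helper and its u8 parameters are not the driver's real types):

static inline u16 ec_register_key(u8 bank, u8 index)
{
	/* asus_ec_block_read() walks registers in ascending key order */
	return ((u16)bank << 8) | index;
}

This is why the bank-1 water sensors (keys 0x100 and up) now sit after every bank-0 fan entry in the reordered arrays.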
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 79e5606e6d2f..1e2c8e284001 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1274,6 +1274,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
+ .ident = "Dell OptiPlex 7050",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7050"),
+ },
+ },
+ {
.ident = "Dell Precision",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 9ed2c4b6734e..8ecebea53651 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -143,8 +143,8 @@ static void do_read_registers_on_cu(void *_data)
*/
cu = topology_core_id(smp_processor_id());
- rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
- rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
+ rdmsrq_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
+ rdmsrq_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
data->cu_on[cu] = 1;
}
@@ -424,7 +424,7 @@ static int fam15h_power_init_data(struct pci_dev *f4,
*/
data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
- if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
+ if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
pr_err("Failed to read max compute unit power accumulator MSR\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index a3a07662e491..8aeec16a7a90 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -423,13 +423,16 @@ static int fts_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
case hwmon_pwm:
switch (attr) {
- case hwmon_pwm_auto_channels_temp:
- if (data->fan_source[channel] == FTS_FAN_SOURCE_INVALID)
+ case hwmon_pwm_auto_channels_temp: {
+ u8 fan_source = data->fan_source[channel];
+
+ if (fan_source == FTS_FAN_SOURCE_INVALID || fan_source >= BITS_PER_LONG)
*val = 0;
else
- *val = BIT(data->fan_source[channel]);
+ *val = BIT(fan_source);
return 0;
+ }
default:
break;
}
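
The extra bound in the ftsteutates fix matters because BIT(n) expands to (1UL << (n)), and shifting by BITS_PER_LONG or more is undefined behaviour in C; a corrupt fan_source register value therefore has to be rejected before the shift. The guard in isolation (function name illustrative):

static unsigned long safe_bit(unsigned int n)
{
	/* a shift count >= BITS_PER_LONG would be undefined behaviour */
	return (n < BITS_PER_LONG) ? BIT(n) : 0;
}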
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index b779240328d5..516c34bb61c9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -20,6 +20,9 @@
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
struct gpio_fan_speed {
@@ -42,6 +45,7 @@ struct gpio_fan_data {
bool pwm_enable;
struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
+ struct regulator *supply;
};
/*
@@ -125,13 +129,32 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
}
/* Must be called with fan_data->lock held, except during initialization. */
-static void set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
+static int set_fan_speed(struct gpio_fan_data *fan_data, int speed_index)
{
if (fan_data->speed_index == speed_index)
- return;
+ return 0;
+
+ if (fan_data->speed_index == 0 && speed_index > 0) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(fan_data->dev);
+ if (ret < 0)
+ return ret;
+ }
__set_fan_ctrl(fan_data, fan_data->speed[speed_index].ctrl_val);
+
+ if (fan_data->speed_index > 0 && speed_index == 0) {
+ int ret;
+
+ ret = pm_runtime_put_sync(fan_data->dev);
+ if (ret < 0)
+ return ret;
+ }
+
fan_data->speed_index = speed_index;
+
+ return 0;
}
static int get_fan_speed_index(struct gpio_fan_data *fan_data)
@@ -176,7 +199,7 @@ static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long pwm;
int speed_index;
- int ret = count;
+ int ret;
if (kstrtoul(buf, 10, &pwm) || pwm > 255)
return -EINVAL;
@@ -189,12 +212,12 @@ static ssize_t pwm1_store(struct device *dev, struct device_attribute *attr,
}
speed_index = DIV_ROUND_UP(pwm * (fan_data->num_speed - 1), 255);
- set_fan_speed(fan_data, speed_index);
+ ret = set_fan_speed(fan_data, speed_index);
exit_unlock:
mutex_unlock(&fan_data->lock);
- return ret;
+ return ret ? ret : count;
}
static ssize_t pwm1_enable_show(struct device *dev,
@@ -211,6 +234,7 @@ static ssize_t pwm1_enable_store(struct device *dev,
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long val;
+ int ret = 0;
if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
@@ -224,11 +248,11 @@ static ssize_t pwm1_enable_store(struct device *dev,
/* Disable manual control mode: set fan at full speed. */
if (val == 0)
- set_fan_speed(fan_data, fan_data->num_speed - 1);
+ ret = set_fan_speed(fan_data, fan_data->num_speed - 1);
mutex_unlock(&fan_data->lock);
- return count;
+ return ret ? ret : count;
}
static ssize_t pwm1_mode_show(struct device *dev,
@@ -279,7 +303,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr,
goto exit_unlock;
}
- set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
+ ret = set_fan_speed(fan_data, rpm_to_speed_index(fan_data, rpm));
exit_unlock:
mutex_unlock(&fan_data->lock);
@@ -386,6 +410,7 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct gpio_fan_data *fan_data = cdev->devdata;
+ int ret;
if (!fan_data)
return -EINVAL;
@@ -395,11 +420,11 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, state);
+ ret = set_fan_speed(fan_data, state);
mutex_unlock(&fan_data->lock);
- return 0;
+ return ret;
}
static const struct thermal_cooling_device_ops gpio_fan_cool_ops = {
@@ -499,6 +524,8 @@ static void gpio_fan_stop(void *data)
mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
mutex_unlock(&fan_data->lock);
+
+ pm_runtime_disable(fan_data->dev);
}
static int gpio_fan_probe(struct platform_device *pdev)
@@ -521,6 +548,11 @@ static int gpio_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
+ fan_data->supply = devm_regulator_get(dev, "fan");
+ if (IS_ERR(fan_data->supply))
+ return dev_err_probe(dev, PTR_ERR(fan_data->supply),
+ "Failed to get fan-supply");
+
/* Configure control GPIOs if available. */
if (fan_data->gpios && fan_data->num_gpios > 0) {
if (!fan_data->speed || fan_data->num_speed <= 1)
@@ -548,6 +580,17 @@ static int gpio_fan_probe(struct platform_device *pdev)
return err;
}
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ /* If current GPIO state is active, mark RPM as active as well */
+ if (fan_data->speed_index > 0) {
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+ }
+
/* Optional cooling device register for Device tree platforms */
fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
"gpio-fan", fan_data, &gpio_fan_cool_ops);
@@ -565,41 +608,69 @@ static void gpio_fan_shutdown(struct platform_device *pdev)
set_fan_speed(fan_data, 0);
}
+static int gpio_fan_runtime_suspend(struct device *dev)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fan_data->supply)
+ ret = regulator_disable(fan_data->supply);
+
+ return ret;
+}
+
+static int gpio_fan_runtime_resume(struct device *dev)
+{
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (fan_data->supply)
+ ret = regulator_enable(fan_data->supply);
+
+ return ret;
+}
+
static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, 0);
+ ret = set_fan_speed(fan_data, 0);
mutex_unlock(&fan_data->lock);
}
- return 0;
+ return ret;
}
static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
+ int ret = 0;
if (fan_data->gpios) {
mutex_lock(&fan_data->lock);
- set_fan_speed(fan_data, fan_data->resume_speed);
+ ret = set_fan_speed(fan_data, fan_data->resume_speed);
mutex_unlock(&fan_data->lock);
}
- return 0;
+ return ret;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
+static const struct dev_pm_ops gpio_fan_pm = {
+ RUNTIME_PM_OPS(gpio_fan_runtime_suspend,
+ gpio_fan_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(gpio_fan_suspend, gpio_fan_resume)
+};
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
- .pm = pm_sleep_ptr(&gpio_fan_pm),
+ .pm = pm_ptr(&gpio_fan_pm),
.of_match_table = of_gpio_fan_match,
},
};
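
The reworked set_fan_speed() uses an edge-triggered reference pattern: a runtime-PM reference, and through the new runtime callbacks the optional fan regulator, is taken only on the off-to-on transition and released on the on-to-off transition, so the enable count stays balanced however often the speed changes in between. A reduced sketch of that rule (names illustrative):

static int fan_power_edge(struct device *dev, int old_idx, int new_idx)
{
	if (old_idx == 0 && new_idx > 0)	/* fan starts: take a PM reference */
		return pm_runtime_resume_and_get(dev);
	if (old_idx > 0 && new_idx == 0) {	/* fan stops: drop the reference */
		int ret = pm_runtime_put_sync(dev);

		return ret < 0 ? ret : 0;	/* positive codes are not errors */
	}
	return 0;				/* no on/off edge: nothing to do */
}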
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 6d1175a51832..2df4956296ed 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -15,6 +15,10 @@
#include <linux/kernel.h>
#include <linux/hwmon-vid.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
+
/*
* Common code for decoding VID pins.
*
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 2d9f12f68d50..a4a41742786b 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -21,11 +21,14 @@
#define INA238_CONFIG 0x0
#define INA238_ADC_CONFIG 0x1
#define INA238_SHUNT_CALIBRATION 0x2
+#define SQ52206_SHUNT_TEMPCO 0x3
#define INA238_SHUNT_VOLTAGE 0x4
#define INA238_BUS_VOLTAGE 0x5
#define INA238_DIE_TEMP 0x6
#define INA238_CURRENT 0x7
#define INA238_POWER 0x8
+#define SQ52206_ENERGY 0x9
+#define SQ52206_CHARGE 0xa
#define INA238_DIAG_ALERT 0xb
#define INA238_SHUNT_OVER_VOLTAGE 0xc
#define INA238_SHUNT_UNDER_VOLTAGE 0xd
@@ -33,9 +36,12 @@
#define INA238_BUS_UNDER_VOLTAGE 0xf
#define INA238_TEMP_LIMIT 0x10
#define INA238_POWER_LIMIT 0x11
+#define SQ52206_POWER_PEAK 0x20
#define INA238_DEVICE_ID 0x3f /* not available on INA237 */
#define INA238_CONFIG_ADCRANGE BIT(4)
+#define SQ52206_CONFIG_ADCRANGE_HIGH BIT(4)
+#define SQ52206_CONFIG_ADCRANGE_LOW BIT(3)
#define INA238_DIAG_ALERT_TMPOL BIT(7)
#define INA238_DIAG_ALERT_SHNTOL BIT(6)
@@ -44,12 +50,13 @@
#define INA238_DIAG_ALERT_BUSUL BIT(3)
#define INA238_DIAG_ALERT_POL BIT(2)
-#define INA238_REGISTERS 0x11
+#define INA238_REGISTERS 0x20
#define INA238_RSHUNT_DEFAULT 10000 /* uOhm */
/* Default configuration of device on reset. */
#define INA238_CONFIG_DEFAULT 0
+#define SQ52206_CONFIG_DEFAULT 0x0005
/* 16 sample averaging, 1052us conversion time, continuous mode */
#define INA238_ADC_CONFIG_DEFAULT 0xfb6a
/* Configure alerts to be based on averaged value (SLOWALERT) */
@@ -87,14 +94,19 @@
* shunt = 0x4000 / (819.2 * 10^6) / 0.001 = 20000 uOhms (with 1mA/lsb)
*
* Current (mA) = register value * 20000 / rshunt / 4 * gain
- * Power (W) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ * Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain
+ * (Specific for SQ52206)
+ * Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain
+ * Energy (mJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain
*/
#define INA238_CALIBRATION_VALUE 16384
#define INA238_FIXED_SHUNT 20000
#define INA238_SHUNT_VOLTAGE_LSB 5 /* 5 uV/lsb */
#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
-#define INA238_DIE_TEMP_LSB 125 /* 125 mC/lsb */
+#define INA238_DIE_TEMP_LSB 1250000 /* 125.0000 mC/lsb */
+#define SQ52206_BUS_VOLTAGE_LSB 3750 /* 3.75 mV/lsb */
+#define SQ52206_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
static const struct regmap_config ina238_regmap_config = {
.max_register = INA238_REGISTERS,
@@ -102,7 +114,20 @@ static const struct regmap_config ina238_regmap_config = {
.val_bits = 16,
};
+enum ina238_ids { ina238, ina237, sq52206 };
+
+struct ina238_config {
+	bool has_power_highest;		/* chip reports peak power */
+	bool has_energy;		/* chip reports accumulated energy */
+	u8 temp_shift;			/* register shift for temperature calculation */
+	u32 power_calculate_factor;	/* fixed factor for power calculation */
+	u16 config_default;		/* power-on default state */
+	int bus_voltage_lsb;		/* bus voltage LSB, uV/lsb */
+	int temp_lsb;			/* die temperature LSB, in 0.1 uC units */
+};
+
struct ina238_data {
+ const struct ina238_config *config;
struct i2c_client *client;
struct mutex config_lock;
struct regmap *regmap;
@@ -110,6 +135,36 @@ struct ina238_data {
int gain;
};
+static const struct ina238_config ina238_config[] = {
+ [ina238] = {
+ .has_energy = false,
+ .has_power_highest = false,
+ .temp_shift = 4,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA238_DIE_TEMP_LSB,
+ },
+ [ina237] = {
+ .has_energy = false,
+ .has_power_highest = false,
+ .temp_shift = 4,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_lsb = INA238_DIE_TEMP_LSB,
+ },
+ [sq52206] = {
+ .has_energy = true,
+ .has_power_highest = true,
+ .temp_shift = 0,
+ .power_calculate_factor = 24,
+ .config_default = SQ52206_CONFIG_DEFAULT,
+ .bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
+ .temp_lsb = SQ52206_DIE_TEMP_LSB,
+ },
+};
+
static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
{
u8 data[3];
@@ -126,6 +181,24 @@ static int ina238_read_reg24(const struct i2c_client *client, u8 reg, u32 *val)
return 0;
}
+static int ina238_read_reg40(const struct i2c_client *client, u8 reg, u64 *val)
+{
+ u8 data[5];
+ u32 low;
+ int err;
+
+ /* 40-bit register read */
+ err = i2c_smbus_read_i2c_block_data(client, reg, 5, data);
+ if (err < 0)
+ return err;
+ if (err != 5)
+ return -EIO;
+ low = (data[1] << 24) | (data[2] << 16) | (data[3] << 8) | data[4];
+ *val = ((long long)data[0] << 32) | low;
+
+ return 0;
+}
+
static int ina238_read_in(struct device *dev, u32 attr, int channel,
long *val)
{
@@ -197,10 +270,10 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
regval = (s16)regval;
if (channel == 0)
/* gain of 1 -> LSB / 4 */
- *val = (regval * INA238_SHUNT_VOLTAGE_LSB) /
- (1000 * (4 - data->gain + 1));
+ *val = (regval * INA238_SHUNT_VOLTAGE_LSB) *
+ data->gain / (1000 * 4);
else
- *val = (regval * INA238_BUS_VOLTAGE_LSB) / 1000;
+ *val = (regval * data->config->bus_voltage_lsb) / 1000;
break;
case hwmon_in_max_alarm:
case hwmon_in_min_alarm:
@@ -225,8 +298,8 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
case 0:
/* signed value, clamp to max range +/-163 mV */
regval = clamp_val(val, -163, 163);
- regval = (regval * 1000 * (4 - data->gain + 1)) /
- INA238_SHUNT_VOLTAGE_LSB;
+ regval = (regval * 1000 * 4) /
+ (INA238_SHUNT_VOLTAGE_LSB * data->gain);
regval = clamp_val(regval, S16_MIN, S16_MAX);
switch (attr) {
@@ -242,7 +315,7 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
case 1:
/* signed value, positive values only. Clamp to max 102.396 V */
regval = clamp_val(val, 0, 102396);
- regval = (regval * 1000) / INA238_BUS_VOLTAGE_LSB;
+ regval = (regval * 1000) / data->config->bus_voltage_lsb;
regval = clamp_val(regval, 0, S16_MAX);
switch (attr) {
@@ -297,8 +370,19 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
return err;
/* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT *
- data->gain, 20 * data->rshunt);
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ /* Clamp value to maximum value of long */
+ *val = clamp_val(power, 0, LONG_MAX);
+ break;
+ case hwmon_power_input_highest:
+ err = ina238_read_reg24(data->client, SQ52206_POWER_PEAK, &regval);
+ if (err)
+ return err;
+
+ /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
+ power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -311,8 +395,8 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
* Truncated 24-bit compare register, lower 8-bits are
* truncated. Same conversion to/from uW as POWER register.
*/
- power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT *
- data->gain, 20 * data->rshunt);
+ power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT * data->gain *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -344,8 +428,8 @@ static int ina238_write_power(struct device *dev, u32 attr, long val)
* register.
*/
regval = clamp_val(val, 0, LONG_MAX);
- regval = div_u64(val * 20ULL * data->rshunt,
- 1000ULL * INA238_FIXED_SHUNT * data->gain);
+ regval = div_u64(val * 4 * 100 * data->rshunt, data->config->power_calculate_factor *
+ 1000ULL * INA238_FIXED_SHUNT * data->gain);
regval = clamp_val(regval >> 8, 0, U16_MAX);
return regmap_write(data->regmap, INA238_POWER_LIMIT, regval);
@@ -362,17 +446,17 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
err = regmap_read(data->regmap, INA238_DIE_TEMP, &regval);
if (err)
return err;
-
- /* Signed, bits 15-4 of register, result in mC */
- *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ /* Signed, result in mC */
+ *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max:
err = regmap_read(data->regmap, INA238_TEMP_LIMIT, &regval);
if (err)
return err;
-
- /* Signed, bits 15-4 of register, result in mC */
- *val = ((s16)regval >> 4) * INA238_DIE_TEMP_LSB;
+ /* Signed, result in mC */
+ *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
+ (s64)data->config->temp_lsb, 10000);
break;
case hwmon_temp_max_alarm:
err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
@@ -396,13 +480,33 @@ static int ina238_write_temp(struct device *dev, u32 attr, long val)
if (attr != hwmon_temp_max)
return -EOPNOTSUPP;
- /* Signed, bits 15-4 of register */
- regval = (val / INA238_DIE_TEMP_LSB) << 4;
- regval = clamp_val(regval, S16_MIN, S16_MAX) & 0xfff0;
+ /* Signed */
+ regval = clamp_val(val, -40000, 125000);
+	regval = div_s64(regval * 10000, data->config->temp_lsb) << data->config->temp_shift;
+ regval = clamp_val(regval, S16_MIN, S16_MAX) & (0xffff << data->config->temp_shift);
return regmap_write(data->regmap, INA238_TEMP_LIMIT, regval);
}
+static ssize_t energy1_input_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int ret;
+ u64 regval;
+ u64 energy;
+
+ ret = ina238_read_reg40(data->client, SQ52206_ENERGY, &regval);
+ if (ret)
+ return ret;
+
+ /* result in mJ */
+ energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 *
+ data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+
+ return sysfs_emit(buf, "%llu\n", energy);
+}
+
static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -422,7 +526,7 @@ static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
}
static int ina238_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
+ u32 attr, int channel, long val)
{
struct ina238_data *data = dev_get_drvdata(dev);
int err;
@@ -452,6 +556,9 @@ static umode_t ina238_is_visible(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ const struct ina238_data *data = drvdata;
+ bool has_power_highest = data->config->has_power_highest;
+
switch (type) {
case hwmon_in:
switch (attr) {
@@ -479,6 +586,10 @@ static umode_t ina238_is_visible(const void *drvdata,
return 0444;
case hwmon_power_max:
return 0644;
+ case hwmon_power_input_highest:
+ if (has_power_highest)
+ return 0444;
+ return 0;
default:
return 0;
}
@@ -512,7 +623,8 @@ static const struct hwmon_channel_info * const ina238_info[] = {
HWMON_C_INPUT),
HWMON_CHANNEL_INFO(power,
/* 0: power */
- HWMON_P_INPUT | HWMON_P_MAX | HWMON_P_MAX_ALARM),
+ HWMON_P_INPUT | HWMON_P_MAX |
+ HWMON_P_MAX_ALARM | HWMON_P_INPUT_HIGHEST),
HWMON_CHANNEL_INFO(temp,
/* 0: die temperature */
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
@@ -530,20 +642,35 @@ static const struct hwmon_chip_info ina238_chip_info = {
.info = ina238_info,
};
+/* energy attributes are 5 bytes wide so we need u64 */
+static DEVICE_ATTR_RO(energy1_input);
+
+static struct attribute *ina238_attrs[] = {
+ &dev_attr_energy1_input.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ina238);
+
static int ina238_probe(struct i2c_client *client)
{
struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct ina238_data *data;
+ enum ina238_ids chip;
int config;
int ret;
+ chip = (uintptr_t)i2c_get_match_data(client);
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
+ /* set the device type */
+ data->config = &ina238_config[chip];
+
mutex_init(&data->config_lock);
data->regmap = devm_regmap_init_i2c(client, &ina238_regmap_config);
@@ -564,15 +691,21 @@ static int ina238_probe(struct i2c_client *client)
/* load shunt gain value */
if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
data->gain = 4; /* Default of ADCRANGE = 0 */
- if (data->gain != 1 && data->gain != 4) {
+ if (data->gain != 1 && data->gain != 2 && data->gain != 4) {
dev_err(dev, "invalid shunt gain value %u\n", data->gain);
return -EINVAL;
}
/* Setup CONFIG register */
- config = INA238_CONFIG_DEFAULT;
- if (data->gain == 1)
+ config = data->config->config_default;
+ if (chip == sq52206) {
+ if (data->gain == 1)
+ config |= SQ52206_CONFIG_ADCRANGE_HIGH; /* ADCRANGE = 10/11 is /1 */
+ else if (data->gain == 2)
+ config |= SQ52206_CONFIG_ADCRANGE_LOW; /* ADCRANGE = 01 is /2 */
+ } else if (data->gain == 1) {
config |= INA238_CONFIG_ADCRANGE; /* ADCRANGE = 1 is /1 */
+ }
ret = regmap_write(data->regmap, INA238_CONFIG, config);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
@@ -605,7 +738,8 @@ static int ina238_probe(struct i2c_client *client)
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
&ina238_chip_info,
- NULL);
+ data->config->has_energy ?
+ ina238_groups : NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
@@ -616,15 +750,27 @@ static int ina238_probe(struct i2c_client *client)
}
static const struct i2c_device_id ina238_id[] = {
- { "ina238" },
+ { "ina237", ina237 },
+ { "ina238", ina238 },
+ { "sq52206", sq52206 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ina238_id);
static const struct of_device_id __maybe_unused ina238_of_match[] = {
- { .compatible = "ti,ina237" },
- { .compatible = "ti,ina238" },
- { },
+ {
+ .compatible = "ti,ina237",
+ .data = (void *)ina237
+ },
+ {
+ .compatible = "ti,ina238",
+ .data = (void *)ina238
+ },
+ {
+ .compatible = "silergy,sq52206",
+ .data = (void *)sq52206
+ },
+ { }
};
MODULE_DEVICE_TABLE(of, ina238_of_match);
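
Plugging illustrative numbers into the scaling comment above: on an INA238 with the default 10000 uOhm shunt, gain 4 and power factor 20, the generalized power conversion collapses to regval * 400 uW (function name is illustrative):

static u64 ina238_power_uw_example(u32 regval)
{
	/* reg * 1000 * 20000 * gain(4) * factor(20) / (4 * 100 * 10000)
	 * = reg * 400, so a POWER reading of 250 is 100000 uW (100 mW)
	 */
	return div_u64((u64)regval * 1000 * 20000 * 4 * 20, 4 * 100 * 10000);
}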
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 345fe7db9de9..bc3c1f7314b3 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -959,8 +959,12 @@ static int ina2xx_probe(struct i2c_client *client)
return PTR_ERR(data->regmap);
}
- ret = devm_regulator_get_enable(dev, "vs");
- if (ret)
+ /*
+	 * The regulator core returns -ENODEV if the 'vs' supply is not
+	 * available, hence the explicit check for an -ENODEV return code.
+ */
+ ret = devm_regulator_get_enable_optional(dev, "vs");
+ if (ret < 0 && ret != -ENODEV)
return dev_err_probe(dev, ret, "failed to enable vs regulator\n");
ret = ina2xx_init(dev, data);
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
index 1fb9864635db..c2e559dde63f 100644
--- a/drivers/hwmon/isl28022.c
+++ b/drivers/hwmon/isl28022.c
@@ -154,6 +154,7 @@ static int isl28022_read_current(struct device *dev, u32 attr, long *val)
struct isl28022_data *data = dev_get_drvdata(dev);
unsigned int regval;
int err;
+ u16 sign_bit;
switch (attr) {
case hwmon_curr_input:
@@ -161,8 +162,9 @@ static int isl28022_read_current(struct device *dev, u32 attr, long *val)
ISL28022_REG_CURRENT, &regval);
if (err < 0)
return err;
- *val = ((long)regval * 1250L * (long)data->gain) /
- (long)data->shunt;
+ sign_bit = (regval >> 15) & 0x01;
+ *val = (((long)(((u16)regval) & 0x7FFF) - (sign_bit * 32768)) *
+ 1250L * (long)data->gain) / (long)data->shunt;
break;
default:
return -EOPNOTSUPP;
@@ -301,7 +303,7 @@ static const struct regmap_config isl28022_regmap_config = {
.writeable_reg = isl28022_is_writeable_reg,
.volatile_reg = isl28022_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.use_single_read = true,
.use_single_write = true,
};
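
The open-coded sign handling in the isl28022 current path (mask off bit 15, subtract 32768 when it was set) is plain two's-complement reconstruction; the kernel's sign_extend32() helper expresses the same conversion more directly. A sketch under that assumption (function name illustrative):

static long isl28022_current_sketch(u16 regval, long gain, long shunt)
{
	long sreg = sign_extend32(regval, 15);	/* bit 15 is the sign bit */

	return sreg * 1250L * gain / shunt;	/* same scaling as the driver */
}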
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 3685906cc57c..babf2413d666 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -503,6 +503,13 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
k10temp_get_ccd_support(data, 12);
break;
}
+ } else if (boot_cpu_data.x86 == 0x1a) {
+ switch (boot_cpu_data.x86_model) {
+ case 0x40 ... 0x4f: /* Zen5 Ryzen Desktop */
+ data->ccd_offset = 0x308;
+ k10temp_get_ccd_support(data, 8);
+ break;
+ }
}
for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
diff --git a/drivers/hwmon/kbatt.c b/drivers/hwmon/kbatt.c
new file mode 100644
index 000000000000..501b8f4ded33
--- /dev/null
+++ b/drivers/hwmon/kbatt.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 KEBA Industrial Automation GmbH
+ *
+ * Driver for KEBA battery monitoring controller FPGA IP core
+ */
+
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/misc/keba.h>
+#include <linux/mutex.h>
+
+#define KBATT "kbatt"
+
+#define KBATT_CONTROL_REG 0x4
+#define KBATT_CONTROL_BAT_TEST 0x01
+
+#define KBATT_STATUS_REG 0x8
+#define KBATT_STATUS_BAT_OK 0x01
+
+#define KBATT_MAX_UPD_INTERVAL (10 * HZ)
+#define KBATT_SETTLE_TIME_US (100 * USEC_PER_MSEC)
+
+struct kbatt {
+ /* update lock */
+ struct mutex lock;
+ void __iomem *base;
+
+ unsigned long next_update; /* in jiffies */
+ bool alarm;
+};
+
+static bool kbatt_alarm(struct kbatt *kbatt)
+{
+ mutex_lock(&kbatt->lock);
+
+ if (!kbatt->next_update || time_after(jiffies, kbatt->next_update)) {
+ /* switch load on */
+ iowrite8(KBATT_CONTROL_BAT_TEST,
+ kbatt->base + KBATT_CONTROL_REG);
+
+ /* wait some time to let things settle */
+ fsleep(KBATT_SETTLE_TIME_US);
+
+ /* check battery state */
+ if (ioread8(kbatt->base + KBATT_STATUS_REG) &
+ KBATT_STATUS_BAT_OK)
+ kbatt->alarm = false;
+ else
+ kbatt->alarm = true;
+
+ /* switch load off */
+ iowrite8(0, kbatt->base + KBATT_CONTROL_REG);
+
+ kbatt->next_update = jiffies + KBATT_MAX_UPD_INTERVAL;
+ }
+
+ mutex_unlock(&kbatt->lock);
+
+ return kbatt->alarm;
+}
+
+static int kbatt_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct kbatt *kbatt = dev_get_drvdata(dev);
+
+ *val = kbatt_alarm(kbatt) ? 1 : 0;
+
+ return 0;
+}
+
+static umode_t kbatt_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (channel == 0 && attr == hwmon_in_min_alarm)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *kbatt_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ /* 0: input minimum alarm channel */
+ HWMON_I_MIN_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops kbatt_hwmon_ops = {
+ .is_visible = kbatt_is_visible,
+ .read = kbatt_read,
+};
+
+static const struct hwmon_chip_info kbatt_chip_info = {
+ .ops = &kbatt_hwmon_ops,
+ .info = kbatt_info,
+};
+
+static int kbatt_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct keba_batt_auxdev *kbatt_auxdev =
+ container_of(auxdev, struct keba_batt_auxdev, auxdev);
+ struct device *dev = &auxdev->dev;
+ struct device *hwmon_dev;
+ struct kbatt *kbatt;
+ int retval;
+
+ kbatt = devm_kzalloc(dev, sizeof(*kbatt), GFP_KERNEL);
+ if (!kbatt)
+ return -ENOMEM;
+
+ retval = devm_mutex_init(dev, &kbatt->lock);
+ if (retval)
+ return retval;
+
+ kbatt->base = devm_ioremap_resource(dev, &kbatt_auxdev->io);
+ if (IS_ERR(kbatt->base))
+ return PTR_ERR(kbatt->base);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KBATT, kbatt,
+ &kbatt_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct auxiliary_device_id kbatt_devtype_aux[] = {
+ { .name = "keba.batt" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, kbatt_devtype_aux);
+
+static struct auxiliary_driver kbatt_driver_aux = {
+ .name = KBATT,
+ .id_table = kbatt_devtype_aux,
+ .probe = kbatt_probe,
+};
+module_auxiliary_driver(kbatt_driver_aux);
+
+MODULE_AUTHOR("Petar Bojanic <boja@keba.com>");
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA battery monitoring controller driver");
+MODULE_LICENSE("GPL");
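
Each kbatt measurement switches a test load onto the battery and sleeps 100 ms, so kbatt_alarm() caches the verdict and re-measures only after KBATT_MAX_UPD_INTERVAL. The rate-limit predicate on its own (name illustrative):

static bool kbatt_measurement_due(unsigned long next_update)
{
	/* first call (next_update == 0) or cached result older than 10 s */
	return !next_update || time_after(jiffies, next_update);
}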
diff --git a/drivers/hwmon/kfan.c b/drivers/hwmon/kfan.c
new file mode 100644
index 000000000000..f353acb66749
--- /dev/null
+++ b/drivers/hwmon/kfan.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 KEBA Industrial Automation GmbH
+ *
+ * Driver for KEBA fan controller FPGA IP core
+ *
+ */
+
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/misc/keba.h>
+
+#define KFAN "kfan"
+
+#define KFAN_CONTROL_REG 0x04
+
+#define KFAN_STATUS_REG 0x08
+#define KFAN_STATUS_PRESENT 0x01
+#define KFAN_STATUS_REGULABLE 0x02
+#define KFAN_STATUS_TACHO 0x04
+#define KFAN_STATUS_BLOCKED 0x08
+
+#define KFAN_TACHO_REG 0x0c
+
+#define KFAN_DEFAULT_DIV 2
+
+struct kfan {
+ void __iomem *base;
+ bool tacho;
+ bool regulable;
+
+ /* hwmon API configuration */
+ u32 fan_channel_config[2];
+ struct hwmon_channel_info fan_info;
+ u32 pwm_channel_config[2];
+ struct hwmon_channel_info pwm_info;
+ const struct hwmon_channel_info *info[3];
+ struct hwmon_chip_info chip;
+};
+
+static bool kfan_get_fault(struct kfan *kfan)
+{
+ u8 status = ioread8(kfan->base + KFAN_STATUS_REG);
+
+ if (!(status & KFAN_STATUS_PRESENT))
+ return true;
+
+ if (!kfan->tacho && (status & KFAN_STATUS_BLOCKED))
+ return true;
+
+ return false;
+}
+
+static unsigned int kfan_count_to_rpm(u16 count)
+{
+ if (count == 0 || count == 0xffff)
+ return 0;
+
+ return 5000000UL / (KFAN_DEFAULT_DIV * count);
+}
+
+static unsigned int kfan_get_rpm(struct kfan *kfan)
+{
+ unsigned int rpm;
+ u16 count;
+
+ count = ioread16(kfan->base + KFAN_TACHO_REG);
+ rpm = kfan_count_to_rpm(count);
+
+ return rpm;
+}
+
+static unsigned int kfan_get_pwm(struct kfan *kfan)
+{
+ return ioread8(kfan->base + KFAN_CONTROL_REG);
+}
+
+static int kfan_set_pwm(struct kfan *kfan, long val)
+{
+ if (val < 0 || val > 0xff)
+ return -EINVAL;
+
+	/* if non-regulable, only 0 or 0xff can be written */
+ if (!kfan->regulable && val > 0)
+ val = 0xff;
+
+ iowrite8(val, kfan->base + KFAN_CONTROL_REG);
+
+ return 0;
+}
+
+static int kfan_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct kfan *kfan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return kfan_set_pwm(kfan, val);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int kfan_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct kfan *kfan = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_fault:
+ *val = kfan_get_fault(kfan);
+ return 0;
+ case hwmon_fan_input:
+ *val = kfan_get_rpm(kfan);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = kfan_get_pwm(kfan);
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t kfan_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops kfan_hwmon_ops = {
+ .is_visible = kfan_is_visible,
+ .read = kfan_read,
+ .write = kfan_write,
+};
+
+static int kfan_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct keba_fan_auxdev *kfan_auxdev =
+ container_of(auxdev, struct keba_fan_auxdev, auxdev);
+ struct device *dev = &auxdev->dev;
+ struct device *hwmon_dev;
+ struct kfan *kfan;
+ u8 status;
+
+ kfan = devm_kzalloc(dev, sizeof(*kfan), GFP_KERNEL);
+ if (!kfan)
+ return -ENOMEM;
+
+ kfan->base = devm_ioremap_resource(dev, &kfan_auxdev->io);
+ if (IS_ERR(kfan->base))
+ return PTR_ERR(kfan->base);
+
+ status = ioread8(kfan->base + KFAN_STATUS_REG);
+ if (status & KFAN_STATUS_REGULABLE)
+ kfan->regulable = true;
+ if (status & KFAN_STATUS_TACHO)
+ kfan->tacho = true;
+
+ /* fan */
+ kfan->fan_channel_config[0] = HWMON_F_FAULT;
+ if (kfan->tacho)
+ kfan->fan_channel_config[0] |= HWMON_F_INPUT;
+ kfan->fan_info.type = hwmon_fan;
+ kfan->fan_info.config = kfan->fan_channel_config;
+ kfan->info[0] = &kfan->fan_info;
+
+ /* PWM */
+ kfan->pwm_channel_config[0] = HWMON_PWM_INPUT;
+ kfan->pwm_info.type = hwmon_pwm;
+ kfan->pwm_info.config = kfan->pwm_channel_config;
+ kfan->info[1] = &kfan->pwm_info;
+
+ kfan->chip.ops = &kfan_hwmon_ops;
+ kfan->chip.info = kfan->info;
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KFAN, kfan,
+ &kfan->chip, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct auxiliary_device_id kfan_devtype_aux[] = {
+ { .name = "keba.fan" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, kfan_devtype_aux);
+
+static struct auxiliary_driver kfan_driver_aux = {
+ .name = KFAN,
+ .id_table = kfan_devtype_aux,
+ .probe = kfan_probe,
+};
+module_auxiliary_driver(kfan_driver_aux);
+
+MODULE_AUTHOR("Petar Bojanic <boja@keba.com>");
+MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>");
+MODULE_DESCRIPTION("KEBA fan controller driver");
+MODULE_LICENSE("GPL");
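
Working the kfan tacho conversion through with sample numbers: with KFAN_DEFAULT_DIV = 2, a counter value of 1000 yields 5000000 / (2 * 1000) = 2500 RPM, while the out-of-range markers are reported as a stopped fan:

unsigned int rpm = kfan_count_to_rpm(1000);	/* = 2500 RPM */
unsigned int none = kfan_count_to_rpm(0xffff);	/* = 0, no valid reading */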
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index d95a3c6c245c..9b4875e2fd8d 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -622,7 +622,7 @@ static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
{
.rnw = true,
.len = 2,
- .data.out = data->val_buf,
+ .data.in = data->val_buf,
},
};
int ret;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 75f09553fd67..c1f528e292f3 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1235,7 +1235,7 @@ static int lm90_update_alarms(struct lm90_data *data, bool force)
static void lm90_alert_work(struct work_struct *__work)
{
- struct delayed_work *delayed_work = container_of(__work, struct delayed_work, work);
+ struct delayed_work *delayed_work = to_delayed_work(__work);
struct lm90_data *data = container_of(delayed_work, struct lm90_data, alert_work);
/* Nothing to do if alerts are enabled */
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 541fa09dc6e7..a07e2eb93c71 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -256,33 +256,38 @@ static int ltc2992_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask
return 0;
}
-static void ltc2992_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int ltc2992_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ltc2992_state *st = gpiochip_get_data(chip);
unsigned long gpio_ctrl;
- int reg;
+ int reg, ret;
mutex_lock(&st->gpio_mutex);
reg = ltc2992_read_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1);
if (reg < 0) {
mutex_unlock(&st->gpio_mutex);
- return;
+ return reg;
}
gpio_ctrl = reg;
assign_bit(ltc2992_gpio_addr_map[offset].ctrl_bit, &gpio_ctrl, value);
- ltc2992_write_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1, gpio_ctrl);
+ ret = ltc2992_write_reg(st, ltc2992_gpio_addr_map[offset].ctrl, 1,
+ gpio_ctrl);
mutex_unlock(&st->gpio_mutex);
+
+ return ret;
}
-static void ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct ltc2992_state *st = gpiochip_get_data(chip);
unsigned long gpio_ctrl_io = 0;
unsigned long gpio_ctrl = 0;
unsigned int gpio_nr;
+ int ret;
for_each_set_bit(gpio_nr, mask, LTC2992_GPIO_NR) {
if (gpio_nr < 3)
@@ -293,9 +298,14 @@ static void ltc2992_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mas
}
mutex_lock(&st->gpio_mutex);
- ltc2992_write_reg(st, LTC2992_GPIO_IO_CTRL, 1, gpio_ctrl_io);
- ltc2992_write_reg(st, LTC2992_GPIO_CTRL, 1, gpio_ctrl);
+ ret = ltc2992_write_reg(st, LTC2992_GPIO_IO_CTRL, 1, gpio_ctrl_io);
+ if (ret)
+ goto out;
+
+ ret = ltc2992_write_reg(st, LTC2992_GPIO_CTRL, 1, gpio_ctrl);
+out:
mutex_unlock(&st->gpio_mutex);
+ return ret;
}
static int ltc2992_config_gpio(struct ltc2992_state *st)
@@ -329,8 +339,8 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
st->gc.get = ltc2992_gpio_get;
st->gc.get_multiple = ltc2992_gpio_get_multiple;
- st->gc.set = ltc2992_gpio_set;
- st->gc.set_multiple = ltc2992_gpio_set_multiple;
+ st->gc.set_rv = ltc2992_gpio_set;
+ st->gc.set_multiple_rv = ltc2992_gpio_set_multiple;
ret = devm_gpiochip_add_data(&st->client->dev, &st->gc, st);
if (ret)
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
index 7f38d2696239..f607fe8f7937 100644
--- a/drivers/hwmon/ltc4282.c
+++ b/drivers/hwmon/ltc4282.c
@@ -1512,13 +1512,6 @@ static int ltc4282_setup(struct ltc4282_state *st, struct device *dev)
}
if (device_property_read_bool(dev, "adi,fault-log-enable")) {
- ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL,
- LTC4282_FAULT_LOG_EN_MASK);
- if (ret)
- return ret;
- }
-
- if (device_property_read_bool(dev, "adi,fault-log-enable")) {
ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, LTC4282_FAULT_LOG_EN_MASK);
if (ret)
return ret;
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 32b4d54b2076..a06346496e1d 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -80,6 +80,7 @@ struct max6639_data {
/* Register values initialized only once */
u8 ppr[MAX6639_NUM_CHANNELS]; /* Pulses per rotation 0..3 for 1..4 ppr */
u8 rpm_range[MAX6639_NUM_CHANNELS]; /* Index in above rpm_ranges table */
+ u32 target_rpm[MAX6639_NUM_CHANNELS];
/* Optional regulator for FAN supply */
struct regulator *reg;
@@ -563,6 +564,10 @@ static int max6639_probe_child_from_dt(struct i2c_client *client,
if (!err)
data->rpm_range[i] = rpm_range_to_reg(val);
+ err = of_property_read_u32(child, "target-rpm", &val);
+ if (!err)
+ data->target_rpm[i] = val;
+
return 0;
}
@@ -573,6 +578,7 @@ static int max6639_init_client(struct i2c_client *client,
const struct device_node *np = dev->of_node;
struct device_node *child;
int i, err;
+ u8 target_duty;
/* Reset chip to default values, see below for GCONFIG setup */
err = regmap_write(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_POR);
@@ -586,6 +592,8 @@ static int max6639_init_client(struct i2c_client *client,
/* default: 4000 RPM */
data->rpm_range[0] = 1;
data->rpm_range[1] = 1;
+ data->target_rpm[0] = 4000;
+ data->target_rpm[1] = 4000;
for_each_child_of_node(np, child) {
if (strcmp(child->name, "fan"))
@@ -639,8 +647,12 @@ static int max6639_init_client(struct i2c_client *client,
if (err)
return err;
- /* PWM 120/120 (i.e. 100%) */
- err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), 120);
+ /* Set PWM based on target RPM if specified */
+ if (data->target_rpm[i] > rpm_ranges[data->rpm_range[i]])
+ data->target_rpm[i] = rpm_ranges[data->rpm_range[i]];
+
+ target_duty = 120 * data->target_rpm[i] / rpm_ranges[data->rpm_range[i]];
+ err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(i), target_duty);
if (err)
return err;
}
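
The new target-duty computation maps the requested RPM linearly onto the chip's 0..120 duty scale for the selected range, after clamping the target to that range. With the 4000 RPM range, a target-rpm of 2000 gives half duty, and the 4000 RPM default reproduces the previous fixed 100% setting:

u8 target_duty = 120 * 2000 / 4000;	/* = 60, i.e. 50% duty */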
diff --git a/drivers/hwmon/max77705-hwmon.c b/drivers/hwmon/max77705-hwmon.c
new file mode 100644
index 000000000000..990023e6474e
--- /dev/null
+++ b/drivers/hwmon/max77705-hwmon.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MAX77705 voltage and current hwmon driver.
+ *
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/max77705-private.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct channel_desc {
+ u8 reg;
+ u8 avg_reg;
+ const char *const label;
+	/* register resolution: nanovolts for voltage, nanoamperes for current */
+ u32 resolution;
+};
+
+static const struct channel_desc current_channel_desc[] = {
+ {
+ .reg = IIN_REG,
+ .label = "IIN_REG",
+ .resolution = 125000
+ },
+ {
+ .reg = ISYS_REG,
+ .avg_reg = AVGISYS_REG,
+ .label = "ISYS_REG",
+ .resolution = 312500
+ }
+};
+
+static const struct channel_desc voltage_channel_desc[] = {
+ {
+ .reg = VBYP_REG,
+ .label = "VBYP_REG",
+ .resolution = 427246
+ },
+ {
+ .reg = VSYS_REG,
+ .label = "VSYS_REG",
+ .resolution = 156250
+ }
+};
+
+static int max77705_read_and_convert(struct regmap *regmap, u8 reg, u32 res,
+ bool is_signed, long *val)
+{
+ int ret;
+ u32 regval;
+
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ if (is_signed)
+ *val = mult_frac((long)sign_extend32(regval, 15), res, 1000000);
+ else
+ *val = mult_frac((long)regval, res, 1000000);
+
+ return 0;
+}
+
+static umode_t max77705_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_label:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+		case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_average:
+ if (current_channel_desc[channel].avg_reg)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int max77705_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **buf)
+{
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+		case hwmon_curr_label:
+ *buf = current_channel_desc[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *buf = voltage_channel_desc[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max77705_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u8 reg;
+ u32 res;
+
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ reg = current_channel_desc[channel].reg;
+ res = current_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, true, val);
+ case hwmon_curr_average:
+ reg = current_channel_desc[channel].avg_reg;
+ res = current_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, true, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ reg = voltage_channel_desc[channel].reg;
+ res = voltage_channel_desc[channel].resolution;
+
+ return max77705_read_and_convert(regmap, reg, res, false, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops max77705_hwmon_ops = {
+ .is_visible = max77705_is_visible,
+ .read = max77705_read,
+ .read_string = max77705_read_string,
+};
+
+static const struct hwmon_channel_info *max77705_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL
+ ),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_AVERAGE | HWMON_C_LABEL
+ ),
+ NULL
+};
+
+static const struct hwmon_chip_info max77705_chip_info = {
+ .ops = &max77705_hwmon_ops,
+ .info = max77705_info,
+};
+
+static int max77705_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "max77705", regmap,
+ &max77705_chip_info, NULL);
+ if (IS_ERR(hwmon_dev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hwmon_dev),
+ "Unable to register hwmon device\n");
+
+ return 0;
+}
+
+static struct platform_driver max77705_hwmon_driver = {
+ .driver = {
+ .name = "max77705-hwmon",
+ },
+ .probe = max77705_hwmon_probe,
+};
+
+module_platform_driver(max77705_hwmon_driver);
+
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_DESCRIPTION("MAX77705 monitor driver");
+MODULE_LICENSE("GPL");
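
The mult_frac() conversion in max77705_read_and_convert() scales a raw register value by its per-LSB resolution in nanounits down to the millivolts/milliamperes hwmon expects. For example, a VSYS_REG reading of 8192 at 156250 nV/LSB (values illustrative):

/* 8192 * 156250 nV = 1280000000 nV; / 1000000 = 1280 mV */
long vsys_mv = mult_frac(8192L, 156250, 1000000);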
diff --git a/drivers/hwmon/nct7363.c b/drivers/hwmon/nct7363.c
index be7bf32f6e68..e13ab918b1ab 100644
--- a/drivers/hwmon/nct7363.c
+++ b/drivers/hwmon/nct7363.c
@@ -391,7 +391,7 @@ static const struct regmap_config nct7363_regmap_config = {
.val_bits = 8,
.use_single_read = true,
.use_single_write = true,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = nct7363_regmap_is_volatile,
};
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index db3b551828eb..802c73def428 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -331,7 +331,7 @@ static void npcm7xx_fan_polling(struct timer_list *t)
struct npcm7xx_pwm_fan_data *data;
int i;
- data = from_timer(data, t, fan_timer);
+ data = timer_container_of(data, t, fan_timer);
/*
* Polling two module per one round,
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 9486db249c64..b3694a4209b9 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -459,12 +459,10 @@ static ssize_t occ_show_power_1(struct device *dev,
return sysfs_emit(buf, "%llu\n", val);
}
-static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
+static u64 occ_get_powr_avg(u64 accum, u32 samples)
{
- u64 divisor = get_unaligned_be32(samples);
-
- return (divisor == 0) ? 0 :
- div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
+ return (samples == 0) ? 0 :
+ mul_u64_u32_div(accum, 1000000UL, samples);
}
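
The switch to mul_u64_u32_div() is an overflow fix as much as a cleanup: accum * 1000000ULL can wrap a u64 for a large accumulator, while mul_u64_u32_div() keeps the full intermediate product before dividing. Usage, assuming accum and samples already hold the decoded big-endian values:

u64 avg = mul_u64_u32_div(accum, 1000000, samples);	/* samples checked != 0 above */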
static ssize_t occ_show_power_2(struct device *dev,
@@ -489,8 +487,8 @@ static ssize_t occ_show_power_2(struct device *dev,
get_unaligned_be32(&power->sensor_id),
power->function_id, power->apss_channel);
case 1:
- val = occ_get_powr_avg(&power->accumulator,
- &power->update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->accumulator),
+ get_unaligned_be32(&power->update_tag));
break;
case 2:
val = (u64)get_unaligned_be32(&power->update_tag) *
@@ -527,8 +525,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
return sysfs_emit(buf, "%u_system\n",
get_unaligned_be32(&power->sensor_id));
case 1:
- val = occ_get_powr_avg(&power->system.accumulator,
- &power->system.update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->system.accumulator),
+ get_unaligned_be32(&power->system.update_tag));
break;
case 2:
val = (u64)get_unaligned_be32(&power->system.update_tag) *
@@ -541,8 +539,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
return sysfs_emit(buf, "%u_proc\n",
get_unaligned_be32(&power->sensor_id));
case 5:
- val = occ_get_powr_avg(&power->proc.accumulator,
- &power->proc.update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->proc.accumulator),
+ get_unaligned_be32(&power->proc.update_tag));
break;
case 6:
val = (u64)get_unaligned_be32(&power->proc.update_tag) *
@@ -555,8 +553,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
return sysfs_emit(buf, "%u_vdd\n",
get_unaligned_be32(&power->sensor_id));
case 9:
- val = occ_get_powr_avg(&power->vdd.accumulator,
- &power->vdd.update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->vdd.accumulator),
+ get_unaligned_be32(&power->vdd.update_tag));
break;
case 10:
val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
@@ -569,8 +567,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
return sysfs_emit(buf, "%u_vdn\n",
get_unaligned_be32(&power->sensor_id));
case 13:
- val = occ_get_powr_avg(&power->vdn.accumulator,
- &power->vdn.update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->vdn.accumulator),
+ get_unaligned_be32(&power->vdn.update_tag));
break;
case 14:
val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
@@ -747,29 +745,30 @@ static ssize_t occ_show_extended(struct device *dev,
}
/*
- * Some helper macros to make it easier to define an occ_attribute. Since these
- * are dynamically allocated, we shouldn't use the existing kernel macros which
+ * A helper to make it easier to define an occ_attribute. Since these
+ * are dynamically allocated, we cannot use the existing kernel macros which
* stringify the name argument.
*/
-#define ATTR_OCC(_name, _mode, _show, _store) { \
- .attr = { \
- .name = _name, \
- .mode = VERIFY_OCTAL_PERMISSIONS(_mode), \
- }, \
- .show = _show, \
- .store = _store, \
-}
-
-#define SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index) { \
- .dev_attr = ATTR_OCC(_name, _mode, _show, _store), \
- .index = _index, \
- .nr = _nr, \
+static void occ_init_attribute(struct occ_attribute *attr, int mode,
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf),
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count),
+ int nr, int index, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(attr->name, sizeof(attr->name), fmt, args);
+ va_end(args);
+
+ attr->sensor.dev_attr.attr.name = attr->name;
+ attr->sensor.dev_attr.attr.mode = mode;
+ attr->sensor.dev_attr.show = show;
+ attr->sensor.dev_attr.store = store;
+ attr->sensor.index = index;
+ attr->sensor.nr = nr;
}
-#define OCC_INIT_ATTR(_name, _mode, _show, _store, _nr, _index) \
- ((struct sensor_device_attribute_2) \
- SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index))
-
/*
 * Allocate and instantiate sensor_device_attribute_2s. It's most efficient to
* use our own instead of the built-in hwmon attribute types.
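
The new helper folds the snprintf() and the OCC_INIT_ATTR() assignment that previously appeared at every call site into one call; the name is formatted straight into attr->name, which safely outlives registration because it lives in the devm-allocated attribute array. One call replaces the old multi-line sequence:

	/* Before:
	 *   snprintf(attr->name, sizeof(attr->name), "temp%d_input", s);
	 *   attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp,
	 *                                NULL, 1, i);
	 * After:
	 */
	occ_init_attribute(attr, 0444, show_temp, NULL, 1, i,
			   "temp%d_input", s);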
@@ -855,14 +854,15 @@ static int occ_setup_sensor_attrs(struct occ *occ)
sensors->extended.num_sensors = 0;
}
- occ->attrs = devm_kzalloc(dev, sizeof(*occ->attrs) * num_attrs,
+ occ->attrs = devm_kcalloc(dev, num_attrs, sizeof(*occ->attrs),
GFP_KERNEL);
if (!occ->attrs)
return -ENOMEM;
/* null-terminated list */
- occ->group.attrs = devm_kzalloc(dev, sizeof(*occ->group.attrs) *
- num_attrs + 1, GFP_KERNEL);
+ occ->group.attrs = devm_kcalloc(dev, num_attrs + 1,
+ sizeof(*occ->group.attrs),
+ GFP_KERNEL);
if (!occ->group.attrs)
return -ENOMEM;
@@ -872,43 +872,33 @@ static int occ_setup_sensor_attrs(struct occ *occ)
s = i + 1;
temp = ((struct temp_sensor_2 *)sensors->temp.data) + i;
- snprintf(attr->name, sizeof(attr->name), "temp%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL,
- 0, i);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 0, i, "temp%d_label", s);
attr++;
if (sensors->temp.version == 2 &&
temp->fru_type == OCC_FRU_TYPE_VRM) {
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_alarm", s);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 1, i, "temp%d_alarm", s);
} else {
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_input", s);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 1, i, "temp%d_input", s);
}
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL,
- 1, i);
attr++;
if (sensors->temp.version > 1) {
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_fru_type", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_temp, NULL, 2, i);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 2, i, "temp%d_fru_type", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_fault", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_temp, NULL, 3, i);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 3, i, "temp%d_fault", s);
attr++;
if (sensors->temp.version == 0x10) {
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_max", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_temp, NULL,
- 4, i);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 4, i, "temp%d_max", s);
attr++;
}
}
@@ -917,14 +907,12 @@ static int occ_setup_sensor_attrs(struct occ *occ)
for (i = 0; i < sensors->freq.num_sensors; ++i) {
s = i + 1;
- snprintf(attr->name, sizeof(attr->name), "freq%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL,
- 0, i);
+ occ_init_attribute(attr, 0444, show_freq, NULL,
+ 0, i, "freq%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "freq%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL,
- 1, i);
+ occ_init_attribute(attr, 0444, show_freq, NULL,
+ 1, i, "freq%d_input", s);
attr++;
}
@@ -940,32 +928,24 @@ static int occ_setup_sensor_attrs(struct occ *occ)
s = (i * 4) + 1;
for (j = 0; j < 4; ++j) {
- snprintf(attr->name, sizeof(attr->name),
- "power%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL,
- nr++, i);
+ occ_init_attribute(attr, 0444, show_power,
+ NULL, nr++, i,
+ "power%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_average", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL,
- nr++, i);
+ occ_init_attribute(attr, 0444, show_power,
+ NULL, nr++, i,
+ "power%d_average", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_average_interval", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL,
- nr++, i);
+ occ_init_attribute(attr, 0444, show_power,
+ NULL, nr++, i,
+ "power%d_average_interval", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL,
- nr++, i);
+ occ_init_attribute(attr, 0444, show_power,
+ NULL, nr++, i,
+ "power%d_input", s);
attr++;
s++;
@@ -977,28 +957,20 @@ static int occ_setup_sensor_attrs(struct occ *occ)
for (i = 0; i < sensors->power.num_sensors; ++i) {
s = i + 1;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL, 0, i);
+ occ_init_attribute(attr, 0444, show_power, NULL,
+ 0, i, "power%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_average", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL, 1, i);
+ occ_init_attribute(attr, 0444, show_power, NULL,
+ 1, i, "power%d_average", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_average_interval", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL, 2, i);
+ occ_init_attribute(attr, 0444, show_power, NULL,
+ 2, i, "power%d_average_interval", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL, 3, i);
+ occ_init_attribute(attr, 0444, show_power, NULL,
+ 3, i, "power%d_input", s);
attr++;
}
@@ -1006,56 +978,43 @@ static int occ_setup_sensor_attrs(struct occ *occ)
}
if (sensors->caps.num_sensors >= 1) {
- snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 0, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 0, 0, "power%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_cap", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 1, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 1, 0, "power%d_cap", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 2, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 2, 0, "power%d_input", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_cap_not_redundant", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 3, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 3, 0, "power%d_cap_not_redundant", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_cap_max", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 4, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 4, 0, "power%d_cap_max", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_cap_min", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 5, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 5, 0, "power%d_cap_min", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_cap_user",
- s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0644, show_caps,
- occ_store_caps_user, 6, 0);
+ occ_init_attribute(attr, 0644, show_caps, occ_store_caps_user,
+ 6, 0, "power%d_cap_user", s);
attr++;
if (sensors->caps.version > 1) {
- snprintf(attr->name, sizeof(attr->name),
- "power%d_cap_user_source", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_caps, NULL, 7, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 7, 0, "power%d_cap_user_source", s);
attr++;
if (sensors->caps.version > 2) {
- snprintf(attr->name, sizeof(attr->name),
- "power%d_cap_min_soft", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_caps, NULL,
- 8, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 8, 0,
+ "power%d_cap_min_soft", s);
attr++;
}
}
@@ -1064,19 +1023,16 @@ static int occ_setup_sensor_attrs(struct occ *occ)
for (i = 0; i < sensors->extended.num_sensors; ++i) {
s = i + 1;
- snprintf(attr->name, sizeof(attr->name), "extn%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- occ_show_extended, NULL, 0, i);
+ occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+ 0, i, "extn%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "extn%d_flags", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- occ_show_extended, NULL, 1, i);
+ occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+ 1, i, "extn%d_flags", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "extn%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- occ_show_extended, NULL, 2, i);
+ occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+ 2, i, "extn%d_input", s);
attr++;
}
diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c
deleted file mode 100644
index 83730d931824..000000000000
--- a/drivers/hwmon/oxp-sensors.c
+++ /dev/null
@@ -1,716 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Platform driver for OneXPlayer, AOKZOE, AYANEO, and OrangePi Handhelds
- * that expose fan reading and control via hwmon sysfs.
- *
- * Old OXP boards have the same DMI strings and they are told apart by
- * the boot cpu vendor (Intel/AMD). Of these older models only AMD is
- * supported.
- *
- * Fan control is provided via pwm interface in the range [0-255].
- * Old AMD boards use [0-100] as range in the EC, the written value is
- * scaled to accommodate for that. Newer boards like the mini PRO and
- * AOKZOE are not scaled but have the same EC layout. Newer models
- * like the 2 and X1 are [0-184] and are scaled to 0-255. OrangePi
- * are [1-244] and scaled to 0-255.
- *
- * Copyright (C) 2022 Joaquín I. Aramendía <samsagax@gmail.com>
- * Copyright (C) 2024 Derek J. Clark <derekjohn.clark@gmail.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <linux/hwmon.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/processor.h>
-
-/* Handle ACPI lock mechanism */
-static u32 oxp_mutex;
-
-#define ACPI_LOCK_DELAY_MS 500
-
-static bool lock_global_acpi_lock(void)
-{
- return ACPI_SUCCESS(acpi_acquire_global_lock(ACPI_LOCK_DELAY_MS, &oxp_mutex));
-}
-
-static bool unlock_global_acpi_lock(void)
-{
- return ACPI_SUCCESS(acpi_release_global_lock(oxp_mutex));
-}
-
-enum oxp_board {
- aok_zoe_a1 = 1,
- aya_neo_2,
- aya_neo_air,
- aya_neo_air_1s,
- aya_neo_air_plus_mendo,
- aya_neo_air_pro,
- aya_neo_flip,
- aya_neo_geek,
- aya_neo_kun,
- orange_pi_neo,
- oxp_2,
- oxp_fly,
- oxp_mini_amd,
- oxp_mini_amd_a07,
- oxp_mini_amd_pro,
- oxp_x1,
-};
-
-static enum oxp_board board;
-
-/* Fan reading and PWM */
-#define OXP_SENSOR_FAN_REG 0x76 /* Fan reading is 2 registers long */
-#define OXP_2_SENSOR_FAN_REG 0x58 /* Fan reading is 2 registers long */
-#define OXP_SENSOR_PWM_ENABLE_REG 0x4A /* PWM enable is 1 register long */
-#define OXP_SENSOR_PWM_REG 0x4B /* PWM reading is 1 register long */
-#define PWM_MODE_AUTO 0x00
-#define PWM_MODE_MANUAL 0x01
-
-/* OrangePi fan reading and PWM */
-#define ORANGEPI_SENSOR_FAN_REG 0x78 /* Fan reading is 2 registers long */
-#define ORANGEPI_SENSOR_PWM_ENABLE_REG 0x40 /* PWM enable is 1 register long */
-#define ORANGEPI_SENSOR_PWM_REG 0x38 /* PWM reading is 1 register long */
-
-/* Turbo button takeover function
- * Different boards have different values and EC registers
- * for the same function
- */
-#define OXP_TURBO_SWITCH_REG 0xF1 /* Mini Pro, OneXFly, AOKZOE */
-#define OXP_2_TURBO_SWITCH_REG 0xEB /* OXP2 and X1 */
-#define OXP_MINI_TURBO_SWITCH_REG 0x1E /* Mini AO7 */
-
-#define OXP_MINI_TURBO_TAKE_VAL 0x01 /* Mini AO7 */
-#define OXP_TURBO_TAKE_VAL 0x40 /* All other models */
-
-#define OXP_TURBO_RETURN_VAL 0x00 /* Common return val */
-
-static const struct dmi_system_id dmi_table[] = {
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 AR07"),
- },
- .driver_data = (void *)aok_zoe_a1,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 Pro"),
- },
- .driver_data = (void *)aok_zoe_a1,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
- },
- .driver_data = (void *)aya_neo_2,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
- },
- .driver_data = (void *)aya_neo_air,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
- },
- .driver_data = (void *)aya_neo_air_1s,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
- },
- .driver_data = (void *)aya_neo_air_plus_mendo,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
- },
- .driver_data = (void *)aya_neo_air_pro,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
- },
- .driver_data = (void *)aya_neo_flip,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
- },
- .driver_data = (void *)aya_neo_geek,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
- },
- .driver_data = (void *)aya_neo_kun,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"),
- },
- .driver_data = (void *)orange_pi_neo,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONE XPLAYER"),
- },
- .driver_data = (void *)oxp_mini_amd,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER 2"),
- },
- .driver_data = (void *)oxp_2,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1"),
- },
- .driver_data = (void *)oxp_fly,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER mini A07"),
- },
- .driver_data = (void *)oxp_mini_amd_a07,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER Mini Pro"),
- },
- .driver_data = (void *)oxp_mini_amd_pro,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
- DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1"),
- },
- .driver_data = (void *)oxp_x1,
- },
- {},
-};
-
-/* Helper functions to handle EC read/write */
-static int read_from_ec(u8 reg, int size, long *val)
-{
- int i;
- int ret;
- u8 buffer;
-
- if (!lock_global_acpi_lock())
- return -EBUSY;
-
- *val = 0;
- for (i = 0; i < size; i++) {
- ret = ec_read(reg + i, &buffer);
- if (ret)
- return ret;
- *val <<= i * 8;
- *val += buffer;
- }
-
- if (!unlock_global_acpi_lock())
- return -EBUSY;
-
- return 0;
-}
-
-static int write_to_ec(u8 reg, u8 value)
-{
- int ret;
-
- if (!lock_global_acpi_lock())
- return -EBUSY;
-
- ret = ec_write(reg, value);
-
- if (!unlock_global_acpi_lock())
- return -EBUSY;
-
- return ret;
-}
-
-/* Turbo button toggle functions */
-static int tt_toggle_enable(void)
-{
- u8 reg;
- u8 val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- val = OXP_MINI_TURBO_TAKE_VAL;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- val = OXP_TURBO_TAKE_VAL;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- val = OXP_TURBO_TAKE_VAL;
- break;
- default:
- return -EINVAL;
- }
- return write_to_ec(reg, val);
-}
-
-static int tt_toggle_disable(void)
-{
- u8 reg;
- u8 val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- val = OXP_TURBO_RETURN_VAL;
- break;
- default:
- return -EINVAL;
- }
- return write_to_ec(reg, val);
-}
-
-/* Callbacks for turbo toggle attribute */
-static umode_t tt_toggle_is_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- switch (board) {
- case aok_zoe_a1:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return attr->mode;
- default:
- break;
- }
- return 0;
-}
-
-static ssize_t tt_toggle_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- int rval;
- bool value;
-
- rval = kstrtobool(buf, &value);
- if (rval)
- return rval;
-
- if (value) {
- rval = tt_toggle_enable();
- } else {
- rval = tt_toggle_disable();
- }
- if (rval)
- return rval;
-
- return count;
-}
-
-static ssize_t tt_toggle_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int retval;
- u8 reg;
- long val;
-
- switch (board) {
- case oxp_mini_amd_a07:
- reg = OXP_MINI_TURBO_SWITCH_REG;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- reg = OXP_TURBO_SWITCH_REG;
- break;
- case oxp_2:
- case oxp_x1:
- reg = OXP_2_TURBO_SWITCH_REG;
- break;
- default:
- return -EINVAL;
- }
-
- retval = read_from_ec(reg, 1, &val);
- if (retval)
- return retval;
-
- return sysfs_emit(buf, "%d\n", !!val);
-}
-
-static DEVICE_ATTR_RW(tt_toggle);
-
-/* PWM enable/disable functions */
-static int oxp_pwm_enable(void)
-{
- switch (board) {
- case orange_pi_neo:
- return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
- default:
- return -EINVAL;
- }
-}
-
-static int oxp_pwm_disable(void)
-{
- switch (board) {
- case orange_pi_neo:
- return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
- default:
- return -EINVAL;
- }
-}
-
-/* Callbacks for hwmon interface */
-static umode_t oxp_ec_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type, u32 attr, int channel)
-{
- switch (type) {
- case hwmon_fan:
- return 0444;
- case hwmon_pwm:
- return 0644;
- default:
- return 0;
- }
-}
-
-static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- int ret;
-
- switch (type) {
- case hwmon_fan:
- switch (attr) {
- case hwmon_fan_input:
- switch (board) {
- case orange_pi_neo:
- return read_from_ec(ORANGEPI_SENSOR_FAN_REG, 2, val);
- case oxp_2:
- case oxp_x1:
- return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- return read_from_ec(OXP_SENSOR_FAN_REG, 2, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- case hwmon_pwm:
- switch (attr) {
- case hwmon_pwm_input:
- switch (board) {
- case orange_pi_neo:
- ret = read_from_ec(ORANGEPI_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [1-244] */
- *val = ((*val - 1) * 254 / 243) + 1;
- break;
- case oxp_2:
- case oxp_x1:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [0-184] */
- *val = (*val * 255) / 184;
- break;
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- /* scale from range [0-100] */
- *val = (*val * 255) / 100;
- break;
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- default:
- ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
- if (ret)
- return ret;
- break;
- }
- return 0;
- case hwmon_pwm_enable:
- switch (board) {
- case orange_pi_neo:
- return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val);
- case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_2:
- case oxp_fly:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- case oxp_mini_amd_pro:
- case oxp_x1:
- return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
-static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
-{
- switch (type) {
- case hwmon_pwm:
- switch (attr) {
- case hwmon_pwm_enable:
- if (val == 1)
- return oxp_pwm_enable();
- else if (val == 0)
- return oxp_pwm_disable();
- return -EINVAL;
- case hwmon_pwm_input:
- if (val < 0 || val > 255)
- return -EINVAL;
- switch (board) {
- case orange_pi_neo:
- /* scale to range [1-244] */
- val = ((val - 1) * 243 / 254) + 1;
- return write_to_ec(ORANGEPI_SENSOR_PWM_REG, val);
- case oxp_2:
- case oxp_x1:
- /* scale to range [0-184] */
- val = (val * 184) / 255;
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
- case oxp_mini_amd:
- case oxp_mini_amd_a07:
- /* scale to range [0-100] */
- val = (val * 100) / 255;
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- case aok_zoe_a1:
- case oxp_fly:
- case oxp_mini_amd_pro:
- return write_to_ec(OXP_SENSOR_PWM_REG, val);
- default:
- break;
- }
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
-/* Known sensors in the OXP EC controllers */
-static const struct hwmon_channel_info * const oxp_platform_sensors[] = {
- HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT),
- HWMON_CHANNEL_INFO(pwm,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
- NULL,
-};
-
-static struct attribute *oxp_ec_attrs[] = {
- &dev_attr_tt_toggle.attr,
- NULL
-};
-
-static struct attribute_group oxp_ec_attribute_group = {
- .is_visible = tt_toggle_is_visible,
- .attrs = oxp_ec_attrs,
-};
-
-static const struct attribute_group *oxp_ec_groups[] = {
- &oxp_ec_attribute_group,
- NULL
-};
-
-static const struct hwmon_ops oxp_ec_hwmon_ops = {
- .is_visible = oxp_ec_hwmon_is_visible,
- .read = oxp_platform_read,
- .write = oxp_platform_write,
-};
-
-static const struct hwmon_chip_info oxp_ec_chip_info = {
- .ops = &oxp_ec_hwmon_ops,
- .info = oxp_platform_sensors,
-};
-
-/* Initialization logic */
-static int oxp_platform_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device *hwdev;
-
- hwdev = devm_hwmon_device_register_with_info(dev, "oxpec", NULL,
- &oxp_ec_chip_info, NULL);
-
- return PTR_ERR_OR_ZERO(hwdev);
-}
-
-static struct platform_driver oxp_platform_driver = {
- .driver = {
- .name = "oxp-platform",
- .dev_groups = oxp_ec_groups,
- },
- .probe = oxp_platform_probe,
-};
-
-static struct platform_device *oxp_platform_device;
-
-static int __init oxp_platform_init(void)
-{
- const struct dmi_system_id *dmi_entry;
-
- dmi_entry = dmi_first_match(dmi_table);
- if (!dmi_entry)
- return -ENODEV;
-
- board = (enum oxp_board)(unsigned long)dmi_entry->driver_data;
-
- /*
- * Have to check for AMD processor here because DMI strings are the same
- * between Intel and AMD boards on older OneXPlayer devices, the only way
- * to tell them apart is the CPU. Old Intel boards have an unsupported EC.
- */
- if (board == oxp_mini_amd && boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
- return -ENODEV;
-
- oxp_platform_device =
- platform_create_bundle(&oxp_platform_driver,
- oxp_platform_probe, NULL, 0, NULL, 0);
-
- return PTR_ERR_OR_ZERO(oxp_platform_device);
-}
-
-static void __exit oxp_platform_exit(void)
-{
- platform_device_unregister(oxp_platform_device);
- platform_driver_unregister(&oxp_platform_driver);
-}
-
-MODULE_DEVICE_TABLE(dmi, dmi_table);
-
-module_init(oxp_platform_init);
-module_exit(oxp_platform_exit);
-
-MODULE_AUTHOR("Joaquín Ignacio Aramendía <samsagax@gmail.com>");
-MODULE_DESCRIPTION("Platform driver that handles EC sensors of OneXPlayer devices");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index c9b3c3149982..441f984a859d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -218,6 +218,24 @@ config SENSORS_LM25066_REGULATOR
If you say yes here you get regulator support for National
Semiconductor LM25066, LM5064, and LM5066.
+config SENSORS_LT3074
+ tristate "Analog Devices LT3074"
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices LT3074.
+
+ This driver can also be built as a module. If so, the module will
+ be called lt3074.
+
+config SENSORS_LT3074_REGULATOR
+ tristate "Regulator support for LT3074"
+ depends on SENSORS_LT3074 && REGULATOR
+ help
+ If you say yes here you get regulator support for Analog Devices
+ LT3074. The LT3074 is a low voltage, ultralow noise, high PSRR,
+ low dropout linear regulator. The device supplies up to 3A with a
+ typical dropout voltage of 45mV.
+
config SENSORS_LT7182S
tristate "Analog Devices LT7182S"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 56f128c4653e..29cd8a3317d2 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
obj-$(CONFIG_SENSORS_IRPS5401) += irps5401.o
obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+obj-$(CONFIG_SENSORS_LT3074) += lt3074.o
obj-$(CONFIG_SENSORS_LT7182S) += lt7182s.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 40b0dda32ea6..dd7275a67a0a 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -437,7 +437,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
#if IS_ENABLED(CONFIG_SENSORS_LM25066_REGULATOR)
static const struct regulator_desc lm25066_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/lt3074.c b/drivers/hwmon/pmbus/lt3074.c
new file mode 100644
index 000000000000..3704dbe7b54a
--- /dev/null
+++ b/drivers/hwmon/pmbus/lt3074.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware monitoring driver for Analog Devices LT3074
+ *
+ * Copyright (C) 2025 Analog Devices, Inc.
+ */
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define LT3074_MFR_READ_VBIAS 0xc6
+#define LT3074_MFR_BIAS_OV_WARN_LIMIT 0xc7
+#define LT3074_MFR_BIAS_UV_WARN_LIMIT 0xc8
+#define LT3074_MFR_SPECIAL_ID 0xe7
+
+#define LT3074_SPECIAL_ID_VALUE 0x1c1d
+
+static const struct regulator_desc __maybe_unused lt3074_reg_desc[] = {
+ PMBUS_REGULATOR_ONE("regulator"),
+};
+
+static int lt3074_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_READ_VBIAS);
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_BIAS_UV_WARN_LIMIT);
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ return pmbus_read_word_data(client, page, phase,
+ LT3074_MFR_BIAS_OV_WARN_LIMIT);
+ default:
+ return -ENODATA;
+ }
+}
+
+static int lt3074_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+ return pmbus_write_word_data(client, 0,
+ LT3074_MFR_BIAS_UV_WARN_LIMIT,
+ word);
+ case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+ return pmbus_write_word_data(client, 0,
+ LT3074_MFR_BIAS_OV_WARN_LIMIT,
+ word);
+ default:
+ return -ENODATA;
+ }
+}
+
+static struct pmbus_driver_info lt3074_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_VMON |
+ PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = lt3074_read_word_data,
+ .write_word_data = lt3074_write_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_LT3074_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = lt3074_reg_desc,
+#endif
+};
+
+static int lt3074_probe(struct i2c_client *client)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_word_data(client, LT3074_MFR_SPECIAL_ID);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to read ID\n");
+
+ if (ret != LT3074_SPECIAL_ID_VALUE)
+ return dev_err_probe(dev, -ENODEV, "ID mismatch\n");
+
+ return pmbus_do_probe(client, &lt3074_info);
+}
+
+static const struct i2c_device_id lt3074_id[] = {
+ { "lt3074", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, lt3074_id);
+
+static const struct of_device_id __maybe_unused lt3074_of_match[] = {
+ { .compatible = "adi,lt3074" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lt3074_of_match);
+
+static struct i2c_driver lt3074_driver = {
+ .driver = {
+ .name = "lt3074",
+ .of_match_table = of_match_ptr(lt3074_of_match),
+ },
+ .probe = lt3074_probe,
+ .id_table = lt3074_id,
+};
+module_i2c_driver(lt3074_driver);
+
+MODULE_AUTHOR("Cedric Encarnacion <cedricjustine.encarnacion@analog.com>");
+MODULE_DESCRIPTION("PMBus driver for Analog Devices LT3074");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
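
The hooks intercept only the three bias-rail virtual registers and return -ENODATA for everything else; the PMBus core takes -ENODATA from these callbacks as "no override" and falls back to its standard register handling. A minimal sketch of the same convention:

	static int example_read_word_data(struct i2c_client *client, int page,
					  int phase, int reg)
	{
		switch (reg) {
		case PMBUS_VIRT_READ_VMON:
			/* redirect the virtual register to the vendor one */
			return pmbus_read_word_data(client, page, phase,
						    LT3074_MFR_READ_VBIAS);
		default:
			return -ENODATA;	/* core handles the register */
		}
	}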
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index c9dda33831ff..56834d26f8ef 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -12,9 +12,26 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/delay.h>
#include "pmbus.h"
-enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
+enum chips {
+ adpm12160,
+ max34440,
+ max34441,
+ max34446,
+ max34451,
+ max34460,
+ max34461,
+};
+
+/*
+ * Firmware is sometimes not ready if we try to read data
+ * from the page immediately after setting it. Maxim
+ * recommends a 50us delay because the chip fails to clock
+ * stretch long enough here.
+ */
+#define MAX34440_PAGE_CHANGE_DELAY 50
#define MAX34440_MFR_VOUT_PEAK 0xd4
#define MAX34440_MFR_IOUT_PEAK 0xd5
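
The 50 microseconds are applied in two ways below: an explicit fsleep() in max34451_set_supported_funcs(), which walks the pages before pmbus_do_probe() attaches the core, and the new page_change_delay field that the core then enforces after every PAGE write. Illustrative placement in a driver info struct:

	static struct pmbus_driver_info example_info = {
		.pages = 14,
		/* core sleeps at least this long after each PAGE write */
		.page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
		/* ... */
	};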
@@ -34,16 +51,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
/*
* The whole max344* family have IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
* swapped from the standard pmbus spec addresses.
+ * For max34451, revision MAX34451ETNA6+ and later have this issue fixed.
*/
#define MAX34440_IOUT_OC_WARN_LIMIT 0x46
#define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A
+#define MAX34451ETNA6_MFR_REV 0x0012
+
#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
struct max34440_data {
int id;
struct pmbus_driver_info info;
+ u8 iout_oc_warn_limit;
+ u8 iout_oc_fault_limit;
};
#define to_max34440_data(x) container_of(x, struct max34440_data, info)
@@ -60,11 +82,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
- MAX34440_IOUT_OC_FAULT_LIMIT);
+ data->iout_oc_fault_limit);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
- MAX34440_IOUT_OC_WARN_LIMIT);
+ data->iout_oc_warn_limit);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page, phase,
@@ -75,7 +97,8 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
MAX34440_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_AVG:
- if (data->id != max34446 && data->id != max34451)
+ if (data->id != max34446 && data->id != max34451 &&
+ data->id != adpm12160)
return -ENXIO;
ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_IOUT_AVG);
@@ -133,11 +156,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
+ ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit,
word);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
+ ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit,
word);
break;
case PMBUS_VIRT_RESET_POUT_HISTORY:
@@ -159,7 +182,8 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
- if (!ret && (data->id == max34446 || data->id == max34451))
+ if (!ret && (data->id == max34446 || data->id == max34451 ||
+ data->id == adpm12160))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -235,9 +259,29 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
*/
int page, rv;
+ bool max34451_na6 = false;
+
+ rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION);
+ if (rv < 0)
+ return rv;
+
+ if (rv >= MAX34451ETNA6_MFR_REV) {
+ max34451_na6 = true;
+ data->info.format[PSC_VOLTAGE_IN] = direct;
+ data->info.format[PSC_CURRENT_IN] = direct;
+ data->info.m[PSC_VOLTAGE_IN] = 1;
+ data->info.b[PSC_VOLTAGE_IN] = 0;
+ data->info.R[PSC_VOLTAGE_IN] = 3;
+ data->info.m[PSC_CURRENT_IN] = 1;
+ data->info.b[PSC_CURRENT_IN] = 0;
+ data->info.R[PSC_CURRENT_IN] = 2;
+ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
+ }
for (page = 0; page < 16; page++) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ fsleep(MAX34440_PAGE_CHANGE_DELAY);
if (rv < 0)
return rv;
@@ -251,16 +295,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
case 0x20:
data->info.func[page] = PMBUS_HAVE_VOUT |
PMBUS_HAVE_STATUS_VOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_VIN |
+ PMBUS_HAVE_STATUS_INPUT;
break;
case 0x21:
data->info.func[page] = PMBUS_HAVE_VOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_VIN;
break;
case 0x22:
data->info.func[page] = PMBUS_HAVE_IOUT |
PMBUS_HAVE_STATUS_IOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_IIN |
+ PMBUS_HAVE_STATUS_INPUT;
break;
case 0x23:
data->info.func[page] = PMBUS_HAVE_IOUT;
+
+ if (max34451_na6)
+ data->info.func[page] |= PMBUS_HAVE_IIN;
break;
default:
break;
@@ -271,6 +329,41 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
}
static struct pmbus_driver_info max34440_info[] = {
+ [adpm12160] = {
+ .pages = 19,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 2,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 2,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ /* pages below [18] with no func entry are not for monitoring */
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[7] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[8] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[9] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[10] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34440] = {
.pages = 14,
.format[PSC_VOLTAGE_IN] = direct,
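
For reference, PMBus direct format decodes a raw word Y as X = (Y * 10^-R - b) / m. With the adpm12160 coefficients above (m = 1, b = 0 throughout), R = 2 gives current and temperature words two implied decimal places, while R = 0 leaves voltage words unscaled. A worked example with an illustrative reading:

	/* PSC_CURRENT_OUT: m = 1, b = 0, R = 2.
	 * Raw 1234 decodes to (1234 * 10^-2 - 0) / 1 = 12.34 A.
	 */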
@@ -312,6 +405,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34441] = {
.pages = 12,
@@ -355,6 +449,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34446] = {
.pages = 7,
@@ -392,6 +487,7 @@ static struct pmbus_driver_info max34440_info[] = {
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34451] = {
.pages = 21,
@@ -415,6 +511,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34460] = {
.pages = 18,
@@ -445,6 +542,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
[max34461] = {
.pages = 23,
@@ -480,6 +578,7 @@ static struct pmbus_driver_info max34440_info[] = {
.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
+ .page_change_delay = MAX34440_PAGE_CHANGE_DELAY,
},
};
@@ -494,17 +593,23 @@ static int max34440_probe(struct i2c_client *client)
return -ENOMEM;
data->id = i2c_match_id(max34440_id, client)->driver_data;
data->info = max34440_info[data->id];
+ data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT;
if (data->id == max34451) {
rv = max34451_set_supported_funcs(client, data);
if (rv)
return rv;
+ } else if (data->id == adpm12160) {
+ data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
+ data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
}
return pmbus_do_probe(client, &data->info);
}
static const struct i2c_device_id max34440_id[] = {
+ {"adpm12160", adpm12160},
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
index c1e2d0cb2fd0..8f10e37a7a76 100644
--- a/drivers/hwmon/pmbus/mpq7932.c
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -51,8 +51,8 @@ static const struct regulator_desc mpq7932_regulators_desc[] = {
};
static const struct regulator_desc mpq7932_regulators_desc_one[] = {
- PMBUS_REGULATOR_STEP_ONE("buck", MPQ7932_N_VOLTAGES,
- MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP_ONE_NODE("buck", MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
};
#endif
diff --git a/drivers/hwmon/pmbus/mpq8785.c b/drivers/hwmon/pmbus/mpq8785.c
index 331c274ca892..1f56aaf4dde8 100644
--- a/drivers/hwmon/pmbus/mpq8785.c
+++ b/drivers/hwmon/pmbus/mpq8785.c
@@ -4,10 +4,23 @@
*/
#include <linux/i2c.h>
+#include <linux/bitops.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/of_device.h>
#include "pmbus.h"
+#define MPM82504_READ_TEMPERATURE_1_SIGN_POS 9
+
+enum chips { mpm3695, mpm3695_25, mpm82504, mpq8785 };
+
+static u16 voltage_scale_loop_max_val[] = {
+ [mpm3695] = GENMASK(9, 0),
+ [mpm3695_25] = GENMASK(11, 0),
+ [mpm82504] = GENMASK(9, 0),
+ [mpq8785] = GENMASK(10, 0),
+};
+
static int mpq8785_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
@@ -34,6 +47,20 @@ static int mpq8785_identify(struct i2c_client *client,
return 0;
};
+static int mpm82504_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+
+ if (ret < 0 || reg != PMBUS_READ_TEMPERATURE_1)
+ return ret;
+
+ /* Fix PMBUS_READ_TEMPERATURE_1 signedness */
+ return sign_extend32(ret, MPM82504_READ_TEMPERATURE_1_SIGN_POS) & 0xffff;
+}
+
static struct pmbus_driver_info mpq8785_info = {
.pages = 1,
.format[PSC_VOLTAGE_IN] = direct,
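
sign_extend32(ret, 9) takes bit 9 as the sign bit, so the chip's 10-bit two's-complement temperature is widened correctly before being masked back to the 16-bit register word the core decodes. A worked example, assuming a 10-bit reading as the define above implies:

	/* Raw 0x3ff is -1 in 10-bit two's complement. */
	int v = sign_extend32(0x3ff, MPM82504_READ_TEMPERATURE_1_SIGN_POS);
	/* v == -1; masking hands the 16-bit word 0xffff back to the core. */
	return v & 0xffff;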
@@ -53,26 +80,74 @@ static struct pmbus_driver_info mpq8785_info = {
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .identify = mpq8785_identify,
-};
-
-static int mpq8785_probe(struct i2c_client *client)
-{
- return pmbus_do_probe(client, &mpq8785_info);
};
static const struct i2c_device_id mpq8785_id[] = {
- { "mpq8785" },
+ { "mpm3695", mpm3695 },
+ { "mpm3695-25", mpm3695_25 },
+ { "mpm82504", mpm82504 },
+ { "mpq8785", mpq8785 },
{ },
};
MODULE_DEVICE_TABLE(i2c, mpq8785_id);
static const struct of_device_id __maybe_unused mpq8785_of_match[] = {
- { .compatible = "mps,mpq8785" },
+ { .compatible = "mps,mpm3695", .data = (void *)mpm3695 },
+ { .compatible = "mps,mpm3695-25", .data = (void *)mpm3695_25 },
+ { .compatible = "mps,mpm82504", .data = (void *)mpm82504 },
+ { .compatible = "mps,mpq8785", .data = (void *)mpq8785 },
{}
};
MODULE_DEVICE_TABLE(of, mpq8785_of_match);
+static int mpq8785_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ enum chips chip_id;
+ u32 voltage_scale;
+ int ret;
+
+ info = devm_kmemdup(dev, &mpq8785_info, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (dev->of_node)
+ chip_id = (kernel_ulong_t)of_device_get_match_data(dev);
+ else
+ chip_id = (kernel_ulong_t)i2c_get_match_data(client);
+
+ switch (chip_id) {
+ case mpm3695:
+ case mpm3695_25:
+ case mpm82504:
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->m[PSC_VOLTAGE_OUT] = 8;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = 2;
+ info->read_word_data = mpm82504_read_word_data;
+ break;
+ case mpq8785:
+ info->identify = mpq8785_identify;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (!device_property_read_u32(dev, "mps,vout-fb-divider-ratio-permille",
+ &voltage_scale)) {
+ if (voltage_scale > voltage_scale_loop_max_val[chip_id])
+ return -EINVAL;
+
+ ret = i2c_smbus_write_word_data(client, PMBUS_VOUT_SCALE_LOOP,
+ voltage_scale);
+ if (ret)
+ return ret;
+ }
+
+ return pmbus_do_probe(client, info);
+};
+
static struct i2c_driver mpq8785_driver = {
.driver = {
.name = "mpq8785",
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index ddb19c9726d6..d2e9bfb5320f 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -482,6 +482,7 @@ struct pmbus_driver_info {
*/
int access_delay; /* in microseconds */
int write_delay; /* in microseconds */
+ int page_change_delay; /* in microseconds */
};
/* Regulator ops */
@@ -508,11 +509,11 @@ int pmbus_regulator_init_cb(struct regulator_dev *rdev,
#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0, 0)
-#define PMBUS_REGULATOR_STEP_ONE(_name, _voltages, _step, _min_uV) \
+#define __PMBUS_REGULATOR_STEP_ONE(_name, _node, _voltages, _step, _min_uV) \
{ \
.name = (_name), \
.of_match = of_match_ptr(_name), \
- .regulators_node = of_match_ptr("regulators"), \
+ .regulators_node = of_match_ptr(_node), \
.ops = &pmbus_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -522,7 +523,19 @@ int pmbus_regulator_init_cb(struct regulator_dev *rdev,
.init_cb = pmbus_regulator_init_cb, \
}
-#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
+/*
+ * _NODE macros are defined for historic reasons and MUST NOT be used in new
+ * drivers.
+ */
+#define PMBUS_REGULATOR_STEP_ONE_NODE(_name, _voltages, _step, _min_uV) \
+ __PMBUS_REGULATOR_STEP_ONE(_name, "regulators", _voltages, _step, _min_uV)
+
+#define PMBUS_REGULATOR_ONE_NODE(_name) PMBUS_REGULATOR_STEP_ONE_NODE(_name, 0, 0, 0)
+
+#define PMBUS_REGULATOR_STEP_ONE(_name, _voltages, _step, _min_uV) \
+ __PMBUS_REGULATOR_STEP_ONE(_name, NULL, _voltages, _step, _min_uV)
+
+#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
/* Function declarations */
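
Spelling out the two single-output macros makes the split visible: the legacy _NODE variants still point the regulator core at a "regulators" child node, while the plain variants leave regulators_node unset so matching happens without that extra level. Partial expansions, schematically:

	/* PMBUS_REGULATOR_ONE_NODE("vout"), the legacy binding: */
	.of_match = of_match_ptr("vout"),
	.regulators_node = of_match_ptr("regulators"),

	/* PMBUS_REGULATOR_ONE("vout"), no subnode required: */
	.of_match = of_match_ptr("vout"),
	.regulators_node = of_match_ptr(NULL),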
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index cfeba2e4c5c3..be6d05def115 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -32,6 +32,13 @@
#define PMBUS_ATTR_ALLOC_SIZE 32
#define PMBUS_NAME_SIZE 24
+/*
+ * Operation type flags, used to pick the delay enforced between
+ * successive pmbus operations.
+ */
+#define PMBUS_OP_WRITE BIT(0)
+#define PMBUS_OP_PAGE_CHANGE BIT(1)
+
static int wp = -1;
module_param(wp, int, 0444);
@@ -113,8 +120,8 @@ struct pmbus_data {
int vout_low[PMBUS_PAGES]; /* voltage low margin */
int vout_high[PMBUS_PAGES]; /* voltage high margin */
- ktime_t write_time; /* Last SMBUS write timestamp */
- ktime_t access_time; /* Last SMBUS access timestamp */
+
+ ktime_t next_access_backoff; /* Wait until at least this time */
};
struct pmbus_debugfs_entry {
@@ -169,32 +176,26 @@ EXPORT_SYMBOL_NS_GPL(pmbus_set_update, "PMBUS");
static void pmbus_wait(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- const struct pmbus_driver_info *info = data->info;
- s64 delta;
+ s64 delay = ktime_us_delta(data->next_access_backoff, ktime_get());
- if (info->access_delay) {
- delta = ktime_us_delta(ktime_get(), data->access_time);
-
- if (delta < info->access_delay)
- fsleep(info->access_delay - delta);
- } else if (info->write_delay) {
- delta = ktime_us_delta(ktime_get(), data->write_time);
-
- if (delta < info->write_delay)
- fsleep(info->write_delay - delta);
- }
+ if (delay > 0)
+ fsleep(delay);
}
-/* Sets the last accessed timestamp for pmbus_wait */
-static void pmbus_update_ts(struct i2c_client *client, bool write_op)
+/* Sets the last operation timestamp for pmbus_wait */
+static void pmbus_update_ts(struct i2c_client *client, int op)
{
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
+ int delay = info->access_delay;
+
+ if (op & PMBUS_OP_WRITE)
+ delay = max(delay, info->write_delay);
+ if (op & PMBUS_OP_PAGE_CHANGE)
+ delay = max(delay, info->page_change_delay);
- if (info->access_delay)
- data->access_time = ktime_get();
- else if (info->write_delay && write_op)
- data->write_time = ktime_get();
+ if (delay > 0)
+ data->next_access_backoff = ktime_add_us(ktime_get(), delay);
}
int pmbus_set_page(struct i2c_client *client, int page, int phase)
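
Folding the two timestamps into a single next_access_backoff deadline also fixes a policy quirk: previously, setting access_delay caused write_delay to be ignored entirely (the "else if"), whereas now every operation records the strictest delay that applies to it and the next access sleeps only for whatever remains. Condensed, using the same fields:

	/* after an operation */
	int delay = info->access_delay;
	if (op & PMBUS_OP_WRITE)
		delay = max(delay, info->write_delay);
	if (op & PMBUS_OP_PAGE_CHANGE)
		delay = max(delay, info->page_change_delay);
	if (delay > 0)
		data->next_access_backoff = ktime_add_us(ktime_get(), delay);

	/* before the next operation */
	s64 left = ktime_us_delta(data->next_access_backoff, ktime_get());
	if (left > 0)
		fsleep(left);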
@@ -209,13 +210,13 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
data->info->pages > 1 && page != data->currpage) {
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE | PMBUS_OP_PAGE_CHANGE);
if (rv < 0)
return rv;
pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (rv < 0)
return rv;
@@ -229,7 +230,7 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, PMBUS_PHASE,
phase);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
if (rv)
return rv;
}
@@ -249,7 +250,7 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
pmbus_wait(client);
rv = i2c_smbus_write_byte(client, value);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -284,7 +285,7 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
pmbus_wait(client);
rv = i2c_smbus_write_word_data(client, reg, word);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -405,7 +406,7 @@ int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
pmbus_wait(client);
rv = i2c_smbus_read_word_data(client, reg);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -468,7 +469,7 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
pmbus_wait(client);
rv = i2c_smbus_read_byte_data(client, reg);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -484,7 +485,7 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
pmbus_wait(client);
rv = i2c_smbus_write_byte_data(client, reg, value);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
return rv;
}
@@ -520,7 +521,7 @@ static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg,
pmbus_wait(client);
rv = i2c_smbus_read_block_data(client, reg, data_buf);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
return rv;
}
@@ -2524,7 +2525,7 @@ static int pmbus_read_coefficients(struct i2c_client *client,
rv = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
I2C_SMBUS_WRITE, PMBUS_COEFFICIENTS,
I2C_SMBUS_BLOCK_PROC_CALL, &data);
- pmbus_update_ts(client, true);
+ pmbus_update_ts(client, PMBUS_OP_WRITE);
if (rv < 0)
return rv;
@@ -2728,7 +2729,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (!(data->flags & PMBUS_NO_CAPABILITY)) {
pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) {
if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC))
@@ -2744,13 +2745,13 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
data->read_status = pmbus_read_status_word;
pmbus_wait(client);
ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret < 0 || ret == 0xffff) {
data->read_status = pmbus_read_status_byte;
pmbus_wait(client);
ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
- pmbus_update_ts(client, false);
+ pmbus_update_ts(client, 0);
if (ret < 0 || ret == 0xff) {
dev_err(dev, "PMBus status register not found\n");
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
index 07fe58c24485..d902d39f49f4 100644
--- a/drivers/hwmon/pmbus/tda38640.c
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -15,7 +15,7 @@
#include "pmbus.h"
static const struct regulator_desc __maybe_unused tda38640_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
struct tda38640_data {
diff --git a/drivers/hwmon/pmbus/tps25990.c b/drivers/hwmon/pmbus/tps25990.c
index 0d2655e69549..c13edd7e1abf 100644
--- a/drivers/hwmon/pmbus/tps25990.c
+++ b/drivers/hwmon/pmbus/tps25990.c
@@ -333,7 +333,7 @@ static int tps25990_write_byte_data(struct i2c_client *client,
#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
static const struct regulator_desc tps25990_reg_desc[] = {
- PMBUS_REGULATOR_ONE("vout"),
+ PMBUS_REGULATOR_ONE_NODE("vout"),
};
#endif
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 9b0eadc81a2e..2bc8cccb01fd 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -212,8 +212,8 @@ static int ucd9000_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(ret & UCD9000_GPIO_CONFIG_STATUS);
}
-static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct i2c_client *client = gpiochip_get_data(gc);
int ret;
@@ -222,17 +222,17 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0) {
dev_dbg(&client->dev, "failed to read GPIO %d config: %d\n",
offset, ret);
- return;
+ return ret;
}
if (value) {
if (ret & UCD9000_GPIO_CONFIG_STATUS)
- return;
+ return 0;
ret |= UCD9000_GPIO_CONFIG_STATUS;
} else {
if (!(ret & UCD9000_GPIO_CONFIG_STATUS))
- return;
+ return 0;
ret &= ~UCD9000_GPIO_CONFIG_STATUS;
}
@@ -244,7 +244,7 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0) {
dev_dbg(&client->dev, "Failed to write GPIO %d config: %d\n",
offset, ret);
- return;
+ return ret;
}
ret &= ~UCD9000_GPIO_CONFIG_ENABLE;
@@ -253,6 +253,8 @@ static void ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
if (ret < 0)
dev_dbg(&client->dev, "Failed to write GPIO %d config: %d\n",
offset, ret);
+
+ return ret;
}
static int ucd9000_gpio_get_direction(struct gpio_chip *gc,
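
.set_rv is the int-returning variant of the GPIO line setter; converting to it lets I2C errors surface to gpiolib instead of being swallowed by a void callback, which matters for a chip where every GPIO access is a bus transaction. The shape of the conversion, schematically:

	/* old: errors could only be logged inside the driver */
	void (*set)(struct gpio_chip *gc, unsigned int offset, int value);

	/* new: errors propagate out of the setter */
	int (*set_rv)(struct gpio_chip *gc, unsigned int offset, int value);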
@@ -362,7 +364,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
data->gpio.direction_input = ucd9000_gpio_direction_input;
data->gpio.direction_output = ucd9000_gpio_direction_output;
data->gpio.get = ucd9000_gpio_get;
- data->gpio.set = ucd9000_gpio_set;
+ data->gpio.set_rv = ucd9000_gpio_set;
data->gpio.can_sleep = true;
data->gpio.base = -1;
data->gpio.parent = &client->dev;
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index d506a5e7e033..d0fe53451bdf 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -78,7 +78,7 @@ static irqreturn_t pulse_handler(int irq, void *dev_id)
static void sample_timer(struct timer_list *t)
{
- struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
+ struct pwm_fan_ctx *ctx = timer_container_of(ctx, t, rpm_timer);
unsigned int delta = ktime_ms_delta(ktime_get(), ctx->sample_start);
int i;
@@ -620,8 +620,8 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (tach->irq == -EPROBE_DEFER)
return tach->irq;
if (tach->irq > 0) {
- ret = devm_request_irq(dev, tach->irq, pulse_handler, 0,
- pdev->name, tach);
+ ret = devm_request_irq(dev, tach->irq, pulse_handler,
+ IRQF_NO_THREAD, pdev->name, tach);
if (ret) {
dev_err(dev,
"Failed to request interrupt: %d\n",
diff --git a/drivers/hwmon/qnap-mcu-hwmon.c b/drivers/hwmon/qnap-mcu-hwmon.c
index 29057514739c..e86e64c4d391 100644
--- a/drivers/hwmon/qnap-mcu-hwmon.c
+++ b/drivers/hwmon/qnap-mcu-hwmon.c
@@ -6,7 +6,6 @@
* Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
*/
-#include <linux/fwnode.h>
#include <linux/hwmon.h>
#include <linux/mfd/qnap-mcu.h>
#include <linux/module.h>
diff --git a/drivers/hwmon/sbrmi.c b/drivers/hwmon/sbrmi.c
deleted file mode 100644
index d48d8e5460ff..000000000000
--- a/drivers/hwmon/sbrmi.c
+++ /dev/null
@@ -1,357 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * sbrmi.c - hwmon driver for a SB-RMI mailbox
- * compliant AMD SoC device.
- *
- * Copyright (C) 2020-2021 Advanced Micro Devices, Inc.
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/hwmon.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-
-/* Do not allow setting negative power limit */
-#define SBRMI_PWR_MIN 0
-/* Mask for Status Register bit[1] */
-#define SW_ALERT_MASK 0x2
-
-/* Software Interrupt for triggering */
-#define START_CMD 0x80
-#define TRIGGER_MAILBOX 0x01
-
-/*
- * SB-RMI supports soft mailbox service request to MP1 (power management
- * firmware) through SBRMI inbound/outbound message registers.
- * SB-RMI message IDs
- */
-enum sbrmi_msg_id {
- SBRMI_READ_PKG_PWR_CONSUMPTION = 0x1,
- SBRMI_WRITE_PKG_PWR_LIMIT,
- SBRMI_READ_PKG_PWR_LIMIT,
- SBRMI_READ_PKG_MAX_PWR_LIMIT,
-};
-
-/* SB-RMI registers */
-enum sbrmi_reg {
- SBRMI_CTRL = 0x01,
- SBRMI_STATUS,
- SBRMI_OUTBNDMSG0 = 0x30,
- SBRMI_OUTBNDMSG1,
- SBRMI_OUTBNDMSG2,
- SBRMI_OUTBNDMSG3,
- SBRMI_OUTBNDMSG4,
- SBRMI_OUTBNDMSG5,
- SBRMI_OUTBNDMSG6,
- SBRMI_OUTBNDMSG7,
- SBRMI_INBNDMSG0,
- SBRMI_INBNDMSG1,
- SBRMI_INBNDMSG2,
- SBRMI_INBNDMSG3,
- SBRMI_INBNDMSG4,
- SBRMI_INBNDMSG5,
- SBRMI_INBNDMSG6,
- SBRMI_INBNDMSG7,
- SBRMI_SW_INTERRUPT,
-};
-
-/* Each client has this additional data */
-struct sbrmi_data {
- struct i2c_client *client;
- struct mutex lock;
- u32 pwr_limit_max;
-};
-
-struct sbrmi_mailbox_msg {
- u8 cmd;
- bool read;
- u32 data_in;
- u32 data_out;
-};
-
-static int sbrmi_enable_alert(struct i2c_client *client)
-{
- int ctrl;
-
- /*
- * Enable the SB-RMI Software alert status
- * by writing 0 to bit 4 of Control register(0x1)
- */
- ctrl = i2c_smbus_read_byte_data(client, SBRMI_CTRL);
- if (ctrl < 0)
- return ctrl;
-
- if (ctrl & 0x10) {
- ctrl &= ~0x10;
- return i2c_smbus_write_byte_data(client,
- SBRMI_CTRL, ctrl);
- }
-
- return 0;
-}
-
-static int rmi_mailbox_xfer(struct sbrmi_data *data,
- struct sbrmi_mailbox_msg *msg)
-{
- int i, ret, retry = 10;
- int sw_status;
- u8 byte;
-
- mutex_lock(&data->lock);
-
- /* Indicate firmware a command is to be serviced */
- ret = i2c_smbus_write_byte_data(data->client,
- SBRMI_INBNDMSG7, START_CMD);
- if (ret < 0)
- goto exit_unlock;
-
- /* Write the command to SBRMI::InBndMsg_inst0 */
- ret = i2c_smbus_write_byte_data(data->client,
- SBRMI_INBNDMSG0, msg->cmd);
- if (ret < 0)
- goto exit_unlock;
-
- /*
- * For both read and write the initiator (BMC) writes
- * Command Data In[31:0] to SBRMI::InBndMsg_inst[4:1]
- * SBRMI_x3C(MSB):SBRMI_x39(LSB)
- */
- for (i = 0; i < 4; i++) {
- byte = (msg->data_in >> i * 8) & 0xff;
- ret = i2c_smbus_write_byte_data(data->client,
- SBRMI_INBNDMSG1 + i, byte);
- if (ret < 0)
- goto exit_unlock;
- }
-
- /*
- * Write 0x01 to SBRMI::SoftwareInterrupt to notify firmware to
- * perform the requested read or write command
- */
- ret = i2c_smbus_write_byte_data(data->client,
- SBRMI_SW_INTERRUPT, TRIGGER_MAILBOX);
- if (ret < 0)
- goto exit_unlock;
-
- /*
- * Firmware will write SBRMI::Status[SwAlertSts]=1 to generate
- * an ALERT (if enabled) to initiator (BMC) to indicate completion
- * of the requested command
- */
- do {
- sw_status = i2c_smbus_read_byte_data(data->client,
- SBRMI_STATUS);
- if (sw_status < 0) {
- ret = sw_status;
- goto exit_unlock;
- }
- if (sw_status & SW_ALERT_MASK)
- break;
- usleep_range(50, 100);
- } while (retry--);
-
- if (retry < 0) {
- dev_err(&data->client->dev,
- "Firmware fail to indicate command completion\n");
- ret = -EIO;
- goto exit_unlock;
- }
-
- /*
- * For a read operation, the initiator (BMC) reads the firmware
- * response Command Data Out[31:0] from SBRMI::OutBndMsg_inst[4:1]
- * {SBRMI_x34(MSB):SBRMI_x31(LSB)}.
- */
- if (msg->read) {
- for (i = 0; i < 4; i++) {
- ret = i2c_smbus_read_byte_data(data->client,
- SBRMI_OUTBNDMSG1 + i);
- if (ret < 0)
- goto exit_unlock;
- msg->data_out |= ret << i * 8;
- }
- }
-
- /*
- * BMC must write 1'b1 to SBRMI::Status[SwAlertSts] to clear the
- * ALERT to initiator
- */
- ret = i2c_smbus_write_byte_data(data->client, SBRMI_STATUS,
- sw_status | SW_ALERT_MASK);
-
-exit_unlock:
- mutex_unlock(&data->lock);
- return ret;
-}
-
-static int sbrmi_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct sbrmi_data *data = dev_get_drvdata(dev);
- struct sbrmi_mailbox_msg msg = { 0 };
- int ret;
-
- if (type != hwmon_power)
- return -EINVAL;
-
- msg.read = true;
- switch (attr) {
- case hwmon_power_input:
- msg.cmd = SBRMI_READ_PKG_PWR_CONSUMPTION;
- ret = rmi_mailbox_xfer(data, &msg);
- break;
- case hwmon_power_cap:
- msg.cmd = SBRMI_READ_PKG_PWR_LIMIT;
- ret = rmi_mailbox_xfer(data, &msg);
- break;
- case hwmon_power_cap_max:
- msg.data_out = data->pwr_limit_max;
- ret = 0;
- break;
- default:
- return -EINVAL;
- }
- if (ret < 0)
- return ret;
- /* hwmon power attributes are in microWatt */
- *val = (long)msg.data_out * 1000;
- return ret;
-}
-
-static int sbrmi_write(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
-{
- struct sbrmi_data *data = dev_get_drvdata(dev);
- struct sbrmi_mailbox_msg msg = { 0 };
-
- if (type != hwmon_power && attr != hwmon_power_cap)
- return -EINVAL;
- /*
- * hwmon power attributes are in microWatt
- * mailbox read/write is in mWatt
- */
- val /= 1000;
-
- val = clamp_val(val, SBRMI_PWR_MIN, data->pwr_limit_max);
-
- msg.cmd = SBRMI_WRITE_PKG_PWR_LIMIT;
- msg.data_in = val;
- msg.read = false;
-
- return rmi_mailbox_xfer(data, &msg);
-}
-
-static umode_t sbrmi_is_visible(const void *data,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- switch (type) {
- case hwmon_power:
- switch (attr) {
- case hwmon_power_input:
- case hwmon_power_cap_max:
- return 0444;
- case hwmon_power_cap:
- return 0644;
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-static const struct hwmon_channel_info * const sbrmi_info[] = {
- HWMON_CHANNEL_INFO(power,
- HWMON_P_INPUT | HWMON_P_CAP | HWMON_P_CAP_MAX),
- NULL
-};
-
-static const struct hwmon_ops sbrmi_hwmon_ops = {
- .is_visible = sbrmi_is_visible,
- .read = sbrmi_read,
- .write = sbrmi_write,
-};
-
-static const struct hwmon_chip_info sbrmi_chip_info = {
- .ops = &sbrmi_hwmon_ops,
- .info = sbrmi_info,
-};
-
-static int sbrmi_get_max_pwr_limit(struct sbrmi_data *data)
-{
- struct sbrmi_mailbox_msg msg = { 0 };
- int ret;
-
- msg.cmd = SBRMI_READ_PKG_MAX_PWR_LIMIT;
- msg.read = true;
- ret = rmi_mailbox_xfer(data, &msg);
- if (ret < 0)
- return ret;
- data->pwr_limit_max = msg.data_out;
-
- return ret;
-}
-
-static int sbrmi_probe(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- struct device *hwmon_dev;
- struct sbrmi_data *data;
- int ret;
-
- data = devm_kzalloc(dev, sizeof(struct sbrmi_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->client = client;
- mutex_init(&data->lock);
-
- /* Enable alert for SB-RMI sequence */
- ret = sbrmi_enable_alert(client);
- if (ret < 0)
- return ret;
-
- /* Cache maximum power limit */
- ret = sbrmi_get_max_pwr_limit(data);
- if (ret < 0)
- return ret;
-
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
- &sbrmi_chip_info, NULL);
-
- return PTR_ERR_OR_ZERO(hwmon_dev);
-}
-
-static const struct i2c_device_id sbrmi_id[] = {
- {"sbrmi"},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, sbrmi_id);
-
-static const struct of_device_id __maybe_unused sbrmi_of_match[] = {
- {
- .compatible = "amd,sbrmi",
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, sbrmi_of_match);
-
-static struct i2c_driver sbrmi_driver = {
- .driver = {
- .name = "sbrmi",
- .of_match_table = of_match_ptr(sbrmi_of_match),
- },
- .probe = sbrmi_probe,
- .id_table = sbrmi_id,
-};
-
-module_i2c_driver(sbrmi_driver);
-
-MODULE_AUTHOR("Akshay Gupta <akshay.gupta@amd.com>");
-MODULE_DESCRIPTION("Hwmon driver for AMD SB-RMI emulated sensor");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
index 358152868d96..5da44571b6a0 100644
--- a/drivers/hwmon/spd5118.c
+++ b/drivers/hwmon/spd5118.c
@@ -66,6 +66,9 @@ static const unsigned short normal_i2c[] = {
#define SPD5118_EEPROM_BASE 0x80
#define SPD5118_EEPROM_SIZE (SPD5118_PAGE_SIZE * SPD5118_NUM_PAGES)
+#define PAGE_ADDR0(page) (((page) & BIT(0)) << 6)
+#define PAGE_ADDR1_4(page) (((page) & GENMASK(4, 1)) >> 1)
+
/* Temperature unit in millicelsius */
#define SPD5118_TEMP_UNIT (MILLIDEGREE_PER_DEGREE / 4)
/* Representable temperature range in millicelsius */
@@ -75,6 +78,7 @@ static const unsigned short normal_i2c[] = {
struct spd5118_data {
struct regmap *regmap;
struct mutex nvmem_lock;
+ bool is_16bit;
};
/* hwmon */
@@ -305,51 +309,6 @@ static bool spd5118_vendor_valid(u8 bank, u8 id)
return id && id != 0x7f;
}
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
- int regval;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval != 0x5118)
- return -ENODEV;
-
- regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
- if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
- if (regval < 0)
- return -ENODEV;
- if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
- if (regval)
- return -ENODEV;
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
- if (regval)
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
- if (regval < 0 || (regval & 0xc1))
- return -ENODEV;
-
- regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
- if (regval < 0)
- return -ENODEV;
- if (regval & ~SPD5118_TS_DISABLE)
- return -ENODEV;
-
- strscpy(info->type, "spd5118", I2C_NAME_SIZE);
- return 0;
-}
-
static const struct hwmon_channel_info *spd5118_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ),
@@ -376,11 +335,12 @@ static const struct hwmon_chip_info spd5118_chip_info = {
/* nvmem */
-static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
+static ssize_t spd5118_nvmem_read_page(struct spd5118_data *data, char *buf,
unsigned int offset, size_t count)
{
- int addr = (offset >> SPD5118_PAGE_SHIFT) * 0x100 + SPD5118_EEPROM_BASE;
- int err;
+ int page = offset >> SPD5118_PAGE_SHIFT;
+ struct regmap *regmap = data->regmap;
+ int err, addr;
offset &= SPD5118_PAGE_MASK;
@@ -388,6 +348,12 @@ static ssize_t spd5118_nvmem_read_page(struct regmap *regmap, char *buf,
if (offset + count > SPD5118_PAGE_SIZE)
count = SPD5118_PAGE_SIZE - offset;
+ if (data->is_16bit) {
+ addr = SPD5118_EEPROM_BASE | PAGE_ADDR0(page) |
+ (PAGE_ADDR1_4(page) << 8);
+ } else {
+ addr = page * 0x100 + SPD5118_EEPROM_BASE;
+ }
err = regmap_bulk_read(regmap, addr + offset, buf, count);
if (err)
return err;
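Review note: in the 16-bit layout, page bit 0 lands in bit 6 of the low
address byte and page bits 4:1 land in the high byte. For example, page 5
(0b00101) gives PAGE_ADDR0(5) = 0x40 and PAGE_ADDR1_4(5) = 0x02, so
addr = 0x80 | 0x40 | (0x02 << 8) = 0x2c0. A standalone userspace check of
that arithmetic, with the kernel macros restated for illustration only:

    #include <assert.h>
    #include <stdio.h>

    #define BIT(n)                  (1u << (n))
    #define GENMASK(h, l)           (((1u << ((h) - (l) + 1)) - 1) << (l))
    #define SPD5118_EEPROM_BASE     0x80
    #define PAGE_ADDR0(page)        (((page) & BIT(0)) << 6)
    #define PAGE_ADDR1_4(page)      (((page) & GENMASK(4, 1)) >> 1)

    int main(void)
    {
            unsigned int page = 5; /* 0b00101 */
            unsigned int addr = SPD5118_EEPROM_BASE | PAGE_ADDR0(page) |
                                (PAGE_ADDR1_4(page) << 8);

            assert(addr == 0x2c0);
            printf("page %u -> register address 0x%x\n", page, addr);
            return 0;
    }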
@@ -410,7 +376,7 @@ static int spd5118_nvmem_read(void *priv, unsigned int off, void *val, size_t co
mutex_lock(&data->nvmem_lock);
while (count) {
- ret = spd5118_nvmem_read_page(data->regmap, buf, off, count);
+ ret = spd5118_nvmem_read_page(data, buf, off, count);
if (ret < 0) {
mutex_unlock(&data->nvmem_lock);
return ret;
@@ -483,7 +449,7 @@ static bool spd5118_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
+static const struct regmap_range_cfg spd5118_i2c_regmap_range_cfg[] = {
{
.selector_reg = SPD5118_REG_I2C_LEGACY_MODE,
.selector_mask = SPD5118_LEGACY_PAGE_MASK,
@@ -495,7 +461,7 @@ static const struct regmap_range_cfg spd5118_regmap_range_cfg[] = {
},
};
-static const struct regmap_config spd5118_regmap_config = {
+static const struct regmap_config spd5118_regmap8_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x7ff,
@@ -503,89 +469,76 @@ static const struct regmap_config spd5118_regmap_config = {
.volatile_reg = spd5118_volatile_reg,
.cache_type = REGCACHE_MAPLE,
- .ranges = spd5118_regmap_range_cfg,
- .num_ranges = ARRAY_SIZE(spd5118_regmap_range_cfg),
+ .ranges = spd5118_i2c_regmap_range_cfg,
+ .num_ranges = ARRAY_SIZE(spd5118_i2c_regmap_range_cfg),
};
-static int spd5118_init(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- int err, regval, mode;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+static const struct regmap_config spd5118_regmap16_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x7ff,
+ .writeable_reg = spd5118_writeable_reg,
+ .volatile_reg = spd5118_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
+};
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval < 0 || (regval && regval != 0x5118))
- return -ENODEV;
+static int spd5118_suspend(struct device *dev)
+{
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 regval;
+ int err;
/*
- * If the device type registers return 0, it is possible that the chip
- * has a non-zero page selected and takes the specification literally,
- * i.e. disables access to volatile registers besides the page register
- * if the page is not 0. Try to identify such chips.
+ * Make sure the configuration register in the regmap cache is current
+ * before bypassing it.
*/
- if (!regval) {
- /* Vendor ID registers must also be 0 */
- regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
- if (regval)
- return -ENODEV;
-
- /* The selected page in MR11 must not be 0 */
- mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
- if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
- !(mode & SPD5118_LEGACY_PAGE_MASK))
- return -ENODEV;
+ err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
+ if (err < 0)
+ return err;
- err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
- mode & SPD5118_LEGACY_MODE_ADDR);
- if (err)
- return -ENODEV;
+ regcache_cache_bypass(regmap, true);
+ regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
+ SPD5118_TS_DISABLE);
+ regcache_cache_bypass(regmap, false);
- /*
- * If the device type registers are still bad after selecting
- * page 0, this is not a SPD5118 device. Restore original
- * legacy mode register value and abort.
- */
- regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
- if (regval != 0x5118) {
- i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
- return -ENODEV;
- }
- }
+ regcache_cache_only(regmap, true);
+ regcache_mark_dirty(regmap);
- /* We are reasonably sure that this is really a SPD5118 hub controller */
return 0;
}
-static int spd5118_probe(struct i2c_client *client)
+static int spd5118_resume(struct device *dev)
{
- struct device *dev = &client->dev;
- unsigned int regval, revision, vendor, bank;
+ struct spd5118_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+
+ regcache_cache_only(regmap, false);
+ return regcache_sync(regmap);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+
+static int spd5118_common_probe(struct device *dev, struct regmap *regmap,
+ bool is_16bit)
+{
+ unsigned int capability, revision, vendor, bank;
struct spd5118_data *data;
struct device *hwmon_dev;
- struct regmap *regmap;
int err;
- err = spd5118_init(client);
- if (err)
- return err;
-
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- regmap = devm_regmap_init_i2c(client, &spd5118_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
-
- err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &regval);
+ err = regmap_read(regmap, SPD5118_REG_CAPABILITY, &capability);
if (err)
return err;
- if (!(regval & SPD5118_CAP_TS_SUPPORT))
+ if (!(capability & SPD5118_CAP_TS_SUPPORT))
return -ENODEV;
+ data->is_16bit = is_16bit;
+
err = regmap_read(regmap, SPD5118_REG_REVISION, &revision);
if (err)
return err;
@@ -627,48 +580,176 @@ static int spd5118_probe(struct i2c_client *client)
return 0;
}
-static int spd5118_suspend(struct device *dev)
+/* I2C */
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int spd5118_detect(struct i2c_client *client, struct i2c_board_info *info)
{
- struct spd5118_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
- u32 regval;
- int err;
+ struct i2c_adapter *adapter = client->adapter;
+ int regval;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval < 0 || !spd5118_vendor_valid(regval & 0xff, regval >> 8))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_CAPABILITY);
+ if (regval < 0)
+ return -ENODEV;
+ if (!(regval & SPD5118_CAP_TS_SUPPORT) || (regval & 0xfc))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CLR);
+ if (regval)
+ return -ENODEV;
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_ERROR_CLR);
+ if (regval)
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_REVISION);
+ if (regval < 0 || (regval & 0xc1))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_byte_data(client, SPD5118_REG_TEMP_CONFIG);
+ if (regval < 0)
+ return -ENODEV;
+ if (regval & ~SPD5118_TS_DISABLE)
+ return -ENODEV;
+
+ strscpy(info->type, "spd5118", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int spd5118_i2c_init(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int err, regval, mode;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval < 0 || (regval && regval != 0x5118))
+ return -ENODEV;
/*
- * Make sure the configuration register in the regmap cache is current
- * before bypassing it.
+ * If the device type registers return 0, it is possible that the chip
+ * has a non-zero page selected and takes the specification literally,
+ * i.e. disables access to volatile registers besides the page register
+ * if the page is not 0. The Renesas/ITD SPD5118 Hub Controller is known
+ * to show this behavior. Try to identify such chips.
*/
- err = regmap_read(regmap, SPD5118_REG_TEMP_CONFIG, &regval);
- if (err < 0)
- return err;
+ if (!regval) {
+ /* Vendor ID registers must also be 0 */
+ regval = i2c_smbus_read_word_data(client, SPD5118_REG_VENDOR);
+ if (regval)
+ return -ENODEV;
- regcache_cache_bypass(regmap, true);
- regmap_update_bits(regmap, SPD5118_REG_TEMP_CONFIG, SPD5118_TS_DISABLE,
- SPD5118_TS_DISABLE);
- regcache_cache_bypass(regmap, false);
+ /* The selected page in MR11 must not be 0 */
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0 || (mode & ~SPD5118_LEGACY_MODE_MASK) ||
+ !(mode & SPD5118_LEGACY_PAGE_MASK))
+ return -ENODEV;
- regcache_cache_only(regmap, true);
- regcache_mark_dirty(regmap);
+ err = i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE,
+ mode & SPD5118_LEGACY_MODE_ADDR);
+ if (err)
+ return -ENODEV;
+ /*
+ * If the device type registers are still bad after selecting
+ * page 0, this is not a SPD5118 device. Restore original
+ * legacy mode register value and abort.
+ */
+ regval = i2c_smbus_read_word_swapped(client, SPD5118_REG_TYPE);
+ if (regval != 0x5118) {
+ i2c_smbus_write_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE, mode);
+ return -ENODEV;
+ }
+ }
+
+ /* We are reasonably sure that this is really a SPD5118 hub controller */
return 0;
}
-static int spd5118_resume(struct device *dev)
+/*
+ * 16-bit addressing note:
+ *
+ * If I2C_FUNC_I2C is not supported by an I2C adapter driver, regmap uses
+ * SMBus operations as an alternative. To simulate a read operation with a
+ * 16-bit address, it writes the address using i2c_smbus_write_byte_data(),
+ * followed by one or more calls to i2c_smbus_read_byte() to read the data.
+ * Per the spd5118 standard, a read operation after writing the address must
+ * start with <Sr> (Repeat Start). However, an SMBus read byte operation
+ * starts with <S> (Start). This resets the register address in the spd5118
+ * chip. As a result,
+ * i2c_smbus_read_byte() always returns data from register address 0x00.
+ *
+ * A working alternative to access chips with 16-bit register addresses in the
+ * absence of I2C_FUNC_I2C support is not known.
+ *
+ * For this reason, 16-bit addressing can only be supported with I2C if the
+ * adapter supports I2C_FUNC_I2C.
+ *
+ * For I2C, the addressing mode selected by the BIOS must not be changed.
+ * Experiments show that at least some PC BIOS versions will not change the
+ * addressing mode on a soft reboot and end up in setup, claiming that some
+ * configuration change happened. This will happen again after a power cycle,
+ * which does reset the addressing mode. To prevent this from happening,
+ * detect if 16-bit addressing is enabled and always use the currently
+ * configured addressing mode.
+ */
+
+static int spd5118_i2c_probe(struct i2c_client *client)
{
- struct spd5118_data *data = dev_get_drvdata(dev);
- struct regmap *regmap = data->regmap;
+ const struct regmap_config *config;
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+ int err, mode;
+ bool is_16bit;
- regcache_cache_only(regmap, false);
- return regcache_sync(regmap);
-}
+ err = spd5118_i2c_init(client);
+ if (err)
+ return err;
-static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
+ mode = i2c_smbus_read_byte_data(client, SPD5118_REG_I2C_LEGACY_MODE);
+ if (mode < 0)
+ return mode;
+
+ is_16bit = mode & SPD5118_LEGACY_MODE_ADDR;
+ if (is_16bit) {
+ /*
+ * See 16-bit addressing note above explaining why it is
+ * necessary to check for I2C_FUNC_I2C support here.
+ */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "Adapter does not support 16-bit register addresses\n");
+ return -ENODEV;
+ }
+ config = &spd5118_regmap16_config;
+ } else {
+ config = &spd5118_regmap8_config;
+ }
+
+ regmap = devm_regmap_init_i2c(client, config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed\n");
+
+ return spd5118_common_probe(dev, regmap, is_16bit);
+}
-static const struct i2c_device_id spd5118_id[] = {
+static const struct i2c_device_id spd5118_i2c_id[] = {
{ "spd5118" },
{ }
};
-MODULE_DEVICE_TABLE(i2c, spd5118_id);
+MODULE_DEVICE_TABLE(i2c, spd5118_i2c_id);
static const struct of_device_id spd5118_of_ids[] = {
{ .compatible = "jedec,spd5118", },
@@ -676,20 +757,20 @@ static const struct of_device_id spd5118_of_ids[] = {
};
MODULE_DEVICE_TABLE(of, spd5118_of_ids);
-static struct i2c_driver spd5118_driver = {
+static struct i2c_driver spd5118_i2c_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "spd5118",
.of_match_table = spd5118_of_ids,
.pm = pm_sleep_ptr(&spd5118_pm_ops),
},
- .probe = spd5118_probe,
- .id_table = spd5118_id,
+ .probe = spd5118_i2c_probe,
+ .id_table = spd5118_i2c_id,
.detect = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? spd5118_detect : NULL,
.address_list = IS_ENABLED(CONFIG_SENSORS_SPD5118_DETECT) ? normal_i2c : NULL,
};
-module_i2c_driver(spd5118_driver);
+module_i2c_driver(spd5118_i2c_driver);
MODULE_AUTHOR("René Rebe <rene@exactcode.de>");
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 8af44a33055f..a02daa496c9c 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/of.h>
#define DRIVER_NAME "tmp102"
@@ -204,6 +205,10 @@ static int tmp102_probe(struct i2c_client *client)
return -ENODEV;
}
+ err = devm_regulator_get_enable_optional(dev, "vcc");
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(dev, err, "Failed to enable regulator\n");
+
tmp102 = devm_kzalloc(dev, sizeof(*tmp102), GFP_KERNEL);
if (!tmp102)
return -ENOMEM;
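Review note: devm_regulator_get_enable_optional() keeps the supply enabled
for the device's lifetime and returns -ENODEV when the firmware describes no
"vcc" supply, which the probe deliberately tolerates. The same pattern in
isolation, with a hypothetical foo_ prefix:

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    static int foo_enable_vcc(struct device *dev)
    {
            int err = devm_regulator_get_enable_optional(dev, "vcc");

            /* -ENODEV means no supply is described; anything else is fatal. */
            if (err < 0 && err != -ENODEV)
                    return dev_err_probe(dev, err, "Failed to enable regulator\n");
            return 0;
    }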
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 2cdbd5f107a2..11c5d80428cd 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -103,8 +103,6 @@ struct xgene_hwmon_dev {
struct device *hwmon_dev;
bool temp_critical_alarm;
- phys_addr_t comm_base_addr;
- void *pcc_comm_addr;
unsigned int usecs_lat;
};
@@ -125,7 +123,8 @@ static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask)
static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
{
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
u32 *ptr = (void *)(generic_comm_base + 1);
int rc, i;
u16 val;
@@ -523,7 +522,8 @@ static void xgene_hwmon_rx_cb(struct mbox_client *cl, void *msg)
static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
struct xgene_hwmon_dev *ctx = to_xgene_hwmon_dev(cl);
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
struct slimpro_resp_msg amsg;
/*
@@ -649,7 +649,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
} else {
struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
- int version;
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
@@ -658,8 +657,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out_mbox_free;
}
- version = (int)acpi_id->driver_data;
-
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
dev_err(&pdev->dev, "no pcc-channel property\n");
@@ -686,34 +683,6 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
/*
- * This is the shared communication region
- * for the OS and Platform to communicate over.
- */
- ctx->comm_base_addr = pcc_chan->shmem_base_addr;
- if (ctx->comm_base_addr) {
- if (version == XGENE_HWMON_V2)
- ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
- ctx->comm_base_addr,
- pcc_chan->shmem_size);
- else
- ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
- } else {
- dev_err(&pdev->dev, "Failed to get PCC comm region\n");
- rc = -ENODEV;
- goto out;
- }
-
- if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
- dev_err(&pdev->dev,
- "Failed to ioremap PCC comm region\n");
- rc = -ENOMEM;
- goto out;
- }
-
- /*
* pcc_chan->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
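Review note: the driver now uses the shared-memory mapping that the PCC
mailbox core already provides through pcc_chan->shmem (an __iomem pointer),
dropping its own version-dependent devm_ioremap()/devm_memremap() logic. A
hedged sketch of the consumer side, assuming the channel came from
pcc_mbox_request_channel():

    #include <acpi/pcc.h>
    #include <linux/acpi.h>
    #include <linux/io.h>

    static u16 foo_read_pcc_command(struct pcc_mbox_chan *pcc_chan)
    {
            struct acpi_pcct_shared_memory __iomem *shmem = pcc_chan->shmem;

            /* Command field of the generic PCC shared-memory region. */
            return readw_relaxed(&shmem->command);
    }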
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ecd7086a5b83..f064e3d172b3 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -259,4 +259,13 @@ config CORESIGHT_DUMMY
To compile this driver as a module, choose M here: the module will be
called coresight-dummy.
+
+config CORESIGHT_KUNIT_TESTS
+ tristate "Enable Coresight unit tests"
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable Coresight unit tests. Only useful for development and not
+ intended for production.
+
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 8e62c3150aeb..4e7cc3c5bf99 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -22,6 +22,8 @@ condflags := \
$(call cc-option, -Wstringop-truncation)
subdir-ccflags-y += $(condflags)
+CFLAGS_coresight-stm.o := -D__DISABLE_TRACE_MMIO__
+
obj-$(CONFIG_CORESIGHT) += coresight.o
coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
coresight-sysfs.o coresight-syscfg.o coresight-config.o \
@@ -53,3 +55,4 @@ obj-$(CONFIG_ULTRASOC_SMB) += ultrasoc-smb.o
obj-$(CONFIG_CORESIGHT_DUMMY) += coresight-dummy.o
obj-$(CONFIG_CORESIGHT_CTCU) += coresight-ctcu.o
coresight-ctcu-y := coresight-ctcu-core.o
+obj-$(CONFIG_CORESIGHT_KUNIT_TESTS) += coresight-kunit-tests.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index fa170c966bc3..5058432233da 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -113,9 +113,8 @@ typedef u64 cate_t;
* containing the data page pointer for @offset. If @daddrp is not NULL,
* @daddrp points the DMA address of the beginning of the table.
*/
-static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
- unsigned long offset,
- dma_addr_t *daddrp)
+static cate_t *catu_get_table(struct tmc_sg_table *catu_table, unsigned long offset,
+ dma_addr_t *daddrp)
{
unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
unsigned int table_nr, pg_idx, pg_offset;
@@ -165,12 +164,12 @@ static void catu_dump_table(struct tmc_sg_table *catu_table)
}
#else
-static inline void catu_dump_table(struct tmc_sg_table *catu_table)
+static void catu_dump_table(struct tmc_sg_table *catu_table)
{
}
#endif
-static inline cate_t catu_make_entry(dma_addr_t addr)
+static cate_t catu_make_entry(dma_addr_t addr)
{
return addr ? CATU_VALID_ENTRY(addr) : 0;
}
@@ -390,7 +389,7 @@ static const struct attribute_group *catu_groups[] = {
};
-static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
+static int catu_wait_for_ready(struct catu_drvdata *drvdata)
{
struct csdev_access *csa = &drvdata->csdev->access;
@@ -458,12 +457,17 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode,
static int catu_enable(struct coresight_device *csdev, enum cs_mode mode,
void *data)
{
- int rc;
+ int rc = 0;
struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
- CS_UNLOCK(catu_drvdata->base);
- rc = catu_enable_hw(catu_drvdata, mode, data);
- CS_LOCK(catu_drvdata->base);
+ guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock);
+ if (csdev->refcnt == 0) {
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_enable_hw(catu_drvdata, mode, data);
+ CS_LOCK(catu_drvdata->base);
+ }
+ if (!rc)
+ csdev->refcnt++;
return rc;
}
@@ -486,12 +490,15 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
static int catu_disable(struct coresight_device *csdev, void *__unused)
{
- int rc;
+ int rc = 0;
struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
- CS_UNLOCK(catu_drvdata->base);
- rc = catu_disable_hw(catu_drvdata);
- CS_LOCK(catu_drvdata->base);
+ guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock);
+ if (--csdev->refcnt == 0) {
+ CS_UNLOCK(catu_drvdata->base);
+ rc = catu_disable_hw(catu_drvdata);
+ CS_LOCK(catu_drvdata->base);
+ }
return rc;
}
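Review note: catu_enable()/catu_disable() now refcount the helper under a raw
spinlock, using the scope-based guard() so every return path drops the lock
automatically. A hedged minimal illustration of the pattern:

    #include <linux/cleanup.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(foo_lock);
    static int foo_refcnt;

    static int foo_enable(void)
    {
            /* Released automatically when the function returns. */
            guard(raw_spinlock_irqsave)(&foo_lock);

            if (foo_refcnt == 0) {
                    /* program the hardware for the first user only */
            }
            foo_refcnt++;
            return 0;
    }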
@@ -550,6 +557,7 @@ static int __catu_probe(struct device *dev, struct resource *res)
dev->platform_data = pdata;
drvdata->base = base;
+ raw_spin_lock_init(&drvdata->spinlock);
catu_desc.access = CSDEV_ACCESS_IOMEM(base);
catu_desc.pdata = pdata;
catu_desc.dev = dev;
@@ -558,6 +566,7 @@ static int __catu_probe(struct device *dev, struct resource *res)
catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
catu_desc.ops = &catu_ops;
+ coresight_clear_self_claim_tag(&catu_desc.access);
drvdata->csdev = coresight_register(&catu_desc);
if (IS_ERR(drvdata->csdev))
ret = PTR_ERR(drvdata->csdev);
@@ -702,7 +711,7 @@ static int __init catu_init(void)
{
int ret;
- ret = coresight_init_driver("catu", &catu_driver, &catu_platform_driver);
+ ret = coresight_init_driver("catu", &catu_driver, &catu_platform_driver, THIS_MODULE);
tmc_etr_set_catu_ops(&etr_catu_buf_ops);
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 141feac1c14b..755776cd19c5 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -65,6 +65,7 @@ struct catu_drvdata {
void __iomem *base;
struct coresight_device *csdev;
int irq;
+ raw_spinlock_t spinlock;
};
#define CATU_REG32(name, offset) \
diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h
index b9ebc9fcfb7f..90fd937d3bd8 100644
--- a/drivers/hwtracing/coresight/coresight-config.h
+++ b/drivers/hwtracing/coresight/coresight-config.h
@@ -228,7 +228,7 @@ struct cscfg_feature_csdev {
* @feats_csdev:references to the device features to enable.
*/
struct cscfg_config_csdev {
- const struct cscfg_config_desc *config_desc;
+ struct cscfg_config_desc *config_desc;
struct coresight_device *csdev;
bool enabled;
struct list_head node;
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index fb43ef6a3b1f..fa758cc21827 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -129,34 +129,36 @@ coresight_find_out_connection(struct coresight_device *csdev,
return ERR_PTR(-ENODEV);
}
-static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
+static u32 coresight_read_claim_tags_unlocked(struct coresight_device *csdev)
{
- return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR);
+ return FIELD_GET(CORESIGHT_CLAIM_MASK,
+ csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR));
}
-static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev)
-{
- return coresight_read_claim_tags(csdev) == CORESIGHT_CLAIM_SELF_HOSTED;
-}
-
-static inline bool coresight_is_claimed_any(struct coresight_device *csdev)
-{
- return coresight_read_claim_tags(csdev) != 0;
-}
-
-static inline void coresight_set_claim_tags(struct coresight_device *csdev)
+static void coresight_set_self_claim_tag_unlocked(struct coresight_device *csdev)
{
csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
CORESIGHT_CLAIMSET);
isb();
}
-static inline void coresight_clear_claim_tags(struct coresight_device *csdev)
+void coresight_clear_self_claim_tag(struct csdev_access *csa)
{
- csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
+ if (csa->io_mem)
+ CS_UNLOCK(csa->base);
+ coresight_clear_self_claim_tag_unlocked(csa);
+ if (csa->io_mem)
+ CS_LOCK(csa->base);
+}
+EXPORT_SYMBOL_GPL(coresight_clear_self_claim_tag);
+
+void coresight_clear_self_claim_tag_unlocked(struct csdev_access *csa)
+{
+ csdev_access_relaxed_write32(csa, CORESIGHT_CLAIM_SELF_HOSTED,
CORESIGHT_CLAIMCLR);
isb();
}
+EXPORT_SYMBOL_GPL(coresight_clear_self_claim_tag_unlocked);
/*
* coresight_claim_device_unlocked : Claim the device for self-hosted usage
@@ -170,18 +172,41 @@ static inline void coresight_clear_claim_tags(struct coresight_device *csdev)
*/
int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
+ int tag;
+ struct csdev_access *csa;
+
if (WARN_ON(!csdev))
return -EINVAL;
- if (coresight_is_claimed_any(csdev))
+ csa = &csdev->access;
+ tag = coresight_read_claim_tags_unlocked(csdev);
+
+ switch (tag) {
+ case CORESIGHT_CLAIM_FREE:
+ coresight_set_self_claim_tag_unlocked(csdev);
+ if (coresight_read_claim_tags_unlocked(csdev) == CORESIGHT_CLAIM_SELF_HOSTED)
+ return 0;
+
+ /* There was a race setting the tag, clean up and fail */
+ coresight_clear_self_claim_tag_unlocked(csa);
+ dev_dbg(&csdev->dev, "Busy: Couldn't set self claim tag");
return -EBUSY;
- coresight_set_claim_tags(csdev);
- if (coresight_is_claimed_self_hosted(csdev))
- return 0;
- /* There was a race setting the tags, clean up and fail */
- coresight_clear_claim_tags(csdev);
- return -EBUSY;
+ case CORESIGHT_CLAIM_EXTERNAL:
+ /* External debug is an expected state, so log and report BUSY */
+ dev_dbg(&csdev->dev, "Busy: Claimed by external debugger");
+ return -EBUSY;
+
+ default:
+ case CORESIGHT_CLAIM_SELF_HOSTED:
+ case CORESIGHT_CLAIM_INVALID:
+ /*
+ * Warn here because we clear a lingering self hosted tag
+ * on probe, so other tag combinations are impossible.
+ */
+ dev_err_once(&csdev->dev, "Invalid claim tag state: %x", tag);
+ return -EBUSY;
+ }
}
EXPORT_SYMBOL_GPL(coresight_claim_device_unlocked);
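Review note: the claim handshake now decodes the two CLAIM bits as a
four-state field instead of a boolean. A hedged userspace model of the
decision; state names mirror CORESIGHT_CLAIM_*, and the exact bit encodings
are an assumption of this sketch:

    enum claim_tag { CLAIM_FREE, CLAIM_EXTERNAL, CLAIM_SELF_HOSTED, CLAIM_INVALID };

    /* *tag models the CLAIMSET/CLAIMCLR register state; returns 0 on success. */
    static int claim(enum claim_tag *tag)
    {
            if (*tag != CLAIM_FREE)
                    return -1;        /* external debugger, or a lingering tag */
            *tag = CLAIM_SELF_HOSTED; /* the set-then-read-back race is elided */
            return 0;
    }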
@@ -201,7 +226,7 @@ int coresight_claim_device(struct coresight_device *csdev)
EXPORT_SYMBOL_GPL(coresight_claim_device);
/*
- * coresight_disclaim_device_unlocked : Clear the claim tags for the device.
+ * coresight_disclaim_device_unlocked : Clear the claim tag for the device.
* Called with CS_UNLOCKed for the component.
*/
void coresight_disclaim_device_unlocked(struct coresight_device *csdev)
@@ -210,15 +235,15 @@ void coresight_disclaim_device_unlocked(struct coresight_device *csdev)
if (WARN_ON(!csdev))
return;
- if (coresight_is_claimed_self_hosted(csdev))
- coresight_clear_claim_tags(csdev);
+ if (coresight_read_claim_tags_unlocked(csdev) == CORESIGHT_CLAIM_SELF_HOSTED)
+ coresight_clear_self_claim_tag_unlocked(&csdev->access);
else
/*
* The external agent may have not honoured our claim
* and has manipulated it. Or something else has seriously
* gone wrong in our driver.
*/
- WARN_ON_ONCE(1);
+ dev_WARN_ONCE(&csdev->dev, 1, "External agent took claim tag");
}
EXPORT_SYMBOL_GPL(coresight_disclaim_device_unlocked);
@@ -367,6 +392,28 @@ void coresight_disable_source(struct coresight_device *csdev, void *data)
}
EXPORT_SYMBOL_GPL(coresight_disable_source);
+void coresight_pause_source(struct coresight_device *csdev)
+{
+ if (!coresight_is_percpu_source(csdev))
+ return;
+
+ if (source_ops(csdev)->pause_perf)
+ source_ops(csdev)->pause_perf(csdev);
+}
+EXPORT_SYMBOL_GPL(coresight_pause_source);
+
+int coresight_resume_source(struct coresight_device *csdev)
+{
+ if (!coresight_is_percpu_source(csdev))
+ return -EOPNOTSUPP;
+
+ if (!source_ops(csdev)->resume_perf)
+ return -EOPNOTSUPP;
+
+ return source_ops(csdev)->resume_perf(csdev);
+}
+EXPORT_SYMBOL_GPL(coresight_resume_source);
+
/*
* coresight_disable_path_from : Disable components in the given path beyond
* @nd in the list. If @nd is NULL, all the components, except the SOURCE are
@@ -465,7 +512,7 @@ int coresight_enable_path(struct coresight_path *path, enum cs_mode mode,
/* Enable all helpers adjacent to the path first */
ret = coresight_enable_helpers(csdev, mode, path);
if (ret)
- goto err;
+ goto err_disable_path;
/*
* ETF devices are tricky... They can be a link or a sink,
* depending on how they are configured. If an ETF has been
@@ -486,8 +533,10 @@ int coresight_enable_path(struct coresight_path *path, enum cs_mode mode,
* that need disabling. Disabling the path here
* would mean we could disrupt an existing session.
*/
- if (ret)
+ if (ret) {
+ coresight_disable_helpers(csdev, path);
goto out;
+ }
break;
case CORESIGHT_DEV_TYPE_SOURCE:
/* sources are enabled from either sysFS or Perf */
@@ -497,16 +546,19 @@ int coresight_enable_path(struct coresight_path *path, enum cs_mode mode,
child = list_next_entry(nd, link)->csdev;
ret = coresight_enable_link(csdev, parent, child, source);
if (ret)
- goto err;
+ goto err_disable_helpers;
break;
default:
- goto err;
+ ret = -EINVAL;
+ goto err_disable_helpers;
}
}
out:
return ret;
-err:
+err_disable_helpers:
+ coresight_disable_helpers(csdev, path);
+err_disable_path:
coresight_disable_path_from(path, nd);
goto out;
}
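Review note: the error paths are reworked so that unwinding mirrors the setup
order: a link failure first disables the helpers enabled just before it,
then the partially enabled path. The idiom, sketched with hypothetical
helpers:

    int enable_helpers(void);
    int enable_link(void);
    void disable_helpers(void);
    void disable_path_from_here(void);

    static int foo_enable_node(void)
    {
            int ret;

            ret = enable_helpers();
            if (ret)
                    goto err_disable_path;

            ret = enable_link();
            if (ret)
                    goto err_disable_helpers;

            return 0;

    err_disable_helpers:
            disable_helpers();
    err_disable_path:
            disable_path_from_here();
            return ret;
    }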
@@ -579,7 +631,7 @@ struct coresight_device *coresight_get_sink_by_id(u32 id)
* Return true in successful case and power up the device.
* Return false when failed to get reference of module.
*/
-static inline bool coresight_get_ref(struct coresight_device *csdev)
+static bool coresight_get_ref(struct coresight_device *csdev)
{
struct device *dev = csdev->dev.parent;
@@ -598,7 +650,7 @@ static inline bool coresight_get_ref(struct coresight_device *csdev)
*
* @csdev: The coresight device to decrement a reference from.
*/
-static inline void coresight_put_ref(struct coresight_device *csdev)
+static void coresight_put_ref(struct coresight_device *csdev)
{
struct device *dev = csdev->dev.parent;
@@ -821,7 +873,7 @@ void coresight_release_path(struct coresight_path *path)
}
/* return true if the device is a suitable type for a default sink */
-static inline bool coresight_is_def_sink_type(struct coresight_device *csdev)
+static bool coresight_is_def_sink_type(struct coresight_device *csdev)
{
/* sink & correct subtype */
if (((csdev->type == CORESIGHT_DEV_TYPE_SINK) ||
@@ -959,6 +1011,7 @@ coresight_find_default_sink(struct coresight_device *csdev)
}
return csdev->def_sink;
}
+EXPORT_SYMBOL_GPL(coresight_find_default_sink);
static int coresight_remove_sink_ref(struct device *dev, void *data)
{
@@ -1385,8 +1438,8 @@ EXPORT_SYMBOL_GPL(coresight_unregister);
*
* Returns the index of the entry, when found. Otherwise, -ENOENT.
*/
-static inline int coresight_search_device_idx(struct coresight_dev_list *dict,
- struct fwnode_handle *fwnode)
+static int coresight_search_device_idx(struct coresight_dev_list *dict,
+ struct fwnode_handle *fwnode)
{
int i;
@@ -1585,17 +1638,17 @@ module_init(coresight_init);
module_exit(coresight_exit);
int coresight_init_driver(const char *drv, struct amba_driver *amba_drv,
- struct platform_driver *pdev_drv)
+ struct platform_driver *pdev_drv, struct module *owner)
{
int ret;
- ret = amba_driver_register(amba_drv);
+ ret = __amba_driver_register(amba_drv, owner);
if (ret) {
pr_err("%s: error registering AMBA driver\n", drv);
return ret;
}
- ret = platform_driver_register(pdev_drv);
+ ret = __platform_driver_register(pdev_drv, owner);
if (!ret)
return 0;
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 342c3aaf414d..a871d997330b 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -774,7 +774,8 @@ static struct platform_driver debug_platform_driver = {
static int __init debug_init(void)
{
- return coresight_init_driver("debug", &debug_driver, &debug_platform_driver);
+ return coresight_init_driver("debug", &debug_driver, &debug_platform_driver,
+ THIS_MODULE);
}
static void __exit debug_exit(void)
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 80f6265e3740..8fb30dd73fd2 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -931,6 +931,8 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
cti_desc.ops = &cti_ops;
cti_desc.groups = drvdata->ctidev.con_groups;
cti_desc.dev = dev;
+
+ coresight_clear_self_claim_tag(&cti_desc.access);
drvdata->csdev = coresight_register(&cti_desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
index 16e310e7e9d4..8362a47c939c 100644
--- a/drivers/hwtracing/coresight/coresight-cti.h
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -9,7 +9,6 @@
#include <linux/coresight.h>
#include <linux/device.h>
-#include <linux/fwnode.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
@@ -17,6 +16,8 @@
#include "coresight-priv.h"
+struct fwnode_handle;
+
/*
* Device registers
* 0x000 - 0x144: CTI programming and status
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 7948597d483d..d5efb085b30d 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -95,7 +95,7 @@ struct etb_drvdata {
static int etb_set_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle);
-static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
+static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
}
@@ -772,6 +772,8 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
desc.pdata = pdata;
desc.dev = dev;
desc.groups = coresight_etb_groups;
+
+ coresight_clear_self_claim_tag(&desc.access);
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index f4cccd68e625..f1551c08ecb2 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -366,6 +366,18 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/*
+ * If the AUX pause feature is enabled but the ETM driver does not
+ * support the operations, clear this CPU from the mask and
+ * continue to next one.
+ */
+ if (event->attr.aux_start_paused &&
+ (!source_ops(csdev)->pause_perf || !source_ops(csdev)->resume_perf)) {
+ dev_err_once(&csdev->dev, "AUX pause is not supported.\n");
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+
+ /*
* No sink provided - look for a default sink for all the ETMs,
* where this event can be scheduled.
* We allocate the sink specific buffers only once for this
@@ -450,6 +462,15 @@ err:
goto out;
}
+static int etm_event_resume(struct coresight_device *csdev,
+ struct etm_ctxt *ctxt)
+{
+ if (!ctxt->event_data)
+ return 0;
+
+ return coresight_resume_source(csdev);
+}
+
static void etm_event_start(struct perf_event *event, int flags)
{
int cpu = smp_processor_id();
@@ -463,6 +484,14 @@ static void etm_event_start(struct perf_event *event, int flags)
if (!csdev)
goto fail;
+ if (flags & PERF_EF_RESUME) {
+ if (etm_event_resume(csdev, ctxt) < 0) {
+ dev_err(&csdev->dev, "Failed to resume ETM event.\n");
+ goto fail;
+ }
+ return;
+ }
+
/* Have we messed up our tracking ? */
if (WARN_ON(ctxt->event_data))
goto fail;
@@ -545,6 +574,55 @@ fail:
return;
}
+static void etm_event_pause(struct perf_event *event,
+ struct coresight_device *csdev,
+ struct etm_ctxt *ctxt)
+{
+ int cpu = smp_processor_id();
+ struct coresight_device *sink;
+ struct perf_output_handle *handle = &ctxt->handle;
+ struct coresight_path *path;
+ unsigned long size;
+
+ if (!ctxt->event_data)
+ return;
+
+ /* Stop tracer */
+ coresight_pause_source(csdev);
+
+ path = etm_event_cpu_path(ctxt->event_data, cpu);
+ sink = coresight_get_sink(path);
+ if (WARN_ON_ONCE(!sink))
+ return;
+
+ /*
+ * The per CPU sink has its own interrupt handling and might race
+ * with the buffer update on AUX trace pause if it is invoked from
+ * NMI. To avoid the race condition, do not update the buffer for
+ * the per CPU sink case.
+ */
+ if (coresight_is_percpu_sink(sink))
+ return;
+
+ if (WARN_ON_ONCE(handle->event != event))
+ return;
+
+ if (!sink_ops(sink)->update_buffer)
+ return;
+
+ size = sink_ops(sink)->update_buffer(sink, handle,
+ ctxt->event_data->snk_config);
+ if (READ_ONCE(handle->event)) {
+ if (!size)
+ return;
+
+ perf_aux_output_end(handle, size);
+ perf_aux_output_begin(handle, event);
+ } else {
+ WARN_ON_ONCE(size);
+ }
+}
+
static void etm_event_stop(struct perf_event *event, int mode)
{
int cpu = smp_processor_id();
@@ -555,6 +633,9 @@ static void etm_event_stop(struct perf_event *event, int mode)
struct etm_event_data *event_data;
struct coresight_path *path;
+ if (mode & PERF_EF_PAUSE)
+ return etm_event_pause(event, csdev, ctxt);
+
/*
* If we still have access to the event_data via handle,
* confirm that we haven't messed up the tracking.
@@ -899,7 +980,8 @@ int __init etm_perf_init(void)
int ret;
etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
- PERF_PMU_CAP_ITRACE);
+ PERF_PMU_CAP_ITRACE |
+ PERF_PMU_CAP_AUX_PAUSE);
etm_pmu.attr_groups = etm_pmu_attr_groups;
etm_pmu.task_ctx_nr = perf_sw_context;
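Review note: with PERF_PMU_CAP_AUX_PAUSE advertised, the perf core delivers
PERF_EF_PAUSE/PERF_EF_RESUME through the PMU stop/start callbacks. From
userspace the feature is driven by the aux_start_paused/aux_pause/aux_resume
attribute bits; a hedged sketch opening an AUX event that starts paused (the
bitfield needs a uapi header recent enough to define it):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_paused_aux_event(int pmu_type, int cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = pmu_type;           /* e.g. the cs_etm PMU type id */
            attr.aux_start_paused = 1;      /* tracing idles until resumed */

            return syscall(SYS_perf_event_open, &attr, -1, cpu, -1, 0);
    }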
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 171f1384f7c0..1d753cca2943 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -229,7 +229,7 @@ struct etm_config {
* @config: structure holding configuration parameters.
*/
struct etm_drvdata {
- void __iomem *base;
+ struct csdev_access csa;
struct clk *atclk;
struct coresight_device *csdev;
spinlock_t spinlock;
@@ -260,7 +260,7 @@ static inline void etm_writel(struct etm_drvdata *drvdata,
"invalid CP14 access to ETM reg: %#x", off);
}
} else {
- writel_relaxed(val, drvdata->base + off);
+ writel_relaxed(val, drvdata->csa.base + off);
}
}
@@ -274,7 +274,7 @@ static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
"invalid CP14 access to ETM reg: %#x", off);
}
} else {
- val = readl_relaxed(drvdata->base + off);
+ val = readl_relaxed(drvdata->csa.base + off);
}
return val;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 8927bfaf3af2..1c6204e14422 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -86,9 +86,9 @@ static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
u32 etmpdcr;
- etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
+ etmpdcr = readl_relaxed(drvdata->csa.base + ETMPDCR);
etmpdcr |= ETMPDCR_PWD_UP;
- writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
+ writel_relaxed(etmpdcr, drvdata->csa.base + ETMPDCR);
/* Ensure pwrup completes before subsequent cp14 accesses */
mb();
isb();
@@ -101,9 +101,9 @@ static void etm_clr_pwrup(struct etm_drvdata *drvdata)
/* Ensure pending cp14 accesses complete before clearing pwrup */
mb();
isb();
- etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
+ etmpdcr = readl_relaxed(drvdata->csa.base + ETMPDCR);
etmpdcr &= ~ETMPDCR_PWD_UP;
- writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
+ writel_relaxed(etmpdcr, drvdata->csa.base + ETMPDCR);
}
/**
@@ -365,7 +365,7 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
struct etm_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(drvdata->csa.base);
rc = coresight_claim_device_unlocked(csdev);
if (rc)
@@ -427,7 +427,7 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
etm_clr_prog(drvdata);
done:
- CS_LOCK(drvdata->base);
+ CS_LOCK(drvdata->csa.base);
dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
drvdata->cpu, rc);
@@ -549,7 +549,7 @@ static void etm_disable_hw(void *info)
struct etm_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(drvdata->csa.base);
etm_set_prog(drvdata);
/* Read back sequencer and counters for post trace analysis */
@@ -561,7 +561,7 @@ static void etm_disable_hw(void *info)
etm_set_pwrdwn(drvdata);
coresight_disclaim_device_unlocked(csdev);
- CS_LOCK(drvdata->base);
+ CS_LOCK(drvdata->csa.base);
dev_dbg(&drvdata->csdev->dev,
"cpu: %d disable smp call done\n", drvdata->cpu);
@@ -574,7 +574,7 @@ static void etm_disable_perf(struct coresight_device *csdev)
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return;
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(drvdata->csa.base);
/* Setting the prog bit disables tracing immediately */
etm_set_prog(drvdata);
@@ -586,7 +586,7 @@ static void etm_disable_perf(struct coresight_device *csdev)
etm_set_pwrdwn(drvdata);
coresight_disclaim_device_unlocked(csdev);
- CS_LOCK(drvdata->base);
+ CS_LOCK(drvdata->csa.base);
/*
* perf will release trace ids when _free_aux()
@@ -733,7 +733,7 @@ static void etm_init_arch_data(void *info)
/* Make sure all registers are accessible */
etm_os_unlock(drvdata);
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(drvdata->csa.base);
/* First dummy read */
(void)etm_readl(drvdata, ETMPDSR);
@@ -764,9 +764,10 @@ static void etm_init_arch_data(void *info)
drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+ coresight_clear_self_claim_tag_unlocked(&drvdata->csa);
etm_set_pwrdwn(drvdata);
etm_clr_pwrup(drvdata);
- CS_LOCK(drvdata->base);
+ CS_LOCK(drvdata->csa.base);
}
static int __init etm_hp_setup(void)
@@ -827,8 +828,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(base))
return PTR_ERR(base);
- drvdata->base = base;
- desc.access = CSDEV_ACCESS_IOMEM(base);
+ desc.access = drvdata->csa = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index b9006451f515..762109307b86 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -50,11 +50,11 @@ static ssize_t etmsr_show(struct device *dev,
pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(drvdata->csa.base);
val = etm_readl(drvdata, ETMSR);
- CS_LOCK(drvdata->base);
+ CS_LOCK(drvdata->csa.base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
pm_runtime_put(dev->parent);
@@ -949,9 +949,9 @@ static ssize_t seq_curr_state_show(struct device *dev,
pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
- CS_UNLOCK(drvdata->base);
+ CS_UNLOCK(drvdata->csa.base);
val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
- CS_LOCK(drvdata->base);
+ CS_LOCK(drvdata->csa.base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
pm_runtime_put(dev->parent);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 2b8f10463840..42e5d37403ad 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -84,7 +84,7 @@ static int etm4_probe_cpu(unsigned int cpu);
* TRCIDR4.NUMPC > 0b0000 .
* TRCSSCSR<n>.PC == 0b1
*/
-static inline bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
+static bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
{
return (n < drvdata->nr_ss_cmp) &&
drvdata->nr_pe &&
@@ -185,7 +185,7 @@ static void etm_write_os_lock(struct etmv4_drvdata *drvdata,
isb();
}
-static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
+static void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
struct csdev_access *csa)
{
WARN_ON(drvdata->cpu != smp_processor_id());
@@ -431,6 +431,44 @@ static int etm4x_wait_status(struct csdev_access *csa, int pos, int val)
return coresight_timeout(csa, TRCSTATR, pos, val);
}
+static int etm4_enable_trace_unit(struct etmv4_drvdata *drvdata)
+{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct device *etm_dev = &csdev->dev;
+ struct csdev_access *csa = &csdev->access;
+
+ /*
+ * ETE mandates that the TRCRSR is written to before
+ * enabling it.
+ */
+ if (etm4x_is_ete(drvdata))
+ etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
+
+ etm4x_allow_trace(drvdata);
+ /* Enable the trace unit */
+ etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
+
+ /* Synchronize the register updates for sysreg access */
+ if (!csa->io_mem)
+ isb();
+
+ /* wait for TRCSTATR.IDLE to go back down to '0' */
+ if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0)) {
+ dev_err(etm_dev,
+ "timeout while waiting for Idle Trace Status\n");
+ return -ETIME;
+ }
+
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using the
+ * memory-mapped interface") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ return 0;
+}
+
static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
int i, rc;
@@ -539,33 +577,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
}
- /*
- * ETE mandates that the TRCRSR is written to before
- * enabling it.
- */
- if (etm4x_is_ete(drvdata))
- etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
-
- etm4x_allow_trace(drvdata);
- /* Enable the trace unit */
- etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
-
- /* Synchronize the register updates for sysreg access */
- if (!csa->io_mem)
- isb();
-
- /* wait for TRCSTATR.IDLE to go back down to '0' */
- if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0))
- dev_err(etm_dev,
- "timeout while waiting for Idle Trace Status\n");
-
- /*
- * As recommended by section 4.3.7 ("Synchronization when using the
- * memory-mapped interface") of ARM IHI 0064D
- */
- dsb(sy);
- isb();
-
+ if (!drvdata->paused)
+ rc = etm4_enable_trace_unit(drvdata);
done:
etm4_cs_lock(drvdata, csa);
@@ -808,6 +821,9 @@ static int etm4_enable_perf(struct coresight_device *csdev,
drvdata->trcid = path->trace_id;
+ /* Populate pause state */
+ drvdata->paused = !!READ_ONCE(event->hw.aux_paused);
+
/* And enable it */
ret = etm4_enable_hw(drvdata);
@@ -834,6 +850,9 @@ static int etm4_enable_sysfs(struct coresight_device *csdev, struct coresight_pa
drvdata->trcid = path->trace_id;
+ /* Tracer will never be paused in sysfs mode */
+ drvdata->paused = false;
+
/*
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
* ensures that register writes occur when cpu is powered.
@@ -884,25 +903,12 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
return ret;
}
-static void etm4_disable_hw(void *info)
+static void etm4_disable_trace_unit(struct etmv4_drvdata *drvdata)
{
u32 control;
- struct etmv4_drvdata *drvdata = info;
- struct etmv4_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
struct device *etm_dev = &csdev->dev;
struct csdev_access *csa = &csdev->access;
- int i;
-
- etm4_cs_unlock(drvdata, csa);
- etm4_disable_arch_specific(drvdata);
-
- if (!drvdata->skip_power_up) {
- /* power can be removed from the trace unit now */
- control = etm4x_relaxed_read32(csa, TRCPDCR);
- control &= ~TRCPDCR_PU;
- etm4x_relaxed_write32(csa, control, TRCPDCR);
- }
control = etm4x_relaxed_read32(csa, TRCPRGCTLR);
@@ -943,6 +949,28 @@ static void etm4_disable_hw(void *info)
* of ARM IHI 0064H.b.
*/
isb();
+}
+
+static void etm4_disable_hw(void *info)
+{
+ u32 control;
+ struct etmv4_drvdata *drvdata = info;
+ struct etmv4_config *config = &drvdata->config;
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access *csa = &csdev->access;
+ int i;
+
+ etm4_cs_unlock(drvdata, csa);
+ etm4_disable_arch_specific(drvdata);
+
+ if (!drvdata->skip_power_up) {
+ /* power can be removed from the trace unit now */
+ control = etm4x_relaxed_read32(csa, TRCPDCR);
+ control &= ~TRCPDCR_PU;
+ etm4x_relaxed_write32(csa, control, TRCPDCR);
+ }
+
+ etm4_disable_trace_unit(drvdata);
/* read the status of the single shot comparators */
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
@@ -1020,6 +1048,9 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
raw_spin_unlock(&drvdata->spinlock);
+
+ cscfg_csdev_disable_active_config(csdev);
+
cpus_read_unlock();
/*
@@ -1059,10 +1090,43 @@ static void etm4_disable(struct coresight_device *csdev,
coresight_set_mode(csdev, CS_MODE_DISABLED);
}
+static int etm4_resume_perf(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
+
+ if (coresight_get_mode(csdev) != CS_MODE_PERF)
+ return -EINVAL;
+
+ etm4_cs_unlock(drvdata, csa);
+ etm4_enable_trace_unit(drvdata);
+ etm4_cs_lock(drvdata, csa);
+
+ drvdata->paused = false;
+ return 0;
+}
+
+static void etm4_pause_perf(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct csdev_access *csa = &csdev->access;
+
+ if (coresight_get_mode(csdev) != CS_MODE_PERF)
+ return;
+
+ etm4_cs_unlock(drvdata, csa);
+ etm4_disable_trace_unit(drvdata);
+ etm4_cs_lock(drvdata, csa);
+
+ drvdata->paused = true;
+}
+
static const struct coresight_ops_source etm4_source_ops = {
.cpu_id = etm4_cpu_id,
.enable = etm4_enable,
.disable = etm4_disable,
+ .resume_perf = etm4_resume_perf,
+ .pause_perf = etm4_pause_perf,
};
static const struct coresight_ops etm4_cs_ops = {
@@ -1070,7 +1134,7 @@ static const struct coresight_ops etm4_cs_ops = {
.source_ops = &etm4_source_ops,
};
-static inline bool cpu_supports_sysreg_trace(void)
+static bool cpu_supports_sysreg_trace(void)
{
u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
@@ -1176,7 +1240,7 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
* tracing at the kernel EL and EL0, forcing to use the
* virtual time as the timestamp.
*/
- trfcr = (TRFCR_EL1_TS_VIRTUAL |
+ trfcr = (FIELD_PREP(TRFCR_EL1_TS_MASK, TRFCR_EL1_TS_VIRTUAL) |
TRFCR_EL1_ExTRE |
TRFCR_EL1_E0TRE);
@@ -1372,11 +1436,13 @@ static void etm4_init_arch_data(void *info)
drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5);
/* NUMCNTR, bits[30:28] number of counters available for tracing */
drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5);
+
+ coresight_clear_self_claim_tag_unlocked(csa);
etm4_cs_lock(drvdata, csa);
cpu_detect_trace_filtering(drvdata);
}
-static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
+static u32 etm4_get_victlr_access_type(struct etmv4_config *config)
{
return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK);
}
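
The hunks above split trace-unit start/stop (etm4_enable_trace_unit()/etm4_disable_trace_unit()) out of the full enable/disable paths so perf AUX pause/resume can toggle tracing without reprogramming comparators and filters. Below is a minimal userspace sketch of that state machine; all names are invented for illustration and are not the driver's API:

#include <stdbool.h>
#include <stdio.h>

struct tracer {
	bool configured;	/* one-time programming done */
	bool running;		/* TRCPRGCTLR-style enable bit */
	bool paused;		/* mirrors drvdata->paused */
};

static void trace_unit_enable(struct tracer *t)  { t->running = true; }
static void trace_unit_disable(struct tracer *t) { t->running = false; }

static void tracer_enable_hw(struct tracer *t, bool start_paused)
{
	t->configured = true;		/* expensive setup, done once */
	t->paused = start_paused;	/* from event->hw.aux_paused */
	if (!t->paused)
		trace_unit_enable(t);	/* skip start when born paused */
}

static void tracer_pause(struct tracer *t)	/* cheap: config kept */
{
	trace_unit_disable(t);
	t->paused = true;
}

static void tracer_resume(struct tracer *t)
{
	trace_unit_enable(t);
	t->paused = false;
}

int main(void)
{
	struct tracer t = { 0 };

	tracer_enable_hw(&t, true);	/* event created paused */
	tracer_resume(&t);		/* perf AUX resume */
	tracer_pause(&t);		/* perf AUX pause */
	printf("configured=%d running=%d paused=%d\n",
	       t.configured, t.running, t.paused);
	return 0;
}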
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index fdd0956fecb3..ab251865b893 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2320,11 +2320,11 @@ static ssize_t ts_source_show(struct device *dev,
goto out;
}
- switch (drvdata->trfcr & TRFCR_EL1_TS_MASK) {
+ val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr);
+ switch (val) {
case TRFCR_EL1_TS_VIRTUAL:
case TRFCR_EL1_TS_GUEST_PHYSICAL:
case TRFCR_EL1_TS_PHYSICAL:
- val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr);
break;
default:
val = -1;
@@ -2440,7 +2440,7 @@ static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
return reg.data;
}
-static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
+static u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
{
struct dev_ext_attribute *eattr;
@@ -2464,7 +2464,7 @@ static ssize_t coresight_etm4x_reg_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}
-static inline bool
+static bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
switch (offset) {
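
The ts_source_show() fix above matters because the TRFCR_EL1_TS_* values are unshifted field encodings (note the FIELD_PREP() in the core-driver hunk): masking the register and comparing against them directly can never match, so the field must be extracted first. A small self-contained model of FIELD_GET()/FIELD_PREP() semantics follows; the mask and values are illustrative, not the real TRFCR layout:

#include <stdint.h>
#include <stdio.h>

#define TS_MASK		(0x3u << 5)	/* a 2-bit field at bits [6:5] */
#define TS_VIRTUAL	1u		/* unshifted field value */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val * (mask & -mask)) & mask;	/* shift val into the field */
}

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) / (mask & -mask);	/* shift the field back down */
}

int main(void)
{
	uint32_t reg = field_prep(TS_MASK, TS_VIRTUAL);

	/* Broken: compares shifted bits against the unshifted constant. */
	if ((reg & TS_MASK) == TS_VIRTUAL)
		puts("naive compare matched (it never will)");

	/* Correct: extract the field, then compare. */
	if (field_get(TS_MASK, reg) == TS_VIRTUAL)
		puts("field_get compare matched");
	return 0;
}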
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index bd7db36ba197..ac649515054d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -983,6 +983,7 @@ struct etmv4_save_state {
* @state_needs_restore: True when there is context to restore after PM exit
* @skip_power_up: Indicates if an implementation can skip powering up
* the trace unit.
+ * @paused: Indicates if the trace unit is paused.
* @arch_features: Bitmap of arch features of etmv4 devices.
*/
struct etmv4_drvdata {
@@ -1036,6 +1037,7 @@ struct etmv4_drvdata {
struct etmv4_save_state *save_state;
bool state_needs_restore;
bool skip_power_up;
+ bool paused;
DECLARE_BITMAP(arch_features, ETM4_IMPDEF_FEATURE_MAX);
};
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 0541712b2bcb..b1922dbe9292 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -255,6 +255,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
drvdata->base = base;
desc.groups = coresight_funnel_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
+ coresight_clear_self_claim_tag(&desc.access);
}
dev_set_drvdata(dev, drvdata);
@@ -433,7 +434,8 @@ static struct amba_driver dynamic_funnel_driver = {
static int __init funnel_init(void)
{
- return coresight_init_driver("funnel", &dynamic_funnel_driver, &funnel_driver);
+ return coresight_init_driver("funnel", &dynamic_funnel_driver, &funnel_driver,
+ THIS_MODULE);
}
static void __exit funnel_exit(void)
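
coresight_init_driver() growing a THIS_MODULE argument (here and in the replicator, stm, tmc and tpiu hunks below) suggests the core now records which module owns a registered driver so it can take a reference while the device is in use. A toy sketch of that ownership-recording pattern; every name here is invented:

#include <stdio.h>

struct module { const char *name; };

struct driver_entry {
	const char *name;
	const struct module *owner;
};

static int init_driver(struct driver_entry *e, const char *name,
		       const struct module *owner)
{
	e->name = name;
	e->owner = owner;	/* core can pin/release this later */
	return 0;
}

int main(void)
{
	static const struct module this_module = { "funnel" };
	struct driver_entry e;

	init_driver(&e, "funnel", &this_module);
	printf("%s owned by module %s\n", e.name, e.owner->name);
	return 0;
}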
diff --git a/drivers/hwtracing/coresight/coresight-kunit-tests.c b/drivers/hwtracing/coresight/coresight-kunit-tests.c
new file mode 100644
index 000000000000..c8f361767c45
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-kunit-tests.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+#include <kunit/device.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+static struct coresight_device *coresight_test_device(struct device *dev)
+{
+ struct coresight_device *csdev = devm_kcalloc(dev, 1,
+ sizeof(struct coresight_device),
+ GFP_KERNEL);
+ csdev->pdata = devm_kcalloc(dev, 1,
+ sizeof(struct coresight_platform_data),
+ GFP_KERNEL);
+ return csdev;
+}
+
+static void test_default_sink(struct kunit *test)
+{
+ /*
+ * Source -> ETF -> ETR -> CATU
+ * ^
+ * | default
+ */
+ struct device *dev = kunit_device_register(test, "coresight_kunit");
+ struct coresight_device *src = coresight_test_device(dev),
+ *etf = coresight_test_device(dev),
+ *etr = coresight_test_device(dev),
+ *catu = coresight_test_device(dev);
+ struct coresight_connection conn = {};
+
+ src->type = CORESIGHT_DEV_TYPE_SOURCE;
+ /*
+ * Don't use CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, as that would always return
+ * a TRBE sink if one is registered.
+ */
+ src->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS;
+ etf->type = CORESIGHT_DEV_TYPE_LINKSINK;
+ etf->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ etr->type = CORESIGHT_DEV_TYPE_SINK;
+ etr->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
+ catu->type = CORESIGHT_DEV_TYPE_HELPER;
+
+ conn.src_dev = src;
+ conn.dest_dev = etf;
+ coresight_add_out_conn(dev, src->pdata, &conn);
+
+ conn.src_dev = etf;
+ conn.dest_dev = etr;
+ coresight_add_out_conn(dev, etf->pdata, &conn);
+
+ conn.src_dev = etr;
+ conn.dest_dev = catu;
+ coresight_add_out_conn(dev, etr->pdata, &conn);
+
+ KUNIT_ASSERT_PTR_EQ(test, coresight_find_default_sink(src), etr);
+}
+
+static struct kunit_case coresight_testcases[] = {
+ KUNIT_CASE(test_default_sink),
+ {}
+};
+
+static struct kunit_suite coresight_test_suite = {
+ .name = "coresight_test_suite",
+ .test_cases = coresight_testcases,
+};
+
+kunit_test_suites(&coresight_test_suite);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("James Clark <james.clark@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight KUnit tests");
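
Assuming CONFIG_KUNIT and the CoreSight options are enabled for the target architecture, one plausible way to run the suite above from a kernel tree is the KUnit wrapper (invocation shown for illustration only; exact flags depend on the tree):

	./tools/testing/kunit/kunit.py run 'coresight_test_suite'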
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 8192ba3279f0..0db64c5f4995 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -139,7 +139,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode)
EXPORT_SYMBOL_GPL(coresight_find_csdev_by_fwnode);
#ifdef CONFIG_OF
-static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
+static bool of_coresight_legacy_ep_is_input(struct device_node *ep)
{
return of_property_read_bool(ep, "slave-mode");
}
@@ -159,7 +159,7 @@ static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
return parent;
}
-static inline struct device_node *
+static struct device_node *
of_coresight_get_output_ports_node(const struct device_node *node)
{
return of_get_child_by_name(node, "out-ports");
@@ -327,14 +327,14 @@ static int of_get_coresight_platform_data(struct device *dev,
return 0;
}
#else
-static inline int
+static int
of_get_coresight_platform_data(struct device *dev,
struct coresight_platform_data *pdata)
{
return -ENOENT;
}
-static inline int of_coresight_get_cpu(struct device *dev)
+static int of_coresight_get_cpu(struct device *dev)
{
return -ENODEV;
}
@@ -356,7 +356,7 @@ static const guid_t coresight_graph_uuid = GUID_INIT(0x3ecbc8b6, 0x1d0e, 0x4fb3,
#define ACPI_CORESIGHT_LINK_SLAVE 0
#define ACPI_CORESIGHT_LINK_MASTER 1
-static inline bool is_acpi_guid(const union acpi_object *obj)
+static bool is_acpi_guid(const union acpi_object *obj)
{
return (obj->type == ACPI_TYPE_BUFFER) && (obj->buffer.length == 16);
}
@@ -365,24 +365,24 @@ static inline bool is_acpi_guid(const union acpi_object *obj)
* acpi_guid_matches - Checks if the given object is a GUID object and
* that it matches the supplied GUID.
*/
-static inline bool acpi_guid_matches(const union acpi_object *obj,
+static bool acpi_guid_matches(const union acpi_object *obj,
const guid_t *guid)
{
return is_acpi_guid(obj) &&
guid_equal((guid_t *)obj->buffer.pointer, guid);
}
-static inline bool is_acpi_dsd_graph_guid(const union acpi_object *obj)
+static bool is_acpi_dsd_graph_guid(const union acpi_object *obj)
{
return acpi_guid_matches(obj, &acpi_graph_uuid);
}
-static inline bool is_acpi_coresight_graph_guid(const union acpi_object *obj)
+static bool is_acpi_coresight_graph_guid(const union acpi_object *obj)
{
return acpi_guid_matches(obj, &coresight_graph_uuid);
}
-static inline bool is_acpi_coresight_graph(const union acpi_object *obj)
+static bool is_acpi_coresight_graph(const union acpi_object *obj)
{
const union acpi_object *graphid, *guid, *links;
@@ -469,7 +469,7 @@ static inline bool is_acpi_coresight_graph(const union acpi_object *obj)
* }, // End of ACPI Graph Property
* })
*/
-static inline bool acpi_validate_dsd_graph(const union acpi_object *graph)
+static bool acpi_validate_dsd_graph(const union acpi_object *graph)
{
int i, n;
const union acpi_object *rev, *nr_graphs;
@@ -553,7 +553,7 @@ acpi_get_dsd_graph(struct acpi_device *adev, struct acpi_buffer *buf)
return NULL;
}
-static inline bool
+static bool
acpi_validate_coresight_graph(const union acpi_object *cs_graph)
{
int nlinks;
@@ -794,14 +794,14 @@ acpi_get_coresight_platform_data(struct device *dev,
#else
-static inline int
+static int
acpi_get_coresight_platform_data(struct device *dev,
struct coresight_platform_data *pdata)
{
return -ENOENT;
}
-static inline int acpi_coresight_get_cpu(struct device *dev)
+static int acpi_coresight_get_cpu(struct device *dev)
{
return -ENODEV;
}
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 82644aff8d2b..33e22b1ba043 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -35,7 +35,11 @@ extern const struct device_type coresight_dev_type[];
* Coresight device CLAIM protocol.
* See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore.
*/
-#define CORESIGHT_CLAIM_SELF_HOSTED BIT(1)
+#define CORESIGHT_CLAIM_MASK GENMASK(1, 0)
+#define CORESIGHT_CLAIM_FREE 0
+#define CORESIGHT_CLAIM_EXTERNAL 1
+#define CORESIGHT_CLAIM_SELF_HOSTED 2
+#define CORESIGHT_CLAIM_INVALID 3
#define TIMEOUT_US 100
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
@@ -56,10 +60,8 @@ struct cs_off_attribute {
u32 off;
};
-extern ssize_t coresight_simple_show32(struct device *_dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t coresight_simple_show_pair(struct device *_dev,
- struct device_attribute *attr, char *buf);
+ssize_t coresight_simple_show32(struct device *_dev, struct device_attribute *attr, char *buf);
+ssize_t coresight_simple_show_pair(struct device *_dev, struct device_attribute *attr, char *buf);
#define coresight_simple_reg32(name, offset) \
(&((struct cs_off_attribute[]) { \
@@ -156,8 +158,8 @@ void coresight_path_assign_trace_id(struct coresight_path *path,
enum cs_mode mode);
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X)
-extern int etm_readl_cp14(u32 off, unsigned int *val);
-extern int etm_writel_cp14(u32 off, u32 val);
+int etm_readl_cp14(u32 off, unsigned int *val);
+int etm_writel_cp14(u32 off, u32 val);
#else
static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
@@ -168,8 +170,8 @@ struct cti_assoc_op {
void (*remove)(struct coresight_device *csdev);
};
-extern void coresight_set_cti_ops(const struct cti_assoc_op *cti_op);
-extern void coresight_remove_cti_ops(void);
+void coresight_set_cti_ops(const struct cti_assoc_op *cti_op);
+void coresight_remove_cti_ops(void);
/*
* Macros and inline functions to handle CoreSight UCI data and driver
@@ -249,5 +251,7 @@ void coresight_add_helper(struct coresight_device *csdev,
void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
struct coresight_device *coresight_get_percpu_sink(int cpu);
void coresight_disable_source(struct coresight_device *csdev, void *data);
+void coresight_pause_source(struct coresight_device *csdev);
+int coresight_resume_source(struct coresight_device *csdev);
#endif
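
With the CLAIM tag now treated as a 2-bit field rather than a single bit, the four states above (free, external debugger, self-hosted, invalid) can be decoded directly. A self-contained sketch of that decode; the helper name is made up:

#include <stdint.h>
#include <stdio.h>

#define CLAIM_MASK		0x3u
#define CLAIM_FREE		0u
#define CLAIM_EXTERNAL		1u
#define CLAIM_SELF_HOSTED	2u
#define CLAIM_INVALID		3u

static const char *claim_state(uint32_t claimtag)
{
	switch (claimtag & CLAIM_MASK) {
	case CLAIM_FREE:	return "unclaimed";
	case CLAIM_EXTERNAL:	return "claimed by external debugger";
	case CLAIM_SELF_HOSTED:	return "claimed by self-hosted agent";
	default:		return "invalid (both tags set)";
	}
}

int main(void)
{
	for (uint32_t tag = 0; tag < 4; tag++)
		printf("CLAIM=%u -> %s\n", tag, claim_state(tag));
	return 0;
}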
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index ee7ee79f6cf7..06efd2b01a0f 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -63,7 +63,7 @@ static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
/*
* replicator_reset : Reset the replicator configuration to sane values.
*/
-static inline void replicator_reset(struct replicator_drvdata *drvdata)
+static void replicator_reset(struct replicator_drvdata *drvdata)
{
if (drvdata->base)
dynamic_replicator_reset(drvdata);
@@ -262,6 +262,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
drvdata->base = base;
desc.groups = replicator_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
+ coresight_clear_self_claim_tag(&desc.access);
}
if (fwnode_property_present(dev_fwnode(dev),
@@ -438,7 +439,8 @@ static struct amba_driver dynamic_replicator_driver = {
static int __init replicator_init(void)
{
- return coresight_init_driver("replicator", &dynamic_replicator_driver, &replicator_driver);
+ return coresight_init_driver("replicator", &dynamic_replicator_driver, &replicator_driver,
+ THIS_MODULE);
}
static void __exit replicator_exit(void)
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 26f9339f38b9..e45c6c7204b4 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -301,7 +301,7 @@ static const struct coresight_ops stm_cs_ops = {
.source_ops = &stm_source_ops,
};
-static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes)
+static bool stm_addr_unaligned(const void *addr, u8 write_bytes)
{
return ((unsigned long)addr & (write_bytes - 1));
}
@@ -685,7 +685,7 @@ static int of_stm_get_stimulus_area(struct device *dev, struct resource *res)
return of_address_to_resource(np, index, res);
}
#else
-static inline int of_stm_get_stimulus_area(struct device *dev,
+static int of_stm_get_stimulus_area(struct device *dev,
struct resource *res)
{
return -ENOENT;
@@ -729,7 +729,7 @@ static int acpi_stm_get_stimulus_area(struct device *dev, struct resource *res)
return rc;
}
#else
-static inline int acpi_stm_get_stimulus_area(struct device *dev,
+static int acpi_stm_get_stimulus_area(struct device *dev,
struct resource *res)
{
return -ENOENT;
@@ -1058,7 +1058,7 @@ static struct platform_driver stm_platform_driver = {
static int __init stm_init(void)
{
- return coresight_init_driver("stm", &stm_driver, &stm_platform_driver);
+ return coresight_init_driver("stm", &stm_driver, &stm_platform_driver, THIS_MODULE);
}
static void __exit stm_exit(void)
diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
index 213b4159b062..2b40e556be87 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
@@ -10,7 +10,7 @@
#include "coresight-syscfg-configfs.h"
/* create a default ci_type. */
-static inline struct config_item_type *cscfg_create_ci_type(void)
+static struct config_item_type *cscfg_create_ci_type(void)
{
struct config_item_type *ci_type;
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index a70c1454b410..83dad24e0116 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -395,6 +395,8 @@ static void cscfg_remove_owned_csdev_configs(struct coresight_device *csdev, voi
if (list_empty(&csdev->config_csdev_list))
return;
+ guard(raw_spinlock_irqsave)(&csdev->cscfg_csdev_lock);
+
list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) {
if (config_csdev->config_desc->load_owner == load_owner)
list_del(&config_csdev->node);
@@ -867,6 +869,25 @@ unlock_exit:
}
EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats);
+static bool cscfg_config_desc_get(struct cscfg_config_desc *config_desc)
+{
+ if (!atomic_fetch_inc(&config_desc->active_cnt)) {
+ /* must ensure that config cannot be unloaded in use */
+ if (unlikely(cscfg_owner_get(config_desc->load_owner))) {
+ atomic_dec(&config_desc->active_cnt);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void cscfg_config_desc_put(struct cscfg_config_desc *config_desc)
+{
+ if (!atomic_dec_return(&config_desc->active_cnt))
+ cscfg_owner_put(config_desc->load_owner);
+}
+
/*
* This activates a configuration for either perf or sysfs. Perf can have
* multiple active configs, selected per event; sysfs is limited to one.
@@ -890,22 +911,17 @@ static int _cscfg_activate_config(unsigned long cfg_hash)
if (config_desc->available == false)
return -EBUSY;
- /* must ensure that config cannot be unloaded in use */
- err = cscfg_owner_get(config_desc->load_owner);
- if (err)
+ if (!cscfg_config_desc_get(config_desc)) {
+ err = -EINVAL;
break;
+ }
+
/*
* increment the global active count - this controls changes to
* active configurations
*/
atomic_inc(&cscfg_mgr->sys_active_cnt);
- /*
- * mark the descriptor as active so enable config on a
- * device instance will use it
- */
- atomic_inc(&config_desc->active_cnt);
-
err = 0;
dev_dbg(cscfg_device(), "Activate config %s.\n", config_desc->name);
break;
@@ -920,9 +936,8 @@ static void _cscfg_deactivate_config(unsigned long cfg_hash)
list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
- atomic_dec(&config_desc->active_cnt);
atomic_dec(&cscfg_mgr->sys_active_cnt);
- cscfg_owner_put(config_desc->load_owner);
+ cscfg_config_desc_put(config_desc);
dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name);
break;
}
@@ -1047,7 +1062,7 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
unsigned long cfg_hash, int preset)
{
struct cscfg_config_csdev *config_csdev_active = NULL, *config_csdev_item;
- const struct cscfg_config_desc *config_desc;
+ struct cscfg_config_desc *config_desc;
unsigned long flags;
int err = 0;
@@ -1062,8 +1077,8 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags);
list_for_each_entry(config_csdev_item, &csdev->config_csdev_list, node) {
config_desc = config_csdev_item->config_desc;
- if ((atomic_read(&config_desc->active_cnt)) &&
- ((unsigned long)config_desc->event_ea->var == cfg_hash)) {
+ if (((unsigned long)config_desc->event_ea->var == cfg_hash) &&
+ cscfg_config_desc_get(config_desc)) {
config_csdev_active = config_csdev_item;
csdev->active_cscfg_ctxt = (void *)config_csdev_active;
break;
@@ -1097,7 +1112,11 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
err = -EBUSY;
raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
}
+
+ if (err)
+ cscfg_config_desc_put(config_desc);
}
+
return err;
}
EXPORT_SYMBOL_GPL(cscfg_csdev_enable_active_config);
@@ -1136,8 +1155,10 @@ void cscfg_csdev_disable_active_config(struct coresight_device *csdev)
raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags);
/* true if there was an enabled active config */
- if (config_csdev)
+ if (config_csdev) {
cscfg_csdev_disable_config(config_csdev);
+ cscfg_config_desc_put(config_csdev->config_desc);
+ }
}
EXPORT_SYMBOL_GPL(cscfg_csdev_disable_active_config);
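
The cscfg_config_desc_get()/cscfg_config_desc_put() pair introduced above centralizes a classic refcount idiom: the 0->1 transition also pins the owning module, and the 1->0 transition releases it, so activation and per-device enable share one balanced get/put discipline. A userspace model of the same idiom using C11 atomics; names are illustrative and the owner "refcount" stands in for the module reference:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_cnt;
static atomic_int owner_refs;	/* stand-in for a module refcount */

static bool owner_get(void) { atomic_fetch_add(&owner_refs, 1); return true; }
static void owner_put(void) { atomic_fetch_sub(&owner_refs, 1); }

static bool config_get(void)
{
	if (atomic_fetch_add(&active_cnt, 1) == 0) {	/* first user */
		if (!owner_get()) {			/* pin the owner */
			atomic_fetch_sub(&active_cnt, 1);
			return false;
		}
	}
	return true;
}

static void config_put(void)
{
	if (atomic_fetch_sub(&active_cnt, 1) == 1)	/* last user */
		owner_put();
}

int main(void)
{
	config_get();	/* activation */
	config_get();	/* per-device enable */
	config_put();
	config_put();
	printf("active=%d owner_refs=%d\n",
	       atomic_load(&active_cnt), atomic_load(&owner_refs));
	return 0;
}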
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index a7814e8e657b..88afb16bb6be 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -287,8 +287,8 @@ static int tmc_open(struct inode *inode, struct file *file)
return 0;
}
-static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
- loff_t pos, size_t len, char **bufpp)
+static ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata, loff_t pos, size_t len,
+ char **bufpp)
{
switch (drvdata->config_type) {
case TMC_CONFIG_TYPE_ETB:
@@ -591,7 +591,7 @@ static const struct attribute_group *coresight_etr_groups[] = {
NULL,
};
-static inline bool tmc_etr_can_use_sg(struct device *dev)
+static bool tmc_etr_can_use_sg(struct device *dev)
{
int ret;
u8 val_u8;
@@ -621,7 +621,7 @@ static inline bool tmc_etr_can_use_sg(struct device *dev)
return false;
}
-static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
+static bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
@@ -869,6 +869,7 @@ static int __tmc_probe(struct device *dev, struct resource *res)
dev->platform_data = pdata;
desc.pdata = pdata;
+ coresight_clear_self_claim_tag(&desc.access);
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
@@ -1060,7 +1061,7 @@ static struct platform_driver tmc_platform_driver = {
static int __init tmc_init(void)
{
- return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver);
+ return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver, THIS_MODULE);
}
static void __exit tmc_exit(void)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d858740001c2..0f45ab5e5249 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -482,6 +482,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
unsigned long offset, to_read = 0, flags;
struct cs_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct perf_event *event = handle->event;
if (!buf)
return 0;
@@ -586,6 +587,14 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
* is expected by the perf ring buffer.
*/
CS_LOCK(drvdata->base);
+
+ /*
+ * If the event is still active, this update was triggered by an AUX pause.
+ * Re-enable the sink so that it is ready when AUX resume is invoked.
+ */
+ if (!event->hw.state)
+ __tmc_etb_enable_hw(drvdata);
+
out:
raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -747,7 +756,6 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
char *buf = NULL;
enum tmc_mode mode;
unsigned long flags;
- int rc = 0;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
@@ -773,11 +781,11 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
* can't be NULL.
*/
memset(drvdata->buf, 0, drvdata->size);
- rc = __tmc_etb_enable_hw(drvdata);
- if (rc) {
- raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return rc;
- }
+ /*
+ * Ignore failures to enable the TMC so that we don't
+ * leave the TMC in a "reading" state.
+ */
+ __tmc_etb_enable_hw(drvdata);
} else {
/*
* The ETB/ETF is not tracing and the buffer was just read.
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 76a8cb29b68a..b07fcdb3fe1a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -125,7 +125,7 @@ struct etr_sg_table {
* If we spill over to a new page for mapping 1 entry, we could as
* well replace the link entry of the previous page with the last entry.
*/
-static inline unsigned long __attribute_const__
+static unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
@@ -239,13 +239,13 @@ err:
return -ENOMEM;
}
-static inline long
+static long
tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
{
return tmc_pages_get_offset(&sg_table->data_pages, addr);
}
-static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
+static void tmc_free_table_pages(struct tmc_sg_table *sg_table)
{
if (sg_table->table_vaddr)
vunmap(sg_table->table_vaddr);
@@ -481,7 +481,7 @@ static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
dev_dbg(sg_table->dev, "******* End of Table *****\n");
}
#else
-static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
+static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
#endif
/*
@@ -886,10 +886,8 @@ void tmc_etr_remove_catu_ops(void)
}
EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
-static inline int tmc_etr_mode_alloc_buf(int mode,
- struct tmc_drvdata *drvdata,
- struct etr_buf *etr_buf, int node,
- void **pages)
+static int tmc_etr_mode_alloc_buf(int mode, struct tmc_drvdata *drvdata, struct etr_buf *etr_buf,
+ int node, void **pages)
{
int rc = -EINVAL;
@@ -1009,7 +1007,7 @@ static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
}
-static inline s64
+static s64
tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
{
ssize_t len;
@@ -1636,6 +1634,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etr_perf_buffer *etr_perf = config;
struct etr_buf *etr_buf = etr_perf->etr_buf;
+ struct perf_event *event = handle->event;
raw_spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -1705,6 +1704,15 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
*/
smp_wmb();
+ /*
+ * If the event is still active, this update was triggered by an AUX pause.
+ * Re-enable the sink so that it is ready when AUX resume is invoked.
+ */
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (csdev->refcnt && !event->hw.state)
+ __tmc_etr_enable_hw(drvdata);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
out:
/*
* Don't set the TRUNCATED flag in snapshot mode because 1) the
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 97ef36f03ec2..3e0159288428 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -318,7 +318,7 @@ static struct platform_driver tpiu_platform_driver = {
static int __init tpiu_init(void)
{
- return coresight_init_driver("tpiu", &tpiu_driver, &tpiu_platform_driver);
+ return coresight_init_driver("tpiu", &tpiu_driver, &tpiu_platform_driver, THIS_MODULE);
}
static void __exit tpiu_exit(void)
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index fff67aac8418..8267dd1a2130 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -160,22 +160,22 @@ static void trbe_check_errata(struct trbe_cpudata *cpudata)
}
}
-static inline bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
+static bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
{
return (i < TRBE_ERRATA_MAX) && test_bit(i, cpudata->errata);
}
-static inline bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
+static bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
{
return trbe_has_erratum(cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE);
}
-static inline bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
+static bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
{
return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
}
-static inline bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
+static bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
{
/*
* An errata-affected TRBE implementation will need TSB CSYNC and
@@ -185,7 +185,7 @@ static inline bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
return trbe_has_erratum(cpudata, TRBE_NEEDS_DRAIN_AFTER_DISABLE);
}
-static inline bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudata)
+static bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudata)
{
/*
* An errata-affected TRBE implementation will need an additional
@@ -196,7 +196,7 @@ static inline bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudat
return trbe_has_erratum(cpudata, TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE);
}
-static inline bool trbe_is_broken(struct trbe_cpudata *cpudata)
+static bool trbe_is_broken(struct trbe_cpudata *cpudata)
{
return trbe_has_erratum(cpudata, TRBE_IS_BROKEN);
}
@@ -208,13 +208,13 @@ static int trbe_alloc_node(struct perf_event *event)
return cpu_to_node(event->cpu);
}
-static inline void trbe_drain_buffer(void)
+static void trbe_drain_buffer(void)
{
tsb_csync();
dsb(nsh);
}
-static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
+static void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
{
/*
* Enable the TRBE without clearing LIMITPTR which
@@ -231,7 +231,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
isb();
}
-static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
+static void set_trbe_disabled(struct trbe_cpudata *cpudata)
{
u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 7a01f2687b4c..740066ceaea3 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
+#include <linux/string_choices.h>
#include "i2c-algo-pcf.h"
@@ -316,7 +317,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
pmsg = &msgs[i];
DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
- pmsg->flags & I2C_M_RD ? "read" : "write",
+ str_read_write(pmsg->flags & I2C_M_RD),
pmsg->len, pmsg->addr, i + 1, num);)
ret = pcf_doAddress(adap, pmsg);
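
str_read_write(), adopted above (and in the i2c-at91-master.c hunk below), is one of the string_choices.h helpers that replace open-coded `flag ? "read" : "write"` ternaries. Its shape is simply the following (reimplemented here for illustration; the kernel header provides it as a static inline):

#include <stdbool.h>
#include <stdio.h>

static const char *str_read_write(bool v)
{
	return v ? "read" : "write";
}

int main(void)
{
	unsigned int flags = 0x1;	/* pretend this is I2C_M_RD */

	printf("doing a %s of 4 bytes\n", str_read_write(flags & 0x1));
	return 0;
}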
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 83c88c79afe2..48c5ab832009 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -200,7 +200,7 @@ config I2C_ISMT
config I2C_PIIX4
tristate "Intel PIIX4 and compatible (ATI/AMD/Serverworks/Broadcom/SMSC)"
- depends on PCI && HAS_IOPORT
+ depends on PCI && HAS_IOPORT && X86
select I2C_SMBUS
help
If you say yes to this option, support will be included for the Intel
@@ -592,6 +592,17 @@ config I2C_DESIGNWARE_PLATFORM
This driver can also be built as a module. If so, the module
will be called i2c-designware-platform.
+config I2C_DESIGNWARE_AMDISP
+ tristate "Synopsys DesignWare Platform for AMDISP"
+ depends on DRM_AMD_ISP || COMPILE_TEST
+ depends on I2C_DESIGNWARE_CORE
+ help
+ If you say yes to this option, support will be included for the
+ AMDISP Synopsys DesignWare I2C adapter.
+
+ This driver can also be built as a module. If so, the module
+ will be called amd_isp_i2c_designware.
+
config I2C_DESIGNWARE_AMDPSP
bool "AMD PSP I2C semaphore support"
depends on ACPI
@@ -845,7 +856,7 @@ config I2C_LS2X
config I2C_MLXBF
tristate "Mellanox BlueField I2C controller"
- depends on MELLANOX_PLATFORM && ARM64
+ depends on (MELLANOX_PLATFORM && ARM64) || COMPILE_TEST
depends on ACPI
select I2C_SLAVE
help
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c1252e2b779e..04db855fdfd6 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
i2c-designware-platform-y := i2c-designware-platdrv.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_AMDPSP) += i2c-designware-amdpsp.o
i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
+obj-$(CONFIG_I2C_DESIGNWARE_AMDISP) += i2c-designware-amdisp.o
obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
i2c-designware-pci-y := i2c-designware-pcidrv.o
obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index ee3b469ddfb9..374fc50bb205 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
#include "i2c-at91.h"
@@ -523,7 +524,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
*/
dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
- (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
+ str_read_write(dev->msg->flags & I2C_M_RD), dev->buf_len);
reinit_completion(&dev->cmd_complete);
dev->transfer_status = 0;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 332a0fcca28d..63bc3c8f49d3 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -224,11 +224,6 @@ static void slave_rx_tasklet_fn(unsigned long);
| BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
| BIT(IS_S_RX_THLD_SHIFT))
-static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
-static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable);
-
static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 offset)
{
@@ -264,8 +259,8 @@ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
}
}
-static void bcm_iproc_i2c_slave_init(
- struct bcm_iproc_i2c_dev *iproc_i2c, bool need_reset)
+static void bcm_iproc_i2c_slave_init(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool need_reset)
{
u32 val;
@@ -276,8 +271,8 @@ static void bcm_iproc_i2c_slave_init(
val |= BIT(CFG_RESET_SHIFT);
iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- /* wait 100 usec per spec */
- udelay(100);
+ /* wait approximately 100 usec as per spec */
+ usleep_range(100, 200);
/* bring controller out of reset */
val &= ~(BIT(CFG_RESET_SHIFT));
@@ -316,6 +311,19 @@ static void bcm_iproc_i2c_slave_init(
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool enable)
+{
+ u32 val;
+
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+ if (enable)
+ val |= BIT(CFG_EN_SHIFT);
+ else
+ val &= ~BIT(CFG_EN_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
+}
+
static bool bcm_iproc_i2c_check_slave_status
(struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
{
@@ -438,7 +446,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
u32 val;
u8 value;
-
if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
iproc_i2c->tx_underrun++;
if (iproc_i2c->tx_underrun == 1)
@@ -542,7 +549,7 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
{
struct i2c_msg *msg = iproc_i2c->msg;
- uint32_t val;
+ u32 val;
/* Read valid data from RX FIFO */
while (iproc_i2c->rx_bytes < msg->len) {
@@ -688,8 +695,8 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
val &= ~(BIT(CFG_EN_SHIFT));
iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- /* wait 100 usec per spec */
- udelay(100);
+ /* wait approximately 100 usec as per spec */
+ usleep_range(100, 200);
/* bring controller out of reset */
val &= ~(BIT(CFG_RESET_SHIFT));
@@ -708,19 +715,6 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff);
}
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable)
-{
- u32 val;
-
- val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
- if (enable)
- val |= BIT(CFG_EN_SHIFT);
- else
- val &= ~BIT(CFG_EN_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
-}
-
static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg)
{
@@ -734,31 +728,31 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
return 0;
case M_CMD_STATUS_LOST_ARB:
- dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
+ dev_err(iproc_i2c->device, "lost bus arbitration\n");
return -EAGAIN;
case M_CMD_STATUS_NACK_ADDR:
- dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
+ dev_err(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
return -ENXIO;
case M_CMD_STATUS_NACK_DATA:
- dev_dbg(iproc_i2c->device, "NAK data\n");
+ dev_err(iproc_i2c->device, "NAK data\n");
return -ENXIO;
case M_CMD_STATUS_TIMEOUT:
- dev_dbg(iproc_i2c->device, "bus timeout\n");
+ dev_err(iproc_i2c->device, "bus timeout\n");
return -ETIMEDOUT;
case M_CMD_STATUS_FIFO_UNDERRUN:
- dev_dbg(iproc_i2c->device, "FIFO under-run\n");
+ dev_err(iproc_i2c->device, "FIFO under-run\n");
return -ENXIO;
case M_CMD_STATUS_RX_FIFO_FULL:
- dev_dbg(iproc_i2c->device, "RX FIFO full\n");
+ dev_err(iproc_i2c->device, "RX FIFO full\n");
return -ETIMEDOUT;
default:
- dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+ dev_err(iproc_i2c->device, "unknown error code=%d\n", val);
/* re-initialize i2c for recovery */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
@@ -833,7 +827,7 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
* The i2c quirks are set to enforce this rule.
*/
static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
- struct i2c_msg *msgs, bool process_call)
+ struct i2c_msg *msgs, bool process_call)
{
int i;
u8 addr;
@@ -842,8 +836,8 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg = &msgs[0];
/* check if bus is busy */
- if (!!(iproc_i2c_rd_reg(iproc_i2c,
- M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) {
+ if (iproc_i2c_rd_reg(iproc_i2c,
+ M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT)) {
dev_warn(iproc_i2c->device, "bus is busy\n");
return -EBUSY;
}
@@ -970,14 +964,14 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
ret = bcm_iproc_i2c_xfer_internal(iproc_i2c, msgs, process_call);
if (ret) {
- dev_dbg(iproc_i2c->device, "xfer failed\n");
+ dev_err(iproc_i2c->device, "xfer failed\n");
return ret;
}
return num;
}
-static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+static u32 bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
{
u32 val;
@@ -989,6 +983,63 @@ static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
return val;
}
+static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+
+ if (iproc_i2c->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ iproc_i2c->slave = slave;
+
+ tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
+ (unsigned long)iproc_i2c);
+
+ bcm_iproc_i2c_slave_init(iproc_i2c, false);
+
+ return 0;
+}
+
+static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+ u32 tmp;
+
+ if (!iproc_i2c->slave)
+ return -EINVAL;
+
+ disable_irq(iproc_i2c->irq);
+
+ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
+
+ /* disable all slave interrupts */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
+ IE_S_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+
+ /* Erase the slave address programmed */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
+
+ /* flush TX/RX FIFOs */
+ tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
+
+ /* clear all pending slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
+
+ iproc_i2c->slave = NULL;
+
+ enable_irq(iproc_i2c->irq);
+
+ return 0;
+}
+
static struct i2c_algorithm bcm_iproc_algo = {
.master_xfer = bcm_iproc_i2c_xfer,
.functionality = bcm_iproc_i2c_functionality,
@@ -1010,21 +1061,18 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
"clock-frequency", &bus_speed);
if (ret < 0) {
dev_info(iproc_i2c->device,
- "unable to interpret clock-frequency DT property\n");
+ "unable to interpret clock-frequency DT property\n");
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
}
- if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) {
- dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
- bus_speed);
- dev_err(iproc_i2c->device,
- "valid speeds are 100khz and 400khz\n");
- return -EINVAL;
- } else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) {
+ if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ)
+ return dev_err_probe(iproc_i2c->device, -EINVAL,
+ "%d Hz not supported (out of 100-400 kHz range)\n",
+ bus_speed);
+ else if (bus_speed < I2C_MAX_FAST_MODE_FREQ)
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- } else {
+ else
bus_speed = I2C_MAX_FAST_MODE_FREQ;
- }
iproc_i2c->bus_speed = bus_speed;
val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
@@ -1039,9 +1087,9 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
static int bcm_iproc_i2c_probe(struct platform_device *pdev)
{
- int irq, ret = 0;
struct bcm_iproc_i2c_dev *iproc_i2c;
struct i2c_adapter *adap;
+ int irq, ret;
iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
GFP_KERNEL);
@@ -1066,11 +1114,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(iproc_i2c->device->of_node,
"brcm,ape-hsls-addr-mask",
&iproc_i2c->ape_addr_mask);
- if (ret < 0) {
- dev_err(iproc_i2c->device,
- "'brcm,ape-hsls-addr-mask' missing\n");
- return -EINVAL;
- }
+ if (ret < 0)
+ return dev_err_probe(iproc_i2c->device, ret,
+ "'brcm,ape-hsls-addr-mask' missing\n");
spin_lock_init(&iproc_i2c->idm_lock);
@@ -1090,11 +1136,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(iproc_i2c->device, irq,
bcm_iproc_i2c_isr, 0, pdev->name,
iproc_i2c);
- if (ret < 0) {
- dev_err(iproc_i2c->device,
- "unable to request irq %i\n", irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(iproc_i2c->device, ret,
+ "unable to request irq %i\n", irq);
iproc_i2c->irq = irq;
} else {
@@ -1106,9 +1150,8 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
adap = &iproc_i2c->adapter;
i2c_set_adapdata(adap, iproc_i2c);
- snprintf(adap->name, sizeof(adap->name),
- "Broadcom iProc (%s)",
- of_node_full_name(iproc_i2c->device->of_node));
+ snprintf(adap->name, sizeof(adap->name), "Broadcom iProc (%s)",
+ of_node_full_name(iproc_i2c->device->of_node));
adap->algo = &bcm_iproc_algo;
adap->quirks = &bcm_iproc_i2c_quirks;
adap->dev.parent = &pdev->dev;
@@ -1182,62 +1225,6 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = {
.resume_early = &bcm_iproc_i2c_resume
};
-static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
-{
- struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
-
- if (iproc_i2c->slave)
- return -EBUSY;
-
- if (slave->flags & I2C_CLIENT_TEN)
- return -EAFNOSUPPORT;
-
- iproc_i2c->slave = slave;
-
- tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
- (unsigned long)iproc_i2c);
-
- bcm_iproc_i2c_slave_init(iproc_i2c, false);
- return 0;
-}
-
-static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
-{
- u32 tmp;
- struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
-
- if (!iproc_i2c->slave)
- return -EINVAL;
-
- disable_irq(iproc_i2c->irq);
-
- tasklet_kill(&iproc_i2c->slave_rx_tasklet);
-
- /* disable all slave interrupts */
- tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
- tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
- IE_S_ALL_INTERRUPT_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
-
- /* Erase the slave address programmed */
- tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
- tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
- iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
-
- /* flush TX/RX FIFOs */
- tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
- iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp);
-
- /* clear all pending slave interrupts */
- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
-
- iproc_i2c->slave = NULL;
-
- enable_irq(iproc_i2c->irq);
-
- return 0;
-}
-
static const struct of_device_id bcm_iproc_i2c_of_match[] = {
{
.compatible = "brcm,iproc-i2c",
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 26a36a65521e..606ac071cb80 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -467,7 +467,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
return ret;
/* Alloc and register client IRQ */
- adap->irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL);
+ adap->irq_domain = irq_domain_create_linear(NULL, 1, &irq_domain_simple_ops, NULL);
if (!adap->irq_domain)
return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 6a909d339681..6a3d4e9e07f4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -551,7 +551,8 @@ out:
static u32 i2c_davinci_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
static void terminate_read(struct davinci_i2c_dev *dev)
diff --git a/drivers/i2c/busses/i2c-designware-amdisp.c b/drivers/i2c/busses/i2c-designware-amdisp.c
new file mode 100644
index 000000000000..ad6f08338124
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware-amdisp.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Based on Synopsys DesignWare I2C adapter driver.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "i2c-designware-core.h"
+
+#define DRV_NAME "amd_isp_i2c_designware"
+#define AMD_ISP_I2C_INPUT_CLK 100 /* MHz */
+
+static void amd_isp_dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *i2c_dev)
+{
+ pm_runtime_disable(i2c_dev->dev);
+
+ if (i2c_dev->shared_with_punit)
+ pm_runtime_put_noidle(i2c_dev->dev);
+}
+
+static inline u32 amd_isp_dw_i2c_get_clk_rate(struct dw_i2c_dev *i2c_dev)
+{
+ return AMD_ISP_I2C_INPUT_CLK * 1000;
+}
+
+static int amd_isp_dw_i2c_plat_probe(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *isp_i2c_dev;
+ struct i2c_adapter *adap;
+ int ret;
+
+ isp_i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*isp_i2c_dev), GFP_KERNEL);
+ if (!isp_i2c_dev)
+ return -ENOMEM;
+ isp_i2c_dev->dev = &pdev->dev;
+
+ pdev->dev.init_name = DRV_NAME;
+
+ /*
+ * Use polling mode to send/receive data, because the ISP I2C
+ * controller has no IRQ connection
+ */
+ isp_i2c_dev->flags |= ACCESS_POLLING;
+ platform_set_drvdata(pdev, isp_i2c_dev);
+
+ isp_i2c_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(isp_i2c_dev->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(isp_i2c_dev->base),
+ "failed to get IOMEM resource\n");
+
+ isp_i2c_dev->get_clk_rate_khz = amd_isp_dw_i2c_get_clk_rate;
+ ret = i2c_dw_fw_parse_and_configure(isp_i2c_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to parse i2c dw fwnode and configure\n");
+
+ i2c_dw_configure(isp_i2c_dev);
+
+ adap = &isp_i2c_dev->adapter;
+ adap->owner = THIS_MODULE;
+ ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
+ adap->dev.of_node = pdev->dev.of_node;
+ /* use dynamically allocated adapter id */
+ adap->nr = -1;
+
+ if (isp_i2c_dev->flags & ACCESS_NO_IRQ_SUSPEND)
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE);
+ else
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND);
+
+ device_enable_async_suspend(&pdev->dev);
+
+ if (isp_i2c_dev->shared_with_punit)
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = i2c_dw_probe(isp_i2c_dev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "i2c_dw_probe failed\n");
+ goto error_release_rpm;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+
+error_release_rpm:
+ amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev);
+ pm_runtime_put_sync(&pdev->dev);
+ return ret;
+}
+
+static void amd_isp_dw_i2c_plat_remove(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *isp_i2c_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ i2c_del_adapter(&isp_i2c_dev->adapter);
+
+ i2c_dw_disable(isp_i2c_dev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev);
+}
+
+static int amd_isp_dw_i2c_plat_prepare(struct device *dev)
+{
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
+}
+
+static int amd_isp_dw_i2c_plat_runtime_suspend(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ if (i_dev->shared_with_punit)
+ return 0;
+
+ i2c_dw_disable(i_dev);
+ i2c_dw_prepare_clk(i_dev, false);
+
+ return 0;
+}
+
+static int amd_isp_dw_i2c_plat_suspend(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!i_dev)
+ return -ENODEV;
+
+ ret = amd_isp_dw_i2c_plat_runtime_suspend(dev);
+ if (!ret)
+ i2c_mark_adapter_suspended(&i_dev->adapter);
+
+ return ret;
+}
+
+static int amd_isp_dw_i2c_plat_runtime_resume(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ if (!i_dev)
+ return -ENODEV;
+
+ if (!i_dev->shared_with_punit)
+ i2c_dw_prepare_clk(i_dev, true);
+ if (i_dev->init)
+ i_dev->init(i_dev);
+
+ return 0;
+}
+
+static int amd_isp_dw_i2c_plat_resume(struct device *dev)
+{
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+
+ amd_isp_dw_i2c_plat_runtime_resume(dev);
+ i2c_mark_adapter_resumed(&i_dev->adapter);
+
+ return 0;
+}
+
+static const struct dev_pm_ops amd_isp_dw_i2c_dev_pm_ops = {
+ .prepare = pm_sleep_ptr(amd_isp_dw_i2c_plat_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(amd_isp_dw_i2c_plat_suspend, amd_isp_dw_i2c_plat_resume)
+ RUNTIME_PM_OPS(amd_isp_dw_i2c_plat_runtime_suspend, amd_isp_dw_i2c_plat_runtime_resume, NULL)
+};
+
+/* Work with hotplug and coldplug */
+MODULE_ALIAS("platform:amd_isp_i2c_designware");
+
+static struct platform_driver amd_isp_dw_i2c_driver = {
+ .probe = amd_isp_dw_i2c_plat_probe,
+ .remove = amd_isp_dw_i2c_plat_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = pm_ptr(&amd_isp_dw_i2c_dev_pm_ops),
+ },
+};
+module_platform_driver(amd_isp_dw_i2c_driver);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter in AMD ISP");
+MODULE_IMPORT_NS("I2C_DW");
+MODULE_IMPORT_NS("I2C_DW_COMMON");
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vengutta@amd.com>");
+MODULE_AUTHOR("Pratap Nirujogi <pratap.nirujogi@amd.com>");
+MODULE_AUTHOR("Bin Du <bin.du@amd.com>");
+MODULE_LICENSE("GPL");
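
A quick unit check on amd_isp_dw_i2c_get_clk_rate() above: the core expects kHz while AMD_ISP_I2C_INPUT_CLK is given in MHz, hence the factor of 1000 (100 MHz * 1000 = 100000 kHz). A trivial confirmation:

#include <stdio.h>

int main(void)
{
	unsigned int mhz = 100;	/* AMD_ISP_I2C_INPUT_CLK */

	printf("%u MHz = %u kHz\n", mhz, mhz * 1000);
	return 0;
}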
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 8eb7bd640f8d..5b1e8f74c4ac 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -572,8 +572,10 @@ u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev)
* Clock is not necessary if we got LCNT/HCNT values directly from
* the platform code.
*/
- if (WARN_ON_ONCE(!dev->get_clk_rate_khz))
+ if (!dev->get_clk_rate_khz) {
+ dev_dbg_once(dev->dev, "Callback get_clk_rate_khz() is not defined\n");
return 0;
+ }
return dev->get_clk_rate_khz(dev);
}
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 8e0267c7cc29..f21f9877c040 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -278,9 +278,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
- if (IS_ERR(dev->slave))
+ if (IS_ERR(dev->slave)) {
+ i2c_del_adapter(&dev->adapter);
return dev_err_probe(device, PTR_ERR(dev->slave),
"register UCSI failed\n");
+ }
}
pm_runtime_set_autosuspend_delay(device, 1000);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index d6e1ee935399..879719e91df2 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,7 +34,7 @@
static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
{
- return clk_get_rate(dev->clk) / KILO;
+ return clk_get_rate(dev->clk) / HZ_PER_KHZ;
}
#ifdef CONFIG_OF
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 5cd4a5f7a472..b936a240db0a 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -96,7 +96,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
i2c_dw_disable(dev);
synchronize_irq(dev->irq);
dev->slave = NULL;
- pm_runtime_put(dev->dev);
+ pm_runtime_put_sync_suspend(dev->dev);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 48e1af544b75..a7f89946dad4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1180,7 +1180,7 @@ static void i801_probe_optional_targets(struct i801_priv *priv)
#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
#endif
- i2c_register_spd(&priv->adapter);
+ i2c_register_spd_write_enable(&priv->adapter);
}
#else
static void __init input_apanel_init(void) {}
@@ -1283,7 +1283,7 @@ static int i801_notifier_call(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
/* Call i2c_register_spd for muxed child segments */
- i2c_register_spd(to_i2c_adapter(dev));
+ i2c_register_spd_write_enable(to_i2c_adapter(dev));
return NOTIFY_OK;
}
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 5d8b82ae6fd6..3278707bb885 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -831,7 +831,7 @@ next_atomic_cmd:
*/
static void img_i2c_check_timer(struct timer_list *t)
{
- struct img_i2c *i2c = from_timer(i2c, t, check_timer);
+ struct img_i2c *i2c = timer_container_of(i2c, t, check_timer);
unsigned long flags;
unsigned int line_status;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 9e5d454d8318..de01dfecb16e 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1711,11 +1711,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return irq;
+ return dev_err_probe(&pdev->dev, irq, "can't get IRQ\n");
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
- return PTR_ERR(base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(base), "can't get IO memory\n");
phy_addr = (dma_addr_t)res->start;
i2c_imx = devm_kzalloc(&pdev->dev, sizeof(*i2c_imx), GFP_KERNEL);
@@ -1810,13 +1810,15 @@ static int i2c_imx_probe(struct platform_device *pdev)
*/
ret = i2c_imx_dma_request(i2c_imx, phy_addr);
if (ret) {
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ dev_err_probe(&pdev->dev, ret, "can't get DMA channels\n");
goto clk_notifier_unregister;
- else if (ret == -ENODEV)
+ } else if (ret == -ENODEV) {
dev_dbg(&pdev->dev, "Only use PIO mode\n");
- else
+ } else {
dev_warn(&pdev->dev, "Failed to setup DMA (%pe), only use PIO mode\n",
ERR_PTR(ret));
+ }
}
/* Add I2C adapter */
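
For context: dev_err_probe(), used throughout the conversions above, logs the message and returns the error code it was given, and for -EPROBE_DEFER it logs quietly and records the deferral reason for later inspection. A minimal sketch of the pattern, using a hypothetical driver name:

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int irq;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			/* One line: log (quietly for -EPROBE_DEFER) and return irq */
			return dev_err_probe(&pdev->dev, irq, "can't get IRQ\n");

		clk = devm_clk_get_enabled(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to enable clock\n");

		return 0;
	}
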
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index c93c02aa6ac8..7aaefb21416a 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -933,7 +933,7 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_region(pdev, SMBBAR, ismt_driver.name);
+ err = pcim_request_region(pdev, SMBBAR, ismt_driver.name);
if (err) {
dev_err(&pdev->dev,
"Failed to request SMBus region 0x%lx-0x%lx\n",
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 6943a0de860a..ccd13c4fb83e 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -442,8 +442,13 @@ static int i2c_lpc2k_suspend(struct device *dev)
static int i2c_lpc2k_resume(struct device *dev)
{
struct lpc2k_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
- clk_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock.\n");
+ return ret;
+ }
i2c_lpc2k_reset(i2c);
return 0;
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 5db73429125c..492bf4c34722 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -76,6 +76,8 @@
#define CORE_I2C_FREQ (0x14)
#define CORE_I2C_GLITCHREG (0x18)
#define CORE_I2C_SLAVE1_ADDR (0x1c)
+#define CORE_I2C_SMBUS_MSG_WR (0x0)
+#define CORE_I2C_SMBUS_MSG_RD (0x1)
#define PCLK_DIV_960 (CTRL_CR2)
#define PCLK_DIV_256 (0)
@@ -424,9 +426,109 @@ static u32 mchp_corei2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+static int mchp_corei2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+ char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct i2c_msg msgs[2];
+ struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+ u8 tx_buf[I2C_SMBUS_BLOCK_MAX + 2];
+ u8 rx_buf[I2C_SMBUS_BLOCK_MAX + 1];
+ int num_msgs = 1;
+
+ msgs[CORE_I2C_SMBUS_MSG_WR].addr = addr;
+ msgs[CORE_I2C_SMBUS_MSG_WR].flags = 0;
+
+ if (read_write == I2C_SMBUS_READ && size <= I2C_SMBUS_BYTE)
+ msgs[CORE_I2C_SMBUS_MSG_WR].flags = I2C_M_RD;
+
+ if (read_write == I2C_SMBUS_WRITE && size <= I2C_SMBUS_WORD_DATA)
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = size;
+
+ if (read_write == I2C_SMBUS_WRITE && size > I2C_SMBUS_BYTE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command;
+ }
+
+ if (read_write == I2C_SMBUS_READ && size >= I2C_SMBUS_BYTE_DATA) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command;
+ msgs[CORE_I2C_SMBUS_MSG_RD].addr = addr;
+ msgs[CORE_I2C_SMBUS_MSG_RD].flags = I2C_M_RD;
+ num_msgs = 2;
+ }
+
+ if (read_write == I2C_SMBUS_READ && size > I2C_SMBUS_QUICK)
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = 1;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = NULL;
+ return 0;
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE)
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = &command;
+ else
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf = &data->byte;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->byte;
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = &data->byte;
+ }
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->word & 0xFF;
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[2] = (data->word >> 8) & 0xFF;
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf;
+ }
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ int data_len;
+
+ data_len = data->block[0];
+ msgs[CORE_I2C_SMBUS_MSG_WR].len = data_len + 2;
+ for (int i = 0; i <= data_len; i++)
+ msgs[CORE_I2C_SMBUS_MSG_WR].buf[i + 1] = data->block[i];
+ } else {
+ msgs[CORE_I2C_SMBUS_MSG_RD].len = I2C_SMBUS_BLOCK_MAX + 1;
+ msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mchp_corei2c_xfer(&idev->adapter, msgs, num_msgs);
+ if (read_write == I2C_SMBUS_WRITE || size <= I2C_SMBUS_BYTE_DATA)
+ return 0;
+
+ switch (size) {
+ case I2C_SMBUS_WORD_DATA:
+ data->word = (rx_buf[0] | (rx_buf[1] << 8));
+ break;
+ case I2C_SMBUS_BLOCK_DATA:
+ if (rx_buf[0] > I2C_SMBUS_BLOCK_MAX)
+ rx_buf[0] = I2C_SMBUS_BLOCK_MAX;
+ /* As per the protocol, the first byte of the block is the block size. */
+ for (int i = 0; i <= rx_buf[0]; i++)
+ data->block[i] = rx_buf[i];
+ break;
+ }
+
+ return 0;
+}
+
static const struct i2c_algorithm mchp_corei2c_algo = {
.master_xfer = mchp_corei2c_xfer,
.functionality = mchp_corei2c_func,
+ .smbus_xfer = mchp_corei2c_smbus_xfer,
};
static int mchp_corei2c_probe(struct platform_device *pdev)
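
For reference, a sketch (not from the driver) of the message pair the new smbus_xfer callback builds for an I2C_SMBUS_WORD_DATA read: a one-byte command write, then a repeated-start read, executed through the existing mchp_corei2c_xfer() path, with the word assembled LSB-first as in the patch:

	u8 command = 0x10;	/* hypothetical device register */
	u8 rx_buf[2];
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &command },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 2, .buf = rx_buf },
	};

	mchp_corei2c_xfer(&idev->adapter, msgs, ARRAY_SIZE(msgs));
	data->word = rx_buf[0] | (rx_buf[1] << 8);
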
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 280dde53d7f3..8345f7e6385d 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
/* Defines what functionality is present. */
#define MLXBF_I2C_FUNC_SMBUS_BLOCK \
@@ -197,6 +198,7 @@
#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
+#define MLXBF_I2C_MASK_32 GENMASK(31, 0)
#define MLXBF_I2C_MST_ADDR_OFFSET 0x200
@@ -223,7 +225,7 @@
#define MLXBF_I2C_MASTER_ENABLE \
(MLXBF_I2C_MASTER_LOCK_BIT | MLXBF_I2C_MASTER_BUSY_BIT | \
- MLXBF_I2C_MASTER_START_BIT | MLXBF_I2C_MASTER_STOP_BIT)
+ MLXBF_I2C_MASTER_START_BIT)
#define MLXBF_I2C_MASTER_ENABLE_WRITE \
(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_WRITE_BIT)
@@ -337,6 +339,7 @@ enum {
MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
MLXBF_I2C_F_SMBUS_PEC = BIT(6),
MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
+ MLXBF_I2C_F_WRITE_WITHOUT_STOP = BIT(8),
};
/* Mellanox BlueField chip type. */
@@ -637,16 +640,19 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
}
static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
- u8 len, u8 block_en, u8 pec_en, bool read)
+ u8 len, u8 block_en, u8 pec_en, bool read,
+ bool stop)
{
- u32 command;
+ u32 command = 0;
/* Set Master GW control word. */
+ if (stop)
+ command |= MLXBF_I2C_MASTER_STOP_BIT;
if (read) {
- command = MLXBF_I2C_MASTER_ENABLE_READ;
+ command |= MLXBF_I2C_MASTER_ENABLE_READ;
command |= rol32(len, MLXBF_I2C_MASTER_READ_SHIFT);
} else {
- command = MLXBF_I2C_MASTER_ENABLE_WRITE;
+ command |= MLXBF_I2C_MASTER_ENABLE_WRITE;
command |= rol32(len, MLXBF_I2C_MASTER_WRITE_SHIFT);
}
command |= rol32(slave, MLXBF_I2C_MASTER_SLV_ADDR_SHIFT);
@@ -681,8 +687,10 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
u8 op_idx, data_idx, data_len, write_len, read_len;
struct mlxbf_i2c_smbus_operation *operation;
u8 read_en, write_en, block_en, pec_en;
- u8 slave, flags, addr;
+ bool stop_after_write = true;
+ u8 slave, addr;
u8 *read_buf;
+ u32 flags;
u32 bits;
int ret;
@@ -754,7 +762,16 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
+
+ /*
+ * The stop condition can be skipped when writing on the bus
+ * to implement a repeated start condition on the next read
+ * as required for several SMBus and I2C operations.
+ */
+ if (flags & MLXBF_I2C_F_WRITE_WITHOUT_STOP)
+ stop_after_write = false;
}
+
/*
* We assume that read operations are performed only once per
* SMBus transaction. *TBD* protect this statement so it won't
@@ -780,7 +797,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
if (write_en) {
ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
- pec_en, 0);
+ pec_en, 0, stop_after_write);
if (ret)
goto out_unlock;
}
@@ -790,7 +807,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1,
MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en,
- pec_en, 1);
+ pec_en, 1, true);
if (!ret) {
/* Get Master GW data descriptor. */
mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1,
@@ -896,6 +913,9 @@ mlxbf_i2c_smbus_i2c_block_func(struct mlxbf_i2c_smbus_request *request,
request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
request->operation[0].buffer = command;
+ if (read)
+ request->operation[0].flags |= MLXBF_I2C_F_WRITE_WITHOUT_STOP;
+
/*
* As specified in the standard, the max number of bytes to read/write
* per block operation is 32 bytes. In Golan code, the controller can
@@ -1063,7 +1083,7 @@ static u32 mlxbf_i2c_get_ticks(struct mlxbf_i2c_priv *priv, u64 nanoseconds,
* Frequency
*/
frequency = priv->frequency;
- ticks = (nanoseconds * frequency) / MLXBF_I2C_FREQUENCY_1GHZ;
+ ticks = div_u64(nanoseconds * frequency, MLXBF_I2C_FREQUENCY_1GHZ);
/*
* The number of ticks is rounded down and if minimum is equal to 1
* then add one tick.
@@ -1130,7 +1150,8 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
- timer = timings->timeout;
+ timer = mlxbf_i2c_set_timer(priv, timings->timeout, false,
+ MLXBF_I2C_MASK_32, MLXBF_I2C_SHIFT_0);
writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
}
@@ -1140,11 +1161,7 @@ enum mlxbf_i2c_timings_config {
MLXBF_I2C_TIMING_CONFIG_1000KHZ,
};
-/*
- * Note that the mlxbf_i2c_timings->timeout value is not related to the
- * bus frequency, it is impacted by the time it takes the driver to
- * complete data transmission before transaction abort.
- */
+/* Timing values are in nanoseconds */
static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
[MLXBF_I2C_TIMING_CONFIG_100KHZ] = {
.scl_high = 4810,
@@ -1159,8 +1176,8 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
.scl_fall = 50,
.hold_data = 300,
.buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .thigh_max = 50000,
+ .timeout = 35000000
},
[MLXBF_I2C_TIMING_CONFIG_400KHZ] = {
.scl_high = 1011,
@@ -1175,24 +1192,24 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
.scl_fall = 50,
.hold_data = 300,
.buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .thigh_max = 50000,
+ .timeout = 35000000
},
[MLXBF_I2C_TIMING_CONFIG_1000KHZ] = {
- .scl_high = 600,
- .scl_low = 1300,
+ .scl_high = 383,
+ .scl_low = 460,
.hold_start = 600,
- .setup_start = 600,
- .setup_stop = 600,
- .setup_data = 100,
+ .setup_start = 260,
+ .setup_stop = 260,
+ .setup_data = 50,
.sda_rise = 50,
.sda_fall = 50,
.scl_rise = 50,
.scl_fall = 50,
.hold_data = 300,
- .buf = 20000,
- .thigh_max = 5000,
- .timeout = 106500
+ .buf = 500,
+ .thigh_max = 50000,
+ .timeout = 35000000
}
};
@@ -1443,9 +1460,8 @@ static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_
* and PadFrequency, respectively.
*/
core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
- core_frequency /= (++core_r) * (++core_od);
- return core_frequency;
+ return div_u64(core_frequency, (++core_r) * (++core_od));
}
static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
@@ -1474,9 +1490,8 @@ static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_r
* and PadFrequency, respectively.
*/
corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
- corepll_frequency /= (++core_r) * (++core_od);
- return corepll_frequency;
+ return div_u64(corepll_frequency, (++core_r) * (++core_od));
}
static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev,
@@ -2038,21 +2053,21 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
read ? &data->byte : &command, read,
pec);
dev_dbg(&adap->dev, "smbus %s byte, slave 0x%02x.\n",
- read ? "read" : "write", addr);
+ str_read_write(read), addr);
break;
case I2C_SMBUS_BYTE_DATA:
mlxbf_i2c_smbus_data_byte_func(&request, &command, &data->byte,
read, pec);
dev_dbg(&adap->dev, "smbus %s byte data at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", command, addr);
+ str_read_write(read), command, addr);
break;
case I2C_SMBUS_WORD_DATA:
mlxbf_i2c_smbus_data_word_func(&request, &command,
(u8 *)&data->word, read, pec);
dev_dbg(&adap->dev, "smbus %s word data at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", command, addr);
+ str_read_write(read), command, addr);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
@@ -2060,7 +2075,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
mlxbf_i2c_smbus_i2c_block_func(&request, &command, data->block,
&byte_cnt, read, pec);
dev_dbg(&adap->dev, "i2c %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", byte_cnt, command, addr);
+ str_read_write(read), byte_cnt, command, addr);
break;
case I2C_SMBUS_BLOCK_DATA:
@@ -2068,7 +2083,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
mlxbf_i2c_smbus_block_func(&request, &command, data->block,
&byte_cnt, read, pec);
dev_dbg(&adap->dev, "smbus %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
- read ? "read" : "write", byte_cnt, command, addr);
+ str_read_write(read), byte_cnt, command, addr);
break;
case I2C_FUNC_SMBUS_PROC_CALL:
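
The new MLXBF_I2C_F_WRITE_WITHOUT_STOP flag implements the standard combined-transfer shape, where the command write must not be followed by a STOP so that the subsequent read begins with a repeated START. A generic sketch of the transfer this enables (placeholder addresses):

	u8 reg = 0x01;		/* hypothetical register */
	u8 val[2];
	struct i2c_msg msgs[] = {
		/* write message: no STOP issued afterwards */
		{ .addr = 0x48, .flags = 0,        .len = 1, .buf = &reg },
		/* repeated START, then read */
		{ .addr = 0x48, .flags = I2C_M_RD, .len = 2, .buf = val },
	};
	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
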
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index de713b5747fe..892e2d2988a7 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -1115,14 +1115,10 @@ static void npcm_i2c_master_abort(struct npcm_i2c *bus)
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type)
{
- u8 slave_add;
-
if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10)
dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n");
- slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]);
-
- return slave_add;
+ return ioread8(bus->reg + npcm_i2caddr[addr_type]);
}
static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add)
@@ -2178,10 +2174,14 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,
/* Check HW is OK: SDA and SCL should be high at this point. */
if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
- dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
- dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
- npcm_i2c_get_SCL(&bus->adap));
- return -ENXIO;
+ dev_warn(bus->dev, "I2C%d SDA=%d SCL=%d, attempting to recover\n", bus->num,
+ npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap));
+ if (npcm_i2c_recovery_tgclk(&bus->adap)) {
+ dev_err(bus->dev, "I2C%d init fail: SDA=%d SCL=%d\n",
+ bus->num, npcm_i2c_get_SDA(&bus->adap),
+ npcm_i2c_get_SCL(&bus->adap));
+ return -ENXIO;
+ }
}
npcm_i2c_int_enable(bus, true);
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index baf6b27f3752..93a49e4637ec 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -135,6 +135,32 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
}
+static void octeon_i2c_block_enable(struct octeon_i2c *i2c)
+{
+ u64 mode;
+
+ if (i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c))
+ return;
+
+ i2c->block_enabled = true;
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ mode |= TWSX_MODE_BLOCK_MODE;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+}
+
+static void octeon_i2c_block_disable(struct octeon_i2c *i2c)
+{
+ u64 mode;
+
+ if (!i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c))
+ return;
+
+ i2c->block_enabled = false;
+ mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c));
+ mode &= ~TWSX_MODE_BLOCK_MODE;
+ octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c));
+}
+
/**
* octeon_i2c_hlc_wait - wait for an HLC operation to complete
* @i2c: The struct octeon_i2c
@@ -281,6 +307,7 @@ static int octeon_i2c_start(struct octeon_i2c *i2c)
u8 stat;
octeon_i2c_hlc_disable(i2c);
+ octeon_i2c_block_disable(i2c);
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
ret = octeon_i2c_wait(i2c);
@@ -605,6 +632,125 @@ err:
}
/**
+ * octeon_i2c_hlc_block_comp_read - high-level-controller composite block read
+ * @i2c: The struct octeon_i2c
+ * @msgs: msg[0] contains address, place read data into msg[1]
+ *
+ * An i2c core command is constructed and written into the SW_TWSI register.
+ * Executing the command places the requested data into a FIFO buffer,
+ * ready to be read. Used when the i2c transfer reads more than 8 bytes
+ * of data.
+ *
+ * Returns: 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_hlc_block_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ int ret;
+ u16 len, i;
+ u64 cmd;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_block_enable(i2c);
+
+ /* Write (size - 1) into block control register */
+ len = msgs[1].len - 1;
+ octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c));
+
+ /* Prepare core command */
+ cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ /* Send core command */
+ ret = octeon_i2c_hlc_read_cmd(i2c, msgs[0], cmd);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+ if ((cmd & SW_TWSI_R) == 0) {
+ octeon_i2c_block_disable(i2c);
+ return octeon_i2c_check_status(i2c, false);
+ }
+
+ /* read data in FIFO */
+ octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR,
+ i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c));
+ for (i = 0; i <= len; i += 8) {
+ /* Byte-swap FIFO data and copy into msg buffer */
+ __be64 rd = cpu_to_be64(__raw_readq(i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c)));
+
+ memcpy(&msgs[1].buf[i], &rd, min(8, msgs[1].len - i));
+ }
+
+err:
+ octeon_i2c_block_disable(i2c);
+ return ret;
+}
+
+/**
+ * octeon_i2c_hlc_block_comp_write - high-level-controller composite block write
+ * @i2c: The struct octeon_i2c
+ * @msgs: msg[0] contains address, msg[1] contains data to be written
+ *
+ * An i2c core command is constructed and the write data is placed into the
+ * FIFO buffer. Executing the command performs the hardware write using the
+ * FIFO contents. Used when the i2c transfer writes more than 8 bytes of data.
+ *
+ * Returns: 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_hlc_block_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ bool set_ext;
+ int ret;
+ u16 len, i;
+ u64 cmd, ext = 0;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_block_enable(i2c);
+
+ /* Write (size - 1) into block control register */
+ len = msgs[1].len - 1;
+ octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c));
+
+ /* Prepare core command */
+ cmd = SW_TWSI_V | SW_TWSI_SOVR | SW_TWSI_OP_7_IA;
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ /* Set parameters for extended message (if required) */
+ set_ext = octeon_i2c_hlc_ext(i2c, msgs[0], &cmd, &ext);
+
+ /* Write msg into FIFO buffer */
+ octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR,
+ i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c));
+ for (i = 0; i <= len; i += 8) {
+ __be64 buf = 0;
+
+ /* Copy 8 bytes or remaining bytes from message buffer */
+ memcpy(&buf, &msgs[1].buf[i], min(8, msgs[1].len - i));
+
+ /* Byte-swap message data and write into FIFO */
+ buf = cpu_to_be64(buf);
+ octeon_i2c_writeq_flush((u64)buf, i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c));
+ }
+ if (set_ext)
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c));
+
+ /* Send command to core (send data in FIFO) */
+ ret = octeon_i2c_hlc_cmd_send(i2c, cmd);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c));
+ if ((cmd & SW_TWSI_R) == 0) {
+ octeon_i2c_block_disable(i2c);
+ return octeon_i2c_check_status(i2c, false);
+ }
+
+err:
+ octeon_i2c_block_disable(i2c);
+ return ret;
+}
+
+/**
* octeon_i2c_xfer - The driver's xfer function
* @adap: Pointer to the i2c_adapter structure
* @msgs: Pointer to the messages to be processed
@@ -630,13 +776,21 @@ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if ((msgs[0].flags & I2C_M_RD) == 0 &&
(msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
msgs[0].len > 0 && msgs[0].len <= 2 &&
- msgs[1].len > 0 && msgs[1].len <= 8 &&
+ msgs[1].len > 0 &&
msgs[0].addr == msgs[1].addr) {
- if (msgs[1].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_comp_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_comp_write(i2c, msgs);
- goto out;
+ if (msgs[1].len <= 8) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+ goto out;
+ } else if (msgs[1].len <= 1024 && OCTEON_REG_BLOCK_CTL(i2c)) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_block_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_block_comp_write(i2c, msgs);
+ goto out;
+ }
}
}
}
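
Both block helpers move data through the FIFO in 64-bit big-endian words, so the loops step 8 bytes at a time and byte-swap each word, with the final memcpy length clamping the last, possibly partial, chunk. For a 13-byte message (len = 12), for example, the loop runs at i = 0 and i = 8, copying 8 and then min(8, 13 - 8) = 5 bytes. A condensed sketch of the read direction under the same assumptions (fifo_reg standing in for the BLOCK_FIFO register address):

	for (i = 0; i <= len; i += 8) {		/* len = msgs[1].len - 1 */
		/* readq returns CPU-endian; swap so byte 0 is the first wire byte */
		__be64 rd = cpu_to_be64(__raw_readq(fifo_reg));

		memcpy(&msgs[1].buf[i], &rd, min(8, msgs[1].len - i));
	}
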
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index b265e21189a1..32a44f2d6274 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -96,18 +96,28 @@ struct octeon_i2c_reg_offset {
unsigned int twsi_int;
unsigned int sw_twsi_ext;
unsigned int mode;
+ unsigned int block_ctl;
+ unsigned int block_sts;
+ unsigned int block_fifo;
};
#define OCTEON_REG_SW_TWSI(x) ((x)->roff.sw_twsi)
#define OCTEON_REG_TWSI_INT(x) ((x)->roff.twsi_int)
#define OCTEON_REG_SW_TWSI_EXT(x) ((x)->roff.sw_twsi_ext)
#define OCTEON_REG_MODE(x) ((x)->roff.mode)
+#define OCTEON_REG_BLOCK_CTL(x) ((x)->roff.block_ctl)
+#define OCTEON_REG_BLOCK_STS(x) ((x)->roff.block_sts)
+#define OCTEON_REG_BLOCK_FIFO(x) ((x)->roff.block_fifo)
-/* Set REFCLK_SRC and HS_MODE in TWSX_MODE register */
+/* TWSX_MODE register */
#define TWSX_MODE_REFCLK_SRC BIT(4)
+#define TWSX_MODE_BLOCK_MODE BIT(2)
#define TWSX_MODE_HS_MODE BIT(0)
#define TWSX_MODE_HS_MASK (TWSX_MODE_REFCLK_SRC | TWSX_MODE_HS_MODE)
+/* TWSX_BLOCK_STS register */
+#define TWSX_BLOCK_STS_RESET_PTR BIT(0)
+
/* Set BUS_MON_RST to reset bus monitor */
#define BUS_MON_RST_MASK BIT(3)
@@ -123,6 +133,7 @@ struct octeon_i2c {
void __iomem *twsi_base;
struct device *dev;
bool hlc_enabled;
+ bool block_enabled;
bool broken_irq_mode;
bool broken_irq_check;
void (*int_enable)(struct octeon_i2c *);
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index bd128ab2e2eb..f4eca44ed183 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -5,22 +5,24 @@
* SMBus host driver for PA Semi PWRficient
*/
-#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/io.h>
+#include <linux/stddef.h>
#include "i2c-pasemi-core.h"
/* Register offsets */
#define REG_MTXFIFO 0x00
#define REG_MRXFIFO 0x04
+#define REG_XFSTA 0x0c
#define REG_SMSTA 0x14
#define REG_IMASK 0x18
#define REG_CTL 0x1c
@@ -52,6 +54,12 @@
#define CTL_UJM BIT(8)
#define CTL_CLK_M GENMASK(7, 0)
+/*
+ * The hardware (supposedly) has a 25ms timeout for clock stretching, thus
+ * use 100ms here which should be plenty.
+ */
+#define PASEMI_TRANSFER_TIMEOUT_MS 100
+
static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val)
{
dev_dbg(smbus->dev, "smbus write reg %x val %08x\n", reg, val);
@@ -71,7 +79,7 @@ static inline int reg_read(struct pasemi_smbus *smbus, int reg)
static void pasemi_reset(struct pasemi_smbus *smbus)
{
- u32 val = (CTL_MTR | CTL_MRR | (smbus->clk_div & CTL_CLK_M));
+ u32 val = (CTL_MTR | CTL_MRR | CTL_UJM | (smbus->clk_div & CTL_CLK_M));
if (smbus->hw_rev >= 6)
val |= CTL_EN;
@@ -80,43 +88,102 @@ static void pasemi_reset(struct pasemi_smbus *smbus)
reinit_completion(&smbus->irq_completion);
}
-static void pasemi_smb_clear(struct pasemi_smbus *smbus)
+static int pasemi_smb_clear(struct pasemi_smbus *smbus)
{
unsigned int status;
+ int ret;
+
+ /* First wait for the bus to go idle */
+ ret = readx_poll_timeout(ioread32, smbus->ioaddr + REG_SMSTA,
+ status, !(status & (SMSTA_XIP | SMSTA_JAM)),
+ USEC_PER_MSEC,
+ USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
+
+ if (ret < 0) {
+ dev_err(smbus->dev, "Bus is still stuck (status 0x%08x xfstatus 0x%08x)\n",
+ status, reg_read(smbus, REG_XFSTA));
+ return -EIO;
+ }
+
+ /* If any badness happened or there is data in the FIFOs, reset the FIFOs */
+ if ((status & (SMSTA_MRNE | SMSTA_JMD | SMSTA_MTO | SMSTA_TOM | SMSTA_MTN | SMSTA_MTA)) ||
+ !(status & SMSTA_MTE)) {
+ dev_warn(smbus->dev, "Issuing reset due to status 0x%08x (xfstatus 0x%08x)\n",
+ status, reg_read(smbus, REG_XFSTA));
+ pasemi_reset(smbus);
+ }
- status = reg_read(smbus, REG_SMSTA);
+ /* Clear the flags */
reg_write(smbus, REG_SMSTA, status);
+
+ return 0;
}
static int pasemi_smb_waitready(struct pasemi_smbus *smbus)
{
- int timeout = 100;
unsigned int status;
if (smbus->use_irq) {
reinit_completion(&smbus->irq_completion);
reg_write(smbus, REG_IMASK, SMSTA_XEN | SMSTA_MTN);
- wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(100));
+ int ret = wait_for_completion_timeout(
+ &smbus->irq_completion,
+ msecs_to_jiffies(PASEMI_TRANSFER_TIMEOUT_MS));
reg_write(smbus, REG_IMASK, 0);
status = reg_read(smbus, REG_SMSTA);
+
+ if (ret < 0) {
+ dev_err(smbus->dev,
+ "Completion wait failed with %d, status 0x%08x\n",
+ ret, status);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(smbus->dev, "Timeout, status 0x%08x\n", status);
+ return -ETIME;
+ }
} else {
- status = reg_read(smbus, REG_SMSTA);
- while (!(status & SMSTA_XEN) && timeout--) {
- msleep(1);
- status = reg_read(smbus, REG_SMSTA);
+ int ret = readx_poll_timeout(
+ ioread32, smbus->ioaddr + REG_SMSTA,
+ status, status & SMSTA_XEN,
+ USEC_PER_MSEC,
+ USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
+
+ if (ret < 0) {
+ dev_err(smbus->dev, "Timeout, status 0x%08x\n", status);
+ return -ETIME;
}
}
- /* Got NACK? */
- if (status & SMSTA_MTN)
- return -ENXIO;
+ /* Controller timeout? */
+ if (status & SMSTA_TOM) {
+ dev_err(smbus->dev, "Controller timeout, status 0x%08x\n", status);
+ return -EIO;
+ }
- if (timeout < 0) {
- dev_warn(smbus->dev, "Timeout, status 0x%08x\n", status);
- reg_write(smbus, REG_SMSTA, status);
+ /* Peripheral timeout? */
+ if (status & SMSTA_MTO) {
+ dev_err(smbus->dev, "Peripheral timeout, status 0x%08x\n", status);
return -ETIME;
}
+ /* Still stuck in a transaction? */
+ if (status & SMSTA_XIP) {
+ dev_err(smbus->dev, "Bus stuck, status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Arbitration loss? */
+ if (status & SMSTA_MTA) {
+ dev_err(smbus->dev, "Arbitration loss, status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Got NACK? */
+ if (status & SMSTA_MTN) {
+ dev_err(smbus->dev, "NACK, status 0x%08x\n", status);
+ return -ENXIO;
+ }
+
/* Clear XEN */
reg_write(smbus, REG_SMSTA, SMSTA_XEN);
@@ -177,9 +244,9 @@ static int pasemi_i2c_xfer(struct i2c_adapter *adapter,
struct pasemi_smbus *smbus = adapter->algo_data;
int ret, i;
- pasemi_smb_clear(smbus);
-
- ret = 0;
+ ret = pasemi_smb_clear(smbus);
+ if (ret)
+ return ret;
for (i = 0; i < num && !ret; i++)
ret = pasemi_i2c_xfer_msg(adapter, &msgs[i], (i == (num - 1)));
@@ -200,7 +267,9 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
addr <<= 1;
read_flag = read_write == I2C_SMBUS_READ;
- pasemi_smb_clear(smbus);
+ err = pasemi_smb_clear(smbus);
+ if (err)
+ return err;
switch (size) {
case I2C_SMBUS_QUICK:
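
Both wait paths now lean on readx_poll_timeout() from <linux/iopoll.h>, which replaces the open-coded msleep loop with a declarative poll that returns 0 on success or -ETIMEDOUT on expiry, leaving the last value read in the status variable. The call shape, as used above:

	u32 status;
	int ret;

	/* Poll ioread32(addr) every 1 ms, give up after 100 ms */
	ret = readx_poll_timeout(ioread32, smbus->ioaddr + REG_SMSTA,
				 status, status & SMSTA_XEN,
				 USEC_PER_MSEC,
				 USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS);
	if (ret < 0)
		return -ETIME;
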
diff --git a/drivers/i2c/busses/i2c-pasemi-pci.c b/drivers/i2c/busses/i2c-pasemi-pci.c
index 77f90c7436ed..b9ccb54ec77e 100644
--- a/drivers/i2c/busses/i2c-pasemi-pci.c
+++ b/drivers/i2c/busses/i2c-pasemi-pci.c
@@ -5,15 +5,15 @@
* SMBus host driver for PA Semi PWRficient
*/
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/io.h>
+#include <linux/stddef.h>
#include "i2c-pasemi-core.h"
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index dd75916157f0..9d3a4dc2bd60 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -34,6 +34,7 @@
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
+#include <asm/amd/fch.h>
#include "i2c-piix4.h"
@@ -80,12 +81,11 @@
#define SB800_PIIX4_PORT_IDX_MASK 0x06
#define SB800_PIIX4_PORT_IDX_SHIFT 1
-/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
-#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
-#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
+/* SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ (FCH_PM_DECODEEN + 0x02)
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ (FCH_PM_DECODEEN_SMBUS0SEL >> 16)
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
-#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300
#define SB800_PIIX4_FCH_PM_SIZE 8
#define SB800_ASF_ACPI_PATH "\\_SB.ASFC"
@@ -162,19 +162,19 @@ int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_c
if (mmio_cfg->use_mmio) {
void __iomem *addr;
- if (!request_mem_region_muxed(SB800_PIIX4_FCH_PM_ADDR,
+ if (!request_mem_region_muxed(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE,
"sb800_piix4_smb")) {
dev_err(dev,
"SMBus base address memory region 0x%x already in use.\n",
- SB800_PIIX4_FCH_PM_ADDR);
+ FCH_PM_BASE);
return -EBUSY;
}
- addr = ioremap(SB800_PIIX4_FCH_PM_ADDR,
+ addr = ioremap(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
if (!addr) {
- release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+ release_mem_region(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
dev_err(dev, "SMBus base address mapping failed.\n");
return -ENOMEM;
@@ -201,7 +201,7 @@ void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_
{
if (mmio_cfg->use_mmio) {
iounmap(mmio_cfg->addr);
- release_mem_region(SB800_PIIX4_FCH_PM_ADDR,
+ release_mem_region(FCH_PM_BASE,
SB800_PIIX4_FCH_PM_SIZE);
return;
}
@@ -971,7 +971,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
* This would allow the ee1004 to be probed incorrectly.
*/
if (port == 0)
- i2c_register_spd(adap);
+ i2c_register_spd_write_enable(adap);
*padap = adap;
return 0;
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 9a867c817db0..f99a2cc721a8 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -349,7 +349,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
/* Fill out the rest of the info structure */
info.addr = addr;
info.irq = irq_of_parse_and_map(node, 0);
- info.of_node = of_node_get(node);
+ info.fwnode = of_fwnode_handle(of_node_get(node));
newdev = i2c_new_client_device(adap, &info);
if (IS_ERR(newdev)) {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 515a784c951c..ccea575fb783 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -71,7 +71,6 @@ enum geni_i2c_err_code {
<< 5)
#define I2C_AUTO_SUSPEND_DELAY 250
-#define KHZ(freq) (1000 * freq)
#define PACKING_BYTES_PW 4
#define ABORT_TIMEOUT HZ
@@ -148,18 +147,18 @@ struct geni_i2c_clk_fld {
* source_clock = 19.2 MHz
*/
static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
- {KHZ(100), 7, 10, 12, 26},
- {KHZ(400), 2, 5, 11, 22},
- {KHZ(1000), 1, 2, 8, 18},
- {},
+ { I2C_MAX_STANDARD_MODE_FREQ, 7, 10, 12, 26 },
+ { I2C_MAX_FAST_MODE_FREQ, 2, 5, 11, 22 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 1, 2, 8, 18 },
+ {}
};
/* source_clock = 32 MHz */
static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = {
- {KHZ(100), 8, 14, 18, 40},
- {KHZ(400), 4, 3, 11, 20},
- {KHZ(1000), 2, 3, 6, 15},
- {},
+ { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 40 },
+ { I2C_MAX_FAST_MODE_FREQ, 4, 3, 11, 20 },
+ { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 6, 15 },
+ {}
};
static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
@@ -812,7 +811,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
&gi2c->clk_freq_out);
if (ret) {
dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
- gi2c->clk_freq_out = KHZ(100);
+ gi2c->clk_freq_out = I2C_MAX_STANDARD_MODE_FREQ;
}
if (has_acpi_companion(dev))
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d7dddd6c296a..23375f7fe3ad 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -52,6 +52,8 @@
#define ICCR1_ICE BIT(7)
#define ICCR1_IICRST BIT(6)
#define ICCR1_SOWP BIT(4)
+#define ICCR1_SCLO BIT(3)
+#define ICCR1_SDAO BIT(2)
#define ICCR1_SCLI BIT(1)
#define ICCR1_SDAI BIT(0)
@@ -151,11 +153,11 @@ static int riic_bus_barrier(struct riic_dev *riic)
ret = readb_poll_timeout(riic->base + riic->info->regs[RIIC_ICCR2], val,
!(val & ICCR2_BBSY), 10, riic->adapter.timeout);
if (ret)
- return ret;
+ return i2c_recover_bus(&riic->adapter);
if ((riic_readb(riic, RIIC_ICCR1) & (ICCR1_SDAI | ICCR1_SCLI)) !=
(ICCR1_SDAI | ICCR1_SCLI))
- return -EBUSY;
+ return i2c_recover_bus(&riic->adapter);
return 0;
}
@@ -439,6 +441,52 @@ static int riic_init_hw(struct riic_dev *riic)
return 0;
}
+static int riic_get_scl(struct i2c_adapter *adap)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SCLI);
+}
+
+static int riic_get_sda(struct i2c_adapter *adap)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SDAI);
+}
+
+static void riic_set_scl(struct i2c_adapter *adap, int val)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ if (val)
+ riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SCLO, RIIC_ICCR1);
+ else
+ riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SCLO, 0, RIIC_ICCR1);
+
+ riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1);
+}
+
+static void riic_set_sda(struct i2c_adapter *adap, int val)
+{
+ struct riic_dev *riic = i2c_get_adapdata(adap);
+
+ if (val)
+ riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SDAO, RIIC_ICCR1);
+ else
+ riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SDAO, 0, RIIC_ICCR1);
+
+ riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1);
+}
+
+static struct i2c_bus_recovery_info riic_bri = {
+ .recover_bus = i2c_generic_scl_recovery,
+ .get_scl = riic_get_scl,
+ .set_scl = riic_set_scl,
+ .get_sda = riic_get_sda,
+ .set_sda = riic_set_sda,
+};
+
static const struct riic_irq_desc riic_irqs[] = {
{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
@@ -495,6 +543,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
adap->algo = &riic_algo;
adap->dev.parent = dev;
adap->dev.of_node = dev->of_node;
+ adap->bus_recovery_info = &riic_bri;
init_completion(&riic->msg_done);
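
These hooks feed the core's generic recovery: i2c_generic_scl_recovery() pulses SCL (up to nine clocks) via set_scl() until get_sda() reports that the target has released the data line, then generates a STOP. Wiring it up needs only the callbacks plus one assignment before i2c_add_adapter(); a minimal sketch mirroring the patch (callback names hypothetical):

	static struct i2c_bus_recovery_info bri = {
		.recover_bus = i2c_generic_scl_recovery,
		.get_scl = my_get_scl,
		.set_scl = my_set_scl,
		.get_sda = my_get_sda,
		.set_sda = my_set_sda,	/* optional; lets recovery drive SDA */
	};

	adap->bus_recovery_info = &bri;
	ret = i2c_add_adapter(adap);

	/* later, on a stuck bus (as riic_bus_barrier() now does): */
	ret = i2c_recover_bus(adap);
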
diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c
index 53762cc56d28..b0e9c0b62429 100644
--- a/drivers/i2c/busses/i2c-rzv2m.c
+++ b/drivers/i2c/busses/i2c-rzv2m.c
@@ -402,7 +402,7 @@ static const struct i2c_adapter_quirks rzv2m_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
-static struct i2c_algorithm rzv2m_i2c_algo = {
+static const struct i2c_algorithm rzv2m_i2c_algo = {
.xfer = rzv2m_i2c_xfer,
.functionality = rzv2m_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index efe29621b8d7..adfcee6c9fdc 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/* Transmit operation: */
/* */
@@ -409,7 +410,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
pd->sr |= sr; /* remember state */
dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
- (pd->msg->flags & I2C_M_RD) ? "read" : "write",
+ str_read_write(pd->msg->flags & I2C_M_RD),
pd->pos, pd->msg->len);
/* Kick off TxDMA after preface was done */
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 87976e99e6d0..049b4d154c23 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1395,6 +1395,11 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE);
if (ret)
break;
+
+ /* Validate message length before proceeding */
+ if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX)
+ break;
+
/* Set the msg length from first byte */
msgs[i].len += msgs[i].buf[0];
dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index 143d012fa43e..3959f23fc440 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -168,6 +168,9 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
i2c->roff.twsi_int = 0x1010;
i2c->roff.sw_twsi_ext = 0x1018;
i2c->roff.mode = 0x1038;
+ i2c->roff.block_ctl = 0x1048;
+ i2c->roff.block_sts = 0x1050;
+ i2c->roff.block_fifo = 0x1058;
i2c->dev = dev;
pci_set_drvdata(pdev, i2c);
@@ -175,7 +178,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0f2ed181b266..a18eab0992a1 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
/* include interfaces to usb layer */
@@ -71,7 +72,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
dev_dbg(&adapter->dev,
" %d: %s (flags %d) %d bytes to 0x%02x\n",
- i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ i, str_read_write(pmsg->flags & I2C_M_RD),
pmsg->flags, pmsg->len, pmsg->addr);
/* and directly send the message */
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index d877f5a1f579..ca0358e8f928 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -532,22 +532,16 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
- return -EINVAL;
- }
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ)
+ return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed);
priv->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to enable clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n");
clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ if (!clk_rate)
+ return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n");
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
@@ -565,10 +559,8 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0,
pdev->name, priv);
- if (ret) {
- dev_err(dev, "failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %d\n", irq);
return i2c_add_adapter(&priv->adap);
}
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index b95d50d4d7db..9d49a3d5d612 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -327,22 +327,16 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
bus_speed = I2C_MAX_STANDARD_MODE_FREQ;
- if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) {
- dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
- return -EINVAL;
- }
+ if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ)
+ return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed);
priv->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to enable clock\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n");
clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ if (!clk_rate)
+ return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n");
priv->clk_cycle = clk_rate / bus_speed;
init_completion(&priv->comp);
@@ -359,10 +353,8 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name,
priv);
- if (ret) {
- dev_err(dev, "failed to request irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq %d\n", irq);
return i2c_add_adapter(&priv->adap);
}
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7ed29992a97f..2c26a57883f2 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,10 +89,9 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
u8 rev;
int res;
- if (pm_io_base) {
- dev_err(&dev->dev, "i2c-via: Will only support one host\n");
- return -ENODEV;
- }
+ if (pm_io_base)
+ return dev_err_probe(&dev->dev, -ENODEV,
+ "Will only support one host\n");
pci_read_config_byte(dev, PM_CFG_REVID, &rev);
@@ -113,10 +112,10 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_read_config_word(dev, base, &pm_io_base);
pm_io_base &= (0xff << 8);
- if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) {
- dev_err(&dev->dev, "IO 0x%x-0x%x already in use\n", I2C_DIR, I2C_DIR + IOSPACE);
- return -ENODEV;
- }
+ if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name))
+ return dev_err_probe(&dev->dev, -ENODEV,
+ "IO 0x%x-0x%x already in use\n",
+ I2C_DIR, I2C_DIR + IOSPACE);
outb(inb(I2C_DIR) & ~(I2C_SDA | I2C_SCL), I2C_DIR);
outb(inb(I2C_OUT) & ~(I2C_SDA | I2C_SCL), I2C_OUT);
diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c
index 4eb740faf268..2cf3cc0165fb 100644
--- a/drivers/i2c/busses/i2c-viai2c-wmt.c
+++ b/drivers/i2c/busses/i2c-viai2c-wmt.c
@@ -44,16 +44,13 @@ static int wmt_i2c_reset_hardware(struct viai2c *i2c)
int err;
err = clk_prepare_enable(i2c->clk);
- if (err) {
- dev_err(i2c->dev, "failed to enable clock\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(i2c->dev, err, "failed to enable clock\n");
err = clk_set_rate(i2c->clk, 20000000);
if (err) {
- dev_err(i2c->dev, "failed to set clock = 20Mhz\n");
clk_disable_unprepare(i2c->clk);
- return err;
+ return dev_err_probe(i2c->dev, err, "failed to set clock = 20MHz\n");
}
writew(0, i2c->base + VIAI2C_REG_CR);
@@ -121,10 +118,9 @@ static int wmt_i2c_probe(struct platform_device *pdev)
"failed to request irq %i\n", i2c->irq);
i2c->clk = of_clk_get(np, 0);
- if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "unable to request clock\n");
- return PTR_ERR(i2c->clk);
- }
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
+ "unable to request clock\n");
err = of_property_read_u32(np, "clock-frequency", &clk_rate);
if (!err && clk_rate == I2C_MAX_FAST_MODE_FREQ)
@@ -139,10 +135,8 @@ static int wmt_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
err = wmt_i2c_reset_hardware(i2c);
- if (err) {
- dev_err(&pdev->dev, "error initializing hardware\n");
+ if (err)
return err;
- }
err = i2c_add_adapter(adap);
if (err)
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 2cc7bba3b8bf..c58843609107 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -330,30 +330,27 @@ static int vt596_probe(struct pci_dev *pdev,
SMBHSTCFG = 0x84;
} else {
/* no matches at all */
- dev_err(&pdev->dev, "Cannot configure "
- "SMBus I/O Base address\n");
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Cannot configure "
+ "SMBus I/O Base address\n");
}
}
vt596_smba &= 0xfff0;
- if (vt596_smba == 0) {
- dev_err(&pdev->dev, "SMBus base address "
- "uninitialized - upgrade BIOS or use "
- "force_addr=0xaddr\n");
- return -ENODEV;
- }
+ if (vt596_smba == 0)
+ return dev_err_probe(&pdev->dev, -ENODEV, "SMBus base address "
+ "uninitialized - upgrade BIOS or use "
+ "force_addr=0xaddr\n");
found:
error = acpi_check_region(vt596_smba, 8, vt596_driver.name);
if (error)
return -ENODEV;
- if (!request_region(vt596_smba, 8, vt596_driver.name)) {
- dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
- vt596_smba);
- return -ENODEV;
- }
+ if (!request_region(vt596_smba, 8, vt596_driver.name))
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBus region 0x%x already in use!\n",
+ vt596_smba);
pci_read_config_byte(pdev, SMBHSTCFG, &temp);
/* If force_addr is set, we program the new address here. Just to make
@@ -375,10 +372,10 @@ found:
pci_write_config_byte(pdev, SMBHSTCFG, temp | 0x01);
dev_info(&pdev->dev, "Enabling SMBus device\n");
} else {
- dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
- "controller not enabled! - upgrade BIOS or "
- "use force=1\n");
- error = -ENODEV;
+ error = dev_err_probe(&pdev->dev, -ENODEV,
+ "SMBUS: Error: Host SMBus "
+ "controller not enabled! - "
+ "upgrade BIOS or use force=1\n");
goto release_region;
}
}
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 503e2f4d6f84..1bd602852e35 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -278,7 +279,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
dev_dbg(&i2c->dev,
" %d: %s (flags %d) %d bytes to 0x%02x\n",
- i, pmsg->flags & I2C_M_RD ? "read" : "write",
+ i, str_read_write(pmsg->flags & I2C_M_RD),
pmsg->flags, pmsg->len, pmsg->addr);
mutex_lock(&vb->lock);
@@ -384,15 +385,13 @@ static int vprbrd_i2c_probe(struct platform_device *pdev)
VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
VPRBRD_USB_TIMEOUT_MS);
- if (ret != 1) {
- dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n",
- i2c_bus_freq);
- return -EIO;
- }
+ if (ret != 1)
+ return dev_err_probe(&pdev->dev, -EIO,
+ "failure setting i2c_bus_freq to %d\n",
+ i2c_bus_freq);
} else {
- dev_err(&pdev->dev,
- "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
- return -EIO;
+ return dev_err_probe(&pdev->dev, -EIO,
+ "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
}
vb_i2c->i2c.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 2a351f961b89..9b05ff53d3d7 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -192,10 +192,9 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
struct virtio_i2c *vi;
int ret;
- if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) {
- dev_err(&vdev->dev, "Zero-length request feature is mandatory\n");
- return -EINVAL;
- }
+ if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST))
+ return dev_err_probe(&vdev->dev, -EINVAL,
+ "Zero-length request feature is mandatory\n");
vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL);
if (!vi)
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 663fe5604dd6..b29dec66b2c3 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -101,8 +101,6 @@ struct slimpro_i2c_dev {
struct completion rd_complete;
u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
- phys_addr_t comm_base_addr;
- void *pcc_comm_addr;
};
#define to_slimpro_i2c_dev(cl) \
@@ -148,7 +146,8 @@ static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg)
static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
{
struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl);
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
/* Check if platform sends interrupt */
if (!xgene_word_tst_and_clr(&generic_comm_base->status,
@@ -169,7 +168,8 @@ static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg)
{
- struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ ctx->pcc_chan->shmem;
u32 *ptr = (void *)(generic_comm_base + 1);
u16 status;
int i;
@@ -457,22 +457,18 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = true;
cl->rx_callback = slimpro_i2c_rx_cb;
ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX);
- if (IS_ERR(ctx->mbox_chan)) {
- dev_err(&pdev->dev, "i2c mailbox channel request failed\n");
- return PTR_ERR(ctx->mbox_chan);
- }
+ if (IS_ERR(ctx->mbox_chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ctx->mbox_chan),
+ "i2c mailbox channel request failed\n");
} else {
struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
- int version = XGENE_SLIMPRO_I2C_V1;
acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
if (!acpi_id)
return -EINVAL;
- version = (int)acpi_id->driver_data;
-
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx))
ctx->mbox_idx = MAILBOX_I2C_INDEX;
@@ -480,48 +476,19 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = false;
cl->rx_callback = slimpro_i2c_pcc_rx_cb;
pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(pcc_chan)) {
- dev_err(&pdev->dev, "PCC mailbox channel request failed\n");
- return PTR_ERR(pcc_chan);
- }
+ if (IS_ERR(pcc_chan))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pcc_chan),
+ "PCC mailbox channel request failed\n");
ctx->pcc_chan = pcc_chan;
ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
- dev_err(&pdev->dev, "PCC IRQ not supported\n");
- rc = -ENOENT;
+ rc = dev_err_probe(&pdev->dev, -ENOENT,
+ "PCC IRQ not supported\n");
goto mbox_err;
}
- /*
- * This is the shared communication region
- * for the OS and Platform to communicate over.
- */
- ctx->comm_base_addr = pcc_chan->shmem_base_addr;
- if (ctx->comm_base_addr) {
- if (version == XGENE_SLIMPRO_I2C_V2)
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WT);
- else
- ctx->pcc_comm_addr = memremap(
- ctx->comm_base_addr,
- pcc_chan->shmem_size,
- MEMREMAP_WB);
- } else {
- dev_err(&pdev->dev, "Failed to get PCC comm region\n");
- rc = -ENOENT;
- goto mbox_err;
- }
-
- if (!ctx->pcc_comm_addr) {
- dev_err(&pdev->dev,
- "Failed to ioremap PCC comm region\n");
- rc = -ENOMEM;
- goto mbox_err;
- }
}
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
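
With this change the PCC mailbox core maps the shared communication region when the channel is requested, so the driver no longer memremaps it by hand and simply dereferences pcc_chan->shmem, as the callbacks above now do. A trimmed sketch, assuming the pcc_mbox API shown in the diff:

	struct acpi_pcct_shared_memory __iomem *shmem;
	struct pcc_mbox_chan *pcc_chan;

	pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
	if (IS_ERR(pcc_chan))
		return PTR_ERR(pcc_chan);

	/* Shared memory already ioremapped by the PCC core */
	shmem = pcc_chan->shmem;
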
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index dc1e46d834dc..6bc1575cea6c 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -1489,7 +1489,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
pdev->name, i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ dev_err_probe(&pdev->dev, ret, "Cannot claim IRQ\n");
goto err_pm_disable;
}
@@ -1510,7 +1510,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
ret = xiic_reinit(i2c);
if (ret < 0) {
- dev_err(&pdev->dev, "Cannot xiic_reinit\n");
+ dev_err_probe(&pdev->dev, ret, "Cannot xiic_reinit\n");
goto err_pm_disable;
}
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 4d6abd7e92ce..06cf221557f2 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -500,10 +500,8 @@ static int scx200_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't fetch device resource info\n");
- return -ENODEV;
- }
+ if (!res)
+ return dev_err_probe(&pdev->dev, -ENODEV, "can't fetch device resource info\n");
iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev);
if (!iface)
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index 783fb8df2ebe..be7d6d41e0b2 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -16,32 +16,65 @@
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/lockdep.h>
#define ATR_MAX_ADAPTERS 100 /* Just a sanity limit */
#define ATR_MAX_SYMLINK_LEN 11 /* Longest name is 10 chars: "channel-99" */
/**
- * struct i2c_atr_alias_pair - Holds the alias assigned to a client.
+ * struct i2c_atr_alias_pair - Holds the alias assigned to a client address.
* @node: List node
- * @client: Pointer to the client on the child bus
+ * @addr: Address of the client on the child bus.
* @alias: I2C alias address assigned by the driver.
* This is the address that will be used to issue I2C transactions
* on the parent (physical) bus.
+ * @fixed: Alias pair cannot be replaced during dynamic address attachment.
+ * This flag is necessary for situations where a single I2C transaction
+ * contains more distinct target addresses than the ATR channel can handle.
+ * It marks addresses that have already been attached to an alias so
+ * that their alias pair is not evicted by a subsequent address in the same
+ * transaction.
+ *
*/
struct i2c_atr_alias_pair {
struct list_head node;
- const struct i2c_client *client;
+ bool fixed;
+ u16 addr;
u16 alias;
};
/**
+ * struct i2c_atr_alias_pool - Pool of client aliases available for an ATR.
+ * @size: Total number of aliases
+ * @shared: Indicates if this alias pool is shared by multiple channels
+ *
+ * @lock: Lock protecting @aliases and @use_mask
+ * @aliases: Array of aliases, must hold exactly @size elements
+ * @use_mask: Mask of used aliases
+ */
+struct i2c_atr_alias_pool {
+ size_t size;
+ bool shared;
+
+ /* Protects aliases and use_mask */
+ spinlock_t lock;
+ u16 *aliases;
+ unsigned long *use_mask;
+};
+
+/**
* struct i2c_atr_chan - Data for a channel.
* @adap: The &struct i2c_adapter for the channel
* @atr: The parent I2C ATR
* @chan_id: The ID of this channel
- * @alias_list: List of @struct i2c_atr_alias_pair containing the
+ * @alias_pairs_lock: Mutex protecting @alias_pairs
+ * @alias_pairs_lock_key: Lock key for @alias_pairs_lock
+ * @alias_pairs: List of @struct i2c_atr_alias_pair containing the
* assigned aliases
+ * @alias_pool: Pool of available client aliases
+ *
* @orig_addrs_lock: Mutex protecting @orig_addrs
+ * @orig_addrs_lock_key: Lock key for @orig_addrs_lock
* @orig_addrs: Buffer used to store the original addresses during transmit
* @orig_addrs_size: Size of @orig_addrs
*/
@@ -50,10 +83,15 @@ struct i2c_atr_chan {
struct i2c_atr *atr;
u32 chan_id;
- struct list_head alias_list;
+ /* Lock alias_pairs during attach/detach */
+ struct mutex alias_pairs_lock;
+ struct lock_class_key alias_pairs_lock_key;
+ struct list_head alias_pairs;
+ struct i2c_atr_alias_pool *alias_pool;
/* Lock orig_addrs during xfer */
struct mutex orig_addrs_lock;
+ struct lock_class_key orig_addrs_lock_key;
u16 *orig_addrs;
unsigned int orig_addrs_size;
};
@@ -66,11 +104,10 @@ struct i2c_atr_chan {
* @priv: Private driver data, set with i2c_atr_set_driver_data()
* @algo: The &struct i2c_algorithm for adapters
* @lock: Lock for the I2C bus segment (see &struct i2c_lock_operations)
+ * @lock_key: Lock key for @lock
* @max_adapters: Maximum number of adapters this I2C ATR can have
- * @num_aliases: Number of aliases in the aliases array
- * @aliases: The aliases array
- * @alias_mask_lock: Lock protecting alias_use_mask
- * @alias_use_mask: Bitmask for used aliases in aliases array
+ * @flags: ATR flags (I2C_ATR_F_*)
+ * @alias_pool: Optional common pool of available client aliases
* @i2c_nb: Notifier for remote client add & del events
* @adapter: Array of adapters
*/
@@ -84,27 +121,135 @@ struct i2c_atr {
struct i2c_algorithm algo;
/* lock for the I2C bus segment (see struct i2c_lock_operations) */
struct mutex lock;
+ struct lock_class_key lock_key;
int max_adapters;
+ u32 flags;
- size_t num_aliases;
- const u16 *aliases;
- /* Protects alias_use_mask */
- spinlock_t alias_mask_lock;
- unsigned long *alias_use_mask;
+ struct i2c_atr_alias_pool *alias_pool;
struct notifier_block i2c_nb;
struct i2c_adapter *adapter[] __counted_by(max_adapters);
};
+static struct i2c_atr_alias_pool *i2c_atr_alloc_alias_pool(size_t num_aliases, bool shared)
+{
+ struct i2c_atr_alias_pool *alias_pool;
+ int ret;
+
+ alias_pool = kzalloc(sizeof(*alias_pool), GFP_KERNEL);
+ if (!alias_pool)
+ return ERR_PTR(-ENOMEM);
+
+ alias_pool->size = num_aliases;
+
+ alias_pool->aliases = kcalloc(num_aliases, sizeof(*alias_pool->aliases), GFP_KERNEL);
+ if (!alias_pool->aliases) {
+ ret = -ENOMEM;
+ goto err_free_alias_pool;
+ }
+
+ alias_pool->use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
+ if (!alias_pool->use_mask) {
+ ret = -ENOMEM;
+ goto err_free_aliases;
+ }
+
+ alias_pool->shared = shared;
+
+ spin_lock_init(&alias_pool->lock);
+
+ return alias_pool;
+
+err_free_aliases:
+ kfree(alias_pool->aliases);
+err_free_alias_pool:
+ kfree(alias_pool);
+ return ERR_PTR(ret);
+}
+
+static void i2c_atr_free_alias_pool(struct i2c_atr_alias_pool *alias_pool)
+{
+ bitmap_free(alias_pool->use_mask);
+ kfree(alias_pool->aliases);
+ kfree(alias_pool);
+}
+
+/* Must be called with alias_pairs_lock held */
+static struct i2c_atr_alias_pair *i2c_atr_create_c2a(struct i2c_atr_chan *chan,
+ u16 alias, u16 addr)
+{
+ struct i2c_atr_alias_pair *c2a;
+
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
+ if (!c2a)
+ return NULL;
+
+ c2a->addr = addr;
+ c2a->alias = alias;
+
+ list_add(&c2a->node, &chan->alias_pairs);
+
+ return c2a;
+}
+
+/* Must be called with alias_pairs_lock held */
+static void i2c_atr_destroy_c2a(struct i2c_atr_alias_pair **pc2a)
+{
+ list_del(&(*pc2a)->node);
+ kfree(*pc2a);
+ *pc2a = NULL;
+}
+
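+/*
+ * i2c_atr_reserve_alias() returns the reserved 16-bit alias value as a
+ * non-negative int, or -EBUSY once the pool is exhausted.
+ * i2c_atr_release_alias() scans the pool linearly, which is fine for the
+ * small alias pools used in practice.
+ */
+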
+static int i2c_atr_reserve_alias(struct i2c_atr_alias_pool *alias_pool)
+{
+ unsigned long idx;
+ u16 alias;
+
+ spin_lock(&alias_pool->lock);
+
+ idx = find_first_zero_bit(alias_pool->use_mask, alias_pool->size);
+ if (idx >= alias_pool->size) {
+ spin_unlock(&alias_pool->lock);
+ return -EBUSY;
+ }
+
+ set_bit(idx, alias_pool->use_mask);
+
+ alias = alias_pool->aliases[idx];
+
+ spin_unlock(&alias_pool->lock);
+ return alias;
+}
+
+static void i2c_atr_release_alias(struct i2c_atr_alias_pool *alias_pool, u16 alias)
+{
+ unsigned int idx;
+
+ spin_lock(&alias_pool->lock);
+
+ for (idx = 0; idx < alias_pool->size; ++idx) {
+ if (alias_pool->aliases[idx] == alias) {
+ clear_bit(idx, alias_pool->use_mask);
+ spin_unlock(&alias_pool->lock);
+ return;
+ }
+ }
+
+ spin_unlock(&alias_pool->lock);
+}
+
static struct i2c_atr_alias_pair *
-i2c_atr_find_mapping_by_client(const struct list_head *list,
- const struct i2c_client *client)
+i2c_atr_find_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
{
struct i2c_atr_alias_pair *c2a;
- list_for_each_entry(c2a, list, node) {
- if (c2a->client == client)
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ list_for_each_entry(c2a, &chan->alias_pairs, node) {
+ if (c2a->addr == addr)
return c2a;
}
@@ -112,18 +257,107 @@ i2c_atr_find_mapping_by_client(const struct list_head *list,
}
static struct i2c_atr_alias_pair *
-i2c_atr_find_mapping_by_addr(const struct list_head *list, u16 phys_addr)
+i2c_atr_create_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
{
+ struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
+ u16 alias;
+ int ret;
- list_for_each_entry(c2a, list, node) {
- if (c2a->client->addr == phys_addr)
- return c2a;
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ ret = i2c_atr_reserve_alias(chan->alias_pool);
+ if (ret < 0)
+ return NULL;
+
+ alias = ret;
+
+ c2a = i2c_atr_create_c2a(chan, alias, addr);
+ if (!c2a)
+ goto err_release_alias;
+
+ ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias);
+ if (ret) {
+ dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n",
+ addr, chan->chan_id, ret);
+ goto err_del_c2a;
}
+ return c2a;
+
+err_del_c2a:
+ i2c_atr_destroy_c2a(&c2a);
+err_release_alias:
+ i2c_atr_release_alias(chan->alias_pool, alias);
return NULL;
}
+static struct i2c_atr_alias_pair *
+i2c_atr_replace_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+ struct list_head *alias_pairs;
+ bool found = false;
+ u16 alias;
+ int ret;
+
+ lockdep_assert_held(&chan->alias_pairs_lock);
+
+ alias_pairs = &chan->alias_pairs;
+
+ if (unlikely(list_empty(alias_pairs)))
+ return NULL;
+
+ list_for_each_entry_reverse(c2a, alias_pairs, node) {
+ if (!c2a->fixed) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ atr->ops->detach_addr(atr, chan->chan_id, c2a->addr);
+ c2a->addr = addr;
+
+ list_move(&c2a->node, alias_pairs);
+
+ alias = c2a->alias;
+
+ ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias);
+ if (ret) {
+ dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n",
+ addr, chan->chan_id, ret);
+ i2c_atr_destroy_c2a(&c2a);
+ i2c_atr_release_alias(chan->alias_pool, alias);
+ return NULL;
+ }
+
+ return c2a;
+}
+
+static struct i2c_atr_alias_pair *
+i2c_atr_get_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr)
+{
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+
+ c2a = i2c_atr_find_mapping_by_addr(chan, addr);
+ if (c2a)
+ return c2a;
+
+ if (atr->flags & I2C_ATR_F_STATIC)
+ return NULL;
+
+ c2a = i2c_atr_create_mapping_by_addr(chan, addr);
+ if (c2a)
+ return c2a;
+
+ return i2c_atr_replace_mapping_by_addr(chan, addr);
+}
+
/*
* Replace all message addresses with their aliases, saving the original
* addresses.
@@ -136,7 +370,7 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
{
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- int i;
+ int i, ret = 0;
/* Ensure we have enough room to save the original addresses */
if (unlikely(chan->orig_addrs_size < num)) {
@@ -152,25 +386,36 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
chan->orig_addrs_size = num;
}
+ mutex_lock(&chan->alias_pairs_lock);
+
for (i = 0; i < num; i++) {
chan->orig_addrs[i] = msgs[i].addr;
- c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list,
- msgs[i].addr);
+ c2a = i2c_atr_get_mapping_by_addr(chan, msgs[i].addr);
+
if (!c2a) {
+ if (atr->flags & I2C_ATR_F_PASSTHROUGH)
+ continue;
+
dev_err(atr->dev, "client 0x%02x not mapped!\n",
msgs[i].addr);
while (i--)
msgs[i].addr = chan->orig_addrs[i];
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_unlock;
}
+ /* Prevent c2a from being overwritten by another client in this transaction */
+ c2a->fixed = true;
+
msgs[i].addr = c2a->alias;
}
- return 0;
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
+ return ret;
}
/*
@@ -183,10 +428,24 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
static void i2c_atr_unmap_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
int num)
{
+ struct i2c_atr_alias_pair *c2a;
int i;
for (i = 0; i < num; i++)
msgs[i].addr = chan->orig_addrs[i];
+
+ mutex_lock(&chan->alias_pairs_lock);
+
+ if (unlikely(list_empty(&chan->alias_pairs)))
+ goto out_unlock;
+
+ /* Unfix c2a entries so that subsequent transfers can reuse their aliases */
+ list_for_each_entry(c2a, &chan->alias_pairs, node)
+ c2a->fixed = false;
+
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
}
static int i2c_atr_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
@@ -224,14 +483,23 @@ static int i2c_atr_smbus_xfer(struct i2c_adapter *adap, u16 addr,
struct i2c_atr *atr = chan->atr;
struct i2c_adapter *parent = atr->parent;
struct i2c_atr_alias_pair *c2a;
+ u16 alias;
- c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, addr);
- if (!c2a) {
+ mutex_lock(&chan->alias_pairs_lock);
+
+ c2a = i2c_atr_get_mapping_by_addr(chan, addr);
+
+ if (!c2a && !(atr->flags & I2C_ATR_F_PASSTHROUGH)) {
dev_err(atr->dev, "client 0x%02x not mapped!\n", addr);
+ mutex_unlock(&chan->alias_pairs_lock);
return -ENXIO;
}
- return i2c_smbus_xfer(parent, c2a->alias, flags, read_write, command,
+ alias = c2a ? c2a->alias : addr;
+
+ mutex_unlock(&chan->alias_pairs_lock);
+
+ return i2c_smbus_xfer(parent, alias, flags, read_write, command,
size, data);
}
@@ -273,112 +541,60 @@ static const struct i2c_lock_operations i2c_atr_lock_ops = {
.unlock_bus = i2c_atr_unlock_bus,
};
-static int i2c_atr_reserve_alias(struct i2c_atr *atr)
-{
- unsigned long idx;
-
- spin_lock(&atr->alias_mask_lock);
-
- idx = find_first_zero_bit(atr->alias_use_mask, atr->num_aliases);
- if (idx >= atr->num_aliases) {
- spin_unlock(&atr->alias_mask_lock);
- dev_err(atr->dev, "failed to find a free alias\n");
- return -EBUSY;
- }
-
- set_bit(idx, atr->alias_use_mask);
-
- spin_unlock(&atr->alias_mask_lock);
-
- return atr->aliases[idx];
-}
-
-static void i2c_atr_release_alias(struct i2c_atr *atr, u16 alias)
-{
- unsigned int idx;
-
- spin_lock(&atr->alias_mask_lock);
-
- for (idx = 0; idx < atr->num_aliases; ++idx) {
- if (atr->aliases[idx] == alias) {
- clear_bit(idx, atr->alias_use_mask);
- spin_unlock(&atr->alias_mask_lock);
- return;
- }
- }
-
- spin_unlock(&atr->alias_mask_lock);
-
- /* This should never happen */
- dev_warn(atr->dev, "Unable to find mapped alias\n");
-}
-
-static int i2c_atr_attach_client(struct i2c_adapter *adapter,
- const struct i2c_client *client)
+static int i2c_atr_attach_addr(struct i2c_adapter *adapter,
+ u16 addr)
{
struct i2c_atr_chan *chan = adapter->algo_data;
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- u16 alias;
- int ret;
+ int ret = 0;
- ret = i2c_atr_reserve_alias(atr);
- if (ret < 0)
- return ret;
+ mutex_lock(&chan->alias_pairs_lock);
- alias = ret;
+ c2a = i2c_atr_create_mapping_by_addr(chan, addr);
+ if (!c2a && !(atr->flags & I2C_ATR_F_STATIC))
+ c2a = i2c_atr_replace_mapping_by_addr(chan, addr);
- c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
if (!c2a) {
- ret = -ENOMEM;
- goto err_release_alias;
+ dev_err(atr->dev, "failed to find a free alias\n");
+ ret = -EBUSY;
+ goto out_unlock;
}
- ret = atr->ops->attach_client(atr, chan->chan_id, client, alias);
- if (ret)
- goto err_free;
-
- dev_dbg(atr->dev, "chan%u: client 0x%02x mapped at alias 0x%02x (%s)\n",
- chan->chan_id, client->addr, alias, client->name);
-
- c2a->client = client;
- c2a->alias = alias;
- list_add(&c2a->node, &chan->alias_list);
-
- return 0;
-
-err_free:
- kfree(c2a);
-err_release_alias:
- i2c_atr_release_alias(atr, alias);
+ dev_dbg(atr->dev, "chan%u: using alias 0x%02x for addr 0x%02x\n",
+ chan->chan_id, c2a->alias, addr);
+out_unlock:
+ mutex_unlock(&chan->alias_pairs_lock);
return ret;
}
-static void i2c_atr_detach_client(struct i2c_adapter *adapter,
- const struct i2c_client *client)
+static void i2c_atr_detach_addr(struct i2c_adapter *adapter,
+ u16 addr)
{
struct i2c_atr_chan *chan = adapter->algo_data;
struct i2c_atr *atr = chan->atr;
struct i2c_atr_alias_pair *c2a;
- atr->ops->detach_client(atr, chan->chan_id, client);
+ atr->ops->detach_addr(atr, chan->chan_id, addr);
+
+ mutex_lock(&chan->alias_pairs_lock);
- c2a = i2c_atr_find_mapping_by_client(&chan->alias_list, client);
+ c2a = i2c_atr_find_mapping_by_addr(chan, addr);
if (!c2a) {
- /* This should never happen */
- dev_warn(atr->dev, "Unable to find address mapping\n");
+ mutex_unlock(&chan->alias_pairs_lock);
return;
}
- i2c_atr_release_alias(atr, c2a->alias);
+ i2c_atr_release_alias(chan->alias_pool, c2a->alias);
dev_dbg(atr->dev,
- "chan%u: client 0x%02x unmapped from alias 0x%02x (%s)\n",
- chan->chan_id, client->addr, c2a->alias, client->name);
+ "chan%u: detached alias 0x%02x from addr 0x%02x\n",
+ chan->chan_id, c2a->alias, addr);
- list_del(&c2a->node);
- kfree(c2a);
+ i2c_atr_destroy_c2a(&c2a);
+
+ mutex_unlock(&chan->alias_pairs_lock);
}
static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
@@ -405,7 +621,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
switch (event) {
case BUS_NOTIFY_ADD_DEVICE:
- ret = i2c_atr_attach_client(client->adapter, client);
+ ret = i2c_atr_attach_addr(client->adapter, client->addr);
if (ret)
dev_err(atr->dev,
"Failed to attach remote client '%s': %d\n",
@@ -413,7 +629,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
break;
case BUS_NOTIFY_REMOVED_DEVICE:
- i2c_atr_detach_client(client->adapter, client);
+ i2c_atr_detach_addr(client->adapter, client->addr);
break;
default:
@@ -425,29 +641,43 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
{
+ struct i2c_atr_alias_pool *alias_pool;
struct device *dev = atr->dev;
- unsigned long *alias_use_mask;
size_t num_aliases;
unsigned int i;
u32 *aliases32;
- u16 *aliases16;
int ret;
- ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
- if (ret < 0) {
- dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
- ret);
+ if (!fwnode_property_present(dev_fwnode(dev), "i2c-alias-pool")) {
+ num_aliases = 0;
+ } else {
+ ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
+ if (ret < 0) {
+ dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
+ ret);
+ return ret;
+ }
+
+ num_aliases = ret;
+ }
+
+ alias_pool = i2c_atr_alloc_alias_pool(num_aliases, true);
+ if (IS_ERR(alias_pool)) {
+ ret = PTR_ERR(alias_pool);
+ dev_err(dev, "Failed to allocate alias pool, err %d\n", ret);
return ret;
}
- num_aliases = ret;
+ atr->alias_pool = alias_pool;
- if (!num_aliases)
+ if (!alias_pool->size)
return 0;
aliases32 = kcalloc(num_aliases, sizeof(*aliases32), GFP_KERNEL);
- if (!aliases32)
- return -ENOMEM;
+ if (!aliases32) {
+ ret = -ENOMEM;
+ goto err_free_alias_pool;
+ }
ret = fwnode_property_read_u32_array(dev_fwnode(dev), "i2c-alias-pool",
aliases32, num_aliases);
@@ -457,48 +687,33 @@ static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
goto err_free_aliases32;
}
- aliases16 = kcalloc(num_aliases, sizeof(*aliases16), GFP_KERNEL);
- if (!aliases16) {
- ret = -ENOMEM;
- goto err_free_aliases32;
- }
-
for (i = 0; i < num_aliases; i++) {
if (!(aliases32[i] & 0xffff0000)) {
- aliases16[i] = aliases32[i];
+ alias_pool->aliases[i] = aliases32[i];
continue;
}
dev_err(dev, "Failed to parse 'i2c-alias-pool' property: I2C flags are not supported\n");
ret = -EINVAL;
- goto err_free_aliases16;
- }
-
- alias_use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
- if (!alias_use_mask) {
- ret = -ENOMEM;
- goto err_free_aliases16;
+ goto err_free_aliases32;
}
kfree(aliases32);
- atr->num_aliases = num_aliases;
- atr->aliases = aliases16;
- atr->alias_use_mask = alias_use_mask;
-
- dev_dbg(dev, "i2c-alias-pool has %zu aliases", atr->num_aliases);
+ dev_dbg(dev, "i2c-alias-pool has %zu aliases\n", alias_pool->size);
return 0;
-err_free_aliases16:
- kfree(aliases16);
err_free_aliases32:
kfree(aliases32);
+err_free_alias_pool:
+ i2c_atr_free_alias_pool(alias_pool);
return ret;
}
struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
- const struct i2c_atr_ops *ops, int max_adapters)
+ const struct i2c_atr_ops *ops, int max_adapters,
+ u32 flags)
{
struct i2c_atr *atr;
int ret;
@@ -506,20 +721,21 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
if (max_adapters > ATR_MAX_ADAPTERS)
return ERR_PTR(-EINVAL);
- if (!ops || !ops->attach_client || !ops->detach_client)
+ if (!ops || !ops->attach_addr || !ops->detach_addr)
return ERR_PTR(-EINVAL);
atr = kzalloc(struct_size(atr, adapter, max_adapters), GFP_KERNEL);
if (!atr)
return ERR_PTR(-ENOMEM);
- mutex_init(&atr->lock);
- spin_lock_init(&atr->alias_mask_lock);
+ lockdep_register_key(&atr->lock_key);
+ mutex_init_with_key(&atr->lock, &atr->lock_key);
atr->parent = parent;
atr->dev = dev;
atr->ops = ops;
atr->max_adapters = max_adapters;
+ atr->flags = flags;
if (parent->algo->master_xfer)
atr->algo.master_xfer = i2c_atr_master_xfer;
@@ -534,15 +750,15 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
atr->i2c_nb.notifier_call = i2c_atr_bus_notifier_call;
ret = bus_register_notifier(&i2c_bus_type, &atr->i2c_nb);
if (ret)
- goto err_free_aliases;
+ goto err_free_alias_pool;
return atr;
-err_free_aliases:
- bitmap_free(atr->alias_use_mask);
- kfree(atr->aliases);
+err_free_alias_pool:
+ i2c_atr_free_alias_pool(atr->alias_pool);
err_destroy_mutex:
mutex_destroy(&atr->lock);
+ lockdep_unregister_key(&atr->lock_key);
kfree(atr);
return ERR_PTR(ret);
@@ -557,22 +773,22 @@ void i2c_atr_delete(struct i2c_atr *atr)
WARN_ON(atr->adapter[i]);
bus_unregister_notifier(&i2c_bus_type, &atr->i2c_nb);
- bitmap_free(atr->alias_use_mask);
- kfree(atr->aliases);
+ i2c_atr_free_alias_pool(atr->alias_pool);
mutex_destroy(&atr->lock);
+ lockdep_unregister_key(&atr->lock_key);
kfree(atr);
}
EXPORT_SYMBOL_NS_GPL(i2c_atr_delete, "I2C_ATR");
-int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
- struct device *adapter_parent,
- struct fwnode_handle *bus_handle)
+int i2c_atr_add_adapter(struct i2c_atr *atr, struct i2c_atr_adap_desc *desc)
{
+ struct fwnode_handle *bus_handle = desc->bus_handle;
struct i2c_adapter *parent = atr->parent;
+ char symlink_name[ATR_MAX_SYMLINK_LEN];
struct device *dev = atr->dev;
+ u32 chan_id = desc->chan_id;
struct i2c_atr_chan *chan;
- char symlink_name[ATR_MAX_SYMLINK_LEN];
- int ret;
+ int ret, idx;
if (chan_id >= atr->max_adapters) {
dev_err(dev, "No room for more i2c-atr adapters\n");
@@ -588,20 +804,23 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
if (!chan)
return -ENOMEM;
- if (!adapter_parent)
- adapter_parent = dev;
+ if (!desc->parent)
+ desc->parent = dev;
chan->atr = atr;
chan->chan_id = chan_id;
- INIT_LIST_HEAD(&chan->alias_list);
- mutex_init(&chan->orig_addrs_lock);
+ INIT_LIST_HEAD(&chan->alias_pairs);
+ lockdep_register_key(&chan->alias_pairs_lock_key);
+ lockdep_register_key(&chan->orig_addrs_lock_key);
+ mutex_init_with_key(&chan->alias_pairs_lock, &chan->alias_pairs_lock_key);
+ mutex_init_with_key(&chan->orig_addrs_lock, &chan->orig_addrs_lock_key);
snprintf(chan->adap.name, sizeof(chan->adap.name), "i2c-%d-atr-%d",
i2c_adapter_id(parent), chan_id);
chan->adap.owner = THIS_MODULE;
chan->adap.algo = &atr->algo;
chan->adap.algo_data = chan;
- chan->adap.dev.parent = adapter_parent;
+ chan->adap.dev.parent = desc->parent;
chan->adap.retries = parent->retries;
chan->adap.timeout = parent->timeout;
chan->adap.quirks = parent->quirks;
@@ -628,13 +847,26 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
fwnode_handle_put(atr_node);
}
+ if (desc->num_aliases > 0) {
+ chan->alias_pool = i2c_atr_alloc_alias_pool(desc->num_aliases, false);
+ if (IS_ERR(chan->alias_pool)) {
+ ret = PTR_ERR(chan->alias_pool);
+ goto err_fwnode_put;
+ }
+
+ for (idx = 0; idx < desc->num_aliases; idx++)
+ chan->alias_pool->aliases[idx] = desc->aliases[idx];
+ } else {
+ chan->alias_pool = atr->alias_pool;
+ }
+
atr->adapter[chan_id] = &chan->adap;
ret = i2c_add_adapter(&chan->adap);
if (ret) {
dev_err(dev, "failed to add atr-adapter %u (error=%d)\n",
chan_id, ret);
- goto err_fwnode_put;
+ goto err_free_alias_pool;
}
snprintf(symlink_name, sizeof(symlink_name), "channel-%u",
@@ -651,9 +883,15 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
return 0;
+err_free_alias_pool:
+ if (!chan->alias_pool->shared)
+ i2c_atr_free_alias_pool(chan->alias_pool);
err_fwnode_put:
fwnode_handle_put(dev_fwnode(&chan->adap.dev));
mutex_destroy(&chan->orig_addrs_lock);
+ mutex_destroy(&chan->alias_pairs_lock);
+ lockdep_unregister_key(&chan->orig_addrs_lock_key);
+ lockdep_unregister_key(&chan->alias_pairs_lock_key);
kfree(chan);
return ret;
}
@@ -683,10 +921,16 @@ void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id)
i2c_del_adapter(adap);
+ if (!chan->alias_pool->shared)
+ i2c_atr_free_alias_pool(chan->alias_pool);
+
atr->adapter[chan_id] = NULL;
fwnode_handle_put(fwnode);
mutex_destroy(&chan->orig_addrs_lock);
+ mutex_destroy(&chan->alias_pairs_lock);
+ lockdep_unregister_key(&chan->orig_addrs_lock_key);
+ lockdep_unregister_key(&chan->alias_pairs_lock_key);
kfree(chan->orig_addrs);
kfree(chan);
}
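A minimal consumer sketch against the reworked API (ops now operate on plain addresses, i2c_atr_new() takes a flags argument, and adapters are described via struct i2c_atr_adap_desc); all demo_* names are hypothetical:

#include <linux/err.h>
#include <linux/i2c-atr.h>

static int demo_attach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr, u16 alias)
{
	/* Program the hardware translation entry: addr -> alias */
	return 0;
}

static void demo_detach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr)
{
	/* Remove the hardware translation entry for addr */
}

static const struct i2c_atr_ops demo_atr_ops = {
	.attach_addr = demo_attach_addr,
	.detach_addr = demo_detach_addr,
};

static int demo_setup(struct device *dev, struct i2c_adapter *parent,
		      struct fwnode_handle *bus_handle)
{
	struct i2c_atr_adap_desc desc = {
		.chan_id = 0,
		.parent = dev,
		.bus_handle = bus_handle,
		.num_aliases = 0,	/* fall back to the shared "i2c-alias-pool" */
	};
	struct i2c_atr *atr;
	int ret;

	atr = i2c_atr_new(parent, dev, &demo_atr_ops, 1, 0);
	if (IS_ERR(atr))
		return PTR_ERR(atr);

	ret = i2c_atr_add_adapter(atr, &desc);
	if (ret)
		i2c_atr_delete(atr);

	return ret;
}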
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7ad1ad5c8c3f..2ad2b1838f0f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -26,14 +26,13 @@
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/irqflags.h>
+#include <linux/irq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/pm_domain.h>
@@ -42,6 +41,7 @@
#include <linux/property.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "i2c-core.h"
@@ -490,6 +490,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
static int i2c_device_probe(struct device *dev)
{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct i2c_client *client = i2c_verify_client(dev);
struct i2c_driver *driver;
bool do_power_on;
@@ -508,11 +509,11 @@ static int i2c_device_probe(struct device *dev)
/* Keep adapter active when Host Notify is required */
pm_runtime_get_sync(&client->adapter->dev);
irq = i2c_smbus_host_notify_to_irq(client);
- } else if (dev->of_node) {
- irq = of_irq_get_byname(dev->of_node, "irq");
+ } else if (is_of_node(fwnode)) {
+ irq = fwnode_irq_get_byname(fwnode, "irq");
if (irq == -EINVAL || irq == -ENODATA)
- irq = of_irq_get(dev->of_node, 0);
- } else if (ACPI_COMPANION(dev)) {
+ irq = fwnode_irq_get(fwnode, 0);
+ } else if (is_acpi_device_node(fwnode)) {
bool wake_capable;
irq = i2c_acpi_get_irq(client, &wake_capable);
@@ -520,7 +521,7 @@ static int i2c_device_probe(struct device *dev)
client->flags |= I2C_CLIENT_WAKE;
}
if (irq == -EPROBE_DEFER) {
- status = irq;
+ status = dev_err_probe(dev, irq, "can't get irq\n");
goto put_sync_adapter;
}
@@ -546,9 +547,9 @@ static int i2c_device_probe(struct device *dev)
if (client->flags & I2C_CLIENT_WAKE) {
int wakeirq;
- wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ wakeirq = fwnode_irq_get_byname(fwnode, "wakeup");
if (wakeirq == -EPROBE_DEFER) {
- status = wakeirq;
+ status = dev_err_probe(dev, wakeirq, "can't get wakeirq\n");
goto put_sync_adapter;
}
@@ -567,7 +568,7 @@ static int i2c_device_probe(struct device *dev)
dev_dbg(dev, "probe\n");
- status = of_clk_set_defaults(dev->of_node, false);
+ status = of_clk_set_defaults(to_of_node(fwnode), false);
if (status < 0)
goto err_clear_wakeup_irq;
@@ -961,6 +962,7 @@ static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr,
struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
{
+ struct fwnode_handle *fwnode = info->fwnode;
struct i2c_client *client;
bool need_put = false;
int status;
@@ -1001,18 +1003,18 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
- client->dev.of_node = of_node_get(info->of_node);
- client->dev.fwnode = info->fwnode;
device_enable_async_suspend(&client->dev);
+ device_set_node(&client->dev, fwnode_handle_get(fwnode));
+
if (info->swnode) {
status = device_add_software_node(&client->dev, info->swnode);
if (status) {
dev_err(&adap->dev,
"Failed to add software node to client %s: %d\n",
client->name, status);
- goto out_err_put_of_node;
+ goto out_err_put_fwnode;
}
}
@@ -1031,8 +1033,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
out_remove_swnode:
device_remove_software_node(&client->dev);
need_put = true;
-out_err_put_of_node:
- of_node_put(info->of_node);
+out_err_put_fwnode:
+ fwnode_handle_put(fwnode);
out_err:
dev_err(&adap->dev,
"Failed to register i2c client %s at 0x%02x (%d)\n",
@@ -1054,16 +1056,17 @@ EXPORT_SYMBOL_GPL(i2c_new_client_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ struct fwnode_handle *fwnode;
+
if (IS_ERR_OR_NULL(client))
return;
- if (client->dev.of_node) {
- of_node_clear_flag(client->dev.of_node, OF_POPULATED);
- of_node_put(client->dev.of_node);
- }
-
- if (ACPI_COMPANION(&client->dev))
- acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
+ fwnode = dev_fwnode(&client->dev);
+ if (is_of_node(fwnode))
+ of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+ else if (is_acpi_device_node(fwnode))
+ acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
+ fwnode_handle_put(fwnode);
device_remove_software_node(&client->dev);
device_unregister(&client->dev);
@@ -1209,11 +1212,9 @@ struct i2c_client *i2c_new_ancillary_device(struct i2c_client *client,
u32 addr = default_addr;
int i;
- if (np) {
- i = of_property_match_string(np, "reg-names", name);
- if (i >= 0)
- of_property_read_u32_index(np, "reg", i, &addr);
- }
+ i = of_property_match_string(np, "reg-names", name);
+ if (i >= 0)
+ of_property_read_u32_index(np, "reg", i, &addr);
dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr);
return i2c_new_dummy_device(client->adapter, addr);
@@ -1651,12 +1652,10 @@ int i2c_add_adapter(struct i2c_adapter *adapter)
struct device *dev = &adapter->dev;
int id;
- if (dev->of_node) {
- id = of_alias_get_id(dev->of_node, "i2c");
- if (id >= 0) {
- adapter->nr = id;
- return __i2c_add_numbered_adapter(adapter);
- }
+ id = of_alias_get_id(dev->of_node, "i2c");
+ if (id >= 0) {
+ adapter->nr = id;
+ return __i2c_add_numbered_adapter(adapter);
}
mutex_lock(&core_lock);
@@ -2146,7 +2145,7 @@ static int i2c_quirk_error(struct i2c_adapter *adap, struct i2c_msg *msg, char *
{
dev_err_ratelimited(&adap->dev, "adapter quirk: %s (addr 0x%04x, size %u, %s)\n",
err_msg, msg->addr, msg->len,
- msg->flags & I2C_M_RD ? "read" : "write");
+ str_read_write(msg->flags & I2C_M_RD));
return -EOPNOTSUPP;
}
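The probe path now resolves interrupts through the firmware-node API. The same lookup pattern in isolation, as a sketch (demo_get_irq() is hypothetical):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/property.h>

static int demo_get_irq(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int irq = -ENOENT;

	if (is_of_node(fwnode)) {
		/* Prefer the named "irq" interrupt, fall back to index 0 */
		irq = fwnode_irq_get_byname(fwnode, "irq");
		if (irq == -EINVAL || irq == -ENODATA)
			irq = fwnode_irq_get(fwnode, 0);
	}

	return irq;
}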
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 02feee6c9ba9..eb7fb202355f 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -49,7 +49,6 @@ int of_i2c_get_board_info(struct device *dev, struct device_node *node,
}
info->addr = addr;
- info->of_node = node;
info->fwnode = of_fwnode_handle(node);
if (of_property_read_bool(node, "host-notify"))
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index faefe1dfa8e5..7ee6b992b835 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/of.h>
+#include <linux/property.h>
#include "i2c-core.h"
@@ -108,15 +109,18 @@ EXPORT_SYMBOL_GPL(i2c_slave_event);
*/
bool i2c_detect_slave_mode(struct device *dev)
{
- if (IS_BUILTIN(CONFIG_OF) && dev->of_node) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (is_of_node(fwnode)) {
+ struct fwnode_handle *child __free(fwnode_handle) = NULL;
u32 reg;
- for_each_child_of_node_scoped(dev->of_node, child) {
- of_property_read_u32(child, "reg", &reg);
+ fwnode_for_each_child_node(fwnode, child) {
+ fwnode_property_read_u32(child, "reg", &reg);
if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
}
- } else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) {
+ } else if (is_acpi_device_node(fwnode)) {
dev_dbg(dev, "ACPI slave is not supported yet\n");
}
return false;
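The rewritten loop leans on scoped cleanup. A standalone sketch of the idiom (the helper and the "enable" property are hypothetical):

#include <linux/cleanup.h>
#include <linux/property.h>

static bool demo_has_enabled_child(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child __free(fwnode_handle) = NULL;

	/*
	 * __free(fwnode_handle) drops the reference held on @child even
	 * when the loop is left early through the return below.
	 */
	fwnode_for_each_child_node(fwnode, child)
		if (fwnode_property_present(child, "enable"))
			return true;

	return false;
}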
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index e73afbefe222..71eb1ef56f0c 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -16,6 +16,7 @@
#include <linux/i2c-smbus.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "i2c-core.h"
@@ -433,7 +434,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
case I2C_SMBUS_I2C_BLOCK_DATA:
if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
dev_err(&adapter->dev, "Invalid block %s size %d\n",
- read_write == I2C_SMBUS_READ ? "read" : "write",
+ str_read_write(read_write == I2C_SMBUS_READ),
data->block[0]);
return -EINVAL;
}
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 7d40e7aa3799..0316b347f9e7 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -372,12 +372,13 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device);
* - Only works on systems with 1 to 8 memory slots
*/
#if IS_ENABLED(CONFIG_DMI)
-void i2c_register_spd(struct i2c_adapter *adap)
+static void i2c_register_spd(struct i2c_adapter *adap, bool write_disabled)
{
int n, slot_count = 0, dimm_count = 0;
u16 handle;
u8 common_mem_type = 0x0, mem_type;
u64 mem_size;
+ bool instantiate = true;
const char *name;
while ((handle = dmi_memdev_handle(slot_count)) != 0xffff) {
@@ -438,6 +439,7 @@ void i2c_register_spd(struct i2c_adapter *adap)
case 0x22: /* DDR5 */
case 0x23: /* LPDDR5 */
name = "spd5118";
+ instantiate = !write_disabled;
break;
default:
dev_info(&adap->dev,
@@ -461,6 +463,9 @@ void i2c_register_spd(struct i2c_adapter *adap)
addr_list[0] = 0x50 + n;
addr_list[1] = I2C_CLIENT_END;
+ if (!instantiate)
+ continue;
+
if (!IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL))) {
dev_info(&adap->dev,
"Successfully instantiated SPD at 0x%hx\n",
@@ -469,7 +474,19 @@ void i2c_register_spd(struct i2c_adapter *adap)
}
}
}
-EXPORT_SYMBOL_GPL(i2c_register_spd);
+
+void i2c_register_spd_write_disable(struct i2c_adapter *adap)
+{
+ i2c_register_spd(adap, true);
+}
+EXPORT_SYMBOL_GPL(i2c_register_spd_write_disable);
+
+void i2c_register_spd_write_enable(struct i2c_adapter *adap)
+{
+ i2c_register_spd(adap, false);
+}
+EXPORT_SYMBOL_GPL(i2c_register_spd_write_enable);
+
#endif
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
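With the split above, a bus driver picks the variant matching its write policy. A brief sketch, assuming the new wrappers are declared in <linux/i2c-smbus.h> alongside the old i2c_register_spd(); demo_register_dimms() is hypothetical:

#include <linux/i2c-smbus.h>

static void demo_register_dimms(struct i2c_adapter *adap, bool writes_safe)
{
	/*
	 * spd5118 (DDR5/LPDDR5) devices are only instantiated by the
	 * write-enabled variant; older SPD EEPROMs are probed either way.
	 */
	if (writes_safe)
		i2c_register_spd_write_enable(adap);
	else
		i2c_register_spd_write_disable(adap);
}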
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 8a87f19bf5d5..c688af270a11 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -85,13 +85,13 @@ static int ltc4306_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(1 - offset));
}
-static void ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ltc4306 *data = gpiochip_get_data(chip);
- regmap_update_bits(data->regmap, LTC_REG_CONFIG, BIT(5 - offset),
- value ? BIT(5 - offset) : 0);
+ return regmap_update_bits(data->regmap, LTC_REG_CONFIG,
+ BIT(5 - offset), value ? BIT(5 - offset) : 0);
}
static int ltc4306_gpio_get_direction(struct gpio_chip *chip,
@@ -164,7 +164,7 @@ static int ltc4306_gpio_init(struct ltc4306 *data)
data->gpiochip.direction_input = ltc4306_gpio_direction_input;
data->gpiochip.direction_output = ltc4306_gpio_direction_output;
data->gpiochip.get = ltc4306_gpio_get;
- data->gpiochip.set = ltc4306_gpio_set;
+ data->gpiochip.set_rv = ltc4306_gpio_set;
data->gpiochip.set_config = ltc4306_gpio_set_config;
data->gpiochip.owner = THIS_MODULE;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index db95113a5b49..5bb26af0f532 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -442,9 +442,9 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
raw_spin_lock_init(&data->lock);
- data->irq = irq_domain_add_linear(client->dev.of_node,
- data->chip->nchans,
- &irq_domain_simple_ops, data);
+ data->irq = irq_domain_create_linear(of_fwnode_handle(client->dev.of_node),
+ data->chip->nchans,
+ &irq_domain_simple_ops, data);
if (!data->irq)
return -ENODEV;
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 77da199c7413..7b30db3253af 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config CDNS_I3C_MASTER
tristate "Cadence I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -9,7 +8,6 @@ config CDNS_I3C_MASTER
config DW_I3C_MASTER
tristate "Synospsys DesignWare I3C master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
# ALPHA and PARISC needs {read,write}sl()
@@ -38,7 +36,6 @@ config AST2600_I3C_MASTER
config SVC_I3C_MASTER
tristate "Silvaco I3C Dual-Role Master driver"
- depends on I3C
depends on HAS_IOMEM
depends on !(ALPHA || PARISC)
help
@@ -46,7 +43,6 @@ config SVC_I3C_MASTER
config MIPI_I3C_HCI
tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
- depends on I3C
depends on HAS_IOMEM
help
Support for hardware following the MIPI Alliance's I3C Host Controller
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index a71226d7ca59..bc4538694540 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -78,7 +78,7 @@
#define INTR_SIGNAL_ENABLE 0x28
#define INTR_FORCE 0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
-#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
+#define INTR_HC_SEQ_CANCEL BIT(11) /* HC Cancelled Transaction Sequence */
#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
#define DAT_SECTION 0x30 /* Device Address Table */
@@ -590,26 +590,27 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
u32 val;
val = reg_read(INTR_STATUS);
+ reg_write(INTR_STATUS, val);
DBG("INTR_STATUS = %#x", val);
- if (val) {
- reg_write(INTR_STATUS, val);
- }
+ if (val)
+ result = IRQ_HANDLED;
- if (val & INTR_HC_RESET_CANCEL) {
- DBG("cancelled reset");
- val &= ~INTR_HC_RESET_CANCEL;
+ if (val & INTR_HC_SEQ_CANCEL) {
+ dev_dbg(&hci->master.dev,
+ "Host Controller Cancelled Transaction Sequence\n");
+ val &= ~INTR_HC_SEQ_CANCEL;
}
if (val & INTR_HC_INTERNAL_ERR) {
dev_err(&hci->master.dev, "Host Controller Internal Error\n");
val &= ~INTR_HC_INTERNAL_ERR;
}
- hci->io->irq_handler(hci);
-
if (val)
- dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
- else
+ dev_warn_once(&hci->master.dev,
+ "unexpected INTR_STATUS %#x\n", val);
+
+ if (hci->io->irq_handler(hci))
result = IRQ_HANDLED;
return result;
@@ -699,9 +700,14 @@ static int i3c_hci_init(struct i3c_hci *hci)
if (ret)
return -ENXIO;
- /* Disable all interrupts and allow all signal updates */
+ /* Disable all interrupts */
reg_write(INTR_SIGNAL_ENABLE, 0x0);
- reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+ /*
+ * Only allow signal updates for bits 31:10 because
+ * bits 9:0 are reserved in IP version >= 0.8, and
+ * bits 5:0 are defined in IP version < 0.8 but not handled by the PIO code
+ */
+ reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10));
/* Make sure our data ordering fits the host's */
regval = reg_read(HC_CONTROL);
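For reference, the mask written above expands as follows (a compile-time check, not part of the patch):

#include <linux/bits.h>
#include <linux/build_bug.h>

/* Bits 31:10 set, bits 9:0 clear */
static_assert(GENMASK(31, 10) == 0xfffffc00);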
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 85e16de208d3..7e1a7cb94b43 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -201,11 +201,10 @@ struct svc_i3c_drvdata {
* @addrs: Array containing the dynamic addresses of each attached device
* @descs: Array of descriptors, one per attached device
* @hj_work: Hot-join work
- * @ibi_work: IBI work
* @irq: Main interrupt
- * @pclk: System clock
+ * @num_clks: Number of I3C clocks
* @fclk: Fast clock (bus)
- * @sclk: Slow clock (other events)
+ * @clks: I3C clock array
* @xferqueue: Transfer queue structure
* @xferqueue.list: List member
* @xferqueue.cur: Current ongoing transfer
@@ -229,11 +228,10 @@ struct svc_i3c_master {
u8 addrs[SVC_I3C_MAX_DEVS];
struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
struct work_struct hj_work;
- struct work_struct ibi_work;
int irq;
- struct clk *pclk;
+ int num_clks;
struct clk *fclk;
- struct clk *sclk;
+ struct clk_bulk_data *clks;
struct {
struct list_head list;
struct svc_i3c_xfer *cur;
@@ -487,9 +485,8 @@ static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 msta
return ret;
}
-static void svc_i3c_master_ibi_work(struct work_struct *work)
+static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
- struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
struct svc_i3c_i2c_dev_data *data;
unsigned int ibitype, ibiaddr;
struct i3c_dev_desc *dev;
@@ -504,7 +501,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
* schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if
* any irq or schedule happen during transaction.
*/
- guard(spinlock_irqsave)(&master->xferqueue.lock);
+ guard(spinlock)(&master->xferqueue.lock);
/*
* IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
@@ -530,7 +527,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
if (ret) {
dev_err(master->dev, "Timeout when polling for IBIWON\n");
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
status = readl(master->regs + SVC_I3C_MSTATUS);
@@ -574,17 +571,17 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
svc_i3c_master_emit_stop(master);
- goto reenable_ibis;
+ return;
}
/* Handle the non critical tasks */
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
+ svc_i3c_master_emit_stop(master);
if (dev) {
i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
master->ibi.tbq_slot = NULL;
}
- svc_i3c_master_emit_stop(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
svc_i3c_master_emit_stop(master);
@@ -597,9 +594,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
default:
break;
}
-
-reenable_ibis:
- svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
}
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
@@ -618,10 +612,12 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
!SVC_I3C_MSTATUS_STATE_SLVREQ(active))
return IRQ_HANDLED;
- svc_i3c_master_disable_interrupts(master);
-
- /* Handle the interrupt in a non atomic context */
- queue_work(master->base.wq, &master->ibi_work);
+ /*
+ * The SDA line remains low until the request is processed.
+ * Receive the request in the interrupt context to respond promptly
+ * and restore the bus to idle state.
+ */
+ svc_i3c_master_ibi_isr(master);
return IRQ_HANDLED;
}
@@ -1281,9 +1277,9 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
bool rnw, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
- unsigned int *actual_len, bool continued)
+ unsigned int *actual_len, bool continued, bool repeat_start)
{
- int retry = 2;
+ int retry = repeat_start ? 1 : 2;
u32 reg;
int ret;
@@ -1468,7 +1464,7 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
cmd->addr, cmd->in, cmd->out,
cmd->len, &cmd->actual_len,
- cmd->continued);
+ cmd->continued, i > 0);
/* cmd->xfer is NULL if I2C or CCC transfer */
if (cmd->xfer)
cmd->xfer->actual_len = cmd->actual_len;
@@ -1875,42 +1871,11 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.set_speed = svc_i3c_master_set_speed,
};
-static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
-{
- int ret = 0;
-
- ret = clk_prepare_enable(master->pclk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(master->fclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- return ret;
- }
-
- ret = clk_prepare_enable(master->sclk);
- if (ret) {
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- return ret;
- }
-
- return 0;
-}
-
-static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
-{
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- clk_disable_unprepare(master->sclk);
-}
-
static int svc_i3c_master_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct svc_i3c_master *master;
- int ret;
+ int ret, i;
master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
if (!master)
@@ -1924,30 +1889,33 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
- master->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(master->pclk))
- return PTR_ERR(master->pclk);
+ master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
+ if (master->num_clks < 0)
+ return dev_err_probe(dev, master->num_clks, "can't get I3C clocks\n");
- master->fclk = devm_clk_get(dev, "fast_clk");
+ for (i = 0; i < master->num_clks; i++) {
+ if (!strcmp(master->clks[i].id, "fast_clk"))
+ break;
+ }
+
+ if (i == master->num_clks)
+ return dev_err_probe(dev, -EINVAL,
+ "can't get I3C peripheral clock\n");
+
+ master->fclk = master->clks[i].clk;
if (IS_ERR(master->fclk))
return PTR_ERR(master->fclk);
- master->sclk = devm_clk_get(dev, "slow_clk");
- if (IS_ERR(master->sclk))
- return PTR_ERR(master->sclk);
-
master->irq = platform_get_irq(pdev, 0);
if (master->irq < 0)
return master->irq;
master->dev = dev;
-
- ret = svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "can't enable I3C clocks\n");
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
- INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
mutex_init(&master->lock);
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
@@ -1998,7 +1966,7 @@ rpm_disable:
pm_runtime_set_suspended(&pdev->dev);
err_disable_clks:
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
return ret;
}
@@ -2036,7 +2004,7 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
struct svc_i3c_master *master = dev_get_drvdata(dev);
svc_i3c_save_regs(master);
- svc_i3c_master_unprepare_clks(master);
+ clk_bulk_disable_unprepare(master->num_clks, master->clks);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -2045,9 +2013,12 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
struct svc_i3c_master *master = dev_get_drvdata(dev);
+ int ret;
pinctrl_pm_select_default_state(dev);
- svc_i3c_master_prepare_clks(master);
+ ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
+ if (ret)
+ return ret;
svc_i3c_restore_regs(master);
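The driver now leans on the clk_bulk API throughout; the core pattern in isolation (demo_* names hypothetical):

#include <linux/clk.h>

struct demo_ctx {
	int num_clks;
	struct clk_bulk_data *clks;
};

static int demo_clks_init(struct device *dev, struct demo_ctx *ctx)
{
	/* Grab every clock listed for the device in one call */
	ctx->num_clks = devm_clk_bulk_get_all(dev, &ctx->clks);
	if (ctx->num_clks < 0)
		return ctx->num_clks;

	return clk_bulk_prepare_enable(ctx->num_clks, ctx->clks);
}

static void demo_clks_exit(struct demo_ctx *ctx)
{
	clk_bulk_disable_unprepare(ctx->num_clks, ctx->clks);
}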
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 976f5be54e36..73747d20df85 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -48,14 +48,17 @@
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
+#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
-#include <asm/cpuid.h>
+#include <linux/sysfs.h>
+#include <asm/cpuid/api.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
+#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
#include <asm/smp.h>
@@ -92,9 +95,15 @@ struct idle_cpu {
*/
unsigned long auto_demotion_disable_flags;
bool disable_promotion_to_c1e;
+ bool c1_demotion_supported;
bool use_acpi;
};
+static bool c1_demotion_supported;
+static DEFINE_MUTEX(c1_demotion_mutex);
+
+static struct device *sysfs_root __initdata;
+
static const struct idle_cpu *icpu __initdata;
static struct cpuidle_state *cpuidle_state_table __initdata;
@@ -143,8 +152,8 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
int index, bool irqoff)
{
struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
- unsigned long ecx = 1*irqoff; /* break on interrupt flag */
+ unsigned int eax = flg2MWAIT(state->flags);
+ unsigned int ecx = 1*irqoff; /* break on interrupt flag */
mwait_idle_with_hints(eax, ecx);
@@ -217,9 +226,9 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
+ unsigned int eax = flg2MWAIT(state->flags);
+ unsigned int ecx = 1; /* break on interrupt flag */
if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
fpu_idle_fpregs();
@@ -1549,18 +1558,21 @@ static const struct idle_cpu idle_cpu_gmt __initconst = {
static const struct idle_cpu idle_cpu_spr __initconst = {
.state_table = spr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_gnr __initconst = {
.state_table = gnr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_gnrd __initconst = {
.state_table = gnrd_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
@@ -1599,12 +1611,14 @@ static const struct idle_cpu idle_cpu_snr __initconst = {
static const struct idle_cpu idle_cpu_grr __initconst = {
.state_table = grr_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
static const struct idle_cpu idle_cpu_srf __initconst = {
.state_table = srf_cstates,
.disable_promotion_to_c1e = true,
+ .c1_demotion_supported = true,
.use_acpi = true,
};
@@ -1928,35 +1942,35 @@ static void __init bxt_idle_state_table_update(void)
unsigned long long msr;
unsigned int usec;
- rdmsrl(MSR_PKGC6_IRTL, msr);
+ rdmsrq(MSR_PKGC6_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[2].exit_latency = usec;
bxt_cstates[2].target_residency = usec;
}
- rdmsrl(MSR_PKGC7_IRTL, msr);
+ rdmsrq(MSR_PKGC7_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[3].exit_latency = usec;
bxt_cstates[3].target_residency = usec;
}
- rdmsrl(MSR_PKGC8_IRTL, msr);
+ rdmsrq(MSR_PKGC8_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[4].exit_latency = usec;
bxt_cstates[4].target_residency = usec;
}
- rdmsrl(MSR_PKGC9_IRTL, msr);
+ rdmsrq(MSR_PKGC9_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[5].exit_latency = usec;
bxt_cstates[5].target_residency = usec;
}
- rdmsrl(MSR_PKGC10_IRTL, msr);
+ rdmsrq(MSR_PKGC10_IRTL, msr);
usec = irtl_2_usec(msr);
if (usec) {
bxt_cstates[6].exit_latency = usec;
@@ -1984,7 +1998,7 @@ static void __init sklh_idle_state_table_update(void)
if ((mwait_substates & (0xF << 28)) == 0)
return;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/* PC10 is not enabled in PKG C-state limit */
if ((msr & 0xF) != 8)
@@ -1996,7 +2010,7 @@ static void __init sklh_idle_state_table_update(void)
/* if SGX is present */
if (ebx & (1 << 2)) {
- rdmsrl(MSR_IA32_FEAT_CTL, msr);
+ rdmsrq(MSR_IA32_FEAT_CTL, msr);
/* if SGX is enabled */
if (msr & (1 << 18))
@@ -2015,7 +2029,7 @@ static void __init skx_idle_state_table_update(void)
{
unsigned long long msr;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/*
* 000b: C0/C1 (no package C-state support)
@@ -2068,7 +2082,7 @@ static void __init spr_idle_state_table_update(void)
* C6. However, if PC6 is disabled, we update the numbers to match
* core C6.
*/
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr);
/* Limit value 2 and above allow for PC6. */
if ((msr & 0x7) < 2) {
@@ -2082,8 +2096,8 @@ static void __init spr_idle_state_table_update(void)
*/
static void __init byt_cht_auto_demotion_disable(void)
{
- wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
- wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrq(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrq(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
@@ -2241,27 +2255,27 @@ static void auto_demotion_disable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
msr_bits &= ~auto_demotion_disable_flags;
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
+ wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
static void c1e_promotion_enable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
msr_bits |= 0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ wrmsrq(MSR_IA32_POWER_CTL, msr_bits);
}
static void c1e_promotion_disable(void)
{
unsigned long long msr_bits;
- rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
msr_bits &= ~0x2;
- wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+ wrmsrq(MSR_IA32_POWER_CTL, msr_bits);
}
/**
@@ -2324,6 +2338,88 @@ static void __init intel_idle_cpuidle_devices_uninit(void)
cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
}
+static void intel_c1_demotion_toggle(void *enable)
+{
+ unsigned long long msr_val;
+
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+ /*
+ * Enable/disable C1 undemotion along with C1 demotion, as this is the
+ * most sensible configuration in general.
+ */
+ if (enable)
+ msr_val |= NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE;
+ else
+ msr_val &= ~(NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE);
+ wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+}
+
+static ssize_t intel_c1_demotion_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool enable;
+ int err;
+
+ err = kstrtobool(buf, &enable);
+ if (err)
+ return err;
+
+ mutex_lock(&c1_demotion_mutex);
+ /* Enable/disable C1 demotion on all CPUs */
+ on_each_cpu(intel_c1_demotion_toggle, (void *)enable, 1);
+ mutex_unlock(&c1_demotion_mutex);
+
+ return count;
+}
+
+static ssize_t intel_c1_demotion_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long long msr_val;
+
+ /*
+ * Read the MSR value for a CPU and assume it is the same for all CPUs. Any other
+ * configuration would be a BIOS bug.
+ */
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+ return sysfs_emit(buf, "%d\n", !!(msr_val & NHM_C1_AUTO_DEMOTE));
+}
+static DEVICE_ATTR_RW(intel_c1_demotion);
+
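+/* The attribute shows up as /sys/devices/system/cpu/cpuidle/intel_c1_demotion. */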
+static int __init intel_idle_sysfs_init(void)
+{
+ int err;
+
+ if (!c1_demotion_supported)
+ return 0;
+
+ sysfs_root = bus_get_dev_root(&cpu_subsys);
+ if (!sysfs_root)
+ return 0;
+
+ err = sysfs_add_file_to_group(&sysfs_root->kobj,
+ &dev_attr_intel_c1_demotion.attr,
+ "cpuidle");
+ if (err) {
+ put_device(sysfs_root);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __init intel_idle_sysfs_uninit(void)
+{
+ if (!sysfs_root)
+ return;
+
+ sysfs_remove_file_from_group(&sysfs_root->kobj,
+ &dev_attr_intel_c1_demotion.attr,
+ "cpuidle");
+ put_device(sysfs_root);
+}
+
static int __init intel_idle_init(void)
{
const struct x86_cpu_id *id;
@@ -2374,6 +2470,8 @@ static int __init intel_idle_init(void)
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
if (icpu->disable_promotion_to_c1e)
c1e_promotion = C1E_PROMOTION_DISABLE;
+ if (icpu->c1_demotion_supported)
+ c1_demotion_supported = true;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {
@@ -2387,6 +2485,10 @@ static int __init intel_idle_init(void)
if (!intel_idle_cpuidle_devices)
return -ENOMEM;
+ retval = intel_idle_sysfs_init();
+ if (retval)
+ pr_warn("failed to initialized sysfs");
+
intel_idle_cpuidle_driver_init(&intel_idle_driver);
retval = cpuidle_register_driver(&intel_idle_driver);
@@ -2405,17 +2507,20 @@ static int __init intel_idle_init(void)
pr_debug("Local APIC timer is reliable in %s\n",
boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");
+ arch_cpu_rescan_dead_smt_siblings();
+
return 0;
hp_setup_fail:
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
+ intel_idle_sysfs_uninit();
free_percpu(intel_idle_cpuidle_devices);
return retval;
}
-device_initcall(intel_idle_init);
+subsys_initcall_sync(intel_idle_init);
/*
* We are not really modular, but we used to support that. Meaning we also
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
index bc6d634bd85c..7d482dd595fa 100644
--- a/drivers/iio/accel/adxl345.h
+++ b/drivers/iio/accel/adxl345.h
@@ -111,6 +111,10 @@
*/
#define ADXL375_USCALE 480000
+struct regmap;
+
+bool adxl345_is_volatile_reg(struct device *dev, unsigned int reg);
+
struct adxl345_chip_info {
const char *name;
int uscale;
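The helper is exported for the bus glue drivers; a minimal sketch of wiring it into a regmap config (register widths and cache type are assumptions, not dictated by this header):

#include <linux/regmap.h>
#include "adxl345.h"

static const struct regmap_config demo_adxl345_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	/* Keep data/status registers out of the register cache */
	.volatile_reg = adxl345_is_volatile_reg,
	.cache_type = REGCACHE_MAPLE,
};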
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 375c27d16827..2e5fdd479e8d 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -8,6 +8,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/property.h>
@@ -17,6 +18,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
#include <linux/iio/kfifo_buf.h>
#include "adxl345.h"
@@ -31,18 +33,71 @@
#define ADXL345_INT1 0
#define ADXL345_INT2 1
+#define ADXL345_REG_TAP_AXIS_MSK GENMASK(2, 0)
+#define ADXL345_REG_TAP_SUPPRESS_MSK BIT(3)
+#define ADXL345_REG_TAP_SUPPRESS BIT(3)
+
+#define ADXL345_TAP_Z_EN BIT(0)
+#define ADXL345_TAP_Y_EN BIT(1)
+#define ADXL345_TAP_X_EN BIT(2)
+
+/* single/double tap */
+enum adxl345_tap_type {
+ ADXL345_SINGLE_TAP,
+ ADXL345_DOUBLE_TAP,
+};
+
+static const unsigned int adxl345_tap_int_reg[] = {
+ [ADXL345_SINGLE_TAP] = ADXL345_INT_SINGLE_TAP,
+ [ADXL345_DOUBLE_TAP] = ADXL345_INT_DOUBLE_TAP,
+};
+
+enum adxl345_tap_time_type {
+ ADXL345_TAP_TIME_LATENT,
+ ADXL345_TAP_TIME_WINDOW,
+ ADXL345_TAP_TIME_DUR,
+};
+
+static const unsigned int adxl345_tap_time_reg[] = {
+ [ADXL345_TAP_TIME_LATENT] = ADXL345_REG_LATENT,
+ [ADXL345_TAP_TIME_WINDOW] = ADXL345_REG_WINDOW,
+ [ADXL345_TAP_TIME_DUR] = ADXL345_REG_DUR,
+};
+
struct adxl345_state {
const struct adxl345_chip_info *info;
struct regmap *regmap;
bool fifo_delay; /* delay: delay is needed for SPI */
int irq;
- u8 intio;
- u8 int_map;
u8 watermark;
u8 fifo_mode;
+
+ u32 tap_duration_us;
+ u32 tap_latent_us;
+ u32 tap_window_us;
+
__le16 fifo_buf[ADXL345_DIRS * ADXL345_FIFO_SIZE + 1] __aligned(IIO_DMA_MINALIGN);
};
+static const struct iio_event_spec adxl345_events[] = {
+ {
+ /* single tap */
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_SINGLETAP,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_TIMEOUT),
+ },
+ {
+ /* double tap */
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_DOUBLETAP,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT) |
+ BIT(IIO_EV_INFO_TAP2_MIN_DELAY),
+ },
+};
+
#define ADXL345_CHANNEL(index, reg, axis) { \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -59,6 +114,8 @@ struct adxl345_state {
.storagebits = 16, \
.endianness = IIO_LE, \
}, \
+ .event_spec = adxl345_events, \
+ .num_event_specs = ARRAY_SIZE(adxl345_events), \
}
enum adxl345_chans {
@@ -76,6 +133,25 @@ static const unsigned long adxl345_scan_masks[] = {
0
};
+bool adxl345_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADXL345_REG_DATA_AXIS(0):
+ case ADXL345_REG_DATA_AXIS(1):
+ case ADXL345_REG_DATA_AXIS(2):
+ case ADXL345_REG_DATA_AXIS(3):
+ case ADXL345_REG_DATA_AXIS(4):
+ case ADXL345_REG_DATA_AXIS(5):
+ case ADXL345_REG_ACT_TAP_STATUS:
+ case ADXL345_REG_FIFO_STATUS:
+ case ADXL345_REG_INT_SOURCE:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(adxl345_is_volatile_reg, "IIO_ADXL345");
+
/**
* adxl345_set_measure_en() - Enable and disable measuring.
*
@@ -96,24 +172,215 @@ static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
return regmap_write(st->regmap, ADXL345_REG_POWER_CTL, val);
}
-static int adxl345_set_interrupts(struct adxl345_state *st)
+/* tap */
+
+static int _adxl345_set_tap_int(struct adxl345_state *st,
+ enum adxl345_tap_type type, bool state)
{
+ unsigned int int_map = 0x00;
+ unsigned int tap_threshold;
+ bool axis_valid;
+ bool singletap_args_valid = false;
+ bool doubletap_args_valid = false;
+ bool en = false;
+ u32 axis_ctrl;
int ret;
- unsigned int int_enable = st->int_map;
- unsigned int int_map;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_TAP_AXIS, &axis_ctrl);
+ if (ret)
+ return ret;
+
+ axis_valid = FIELD_GET(ADXL345_REG_TAP_AXIS_MSK, axis_ctrl) > 0;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_THRESH_TAP, &tap_threshold);
+ if (ret)
+ return ret;
/*
- * Any bits set to 0 in the INT map register send their respective
- * interrupts to the INT1 pin, whereas bits set to 1 send their respective
- * interrupts to the INT2 pin. The intio shall convert this accordingly.
+ * Note: A value of 0 for threshold and/or dur may result in undesirable
+ * behavior if single tap/double tap interrupts are enabled.
*/
- int_map = st->intio ? st->int_map : ~st->int_map;
+ singletap_args_valid = tap_threshold > 0 && st->tap_duration_us > 0;
- ret = regmap_write(st->regmap, ADXL345_REG_INT_MAP, int_map);
+ if (type == ADXL345_SINGLE_TAP) {
+ en = axis_valid && singletap_args_valid;
+ } else {
+ /* doubletap: window must be equal to or greater than latent! */
+ doubletap_args_valid = st->tap_latent_us > 0 &&
+ st->tap_window_us > 0 &&
+ st->tap_window_us >= st->tap_latent_us;
+
+ en = axis_valid && singletap_args_valid && doubletap_args_valid;
+ }
+
+ if (state && en)
+ int_map |= adxl345_tap_int_reg[type];
+
+ return regmap_update_bits(st->regmap, ADXL345_REG_INT_ENABLE,
+ adxl345_tap_int_reg[type], int_map);
+}
+
+static int adxl345_is_tap_en(struct adxl345_state *st,
+ enum iio_modifier axis,
+ enum adxl345_tap_type type, bool *en)
+{
+ unsigned int regval;
+ u32 axis_ctrl;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_TAP_AXIS, &axis_ctrl);
if (ret)
return ret;
- return regmap_write(st->regmap, ADXL345_REG_INT_ENABLE, int_enable);
+ /* Check whether the axis is enabled for tap detection. */
+ switch (axis) {
+ case IIO_MOD_X:
+ *en = FIELD_GET(ADXL345_TAP_X_EN, axis_ctrl);
+ break;
+ case IIO_MOD_Y:
+ *en = FIELD_GET(ADXL345_TAP_Y_EN, axis_ctrl);
+ break;
+ case IIO_MOD_Z:
+ *en = FIELD_GET(ADXL345_TAP_Z_EN, axis_ctrl);
+ break;
+ default:
+ *en = false;
+ return -EINVAL;
+ }
+
+ if (*en) {
+ /*
+ * If axis allow for tap detection, verify if the interrupt is
+ * enabled for tap detection.
+ */
+ ret = regmap_read(st->regmap, ADXL345_REG_INT_ENABLE, &regval);
+ if (ret)
+ return ret;
+
+ *en = adxl345_tap_int_reg[type] & regval;
+ }
+
+ return 0;
+}
+
+static int adxl345_set_singletap_en(struct adxl345_state *st,
+ enum iio_modifier axis, bool en)
+{
+ int ret;
+ u32 axis_ctrl;
+
+ switch (axis) {
+ case IIO_MOD_X:
+ axis_ctrl = ADXL345_TAP_X_EN;
+ break;
+ case IIO_MOD_Y:
+ axis_ctrl = ADXL345_TAP_Y_EN;
+ break;
+ case IIO_MOD_Z:
+ axis_ctrl = ADXL345_TAP_Z_EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (en)
+ ret = regmap_set_bits(st->regmap, ADXL345_REG_TAP_AXIS,
+ axis_ctrl);
+ else
+ ret = regmap_clear_bits(st->regmap, ADXL345_REG_TAP_AXIS,
+ axis_ctrl);
+ if (ret)
+ return ret;
+
+ return _adxl345_set_tap_int(st, ADXL345_SINGLE_TAP, en);
+}
+
+static int adxl345_set_doubletap_en(struct adxl345_state *st, bool en)
+{
+ int ret;
+
+ /*
+ * Suppress detection of spikes during the latency period so they are
+ * not counted as double taps; this suppression is optional for double
+ * tap detection.
+ */
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_TAP_AXIS,
+ ADXL345_REG_TAP_SUPPRESS_MSK,
+ en ? ADXL345_REG_TAP_SUPPRESS : 0x00);
+ if (ret)
+ return ret;
+
+ return _adxl345_set_tap_int(st, ADXL345_DOUBLE_TAP, en);
+}
+
+static int _adxl345_set_tap_time(struct adxl345_state *st,
+ enum adxl345_tap_time_type type, u32 val_us)
+{
+ unsigned int regval;
+
+ switch (type) {
+ case ADXL345_TAP_TIME_WINDOW:
+ st->tap_window_us = val_us;
+ break;
+ case ADXL345_TAP_TIME_LATENT:
+ st->tap_latent_us = val_us;
+ break;
+ case ADXL345_TAP_TIME_DUR:
+ st->tap_duration_us = val_us;
+ break;
+ }
+
+ /*
+ * The scale factor is 1250us / LSB for tap_window_us and tap_latent_us.
+ * For tap_duration_us the scale factor is 625us / LSB.
+ */
+ if (type == ADXL345_TAP_TIME_DUR)
+ regval = DIV_ROUND_CLOSEST(val_us, 625);
+ else
+ regval = DIV_ROUND_CLOSEST(val_us, 1250);
+
+ return regmap_write(st->regmap, adxl345_tap_time_reg[type], regval);
+}
+
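Worked through with the defaults this patch programs at probe time, the conversion above yields:

    tap duration: 10000 us / 625 us per LSB  -> ADXL345_REG_DUR    = 16 (0x10)
    tap window:   80000 us / 1250 us per LSB -> ADXL345_REG_WINDOW = 64 (0x40)
    tap latent:   20000 us / 1250 us per LSB -> ADXL345_REG_LATENT = 16 (0x10)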
+static int adxl345_set_tap_duration(struct adxl345_state *st, u32 val_int,
+ u32 val_fract_us)
+{
+ /*
+ * Max value is 255 * 625 us = 0.159375 seconds
+ *
+ * Note: the scaling is similar to the scaling in the ADXL380
+ */
+ if (val_int || val_fract_us > 159375)
+ return -EINVAL;
+
+ return _adxl345_set_tap_time(st, ADXL345_TAP_TIME_DUR, val_fract_us);
+}
+
+static int adxl345_set_tap_window(struct adxl345_state *st, u32 val_int,
+ u32 val_fract_us)
+{
+ /*
+ * Max value is 255 * 1250 us = 0.318750 seconds
+ *
+ * Note: the scaling is similar to the scaling in the ADXL380
+ */
+ if (val_int || val_fract_us > 318750)
+ return -EINVAL;
+
+ return _adxl345_set_tap_time(st, ADXL345_TAP_TIME_WINDOW, val_fract_us);
+}
+
+static int adxl345_set_tap_latent(struct adxl345_state *st, u32 val_int,
+ u32 val_fract_us)
+{
+ /*
+ * Max value is 255 * 1250 us = 0.318750 seconds
+ *
+ * Note: the scaling is similar to the scaling in the ADXL380
+ */
+ if (val_int || val_fract_us > 318750)
+ return -EINVAL;
+
+ return _adxl345_set_tap_time(st, ADXL345_TAP_TIME_LATENT, val_fract_us);
}
static int adxl345_read_raw(struct iio_dev *indio_dev,
@@ -136,7 +403,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
ret = regmap_bulk_read(st->regmap,
ADXL345_REG_DATA_AXIS(chan->address),
&accel, sizeof(accel));
- if (ret < 0)
+ if (ret)
return ret;
*val = sign_extend32(le16_to_cpu(accel), 12);
@@ -148,7 +415,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBBIAS:
ret = regmap_read(st->regmap,
ADXL345_REG_OFS_AXIS(chan->address), &regval);
- if (ret < 0)
+ if (ret)
return ret;
/*
* 8-bit resolution at +/- 2g, that is 4x accel data scale
@@ -159,7 +426,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = regmap_read(st->regmap, ADXL345_REG_BW_RATE, &regval);
- if (ret < 0)
+ if (ret)
return ret;
samp_freq_nhz = ADXL345_BASE_RATE_NANO_HZ <<
@@ -201,6 +468,157 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int adxl345_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ bool int_en;
+ int ret;
+
+ switch (type) {
+ case IIO_EV_TYPE_GESTURE:
+ switch (dir) {
+ case IIO_EV_DIR_SINGLETAP:
+ ret = adxl345_is_tap_en(st, chan->channel2,
+ ADXL345_SINGLE_TAP, &int_en);
+ if (ret)
+ return ret;
+ return int_en;
+ case IIO_EV_DIR_DOUBLETAP:
+ ret = adxl345_is_tap_en(st, chan->channel2,
+ ADXL345_DOUBLE_TAP, &int_en);
+ if (ret)
+ return ret;
+ return int_en;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl345_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+
+ switch (type) {
+ case IIO_EV_TYPE_GESTURE:
+ switch (dir) {
+ case IIO_EV_DIR_SINGLETAP:
+ return adxl345_set_singletap_en(st, chan->channel2, state);
+ case IIO_EV_DIR_DOUBLETAP:
+ return adxl345_set_doubletap_en(st, state);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl345_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ unsigned int tap_threshold;
+ int ret;
+
+ switch (type) {
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ /*
+ * The scale factor would be 62.5mg/LSB (i.e. 0xFF = 16g), but it
+ * is not applied here. For this general purpose sensor, the signal
+ * intensity matters more than the absolute measured g value.
+ */
+ ret = regmap_read(st->regmap, ADXL345_REG_THRESH_TAP,
+ &tap_threshold);
+ if (ret)
+ return ret;
+ *val = sign_extend32(tap_threshold, 7);
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_TIMEOUT:
+ *val = st->tap_duration_us;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ *val = st->tap_window_us;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ *val = st->tap_latent_us;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl345_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = adxl345_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ switch (type) {
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = regmap_write(st->regmap, ADXL345_REG_THRESH_TAP,
+ min(val, 0xFF));
+ if (ret)
+ return ret;
+ break;
+ case IIO_EV_INFO_TIMEOUT:
+ ret = adxl345_set_tap_duration(st, val, val2);
+ if (ret)
+ return ret;
+ break;
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ ret = adxl345_set_tap_window(st, val, val2);
+ if (ret)
+ return ret;
+ break;
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ ret = adxl345_set_tap_latent(st, val, val2);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return adxl345_set_measure_en(st, true);
+}
+
static int adxl345_reg_access(struct iio_dev *indio_dev, unsigned int reg,
unsigned int writeval, unsigned int *readval)
{
@@ -214,7 +632,7 @@ static int adxl345_reg_access(struct iio_dev *indio_dev, unsigned int reg,
static int adxl345_set_watermark(struct iio_dev *indio_dev, unsigned int value)
{
struct adxl345_state *st = iio_priv(indio_dev);
- unsigned int fifo_mask = 0x1F;
+ const unsigned int fifo_mask = 0x1F, watermark_mask = 0x02;
int ret;
value = min(value, ADXL345_FIFO_SIZE - 1);
@@ -224,9 +642,8 @@ static int adxl345_set_watermark(struct iio_dev *indio_dev, unsigned int value)
return ret;
st->watermark = value;
- st->int_map |= ADXL345_INT_WATERMARK;
-
- return 0;
+ return regmap_update_bits(st->regmap, ADXL345_REG_INT_ENABLE,
+ watermark_mask, ADXL345_INT_WATERMARK);
}
static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
@@ -265,21 +682,25 @@ static const struct attribute_group adxl345_attrs_group = {
static int adxl345_set_fifo(struct adxl345_state *st)
{
+ unsigned int intio;
int ret;
/* FIFO should only be configured while in standby mode */
ret = adxl345_set_measure_en(st, false);
- if (ret < 0)
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_INT_MAP, &intio);
+ if (ret)
return ret;
ret = regmap_write(st->regmap, ADXL345_REG_FIFO_CTL,
FIELD_PREP(ADXL345_FIFO_CTL_SAMPLES_MSK,
st->watermark) |
- FIELD_PREP(ADXL345_FIFO_CTL_TRIGGER_MSK,
- st->intio) |
+ FIELD_PREP(ADXL345_FIFO_CTL_TRIGGER_MSK, intio) |
FIELD_PREP(ADXL345_FIFO_CTL_MODE_MSK,
st->fifo_mode));
- if (ret < 0)
+ if (ret)
return ret;
return adxl345_set_measure_en(st, true);
@@ -300,7 +721,7 @@ static int adxl345_get_samples(struct adxl345_state *st)
int ret;
ret = regmap_read(st->regmap, ADXL345_REG_FIFO_STATUS, &regval);
- if (ret < 0)
+ if (ret)
return ret;
return FIELD_GET(ADXL345_REG_FIFO_STATUS_MSK, regval);
@@ -328,7 +749,7 @@ static int adxl345_fifo_transfer(struct adxl345_state *st, int samples)
/* read 3x 2 byte elements from base address into next fifo_buf position */
ret = regmap_bulk_read(st->regmap, ADXL345_REG_XYZ_BASE,
st->fifo_buf + (i * count / 2), count);
- if (ret < 0)
+ if (ret)
return ret;
/*
@@ -374,11 +795,6 @@ static void adxl345_fifo_reset(struct adxl345_state *st)
static int adxl345_buffer_postenable(struct iio_dev *indio_dev)
{
struct adxl345_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = adxl345_set_interrupts(st);
- if (ret < 0)
- return ret;
st->fifo_mode = ADXL345_FIFO_STREAM;
return adxl345_set_fifo(st);
@@ -391,11 +807,10 @@ static int adxl345_buffer_predisable(struct iio_dev *indio_dev)
st->fifo_mode = ADXL345_FIFO_BYPASS;
ret = adxl345_set_fifo(st);
- if (ret < 0)
+ if (ret)
return ret;
- st->int_map = 0x00;
- return adxl345_set_interrupts(st);
+ return regmap_write(st->regmap, ADXL345_REG_INT_ENABLE, 0x00);
}
static const struct iio_buffer_setup_ops adxl345_buffer_ops = {
@@ -422,6 +837,48 @@ static int adxl345_fifo_push(struct iio_dev *indio_dev,
return 0;
}
+static int adxl345_push_event(struct iio_dev *indio_dev, int int_stat,
+ enum iio_modifier tap_dir)
+{
+ s64 ts = iio_get_time_ns(indio_dev);
+ struct adxl345_state *st = iio_priv(indio_dev);
+ int samples;
+ int ret = -ENOENT;
+
+ if (FIELD_GET(ADXL345_INT_SINGLE_TAP, int_stat)) {
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, tap_dir,
+ IIO_EV_TYPE_GESTURE,
+ IIO_EV_DIR_SINGLETAP),
+ ts);
+ if (ret)
+ return ret;
+ }
+
+ if (FIELD_GET(ADXL345_INT_DOUBLE_TAP, int_stat)) {
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, tap_dir,
+ IIO_EV_TYPE_GESTURE,
+ IIO_EV_DIR_DOUBLETAP),
+ ts);
+ if (ret)
+ return ret;
+ }
+
+ if (FIELD_GET(ADXL345_INT_WATERMARK, int_stat)) {
+ samples = adxl345_get_samples(st);
+ if (samples < 0)
+ return -EINVAL;
+
+ if (adxl345_fifo_push(indio_dev, samples) < 0)
+ return -EINVAL;
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
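On the userspace side these pushes arrive as struct iio_event_data records on the device's event fd. A minimal consumer sketch, assuming the standard uapi <linux/iio/events.h> macros and an event fd obtained via the IIO_GET_EVENT_FD_IOCTL ioctl:

    struct iio_event_data ev;

    /* Each record carries a packed event id plus a timestamp. */
    if (read(event_fd, &ev, sizeof(ev)) == sizeof(ev) &&
        IIO_EVENT_CODE_EXTRACT_TYPE(ev.id) == IIO_EV_TYPE_GESTURE &&
        IIO_EVENT_CODE_EXTRACT_DIR(ev.id) == IIO_EV_DIR_SINGLETAP)
            printf("single tap at %lld\n", (long long)ev.timestamp);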
/**
* adxl345_irq_handler() - Handle irqs of the ADXL345.
* @irq: The irq being handled.
@@ -433,21 +890,35 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
{
struct iio_dev *indio_dev = p;
struct adxl345_state *st = iio_priv(indio_dev);
+ unsigned int regval;
+ enum iio_modifier tap_dir = IIO_NO_MOD;
+ u32 axis_ctrl;
int int_stat;
- int samples;
+ int ret;
- if (regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &int_stat))
+ ret = regmap_read(st->regmap, ADXL345_REG_TAP_AXIS, &axis_ctrl);
+ if (ret)
return IRQ_NONE;
- if (FIELD_GET(ADXL345_INT_WATERMARK, int_stat)) {
- samples = adxl345_get_samples(st);
- if (samples < 0)
- goto err;
-
- if (adxl345_fifo_push(indio_dev, samples) < 0)
- goto err;
+ if (FIELD_GET(ADXL345_REG_TAP_AXIS_MSK, axis_ctrl)) {
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_TAP_STATUS, &regval);
+ if (ret)
+ return IRQ_NONE;
+
+ if (FIELD_GET(ADXL345_TAP_Z_EN, regval))
+ tap_dir = IIO_MOD_Z;
+ else if (FIELD_GET(ADXL345_TAP_Y_EN, regval))
+ tap_dir = IIO_MOD_Y;
+ else if (FIELD_GET(ADXL345_TAP_X_EN, regval))
+ tap_dir = IIO_MOD_X;
}
+ if (regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &int_stat))
+ return IRQ_NONE;
+
+ if (adxl345_push_event(indio_dev, int_stat, tap_dir))
+ goto err;
+
if (FIELD_GET(ADXL345_INT_OVERRUN, int_stat))
goto err;
@@ -464,6 +935,10 @@ static const struct iio_info adxl345_info = {
.read_raw = adxl345_read_raw,
.write_raw = adxl345_write_raw,
.write_raw_get_fmt = adxl345_write_raw_get_fmt,
+ .read_event_config = adxl345_read_event_config,
+ .write_event_config = adxl345_write_event_config,
+ .read_event_value = adxl345_read_event_value,
+ .write_event_value = adxl345_write_event_value,
.debugfs_reg_access = &adxl345_reg_access,
.hwfifo_set_watermark = adxl345_set_watermark,
};
@@ -492,10 +967,12 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
struct adxl345_state *st;
struct iio_dev *indio_dev;
u32 regval;
+ u8 intio = ADXL345_INT1;
unsigned int data_format_mask = (ADXL345_DATA_FORMAT_RANGE |
ADXL345_DATA_FORMAT_JUSTIFY |
ADXL345_DATA_FORMAT_FULL_RES |
ADXL345_DATA_FORMAT_SELF_TEST);
+ unsigned int tap_threshold;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
@@ -509,6 +986,12 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
return -ENODEV;
st->fifo_delay = fifo_delay_default;
+ /* Init with reasonable values */
+ tap_threshold = 48; /* 48 [0x30] -> ~3 g */
+ st->tap_duration_us = 16; /* 16 [0x10] -> 0.010 s at 625 us/LSB */
+ st->tap_window_us = 64; /* 64 [0x40] -> 0.080 s at 1250 us/LSB */
+ st->tap_latent_us = 16; /* 16 [0x10] -> 0.020 s at 1250 us/LSB */
+
indio_dev->name = st->info->name;
indio_dev->info = &adxl345_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -516,6 +999,11 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
indio_dev->num_channels = ARRAY_SIZE(adxl345_channels);
indio_dev->available_scan_masks = adxl345_scan_masks;
+ /* Reset interrupts at start up */
+ ret = regmap_write(st->regmap, ADXL345_REG_INT_ENABLE, 0x00);
+ if (ret)
+ return ret;
+
if (setup) {
/* Perform optional initial bus specific configuration */
ret = setup(dev, st->regmap);
@@ -540,7 +1028,7 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
}
ret = regmap_read(st->regmap, ADXL345_REG_DEVID, &regval);
- if (ret < 0)
+ if (ret)
return dev_err_probe(dev, ret, "Error reading device ID\n");
if (regval != ADXL345_DEVID)
@@ -549,23 +1037,37 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
/* Enable measurement mode */
ret = adxl345_set_measure_en(st, true);
- if (ret < 0)
+ if (ret)
return dev_err_probe(dev, ret, "Failed to enable measurement mode\n");
ret = devm_add_action_or_reset(dev, adxl345_powerdown, st);
- if (ret < 0)
+ if (ret)
return ret;
- st->intio = ADXL345_INT1;
st->irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT1");
if (st->irq < 0) {
- st->intio = ADXL345_INT2;
+ intio = ADXL345_INT2;
st->irq = fwnode_irq_get_byname(dev_fwnode(dev), "INT2");
if (st->irq < 0)
- st->intio = ADXL345_INT_NONE;
+ intio = ADXL345_INT_NONE;
}
- if (st->intio != ADXL345_INT_NONE) {
+ if (intio != ADXL345_INT_NONE) {
+ /*
+ * Any bits set to 0 in the INT map register send their respective
+ * interrupts to the INT1 pin, whereas bits set to 1 send them to the
+ * INT2 pin. Route all interrupts to the pin selected by intio.
+ */
+ regval = intio ? 0xff : 0;
+
+ ret = regmap_write(st->regmap, ADXL345_REG_INT_MAP, regval);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL345_REG_THRESH_TAP, tap_threshold);
+ if (ret)
+ return ret;
+
/* FIFO_STREAM mode is going to be activated later */
ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, &adxl345_buffer_ops);
if (ret)
@@ -581,7 +1083,7 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
ret = regmap_write(st->regmap, ADXL345_REG_FIFO_CTL,
FIELD_PREP(ADXL345_FIFO_CTL_MODE_MSK,
ADXL345_FIFO_BYPASS));
- if (ret < 0)
+ if (ret)
return ret;
}
diff --git a/drivers/iio/accel/adxl345_i2c.c b/drivers/iio/accel/adxl345_i2c.c
index 8c385dd6c01d..af84c0043c6c 100644
--- a/drivers/iio/accel/adxl345_i2c.c
+++ b/drivers/iio/accel/adxl345_i2c.c
@@ -17,6 +17,8 @@
static const struct regmap_config adxl345_i2c_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
+ .volatile_reg = adxl345_is_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
};
static int adxl345_i2c_probe(struct i2c_client *client)
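A note on the regmap additions above: with REGCACHE_MAPLE enabled, regmap serves reads from its cache unless the register is flagged volatile, so the shared adxl345_is_volatile_reg() callback keeps the data, status and interrupt-source registers hitting the hardware on every read. A small sketch of the resulting behavior (assuming standard regmap cache semantics):

    unsigned int v;

    /* Served from the cache after the first read: not volatile. */
    regmap_read(st->regmap, ADXL345_REG_THRESH_TAP, &v);

    /* Always goes to the bus: INT_SOURCE is volatile (reading it
     * changes device state, so a cached value would be stale). */
    regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &v);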
diff --git a/drivers/iio/accel/adxl345_spi.c b/drivers/iio/accel/adxl345_spi.c
index 7e518aea17bf..0315f4bfd69a 100644
--- a/drivers/iio/accel/adxl345_spi.c
+++ b/drivers/iio/accel/adxl345_spi.c
@@ -19,6 +19,8 @@ static const struct regmap_config adxl345_spi_regmap_config = {
.val_bits = 8,
/* Setting bits 7 and 6 enables multiple-byte read */
.read_flag_mask = BIT(7) | BIT(6),
+ .volatile_reg = adxl345_is_volatile_reg,
+ .cache_type = REGCACHE_MAPLE,
};
static int adxl345_spi_setup(struct device *dev, struct regmap *regmap)
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
index cbac622ef821..2e00fd51b4d5 100644
--- a/drivers/iio/accel/adxl355_core.c
+++ b/drivers/iio/accel/adxl355_core.c
@@ -666,8 +666,8 @@ static irqreturn_t adxl355_trigger_handler(int irq, void *p)
if (ret)
goto out_unlock_notify;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->buffer,
+ sizeof(data->buffer), pf->timestamp);
out_unlock_notify:
mutex_unlock(&data->lock);
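The same mechanical conversion recurs throughout the rest of this series: iio_push_to_buffers_with_ts() differs from the older iio_push_to_buffers_with_timestamp() in taking the size of the supplied buffer, which lets the core validate it against the expected scan size. The pattern, in short:

    /* old: buffer size implicit and unchecked */
    iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, ts);

    /* new: pass sizeof() so the core can sanity check the length */
    iio_push_to_buffers_with_ts(indio_dev, &data->buffer,
                                sizeof(data->buffer), ts);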
diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c
index 80f0b642b9b0..1c7d2eb054a2 100644
--- a/drivers/iio/accel/adxl367_i2c.c
+++ b/drivers/iio/accel/adxl367_i2c.c
@@ -68,7 +68,7 @@ MODULE_DEVICE_TABLE(i2c, adxl367_i2c_id);
static const struct of_device_id adxl367_of_match[] = {
{ .compatible = "adi,adxl367" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, adxl367_of_match);
diff --git a/drivers/iio/accel/adxl367_spi.c b/drivers/iio/accel/adxl367_spi.c
index 49d7c8fbe8ed..3fed56bb9054 100644
--- a/drivers/iio/accel/adxl367_spi.c
+++ b/drivers/iio/accel/adxl367_spi.c
@@ -139,13 +139,13 @@ static int adxl367_spi_probe(struct spi_device *spi)
static const struct spi_device_id adxl367_spi_id[] = {
{ "adxl367", 0 },
- { },
+ { }
};
MODULE_DEVICE_TABLE(spi, adxl367_spi_id);
static const struct of_device_id adxl367_of_match[] = {
{ .compatible = "adi,adxl367" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, adxl367_of_match);
diff --git a/drivers/iio/accel/adxl372_i2c.c b/drivers/iio/accel/adxl372_i2c.c
index 43d5fd921be7..186d4fe9a556 100644
--- a/drivers/iio/accel/adxl372_i2c.c
+++ b/drivers/iio/accel/adxl372_i2c.c
@@ -43,7 +43,7 @@ static int adxl372_i2c_probe(struct i2c_client *client)
static const struct i2c_device_id adxl372_i2c_id[] = {
{ "adxl372" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, adxl372_i2c_id);
diff --git a/drivers/iio/accel/adxl372_spi.c b/drivers/iio/accel/adxl372_spi.c
index 1ab1997a55b1..39941b519c3b 100644
--- a/drivers/iio/accel/adxl372_spi.c
+++ b/drivers/iio/accel/adxl372_spi.c
@@ -34,7 +34,7 @@ static int adxl372_spi_probe(struct spi_device *spi)
static const struct spi_device_id adxl372_spi_id[] = {
{ "adxl372", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adxl372_spi_id);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index aa664a923f91..93a868678722 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -887,7 +887,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan), time_ns);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 96ba028157ee..38f7498431ee 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -103,8 +103,8 @@ static irqreturn_t bma220_trigger_handler(int irq, void *p)
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ pf->timestamp);
err:
mutex_unlock(&data->lock);
iio_trigger_notify_done(indio_dev->trig);
@@ -307,12 +307,12 @@ static DEFINE_SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume);
static const struct spi_device_id bma220_spi_id[] = {
{"bma220", 0},
- {}
+ { }
};
static const struct acpi_device_id bma220_acpi_id[] = {
{"BMA0220", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, bma220_spi_id);
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index 23f5e1ce9cc4..85e23badf733 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -1591,8 +1591,9 @@ static irqreturn_t bma400_trigger_handler(int irq, void *p)
data->buffer.temperature = temp;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->buffer,
+ sizeof(data->buffer),
+ iio_get_time_ns(indio_dev));
mutex_unlock(&data->mutex);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index 0d4ce6c38931..b4604f441553 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -240,7 +240,7 @@ static const struct acpi_device_id bmc150_accel_acpi_match[] = {
{"BOSC0200"},
{"BSBA0150"},
{"DUAL250E"},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, bmc150_accel_acpi_match);
@@ -255,7 +255,7 @@ static const struct i2c_device_id bmc150_accel_id[] = {
{"bmc150_accel"},
{"bmc156_accel", BOSCH_BMC156},
{"bmi055_accel"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, bmc150_accel_id);
@@ -271,7 +271,7 @@ static const struct of_device_id bmc150_accel_of_match[] = {
{ .compatible = "bosch,bmc150_accel" },
{ .compatible = "bosch,bmc156_accel" },
{ .compatible = "bosch,bmi055_accel" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, bmc150_accel_of_match);
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 70b3642656ab..26ce50b37716 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -48,7 +48,7 @@ static const struct acpi_device_id bmc150_accel_acpi_match[] = {
{"BMC150A"},
{"BMI055A"},
{"BSBA0150"},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, bmc150_accel_acpi_match);
@@ -62,7 +62,7 @@ static const struct spi_device_id bmc150_accel_id[] = {
{"bmc150_accel"},
{"bmc156_accel", BOSCH_BMC156},
{"bmi055_accel"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, bmc150_accel_id);
diff --git a/drivers/iio/accel/bmi088-accel-i2c.c b/drivers/iio/accel/bmi088-accel-i2c.c
index bd22bd0d3c25..310f863029bb 100644
--- a/drivers/iio/accel/bmi088-accel-i2c.c
+++ b/drivers/iio/accel/bmi088-accel-i2c.c
@@ -40,7 +40,7 @@ static const struct of_device_id bmi088_of_match[] = {
{ .compatible = "bosch,bmi085-accel" },
{ .compatible = "bosch,bmi088-accel" },
{ .compatible = "bosch,bmi090l-accel" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, bmi088_of_match);
@@ -48,7 +48,7 @@ static const struct i2c_device_id bmi088_accel_id[] = {
{ "bmi085-accel", BOSCH_BMI085 },
{ "bmi088-accel", BOSCH_BMI088 },
{ "bmi090l-accel", BOSCH_BMI090L },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, bmi088_accel_id);
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index c9d51a74c07f..44cb50c76cb1 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -67,7 +67,7 @@ static const struct of_device_id bmi088_of_match[] = {
{ .compatible = "bosch,bmi085-accel" },
{ .compatible = "bosch,bmi088-accel" },
{ .compatible = "bosch,bmi090l-accel" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, bmi088_of_match);
@@ -75,7 +75,7 @@ static const struct spi_device_id bmi088_accel_id[] = {
{"bmi085-accel", BOSCH_BMI085},
{"bmi088-accel", BOSCH_BMI088},
{"bmi090l-accel", BOSCH_BMI090L},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, bmi088_accel_id);
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
index 992286828844..c2dd123b9021 100644
--- a/drivers/iio/accel/da280.c
+++ b/drivers/iio/accel/da280.c
@@ -157,7 +157,7 @@ static const struct da280_match_data da280_match_data = { "da280", 3 };
static const struct acpi_device_id da280_acpi_match[] = {
{ "NSA2513", (kernel_ulong_t)&da217_match_data },
{ "MIRAACC", (kernel_ulong_t)&da280_match_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, da280_acpi_match);
@@ -165,7 +165,7 @@ static const struct i2c_device_id da280_i2c_id[] = {
{ "da217", (kernel_ulong_t)&da217_match_data },
{ "da226", (kernel_ulong_t)&da226_match_data },
{ "da280", (kernel_ulong_t)&da280_match_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
index 94f827acdd1c..e1df7b009d89 100644
--- a/drivers/iio/accel/da311.c
+++ b/drivers/iio/accel/da311.c
@@ -269,7 +269,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(da311_pm_ops, da311_suspend, da311_resume);
static const struct i2c_device_id da311_i2c_id[] = {
{ "da311" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, da311_i2c_id);
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
index 35c0eefb741e..71cd1928baa6 100644
--- a/drivers/iio/accel/dmard10.c
+++ b/drivers/iio/accel/dmard10.c
@@ -232,7 +232,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(dmard10_pm_ops, dmard10_suspend,
static const struct i2c_device_id dmard10_i2c_id[] = {
{ "dmard10" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, dmard10_i2c_id);
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index bf1d3923a181..12598feaa693 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -23,6 +23,7 @@
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
@@ -439,8 +440,16 @@ static int fxls8962af_read_raw(struct iio_dev *indio_dev,
*val = FXLS8962AF_TEMP_CENTER_VAL;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- return fxls8962af_read_full_scale(data, val2);
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = MILLIDEGREE_PER_DEGREE;
+ return IIO_VAL_INT;
+ case IIO_ACCEL:
+ *val = 0;
+ return fxls8962af_read_full_scale(data, val2);
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
return fxls8962af_read_samp_freq(data, val, val2);
default:
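With the scale and offset exposed above, userspace computes the processed temperature as (raw + offset) * scale. Assuming FXLS8962AF_TEMP_CENTER_VAL is 25 (the driver's offset, in degrees), a raw reading of -3 works out to:

    (-3 + 25) * 1000 = 22000 milli-degrees C = 22 degrees C

The added .sign = 's' matters for exactly this case: the 8-bit raw value is two's complement, so readings below the center value would otherwise be misparsed as large positive numbers.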
@@ -736,9 +745,11 @@ static const struct iio_event_spec fxls8962af_event[] = {
.type = IIO_TEMP, \
.address = FXLS8962AF_TEMP_OUT, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OFFSET),\
.scan_index = -1, \
.scan_type = { \
+ .sign = 's', \
.realbits = 8, \
.storagebits = 8, \
}, \
@@ -983,8 +994,8 @@ static int fxls8962af_fifo_flush(struct iio_dev *indio_dev)
sizeof(data->scan.channels[0]));
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- tstamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan,
+ sizeof(data->scan), tstamp);
tstamp += sample_period;
}
diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
index 1b9156b6b2e3..106198a12474 100644
--- a/drivers/iio/accel/fxls8962af-i2c.c
+++ b/drivers/iio/accel/fxls8962af-i2c.c
@@ -32,14 +32,14 @@ static const struct i2c_device_id fxls8962af_id[] = {
{ "fxls8964af", fxls8964af },
{ "fxls8967af", fxls8967af },
{ "fxls8974cf", fxls8974cf },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, fxls8962af_id);
static const struct of_device_id fxls8962af_of_match[] = {
{ .compatible = "nxp,fxls8962af" },
{ .compatible = "nxp,fxls8964af" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, fxls8962af_of_match);
diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c
index 46fc6e002714..bdafd1f615d9 100644
--- a/drivers/iio/accel/fxls8962af-spi.c
+++ b/drivers/iio/accel/fxls8962af-spi.c
@@ -30,14 +30,14 @@ static int fxls8962af_probe(struct spi_device *spi)
static const struct of_device_id fxls8962af_spi_of_match[] = {
{ .compatible = "nxp,fxls8962af" },
{ .compatible = "nxp,fxls8964af" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, fxls8962af_spi_of_match);
static const struct spi_device_id fxls8962af_spi_id_table[] = {
{ "fxls8962af", fxls8962af },
{ "fxls8964af", fxls8964af },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, fxls8962af_spi_id_table);
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 078fab2abb68..2ff591b3458f 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -228,7 +228,7 @@ static void hid_sensor_push_data(struct iio_dev *indio_dev, void *data,
int len, int64_t timestamp)
{
dev_dbg(&indio_dev->dev, "hid_sensor_push_data\n");
- iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, data, len, timestamp);
}
/* Callback handler to send event after all samples are received and captured */
@@ -440,7 +440,7 @@ static const struct platform_device_id hid_accel_3d_ids[] = {
{ /* gravity sensor */
.name = "HID-SENSOR-20007b",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_accel_3d_ids);
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index f2496cad8ec2..910d7b5716e1 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -674,8 +674,8 @@ static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
return 0;
}
-static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
- bool status)
+static int kxcjk1013_setup_interrupt(struct kxcjk1013_data *data,
+ bool status, bool is_new_data)
{
const struct kx_chipset_regs *regs = data->info->regs;
int ret;
@@ -690,69 +690,12 @@ static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
if (ret < 0)
return ret;
- ret = kxcjk1013_chip_update_thresholds(data);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_read_byte_data(data->client, regs->int_ctrl1);
- if (ret < 0) {
- dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
- return ret;
- }
-
- if (status)
- ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEN;
- else
- ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;
-
- ret = i2c_smbus_write_byte_data(data->client, regs->int_ctrl1, ret);
- if (ret < 0) {
- dev_err(&data->client->dev, "Error writing reg_int_ctrl1\n");
- return ret;
- }
-
- ret = i2c_smbus_read_byte_data(data->client, regs->ctrl1);
- if (ret < 0) {
- dev_err(&data->client->dev, "Error reading reg_ctrl1\n");
- return ret;
- }
-
- if (status)
- ret |= KXCJK1013_REG_CTRL1_BIT_WUFE;
- else
- ret &= ~KXCJK1013_REG_CTRL1_BIT_WUFE;
-
- ret = i2c_smbus_write_byte_data(data->client, regs->ctrl1, ret);
- if (ret < 0) {
- dev_err(&data->client->dev, "Error writing reg_ctrl1\n");
- return ret;
- }
-
- if (store_mode == OPERATION) {
- ret = kxcjk1013_set_mode(data, OPERATION);
+ if (is_new_data) {
+ ret = kxcjk1013_chip_update_thresholds(data);
if (ret < 0)
return ret;
}
- return 0;
-}
-
-static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
- bool status)
-{
- const struct kx_chipset_regs *regs = data->info->regs;
- int ret;
- enum kxcjk1013_mode store_mode;
-
- ret = kxcjk1013_get_mode(data, &store_mode);
- if (ret < 0)
- return ret;
-
- /* This is requirement by spec to change state to STANDBY */
- ret = kxcjk1013_set_mode(data, STANDBY);
- if (ret < 0)
- return ret;
-
ret = i2c_smbus_read_byte_data(data->client, regs->int_ctrl1);
if (ret < 0) {
dev_err(&data->client->dev, "Error reading reg_int_ctrl1\n");
@@ -776,10 +719,17 @@ static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
return ret;
}
- if (status)
- ret |= KXCJK1013_REG_CTRL1_BIT_DRDY;
- else
- ret &= ~KXCJK1013_REG_CTRL1_BIT_DRDY;
+ if (is_new_data) {
+ if (status)
+ ret |= KXCJK1013_REG_CTRL1_BIT_DRDY;
+ else
+ ret &= ~KXCJK1013_REG_CTRL1_BIT_DRDY;
+ } else {
+ if (status)
+ ret |= KXCJK1013_REG_CTRL1_BIT_WUFE;
+ else
+ ret &= ~KXCJK1013_REG_CTRL1_BIT_WUFE;
+ }
ret = i2c_smbus_write_byte_data(data->client, regs->ctrl1, ret);
if (ret < 0) {
@@ -1112,7 +1062,7 @@ static int kxcjk1013_write_event_config(struct iio_dev *indio_dev,
return ret;
}
- ret = kxcjk1013_setup_any_motion_interrupt(data, state);
+ ret = kxcjk1013_setup_interrupt(data, state, false);
if (ret < 0) {
kxcjk1013_set_power_state(data, false);
data->ev_enable_state = 0;
@@ -1253,8 +1203,8 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- data->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ data->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -1293,10 +1243,7 @@ static int kxcjk1013_data_rdy_trigger_set_state(struct iio_trigger *trig,
mutex_unlock(&data->mutex);
return ret;
}
- if (data->motion_trig == trig)
- ret = kxcjk1013_setup_any_motion_interrupt(data, state);
- else
- ret = kxcjk1013_setup_new_data_interrupt(data, state);
+ ret = kxcjk1013_setup_interrupt(data, state, data->motion_trig != trig);
if (ret < 0) {
kxcjk1013_set_power_state(data, false);
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index 3857d2edf250..1fa88b99149e 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -38,7 +38,7 @@ static void kxsd9_i2c_remove(struct i2c_client *client)
static const struct of_device_id kxsd9_of_match[] = {
{ .compatible = "kionix,kxsd9", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, kxsd9_of_match);
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index a05f4467d94a..cbb6c6412665 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -38,7 +38,7 @@ static void kxsd9_spi_remove(struct spi_device *spi)
static const struct spi_device_id kxsd9_spi_id[] = {
{"kxsd9", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(spi, kxsd9_spi_id);
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 0ededf8cfdca..cfc31265cdd0 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -229,9 +229,8 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
goto out;
}
- iio_push_to_buffers_with_timestamp(indio_dev,
- &hw_values,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &hw_values, sizeof(hw_values),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -273,7 +272,7 @@ kxsd9_get_mount_matrix(const struct iio_dev *indio_dev,
static const struct iio_chan_spec_ext_info kxsd9_ext_info[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, kxsd9_get_mount_matrix),
- { },
+ { }
};
#define KXSD9_ACCEL_CHAN(axis, index) \
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index 30746621052c..a2b5bdf14dde 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -103,8 +103,9 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
if (ret)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &mma7455->scan,
+ sizeof(mma7455->scan),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 2894aff80161..d0a16f227903 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -262,7 +262,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(mma7660_pm_ops, mma7660_suspend,
static const struct i2c_device_id mma7660_i2c_id[] = {
{ "mma7660" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, mma7660_i2c_id);
@@ -274,7 +274,7 @@ MODULE_DEVICE_TABLE(of, mma7660_of_match);
static const struct acpi_device_id mma7660_acpi_id[] = {
{"MMA7660", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, mma7660_acpi_id);
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 05f5482f366e..aba444a980d9 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1103,8 +1103,9 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->buffer,
+ sizeof(data->buffer),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 1b96687da01a..b89bad9e6fe6 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -578,14 +578,14 @@ static const struct dev_pm_ops mma9551_pm_ops = {
static const struct acpi_device_id mma9551_acpi_match[] = {
{"MMA9551", 0},
- {},
+ { }
};
MODULE_DEVICE_TABLE(acpi, mma9551_acpi_match);
static const struct i2c_device_id mma9551_id[] = {
{ "mma9551" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, mma9551_id);
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 00e224efc8ed..1bbe660b254f 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -919,7 +919,7 @@ static const struct iio_enum mma9553_calibgender_enum = {
static const struct iio_chan_spec_ext_info mma9553_ext_info[] = {
IIO_ENUM("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum),
IIO_ENUM_AVAILABLE("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum),
- {},
+ { }
};
#define MMA9553_PEDOMETER_CHANNEL(_type, _mask) { \
@@ -1216,14 +1216,14 @@ static const struct dev_pm_ops mma9553_pm_ops = {
static const struct acpi_device_id mma9553_acpi_match[] = {
{"MMA9553", 0},
- {},
+ { }
};
MODULE_DEVICE_TABLE(acpi, mma9553_acpi_match);
static const struct i2c_device_id mma9553_id[] = {
{ "mma9553" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, mma9553_id);
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index d31c11fbbe68..c31c53abc3d0 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -919,8 +919,8 @@ static irqreturn_t msa311_buffer_thread(int irq, void *p)
mutex_unlock(&msa311->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, &buf,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &buf, sizeof(buf),
+ iio_get_time_ns(indio_dev));
notify_done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index cb5c4e354fc0..1075c8ce0e37 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -335,8 +335,8 @@ static irqreturn_t mxc4005_trigger_handler(int irq, void *private)
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -573,14 +573,14 @@ static const struct acpi_device_id mxc4005_acpi_match[] = {
{"MXC4005", 0},
{"MXC6655", 0},
{"MDA6655", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, mxc4005_acpi_match);
static const struct of_device_id mxc4005_of_match[] = {
{ .compatible = "memsic,mxc4005", },
{ .compatible = "memsic,mxc6655", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, mxc4005_of_match);
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 3fb0f386c3db..aabe4491efd7 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -1541,7 +1541,7 @@ static const struct spi_device_id sca3000_id[] = {
{"sca3000_e02", e02},
{"sca3000_e04", e04},
{"sca3000_e05", e05},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, sca3000_id);
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c
index ca0ce83e42b2..67416a406e2f 100644
--- a/drivers/iio/accel/sca3300.c
+++ b/drivers/iio/accel/sca3300.c
@@ -58,15 +58,6 @@ enum sca3300_scan_indexes {
SCA3300_SCAN_MAX
};
-/*
- * Buffer size max case:
- * Three accel channels, two bytes per channel.
- * Temperature channel, two bytes.
- * Three incli channels, two bytes per channel.
- * Timestamp channel, eight bytes.
- */
-#define SCA3300_MAX_BUFFER_SIZE (ALIGN(sizeof(s16) * SCA3300_SCAN_MAX, sizeof(s64)) + sizeof(s64))
-
#define SCA3300_ACCEL_CHANNEL(index, reg, axis) { \
.type = IIO_ACCEL, \
.address = reg, \
@@ -193,9 +184,6 @@ struct sca3300_chip_info {
* @spi: SPI device structure
* @lock: Data buffer lock
* @chip: Sensor chip specific information
- * @buffer: Triggered buffer:
- * -SCA3300: 4 channel 16-bit data + 64-bit timestamp
- * -SCL3300: 7 channel 16-bit data + 64-bit timestamp
* @txbuf: Transmit buffer
* @rxbuf: Receive buffer
*/
@@ -203,7 +191,6 @@ struct sca3300_data {
struct spi_device *spi;
struct mutex lock;
const struct sca3300_chip_info *chip;
- u8 buffer[SCA3300_MAX_BUFFER_SIZE] __aligned(sizeof(s64));
u8 txbuf[4] __aligned(IIO_DMA_MINALIGN);
u8 rxbuf[4];
};
@@ -492,7 +479,7 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct sca3300_data *data = iio_priv(indio_dev);
int bit, ret, val, i = 0;
- s16 *channels = (s16 *)data->buffer;
+ IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX);
iio_for_each_active_channel(indio_dev, bit) {
ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val);
@@ -505,8 +492,8 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p)
channels[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, channels, sizeof(channels),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
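IIO_DECLARE_BUFFER_WITH_TS() above replaces the removed SCA3300_MAX_BUFFER_SIZE machinery: it declares an s16 array named channels with room for SCA3300_SCAN_MAX elements plus a naturally aligned s64 timestamp, i.e. the same size the deleted define computed by hand:

    ALIGN(sizeof(s16) * SCA3300_SCAN_MAX, sizeof(s64)) + sizeof(s64)

which is why sizeof(channels) can be passed straight to iio_push_to_buffers_with_ts().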
@@ -674,14 +661,14 @@ static int sca3300_probe(struct spi_device *spi)
static const struct of_device_id sca3300_dt_ids[] = {
{ .compatible = "murata,sca3300"},
{ .compatible = "murata,scl3300"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, sca3300_dt_ids);
static const struct spi_device_id sca3300_ids[] = {
{ "sca3300" },
{ "scl3300" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, sca3300_ids);
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index ab4fdba75a0a..f24449500533 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -126,14 +126,14 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,iis328dq",
.data = IIS328DQ_ACCEL_DEV_NAME,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
static const struct acpi_device_id st_accel_acpi_match[] = {
{"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
{"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
@@ -164,7 +164,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LSM303C_ACCEL_DEV_NAME },
{ SC7A20_ACCEL_DEV_NAME },
{ IIS328DQ_ACCEL_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 6146754fe47f..d8ec0555f42a 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -108,7 +108,7 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,iis328dq",
.data = IIS328DQ_ACCEL_DEV_NAME,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -167,7 +167,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LIS302DL_ACCEL_DEV_NAME },
{ LSM303C_ACCEL_DEV_NAME },
{ IIS328DQ_ACCEL_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 471c154c3631..dfac2e44191f 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -460,8 +460,8 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p)
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -635,7 +635,7 @@ static const struct i2c_device_id stk8312_i2c_id[] = {
/* Deprecated in favour of lowercase form */
{ "STK8312" },
{ "stk8312" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, stk8312_i2c_id);
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index cab592a68622..05d4fd540eb2 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -340,8 +340,8 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
data->scan.chans[i++] = ret;
}
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ pf->timestamp);
err:
mutex_unlock(&data->lock);
iio_trigger_notify_done(indio_dev->trig);
@@ -526,13 +526,13 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stk8ba50_pm_ops, stk8ba50_suspend,
static const struct i2c_device_id stk8ba50_i2c_id[] = {
{ "stk8ba50" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, stk8ba50_i2c_id);
static const struct acpi_device_id stk8ba50_acpi_id[] = {
{"STK8BA50", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, stk8ba50_acpi_id);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 6529df1a498c..ea3ba1397392 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -6,6 +6,9 @@
menu "Analog to digital converters"
+config IIO_ADC_HELPER
+ tristate
+
config AB8500_GPADC
bool "ST-Ericsson AB8500 GPADC driver"
depends on AB8500_CORE && REGULATOR_AB8500
@@ -25,10 +28,15 @@ config AD4000
tristate "Analog Devices AD4000 ADC Driver"
depends on SPI
select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
select IIO_TRIGGERED_BUFFER
+ select SPI_OFFLOAD
help
Say yes here to build support for Analog Devices AD4000 high speed
- SPI analog to digital converters (ADC).
+ SPI analog to digital converters (ADC). If you intend to use it
+ with SPI offload support, it is recommended to also enable
+ CONFIG_SPI_AXI_SPI_ENGINE, CONFIG_PWM_AXI_PWMGEN, and
+ CONFIG_SPI_OFFLOAD_TRIGGER_PWM.
To compile this driver as a module, choose M here: the module will be
called ad4000.
@@ -129,8 +137,9 @@ config AD7173
tristate "Analog Devices AD7173 driver"
depends on SPI_MASTER
select AD_SIGMA_DELTA
- select GPIO_REGMAP if GPIOLIB
- select REGMAP_SPI if GPIOLIB
+ select GPIOLIB
+ select GPIO_REGMAP
+ select REGMAP_SPI
help
Say yes here to build support for Analog Devices AD7173 and similar ADC
Currently supported models:
@@ -281,6 +290,8 @@ config AD7606_IFACE_SPI
tristate "Analog Devices AD7606 ADC driver with spi interface support"
depends on SPI
select AD7606
+ select IIO_BUFFER_DMAENGINE
+ select SPI_OFFLOAD
help
Say yes here to build spi interface support for Analog Devices:
ad7605-4, ad7606, ad7606-6, ad7606-4 analog to digital converters (ADC).
@@ -319,6 +330,7 @@ config AD7766
config AD7768_1
tristate "Analog Devices AD7768-1 ADC driver"
depends on SPI
+ select REGMAP_SPI
select IIO_BUFFER
select IIO_TRIGGER
select IIO_TRIGGERED_BUFFER
@@ -1092,6 +1104,17 @@ config NAU7802
To compile this driver as a module, choose M here: the
module will be called nau7802.
+config NCT7201
+ tristate "Nuvoton Instruments NCT7201 and NCT7202 Power Monitor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Nuvoton NCT7201 and
+ NCT7202 voltage monitors.
+
+ This driver can also be built as a module. If so, the module
+ will be called nct7201.
+
config NPCM_ADC
tristate "Nuvoton NPCM ADC driver"
depends on ARCH_NPCM || COMPILE_TEST
@@ -1232,6 +1255,18 @@ config RN5T618_ADC
This driver can also be built as a module. If so, the module
will be called rn5t618-adc.
+config ROHM_BD79124
+ tristate "Rohm BD79124 ADC driver"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_ADC_HELPER
+ help
+ Say yes here to build support for the ROHM BD79124 ADC. The
+ ROHM BD79124 is a 12-bit, 8-channel SAR ADC. The ADC also supports
+ an automatic measurement mode, with an alarm interrupt for
+ out-of-window measurements. The window is configurable for each
+ channel.
+
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || COMPILE_TEST
@@ -1263,6 +1298,7 @@ config RICHTEK_RTQ6056
config RZG2L_ADC
tristate "Renesas RZ/G2L ADC driver"
depends on ARCH_RZG2L || COMPILE_TEST
+ select IIO_ADC_HELPER
help
Say yes here to build support for the ADC found in Renesas
RZ/G2L family.
@@ -1397,6 +1433,7 @@ config SUN4I_GPADC
config SUN20I_GPADC
tristate "Allwinner D1/T113s/T507/R329 and similar GPADCs driver"
depends on ARCH_SUNXI || COMPILE_TEST
+ select IIO_ADC_HELPER
help
Say yes here to build support for Allwinner (D1, T113, T507 and R329)
SoCs GPADC. This ADC provides up to 16 channels.
@@ -1440,18 +1477,6 @@ config TI_ADC084S021
This driver can also be built as a module. If so, the module will be
called ti-adc084s021.
-config TI_ADC12138
- tristate "Texas Instruments ADC12130/ADC12132/ADC12138"
- depends on SPI
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- If you say yes here you get support for Texas Instruments ADC12130,
- ADC12132 and ADC12138 chips.
-
- This driver can also be built as a module. If so, the module will be
- called ti-adc12138.
-
config TI_ADC108S102
tristate "Texas Instruments ADC108S102 and ADC128S102 driver"
depends on SPI
@@ -1464,12 +1489,24 @@ config TI_ADC108S102
To compile this driver as a module, choose M here: the module will
be called ti-adc108s102.
+config TI_ADC12138
+ tristate "Texas Instruments ADC12130/ADC12132/ADC12138"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADC12130,
+ ADC12132 and ADC12138 chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-adc12138.
+
config TI_ADC128S052
tristate "Texas Instruments ADC128S052/ADC122S021/ADC124S021"
depends on SPI
help
If you say yes here you get support for Texas Instruments ADC128S052,
- ADC122S021 and ADC124S021 chips.
+ ADC122S021, ADC124S021 and ROHM Semiconductor BD79104 chips.
This driver can also be built as a module. If so, the module will be
called ti-adc128s052.
@@ -1499,6 +1536,16 @@ config TI_ADS1015
This driver can also be built as a module. If so, the module will be
called ti-ads1015.
+config TI_ADS1100
+ tristate "Texas Instruments ADS1100 and ADS1000 ADC"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments ADS1100 and
+ ADS1000 ADC chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads1100.
+
config TI_ADS1119
tristate "Texas Instruments ADS1119 ADC"
depends on I2C
@@ -1511,6 +1558,42 @@ config TI_ADS1119
This driver can also be built as a module. If so, the module will be
called ti-ads1119.
+config TI_ADS124S08
+ tristate "Texas Instruments ADS124S08"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADS124S08
+ and ADS124S06 ADC chips
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads124s08.
+
+config TI_ADS1298
+ tristate "Texas Instruments ADS1298"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ If you say yes here you get support for Texas Instruments ADS1298
+ medical ADC chips
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads1298.
+
+config TI_ADS131E08
+ tristate "Texas Instruments ADS131E08"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to get support for Texas Instruments ADS131E04, ADS131E06
+ and ADS131E08 chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads131e08.
+
config TI_ADS7138
tristate "Texas Instruments ADS7128 and ADS7138 ADC driver"
depends on I2C
@@ -1532,27 +1615,6 @@ config TI_ADS7924
This driver can also be built as a module. If so, the module will be
called ti-ads7924.
-config TI_ADS1100
- tristate "Texas Instruments ADS1100 and ADS1000 ADC"
- depends on I2C
- help
- If you say yes here you get support for Texas Instruments ADS1100 and
- ADS1000 ADC chips.
-
- This driver can also be built as a module. If so, the module will be
- called ti-ads1100.
-
-config TI_ADS1298
- tristate "Texas Instruments ADS1298"
- depends on SPI
- select IIO_BUFFER
- help
- If you say yes here you get support for Texas Instruments ADS1298
- medical ADC chips
-
- This driver can also be built as a module. If so, the module will be
- called ti-ads1298.
-
config TI_ADS7950
tristate "Texas Instruments ADS7950 ADC driver"
depends on SPI && GPIOLIB
@@ -1588,30 +1650,6 @@ config TI_ADS8688
This driver can also be built as a module. If so, the module will be
called ti-ads8688.
-config TI_ADS124S08
- tristate "Texas Instruments ADS124S08"
- depends on SPI
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- If you say yes here you get support for Texas Instruments ADS124S08
- and ADS124S06 ADC chips
-
- This driver can also be built as a module. If so, the module will be
- called ti-ads124s08.
-
-config TI_ADS131E08
- tristate "Texas Instruments ADS131E08"
- depends on SPI
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- Say yes here to get support for Texas Instruments ADS131E04, ADS131E06
- and ADS131E08 chips.
-
- This driver can also be built as a module. If so, the module will be
- called ti-ads131e08.
-
config TI_AM335X_ADC
tristate "TI's AM335X ADC driver"
depends on MFD_TI_AM335X_TSCADC && HAS_DMA
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 3e918c3eec69..09ae6edb2650 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -3,6 +3,8 @@
# Makefile for IIO ADC drivers
#
+obj-$(CONFIG_IIO_ADC_HELPER) += industrialio-adc.o
+
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
@@ -97,6 +99,7 @@ obj-$(CONFIG_MESON_SARADC) += meson_saradc.o
obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
+obj-$(CONFIG_NCT7201) += nct7201.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
obj-$(CONFIG_PAC1921) += pac1921.o
obj-$(CONFIG_PAC1934) += pac1934.o
@@ -110,6 +113,7 @@ obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_RICHTEK_RTQ6056) += rtq6056.o
obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o
+obj-$(CONFIG_ROHM_BD79124) += rohm-bd79124.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c
index 4fe8dee48da9..5609a7845b6f 100644
--- a/drivers/iio/adc/ad4000.c
+++ b/drivers/iio/adc/ad4000.c
@@ -15,12 +15,14 @@
#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/offload/consumer.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
#include <linux/util_macros.h>
-#include <linux/iio/iio.h>
+#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
@@ -32,10 +34,11 @@
/* AD4000 Configuration Register programmable bits */
#define AD4000_CFG_SPAN_COMP BIT(3) /* Input span compression */
#define AD4000_CFG_HIGHZ BIT(2) /* High impedance mode */
+#define AD4000_CFG_TURBO BIT(1) /* Turbo mode */
#define AD4000_SCALE_OPTIONS 2
-#define __AD4000_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access) \
+#define __AD4000_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access, _offl)\
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -43,54 +46,65 @@
.channel = 0, \
.channel2 = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ (_offl ? BIT(IIO_CHAN_INFO_SAMP_FREQ) : 0), \
.info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
.scan_index = 0, \
.scan_type = { \
.sign = _sign, \
.realbits = _real_bits, \
.storagebits = _storage_bits, \
- .shift = _storage_bits - _real_bits, \
- .endianness = IIO_BE, \
+ .shift = (_offl ? 0 : _storage_bits - _real_bits), \
+ .endianness = _offl ? IIO_CPU : IIO_BE \
}, \
}
-#define AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access) \
+#define AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access, _offl) \
__AD4000_DIFF_CHANNEL((_sign), (_real_bits), \
- ((_real_bits) > 16 ? 32 : 16), (_reg_access))
+ (((_offl) || ((_real_bits) > 16)) ? 32 : 16), \
+ (_reg_access), (_offl))
+/*
+ * When SPI offload is configured, transfers are executed without CPU
+ * intervention, so no soft timestamp can be recorded while they run.
+ * Because of that, the macros that set the timestamp channel are only used
+ * when transfers are not offloaded.
+ */
#define AD4000_DIFF_CHANNELS(_sign, _real_bits, _reg_access) \
{ \
- AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access), \
+ AD4000_DIFF_CHANNEL(_sign, _real_bits, _reg_access, 0), \
IIO_CHAN_SOFT_TIMESTAMP(1), \
}
-#define __AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, _reg_access)\
+#define __AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _storage_bits, \
+ _reg_access, _offl) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = 0, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_OFFSET), \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ (_offl ? BIT(IIO_CHAN_INFO_SAMP_FREQ) : 0), \
.info_mask_separate_available = _reg_access ? BIT(IIO_CHAN_INFO_SCALE) : 0,\
.scan_index = 0, \
.scan_type = { \
.sign = _sign, \
.realbits = _real_bits, \
.storagebits = _storage_bits, \
- .shift = _storage_bits - _real_bits, \
- .endianness = IIO_BE, \
+ .shift = (_offl ? 0 : _storage_bits - _real_bits), \
+ .endianness = _offl ? IIO_CPU : IIO_BE \
}, \
}
-#define AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access) \
+#define AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access, _offl) \
__AD4000_PSEUDO_DIFF_CHANNEL((_sign), (_real_bits), \
- ((_real_bits) > 16 ? 32 : 16), (_reg_access))
+ (((_offl) || ((_real_bits) > 16)) ? 32 : 16),\
+ (_reg_access), (_offl))
#define AD4000_PSEUDO_DIFF_CHANNELS(_sign, _real_bits, _reg_access) \
{ \
- AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access), \
+ AD4000_PSEUDO_DIFF_CHANNEL(_sign, _real_bits, _reg_access, 0), \
IIO_CHAN_SOFT_TIMESTAMP(1), \
}
@@ -184,212 +198,298 @@ struct ad4000_chip_info {
const char *dev_name;
struct iio_chan_spec chan_spec[2];
struct iio_chan_spec reg_access_chan_spec[2];
+ struct iio_chan_spec offload_chan_spec;
+ struct iio_chan_spec reg_access_offload_chan_spec;
const struct ad4000_time_spec *time_spec;
bool has_hardware_gain;
+ int max_rate_hz;
};
static const struct ad4000_chip_info ad4000_chip_info = {
.dev_name = "ad4000",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
.reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 2 * MEGA,
};
static const struct ad4000_chip_info ad4001_chip_info = {
.dev_name = "ad4001",
.chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 2 * MEGA,
};
static const struct ad4000_chip_info ad4002_chip_info = {
.dev_name = "ad4002",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
.reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 2 * MEGA,
};
static const struct ad4000_chip_info ad4003_chip_info = {
.dev_name = "ad4003",
.chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 2 * MEGA,
};
static const struct ad4000_chip_info ad4004_chip_info = {
.dev_name = "ad4004",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
.reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 1 * MEGA,
};
static const struct ad4000_chip_info ad4005_chip_info = {
.dev_name = "ad4005",
.chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 1 * MEGA,
};
static const struct ad4000_chip_info ad4006_chip_info = {
.dev_name = "ad4006",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
.reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 1 * MEGA,
};
static const struct ad4000_chip_info ad4007_chip_info = {
.dev_name = "ad4007",
.chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 1 * MEGA,
};
static const struct ad4000_chip_info ad4008_chip_info = {
.dev_name = "ad4008",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
.reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 1),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 500 * KILO,
};
static const struct ad4000_chip_info ad4010_chip_info = {
.dev_name = "ad4010",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 0),
.reg_access_chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 18, 1),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 18, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 500 * KILO,
};
static const struct ad4000_chip_info ad4011_chip_info = {
.dev_name = "ad4011",
.chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1, 1),
.time_spec = &ad4000_t_spec,
+ .max_rate_hz = 500 * KILO,
};
static const struct ad4000_chip_info ad4020_chip_info = {
.dev_name = "ad4020",
.chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1, 1),
.time_spec = &ad4020_t_spec,
+ .max_rate_hz = 1800 * KILO,
};
static const struct ad4000_chip_info ad4021_chip_info = {
.dev_name = "ad4021",
.chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1, 1),
.time_spec = &ad4020_t_spec,
+ .max_rate_hz = 1 * MEGA,
};
static const struct ad4000_chip_info ad4022_chip_info = {
.dev_name = "ad4022",
.chan_spec = AD4000_DIFF_CHANNELS('s', 20, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 20, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 20, 1, 1),
.time_spec = &ad4020_t_spec,
+ .max_rate_hz = 500 * KILO,
};
static const struct ad4000_chip_info adaq4001_chip_info = {
.dev_name = "adaq4001",
.chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 16, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 1, 1),
.time_spec = &ad4000_t_spec,
.has_hardware_gain = true,
+ .max_rate_hz = 2 * MEGA,
};
static const struct ad4000_chip_info adaq4003_chip_info = {
.dev_name = "adaq4003",
.chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
.reg_access_chan_spec = AD4000_DIFF_CHANNELS('s', 18, 1),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0, 1),
+ .reg_access_offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 1, 1),
.time_spec = &ad4000_t_spec,
.has_hardware_gain = true,
+ .max_rate_hz = 2 * MEGA,
};
static const struct ad4000_chip_info ad7685_chip_info = {
.dev_name = "ad7685",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
.time_spec = &ad7687_t_spec,
+ .max_rate_hz = 250 * KILO,
};
static const struct ad4000_chip_info ad7686_chip_info = {
.dev_name = "ad7686",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
.time_spec = &ad7686_t_spec,
+ .max_rate_hz = 500 * KILO,
};
static const struct ad4000_chip_info ad7687_chip_info = {
.dev_name = "ad7687",
.chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0, 1),
.time_spec = &ad7687_t_spec,
+ .max_rate_hz = 250 * KILO,
};
static const struct ad4000_chip_info ad7688_chip_info = {
.dev_name = "ad7688",
.chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0, 1),
.time_spec = &ad7686_t_spec,
+ .max_rate_hz = 500 * KILO,
};
static const struct ad4000_chip_info ad7690_chip_info = {
.dev_name = "ad7690",
.chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0, 1),
.time_spec = &ad7690_t_spec,
+ .max_rate_hz = 400 * KILO,
};
static const struct ad4000_chip_info ad7691_chip_info = {
.dev_name = "ad7691",
.chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0, 1),
.time_spec = &ad7691_t_spec,
+ .max_rate_hz = 250 * KILO,
};
static const struct ad4000_chip_info ad7693_chip_info = {
.dev_name = "ad7693",
.chan_spec = AD4000_DIFF_CHANNELS('s', 16, 0),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 16, 0, 1),
.time_spec = &ad7686_t_spec,
+ .max_rate_hz = 500 * KILO,
};
static const struct ad4000_chip_info ad7942_chip_info = {
.dev_name = "ad7942",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 14, 0),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 14, 0, 1),
.time_spec = &ad7687_t_spec,
+ .max_rate_hz = 250 * KILO,
};
static const struct ad4000_chip_info ad7946_chip_info = {
.dev_name = "ad7946",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 14, 0),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 14, 0, 1),
.time_spec = &ad7686_t_spec,
+ .max_rate_hz = 500 * KILO,
};
static const struct ad4000_chip_info ad7980_chip_info = {
.dev_name = "ad7980",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
.time_spec = &ad7980_t_spec,
+ .max_rate_hz = 1 * MEGA,
};
static const struct ad4000_chip_info ad7982_chip_info = {
.dev_name = "ad7982",
.chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0, 1),
.time_spec = &ad7980_t_spec,
+ .max_rate_hz = 1 * MEGA,
};
static const struct ad4000_chip_info ad7983_chip_info = {
.dev_name = "ad7983",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
.time_spec = &ad7983_t_spec,
+ .max_rate_hz = 1 * MEGA + 333 * KILO + 333,
};
static const struct ad4000_chip_info ad7984_chip_info = {
.dev_name = "ad7984",
.chan_spec = AD4000_DIFF_CHANNELS('s', 18, 0),
+ .offload_chan_spec = AD4000_DIFF_CHANNEL('s', 18, 0, 1),
.time_spec = &ad7983_t_spec,
+ .max_rate_hz = 1 * MEGA + 333 * KILO + 333,
};
static const struct ad4000_chip_info ad7988_1_chip_info = {
.dev_name = "ad7988-1",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
.time_spec = &ad7988_1_t_spec,
+ .max_rate_hz = 100 * KILO,
};
static const struct ad4000_chip_info ad7988_5_chip_info = {
.dev_name = "ad7988-5",
.chan_spec = AD4000_PSEUDO_DIFF_CHANNELS('u', 16, 0),
+ .offload_chan_spec = AD4000_PSEUDO_DIFF_CHANNEL('u', 16, 0, 1),
.time_spec = &ad7686_t_spec,
+ .max_rate_hz = 500 * KILO,
+};
+
+static const struct spi_offload_config ad4000_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
};
struct ad4000_state {
@@ -397,6 +497,13 @@ struct ad4000_state {
struct gpio_desc *cnv_gpio;
struct spi_transfer xfers[2];
struct spi_message msg;
+ struct spi_transfer offload_xfer;
+ struct spi_message offload_msg;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
+ bool using_offload;
+ unsigned long offload_trigger_hz;
+ int max_rate_hz;
struct mutex lock; /* Protect read modify write cycle */
int vref_mv;
enum ad4000_sdi sdi_pin;
@@ -411,8 +518,10 @@ struct ad4000_state {
*/
struct {
union {
- __be16 sample_buf16;
- __be32 sample_buf32;
+ __be16 sample_buf16_be;
+ __be32 sample_buf32_be;
+ u16 sample_buf16;
+ u32 sample_buf32;
} data;
aligned_s64 timestamp;
} scan __aligned(IIO_DMA_MINALIGN);
@@ -487,6 +596,25 @@ static int ad4000_read_reg(struct ad4000_state *st, unsigned int *val)
return ret;
}
+static int ad4000_set_sampling_freq(struct ad4000_state *st, int freq)
+{
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = freq,
+ },
+ };
+ int ret;
+
+ ret = spi_offload_trigger_validate(st->offload_trigger, &config);
+ if (ret)
+ return ret;
+
+ st->offload_trigger_hz = config.periodic.frequency_hz;
+
+ return 0;
+}
+
static int ad4000_convert_and_acquire(struct ad4000_state *st)
{
int ret;
@@ -515,10 +643,17 @@ static int ad4000_single_conversion(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- if (chan->scan_type.storagebits > 16)
- sample = be32_to_cpu(st->scan.data.sample_buf32);
- else
- sample = be16_to_cpu(st->scan.data.sample_buf16);
+ if (chan->scan_type.endianness == IIO_BE) {
+ if (chan->scan_type.realbits > 16)
+ sample = be32_to_cpu(st->scan.data.sample_buf32_be);
+ else
+ sample = be16_to_cpu(st->scan.data.sample_buf16_be);
+ } else {
+ if (chan->scan_type.realbits > 16)
+ sample = st->scan.data.sample_buf32;
+ else
+ sample = st->scan.data.sample_buf16;
+ }
sample >>= chan->scan_type.shift;
@@ -555,6 +690,9 @@ static int ad4000_read_raw(struct iio_dev *indio_dev,
*val = mult_frac(st->vref_mv, 1, 10);
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->offload_trigger_hz;
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
@@ -620,6 +758,7 @@ static int ad4000_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
+ struct ad4000_state *st = iio_priv(indio_dev);
int ret;
switch (mask) {
@@ -629,6 +768,15 @@ static int ad4000_write_raw(struct iio_dev *indio_dev,
ret = __ad4000_write_raw(indio_dev, chan, val2);
iio_device_release_direct(indio_dev);
return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 1 || val > st->max_rate_hz)
+ return -EINVAL;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad4000_set_sampling_freq(st, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
@@ -645,7 +793,8 @@ static irqreturn_t ad4000_trigger_handler(int irq, void *p)
if (ret < 0)
goto err_out;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &st->scan, sizeof(st->scan),
+ pf->timestamp);
err_out:
iio_trigger_notify_done(indio_dev->trig);
@@ -659,10 +808,114 @@ static const struct iio_info ad4000_reg_access_info = {
.write_raw_get_fmt = &ad4000_write_raw_get_fmt,
};
+static const struct iio_info ad4000_offload_info = {
+ .read_raw = &ad4000_read_raw,
+ .write_raw = &ad4000_write_raw,
+ .write_raw_get_fmt = &ad4000_write_raw_get_fmt,
+};
+
static const struct iio_info ad4000_info = {
.read_raw = &ad4000_read_raw,
};
+static int ad4000_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = st->offload_trigger_hz,
+ },
+ };
+
+ return spi_offload_trigger_enable(st->offload, st->offload_trigger,
+ &config);
+}
+
+static int ad4000_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad4000_state *st = iio_priv(indio_dev);
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad4000_offload_buffer_setup_ops = {
+ .postenable = &ad4000_offload_buffer_postenable,
+ .predisable = &ad4000_offload_buffer_predisable,
+};
+
+static int ad4000_spi_offload_setup(struct iio_dev *indio_dev,
+ struct ad4000_state *st)
+{
+ struct spi_device *spi = st->spi;
+ struct device *dev = &spi->dev;
+ struct dma_chan *rx_dma;
+ int ret;
+
+ st->offload_trigger = devm_spi_offload_trigger_get(dev, st->offload,
+ SPI_OFFLOAD_TRIGGER_PERIODIC);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(st->offload_trigger),
+ "Failed to get offload trigger\n");
+
+ ret = ad4000_set_sampling_freq(st, st->max_rate_hz);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set sampling frequency\n");
+
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev, st->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "Failed to get offload RX DMA\n");
+
+ ret = devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev, rx_dma,
+ IIO_BUFFER_DIRECTION_IN);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup DMA buffer\n");
+
+ return 0;
+}
+
+/*
+ * This executes a data sample transfer when using SPI offloading. The device
+ * connections should be in "3-wire" mode, selected either when the adi,sdi-pin
+ * device tree property is absent or set to "high". Also, the ADC CNV pin must
+ * be connected to a SPI controller CS (it can't be connected to a GPIO).
+ *
+ * In order to achieve the maximum sample rate, we only do one transfer per
+ * SPI offload trigger. Because the ADC output has a one-sample latency when
+ * the device is wired in "3-wire" mode and only one transfer per sample is
+ * made in turbo mode, the first data sample is not valid because it contains
+ * the result of an earlier conversion. We also set the transfer
+ * `bits_per_word` to achieve higher throughput by using the minimum number
+ * of SCLK cycles, and add a delay to make sure the minimum quiet time is met
+ * before releasing the CS line.
+ *
+ * Note that, with `bits_per_word` set to the number of ADC precision bits,
+ * transfers use larger in-memory word sizes that are always in native CPU
+ * byte order. Because of that, IIO buffer elements must be read in CPU
+ * endianness, which requires setting the IIO scan_type endianness
+ * accordingly (i.e. IIO_CPU).
+ */
+static int ad4000_prepare_offload_message(struct ad4000_state *st,
+ const struct iio_chan_spec *chan)
+{
+ struct spi_transfer *xfer = &st->offload_xfer;
+
+ xfer->bits_per_word = chan->scan_type.realbits;
+ xfer->len = chan->scan_type.realbits > 16 ? 4 : 2;
+ xfer->delay.value = st->time_spec->t_quiet2_ns;
+ xfer->delay.unit = SPI_DELAY_UNIT_NSECS;
+ xfer->offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+
+ spi_message_init_with_transfers(&st->offload_msg, xfer, 1);
+ st->offload_msg.offload = st->offload;
+
+ return devm_spi_optimize_message(&st->spi->dev, st->spi, &st->offload_msg);
+}
+
/*
* This executes a data sample transfer for when the device connections are
* in "3-wire" mode, selected when the adi,sdi-pin device tree property is
@@ -689,7 +942,16 @@ static int ad4000_prepare_3wire_mode_message(struct ad4000_state *st,
xfers[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &st->scan.data;
- xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[1].len = chan->scan_type.realbits > 16 ? 4 : 2;
+
+ /*
+ * If the device is set up for SPI offloading, the IIO channel scan_type
+ * endianness is set to IIO_CPU. When that is the case, use larger SPI word
+ * sizes for single-shot reads too, so sample data is handled correctly in
+ * ad4000_single_conversion() according to the scan_type endianness.
+ */
+ if (chan->scan_type.endianness != IIO_BE)
+ xfers[1].bits_per_word = chan->scan_type.realbits;
xfers[1].delay.value = st->time_spec->t_quiet2_ns;
xfers[1].delay.unit = SPI_DELAY_UNIT_NSECS;
@@ -733,6 +995,9 @@ static int ad4000_config(struct ad4000_state *st)
if (device_property_present(&st->spi->dev, "adi,high-z-input"))
reg_val |= FIELD_PREP(AD4000_CFG_HIGHZ, 1);
+ if (st->using_offload)
+ reg_val |= FIELD_PREP(AD4000_CFG_TURBO, 1);
+
return ad4000_write_reg(st, reg_val);
}
@@ -755,6 +1020,7 @@ static int ad4000_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
st->spi = spi;
st->time_spec = chip->time_spec;
+ st->max_rate_hz = chip->max_rate_hz;
ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(ad4000_power_supplies),
ad4000_power_supplies);
@@ -772,6 +1038,26 @@ static int ad4000_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(st->cnv_gpio),
"Failed to get CNV GPIO");
+ st->offload = devm_spi_offload_get(dev, spi, &ad4000_offload_config);
+ ret = PTR_ERR_OR_ZERO(st->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get offload\n");
+
+ st->using_offload = !IS_ERR(st->offload);
+ if (st->using_offload) {
+ indio_dev->setup_ops = &ad4000_offload_buffer_setup_ops;
+ ret = ad4000_spi_offload_setup(indio_dev, st);
+ if (ret)
+ return ret;
+ } else {
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad4000_trigger_handler,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
ret = device_property_match_property_string(dev, "adi,sdi-pin",
ad4000_sdi_pin,
ARRAY_SIZE(ad4000_sdi_pin));
@@ -784,7 +1070,6 @@ static int ad4000_probe(struct spi_device *spi)
switch (st->sdi_pin) {
case AD4000_SDI_MOSI:
indio_dev->info = &ad4000_reg_access_info;
- indio_dev->channels = chip->reg_access_chan_spec;
/*
* In "3-wire mode", the ADC SDI line must be kept high when
@@ -796,9 +1081,26 @@ static int ad4000_probe(struct spi_device *spi)
if (ret < 0)
return ret;
+ if (st->using_offload) {
+ indio_dev->channels = &chip->reg_access_offload_chan_spec;
+ indio_dev->num_channels = 1;
+ ret = ad4000_prepare_offload_message(st, indio_dev->channels);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI msg\n");
+ } else {
+ indio_dev->channels = chip->reg_access_chan_spec;
+ indio_dev->num_channels = ARRAY_SIZE(chip->reg_access_chan_spec);
+ }
+
+ /*
+ * Call ad4000_prepare_3wire_mode_message() so single-shot read
+ * SPI messages are always initialized.
+ */
ret = ad4000_prepare_3wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI msg\n");
ret = ad4000_config(st);
if (ret < 0)
@@ -806,19 +1108,38 @@ static int ad4000_probe(struct spi_device *spi)
break;
case AD4000_SDI_VIO:
- indio_dev->info = &ad4000_info;
- indio_dev->channels = chip->chan_spec;
+ if (st->using_offload) {
+ indio_dev->info = &ad4000_offload_info;
+ indio_dev->channels = &chip->offload_chan_spec;
+ indio_dev->num_channels = 1;
+
+ ret = ad4000_prepare_offload_message(st, indio_dev->channels);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI msg\n");
+ } else {
+ indio_dev->info = &ad4000_info;
+ indio_dev->channels = chip->chan_spec;
+ indio_dev->num_channels = ARRAY_SIZE(chip->chan_spec);
+ }
+
ret = ad4000_prepare_3wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI msg\n");
break;
case AD4000_SDI_CS:
+ if (st->using_offload)
+ return dev_err_probe(dev, -EPROTONOSUPPORT,
+ "Unsupported sdi-pin + offload config\n");
indio_dev->info = &ad4000_info;
indio_dev->channels = chip->chan_spec;
+ indio_dev->num_channels = ARRAY_SIZE(chip->chan_spec);
ret = ad4000_prepare_4wire_mode_message(st, &indio_dev->channels[0]);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI msg\n");
break;
case AD4000_SDI_GND:
@@ -830,7 +1151,6 @@ static int ad4000_probe(struct spi_device *spi)
}
indio_dev->name = chip->dev_name;
- indio_dev->num_channels = 2;
ret = devm_mutex_init(dev, &st->lock);
if (ret)
@@ -853,12 +1173,6 @@ static int ad4000_probe(struct spi_device *spi)
ad4000_fill_scale_tbl(st, &indio_dev->channels[0]);
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- &iio_pollfunc_store_time,
- &ad4000_trigger_handler, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
@@ -947,3 +1261,4 @@ module_spi_driver(ad4000_driver);
MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD4000 ADC driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
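
The single-conversion hunk above branches on scan_type endianness because offloaded (IIO_CPU) and non-offloaded (IIO_BE) setups store samples differently. A minimal sketch of that extraction pattern follows; the struct layout and names are illustrative, not the driver's.

#include <linux/iio/iio.h>
#include <asm/byteorder.h>

struct example_scan {
	union {
		__be16 buf16_be;
		__be32 buf32_be;
		u16 buf16;
		u32 buf32;
	} data;
};

static u32 example_extract_sample(const struct iio_chan_spec *chan,
				  const struct example_scan *scan)
{
	u32 sample;

	if (chan->scan_type.endianness == IIO_BE) {
		/* Non-offload path: wire data arrives big-endian. */
		if (chan->scan_type.realbits > 16)
			sample = be32_to_cpu(scan->data.buf32_be);
		else
			sample = be16_to_cpu(scan->data.buf16_be);
	} else {
		/* Offload path (IIO_CPU): already in native byte order. */
		if (chan->scan_type.realbits > 16)
			sample = scan->data.buf32;
		else
			sample = scan->data.buf16;
	}

	return sample >> chan->scan_type.shift;
}
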
diff --git a/drivers/iio/adc/ad4030.c b/drivers/iio/adc/ad4030.c
index 9a020680885d..1bc2f9a22470 100644
--- a/drivers/iio/adc/ad4030.c
+++ b/drivers/iio/adc/ad4030.c
@@ -147,7 +147,6 @@ struct ad4030_state {
struct spi_device *spi;
struct regmap *regmap;
const struct ad4030_chip_info *chip;
- const struct iio_scan_type *current_scan_type;
struct gpio_desc *cnv_gpio;
int vref_uv;
int vio_uv;
@@ -245,7 +244,6 @@ static int ad4030_enter_config_mode(struct ad4030_state *st)
struct spi_transfer xfer = {
.tx_buf = st->tx_data,
- .bits_per_word = 8,
.len = 1,
.speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
};
@@ -261,7 +259,6 @@ static int ad4030_exit_config_mode(struct ad4030_state *st)
struct spi_transfer xfer = {
.tx_buf = st->tx_data,
- .bits_per_word = 8,
.len = 3,
.speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
};
@@ -277,7 +274,6 @@ static int ad4030_spi_read(void *context, const void *reg, size_t reg_size,
struct spi_transfer xfer = {
.tx_buf = st->tx_data,
.rx_buf = st->rx_data.raw,
- .bits_per_word = 8,
.len = reg_size + val_size,
.speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
};
@@ -312,7 +308,6 @@ static int ad4030_spi_write(void *context, const void *data, size_t count)
((u8 *)data)[2] == 0x81;
struct spi_transfer xfer = {
.tx_buf = st->tx_data,
- .bits_per_word = 8,
.len = count,
.speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
};
@@ -390,16 +385,17 @@ static int ad4030_get_chan_scale(struct iio_dev *indio_dev,
struct ad4030_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
- if (chan->differential) {
- scan_type = iio_get_current_scan_type(indio_dev,
- st->chip->channels);
+ scan_type = iio_get_current_scan_type(indio_dev, st->chip->channels);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ if (chan->differential)
*val = (st->vref_uv * 2) / MILLI;
- *val2 = scan_type->realbits;
- return IIO_VAL_FRACTIONAL_LOG2;
- }
+ else
+ *val = st->vref_uv / MILLI;
+
+ *val2 = scan_type->realbits;
- *val = st->vref_uv / MILLI;
- *val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -561,11 +557,6 @@ static int ad4030_set_mode(struct iio_dev *indio_dev, unsigned long mask)
st->mode = AD4030_OUT_DATA_MD_DIFF;
}
- st->current_scan_type = iio_get_current_scan_type(indio_dev,
- st->chip->channels);
- if (IS_ERR(st->current_scan_type))
- return PTR_ERR(st->current_scan_type);
-
return regmap_update_bits(st->regmap, AD4030_REG_MODES,
AD4030_REG_MODES_MASK_OUT_DATA_MODE,
st->mode);
@@ -613,15 +604,20 @@ static void ad4030_extract_interleaved(u8 *src, u32 *ch0, u32 *ch1)
static int ad4030_conversion(struct iio_dev *indio_dev)
{
struct ad4030_state *st = iio_priv(indio_dev);
- unsigned char diff_realbytes =
- BITS_TO_BYTES(st->current_scan_type->realbits);
- unsigned char diff_storagebytes =
- BITS_TO_BYTES(st->current_scan_type->storagebits);
+ const struct iio_scan_type *scan_type;
+ unsigned char diff_realbytes, diff_storagebytes;
unsigned int bytes_to_read;
unsigned long cnv_nb = BIT(st->avg_log2);
unsigned int i;
int ret;
+ scan_type = iio_get_current_scan_type(indio_dev, st->chip->channels);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ diff_realbytes = BITS_TO_BYTES(scan_type->realbits);
+ diff_storagebytes = BITS_TO_BYTES(scan_type->storagebits);
+
/* Number of bytes for one differential channel */
bytes_to_read = diff_realbytes;
/* Add one byte if we are using a differential + common byte mode */
@@ -646,6 +642,12 @@ static int ad4030_conversion(struct iio_dev *indio_dev)
&st->rx_data.dual.diff[0],
&st->rx_data.dual.diff[1]);
+ /*
+ * If no common-mode voltage channel is enabled, we can use the raw
+ * data as is. Otherwise, we need to rearrange the data a bit to match
+ * the natural alignment of the IIO buffer.
+ */
+
if (st->mode != AD4030_OUT_DATA_MD_16_DIFF_8_COM &&
st->mode != AD4030_OUT_DATA_MD_24_DIFF_8_COM)
return 0;
@@ -672,11 +674,6 @@ static int ad4030_single_conversion(struct iio_dev *indio_dev,
if (ret)
return ret;
- st->current_scan_type = iio_get_current_scan_type(indio_dev,
- st->chip->channels);
- if (IS_ERR(st->current_scan_type))
- return PTR_ERR(st->current_scan_type);
-
ret = ad4030_conversion(indio_dev);
if (ret)
return ret;
@@ -706,8 +703,8 @@ static irqreturn_t ad4030_trigger_handler(int irq, void *p)
if (ret)
goto out;
- iio_push_to_buffers_with_timestamp(indio_dev, st->rx_data.raw,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &st->rx_data, sizeof(st->rx_data),
+ pf->timestamp);
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -867,6 +864,12 @@ static int ad4030_get_current_scan_type(const struct iio_dev *indio_dev,
return st->avg_log2 ? AD4030_SCAN_TYPE_AVG : AD4030_SCAN_TYPE_NORMAL;
}
+static int ad4030_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ return ad4030_set_mode(indio_dev, *scan_mask);
+}
+
static const struct iio_info ad4030_iio_info = {
.read_avail = ad4030_read_avail,
.read_raw = ad4030_read_raw,
@@ -874,13 +877,9 @@ static const struct iio_info ad4030_iio_info = {
.debugfs_reg_access = ad4030_reg_access,
.read_label = ad4030_read_label,
.get_current_scan_type = ad4030_get_current_scan_type,
+ .update_scan_mode = ad4030_update_scan_mode,
};
-static int ad4030_buffer_preenable(struct iio_dev *indio_dev)
-{
- return ad4030_set_mode(indio_dev, *indio_dev->active_scan_mask);
-}
-
static bool ad4030_validate_scan_mask(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
@@ -894,7 +893,6 @@ static bool ad4030_validate_scan_mask(struct iio_dev *indio_dev,
}
static const struct iio_buffer_setup_ops ad4030_buffer_setup_ops = {
- .preenable = ad4030_buffer_preenable,
.validate_scan_mask = ad4030_validate_scan_mask,
};
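
The ad4030 hunks drop the cached current_scan_type pointer in favour of resolving it at each use, since the active scan type changes with the averaging mode and a stored pointer can go stale. A minimal sketch of that lookup pattern; the function name is illustrative.

#include <linux/err.h>
#include <linux/iio/iio.h>

static int example_current_realbits(struct iio_dev *indio_dev,
				    const struct iio_chan_spec *chan)
{
	const struct iio_scan_type *scan_type;

	/* Query at point of use instead of caching across mode changes. */
	scan_type = iio_get_current_scan_type(indio_dev, chan);
	if (IS_ERR(scan_type))
		return PTR_ERR(scan_type);

	return scan_type->realbits;
}
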
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 0f4c9cd6c102..6cf790ff3eb5 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -522,15 +522,15 @@ static int ad4130_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static void ad4130_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int ad4130_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct ad4130_state *st = gpiochip_get_data(gc);
unsigned int mask = FIELD_PREP(AD4130_IO_CONTROL_GPIO_DATA_MASK,
BIT(offset));
- regmap_update_bits(st->regmap, AD4130_IO_CONTROL_REG, mask,
- value ? mask : 0);
+ return regmap_update_bits(st->regmap, AD4130_IO_CONTROL_REG, mask,
+ value ? mask : 0);
}
static int ad4130_set_mode(struct ad4130_state *st, enum ad4130_mode mode)
@@ -2064,7 +2064,7 @@ static int ad4130_probe(struct spi_device *spi)
st->gc.can_sleep = true;
st->gc.init_valid_mask = ad4130_gpio_init_valid_mask;
st->gc.get_direction = ad4130_gpio_get_direction;
- st->gc.set = ad4130_gpio_set;
+ st->gc.set_rv = ad4130_gpio_set;
ret = devm_gpiochip_add_data(dev, &st->gc, st);
if (ret)
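
The ad4130 hunk converts the GPIO setter to the int-returning ->set_rv() callback so regmap errors propagate to the caller instead of being silently dropped by the legacy void ->set(). A sketch of the shape, with an illustrative register address.

#include <linux/gpio/driver.h>
#include <linux/regmap.h>

static int example_gpio_set(struct gpio_chip *gc, unsigned int offset,
			    int value)
{
	struct regmap *map = gpiochip_get_data(gc);

	/* Propagate any bus error instead of swallowing it. */
	return regmap_update_bits(map, 0x10 /* example GPIO data reg */,
				  BIT(offset), value ? BIT(offset) : 0);
}
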
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index 8222c8ab2940..cda419638d9a 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -105,9 +105,7 @@
#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
/* Max number of voltage input channels. */
-#define AD4695_MAX_CHANNELS 16
-/* Max size of 1 raw sample in bytes. */
-#define AD4695_MAX_CHANNEL_SIZE 2
+#define AD4695_MAX_VIN_CHANNELS 16
enum ad4695_in_pair {
AD4695_IN_PAIR_REFGND,
@@ -145,8 +143,8 @@ struct ad4695_state {
/* offload also requires separate gpio to manually control CNV */
struct gpio_desc *cnv_gpio;
/* voltages channels plus temperature and timestamp */
- struct iio_chan_spec iio_chan[AD4695_MAX_CHANNELS + 2];
- struct ad4695_channel_config channels_cfg[AD4695_MAX_CHANNELS];
+ struct iio_chan_spec iio_chan[AD4695_MAX_VIN_CHANNELS + 2];
+ struct ad4695_channel_config channels_cfg[AD4695_MAX_VIN_CHANNELS];
const struct ad4695_chip_info *chip_info;
int sample_freq_range[3];
/* Reference voltage. */
@@ -159,11 +157,10 @@ struct ad4695_state {
* to control CS and add a delay between the last SCLK and next
* CNV rising edges.
*/
- struct spi_transfer buf_read_xfer[AD4695_MAX_CHANNELS * 2 + 3];
+ struct spi_transfer buf_read_xfer[AD4695_MAX_VIN_CHANNELS * 2 + 3];
struct spi_message buf_read_msg;
/* Raw conversion data received. */
- u8 buf[ALIGN((AD4695_MAX_CHANNELS + 2) * AD4695_MAX_CHANNEL_SIZE,
- sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
+ IIO_DECLARE_DMA_BUFFER_WITH_TS(u16, buf, AD4695_MAX_VIN_CHANNELS + 1);
u16 raw_data;
/* Commands to send for single conversion. */
u16 cnv_cmd;
@@ -660,9 +657,8 @@ static int ad4695_buffer_preenable(struct iio_dev *indio_dev)
iio_for_each_active_channel(indio_dev, bit) {
xfer = &st->buf_read_xfer[num_xfer];
xfer->bits_per_word = 16;
- xfer->rx_buf = &st->buf[rx_buf_offset];
+ xfer->rx_buf = &st->buf[rx_buf_offset++];
xfer->len = 2;
- rx_buf_offset += xfer->len;
if (bit == temp_chan_bit) {
temp_en = 1;
@@ -801,7 +797,8 @@ static irqreturn_t ad4695_trigger_handler(int irq, void *p)
if (ret)
goto out;
- iio_push_to_buffers_with_timestamp(indio_dev, st->buf, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, st->buf, sizeof(st->buf),
+ pf->timestamp);
out:
iio_trigger_notify_done(indio_dev->trig);
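
The ad4695 hunks replace the hand-rolled aligned buffer with the IIO_DECLARE_DMA_BUFFER_WITH_TS() helper and switch to the size-checked push call, pairing the declaration and the push through sizeof(). A minimal sketch assuming the same helpers as the hunk above; names and the channel count are illustrative.

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

#define EXAMPLE_MAX_VIN_CHANNELS 16

struct example_state {
	/* Voltage channels plus temperature, one u16 sample each. */
	IIO_DECLARE_DMA_BUFFER_WITH_TS(u16, buf, EXAMPLE_MAX_VIN_CHANNELS + 1);
};

static int example_push(struct iio_dev *indio_dev, struct example_state *st,
			s64 timestamp)
{
	/* Passing the total size lets the core catch overruns. */
	return iio_push_to_buffers_with_ts(indio_dev, st->buf,
					   sizeof(st->buf), timestamp);
}
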
diff --git a/drivers/iio/adc/ad4851.c b/drivers/iio/adc/ad4851.c
index 98ebc853db79..f1d2e2896f2a 100644
--- a/drivers/iio/adc/ad4851.c
+++ b/drivers/iio/adc/ad4851.c
@@ -1034,7 +1034,7 @@ static int ad4858_parse_channels(struct iio_dev *indio_dev)
struct device *dev = &st->spi->dev;
struct iio_chan_spec *ad4851_channels;
const struct iio_chan_spec ad4851_chan = AD4858_IIO_CHANNEL;
- int ret;
+ int ret, i = 0;
ret = ad4851_parse_channels_common(indio_dev, &ad4851_channels,
ad4851_chan);
@@ -1042,15 +1042,15 @@ static int ad4858_parse_channels(struct iio_dev *indio_dev)
return ret;
device_for_each_child_node_scoped(dev, child) {
- ad4851_channels->has_ext_scan_type = 1;
+ ad4851_channels[i].has_ext_scan_type = 1;
if (fwnode_property_read_bool(child, "bipolar")) {
- ad4851_channels->ext_scan_type = ad4851_scan_type_20_b;
- ad4851_channels->num_ext_scan_type = ARRAY_SIZE(ad4851_scan_type_20_b);
+ ad4851_channels[i].ext_scan_type = ad4851_scan_type_20_b;
+ ad4851_channels[i].num_ext_scan_type = ARRAY_SIZE(ad4851_scan_type_20_b);
} else {
- ad4851_channels->ext_scan_type = ad4851_scan_type_20_u;
- ad4851_channels->num_ext_scan_type = ARRAY_SIZE(ad4851_scan_type_20_u);
+ ad4851_channels[i].ext_scan_type = ad4851_scan_type_20_u;
+ ad4851_channels[i].num_ext_scan_type = ARRAY_SIZE(ad4851_scan_type_20_u);
}
- ad4851_channels++;
+ i++;
}
indio_dev->channels = ad4851_channels;
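
The ad4851 hunk fixes an iteration bug: the loop advanced the channel array's base pointer, so the assignment after it published a past-the-end address. Indexing leaves the base intact. A reduced sketch of the corrected shape; names are illustrative.

#include <linux/iio/iio.h>

static void example_mark_ext_scan_types(struct iio_dev *indio_dev,
					struct iio_chan_spec *chans,
					unsigned int num)
{
	unsigned int i;

	/* Index instead of advancing chans itself. */
	for (i = 0; i < num; i++)
		chans[i].has_ext_scan_type = 1;

	/* chans still points at the array base here. */
	indio_dev->channels = chans;
	indio_dev->num_channels = num;
}
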
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
index 931ff71b2888..647a7852dd8d 100644
--- a/drivers/iio/adc/ad7091r-base.c
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -387,13 +387,8 @@ EXPORT_SYMBOL_NS_GPL(ad7091r_writeable_reg, "IIO_AD7091R");
bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
{
- switch (reg) {
- case AD7091R_REG_RESULT:
- case AD7091R_REG_ALERT:
- return true;
- default:
- return false;
- }
+ /* The volatile ad7091r registers are also the only RO ones. */
+ return !ad7091r_writeable_reg(dev, reg);
}
EXPORT_SYMBOL_NS_GPL(ad7091r_volatile_reg, "IIO_AD7091R");
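
The ad7091r hunk collapses the volatile-register switch into the complement of the writeable check, which holds because this part's volatile registers are exactly its read-only ones. A sketch of the convention with illustrative register numbers.

#include <linux/regmap.h>

static bool example_writeable_reg(struct device *dev, unsigned int reg)
{
	return reg >= 0x02;	/* 0x00/0x01 are RO result/alert regs */
}

static bool example_volatile_reg(struct device *dev, unsigned int reg)
{
	/* The volatile registers are also the only RO ones. */
	return !example_writeable_reg(dev, reg);
}
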
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 3ea81a98e455..92596f15e797 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -32,7 +32,7 @@
#define AD7124_IO_CONTROL_2 0x04
#define AD7124_ID 0x05
#define AD7124_ERROR 0x06
-#define AD7124_ERROR_EN 0x07
+#define AD7124_ERROR_EN 0x07
#define AD7124_MCLK_COUNT 0x08
#define AD7124_CHANNEL(x) (0x09 + (x))
#define AD7124_CONFIG(x) (0x19 + (x))
@@ -41,68 +41,58 @@
#define AD7124_GAIN(x) (0x31 + (x))
/* AD7124_STATUS */
-#define AD7124_STATUS_POR_FLAG_MSK BIT(4)
+#define AD7124_STATUS_POR_FLAG BIT(4)
/* AD7124_ADC_CONTROL */
-#define AD7124_ADC_STATUS_EN_MSK BIT(10)
-#define AD7124_ADC_STATUS_EN(x) FIELD_PREP(AD7124_ADC_STATUS_EN_MSK, x)
-#define AD7124_ADC_CTRL_REF_EN_MSK BIT(8)
-#define AD7124_ADC_CTRL_REF_EN(x) FIELD_PREP(AD7124_ADC_CTRL_REF_EN_MSK, x)
-#define AD7124_ADC_CTRL_PWR_MSK GENMASK(7, 6)
-#define AD7124_ADC_CTRL_PWR(x) FIELD_PREP(AD7124_ADC_CTRL_PWR_MSK, x)
-#define AD7124_ADC_CTRL_MODE_MSK GENMASK(5, 2)
-#define AD7124_ADC_CTRL_MODE(x) FIELD_PREP(AD7124_ADC_CTRL_MODE_MSK, x)
-
-#define AD7124_MODE_CAL_INT_ZERO 0x5 /* Internal Zero-Scale Calibration */
-#define AD7124_MODE_CAL_INT_FULL 0x6 /* Internal Full-Scale Calibration */
-#define AD7124_MODE_CAL_SYS_ZERO 0x7 /* System Zero-Scale Calibration */
-#define AD7124_MODE_CAL_SYS_FULL 0x8 /* System Full-Scale Calibration */
-
-/* AD7124 ID */
-#define AD7124_DEVICE_ID_MSK GENMASK(7, 4)
-#define AD7124_DEVICE_ID_GET(x) FIELD_GET(AD7124_DEVICE_ID_MSK, x)
-#define AD7124_SILICON_REV_MSK GENMASK(3, 0)
-#define AD7124_SILICON_REV_GET(x) FIELD_GET(AD7124_SILICON_REV_MSK, x)
-
-#define CHIPID_AD7124_4 0x0
-#define CHIPID_AD7124_8 0x1
+#define AD7124_ADC_CONTROL_MODE GENMASK(5, 2)
+#define AD7124_ADC_CONTROL_MODE_CONTINUOUS 0
+#define AD7124_ADC_CONTROL_MODE_SINGLE 1
+#define AD7124_ADC_CONTROL_MODE_STANDBY 2
+#define AD7124_ADC_CONTROL_MODE_POWERDOWN 3
+#define AD7124_ADC_CONTROL_MODE_IDLE 4
+#define AD7124_ADC_CONTROL_MODE_INT_OFFSET_CALIB 5 /* Internal Zero-Scale Calibration */
+#define AD7124_ADC_CONTROL_MODE_INT_GAIN_CALIB 6 /* Internal Full-Scale Calibration */
+#define AD7124_ADC_CONTROL_MODE_SYS_OFFSET_CALIB 7 /* System Zero-Scale Calibration */
+#define AD7124_ADC_CONTROL_MODE_SYS_GAIN_CALIB 8 /* System Full-Scale Calibration */
+#define AD7124_ADC_CONTROL_POWER_MODE GENMASK(7, 6)
+#define AD7124_ADC_CONTROL_POWER_MODE_LOW 0
+#define AD7124_ADC_CONTROL_POWER_MODE_MID 1
+#define AD7124_ADC_CONTROL_POWER_MODE_FULL 2
+#define AD7124_ADC_CONTROL_REF_EN BIT(8)
+#define AD7124_ADC_CONTROL_DATA_STATUS BIT(10)
+
+/* AD7124_ID */
+#define AD7124_ID_SILICON_REVISION GENMASK(3, 0)
+#define AD7124_ID_DEVICE_ID GENMASK(7, 4)
+#define AD7124_ID_DEVICE_ID_AD7124_4 0x0
+#define AD7124_ID_DEVICE_ID_AD7124_8 0x1
/* AD7124_CHANNEL_X */
-#define AD7124_CHANNEL_EN_MSK BIT(15)
-#define AD7124_CHANNEL_EN(x) FIELD_PREP(AD7124_CHANNEL_EN_MSK, x)
-#define AD7124_CHANNEL_SETUP_MSK GENMASK(14, 12)
-#define AD7124_CHANNEL_SETUP(x) FIELD_PREP(AD7124_CHANNEL_SETUP_MSK, x)
-#define AD7124_CHANNEL_AINP_MSK GENMASK(9, 5)
-#define AD7124_CHANNEL_AINP(x) FIELD_PREP(AD7124_CHANNEL_AINP_MSK, x)
-#define AD7124_CHANNEL_AINM_MSK GENMASK(4, 0)
-#define AD7124_CHANNEL_AINM(x) FIELD_PREP(AD7124_CHANNEL_AINM_MSK, x)
+#define AD7124_CHANNEL_ENABLE BIT(15)
+#define AD7124_CHANNEL_SETUP GENMASK(14, 12)
+#define AD7124_CHANNEL_AINP GENMASK(9, 5)
+#define AD7124_CHANNEL_AINM GENMASK(4, 0)
+#define AD7124_CHANNEL_AINx_TEMPSENSOR 16
+#define AD7124_CHANNEL_AINx_AVSS 17
/* AD7124_CONFIG_X */
-#define AD7124_CONFIG_BIPOLAR_MSK BIT(11)
-#define AD7124_CONFIG_BIPOLAR(x) FIELD_PREP(AD7124_CONFIG_BIPOLAR_MSK, x)
-#define AD7124_CONFIG_REF_SEL_MSK GENMASK(4, 3)
-#define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x)
-#define AD7124_CONFIG_PGA_MSK GENMASK(2, 0)
-#define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x)
-#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5)
-#define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x)
+#define AD7124_CONFIG_BIPOLAR BIT(11)
+#define AD7124_CONFIG_IN_BUFF GENMASK(6, 5)
+#define AD7124_CONFIG_AIN_BUFP BIT(6)
+#define AD7124_CONFIG_AIN_BUFM BIT(5)
+#define AD7124_CONFIG_REF_SEL GENMASK(4, 3)
+#define AD7124_CONFIG_PGA GENMASK(2, 0)
/* AD7124_FILTER_X */
-#define AD7124_FILTER_FS_MSK GENMASK(10, 0)
-#define AD7124_FILTER_FS(x) FIELD_PREP(AD7124_FILTER_FS_MSK, x)
-#define AD7124_FILTER_TYPE_MSK GENMASK(23, 21)
-#define AD7124_FILTER_TYPE_SEL(x) FIELD_PREP(AD7124_FILTER_TYPE_MSK, x)
+#define AD7124_FILTER_FS GENMASK(10, 0)
+#define AD7124_FILTER_FILTER GENMASK(23, 21)
+#define AD7124_FILTER_FILTER_SINC4 0
+#define AD7124_FILTER_FILTER_SINC3 2
-#define AD7124_SINC3_FILTER 2
-#define AD7124_SINC4_FILTER 0
-
-#define AD7124_CONF_ADDR_OFFSET 20
#define AD7124_MAX_CONFIGS 8
#define AD7124_MAX_CHANNELS 16
/* AD7124 input sources */
-#define AD7124_INPUT_TEMPSENSOR 16
-#define AD7124_INPUT_AVSS 17
enum ad7124_ids {
ID_AD7124_4,
@@ -206,12 +196,12 @@ struct ad7124_state {
static struct ad7124_chip_info ad7124_chip_info_tbl[] = {
[ID_AD7124_4] = {
.name = "ad7124-4",
- .chip_id = CHIPID_AD7124_4,
+ .chip_id = AD7124_ID_DEVICE_ID_AD7124_4,
.num_inputs = 8,
},
[ID_AD7124_8] = {
.name = "ad7124-8",
- .chip_id = CHIPID_AD7124_8,
+ .chip_id = AD7124_ID_DEVICE_ID_AD7124_8,
.num_inputs = 16,
},
};
@@ -260,8 +250,8 @@ static int ad7124_set_mode(struct ad_sigma_delta *sd,
{
struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
- st->adc_control &= ~AD7124_ADC_CTRL_MODE_MSK;
- st->adc_control |= AD7124_ADC_CTRL_MODE(mode);
+ st->adc_control &= ~AD7124_ADC_CONTROL_MODE;
+ st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_MODE, mode);
return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
}
@@ -300,41 +290,15 @@ static int ad7124_get_3db_filter_freq(struct ad7124_state *st,
fadc = st->channels[channel].cfg.odr;
switch (st->channels[channel].cfg.filter_type) {
- case AD7124_SINC3_FILTER:
+ case AD7124_FILTER_FILTER_SINC3:
+ return DIV_ROUND_CLOSEST(fadc * 272, 1000);
+ case AD7124_FILTER_FILTER_SINC4:
return DIV_ROUND_CLOSEST(fadc * 230, 1000);
- case AD7124_SINC4_FILTER:
- return DIV_ROUND_CLOSEST(fadc * 262, 1000);
default:
return -EINVAL;
}
}
-static void ad7124_set_3db_filter_freq(struct ad7124_state *st, unsigned int channel,
- unsigned int freq)
-{
- unsigned int sinc4_3db_odr;
- unsigned int sinc3_3db_odr;
- unsigned int new_filter;
- unsigned int new_odr;
-
- sinc4_3db_odr = DIV_ROUND_CLOSEST(freq * 1000, 230);
- sinc3_3db_odr = DIV_ROUND_CLOSEST(freq * 1000, 262);
-
- if (sinc4_3db_odr > sinc3_3db_odr) {
- new_filter = AD7124_SINC3_FILTER;
- new_odr = sinc4_3db_odr;
- } else {
- new_filter = AD7124_SINC4_FILTER;
- new_odr = sinc3_3db_odr;
- }
-
- if (new_odr != st->channels[channel].cfg.odr)
- st->channels[channel].cfg.live = false;
-
- st->channels[channel].cfg.filter_type = new_filter;
- st->channels[channel].cfg.odr = new_odr;
-}
-
static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_state *st,
struct ad7124_channel_config *cfg)
{
@@ -413,8 +377,7 @@ static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channe
return 0;
case AD7124_INT_REF:
cfg->vref_mv = 2500;
- st->adc_control &= ~AD7124_ADC_CTRL_REF_EN_MSK;
- st->adc_control |= AD7124_ADC_CTRL_REF_EN(1);
+ st->adc_control |= AD7124_ADC_CONTROL_REF_EN;
return 0;
default:
return dev_err_probe(dev, -EINVAL, "Invalid reference %d\n", refsel);
@@ -438,18 +401,20 @@ static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_co
if (ret)
return ret;
- tmp = (cfg->buf_positive << 1) + cfg->buf_negative;
- val = AD7124_CONFIG_BIPOLAR(cfg->bipolar) | AD7124_CONFIG_REF_SEL(cfg->refsel) |
- AD7124_CONFIG_IN_BUFF(tmp) | AD7124_CONFIG_PGA(cfg->pga_bits);
+ val = FIELD_PREP(AD7124_CONFIG_BIPOLAR, cfg->bipolar) |
+ FIELD_PREP(AD7124_CONFIG_REF_SEL, cfg->refsel) |
+ (cfg->buf_positive ? AD7124_CONFIG_AIN_BUFP : 0) |
+ (cfg->buf_negative ? AD7124_CONFIG_AIN_BUFM : 0) |
+ FIELD_PREP(AD7124_CONFIG_PGA, cfg->pga_bits);
ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg->cfg_slot), 2, val);
if (ret < 0)
return ret;
- tmp = AD7124_FILTER_TYPE_SEL(cfg->filter_type) |
- AD7124_FILTER_FS(cfg->odr_sel_bits);
+ tmp = FIELD_PREP(AD7124_FILTER_FILTER, cfg->filter_type) |
+ FIELD_PREP(AD7124_FILTER_FS, cfg->odr_sel_bits);
return ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot),
- AD7124_FILTER_TYPE_MSK | AD7124_FILTER_FS_MSK,
+ AD7124_FILTER_FILTER | AD7124_FILTER_FS,
tmp, 3);
}
@@ -514,7 +479,8 @@ static int ad7124_enable_channel(struct ad7124_state *st, struct ad7124_channel
{
ch->cfg.live = true;
return ad_sd_write_reg(&st->sd, AD7124_CHANNEL(ch->nr), 2, ch->ain |
- AD7124_CHANNEL_SETUP(ch->cfg.cfg_slot) | AD7124_CHANNEL_EN(1));
+ FIELD_PREP(AD7124_CHANNEL_SETUP, ch->cfg.cfg_slot) |
+ AD7124_CHANNEL_ENABLE);
}
static int ad7124_prepare_read(struct ad7124_state *st, int address)
@@ -564,8 +530,10 @@ static int ad7124_append_status(struct ad_sigma_delta *sd, bool append)
unsigned int adc_control = st->adc_control;
int ret;
- adc_control &= ~AD7124_ADC_STATUS_EN_MSK;
- adc_control |= AD7124_ADC_STATUS_EN(append);
+ if (append)
+ adc_control |= AD7124_ADC_CONTROL_DATA_STATUS;
+ else
+ adc_control &= ~AD7124_ADC_CONTROL_DATA_STATUS;
ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, adc_control);
if (ret < 0)
@@ -580,7 +548,7 @@ static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
{
struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
- /* The relevant thing here is that AD7124_CHANNEL_EN_MSK is cleared. */
+ /* The relevant thing here is that AD7124_CHANNEL_ENABLE is cleared. */
return ad_sd_write_reg(&st->sd, AD7124_CHANNEL(chan), 2, 0);
}
@@ -739,16 +707,8 @@ static int ad7124_write_raw(struct iio_dev *indio_dev,
st->channels[chan->address].cfg.pga_bits = res;
break;
- case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- if (val2 != 0) {
- ret = -EINVAL;
- break;
- }
-
- ad7124_set_3db_filter_freq(st, chan->address, val);
- break;
default:
- ret = -EINVAL;
+ ret = -EINVAL;
}
mutex_unlock(&st->cfgs_lock);
@@ -802,7 +762,7 @@ static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
if (bit_set)
ret = __ad7124_set_channel(&st->sd, i);
else
- ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK,
+ ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_ENABLE,
0, 2);
if (ret < 0) {
mutex_unlock(&st->cfgs_lock);
@@ -843,14 +803,14 @@ static int ad7124_soft_reset(struct ad7124_state *st)
if (ret < 0)
return dev_err_probe(dev, ret, "Error reading status register\n");
- if (!(readval & AD7124_STATUS_POR_FLAG_MSK))
+ if (!(readval & AD7124_STATUS_POR_FLAG))
break;
/* The AD7124 requires typically 2ms to power up and settle */
usleep_range(100, 2000);
} while (--timeout);
- if (readval & AD7124_STATUS_POR_FLAG_MSK)
+ if (readval & AD7124_STATUS_POR_FLAG)
return dev_err_probe(dev, -EIO, "Soft reset failed\n");
ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(0), 3, &st->gain_default);
@@ -872,8 +832,8 @@ static int ad7124_check_chip_id(struct ad7124_state *st)
if (ret < 0)
return dev_err_probe(dev, ret, "Failure to read ID register\n");
- chip_id = AD7124_DEVICE_ID_GET(readval);
- silicon_rev = AD7124_SILICON_REV_GET(readval);
+ chip_id = FIELD_GET(AD7124_ID_DEVICE_ID, readval);
+ silicon_rev = FIELD_GET(AD7124_ID_SILICON_REVISION, readval);
if (chip_id != st->chip_info->chip_id)
return dev_err_probe(dev, -ENODEV,
@@ -901,7 +861,7 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan
if (ch->syscalib_mode == AD7124_SYSCALIB_ZERO_SCALE) {
ch->cfg.calibration_offset = 0x800000;
- ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_SYS_ZERO,
+ ret = ad_sd_calibrate(&st->sd, AD7124_ADC_CONTROL_MODE_SYS_OFFSET_CALIB,
chan->address);
if (ret < 0)
return ret;
@@ -916,7 +876,7 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan
} else {
ch->cfg.calibration_gain = st->gain_default;
- ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_SYS_FULL,
+ ret = ad_sd_calibrate(&st->sd, AD7124_ADC_CONTROL_MODE_SYS_GAIN_CALIB,
chan->address);
if (ret < 0)
return ret;
@@ -1031,7 +991,7 @@ static bool ad7124_valid_input_select(unsigned int ain, const struct ad7124_chip
if (ain >= info->num_inputs && ain < 16)
return false;
- return ain <= FIELD_MAX(AD7124_CHANNEL_AINM_MSK);
+ return ain <= FIELD_MAX(AD7124_CHANNEL_AINM);
}
static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
@@ -1096,8 +1056,8 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
"diff-channels property of %pfwP contains invalid data\n", child);
st->channels[channel].nr = channel;
- st->channels[channel].ain = AD7124_CHANNEL_AINP(ain[0]) |
- AD7124_CHANNEL_AINM(ain[1]);
+ st->channels[channel].ain = FIELD_PREP(AD7124_CHANNEL_AINP, ain[0]) |
+ FIELD_PREP(AD7124_CHANNEL_AINM, ain[1]);
cfg = &st->channels[channel].cfg;
cfg->bipolar = fwnode_property_read_bool(child, "bipolar");
@@ -1123,8 +1083,8 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
if (num_channels < AD7124_MAX_CHANNELS) {
st->channels[num_channels] = (struct ad7124_channel) {
.nr = num_channels,
- .ain = AD7124_CHANNEL_AINP(AD7124_INPUT_TEMPSENSOR) |
- AD7124_CHANNEL_AINM(AD7124_INPUT_AVSS),
+ .ain = FIELD_PREP(AD7124_CHANNEL_AINP, AD7124_CHANNEL_AINx_TEMPSENSOR) |
+ FIELD_PREP(AD7124_CHANNEL_AINM, AD7124_CHANNEL_AINx_AVSS),
.cfg = {
.bipolar = true,
},
@@ -1175,11 +1135,11 @@ static int ad7124_setup(struct ad7124_state *st)
}
/* Set the power mode */
- st->adc_control &= ~AD7124_ADC_CTRL_PWR_MSK;
- st->adc_control |= AD7124_ADC_CTRL_PWR(power_mode);
+ st->adc_control &= ~AD7124_ADC_CONTROL_POWER_MODE;
+ st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_POWER_MODE, power_mode);
- st->adc_control &= ~AD7124_ADC_CTRL_MODE_MSK;
- st->adc_control |= AD7124_ADC_CTRL_MODE(AD_SD_MODE_IDLE);
+ st->adc_control &= ~AD7124_ADC_CONTROL_MODE;
+ st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_MODE, AD_SD_MODE_IDLE);
mutex_init(&st->cfgs_lock);
INIT_KFIFO(st->live_cfgs_fifo);
@@ -1233,7 +1193,7 @@ static int __ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio
* usual: first zero-scale then full-scale calibration.
*/
if (st->channels[i].cfg.pga_bits > 0) {
- ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_INT_FULL, i);
+ ret = ad_sd_calibrate(&st->sd, AD7124_ADC_CONTROL_MODE_INT_GAIN_CALIB, i);
if (ret < 0)
return ret;
@@ -1250,7 +1210,7 @@ static int __ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio
return ret;
}
- ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_INT_ZERO, i);
+ ret = ad_sd_calibrate(&st->sd, AD7124_ADC_CONTROL_MODE_INT_OFFSET_CALIB, i);
if (ret < 0)
return ret;
@@ -1279,9 +1239,9 @@ static int ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio_d
* The resulting calibration is then also valid for high-speed, so just
* restore adc_control afterwards.
*/
- if (FIELD_GET(AD7124_ADC_CTRL_PWR_MSK, adc_control) >= AD7124_FULL_POWER) {
- st->adc_control &= ~AD7124_ADC_CTRL_PWR_MSK;
- st->adc_control |= AD7124_ADC_CTRL_PWR(AD7124_MID_POWER);
+ if (FIELD_GET(AD7124_ADC_CONTROL_POWER_MODE, adc_control) >= AD7124_FULL_POWER) {
+ st->adc_control &= ~AD7124_ADC_CONTROL_POWER_MODE;
+ st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_POWER_MODE, AD7124_MID_POWER);
}
ret = __ad7124_calibrate_all(st, indio_dev);
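
The ad7124 hunks replace per-field wrapper macros with plain GENMASK()/BIT() masks and FIELD_PREP()/FIELD_GET() at the call sites, so the register layout lives in one place. A minimal sketch of that convention over an illustrative control register.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_CTRL_MODE	GENMASK(5, 2)
#define EXAMPLE_CTRL_REF_EN	BIT(8)

static u16 example_set_mode(u16 ctrl, unsigned int mode)
{
	/* Clear the field, then encode the new value into it. */
	ctrl &= ~EXAMPLE_CTRL_MODE;
	ctrl |= FIELD_PREP(EXAMPLE_CTRL_MODE, mode);

	return ctrl | EXAMPLE_CTRL_REF_EN;
}

static unsigned int example_get_mode(u16 ctrl)
{
	/* Extract and right-align the field. */
	return FIELD_GET(EXAMPLE_CTRL_MODE, ctrl);
}
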
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 69de5886474c..b3e6bd2a55d7 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -230,10 +230,8 @@ struct ad7173_state {
unsigned long long *config_cnts;
struct clk *ext_clk;
struct clk_hw int_clk_hw;
-#if IS_ENABLED(CONFIG_GPIOLIB)
struct regmap *reg_gpiocon_regmap;
struct gpio_regmap *gpio_regmap;
-#endif
};
static unsigned int ad4115_sinc5_data_rates[] = {
@@ -288,8 +286,6 @@ static const char *const ad7173_clk_sel[] = {
"ext-clk", "xtal"
};
-#if IS_ENABLED(CONFIG_GPIOLIB)
-
static const struct regmap_range ad7173_range_gpio[] = {
regmap_reg_range(AD7173_REG_GPIO, AD7173_REG_GPIO),
};
@@ -543,12 +539,6 @@ static int ad7173_gpio_init(struct ad7173_state *st)
return 0;
}
-#else
-static int ad7173_gpio_init(struct ad7173_state *st)
-{
- return 0;
-}
-#endif /* CONFIG_GPIOLIB */
static struct ad7173_state *ad_sigma_delta_to_ad7173(struct ad_sigma_delta *sd)
{
@@ -1797,10 +1787,7 @@ static int ad7173_probe(struct spi_device *spi)
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_GPIOLIB))
- return ad7173_gpio_init(st);
-
- return 0;
+ return ad7173_gpio_init(st);
}
static const struct of_device_id ad7173_of_match[] = {
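
The ad7173 hunks drop the #if IS_ENABLED(CONFIG_GPIOLIB) stub and call the init function unconditionally, presumably because the build dependency is guaranteed elsewhere; where support really is optional, IS_ENABLED() keeps both branches compile-tested. A sketch of that idiom; the helper is illustrative.

#include <linux/kconfig.h>

static int example_gpio_setup(void)
{
	return 0;	/* stand-in for the real GPIO registration */
}

static int example_init(void)
{
	/*
	 * Unlike #ifdef, this branch is always parsed and type-checked;
	 * the compiler discards it as dead code when GPIOLIB is off.
	 */
	if (!IS_ENABLED(CONFIG_GPIOLIB))
		return 0;

	return example_gpio_setup();
}
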
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 7fef2727f89e..3364ac6c4631 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -86,10 +86,9 @@ static irqreturn_t ad7266_trigger_handler(int irq, void *p)
int ret;
ret = spi_read(st->spi, st->data.sample, 4);
- if (ret == 0) {
- iio_push_to_buffers_with_timestamp(indio_dev, &st->data,
- pf->timestamp);
- }
+ if (ret == 0)
+ iio_push_to_buffers_with_ts(indio_dev, &st->data, sizeof(st->data),
+ pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index f9f32737db80..dda2986ccda0 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -572,7 +572,7 @@ static const struct iio_chan_spec_ext_info ad7280_cell_ext_info[] = {
.write = ad7280_store_balance_timer,
.shared = IIO_SEPARATE,
},
- {}
+ { }
};
static const struct iio_event_spec ad7280_events[] = {
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 28b88092b4aa..7c0538ea15c8 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -155,8 +155,8 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
if (b_sent)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, st->rx_buf, sizeof(st->rx_buf),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index aef85093eb16..d96bd12dfea6 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -13,6 +13,7 @@
* ad7381-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7381-4.pdf
* ad7383/4-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7383-4-ad7384-4.pdf
* ad7386/7/8-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7386-4-7387-4-7388-4.pdf
+ * ad7389-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7389-4.pdf
* adaq4370-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4370-4.pdf
* adaq4380-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4380-4.pdf
* adaq4381-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4381-4.pdf
@@ -119,7 +120,8 @@ struct ad7380_chip_info {
const char * const *supplies;
unsigned int num_supplies;
bool external_ref_only;
- bool adaq_internal_ref_only;
+ bool internal_ref_only;
+ unsigned int internal_ref_mv;
const char * const *vcm_supplies;
unsigned int num_vcm_supplies;
const unsigned long *available_scan_masks;
@@ -609,6 +611,7 @@ static const struct ad7380_chip_info ad7380_chip_info = {
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
.max_conversion_rate_hz = 4 * MEGA,
@@ -622,6 +625,7 @@ static const struct ad7380_chip_info ad7381_chip_info = {
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
.max_conversion_rate_hz = 4 * MEGA,
@@ -637,6 +641,7 @@ static const struct ad7380_chip_info ad7383_chip_info = {
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
.max_conversion_rate_hz = 4 * MEGA,
@@ -652,6 +657,7 @@ static const struct ad7380_chip_info ad7384_chip_info = {
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
.max_conversion_rate_hz = 4 * MEGA,
@@ -665,6 +671,7 @@ static const struct ad7380_chip_info ad7386_chip_info = {
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@@ -679,6 +686,7 @@ static const struct ad7380_chip_info ad7387_chip_info = {
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@@ -693,6 +701,7 @@ static const struct ad7380_chip_info ad7388_chip_info = {
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@@ -721,6 +730,7 @@ static const struct ad7380_chip_info ad7381_4_chip_info = {
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
.max_conversion_rate_hz = 4 * MEGA,
@@ -734,6 +744,7 @@ static const struct ad7380_chip_info ad7383_4_chip_info = {
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
@@ -749,6 +760,7 @@ static const struct ad7380_chip_info ad7384_4_chip_info = {
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
@@ -764,6 +776,7 @@ static const struct ad7380_chip_info ad7386_4_chip_info = {
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@@ -778,6 +791,7 @@ static const struct ad7380_chip_info ad7387_4_chip_info = {
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@@ -792,12 +806,28 @@ static const struct ad7380_chip_info ad7388_4_chip_info = {
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
.max_conversion_rate_hz = 4 * MEGA,
};
+static const struct ad7380_chip_info ad7389_4_chip_info = {
+ .name = "ad7389-4",
+ .channels = ad7380_4_channels,
+ .offload_channels = ad7380_4_offload_channels,
+ .num_channels = ARRAY_SIZE(ad7380_4_channels),
+ .num_simult_channels = 4,
+ .supplies = ad7380_supplies,
+ .num_supplies = ARRAY_SIZE(ad7380_supplies),
+ .internal_ref_only = true,
+ .internal_ref_mv = AD7380_INTERNAL_REF_MV,
+ .available_scan_masks = ad7380_4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
+};
+
static const struct ad7380_chip_info adaq4370_4_chip_info = {
.name = "adaq4370-4",
.channels = adaq4380_4_channels,
@@ -806,7 +836,8 @@ static const struct ad7380_chip_info adaq4370_4_chip_info = {
.num_simult_channels = 4,
.supplies = adaq4380_supplies,
.num_supplies = ARRAY_SIZE(adaq4380_supplies),
- .adaq_internal_ref_only = true,
+ .internal_ref_only = true,
+ .internal_ref_mv = ADAQ4380_INTERNAL_REF_MV,
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@@ -821,7 +852,8 @@ static const struct ad7380_chip_info adaq4380_4_chip_info = {
.num_simult_channels = 4,
.supplies = adaq4380_supplies,
.num_supplies = ARRAY_SIZE(adaq4380_supplies),
- .adaq_internal_ref_only = true,
+ .internal_ref_only = true,
+ .internal_ref_mv = ADAQ4380_INTERNAL_REF_MV,
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@@ -836,7 +868,8 @@ static const struct ad7380_chip_info adaq4381_4_chip_info = {
.num_simult_channels = 4,
.supplies = adaq4380_supplies,
.num_supplies = ARRAY_SIZE(adaq4380_supplies),
- .adaq_internal_ref_only = true,
+ .internal_ref_only = true,
+ .internal_ref_mv = ADAQ4380_INTERNAL_REF_MV,
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@@ -876,8 +909,7 @@ struct ad7380_state {
* Make the buffer large enough for MAX_NUM_CHANNELS 32-bit samples and
* one 64-bit aligned 64-bit timestamp.
*/
- u8 scan_data[ALIGN(MAX_NUM_CHANNELS * sizeof(u32), sizeof(s64))
- + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
+ IIO_DECLARE_DMA_BUFFER_WITH_TS(u8, scan_data, MAX_NUM_CHANNELS * sizeof(u32));
/* buffers for reading/writing registers */
u16 tx;
u16 rx;
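/*
 * Aside: a sketch of what the macro above replaces. Assuming 8 x 16-bit
 * samples, IIO_DECLARE_DMA_BUFFER_WITH_TS() sizes the array for the
 * samples plus a 64-bit aligned timestamp and applies IIO_DMA_MINALIGN,
 * equivalent to the open-coded ALIGN() form it supersedes. The struct
 * name is illustrative.
 */
#include <linux/types.h>
#include <linux/iio/iio.h>

struct example_scan {
	/* expands to a u16 array padded for an aligned s64 timestamp */
	IIO_DECLARE_DMA_BUFFER_WITH_TS(u16, scan_data, 8);
};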
@@ -1327,8 +1359,8 @@ static irqreturn_t ad7380_trigger_handler(int irq, void *p)
if (ret)
goto out;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->scan_data,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &st->scan_data, sizeof(st->scan_data),
+ pf->timestamp);
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -1859,7 +1891,7 @@ static int ad7380_probe(struct spi_device *spi)
"Failed to enable power supplies\n");
fsleep(T_POWERUP_US);
- if (st->chip_info->adaq_internal_ref_only) {
+ if (st->chip_info->internal_ref_only) {
/*
* ADAQ chips use fixed internal reference but still
* require a specific reference supply to power it.
@@ -1867,7 +1899,7 @@ static int ad7380_probe(struct spi_device *spi)
* in bulk_get_enable().
*/
- st->vref_mv = ADAQ4380_INTERNAL_REF_MV;
+ st->vref_mv = st->chip_info->internal_ref_mv;
/* these chips don't have a register bit for this */
external_ref_en = false;
@@ -1892,7 +1924,8 @@ static int ad7380_probe(struct spi_device *spi)
"Failed to get refio regulator\n");
external_ref_en = ret != -ENODEV;
- st->vref_mv = external_ref_en ? ret / 1000 : AD7380_INTERNAL_REF_MV;
+ st->vref_mv = external_ref_en ? ret / 1000
+ : st->chip_info->internal_ref_mv;
}
if (st->chip_info->num_vcm_supplies > ARRAY_SIZE(st->vcm_mv))
@@ -2045,6 +2078,7 @@ static const struct of_device_id ad7380_of_match_table[] = {
{ .compatible = "adi,ad7386-4", .data = &ad7386_4_chip_info },
{ .compatible = "adi,ad7387-4", .data = &ad7387_4_chip_info },
{ .compatible = "adi,ad7388-4", .data = &ad7388_4_chip_info },
+ { .compatible = "adi,ad7389-4", .data = &ad7389_4_chip_info },
{ .compatible = "adi,adaq4370-4", .data = &adaq4370_4_chip_info },
{ .compatible = "adi,adaq4380-4", .data = &adaq4380_4_chip_info },
{ .compatible = "adi,adaq4381-4", .data = &adaq4381_4_chip_info },
@@ -2066,6 +2100,7 @@ static const struct spi_device_id ad7380_id_table[] = {
{ "ad7386-4", (kernel_ulong_t)&ad7386_4_chip_info },
{ "ad7387-4", (kernel_ulong_t)&ad7387_4_chip_info },
{ "ad7388-4", (kernel_ulong_t)&ad7388_4_chip_info },
+ { "ad7389-4", (kernel_ulong_t)&ad7389_4_chip_info },
{ "adaq4370-4", (kernel_ulong_t)&adaq4370_4_chip_info },
{ "adaq4380-4", (kernel_ulong_t)&adaq4380_4_chip_info },
{ "adaq4381-4", (kernel_ulong_t)&adaq4381_4_chip_info },
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 37b0515cf4fc..ddb607ac1860 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -99,8 +99,8 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
if (b_sent < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, st->data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, st->data, sizeof(st->data),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 703556eb7257..185243dee86e 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
@@ -94,121 +95,35 @@ static const unsigned int ad7616_oversampling_avail[8] = {
1, 2, 4, 8, 16, 32, 64, 128,
};
-static const struct iio_chan_spec ad7605_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(4),
- AD7605_CHANNEL(0),
- AD7605_CHANNEL(1),
- AD7605_CHANNEL(2),
- AD7605_CHANNEL(3),
-};
-
-static const struct iio_chan_spec ad7606_channels_16bit[] = {
- IIO_CHAN_SOFT_TIMESTAMP(8),
- AD7606_CHANNEL(0, 16),
- AD7606_CHANNEL(1, 16),
- AD7606_CHANNEL(2, 16),
- AD7606_CHANNEL(3, 16),
- AD7606_CHANNEL(4, 16),
- AD7606_CHANNEL(5, 16),
- AD7606_CHANNEL(6, 16),
- AD7606_CHANNEL(7, 16),
-};
-
-static const struct iio_chan_spec ad7606_channels_18bit[] = {
- IIO_CHAN_SOFT_TIMESTAMP(8),
- AD7606_CHANNEL(0, 18),
- AD7606_CHANNEL(1, 18),
- AD7606_CHANNEL(2, 18),
- AD7606_CHANNEL(3, 18),
- AD7606_CHANNEL(4, 18),
- AD7606_CHANNEL(5, 18),
- AD7606_CHANNEL(6, 18),
- AD7606_CHANNEL(7, 18),
-};
-
-static const struct iio_chan_spec ad7607_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(8),
- AD7606_CHANNEL(0, 14),
- AD7606_CHANNEL(1, 14),
- AD7606_CHANNEL(2, 14),
- AD7606_CHANNEL(3, 14),
- AD7606_CHANNEL(4, 14),
- AD7606_CHANNEL(5, 14),
- AD7606_CHANNEL(6, 14),
- AD7606_CHANNEL(7, 14),
-};
-
-static const struct iio_chan_spec ad7608_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(8),
- AD7606_CHANNEL(0, 18),
- AD7606_CHANNEL(1, 18),
- AD7606_CHANNEL(2, 18),
- AD7606_CHANNEL(3, 18),
- AD7606_CHANNEL(4, 18),
- AD7606_CHANNEL(5, 18),
- AD7606_CHANNEL(6, 18),
- AD7606_CHANNEL(7, 18),
-};
-
-/*
- * The current assumption that this driver makes for AD7616, is that it's
- * working in Hardware Mode with Serial, Burst and Sequencer modes activated.
- * To activate them, following pins must be pulled high:
- * -SER/PAR
- * -SEQEN
- * And following pins must be pulled low:
- * -WR/BURST
- * -DB4/SER1W
- */
-static const struct iio_chan_spec ad7616_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(16),
- AD7606_CHANNEL(0, 16),
- AD7606_CHANNEL(1, 16),
- AD7606_CHANNEL(2, 16),
- AD7606_CHANNEL(3, 16),
- AD7606_CHANNEL(4, 16),
- AD7606_CHANNEL(5, 16),
- AD7606_CHANNEL(6, 16),
- AD7606_CHANNEL(7, 16),
- AD7606_CHANNEL(8, 16),
- AD7606_CHANNEL(9, 16),
- AD7606_CHANNEL(10, 16),
- AD7606_CHANNEL(11, 16),
- AD7606_CHANNEL(12, 16),
- AD7606_CHANNEL(13, 16),
- AD7606_CHANNEL(14, 16),
- AD7606_CHANNEL(15, 16),
-};
-
static int ad7606c_18bit_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch);
+ struct iio_chan_spec *chan);
static int ad7606c_16bit_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch);
+ struct iio_chan_spec *chan);
static int ad7606_16bit_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch);
+ struct iio_chan_spec *chan);
static int ad7607_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch);
+ struct iio_chan_spec *chan);
static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch);
+ struct iio_chan_spec *chan);
static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch);
+ struct iio_chan_spec *chan);
static int ad7616_sw_mode_setup(struct iio_dev *indio_dev);
static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev);
const struct ad7606_chip_info ad7605_4_info = {
- .channels = ad7605_channels,
+ .max_samplerate = 300 * KILO,
.name = "ad7605-4",
+ .bits = 16,
.num_adc_channels = 4,
- .num_channels = 5,
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7605_4_info, "IIO_AD7606");
const struct ad7606_chip_info ad7606_8_info = {
- .channels = ad7606_channels_16bit,
+ .max_samplerate = 200 * KILO,
.name = "ad7606-8",
+ .bits = 16,
.num_adc_channels = 8,
- .num_channels = 9,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
@@ -216,108 +131,116 @@ const struct ad7606_chip_info ad7606_8_info = {
EXPORT_SYMBOL_NS_GPL(ad7606_8_info, "IIO_AD7606");
const struct ad7606_chip_info ad7606_6_info = {
- .channels = ad7606_channels_16bit,
+ .max_samplerate = 200 * KILO,
.name = "ad7606-6",
+ .bits = 16,
.num_adc_channels = 6,
- .num_channels = 7,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
+ .offload_storagebits = 32,
};
EXPORT_SYMBOL_NS_GPL(ad7606_6_info, "IIO_AD7606");
const struct ad7606_chip_info ad7606_4_info = {
- .channels = ad7606_channels_16bit,
+ .max_samplerate = 200 * KILO,
.name = "ad7606-4",
+ .bits = 16,
.num_adc_channels = 4,
- .num_channels = 5,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
+ .offload_storagebits = 32,
};
EXPORT_SYMBOL_NS_GPL(ad7606_4_info, "IIO_AD7606");
const struct ad7606_chip_info ad7606b_info = {
- .channels = ad7606_channels_16bit,
.max_samplerate = 800 * KILO,
.name = "ad7606b",
+ .bits = 16,
.num_adc_channels = 8,
- .num_channels = 9,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
.sw_setup_cb = ad7606b_sw_mode_setup,
+ .offload_storagebits = 32,
};
EXPORT_SYMBOL_NS_GPL(ad7606b_info, "IIO_AD7606");
const struct ad7606_chip_info ad7606c_16_info = {
- .channels = ad7606_channels_16bit,
+ .max_samplerate = 1 * MEGA,
.name = "ad7606c16",
+ .bits = 16,
.num_adc_channels = 8,
- .num_channels = 9,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606c_16bit_chan_scale_setup,
.sw_setup_cb = ad7606b_sw_mode_setup,
+ .offload_storagebits = 32,
};
EXPORT_SYMBOL_NS_GPL(ad7606c_16_info, "IIO_AD7606");
const struct ad7606_chip_info ad7607_info = {
- .channels = ad7607_channels,
+ .max_samplerate = 200 * KILO,
.name = "ad7607",
+ .bits = 14,
.num_adc_channels = 8,
- .num_channels = 9,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7607_chan_scale_setup,
+ .offload_storagebits = 32,
};
EXPORT_SYMBOL_NS_GPL(ad7607_info, "IIO_AD7606");
const struct ad7606_chip_info ad7608_info = {
- .channels = ad7608_channels,
+ .max_samplerate = 200 * KILO,
.name = "ad7608",
+ .bits = 18,
.num_adc_channels = 8,
- .num_channels = 9,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7608_chan_scale_setup,
+ .offload_storagebits = 32,
};
EXPORT_SYMBOL_NS_GPL(ad7608_info, "IIO_AD7606");
const struct ad7606_chip_info ad7609_info = {
- .channels = ad7608_channels,
+ .max_samplerate = 200 * KILO,
.name = "ad7609",
+ .bits = 18,
.num_adc_channels = 8,
- .num_channels = 9,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7609_chan_scale_setup,
+ .offload_storagebits = 32,
};
EXPORT_SYMBOL_NS_GPL(ad7609_info, "IIO_AD7606");
const struct ad7606_chip_info ad7606c_18_info = {
- .channels = ad7606_channels_18bit,
+ .max_samplerate = 1 * MEGA,
.name = "ad7606c18",
+ .bits = 18,
.num_adc_channels = 8,
- .num_channels = 9,
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606c_18bit_chan_scale_setup,
.sw_setup_cb = ad7606b_sw_mode_setup,
+ .offload_storagebits = 32,
};
EXPORT_SYMBOL_NS_GPL(ad7606c_18_info, "IIO_AD7606");
const struct ad7606_chip_info ad7616_info = {
- .channels = ad7616_channels,
+ .max_samplerate = 1 * MEGA,
.init_delay_ms = 15,
.name = "ad7616",
+ .bits = 16,
.num_adc_channels = 16,
- .num_channels = 17,
.oversampling_avail = ad7616_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7616_oversampling_avail),
.os_req_reset = true,
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
.sw_setup_cb = ad7616_sw_mode_setup,
+ .offload_storagebits = 16,
};
EXPORT_SYMBOL_NS_GPL(ad7616_info, "IIO_AD7606");
@@ -335,10 +258,10 @@ int ad7606_reset(struct ad7606_state *st)
EXPORT_SYMBOL_NS_GPL(ad7606_reset, "IIO_AD7606");
static int ad7606_16bit_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch)
+ struct iio_chan_spec *chan)
{
struct ad7606_state *st = iio_priv(indio_dev);
- struct ad7606_chan_scale *cs = &st->chan_scales[ch];
+ struct ad7606_chan_scale *cs = &st->chan_scales[chan->scan_index];
if (!st->sw_mode_en) {
/* tied to logic low, analog input range is +/- 5V */
@@ -362,7 +285,6 @@ static int ad7606_get_chan_config(struct iio_dev *indio_dev, int ch,
{
struct ad7606_state *st = iio_priv(indio_dev);
unsigned int num_channels = st->chip_info->num_adc_channels;
- unsigned int offset = indio_dev->num_channels - st->chip_info->num_adc_channels;
struct device *dev = st->dev;
int ret;
@@ -378,7 +300,7 @@ static int ad7606_get_chan_config(struct iio_dev *indio_dev, int ch,
continue;
/* channel number (here) is from 1 to num_channels */
- if (reg < offset || reg > num_channels) {
+ if (reg < 1 || reg > num_channels) {
dev_warn(dev,
"Invalid channel number (ignoring): %d\n", reg);
continue;
@@ -414,10 +336,10 @@ static int ad7606_get_chan_config(struct iio_dev *indio_dev, int ch,
}
static int ad7606c_18bit_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch)
+ struct iio_chan_spec *chan)
{
struct ad7606_state *st = iio_priv(indio_dev);
- struct ad7606_chan_scale *cs = &st->chan_scales[ch];
+ struct ad7606_chan_scale *cs = &st->chan_scales[chan->scan_index];
bool bipolar, differential;
int ret;
@@ -428,7 +350,8 @@ static int ad7606c_18bit_chan_scale_setup(struct iio_dev *indio_dev,
return 0;
}
- ret = ad7606_get_chan_config(indio_dev, ch, &bipolar, &differential);
+ ret = ad7606_get_chan_config(indio_dev, chan->scan_index, &bipolar,
+ &differential);
if (ret)
return ret;
@@ -471,10 +394,10 @@ static int ad7606c_18bit_chan_scale_setup(struct iio_dev *indio_dev,
}
static int ad7606c_16bit_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch)
+ struct iio_chan_spec *chan)
{
struct ad7606_state *st = iio_priv(indio_dev);
- struct ad7606_chan_scale *cs = &st->chan_scales[ch];
+ struct ad7606_chan_scale *cs = &st->chan_scales[chan->scan_index];
bool bipolar, differential;
int ret;
@@ -485,7 +408,8 @@ static int ad7606c_16bit_chan_scale_setup(struct iio_dev *indio_dev,
return 0;
}
- ret = ad7606_get_chan_config(indio_dev, ch, &bipolar, &differential);
+ ret = ad7606_get_chan_config(indio_dev, chan->scan_index, &bipolar,
+ &differential);
if (ret)
return ret;
@@ -529,10 +453,10 @@ static int ad7606c_16bit_chan_scale_setup(struct iio_dev *indio_dev,
}
static int ad7607_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch)
+ struct iio_chan_spec *chan)
{
struct ad7606_state *st = iio_priv(indio_dev);
- struct ad7606_chan_scale *cs = &st->chan_scales[ch];
+ struct ad7606_chan_scale *cs = &st->chan_scales[chan->scan_index];
cs->range = 0;
cs->scale_avail = ad7607_hw_scale_avail;
@@ -541,10 +465,10 @@ static int ad7607_chan_scale_setup(struct iio_dev *indio_dev,
}
static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch)
+ struct iio_chan_spec *chan)
{
struct ad7606_state *st = iio_priv(indio_dev);
- struct ad7606_chan_scale *cs = &st->chan_scales[ch];
+ struct ad7606_chan_scale *cs = &st->chan_scales[chan->scan_index];
cs->range = 0;
cs->scale_avail = ad7606_18bit_hw_scale_avail;
@@ -553,10 +477,10 @@ static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
}
static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch)
+ struct iio_chan_spec *chan)
{
struct ad7606_state *st = iio_priv(indio_dev);
- struct ad7606_chan_scale *cs = &st->chan_scales[ch];
+ struct ad7606_chan_scale *cs = &st->chan_scales[chan->scan_index];
cs->range = 0;
cs->scale_avail = ad7609_hw_scale_avail;
@@ -599,7 +523,7 @@ static int ad7606_pwm_set_high(struct ad7606_state *st)
return ret;
}
-static int ad7606_pwm_set_low(struct ad7606_state *st)
+int ad7606_pwm_set_low(struct ad7606_state *st)
{
struct pwm_state cnvst_pwm_state;
int ret;
@@ -612,8 +536,9 @@ static int ad7606_pwm_set_low(struct ad7606_state *st)
return ret;
}
+EXPORT_SYMBOL_NS_GPL(ad7606_pwm_set_low, "IIO_AD7606");
-static int ad7606_pwm_set_swing(struct ad7606_state *st)
+int ad7606_pwm_set_swing(struct ad7606_state *st)
{
struct pwm_state cnvst_pwm_state;
@@ -623,6 +548,7 @@ static int ad7606_pwm_set_swing(struct ad7606_state *st)
return pwm_apply_might_sleep(st->cnvst_pwm, &cnvst_pwm_state);
}
+EXPORT_SYMBOL_NS_GPL(ad7606_pwm_set_swing, "IIO_AD7606");
static bool ad7606_pwm_is_swinging(struct ad7606_state *st)
{
@@ -679,8 +605,8 @@ static irqreturn_t ad7606_trigger_handler(int irq, void *p)
if (ret)
goto error_ret;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &st->data, sizeof(st->data),
+ iio_get_time_ns(indio_dev));
error_ret:
iio_trigger_notify_done(indio_dev->trig);
/* The rising edge of the CONVST signal starts a new conversion. */
@@ -693,8 +619,8 @@ static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch,
int *val)
{
struct ad7606_state *st = iio_priv(indio_dev);
- unsigned int realbits = st->chip_info->channels[1].scan_type.realbits;
const struct iio_chan_spec *chan;
+ unsigned int realbits;
int ret;
if (st->gpio_convst) {
@@ -711,7 +637,7 @@ static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch,
* will not happen because IIO_CHAN_INFO_RAW is not supported for the backend.
* TODO: Add support for reading a single value when the backend is used.
*/
- if (!st->back) {
+ if (st->trig) {
ret = wait_for_completion_timeout(&st->completion,
msecs_to_jiffies(1000));
if (!ret) {
@@ -719,25 +645,30 @@ static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch,
goto error_ret;
}
} else {
- fsleep(1);
+ /*
+ * If the BUSY interrupt is not available, wait enough time for
+ * the longest possible conversion (max for the whole family is
+ * around 350us).
+ */
+ fsleep(400);
}
ret = ad7606_read_samples(st);
if (ret)
goto error_ret;
- chan = &indio_dev->channels[ch + 1];
- if (chan->scan_type.sign == 'u') {
- if (realbits > 16)
- *val = st->data.buf32[ch];
- else
- *val = st->data.buf16[ch];
- } else {
- if (realbits > 16)
- *val = sign_extend32(st->data.buf32[ch], realbits - 1);
- else
- *val = sign_extend32(st->data.buf16[ch], realbits - 1);
- }
+ chan = &indio_dev->channels[ch];
+ realbits = chan->scan_type.realbits;
+
+ if (realbits > 16)
+ *val = st->data.buf32[ch];
+ else
+ *val = st->data.buf16[ch];
+
+ *val &= GENMASK(realbits - 1, 0);
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(*val, realbits - 1);
error_ret:
if (!st->gpio_convst) {
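/*
 * Aside: the unified raw-value fixup above in isolation - mask down to
 * the converter's real resolution first, then sign-extend only when the
 * channel is signed. The function name is illustrative.
 */
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/types.h>

static int example_fixup_raw(u32 raw, unsigned int realbits, bool is_signed)
{
	int val = raw & GENMASK(realbits - 1, 0);	/* keep realbits LSBs */

	if (is_signed)
		val = sign_extend32(val, realbits - 1);	/* propagate sign bit */

	return val;
}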
@@ -765,14 +696,14 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
if (!iio_device_claim_direct(indio_dev))
return -EBUSY;
- ret = ad7606_scan_direct(indio_dev, chan->address, val);
+ ret = ad7606_scan_direct(indio_dev, chan->scan_index, val);
iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
if (st->sw_mode_en)
- ch = chan->address;
+ ch = chan->scan_index;
cs = &st->chan_scales[ch];
*val = cs->scale_avail[cs->range][0];
*val2 = cs->scale_avail[cs->range][1];
@@ -781,10 +712,6 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
*val = st->oversampling;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
- /*
- * TODO: return the real frequency intead of the requested one once
- * pwm_get_state_hw comes upstream.
- */
pwm_get_state(st->cnvst_pwm, &cnvst_pwm_state);
*val = DIV_ROUND_CLOSEST_ULL(NSEC_PER_SEC, cnvst_pwm_state.period);
return IIO_VAL_INT;
@@ -854,7 +781,7 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
if (st->sw_mode_en)
- ch = chan->address;
+ ch = chan->scan_index;
cs = &st->chan_scales[ch];
for (i = 0; i < cs->num_scales; i++) {
scale_avail_uv[i] = cs->scale_avail[i][0] * MICRO +
@@ -1061,7 +988,7 @@ static int ad7606_read_avail(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
if (st->sw_mode_en)
- ch = chan->address;
+ ch = chan->scan_index;
cs = &st->chan_scales[ch];
*vals = (int *)cs->scale_avail;
@@ -1276,29 +1203,91 @@ static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev)
return st->bops->sw_mode_config(indio_dev);
}
-static int ad7606_chan_scales_setup(struct iio_dev *indio_dev)
+static int ad7606_probe_channels(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
- unsigned int offset = indio_dev->num_channels - st->chip_info->num_adc_channels;
- struct iio_chan_spec *chans;
- size_t size;
- int ch, ret;
-
- /* Clone IIO channels, since some may be differential */
- size = indio_dev->num_channels * sizeof(*indio_dev->channels);
- chans = devm_kzalloc(st->dev, size, GFP_KERNEL);
- if (!chans)
+ struct device *dev = indio_dev->dev.parent;
+ struct iio_chan_spec *channels;
+ bool slow_bus;
+ int ret, i;
+
+ slow_bus = !(st->bops->iio_backend_config || st->offload_en);
+ indio_dev->num_channels = st->chip_info->num_adc_channels;
+
+ /* Slow buses also get 1 more channel for soft timestamp */
+ if (slow_bus)
+ indio_dev->num_channels++;
+
+ channels = devm_kcalloc(dev, indio_dev->num_channels, sizeof(*channels),
+ GFP_KERNEL);
+ if (!channels)
return -ENOMEM;
- memcpy(chans, indio_dev->channels, size);
- indio_dev->channels = chans;
+ for (i = 0; i < st->chip_info->num_adc_channels; i++) {
+ struct iio_chan_spec *chan = &channels[i];
- for (ch = 0; ch < st->chip_info->num_adc_channels; ch++) {
- ret = st->chip_info->scale_setup_cb(indio_dev, &chans[ch + offset], ch);
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->channel = i;
+ chan->scan_index = i;
+ chan->scan_type.sign = 's';
+ chan->scan_type.realbits = st->chip_info->bits;
+ /*
+ * If in SPI offload mode, storagebits are set based
+ * on the spi-engine hw implementation.
+ */
+ chan->scan_type.storagebits = st->offload_en ?
+ st->chip_info->offload_storagebits :
+ (st->chip_info->bits > 16 ? 32 : 16);
+
+ chan->scan_type.endianness = IIO_CPU;
+
+ if (indio_dev->modes & INDIO_DIRECT_MODE)
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_RAW);
+
+ if (st->sw_mode_en) {
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SCALE);
+ chan->info_mask_separate_available |=
+ BIT(IIO_CHAN_INFO_SCALE);
+
+ /*
+ * All chips with software mode support oversampling,
+ * so we skip the oversampling_available check. Using
+ * shared_by_type instead of shared_by_all on slow
+ * buses preserves backward compatibility.
+ */
+ if (slow_bus)
+ chan->info_mask_shared_by_type |=
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ else
+ chan->info_mask_shared_by_all |=
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+
+ chan->info_mask_shared_by_all_available |=
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ } else {
+ chan->info_mask_shared_by_type |=
+ BIT(IIO_CHAN_INFO_SCALE);
+
+ if (st->chip_info->oversampling_avail)
+ chan->info_mask_shared_by_all |=
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ }
+
+ if (!slow_bus)
+ chan->info_mask_shared_by_all |=
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ ret = st->chip_info->scale_setup_cb(indio_dev, chan);
if (ret)
return ret;
}
+ if (slow_bus)
+ channels[i] = (struct iio_chan_spec)IIO_CHAN_SOFT_TIMESTAMP(i);
+
+ indio_dev->channels = channels;
+
return 0;
}
@@ -1322,11 +1311,19 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
st = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
st->dev = dev;
- mutex_init(&st->lock);
st->bops = bops;
st->base_address = base_address;
st->oversampling = 1;
+ st->sw_mode_en = device_property_read_bool(dev, "adi,sw-mode");
+
+ if (st->sw_mode_en && !chip_info->sw_setup_cb)
+ return dev_err_probe(dev, -EINVAL,
+ "Software mode is not supported for this chip\n");
ret = devm_regulator_get_enable(dev, "avcc");
if (ret)
@@ -1355,10 +1352,21 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
else
indio_dev->info = &ad7606_info_no_os_or_range;
}
- indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* AXI ADC backend doesn't support single read. */
+ indio_dev->modes = st->bops->iio_backend_config ? 0 : INDIO_DIRECT_MODE;
indio_dev->name = chip_info->name;
- indio_dev->channels = st->chip_info->channels;
- indio_dev->num_channels = st->chip_info->num_channels;
+
+ /* Using spi-engine with offload support ? */
+ if (st->bops->offload_config) {
+ ret = st->bops->offload_config(dev, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = ad7606_probe_channels(indio_dev);
+ if (ret)
+ return ret;
ret = ad7606_reset(st);
if (ret)
@@ -1371,7 +1379,7 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
}
/* If convst pin is not defined, setup PWM. */
- if (!st->gpio_convst) {
+ if (!st->gpio_convst || st->offload_en) {
st->cnvst_pwm = devm_pwm_get(dev, NULL);
if (IS_ERR(st->cnvst_pwm))
return PTR_ERR(st->cnvst_pwm);
@@ -1401,8 +1409,7 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
* If there is a backend, the PWM should not overpass the maximum sampling
* frequency the chip supports.
*/
- ret = ad7606_set_sampling_freq(st,
- chip_info->max_samplerate ? : 2 * KILO);
+ ret = ad7606_set_sampling_freq(st, chip_info->max_samplerate);
if (ret)
return ret;
@@ -1411,8 +1418,7 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return ret;
indio_dev->setup_ops = &ad7606_backend_buffer_ops;
- } else {
-
+ } else if (!st->offload_en) {
/* Reserve the PWM use only for backend (force gpio_convst definition) */
if (!st->gpio_convst)
return dev_err_probe(dev, -EINVAL,
@@ -1450,17 +1456,12 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
st->write_scale = ad7606_write_scale_hw;
st->write_os = ad7606_write_os_hw;
- st->sw_mode_en = st->chip_info->sw_setup_cb &&
- device_property_present(st->dev, "adi,sw-mode");
- if (st->sw_mode_en) {
+ /* Offload needs a single DOUT line; this setting is applied in sw_setup_cb. */
+ if (st->sw_mode_en || st->offload_en) {
indio_dev->info = &ad7606_info_sw_mode;
st->chip_info->sw_setup_cb(indio_dev);
}
- ret = ad7606_chan_scales_setup(indio_dev);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(ad7606_probe, "IIO_AD7606");
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 71a30525eaab..441e62c521bc 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -40,119 +40,48 @@
#define AD7606_RANGE_CH_ADDR(ch) (0x03 + ((ch) >> 1))
#define AD7606_OS_MODE 0x08
-#define AD760X_CHANNEL(num, mask_sep, mask_type, mask_all, \
- mask_sep_avail, mask_all_avail, bits) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = num, \
- .address = num, \
- .info_mask_separate = mask_sep, \
- .info_mask_separate_available = \
- mask_sep_avail, \
- .info_mask_shared_by_type = mask_type, \
- .info_mask_shared_by_all = mask_all, \
- .info_mask_shared_by_all_available = \
- mask_all_avail, \
- .scan_index = num, \
- .scan_type = { \
- .sign = 's', \
- .realbits = (bits), \
- .storagebits = (bits) > 16 ? 32 : 16, \
- .endianness = IIO_CPU, \
- }, \
-}
-
-#define AD7606_SW_CHANNEL(num, bits) \
- AD760X_CHANNEL(num, \
- /* mask separate */ \
- BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
- /* mask type */ \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- /* mask all */ \
- 0, \
- /* mask separate available */ \
- BIT(IIO_CHAN_INFO_SCALE), \
- /* mask all available */ \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- bits)
-
-#define AD7605_CHANNEL(num) \
- AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW), \
- BIT(IIO_CHAN_INFO_SCALE), 0, 0, 0, 16)
-
-#define AD7606_CHANNEL(num, bits) \
- AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW), \
- BIT(IIO_CHAN_INFO_SCALE), \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- 0, 0, bits)
-
-#define AD7616_CHANNEL(num) AD7606_SW_CHANNEL(num, 16)
-
-#define AD7606_BI_CHANNEL(num) \
- AD760X_CHANNEL(num, 0, \
- BIT(IIO_CHAN_INFO_SCALE), \
- BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- 0, 0, 16)
-
-#define AD7606_BI_SW_CHANNEL(num) \
- AD760X_CHANNEL(num, \
- /* mask separate */ \
- BIT(IIO_CHAN_INFO_SCALE), \
- /* mask type */ \
- 0, \
- /* mask all */ \
- BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- /* mask separate available */ \
- BIT(IIO_CHAN_INFO_SCALE), \
- /* mask all available */ \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
- 16)
-
struct ad7606_state;
typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
- struct iio_chan_spec *chan, int ch);
+ struct iio_chan_spec *chan);
typedef int (*ad7606_sw_setup_cb_t)(struct iio_dev *indio_dev);
/**
* struct ad7606_chip_info - chip specific information
- * @channels: channel specification
- * @max_samplerate: maximum supported samplerate
- * @name device name
- * @num_channels: number of channels
- * @num_adc_channels the number of channels the ADC actually inputs.
+ * @max_samplerate: maximum supported sample rate
+ * @name: device name
+ * @bits: data width in bits
+ * @num_adc_channels: the number of physical voltage inputs
* @scale_setup_cb: callback to setup the scales for each channel
* @sw_setup_cb: callback to setup the software mode if available.
- * @oversampling_avail pointer to the array which stores the available
+ * @oversampling_avail: pointer to the array which stores the available
* oversampling ratios.
- * @oversampling_num number of elements stored in oversampling_avail array
- * @os_req_reset some devices require a reset to update oversampling
- * @init_delay_ms required delay in milliseconds for initialization
+ * @oversampling_num: number of elements stored in oversampling_avail array
+ * @os_req_reset: some devices require a reset to update oversampling
+ * @init_delay_ms: required delay in milliseconds for initialization
* after a restart
+ * @offload_storagebits: storage bits used by the offload hw implementation
*/
struct ad7606_chip_info {
- const struct iio_chan_spec *channels;
unsigned int max_samplerate;
const char *name;
+ unsigned int bits;
unsigned int num_adc_channels;
- unsigned int num_channels;
ad7606_scale_setup_cb_t scale_setup_cb;
ad7606_sw_setup_cb_t sw_setup_cb;
const unsigned int *oversampling_avail;
unsigned int oversampling_num;
bool os_req_reset;
unsigned long init_delay_ms;
+ u8 offload_storagebits;
};
/**
* struct ad7606_chan_scale - channel scale configuration
- * @scale_avail pointer to the array which stores the available scales
- * @num_scales number of elements stored in the scale_avail array
- * @range voltage range selection, selects which scale to apply
- * @reg_offset offset for the register value, to be applied when
+ * @scale_avail: pointer to the array which stores the available scales
+ * @num_scales: number of elements stored in the scale_avail array
+ * @range: voltage range selection, selects which scale to apply
+ * @reg_offset: offset for the register value, to be applied when
* writing the value of 'range' to the register value
*/
struct ad7606_chan_scale {
@@ -165,32 +94,35 @@ struct ad7606_chan_scale {
/**
* struct ad7606_state - driver instance specific data
- * @dev pointer to kernel device
- * @chip_info entry in the table of chips that describes this device
- * @bops bus operations (SPI or parallel)
- * @chan_scales scale configuration for channels
- * @oversampling oversampling selection
- * @cnvst_pwm pointer to the PWM device connected to the cnvst pin
- * @base_address address from where to read data in parallel operation
- * @sw_mode_en software mode enabled
- * @oversampling_avail pointer to the array which stores the available
+ * @dev: pointer to kernel device
+ * @chip_info: entry in the table of chips that describes this device
+ * @bops: bus operations (SPI or parallel)
+ * @chan_scales: scale configuration for channels
+ * @oversampling: oversampling selection
+ * @cnvst_pwm: pointer to the PWM device connected to the cnvst pin
+ * @base_address: address from where to read data in parallel operation
+ * @sw_mode_en: software mode enabled
+ * @oversampling_avail: pointer to the array which stores the available
* oversampling ratios.
- * @num_os_ratios number of elements stored in oversampling_avail array
- * @write_scale pointer to the function which writes the scale
- * @write_os pointer to the function which writes the os
- * @lock protect sensor state from concurrent accesses to GPIOs
- * @gpio_convst GPIO descriptor for conversion start signal (CONVST)
- * @gpio_reset GPIO descriptor for device hard-reset
- * @gpio_range GPIO descriptor for range selection
- * @gpio_standby GPIO descriptor for stand-by signal (STBY),
+ * @num_os_ratios: number of elements stored in oversampling_avail array
+ * @back: pointer to the iio_backend structure, if used
+ * @write_scale: pointer to the function which writes the scale
+ * @write_os: pointer to the function which writes the os
+ * @lock: protect sensor state from concurrent accesses to GPIOs
+ * @gpio_convst: GPIO descriptor for conversion start signal (CONVST)
+ * @gpio_reset: GPIO descriptor for device hard-reset
+ * @gpio_range: GPIO descriptor for range selection
+ * @gpio_standby: GPIO descriptor for stand-by signal (STBY),
* controls power-down mode of device
- * @gpio_frstdata GPIO descriptor for reading from device when data
+ * @gpio_frstdata: GPIO descriptor for reading from device when data
* is being read on the first channel
- * @gpio_os GPIO descriptors to control oversampling on the device
- * @complete completion to indicate end of conversion
- * @trig The IIO trigger associated with the device.
- * @data buffer for reading data from the device
- * @d16 be16 buffer for reading data from the device
+ * @gpio_os: GPIO descriptors to control oversampling on the device
+ * @trig: The IIO trigger associated with the device.
+ * @completion: completion to indicate end of conversion
+ * @data: buffer for reading data from the device
+ * @offload_en: SPI offload enabled
+ * @bus_data: bus-specific variables
+ * @d16: be16 buffer for reading data from the device
*/
struct ad7606_state {
struct device *dev;
@@ -217,36 +149,44 @@ struct ad7606_state {
struct iio_trigger *trig;
struct completion completion;
+ bool offload_en;
+ void *bus_data;
+
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
- * 16 * 16-bit samples + 64-bit timestamp - for AD7616
- * 8 * 32-bit samples + 64-bit timestamp - for AD7616C-18 (and similar)
+ * 16 * 16-bit samples for AD7616
+ * 8 * 32-bit samples for AD7606C-18 (and similar)
*/
- union {
- u16 buf16[20];
- u32 buf32[10];
+ struct {
+ union {
+ u16 buf16[16];
+ u32 buf32[8];
+ };
+ aligned_s64 timestamp;
} data __aligned(IIO_DMA_MINALIGN);
__be16 d16[2];
};
/**
* struct ad7606_bus_ops - driver bus operations
- * @iio_backend_config function pointer for configuring the iio_backend for
+ * @iio_backend_config: function pointer for configuring the iio_backend for
* the compatibles that use it
- * @read_block function pointer for reading blocks of data
+ * @read_block: function pointer for reading blocks of data
 * @sw_mode_config: pointer to a function which configures the device
* for software mode
- * @reg_read function pointer for reading spi register
- * @reg_write function pointer for writing spi register
- * @write_mask function pointer for write spi register with mask
- * @update_scan_mode function pointer for handling the calls to iio_info's update_scan
- * mode when enabling/disabling channels.
- * @rd_wr_cmd pointer to the function which calculates the spi address
+ * @offload_config: function pointer for configuring offload support,
+ * where any offload-specific setup is done
+ * @reg_read: function pointer for reading spi register
+ * @reg_write: function pointer for writing spi register
+ * @update_scan_mode: function pointer for handling the calls to iio_info's
+ * update_scan mode when enabling/disabling channels.
+ * @rd_wr_cmd: pointer to the function which calculates the spi address
*/
struct ad7606_bus_ops {
/* more methods added in future? */
int (*iio_backend_config)(struct device *dev, struct iio_dev *indio_dev);
+ int (*offload_config)(struct device *dev, struct iio_dev *indio_dev);
int (*read_block)(struct device *dev, int num, void *data);
int (*sw_mode_config)(struct iio_dev *indio_dev);
int (*reg_read)(struct ad7606_state *st, unsigned int addr);
@@ -254,13 +194,13 @@ struct ad7606_bus_ops {
unsigned int addr,
unsigned int val);
int (*update_scan_mode)(struct iio_dev *indio_dev, const unsigned long *scan_mask);
- u16 (*rd_wr_cmd)(int addr, char isWriteOp);
+ u16 (*rd_wr_cmd)(int addr, char is_write_op);
};
/**
- * struct ad7606_bus_info - agregate ad7606_chip_info and ad7606_bus_ops
- * @chip_info entry in the table of chips that describes this device
- * @bops bus operations (SPI or parallel)
+ * struct ad7606_bus_info - aggregate ad7606_chip_info and ad7606_bus_ops
+ * @chip_info: entry in the table of chips that describes this device
+ * @bops: bus operations (SPI or parallel)
*/
struct ad7606_bus_info {
const struct ad7606_chip_info *chip_info;
@@ -272,6 +212,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
const struct ad7606_bus_ops *bops);
int ad7606_reset(struct ad7606_state *st);
+int ad7606_pwm_set_swing(struct ad7606_state *st);
+int ad7606_pwm_set_low(struct ad7606_state *st);
extern const struct ad7606_chip_info ad7605_4_info;
extern const struct ad7606_chip_info ad7606_8_info;
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index 335fb481bfde..634852c4bbd2 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -21,28 +21,6 @@
#include "ad7606.h"
#include "ad7606_bus_iface.h"
-static const struct iio_chan_spec ad7606b_bi_channels[] = {
- AD7606_BI_CHANNEL(0),
- AD7606_BI_CHANNEL(1),
- AD7606_BI_CHANNEL(2),
- AD7606_BI_CHANNEL(3),
- AD7606_BI_CHANNEL(4),
- AD7606_BI_CHANNEL(5),
- AD7606_BI_CHANNEL(6),
- AD7606_BI_CHANNEL(7),
-};
-
-static const struct iio_chan_spec ad7606b_bi_sw_channels[] = {
- AD7606_BI_SW_CHANNEL(0),
- AD7606_BI_SW_CHANNEL(1),
- AD7606_BI_SW_CHANNEL(2),
- AD7606_BI_SW_CHANNEL(3),
- AD7606_BI_SW_CHANNEL(4),
- AD7606_BI_SW_CHANNEL(5),
- AD7606_BI_SW_CHANNEL(6),
- AD7606_BI_SW_CHANNEL(7),
-};
-
static int ad7606_par_bus_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
@@ -94,9 +72,6 @@ static int ad7606_par_bus_setup_iio_backend(struct device *dev,
return ret;
}
- indio_dev->channels = ad7606b_bi_channels;
- indio_dev->num_channels = 8;
-
return 0;
}
@@ -120,19 +95,11 @@ static int ad7606_par_bus_reg_write(struct ad7606_state *st, unsigned int addr,
return pdata->bus_reg_write(st->back, addr, val);
}
-static int ad7606_par_bus_sw_mode_config(struct iio_dev *indio_dev)
-{
- indio_dev->channels = ad7606b_bi_sw_channels;
-
- return 0;
-}
-
static const struct ad7606_bus_ops ad7606_bi_bops = {
.iio_backend_config = ad7606_par_bus_setup_iio_backend,
.update_scan_mode = ad7606_par_bus_update_scan_mode,
.reg_read = ad7606_par_bus_reg_read,
.reg_write = ad7606_par_bus_reg_write,
- .sw_mode_config = ad7606_par_bus_sw_mode_config,
};
static int ad7606_par16_read_block(struct device *dev,
@@ -255,6 +222,8 @@ static const struct platform_device_id ad7606_driver_ids[] = {
{ .name = "ad7606-6", .driver_data = (kernel_ulong_t)&ad7606_6_info, },
{ .name = "ad7606-8", .driver_data = (kernel_ulong_t)&ad7606_8_info, },
{ .name = "ad7606b", .driver_data = (kernel_ulong_t)&ad7606b_info, },
+ { .name = "ad7606c-16", .driver_data = (kernel_ulong_t)&ad7606c_16_info },
+ { .name = "ad7606c-18", .driver_data = (kernel_ulong_t)&ad7606c_18_info },
{ .name = "ad7607", .driver_data = (kernel_ulong_t)&ad7607_info, },
{ .name = "ad7608", .driver_data = (kernel_ulong_t)&ad7608_info, },
{ .name = "ad7609", .driver_data = (kernel_ulong_t)&ad7609_info, },
@@ -268,6 +237,8 @@ static const struct of_device_id ad7606_of_match[] = {
{ .compatible = "adi,ad7606-6", .data = &ad7606_6_info },
{ .compatible = "adi,ad7606-8", .data = &ad7606_8_info },
{ .compatible = "adi,ad7606b", .data = &ad7606b_info },
+ { .compatible = "adi,ad7606c-16", .data = &ad7606c_16_info },
+ { .compatible = "adi,ad7606c-18", .data = &ad7606c_18_info },
{ .compatible = "adi,ad7607", .data = &ad7607_info },
{ .compatible = "adi,ad7608", .data = &ad7608_info },
{ .compatible = "adi,ad7609", .data = &ad7609_info },
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index 179115e90988..f28e4ca37707 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -5,70 +5,43 @@
* Copyright 2011 Analog Devices Inc.
*/
+#include <linux/bitmap.h>
#include <linux/err.h>
+#include <linux/math.h>
#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
+#include <linux/units.h>
+#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/iio.h>
-#include "ad7606.h"
-#define MAX_SPI_FREQ_HZ 23500000 /* VDRIVE above 4.75 V */
+#include <dt-bindings/iio/adc/adi,ad7606.h>
-static const struct iio_chan_spec ad7616_sw_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(16),
- AD7616_CHANNEL(0),
- AD7616_CHANNEL(1),
- AD7616_CHANNEL(2),
- AD7616_CHANNEL(3),
- AD7616_CHANNEL(4),
- AD7616_CHANNEL(5),
- AD7616_CHANNEL(6),
- AD7616_CHANNEL(7),
- AD7616_CHANNEL(8),
- AD7616_CHANNEL(9),
- AD7616_CHANNEL(10),
- AD7616_CHANNEL(11),
- AD7616_CHANNEL(12),
- AD7616_CHANNEL(13),
- AD7616_CHANNEL(14),
- AD7616_CHANNEL(15),
-};
+#include "ad7606.h"
-static const struct iio_chan_spec ad7606b_sw_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(8),
- AD7606_SW_CHANNEL(0, 16),
- AD7606_SW_CHANNEL(1, 16),
- AD7606_SW_CHANNEL(2, 16),
- AD7606_SW_CHANNEL(3, 16),
- AD7606_SW_CHANNEL(4, 16),
- AD7606_SW_CHANNEL(5, 16),
- AD7606_SW_CHANNEL(6, 16),
- AD7606_SW_CHANNEL(7, 16),
-};
+#define MAX_SPI_FREQ_HZ 23500000 /* VDRIVE above 4.75 V */
-static const struct iio_chan_spec ad7606c_18_sw_channels[] = {
- IIO_CHAN_SOFT_TIMESTAMP(8),
- AD7606_SW_CHANNEL(0, 18),
- AD7606_SW_CHANNEL(1, 18),
- AD7606_SW_CHANNEL(2, 18),
- AD7606_SW_CHANNEL(3, 18),
- AD7606_SW_CHANNEL(4, 18),
- AD7606_SW_CHANNEL(5, 18),
- AD7606_SW_CHANNEL(6, 18),
- AD7606_SW_CHANNEL(7, 18),
+struct spi_bus_data {
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
+ struct spi_transfer offload_xfer;
+ struct spi_message offload_msg;
};
-static u16 ad7616_spi_rd_wr_cmd(int addr, char isWriteOp)
+static u16 ad7616_spi_rd_wr_cmd(int addr, char is_write_op)
{
/*
 * The register address consists of one W/R bit,
 * 6 bits of address, followed by one reserved bit.
*/
- return ((addr & 0x7F) << 1) | ((isWriteOp & 0x1) << 7);
+ return ((addr & 0x7F) << 1) | ((is_write_op & 0x1) << 7);
}
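/*
 * Worked example of the encoding above: a write to register 0x04 gives
 * ((0x04 & 0x7F) << 1) | (1 << 7) = 0x08 | 0x80 = 0x88, i.e. bit 7
 * carries W/R, bits 6..1 the address, and bit 0 stays reserved.
 */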
-static u16 ad7606B_spi_rd_wr_cmd(int addr, char is_write_op)
+static u16 ad7606b_spi_rd_wr_cmd(int addr, char is_write_op)
{
/*
* The address of register consists of one bit which
@@ -155,87 +128,275 @@ static int ad7606_spi_reg_write(struct ad7606_state *st,
struct spi_device *spi = to_spi_device(st->dev);
st->d16[0] = cpu_to_be16((st->bops->rd_wr_cmd(addr, 1) << 8) |
- (val & 0x1FF));
+ (val & 0xFF));
return spi_write(spi, &st->d16[0], sizeof(st->d16[0]));
}
-static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
+static int ad7606b_sw_mode_config(struct iio_dev *indio_dev)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ /* Configure device spi to output on a single channel */
+ return st->bops->reg_write(st, AD7606_CONFIGURATION_REGISTER,
+ AD7606_SINGLE_DOUT);
+}
+
+static const struct spi_offload_config ad7606_spi_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
+};
+
+static int ad7606_spi_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ const struct iio_scan_type *scan_type;
+ struct ad7606_state *st = iio_priv(indio_dev);
+ struct spi_bus_data *bus_data = st->bus_data;
+ struct spi_transfer *xfer = &bus_data->offload_xfer;
+ struct spi_device *spi = to_spi_device(st->dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_DATA_READY,
+ };
+ int ret;
+
+ scan_type = &indio_dev->channels[0].scan_type;
+
+ xfer->bits_per_word = scan_type->realbits;
+ xfer->offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
/*
- * Scale can be configured individually for each channel
- * in software mode.
+ * When using SPI offload, storagebits depend on the spi-engine
+ * hw implementation and can be 16 or 32, so they can't be used to
+ * compute struct spi_transfer.len. Use realbits instead.
*/
- indio_dev->channels = ad7616_sw_channels;
+ xfer->len = (scan_type->realbits > 16 ? 4 : 2) *
+ st->chip_info->num_adc_channels;
+
+ spi_message_init_with_transfers(&bus_data->offload_msg, xfer, 1);
+ bus_data->offload_msg.offload = bus_data->offload;
+
+ ret = spi_optimize_message(spi, &bus_data->offload_msg);
+ if (ret) {
+ dev_err(st->dev, "failed to prepare offload, err: %d\n", ret);
+ return ret;
+ }
+
+ ret = spi_offload_trigger_enable(bus_data->offload,
+ bus_data->offload_trigger,
+ &config);
+ if (ret)
+ goto err_unoptimize_message;
+
+ ret = ad7606_pwm_set_swing(st);
+ if (ret)
+ goto err_offload_exit_conversion_mode;
return 0;
+
+err_offload_exit_conversion_mode:
+ spi_offload_trigger_disable(bus_data->offload,
+ bus_data->offload_trigger);
+
+err_unoptimize_message:
+ spi_unoptimize_message(&bus_data->offload_msg);
+
+ return ret;
}
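/*
 * Aside: the optimize/enable pairing above, condensed. A message that
 * offload hardware will replay is pre-validated once with
 * spi_optimize_message() and must be unwound in reverse order on
 * failure. Identifiers here are illustrative.
 */
#include <linux/spi/spi.h>
#include <linux/spi/offload/consumer.h>

static int example_stream_start(struct spi_device *spi, struct spi_message *msg,
				struct spi_offload *offload,
				struct spi_offload_trigger *trig,
				struct spi_offload_trigger_config *cfg)
{
	int ret;

	ret = spi_optimize_message(spi, msg);	/* validate and pin resources */
	if (ret)
		return ret;

	ret = spi_offload_trigger_enable(offload, trig, cfg);
	if (ret)
		spi_unoptimize_message(msg);	/* release on failure */

	return ret;
}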
-static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
+static int ad7606_spi_offload_buffer_predisable(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
+ struct spi_bus_data *bus_data = st->bus_data;
+ int ret;
- /* Configure device spi to output on a single channel */
- st->bops->reg_write(st,
- AD7606_CONFIGURATION_REGISTER,
- AD7606_SINGLE_DOUT);
+ ret = ad7606_pwm_set_low(st);
+ if (ret)
+ return ret;
+
+ spi_offload_trigger_disable(bus_data->offload,
+ bus_data->offload_trigger);
+ spi_unoptimize_message(&bus_data->offload_msg);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad7606_offload_buffer_setup_ops = {
+ .postenable = ad7606_spi_offload_buffer_postenable,
+ .predisable = ad7606_spi_offload_buffer_predisable,
+};
+
+static bool ad7606_spi_offload_trigger_match(
+ struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ if (type != SPI_OFFLOAD_TRIGGER_DATA_READY)
+ return false;
/*
- * Scale can be configured individually for each channel
- * in software mode.
+ * Requires 1 arg:
+ * args[0] is the trigger event.
*/
- indio_dev->channels = ad7606b_sw_channels;
+ if (nargs != 1 || args[0] != AD7606_TRIGGER_EVENT_BUSY)
+ return false;
+
+ return true;
+}
+
+static int ad7606_spi_offload_trigger_request(
+ struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ /* Should already be validated by match, but just in case. */
+ if (nargs != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ad7606_spi_offload_trigger_validate(
+ struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ if (config->type != SPI_OFFLOAD_TRIGGER_DATA_READY)
+ return -EINVAL;
return 0;
}
-static int ad7606c_18_sw_mode_config(struct iio_dev *indio_dev)
+static const struct spi_offload_trigger_ops ad7606_offload_trigger_ops = {
+ .match = ad7606_spi_offload_trigger_match,
+ .request = ad7606_spi_offload_trigger_request,
+ .validate = ad7606_spi_offload_trigger_validate,
+};
+
+static int ad7606_spi_offload_probe(struct device *dev,
+ struct iio_dev *indio_dev)
{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_bus_data *bus_data;
+ struct dma_chan *rx_dma;
+ struct spi_offload_trigger_info trigger_info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &ad7606_offload_trigger_ops,
+ .priv = st,
+ };
int ret;
- ret = ad7606B_sw_mode_config(indio_dev);
+ bus_data = devm_kzalloc(dev, sizeof(*bus_data), GFP_KERNEL);
+ if (!bus_data)
+ return -ENOMEM;
+ st->bus_data = bus_data;
+
+ bus_data->offload = devm_spi_offload_get(dev, spi,
+ &ad7606_spi_offload_config);
+ ret = PTR_ERR_OR_ZERO(bus_data->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get SPI offload\n");
+ /* Allow main ad7606_probe function to continue. */
+ if (ret == -ENODEV)
+ return 0;
+
+ ret = devm_spi_offload_trigger_register(dev, &trigger_info);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "failed to register offload trigger\n");
+
+ bus_data->offload_trigger = devm_spi_offload_trigger_get(dev,
+ bus_data->offload, SPI_OFFLOAD_TRIGGER_DATA_READY);
+ if (IS_ERR(bus_data->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(bus_data->offload_trigger),
+ "failed to get offload trigger\n");
+
+ /* TODO: PWM setup should be OK, as done for the backend. Is a PWM mutex needed? */
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev,
+ bus_data->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "failed to get offload RX DMA\n");
+
+ ret = devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev,
+ rx_dma, IIO_BUFFER_DIRECTION_IN);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to setup offload RX DMA\n");
+
+ /* Use offload ops. */
+ indio_dev->setup_ops = &ad7606_offload_buffer_setup_ops;
- indio_dev->channels = ad7606c_18_sw_channels;
+ st->offload_en = true;
+
+ return 0;
+}
+
+static int ad7606_spi_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ if (st->offload_en) {
+ unsigned int num_adc_ch = st->chip_info->num_adc_channels;
+
+ /*
+ * SPI offload requires that all channels be enabled, since this is a
+ * simultaneous sampling ADC: there is no way to selectively disable
+ * channels that get read, and the DMA buffer has no way of demuxing
+ * the data to filter out unwanted channels.
+ */
+ if (bitmap_weight(scan_mask, num_adc_ch) != num_adc_ch)
+ return -EINVAL;
+ }
return 0;
}
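
A minimal sketch of the all-or-nothing rule enforced above, using the bitmap
helpers it relies on; the 8-channel count is assumed for illustration:

	DECLARE_BITMAP(mask, 8);

	bitmap_fill(mask, 8);		/* all 8 channels: weight == 8, accepted */
	bitmap_clear(mask, 3, 1);	/* drop one: weight == 7, rejected with -EINVAL */
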
static const struct ad7606_bus_ops ad7606_spi_bops = {
+ .offload_config = ad7606_spi_offload_probe,
.read_block = ad7606_spi_read_block,
+ .update_scan_mode = ad7606_spi_update_scan_mode,
};
static const struct ad7606_bus_ops ad7607_spi_bops = {
+ .offload_config = ad7606_spi_offload_probe,
.read_block = ad7606_spi_read_block14to16,
+ .update_scan_mode = ad7606_spi_update_scan_mode,
};
static const struct ad7606_bus_ops ad7608_spi_bops = {
+ .offload_config = ad7606_spi_offload_probe,
.read_block = ad7606_spi_read_block18to32,
+ .update_scan_mode = ad7606_spi_update_scan_mode,
};
static const struct ad7606_bus_ops ad7616_spi_bops = {
+ .offload_config = ad7606_spi_offload_probe,
.read_block = ad7606_spi_read_block,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
.rd_wr_cmd = ad7616_spi_rd_wr_cmd,
- .sw_mode_config = ad7616_sw_mode_config,
+ .update_scan_mode = ad7606_spi_update_scan_mode,
};
static const struct ad7606_bus_ops ad7606b_spi_bops = {
+ .offload_config = ad7606_spi_offload_probe,
.read_block = ad7606_spi_read_block,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
- .sw_mode_config = ad7606B_sw_mode_config,
+ .rd_wr_cmd = ad7606b_spi_rd_wr_cmd,
+ .sw_mode_config = ad7606b_sw_mode_config,
+ .update_scan_mode = ad7606_spi_update_scan_mode,
};
static const struct ad7606_bus_ops ad7606c_18_spi_bops = {
+ .offload_config = ad7606_spi_offload_probe,
.read_block = ad7606_spi_read_block18to32,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
- .sw_mode_config = ad7606c_18_sw_mode_config,
+ .rd_wr_cmd = ad7606b_spi_rd_wr_cmd,
+ .sw_mode_config = ad7606b_sw_mode_config,
+ .update_scan_mode = ad7606_spi_update_scan_mode,
};
static const struct ad7606_bus_info ad7605_4_bus_info = {
@@ -348,3 +509,4 @@ MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("IIO_AD7606");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 5e0be36af0c5..51134023534a 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -4,14 +4,17 @@
*
* Copyright 2017 Analog Devices Inc.
*/
+#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
-#include <linux/kernel.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
@@ -53,12 +56,15 @@
#define AD7768_REG_SPI_DIAG_ENABLE 0x28
#define AD7768_REG_ADC_DIAG_ENABLE 0x29
#define AD7768_REG_DIG_DIAG_ENABLE 0x2A
-#define AD7768_REG_ADC_DATA 0x2C
+#define AD7768_REG24_ADC_DATA 0x2C
#define AD7768_REG_MASTER_STATUS 0x2D
#define AD7768_REG_SPI_DIAG_STATUS 0x2E
#define AD7768_REG_ADC_DIAG_STATUS 0x2F
#define AD7768_REG_DIG_DIAG_STATUS 0x30
#define AD7768_REG_MCLK_COUNTER 0x31
+#define AD7768_REG_COEFF_CONTROL 0x32
+#define AD7768_REG24_COEFF_DATA 0x33
+#define AD7768_REG_ACCESS_KEY 0x34
/* AD7768_REG_POWER_CLOCK */
#define AD7768_PWR_MCLK_DIV_MSK GENMASK(5, 4)
@@ -76,9 +82,6 @@
#define AD7768_CONV_MODE_MSK GENMASK(2, 0)
#define AD7768_CONV_MODE(x) FIELD_PREP(AD7768_CONV_MODE_MSK, x)
-#define AD7768_RD_FLAG_MSK(x) (BIT(6) | ((x) & 0x3F))
-#define AD7768_WR_FLAG_MSK(x) ((x) & 0x3F)
-
enum ad7768_conv_mode {
AD7768_CONTINUOUS,
AD7768_ONE_SHOT,
@@ -153,6 +156,8 @@ static const struct iio_chan_spec ad7768_channels[] = {
struct ad7768_state {
struct spi_device *spi;
+ struct regmap *regmap;
+ struct regmap *regmap24;
struct regulator *vref;
struct clk *mclk;
unsigned int mclk_freq;
@@ -160,6 +165,7 @@ struct ad7768_state {
struct completion completion;
struct iio_trigger *trig;
struct gpio_desc *gpio_sync_in;
+ struct gpio_desc *gpio_reset;
const char *labels[ARRAY_SIZE(ad7768_channels)];
/*
* DMA (thus cache coherency maintenance) may require the
@@ -175,46 +181,82 @@ struct ad7768_state {
} data __aligned(IIO_DMA_MINALIGN);
};
-static int ad7768_spi_reg_read(struct ad7768_state *st, unsigned int addr,
- unsigned int len)
-{
- unsigned int shift;
- int ret;
+static const struct regmap_range ad7768_regmap_rd_ranges[] = {
+ regmap_reg_range(AD7768_REG_CHIP_TYPE, AD7768_REG_CHIP_GRADE),
+ regmap_reg_range(AD7768_REG_SCRATCH_PAD, AD7768_REG_SCRATCH_PAD),
+ regmap_reg_range(AD7768_REG_VENDOR_L, AD7768_REG_VENDOR_H),
+ regmap_reg_range(AD7768_REG_INTERFACE_FORMAT, AD7768_REG_GAIN_LO),
+ regmap_reg_range(AD7768_REG_SPI_DIAG_ENABLE, AD7768_REG_DIG_DIAG_ENABLE),
+ regmap_reg_range(AD7768_REG_MASTER_STATUS, AD7768_REG_COEFF_CONTROL),
+ regmap_reg_range(AD7768_REG_ACCESS_KEY, AD7768_REG_ACCESS_KEY),
+};
- shift = 32 - (8 * len);
- st->data.d8[0] = AD7768_RD_FLAG_MSK(addr);
+static const struct regmap_access_table ad7768_regmap_rd_table = {
+ .yes_ranges = ad7768_regmap_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad7768_regmap_rd_ranges),
+};
- ret = spi_write_then_read(st->spi, st->data.d8, 1,
- &st->data.d32, len);
- if (ret < 0)
- return ret;
+static const struct regmap_range ad7768_regmap_wr_ranges[] = {
+ regmap_reg_range(AD7768_REG_SCRATCH_PAD, AD7768_REG_SCRATCH_PAD),
+ regmap_reg_range(AD7768_REG_INTERFACE_FORMAT, AD7768_REG_GPIO_WRITE),
+ regmap_reg_range(AD7768_REG_OFFSET_HI, AD7768_REG_GAIN_LO),
+ regmap_reg_range(AD7768_REG_SPI_DIAG_ENABLE, AD7768_REG_DIG_DIAG_ENABLE),
+ regmap_reg_range(AD7768_REG_SPI_DIAG_STATUS, AD7768_REG_SPI_DIAG_STATUS),
+ regmap_reg_range(AD7768_REG_COEFF_CONTROL, AD7768_REG_COEFF_CONTROL),
+ regmap_reg_range(AD7768_REG_ACCESS_KEY, AD7768_REG_ACCESS_KEY),
+};
- return (be32_to_cpu(st->data.d32) >> shift);
-}
+static const struct regmap_access_table ad7768_regmap_wr_table = {
+ .yes_ranges = ad7768_regmap_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad7768_regmap_wr_ranges),
+};
-static int ad7768_spi_reg_write(struct ad7768_state *st,
- unsigned int addr,
- unsigned int val)
-{
- st->data.d8[0] = AD7768_WR_FLAG_MSK(addr);
- st->data.d8[1] = val & 0xFF;
+static const struct regmap_config ad7768_regmap_config = {
+ .name = "ad7768-1-8",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = BIT(6),
+ .rd_table = &ad7768_regmap_rd_table,
+ .wr_table = &ad7768_regmap_wr_table,
+ .max_register = AD7768_REG_ACCESS_KEY,
+ .use_single_write = true,
+ .use_single_read = true,
+};
- return spi_write(st->spi, st->data.d8, 2);
-}
+static const struct regmap_range ad7768_regmap24_rd_ranges[] = {
+ regmap_reg_range(AD7768_REG24_ADC_DATA, AD7768_REG24_ADC_DATA),
+ regmap_reg_range(AD7768_REG24_COEFF_DATA, AD7768_REG24_COEFF_DATA),
+};
-static int ad7768_set_mode(struct ad7768_state *st,
- enum ad7768_conv_mode mode)
-{
- int regval;
+static const struct regmap_access_table ad7768_regmap24_rd_table = {
+ .yes_ranges = ad7768_regmap24_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad7768_regmap24_rd_ranges),
+};
- regval = ad7768_spi_reg_read(st, AD7768_REG_CONVERSION, 1);
- if (regval < 0)
- return regval;
+static const struct regmap_range ad7768_regmap24_wr_ranges[] = {
+ regmap_reg_range(AD7768_REG24_COEFF_DATA, AD7768_REG24_COEFF_DATA),
+};
- regval &= ~AD7768_CONV_MODE_MSK;
- regval |= AD7768_CONV_MODE(mode);
+static const struct regmap_access_table ad7768_regmap24_wr_table = {
+ .yes_ranges = ad7768_regmap24_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad7768_regmap24_wr_ranges),
+};
- return ad7768_spi_reg_write(st, AD7768_REG_CONVERSION, regval);
+static const struct regmap_config ad7768_regmap24_config = {
+ .name = "ad7768-1-24",
+ .reg_bits = 8,
+ .val_bits = 24,
+ .read_flag_mask = BIT(6),
+ .rd_table = &ad7768_regmap24_rd_table,
+ .wr_table = &ad7768_regmap24_wr_table,
+ .max_register = AD7768_REG24_COEFF_DATA,
+};
+
+static int ad7768_set_mode(struct ad7768_state *st,
+ enum ad7768_conv_mode mode)
+{
+ return regmap_update_bits(st->regmap, AD7768_REG_CONVERSION,
+ AD7768_CONV_MODE_MSK, AD7768_CONV_MODE(mode));
}
static int ad7768_scan_direct(struct iio_dev *indio_dev)
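
Why the hand-rolled flag macros could go: regmap composes the read flag
itself. A short sketch of the equivalence, restating the removed
AD7768_RD_FLAG_MSK()/AD7768_WR_FLAG_MSK() definitions:

	/*
	 * With .reg_bits = 8 and .read_flag_mask = BIT(6), regmap builds the
	 * same first byte the old helpers assembled by hand:
	 *
	 *   read:  tx[0] = BIT(6) | addr	(was AD7768_RD_FLAG_MSK(addr))
	 *   write: tx[0] = addr		(was AD7768_WR_FLAG_MSK(addr))
	 *
	 * so fetching a 24-bit sample reduces to a plain read on the
	 * 24-bit map:
	 */
	unsigned int raw;
	int ret = regmap_read(st->regmap24, AD7768_REG24_ADC_DATA, &raw);
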
@@ -233,9 +275,10 @@ static int ad7768_scan_direct(struct iio_dev *indio_dev)
if (!ret)
return -ETIMEDOUT;
- readval = ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3);
- if (readval < 0)
- return readval;
+ ret = regmap_read(st->regmap24, AD7768_REG24_ADC_DATA, &readval);
+ if (ret)
+ return ret;
+
/*
* Any SPI configuration of the AD7768-1 can only be
* performed in continuous conversion mode.
@@ -258,16 +301,23 @@ static int ad7768_reg_access(struct iio_dev *indio_dev,
if (!iio_device_claim_direct(indio_dev))
return -EBUSY;
+ ret = -EINVAL;
if (readval) {
- ret = ad7768_spi_reg_read(st, reg, 1);
- if (ret < 0)
- goto err_release;
- *readval = ret;
- ret = 0;
+ if (regmap_check_range_table(st->regmap, reg, &ad7768_regmap_rd_table))
+ ret = regmap_read(st->regmap, reg, readval);
+
+ if (regmap_check_range_table(st->regmap24, reg, &ad7768_regmap24_rd_table))
+ ret = regmap_read(st->regmap24, reg, readval);
+
} else {
- ret = ad7768_spi_reg_write(st, reg, writeval);
+ if (regmap_check_range_table(st->regmap, reg, &ad7768_regmap_wr_table))
+ ret = regmap_write(st->regmap, reg, writeval);
+
+ if (regmap_check_range_table(st->regmap24, reg, &ad7768_regmap24_wr_table))
+ ret = regmap_write(st->regmap24, reg, writeval);
+
}
-err_release:
+
iio_device_release_direct(indio_dev);
return ret;
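
Note on the routing above: the two access tables are disjoint, so at most one
of the regmap_check_range_table() branches fires, and an address claimed by
neither table falls through with the preset -EINVAL. A condensed sketch of
the read path, written with else-if to make the exclusivity explicit:

	ret = -EINVAL;
	if (regmap_check_range_table(st->regmap, reg, &ad7768_regmap_rd_table))
		ret = regmap_read(st->regmap, reg, readval);
	else if (regmap_check_range_table(st->regmap24, reg, &ad7768_regmap24_rd_table))
		ret = regmap_read(st->regmap24, reg, readval);
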
@@ -284,7 +334,7 @@ static int ad7768_set_dig_fil(struct ad7768_state *st,
else
mode = AD7768_DIG_FIL_DEC_RATE(dec_rate);
- ret = ad7768_spi_reg_write(st, AD7768_REG_DIGITAL_FILTER, mode);
+ ret = regmap_write(st->regmap, AD7768_REG_DIGITAL_FILTER, mode);
if (ret < 0)
return ret;
@@ -321,7 +371,7 @@ static int ad7768_set_freq(struct ad7768_state *st,
*/
pwr_mode = AD7768_PWR_MCLK_DIV(ad7768_clk_config[idx].mclk_div) |
AD7768_PWR_PWRMODE(ad7768_clk_config[idx].pwrmode);
- ret = ad7768_spi_reg_write(st, AD7768_REG_POWER_CLOCK, pwr_mode);
+ ret = regmap_write(st->regmap, AD7768_REG_POWER_CLOCK, pwr_mode);
if (ret < 0)
return ret;
@@ -440,19 +490,30 @@ static int ad7768_setup(struct ad7768_state *st)
{
int ret;
- /*
- * Two writes to the SPI_RESET[1:0] bits are required to initiate
- * a software reset. The bits must first be set to 11, and then
- * to 10. When the sequence is detected, the reset occurs.
- * See the datasheet, page 70.
- */
- ret = ad7768_spi_reg_write(st, AD7768_REG_SYNC_RESET, 0x3);
- if (ret)
- return ret;
+ st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->gpio_reset))
+ return PTR_ERR(st->gpio_reset);
- ret = ad7768_spi_reg_write(st, AD7768_REG_SYNC_RESET, 0x2);
- if (ret)
- return ret;
+ if (st->gpio_reset) {
+ fsleep(10);
+ gpiod_set_value_cansleep(st->gpio_reset, 0);
+ fsleep(200);
+ } else {
+ /*
+ * Two writes to the SPI_RESET[1:0] bits are required to initiate
+ * a software reset. The bits must first be set to 11, and then
+ * to 10. When the sequence is detected, the reset occurs.
+ * See the datasheet, page 70.
+ */
+ ret = regmap_write(st->regmap, AD7768_REG_SYNC_RESET, 0x3);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD7768_REG_SYNC_RESET, 0x2);
+ if (ret)
+ return ret;
+ }
st->gpio_sync_in = devm_gpiod_get(&st->spi->dev, "adi,sync-in",
GPIOD_OUT_LOW);
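
One subtlety in the reset sequence above: gpiod values are logical, not
physical. A sketch, assuming the firmware flags reset-gpios as
GPIO_ACTIVE_LOW (an assumption, not stated in this diff):

	/* GPIOD_OUT_HIGH hands back the pin already asserted (driven low). */
	gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);

	gpiod_set_value_cansleep(gpio, 0);	/* logical 0 deasserts: pin goes high */
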
@@ -474,8 +535,9 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
if (ret < 0)
goto out;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &st->data.scan,
+ sizeof(st->data.scan),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -505,18 +567,19 @@ static int ad7768_buffer_postenable(struct iio_dev *indio_dev)
* continuous read mode. Subsequent data reads do not require an
* initial 8-bit write to query the ADC_DATA register.
*/
- return ad7768_spi_reg_write(st, AD7768_REG_INTERFACE_FORMAT, 0x01);
+ return regmap_write(st->regmap, AD7768_REG_INTERFACE_FORMAT, 0x01);
}
static int ad7768_buffer_predisable(struct iio_dev *indio_dev)
{
struct ad7768_state *st = iio_priv(indio_dev);
+ unsigned int unused;
/*
* To exit continuous read mode, perform a single read of the ADC_DATA
* reg (0x2C), which allows further configuration of the device.
*/
- return ad7768_spi_reg_read(st, AD7768_REG_ADC_DATA, 3);
+ return regmap_read(st->regmap24, AD7768_REG24_ADC_DATA, &unused);
}
static const struct iio_buffer_setup_ops ad7768_buffer_ops = {
@@ -559,6 +622,31 @@ static int ad7768_set_channel_label(struct iio_dev *indio_dev,
return 0;
}
+static int ad7768_triggered_buffer_alloc(struct iio_dev *indio_dev)
+{
+ struct ad7768_state *st = iio_priv(indio_dev);
+ int ret;
+
+ st->trig = devm_iio_trigger_alloc(indio_dev->dev.parent, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!st->trig)
+ return -ENOMEM;
+
+ st->trig->ops = &ad7768_trigger_ops;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ ret = devm_iio_trigger_register(indio_dev->dev.parent, st->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(st->trig);
+
+ return devm_iio_triggered_buffer_setup(indio_dev->dev.parent, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad7768_trigger_handler,
+ &ad7768_buffer_ops);
+}
+
static int ad7768_probe(struct spi_device *spi)
{
struct ad7768_state *st;
@@ -587,6 +675,16 @@ static int ad7768_probe(struct spi_device *spi)
st->spi = spi;
+ st->regmap = devm_regmap_init_spi(spi, &ad7768_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->regmap),
+ "Failed to initialize regmap");
+
+ st->regmap24 = devm_regmap_init_spi(spi, &ad7768_regmap24_config);
+ if (IS_ERR(st->regmap24))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->regmap24),
+ "Failed to initialize regmap24");
+
st->vref = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(st->vref))
return PTR_ERR(st->vref);
@@ -619,20 +717,6 @@ static int ad7768_probe(struct spi_device *spi)
return ret;
}
- st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
- indio_dev->name,
- iio_device_id(indio_dev));
- if (!st->trig)
- return -ENOMEM;
-
- st->trig->ops = &ad7768_trigger_ops;
- iio_trigger_set_drvdata(st->trig, indio_dev);
- ret = devm_iio_trigger_register(&spi->dev, st->trig);
- if (ret)
- return ret;
-
- indio_dev->trig = iio_trigger_get(st->trig);
-
init_completion(&st->completion);
ret = ad7768_set_channel_label(indio_dev, ARRAY_SIZE(ad7768_channels));
@@ -646,10 +730,7 @@ static int ad7768_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
- &iio_pollfunc_store_time,
- &ad7768_trigger_handler,
- &ad7768_buffer_ops);
+ ret = ad7768_triggered_buffer_alloc(indio_dev);
if (ret)
return ret;
@@ -658,7 +739,7 @@ static int ad7768_probe(struct spi_device *spi)
static const struct spi_device_id ad7768_id_table[] = {
{ "ad7768-1", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7768_id_table);
diff --git a/drivers/iio/adc/ad7779.c b/drivers/iio/adc/ad7779.c
index a5d87faa5e12..845adc510239 100644
--- a/drivers/iio/adc/ad7779.c
+++ b/drivers/iio/adc/ad7779.c
@@ -595,7 +595,8 @@ static irqreturn_t ad7779_trigger_handler(int irq, void *p)
goto exit_handler;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &st->data, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &st->data, sizeof(st->data),
+ pf->timestamp);
exit_handler:
iio_trigger_notify_done(indio_dev->trig);
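
This conversion recurs throughout the patch: iio_push_to_buffers_with_ts()
takes the total size of the source structure so the core can verify it is
large enough for the active scan plus timestamp, where the old
_with_timestamp() variant simply trusted the caller. A minimal sketch of the
expected layout (field sizes are illustrative):

	struct {
		__be32 channels[4];
		aligned_s64 timestamp;	/* naturally aligned for the core */
	} scan;

	iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
				    iio_get_time_ns(indio_dev));
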
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 597c2686ffa4..041fc25e3209 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -464,7 +464,7 @@ static const struct spi_device_id ad7791_spi_ids[] = {
{ "ad7789", AD7789 },
{ "ad7790", AD7790 },
{ "ad7791", AD7791 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7791_spi_ids);
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 87945efb940b..0369151c7db1 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -207,8 +207,8 @@ static irqreturn_t ad7923_trigger_handler(int irq, void *p)
if (b_sent)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, st->rx_buf, sizeof(st->rx_buf),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
index 2f949fe55873..7722cf9e8214 100644
--- a/drivers/iio/adc/ad7944.c
+++ b/drivers/iio/adc/ad7944.c
@@ -190,11 +190,6 @@ static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *
struct spi_transfer *xfers = adc->xfers;
/*
- * NB: can get better performance from some SPI controllers if we use
- * the same bits_per_word in every transfer.
- */
- xfers[0].bits_per_word = chan->scan_type.realbits;
- /*
* CS is tied to CNV and we need a low to high transition to start the
* conversion, so place CNV low for t_QUIET to prepare for this.
*/
@@ -208,7 +203,6 @@ static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *
xfers[1].cs_off = 1;
xfers[1].delay.value = t_conv_ns;
xfers[1].delay.unit = SPI_DELAY_UNIT_NSECS;
- xfers[1].bits_per_word = chan->scan_type.realbits;
/* Then we can read the data during the acquisition phase */
xfers[2].rx_buf = &adc->sample.raw;
@@ -228,11 +222,6 @@ static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc
struct spi_transfer *xfers = adc->xfers;
/*
- * NB: can get better performance from some SPI controllers if we use
- * the same bits_per_word in every transfer.
- */
- xfers[0].bits_per_word = chan->scan_type.realbits;
- /*
* CS has to be high for full conversion time to avoid triggering the
* busy indication.
*/
@@ -377,6 +366,8 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
if (chan->scan_type.sign == 's')
*val = sign_extend32(*val, chan->scan_type.realbits - 1);
+ else
+ *val &= GENMASK(chan->scan_type.realbits - 1, 0);
return IIO_VAL_INT;
}
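
The added else branch matters because the untouched high bits of the raw word
may hold stale data: signed channels stretch bit realbits - 1 upward, while
unsigned channels must clear everything above it. A worked example with 16
significant bits:

	u32 word = 0xFFFF8001;			/* 16 valid bits plus junk */

	int sval = sign_extend32(word, 15);	/* 0xFFFF8001 == -32767 */
	u32 uval = word & GENMASK(15, 0);	/* 0x00008001 ==  32769 */
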
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 993f4651b73a..9c02f9199139 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -958,7 +958,7 @@ static const struct i2c_device_id ad799x_id[] = {
{ "ad7994", ad7994 },
{ "ad7997", ad7997 },
{ "ad7998", ad7998 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad799x_id);
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 6c37f8e21120..4c5f8d29a559 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -587,6 +587,10 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p)
* byte set to zero. */
ad_sd_read_reg_raw(sigma_delta, data_reg, transfer_size, &data[1]);
break;
+
+ default:
+ dev_err_ratelimited(&indio_dev->dev, "Unsupported reg_size: %u\n", reg_size);
+ goto irq_handled;
}
/*
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index cf942c043457..4116c44197b8 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -702,7 +702,7 @@ static const struct of_device_id adi_axi_adc_of_match[] = {
{ .compatible = "adi,axi-adc-10.0.a", .data = &adc_generic },
{ .compatible = "adi,axi-ad485x", .data = &adi_axi_ad485x },
{ .compatible = "adi,axi-ad7606x", .data = &adc_ad7606 },
- { /* end of list */ }
+ { }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 414610afcb2c..c3450246730e 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -586,15 +586,6 @@ struct at91_adc_temp {
u16 saved_oversampling;
};
-/*
- * Buffer size requirements:
- * No channels * bytes_per_channel(2) + timestamp bytes (8)
- * Divided by 2 because we need half words.
- * We assume 32 channels for now, has to be increased if needed.
- * Nobody minds a buffer being too big.
- */
-#define AT91_BUFFER_MAX_HWORDS ((32 * 2 + 8) / 2)
-
struct at91_adc_state {
void __iomem *base;
int irq;
@@ -616,8 +607,8 @@ struct at91_adc_state {
struct at91_adc_temp temp_st;
struct iio_dev *indio_dev;
struct device *dev;
- /* Ensure naturally aligned timestamp */
- u16 buffer[AT91_BUFFER_MAX_HWORDS] __aligned(8);
+ /* We assume 32 channels for now, has to be increased if needed. */
+ IIO_DECLARE_BUFFER_WITH_TS(u16, buffer, 32);
/*
* lock to prevent concurrent 'single conversion' requests through
* sysfs.
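
IIO_DECLARE_BUFFER_WITH_TS() replaces the hand-computed
AT91_BUFFER_MAX_HWORDS sizing: it declares an element array padded and
aligned so that an s64 timestamp fits at a naturally aligned offset.
Roughly, as a sketch (the exact expansion lives in <linux/iio/iio.h>):

	/* IIO_DECLARE_BUFFER_WITH_TS(u16, buffer, 32) amounts to: */
	u16 buffer[ALIGN(32, sizeof(s64) / sizeof(u16)) +
		   sizeof(s64) / sizeof(u16)] __aligned(sizeof(s64));

which works out to the same 36 halfwords the old macro computed.
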
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 9fd7027623d0..71584ffd3632 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -163,14 +163,14 @@ static const struct iio_map axp20x_maps[] = {
IIO_MAP("batt_v", "axp20x-battery-power-supply", "batt_v"),
IIO_MAP("batt_chrg_i", "axp20x-battery-power-supply", "batt_chrg_i"),
IIO_MAP("batt_dischrg_i", "axp20x-battery-power-supply", "batt_dischrg_i"),
- { /* sentinel */ }
+ { }
};
static const struct iio_map axp22x_maps[] = {
IIO_MAP("batt_v", "axp20x-battery-power-supply", "batt_v"),
IIO_MAP("batt_chrg_i", "axp20x-battery-power-supply", "batt_chrg_i"),
IIO_MAP("batt_dischrg_i", "axp20x-battery-power-supply", "batt_dischrg_i"),
- { /* sentinel */ }
+ { }
};
static struct iio_map axp717_maps[] = {
@@ -1074,7 +1074,7 @@ static const struct of_device_id axp20x_adc_of_match[] = {
{ .compatible = "x-powers,axp221-adc", .data = (void *)&axp22x_data, },
{ .compatible = "x-powers,axp717-adc", .data = (void *)&axp717_data, },
{ .compatible = "x-powers,axp813-adc", .data = (void *)&axp813_data, },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, axp20x_adc_of_match);
@@ -1084,7 +1084,7 @@ static const struct platform_device_id axp20x_adc_id_match[] = {
{ .name = "axp22x-adc", .driver_data = (kernel_ulong_t)&axp22x_data, },
{ .name = "axp717-adc", .driver_data = (kernel_ulong_t)&axp717_data, },
{ .name = "axp813-adc", .driver_data = (kernel_ulong_t)&axp813_data, },
- { /* sentinel */ },
+ { }
};
MODULE_DEVICE_TABLE(platform, axp20x_adc_id_match);
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 45542efc3ece..c8283279c477 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -110,7 +110,7 @@ static const struct iio_map axp288_adc_default_maps[] = {
IIO_MAP("BATT_CHG_I", "axp288-chrg", "axp288-chrg-curr"),
IIO_MAP("BATT_DISCHRG_I", "axp288-chrg", "axp288-chrg-d-curr"),
IIO_MAP("BATT_V", "axp288-batt", "axp288-batt-volt"),
- {},
+ { }
};
static int axp288_adc_read_channel(int *val, unsigned long address,
@@ -207,7 +207,7 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
},
.driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
},
- {}
+ { }
};
static int axp288_adc_initialize(struct axp288_adc_info *info)
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index c218acf6c9c6..ba7cbd3b4822 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -942,7 +942,7 @@ static const struct of_device_id cpcap_adc_id_table[] = {
.compatible = "motorola,mapphone-cpcap-adc",
.data = &mapphone_adc,
},
- { /* sentinel */ },
+ { }
};
MODULE_DEVICE_TABLE(of, cpcap_adc_id_table);
diff --git a/drivers/iio/adc/da9150-gpadc.c b/drivers/iio/adc/da9150-gpadc.c
index 0290345ade84..b99291ce2a45 100644
--- a/drivers/iio/adc/da9150-gpadc.c
+++ b/drivers/iio/adc/da9150-gpadc.c
@@ -296,7 +296,7 @@ static const struct iio_map da9150_gpadc_default_maps[] = {
IIO_MAP("VBUS", "da9150-charger", "CHAN_VBUS"),
IIO_MAP("TJUNC_CORE", "da9150-charger", "CHAN_TJUNC"),
IIO_MAP("VBAT", "da9150-charger", "CHAN_VBAT"),
- {},
+ { }
};
static int da9150_gpadc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 359e26e3f5bc..9dbd2c87938c 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -488,8 +488,8 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
(void *)dev_data.values + t->from, t->length);
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data, sizeof(data),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
index e911c25d106d..5b16fe737659 100644
--- a/drivers/iio/adc/envelope-detector.c
+++ b/drivers/iio/adc/envelope-detector.c
@@ -305,7 +305,7 @@ static const struct iio_chan_spec_ext_info envelope_detector_ext_info[] = {
{ .name = "compare_interval",
.read = envelope_show_comp_interval,
.write = envelope_store_comp_interval, },
- { /* sentinel */ }
+ { }
};
static const struct iio_chan_spec envelope_detector_iio_channel = {
@@ -390,7 +390,7 @@ static int envelope_detector_probe(struct platform_device *pdev)
static const struct of_device_id envelope_detector_match[] = {
{ .compatible = "axentia,tse850-envelope-detector", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, envelope_detector_match);
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index b3f037510e35..f8c220f6a7b4 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -372,7 +372,7 @@ static int mx25_gcq_probe(struct platform_device *pdev)
static const struct of_device_id mx25_gcq_ids[] = {
{ .compatible = "fsl,imx25-gcq", },
- { /* Sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, mx25_gcq_ids);
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 689e34f06987..b4562aae8477 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -351,7 +351,7 @@ static const struct iio_enum hi8435_sensing_mode = {
static const struct iio_chan_spec_ext_info hi8435_ext_info[] = {
IIO_ENUM("sensing_mode", IIO_SEPARATE, &hi8435_sensing_mode),
IIO_ENUM_AVAILABLE("sensing_mode", IIO_SHARED_BY_TYPE, &hi8435_sensing_mode),
- {},
+ { }
};
#define HI8435_VOLTAGE_CHANNEL(num) \
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 8da0419ecfa3..7235fa9e13d5 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -87,7 +87,10 @@ struct hx711_data {
* triggered buffer
* 2x32-bit channel + 64-bit naturally aligned timestamp
*/
- u32 buffer[4] __aligned(8);
+ struct {
+ u32 channel[2];
+ aligned_s64 timestamp;
+ } buffer;
/*
* delay after a rising edge on SCK until the data is ready DOUT
* this is dependent on the hx711 where the datasheet tells a
@@ -361,15 +364,15 @@ static irqreturn_t hx711_trigger(int irq, void *p)
mutex_lock(&hx711_data->lock);
- memset(hx711_data->buffer, 0, sizeof(hx711_data->buffer));
+ memset(&hx711_data->buffer, 0, sizeof(hx711_data->buffer));
iio_for_each_active_channel(indio_dev, i) {
- hx711_data->buffer[j] = hx711_reset_read(hx711_data,
+ hx711_data->buffer.channel[j] = hx711_reset_read(hx711_data,
indio_dev->channels[i].channel);
j++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, hx711_data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &hx711_data->buffer,
pf->timestamp);
mutex_unlock(&hx711_data->lock);
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 828d3fea6d43..09ce71f6e941 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -413,7 +413,7 @@ static const struct iio_info imx7d_adc_iio_info = {
static const struct of_device_id imx7d_adc_match[] = {
{ .compatible = "fsl,imx7d-adc", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, imx7d_adc_match);
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index 3d19d7d744aa..be13a6ed7e00 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -481,7 +481,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(imx8qxp_adc_pm_ops,
static const struct of_device_id imx8qxp_adc_match[] = {
{ .compatible = "nxp,imx8qxp-adc", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, imx8qxp_adc_match);
diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
index 002eb19587d6..7feaafd2316f 100644
--- a/drivers/iio/adc/imx93_adc.c
+++ b/drivers/iio/adc/imx93_adc.c
@@ -464,7 +464,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(imx93_adc_pm_ops,
static const struct of_device_id imx93_adc_match[] = {
{ .compatible = "nxp,imx93-adc", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, imx93_adc_match);
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 40d14faa71c5..857e1b69d6cd 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -766,7 +766,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
chip->scan.chan[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time);
+ iio_push_to_buffers_with_ts(indio_dev, &chip->scan, sizeof(chip->scan), time);
return 0;
};
diff --git a/drivers/iio/adc/industrialio-adc.c b/drivers/iio/adc/industrialio-adc.c
new file mode 100644
index 000000000000..b4057230e749
--- /dev/null
+++ b/drivers/iio/adc/industrialio-adc.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Helpers for parsing common ADC information from a firmware node.
+ *
+ * Copyright (c) 2025 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+#include <linux/iio/adc-helpers.h>
+#include <linux/iio/iio.h>
+
+/**
+ * devm_iio_adc_device_alloc_chaninfo_se - allocate and fill iio_chan_spec for ADC
+ *
+ * Scan the device node for single-ended ADC channel information. Channel ID is
+ * expected to be found from the "reg" property. Allocate and populate the
+ * iio_chan_spec structure corresponding to channels that are found. The memory
+ * for iio_chan_spec structure will be freed upon device detach.
+ *
+ * @dev: Pointer to the ADC device.
+ * @template: Template iio_chan_spec from which the fields of all
+ * found and allocated channels are initialized.
+ * @max_chan_id: Maximum value of a channel ID. Use negative value if no
+ * checking is required.
+ * @cs: Location where pointer to allocated iio_chan_spec
+ * should be stored.
+ *
+ * Return: Number of found channels on success. Negative value to indicate
+ * failure. Specifically, -ENOENT if no channel nodes were found.
+ */
+int devm_iio_adc_device_alloc_chaninfo_se(struct device *dev,
+ const struct iio_chan_spec *template,
+ int max_chan_id,
+ struct iio_chan_spec **cs)
+{
+ struct iio_chan_spec *chan_array, *chan;
+ int num_chan, ret;
+
+ num_chan = iio_adc_device_num_channels(dev);
+ if (num_chan < 0)
+ return num_chan;
+
+ if (!num_chan)
+ return -ENOENT;
+
+ chan_array = devm_kcalloc(dev, num_chan, sizeof(*chan_array),
+ GFP_KERNEL);
+ if (!chan_array)
+ return -ENOMEM;
+
+ chan = &chan_array[0];
+
+ device_for_each_named_child_node_scoped(dev, child, "channel") {
+ u32 ch;
+
+ ret = fwnode_property_read_u32(child, "reg", &ch);
+ if (ret)
+ return ret;
+
+ if (max_chan_id >= 0 && ch > max_chan_id)
+ return -ERANGE;
+
+ *chan = *template;
+ chan->channel = ch;
+ chan++;
+ }
+
+ *cs = chan_array;
+
+ return num_chan;
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_adc_device_alloc_chaninfo_se, "IIO_DRIVER");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("IIO ADC fwnode parsing helpers");
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
index c178850eaaab..101c1a0ce591 100644
--- a/drivers/iio/adc/intel_mrfld_adc.c
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -174,7 +174,7 @@ static const struct iio_map iio_maps[] = {
IIO_MAP("CH6", "bcove-temp", "SYSTEMP0"),
IIO_MAP("CH7", "bcove-temp", "SYSTEMP1"),
IIO_MAP("CH8", "bcove-temp", "SYSTEMP2"),
- {}
+ { }
};
static int mrfld_adc_probe(struct platform_device *pdev)
@@ -222,7 +222,7 @@ static int mrfld_adc_probe(struct platform_device *pdev)
static const struct platform_device_id mrfld_adc_id_table[] = {
{ .name = "mrfld_bcove_adc" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(platform, mrfld_adc_id_table);
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index 450a243d1f7c..7e5d181ff702 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -188,7 +188,7 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
static const struct of_device_id lpc18xx_adc_match[] = {
{ .compatible = "nxp,lpc1850-adc" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, lpc18xx_adc_match);
diff --git a/drivers/iio/adc/ltc2471.c b/drivers/iio/adc/ltc2471.c
index 97c417c3a4eb..a579107fd5c9 100644
--- a/drivers/iio/adc/ltc2471.c
+++ b/drivers/iio/adc/ltc2471.c
@@ -138,7 +138,7 @@ static int ltc2471_i2c_probe(struct i2c_client *client)
static const struct i2c_device_id ltc2471_i2c_id[] = {
{ "ltc2471", ltc2471 },
{ "ltc2473", ltc2473 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ltc2471_i2c_id);
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 565ca2e21c0c..7d7001e8e3d9 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -188,8 +188,8 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
adc->scan.channels[i] = ret;
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &adc->scan, sizeof(adc->scan),
+ iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
index 437d9f24b5a1..511b2f14dfaf 100644
--- a/drivers/iio/adc/max11410.c
+++ b/drivers/iio/adc/max11410.c
@@ -632,8 +632,8 @@ static irqreturn_t max11410_trigger_handler(int irq, void *p)
goto out;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &st->scan, sizeof(st->scan),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 35717ec082ce..a7e9912fb44a 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1498,8 +1498,8 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
if (b_sent < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &st->data, sizeof(st->data),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -1551,7 +1551,7 @@ static const struct of_device_id max1363_of_match[] = {
MAX1363_COMPATIBLE("maxim,max11645", max11645),
MAX1363_COMPATIBLE("maxim,max11646", max11646),
MAX1363_COMPATIBLE("maxim,max11647", max11647),
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, max1363_of_match);
@@ -1672,7 +1672,7 @@ static const struct i2c_device_id max1363_id[] = {
MAX1363_ID_TABLE("max11645", max11645),
MAX1363_ID_TABLE("max11646", max11646),
MAX1363_ID_TABLE("max11647", max11647),
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(i2c, max1363_id);
diff --git a/drivers/iio/adc/max77541-adc.c b/drivers/iio/adc/max77541-adc.c
index 21d024bde16b..0aa04d143ad4 100644
--- a/drivers/iio/adc/max77541-adc.c
+++ b/drivers/iio/adc/max77541-adc.c
@@ -176,7 +176,7 @@ static int max77541_adc_probe(struct platform_device *pdev)
static const struct platform_device_id max77541_adc_platform_id[] = {
{ "max77541-adc" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, max77541_adc_platform_id);
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 6748b44d568d..a6f21791c685 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -6,11 +6,13 @@
* Copyright (C) 2018 Kent Gustavsson <kent@minoris.se>
*/
#include <linux/bitfield.h>
-#include <linux/bits.h>
+#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dev_printk.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
@@ -79,6 +81,8 @@
#define MCP3910_CONFIG1_CLKEXT BIT(6)
#define MCP3910_CONFIG1_VREFEXT BIT(7)
+#define MCP3910_CHANNEL(ch) (MCP3911_REG_CHANNEL0 + (ch))
+
#define MCP3910_REG_OFFCAL_CH0 0x0f
#define MCP3910_OFFCAL(ch) (MCP3910_REG_OFFCAL_CH0 + (ch) * 6)
@@ -110,6 +114,7 @@ struct mcp3911_chip_info {
int (*get_offset)(struct mcp3911 *adc, int channel, int *val);
int (*set_offset)(struct mcp3911 *adc, int channel, int val);
int (*set_scale)(struct mcp3911 *adc, int channel, u32 val);
+ int (*get_raw)(struct mcp3911 *adc, int channel, int *val);
};
struct mcp3911 {
@@ -170,6 +175,18 @@ static int mcp3911_update(struct mcp3911 *adc, u8 reg, u32 mask, u32 val, u8 len
return mcp3911_write(adc, reg, val, len);
}
+static int mcp3911_read_s24(struct mcp3911 *const adc, u8 const reg, s32 *const val)
+{
+ u32 uval;
+ int const ret = mcp3911_read(adc, reg, &uval, 3);
+
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(uval, 23);
+ return ret;
+}
+
static int mcp3910_enable_offset(struct mcp3911 *adc, bool enable)
{
unsigned int mask = MCP3910_CONFIG0_EN_OFFCAL;
@@ -194,6 +211,11 @@ static int mcp3910_set_offset(struct mcp3911 *adc, int channel, int val)
return adc->chip->enable_offset(adc, 1);
}
+static int mcp3910_get_raw(struct mcp3911 *adc, int channel, s32 *val)
+{
+ return mcp3911_read_s24(adc, MCP3910_CHANNEL(channel), val);
+}
+
static int mcp3911_enable_offset(struct mcp3911 *adc, bool enable)
{
unsigned int mask = MCP3911_STATUSCOM_EN_OFFCAL;
@@ -218,6 +240,11 @@ static int mcp3911_set_offset(struct mcp3911 *adc, int channel, int val)
return adc->chip->enable_offset(adc, 1);
}
+static int mcp3911_get_raw(struct mcp3911 *adc, int channel, s32 *val)
+{
+ return mcp3911_read_s24(adc, MCP3911_CHANNEL(channel), val);
+}
+
static int mcp3910_get_osr(struct mcp3911 *adc, u32 *val)
{
int ret;
@@ -321,12 +348,9 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
guard(mutex)(&adc->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = mcp3911_read(adc,
- MCP3911_CHANNEL(channel->channel), val, 3);
+ ret = adc->chip->get_raw(adc, channel->channel, val);
if (ret)
return ret;
-
- *val = sign_extend32(*val, 23);
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
ret = adc->chip->get_offset(adc, channel->channel, val);
@@ -516,8 +540,8 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
adc->scan.channels[i] = get_unaligned_be24(&adc->rx_buf[scan_chan->channel * 3]);
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &adc->scan, sizeof(adc->scan),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -684,6 +708,7 @@ static const struct iio_trigger_ops mcp3911_trigger_ops = {
static int mcp3911_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct gpio_desc *gpio_reset;
struct iio_dev *indio_dev;
struct mcp3911 *adc;
bool external_vref;
@@ -728,6 +753,22 @@ static int mcp3911_probe(struct spi_device *spi)
}
dev_dbg(dev, "use device address %i\n", adc->dev_addr);
+ gpio_reset = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio_reset))
+ return dev_err_probe(dev, PTR_ERR(gpio_reset),
+ "Cannot get reset GPIO\n");
+
+ if (gpio_reset) {
+ gpiod_set_value_cansleep(gpio_reset, 0);
+
+ /*
+ * Settling time after Hard Reset Mode (determined experimentally):
+ * 330 microseconds are too few; 470 microseconds are sufficient.
+ * Just in case, we add some safety factor...
+ */
+ fsleep(600);
+ }
+
ret = adc->chip->config(adc, external_vref);
if (ret)
return ret;
@@ -799,6 +840,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
.get_offset = mcp3910_get_offset,
.set_offset = mcp3910_set_offset,
.set_scale = mcp3910_set_scale,
+ .get_raw = mcp3910_get_raw,
},
[MCP3911] = {
.channels = mcp3911_channels,
@@ -810,6 +852,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
.get_offset = mcp3911_get_offset,
.set_offset = mcp3911_set_offset,
.set_scale = mcp3911_set_scale,
+ .get_raw = mcp3911_get_raw,
},
[MCP3912] = {
.channels = mcp3912_channels,
@@ -821,6 +864,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
.get_offset = mcp3910_get_offset,
.set_offset = mcp3910_set_offset,
.set_scale = mcp3910_set_scale,
+ .get_raw = mcp3910_get_raw,
},
[MCP3913] = {
.channels = mcp3913_channels,
@@ -832,6 +876,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
.get_offset = mcp3910_get_offset,
.set_offset = mcp3910_set_offset,
.set_scale = mcp3910_set_scale,
+ .get_raw = mcp3910_get_raw,
},
[MCP3914] = {
.channels = mcp3914_channels,
@@ -843,6 +888,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
.get_offset = mcp3910_get_offset,
.set_offset = mcp3910_set_offset,
.set_scale = mcp3910_set_scale,
+ .get_raw = mcp3910_get_raw,
},
[MCP3918] = {
.channels = mcp3918_channels,
@@ -854,6 +900,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
.get_offset = mcp3910_get_offset,
.set_offset = mcp3910_set_offset,
.set_scale = mcp3910_set_scale,
+ .get_raw = mcp3910_get_raw,
},
[MCP3919] = {
.channels = mcp3919_channels,
@@ -865,6 +912,7 @@ static const struct mcp3911_chip_info mcp3911_chip_info[] = {
.get_offset = mcp3910_get_offset,
.set_offset = mcp3910_set_offset,
.set_scale = mcp3910_set_scale,
+ .get_raw = mcp3910_get_raw,
},
};
static const struct of_device_id mcp3911_dt_ids[] = {
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 997def4a4d2f..4ff88603e4fc 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -160,6 +160,11 @@
#define MESON_SAR_ADC_REG11_EOC BIT(1)
#define MESON_SAR_ADC_REG11_VREF_SEL BIT(0)
+#define MESON_SAR_ADC_REG12 0x30
+ #define MESON_SAR_ADC_REG12_MPLL0_UNKNOWN BIT(0)
+ #define MESON_SAR_ADC_REG12_MPLL1_UNKNOWN BIT(1)
+ #define MESON_SAR_ADC_REG12_MPLL2_UNKNOWN BIT(2)
+
#define MESON_SAR_ADC_REG13 0x34
#define MESON_SAR_ADC_REG13_12BIT_CALIBRATION_MASK GENMASK(13, 8)
@@ -326,6 +331,7 @@ struct meson_sar_adc_param {
u8 cmv_select;
u8 adc_eoc;
enum meson_sar_adc_vref_sel vref_voltage;
+ bool enable_mpll_clock_workaround;
};
struct meson_sar_adc_data {
@@ -995,6 +1001,15 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
priv->param->cmv_select);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
MESON_SAR_ADC_REG11_CMV_SEL, regval);
+
+ if (priv->param->enable_mpll_clock_workaround) {
+ dev_warn(dev,
+ "Enabling unknown bits to make the MPLL clocks work. This may change so always update dtbs and kernel together\n");
+ regmap_write(priv->regmap, MESON_SAR_ADC_REG12,
+ MESON_SAR_ADC_REG12_MPLL0_UNKNOWN |
+ MESON_SAR_ADC_REG12_MPLL1_UNKNOWN |
+ MESON_SAR_ADC_REG12_MPLL2_UNKNOWN);
+ }
}
ret = clk_set_parent(priv->adc_sel_clk, priv->clkin);
@@ -1219,6 +1234,17 @@ static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
.cmv_select = 1,
};
+static const struct meson_sar_adc_param meson_sar_adc_gxlx_param = {
+ .has_bl30_integration = true,
+ .clock_rate = 1200000,
+ .regmap_config = &meson_sar_adc_regmap_config_gxbb,
+ .resolution = 12,
+ .disable_ring_counter = 1,
+ .vref_voltage = 1,
+ .cmv_select = true,
+ .enable_mpll_clock_workaround = true,
+};
+
static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
@@ -1267,6 +1293,11 @@ static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
.name = "meson-gxl-saradc",
};
+static const struct meson_sar_adc_data meson_sar_adc_gxlx_data = {
+ .param = &meson_sar_adc_gxlx_param,
+ .name = "meson-gxlx-saradc",
+};
+
static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
.param = &meson_sar_adc_gxl_param,
.name = "meson-gxm-saradc",
@@ -1299,6 +1330,9 @@ static const struct of_device_id meson_sar_adc_of_match[] = {
.compatible = "amlogic,meson-gxl-saradc",
.data = &meson_sar_adc_gxl_data,
}, {
+ .compatible = "amlogic,meson-gxlx-saradc",
+ .data = &meson_sar_adc_gxlx_data,
+ }, {
.compatible = "amlogic,meson-gxm-saradc",
.data = &meson_sar_adc_gxm_data,
}, {
@@ -1308,7 +1342,7 @@ static const struct of_device_id meson_sar_adc_of_match[] = {
.compatible = "amlogic,meson-g12a-saradc",
.data = &meson_sar_adc_g12a_data,
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, meson_sar_adc_of_match);
diff --git a/drivers/iio/adc/mt6359-auxadc.c b/drivers/iio/adc/mt6359-auxadc.c
index a4970cfb49a5..eecf88b05c6f 100644
--- a/drivers/iio/adc/mt6359-auxadc.c
+++ b/drivers/iio/adc/mt6359-auxadc.c
@@ -588,7 +588,7 @@ static const struct of_device_id mt6359_auxadc_of_match[] = {
{ .compatible = "mediatek,mt6357-auxadc", .data = &mt6357_chip_info },
{ .compatible = "mediatek,mt6358-auxadc", .data = &mt6358_chip_info },
{ .compatible = "mediatek,mt6359-auxadc", .data = &mt6359_chip_info },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, mt6359_auxadc_of_match);
diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c
index 4eb2455d6ffa..f8e98b6fa7e9 100644
--- a/drivers/iio/adc/mt6360-adc.c
+++ b/drivers/iio/adc/mt6360-adc.c
@@ -263,8 +263,8 @@ static irqreturn_t mt6360_adc_trigger_handler(int irq, void *p)
struct mt6360_adc_data *mad = iio_priv(indio_dev);
struct {
u16 values[MT6360_CHAN_MAX];
- int64_t timestamp;
- } data __aligned(8);
+ aligned_s64 timestamp;
+ } data;
int i = 0, bit, val, ret;
memset(&data, 0, sizeof(data));
diff --git a/drivers/iio/adc/mt6370-adc.c b/drivers/iio/adc/mt6370-adc.c
index 0bc112135bca..7c71fe5e8d31 100644
--- a/drivers/iio/adc/mt6370-adc.c
+++ b/drivers/iio/adc/mt6370-adc.c
@@ -336,7 +336,7 @@ static int mt6370_adc_probe(struct platform_device *pdev)
static const struct of_device_id mt6370_adc_of_id[] = {
{ .compatible = "mediatek,mt6370-adc", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mt6370_adc_of_id);
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index 152cbe265e1a..92baf3f5f560 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -141,9 +141,8 @@ static int mxs_lradc_adc_read_single(struct iio_dev *iio_dev, int chan,
* the same time, yet the code becomes horribly complicated. Therefore I
* applied KISS principle here.
*/
- ret = iio_device_claim_direct_mode(iio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
reinit_completion(&adc->completion);
@@ -192,7 +191,7 @@ err:
writel(LRADC_CTRL1_LRADC_IRQ_EN(0),
adc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
- iio_device_release_direct_mode(iio_dev);
+ iio_device_release_direct(iio_dev);
return ret;
}
@@ -275,9 +274,8 @@ static int mxs_lradc_adc_write_raw(struct iio_dev *iio_dev,
adc->scale_avail[chan->channel];
int ret;
- ret = iio_device_claim_direct_mode(iio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
switch (m) {
case IIO_CHAN_INFO_SCALE:
@@ -300,7 +298,7 @@ static int mxs_lradc_adc_write_raw(struct iio_dev *iio_dev,
break;
}
- iio_device_release_direct_mode(iio_dev);
+ iio_device_release_direct(iio_dev);
return ret;
}
@@ -427,7 +425,8 @@ static irqreturn_t mxs_lradc_adc_trigger_handler(int irq, void *p)
j++;
}
- iio_push_to_buffers_with_timestamp(iio, adc->buffer, pf->timestamp);
+ iio_push_to_buffers_with_ts(iio, adc->buffer, sizeof(adc->buffer),
+ pf->timestamp);
iio_trigger_notify_done(iio->trig);
diff --git a/drivers/iio/adc/nct7201.c b/drivers/iio/adc/nct7201.c
new file mode 100644
index 000000000000..d87824e5490f
--- /dev/null
+++ b/drivers/iio/adc/nct7201.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Nuvoton nct7201 and nct7202 power monitor chips.
+ *
+ * Copyright (c) 2024-2025 Nuvoton Technology corporation.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+
+#define NCT7201_REG_INTERRUPT_STATUS 0x0C
+#define NCT7201_REG_VOLT_LOW_BYTE 0x0F
+#define NCT7201_REG_CONFIGURATION 0x10
+#define NCT7201_BIT_CONFIGURATION_START BIT(0)
+#define NCT7201_BIT_CONFIGURATION_ALERT_MSK BIT(1)
+#define NCT7201_BIT_CONFIGURATION_CONV_RATE BIT(2)
+#define NCT7201_BIT_CONFIGURATION_RESET BIT(7)
+
+#define NCT7201_REG_ADVANCED_CONFIGURATION 0x11
+#define NCT7201_BIT_ADVANCED_CONF_MOD_ALERT BIT(0)
+#define NCT7201_BIT_ADVANCED_CONF_MOD_STS BIT(1)
+#define NCT7201_BIT_ADVANCED_CONF_FAULT_QUEUE BIT(2)
+#define NCT7201_BIT_ADVANCED_CONF_EN_DEEP_SHUTDOWN BIT(4)
+#define NCT7201_BIT_ADVANCED_CONF_EN_SMB_TIMEOUT BIT(5)
+#define NCT7201_BIT_ADVANCED_CONF_MOD_RSTIN BIT(7)
+
+#define NCT7201_REG_CHANNEL_INPUT_MODE 0x12
+#define NCT7201_REG_CHANNEL_ENABLE 0x13
+#define NCT7201_REG_INTERRUPT_MASK_1 0x15
+#define NCT7201_REG_INTERRUPT_MASK_2 0x16
+#define NCT7201_REG_BUSY_STATUS 0x1E
+#define NCT7201_BIT_BUSY BIT(0)
+#define NCT7201_BIT_PWR_UP BIT(1)
+#define NCT7201_REG_ONE_SHOT 0x1F
+#define NCT7201_REG_SMUS_ADDRESS 0xFC
+#define NCT7201_REG_VIN_MASK GENMASK(15, 3)
+
+#define NCT7201_REG_VIN(i) (0x00 + i)
+#define NCT7201_REG_VIN_HIGH_LIMIT(i) (0x20 + (i) * 2)
+#define NCT7201_REG_VIN_LOW_LIMIT(i) (0x21 + (i) * 2)
+#define NCT7201_MAX_CHANNEL 12
+
+static const struct regmap_range nct7201_read_reg_range[] = {
+ regmap_reg_range(NCT7201_REG_INTERRUPT_STATUS, NCT7201_REG_BUSY_STATUS),
+ regmap_reg_range(NCT7201_REG_SMUS_ADDRESS, NCT7201_REG_SMUS_ADDRESS),
+};
+
+static const struct regmap_access_table nct7201_readable_regs_tbl = {
+ .yes_ranges = nct7201_read_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(nct7201_read_reg_range),
+};
+
+static const struct regmap_range nct7201_write_reg_range[] = {
+ regmap_reg_range(NCT7201_REG_CONFIGURATION, NCT7201_REG_INTERRUPT_MASK_2),
+ regmap_reg_range(NCT7201_REG_ONE_SHOT, NCT7201_REG_ONE_SHOT),
+};
+
+static const struct regmap_access_table nct7201_writeable_regs_tbl = {
+ .yes_ranges = nct7201_write_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(nct7201_write_reg_range),
+};
+
+static const struct regmap_range nct7201_read_vin_reg_range[] = {
+ regmap_reg_range(NCT7201_REG_VIN(0), NCT7201_REG_VIN(NCT7201_MAX_CHANNEL - 1)),
+ regmap_reg_range(NCT7201_REG_VIN_HIGH_LIMIT(0),
+ NCT7201_REG_VIN_LOW_LIMIT(NCT7201_MAX_CHANNEL - 1)),
+};
+
+static const struct regmap_access_table nct7201_readable_vin_regs_tbl = {
+ .yes_ranges = nct7201_read_vin_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(nct7201_read_vin_reg_range),
+};
+
+static const struct regmap_range nct7201_write_vin_reg_range[] = {
+ regmap_reg_range(NCT7201_REG_VIN_HIGH_LIMIT(0),
+ NCT7201_REG_VIN_LOW_LIMIT(NCT7201_MAX_CHANNEL - 1)),
+};
+
+static const struct regmap_access_table nct7201_writeable_vin_regs_tbl = {
+ .yes_ranges = nct7201_write_vin_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(nct7201_write_vin_reg_range),
+};
+
+static const struct regmap_config nct7201_regmap8_config = {
+ .name = "vin-data-read-byte",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+ .max_register = 0xff,
+ .rd_table = &nct7201_readable_regs_tbl,
+ .wr_table = &nct7201_writeable_regs_tbl,
+};
+
+static const struct regmap_config nct7201_regmap16_config = {
+ .name = "vin-data-read-word",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0xff,
+ .rd_table = &nct7201_readable_vin_regs_tbl,
+ .wr_table = &nct7201_writeable_vin_regs_tbl,
+};
+
+struct nct7201_chip_info {
+ struct regmap *regmap;
+ struct regmap *regmap16;
+ int num_vin_channels;
+ __le16 vin_mask;
+};
+
+struct nct7201_adc_model_data {
+ const char *model_name;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ int num_vin_channels;
+};
+
+static const struct iio_event_spec nct7201_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define NCT7201_VOLTAGE_CHANNEL(num) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = num + 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = num, \
+ .event_spec = nct7201_events, \
+ .num_event_specs = ARRAY_SIZE(nct7201_events), \
+ }
+
+static const struct iio_chan_spec nct7201_channels[] = {
+ NCT7201_VOLTAGE_CHANNEL(0),
+ NCT7201_VOLTAGE_CHANNEL(1),
+ NCT7201_VOLTAGE_CHANNEL(2),
+ NCT7201_VOLTAGE_CHANNEL(3),
+ NCT7201_VOLTAGE_CHANNEL(4),
+ NCT7201_VOLTAGE_CHANNEL(5),
+ NCT7201_VOLTAGE_CHANNEL(6),
+ NCT7201_VOLTAGE_CHANNEL(7),
+};
+
+static const struct iio_chan_spec nct7202_channels[] = {
+ NCT7201_VOLTAGE_CHANNEL(0),
+ NCT7201_VOLTAGE_CHANNEL(1),
+ NCT7201_VOLTAGE_CHANNEL(2),
+ NCT7201_VOLTAGE_CHANNEL(3),
+ NCT7201_VOLTAGE_CHANNEL(4),
+ NCT7201_VOLTAGE_CHANNEL(5),
+ NCT7201_VOLTAGE_CHANNEL(6),
+ NCT7201_VOLTAGE_CHANNEL(7),
+ NCT7201_VOLTAGE_CHANNEL(8),
+ NCT7201_VOLTAGE_CHANNEL(9),
+ NCT7201_VOLTAGE_CHANNEL(10),
+ NCT7201_VOLTAGE_CHANNEL(11),
+};
+
+static int nct7201_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct nct7201_chip_info *chip = iio_priv(indio_dev);
+ unsigned int value;
+ int err;
+
+ if (chan->type != IIO_VOLTAGE)
+ return -EOPNOTSUPP;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = regmap_read(chip->regmap16, NCT7201_REG_VIN(chan->address), &value);
+ if (err)
+ return err;
+ *val = FIELD_GET(NCT7201_REG_VIN_MASK, value);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* From the datasheet, we have to multiply by 0.0004995 */
+ *val = 0;
+ *val2 = 499500;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
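
Worked example of the scale reported above: with IIO_VAL_INT_PLUS_NANO,
val = 0 and val2 = 499500 mean 0.0004995 V per LSB, so a hypothetical raw
reading of 2000 corresponds to 2000 * 0.0004995 V = 0.999 V.
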
+static int nct7201_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct nct7201_chip_info *chip = iio_priv(indio_dev);
+ unsigned int value;
+ int err;
+
+ if (chan->type != IIO_VOLTAGE)
+ return -EOPNOTSUPP;
+
+ if (info != IIO_EV_INFO_VALUE)
+ return -EINVAL;
+
+ if (dir == IIO_EV_DIR_FALLING)
+ err = regmap_read(chip->regmap16, NCT7201_REG_VIN_LOW_LIMIT(chan->address),
+ &value);
+ else
+ err = regmap_read(chip->regmap16, NCT7201_REG_VIN_HIGH_LIMIT(chan->address),
+ &value);
+ if (err)
+ return err;
+
+ *val = FIELD_GET(NCT7201_REG_VIN_MASK, value);
+
+ return IIO_VAL_INT;
+}
+
+static int nct7201_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct nct7201_chip_info *chip = iio_priv(indio_dev);
+ int err;
+
+ if (chan->type != IIO_VOLTAGE)
+ return -EOPNOTSUPP;
+
+ if (info != IIO_EV_INFO_VALUE)
+ return -EOPNOTSUPP;
+
+ if (dir == IIO_EV_DIR_FALLING)
+ err = regmap_write(chip->regmap16, NCT7201_REG_VIN_LOW_LIMIT(chan->address),
+ FIELD_PREP(NCT7201_REG_VIN_MASK, val));
+ else
+ err = regmap_write(chip->regmap16, NCT7201_REG_VIN_HIGH_LIMIT(chan->address),
+ FIELD_PREP(NCT7201_REG_VIN_MASK, val));
+
+ return err;
+}
+
+static int nct7201_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct nct7201_chip_info *chip = iio_priv(indio_dev);
+
+ if (chan->type != IIO_VOLTAGE)
+ return -EOPNOTSUPP;
+
+ return !!(le16_to_cpu(chip->vin_mask) & BIT(chan->address));
+}
+
+static int nct7201_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct nct7201_chip_info *chip = iio_priv(indio_dev);
+ __le16 mask = cpu_to_le16(BIT(chan->address));
+ int err;
+
+ if (chan->type != IIO_VOLTAGE)
+ return -EOPNOTSUPP;
+
+ if (state)
+ chip->vin_mask |= mask;
+ else
+ chip->vin_mask &= ~mask;
+
+ if (chip->num_vin_channels <= 8)
+ err = regmap_write(chip->regmap, NCT7201_REG_CHANNEL_ENABLE,
+ le16_to_cpu(chip->vin_mask));
+ else
+ err = regmap_bulk_write(chip->regmap, NCT7201_REG_CHANNEL_ENABLE,
+ &chip->vin_mask, sizeof(chip->vin_mask));
+
+ return err;
+}
+
+static const struct iio_info nct7201_info = {
+ .read_raw = nct7201_read_raw,
+ .read_event_config = nct7201_read_event_config,
+ .write_event_config = nct7201_write_event_config,
+ .read_event_value = nct7201_read_event_value,
+ .write_event_value = nct7201_write_event_value,
+};
+
+static const struct iio_info nct7201_info_no_irq = {
+ .read_raw = nct7201_read_raw,
+};
+
+static const struct nct7201_adc_model_data nct7201_model_data = {
+ .model_name = "nct7201",
+ .channels = nct7201_channels,
+ .num_channels = ARRAY_SIZE(nct7201_channels),
+ .num_vin_channels = 8,
+};
+
+static const struct nct7201_adc_model_data nct7202_model_data = {
+ .model_name = "nct7202",
+ .channels = nct7202_channels,
+ .num_channels = ARRAY_SIZE(nct7202_channels),
+ .num_vin_channels = 12,
+};
+
+static int nct7201_init_chip(struct nct7201_chip_info *chip)
+{
+ struct device *dev = regmap_get_device(chip->regmap);
+ __le16 data = cpu_to_le16(GENMASK(chip->num_vin_channels - 1, 0));
+ unsigned int value;
+ int err;
+
+ err = regmap_write(chip->regmap, NCT7201_REG_CONFIGURATION,
+ NCT7201_BIT_CONFIGURATION_RESET);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to reset chip\n");
+
+ /*
+	 * The device needs about 25 ms to become ready after a reset; once it
+	 * is ready, the power-up bit reads 1.
+ */
+ fsleep(25 * USEC_PER_MSEC);
+
+ err = regmap_read(chip->regmap, NCT7201_REG_BUSY_STATUS, &value);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to read busy status\n");
+ if (!(value & NCT7201_BIT_PWR_UP))
+ return dev_err_probe(dev, -EIO, "Failed to power up after reset\n");
+
+	/* Enable all channels */
+ if (chip->num_vin_channels <= 8)
+ err = regmap_write(chip->regmap, NCT7201_REG_CHANNEL_ENABLE,
+ le16_to_cpu(data));
+ else
+ err = regmap_bulk_write(chip->regmap, NCT7201_REG_CHANNEL_ENABLE,
+ &data, sizeof(data));
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable channels\n");
+
+ err = regmap_bulk_read(chip->regmap, NCT7201_REG_CHANNEL_ENABLE,
+ &chip->vin_mask, sizeof(chip->vin_mask));
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to read channel enable register\n");
+
+ /* Start monitoring if needed */
+ err = regmap_set_bits(chip->regmap, NCT7201_REG_CONFIGURATION,
+ NCT7201_BIT_CONFIGURATION_START);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to start monitoring\n");
+
+ return 0;
+}
+
+static irqreturn_t nct7201_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct nct7201_chip_info *chip = iio_priv(indio_dev);
+ __le16 data;
+ int err;
+
+ err = regmap_bulk_read(chip->regmap, NCT7201_REG_INTERRUPT_STATUS,
+ &data, sizeof(data));
+ if (err)
+ return IRQ_NONE;
+
+ if (data)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+}
+
+static int nct7201_probe(struct i2c_client *client)
+{
+ const struct nct7201_adc_model_data *model_data;
+ struct device *dev = &client->dev;
+ struct nct7201_chip_info *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ model_data = i2c_get_match_data(client);
+ if (!model_data)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+ chip = iio_priv(indio_dev);
+
+ chip->regmap = devm_regmap_init_i2c(client, &nct7201_regmap8_config);
+ if (IS_ERR(chip->regmap))
+ return dev_err_probe(dev, PTR_ERR(chip->regmap),
+ "Failed to init regmap\n");
+
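+	/* Separate 16-bit regmap for the wide VIN data and limit registers */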
+ chip->regmap16 = devm_regmap_init_i2c(client, &nct7201_regmap16_config);
+ if (IS_ERR(chip->regmap16))
+ return dev_err_probe(dev, PTR_ERR(chip->regmap16),
+ "Failed to init regmap16\n");
+
+ chip->num_vin_channels = model_data->num_vin_channels;
+
+ ret = nct7201_init_chip(chip);
+ if (ret)
+ return ret;
+
+ indio_dev->name = model_data->model_name;
+ indio_dev->channels = model_data->channels;
+ indio_dev->num_channels = model_data->num_channels;
+ if (client->irq) {
+ /* Enable alert function */
+ ret = regmap_clear_bits(chip->regmap, NCT7201_REG_CONFIGURATION,
+ NCT7201_BIT_CONFIGURATION_ALERT_MSK);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable alert function\n");
+
+ ret = devm_request_threaded_irq(dev, client->irq,
+ NULL, nct7201_irq_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to assign interrupt.\n");
+
+ indio_dev->info = &nct7201_info;
+ } else {
+ indio_dev->info = &nct7201_info_no_irq;
+ }
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id nct7201_id[] = {
+ { .name = "nct7201", .driver_data = (kernel_ulong_t)&nct7201_model_data },
+ { .name = "nct7202", .driver_data = (kernel_ulong_t)&nct7202_model_data },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nct7201_id);
+
+static const struct of_device_id nct7201_of_match[] = {
+ {
+ .compatible = "nuvoton,nct7201",
+ .data = &nct7201_model_data,
+ },
+ {
+ .compatible = "nuvoton,nct7202",
+ .data = &nct7202_model_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nct7201_of_match);
+
+static struct i2c_driver nct7201_driver = {
+ .driver = {
+ .name = "nct7201",
+ .of_match_table = nct7201_of_match,
+ },
+ .probe = nct7201_probe,
+ .id_table = nct7201_id,
+};
+module_i2c_driver(nct7201_driver);
+
+MODULE_AUTHOR("Eason Yang <j2anfernee@gmail.com>");
+MODULE_DESCRIPTION("Nuvoton NCT7201 voltage monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index 7c1511ee3a4b..c8283873cdee 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -196,7 +196,7 @@ static const struct iio_info npcm_adc_iio_info = {
static const struct of_device_id npcm_adc_match[] = {
{ .compatible = "nuvoton,npcm750-adc", .data = &npxm7xx_adc_info},
{ .compatible = "nuvoton,npcm845-adc", .data = &npxm8xx_adc_info},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, npcm_adc_match);
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index beb5511c4504..72aa4ca2e5a4 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -900,7 +900,7 @@ static ssize_t pac1921_read_scale_avail(struct iio_dev *indio_dev,
static const struct iio_chan_spec_ext_info pac1921_ext_info_voltage[] = {
PAC1921_EXT_INFO_SCALE_AVAIL,
- {}
+ { }
};
static const struct iio_chan_spec_ext_info pac1921_ext_info_current[] = {
@@ -911,7 +911,7 @@ static const struct iio_chan_spec_ext_info pac1921_ext_info_current[] = {
.write = pac1921_write_shunt_resistor,
.shared = IIO_SEPARATE,
},
- {}
+ { }
};
static const struct iio_event_spec pac1921_overflow_event[] = {
@@ -1044,7 +1044,8 @@ static irqreturn_t pac1921_trigger_handler(int irq, void *p)
priv->scan.chan[ch++] = val;
}
- iio_push_to_buffers_with_timestamp(idev, &priv->scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(idev, &priv->scan, sizeof(priv->scan),
+ pf->timestamp);
done:
iio_trigger_notify_done(idev->trig);
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index 20802b7f49ea..09fe88eb3fb0 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -1081,7 +1081,7 @@ static int pac1934_chip_identify(struct pac1934_chip_info *info)
/*
* documentation related to the ACPI device definition
- * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC1934-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC193X-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
*/
static int pac1934_acpi_parse_channel_config(struct i2c_client *client,
struct pac1934_chip_info *info)
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index d283ee8fb1d2..7c01e33be04c 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -1164,7 +1164,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(palmas_pm_ops, palmas_gpadc_suspend,
static const struct of_device_id of_palmas_gpadc_match_tbl[] = {
{ .compatible = "ti,palmas-gpadc", },
- { /* end */ }
+ { }
};
MODULE_DEVICE_TABLE(of, of_palmas_gpadc_match_tbl);
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
index 63ebaf13ef19..f61ad0510f04 100644
--- a/drivers/iio/adc/qcom-spmi-rradc.c
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Linaro Limited.
- * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ * Author: Casey Connolly <casey.connolly@linaro.org>
*
* This driver is for the Round Robin ADC found in the pmi8998 and pm660 PMICs.
*/
@@ -1016,5 +1016,5 @@ static struct platform_driver rradc_driver = {
module_platform_driver(rradc_driver);
MODULE_DESCRIPTION("QCOM SPMI PMIC RR ADC driver");
-MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index 11170b5852d1..cc326f21d398 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -199,13 +199,12 @@ static int rcar_gyroadc_read_raw(struct iio_dev *indio_dev,
if (!consumer)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = rcar_gyroadc_set_power(priv, true);
if (ret < 0) {
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -213,7 +212,7 @@ static int rcar_gyroadc_read_raw(struct iio_dev *indio_dev,
*val &= BIT(priv->sample_width) - 1;
ret = rcar_gyroadc_set_power(priv, false);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -308,7 +307,7 @@ static const struct of_device_id rcar_gyroadc_child_match[] __maybe_unused = {
.compatible = "maxim,max11100",
.data = (void *)RCAR_GYROADC_MODE_SELECT_3_MAX1162,
},
- { /* sentinel */ }
+ { }
};
static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
diff --git a/drivers/iio/adc/rn5t618-adc.c b/drivers/iio/adc/rn5t618-adc.c
index b33536157adc..d6f6b351f2af 100644
--- a/drivers/iio/adc/rn5t618-adc.c
+++ b/drivers/iio/adc/rn5t618-adc.c
@@ -188,7 +188,7 @@ static const struct iio_chan_spec rn5t618_adc_iio_channels[] = {
static const struct iio_map rn5t618_maps[] = {
IIO_MAP("VADP", "rn5t618-power", "vadp"),
IIO_MAP("VUSB", "rn5t618-power", "vusb"),
- { /* sentinel */ }
+ { }
};
static int rn5t618_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 5e28bd28b81a..325e3747a134 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -425,7 +425,8 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p)
j++;
}
- iio_push_to_buffers_with_timestamp(i_dev, &data, iio_get_time_ns(i_dev));
+ iio_push_to_buffers_with_ts(i_dev, &data, sizeof(data),
+ iio_get_time_ns(i_dev));
out:
mutex_unlock(&info->lock);
diff --git a/drivers/iio/adc/rohm-bd79124.c b/drivers/iio/adc/rohm-bd79124.c
new file mode 100644
index 000000000000..bb7c93ae4055
--- /dev/null
+++ b/drivers/iio/adc/rohm-bd79124.c
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ROHM ADC driver for BD79124 ADC/GPO device
+ * https://fscdn.rohm.com/en/products/databook/datasheet/ic/data_converter/dac/bd79124muf-c-e.pdf
+ *
+ * Copyright (c) 2025, ROHM Semiconductor.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/devm-helpers.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/adc-helpers.h>
+
+#define BD79124_I2C_MULTI_READ 0x30
+#define BD79124_I2C_MULTI_WRITE 0x28
+#define BD79124_REG_MAX 0xaf
+
+#define BD79124_REG_SYSTEM_STATUS 0x00
+#define BD79124_REG_GEN_CFG 0x01
+#define BD79124_REG_OPMODE_CFG 0x04
+#define BD79124_REG_PINCFG 0x05
+#define BD79124_REG_GPO_VAL 0x0B
+#define BD79124_REG_SEQ_CFG 0x10
+#define BD79124_REG_MANUAL_CHANNELS 0x11
+#define BD79124_REG_AUTO_CHANNELS 0x12
+#define BD79124_REG_ALERT_CH_SEL 0x14
+#define BD79124_REG_EVENT_FLAG 0x18
+#define BD79124_REG_EVENT_FLAG_HI 0x1a
+#define BD79124_REG_EVENT_FLAG_LO 0x1c
+#define BD79124_REG_HYSTERESIS_CH0 0x20
+#define BD79124_REG_EVENTCOUNT_CH0 0x22
+#define BD79124_REG_RECENT_CH0_LSB 0xa0
+#define BD79124_REG_RECENT_CH7_MSB 0xaf
+
+#define BD79124_ADC_BITS 12
+
+/* Masks for the BD79124_REG_OPMODE_CFG */
+#define BD79124_MSK_CONV_MODE GENMASK(6, 5)
+#define BD79124_CONV_MODE_MANSEQ 0
+#define BD79124_CONV_MODE_AUTO 1
+#define BD79124_MSK_AUTO_INTERVAL GENMASK(1, 0)
+#define BD79124_INTERVAL_750_US 0
+
+/* Masks for the BD79124_REG_GEN_CFG */
+#define BD79124_MSK_DWC_EN BIT(4)
+#define BD79124_MSK_STATS_EN BIT(5)
+
+/* Masks for the BD79124_REG_SEQ_CFG */
+#define BD79124_MSK_SEQ_START BIT(4)
+#define BD79124_MSK_SEQ_MODE GENMASK(1, 0)
+#define BD79124_MSK_SEQ_MANUAL 0
+#define BD79124_MSK_SEQ_SEQ 1
+
+#define BD79124_MSK_HYSTERESIS GENMASK(3, 0)
+#define BD79124_LOW_LIMIT_MIN 0
+#define BD79124_HIGH_LIMIT_MAX GENMASK(11, 0)
+
+/*
+ * The high limit, low limit and last measurement result are each stored in
+ * 2 consecutive registers. 4 bits are in the high bits of the first register
+ * and 8 bits in the next register.
+ *
+ * These macros return the address of the first reg for the given channel.
+ */
+#define BD79124_GET_HIGH_LIMIT_REG(ch) (BD79124_REG_HYSTERESIS_CH0 + (ch) * 4)
+#define BD79124_GET_LOW_LIMIT_REG(ch) (BD79124_REG_EVENTCOUNT_CH0 + (ch) * 4)
+#define BD79124_GET_LIMIT_REG(ch, dir) ((dir) == IIO_EV_DIR_RISING ? \
+ BD79124_GET_HIGH_LIMIT_REG(ch) : BD79124_GET_LOW_LIMIT_REG(ch))
+#define BD79124_GET_RECENT_RES_REG(ch) (BD79124_REG_RECENT_CH0_LSB + (ch) * 2)
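+/* E.g. channel 2: high limit at 0x28, low limit at 0x2a, recent result at 0xa4 */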
+
+/*
+ * The hysteresis for a channel is stored in the same register where the
+ * 4 bits of high limit reside.
+ */
+#define BD79124_GET_HYSTERESIS_REG(ch) BD79124_GET_HIGH_LIMIT_REG(ch)
+
+#define BD79124_MAX_NUM_CHANNELS 8
+
+struct bd79124_data {
+ s64 timestamp;
+ struct regmap *map;
+ struct device *dev;
+ int vmax;
+ /*
+ * Keep measurement status so read_raw() knows if the measurement needs
+ * to be started.
+ */
+ int alarm_monitored[BD79124_MAX_NUM_CHANNELS];
+ /*
+	 * The BD79124 does not allow disabling/enabling a limit for one
+	 * direction only. Hence, we disable a limit by setting it to the
+	 * maximum/minimum measurable value. This means we need to cache
+	 * the limit in order to preserve it while the limit is disabled.
+ */
+ u16 alarm_r_limit[BD79124_MAX_NUM_CHANNELS];
+ u16 alarm_f_limit[BD79124_MAX_NUM_CHANNELS];
+ /* Bitmask of disabled events (for rate limiting) for each channel. */
+ int alarm_suppressed[BD79124_MAX_NUM_CHANNELS];
+ /*
+ * The BD79124 is configured to run the measurements in the background.
+ * This is done for the event monitoring as well as for the read_raw().
+ * Protect the measurement starting/stopping using a mutex.
+ */
+ struct mutex mutex;
+ struct delayed_work alm_enable_work;
+ struct gpio_chip gc;
+ u8 gpio_valid_mask;
+};
+
+static const struct regmap_range bd79124_ro_ranges[] = {
+ {
+ .range_min = BD79124_REG_EVENT_FLAG,
+ .range_max = BD79124_REG_EVENT_FLAG,
+ }, {
+ .range_min = BD79124_REG_RECENT_CH0_LSB,
+ .range_max = BD79124_REG_RECENT_CH7_MSB,
+ },
+};
+
+static const struct regmap_access_table bd79124_ro_regs = {
+ .no_ranges = &bd79124_ro_ranges[0],
+ .n_no_ranges = ARRAY_SIZE(bd79124_ro_ranges),
+};
+
+static const struct regmap_range bd79124_volatile_ranges[] = {
+ {
+ .range_min = BD79124_REG_RECENT_CH0_LSB,
+ .range_max = BD79124_REG_RECENT_CH7_MSB,
+ }, {
+ .range_min = BD79124_REG_EVENT_FLAG,
+ .range_max = BD79124_REG_EVENT_FLAG,
+ }, {
+ .range_min = BD79124_REG_EVENT_FLAG_HI,
+ .range_max = BD79124_REG_EVENT_FLAG_HI,
+ }, {
+ .range_min = BD79124_REG_EVENT_FLAG_LO,
+ .range_max = BD79124_REG_EVENT_FLAG_LO,
+ }, {
+ .range_min = BD79124_REG_SYSTEM_STATUS,
+ .range_max = BD79124_REG_SYSTEM_STATUS,
+ },
+};
+
+static const struct regmap_access_table bd79124_volatile_regs = {
+ .yes_ranges = &bd79124_volatile_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bd79124_volatile_ranges),
+};
+
+static const struct regmap_range bd79124_precious_ranges[] = {
+ {
+ .range_min = BD79124_REG_EVENT_FLAG_HI,
+ .range_max = BD79124_REG_EVENT_FLAG_HI,
+ }, {
+ .range_min = BD79124_REG_EVENT_FLAG_LO,
+ .range_max = BD79124_REG_EVENT_FLAG_LO,
+ },
+};
+
+static const struct regmap_access_table bd79124_precious_regs = {
+ .yes_ranges = &bd79124_precious_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bd79124_precious_ranges),
+};
+
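+/* The 16-bit register field carries the I2C multi-read/-write opcode in its high byte */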
+static const struct regmap_config bd79124_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = BD79124_I2C_MULTI_READ,
+ .write_flag_mask = BD79124_I2C_MULTI_WRITE,
+ .max_register = BD79124_REG_MAX,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_table = &bd79124_volatile_regs,
+ .wr_table = &bd79124_ro_regs,
+ .precious_table = &bd79124_precious_regs,
+};
+
+static int bd79124gpo_direction_get(struct gpio_chip *gc, unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int bd79124gpo_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct bd79124_data *data = gpiochip_get_data(gc);
+
+ return regmap_assign_bits(data->map, BD79124_REG_GPO_VAL, BIT(offset),
+ value);
+}
+
+static int bd79124gpo_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned int all_gpos;
+ int ret;
+ struct bd79124_data *data = gpiochip_get_data(gc);
+
+ /*
+	 * Ensure all pins in 'mask' are muxed as GPIOs. gpiolib did not obey
+	 * the valid_mask in all cases prior to
+	 * https://lore.kernel.org/all/cd5e067b80e1bb590027bc3bfa817e7f794f21c3.1741180097.git.mazziesaccount@gmail.com/
+	 *
+	 * Keep this check here for a couple of release cycles.
+ */
+ ret = regmap_read(data->map, BD79124_REG_PINCFG, &all_gpos);
+ if (ret)
+ return ret;
+
+ if (all_gpos ^ *mask) {
+ dev_dbg(data->dev, "Invalid mux config. Can't set value.\n");
+
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(data->map, BD79124_REG_GPO_VAL, *mask, *bits);
+}
+
+static int bd79124_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct bd79124_data *data = gpiochip_get_data(gc);
+
+ *valid_mask = data->gpio_valid_mask;
+
+ return 0;
+}
+
+/* Template for GPIO chip */
+static const struct gpio_chip bd79124gpo_chip = {
+ .label = "bd79124-gpo",
+ .get_direction = bd79124gpo_direction_get,
+ .set_rv = bd79124gpo_set,
+ .set_multiple_rv = bd79124gpo_set_multiple,
+ .init_valid_mask = bd79124_init_valid_mask,
+ .can_sleep = true,
+ .ngpio = 8,
+ .base = -1,
+};
+
+struct bd79124_raw {
+	u8 val_bit3_0; /* Stored in the high bits of the byte */
+ u8 val_bit11_4;
+};
+#define BD79124_RAW_TO_INT(r) ((r.val_bit11_4 << 4) | (r.val_bit3_0 >> 4))
+#define BD79124_INT_TO_RAW(val) { \
+ .val_bit11_4 = (val) >> 4, \
+ .val_bit3_0 = (val) << 4, \
+}
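+/* E.g. 0xabc is stored as { .val_bit3_0 = 0xc0, .val_bit11_4 = 0xab } */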
+
+/*
+ * The high and low limits as well as the recent result values are stored in
+ * the same way in 2 consecutive registers. The first register contains 4 bits
+ * of the value. These bits are stored in the high bits [7:4] of the register,
+ * but they represent the low bits [3:0] of the value.
+ * The value bits [11:4] are stored in the next register.
+ *
+ * Read data from register and convert to integer.
+ */
+static int bd79124_read_reg_to_int(struct bd79124_data *data, int reg,
+ unsigned int *val)
+{
+ int ret;
+ struct bd79124_raw raw;
+
+ ret = regmap_bulk_read(data->map, reg, &raw, sizeof(raw));
+ if (ret) {
+ dev_dbg(data->dev, "bulk_read failed %d\n", ret);
+
+ return ret;
+ }
+
+ *val = BD79124_RAW_TO_INT(raw);
+
+ return 0;
+}
+
+/*
+ * The high and low limits as well as the recent result values are stored in
+ * the same way in 2 consecutive registers. The first register contains 4 bits
+ * of the value. These bits are stored in the high bits [7:4] of the register,
+ * but they represent the low bits [3:0] of the value.
+ * The value bits [11:4] are stored in the next register.
+ *
+ * Convert the integer to register format and write it using a read-modify-write cycle.
+ */
+static int bd79124_write_int_to_reg(struct bd79124_data *data, int reg,
+ unsigned int val)
+{
+ struct bd79124_raw raw = BD79124_INT_TO_RAW(val);
+ unsigned int tmp;
+ int ret;
+
+ ret = regmap_read(data->map, reg, &tmp);
+ if (ret)
+ return ret;
+
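+	/* Preserve the low nibble of the first register (it holds e.g. the hysteresis) */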
+ raw.val_bit3_0 |= (tmp & 0xf);
+
+ return regmap_bulk_write(data->map, reg, &raw, sizeof(raw));
+}
+
+static const struct iio_event_spec bd79124_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
+static const struct iio_chan_spec bd79124_chan_template_noirq = {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+};
+
+static const struct iio_chan_spec bd79124_chan_template = {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ .event_spec = bd79124_events,
+ .num_event_specs = ARRAY_SIZE(bd79124_events),
+};
+
+static int bd79124_read_event_value(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val,
+ int *val2)
+{
+ struct bd79124_data *data = iio_priv(iio_dev);
+ int ret, reg;
+
+ if (chan->channel >= BD79124_MAX_NUM_CHANNELS)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (dir == IIO_EV_DIR_RISING)
+ *val = data->alarm_r_limit[chan->channel];
+ else if (dir == IIO_EV_DIR_FALLING)
+ *val = data->alarm_f_limit[chan->channel];
+ else
+ return -EINVAL;
+
+ return IIO_VAL_INT;
+
+ case IIO_EV_INFO_HYSTERESIS:
+ reg = BD79124_GET_HYSTERESIS_REG(chan->channel);
+ ret = regmap_read(data->map, reg, val);
+ if (ret)
+ return ret;
+
+ *val &= BD79124_MSK_HYSTERESIS;
+ /*
+		 * The datasheet says the hysteresis register value needs to be
+ * shifted left by 3.
+ */
+ *val <<= 3;
+
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bd79124_start_measurement(struct bd79124_data *data, int chan)
+{
+ unsigned int val, regval;
+ int ret;
+
+ /* See if already started */
+	ret = regmap_read(data->map, BD79124_REG_AUTO_CHANNELS, &val);
+	if (ret)
+		return ret;
+
+	if (val & BIT(chan))
+		return 0;
+
+ /*
+ * The sequencer must be stopped when channels are added/removed from
+ * the list of the measured channels to ensure the new channel
+ * configuration is used.
+ */
+ ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_START);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS, val | BIT(chan));
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_START);
+ if (ret)
+ return ret;
+
+ /*
+	 * Start the measurement in the background. Don't bother checking if
+	 * it was already started; the regmap cache avoids redundant writes.
+ */
+ regval = FIELD_PREP(BD79124_MSK_CONV_MODE, BD79124_CONV_MODE_AUTO);
+
+ return regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
+ BD79124_MSK_CONV_MODE, regval);
+}
+
+static int bd79124_stop_measurement(struct bd79124_data *data, int chan)
+{
+ unsigned int enabled_chans;
+ int ret;
+
+ /* See if already stopped */
+	ret = regmap_read(data->map, BD79124_REG_AUTO_CHANNELS, &enabled_chans);
+	if (ret)
+		return ret;
+
+	if (!(enabled_chans & BIT(chan)))
+		return 0;
+
+ ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
+				BD79124_MSK_SEQ_START);
+	if (ret)
+		return ret;
+
+ /* Clear the channel from the measured channels */
+ enabled_chans &= ~BIT(chan);
+ ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS,
+ enabled_chans);
+ if (ret)
+ return ret;
+
+ /*
+ * Stop background conversion for power saving if it was the last
+ * channel.
+ */
+ if (!enabled_chans) {
+ int regval = FIELD_PREP(BD79124_MSK_CONV_MODE,
+ BD79124_CONV_MODE_MANSEQ);
+
+ ret = regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
+ BD79124_MSK_CONV_MODE, regval);
+ if (ret)
+ return ret;
+ }
+
+ return regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_START);
+}
+
+static int bd79124_read_event_config(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct bd79124_data *data = iio_priv(iio_dev);
+
+ if (chan->channel >= BD79124_MAX_NUM_CHANNELS)
+ return -EINVAL;
+
+ return !!(data->alarm_monitored[chan->channel] & BIT(dir));
+}
+
+static int bd79124_disable_event(struct bd79124_data *data,
+ enum iio_event_direction dir, int channel)
+{
+ int dir_bit = BIT(dir);
+ int reg;
+ unsigned int limit;
+
+ guard(mutex)(&data->mutex);
+
+ /*
+ * Set thresholds either to 0 or to 2^12 - 1 as appropriate to prevent
+ * alerts and thus disable event generation.
+ */
+ if (dir == IIO_EV_DIR_RISING) {
+ reg = BD79124_GET_HIGH_LIMIT_REG(channel);
+ limit = BD79124_HIGH_LIMIT_MAX;
+ } else if (dir == IIO_EV_DIR_FALLING) {
+ reg = BD79124_GET_LOW_LIMIT_REG(channel);
+ limit = BD79124_LOW_LIMIT_MIN;
+ } else {
+ return -EINVAL;
+ }
+
+ data->alarm_monitored[channel] &= ~dir_bit;
+
+ /*
+	 * Stop measurement if there are no more events to monitor.
+ * We don't bother checking the retval because the limit
+ * setting should in any case effectively disable the alarm.
+ */
+ if (!data->alarm_monitored[channel]) {
+ bd79124_stop_measurement(data, channel);
+ regmap_clear_bits(data->map, BD79124_REG_ALERT_CH_SEL,
+ BIT(channel));
+ }
+
+ return bd79124_write_int_to_reg(data, reg, limit);
+}
+
+static int bd79124_enable_event(struct bd79124_data *data,
+ enum iio_event_direction dir,
+ unsigned int channel)
+{
+ int dir_bit = BIT(dir);
+ int reg, ret;
+ u16 *limit;
+
+ guard(mutex)(&data->mutex);
+ ret = bd79124_start_measurement(data, channel);
+ if (ret)
+ return ret;
+
+ data->alarm_monitored[channel] |= dir_bit;
+
+ /* Add the channel to the list of monitored channels */
+ ret = regmap_set_bits(data->map, BD79124_REG_ALERT_CH_SEL, BIT(channel));
+ if (ret)
+ return ret;
+
+ if (dir == IIO_EV_DIR_RISING) {
+		limit = &data->alarm_r_limit[channel];
+ reg = BD79124_GET_HIGH_LIMIT_REG(channel);
+ } else {
+ limit = &data->alarm_f_limit[channel];
+ reg = BD79124_GET_LOW_LIMIT_REG(channel);
+ }
+ /*
+ * Don't write the new limit to the hardware if we are in the
+ * rate-limit period. The timer which re-enables the event will set
+ * the limit.
+ */
+ if (!(data->alarm_suppressed[channel] & dir_bit)) {
+ ret = bd79124_write_int_to_reg(data, reg, *limit);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Enable comparator. Trust the regmap cache, no need to check
+ * if it was already enabled.
+ *
+ * We could do this in the hw-init, but there may be users who
+	 * never enable alarms, and for them it makes sense not to
+	 * enable the comparator at probe.
+ */
+ return regmap_set_bits(data->map, BD79124_REG_GEN_CFG,
+ BD79124_MSK_DWC_EN);
+}
+
+static int bd79124_write_event_config(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ struct bd79124_data *data = iio_priv(iio_dev);
+
+ if (chan->channel >= BD79124_MAX_NUM_CHANNELS)
+ return -EINVAL;
+
+ if (state)
+ return bd79124_enable_event(data, dir, chan->channel);
+
+ return bd79124_disable_event(data, dir, chan->channel);
+}
+
+static int bd79124_write_event_value(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val,
+ int val2)
+{
+ struct bd79124_data *data = iio_priv(iio_dev);
+ int reg;
+
+ if (chan->channel >= BD79124_MAX_NUM_CHANNELS)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ {
+ guard(mutex)(&data->mutex);
+
+ if (dir == IIO_EV_DIR_RISING) {
+ data->alarm_r_limit[chan->channel] = val;
+ reg = BD79124_GET_HIGH_LIMIT_REG(chan->channel);
+ } else if (dir == IIO_EV_DIR_FALLING) {
+ data->alarm_f_limit[chan->channel] = val;
+ reg = BD79124_GET_LOW_LIMIT_REG(chan->channel);
+ } else {
+ return -EINVAL;
+ }
+ /*
+		 * Don't write the new limit to the hardware if the event is
+		 * disabled or currently suppressed; the cached value is
+		 * applied when the event is (re-)enabled.
+ */
+ if (!(data->alarm_monitored[chan->channel] & BIT(dir)) ||
+ data->alarm_suppressed[chan->channel] & BIT(dir))
+ return 0;
+
+ return bd79124_write_int_to_reg(data, reg, val);
+ }
+ case IIO_EV_INFO_HYSTERESIS:
+ reg = BD79124_GET_HYSTERESIS_REG(chan->channel);
+ val >>= 3;
+
+ return regmap_update_bits(data->map, reg, BD79124_MSK_HYSTERESIS,
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bd79124_single_chan_seq(struct bd79124_data *data, int chan, unsigned int *old)
+{
+ int ret;
+
+ ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_START);
+ if (ret)
+ return ret;
+
+ /*
+	 * Some channels may be monitored for alarms, so cache the old config
+	 * and restore it when the single channel measurement has completed.
+ */
+ ret = regmap_read(data->map, BD79124_REG_AUTO_CHANNELS, old);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS, BIT(chan));
+ if (ret)
+ return ret;
+
+ /* Restart the sequencer */
+ return regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_START);
+}
+
+static int bd79124_single_chan_seq_end(struct bd79124_data *data, unsigned int old)
+{
+ int ret;
+
+ ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_START);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS, old);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_START);
+}
+
+static int bd79124_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct bd79124_data *data = iio_priv(iio_dev);
+ int ret;
+
+ if (chan->channel >= BD79124_MAX_NUM_CHANNELS)
+ return -EINVAL;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ {
+ unsigned int old_chan_cfg, regval;
+ int tmp;
+
+ guard(mutex)(&data->mutex);
+
+ /*
+ * Start the automatic conversion. This is needed here if no
+ * events have been enabled.
+ */
+ regval = FIELD_PREP(BD79124_MSK_CONV_MODE,
+ BD79124_CONV_MODE_AUTO);
+ ret = regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
+ BD79124_MSK_CONV_MODE, regval);
+ if (ret)
+ return ret;
+
+ ret = bd79124_single_chan_seq(data, chan->channel, &old_chan_cfg);
+ if (ret)
+ return ret;
+
+		/* The maximum conversion time is 6 us. */
+ udelay(6);
+
+ ret = bd79124_read_reg_to_int(data,
+ BD79124_GET_RECENT_RES_REG(chan->channel), val);
+ /*
+		 * Restore the old channel config even if the data read
+		 * failed, in order to re-enable the event monitoring.
+ */
+ tmp = bd79124_single_chan_seq_end(data, old_chan_cfg);
+ if (tmp)
+ dev_err(data->dev,
+ "Failed to return config. Alarms may be disabled\n");
+
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
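+		/* Scale in mV/LSB: (vmax / 1000) / 2^BD79124_ADC_BITS */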
+ *val = data->vmax / 1000;
+ *val2 = BD79124_ADC_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bd79124_info = {
+ .read_raw = bd79124_read_raw,
+ .read_event_config = &bd79124_read_event_config,
+ .write_event_config = &bd79124_write_event_config,
+ .read_event_value = &bd79124_read_event_value,
+ .write_event_value = &bd79124_write_event_value,
+};
+
+static void bd79124_re_enable_lo(struct bd79124_data *data, unsigned int channel)
+{
+ int ret, evbit = BIT(IIO_EV_DIR_FALLING);
+
+ /*
+	 * We should not re-enable the event if the user has disabled it while
+ * rate-limiting was enabled.
+ */
+ if (!(data->alarm_suppressed[channel] & evbit))
+ return;
+
+ data->alarm_suppressed[channel] &= ~evbit;
+
+ if (!(data->alarm_monitored[channel] & evbit))
+ return;
+
+ ret = bd79124_write_int_to_reg(data, BD79124_GET_LOW_LIMIT_REG(channel),
+ data->alarm_f_limit[channel]);
+ if (ret)
+ dev_warn(data->dev, "Low limit enabling failed for channel%d\n",
+ channel);
+}
+
+static void bd79124_re_enable_hi(struct bd79124_data *data, unsigned int channel)
+{
+ int ret, evbit = BIT(IIO_EV_DIR_RISING);
+
+ /*
+	 * We should not re-enable the event if the user has disabled it while
+ * rate-limiting was enabled.
+ */
+ if (!(data->alarm_suppressed[channel] & evbit))
+ return;
+
+ data->alarm_suppressed[channel] &= ~evbit;
+
+ if (!(data->alarm_monitored[channel] & evbit))
+ return;
+
+ ret = bd79124_write_int_to_reg(data, BD79124_GET_HIGH_LIMIT_REG(channel),
+ data->alarm_r_limit[channel]);
+ if (ret)
+ dev_warn(data->dev, "High limit enabling failed for channel%d\n",
+ channel);
+}
+
+static void bd79124_alm_enable_worker(struct work_struct *work)
+{
+ int i;
+ struct bd79124_data *data = container_of(work, struct bd79124_data,
+ alm_enable_work.work);
+
+ /* Take the mutex so there is no race with user disabling the alarm */
+ guard(mutex)(&data->mutex);
+ for (i = 0; i < BD79124_MAX_NUM_CHANNELS; i++) {
+ bd79124_re_enable_hi(data, i);
+ bd79124_re_enable_lo(data, i);
+ }
+}
+
+static int __bd79124_event_ratelimit(struct bd79124_data *data, int reg,
+ unsigned int limit)
+{
+ int ret;
+
+ if (limit > BD79124_HIGH_LIMIT_MAX)
+ return -EINVAL;
+
+ ret = bd79124_write_int_to_reg(data, reg, limit);
+ if (ret)
+ return ret;
+
+ /*
+	 * We use a 1 second 'grace period'. At the moment I see no reason to make
+ * this user configurable. We need an ABI for this if configuration is
+ * needed.
+ */
+ schedule_delayed_work(&data->alm_enable_work, msecs_to_jiffies(1000));
+
+ return 0;
+}
+
+static int bd79124_event_ratelimit_hi(struct bd79124_data *data,
+ unsigned int channel)
+{
+ guard(mutex)(&data->mutex);
+ data->alarm_suppressed[channel] |= BIT(IIO_EV_DIR_RISING);
+
+ return __bd79124_event_ratelimit(data,
+ BD79124_GET_HIGH_LIMIT_REG(channel),
+ BD79124_HIGH_LIMIT_MAX);
+}
+
+static int bd79124_event_ratelimit_lo(struct bd79124_data *data,
+ unsigned int channel)
+{
+ guard(mutex)(&data->mutex);
+ data->alarm_suppressed[channel] |= BIT(IIO_EV_DIR_FALLING);
+
+ return __bd79124_event_ratelimit(data,
+ BD79124_GET_LOW_LIMIT_REG(channel),
+ BD79124_LOW_LIMIT_MIN);
+}
+
+static irqreturn_t bd79124_event_handler(int irq, void *priv)
+{
+ unsigned int i_hi, i_lo;
+ int i, ret;
+ struct iio_dev *iio_dev = priv;
+ struct bd79124_data *data = iio_priv(iio_dev);
+
+ /*
+ * Return IRQ_NONE if bailing-out without acking. This allows the IRQ
+ * subsystem to disable the offending IRQ line if we get a hardware
+ * problem. This behaviour has saved my poor bottom a few times in the
+ * past as, instead of getting unusably unresponsive, the system has
+ * spilled out the magic words "...nobody cared".
+ */
+ ret = regmap_read(data->map, BD79124_REG_EVENT_FLAG_HI, &i_hi);
+ if (ret)
+ return IRQ_NONE;
+
+ ret = regmap_read(data->map, BD79124_REG_EVENT_FLAG_LO, &i_lo);
+ if (ret)
+ return IRQ_NONE;
+
+ if (!i_lo && !i_hi)
+ return IRQ_NONE;
+
+ for (i = 0; i < BD79124_MAX_NUM_CHANNELS; i++) {
+ u64 ecode;
+
+ if (BIT(i) & i_hi) {
+ ecode = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING);
+
+ iio_push_event(iio_dev, ecode, data->timestamp);
+ /*
+ * The BD79124 keeps the IRQ asserted for as long as
+			 * the voltage exceeds the threshold, which causes
+			 * the IRQ to keep firing.
+			 *
+			 * Disable the event for the channel and schedule its
+			 * re-enabling later to prevent a storm of events.
+ */
+ ret = bd79124_event_ratelimit_hi(data, i);
+ if (ret)
+ return IRQ_NONE;
+ }
+ if (BIT(i) & i_lo) {
+ ecode = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING);
+
+ iio_push_event(iio_dev, ecode, data->timestamp);
+ ret = bd79124_event_ratelimit_lo(data, i);
+ if (ret)
+ return IRQ_NONE;
+ }
+ }
+
+ ret = regmap_write(data->map, BD79124_REG_EVENT_FLAG_HI, i_hi);
+ if (ret)
+ return IRQ_NONE;
+
+ ret = regmap_write(data->map, BD79124_REG_EVENT_FLAG_LO, i_lo);
+ if (ret)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bd79124_irq_handler(int irq, void *priv)
+{
+ struct iio_dev *iio_dev = priv;
+ struct bd79124_data *data = iio_priv(iio_dev);
+
+ data->timestamp = iio_get_time_ns(iio_dev);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int bd79124_chan_init(struct bd79124_data *data, int channel)
+{
+ int ret;
+
+ ret = regmap_write(data->map, BD79124_GET_HIGH_LIMIT_REG(channel),
+ BD79124_HIGH_LIMIT_MAX);
+ if (ret)
+ return ret;
+
+ return regmap_write(data->map, BD79124_GET_LOW_LIMIT_REG(channel),
+ BD79124_LOW_LIMIT_MIN);
+}
+
+static int bd79124_get_gpio_pins(const struct iio_chan_spec *cs, int num_channels)
+{
+ int i, gpio_channels;
+
+ /*
+ * Let's initialize the mux config to say that all 8 channels are
+ * GPIOs. Then we can just loop through the iio_chan_spec and clear the
+	 * bits for the ADC channels that were found.
+ */
+ gpio_channels = GENMASK(7, 0);
+ for (i = 0; i < num_channels; i++)
+ gpio_channels &= ~BIT(cs[i].channel);
+
+ return gpio_channels;
+}
+
+static int bd79124_hw_init(struct bd79124_data *data)
+{
+ unsigned int regval;
+ int ret, i;
+
+ for (i = 0; i < BD79124_MAX_NUM_CHANNELS; i++) {
+ ret = bd79124_chan_init(data, i);
+ if (ret)
+ return ret;
+ data->alarm_r_limit[i] = BD79124_HIGH_LIMIT_MAX;
+ }
+ /* Stop auto sequencer */
+ ret = regmap_clear_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_START);
+ if (ret)
+ return ret;
+
+	/* Enable writing the measured values to the registers */
+ ret = regmap_set_bits(data->map, BD79124_REG_GEN_CFG,
+ BD79124_MSK_STATS_EN);
+ if (ret)
+ return ret;
+
+ /* Set no channels to be auto-measured */
+ ret = regmap_write(data->map, BD79124_REG_AUTO_CHANNELS, 0x0);
+ if (ret)
+ return ret;
+
+ /* Set no channels to be manually measured */
+ ret = regmap_write(data->map, BD79124_REG_MANUAL_CHANNELS, 0x0);
+ if (ret)
+ return ret;
+
+ regval = FIELD_PREP(BD79124_MSK_AUTO_INTERVAL, BD79124_INTERVAL_750_US);
+ ret = regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
+ BD79124_MSK_AUTO_INTERVAL, regval);
+ if (ret)
+ return ret;
+
+ /* Sequencer mode to auto */
+ ret = regmap_set_bits(data->map, BD79124_REG_SEQ_CFG,
+ BD79124_MSK_SEQ_SEQ);
+ if (ret)
+ return ret;
+
+ /* Don't start the measurement */
+ regval = FIELD_PREP(BD79124_MSK_CONV_MODE, BD79124_CONV_MODE_MANSEQ);
+ return regmap_update_bits(data->map, BD79124_REG_OPMODE_CFG,
+ BD79124_MSK_CONV_MODE, regval);
+}
+
+static int bd79124_probe(struct i2c_client *i2c)
+{
+ struct bd79124_data *data;
+ struct iio_dev *iio_dev;
+ const struct iio_chan_spec *template;
+ struct iio_chan_spec *cs;
+ struct device *dev = &i2c->dev;
+ unsigned int gpio_pins;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(iio_dev);
+ data->dev = dev;
+ data->map = devm_regmap_init_i2c(i2c, &bd79124_regmap);
+ if (IS_ERR(data->map))
+ return dev_err_probe(dev, PTR_ERR(data->map),
+ "Failed to initialize Regmap\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "vdd");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get the Vdd\n");
+
+ data->vmax = ret;
+
+ ret = devm_regulator_get_enable(dev, "iovdd");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to enable I/O voltage\n");
+
+ ret = devm_delayed_work_autocancel(dev, &data->alm_enable_work,
+ bd79124_alm_enable_worker);
+ if (ret)
+ return ret;
+
+ if (i2c->irq) {
+ template = &bd79124_chan_template;
+ } else {
+ template = &bd79124_chan_template_noirq;
+ dev_dbg(dev, "No IRQ found, events disabled\n");
+ }
+
+ ret = devm_mutex_init(dev, &data->mutex);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_adc_device_alloc_chaninfo_se(dev, template,
+ BD79124_MAX_NUM_CHANNELS - 1, &cs);
+ if (ret < 0) {
+ /* Register all pins as GPOs if there are no ADC channels */
+ if (ret == -ENOENT)
+ goto register_gpios;
+ return ret;
+ }
+ iio_dev->channels = cs;
+ iio_dev->num_channels = ret;
+ iio_dev->info = &bd79124_info;
+ iio_dev->name = "bd79124";
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = bd79124_hw_init(data);
+ if (ret)
+ return ret;
+
+ if (i2c->irq > 0) {
+ ret = devm_request_threaded_irq(dev, i2c->irq,
+ bd79124_irq_handler, &bd79124_event_handler,
+ IRQF_ONESHOT, "adc-thresh-alert", iio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Failed to register IRQ\n");
+ }
+
+ ret = devm_iio_device_register(data->dev, iio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Failed to register ADC\n");
+
+register_gpios:
+ gpio_pins = bd79124_get_gpio_pins(iio_dev->channels,
+ iio_dev->num_channels);
+
+ /*
+	 * The mux should default to "all ADCs", but better not to trust it.
+	 * Thus we set the mux even when we have only ADCs and no GPOs.
+ */
+ ret = regmap_write(data->map, BD79124_REG_PINCFG, gpio_pins);
+ if (ret)
+ return ret;
+
+ /* No GPOs if all channels are reserved for ADC, so we're done. */
+ if (!gpio_pins)
+ return 0;
+
+ data->gpio_valid_mask = gpio_pins;
+ data->gc = bd79124gpo_chip;
+ data->gc.parent = dev;
+
+ return devm_gpiochip_add_data(dev, &data->gc, data);
+}
+
+static const struct of_device_id bd79124_of_match[] = {
+ { .compatible = "rohm,bd79124" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bd79124_of_match);
+
+static const struct i2c_device_id bd79124_id[] = {
+ { "bd79124" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bd79124_id);
+
+static struct i2c_driver bd79124_driver = {
+ .driver = {
+ .name = "bd79124",
+ .of_match_table = bd79124_of_match,
+ },
+ .probe = bd79124_probe,
+ .id_table = bd79124_id,
+};
+module_i2c_driver(bd79124_driver);
+
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("Driver for ROHM BD79124 ADC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DRIVER");
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index 54239df61d86..6ff47415a222 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -666,7 +666,8 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
data.vals[i++] = raw;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data, iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data, sizeof(data),
+ iio_get_time_ns(indio_dev));
out:
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index 883c167c0670..9674d48074c9 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/iio/adc-helpers.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -324,48 +325,39 @@ static irqreturn_t rzg2l_adc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static const struct iio_chan_spec rzg2l_adc_chan_template = {
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+};
+
static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l_adc *adc)
{
const struct rzg2l_adc_hw_params *hw_params = adc->hw_params;
struct iio_chan_spec *chan_array;
struct rzg2l_adc_data *data;
- unsigned int channel;
int num_channels;
- int ret;
u8 i;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- num_channels = device_get_child_node_count(&pdev->dev);
- if (!num_channels)
- return dev_err_probe(&pdev->dev, -ENODEV, "no channel children\n");
+ num_channels = devm_iio_adc_device_alloc_chaninfo_se(&pdev->dev,
+ &rzg2l_adc_chan_template,
+ hw_params->num_channels - 1,
+ &chan_array);
+ if (num_channels < 0)
+ return num_channels;
if (num_channels > hw_params->num_channels)
return dev_err_probe(&pdev->dev, -EINVAL,
"num of channel children out of range\n");
- chan_array = devm_kcalloc(&pdev->dev, num_channels, sizeof(*chan_array),
- GFP_KERNEL);
- if (!chan_array)
- return -ENOMEM;
+ for (i = 0; i < num_channels; i++) {
+ int channel = chan_array[i].channel;
- i = 0;
- device_for_each_child_node_scoped(&pdev->dev, fwnode) {
- ret = fwnode_property_read_u32(fwnode, "reg", &channel);
- if (ret)
- return ret;
-
- if (channel >= hw_params->num_channels)
- return -EINVAL;
-
- chan_array[i].type = rzg2l_adc_channels[channel].type;
- chan_array[i].indexed = 1;
- chan_array[i].channel = channel;
- chan_array[i].info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
chan_array[i].datasheet_name = rzg2l_adc_channels[channel].name;
- i++;
+ chan_array[i].type = rzg2l_adc_channels[channel].type;
}
data->num_channels = num_channels;
@@ -515,7 +507,7 @@ static const struct rzg2l_adc_hw_params rzg3s_hw_params = {
static const struct of_device_id rzg2l_adc_match[] = {
{ .compatible = "renesas,r9a08g045-adc", .data = &rzg3s_hw_params },
{ .compatible = "renesas,rzg2l-adc", .data = &rzg2l_hw_params },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, rzg2l_adc_match);
@@ -626,3 +618,4 @@ module_platform_driver(rzg2l_adc_driver);
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L ADC driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("IIO_DRIVER");
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index b6dd096391c1..e3a865c79686 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -345,7 +345,7 @@ static int spear_adc_probe(struct platform_device *pdev)
static const struct of_device_id spear_adc_dt_ids[] = {
{ .compatible = "st,spear600-adc", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, spear_adc_dt_ids);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 0914148d1a22..bd3458965bff 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -421,9 +421,10 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return priv->irq[i];
}
- priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
- &stm32_adc_domain_ops,
- priv);
+ priv->domain = irq_domain_create_simple(of_fwnode_handle(np),
+ STM32_ADC_MAX_ADCS, 0,
+ &stm32_adc_domain_ops,
+ priv);
if (!priv->domain) {
dev_err(&pdev->dev, "Failed to add irq domain\n");
return -ENOMEM;
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index 73b2c2e91c08..db50a9f3b922 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -10,6 +10,9 @@
#ifndef __STM32_ADC_H
#define __STM32_ADC_H
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
/*
* STM32 - ADC global register map
* ________________________________________________________
@@ -91,6 +94,7 @@
#define STM32H7_ADC_IER 0x04
#define STM32H7_ADC_CR 0x08
#define STM32H7_ADC_CFGR 0x0C
+#define STM32H7_ADC_CFGR2 0x10
#define STM32H7_ADC_SMPR1 0x14
#define STM32H7_ADC_SMPR2 0x18
#define STM32H7_ADC_PCSEL 0x1C
@@ -160,6 +164,13 @@
#define STM32H7_DMNGT_SHIFT 0
#define STM32H7_DMNGT_MASK GENMASK(1, 0)
+/* STM32H7_ADC_CFGR2 bit fields */
+#define STM32H7_OVSR_MASK		GENMASK(25, 16) /* Corresponds to the OSVR field in the datasheet */
+#define STM32H7_OVSR(v) FIELD_PREP(STM32H7_OVSR_MASK, v)
+#define STM32H7_OVSS_MASK GENMASK(8, 5)
+#define STM32H7_OVSS(v) FIELD_PREP(STM32H7_OVSS_MASK, v)
+#define STM32H7_ROVSE BIT(0)
+
enum stm32h7_adc_dmngt {
STM32H7_DMNGT_DR_ONLY, /* Regular data in DR only */
STM32H7_DMNGT_DMA_ONESHOT, /* DMA one shot mode */
@@ -226,6 +237,12 @@ enum stm32h7_adc_dmngt {
#define STM32MP13_RES_SHIFT 3
#define STM32MP13_RES_MASK GENMASK(4, 3)
+/* STM32MP13_ADC_CFGR2 bit fields */
+#define STM32MP13_OVSR_MASK GENMASK(4, 2)
+#define STM32MP13_OVSR(v) FIELD_PREP(STM32MP13_OVSR_MASK, v)
+#define STM32MP13_OVSS_MASK GENMASK(8, 5)
+#define STM32MP13_OVSS(v) FIELD_PREP(STM32MP13_OVSS_MASK, v)
+
/* STM32MP13_ADC_DIFSEL - bit fields */
#define STM32MP13_DIFSEL_MASK GENMASK(18, 0)
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 5dbf5f136768..e84babf43385 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -6,6 +6,7 @@
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
*/
+#include <linux/array_size.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
@@ -202,11 +203,13 @@ struct stm32_adc;
* @has_boostmode: boost mode support flag
* @has_linearcal: linear calibration support flag
* @has_presel: channel preselection support flag
+ * @has_oversampling: oversampling support flag
* @prepare: optional prepare routine (power-up, enable)
* @start_conv: routine to start conversions
* @stop_conv: routine to stop conversions
* @unprepare: optional unprepare routine (disable, power-down)
* @irq_clear: routine to clear irqs
+ * @set_ovs: routine to set oversampling configuration
* @smp_cycles: programmable sampling time (ADC clock cycles)
* @ts_int_ch: pointer to array of internal channels minimum sampling time in ns
*/
@@ -219,11 +222,13 @@ struct stm32_adc_cfg {
bool has_boostmode;
bool has_linearcal;
bool has_presel;
+ bool has_oversampling;
int (*prepare)(struct iio_dev *);
void (*start_conv)(struct iio_dev *, bool dma);
void (*stop_conv)(struct iio_dev *);
void (*unprepare)(struct iio_dev *);
void (*irq_clear)(struct iio_dev *indio_dev, u32 msk);
+ void (*set_ovs)(struct iio_dev *indio_dev, u32 ovs_idx);
const unsigned int *smp_cycles;
const unsigned int *ts_int_ch;
};
@@ -255,6 +260,7 @@ struct stm32_adc_cfg {
* @num_diff: number of differential channels
* @int_ch: internal channel indexes array
* @nsmps: number of channels with optional sample time
+ * @ovs_idx: current oversampling ratio index (in oversampling array)
*/
struct stm32_adc {
struct stm32_adc_common *common;
@@ -282,6 +288,7 @@ struct stm32_adc {
u32 num_diff;
int int_ch[STM32_ADC_INT_CH_NB];
int nsmps;
+ int ovs_idx;
};
struct stm32_adc_diff_channel {
@@ -293,12 +300,24 @@ struct stm32_adc_diff_channel {
* struct stm32_adc_info - stm32 ADC, per instance config data
* @max_channels: Number of channels
* @resolutions: available resolutions
+ * @oversampling: available oversampling ratios
* @num_res: number of available resolutions
+ * @num_ovs: number of available oversampling ratios
*/
struct stm32_adc_info {
int max_channels;
const unsigned int *resolutions;
+ const unsigned int *oversampling;
const unsigned int num_res;
+ const unsigned int num_ovs;
+};
+
+static const unsigned int stm32h7_adc_oversampling_avail[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,
+};
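+/* Index i in the list selects a ratio of 2^i; adc->ovs_idx stores this index */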
+
+static const unsigned int stm32mp13_adc_oversampling_avail[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256,
};
static const unsigned int stm32f4_adc_resolutions[] = {
@@ -322,14 +341,18 @@ static const unsigned int stm32h7_adc_resolutions[] = {
static const struct stm32_adc_info stm32h7_adc_info = {
.max_channels = STM32_ADC_CH_MAX,
.resolutions = stm32h7_adc_resolutions,
+ .oversampling = stm32h7_adc_oversampling_avail,
.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
+ .num_ovs = ARRAY_SIZE(stm32h7_adc_oversampling_avail),
};
/* stm32mp13 can have up to 19 channels */
static const struct stm32_adc_info stm32mp13_adc_info = {
.max_channels = 19,
.resolutions = stm32f4_adc_resolutions,
+ .oversampling = stm32mp13_adc_oversampling_avail,
.num_res = ARRAY_SIZE(stm32f4_adc_resolutions),
+ .num_ovs = ARRAY_SIZE(stm32mp13_adc_oversampling_avail),
};
/*
@@ -469,7 +492,7 @@ static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
{ LPTIM1_OUT, STM32_EXT18 },
{ LPTIM2_OUT, STM32_EXT19 },
{ LPTIM3_OUT, STM32_EXT20 },
- {},
+ { }
};
/*
@@ -889,6 +912,56 @@ static void stm32mp13_adc_start_conv(struct iio_dev *indio_dev, bool dma)
stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADSTART);
}
+static void stm32h7_adc_set_ovs(struct iio_dev *indio_dev, u32 ovs_idx)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ u32 ovsr_bits, bits, msk;
+
+ msk = STM32H7_ROVSE | STM32H7_OVSR_MASK | STM32H7_OVSS_MASK;
+ stm32_adc_clr_bits(adc, STM32H7_ADC_CFGR2, msk);
+
+ if (!ovs_idx)
+ return;
+
+ /*
+ * Only the oversampling ratios corresponding to 2^ovs_idx are exposed in sysfs.
+ * Oversampling ratios [2,3,...,1024] are mapped on OVSR register values [1,2,...,1023].
+ * OVSR = 2^ovs_idx - 1
+	 * These ratios increase the resolution by ovs_idx bits. Apply a right shift to keep
+	 * the initial resolution given by the "assigned-resolution-bits" property.
+ * OVSS = ovs_idx
+ */
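+	/* E.g. ovs_idx = 4 selects a ratio of 16: OVSR = 0xf, OVSS = 4 */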
+ ovsr_bits = GENMASK(ovs_idx - 1, 0);
+ bits = STM32H7_ROVSE | STM32H7_OVSS(ovs_idx) | STM32H7_OVSR(ovsr_bits);
+
+ stm32_adc_set_bits(adc, STM32H7_ADC_CFGR2, bits & msk);
+}
+
+static void stm32mp13_adc_set_ovs(struct iio_dev *indio_dev, u32 ovs_idx)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ u32 bits, msk;
+
+ msk = STM32H7_ROVSE | STM32MP13_OVSR_MASK | STM32MP13_OVSS_MASK;
+ stm32_adc_clr_bits(adc, STM32H7_ADC_CFGR2, msk);
+
+ if (!ovs_idx)
+ return;
+
+ /*
+ * The oversampling ratios [2,4,8,..,256] are mapped on OVSR register values [0,1,...,7].
+ * OVSR = ovs_idx - 1
+	 * These ratios increase the resolution by ovs_idx bits. Apply a right shift to keep
+	 * the initial resolution given by the "assigned-resolution-bits" property.
+ * OVSS = ovs_idx
+ */
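+	/* E.g. ovs_idx = 4 selects a ratio of 16: OVSR = 3, OVSS = 4 */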
+ bits = STM32H7_ROVSE | STM32MP13_OVSS(ovs_idx);
+ if (ovs_idx - 1)
+ bits |= STM32MP13_OVSR(ovs_idx - 1);
+
+ stm32_adc_set_bits(adc, STM32H7_ADC_CFGR2, bits & msk);
+}
+
static int stm32h7_adc_exit_pwr_down(struct iio_dev *indio_dev)
{
struct stm32_adc *adc = iio_priv(indio_dev);
@@ -1461,6 +1534,67 @@ static int stm32_adc_single_conv(struct iio_dev *indio_dev,
return ret;
}
+static int stm32_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
+ int nb = adc->cfg->adc_info->num_ovs;
+ unsigned int idx;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if (val2)
+ return -EINVAL;
+
+ for (idx = 0; idx < nb; idx++)
+ if (adc->cfg->adc_info->oversampling[idx] == val)
+ break;
+ if (idx >= nb)
+ return -EINVAL;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto err;
+
+ adc->cfg->set_ovs(indio_dev, idx);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ adc->ovs_idx = idx;
+
+err:
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int stm32_adc_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length, long m)
+{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *type = IIO_VAL_INT;
+ *length = adc->cfg->adc_info->num_ovs;
+ *vals = adc->cfg->adc_info->oversampling;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int stm32_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -1502,6 +1636,10 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
*val = 0;
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = adc->cfg->adc_info->oversampling[adc->ovs_idx];
+ return IIO_VAL_INT;
+
default:
return -EINVAL;
}
@@ -1678,6 +1816,8 @@ static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
static const struct iio_info stm32_adc_iio_info = {
.read_raw = stm32_adc_read_raw,
+ .write_raw = stm32_adc_write_raw,
+ .read_avail = stm32_adc_read_avail,
.validate_trigger = stm32_adc_validate_trigger,
.hwfifo_set_watermark = stm32_adc_set_watermark,
.update_scan_mode = stm32_adc_update_scan_mode,
@@ -1858,8 +1998,8 @@ static irqreturn_t stm32_adc_trigger_handler(int irq, void *p)
/* reset buffer index */
adc->bufi = 0;
- iio_push_to_buffers_with_timestamp(indio_dev, adc->buffer,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, adc->buffer, sizeof(adc->buffer),
+ pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
/* re-enable eoc irq */
@@ -1876,7 +2016,7 @@ static const struct iio_chan_spec_ext_info stm32_adc_ext_info[] = {
.read = iio_enum_available_read,
.private = (uintptr_t)&stm32_adc_trig_pol,
},
- {},
+ { }
};
static void stm32_adc_debugfs_init(struct iio_dev *indio_dev)
@@ -1971,6 +2111,10 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET);
+ if (adc->cfg->has_oversampling) {
+ chan->info_mask_shared_by_all |= BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ chan->info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ }
chan->scan_type.sign = 'u';
chan->scan_type.realbits = adc->cfg->adc_info->resolutions[adc->res];
chan->scan_type.storagebits = 16;
@@ -2587,6 +2731,7 @@ static const struct stm32_adc_cfg stm32h7_adc_cfg = {
.has_boostmode = true,
.has_linearcal = true,
.has_presel = true,
+ .has_oversampling = true,
.start_conv = stm32h7_adc_start_conv,
.stop_conv = stm32h7_adc_stop_conv,
.prepare = stm32h7_adc_prepare,
@@ -2594,6 +2739,7 @@ static const struct stm32_adc_cfg stm32h7_adc_cfg = {
.smp_cycles = stm32h7_adc_smp_cycles,
.irq_clear = stm32h7_adc_irq_clear,
.ts_int_ch = stm32_adc_min_ts_h7,
+ .set_ovs = stm32h7_adc_set_ovs,
};
static const unsigned int stm32_adc_min_ts_mp1[] = { 100, 100, 100, 4300, 9800 };
@@ -2607,6 +2753,7 @@ static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
.has_boostmode = true,
.has_linearcal = true,
.has_presel = true,
+ .has_oversampling = true,
.start_conv = stm32h7_adc_start_conv,
.stop_conv = stm32h7_adc_stop_conv,
.prepare = stm32h7_adc_prepare,
@@ -2614,6 +2761,7 @@ static const struct stm32_adc_cfg stm32mp1_adc_cfg = {
.smp_cycles = stm32h7_adc_smp_cycles,
.irq_clear = stm32h7_adc_irq_clear,
.ts_int_ch = stm32_adc_min_ts_mp1,
+ .set_ovs = stm32h7_adc_set_ovs,
};
static const unsigned int stm32_adc_min_ts_mp13[] = { 100, 0, 0, 4300, 9800 };
@@ -2623,6 +2771,7 @@ static const struct stm32_adc_cfg stm32mp13_adc_cfg = {
.regs = &stm32mp13_adc_regspec,
.adc_info = &stm32mp13_adc_info,
.trigs = stm32h7_adc_trigs,
+ .has_oversampling = true,
.start_conv = stm32mp13_adc_start_conv,
.stop_conv = stm32h7_adc_stop_conv,
.prepare = stm32h7_adc_prepare,
@@ -2630,6 +2779,7 @@ static const struct stm32_adc_cfg stm32mp13_adc_cfg = {
.smp_cycles = stm32mp13_adc_smp_cycles,
.irq_clear = stm32h7_adc_irq_clear,
.ts_int_ch = stm32_adc_min_ts_mp13,
+ .set_ovs = stm32mp13_adc_set_ovs,
};
static const struct of_device_id stm32_adc_of_match[] = {
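
The oversampling support is wired through the per-variant cfg: each SoC description gains a has_oversampling flag and a set_ovs() callback (stm32h7_adc_set_ovs for H7/MP1, stm32mp13_adc_set_ovs for MP13), so the common read/write paths never hard-code a SoC. A condensed sketch of that dispatch, with my_adc_cfg and my_get_cfg as hypothetical stand-ins for the driver's types:

	struct my_adc_cfg {
		bool has_oversampling;
		void (*set_ovs)(struct iio_dev *indio_dev, u32 ovs_idx);
	};

	static void my_apply_ovs(struct iio_dev *indio_dev, u32 idx)
	{
		const struct my_adc_cfg *cfg = my_get_cfg(indio_dev); /* hypothetical lookup */

		if (cfg->has_oversampling)
			cfg->set_ovs(indio_dev, idx);	/* H7/MP1 vs MP13 flavour */
	}
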
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 726ddafc9f6d..f583924eb16b 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -108,7 +108,7 @@ static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_type[] = {
{ "SPI_F", 1 }, /* SPI with data on falling edge */
{ "MANCH_R", 2 }, /* Manchester codec, rising edge = logic 0 */
{ "MANCH_F", 3 }, /* Manchester codec, falling edge = logic 1 */
- {},
+ { }
};
/* DFSDM channel clock source */
@@ -121,7 +121,7 @@ static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_src[] = {
{ "CLKOUT_F", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING },
/* Internal SPI clock divided by 2 (falling edge) */
{ "CLKOUT_R", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING },
- {},
+ { }
};
static int stm32_dfsdm_str2val(const char *str,
@@ -167,7 +167,7 @@ static const struct stm32_dfsdm_trig_info stm32_dfsdm_trigs[] = {
{ LPTIM1_OUT, 26 },
{ LPTIM2_OUT, 27 },
{ LPTIM3_OUT, 28 },
- {},
+ { }
};
static int stm32_dfsdm_get_jextsel(struct iio_dev *indio_dev,
@@ -1747,7 +1747,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = {
.compatible = "st,stm32-dfsdm-dmic",
.data = &stm32h7_dfsdm_audio_data,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match);
diff --git a/drivers/iio/adc/sun20i-gpadc-iio.c b/drivers/iio/adc/sun20i-gpadc-iio.c
index 136b8d9c294f..e4dfe76e6362 100644
--- a/drivers/iio/adc/sun20i-gpadc-iio.c
+++ b/drivers/iio/adc/sun20i-gpadc-iio.c
@@ -15,6 +15,7 @@
#include <linux/property.h>
#include <linux/reset.h>
+#include <linux/iio/adc-helpers.h>
#include <linux/iio/iio.h>
#define SUN20I_GPADC_DRIVER_NAME "sun20i-gpadc"
@@ -149,36 +150,23 @@ static void sun20i_gpadc_reset_assert(void *data)
reset_control_assert(rst);
}
+static const struct iio_chan_spec sun20i_gpadc_chan_template = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+};
+
static int sun20i_gpadc_alloc_channels(struct iio_dev *indio_dev,
struct device *dev)
{
- unsigned int channel;
- int num_channels, i, ret;
+ int num_channels;
struct iio_chan_spec *channels;
- num_channels = device_get_child_node_count(dev);
- if (num_channels == 0)
- return dev_err_probe(dev, -ENODEV, "no channel children\n");
-
- channels = devm_kcalloc(dev, num_channels, sizeof(*channels),
- GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- i = 0;
- device_for_each_child_node_scoped(dev, node) {
- ret = fwnode_property_read_u32(node, "reg", &channel);
- if (ret)
- return dev_err_probe(dev, ret, "invalid channel number\n");
-
- channels[i].type = IIO_VOLTAGE;
- channels[i].indexed = 1;
- channels[i].channel = channel;
- channels[i].info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
- channels[i].info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
-
- i++;
- }
+ num_channels = devm_iio_adc_device_alloc_chaninfo_se(dev,
+ &sun20i_gpadc_chan_template, -1, &channels);
+ if (num_channels < 0)
+ return num_channels;
indio_dev->channels = channels;
indio_dev->num_channels = num_channels;
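
devm_iio_adc_device_alloc_chaninfo_se() replaces the open-coded child-node walk: it reads each child's "reg" property, clones the template into a devm-allocated array and returns the channel count (or a negative errno, which covers the former "no channel children" case). A minimal sketch of the calling convention, taking -1 — as used above — to mean no maximum channel index is enforced:

	static const struct iio_chan_spec my_chan_template = {
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
	};

	static int my_alloc_channels(struct device *dev, struct iio_dev *indio_dev)
	{
		struct iio_chan_spec *channels;
		int num;

		/* one clone of the template per child node with a "reg" property */
		num = devm_iio_adc_device_alloc_chaninfo_se(dev, &my_chan_template,
							    -1, &channels);
		if (num < 0)
			return num;

		indio_dev->channels = channels;
		indio_dev->num_channels = num;
		return 0;
	}

The helper lives in the IIO_DRIVER symbol namespace, hence the MODULE_IMPORT_NS("IIO_DRIVER") added at the end of the file below.
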
@@ -255,7 +243,7 @@ static int sun20i_gpadc_probe(struct platform_device *pdev)
static const struct of_device_id sun20i_gpadc_of_id[] = {
{ .compatible = "allwinner,sun20i-d1-gpadc" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, sun20i_gpadc_of_id);
@@ -271,3 +259,4 @@ module_platform_driver(sun20i_gpadc_driver);
MODULE_DESCRIPTION("ADC driver for sunxi platforms");
MODULE_AUTHOR("Maksim Kiselev <bigunclemax@gmail.com>");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DRIVER");
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 8b27458dcd66..6b8d6bee1873 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -116,7 +116,7 @@ struct sun4i_gpadc_iio {
static const struct iio_map sun4i_gpadc_hwmon_maps[] = {
IIO_MAP("temp_adc", "iio_hwmon.0", NULL),
- { /* sentinel */ },
+ { }
};
static const struct iio_chan_spec sun4i_gpadc_channels[] = {
@@ -485,7 +485,7 @@ static const struct of_device_id sun4i_gpadc_of_id[] = {
.compatible = "allwinner,sun8i-a33-ths",
.data = &sun8i_a33_gpadc_data,
},
- { /* sentinel */ }
+ { }
};
static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
@@ -685,7 +685,7 @@ static const struct platform_device_id sun4i_gpadc_id[] = {
{ "sun4i-a10-gpadc-iio", (kernel_ulong_t)&sun4i_gpadc_data },
{ "sun5i-a13-gpadc-iio", (kernel_ulong_t)&sun5i_gpadc_data },
{ "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data },
- { /* sentinel */ },
+ { }
};
MODULE_DEVICE_TABLE(platform, sun4i_gpadc_id);
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 1af9be071d8d..4f514db5c26e 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -140,8 +140,8 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p)
if (ret < 0)
goto out;
data->scan.channel = ret;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index e2dbd070c7c4..cfcdafbe284b 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -225,8 +225,8 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
adc->data[i] = ret;
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, adc->data, sizeof(adc->data),
+ iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 9c845ee01697..50a474f4d9f5 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -151,8 +151,8 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc)
if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0)
dev_err(&adc->spi->dev, "Failed to read data\n");
- iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &adc->scan, sizeof(adc->scan),
+ iio_get_time_ns(indio_dev));
mutex_unlock(&adc->lock);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 7f065f457b36..9dc465a10ffc 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -376,8 +376,8 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
}
}
- iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, adc->data, sizeof(adc->data),
+ iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index a456ea78462f..1b46a8155803 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -9,6 +9,7 @@
* https://www.ti.com/lit/ds/symlink/adc124s021.pdf
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/mod_devicetable.h>
@@ -20,40 +21,44 @@
struct adc128_configuration {
const struct iio_chan_spec *channels;
u8 num_channels;
+ const char *refname;
+ int num_other_regulators;
+ const char * const (*other_regulators)[];
};
struct adc128 {
struct spi_device *spi;
- struct regulator *reg;
+ /*
+ * Serialize the SPI 'write-channel + read data' accesses and protect
+ * the shared buffer.
+ */
struct mutex lock;
-
- u8 buffer[2] __aligned(IIO_DMA_MINALIGN);
+ int vref_mv;
+ union {
+ __be16 buffer16;
+ u8 buffer[2];
+ } __aligned(IIO_DMA_MINALIGN);
};
static int adc128_adc_conversion(struct adc128 *adc, u8 channel)
{
int ret;
- mutex_lock(&adc->lock);
+ guard(mutex)(&adc->lock);
adc->buffer[0] = channel << 3;
adc->buffer[1] = 0;
- ret = spi_write(adc->spi, &adc->buffer, 2);
- if (ret < 0) {
- mutex_unlock(&adc->lock);
+ ret = spi_write(adc->spi, &adc->buffer, sizeof(adc->buffer));
+ if (ret < 0)
return ret;
- }
-
- ret = spi_read(adc->spi, &adc->buffer, 2);
-
- mutex_unlock(&adc->lock);
+ ret = spi_read(adc->spi, &adc->buffer16, sizeof(adc->buffer16));
if (ret < 0)
return ret;
- return ((adc->buffer[0] << 8 | adc->buffer[1]) & 0xFFF);
+ return be16_to_cpu(adc->buffer16) & 0xFFF;
}
static int adc128_read_raw(struct iio_dev *indio_dev,
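
The rewrite above leans on scope-based cleanup from <linux/cleanup.h>: guard(mutex)(&adc->lock) releases the lock automatically when it goes out of scope, which is what lets both error paths collapse into plain returns. A minimal sketch of the idiom with hypothetical names:

	#include <linux/cleanup.h>

	static int my_locked_convert(struct my_state *st)	/* hypothetical type */
	{
		guard(mutex)(&st->lock);	/* unlocked on every return below */

		if (!my_hw_ready(st))		/* hypothetical check */
			return -EBUSY;		/* no explicit unlock needed */

		return my_hw_convert(st);	/* hypothetical conversion */
	}
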
@@ -75,11 +80,7 @@ static int adc128_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
- ret = regulator_get_voltage(adc->reg);
- if (ret < 0)
- return ret;
-
- *val = ret / 1000;
+ *val = adc->vref_mv;
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
@@ -121,21 +122,34 @@ static const struct iio_chan_spec adc124s021_channels[] = {
ADC128_VOLTAGE_CHANNEL(3),
};
+static const char * const bd79104_regulators[] = { "iovdd" };
+
static const struct adc128_configuration adc128_config[] = {
- { adc128s052_channels, ARRAY_SIZE(adc128s052_channels) },
- { adc122s021_channels, ARRAY_SIZE(adc122s021_channels) },
- { adc124s021_channels, ARRAY_SIZE(adc124s021_channels) },
+ {
+ .channels = adc128s052_channels,
+ .num_channels = ARRAY_SIZE(adc128s052_channels),
+ .refname = "vref",
+ }, {
+ .channels = adc122s021_channels,
+ .num_channels = ARRAY_SIZE(adc122s021_channels),
+ .refname = "vref",
+ }, {
+ .channels = adc124s021_channels,
+ .num_channels = ARRAY_SIZE(adc124s021_channels),
+ .refname = "vref",
+ }, {
+ .channels = adc128s052_channels,
+ .num_channels = ARRAY_SIZE(adc128s052_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
+ },
};
static const struct iio_info adc128_info = {
.read_raw = adc128_read_raw,
};
-static void adc128_disable_regulator(void *reg)
-{
- regulator_disable(reg);
-}
-
static int adc128_probe(struct spi_device *spi)
{
const struct adc128_configuration *config;
@@ -159,20 +173,28 @@ static int adc128_probe(struct spi_device *spi)
indio_dev->channels = config->channels;
indio_dev->num_channels = config->num_channels;
- adc->reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(adc->reg))
- return PTR_ERR(adc->reg);
-
- ret = regulator_enable(adc->reg);
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev,
+ config->refname);
if (ret < 0)
- return ret;
- ret = devm_add_action_or_reset(&spi->dev, adc128_disable_regulator,
- adc->reg);
+ return dev_err_probe(&spi->dev, ret,
+ "failed to read '%s' voltage",
+ config->refname);
+
+ adc->vref_mv = ret / 1000;
+
+ if (config->num_other_regulators) {
+ ret = devm_regulator_bulk_get_enable(&spi->dev,
+ config->num_other_regulators,
+ *config->other_regulators);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to enable regulators\n");
+ }
+
+ ret = devm_mutex_init(&spi->dev, &adc->lock);
if (ret)
return ret;
- mutex_init(&adc->lock);
-
return devm_iio_device_register(&spi->dev, indio_dev);
}
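
devm_regulator_get_enable_read_voltage() folds the old get/enable/read/devm-disable sequence into one call and returns the voltage in microvolts (or a negative errno), which is why the probe above caches ret / 1000 as vref_mv. A sketch of the pattern, assuming a supply named "vref":

	static int my_probe_vref(struct device *dev, int *vref_mv)
	{
		int ret;

		/* get + enable + read voltage; disable/put are devm-managed */
		ret = devm_regulator_get_enable_read_voltage(dev, "vref");
		if (ret < 0)
			return dev_err_probe(dev, ret,
					     "failed to read 'vref' voltage\n");

		*vref_mv = ret / 1000;	/* helper reports microvolts */
		return 0;
	}
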
@@ -184,7 +206,8 @@ static const struct of_device_id adc128_of_match[] = {
{ .compatible = "ti,adc124s021", .data = &adc128_config[2] },
{ .compatible = "ti,adc124s051", .data = &adc128_config[2] },
{ .compatible = "ti,adc124s101", .data = &adc128_config[2] },
- { /* sentinel */ },
+ { .compatible = "rohm,bd79104", .data = &adc128_config[3] },
+ { }
};
MODULE_DEVICE_TABLE(of, adc128_of_match);
@@ -196,6 +219,7 @@ static const struct spi_device_id adc128_id[] = {
{ "adc124s021", (kernel_ulong_t)&adc128_config[2] },
{ "adc124s051", (kernel_ulong_t)&adc128_config[2] },
{ "adc124s101", (kernel_ulong_t)&adc128_config[2] },
+ { "bd79104", (kernel_ulong_t)&adc128_config[3] },
{ }
};
MODULE_DEVICE_TABLE(spi, adc128_id);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 4355726b373a..21181cc3bd85 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -12,6 +12,7 @@
*/
#include <linux/module.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/i2c.h>
@@ -466,8 +467,8 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
scan.chan = res;
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -533,6 +534,31 @@ static int ads1015_read_avail(struct iio_dev *indio_dev,
}
}
+static int __ads1015_read_info_raw(struct ads1015_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ int ret;
+
+ if (ads1015_event_channel_enabled(data) &&
+ data->event_channel != chan->address)
+ return -EBUSY;
+
+ ret = ads1015_set_power_state(data, true);
+ if (ret < 0)
+ return ret;
+
+ ret = ads1015_get_adc_result(data, chan->address, val);
+ if (ret < 0) {
+ ads1015_set_power_state(data, false);
+ return ret;
+ }
+
+ *val = sign_extend32(*val >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+
+ return ads1015_set_power_state(data, false);
+}
+
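
This factoring is the recurring shape of the claim-direct conversions in this series (ccs811, atlas, vf610 and ad74115 below follow it too): iio_device_claim_direct() now returns a bool, the caller claims, delegates to a helper that may assume direct mode, then releases unconditionally. Condensed, with my_* names standing in for each driver's own:

	static int my_read_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan, int *val)
	{
		struct my_state *st = iio_priv(indio_dev);
		int ret;

		if (!iio_device_claim_direct(indio_dev))
			return -EBUSY;		/* e.g. a buffer is enabled */

		ret = __my_read_info_raw(st, chan, val); /* hypothetical helper */
		iio_device_release_direct(indio_dev);

		return ret ? ret : IIO_VAL_INT;
	}
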
static int ads1015_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -540,58 +566,29 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
int ret, idx;
struct ads1015_data *data = iio_priv(indio_dev);
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __ads1015_read_info_raw(data, chan, val);
+ iio_device_release_direct(indio_dev);
if (ret)
- break;
-
- if (ads1015_event_channel_enabled(data) &&
- data->event_channel != chan->address) {
- ret = -EBUSY;
- goto release_direct;
- }
-
- ret = ads1015_set_power_state(data, true);
- if (ret < 0)
- goto release_direct;
-
- ret = ads1015_get_adc_result(data, chan->address, val);
- if (ret < 0) {
- ads1015_set_power_state(data, false);
- goto release_direct;
- }
-
- *val = sign_extend32(*val >> chan->scan_type.shift,
- chan->scan_type.realbits - 1);
-
- ret = ads1015_set_power_state(data, false);
- if (ret < 0)
- goto release_direct;
+ return ret;
- ret = IIO_VAL_INT;
-release_direct:
- iio_device_release_direct_mode(indio_dev);
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
idx = data->channel_data[chan->address].pga;
*val = ads1015_fullscale_range[idx];
*val2 = chan->scan_type.realbits - 1;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_SAMP_FREQ:
idx = data->channel_data[chan->address].data_rate;
*val = data->chip->data_rate[idx];
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- mutex_unlock(&data->lock);
-
- return ret;
}
static int ads1015_write_raw(struct iio_dev *indio_dev,
@@ -599,23 +596,16 @@ static int ads1015_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct ads1015_data *data = iio_priv(indio_dev);
- int ret;
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = ads1015_set_scale(data, chan, val, val2);
- break;
+ return ads1015_set_scale(data, chan, val, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = ads1015_set_data_rate(data, chan->address, val);
- break;
+ return ads1015_set_data_rate(data, chan->address, val);
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- mutex_unlock(&data->lock);
-
- return ret;
}
static int ads1015_read_event(struct iio_dev *indio_dev,
@@ -624,20 +614,18 @@ static int ads1015_read_event(struct iio_dev *indio_dev,
int *val2)
{
struct ads1015_data *data = iio_priv(indio_dev);
- int ret;
unsigned int comp_queue;
int period;
int dr;
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
switch (info) {
case IIO_EV_INFO_VALUE:
*val = (dir == IIO_EV_DIR_RISING) ?
data->thresh_data[chan->address].high_thresh :
data->thresh_data[chan->address].low_thresh;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_EV_INFO_PERIOD:
dr = data->channel_data[chan->address].data_rate;
comp_queue = data->thresh_data[chan->address].comp_queue;
@@ -646,16 +634,10 @@ static int ads1015_read_event(struct iio_dev *indio_dev,
*val = period / USEC_PER_SEC;
*val2 = period % USEC_PER_SEC;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
- mutex_unlock(&data->lock);
-
- return ret;
}
static int ads1015_write_event(struct iio_dev *indio_dev,
@@ -666,24 +648,22 @@ static int ads1015_write_event(struct iio_dev *indio_dev,
struct ads1015_data *data = iio_priv(indio_dev);
const int *data_rate = data->chip->data_rate;
int realbits = chan->scan_type.realbits;
- int ret = 0;
long long period;
int i;
int dr;
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
switch (info) {
case IIO_EV_INFO_VALUE:
- if (val >= 1 << (realbits - 1) || val < -1 << (realbits - 1)) {
- ret = -EINVAL;
- break;
- }
+ if (val >= 1 << (realbits - 1) || val < -1 << (realbits - 1))
+ return -EINVAL;
+
if (dir == IIO_EV_DIR_RISING)
data->thresh_data[chan->address].high_thresh = val;
else
data->thresh_data[chan->address].low_thresh = val;
- break;
+ return 0;
case IIO_EV_INFO_PERIOD:
dr = data->channel_data[chan->address].data_rate;
period = val * USEC_PER_SEC + val2;
@@ -694,15 +674,10 @@ static int ads1015_write_event(struct iio_dev *indio_dev,
break;
}
data->thresh_data[chan->address].comp_queue = i;
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
- mutex_unlock(&data->lock);
-
- return ret;
}
static int ads1015_read_event_config(struct iio_dev *indio_dev,
@@ -710,25 +685,19 @@ static int ads1015_read_event_config(struct iio_dev *indio_dev,
enum iio_event_direction dir)
{
struct ads1015_data *data = iio_priv(indio_dev);
- int ret = 0;
- mutex_lock(&data->lock);
- if (data->event_channel == chan->address) {
- switch (dir) {
- case IIO_EV_DIR_RISING:
- ret = 1;
- break;
- case IIO_EV_DIR_EITHER:
- ret = (data->comp_mode == ADS1015_CFG_COMP_MODE_WINDOW);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- }
- mutex_unlock(&data->lock);
+ guard(mutex)(&data->lock);
+ if (data->event_channel != chan->address)
+ return 0;
- return ret;
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return 1;
+ case IIO_EV_DIR_EITHER:
+ return (data->comp_mode == ADS1015_CFG_COMP_MODE_WINDOW);
+ default:
+ return -EINVAL;
+ }
}
static int ads1015_enable_event_config(struct ads1015_data *data,
@@ -813,23 +782,18 @@ static int ads1015_write_event_config(struct iio_dev *indio_dev,
int comp_mode = (dir == IIO_EV_DIR_EITHER) ?
ADS1015_CFG_COMP_MODE_WINDOW : ADS1015_CFG_COMP_MODE_TRAD;
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
/* Prevent from enabling both buffer and event at a time */
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret) {
- mutex_unlock(&data->lock);
- return ret;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
if (state)
ret = ads1015_enable_event_config(data, chan, comp_mode);
else
ret = ads1015_disable_event_config(data, chan, comp_mode);
- iio_device_release_direct_mode(indio_dev);
- mutex_unlock(&data->lock);
-
+ iio_device_release_direct(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/ti-ads1100.c b/drivers/iio/adc/ti-ads1100.c
index 1e46f07a9ca6..b0790e300b18 100644
--- a/drivers/iio/adc/ti-ads1100.c
+++ b/drivers/iio/adc/ti-ads1100.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -219,36 +220,30 @@ static int ads1100_read_raw(struct iio_dev *indio_dev,
int ret;
struct ads1100_data *data = iio_priv(indio_dev);
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- break;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ads1100_get_adc_result(data, chan->address, val);
- if (ret >= 0)
- ret = IIO_VAL_INT;
- iio_device_release_direct_mode(indio_dev);
- break;
+ iio_device_release_direct(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
/* full-scale is the supply voltage in millivolts */
*val = ads1100_get_vdd_millivolts(data);
*val2 = 15 + FIELD_GET(ADS1100_PGA_MASK, data->config);
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = ads1100_data_rate[FIELD_GET(ADS1100_DR_MASK,
data->config)];
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- mutex_unlock(&data->lock);
-
- return ret;
}
static int ads1100_write_raw(struct iio_dev *indio_dev,
@@ -256,23 +251,16 @@ static int ads1100_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct ads1100_data *data = iio_priv(indio_dev);
- int ret;
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = ads1100_set_scale(data, val, val2);
- break;
+ return ads1100_set_scale(data, val, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = ads1100_set_data_rate(data, chan->address, val);
- break;
+ return ads1100_set_data_rate(data, chan->address, val);
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- mutex_unlock(&data->lock);
-
- return ret;
}
static const struct iio_info ads1100_info = {
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index f120e7e21cff..d280c949cf47 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -534,8 +534,8 @@ static irqreturn_t ads1119_trigger_handler(int irq, void *private)
scan.sample = ret;
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 77c299bb4ebc..8ea1269f74db 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -297,8 +297,8 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p)
j++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, priv->buffer,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, priv->buffer, sizeof(priv->buffer),
+ pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index c6096b64664e..085f0d6fb39e 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -664,8 +664,8 @@ static irqreturn_t ads131e08_trigger_handler(int irq, void *private)
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, st->tmp_buf.data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &st->tmp_buf, sizeof(st->tmp_buf),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index af28672aa803..0356ccf23fea 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -403,10 +403,11 @@ static const struct iio_info ti_ads7950_info = {
.update_scan_mode = ti_ads7950_update_scan_mode,
};
-static void ti_ads7950_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ti_ads7950_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ti_ads7950_state *st = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&st->slock);
@@ -416,9 +417,11 @@ static void ti_ads7950_set(struct gpio_chip *chip, unsigned int offset,
st->cmd_settings_bitmask &= ~BIT(offset);
st->single_tx = TI_ADS7950_MAN_CMD_SETTINGS(st);
- spi_sync(st->spi, &st->scan_single_msg);
+ ret = spi_sync(st->spi, &st->scan_single_msg);
mutex_unlock(&st->slock);
+
+ return ret;
}
static int ti_ads7950_get(struct gpio_chip *chip, unsigned int offset)
@@ -499,7 +502,11 @@ static int ti_ads7950_direction_input(struct gpio_chip *chip,
static int ti_ads7950_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
- ti_ads7950_set(chip, offset, value);
+ int ret;
+
+ ret = ti_ads7950_set(chip, offset, value);
+ if (ret)
+ return ret;
return _ti_ads7950_set_direction(chip, offset, 0);
}
@@ -641,7 +648,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.direction_input = ti_ads7950_direction_input;
st->chip.direction_output = ti_ads7950_direction_output;
st->chip.get = ti_ads7950_get;
- st->chip.set = ti_ads7950_set;
+ st->chip.set_rv = ti_ads7950_set;
ret = gpiochip_add_data(&st->chip, st);
if (ret) {
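
The .set -> .set_rv switch is the gpiolib conversion to int-returning setters: the spi_sync() status now propagates to the GPIO caller instead of being silently dropped, and the direction_output path above checks it before configuring direction. A minimal sketch of a chip using the returning variant (my_* names are hypothetical):

	static int my_gpio_set(struct gpio_chip *chip, unsigned int offset,
			       int value)
	{
		struct my_state *st = gpiochip_get_data(chip);

		/* bus errors now reach the gpiolib caller */
		return my_bus_write_bit(st, offset, value); /* hypothetical helper */
	}

	/* at registration time:
	 *	chip->set_rv = my_gpio_set;	// replaces the legacy void chip->set
	 */
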
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index a31658b760a4..b0bf46cae0b6 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -389,8 +389,8 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
j++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, buffer, sizeof(buffer),
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-lmp92064.c b/drivers/iio/adc/ti-lmp92064.c
index 1e4a78677fe5..3f375c1f586c 100644
--- a/drivers/iio/adc/ti-lmp92064.c
+++ b/drivers/iio/adc/ti-lmp92064.c
@@ -209,8 +209,8 @@ static irqreturn_t lmp92064_trigger_handler(int irq, void *p)
if (ret)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, &data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data, sizeof(data),
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -366,7 +366,7 @@ MODULE_DEVICE_TABLE(spi, lmp92064_id_table);
static const struct of_device_id lmp92064_of_table[] = {
{ .compatible = "ti,lmp92064" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, lmp92064_of_table);
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 5a138be983ed..f67945c62c99 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -99,8 +99,8 @@ static irqreturn_t tlc4541_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, st->rx_buf, sizeof(st->rx_buf),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 49560059f4b7..c2d2aada6772 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -418,8 +418,9 @@ static int tsc2046_adc_scan(struct iio_dev *indio_dev)
for (group = 0; group < priv->groups; group++)
priv->scan_buf.data[group] = tsc2046_adc_get_val(priv, group);
- ret = iio_push_to_buffers_with_timestamp(indio_dev, &priv->scan_buf,
- iio_get_time_ns(indio_dev));
+ ret = iio_push_to_buffers_with_ts(indio_dev, &priv->scan_buf,
+ sizeof(priv->scan_buf),
+ iio_get_time_ns(indio_dev));
/* If the consumer is kfifo, we may get a EBUSY here - ignore it. */
if (ret < 0 && ret != -EBUSY) {
dev_err_ratelimited(dev, "Failed to push scan buffer %pe\n",
@@ -760,7 +761,6 @@ static int tsc2046_adc_probe(struct spi_device *spi)
if (!dcfg)
return -EINVAL;
- spi->bits_per_word = 8;
spi->mode &= ~SPI_MODE_X_MASK;
spi->mode |= SPI_MODE_0;
ret = spi_setup(spi);
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index ef7430e6877d..3ac774ebf678 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -871,7 +871,7 @@ static const struct of_device_id of_twl6030_match_tbl[] = {
.compatible = "ti,twl6032-gpadc",
.data = &twl6032_pdata,
},
- { /* end */ }
+ { }
};
MODULE_DEVICE_TABLE(of, of_twl6030_match_tbl);
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 513365d42aa5..6404b015234a 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -11,6 +11,7 @@
#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -504,7 +505,7 @@ static const struct iio_enum vf610_conversion_mode = {
static const struct iio_chan_spec_ext_info vf610_ext_info[] = {
IIO_ENUM("conversion_mode", IIO_SHARED_BY_DIR, &vf610_conversion_mode),
- {},
+ { }
};
#define VF610_ADC_CHAN(_idx, _chan_type) { \
@@ -591,9 +592,9 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
info->value = vf610_adc_read_data(info);
if (iio_buffer_enabled(indio_dev)) {
info->scan.chan = info->value;
- iio_push_to_buffers_with_timestamp(indio_dev,
- &info->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &info->scan,
+ sizeof(info->scan),
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
} else
complete(&info->completion);
@@ -630,36 +631,29 @@ static const struct attribute_group vf610_attribute_group = {
.attrs = vf610_attributes,
};
-static int vf610_read_sample(struct iio_dev *indio_dev,
+static int vf610_read_sample(struct vf610_adc *info,
struct iio_chan_spec const *chan, int *val)
{
- struct vf610_adc *info = iio_priv(indio_dev);
unsigned int hc_cfg;
int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&info->lock);
+ guard(mutex)(&info->lock);
reinit_completion(&info->completion);
hc_cfg = VF610_ADC_ADCHC(chan->channel);
hc_cfg |= VF610_ADC_AIEN;
writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
ret = wait_for_completion_interruptible_timeout(&info->completion,
VF610_ADC_TIMEOUT);
- if (ret == 0) {
- ret = -ETIMEDOUT;
- goto out_unlock;
- }
+ if (ret == 0)
+ return -ETIMEDOUT;
if (ret < 0)
- goto out_unlock;
+ return ret;
switch (chan->type) {
case IIO_VOLTAGE:
*val = info->value;
- break;
+ return 0;
case IIO_TEMP:
/*
* Calculate in degree Celsius times 1000
@@ -669,17 +663,10 @@ static int vf610_read_sample(struct iio_dev *indio_dev,
*val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) *
1000000 / VF610_TEMP_SLOPE_COEFF;
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
-out_unlock:
- mutex_unlock(&info->lock);
- iio_device_release_direct_mode(indio_dev);
-
- return ret;
}
static int vf610_read_raw(struct iio_dev *indio_dev,
@@ -694,7 +681,10 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
- ret = vf610_read_sample(indio_dev, chan, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = vf610_read_sample(info, chan, val);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -823,7 +813,7 @@ static const struct vf610_chip_info imx6sx_chip_info = {
static const struct of_device_id vf610_adc_match[] = {
{ .compatible = "fsl,imx6sx-adc", .data = &imx6sx_chip_info},
{ .compatible = "fsl,vf610-adc", .data = &vf610_chip_info},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, vf610_adc_match);
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index e1f8740ae688..e257c1b94a5f 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1186,7 +1186,7 @@ static const struct of_device_id xadc_of_match_table[] = {
.compatible = "xlnx,system-management-wiz-1.3",
.data = &xadc_us_axi_ops
},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
diff --git a/drivers/iio/addac/ad74115.c b/drivers/iio/addac/ad74115.c
index a7e480f2472d..4d8b64048e4f 100644
--- a/drivers/iio/addac/ad74115.c
+++ b/drivers/iio/addac/ad74115.c
@@ -542,18 +542,16 @@ static int ad74115_gpio_get(struct gpio_chip *gc, unsigned int offset)
return FIELD_GET(AD74115_GPIO_CONFIG_GPI_DATA, val);
}
-static void ad74115_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int ad74115_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct ad74115_state *st = gpiochip_get_data(gc);
- struct device *dev = &st->spi->dev;
- int ret;
- ret = regmap_update_bits(st->regmap, AD74115_GPIO_CONFIG_X_REG(offset),
- AD74115_GPIO_CONFIG_GPO_DATA,
- FIELD_PREP(AD74115_GPIO_CONFIG_GPO_DATA, value));
- if (ret)
- dev_err(dev, "Failed to set GPIO %u output value, err: %d\n",
- offset, ret);
+ return regmap_update_bits(st->regmap,
+ AD74115_GPIO_CONFIG_X_REG(offset),
+ AD74115_GPIO_CONFIG_GPO_DATA,
+ FIELD_PREP(AD74115_GPIO_CONFIG_GPO_DATA,
+ value));
}
static int ad74115_set_comp_debounce(struct ad74115_state *st, unsigned int val)
@@ -866,15 +864,14 @@ static int ad74115_get_adc_code(struct iio_dev *indio_dev,
struct ad74115_state *st = iio_priv(indio_dev);
int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
ret = _ad74115_get_adc_code(st, channel, val);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -1580,7 +1577,7 @@ static int ad74115_setup_gpio_chip(struct ad74115_state *st)
.direction_input = ad74115_gpio_direction_input,
.direction_output = ad74115_gpio_direction_output,
.get = ad74115_gpio_get,
- .set = ad74115_gpio_set,
+ .set_rv = ad74115_gpio_set,
};
return devm_gpiochip_add_data(dev, &st->gc, st);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index f14d12b03da6..a0bb1dbcb7ad 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -4,7 +4,6 @@
* Author: Cosmin Tanislav <cosmin.tanislav@analog.com>
*/
-#include <linux/unaligned.h>
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/crc8.h>
@@ -24,6 +23,8 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
#include <dt-bindings/iio/addac/adi,ad74413r.h>
@@ -84,7 +85,7 @@ struct ad74413r_state {
*/
struct {
u8 rx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX];
- s64 timestamp;
+ aligned_s64 timestamp;
} adc_samples_buf __aligned(IIO_DMA_MINALIGN);
u8 adc_samples_tx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX];
@@ -276,8 +277,8 @@ static int ad74413r_set_comp_drive_strength(struct ad74413r_state *st,
}
-static void ad74413r_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int ad74413r_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ad74413r_state *st = gpiochip_get_data(chip);
unsigned int real_offset = st->gpo_gpio_offsets[offset];
@@ -286,16 +287,16 @@ static void ad74413r_gpio_set(struct gpio_chip *chip,
ret = ad74413r_set_gpo_config(st, real_offset,
AD74413R_GPO_CONFIG_LOGIC);
if (ret)
- return;
+ return ret;
- regmap_update_bits(st->regmap, AD74413R_REG_GPO_CONFIG_X(real_offset),
- AD74413R_GPO_CONFIG_DATA_MASK,
- val ? AD74413R_GPO_CONFIG_DATA_MASK : 0);
+ return regmap_update_bits(st->regmap,
+ AD74413R_REG_GPO_CONFIG_X(real_offset),
+ AD74413R_GPO_CONFIG_DATA_MASK,
+ val ? AD74413R_GPO_CONFIG_DATA_MASK : 0);
}
-static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask,
- unsigned long *bits)
+static int ad74413r_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
struct ad74413r_state *st = gpiochip_get_data(chip);
unsigned long real_mask = 0;
@@ -309,15 +310,15 @@ static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
ret = ad74413r_set_gpo_config(st, real_offset,
AD74413R_GPO_CONFIG_LOGIC_PARALLEL);
if (ret)
- return;
+ return ret;
real_mask |= BIT(real_offset);
if (*bits & offset)
real_bits |= BIT(real_offset);
}
- regmap_update_bits(st->regmap, AD74413R_REG_GPO_PAR_DATA,
- real_mask, real_bits);
+ return regmap_update_bits(st->regmap, AD74413R_REG_GPO_PAR_DATA,
+ real_mask, real_bits);
}
static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -1424,8 +1425,8 @@ static int ad74413r_probe(struct spi_device *spi)
st->gpo_gpiochip.ngpio = st->num_gpo_gpios;
st->gpo_gpiochip.parent = st->dev;
st->gpo_gpiochip.can_sleep = true;
- st->gpo_gpiochip.set = ad74413r_gpio_set;
- st->gpo_gpiochip.set_multiple = ad74413r_gpio_set_multiple;
+ st->gpo_gpiochip.set_rv = ad74413r_gpio_set;
+ st->gpo_gpiochip.set_multiple_rv = ad74413r_gpio_set_multiple;
st->gpo_gpiochip.set_config = ad74413r_gpio_set_gpo_config;
st->gpo_gpiochip.get_direction =
ad74413r_gpio_get_gpo_direction;
@@ -1505,14 +1506,14 @@ static const struct of_device_id ad74413r_dt_id[] = {
.compatible = "adi,ad74413r",
.data = &ad74413r_chip_info_data,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ad74413r_dt_id);
static const struct spi_device_id ad74413r_spi_id[] = {
{ .name = "ad74412r", .driver_data = (kernel_ulong_t)&ad74412r_chip_info_data },
{ .name = "ad74413r", .driver_data = (kernel_ulong_t)&ad74413r_chip_info_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad74413r_spi_id);
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index b6a46036d5ea..ecaf59278c6f 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -514,7 +514,7 @@ static const struct of_device_id rescale_match[] = {
.data = &rescale_cfg[TEMP_SENSE_RTD], },
{ .compatible = "temperature-transducer",
.data = &rescale_cfg[TEMP_TRANSDUCER], },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, rescale_match);
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index 31564afb13a2..e73c9b983395 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -330,7 +330,7 @@ static const struct spi_device_id ad8366_id[] = {
{"adl5240", ID_ADL5240},
{"hmc792a", ID_HMC792},
{"hmc1119", ID_HMC1119},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad8366_id);
diff --git a/drivers/iio/amplifiers/ada4250.c b/drivers/iio/amplifiers/ada4250.c
index 566f0e1c98a5..74f8429d652b 100644
--- a/drivers/iio/amplifiers/ada4250.c
+++ b/drivers/iio/amplifiers/ada4250.c
@@ -378,13 +378,13 @@ static int ada4250_probe(struct spi_device *spi)
static const struct spi_device_id ada4250_id[] = {
{ "ada4250", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ada4250_id);
static const struct of_device_id ada4250_of_match[] = {
{ .compatible = "adi,ada4250" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ada4250_of_match);
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index d9a359e1388a..4dbf894c7e3b 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -270,7 +270,7 @@ static const struct iio_chan_spec_ext_info ltc6373_ext_info[] = {
.write = ltc6373_write_powerdown,
.shared = IIO_SEPARATE,
},
- {}
+ { }
};
#define HMC425A_CHAN(_channel) \
@@ -398,7 +398,6 @@ static int hmc425a_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, indio_dev);
}
-/* Match table for of_platform binding */
static const struct of_device_id hmc425a_of_match[] = {
{ .compatible = "adi,hmc425a",
.data = &hmc425a_chip_info_tbl[ID_HMC425A]},
@@ -408,7 +407,7 @@ static const struct of_device_id hmc425a_of_match[] = {
.data = &hmc425a_chip_info_tbl[ID_ADRF5740]},
{ .compatible = "adi,ltc6373",
.data = &hmc425a_chip_info_tbl[ID_LTC6373]},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, hmc425a_of_match);
diff --git a/drivers/iio/cdc/ad7150.c b/drivers/iio/cdc/ad7150.c
index e64a41bae32c..427d32e398b3 100644
--- a/drivers/iio/cdc/ad7150.c
+++ b/drivers/iio/cdc/ad7150.c
@@ -631,7 +631,7 @@ static const struct i2c_device_id ad7150_id[] = {
{ "ad7150", AD7150 },
{ "ad7151", AD7151 },
{ "ad7156", AD7150 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad7150_id);
@@ -640,7 +640,7 @@ static const struct of_device_id ad7150_of_match[] = {
{ "adi,ad7150" },
{ "adi,ad7151" },
{ "adi,ad7156" },
- {}
+ { }
};
static struct i2c_driver ad7150_driver = {
.driver = {
diff --git a/drivers/iio/cdc/ad7746.c b/drivers/iio/cdc/ad7746.c
index ba18dbbe0940..8a306d55c72a 100644
--- a/drivers/iio/cdc/ad7746.c
+++ b/drivers/iio/cdc/ad7746.c
@@ -792,7 +792,7 @@ static const struct i2c_device_id ad7746_id[] = {
{ "ad7745", 7745 },
{ "ad7746", 7746 },
{ "ad7747", 7747 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad7746_id);
@@ -800,7 +800,7 @@ static const struct of_device_id ad7746_of_match[] = {
{ .compatible = "adi,ad7745" },
{ .compatible = "adi,ad7746" },
{ .compatible = "adi,ad7747" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7746_of_match);
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index 330fe0af946f..b22afa1f6d59 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -108,6 +108,16 @@ config IAQCORE
iAQ-Core Continuous/Pulsed VOC (Volatile Organic Compounds)
sensors
+config MHZ19B
+ tristate "Winsen MHZ19B CO2 sensor"
+ depends on SERIAL_DEV_BUS
+ help
+ Say Y here to build Serdev interface support for the Winsen
+ MHZ19B CO2 sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called mhz19b.
+
config PMS7003
tristate "Plantower PMS7003 particulate matter sensor"
depends on SERIAL_DEV_BUS
@@ -166,6 +176,16 @@ config SCD4X
To compile this driver as a module, choose M here: the module will
be called scd4x.
+config SEN0322
+ tristate "SEN0322 oxygen sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here to build support for the DFRobot SEN0322 oxygen sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called sen0322.
+
config SENSIRION_SGP30
tristate "Sensirion SGPxx gas sensors"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 4866db06bdc9..2287a00a6b75 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -15,11 +15,13 @@ obj-$(CONFIG_ENS160) += ens160_core.o
obj-$(CONFIG_ENS160_I2C) += ens160_i2c.o
obj-$(CONFIG_ENS160_SPI) += ens160_spi.o
obj-$(CONFIG_IAQCORE) += ams-iaq-core.o
+obj-$(CONFIG_MHZ19B) += mhz19b.o
obj-$(CONFIG_PMS7003) += pms7003.o
obj-$(CONFIG_SCD30_CORE) += scd30_core.o
obj-$(CONFIG_SCD30_I2C) += scd30_i2c.o
obj-$(CONFIG_SCD30_SERIAL) += scd30_serial.o
obj-$(CONFIG_SCD4X) += scd4x.o
+obj-$(CONFIG_SEN0322) += sen0322.o
obj-$(CONFIG_SENSEAIR_SUNRISE_CO2) += sunrise_co2.o
obj-$(CONFIG_SENSIRION_SGP30) += sgp30.o
obj-$(CONFIG_SENSIRION_SGP40) += sgp40.o
diff --git a/drivers/iio/chemical/ags02ma.c b/drivers/iio/chemical/ags02ma.c
index 8fcd80946543..151178d4e8f4 100644
--- a/drivers/iio/chemical/ags02ma.c
+++ b/drivers/iio/chemical/ags02ma.c
@@ -140,13 +140,13 @@ static int ags02ma_probe(struct i2c_client *client)
static const struct i2c_device_id ags02ma_id_table[] = {
{ "ags02ma" },
- { /* Sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(i2c, ags02ma_id_table);
static const struct of_device_id ags02ma_of_table[] = {
{ .compatible = "aosong,ags02ma" },
- { /* Sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, ags02ma_of_table);
diff --git a/drivers/iio/chemical/atlas-ezo-sensor.c b/drivers/iio/chemical/atlas-ezo-sensor.c
index 761a853a4d17..de0b87edd188 100644
--- a/drivers/iio/chemical/atlas-ezo-sensor.c
+++ b/drivers/iio/chemical/atlas-ezo-sensor.c
@@ -189,7 +189,7 @@ static const struct i2c_device_id atlas_ezo_id[] = {
{ "atlas-co2-ezo", (kernel_ulong_t)&atlas_ezo_devices[ATLAS_CO2_EZO] },
{ "atlas-o2-ezo", (kernel_ulong_t)&atlas_ezo_devices[ATLAS_O2_EZO] },
{ "atlas-hum-ezo", (kernel_ulong_t)&atlas_ezo_devices[ATLAS_HUM_EZO] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, atlas_ezo_id);
@@ -197,7 +197,7 @@ static const struct of_device_id atlas_ezo_dt_ids[] = {
{ .compatible = "atlas,co2-ezo", .data = &atlas_ezo_devices[ATLAS_CO2_EZO], },
{ .compatible = "atlas,o2-ezo", .data = &atlas_ezo_devices[ATLAS_O2_EZO], },
{ .compatible = "atlas,hum-ezo", .data = &atlas_ezo_devices[ATLAS_HUM_EZO], },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, atlas_ezo_dt_ids);
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index baf93e5e3ca7..cb6662b92137 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -458,8 +458,9 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
&data->buffer, sizeof(__be32) * channels);
if (!ret)
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, data->buffer,
+ sizeof(data->buffer),
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
@@ -518,13 +519,12 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
case IIO_CONCENTRATION:
case IIO_ELECTRICALCONDUCTIVITY:
case IIO_VOLTAGE:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = atlas_read_measurement(data, chan->address, &reg);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
break;
default:
ret = -EINVAL;
@@ -594,7 +594,7 @@ static const struct i2c_device_id atlas_id[] = {
{ "atlas-orp-sm", (kernel_ulong_t)&atlas_devices[ATLAS_ORP_SM] },
{ "atlas-do-sm", (kernel_ulong_t)&atlas_devices[ATLAS_DO_SM] },
{ "atlas-rtd-sm", (kernel_ulong_t)&atlas_devices[ATLAS_RTD_SM] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, atlas_id);
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 9d73fd2cf52c..3e850562ab00 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -1120,8 +1120,8 @@ static irqreturn_t bme680_trigger_handler(int irq, void *p)
gas_range = FIELD_GET(BME680_GAS_RANGE_MASK, gas_regs_val);
data->scan.chan[3] = bme680_compensate_gas(data, adc_gas_res, gas_range);
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c
index ac7763f98a6a..5560ea708b36 100644
--- a/drivers/iio/chemical/bme680_i2c.c
+++ b/drivers/iio/chemical/bme680_i2c.c
@@ -37,13 +37,13 @@ static int bme680_i2c_probe(struct i2c_client *client)
static const struct i2c_device_id bme680_i2c_id[] = {
{ "bme680" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, bme680_i2c_id);
static const struct of_device_id bme680_of_i2c_match[] = {
{ .compatible = "bosch,bme680", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, bme680_of_i2c_match);
diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c
index ecb24ba0ebc9..aa97645ba539 100644
--- a/drivers/iio/chemical/bme680_spi.c
+++ b/drivers/iio/chemical/bme680_spi.c
@@ -112,14 +112,6 @@ static int bme680_spi_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
struct bme680_spi_bus_context *bus_context;
struct regmap *regmap;
- int ret;
-
- spi->bits_per_word = 8;
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi_setup failed!\n");
- return ret;
- }
bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
if (!bus_context)
@@ -140,13 +132,13 @@ static int bme680_spi_probe(struct spi_device *spi)
static const struct spi_device_id bme680_spi_id[] = {
{"bme680", 0},
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, bme680_spi_id);
static const struct of_device_id bme680_of_spi_match[] = {
{ .compatible = "bosch,bme680", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, bme680_of_spi_match);
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 451fb65dbe60..998c9239c4c7 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -15,6 +15,7 @@
* 4. Read error register and put the information in logs
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -214,6 +215,40 @@ static int ccs811_get_measurement(struct ccs811_data *data)
return ret;
}
+static int ccs811_read_info_raw(struct ccs811_data *data,
+ struct iio_chan_spec const *chan,
+ int *val, int mask)
+{
+ int ret;
+
+ guard(mutex)(&data->lock);
+ ret = ccs811_get_measurement(data);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = be16_to_cpu(data->buffer.raw_data) & CCS811_VOLTAGE_MASK;
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ *val = be16_to_cpu(data->buffer.raw_data) >> 10;
+ return IIO_VAL_INT;
+ case IIO_CONCENTRATION:
+ switch (chan->channel2) {
+ case IIO_MOD_CO2:
+ *val = be16_to_cpu(data->buffer.co2);
+ return IIO_VAL_INT;
+ case IIO_MOD_VOC:
+ *val = be16_to_cpu(data->buffer.voc);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
static int ccs811_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -223,46 +258,12 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&data->lock);
- ret = ccs811_get_measurement(data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- iio_device_release_direct_mode(indio_dev);
- return ret;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- switch (chan->type) {
- case IIO_VOLTAGE:
- *val = be16_to_cpu(data->buffer.raw_data) &
- CCS811_VOLTAGE_MASK;
- ret = IIO_VAL_INT;
- break;
- case IIO_CURRENT:
- *val = be16_to_cpu(data->buffer.raw_data) >> 10;
- ret = IIO_VAL_INT;
- break;
- case IIO_CONCENTRATION:
- switch (chan->channel2) {
- case IIO_MOD_CO2:
- *val = be16_to_cpu(data->buffer.co2);
- ret = IIO_VAL_INT;
- break;
- case IIO_MOD_VOC:
- *val = be16_to_cpu(data->buffer.voc);
- ret = IIO_VAL_INT;
- break;
- default:
- ret = -EINVAL;
- }
- break;
- default:
- ret = -EINVAL;
- }
- mutex_unlock(&data->lock);
- iio_device_release_direct_mode(indio_dev);
+ ret = ccs811_read_info_raw(data, chan, val, mask);
+
+ iio_device_release_direct(indio_dev);
return ret;
@@ -342,8 +343,8 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p)
goto err;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/chemical/ens160_core.c b/drivers/iio/chemical/ens160_core.c
index 152f81ff57e3..6cec60074827 100644
--- a/drivers/iio/chemical/ens160_core.c
+++ b/drivers/iio/chemical/ens160_core.c
@@ -267,8 +267,8 @@ static irqreturn_t ens160_trigger_handler(int irq, void *p)
if (ret)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/chemical/mhz19b.c b/drivers/iio/chemical/mhz19b.c
new file mode 100644
index 000000000000..3c64154918b1
--- /dev/null
+++ b/drivers/iio/chemical/mhz19b.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mh-z19b CO₂ sensor driver
+ *
+ * Copyright (c) 2025 Gyeyoung Baek <gye976@gmail.com>
+ *
+ * Datasheet:
+ * https://www.winsen-sensor.com/d/files/infrared-gas-sensor/mh-z19b-co2-ver1_0.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/kstrtox.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serdev.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+/*
+ * Commands have following format:
+ *
+ * +------+------+-----+------+------+------+------+------+-------+
+ * | 0xFF | 0x01 | cmd | arg0 | arg1 | 0x00 | 0x00 | 0x00 | cksum |
+ * +------+------+-----+------+------+------+------+------+-------+
+ */
+#define MHZ19B_CMD_SIZE 9
+
+/* ABC logic (automatic baseline correction) is the MHZ19B's auto-calibration. */
+#define MHZ19B_ABC_LOGIC_CMD 0x79
+#define MHZ19B_READ_CO2_CMD 0x86
+#define MHZ19B_SPAN_POINT_CMD 0x88
+#define MHZ19B_ZERO_POINT_CMD 0x87
+
+#define MHZ19B_SPAN_POINT_PPM_MIN 1000
+#define MHZ19B_SPAN_POINT_PPM_MAX 5000
+
+#define MHZ19B_SERDEV_TIMEOUT msecs_to_jiffies(100)
+
+struct mhz19b_state {
+ struct serdev_device *serdev;
+
+ /* Must wait until 'buf' is filled with 9 bytes. */
+ struct completion buf_ready;
+
+ u8 buf_idx;
+ /*
+ * Serdev receive buffer.
+ * When data is received from the MH-Z19B,
+ * the 'mhz19b_receive_buf' callback function is called and fills this buffer.
+ */
+ u8 buf[MHZ19B_CMD_SIZE] __aligned(IIO_DMA_MINALIGN);
+};
+
+static u8 mhz19b_get_checksum(u8 *cmd_buf)
+{
+ u8 i, checksum = 0;
+
+/*
+ * +------+------+-----+------+------+------+------+------+-------+
+ * | 0xFF | 0x01 | cmd | arg0 | arg1 | 0x00 | 0x00 | 0x00 | cksum |
+ * +------+------+-----+------+------+------+------+------+-------+
+ * i:1 2 3 4 5 6 7
+ *
+ * Sum all cmd_buf elements from index 1 to 7.
+ */
+ for (i = 1; i < 8; i++)
+ checksum += cmd_buf[i];
+
+ return -checksum;
+}
+
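
Worked example: the read-CO2 request is FF 01 86 00 00 00 00 00 <cksum>; bytes 1..7 sum to 0x01 + 0x86 = 0x87, and the negated sum truncated to a u8 is 0x100 - 0x87 = 0x79, so the full frame ends in 0x79. In code:

	u8 cmd[MHZ19B_CMD_SIZE] = { 0xFF, 0x01, MHZ19B_READ_CO2_CMD };

	/* bytes 1..7 sum to 0x87; -(0x87) as a u8 is 0x79 */
	cmd[8] = mhz19b_get_checksum(cmd);	/* == 0x79 */
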
+static int mhz19b_serdev_cmd(struct iio_dev *indio_dev, int cmd, u16 arg)
+{
+ struct mhz19b_state *st = iio_priv(indio_dev);
+ struct serdev_device *serdev = st->serdev;
+ struct device *dev = &indio_dev->dev;
+ int ret;
+
+ /*
+ * cmd_buf[3,4] : arg0,1
+ * cmd_buf[8] : checksum
+ */
+ u8 cmd_buf[MHZ19B_CMD_SIZE] = {
+ 0xFF, 0x01, cmd,
+ };
+
+ switch (cmd) {
+ case MHZ19B_ABC_LOGIC_CMD:
+ cmd_buf[3] = arg ? 0xA0 : 0;
+ break;
+ case MHZ19B_SPAN_POINT_CMD:
+ put_unaligned_be16(arg, &cmd_buf[3]);
+ break;
+ default:
+ break;
+ }
+ cmd_buf[8] = mhz19b_get_checksum(cmd_buf);
+
+ /* Write the command buffer to the UART controller synchronously. */
+ ret = serdev_device_write(serdev, cmd_buf, MHZ19B_CMD_SIZE, 0);
+ if (ret < 0)
+ return ret;
+ if (ret != MHZ19B_CMD_SIZE)
+ return -EIO;
+
+ switch (cmd) {
+ case MHZ19B_READ_CO2_CMD:
+ ret = wait_for_completion_interruptible_timeout(&st->buf_ready,
+ MHZ19B_SERDEV_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ if (st->buf[8] != mhz19b_get_checksum(st->buf)) {
+ dev_err(dev, "checksum err");
+ return -EINVAL;
+ }
+
+ return get_unaligned_be16(&st->buf[2]);
+ default:
+ /* No response commands. */
+ return 0;
+ }
+}
+
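
The reply path mirrors the request: the completion fires once nine bytes have accumulated, the trailing byte is validated with the same checksum routine (byte 1 of the reply echoes the 0x86 command, per the datasheet framing), and the concentration is the big-endian u16 at bytes 2-3. Reduced to its core:

	/* st->buf: FF 86 <ppm hi> <ppm lo> .. .. .. .. <cksum> */
	if (st->buf[8] != mhz19b_get_checksum(st->buf))
		return -EINVAL;				/* corrupted reply */

	return get_unaligned_be16(&st->buf[2]);		/* CO2 in ppm */
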
+static int mhz19b_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+
+ ret = mhz19b_serdev_cmd(indio_dev, MHZ19B_READ_CO2_CMD, 0);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+}
+
+/*
+ * echo 0 > calibration_auto_enable : ABC logic off
+ * echo 1 > calibration_auto_enable : ABC logic on
+ */
+static ssize_t calibration_auto_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret)
+ return ret;
+
+ ret = mhz19b_serdev_cmd(indio_dev, MHZ19B_ABC_LOGIC_CMD, enable);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+static IIO_DEVICE_ATTR_WO(calibration_auto_enable, 0);
+
+/*
+ * echo 0 > calibration_forced_value : zero point calibration
+ * (make sure the sensor has been working under 400ppm for over 20 minutes.)
+ * echo [1000 1 5000] > calibration_forced_value : span point calibration
+ * (make sure the sensor has been working under a certain level CO₂ for over 20 minutes.)
+ */
+static ssize_t calibration_forced_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ u16 ppm;
+ int cmd, ret;
+
+ ret = kstrtou16(buf, 0, &ppm);
+ if (ret)
+ return ret;
+
+ if (ppm) {
+ if (!in_range(ppm, MHZ19B_SPAN_POINT_PPM_MIN,
+ MHZ19B_SPAN_POINT_PPM_MAX - MHZ19B_SPAN_POINT_PPM_MIN + 1)) {
+ dev_dbg(&indio_dev->dev,
+				"span point ppm should be in the range [%d-%d]\n",
+ MHZ19B_SPAN_POINT_PPM_MIN, MHZ19B_SPAN_POINT_PPM_MAX);
+ return -EINVAL;
+ }
+
+ cmd = MHZ19B_SPAN_POINT_CMD;
+ } else {
+ cmd = MHZ19B_ZERO_POINT_CMD;
+ }
+
+ ret = mhz19b_serdev_cmd(indio_dev, cmd, ppm);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+static IIO_DEVICE_ATTR_WO(calibration_forced_value, 0);
+
+static struct attribute *mhz19b_attrs[] = {
+ &iio_dev_attr_calibration_auto_enable.dev_attr.attr,
+ &iio_dev_attr_calibration_forced_value.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group mhz19b_attr_group = {
+ .attrs = mhz19b_attrs,
+};
+
+static const struct iio_info mhz19b_info = {
+ .attrs = &mhz19b_attr_group,
+ .read_raw = mhz19b_read_raw,
+};
+
+static const struct iio_chan_spec mhz19b_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_CO2,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+};
+
+static size_t mhz19b_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(&serdev->dev);
+ struct mhz19b_state *st = iio_priv(indio_dev);
+
+	/* Defensive: never copy past the end of the 9-byte frame buffer. */
+	len = min_t(size_t, len, MHZ19B_CMD_SIZE - st->buf_idx);
+	memcpy(st->buf + st->buf_idx, data, len);
+ st->buf_idx += len;
+
+ if (st->buf_idx == MHZ19B_CMD_SIZE) {
+ st->buf_idx = 0;
+ complete(&st->buf_ready);
+ }
+
+ return len;
+}
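
Because the UART may deliver the 9-byte reply in fragments, the callback
accumulates data until buf_idx reaches MHZ19B_CMD_SIZE; for example, chunks of
4, 3 and 2 bytes complete one frame, reset buf_idx and wake the waiter in
mhz19b_serdev_cmd().
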
+
+static const struct serdev_device_ops mhz19b_ops = {
+ .receive_buf = mhz19b_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int mhz19b_probe(struct serdev_device *serdev)
+{
+ int ret;
+ struct device *dev = &serdev->dev;
+ struct iio_dev *indio_dev;
+ struct mhz19b_state *st;
+
+ serdev_device_set_client_ops(serdev, &mhz19b_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+ serdev_device_set_baudrate(serdev, 9600);
+ serdev_device_set_flow_control(serdev, false);
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret)
+ return ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ serdev_device_set_drvdata(serdev, indio_dev);
+
+ st = iio_priv(indio_dev);
+ st->serdev = serdev;
+
+ init_completion(&st->buf_ready);
+
+ ret = devm_regulator_get_enable(dev, "vin");
+ if (ret)
+ return ret;
+
+ indio_dev->name = "mh-z19b";
+ indio_dev->channels = mhz19b_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mhz19b_channels);
+ indio_dev->info = &mhz19b_info;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id mhz19b_of_match[] = {
+ { .compatible = "winsen,mhz19b", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mhz19b_of_match);
+
+static struct serdev_device_driver mhz19b_driver = {
+ .driver = {
+ .name = "mhz19b",
+ .of_match_table = mhz19b_of_match,
+ },
+ .probe = mhz19b_probe,
+};
+module_serdev_device_driver(mhz19b_driver);
+
+MODULE_AUTHOR("Gyeyoung Baek");
+MODULE_DESCRIPTION("MH-Z19B CO2 sensor driver using serdev interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index e05ce1f12065..656d4a12c58f 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -127,8 +127,8 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p)
pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET);
mutex_unlock(&state->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, &state->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &state->scan, sizeof(state->scan),
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index 3fed6b63710f..8316720b1fa3 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -601,7 +601,8 @@ static irqreturn_t scd30_trigger_handler(int irq, void *p)
if (ret)
goto out;
- iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
index 50e3ac44422b..2463149519b6 100644
--- a/drivers/iio/chemical/scd4x.c
+++ b/drivers/iio/chemical/scd4x.c
@@ -358,15 +358,14 @@ static int scd4x_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
}
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&state->lock);
ret = scd4x_read_channel(state, chan->address);
mutex_unlock(&state->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -676,7 +675,8 @@ static irqreturn_t scd4x_trigger_handler(int irq, void *p)
if (ret)
goto out;
- iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/chemical/sen0322.c b/drivers/iio/chemical/sen0322.c
new file mode 100644
index 000000000000..96c6fc1203ad
--- /dev/null
+++ b/drivers/iio/chemical/sen0322.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the DFRobot SEN0322 oxygen sensor.
+ *
+ * Datasheet:
+ * https://wiki.dfrobot.com/Gravity_I2C_Oxygen_Sensor_SKU_SEN0322
+ *
+ * Possible I2C slave addresses:
+ * 0x70
+ * 0x71
+ * 0x72
+ * 0x73
+ *
+ * Copyright (C) 2025 Tóth János <gomba007@gmail.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+
+#define SEN0322_REG_DATA 0x03
+#define SEN0322_REG_COEFF 0x0A
+
+struct sen0322 {
+ struct regmap *regmap;
+};
+
+static int sen0322_read_data(struct sen0322 *sen0322)
+{
+ u8 data[3] = { };
+ int ret;
+
+ ret = regmap_bulk_read(sen0322->regmap, SEN0322_REG_DATA, data,
+ sizeof(data));
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * The actual value in the registers is:
+	 *   val = data[0] + data[1] / 10 + data[2] / 100
+	 * but it is multiplied by 100 here to avoid floating-point math,
+	 * and the scale is divided by 100 to compensate for this.
+	 */
+ return data[0] * 100 + data[1] * 10 + data[2];
+}
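
Worked example with illustrative register contents: bytes {20, 9, 3} encode
20 + 9/10 + 3/100 = 20.93, so sen0322_read_data() returns
20 * 100 + 9 * 10 + 3 = 2093; the deferred division by 100 happens through the
SCALE factor below.
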
+
+static int sen0322_read_scale(struct sen0322 *sen0322, int *num, int *den)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(sen0322->regmap, SEN0322_REG_COEFF, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val) {
+ *num = val;
+ *den = 100000; /* Coeff is scaled by 1000 at calibration. */
+	} else { /* The device is not calibrated, use the factory defaults. */
+ *num = 209; /* Oxygen content in the atmosphere is 20.9%. */
+ *den = 120000; /* Output of the sensor at 20.9% is 120 uA. */
+ }
+
+ dev_dbg(regmap_get_device(sen0322->regmap), "scale: %d/%d\n",
+ *num, *den);
+
+ return 0;
+}
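
Cross-checking the factory defaults by hand: at 20.9 % O₂ the sensor nominally
outputs 120 uA, which sen0322_read_data() reports as 12000 (scaled by 100).
Applying the fraction, 12000 * 209 / 120000 = 20.9, so raw * scale lands in
percent as intended.
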
+
+static int sen0322_read_raw(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct sen0322 *sen0322 = iio_priv(iio_dev);
+ int ret;
+
+ if (chan->type != IIO_CONCENTRATION)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = sen0322_read_data(sen0322);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = sen0322_read_scale(sen0322, val, val2);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info sen0322_info = {
+ .read_raw = sen0322_read_raw,
+};
+
+static const struct regmap_config sen0322_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct iio_chan_spec sen0322_channel = {
+ .type = IIO_CONCENTRATION,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+};
+
+static int sen0322_probe(struct i2c_client *client)
+{
+ struct sen0322 *sen0322;
+ struct iio_dev *iio_dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*sen0322));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ sen0322 = iio_priv(iio_dev);
+
+ sen0322->regmap = devm_regmap_init_i2c(client, &sen0322_regmap_conf);
+ if (IS_ERR(sen0322->regmap))
+ return PTR_ERR(sen0322->regmap);
+
+ iio_dev->info = &sen0322_info;
+ iio_dev->name = "sen0322";
+ iio_dev->channels = &sen0322_channel;
+ iio_dev->num_channels = 1;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(&client->dev, iio_dev);
+}
+
+static const struct of_device_id sen0322_of_match[] = {
+ { .compatible = "dfrobot,sen0322" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sen0322_of_match);
+
+static struct i2c_driver sen0322_driver = {
+ .driver = {
+ .name = "sen0322",
+ .of_match_table = sen0322_of_match,
+ },
+ .probe = sen0322_probe,
+};
+module_i2c_driver(sen0322_driver);
+
+MODULE_AUTHOR("Tóth János <gomba007@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SEN0322 oxygen sensor driver");
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index a7888146188d..a934bf0298dd 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -117,8 +117,8 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
if (ret)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/chemical/sunrise_co2.c b/drivers/iio/chemical/sunrise_co2.c
index cdb8696a4e81..af79efde37e8 100644
--- a/drivers/iio/chemical/sunrise_co2.c
+++ b/drivers/iio/chemical/sunrise_co2.c
@@ -373,7 +373,7 @@ static const struct iio_chan_spec_ext_info sunrise_concentration_ext_info[] = {
.read = iio_enum_available_read,
.private = (uintptr_t)&sunrise_error_statuses_enum,
},
- {}
+ { }
};
static const struct iio_chan_spec sunrise_channels[] = {
@@ -519,7 +519,7 @@ static int sunrise_probe(struct i2c_client *client)
static const struct of_device_id sunrise_of_match[] = {
{ .compatible = "senseair,sunrise-006-0-0007" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, sunrise_of_match);
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
index 119acb078af3..2d3d148b4206 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
@@ -121,7 +121,7 @@ static const struct platform_device_id cros_ec_lid_angle_ids[] = {
{
.name = DRV_NAME,
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, cros_ec_lid_angle_ids);
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 66153b1850f1..82cef4a12442 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -311,7 +311,7 @@ static const struct platform_device_id cros_ec_sensors_ids[] = {
{
.name = "cros-ec-mag",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 7751d6f69b12..700ebcd68ff4 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -34,25 +34,19 @@
static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev,
u16 cmd_offset, u16 cmd, u32 *mask)
{
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data,
+ MAX(sizeof(struct ec_response_get_cmd_versions),
+ sizeof(struct ec_params_get_cmd_versions)));
int ret;
- struct {
- struct cros_ec_command msg;
- union {
- struct ec_params_get_cmd_versions params;
- struct ec_response_get_cmd_versions resp;
- };
- } __packed buf = {
- .msg = {
- .command = EC_CMD_GET_CMD_VERSIONS + cmd_offset,
- .insize = sizeof(struct ec_response_get_cmd_versions),
- .outsize = sizeof(struct ec_params_get_cmd_versions)
- },
- .params = {.cmd = cmd}
- };
-
- ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
+
+ buf->command = EC_CMD_GET_CMD_VERSIONS + cmd_offset;
+ buf->insize = sizeof(struct ec_response_get_cmd_versions);
+ buf->outsize = sizeof(struct ec_params_get_cmd_versions);
+ ((struct ec_params_get_cmd_versions *)buf->data)->cmd = cmd;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, buf);
if (ret >= 0)
- *mask = buf.resp.version_mask;
+ *mask = ((struct ec_response_get_cmd_versions *)buf->data)->version_mask;
return ret;
}
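
For readers unfamiliar with DEFINE_RAW_FLEX(): going by its definition in
<linux/overflow.h>, it declares a zero-initialized on-stack object sized for
the struct plus the requested trailing flexible-array bytes, together with a
typed pointer to it. Roughly the following sketch, where N stands for the
MAX() expression above (a paraphrase, not the exact macro expansion):

	union {
		u8 bytes[struct_size_t(struct cros_ec_command, data, N)];
		struct cros_ec_command obj;
	} buf_u = {};
	struct cros_ec_command *buf = &buf_u.obj;
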
@@ -97,22 +91,6 @@ static void get_default_min_max_freq(enum motionsensor_type type,
}
}
-static int cros_ec_sensor_set_ec_rate(struct cros_ec_sensors_core_state *st,
- int rate)
-{
- int ret;
-
- if (rate > U16_MAX)
- rate = U16_MAX;
-
- mutex_lock(&st->cmd_lock);
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data = rate;
- ret = cros_ec_motion_send_host_cmd(st, 0);
- mutex_unlock(&st->cmd_lock);
- return ret;
-}
-
static ssize_t cros_ec_sensor_set_report_latency(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
@@ -128,7 +106,25 @@ static ssize_t cros_ec_sensor_set_report_latency(struct device *dev,
/* EC rate is in ms. */
latency = integer * 1000 + fract / 1000;
- ret = cros_ec_sensor_set_ec_rate(st, latency);
+
+ mutex_lock(&st->cmd_lock);
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data = min(U16_MAX, latency);
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ if (ret < 0) {
+ mutex_unlock(&st->cmd_lock);
+ return ret;
+ }
+
+	/*
+	 * Flush samples currently in the FIFO, especially when the new latency
+	 * is shorter than the old one: the new timeout value is only applied
+	 * once a new sample is available, which can take a while for a slow
+	 * sensor.
+	 */
+ st->param.cmd = MOTIONSENSE_CMD_FIFO_FLUSH;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ mutex_unlock(&st->cmd_lock);
if (ret < 0)
return ret;
@@ -486,7 +482,7 @@ const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
.shared = IIO_SHARED_BY_ALL,
.read = cros_ec_sensors_id
},
- { },
+ { }
};
EXPORT_SYMBOL_GPL(cros_ec_sensors_ext_info);
@@ -838,6 +834,18 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
st->param.sensor_odr.roundup = 1;
ret = cros_ec_motion_send_host_cmd(st, 0);
+ if (ret)
+ break;
+
+		/*
+		 * Flush the FIFO when a sensor is stopped.
+		 * If the FIFO has just been emptied, pending samples would
+		 * otherwise be stuck until new samples arrive, which never
+		 * happens once all the sensors are stopped.
+		 */
+ if (frequency == 0) {
+ st->param.cmd = MOTIONSENSE_CMD_FIFO_FLUSH;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ }
break;
default:
ret = -EINVAL;
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index ed15dcbf4cf6..da516c46e057 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -351,12 +351,11 @@ static int scmi_iio_read_raw(struct iio_dev *iio_dev,
ret = scmi_iio_get_odr_val(iio_dev, val, val2);
return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(iio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
ret = scmi_iio_read_channel_data(iio_dev, ch, val, val2);
- iio_device_release_direct_mode(iio_dev);
+ iio_device_release_direct(iio_dev);
return ret;
default:
return -EINVAL;
@@ -418,7 +417,7 @@ static const struct iio_chan_spec_ext_info scmi_iio_ext_info[] = {
.read = scmi_iio_get_raw_available,
.shared = IIO_SHARED_BY_TYPE,
},
- {},
+ { }
};
static void scmi_iio_set_timestamp_channel(struct iio_chan_spec *iio_chan,
@@ -705,7 +704,7 @@ static int scmi_iio_dev_probe(struct scmi_device *sdev)
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_SENSOR, "iiodev" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 22ea10eb48ae..1e167dc673ca 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -167,7 +167,7 @@ static void ssp_wdt_work_func(struct work_struct *work)
static void ssp_wdt_timer_func(struct timer_list *t)
{
- struct ssp_data *data = from_timer(data, t, wdt_timer);
+ struct ssp_data *data = timer_container_of(data, t, wdt_timer);
switch (data->fw_dl_state) {
case SSP_FW_DL_STATE_FAIL:
@@ -434,7 +434,7 @@ static const struct of_device_id ssp_of_match[] = {
.compatible = "samsung,sensorhub-thermostat",
.data = &ssp_thermostat_info,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ssp_of_match);
diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c
index f32b04b63ea1..b7f093d7345b 100644
--- a/drivers/iio/common/ssp_sensors/ssp_spi.c
+++ b/drivers/iio/common/ssp_sensors/ssp_spi.c
@@ -104,7 +104,7 @@ static struct ssp_msg *ssp_create_msg(u8 cmd, u16 len, u16 opt, u32 data)
/*
* It is a bit heavy to do it this way but often the function is used to compose
* the message from smaller chunks which are placed on the stack. Often the
- * chunks are small so memcpy should be optimalized.
+ * chunks are small so memcpy should be optimized.
*/
static inline void ssp_fill_buffer(struct ssp_msg *m, unsigned int offset,
const void *src, unsigned int len)
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index e4f5a7ff7e74..8ce1dccfea4f 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -530,9 +530,8 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
int err;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- err = iio_device_claim_direct_mode(indio_dev);
- if (err)
- return err;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&sdata->odr_lock);
@@ -551,7 +550,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
out:
mutex_unlock(&sdata->odr_lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return err;
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 4811ea973125..e0996dc014a3 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -6,6 +6,17 @@
menu "Digital to analog converters"
+config AD3530R
+ tristate "Analog Devices AD3530R and Similar DACs driver"
+ depends on SPI
+ select REGMAP_SPI
+ help
+	  Say yes here to build support for the Analog Devices AD3530R and
+	  AD3531R Digital to Analog Converters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad3530r.
+
config AD3552R_HS
tristate "Analog Devices AD3552R DAC High Speed driver"
select AD3552R_LIB
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 8dd6cce81ed1..3684cd52b7fa 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -4,6 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD3530R) += ad3530r.o
obj-$(CONFIG_AD3552R_HS) += ad3552r-hs.o
obj-$(CONFIG_AD3552R_LIB) += ad3552r-common.o
obj-$(CONFIG_AD3552R) += ad3552r.o
diff --git a/drivers/iio/dac/ad3530r.c b/drivers/iio/dac/ad3530r.c
new file mode 100644
index 000000000000..f9752a571aa5
--- /dev/null
+++ b/drivers/iio/dac/ad3530r.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD3530R/AD3530 8-channel, 16-bit Voltage Output DAC Driver
+ * AD3531R/AD3531 4-channel, 16-bit Voltage Output DAC Driver
+ *
+ * Copyright 2025 Analog Devices Inc.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/kstrtox.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#define AD3530R_INTERFACE_CONFIG_A 0x00
+#define AD3530R_OUTPUT_OPERATING_MODE_0 0x20
+#define AD3530R_OUTPUT_OPERATING_MODE_1 0x21
+#define AD3530R_OUTPUT_CONTROL_0 0x2A
+#define AD3530R_REFERENCE_CONTROL_0 0x3C
+#define AD3530R_SW_LDAC_TRIG_A 0xE5
+#define AD3530R_INPUT_CH 0xEB
+#define AD3530R_MAX_REG_ADDR 0xF9
+
+#define AD3531R_SW_LDAC_TRIG_A 0xDD
+#define AD3531R_INPUT_CH 0xE3
+
+#define AD3530R_SLD_TRIG_A BIT(7)
+#define AD3530R_OUTPUT_CONTROL_RANGE BIT(2)
+#define AD3530R_REFERENCE_CONTROL_SEL BIT(0)
+#define AD3530R_REG_VAL_MASK GENMASK(15, 0)
+#define AD3530R_OP_MODE_CHAN_MSK(chan) (GENMASK(1, 0) << 2 * (chan))
+
+#define AD3530R_SW_RESET (BIT(7) | BIT(0))
+#define AD3530R_INTERNAL_VREF_mV 2500
+#define AD3530R_LDAC_PULSE_US 100
+
+#define AD3530R_DAC_MAX_VAL GENMASK(15, 0)
+#define AD3530R_MAX_CHANNELS 8
+#define AD3531R_MAX_CHANNELS 4
+
+/* Non-constant mask variant of FIELD_PREP() */
+#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
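
Hand-evaluating the macro pair for one case: with chan = 1,
AD3530R_OP_MODE_CHAN_MSK(1) is GENMASK(1, 0) << 2 == 0b1100, and since
ffs(0b1100) - 1 == 2, field_prep(0b1100, AD3530R_POWERDOWN_32K) evaluates to
(3 << 2) & 0b1100 == 0b1100, i.e. both mode bits of channel 1 set.
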
+
+enum ad3530r_mode {
+ AD3530R_NORMAL_OP,
+ AD3530R_POWERDOWN_1K,
+ AD3530R_POWERDOWN_7K7,
+ AD3530R_POWERDOWN_32K,
+};
+
+struct ad3530r_chan {
+ enum ad3530r_mode powerdown_mode;
+ bool powerdown;
+};
+
+struct ad3530r_chip_info {
+ const char *name;
+ const struct iio_chan_spec *channels;
+ int (*input_ch_reg)(unsigned int channel);
+ unsigned int num_channels;
+ unsigned int sw_ldac_trig_reg;
+ bool internal_ref_support;
+};
+
+struct ad3530r_state {
+ struct regmap *regmap;
+ /* lock to protect against multiple access to the device and shared data */
+ struct mutex lock;
+ struct ad3530r_chan chan[AD3530R_MAX_CHANNELS];
+ const struct ad3530r_chip_info *chip_info;
+ struct gpio_desc *ldac_gpio;
+ int vref_mV;
+ /*
+ * DMA (thus cache coherency maintenance) may require the transfer
+ * buffers to live in their own cache lines.
+ */
+ __be16 buf __aligned(IIO_DMA_MINALIGN);
+};
+
+static int ad3530r_input_ch_reg(unsigned int channel)
+{
+ return 2 * channel + AD3530R_INPUT_CH;
+}
+
+static int ad3531r_input_ch_reg(unsigned int channel)
+{
+ return 2 * channel + AD3531R_INPUT_CH;
+}
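
The address arithmetic worked out (values from the defines above): each
channel's 16-bit input register occupies two bytes from the base, so channel n
maps to base + 2n. On the AD3530R channel 3 lands at 0xEB + 6 = 0xF1; on the
AD3531R it lands at 0xE3 + 6 = 0xE9.
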
+
+static const char * const ad3530r_powerdown_modes[] = {
+ "1kohm_to_gnd",
+ "7.7kohm_to_gnd",
+ "32kohm_to_gnd",
+};
+
+static int ad3530r_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad3530r_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+ return st->chan[chan->channel].powerdown_mode - 1;
+}
+
+static int ad3530r_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ad3530r_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+ st->chan[chan->channel].powerdown_mode = mode + 1;
+
+ return 0;
+}
+
+static const struct iio_enum ad3530r_powerdown_mode_enum = {
+ .items = ad3530r_powerdown_modes,
+ .num_items = ARRAY_SIZE(ad3530r_powerdown_modes),
+ .get = ad3530r_get_powerdown_mode,
+ .set = ad3530r_set_powerdown_mode,
+};
+
+static ssize_t ad3530r_get_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad3530r_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+ return sysfs_emit(buf, "%d\n", st->chan[chan->channel].powerdown);
+}
+
+static ssize_t ad3530r_set_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad3530r_state *st = iio_priv(indio_dev);
+ int ret;
+ unsigned int reg, pdmode, mask, val;
+ bool powerdown;
+
+ ret = kstrtobool(buf, &powerdown);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&st->lock);
+ reg = chan->channel < AD3531R_MAX_CHANNELS ?
+ AD3530R_OUTPUT_OPERATING_MODE_0 :
+ AD3530R_OUTPUT_OPERATING_MODE_1;
+ pdmode = powerdown ? st->chan[chan->channel].powerdown_mode : 0;
+	mask = AD3530R_OP_MODE_CHAN_MSK(chan->channel % AD3531R_MAX_CHANNELS);
+ val = field_prep(mask, pdmode);
+
+ ret = regmap_update_bits(st->regmap, reg, mask, val);
+ if (ret)
+ return ret;
+
+ st->chan[chan->channel].powerdown = powerdown;
+
+ return len;
+}
+
+static int ad3530r_trigger_hw_ldac(struct gpio_desc *ldac_gpio)
+{
+ gpiod_set_value_cansleep(ldac_gpio, 1);
+ fsleep(AD3530R_LDAC_PULSE_US);
+ gpiod_set_value_cansleep(ldac_gpio, 0);
+
+ return 0;
+}
+
+static int ad3530r_dac_write(struct ad3530r_state *st, unsigned int chan,
+ unsigned int val)
+{
+ int ret;
+
+ guard(mutex)(&st->lock);
+ st->buf = cpu_to_be16(val);
+
+ ret = regmap_bulk_write(st->regmap, st->chip_info->input_ch_reg(chan),
+ &st->buf, sizeof(st->buf));
+ if (ret)
+ return ret;
+
+ if (st->ldac_gpio)
+ return ad3530r_trigger_hw_ldac(st->ldac_gpio);
+
+ return regmap_set_bits(st->regmap, st->chip_info->sw_ldac_trig_reg,
+ AD3530R_SLD_TRIG_A);
+}
+
+static int ad3530r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad3530r_state *st = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&st->lock);
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_bulk_read(st->regmap,
+ st->chip_info->input_ch_reg(chan->channel),
+ &st->buf, sizeof(st->buf));
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD3530R_REG_VAL_MASK, be16_to_cpu(st->buf));
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_mV;
+ *val2 = 16;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
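
Sanity-checking the SCALE path: IIO_VAL_FRACTIONAL_LOG2 with val = vref_mV and
val2 = 16 means scale = vref_mV / 2^16 mV per code. With the internal 2.5 V
reference and no range doubling, a raw code of 0x8000 therefore corresponds to
2500 * 32768 / 65536 = 1250 mV at the output (illustrative numbers, not
measured data).
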
+
+static int ad3530r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad3530r_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (val < 0 || val > AD3530R_DAC_MAX_VAL)
+ return -EINVAL;
+
+ return ad3530r_dac_write(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad3530r_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct ad3530r_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static const struct iio_chan_spec_ext_info ad3530r_ext_info[] = {
+ {
+ .name = "powerdown",
+ .shared = IIO_SEPARATE,
+ .read = ad3530r_get_dac_powerdown,
+ .write = ad3530r_set_dac_powerdown,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad3530r_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &ad3530r_powerdown_mode_enum),
+ { }
+};
+
+#define AD3530R_CHAN(_chan) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = ad3530r_ext_info, \
+}
+
+static const struct iio_chan_spec ad3530r_channels[] = {
+ AD3530R_CHAN(0),
+ AD3530R_CHAN(1),
+ AD3530R_CHAN(2),
+ AD3530R_CHAN(3),
+ AD3530R_CHAN(4),
+ AD3530R_CHAN(5),
+ AD3530R_CHAN(6),
+ AD3530R_CHAN(7),
+};
+
+static const struct iio_chan_spec ad3531r_channels[] = {
+ AD3530R_CHAN(0),
+ AD3530R_CHAN(1),
+ AD3530R_CHAN(2),
+ AD3530R_CHAN(3),
+};
+
+static const struct ad3530r_chip_info ad3530_chip = {
+ .name = "ad3530",
+ .channels = ad3530r_channels,
+ .num_channels = ARRAY_SIZE(ad3530r_channels),
+ .sw_ldac_trig_reg = AD3530R_SW_LDAC_TRIG_A,
+ .input_ch_reg = ad3530r_input_ch_reg,
+ .internal_ref_support = false,
+};
+
+static const struct ad3530r_chip_info ad3530r_chip = {
+ .name = "ad3530r",
+ .channels = ad3530r_channels,
+ .num_channels = ARRAY_SIZE(ad3530r_channels),
+ .sw_ldac_trig_reg = AD3530R_SW_LDAC_TRIG_A,
+ .input_ch_reg = ad3530r_input_ch_reg,
+ .internal_ref_support = true,
+};
+
+static const struct ad3530r_chip_info ad3531_chip = {
+ .name = "ad3531",
+ .channels = ad3531r_channels,
+ .num_channels = ARRAY_SIZE(ad3531r_channels),
+ .sw_ldac_trig_reg = AD3531R_SW_LDAC_TRIG_A,
+ .input_ch_reg = ad3531r_input_ch_reg,
+ .internal_ref_support = false,
+};
+
+static const struct ad3530r_chip_info ad3531r_chip = {
+ .name = "ad3531r",
+ .channels = ad3531r_channels,
+ .num_channels = ARRAY_SIZE(ad3531r_channels),
+ .sw_ldac_trig_reg = AD3531R_SW_LDAC_TRIG_A,
+ .input_ch_reg = ad3531r_input_ch_reg,
+ .internal_ref_support = true,
+};
+
+static int ad3530r_setup(struct ad3530r_state *st, int external_vref_uV)
+{
+ struct device *dev = regmap_get_device(st->regmap);
+ struct gpio_desc *reset_gpio;
+ int i, ret;
+ u8 range_multiplier, val;
+
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to get reset GPIO\n");
+
+ if (reset_gpio) {
+ /* Perform hardware reset */
+ fsleep(1 * USEC_PER_MSEC);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ } else {
+ /* Perform software reset */
+ ret = regmap_update_bits(st->regmap, AD3530R_INTERFACE_CONFIG_A,
+ AD3530R_SW_RESET, AD3530R_SW_RESET);
+ if (ret)
+ return ret;
+ }
+
+ fsleep(10 * USEC_PER_MSEC);
+
+ range_multiplier = 1;
+ if (device_property_read_bool(dev, "adi,range-double")) {
+ ret = regmap_set_bits(st->regmap, AD3530R_OUTPUT_CONTROL_0,
+ AD3530R_OUTPUT_CONTROL_RANGE);
+ if (ret)
+ return ret;
+
+ range_multiplier = 2;
+ }
+
+ if (external_vref_uV) {
+ st->vref_mV = range_multiplier * external_vref_uV / MILLI;
+ } else {
+ ret = regmap_set_bits(st->regmap, AD3530R_REFERENCE_CONTROL_0,
+ AD3530R_REFERENCE_CONTROL_SEL);
+ if (ret)
+ return ret;
+
+ st->vref_mV = range_multiplier * AD3530R_INTERNAL_VREF_mV;
+ }
+
+ /* Set normal operating mode for all channels */
+ val = FIELD_PREP(AD3530R_OP_MODE_CHAN_MSK(0), AD3530R_NORMAL_OP) |
+ FIELD_PREP(AD3530R_OP_MODE_CHAN_MSK(1), AD3530R_NORMAL_OP) |
+ FIELD_PREP(AD3530R_OP_MODE_CHAN_MSK(2), AD3530R_NORMAL_OP) |
+ FIELD_PREP(AD3530R_OP_MODE_CHAN_MSK(3), AD3530R_NORMAL_OP);
+
+ ret = regmap_write(st->regmap, AD3530R_OUTPUT_OPERATING_MODE_0, val);
+ if (ret)
+ return ret;
+
+ if (st->chip_info->num_channels > 4) {
+ ret = regmap_write(st->regmap, AD3530R_OUTPUT_OPERATING_MODE_1,
+ val);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < st->chip_info->num_channels; i++)
+ st->chan[i].powerdown_mode = AD3530R_POWERDOWN_32K;
+
+ st->ldac_gpio = devm_gpiod_get_optional(dev, "ldac", GPIOD_OUT_LOW);
+ if (IS_ERR(st->ldac_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->ldac_gpio),
+ "Failed to get ldac GPIO\n");
+
+ return 0;
+}
+
+static const struct regmap_config ad3530r_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = AD3530R_MAX_REG_ADDR,
+};
+
+static const struct iio_info ad3530r_info = {
+ .read_raw = ad3530r_read_raw,
+ .write_raw = ad3530r_write_raw,
+ .debugfs_reg_access = ad3530r_reg_access,
+};
+
+static int ad3530r_probe(struct spi_device *spi)
+{
+ static const char * const regulators[] = { "vdd", "iovdd" };
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ad3530r_state *st;
+ int ret, external_vref_uV;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->regmap = devm_regmap_init_spi(spi, &ad3530r_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap),
+				     "Failed to init regmap\n");
+
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
+ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return -ENODEV;
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators),
+ regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulators\n");
+
+ external_vref_uV = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (external_vref_uV < 0 && external_vref_uV != -ENODEV)
+ return external_vref_uV;
+
+ if (external_vref_uV == -ENODEV)
+ external_vref_uV = 0;
+
+ if (!st->chip_info->internal_ref_support && external_vref_uV == 0)
+ return -ENODEV;
+
+ ret = ad3530r_setup(st, external_vref_uV);
+ if (ret)
+ return ret;
+
+ indio_dev->name = st->chip_info->name;
+ indio_dev->info = &ad3530r_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = st->chip_info->num_channels;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad3530r_id[] = {
+ { "ad3530", (kernel_ulong_t)&ad3530_chip },
+ { "ad3530r", (kernel_ulong_t)&ad3530r_chip },
+ { "ad3531", (kernel_ulong_t)&ad3531_chip },
+ { "ad3531r", (kernel_ulong_t)&ad3531r_chip },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad3530r_id);
+
+static const struct of_device_id ad3530r_of_match[] = {
+ { .compatible = "adi,ad3530", .data = &ad3530_chip },
+ { .compatible = "adi,ad3530r", .data = &ad3530r_chip },
+ { .compatible = "adi,ad3531", .data = &ad3531_chip },
+ { .compatible = "adi,ad3531r", .data = &ad3531r_chip },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad3530r_of_match);
+
+static struct spi_driver ad3530r_driver = {
+ .driver = {
+ .name = "ad3530r",
+ .of_match_table = ad3530r_of_match,
+ },
+ .probe = ad3530r_probe,
+ .id_table = ad3530r_id,
+};
+module_spi_driver(ad3530r_driver);
+
+MODULE_AUTHOR("Kim Seer Paller <kimseer.paller@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD3530R and Similar DACs Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/dac/ad3552r-common.c b/drivers/iio/dac/ad3552r-common.c
index b8807e54fa05..38baaea0e6c8 100644
--- a/drivers/iio/dac/ad3552r-common.c
+++ b/drivers/iio/dac/ad3552r-common.c
@@ -43,6 +43,7 @@ const struct ad3552r_model_data ad3541r_model_data = {
.num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
.requires_output_range = true,
.num_spi_data_lanes = 2,
+ .max_reg_addr = 0x46,
};
EXPORT_SYMBOL_NS_GPL(ad3541r_model_data, "IIO_AD3552R");
@@ -54,6 +55,7 @@ const struct ad3552r_model_data ad3542r_model_data = {
.num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
.requires_output_range = true,
.num_spi_data_lanes = 2,
+ .max_reg_addr = 0x49,
};
EXPORT_SYMBOL_NS_GPL(ad3542r_model_data, "IIO_AD3552R");
@@ -65,6 +67,7 @@ const struct ad3552r_model_data ad3551r_model_data = {
.num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
.requires_output_range = false,
.num_spi_data_lanes = 4,
+ .max_reg_addr = 0x46,
};
EXPORT_SYMBOL_NS_GPL(ad3551r_model_data, "IIO_AD3552R");
@@ -76,6 +79,7 @@ const struct ad3552r_model_data ad3552r_model_data = {
.num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
.requires_output_range = false,
.num_spi_data_lanes = 4,
+ .max_reg_addr = 0x49,
};
EXPORT_SYMBOL_NS_GPL(ad3552r_model_data, "IIO_AD3552R");
diff --git a/drivers/iio/dac/ad3552r-hs.c b/drivers/iio/dac/ad3552r-hs.c
index cd8dabb60c55..41b96b48ba98 100644
--- a/drivers/iio/dac/ad3552r-hs.c
+++ b/drivers/iio/dac/ad3552r-hs.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/iio/backend.h>
@@ -54,6 +55,18 @@ struct ad3552r_hs_state {
struct ad3552r_hs_platform_data *data;
/* INTERFACE_CONFIG_D register cache, in DDR we cannot read values. */
u32 config_d;
+ /* Protects backend I/O operations from concurrent accesses. */
+ struct mutex lock;
+};
+
+enum ad3552r_sources {
+ AD3552R_SRC_NORMAL,
+ AD3552R_SRC_RAMP_16BIT,
+};
+
+static const char * const dbgfs_attr_source[] = {
+ [AD3552R_SRC_NORMAL] = "normal",
+ [AD3552R_SRC_RAMP_16BIT] = "ramp-16bit",
};
static int ad3552r_hs_reg_read(struct ad3552r_hs_state *st, u32 reg, u32 *val,
@@ -65,6 +78,20 @@ static int ad3552r_hs_reg_read(struct ad3552r_hs_state *st, u32 reg, u32 *val,
return st->data->bus_reg_read(st->back, reg, val, xfer_size);
}
+static int ad3552r_hs_set_data_source(struct ad3552r_hs_state *st,
+ enum iio_backend_data_source type)
+{
+ int i, ret;
+
+ for (i = 0; i < st->model_data->num_hw_channels; ++i) {
+ ret = iio_backend_data_source_set(st->back, i, type);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ad3552r_hs_update_reg_bits(struct ad3552r_hs_state *st, u32 reg,
u32 mask, u32 val, size_t xfer_size)
{
@@ -464,6 +491,122 @@ static int ad3552r_hs_setup_custom_gain(struct ad3552r_hs_state *st,
gain, 1);
}
+static int ad3552r_hs_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct ad3552r_hs_state *st = iio_priv(indio_dev);
+
+ if (reg > st->model_data->max_reg_addr)
+ return -EINVAL;
+
+	/*
+	 * Registers are 8, 16 or 24 bits wide, but the HDL only supports
+	 * 8- or 16-bit reads, not 24. To avoid having to check proper read
+	 * alignment as well, only 8-bit reads are supported here.
+	 */
+ if (readval)
+ return ad3552r_hs_reg_read(st, reg, readval, 1);
+
+ return st->data->bus_reg_write(st->back, reg, writeval, 1);
+}
+
+static ssize_t ad3552r_hs_show_data_source(struct file *f, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ad3552r_hs_state *st = file_inode(f)->i_private;
+ enum iio_backend_data_source type;
+ int idx, ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = iio_backend_data_source_get(st->back, 0, &type);
+ if (ret)
+ return ret;
+
+ switch (type) {
+ case IIO_BACKEND_INTERNAL_RAMP_16BIT:
+ idx = AD3552R_SRC_RAMP_16BIT;
+ break;
+ case IIO_BACKEND_EXTERNAL:
+ idx = AD3552R_SRC_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos,
+ dbgfs_attr_source[idx],
+ strlen(dbgfs_attr_source[idx]));
+}
+
+static ssize_t ad3552r_hs_write_data_source(struct file *f,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct ad3552r_hs_state *st = file_inode(f)->i_private;
+ char buf[64];
+ int ret, source;
+
+ guard(mutex)(&st->lock);
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf,
+ count);
+ if (ret < 0)
+ return ret;
+
+	buf[ret] = '\0';
+
+ ret = match_string(dbgfs_attr_source, ARRAY_SIZE(dbgfs_attr_source),
+ buf);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case AD3552R_SRC_RAMP_16BIT:
+ source = IIO_BACKEND_INTERNAL_RAMP_16BIT;
+ break;
+ case AD3552R_SRC_NORMAL:
+ source = IIO_BACKEND_EXTERNAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = ad3552r_hs_set_data_source(st, source);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ad3552r_hs_show_data_source_avail(struct file *f,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t len = 0;
+ char buf[128];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dbgfs_attr_source); i++) {
+		len += scnprintf(buf + len, sizeof(buf) - len, "%s ",
+ dbgfs_attr_source[i]);
+ }
+ buf[len - 1] = '\n';
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations ad3552r_hs_data_source_fops = {
+ .owner = THIS_MODULE,
+ .write = ad3552r_hs_write_data_source,
+ .read = ad3552r_hs_show_data_source,
+};
+
+static const struct file_operations ad3552r_hs_data_source_avail_fops = {
+ .owner = THIS_MODULE,
+ .read = ad3552r_hs_show_data_source_avail,
+};
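
Usage note (paths assumed, not quoted from the patch): the files backed by
these ops are created in ad3552r_hs_debugfs_init() further down, under the
device's IIO debugfs directory, typically /sys/kernel/debug/iio/iio:deviceN.
From there a 16-bit ramp test pattern can be selected with
"echo ramp-16bit > data_source" and read back with "cat data_source", while
data_source_available lists the accepted values.
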
+
static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
{
u16 id;
@@ -531,11 +674,7 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
if (ret)
return ret;
- ret = iio_backend_data_source_set(st->back, 0, IIO_BACKEND_EXTERNAL);
- if (ret)
- return ret;
-
- ret = iio_backend_data_source_set(st->back, 1, IIO_BACKEND_EXTERNAL);
+ ret = ad3552r_hs_set_data_source(st, IIO_BACKEND_EXTERNAL);
if (ret)
return ret;
@@ -639,8 +778,29 @@ static const struct iio_chan_spec ad3552r_hs_channels[] = {
static const struct iio_info ad3552r_hs_info = {
.read_raw = &ad3552r_hs_read_raw,
.write_raw = &ad3552r_hs_write_raw,
+ .debugfs_reg_access = &ad3552r_hs_reg_access,
};
+static void ad3552r_hs_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct ad3552r_hs_state *st = iio_priv(indio_dev);
+	struct dentry *d;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
+ d = iio_get_debugfs_dentry(indio_dev);
+ if (!d) {
+ dev_warn(st->dev, "can't set debugfs in driver dir\n");
+ return;
+ }
+
+ debugfs_create_file("data_source", 0600, d, st,
+ &ad3552r_hs_data_source_fops);
+ debugfs_create_file("data_source_available", 0600, d, st,
+ &ad3552r_hs_data_source_avail_fops);
+}
+
static int ad3552r_hs_probe(struct platform_device *pdev)
{
struct ad3552r_hs_state *st;
@@ -685,7 +845,17 @@ static int ad3552r_hs_probe(struct platform_device *pdev)
if (ret)
return ret;
- return devm_iio_device_register(&pdev->dev, indio_dev);
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(&pdev->dev, &st->lock);
+ if (ret)
+ return ret;
+
+ ad3552r_hs_debugfs_init(indio_dev);
+
+ return ret;
}
static const struct of_device_id ad3552r_hs_of_id[] = {
diff --git a/drivers/iio/dac/ad3552r.h b/drivers/iio/dac/ad3552r.h
index 768fa264d39e..9bb46a9e07a5 100644
--- a/drivers/iio/dac/ad3552r.h
+++ b/drivers/iio/dac/ad3552r.h
@@ -156,6 +156,7 @@ struct ad3552r_model_data {
int num_ranges;
bool requires_output_range;
int num_spi_data_lanes;
+ int max_reg_addr;
};
struct ad3552r_ch_data {
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 905988724f27..84be5174babd 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -378,7 +378,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5064_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5064_powerdown_mode_enum),
- { },
+ { }
};
static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = {
@@ -390,7 +390,7 @@ static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ltc2617_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ltc2617_powerdown_mode_enum),
- { },
+ { }
};
#define AD5064_CHANNEL(chan, addr, bits, _shift, _ext_info) { \
@@ -936,7 +936,7 @@ static const struct spi_device_id ad5064_spi_ids[] = {
{"ad5668-1", ID_AD5668_1},
{"ad5668-2", ID_AD5668_2},
{"ad5668-3", ID_AD5668_2}, /* similar enough to ad5668-2 */
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5064_spi_ids);
@@ -1048,7 +1048,7 @@ static const struct i2c_device_id ad5064_i2c_ids[] = {
{"ltc2635-h10", ID_LTC2635_H10},
{"ltc2635-l8", ID_LTC2635_L8},
{"ltc2635-h8", ID_LTC2635_H8},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index e0b7f658d611..a57b0a093112 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -542,7 +542,7 @@ static const struct spi_device_id ad5360_ids[] = {
{ "ad5371", ID_AD5371 },
{ "ad5372", ID_AD5372 },
{ "ad5373", ID_AD5373 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5360_ids);
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 392a1c7aee03..f63af704b77e 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -246,7 +246,7 @@ static const struct iio_chan_spec_ext_info ad5380_ext_info[] = {
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5380_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5380_powerdown_mode_enum),
- { },
+ { }
};
#define AD5380_CHANNEL(_bits) { \
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 6ad99f97eed5..ad304b0fec08 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -141,7 +141,7 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5446_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5446_powerdown_mode_enum),
- { },
+ { }
};
#define _AD5446_CHANNEL(bits, storage, _shift, ext) { \
@@ -440,7 +440,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
{"dac101s101", ID_AD5310},
{"dac121s101", ID_AD5320},
{"dac7512", ID_AD5320},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
@@ -543,7 +543,7 @@ static const struct i2c_device_id ad5446_i2c_ids[] = {
{"ad5602", ID_AD5602},
{"ad5612", ID_AD5612},
{"ad5622", ID_AD5622},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad5446_i2c_ids);
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 1c996016756a..d8c325260259 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -337,7 +337,7 @@ static const struct spi_device_id ad5449_spi_ids[] = {
{ "ad5439", ID_AD5439 },
{ "ad5443", ID_AD5443 },
{ "ad5449", ID_AD5449 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5449_spi_ids);
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index ff0765c8af47..355bcb6a8ba0 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -242,7 +242,7 @@ static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5504_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5504_powerdown_mode_enum),
- { },
+ { }
};
#define AD5504_CHANNEL(_chan) { \
@@ -320,7 +320,7 @@ static int ad5504_probe(struct spi_device *spi)
static const struct spi_device_id ad5504_id[] = {
{"ad5504", ID_AD5504},
{"ad5501", ID_AD5501},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5504_id);
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 50d19304bacb..5f2cd51723f6 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
@@ -24,16 +25,14 @@ static int ad5592r_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct ad5592r_state *st = gpiochip_get_data(chip);
int ret = 0;
- u8 val;
+ u8 val = 0;
- mutex_lock(&st->gpio_lock);
-
- if (st->gpio_out & BIT(offset))
- val = st->gpio_val;
- else
- ret = st->ops->gpio_read(st, &val);
-
- mutex_unlock(&st->gpio_lock);
+ scoped_guard(mutex, &st->gpio_lock) {
+ if (st->gpio_out & BIT(offset))
+ val = st->gpio_val;
+ else
+ ret = st->ops->gpio_read(st, &val);
+ }
if (ret < 0)
return ret;
@@ -41,20 +40,19 @@ static int ad5592r_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(val & BIT(offset));
}
-static void ad5592r_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int ad5592r_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ad5592r_state *st = gpiochip_get_data(chip);
- mutex_lock(&st->gpio_lock);
+ guard(mutex)(&st->gpio_lock);
if (value)
st->gpio_val |= BIT(offset);
else
st->gpio_val &= ~BIT(offset);
- st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
-
- mutex_unlock(&st->gpio_lock);
+ return st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
}
static int ad5592r_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -62,21 +60,16 @@ static int ad5592r_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
struct ad5592r_state *st = gpiochip_get_data(chip);
int ret;
- mutex_lock(&st->gpio_lock);
+ guard(mutex)(&st->gpio_lock);
st->gpio_out &= ~BIT(offset);
st->gpio_in |= BIT(offset);
ret = st->ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
if (ret < 0)
- goto err_unlock;
-
- ret = st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
-
-err_unlock:
- mutex_unlock(&st->gpio_lock);
+ return ret;
- return ret;
+ return st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
}
static int ad5592r_gpio_direction_output(struct gpio_chip *chip,
@@ -85,7 +78,7 @@ static int ad5592r_gpio_direction_output(struct gpio_chip *chip,
struct ad5592r_state *st = gpiochip_get_data(chip);
int ret;
- mutex_lock(&st->gpio_lock);
+ guard(mutex)(&st->gpio_lock);
if (value)
st->gpio_val |= BIT(offset);
@@ -97,18 +90,13 @@ static int ad5592r_gpio_direction_output(struct gpio_chip *chip,
ret = st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
if (ret < 0)
- goto err_unlock;
+ return ret;
ret = st->ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
if (ret < 0)
- goto err_unlock;
-
- ret = st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
-
-err_unlock:
- mutex_unlock(&st->gpio_lock);
+ return ret;
- return ret;
+ return st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
}
static int ad5592r_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -141,7 +129,7 @@ static int ad5592r_gpio_init(struct ad5592r_state *st)
st->gpiochip.direction_input = ad5592r_gpio_direction_input;
st->gpiochip.direction_output = ad5592r_gpio_direction_output;
st->gpiochip.get = ad5592r_gpio_get;
- st->gpiochip.set = ad5592r_gpio_set;
+ st->gpiochip.set_rv = ad5592r_gpio_set;
st->gpiochip.request = ad5592r_gpio_request;
st->gpiochip.owner = THIS_MODULE;
st->gpiochip.names = ad5592r_gpio_names;
@@ -155,6 +143,8 @@ static void ad5592r_gpio_cleanup(struct ad5592r_state *st)
{
if (st->gpio_map)
gpiochip_remove(&st->gpiochip);
+
+ mutex_destroy(&st->gpio_lock);
}
static int ad5592r_reset(struct ad5592r_state *st)
@@ -169,10 +159,9 @@ static int ad5592r_reset(struct ad5592r_state *st)
udelay(1);
gpiod_set_value(gpio, 1);
} else {
- mutex_lock(&st->lock);
- /* Writing this magic value resets the device */
- st->ops->reg_write(st, AD5592R_REG_RESET, 0xdac);
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock)
+ /* Writing this magic value resets the device */
+ st->ops->reg_write(st, AD5592R_REG_RESET, 0xdac);
}
udelay(250);
@@ -247,46 +236,44 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
}
}
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
/* Pull down unused pins to GND */
ret = ops->reg_write(st, AD5592R_REG_PULLDOWN, pulldown);
if (ret)
- goto err_unlock;
+ return ret;
ret = ops->reg_write(st, AD5592R_REG_TRISTATE, tristate);
if (ret)
- goto err_unlock;
+ return ret;
/* Configure pins that we use */
ret = ops->reg_write(st, AD5592R_REG_DAC_EN, dac);
if (ret)
- goto err_unlock;
+ return ret;
ret = ops->reg_write(st, AD5592R_REG_ADC_EN, adc);
if (ret)
- goto err_unlock;
+ return ret;
ret = ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
if (ret)
- goto err_unlock;
+ return ret;
ret = ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
if (ret)
- goto err_unlock;
+ return ret;
ret = ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
if (ret)
- goto err_unlock;
+ return ret;
/* Verify that we can read back at least one register */
ret = ops->reg_read(st, AD5592R_REG_ADC_EN, &read_back);
if (!ret && (read_back & 0xff) != adc)
- ret = -EIO;
+ return -EIO;
-err_unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return 0;
}
static int ad5592r_reset_channel_modes(struct ad5592r_state *st)
@@ -303,7 +290,7 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
struct ad5592r_state *st = iio_priv(iio_dev);
- int ret;
+ int ret = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -314,11 +301,11 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
if (!chan->output)
return -EINVAL;
- mutex_lock(&st->lock);
- ret = st->ops->write_dac(st, chan->channel, val);
- if (!ret)
- st->cached_dac[chan->channel] = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ ret = st->ops->write_dac(st, chan->channel, val);
+ if (!ret)
+ st->cached_dac[chan->channel] = val;
+ }
return ret;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_VOLTAGE) {
@@ -333,14 +320,12 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
else
return -EINVAL;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = st->ops->reg_read(st, AD5592R_REG_CTRL,
&st->cached_gp_ctrl);
- if (ret < 0) {
- mutex_unlock(&st->lock);
+ if (ret < 0)
return ret;
- }
if (chan->output) {
if (gain)
@@ -358,11 +343,8 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
~AD5592R_REG_CTRL_ADC_RANGE;
}
- ret = st->ops->reg_write(st, AD5592R_REG_CTRL,
- st->cached_gp_ctrl);
- mutex_unlock(&st->lock);
-
- return ret;
+ return st->ops->reg_write(st, AD5592R_REG_CTRL,
+ st->cached_gp_ctrl);
}
break;
default:
@@ -377,15 +359,15 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
int *val, int *val2, long m)
{
struct ad5592r_state *st = iio_priv(iio_dev);
- u16 read_val;
- int ret, mult;
+ u16 read_val = 0;
+ int ret = 0, mult = 0;
switch (m) {
case IIO_CHAN_INFO_RAW:
if (!chan->output) {
- mutex_lock(&st->lock);
- ret = st->ops->read_adc(st, chan->channel, &read_val);
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock)
+ ret = st->ops->read_adc(st, chan->channel,
+ &read_val);
if (ret)
return ret;
@@ -398,9 +380,8 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
read_val &= GENMASK(11, 0);
} else {
- mutex_lock(&st->lock);
- read_val = st->cached_dac[chan->channel];
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock)
+ read_val = st->cached_dac[chan->channel];
}
dev_dbg(st->dev, "Channel %u read: 0x%04hX\n",
@@ -418,35 +399,32 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
return IIO_VAL_INT_PLUS_NANO;
}
- mutex_lock(&st->lock);
-
- if (chan->output)
- mult = !!(st->cached_gp_ctrl &
- AD5592R_REG_CTRL_DAC_RANGE);
- else
- mult = !!(st->cached_gp_ctrl &
- AD5592R_REG_CTRL_ADC_RANGE);
-
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ if (chan->output)
+ mult = !!(st->cached_gp_ctrl &
+ AD5592R_REG_CTRL_DAC_RANGE);
+ else
+ mult = !!(st->cached_gp_ctrl &
+ AD5592R_REG_CTRL_ADC_RANGE);
+ }
*val *= ++mult;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
- case IIO_CHAN_INFO_OFFSET:
+ case IIO_CHAN_INFO_OFFSET: {
ret = ad5592r_get_vref(st);
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (st->cached_gp_ctrl & AD5592R_REG_CTRL_ADC_RANGE)
*val = (-34365 * 25) / ret;
else
*val = (-75365 * 25) / ret;
- mutex_unlock(&st->lock);
-
return IIO_VAL_INT;
+ }
default:
return -EINVAL;
}
@@ -490,7 +468,7 @@ static const struct iio_chan_spec_ext_info ad5592r_ext_info[] = {
.read = ad5592r_show_scale_available,
.shared = IIO_SHARED_BY_TYPE,
},
- {},
+ { }
};
static void ad5592r_setup_channel(struct iio_dev *iio_dev,
@@ -606,6 +584,10 @@ int ad5592r_probe(struct device *dev, const char *name,
st->num_channels = 8;
dev_set_drvdata(dev, iio_dev);
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
st->reg = devm_regulator_get_optional(dev, "vref");
if (IS_ERR(st->reg)) {
if ((PTR_ERR(st->reg) != -ENODEV) && dev_fwnode(dev))
@@ -622,8 +604,6 @@ int ad5592r_probe(struct device *dev, const char *name,
iio_dev->info = &ad5592r_info;
iio_dev->modes = INDIO_DIRECT_MODE;
- mutex_init(&st->lock);
-
ad5592r_init_scales(st, ad5592r_get_vref(st));
ret = ad5592r_reset(st);
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index fd82d8701322..92d1b629b85d 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -137,19 +137,19 @@ static void ad5592r_spi_remove(struct spi_device *spi)
static const struct spi_device_id ad5592r_spi_ids[] = {
{ .name = "ad5592r", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5592r_spi_ids);
static const struct of_device_id ad5592r_of_match[] = {
{ .compatible = "adi,ad5592r", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ad5592r_of_match);
static const struct acpi_device_id ad5592r_acpi_match[] = {
{"ADS5592", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, ad5592r_acpi_match);
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index ddd13ad821a7..9a8525c61173 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -116,19 +116,19 @@ static void ad5593r_i2c_remove(struct i2c_client *i2c)
static const struct i2c_device_id ad5593r_i2c_ids[] = {
{ .name = "ad5593r", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad5593r_i2c_ids);
static const struct of_device_id ad5593r_of_match[] = {
{ .compatible = "adi,ad5593r", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ad5593r_of_match);
static const struct acpi_device_id ad5593r_acpi_match[] = {
{"ADS5593", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, ad5593r_acpi_match);
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 2fd38ac8f698..13aefe769bad 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -160,7 +160,7 @@ static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5624r_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5624r_powerdown_mode_enum),
- { },
+ { }
};
#define AD5624R_CHANNEL(_chan, _bits) { \
@@ -266,7 +266,7 @@ static const struct spi_device_id ad5624r_id[] = {
{"ad5624r5", ID_AD5624R5},
{"ad5644r5", ID_AD5644R5},
{"ad5664r5", ID_AD5664R5},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5624r_id);
diff --git a/drivers/iio/dac/ad5686-spi.c b/drivers/iio/dac/ad5686-spi.c
index 9c727aa6ea18..df8619e0c092 100644
--- a/drivers/iio/dac/ad5686-spi.c
+++ b/drivers/iio/dac/ad5686-spi.c
@@ -112,7 +112,7 @@ static const struct spi_device_id ad5686_spi_id[] = {
{"ad5685r", ID_AD5685R},
{"ad5686", ID_AD5686},
{"ad5686r", ID_AD5686R},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5686_spi_id);
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 763af690c444..d9cae9555e5d 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -185,7 +185,7 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5686_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5686_powerdown_mode_enum),
- { },
+ { }
};
#define AD5868_CHANNEL(chan, addr, bits, _shift) { \
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index 0156f32c12c8..d3327bca0e07 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -82,7 +82,7 @@ static const struct i2c_device_id ad5686_i2c_id[] = {
{"ad5695r", ID_AD5695R},
{"ad5696", ID_AD5696},
{"ad5696r", ID_AD5696R},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad5686_i2c_id);
@@ -101,7 +101,7 @@ static const struct of_device_id ad5686_of_match[] = {
{ .compatible = "adi,ad5695r" },
{ .compatible = "adi,ad5696" },
{ .compatible = "adi,ad5696r" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad5686_of_match);
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 05e80b6ae2cc..d0e5f35462b1 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -522,7 +522,7 @@ static const struct iio_chan_spec_ext_info ad5755_ext_info[] = {
.write = ad5755_write_powerdown,
.shared = IIO_SEPARATE,
},
- { },
+ { }
};
#define AD5755_CHANNEL(_bits) { \
@@ -853,7 +853,7 @@ static const struct spi_device_id ad5755_id[] = {
{ "ad5757", (kernel_ulong_t)&ad5755_chip_info_tbl[ID_AD5757] },
{ "ad5735", (kernel_ulong_t)&ad5755_chip_info_tbl[ID_AD5735] },
{ "ad5737", (kernel_ulong_t)&ad5755_chip_info_tbl[ID_AD5737] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5755_id);
diff --git a/drivers/iio/dac/ad5758.c b/drivers/iio/dac/ad5758.c
index 98771e37a7b5..4ed4fda76ea9 100644
--- a/drivers/iio/dac/ad5758.c
+++ b/drivers/iio/dac/ad5758.c
@@ -878,7 +878,7 @@ static int ad5758_probe(struct spi_device *spi)
static const struct spi_device_id ad5758_id[] = {
{ "ad5758", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5758_id);
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index 0aa5ba7f4654..b5d20f04f070 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -137,13 +137,11 @@ static int _ad5761_spi_read(struct ad5761_state *st, u8 addr, u16 *val)
struct spi_transfer xfers[] = {
{
.tx_buf = &st->data[0].d8[1],
- .bits_per_word = 8,
.len = 3,
.cs_change = true,
}, {
.tx_buf = &st->data[1].d8[1],
.rx_buf = &st->data[2].d8[1],
- .bits_per_word = 8,
.len = 3,
},
};
@@ -348,7 +346,7 @@ static const struct spi_device_id ad5761_id[] = {
{"ad5721r", ID_AD5721R},
{"ad5761", ID_AD5761},
{"ad5761r", ID_AD5761R},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5761_id);
diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c
index f658ac8086aa..f6a0a0d84fef 100644
--- a/drivers/iio/dac/ad5766.c
+++ b/drivers/iio/dac/ad5766.c
@@ -148,13 +148,11 @@ static int __ad5766_spi_read(struct ad5766_state *st, u8 dac, int *val)
struct spi_transfer xfers[] = {
{
.tx_buf = &st->data[0].d32,
- .bits_per_word = 8,
.len = 3,
.cs_change = 1,
}, {
.tx_buf = &st->data[1].d32,
.rx_buf = &st->data[2].d32,
- .bits_per_word = 8,
.len = 3,
},
};
@@ -437,7 +435,7 @@ static const struct iio_chan_spec_ext_info ad5766_ext_info[] = {
IIO_ENUM("dither_scale", IIO_SEPARATE, &ad5766_dither_scale_enum),
IIO_ENUM_AVAILABLE("dither_scale", IIO_SEPARATE,
&ad5766_dither_scale_enum),
- {}
+ { }
};
#define AD576x_CHANNEL(_chan, _bits) { \
@@ -648,14 +646,14 @@ static int ad5766_probe(struct spi_device *spi)
static const struct of_device_id ad5766_dt_match[] = {
{ .compatible = "adi,ad5766" },
{ .compatible = "adi,ad5767" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad5766_dt_match);
static const struct spi_device_id ad5766_spi_ids[] = {
{ "ad5766", ID_AD5766 },
{ "ad5767", ID_AD5767 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5766_spi_ids);
diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c
index 25cf11d0471b..6eb4027a44fb 100644
--- a/drivers/iio/dac/ad5770r.c
+++ b/drivers/iio/dac/ad5770r.c
@@ -637,13 +637,13 @@ static int ad5770r_probe(struct spi_device *spi)
static const struct of_device_id ad5770r_of_id[] = {
{ .compatible = "adi,ad5770r", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ad5770r_of_id);
static const struct spi_device_id ad5770r_id[] = {
{ "ad5770r", 0 },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, ad5770r_id);
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 07848be3f8d5..41582f2b90fb 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -138,13 +138,11 @@ static int ad5791_spi_read(struct ad5791_state *st, u8 addr, u32 *val)
struct spi_transfer xfers[] = {
{
.tx_buf = &st->data[0].d8[1],
- .bits_per_word = 8,
.len = 3,
.cs_change = 1,
}, {
.tx_buf = &st->data[1].d8[1],
.rx_buf = &st->data[2].d8[1],
- .bits_per_word = 8,
.len = 3,
},
};
@@ -312,7 +310,7 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5791_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5791_powerdown_mode_enum),
- { },
+ { }
};
#define AD5791_DEFINE_CHIP_INFO(_name, bits, _shift, _lin_comp) \
diff --git a/drivers/iio/dac/ad7293.c b/drivers/iio/dac/ad7293.c
index d3f49b5337d2..c3797e40cdd9 100644
--- a/drivers/iio/dac/ad7293.c
+++ b/drivers/iio/dac/ad7293.c
@@ -114,6 +114,7 @@
#define AD7293_REG_DATA_RAW_MSK GENMASK(15, 4)
#define AD7293_REG_VINX_RANGE_GET_CH_MSK(x, ch) (((x) >> (ch)) & 0x1)
#define AD7293_REG_VINX_RANGE_SET_CH_MSK(x, ch) (((x) & 0x1) << (ch))
+#define AD7293_GENERAL_ADC_REF_MSK BIT(7)
#define AD7293_CHIP_ID 0x18
enum ad7293_ch_type {
@@ -141,6 +142,7 @@ struct ad7293_state {
/* Protect against concurrent accesses to the device, page selection and data content */
struct mutex lock;
struct gpio_desc *gpio_reset;
+ bool vrefin_en;
u8 page_select;
u8 data[3] __aligned(IIO_DMA_MINALIGN);
};
@@ -785,6 +787,12 @@ static int ad7293_properties_parse(struct ad7293_state *st)
if (ret)
return dev_err_probe(&spi->dev, ret, "failed to enable VDRIVE\n");
+ ret = devm_regulator_get_enable_optional(&spi->dev, "vrefin");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(&spi->dev, ret, "failed to enable VREFIN\n");
+
+ st->vrefin_en = ret != -ENODEV;
+
st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(st->gpio_reset))
@@ -818,6 +826,11 @@ static int ad7293_init(struct ad7293_state *st)
return -EINVAL;
}
+ if (!st->vrefin_en)
+ return __ad7293_spi_update_bits(st, AD7293_REG_GENERAL,
+ AD7293_GENERAL_ADC_REF_MSK,
+ AD7293_GENERAL_ADC_REF_MSK);
+
return 0;
}
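
The VREFIN handling is the usual optional-supply idiom: devm_regulator_get_enable_optional() returns -ENODEV when the supply is simply not described, which is treated as "use the internal reference" rather than as a failure. A condensed sketch (my_parse_supplies() is a hypothetical name):

	static int my_parse_supplies(struct device *dev, bool *ext_ref)
	{
		int ret;

		ret = devm_regulator_get_enable_optional(dev, "vrefin");
		if (ret < 0 && ret != -ENODEV)
			return dev_err_probe(dev, ret, "failed to enable VREFIN\n");

		/* Absent supply (-ENODEV): fall back to the internal reference. */
		*ext_ref = ret != -ENODEV;
		return 0;
	}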
@@ -859,13 +872,13 @@ static int ad7293_probe(struct spi_device *spi)
static const struct spi_device_id ad7293_id[] = {
{ "ad7293", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7293_id);
static const struct of_device_id ad7293_of_match[] = {
{ .compatible = "adi,ad7293" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad7293_of_match);
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index bff6bf697d9c..a88cc639047d 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -173,7 +173,7 @@ static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
.write = ad7303_write_dac_powerdown,
.shared = IIO_SEPARATE,
},
- { },
+ { }
};
#define AD7303_CHANNEL(chan) { \
@@ -264,13 +264,13 @@ static int ad7303_probe(struct spi_device *spi)
static const struct of_device_id ad7303_spi_of_match[] = {
{ .compatible = "adi,ad7303", },
- { /* sentinel */ },
+ { }
};
MODULE_DEVICE_TABLE(of, ad7303_spi_of_match);
static const struct spi_device_id ad7303_spi_ids[] = {
{ "ad7303", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7303_spi_ids);
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index 8a362fae2eca..60e663af1cc1 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -153,7 +153,7 @@ static int ad8801_probe(struct spi_device *spi)
static const struct spi_device_id ad8801_ids[] = {
{"ad8801", ID_AD8801},
{"ad8803", ID_AD8803},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad8801_ids);
diff --git a/drivers/iio/dac/ad9739a.c b/drivers/iio/dac/ad9739a.c
index b6a65359b0b4..d77b46d83bd4 100644
--- a/drivers/iio/dac/ad9739a.c
+++ b/drivers/iio/dac/ad9739a.c
@@ -442,13 +442,13 @@ static int ad9739a_probe(struct spi_device *spi)
static const struct of_device_id ad9739a_of_match[] = {
{ .compatible = "adi,ad9739a" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad9739a_of_match);
static const struct spi_device_id ad9739a_id[] = {
{"ad9739a"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad9739a_id);
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
index 892d770aec69..33faba4b02c2 100644
--- a/drivers/iio/dac/adi-axi-dac.c
+++ b/drivers/iio/dac/adi-axi-dac.c
@@ -84,6 +84,7 @@
#define AXI_DAC_CHAN_CNTRL_7_REG(c) (0x0418 + (c) * 0x40)
#define AXI_DAC_CHAN_CNTRL_7_DATA_SEL GENMASK(3, 0)
+#define AXI_DAC_CHAN_CNTRL_MAX 15
#define AXI_DAC_RD_ADDR(x) (BIT(7) | (x))
/* 360 degrees in rad */
@@ -186,6 +187,9 @@ static int __axi_dac_frequency_get(struct axi_dac_state *st, unsigned int chan,
u32 reg, raw;
int ret;
+ if (chan > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
+
if (!st->dac_clk) {
dev_err(st->dev, "Sampling rate is 0...\n");
return -EINVAL;
@@ -230,6 +234,9 @@ static int axi_dac_scale_get(struct axi_dac_state *st,
int ret, vals[2];
u32 reg, raw;
+ if (chan->channel > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
+
if (tone_2)
reg = AXI_DAC_CHAN_CNTRL_3_REG(chan->channel);
else
@@ -264,6 +271,9 @@ static int axi_dac_phase_get(struct axi_dac_state *st,
u32 reg, raw, phase;
int ret, vals[2];
+ if (chan->channel > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
+
if (tone_2)
reg = AXI_DAC_CHAN_CNTRL_4_REG(chan->channel);
else
@@ -291,6 +301,9 @@ static int __axi_dac_frequency_set(struct axi_dac_state *st, unsigned int chan,
u16 raw;
int ret;
+ if (chan > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
+
if (!sample_rate || freq > sample_rate / 2) {
dev_err(st->dev, "Invalid frequency(%u) dac_clk(%llu)\n",
freq, sample_rate);
@@ -342,6 +355,9 @@ static int axi_dac_scale_set(struct axi_dac_state *st,
u32 raw = 0, reg;
int ret;
+ if (chan->channel > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
+
ret = iio_str_to_fixpoint(buf, 100000, &integer, &frac);
if (ret)
return ret;
@@ -385,6 +401,9 @@ static int axi_dac_phase_set(struct axi_dac_state *st,
u32 raw, reg;
int ret;
+ if (chan->channel > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
+
ret = iio_str_to_fixpoint(buf, 100000, &integer, &frac);
if (ret)
return ret;
@@ -469,7 +488,7 @@ static const struct iio_chan_spec_ext_info axi_dac_ext_info[] = {
IIO_BACKEND_EX_INFO("scale1", IIO_SEPARATE, AXI_DAC_SCALE_TONE_2),
IIO_BACKEND_EX_INFO("phase0", IIO_SEPARATE, AXI_DAC_PHASE_TONE_1),
IIO_BACKEND_EX_INFO("phase1", IIO_SEPARATE, AXI_DAC_PHASE_TONE_2),
- {}
+ { }
};
static int axi_dac_extend_chan(struct iio_backend *back,
@@ -493,6 +512,9 @@ static int axi_dac_data_source_set(struct iio_backend *back, unsigned int chan,
{
struct axi_dac_state *st = iio_backend_get_priv(back);
+ if (chan > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
+
switch (data) {
case IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE:
return regmap_update_bits(st->regmap,
@@ -514,6 +536,35 @@ static int axi_dac_data_source_set(struct iio_backend *back, unsigned int chan,
}
}
+static int axi_dac_data_source_get(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source *data)
+{
+ struct axi_dac_state *st = iio_backend_get_priv(back);
+ int ret;
+ u32 val;
+
+ if (chan > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
+
+ ret = regmap_read(st->regmap, AXI_DAC_CHAN_CNTRL_7_REG(chan), &val);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case AXI_DAC_DATA_INTERNAL_TONE:
+ *data = IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE;
+ return 0;
+ case AXI_DAC_DATA_DMA:
+ *data = IIO_BACKEND_EXTERNAL;
+ return 0;
+ case AXI_DAC_DATA_INTERNAL_RAMP_16BIT:
+ *data = IIO_BACKEND_INTERNAL_RAMP_16BIT;
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
static int axi_dac_set_sample_rate(struct iio_backend *back, unsigned int chan,
u64 sample_rate)
{
@@ -521,6 +572,8 @@ static int axi_dac_set_sample_rate(struct iio_backend *back, unsigned int chan,
unsigned int freq;
int ret, tone;
+ if (chan > AXI_DAC_CHAN_CNTRL_MAX)
+ return -EINVAL;
if (!sample_rate)
return -EINVAL;
if (st->reg_config & AXI_DAC_CONFIG_DDS_DISABLE)
@@ -707,6 +760,7 @@ static int axi_dac_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val,
{
struct axi_dac_state *st = iio_backend_get_priv(back);
int ret;
+ u32 ival;
guard(mutex)(&st->lock);
@@ -719,6 +773,13 @@ static int axi_dac_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val,
if (ret)
return ret;
+ ret = regmap_read_poll_timeout(st->regmap,
+ AXI_DAC_UI_STATUS_REG, ival,
+ FIELD_GET(AXI_DAC_UI_STATUS_IF_BUSY, ival) == 0,
+ 10, 100 * KILO);
+ if (ret)
+ return ret;
+
return regmap_read(st->regmap, AXI_DAC_CUSTOM_RD_REG, val);
}
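
The added wait uses regmap_read_poll_timeout(), which re-reads a register until the given condition on the value holds or the timeout (in microseconds) elapses, returning -ETIMEDOUT on expiry. A minimal sketch with assumed register and bit names (MY_STATUS, MY_BUSY):

	u32 sts;
	int ret;

	/* Poll every 10 us; give up after 100 ms. */
	ret = regmap_read_poll_timeout(map, MY_STATUS, sts,
				       !(sts & MY_BUSY), 10, 100 * 1000);
	if (ret)
		return ret;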
@@ -794,6 +855,7 @@ static const struct iio_backend_ops axi_ad3552r_ops = {
.request_buffer = axi_dac_request_buffer,
.free_buffer = axi_dac_free_buffer,
.data_source_set = axi_dac_data_source_set,
+ .data_source_get = axi_dac_data_source_get,
.ddr_enable = axi_dac_ddr_enable,
.ddr_disable = axi_dac_ddr_disable,
.data_stream_enable = axi_dac_data_stream_enable,
@@ -961,7 +1023,7 @@ static const struct axi_dac_info dac_ad3552r = {
static const struct of_device_id axi_dac_of_match[] = {
{ .compatible = "adi,axi-dac-9.1.b", .data = &dac_generic },
{ .compatible = "adi,axi-ad3552r", .data = &dac_ad3552r },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, axi_dac_of_match);
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
index f36f10bfb6be..d1b8441051ae 100644
--- a/drivers/iio/dac/dpot-dac.c
+++ b/drivers/iio/dac/dpot-dac.c
@@ -237,7 +237,7 @@ static void dpot_dac_remove(struct platform_device *pdev)
static const struct of_device_id dpot_dac_match[] = {
{ .compatible = "dpot-dac" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, dpot_dac_match);
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index e89e4c054653..a26a99753418 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -301,7 +301,7 @@ MODULE_DEVICE_TABLE(i2c, ds4424_id);
static const struct of_device_id ds4424_of_match[] = {
{ .compatible = "maxim,ds4422" },
{ .compatible = "maxim,ds4424" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ds4424_of_match);
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 2332b0c22691..aa1c73f8429d 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -179,7 +179,7 @@ static void lpc18xx_dac_remove(struct platform_device *pdev)
static const struct of_device_id lpc18xx_dac_match[] = {
{ .compatible = "nxp,lpc1850-dac" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, lpc18xx_dac_match);
diff --git a/drivers/iio/dac/ltc1660.c b/drivers/iio/dac/ltc1660.c
index 2758fc8a5ad5..6e80b49f4665 100644
--- a/drivers/iio/dac/ltc1660.c
+++ b/drivers/iio/dac/ltc1660.c
@@ -219,14 +219,14 @@ static void ltc1660_remove(struct spi_device *spi)
static const struct of_device_id ltc1660_dt_ids[] = {
{ .compatible = "lltc,ltc1660", .data = (void *)ID_LTC1660 },
{ .compatible = "lltc,ltc1665", .data = (void *)ID_LTC1665 },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, ltc1660_dt_ids);
static const struct spi_device_id ltc1660_id[] = {
{"ltc1660", ID_LTC1660},
{"ltc1665", ID_LTC1665},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(spi, ltc1660_id);
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index 999348836d87..105f939f7e54 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -176,7 +176,7 @@ static const struct iio_chan_spec_ext_info ltc2632_ext_info[] = {
.write = ltc2632_write_dac_powerdown,
.shared = IIO_SEPARATE,
},
- { },
+ { }
};
#define LTC2632_CHANNEL(_chan, _bits) { \
@@ -372,7 +372,7 @@ static const struct spi_device_id ltc2632_id[] = {
{ "ltc2636-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636H12] },
{ "ltc2636-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636H10] },
{ "ltc2636-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636H8] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ltc2632_id);
@@ -432,7 +432,7 @@ static const struct of_device_id ltc2632_of_match[] = {
.compatible = "lltc,ltc2636-h8",
.data = &ltc2632_chip_info_tbl[ID_LTC2636H8]
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ltc2632_of_match);
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index bdc857c7fa6d..1f24f07d1ad2 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -104,13 +104,11 @@ static int ltc2688_spi_read(void *context, const void *reg, size_t reg_size,
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx_data,
- .bits_per_word = 8,
.len = reg_size + val_size,
.cs_change = 1,
}, {
.tx_buf = st->tx_data + 3,
.rx_buf = st->rx_data,
- .bits_per_word = 8,
.len = reg_size + val_size,
},
};
@@ -608,7 +606,7 @@ static const struct iio_chan_spec_ext_info ltc2688_toggle_sym_ext_info[] = {
ltc2688_reg_bool_get, ltc2688_reg_bool_set),
LTC2688_CHAN_EXT_INFO("symbol", LTC2688_CMD_SW_TOGGLE, IIO_SEPARATE,
ltc2688_reg_bool_get, ltc2688_reg_bool_set),
- {}
+ { }
};
static const struct iio_chan_spec_ext_info ltc2688_toggle_ext_info[] = {
@@ -621,7 +619,7 @@ static const struct iio_chan_spec_ext_info ltc2688_toggle_ext_info[] = {
ltc2688_dither_toggle_set),
LTC2688_CHAN_EXT_INFO("powerdown", LTC2688_CMD_POWERDOWN, IIO_SEPARATE,
ltc2688_reg_bool_get, ltc2688_reg_bool_set),
- {}
+ { }
};
static struct iio_chan_spec_ext_info ltc2688_dither_ext_info[] = {
@@ -649,13 +647,13 @@ static struct iio_chan_spec_ext_info ltc2688_dither_ext_info[] = {
ltc2688_dither_toggle_set),
LTC2688_CHAN_EXT_INFO("powerdown", LTC2688_CMD_POWERDOWN, IIO_SEPARATE,
ltc2688_reg_bool_get, ltc2688_reg_bool_set),
- {}
+ { }
};
static const struct iio_chan_spec_ext_info ltc2688_ext_info[] = {
LTC2688_CHAN_EXT_INFO("powerdown", LTC2688_CMD_POWERDOWN, IIO_SEPARATE,
ltc2688_reg_bool_get, ltc2688_reg_bool_set),
- {}
+ { }
};
#define LTC2688_CHANNEL(_chan) { \
@@ -991,13 +989,13 @@ static int ltc2688_probe(struct spi_device *spi)
static const struct of_device_id ltc2688_of_id[] = {
{ .compatible = "adi,ltc2688" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ltc2688_of_id);
static const struct spi_device_id ltc2688_id[] = {
{ "ltc2688" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ltc2688_id);
diff --git a/drivers/iio/dac/max5522.c b/drivers/iio/dac/max5522.c
index 9f72155dcbc7..1b8fe6b8d26e 100644
--- a/drivers/iio/dac/max5522.c
+++ b/drivers/iio/dac/max5522.c
@@ -174,7 +174,7 @@ static int max5522_spi_probe(struct spi_device *spi)
static const struct spi_device_id max5522_ids[] = {
{ "max5522", (kernel_ulong_t)&max5522_chip_info_tbl[ID_MAX5522] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, max5522_ids);
@@ -183,7 +183,7 @@ static const struct of_device_id max5522_of_match[] = {
.compatible = "maxim,max5522",
.data = &max5522_chip_info_tbl[ID_MAX5522],
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, max5522_of_match);
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index b062a18be5e7..e7e29359f8fe 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -137,7 +137,7 @@ static const struct iio_chan_spec_ext_info max5821_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &max5821_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &max5821_powerdown_mode_enum),
- { },
+ { }
};
#define MAX5821_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 1337fb02ccf5..62972494a229 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -241,7 +241,7 @@ static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
&mcp472x_powerdown_mode_enum[MCP4725]),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
&mcp472x_powerdown_mode_enum[MCP4725]),
- { },
+ { }
};
static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = {
@@ -255,7 +255,7 @@ static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = {
&mcp472x_powerdown_mode_enum[MCP4726]),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
&mcp472x_powerdown_mode_enum[MCP4726]),
- { },
+ { }
};
static const struct iio_chan_spec mcp472x_channel[] = {
diff --git a/drivers/iio/dac/mcp4728.c b/drivers/iio/dac/mcp4728.c
index 192175dc6419..4f30b99110b7 100644
--- a/drivers/iio/dac/mcp4728.c
+++ b/drivers/iio/dac/mcp4728.c
@@ -286,7 +286,7 @@ static const struct iio_chan_spec_ext_info mcp4728_ext_info[] = {
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp4728_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
&mcp4728_powerdown_mode_enum),
- {},
+ { }
};
static const struct iio_chan_spec mcp4728_channels[MCP4728_N_CHANNELS] = {
@@ -573,13 +573,13 @@ static int mcp4728_probe(struct i2c_client *client)
static const struct i2c_device_id mcp4728_id[] = {
{ "mcp4728" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, mcp4728_id);
static const struct of_device_id mcp4728_of_match[] = {
{ .compatible = "microchip,mcp4728" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mcp4728_of_match);
diff --git a/drivers/iio/dac/mcp4821.c b/drivers/iio/dac/mcp4821.c
index c1a59bbbba3c..748bdca9a964 100644
--- a/drivers/iio/dac/mcp4821.c
+++ b/drivers/iio/dac/mcp4821.c
@@ -206,7 +206,7 @@ static const struct of_device_id mcp4821_of_table[] = {
MCP4821_COMPATIBLE("microchip,mcp4812", ID_MCP4812),
MCP4821_COMPATIBLE("microchip,mcp4821", ID_MCP4821),
MCP4821_COMPATIBLE("microchip,mcp4822", ID_MCP4822),
- { /* Sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, mcp4821_of_table);
@@ -217,7 +217,7 @@ static const struct spi_device_id mcp4821_id_table[] = {
{ "mcp4812", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4812]},
{ "mcp4821", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4821]},
{ "mcp4822", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4822]},
- { /* Sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(spi, mcp4821_id_table);
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index 26aa99059813..74f338afcab9 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -161,7 +161,7 @@ static const struct spi_device_id mcp4922_id[] = {
{"mcp4912", ID_MCP4912},
{"mcp4921", ID_MCP4921},
{"mcp4922", ID_MCP4922},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, mcp4922_id);
diff --git a/drivers/iio/dac/rohm-bd79703.c b/drivers/iio/dac/rohm-bd79703.c
index e998ab51052e..a35c37d2261d 100644
--- a/drivers/iio/dac/rohm-bd79703.c
+++ b/drivers/iio/dac/rohm-bd79703.c
@@ -38,11 +38,20 @@ static const struct regmap_config bd79703_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+/* Dynamic driver private data */
struct bd79703_data {
struct regmap *regmap;
int vfs;
};
+/* Static, IC-type-specific data for the different variants */
+struct bd7970x_chip_data {
+ const char *name;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ bool has_vfs;
+};
+
static int bd79703_read_raw(struct iio_dev *idev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -67,7 +76,7 @@ static int bd79703_write_raw(struct iio_dev *idev,
if (val < 0 || val >= 1 << BD79703_DAC_BITS)
return -EINVAL;
- return regmap_write(data->regmap, chan->channel + 1, val);
+ return regmap_write(data->regmap, chan->address, val);
};
static const struct iio_info bd79703_info = {
@@ -75,16 +84,42 @@ static const struct iio_info bd79703_info = {
.write_raw = bd79703_write_raw,
};
-#define BD79703_CHAN(_chan) { \
+#define BD79703_CHAN_ADDR(_chan, _addr) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
.channel = (_chan), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .address = (_chan), \
+ .address = (_addr), \
}
+#define BD79703_CHAN(_chan) BD79703_CHAN_ADDR((_chan), (_chan) + 1)
+
+static const struct iio_chan_spec bd79700_channels[] = {
+ BD79703_CHAN(0),
+ BD79703_CHAN(1),
+};
+
+static const struct iio_chan_spec bd79701_channels[] = {
+ BD79703_CHAN(0),
+ BD79703_CHAN(1),
+ BD79703_CHAN(2),
+};
+
+/*
+ * The BD79702 has 4 channels. They aren't mapped to BD79703 channels 0, 1, 2
+ * and 3, but to channels 0, 1, 4 and 5, so the register addresses used for
+ * SPI accesses are 1, 2, 5 and 6. The address is therefore not at a constant
+ * offset from the channel number, as it is on the other IC variants.
+ */
+static const struct iio_chan_spec bd79702_channels[] = {
+ BD79703_CHAN_ADDR(0, 1),
+ BD79703_CHAN_ADDR(1, 2),
+ BD79703_CHAN_ADDR(2, 5),
+ BD79703_CHAN_ADDR(3, 6),
+};
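
Concretely, BD79703_CHAN_ADDR() decouples the IIO channel index from the SPI register address, which is why bd79703_write_raw() earlier in this patch writes chan->address instead of chan->channel + 1. For the BD79702's third channel the entry expands, in the relevant part, to:

	/* BD79703_CHAN_ADDR(2, 5): IIO channel 2, SPI register 5 */
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.output = 1,
		.channel = 2,
		.address = 5,	/* not channel + 1 on this variant */
	}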
+
static const struct iio_chan_spec bd79703_channels[] = {
BD79703_CHAN(0),
BD79703_CHAN(1),
@@ -94,13 +129,46 @@ static const struct iio_chan_spec bd79703_channels[] = {
BD79703_CHAN(5),
};
+static const struct bd7970x_chip_data bd79700_chip_data = {
+ .name = "bd79700",
+ .channels = bd79700_channels,
+ .num_channels = ARRAY_SIZE(bd79700_channels),
+ .has_vfs = false,
+};
+
+static const struct bd7970x_chip_data bd79701_chip_data = {
+ .name = "bd79701",
+ .channels = bd79701_channels,
+ .num_channels = ARRAY_SIZE(bd79701_channels),
+ .has_vfs = false,
+};
+
+static const struct bd7970x_chip_data bd79702_chip_data = {
+ .name = "bd79702",
+ .channels = bd79702_channels,
+ .num_channels = ARRAY_SIZE(bd79702_channels),
+ .has_vfs = true,
+};
+
+static const struct bd7970x_chip_data bd79703_chip_data = {
+ .name = "bd79703",
+ .channels = bd79703_channels,
+ .num_channels = ARRAY_SIZE(bd79703_channels),
+ .has_vfs = true,
+};
+
static int bd79703_probe(struct spi_device *spi)
{
+ const struct bd7970x_chip_data *cd;
struct device *dev = &spi->dev;
struct bd79703_data *data;
struct iio_dev *idev;
int ret;
+ cd = spi_get_device_match_data(spi);
+ if (!cd)
+ return -ENODEV;
+
idev = devm_iio_device_alloc(dev, sizeof(*data));
if (!idev)
return -ENOMEM;
@@ -112,20 +180,30 @@ static int bd79703_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(data->regmap),
"Failed to initialize Regmap\n");
- ret = devm_regulator_get_enable(dev, "vcc");
- if (ret)
- return dev_err_probe(dev, ret, "Failed to enable VCC\n");
-
- ret = devm_regulator_get_enable_read_voltage(dev, "vfs");
- if (ret < 0)
- return dev_err_probe(dev, ret, "Failed to get Vfs\n");
-
+ /*
+	 * The BD79702 and BD79703 have a separate VFS pin, whereas the BD79700
+	 * and BD79701 use VCC for their full-scale output voltage.
+ */
+ if (cd->has_vfs) {
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable VCC\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "vfs");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get Vfs\n");
+ } else {
+ ret = devm_regulator_get_enable_read_voltage(dev, "vcc");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get VCC\n");
+ }
data->vfs = ret;
- idev->channels = bd79703_channels;
- idev->num_channels = ARRAY_SIZE(bd79703_channels);
+
+ idev->channels = cd->channels;
+ idev->num_channels = cd->num_channels;
idev->modes = INDIO_DIRECT_MODE;
idev->info = &bd79703_info;
- idev->name = "bd79703";
+ idev->name = cd->name;
/* Initialize all to output zero */
ret = regmap_write(data->regmap, BD79703_REG_OUT_ALL, 0);
@@ -136,13 +214,19 @@ static int bd79703_probe(struct spi_device *spi)
}
static const struct spi_device_id bd79703_id[] = {
- { "bd79703", },
+ { "bd79700", (kernel_ulong_t)&bd79700_chip_data },
+ { "bd79701", (kernel_ulong_t)&bd79701_chip_data },
+ { "bd79702", (kernel_ulong_t)&bd79702_chip_data },
+ { "bd79703", (kernel_ulong_t)&bd79703_chip_data },
{ }
};
MODULE_DEVICE_TABLE(spi, bd79703_id);
static const struct of_device_id bd79703_of_match[] = {
- { .compatible = "rohm,bd79703", },
+ { .compatible = "rohm,bd79700", .data = &bd79700_chip_data },
+ { .compatible = "rohm,bd79701", .data = &bd79701_chip_data },
+ { .compatible = "rohm,bd79702", .data = &bd79702_chip_data },
+ { .compatible = "rohm,bd79703", .data = &bd79703_chip_data },
{ }
};
MODULE_DEVICE_TABLE(of, bd79703_of_match);
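
spi_get_device_match_data() resolves the per-variant data no matter whether the device matched through the OF table or the legacy SPI id table, which is why both tables carry the same chip-data pointers. The probe-side idiom, reduced to a sketch:

	static int my_spi_probe(struct spi_device *spi)
	{
		const struct bd7970x_chip_data *cd;

		/* Covers both of_device_id.data and spi_device_id.driver_data. */
		cd = spi_get_device_match_data(spi);
		if (!cd)
			return -ENODEV;

		dev_info(&spi->dev, "probing %s\n", cd->name);
		return 0;
	}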
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 95ed5197d16f..8ef702917060 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -239,7 +239,7 @@ static const struct of_device_id stm32_dac_of_match[] = {
.compatible = "st,stm32h7-dac-core",
.data = (void *)&stm32h7_dac_cfg,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index 3bfb368b3a23..344388338d9b 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -250,7 +250,7 @@ static const struct iio_chan_spec_ext_info stm32_dac_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &stm32_dac_powerdown_mode_en),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &stm32_dac_powerdown_mode_en),
- {},
+ { }
};
#define STM32_DAC_CHANNEL(chan, name) { \
@@ -392,7 +392,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stm32_dac_pm_ops, stm32_dac_suspend,
static const struct of_device_id stm32_dac_of_match[] = {
{ .compatible = "st,stm32-dac", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 8e1590e3cc8b..715870c8a9c4 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -161,7 +161,7 @@ static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
- { },
+ { }
};
#define TI_DAC_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index c5162b72951a..bdc3f94aef98 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -216,7 +216,7 @@ static const struct iio_chan_spec_ext_info dac5571_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &dac5571_powerdown_mode),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &dac5571_powerdown_mode),
- {},
+ { }
};
#define dac5571_CHANNEL(chan, name) { \
@@ -398,7 +398,7 @@ static const struct of_device_id dac5571_of_id[] = {
{.compatible = "ti,dac5573", .data = &dac5571_spec[quad_8bit] },
{.compatible = "ti,dac6573", .data = &dac5571_spec[quad_10bit] },
{.compatible = "ti,dac7573", .data = &dac5571_spec[quad_12bit] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, dac5571_of_id);
@@ -414,7 +414,7 @@ static const struct i2c_device_id dac5571_id[] = {
{"dac5573", (kernel_ulong_t)&dac5571_spec[quad_8bit] },
{"dac6573", (kernel_ulong_t)&dac5571_spec[quad_10bit] },
{"dac7573", (kernel_ulong_t)&dac5571_spec[quad_12bit] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, dac5571_id);
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 6f4aa4794a0c..3d2ce61f0db6 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -147,7 +147,7 @@ static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
- { },
+ { }
};
#define TI_DAC_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/ti-dac7612.c b/drivers/iio/dac/ti-dac7612.c
index 8195815de26f..c308eca02b88 100644
--- a/drivers/iio/dac/ti-dac7612.c
+++ b/drivers/iio/dac/ti-dac7612.c
@@ -166,7 +166,7 @@ static int dac7612_probe(struct spi_device *spi)
static const struct spi_device_id dac7612_id[] = {
{"ti-dac7612"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, dac7612_id);
@@ -174,7 +174,7 @@ static const struct of_device_id dac7612_of_match[] = {
{ .compatible = "ti,dac7612" },
{ .compatible = "ti,dac7612u" },
{ .compatible = "ti,dac7612ub" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, dac7612_of_match);
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 82a078fa98ad..b30ff7bb4400 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -99,7 +99,7 @@ static const struct iio_enum vf610_conversion_mode = {
static const struct iio_chan_spec_ext_info vf610_ext_info[] = {
IIO_ENUM("conversion_mode", IIO_SHARED_BY_DIR,
&vf610_conversion_mode),
- {},
+ { }
};
#define VF610_DAC_CHAN(_chan_type) { \
@@ -166,7 +166,7 @@ static const struct iio_info vf610_dac_iio_info = {
static const struct of_device_id vf610_dac_match[] = {
{ .compatible = "fsl,vf610-dac", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, vf610_dac_match);
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index 288880346707..e35e0596cbfb 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -31,6 +31,11 @@ static const s16 fakedata[] = {
[DUMMY_INDEX_ACCELX] = 344,
};
+struct dummy_scan {
+ s16 data[ARRAY_SIZE(fakedata)];
+ aligned_s64 timestamp;
+};
+
/**
* iio_simple_dummy_trigger_h() - the trigger handler function
* @irq: the interrupt number
@@ -45,11 +50,18 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
+ struct dummy_scan *scan;
int i = 0, j;
- u16 *data;
- data = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (!data)
+ /*
+ * Note that some buses such as SPI require DMA safe buffers which
+ * cannot be on the stack. Two easy ways to do this:
+ * - Local kzalloc (as done here)
+ * - A buffer at the end of the structure accessed via iio_priv()
+ * that is marked __aligned(IIO_DMA_MINALIGN).
+ */
+ scan = kzalloc(sizeof(*scan), GFP_KERNEL);
+ if (!scan)
goto done;
/*
@@ -69,13 +81,12 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
* constant table fakedata.
*/
iio_for_each_active_channel(indio_dev, j)
- data[i++] = fakedata[j];
-
- iio_push_to_buffers_with_timestamp(indio_dev, data,
- iio_get_time_ns(indio_dev));
+ scan->data[i++] = fakedata[j];
- kfree(data);
+ iio_push_to_buffers_with_ts(indio_dev, scan, sizeof(*scan),
+ iio_get_time_ns(indio_dev));
+ kfree(scan);
done:
/*
* Tell the core we are done with this trigger and ready for the
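
The comment in the handler names a second DMA-safe option besides kzalloc(): embedding the scan buffer at the end of the iio_priv() structure, aligned to IIO_DMA_MINALIGN so the core can DMA into it without clobbering neighbouring fields. A minimal sketch of that alternative (struct my_priv is illustrative):

	#include <linux/iio/iio.h>

	struct my_priv {
		struct spi_device *spi;
		/* Own cacheline(s) from here on: safe as a DMA target. */
		struct {
			s16 data[4];
			aligned_s64 timestamp;
		} scan __aligned(IIO_DMA_MINALIGN);
	};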
diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
index d85b7d3de866..19f823446cda 100644
--- a/drivers/iio/filter/admv8818.c
+++ b/drivers/iio/filter/admv8818.c
@@ -14,6 +14,7 @@
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
@@ -70,6 +71,16 @@
#define ADMV8818_HPF_WR0_MSK GENMASK(7, 4)
#define ADMV8818_LPF_WR0_MSK GENMASK(3, 0)
+#define ADMV8818_BAND_BYPASS 0
+#define ADMV8818_BAND_MIN 1
+#define ADMV8818_BAND_MAX 4
+#define ADMV8818_BAND_CORNER_LOW 0
+#define ADMV8818_BAND_CORNER_HIGH 1
+
+#define ADMV8818_STATE_MIN 0
+#define ADMV8818_STATE_MAX 15
+#define ADMV8818_NUM_STATES 16
+
enum {
ADMV8818_BW_FREQ,
ADMV8818_CENTER_FREQ
@@ -90,20 +101,24 @@ struct admv8818_state {
struct mutex lock;
unsigned int filter_mode;
u64 cf_hz;
+ u64 lpf_margin_hz;
+ u64 hpf_margin_hz;
};
-static const unsigned long long freq_range_hpf[4][2] = {
+static const unsigned long long freq_range_hpf[5][2] = {
+ {0ULL, 0ULL}, /* bypass */
{1750000000ULL, 3550000000ULL},
{3400000000ULL, 7250000000ULL},
{6600000000, 12000000000},
{12500000000, 19900000000}
};
-static const unsigned long long freq_range_lpf[4][2] = {
+static const unsigned long long freq_range_lpf[5][2] = {
+ {U64_MAX, U64_MAX}, /* bypass */
{2050000000ULL, 3850000000ULL},
{3350000000ULL, 7250000000ULL},
{7000000000, 13000000000},
- {12550000000, 18500000000}
+ {12550000000, 18850000000}
};
static const struct regmap_config admv8818_regmap_config = {
@@ -121,44 +136,59 @@ static const char * const admv8818_modes[] = {
static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq)
{
- unsigned int hpf_step = 0, hpf_band = 0, i, j;
- u64 freq_step;
- int ret;
+ int band, state, ret;
+ unsigned int hpf_state = ADMV8818_STATE_MIN, hpf_band = ADMV8818_BAND_BYPASS;
+ u64 freq_error, min_freq_error, freq_corner, freq_step;
- if (freq < freq_range_hpf[0][0])
+ if (freq < freq_range_hpf[ADMV8818_BAND_MIN][ADMV8818_BAND_CORNER_LOW])
goto hpf_write;
- if (freq > freq_range_hpf[3][1]) {
- hpf_step = 15;
- hpf_band = 4;
-
+ if (freq >= freq_range_hpf[ADMV8818_BAND_MAX][ADMV8818_BAND_CORNER_HIGH]) {
+ hpf_state = ADMV8818_STATE_MAX;
+ hpf_band = ADMV8818_BAND_MAX;
goto hpf_write;
}
- for (i = 0; i < 4; i++) {
- freq_step = div_u64((freq_range_hpf[i][1] -
- freq_range_hpf[i][0]), 15);
+ /* Close HPF frequency gap between 12 and 12.5 GHz */
+ if (freq >= 12000ULL * HZ_PER_MHZ && freq < 12500ULL * HZ_PER_MHZ) {
+ hpf_state = ADMV8818_STATE_MAX;
+ hpf_band = 3;
+ goto hpf_write;
+ }
- if (freq > freq_range_hpf[i][0] &&
- (freq < freq_range_hpf[i][1] + freq_step)) {
- hpf_band = i + 1;
+ min_freq_error = U64_MAX;
+ for (band = ADMV8818_BAND_MIN; band <= ADMV8818_BAND_MAX; band++) {
+ /*
+		 * This range (and therefore all later ranges) has a corner
+		 * frequency higher than the target frequency.
+ */
+ if (freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW] > freq)
+ break;
- for (j = 1; j <= 16; j++) {
- if (freq < (freq_range_hpf[i][0] + (freq_step * j))) {
- hpf_step = j - 1;
- break;
- }
+ freq_step = freq_range_hpf[band][ADMV8818_BAND_CORNER_HIGH] -
+ freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW];
+ freq_step = div_u64(freq_step, ADMV8818_NUM_STATES - 1);
+
+ for (state = ADMV8818_STATE_MIN; state <= ADMV8818_STATE_MAX; state++) {
+ freq_corner = freq_range_hpf[band][ADMV8818_BAND_CORNER_LOW] +
+ freq_step * state;
+
+ /*
+			 * This state (and therefore all later states) has a corner
+			 * frequency higher than the target frequency.
+ */
+ if (freq_corner > freq)
+ break;
+
+ freq_error = freq - freq_corner;
+ if (freq_error < min_freq_error) {
+ min_freq_error = freq_error;
+ hpf_state = state;
+ hpf_band = band;
}
- break;
}
}
- /* Close HPF frequency gap between 12 and 12.5 GHz */
- if (freq >= 12000 * HZ_PER_MHZ && freq <= 12500 * HZ_PER_MHZ) {
- hpf_band = 3;
- hpf_step = 15;
- }
-
hpf_write:
ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW,
ADMV8818_SW_IN_SET_WR0_MSK |
@@ -170,7 +200,7 @@ hpf_write:
return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
ADMV8818_HPF_WR0_MSK,
- FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_step));
+ FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_state));
}
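
The rewritten selection is a nearest-corner search: walk the bands whose low corner is still at or below the target, step through the 16 states of each, and remember the (band, state) pair whose corner frequency comes closest to the target without exceeding it. The same idea over an arbitrary range table, as a standalone sketch:

	#include <linux/math64.h>

	static void pick_nearest(const u64 (*range)[2], int nbands, int nstates,
				 u64 target, int *best_band, int *best_state)
	{
		u64 best_err = U64_MAX;
		int band, state;

		for (band = 1; band < nbands; band++) {
			u64 step = div_u64(range[band][1] - range[band][0],
					   nstates - 1);

			if (range[band][0] > target)
				break;			/* later bands start higher */

			for (state = 0; state < nstates; state++) {
				u64 corner = range[band][0] + step * state;

				if (corner > target)
					break;		/* later states are higher */
				if (target - corner < best_err) {
					best_err = target - corner;
					*best_band = band;
					*best_state = state;
				}
			}
		}
	}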
static int admv8818_hpf_select(struct admv8818_state *st, u64 freq)
@@ -186,31 +216,52 @@ static int admv8818_hpf_select(struct admv8818_state *st, u64 freq)
static int __admv8818_lpf_select(struct admv8818_state *st, u64 freq)
{
- unsigned int lpf_step = 0, lpf_band = 0, i, j;
- u64 freq_step;
- int ret;
+ int band, state, ret;
+ unsigned int lpf_state = ADMV8818_STATE_MIN, lpf_band = ADMV8818_BAND_BYPASS;
+ u64 freq_error, min_freq_error, freq_corner, freq_step;
- if (freq > freq_range_lpf[3][1])
+ if (freq > freq_range_lpf[ADMV8818_BAND_MAX][ADMV8818_BAND_CORNER_HIGH])
goto lpf_write;
- if (freq < freq_range_lpf[0][0]) {
- lpf_band = 1;
-
+ if (freq < freq_range_lpf[ADMV8818_BAND_MIN][ADMV8818_BAND_CORNER_LOW]) {
+ lpf_state = ADMV8818_STATE_MIN;
+ lpf_band = ADMV8818_BAND_MIN;
goto lpf_write;
}
- for (i = 0; i < 4; i++) {
- if (freq > freq_range_lpf[i][0] && freq < freq_range_lpf[i][1]) {
- lpf_band = i + 1;
- freq_step = div_u64((freq_range_lpf[i][1] - freq_range_lpf[i][0]), 15);
+ min_freq_error = U64_MAX;
+ for (band = ADMV8818_BAND_MAX; band >= ADMV8818_BAND_MIN; --band) {
+ /*
+ * At this point the highest corner frequency of
+ * all remaining ranges is below the target.
+ * LPF corner should be >= the target.
+ */
+ if (freq > freq_range_lpf[band][ADMV8818_BAND_CORNER_HIGH])
+ break;
+
+ freq_step = freq_range_lpf[band][ADMV8818_BAND_CORNER_HIGH] -
+ freq_range_lpf[band][ADMV8818_BAND_CORNER_LOW];
+ freq_step = div_u64(freq_step, ADMV8818_NUM_STATES - 1);
+
+ for (state = ADMV8818_STATE_MAX; state >= ADMV8818_STATE_MIN; --state) {
+
+ freq_corner = freq_range_lpf[band][ADMV8818_BAND_CORNER_LOW] +
+ state * freq_step;
- for (j = 0; j <= 15; j++) {
- if (freq < (freq_range_lpf[i][0] + (freq_step * j))) {
- lpf_step = j;
- break;
- }
+ /*
+			 * At this point all remaining states in this range would
+			 * place the corner frequency below the target.
+			 * The LPF corner should be >= the target.
+ */
+ if (freq > freq_corner)
+ break;
+
+ freq_error = freq_corner - freq;
+ if (freq_error < min_freq_error) {
+ min_freq_error = freq_error;
+ lpf_state = state;
+ lpf_band = band;
}
- break;
}
}
@@ -225,7 +276,7 @@ lpf_write:
return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
ADMV8818_LPF_WR0_MSK,
- FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_step));
+ FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_state));
}
static int admv8818_lpf_select(struct admv8818_state *st, u64 freq)
@@ -242,16 +293,28 @@ static int admv8818_lpf_select(struct admv8818_state *st, u64 freq)
static int admv8818_rfin_band_select(struct admv8818_state *st)
{
int ret;
+ u64 hpf_corner_target, lpf_corner_target;
st->cf_hz = clk_get_rate(st->clkin);
+ /* Check for underflow */
+ if (st->cf_hz > st->hpf_margin_hz)
+ hpf_corner_target = st->cf_hz - st->hpf_margin_hz;
+ else
+ hpf_corner_target = 0;
+
+ /* Check for overflow */
+ lpf_corner_target = st->cf_hz + st->lpf_margin_hz;
+ if (lpf_corner_target < st->cf_hz)
+ lpf_corner_target = U64_MAX;
+
mutex_lock(&st->lock);
- ret = __admv8818_hpf_select(st, st->cf_hz);
+ ret = __admv8818_hpf_select(st, hpf_corner_target);
if (ret)
goto exit;
- ret = __admv8818_lpf_select(st, st->cf_hz);
+ ret = __admv8818_lpf_select(st, lpf_corner_target);
exit:
mutex_unlock(&st->lock);
return ret;
@@ -278,8 +341,11 @@ static int __admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq)
hpf_state = FIELD_GET(ADMV8818_HPF_WR0_MSK, data);
- *hpf_freq = div_u64(freq_range_hpf[hpf_band - 1][1] - freq_range_hpf[hpf_band - 1][0], 15);
- *hpf_freq = freq_range_hpf[hpf_band - 1][0] + (*hpf_freq * hpf_state);
+ *hpf_freq = freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_HIGH] -
+ freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_LOW];
+ *hpf_freq = div_u64(*hpf_freq, ADMV8818_NUM_STATES - 1);
+ *hpf_freq = freq_range_hpf[hpf_band][ADMV8818_BAND_CORNER_LOW] +
+ (*hpf_freq * hpf_state);
return ret;
}
@@ -316,8 +382,11 @@ static int __admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq)
lpf_state = FIELD_GET(ADMV8818_LPF_WR0_MSK, data);
- *lpf_freq = div_u64(freq_range_lpf[lpf_band - 1][1] - freq_range_lpf[lpf_band - 1][0], 15);
- *lpf_freq = freq_range_lpf[lpf_band - 1][0] + (*lpf_freq * lpf_state);
+ *lpf_freq = freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_HIGH] -
+ freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_LOW];
+ *lpf_freq = div_u64(*lpf_freq, ADMV8818_NUM_STATES - 1);
+ *lpf_freq = freq_range_lpf[lpf_band][ADMV8818_BAND_CORNER_LOW] +
+ (*lpf_freq * lpf_state);
return ret;
}
@@ -333,6 +402,19 @@ static int admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq)
return ret;
}
+static int admv8818_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ return IIO_VAL_INT_64;
+ default:
+ return -EINVAL;
+ }
+}
+
static int admv8818_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long info)
@@ -341,6 +423,9 @@ static int admv8818_write_raw(struct iio_dev *indio_dev,
u64 freq = ((u64)val2 << 32 | (u32)val);
+ if ((s64)freq < 0)
+ return -EINVAL;
+
switch (info) {
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
return admv8818_lpf_select(st, freq);
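
Because write_raw_get_fmt() reports IIO_VAL_INT_64 for the two corner frequencies, the core delivers the written value as two 32-bit halves: val carries the low word and val2 the high word, reassembled as above. A quick worked example:

	u32 lo = 0xa817c800;			/* low 32 bits */
	u32 hi = 0x4;				/* high 32 bits */
	u64 freq = ((u64)hi << 32) | lo;	/* 0x4a817c800 = 20 GHz */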
@@ -502,6 +587,7 @@ set_mode:
static const struct iio_info admv8818_info = {
.write_raw = admv8818_write_raw,
+ .write_raw_get_fmt = admv8818_write_raw_get_fmt,
.read_raw = admv8818_read_raw,
.debugfs_reg_access = &admv8818_reg_access,
};
@@ -516,7 +602,7 @@ static const struct iio_enum admv8818_mode_enum = {
static const struct iio_chan_spec_ext_info admv8818_ext_info[] = {
IIO_ENUM("filter_mode", IIO_SHARED_BY_ALL, &admv8818_mode_enum),
IIO_ENUM_AVAILABLE("filter_mode", IIO_SHARED_BY_ALL, &admv8818_mode_enum),
- { },
+ { }
};
#define ADMV8818_CHAN(_channel) { \
@@ -641,6 +727,32 @@ static int admv8818_clk_setup(struct admv8818_state *st)
return devm_add_action_or_reset(&spi->dev, admv8818_clk_notifier_unreg, st);
}
+static int admv8818_read_properties(struct admv8818_state *st)
+{
+ struct spi_device *spi = st->spi;
+ u32 mhz;
+ int ret;
+
+ ret = device_property_read_u32(&spi->dev, "adi,lpf-margin-mhz", &mhz);
+ if (ret == 0)
+ st->lpf_margin_hz = (u64)mhz * HZ_PER_MHZ;
+ else if (ret == -EINVAL)
+ st->lpf_margin_hz = 0;
+ else
+ return ret;
+
+ ret = device_property_read_u32(&spi->dev, "adi,hpf-margin-mhz", &mhz);
+ if (ret == 0)
+ st->hpf_margin_hz = (u64)mhz * HZ_PER_MHZ;
+ else if (ret == -EINVAL)
+ st->hpf_margin_hz = 0;
+ else if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
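
device_property_read_u32() returns -EINVAL when a property is absent, so both margins simply default to 0 when unset; any other error aborts probe. Downstream, admv8818_rfin_band_select() uses the margins to bias the auto-selected corners away from the clock rate, saturating at both ends. The arithmetic, condensed (example values assumed):

	u64 cf_hz = 10ULL * 1000000000;		/* 10 GHz clock */
	u64 margin = 500ULL * 1000000;		/* 500 MHz margins */

	u64 hpf_target = cf_hz > margin ? cf_hz - margin : 0;	/* 9.5 GHz */
	u64 lpf_target = cf_hz + margin;			/* 10.5 GHz */

	if (lpf_target < cf_hz)		/* u64 wrap-around */
		lpf_target = U64_MAX;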
static int admv8818_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -672,6 +784,10 @@ static int admv8818_probe(struct spi_device *spi)
mutex_init(&st->lock);
+ ret = admv8818_read_properties(st);
+ if (ret)
+ return ret;
+
ret = admv8818_init(st);
if (ret)
return ret;
@@ -681,13 +797,13 @@ static int admv8818_probe(struct spi_device *spi)
static const struct spi_device_id admv8818_id[] = {
{ "admv8818", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, admv8818_id);
static const struct of_device_id admv8818_of_match[] = {
{ .compatible = "adi,admv8818" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, admv8818_of_match);
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index b1554ced7a26..63c485e9e44c 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -1032,7 +1032,7 @@ static int ad9523_probe(struct spi_device *spi)
static const struct spi_device_id ad9523_id[] = {
{"ad9523-1", 9523},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad9523_id);
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 61828e61e275..47f1c7e9efa9 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -373,7 +373,7 @@ static const struct iio_chan_spec_ext_info adf4350_ext_info[] = {
_ADF4350_EXT_INFO("frequency_resolution", ADF4350_FREQ_RESOLUTION),
_ADF4350_EXT_INFO("refin_frequency", ADF4350_FREQ_REFIN),
_ADF4350_EXT_INFO("powerdown", ADF4350_PWRDOWN),
- { },
+ { }
};
static const struct iio_chan_spec adf4350_chan = {
@@ -682,14 +682,14 @@ static int adf4350_probe(struct spi_device *spi)
static const struct of_device_id adf4350_of_match[] = {
{ .compatible = "adi,adf4350", },
{ .compatible = "adi,adf4351", },
- { /* sentinel */ },
+ { }
};
MODULE_DEVICE_TABLE(of, adf4350_of_match);
static const struct spi_device_id adf4350_id[] = {
{"adf4350", 4350},
{"adf4351", 4351},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adf4350_id);
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index 9a84e81787b1..d6dc7827fb41 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -438,7 +438,7 @@ static const struct iio_chan_spec_ext_info adf4371_ext_info[] = {
_ADF4371_EXT_INFO("frequency", ADF4371_FREQ),
_ADF4371_EXT_INFO("powerdown", ADF4371_POWER_DOWN),
_ADF4371_EXT_INFO("name", ADF4371_CHANNEL_NAME),
- { },
+ { }
};
#define ADF4371_CHANNEL(index) { \
@@ -626,14 +626,14 @@ static int adf4371_probe(struct spi_device *spi)
static const struct spi_device_id adf4371_id_table[] = {
{ "adf4371", (kernel_ulong_t)&adf4371_chip_info },
{ "adf4372", (kernel_ulong_t)&adf4372_chip_info },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adf4371_id_table);
static const struct of_device_id adf4371_of_match[] = {
{ .compatible = "adi,adf4371", .data = &adf4371_chip_info },
{ .compatible = "adi,adf4372", .data = &adf4372_chip_info},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, adf4371_of_match);
diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c
index 45ceeb828d6b..08833b7035e4 100644
--- a/drivers/iio/frequency/adf4377.c
+++ b/drivers/iio/frequency/adf4377.c
@@ -985,14 +985,14 @@ static int adf4377_probe(struct spi_device *spi)
static const struct spi_device_id adf4377_id[] = {
{ "adf4377", (kernel_ulong_t)&adf4377_chip_info },
{ "adf4378", (kernel_ulong_t)&adf4378_chip_info },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adf4377_id);
static const struct of_device_id adf4377_of_match[] = {
{ .compatible = "adi,adf4377", .data = &adf4377_chip_info },
{ .compatible = "adi,adf4378", .data = &adf4378_chip_info },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, adf4377_of_match);
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 8ef583680ad0..d8e8d541990f 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -319,7 +319,7 @@ static ssize_t admv1013_write(struct iio_dev *indio_dev,
return -EINVAL;
}
- return ret ? ret : len;
+ return len;
}
static int admv1013_update_quad_filters(struct admv1013_state *st)
@@ -407,7 +407,7 @@ static int admv1013_freq_change(struct notifier_block *nb, unsigned long action,
static const struct iio_chan_spec_ext_info admv1013_ext_info[] = {
_ADMV1013_EXT_INFO("i_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_I_CALIBPHASE),
_ADMV1013_EXT_INFO("q_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_Q_CALIBPHASE),
- { },
+ { }
};
#define ADMV1013_CHAN_PHASE(_channel, _channel2, _admv1013_ext_info) { \
@@ -615,13 +615,13 @@ static int admv1013_probe(struct spi_device *spi)
static const struct spi_device_id admv1013_id[] = {
{ "admv1013", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, admv1013_id);
static const struct of_device_id admv1013_of_match[] = {
{ .compatible = "adi,admv1013" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, admv1013_of_match);
diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c
index 986b87a72577..7a8f92ec80a2 100644
--- a/drivers/iio/frequency/admv1014.c
+++ b/drivers/iio/frequency/admv1014.c
@@ -792,13 +792,13 @@ static int admv1014_probe(struct spi_device *spi)
static const struct spi_device_id admv1014_id[] = {
{ "admv1014", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, admv1014_id);
static const struct of_device_id admv1014_of_match[] = {
{ .compatible = "adi,admv1014" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, admv1014_of_match);
diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c
index 57ee908fc747..a7a21f929970 100644
--- a/drivers/iio/frequency/adrf6780.c
+++ b/drivers/iio/frequency/adrf6780.c
@@ -487,13 +487,13 @@ static int adrf6780_probe(struct spi_device *spi)
static const struct spi_device_id adrf6780_id[] = {
{ "adrf6780", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adrf6780_id);
static const struct of_device_id adrf6780_of_match[] = {
{ .compatible = "adi,adrf6780" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, adrf6780_of_match);
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index 14b3abf6dce9..178bba95a709 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -214,7 +214,7 @@ static int adis16080_probe(struct spi_device *spi)
static const struct spi_device_id adis16080_ids[] = {
{ "adis16080", ID_ADIS16080 },
{ "adis16100", ID_ADIS16100 },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, adis16080_ids);
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index c151fbb59ffe..586e6cfa14a9 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -414,7 +414,7 @@ static const struct spi_device_id adis16260_id[] = {
{"adis16250", ADIS16260},
{"adis16255", ADIS16260},
{"adis16251", ADIS16251},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adis16260_id);
diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c
index 223fc181109c..8fcb41f45baa 100644
--- a/drivers/iio/gyro/adxrs290.c
+++ b/drivers/iio/gyro/adxrs290.c
@@ -290,9 +290,8 @@ static int adxrs290_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
switch (chan->type) {
case IIO_ANGL_VEL:
@@ -316,7 +315,7 @@ static int adxrs290_read_raw(struct iio_dev *indio_dev,
break;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
@@ -366,9 +365,8 @@ static int adxrs290_write_raw(struct iio_dev *indio_dev,
struct adxrs290_state *st = iio_priv(indio_dev);
int ret, lpf_idx, hpf_idx;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
switch (mask) {
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
@@ -408,7 +406,7 @@ static int adxrs290_write_raw(struct iio_dev *indio_dev,
break;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
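
Unlike the old iio_device_claim_direct_mode(), which returned 0 or a negative error, iio_device_claim_direct() returns a bool (true when direct mode was claimed), hence the inverted tests and the explicit -EBUSY. The converted call shape, in isolation:

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	ret = my_hw_read(st);	/* hypothetical device access */

	iio_device_release_direct(indio_dev);
	return ret;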
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index f84438e0c42c..a1d8d3cb301b 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -95,12 +95,10 @@ static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev,
struct spi_transfer xfers[] = {
{
.tx_buf = &st->tx,
- .bits_per_word = 8,
.len = sizeof(st->tx),
.cs_change = 1,
}, {
.rx_buf = &st->rx,
- .bits_per_word = 8,
.len = sizeof(st->rx),
},
};
@@ -169,12 +167,10 @@ static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val)
struct spi_transfer xfers[] = {
{
.tx_buf = &st->tx,
- .bits_per_word = 8,
.len = sizeof(st->tx),
.cs_change = 1,
}, {
.rx_buf = &st->rx,
- .bits_per_word = 8,
.len = sizeof(st->rx),
},
};
@@ -209,7 +205,6 @@ static int adxrs450_spi_initial(struct adxrs450_state *st,
struct spi_transfer xfers = {
.tx_buf = &st->tx,
.rx_buf = &st->rx,
- .bits_per_word = 8,
.len = sizeof(st->tx),
};
@@ -446,7 +441,7 @@ static int adxrs450_probe(struct spi_device *spi)
static const struct spi_device_id adxrs450_id[] = {
{"adxrs450", ID_ADXRS450},
{"adxrs453", ID_ADXRS453},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adxrs450_id);
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index e6caab49f98a..1fb8a7969c25 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -41,7 +41,7 @@ static void bmg160_i2c_remove(struct i2c_client *client)
static const struct acpi_device_id bmg160_acpi_match[] = {
{"BMG0160", 0},
- {},
+ { }
};
MODULE_DEVICE_TABLE(acpi, bmg160_acpi_match);
@@ -50,7 +50,7 @@ static const struct i2c_device_id bmg160_i2c_id[] = {
{ "bmg160" },
{ "bmi055_gyro" },
{ "bmi088_gyro" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, bmg160_i2c_id);
diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c
index ac04b3b1b554..6aecc5eb8347 100644
--- a/drivers/iio/gyro/bmg160_spi.c
+++ b/drivers/iio/gyro/bmg160_spi.c
@@ -36,7 +36,7 @@ static const struct spi_device_id bmg160_spi_id[] = {
{"bmg160", 0},
{"bmi055_gyro", 0},
{"bmi088_gyro", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, bmg160_spi_id);
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 54b6f6fbdcaa..c43990c518f7 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -375,7 +375,7 @@ static const struct platform_device_id hid_gyro_3d_ids[] = {
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200076",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_gyro_3d_ids);
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index d66224bed8e3..16553948c5c3 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -684,7 +684,7 @@ mpu3050_get_mount_matrix(const struct iio_dev *indio_dev,
static const struct iio_chan_spec_ext_info mpu3050_ext_info[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, mpu3050_get_mount_matrix),
- { },
+ { }
};
#define MPU3050_AXIS_CHANNEL(axis, index) \
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 29ecfa6fd633..8e284f47242c 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -95,7 +95,7 @@ static void mpu3050_i2c_remove(struct i2c_client *client)
*/
static const struct i2c_device_id mpu3050_i2c_id[] = {
{ "mpu3050" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, mpu3050_i2c_id);
@@ -103,7 +103,7 @@ static const struct of_device_id mpu3050_i2c_of_match[] = {
{ .compatible = "invensense,mpu3050", .data = "mpu3050" },
/* Deprecated vendor ID from the Input driver */
{ .compatible = "invn,mpu3050", .data = "mpu3050" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, mpu3050_i2c_of_match);
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index d4b11bdba666..aef5ec8f9dee 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -54,7 +54,7 @@ static const struct of_device_id st_gyro_of_match[] = {
.compatible = "st,lsm9ds0-gyro",
.data = LSM9DS0_GYRO_DEV_NAME,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_gyro_of_match);
@@ -102,7 +102,7 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{ LSM9DS0_GYRO_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index 811f712711f5..f645da157372 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -59,7 +59,7 @@ static const struct of_device_id st_gyro_of_match[] = {
.compatible = "st,lsm9ds0-gyro",
.data = LSM9DS0_GYRO_DEV_NAME,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_gyro_of_match);
@@ -107,7 +107,7 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{ LSM9DS0_GYRO_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 13e1dd4dd62c..1582cfc03579 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -411,7 +411,7 @@ static const struct regmap_config afe4403_regmap_config = {
static const struct of_device_id afe4403_of_match[] = {
{ .compatible = "ti,afe4403", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, afe4403_of_match);
@@ -574,7 +574,7 @@ static int afe4403_probe(struct spi_device *spi)
static const struct spi_device_id afe4403_ids[] = {
{ "afe4403", 0 },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(spi, afe4403_ids);
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index d49e1572a439..99ff68aed27c 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -419,7 +419,7 @@ static const struct regmap_config afe4404_regmap_config = {
static const struct of_device_id afe4404_of_match[] = {
{ .compatible = "ti,afe4404", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, afe4404_of_match);
@@ -581,7 +581,7 @@ static int afe4404_probe(struct i2c_client *client)
static const struct i2c_device_id afe4404_ids[] = {
{ "afe4404" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(i2c, afe4404_ids);
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index e08d143a707c..846664a4ee90 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -483,7 +483,7 @@ static void max30100_remove(struct i2c_client *client)
static const struct i2c_device_id max30100_id[] = {
{ "max30100" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, max30100_id);
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 1d074eb6a8c5..f5f29d2fec57 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -484,11 +484,11 @@ any_mode_retry:
* things cannot concurrently change. And we just keep
* trying until we get one of the modes...
*/
- if (iio_device_claim_direct_mode(indio_dev))
+ if (!iio_device_claim_direct(indio_dev))
goto any_mode_retry;
ret = max30102_get_temp(data, val, true);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
} else {
ret = max30102_get_temp(data, val, false);
iio_device_release_buffer_mode(indio_dev);
@@ -615,7 +615,7 @@ static const struct i2c_device_id max30102_id[] = {
{ "max30101", max30105 },
{ "max30102", max30102 },
{ "max30105", max30105 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, max30102_id);
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 2323974b805c..f021c3e6d886 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -253,7 +253,7 @@ static int am2315_probe(struct i2c_client *client)
static const struct i2c_device_id am2315_i2c_id[] = {
{ "am2315" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, am2315_i2c_id);
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index a303f704b7ed..c2b36e682e06 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -13,6 +13,7 @@
* https://www.ti.com/product/HDC1080/datasheet
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -206,26 +207,20 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW: {
int ret;
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
if (chan->type == IIO_CURRENT) {
*val = hdc100x_get_heater_status(data);
- ret = IIO_VAL_INT;
- } else {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret) {
- mutex_unlock(&data->lock);
- return ret;
- }
-
- ret = hdc100x_get_measurement(data, chan);
- iio_device_release_direct_mode(indio_dev);
- if (ret >= 0) {
- *val = ret;
- ret = IIO_VAL_INT;
- }
+ return IIO_VAL_INT;
}
- mutex_unlock(&data->lock);
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = hdc100x_get_measurement(data, chan);
+ iio_device_release_direct(indio_dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
}
case IIO_CHAN_INFO_INT_TIME:
*val = 0;
@@ -256,26 +251,23 @@ static int hdc100x_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct hdc100x_data *data = iio_priv(indio_dev);
- int ret = -EINVAL;
switch (mask) {
- case IIO_CHAN_INFO_INT_TIME:
+ case IIO_CHAN_INFO_INT_TIME: {
if (val != 0)
return -EINVAL;
- mutex_lock(&data->lock);
- ret = hdc100x_set_it_time(data, chan->address, val2);
- mutex_unlock(&data->lock);
- return ret;
- case IIO_CHAN_INFO_RAW:
+ guard(mutex)(&data->lock);
+ return hdc100x_set_it_time(data, chan->address, val2);
+ }
+ case IIO_CHAN_INFO_RAW: {
if (chan->type != IIO_CURRENT || val2 != 0)
return -EINVAL;
- mutex_lock(&data->lock);
- ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_HEATER_EN,
- val ? HDC100X_REG_CONFIG_HEATER_EN : 0);
- mutex_unlock(&data->lock);
- return ret;
+ guard(mutex)(&data->lock);
+ return hdc100x_update_config(data, HDC100X_REG_CONFIG_HEATER_EN,
+ val ? HDC100X_REG_CONFIG_HEATER_EN : 0);
+ }
default:
return -EINVAL;
}
@@ -284,27 +276,19 @@ static int hdc100x_write_raw(struct iio_dev *indio_dev,
static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
{
struct hdc100x_data *data = iio_priv(indio_dev);
- int ret;
/* Buffer is enabled. First set ACQ Mode, then attach poll func */
- mutex_lock(&data->lock);
- ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
- HDC100X_REG_CONFIG_ACQ_MODE);
- mutex_unlock(&data->lock);
-
- return ret;
+ guard(mutex)(&data->lock);
+ return hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
+ HDC100X_REG_CONFIG_ACQ_MODE);
}
static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
{
struct hdc100x_data *data = iio_priv(indio_dev);
- int ret;
- mutex_lock(&data->lock);
- ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
- mutex_unlock(&data->lock);
-
- return ret;
+ guard(mutex)(&data->lock);
+ return hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
}
static const struct iio_buffer_setup_ops hdc_buffer_setup_ops = {
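The hdc100x conversion leans on guard(mutex) from the newly included linux/cleanup.h: the mutex is taken at the declaration and released automatically when the scope is left, which is what makes the direct returns above safe. A minimal sketch, with a hypothetical example_data type:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct example_data {
	struct mutex lock;
	unsigned int cached;
};

static int example_update(struct example_data *data, unsigned int val)
{
	guard(mutex)(&data->lock);	/* unlocked on every return below */

	if (val > 100)
		return -EINVAL;		/* lock dropped here */
	data->cached = val;
	return 0;			/* and here */
}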
diff --git a/drivers/iio/humidity/hdc2010.c b/drivers/iio/humidity/hdc2010.c
index f5867659e00f..894a8b4ab193 100644
--- a/drivers/iio/humidity/hdc2010.c
+++ b/drivers/iio/humidity/hdc2010.c
@@ -169,13 +169,12 @@ static int hdc2010_read_raw(struct iio_dev *indio_dev,
*val = hdc2010_get_heater_status(data);
return IIO_VAL_INT;
}
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&data->lock);
ret = hdc2010_get_prim_measurement_word(data, chan);
mutex_unlock(&data->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
*val = ret;
@@ -184,13 +183,12 @@ static int hdc2010_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PEAK: {
int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&data->lock);
ret = hdc2010_get_peak_measurement_byte(data, chan);
mutex_unlock(&data->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
/* Scaling up the value so we can use same offset as RAW */
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index a40e1eb6e98c..be2338d5f407 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -276,7 +276,7 @@ static const struct platform_device_id hid_humidity_ids[] = {
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200032",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_humidity_ids);
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index 0be11470730c..bfeb0a60d3af 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -418,31 +418,22 @@ static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
return IIO_VAL_INT;
}
-static int hts221_read_raw(struct iio_dev *iio_dev,
- struct iio_chan_spec const *ch,
- int *val, int *val2, long mask)
+static int __hts221_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *ch,
+ int *val, int *val2, long mask)
{
struct hts221_hw *hw = iio_priv(iio_dev);
- int ret;
-
- ret = iio_device_claim_direct_mode(iio_dev);
- if (ret)
- return ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = hts221_read_oneshot(hw, ch->address, val);
- break;
+ return hts221_read_oneshot(hw, ch->address, val);
case IIO_CHAN_INFO_SCALE:
- ret = hts221_get_sensor_scale(hw, ch->type, val, val2);
- break;
+ return hts221_get_sensor_scale(hw, ch->type, val, val2);
case IIO_CHAN_INFO_OFFSET:
- ret = hts221_get_sensor_offset(hw, ch->type, val, val2);
- break;
+ return hts221_get_sensor_offset(hw, ch->type, val, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
*val = hw->odr;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO: {
u8 idx;
const struct hts221_avg *avg;
@@ -452,64 +443,72 @@ static int hts221_read_raw(struct iio_dev *iio_dev,
avg = &hts221_avg_list[HTS221_SENSOR_H];
idx = hw->sensors[HTS221_SENSOR_H].cur_avg_idx;
*val = avg->avg_avl[idx];
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_TEMP:
avg = &hts221_avg_list[HTS221_SENSOR_T];
idx = hw->sensors[HTS221_SENSOR_T].cur_avg_idx;
*val = avg->avg_avl[idx];
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
}
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+}
+
+static int hts221_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *ch,
+ int *val, int *val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
- iio_device_release_direct_mode(iio_dev);
+ ret = __hts221_read_raw(iio_dev, ch, val, val2, mask);
+
+ iio_device_release_direct(iio_dev);
return ret;
}
-static int hts221_write_raw(struct iio_dev *iio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int __hts221_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, long mask)
{
struct hts221_hw *hw = iio_priv(iio_dev);
- int ret;
-
- ret = iio_device_claim_direct_mode(iio_dev);
- if (ret)
- return ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = hts221_update_odr(hw, val);
- break;
+ return hts221_update_odr(hw, val);
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
switch (chan->type) {
case IIO_HUMIDITYRELATIVE:
- ret = hts221_update_avg(hw, HTS221_SENSOR_H, val);
- break;
+ return hts221_update_avg(hw, HTS221_SENSOR_H, val);
case IIO_TEMP:
- ret = hts221_update_avg(hw, HTS221_SENSOR_T, val);
- break;
+ return hts221_update_avg(hw, HTS221_SENSOR_T, val);
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+}
+
+static int hts221_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
+
+ ret = __hts221_write_raw(iio_dev, chan, val, mask);
- iio_device_release_direct_mode(iio_dev);
+ iio_device_release_direct(iio_dev);
return ret;
}
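The hts221 rework shows the pattern this series uses when a function has many exit paths: the switch moves into an unlocked __helper where each case can return directly, and a thin wrapper owns the claim/release. Sketch with hypothetical foo_* names:

static int __foo_read_raw(struct iio_dev *iio_dev, int *val, long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		return foo_read_oneshot(iio_dev, val);	/* hypothetical */
	default:
		return -EINVAL;
	}
}

static int foo_read_raw(struct iio_dev *iio_dev, int *val, long mask)
{
	int ret;

	if (!iio_device_claim_direct(iio_dev))
		return -EBUSY;
	ret = __foo_read_raw(iio_dev, val, mask);
	iio_device_release_direct(iio_dev);
	return ret;
}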
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index 87a8e3c8d277..cbaa7d1af6c4 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -42,19 +42,19 @@ static int hts221_i2c_probe(struct i2c_client *client)
static const struct acpi_device_id hts221_acpi_match[] = {
{"SMO9100", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, hts221_acpi_match);
static const struct of_device_id hts221_i2c_of_match[] = {
{ .compatible = "st,hts221", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, hts221_i2c_of_match);
static const struct i2c_device_id hts221_i2c_id_table[] = {
{ HTS221_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(i2c, hts221_i2c_id_table);
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
index 00154b9d66b5..e6fef2acd523 100644
--- a/drivers/iio/humidity/hts221_spi.c
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -42,13 +42,13 @@ static int hts221_spi_probe(struct spi_device *spi)
static const struct of_device_id hts221_spi_of_match[] = {
{ .compatible = "st,hts221", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, hts221_spi_of_match);
static const struct spi_device_id hts221_spi_id_table[] = {
{ HTS221_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, hts221_spi_id_table);
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index 6402e393edb8..7f1775bd26fd 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -232,14 +232,14 @@ static int htu21_probe(struct i2c_client *client)
static const struct i2c_device_id htu21_id[] = {
{"htu21", HTU21},
{"ms8607-humidity", MS8607},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, htu21_id);
static const struct of_device_id htu21_of_match[] = {
{ .compatible = "meas,htu21", },
{ .compatible = "meas,ms8607-humidity", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, htu21_of_match);
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 0ea072a4c966..d160147cce0b 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -39,34 +39,29 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value,
struct spi_transfer xfers[] = {
{
.tx_buf = adis->tx,
- .bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
- .bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
- .bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 6,
- .bits_per_word = 8,
.len = 2,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 8,
- .bits_per_word = 8,
.len = 2,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
@@ -133,14 +128,12 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val,
struct spi_transfer xfers[] = {
{
.tx_buf = adis->tx,
- .bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
- .bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay.value = adis->data->read_delay,
@@ -148,14 +141,12 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val,
}, {
.tx_buf = adis->tx + 4,
.rx_buf = adis->rx,
- .bits_per_word = 8,
.len = 2,
.cs_change = 1,
.delay.value = adis->data->read_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.rx_buf = adis->rx + 2,
- .bits_per_word = 8,
.len = 2,
.delay.value = adis->data->read_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 3086dd536203..90ed3f9bb39c 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -1212,7 +1212,7 @@ static const struct spi_device_id adis16400_id[] = {
{"adis16405", ADIS16400},
{"adis16445", ADIS16445},
{"adis16448", ADIS16448},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adis16400_id);
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index ecf74046fde1..ba1887d36577 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -395,13 +395,13 @@ static int adis16460_probe(struct spi_device *spi)
static const struct spi_device_id adis16460_ids[] = {
{ "adis16460", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, adis16460_ids);
static const struct of_device_id adis16460_of_match[] = {
{ .compatible = "adi,adis16460" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, adis16460_of_match);
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index df8c6cd91169..924395b7e3b4 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -2058,7 +2058,7 @@ static const struct of_device_id adis16475_of_match[] = {
.data = &adis16475_chip_info[ADIS16577_2] },
{ .compatible = "adi,adis16577-3",
.data = &adis16475_chip_info[ADIS16577_3] },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, adis16475_of_match);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 727e0a11eac1..543d5c4bfb11 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -1852,7 +1852,7 @@ static const struct of_device_id adis16480_of_match[] = {
{ .compatible = "adi,adis16547-1" },
{ .compatible = "adi,adis16547-2" },
{ .compatible = "adi,adis16547-3" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, adis16480_of_match);
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index fdfc0538734c..cd3db2388164 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -49,12 +49,10 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
tx[1] = 0;
adis->xfer[0].tx_buf = tx;
- adis->xfer[0].bits_per_word = 8;
adis->xfer[0].len = 2;
if (adis->data->burst_max_speed_hz)
adis->xfer[0].speed_hz = adis->data->burst_max_speed_hz;
adis->xfer[1].rx_buf = adis->buffer;
- adis->xfer[1].bits_per_word = 8;
adis->xfer[1].len = burst_length;
if (adis->data->burst_max_speed_hz)
adis->xfer[1].speed_hz = adis->data->burst_max_speed_hz;
@@ -100,7 +98,6 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
spi_message_init(&adis->msg);
for (j = 0; j <= scan_count; j++) {
- adis->xfer[j].bits_per_word = 8;
if (j != scan_count)
adis->xfer[j].cs_change = 1;
adis->xfer[j].len = 2;
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index 214503fa4af5..9fa3a19a8977 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -39,7 +39,7 @@ static int bmi160_i2c_probe(struct i2c_client *client)
static const struct i2c_device_id bmi160_i2c_id[] = {
{ "bmi120" },
{ "bmi160" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, bmi160_i2c_id);
@@ -55,14 +55,14 @@ static const struct acpi_device_id bmi160_acpi_match[] = {
{"10EC5280", 0},
{"BMI0120", 0},
{"BMI0160", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
static const struct of_device_id bmi160_of_match[] = {
{ .compatible = "bosch,bmi120" },
{ .compatible = "bosch,bmi160" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, bmi160_of_match);
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index 8fbaab22db81..ebb586904215 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -36,21 +36,21 @@ static int bmi160_spi_probe(struct spi_device *spi)
static const struct spi_device_id bmi160_spi_id[] = {
{"bmi120", 0},
{"bmi160", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, bmi160_spi_id);
static const struct acpi_device_id bmi160_acpi_match[] = {
{"BMI0120", 0},
{"BMI0160", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
static const struct of_device_id bmi160_of_match[] = {
{ .compatible = "bosch,bmi120" },
{ .compatible = "bosch,bmi160" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, bmi160_of_match);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index 18787a43477b..f893dbe69965 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -426,7 +426,7 @@ int inv_icm42600_set_temp_conf(struct inv_icm42600_state *st, bool enable,
int inv_icm42600_debugfs_reg(struct iio_dev *indio_dev, unsigned int reg,
unsigned int writeval, unsigned int *readval);
-int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
+int inv_icm42600_core_probe(struct regmap *regmap, int chip,
inv_icm42600_bus_setup bus_setup);
struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 388520ec60b5..e6cd9dcb0687 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -157,7 +157,7 @@ static const struct iio_chan_spec_ext_info inv_icm42600_accel_ext_infos[] = {
&inv_icm42600_accel_power_mode_enum),
IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE,
&inv_icm42600_accel_power_mode_enum),
- {},
+ { }
};
static const struct iio_chan_spec inv_icm42600_accel_channels[] = {
@@ -685,11 +685,10 @@ static int inv_icm42600_accel_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = inv_icm42600_accel_read_sensor(indio_dev, chan, &data);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
*val = data;
@@ -747,20 +746,18 @@ static int inv_icm42600_accel_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = inv_icm42600_accel_write_scale(indio_dev, val, val2);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return inv_icm42600_accel_write_odr(indio_dev, val, val2);
case IIO_CHAN_INFO_CALIBBIAS:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = inv_icm42600_accel_write_offset(st, chan, val, val2);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index ef9875d3b79d..63d46619ebfa 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -683,12 +683,13 @@ static void inv_icm42600_disable_pm(void *_data)
pm_runtime_disable(dev);
}
-int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
+int inv_icm42600_core_probe(struct regmap *regmap, int chip,
inv_icm42600_bus_setup bus_setup)
{
struct device *dev = regmap_get_device(regmap);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct inv_icm42600_state *st;
- int irq_type;
+ int irq, irq_type;
bool open_drain;
int ret;
@@ -697,6 +698,15 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
return -ENODEV;
}
+ /* get INT1, the only supported interrupt, or fall back to the first one */
+ irq = fwnode_irq_get_byname(fwnode, "INT1");
+ if (irq < 0 && irq != -EPROBE_DEFER) {
+ dev_info(dev, "no INT1 interrupt defined, falling back to first interrupt\n");
+ irq = fwnode_irq_get(fwnode, 0);
+ }
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "error missing INT1 interrupt\n");
+
irq_type = irq_get_trigger_type(irq);
if (!irq_type)
irq_type = IRQF_TRIGGER_FALLING;
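With the irq argument gone from inv_icm42600_core_probe(), the core resolves the interrupt itself from the firmware node. fwnode_irq_get_byname() and fwnode_irq_get() are the stock linux/property.h helpers; a condensed sketch of the lookup added above:

static int example_get_irq(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int irq;

	/* prefer the interrupt named "INT1" in the firmware description */
	irq = fwnode_irq_get_byname(fwnode, "INT1");
	if (irq < 0 && irq != -EPROBE_DEFER)
		irq = fwnode_irq_get(fwnode, 0);	/* first interrupt */

	return irq;	/* negative errno if none is described */
}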
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index 591ed78a55bb..b4d7ce1432a4 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -57,7 +57,7 @@ enum inv_icm42600_gyro_scan {
static const struct iio_chan_spec_ext_info inv_icm42600_gyro_ext_infos[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, inv_icm42600_get_mount_matrix),
- {},
+ { }
};
static const struct iio_chan_spec inv_icm42600_gyro_channels[] = {
@@ -591,11 +591,10 @@ static int inv_icm42600_gyro_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = inv_icm42600_gyro_read_sensor(st, chan, &data);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
*val = data;
@@ -653,20 +652,18 @@ static int inv_icm42600_gyro_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = inv_icm42600_gyro_write_scale(indio_dev, val, val2);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return inv_icm42600_gyro_write_odr(indio_dev, val, val2);
case IIO_CHAN_INFO_CALIBBIAS:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = inv_icm42600_gyro_write_offset(st, chan, val, val2);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
index 04e440fe023a..7e4d3ea68721 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
@@ -67,8 +67,7 @@ static int inv_icm42600_probe(struct i2c_client *client)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- return inv_icm42600_core_probe(regmap, chip, client->irq,
- inv_icm42600_i2c_bus_setup);
+ return inv_icm42600_core_probe(regmap, chip, inv_icm42600_i2c_bus_setup);
}
/*
@@ -110,7 +109,7 @@ static const struct of_device_id inv_icm42600_of_matches[] = {
.compatible = "invensense,icm42631",
.data = (void *)INV_CHIP_ICM42631,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, inv_icm42600_of_matches);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
index 2bd2c4c8e50c..13e2e7d38638 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
@@ -64,8 +64,7 @@ static int inv_icm42600_probe(struct spi_device *spi)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- return inv_icm42600_core_probe(regmap, chip, spi->irq,
- inv_icm42600_spi_bus_setup);
+ return inv_icm42600_core_probe(regmap, chip, inv_icm42600_spi_bus_setup);
}
/*
@@ -107,7 +106,7 @@ static const struct of_device_id inv_icm42600_of_matches[] = {
.compatible = "invensense,icm42631",
.data = (void *)INV_CHIP_ICM42631,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, inv_icm42600_of_matches);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
index 213cce1c3111..988f227f6563 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
@@ -56,27 +56,28 @@ int inv_icm42600_temp_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = inv_icm42600_temp_read(st, &temp);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
*val = temp;
return IIO_VAL_INT;
/*
* T°C = (temp / 132.48) + 25
- * Tm°C = 1000 * ((temp * 100 / 13248) + 25)
+ * Tm°C = 1000 * ((temp / 132.48) + 25)
+ * Tm°C = 7.548309 * temp + 25000
+ * Tm°C = (temp + 3312) * 7.548309
* scale: 100000 / 13248 ~= 7.548309
- * offset: 25000
+ * offset: 3312
*/
case IIO_CHAN_INFO_SCALE:
*val = 7;
*val2 = 548309;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_OFFSET:
- *val = 25000;
+ *val = 3312;
return IIO_VAL_INT;
default:
return -EINVAL;
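Sanity check on the corrected pair, since IIO userspace computes (raw + offset) * scale: at raw = 0 the result must be 25 °C. With the old offset of 25000 that formula gave roughly 188 °C; that offset only made sense when added after scaling. Illustrative helper, not part of the patch:

#include <linux/math64.h>

/* scale = 7.548309 m°C/LSB, offset = 3312 LSB */
static s64 example_temp_milli_c(s16 raw)
{
	return div_s64((s64)(raw + 3312) * 7548309, 1000000);
}
/* example_temp_milli_c(0) ~= 25000 m°C, i.e. 25 °C */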
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 373e59f6d91a..a9bcf02e5b43 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -39,7 +39,7 @@ static const struct dmi_system_id inv_mpu_dev_list[] = {
},
},
/* Add more matching tables here... */
- {}
+ { }
};
static int asus_acpi_get_sensor_info(struct acpi_device *adev,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 5bcd5e797046..b8656c02354a 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -755,13 +755,12 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
ret = inv_mpu6050_read_channel_data(indio_dev, chan, val);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
@@ -895,9 +894,8 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
* we should only update scale when the chip is disabled, i.e.
* not running
*/
- result = iio_device_claim_direct_mode(indio_dev);
- if (result)
- return result;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
result = pm_runtime_resume_and_get(pdev);
@@ -944,7 +942,7 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
pm_runtime_put_autosuspend(pdev);
error_write_raw_unlock:
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return result;
}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 91d77f94d204..8dc61812a8fc 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -192,7 +192,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"iam20680", INV_IAM20680},
{"iam20680hp", INV_IAM20680HP},
{"iam20680ht", INV_IAM20680HT},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
@@ -276,7 +276,7 @@ MODULE_DEVICE_TABLE(of, inv_of_match);
static const struct acpi_device_id inv_acpi_match[] = {
{"INVN6500", INV_MPU6500},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 20de6eb5cd35..1f4c62142b60 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -83,7 +83,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"iam20680", INV_IAM20680},
{"iam20680hp", INV_IAM20680HP},
{"iam20680ht", INV_IAM20680HT},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, inv_mpu_id);
@@ -163,7 +163,7 @@ MODULE_DEVICE_TABLE(of, inv_of_match);
static const struct acpi_device_id inv_acpi_match[] = {
{"INVN6000", INV_MPU6000},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index e19c5d3137c6..2bdfb2619137 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1487,7 +1487,7 @@ static const struct dev_pm_ops kmx61_pm_ops = {
static const struct i2c_device_id kmx61_id[] = {
{ "kmx611021" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, kmx61_id);
diff --git a/drivers/iio/imu/smi240.c b/drivers/iio/imu/smi240.c
index 4492c4d013bd..d159ee59acdd 100644
--- a/drivers/iio/imu/smi240.c
+++ b/drivers/iio/imu/smi240.c
@@ -414,11 +414,10 @@ static int smi240_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = smi240_get_data(data, chan->type, chan->channel2, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
return IIO_VAL_INT;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 96c6106b95ee..c65ad49829e7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -1804,12 +1804,11 @@ static int st_lsm6dsx_read_raw(struct iio_dev *iio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(iio_dev);
- if (ret)
- break;
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
ret = st_lsm6dsx_read_oneshot(sensor, ch->address, val);
- iio_device_release_direct_mode(iio_dev);
+ iio_device_release_direct(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = sensor->odr / 1000;
@@ -1834,11 +1833,10 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
int val, int val2, long mask)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
- int err;
+ int err = 0;
- err = iio_device_claim_direct_mode(iio_dev);
- if (err)
- return err;
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -1860,7 +1858,7 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
break;
}
- iio_device_release_direct_mode(iio_dev);
+ iio_device_release_direct(iio_dev);
return err;
}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 25e1de89b6e4..7c933218036b 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -138,13 +138,13 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,asm330lhhxg1",
.data = (void *)ST_ASM330LHHXG1_ID,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
static const struct acpi_device_id st_lsm6dsx_i2c_acpi_match[] = {
{ "SMO8B30", ST_LSM6DS3TRC_ID, },
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, st_lsm6dsx_i2c_acpi_match);
@@ -173,7 +173,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_ISM330IS_DEV_NAME, ST_ISM330IS_ID },
{ ST_ASM330LHB_DEV_NAME, ST_ASM330LHB_ID },
{ ST_ASM330LHHXG1_DEV_NAME, ST_ASM330LHHXG1_ID },
- {},
+ { }
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
index f968f32890d1..cb5c5d7e1f3d 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c
@@ -17,7 +17,7 @@
static const struct i3c_device_id st_lsm6dsx_i3c_ids[] = {
I3C_DEVICE(0x0104, 0x006C, (void *)ST_LSM6DSO_ID),
I3C_DEVICE(0x0104, 0x006B, (void *)ST_LSM6DSR_ID),
- { /* sentinel */ },
+ { }
};
MODULE_DEVICE_TABLE(i3c, st_lsm6dsx_i3c_ids);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index c1b444520d2a..3c5e65dc0f97 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -558,12 +558,11 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(iio_dev);
- if (ret)
- break;
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
ret = st_lsm6dsx_shub_read_oneshot(sensor, ch, val);
- iio_device_release_direct_mode(iio_dev);
+ iio_device_release_direct(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = sensor->ext_info.slv_odr / 1000;
@@ -614,53 +613,57 @@ st_lsm6dsx_shub_set_full_scale(struct st_lsm6dsx_sensor *sensor,
}
static int
-st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+__st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
int err;
- err = iio_device_claim_direct_mode(iio_dev);
- if (err)
- return err;
-
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ: {
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ struct st_lsm6dsx_sensor *ref_sensor;
+ u8 odr_val;
u16 data;
+ int odr;
val = val * 1000 + val2 / 1000;
err = st_lsm6dsx_shub_get_odr_val(sensor, val, &data);
- if (!err) {
- struct st_lsm6dsx_hw *hw = sensor->hw;
- struct st_lsm6dsx_sensor *ref_sensor;
- u8 odr_val;
- int odr;
-
- ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
- odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val);
- if (odr < 0) {
- err = odr;
- goto release;
- }
-
- sensor->ext_info.slv_odr = val;
- sensor->odr = odr;
- }
- break;
+ if (err)
+ return err;
+
+ ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+ odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val);
+ if (odr < 0)
+ return odr;
+
+ sensor->ext_info.slv_odr = val;
+ sensor->odr = odr;
+ return 0;
}
case IIO_CHAN_INFO_SCALE:
- err = st_lsm6dsx_shub_set_full_scale(sensor, val2);
- break;
+ return st_lsm6dsx_shub_set_full_scale(sensor, val2);
default:
- err = -EINVAL;
- break;
+ return -EINVAL;
}
+}
+
+static int
+st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
-release:
- iio_device_release_direct_mode(iio_dev);
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
- return err;
+ ret = __st_lsm6dsx_shub_write_raw(iio_dev, chan, val, val2, mask);
+
+ iio_device_release_direct(iio_dev);
+
+ return ret;
}
static ssize_t
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 4b4b6d45524f..3389b15df0bc 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -133,7 +133,7 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,asm330lhhxg1",
.data = (void *)ST_ASM330LHHXG1_ID,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -162,7 +162,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_ISM330IS_DEV_NAME, ST_ISM330IS_ID },
{ ST_ASM330LHB_DEV_NAME, ST_ASM330LHB_ID },
{ ST_ASM330LHHXG1_DEV_NAME, ST_ASM330LHHXG1_ID },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
index 8cc071463249..4232a9d800fc 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
@@ -28,20 +28,20 @@ static const struct of_device_id st_lsm9ds0_of_match[] = {
.compatible = "st,lsm9ds0-imu",
.data = LSM9DS0_IMU_DEV_NAME,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, st_lsm9ds0_of_match);
static const struct i2c_device_id st_lsm9ds0_id_table[] = {
{ LSM303D_IMU_DEV_NAME },
{ LSM9DS0_IMU_DEV_NAME },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, st_lsm9ds0_id_table);
static const struct acpi_device_id st_lsm9ds0_acpi_match[] = {
{"ACCL0001", (kernel_ulong_t)LSM303D_IMU_DEV_NAME},
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, st_lsm9ds0_acpi_match);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
index 806e55f75f65..acea8a0757d7 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
@@ -28,14 +28,14 @@ static const struct of_device_id st_lsm9ds0_of_match[] = {
.compatible = "st,lsm9ds0-imu",
.data = LSM9DS0_IMU_DEV_NAME,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, st_lsm9ds0_of_match);
static const struct spi_device_id st_lsm9ds0_id_table[] = {
{ LSM303D_IMU_DEV_NAME },
{ LSM9DS0_IMU_DEV_NAME },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, st_lsm9ds0_id_table);
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index a43c8d1bb3d0..c1eb9ef9db08 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -381,6 +381,34 @@ int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, "IIO_BACKEND");
/**
+ * iio_backend_data_source_get - Get current data source
+ * @back: Backend device
+ * @chan: Channel number
+ * @data: Pointer to receive the current source value
+ *
+ * A given backend may have different sources to stream/sync data. This allows
+ * the caller to know which source is in use.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_data_source_get(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source *data)
+{
+ int ret;
+
+ ret = iio_backend_op_call(back, data_source_get, chan, data);
+ if (ret)
+ return ret;
+
+ if (*data >= IIO_BACKEND_DATA_SOURCE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_get, "IIO_BACKEND");
+
+/**
* iio_backend_set_sampling_freq - Set channel sampling rate
* @back: Backend device
* @chan: Channel number
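Possible caller shape for the new getter (sketch; example_log_source is hypothetical). The range check in the core guarantees that a successful return carries a valid enum value:

static int example_log_source(struct iio_backend *back, unsigned int chan)
{
	enum iio_backend_data_source src;
	int ret;

	ret = iio_backend_data_source_get(back, chan, &src);
	if (ret)
		return ret;

	/* guaranteed here: src < IIO_BACKEND_DATA_SOURCE_MAX */
	return src;
}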
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index b9f4113ae5fc..178e99b111de 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
+#include <linux/wordpart.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
@@ -966,8 +967,10 @@ static ssize_t iio_write_channel_info(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, fract_mult = 100000;
int integer, fract = 0;
+ long long integer64;
bool is_char = false;
bool scale_db = false;
+ bool is_64bit = false;
/* Assumes decimal - precision based on number of digits */
if (!indio_dev->info->write_raw)
@@ -991,6 +994,9 @@ static ssize_t iio_write_channel_info(struct device *dev,
case IIO_VAL_CHAR:
is_char = true;
break;
+ case IIO_VAL_INT_64:
+ is_64bit = true;
+ break;
default:
return -EINVAL;
}
@@ -1001,6 +1007,13 @@ static ssize_t iio_write_channel_info(struct device *dev,
if (sscanf(buf, "%c", &ch) != 1)
return -EINVAL;
integer = ch;
+ } else if (is_64bit) {
+ ret = kstrtoll(buf, 0, &integer64);
+ if (ret)
+ return ret;
+
+ fract = upper_32_bits(integer64);
+ integer = lower_32_bits(integer64);
} else {
ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
scale_db);
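The IIO_VAL_INT_64 path threads a 64-bit value through the existing (integer, fract) pair: low 32 bits in integer, high 32 bits in fract. A write_raw handler on the receiving side would reassemble it roughly as below (sketch, foo_* names hypothetical):

static int foo_write_raw(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val, int val2, long mask)
{
	/* val carries the low word, val2 the high word */
	s64 value = (s64)(((u64)(u32)val2 << 32) | (u32)val);

	return foo_set_value64(indio_dev, chan, value);	/* hypothetical */
}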
@@ -2144,17 +2157,19 @@ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
/**
- * iio_device_claim_direct_mode - Keep device in direct mode
+ * __iio_device_claim_direct - Keep device in direct mode
* @indio_dev: the iio_dev associated with the device
*
* If the device is in direct mode it is guaranteed to stay
- * that way until iio_device_release_direct_mode() is called.
+ * that way until __iio_device_release_direct() is called.
*
- * Use with iio_device_release_direct_mode()
+ * Use with __iio_device_release_direct().
*
- * Returns: 0 on success, -EBUSY on failure.
+ * Drivers should only call iio_device_claim_direct().
+ *
+ * Returns: true on success, false on failure.
*/
-int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
+bool __iio_device_claim_direct(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -2162,26 +2177,28 @@ int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
if (iio_buffer_enabled(indio_dev)) {
mutex_unlock(&iio_dev_opaque->mlock);
- return -EBUSY;
+ return false;
}
- return 0;
+ return true;
}
-EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
+EXPORT_SYMBOL_GPL(__iio_device_claim_direct);
/**
- * iio_device_release_direct_mode - releases claim on direct mode
+ * __iio_device_release_direct - releases claim on direct mode
* @indio_dev: the iio_dev associated with the device
*
* Release the claim. Device is no longer guaranteed to stay
* in direct mode.
*
- * Use with iio_device_claim_direct_mode()
+ * Drivers should only call iio_device_release_direct().
+ *
+ * Use with __iio_device_claim_direct()
*/
-void iio_device_release_direct_mode(struct iio_dev *indio_dev)
+void __iio_device_release_direct(struct iio_dev *indio_dev)
{
mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
-EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
+EXPORT_SYMBOL_GPL(__iio_device_release_direct);
/**
* iio_device_claim_buffer_mode - Keep device in buffer mode
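This rename also changes the contract driving every driver hunk above: the claim helper now returns bool instead of 0/-EBUSY, so each caller supplies its own error code. The canonical shape, as a sketch with hypothetical foo_* names:

static int foo_read_one(struct iio_dev *indio_dev, int *val)
{
	int ret;

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;	/* error code now chosen by the caller */
	ret = foo_read_sensor(indio_dev, val);	/* hypothetical */
	iio_device_release_direct(indio_dev);
	return ret;
}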
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 2d91caf24dd0..032e6cae8b80 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -230,7 +230,7 @@ static int acpi_als_add(struct acpi_device *device)
static const struct acpi_device_id acpi_als_device_ids[] = {
{"ACPI0008", 0},
- {},
+ { }
};
MODULE_DEVICE_TABLE(acpi, acpi_als_device_ids);
diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c
index 9240983a6cc4..e321f89c5340 100644
--- a/drivers/iio/light/adux1020.c
+++ b/drivers/iio/light/adux1020.c
@@ -820,7 +820,7 @@ static int adux1020_probe(struct i2c_client *client)
static const struct i2c_device_id adux1020_id[] = {
{ "adux1020" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, adux1020_id);
diff --git a/drivers/iio/light/al3000a.c b/drivers/iio/light/al3000a.c
index e2fbb1270040..6f301c067045 100644
--- a/drivers/iio/light/al3000a.c
+++ b/drivers/iio/light/al3000a.c
@@ -85,12 +85,17 @@ static void al3000a_set_pwr_off(void *_data)
static int al3000a_init(struct al3000a_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
ret = al3000a_set_pwr_on(data);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, al3000a_set_pwr_off, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add action\n");
+
ret = regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_RESET);
if (ret)
return ret;
@@ -157,10 +162,6 @@ static int al3000a_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "failed to init ALS\n");
- ret = devm_add_action_or_reset(dev, al3000a_set_pwr_off, data);
- if (ret)
- return dev_err_probe(dev, ret, "failed to add action\n");
-
return devm_iio_device_register(dev, indio_dev);
}
@@ -189,7 +190,7 @@ MODULE_DEVICE_TABLE(i2c, al3000a_id);
static const struct of_device_id al3000a_of_match[] = {
{ .compatible = "dynaimage,al3000a" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, al3000a_of_match);
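The al3000a reorder moves devm_add_action_or_reset() to directly after power-on instead of the end of probe, so a failure in any later init step still powers the chip off via devm unwinding (or via the _or_reset path if registration itself fails). Sketch with hypothetical example_* helpers:

static int example_init(struct example_als *data, struct device *dev)
{
	int ret;

	ret = example_power_on(data);			/* hypothetical */
	if (ret)
		return ret;

	/* register cleanup immediately: covers all later failures */
	ret = devm_add_action_or_reset(dev, example_power_off, data);
	if (ret)
		return ret;	/* example_power_off() already ran */

	return example_configure(data);	/* may now fail safely */
}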
diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c
index 7cbb8b203300..0932fa2b49fa 100644
--- a/drivers/iio/light/al3010.c
+++ b/drivers/iio/light/al3010.c
@@ -17,13 +17,12 @@
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#define AL3010_DRV_NAME "al3010"
-
#define AL3010_REG_SYSTEM 0x00
#define AL3010_REG_DATA_LOW 0x0c
#define AL3010_REG_CONFIG 0x10
@@ -46,8 +45,14 @@ static const int al3010_scales[][2] = {
{0, 1187200}, {0, 296800}, {0, 74200}, {0, 18600}
};
+static const struct regmap_config al3010_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AL3010_REG_CONFIG,
+};
+
struct al3010_data {
- struct i2c_client *client;
+ struct regmap *regmap;
};
static const struct iio_chan_spec al3010_channels[] = {
@@ -69,40 +74,36 @@ static const struct attribute_group al3010_attribute_group = {
.attrs = al3010_attributes,
};
-static int al3010_set_pwr(struct i2c_client *client, bool pwr)
+static int al3010_set_pwr_on(struct al3010_data *data)
{
- u8 val = pwr ? AL3010_CONFIG_ENABLE : AL3010_CONFIG_DISABLE;
- return i2c_smbus_write_byte_data(client, AL3010_REG_SYSTEM, val);
+ return regmap_write(data->regmap, AL3010_REG_SYSTEM, AL3010_CONFIG_ENABLE);
}
static void al3010_set_pwr_off(void *_data)
{
struct al3010_data *data = _data;
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
- al3010_set_pwr(data->client, false);
+ ret = regmap_write(data->regmap, AL3010_REG_SYSTEM, AL3010_CONFIG_DISABLE);
+ if (ret)
+ dev_err(dev, "failed to write system register\n");
}
static int al3010_init(struct al3010_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
- ret = al3010_set_pwr(data->client, true);
- if (ret < 0)
+ ret = al3010_set_pwr_on(data);
+ if (ret)
return ret;
- ret = devm_add_action_or_reset(&data->client->dev,
- al3010_set_pwr_off,
- data);
- if (ret < 0)
+ ret = devm_add_action_or_reset(dev, al3010_set_pwr_off, data);
+ if (ret)
return ret;
-
- ret = i2c_smbus_write_byte_data(data->client, AL3010_REG_CONFIG,
- FIELD_PREP(AL3010_GAIN_MASK,
- AL3XXX_RANGE_3));
- if (ret < 0)
- return ret;
-
- return 0;
+ return regmap_write(data->regmap, AL3010_REG_CONFIG,
+ FIELD_PREP(AL3010_GAIN_MASK, AL3XXX_RANGE_3));
}
static int al3010_read_raw(struct iio_dev *indio_dev,
@@ -110,7 +111,7 @@ static int al3010_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct al3010_data *data = iio_priv(indio_dev);
- int ret;
+ int ret, gain, raw;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -119,21 +120,21 @@ static int al3010_read_raw(struct iio_dev *indio_dev,
* - low byte of output is stored at AL3010_REG_DATA_LOW
* - high byte of output is stored at AL3010_REG_DATA_LOW + 1
*/
- ret = i2c_smbus_read_word_data(data->client,
- AL3010_REG_DATA_LOW);
- if (ret < 0)
+ ret = regmap_read(data->regmap, AL3010_REG_DATA_LOW, &raw);
+ if (ret)
return ret;
- *val = ret;
+
+ *val = raw;
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- ret = i2c_smbus_read_byte_data(data->client,
- AL3010_REG_CONFIG);
- if (ret < 0)
+ ret = regmap_read(data->regmap, AL3010_REG_CONFIG, &gain);
+ if (ret)
return ret;
- ret = FIELD_GET(AL3010_GAIN_MASK, ret);
- *val = al3010_scales[ret][0];
- *val2 = al3010_scales[ret][1];
+ gain = FIELD_GET(AL3010_GAIN_MASK, gain);
+ *val = al3010_scales[gain][0];
+ *val2 = al3010_scales[gain][1];
return IIO_VAL_INT_PLUS_MICRO;
}
@@ -145,7 +146,7 @@ static int al3010_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct al3010_data *data = iio_priv(indio_dev);
- int i;
+ unsigned int i;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -154,9 +155,8 @@ static int al3010_write_raw(struct iio_dev *indio_dev,
val2 != al3010_scales[i][1])
continue;
- return i2c_smbus_write_byte_data(data->client,
- AL3010_REG_CONFIG,
- FIELD_PREP(AL3010_GAIN_MASK, i));
+ return regmap_write(data->regmap, AL3010_REG_CONFIG,
+ FIELD_PREP(AL3010_GAIN_MASK, i));
}
break;
}
@@ -172,59 +172,66 @@ static const struct iio_info al3010_info = {
static int al3010_probe(struct i2c_client *client)
{
struct al3010_data *data;
+ struct device *dev = &client->dev;
struct iio_dev *indio_dev;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- data->client = client;
+ data->regmap = devm_regmap_init_i2c(client, &al3010_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "cannot allocate regmap\n");
indio_dev->info = &al3010_info;
- indio_dev->name = AL3010_DRV_NAME;
+ indio_dev->name = "al3010";
indio_dev->channels = al3010_channels;
indio_dev->num_channels = ARRAY_SIZE(al3010_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
ret = al3010_init(data);
- if (ret < 0) {
- dev_err(&client->dev, "al3010 chip init failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init ALS\n");
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
static int al3010_suspend(struct device *dev)
{
- return al3010_set_pwr(to_i2c_client(dev), false);
+ struct al3010_data *data = iio_priv(dev_get_drvdata(dev));
+
+ al3010_set_pwr_off(data);
+ return 0;
}
static int al3010_resume(struct device *dev)
{
- return al3010_set_pwr(to_i2c_client(dev), true);
+ struct al3010_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return al3010_set_pwr_on(data);
}
static DEFINE_SIMPLE_DEV_PM_OPS(al3010_pm_ops, al3010_suspend, al3010_resume);
static const struct i2c_device_id al3010_id[] = {
{"al3010", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, al3010_id);
static const struct of_device_id al3010_of_match[] = {
{ .compatible = "dynaimage,al3010", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, al3010_of_match);
static struct i2c_driver al3010_driver = {
.driver = {
- .name = AL3010_DRV_NAME,
+ .name = "al3010",
.of_match_table = al3010_of_match,
.pm = pm_sleep_ptr(&al3010_pm_ops),
},
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index 497ea3fe3377..63f5a85912fc 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -15,13 +15,12 @@
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#define AL3320A_DRV_NAME "al3320a"
-
#define AL3320A_REG_CONFIG 0x00
#define AL3320A_REG_STATUS 0x01
#define AL3320A_REG_INT 0x02
@@ -59,8 +58,14 @@ static const int al3320a_scales[][2] = {
{0, 512000}, {0, 128000}, {0, 32000}, {0, 10000}
};
+static const struct regmap_config al3320a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AL3320A_REG_HIGH_THRESH_HIGH,
+};
+
struct al3320a_data {
- struct i2c_client *client;
+ struct regmap *regmap;
};
static const struct iio_chan_spec al3320a_channels[] = {
@@ -82,45 +87,47 @@ static const struct attribute_group al3320a_attribute_group = {
.attrs = al3320a_attributes,
};
-static int al3320a_set_pwr(struct i2c_client *client, bool pwr)
+static int al3320a_set_pwr_on(struct al3320a_data *data)
{
- u8 val = pwr ? AL3320A_CONFIG_ENABLE : AL3320A_CONFIG_DISABLE;
- return i2c_smbus_write_byte_data(client, AL3320A_REG_CONFIG, val);
+ return regmap_write(data->regmap, AL3320A_REG_CONFIG, AL3320A_CONFIG_ENABLE);
}
static void al3320a_set_pwr_off(void *_data)
{
struct al3320a_data *data = _data;
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
- al3320a_set_pwr(data->client, false);
+ ret = regmap_write(data->regmap, AL3320A_REG_CONFIG, AL3320A_CONFIG_DISABLE);
+ if (ret)
+ dev_err(dev, "failed to write config register\n");
}
static int al3320a_init(struct al3320a_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
- ret = al3320a_set_pwr(data->client, true);
-
- if (ret < 0)
+ ret = al3320a_set_pwr_on(data);
+ if (ret)
return ret;
- ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_CONFIG_RANGE,
- FIELD_PREP(AL3320A_GAIN_MASK,
- AL3320A_RANGE_3));
- if (ret < 0)
+ ret = devm_add_action_or_reset(dev, al3320a_set_pwr_off, data);
+ if (ret)
return ret;
- ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_MEAN_TIME,
- AL3320A_DEFAULT_MEAN_TIME);
- if (ret < 0)
+ ret = regmap_write(data->regmap, AL3320A_REG_CONFIG_RANGE,
+ FIELD_PREP(AL3320A_GAIN_MASK, AL3320A_RANGE_3));
+ if (ret)
return ret;
- ret = i2c_smbus_write_byte_data(data->client, AL3320A_REG_WAIT,
- AL3320A_DEFAULT_WAIT_TIME);
- if (ret < 0)
+ ret = regmap_write(data->regmap, AL3320A_REG_MEAN_TIME,
+ AL3320A_DEFAULT_MEAN_TIME);
+ if (ret)
return ret;
- return 0;
+ return regmap_write(data->regmap, AL3320A_REG_WAIT,
+ AL3320A_DEFAULT_WAIT_TIME);
}
static int al3320a_read_raw(struct iio_dev *indio_dev,
@@ -128,7 +135,7 @@ static int al3320a_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct al3320a_data *data = iio_priv(indio_dev);
- int ret;
+ int ret, gain, raw;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -137,21 +144,21 @@ static int al3320a_read_raw(struct iio_dev *indio_dev,
* - low byte of output is stored at AL3320A_REG_DATA_LOW
* - high byte of output is stored at AL3320A_REG_DATA_LOW + 1
*/
- ret = i2c_smbus_read_word_data(data->client,
- AL3320A_REG_DATA_LOW);
- if (ret < 0)
+ ret = regmap_read(data->regmap, AL3320A_REG_DATA_LOW, &raw);
+ if (ret)
return ret;
- *val = ret;
+
+ *val = raw;
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- ret = i2c_smbus_read_byte_data(data->client,
- AL3320A_REG_CONFIG_RANGE);
- if (ret < 0)
+ ret = regmap_read(data->regmap, AL3320A_REG_CONFIG_RANGE, &gain);
+ if (ret)
return ret;
- ret = FIELD_GET(AL3320A_GAIN_MASK, ret);
- *val = al3320a_scales[ret][0];
- *val2 = al3320a_scales[ret][1];
+ gain = FIELD_GET(AL3320A_GAIN_MASK, gain);
+ *val = al3320a_scales[gain][0];
+ *val2 = al3320a_scales[gain][1];
return IIO_VAL_INT_PLUS_MICRO;
}
@@ -163,7 +170,7 @@ static int al3320a_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct al3320a_data *data = iio_priv(indio_dev);
- int i;
+ unsigned int i;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -172,9 +179,8 @@ static int al3320a_write_raw(struct iio_dev *indio_dev,
val2 != al3320a_scales[i][1])
continue;
- return i2c_smbus_write_byte_data(data->client,
- AL3320A_REG_CONFIG_RANGE,
- FIELD_PREP(AL3320A_GAIN_MASK, i));
+ return regmap_write(data->regmap, AL3320A_REG_CONFIG_RANGE,
+ FIELD_PREP(AL3320A_GAIN_MASK, i));
}
break;
}
@@ -190,46 +196,50 @@ static const struct iio_info al3320a_info = {
static int al3320a_probe(struct i2c_client *client)
{
struct al3320a_data *data;
+ struct device *dev = &client->dev;
struct iio_dev *indio_dev;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- data->client = client;
+
+ data->regmap = devm_regmap_init_i2c(client, &al3320a_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "cannot allocate regmap\n");
indio_dev->info = &al3320a_info;
- indio_dev->name = AL3320A_DRV_NAME;
+ indio_dev->name = "al3320a";
indio_dev->channels = al3320a_channels;
indio_dev->num_channels = ARRAY_SIZE(al3320a_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
ret = al3320a_init(data);
if (ret < 0) {
- dev_err(&client->dev, "al3320a chip init failed\n");
+ dev_err(dev, "al3320a chip init failed\n");
return ret;
}
- ret = devm_add_action_or_reset(&client->dev,
- al3320a_set_pwr_off,
- data);
- if (ret < 0)
- return ret;
-
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
static int al3320a_suspend(struct device *dev)
{
- return al3320a_set_pwr(to_i2c_client(dev), false);
+ struct al3320a_data *data = iio_priv(dev_get_drvdata(dev));
+
+ al3320a_set_pwr_off(data);
+ return 0;
}
static int al3320a_resume(struct device *dev)
{
- return al3320a_set_pwr(to_i2c_client(dev), true);
+ struct al3320a_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return al3320a_set_pwr_on(data);
}
static DEFINE_SIMPLE_DEV_PM_OPS(al3320a_pm_ops, al3320a_suspend,
@@ -237,25 +247,25 @@ static DEFINE_SIMPLE_DEV_PM_OPS(al3320a_pm_ops, al3320a_suspend,
static const struct i2c_device_id al3320a_id[] = {
{ "al3320a" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, al3320a_id);
static const struct of_device_id al3320a_of_match[] = {
{ .compatible = "dynaimage,al3320a", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, al3320a_of_match);
static const struct acpi_device_id al3320a_acpi_match[] = {
{"CALS0001"},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, al3320a_acpi_match);
static struct i2c_driver al3320a_driver = {
.driver = {
- .name = AL3320A_DRV_NAME,
+ .name = "al3320a",
.of_match_table = al3320a_of_match,
.pm = pm_sleep_ptr(&al3320a_pm_ops),
.acpi_match_table = al3320a_acpi_match,
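The al3320a conversion above swaps the raw i2c_smbus_*() accessors for a regmap. A minimal sketch of the resulting pattern, assuming an 8-bit register map; the FOO_* names are placeholders, not the driver's identifiers:

#include <linux/i2c.h>
#include <linux/regmap.h>

#define FOO_REG_CONFIG		0x00	/* placeholder register */
#define FOO_CONFIG_ENABLE	0x01	/* placeholder enable value */
#define FOO_REG_LAST		0x23	/* placeholder last register */

static const struct regmap_config foo_regmap_config = {
    .reg_bits = 8,			/* 8-bit register addresses */
    .val_bits = 8,			/* 8-bit register values */
    .max_register = FOO_REG_LAST,
};

static int foo_probe(struct i2c_client *client)
{
    struct regmap *map;

    map = devm_regmap_init_i2c(client, &foo_regmap_config);
    if (IS_ERR(map))
        return dev_err_probe(&client->dev, PTR_ERR(map),
                             "cannot allocate regmap\n");

    /* regmap_write()/regmap_read() replace the smbus helpers */
    return regmap_write(map, FOO_REG_CONFIG, FOO_CONFIG_ENABLE);
}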
diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
index 5ed7e17f49e7..e9b237de180a 100644
--- a/drivers/iio/light/apds9306.c
+++ b/drivers/iio/light/apds9306.c
@@ -831,11 +831,10 @@ static int apds9306_read_raw(struct iio_dev *indio_dev,
* Changing device parameters during ADC operation resets
* the ADC, which has to be avoided.
*/
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = apds9306_read_data(data, val, reg);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
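apds9306 is the first of many conversions here from iio_device_claim_direct_mode() to iio_device_claim_direct(): the new helper returns a bool instead of an errno, so the caller supplies -EBUSY itself. The idiom, sketched with a stub read helper:

#include <linux/errno.h>
#include <linux/iio/iio.h>

static int foo_read_data(struct iio_dev *indio_dev, int *val)
{
    *val = 0;	/* stub standing in for the real register read */
    return 0;
}

/* New claim/release idiom: claim returns bool, caller picks the errno */
static int foo_read(struct iio_dev *indio_dev, int *val)
{
    int ret;

    if (!iio_device_claim_direct(indio_dev))
        return -EBUSY;

    ret = foo_read_data(indio_dev, val);
    iio_device_release_direct(indio_dev);

    return ret;
}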
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index d30441d33703..0003a29bf264 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1157,7 +1157,7 @@ static const struct dev_pm_ops apds9960_pm_ops = {
static const struct i2c_device_id apds9960_id[] = {
{ "apds9960" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, apds9960_id);
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index 37fffce35dd1..68f60dc3c79d 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -16,6 +16,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -418,18 +419,17 @@ static int as73211_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec cons
case IIO_CHAN_INFO_RAW: {
int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = as73211_req_data(data);
if (ret < 0) {
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
ret = i2c_smbus_read_word_data(data->client, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -517,6 +517,16 @@ static int _as73211_write_raw(struct iio_dev *indio_dev,
struct as73211_data *data = iio_priv(indio_dev);
int ret;
+ /* Need to switch to config mode ... */
+ if ((data->osr & AS73211_OSR_DOS_MASK) != AS73211_OSR_DOS_CONFIG) {
+ data->osr &= ~AS73211_OSR_DOS_MASK;
+ data->osr |= AS73211_OSR_DOS_CONFIG;
+
+ ret = i2c_smbus_write_byte_data(data->client, AS73211_REG_OSR, data->osr);
+ if (ret < 0)
+ return ret;
+ }
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ: {
int reg_bits, freq_kHz = val / HZ_PER_KHZ; /* 1024, 2048, ... */
@@ -601,28 +611,14 @@ static int as73211_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec con
struct as73211_data *data = iio_priv(indio_dev);
int ret;
- mutex_lock(&data->mutex);
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- goto error_unlock;
-
- /* Need to switch to config mode ... */
- if ((data->osr & AS73211_OSR_DOS_MASK) != AS73211_OSR_DOS_CONFIG) {
- data->osr &= ~AS73211_OSR_DOS_MASK;
- data->osr |= AS73211_OSR_DOS_CONFIG;
+ guard(mutex)(&data->mutex);
- ret = i2c_smbus_write_byte_data(data->client, AS73211_REG_OSR, data->osr);
- if (ret < 0)
- goto error_release;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = _as73211_write_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
-error_release:
- iio_device_release_direct_mode(indio_dev);
-error_unlock:
- mutex_unlock(&data->mutex);
return ret;
}
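as73211 additionally adopts guard(mutex)() from <linux/cleanup.h>, which releases the lock automatically on every return path; that is why the config-mode switch moves into _as73211_write_raw() and the error labels disappear. The shape of the pattern, with placeholder names:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct foo_data {
    struct mutex lock;	/* placeholder driver state */
    int shadow;
};

/* guard(mutex) unlocks on scope exit, so early returns need no labels */
static int foo_update(struct foo_data *data, int val)
{
    guard(mutex)(&data->lock);

    if (val < 0)
        return -EINVAL;		/* lock released automatically */

    data->shadow = val;
    return 0;
}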
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 4b869fa9e5b1..764f88826fcb 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -22,12 +22,16 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
+#include <linux/gpio/consumer.h>
#define BH1750_POWER_DOWN 0x00
#define BH1750_ONE_TIME_H_RES_MODE 0x20 /* auto-mode for BH1721 */
#define BH1750_CHANGE_INT_TIME_H_BIT 0x40
#define BH1750_CHANGE_INT_TIME_L_BIT 0x60
+/* Reset delay time in microseconds */
+#define BH1750_RESET_DELAY_US 10000 /* 10 ms */
+
enum {
BH1710,
BH1721,
@@ -40,6 +44,7 @@ struct bh1750_data {
struct mutex lock;
const struct bh1750_chip_info *chip_info;
u16 mtreg;
+ struct gpio_desc *reset_gpio;
};
struct bh1750_chip_info {
@@ -248,6 +253,25 @@ static int bh1750_probe(struct i2c_client *client)
data->client = client;
data->chip_info = &bh1750_chip_info_tbl[id->driver_data];
+ /* Get reset GPIO from device tree */
+ data->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_HIGH);
+
+ if (IS_ERR(data->reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(data->reset_gpio),
+ "Failed to get reset GPIO\n");
+
+ /* Perform hardware reset if GPIO is provided */
+ if (data->reset_gpio) {
+ /* Perform reset sequence: low-high */
+ gpiod_set_value_cansleep(data->reset_gpio, 1);
+ fsleep(BH1750_RESET_DELAY_US);
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+ fsleep(BH1750_RESET_DELAY_US);
+
+ dev_dbg(&client->dev, "BH1750 reset completed via GPIO\n");
+ }
+
usec = data->chip_info->mtreg_to_usec * data->chip_info->mtreg_default;
ret = bh1750_change_int_time(data, usec);
if (ret < 0)
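In bh1750, devm_gpiod_get_optional() returns NULL when no reset-gpios property exists, so the pulse is skipped cleanly on boards without the line. A sketch under that assumption; the 10 ms delay mirrors BH1750_RESET_DELAY_US, and the foo_ names are placeholders:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

/* Optional reset line: a NULL descriptor means no reset GPIO is wired */
static int foo_hw_reset(struct device *dev)
{
    struct gpio_desc *reset;

    reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
    if (IS_ERR(reset))
        return PTR_ERR(reset);

    if (reset) {
        gpiod_set_value_cansleep(reset, 1);	/* assert reset */
        fsleep(10000);				/* 10 ms */
        gpiod_set_value_cansleep(reset, 0);	/* release reset */
        fsleep(10000);				/* settle time */
    }

    return 0;
}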
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index 475f44954f61..c7c877d2fe67 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(i2c, bh1780_id);
static const struct of_device_id of_bh1780_match[] = {
{ .compatible = "rohm,bh1780gli", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_bh1780_match);
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index 5b00ad2a014e..e864d2ef036e 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -369,7 +369,7 @@ static void cm3232_remove(struct i2c_client *client)
static const struct i2c_device_id cm3232_id[] = {
{ "cm3232" },
- {}
+ { }
};
static int cm3232_suspend(struct device *dev)
@@ -406,7 +406,7 @@ MODULE_DEVICE_TABLE(i2c, cm3232_id);
static const struct of_device_id cm3232_of_match[] = {
{.compatible = "capella,cm3232"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, cm3232_of_match);
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index 79a64e2ff812..79ad6e2209ca 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -251,13 +251,13 @@ static int cm3323_probe(struct i2c_client *client)
static const struct i2c_device_id cm3323_id[] = {
{ "cm3323" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, cm3323_id);
static const struct of_device_id cm3323_of_match[] = {
{ .compatible = "capella,cm3323", },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, cm3323_of_match);
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index 675c0fd44db4..0c17378e27d1 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -307,7 +307,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(cm3605_dev_pm_ops, cm3605_pm_suspend,
static const struct of_device_id cm3605_of_match[] = {
{.compatible = "capella,cm3605"},
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, cm3605_of_match);
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index 19e529c84e95..815806ceb5c8 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -249,7 +249,7 @@ static const struct platform_device_id cros_ec_light_prox_ids[] = {
{
.name = "cros-ec-light",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids);
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index d56ee217fe53..42859e5b1089 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -700,7 +700,7 @@ MODULE_DEVICE_TABLE(i2c, gp2ap002_id_table);
static const struct of_device_id gp2ap002_of_match[] = {
{ .compatible = "sharp,gp2ap002a00f" },
{ .compatible = "sharp,gp2ap002s00f" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, gp2ap002_of_match);
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 1a352c88598e..c7df4b258e2c 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1283,12 +1283,11 @@ static int gp2ap020a00f_read_raw(struct iio_dev *indio_dev,
int err = -EINVAL;
if (mask == IIO_CHAN_INFO_RAW) {
- err = iio_device_claim_direct_mode(indio_dev);
- if (err)
- return err;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
err = gp2ap020a00f_read_channel(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
}
return err < 0 ? err : IIO_VAL_INT;
}
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index aa4c72d4849e..830e5ae7f34a 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -456,7 +456,7 @@ static const struct platform_device_id hid_als_ids[] = {
/* Format: HID-SENSOR-custom_sensor_tag-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-LISS-0041",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_als_ids);
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 4c65b32d34ce..efa904a70d0e 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -215,6 +215,9 @@ static int prox_capture_sample(struct hid_sensor_hub_device *hsdev,
case 1:
prox_state->human_presence[chan] = *(u8 *)raw_data * multiplier;
return 0;
+ case 2:
+ prox_state->human_presence[chan] = *(u16 *)raw_data * multiplier;
+ return 0;
case 4:
prox_state->human_presence[chan] = *(u32 *)raw_data * multiplier;
return 0;
@@ -362,7 +365,7 @@ static const struct platform_device_id hid_prox_ids[] = {
/* Format: HID-SENSOR-tag-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-LISS-0226",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_prox_ids);
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index 201eae1c4589..1b4c18423048 100644
--- a/drivers/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -824,7 +824,7 @@ static const struct acpi_device_id isl29018_acpi_match[] = {
{"ISL29018", isl29018},
{"ISL29023", isl29023},
{"ISL29035", isl29035},
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, isl29018_acpi_match);
@@ -832,7 +832,7 @@ static const struct i2c_device_id isl29018_id[] = {
{"isl29018", isl29018},
{"isl29023", isl29023},
{"isl29035", isl29035},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, isl29018_id);
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 95bfb3ffa519..609ebf0f7313 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -680,7 +680,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(isl29028_pm_ops, isl29028_suspend,
static const struct i2c_device_id isl29028_id[] = {
{ "isl29028" },
{ "isl29030" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, isl29028_id);
@@ -688,7 +688,7 @@ static const struct of_device_id isl29028_of_match[] = {
{ .compatible = "isl,isl29028", }, /* for backward compat., don't use */
{ .compatible = "isil,isl29028", },
{ .compatible = "isil,isl29030", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, isl29028_of_match);
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index 326dc39e7929..6bc23b164cc5 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -131,11 +131,10 @@ static int isl29125_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = isl29125_read_data(data, chan->scan_index);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
*val = ret;
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index e7ba934c8e69..fa4677c28931 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -424,7 +424,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(jsa1212_pm_ops, jsa1212_suspend,
static const struct acpi_device_id jsa1212_acpi_match[] = {
{"JSA1212", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, jsa1212_acpi_match);
diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c
index df664f360903..ee59bbb8aa09 100644
--- a/drivers/iio/light/ltr390.c
+++ b/drivers/iio/light/ltr390.c
@@ -717,13 +717,13 @@ static DEFINE_SIMPLE_DEV_PM_OPS(ltr390_pm_ops, ltr390_suspend, ltr390_resume);
static const struct i2c_device_id ltr390_id[] = {
{ "ltr390" },
- { /* Sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(i2c, ltr390_id);
static const struct of_device_id ltr390_of_table[] = {
{ .compatible = "liteon,ltr390" },
- { /* Sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, ltr390_of_table);
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 669da0840eba..8d8051cf6927 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -541,7 +541,7 @@ static const struct iio_chan_spec_ext_info ltr501_ext_info[] = {
.shared = IIO_SEPARATE,
.read = ltr501_read_near_level,
},
- { /* sentinel */ }
+ { }
};
static const struct iio_event_spec ltr501_als_event_spec[] = {
@@ -646,6 +646,36 @@ static const struct iio_chan_spec ltr301_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(2),
};
+static int ltr501_read_info_raw(struct ltr501_data *data,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ __le16 buf[2];
+ int ret;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ mutex_lock(&data->lock_als);
+ ret = ltr501_read_als(data, buf);
+ mutex_unlock(&data->lock_als);
+ if (ret < 0)
+ return ret;
+ *val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
+ buf[0] : buf[1]);
+ return IIO_VAL_INT;
+ case IIO_PROXIMITY:
+ mutex_lock(&data->lock_ps);
+ ret = ltr501_read_ps(data);
+ mutex_unlock(&data->lock_ps);
+ if (ret < 0)
+ return ret;
+ *val = ret & LTR501_PS_DATA_MASK;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ltr501_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -658,14 +688,13 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_LIGHT:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&data->lock_als);
ret = ltr501_read_als(data, buf);
mutex_unlock(&data->lock_als);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
*val = ltr501_calculate_lux(le16_to_cpu(buf[1]),
@@ -675,36 +704,12 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- switch (chan->type) {
- case IIO_INTENSITY:
- mutex_lock(&data->lock_als);
- ret = ltr501_read_als(data, buf);
- mutex_unlock(&data->lock_als);
- if (ret < 0)
- break;
- *val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
- buf[0] : buf[1]);
- ret = IIO_VAL_INT;
- break;
- case IIO_PROXIMITY:
- mutex_lock(&data->lock_ps);
- ret = ltr501_read_ps(data);
- mutex_unlock(&data->lock_ps);
- if (ret < 0)
- break;
- *val = ret & LTR501_PS_DATA_MASK;
- ret = IIO_VAL_INT;
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ ret = ltr501_read_info_raw(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
@@ -756,18 +761,14 @@ static int ltr501_get_gain_index(const struct ltr501_gain *gain, int size,
return -1;
}
-static int ltr501_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int __ltr501_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct ltr501_data *data = iio_priv(indio_dev);
int i, ret, freq_val, freq_val2;
const struct ltr501_chip_info *info = data->chip_info;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
@@ -775,53 +776,43 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
i = ltr501_get_gain_index(info->als_gain,
info->als_gain_tbl_size,
val, val2);
- if (i < 0) {
- ret = -EINVAL;
- break;
- }
+ if (i < 0)
+ return -EINVAL;
data->als_contr &= ~info->als_gain_mask;
data->als_contr |= i << info->als_gain_shift;
- ret = regmap_write(data->regmap, LTR501_ALS_CONTR,
- data->als_contr);
- break;
+ return regmap_write(data->regmap, LTR501_ALS_CONTR,
+ data->als_contr);
case IIO_PROXIMITY:
i = ltr501_get_gain_index(info->ps_gain,
info->ps_gain_tbl_size,
val, val2);
- if (i < 0) {
- ret = -EINVAL;
- break;
- }
+ if (i < 0)
+ return -EINVAL;
+
data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
- ret = regmap_write(data->regmap, LTR501_PS_CONTR,
- data->ps_contr);
- break;
+ return regmap_write(data->regmap, LTR501_PS_CONTR,
+ data->ps_contr);
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
case IIO_CHAN_INFO_INT_TIME:
switch (chan->type) {
case IIO_INTENSITY:
- if (val != 0) {
- ret = -EINVAL;
- break;
- }
+ if (val != 0)
+ return -EINVAL;
+
mutex_lock(&data->lock_als);
ret = ltr501_set_it_time(data, val2);
mutex_unlock(&data->lock_als);
- break;
+ return ret;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
case IIO_CHAN_INFO_SAMP_FREQ:
switch (chan->type) {
@@ -829,50 +820,61 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
ret = ltr501_als_read_samp_freq(data, &freq_val,
&freq_val2);
if (ret < 0)
- break;
+ return ret;
ret = ltr501_als_write_samp_freq(data, val, val2);
if (ret < 0)
- break;
+ return ret;
/* update persistence count when changing frequency */
ret = ltr501_write_intr_prst(data, chan->type,
0, data->als_period);
if (ret < 0)
- ret = ltr501_als_write_samp_freq(data, freq_val,
- freq_val2);
- break;
+ /* Do not overwrite error */
+ ltr501_als_write_samp_freq(data, freq_val,
+ freq_val2);
+ return ret;
case IIO_PROXIMITY:
ret = ltr501_ps_read_samp_freq(data, &freq_val,
&freq_val2);
if (ret < 0)
- break;
+ return ret;
ret = ltr501_ps_write_samp_freq(data, val, val2);
if (ret < 0)
- break;
+ return ret;
/* update persistence count when changing frequency */
ret = ltr501_write_intr_prst(data, chan->type,
0, data->ps_period);
if (ret < 0)
- ret = ltr501_ps_write_samp_freq(data, freq_val,
- freq_val2);
- break;
+ /* Do not overwrite error */
+ ltr501_ps_write_samp_freq(data, freq_val,
+ freq_val2);
+ return ret;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
-
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+}
+
+static int ltr501_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ltr501_write_raw(indio_dev, chan, val, val2, mask);
+
+ iio_device_release_direct(indio_dev);
- iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -1600,7 +1602,7 @@ static const struct acpi_device_id ltr_acpi_match[] = {
{ "LTER0301", ltr301 },
/* https://www.catalog.update.microsoft.com/Search.aspx?q=lter0303 */
{ "LTER0303", ltr303 },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, ltr_acpi_match);
@@ -1618,7 +1620,7 @@ static const struct of_device_id ltr501_of_match[] = {
{ .compatible = "liteon,ltr559", },
{ .compatible = "liteon,ltr301", },
{ .compatible = "liteon,ltr303", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ltr501_of_match);
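The sampling-frequency branches of __ltr501_write_raw() now read the old frequency first, so a failed persistence-count update can restore it without clobbering the error code. The rollback logic, sketched with placeholder helpers:

struct foo_data;	/* placeholder driver state */

/* placeholder helpers, assumed provided by the driver */
static int foo_read_samp_freq(struct foo_data *data, int *val, int *val2);
static int foo_write_samp_freq(struct foo_data *data, int val, int val2);
static int foo_update_persist(struct foo_data *data);

static int foo_set_samp_freq(struct foo_data *data, int val, int val2)
{
    int old_val, old_val2, ret;

    ret = foo_read_samp_freq(data, &old_val, &old_val2);
    if (ret < 0)
        return ret;

    ret = foo_write_samp_freq(data, val, val2);
    if (ret < 0)
        return ret;

    /* the persistence count depends on the frequency */
    ret = foo_update_persist(data);
    if (ret < 0)
        /* best-effort restore; do not overwrite the error */
        foo_write_samp_freq(data, old_val, old_val2);

    return ret;
}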
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
index dbec1e7cfeb8..61f57a82b872 100644
--- a/drivers/iio/light/ltrf216a.c
+++ b/drivers/iio/light/ltrf216a.c
@@ -554,7 +554,7 @@ static const struct ltr_chip_info ltrf216a_chip_info = {
static const struct i2c_device_id ltrf216a_id[] = {
{ "ltr308", .driver_data = (kernel_ulong_t)&ltr308_chip_info },
{ "ltrf216a", .driver_data = (kernel_ulong_t)&ltrf216a_chip_info },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ltrf216a_id);
@@ -563,7 +563,7 @@ static const struct of_device_id ltrf216a_of_match[] = {
{ .compatible = "liteon,ltrf216a", .data = &ltrf216a_chip_info },
/* For Valve's Steamdeck device, an ACPI platform using PRP0001 */
{ .compatible = "ltr,ltrf216a", .data = &ltrf216a_chip_info },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ltrf216a_of_match);
diff --git a/drivers/iio/light/opt4001.c b/drivers/iio/light/opt4001.c
index 6cf60151b3d8..ba4eb82d9bc2 100644
--- a/drivers/iio/light/opt4001.c
+++ b/drivers/iio/light/opt4001.c
@@ -448,7 +448,7 @@ MODULE_DEVICE_TABLE(i2c, opt4001_id);
static const struct of_device_id opt4001_of_match[] = {
{ .compatible = "ti,opt4001-sot-5x3", .data = &opt4001_sot_5x3_info},
{ .compatible = "ti,opt4001-picostar", .data = &opt4001_picostar_info},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, opt4001_of_match);
diff --git a/drivers/iio/light/opt4060.c b/drivers/iio/light/opt4060.c
index ab55f8d2ea0c..f4085020e03e 100644
--- a/drivers/iio/light/opt4060.c
+++ b/drivers/iio/light/opt4060.c
@@ -311,7 +311,7 @@ any_mode_retry:
* concurrently change. And we just keep trying until we get one
* of the modes...
*/
- if (iio_device_claim_direct_mode(indio_dev))
+ if (!iio_device_claim_direct(indio_dev))
goto any_mode_retry;
/*
* This path means that we managed to claim direct mode. In
@@ -320,7 +320,8 @@ any_mode_retry:
*/
ret = opt4060_set_state_common(chip, continuous_sampling,
continuous_irq);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
+ return ret;
} else {
/*
* This path means that we managed to claim buffer mode. In
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index b920bf82c102..8885852bef22 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -456,14 +456,14 @@ static const struct dev_pm_ops pa12203001_pm_ops = {
static const struct acpi_device_id pa12203001_acpi_match[] = {
{ "TXCPA122", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, pa12203001_acpi_match);
static const struct i2c_device_id pa12203001_id[] = {
{ "txcpa122" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, pa12203001_id);
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index cc25596cb248..7cec5e943373 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -998,9 +998,8 @@ static int bu27034_read_raw(struct iio_dev *idev,
return -EINVAL;
/* Don't mess with measurement enabling while buffering */
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(idev))
+ return -EBUSY;
mutex_lock(&data->mutex);
/*
@@ -1011,7 +1010,7 @@ static int bu27034_read_raw(struct iio_dev *idev,
ret = result_get(data, chan->channel, val);
mutex_unlock(&data->mutex);
- iio_device_release_direct_mode(idev);
+ iio_device_release_direct(idev);
if (ret)
return ret;
@@ -1050,9 +1049,8 @@ static int bu27034_write_raw(struct iio_dev *idev,
struct bu27034_data *data = iio_priv(idev);
int ret;
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(idev))
+ return -EBUSY;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -1069,7 +1067,7 @@ static int bu27034_write_raw(struct iio_dev *idev,
break;
}
- iio_device_release_direct_mode(idev);
+ iio_device_release_direct(idev);
return ret;
}
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 2ba917c5c138..92e7552f3e39 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
@@ -704,50 +705,58 @@ static int rpr0521_write_ps_offset(struct rpr0521_data *data, int offset)
return ret;
}
+static int rpr0521_read_info_raw(struct rpr0521_data *data,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ u8 device_mask;
+ __le16 raw_data;
+ int ret;
+
+ device_mask = rpr0521_data_reg[chan->address].device_mask;
+
+ guard(mutex)(&data->lock);
+ ret = rpr0521_set_power_state(data, true, device_mask);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap,
+ rpr0521_data_reg[chan->address].address,
+ &raw_data, sizeof(raw_data));
+ if (ret < 0) {
+ rpr0521_set_power_state(data, false, device_mask);
+ return ret;
+ }
+
+ ret = rpr0521_set_power_state(data, false, device_mask);
+ if (ret < 0)
+ return ret;
+
+ *val = le16_to_cpu(raw_data);
+
+ return 0;
+}
+
static int rpr0521_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
struct rpr0521_data *data = iio_priv(indio_dev);
int ret;
- int busy;
- u8 device_mask;
- __le16 raw_data;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (chan->type != IIO_INTENSITY && chan->type != IIO_PROXIMITY)
return -EINVAL;
- busy = iio_device_claim_direct_mode(indio_dev);
- if (busy)
+ if (!iio_device_claim_direct(indio_dev))
return -EBUSY;
- device_mask = rpr0521_data_reg[chan->address].device_mask;
-
- mutex_lock(&data->lock);
- ret = rpr0521_set_power_state(data, true, device_mask);
- if (ret < 0)
- goto rpr0521_read_raw_out;
-
- ret = regmap_bulk_read(data->regmap,
- rpr0521_data_reg[chan->address].address,
- &raw_data, sizeof(raw_data));
- if (ret < 0) {
- rpr0521_set_power_state(data, false, device_mask);
- goto rpr0521_read_raw_out;
- }
-
- ret = rpr0521_set_power_state(data, false, device_mask);
-
-rpr0521_read_raw_out:
- mutex_unlock(&data->lock);
- iio_device_release_direct_mode(indio_dev);
+ ret = rpr0521_read_info_raw(data, chan, val);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
- *val = le16_to_cpu(raw_data);
-
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
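rpr0521 moves the measurement into rpr0521_read_info_raw() so guard(mutex)() covers every return while the power state is still balanced by hand on each error path. A sketch of that power-bracketed read; foo_set_power() and the regmap field are placeholders:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

struct foo_data {
    struct mutex lock;
    struct regmap *regmap;	/* placeholder driver state */
};

static int foo_set_power(struct foo_data *data, bool on);	/* placeholder */

static int foo_read_one(struct foo_data *data, unsigned int reg, int *val)
{
    __le16 raw;
    int ret;

    guard(mutex)(&data->lock);

    ret = foo_set_power(data, true);
    if (ret < 0)
        return ret;

    ret = regmap_bulk_read(data->regmap, reg, &raw, sizeof(raw));
    if (ret < 0) {
        foo_set_power(data, false);	/* best-effort power-off */
        return ret;
    }

    ret = foo_set_power(data, false);
    if (ret < 0)
        return ret;

    *val = le16_to_cpu(raw);
    return 0;
}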
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 66abda021696..4aa02afd853e 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -633,11 +633,10 @@ static int si1145_read_raw(struct iio_dev *indio_dev,
case IIO_VOLTAGE:
case IIO_TEMP:
case IIO_UVINDEX:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = si1145_measure(indio_dev, chan);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -750,18 +749,17 @@ static int si1145_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = si1145_param_set(data, reg1, val);
if (ret < 0) {
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
/* Set recovery period to one's complement of gain */
ret = si1145_param_set(data, reg2, (~val & 0x07) << 4);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_RAW:
if (chan->type != IIO_CURRENT)
@@ -773,19 +771,18 @@ static int si1145_write_raw(struct iio_dev *indio_dev,
reg1 = SI1145_PS_LED_REG(chan->channel);
shift = SI1145_PS_LED_SHIFT(chan->channel);
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = i2c_smbus_read_byte_data(data->client, reg1);
if (ret < 0) {
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
ret = i2c_smbus_write_byte_data(data->client, reg1,
(ret & ~(0x0f << shift)) |
((val & 0x0f) << shift));
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return si1145_store_samp_freq(data, val);
diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
index 40a810000df0..124a8f9204a9 100644
--- a/drivers/iio/light/st_uvis25_core.c
+++ b/drivers/iio/light/st_uvis25_core.c
@@ -117,9 +117,8 @@ static int st_uvis25_read_raw(struct iio_dev *iio_dev,
{
int ret;
- ret = iio_device_claim_direct_mode(iio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
switch (mask) {
case IIO_CHAN_INFO_PROCESSED: {
@@ -144,7 +143,7 @@ static int st_uvis25_read_raw(struct iio_dev *iio_dev,
break;
}
- iio_device_release_direct_mode(iio_dev);
+ iio_device_release_direct(iio_dev);
return ret;
}
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index f54282476d11..5d9bb4d9be63 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -41,13 +41,13 @@ static int st_uvis25_i2c_probe(struct i2c_client *client)
static const struct of_device_id st_uvis25_i2c_of_match[] = {
{ .compatible = "st,uvis25", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_uvis25_i2c_of_match);
static const struct i2c_device_id st_uvis25_i2c_id_table[] = {
{ ST_UVIS25_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(i2c, st_uvis25_i2c_id_table);
diff --git a/drivers/iio/light/st_uvis25_spi.c b/drivers/iio/light/st_uvis25_spi.c
index 18edc6a5a4a4..a5aad74ce73e 100644
--- a/drivers/iio/light/st_uvis25_spi.c
+++ b/drivers/iio/light/st_uvis25_spi.c
@@ -42,13 +42,13 @@ static int st_uvis25_spi_probe(struct spi_device *spi)
static const struct of_device_id st_uvis25_spi_of_match[] = {
{ .compatible = "st,uvis25", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_uvis25_spi_of_match);
static const struct spi_device_id st_uvis25_spi_id_table[] = {
{ ST_UVIS25_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, st_uvis25_spi_id_table);
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index b81cc44db43c..deada9ba4748 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -165,7 +165,7 @@ static const struct iio_chan_spec_ext_info stk3310_ext_info[] = {
.shared = IIO_SEPARATE,
.read = stk3310_read_near_level,
},
- { /* sentinel */ }
+ { }
};
static const struct iio_chan_spec stk3310_channels[] = {
@@ -703,7 +703,7 @@ static const struct i2c_device_id stk3310_i2c_id[] = {
{ "STK3310" },
{ "STK3311" },
{ "STK3335" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, stk3310_i2c_id);
@@ -711,7 +711,7 @@ static const struct acpi_device_id stk3310_acpi_id[] = {
{"STK3013", 0},
{"STK3310", 0},
{"STK3311", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, stk3310_acpi_id);
@@ -721,7 +721,7 @@ static const struct of_device_id stk3310_of_match[] = {
{ .compatible = "sensortek,stk3310", },
{ .compatible = "sensortek,stk3311", },
{ .compatible = "sensortek,stk3335", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, stk3310_of_match);
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 884e43e4cda4..39268f855c77 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -134,16 +134,15 @@ static int tcs3414_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = tcs3414_req_data(data);
if (ret < 0) {
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
ret = i2c_smbus_read_word_data(data->client, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
*val = ret;
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 2bd36a344ea5..0f8bf8503edd 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -148,16 +148,15 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = tcs3472_req_data(data);
if (ret < 0) {
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
ret = i2c_smbus_read_word_data(data->client, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
*val = ret;
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index f1fe7640fce6..f2af1cd7c2d1 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -843,7 +843,7 @@ static const struct i2c_device_id tsl2563_id[] = {
{ "tsl2561", 1 },
{ "tsl2562", 2 },
{ "tsl2563", 3 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, tsl2563_id);
@@ -852,7 +852,7 @@ static const struct of_device_id tsl2563_of_match[] = {
{ .compatible = "amstaos,tsl2561" },
{ .compatible = "amstaos,tsl2562" },
{ .compatible = "amstaos,tsl2563" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, tsl2563_of_match);
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 02ad11611b9c..fc3b0c4226be 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -922,7 +922,7 @@ static const struct i2c_device_id tsl2583_idtable[] = {
{ "tsl2580", 0 },
{ "tsl2581", 1 },
{ "tsl2583", 2 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, tsl2583_idtable);
@@ -930,7 +930,7 @@ static const struct of_device_id tsl2583_of_match[] = {
{ .compatible = "amstaos,tsl2580", },
{ .compatible = "amstaos,tsl2581", },
{ .compatible = "amstaos,tsl2583", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, tsl2583_of_match);
diff --git a/drivers/iio/light/tsl2591.c b/drivers/iio/light/tsl2591.c
index b81ca6f73f92..08476f193a44 100644
--- a/drivers/iio/light/tsl2591.c
+++ b/drivers/iio/light/tsl2591.c
@@ -1204,7 +1204,7 @@ static int tsl2591_probe(struct i2c_client *client)
static const struct of_device_id tsl2591_of_match[] = {
{ .compatible = "amstaos,tsl2591"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, tsl2591_of_match);
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index 349afdcbe30d..0b171106441a 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -1899,7 +1899,7 @@ static const struct i2c_device_id tsl2772_idtable[] = {
{ "tsl2772", tsl2772 },
{ "tmd2772", tmd2772 },
{ "apds9930", apds9930 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, tsl2772_idtable);
@@ -1916,7 +1916,7 @@ static const struct of_device_id tsl2772_of_match[] = {
{ .compatible = "amstaos,tsl2772" },
{ .compatible = "amstaos,tmd2772" },
{ .compatible = "avago,apds9930" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, tsl2772_of_match);
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index c83114aed6b2..61a0957317a1 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -949,21 +949,21 @@ static const struct dev_pm_ops us5182d_pm_ops = {
static const struct acpi_device_id us5182d_acpi_match[] = {
{ "USD5182", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, us5182d_acpi_match);
static const struct i2c_device_id us5182d_id[] = {
{ "usd5182" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, us5182d_id);
static const struct of_device_id us5182d_of_match[] = {
{ .compatible = "upisemi,usd5182" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, us5182d_of_match);
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index e19199b17f2e..90e7d4421abf 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1084,9 +1084,8 @@ static int vcnl4010_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_SCALE:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
/* Protect against event capture. */
if (vcnl4010_is_in_periodic_mode(data)) {
@@ -1096,7 +1095,7 @@ static int vcnl4010_read_raw(struct iio_dev *indio_dev,
mask);
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
switch (chan->type) {
@@ -1157,9 +1156,8 @@ static int vcnl4010_write_raw(struct iio_dev *indio_dev,
int ret;
struct vcnl4000_data *data = iio_priv(indio_dev);
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
/* Protect against event capture. */
if (vcnl4010_is_in_periodic_mode(data)) {
@@ -1183,7 +1181,7 @@ static int vcnl4010_write_raw(struct iio_dev *indio_dev,
}
end:
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -1410,46 +1408,52 @@ static int vcnl4010_read_event_config(struct iio_dev *indio_dev,
}
}
-static int vcnl4010_config_threshold(struct iio_dev *indio_dev, bool state)
+static int vcnl4010_config_threshold_enable(struct vcnl4000_data *data)
{
- struct vcnl4000_data *data = iio_priv(indio_dev);
int ret;
- int icr;
- int command;
- if (state) {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ /* Enable periodic measurement of proximity data. */
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
+ VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN);
+ if (ret < 0)
+ return ret;
- /* Enable periodic measurement of proximity data. */
- command = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
+ /*
+ * Enable interrupts on threshold, for proximity data by
+ * default.
+ */
+ return i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL,
+ VCNL4010_INT_THR_EN);
+}
- /*
- * Enable interrupts on threshold, for proximity data by
- * default.
- */
- icr = VCNL4010_INT_THR_EN;
- } else {
- if (!vcnl4010_is_thr_enabled(data))
- return 0;
+static int vcnl4010_config_threshold_disable(struct vcnl4000_data *data)
+{
+ int ret;
- command = 0;
- icr = 0;
- }
+ if (!vcnl4010_is_thr_enabled(data))
+ return 0;
- ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
- command);
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0);
if (ret < 0)
- goto end;
+ return ret;
- ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, icr);
+ return i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0);
+}
-end:
- if (state)
- iio_device_release_direct_mode(indio_dev);
+static int vcnl4010_config_threshold(struct iio_dev *indio_dev, bool state)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
- return ret;
+ if (state) {
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = vcnl4010_config_threshold_enable(data);
+ iio_device_release_direct(indio_dev);
+ return ret;
+ } else {
+ return vcnl4010_config_threshold_disable(data);
+ }
}
static int vcnl4010_write_event_config(struct iio_dev *indio_dev,
@@ -1741,7 +1745,7 @@ static const struct iio_chan_spec_ext_info vcnl4000_ext_info[] = {
.shared = IIO_SEPARATE,
.read = vcnl4000_read_near_level,
},
- { /* sentinel */ }
+ { }
};
static const struct iio_event_spec vcnl4000_event_spec[] = {
@@ -2064,7 +2068,7 @@ static const struct of_device_id vcnl_4000_of_match[] = {
.compatible = "vishay,vcnl4200",
.data = (void *)VCNL4200,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, vcnl_4000_of_match);
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 67c94be02018..b2bede9d3daa 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -156,6 +156,31 @@ static int vcnl4035_set_pm_runtime_state(struct vcnl4035_data *data, bool on)
return ret;
}
+static int vcnl4035_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct vcnl4035_data *data = iio_priv(indio_dev);
+ int ret;
+ int raw_data;
+ unsigned int reg;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ if (chan->channel)
+ reg = VCNL4035_ALS_DATA;
+ else
+ reg = VCNL4035_WHITE_DATA;
+ ret = regmap_read(data->regmap, reg, &raw_data);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+
+ *val = raw_data;
+
+ return IIO_VAL_INT;
+}
+
/*
* Device IT INT Time (ms) Scale (lux/step)
* 000 50 0.064
@@ -175,28 +200,13 @@ static int vcnl4035_read_raw(struct iio_dev *indio_dev,
{
struct vcnl4035_data *data = iio_priv(indio_dev);
int ret;
- int raw_data;
- unsigned int reg;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = vcnl4035_set_pm_runtime_state(data, true);
if (ret < 0)
return ret;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (!ret) {
- if (chan->channel)
- reg = VCNL4035_ALS_DATA;
- else
- reg = VCNL4035_WHITE_DATA;
- ret = regmap_read(data->regmap, reg, &raw_data);
- iio_device_release_direct_mode(indio_dev);
- if (!ret) {
- *val = raw_data;
- ret = IIO_VAL_INT;
- }
- }
+ ret = vcnl4035_read_info_raw(indio_dev, chan, val);
vcnl4035_set_pm_runtime_state(data, false);
return ret;
case IIO_CHAN_INFO_INT_TIME:
diff --git a/drivers/iio/light/veml6040.c b/drivers/iio/light/veml6040.c
index 216e271001a8..71a594b2ec85 100644
--- a/drivers/iio/light/veml6040.c
+++ b/drivers/iio/light/veml6040.c
@@ -256,13 +256,13 @@ static int veml6040_probe(struct i2c_client *client)
static const struct i2c_device_id veml6040_id_table[] = {
{"veml6040"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, veml6040_id_table);
static const struct of_device_id veml6040_of_match[] = {
{.compatible = "vishay,veml6040"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, veml6040_of_match);
diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c
index 859891e8f115..edbb43407054 100644
--- a/drivers/iio/light/veml6075.c
+++ b/drivers/iio/light/veml6075.c
@@ -458,7 +458,7 @@ MODULE_DEVICE_TABLE(i2c, veml6075_id);
static const struct of_device_id veml6075_of_match[] = {
{ .compatible = "vishay,veml6075" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, veml6075_of_match);
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index 6e2183a4243e..cc4f2e5404aa 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -745,7 +745,7 @@ static int vl6180_probe(struct i2c_client *client)
static const struct of_device_id vl6180_of_match[] = {
{ .compatible = "st,vl6180", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, vl6180_of_match);
diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c
index 604be60e92ac..1e5e9bf2935f 100644
--- a/drivers/iio/light/zopt2201.c
+++ b/drivers/iio/light/zopt2201.c
@@ -113,11 +113,13 @@ static const struct {
{ 13, 3125 },
};
-static const struct {
+struct zopt2201_scale {
unsigned int scale, uscale; /* scale factor as integer + micro */
u8 gain; /* gain register value */
u8 res; /* resolution register value */
-} zopt2201_scale_als[] = {
+};
+
+static struct zopt2201_scale zopt2201_scale_als[] = {
{ 19, 200000, 0, 5 },
{ 6, 400000, 1, 5 },
{ 3, 200000, 2, 5 },
@@ -142,11 +144,7 @@ static const struct {
{ 0, 8333, 4, 0 },
};
-static const struct {
- unsigned int scale, uscale; /* scale factor as integer + micro */
- u8 gain; /* gain register value */
- u8 res; /* resolution register value */
-} zopt2201_scale_uvb[] = {
+static struct zopt2201_scale zopt2201_scale_uvb[] = {
{ 0, 460800, 0, 5 },
{ 0, 153600, 1, 5 },
{ 0, 76800, 2, 5 },
@@ -348,16 +346,17 @@ static int zopt2201_set_gain(struct zopt2201_data *data, u8 gain)
return 0;
}
-static int zopt2201_write_scale_als_by_idx(struct zopt2201_data *data, int idx)
+static int zopt2201_write_scale_by_idx(struct zopt2201_data *data, int idx,
+ struct zopt2201_scale *zopt2201_scale_array)
{
int ret;
mutex_lock(&data->lock);
- ret = zopt2201_set_resolution(data, zopt2201_scale_als[idx].res);
+ ret = zopt2201_set_resolution(data, zopt2201_scale_array[idx].res);
if (ret < 0)
goto unlock;
- ret = zopt2201_set_gain(data, zopt2201_scale_als[idx].gain);
+ ret = zopt2201_set_gain(data, zopt2201_scale_array[idx].gain);
unlock:
mutex_unlock(&data->lock);
@@ -371,29 +370,12 @@ static int zopt2201_write_scale_als(struct zopt2201_data *data,
for (i = 0; i < ARRAY_SIZE(zopt2201_scale_als); i++)
if (val == zopt2201_scale_als[i].scale &&
- val2 == zopt2201_scale_als[i].uscale) {
- return zopt2201_write_scale_als_by_idx(data, i);
- }
+ val2 == zopt2201_scale_als[i].uscale)
+ return zopt2201_write_scale_by_idx(data, i, zopt2201_scale_als);
return -EINVAL;
}
-static int zopt2201_write_scale_uvb_by_idx(struct zopt2201_data *data, int idx)
-{
- int ret;
-
- mutex_lock(&data->lock);
- ret = zopt2201_set_resolution(data, zopt2201_scale_als[idx].res);
- if (ret < 0)
- goto unlock;
-
- ret = zopt2201_set_gain(data, zopt2201_scale_als[idx].gain);
-
-unlock:
- mutex_unlock(&data->lock);
- return ret;
-}
-
static int zopt2201_write_scale_uvb(struct zopt2201_data *data,
int val, int val2)
{
@@ -402,7 +384,7 @@ static int zopt2201_write_scale_uvb(struct zopt2201_data *data,
for (i = 0; i < ARRAY_SIZE(zopt2201_scale_uvb); i++)
if (val == zopt2201_scale_uvb[i].scale &&
val2 == zopt2201_scale_uvb[i].uscale)
- return zopt2201_write_scale_uvb_by_idx(data, i);
+ return zopt2201_write_scale_by_idx(data, i, zopt2201_scale_uvb);
return -EINVAL;
}
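zopt2201 names the previously anonymous scale struct so a single helper can serve both tables; note the removed zopt2201_write_scale_uvb_by_idx() indexed zopt2201_scale_als rather than the UVB table, which the shared helper avoids. The dedup, sketched with placeholder names:

#include <linux/mutex.h>
#include <linux/types.h>

struct foo_data {
    struct mutex lock;	/* placeholder driver state */
};

struct foo_scale {
    unsigned int scale, uscale;	/* integer + micro parts */
    u8 gain;
    u8 res;
};

/* placeholder helpers, assumed provided by the driver */
static int foo_set_resolution(struct foo_data *data, u8 res);
static int foo_set_gain(struct foo_data *data, u8 gain);

/* One helper parameterized by table replaces two near-identical copies */
static int foo_write_scale_by_idx(struct foo_data *data, int idx,
                                  const struct foo_scale *table)
{
    int ret;

    mutex_lock(&data->lock);
    ret = foo_set_resolution(data, table[idx].res);
    if (!ret)
        ret = foo_set_gain(data, table[idx].gain);
    mutex_unlock(&data->lock);

    return ret;
}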
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
index c1fc339e85b4..192ba2da94e2 100644
--- a/drivers/iio/magnetometer/af8133j.c
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -370,7 +370,8 @@ static irqreturn_t af8133j_trigger_handler(int irq, void *p)
if (ret)
goto out_done;
- iio_push_to_buffers_with_timestamp(indio_dev, &sample, timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &sample, sizeof(sample),
+ timestamp);
out_done:
iio_trigger_notify_done(indio_dev->trig);
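From af8133j onward, the magnetometer patches convert iio_push_to_buffers_with_timestamp() to iio_push_to_buffers_with_ts(), which takes the scan size explicitly so the core can validate it. The call shape inside a trigger handler, with a placeholder scan layout:

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/irqreturn.h>

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
    struct iio_poll_func *pf = p;
    struct iio_dev *indio_dev = pf->indio_dev;
    struct {
        s16 channels[3];
        s64 timestamp __aligned(8);	/* placeholder scan layout */
    } scan = { };

    /* ... fill scan.channels from the device here ... */

    iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
                                iio_get_time_ns(indio_dev));
    iio_trigger_notify_done(indio_dev->trig);

    return IRQ_HANDLED;
}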
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 7bc341c69697..947fe8a475f2 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -673,8 +673,8 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
goto out_unlock;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &ak8974->scan, sizeof(ak8974->scan),
+ iio_get_time_ns(indio_dev));
out_unlock:
mutex_unlock(&ak8974->lock);
@@ -704,7 +704,7 @@ ak8974_get_mount_matrix(const struct iio_dev *indio_dev,
static const struct iio_chan_spec_ext_info ak8974_ext_info[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, ak8974_get_mount_matrix),
- { },
+ { }
};
#define AK8974_AXIS_CHANNEL(axis, index, bits) \
@@ -1023,14 +1023,14 @@ static const struct i2c_device_id ak8974_id[] = {
{ "ami306" },
{ "ak8974" },
{ "hscdtd008a" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ak8974_id);
static const struct of_device_id ak8974_of_match[] = {
{ .compatible = "asahi-kasei,ak8974", },
{ .compatible = "alps,hscdtd008a", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ak8974_of_match);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ef1363126cc2..a1e92b2abffd 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -882,8 +882,8 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
return;
@@ -1107,7 +1107,7 @@ static const struct i2c_device_id ak8975_id[] = {
{"ak09912", (kernel_ulong_t)&ak_def_array[AK09912] },
{"ak09916", (kernel_ulong_t)&ak_def_array[AK09916] },
{"ak09918", (kernel_ulong_t)&ak_def_array[AK09918] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ak8975_id);
@@ -1122,7 +1122,7 @@ static const struct of_device_id ak8975_of_match[] = {
{ .compatible = "ak09912", .data = &ak_def_array[AK09912] },
{ .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] },
{ .compatible = "asahi-kasei,ak09918", .data = &ak_def_array[AK09918] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ak8975_of_match);
diff --git a/drivers/iio/magnetometer/als31300.c b/drivers/iio/magnetometer/als31300.c
index 87b60c4e81fa..f72af829715f 100644
--- a/drivers/iio/magnetometer/als31300.c
+++ b/drivers/iio/magnetometer/als31300.c
@@ -245,8 +245,7 @@ static irqreturn_t als31300_trigger_handler(int irq, void *p)
scan.channels[0] = x;
scan.channels[1] = y;
scan.channels[2] = z;
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), pf->timestamp);
trigger_out:
iio_trigger_notify_done(indio_dev->trig);
@@ -457,7 +456,7 @@ static const struct i2c_device_id als31300_id[] = {
.name = "als31300-2000",
.driver_data = (kernel_ulong_t)&al31300_variant_2000,
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(i2c, als31300_id);
@@ -474,7 +473,7 @@ static const struct of_device_id als31300_of_match[] = {
.compatible = "allegromicro,als31300-2000",
.data = &al31300_variant_2000,
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, als31300_of_match);
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 88bb673e40d8..f9c51ceae011 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -678,8 +678,8 @@ static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ pf->timestamp);
err:
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index 8cbeda924bda..b110791f688a 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -42,7 +42,7 @@ static const struct i2c_device_id bmc150_magn_i2c_id[] = {
{ "bmc150_magn" },
{ "bmc156_magn" },
{ "bmm150_magn" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index 2d4b8cba32f1..896b1d280731 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -37,7 +37,7 @@ static const struct spi_device_id bmc150_magn_spi_id[] = {
{"bmc150_magn", 0},
{"bmc156_magn", 0},
{"bmm150_magn", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 97ddaa2a03f6..c673f9323e47 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -563,7 +563,7 @@ static const struct platform_device_id hid_magn_3d_ids[] = {
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200083",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_magn_3d_ids);
diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index ffd669b1ee7c..7a3faf7ffed4 100644
--- a/drivers/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -34,7 +34,7 @@ enum hmc5843_ids {
* @regmap: hardware access register maps
* @variant: describe chip variants
* @scan: buffer to pack data for passing to
- * iio_push_to_buffers_with_timestamp()
+ * iio_push_to_buffers_with_ts()
*/
struct hmc5843_data {
struct device *dev;
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 2fc84310e2cc..fc16ebd314f7 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -452,8 +452,8 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 657a309e2bd5..b41709959e2b 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -84,7 +84,7 @@ static const struct of_device_id hmc5843_of_match[] = {
{ .compatible = "honeywell,hmc5883", .data = (void *)HMC5883_ID },
{ .compatible = "honeywell,hmc5883l", .data = (void *)HMC5883L_ID },
{ .compatible = "honeywell,hmc5983", .data = (void *)HMC5983_ID },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, hmc5843_of_match);
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index b7fde331069d..6a55c1559b0d 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -60,7 +60,6 @@ static int hmc5843_spi_probe(struct spi_device *spi)
spi->mode = SPI_MODE_3;
spi->max_speed_hz = 8000000;
- spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret)
return ret;
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 2fe8e97f2cf8..ff09250a06e7 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -9,6 +9,7 @@
* TODO: irq, user offset, oversampling, continuous mode
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -102,17 +103,12 @@ static int mag3110_read(struct mag3110_data *data, __be16 buf[3])
{
int ret;
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
ret = mag3110_request(data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
+ if (ret < 0)
return ret;
- }
- ret = i2c_smbus_read_i2c_block_data(data->client,
- MAG3110_OUT_X, 3 * sizeof(__be16), (u8 *) buf);
- mutex_unlock(&data->lock);
-
- return ret;
+ return i2c_smbus_read_i2c_block_data(data->client, MAG3110_OUT_X,
+ 3 * sizeof(__be16), (u8 *) buf);
}
static ssize_t mag3110_show_int_plus_micros(char *buf,
@@ -231,19 +227,17 @@ static int mag3110_change_config(struct mag3110_data *data, u8 reg, u8 val)
int ret;
int is_active;
- mutex_lock(&data->lock);
+ guard(mutex)(&data->lock);
is_active = mag3110_is_active(data);
- if (is_active < 0) {
- ret = is_active;
- goto fail;
- }
+ if (is_active < 0)
+ return is_active;
/* config can only be changed when in standby */
if (is_active > 0) {
ret = mag3110_standby(data);
if (ret < 0)
- goto fail;
+ return ret;
}
/*
@@ -252,23 +246,52 @@ static int mag3110_change_config(struct mag3110_data *data, u8 reg, u8 val)
*/
ret = mag3110_wait_standby(data);
if (ret < 0)
- goto fail;
+ return ret;
ret = i2c_smbus_write_byte_data(data->client, reg, val);
if (ret < 0)
- goto fail;
+ return ret;
if (is_active > 0) {
ret = mag3110_active(data);
if (ret < 0)
- goto fail;
+ return ret;
}
- ret = 0;
-fail:
- mutex_unlock(&data->lock);
+ return 0;
+}
- return ret;
+static int __mag3110_read_info_raw(struct mag3110_data *data,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ __be16 buffer[3];
+ int ret;
+
+ switch (chan->type) {
+ case IIO_MAGN: /* in 0.1 uT / LSB */
+ ret = mag3110_read(data, buffer);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(be16_to_cpu(buffer[chan->scan_index]),
+ chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+
+ case IIO_TEMP: { /* in 1 C / LSB */
+ guard(mutex)(&data->lock);
+ ret = mag3110_request(data);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_read_byte_data(data->client,
+ MAG3110_DIE_TEMP);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ }
+ default:
+ return -EINVAL;
+ }
}
static int mag3110_read_raw(struct iio_dev *indio_dev,
@@ -276,46 +299,14 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct mag3110_data *data = iio_priv(indio_dev);
- __be16 buffer[3];
int i, ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- switch (chan->type) {
- case IIO_MAGN: /* in 0.1 uT / LSB */
- ret = mag3110_read(data, buffer);
- if (ret < 0)
- goto release;
- *val = sign_extend32(
- be16_to_cpu(buffer[chan->scan_index]),
- chan->scan_type.realbits - 1);
- ret = IIO_VAL_INT;
- break;
- case IIO_TEMP: /* in 1 C / LSB */
- mutex_lock(&data->lock);
- ret = mag3110_request(data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- goto release;
- }
- ret = i2c_smbus_read_byte_data(data->client,
- MAG3110_DIE_TEMP);
- mutex_unlock(&data->lock);
- if (ret < 0)
- goto release;
- *val = sign_extend32(ret,
- chan->scan_type.realbits - 1);
- ret = IIO_VAL_INT;
- break;
- default:
- ret = -EINVAL;
- }
-release:
- iio_device_release_direct_mode(indio_dev);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __mag3110_read_info_raw(data, chan, val);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
@@ -346,24 +337,18 @@ release:
return -EINVAL;
}
-static int mag3110_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int __mag3110_write_raw(struct mag3110_data *data,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
- struct mag3110_data *data = iio_priv(indio_dev);
- int rate, ret;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ int rate;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
- if (rate < 0) {
- ret = -EINVAL;
- break;
- }
+ if (rate < 0)
+ return -EINVAL;
+
data->ctrl_reg1 &= 0xff & ~MAG3110_CTRL_DR_MASK
& ~MAG3110_CTRL_AC;
data->ctrl_reg1 |= rate << MAG3110_CTRL_DR_SHIFT;
@@ -371,22 +356,32 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
if (data->sleep_val < 40)
data->ctrl_reg1 |= MAG3110_CTRL_AC;
- ret = mag3110_change_config(data, MAG3110_CTRL_REG1,
- data->ctrl_reg1);
- break;
+ return mag3110_change_config(data, MAG3110_CTRL_REG1,
+ data->ctrl_reg1);
+
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < -10000 || val > 10000) {
- ret = -EINVAL;
- break;
- }
- ret = i2c_smbus_write_word_swapped(data->client,
+ if (val < -10000 || val > 10000)
+ return -EINVAL;
+
+ return i2c_smbus_write_word_swapped(data->client,
MAG3110_OFF_X + 2 * chan->scan_index, val << 1);
- break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- iio_device_release_direct_mode(indio_dev);
+}
+
+static int mag3110_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct mag3110_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __mag3110_write_raw(data, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
+
return ret;
}
@@ -409,8 +404,8 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
data->scan.temperature = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
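
The mag3110 rework above leans on scope-based cleanup: guard(mutex)(&lock), built on the machinery in <linux/cleanup.h>, takes the mutex and releases it automatically when the enclosing scope exits, so every early return is safe and the unlock/goto bookkeeping disappears. A minimal sketch with hypothetical helpers:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static int foo_read_locked(struct foo_data *data, int *val)
    {
            int ret;

            guard(mutex)(&data->lock);      /* released at every scope exit */

            ret = foo_request(data);
            if (ret < 0)
                    return ret;             /* no explicit mutex_unlock() */

            *val = foo_latch_value(data);
            return 0;
    }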
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index dd480a4a5f98..e08a57cd6de2 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -556,13 +556,13 @@ MODULE_DEVICE_TABLE(of, mmc35240_of_match);
static const struct acpi_device_id mmc35240_acpi_match[] = {
{"MMC35240", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, mmc35240_acpi_match);
static const struct i2c_device_id mmc35240_id[] = {
{ "mmc35240" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, mmc35240_id);
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index c99694a77a14..2b2884425746 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -399,12 +399,11 @@ static int rm3100_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = rm3100_read_mag(data, chan->scan_index, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
@@ -516,8 +515,8 @@ static irqreturn_t rm3100_trigger_handler(int irq, void *p)
* Always use the same buffer so that we don't need to zero the
* padding to avoid leaking data.
*/
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, data->buffer, sizeof(data->buffer),
+ pf->timestamp);
done:
iio_trigger_notify_done(indio_dev->trig);
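
The claim-direct conversion changes the calling convention, not just the name: iio_device_claim_direct() returns a bool (true when direct mode was claimed) instead of an errno, so callers supply the error code themselves — conventionally -EBUSY, since failure means buffered capture is active. The resulting pattern, sketched for a hypothetical read:

    static int foo_read_raw_value(struct iio_dev *indio_dev, int *val)
    {
            int ret;

            if (!iio_device_claim_direct(indio_dev))
                    return -EBUSY;          /* buffered capture in progress */

            ret = foo_read_channel(iio_priv(indio_dev), val);
            iio_device_release_direct(indio_dev);
            return ret;
    }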
diff --git a/drivers/iio/magnetometer/rm3100-spi.c b/drivers/iio/magnetometer/rm3100-spi.c
index dd6d48043740..2f60a41c07f7 100644
--- a/drivers/iio/magnetometer/rm3100-spi.c
+++ b/drivers/iio/magnetometer/rm3100-spi.c
@@ -32,7 +32,6 @@ static int rm3100_probe(struct spi_device *spi)
spi->mode = SPI_MODE_0;
/* Data rates cannot exceed 1Mbits. */
spi->max_speed_hz = 1000000;
- spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret)
return ret;
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 1672b274768d..ed70e782af5e 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -54,7 +54,7 @@ static const struct of_device_id st_magn_of_match[] = {
.compatible = "st,lsm303c-magn",
.data = LSM303C_MAGN_DEV_NAME,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
@@ -102,7 +102,7 @@ static const struct i2c_device_id st_magn_id_table[] = {
{ LSM9DS1_MAGN_DEV_NAME },
{ IIS2MDC_MAGN_DEV_NAME },
{ LSM303C_MAGN_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index fe4d0e63133c..68816362bb95 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -49,7 +49,7 @@ static const struct of_device_id st_magn_of_match[] = {
.compatible = "st,lsm303c-magn",
.data = LSM303C_MAGN_DEV_NAME,
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
@@ -94,7 +94,7 @@ static const struct spi_device_id st_magn_id_table[] = {
{ LSM9DS1_MAGN_DEV_NAME },
{ IIS2MDC_MAGN_DEV_NAME },
{ LSM303C_MAGN_DEV_NAME },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, st_magn_id_table);
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index 4187abe12784..2ca5c26f0091 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -712,13 +712,13 @@ static DEFINE_RUNTIME_DEV_PM_OPS(tmag5273_pm_ops,
static const struct i2c_device_id tmag5273_id[] = {
{ "tmag5273" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(i2c, tmag5273_id);
static const struct of_device_id tmag5273_of_match[] = {
{ .compatible = "ti,tmag5273" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, tmag5273_of_match);
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index 28012b20c64f..340607111d9a 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -674,8 +674,8 @@ static void yas5xx_fill_buffer(struct iio_dev *indio_dev)
yas5xx->scan.channels[1] = x;
yas5xx->scan.channels[2] = y;
yas5xx->scan.channels[3] = z;
- iio_push_to_buffers_with_timestamp(indio_dev, &yas5xx->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &yas5xx->scan, sizeof(yas5xx->scan),
+ iio_get_time_ns(indio_dev));
}
static irqreturn_t yas5xx_handle_trigger(int irq, void *p)
@@ -1585,7 +1585,7 @@ static const struct i2c_device_id yas5xx_id[] = {
{"yas532", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas532] },
{"yas533", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas533] },
{"yas537", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas537] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, yas5xx_id);
@@ -1594,7 +1594,7 @@ static const struct of_device_id yas5xx_of_match[] = {
{ .compatible = "yamaha,yas532", &yas5xx_chip_info_tbl[yas532] },
{ .compatible = "yamaha,yas533", &yas5xx_chip_info_tbl[yas533] },
{ .compatible = "yamaha,yas537", &yas5xx_chip_info_tbl[yas537] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, yas5xx_of_match);
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index c309d991490c..b742ca9a99d1 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -448,7 +448,7 @@ static int mux_probe(struct platform_device *pdev)
static const struct of_device_id mux_match[] = {
{ .compatible = "io-channel-mux" },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, mux_match);
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 429035b65c65..4e23a598a3fb 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -399,7 +399,7 @@ static const struct platform_device_id hid_incl_3d_ids[] = {
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200086",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_incl_3d_ids);
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index 96f03988640c..e759f91a710a 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -19,7 +19,7 @@ struct dev_rot_state {
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info quaternion;
struct {
- s32 sampled_vals[4] __aligned(16);
+ s32 sampled_vals[4];
aligned_s64 timestamp;
} scan;
int scale_pre_decml;
@@ -351,7 +351,7 @@ static const struct platform_device_id hid_dev_rot_ids[] = {
/* Geomagnetic orientation(AM) sensor */
.name = "HID-SENSOR-2000c1",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_dev_rot_ids);
diff --git a/drivers/iio/position/hid-sensor-custom-intel-hinge.c b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
index 423bbb8a3b38..bff7039690ac 100644
--- a/drivers/iio/position/hid-sensor-custom-intel-hinge.c
+++ b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
@@ -358,7 +358,7 @@ static const struct platform_device_id hid_hinge_ids[] = {
/* Format: HID-SENSOR-INT-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-INT-020b",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_hinge_ids);
diff --git a/drivers/iio/potentiometer/ad5272.c b/drivers/iio/potentiometer/ad5272.c
index b17941e4c2f7..672b1ca3a920 100644
--- a/drivers/iio/potentiometer/ad5272.c
+++ b/drivers/iio/potentiometer/ad5272.c
@@ -199,7 +199,7 @@ static const struct of_device_id ad5272_dt_ids[] = {
{ .compatible = "adi,ad5272-100", .data = (void *)AD5272_100 },
{ .compatible = "adi,ad5274-020", .data = (void *)AD5274_020 },
{ .compatible = "adi,ad5274-100", .data = (void *)AD5274_100 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad5272_dt_ids);
@@ -209,7 +209,7 @@ static const struct i2c_device_id ad5272_id[] = {
{ "ad5272-100", AD5272_100 },
{ "ad5274-020", AD5274_020 },
{ "ad5274-100", AD5274_100 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad5272_id);
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
index e0526dd0e3cb..5761f69c538a 100644
--- a/drivers/iio/potentiometer/ds1803.c
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -231,7 +231,7 @@ static const struct of_device_id ds1803_dt_ids[] = {
{ .compatible = "maxim,ds1803-050", .data = &ds1803_cfg[DS1803_050] },
{ .compatible = "maxim,ds1803-100", .data = &ds1803_cfg[DS1803_100] },
{ .compatible = "maxim,ds3502", .data = &ds1803_cfg[DS3502] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ds1803_dt_ids);
@@ -240,7 +240,7 @@ static const struct i2c_device_id ds1803_id[] = {
{ "ds1803-050", (kernel_ulong_t)&ds1803_cfg[DS1803_050] },
{ "ds1803-100", (kernel_ulong_t)&ds1803_cfg[DS1803_100] },
{ "ds3502", (kernel_ulong_t)&ds1803_cfg[DS3502] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ds1803_id);
diff --git a/drivers/iio/potentiometer/max5432.c b/drivers/iio/potentiometer/max5432.c
index c8e2481dadb5..26390be79d02 100644
--- a/drivers/iio/potentiometer/max5432.c
+++ b/drivers/iio/potentiometer/max5432.c
@@ -114,7 +114,7 @@ static const struct of_device_id max5432_dt_ids[] = {
{ .compatible = "maxim,max5433", .data = (void *)MAX5432_OHM_100K },
{ .compatible = "maxim,max5434", .data = (void *)MAX5432_OHM_50K },
{ .compatible = "maxim,max5435", .data = (void *)MAX5432_OHM_100K },
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, max5432_dt_ids);
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
index 4838d2e72f53..3b11b991940b 100644
--- a/drivers/iio/potentiometer/max5487.c
+++ b/drivers/iio/potentiometer/max5487.c
@@ -137,7 +137,7 @@ static const struct acpi_device_id max5487_acpi_match[] = {
{ "MAX5487", 10 },
{ "MAX5488", 50 },
{ "MAX5489", 100 },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
diff --git a/drivers/iio/potentiometer/mcp4018.c b/drivers/iio/potentiometer/mcp4018.c
index 44678d372126..a88bb2231850 100644
--- a/drivers/iio/potentiometer/mcp4018.c
+++ b/drivers/iio/potentiometer/mcp4018.c
@@ -117,7 +117,7 @@ static const struct i2c_device_id mcp4018_id[] = {
MCP4018_ID_TABLE("mcp4019-103", MCP4018_103),
MCP4018_ID_TABLE("mcp4019-503", MCP4018_503),
MCP4018_ID_TABLE("mcp4019-104", MCP4018_104),
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(i2c, mcp4018_id);
@@ -139,7 +139,7 @@ static const struct of_device_id mcp4018_of_match[] = {
MCP4018_COMPATIBLE("microchip,mcp4019-103", MCP4018_103),
MCP4018_COMPATIBLE("microchip,mcp4019-503", MCP4018_503),
MCP4018_COMPATIBLE("microchip,mcp4019-104", MCP4018_104),
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, mcp4018_of_match);
diff --git a/drivers/iio/potentiometer/mcp41010.c b/drivers/iio/potentiometer/mcp41010.c
index 2b73c7540209..f35fc4a6c55b 100644
--- a/drivers/iio/potentiometer/mcp41010.c
+++ b/drivers/iio/potentiometer/mcp41010.c
@@ -171,7 +171,7 @@ static const struct of_device_id mcp41010_match[] = {
{ .compatible = "microchip,mcp42010", .data = &mcp41010_cfg[MCP42010] },
{ .compatible = "microchip,mcp42050", .data = &mcp41010_cfg[MCP42050] },
{ .compatible = "microchip,mcp42100", .data = &mcp41010_cfg[MCP42100] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mcp41010_match);
@@ -182,7 +182,7 @@ static const struct spi_device_id mcp41010_id[] = {
{ "mcp42010", MCP42010 },
{ "mcp42050", MCP42050 },
{ "mcp42100", MCP42100 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, mcp41010_id);
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
index 7890c0993ec4..9082b559d029 100644
--- a/drivers/iio/potentiometer/mcp4131.c
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -403,7 +403,7 @@ static const struct of_device_id mcp4131_dt_ids[] = {
.data = &mcp4131_cfg[MCP426x_503] },
{ .compatible = "microchip,mcp4262-104",
.data = &mcp4131_cfg[MCP426x_104] },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mcp4131_dt_ids);
@@ -472,7 +472,7 @@ static const struct spi_device_id mcp4131_id[] = {
{ "mcp4262-103", MCP426x_103 },
{ "mcp4262-503", MCP426x_503 },
{ "mcp4262-104", MCP426x_104 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, mcp4131_id);
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index f28880ebd758..9912e91ff7b4 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -276,7 +276,7 @@ static const struct i2c_device_id mcp4531_id[] = {
MCP4531_ID_TABLE("mcp4662-103", MCP466x_103),
MCP4531_ID_TABLE("mcp4662-503", MCP466x_503),
MCP4531_ID_TABLE("mcp4662-104", MCP466x_104),
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(i2c, mcp4531_id);
@@ -350,7 +350,7 @@ static const struct of_device_id mcp4531_of_match[] = {
MCP4531_COMPATIBLE("microchip,mcp4662-103", MCP466x_103),
MCP4531_COMPATIBLE("microchip,mcp4662-503", MCP466x_503),
MCP4531_COMPATIBLE("microchip,mcp4662-104", MCP466x_104),
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(of, mcp4531_of_match);
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index 8923ccb0fc4f..a42b57733363 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -153,7 +153,7 @@ static const struct i2c_device_id tpl0102_id[] = {
{ "cat5140-104", CAT5140_104 },
{ "tpl0102-104", TPL0102_104 },
{ "tpl0401-103", TPL0401_103 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, tpl0102_id);
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index c2c6b2b29867..030498d0b763 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -400,14 +400,14 @@ static void lmp91000_remove(struct i2c_client *client)
static const struct of_device_id lmp91000_of_match[] = {
{ .compatible = "ti,lmp91000", },
{ .compatible = "ti,lmp91002", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, lmp91000_of_match);
static const struct i2c_device_id lmp91000_id[] = {
{ "lmp91000" },
{ "lmp91002" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, lmp91000_id);
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
index 752a63c06b44..a0d956c3e254 100644
--- a/drivers/iio/pressure/abp060mg.c
+++ b/drivers/iio/pressure/abp060mg.c
@@ -247,7 +247,7 @@ static const struct i2c_device_id abp060mg_id_table[] = {
{ "abp015pd", ABP015PD },
{ "abp030pd", ABP030PD },
{ "abp060pd", ABP060PD },
- { /* empty */ },
+ { }
};
MODULE_DEVICE_TABLE(i2c, abp060mg_id_table);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index d44ab65c94cb..f37f20776c89 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -46,6 +46,7 @@
#include <linux/random.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/types.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
@@ -1105,9 +1106,13 @@ static irqreturn_t bmp280_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmp280_data *data = iio_priv(indio_dev);
- u32 adc_temp, adc_press, comp_press;
- s32 t_fine, comp_temp;
- s32 *chans = (s32 *)data->sensor_data;
+ u32 adc_temp, adc_press;
+ s32 t_fine;
+ struct {
+ u32 comp_press;
+ s32 comp_temp;
+ aligned_s64 timestamp;
+ } buffer;
int ret;
guard(mutex)(&data->lock);
@@ -1127,7 +1132,7 @@ static irqreturn_t bmp280_trigger_handler(int irq, void *p)
goto out;
}
- comp_temp = bmp280_compensate_temp(data, adc_temp);
+ buffer.comp_temp = bmp280_compensate_temp(data, adc_temp);
/* Pressure calculations */
adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[0]));
@@ -1137,13 +1142,10 @@ static irqreturn_t bmp280_trigger_handler(int irq, void *p)
}
t_fine = bmp280_calc_t_fine(data, adc_temp);
- comp_press = bmp280_compensate_press(data, adc_press, t_fine);
-
- chans[0] = comp_press;
- chans[1] = comp_temp;
+ buffer.comp_press = bmp280_compensate_press(data, adc_press, t_fine);
- iio_push_to_buffers_with_timestamp(indio_dev, data->sensor_data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &buffer, sizeof(buffer),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -1225,11 +1227,19 @@ static irqreturn_t bme280_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmp280_data *data = iio_priv(indio_dev);
- u32 adc_temp, adc_press, adc_humidity, comp_press, comp_humidity;
- s32 t_fine, comp_temp;
- s32 *chans = (s32 *)data->sensor_data;
+ u32 adc_temp, adc_press, adc_humidity;
+ s32 t_fine;
+ struct {
+ u32 comp_press;
+ s32 comp_temp;
+ u32 comp_humidity;
+ aligned_s64 timestamp;
+ } buffer;
int ret;
+ /* Don't leak uninitialized stack to userspace. */
+ memset(&buffer, 0, sizeof(buffer));
+
guard(mutex)(&data->lock);
/* Burst read data registers */
@@ -1247,7 +1257,7 @@ static irqreturn_t bme280_trigger_handler(int irq, void *p)
goto out;
}
- comp_temp = bmp280_compensate_temp(data, adc_temp);
+ buffer.comp_temp = bmp280_compensate_temp(data, adc_temp);
/* Pressure calculations */
adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(&data->buf[0]));
@@ -1257,7 +1267,7 @@ static irqreturn_t bme280_trigger_handler(int irq, void *p)
}
t_fine = bmp280_calc_t_fine(data, adc_temp);
- comp_press = bmp280_compensate_press(data, adc_press, t_fine);
+ buffer.comp_press = bmp280_compensate_press(data, adc_press, t_fine);
/* Humidity calculations */
adc_humidity = get_unaligned_be16(&data->buf[6]);
@@ -1267,14 +1277,11 @@ static irqreturn_t bme280_trigger_handler(int irq, void *p)
goto out;
}
- comp_humidity = bme280_compensate_humidity(data, adc_humidity, t_fine);
-
- chans[0] = comp_press;
- chans[1] = comp_temp;
- chans[2] = comp_humidity;
+ buffer.comp_humidity = bme280_compensate_humidity(data, adc_humidity,
+ t_fine);
- iio_push_to_buffers_with_timestamp(indio_dev, data->sensor_data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &buffer, sizeof(buffer),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -1899,9 +1906,13 @@ static irqreturn_t bmp380_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmp280_data *data = iio_priv(indio_dev);
- u32 adc_temp, adc_press, comp_press;
- s32 t_fine, comp_temp;
- s32 *chans = (s32 *)data->sensor_data;
+ u32 adc_temp, adc_press;
+ s32 t_fine;
+ struct {
+ u32 comp_press;
+ s32 comp_temp;
+ aligned_s64 timestamp;
+ } buffer;
int ret;
guard(mutex)(&data->lock);
@@ -1921,7 +1932,7 @@ static irqreturn_t bmp380_trigger_handler(int irq, void *p)
goto out;
}
- comp_temp = bmp380_compensate_temp(data, adc_temp);
+ buffer.comp_temp = bmp380_compensate_temp(data, adc_temp);
/* Pressure calculations */
adc_press = get_unaligned_le24(&data->buf[0]);
@@ -1931,13 +1942,10 @@ static irqreturn_t bmp380_trigger_handler(int irq, void *p)
}
t_fine = bmp380_calc_t_fine(data, adc_temp);
- comp_press = bmp380_compensate_press(data, adc_press, t_fine);
+ buffer.comp_press = bmp380_compensate_press(data, adc_press, t_fine);
- chans[0] = comp_press;
- chans[1] = comp_temp;
-
- iio_push_to_buffers_with_timestamp(indio_dev, data->sensor_data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &buffer, sizeof(buffer),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -2608,7 +2616,12 @@ static irqreturn_t bmp580_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmp280_data *data = iio_priv(indio_dev);
- int ret, offset;
+ struct {
+ __le32 comp_temp;
+ __le32 comp_press;
+ aligned_s64 timestamp;
+ } buffer;
+ int ret;
guard(mutex)(&data->lock);
@@ -2620,18 +2633,14 @@ static irqreturn_t bmp580_trigger_handler(int irq, void *p)
goto out;
}
- offset = 0;
-
/* Pressure calculations */
- memcpy(&data->sensor_data[offset], &data->buf[3], 3);
-
- offset += sizeof(s32);
+ memcpy(&buffer.comp_press, &data->buf[3], 3);
/* Temperature calculations */
- memcpy(&data->sensor_data[offset], &data->buf[0], 3);
+ memcpy(&buffer.comp_temp, &data->buf[0], 3);
- iio_push_to_buffers_with_timestamp(indio_dev, data->sensor_data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &buffer, sizeof(buffer),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -2952,25 +2961,26 @@ static irqreturn_t bmp180_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmp280_data *data = iio_priv(indio_dev);
- int ret, comp_temp, comp_press;
- s32 *chans = (s32 *)data->sensor_data;
+ struct {
+ u32 comp_press;
+ s32 comp_temp;
+ aligned_s64 timestamp;
+ } buffer;
+ int ret;
guard(mutex)(&data->lock);
- ret = bmp180_read_temp(data, &comp_temp);
+ ret = bmp180_read_temp(data, &buffer.comp_temp);
if (ret)
goto out;
- ret = bmp180_read_press(data, &comp_press);
+ ret = bmp180_read_press(data, &buffer.comp_press);
if (ret)
goto out;
- chans[0] = comp_press;
- chans[1] = comp_temp;
-
- iio_push_to_buffers_with_timestamp(indio_dev, data->sensor_data,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &buffer, sizeof(buffer),
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
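
The bmp280 handlers stop staging samples in the preallocated sensor_data[] (now deleted from bmp280.h) and instead build each scan in an on-stack struct whose layout matches the enabled channels, with aligned_s64 providing the 8-byte timestamp alignment the core requires. Where some fields may stay unwritten on an error path — as in the bme280 case — the struct is zeroed first so no uninitialized stack reaches userspace. The shape of the pattern, field names as in the patch:

    struct {
            u32 comp_press;             /* channel 0 */
            s32 comp_temp;              /* channel 1 */
            aligned_s64 timestamp;      /* 8-byte aligned timestamp slot */
    } buffer;

    memset(&buffer, 0, sizeof(buffer)); /* avoid leaking stack contents */
    /* ... fill buffer.comp_press and buffer.comp_temp ... */
    iio_push_to_buffers_with_ts(indio_dev, &buffer, sizeof(buffer),
                                iio_get_time_ns(indio_dev));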
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 868e1b2ec711..8e459b6c97ff 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -33,7 +33,7 @@ static const struct of_device_id bmp280_of_i2c_match[] = {
{ .compatible = "bosch,bme280", .data = &bme280_chip_info },
{ .compatible = "bosch,bmp380", .data = &bmp380_chip_info },
{ .compatible = "bosch,bmp580", .data = &bmp580_chip_info },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
@@ -44,7 +44,7 @@ static const struct i2c_device_id bmp280_i2c_id[] = {
{"bme280", (kernel_ulong_t)&bme280_chip_info },
{"bmp380", (kernel_ulong_t)&bmp380_chip_info },
{"bmp580", (kernel_ulong_t)&bmp580_chip_info },
- { },
+ { }
};
MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 0e6e27892f99..3b90384f17d7 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -81,14 +81,6 @@ static int bmp280_spi_probe(struct spi_device *spi)
const struct bmp280_chip_info *chip_info;
struct regmap_bus const *bmp_regmap_bus;
struct regmap *regmap;
- int ret;
-
- spi->bits_per_word = 8;
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi_setup failed!\n");
- return ret;
- }
chip_info = spi_get_device_match_data(spi);
@@ -121,7 +113,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
{ .compatible = "bosch,bme280", .data = &bme280_chip_info },
{ .compatible = "bosch,bmp380", .data = &bmp380_chip_info },
{ .compatible = "bosch,bmp580", .data = &bmp580_chip_info },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 5b2ee1d0ee46..25bb9c743a05 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -349,7 +349,6 @@
BMP280_NUM_TEMP_BYTES + \
BME280_NUM_HUMIDITY_BYTES)
-#define BME280_NUM_MAX_CHANNELS 3
/* Core exported structs */
static const char *const bmp280_supply_names[] = {
@@ -452,13 +451,6 @@ struct bmp280_data {
*/
int sampling_freq;
- /*
- * Data to push to userspace triggered buffer. Up to 3 channels and
- * s64 timestamp, aligned.
- */
- u8 sensor_data[ALIGN(sizeof(s32) * BME280_NUM_MAX_CHANNELS, sizeof(s64))
- + sizeof(s64)] __aligned(sizeof(s64));
-
/* Value to hold the current operation mode of the device */
enum bmp280_op_mode op_mode;
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 2649c2f89e89..c6b950c596c1 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -192,7 +192,7 @@ static const struct platform_device_id cros_ec_baro_ids[] = {
{
.name = "cros-ec-baro",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, cros_ec_baro_ids);
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index e99e97ea6300..48afe5c94000 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -147,12 +147,11 @@ static int dlh_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = dlh_read_direct(st, &pressure, &temperature);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
@@ -344,14 +343,14 @@ static int dlh_probe(struct i2c_client *client)
static const struct of_device_id dlh_of_match[] = {
{ .compatible = "asc,dlhl60d" },
{ .compatible = "asc,dlhl60g" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, dlh_of_match);
static const struct i2c_device_id dlh_id[] = {
{ "dlhl60d", dlhl60d },
{ "dlhl60g", dlhl60g },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, dlh_id);
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index c6f44f0f4d2e..8edaa4d10a70 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -888,13 +888,13 @@ static int dps310_probe(struct i2c_client *client)
static const struct i2c_device_id dps310_id[] = {
{ DPS310_DEV_NAME },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, dps310_id);
static const struct acpi_device_id dps310_acpi_match[] = {
{ "IFX3100" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, dps310_acpi_match);
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index f7273d30c5f0..5f1d6abda3e4 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -176,8 +176,9 @@ static int press_proc_event(struct hid_sensor_hub_device *hsdev,
if (!press_state->timestamp)
press_state->timestamp = iio_get_time_ns(indio_dev);
- iio_push_to_buffers_with_timestamp(
- indio_dev, &press_state->scan, press_state->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &press_state->scan,
+ sizeof(press_state->scan),
+ press_state->timestamp);
}
return 0;
@@ -339,7 +340,7 @@ static const struct platform_device_id hid_press_ids[] = {
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200031",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_press_ids);
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
index 6f7a16787143..cbb4aaf45e2c 100644
--- a/drivers/iio/pressure/hp03.c
+++ b/drivers/iio/pressure/hp03.c
@@ -273,7 +273,7 @@ MODULE_DEVICE_TABLE(i2c, hp03_id);
static const struct of_device_id hp03_of_match[] = {
{ .compatible = "hoperf,hp03" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, hp03_of_match);
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index 442740941933..abe10ccb6770 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -396,13 +396,13 @@ static int hp206c_probe(struct i2c_client *client)
static const struct i2c_device_id hp206c_id[] = {
{"hp206c"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, hp206c_id);
static const struct acpi_device_id hp206c_acpi_match[] = {
{"HOP206C", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, hp206c_acpi_match);
diff --git a/drivers/iio/pressure/hsc030pa.c b/drivers/iio/pressure/hsc030pa.c
index 168245818cfe..2d00c0656259 100644
--- a/drivers/iio/pressure/hsc030pa.c
+++ b/drivers/iio/pressure/hsc030pa.c
@@ -314,8 +314,8 @@ static irqreturn_t hsc_trigger_handler(int irq, void *private)
memcpy(&data->scan.chan[0], &data->buffer[0], 2);
memcpy(&data->scan.chan[1], &data->buffer[2], 2);
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
error:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/pressure/hsc030pa_i2c.c b/drivers/iio/pressure/hsc030pa_i2c.c
index 7f2398aa8155..a34ef4653f34 100644
--- a/drivers/iio/pressure/hsc030pa_i2c.c
+++ b/drivers/iio/pressure/hsc030pa_i2c.c
@@ -48,13 +48,13 @@ static int hsc_i2c_probe(struct i2c_client *client)
static const struct of_device_id hsc_i2c_match[] = {
{ .compatible = "honeywell,hsc030pa" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, hsc_i2c_match);
static const struct i2c_device_id hsc_i2c_id[] = {
{ "hsc030pa" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, hsc_i2c_id);
diff --git a/drivers/iio/pressure/hsc030pa_spi.c b/drivers/iio/pressure/hsc030pa_spi.c
index 60768726e9ad..5d331b3b6da8 100644
--- a/drivers/iio/pressure/hsc030pa_spi.c
+++ b/drivers/iio/pressure/hsc030pa_spi.c
@@ -35,13 +35,13 @@ static int hsc_spi_probe(struct spi_device *spi)
static const struct of_device_id hsc_spi_match[] = {
{ .compatible = "honeywell,hsc030pa" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, hsc_spi_match);
static const struct spi_device_id hsc_spi_id[] = {
{ "hsc030pa" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, hsc_spi_id);
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index 3e0bf5d31ad7..1951c1cc84cf 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -343,9 +343,8 @@ static int icp10100_read_raw_measures(struct iio_dev *indio_dev,
uint32_t pressure_mPa;
int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = icp10100_get_measures(st, &raw_pressure, &raw_temp);
if (ret)
@@ -370,7 +369,7 @@ static int icp10100_read_raw_measures(struct iio_dev *indio_dev,
}
error_release:
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -439,7 +438,6 @@ static int icp10100_write_raw(struct iio_dev *indio_dev,
{
struct icp10100_state *st = iio_priv(indio_dev);
unsigned int mode;
- int ret;
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
@@ -449,13 +447,12 @@ static int icp10100_write_raw(struct iio_dev *indio_dev,
mode = ilog2(val);
if (mode >= ICP10100_MODE_NB)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
st->mode = mode;
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return 0;
default:
return -EINVAL;
diff --git a/drivers/iio/pressure/mpl115_spi.c b/drivers/iio/pressure/mpl115_spi.c
index 888cfa666238..4e1d24beff94 100644
--- a/drivers/iio/pressure/mpl115_spi.c
+++ b/drivers/iio/pressure/mpl115_spi.c
@@ -85,7 +85,7 @@ static int mpl115_spi_probe(struct spi_device *spi)
static const struct spi_device_id mpl115_spi_ids[] = {
{ "mpl115", 0 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, mpl115_spi_ids);
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 71ded2eee060..d6715997f137 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -69,6 +69,52 @@ static int mpl3115_request(struct mpl3115_data *data)
return 0;
}
+static int mpl3115_read_info_raw(struct mpl3115_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ int ret;
+
+ switch (chan->type) {
+ case IIO_PRESSURE: { /* in 0.25 pascal / LSB */
+ __be32 tmp = 0;
+
+ guard(mutex)(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_PRESS,
+ 3, (u8 *) &tmp);
+ if (ret < 0)
+ return ret;
+
+ *val = be32_to_cpu(tmp) >> chan->scan_type.shift;
+ return IIO_VAL_INT;
+ }
+ case IIO_TEMP: { /* in 0.0625 celsius / LSB */
+ __be16 tmp;
+
+ guard(mutex)(&data->lock);
+ ret = mpl3115_request(data);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_TEMP,
+ 2, (u8 *) &tmp);
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(be16_to_cpu(tmp) >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
static int mpl3115_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -78,54 +124,11 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- switch (chan->type) {
- case IIO_PRESSURE: { /* in 0.25 pascal / LSB */
- __be32 tmp = 0;
-
- mutex_lock(&data->lock);
- ret = mpl3115_request(data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- break;
- }
- ret = i2c_smbus_read_i2c_block_data(data->client,
- MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
- mutex_unlock(&data->lock);
- if (ret < 0)
- break;
- *val = be32_to_cpu(tmp) >> chan->scan_type.shift;
- ret = IIO_VAL_INT;
- break;
- }
- case IIO_TEMP: { /* in 0.0625 celsius / LSB */
- __be16 tmp;
-
- mutex_lock(&data->lock);
- ret = mpl3115_request(data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- break;
- }
- ret = i2c_smbus_read_i2c_block_data(data->client,
- MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
- mutex_unlock(&data->lock);
- if (ret < 0)
- break;
- *val = sign_extend32(be16_to_cpu(tmp) >> chan->scan_type.shift,
- chan->scan_type.realbits - 1);
- ret = IIO_VAL_INT;
- break;
- }
- default:
- ret = -EINVAL;
- break;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- iio_device_release_direct_mode(indio_dev);
+ ret = mpl3115_read_info_raw(data, chan, val);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
@@ -188,8 +191,8 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, buffer, sizeof(buffer),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/pressure/mprls0025pa_i2c.c b/drivers/iio/pressure/mprls0025pa_i2c.c
index 48b23a4256ce..1a48f8d43d71 100644
--- a/drivers/iio/pressure/mprls0025pa_i2c.c
+++ b/drivers/iio/pressure/mprls0025pa_i2c.c
@@ -74,13 +74,13 @@ static int mpr_i2c_probe(struct i2c_client *client)
static const struct of_device_id mpr_i2c_match[] = {
{ .compatible = "honeywell,mprls0025pa" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mpr_i2c_match);
static const struct i2c_device_id mpr_i2c_id[] = {
{ "mprls0025pa" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, mpr_i2c_id);
diff --git a/drivers/iio/pressure/mprls0025pa_spi.c b/drivers/iio/pressure/mprls0025pa_spi.c
index 09f724c76d70..d04102f8a4a0 100644
--- a/drivers/iio/pressure/mprls0025pa_spi.c
+++ b/drivers/iio/pressure/mprls0025pa_spi.c
@@ -66,13 +66,13 @@ static int mpr_spi_probe(struct spi_device *spi)
static const struct of_device_id mpr_spi_match[] = {
{ .compatible = "honeywell,mprls0025pa" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mpr_spi_match);
static const struct spi_device_id mpr_spi_id[] = {
{ "mprls0025pa" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, mpr_spi_id);
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 00c077b2a2a4..bdac27bd5a5d 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -308,7 +308,6 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
{
struct ms5611_state *st = iio_priv(indio_dev);
const struct ms5611_osr *osr = NULL;
- int ret;
if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
return -EINVAL;
@@ -322,9 +321,8 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
if (!osr)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
@@ -334,7 +332,7 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
st->pressure_osr = osr;
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return 0;
}
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index b5a91e885793..25c7bd2d8fdf 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -92,7 +92,6 @@ static int ms5611_spi_probe(struct spi_device *spi)
spi->mode = SPI_MODE_0;
spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
- spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret < 0)
return ret;
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index a1767a17fdce..59705a666979 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -219,7 +219,7 @@ static const struct i2c_device_id ms5637_id[] = {
{"ms5805", (kernel_ulong_t)&ms5805_data },
{"ms5837", (kernel_ulong_t)&ms5837_data },
{"ms8607-temppressure", (kernel_ulong_t)&ms8607_data },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ms5637_id);
@@ -229,7 +229,7 @@ static const struct of_device_id ms5637_of_match[] = {
{ .compatible = "meas,ms5805", .data = &ms5805_data },
{ .compatible = "meas,ms5837", .data = &ms5837_data },
{ .compatible = "meas,ms8607-temppressure", .data = &ms8607_data },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ms5637_of_match);
diff --git a/drivers/iio/pressure/rohm-bm1390.c b/drivers/iio/pressure/rohm-bm1390.c
index 9c1197f0e742..dac27fd359ad 100644
--- a/drivers/iio/pressure/rohm-bm1390.c
+++ b/drivers/iio/pressure/rohm-bm1390.c
@@ -319,12 +319,11 @@ static int bm1390_read_raw(struct iio_dev *idev,
return -EINVAL;
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(idev))
+ return -EBUSY;
ret = bm1390_read_data(data, chan, val, val2);
- iio_device_release_direct_mode(idev);
+ iio_device_release_direct(idev);
if (ret)
return ret;
@@ -653,7 +652,8 @@ static irqreturn_t bm1390_trigger_handler(int irq, void *p)
}
}
- iio_push_to_buffers_with_timestamp(idev, &data->buf, data->timestamp);
+ iio_push_to_buffers_with_ts(idev, &data->buf, sizeof(data->buf),
+ data->timestamp);
iio_trigger_notify_done(idev->trig);
return IRQ_HANDLED;
@@ -883,13 +883,13 @@ static int bm1390_probe(struct i2c_client *i2c)
static const struct of_device_id bm1390_of_match[] = {
{ .compatible = "rohm,bm1390glv-z" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, bm1390_of_match);
static const struct i2c_device_id bm1390_id[] = {
{ "bm1390glv-z", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, bm1390_id);
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index b7b66ddc3a73..0f50bac1fb4d 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -50,13 +50,13 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22df",
.data = LPS22DF_PRESS_DEV_NAME,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
static const struct acpi_device_id st_press_acpi_match[] = {
{"SNO9210", LPS22HB},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, st_press_acpi_match);
@@ -69,7 +69,7 @@ static const struct i2c_device_id st_press_id_table[] = {
{ LPS35HW_PRESS_DEV_NAME, LPS35HW },
{ LPS22HH_PRESS_DEV_NAME, LPS22HH },
{ LPS22DF_PRESS_DEV_NAME, LPS22DF },
- {},
+ { }
};
MODULE_DEVICE_TABLE(i2c, st_press_id_table);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 1a4bd1a0f787..39827e6841ca 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -55,7 +55,7 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22df",
.data = LPS22DF_PRESS_DEV_NAME,
},
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -106,7 +106,7 @@ static const struct spi_device_id st_press_id_table[] = {
{ "lps25h-press", },
{ "lps331ap-press" },
{ "lps22hb-press" },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, st_press_id_table);
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 9db1c94dfc18..1640aa3717ed 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -582,7 +582,7 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
struct {
u32 pressure;
u16 temperature;
- u64 timestamp;
+ aligned_s64 timestamp;
} sample;
int err;
@@ -618,8 +618,8 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
*/
zpa2326_dbg(indio_dev, "filling raw samples buffer");
- iio_push_to_buffers_with_timestamp(indio_dev, &sample,
- private->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &sample, sizeof(sample),
+ private->timestamp);
return 0;
}
@@ -1062,9 +1062,8 @@ static int zpa2326_sample_oneshot(struct iio_dev *indio_dev,
int ret;
struct zpa2326_private *priv;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = zpa2326_resume(indio_dev);
if (ret < 0)
@@ -1120,7 +1119,7 @@ static int zpa2326_sample_oneshot(struct iio_dev *indio_dev,
suspend:
zpa2326_suspend(indio_dev);
release:
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -1438,7 +1437,6 @@ static int zpa2326_set_frequency(struct iio_dev *indio_dev, int hz)
{
struct zpa2326_private *priv = iio_priv(indio_dev);
int freq;
- int err;
/* Check if requested frequency is supported. */
for (freq = 0; freq < ARRAY_SIZE(zpa2326_sampling_frequencies); freq++)
@@ -1448,13 +1446,12 @@ static int zpa2326_set_frequency(struct iio_dev *indio_dev, int hz)
return -EINVAL;
/* Don't allow changing frequency if buffered sampling is ongoing. */
- err = iio_device_claim_direct_mode(indio_dev);
- if (err)
- return err;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
priv->frequency = &zpa2326_sampling_frequencies[freq];
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return 0;
}
diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c
index c678f5b96266..af756e2b0f31 100644
--- a/drivers/iio/pressure/zpa2326_spi.c
+++ b/drivers/iio/pressure/zpa2326_spi.c
@@ -47,7 +47,6 @@ static int zpa2326_probe_spi(struct spi_device *spi)
*/
spi->mode = SPI_MODE_3;
spi->max_speed_hz = min(spi->max_speed_hz, 1000000U);
- spi->bits_per_word = 8;
err = spi_setup(spi);
if (err < 0)
return err;
@@ -63,7 +62,7 @@ static void zpa2326_remove_spi(struct spi_device *spi)
static const struct spi_device_id zpa2326_spi_ids[] = {
{ "zpa2326", 0 },
- { },
+ { }
};
MODULE_DEVICE_TABLE(spi, zpa2326_spi_ids);
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 9d3caf2bef18..f1018b14aecf 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -231,8 +231,8 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
goto err_read;
st->scan.chan = val & AS3935_DATA_MASK;
- iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &st->scan, sizeof(st->scan),
+ iio_get_time_ns(indio_dev));
err_read:
iio_trigger_notify_done(indio_dev->trig);
@@ -444,13 +444,13 @@ static int as3935_probe(struct spi_device *spi)
static const struct of_device_id as3935_of_match[] = {
{ .compatible = "ams,as3935", },
- { /* sentinel */ },
+ { }
};
MODULE_DEVICE_TABLE(of, as3935_of_match);
static const struct spi_device_id as3935_id[] = {
{"as3935", 0},
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, as3935_id);
diff --git a/drivers/iio/proximity/cros_ec_mkbp_proximity.c b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
index 667369be0555..1f9de7066ebf 100644
--- a/drivers/iio/proximity/cros_ec_mkbp_proximity.c
+++ b/drivers/iio/proximity/cros_ec_mkbp_proximity.c
@@ -59,16 +59,11 @@ static int cros_ec_mkbp_proximity_parse_state(const void *data)
static int cros_ec_mkbp_proximity_query(struct cros_ec_device *ec_dev,
int *state)
{
- struct {
- struct cros_ec_command msg;
- union {
- struct ec_params_mkbp_info params;
- u32 switches;
- };
- } __packed buf = { };
- struct ec_params_mkbp_info *params = &buf.params;
- struct cros_ec_command *msg = &buf.msg;
- u32 *switches = &buf.switches;
+ DEFINE_RAW_FLEX(struct cros_ec_command, buf, data,
+ MAX(sizeof(u32), sizeof(struct ec_params_mkbp_info)));
+ struct ec_params_mkbp_info *params = (struct ec_params_mkbp_info *)buf->data;
+ struct cros_ec_command *msg = buf;
+ u32 *switches = (u32 *)buf->data;
size_t insize = sizeof(*switches);
int ret;
@@ -250,7 +245,7 @@ static void cros_ec_mkbp_proximity_remove(struct platform_device *pdev)
static const struct of_device_id cros_ec_mkbp_proximity_of_match[] = {
{ .compatible = "google,cros-ec-mkbp-proximity" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, cros_ec_mkbp_proximity_of_match);
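
DEFINE_RAW_FLEX() (from <linux/overflow.h>) declares an on-stack instance of a structure ending in a flexible array member, sized for a given number of trailing elements; it replaces the hand-rolled __packed wrapper struct removed above. A hedged sketch with a hypothetical message type:

    #include <linux/overflow.h>
    #include <linux/string.h>

    struct foo_msg {
            u32 command;
            u8 data[];                  /* flexible array member */
    };

    static void foo_prepare(void)
    {
            /* On-stack foo_msg with room for 16 payload bytes. */
            DEFINE_RAW_FLEX(struct foo_msg, msg, data, 16);

            msg->command = 1;
            memset(msg->data, 0, 16);
            /* ... hand msg to the transport layer ... */
    }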
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index 5aa8e5a22f32..33781c314728 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -701,12 +701,11 @@ static int hx9023s_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = hx9023s_get_proximity(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return hx9023s_get_samp_freq(data, val, val2);
@@ -954,8 +953,8 @@ static irqreturn_t hx9023s_trigger_handler(int irq, void *private)
data->buffer.channels[i++] = cpu_to_le16(data->ch_data[index].diff);
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->buffer,
+ sizeof(data->buffer), pf->timestamp);
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -1191,13 +1190,13 @@ static DEFINE_SIMPLE_DEV_PM_OPS(hx9023s_pm_ops, hx9023s_suspend,
static const struct of_device_id hx9023s_of_match[] = {
{ .compatible = "tyhx,hx9023s" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, hx9023s_of_match);
static const struct i2c_device_id hx9023s_id[] = {
{ "hx9023s" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, hx9023s_id);
diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c
index b0ffd3574013..0d30b91dbcbc 100644
--- a/drivers/iio/proximity/irsd200.c
+++ b/drivers/iio/proximity/irsd200.c
@@ -760,15 +760,19 @@ static irqreturn_t irsd200_trigger_handler(int irq, void *pollf)
{
struct iio_dev *indio_dev = ((struct iio_poll_func *)pollf)->indio_dev;
struct irsd200_data *data = iio_priv(indio_dev);
- s64 buf[2] = {};
+ struct {
+ s16 channel;
+ aligned_s64 ts;
+ } scan;
int ret;
- ret = irsd200_read_data(data, (s16 *)buf);
+ memset(&scan, 0, sizeof(scan));
+ ret = irsd200_read_data(data, &scan.channel);
if (ret)
goto end;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
end:
iio_trigger_notify_done(indio_dev->trig);
@@ -941,7 +945,7 @@ static const struct of_device_id irsd200_of_match[] = {
{
.compatible = "murata,irsd200",
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, irsd200_of_match);
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index dc66ca9bba6b..d1510fe24050 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -481,7 +481,7 @@ static const struct iio_chan_spec_ext_info isl29501_ext_info[] = {
_ISL29501_EXT_INFO("calib_phase_temp_b", REG_CALIB_PHASE_TEMP_B),
_ISL29501_EXT_INFO("calib_phase_light_a", REG_CALIB_PHASE_LIGHT_A),
_ISL29501_EXT_INFO("calib_phase_light_b", REG_CALIB_PHASE_LIGHT_B),
- { },
+ { }
};
#define ISL29501_DISTANCE_SCAN_INDEX 0
@@ -990,7 +990,7 @@ static int isl29501_probe(struct i2c_client *client)
static const struct i2c_device_id isl29501_id[] = {
{ "isl29501" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, isl29501_id);
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index cfc75d001f20..01783486bc7d 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -125,8 +125,8 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p)
if (data->scan.distance < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -239,7 +239,7 @@ static const struct of_device_id of_mb1232_match[] = {
{ .compatible = "maxbotix,mb1242", },
{ .compatible = "maxbotix,mb7040", },
{ .compatible = "maxbotix,mb7137", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_mb1232_match);
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index 2ad69b150902..c5b4e1378b7d 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -268,7 +268,7 @@ static const struct iio_chan_spec ping_chan_spec[] = {
static const struct of_device_id of_ping_match[] = {
{ .compatible = "parallax,ping", .data = &pa_ping_cfg },
{ .compatible = "parallax,laserping", .data = &pa_laser_ping_cfg },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_ping_match);
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index f3d054b06b4c..1deaf70e92ce 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -208,7 +208,7 @@ static int lidar_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW: {
u16 reg;
- if (iio_device_claim_direct_mode(indio_dev))
+ if (!iio_device_claim_direct(indio_dev))
return -EBUSY;
ret = lidar_get_measurement(data, &reg);
@@ -216,7 +216,7 @@ static int lidar_read_raw(struct iio_dev *indio_dev,
*val = reg;
ret = IIO_VAL_INT;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
break;
}
case IIO_CHAN_INFO_SCALE:
@@ -238,8 +238,9 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
ret = lidar_get_measurement(data, &data->scan.chan);
if (!ret) {
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan,
+ sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
} else if (ret != -EINVAL) {
dev_err(&data->client->dev, "cannot read LIDAR measurement");
}
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 71ad29e441b2..b059bac1078b 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -240,7 +240,7 @@ static const struct of_device_id of_srf04_match[] = {
{ .compatible = "maxbotix,mb1020", .data = &mb_lv_cfg },
{ .compatible = "maxbotix,mb1030", .data = &mb_lv_cfg },
{ .compatible = "maxbotix,mb1040", .data = &mb_lv_cfg },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_srf04_match);
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index 86cab113ef3d..6e32fdfd161b 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -191,8 +191,8 @@ static irqreturn_t srf08_trigger_handler(int irq, void *p)
mutex_lock(&data->lock);
data->scan.chan = sensor_data;
- iio_push_to_buffers_with_timestamp(indio_dev,
- &data->scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ pf->timestamp);
mutex_unlock(&data->lock);
err:
@@ -531,7 +531,7 @@ static const struct of_device_id of_srf08_match[] = {
{ .compatible = "devantech,srf02", (void *)SRF02 },
{ .compatible = "devantech,srf08", (void *)SRF08 },
{ .compatible = "devantech,srf10", (void *)SRF10 },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_srf08_match);
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index b60707eba39d..fb02eac78ed4 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -995,21 +995,21 @@ static const struct sx931x_info sx9311_info = {
static const struct acpi_device_id sx9310_acpi_match[] = {
{ "STH9310", (kernel_ulong_t)&sx9310_info },
{ "STH9311", (kernel_ulong_t)&sx9311_info },
- {}
+ { }
};
MODULE_DEVICE_TABLE(acpi, sx9310_acpi_match);
static const struct of_device_id sx9310_of_match[] = {
{ .compatible = "semtech,sx9310", &sx9310_info },
{ .compatible = "semtech,sx9311", &sx9311_info },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, sx9310_of_match);
static const struct i2c_device_id sx9310_id[] = {
{ "sx9310", (kernel_ulong_t)&sx9310_info },
{ "sx9311", (kernel_ulong_t)&sx9311_info },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, sx9310_id);
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index 73d972416c01..c7b2d03c23bc 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -202,7 +202,7 @@ static const struct iio_chan_spec_ext_info sx9324_channel_ext_info[] = {
.shared = IIO_SEPARATE,
.read = sx9324_phase_configuration_show,
},
- {}
+ { }
};
#define SX9324_CHANNEL(idx) \
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index c4e94d0fb163..8913da59dc73 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -387,11 +387,10 @@ static int sx9500_read_raw(struct iio_dev *indio_dev,
case IIO_PROXIMITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = sx9500_read_proximity(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9500_read_samp_freq(data, val, val2);
@@ -866,7 +865,7 @@ static const struct acpi_gpio_mapping acpi_sx9500_gpios[] = {
* GPIO to be output only. Ask the GPIO core to ignore this limit.
*/
{ "interrupt-gpios", &interrupt_gpios, 1, ACPI_GPIO_QUIRK_NO_IO_RESTRICTION },
- { },
+ { }
};
static void sx9500_gpio_probe(struct i2c_client *client,
@@ -1031,7 +1030,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(sx9500_pm_ops, sx9500_suspend, sx9500_resume);
static const struct acpi_device_id sx9500_acpi_match[] = {
{"SSX9500", 0},
{"SASX9500", 0},
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, sx9500_acpi_match);
diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c
index f70198a1f0d1..59b35e40739b 100644
--- a/drivers/iio/proximity/sx_common.c
+++ b/drivers/iio/proximity/sx_common.c
@@ -379,8 +379,8 @@ static irqreturn_t sx_common_trigger_handler(int irq, void *private)
data->buffer.channels[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
- pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->buffer,
+ sizeof(data->buffer), pf->timestamp);
out:
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/proximity/vcnl3020.c b/drivers/iio/proximity/vcnl3020.c
index bb6c9cc88b35..31e77d9e0c90 100644
--- a/drivers/iio/proximity/vcnl3020.c
+++ b/drivers/iio/proximity/vcnl3020.c
@@ -653,7 +653,7 @@ static const struct of_device_id vcnl3020_of_match[] = {
{
.compatible = "vishay,vcnl3020",
},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, vcnl3020_of_match);
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index 87d10faaff9b..ef4aa7b2835e 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -94,8 +94,8 @@ static irqreturn_t vl53l0x_trigger_handler(int irq, void *priv)
return -EREMOTEIO;
data->scan.chan = get_unaligned_be16(&buffer[10]);
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
vl53l0x_clear_irq(data);
diff --git a/drivers/iio/resolver/ad2s1200.c b/drivers/iio/resolver/ad2s1200.c
index 9d95241bdf8f..0bc84f12cb34 100644
--- a/drivers/iio/resolver/ad2s1200.c
+++ b/drivers/iio/resolver/ad2s1200.c
@@ -186,7 +186,7 @@ MODULE_DEVICE_TABLE(of, ad2s1200_of_match);
static const struct spi_device_id ad2s1200_id[] = {
{ "ad2s1200" },
{ "ad2s1205" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad2s1200_id);
diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c
index ab860cedecd1..9b028c8bb1db 100644
--- a/drivers/iio/resolver/ad2s1210.c
+++ b/drivers/iio/resolver/ad2s1210.c
@@ -1340,7 +1340,8 @@ static irqreturn_t ad2s1210_trigger_handler(int irq, void *p)
}
ad2s1210_push_events(indio_dev, st->sample.fault, pf->timestamp);
- iio_push_to_buffers_with_timestamp(indio_dev, &st->scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &st->scan, sizeof(st->scan),
+ pf->timestamp);
error_ret:
iio_trigger_notify_done(indio_dev->trig);
@@ -1597,7 +1598,7 @@ MODULE_DEVICE_TABLE(of, ad2s1210_of_match);
static const struct spi_device_id ad2s1210_id[] = {
{ "ad2s1210" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad2s1210_id);
diff --git a/drivers/iio/resolver/ad2s90.c b/drivers/iio/resolver/ad2s90.c
index be6836e55376..18f1c905eeac 100644
--- a/drivers/iio/resolver/ad2s90.c
+++ b/drivers/iio/resolver/ad2s90.c
@@ -105,13 +105,13 @@ static int ad2s90_probe(struct spi_device *spi)
static const struct of_device_id ad2s90_of_match[] = {
{ .compatible = "adi,ad2s90", },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad2s90_of_match);
static const struct spi_device_id ad2s90_id[] = {
{ "ad2s90" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad2s90_id);
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index 692520e1c497..9f628a8e5cfb 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -131,8 +131,9 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev,
struct temperature_state *temp_st = iio_priv(indio_dev);
if (atomic_read(&temp_st->common_attributes.data_ready))
- iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &temp_st->scan,
+ sizeof(temp_st->scan),
+ iio_get_time_ns(indio_dev));
return 0;
}
@@ -272,7 +273,7 @@ static const struct platform_device_id hid_temperature_ids[] = {
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200033",
},
- { /* sentinel */ }
+ { }
};
MODULE_DEVICE_TABLE(platform, hid_temperature_ids);
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index f8ea2219ab48..7dd40d69cce6 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -1664,7 +1664,7 @@ static const struct spi_device_id ltc2983_id_table[] = {
{ "ltc2984", (kernel_ulong_t)&ltc2984_chip_info_data },
{ "ltc2986", (kernel_ulong_t)&ltc2986_chip_info_data },
{ "ltm2985", (kernel_ulong_t)&ltm2985_chip_info_data },
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, ltc2983_id_table);
@@ -1673,7 +1673,7 @@ static const struct of_device_id ltc2983_of_match[] = {
{ .compatible = "adi,ltc2984", .data = &ltc2984_chip_info_data },
{ .compatible = "adi,ltc2986", .data = &ltc2986_chip_info_data },
{ .compatible = "adi,ltm2985", .data = &ltm2985_chip_info_data },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, ltc2983_of_match);
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 555a61e2f3fd..cae8e84821d7 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
@@ -169,8 +168,9 @@ static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private)
ret = spi_read(data->spi, data->buffer, data->chip->read_size);
if (!ret) {
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, data->buffer,
+ sizeof(data->buffer),
+ iio_get_time_ns(indio_dev));
}
iio_trigger_notify_done(indio_dev->trig);
@@ -183,40 +183,35 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct maxim_thermocouple_data *data = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = maxim_thermocouple_read(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
-
- if (!ret)
- return IIO_VAL_INT;
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->channel2) {
case IIO_MOD_TEMP_AMBIENT:
*val = 62;
*val2 = 500000; /* 1000 * 0.0625 */
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
*val = 250; /* 1000 * 0.25 */
- ret = IIO_VAL_INT;
+ return IIO_VAL_INT;
}
- break;
case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
*val = data->tc_type;
- ret = IIO_VAL_CHAR;
- break;
+ return IIO_VAL_CHAR;
+ default:
+ return -EINVAL;
}
-
- return ret;
}
static const struct iio_info maxim_thermocouple_info = {
@@ -271,7 +266,7 @@ static const struct spi_device_id maxim_thermocouple_id[] = {
{"max31855t", MAX31855T},
{"max31855e", MAX31855E},
{"max31855r", MAX31855R},
- {},
+ { }
};
MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
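Alongside the direct-mode conversion, maxim_thermocouple_read_raw() is flattened into an early-return switch: each case returns its IIO_VAL_* code (or errno) directly, and the pre-seeded ret = -EINVAL becomes an explicit default case. The resulting shape, condensed:

switch (mask) {
case IIO_CHAN_INFO_RAW:
	/* claim, read, release as shown above */
	return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
	/* fill the val and val2 pair */
	return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
	return IIO_VAL_CHAR;
default:
	return -EINVAL;
}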
diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c
index c2447860adfd..6e9108d5cf75 100644
--- a/drivers/iio/temperature/mcp9600.c
+++ b/drivers/iio/temperature/mcp9600.c
@@ -449,13 +449,13 @@ static int mcp9600_probe(struct i2c_client *client)
static const struct i2c_device_id mcp9600_id[] = {
{ "mcp9600" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, mcp9600_id);
static const struct of_device_id mcp9600_of_match[] = {
{ .compatible = "microchip,mcp9600" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, mcp9600_of_match);
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index b5c94b7492f5..29bff9d8859d 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -269,8 +269,8 @@ static irqreturn_t tmp006_trigger_handler(int irq, void *p)
goto err;
scan.channels[1] = ret;
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/temperature/tmp007.c b/drivers/iio/temperature/tmp007.c
index fd4d389ce1df..043283b02c4d 100644
--- a/drivers/iio/temperature/tmp007.c
+++ b/drivers/iio/temperature/tmp007.c
@@ -558,7 +558,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(tmp007_pm_ops, tmp007_suspend, tmp007_resume);
static const struct of_device_id tmp007_of_match[] = {
{ .compatible = "ti,tmp007", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, tmp007_of_match);
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index cfaa16f46a3f..334bba6fdae6 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -207,13 +207,13 @@ static int tsys01_i2c_probe(struct i2c_client *client)
static const struct i2c_device_id tsys01_id[] = {
{ "tsys01" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, tsys01_id);
static const struct of_device_id tsys01_of_match[] = {
{ .compatible = "meas,tsys01", },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, tsys01_of_match);
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index ef34b3c58f26..0cad27205667 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -169,7 +169,7 @@ static int tsys02d_probe(struct i2c_client *client)
static const struct i2c_device_id tsys02d_id[] = {
{ "tsys02d" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, tsys02d_id);
diff --git a/drivers/iio/test/iio-test-format.c b/drivers/iio/test/iio-test-format.c
index fc67e6b73df7..872dd8582003 100644
--- a/drivers/iio/test/iio-test-format.c
+++ b/drivers/iio/test/iio-test-format.c
@@ -257,7 +257,7 @@ static struct kunit_case iio_format_test_cases[] = {
KUNIT_CASE(iio_test_iio_format_value_fractional_log2),
KUNIT_CASE(iio_test_iio_format_value_multiple),
KUNIT_CASE(iio_test_iio_format_value_integer_64),
- {}
+ { }
};
static struct kunit_suite iio_format_test_suite = {
diff --git a/drivers/iio/test/iio-test-gts.c b/drivers/iio/test/iio-test-gts.c
index 1eceec9d477f..11250bc905c9 100644
--- a/drivers/iio/test/iio-test-gts.c
+++ b/drivers/iio/test/iio-test-gts.c
@@ -499,7 +499,7 @@ static struct kunit_case iio_gts_test_cases[] = {
KUNIT_CASE(test_iio_find_closest_gain_low),
KUNIT_CASE(test_iio_gts_total_gain_to_scale),
KUNIT_CASE(test_iio_gts_avail_test),
- {}
+ { }
};
static struct kunit_suite iio_gts_test_suite = {
diff --git a/drivers/iio/test/iio-test-rescale.c b/drivers/iio/test/iio-test-rescale.c
index bbc6a2e1c2c1..ac6942cf1e44 100644
--- a/drivers/iio/test/iio-test-rescale.c
+++ b/drivers/iio/test/iio-test-rescale.c
@@ -704,7 +704,7 @@ static void iio_rescale_test_offset(struct kunit *test)
static struct kunit_case iio_rescale_test_cases[] = {
KUNIT_CASE_PARAM(iio_rescale_test_scale, iio_rescale_scale_gen_params),
KUNIT_CASE_PARAM(iio_rescale_test_offset, iio_rescale_offset_gen_params),
- {}
+ { }
};
static struct kunit_suite iio_rescale_test_suite = {
diff --git a/drivers/iio/trigger/stm32-lptimer-trigger.c b/drivers/iio/trigger/stm32-lptimer-trigger.c
index f1e18913236a..2505ace440b4 100644
--- a/drivers/iio/trigger/stm32-lptimer-trigger.c
+++ b/drivers/iio/trigger/stm32-lptimer-trigger.c
@@ -16,16 +16,43 @@
#include <linux/platform_device.h>
#include <linux/property.h>
-/* List Low-Power Timer triggers */
-static const char * const stm32_lptim_triggers[] = {
- LPTIM1_OUT,
- LPTIM2_OUT,
- LPTIM3_OUT,
+/* Room for the longest trigger list plus one trailing NULL entry marking the end of the array */
+#define MAX_TRIGGERS 3
+
+struct stm32_lptim_cfg {
+ const char * const (*triggers)[MAX_TRIGGERS];
+ unsigned int nb_triggers;
+};
+
+/* List Low-Power Timer triggers for H7, MP13, MP15 */
+static const char * const stm32_lptim_triggers[][MAX_TRIGGERS] = {
+ { LPTIM1_OUT,},
+ { LPTIM2_OUT,},
+ { LPTIM3_OUT,},
+};
+
+/* List Low-Power Timer triggers for STM32MP25 */
+static const char * const stm32mp25_lptim_triggers[][MAX_TRIGGERS] = {
+ { LPTIM1_CH1, LPTIM1_CH2, },
+ { LPTIM2_CH1, LPTIM2_CH2, },
+ { LPTIM3_CH1,},
+ { LPTIM4_CH1,},
+ { LPTIM5_OUT,},
+};
+
+static const struct stm32_lptim_cfg stm32mp15_lptim_cfg = {
+ .triggers = stm32_lptim_triggers,
+ .nb_triggers = ARRAY_SIZE(stm32_lptim_triggers),
+};
+
+static const struct stm32_lptim_cfg stm32mp25_lptim_cfg = {
+ .triggers = stm32mp25_lptim_triggers,
+ .nb_triggers = ARRAY_SIZE(stm32mp25_lptim_triggers),
};
struct stm32_lptim_trigger {
struct device *dev;
- const char *trg;
+ const char * const *triggers;
};
static int stm32_lptim_validate_device(struct iio_trigger *trig,
@@ -56,22 +83,33 @@ EXPORT_SYMBOL(is_stm32_lptim_trigger);
static int stm32_lptim_setup_trig(struct stm32_lptim_trigger *priv)
{
- struct iio_trigger *trig;
+ const char * const *cur = priv->triggers;
+ int ret;
- trig = devm_iio_trigger_alloc(priv->dev, "%s", priv->trg);
- if (!trig)
- return -ENOMEM;
+ while (cur && *cur) {
+ struct iio_trigger *trig;
- trig->dev.parent = priv->dev->parent;
- trig->ops = &stm32_lptim_trigger_ops;
- iio_trigger_set_drvdata(trig, priv);
+ trig = devm_iio_trigger_alloc(priv->dev, "%s", *cur);
+ if (!trig)
+ return -ENOMEM;
- return devm_iio_trigger_register(priv->dev, trig);
+ trig->dev.parent = priv->dev->parent;
+ trig->ops = &stm32_lptim_trigger_ops;
+ iio_trigger_set_drvdata(trig, priv);
+
+ ret = devm_iio_trigger_register(priv->dev, trig);
+ if (ret)
+ return ret;
+ cur++;
+ }
+
+ return 0;
}
static int stm32_lptim_trigger_probe(struct platform_device *pdev)
{
struct stm32_lptim_trigger *priv;
+ struct stm32_lptim_cfg const *lptim_cfg;
u32 index;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -81,18 +119,21 @@ static int stm32_lptim_trigger_probe(struct platform_device *pdev)
if (device_property_read_u32(&pdev->dev, "reg", &index))
return -EINVAL;
- if (index >= ARRAY_SIZE(stm32_lptim_triggers))
+ lptim_cfg = device_get_match_data(&pdev->dev);
+
+ if (index >= lptim_cfg->nb_triggers)
return -EINVAL;
priv->dev = &pdev->dev;
- priv->trg = stm32_lptim_triggers[index];
+ priv->triggers = lptim_cfg->triggers[index];
return stm32_lptim_setup_trig(priv);
}
static const struct of_device_id stm32_lptim_trig_of_match[] = {
- { .compatible = "st,stm32-lptimer-trigger", },
- {},
+ { .compatible = "st,stm32-lptimer-trigger", .data = &stm32mp15_lptim_cfg },
+ { .compatible = "st,stm32mp25-lptimer-trigger", .data = &stm32mp25_lptim_cfg},
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_lptim_trig_of_match);
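The lptimer rework replaces the flat trigger list with per-compatible configuration: each SoC's match entry carries an array of per-instance trigger-name lists (NULL-terminated within the MAX_TRIGGERS slots), probe fetches it with device_get_match_data(), indexes it by the "reg" property, and the setup loop registers every name in the selected list. A condensed sketch of the lookup, reusing the structures from the hunk (foo_* names hypothetical):

static int foo_lptim_probe(struct platform_device *pdev)
{
	const struct stm32_lptim_cfg *cfg;
	u32 index;

	if (device_property_read_u32(&pdev->dev, "reg", &index))
		return -EINVAL;

	cfg = device_get_match_data(&pdev->dev);
	if (index >= cfg->nb_triggers)
		return -EINVAL;

	/* cfg->triggers[index] is a NULL-terminated name list */
	return foo_register_all(&pdev->dev, cfg->triggers[index]);
}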
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index e41cb741253b..925b864facca 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -711,7 +711,7 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
IIO_ENUM_AVAILABLE("enable_mode", IIO_SHARED_BY_TYPE, &stm32_enable_mode_enum),
IIO_ENUM("trigger_mode", IIO_SEPARATE, &stm32_trigger_mode_enum),
IIO_ENUM_AVAILABLE("trigger_mode", IIO_SHARED_BY_TYPE, &stm32_trigger_mode_enum),
- {}
+ { }
};
static const struct iio_chan_spec stm32_trigger_channel = {
@@ -921,7 +921,7 @@ static const struct of_device_id stm32_trig_of_match[] = {
.compatible = "st,stm32mp25-timer-trigger",
.data = (void *)&stm32mp25_timer_trg_cfg,
},
- { /* end node */ },
+ { }
};
MODULE_DEVICE_TABLE(of, stm32_trig_of_match);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 142170473e75..8670e58675c6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -36,6 +36,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
+#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
@@ -167,7 +168,7 @@ struct cm_port {
struct cm_device {
struct kref kref;
struct list_head list;
- spinlock_t mad_agent_lock;
+ rwlock_t mad_agent_lock;
struct ib_device *ib_device;
u8 ack_delay;
int going_down;
@@ -241,7 +242,6 @@ struct cm_id_private {
u8 initiator_depth;
u8 retry_count;
u8 rnr_retry_count;
- u8 service_timeout;
u8 target_ack_delay;
struct list_head work_list;
@@ -285,7 +285,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return ERR_PTR(-EINVAL);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
mad_agent = cm_id_priv->av.port->mad_agent;
if (!mad_agent) {
m = ERR_PTR(-EINVAL);
@@ -311,7 +311,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
m->ah = ah;
out:
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return m;
}
@@ -1297,10 +1297,10 @@ static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
if (!cm_id_priv->av.port)
return cpu_to_be64(low_tid);
- spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
if (cm_id_priv->av.port->mad_agent)
hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
+ read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
return cpu_to_be64(hi_tid | low_tid);
}
@@ -1872,7 +1872,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
static void cm_format_mra(struct cm_mra_msg *mra_msg,
struct cm_id_private *cm_id_priv,
- enum cm_msg_response msg_mraed, u8 service_timeout,
+ enum cm_msg_response msg_mraed,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
@@ -1881,7 +1881,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
be32_to_cpu(cm_id_priv->id.local_id));
IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
- IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
+ IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING);
if (private_data && private_data_len)
IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
@@ -1960,7 +1960,7 @@ static void cm_dup_req_handler(struct cm_work *work,
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REQ,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
break;
@@ -2454,7 +2454,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
cm_id_priv->private_data_len);
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
+ CM_MSG_RESPONSE_REP,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else
@@ -3094,26 +3094,13 @@ out:
return -EINVAL;
}
-int ib_send_cm_mra(struct ib_cm_id *cm_id,
- u8 service_timeout,
- const void *private_data,
- u8 private_data_len)
+int ib_prepare_cm_mra(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
- struct ib_mad_send_buf *msg;
enum ib_cm_state cm_state;
enum ib_cm_lap_state lap_state;
- enum cm_msg_response msg_response;
- void *data;
unsigned long flags;
- int ret;
-
- if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
- return -EINVAL;
-
- data = cm_copy_private_data(private_data, private_data_len);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3122,58 +3109,33 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REQ;
break;
case IB_CM_REP_RCVD:
cm_state = IB_CM_MRA_REP_SENT;
lap_state = cm_id->lap_state;
- msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
- msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
fallthrough;
default:
- trace_icm_send_mra_unknown_err(&cm_id_priv->id);
+ trace_icm_prepare_mra_unknown_err(&cm_id_priv->id);
ret = -EINVAL;
goto error_unlock;
}
- if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
- msg = cm_alloc_msg(cm_id_priv);
- if (IS_ERR(msg)) {
- ret = PTR_ERR(msg);
- goto error_unlock;
- }
-
- cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
- msg_response, service_timeout,
- private_data, private_data_len);
- trace_icm_send_mra(cm_id);
- ret = ib_post_send_mad(msg, NULL);
- if (ret)
- goto error_free_msg;
- }
-
cm_id->state = cm_state;
cm_id->lap_state = lap_state;
- cm_id_priv->service_timeout = service_timeout;
- cm_set_private_data(cm_id_priv, data, private_data_len);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- return 0;
+ cm_set_private_data(cm_id_priv, NULL, 0);
-error_free_msg:
- cm_free_msg(msg);
error_unlock:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- kfree(data);
return ret;
}
-EXPORT_SYMBOL(ib_send_cm_mra);
+EXPORT_SYMBOL(ib_prepare_cm_mra);
static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
@@ -3377,7 +3339,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_OTHER,
- cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
@@ -3786,7 +3747,8 @@ static void cm_process_send_error(struct cm_id_private *cm_id_priv,
spin_lock_irq(&cm_id_priv->lock);
if (msg != cm_id_priv->msg) {
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_priv_msg(msg);
+ cm_free_msg(msg);
+ cm_deref_id(cm_id_priv);
return;
}
cm_free_priv_msg(msg);
@@ -4378,7 +4340,7 @@ static int cm_add_one(struct ib_device *ib_device)
return -ENOMEM;
kref_init(&cm_dev->kref);
- spin_lock_init(&cm_dev->mad_agent_lock);
+ rwlock_init(&cm_dev->mad_agent_lock);
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
cm_dev->going_down = 0;
@@ -4494,9 +4456,9 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
* The above ensures no call paths from the work are running,
* the remaining paths all take the mad_agent_lock.
*/
- spin_lock(&cm_dev->mad_agent_lock);
+ write_lock(&cm_dev->mad_agent_lock);
port->mad_agent = NULL;
- spin_unlock(&cm_dev->mad_agent_lock);
+ write_unlock(&cm_dev->mad_agent_lock);
ib_unregister_mad_agent(mad_agent);
ib_port_unregister_client_groups(ib_device, i,
cm_counter_groups);
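Two independent changes land in cm.c. The MRA path is reduced to ib_prepare_cm_mra(): instead of sending an MRA immediately with a caller-chosen service timeout, the CM only records the MRA state with the fixed CM_MRA_SETTING encoding (~68.7 s), and MRAs now go out only for duplicate REQ/REP/LAP messages. Separately, mad_agent_lock becomes an rwlock: cm_alloc_msg() and cm_form_tid() only read port->mad_agent, while cm_remove_one() is the sole writer that NULLs it, so readers no longer serialize against each other. A minimal sketch of the reader/writer split (struct and paths shortened):

struct cm_device {
	rwlock_t mad_agent_lock;
	/* ... */
};

/* hot path: many concurrent readers allowed */
read_lock(&cm_dev->mad_agent_lock);
mad_agent = port->mad_agent;		/* may be NULL after removal */
read_unlock(&cm_dev->mad_agent_lock);

/* teardown: exclusive writer */
write_lock(&cm_dev->mad_agent_lock);
port->mad_agent = NULL;
write_unlock(&cm_dev->mad_agent_lock);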
diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h
index 944d9071245d..4a4987da69d4 100644
--- a/drivers/infiniband/core/cm_trace.h
+++ b/drivers/infiniband/core/cm_trace.h
@@ -229,7 +229,7 @@ DEFINE_CM_ERR_EVENT(send_drep);
DEFINE_CM_ERR_EVENT(dreq_unknown);
DEFINE_CM_ERR_EVENT(send_unknown_rej);
DEFINE_CM_ERR_EVENT(rej_unknown);
-DEFINE_CM_ERR_EVENT(send_mra_unknown);
+DEFINE_CM_ERR_EVENT(prepare_mra_unknown);
DEFINE_CM_ERR_EVENT(mra_unknown);
DEFINE_CM_ERR_EVENT(qp_init);
DEFINE_CM_ERR_EVENT(qp_rtr);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ab31eefa916b..9b471548e7ae 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -46,7 +46,6 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
-#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 16
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
@@ -146,19 +145,6 @@ struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
}
EXPORT_SYMBOL(rdma_iw_cm_id);
-/**
- * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
- * @res: rdma resource tracking entry pointer
- */
-struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
-{
- struct rdma_id_private *id_priv =
- container_of(res, struct rdma_id_private, res);
-
- return &id_priv->id;
-}
-EXPORT_SYMBOL(rdma_res_to_id);
-
static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
@@ -2214,8 +2200,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
case IB_CM_REP_RECEIVED:
if (state == RDMA_CM_CONNECT &&
(id_priv->id.qp_type != IB_QPT_UD)) {
- trace_cm_send_mra(id_priv);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(id_priv);
+ ib_prepare_cm_mra(cm_id);
}
if (id_priv->id.qp) {
event.status = cma_rep_recv(id_priv);
@@ -2476,8 +2462,8 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
conn_id->id.qp_type != IB_QPT_UD) {
- trace_cm_send_mra(cm_id->context);
- ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+ trace_cm_prepare_mra(cm_id->context);
+ ib_prepare_cm_mra(cm_id);
}
mutex_unlock(&conn_id->handler_mutex);
@@ -5245,7 +5231,8 @@ static int cma_netevent_callback(struct notifier_block *self,
neigh->ha, ETH_ALEN))
continue;
cma_id_get(current_id);
- queue_work(cma_wq, &current_id->id.net_work);
+ if (!queue_work(cma_wq, &current_id->id.net_work))
+ cma_id_put(current_id);
}
out:
spin_unlock_irqrestore(&id_table_lock, flags);
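The netevent fix closes a reference leak: queue_work() returns false when the work item is already pending, in which case the reference just taken for the queued work must be dropped again. The pattern, as applied above:

cma_id_get(current_id);
if (!queue_work(cma_wq, &current_id->id.net_work))
	cma_id_put(current_id);	/* work already queued; drop the extra ref */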
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index dc622f3778be..3456d5f3aa47 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -55,7 +55,7 @@ DECLARE_EVENT_CLASS(cma_fsm_class,
DEFINE_CMA_FSM_EVENT(send_rtu);
DEFINE_CMA_FSM_EVENT(send_rej);
-DEFINE_CMA_FSM_EVENT(send_mra);
+DEFINE_CMA_FSM_EVENT(prepare_mra);
DEFINE_CMA_FSM_EVENT(send_sidr_req);
DEFINE_CMA_FSM_EVENT(send_sidr_rep);
DEFINE_CMA_FSM_EVENT(disconnect);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index b4e3e4beb7f4..d4263385850a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1352,6 +1352,9 @@ static void ib_device_notify_register(struct ib_device *device)
down_read(&devices_rwsem);
+ /* Mark for userspace that device is ready */
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
if (ret)
goto out;
@@ -1468,10 +1471,9 @@ int ib_register_device(struct ib_device *device, const char *name,
return ret;
}
dev_set_uevent_suppress(&device->dev, false);
- /* Mark for userspace that device is ready */
- kobject_uevent(&device->dev.kobj, KOBJ_ADD);
ib_device_notify_register(device);
+
ib_device_put(device);
return 0;
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f4486cbd8f45..62410578dec3 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -368,12 +368,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
/*
* CM_ID <-- DESTROYING
*
- * Clean up all resources associated with the connection and release
- * the initial reference taken by iw_create_cm_id.
- *
- * Returns true if and only if the last cm_id_priv reference has been dropped.
+ * Clean up all resources associated with the connection.
*/
-static bool destroy_cm_id(struct iw_cm_id *cm_id)
+static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
struct ib_qp *qp;
@@ -442,20 +439,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
}
-
- return iwcm_deref_id(cm_id_priv);
}
/*
- * This function is only called by the application thread and cannot
- * be called by the event thread. The function will wait for all
- * references to be released on the cm_id and then kfree the cm_id
- * object.
+ * Destroy cm_id. If the cm_id still has other references, wait for all
+ * references to be released on the cm_id and then release the initial
+ * reference taken by iw_create_cm_id.
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
- if (!destroy_cm_id(cm_id))
+ struct iwcm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ destroy_cm_id(cm_id);
+ if (refcount_read(&cm_id_priv->refcount) > 1)
flush_workqueue(iwcm_wq);
+ iwcm_deref_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -1035,8 +1034,10 @@ static void cm_work_handler(struct work_struct *_work)
if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
ret = process_event(cm_id_priv, &levent);
- if (ret)
- WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+ if (ret) {
+ destroy_cm_id(&cm_id_priv->id);
+ WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
+ }
} else
pr_debug("dropping event %d\n", levent.event);
if (iwcm_deref_id(cm_id_priv))
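The iwcm rework separates teardown from reference dropping: destroy_cm_id() now only dismantles connection state, and iw_destroy_cm_id() manages the reference lifetime itself. Annotated shape of the new flow, as applied above:

void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv =
		container_of(cm_id, struct iwcm_id_private, id);

	destroy_cm_id(cm_id);			/* tear down state only */
	if (refcount_read(&cm_id_priv->refcount) > 1)
		flush_workqueue(iwcm_wq);	/* let in-flight events drain */
	iwcm_deref_id(cm_id_priv);		/* drop the initial reference */
}

The event thread (cm_work_handler) gets the same split: it destroys the id and derefs explicitly, where it previously relied on the combined helper's return value.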
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8af0619a39cd..b4b10e8a6495 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -158,7 +158,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
recv_wc->recv_buf.grh, agent->port_num);
if (IS_ERR(ah))
- return (void *) ah;
+ return ERR_CAST(ah);
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
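Here and in uverbs_cmd.c and verbs.c below, open-coded (void *) conversions of error pointers are replaced with ERR_CAST(), which re-types an IS_ERR pointer while keeping the embedded errno and making the intent explicit. A minimal illustration:

struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);

if (IS_ERR(ah))
	return ERR_CAST(ah);	/* propagate the errno under the caller's return type */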
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index c48ef6083020..c752ae9fad6c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -41,67 +41,72 @@
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
#include <linux/pagemap.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
- const struct mmu_interval_notifier_ops *ops)
+static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
{
- int ret;
+ umem_odp->is_implicit_odp = 1;
+ umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
+}
+
+static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ const struct mmu_interval_notifier_ops *ops)
+{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+ size_t page_size = 1UL << umem_odp->page_shift;
+ struct hmm_dma_map *map;
+ unsigned long start;
+ unsigned long end;
+ size_t nr_entries;
+ int ret = 0;
umem_odp->umem.is_odp = 1;
mutex_init(&umem_odp->umem_mutex);
- if (!umem_odp->is_implicit_odp) {
- size_t page_size = 1UL << umem_odp->page_shift;
- unsigned long start;
- unsigned long end;
- size_t ndmas, npfns;
-
- start = ALIGN_DOWN(umem_odp->umem.address, page_size);
- if (check_add_overflow(umem_odp->umem.address,
- (unsigned long)umem_odp->umem.length,
- &end))
- return -EOVERFLOW;
- end = ALIGN(end, page_size);
- if (unlikely(end < page_size))
- return -EOVERFLOW;
-
- ndmas = (end - start) >> umem_odp->page_shift;
- if (!ndmas)
- return -EINVAL;
-
- npfns = (end - start) >> PAGE_SHIFT;
- umem_odp->pfn_list = kvcalloc(
- npfns, sizeof(*umem_odp->pfn_list),
- GFP_KERNEL | __GFP_NOWARN);
- if (!umem_odp->pfn_list)
- return -ENOMEM;
-
- umem_odp->dma_list = kvcalloc(
- ndmas, sizeof(*umem_odp->dma_list),
- GFP_KERNEL | __GFP_NOWARN);
- if (!umem_odp->dma_list) {
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
+ if (check_add_overflow(umem_odp->umem.address,
+ (unsigned long)umem_odp->umem.length, &end))
+ return -EOVERFLOW;
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
+ return -EOVERFLOW;
+
+ nr_entries = (end - start) >> PAGE_SHIFT;
+ if (!(nr_entries * PAGE_SIZE / page_size))
+ return -EINVAL;
+
+ map = &umem_odp->map;
+ if (ib_uses_virt_dma(dev)) {
+ map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!map->pfn_list)
ret = -ENOMEM;
- goto out_pfn_list;
- }
+ } else
+ ret = hmm_dma_map_alloc(dev->dma_device, map,
+ (end - start) >> PAGE_SHIFT,
+ 1 << umem_odp->page_shift);
+ if (ret)
+ return ret;
- ret = mmu_interval_notifier_insert(&umem_odp->notifier,
- umem_odp->umem.owning_mm,
- start, end - start, ops);
- if (ret)
- goto out_dma_list;
- }
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm, start,
+ end - start, ops);
+ if (ret)
+ goto out_free_map;
return 0;
-out_dma_list:
- kvfree(umem_odp->dma_list);
-out_pfn_list:
- kvfree(umem_odp->pfn_list);
+out_free_map:
+ if (ib_uses_virt_dma(dev))
+ kfree(map->pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, map);
return ret;
}
@@ -120,7 +125,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
{
struct ib_umem *umem;
struct ib_umem_odp *umem_odp;
- int ret;
if (access & IB_ACCESS_HUGETLB)
return ERR_PTR(-EINVAL);
@@ -132,16 +136,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
umem->ibdev = device;
umem->writable = ib_access_writable(access);
umem->owning_mm = current->mm;
- umem_odp->is_implicit_odp = 1;
umem_odp->page_shift = PAGE_SHIFT;
umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- ret = ib_init_umem_odp(umem_odp, NULL);
- if (ret) {
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
- return ERR_PTR(ret);
- }
+ ib_init_umem_implicit_odp(umem_odp);
return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
@@ -262,74 +260,41 @@ err_put_pid:
}
EXPORT_SYMBOL(ib_umem_odp_get);
-void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
+static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
{
+ struct ib_device *dev = umem_odp->umem.ibdev;
+
/*
* Ensure that no more pages are mapped in the umem.
*
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- if (!umem_odp->is_implicit_odp) {
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- mutex_unlock(&umem_odp->umem_mutex);
- mmu_interval_notifier_remove(&umem_odp->notifier);
- kvfree(umem_odp->dma_list);
- kvfree(umem_odp->pfn_list);
- }
- put_pid(umem_odp->tgid);
- kfree(umem_odp);
+ mutex_lock(&umem_odp->umem_mutex);
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
+ mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
+ if (ib_uses_virt_dma(dev))
+ kfree(umem_odp->map.pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}
-EXPORT_SYMBOL(ib_umem_odp_release);
-/*
- * Map for DMA and insert a single page into the on-demand paging page tables.
- *
- * @umem: the umem to insert the page to.
- * @dma_index: index in the umem to add the dma to.
- * @page: the page struct to map and add.
- * @access_mask: access permissions needed for this page.
- *
- * The function returns -EFAULT if the DMA mapping operation fails.
- *
- */
-static int ib_umem_odp_map_dma_single_page(
- struct ib_umem_odp *umem_odp,
- unsigned int dma_index,
- struct page *page,
- u64 access_mask)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_device *dev = umem_odp->umem.ibdev;
- dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
-
- if (*dma_addr) {
- /*
- * If the page is already dma mapped it means it went through
- * a non-invalidating transition, like read-only to writable.
- * Resync the flags.
- */
- *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
- return 0;
- }
+ if (!umem_odp->is_implicit_odp)
+ ib_umem_odp_free(umem_odp);
- *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
- DMA_BIDIRECTIONAL);
- if (ib_dma_mapping_error(dev, *dma_addr)) {
- *dma_addr = 0;
- return -EFAULT;
- }
- umem_odp->npages++;
- *dma_addr |= access_mask;
- return 0;
+ put_pid(umem_odp->tgid);
+ kfree(umem_odp);
}
+EXPORT_SYMBOL(ib_umem_odp_release);
/**
* ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
*
* Maps the range passed in the argument to DMA addresses.
- * The DMA addresses of the mapped pages is updated in umem_odp->dma_list.
* Upon success the ODP MR will be locked to let caller complete its device
* page table update.
*
@@ -357,9 +322,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
struct hmm_range range = {};
unsigned long timeout;
- if (access_mask == 0)
- return -EINVAL;
-
if (user_virt < ib_umem_start(umem_odp) ||
user_virt + bcnt > ib_umem_end(umem_odp))
return -EFAULT;
@@ -385,11 +347,11 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
if (fault) {
range.default_flags = HMM_PFN_REQ_FAULT;
- if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ if (access_mask & HMM_PFN_WRITE)
range.default_flags |= HMM_PFN_REQ_WRITE;
}
- range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
+ range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
@@ -417,22 +379,17 @@ retry:
for (pfn_index = 0; pfn_index < num_pfns;
pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
- if (fault) {
- /*
- * Since we asked for hmm_range_fault() to populate
- * pages it shouldn't return an error entry on success.
- */
- WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
- WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
- } else {
- if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
- WARN_ON(umem_odp->dma_list[dma_index]);
- continue;
- }
- access_mask = ODP_READ_ALLOWED_BIT;
- if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
- }
+ /*
+ * Since we asked for hmm_range_fault() to populate
+ * pages it shouldn't return an error entry on success.
+ */
+ WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+ WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+ if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+ continue;
+
+ if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+ continue;
hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
/* If a hugepage was detected and ODP wasn't set for, the umem
* page_shift will be used, the opposite case is an error.
@@ -445,15 +402,6 @@ retry:
__func__, hmm_order, page_shift);
break;
}
-
- ret = ib_umem_odp_map_dma_single_page(
- umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
- access_mask);
- if (ret < 0) {
- ibdev_dbg(umem_odp->umem.ibdev,
- "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
- break;
- }
}
/* upon success lock should stay on hold for the callee */
if (!ret)
@@ -473,45 +421,38 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
- dma_addr_t dma_addr;
- dma_addr_t dma;
- int idx;
- u64 addr;
struct ib_device *dev = umem_odp->umem.ibdev;
+ u64 addr;
lockdep_assert_held(&umem_odp->umem_mutex);
virt = max_t(u64, virt, ib_umem_start(umem_odp));
bound = min_t(u64, bound, ib_umem_end(umem_odp));
for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
- idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- dma = umem_odp->dma_list[idx];
-
- /* The access flags guaranteed a valid DMA address when the entry was non-zero */
- if (dma) {
- unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
- struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
-
- dma_addr = dma & ODP_DMA_ADDR_MASK;
- ib_dma_unmap_page(dev, dma_addr,
- BIT(umem_odp->page_shift),
- DMA_BIDIRECTIONAL);
- if (dma & ODP_WRITE_ALLOWED_BIT) {
- struct page *head_page = compound_head(page);
- /*
- * set_page_dirty prefers being called with
- * the page lock. However, MMU notifiers are
- * called sometimes with and sometimes without
- * the lock. We rely on the umem_mutex instead
- * to prevent other mmu notifiers from
- * continuing and allowing the page mapping to
- * be removed.
- */
- set_page_dirty(head_page);
- }
- umem_odp->dma_list[idx] = 0;
- umem_odp->npages--;
+ u64 offset = addr - ib_umem_start(umem_odp);
+ size_t idx = offset >> umem_odp->page_shift;
+ unsigned long pfn = umem_odp->map.pfn_list[idx];
+
+ if (!hmm_dma_unmap_pfn(dev->dma_device, &umem_odp->map, idx))
+ goto clear;
+
+ if (pfn & HMM_PFN_WRITE) {
+ struct page *page = hmm_pfn_to_page(pfn);
+ struct page *head_page = compound_head(page);
+ /*
+ * set_page_dirty prefers being called with
+ * the page lock. However, MMU notifiers are
+ * called sometimes with and sometimes without
+ * the lock. We rely on the umem_mutex instead
+ * to prevent other mmu notifiers from
+ * continuing and allowing the page mapping to
+ * be removed.
+ */
+ set_page_dirty(head_page);
}
+ umem_odp->npages--;
+clear:
+ umem_odp->map.pfn_list[idx] &= ~HMM_PFN_FLAGS;
}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
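The ODP conversion drops the driver-maintained dma_list, with its ODP_READ/WRITE_ALLOWED bits packed into the address, in favor of the hmm-dma helpers: a struct hmm_dma_map owns the pfn list and, for devices doing real DMA, the mapping state, while ib_uses_virt_dma() devices allocate only the pfn list. Mapping state now lives in the HMM pfn flags (HMM_PFN_WRITE, HMM_PFN_DMA_MAPPED), and teardown funnels through hmm_dma_unmap_pfn() plus clearing HMM_PFN_FLAGS. A condensed sketch of the allocation split introduced in ib_init_umem_odp(), mirroring the hunk above:

struct hmm_dma_map *map = &umem_odp->map;

if (ib_uses_virt_dma(dev)) {
	/* software-DMA device: only the pfn list is needed */
	map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
				 GFP_KERNEL | __GFP_NOWARN);
	if (!map->pfn_list)
		return -ENOMEM;
} else {
	/* hmm-dma owns both the pfn list and the DMA mapping state */
	ret = hmm_dma_map_alloc(dev->dma_device, map, nr_entries,
				1UL << umem_odp->page_shift);
	if (ret)
		return ret;
}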
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3c3bb670c805..bc9fe3ceca4d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -193,7 +193,7 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
fd, attrs);
if (IS_ERR(uobj))
- return (void *)uobj;
+ return ERR_CAST(uobj);
uverbs_uobject_get(uobj);
uobj_put_read(uobj);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c5e78bbefbd0..75fde0fe9989 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -572,7 +572,7 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
GFP_KERNEL : GFP_ATOMIC);
if (IS_ERR(slave)) {
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
- return (void *)slave;
+ return ERR_CAST(slave);
}
ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
rdma_lag_put_ah_roce_slave(slave);
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index af91d16c3c77..e632f1661b92 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -170,6 +170,9 @@ static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
*val = ccparam->tcp_cp;
break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ *val = ccparam->inact_th;
+ break;
default:
return -EINVAL;
}
@@ -203,7 +206,7 @@ static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer,
return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc);
}
-static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
+static int bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
{
u32 modify_mask;
@@ -247,7 +250,9 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
ccparam->tcp_cp = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE:
+ return -EOPNOTSUPP;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ ccparam->inact_th = val;
break;
case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE:
ccparam->time_pph = val;
@@ -258,17 +263,20 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs
}
ccparam->mask = modify_mask;
+ return 0;
}
static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val)
{
struct bnxt_qplib_cc_param ccparam = { };
+ int rc;
- /* Supporting only Gen 0 now */
- if (gen_ext == CC_CONFIG_GEN0_EXT0)
- bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
- else
- return -EINVAL;
+ if (gen_ext != CC_CONFIG_GEN0_EXT0)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
+ if (rc)
+ return rc;
bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 457eecb99f96..be34c605d516 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1113,7 +1113,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
- if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
req.qp_flags = cpu_to_le32(qp_flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index f231e886ad9d..9efd32a3dc55 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -846,7 +846,12 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.resp_addr = cpu_to_le64(sbuf.dma_addr);
- req.function_id = cpu_to_le32(fid);
+ if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
+ req.function_id =
+ cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+ (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+ else
+ req.function_id = cpu_to_le32(fid);
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index e02721a9e288..b3b45c49077d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -4327,7 +4327,7 @@ static DECLARE_WORK(skb_work, process_work);
static void ep_timeout(struct timer_list *t)
{
- struct c4iw_ep *ep = from_timer(ep, t, timer);
+ struct c4iw_ep *ep = timer_container_of(ep, t, timer);
int kickit = 0;
spin_lock(&timeout_lock);
diff --git a/drivers/infiniband/hw/hfi1/aspm.c b/drivers/infiniband/hw/hfi1/aspm.c
index 9b508eaf441d..79990d09522b 100644
--- a/drivers/infiniband/hw/hfi1/aspm.c
+++ b/drivers/infiniband/hw/hfi1/aspm.c
@@ -169,7 +169,7 @@ unlock:
/* Timer function for re-enabling ASPM in the absence of interrupt activity */
static void aspm_ctx_timer_function(struct timer_list *t)
{
- struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer);
+ struct hfi1_ctxtdata *rcd = timer_container_of(rcd, t, aspm_timer);
unsigned long flags;
spin_lock_irqsave(&rcd->aspm_lock, flags);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e908f529335d..0781ab756d44 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -5548,7 +5548,7 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
#define RCVERR_CHECK_TIME 10
static void update_rcverr_timer(struct timer_list *t)
{
- struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
+ struct hfi1_devdata *dd = timer_container_of(dd, t, rcverr_timer);
struct hfi1_pportdata *ppd = dd->pport;
u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
@@ -12587,7 +12587,7 @@ static void do_update_synth_timer(struct work_struct *work)
static void update_synth_timer(struct timer_list *t)
{
- struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
+ struct hfi1_devdata *dd = timer_container_of(dd, t, synth_stats_timer);
queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
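The from_timer() conversions here and in the hfi1 and cxgb4 hunks that follow are a pure rename: timer_container_of() is the same container_of-style accessor that maps a struct timer_list callback argument back to the enclosing structure, under a name consistent with the kernel's other container_of helpers. The usage pattern (foo_dev and watchdog_timer are placeholders for the embedding type and timer member):

static void foo_timeout(struct timer_list *t)
{
	/* recover the structure that embeds the timer field */
	struct foo_dev *dev = timer_container_of(dev, t, watchdog_timer);

	/* ... handle expiry ... */
}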
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 3da90f2eb8e7..06487e20f723 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1315,7 +1315,8 @@ void shutdown_led_override(struct hfi1_pportdata *ppd)
static void run_led_override(struct timer_list *t)
{
- struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
+ struct hfi1_pportdata *ppd = timer_container_of(ppd, t,
+ led_override_timer);
struct hfi1_devdata *dd = ppd->dd;
unsigned long timeout;
int phase_idx;
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index b39f63ce6dfc..961fa07116f0 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -369,7 +369,7 @@ static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
void hfi1_handle_trap_timer(struct timer_list *t)
{
- struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
+ struct hfi1_ibport *ibp = timer_container_of(ibp, t, rvp.trap_timer);
struct trap_node *trap = NULL;
unsigned long flags;
int i;
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index b6e3141253c4..d6dde762921a 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -124,7 +124,6 @@ struct opa_mad_notice_attr {
} __packed ntc_2048;
};
- u8 class_data[];
};
#define IB_VLARB_LOWPRI_0_31 1
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 5a91cbda4aee..764286da2ce8 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1361,16 +1361,6 @@ void sc_flush(struct send_context *sc)
sc_wait_for_packet_egress(sc, 1);
}
-/* drop all packets on the context, no waiting until they are sent */
-void sc_drop(struct send_context *sc)
-{
- if (!sc)
- return;
-
- dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
- __func__, sc->sw_index, sc->hw_context);
-}
-
/*
* Start the software reaction to a context halt or SPC freeze:
* - mark the context as halted or frozen
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index d07cc6ea7c63..ab0f9a3a8d12 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -246,7 +246,6 @@ void sc_disable(struct send_context *sc);
int sc_restart(struct send_context *sc);
void sc_return_credits(struct send_context *sc);
void sc_flush(struct send_context *sc);
-void sc_drop(struct send_context *sc);
void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
pio_release_cb cb, void *arg);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 0d2b39b7c8b5..719b7c34e238 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -467,7 +467,8 @@ static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
static void sdma_err_progress_check(struct timer_list *t)
{
unsigned index;
- struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
+ struct sdma_engine *sde = timer_container_of(sde, t,
+ err_progress_check_timer);
dd_dev_err(sde->dd, "SDE progress check event\n");
for (index = 0; index < sde->dd->num_sdma; index++) {
@@ -1521,24 +1522,6 @@ void sdma_all_running(struct hfi1_devdata *dd)
}
/**
- * sdma_all_idle() - called when the link goes down
- * @dd: hfi1_devdata
- *
- * This routine moves all engines to the idle state.
- */
-void sdma_all_idle(struct hfi1_devdata *dd)
-{
- struct sdma_engine *sde;
- unsigned int i;
-
- /* idle all engines */
- for (i = 0; i < dd->num_sdma; ++i) {
- sde = &dd->per_sdma[i];
- sdma_process_event(sde, sdma_event_e70_go_idle);
- }
-}
-
-/**
* sdma_start() - called to kick off state processing for all engines
* @dd: hfi1_devdata
*
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index d77246b48434..91dfd5d0c419 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -373,7 +373,6 @@ void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
void sdma_all_running(struct hfi1_devdata *dd);
-void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 78bf4a48c035..eafd2f157e32 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -3981,7 +3981,7 @@ void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
static void hfi1_tid_timeout(struct timer_list *t)
{
- struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer);
+ struct hfi1_qp_priv *qpriv = timer_container_of(qpriv, t, s_tid_timer);
struct rvt_qp *qp = qpriv->owner;
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
unsigned long flags;
@@ -4797,7 +4797,8 @@ void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
static void hfi1_tid_retry_timeout(struct timer_list *t)
{
- struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
+ struct hfi1_qp_priv *priv = timer_container_of(priv, t,
+ s_tid_retry_timer);
struct rvt_qp *qp = priv->owner;
struct rvt_swqe *wqe;
unsigned long flags;
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index cf2d29098406..62b4f16dab27 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -53,7 +53,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
int ret = 0;
fd->entry_to_rb = kcalloc(uctxt->expected_count,
- sizeof(struct rb_node *),
+ sizeof(*fd->entry_to_rb),
GFP_KERNEL);
if (!fd->entry_to_rb)
return -ENOMEM;
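The kcalloc() tweak above switches the element size from sizeof(struct rb_node *) to sizeof(*fd->entry_to_rb), the preferred kernel idiom: sizing from the pointer being assigned keeps the allocation correct even if the array's element type later changes. In general form:

/* preferred: element size tracks the destination pointer's type */
p->items = kcalloc(count, sizeof(*p->items), GFP_KERNEL);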
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 49e0f79b950c..3cbbfccdd8cd 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -554,7 +554,7 @@ void hfi1_16B_rcv(struct hfi1_packet *packet)
*/
static void mem_timer(struct timer_list *t)
{
- struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
+ struct hfi1_ibdev *dev = timer_container_of(dev, t, mem_timer);
struct list_head *list = &dev->memwait;
struct rvt_qp *qp = NULL;
struct iowait *wait;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 7917af8e6380..baf592e6f21b 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -4,6 +4,7 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(src)
hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 4fc5b9d5fea8..307c35888b30 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -33,7 +33,6 @@
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 560a1d9de408..1dcc9cbb4678 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1027,6 +1027,26 @@ struct hns_roce_dev {
atomic64_t *dfx_cnt;
};
+enum hns_roce_trace_type {
+ TRACE_SQ,
+ TRACE_RQ,
+ TRACE_SRQ,
+};
+
+static inline const char *trace_type_to_str(enum hns_roce_trace_type type)
+{
+ switch (type) {
+ case TRACE_SQ:
+ return "SQ";
+ case TRACE_RQ:
+ return "RQ";
+ case TRACE_SRQ:
+ return "SRQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
return container_of(ib_dev, struct hns_roce_dev, ib_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 160e8927d364..fa8747656f25 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -43,13 +43,15 @@
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+#define CREATE_TRACE_POINTS
+#include "hns_roce_trace.h"
+
enum {
CMD_RST_PRC_OTHERS,
CMD_RST_PRC_SUCCESS,
@@ -738,6 +740,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
else
ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
+ trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
+ wr->wr_id, TRACE_SQ);
if (unlikely(ret)) {
*bad_wr = wr;
goto out;
@@ -807,6 +811,9 @@ static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
+
+ trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
+ wr->wr_id, TRACE_RQ);
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -943,7 +950,7 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
static void update_srq_db(struct hns_roce_srq *srq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- struct hns_roce_v2_db db;
+ struct hns_roce_v2_db db = {};
hr_reg_write(&db, DB_TAG, srq->srqn);
hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
@@ -984,6 +991,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
fill_wqe_idx(srq, wqe_idx);
srq->wrid[wqe_idx] = wr->wr_id;
+
+ trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift,
+ wr->wr_id, TRACE_SRQ);
}
if (likely(nreq)) {
@@ -1311,6 +1321,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
tail = csq->head;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_req(hr_dev, &desc[i]);
+
csq->desc[csq->head++] = desc[i];
if (csq->head == csq->desc_num)
csq->head = 0;
@@ -1325,6 +1337,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
+ trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]);
+
/* check the result of hardware write back */
desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
@@ -4302,8 +4316,7 @@ static inline int get_pdn(struct ib_pd *ib_pd)
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+ struct hns_roce_v2_qp_context *context)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -5122,7 +5135,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, context);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
@@ -5313,6 +5326,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+ trace_hns_sq_flush_cqe(hr_qp->qpn, hr_qp->sq.head, TRACE_SQ);
hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
hr_qp->state = IB_QPS_ERR;
@@ -5322,6 +5336,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
return;
spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+ trace_hns_rq_flush_cqe(hr_qp->qpn, hr_qp->rq.head, TRACE_RQ);
hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
@@ -6248,6 +6263,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
eq->sub_type = sub_type;
++eq->cons_index;
aeqe_found = IRQ_HANDLED;
+ trace_hns_ae_info(event_type, aeqe, eq->eqe_size);
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]);
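The tracepoint plumbing in this patch follows the standard kernel pattern: exactly one compilation unit defines CREATE_TRACE_POINTS before including the trace header so the event bodies are emitted once, every other consumer includes the header plainly, and the Makefile gains -I $(src) because define_trace.h re-includes the header through TRACE_INCLUDE_PATH ("."). Condensed, the three pieces fit together like this (file names as in this patch):

/* hns_roce_hw_v2.c, the single emitting unit */
#define CREATE_TRACE_POINTS
#include "hns_roce_trace.h"

/* hns_roce_mr.c, an ordinary consumer */
#include "hns_roce_trace.h"

/* Makefile, so define_trace.h can locate hns_roce_trace.h again:
 *   ccflags-y += -I $(src)
 */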
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 91a5665465ff..bc7466830eaf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -34,6 +34,7 @@
#define _HNS_ROCE_HW_V2_H
#include <linux/bitops.h>
+#include "hnae3.h"
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 8d0b63d4b50a..e7a497cc125c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -37,7 +37,6 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 09da3496843b..93a48b41955b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -38,6 +38,7 @@
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
+#include "hns_roce_trace.h"
static u32 hw_index_to_key(int ind)
{
@@ -159,6 +160,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+ trace_hns_mr(mr);
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
@@ -1146,6 +1148,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
+ trace_hns_buf_attr(buf_attr);
/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
* to finish the MTT configuration.
*/
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 356d98816949..f637b73b946e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -4,7 +4,6 @@
#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
-#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_trace.h b/drivers/infiniband/hw/hns/hns_roce_trace.h
new file mode 100644
index 000000000000..59ceb591b3a1
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_trace.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2025 Hisilicon Limited.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns_roce
+
+#if !defined(__HNS_ROCE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HNS_ROCE_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/string_choices.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+
+DECLARE_EVENT_CLASS(flush_head_template,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, pi)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->pi = pi;
+ __entry->type = type;
+ ),
+
+ TP_printk("%s 0x%lx flush head 0x%x.",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->pi)
+);
+
+DEFINE_EVENT(flush_head_template, hns_sq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+DEFINE_EVENT(flush_head_template, hns_rq_flush_cqe,
+ TP_PROTO(unsigned long qpn, u32 pi,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, pi, type));
+
+#define MAX_SGE_PER_WQE 64
+#define MAX_WQE_SIZE (MAX_SGE_PER_WQE * HNS_ROCE_SGE_SIZE)
+DECLARE_EVENT_CLASS(wqe_template,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len,
+ u64 id, enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type),
+
+ TP_STRUCT__entry(__field(unsigned long, qpn)
+ __field(u32, idx)
+ __array(u32, wqe,
+ MAX_WQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ __field(u64, id)
+ __field(enum hns_roce_trace_type, type)
+ ),
+
+ TP_fast_assign(__entry->qpn = qpn;
+ __entry->idx = idx;
+ __entry->id = id;
+ __entry->len = len / sizeof(__le32);
+ __entry->type = type;
+ for (int i = 0; i < __entry->len; i++)
+ __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
+ ),
+
+ TP_printk("%s 0x%lx wqe(0x%x/0x%llx): %s",
+ trace_type_to_str(__entry->type),
+ __entry->qpn, __entry->idx, __entry->id,
+ __print_array(__entry->wqe, __entry->len,
+ sizeof(__le32)))
+);
+
+DEFINE_EVENT(wqe_template, hns_sq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_rq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+DEFINE_EVENT(wqe_template, hns_srq_wqe,
+ TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id,
+ enum hns_roce_trace_type type),
+ TP_ARGS(qpn, idx, wqe, len, id, type));
+
+TRACE_EVENT(hns_ae_info,
+ TP_PROTO(int event_type, void *aeqe, unsigned int len),
+ TP_ARGS(event_type, aeqe, len),
+
+ TP_STRUCT__entry(__field(int, event_type)
+ __array(u32, aeqe,
+ HNS_ROCE_V3_EQE_SIZE / sizeof(__le32))
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(__entry->event_type = event_type;
+ __entry->len = len / sizeof(__le32);
+ for (int i = 0; i < __entry->len; i++)
+ __entry->aeqe[i] = le32_to_cpu(((__le32 *)aeqe)[i]);
+ ),
+
+ TP_printk("event %2d aeqe: %s", __entry->event_type,
+ __print_array(__entry->aeqe, __entry->len, sizeof(__le32)))
+);
+
+TRACE_EVENT(hns_mr,
+ TP_PROTO(struct hns_roce_mr *mr),
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(__field(u64, iova)
+ __field(u64, size)
+ __field(u32, key)
+ __field(u32, pd)
+ __field(u32, pbl_hop_num)
+ __field(u32, npages)
+ __field(int, type)
+ __field(int, enabled)
+ ),
+
+ TP_fast_assign(__entry->iova = mr->iova;
+ __entry->size = mr->size;
+ __entry->key = mr->key;
+ __entry->pd = mr->pd;
+ __entry->pbl_hop_num = mr->pbl_hop_num;
+ __entry->npages = mr->npages;
+ __entry->type = mr->type;
+ __entry->enabled = mr->enabled;
+ ),
+
+ TP_printk("iova:0x%llx, size:%llu, key:%u, pd:%u, pbl_hop:%u, npages:%u, type:%d, status:%d",
+ __entry->iova, __entry->size, __entry->key,
+ __entry->pd, __entry->pbl_hop_num, __entry->npages,
+ __entry->type, __entry->enabled)
+);
+
+TRACE_EVENT(hns_buf_attr,
+ TP_PROTO(struct hns_roce_buf_attr *attr),
+ TP_ARGS(attr),
+
+ TP_STRUCT__entry(__field(unsigned int, region_count)
+ __field(unsigned int, region0_size)
+ __field(int, region0_hopnum)
+ __field(unsigned int, region1_size)
+ __field(int, region1_hopnum)
+ __field(unsigned int, region2_size)
+ __field(int, region2_hopnum)
+ __field(unsigned int, page_shift)
+ __field(bool, mtt_only)
+ ),
+
+ TP_fast_assign(__entry->region_count = attr->region_count;
+ __entry->region0_size = attr->region[0].size;
+ __entry->region0_hopnum = attr->region[0].hopnum;
+ __entry->region1_size = attr->region[1].size;
+ __entry->region1_hopnum = attr->region[1].hopnum;
+ __entry->region2_size = attr->region[2].size;
+ __entry->region2_hopnum = attr->region[2].hopnum;
+ __entry->page_shift = attr->page_shift;
+ __entry->mtt_only = attr->mtt_only;
+ ),
+
+ TP_printk("rg cnt:%u, pg_sft:0x%x, mtt_only:%s, rg 0 (sz:%u, hop:%u), rg 1 (sz:%u, hop:%u), rg 2 (sz:%u, hop:%u)\n",
+ __entry->region_count, __entry->page_shift,
+ str_yes_no(__entry->mtt_only),
+ __entry->region0_size, __entry->region0_hopnum,
+ __entry->region1_size, __entry->region1_hopnum,
+ __entry->region2_size, __entry->region2_hopnum)
+);
+
+DECLARE_EVENT_CLASS(cmdq,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc),
+
+ TP_STRUCT__entry(__string(dev_name, dev_name(hr_dev->dev))
+ __field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __array(u32, data, 6)
+ ),
+
+ TP_fast_assign(__assign_str(dev_name);
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ for (int i = 0; i < 6; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);
+ ),
+
+ TP_printk("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x, data:%s\n",
+ __get_str(dev_name), __entry->opcode,
+ __entry->flag, __entry->retval,
+ __print_array(__entry->data, 6, sizeof(__le32)))
+);
+
+DEFINE_EVENT(cmdq, hns_cmdq_req,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+DEFINE_EVENT(cmdq, hns_cmdq_resp,
+ TP_PROTO(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc),
+ TP_ARGS(hr_dev, desc));
+
+#endif /* __HNS_ROCE_TRACE_H */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns_roce_trace
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
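Each DEFINE_EVENT() above materializes a trace_<name>() stub that compiles down to a patched-out branch until the event is enabled at runtime. A hedged usage sketch (demo() is hypothetical; the tracefs path is the usual default and may differ per distro):

#include "hns_roce_trace.h"

static void demo(struct hns_roce_mr *mr)
{
	trace_hns_mr(mr);	/* no-op unless the event is enabled */
}

/* Typical runtime enablement from a shell:
 *   echo 1 > /sys/kernel/tracing/events/hns_roce/hns_mr/enable
 *   cat /sys/kernel/tracing/trace_pipe
 */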
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 23207f13ac1b..c6a0a661d6e7 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1263,7 +1263,8 @@ static void irdma_cm_timer_tick(struct timer_list *t)
struct irdma_timer_entry *send_entry, *close_entry;
struct list_head *list_core_temp;
struct list_head *list_node;
- struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
+ struct irdma_cm_core *cm_core = timer_container_of(cm_core, t,
+ tcp_timer);
struct irdma_sc_vsi *vsi;
u32 settimer = 0;
unsigned long timetosend;
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 6aed6169c07d..99a7f1a6c0b5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3131,7 +3131,7 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
ibdev_dbg(to_ibdev(cqp->dev),
- "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
+ "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
(u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
return 0;
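The %pK to %p changes here and in pble.c below lose no hardening: since v4.15 a plain %p prints a per-boot hashed value by default, and %pK's kptr_restrict handling adds nothing useful in debug log lines. An illustrative fragment (names and the printed hash are made up):

	ibdev_dbg(ibdev, "sq_pa[%p]\n", sq_pa);	/* e.g. "sq_pa[00000000f3a1c2d4]" */
	/* booting with no_hash_pointers restores raw addresses for debugging */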
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 1ee8969595d3..1e840bbd619d 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
-#include "../../../net/ethernet/intel/ice/ice.h"
MODULE_ALIAS("i40iw");
-MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
MODULE_LICENSE("Dual BSD/GPL");
@@ -61,7 +59,7 @@ static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
}
static void irdma_fill_qos_info(struct irdma_l2params *l2params,
- struct iidc_qos_params *qos_info)
+ struct iidc_rdma_qos_params *qos_info)
{
int i;
@@ -85,12 +83,13 @@ static void irdma_fill_qos_info(struct irdma_l2params *l2params,
}
}
-static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
+static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
{
- struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev);
+ struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
struct irdma_l2params l2params = {};
- if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) {
+ if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
l2params.mtu = iwdev->netdev->mtu;
@@ -98,25 +97,26 @@ static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
- } else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) {
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
if (iwdev->vsi.tc_change_pending)
return;
irdma_prep_tc_change(iwdev);
- } else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) {
- struct iidc_qos_params qos_info = {};
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
if (!iwdev->vsi.tc_change_pending)
return;
l2params.tc_changed = true;
ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
- ice_get_qos_params(pf, &qos_info);
- irdma_fill_qos_info(&l2params, &qos_info);
+
+ irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode;
+ iwdev->dcb_vlan_mode =
+ l2params.num_tc > 1 && !l2params.dscp_mode;
irdma_change_l2params(&iwdev->vsi, &l2params);
- } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
event->reg);
if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
@@ -151,10 +151,8 @@ static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event
*/
static void irdma_request_reset(struct irdma_pci_f *rf)
{
- struct ice_pf *pf = rf->cdev;
-
ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
- ice_rdma_request_reset(pf, IIDC_PFR);
+ ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
}
/**
@@ -166,14 +164,15 @@ static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
- struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_qset_params qset = {};
int ret;
+ cdev_info = iwdev->rf->cdev;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
- ret = ice_add_rdma_qset(pf, &qset);
+ ret = ice_add_rdma_qset(cdev_info, &qset);
if (ret) {
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
return ret;
@@ -194,19 +193,20 @@ static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
- struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_qset_params qset = {};
+ cdev_info = iwdev->rf->cdev;
qset.qs_handle = tc_node->qs_handle;
qset.tc = tc_node->traffic_class;
qset.vport_id = vsi->vsi_idx;
qset.teid = tc_node->l2_sched_node_id;
- if (ice_del_rdma_qset(pf, &qset))
+ if (ice_del_rdma_qset(cdev_info, &qset))
ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
}
-static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;
@@ -217,12 +217,12 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
return -ENOMEM;
for (i = 0; i < rf->msix_count; i++)
- if (ice_alloc_rdma_qvector(pf, &rf->msix_entries[i]))
+ if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
break;
if (i < IRDMA_MIN_MSIX) {
- for (; i > 0; i--)
- ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+ while (--i >= 0)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
kfree(rf->msix_entries);
return -ENOMEM;
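The rewritten unwind loop is also an off-by-one fix: allocation stopped at index i, so entries [0, i-1] hold live qvectors, but the old loop freed [1, i], touching one vector that was never allocated and leaking entry 0. In comment form:

/* alloc loop broke out at index i  =>  entries [0, i-1] are live
 *
 *   for (; i > 0; i--)	frees i .. 1	(wrong at both ends)
 *   while (--i >= 0)	frees i-1 .. 0	(exactly the live set)
 */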
@@ -233,54 +233,65 @@ static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
return 0;
}
-static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
{
int i;
for (i = 0; i < rf->msix_count; i++)
- ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
kfree(rf->msix_entries);
}
static void irdma_remove(struct auxiliary_device *aux_dev)
{
- struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
- struct iidc_auxiliary_dev,
- adev);
- struct ice_pf *pf = iidc_adev->pf;
struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
irdma_ib_unregister_device(iwdev);
- ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
- irdma_deinit_interrupts(iwdev->rf, pf);
+ irdma_deinit_interrupts(iwdev->rf, cdev_info);
- pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
+ kfree(iwdev->rf);
+
+ pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn));
}
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
- struct ice_vsi *vsi)
+static void irdma_fill_device_info(struct irdma_device *iwdev,
+ struct iidc_rdma_core_dev_info *cdev_info)
{
+ struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
struct irdma_pci_f *rf = iwdev->rf;
- rf->cdev = pf;
+ rf->sc_dev.hw = &rf->hw;
+ rf->iwdev = iwdev;
+ rf->cdev = cdev_info;
+ rf->hw.hw_addr = iidc_priv->hw_addr;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->pf_id = iidc_priv->pf_id;
rf->gen_ops.register_qset = irdma_lan_register_qset;
rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
- rf->hw.hw_addr = pf->hw.hw_addr;
- rf->pcidev = pf->pdev;
- rf->pf_id = pf->hw.pf_id;
- rf->default_vsi.vsi_idx = vsi->vsi_num;
- rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
- IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
+
+ rf->default_vsi.vsi_idx = iidc_priv->vport_id;
+ rf->protocol_used =
+ cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
rf->rdma_ver = IRDMA_GEN_2;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->gen_ops.request_reset = irdma_request_reset;
rf->limits_sel = 7;
rf->iwdev = iwdev;
+
mutex_init(&iwdev->ah_tbl_lock);
- iwdev->netdev = vsi->netdev;
- iwdev->vsi_num = vsi->vsi_num;
+
+ iwdev->netdev = iidc_priv->netdev;
+ iwdev->vsi_num = iidc_priv->vport_id;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
@@ -292,19 +303,18 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
{
- struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
- struct iidc_auxiliary_dev,
- adev);
- struct ice_pf *pf = iidc_adev->pf;
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct iidc_qos_params qos_info = {};
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *iidc_priv;
+ struct irdma_l2params l2params = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
- struct irdma_l2params l2params = {};
int err;
- if (!vsi)
- return -EIO;
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ iidc_priv = cdev_info->iidc_priv;
+
iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
@@ -314,10 +324,10 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
return -ENOMEM;
}
- irdma_fill_device_info(iwdev, pf, vsi);
+ irdma_fill_device_info(iwdev, cdev_info);
rf = iwdev->rf;
- err = irdma_init_interrupts(rf, pf);
+ err = irdma_init_interrupts(rf, cdev_info);
if (err)
goto err_init_interrupts;
@@ -326,8 +336,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
goto err_ctrl_init;
l2params.mtu = iwdev->netdev->mtu;
- ice_get_qos_params(pf, &qos_info);
- irdma_fill_qos_info(&l2params, &qos_info);
+ irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
@@ -339,7 +348,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
if (err)
goto err_ibreg;
- ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
auxiliary_set_drvdata(aux_dev, iwdev);
@@ -351,7 +360,7 @@ err_ibreg:
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
- irdma_deinit_interrupts(rf, pf);
+ irdma_deinit_interrupts(rf, cdev_info);
err_init_interrupts:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
@@ -367,7 +376,7 @@ static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
-static struct iidc_auxiliary_drv irdma_auxiliary_drv = {
+static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = {
.adrv = {
.id_table = irdma_auxiliary_id_table,
.probe = irdma_probe,
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index bb0b6494ccb2..674acc952168 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -29,7 +29,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
#include <linux/auxiliary_bus.h>
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index 4b4f78288d12..3f73ceacccb6 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -5,8 +5,8 @@
#include <linux/pci.h>
#include <linux/bitfield.h>
-#include <linux/net/intel/iidc.h>
#include <rdma/ib_verbs.h>
+#include <net/dscp.h>
#define STATS_TIMER_DELAY 60000
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index e7ce6840755f..37ce35cb10e7 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -108,7 +108,7 @@ static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
chunk->vaddr = sd_entry->u.bp.addr.va + offset;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
ibdev_dbg(to_ibdev(dev),
- "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
+ "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%p fpm_addr = %llx\n",
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
return 0;
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 59b34afa867b..527c6da2c1ac 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -567,7 +567,7 @@ struct irdma_sc_vsi {
u8 qos_rel_bw;
u8 qos_prio_type;
u8 stats_idx;
- u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ u8 dscp_map[DSCP_MAX];
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
bool dscp_mode:1;
@@ -695,7 +695,7 @@ struct irdma_l2params {
u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
u16 mtu;
u8 up2tc[IRDMA_MAX_USER_PRIORITY];
- u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ u8 dscp_map[DSCP_MAX];
u8 num_tc;
u8 vsi_rel_bw;
u8 vsi_prio_type;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index d66b4f7a84ec..b510ef747399 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -930,7 +930,7 @@ void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
static void irdma_terminate_timeout(struct timer_list *t)
{
- struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
+ struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
struct irdma_sc_qp *qp = &iwqp->sc_qp;
irdma_terminate_done(qp, 1);
@@ -1537,7 +1537,7 @@ int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
static void irdma_hw_stats_timeout(struct timer_list *t)
{
struct irdma_vsi_pestat *pf_devstat =
- from_timer(pf_devstat, t, stats_timer);
+ timer_container_of(pf_devstat, t, stats_timer);
struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index eeb932e58730..1e8c92826de2 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -4871,5 +4871,4 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
irdma_rt_deinit_hw(iwdev);
irdma_ctrl_deinit_hw(iwdev->rf);
- kfree(iwdev->rf);
}
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 0fc4e2679218..28e154bbb50f 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -15,14 +15,12 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
bool is_rnic_cq;
u32 doorbell;
u32 buf_size;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
cq->cq_handle = INVALID_MANA_HANDLE;
@@ -65,7 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
return err;
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
}
if (is_rnic_cq) {
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index b31089320aa5..165c0a1e67d1 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -101,103 +101,95 @@ static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
+ struct gdma_context *gc = madev->mdev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
struct gdma_dev *mdev = madev->mdev;
struct net_device *ndev;
- struct mana_context *mc;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
int ret;
- mc = mdev->driver_data;
-
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
return -ENOMEM;
ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
-
- dev->ib_dev.phys_port_cnt = mc->num_ports;
-
- ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
- mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
-
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-
- /*
- * num_comp_vectors needs to set to the max MSIX index
- * when interrupts and event queues are implemented
- */
- dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
- dev->ib_dev.dev.parent = mdev->gdma_context->dev;
-
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- if (!ndev) {
- ret = -ENODEV;
- ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
- goto free_ib_device;
- }
- ether_addr_copy(mac_addr, ndev->dev_addr);
- addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
- ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
- goto free_ib_device;
- }
-
- ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
- ret);
- goto free_ib_device;
- }
- dev->gdma_dev = &mdev->gdma_context->mana_ib;
-
- dev->nb.notifier_call = mana_ib_netdev_event;
- ret = register_netdevice_notifier(&dev->nb);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
- ret);
- goto deregister_device;
- }
-
- ret = mana_ib_gd_query_adapter_caps(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
- ret);
- goto deregister_net_notifier;
- }
-
- ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
-
- ret = mana_ib_create_eqs(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_net_notifier;
- }
-
- ret = mana_ib_gd_create_rnic_adapter(dev);
- if (ret)
- goto destroy_eqs;
-
+ dev->ib_dev.num_comp_vectors = gc->max_num_queues;
+ dev->ib_dev.dev.parent = gc->dev;
+ dev->gdma_dev = mdev;
xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
- ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
- ret);
- goto destroy_rnic;
+
+ if (mana_ib_is_rnic(dev)) {
+ dev->ib_dev.phys_port_cnt = 1;
+ ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
+ goto free_ib_device;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
+ ret);
+ goto free_ib_device;
+ }
+
+ ret = mana_ib_gd_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto deregister_net_notifier;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ } else {
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+ ret = mana_eth_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
+ goto free_ib_device;
+ }
}
- dev->av_pool = dma_pool_create("mana_ib_av", mdev->gdma_context->dev,
- MANA_AV_BUFFER_SIZE, MANA_AV_BUFFER_SIZE, 0);
+ dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
+ MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
goto destroy_rnic;
}
- ret = ib_register_device(&dev->ib_dev, "mana_%d",
- mdev->gdma_context->dev);
+ ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
+ mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
+
+ ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d",
+ gc->dev);
if (ret)
goto deallocate_pool;
@@ -208,15 +200,16 @@ static int mana_ib_probe(struct auxiliary_device *adev,
deallocate_pool:
dma_pool_destroy(dev->av_pool);
destroy_rnic:
- xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
- mana_ib_destroy_eqs(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_destroy_eqs(dev);
deregister_net_notifier:
- unregister_netdevice_notifier(&dev->nb);
-deregister_device:
- mana_gd_deregister_device(dev->gdma_dev);
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
free_ib_device:
+ xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
return ret;
}
@@ -227,25 +220,24 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
+ if (mana_ib_is_rnic(dev)) {
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
+ unregister_netdevice_notifier(&dev->nb);
+ }
xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
- mana_ib_destroy_eqs(dev);
- unregister_netdevice_notifier(&dev->nb);
- mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev);
}
static const struct auxiliary_device_id mana_id_table[] = {
- {
- .name = "mana.rdma",
- },
+ { .name = "mana.rdma", },
+ { .name = "mana.eth", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
static struct auxiliary_driver mana_driver = {
- .name = "rdma",
.probe = mana_ib_probe,
.remove = mana_ib_remove,
.id_table = mana_id_table,
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index eda9c5b971de..41a24a186f9d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -4,6 +4,7 @@
*/
#include "mana_ib.h"
+#include "linux/pci.h"
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
@@ -243,7 +244,6 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
struct mana_ib_queue *queue)
{
- struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue_spec spec = {};
int err;
@@ -252,7 +252,7 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
spec.type = type;
spec.monitor_avl_buf = false;
spec.queue_size = size;
- err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
+ err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
if (err)
return err;
/* take ownership into mana_ib from mana */
@@ -479,7 +479,7 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
{
unsigned long page_sz;
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -494,7 +494,7 @@ int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume
unsigned long page_sz;
/* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -551,6 +551,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct ib_port_attr attr;
int err;
@@ -560,10 +561,12 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
- if (port_num == 1) {
- immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ if (mana_ib_is_rnic(dev)) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
return 0;
@@ -572,12 +575,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct mana_ib_dev *dev = container_of(ibdev,
- struct mana_ib_dev, ib_dev);
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);
memset(props, 0, sizeof(*props));
+ props->vendor_id = pdev->vendor;
+ props->vendor_part_id = dev->gdma_dev->dev_id.type;
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
- props->page_size_cap = PAGE_SZ_BM;
+ props->page_size_cap = dev->adapter_caps.page_size_cap;
props->max_qp = dev->adapter_caps.max_qp_count;
props->max_qp_wr = dev->adapter_caps.max_qp_wr;
props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
@@ -596,6 +601,8 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
props->max_ah = INT_MAX;
props->max_pkeys = 1;
props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
+ if (!mana_ib_is_rnic(dev))
+ props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;
return 0;
}
@@ -603,6 +610,7 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
if (!ndev)
@@ -623,7 +631,7 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_EDR;
props->pkey_tbl_len = 1;
- if (port == 1) {
+ if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
@@ -696,6 +704,41 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
caps->max_recv_sge_count = resp.max_recv_sge_count;
caps->feature_flags = resp.feature_flags;
+ caps->page_size_cap = PAGE_SZ_BM;
+ if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+ caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
+ return 0;
+}
+
+int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
+ caps->max_cq_count = resp.max_cq;
+ caps->max_mr_count = resp.max_mst;
+ caps->max_pd_count = 0x6000;
+ caps->max_qp_wr = min_t(u32,
+ 0x100000 / GDMA_MAX_SQE_SIZE,
+ 0x100000 / GDMA_MAX_RQE_SIZE);
+ caps->max_send_sge_count = 30;
+ caps->max_recv_sge_count = 15;
+ caps->page_size_cap = PAGE_SZ_BM;
+
return 0;
}
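page_size_cap is a bitmap of supported MR page sizes; it is what lets the ib_umem_find_best_pgsz() calls later in this patch choose large mappings when the device advertises them. A sketch of the capability math (pf_cap_flags1 stands in for mdev_to_gc(dev)->pf_cap_flags1; the SZ_* constants come from <linux/sizes.h>):

	u64 cap = PAGE_SZ_BM;			/* baseline bitmap, 4K .. 2M */

	if (pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
		cap |= SZ_4M | SZ_1G | SZ_2G;	/* opt-in large pages */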
@@ -740,7 +783,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
spec.eq.msix_index = 0;
- err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
if (err)
return err;
@@ -791,7 +834,7 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
req.hdr.req.msg_version = GDMA_MESSAGE_V2;
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.notify_eq_id = mdev->fatal_err_eq->id;
if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
@@ -816,7 +859,7 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
gc = mdev_to_gc(mdev);
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -843,7 +886,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_ADD;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -873,7 +916,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_REMOVE;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -896,7 +939,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = op;
copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
@@ -917,8 +960,11 @@ int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 do
struct mana_rnic_create_cq_req req = {};
int err;
+ if (!mdev->eqs)
+ return -EINVAL;
+
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.gdma_region = cq->queue.gdma_region;
req.eq_id = mdev->eqs[cq->comp_vector]->id;
@@ -950,7 +996,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
return 0;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.cq_handle = cq->cq_handle;
@@ -976,7 +1022,7 @@ int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1012,7 +1058,7 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.rc_qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -1035,7 +1081,7 @@ int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1070,7 +1116,7 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6903946677e5..42bebd6cd4f7 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -60,6 +60,7 @@ struct mana_ib_adapter_caps {
u32 max_recv_sge_count;
u32 max_inline_data_size;
u64 feature_flags;
+ u64 page_size_cap;
};
struct mana_ib_queue {
@@ -543,6 +544,11 @@ static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
complete(&qp->free);
}
+static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
+}
+
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
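With "mana.eth" added to the auxiliary id table, one probe routine now serves two device personalities, and mana_ib_is_rnic() is the discriminator used throughout. A simplified sketch of the resulting control flow (demo_probe is hypothetical; the branch comments summarize this patch):

static int demo_probe(struct mana_ib_dev *dev)
{
	if (mana_ib_is_rnic(dev)) {
		/* GDMA_DEVICE_MANA_IB: full RoCE bring-up with netdev/GID,
		 * EQs, RNIC adapter and MAC config; registered as "mana_%d"
		 */
	} else {
		/* Ethernet device: raw-packet QPs only, caps read via
		 * GDMA_QUERY_MAX_RESOURCES; registered as "manae_%d"
		 */
	}
	return 0;
}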
@@ -642,6 +648,7 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);
int mana_ib_create_eqs(struct mana_ib_dev *mdev);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index f99557ec7767..6d974d0a8400 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -5,8 +5,8 @@
#include "mana_ib.h"
-#define VALID_MR_FLAGS \
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
+#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
@@ -24,6 +24,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
if (access_flags & IB_ACCESS_REMOTE_READ)
flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
+
return flags;
}
@@ -48,7 +51,10 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
req.gva.virtual_address = mr_params->gva.virtual_address;
req.gva.access_flags = mr_params->gva.access_flags;
break;
-
+ case GDMA_MR_TYPE_ZBVA:
+ req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
+ req.zbva.access_flags = mr_params->zbva.access_flags;
+ break;
default:
ibdev_dbg(&dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
@@ -144,11 +150,18 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
- mr_params.mr_type = GDMA_MR_TYPE_GVA;
- mr_params.gva.dma_region_handle = dma_region_handle;
- mr_params.gva.virtual_address = iova;
- mr_params.gva.access_flags =
- mana_ib_verbs_to_gdma_access_flags(access_flags);
+ if (access_flags & IB_ZERO_BASED) {
+ mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
+ mr_params.zbva.dma_region_handle = dma_region_handle;
+ mr_params.zbva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ } else {
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ }
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
if (err)
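For orientation, the userspace-visible trigger for the new GDMA_MR_TYPE_ZBVA branch is the libibverbs zero-based access flag; such an MR is addressed by offset rather than by virtual address, which is why the kernel omits the VA field. A hedged sketch, assuming pd, buf and len are already set up:

	struct ibv_mr *mr;

	mr = ibv_reg_mr(pd, buf, len,
			IBV_ACCESS_LOCAL_WRITE |
			IBV_ACCESS_REMOTE_WRITE |
			IBV_ACCESS_ZERO_BASED);	/* RDMA offsets start at 0 */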
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index c928af58f38b..14fd7d6c54a2 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -635,7 +635,6 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
{
struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
- struct gdma_context *gc = mdev_to_gc(mdev);
u32 doorbell, queue_size;
int i, err;
@@ -654,7 +653,7 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto destroy_queues;
}
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
sizeof(struct ud_rq_shadow_wqe));
@@ -736,7 +735,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
req.qp_state = attr->qp_state;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 33f525b744f2..e279e69b9a51 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -43,7 +43,7 @@
#define MAX_VFS 80
#define MAX_PEND_REQS_PER_FUNC 4
-#define MAD_TIMEOUT_MS 2000
+#define MAD_TIMEOUT_SEC 2
#define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
@@ -270,7 +270,7 @@ static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -309,7 +309,7 @@ static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
if (!ret) {
/* calls mlx4_ib_mcg_timeout_handler */
queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
- msecs_to_jiffies(MAD_TIMEOUT_MS));
+ secs_to_jiffies(MAD_TIMEOUT_SEC));
}
return ret;
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
for (i = 0; i < MAX_VFS; ++i)
clean_vf_mcast(ctx, i);
- end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
+ end = jiffies + secs_to_jiffies(MAD_TIMEOUT_SEC + 3);
do {
count = 0;
mutex_lock(&ctx->mcg_table_lock);
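The msecs_to_jiffies() to secs_to_jiffies() conversions in this file and in mlx5/mr.c below are behavior-preserving: with MAD_TIMEOUT_SEC == 2, both spellings compute the same tick count on every HZ configuration. A trivial equivalence sketch:

	unsigned long a = msecs_to_jiffies(2000);	/* old: milliseconds */
	unsigned long b = secs_to_jiffies(2);		/* new: seconds */

	WARN_ON(a != b);	/* never fires; 2000 ms == 2 s */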
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 0ff9f18a71e8..680627f1de33 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1645,11 +1645,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}
-enum {
- LEFTOVERS_MC,
- LEFTOVERS_UC,
-};
-
static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct ib_flow_attr *flow_attr,
@@ -1659,43 +1654,32 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
struct mlx5_ib_flow_handler *handler = NULL;
static struct {
- struct ib_flow_attr flow_attr;
struct ib_flow_spec_eth eth_flow;
- } leftovers_specs[] = {
- [LEFTOVERS_MC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {0x1} }
- }
- },
- [LEFTOVERS_UC] = {
- .flow_attr = {
- .num_of_specs = 1,
- .size = sizeof(leftovers_specs[0])
- },
- .eth_flow = {
- .type = IB_FLOW_SPEC_ETH,
- .size = sizeof(struct ib_flow_spec_eth),
- .mask = {.dst_mac = {0x1} },
- .val = {.dst_mac = {} }
- }
- }
- };
+ struct ib_flow_attr flow_attr;
+ } leftovers_wc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_wc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = { 0x1 } } } };
- handler = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_MC].flow_attr,
- dst);
+ static struct {
+ struct ib_flow_spec_eth eth_flow;
+ struct ib_flow_attr flow_attr;
+ } leftovers_uc = { .flow_attr = { .num_of_specs = 1,
+ .size = sizeof(leftovers_uc) },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = { .dst_mac = { 0x1 } },
+ .val = { .dst_mac = {} } } };
+
+ handler = create_flow_rule(dev, ft_prio, &leftovers_wc.flow_attr, dst);
if (!IS_ERR(handler) &&
flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
handler_ucast = create_flow_rule(dev, ft_prio,
- &leftovers_specs[LEFTOVERS_UC].flow_attr,
- dst);
+ &leftovers_uc.flow_attr, dst);
if (IS_ERR(handler_ucast)) {
mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d07cacaa0abd..ce7610740412 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -485,6 +485,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_2X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_HDR;
@@ -493,10 +497,18 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_XDR;
+ break;
case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
*active_width = IB_WIDTH_8X;
*active_speed = IB_SPEED_NDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_XDR;
+ break;
default:
return -EINVAL;
}
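The three added protocol entries follow the per-lane arithmetic used by the rest of this switch: link speed equals lane rate times width, and IB_SPEED_XDR denotes a 200 Gb/s lane. In comment form:

/*
 * 200GAUI-1 / 200GBASE-CR1/KR1 : XDR x IB_WIDTH_1X = 200 Gb/s
 * 400GAUI-2 / 400GBASE-CR2/KR2 : XDR x IB_WIDTH_2X = 400 Gb/s
 * 800GAUI-4 / 800GBASE-CR4/KR4 : XDR x IB_WIDTH_4X = 800 Gb/s
 */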
@@ -4422,17 +4434,6 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
mlx5_core_native_port_num(dev->mdev) - 1);
}
-static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
-{
- dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
-}
-
-static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
-{
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
-}
-
static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
int err;
@@ -4662,9 +4663,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
@@ -4722,9 +4720,6 @@ const struct mlx5_ib_profile raw_eth_profile = {
STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
mlx5_ib_stage_cong_debugfs_init,
mlx5_ib_stage_cong_debugfs_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UAR,
- mlx5_ib_stage_uar_init,
- mlx5_ib_stage_uar_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
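
The new translate_eth_ext_proto_oper() cases extend the mapping to single-, dual- and quad-lane XDR links. A hedged arithmetic check (XDR is 200 Gb/s per lane, so lane count times 200G must equal the aggregate rate; the helper name is illustrative, not from the patch):

	/* 200GAUI_1 -> 1 lane, 400GAUI_2 -> 2 lanes, 800GAUI_4 -> 4 lanes */
	static unsigned int xdr_aggregate_gbps(unsigned int lanes)
	{
		return lanes * 200;	/* 200G, 400G and 800G respectively */
	}
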
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index ace2df3e1d9f..fde859d207ae 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -351,6 +351,7 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_UPD_XLT_PD BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
+#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7)
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
*
@@ -1005,7 +1006,6 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_ODP,
MLX5_IB_STAGE_COUNTERS,
MLX5_IB_STAGE_CONG_DEBUGFS,
- MLX5_IB_STAGE_UAR,
MLX5_IB_STAGE_BFREG,
MLX5_IB_STAGE_PRE_IB_REG_UMR,
MLX5_IB_STAGE_WHITELIST_UID,
@@ -1473,8 +1473,8 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags);
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags);
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
@@ -1495,8 +1495,11 @@ static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
return 0;
}
-static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags) {}
+static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ return -EOPNOTSUPP;
+}
static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 5fbebafc8774..57f9bc2a4a3a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -525,7 +525,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
ent->fill_to_high_water = false;
if (ent->pending)
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
else
mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
@@ -576,7 +576,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
"add keys command failed, err %d\n",
err);
queue_delayed_work(cache->wq, &ent->dwork,
- msecs_to_jiffies(1000));
+ secs_to_jiffies(1));
}
}
} else if (ent->mkeys_queue.ci > 2 * ent->limit) {
@@ -838,7 +838,7 @@ static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
static void delay_time_func(struct timer_list *t)
{
- struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
+ struct mlx5_ib_dev *dev = timer_container_of(dev, t, delay_timer);
WRITE_ONCE(dev->fill_delay, 0);
}
@@ -2051,7 +2051,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->in_use--;
if (ent->is_tmp && !ent->tmp_cleanup_scheduled) {
mod_delayed_work(ent->dev->cache.wq, &ent->dwork,
- msecs_to_jiffies(30 * 1000));
+ secs_to_jiffies(30));
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
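
The mr.c hunks apply two mechanical conversions that recur throughout this series: from_timer() becomes timer_container_of(), and whole-second msecs_to_jiffies() values become secs_to_jiffies(). A small sketch of both patterns; my_dev and poll_timer are illustrative names, not from the patch:

	struct my_dev {
		struct timer_list poll_timer;
	};

	static void my_poll(struct timer_list *t)
	{
		/* was: from_timer(dev, t, poll_timer) */
		struct my_dev *dev = timer_container_of(dev, t, poll_timer);

		/* was: jiffies + msecs_to_jiffies(1000) */
		mod_timer(&dev->poll_timer, jiffies + secs_to_jiffies(1));
	}
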
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 86d8fa63bf69..eaa2f9f5f3a9 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -34,6 +34,9 @@
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
+#include <linux/hmm.h>
+#include <linux/hmm-dma.h>
+#include <linux/pci-p2pdma.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -158,41 +161,50 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
}
}
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
- u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
- if (umem_dma & ODP_READ_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_READ;
- if (umem_dma & ODP_WRITE_ALLOWED_BIT)
- mtt_entry |= MLX5_IB_MTT_WRITE;
-
- return mtt_entry;
-}
-
-static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+static int populate_mtt(__be64 *pas, size_t start, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- dma_addr_t pa;
+ bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE;
+ struct pci_p2pdma_map_state p2pdma_state = {};
+ struct ib_device *dev = odp->umem.ibdev;
size_t i;
if (flags & MLX5_IB_UPD_XLT_ZAP)
- return;
+ return 0;
for (i = 0; i < nentries; i++) {
- pa = odp->dma_list[idx + i];
- pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ unsigned long pfn = odp->map.pfn_list[start + i];
+ dma_addr_t dma_addr;
+
+ if (!(pfn & HMM_PFN_VALID))
+ /* not yet faulted in (ODP initialization), skip */
+ continue;
+
+ dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map,
+ start + i, &p2pdma_state);
+ if (ib_dma_mapping_error(dev, dma_addr))
+ return -EFAULT;
+
+ dma_addr |= MLX5_IB_MTT_READ;
+ if ((pfn & HMM_PFN_WRITE) && !downgrade)
+ dma_addr |= MLX5_IB_MTT_WRITE;
+
+ pas[i] = cpu_to_be64(dma_addr);
+ odp->npages++;
}
+ return 0;
}
-void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
- struct mlx5_ib_mr *mr, int flags)
+int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
{
if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
populate_klm(xlt, idx, nentries, mr, flags);
+ return 0;
} else {
- populate_mtt(xlt, idx, nentries, mr, flags);
+ return populate_mtt(xlt, idx, nentries, mr, flags);
}
}
@@ -303,8 +315,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem_odp->dma_list[idx] &
- (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
if (!in_block) {
blk_start_idx = idx;
in_block = 1;
@@ -687,7 +698,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
{
int page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- u64 access_mask;
+ u64 access_mask = 0;
u64 start_idx;
bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
@@ -695,12 +706,14 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+ if (flags & MLX5_PF_FLAGS_DOWNGRADE)
+ xlt_flags |= MLX5_IB_UPD_XLT_DOWNGRADE;
+
page_shift = odp->page_shift;
start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
- access_mask = ODP_READ_ALLOWED_BIT;
if (odp->umem.writable && !downgrade)
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
if (np < 0)
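
populate_mtt() now resolves each HMM pfn to a DMA address and encodes the access rights in the low bits of the MTT entry. A sketch of just the encoding step, assuming dma_addr is already mapped and page aligned (so its low bits are free for flags):

	dma_addr |= MLX5_IB_MTT_READ;		/* read is always allowed */
	if ((pfn & HMM_PFN_WRITE) && !downgrade)
		dma_addr |= MLX5_IB_MTT_WRITE;	/* honor write permission */
	pas[i] = cpu_to_be64(dma_addr);		/* device expects big endian */
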
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index d3dcc272200a..146d03ae40bd 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -21,8 +21,10 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
spin_lock_irqsave(&table->lock, flags);
common = radix_tree_lookup(&table->tree, rsn);
- if (common)
+ if (common && !common->invalid)
refcount_inc(&common->refcount);
+ else
+ common = NULL;
spin_unlock_irqrestore(&table->lock, flags);
@@ -178,6 +180,18 @@ static int create_resource_common(struct mlx5_ib_dev *dev,
return 0;
}
+static void modify_resource_common_state(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp,
+ bool invalid)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ qp->common.invalid = invalid;
+ spin_unlock_irqrestore(&table->lock, flags);
+}
+
static void destroy_resource_common(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -609,8 +623,20 @@ err_destroy_rq:
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq)
{
+ int ret;
+
+ /* The RQ destruction may be retried if it fails, so mark the common
+ * resource invalid first and only destroy the resources once FW
+ * destruction has completed successfully.
+ */
+ modify_resource_common_state(dev, rq, true);
+ ret = destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ if (ret) {
+ modify_resource_common_state(dev, rq, false);
+ return ret;
+ }
destroy_resource_common(dev, rq);
- return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ return 0;
}
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
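
The mlx5_get_rsc() hunk above is the consumer of the new 'invalid' flag: an entry still present in the radix tree but marked invalid is treated as already gone. One plausible interleaving that the flag closes, sketched as a comment (editorial, not from the patch):

	/*
	 *   CPU0: destroy_rq_tracked() fails; the destroy will be retried
	 *   CPU1: FW event arrives, mlx5_get_rsc(rsn) finds the entry
	 *
	 * Marking the entry invalid before issuing the FW destroy makes
	 * CPU1's lookup return NULL instead of taking a reference on a
	 * resource whose teardown is in progress.
	 */
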
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 793f3c5c4d01..5be4426a2884 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -840,7 +840,17 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
size_to_map = npages * desc_size;
dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
- mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ /*
+ * npages is the maximum number of pages to map, but there is no
+ * guarantee that all of them will actually be mapped.
+ *
+ * For example, if a page is a P2P page of a type that is not
+ * supported for mapping, fewer pages than requested will be
+ * mapped.
+ */
+ err = mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ if (err)
+ return err;
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 6eabef9aa211..f1d79968c985 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -132,7 +132,7 @@ static void handle_catas(struct mthca_dev *dev)
static void poll_catas(struct timer_list *t)
{
- struct mthca_dev *dev = from_timer(dev, t, catas_err.timer);
+ struct mthca_dev *dev = timer_container_of(dev, t, catas_err.timer);
int i;
for (i = 0; i < dev->catas_err.size; ++i)
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 192f83fd7c8a..dacb8ceeebe0 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -144,7 +144,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index bdd724add147..91fa5e160c0d 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -659,8 +659,8 @@ int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
static void qib_run_led_override(struct timer_list *t)
{
- struct qib_pportdata *ppd = from_timer(ppd, t,
- led_override_timer);
+ struct qib_pportdata *ppd = timer_container_of(ppd, t,
+ led_override_timer);
struct qib_devdata *dd = ppd->dd;
int timeoff;
int ph_idx;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index b9f4a2937c3a..2098de762bf5 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -90,7 +90,7 @@ static int create_file(const char *name, umode_t mode,
int error;
inode_lock(d_inode(parent));
- *dentry = lookup_one_len(name, parent, strlen(name));
+ *dentry = lookup_noperm(&QSTR(name), parent);
if (!IS_ERR(*dentry))
error = qibfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
@@ -433,7 +433,7 @@ static int remove_device_files(struct super_block *sb,
char unit[10];
snprintf(unit, sizeof(unit), "%u", dd->unit);
- dir = lookup_one_len_unlocked(unit, sb->s_root, strlen(unit));
+ dir = lookup_noperm_unlocked(&QSTR(unit), sb->s_root);
if (IS_ERR(dir)) {
pr_err("Lookup of %s failed\n", unit);
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 277769fa9745..2640d283eee6 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2615,7 +2615,7 @@ static void qib_chk_6120_errormask(struct qib_devdata *dd)
*/
static void qib_get_6120_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = from_timer(dd, t, stats_timer);
+ struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
struct qib_pportdata *ppd = dd->pport;
unsigned long flags;
u64 traffic_wds;
@@ -2905,7 +2905,7 @@ static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
static void pma_6120_timer(struct timer_list *t)
{
- struct qib_chip_specific *cs = from_timer(cs, t, pma_timer);
+ struct qib_chip_specific *cs = timer_container_of(cs, t, pma_timer);
struct qib_pportdata *ppd = cs->ppd;
struct qib_ibport *ibp = &ppd->ibport_data;
unsigned long flags;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 302c0d19f57d..0b347d1129fa 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1044,8 +1044,8 @@ done:
static void reenable_7220_chase(struct timer_list *t)
{
- struct qib_chippport_specific *cpspec = from_timer(cpspec, t,
- chase_timer);
+ struct qib_chippport_specific *cpspec = timer_container_of(cpspec, t,
+ chase_timer);
struct qib_pportdata *ppd = &cpspec->pportdata;
ppd->cpspec->chase_timer.expires = 0;
@@ -3240,7 +3240,7 @@ done:
*/
static void qib_get_7220_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = from_timer(dd, t, stats_timer);
+ struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
struct qib_pportdata *ppd = dd->pport;
unsigned long flags;
u64 traffic_wds;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 7b4bf06c3b38..781b6a4fb002 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1721,7 +1721,8 @@ static void qib_error_tasklet(struct tasklet_struct *t)
static void reenable_chase(struct timer_list *t)
{
- struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
+ struct qib_chippport_specific *cp = timer_container_of(cp, t,
+ chase_timer);
struct qib_pportdata *ppd = cp->ppd;
ppd->cpspec->chase_timer.expires = 0;
@@ -5084,7 +5085,7 @@ done:
*/
static void qib_get_7322_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = from_timer(dd, t, stats_timer);
+ struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
struct qib_pportdata *ppd;
unsigned long flags;
u64 traffic_wds;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 33c23adec101..1c45814f5646 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -493,7 +493,7 @@ static void enable_chip(struct qib_devdata *dd)
static void verify_interrupt(struct timer_list *t)
{
- struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
+ struct qib_devdata *dd = timer_container_of(dd, t, intrchk_timer);
u64 int_counter;
if (!dd)
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 85c3187d796d..93357823c6c0 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -172,7 +172,8 @@ skip_ibchange:
void qib_clear_symerror_on_linkup(struct timer_list *t)
{
- struct qib_pportdata *ppd = from_timer(ppd, t, symerr_clear_timer);
+ struct qib_pportdata *ppd = timer_container_of(ppd, t,
+ symerr_clear_timer);
if (ppd->lflags & QIBL_LINKACTIVE)
return;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 568deb77ab4d..d99932b2ce21 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2398,7 +2398,8 @@ bail:
static void xmit_wait_timer_func(struct timer_list *t)
{
- struct qib_pportdata *ppd = from_timer(ppd, t, cong_stats.timer);
+ struct qib_pportdata *ppd = timer_container_of(ppd, t,
+ cong_stats.timer);
struct qib_devdata *dd = dd_from_ppd(ppd);
unsigned long flags;
u8 status;
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index c4ee120ac7fb..40bc0a34273e 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1385,7 +1385,7 @@ MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
static void qib_run_relock(struct timer_list *t)
{
- struct qib_chip_specific *cs = from_timer(cs, t, relock_timer);
+ struct qib_chip_specific *cs = timer_container_of(cs, t, relock_timer);
struct qib_devdata *dd = cs->dd;
struct qib_pportdata *ppd = dd->pport;
int timeoff;
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 1325110237cd..397928c80f7c 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -548,7 +548,7 @@ void qib_hol_up(struct qib_pportdata *ppd)
*/
void qib_hol_event(struct timer_list *t)
{
- struct qib_pportdata *ppd = from_timer(ppd, t, hol_timer);
+ struct qib_pportdata *ppd = timer_container_of(ppd, t, hol_timer);
/* If hardware error, etc, skip. */
if (!(ppd->dd->flags & QIB_INITTED))
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9832567a8bb8..bab657f93084 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -361,7 +361,7 @@ drop:
*/
static void mem_timer(struct timer_list *t)
{
- struct qib_ibdev *dev = from_timer(dev, t, mem_timer);
+ struct qib_ibdev *dev = timer_container_of(dev, t, mem_timer);
struct list_head *list = &dev->memwait;
struct rvt_qp *qp = NULL;
struct qib_qp_priv *priv = NULL;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index f948b76f984d..3fbf99757b11 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -56,7 +56,7 @@ static int usnic_uiom_dma_fault(struct iommu_domain *domain,
unsigned long iova, int flags,
void *token)
{
- usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
+ usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
dev_name(dev),
domain, iova, flags);
return -ENOSYS;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 583debe4b9a2..e825e2ef7966 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2585,7 +2585,7 @@ EXPORT_SYMBOL(rvt_del_timers_sync);
*/
static void rvt_rc_timeout(struct timer_list *t)
{
- struct rvt_qp *qp = from_timer(qp, t, s_timer);
+ struct rvt_qp *qp = timer_container_of(qp, t, s_timer);
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
unsigned long flags;
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index c180e7ebcfc5..1ed5b63f8afc 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
- depends on INET && PCI && INFINIBAND
+ depends on INET && PCI && INFINIBAND && 64BIT
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRC32
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index b248c68bf9b1..3a77d6db1720 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -101,6 +101,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_FLUSH;
+ rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC_WRITE;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d48af2180745..a5b2b62f596b 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -114,7 +114,7 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
void retransmit_timer(struct timer_list *t)
{
- struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+ struct rxe_qp *qp = timer_container_of(qp, t, retrans_timer);
unsigned long flags;
rxe_dbg_qp(qp, "retransmit timer fired\n");
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index fec87c9030ab..fffd144d509e 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
- if (err) {
- vfree(cq->queue->buf);
- kfree(cq->queue);
+ if (err)
return err;
- }
cq->is_user = uresp;
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0bc3fbb6554f..876702058c84 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -70,9 +70,9 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
void *addr, int length, enum rxe_mr_copy_dir dir);
int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
@@ -193,13 +193,16 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
/* rxe_odp.c */
extern const struct mmu_interval_notifier_ops rxe_mn_ops;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#if defined CONFIG_INFINIBAND_ON_DEMAND_PAGING
int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
u64 iova, int access_flags, struct rxe_mr *mr);
int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val);
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length);
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int
rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
@@ -212,9 +215,19 @@ static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
{
return -EOPNOTSUPP;
}
-static inline int
+static inline enum resp_states
rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+static inline int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ return -EOPNOTSUPP;
+}
+static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr,
+ u64 iova, u64 value)
{
return RESPST_ERR_UNSUPPORTED_OPCODE;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 432d864c3ce9..bcb97b3ea58a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -424,7 +424,7 @@ err1:
return err;
}
-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
+static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
{
unsigned int page_offset;
unsigned long index;
@@ -433,16 +433,6 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
int err;
u8 *va;
- /* mr must be valid even if length is zero */
- if (WARN_ON(!mr))
- return -EINVAL;
-
- if (length == 0)
- return 0;
-
- if (mr->ibmr.type == IB_MR_TYPE_DMA)
- return -EFAULT;
-
err = mr_check_range(mr, iova, length);
if (err)
return err;
@@ -454,7 +444,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
if (!page)
return -EFAULT;
bytes = min_t(unsigned int, length,
- mr_page_size(mr) - page_offset);
+ mr_page_size(mr) - page_offset);
va = kmap_local_page(page);
arch_wb_cache_pmem(va + page_offset, bytes);
@@ -468,11 +458,33 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
return 0;
}
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length)
+{
+ int err;
+
+ /* mr must be valid even if length is zero */
+ if (WARN_ON(!mr))
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return -EFAULT;
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_flush_pmem_iova(mr, start, length);
+ else
+ err = rxe_mr_flush_pmem_iova(mr, start, length);
+
+ return err;
+}
+
/* Guarantee atomicity of atomic operations at the machine level. */
DEFINE_SPINLOCK(atomic_ops_lock);
-int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
unsigned int page_offset;
struct page *page;
@@ -524,27 +536,15 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-#if defined CONFIG_64BIT
-/* only implemented or called for 64 bit architectures */
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
unsigned int page_offset;
struct page *page;
u64 *va;
- /* ODP is not supported right now. WIP. */
- if (is_odp_mr(mr))
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
- /* See IBA oA19-28 */
- if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state\n");
- return RESPST_ERR_RKEY_VIOLATION;
- }
-
if (mr->ibmr.type == IB_MR_TYPE_DMA) {
page_offset = iova & (PAGE_SIZE - 1);
page = ib_virt_dma_to_page(iova);
@@ -572,20 +572,12 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
}
va = kmap_local_page(page);
-
/* Do atomic write after all prior operations have completed */
smp_store_release(&va[page_offset >> 3], value);
-
kunmap_local(va);
- return 0;
-}
-#else
-int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
-{
- return RESPST_ERR_UNSUPPORTED_OPCODE;
+ return RESPST_NONE;
}
-#endif
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
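
With the 64BIT dependency added in the Kconfig hunk, the CONFIG_64BIT fallback could be dropped: the atomic-write core is now unconditionally a single aligned 64-bit store with release semantics. Its essential shape, shared by the MR and ODP paths:

	u64 *va = kmap_local_page(page);

	/* release ordering: all prior writes become visible before the
	 * 8-byte payload does, as the remote reader requires */
	smp_store_release(&va[page_offset >> 3], value);
	kunmap_local(va);
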
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index 9f6e2bb2a269..dbc5a5600eb7 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -4,6 +4,7 @@
*/
#include <linux/hmm.h>
+#include <linux/libnvdimm.h>
#include <rdma/ib_umem_odp.h>
@@ -26,7 +27,7 @@ static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni,
start = max_t(u64, ib_umem_start(umem_odp), range->start);
end = min_t(u64, ib_umem_end(umem_odp), range->end);
- /* update umem_odp->dma_list */
+ /* update umem_odp->map.pfn_list */
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
mutex_unlock(&umem_odp->umem_mutex);
@@ -44,12 +45,11 @@ static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcn
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT);
- u64 access_mask;
+ u64 access_mask = 0;
int np;
- access_mask = ODP_READ_ALLOWED_BIT;
if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
- access_mask |= ODP_WRITE_ALLOWED_BIT;
+ access_mask |= HMM_PFN_WRITE;
/*
* ib_umem_odp_map_dma_and_lock() locks umem_mutex on success.
@@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
return err;
}
-static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
- u64 iova, int length, u32 perm)
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
+ int length)
{
bool need_fault = false;
u64 addr;
@@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
while (addr < iova + length) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- if (!(umem_odp->dma_list[idx] & perm)) {
+ if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
need_fault = true;
break;
}
@@ -147,23 +147,28 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
return need_fault;
}
+static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+}
+
+static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova)
+{
+ return iova & (BIT(umem_odp->page_shift) - 1);
+}
+
static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
bool need_fault;
- u64 perm;
int err;
if (unlikely(length < 1))
return -EINVAL;
- perm = ODP_READ_ALLOWED_BIT;
- if (!(flags & RXE_PAGEFAULT_RDONLY))
- perm |= ODP_WRITE_ALLOWED_BIT;
-
mutex_lock(&umem_odp->umem_mutex);
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault) {
mutex_unlock(&umem_odp->umem_mutex);
@@ -173,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
if (err < 0)
return err;
- need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+ need_fault = rxe_check_pagefault(umem_odp, iova, length);
if (need_fault)
return -EFAULT;
}
@@ -190,13 +195,13 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
size_t offset;
u8 *user_va;
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- offset = iova & (BIT(umem_odp->page_shift) - 1);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
while (length > 0) {
u8 *src, *dest;
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
user_va = kmap_local_page(page);
if (!user_va)
return -EFAULT;
@@ -255,8 +260,9 @@ int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return err;
}
-static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
+ int opcode, u64 compare,
+ u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
unsigned int page_offset;
@@ -277,9 +283,9 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return RESPST_ERR_RKEY_VIOLATION;
}
- idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- page_offset = iova & (BIT(umem_odp->page_shift) - 1);
- page = hmm_pfn_to_page(umem_odp->pfn_list[idx]);
+ idx = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
if (!page)
return RESPST_ERR_RKEY_VIOLATION;
@@ -304,11 +310,11 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
kunmap_local(va);
- return 0;
+ return RESPST_NONE;
}
-int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
- u64 compare, u64 swap_add, u64 *orig_val)
+enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
{
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
int err;
@@ -324,3 +330,91 @@ int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
return err;
}
+
+int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
+ unsigned int length)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ unsigned int bytes;
+ int err;
+ u8 *va;
+
+ err = rxe_odp_map_range_and_lock(mr, iova, length,
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return err;
+
+ while (length > 0) {
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return -EFAULT;
+ }
+
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+
+ va = kmap_local_page(page);
+ arch_wb_cache_pmem(va + page_offset, bytes);
+ kunmap_local(va);
+
+ length -= bytes;
+ iova += bytes;
+ page_offset = 0;
+ }
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return 0;
+}
+
+enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ int err;
+ u64 *va;
+
+ /* See IBA oA19-28 */
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ err = rxe_odp_map_range_and_lock(mr, iova, sizeof(value),
+ RXE_PAGEFAULT_DEFAULT);
+ if (err)
+ return RESPST_ERR_RKEY_VIOLATION;
+
+ page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
+ index = rxe_odp_iova_to_index(umem_odp, iova);
+ page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+ if (!page) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ /* See IBA A19.4.2 */
+ if (unlikely(page_offset & 0x7)) {
+ mutex_unlock(&umem_odp->umem_mutex);
+ rxe_dbg_mr(mr, "misaligned address\n");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+ /* Do atomic write after all prior operations have completed */
+ smp_store_release(&va[page_offset >> 3], value);
+ kunmap_local(va);
+
+ mutex_unlock(&umem_odp->umem_mutex);
+
+ return RESPST_NONE;
+}
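
A worked example of the two address helpers introduced above, assuming 4 KiB ODP pages (page_shift == 12) and ib_umem_start(umem_odp) == 0x4000:

	/*
	 *   iova   = 0x5234
	 *   index  = (0x5234 - 0x4000) >> 12  = 1      (second page)
	 *   offset = 0x5234 & (BIT(12) - 1)   = 0x234  (byte within page)
	 */
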
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 003f681e5dc0..767870568372 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -53,12 +53,9 @@ enum rxe_device_param {
| IB_DEVICE_MEM_WINDOW
| IB_DEVICE_FLUSH_GLOBAL
| IB_DEVICE_FLUSH_PERSISTENT
-#ifdef CONFIG_64BIT
| IB_DEVICE_MEM_WINDOW_TYPE_2B
| IB_DEVICE_ATOMIC_WRITE,
-#else
- | IB_DEVICE_MEM_WINDOW_TYPE_2B,
-#endif /* CONFIG_64BIT */
+
RXE_MAX_SGE = 32,
RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
sizeof(struct ib_sge) * RXE_MAX_SGE,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 7975fb0e2782..f2af3e0aef35 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -811,7 +811,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
spin_unlock_irqrestore(&qp->state_lock, flags);
qp->qp_timeout_jiffies = 0;
- if (qp_type(qp) == IB_QPT_RC) {
+ /* timer_setup() initializes .function, so a NULL .function means
+ * timer_setup() was never called and the timer was never
+ * initialized; only delete timers that were actually set up.
+ */
+ if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function &&
+ qp->rnr_nak_timer.function) {
timer_delete_sync(&qp->retrans_timer);
timer_delete_sync(&qp->rnr_nak_timer);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9d0392df8a92..373b03f223be 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -97,7 +97,7 @@ static void req_retry(struct rxe_qp *qp)
void rnr_nak_timer(struct timer_list *t)
{
- struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
+ struct rxe_qp *qp = timer_container_of(qp, t, rnr_nak_timer);
unsigned long flags;
rxe_dbg_qp(qp, "nak timer fired\n");
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5d9174e408db..711f73e0bbb1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -649,10 +649,6 @@ static enum resp_states process_flush(struct rxe_qp *qp,
struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
- /* ODP is not supported right now. WIP. */
- if (is_odp_mr(mr))
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-
/* oA19-14, oA19-15 */
if (res && res->replay)
return RESPST_ACKNOWLEDGE;
@@ -753,7 +749,16 @@ static enum resp_states atomic_write_reply(struct rxe_qp *qp,
value = *(u64 *)payload_addr(pkt);
iova = qp->resp.va + qp->resp.offset;
- err = rxe_mr_do_atomic_write(mr, iova, value);
+ /* See IBA oA19-28 */
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state\n");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (is_odp_mr(mr))
+ err = rxe_odp_do_atomic_write(mr, iova, value);
+ else
+ err = rxe_mr_do_atomic_write(mr, iova, value);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 80332638d9e3..6f8f353e9583 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -85,17 +85,17 @@ static bool is_done(struct rxe_task *task)
/* do_task is a wrapper for the three tasks (requester,
* completer, responder) and calls them in a loop until
- * they return a non-zero value. It is called either
- * directly by rxe_run_task or indirectly if rxe_sched_task
- * schedules the task. They must call __reserve_if_idle to
- * move the task to busy before calling or scheduling.
- * The task can also be moved to drained or invalid
- * by calls to rxe_cleanup_task or rxe_disable_task.
- * In that case tasks which get here are not executed but
- * just flushed. The tasks are designed to look to see if
- * there is work to do and then do part of it before returning
- * here with a return value of zero until all the work
- * has been consumed then it returns a non-zero value.
+ * they return a non-zero value. It is called indirectly
+ * when rxe_sched_task schedules the task. They must
+ * call __reserve_if_idle to move the task to busy before
+ * calling or scheduling. The task can also be moved to
+ * drained or invalid by calls to rxe_cleanup_task or
+ * rxe_disable_task. In that case tasks which get here
+ * are not executed but just flushed. Each task is
+ * designed to check whether there is work to do, do
+ * part of it, and return here with a value of zero;
+ * once all the work has been consumed it returns a
+ * non-zero value.
* The number of times the task can be run is limited by
* max iterations so one task cannot hold the cpu forever.
* If the limit is hit and work remains the task is rescheduled.
@@ -234,24 +234,6 @@ void rxe_cleanup_task(struct rxe_task *task)
spin_unlock_irqrestore(&task->lock, flags);
}
-/* run the task inline if it is currently idle
- * cannot call do_task holding the lock
- */
-void rxe_run_task(struct rxe_task *task)
-{
- unsigned long flags;
- bool run;
-
- WARN_ON(rxe_read(task->qp) <= 0);
-
- spin_lock_irqsave(&task->lock, flags);
- run = __reserve_if_idle(task);
- spin_unlock_irqrestore(&task->lock, flags);
-
- if (run)
- do_task(task);
-}
-
/* schedule the task to run later as a work queue entry.
* the queue_work call can be called holding
* the lock.
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index a63e258b3d66..a8c9a77b6027 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -47,8 +47,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
-void rxe_run_task(struct rxe_task *task);
-
void rxe_sched_task(struct rxe_task *task);
/* keep a task from scheduling */
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index ae4a953e2a03..186f182b80e7 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -3,6 +3,7 @@ config RDMA_SIW
depends on INET && INFINIBAND
depends on INFINIBAND_VIRT_DMA
select CRC32
+ select NET_CRC32C
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 385067e07faf..f5fd71717b80 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -693,29 +693,9 @@ static inline void siw_crc_oneshot(const void *data, size_t len, u8 out[4])
return siw_crc_final(&crc, out);
}
-static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
-{
- return (__force __wsum)crc32c((__force __u32)sum, buff, len);
-}
-
-static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
- int len)
-{
- return (__force __wsum)crc32c_combine((__force __u32)csum,
- (__force __u32)csum2, len);
-}
-
static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
- const struct skb_checksum_ops siw_cs_ops = {
- .update = siw_csum_update,
- .combine = siw_csum_combine,
- };
- __wsum crc = (__force __wsum)srx->mpa_crc;
-
- crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
- &siw_cs_ops);
- srx->mpa_crc = (__force u32)crc;
+ srx->mpa_crc = skb_crc32c(srx->skb, srx->skb_offset, len, srx->mpa_crc);
}
#define siw_dbg(ibdev, fmt, ...) \
@@ -738,7 +718,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
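
siw_crc_skb() now reduces to a single call: skb_crc32c(), provided when NET_CRC32C is selected per the Kconfig hunk, walks an skb and folds CRC32C over linear and paged data, replacing the __skb_checksum() plus custom-ops arrangement removed above. A sketch of seeding and chaining across segments (the offset/length names are illustrative):

	u32 crc = 0;					/* seed */
	crc = skb_crc32c(skb, hdr_off, hdr_len, crc);	/* fold header */
	crc = skb_crc32c(skb, pay_off, pay_len, crc);	/* then payload */
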
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index f3c2226aff94..25b3c741b66b 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -72,7 +72,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
wc->opcode = map_wc_opcode[cqe->opcode];
wc->status = map_cqe_status[cqe->status].ib;
siw_dbg_cq(cq,
- "idx %u, type %d, flags %2x, id 0x%pK\n",
+ "idx %u, type %d, flags %2x, id 0x%p\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
cqe->flags, (void *)(uintptr_t)cqe->id);
} else {
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index dcb963607c8b..d5ddeb17bd22 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -18,30 +18,6 @@
#define SIW_STAG_MAX_INDEX 0x00ffffff
/*
- * The code avoids special Stag of zero and tries to randomize
- * STag values between 1 and SIW_STAG_MAX_INDEX.
- */
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m)
-{
- struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
- u32 id, next;
-
- get_random_bytes(&next, 4);
- next &= SIW_STAG_MAX_INDEX;
-
- if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next,
- GFP_KERNEL) < 0)
- return -ENOMEM;
-
- /* Set the STag index part */
- m->stag = id << 8;
-
- siw_dbg_mem(m, "new MEM object\n");
-
- return 0;
-}
-
-/*
* siw_mem_id2obj()
*
* resolves memory from stag given by id. might be called from:
@@ -181,10 +157,10 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
*/
if (addr < mem->va || addr + len > mem->va + mem->len) {
siw_dbg_pd(pd, "MEM interval len %d\n", len);
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] out of bounds\n",
(void *)(uintptr_t)addr,
(void *)(uintptr_t)(addr + len));
- siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+ siw_dbg_pd(pd, "[0x%p, 0x%p] STag=0x%08x\n",
(void *)(uintptr_t)mem->va,
(void *)(uintptr_t)(mem->va + mem->len),
mem->stag);
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index e74cfcd6dbc1..8e769d30e2ac 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -12,7 +12,6 @@ void siw_umem_release(struct siw_umem *umem);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
enum ib_access_flags perms, int len);
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 32554eba1eac..a10820e33887 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,7 +38,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
p = siw_get_upage(umem, dest_addr);
if (unlikely(!p)) {
- pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
+ pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
__func__, qp_id(rx_qp(srx)),
(void *)(uintptr_t)dest_addr,
(void *)(uintptr_t)umem->fp_addr);
@@ -51,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
pg_off = dest_addr & ~PAGE_MASK;
bytes = min(len, (int)PAGE_SIZE - pg_off);
- siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
+ siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
dest = kmap_atomic(p);
rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -105,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
{
int rv;
- siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
if (unlikely(rv)) {
- pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
qp_id(rx_qp(srx)), __func__, len, kva, rv);
return rv;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index fd7b266a221b..2b2a7b8e93b0 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -936,7 +936,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
rv = -EINVAL;
break;
}
- siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
sqe->opcode, sqe->flags,
(void *)(uintptr_t)sqe->id);
@@ -1102,7 +1102,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
siw_dbg_qp(qp, "error %d\n", rv);
*bad_wr = wr;
}
- return rv > 0 ? 0 : rv;
+ return rv;
}
int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
@@ -1332,7 +1332,7 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct siw_device *sdev = to_siw_dev(pd->device);
int rv;
- siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+ siw_dbg_pd(pd, "start: 0x%p, va: 0x%p, len: %llu\n",
(void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
@@ -1525,7 +1525,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
mem->len = base_mr->length;
mem->va = base_mr->iova;
siw_dbg_mem(mem,
- "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+ "%llu bytes, start 0x%p, %u SLE to %u entries\n",
mem->len, (void *)(uintptr_t)mem->va, num_sle,
pbl->num_buf);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index abe0522b7df4..91f866e3fb8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -329,14 +329,6 @@ struct ipoib_dev_priv {
unsigned long flags;
- /*
- * This protects access to the child_intfs list.
- * To READ from child_intfs the RTNL or vlan_rwsem read side must be
- * held. To WRITE RTNL and the vlan_rwsem write side must be held (in
- * that order) This lock exists because we have a few contexts where
- * we need the child_intfs, but do not want to grab the RTNL.
- */
- struct rw_semaphore vlan_rwsem;
struct mutex mcast_mutex;
struct rb_root path_tree;
@@ -399,6 +391,9 @@ struct ipoib_dev_priv {
struct ib_event_handler event_handler;
struct net_device *parent;
+ /* The 'child_intfs' list and the 'list' membership of all child
+ * devices are protected by the netdev instance lock of 'dev'.
+ */
struct list_head child_intfs;
struct list_head list;
int child_type;
@@ -512,6 +507,8 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *format,
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
+void ipoib_queue_work(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level);
void ipoib_ib_tx_timeout_work(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5cde275daa94..10b0dbda6cd5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -40,6 +40,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <net/netdev_lock.h>
#include <rdma/ib_cache.h>
#include "ipoib.h"
@@ -781,16 +782,20 @@ static void ipoib_napi_enable(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- napi_enable(&priv->recv_napi);
- napi_enable(&priv->send_napi);
+ netdev_lock_ops_to_full(dev);
+ napi_enable_locked(&priv->recv_napi);
+ napi_enable_locked(&priv->send_napi);
+ netdev_unlock_full_to_ops(dev);
}
static void ipoib_napi_disable(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- napi_disable(&priv->recv_napi);
- napi_disable(&priv->send_napi);
+ netdev_lock_ops_to_full(dev);
+ napi_disable_locked(&priv->recv_napi);
+ napi_disable_locked(&priv->send_napi);
+ netdev_unlock_full_to_ops(dev);
}
int ipoib_ib_dev_stop_default(struct net_device *dev)
@@ -1172,24 +1177,11 @@ out:
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
- enum ipoib_flush_level level,
- int nesting)
+ enum ipoib_flush_level level)
{
- struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
int result;
- down_read_nested(&priv->vlan_rwsem, nesting);
-
- /*
- * Flush any child interfaces too -- they might be up even if
- * the parent is down.
- */
- list_for_each_entry(cpriv, &priv->child_intfs, list)
- __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
-
- up_read(&priv->vlan_rwsem);
-
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
/* Make sure the dev_addr is set even if not flushing */
@@ -1253,10 +1245,14 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
+ netdev_lock_ops(dev);
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
- if (ipoib_ib_dev_open(dev))
+ result = ipoib_ib_dev_open(dev);
+ netdev_unlock_ops(dev);
+
+ if (result)
return;
if (netif_queue_stopped(dev))
@@ -1280,7 +1276,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}
void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1288,7 +1284,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1297,10 +1293,35 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
container_of(work, struct ipoib_dev_priv, flush_heavy);
rtnl_lock();
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
rtnl_unlock();
}
+void ipoib_queue_work(struct ipoib_dev_priv *priv,
+ enum ipoib_flush_level level)
+{
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ netdev_lock(priv->dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_queue_work(cpriv, level);
+ netdev_unlock(priv->dev);
+ }
+
+ switch (level) {
+ case IPOIB_FLUSH_LIGHT:
+ queue_work(ipoib_workqueue, &priv->flush_light);
+ break;
+ case IPOIB_FLUSH_NORMAL:
+ queue_work(ipoib_workqueue, &priv->flush_normal);
+ break;
+ case IPOIB_FLUSH_HEAVY:
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+ break;
+ }
+}
+
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3b463db8ce39..f2f5465f2a90 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -49,6 +49,7 @@
#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
+#include <net/netdev_lock.h>
#include <net/pkt_sched.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
@@ -132,6 +133,52 @@ static int ipoib_netdev_event(struct notifier_block *this,
}
#endif
+struct ipoib_ifupdown_work {
+ struct work_struct work;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ bool up;
+};
+
+static void ipoib_ifupdown_task(struct work_struct *work)
+{
+ struct ipoib_ifupdown_work *pwork =
+ container_of(work, struct ipoib_ifupdown_work, work);
+ struct net_device *dev = pwork->dev;
+ unsigned int flags;
+
+ rtnl_lock();
+ flags = dev->flags;
+ if (pwork->up)
+ flags |= IFF_UP;
+ else
+ flags &= ~IFF_UP;
+
+ if (dev->flags != flags)
+ dev_change_flags(dev, flags, NULL);
+ rtnl_unlock();
+ netdev_put(dev, &pwork->dev_tracker);
+ kfree(pwork);
+}
+
+static void ipoib_schedule_ifupdown_task(struct net_device *dev, bool up)
+{
+ struct ipoib_ifupdown_work *work;
+
+ if ((up && (dev->flags & IFF_UP)) ||
+ (!up && !(dev->flags & IFF_UP)))
+ return;
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return;
+ work->dev = dev;
+ netdev_hold(dev, &work->dev_tracker, GFP_KERNEL);
+ work->up = up;
+ INIT_WORK(&work->work, ipoib_ifupdown_task);
+ queue_work(ipoib_workqueue, &work->work);
+}
+
int ipoib_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -154,17 +201,10 @@ int ipoib_open(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
- down_read(&priv->vlan_rwsem);
- list_for_each_entry(cpriv, &priv->child_intfs, list) {
- int flags;
-
- flags = cpriv->dev->flags;
- if (flags & IFF_UP)
- continue;
-
- dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
- }
- up_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_schedule_ifupdown_task(cpriv->dev, true);
+ netdev_unlock_full_to_ops(dev);
} else if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
@@ -199,17 +239,10 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
- down_read(&priv->vlan_rwsem);
- list_for_each_entry(cpriv, &priv->child_intfs, list) {
- int flags;
-
- flags = cpriv->dev->flags;
- if (!(flags & IFF_UP))
- continue;
-
- dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
- }
- up_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ ipoib_schedule_ifupdown_task(cpriv->dev, false);
+ netdev_unlock_full_to_ops(dev);
}
return 0;
@@ -426,17 +459,20 @@ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
}
}
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ return matches;
+
/* Check child interfaces */
- down_read_nested(&priv->vlan_rwsem, nesting);
+ netdev_lock(priv->dev);
list_for_each_entry(child_priv, &priv->child_intfs, list) {
matches += ipoib_match_gid_pkey_addr(child_priv, gid,
- pkey_index, addr,
- nesting + 1,
- found_net_dev);
+ pkey_index, addr,
+ nesting + 1,
+ found_net_dev);
if (matches > 1)
break;
}
- up_read(&priv->vlan_rwsem);
+ netdev_unlock(priv->dev);
return matches;
}
@@ -531,9 +567,11 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
+ netdev_lock_ops(dev);
netdev_update_features(dev);
- dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_mtu(dev, ipoib_cm_max_mtu(dev));
netif_set_real_num_tx_queues(dev, 1);
+ netdev_unlock_ops(dev);
rtnl_unlock();
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -543,9 +581,11 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
if (!strcmp(buf, "datagram\n")) {
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ netdev_lock_ops(dev);
netdev_update_features(dev);
- dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
+ netdev_unlock_ops(dev);
rtnl_unlock();
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
@@ -1212,6 +1252,7 @@ void ipoib_ib_tx_timeout_work(struct work_struct *work)
int err;
rtnl_lock();
+ netdev_lock_ops(priv->dev);
if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
goto unlock;
@@ -1226,6 +1267,7 @@ void ipoib_ib_tx_timeout_work(struct work_struct *work)
netif_tx_wake_all_queues(priv->dev);
unlock:
+ netdev_unlock_ops(priv->dev);
rtnl_unlock();
}
@@ -1992,9 +2034,9 @@ static int ipoib_ndo_init(struct net_device *ndev)
dev_hold(priv->parent);
- down_write(&ppriv->vlan_rwsem);
+ netdev_lock(priv->parent);
list_add_tail(&priv->list, &ppriv->child_intfs);
- up_write(&ppriv->vlan_rwsem);
+ netdev_unlock(priv->parent);
}
return 0;
@@ -2004,8 +2046,6 @@ static void ipoib_ndo_uninit(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- ASSERT_RTNL();
-
/*
* ipoib_remove_one guarantees the children are removed before the
* parent, and that is the only place where a parent can be removed.
@@ -2015,9 +2055,9 @@ static void ipoib_ndo_uninit(struct net_device *dev)
if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
- down_write(&ppriv->vlan_rwsem);
+ netdev_lock(ppriv->dev);
list_del(&priv->list);
- up_write(&ppriv->vlan_rwsem);
+ netdev_unlock(ppriv->dev);
}
ipoib_neigh_hash_uninit(dev);
@@ -2167,7 +2207,6 @@ static void ipoib_build_priv(struct net_device *dev)
priv->dev = dev;
spin_lock_init(&priv->lock);
- init_rwsem(&priv->vlan_rwsem);
mutex_init(&priv->mcast_mutex);
INIT_LIST_HEAD(&priv->path_list);
@@ -2372,10 +2411,10 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
netif_addr_unlock_bh(netdev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
- down_read(&priv->vlan_rwsem);
+ netdev_lock_ops_to_full(priv->dev);
list_for_each_entry(child_priv, &priv->child_intfs, list)
set_base_guid(child_priv, gid);
- up_read(&priv->vlan_rwsem);
+ netdev_unlock_full_to_ops(priv->dev);
}
}
@@ -2415,6 +2454,14 @@ static int ipoib_set_mac(struct net_device *dev, void *addr)
set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ struct ipoib_dev_priv *cpriv;
+
+ netdev_lock_ops_to_full(dev);
+ list_for_each_entry(cpriv, &priv->child_intfs, list)
+ queue_work(ipoib_workqueue, &cpriv->flush_light);
+ netdev_unlock_full_to_ops(dev);
+ }
queue_work(ipoib_workqueue, &priv->flush_light);
return 0;
@@ -2526,7 +2573,7 @@ static struct net_device *ipoib_add_port(const char *format,
ib_register_event_handler(&priv->event_handler);
/* call event handler to ensure pkey in sync */
- queue_work(ipoib_workqueue, &priv->flush_heavy);
+ ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY);
ndev->rtnl_link_ops = ipoib_get_link_ops();
@@ -2624,9 +2671,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
rtnl_lock();
+ netdev_lock(priv->dev);
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
list)
unregister_netdevice_queue(cpriv->dev, &head);
+ netdev_unlock(priv->dev);
unregister_netdevice_queue(priv->dev, &head);
unregister_netdevice_many(&head);
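Taken together, the hunks in this file replace priv->vlan_rwsem with the parent netdev's instance lock for protecting child_intfs. A distilled sketch of the two access patterns, assuming the netdev_lock helpers used above (visit() is a placeholder):

	/* Process context, instance lock not yet held: */
	netdev_lock(priv->dev);
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		visit(cpriv);
	netdev_unlock(priv->dev);

	/* Inside an ndo callback the ops lock is already held, so the
	 * code upgrades to the full instance lock instead: */
	netdev_lock_ops_to_full(priv->dev);
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		visit(cpriv);
	netdev_unlock_full_to_ops(priv->dev);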
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 368e5d77416d..86983080d28b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -280,15 +280,15 @@ void ipoib_event(struct ib_event_handler *handler,
dev_name(&record->device->dev), record->element.port_num);
if (record->event == IB_EVENT_CLIENT_REREGISTER) {
- queue_work(ipoib_workqueue, &priv->flush_light);
+ ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT);
} else if (record->event == IB_EVENT_PORT_ERR ||
record->event == IB_EVENT_PORT_ACTIVE ||
record->event == IB_EVENT_LID_CHANGE) {
- queue_work(ipoib_workqueue, &priv->flush_normal);
+ ipoib_queue_work(priv, IPOIB_FLUSH_NORMAL);
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
- queue_work(ipoib_workqueue, &priv->flush_heavy);
+ ipoib_queue_work(priv, IPOIB_FLUSH_HEAVY);
} else if (record->event == IB_EVENT_GID_CHANGE &&
!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
- queue_work(ipoib_workqueue, &priv->flush_light);
+ ipoib_queue_work(priv, IPOIB_FLUSH_LIGHT);
}
}
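ipoib_queue_work() is introduced elsewhere in this series, so its body is not visible in these hunks; from the call sites it takes a flush level and queues the matching per-device work item. A plausible sketch under that assumption (illustration only, not the actual implementation):

	void ipoib_queue_work(struct ipoib_dev_priv *priv,
			      enum ipoib_flush_level level)
	{
		struct work_struct *work = NULL;

		switch (level) {
		case IPOIB_FLUSH_LIGHT:
			work = &priv->flush_light;
			break;
		case IPOIB_FLUSH_NORMAL:
			work = &priv->flush_normal;
			break;
		case IPOIB_FLUSH_HEAVY:
			work = &priv->flush_heavy;
			break;
		}

		if (work)
			queue_work(ipoib_workqueue, work);
	}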
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 562df2b3ef18..243e8f555eca 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -53,8 +53,7 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv,
struct ipoib_dev_priv *priv)
{
struct ipoib_dev_priv *tpriv;
-
- ASSERT_RTNL();
+ bool result = true;
/*
* Since the legacy sysfs interface uses pkey for deletion it cannot
@@ -73,13 +72,17 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv,
if (ppriv->pkey == priv->pkey)
return false;
+ netdev_lock(ppriv->dev);
list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
if (tpriv->pkey == priv->pkey &&
- tpriv->child_type == IPOIB_LEGACY_CHILD)
- return false;
+ tpriv->child_type == IPOIB_LEGACY_CHILD) {
+ result = false;
+ break;
+ }
}
+ netdev_unlock(ppriv->dev);
- return true;
+ return result;
}
/*
@@ -98,8 +101,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
int result;
struct rdma_netdev *rn = netdev_priv(ndev);
- ASSERT_RTNL();
-
/*
* We do not need to touch priv if register_netdevice fails, so just
* always use this flow.
@@ -267,6 +268,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = ipoib_priv(pdev);
rc = -ENODEV;
+ netdev_lock(ppriv->dev);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -278,9 +280,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
goto out;
}
- down_write(&ppriv->vlan_rwsem);
list_del_init(&priv->list);
- up_write(&ppriv->vlan_rwsem);
work->dev = priv->dev;
INIT_WORK(&work->work, ipoib_vlan_delete_task);
queue_work(ipoib_workqueue, &work->work);
@@ -291,6 +291,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
}
out:
+ netdev_unlock(ppriv->dev);
rtnl_unlock();
return rc;
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 009822fa61b8..91636479ee3c 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -399,7 +399,7 @@ static void ml_play_effects(struct ml_device *ml)
static void ml_effect_timer(struct timer_list *t)
{
- struct ml_device *ml = from_timer(ml, t, timer);
+ struct ml_device *ml = timer_container_of(ml, t, timer);
struct input_dev *dev = ml->dev;
pr_debug("timer: updating effects\n");
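This and the following input-driver hunks are mechanical conversions from the old from_timer() name to timer_container_of(), which recovers the enclosing structure from its timer_list member, like container_of() spelled for timers. Minimal usage sketch (hypothetical struct):

	#include <linux/timer.h>

	struct example_dev {			/* hypothetical */
		struct timer_list timer;
		int pending;
	};

	static void example_timer_fn(struct timer_list *t)
	{
		/* Map the expired timer_list back to its container. */
		struct example_dev *dev = timer_container_of(dev, t, timer);

		dev->pending = 0;
	}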
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index ae51f108bfae..a832bc46bc92 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -199,7 +199,8 @@ EXPORT_SYMBOL(gameport_stop_polling);
static void gameport_run_poll_handler(struct timer_list *t)
{
- struct gameport *gameport = from_timer(gameport, t, poll_timer);
+ struct gameport *gameport = timer_container_of(gameport, t,
+ poll_timer);
gameport->poll_handler(gameport);
if (gameport->poll_cnt)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ec4346f20efd..44887e51e049 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2249,7 +2249,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
*/
static void input_repeat_key(struct timer_list *t)
{
- struct input_dev *dev = from_timer(dev, t, timer);
+ struct input_dev *dev = timer_container_of(dev, t, timer);
guard(spinlock_irqsave)(&dev->event_lock);
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index d7a253835889..d5c67a927404 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -351,7 +351,7 @@ static int db9_saturn(int mode, struct parport *port, struct input_dev *devs[])
static void db9_timer(struct timer_list *t)
{
- struct db9 *db9 = from_timer(db9, t, timer);
+ struct db9 *db9 = timer_container_of(db9, t, timer);
struct parport *port = db9->pd->port;
struct input_dev *dev = db9->dev[0];
struct input_dev *dev2 = db9->dev[1];
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 9fc629ad58b8..ae95cb3d0ae9 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -726,7 +726,7 @@ static void gc_psx_process_packet(struct gc *gc)
static void gc_timer(struct timer_list *t)
{
- struct gc *gc = from_timer(gc, t, timer);
+ struct gc *gc = timer_container_of(gc, t, timer);
/*
* N64 pads - must be read first, any read confuses them for 200 us
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index aa3e7d471b96..5f69aef01791 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -72,7 +72,7 @@ static struct tgfx {
static void tgfx_timer(struct timer_list *t)
{
- struct tgfx *tgfx = from_timer(tgfx, t, timer);
+ struct tgfx *tgfx = timer_container_of(tgfx, t, timer);
struct input_dev *dev;
int data1, data2, i;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 57a5ff3d1992..c066a4da7c14 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -105,6 +105,8 @@
#define PKT_XBE2_FW_5_EARLY 3
#define PKT_XBE2_FW_5_11 4
+#define FLAG_DELAY_INIT BIT(0)
+
static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
@@ -127,6 +129,7 @@ static const struct xpad_device {
char *name;
u8 mapping;
u8 xtype;
+ u8 flags;
} xpad_device[] = {
/* Please keep this list sorted by vendor and product ID. */
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
@@ -290,6 +293,8 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
+ { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
@@ -354,6 +359,7 @@ static const struct xpad_device {
{ 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x400b, "PowerA FUSION Pro 4 Wired Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x20d6, 0x890b, "PowerA MOGA XP-Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
@@ -413,6 +419,7 @@ static const struct xpad_device {
{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
+ { 0x366c, 0x0005, "ByoWave Proteus Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE, FLAG_DELAY_INIT },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
@@ -568,6 +575,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x366c), /* ByoWave controllers */
XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
@@ -596,6 +604,7 @@ struct xboxone_init_packet {
* - https://github.com/medusalix/xone/blob/master/bus/protocol.c
*/
#define GIP_CMD_ACK 0x01
+#define GIP_CMD_ANNOUNCE 0x02
#define GIP_CMD_IDENTIFY 0x04
#define GIP_CMD_POWER 0x05
#define GIP_CMD_AUTHENTICATE 0x06
@@ -670,20 +679,19 @@ static const u8 xboxone_hori_ack_id[] = {
};
/*
- * This packet is required for most (all?) of the PDP pads to start
- * sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
+ * This packet is sent by default on Windows, and is required for some pads to
+ * start sending input reports, including most (all?) of the PDP. These pads
+ * include: (0x0e6f:0x02ab), (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_led_on[] = {
- GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
-};
+static const u8 xboxone_led_on[] = {
+ GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14
+};
/*
* This packet is required for most (all?) of the PDP pads to start
* sending input reports. These pads include: (0x0e6f:0x02ab),
* (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
-static const u8 xboxone_pdp_auth[] = {
+static const u8 xboxone_auth_done[] = {
GIP_CMD_AUTHENTICATE, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(2), 0x01, 0x00
};
@@ -720,12 +728,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
- XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
- XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_auth),
- XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_led_on),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_auth_done),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
@@ -785,10 +789,13 @@ struct usb_xpad {
const char *name; /* name of the device */
struct work_struct work; /* init/remove device from callback */
time64_t mode_btn_down_ts;
+ bool delay_init; /* init packets should be delayed */
+ bool delayed_init_done;
};
static int xpad_init_input(struct usb_xpad *xpad);
static void xpad_deinit_input(struct usb_xpad *xpad);
+static int xpad_start_input(struct usb_xpad *xpad);
static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num);
static void xpad360w_poweroff_controller(struct usb_xpad *xpad);
@@ -1073,6 +1080,17 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
do_sync = true;
}
+ } else if (data[0] == GIP_CMD_ANNOUNCE) {
+ int error;
+
+ if (xpad->delay_init && !xpad->delayed_init_done) {
+ xpad->delayed_init_done = true;
+ error = xpad_start_input(xpad);
+ if (error)
+ dev_warn(&xpad->dev->dev,
+ "unable to start delayed input: %d\n",
+ error);
+ }
} else if (data[0] == GIP_CMD_INPUT) { /* The main valid packet type for inputs */
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & BIT(2));
@@ -1251,6 +1269,14 @@ static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad)
if (xpad->xtype != XTYPE_XBOXONE)
return false;
+ /*
+ * Some dongles will discard init packets if they're sent before the
+ * controller connects. In these cases, we need to wait until we get
+ * an announce packet from them to send the init packet sequence.
+ */
+ if (xpad->delay_init && !xpad->delayed_init_done)
+ return false;
+
/* Perform initialization sequence for Xbox One pads that require it */
while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) {
init_packet = &xboxone_init_packets[xpad->init_seq++];
@@ -2066,6 +2092,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping = xpad_device[i].mapping;
xpad->xtype = xpad_device[i].xtype;
xpad->name = xpad_device[i].name;
+ if (xpad_device[i].flags & FLAG_DELAY_INIT)
+ xpad->delay_init = true;
+
xpad->packet_type = PKT_XB;
INIT_WORK(&xpad->work, xpad_presence_work);
@@ -2265,6 +2294,7 @@ static int xpad_resume(struct usb_interface *intf)
struct usb_xpad *xpad = usb_get_intfdata(intf);
struct input_dev *input = xpad->dev;
+ xpad->delayed_init_done = false;
if (xpad->xtype == XTYPE_XBOX360W)
return xpad360w_start_input(xpad);
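The ByoWave Proteus (0x366c:0x0005) is the first user of FLAG_DELAY_INIT: its dongle discards init packets sent before the controller attaches, so the driver now waits for GIP_CMD_ANNOUNCE before running the init sequence, and re-arms the gate on resume. The control flow, distilled from the hunks above (condensed, not verbatim):

	/* in xpad_prepare_next_init_packet(): */
	if (xpad->delay_init && !xpad->delayed_init_done)
		return false;			/* wait for the announce packet */

	/* in the interrupt-in completion path, on GIP_CMD_ANNOUNCE: */
	if (xpad->delay_init && !xpad->delayed_init_done) {
		xpad->delayed_init_done = true;
		xpad_start_input(xpad);		/* now send the init packets */
	}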
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index adf0f311996c..3ff2fcf05ad5 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -37,7 +37,7 @@ static int atkbd_set = 2;
module_param_named(set, atkbd_set, int, 0);
MODULE_PARM_DESC(set, "Select keyboard code set (2 = default, 3 = PS/2 native)");
-#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__) || defined(__loongarch__)
static bool atkbd_reset;
#else
static bool atkbd_reset = true;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 5c39a217b94c..f9db86da0818 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -449,6 +449,8 @@ static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t)
release_timer);
struct input_dev *input = bdata->input;
+ guard(spinlock_irqsave)(&bdata->lock);
+
if (bdata->key_pressed) {
input_report_key(input, *bdata->code, 0);
input_sync(input);
@@ -486,7 +488,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
if (bdata->release_delay)
hrtimer_start(&bdata->release_timer,
ms_to_ktime(bdata->release_delay),
- HRTIMER_MODE_REL_HARD);
+ HRTIMER_MODE_REL);
out:
return IRQ_HANDLED;
}
@@ -628,7 +630,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
bdata->release_delay = button->debounce_interval;
hrtimer_setup(&bdata->release_timer, gpio_keys_irq_timer,
- CLOCK_REALTIME, HRTIMER_MODE_REL_HARD);
+ CLOCK_REALTIME, HRTIMER_MODE_REL);
isr = gpio_keys_irq_isr;
irqflags = 0;
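gpio_keys_irq_timer() now takes bdata->lock through the scope-based guard() helper from <linux/cleanup.h>, closing a race with the ISR; the hrtimer also moves from HRTIMER_MODE_REL_HARD to HRTIMER_MODE_REL, plausibly because a spinlock_t must not be taken from hard-irq hrtimer context on PREEMPT_RT. Minimal guard() usage sketch:

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static int shared_state;		/* protected by example_lock */

	static void touch_state(void)
	{
		/* Acquired with IRQs saved; released automatically at
		 * scope exit, including every early-return path. */
		guard(spinlock_irqsave)(&example_lock);
		shared_state++;
	}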
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 3cd47fa44efc..069c1d6376e1 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -183,7 +183,8 @@ static void imx_keypad_fire_events(struct imx_keypad *keypad,
*/
static void imx_keypad_check_for_events(struct timer_list *t)
{
- struct imx_keypad *keypad = from_timer(keypad, t, check_matrix_timer);
+ struct imx_keypad *keypad = timer_container_of(keypad, t,
+ check_matrix_timer);
unsigned short matrix_volatile_state[MAX_MATRIX_KEY_COLS];
unsigned short reg_val;
bool state_changed, is_zero_matrix;
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index c501a93a4417..58d4f2096cf9 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -194,7 +194,7 @@ static irqreturn_t locomokbd_interrupt(int irq, void *dev_id)
*/
static void locomokbd_timer_callback(struct timer_list *t)
{
- struct locomokbd *locomokbd = from_timer(locomokbd, t, timer);
+ struct locomokbd *locomokbd = timer_container_of(locomokbd, t, timer);
locomokbd_scankeyboard(locomokbd);
}
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index e46473cb817c..e50a6fea9a60 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -104,6 +104,16 @@ static void disable_row_irqs(struct matrix_keypad *keypad)
disable_irq_nosync(keypad->row_irqs[i]);
}
+static uint32_t read_row_state(struct matrix_keypad *keypad)
+{
+ int row;
+ u32 row_state = 0;
+
+ for (row = 0; row < keypad->num_row_gpios; row++)
+ row_state |= row_asserted(keypad, row) ? BIT(row) : 0;
+ return row_state;
+}
+
/*
* This gets the keys from keyboard and reports it to input subsystem
*/
@@ -115,6 +125,10 @@ static void matrix_keypad_scan(struct work_struct *work)
const unsigned short *keycodes = input_dev->keycode;
uint32_t new_state[MATRIX_MAX_COLS];
int row, col, code;
+ u32 init_row_state, new_row_state;
+
+ /* read initial row state to detect changes between scans */
+ init_row_state = read_row_state(keypad);
/* de-activate all columns for scanning */
activate_all_cols(keypad, false);
@@ -129,9 +143,7 @@ static void matrix_keypad_scan(struct work_struct *work)
activate_col(keypad, col, true);
- for (row = 0; row < keypad->num_row_gpios; row++)
- new_state[col] |=
- row_asserted(keypad, row) ? BIT(row) : 0;
+ new_state[col] = read_row_state(keypad);
activate_col(keypad, col, false);
}
@@ -165,6 +177,18 @@ static void matrix_keypad_scan(struct work_struct *work)
keypad->scan_pending = false;
enable_row_irqs(keypad);
}
+
+ /* read new row state and detect if value has changed */
+ new_row_state = read_row_state(keypad);
+ if (init_row_state != new_row_state) {
+ guard(spinlock_irq)(&keypad->lock);
+ if (unlikely(keypad->scan_pending || keypad->stopped))
+ return;
+ disable_row_irqs(keypad);
+ keypad->scan_pending = true;
+ schedule_delayed_work(&keypad->work,
+ msecs_to_jiffies(keypad->debounce_ms));
+ }
}
static irqreturn_t matrix_keypad_interrupt(int irq, void *id)
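matrix_keypad_scan() runs with row interrupts disabled, so a key that changed state mid-scan could previously be missed until the next unrelated interrupt. The new before/after snapshot closes that window: if the row bitmap differs once the scan finishes, another debounced scan is scheduled directly. The invariant, condensed (scan_matrix() is a placeholder; the real code also rechecks scan_pending/stopped under the lock):

	u32 before = read_row_state(keypad);

	scan_matrix(keypad);			/* rows scanned with IRQs off */

	if (before != read_row_state(keypad)) {
		keypad->scan_pending = true;
		schedule_delayed_work(&keypad->work,
				      msecs_to_jiffies(keypad->debounce_ms));
	}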
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index bbf409dda89f..954055aaf6e2 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -27,6 +27,8 @@
#define SNVS_HPSR_BTN BIT(6)
#define SNVS_LPSR_SPO BIT(18)
#define SNVS_LPCR_DEP_EN BIT(5)
+#define SNVS_LPCR_BPT_SHIFT 16
+#define SNVS_LPCR_BPT_MASK (3 << SNVS_LPCR_BPT_SHIFT)
#define DEBOUNCE_TIME 30
#define REPEAT_INTERVAL 60
@@ -44,7 +46,8 @@ struct pwrkey_drv_data {
static void imx_imx_snvs_check_for_events(struct timer_list *t)
{
- struct pwrkey_drv_data *pdata = from_timer(pdata, t, check_timer);
+ struct pwrkey_drv_data *pdata = timer_container_of(pdata, t,
+ check_timer);
struct input_dev *input = pdata->input;
u32 state;
@@ -114,6 +117,8 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
struct device_node *np;
struct clk *clk;
int error;
+ unsigned int val;
+ unsigned int bpt;
u32 vid;
/* Get SNVS register Page */
@@ -148,6 +153,27 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
if (pdata->irq < 0)
return -EINVAL;
+ error = of_property_read_u32(np, "power-off-time-sec", &val);
+ if (!error) {
+ switch (val) {
+ case 0:
+ bpt = 0x3;
+ break;
+ case 5:
+ case 10:
+ case 15:
+ bpt = (val / 5) - 1;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "power-off-time-sec %d out of range\n", val);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_BPT_MASK,
+ bpt << SNVS_LPCR_BPT_SHIFT);
+ }
+
regmap_read(pdata->snvs, SNVS_HPVIDR1_REG, &vid);
pdata->minor_rev = vid & 0xff;
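The new power-off-time-sec parsing packs the long-press power-off delay into the two-bit BPT field of LPCR: 5, 10 and 15 seconds encode as 0, 1 and 2, and 0 (feature disabled) encodes as 3. The same mapping as a standalone helper (sketch, hypothetical name):

	/* Returns the BPT encoding for power-off-time-sec, or -EINVAL. */
	static int snvs_seconds_to_bpt(u32 secs)
	{
		if (secs == 0)
			return 0x3;		/* button turn-off disabled */
		if (secs == 5 || secs == 10 || secs == 15)
			return (secs / 5) - 1;	/* 5 -> 0, 10 -> 1, 15 -> 2 */
		return -EINVAL;
	}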
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 32a676f0de53..bc1c80a456f2 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -240,7 +240,7 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
static void tegra_kbc_keypress_timer(struct timer_list *t)
{
- struct tegra_kbc *kbc = from_timer(kbc, t, timer);
+ struct tegra_kbc *kbc = timer_container_of(kbc, t, timer);
u32 val;
unsigned int i;
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index d9ee14b1f451..4581f1c53644 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -844,6 +844,12 @@ static int ims_pcu_flash_firmware(struct ims_pcu *pcu,
addr = be32_to_cpu(rec->addr) / 2;
len = be16_to_cpu(rec->len);
+ if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment)) {
+ dev_err(pcu->dev,
+ "Invalid record length in firmware: %d\n", len);
+ return -EINVAL;
+ }
+
fragment = (void *)&pcu->cmd_buf[1];
put_unaligned_le32(addr, &fragment->addr);
fragment->len = len;
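The added check rejects firmware records whose payload cannot fit in the fixed command buffer alongside the one-byte command and the fragment header, turning a potential buffer overflow into -EINVAL. Generic shape of the check (names hypothetical; buf must be a fixed-size array for sizeof() to work):

	/* buf layout: [1-byte cmd][struct frag_hdr][payload of len bytes] */
	if (len > sizeof(buf) - 1 - sizeof(struct frag_hdr))
		return -EINVAL;			/* reject before any copy */
	memcpy(buf + 1 + sizeof(struct frag_hdr), payload, len);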
diff --git a/drivers/input/misc/nxp-bbnsm-pwrkey.c b/drivers/input/misc/nxp-bbnsm-pwrkey.c
index 7ba8d166d68c..0c7b8f8ef4a5 100644
--- a/drivers/input/misc/nxp-bbnsm-pwrkey.c
+++ b/drivers/input/misc/nxp-bbnsm-pwrkey.c
@@ -45,7 +45,7 @@ struct bbnsm_pwrkey {
static void bbnsm_pwrkey_check_for_events(struct timer_list *t)
{
- struct bbnsm_pwrkey *bbnsm = from_timer(bbnsm, t, check_timer);
+ struct bbnsm_pwrkey *bbnsm = timer_container_of(bbnsm, t, check_timer);
struct input_dev *input = bbnsm->input;
u32 state;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 0bd7b09b0aa3..be734d65ea72 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1582,7 +1582,7 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
static void alps_flush_packet(struct timer_list *t)
{
- struct alps_data *priv = from_timer(priv, t, timer);
+ struct alps_data *priv = timer_container_of(priv, t, timer);
struct psmouse *psmouse = priv->psmouse;
guard(serio_pause_rx)(psmouse->ps2dev.serio);
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
index 4ee084e00a7c..71aa23dd7d8d 100644
--- a/drivers/input/mouse/byd.c
+++ b/drivers/input/mouse/byd.c
@@ -251,7 +251,7 @@ static void byd_report_input(struct psmouse *psmouse)
static void byd_clear_touch(struct timer_list *t)
{
- struct byd_data *priv = from_timer(priv, t, timer);
+ struct byd_data *priv = timer_container_of(priv, t, timer);
struct psmouse *psmouse = priv->psmouse;
guard(serio_pause_rx)(psmouse->ps2dev.serio);
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index d760af4cc12e..f1947f03b06a 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -4,6 +4,7 @@
* Copyright (C) 2016 Zodiac Inflight Innovations
*/
+#include "linux/device.h"
#include <linux/kernel.h>
#include <linux/rmi.h>
#include <linux/firmware.h>
@@ -289,39 +290,30 @@ static int rmi_f34_update_firmware(struct f34_data *f34,
return rmi_f34_flash_firmware(f34, syn_fw);
}
-static int rmi_f34_status(struct rmi_function *fn)
-{
- struct f34_data *f34 = dev_get_drvdata(&fn->dev);
-
- /*
- * The status is the percentage complete, or once complete,
- * zero for success or a negative return code.
- */
- return f34->update_status;
-}
-
static ssize_t rmi_driver_bootloader_id_show(struct device *dev,
struct device_attribute *dattr,
char *buf)
{
struct rmi_driver_data *data = dev_get_drvdata(dev);
- struct rmi_function *fn = data->f34_container;
+ struct rmi_function *fn;
struct f34_data *f34;
- if (fn) {
- f34 = dev_get_drvdata(&fn->dev);
-
- if (f34->bl_version == 5)
- return sysfs_emit(buf, "%c%c\n",
- f34->bootloader_id[0],
- f34->bootloader_id[1]);
- else
- return sysfs_emit(buf, "V%d.%d\n",
- f34->bootloader_id[1],
- f34->bootloader_id[0]);
- }
+ fn = data->f34_container;
+ if (!fn)
+ return -ENODEV;
- return 0;
+ f34 = dev_get_drvdata(&fn->dev);
+ if (!f34)
+ return -ENODEV;
+
+ if (f34->bl_version == 5)
+ return sysfs_emit(buf, "%c%c\n",
+ f34->bootloader_id[0],
+ f34->bootloader_id[1]);
+ else
+ return sysfs_emit(buf, "V%d.%d\n",
+ f34->bootloader_id[1],
+ f34->bootloader_id[0]);
}
static DEVICE_ATTR(bootloader_id, 0444, rmi_driver_bootloader_id_show, NULL);
@@ -334,13 +326,16 @@ static ssize_t rmi_driver_configuration_id_show(struct device *dev,
struct rmi_function *fn = data->f34_container;
struct f34_data *f34;
- if (fn) {
- f34 = dev_get_drvdata(&fn->dev);
+ fn = data->f34_container;
+ if (!fn)
+ return -ENODEV;
+
+ f34 = dev_get_drvdata(&fn->dev);
+ if (!f34)
+ return -ENODEV;
- return sysfs_emit(buf, "%s\n", f34->configuration_id);
- }
- return 0;
+ return sysfs_emit(buf, "%s\n", f34->configuration_id);
}
static DEVICE_ATTR(configuration_id, 0444,
@@ -356,10 +351,14 @@ static int rmi_firmware_update(struct rmi_driver_data *data,
if (!data->f34_container) {
dev_warn(dev, "%s: No F34 present!\n", __func__);
- return -EINVAL;
+ return -ENODEV;
}
f34 = dev_get_drvdata(&data->f34_container->dev);
+ if (!f34) {
+ dev_warn(dev, "%s: No valid F34 present!\n", __func__);
+ return -ENODEV;
+ }
if (f34->bl_version >= 7) {
if (data->pdt_props & HAS_BSR) {
@@ -485,10 +484,18 @@ static ssize_t rmi_driver_update_fw_status_show(struct device *dev,
char *buf)
{
struct rmi_driver_data *data = dev_get_drvdata(dev);
- int update_status = 0;
+ struct f34_data *f34;
+ int update_status = -ENODEV;
- if (data->f34_container)
- update_status = rmi_f34_status(data->f34_container);
+ /*
+ * The status is the percentage complete, or once complete,
+ * zero for success or a negative return code.
+ */
+ if (data->f34_container) {
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+ if (f34)
+ update_status = f34->update_status;
+ }
return sysfs_emit(buf, "%d\n", update_status);
}
@@ -508,33 +515,21 @@ static const struct attribute_group rmi_firmware_attr_group = {
.attrs = rmi_firmware_attrs,
};
-static int rmi_f34_probe(struct rmi_function *fn)
+static int rmi_f34v5_probe(struct f34_data *f34)
{
- struct f34_data *f34;
- unsigned char f34_queries[9];
+ struct rmi_function *fn = f34->fn;
+ u8 f34_queries[9];
bool has_config_id;
- u8 version = fn->fd.function_version;
- int ret;
-
- f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
- if (!f34)
- return -ENOMEM;
-
- f34->fn = fn;
- dev_set_drvdata(&fn->dev, f34);
-
- /* v5 code only supported version 0, try V7 probe */
- if (version > 0)
- return rmi_f34v7_probe(f34);
+ int error;
f34->bl_version = 5;
- ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
- f34_queries, sizeof(f34_queries));
- if (ret) {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (error) {
dev_err(&fn->dev, "%s: Failed to query properties\n",
__func__);
- return ret;
+ return error;
}
snprintf(f34->bootloader_id, sizeof(f34->bootloader_id),
@@ -560,11 +555,11 @@ static int rmi_f34_probe(struct rmi_function *fn)
f34->v5.config_blocks);
if (has_config_id) {
- ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
- f34_queries, sizeof(f34_queries));
- if (ret) {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (error) {
dev_err(&fn->dev, "Failed to read F34 config ID\n");
- return ret;
+ return error;
}
snprintf(f34->configuration_id, sizeof(f34->configuration_id),
@@ -573,12 +568,34 @@ static int rmi_f34_probe(struct rmi_function *fn)
f34_queries[2], f34_queries[3]);
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n",
- f34->configuration_id);
+ f34->configuration_id);
}
return 0;
}
+static int rmi_f34_probe(struct rmi_function *fn)
+{
+ struct f34_data *f34;
+ u8 version = fn->fd.function_version;
+ int error;
+
+ f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
+ if (!f34)
+ return -ENOMEM;
+
+ f34->fn = fn;
+
+ /* v5 code only supported version 0 */
+ error = version == 0 ? rmi_f34v5_probe(f34) : rmi_f34v7_probe(f34);
+ if (error)
+ return error;
+
+ dev_set_drvdata(&fn->dev, f34);
+
+ return 0;
+}
+
int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
{
return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
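The common thread of this rework: dev_set_drvdata() is now called only after the version-specific probe succeeds, so every sysfs handler can treat NULL drvdata as "no usable F34" and return -ENODEV instead of quietly emitting nothing. The resulting handler shape, condensed (sketch; the real attributes live on the RMI device and reach F34 through data->f34_container):

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct f34_data *f34 = dev_get_drvdata(dev);

		if (!f34)
			return -ENODEV;		/* probe failed or not finished */

		return sysfs_emit(buf, "%d\n", f34->update_status);
	}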
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 8d8392ce7005..c9aa1847265a 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -375,7 +375,7 @@ static inline void ad7877_ts_event_release(struct ad7877 *ts)
static void ad7877_timer(struct timer_list *t)
{
- struct ad7877 *ts = from_timer(ts, t, timer);
+ struct ad7877 *ts = timer_container_of(ts, t, timer);
unsigned long flags;
spin_lock_irqsave(&ts->lock, flags);
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index f661e199b63c..f9db5cefb25b 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -237,7 +237,7 @@ static void ad7879_ts_event_release(struct ad7879 *ts)
static void ad7879_timer(struct timer_list *t)
{
- struct ad7879 *ts = from_timer(ts, t, timer);
+ struct ad7879 *ts = timer_container_of(ts, t, timer);
ad7879_ts_event_release(ts);
}
diff --git a/drivers/input/touchscreen/bu21029_ts.c b/drivers/input/touchscreen/bu21029_ts.c
index 3c997fba7048..64f474e67312 100644
--- a/drivers/input/touchscreen/bu21029_ts.c
+++ b/drivers/input/touchscreen/bu21029_ts.c
@@ -209,7 +209,8 @@ static void bu21029_touch_report(struct bu21029_ts_data *bu21029, const u8 *buf)
static void bu21029_touch_release(struct timer_list *t)
{
- struct bu21029_ts_data *bu21029 = from_timer(bu21029, t, timer);
+ struct bu21029_ts_data *bu21029 = timer_container_of(bu21029, t,
+ timer);
input_report_abs(bu21029->in_dev, ABS_PRESSURE, 0);
input_report_key(bu21029->in_dev, BTN_TOUCH, 0);
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index 9a5977d8cad2..28da7ba55a4b 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -105,7 +105,7 @@ static void exc3000_report_slots(struct input_dev *input,
static void exc3000_timer(struct timer_list *t)
{
- struct exc3000_data *data = from_timer(data, t, timer);
+ struct exc3000_data *data = timer_container_of(data, t, timer);
input_mt_sync_frame(data->input);
input_sync(data->input);
diff --git a/drivers/input/touchscreen/sx8654.c b/drivers/input/touchscreen/sx8654.c
index e59b8d0ed19e..5fa47a1a6fdc 100644
--- a/drivers/input/touchscreen/sx8654.c
+++ b/drivers/input/touchscreen/sx8654.c
@@ -116,7 +116,7 @@ static inline void sx865x_penrelease(struct sx8654 *ts)
static void sx865x_penrelease_timer_handler(struct timer_list *t)
{
- struct sx8654 *ts = from_timer(ts, t, timer);
+ struct sx8654 *ts = timer_container_of(ts, t, timer);
unsigned long flags;
spin_lock_irqsave(&ts->lock, flags);
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 252a93753ee5..82d7d1cf5010 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -194,7 +194,7 @@ out:
static void tsc200x_penup_timer(struct timer_list *t)
{
- struct tsc200x *ts = from_timer(ts, t, penup_timer);
+ struct tsc200x *ts = timer_container_of(ts, t, penup_timer);
guard(spinlock_irqsave)(&ts->lock);
tsc200x_update_pen_state(ts, 0, 0, 0);
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 9d5404a07e8a..1a41e59c77f8 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -20,6 +20,8 @@
#include "internal.h"
+#define ICC_DYN_ID_START 10000
+
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -826,7 +828,12 @@ static struct icc_node *icc_node_create_nolock(int id)
if (!node)
return ERR_PTR(-ENOMEM);
- id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
+ /* dynamic id allocation */
+ if (id == ICC_ALLOC_DYN_ID)
+ id = idr_alloc(&icc_idr, node, ICC_DYN_ID_START, 0, GFP_KERNEL);
+ else
+ id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
+
if (id < 0) {
WARN(1, "%s: couldn't get idr\n", __func__);
kfree(node);
@@ -839,6 +846,25 @@ static struct icc_node *icc_node_create_nolock(int id)
}
/**
+ * icc_node_create_dyn() - create a node with dynamic id
+ *
+ * Return: icc_node pointer on success, or ERR_PTR() on error
+ */
+struct icc_node *icc_node_create_dyn(void)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = icc_node_create_nolock(ICC_ALLOC_DYN_ID);
+
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(icc_node_create_dyn);
+
+/**
* icc_node_create() - create a node
* @id: node id
*
@@ -885,6 +911,56 @@ void icc_node_destroy(int id)
EXPORT_SYMBOL_GPL(icc_node_destroy);
/**
+ * icc_link_nodes() - create link between two nodes
+ * @src_node: source node
+ * @dst_node: destination node
+ *
+ * Create a link between two nodes. The nodes might belong to different
+ * interconnect providers, and @dst_node might not exist yet (if its
+ * provider driver has not probed). In that case @dst_node is created
+ * here, and the rest of its data is filled in once the actual
+ * provider driver probes.
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node)
+{
+ struct icc_node **new;
+ int ret = 0;
+
+ if (!src_node->provider)
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ if (!*dst_node) {
+ *dst_node = icc_node_create_nolock(ICC_ALLOC_DYN_ID);
+
+ if (IS_ERR(*dst_node)) {
+ ret = PTR_ERR(*dst_node);
+ goto out;
+ }
+ }
+
+ new = krealloc(src_node->links,
+ (src_node->num_links + 1) * sizeof(*src_node->links),
+ GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ src_node->links = new;
+ src_node->links[src_node->num_links++] = *dst_node;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_nodes);
+
+/**
* icc_link_create() - create a link between two nodes
* @node: source node id
* @dst_id: destination node id
@@ -962,6 +1038,10 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
node->avg_bw = node->init_avg;
node->peak_bw = node->init_peak;
+ if (node->id >= ICC_DYN_ID_START)
+ node->name = devm_kasprintf(provider->dev, GFP_KERNEL, "%s@%s",
+ node->name, dev_name(provider->dev));
+
if (node->avg_bw || node->peak_bw) {
if (provider->pre_aggregate)
provider->pre_aggregate(node);
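The dynamic path leans on idr_alloc()'s convention that an end of 0 (or any value <= 0) means "no upper bound" (one past INT_MAX), so dynamic IDs count up from ICC_DYN_ID_START while explicit IDs keep their exact [id, id + 1) slot. Both calls side by side (minimal sketch):

	#include <linux/idr.h>

	static DEFINE_IDR(example_idr);

	static int alloc_ids(void *node)
	{
		int dyn, fixed;

		/* any free ID >= 10000 */
		dyn = idr_alloc(&example_idr, node, 10000, 0, GFP_KERNEL);
		/* exactly 42, or failure if taken */
		fixed = idr_alloc(&example_idr, node, 42, 43, GFP_KERNEL);

		return dyn < 0 ? dyn : fixed;
	}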
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index f2d63745be54..41bfc6e7ee1d 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -280,7 +280,14 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
if (!qn)
continue;
- node = icc_node_create(qn->id);
+ if (desc->alloc_dyn_id) {
+ if (!qn->node)
+ qn->node = icc_node_create_dyn();
+ node = qn->node;
+ } else {
+ node = icc_node_create(qn->id);
+ }
+
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err_remove_nodes;
@@ -290,8 +297,12 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
node->data = qn;
icc_node_add(node, provider);
- for (j = 0; j < qn->num_links; j++)
- icc_link_create(node, qn->links[j]);
+ for (j = 0; j < qn->num_links; j++) {
+ if (desc->alloc_dyn_id)
+ icc_link_nodes(node, &qn->link_nodes[j]->node);
+ else
+ icc_link_create(node, qn->links[j]);
+ }
data->nodes[i] = node;
}
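With desc->alloc_dyn_id set, a provider never handles numeric IDs: nodes come from icc_node_create_dyn() and topology from icc_link_nodes(), which lazily creates the destination node if its own provider has not probed yet. A condensed provider-side sketch (hypothetical names, error handling trimmed):

	static struct icc_node *src_icc, *dst_icc;	/* dst may be filled later */

	static int example_provider_probe(struct icc_provider *provider)
	{
		src_icc = icc_node_create_dyn();
		if (IS_ERR(src_icc))
			return PTR_ERR(src_icc);

		src_icc->name = "example_master";	/* core appends @dev_name */
		icc_node_add(src_icc, provider);

		/* dst_icc may still be NULL; icc_link_nodes() creates it. */
		return icc_link_nodes(src_icc, &dst_icc);
	}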
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index 82344c734091..bd8d730249b1 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -83,6 +83,8 @@ struct qcom_icc_qosbox {
* @name: the node name used in debugfs
* @links: an array of nodes where we can go next while traversing
* @id: a unique node identifier
+ * @link_nodes: an array of pointers to the nodes linked from this node
+ * @node: icc_node associated with this node
* @num_links: the total number of @links
* @channels: num of channels at this node
* @buswidth: width of the interconnect between a node and the bus
@@ -96,6 +98,8 @@ struct qcom_icc_node {
const char *name;
u16 links[MAX_LINKS];
u16 id;
+ struct qcom_icc_node **link_nodes;
+ struct icc_node *node;
u16 num_links;
u16 channels;
u16 buswidth;
@@ -154,6 +158,7 @@ struct qcom_icc_desc {
struct qcom_icc_bcm * const *bcms;
size_t num_bcms;
bool qos_requires_clocks;
+ bool alloc_dyn_id;
};
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index 6a656ed44d49..baecbf2533f7 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/args.h>
@@ -32,8 +33,6 @@
#define EPSS_REG_FREQ_LUT 0x100
#define EPSS_REG_PERF_STATE 0x320
-#define OSM_L3_MAX_LINKS 1
-
#define to_osm_l3_provider(_provider) \
container_of(_provider, struct qcom_osm_l3_icc_provider, provider)
@@ -48,16 +47,10 @@ struct qcom_osm_l3_icc_provider {
/**
* struct qcom_osm_l3_node - Qualcomm specific interconnect nodes
* @name: the node name used in debugfs
- * @links: an array of nodes where we can go next while traversing
- * @id: a unique node identifier
- * @num_links: the total number of @links
* @buswidth: width of the interconnect between a node and the bus
*/
struct qcom_osm_l3_node {
const char *name;
- u16 links[OSM_L3_MAX_LINKS];
- u16 id;
- u16 num_links;
u16 buswidth;
};
@@ -69,30 +62,22 @@ struct qcom_osm_l3_desc {
unsigned int reg_perf_state;
};
-enum {
- OSM_L3_MASTER_NODE = 10000,
- OSM_L3_SLAVE_NODE,
-};
-
-#define DEFINE_QNODE(_name, _id, _buswidth, ...) \
+#define DEFINE_QNODE(_name, _buswidth) \
static const struct qcom_osm_l3_node _name = { \
.name = #_name, \
- .id = _id, \
.buswidth = _buswidth, \
- .num_links = COUNT_ARGS(__VA_ARGS__), \
- .links = { __VA_ARGS__ }, \
}
-DEFINE_QNODE(osm_l3_master, OSM_L3_MASTER_NODE, 16, OSM_L3_SLAVE_NODE);
-DEFINE_QNODE(osm_l3_slave, OSM_L3_SLAVE_NODE, 16);
+DEFINE_QNODE(osm_l3_slave, 16);
+DEFINE_QNODE(osm_l3_master, 16);
static const struct qcom_osm_l3_node * const osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &osm_l3_master,
[SLAVE_OSM_L3] = &osm_l3_slave,
};
-DEFINE_QNODE(epss_l3_master, OSM_L3_MASTER_NODE, 32, OSM_L3_SLAVE_NODE);
-DEFINE_QNODE(epss_l3_slave, OSM_L3_SLAVE_NODE, 32);
+DEFINE_QNODE(epss_l3_slave, 32);
+DEFINE_QNODE(epss_l3_master, 32);
static const struct qcom_osm_l3_node * const epss_l3_nodes[] = {
[MASTER_EPSS_L3_APPS] = &epss_l3_master,
@@ -242,10 +227,10 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
icc_provider_init(provider);
+ /* Create nodes */
for (i = 0; i < num_nodes; i++) {
- size_t j;
+ node = icc_node_create_dyn();
- node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err;
@@ -256,12 +241,12 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
node->data = (void *)qnodes[i];
icc_node_add(node, provider);
- for (j = 0; j < qnodes[i]->num_links; j++)
- icc_link_create(node, qnodes[i]->links[j]);
-
data->nodes[i] = node;
}
+ /* Create link */
+ icc_link_nodes(data->nodes[MASTER_OSM_L3_APPS], &data->nodes[SLAVE_OSM_L3]);
+
ret = icc_provider_register(provider);
if (ret)
goto err;
@@ -278,6 +263,7 @@ err:
static const struct of_device_id osm_l3_of_match[] = {
{ .compatible = "qcom,epss-l3", .data = &epss_l3_l3_vote },
{ .compatible = "qcom,osm-l3", .data = &osm_l3 },
+ { .compatible = "qcom,sa8775p-epss-l3", .data = &epss_l3_perf_state },
{ .compatible = "qcom,sc7180-osm-l3", .data = &osm_l3 },
{ .compatible = "qcom,sc7280-epss-l3", .data = &epss_l3_perf_state },
{ .compatible = "qcom,sdm845-osm-l3", .data = &osm_l3 },
diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c
index e2826af3ea2e..04b4abbf4487 100644
--- a/drivers/interconnect/qcom/sa8775p.c
+++ b/drivers/interconnect/qcom/sa8775p.c
@@ -15,1859 +15,1587 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#define SA8775P_MASTER_GPU_TCU 0
-#define SA8775P_MASTER_PCIE_TCU 1
-#define SA8775P_MASTER_SYS_TCU 2
-#define SA8775P_MASTER_APPSS_PROC 3
-#define SA8775P_MASTER_LLCC 4
-#define SA8775P_MASTER_CNOC_LPASS_AG_NOC 5
-#define SA8775P_MASTER_GIC_AHB 6
-#define SA8775P_MASTER_CDSP_NOC_CFG 7
-#define SA8775P_MASTER_CDSPB_NOC_CFG 8
-#define SA8775P_MASTER_QDSS_BAM 9
-#define SA8775P_MASTER_QUP_0 10
-#define SA8775P_MASTER_QUP_1 11
-#define SA8775P_MASTER_QUP_2 12
-#define SA8775P_MASTER_A1NOC_SNOC 13
-#define SA8775P_MASTER_A2NOC_SNOC 14
-#define SA8775P_MASTER_CAMNOC_HF 15
-#define SA8775P_MASTER_CAMNOC_ICP 16
-#define SA8775P_MASTER_CAMNOC_SF 17
-#define SA8775P_MASTER_COMPUTE_NOC 18
-#define SA8775P_MASTER_COMPUTE_NOC_1 19
-#define SA8775P_MASTER_CNOC_A2NOC 20
-#define SA8775P_MASTER_CNOC_DC_NOC 21
-#define SA8775P_MASTER_GEM_NOC_CFG 22
-#define SA8775P_MASTER_GEM_NOC_CNOC 23
-#define SA8775P_MASTER_GEM_NOC_PCIE_SNOC 24
-#define SA8775P_MASTER_GPDSP_SAIL 25
-#define SA8775P_MASTER_GFX3D 26
-#define SA8775P_MASTER_LPASS_ANOC 27
-#define SA8775P_MASTER_MDP0 28
-#define SA8775P_MASTER_MDP1 29
-#define SA8775P_MASTER_MDP_CORE1_0 30
-#define SA8775P_MASTER_MDP_CORE1_1 31
-#define SA8775P_MASTER_MNOC_HF_MEM_NOC 32
-#define SA8775P_MASTER_CNOC_MNOC_HF_CFG 33
-#define SA8775P_MASTER_MNOC_SF_MEM_NOC 34
-#define SA8775P_MASTER_CNOC_MNOC_SF_CFG 35
-#define SA8775P_MASTER_ANOC_PCIE_GEM_NOC 36
-#define SA8775P_MASTER_SNOC_CFG 37
-#define SA8775P_MASTER_SNOC_GC_MEM_NOC 38
-#define SA8775P_MASTER_SNOC_SF_MEM_NOC 39
-#define SA8775P_MASTER_VIDEO_P0 40
-#define SA8775P_MASTER_VIDEO_P1 41
-#define SA8775P_MASTER_VIDEO_PROC 42
-#define SA8775P_MASTER_VIDEO_V_PROC 43
-#define SA8775P_MASTER_QUP_CORE_0 44
-#define SA8775P_MASTER_QUP_CORE_1 45
-#define SA8775P_MASTER_QUP_CORE_2 46
-#define SA8775P_MASTER_QUP_CORE_3 47
-#define SA8775P_MASTER_CRYPTO_CORE0 48
-#define SA8775P_MASTER_CRYPTO_CORE1 49
-#define SA8775P_MASTER_DSP0 50
-#define SA8775P_MASTER_DSP1 51
-#define SA8775P_MASTER_IPA 52
-#define SA8775P_MASTER_LPASS_PROC 53
-#define SA8775P_MASTER_CDSP_PROC 54
-#define SA8775P_MASTER_CDSP_PROC_B 55
-#define SA8775P_MASTER_PIMEM 56
-#define SA8775P_MASTER_QUP_3 57
-#define SA8775P_MASTER_EMAC 58
-#define SA8775P_MASTER_EMAC_1 59
-#define SA8775P_MASTER_GIC 60
-#define SA8775P_MASTER_PCIE_0 61
-#define SA8775P_MASTER_PCIE_1 62
-#define SA8775P_MASTER_QDSS_ETR_0 63
-#define SA8775P_MASTER_QDSS_ETR_1 64
-#define SA8775P_MASTER_SDC 65
-#define SA8775P_MASTER_UFS_CARD 66
-#define SA8775P_MASTER_UFS_MEM 67
-#define SA8775P_MASTER_USB2 68
-#define SA8775P_MASTER_USB3_0 69
-#define SA8775P_MASTER_USB3_1 70
-#define SA8775P_SLAVE_EBI1 512
-#define SA8775P_SLAVE_AHB2PHY_0 513
-#define SA8775P_SLAVE_AHB2PHY_1 514
-#define SA8775P_SLAVE_AHB2PHY_2 515
-#define SA8775P_SLAVE_AHB2PHY_3 516
-#define SA8775P_SLAVE_ANOC_THROTTLE_CFG 517
-#define SA8775P_SLAVE_AOSS 518
-#define SA8775P_SLAVE_APPSS 519
-#define SA8775P_SLAVE_BOOT_ROM 520
-#define SA8775P_SLAVE_CAMERA_CFG 521
-#define SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG 522
-#define SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG 523
-#define SA8775P_SLAVE_CLK_CTL 524
-#define SA8775P_SLAVE_CDSP_CFG 525
-#define SA8775P_SLAVE_CDSP1_CFG 526
-#define SA8775P_SLAVE_RBCPR_CX_CFG 527
-#define SA8775P_SLAVE_RBCPR_MMCX_CFG 528
-#define SA8775P_SLAVE_RBCPR_MX_CFG 529
-#define SA8775P_SLAVE_CPR_NSPCX 530
-#define SA8775P_SLAVE_CRYPTO_0_CFG 531
-#define SA8775P_SLAVE_CX_RDPM 532
-#define SA8775P_SLAVE_DISPLAY_CFG 533
-#define SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG 534
-#define SA8775P_SLAVE_DISPLAY1_CFG 535
-#define SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG 536
-#define SA8775P_SLAVE_EMAC_CFG 537
-#define SA8775P_SLAVE_EMAC1_CFG 538
-#define SA8775P_SLAVE_GP_DSP0_CFG 539
-#define SA8775P_SLAVE_GP_DSP1_CFG 540
-#define SA8775P_SLAVE_GPDSP0_THROTTLE_CFG 541
-#define SA8775P_SLAVE_GPDSP1_THROTTLE_CFG 542
-#define SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG 543
-#define SA8775P_SLAVE_GFX3D_CFG 544
-#define SA8775P_SLAVE_HWKM 545
-#define SA8775P_SLAVE_IMEM_CFG 546
-#define SA8775P_SLAVE_IPA_CFG 547
-#define SA8775P_SLAVE_IPC_ROUTER_CFG 548
-#define SA8775P_SLAVE_LLCC_CFG 549
-#define SA8775P_SLAVE_LPASS 550
-#define SA8775P_SLAVE_LPASS_CORE_CFG 551
-#define SA8775P_SLAVE_LPASS_LPI_CFG 552
-#define SA8775P_SLAVE_LPASS_MPU_CFG 553
-#define SA8775P_SLAVE_LPASS_THROTTLE_CFG 554
-#define SA8775P_SLAVE_LPASS_TOP_CFG 555
-#define SA8775P_SLAVE_MX_RDPM 556
-#define SA8775P_SLAVE_MXC_RDPM 557
-#define SA8775P_SLAVE_PCIE_0_CFG 558
-#define SA8775P_SLAVE_PCIE_1_CFG 559
-#define SA8775P_SLAVE_PCIE_RSC_CFG 560
-#define SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG 561
-#define SA8775P_SLAVE_PCIE_THROTTLE_CFG 562
-#define SA8775P_SLAVE_PDM 563
-#define SA8775P_SLAVE_PIMEM_CFG 564
-#define SA8775P_SLAVE_PKA_WRAPPER_CFG 565
-#define SA8775P_SLAVE_QDSS_CFG 566
-#define SA8775P_SLAVE_QM_CFG 567
-#define SA8775P_SLAVE_QM_MPU_CFG 568
-#define SA8775P_SLAVE_QUP_0 569
-#define SA8775P_SLAVE_QUP_1 570
-#define SA8775P_SLAVE_QUP_2 571
-#define SA8775P_SLAVE_QUP_3 572
-#define SA8775P_SLAVE_SAIL_THROTTLE_CFG 573
-#define SA8775P_SLAVE_SDC1 574
-#define SA8775P_SLAVE_SECURITY 575
-#define SA8775P_SLAVE_SNOC_THROTTLE_CFG 576
-#define SA8775P_SLAVE_TCSR 577
-#define SA8775P_SLAVE_TLMM 578
-#define SA8775P_SLAVE_TSC_CFG 579
-#define SA8775P_SLAVE_UFS_CARD_CFG 580
-#define SA8775P_SLAVE_UFS_MEM_CFG 581
-#define SA8775P_SLAVE_USB2 582
-#define SA8775P_SLAVE_USB3_0 583
-#define SA8775P_SLAVE_USB3_1 584
-#define SA8775P_SLAVE_VENUS_CFG 585
-#define SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG 586
-#define SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG 587
-#define SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG 588
-#define SA8775P_SLAVE_A1NOC_SNOC 589
-#define SA8775P_SLAVE_A2NOC_SNOC 590
-#define SA8775P_SLAVE_DDRSS_CFG 591
-#define SA8775P_SLAVE_GEM_NOC_CNOC 592
-#define SA8775P_SLAVE_GEM_NOC_CFG 593
-#define SA8775P_SLAVE_SNOC_GEM_NOC_GC 594
-#define SA8775P_SLAVE_SNOC_GEM_NOC_SF 595
-#define SA8775P_SLAVE_GP_DSP_SAIL_NOC 596
-#define SA8775P_SLAVE_GPDSP_NOC_CFG 597
-#define SA8775P_SLAVE_HCP_A 598
-#define SA8775P_SLAVE_LLCC 599
-#define SA8775P_SLAVE_MNOC_HF_MEM_NOC 600
-#define SA8775P_SLAVE_MNOC_SF_MEM_NOC 601
-#define SA8775P_SLAVE_CNOC_MNOC_HF_CFG 602
-#define SA8775P_SLAVE_CNOC_MNOC_SF_CFG 603
-#define SA8775P_SLAVE_CDSP_MEM_NOC 604
-#define SA8775P_SLAVE_CDSPB_MEM_NOC 605
-#define SA8775P_SLAVE_HCP_B 606
-#define SA8775P_SLAVE_GEM_NOC_PCIE_CNOC 607
-#define SA8775P_SLAVE_PCIE_ANOC_CFG 608
-#define SA8775P_SLAVE_ANOC_PCIE_GEM_NOC 609
-#define SA8775P_SLAVE_SNOC_CFG 610
-#define SA8775P_SLAVE_LPASS_SNOC 611
-#define SA8775P_SLAVE_QUP_CORE_0 612
-#define SA8775P_SLAVE_QUP_CORE_1 613
-#define SA8775P_SLAVE_QUP_CORE_2 614
-#define SA8775P_SLAVE_QUP_CORE_3 615
-#define SA8775P_SLAVE_BOOT_IMEM 616
-#define SA8775P_SLAVE_IMEM 617
-#define SA8775P_SLAVE_PIMEM 618
-#define SA8775P_SLAVE_SERVICE_NSP_NOC 619
-#define SA8775P_SLAVE_SERVICE_NSPB_NOC 620
-#define SA8775P_SLAVE_SERVICE_GEM_NOC_1 621
-#define SA8775P_SLAVE_SERVICE_MNOC_HF 622
-#define SA8775P_SLAVE_SERVICE_MNOC_SF 623
-#define SA8775P_SLAVE_SERVICES_LPASS_AML_NOC 624
-#define SA8775P_SLAVE_SERVICE_LPASS_AG_NOC 625
-#define SA8775P_SLAVE_SERVICE_GEM_NOC_2 626
-#define SA8775P_SLAVE_SERVICE_SNOC 627
-#define SA8775P_SLAVE_SERVICE_GEM_NOC 628
-#define SA8775P_SLAVE_SERVICE_GEM_NOC2 629
-#define SA8775P_SLAVE_PCIE_0 630
-#define SA8775P_SLAVE_PCIE_1 631
-#define SA8775P_SLAVE_QDSS_STM 632
-#define SA8775P_SLAVE_TCU 633
+static struct qcom_icc_node qxm_qup3;
+static struct qcom_icc_node xm_emac_0;
+static struct qcom_icc_node xm_emac_1;
+static struct qcom_icc_node xm_sdc1;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb2_2;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node xm_usb3_1;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qnm_cnoc_datapath;
+static struct qcom_icc_node qxm_crypto_0;
+static struct qcom_icc_node qxm_crypto_1;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_qdss_etr_0;
+static struct qcom_icc_node xm_qdss_etr_1;
+static struct qcom_icc_node xm_ufs_card;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup2_core_master;
+static struct qcom_icc_node qup3_core_master;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup2_core_slave;
+static struct qcom_icc_node qup3_core_slave;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_ahb2phy2;
+static struct qcom_icc_node qhs_ahb2phy3;
+static struct qcom_icc_node qhs_anoc_throttle_cfg;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qhs_boot_rom;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_camera_nrt_throttle_cfg;
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute0_cfg;
+static struct qcom_icc_node qhs_compute1_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_cpr_nspcx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_display0_cfg;
+static struct qcom_icc_node qhs_display0_rt_throttle_cfg;
+static struct qcom_icc_node qhs_display1_cfg;
+static struct qcom_icc_node qhs_display1_rt_throttle_cfg;
+static struct qcom_icc_node qhs_emac0_cfg;
+static struct qcom_icc_node qhs_emac1_cfg;
+static struct qcom_icc_node qhs_gp_dsp0_cfg;
+static struct qcom_icc_node qhs_gp_dsp1_cfg;
+static struct qcom_icc_node qhs_gpdsp0_throttle_cfg;
+static struct qcom_icc_node qhs_gpdsp1_throttle_cfg;
+static struct qcom_icc_node qhs_gpu_tcu_throttle_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_hwkm;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_lpass_cfg;
+static struct qcom_icc_node qhs_lpass_throttle_cfg;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_mxc_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pcie_rsc_cfg;
+static struct qcom_icc_node qhs_pcie_tcu_throttle_cfg;
+static struct qcom_icc_node qhs_pcie_throttle_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_pke_wrapper_cfg;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qm_cfg;
+static struct qcom_icc_node qhs_qm_mpu_cfg;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_qup3;
+static struct qcom_icc_node qhs_sail_throttle_cfg;
+static struct qcom_icc_node qhs_sdc1;
+static struct qcom_icc_node qhs_security;
+static struct qcom_icc_node qhs_snoc_throttle_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_tsc_cfg;
+static struct qcom_icc_node qhs_ufs_card_cfg;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb2_0;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_usb3_1;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_venus_cvp_throttle_cfg;
+static struct qcom_icc_node qhs_venus_v_cpu_throttle_cfg;
+static struct qcom_icc_node qhs_venus_vcodec_throttle_cfg;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qns_gpdsp_noc_cfg;
+static struct qcom_icc_node qns_mnoc_hf_cfg;
+static struct qcom_icc_node qns_mnoc_sf_cfg;
+static struct qcom_icc_node qns_pcie_anoc_cfg;
+static struct qcom_icc_node qns_snoc_cfg;
+static struct qcom_icc_node qxs_boot_imem;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qnm_cnoc_dc_noc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qns_gemnoc;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_pcie_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_cmpnoc0;
+static struct qcom_icc_node qnm_cmpnoc1;
+static struct qcom_icc_node qnm_gemnoc_cfg;
+static struct qcom_icc_node qnm_gpdsp_sail;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node srvc_even_gemnoc;
+static struct qcom_icc_node srvc_odd_gemnoc;
+static struct qcom_icc_node srvc_sys_gemnoc;
+static struct qcom_icc_node srvc_sys_gemnoc_2;
+static struct qcom_icc_node qxm_dsp0;
+static struct qcom_icc_node qxm_dsp1;
+static struct qcom_icc_node qns_gp_dsp_sail_noc;
+static struct qcom_icc_node qhm_config_noc;
+static struct qcom_icc_node qxm_lpass_dsp;
+static struct qcom_icc_node qhs_lpass_core;
+static struct qcom_icc_node qhs_lpass_lpi;
+static struct qcom_icc_node qhs_lpass_mpu;
+static struct qcom_icc_node qhs_lpass_top;
+static struct qcom_icc_node qns_sysnoc;
+static struct qcom_icc_node srvc_niu_aml_noc;
+static struct qcom_icc_node srvc_niu_lpass_agnoc;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_mdp0_0;
+static struct qcom_icc_node qnm_mdp0_1;
+static struct qcom_icc_node qnm_mdp1_0;
+static struct qcom_icc_node qnm_mdp1_1;
+static struct qcom_icc_node qnm_mnoc_hf_cfg;
+static struct qcom_icc_node qnm_mnoc_sf_cfg;
+static struct qcom_icc_node qnm_video0;
+static struct qcom_icc_node qnm_video1;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qnm_video_v_cpu;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc_hf;
+static struct qcom_icc_node srvc_mnoc_sf;
+static struct qcom_icc_node qhm_nsp_noc_config;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node qns_hcp;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node service_nsp_noc;
+static struct qcom_icc_node qhm_nspb_noc_config;
+static struct qcom_icc_node qxm_nspb;
+static struct qcom_icc_node qns_nspb_gemnoc;
+static struct qcom_icc_node qns_nspb_hcp;
+static struct qcom_icc_node service_nspb_noc;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node qhm_gic;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_lpass_noc;
+static struct qcom_icc_node qnm_snoc_cfg;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node srvc_snoc;
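
The block above forward-declares every node so the definitions below can refer to one another by address. That is the heart of this conversion: each .id = SA8775P_* assignment and .links = { SA8775P_* } table is replaced by a .link_nodes array of direct pointers, which is what allows the per-SoC ID header entries to be dropped. A cut-down, standalone sketch of the before/after shape follows; the real struct qcom_icc_node has many more fields, and this reduced struct is only for illustration.

/*
 * Reduced illustration of the linking change. File-scope compound
 * literals (C99) give the pointer array static storage, exactly as
 * the patch relies on.
 */
struct node_sketch {
	const char *name;
	int num_links;
	struct node_sketch **link_nodes;	/* replaces u16 links[] of SA8775P_* IDs */
};

static struct node_sketch qns_a1noc_snoc_s;	/* slave; forward-declared as above */

static struct node_sketch qxm_qup3_s = {
	.name = "qxm_qup3",
	.num_links = 1,
	.link_nodes = (struct node_sketch *[]) { &qns_a1noc_snoc_s },
};
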
static struct qcom_icc_node qxm_qup3 = {
.name = "qxm_qup3",
- .id = SA8775P_MASTER_QUP_3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emac_0 = {
.name = "xm_emac_0",
- .id = SA8775P_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emac_1 = {
.name = "xm_emac_1",
- .id = SA8775P_MASTER_EMAC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
- .id = SA8775P_MASTER_SDC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SA8775P_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb2_2 = {
.name = "xm_usb2_2",
- .id = SA8775P_MASTER_USB2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SA8775P_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
- .id = SA8775P_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SA8775P_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SA8775P_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SA8775P_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SA8775P_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_cnoc_datapath = {
.name = "qnm_cnoc_datapath",
- .id = SA8775P_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto_0 = {
.name = "qxm_crypto_0",
- .id = SA8775P_MASTER_CRYPTO_CORE0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto_1 = {
.name = "qxm_crypto_1",
- .id = SA8775P_MASTER_CRYPTO_CORE1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SA8775P_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
- .id = SA8775P_MASTER_QDSS_ETR_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
- .id = SA8775P_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
- .id = SA8775P_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SA8775P_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_QUP_CORE_0 },
+ .link_nodes = (struct qcom_icc_node *[]) { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SA8775P_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_QUP_CORE_1 },
+ .link_nodes = (struct qcom_icc_node *[]) { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
- .id = SA8775P_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_QUP_CORE_2 },
+ .link_nodes = (struct qcom_icc_node *[]) { &qup2_core_slave },
};
static struct qcom_icc_node qup3_core_master = {
.name = "qup3_core_master",
- .id = SA8775P_MASTER_QUP_CORE_3,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_QUP_CORE_3 },
+ .link_nodes = (struct qcom_icc_node *[]) { &qup3_core_slave },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SA8775P_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 82,
- .links = { SA8775P_SLAVE_AHB2PHY_0,
- SA8775P_SLAVE_AHB2PHY_1,
- SA8775P_SLAVE_AHB2PHY_2,
- SA8775P_SLAVE_AHB2PHY_3,
- SA8775P_SLAVE_ANOC_THROTTLE_CFG,
- SA8775P_SLAVE_AOSS,
- SA8775P_SLAVE_APPSS,
- SA8775P_SLAVE_BOOT_ROM,
- SA8775P_SLAVE_CAMERA_CFG,
- SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG,
- SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG,
- SA8775P_SLAVE_CLK_CTL,
- SA8775P_SLAVE_CDSP_CFG,
- SA8775P_SLAVE_CDSP1_CFG,
- SA8775P_SLAVE_RBCPR_CX_CFG,
- SA8775P_SLAVE_RBCPR_MMCX_CFG,
- SA8775P_SLAVE_RBCPR_MX_CFG,
- SA8775P_SLAVE_CPR_NSPCX,
- SA8775P_SLAVE_CRYPTO_0_CFG,
- SA8775P_SLAVE_CX_RDPM,
- SA8775P_SLAVE_DISPLAY_CFG,
- SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG,
- SA8775P_SLAVE_DISPLAY1_CFG,
- SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG,
- SA8775P_SLAVE_EMAC_CFG,
- SA8775P_SLAVE_EMAC1_CFG,
- SA8775P_SLAVE_GP_DSP0_CFG,
- SA8775P_SLAVE_GP_DSP1_CFG,
- SA8775P_SLAVE_GPDSP0_THROTTLE_CFG,
- SA8775P_SLAVE_GPDSP1_THROTTLE_CFG,
- SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG,
- SA8775P_SLAVE_GFX3D_CFG,
- SA8775P_SLAVE_HWKM,
- SA8775P_SLAVE_IMEM_CFG,
- SA8775P_SLAVE_IPA_CFG,
- SA8775P_SLAVE_IPC_ROUTER_CFG,
- SA8775P_SLAVE_LPASS,
- SA8775P_SLAVE_LPASS_THROTTLE_CFG,
- SA8775P_SLAVE_MX_RDPM,
- SA8775P_SLAVE_MXC_RDPM,
- SA8775P_SLAVE_PCIE_0_CFG,
- SA8775P_SLAVE_PCIE_1_CFG,
- SA8775P_SLAVE_PCIE_RSC_CFG,
- SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG,
- SA8775P_SLAVE_PCIE_THROTTLE_CFG,
- SA8775P_SLAVE_PDM,
- SA8775P_SLAVE_PIMEM_CFG,
- SA8775P_SLAVE_PKA_WRAPPER_CFG,
- SA8775P_SLAVE_QDSS_CFG,
- SA8775P_SLAVE_QM_CFG,
- SA8775P_SLAVE_QM_MPU_CFG,
- SA8775P_SLAVE_QUP_0,
- SA8775P_SLAVE_QUP_1,
- SA8775P_SLAVE_QUP_2,
- SA8775P_SLAVE_QUP_3,
- SA8775P_SLAVE_SAIL_THROTTLE_CFG,
- SA8775P_SLAVE_SDC1,
- SA8775P_SLAVE_SECURITY,
- SA8775P_SLAVE_SNOC_THROTTLE_CFG,
- SA8775P_SLAVE_TCSR,
- SA8775P_SLAVE_TLMM,
- SA8775P_SLAVE_TSC_CFG,
- SA8775P_SLAVE_UFS_CARD_CFG,
- SA8775P_SLAVE_UFS_MEM_CFG,
- SA8775P_SLAVE_USB2,
- SA8775P_SLAVE_USB3_0,
- SA8775P_SLAVE_USB3_1,
- SA8775P_SLAVE_VENUS_CFG,
- SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG,
- SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG,
- SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG,
- SA8775P_SLAVE_DDRSS_CFG,
- SA8775P_SLAVE_GPDSP_NOC_CFG,
- SA8775P_SLAVE_CNOC_MNOC_HF_CFG,
- SA8775P_SLAVE_CNOC_MNOC_SF_CFG,
- SA8775P_SLAVE_PCIE_ANOC_CFG,
- SA8775P_SLAVE_SNOC_CFG,
- SA8775P_SLAVE_BOOT_IMEM,
- SA8775P_SLAVE_IMEM,
- SA8775P_SLAVE_PIMEM,
- SA8775P_SLAVE_QDSS_STM,
- SA8775P_SLAVE_TCU
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_ahb2phy2, &qhs_ahb2phy3,
+ &qhs_anoc_throttle_cfg, &qhs_aoss,
+ &qhs_apss, &qhs_boot_rom,
+ &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg,
+ &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl,
+ &qhs_compute0_cfg, &qhs_compute1_cfg,
+ &qhs_cpr_cx, &qhs_cpr_mmcx,
+ &qhs_cpr_mx, &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display0_cfg, &qhs_display0_rt_throttle_cfg,
+ &qhs_display1_cfg, &qhs_display1_rt_throttle_cfg,
+ &qhs_emac0_cfg, &qhs_emac1_cfg,
+ &qhs_gp_dsp0_cfg, &qhs_gp_dsp1_cfg,
+ &qhs_gpdsp0_throttle_cfg, &qhs_gpdsp1_throttle_cfg,
+ &qhs_gpu_tcu_throttle_cfg, &qhs_gpuss_cfg,
+ &qhs_hwkm, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_lpass_cfg, &qhs_lpass_throttle_cfg,
+ &qhs_mx_rdpm, &qhs_mxc_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pcie_rsc_cfg, &qhs_pcie_tcu_throttle_cfg,
+ &qhs_pcie_throttle_cfg, &qhs_pdm,
+ &qhs_pimem_cfg, &qhs_pke_wrapper_cfg,
+ &qhs_qdss_cfg, &qhs_qm_cfg,
+ &qhs_qm_mpu_cfg, &qhs_qup0,
+ &qhs_qup1, &qhs_qup2,
+ &qhs_qup3, &qhs_sail_throttle_cfg,
+ &qhs_sdc1, &qhs_security,
+ &qhs_snoc_throttle_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_tsc_cfg,
+ &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg,
+ &qhs_usb2_0, &qhs_usb3_0,
+ &qhs_usb3_1, &qhs_venus_cfg,
+ &qhs_venus_cvp_throttle_cfg, &qhs_venus_v_cpu_throttle_cfg,
+ &qhs_venus_vcodec_throttle_cfg, &qns_ddrss_cfg,
+ &qns_gpdsp_noc_cfg, &qns_mnoc_hf_cfg,
+ &qns_mnoc_sf_cfg, &qns_pcie_anoc_cfg,
+ &qns_snoc_cfg, &qxs_boot_imem,
+ &qxs_imem, &qxs_pimem,
+ &xs_qdss_stm, &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SA8775P_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SA8775P_SLAVE_PCIE_0,
- SA8775P_SLAVE_PCIE_1
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
- .id = SA8775P_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SA8775P_SLAVE_LLCC_CFG,
- SA8775P_SLAVE_GEM_NOC_CFG
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_llcc, &qns_gemnoc },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SA8775P_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_pcie_tcu = {
.name = "alm_pcie_tcu",
- .id = SA8775P_MASTER_PCIE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SA8775P_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SA8775P_MASTER_APPSS_PROC,
.channels = 4,
.buswidth = 32,
.num_links = 3,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC,
- SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_cmpnoc0 = {
.name = "qnm_cmpnoc0",
- .id = SA8775P_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_cmpnoc1 = {
.name = "qnm_cmpnoc1",
- .id = SA8775P_MASTER_COMPUTE_NOC_1,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
- .id = SA8775P_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 4,
- .links = { SA8775P_SLAVE_SERVICE_GEM_NOC_1,
- SA8775P_SLAVE_SERVICE_GEM_NOC_2,
- SA8775P_SLAVE_SERVICE_GEM_NOC,
- SA8775P_SLAVE_SERVICE_GEM_NOC2
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_even_gemnoc, &srvc_odd_gemnoc,
+ &srvc_sys_gemnoc, &srvc_sys_gemnoc_2 },
};
static struct qcom_icc_node qnm_gpdsp_sail = {
.name = "qnm_gpdsp_sail",
- .id = SA8775P_MASTER_GPDSP_SAIL,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SA8775P_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SA8775P_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SA8775P_SLAVE_LLCC,
- SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_llcc, &qns_pcie },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SA8775P_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC,
- SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SA8775P_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SA8775P_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_LLCC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SA8775P_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
- SA8775P_SLAVE_LLCC,
- SA8775P_SLAVE_GEM_NOC_PCIE_CNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qxm_dsp0 = {
.name = "qxm_dsp0",
- .id = SA8775P_MASTER_DSP0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_SLAVE_GP_DSP_SAIL_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gp_dsp_sail_noc },
};
static struct qcom_icc_node qxm_dsp1 = {
.name = "qxm_dsp1",
- .id = SA8775P_MASTER_DSP1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_SLAVE_GP_DSP_SAIL_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gp_dsp_sail_noc },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
- .id = SA8775P_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
- .links = { SA8775P_SLAVE_LPASS_CORE_CFG,
- SA8775P_SLAVE_LPASS_LPI_CFG,
- SA8775P_SLAVE_LPASS_MPU_CFG,
- SA8775P_SLAVE_LPASS_TOP_CFG,
- SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
- SA8775P_SLAVE_SERVICE_LPASS_AG_NOC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_lpass_core, &qhs_lpass_lpi,
+ &qhs_lpass_mpu, &qhs_lpass_top,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node qxm_lpass_dsp = {
.name = "qxm_lpass_dsp",
- .id = SA8775P_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 4,
- .links = { SA8775P_SLAVE_LPASS_TOP_CFG,
- SA8775P_SLAVE_LPASS_SNOC,
- SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
- SA8775P_SLAVE_SERVICE_LPASS_AG_NOC
- },
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_lpass_top, &qns_sysnoc,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SA8775P_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_EBI1 },
+ .link_nodes = (struct qcom_icc_node *[]) { &ebi },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SA8775P_MASTER_CAMNOC_HF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = SA8775P_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = SA8775P_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_mdp0_0 = {
.name = "qnm_mdp0_0",
- .id = SA8775P_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp0_1 = {
.name = "qnm_mdp0_1",
- .id = SA8775P_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp1_0 = {
.name = "qnm_mdp1_0",
- .id = SA8775P_MASTER_MDP_CORE1_0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp1_1 = {
.name = "qnm_mdp1_1",
- .id = SA8775P_MASTER_MDP_CORE1_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mnoc_hf_cfg = {
.name = "qnm_mnoc_hf_cfg",
- .id = SA8775P_MASTER_CNOC_MNOC_HF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_SERVICE_MNOC_HF },
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_mnoc_hf },
};
static struct qcom_icc_node qnm_mnoc_sf_cfg = {
.name = "qnm_mnoc_sf_cfg",
- .id = SA8775P_MASTER_CNOC_MNOC_SF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_SERVICE_MNOC_SF },
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_mnoc_sf },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
- .id = SA8775P_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
- .id = SA8775P_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SA8775P_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
- .id = SA8775P_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
- .id = SA8775P_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_SERVICE_NSP_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &service_nsp_noc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = SA8775P_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SA8775P_SLAVE_HCP_A, SLAVE_CDSP_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hcp, &qns_nsp_gemnoc },
};
static struct qcom_icc_node qhm_nspb_noc_config = {
.name = "qhm_nspb_noc_config",
- .id = SA8775P_MASTER_CDSPB_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_SERVICE_NSPB_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &service_nspb_noc },
};
static struct qcom_icc_node qxm_nspb = {
.name = "qxm_nspb",
- .id = SA8775P_MASTER_CDSP_PROC_B,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SA8775P_SLAVE_HCP_B, SLAVE_CDSPB_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_nspb_hcp, &qns_nspb_gemnoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SA8775P_MASTER_PCIE_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SA8775P_MASTER_PCIE_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
- .id = SA8775P_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SA8775P_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SA8775P_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
- .id = SA8775P_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
- .id = SA8775P_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_SLAVE_SERVICE_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_snoc },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SA8775P_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SA8775P_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SA8775P_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_MASTER_A1NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SA8775P_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_MASTER_A2NOC_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre2_noc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SA8775P_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SA8775P_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
- .id = SA8775P_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup3_core_slave = {
.name = "qup3_core_slave",
- .id = SA8775P_SLAVE_QUP_CORE_3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SA8775P_SLAVE_AHB2PHY_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SA8775P_SLAVE_AHB2PHY_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
- .id = SA8775P_SLAVE_AHB2PHY_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy3 = {
.name = "qhs_ahb2phy3",
- .id = SA8775P_SLAVE_AHB2PHY_3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_anoc_throttle_cfg = {
.name = "qhs_anoc_throttle_cfg",
- .id = SA8775P_SLAVE_ANOC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SA8775P_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SA8775P_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_boot_rom = {
.name = "qhs_boot_rom",
- .id = SA8775P_SLAVE_BOOT_ROM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SA8775P_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
.name = "qhs_camera_nrt_throttle_cfg",
- .id = SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
.name = "qhs_camera_rt_throttle_cfg",
- .id = SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SA8775P_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute0_cfg = {
.name = "qhs_compute0_cfg",
- .id = SA8775P_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_MASTER_CDSP_NOC_CFG },
+ .link_nodes = (struct qcom_icc_node *[]) { &qhm_nsp_noc_config },
};
static struct qcom_icc_node qhs_compute1_cfg = {
.name = "qhs_compute1_cfg",
- .id = SA8775P_SLAVE_CDSP1_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_MASTER_CDSPB_NOC_CFG },
+ .link_nodes = (struct qcom_icc_node *[]) { &qhm_nspb_noc_config },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SA8775P_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SA8775P_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SA8775P_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
- .id = SA8775P_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SA8775P_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SA8775P_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display0_cfg = {
.name = "qhs_display0_cfg",
- .id = SA8775P_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display0_rt_throttle_cfg = {
.name = "qhs_display0_rt_throttle_cfg",
- .id = SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display1_cfg = {
.name = "qhs_display1_cfg",
- .id = SA8775P_SLAVE_DISPLAY1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display1_rt_throttle_cfg = {
.name = "qhs_display1_rt_throttle_cfg",
- .id = SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac0_cfg = {
.name = "qhs_emac0_cfg",
- .id = SA8775P_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac1_cfg = {
.name = "qhs_emac1_cfg",
- .id = SA8775P_SLAVE_EMAC1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gp_dsp0_cfg = {
.name = "qhs_gp_dsp0_cfg",
- .id = SA8775P_SLAVE_GP_DSP0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gp_dsp1_cfg = {
.name = "qhs_gp_dsp1_cfg",
- .id = SA8775P_SLAVE_GP_DSP1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpdsp0_throttle_cfg = {
.name = "qhs_gpdsp0_throttle_cfg",
- .id = SA8775P_SLAVE_GPDSP0_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpdsp1_throttle_cfg = {
.name = "qhs_gpdsp1_throttle_cfg",
- .id = SA8775P_SLAVE_GPDSP1_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpu_tcu_throttle_cfg = {
.name = "qhs_gpu_tcu_throttle_cfg",
- .id = SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SA8775P_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
- .id = SA8775P_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SA8775P_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SA8775P_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SA8775P_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
- .id = SA8775P_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_MASTER_CNOC_LPASS_AG_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qhm_config_noc },
};
static struct qcom_icc_node qhs_lpass_throttle_cfg = {
.name = "qhs_lpass_throttle_cfg",
- .id = SA8775P_SLAVE_LPASS_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = SA8775P_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mxc_rdpm = {
.name = "qhs_mxc_rdpm",
- .id = SA8775P_SLAVE_MXC_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SA8775P_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SA8775P_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_rsc_cfg = {
.name = "qhs_pcie_rsc_cfg",
- .id = SA8775P_SLAVE_PCIE_RSC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_tcu_throttle_cfg = {
.name = "qhs_pcie_tcu_throttle_cfg",
- .id = SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_throttle_cfg = {
.name = "qhs_pcie_throttle_cfg",
- .id = SA8775P_SLAVE_PCIE_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SA8775P_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SA8775P_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pke_wrapper_cfg = {
.name = "qhs_pke_wrapper_cfg",
- .id = SA8775P_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SA8775P_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_cfg = {
.name = "qhs_qm_cfg",
- .id = SA8775P_SLAVE_QM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_mpu_cfg = {
.name = "qhs_qm_mpu_cfg",
- .id = SA8775P_SLAVE_QM_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SA8775P_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SA8775P_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = SA8775P_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup3 = {
.name = "qhs_qup3",
- .id = SA8775P_SLAVE_QUP_3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sail_throttle_cfg = {
.name = "qhs_sail_throttle_cfg",
- .id = SA8775P_SLAVE_SAIL_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
- .id = SA8775P_SLAVE_SDC1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
- .id = SA8775P_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_throttle_cfg = {
.name = "qhs_snoc_throttle_cfg",
- .id = SA8775P_SLAVE_SNOC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SA8775P_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SA8775P_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsc_cfg = {
.name = "qhs_tsc_cfg",
- .id = SA8775P_SLAVE_TSC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
- .id = SA8775P_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SA8775P_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb2_0 = {
.name = "qhs_usb2_0",
- .id = SA8775P_SLAVE_USB2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SA8775P_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
- .id = SA8775P_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SA8775P_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cvp_throttle_cfg = {
.name = "qhs_venus_cvp_throttle_cfg",
- .id = SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_v_cpu_throttle_cfg = {
.name = "qhs_venus_v_cpu_throttle_cfg",
- .id = SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_vcodec_throttle_cfg = {
.name = "qhs_venus_vcodec_throttle_cfg",
- .id = SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = SA8775P_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_MASTER_CNOC_DC_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_cnoc_dc_noc },
};
static struct qcom_icc_node qns_gpdsp_noc_cfg = {
.name = "qns_gpdsp_noc_cfg",
- .id = SA8775P_SLAVE_GPDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_mnoc_hf_cfg = {
.name = "qns_mnoc_hf_cfg",
- .id = SA8775P_SLAVE_CNOC_MNOC_HF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_MASTER_CNOC_MNOC_HF_CFG },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_hf_cfg },
};
static struct qcom_icc_node qns_mnoc_sf_cfg = {
.name = "qns_mnoc_sf_cfg",
- .id = SA8775P_SLAVE_CNOC_MNOC_SF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_MASTER_CNOC_MNOC_SF_CFG },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_sf_cfg },
};
static struct qcom_icc_node qns_pcie_anoc_cfg = {
.name = "qns_pcie_anoc_cfg",
- .id = SA8775P_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
- .id = SA8775P_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_MASTER_SNOC_CFG },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_cfg },
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
- .id = SA8775P_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SA8775P_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SA8775P_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SA8775P_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SA8775P_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SA8775P_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SA8775P_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SA8775P_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
- .id = SA8775P_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SA8775P_MASTER_GEM_NOC_CFG },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_gemnoc_cfg },
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SA8775P_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_MASTER_GEM_NOC_CNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SA8775P_SLAVE_LLCC,
.channels = 6,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_MASTER_LLCC },
+ .link_nodes = (struct qcom_icc_node *[]) { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SA8775P_SLAVE_GEM_NOC_PCIE_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
- .id = SA8775P_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
- .id = SA8775P_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
- .id = SA8775P_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc_2 = {
.name = "srvc_sys_gemnoc_2",
- .id = SA8775P_SLAVE_SERVICE_GEM_NOC2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gp_dsp_sail_noc = {
.name = "qns_gp_dsp_sail_noc",
- .id = SA8775P_SLAVE_GP_DSP_SAIL_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_MASTER_GPDSP_SAIL },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_gpdsp_sail },
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
- .id = SA8775P_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
- .id = SA8775P_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
- .id = SA8775P_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
- .id = SA8775P_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_sysnoc = {
.name = "qns_sysnoc",
- .id = SA8775P_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_MASTER_LPASS_ANOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpass_noc },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
- .id = SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
- .id = SA8775P_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SA8775P_SLAVE_EBI1,
.channels = 8,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SA8775P_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SA8775P_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc_hf = {
.name = "srvc_mnoc_hf",
- .id = SA8775P_SLAVE_SERVICE_MNOC_HF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_mnoc_sf = {
.name = "srvc_mnoc_sf",
- .id = SA8775P_SLAVE_SERVICE_MNOC_SF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_hcp = {
.name = "qns_hcp",
- .id = SA8775P_SLAVE_HCP_A,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SA8775P_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_MASTER_COMPUTE_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_cmpnoc0 },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
- .id = SA8775P_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_nspb_gemnoc = {
.name = "qns_nspb_gemnoc",
- .id = SA8775P_SLAVE_CDSPB_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_MASTER_COMPUTE_NOC_1 },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_cmpnoc1 },
};
static struct qcom_icc_node qns_nspb_hcp = {
.name = "qns_nspb_hcp",
- .id = SA8775P_SLAVE_HCP_B,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node service_nspb_noc = {
.name = "service_nspb_noc",
- .id = SA8775P_SLAVE_SERVICE_NSPB_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SA8775P_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SA8775P_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_pcie },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SA8775P_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SA8775P_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SA8775P_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SA8775P_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_sf },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SA8775P_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
@@ -2113,6 +1841,7 @@ static const struct qcom_icc_desc sa8775p_aggre1_noc = {
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
@@ -2140,6 +1869,7 @@ static const struct qcom_icc_desc sa8775p_aggre2_noc = {
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
@@ -2164,6 +1894,7 @@ static const struct qcom_icc_desc sa8775p_clk_virt = {
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
@@ -2269,6 +2000,7 @@ static const struct qcom_icc_desc sa8775p_config_noc = {
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
@@ -2285,6 +2017,7 @@ static const struct qcom_icc_desc sa8775p_dc_noc = {
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
@@ -2321,6 +2054,7 @@ static const struct qcom_icc_desc sa8775p_gem_noc = {
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const gpdsp_anoc_bcms[] = {
@@ -2339,6 +2073,7 @@ static const struct qcom_icc_desc sa8775p_gpdsp_anoc = {
.num_nodes = ARRAY_SIZE(gpdsp_anoc_nodes),
.bcms = gpdsp_anoc_bcms,
.num_bcms = ARRAY_SIZE(gpdsp_anoc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
@@ -2362,6 +2097,7 @@ static const struct qcom_icc_desc sa8775p_lpass_ag_noc = {
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
@@ -2379,6 +2115,7 @@ static const struct qcom_icc_desc sa8775p_mc_virt = {
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
@@ -2411,6 +2148,7 @@ static const struct qcom_icc_desc sa8775p_mmss_noc = {
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const nspa_noc_bcms[] = {
@@ -2431,6 +2169,7 @@ static const struct qcom_icc_desc sa8775p_nspa_noc = {
.num_nodes = ARRAY_SIZE(nspa_noc_nodes),
.bcms = nspa_noc_bcms,
.num_bcms = ARRAY_SIZE(nspa_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
@@ -2451,6 +2190,7 @@ static const struct qcom_icc_desc sa8775p_nspb_noc = {
.num_nodes = ARRAY_SIZE(nspb_noc_nodes),
.bcms = nspb_noc_bcms,
.num_bcms = ARRAY_SIZE(nspb_noc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
@@ -2468,6 +2208,7 @@ static const struct qcom_icc_desc sa8775p_pcie_anoc = {
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+ .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
@@ -2496,6 +2237,7 @@ static const struct qcom_icc_desc sa8775p_system_noc = {
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
+ .alloc_dyn_id = true,
};
static const struct of_device_id qnoc_of_match[] = {
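
Each NoC descriptor above now sets .alloc_dyn_id = true, telling the shared icc-rpmh probe path to hand out node IDs at registration time instead of using the compile-time SA8775P_* values the removed entries provided. Below is a standalone sketch of the idea using a plain counter; the interconnect core's actual allocator and helper names may differ.

#include <stdio.h>

/* Toy model of runtime ID assignment; not the kernel's allocator. */
struct node_sketch {
	const char *name;
	int id;		/* filled in at probe time, not at compile time */
};

static int next_dyn_id = 1;

static void assign_dyn_id(struct node_sketch *node)
{
	node->id = next_dyn_id++;
}

int main(void)
{
	struct node_sketch node = { .name = "qxm_qup3" };

	assign_dyn_id(&node);	/* stands in for the fixed SA8775P_MASTER_QUP_3 */
	printf("%s -> id %d\n", node.name, node.id);
	return 0;
}
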
diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c
index 20ac5bc5e1fb..b7c321f4e4b5 100644
--- a/drivers/interconnect/qcom/sm8650.c
+++ b/drivers/interconnect/qcom/sm8650.c
@@ -17,20 +17,45 @@
#include "icc-rpmh.h"
#include "sm8650.h"
+static const struct regmap_config icc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
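The config above describes 32-bit registers on a 4-byte stride with fast_io set (spinlock rather than mutex locking, so the QoS registers can be written from atomic context). A hypothetical probe fragment showing how such a config is typically bound to the NoC's MMIO window follows; the helper name and error handling are illustrative, not taken from this patch.

static int icc_sketch_init_regmap(struct platform_device *pdev,
				  struct regmap **out)
{
	/* Map the NoC register space and wrap it with the config above. */
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);

	*out = devm_regmap_init_mmio(&pdev->dev, base, &icc_regmap_config);
	return PTR_ERR_OR_ZERO(*out);
}
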
+static struct qcom_icc_qosbox qhm_qspi_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SM8650_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
+ .qosbox = &qhm_qspi_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A1NOC_SNOC },
};
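
The pattern repeated through the rest of this file pairs each master node with a qcom_icc_qosbox: num_ports and port_offsets locate the node's NIU QoS register window(s), prio is the default transaction priority, urg_fwd enables urgency forwarding, and prio_fwd_disable masks priority forwarding from the upstream port. Below is a hypothetical sketch of how such a box could be programmed through the regmap above; the SKETCH_QOS_* offsets are placeholders, not the real NIU register layout.

#define SKETCH_QOS_PRIO		0x8	/* placeholder, not the real offset */
#define SKETCH_QOS_URGENCY	0xc	/* placeholder, not the real offset */

static void icc_sketch_set_qos(struct regmap *regmap,
			       const struct qcom_icc_qosbox *qos)
{
	int i;

	/* One register window per port; write the same policy to each. */
	for (i = 0; i < qos->num_ports; i++) {
		u32 port = qos->port_offsets[i];

		regmap_write(regmap, port + SKETCH_QOS_PRIO, qos->prio);
		regmap_write(regmap, port + SKETCH_QOS_URGENCY, qos->urg_fwd);
	}
}
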
+static struct qcom_icc_qosbox qhm_qup1_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SM8650_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
+ .qosbox = &qhm_qup1_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A1NOC_SNOC },
};
@@ -44,65 +69,128 @@ static struct qcom_icc_node qxm_qup02 = {
.links = { SM8650_SLAVE_A1NOC_SNOC },
};
+static struct qcom_icc_qosbox xm_sdc4_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xe000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SM8650_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_sdc4_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A1NOC_SNOC },
};
+static struct qcom_icc_qosbox xm_ufs_mem_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xf000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SM8650_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 16,
+ .qosbox = &xm_ufs_mem_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A1NOC_SNOC },
};
+static struct qcom_icc_qosbox xm_usb3_0_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x10000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SM8650_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_usb3_0_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A1NOC_SNOC },
};
+static struct qcom_icc_qosbox qhm_qdss_bam_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x12000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SM8650_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
+ .qosbox = &qhm_qdss_bam_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A2NOC_SNOC },
};
+static struct qcom_icc_qosbox qhm_qup2_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x13000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SM8650_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
+ .qosbox = &qhm_qup2_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A2NOC_SNOC },
};
+static struct qcom_icc_qosbox qxm_crypto_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x15000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SM8650_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qxm_crypto_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A2NOC_SNOC },
};
+static struct qcom_icc_qosbox qxm_ipa_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x16000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SM8650_MASTER_IPA,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qxm_ipa_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A2NOC_SNOC },
};
@@ -116,29 +204,56 @@ static struct qcom_icc_node qxm_sp = {
.links = { SM8650_SLAVE_A2NOC_SNOC },
};
+static struct qcom_icc_qosbox xm_qdss_etr_0_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x17000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
.id = SM8650_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_qdss_etr_0_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A2NOC_SNOC },
};
+static struct qcom_icc_qosbox xm_qdss_etr_1_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x18000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
.id = SM8650_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_qdss_etr_1_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A2NOC_SNOC },
};
+static struct qcom_icc_qosbox xm_sdc2_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x19000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SM8650_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_sdc2_qos,
.num_links = 1,
.links = { SM8650_SLAVE_A2NOC_SNOC },
};
@@ -223,29 +338,56 @@ static struct qcom_icc_node qnm_gemnoc_pcie = {
.links = { SM8650_SLAVE_PCIE_0, SM8650_SLAVE_PCIE_1 },
};
+static struct qcom_icc_qosbox alm_gpu_tcu_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xbf000 },
+ .prio = 1,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+};
+
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.id = SM8650_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
+ .qosbox = &alm_gpu_tcu_qos,
.num_links = 2,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
};
+static struct qcom_icc_qosbox alm_sys_tcu_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xc1000 },
+ .prio = 6,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+};
+
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = SM8650_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
+ .qosbox = &alm_sys_tcu_qos,
.num_links = 2,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
};
+static struct qcom_icc_qosbox alm_ubwc_p_tcu_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xc5000 },
+ .prio = 1,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+};
+
static struct qcom_icc_node alm_ubwc_p_tcu = {
.name = "alm_ubwc_p_tcu",
.id = SM8650_MASTER_UBWC_P_TCU,
.channels = 1,
.buswidth = 8,
+ .qosbox = &alm_ubwc_p_tcu_qos,
.num_links = 2,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
};
@@ -260,20 +402,38 @@ static struct qcom_icc_node chm_apps = {
SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
};
+static struct qcom_icc_qosbox qnm_gpu_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x31000, 0x71000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+};
+
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SM8650_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_gpu_qos,
.num_links = 2,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
};
+static struct qcom_icc_qosbox qnm_lpass_gemnoc_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xb5000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_lpass_gemnoc = {
.name = "qnm_lpass_gemnoc",
.id = SM8650_MASTER_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
+ .qosbox = &qnm_lpass_gemnoc_qos,
.num_links = 3,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
@@ -289,67 +449,130 @@ static struct qcom_icc_node qnm_mdsp = {
SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
};
+static struct qcom_icc_qosbox qnm_mnoc_hf_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x33000, 0x73000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SM8650_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_mnoc_hf_qos,
.num_links = 2,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
};
+static struct qcom_icc_qosbox qnm_mnoc_sf_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x35000, 0x75000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SM8650_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_mnoc_sf_qos,
.num_links = 2,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
};
+static struct qcom_icc_qosbox qnm_nsp_gemnoc_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x37000, 0x77000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+};
+
static struct qcom_icc_node qnm_nsp_gemnoc = {
.name = "qnm_nsp_gemnoc",
.id = SM8650_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_nsp_gemnoc_qos,
.num_links = 3,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
};
+static struct qcom_icc_qosbox qnm_pcie_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xb7000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SM8650_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
+ .qosbox = &qnm_pcie_qos,
.num_links = 2,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
};
+static struct qcom_icc_qosbox qnm_snoc_sf_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xbb000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SM8650_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
+ .qosbox = &qnm_snoc_sf_qos,
.num_links = 3,
.links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
};
+static struct qcom_icc_qosbox qnm_ubwc_p_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xc3000 },
+ .prio = 1,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+};
+
static struct qcom_icc_node qnm_ubwc_p = {
.name = "qnm_ubwc_p",
.id = SM8650_MASTER_UBWC_P,
.channels = 1,
.buswidth = 32,
+ .qosbox = &qnm_ubwc_p_qos,
.num_links = 1,
.links = { SM8650_SLAVE_LLCC },
};
+static struct qcom_icc_qosbox xm_gic_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xb9000 },
+ .prio = 4,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+};
+
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SM8650_MASTER_GIC,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_gic_qos,
.num_links = 1,
.links = { SM8650_SLAVE_LLCC },
};
@@ -390,38 +613,74 @@ static struct qcom_icc_node llcc_mc = {
.links = { SM8650_SLAVE_EBI1 },
};
+static struct qcom_icc_qosbox qnm_camnoc_hf_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x28000, 0x29000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
.id = SM8650_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_camnoc_hf_qos,
.num_links = 1,
.links = { SM8650_SLAVE_MNOC_HF_MEM_NOC },
};
+static struct qcom_icc_qosbox qnm_camnoc_icp_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x2a000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
.id = SM8650_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qnm_camnoc_icp_qos,
.num_links = 1,
.links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
};
+static struct qcom_icc_qosbox qnm_camnoc_sf_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x2b000, 0x2c000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
.id = SM8650_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_camnoc_sf_qos,
.num_links = 1,
.links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
};
+static struct qcom_icc_qosbox qnm_mdp_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x2d000, 0x2e000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
.id = SM8650_MASTER_MDP,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_mdp_qos,
.num_links = 1,
.links = { SM8650_SLAVE_MNOC_HF_MEM_NOC },
};
@@ -435,38 +694,74 @@ static struct qcom_icc_node qnm_vapss_hcp = {
.links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
};
+static struct qcom_icc_qosbox qnm_video_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x30000, 0x31000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_video = {
.name = "qnm_video",
.id = SM8650_MASTER_VIDEO,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_video_qos,
.num_links = 1,
.links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
};
+static struct qcom_icc_qosbox qnm_video_cv_cpu_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x32000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
.id = SM8650_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qnm_video_cv_cpu_qos,
.num_links = 1,
.links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
};
+static struct qcom_icc_qosbox qnm_video_cvp_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x33000, 0x34000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.id = SM8650_MASTER_VIDEO_PROC,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_video_cvp_qos,
.num_links = 1,
.links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
};
+static struct qcom_icc_qosbox qnm_video_v_cpu_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x35000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
.id = SM8650_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qnm_video_v_cpu_qos,
.num_links = 1,
.links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
};
@@ -498,20 +793,38 @@ static struct qcom_icc_node qsm_pcie_anoc_cfg = {
.links = { SM8650_SLAVE_SERVICE_PCIE_ANOC },
};
+static struct qcom_icc_qosbox xm_pcie3_0_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 3,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SM8650_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_pcie3_0_qos,
.num_links = 1,
.links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC },
};
+static struct qcom_icc_qosbox xm_pcie3_1_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+};
+
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SM8650_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
+ .qosbox = &xm_pcie3_1_qos,
.num_links = 1,
.links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC },
};
@@ -534,6 +847,24 @@ static struct qcom_icc_node qnm_aggre2_noc = {
.links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
};
+static struct qcom_icc_qosbox qnm_apss_noc_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x1c000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+};
+
+static struct qcom_icc_node qnm_apss_noc = {
+ .name = "qnm_apss_noc",
+ .id = SM8650_MASTER_APSS_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &qnm_apss_noc_qos,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
+};
+
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8650_SLAVE_A1NOC_SNOC,
@@ -1325,6 +1656,7 @@ static struct qcom_icc_node * const aggre1_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_aggre1_noc = {
+ .config = &icc_regmap_config,
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
};
@@ -1346,6 +1678,7 @@ static struct qcom_icc_node * const aggre2_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_aggre2_noc = {
+ .config = &icc_regmap_config,
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
@@ -1429,6 +1762,7 @@ static struct qcom_icc_node * const config_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_config_noc = {
+ .config = &icc_regmap_config,
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
@@ -1456,6 +1790,7 @@ static struct qcom_icc_node * const cnoc_main_nodes[] = {
};
static const struct qcom_icc_desc sm8650_cnoc_main = {
+ .config = &icc_regmap_config,
.nodes = cnoc_main_nodes,
.num_nodes = ARRAY_SIZE(cnoc_main_nodes),
.bcms = cnoc_main_bcms,
@@ -1488,6 +1823,7 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_gem_noc = {
+ .config = &icc_regmap_config,
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
@@ -1500,6 +1836,7 @@ static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_lpass_ag_noc = {
+ .config = &icc_regmap_config,
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
};
@@ -1514,6 +1851,7 @@ static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_lpass_lpiaon_noc = {
+ .config = &icc_regmap_config,
.nodes = lpass_lpiaon_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
.bcms = lpass_lpiaon_noc_bcms,
@@ -1526,6 +1864,7 @@ static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_lpass_lpicx_noc = {
+ .config = &icc_regmap_config,
.nodes = lpass_lpicx_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
};
@@ -1569,6 +1908,7 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_mmss_noc = {
+ .config = &icc_regmap_config,
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
@@ -1585,6 +1925,7 @@ static struct qcom_icc_node * const nsp_noc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_nsp_noc = {
+ .config = &icc_regmap_config,
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
@@ -1604,6 +1945,7 @@ static struct qcom_icc_node * const pcie_anoc_nodes[] = {
};
static const struct qcom_icc_desc sm8650_pcie_anoc = {
+ .config = &icc_regmap_config,
.nodes = pcie_anoc_nodes,
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
@@ -1620,9 +1962,11 @@ static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [MASTER_APSS_NOC] = &qnm_apss_noc,
};
static const struct qcom_icc_desc sm8650_system_noc = {
+ .config = &icc_regmap_config,
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
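Each sm8650 descriptor above also gains .config = &icc_regmap_config, which lets the common probe path create a regmap over the NoC register space so the qosboxes can actually be written. The config's definition sits earlier in sm8650.c and is outside this diff; a plausible shape, stated purely as an assumption, is:

static const struct regmap_config icc_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
};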
diff --git a/drivers/interconnect/qcom/sm8650.h b/drivers/interconnect/qcom/sm8650.h
index de35c956fe49..b6610225b38a 100644
--- a/drivers/interconnect/qcom/sm8650.h
+++ b/drivers/interconnect/qcom/sm8650.h
@@ -139,5 +139,6 @@
#define SM8650_SLAVE_USB3_0 127
#define SM8650_SLAVE_VENUS_CFG 128
#define SM8650_SLAVE_VSENSE_CTRL_CFG 129
+#define SM8650_MASTER_APSS_NOC 130
#endif
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index cd750f512dee..0a33d995d15d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -192,6 +192,7 @@ config MSM_IOMMU
If unsure, say N here.
source "drivers/iommu/amd/Kconfig"
+source "drivers/iommu/arm/Kconfig"
source "drivers/iommu/intel/Kconfig"
source "drivers/iommu/iommufd/Kconfig"
source "drivers/iommu/riscv/Kconfig"
@@ -199,7 +200,6 @@ source "drivers/iommu/riscv/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
- select DMAR_TABLE if INTEL_IOMMU
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
@@ -314,150 +314,6 @@ config APPLE_DART
Say Y here if you are using an Apple SoC.
-# ARM IOMMU support
-config ARM_SMMU
- tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || ARM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU if ARM
- help
- Support for implementations of the ARM System MMU architecture
- versions 1 and 2.
-
- Say Y here if your SoC includes an IOMMU device implementing
- the ARM SMMU architecture.
-
-config ARM_SMMU_LEGACY_DT_BINDINGS
- bool "Support the legacy \"mmu-masters\" devicetree bindings"
- depends on ARM_SMMU=y && OF
- help
- Support for the badly designed and deprecated "mmu-masters"
- devicetree bindings. This allows some DMA masters to attach
- to the SMMU but does not provide any support via the DMA API.
- If you're lucky, you might be able to get VFIO up and running.
-
- If you say Y here then you'll make me very sad. Instead, say N
- and move your firmware to the utopian future that was 2016.
-
-config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
- bool "Default to disabling bypass on ARM SMMU v1 and v2"
- depends on ARM_SMMU
- default y
- help
- Say Y here to (by default) disable bypass streams such that
- incoming transactions from devices that are not attached to
- an iommu domain will report an abort back to the device and
- will not be allowed to pass through the SMMU.
-
- Any old kernels that existed before this KConfig was
- introduced would default to _allowing_ bypass (AKA the
- equivalent of NO for this config). However the default for
- this option is YES because the old behavior is insecure.
-
- There are few reasons to allow unmatched stream bypass, and
- even fewer good ones. If saying YES here breaks your board
- you should work on fixing your board. This KConfig option
- is expected to be removed in the future and we'll simply
- hardcode the bypass disable in the code.
-
- NOTE: the kernel command line parameter
- 'arm-smmu.disable_bypass' will continue to override this
- config.
-
-config ARM_SMMU_MMU_500_CPRE_ERRATA
- bool "Enable errata workaround for CPRE in SMMU reset path"
- depends on ARM_SMMU
- default y
- help
- Say Y here (by default) to apply workaround to disable
- MMU-500's next-page prefetcher for sake of 4 known errata.
-
- Say N here only when it is sure that any errata related to
- prefetch enablement are not applicable on the platform.
- Refer silicon-errata.rst for info on errata IDs.
-
-config ARM_SMMU_QCOM
- def_tristate y
- depends on ARM_SMMU && ARCH_QCOM
- select QCOM_SCM
- help
- When running on a Qualcomm platform that has the custom variant
- of the ARM SMMU, this needs to be built into the SMMU driver.
-
-config ARM_SMMU_QCOM_DEBUG
- bool "ARM SMMU QCOM implementation defined debug support"
- depends on ARM_SMMU_QCOM=y
- help
- Support for implementation specific debug features in ARM SMMU
- hardware found in QTI platforms. This include support for
- the Translation Buffer Units (TBU) that can be used to obtain
- additional information when debugging memory management issues
- like context faults.
-
- Say Y here to enable debug for issues such as context faults
- or TLB sync timeouts which requires implementation defined
- register dumps.
-
-config ARM_SMMU_V3
- tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
- depends on ARM64
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select GENERIC_MSI_IRQ
- select IOMMUFD_DRIVER if IOMMUFD
- help
- Support for implementations of the ARM System MMU architecture
- version 3 providing translation support to a PCIe root complex.
-
- Say Y here if your system includes an IOMMU device implementing
- the ARM SMMUv3 architecture.
-
-if ARM_SMMU_V3
-config ARM_SMMU_V3_SVA
- bool "Shared Virtual Addressing support for the ARM SMMUv3"
- select IOMMU_SVA
- select IOMMU_IOPF
- select MMU_NOTIFIER
- help
- Support for sharing process address spaces with devices using the
- SMMUv3.
-
- Say Y here if your system supports SVA extensions such as PCIe PASID
- and PRI.
-
-config ARM_SMMU_V3_IOMMUFD
- bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
- depends on IOMMUFD
- help
- Support for IOMMUFD features intended to support virtual machines
- with accelerated virtual IOMMUs.
-
- Say Y here if you are doing development and testing on this feature.
-
-config ARM_SMMU_V3_KUNIT_TEST
- tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
- depends on KUNIT
- depends on ARM_SMMU_V3_SVA
- default KUNIT_ALL_TESTS
- help
- Enable this option to unit-test arm-smmu-v3 driver functions.
-
- If unsure, say N.
-
-config TEGRA241_CMDQV
- bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
- depends on ACPI
- help
- Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The
- CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues
- support, except with virtualization capabilities.
-
- Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same
- CMDQ-V extension.
-endif
-
config S390_IOMMU
def_bool y if S390 && PCI
depends on S390 && PCI
@@ -494,18 +350,6 @@ config MTK_IOMMU_V1
if unsure, say N here.
-config QCOM_IOMMU
- # Note: iommu drivers cannot (yet?) be built as modules
- bool "Qualcomm IOMMU Support"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
- select QCOM_SCM
- select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
- select ARM_DMA_USE_IOMMU
- help
- Support for IOMMU on certain Qualcomm SoCs.
-
config HYPERV_IOMMU
bool "Hyper-V IRQ Handling"
depends on HYPERV && X86
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 5e5a83c6c2aa..355294fa9033 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += amd/ intel/ arm/ iommufd/ riscv/
+obj-y += arm/ iommufd/
+obj-$(CONFIG_AMD_IOMMU) += amd/
+obj-$(CONFIG_INTEL_IOMMU) += intel/
+obj-$(CONFIG_RISCV_IOMMU) += riscv/
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu-pages.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index 9de33b2d42f5..59c04a67f398 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
+obj-y += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 220c598b7e14..29a8864381c3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -147,6 +147,8 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
return PCI_SEG_DEVID_TO_SBDF(seg, devid);
}
+bool amd_iommu_ht_range_ignore(void);
+
/*
* This must be called after device probe completes. During probe
 * use rlookup_amd_iommu() to get the iommu.
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5089b58e528a..ccbab3a4811a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -29,8 +29,6 @@
* some size calculation constants
*/
#define DEV_TABLE_ENTRY_SIZE 32
-#define ALIAS_TABLE_ENTRY_SIZE 2
-#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
@@ -111,6 +109,7 @@
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
+#define FEATURE_HT_RANGE_IGNORE BIT_ULL(11)
#define FEATURE_NUM_INT_REMAP_SUP GENMASK_ULL(9, 8)
#define FEATURE_NUM_INT_REMAP_SUP_2K(x) \
@@ -316,6 +315,7 @@
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE 1ULL
+#define DTE_INTTAB_ALIGNMENT 128
#define DTE_INTTABLEN_MASK (0xfULL << 1)
#define DTE_INTTABLEN_VALUE_512 9ULL
#define DTE_INTTABLEN_512 (DTE_INTTABLEN_VALUE_512 << 1)
@@ -616,12 +616,6 @@ struct amd_iommu_pci_seg {
/* Size of the device table */
u32 dev_table_size;
- /* Size of the alias table */
- u32 alias_table_size;
-
- /* Size of the rlookup table */
- u32 rlookup_table_size;
-
/*
* device table virtual address
*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 14aa0d77df26..9c17dfa76703 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -243,17 +243,14 @@ static void init_translation_status(struct amd_iommu *iommu)
iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
}
-static inline unsigned long tbl_size(int entry_size, int last_bdf)
+int amd_iommu_get_num_iommus(void)
{
- unsigned shift = PAGE_SHIFT +
- get_order((last_bdf + 1) * entry_size);
-
- return 1UL << shift;
+ return amd_iommus_present;
}
-int amd_iommu_get_num_iommus(void)
+bool amd_iommu_ht_range_ignore(void)
{
- return amd_iommus_present;
+ return check_feature2(FEATURE_HT_RANGE_IGNORE);
}
/*
@@ -634,8 +631,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
/* Allocate per PCI segment device table */
static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32,
+ pci_seg->dev_table_size);
if (!pci_seg->dev_table)
return -ENOMEM;
@@ -644,16 +641,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
/* Allocate per PCI segment IOMMU rlookup table. */
static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
+ pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->rlookup_table),
+ GFP_KERNEL);
if (pci_seg->rlookup_table == NULL)
return -ENOMEM;
@@ -662,17 +659,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->rlookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->rlookup_table);
pci_seg->rlookup_table = NULL;
}
static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->rlookup_table_size));
- kmemleak_alloc(pci_seg->irq_lookup_table,
- pci_seg->rlookup_table_size, 1, GFP_KERNEL);
+ pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->irq_lookup_table),
+ GFP_KERNEL);
if (pci_seg->irq_lookup_table == NULL)
return -ENOMEM;
@@ -681,9 +676,7 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
- kmemleak_free(pci_seg->irq_lookup_table);
- iommu_free_pages(pci_seg->irq_lookup_table,
- get_order(pci_seg->rlookup_table_size));
+ kvfree(pci_seg->irq_lookup_table);
pci_seg->irq_lookup_table = NULL;
}
@@ -691,8 +684,9 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
int i;
- pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
- get_order(pci_seg->alias_table_size));
+ pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1,
+ sizeof(*pci_seg->alias_table),
+ GFP_KERNEL);
if (!pci_seg->alias_table)
return -ENOMEM;
@@ -707,8 +701,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->alias_table,
- get_order(pci_seg->alias_table_size));
+ kvfree(pci_seg->alias_table);
pci_seg->alias_table = NULL;
}
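The alias, rlookup, and IRQ lookup tables above move from page allocations sized by the removed tbl_size() helper to kvcalloc()/kvfree() sized by the exact entry count, which lets the separate *_table_size fields go away. A small worked case, assuming the 2-byte alias entries from the removed ALIAS_TABLE_ENTRY_SIZE: a segment with last_bdf = 0x2f needs 48 * 2 = 96 bytes, which the old page-order path still rounded up to a full 4 KiB page. A sketch of the new call pattern, with an illustrative function name:

static int example_alloc_alias(u16 last_bdf)
{
	u16 *alias = kvcalloc(last_bdf + 1, sizeof(*alias), GFP_KERNEL);

	if (!alias)
		return -ENOMEM;
	/* ... populate and use the table ... */
	kvfree(alias);
	return 0;
}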
@@ -719,8 +712,7 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
*/
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
- iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
- get_order(CMD_BUFFER_SIZE));
+ iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
return iommu->cmd_buf ? 0 : -ENOMEM;
}
@@ -817,20 +809,22 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
static void __init free_command_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+ iommu_free_pages(iommu->cmd_buf);
}
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
size_t size)
{
- int order = get_order(size);
- void *buf = iommu_alloc_pages(gfp, order);
+ void *buf;
- if (buf &&
- check_feature(FEATURE_SNP) &&
- set_memory_4k((unsigned long)buf, (1 << order))) {
- iommu_free_pages(buf, order);
- buf = NULL;
+ size = PAGE_ALIGN(size);
+ buf = iommu_alloc_pages_sz(gfp, size);
+ if (!buf)
+ return NULL;
+ if (check_feature(FEATURE_SNP) &&
+ set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) {
+ iommu_free_pages(buf);
+ return NULL;
}
return buf;
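The allocation helpers in this file consistently switch from the order-based iommu_alloc_pages()/iommu_free_pages(ptr, order) pair to a size-based API, where the free side needs no size at all. Reduced to the command-buffer case from the hunks above:

/* before: the caller converts a byte count to a page order twice */
iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL, get_order(CMD_BUFFER_SIZE));
iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));

/* after: the allocator tracks the size, so the free takes only a pointer */
iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE);
iommu_free_pages(iommu->cmd_buf);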
@@ -873,14 +867,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
static void __init free_event_buffer(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+ iommu_free_pages(iommu->evt_buf);
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
- iommu_free_pages(iommu->ga_log_tail, get_order(8));
+ iommu_free_pages(iommu->ga_log);
+ iommu_free_pages(iommu->ga_log_tail);
#endif
}
@@ -925,11 +919,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
- iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
+ iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE);
if (!iommu->ga_log)
goto err_out;
- iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
+ iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8);
if (!iommu->ga_log_tail)
goto err_out;
@@ -950,7 +944,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
static void __init free_cwwb_sem(struct amd_iommu *iommu)
{
if (iommu->cmd_sem)
- iommu_free_page((void *)iommu->cmd_sem);
+ iommu_free_pages((void *)iommu->cmd_sem);
}
static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1024,8 +1018,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
if (!old_devtb)
return false;
- pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(pci_seg->dev_table_size));
+ pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
+ GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
if (pci_seg->old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
@@ -1599,9 +1593,9 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
pci_seg->last_bdf = last_bdf;
DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf);
- pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf);
- pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf);
+ pci_seg->dev_table_size =
+ max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
+ SZ_4K);
pci_seg->id = id;
init_llist_head(&pci_seg->dev_data_list);
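With the alias and rlookup sizes gone, only the device table keeps a byte size, now computed inline: 32-byte entries (DEV_TABLE_ENTRY_SIZE), rounded up to a power of two and floored at 4 KiB. Two worked values, mirroring the expression in the hunk above (the wrapper function is illustrative):

static u32 example_dev_table_size(u16 last_bdf)
{
	/* last_bdf = 0x2f:   48 * 32 = 1536 -> 2048 -> clamped up to 4096
	 * last_bdf = 0xffff: 65536 * 32 = 2 MiB (already a power of two) */
	return max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE),
		   SZ_4K);
}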
@@ -2030,9 +2024,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (!iommu->dev)
return -ENODEV;
- /* Prevent binding other PCI device drivers to IOMMU devices */
- iommu->dev->match_driver = false;
-
/* ACPI _PRT won't have an IRQ for IOMMU */
iommu->dev->irq_managed = 1;
@@ -2789,8 +2780,7 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- iommu_free_pages(pci_seg->old_dev_tbl_cpy,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2803,8 +2793,7 @@ static void early_enable_iommus(void)
pr_info("Copied DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
- iommu_free_pages(pci_seg->dev_table,
- get_order(pci_seg->dev_table_size));
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
}
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 26cf562dde11..4d308c071134 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -47,14 +47,7 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
return fpte;
}
-static void free_pt_page(u64 *pt, struct list_head *freelist)
-{
- struct page *p = virt_to_page(pt);
-
- list_add_tail(&p->lru, freelist);
-}
-
-static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
+static void free_pt_lvl(u64 *pt, struct iommu_pages_list *freelist, int lvl)
{
u64 *p;
int i;
@@ -77,20 +70,20 @@ static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
if (lvl > 2)
free_pt_lvl(p, freelist, lvl - 1);
else
- free_pt_page(p, freelist);
+ iommu_pages_list_add(freelist, p);
}
- free_pt_page(pt, freelist);
+ iommu_pages_list_add(freelist, pt);
}
-static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
+static void free_sub_pt(u64 *root, int mode, struct iommu_pages_list *freelist)
{
switch (mode) {
case PAGE_MODE_NONE:
case PAGE_MODE_7_LEVEL:
break;
case PAGE_MODE_1_LEVEL:
- free_pt_page(root, freelist);
+ iommu_pages_list_add(freelist, root);
break;
case PAGE_MODE_2_LEVEL:
case PAGE_MODE_3_LEVEL:
@@ -121,7 +114,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
bool ret = true;
u64 *pte;
- pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K);
if (!pte)
return false;
@@ -146,7 +139,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
out:
spin_unlock_irqrestore(&domain->lock, flags);
- iommu_free_page(pte);
+ iommu_free_pages(pte);
return ret;
}
@@ -213,7 +206,8 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
if (!IOMMU_PTE_PRESENT(__pte) ||
pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_page_node(cfg->amd.nid, gfp);
+ page = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp,
+ SZ_4K);
if (!page)
return NULL;
@@ -222,7 +216,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -299,7 +293,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
return pte;
}
-static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval,
+ struct iommu_pages_list *freelist)
{
u64 *pt;
int mode;
@@ -328,7 +323,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
int prot, gfp_t gfp, size_t *mapped)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
@@ -353,7 +348,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
for (i = 0; i < count; ++i)
free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
+ if (!iommu_pages_list_empty(&freelist))
updated = true;
if (count > 1) {
@@ -524,7 +519,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
static void v1_free_pgtable(struct io_pgtable *iop)
{
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (pgtable->mode == PAGE_MODE_NONE)
return;
@@ -541,7 +536,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->root =
+ iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
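The v1 page-table teardown above switches from chaining struct page lru entries on a list_head to the dedicated iommu_pages_list, whose init/add/empty helpers appear throughout these hunks. A condensed sketch of the lifecycle; the final bulk free is assumed here to be iommu_put_pages_list(), which this diff does not show:

static void example_free_tables(u64 *root)
{
	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);

	/* queue each reachable table page, as free_sub_pt() does above */
	iommu_pages_list_add(&freelist, root);

	if (!iommu_pages_list_empty(&freelist))
		iommu_put_pages_list(&freelist);	/* assumed bulk free */
}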
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index a56a27396305..b47941353ccb 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -121,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
if (level > 2)
free_pgtable(p, level - 1);
else
- iommu_free_page(p);
+ iommu_free_pages(p);
}
- iommu_free_page(pt);
+ iommu_free_pages(pt);
}
/* Allocate page table */
@@ -152,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
}
if (!IOMMU_PTE_PRESENT(__pte)) {
- page = iommu_alloc_page_node(nid, gfp);
+ page = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K);
if (!page)
return NULL;
__npte = set_pgtable_attr(page);
/* pte could have been changed somewhere. */
if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_page(page);
+ iommu_free_pages(page);
else if (IOMMU_PTE_PRESENT(__pte))
*updated = true;
@@ -181,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
if (pg_size == IOMMU_PAGE_SIZE_1G)
free_pgtable(__pte, end_level - 1);
else if (pg_size == IOMMU_PAGE_SIZE_2M)
- iommu_free_page(__pte);
+ iommu_free_pages(__pte);
}
return pte;
@@ -346,7 +346,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
+ pgtable->pgd = iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
if (!pgtable->pgd)
return NULL;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index f34209b08b4c..3117d99cf83d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -241,7 +241,9 @@ static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct acpihid_map_entry *p;
+ struct acpihid_map_entry *p, *p1 = NULL;
+ int hid_count = 0;
+ bool fw_bug;
if (!adev)
return -ENODEV;
@@ -249,12 +251,33 @@ static inline int get_acpihid_device_id(struct device *dev,
list_for_each_entry(p, &acpihid_map, list) {
if (acpi_dev_hid_uid_match(adev, p->hid,
p->uid[0] ? p->uid : NULL)) {
- if (entry)
- *entry = p;
- return p->devid;
+ p1 = p;
+ fw_bug = false;
+ hid_count = 1;
+ break;
+ }
+
+ /*
+		 * Count HID matches without a UID; raise FW_BUG but allow exactly one match
+ */
+ if (acpi_dev_hid_match(adev, p->hid)) {
+ p1 = p;
+ hid_count++;
+ fw_bug = true;
}
}
- return -EINVAL;
+
+ if (!p1)
+ return -EINVAL;
+ if (fw_bug)
+ dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
+ hid_count, hid_count > 1 ? "s" : "");
+ if (hid_count > 1)
+ return -EINVAL;
+ if (entry)
+ *entry = p1;
+
+ return p1->devid;
}
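The rewritten get_acpihid_device_id() above implements a tolerant matching policy: an exact HID+UID match wins silently, a single HID-only match is accepted but flagged as a firmware bug, and anything ambiguous is rejected. Summarized from the shown logic:

/*
 * HID + UID match        -> return devid, no warning
 * exactly one HID match  -> return devid, dev_err_once(FW_BUG ...)
 * several HID matches    -> -EINVAL,      dev_err_once(FW_BUG ...)
 * no match               -> -EINVAL
 */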
static inline int get_device_sbdf_id(struct device *dev)
@@ -982,6 +1005,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
iommu_ga_log_notifier = notifier;
+ /*
+ * Ensure all in-flight IRQ handlers run to completion before returning
+ * to the caller, e.g. to ensure module code isn't unloaded while it's
+ * being executed in the IRQ handler.
+ */
+ if (!notifier)
+ synchronize_rcu();
+
return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);
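The synchronize_rcu() added above on notifier removal is a general pattern: clearing the pointer alone does not stop a handler that already loaded it, but in current kernels synchronize_rcu() also waits out hard-IRQ handlers, since consolidated RCU treats preemption-disabled regions as read-side sections. A minimal, generic sketch with illustrative names:

static int (*example_notifier)(u32);

void example_unregister_notifier(void)
{
	WRITE_ONCE(example_notifier, NULL);
	/* wait for any IRQ handler still using the old pointer */
	synchronize_rcu();
}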
@@ -1812,7 +1843,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
}
@@ -1845,7 +1876,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
/* Free per device domain ID */
pdom_id_free(gcr3_info->domid);
- iommu_free_page(gcr3_info->gcr3_tbl);
+ iommu_free_pages(gcr3_info->gcr3_tbl);
gcr3_info->gcr3_tbl = NULL;
}
@@ -1884,7 +1915,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
return -ENOSPC;
gcr3_info->domid = domid;
- gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
+ gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K);
if (gcr3_info->gcr3_tbl == NULL) {
pdom_id_free(domid);
return -ENOMEM;
@@ -2908,6 +2939,9 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+ if (amd_iommu_ht_range_ignore())
+ return;
+
region = iommu_alloc_resv_region(HT_RANGE_START,
HT_RANGE_END - HT_RANGE_START + 1,
0, IOMMU_RESV_RESERVED, GFP_KERNEL);
@@ -2984,38 +3018,6 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
-static int amd_iommu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int amd_iommu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- int ret = 0;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- case IOMMU_DEV_FEAT_SVA:
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.blocked_domain = &blocked_domain,
@@ -3029,8 +3031,6 @@ const struct iommu_ops amd_iommu_ops = {
.get_resv_regions = amd_iommu_get_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.def_domain_type = amd_iommu_def_domain_type,
- .dev_enable_feat = amd_iommu_dev_enable_feature,
- .dev_disable_feat = amd_iommu_dev_disable_feature,
.page_response = amd_iommu_page_response,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
@@ -3129,7 +3129,7 @@ static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
return table;
}
-static struct irq_remap_table *__alloc_irq_table(int nid, int order)
+static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
{
struct irq_remap_table *table;
@@ -3137,7 +3137,8 @@ static struct irq_remap_table *__alloc_irq_table(int nid, int order)
if (!table)
return NULL;
- table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
+ table->table = iommu_alloc_pages_node_sz(
+ nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
if (!table->table) {
kfree(table);
return NULL;
@@ -3191,7 +3192,6 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
struct irq_remap_table *new_table = NULL;
struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
- int order = get_order(get_irq_table_size(max_irqs));
int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
u16 alias;
@@ -3211,7 +3211,7 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- new_table = __alloc_irq_table(nid, order);
+ new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
if (!new_table)
return NULL;
@@ -3246,7 +3246,7 @@ out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
- iommu_free_pages(new_table->table, order);
+ iommu_free_pages(new_table->table);
kfree(new_table);
}
return table;
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 7c67d69f0b8c..e6767c057d01 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -48,7 +48,7 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
- iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
+ iommu_free_pages(iommu->ppr_log);
}
/*
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index e13501541fdd..757d24f67ad4 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -776,8 +776,7 @@ static void apple_dart_domain_free(struct iommu_domain *domain)
{
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
- if (dart_domain->pgtbl_ops)
- free_io_pgtable_ops(dart_domain->pgtbl_ops);
+ free_io_pgtable_ops(dart_domain->pgtbl_ops);
kfree(dart_domain);
}
diff --git a/drivers/iommu/arm/Kconfig b/drivers/iommu/arm/Kconfig
new file mode 100644
index 000000000000..ef42bbe07dbe
--- /dev/null
+++ b/drivers/iommu/arm/Kconfig
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# ARM IOMMU support
+config ARM_SMMU
+ tristate "ARM Ltd. System MMU (SMMU) Support"
+ depends on ARM64 || ARM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU if ARM
+ help
+ Support for implementations of the ARM System MMU architecture
+ versions 1 and 2.
+
+ Say Y here if your SoC includes an IOMMU device implementing
+ the ARM SMMU architecture.
+
+if ARM_SMMU
+config ARM_SMMU_LEGACY_DT_BINDINGS
+ bool "Support the legacy \"mmu-masters\" devicetree bindings"
+ depends on ARM_SMMU=y && OF
+ help
+ Support for the badly designed and deprecated "mmu-masters"
+ devicetree bindings. This allows some DMA masters to attach
+ to the SMMU but does not provide any support via the DMA API.
+ If you're lucky, you might be able to get VFIO up and running.
+
+ If you say Y here then you'll make me very sad. Instead, say N
+ and move your firmware to the utopian future that was 2016.
+
+config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
+ bool "Disable unmatched stream bypass by default" if EXPERT
+ default y
+ help
+ If your firmware is broken and fails to describe StreamIDs which
+ Linux should know about in order to manage the SMMU correctly and
+ securely, and you don't want to boot with the 'arm-smmu.disable_bypass=0'
+ command line parameter, then as a last resort you can turn it off
+ by default here. But don't. This option may be removed at any time.
+
+ Note that 'arm-smmu.disable_bypass=1' will still take precedence.
+
+config ARM_SMMU_MMU_500_CPRE_ERRATA
+ bool "Enable errata workaround for CPRE in SMMU reset path"
+ default y
+ help
+	  Say Y here (the default) to apply a workaround that disables
+	  MMU-500's next-page prefetcher for the sake of 4 known errata.
+
+	  Say N here only when you are certain that no errata related to
+	  prefetch enablement apply to the platform.
+	  Refer to silicon-errata.rst for info on errata IDs.
+
+config ARM_SMMU_QCOM
+ def_tristate y
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ When running on a Qualcomm platform that has the custom variant
+ of the ARM SMMU, this needs to be built into the SMMU driver.
+
+config ARM_SMMU_QCOM_DEBUG
+ bool "ARM SMMU QCOM implementation defined debug support"
+ depends on ARM_SMMU_QCOM=y
+ help
+ Support for implementation specific debug features in ARM SMMU
+	  hardware found in QTI platforms. This includes support for
+ the Translation Buffer Units (TBU) that can be used to obtain
+ additional information when debugging memory management issues
+ like context faults.
+
+ Say Y here to enable debug for issues such as context faults
+	  or TLB sync timeouts, which require implementation-defined
+	  register dumps.
+endif
+
+config ARM_SMMU_V3
+ tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
+ depends on ARM64
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select GENERIC_MSI_IRQ
+ select IOMMUFD_DRIVER if IOMMUFD
+ help
+ Support for implementations of the ARM System MMU architecture
+ version 3 providing translation support to a PCIe root complex.
+
+ Say Y here if your system includes an IOMMU device implementing
+ the ARM SMMUv3 architecture.
+
+if ARM_SMMU_V3
+config ARM_SMMU_V3_SVA
+ bool "Shared Virtual Addressing support for the ARM SMMUv3"
+ select IOMMU_SVA
+ select IOMMU_IOPF
+ select MMU_NOTIFIER
+ help
+ Support for sharing process address spaces with devices using the
+ SMMUv3.
+
+ Say Y here if your system supports SVA extensions such as PCIe PASID
+ and PRI.
+
+config ARM_SMMU_V3_IOMMUFD
+ bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)"
+ depends on IOMMUFD
+ help
+ Support for IOMMUFD features intended to support virtual machines
+ with accelerated virtual IOMMUs.
+
+ Say Y here if you are doing development and testing on this feature.
+
+config ARM_SMMU_V3_KUNIT_TEST
+ tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on ARM_SMMU_V3_SVA
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to unit-test arm-smmu-v3 driver functions.
+
+ If unsure, say N.
+
+config TEGRA241_CMDQV
+ bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3"
+ depends on ACPI
+ help
+	  Support for the NVIDIA CMDQ-Virtualization extension for ARM SMMUv3.
+	  The CMDQ-V extension is similar to v3.3 ECMDQ in its support for
+	  multiple command queues, except with virtualization capabilities.
+
+	  Say Y here if your system is an NVIDIA Tegra241 (Grace) or has the
+	  same CMDQ-V extension.
+endif
+
+config QCOM_IOMMU
+ # Note: iommu drivers cannot (yet?) be built as modules
+ bool "Qualcomm IOMMU Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ select QCOM_SCM
+ select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMU on certain Qualcomm SoCs.
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 980cc6b33c43..0601dece0a0d 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -13,8 +13,6 @@
#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"
-static DEFINE_MUTEX(sva_lock);
-
static void __maybe_unused
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
{
@@ -257,84 +255,6 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return true;
}
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- /* We're not keeping track of SIDs in fault events */
- if (master->num_streams != 1)
- return false;
-
- return master->stall_enabled;
-}
-
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
- return false;
-
- /* SSID support is mandatory for the moment */
- return master->ssid_bits;
-}
-
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- bool enabled;
-
- mutex_lock(&sva_lock);
- enabled = master->sva_enabled;
- mutex_unlock(&sva_lock);
- return enabled;
-}
-
-static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- /*
- * Drivers for devices supporting PRI or stall should enable IOPF first.
- * Others have device-specific fault handlers and don't need IOPF.
- */
- if (!arm_smmu_master_iopf_supported(master))
- return 0;
-
- if (!master->iopf_enabled)
- return -EINVAL;
-
- return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
-}
-
-static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
-{
- struct device *dev = master->dev;
-
- if (!master->iopf_enabled)
- return;
-
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
-}
-
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- int ret;
-
- mutex_lock(&sva_lock);
- ret = arm_smmu_master_sva_enable_iopf(master);
- if (!ret)
- master->sva_enabled = true;
- mutex_unlock(&sva_lock);
-
- return ret;
-}
-
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- mutex_lock(&sva_lock);
- arm_smmu_master_sva_disable_iopf(master);
- master->sva_enabled = false;
- mutex_unlock(&sva_lock);
-
- return 0;
-}
-
void arm_smmu_sva_notifier_synchronize(void)
{
/*
@@ -353,6 +273,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_cd target;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return -EOPNOTSUPP;
+
/* Prevent arm_smmu_mm_release from being called while we are attaching */
if (!mmget_not_zero(domain->mm))
return -EINVAL;
@@ -406,6 +329,9 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
u32 asid;
int ret;
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return ERR_PTR(-EOPNOTSUPP);
+
smmu_domain = arm_smmu_domain_alloc();
if (IS_ERR(smmu_domain))
return ERR_CAST(smmu_domain);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 48d910399a1b..10cc6dc26b7b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2720,6 +2720,7 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static struct arm_smmu_master_domain *
arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
+ struct iommu_domain *domain,
struct arm_smmu_master *master,
ioasid_t ssid, bool nested_ats_flush)
{
@@ -2730,6 +2731,7 @@ arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
list_for_each_entry(master_domain, &smmu_domain->devices,
devices_elm) {
if (master_domain->master == master &&
+ master_domain->domain == domain &&
master_domain->ssid == ssid &&
master_domain->nested_ats_flush == nested_ats_flush)
return master_domain;
@@ -2756,6 +2758,58 @@ to_smmu_domain_devices(struct iommu_domain *domain)
return NULL;
}
+static int arm_smmu_enable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ int ret;
+
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return -EOPNOTSUPP;
+
+ /*
+	 * Drivers for devices supporting PRI or stall require IOPF; others
+	 * have device-specific fault handlers and don't need IOPF, so this
+	 * is not a failure.
+ */
+ if (!master->stall_enabled)
+ return 0;
+
+ /* We're not keeping track of SIDs in fault events */
+ if (master->num_streams != 1)
+ return -EOPNOTSUPP;
+
+ if (master->iopf_refcount) {
+ master->iopf_refcount++;
+ master_domain->using_iopf = true;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(master->smmu->evtq.iopf, master->dev);
+ if (ret)
+ return ret;
+ master->iopf_refcount = 1;
+ master_domain->using_iopf = true;
+ return 0;
+}
+
+static void arm_smmu_disable_iopf(struct arm_smmu_master *master,
+ struct arm_smmu_master_domain *master_domain)
+{
+ iommu_group_mutex_assert(master->dev);
+
+ if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA))
+ return;
+
+ if (!master_domain || !master_domain->using_iopf)
+ return;
+
+ master->iopf_refcount--;
+ if (master->iopf_refcount == 0)
+ iopf_queue_remove_device(master->smmu->evtq.iopf, master->dev);
+}
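The two helpers above replace the old per-device sva_enabled/iopf_enabled switches with a plain reference count held under the group mutex: the first fault-capable domain attached to a master adds the device to the event-queue IOPF queue, and the last one removes it. The discipline, reduced to a self-contained sketch where every name is illustrative:

static unsigned int example_refcount;	/* caller holds the group mutex */

static int example_hw_enable(void) { return 0; }	/* stand-ins for the */
static void example_hw_disable(void) { }		/* iopf_queue_* calls */

static int example_iopf_get(void)
{
	int ret;

	if (example_refcount) {
		example_refcount++;
		return 0;
	}

	ret = example_hw_enable();
	if (ret)
		return ret;
	example_refcount = 1;
	return 0;
}

static void example_iopf_put(void)
{
	if (--example_refcount == 0)
		example_hw_disable();
}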
+
static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
struct iommu_domain *domain,
ioasid_t ssid)
@@ -2772,15 +2826,17 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats;
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid,
- nested_ats_flush);
+ master_domain = arm_smmu_find_master_domain(smmu_domain, domain, master,
+ ssid, nested_ats_flush);
if (master_domain) {
list_del(&master_domain->devices_elm);
- kfree(master_domain);
if (master->ats_enabled)
atomic_dec(&smmu_domain->nr_ats_masters);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+ arm_smmu_disable_iopf(master, master_domain);
+ kfree(master_domain);
}
/*
@@ -2853,12 +2909,19 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
kfree(state->vmaster);
return -ENOMEM;
}
+ master_domain->domain = new_domain;
master_domain->master = master;
master_domain->ssid = state->ssid;
if (new_domain->type == IOMMU_DOMAIN_NESTED)
master_domain->nested_ats_flush =
to_smmu_nested_domain(new_domain)->enable_ats;
+ if (new_domain->iopf_handler) {
+ ret = arm_smmu_enable_iopf(master, master_domain);
+ if (ret)
+ goto err_free_master_domain;
+ }
+
/*
* During prepare we want the current smmu_domain and new
* smmu_domain to be in the devices list before we change any
@@ -2878,9 +2941,9 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
!arm_smmu_master_canwbs(master)) {
spin_unlock_irqrestore(&smmu_domain->devices_lock,
flags);
- kfree(master_domain);
kfree(state->vmaster);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iopf;
}
if (state->ats_enabled)
@@ -2899,6 +2962,12 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
wmb();
}
return 0;
+
+err_iopf:
+ arm_smmu_disable_iopf(master, master_domain);
+err_free_master_domain:
+ kfree(master_domain);
+ return ret;
}
/*
@@ -2953,7 +3022,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu = master->smmu;
if (smmu_domain->smmu != smmu)
- return ret;
+ return -EINVAL;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
@@ -3510,8 +3579,7 @@ static void arm_smmu_release_device(struct device *dev)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
- if (WARN_ON(arm_smmu_master_sva_enabled(master)))
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
+ WARN_ON(master->iopf_refcount);
/* Put the STE back to what arm_smmu_init_strtab() sets */
if (dev->iommu->require_direct)
@@ -3586,58 +3654,6 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
-static int arm_smmu_dev_enable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -ENODEV;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!arm_smmu_master_iopf_supported(master))
- return -EINVAL;
- if (master->iopf_enabled)
- return -EBUSY;
- master->iopf_enabled = true;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_supported(master))
- return -EINVAL;
- if (arm_smmu_master_sva_enabled(master))
- return -EBUSY;
- return arm_smmu_master_enable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
-static int arm_smmu_dev_disable_feature(struct device *dev,
- enum iommu_dev_features feat)
-{
- struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
- if (!master)
- return -EINVAL;
-
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- if (!master->iopf_enabled)
- return -EINVAL;
- if (master->sva_enabled)
- return -EBUSY;
- master->iopf_enabled = false;
- return 0;
- case IOMMU_DEV_FEAT_SVA:
- if (!arm_smmu_master_sva_enabled(master))
- return -EINVAL;
- return arm_smmu_master_disable_sva(master);
- default:
- return -EINVAL;
- }
-}
-
/*
* HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
* PCIe link and save the data to memory by DMA. The hardware is restricted to
@@ -3670,8 +3686,6 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
- .dev_enable_feat = arm_smmu_dev_enable_feature,
- .dev_disable_feat = arm_smmu_dev_disable_feature,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
.viommu_alloc = arm_vsmmu_alloc,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index dd1ad56ce863..ea41d790463e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -837,9 +837,8 @@ struct arm_smmu_master {
bool ats_enabled : 1;
bool ste_ats_enabled : 1;
bool stall_enabled;
- bool sva_enabled;
- bool iopf_enabled;
unsigned int ssid_bits;
+ unsigned int iopf_refcount;
};
/* SMMU private data for an IOMMU domain */
@@ -915,8 +914,14 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
struct arm_smmu_master_domain {
struct list_head devices_elm;
struct arm_smmu_master *master;
+ /*
+	 * For nested domains the master_domain is threaded onto the S2 parent;
+	 * this points to the IOMMU_DOMAIN_NESTED to disambiguate the masters.
+ */
+ struct iommu_domain *domain;
ioasid_t ssid;
bool nested_ats_flush : 1;
+ bool using_iopf : 1;
};
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -995,11 +1000,6 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
-bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
-bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
-int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
-int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
-bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);
@@ -1009,31 +1009,6 @@ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
return false;
}
-static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
-{
- return false;
-}
-
-static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
-{
- return -ENODEV;
-}
-
-static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
-{
- return false;
-}
-
static inline void arm_smmu_sva_notifier_synchronize(void) {}
#define arm_smmu_sva_domain_alloc NULL
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index d03b2239baad..65e0ef6539fe 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -406,6 +406,12 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
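
Both context-fault handlers touched by this series follow the same resume rule, summarized here as an annotation (our reading of the change, not patch text):

	/*
	 * A stalled transaction (FSR.SS set) must be resumed exactly once:
	 * a fault-handler return of -EAGAIN asks for the access to be
	 * retried (RESUME value 0); any other outcome terminates it with
	 * ARM_SMMU_RESUME_TERMINATE.
	 */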
@@ -416,6 +422,9 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
if (!tmp || tmp == -EBUSY) {
ret = IRQ_HANDLED;
resume = ARM_SMMU_RESUME_TERMINATE;
+ } else if (tmp == -EAGAIN) {
+ ret = IRQ_HANDLED;
+ resume = 0;
} else {
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 59d02687280e..62874b18f645 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -112,25 +112,39 @@ static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
struct arm_smmu_domain *smmu_domain = (void *)cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ u32 mask = BIT(cfg->cbndx);
+ bool stall_changed = !!(qsmmu->stall_enabled & mask) != enabled;
+ unsigned long flags;
if (enabled)
- qsmmu->stall_enabled |= BIT(cfg->cbndx);
+ qsmmu->stall_enabled |= mask;
else
- qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
-}
+ qsmmu->stall_enabled &= ~mask;
-static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
-{
- struct arm_smmu_domain *smmu_domain = (void *)cookie;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- u32 reg = 0;
+ /*
+ * If the device is on and we changed the setting, update the register.
+ * The spec pseudocode says that CFCFG is resampled after a fault, and
+ * we believe that no implementations cache it in the TLB, so it should
+ * be safe to change it without a TLB invalidation.
+ */
+ if (stall_changed && pm_runtime_get_if_active(smmu->dev) > 0) {
+ u32 reg;
+
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR);
+
+ if (enabled)
+ reg |= ARM_SMMU_SCTLR_CFCFG;
+ else
+ reg &= ~ARM_SMMU_SCTLR_CFCFG;
- if (terminate)
- reg |= ARM_SMMU_RESUME_TERMINATE;
+ arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR, reg);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
- arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
+ pm_runtime_put_autosuspend(smmu->dev);
+ }
}
static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set)
@@ -337,7 +351,6 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
- priv->resume_translation = qcom_adreno_smmu_resume_translation;
priv->set_prr_bit = NULL;
priv->set_prr_addr = NULL;
@@ -356,6 +369,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,qcm2290-mdss" },
+ { .compatible = "qcom,sar2130p-mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
{ .compatible = "qcom,sc7280-mdss" },
@@ -585,6 +599,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
@@ -594,6 +609,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,
+ .context_fault_needs_threaded_irq = true,
};
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 8f439c265a23..8d95b14c7d5a 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -474,6 +474,12 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
arm_smmu_print_context_fault_info(smmu, idx, &cfi);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
+
+ if (cfi.fsr & ARM_SMMU_CB_FSR_SS) {
+ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
+ ret == -EAGAIN ? 0 : ARM_SMMU_RESUME_TERMINATE);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a775e4dbe06f..ea2ef53bd4fe 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -27,6 +27,7 @@
#include <linux/msi.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
@@ -105,7 +106,7 @@ early_param("iommu.forcedac", iommu_dma_forcedac_setup);
struct iova_fq_entry {
unsigned long iova_pfn;
unsigned long pages;
- struct list_head freelist;
+ struct iommu_pages_list freelist;
u64 counter; /* Flush counter when this entry was added */
};
@@ -154,6 +155,8 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);
+ fq->entries[idx].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[idx].freelist);
fq->head = (fq->head + 1) & fq->mod_mask;
}
}
@@ -176,7 +179,8 @@ static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
static void fq_flush_timeout(struct timer_list *t)
{
- struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
+ struct iommu_dma_cookie *cookie = timer_container_of(cookie, t,
+ fq_timer);
int cpu;
atomic_set(&cookie->fq_timer_on, 0);
@@ -192,7 +196,7 @@ static void fq_flush_timeout(struct timer_list *t)
static void queue_iova(struct iommu_dma_cookie *cookie,
unsigned long pfn, unsigned long pages,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct iova_fq *fq;
unsigned long flags;
@@ -231,7 +235,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
fq->entries[idx].iova_pfn = pfn;
fq->entries[idx].pages = pages;
fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
- list_splice(freelist, &fq->entries[idx].freelist);
+ iommu_pages_list_splice(freelist, &fq->entries[idx].freelist);
spin_unlock_irqrestore(&fq->lock, flags);
@@ -289,7 +293,8 @@ static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
spin_lock_init(&fq->lock);
for (i = 0; i < fq_size; i++)
- INIT_LIST_HEAD(&fq->entries[i].freelist);
+ fq->entries[i].freelist =
+ IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist);
}
static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
@@ -1137,6 +1142,54 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
+static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iova_domain *iovad = &domain->iova_cookie->iovad;
+
+ if (!is_swiotlb_active(dev)) {
+ dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+ return (phys_addr_t)DMA_MAPPING_ERROR;
+ }
+
+ trace_swiotlb_bounced(dev, phys, size);
+
+ phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
+ attrs);
+
+ /*
+ * Untrusted devices should not see padding areas with random leftover
+ * kernel data, so zero the pre- and post-padding.
+ * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
+ * the contents of the original memory buffer.
+ */
+ if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
+ size_t start, virt = (size_t)phys_to_virt(phys);
+
+ /* Pre-padding */
+ start = iova_align_down(iovad, virt);
+ memset((void *)start, 0, virt - start);
+
+ /* Post-padding */
+ start = virt + size;
+ memset((void *)start, 0, iova_align(iovad, start) - start);
+ }
+
+ return phys;
+}
+
+/*
+ * Checks if a physical buffer has unaligned boundaries with respect to
+ * the IOMMU granule. Returns non-zero if either the start or end
+ * address is not aligned to the granule boundary.
+ */
+static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
+ size_t size)
+{
+ return iova_offset(iovad, phys | size);
+}
+
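A quick worked example of this check, with illustrative numbers:

	/*
	 * Assume a 4K granule, phys = 0x80001200, size = 0x300:
	 *   phys | size = 0x80001300
	 *   iova_offset() masks with (granule - 1) = 0xfff -> 0x300 != 0
	 * so the buffer breaks granule alignment and, for devices that
	 * qualify for bouncing, must go through swiotlb.
	 */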
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -1150,42 +1203,14 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
dma_addr_t iova, dma_mask = dma_get_mask(dev);
/*
- * If both the physical buffer start address and size are
- * page aligned, we don't need to use a bounce page.
+ * If both the physical buffer start address and size are page aligned,
+ * we don't need to use a bounce page.
*/
if (dev_use_swiotlb(dev, size, dir) &&
- iova_offset(iovad, phys | size)) {
- if (!is_swiotlb_active(dev)) {
- dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
+ iova_unaligned(iovad, phys, size)) {
+ phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
+ if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
- }
-
- trace_swiotlb_bounced(dev, phys, size);
-
- phys = swiotlb_tbl_map_single(dev, phys, size,
- iova_mask(iovad), dir, attrs);
-
- if (phys == DMA_MAPPING_ERROR)
- return DMA_MAPPING_ERROR;
-
- /*
- * Untrusted devices should not see padding areas with random
- * leftover kernel data, so zero the pre- and post-padding.
- * swiotlb_tbl_map_single() has initialized the bounce buffer
- * proper to the contents of the original memory buffer.
- */
- if (dev_is_untrusted(dev)) {
- size_t start, virt = (size_t)phys_to_virt(phys);
-
- /* Pre-padding */
- start = iova_align_down(iovad, virt);
- memset((void *)start, 0, virt - start);
-
- /* Post-padding */
- start = virt + size;
- memset((void *)start, 0,
- iova_align(iovad, start) - start);
- }
}
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1359,7 +1384,6 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
struct scatterlist *s, *prev = NULL;
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
struct pci_p2pdma_map_state p2pdma_state = {};
- enum pci_p2pdma_map_type map;
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
@@ -1389,28 +1413,30 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
- if (is_pci_p2pdma_page(sg_page(s))) {
- map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
- switch (map) {
- case PCI_P2PDMA_MAP_BUS_ADDR:
- /*
- * iommu_map_sg() will skip this segment as
- * it is marked as a bus address,
- * __finalise_sg() will copy the dma address
- * into the output segment.
- */
- continue;
- case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
- /*
- * Mapping through host bridge should be
- * mapped with regular IOVAs, thus we
- * do nothing here and continue below.
- */
- break;
- default:
- ret = -EREMOTEIO;
- goto out_restore_sg;
- }
+ switch (pci_p2pdma_state(&p2pdma_state, dev, sg_page(s))) {
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ /*
+ * Mapping through host bridge should be mapped with
+ * regular IOVAs, thus we do nothing here and continue
+ * below.
+ */
+ break;
+ case PCI_P2PDMA_MAP_NONE:
+ break;
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * iommu_map_sg() will skip this segment as it is marked
+ * as a bus address, __finalise_sg() will copy the dma
+ * address into the output segment.
+ */
+ s->dma_address = pci_p2pdma_bus_addr_map(&p2pdma_state,
+ sg_phys(s));
+			sg_dma_len(s) = s->length;
+ sg_dma_mark_bus_address(s);
+ continue;
+ default:
+ ret = -EREMOTEIO;
+ goto out_restore_sg;
}
sg_dma_address(s) = s_iova_off;
@@ -1721,6 +1747,354 @@ size_t iommu_dma_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
+/**
+ * dma_iova_try_alloc - Try to allocate an IOVA space
+ * @dev: Device to allocate the IOVA space for
+ * @state: IOVA state
+ * @phys: physical address
+ * @size: IOVA size
+ *
+ * Check if @dev supports the IOVA-based DMA API, and if yes allocate IOVA space
+ * for the given base address and size.
+ *
+ * Note: @phys is only used to calculate the IOVA alignment. Callers that always
+ * do PAGE_SIZE aligned transfers can safely pass 0 here.
+ *
+ * Returns %true if the IOVA-based DMA API can be used and IOVA space has been
+ * allocated, or %false if the regular DMA API should be used.
+ */
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size)
+{
+ struct iommu_dma_cookie *cookie;
+ struct iommu_domain *domain;
+ struct iova_domain *iovad;
+ size_t iova_off;
+ dma_addr_t addr;
+
+ memset(state, 0, sizeof(*state));
+ if (!use_dma_iommu(dev))
+ return false;
+
+ domain = iommu_get_dma_domain(dev);
+ cookie = domain->iova_cookie;
+ iovad = &cookie->iovad;
+ iova_off = iova_offset(iovad, phys);
+
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+ iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev)))
+ return false;
+
+ if (WARN_ON_ONCE(!size))
+ return false;
+
+ /*
+	 * DMA_IOVA_USE_SWIOTLB is a flag set by the dma-iommu internals;
+	 * make sure the caller didn't set it and didn't use this
+	 * interface to map SIZE_MAX.
+ */
+ if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB))
+ return false;
+
+ addr = iommu_dma_alloc_iova(domain,
+ iova_align(iovad, size + iova_off),
+ dma_get_mask(dev), dev);
+ if (!addr)
+ return false;
+
+ state->addr = addr + iova_off;
+ state->__size = size;
+ return true;
+}
+EXPORT_SYMBOL_GPL(dma_iova_try_alloc);
+
+/**
+ * dma_iova_free - Free an IOVA space
+ * @dev: Device to free the IOVA space for
+ * @state: IOVA state
+ *
+ * Undoes a successful dma_iova_try_alloc().
+ *
+ * Note that all dma_iova_link() calls need to be undone first. For callers
+ * that never call dma_iova_unlink(), dma_iova_destroy() can be used instead
+ * which unlinks all ranges and frees the IOVA space in a single efficient
+ * operation.
+ */
+void dma_iova_free(struct device *dev, struct dma_iova_state *state)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, state->addr);
+ size_t size = dma_iova_size(state);
+
+ iommu_dma_free_iova(domain, state->addr - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad), NULL);
+}
+EXPORT_SYMBOL_GPL(dma_iova_free);
+
+static int __dma_iova_link(struct device *dev, dma_addr_t addr,
+ phys_addr_t phys, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(phys, size, dir);
+
+ return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
+ dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+}
+
+static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
+ phys_addr_t phys, size_t bounce_len,
+ enum dma_data_direction dir, unsigned long attrs,
+ size_t iova_start_pad)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iova_domain *iovad = &domain->iova_cookie->iovad;
+ phys_addr_t bounce_phys;
+ int error;
+
+ bounce_phys = iommu_dma_map_swiotlb(dev, phys, bounce_len, dir, attrs);
+ if (bounce_phys == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+
+ error = __dma_iova_link(dev, addr - iova_start_pad,
+ bounce_phys - iova_start_pad,
+ iova_align(iovad, bounce_len), dir, attrs);
+ if (error)
+ swiotlb_tbl_unmap_single(dev, bounce_phys, bounce_len, dir,
+ attrs);
+ return error;
+}
+
+static int iommu_dma_iova_link_swiotlb(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, phys);
+ size_t iova_end_pad = iova_offset(iovad, phys + size);
+ dma_addr_t addr = state->addr + offset;
+ size_t mapped = 0;
+ int error;
+
+ if (iova_start_pad) {
+ size_t bounce_len = min(size, iovad->granule - iova_start_pad);
+
+ error = iommu_dma_iova_bounce_and_link(dev, addr, phys,
+ bounce_len, dir, attrs, iova_start_pad);
+ if (error)
+ return error;
+ state->__size |= DMA_IOVA_USE_SWIOTLB;
+
+ mapped += bounce_len;
+ size -= bounce_len;
+ if (!size)
+ return 0;
+ }
+
+ size -= iova_end_pad;
+ error = __dma_iova_link(dev, addr + mapped, phys + mapped, size, dir,
+ attrs);
+ if (error)
+ goto out_unmap;
+ mapped += size;
+
+ if (iova_end_pad) {
+ error = iommu_dma_iova_bounce_and_link(dev, addr + mapped,
+ phys + mapped, iova_end_pad, dir, attrs, 0);
+ if (error)
+ goto out_unmap;
+ state->__size |= DMA_IOVA_USE_SWIOTLB;
+ }
+
+ return 0;
+
+out_unmap:
+ dma_iova_unlink(dev, state, 0, mapped, dir, attrs);
+ return error;
+}
+
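The split performed by this helper can be pictured as follows (a sketch of our reading, assuming a 4K granule):

	/*
	 *  phys:  | head pad | granule-aligned middle ... | tail pad |
	 *  IOVA:  | swiotlb  | linked directly to phys    | swiotlb  |
	 *
	 * Only the misaligned head and tail granules are bounced;
	 * DMA_IOVA_USE_SWIOTLB is recorded in state->__size so the
	 * unlink path knows bounce buffers may need tearing down.
	 */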
+/**
+ * dma_iova_link - Link a range of IOVA space
+ * @dev: DMA device
+ * @state: IOVA state
+ * @phys: physical address to link
+ * @offset: offset into the IOVA state to map into
+ * @size: size of the buffer
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Link a range of IOVA space for the given IOVA state without IOTLB sync.
+ * This function is used to link multiple physical addresses in contiguous
+ * IOVA space without performing costly IOTLB sync.
+ *
+ * The caller is responsible for calling dma_iova_sync() to sync the IOTLB at
+ * the end of linkage.
+ */
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, phys);
+
+ if (WARN_ON_ONCE(iova_start_pad && offset > 0))
+ return -EIO;
+
+ if (dev_use_swiotlb(dev, size, dir) &&
+ iova_unaligned(iovad, phys, size))
+ return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
+ size, dir, attrs);
+
+ return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
+ phys - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad), dir, attrs);
+}
+EXPORT_SYMBOL_GPL(dma_iova_link);
+
+/**
+ * dma_iova_sync - Sync IOTLB
+ * @dev: DMA device
+ * @state: IOVA state
+ * @offset: offset into the IOVA state to sync
+ * @size: size of the buffer
+ *
+ * Sync IOTLB for the given IOVA state. This function should be called on
+ * the IOVA-contiguous range created by one or more dma_iova_link() calls
+ * to sync the IOTLB.
+ */
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ dma_addr_t addr = state->addr + offset;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+
+ return iommu_sync_map(domain, addr - iova_start_pad,
+ iova_align(iovad, size + iova_start_pad));
+}
+EXPORT_SYMBOL_GPL(dma_iova_sync);
+
+static void iommu_dma_iova_unlink_range_slow(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+ dma_addr_t end = addr + size;
+
+ do {
+ phys_addr_t phys;
+ size_t len;
+
+ phys = iommu_iova_to_phys(domain, addr);
+ if (WARN_ON(!phys))
+			/* Something very horrible happened here */
+ return;
+
+ len = min_t(size_t,
+ end - addr, iovad->granule - iova_start_pad);
+
+ if (!dev_is_dma_coherent(dev) &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_cpu(phys, len, dir);
+
+ swiotlb_tbl_unmap_single(dev, phys, len, dir, attrs);
+
+ addr += len;
+ iova_start_pad = 0;
+ } while (addr < end);
+}
+
+static void __iommu_dma_iova_unlink(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs,
+ bool free_iova)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ dma_addr_t addr = state->addr + offset;
+ size_t iova_start_pad = iova_offset(iovad, addr);
+ struct iommu_iotlb_gather iotlb_gather;
+ size_t unmapped;
+
+ if ((state->__size & DMA_IOVA_USE_SWIOTLB) ||
+ (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)))
+ iommu_dma_iova_unlink_range_slow(dev, addr, size, dir, attrs);
+
+ iommu_iotlb_gather_init(&iotlb_gather);
+ iotlb_gather.queued = free_iova && READ_ONCE(cookie->fq_domain);
+
+ size = iova_align(iovad, size + iova_start_pad);
+ addr -= iova_start_pad;
+ unmapped = iommu_unmap_fast(domain, addr, size, &iotlb_gather);
+ WARN_ON(unmapped != size);
+
+ if (!iotlb_gather.queued)
+ iommu_iotlb_sync(domain, &iotlb_gather);
+ if (free_iova)
+ iommu_dma_free_iova(domain, addr, size, &iotlb_gather);
+}
+
+/**
+ * dma_iova_unlink - Unlink a range of IOVA space
+ * @dev: DMA device
+ * @state: IOVA state
+ * @offset: offset into the IOVA state to unlink
+ * @size: size of the buffer
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Unlink a range of IOVA space for the given IOVA state.
+ */
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ __iommu_dma_iova_unlink(dev, state, offset, size, dir, attrs, false);
+}
+EXPORT_SYMBOL_GPL(dma_iova_unlink);
+
+/**
+ * dma_iova_destroy - Finish a DMA mapping transaction
+ * @dev: DMA device
+ * @state: IOVA state
+ * @mapped_len: number of bytes to unmap
+ * @dir: DMA direction
+ * @attrs: attributes of mapping properties
+ *
+ * Unlink the IOVA range up to @mapped_len and free the entire IOVA space. The
+ * range of IOVA from the start of @state to @mapped_len must all be linked,
+ * and be the only linked IOVA range in @state.
+ */
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ if (mapped_len)
+ __iommu_dma_iova_unlink(dev, state, 0, mapped_len, dir, attrs,
+ true);
+ else
+ /*
+		 * We can get here if the first call to dma_iova_link() failed
+		 * and there is nothing to unlink; just free the IOVA space.
+ */
+ dma_iova_free(dev, state);
+}
+EXPORT_SYMBOL_GPL(dma_iova_destroy);
+
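Taken together, the new entry points compose as below. This is a hedged usage sketch, not code from the patch: map_two_chunks, the two-chunk layout, and the assumption that len_a and chunk_b are granule-aligned (so the second link starts on a granule boundary) are all illustrative.

	static dma_addr_t map_two_chunks(struct device *dev,
					 struct dma_iova_state *state,
					 phys_addr_t chunk_a, size_t len_a,
					 phys_addr_t chunk_b, size_t len_b)
	{
		size_t mapped = 0;
		int ret;

		/* Falls back to the regular DMA API when unsupported. */
		if (!dma_iova_try_alloc(dev, state, chunk_a, len_a + len_b))
			return DMA_MAPPING_ERROR;

		ret = dma_iova_link(dev, state, chunk_a, 0, len_a,
				    DMA_TO_DEVICE, 0);
		if (ret)
			goto err;
		mapped += len_a;

		ret = dma_iova_link(dev, state, chunk_b, mapped, len_b,
				    DMA_TO_DEVICE, 0);
		if (ret)
			goto err;
		mapped += len_b;

		/* One IOTLB sync covers every range linked above. */
		ret = dma_iova_sync(dev, state, 0, mapped);
		if (ret)
			goto err;

		return state->addr;
	err:
		/* Unlinks up to @mapped, or only frees when nothing linked. */
		dma_iova_destroy(dev, state, mapped, DMA_TO_DEVICE, 0);
		return DMA_MAPPING_ERROR;
	}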
void iommu_setup_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 317266aca6e2..fcb6a0f7c082 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -902,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
if (!domain)
return NULL;
- domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
+ domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K);
if (!domain->pgtable)
goto err_pgtable;
- domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
+ domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K);
if (!domain->lv2entcnt)
goto err_counter;
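For reference, the size math behind this conversion: the removed calls took a page order, so order 2 meant 1 << 2 == 4 pages == SZ_16K for the first-level table and order 1 meant 2 pages == SZ_8K for the lv2 entry counters; the new _sz calls simply name those sizes directly.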
@@ -932,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
return &domain->domain;
err_lv2ent:
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->lv2entcnt);
err_counter:
- iommu_free_pages(domain->pgtable, 2);
+ iommu_free_pages(domain->pgtable);
err_pgtable:
kfree(domain);
return NULL;
@@ -975,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
phys_to_virt(base));
}
- iommu_free_pages(domain->pgtable, 2);
- iommu_free_pages(domain->lv2entcnt, 1);
+ iommu_free_pages(domain->pgtable);
+ iommu_free_pages(domain->lv2entcnt);
kfree(domain);
}
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 30be786bff11..5f08523f97cb 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -64,7 +64,7 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_update_paace_stash(liodn, val);
if (ret) {
- pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
+ pr_debug("Failed to update SPAACE for liodn %d\n", liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 6c7528130cf9..ada651c4a01b 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DMAR_TABLE) += dmar.o
-obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o
-obj-$(CONFIG_DMAR_TABLE) += trace.o
+obj-y += iommu.o pasid.o nested.o cache.o prq.o
+obj-$(CONFIG_DMAR_TABLE) += dmar.o trace.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
-ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
-endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index e540092d664d..b61d9ea27aa9 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1099,6 +1099,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
spin_lock_init(&iommu->device_rbtree_lock);
mutex_init(&iommu->iopf_lock);
iommu->node = NUMA_NO_NODE;
+ spin_lock_init(&iommu->lock);
+ ida_init(&iommu->domain_ida);
+ mutex_init(&iommu->did_lock);
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
@@ -1187,7 +1190,7 @@ static void free_iommu(struct intel_iommu *iommu)
}
if (iommu->qi) {
- iommu_free_page(iommu->qi->desc);
+ iommu_free_pages(iommu->qi->desc);
kfree(iommu->qi->desc_status);
kfree(iommu->qi);
}
@@ -1195,6 +1198,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
+ ida_destroy(&iommu->domain_ida);
ida_free(&dmar_seq_ids, iommu->seq_id);
kfree(iommu);
}
@@ -1681,7 +1685,6 @@ int dmar_enable_qi(struct intel_iommu *iommu)
{
struct q_inval *qi;
void *desc;
- int order;
if (!ecap_qis(iommu->ecap))
return -ENOENT;
@@ -1702,8 +1705,9 @@ int dmar_enable_qi(struct intel_iommu *iommu)
* Need two pages to accommodate 256 descriptors of 256 bits each
* if the remapping hardware supports scalable mode translation.
*/
- order = ecap_smts(iommu->ecap) ? 1 : 0;
- desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
+ desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ ecap_smts(iommu->ecap) ? SZ_8K :
+ SZ_4K);
if (!desc) {
kfree(qi);
iommu->qi = NULL;
@@ -1714,7 +1718,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
- iommu_free_page(qi->desc);
+ iommu_free_pages(qi->desc);
kfree(qi);
iommu->qi = NULL;
return -ENOMEM;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index cb0b993bebb4..7aa3932251b2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -397,7 +397,8 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
if (!alloc)
return NULL;
- context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+ context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
+ SZ_4K);
if (!context)
return NULL;
@@ -571,17 +572,17 @@ static void free_context_table(struct intel_iommu *iommu)
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
- iommu_free_page(context);
+ iommu_free_pages(context);
if (!sm_supported(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
- iommu_free_page(context);
+ iommu_free_pages(context);
}
- iommu_free_page(iommu->root_entry);
+ iommu_free_pages(iommu->root_entry);
iommu->root_entry = NULL;
}
@@ -731,7 +732,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
if (!dma_pte_present(pte)) {
uint64_t pteval, tmp;
- tmp_page = iommu_alloc_page_node(domain->nid, gfp);
+ tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp,
+ SZ_4K);
if (!tmp_page)
return NULL;
@@ -745,7 +747,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
tmp = 0ULL;
if (!try_cmpxchg64(&pte->val, &tmp, pteval))
/* Someone else set it while we were thinking; use theirs. */
- iommu_free_page(tmp_page);
+ iommu_free_pages(tmp_page);
else
domain_flush_cache(domain, pte, sizeof(*pte));
}
@@ -858,7 +860,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
- iommu_free_page(level_pte);
+ iommu_free_pages(level_pte);
}
next:
pfn += level_size(level);
@@ -882,7 +884,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- iommu_free_page(domain->pgd);
+ iommu_free_pages(domain->pgd);
domain->pgd = NULL;
}
}
@@ -894,18 +896,16 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
static void dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *pte,
- struct list_head *freelist)
+ int level, struct dma_pte *parent_pte,
+ struct iommu_pages_list *freelist)
{
- struct page *pg;
+ struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte));
- pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
- list_add_tail(&pg->lru, freelist);
+ iommu_pages_list_add(freelist, pte);
if (level == 1)
return;
- pte = page_address(pg);
do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
dma_pte_list_pagetables(domain, level - 1, pte, freelist);
@@ -916,7 +916,7 @@ static void dma_pte_list_pagetables(struct dmar_domain *domain,
static void dma_pte_clear_level(struct dmar_domain *domain, int level,
struct dma_pte *pte, unsigned long pfn,
unsigned long start_pfn, unsigned long last_pfn,
- struct list_head *freelist)
+ struct iommu_pages_list *freelist)
{
struct dma_pte *first_pte = NULL, *last_pte = NULL;
@@ -961,7 +961,8 @@ next:
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
- unsigned long last_pfn, struct list_head *freelist)
+ unsigned long last_pfn,
+ struct iommu_pages_list *freelist)
{
if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
WARN_ON(start_pfn > last_pfn))
@@ -973,8 +974,7 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- struct page *pgd_page = virt_to_page(domain->pgd);
- list_add_tail(&pgd_page->lru, freelist);
+ iommu_pages_list_add(freelist, domain->pgd);
domain->pgd = NULL;
}
}
@@ -984,7 +984,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
+ root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K);
if (!root) {
pr_err("Allocating root entry for %s failed\n",
iommu->name);
@@ -1289,52 +1289,13 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-static int iommu_init_domains(struct intel_iommu *iommu)
-{
- u32 ndomains;
-
- ndomains = cap_ndoms(iommu->cap);
- pr_debug("%s: Number of Domains supported <%d>\n",
- iommu->name, ndomains);
-
- spin_lock_init(&iommu->lock);
-
- iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
- if (!iommu->domain_ids)
- return -ENOMEM;
-
- /*
- * If Caching mode is set, then invalid translations are tagged
- * with domain-id 0, hence we need to pre-allocate it. We also
- * use domain-id 0 as a marker for non-allocated domain-id, so
- * make sure it is not used for a real domain.
- */
- set_bit(0, iommu->domain_ids);
-
- /*
- * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
- * entry for first-level or pass-through translation modes should
- * be programmed with a domain id different from those used for
- * second-level or nested translation. We reserve a domain id for
- * this purpose. This domain id is also used for identity domain
- * in legacy mode.
- */
- set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
-
- return 0;
-}
-
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
- if (!iommu->domain_ids)
- return;
-
/*
* All iommu domains must have been detached from the devices,
* hence there should be no domain IDs in use.
*/
- if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
- > NUM_RESERVED_DID))
+ if (WARN_ON(!ida_is_empty(&iommu->domain_ida)))
return;
if (iommu->gcmd & DMA_GCMD_TE)
@@ -1343,11 +1304,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
- if (iommu->domain_ids) {
- bitmap_free(iommu->domain_ids);
- iommu->domain_ids = NULL;
- }
-
if (iommu->copied_tables) {
bitmap_free(iommu->copied_tables);
iommu->copied_tables = NULL;
@@ -1380,7 +1336,6 @@ static bool first_level_by_default(struct intel_iommu *iommu)
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
struct iommu_domain_info *info, *curr;
- unsigned long ndomains;
int num, ret = -ENOSPC;
if (domain->domain.type == IOMMU_DOMAIN_SVA)
@@ -1390,40 +1345,36 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (!info)
return -ENOMEM;
- spin_lock(&iommu->lock);
+ guard(mutex)(&iommu->did_lock);
curr = xa_load(&domain->iommu_array, iommu->seq_id);
if (curr) {
curr->refcnt++;
- spin_unlock(&iommu->lock);
kfree(info);
return 0;
}
- ndomains = cap_ndoms(iommu->cap);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
+ num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
+ cap_ndoms(iommu->cap) - 1, GFP_KERNEL);
+ if (num < 0) {
pr_err("%s: No free domain ids\n", iommu->name);
goto err_unlock;
}
- set_bit(num, iommu->domain_ids);
info->refcnt = 1;
info->did = num;
info->iommu = iommu;
curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
- NULL, info, GFP_ATOMIC);
+ NULL, info, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
goto err_clear;
}
- spin_unlock(&iommu->lock);
return 0;
err_clear:
- clear_bit(info->did, iommu->domain_ids);
+ ida_free(&iommu->domain_ida, info->did);
err_unlock:
- spin_unlock(&iommu->lock);
kfree(info);
return ret;
}
@@ -1435,21 +1386,21 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
if (domain->domain.type == IOMMU_DOMAIN_SVA)
return;
- spin_lock(&iommu->lock);
+ guard(mutex)(&iommu->did_lock);
info = xa_load(&domain->iommu_array, iommu->seq_id);
if (--info->refcnt == 0) {
- clear_bit(info->did, iommu->domain_ids);
+ ida_free(&iommu->domain_ida, info->did);
xa_erase(&domain->iommu_array, iommu->seq_id);
domain->nid = NUMA_NO_NODE;
kfree(info);
}
- spin_unlock(&iommu->lock);
}
static void domain_exit(struct dmar_domain *domain)
{
if (domain->pgd) {
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist =
+ IOMMU_PAGES_LIST_INIT(freelist);
domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
iommu_put_pages_list(&freelist);
@@ -1681,9 +1632,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- attr |= DMA_FL_PTE_PRESENT;
if (domain->use_first_level) {
- attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
if (prot & DMA_PTE_WRITE)
attr |= DMA_FL_PTE_DIRTY;
}
@@ -1859,6 +1809,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return ret;
info->domain = domain;
+ info->domain_attached = true;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&domain->lock, flags);
@@ -2027,7 +1978,8 @@ static int copy_context_table(struct intel_iommu *iommu,
if (!old_ce)
goto out;
- new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
+ new_ce = iommu_alloc_pages_node_sz(iommu->node,
+ GFP_KERNEL, SZ_4K);
if (!new_ce)
goto out_unmap;
@@ -2042,7 +1994,7 @@ static int copy_context_table(struct intel_iommu *iommu,
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
- set_bit(did, iommu->domain_ids);
+ ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL);
set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
@@ -2169,11 +2121,6 @@ static int __init init_dmars(void)
}
intel_iommu_init_qi(iommu);
-
- ret = iommu_init_domains(iommu);
- if (ret)
- goto free_iommu;
-
init_translation_status(iommu);
if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
@@ -2651,9 +2598,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
- ret = iommu_init_domains(iommu);
- if (ret == 0)
- ret = iommu_alloc_root_entry(iommu);
+ ret = iommu_alloc_root_entry(iommu);
if (ret)
goto out;
@@ -2744,7 +2689,6 @@ static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev)
struct device *tmp;
int i;
- dev = pci_physfn(dev);
rcu_read_lock();
list_for_each_entry_rcu(satcu, &dmar_satc_units, list) {
@@ -2761,15 +2705,16 @@ out:
return satcu;
}
-static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
+static bool dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
{
- int i, ret = 1;
- struct pci_bus *bus;
struct pci_dev *bridge = NULL;
- struct device *tmp;
- struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
struct dmar_satc_unit *satcu;
+ struct acpi_dmar_atsr *atsr;
+ bool supported = true;
+ struct pci_bus *bus;
+ struct device *tmp;
+ int i;
dev = pci_physfn(dev);
satcu = dmar_find_matched_satc_unit(dev);
@@ -2787,11 +2732,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
bridge = bus->self;
/* If it's an integrated device, allow ATS */
if (!bridge)
- return 1;
+ return true;
/* Connected via non-PCIe: no ATS */
if (!pci_is_pcie(bridge) ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
+ return false;
/* If we found the root port, look it up in the ATSR */
if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
break;
@@ -2810,11 +2755,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
if (atsru->include_all)
goto out;
}
- ret = 0;
+ supported = false;
out:
rcu_read_unlock();
- return ret;
+ return supported;
}
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
@@ -2972,9 +2917,14 @@ static ssize_t domains_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
- return sysfs_emit(buf, "%d\n",
- bitmap_weight(iommu->domain_ids,
- cap_ndoms(iommu->cap)));
+ unsigned int count = 0;
+ int id;
+
+ for (id = 0; id < cap_ndoms(iommu->cap); id++)
+ if (ida_exists(&iommu->domain_ida, id))
+ count++;
+
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(domains_used);
@@ -3257,6 +3207,10 @@ void device_block_translation(struct device *dev)
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
+	/* Device already in DMA blocking state. Nothing to do. */
+ if (!info->domain_attached)
+ return;
+
if (info->domain)
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
@@ -3268,6 +3222,9 @@ void device_block_translation(struct device *dev)
domain_context_clear(info);
}
+ /* Device now in DMA blocking state. */
+ info->domain_attached = false;
+
if (!info->domain)
return;
@@ -3282,6 +3239,9 @@ void device_block_translation(struct device *dev)
static int blocking_domain_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ iopf_for_domain_remove(info->domain ? &info->domain->domain : NULL, dev);
device_block_translation(dev);
return 0;
}
@@ -3360,7 +3320,7 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
/* always allocate the top pgd */
- domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+ domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K);
if (!domain->pgd) {
kfree(domain);
return ERR_PTR(-ENOMEM);
@@ -3492,7 +3452,15 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
if (ret)
return ret;
- return dmar_domain_attach_device(to_dmar_domain(domain), dev);
+ ret = iopf_for_domain_set(domain, dev);
+ if (ret)
+ return ret;
+
+ ret = dmar_domain_attach_device(to_dmar_domain(domain), dev);
+ if (ret)
+ iopf_for_domain_remove(domain, dev);
+
+ return ret;
}
static int intel_iommu_map(struct iommu_domain *domain,
@@ -3603,7 +3571,8 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
cache_tag_flush_range(to_dmar_domain(domain), gather->start,
- gather->end, list_empty(&gather->freelist));
+ gather->end,
+ iommu_pages_list_empty(&gather->freelist));
iommu_put_pages_list(&gather->freelist);
}
@@ -3918,6 +3887,8 @@ int intel_iommu_enable_iopf(struct device *dev)
if (!info->pri_enabled)
return -ENODEV;
+ /* pri_enabled is protected by the group mutex. */
+ iommu_group_mutex_assert(dev);
if (info->iopf_refcount) {
info->iopf_refcount++;
return 0;
@@ -3940,43 +3911,13 @@ void intel_iommu_disable_iopf(struct device *dev)
if (WARN_ON(!info->pri_enabled || !info->iopf_refcount))
return;
+ iommu_group_mutex_assert(dev);
if (--info->iopf_refcount)
return;
iopf_queue_remove_device(iommu->iopf_queue, dev);
}
-static int
-intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
-{
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- return intel_iommu_enable_iopf(dev);
-
- case IOMMU_DEV_FEAT_SVA:
- return 0;
-
- default:
- return -ENODEV;
- }
-}
-
-static int
-intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
-{
- switch (feat) {
- case IOMMU_DEV_FEAT_IOPF:
- intel_iommu_disable_iopf(dev);
- return 0;
-
- case IOMMU_DEV_FEAT_SVA:
- return 0;
-
- default:
- return -ENODEV;
- }
-}
-
static bool intel_iommu_is_attach_deferred(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -4050,6 +3991,7 @@ static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ iopf_for_domain_remove(old, dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
domain_remove_dev_pasid(old, dev, pasid);
@@ -4123,6 +4065,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
+ ret = iopf_for_domain_replace(domain, old, dev);
+ if (ret)
+ goto out_remove_dev_pasid;
+
if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid, old);
@@ -4130,7 +4076,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
ret = domain_setup_second_level(iommu, dmar_domain,
dev, pasid, old);
if (ret)
- goto out_remove_dev_pasid;
+ goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
@@ -4138,6 +4084,8 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
return 0;
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
@@ -4352,11 +4300,19 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device
if (dev_is_real_dma_subdevice(dev))
return 0;
+ /*
+ * No PRI support with the global identity domain. No need to enable or
+ * disable PRI in this path as the iommu has been put in the blocking
+ * state.
+ */
if (sm_supported(iommu))
ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
else
ret = device_setup_pass_through(dev);
+ if (!ret)
+ info->domain_attached = true;
+
return ret;
}
@@ -4371,10 +4327,16 @@ static int identity_domain_set_dev_pasid(struct iommu_domain *domain,
if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
return -EOPNOTSUPP;
- ret = domain_setup_passthrough(iommu, dev, pasid, old);
+ ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
return ret;
+ ret = domain_setup_passthrough(iommu, dev, pasid, old);
+ if (ret) {
+ iopf_for_domain_replace(old, domain, dev);
+ return ret;
+ }
+
domain_remove_dev_pasid(old, dev, pasid);
return 0;
}
@@ -4401,8 +4363,6 @@ const struct iommu_ops intel_iommu_ops = {
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.device_group = intel_iommu_device_group,
- .dev_enable_feat = intel_iommu_dev_enable_feat,
- .dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
.pgsize_bitmap = SZ_4K,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index c4916886da5a..3ddbcc603de2 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -493,14 +493,13 @@ struct q_inval {
/* Page Request Queue depth */
#define PRQ_ORDER 4
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
-#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
+#define PRQ_SIZE (SZ_4K << PRQ_ORDER)
+#define PRQ_RING_MASK (PRQ_SIZE - 0x20)
+#define PRQ_DEPTH (PRQ_SIZE >> 5)
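Worked numbers for the rewritten macros: with PRQ_ORDER == 4, PRQ_SIZE = SZ_4K << 4 = 64K; each page-request descriptor is 32 bytes, so PRQ_DEPTH = 64K >> 5 = 2048 entries, and PRQ_RING_MASK = 64K - 0x20 keeps the head/tail offsets 32-byte aligned while wrapping within the ring.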
struct dmar_pci_notify_info;
#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
@@ -722,7 +721,9 @@ struct intel_iommu {
unsigned char name[16]; /* Device Name */
#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
+ /* mutex to protect domain_ida */
+ struct mutex did_lock;
+ struct ida domain_ida; /* domain id allocator */
unsigned long *copied_tables; /* bitmap of copied tables */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
@@ -773,6 +774,7 @@ struct device_domain_info {
u8 ats_supported:1;
u8 ats_enabled:1;
u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */
+ u8 domain_attached:1; /* Device has domain attached */
u8 ats_qdep;
unsigned int iopf_refcount;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
@@ -809,11 +811,22 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
}
/*
- * Domain ID reserved for pasid entries programmed for first-level
- * only and pass-through transfer modes.
+ * Domain ID 0 and 1 are reserved:
+ *
+ * If Caching mode is set, then invalid translations are tagged
+ * with domain-id 0, hence we need to pre-allocate it. We also
+ * use domain-id 0 as a marker for non-allocated domain-id, so
+ * make sure it is not used for a real domain.
+ *
+ * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
+ * entry for first-level or pass-through translation modes should
+ * be programmed with a domain id different from those used for
+ * second-level or nested translation. We reserve a domain id for
+ * this purpose. This domain id is also used for identity domain
+ * in legacy mode.
*/
#define FLPT_DEFAULT_DID 1
-#define NUM_RESERVED_DID 2
+#define IDA_START_DID 2
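
A minimal sketch of the allocation pattern this enables (alloc_did/free_did are illustrative names, not from the patch):

	static int alloc_did(struct intel_iommu *iommu)
	{
		/* DIDs 0 and 1 stay reserved: the range starts at IDA_START_DID. */
		return ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
				       cap_ndoms(iommu->cap) - 1, GFP_KERNEL);
	}

	static void free_did(struct intel_iommu *iommu, int did)
	{
		ida_free(&iommu->domain_ida, did);
	}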
/* Retrieve the domain ID which has allocated to the domain */
static inline u16
@@ -1298,6 +1311,39 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);
int intel_iommu_enable_iopf(struct device *dev);
void intel_iommu_disable_iopf(struct device *dev);
+static inline int iopf_for_domain_set(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (!domain || !domain->iopf_handler)
+ return 0;
+
+ return intel_iommu_enable_iopf(dev);
+}
+
+static inline void iopf_for_domain_remove(struct iommu_domain *domain,
+ struct device *dev)
+{
+ if (!domain || !domain->iopf_handler)
+ return;
+
+ intel_iommu_disable_iopf(dev);
+}
+
+static inline int iopf_for_domain_replace(struct iommu_domain *new,
+ struct iommu_domain *old,
+ struct device *dev)
+{
+ int ret;
+
+ ret = iopf_for_domain_set(new, dev);
+ if (ret)
+ return ret;
+
+ iopf_for_domain_remove(old, dev);
+
+ return 0;
+}
+
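Every set_dev_pasid/attach path converted in this series then takes the same shape; a hedged sketch of that caller pattern, where install_translation() is a hypothetical stand-in for the real pasid/context setup call:

	static int set_domain_with_iopf(struct iommu_domain *new,
					struct iommu_domain *old,
					struct device *dev)
	{
		int ret;

		/* Enable IOPF for the new domain before touching translation. */
		ret = iopf_for_domain_replace(new, old, dev);
		if (ret)
			return ret;

		ret = install_translation(dev);	/* hypothetical setup step */
		if (ret)
			/* Swap the arguments to restore the old IOPF state. */
			iopf_for_domain_replace(old, new, dev);
		return ret;
	}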
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 3bc2a03cceca..cf7b6882ec75 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -530,11 +530,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (!ir_table)
return -ENOMEM;
- ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
- INTR_REMAP_PAGE_ORDER);
+ /* 1MB - maximum possible interrupt remapping table size */
+ ir_table_base =
+ iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
if (!ir_table_base) {
- pr_err("IR%d: failed to allocate pages of order %d\n",
- iommu->seq_id, INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate 1M of memory\n", iommu->seq_id);
goto out_free_table;
}
@@ -612,7 +612,7 @@ out_free_fwnode:
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
- iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(ir_table_base);
out_free_table:
kfree(ir_table);
@@ -633,7 +633,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
irq_domain_free_fwnode(fn);
iommu->ir_domain = NULL;
}
- iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
+ iommu_free_pages(iommu->ir_table->base);
bitmap_free(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index 6ac5c534bef4..fc312f649f9e 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -27,8 +27,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
unsigned long flags;
int ret = 0;
- if (info->domain)
- device_block_translation(dev);
+ device_block_translation(dev);
if (iommu->agaw < dmar_domain->s2_domain->agaw) {
dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
@@ -56,17 +55,24 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
if (ret)
goto detach_iommu;
+ ret = iopf_for_domain_set(domain, dev);
+ if (ret)
+ goto unassign_tag;
+
ret = intel_pasid_setup_nested(iommu, dev,
IOMMU_NO_PASID, dmar_domain);
if (ret)
- goto unassign_tag;
+ goto disable_iopf;
info->domain = dmar_domain;
+ info->domain_attached = true;
spin_lock_irqsave(&dmar_domain->lock, flags);
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return 0;
+disable_iopf:
+ iopf_for_domain_remove(domain, dev);
unassign_tag:
cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
detach_iommu:
@@ -166,14 +172,20 @@ static int intel_nested_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
- ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
goto out_remove_dev_pasid;
+ ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
+ if (ret)
+ goto out_unwind_iopf;
+
domain_remove_dev_pasid(old, dev, pasid);
return 0;
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 7ee18bb48bd4..ac67a056b6c8 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -60,14 +60,14 @@ int intel_pasid_alloc_table(struct device *dev)
size = max_pasid >> (PASID_PDE_SHIFT - 3);
order = size ? get_order(size) : 0;
- dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
+ dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
+ 1 << (order + PAGE_SHIFT));
if (!dir) {
kfree(pasid_table);
return -ENOMEM;
}
pasid_table->table = dir;
- pasid_table->order = order;
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
info->pasid_table = pasid_table;
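Worked sizing for the code above, assuming PASID_PDE_SHIFT == 6 as defined in this header: for max_pasid == 1 << 20 the directory needs (1 << 20) >> 3 == 128K, get_order(128K) == 5, and the recomputed max_pasid is 1 << (5 + 12 + 3) == 1 << 20 again, so the round-trip is exact at the top size.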
@@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev)
max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
for (i = 0; i < max_pde; i++) {
table = get_pasid_table_from_pde(&dir[i]);
- iommu_free_page(table);
+ iommu_free_pages(table);
}
- iommu_free_pages(pasid_table->table, pasid_table->order);
+ iommu_free_pages(pasid_table->table);
kfree(pasid_table);
}
@@ -148,7 +148,8 @@ retry:
if (!entries) {
u64 tmp;
- entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
+ entries = iommu_alloc_pages_node_sz(info->iommu->node,
+ GFP_ATOMIC, SZ_4K);
if (!entries)
return NULL;
@@ -161,7 +162,7 @@ retry:
tmp = 0ULL;
if (!try_cmpxchg64(&dir[dir_index].val, &tmp,
(u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
- iommu_free_page(entries);
+ iommu_free_pages(entries);
goto retry;
}
if (!ecap_coherent(info->iommu->ecap)) {
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 668d8ece6b14..fd0fd1a0df84 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -47,7 +47,6 @@ struct pasid_entry {
/* The representative of a PASID table */
struct pasid_table {
void *table; /* pasid table pointer */
- int order; /* page order of pasid table */
u32 max_pasid; /* max pasid */
};
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 5b6a64d96850..52570e42a14c 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -290,7 +290,8 @@ int intel_iommu_enable_prq(struct intel_iommu *iommu)
struct iopf_queue *iopfq;
int irq, ret;
- iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+ iommu->prq =
+ iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE);
if (!iommu->prq) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
@@ -340,7 +341,7 @@ free_hwirq:
dmar_free_hwirq(irq);
iommu->pr_irq = 0;
free_prq:
- iommu_free_pages(iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return ret;
@@ -363,7 +364,7 @@ int intel_iommu_finish_prq(struct intel_iommu *iommu)
iommu->iopf_queue = NULL;
}
- iommu_free_pages(iommu->prq, PRQ_ORDER);
+ iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return 0;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index ba93123cb4eb..f3da596410b5 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -164,18 +164,23 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
+ ret = iopf_for_domain_replace(domain, old, dev);
+ if (ret)
+ goto out_remove_dev_pasid;
+
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = __domain_setup_first_level(iommu, dev, pasid,
FLPT_DEFAULT_DID, mm->pgd,
sflags, old);
if (ret)
- goto out_remove_dev_pasid;
+ goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
return 0;
-
+out_unwind_iopf:
+ iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7632c80edea6..96425e92f313 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
+#include <linux/device/faux.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -251,8 +252,6 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
(data->start_level == 1) && (oas == 40);
}
-static bool selftest_running = false;
-
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
return (dma_addr_t)virt_to_phys(pages);
@@ -263,16 +262,20 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *cookie)
{
struct device *dev = cfg->iommu_dev;
- int order = get_order(size);
+ size_t alloc_size;
dma_addr_t dma;
void *pages;
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
-
+ /*
+ * For very small starting-level translation tables the HW requires a
+ * minimum alignment of at least 64 bytes to cover all cases.
+ */
+ alloc_size = max(size, 64);
if (cfg->alloc)
- pages = cfg->alloc(cookie, size, gfp);
+ pages = cfg->alloc(cookie, alloc_size, gfp);
else
- pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
+ pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
+ alloc_size);
if (!pages)
return NULL;
@@ -300,7 +303,7 @@ out_free:
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, order);
+ iommu_free_pages(pages);
return NULL;
}
@@ -316,7 +319,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (cfg->free)
cfg->free(cookie, pages, size);
else
- iommu_free_pages(pages, get_order(size));
+ iommu_free_pages(pages);
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -371,7 +374,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
for (i = 0; i < num_entries; i++)
if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
/*
@@ -473,7 +476,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
cptep = iopte_deref(pte, data);
} else if (pte) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
return -EEXIST;
}
@@ -641,8 +644,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
ptep += unmap_idx_start;
pte = READ_ONCE(*ptep);
- if (WARN_ON(!pte))
- return 0;
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
+ return -ENOENT;
+ }
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
@@ -652,8 +657,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* Find and handle non-leaf entries */
for (i = 0; i < num_entries; i++) {
pte = READ_ONCE(ptep[i]);
- if (WARN_ON(!pte))
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
break;
+ }
if (!iopte_leaf(pte, lvl, iop->fmt)) {
__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
@@ -968,7 +975,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
- IO_PGTABLE_QUIRK_ARM_HD))
+ IO_PGTABLE_QUIRK_ARM_HD |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1069,7 +1077,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
+ IO_PGTABLE_QUIRK_NO_WARN))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1310,7 +1319,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
#define __FAIL(ops, i) ({ \
WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
arm_lpae_dump_ops(ops); \
- selftest_running = false; \
-EFAULT; \
})
@@ -1326,8 +1334,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
size_t size, mapped;
struct io_pgtable_ops *ops;
- selftest_running = true;
-
for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
cfg_cookie = cfg;
ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
@@ -1416,7 +1422,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
free_io_pgtable_ops(ops);
}
- selftest_running = false;
return 0;
}
@@ -1433,15 +1438,18 @@ static int __init arm_lpae_do_selftests(void)
};
int i, j, k, pass = 0, fail = 0;
- struct device dev;
+ struct faux_device *dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.coherent_walk = true,
- .iommu_dev = &dev,
+ .quirks = IO_PGTABLE_QUIRK_NO_WARN,
};
- /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
- set_dev_node(&dev, NUMA_NO_NODE);
+ dev = faux_device_create("io-pgtable-test", NULL, 0);
+ if (!dev)
+ return -ENOMEM;
+
+ cfg.iommu_dev = &dev->dev;
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
@@ -1461,6 +1469,8 @@ static int __init arm_lpae_do_selftests(void)
}
pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ faux_device_destroy(dev);
+
return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 06aca9ab52f9..679bda104797 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -107,14 +107,6 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte,
return paddr;
}
-static void *__dart_alloc_pages(size_t size, gfp_t gfp)
-{
- int order = get_order(size);
-
- VM_BUG_ON((gfp & __GFP_HIGHMEM));
- return iommu_alloc_pages(gfp, order);
-}
-
static int dart_init_pte(struct dart_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
dart_iopte prot, int num_entries,
@@ -256,13 +248,13 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
/* no L2 table present */
if (!pte) {
- cptep = __dart_alloc_pages(tblsz, gfp);
+ cptep = iommu_alloc_pages_sz(gfp, tblsz);
if (!cptep)
return -ENOMEM;
pte = dart_install_table(cptep, ptep, 0, data);
if (pte)
- iommu_free_pages(cptep, get_order(tblsz));
+ iommu_free_pages(cptep);
/* L2 table is present (now) */
pte = READ_ONCE(*ptep);
@@ -413,7 +405,8 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
- data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
+ data->pgd[i] =
+ iommu_alloc_pages_sz(GFP_KERNEL, DART_GRANULE(data));
if (!data->pgd[i])
goto out_free_data;
cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
@@ -423,8 +416,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
out_free_data:
while (--i >= 0) {
- iommu_free_pages(data->pgd[i],
- get_order(DART_GRANULE(data)));
+ iommu_free_pages(data->pgd[i]);
}
kfree(data);
return NULL;
@@ -433,7 +425,6 @@ out_free_data:
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
- int order = get_order(DART_GRANULE(data));
dart_iopte *ptep, *end;
int i;
@@ -445,9 +436,9 @@ static void apple_dart_free_pgtable(struct io_pgtable *iop)
dart_iopte pte = *ptep++;
if (pte)
- iommu_free_pages(iopte_deref(pte, data), order);
+ iommu_free_pages(iopte_deref(pte, data));
}
- iommu_free_pages(data->pgd[i], order);
+ iommu_free_pages(data->pgd[i]);
}
kfree(data);
diff --git a/drivers/iommu/iommu-pages.c b/drivers/iommu/iommu-pages.c
new file mode 100644
index 000000000000..238c09e5166b
--- /dev/null
+++ b/drivers/iommu/iommu-pages.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#include "iommu-pages.h"
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+#define IOPTDESC_MATCH(pg_elm, elm) \
+ static_assert(offsetof(struct page, pg_elm) == \
+ offsetof(struct ioptdesc, elm))
+IOPTDESC_MATCH(flags, __page_flags);
+IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
+IOPTDESC_MATCH(mapping, __page_mapping);
+IOPTDESC_MATCH(private, _private);
+IOPTDESC_MATCH(page_type, __page_type);
+IOPTDESC_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+IOPTDESC_MATCH(memcg_data, memcg_data);
+#endif
+#undef IOPTDESC_MATCH
+static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
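Each IOPTDESC_MATCH() above is a pure compile-time layout check. Expanded by hand, the lru line is equivalent to the following (shown for illustration only, not part of the patch):

	static_assert(offsetof(struct page, lru) ==
		      offsetof(struct ioptdesc, iopt_freelist_elm));

Together with the sizeof() assertion this keeps the ioptdesc overlay from silently drifting out of sync with struct page.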
+
+/**
+ * iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from
+ * a specific NUMA node
+ * @nid: memory NUMA node id
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate, rounded up to a power of 2
+ *
+ * Returns the virtual address of the allocated page. The page must be freed
+ * either by calling iommu_free_pages() or via iommu_put_pages_list(). The
+ * returned allocation is roundup_pow_of_two(size) bytes in size and is
+ * physically aligned to its size.
+ */
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
+{
+ unsigned long pgcnt;
+ struct folio *folio;
+ unsigned int order;
+
+ /* This uses page_address() on the memory. */
+ if (WARN_ON(gfp & __GFP_HIGHMEM))
+ return NULL;
+
+ /*
+ * Currently, sub-page allocations result in a full page being returned.
+ */
+ order = get_order(size);
+
+ /*
+ * __folio_alloc_node() does not handle NUMA_NO_NODE like
+ * alloc_pages_node() did.
+ */
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
+ if (unlikely(!folio))
+ return NULL;
+
+ /*
+ * All page allocations that should be reported as "iommu-pagetables" to
+ * userspace must use one of the functions below. This includes
+ * allocations of page-tables and other per-iommu_domain configuration
+ * structures.
+ *
+ * This is necessary for the proper accounting as IOMMU state can be
+ * rather large, i.e. multiple gigabytes in size.
+ */
+ pgcnt = 1UL << order;
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);
+
+ return folio_address(folio);
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
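A minimal usage sketch of the sized allocator (illustrative only; the dev pointer and SZ_4K stand in for a real driver's device and table size):

	void *table;

	table = iommu_alloc_pages_node_sz(dev_to_node(dev), GFP_KERNEL, SZ_4K);
	if (!table)
		return -ENOMEM;
	/* program virt_to_phys(table) into the hardware, use the table ... */
	iommu_free_pages(table);

Note that the free side no longer carries an order or size argument; iommu_free_pages() below recovers it from the backing folio.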
+
+static void __iommu_free_desc(struct ioptdesc *iopt)
+{
+ struct folio *folio = ioptdesc_folio(iopt);
+ const unsigned long pgcnt = 1UL << folio_order(folio);
+
+ mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
+ lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
+ folio_put(folio);
+}
+
+/**
+ * iommu_free_pages - free pages
+ * @virt: virtual address of the page to be freed.
+ *
+ * The page must have been allocated by iommu_alloc_pages_node_sz()
+ */
+void iommu_free_pages(void *virt)
+{
+ if (!virt)
+ return;
+ __iommu_free_desc(virt_to_ioptdesc(virt));
+}
+EXPORT_SYMBOL_GPL(iommu_free_pages);
+
+/**
+ * iommu_put_pages_list - free a list of pages.
+ * @list: The list of pages to be freed
+ *
+ * Frees a list of pages allocated by iommu_alloc_pages_node_sz(). On return the
+ * passed list is invalid; the caller must use IOMMU_PAGES_LIST_INIT to reinit
+ * the list if it expects to use it again.
+ */
+void iommu_put_pages_list(struct iommu_pages_list *list)
+{
+ struct ioptdesc *iopt, *tmp;
+
+ list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm)
+ __iommu_free_desc(iopt);
+}
+EXPORT_SYMBOL_GPL(iommu_put_pages_list);
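A sketch of the batched-free pattern these helpers enable (hypothetical caller; the IOTLB invalidation step remains the caller's responsibility):

	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);

	iommu_pages_list_add(&freelist, table_a);
	iommu_pages_list_add(&freelist, table_b);
	/* ... invalidate the IOTLB so the HW no longer walks these tables ... */
	iommu_put_pages_list(&freelist);	/* frees both pages in one pass */

This is the same conversion applied to the riscv driver later in this patch, where a LIST_HEAD() of struct page lru entries becomes an iommu_pages_list.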
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index 82ebf0033081..b3af2813ed0c 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -7,180 +7,95 @@
#ifndef __IOMMU_PAGES_H
#define __IOMMU_PAGES_H
-#include <linux/vmstat.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-
-/*
- * All page allocations that should be reported to as "iommu-pagetables" to
- * userspace must use one of the functions below. This includes allocations of
- * page-tables and other per-iommu_domain configuration structures.
- *
- * This is necessary for the proper accounting as IOMMU state can be rather
- * large, i.e. multiple gigabytes in size.
- */
-
-/**
- * __iommu_alloc_account - account for newly allocated page.
- * @page: head struct page of the page.
- * @order: order of the page
- */
-static inline void __iommu_alloc_account(struct page *page, int order)
-{
- const long pgcnt = 1l << order;
-
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
-}
-
-/**
- * __iommu_free_account - account a page that is about to be freed.
- * @page: head struct page of the page.
- * @order: order of the page
- */
-static inline void __iommu_free_account(struct page *page, int order)
-{
- const long pgcnt = 1l << order;
-
- mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
- mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
-}
+#include <linux/iommu.h>
/**
- * __iommu_alloc_pages - allocate a zeroed page of a given order.
- * @gfp: buddy allocator flags
- * @order: page order
+ * struct ioptdesc - Memory descriptor for IOMMU page tables
+ * @iopt_freelist_elm: List element for a struct iommu_pages_list
*
- * returns the head struct page of the allocated page.
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
*/
-static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
+struct ioptdesc {
+ unsigned long __page_flags;
+
+ struct list_head iopt_freelist_elm;
+ unsigned long __page_mapping;
+ pgoff_t __index;
+ void *_private;
+
+ unsigned int __page_type;
+ atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#endif
+};
+
+static inline struct ioptdesc *folio_ioptdesc(struct folio *folio)
{
- struct page *page;
-
- page = alloc_pages(gfp | __GFP_ZERO, order);
- if (unlikely(!page))
- return NULL;
-
- __iommu_alloc_account(page, order);
-
- return page;
+ return (struct ioptdesc *)folio;
}
-/**
- * __iommu_free_pages - free page of a given order
- * @page: head struct page of the page
- * @order: page order
- */
-static inline void __iommu_free_pages(struct page *page, int order)
+static inline struct folio *ioptdesc_folio(struct ioptdesc *iopt)
{
- if (!page)
- return;
-
- __iommu_free_account(page, order);
- __free_pages(page, order);
+ return (struct folio *)iopt;
}
-/**
- * iommu_alloc_pages_node - allocate a zeroed page of a given order from
- * specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the virtual address of the allocated page
- */
-static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
+static inline struct ioptdesc *virt_to_ioptdesc(void *virt)
{
- struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
-
- if (unlikely(!page))
- return NULL;
-
- __iommu_alloc_account(page, order);
-
- return page_address(page);
+ return folio_ioptdesc(virt_to_folio(virt));
}
-/**
- * iommu_alloc_pages - allocate a zeroed page of a given order
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the virtual address of the allocated page
- */
-static inline void *iommu_alloc_pages(gfp_t gfp, int order)
-{
- struct page *page = __iommu_alloc_pages(gfp, order);
-
- if (unlikely(!page))
- return NULL;
-
- return page_address(page);
-}
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size);
+void iommu_free_pages(void *virt);
+void iommu_put_pages_list(struct iommu_pages_list *list);
/**
- * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- *
- * returns the virtual address of the allocated page
+ * iommu_pages_list_add - add the page to an iommu_pages_list
+ * @list: List to add the page to
+ * @virt: Address returned from iommu_alloc_pages_node_sz()
*/
-static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
+static inline void iommu_pages_list_add(struct iommu_pages_list *list,
+ void *virt)
{
- return iommu_alloc_pages_node(nid, gfp, 0);
+ list_add_tail(&virt_to_ioptdesc(virt)->iopt_freelist_elm, &list->pages);
}
/**
- * iommu_alloc_page - allocate a zeroed page
- * @gfp: buddy allocator flags
+ * iommu_pages_list_splice - Move all the pages from list @from to list @to
+ * @from: Source list of pages
+ * @to: Destination list of pages
*
- * returns the virtual address of the allocated page
+ * @from must be re-initialized after calling this function if it is to be
+ * used again.
*/
-static inline void *iommu_alloc_page(gfp_t gfp)
+static inline void iommu_pages_list_splice(struct iommu_pages_list *from,
+ struct iommu_pages_list *to)
{
- return iommu_alloc_pages(gfp, 0);
+ list_splice(&from->pages, &to->pages);
}
/**
- * iommu_free_pages - free page of a given order
- * @virt: virtual address of the page to be freed.
- * @order: page order
+ * iommu_pages_list_empty - True if the list is empty
+ * @list: List to check
*/
-static inline void iommu_free_pages(void *virt, int order)
+static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
{
- if (!virt)
- return;
-
- __iommu_free_pages(virt_to_page(virt), order);
+ return list_empty(&list->pages);
}
/**
- * iommu_free_page - free page
- * @virt: virtual address of the page to be freed.
- */
-static inline void iommu_free_page(void *virt)
-{
- iommu_free_pages(virt, 0);
-}
-
-/**
- * iommu_put_pages_list - free a list of pages.
- * @page: the head of the lru list to be freed.
+ * iommu_alloc_pages_sz - Allocate a zeroed page of a given size
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate, this is rounded up to a power of 2
*
- * There are no locking requirement for these pages, as they are going to be
- * put on a free list as soon as refcount reaches 0. Pages are put on this LRU
- * list once they are removed from the IOMMU page tables. However, they can
- * still be access through debugfs.
+ * Returns the virtual address of the allocated page.
*/
-static inline void iommu_put_pages_list(struct list_head *page)
+static inline void *iommu_alloc_pages_sz(gfp_t gfp, size_t size)
{
- while (!list_empty(page)) {
- struct page *p = list_entry(page->prev, struct page, lru);
-
- list_del(&p->lru);
- __iommu_free_account(p, 0);
- put_page(p);
- }
+ return iommu_alloc_pages_node_sz(NUMA_NO_NODE, gfp, size);
}
#endif /* __IOMMU_PAGES_H */
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index ab18bc494eef..1a51cfd82808 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -63,9 +63,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
* reference is taken. Caller must call iommu_sva_unbind_device()
* to release each reference.
*
- * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
- * initialize the required SVA features.
- *
* On error, returns an ERR_PTR value.
*/
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
@@ -299,15 +296,12 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_domain *domain;
- if (ops->domain_alloc_sva) {
- domain = ops->domain_alloc_sva(dev, mm);
- if (IS_ERR(domain))
- return domain;
- } else {
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return ERR_PTR(-ENOMEM);
- }
+ if (!ops->domain_alloc_sva)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = ops->domain_alloc_sva(dev, mm);
+ if (IS_ERR(domain))
+ return domain;
domain->type = IOMMU_DOMAIN_SVA;
domain->cookie_type = IOMMU_COOKIE_SVA;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 4f91a740c15f..a4b606c591da 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -277,6 +277,8 @@ int iommu_device_register(struct iommu_device *iommu,
err = bus_iommu_probe(iommu_buses[i]);
if (err)
iommu_device_unregister(iommu);
+ else
+ WRITE_ONCE(iommu->ready, true);
return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
@@ -422,13 +424,15 @@ static int iommu_init_device(struct device *dev)
* is buried in the bus dma_configure path. Properly unpicking that is
* still a big job, so for now just invoke the whole thing. The device
* already having a driver bound means dma_configure has already run and
- * either found no IOMMU to wait for, or we're in its replay call right
- * now, so either way there's no point calling it again.
+ * found no IOMMU to wait for, so there's no point calling it again.
*/
- if (!dev->driver && dev->bus->dma_configure) {
+ if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) {
mutex_unlock(&iommu_probe_device_lock);
dev->bus->dma_configure(dev);
mutex_lock(&iommu_probe_device_lock);
+ /* If another instance finished the job for us, skip it */
+ if (!dev->iommu || dev->iommu_group)
+ return -ENODEV;
}
/*
* At this point, relevant devices either now have a fwspec which will
@@ -1629,15 +1633,13 @@ static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
if (ops->identity_domain)
return ops->identity_domain;
- /* Older drivers create the identity domain via ops->domain_alloc() */
- if (!ops->domain_alloc)
+ if (ops->domain_alloc_identity) {
+ domain = ops->domain_alloc_identity(dev);
+ if (IS_ERR(domain))
+ return domain;
+ } else {
return ERR_PTR(-EOPNOTSUPP);
-
- domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
- if (IS_ERR(domain))
- return domain;
- if (!domain)
- return ERR_PTR(-ENOMEM);
+ }
iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
return domain;
@@ -2025,8 +2027,10 @@ __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
domain = ops->domain_alloc_paging(dev);
else if (ops->domain_alloc_paging_flags)
domain = ops->domain_alloc_paging_flags(dev, flags, NULL);
+#if IS_ENABLED(CONFIG_FSL_PAMU)
else if (ops->domain_alloc && !flags)
domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
+#endif
else
return ERR_PTR(-EOPNOTSUPP);
@@ -2204,6 +2208,19 @@ static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
}
+static bool domain_iommu_ops_compatible(const struct iommu_ops *ops,
+ struct iommu_domain *domain)
+{
+ if (domain->owner == ops)
+ return true;
+
+ /* For static domains, owner isn't set. */
+ if (domain == ops->blocked_domain || domain == ops->identity_domain)
+ return true;
+
+ return false;
+}
+
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
@@ -2214,7 +2231,8 @@ static int __iommu_attach_group(struct iommu_domain *domain,
return -EBUSY;
dev = iommu_group_first_dev(group);
- if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ if (!dev_has_iommu(dev) ||
+ !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain))
return -EINVAL;
return __iommu_group_set_domain(group, domain);
@@ -2395,6 +2413,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
unsigned int pgsize_idx, pgsize_idx_next;
unsigned long pgsizes;
size_t offset, pgsize, pgsize_next;
+ size_t offset_end;
unsigned long addr_merge = paddr | iova;
/* Page sizes supported by the hardware and small enough for @size */
@@ -2435,7 +2454,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
* If size is big enough to accommodate the larger page, reduce
* the number of smaller pages.
*/
- if (offset + pgsize_next <= size)
+ if (!check_add_overflow(offset, pgsize_next, &offset_end) &&
+ offset_end <= size)
size = offset;
out_set_count:
@@ -2443,8 +2463,8 @@ out_set_count:
return pgsize;
}
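For reference, check_add_overflow() comes from <linux/overflow.h>: it returns true when the addition wraps, with the (possibly wrapped) sum stored in its third argument. A minimal sketch of the rewritten test, same logic as the hunk above:

	size_t offset_end;

	/* only trust offset_end when offset + pgsize_next did not wrap */
	if (!check_add_overflow(offset, pgsize_next, &offset_end) &&
	    offset_end <= size)
		size = offset;

Without the check, a very large pgsize_next could wrap the addition past SIZE_MAX and wrongly satisfy the old "offset + pgsize_next <= size" comparison.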
-static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_domain_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -2453,12 +2473,19 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t orig_paddr = paddr;
int ret = 0;
+ might_sleep_if(gfpflags_allow_blocking(gfp));
+
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL))
return -ENODEV;
+ /* Discourage passing strange GFP flags */
+ if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+ __GFP_HIGHMEM)))
+ return -EINVAL;
+
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
@@ -2506,31 +2533,27 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size)
{
const struct iommu_domain_ops *ops = domain->ops;
- int ret;
- might_sleep_if(gfpflags_allow_blocking(gfp));
-
- /* Discourage passing strange GFP flags */
- if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
- __GFP_HIGHMEM)))
- return -EINVAL;
+ if (!ops->iotlb_sync_map)
+ return 0;
+ return ops->iotlb_sync_map(domain, iova, size);
+}
- ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
- if (ret == 0 && ops->iotlb_sync_map) {
- ret = ops->iotlb_sync_map(domain, iova, size);
- if (ret)
- goto out_err;
- }
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ int ret;
- return ret;
+ ret = iommu_map_nosync(domain, iova, paddr, size, prot, gfp);
+ if (ret)
+ return ret;
-out_err:
- /* undo mappings already done */
- iommu_unmap(domain, iova, size);
+ ret = iommu_sync_map(domain, iova, size);
+ if (ret)
+ iommu_unmap(domain, iova, size);
return ret;
}
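Splitting iommu_map() into iommu_map_nosync() plus iommu_sync_map() lets callers batch several mappings under a single IOTLB sync, which is how iommu_map_sg() below now works. A hedged sketch of a hypothetical two-range caller:

	ret = iommu_map_nosync(domain, iova, paddr0, len0, prot, GFP_KERNEL);
	if (!ret)
		ret = iommu_map_nosync(domain, iova + len0, paddr1, len1,
				       prot, GFP_KERNEL);
	if (!ret)
		ret = iommu_sync_map(domain, iova, len0 + len1);
	if (ret)
		iommu_unmap(domain, iova, len0 + len1);	/* undo on failure */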
@@ -2618,6 +2641,25 @@ size_t iommu_unmap(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap);
+/**
+ * iommu_unmap_fast() - Remove mappings from a range of IOVA without IOTLB sync
+ * @domain: Domain to manipulate
+ * @iova: IO virtual address to start
+ * @size: Length of the range starting from @iova
+ * @iotlb_gather: range information for a pending IOTLB flush
+ *
+ * iommu_unmap_fast() will remove a translation created by iommu_map().
+ * It can't subdivide a mapping created by iommu_map(), so it should be
+ * called with IOVA ranges that match what was passed to iommu_map(). The
+ * range can aggregate contiguous iommu_map() calls so long as no individual
+ * range is split.
+ *
+ * Basically iommu_unmap_fast() is the same as iommu_unmap() but for callers
+ * which manage the IOTLB flushing externally to perform a batched sync.
+ *
+ * Returns: Number of bytes of IOVA unmapped. iova + res will be the point
+ * where unmapping stopped.
+ */
size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
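A sketch of the externally managed flush the kernel-doc above describes, using the existing iommu_iotlb_gather helpers (ranges are hypothetical):

	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);
	unmapped  = iommu_unmap_fast(domain, iova0, len0, &gather);
	unmapped += iommu_unmap_fast(domain, iova1, len1, &gather);
	iommu_iotlb_sync(domain, &gather);	/* one batched flush for both ranges */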
@@ -2630,26 +2672,17 @@ ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{
- const struct iommu_domain_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
phys_addr_t start;
unsigned int i = 0;
int ret;
- might_sleep_if(gfpflags_allow_blocking(gfp));
-
- /* Discourage passing strange GFP flags */
- if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
- __GFP_HIGHMEM)))
- return -EINVAL;
-
while (i <= nents) {
phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) {
- ret = __iommu_map(domain, iova + mapped, start,
+ ret = iommu_map_nosync(domain, iova + mapped, start,
len, prot, gfp);
-
if (ret)
goto out_err;
@@ -2672,11 +2705,10 @@ next:
sg = sg_next(sg);
}
- if (ops->iotlb_sync_map) {
- ret = ops->iotlb_sync_map(domain, iova, mapped);
- if (ret)
- goto out_err;
- }
+ ret = iommu_sync_map(domain, iova, mapped);
+ if (ret)
+ goto out_err;
+
return mapped;
out_err:
@@ -2830,31 +2862,39 @@ bool iommu_default_passthrough(void)
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+static const struct iommu_device *iommu_from_fwnode(const struct fwnode_handle *fwnode)
{
- const struct iommu_ops *ops = NULL;
- struct iommu_device *iommu;
+ const struct iommu_device *iommu, *ret = NULL;
spin_lock(&iommu_device_lock);
list_for_each_entry(iommu, &iommu_device_list, list)
if (iommu->fwnode == fwnode) {
- ops = iommu->ops;
+ ret = iommu;
break;
}
spin_unlock(&iommu_device_lock);
- return ops;
+ return ret;
+}
+
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+{
+ const struct iommu_device *iommu = iommu_from_fwnode(fwnode);
+
+ return iommu ? iommu->ops : NULL;
}
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
{
- const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
+ const struct iommu_device *iommu = iommu_from_fwnode(iommu_fwnode);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!ops)
+ if (!iommu)
return driver_deferred_probe_check_state(dev);
+ if (!dev->iommu && !READ_ONCE(iommu->ready))
+ return -EPROBE_DEFER;
if (fwspec)
- return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
+ return iommu->ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
if (!dev_iommu_get(dev))
return -ENOMEM;
@@ -2908,38 +2948,6 @@ int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
-/*
- * Per device IOMMU features.
- */
-int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev_has_iommu(dev)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->dev_enable_feat)
- return ops->dev_enable_feat(dev, feat);
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
-
-/*
- * The device drivers should do the necessary cleanups before calling this.
- */
-int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- if (dev_has_iommu(dev)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->dev_disable_feat)
- return ops->dev_disable_feat(dev, feat);
- }
-
- return -EBUSY;
-}
-EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
-
/**
* iommu_setup_default_domain - Set the default_domain for the group
* @group: Group to change
@@ -3366,10 +3374,12 @@ static int __iommu_set_group_pasid(struct iommu_domain *domain,
int ret;
for_each_group_device(group, device) {
- ret = domain->ops->set_dev_pasid(domain, device->dev,
- pasid, old);
- if (ret)
- goto err_revert;
+ if (device->dev->iommu->max_pasids > 0) {
+ ret = domain->ops->set_dev_pasid(domain, device->dev,
+ pasid, old);
+ if (ret)
+ goto err_revert;
+ }
}
return 0;
@@ -3379,15 +3389,18 @@ err_revert:
for_each_group_device(group, device) {
if (device == last_gdev)
break;
- /*
- * If no old domain, undo the succeeded devices/pasid.
- * Otherwise, rollback the succeeded devices/pasid to the old
- * domain. And it is a driver bug to fail attaching with a
- * previously good domain.
- */
- if (!old || WARN_ON(old->ops->set_dev_pasid(old, device->dev,
+ if (device->dev->iommu->max_pasids > 0) {
+ /*
+ * If no old domain, undo the succeeded devices/pasid.
+ * Otherwise, rollback the succeeded devices/pasid to
+ * the old domain. And it is a driver bug to fail
+ * attaching with a previously good domain.
+ */
+ if (!old ||
+ WARN_ON(old->ops->set_dev_pasid(old, device->dev,
pasid, domain)))
- iommu_remove_dev_pasid(device->dev, pasid, domain);
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
+ }
}
return ret;
}
@@ -3398,8 +3411,10 @@ static void __iommu_remove_group_pasid(struct iommu_group *group,
{
struct group_device *device;
- for_each_group_device(group, device)
- iommu_remove_dev_pasid(device->dev, pasid, domain);
+ for_each_group_device(group, device) {
+ if (device->dev->iommu->max_pasids > 0)
+ iommu_remove_dev_pasid(device->dev, pasid, domain);
+ }
}
/*
@@ -3435,12 +3450,19 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
!ops->blocked_domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- if (ops != domain->owner || pasid == IOMMU_NO_PASID)
+ if (!domain_iommu_ops_compatible(ops, domain) ||
+ pasid == IOMMU_NO_PASID)
return -EINVAL;
mutex_lock(&group->mutex);
for_each_group_device(group, device) {
- if (pasid >= device->dev->iommu->max_pasids) {
+ /*
+ * Skip PASID validation for devices without PASID support
+ * (max_pasids = 0). These devices cannot issue transactions
+ * with PASID, so they don't affect the group's PASID usage.
+ */
+ if ((device->dev->iommu->max_pasids > 0) &&
+ (pasid >= device->dev->iommu->max_pasids)) {
ret = -EINVAL;
goto out_unlock;
}
@@ -3511,7 +3533,7 @@ int iommu_replace_device_pasid(struct iommu_domain *domain,
if (!domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
- if (dev_iommu_ops(dev) != domain->owner ||
+ if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) ||
pasid == IOMMU_NO_PASID || !handle)
return -EINVAL;
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 2111bad72c72..86244403b532 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -221,7 +221,6 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
refcount_inc(&idev->obj.users);
/* igroup refcount moves into iommufd_device */
idev->igroup = igroup;
- mutex_init(&idev->iopf_lock);
/*
* If the caller fails after this success it must call
@@ -425,6 +424,25 @@ static int iommufd_hwpt_pasid_compat(struct iommufd_hw_pagetable *hwpt,
return 0;
}
+static bool iommufd_hwpt_compatible_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct pci_dev *pdev;
+
+ if (!hwpt->fault || !dev_is_pci(idev->dev))
+ return true;
+
+ /*
+ * Once we turn on PCI/PRI support for VF, the response failure code
+ * should not be forwarded to the hardware due to PRI being a shared
+ * resource between PF and VFs. There is no coordination for this
+ * shared capability. This waits for a vPRI reset to recover.
+ */
+ pdev = to_pci_dev(idev->dev);
+
+ return (!pdev->is_virtfn || !pci_pri_supported(pdev));
+}
+
static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev,
ioasid_t pasid)
@@ -432,6 +450,9 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle;
int rc;
+ if (!iommufd_hwpt_compatible_device(hwpt, idev))
+ return -EINVAL;
+
rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
if (rc)
return rc;
@@ -440,12 +461,6 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
if (!handle)
return -ENOMEM;
- if (hwpt->fault) {
- rc = iommufd_fault_iopf_enable(idev);
- if (rc)
- goto out_free_handle;
- }
-
handle->idev = idev;
if (pasid == IOMMU_NO_PASID)
rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
@@ -454,13 +469,10 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
rc = iommu_attach_device_pasid(hwpt->domain, idev->dev, pasid,
&handle->handle);
if (rc)
- goto out_disable_iopf;
+ goto out_free_handle;
return 0;
-out_disable_iopf:
- if (hwpt->fault)
- iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
@@ -492,10 +504,7 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
else
iommu_detach_device_pasid(hwpt->domain, idev->dev, pasid);
- if (hwpt->fault) {
- iommufd_auto_response_faults(hwpt, handle);
- iommufd_fault_iopf_disable(idev);
- }
+ iommufd_auto_response_faults(hwpt, handle);
kfree(handle);
}
@@ -507,6 +516,9 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
struct iommufd_attach_handle *handle, *old_handle;
int rc;
+ if (!iommufd_hwpt_compatible_device(hwpt, idev))
+ return -EINVAL;
+
rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid);
if (rc)
return rc;
@@ -517,12 +529,6 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
if (!handle)
return -ENOMEM;
- if (hwpt->fault && !old->fault) {
- rc = iommufd_fault_iopf_enable(idev);
- if (rc)
- goto out_free_handle;
- }
-
handle->idev = idev;
if (pasid == IOMMU_NO_PASID)
rc = iommu_replace_group_handle(idev->igroup->group,
@@ -531,20 +537,13 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
rc = iommu_replace_device_pasid(hwpt->domain, idev->dev,
pasid, &handle->handle);
if (rc)
- goto out_disable_iopf;
+ goto out_free_handle;
- if (old->fault) {
- iommufd_auto_response_faults(hwpt, old_handle);
- if (!hwpt->fault)
- iommufd_fault_iopf_disable(idev);
- }
+ iommufd_auto_response_faults(hwpt, old_handle);
kfree(old_handle);
return 0;
-out_disable_iopf:
- if (hwpt->fault && !old->fault)
- iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index f39cf0797347..e373b9eec7f5 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -9,8 +9,6 @@
#include <linux/iommufd.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/pci-ats.h>
#include <linux/poll.h>
#include <uapi/linux/iommufd.h>
@@ -18,50 +16,6 @@
#include "iommufd_private.h"
/* IOMMUFD_OBJ_FAULT Functions */
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev)
-{
- struct device *dev = idev->dev;
- int ret;
-
- /*
- * Once we turn on PCI/PRI support for VF, the response failure code
- * should not be forwarded to the hardware due to PRI being a shared
- * resource between PF and VFs. There is no coordination for this
- * shared capability. This waits for a vPRI reset to recover.
- */
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (pdev->is_virtfn && pci_pri_supported(pdev))
- return -EINVAL;
- }
-
- mutex_lock(&idev->iopf_lock);
- /* Device iopf has already been on. */
- if (++idev->iopf_enabled > 1) {
- mutex_unlock(&idev->iopf_lock);
- return 0;
- }
-
- ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
- if (ret)
- --idev->iopf_enabled;
- mutex_unlock(&idev->iopf_lock);
-
- return ret;
-}
-
-void iommufd_fault_iopf_disable(struct iommufd_device *idev)
-{
- mutex_lock(&idev->iopf_lock);
- if (!WARN_ON(idev->iopf_enabled == 0)) {
- if (--idev->iopf_enabled == 0)
- iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
- }
- mutex_unlock(&idev->iopf_lock);
-}
-
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle)
{
@@ -70,7 +24,7 @@ void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct list_head free_list;
unsigned long index;
- if (!fault)
+ if (!fault || !handle)
return;
INIT_LIST_HEAD(&free_list);
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 80e8c76d25f2..9ccc83341f32 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -425,9 +425,6 @@ struct iommufd_device {
/* always the physical device */
struct device *dev;
bool enforce_cache_coherency;
- /* protect iopf_enabled counter */
- struct mutex iopf_lock;
- unsigned int iopf_enabled;
};
static inline struct iommufd_device *
@@ -506,9 +503,6 @@ iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
-
-int iommufd_fault_iopf_enable(struct iommufd_device *idev);
-void iommufd_fault_iopf_disable(struct iommufd_device *idev);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle);
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 18d9a216eb30..6bd0abf9a641 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -58,6 +58,9 @@ enum {
MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
+static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
+static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);
+
/*
* Syzkaller has trouble randomizing the correct iova to use since it is linked
* to the map ioctl's output, and it has no idea about that. So, simplify things.
@@ -168,6 +171,8 @@ struct mock_dev {
int id;
u32 cache[MOCK_DEV_CACHE_NUM];
atomic_t pasid_1024_fake_error;
+ unsigned int iopf_refcount;
+ struct iommu_domain *domain;
};
static inline struct mock_dev *to_mock_dev(struct device *dev)
@@ -221,6 +226,13 @@ static int mock_domain_nop_attach(struct iommu_domain *domain,
up_write(&mdev->viommu_rwsem);
}
+ rc = mock_dev_enable_iopf(dev, domain);
+ if (rc)
+ return rc;
+
+ mock_dev_disable_iopf(dev, mdev->domain);
+ mdev->domain = domain;
+
return 0;
}
@@ -229,6 +241,7 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
struct iommu_domain *old)
{
struct mock_dev *mdev = to_mock_dev(dev);
+ int rc;
/*
* Per the first attach with pasid 1024, set the
@@ -256,6 +269,12 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
}
}
+ rc = mock_dev_enable_iopf(dev, domain);
+ if (rc)
+ return rc;
+
+ mock_dev_disable_iopf(dev, old);
+
return 0;
}
@@ -610,22 +629,42 @@ static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt
{
}
-static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain)
{
- if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
+ struct mock_dev *mdev = to_mock_dev(dev);
+ int ret;
+
+ if (!domain || !domain->iopf_handler)
+ return 0;
+
+ if (!mock_iommu_iopf_queue)
return -ENODEV;
- return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
+ if (mdev->iopf_refcount) {
+ mdev->iopf_refcount++;
+ return 0;
+ }
+
+ ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev);
+ if (ret)
+ return ret;
+
+ mdev->iopf_refcount = 1;
+
+ return 0;
}
-static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain)
{
- if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
- return -ENODEV;
+ struct mock_dev *mdev = to_mock_dev(dev);
- iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
+ if (!domain || !domain->iopf_handler)
+ return;
- return 0;
+ if (--mdev->iopf_refcount)
+ return;
+
+ iopf_queue_remove_device(mock_iommu_iopf_queue, dev);
}
static void mock_viommu_destroy(struct iommufd_viommu *viommu)
@@ -770,8 +809,6 @@ static const struct iommu_ops mock_ops = {
.device_group = generic_device_group,
.probe_device = mock_probe_device,
.page_response = mock_domain_page_response,
- .dev_enable_feat = mock_dev_enable_feat,
- .dev_disable_feat = mock_dev_disable_feat,
.user_pasid_table = true,
.viommu_alloc = mock_viommu_alloc,
.default_domain_ops =
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index e424b279a8cd..90341b24a811 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1090,7 +1090,8 @@ static int ipmmu_probe(struct platform_device *pdev)
if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu))
return 0;
- ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, dev_name(&pdev->dev));
+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s",
+ dev_name(&pdev->dev));
if (ret)
return ret;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index df98d0c65f54..cb95fecf6016 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -1550,6 +1550,31 @@ static const struct mtk_iommu_plat_data mt6795_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
};
+static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0}, /* Region0: larb0/1 */
+ [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
+ [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
+ 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
+ ~0, ~0, ~0, ~0, ~0},
+ [3] = {0},
+ [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
+ [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
+};
+
+static const struct mtk_iommu_plat_data mt6893_data = {
+ .m4u_plat = M4U_MT8192,
+ .flags = HAS_BCLK | OUT_ORDER_WR_EN | HAS_SUB_COMM_2BITS |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8192_larb_region_msk,
+ .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
+ {0, 14, 16}, {0, 13, 18, 17}},
+};
+
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
@@ -1673,17 +1698,6 @@ static const struct mtk_iommu_plat_data mt8188_data_vpp = {
27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
};
-static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
- [0] = {~0, ~0}, /* Region0: larb0/1 */
- [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
- [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */
- 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0,
- ~0, ~0, ~0, ~0, ~0},
- [3] = {0},
- [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */
- [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */
-};
-
static const struct mtk_iommu_plat_data mt8192_data = {
.m4u_plat = M4U_MT8192,
.flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
@@ -1777,6 +1791,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
{ .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
+ { .compatible = "mediatek,mt6893-iommu-mm", .data = &mt6893_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
diff --git a/drivers/iommu/riscv/Makefile b/drivers/iommu/riscv/Makefile
index f54c9ed17d41..b5929f9f23e6 100644
--- a/drivers/iommu/riscv/Makefile
+++ b/drivers/iommu/riscv/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o
+obj-y += iommu.o iommu-platform.o
obj-$(CONFIG_RISCV_IOMMU_PCI) += iommu-pci.o
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 8f049d4a0e2c..bb57092ca901 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -48,14 +48,13 @@ static DEFINE_IDA(riscv_iommu_pscids);
/* Device resource-managed allocations */
struct riscv_iommu_devres {
void *addr;
- int order;
};
static void riscv_iommu_devres_pages_release(struct device *dev, void *res)
{
struct riscv_iommu_devres *devres = res;
- iommu_free_pages(devres->addr, devres->order);
+ iommu_free_pages(devres->addr);
}
static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p)
@@ -66,13 +65,14 @@ static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p
return devres->addr == target->addr;
}
-static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
+static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu,
+ unsigned int size)
{
struct riscv_iommu_devres *devres;
void *addr;
- addr = iommu_alloc_pages_node(dev_to_node(iommu->dev),
- GFP_KERNEL_ACCOUNT, order);
+ addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev),
+ GFP_KERNEL_ACCOUNT, size);
if (unlikely(!addr))
return NULL;
@@ -80,12 +80,11 @@ static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
sizeof(struct riscv_iommu_devres), GFP_KERNEL);
if (unlikely(!devres)) {
- iommu_free_pages(addr, order);
+ iommu_free_pages(addr);
return NULL;
}
devres->addr = addr;
- devres->order = order;
devres_add(iommu->dev, devres);
@@ -163,9 +162,9 @@ static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu,
} else {
do {
const size_t queue_size = entry_size << (logsz + 1);
- const int order = get_order(queue_size);
- queue->base = riscv_iommu_get_pages(iommu, order);
+ queue->base = riscv_iommu_get_pages(
+ iommu, max(queue_size, SZ_4K));
queue->phys = __pa(queue->base);
} while (!queue->base && logsz-- > 0);
}
@@ -620,7 +619,7 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm
break;
}
- ptr = riscv_iommu_get_pages(iommu, 0);
+ ptr = riscv_iommu_get_pages(iommu, SZ_4K);
if (!ptr)
return NULL;
@@ -700,7 +699,7 @@ static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu)
}
if (!iommu->ddt_root) {
- iommu->ddt_root = riscv_iommu_get_pages(iommu, 0);
+ iommu->ddt_root = riscv_iommu_get_pages(iommu, SZ_4K);
iommu->ddt_phys = __pa(iommu->ddt_root);
}
@@ -1087,7 +1086,8 @@ static void riscv_iommu_iotlb_sync(struct iommu_domain *iommu_domain,
#define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot))
static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
- unsigned long pte, struct list_head *freelist)
+ unsigned long pte,
+ struct iommu_pages_list *freelist)
{
unsigned long *ptr;
int i;
@@ -1105,9 +1105,9 @@ static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain,
}
if (freelist)
- list_add_tail(&virt_to_page(ptr)->lru, freelist);
+ iommu_pages_list_add(freelist, ptr);
else
- iommu_free_page(ptr);
+ iommu_free_pages(ptr);
}
static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain,
@@ -1144,13 +1144,14 @@ pte_retry:
* page table. This might race with other mappings, retry.
*/
if (_io_pte_none(pte)) {
- addr = iommu_alloc_page_node(domain->numa_node, gfp);
+ addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp,
+ SZ_4K);
if (!addr)
return NULL;
old = pte;
pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE);
if (cmpxchg_relaxed(ptr, old, pte) != old) {
- iommu_free_page(addr);
+ iommu_free_pages(addr);
goto pte_retry;
}
}
@@ -1194,7 +1195,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
unsigned long *ptr;
unsigned long pte, old, pte_prot;
int rc = 0;
- LIST_HEAD(freelist);
+ struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
if (!(prot & IOMMU_WRITE))
pte_prot = _PAGE_BASE | _PAGE_READ;
@@ -1225,7 +1226,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
*mapped = size;
- if (!list_empty(&freelist)) {
+ if (!iommu_pages_list_empty(&freelist)) {
/*
* In 1.0 spec version, the smallest scope we can use to
* invalidate all levels of page table (i.e. leaf and non-leaf)
@@ -1385,8 +1386,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
domain->numa_node = dev_to_node(iommu->dev);
domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
domain->pgd_mode = pgd_mode;
- domain->pgd_root = iommu_alloc_page_node(domain->numa_node,
- GFP_KERNEL_ACCOUNT);
+ domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node,
+ GFP_KERNEL_ACCOUNT, SZ_4K);
if (!domain->pgd_root) {
kfree(domain);
return ERR_PTR(-ENOMEM);
@@ -1395,7 +1396,7 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1,
RISCV_IOMMU_MAX_PSCID, GFP_KERNEL);
if (domain->pscid < 0) {
- iommu_free_page(domain->pgd_root);
+ iommu_free_pages(domain->pgd_root);
kfree(domain);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index af4cc91b2bbf..22f74ba33a0e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -730,14 +730,15 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
+ page_table = iommu_alloc_pages_sz(GFP_ATOMIC | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!page_table)
return ERR_PTR(-ENOMEM);
pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
- iommu_free_page(page_table);
+ iommu_free_pages(page_table);
return ERR_PTR(-ENOMEM);
}
@@ -1062,7 +1063,8 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
+ rk_domain->dt = iommu_alloc_pages_sz(GFP_KERNEL | rk_ops->gfp_flags,
+ SPAGE_SIZE);
if (!rk_domain->dt)
goto err_free_domain;
@@ -1086,7 +1088,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
return &rk_domain->domain;
err_free_dt:
- iommu_free_page(rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
err_free_domain:
kfree(rk_domain);
@@ -1107,13 +1109,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
u32 *page_table = phys_to_virt(pt_phys);
dma_unmap_single(rk_domain->dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
- iommu_free_page(page_table);
+ iommu_free_pages(page_table);
}
}
dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
- iommu_free_page(rk_domain->dt);
+ iommu_free_pages(rk_domain->dt);
kfree(rk_domain);
}
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index e1c76e0f9c2b..433b59f43530 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -31,10 +31,21 @@ struct s390_domain {
unsigned long *dma_table;
spinlock_t list_lock;
struct rcu_head rcu;
+ u8 origin_type;
};
static struct iommu_domain blocking_domain;
+static inline unsigned int calc_rfx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_rsx(dma_addr_t ptr)
+{
+ return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
+}
+
static inline unsigned int calc_rtx(dma_addr_t ptr)
{
return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
@@ -56,6 +67,20 @@ static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}
+static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (rso & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RFX;
+}
+
+static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
+{
+ *entry &= ZPCI_RTE_FLAG_MASK;
+ *entry |= (rto & ZPCI_RTE_ADDR_MASK);
+ *entry |= ZPCI_TABLE_TYPE_RSX;
+}
+
static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
*entry &= ZPCI_RTE_FLAG_MASK;
@@ -70,6 +95,22 @@ static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
*entry |= ZPCI_TABLE_TYPE_SX;
}
+static inline void validate_rf_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RFX;
+}
+
+static inline void validate_rs_entry(unsigned long *entry)
+{
+ *entry &= ~ZPCI_TABLE_VALID_MASK;
+ *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+ *entry |= ZPCI_TABLE_VALID;
+ *entry |= ZPCI_TABLE_LEN_RSX;
+}
+
static inline void validate_rt_entry(unsigned long *entry)
{
*entry &= ~ZPCI_TABLE_VALID_MASK;
@@ -120,6 +161,22 @@ static inline int pt_entry_isvalid(unsigned long entry)
return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}
+static inline unsigned long *get_rf_rso(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
+static inline unsigned long *get_rs_rto(unsigned long entry)
+{
+ if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
+ return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+ else
+ return NULL;
+}
+
static inline unsigned long *get_rt_sto(unsigned long entry)
{
if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
@@ -191,18 +248,59 @@ static void dma_free_seg_table(unsigned long entry)
dma_free_cpu_table(sto);
}
-static void dma_cleanup_tables(unsigned long *table)
+static void dma_free_rt_table(unsigned long entry)
{
+ unsigned long *rto = get_rs_rto(entry);
int rtx;
- if (!table)
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(rto[rtx]))
+ dma_free_seg_table(rto[rtx]);
+
+ dma_free_cpu_table(rto);
+}
+
+static void dma_free_rs_table(unsigned long entry)
+{
+ unsigned long *rso = get_rf_rso(entry);
+ int rsx;
+
+ for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+ if (reg_entry_isvalid(rso[rsx]))
+ dma_free_rt_table(rso[rsx]);
+
+ dma_free_cpu_table(rso);
+}
+
+static void dma_cleanup_tables(struct s390_domain *domain)
+{
+ int rtx, rsx, rfx;
+
+ if (!domain->dma_table)
return;
- for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
- if (reg_entry_isvalid(table[rtx]))
- dma_free_seg_table(table[rtx]);
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
+ if (reg_entry_isvalid(domain->dma_table[rfx]))
+ dma_free_rs_table(domain->dma_table[rfx]);
+ break;
+ case ZPCI_TABLE_TYPE_RSX:
+ for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
+ if (reg_entry_isvalid(domain->dma_table[rsx]))
+ dma_free_rt_table(domain->dma_table[rsx]);
+ break;
+ case ZPCI_TABLE_TYPE_RTX:
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(domain->dma_table[rtx]))
+ dma_free_seg_table(domain->dma_table[rtx]);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+ return;
+ }
- dma_free_cpu_table(table);
+ dma_free_cpu_table(domain->dma_table);
}
static unsigned long *dma_alloc_page_table(gfp_t gfp)
@@ -218,6 +316,70 @@ static unsigned long *dma_alloc_page_table(gfp_t gfp)
return table;
}
+static unsigned long *dma_walk_rs_table(unsigned long *rso,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned int rsx = calc_rsx(dma_addr);
+ unsigned long old_rse, rse;
+ unsigned long *rsep, *rto;
+
+ rsep = &rso[rsx];
+ rse = READ_ONCE(*rsep);
+ if (reg_entry_isvalid(rse)) {
+ rto = get_rs_rto(rse);
+ } else {
+ rto = dma_alloc_cpu_table(gfp);
+ if (!rto)
+ return NULL;
+
+ set_rs_rto(&rse, virt_to_phys(rto));
+ validate_rs_entry(&rse);
+ entry_clr_protected(&rse);
+
+ old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
+ if (old_rse != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_cpu_table(rto);
+ rto = get_rs_rto(old_rse);
+ }
+ }
+ return rto;
+}
+
+static unsigned long *dma_walk_rf_table(unsigned long *rfo,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ unsigned int rfx = calc_rfx(dma_addr);
+ unsigned long old_rfe, rfe;
+ unsigned long *rfep, *rso;
+
+ rfep = &rfo[rfx];
+ rfe = READ_ONCE(*rfep);
+ if (reg_entry_isvalid(rfe)) {
+ rso = get_rf_rso(rfe);
+ } else {
+ rso = dma_alloc_cpu_table(gfp);
+ if (!rso)
+ return NULL;
+
+ set_rf_rso(&rfe, virt_to_phys(rso));
+ validate_rf_entry(&rfe);
+ entry_clr_protected(&rfe);
+
+ old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
+ if (old_rfe != ZPCI_TABLE_INVALID) {
+ /* Someone else was faster, use theirs */
+ dma_free_cpu_table(rso);
+ rso = get_rf_rso(old_rfe);
+ }
+ }
+
+ if (!rso)
+ return NULL;
+
+ return dma_walk_rs_table(rso, dma_addr, gfp);
+}
+
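+/*
+ * Walk order for the deepest (RFX) layout, as wired up in
+ * dma_walk_cpu_trans() below (sketch):
+ *
+ *   rto = dma_walk_rf_table(rfo, iova, gfp); // RF -> RS -> RT origin
+ *   sto = dma_get_seg_table_origin(&rto[calc_rtx(iova)], gfp);
+ *   pto = dma_get_page_table_origin(&sto[calc_sx(iova)], gfp);
+ *   pte = &pto[calc_px(iova)];
+ *
+ * Each level installs a missing lower table with cmpxchg() and frees its
+ * own allocation if another CPU won the race.
+ */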
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
unsigned long old_rte, rte;
@@ -271,11 +433,31 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
return pto;
}
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
+static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
+ dma_addr_t dma_addr, gfp_t gfp)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
+ case ZPCI_TABLE_TYPE_RSX:
+ return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
+ case ZPCI_TABLE_TYPE_RTX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
+ dma_addr_t dma_addr, gfp_t gfp)
{
- unsigned long *sto, *pto;
+ unsigned long *rto, *sto, *pto;
unsigned int rtx, sx, px;
+ rto = dma_walk_region_tables(domain, dma_addr, gfp);
+ if (!rto)
+ return NULL;
+
rtx = calc_rtx(dma_addr);
sto = dma_get_seg_table_origin(&rto[rtx], gfp);
if (!sto)
@@ -329,9 +511,25 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
}
}
+static inline u64 max_tbl_size(struct s390_domain *domain)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RTX:
+ return ZPCI_TABLE_SIZE_RT - 1;
+ case ZPCI_TABLE_TYPE_RSX:
+ return ZPCI_TABLE_SIZE_RS - 1;
+ case ZPCI_TABLE_TYPE_RFX:
+ return U64_MAX;
+ default:
+ return 0;
+ }
+}
+
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain *s390_domain;
+ u64 aperture_size;
s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
if (!s390_domain)
@@ -342,9 +540,26 @@ static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
kfree(s390_domain);
return NULL;
}
+
+ aperture_size = min(s390_iommu_aperture,
+ zdev->end_dma - zdev->start_dma + 1);
+ if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+ } else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
+ (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
+ } else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
+ } else {
+ /* Assume RTX available */
+ s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
+ aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
+ }
+ zdev->end_dma = zdev->start_dma + aperture_size - 1;
+
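+/*
+ * Worked example (table coverage assumed: one RT table spans 4 TiB, one
+ * RS table 8 PiB): with start_dma == 0, an aperture up to 4 TiB selects
+ * RTX, up to 8 PiB selects RSX if the device's dtsm allows it, and
+ * anything larger requires RFX.
+ */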
s390_domain->domain.geometry.force_aperture = true;
s390_domain->domain.geometry.aperture_start = 0;
- s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;
+ s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);
spin_lock_init(&s390_domain->list_lock);
INIT_LIST_HEAD_RCU(&s390_domain->devices);
@@ -356,7 +571,7 @@ static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);
- dma_cleanup_tables(s390_domain->dma_table);
+ dma_cleanup_tables(s390_domain);
kfree(s390_domain);
}
@@ -381,6 +596,21 @@ static void zdev_s390_domain_update(struct zpci_dev *zdev,
spin_unlock_irqrestore(&zdev->dom_lock, flags);
}
+static u64 get_iota_region_flag(struct s390_domain *domain)
+{
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RTX:
+ return ZPCI_IOTA_RTTO_FLAG;
+ case ZPCI_TABLE_TYPE_RSX:
+ return ZPCI_IOTA_RSTO_FLAG;
+ case ZPCI_TABLE_TYPE_RFX:
+ return ZPCI_IOTA_RFTO_FLAG;
+ default:
+ WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
+ return 0;
+ }
+}
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -399,7 +629,7 @@ static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
default:
s390_domain = to_s390_domain(domain);
iota = virt_to_phys(s390_domain->dma_table) |
- ZPCI_IOTA_RTTO_FLAG;
+ get_iota_region_flag(s390_domain);
rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
zdev->end_dma, iota, status);
}
@@ -482,6 +712,8 @@ static void s390_iommu_get_resv_regions(struct device *dev,
{
struct zpci_dev *zdev = to_zpci_dev(dev);
struct iommu_resv_region *region;
+ u64 max_size, end_resv;
+ unsigned long flags;
if (zdev->start_dma) {
region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
@@ -491,10 +723,21 @@ static void s390_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, list);
}
- if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
- region = iommu_alloc_resv_region(zdev->end_dma + 1,
- ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
- 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
+ spin_lock_irqsave(&zdev->dom_lock, flags);
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+ return;
+ }
+
+ max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+
+ if (zdev->end_dma < max_size) {
+ end_resv = max_size - zdev->end_dma;
+ region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
+ 0, IOMMU_RESV_RESERVED,
+ GFP_KERNEL);
if (!region)
return;
list_add_tail(&region->list, list);
@@ -510,13 +753,9 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
zdev = to_zpci_dev(dev);
- if (zdev->start_dma > zdev->end_dma ||
- zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
+ if (zdev->start_dma > zdev->end_dma)
return ERR_PTR(-EINVAL);
- if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
- zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
-
if (zdev->tlb_refresh)
dev->iommu->shadow_on_flush = 1;
@@ -606,8 +845,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
int rc;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
- gfp);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
if (unlikely(!entry)) {
rc = -ENOMEM;
goto undo_cpu_trans;
@@ -622,8 +860,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
undo_cpu_trans:
while (i-- > 0) {
dma_addr -= PAGE_SIZE;
- entry = dma_walk_cpu_trans(s390_domain->dma_table,
- dma_addr, gfp);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
if (!entry)
break;
dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
@@ -640,8 +877,7 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
int rc = 0;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
- GFP_ATOMIC);
+ entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
if (unlikely(!entry)) {
rc = -EINVAL;
break;
@@ -685,6 +921,51 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
return rc;
}
+static unsigned long *get_rso_from_iova(struct s390_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long *rfo;
+ unsigned long rfe;
+ unsigned int rfx;
+
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ rfo = domain->dma_table;
+ rfx = calc_rfx(iova);
+ rfe = READ_ONCE(rfo[rfx]);
+ if (!reg_entry_isvalid(rfe))
+ return NULL;
+ return get_rf_rso(rfe);
+ case ZPCI_TABLE_TYPE_RSX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long *get_rto_from_iova(struct s390_domain *domain,
+ dma_addr_t iova)
+{
+ unsigned long *rso;
+ unsigned long rse;
+ unsigned int rsx;
+
+ switch (domain->origin_type) {
+ case ZPCI_TABLE_TYPE_RFX:
+ case ZPCI_TABLE_TYPE_RSX:
+ rso = get_rso_from_iova(domain, iova);
+ rsx = calc_rsx(iova);
+ rse = READ_ONCE(rso[rsx]);
+ if (!reg_entry_isvalid(rse))
+ return NULL;
+ return get_rs_rto(rse);
+ case ZPCI_TABLE_TYPE_RTX:
+ return domain->dma_table;
+ default:
+ return NULL;
+ }
+}
+
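+/*
+ * For RFX/RSX origins the helpers above peel off the extra upper levels,
+ * so the RTX-style lookup in s390_iommu_iova_to_phys() runs unchanged on
+ * the returned region-third table (sketch):
+ *
+ *   rto = get_rto_from_iova(domain, iova);
+ *   rte = READ_ONCE(rto[calc_rtx(iova)]);
+ */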
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -698,10 +979,13 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
iova > domain->geometry.aperture_end)
return 0;
+ rto = get_rto_from_iova(s390_domain, iova);
+ if (!rto)
+ return 0;
+
rtx = calc_rtx(iova);
sx = calc_sx(iova);
px = calc_px(iova);
- rto = s390_domain->dma_table;
rte = READ_ONCE(rto[rtx]);
if (reg_entry_isvalid(rte)) {
@@ -756,7 +1040,6 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
int zpci_init_iommu(struct zpci_dev *zdev)
{
- u64 aperture_size;
int rc = 0;
rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
@@ -774,12 +1057,6 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_sysfs;
- zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
- aperture_size = min3(s390_iommu_aperture,
- ZPCI_TABLE_SIZE_RT - zdev->start_dma,
- zdev->end_dma - zdev->start_dma + 1);
- zdev->end_dma = zdev->start_dma + aperture_size - 1;
-
return 0;
out_sysfs:
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 8d8f11854676..76c9620af4bb 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -690,8 +690,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
if (!sun50i_domain)
return NULL;
- sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
- get_order(DT_SIZE));
+ sun50i_domain->dt =
+ iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, DT_SIZE);
if (!sun50i_domain->dt)
goto err_free_domain;
@@ -713,7 +713,7 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
- iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
+ iommu_free_pages(sun50i_domain->dt);
sun50i_domain->dt = NULL;
kfree(sun50i_domain);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 69d353e1df84..e58fe9d8b9e7 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -51,14 +51,17 @@ struct tegra_smmu {
struct iommu_device iommu; /* IOMMU Core code handle */
};
+struct tegra_pd;
+struct tegra_pt;
+
struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
spinlock_t lock;
u32 *count;
- struct page **pts;
- struct page *pd;
+ struct tegra_pt **pts;
+ struct tegra_pd *pd;
dma_addr_t pd_dma;
unsigned id;
u32 attr;
@@ -155,6 +158,14 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
SMMU_PDE_NONSECURE)
+struct tegra_pd {
+ u32 val[SMMU_NUM_PDE];
+};
+
+struct tegra_pt {
+ u32 val[SMMU_NUM_PTE];
+};
+
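+/*
+ * Giving the directory and table entries concrete types lets the driver
+ * map them by kernel virtual address, as done below:
+ *
+ *   as->pd_dma = dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD,
+ *                               DMA_TO_DEVICE);
+ *
+ * replacing the earlier dma_map_page() on a struct page.
+ */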
static unsigned int iova_pd_index(unsigned long iova)
{
return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
@@ -284,7 +295,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
+ as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD);
if (!as->pd) {
kfree(as);
return NULL;
@@ -292,7 +303,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
if (!as->count) {
- __iommu_free_pages(as->pd, 0);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -300,7 +311,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
if (!as->pts) {
kfree(as->count);
- __iommu_free_pages(as->pd, 0);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -417,8 +428,8 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
goto unlock;
}
- as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
- DMA_TO_DEVICE);
+ as->pd_dma =
+ dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, as->pd_dma)) {
err = -ENOMEM;
goto unlock;
@@ -450,7 +461,7 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
return 0;
err_unmap:
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
mutex_unlock(&smmu->lock);
@@ -469,7 +480,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
tegra_smmu_free_asid(smmu, as->id);
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
as->smmu = NULL;
@@ -548,11 +559,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
+ u32 *pd = &as->pd->val[pd_index];
unsigned long offset = pd_index * sizeof(*pd);
/* Set the page directory entry first */
- pd[pd_index] = value;
+ *pd = value;
/* Then flush the page directory entry from caches */
dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
@@ -564,11 +575,9 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova)
{
- u32 *pt = page_address(pt_page);
-
- return pt + iova_pt_index(iova);
+ return &pt->val[iova_pt_index(iova)];
}
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
@@ -576,21 +585,19 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- struct page *pt_page;
- u32 *pd;
+ struct tegra_pt *pt;
- pt_page = as->pts[pd_index];
- if (!pt_page)
+ pt = as->pts[pd_index];
+ if (!pt)
return NULL;
- pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
- return tegra_smmu_pte_offset(pt_page, iova);
+ return tegra_smmu_pte_offset(pt, iova);
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- dma_addr_t *dmap, struct page *page)
+ dma_addr_t *dmap, struct tegra_pt *pt)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
@@ -598,30 +605,28 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
if (!as->pts[pde]) {
dma_addr_t dma;
- dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
+ dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
- __iommu_free_pages(page, 0);
+ iommu_free_pages(pt);
return NULL;
}
if (!smmu_dma_addr_valid(smmu, dma)) {
- dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
- __iommu_free_pages(page, 0);
+ dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
return NULL;
}
- as->pts[pde] = page;
+ as->pts[pde] = pt;
tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
SMMU_PDE_NEXT));
*dmap = dma;
} else {
- u32 *pd = page_address(as->pd);
-
- *dmap = smmu_pde_to_dma(smmu, pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -637,7 +642,7 @@ static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/*
* When no entries in this page table are used anymore, return the
@@ -645,13 +650,13 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
*/
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);
tegra_smmu_set_pde(as, iova, 0);
- dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
- __iommu_free_pages(page, 0);
+ dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
as->pts[pde] = NULL;
}
}
@@ -671,16 +676,16 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static struct page *as_get_pde_page(struct tegra_smmu_as *as,
- unsigned long iova, gfp_t gfp,
- unsigned long *flags)
+static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
+ unsigned long iova, gfp_t gfp,
+ unsigned long *flags)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/* at first check whether allocation needs to be done at all */
- if (page)
- return page;
+ if (pt)
+ return pt;
/*
* In order to prevent exhaustion of the atomic memory pool, we
@@ -690,7 +695,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
if (gfpflags_allow_blocking(gfp))
spin_unlock_irqrestore(&as->lock, *flags);
- page = __iommu_alloc_pages(gfp | __GFP_DMA, 0);
+ pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, SMMU_SIZE_PT);
if (gfpflags_allow_blocking(gfp))
spin_lock_irqsave(&as->lock, *flags);
@@ -701,13 +706,13 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
* if allocation succeeded and the allocation failure isn't fatal.
*/
if (as->pts[pde]) {
- if (page)
- __iommu_free_pages(page, 0);
+ if (pt)
+ iommu_free_pages(pt);
- page = as->pts[pde];
+ pt = as->pts[pde];
}
- return page;
+ return pt;
}
static int
@@ -717,15 +722,15 @@ __tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
- struct page *page;
+ struct tegra_pt *pt;
u32 pte_attrs;
u32 *pte;
- page = as_get_pde_page(as, iova, gfp, flags);
- if (!page)
+ pt = as_get_pde_page(as, iova, gfp, flags);
+ if (!pt)
return -ENOMEM;
- pte = as_get_pte(as, iova, &pte_dma, page);
+ pte = as_get_pte(as, iova, &pte_dma, pt);
if (!pte)
return -ENOMEM;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index b85ce6310ddb..ecd41fb03e5a 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -48,6 +48,7 @@ struct viommu_dev {
u64 pgsize_bitmap;
u32 first_domain;
u32 last_domain;
+ u32 identity_domain_id;
/* Supported MAP flags */
u32 map_flags;
u32 probe_size;
@@ -62,7 +63,6 @@ struct viommu_mapping {
struct viommu_domain {
struct iommu_domain domain;
struct viommu_dev *viommu;
- struct mutex mutex; /* protects viommu pointer */
unsigned int id;
u32 map_flags;
@@ -70,7 +70,6 @@ struct viommu_domain {
struct rb_root_cached mappings;
unsigned long nr_endpoints;
- bool bypass;
};
struct viommu_endpoint {
@@ -97,6 +96,8 @@ struct viommu_event {
};
};
+static struct viommu_domain viommu_identity_domain;
+
#define to_viommu_domain(domain) \
container_of(domain, struct viommu_domain, domain)
@@ -305,6 +306,22 @@ out_unlock:
return ret;
}
+static int viommu_send_attach_req(struct viommu_dev *viommu, struct device *dev,
+ struct virtio_iommu_req_attach *req)
+{
+ int ret;
+ unsigned int i;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ req->endpoint = cpu_to_le32(fwspec->ids[i]);
+ ret = viommu_send_req_sync(viommu, req, sizeof(*req));
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
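+/*
+ * Both viommu_attach_dev() and viommu_attach_identity_domain() below fill
+ * a struct virtio_iommu_req_attach and call this helper, which sends one
+ * request per endpoint ID in the device's fwspec.
+ */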
/*
* viommu_add_mapping - add a mapping to the internal tree
*
@@ -637,71 +654,45 @@ static void viommu_event_handler(struct virtqueue *vq)
/* IOMMU API */
-static struct iommu_domain *viommu_domain_alloc(unsigned type)
+static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
{
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct viommu_dev *viommu = vdev->viommu;
+ unsigned long viommu_page_size;
struct viommu_domain *vdomain;
-
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
-
- vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
- if (!vdomain)
- return NULL;
-
- mutex_init(&vdomain->mutex);
- spin_lock_init(&vdomain->mappings_lock);
- vdomain->mappings = RB_ROOT_CACHED;
-
- return &vdomain->domain;
-}
-
-static int viommu_domain_finalise(struct viommu_endpoint *vdev,
- struct iommu_domain *domain)
-{
int ret;
- unsigned long viommu_page_size;
- struct viommu_dev *viommu = vdev->viommu;
- struct viommu_domain *vdomain = to_viommu_domain(domain);
viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
if (viommu_page_size > PAGE_SIZE) {
dev_err(vdev->dev,
"granule 0x%lx larger than system page size 0x%lx\n",
viommu_page_size, PAGE_SIZE);
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
- ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
- viommu->last_domain, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
+ if (!vdomain)
+ return ERR_PTR(-ENOMEM);
- vdomain->id = (unsigned int)ret;
+ spin_lock_init(&vdomain->mappings_lock);
+ vdomain->mappings = RB_ROOT_CACHED;
- domain->pgsize_bitmap = viommu->pgsize_bitmap;
- domain->geometry = viommu->geometry;
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(vdomain);
+ return ERR_PTR(ret);
+ }
- vdomain->map_flags = viommu->map_flags;
- vdomain->viommu = viommu;
+ vdomain->id = (unsigned int)ret;
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- if (virtio_has_feature(viommu->vdev,
- VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
- vdomain->bypass = true;
- return 0;
- }
+ vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
+ vdomain->domain.geometry = viommu->geometry;
- ret = viommu_domain_map_identity(vdev, vdomain);
- if (ret) {
- ida_free(&viommu->domain_ids, vdomain->id);
- vdomain->viommu = NULL;
- return ret;
- }
- }
+ vdomain->map_flags = viommu->map_flags;
+ vdomain->viommu = viommu;
- return 0;
+ return &vdomain->domain;
}
static void viommu_domain_free(struct iommu_domain *domain)
@@ -717,29 +708,37 @@ static void viommu_domain_free(struct iommu_domain *domain)
kfree(vdomain);
}
+static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
+{
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct iommu_domain *domain;
+ int ret;
+
+ if (virtio_has_feature(vdev->viommu->vdev,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG))
+ return &viommu_identity_domain.domain;
+
+ domain = viommu_domain_alloc_paging(dev);
+ if (IS_ERR(domain))
+ return domain;
+
+ ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
+ if (ret) {
+ viommu_domain_free(domain);
+ return ERR_PTR(ret);
+ }
+ return domain;
+}
+
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- int i;
int ret = 0;
struct virtio_iommu_req_attach req;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
struct viommu_domain *vdomain = to_viommu_domain(domain);
- mutex_lock(&vdomain->mutex);
- if (!vdomain->viommu) {
- /*
- * Properly initialize the domain now that we know which viommu
- * owns it.
- */
- ret = viommu_domain_finalise(vdev, domain);
- } else if (vdomain->viommu != vdev->viommu) {
- ret = -EINVAL;
- }
- mutex_unlock(&vdomain->mutex);
-
- if (ret)
- return ret;
+ if (vdomain->viommu != vdev->viommu)
+ return -EINVAL;
/*
* In the virtio-iommu device, when attaching the endpoint to a new
@@ -761,16 +760,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
.domain = cpu_to_le32(vdomain->id),
};
- if (vdomain->bypass)
- req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
-
- for (i = 0; i < fwspec->num_ids; i++) {
- req.endpoint = cpu_to_le32(fwspec->ids[i]);
-
- ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
- if (ret)
- return ret;
- }
+ ret = viommu_send_attach_req(vdomain->viommu, dev, &req);
+ if (ret)
+ return ret;
if (!vdomain->nr_endpoints) {
/*
@@ -788,6 +780,40 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
+static int viommu_attach_identity_domain(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret = 0;
+ struct virtio_iommu_req_attach req;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+ struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+ req = (struct virtio_iommu_req_attach) {
+ .head.type = VIRTIO_IOMMU_T_ATTACH,
+ .domain = cpu_to_le32(vdev->viommu->identity_domain_id),
+ .flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS),
+ };
+
+ ret = viommu_send_attach_req(vdev->viommu, dev, &req);
+ if (ret)
+ return ret;
+
+ if (vdev->vdomain)
+ vdev->vdomain->nr_endpoints--;
+ vdomain->nr_endpoints++;
+ vdev->vdomain = vdomain;
+ return 0;
+}
+
+static struct viommu_domain viommu_identity_domain = {
+ .domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &(const struct iommu_domain_ops) {
+ .attach_dev = viommu_attach_identity_domain,
+ },
+ },
+};
+
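+/*
+ * A single static identity domain serves every endpoint whose viommu
+ * offers VIRTIO_IOMMU_F_BYPASS_CONFIG; the per-device viommu and its
+ * reserved ID are resolved at attach time, e.g.:
+ *
+ *   .domain = cpu_to_le32(vdev->viommu->identity_domain_id),
+ */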
static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
int i;
@@ -1062,7 +1088,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_ops viommu_ops = {
.capable = viommu_capable,
- .domain_alloc = viommu_domain_alloc,
+ .domain_alloc_identity = viommu_domain_alloc_identity,
+ .domain_alloc_paging = viommu_domain_alloc_paging,
.probe_device = viommu_probe_device,
.release_device = viommu_release_device,
.device_group = viommu_device_group,
@@ -1184,6 +1211,12 @@ static int viommu_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+ /* Reserve an ID to use as the bypass domain */
+ if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+ viommu->identity_domain_id = viommu->first_domain;
+ viommu->first_domain++;
+ }
+
viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
virtio_device_ready(vdev);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 08bb3b031f23..0d196e447142 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -166,6 +166,11 @@ config DW_APB_ICTL
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN_HIERARCHY
+config ECONET_EN751221_INTC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config FARADAY_FTINTC010
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 365bcea9a61f..23ca4959e6ce 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o
obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
+obj-$(CONFIG_ECONET_EN751221_INTC) += irq-econet-en751221.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 552aa04ff063..e7dfcf0cda43 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -180,7 +180,7 @@ static void __init combiner_init(void __iomem *combiner_base,
if (!combiner_data)
return;
- combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
+ combiner_irq_domain = irq_domain_create_linear(of_fwnode_handle(np), nr_irq,
&combiner_irq_domain_ops, combiner_data);
if (WARN_ON(!combiner_irq_domain)) {
pr_warn("%s: irq domain init failed\n", __func__);
diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c
index dfb761e86c9c..8f300843bbca 100644
--- a/drivers/irqchip/irq-al-fic.c
+++ b/drivers/irqchip/irq-al-fic.c
@@ -65,15 +65,13 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
struct al_fic *fic = gc->private;
enum al_fic_state new_state;
- int ret = 0;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) &&
((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) {
pr_debug("fic doesn't support flow type %d\n", flow_type);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ?
@@ -91,16 +89,10 @@ static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type)
if (fic->state == AL_FIC_UNCONFIGURED) {
al_fic_set_trigger(fic, gc, new_state);
} else if (fic->state != new_state) {
- pr_debug("fic %s state already configured to %d\n",
- fic->name, fic->state);
- ret = -EINVAL;
- goto err;
+ pr_debug("fic %s state already configured to %d\n", fic->name, fic->state);
+ return -EINVAL;
}
-
-err:
- irq_gc_unlock(gc);
-
- return ret;
+ return 0;
}
static void al_fic_irq_handler(struct irq_desc *desc)
@@ -139,7 +131,7 @@ static int al_fic_register(struct device_node *node,
struct irq_chip_generic *gc;
int ret;
- fic->domain = irq_domain_add_linear(node,
+ fic->domain = irq_domain_create_linear(of_fwnode_handle(node),
NR_FIC_IRQS,
&irq_generic_chip_ops,
fic);
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index a1430ab60a8a..a5289dc26dca 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -205,15 +205,14 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
return -ENXIO;
}
- middle_domain = irq_domain_add_hierarchy(gic_domain, 0, 0, NULL,
- &alpine_msix_middle_domain_ops,
- priv);
+ middle_domain = irq_domain_create_hierarchy(gic_domain, 0, 0, NULL,
+ &alpine_msix_middle_domain_ops, priv);
if (!middle_domain) {
pr_err("Failed to create the MSIX middle domain\n");
return -ENOMEM;
}
- msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node),
&alpine_msix_domain_info,
middle_domain);
if (!msi_domain) {
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 974dc088c853..032d66dceb8e 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -1014,7 +1014,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
irqc->info.die_stride = off - start_off;
- irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
+ irqc->hw_domain = irq_domain_create_tree(of_fwnode_handle(node),
&aic_irq_domain_ops, irqc);
if (WARN_ON(!irqc->hw_domain))
goto err_unmap;
@@ -1067,7 +1067,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
if (is_kernel_in_hyp_mode()) {
struct irq_fwspec mi = {
- .fwnode = of_node_to_fwnode(node),
+ .fwnode = of_fwnode_handle(node),
.param_count = 3,
.param = {
[0] = AIC_FIQ, /* This is a lie */
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 2aa6a51e05d0..67b672a78862 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -348,12 +348,12 @@ static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node,
mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK;
}
- mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size,
+ mpic->msi_inner_domain = irq_domain_create_linear(NULL, mpic->msi_doorbell_size,
&mpic_msi_domain_ops, mpic);
if (!mpic->msi_inner_domain)
return -ENOMEM;
- mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info,
+ mpic->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node), &mpic_msi_domain_info,
mpic->msi_inner_domain);
if (!mpic->msi_domain) {
irq_domain_remove(mpic->msi_inner_domain);
@@ -492,7 +492,7 @@ static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node)
{
int base_ipi;
- mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR,
+ mpic->ipi_domain = irq_domain_create_linear(of_fwnode_handle(node), IPI_DOORBELL_NR,
&mpic_ipi_domain_ops, mpic);
if (WARN_ON(!mpic->ipi_domain))
return -ENOMEM;
@@ -546,7 +546,7 @@ static void mpic_reenable_percpu(struct mpic *mpic)
{
/* Re-enable per-CPU interrupts that were enabled before suspend */
for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) {
- unsigned int virq = irq_linear_revmap(mpic->domain, i);
+ unsigned int virq = irq_find_mapping(mpic->domain, i);
struct irq_data *d;
if (!virq || !irq_percpu_is_enabled(virq))
@@ -740,7 +740,7 @@ static void mpic_resume(void)
/* Re-enable interrupts */
for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) {
- unsigned int virq = irq_linear_revmap(mpic->domain, i);
+ unsigned int virq = irq_find_mapping(mpic->domain, i);
struct irq_data *d;
if (!virq)
@@ -861,7 +861,7 @@ static int __init mpic_of_init(struct device_node *node, struct device_node *par
if (!mpic_is_ipi_available(mpic))
nr_irqs = MPIC_PER_CPU_IRQS_NR;
- mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic);
+ mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, &mpic_irq_ops, mpic);
if (!mpic->domain) {
pr_err("%pOF: Unable to add IRQ domain\n", node);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index 9c9fc3e2967e..87c1feb999ff 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -82,7 +82,7 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
goto err_iounmap;
}
- i2c_ic->irq_domain = irq_domain_add_linear(node, ASPEED_I2C_IC_NUM_BUS,
+ i2c_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), ASPEED_I2C_IC_NUM_BUS,
&aspeed_i2c_ic_irq_domain_ops,
NULL);
if (!i2c_ic->irq_domain) {
diff --git a/drivers/irqchip/irq-aspeed-intc.c b/drivers/irqchip/irq-aspeed-intc.c
index bd3b759b4b2c..8330221799a0 100644
--- a/drivers/irqchip/irq-aspeed-intc.c
+++ b/drivers/irqchip/irq-aspeed-intc.c
@@ -102,7 +102,7 @@ static int __init aspeed_intc_ic_of_init(struct device_node *node,
writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG);
writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG);
- intc_ic->irq_domain = irq_domain_add_linear(node, INTC_IRQS_PER_WORD,
+ intc_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), INTC_IRQS_PER_WORD,
&aspeed_intc_ic_irq_domain_ops, intc_ic);
if (!intc_ic->irq_domain) {
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 94a7223e95df..1c7045467c48 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -165,7 +165,7 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
goto err;
}
- scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs,
+ scu_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), scu_ic->num_irqs,
&aspeed_scu_ic_domain_ops,
scu_ic);
if (!scu_ic->irq_domain) {
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
index 62ccf2c0c414..9b665b5bb531 100644
--- a/drivers/irqchip/irq-aspeed-vic.c
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -211,8 +211,8 @@ static int __init avic_of_init(struct device_node *node,
set_handle_irq(avic_handle_irq);
/* Register our domain */
- vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0,
- &avic_dom_ops, vic);
+ vic->dom = irq_domain_create_simple(of_fwnode_handle(node), NUM_IRQS, 0,
+ &avic_dom_ops, vic);
return 0;
}
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
index 92f001a5ff8d..268cc18b781f 100644
--- a/drivers/irqchip/irq-ath79-misc.c
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -147,7 +147,7 @@ static int __init ath79_misc_intc_of_init(
return -ENOMEM;
}
- domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
+ domain = irq_domain_create_linear(of_fwnode_handle(node), ATH79_MISC_IRQ_COUNT,
&misc_irq_domain_ops, base);
if (!domain) {
pr_err("Failed to add MISC irqdomain\n");
@@ -188,7 +188,7 @@ void __init ath79_misc_irq_init(void __iomem *regs, int irq,
else
ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
- domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
+ domain = irq_domain_create_legacy(NULL, ATH79_MISC_IRQ_COUNT,
irq_base, 0, &misc_irq_domain_ops, regs);
if (!domain)
panic("Failed to create MISC irqdomain");
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 4525366d16d6..3cad30a40c19 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -228,7 +228,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
goto err_iounmap;
}
- domain = irq_domain_add_linear(node, nchips * 32, ops, aic);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32, ops, aic);
if (!domain) {
ret = -ENOMEM;
goto err_free_aic;
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 3839ad79ad31..03aeed39a4d2 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -78,9 +78,8 @@ static int aic_retrigger(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
/* Enable interrupt on AIC5 */
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, d->mask, AT91_AIC_ISCR);
- irq_gc_unlock(gc);
return 1;
}
@@ -106,30 +105,27 @@ static void aic_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR);
irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR);
- irq_gc_unlock(gc);
}
static void aic_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR);
irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR);
- irq_gc_unlock(gc);
}
static void aic_pm_shutdown(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR);
irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR);
- irq_gc_unlock(gc);
}
#else
#define aic_suspend NULL
@@ -175,10 +171,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
{
struct irq_domain_chip_generic *dgc = d->gc;
struct irq_chip_generic *gc;
- unsigned long flags;
unsigned smr;
- int idx;
- int ret;
+ int idx, ret;
if (!dgc)
return -EINVAL;
@@ -194,11 +188,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
gc = dgc->gc[idx];
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock_irq)(&gc->lock);
smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
- irq_gc_unlock_irqrestore(gc, flags);
return ret;
}
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index e98c2875af9e..60b00d2c3d7a 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -92,11 +92,10 @@ static void aic5_mask(struct irq_data *d)
* Disable interrupt on AIC5. We always take the lock of the
* first irq chip as all chips share the same registers.
*/
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
gc->mask_cache &= ~d->mask;
- irq_gc_unlock(bgc);
}
static void aic5_unmask(struct irq_data *d)
@@ -109,11 +108,10 @@ static void aic5_unmask(struct irq_data *d)
* Enable interrupt on AIC5. We always take the lock of the
* first irq chip as all chips share the same registers.
*/
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IECR);
gc->mask_cache |= d->mask;
- irq_gc_unlock(bgc);
}
static int aic5_retrigger(struct irq_data *d)
@@ -122,11 +120,9 @@ static int aic5_retrigger(struct irq_data *d)
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
/* Enable interrupt on AIC5 */
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
- irq_gc_unlock(bgc);
-
return 1;
}
@@ -137,14 +133,12 @@ static int aic5_set_type(struct irq_data *d, unsigned type)
unsigned int smr;
int ret;
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
ret = aic_common_set_type(d, type, &smr);
if (!ret)
irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
- irq_gc_unlock(bgc);
-
return ret;
}
@@ -166,7 +160,7 @@ static void aic5_suspend(struct irq_data *d)
smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR);
}
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
for (i = 0; i < dgc->irqs_per_chip; i++) {
mask = 1 << i;
if ((mask & gc->mask_cache) == (mask & gc->wake_active))
@@ -178,7 +172,6 @@ static void aic5_suspend(struct irq_data *d)
else
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
}
- irq_gc_unlock(bgc);
}
static void aic5_resume(struct irq_data *d)
@@ -190,7 +183,7 @@ static void aic5_resume(struct irq_data *d)
int i;
u32 mask;
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
if (smr_cache) {
irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU);
@@ -214,7 +207,6 @@ static void aic5_resume(struct irq_data *d)
else
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
}
- irq_gc_unlock(bgc);
}
static void aic5_pm_shutdown(struct irq_data *d)
@@ -225,13 +217,12 @@ static void aic5_pm_shutdown(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
int i;
- irq_gc_lock(bgc);
+ guard(raw_spinlock)(&bgc->lock);
for (i = 0; i < dgc->irqs_per_chip; i++) {
irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
}
- irq_gc_unlock(bgc);
}
#else
#define aic5_suspend NULL
@@ -277,7 +268,6 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
unsigned int *out_type)
{
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
- unsigned long flags;
unsigned smr;
int ret;
@@ -289,13 +279,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
if (ret)
return ret;
- irq_gc_lock_irqsave(bgc, flags);
+ guard(raw_spinlock_irq)(&bgc->lock);
irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
- irq_gc_unlock_irqrestore(bgc, flags);
-
return ret;
}
diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c
index 4cce24233f0f..63de5ef6cf2d 100644
--- a/drivers/irqchip/irq-bcm2712-mip.c
+++ b/drivers/irqchip/irq-bcm2712-mip.c
@@ -11,7 +11,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#define MIP_INT_RAISE 0x00
#define MIP_INT_CLEAR 0x10
@@ -174,8 +174,8 @@ static int mip_init_domains(struct mip_priv *mip, struct device_node *np)
{
struct irq_domain *middle;
- middle = irq_domain_add_hierarchy(mip->parent, 0, mip->num_msis, np,
- &mip_middle_domain_ops, mip);
+ middle = irq_domain_create_hierarchy(mip->parent, 0, mip->num_msis, of_fwnode_handle(np),
+ &mip_middle_domain_ops, mip);
if (!middle)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 6c20604c2242..1e384c870350 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -144,7 +144,7 @@ static int __init armctrl_of_init(struct device_node *node,
if (!base)
panic("%pOF: unable to map IC registers\n", node);
- intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
+ intc.domain = irq_domain_create_linear(of_fwnode_handle(node), MAKE_HWIRQ(NR_BANKS, 0),
&armctrl_ops, NULL);
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index e366257684b5..fafd1f71348e 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -325,7 +325,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
bcm2835_init_local_timer_frequency();
- intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1,
+ intc.domain = irq_domain_create_linear(of_fwnode_handle(node), LAST_IRQ + 1,
&bcm2836_arm_irqchip_intc_ops,
NULL);
if (!intc.domain)
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 90daa274ef23..ca4e141c5bc2 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -316,7 +316,7 @@ static int __init bcm6345_l1_of_init(struct device_node *dn,
raw_spin_lock_init(&intc->lock);
- intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ intc->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * intc->n_words,
&bcm6345_l1_domain_ops,
intc);
if (!intc->domain) {
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 36e71af054e9..04fac0cc857f 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -416,7 +416,7 @@ static int __init bcm7038_l1_of_init(struct device_node *dn,
}
}
- intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ intc->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * intc->n_words,
&bcm7038_l1_domain_ops,
intc);
if (!intc->domain) {
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 1e9dab6e0d86..ff22c3104401 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -63,16 +63,15 @@ static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
for (idx = 0; idx < b->n_words; idx++) {
int base = idx * IRQS_PER_WORD;
- struct irq_chip_generic *gc =
- irq_get_domain_generic_chip(b->domain, base);
+ struct irq_chip_generic *gc;
unsigned long pending;
int hwirq;
- irq_gc_lock(gc);
- pending = irq_reg_readl(gc, b->stat_offset[idx]) &
- gc->mask_cache &
- data->irq_map_mask[idx];
- irq_gc_unlock(gc);
+ gc = irq_get_domain_generic_chip(b->domain, base);
+ scoped_guard (raw_spinlock, &gc->lock) {
+ pending = irq_reg_readl(gc, b->stat_offset[idx]) & gc->mask_cache &
+ data->irq_map_mask[idx];
+ }
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD)
generic_handle_domain_irq(b->domain, base + hwirq);
@@ -86,11 +85,9 @@ static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc)
struct bcm7120_l2_intc_data *b = gc->private;
struct irq_chip_type *ct = gc->chip_types;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
if (b->can_wake)
- irq_reg_writel(gc, gc->mask_cache | gc->wake_active,
- ct->regs.mask);
- irq_gc_unlock(gc);
+ irq_reg_writel(gc, gc->mask_cache | gc->wake_active, ct->regs.mask);
}
static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
@@ -98,9 +95,8 @@ static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
struct irq_chip_type *ct = gc->chip_types;
/* Restore the saved mask */
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, gc->mask_cache, ct->regs.mask);
- irq_gc_unlock(gc);
}
static int bcm7120_l2_intc_init_one(struct device_node *dn,
@@ -264,7 +260,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
goto out_free_l1_data;
}
- data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words,
+ data->domain = irq_domain_create_linear(of_fwnode_handle(dn), IRQS_PER_WORD * data->n_words,
&irq_generic_chip_ops, NULL);
if (!data->domain) {
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index db4c9721fcf2..1bec5b2cd3f0 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -97,9 +97,8 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
- unsigned long flags;
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock_irqsave)(&gc->lock);
/* Save the current mask */
if (save)
b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
@@ -109,7 +108,6 @@ static void __brcmstb_l2_intc_suspend(struct irq_data *d, bool save)
irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
}
- irq_gc_unlock_irqrestore(gc, flags);
}
static void brcmstb_l2_intc_shutdown(struct irq_data *d)
@@ -127,9 +125,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
- unsigned long flags;
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock_irqsave)(&gc->lock);
if (ct->chip.irq_ack) {
/* Clear unmasked non-wakeup interrupts */
irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
@@ -139,7 +136,6 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
/* Restore the saved mask */
irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
- irq_gc_unlock_irqrestore(gc, flags);
}
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
@@ -182,7 +178,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
goto out_unmap;
}
- data->domain = irq_domain_add_linear(np, 32,
+ data->domain = irq_domain_create_linear(of_fwnode_handle(np), 32,
&irq_generic_chip_ops, NULL);
if (!data->domain) {
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 48c73c948ddf..c4b73ba2323b 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -184,8 +184,8 @@ static int __init _clps711x_intc_init(struct device_node *np,
clps711x_intc->ops.map = clps711x_intc_irq_map;
clps711x_intc->ops.xlate = irq_domain_xlate_onecell;
clps711x_intc->domain =
- irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs),
- 0, 0, &clps711x_intc->ops, NULL);
+ irq_domain_create_legacy(of_fwnode_handle(np), ARRAY_SIZE(clps711x_irqs), 0, 0,
+ &clps711x_intc->ops, NULL);
if (!clps711x_intc->domain) {
err = -ENOMEM;
goto out_irqfree;
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index a05a7501e107..66bb39e24a52 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -351,10 +351,8 @@ static int __init irqcrossbar_init(struct device_node *node,
if (err)
return err;
- domain = irq_domain_add_hierarchy(parent_domain, 0,
- cb->max_crossbar_sources,
- node, &crossbar_domain_ops,
- NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, cb->max_crossbar_sources,
+ of_fwnode_handle(node), &crossbar_domain_ops, NULL);
if (!domain) {
pr_err("%pOF: failed to allocated domain\n", node);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index 6710691e4c25..5b7150705d29 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -50,11 +50,10 @@ static void irq_ck_mask_set_bit(struct irq_data *d)
unsigned long ifr = ct->regs.mask - 8;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
*ct->mask_cache |= mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
irq_reg_writel(gc, irq_reg_readl(gc, ifr) & ~mask, ifr);
- irq_gc_unlock(gc);
}
static void __init ck_set_gc(struct device_node *node, void __iomem *reg_base,
@@ -114,7 +113,7 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent)
return -EINVAL;
}
- root_domain = irq_domain_add_linear(node, nr_irq,
+ root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq,
&irq_generic_chip_ops, NULL);
if (!root_domain) {
pr_err("C-SKY Intc irq_domain_add failed.\n");
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index 4aebd67d4f8f..1d1f5091f26f 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -255,7 +255,7 @@ csky_mpintc_init(struct device_node *node, struct device_node *parent)
writel_relaxed(BIT(0), INTCG_base + INTCG_ICTLR);
}
- root_domain = irq_domain_add_linear(node, nr_irq, &csky_irqdomain_ops,
+ root_domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irq, &csky_irqdomain_ops,
NULL);
if (!root_domain)
return -ENXIO;
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index d7948c55f542..00cdcc90f614 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -204,8 +204,10 @@ static int __init davinci_cp_intc_do_init(struct resource *res, unsigned int num
return irq_base;
}
- davinci_cp_intc_irq_domain = irq_domain_add_legacy(node, num_irqs, irq_base, 0,
- &davinci_cp_intc_irq_domain_ops, NULL);
+ davinci_cp_intc_irq_domain = irq_domain_create_legacy(of_fwnode_handle(node), num_irqs,
+ irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops,
+ NULL);
if (!davinci_cp_intc_irq_domain) {
pr_err("%s: unable to create an interrupt domain\n", __func__);
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index 3b0d78aac13b..eb5a8de82751 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -95,7 +95,7 @@ static int __init digicolor_of_init(struct device_node *node,
regmap_write(ucregs, UC_IRQ_CONTROL, 1);
digicolor_irq_domain =
- irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
+ irq_domain_create_linear(of_fwnode_handle(node), 64, &irq_generic_chip_ops, NULL);
if (!digicolor_irq_domain) {
pr_err("%pOF: unable to create IRQ domain\n", node);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index d5c1c750c8d2..4240a0dbf627 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -101,10 +101,9 @@ static void dw_apb_ictl_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
writel_relaxed(~0, gc->reg_base + ct->regs.enable);
writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
- irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume NULL
@@ -173,7 +172,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
else
nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));
- domain = irq_domain_add_linear(np, nrirqs, domain_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(np), nrirqs, domain_ops, NULL);
if (!domain) {
pr_err("%pOF: unable to add irq domain\n", np);
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-econet-en751221.c b/drivers/irqchip/irq-econet-en751221.c
new file mode 100644
index 000000000000..d83d5eb12795
--- /dev/null
+++ b/drivers/irqchip/irq-econet-en751221.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * EN751221 Interrupt Controller Driver.
+ *
+ * The EcoNet EN751221 Interrupt Controller is a simple interrupt controller
+ * designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can
+ * be routed to either VPE but not both, so to support per-CPU interrupts, a
+ * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In
+ * this driver, these are called "shadow interrupts". The assignment of shadow
+ * interrupts is defined by the SoC integrator when wiring the interrupt lines,
+ * so they are configurable in the device tree.
+ *
+ * If an interrupt (say 30) needs per-CPU capability, the SoC integrator
+ * allocates another IRQ number (say 29) to be its shadow. The device tree
+ * reflects this by adding the pair <30 29> to the "econet,shadow-interrupts"
+ * property.
+ *
+ * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29,
+ * telling the hardware to mask VPE#1's view of IRQ 30.
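+ *
+ * As an illustration only (the register address and parent interrupt
+ * below are made up, not taken from a real board), a node for this
+ * controller could look like:
+ *
+ *   intc: interrupt-controller@1fb40000 {
+ *     compatible = "econet,en751221-intc";
+ *     reg = <0x1fb40000 0x100>;
+ *     interrupts = <2>;
+ *     interrupt-controller;
+ *     #interrupt-cells = <1>;
+ *     econet,shadow-interrupts = <30 29>;
+ *   };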
+ *
+ * Copyright (C) 2025 Caleb James DeLisle <cjd@cjdns.fr>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQ_COUNT 40
+
+#define NOT_PERCPU 0xff
+#define IS_SHADOW 0xfe
+
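+/* Interrupt lines 0-31 are controlled through bank 0, lines 32-39 through bank 1 */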
+#define REG_MASK0 0x04
+#define REG_MASK1 0x50
+#define REG_PENDING0 0x08
+#define REG_PENDING1 0x54
+
+/**
+ * @membase: Base address of the interrupt controller registers
+ * @interrupt_shadows: Shadow bookkeeping, indexed by interrupt; each entry is one of:
+ * - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow
+ * - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt
+ * - else: This is a per-cpu interrupt whose shadow is the value
+ */
+static struct {
+ void __iomem *membase;
+ u8 interrupt_shadows[IRQ_COUNT];
+} econet_intc __ro_after_init;
+
+static DEFINE_RAW_SPINLOCK(irq_lock);
+
+/* IRQs must be disabled */
+static void econet_wreg(u32 reg, u32 val, u32 mask)
+{
+ u32 v;
+
+ guard(raw_spinlock)(&irq_lock);
+
+ v = ioread32(econet_intc.membase + reg);
+ v &= ~mask;
+ v |= val & mask;
+ iowrite32(v, econet_intc.membase + reg);
+}
+
+/* IRQs must be disabled */
+static void econet_chmask(u32 hwirq, bool unmask)
+{
+ u32 reg, mask;
+ u8 shadow;
+
+ /*
+ * If the IRQ is a shadow, it should never be manipulated directly.
+ * It should only be masked/unmasked as a result of the "real" per-cpu
+ * irq being manipulated by a thread running on VPE#1.
+ * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is what we mask.
+ * The SoC has a single core with two VPEs, so smp_processor_id() never exceeds 1.
+ */
+ shadow = econet_intc.interrupt_shadows[hwirq];
+ if (WARN_ON_ONCE(shadow == IS_SHADOW))
+ return;
+ else if (shadow != NOT_PERCPU && smp_processor_id() == 1)
+ hwirq = shadow;
+
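+ /*
+ * Worked example: hwirq 35 lands in bank 1 as BIT(3), while hwirq 5
+ * lands in bank 0 as BIT(5).
+ */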
+ if (hwirq >= 32) {
+ reg = REG_MASK1;
+ mask = BIT(hwirq - 32);
+ } else {
+ reg = REG_MASK0;
+ mask = BIT(hwirq);
+ }
+
+ econet_wreg(reg, unmask ? mask : 0, mask);
+}
+
+/* IRQs must be disabled */
+static void econet_intc_mask(struct irq_data *d)
+{
+ econet_chmask(d->hwirq, false);
+}
+
+/* IRQs must be disabled */
+static void econet_intc_unmask(struct irq_data *d)
+{
+ econet_chmask(d->hwirq, true);
+}
+
+static void econet_mask_all(void)
+{
+ /* IRQs are generally disabled during init, but the irqsave guard means callers need not guarantee it. */
+ guard(irqsave)();
+ econet_wreg(REG_MASK0, 0, ~0);
+ econet_wreg(REG_MASK1, 0, ~0);
+}
+
+static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset)
+{
+ int hwirq;
+
+ while (pending) {
+ hwirq = fls(pending) - 1;
+ generic_handle_domain_irq(d, hwirq + offset);
+ pending &= ~BIT(hwirq);
+ }
+}
+
+static void econet_intc_from_parent(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_domain *domain;
+ u32 pending0, pending1;
+
+ chained_irq_enter(chip, desc);
+
+ pending0 = ioread32(econet_intc.membase + REG_PENDING0);
+ pending1 = ioread32(econet_intc.membase + REG_PENDING1);
+
+ if (unlikely(!(pending0 | pending1))) {
+ spurious_interrupt();
+ } else {
+ domain = irq_desc_get_handler_data(desc);
+ econet_intc_handle_pending(domain, pending0, 0);
+ econet_intc_handle_pending(domain, pending1, 32);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static const struct irq_chip econet_irq_chip;
+
+static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq)
+{
+ int ret;
+
+ if (hwirq >= IRQ_COUNT) {
+ pr_err("%s: hwirq %lu out of range\n", __func__, hwirq);
+ return -EINVAL;
+ } else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) {
+ pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq);
+ return -EINVAL;
+ }
+
+ if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) {
+ irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq);
+ } else {
+ irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq);
+ ret = irq_set_percpu_devid(irq);
+ if (ret)
+ pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret);
+ }
+
+ irq_set_chip_data(irq, NULL);
+ return 0;
+}
+
+static const struct irq_chip econet_irq_chip = {
+ .name = "en751221-intc",
+ .irq_unmask = econet_intc_unmask,
+ .irq_mask = econet_intc_mask,
+ .irq_mask_ack = econet_intc_mask,
+};
+
+static const struct irq_domain_ops econet_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = econet_intc_map
+};
+
+static int __init get_shadow_interrupts(struct device_node *node)
+{
+ const char *field = "econet,shadow-interrupts";
+ int num_shadows;
+
+ num_shadows = of_property_count_u32_elems(node, field);
+
+ memset(econet_intc.interrupt_shadows, NOT_PERCPU,
+ sizeof(econet_intc.interrupt_shadows));
+
+ if (num_shadows <= 0) {
+ return 0;
+ } else if (num_shadows % 2) {
+ pr_err("%pOF: %s count is odd, ignoring\n", node, field);
+ return 0;
+ }
+
+ u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL);
+ if (!shadows)
+ return -ENOMEM;
+
+ if (of_property_read_u32_array(node, field, shadows, num_shadows)) {
+ pr_err("%pOF: Failed to read %s\n", node, field);
+ return -EINVAL;
+ }
+
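+ /* Each pair in the property is <target shadow>; record the mapping in both directions */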
+ for (int i = 0; i < num_shadows; i += 2) {
+ u32 shadow = shadows[i + 1];
+ u32 target = shadows[i];
+
+ if (shadow >= IRQ_COUNT) {
+ pr_err("%pOF: %s[%d] shadow(%d) out of range\n",
+ node, field, i + 1, shadow);
+ continue;
+ }
+
+ if (target >= IRQ_COUNT) {
+ pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target);
+ continue;
+ }
+
+ if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) {
+ pr_err("%pOF: %s[%d] target(%d) already has a shadow\n",
+ node, field, i, target);
+ continue;
+ }
+
+ if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) {
+ pr_err("%pOF: %s[%d] shadow(%d) already has a target\n",
+ node, field, i + 1, shadow);
+ continue;
+ }
+
+ econet_intc.interrupt_shadows[target] = shadow;
+ econet_intc.interrupt_shadows[shadow] = IS_SHADOW;
+ }
+
+ return 0;
+}
+
+static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ struct resource res;
+ int ret, irq;
+
+ ret = get_shadow_interrupts(node);
+ if (ret)
+ return ret;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node);
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("%pOF: DT: Failed to get 'reg'\n", node);
+ ret = -EINVAL;
+ goto err_dispose_mapping;
+ }
+
+ if (!request_mem_region(res.start, resource_size(&res), res.name)) {
+ pr_err("%pOF: Failed to request memory\n", node);
+ ret = -EBUSY;
+ goto err_dispose_mapping;
+ }
+
+ econet_intc.membase = ioremap(res.start, resource_size(&res));
+ if (!econet_intc.membase) {
+ pr_err("%pOF: Failed to remap membase\n", node);
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ econet_mask_all();
+
+ domain = irq_domain_create_linear(of_fwnode_handle(node), IRQ_COUNT,
+ &econet_domain_ops, NULL);
+ if (!domain) {
+ pr_err("%pOF: Failed to add irqdomain\n", node);
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain);
+
+ return 0;
+
+err_unmap:
+ iounmap(econet_intc.membase);
+err_release:
+ release_mem_region(res.start, resource_size(&res));
+err_dispose_mapping:
+ irq_dispose_mapping(irq);
+ return ret;
+}
+
+IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init);
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index b91c358ea6db..a59a66d79da6 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -180,8 +180,9 @@ static int __init ft010_of_init_irq(struct device_node *node,
writel(0, FT010_IRQ_MASK(f->base));
writel(0, FT010_FIQ_MASK(f->base));
- f->domain = irq_domain_add_simple(node, FT010_NUM_IRQS, 0,
- &ft010_irqdomain_ops, f);
+ f->domain = irq_domain_create_simple(of_fwnode_handle(node),
+ FT010_NUM_IRQS, 0,
+ &ft010_irqdomain_ops, f);
set_handle_irq(ft010_irqchip_handle_irq);
return 0;
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index dc98c39d2b20..24ef5af569fe 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -26,7 +26,7 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-common.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
/*
* MSI_TYPER:
@@ -252,7 +252,7 @@ static void __init gicv2m_teardown(void)
static struct msi_parent_ops gicv2m_msi_parent_ops = {
.supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
.required_flags = GICV2M_MSI_FLAGS_REQUIRED,
- .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "GICv2m-",
@@ -261,23 +261,23 @@ static struct msi_parent_ops gicv2m_msi_parent_ops = {
static __init int gicv2m_allocate_domains(struct irq_domain *parent)
{
- struct irq_domain *inner_domain;
+ struct irq_domain_info info = {
+ .ops = &gicv2m_domain_ops,
+ .parent = parent,
+ };
struct v2m_data *v2m;
v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
if (!v2m)
return 0;
- inner_domain = irq_domain_create_hierarchy(parent, 0, 0, v2m->fwnode,
- &gicv2m_domain_ops, v2m);
- if (!inner_domain) {
+ info.host_data = v2m;
+ info.fwnode = v2m->fwnode;
+
+ if (!msi_create_parent_irq_domain(&info, &gicv2m_msi_parent_ops)) {
pr_err("Failed to create GICv2m domain\n");
return -ENOMEM;
}
-
- irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- inner_domain->msi_parent_ops = &gicv2m_msi_parent_ops;
return 0;
}
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 8e87fc35f8aa..11549d85f23b 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -152,7 +152,7 @@ static void __init its_fsl_mc_of_msi_init(void)
if (!of_property_read_bool(np, "msi-controller"))
continue;
- its_fsl_mc_msi_init_one(of_node_to_fwnode(np),
+ its_fsl_mc_msi_init_one(of_fwnode_handle(np),
np->full_name);
}
}
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
index bdb04c808148..a5e110ffdd88 100644
--- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
@@ -8,7 +8,7 @@
#include <linux/pci.h>
#include "irq-gic-common.h"
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#define ITS_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS | \
@@ -68,17 +68,6 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);
/*
- * @domain->msi_domain_info->hwsize contains the size of the
- * MSI[-X] domain, but vector allocation happens one by one. This
- * needs some thought when MSI comes into play as the size of MSI
- * might be unknown at domain creation time and therefore set to
- * MSI_MAX_INDEX.
- */
- msi_info = msi_get_domain_info(domain);
- if (msi_info->hwsize > nvec)
- nvec = msi_info->hwsize;
-
- /*
* Always allocate a power of 2, and special case device 0 for
* broken systems where the DevID is not wired (and all devices
* appear as DevID 0). For that reason, we generously allocate a
@@ -118,6 +107,14 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
index++;
} while (!ret);
+ if (ret) {
+ struct device_node *np = NULL;
+
+ ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
+ if (np)
+ of_node_put(np);
+ }
+
return ret;
}
@@ -143,14 +140,6 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
/* ITS specific DeviceID, as the core ITS ignores dev. */
info->scratchpad[0].ul = dev_id;
- /*
- * @domain->msi_domain_info->hwsize contains the size of the device
- * domain, but vector allocation happens one by one.
- */
- msi_info = msi_get_domain_info(domain);
- if (msi_info->hwsize > nvec)
- nvec = msi_info->hwsize;
-
/* Allocate at least 32 MSIs, and always as a power of 2 */
nvec = max_t(int, 32, roundup_pow_of_two(nvec));
@@ -159,6 +148,14 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
dev, nvec, info);
}
+static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
+{
+ struct msi_domain_info *msi_info;
+
+ msi_info = msi_get_domain_info(domain->parent);
+ msi_info->ops->msi_teardown(domain->parent, info);
+}
+
static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent, struct msi_domain_info *info)
{
@@ -182,6 +179,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* %MSI_MAX_INDEX.
*/
info->ops->msi_prepare = its_pci_msi_prepare;
+ info->ops->msi_teardown = its_msi_teardown;
break;
case DOMAIN_BUS_DEVICE_MSI:
case DOMAIN_BUS_WIRED_TO_MSI:
@@ -190,6 +188,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* size is also known at domain creation time.
*/
info->ops->msi_prepare = its_pmsi_prepare;
+ info->ops->msi_teardown = its_msi_teardown;
break;
default:
/* Confused. How did the lib return true? */
@@ -203,7 +202,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.supported_flags = ITS_MSI_FLAGS_SUPPORTED,
.required_flags = ITS_MSI_FLAGS_REQUIRED,
- .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "ITS-",
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0115ad6c8259..d54fa0638dc4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,7 +41,7 @@
#include <asm/exception.h>
#include "irq-gic-common.h"
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
@@ -125,6 +125,8 @@ struct its_node {
int vlpi_redist_offset;
};
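+/* Per-CPU cache of the ITS whose GICv4.1 VPE table this CPU inherited */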
+static DEFINE_PER_CPU(struct its_node *, local_4_1_its);
+
#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
@@ -2778,6 +2780,7 @@ static u64 inherit_vpe_l1_table_from_its(void)
}
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
+ *this_cpu_ptr(&local_4_1_its) = its;
return val;
}
@@ -2815,6 +2818,7 @@ static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
+ *this_cpu_ptr(&local_4_1_its) = *per_cpu_ptr(&local_4_1_its, cpu);
return val;
}
@@ -3620,8 +3624,33 @@ out:
return err;
}
+static void its_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *info)
+{
+ struct its_device *its_dev = info->scratchpad[0].ptr;
+
+ guard(mutex)(&its_dev->its->dev_alloc_lock);
+
+ /* If the device is shared, keep everything around */
+ if (its_dev->shared)
+ return;
+
+ /* LPIs should have been already unmapped at this stage */
+ if (WARN_ON_ONCE(!bitmap_empty(its_dev->event_map.lpi_map,
+ its_dev->event_map.nr_lpis)))
+ return;
+
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
+
+ /* Unmap device/itt, and get rid of the tracking */
+ its_send_mapd(its_dev, 0);
+ its_free_device(its_dev);
+}
+
static struct msi_domain_ops its_msi_domain_ops = {
.msi_prepare = its_msi_prepare,
+ .msi_teardown = its_msi_teardown,
};
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
@@ -3722,7 +3751,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- struct its_node *its = its_dev->its;
int i;
bitmap_release_region(its_dev->event_map.lpi_map,
@@ -3736,26 +3764,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_reset_irq_data(data);
}
- mutex_lock(&its->dev_alloc_lock);
-
- /*
- * If all interrupts have been freed, start mopping the
- * floor. This is conditioned on the device not being shared.
- */
- if (!its_dev->shared &&
- bitmap_empty(its_dev->event_map.lpi_map,
- its_dev->event_map.nr_lpis)) {
- its_lpi_free(its_dev->event_map.lpi_map,
- its_dev->event_map.lpi_base,
- its_dev->event_map.nr_lpis);
-
- /* Unmap device/itt */
- its_send_mapd(its_dev, 0);
- its_free_device(its_dev);
- }
-
- mutex_unlock(&its->dev_alloc_lock);
-
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
@@ -4180,7 +4188,7 @@ static struct irq_chip its_vpe_irq_chip = {
static struct its_node *find_4_1_its(void)
{
- static struct its_node *its = NULL;
+ struct its_node *its = *this_cpu_ptr(&local_4_1_its);
if (!its) {
list_for_each_entry(its, &its_nodes, entry) {
@@ -5118,7 +5126,12 @@ out_unmap:
static int its_init_domain(struct its_node *its)
{
- struct irq_domain *inner_domain;
+ struct irq_domain_info dom_info = {
+ .fwnode = its->fwnode_handle,
+ .ops = &its_domain_ops,
+ .domain_flags = its->msi_domain_flags,
+ .parent = its_parent,
+ };
struct msi_domain_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -5127,21 +5140,12 @@ static int its_init_domain(struct its_node *its)
info->ops = &its_msi_domain_ops;
info->data = its;
+ dom_info.host_data = info;
- inner_domain = irq_domain_create_hierarchy(its_parent,
- its->msi_domain_flags, 0,
- its->fwnode_handle, &its_domain_ops,
- info);
- if (!inner_domain) {
+ if (!msi_create_parent_irq_domain(&dom_info, &gic_v3_its_msi_parent_ops)) {
kfree(info);
return -ENOMEM;
}
-
- irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
-
- inner_domain->msi_parent_ops = &gic_v3_its_msi_parent_ops;
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
-
return 0;
}
@@ -5518,7 +5522,7 @@ static struct its_node __init *its_node_init(struct resource *res,
its->base = its_base;
its->phys_base = res->start;
its->get_msi_base = its_irq_get_msi_base;
- its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI | IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
its->numa_node = numa_node;
its->fwnode_handle = handle;
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index 34e9ca77a8c3..aa11bbe8026a 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -18,7 +18,7 @@
#include <linux/irqchip/arm-gic-v3.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
struct mbi_range {
u32 spi_start;
@@ -197,7 +197,7 @@ static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
.supported_flags = MBI_MSI_FLAGS_SUPPORTED,
.required_flags = MBI_MSI_FLAGS_REQUIRED,
- .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "MBI-",
@@ -206,17 +206,13 @@ static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
static int mbi_allocate_domain(struct irq_domain *parent)
{
- struct irq_domain *nexus_domain;
+ struct irq_domain_info info = {
+ .fwnode = parent->fwnode,
+ .ops = &mbi_domain_ops,
+ .parent = parent,
+ };
- nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
- &mbi_domain_ops, NULL);
- if (!nexus_domain)
- return -ENOMEM;
-
- irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
- nexus_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- nexus_domain->msi_parent_ops = &gic_v3_mbi_msi_parent_ops;
- return 0;
+ return msi_create_parent_irq_domain(&info, &gic_v3_mbi_msi_parent_ops) ? 0 : -ENOMEM;
}
int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 270d7a4d85a6..efc791c43d44 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1826,7 +1826,7 @@ static int partition_domain_translate(struct irq_domain *d,
ppi_idx = __gic_get_ppi_index(ppi_intid);
ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
- of_node_to_fwnode(np));
+ of_fwnode_handle(np));
if (ret < 0)
return ret;
@@ -2192,7 +2192,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
part = &parts[part_idx];
- part->partition_id = of_node_to_fwnode(child_part);
+ part->partition_id = of_fwnode_handle(child_part);
pr_info("GIC: PPI partition %pOFn[%d] { ",
child_part, part_idx);
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
index 513f6edbbe95..a8b23b507ecd 100644
--- a/drivers/irqchip/irq-goldfish-pic.c
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -101,10 +101,9 @@ static int __init goldfish_pic_of_init(struct device_node *of_node,
irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
IRQ_NOPROBE | IRQ_LEVEL, 0);
- gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
- GFPIC_IRQ_BASE, 0,
- &goldfish_irq_domain_ops,
- NULL);
+ gfpic->irq_domain = irq_domain_create_legacy(of_fwnode_handle(of_node), GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0, &goldfish_irq_domain_ops,
+ NULL);
if (!gfpic->irq_domain) {
pr_err("Failed to add irqdomain!\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 31c3f70a5d5e..b7958c5a1221 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -386,10 +386,8 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
return -EINVAL;
}
- hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
- 0,
- &hip04_irq_domain_ops,
- &hip04_data);
+ hip04_data.domain = irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, irq_base, 0,
+ &hip04_irq_domain_ops, &hip04_data);
if (WARN_ON(!hip04_data.domain))
return -EINVAL;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 115bdcffab24..91b2f587119c 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -313,8 +313,8 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
init_8259A(0);
- domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
- &i8259A_ops, NULL);
+ domain = irq_domain_create_legacy(of_fwnode_handle(node), 16, I8259A_IRQ_BASE, 0,
+ &i8259A_ops, NULL);
if (!domain)
panic("Failed to add i8259 IRQ domain");
diff --git a/drivers/irqchip/irq-idt3243x.c b/drivers/irqchip/irq-idt3243x.c
index 0732a0e9af62..f8324fb1fe8f 100644
--- a/drivers/irqchip/irq-idt3243x.c
+++ b/drivers/irqchip/irq-idt3243x.c
@@ -72,7 +72,7 @@ static int idt_pic_init(struct device_node *of_node, struct device_node *parent)
goto out_unmap_irq;
}
- domain = irq_domain_add_linear(of_node, IDT_PIC_NR_IRQS,
+ domain = irq_domain_create_linear(of_fwnode_handle(of_node), IDT_PIC_NR_IRQS,
&irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("Failed to add irqdomain!\n");
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 85f80bac0961..f0410d5d7315 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -372,7 +372,7 @@ static int pdc_intc_probe(struct platform_device *pdev)
priv->syswake_irq = irq;
/* Set up an IRQ domain */
- priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
+ priv->domain = irq_domain_create_linear(of_fwnode_handle(node), 16, &irq_generic_chip_ops,
priv);
if (unlikely(!priv->domain)) {
dev_err(&pdev->dev, "cannot add IRQ domain\n");
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 095ae8e3217e..b91f5c14b405 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -240,8 +240,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
return -ENOMEM;
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
- node, &gpcv2_irqchip_data_domain_ops, cd);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
+ of_fwnode_handle(node), &gpcv2_irqchip_data_domain_ops, cd);
if (!domain) {
iounmap(cd->gpc_base);
kfree(cd);
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index 787543d07565..5f9b204d350b 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -254,7 +254,7 @@ static int imx_intmux_probe(struct platform_device *pdev)
goto out;
}
- domain = irq_domain_add_linear(np, 32, &imx_intmux_domain_ops,
+ domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &imx_intmux_domain_ops,
&data->irqchip_data[i]);
if (!domain) {
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index afbfcce3b1e3..6dc9ac48fee5 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -212,7 +212,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
/* steer all IRQs into configured channel */
writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
- data->domain = irq_domain_add_linear(np, data->reg_num * 32,
+ data->domain = irq_domain_create_linear(of_fwnode_handle(np), data->reg_num * 32,
&imx_irqsteer_domain_ops, data);
if (!data->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 69aacdfc8bef..137da1927d14 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -24,7 +24,7 @@
#include <linux/pm_domain.h>
#include <linux/spinlock.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#define IMX_MU_CHANS 4
diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
index 3363f83bd7e9..794ecba717c9 100644
--- a/drivers/irqchip/irq-ingenic-tcu.c
+++ b/drivers/irqchip/irq-ingenic-tcu.c
@@ -52,11 +52,10 @@ static void ingenic_tcu_gc_unmask_enable_reg(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.ack, mask);
regmap_write(map, ct->regs.enable, mask);
*ct->mask_cache |= mask;
- irq_gc_unlock(gc);
}
static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d)
@@ -66,10 +65,9 @@ static void ingenic_tcu_gc_mask_disable_reg(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.disable, mask);
*ct->mask_cache &= ~mask;
- irq_gc_unlock(gc);
}
static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d)
@@ -79,10 +77,9 @@ static void ingenic_tcu_gc_mask_disable_reg_and_ack(struct irq_data *d)
struct regmap *map = gc->private;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
regmap_write(map, ct->regs.ack, mask);
regmap_write(map, ct->regs.disable, mask);
- irq_gc_unlock(gc);
}
static int __init ingenic_tcu_irq_init(struct device_node *np,
@@ -114,8 +111,8 @@ static int __init ingenic_tcu_irq_init(struct device_node *np,
tcu->nb_parent_irqs = irqs;
- tcu->domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops,
- NULL);
+ tcu->domain = irq_domain_create_linear(of_fwnode_handle(np), 32, &irq_generic_chip_ops,
+ NULL);
if (!tcu->domain) {
ret = -ENOMEM;
goto err_free_tcu;
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index cee839ca627e..52393724f213 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -90,8 +90,8 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
- domain = irq_domain_add_linear(node, num_chips * 32,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), num_chips * 32,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
err = -ENOMEM;
goto out_unmap_base;
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
index f23b02f62a5c..a9a5a52b818a 100644
--- a/drivers/irqchip/irq-ixp4xx.c
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -261,7 +261,7 @@ static int __init ixp4xx_of_init_irq(struct device_node *np,
pr_crit("IXP4XX: could not ioremap interrupt controller\n");
return -ENODEV;
}
- fwnode = of_node_to_fwnode(np);
+ fwnode = of_fwnode_handle(np);
/* These chip variants have 64 interrupts */
is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") ||
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 1f613eb7b7f0..94c05cf974be 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -107,9 +107,8 @@ static int __init aic_irq_of_init(struct device_node *node,
if (ret < 0)
return ret;
- domain = irq_domain_add_legacy(node, dom_sz - min_irq, min_irq, min_irq,
- &jcore_aic_irqdomain_ops,
- &jcore_aic);
+ domain = irq_domain_create_legacy(of_fwnode_handle(node), dom_sz - min_irq, min_irq,
+ min_irq, &jcore_aic_irqdomain_ops, &jcore_aic);
if (!domain)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 37e1a03fcbb4..c9e902b7bf48 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -157,8 +157,8 @@ static int keystone_irq_probe(struct platform_device *pdev)
kirq->chip.irq_mask = keystone_irq_setmask;
kirq->chip.irq_unmask = keystone_irq_unmask;
- kirq->irqd = irq_domain_add_linear(np, KEYSTONE_N_IRQ,
- &keystone_irq_ops, kirq);
+ kirq->irqd = irq_domain_create_linear(of_fwnode_handle(np), KEYSTONE_N_IRQ,
+ &keystone_irq_ops, kirq);
if (!kirq->irqd) {
dev_err(dev, "IRQ domain registration failed\n");
return -ENODEV;
diff --git a/drivers/irqchip/irq-lan966x-oic.c b/drivers/irqchip/irq-lan966x-oic.c
index 41ac880e3b87..11d3a0ffa261 100644
--- a/drivers/irqchip/irq-lan966x-oic.c
+++ b/drivers/irqchip/irq-lan966x-oic.c
@@ -71,14 +71,12 @@ static unsigned int lan966x_oic_irq_startup(struct irq_data *data)
struct lan966x_oic_chip_regs *chip_regs = gc->private;
u32 map;
- irq_gc_lock(gc);
-
- /* Map the source interrupt to the destination */
- map = irq_reg_readl(gc, chip_regs->reg_off_map);
- map |= data->mask;
- irq_reg_writel(gc, map, chip_regs->reg_off_map);
-
- irq_gc_unlock(gc);
+ scoped_guard (raw_spinlock, &gc->lock) {
+ /* Map the source interrupt to the destination */
+ map = irq_reg_readl(gc, chip_regs->reg_off_map);
+ map |= data->mask;
+ irq_reg_writel(gc, map, chip_regs->reg_off_map);
+ }
ct->chip.irq_ack(data);
ct->chip.irq_unmask(data);
@@ -95,14 +93,12 @@ static void lan966x_oic_irq_shutdown(struct irq_data *data)
ct->chip.irq_mask(data);
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
/* Unmap the interrupt */
map = irq_reg_readl(gc, chip_regs->reg_off_map);
map &= ~data->mask;
irq_reg_writel(gc, map, chip_regs->reg_off_map);
-
- irq_gc_unlock(gc);
}
static int lan966x_oic_irq_set_type(struct irq_data *data,
@@ -224,7 +220,7 @@ static int lan966x_oic_probe(struct platform_device *pdev)
.exit = lan966x_oic_chip_exit,
};
struct irq_domain_info d_info = {
- .fwnode = of_node_to_fwnode(pdev->dev.of_node),
+ .fwnode = of_fwnode_handle(pdev->dev.of_node),
.domain_flags = IRQ_DOMAIN_FLAG_DESTROY_GC,
.size = LAN966X_OIC_NR_IRQ,
.hwirq_max = LAN966X_OIC_NR_IRQ,
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
index 80e55955a29f..bf52dc8345f5 100644
--- a/drivers/irqchip/irq-loongarch-avec.c
+++ b/drivers/irqchip/irq-loongarch-avec.c
@@ -18,7 +18,7 @@
#include <asm/loongarch.h>
#include <asm/setup.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include "irq-loongson.h"
#define VECTORS_PER_REG 64
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index e62dab4c97fc..950bc087e388 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -100,7 +100,7 @@ static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
static int __init cpuintc_of_init(struct device_node *of_node,
struct device_node *parent)
{
- cpuintc_handle = of_node_to_fwnode(of_node);
+ cpuintc_handle = of_fwnode_handle(of_node);
irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
&loongarch_cpu_intc_irq_domain_ops, NULL);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index bb79e19dfb59..b2860eb2d32c 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -554,7 +554,7 @@ static int __init eiointc_of_init(struct device_node *of_node,
priv->vec_count = VEC_COUNT;
priv->node = 0;
- priv->domain_handle = of_node_to_fwnode(of_node);
+ priv->domain_handle = of_fwnode_handle(of_node);
ret = eiointc_init(priv, parent_irq, 0);
if (ret < 0)
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index 5da02c7ad0b3..d8558eb35044 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -248,7 +248,7 @@ static int htvec_of_init(struct device_node *node,
}
err = htvec_init(res.start, resource_size(&res),
- num_parents, parent_irq, of_node_to_fwnode(node));
+ num_parents, parent_irq, of_fwnode_handle(node));
if (err < 0)
return err;
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 2b1bd4a96665..0033c2188abc 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -116,9 +116,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
u32 mask = data->mask;
- unsigned long flags;
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock)(&gc->lock);
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
liointc_set_bit(gc, LIOINTC_REG_INTC_EDGE, mask, false);
@@ -137,10 +136,8 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
liointc_set_bit(gc, LIOINTC_REG_INTC_POL, mask, true);
break;
default:
- irq_gc_unlock_irqrestore(gc, flags);
return -EINVAL;
}
- irq_gc_unlock_irqrestore(gc, flags);
irqd_set_trigger_type(data, type);
return 0;
@@ -157,10 +154,9 @@ static void liointc_suspend(struct irq_chip_generic *gc)
static void liointc_resume(struct irq_chip_generic *gc)
{
struct liointc_priv *priv = gc->private;
- unsigned long flags;
int i;
- irq_gc_lock_irqsave(gc, flags);
+ guard(raw_spinlock_irqsave)(&gc->lock);
/* Disable all at first */
writel(0xffffffff, gc->reg_base + LIOINTC_REG_INTC_DISABLE);
/* Restore map cache */
@@ -170,7 +166,6 @@ static void liointc_resume(struct irq_chip_generic *gc)
writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE);
/* Restore mask cache */
writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
- irq_gc_unlock_irqrestore(gc, flags);
}
static int parent_irq[LIOINTC_NUM_PARENT];
@@ -363,7 +358,7 @@ static int __init liointc_of_init(struct device_node *node,
}
err = liointc_init(res.start, resource_size(&res),
- revision, of_node_to_fwnode(node), node);
+ revision, of_fwnode_handle(node), node);
if (err < 0)
return err;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 9c62108b3ad5..a0257c7bef10 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -15,7 +15,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include "irq-loongson.h"
static int nr_pics;
@@ -243,7 +243,7 @@ static int pch_msi_of_init(struct device_node *node, struct device_node *parent)
return -EINVAL;
}
- err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_node_to_fwnode(node));
+ err = pch_msi_init(res.start, irq_base, irq_count, parent_domain, of_fwnode_handle(node));
if (err < 0)
return err;
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 69efda35a8e7..62e6bf3a0611 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -392,7 +392,7 @@ static int pch_pic_of_init(struct device_node *node,
}
err = pch_pic_init(res.start, resource_size(&res), vec_base,
- parent_domain, of_node_to_fwnode(node), 0);
+ parent_domain, of_fwnode_handle(node), 0);
if (err < 0)
return err;
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 4d70a857133f..14cca44baa14 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -210,8 +210,8 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
return -EINVAL;
}
- irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
- &lpc32xx_irq_domain_ops, irqc);
+ irqc->domain = irq_domain_create_linear(of_fwnode_handle(node), NR_LPC32XX_IC_IRQS,
+ &lpc32xx_irq_domain_ops, irqc);
if (!irqc->domain) {
pr_err("unable to add irq domain\n");
iounmap(irqc->base);
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
index 139f26b0a6ef..50a7b38381b9 100644
--- a/drivers/irqchip/irq-ls-extirq.c
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -208,8 +208,8 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent)
of_device_is_compatible(node, "fsl,ls1043a-extirq");
raw_spin_lock_init(&priv->lock);
- domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
- &extirq_domain_ops, priv);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node),
+ &extirq_domain_ops, priv);
if (!domain) {
ret = -ENOMEM;
goto err_add_hierarchy;
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 3cb80796cc7c..84bc5e4b47cf 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -215,17 +215,17 @@ static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
/* Initialize MSI domain parent */
- msi_data->parent = irq_domain_add_linear(NULL,
- msi_data->irqs_num,
- &ls_scfg_msi_domain_ops,
- msi_data);
+ msi_data->parent = irq_domain_create_linear(NULL,
+ msi_data->irqs_num,
+ &ls_scfg_msi_domain_ops,
+ msi_data);
if (!msi_data->parent) {
dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
msi_data->msi_domain = pci_msi_create_irq_domain(
- of_node_to_fwnode(msi_data->pdev->dev.of_node),
+ of_fwnode_handle(msi_data->pdev->dev.of_node),
&ls_scfg_msi_domain_info,
msi_data->parent);
if (!msi_data->msi_domain) {
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
index 77a3f7dfaaf0..589d32007fca 100644
--- a/drivers/irqchip/irq-ls1x.c
+++ b/drivers/irqchip/irq-ls1x.c
@@ -126,8 +126,8 @@ static int __init ls1x_intc_of_init(struct device_node *node,
}
/* Set up an IRQ domain */
- priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
- NULL);
+ priv->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &irq_generic_chip_ops,
+ NULL);
if (!priv->domain) {
pr_err("ls1x-irq: cannot add IRQ domain\n");
err = -ENOMEM;
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
index 5dcd94c000a2..516a3a0e359c 100644
--- a/drivers/irqchip/irq-mchp-eic.c
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -248,8 +248,9 @@ static int mchp_eic_init(struct device_node *node, struct device_node *parent)
eic->irqs[i] = irq.args[1];
}
- eic->domain = irq_domain_add_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
- node, &mchp_eic_domain_ops, eic);
+ eic->domain = irq_domain_create_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
+ of_fwnode_handle(node), &mchp_eic_domain_ops,
+ eic);
if (!eic->domain) {
pr_err("%pOF: Failed to add domain\n", node);
ret = -ENODEV;
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 0a25536a5d07..7d177626d64b 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -607,7 +607,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
domain = irq_domain_create_hierarchy(parent_domain, 0,
ctl->params->nr_hwirq,
- of_node_to_fwnode(node),
+ of_fwnode_handle(node),
&meson_gpio_irq_domain_ops,
ctl);
if (!domain) {
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0c7ae71a0af0..ac784ef3ed4b 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -238,11 +238,9 @@ static void mips_cpu_register_ipi_domain(struct device_node *of_node)
struct cpu_ipi_domain_state *ipi_domain_state;
ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
- ipi_domain = irq_domain_add_hierarchy(irq_domain,
- IRQ_DOMAIN_FLAG_IPI_SINGLE,
- 2, of_node,
- &mips_cpu_ipi_chip_ops,
- ipi_domain_state);
+ ipi_domain = irq_domain_create_hierarchy(irq_domain, IRQ_DOMAIN_FLAG_IPI_SINGLE, 2,
+ of_fwnode_handle(of_node),
+ &mips_cpu_ipi_chip_ops, ipi_domain_state);
if (!ipi_domain)
panic("Failed to add MIPS CPU IPI domain");
irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
@@ -260,9 +258,8 @@ static void __init __mips_cpu_irq_init(struct device_node *of_node)
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
- irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
- &mips_cpu_intc_irq_domain_ops,
- NULL);
+ irq_domain = irq_domain_create_legacy(of_fwnode_handle(of_node), 8, MIPS_CPU_IRQ_BASE, 0,
+ &mips_cpu_intc_irq_domain_ops, NULL);
if (!irq_domain)
panic("Failed to add irqdomain for MIPS CPU");
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index bca8053864b2..34e8d09c12a0 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -841,10 +841,10 @@ static int gic_register_ipi_domain(struct device_node *node)
struct irq_domain *gic_ipi_domain;
unsigned int v[2], num_ipis;
- gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
- IRQ_DOMAIN_FLAG_IPI_PER_CPU,
- GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
- node, &gic_ipi_domain_ops, NULL);
+ gic_ipi_domain = irq_domain_create_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ of_fwnode_handle(node), &gic_ipi_domain_ops,
+ NULL);
if (!gic_ipi_domain) {
pr_err("Failed to add IPI domain");
return -ENXIO;
@@ -963,9 +963,10 @@ static int __init gic_of_init(struct device_node *node,
gic_irq_dispatch);
}
- gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
- gic_shared_intrs, 0,
- &gic_irq_domain_ops, NULL);
+ gic_irq_domain = irq_domain_create_simple(of_fwnode_handle(node),
+ GIC_NUM_LOCAL_INTRS +
+ gic_shared_intrs, 0,
+ &gic_irq_domain_ops, NULL);
if (!gic_irq_domain) {
pr_err("Failed to add IRQ domain");
return -ENXIO;
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25cf4f80e767..09e640430208 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -261,9 +261,9 @@ static int __init mmp_init_bases(struct device_node *node)
}
icu_data[0].virq_base = 0;
- icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
- &mmp_irq_domain_ops,
- &icu_data[0]);
+ icu_data[0].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[0]);
for (irq = 0; irq < nr_irqs; irq++) {
ret = irq_create_mapping(icu_data[0].domain, irq);
if (!ret) {
@@ -391,9 +391,9 @@ static int __init mmp2_mux_of_init(struct device_node *node,
return -EINVAL;
icu_data[i].virq_base = 0;
- icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
- &mmp_irq_domain_ops,
- &icu_data[i]);
+ icu_data[i].domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
+ &mmp_irq_domain_ops,
+ &icu_data[i]);
for (irq = 0; irq < nr_irqs; irq++) {
ret = irq_create_mapping(icu_data[i].domain, irq);
if (!ret) {
diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
index 3dc745b14caf..8cbc191f750b 100644
--- a/drivers/irqchip/irq-mscc-ocelot.c
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -83,7 +83,7 @@ static void ocelot_irq_unmask(struct irq_data *data)
unsigned int mask = data->mask;
u32 val;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
/*
* Clear sticky bits for edge mode interrupts.
* Serval has only one trigger register replication, but the adjacent
@@ -97,7 +97,6 @@ static void ocelot_irq_unmask(struct irq_data *data)
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, p->reg_off_ena_set);
- irq_gc_unlock(gc);
}
static void ocelot_irq_handler(struct irq_desc *desc)
@@ -132,8 +131,8 @@ static int __init vcoreiii_irq_init(struct device_node *node,
if (!parent_irq)
return -EINVAL;
- domain = irq_domain_add_linear(node, p->n_irq,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), p->n_irq,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("%pOFn: unable to add irq domain\n", node);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index 51464c6257f3..246c30205af4 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -4,7 +4,7 @@
#include <linux/export.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
/**
* msi_lib_init_dev_msi_info - Domain info setup for MSI domains
@@ -105,8 +105,13 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* MSI message into the hardware which is the whole purpose of the
* device MSI domain aside of mask/unmask which is provided e.g. by
* PCI/MSI device domains.
+ *
+ * The exception to the rule is when the underlying domain
+ * tells you that affinity is not a thing -- for example when
+ * everything is muxed behind a single interrupt.
*/
- chip->irq_set_affinity = msi_domain_set_affinity;
+ if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
+ chip->irq_set_affinity = msi_domain_set_affinity;
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
diff --git a/drivers/irqchip/irq-msi-lib.h b/drivers/irqchip/irq-msi-lib.h
deleted file mode 100644
index 681ceabb7bc7..000000000000
--- a/drivers/irqchip/irq-msi-lib.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-// Copyright (C) 2022 Linutronix GmbH
-// Copyright (C) 2022 Intel
-
-#ifndef _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
-#define _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H
-
-#include <linux/bits.h>
-#include <linux/irqdomain.h>
-#include <linux/msi.h>
-
-#ifdef CONFIG_PCI_MSI
-#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
-#else
-#define MATCH_PCI_MSI (0)
-#endif
-
-#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
-
-int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
-
-bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
- struct irq_domain *real_parent,
- struct msi_domain_info *info);
-
-#endif /* _DRIVERS_IRQCHIP_IRQ_MSI_LIB_H */
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
index f6133ae28155..9643cc3a77d7 100644
--- a/drivers/irqchip/irq-mst-intc.c
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -273,8 +273,8 @@ static int __init mst_intc_of_init(struct device_node *dn,
raw_spin_lock_init(&cd->lock);
cd->irq_start = irq_start;
cd->nr_irqs = irq_end - irq_start + 1;
- domain = irq_domain_add_hierarchy(domain_parent, 0, cd->nr_irqs, dn,
- &mst_intc_domain_ops, cd);
+ domain = irq_domain_create_hierarchy(domain_parent, 0, cd->nr_irqs, of_fwnode_handle(dn),
+ &mst_intc_domain_ops, cd);
if (!domain) {
iounmap(cd->base);
kfree(cd);
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
index 76bc0283e3b9..de481ba340f8 100644
--- a/drivers/irqchip/irq-mtk-cirq.c
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -336,9 +336,8 @@ static int __init mtk_cirq_of_init(struct device_node *node,
cirq_data->offsets = match->data;
irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
- domain = irq_domain_add_hierarchy(domain_parent, 0,
- irq_num, node,
- &cirq_domain_ops, cirq_data);
+ domain = irq_domain_create_hierarchy(domain_parent, 0, irq_num, of_fwnode_handle(node),
+ &cirq_domain_ops, cirq_data);
if (!domain) {
ret = -ENOMEM;
goto out_unmap;
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 586e52d5442b..6895e7096b27 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -207,8 +207,8 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
chip_data->which_word[i] = word;
}
- domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node,
- &sysirq_domain_ops, chip_data);
+ domain = irq_domain_create_hierarchy(domain_parent, 0, intpol_num, of_fwnode_handle(node),
+ &sysirq_domain_ops, chip_data);
if (!domain) {
ret = -ENOMEM;
goto out_free_which_word;
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index d67f93f6d750..d3232d6d8dce 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -17,7 +17,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -161,7 +161,7 @@ static const struct irq_domain_ops gicp_domain_ops = {
static const struct msi_parent_ops gicp_msi_parent_ops = {
.supported_flags = GICP_MSI_FLAGS_SUPPORTED,
.required_flags = GICP_MSI_FLAGS_REQUIRED,
- .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "GICP-",
@@ -170,9 +170,12 @@ static const struct msi_parent_ops gicp_msi_parent_ops = {
static int mvebu_gicp_probe(struct platform_device *pdev)
{
- struct irq_domain *inner_domain, *parent_domain;
struct device_node *node = pdev->dev.of_node;
struct device_node *irq_parent_dn;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &gicp_domain_ops,
+ };
struct mvebu_gicp *gicp;
int ret, i;
@@ -217,30 +220,23 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
if (!gicp->spi_bitmap)
return -ENOMEM;
+ info.size = gicp->spi_cnt;
+ info.host_data = gicp;
+
irq_parent_dn = of_irq_find_parent(node);
if (!irq_parent_dn) {
dev_err(&pdev->dev, "failed to find parent IRQ node\n");
return -ENODEV;
}
- parent_domain = irq_find_host(irq_parent_dn);
+ info.parent = irq_find_host(irq_parent_dn);
of_node_put(irq_parent_dn);
- if (!parent_domain) {
+ if (!info.parent) {
dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
return -ENODEV;
}
- inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
- gicp->spi_cnt,
- of_node_to_fwnode(node),
- &gicp_domain_ops, gicp);
- if (!inner_domain)
- return -ENOMEM;
-
- irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- inner_domain->msi_parent_ops = &gicp_msi_parent_ops;
- return 0;
+ return msi_create_parent_irq_domain(&info, &gicp_msi_parent_ops) ? 0 : -ENOMEM;
}
static const struct of_device_id mvebu_gicp_of_match[] = {
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 4eebed39880a..db5dbc6e88b0 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -20,7 +20,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include <dt-bindings/interrupt-controller/mvebu-icu.h>
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index 28f7e81df94f..e5b2bde3d933 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -18,7 +18,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -157,7 +157,7 @@ static const struct irq_domain_ops odmi_domain_ops = {
static const struct msi_parent_ops odmi_msi_parent_ops = {
.supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
.required_flags = ODMI_MSI_FLAGS_REQUIRED,
- .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "ODMI-",
@@ -167,7 +167,12 @@ static const struct msi_parent_ops odmi_msi_parent_ops = {
static int __init mvebu_odmi_init(struct device_node *node,
struct device_node *parent)
{
- struct irq_domain *parent_domain, *inner_domain;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &odmi_domain_ops,
+ .size = odmis_count * NODMIS_PER_FRAME,
+ .parent = irq_find_host(parent),
+ };
int ret, i;
if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
@@ -203,22 +208,10 @@ static int __init mvebu_odmi_init(struct device_node *node,
}
}
- parent_domain = irq_find_host(parent);
+ if (msi_create_parent_irq_domain(&info, &odmi_msi_parent_ops))
+ return 0;
- inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
- odmis_count * NODMIS_PER_FRAME,
- of_node_to_fwnode(node),
- &odmi_domain_ops, NULL);
- if (!inner_domain) {
- ret = -ENOMEM;
- goto err_unmap;
- }
-
- irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_GENERIC_MSI);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- inner_domain->msi_parent_ops = &odmi_msi_parent_ops;
-
- return 0;
+ ret = -ENOMEM;
err_unmap:
for (i = 0; i < odmis_count; i++) {
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index 3888b7585981..8db638aa21d2 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -150,8 +150,8 @@ static int mvebu_pic_probe(struct platform_device *pdev)
return -EINVAL;
}
- pic->domain = irq_domain_add_linear(node, PIC_MAX_IRQS,
- &mvebu_pic_domain_ops, pic);
+ pic->domain = irq_domain_create_linear(of_fwnode_handle(node), PIC_MAX_IRQS,
+ &mvebu_pic_domain_ops, pic);
if (!pic->domain) {
dev_err(&pdev->dev, "Failed to allocate irq domain\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index ebd4a9014e8d..5822ea864765 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -14,7 +14,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
/* Cause register */
#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
@@ -366,6 +366,10 @@ static const struct msi_parent_ops sei_msi_parent_ops = {
static int mvebu_sei_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &mvebu_sei_cp_domain_ops,
+ };
struct mvebu_sei *sei;
u32 parent_irq;
int ret;
@@ -402,7 +406,7 @@ static int mvebu_sei_probe(struct platform_device *pdev)
}
/* Create the root SEI domain */
- sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ sei->sei_domain = irq_domain_create_linear(of_fwnode_handle(node),
(sei->caps->ap_range.size +
sei->caps->cp_range.size),
&mvebu_sei_domain_ops,
@@ -418,7 +422,7 @@ static int mvebu_sei_probe(struct platform_device *pdev)
/* Create the 'wired' domain */
sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
sei->caps->ap_range.size,
- of_node_to_fwnode(node),
+ of_fwnode_handle(node),
&mvebu_sei_ap_domain_ops,
sei);
if (!sei->ap_domain) {
@@ -430,21 +434,17 @@ static int mvebu_sei_probe(struct platform_device *pdev)
irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);
/* Create the 'MSI' domain */
- sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
- sei->caps->cp_range.size,
- of_node_to_fwnode(node),
- &mvebu_sei_cp_domain_ops,
- sei);
+ info.size = sei->caps->cp_range.size;
+ info.host_data = sei;
+ info.parent = sei->sei_domain;
+
+ sei->cp_domain = msi_create_parent_irq_domain(&info, &sei_msi_parent_ops);
if (!sei->cp_domain) {
pr_err("Failed to create CPs IRQ domain\n");
ret = -ENOMEM;
goto remove_ap_domain;
}
- irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
- sei->cp_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- sei->cp_domain->msi_parent_ops = &sei_msi_parent_ops;
-
mvebu_sei_reset(sei);
irq_set_chained_handler_and_data(parent_irq, mvebu_sei_handle_cascade_irq, sei);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index d67b5da38982..0bb423dd5280 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -162,8 +162,8 @@ static const struct irq_domain_ops icoll_irq_domain_ops = {
static void __init icoll_add_domain(struct device_node *np,
int num)
{
- icoll_domain = irq_domain_add_linear(np, num,
- &icoll_irq_domain_ops, NULL);
+ icoll_domain = irq_domain_create_linear(of_fwnode_handle(np), num,
+ &icoll_irq_domain_ops, NULL);
if (!icoll_domain)
panic("%pOF: unable to create irq domain", np);
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index ba6332b00a0a..76e11cac9631 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -90,7 +90,7 @@ static int __init nvic_of_init(struct device_node *node,
irqs = NVIC_MAX_IRQ;
nvic_irq_domain =
- irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL);
+ irq_domain_create_linear(of_fwnode_handle(node), irqs, &nvic_irq_domain_ops, NULL);
if (!nvic_irq_domain) {
pr_warn("Failed to allocate irq domain\n");
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index ad84a2f03368..16f00db570e7 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -248,7 +248,7 @@ static int __init omap_init_irq_of(struct device_node *node)
if (WARN_ON(!omap_irq_base))
return -ENOMEM;
- domain = irq_domain_add_linear(node, omap_nr_irqs,
+ domain = irq_domain_create_linear(of_fwnode_handle(node), omap_nr_irqs,
&irq_generic_chip_ops, NULL);
omap_irq_soft_reset();
@@ -274,7 +274,7 @@ static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
irq_base = 0;
}
- domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
+ domain = irq_domain_create_legacy(of_fwnode_handle(node), omap_nr_irqs, irq_base, 0,
&irq_domain_simple_ops, NULL);
omap_irq_soft_reset();
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index f289ccd95291..48126067c54b 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -144,8 +144,8 @@ static int __init or1k_pic_init(struct device_node *node,
/* Disable all interrupts until explicitly requested */
mtspr(SPR_PICMR, (0UL));
- root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops,
- pic);
+ root_domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &or1k_irq_domain_ops,
+ pic);
set_handle_irq(or1k_pic_handle_irq);
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index 4e4e874e09a8..dddbc05917c0 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -59,7 +59,7 @@ static int __init orion_irq_init(struct device_node *np,
/* count number of irq chips by valid reg addresses */
num_chips = of_address_count(np);
- orion_irq_domain = irq_domain_add_linear(np,
+ orion_irq_domain = irq_domain_create_linear(of_fwnode_handle(np),
num_chips * ORION_IRQS_PER_CHIP,
&irq_generic_chip_ops, NULL);
if (!orion_irq_domain)
@@ -146,8 +146,8 @@ static int __init orion_bridge_irq_init(struct device_node *np,
/* get optional number of interrupts provided */
of_property_read_u32(np, "marvell,#interrupts", &nrirqs);
- domain = irq_domain_add_linear(np, nrirqs,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(np), nrirqs,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("%pOFn: unable to add irq domain\n", np);
return -ENOMEM;
diff --git a/drivers/irqchip/irq-owl-sirq.c b/drivers/irqchip/irq-owl-sirq.c
index 6e4127465094..3d93d21f6732 100644
--- a/drivers/irqchip/irq-owl-sirq.c
+++ b/drivers/irqchip/irq-owl-sirq.c
@@ -323,8 +323,8 @@ static int __init owl_sirq_init(const struct owl_sirq_params *params,
owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_CLK_SEL, i);
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_SIRQ, node,
- &owl_sirq_domain_ops, chip_data);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_SIRQ, of_fwnode_handle(node),
+ &owl_sirq_domain_ops, chip_data);
if (!domain) {
pr_err("%pOF: failed to add domain\n", node);
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index b546b1036e12..5dfda8e8df10 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -227,9 +227,9 @@ static int __init pic32_of_init(struct device_node *node,
goto err_iounmap;
}
- evic_irq_domain = irq_domain_add_linear(node, nchips * 32,
- &pic32_irq_domain_ops,
- priv);
+ evic_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), nchips * 32,
+ &pic32_irq_domain_ops,
+ priv);
if (!evic_irq_domain) {
ret = -ENOMEM;
goto err_free_priv;
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
index bee01980b463..87a5813fd835 100644
--- a/drivers/irqchip/irq-pruss-intc.c
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -555,8 +555,8 @@ static int pruss_intc_probe(struct platform_device *pdev)
mutex_init(&intc->lock);
- intc->domain = irq_domain_add_linear(dev->of_node, max_system_events,
- &pruss_intc_irq_domain_ops, intc);
+ intc->domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), max_system_events,
+ &pruss_intc_irq_domain_ops, intc);
if (!intc->domain)
return -ENOMEM;
@@ -581,8 +581,7 @@ static int pruss_intc_probe(struct platform_device *pdev)
host_data->intc = intc;
host_data->host_irq = i;
- irq_set_handler_data(irq, host_data);
- irq_set_chained_handler(irq, pruss_intc_irq_handler);
+ irq_set_chained_handler_and_data(irq, pruss_intc_irq_handler, host_data);
}
return 0;
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index f772deb9cba5..8d569f7c5a7a 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -450,7 +450,7 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
priv->domain = irq_domain_create_hierarchy(parent_domain,
IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
- of_node_to_fwnode(np), &qcom_mpm_ops, priv);
+ of_fwnode_handle(np), &qcom_mpm_ops, priv);
if (!priv->domain) {
dev_err(dev, "failed to create MPM domain\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index 2a349082af81..942c1f8c363d 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -162,7 +162,7 @@ static int __init realtek_rtl_of_init(struct device_node *node, struct device_no
else if (!parent_irq)
return -ENODEV;
- domain = irq_domain_add_linear(node, RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL);
if (!domain)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 954419f2460d..0959ed43b1a9 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -513,8 +513,10 @@ static int intc_irqpin_probe(struct platform_device *pdev)
irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0,
- &intc_irqpin_irq_domain_ops, p);
+ p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
+ nirqs, 0,
+ &intc_irqpin_irq_domain_ops,
+ p);
if (!p->irq_domain) {
ret = -ENXIO;
dev_err(dev, "cannot initialize irq domain\n");
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index cbce8ffc7de4..5c3196e5a437 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -168,8 +168,8 @@ static int irqc_probe(struct platform_device *pdev)
p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
- p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
- &irq_generic_chip_ops, p);
+ p->irq_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), p->number_of_irqs,
+ &irq_generic_chip_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
dev_err(dev, "cannot initialize irq domain\n");
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index d4e6a68889ec..0a9640ba0adb 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -231,9 +231,9 @@ static int rza1_irqc_probe(struct platform_device *pdev)
priv->chip.irq_set_type = rza1_irqc_set_type;
priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
- priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
- np, &rza1_irqc_domain_ops,
- priv);
+ priv->irq_domain = irq_domain_create_hierarchy(parent, 0, IRQC_NUM_IRQ,
+ of_fwnode_handle(np), &rza1_irqc_domain_ops,
+ priv);
if (!priv->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 6a2e41f02446..1e861bd64f97 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -574,9 +574,9 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
raw_spin_lock_init(&rzg2l_irqc_data->lock);
- irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
- node, &rzg2l_irqc_domain_ops,
- rzg2l_irqc_data);
+ irq_domain = irq_domain_create_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
+ of_fwnode_handle(node), &rzg2l_irqc_domain_ops,
+ rzg2l_irqc_data);
if (!irq_domain) {
pm_runtime_put(dev);
return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index 0f0fd7d4dfdf..69b32c19e8ff 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -41,6 +42,8 @@
#define ICU_TSCLR 0x24
#define ICU_TITSR(k) (0x28 + (k) * 4)
#define ICU_TSSR(k) (0x30 + (k) * 4)
+#define ICU_DMkSELy(k, y) (0x420 + (k) * 0x20 + (y) * 4)
+#define ICU_DMACKSELk(k) (0x500 + (k) * 4)
/* NMI */
#define ICU_NMI_EDGE_FALLING 0
@@ -103,6 +106,15 @@ struct rzv2h_hw_info {
u8 field_width;
};
+/* DMAC */
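+/*
+ * Each ICU_DMkSELy register packs two 10-bit DMA request selectors:
+ * bits 9:0 for the even channel and bits 25:16 for the odd one.
+ */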
+#define ICU_DMAC_DkRQ_SEL_MASK GENMASK(9, 0)
+
+#define ICU_DMAC_DMAREQ_SHIFT(up) ((up) * 16)
+#define ICU_DMAC_DMAREQ_MASK(up) (ICU_DMAC_DkRQ_SEL_MASK \
+ << ICU_DMAC_DMAREQ_SHIFT(up))
+#define ICU_DMAC_PREP_DMAREQ(sel, up) (FIELD_PREP(ICU_DMAC_DkRQ_SEL_MASK, (sel)) \
+ << ICU_DMAC_DMAREQ_SHIFT(up))
+
/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
@@ -117,6 +129,27 @@ struct rzv2h_icu_priv {
const struct rzv2h_hw_info *info;
};
+void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+ u16 req_no)
+{
+ struct rzv2h_icu_priv *priv = platform_get_drvdata(icu_dev);
+ u32 icu_dmksely, dmareq, dmareq_mask;
+ u8 y, upper;
+
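+ /* Two adjacent DMAC channels share one DMkSELy register */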
+ y = dmac_channel / 2;
+ upper = dmac_channel % 2;
+
+ dmareq = ICU_DMAC_PREP_DMAREQ(req_no, upper);
+ dmareq_mask = ICU_DMAC_DMAREQ_MASK(upper);
+
+ guard(raw_spinlock_irqsave)(&priv->lock);
+
+ icu_dmksely = readl(priv->base + ICU_DMkSELy(dmac_index, y));
+ icu_dmksely = (icu_dmksely & ~dmareq_mask) | dmareq;
+ writel(icu_dmksely, priv->base + ICU_DMkSELy(dmac_index, y));
+}
+EXPORT_SYMBOL_GPL(rzv2h_icu_register_dma_req);
+
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
{
return data->domain->host_data;
@@ -491,6 +524,8 @@ static int rzv2h_icu_init_common(struct device_node *node, struct device_node *p
if (!rzv2h_icu_data)
return -ENOMEM;
+ platform_set_drvdata(pdev, rzv2h_icu_data);
+
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
if (IS_ERR(rzv2h_icu_data->base))
return PTR_ERR(rzv2h_icu_data->base);
@@ -522,8 +557,9 @@ static int rzv2h_icu_init_common(struct device_node *node, struct device_node *p
raw_spin_lock_init(&rzv2h_icu_data->lock);
- irq_domain = irq_domain_add_hierarchy(parent_domain, 0, ICU_NUM_IRQ, node,
- &rzv2h_icu_domain_ops, rzv2h_icu_data);
+ irq_domain = irq_domain_create_hierarchy(parent_domain, 0, ICU_NUM_IRQ,
+ of_fwnode_handle(node), &rzv2h_icu_domain_ops,
+ rzv2h_icu_data);
if (!irq_domain) {
dev_err(&pdev->dev, "failed to add irq domain\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index b8ae67c25b37..1b9fbfce9581 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -20,7 +20,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
#include "irq-riscv-imsic-state.h"
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index bdf5cd2037f2..77670dd645ac 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -208,17 +208,17 @@ skip:
}
#ifdef CONFIG_SMP
-static void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
{
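+ /* Callers may arm the timer for a remote CPU, hence the explicit cpu */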
lockdep_assert_held(&lpriv->lock);
if (!timer_pending(&lpriv->timer)) {
lpriv->timer.expires = jiffies + 1;
- add_timer_on(&lpriv->timer, smp_processor_id());
+ add_timer_on(&lpriv->timer, cpu);
}
}
#else
-static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
{
}
#endif
@@ -233,7 +233,7 @@ void imsic_local_sync_all(bool force_all)
if (force_all)
bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
if (!__imsic_local_sync(lpriv))
- __imsic_local_timer_start(lpriv);
+ __imsic_local_timer_start(lpriv, smp_processor_id());
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}
@@ -278,7 +278,7 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
return;
}
- __imsic_local_timer_start(lpriv);
+ __imsic_local_timer_start(lpriv, cpu);
}
}
#else
@@ -564,7 +564,7 @@ void imsic_state_offline(void)
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
raw_spin_lock_irqsave(&lpriv->lock, flags);
- WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
+ WARN_ON_ONCE(timer_delete_sync_try(&lpriv->timer) < 0);
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
#endif
}
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index f653c13de62b..e5805885394e 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -242,7 +242,7 @@ static int __init riscv_intc_init(struct device_node *node,
chip = &andes_intc_chip;
}
- return riscv_intc_init_common(of_node_to_fwnode(node), chip);
+ return riscv_intc_init_common(of_fwnode_handle(node), chip);
}
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index 9d0b80271949..d8d4dff16276 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -162,7 +162,7 @@ void __init sa11x0_init_irq_nodt(int irq_start, resource_size_t io_start)
*/
writel_relaxed(1, iobase + ICCR);
- sa1100_normal_irqdomain = irq_domain_add_simple(NULL,
+ sa1100_normal_irqdomain = irq_domain_create_simple(NULL,
32, irq_start,
&sa1100_normal_irqdomain_ops, NULL);
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
index 375b55aa0acd..af16bc5a3c8b 100644
--- a/drivers/irqchip/irq-sg2042-msi.c
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -17,23 +17,38 @@
#include <linux/property.h>
#include <linux/slab.h>
-#include "irq-msi-lib.h"
+#include <linux/irqchip/irq-msi-lib.h>
-#define SG2042_MAX_MSI_VECTOR 32
+struct sg204x_msi_chip_info {
+ const struct irq_chip *irqchip;
+ const struct msi_parent_ops *parent_ops;
+};
+
+/**
+ * struct sg204x_msi_chipdata - chip data for the SG204x MSI IRQ controller
+ * @reg_clr: clear reg, see TRM, 10.1.33, GP_INTR0_CLR
+ * @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET
+ * @irq_first: First vector number used for MSIs
+ * @num_irqs: Number of vectors for MSIs
+ * @msi_map: mapping for allocated MSI vectors.
+ * @msi_map_lock: Lock for msi_map
+ * @chip_info: chip-specific information
+ */
+struct sg204x_msi_chipdata {
+ void __iomem *reg_clr;
-struct sg2042_msi_chipdata {
- void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR
+ phys_addr_t doorbell_addr;
- phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET
+ u32 irq_first;
+ u32 num_irqs;
- u32 irq_first; // The vector number that MSIs starts
- u32 num_irqs; // The number of vectors for MSIs
+ unsigned long *msi_map;
+ struct mutex msi_map_lock;
- DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR);
- struct mutex msi_map_lock; // lock for msi_map
+ const struct sg204x_msi_chip_info *chip_info;
};
-static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req)
+static int sg204x_msi_allocate_hwirq(struct sg204x_msi_chipdata *data, int num_req)
{
int first;
@@ -43,7 +58,7 @@ static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_r
return first >= 0 ? first : -ENOSPC;
}
-static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req)
+static void sg204x_msi_free_hwirq(struct sg204x_msi_chipdata *data, int hwirq, int num_req)
{
guard(mutex)(&data->msi_map_lock);
bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req));
@@ -51,7 +66,7 @@ static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, i
static void sg2042_msi_irq_ack(struct irq_data *d)
{
- struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
int bit_off = d->hwirq;
writel(1 << bit_off, data->reg_clr);
@@ -61,7 +76,7 @@ static void sg2042_msi_irq_ack(struct irq_data *d)
static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
msg->address_hi = upper_32_bits(data->doorbell_addr);
msg->address_lo = lower_32_bits(data->doorbell_addr);
@@ -79,9 +94,38 @@ static const struct irq_chip sg2042_msi_middle_irq_chip = {
.irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg,
};
-static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
+static void sg2044_msi_irq_ack(struct irq_data *d)
+{
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
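+ /* Each vector has its own 32-bit clear register, acked by writing 0 */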
+ writel(0, (u32 __iomem *)data->reg_clr + d->hwirq);
+ irq_chip_ack_parent(d);
+}
+
+static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ phys_addr_t doorbell = data->doorbell_addr + 4 * (d->hwirq / 32);
+
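+ /* Doorbell registers serve 32 vectors each; data selects the vector's bit */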
+ msg->address_lo = lower_32_bits(doorbell);
+ msg->address_hi = upper_32_bits(doorbell);
+ msg->data = d->hwirq % 32;
+}
+
+static struct irq_chip sg2044_msi_middle_irq_chip = {
+ .name = "SG2044 MSI",
+ .irq_ack = sg2044_msi_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_compose_msi_msg = sg2044_msi_irq_compose_msi_msg,
+};
+
+static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
{
- struct sg2042_msi_chipdata *data = domain->host_data;
+ struct sg204x_msi_chipdata *data = domain->host_data;
struct irq_fwspec fwspec;
struct irq_data *d;
int ret;
@@ -99,47 +143,45 @@ static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned in
return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}
-static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
+static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
- struct sg2042_msi_chipdata *data = domain->host_data;
+ struct sg204x_msi_chipdata *data = domain->host_data;
int hwirq, err, i;
- hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs);
+ hwirq = sg204x_msi_allocate_hwirq(data, nr_irqs);
if (hwirq < 0)
return hwirq;
for (i = 0; i < nr_irqs; i++) {
- err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ err = sg204x_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
if (err)
goto err_hwirq;
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &sg2042_msi_middle_irq_chip, data);
+ data->chip_info->irqchip, data);
}
-
return 0;
err_hwirq:
- sg2042_msi_free_hwirq(data, hwirq, nr_irqs);
+ sg204x_msi_free_hwirq(data, hwirq, nr_irqs);
irq_domain_free_irqs_parent(domain, virq, i);
-
return err;
}
-static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
+static void sg204x_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ struct sg204x_msi_chipdata *data = irq_data_get_irq_chip_data(d);
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
- sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs);
+ sg204x_msi_free_hwirq(data, d->hwirq, nr_irqs);
}
-static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
- .alloc = sg2042_msi_middle_domain_alloc,
- .free = sg2042_msi_middle_domain_free,
+static const struct irq_domain_ops sg204x_msi_middle_domain_ops = {
+ .alloc = sg204x_msi_middle_domain_alloc,
+ .free = sg204x_msi_middle_domain_free,
.select = msi_lib_irq_domain_select,
};
@@ -158,14 +200,30 @@ static const struct msi_parent_ops sg2042_msi_parent_ops = {
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
+#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+
+#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops sg2044_msi_parent_ops = {
+ .required_flags = SG2044_MSI_FLAGS_REQUIRED,
+ .supported_flags = SG2044_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "SG2044-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int sg204x_msi_init_domains(struct sg204x_msi_chipdata *data,
struct irq_domain *plic_domain, struct device *dev)
{
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct irq_domain *middle_domain;
middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode,
- &sg2042_msi_middle_domain_ops, data);
+ &sg204x_msi_middle_domain_ops, data);
if (!middle_domain) {
pr_err("Failed to create the MSI middle domain\n");
return -ENOMEM;
@@ -174,24 +232,29 @@ static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
- middle_domain->msi_parent_ops = &sg2042_msi_parent_ops;
-
+ middle_domain->msi_parent_ops = data->chip_info->parent_ops;
return 0;
}
static int sg2042_msi_probe(struct platform_device *pdev)
{
struct fwnode_reference_args args = { };
- struct sg2042_msi_chipdata *data;
+ struct sg204x_msi_chipdata *data;
struct device *dev = &pdev->dev;
struct irq_domain *plic_domain;
struct resource *res;
int ret;
- data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct sg204x_msi_chipdata), GFP_KERNEL);
if (!data)
return -ENOMEM;
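+ /* Select the SG2042 or SG2044 variant via the OF match data */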
+ data->chip_info = device_get_match_data(&pdev->dev);
+ if (!data->chip_info) {
+ dev_err(&pdev->dev, "Failed to get irqchip\n");
+ return -EINVAL;
+ }
+
data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
if (IS_ERR(data->reg_clr)) {
dev_err(dev, "Failed to map clear register\n");
@@ -232,11 +295,28 @@ static int sg2042_msi_probe(struct platform_device *pdev)
mutex_init(&data->msi_map_lock);
- return sg2042_msi_init_domains(data, plic_domain, dev);
+ data->msi_map = devm_bitmap_zalloc(&pdev->dev, data->num_irqs, GFP_KERNEL);
+ if (!data->msi_map) {
+ dev_err(&pdev->dev, "Unable to allocate msi mapping\n");
+ return -ENOMEM;
+ }
+
+ return sg204x_msi_init_domains(data, plic_domain, dev);
}
+static const struct sg204x_msi_chip_info sg2042_chip_info = {
+ .irqchip = &sg2042_msi_middle_irq_chip,
+ .parent_ops = &sg2042_msi_parent_ops,
+};
+
+static const struct sg204x_msi_chip_info sg2044_chip_info = {
+ .irqchip = &sg2044_msi_middle_irq_chip,
+ .parent_ops = &sg2044_msi_parent_ops,
+};
+
static const struct of_device_id sg2042_msi_of_match[] = {
- { .compatible = "sophgo,sg2042-msi" },
+ { .compatible = "sophgo,sg2042-msi", .data = &sg2042_chip_info },
+ { .compatible = "sophgo,sg2044-msi", .data = &sg2044_chip_info },
{ }
};
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
index c7db617e1a2f..0cad68aa8388 100644
--- a/drivers/irqchip/irq-sni-exiu.c
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -249,12 +249,12 @@ static int __init exiu_dt_init(struct device_node *node,
return -ENXIO;
}
- data = exiu_init(of_node_to_fwnode(node), &res);
+ data = exiu_init(of_fwnode_handle(node), &res);
if (IS_ERR(data))
return PTR_ERR(data);
- domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
- &exiu_domain_ops, data);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, NUM_IRQS, of_fwnode_handle(node),
+ &exiu_domain_ops, data);
if (!domain) {
pr_err("%pOF: failed to allocate domain\n", node);
goto out_unmap;
diff --git a/drivers/irqchip/irq-sp7021-intc.c b/drivers/irqchip/irq-sp7021-intc.c
index bed78d1def3d..2a6eda9ab62e 100644
--- a/drivers/irqchip/irq-sp7021-intc.c
+++ b/drivers/irqchip/irq-sp7021-intc.c
@@ -256,8 +256,8 @@ static int __init sp_intc_init_dt(struct device_node *node, struct device_node *
writel_relaxed(~0, REG_INTR_CLEAR + i * 4);
}
- sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS,
- &sp_intc_dm_ops, &sp_intc);
+ sp_intc.domain = irq_domain_create_linear(of_fwnode_handle(node), SP_INTC_NR_IRQS,
+ &sp_intc_dm_ops, &sp_intc);
if (!sp_intc.domain) {
ret = -ENOMEM;
goto out_unmap1;
diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c
index 0f5837176e53..2460798ec158 100644
--- a/drivers/irqchip/irq-starfive-jh8100-intc.c
+++ b/drivers/irqchip/irq-starfive-jh8100-intc.c
@@ -158,8 +158,8 @@ static int __init starfive_intc_init(struct device_node *intc,
raw_spin_lock_init(&irqc->lock);
- irqc->domain = irq_domain_add_linear(intc, STARFIVE_INTC_SRC_IRQ_NUM,
- &starfive_intc_domain_ops, irqc);
+ irqc->domain = irq_domain_create_linear(of_fwnode_handle(intc), STARFIVE_INTC_SRC_IRQ_NUM,
+ &starfive_intc_domain_ops, irqc);
if (!irqc->domain) {
pr_err("Unable to create IRQ domain\n");
ret = -EINVAL;
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 7c6a0080c330..978811f2abe8 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -169,22 +169,18 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
u32 rtsr, ftsr;
int err;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
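+ /* guard() releases gc->lock automatically on every return path */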
rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
if (err)
- goto unlock;
+ return err;
irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
-
-unlock:
- irq_gc_unlock(gc);
-
- return err;
+ return 0;
}
static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
@@ -217,18 +213,16 @@ static void stm32_irq_suspend(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
stm32_chip_suspend(chip_data, gc->wake_active);
- irq_gc_unlock(gc);
}
static void stm32_irq_resume(struct irq_chip_generic *gc)
{
struct stm32_exti_chip_data *chip_data = gc->private;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
stm32_chip_resume(chip_data, gc->mask_cache);
- irq_gc_unlock(gc);
}
static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
@@ -265,11 +259,8 @@ static void stm32_irq_ack(struct irq_data *d)
struct stm32_exti_chip_data *chip_data = gc->private;
const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
- irq_gc_lock(gc);
-
+ guard(raw_spinlock)(&gc->lock);
irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
-
- irq_gc_unlock(gc);
}
static struct
@@ -344,8 +335,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
if (!host_data)
return -ENOMEM;
- domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
- &irq_exti_domain_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), drv_data->bank_nr * IRQS_PER_BANK,
+ &irq_exti_domain_ops, NULL);
if (!domain) {
pr_err("%pOFn: Could not register interrupt domain.\n",
node);
diff --git a/drivers/irqchip/irq-stm32mp-exti.c b/drivers/irqchip/irq-stm32mp-exti.c
index cb83d6cc6113..c6b4407d05f9 100644
--- a/drivers/irqchip/irq-stm32mp-exti.c
+++ b/drivers/irqchip/irq-stm32mp-exti.c
@@ -531,7 +531,7 @@ static int stm32mp_exti_domain_alloc(struct irq_domain *dm,
if (ret)
return ret;
/* we only support one parent, so far */
- if (of_node_to_fwnode(out_irq.np) != dm->parent->fwnode)
+ if (of_fwnode_handle(out_irq.np) != dm->parent->fwnode)
return -EINVAL;
of_phandle_args_to_fwspec(out_irq.np, out_irq.args,
@@ -682,10 +682,9 @@ static int stm32mp_exti_probe(struct platform_device *pdev)
return -EINVAL;
}
- domain = irq_domain_add_hierarchy(parent_domain, 0,
- drv_data->bank_nr * IRQS_PER_BANK,
- np, &stm32mp_exti_domain_ops,
- host_data);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, drv_data->bank_nr * IRQS_PER_BANK,
+ of_fwnode_handle(np), &stm32mp_exti_domain_ops,
+ host_data);
if (!domain) {
dev_err(dev, "Could not register exti domain\n");
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index dd506ebfdacb..9c2c9caeca2a 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -133,7 +133,7 @@ static int __init sun4i_of_init(struct device_node *node,
/* Configure the external interrupt source type */
writel(0x00, irq_ic_data->irq_base + SUN4I_IRQ_NMI_CTRL_REG);
- irq_ic_data->irq_domain = irq_domain_add_linear(node, 3 * 32,
+ irq_ic_data->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), 3 * 32,
&sun4i_irq_ops, NULL);
if (!irq_ic_data->irq_domain)
panic("%pOF: unable to create IRQ domain\n", node);
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
index 99958d470d62..37d4b29763bc 100644
--- a/drivers/irqchip/irq-sun6i-r.c
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -338,8 +338,8 @@ static int __init sun6i_r_intc_init(struct device_node *node,
return PTR_ERR(base);
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, 0, node,
- &sun6i_r_intc_domain_ops, NULL);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, 0, of_fwnode_handle(node),
+ &sun6i_r_intc_domain_ops, NULL);
if (!domain) {
pr_err("%pOF: Failed to allocate domain\n", node);
iounmap(base);
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 01b0d8321728..fe32dfdc2dd0 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -111,7 +111,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
unsigned int src_type;
unsigned int i;
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQ_TYPE_EDGE_FALLING:
@@ -128,9 +128,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
break;
default:
- irq_gc_unlock(gc);
- pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
- data->irq);
+ pr_err("Cannot assign multiple trigger modes to IRQ %d.\n", data->irq);
return -EBADR;
}
@@ -145,9 +143,6 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
src_type_reg |= src_type;
sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
-
- irq_gc_unlock(gc);
-
return IRQ_SET_MASK_OK;
}
@@ -159,7 +154,7 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
struct irq_domain *domain;
int ret;
- domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(node), 1, &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("Could not register interrupt domain.\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index d59bfbe8c6d0..94cbc5111d7e 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -41,11 +41,9 @@ static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- uint32_t im, mod, pol;
+ uint32_t mod, pol, im = data->mask;
- im = data->mask;
-
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im;
pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im;
@@ -67,9 +65,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
case IRQ_TYPE_EDGE_RISING:
break;
default:
- irq_gc_unlock(gc);
- pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
- __func__, data->irq);
+ pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n", __func__, data->irq);
return -EBADR;
}
@@ -79,9 +75,6 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod);
ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol);
ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im);
-
- irq_gc_unlock(gc);
-
return IRQ_SET_MASK_OK;
}
@@ -121,13 +114,13 @@ static int __init of_tb10x_init_irq(struct device_node *ictl,
goto ioremap_fail;
}
- domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(ictl), AB_IRQCTL_MAXIRQ,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
ret = -ENOMEM;
pr_err("%pOFn: Could not register interrupt domain.\n",
ictl);
- goto irq_domain_add_fail;
+ goto irq_domain_create_fail;
}
ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ,
@@ -174,7 +167,7 @@ static int __init of_tb10x_init_irq(struct device_node *ictl,
gc_alloc_fail:
irq_domain_remove(domain);
-irq_domain_add_fail:
+irq_domain_create_fail:
iounmap(reg_base);
ioremap_fail:
release_mem_region(mem.start, resource_size(&mem));
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index ad3e2c1b3c87..66cbb9f77ff3 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -330,9 +330,8 @@ static int __init tegra_ictlr_init(struct device_node *node,
node, num_ictlrs, soc->num_ictlrs);
- domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
- node, &tegra_ictlr_domain_ops,
- lic);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, num_ictlrs * 32,
+ of_fwnode_handle(node), &tegra_ictlr_domain_ops, lic);
if (!domain) {
pr_err("%pOF: failed to allocated domain\n", node);
err = -ENOMEM;
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index a887efba262c..7de59238e6b0 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -233,7 +233,7 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
INIT_LIST_HEAD(&vint_desc->list);
parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev));
- parent_fwspec.fwnode = of_node_to_fwnode(parent_node);
+ parent_fwspec.fwnode = of_fwnode_handle(parent_node);
if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
/* Parent is GIC */
@@ -701,15 +701,15 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
if (ret)
return ret;
- domain = irq_domain_add_linear(dev_of_node(dev),
- ti_sci_get_num_resources(inta->vint),
- &ti_sci_inta_irq_domain_ops, inta);
+ domain = irq_domain_create_linear(of_fwnode_handle(dev_of_node(dev)),
+ ti_sci_get_num_resources(inta->vint),
+ &ti_sci_inta_irq_domain_ops, inta);
if (!domain) {
dev_err(dev, "Failed to allocate IRQ domain\n");
return -ENOMEM;
}
- msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi_domain = ti_sci_inta_msi_create_irq_domain(of_fwnode_handle(node),
&ti_sci_inta_msi_domain_info,
domain);
if (!msi_domain) {
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index b49a73106c69..07fff5ae5ce0 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -149,7 +149,7 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
goto err_irqs;
parent_node = of_irq_find_parent(dev_of_node(intr->dev));
- fwspec.fwnode = of_node_to_fwnode(parent_node);
+ fwspec.fwnode = of_fwnode_handle(parent_node);
if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
/* Parent is GIC */
@@ -274,8 +274,9 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
return PTR_ERR(intr->out_irqs);
}
- domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev),
- &ti_sci_intr_irq_domain_ops, intr);
+ domain = irq_domain_create_hierarchy(parent_domain, 0, 0,
+ of_fwnode_handle(dev_of_node(dev)),
+ &ti_sci_intr_irq_domain_ops, intr);
if (!domain) {
dev_err(dev, "Failed to allocate IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 960c343d5781..e625f4fb2bb8 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -125,7 +125,7 @@ static int ts4800_ic_probe(struct platform_device *pdev)
return -EINVAL;
}
- data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data);
+ data->domain = irq_domain_create_linear(of_fwnode_handle(node), 8, &ts4800_ic_ops, data);
if (!data->domain) {
dev_err(&pdev->dev, "cannot add IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
index 601f9343d5b3..6005c2d28dd9 100644
--- a/drivers/irqchip/irq-uniphier-aidet.c
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -188,7 +188,7 @@ static int uniphier_aidet_probe(struct platform_device *pdev)
priv->domain = irq_domain_create_hierarchy(
parent_domain, 0,
UNIPHIER_AIDET_NR_IRQS,
- of_node_to_fwnode(dev->of_node),
+ of_fwnode_handle(dev->of_node),
&uniphier_aidet_domain_ops, priv);
if (!priv->domain)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 0abc8934c2ee..034ce6afe170 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -176,8 +176,8 @@ static void __init fpga_irq_init(void __iomem *base, int parent_irq,
f);
}
- f->domain = irq_domain_add_linear(node, fls(valid),
- &fpga_irqdomain_ops, f);
+ f->domain = irq_domain_create_linear(of_fwnode_handle(node), fls(valid),
+ &fpga_irqdomain_ops, f);
/* This will allocate all valid descriptors in the linear case */
for (i = 0; i < fls(valid); i++)
diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c
index 2b9a8ba58e26..5d9c7503aa7f 100644
--- a/drivers/irqchip/irq-vf610-mscm-ir.c
+++ b/drivers/irqchip/irq-vf610-mscm-ir.c
@@ -209,9 +209,9 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node,
regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid);
mscm_ir_data->cpu_mask = 0x1 << cpuid;
- domain = irq_domain_add_hierarchy(domain_parent, 0,
- MSCM_IRSPRC_NUM, node,
- &mscm_irq_domain_ops, mscm_ir_data);
+ domain = irq_domain_create_hierarchy(domain_parent, 0, MSCM_IRSPRC_NUM,
+ of_fwnode_handle(node), &mscm_irq_domain_ops,
+ mscm_ir_data);
if (!domain) {
ret = -ENOMEM;
goto out_unmap;
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index ea93e7236c4a..2bcdf216a000 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -289,8 +289,9 @@ static void __init vic_register(void __iomem *base, unsigned int parent_irq,
vic_handle_irq_cascaded, v);
}
- v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
- &vic_irqdomain_ops, v);
+ v->domain = irq_domain_create_simple(of_fwnode_handle(node),
+ fls(valid_sources), irq,
+ &vic_irqdomain_ops, v);
/* create an IRQ mapping for each valid IRQ */
for (i = 0; i < fls(valid_sources); i++)
if (valid_sources & (1 << i))
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index e17dd3a8c2d5..3b742590aec8 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -63,29 +64,28 @@ struct vt8500_irq_data {
struct irq_domain *domain; /* Domain for this controller */
};
-/* Global variable for accessing io-mem addresses */
-static struct vt8500_irq_data intc[VT8500_INTC_MAX];
-static u32 active_cnt = 0;
+/* Primary interrupt controller data */
+static struct vt8500_irq_data *primary_intc;
-static void vt8500_irq_mask(struct irq_data *d)
+static void vt8500_irq_ack(struct irq_data *d)
{
struct vt8500_irq_data *priv = d->domain->host_data;
void __iomem *base = priv->base;
void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
- u8 edge, dctr;
- u32 status;
+ u32 status = (1 << (d->hwirq & 0x1f));
- edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE;
- if (edge) {
- status = readl(stat_reg);
+ writel(status, stat_reg);
+}
- status |= (1 << (d->hwirq & 0x1f));
- writel(status, stat_reg);
- } else {
- dctr = readb(base + VT8500_ICDC + d->hwirq);
- dctr &= ~VT8500_INT_ENABLE;
- writeb(dctr, base + VT8500_ICDC + d->hwirq);
- }
+static void vt8500_irq_mask(struct irq_data *d)
+{
+ struct vt8500_irq_data *priv = d->domain->host_data;
+ void __iomem *base = priv->base;
+ u8 dctr;
+
+ dctr = readb(base + VT8500_ICDC + d->hwirq);
+ dctr &= ~VT8500_INT_ENABLE;
+ writeb(dctr, base + VT8500_ICDC + d->hwirq);
}
static void vt8500_irq_unmask(struct irq_data *d)
@@ -130,11 +130,11 @@ static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
}
static struct irq_chip vt8500_irq_chip = {
- .name = "vt8500",
- .irq_ack = vt8500_irq_mask,
- .irq_mask = vt8500_irq_mask,
- .irq_unmask = vt8500_irq_unmask,
- .irq_set_type = vt8500_irq_set_type,
+ .name = "vt8500",
+ .irq_ack = vt8500_irq_ack,
+ .irq_mask = vt8500_irq_mask,
+ .irq_unmask = vt8500_irq_unmask,
+ .irq_set_type = vt8500_irq_set_type,
};
static void __init vt8500_init_irq_hw(void __iomem *base)
@@ -163,82 +163,89 @@ static const struct irq_domain_ops vt8500_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
+static inline void vt8500_handle_irq_common(struct vt8500_irq_data *intc)
+{
+ unsigned long irqnr = readl_relaxed(intc->base) & 0x3F;
+ unsigned long stat;
+
+ /*
+ * The Highest Priority register defaults to 63, so confirm this
+ * is a real interrupt by checking the status register
+ */
+ if (irqnr == 63) {
+ stat = readl_relaxed(intc->base + VT8500_ICIS + 4);
+ if (!(stat & BIT(31)))
+ return;
+ }
+
+ generic_handle_domain_irq(intc->domain, irqnr);
+}
+
static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
{
- u32 stat, i;
- int irqnr;
- void __iomem *base;
-
- /* Loop through each active controller */
- for (i=0; i<active_cnt; i++) {
- base = intc[i].base;
- irqnr = readl_relaxed(base) & 0x3F;
- /*
- Highest Priority register default = 63, so check that this
- is a real interrupt by checking the status register
- */
- if (irqnr == 63) {
- stat = readl_relaxed(base + VT8500_ICIS + 4);
- if (!(stat & BIT(31)))
- continue;
- }
+ vt8500_handle_irq_common(primary_intc);
+}
- generic_handle_domain_irq(intc[i].domain, irqnr);
- }
+static void vt8500_handle_irq_chained(struct irq_desc *desc)
+{
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct vt8500_irq_data *intc = d->host_data;
+
+ chained_irq_enter(chip, desc);
+ vt8500_handle_irq_common(intc);
+ chained_irq_exit(chip, desc);
}
static int __init vt8500_irq_init(struct device_node *node,
struct device_node *parent)
{
- int irq, i;
- struct device_node *np = node;
+ struct vt8500_irq_data *intc;
+ int irq, i, ret = 0;
- if (active_cnt == VT8500_INTC_MAX) {
- pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
- __func__);
- goto out;
- }
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
- intc[active_cnt].base = of_iomap(np, 0);
- intc[active_cnt].domain = irq_domain_add_linear(node, 64,
- &vt8500_irq_domain_ops, &intc[active_cnt]);
-
- if (!intc[active_cnt].base) {
+ intc->base = of_iomap(node, 0);
+ if (!intc->base) {
pr_err("%s: Unable to map IO memory\n", __func__);
- goto out;
+ ret = -ENOMEM;
+ goto err_free;
}
- if (!intc[active_cnt].domain) {
+ intc->domain = irq_domain_create_linear(of_fwnode_handle(node), 64,
+ &vt8500_irq_domain_ops, intc);
+ if (!intc->domain) {
pr_err("%s: Unable to add irq domain!\n", __func__);
- goto out;
+ ret = -ENOMEM;
+ goto err_unmap;
}
- set_handle_irq(vt8500_handle_irq);
-
- vt8500_init_irq_hw(intc[active_cnt].base);
+ vt8500_init_irq_hw(intc->base);
pr_info("vt8500-irq: Added interrupt controller\n");
- active_cnt++;
-
- /* check if this is a slaved controller */
- if (of_irq_count(np) != 0) {
- /* check that we have the correct number of interrupts */
- if (of_irq_count(np) != 8) {
- pr_err("%s: Incorrect IRQ map for slaved controller\n",
- __func__);
- return -EINVAL;
- }
-
- for (i = 0; i < 8; i++) {
- irq = irq_of_parse_and_map(np, i);
- enable_irq(irq);
+ /* check if this is a chained controller */
+ if (of_irq_count(node) != 0) {
+ for (i = 0; i < of_irq_count(node); i++) {
+ irq = irq_of_parse_and_map(node, i);
+ irq_set_chained_handler_and_data(irq, vt8500_handle_irq_chained,
+ intc->domain);
}
pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
+ } else {
+ primary_intc = intc;
+ set_handle_irq(vt8500_handle_irq);
}
-out:
return 0;
+
+err_unmap:
+ iounmap(intc->base);
+err_free:
+ kfree(intc);
+ return ret;
}
IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
index 91df62a64cd9..a8ed4894d29e 100644
--- a/drivers/irqchip/irq-wpcm450-aic.c
+++ b/drivers/irqchip/irq-wpcm450-aic.c
@@ -154,7 +154,7 @@ static int __init wpcm450_aic_of_init(struct device_node *node,
set_handle_irq(wpcm450_aic_handle_irq);
- aic->domain = irq_domain_add_linear(node, AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
+ aic->domain = irq_domain_create_linear(of_fwnode_handle(node), AIC_NUM_IRQS, &wpcm450_aic_ops, aic);
return 0;
}
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 38727e9cc713..92dcb9fdcb25 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -212,8 +212,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
xintc_write(irqc, MER, MER_HIE | MER_ME);
}
- irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
- &xintc_irq_domain_ops, irqc);
+ irqc->root_domain = irq_domain_create_linear(of_fwnode_handle(intc), irqc->nr_irq,
+ &xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
ret = -EINVAL;
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 9b441d180299..9fdacbd89a63 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -167,8 +167,7 @@ static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
- &xtensa_mx_irq_domain_ops,
+ irq_domain_create_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
xtensa_mx_init_common(root_domain);
return 0;
@@ -178,7 +177,7 @@ static int __init xtensa_mx_init(struct device_node *np,
struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
+ irq_domain_create_linear(of_fwnode_handle(np), NR_IRQS, &xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
xtensa_mx_init_common(root_domain);
return 0;
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 9be7b7c5cd23..44e7be051a2e 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -85,7 +85,7 @@ static struct irq_chip xtensa_irq_chip = {
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
+ irq_domain_create_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_irq_domain_ops, &xtensa_irq_chip);
irq_set_default_domain(root_domain);
return 0;
@@ -95,7 +95,7 @@ static int __init xtensa_pic_init(struct device_node *np,
struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
+ irq_domain_create_linear(of_fwnode_handle(np), NR_IRQS, &xtensa_irq_domain_ops,
&xtensa_irq_chip);
irq_set_default_domain(root_domain);
return 0;
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 7a72620fc478..22d46c246594 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -92,8 +92,8 @@ static int __init zevio_of_init(struct device_node *node,
zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
- zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
- &irq_generic_chip_ops, NULL);
+ zevio_irq_domain = irq_domain_create_linear(of_fwnode_handle(node), MAX_INTRS,
+ &irq_generic_chip_ops, NULL);
BUG_ON(!zevio_irq_domain);
ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 7c17a6f643ef..576e55569d77 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -239,7 +239,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
goto err_unmap;
}
- shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
+ shirq_domain = irq_domain_create_legacy(of_fwnode_handle(np), nr_irqs, virq_base, 0,
&irq_domain_simple_ops, NULL);
if (WARN_ON(!shirq_domain)) {
pr_warn("%s: irq domain init failed\n", __func__);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e3870d942699..2b05722d4dbe 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -291,7 +291,7 @@ reset_hfcpci(struct hfc_pci *hc)
static void
hfcpci_Timer(struct timer_list *t)
{
- struct hfc_pci *hc = from_timer(hc, t, hw.timer);
+ struct hfc_pci *hc = timer_container_of(hc, t, hw.timer);
hc->hw.timer.expires = jiffies + 75;
/* WD RESET */
/*
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 165a63994f04..a34ea6058960 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -713,7 +713,7 @@ isac_release(struct isac_hw *isac)
static void
dbusy_timer_handler(struct timer_list *t)
{
- struct isac_hw *isac = from_timer(isac, t, dch.timer);
+ struct isac_hw *isac = timer_container_of(isac, t, dch.timer);
int rbch, star;
u_long flags;
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index e48f89cbecf8..dace91ba412b 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1135,7 +1135,7 @@ EXPORT_SYMBOL(mISDNisar_irq);
static void
ftimer_handler(struct timer_list *t)
{
- struct isar_ch *ch = from_timer(ch, t, ftimer);
+ struct isar_ch *ch = timer_container_of(ch, t, ftimer);
pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags);
test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags);
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index da933f4bdf4e..168fc345dcdc 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -802,7 +802,7 @@ w6692_irq(int intno, void *dev_id)
static void
dbusy_timer_handler(struct timer_list *t)
{
- struct dchannel *dch = from_timer(dch, t, timer);
+ struct dchannel *dch = timer_container_of(dch, t, timer);
struct w6692_hw *card = dch->hw;
int rbch, star;
u_long flags;
diff --git a/drivers/isdn/mISDN/dsp_tones.c b/drivers/isdn/mISDN/dsp_tones.c
index 456b313bfa76..fa7813ae8d97 100644
--- a/drivers/isdn/mISDN/dsp_tones.c
+++ b/drivers/isdn/mISDN/dsp_tones.c
@@ -459,7 +459,7 @@ dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
void
dsp_tone_timeout(struct timer_list *t)
{
- struct dsp *dsp = from_timer(dsp, t, tone.tl);
+ struct dsp *dsp = timer_container_of(dsp, t, tone.tl);
struct dsp_tone *tone = &dsp->tone;
struct pattern *pat = (struct pattern *)tone->pattern;
int index = tone->index;
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
index 5ed0a6110687..825b686496d2 100644
--- a/drivers/isdn/mISDN/fsm.c
+++ b/drivers/isdn/mISDN/fsm.c
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(mISDN_FsmChangeState);
static void
FsmExpireTimer(struct timer_list *t)
{
- struct FsmTimer *ft = from_timer(ft, t, tl);
+ struct FsmTimer *ft = timer_container_of(ft, t, tl);
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index a5ad88a960d0..f732f6614d37 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -822,7 +822,7 @@ l1oip_send_bh(struct work_struct *work)
static void
l1oip_keepalive(struct timer_list *t)
{
- struct l1oip *hc = from_timer(hc, t, keep_tl);
+ struct l1oip *hc = timer_container_of(hc, t, keep_tl);
schedule_work(&hc->workq);
}
@@ -830,8 +830,8 @@ l1oip_keepalive(struct timer_list *t)
static void
l1oip_timeout(struct timer_list *t)
{
- struct l1oip *hc = from_timer(hc, t,
- timeout_tl);
+ struct l1oip *hc = timer_container_of(hc, t,
+ timeout_tl);
struct dchannel *dch = hc->chan[hc->d_idx].dch;
if (debug & DEBUG_L1OIP_MSG)
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 7cfa8c61dba0..df98144a9539 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -155,7 +155,7 @@ mISDN_poll(struct file *filep, poll_table *wait)
static void
dev_expire_timer(struct timer_list *t)
{
- struct mISDNtimer *timer = from_timer(timer, t, tl);
+ struct mISDNtimer *timer = timer_container_of(timer, t, tl);
u_long flags;
spin_lock_irqsave(&timer->dev->lock, flags);
diff --git a/drivers/leds/.kunitconfig b/drivers/leds/.kunitconfig
new file mode 100644
index 000000000000..5180f77910a1
--- /dev/null
+++ b/drivers/leds/.kunitconfig
@@ -0,0 +1,4 @@
+CONFIG_KUNIT=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_KUNIT_TEST=y
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a104cbb0a001..6e3dce7e35a4 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -55,6 +55,13 @@ config LEDS_BRIGHTNESS_HW_CHANGED
See Documentation/ABI/testing/sysfs-class-led for details.
+config LEDS_KUNIT_TEST
+ tristate "KUnit tests for LEDs"
+ depends on KUNIT && LEDS_CLASS
+ default KUNIT_ALL_TESTS
+ help
+ Say Y here to enable KUnit testing for the LEDs framework.
+
comment "LED drivers"
config LEDS_88PM860X
@@ -735,7 +742,7 @@ config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
depends on MACH_KIRKWOOD || MACH_ARMADA_370 || COMPILE_TEST
- default y
+ default y if MACH_KIRKWOOD || MACH_ARMADA_370
help
This option enables support for the dual-GPIO LEDs found on the
following LaCie/Seagate boards:
@@ -750,7 +757,7 @@ config LEDS_NETXBIG
depends on LEDS_CLASS
depends on MACH_KIRKWOOD || COMPILE_TEST
depends on OF_GPIO
- default y
+ default MACH_KIRKWOOD
help
This option enables support for LEDs found on the LaCie 2Big
and 5Big Network v2 boards. The LEDs are wired to a CPLD and are
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2f170d69dcbf..9a0333ec1a86 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_LEDS_CLASS) += led-class.o
obj-$(CONFIG_LEDS_CLASS_FLASH) += led-class-flash.o
obj-$(CONFIG_LEDS_CLASS_MULTICOLOR) += led-class-multicolor.o
obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
+obj-$(CONFIG_LEDS_KUNIT_TEST) += led-test.o
# LED Platform Drivers (keep this sorted, M-| sort)
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index effaaaf302b5..c9027f9c4bb7 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -450,7 +450,7 @@ static int sso_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(reg_val & BIT(offset));
}
-static void sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct sso_led_priv *priv = gpiochip_get_data(chip);
@@ -458,6 +458,8 @@ static void sso_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
if (!priv->gpio.freq)
regmap_update_bits(priv->mmap, SSO_CON0, SSO_CON0_SWU,
SSO_CON0_SWU);
+
+ return 0;
}
static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
@@ -469,7 +471,7 @@ static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
gc->get_direction = sso_gpio_get_dir;
gc->direction_output = sso_gpio_dir_out;
gc->get = sso_gpio_get;
- gc->set = sso_gpio_set;
+ gc->set_rv = sso_gpio_set;
gc->label = "lgm-sso";
gc->base = -1;
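This and the later leds-pca9532/pca955x/tca6507 hunks move the GPIO line setters to the int-returning callback so register-access failures can propagate to gpiolib. The target member, as declared in include/linux/gpio/driver.h in this kernel window:

	int (*set_rv)(struct gpio_chip *gc, unsigned int offset, int value);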
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index f39f0bfe6eef..55ca663ca506 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -132,4 +132,15 @@ config LEDS_SY7802
This driver can be built as a module, it will be called "leds-sy7802".
+config LEDS_TPS6131X
+ tristate "LED support for TI TPS6131x flash LED driver"
+ depends on I2C && OF
+ depends on GPIOLIB
+ select REGMAP_I2C
+ help
+ This option enables support for the Texas Instruments TPS61310/TPS61311
+ flash LED driver.
+
+ This driver can be built as a module, it will be called "leds-tps6131x".
+
endif # LEDS_CLASS_FLASH
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
index 48860eeced79..712fb737a428 100644
--- a/drivers/leds/flash/Makefile
+++ b/drivers/leds/flash/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS_RT4505) += leds-rt4505.o
obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o
obj-$(CONFIG_LEDS_SGM3140) += leds-sgm3140.o
obj-$(CONFIG_LEDS_SY7802) += leds-sy7802.o
+obj-$(CONFIG_LEDS_TPS6131X) += leds-tps6131x.o
diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c
index 32ba397a33d2..6af0d2c7fc56 100644
--- a/drivers/leds/flash/leds-rt8515.c
+++ b/drivers/leds/flash/leds-rt8515.c
@@ -165,7 +165,7 @@ static const struct led_flash_ops rt8515_flash_ops = {
static void rt8515_powerdown_timer(struct timer_list *t)
{
- struct rt8515 *rt = from_timer(rt, t, powerdown_timer);
+ struct rt8515 *rt = timer_container_of(rt, t, powerdown_timer);
/* Turn the LED off */
rt8515_gpio_led_off(rt);
diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
index 48fb8a9ec703..3e83200675f2 100644
--- a/drivers/leds/flash/leds-sgm3140.c
+++ b/drivers/leds/flash/leds-sgm3140.c
@@ -135,7 +135,7 @@ static int sgm3140_brightness_set(struct led_classdev *led_cdev,
static void sgm3140_powerdown_timer(struct timer_list *t)
{
- struct sgm3140 *priv = from_timer(priv, t, powerdown_timer);
+ struct sgm3140 *priv = timer_container_of(priv, t, powerdown_timer);
gpiod_set_value(priv->enable_gpio, 0);
gpiod_set_value(priv->flash_gpio, 0);
diff --git a/drivers/leds/flash/leds-tps6131x.c b/drivers/leds/flash/leds-tps6131x.c
new file mode 100644
index 000000000000..6f4d4fd55361
--- /dev/null
+++ b/drivers/leds/flash/leds-tps6131x.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Texas Instruments TPS61310/TPS61311 flash LED driver with I2C interface
+ *
+ * Copyright 2025 Matthias Fend <matthias.fend@emfend.at>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/led-class-flash.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define TPS6131X_REG_0 0x00
+#define TPS6131X_REG_0_RESET BIT(7)
+#define TPS6131X_REG_0_DCLC13 GENMASK(5, 3)
+#define TPS6131X_REG_0_DCLC13_SHIFT 3
+#define TPS6131X_REG_0_DCLC2 GENMASK(2, 0)
+#define TPS6131X_REG_0_DCLC2_SHIFT 0
+
+#define TPS6131X_REG_1 0x01
+#define TPS6131X_REG_1_MODE GENMASK(7, 6)
+#define TPS6131X_REG_1_MODE_SHIFT 6
+#define TPS6131X_REG_1_FC2 GENMASK(5, 0)
+#define TPS6131X_REG_1_FC2_SHIFT 0
+
+#define TPS6131X_REG_2 0x02
+#define TPS6131X_REG_2_MODE GENMASK(7, 6)
+#define TPS6131X_REG_2_MODE_SHIFT 6
+#define TPS6131X_REG_2_ENVM BIT(5)
+#define TPS6131X_REG_2_FC13 GENMASK(4, 0)
+#define TPS6131X_REG_2_FC13_SHIFT 0
+
+#define TPS6131X_REG_3 0x03
+#define TPS6131X_REG_3_STIM GENMASK(7, 5)
+#define TPS6131X_REG_3_STIM_SHIFT 5
+#define TPS6131X_REG_3_HPFL BIT(4)
+#define TPS6131X_REG_3_SELSTIM_TO BIT(3)
+#define TPS6131X_REG_3_STT BIT(2)
+#define TPS6131X_REG_3_SFT BIT(1)
+#define TPS6131X_REG_3_TXMASK BIT(0)
+
+#define TPS6131X_REG_4 0x04
+#define TPS6131X_REG_4_PG BIT(7)
+#define TPS6131X_REG_4_HOTDIE_HI BIT(6)
+#define TPS6131X_REG_4_HOTDIE_LO BIT(5)
+#define TPS6131X_REG_4_ILIM BIT(4)
+#define TPS6131X_REG_4_INDC GENMASK(3, 0)
+#define TPS6131X_REG_4_INDC_SHIFT 0
+
+#define TPS6131X_REG_5 0x05
+#define TPS6131X_REG_5_SELFCAL BIT(7)
+#define TPS6131X_REG_5_ENPSM BIT(6)
+#define TPS6131X_REG_5_STSTRB1_DIR BIT(5)
+#define TPS6131X_REG_5_GPIO BIT(4)
+#define TPS6131X_REG_5_GPIOTYPE BIT(3)
+#define TPS6131X_REG_5_ENLED3 BIT(2)
+#define TPS6131X_REG_5_ENLED2 BIT(1)
+#define TPS6131X_REG_5_ENLED1 BIT(0)
+
+#define TPS6131X_REG_6 0x06
+#define TPS6131X_REG_6_ENTS BIT(7)
+#define TPS6131X_REG_6_LEDHOT BIT(6)
+#define TPS6131X_REG_6_LEDWARN BIT(5)
+#define TPS6131X_REG_6_LEDHDR BIT(4)
+#define TPS6131X_REG_6_OV GENMASK(3, 0)
+#define TPS6131X_REG_6_OV_SHIFT 0
+
+#define TPS6131X_REG_7 0x07
+#define TPS6131X_REG_7_ENBATMON BIT(7)
+#define TPS6131X_REG_7_BATDROOP GENMASK(6, 4)
+#define TPS6131X_REG_7_BATDROOP_SHIFT 4
+#define TPS6131X_REG_7_REVID GENMASK(2, 0)
+#define TPS6131X_REG_7_REVID_SHIFT 0
+
+#define TPS6131X_MAX_CHANNELS 3
+
+#define TPS6131X_FLASH_MAX_I_CHAN13_MA 400
+#define TPS6131X_FLASH_MAX_I_CHAN2_MA 800
+#define TPS6131X_FLASH_STEP_I_MA 25
+
+#define TPS6131X_TORCH_MAX_I_CHAN13_MA 175
+#define TPS6131X_TORCH_MAX_I_CHAN2_MA 175
+#define TPS6131X_TORCH_STEP_I_MA 25
+
+/* The torch watchdog times out after 13 seconds; refreshing every 10 seconds keeps a safe margin. */
+#define TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES msecs_to_jiffies(10000)
+
+#define UA_TO_MA(UA) ((UA) / 1000)
+
+enum tps6131x_mode {
+ TPS6131X_MODE_SHUTDOWN = 0x0,
+ TPS6131X_MODE_TORCH = 0x1,
+ TPS6131X_MODE_FLASH = 0x2,
+};
+
+struct tps6131x {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ /*
+ * Registers 0, 1, 2, and 3 control parts of the controller that are not completely
+ * independent of each other. Since some operations require the registers to be written in
+ * a specific order to avoid unwanted side effects, they are synchronized with a lock.
+ */
+ struct mutex lock; /* Hardware access lock for registers 0, 1, 2 and 3 */
+ struct delayed_work torch_refresh_work;
+ bool valley_current_limit;
+ bool chan1_en;
+ bool chan2_en;
+ bool chan3_en;
+ struct fwnode_handle *led_node;
+ u32 max_flash_current_ma;
+ u32 step_flash_current_ma;
+ u32 max_torch_current_ma;
+ u32 step_torch_current_ma;
+ u32 max_timeout_us;
+ struct led_classdev_flash fled_cdev;
+ struct v4l2_flash *v4l2_flash;
+};
+
+static struct tps6131x *fled_cdev_to_tps6131x(struct led_classdev_flash *fled_cdev)
+{
+ return container_of(fled_cdev, struct tps6131x, fled_cdev);
+}
+
+/*
+ * Register contents after a power on/reset. These values cannot be changed.
+ */
+
+#define TPS6131X_DCLC2_50MA 2
+#define TPS6131X_DCLC13_25MA 1
+#define TPS6131X_FC2_400MA 16
+#define TPS6131X_FC13_200MA 8
+#define TPS6131X_STIM_0_579MS_1_37MS 6
+#define TPS6131X_SELSTIM_RANGE0 0
+#define TPS6131X_INDC_OFF 0
+#define TPS6131X_OV_4950MV 9
+#define TPS6131X_BATDROOP_150MV 4
+
+static const struct reg_default tps6131x_regmap_defaults[] = {
+ { TPS6131X_REG_0, (TPS6131X_DCLC13_25MA << TPS6131X_REG_0_DCLC13_SHIFT) |
+ (TPS6131X_DCLC2_50MA << TPS6131X_REG_0_DCLC2_SHIFT) },
+ { TPS6131X_REG_1, (TPS6131X_MODE_SHUTDOWN << TPS6131X_REG_1_MODE_SHIFT) |
+ (TPS6131X_FC2_400MA << TPS6131X_REG_1_FC2_SHIFT) },
+ { TPS6131X_REG_2, (TPS6131X_MODE_SHUTDOWN << TPS6131X_REG_2_MODE_SHIFT) |
+ (TPS6131X_FC13_200MA << TPS6131X_REG_2_FC13_SHIFT) },
+ { TPS6131X_REG_3, (TPS6131X_STIM_0_579MS_1_37MS << TPS6131X_REG_3_STIM_SHIFT) |
+ (TPS6131X_SELSTIM_RANGE0 << TPS6131X_REG_3_SELSTIM_TO) |
+ TPS6131X_REG_3_TXMASK },
+ { TPS6131X_REG_4, (TPS6131X_INDC_OFF << TPS6131X_REG_4_INDC_SHIFT) },
+ { TPS6131X_REG_5, TPS6131X_REG_5_ENPSM | TPS6131X_REG_5_STSTRB1_DIR |
+ TPS6131X_REG_5_GPIOTYPE | TPS6131X_REG_5_ENLED2 },
+ { TPS6131X_REG_6, (TPS6131X_OV_4950MV << TPS6131X_REG_6_OV_SHIFT) },
+ { TPS6131X_REG_7, (TPS6131X_BATDROOP_150MV << TPS6131X_REG_7_BATDROOP_SHIFT) },
+};
+
+/*
+ * These registers contain flags that are reset when read.
+ */
+static bool tps6131x_regmap_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TPS6131X_REG_3:
+ case TPS6131X_REG_4:
+ case TPS6131X_REG_6:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tps6131x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS6131X_REG_7,
+ .reg_defaults = tps6131x_regmap_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tps6131x_regmap_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .precious_reg = &tps6131x_regmap_precious,
+};
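Because the cache is REGCACHE_FLAT, the flag-bearing precious registers are read further down with regmap_read_bypassed(), so a stale cached copy never hides a freshly latched fault bit; for reference, that helper's signature is:

	int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val);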
+
+struct tps6131x_timer_config {
+ u8 val;
+ u8 range;
+ u32 time_us;
+};
+
+static const struct tps6131x_timer_config tps6131x_timer_configs[] = {
+ { .val = 0, .range = 1, .time_us = 5300 },
+ { .val = 1, .range = 1, .time_us = 10700 },
+ { .val = 2, .range = 1, .time_us = 16000 },
+ { .val = 3, .range = 1, .time_us = 21300 },
+ { .val = 4, .range = 1, .time_us = 26600 },
+ { .val = 5, .range = 1, .time_us = 32000 },
+ { .val = 6, .range = 1, .time_us = 37300 },
+ { .val = 0, .range = 0, .time_us = 68200 },
+ { .val = 7, .range = 1, .time_us = 71500 },
+ { .val = 1, .range = 0, .time_us = 102200 },
+ { .val = 2, .range = 0, .time_us = 136300 },
+ { .val = 3, .range = 0, .time_us = 170400 },
+ { .val = 4, .range = 0, .time_us = 204500 },
+ { .val = 5, .range = 0, .time_us = 340800 },
+ { .val = 6, .range = 0, .time_us = 579300 },
+ { .val = 7, .range = 0, .time_us = 852000 },
+};
+
+static const struct tps6131x_timer_config *tps6131x_find_closest_timer_config(u32 timeout_us)
+{
+ const struct tps6131x_timer_config *timer_config = &tps6131x_timer_configs[0];
+ u32 diff, min_diff = U32_MAX;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tps6131x_timer_configs); i++) {
+ diff = abs(tps6131x_timer_configs[i].time_us - timeout_us);
+ if (diff < min_diff) {
+ timer_config = &tps6131x_timer_configs[i];
+ min_diff = diff;
+ if (!min_diff)
+ break;
+ }
+ }
+
+ return timer_config;
+}
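The helper returns the table entry with the smallest absolute deviation from the request; for example, asking for 100000 us lands on the { .val = 1, .range = 0, .time_us = 102200 } entry, since its 2200 us gap beats the 28500 us gap to the 71500 us entry. Illustration only:

	/* a 100000 us request resolves to the 102200 us config */
	const struct tps6131x_timer_config *tc = tps6131x_find_closest_timer_config(100000);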
+
+static int tps6131x_reset_chip(struct tps6131x *tps6131x)
+{
+ int ret;
+
+ if (tps6131x->reset_gpio) {
+ gpiod_set_value_cansleep(tps6131x->reset_gpio, 1);
+ fsleep(10);
+ gpiod_set_value_cansleep(tps6131x->reset_gpio, 0);
+ fsleep(100);
+ } else {
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0, TPS6131X_REG_0_RESET,
+ TPS6131X_REG_0_RESET);
+ if (ret)
+ return ret;
+
+ fsleep(100);
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0, TPS6131X_REG_0_RESET, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps6131x_init_chip(struct tps6131x *tps6131x)
+{
+ u32 val;
+ int ret;
+
+ val = tps6131x->valley_current_limit ? TPS6131X_REG_4_ILIM : 0;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_4, val);
+ if (ret)
+ return ret;
+
+ val = TPS6131X_REG_5_ENPSM | TPS6131X_REG_5_STSTRB1_DIR | TPS6131X_REG_5_GPIOTYPE;
+
+ if (tps6131x->chan1_en)
+ val |= TPS6131X_REG_5_ENLED1;
+
+ if (tps6131x->chan2_en)
+ val |= TPS6131X_REG_5_ENLED2;
+
+ if (tps6131x->chan3_en)
+ val |= TPS6131X_REG_5_ENLED3;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_5, val);
+ if (ret)
+ return ret;
+
+ val = TPS6131X_REG_6_ENTS;
+
+ ret = regmap_write(tps6131x->regmap, TPS6131X_REG_6, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_set_mode(struct tps6131x *tps6131x, enum tps6131x_mode mode, bool force)
+{
+ u8 val = mode << TPS6131X_REG_1_MODE_SHIFT;
+
+ return regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_1, TPS6131X_REG_1_MODE, val,
+ NULL, false, force);
+}
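The force flag matters because REG_1 sits behind a flat register cache: rewriting an unchanged mode would otherwise be elided, and the torch watchdog refresh below depends on the write actually reaching the hardware. For reference, the helper's signature is:

	int regmap_update_bits_base(struct regmap *map, unsigned int reg,
				    unsigned int mask, unsigned int val,
				    bool *change, bool async, bool force);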
+
+static void tps6131x_torch_refresh_handler(struct work_struct *work)
+{
+ struct tps6131x *tps6131x = container_of(work, struct tps6131x, torch_refresh_work.work);
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = tps6131x_set_mode(tps6131x, TPS6131X_MODE_TORCH, true);
+ if (ret < 0) {
+ dev_err(tps6131x->dev, "Failed to refresh torch watchdog timer\n");
+ return;
+ }
+
+ schedule_delayed_work(&tps6131x->torch_refresh_work,
+ TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES);
+}
+
+static int tps6131x_brightness_set(struct led_classdev *cdev, enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u32 num_chans, steps_chan13, steps_chan2, steps_remaining;
+ u8 reg0;
+ int ret;
+
+ cancel_delayed_work_sync(&tps6131x->torch_refresh_work);
+
+ /*
+ * The brightness parameter uses the number of current steps as the unit (not the current
+ * value itself). Since the reported step size can vary depending on the configuration,
+ * this value must be converted into actual register steps.
+ */
+ steps_remaining = (brightness * tps6131x->step_torch_current_ma) / TPS6131X_TORCH_STEP_I_MA;
+
+ num_chans = tps6131x->chan1_en + tps6131x->chan2_en + tps6131x->chan3_en;
+
+ /*
+ * The currents are distributed as evenly as possible across the activated channels.
+ * Since channels 1 and 3 share the same register setting, they always use the same current
+ * value. Channel 2 supports higher currents and thus takes over the remaining additional
+ * portion that cannot be covered by the other channels.
+ */
+ steps_chan13 = min_t(u32, steps_remaining / num_chans,
+ TPS6131X_TORCH_MAX_I_CHAN13_MA / TPS6131X_TORCH_STEP_I_MA);
+ if (tps6131x->chan1_en)
+ steps_remaining -= steps_chan13;
+ if (tps6131x->chan3_en)
+ steps_remaining -= steps_chan13;
+
+ steps_chan2 = min_t(u32, steps_remaining,
+ TPS6131X_TORCH_MAX_I_CHAN2_MA / TPS6131X_TORCH_STEP_I_MA);
+
+ guard(mutex)(&tps6131x->lock);
+
+ reg0 = (steps_chan13 << TPS6131X_REG_0_DCLC13_SHIFT) |
+ (steps_chan2 << TPS6131X_REG_0_DCLC2_SHIFT);
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_0,
+ TPS6131X_REG_0_DCLC13 | TPS6131X_REG_0_DCLC2, reg0);
+ if (ret < 0)
+ return ret;
+
+ ret = tps6131x_set_mode(tps6131x, brightness ? TPS6131X_MODE_TORCH : TPS6131X_MODE_SHUTDOWN,
+ true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * In order to use both the flash and the video light functions purely via the I2C
+ * interface, STRB1 must be low. If STRB1 is low, then the video light watchdog timer
+ * is also active, which puts the device into the shutdown state after around 13 seconds.
+ * To prevent this, the mode must be refreshed within the watchdog timeout.
+ */
+ if (brightness)
+ schedule_delayed_work(&tps6131x->torch_refresh_work,
+ TPS6131X_TORCH_REFRESH_INTERVAL_JIFFIES);
+
+ return 0;
+}
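As a worked example of the distribution: with only channels 1 and 3 enabled, step_torch_current_ma is 50 (the pair shares DCLC13), so brightness = 3 gives steps_remaining = 3 * 50 / 25 = 6, steps_chan13 = min(6 / 2, 7) = 3 (75 mA on each of LED1 and LED3), and steps_chan2 remains 0.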
+
+static int tps6131x_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = tps6131x_set_mode(tps6131x, state ? TPS6131X_MODE_FLASH : TPS6131X_MODE_SHUTDOWN,
+ true);
+ if (ret < 0)
+ return ret;
+
+ if (state) {
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_3, TPS6131X_REG_3_SFT,
+ TPS6131X_REG_3_SFT, NULL, false, true);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_3, TPS6131X_REG_3_SFT, 0, NULL,
+ false, true);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_flash_brightness_set(struct led_classdev_flash *fled_cdev, u32 brightness)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u32 num_chans;
+ u32 steps_chan13, steps_chan2;
+ u32 steps_remaining;
+ int ret;
+
+ steps_remaining = brightness / TPS6131X_FLASH_STEP_I_MA;
+ num_chans = tps6131x->chan1_en + tps6131x->chan2_en + tps6131x->chan3_en;
+ steps_chan13 = min_t(u32, steps_remaining / num_chans,
+ TPS6131X_FLASH_MAX_I_CHAN13_MA / TPS6131X_FLASH_STEP_I_MA);
+ if (tps6131x->chan1_en)
+ steps_remaining -= steps_chan13;
+ if (tps6131x->chan3_en)
+ steps_remaining -= steps_chan13;
+ steps_chan2 = min_t(u32, steps_remaining,
+ TPS6131X_FLASH_MAX_I_CHAN2_MA / TPS6131X_FLASH_STEP_I_MA);
+
+ guard(mutex)(&tps6131x->lock);
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_2, TPS6131X_REG_2_FC13,
+ steps_chan13 << TPS6131X_REG_2_FC13_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_1, TPS6131X_REG_1_FC2,
+ steps_chan2 << TPS6131X_REG_1_FC2_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ fled_cdev->brightness.val = brightness;
+
+ return 0;
+}
+
+static int tps6131x_flash_timeout_set(struct led_classdev_flash *fled_cdev, u32 timeout_us)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ u8 reg3;
+ int ret;
+
+ guard(mutex)(&tps6131x->lock);
+
+ timer_config = tps6131x_find_closest_timer_config(timeout_us);
+
+ reg3 = timer_config->val << TPS6131X_REG_3_STIM_SHIFT;
+ if (timer_config->range)
+ reg3 |= TPS6131X_REG_3_SELSTIM_TO;
+
+ ret = regmap_update_bits(tps6131x->regmap, TPS6131X_REG_3,
+ TPS6131X_REG_3_STIM | TPS6131X_REG_3_SELSTIM_TO, reg3);
+ if (ret < 0)
+ return ret;
+
+ fled_cdev->timeout.val = timer_config->time_us;
+
+ return 0;
+}
+
+static int tps6131x_strobe_get(struct led_classdev_flash *fled_cdev, bool *state)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ unsigned int reg3;
+ int ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_3, &reg3);
+ if (ret)
+ return ret;
+
+ *state = !!(reg3 & TPS6131X_REG_3_SFT);
+
+ return 0;
+}
+
+static int tps6131x_flash_fault_get(struct led_classdev_flash *fled_cdev, u32 *fault)
+{
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+ unsigned int reg3, reg4, reg6;
+ int ret;
+
+ *fault = 0;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_3, &reg3);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_4, &reg4);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_bypassed(tps6131x->regmap, TPS6131X_REG_6, &reg6);
+ if (ret < 0)
+ return ret;
+
+ if (reg3 & TPS6131X_REG_3_HPFL)
+ *fault |= LED_FAULT_SHORT_CIRCUIT;
+
+ if (reg3 & TPS6131X_REG_3_SELSTIM_TO)
+ *fault |= LED_FAULT_TIMEOUT;
+
+ if (reg4 & TPS6131X_REG_4_HOTDIE_HI)
+ *fault |= LED_FAULT_OVER_TEMPERATURE;
+
+ if (reg6 & (TPS6131X_REG_6_LEDHOT | TPS6131X_REG_6_LEDWARN))
+ *fault |= LED_FAULT_LED_OVER_TEMPERATURE;
+
+ if (!(reg6 & TPS6131X_REG_6_LEDHDR))
+ *fault |= LED_FAULT_UNDER_VOLTAGE;
+
+ if (reg6 & TPS6131X_REG_6_LEDHOT) {
+ ret = regmap_update_bits_base(tps6131x->regmap, TPS6131X_REG_6,
+ TPS6131X_REG_6_LEDHOT, 0, NULL, false, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct led_flash_ops flash_ops = {
+ .flash_brightness_set = tps6131x_flash_brightness_set,
+ .strobe_set = tps6131x_strobe_set,
+ .strobe_get = tps6131x_strobe_get,
+ .timeout_set = tps6131x_flash_timeout_set,
+ .fault_get = tps6131x_flash_fault_get,
+};
+
+static int tps6131x_parse_node(struct tps6131x *tps6131x)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct device *dev = tps6131x->dev;
+ u32 channels[TPS6131X_MAX_CHANNELS];
+ u32 current_step_multiplier;
+ u32 current_ua;
+ u32 max_current_flash_ma, max_current_torch_ma;
+ u32 timeout_us;
+ int num_channels;
+ int i;
+ int ret;
+
+ tps6131x->valley_current_limit = device_property_read_bool(dev, "ti,valley-current-limit");
+
+ tps6131x->led_node = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ if (!tps6131x->led_node) {
+ dev_err(dev, "Missing LED node\n");
+ return -EINVAL;
+ }
+
+ num_channels = fwnode_property_count_u32(tps6131x->led_node, "led-sources");
+ if (num_channels <= 0) {
+ dev_err(dev, "Failed to read led-sources property\n");
+ return -EINVAL;
+ }
+
+ if (num_channels > TPS6131X_MAX_CHANNELS) {
+ dev_err(dev, "led-sources count %u exceeds maximum channel count %u\n",
+ num_channels, TPS6131X_MAX_CHANNELS);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32_array(tps6131x->led_node, "led-sources", channels,
+ num_channels);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read led-sources property\n");
+ return ret;
+ }
+
+ max_current_flash_ma = 0;
+ max_current_torch_ma = 0;
+ for (i = 0; i < num_channels; i++) {
+ switch (channels[i]) {
+ case 1:
+ tps6131x->chan1_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN13_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN13_MA;
+ break;
+ case 2:
+ tps6131x->chan2_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN2_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN2_MA;
+ break;
+ case 3:
+ tps6131x->chan3_en = true;
+ max_current_flash_ma += TPS6131X_FLASH_MAX_I_CHAN13_MA;
+ max_current_torch_ma += TPS6131X_TORCH_MAX_I_CHAN13_MA;
+ break;
+ default:
+ dev_err(dev, "led-source out of range [1-3]\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * If only channels 1 and 3 are used, the step size is doubled because the two channels
+ * share the same current control register.
+ */
+ current_step_multiplier =
+ (tps6131x->chan1_en && tps6131x->chan3_en && !tps6131x->chan2_en) ? 2 : 1;
+ tps6131x->step_flash_current_ma = current_step_multiplier * TPS6131X_FLASH_STEP_I_MA;
+ tps6131x->step_torch_current_ma = current_step_multiplier * TPS6131X_TORCH_STEP_I_MA;
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "led-max-microamp", &current_ua);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read led-max-microamp property\n");
+ return ret;
+ }
+
+ tps6131x->max_torch_current_ma = UA_TO_MA(current_ua);
+
+ if (!tps6131x->max_torch_current_ma ||
+ tps6131x->max_torch_current_ma > max_current_torch_ma ||
+ (tps6131x->max_torch_current_ma % tps6131x->step_torch_current_ma)) {
+ dev_err(dev, "led-max-microamp out of range or not a multiple of %u\n",
+ tps6131x->step_torch_current_ma);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "flash-max-microamp", &current_ua);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read flash-max-microamp property\n");
+ return ret;
+ }
+
+ tps6131x->max_flash_current_ma = UA_TO_MA(current_ua);
+
+ if (!tps6131x->max_flash_current_ma ||
+ tps6131x->max_flash_current_ma > max_current_flash_ma ||
+ (tps6131x->max_flash_current_ma % tps6131x->step_flash_current_ma)) {
+ dev_err(dev, "flash-max-microamp out of range or not a multiple of %u\n",
+ tps6131x->step_flash_current_ma);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(tps6131x->led_node, "flash-max-timeout-us", &timeout_us);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read flash-max-timeout-us property\n");
+ return ret;
+ }
+
+ timer_config = tps6131x_find_closest_timer_config(timeout_us);
+ tps6131x->max_timeout_us = timer_config->time_us;
+
+ if (tps6131x->max_timeout_us != timeout_us)
+ dev_warn(dev, "flash-max-timeout-us %u not supported (using %u)\n", timeout_us,
+ tps6131x->max_timeout_us);
+
+ return 0;
+}
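Taken together: a node with led-sources = <1 2 3> caps flash current at 400 + 800 + 400 = 1600 mA and torch current at 3 * 175 = 525 mA in 25 mA steps, and the led-max-microamp / flash-max-microamp values must be non-zero multiples of the step size within those caps, or probing fails with -EINVAL.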
+
+static int tps6131x_led_class_setup(struct tps6131x *tps6131x)
+{
+ const struct tps6131x_timer_config *timer_config;
+ struct led_classdev *led_cdev;
+ struct led_flash_setting *setting;
+ struct led_init_data init_data = {};
+ int ret;
+
+ tps6131x->fled_cdev.ops = &flash_ops;
+
+ setting = &tps6131x->fled_cdev.timeout;
+ timer_config = tps6131x_find_closest_timer_config(0);
+ setting->min = timer_config->time_us;
+ setting->max = tps6131x->max_timeout_us;
+ setting->step = 1; /* Only some specific time periods are supported. No fixed step size. */
+ setting->val = setting->min;
+
+ setting = &tps6131x->fled_cdev.brightness;
+ setting->min = tps6131x->step_flash_current_ma;
+ setting->max = tps6131x->max_flash_current_ma;
+ setting->step = tps6131x->step_flash_current_ma;
+ setting->val = setting->min;
+
+ led_cdev = &tps6131x->fled_cdev.led_cdev;
+ led_cdev->brightness_set_blocking = tps6131x_brightness_set;
+ led_cdev->max_brightness = tps6131x->max_torch_current_ma;
+ led_cdev->flags |= LED_DEV_CAP_FLASH;
+
+ init_data.fwnode = tps6131x->led_node;
+ init_data.devicename = NULL;
+ init_data.default_label = NULL;
+ init_data.devname_mandatory = false;
+
+ ret = devm_led_classdev_flash_register_ext(tps6131x->dev, &tps6131x->fled_cdev,
+ &init_data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps6131x_flash_external_strobe_set(struct v4l2_flash *v4l2_flash, bool enable)
+{
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct tps6131x *tps6131x = fled_cdev_to_tps6131x(fled_cdev);
+
+ guard(mutex)(&tps6131x->lock);
+
+ return tps6131x_set_mode(tps6131x, enable ? TPS6131X_MODE_FLASH : TPS6131X_MODE_SHUTDOWN,
+ false);
+}
+
+static const struct v4l2_flash_ops tps6131x_v4l2_flash_ops = {
+ .external_strobe_set = tps6131x_flash_external_strobe_set,
+};
+
+static int tps6131x_v4l2_setup(struct tps6131x *tps6131x)
+{
+ struct v4l2_flash_config v4l2_cfg = { 0 };
+ struct led_flash_setting *intensity = &v4l2_cfg.intensity;
+
+ intensity->min = tps6131x->step_torch_current_ma;
+ intensity->max = tps6131x->max_torch_current_ma;
+ intensity->step = tps6131x->step_torch_current_ma;
+ intensity->val = intensity->min;
+
+ strscpy(v4l2_cfg.dev_name, tps6131x->fled_cdev.led_cdev.dev->kobj.name,
+ sizeof(v4l2_cfg.dev_name));
+
+ v4l2_cfg.has_external_strobe = true;
+ v4l2_cfg.flash_faults = LED_FAULT_TIMEOUT | LED_FAULT_OVER_TEMPERATURE |
+ LED_FAULT_SHORT_CIRCUIT | LED_FAULT_UNDER_VOLTAGE |
+ LED_FAULT_LED_OVER_TEMPERATURE;
+
+ tps6131x->v4l2_flash = v4l2_flash_init(tps6131x->dev, tps6131x->led_node,
+ &tps6131x->fled_cdev, &tps6131x_v4l2_flash_ops,
+ &v4l2_cfg);
+ if (IS_ERR(tps6131x->v4l2_flash)) {
+ dev_err(tps6131x->dev, "Failed to initialize v4l2 flash LED\n");
+ return PTR_ERR(tps6131x->v4l2_flash);
+ }
+
+ return 0;
+}
+
+static int tps6131x_probe(struct i2c_client *client)
+{
+ struct tps6131x *tps6131x;
+ int ret;
+
+ tps6131x = devm_kzalloc(&client->dev, sizeof(*tps6131x), GFP_KERNEL);
+ if (!tps6131x)
+ return -ENOMEM;
+
+ tps6131x->dev = &client->dev;
+ i2c_set_clientdata(client, tps6131x);
+ mutex_init(&tps6131x->lock);
+ INIT_DELAYED_WORK(&tps6131x->torch_refresh_work, tps6131x_torch_refresh_handler);
+
+ ret = tps6131x_parse_node(tps6131x);
+ if (ret)
+ return ret;
+
+ tps6131x->regmap = devm_regmap_init_i2c(client, &tps6131x_regmap);
+ if (IS_ERR(tps6131x->regmap)) {
+ ret = PTR_ERR(tps6131x->regmap);
+ return dev_err_probe(&client->dev, ret, "Failed to allocate register map\n");
+ }
+
+ tps6131x->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tps6131x->reset_gpio)) {
+ ret = PTR_ERR(tps6131x->reset_gpio);
+ return dev_err_probe(&client->dev, ret, "Failed to get reset GPIO\n");
+ }
+
+ ret = tps6131x_reset_chip(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to reset LED controller\n");
+
+ ret = tps6131x_init_chip(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to initialize LED controller\n");
+
+ ret = tps6131x_led_class_setup(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to setup LED class\n");
+
+ ret = tps6131x_v4l2_setup(tps6131x);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to setup v4l2 flash\n");
+
+ return 0;
+}
+
+static void tps6131x_remove(struct i2c_client *client)
+{
+ struct tps6131x *tps6131x = i2c_get_clientdata(client);
+
+ v4l2_flash_release(tps6131x->v4l2_flash);
+
+ cancel_delayed_work_sync(&tps6131x->torch_refresh_work);
+}
+
+static const struct of_device_id of_tps6131x_leds_match[] = {
+ { .compatible = "ti,tps61310" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tps6131x_leds_match);
+
+static struct i2c_driver tps6131x_i2c_driver = {
+ .driver = {
+ .name = "tps6131x",
+ .of_match_table = of_tps6131x_leds_match,
+ },
+ .probe = tps6131x_probe,
+ .remove = tps6131x_remove,
+};
+module_i2c_driver(tps6131x_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments TPS6131X flash LED driver");
+MODULE_AUTHOR("Matthias Fend <matthias.fend@emfend.at>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index f4e26ce84862..165035a8826c 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -440,6 +440,21 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev)
}
EXPORT_SYMBOL_GPL(led_update_flash_brightness);
+int led_set_flash_duration(struct led_classdev_flash *fled_cdev, u32 duration)
+{
+ struct led_classdev *led_cdev = &fled_cdev->led_cdev;
+ struct led_flash_setting *s = &fled_cdev->duration;
+
+ s->val = duration;
+ led_clamp_align(s);
+
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ return call_flash_op(fled_cdev, duration_set, s->val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(led_set_flash_duration);
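Drivers whose hardware supports a programmable strobe duration fill in fled_cdev->duration at registration and wire up the duration_set op; consumers then go through the new helper, which clamps and aligns the request before calling down. A minimal sketch, assuming a registered fled_cdev with a populated duration range and microsecond units:

	/* illustration: request a 100 ms strobe duration (microseconds assumed) */
	ret = led_set_flash_duration(fled_cdev, 100 * USEC_PER_MSEC);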
+
MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
MODULE_DESCRIPTION("LED Flash class interface");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
index b2a87c994816..fd66d2bdeace 100644
--- a/drivers/leds/led-class-multicolor.c
+++ b/drivers/leds/led-class-multicolor.c
@@ -59,7 +59,8 @@ static ssize_t multi_intensity_store(struct device *dev,
for (i = 0; i < mcled_cdev->num_colors; i++)
mcled_cdev->subled_info[i].intensity = intensity_value[i];
- led_set_brightness(led_cdev, led_cdev->brightness);
+ if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags))
+ led_set_brightness(led_cdev, led_cdev->brightness);
ret = size;
err_out:
mutex_unlock(&led_cdev->led_access);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 907fc703e0c5..59473f286b31 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -64,7 +64,8 @@ static int __led_set_brightness_blocking(struct led_classdev *led_cdev, unsigned
static void led_timer_function(struct timer_list *t)
{
- struct led_classdev *led_cdev = from_timer(led_cdev, t, blink_timer);
+ struct led_classdev *led_cdev = timer_container_of(led_cdev, t,
+ blink_timer);
unsigned long brightness;
unsigned long delay;
@@ -529,6 +530,7 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
struct led_properties props = {};
struct fwnode_handle *fwnode = init_data->fwnode;
const char *devicename = init_data->devicename;
+ int n;
if (!led_classdev_name)
return -EINVAL;
@@ -542,45 +544,49 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
* Otherwise the label is prepended with devicename to compose
* the final LED class device name.
*/
- if (!devicename) {
- strscpy(led_classdev_name, props.label,
- LED_MAX_NAME_SIZE);
+ if (devicename) {
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, props.label);
} else {
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, props.label);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s", props.label);
}
} else if (props.function || props.color_present) {
char tmp_buf[LED_MAX_NAME_SIZE];
if (props.func_enum_present) {
- snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d",
- props.color_present ? led_colors[props.color] : "",
- props.function ?: "", props.func_enum);
+ n = snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d",
+ props.color_present ? led_colors[props.color] : "",
+ props.function ?: "", props.func_enum);
} else {
- snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s",
- props.color_present ? led_colors[props.color] : "",
- props.function ?: "");
+ n = snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s",
+ props.color_present ? led_colors[props.color] : "",
+ props.function ?: "");
}
+ if (n >= LED_MAX_NAME_SIZE)
+ return -E2BIG;
+
if (init_data->devname_mandatory) {
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, tmp_buf);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, tmp_buf);
} else {
- strscpy(led_classdev_name, tmp_buf, LED_MAX_NAME_SIZE);
-
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s", tmp_buf);
}
} else if (init_data->default_label) {
if (!devicename) {
dev_err(dev, "Legacy LED naming requires devicename segment");
return -EINVAL;
}
- snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
- devicename, init_data->default_label);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s",
+ devicename, init_data->default_label);
} else if (is_of_node(fwnode)) {
- strscpy(led_classdev_name, to_of_node(fwnode)->name,
- LED_MAX_NAME_SIZE);
+ n = snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s",
+ to_of_node(fwnode)->name);
} else
return -EINVAL;
+ if (n >= LED_MAX_NAME_SIZE)
+ return -E2BIG;
+
return 0;
}
EXPORT_SYMBOL_GPL(led_compose_name);
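The composed names themselves do not change with this rework; e.g. color/function properties still produce a "red:status-1" style name, and a label with a hypothetical devicename "pca955x" still composes to "pca955x:status". What changes is that every branch now captures the snprintf() result, so a name that would previously have been silently truncated against LED_MAX_NAME_SIZE is rejected with -E2BIG instead.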
diff --git a/drivers/leds/led-test.c b/drivers/leds/led-test.c
new file mode 100644
index 000000000000..ddf9aa967a6a
--- /dev/null
+++ b/drivers/leds/led-test.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ *
+ * Author: Lee Jones <lee@kernel.org>
+ */
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+
+#define LED_TEST_POST_REG_BRIGHTNESS 10
+
+struct led_test_ddata {
+ struct led_classdev cdev;
+ struct device *dev;
+};
+
+static enum led_brightness led_test_brightness_get(struct led_classdev *cdev)
+{
+ return LED_TEST_POST_REG_BRIGHTNESS;
+}
+
+static void led_test_class_register(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+ struct led_classdev *cdev_clash, *cdev = &ddata->cdev;
+ struct device *dev = ddata->dev;
+ int ret;
+
+ /* Register a LED class device */
+ cdev->name = "led-test";
+ cdev->brightness_get = led_test_brightness_get;
+ cdev->brightness = 0;
+
+ ret = devm_led_classdev_register(dev, cdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, cdev->max_brightness, LED_FULL);
+ KUNIT_EXPECT_EQ(test, cdev->brightness, LED_TEST_POST_REG_BRIGHTNESS);
+ KUNIT_EXPECT_STREQ(test, cdev->dev->kobj.name, "led-test");
+
+ /* Register again with the same name - expect it to pass with the LED renamed */
+ cdev_clash = devm_kmemdup(dev, cdev, sizeof(*cdev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cdev_clash);
+
+ ret = devm_led_classdev_register(dev, cdev_clash);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_STREQ(test, cdev_clash->dev->kobj.name, "led-test_1");
+ KUNIT_EXPECT_STREQ(test, cdev_clash->name, "led-test");
+
+ /* Enable name conflict rejection and register with the same name again - expect failure */
+ cdev_clash->flags |= LED_REJECT_NAME_CONFLICT;
+ ret = devm_led_classdev_register(dev, cdev_clash);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+}
+
+static void led_test_class_add_lookup_and_get(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+ struct led_classdev *cdev = &ddata->cdev, *cdev_get;
+ struct device *dev = ddata->dev;
+ struct led_lookup_data lookup;
+ int ret;
+
+ /* First, register a LED class device */
+ cdev->name = "led-test";
+ ret = devm_led_classdev_register(dev, cdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Then make the LED available for lookup */
+ lookup.provider = cdev->name;
+ lookup.dev_id = dev_name(dev);
+ lookup.con_id = "led-test-1";
+ led_add_lookup(&lookup);
+
+ /* Finally, attempt to look it up via the API - imagine this were a separate driver */
+ cdev_get = devm_led_get(dev, "led-test-1");
+ KUNIT_ASSERT_FALSE(test, IS_ERR(cdev_get));
+
+ KUNIT_EXPECT_STREQ(test, cdev_get->name, cdev->name);
+
+ led_remove_lookup(&lookup);
+}
+
+static struct kunit_case led_test_cases[] = {
+ KUNIT_CASE(led_test_class_register),
+ KUNIT_CASE(led_test_class_add_lookup_and_get),
+ { }
+};
+
+static int led_test_init(struct kunit *test)
+{
+ struct led_test_ddata *ddata;
+ struct device *dev;
+
+ ddata = kunit_kzalloc(test, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ test->priv = ddata;
+
+ dev = kunit_device_register(test, "led_test");
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ ddata->dev = get_device(dev);
+
+ return 0;
+}
+
+static void led_test_exit(struct kunit *test)
+{
+ struct led_test_ddata *ddata = test->priv;
+
+ if (ddata && ddata->dev)
+ put_device(ddata->dev);
+}
+
+static struct kunit_suite led_test_suite = {
+ .name = "led",
+ .init = led_test_init,
+ .exit = led_test_exit,
+ .test_cases = led_test_cases,
+};
+kunit_test_suite(led_test_suite);
+
+MODULE_AUTHOR("Lee Jones <lee@kernel.org>");
+MODULE_DESCRIPTION("KUnit tests for the LED framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index b2d40f87a5ff..3799dcc1cf07 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -54,6 +54,11 @@ ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
goto unlock;
}
+ if (sysfs_streq(buf, "default")) {
+ led_trigger_set_default(led_cdev);
+ goto unlock;
+ }
+
down_read(&triggers_list_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
if (sysfs_streq(buf, trig->name) && trigger_relevant(led_cdev, trig)) {
@@ -98,6 +103,9 @@ static int led_trigger_format(char *buf, size_t size,
int len = led_trigger_snprintf(buf, size, "%s",
led_cdev->trigger ? "none" : "[none]");
+ if (led_cdev->default_trigger)
+ len += led_trigger_snprintf(buf + len, size - len, " default");
+
list_for_each_entry(trig, &trigger_list, next_trig) {
bool hit;
@@ -281,6 +289,11 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
if (!led_cdev->default_trigger)
return;
+ if (!strcmp(led_cdev->default_trigger, "none")) {
+ led_trigger_remove(led_cdev);
+ return;
+ }
+
down_read(&triggers_list_lock);
down_write(&led_cdev->trigger_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
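Together these hunks expose the default trigger through sysfs: reading the trigger attribute now lists " default" alongside the real triggers whenever one is defined, and writing it restores that trigger, e.g. (path per the usual LED class layout):

	echo default > /sys/class/leds/<led>/trigger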
diff --git a/drivers/leds/leds-cros_ec.c b/drivers/leds/leds-cros_ec.c
index 275522b81ea5..377cf04e202a 100644
--- a/drivers/leds/leds-cros_ec.c
+++ b/drivers/leds/leds-cros_ec.c
@@ -60,31 +60,18 @@ static inline struct cros_ec_led_priv *cros_ec_led_cdev_to_priv(struct led_class
union cros_ec_led_cmd_data {
struct ec_params_led_control req;
struct ec_response_led_control resp;
-} __packed;
+};
static int cros_ec_led_send_cmd(struct cros_ec_device *cros_ec,
union cros_ec_led_cmd_data *arg)
{
int ret;
- struct {
- struct cros_ec_command msg;
- union cros_ec_led_cmd_data data;
- } __packed buf = {
- .msg = {
- .version = 1,
- .command = EC_CMD_LED_CONTROL,
- .insize = sizeof(arg->resp),
- .outsize = sizeof(arg->req),
- },
- .data.req = arg->req
- };
-
- ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+
+ ret = cros_ec_cmd(cros_ec, 1, EC_CMD_LED_CONTROL, &arg->req,
+ sizeof(arg->req), &arg->resp, sizeof(arg->resp));
if (ret < 0)
return ret;
- arg->resp = buf.data.resp;
-
return 0;
}
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 995f2adf8569..52b97c9f2a03 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -90,8 +90,6 @@
* @led_dev: led class device pointer
* @regmap: Devices register map
* @eeprom_regmap: EEPROM register map
- * @enable_gpio: VDDIO/EN gpio to enable communication interface
- * @regulator: LED supply regulator pointer
*/
struct lp8860_led {
struct mutex lock;
@@ -99,16 +97,9 @@ struct lp8860_led {
struct led_classdev led_dev;
struct regmap *regmap;
struct regmap *eeprom_regmap;
- struct gpio_desc *enable_gpio;
- struct regulator *regulator;
-};
-
-struct lp8860_eeprom_reg {
- uint8_t reg;
- uint8_t value;
};
-static struct lp8860_eeprom_reg lp8860_eeprom_disp_regs[] = {
+static const struct reg_sequence lp8860_eeprom_disp_regs[] = {
{ LP8860_EEPROM_REG_0, 0xed },
{ LP8860_EEPROM_REG_1, 0xdf },
{ LP8860_EEPROM_REG_2, 0xdc },
@@ -136,43 +127,29 @@ static struct lp8860_eeprom_reg lp8860_eeprom_disp_regs[] = {
{ LP8860_EEPROM_REG_24, 0x3E },
};
-static int lp8860_unlock_eeprom(struct lp8860_led *led, int lock)
+static int lp8860_unlock_eeprom(struct lp8860_led *led)
{
int ret;
- mutex_lock(&led->lock);
-
- if (lock == LP8860_UNLOCK_EEPROM) {
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_1);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
-
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_2);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_EEPROM_CODE_3);
- if (ret) {
- dev_err(&led->client->dev, "EEPROM Unlock failed\n");
- goto out;
- }
- } else {
- ret = regmap_write(led->regmap,
- LP8860_EEPROM_UNLOCK,
- LP8860_LOCK_EEPROM);
+ guard(mutex)(&led->lock);
+
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_1);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
+ }
+
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_2);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
+ }
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_EEPROM_CODE_3);
+ if (ret) {
+ dev_err(&led->client->dev, "EEPROM Unlock failed\n");
+ return ret;
}
-out:
- mutex_unlock(&led->lock);
return ret;
}
@@ -209,47 +186,35 @@ static int lp8860_brightness_set(struct led_classdev *led_cdev,
int disp_brightness = brt_val * 255;
int ret;
- mutex_lock(&led->lock);
+ guard(mutex)(&led->lock);
ret = lp8860_fault_check(led);
if (ret) {
dev_err(&led->client->dev, "Cannot read/clear faults\n");
- goto out;
+ return ret;
}
ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_MSB,
(disp_brightness & 0xff00) >> 8);
if (ret) {
dev_err(&led->client->dev, "Cannot write CL1 MSB\n");
- goto out;
+ return ret;
}
ret = regmap_write(led->regmap, LP8860_DISP_CL1_BRT_LSB,
disp_brightness & 0xff);
if (ret) {
dev_err(&led->client->dev, "Cannot write CL1 LSB\n");
- goto out;
+ return ret;
}
-out:
- mutex_unlock(&led->lock);
- return ret;
+
+ return 0;
}
static int lp8860_init(struct lp8860_led *led)
{
unsigned int read_buf;
- int ret, i, reg_count;
-
- if (led->regulator) {
- ret = regulator_enable(led->regulator);
- if (ret) {
- dev_err(&led->client->dev,
- "Failed to enable regulator\n");
- return ret;
- }
- }
-
- gpiod_direction_output(led->enable_gpio, 1);
+ int ret, reg_count;
ret = lp8860_fault_check(led);
if (ret)
@@ -259,24 +224,20 @@ static int lp8860_init(struct lp8860_led *led)
if (ret)
goto out;
- ret = lp8860_unlock_eeprom(led, LP8860_UNLOCK_EEPROM);
+ ret = lp8860_unlock_eeprom(led);
if (ret) {
dev_err(&led->client->dev, "Failed unlocking EEPROM\n");
goto out;
}
reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs);
- for (i = 0; i < reg_count; i++) {
- ret = regmap_write(led->eeprom_regmap,
- lp8860_eeprom_disp_regs[i].reg,
- lp8860_eeprom_disp_regs[i].value);
- if (ret) {
- dev_err(&led->client->dev, "Failed writing EEPROM\n");
- goto out;
- }
+ ret = regmap_multi_reg_write(led->eeprom_regmap, lp8860_eeprom_disp_regs, reg_count);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed writing EEPROM\n");
+ goto out;
}
- ret = lp8860_unlock_eeprom(led, LP8860_LOCK_EEPROM);
+ ret = regmap_write(led->regmap, LP8860_EEPROM_UNLOCK, LP8860_LOCK_EEPROM);
if (ret)
goto out;
@@ -291,74 +252,14 @@ static int lp8860_init(struct lp8860_led *led)
return ret;
out:
- if (ret)
- gpiod_direction_output(led->enable_gpio, 0);
-
- if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator\n");
- }
-
return ret;
}
-static const struct reg_default lp8860_reg_defs[] = {
- { LP8860_DISP_CL1_BRT_MSB, 0x00},
- { LP8860_DISP_CL1_BRT_LSB, 0x00},
- { LP8860_DISP_CL1_CURR_MSB, 0x00},
- { LP8860_DISP_CL1_CURR_LSB, 0x00},
- { LP8860_CL2_BRT_MSB, 0x00},
- { LP8860_CL2_BRT_LSB, 0x00},
- { LP8860_CL2_CURRENT, 0x00},
- { LP8860_CL3_BRT_MSB, 0x00},
- { LP8860_CL3_BRT_LSB, 0x00},
- { LP8860_CL3_CURRENT, 0x00},
- { LP8860_CL4_BRT_MSB, 0x00},
- { LP8860_CL4_BRT_LSB, 0x00},
- { LP8860_CL4_CURRENT, 0x00},
- { LP8860_CONFIG, 0x00},
- { LP8860_FAULT_CLEAR, 0x00},
- { LP8860_EEPROM_CNTRL, 0x80},
- { LP8860_EEPROM_UNLOCK, 0x00},
-};
-
static const struct regmap_config lp8860_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = LP8860_EEPROM_UNLOCK,
- .reg_defaults = lp8860_reg_defs,
- .num_reg_defaults = ARRAY_SIZE(lp8860_reg_defs),
-};
-
-static const struct reg_default lp8860_eeprom_defs[] = {
- { LP8860_EEPROM_REG_0, 0x00 },
- { LP8860_EEPROM_REG_1, 0x00 },
- { LP8860_EEPROM_REG_2, 0x00 },
- { LP8860_EEPROM_REG_3, 0x00 },
- { LP8860_EEPROM_REG_4, 0x00 },
- { LP8860_EEPROM_REG_5, 0x00 },
- { LP8860_EEPROM_REG_6, 0x00 },
- { LP8860_EEPROM_REG_7, 0x00 },
- { LP8860_EEPROM_REG_8, 0x00 },
- { LP8860_EEPROM_REG_9, 0x00 },
- { LP8860_EEPROM_REG_10, 0x00 },
- { LP8860_EEPROM_REG_11, 0x00 },
- { LP8860_EEPROM_REG_12, 0x00 },
- { LP8860_EEPROM_REG_13, 0x00 },
- { LP8860_EEPROM_REG_14, 0x00 },
- { LP8860_EEPROM_REG_15, 0x00 },
- { LP8860_EEPROM_REG_16, 0x00 },
- { LP8860_EEPROM_REG_17, 0x00 },
- { LP8860_EEPROM_REG_18, 0x00 },
- { LP8860_EEPROM_REG_19, 0x00 },
- { LP8860_EEPROM_REG_20, 0x00 },
- { LP8860_EEPROM_REG_21, 0x00 },
- { LP8860_EEPROM_REG_22, 0x00 },
- { LP8860_EEPROM_REG_23, 0x00 },
- { LP8860_EEPROM_REG_24, 0x00 },
};
static const struct regmap_config lp8860_eeprom_regmap_config = {
@@ -366,10 +267,15 @@ static const struct regmap_config lp8860_eeprom_regmap_config = {
.val_bits = 8,
.max_register = LP8860_EEPROM_REG_24,
- .reg_defaults = lp8860_eeprom_defs,
- .num_reg_defaults = ARRAY_SIZE(lp8860_eeprom_defs),
};
+static void lp8860_disable_gpio(void *data)
+{
+ struct gpio_desc *gpio = data;
+
+ gpiod_set_value(gpio, 0);
+}
+
static int lp8860_probe(struct i2c_client *client)
{
int ret;
@@ -377,6 +283,7 @@ static int lp8860_probe(struct i2c_client *client)
struct device_node *np = dev_of_node(&client->dev);
struct device_node *child_node;
struct led_init_data init_data = {};
+ struct gpio_desc *enable_gpio;
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
if (!led)
@@ -386,24 +293,21 @@ static int lp8860_probe(struct i2c_client *client)
if (!child_node)
return -EINVAL;
- led->enable_gpio = devm_gpiod_get_optional(&client->dev,
- "enable", GPIOD_OUT_LOW);
- if (IS_ERR(led->enable_gpio)) {
- ret = PTR_ERR(led->enable_gpio);
- dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
- return ret;
- }
+ enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(enable_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(enable_gpio),
+ "Failed to get enable GPIO\n");
+ devm_add_action_or_reset(&client->dev, lp8860_disable_gpio, enable_gpio);
- led->regulator = devm_regulator_get(&client->dev, "vled");
- if (IS_ERR(led->regulator))
- led->regulator = NULL;
+ ret = devm_regulator_get_enable_optional(&client->dev, "vled");
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to enable vled regulator\n");
led->client = client;
led->led_dev.brightness_set_blocking = lp8860_brightness_set;
- mutex_init(&led->lock);
-
- i2c_set_clientdata(client, led);
+ devm_mutex_init(&client->dev, &led->lock);
led->regmap = devm_regmap_init_i2c(client, &lp8860_regmap_config);
if (IS_ERR(led->regmap)) {
@@ -439,23 +343,6 @@ static int lp8860_probe(struct i2c_client *client)
return 0;
}
-static void lp8860_remove(struct i2c_client *client)
-{
- struct lp8860_led *led = i2c_get_clientdata(client);
- int ret;
-
- gpiod_direction_output(led->enable_gpio, 0);
-
- if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator\n");
- }
-
- mutex_destroy(&led->lock);
-}
-
static const struct i2c_device_id lp8860_id[] = {
{ "lp8860" },
{ }
@@ -474,7 +361,6 @@ static struct i2c_driver lp8860_driver = {
.of_match_table = of_lp8860_leds_match,
},
.probe = lp8860_probe,
- .remove = lp8860_remove,
.id_table = lp8860_id,
};
module_i2c_driver(lp8860_driver);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 1b47acf54720..7d4c071a6cd0 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -318,7 +318,8 @@ static int pca9532_gpio_request_pin(struct gpio_chip *gc, unsigned offset)
return -EBUSY;
}
-static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int val)
+static int pca9532_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct pca9532_data *data = gpiochip_get_data(gc);
struct pca9532_led *led = &data->leds[offset];
@@ -329,6 +330,8 @@ static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int va
led->state = PCA9532_OFF;
pca9532_setled(led);
+
+ return 0;
}
static int pca9532_gpio_get_value(struct gpio_chip *gc, unsigned offset)
@@ -351,9 +354,7 @@ static int pca9532_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int val)
{
- pca9532_gpio_set_value(gc, offset, val);
-
- return 0;
+ return pca9532_gpio_set_value(gc, offset, val);
}
#endif /* CONFIG_LEDS_PCA9532_GPIO */
@@ -472,7 +473,7 @@ static int pca9532_configure(struct i2c_client *client,
data->gpio.label = "gpio-pca9532";
data->gpio.direction_input = pca9532_gpio_direction_input;
data->gpio.direction_output = pca9532_gpio_direction_output;
- data->gpio.set = pca9532_gpio_set_value;
+ data->gpio.set_rv = pca9532_gpio_set_value;
data->gpio.get = pca9532_gpio_get_value;
data->gpio.request = pca9532_gpio_request_pin;
data->gpio.can_sleep = 1;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index e9cfde9fe4b1..42fe056b1c74 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -73,7 +73,7 @@ enum pca955x_type {
};
struct pca955x_chipdef {
- int bits;
+ u8 bits;
u8 slv_addr; /* 7-bit slave address mask */
int slv_addr_shift; /* Number of bits to ignore */
int blink_div; /* PSC divider */
@@ -142,13 +142,13 @@ struct pca955x_platform_data {
};
/* 8 bits per input register */
-static inline int pca955x_num_input_regs(int bits)
+static inline u8 pca955x_num_input_regs(u8 bits)
{
return (bits + 7) / 8;
}
/* 4 bits per LED selector register */
-static inline int pca955x_num_led_regs(int bits)
+static inline u8 pca955x_num_led_regs(u8 bits)
{
return (bits + 3) / 4;
}
@@ -495,10 +495,10 @@ static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset,
return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW);
}
-static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
- pca955x_set_value(gc, offset, val);
+ return pca955x_set_value(gc, offset, val);
}
static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
@@ -581,14 +581,14 @@ static int pca955x_probe(struct i2c_client *client)
struct led_classdev *led;
struct led_init_data init_data;
struct i2c_adapter *adapter;
- int i, bit, err, nls, reg;
+ u8 i, nls, psc0;
u8 ls1[4];
u8 ls2[4];
struct pca955x_platform_data *pdata;
- u8 psc0;
bool keep_psc0 = false;
bool set_default_label = false;
char default_label[8];
+ int bit, err, reg;
chip = i2c_get_match_data(client);
if (!chip)
@@ -610,16 +610,15 @@ static int pca955x_probe(struct i2c_client *client)
return -ENODEV;
}
- dev_info(&client->dev, "leds-pca955x: Using %s %d-bit LED driver at "
- "slave address 0x%02x\n", client->name, chip->bits,
- client->addr);
+ dev_info(&client->dev, "Using %s %u-bit LED driver at slave address 0x%02x\n",
+ client->name, chip->bits, client->addr);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
if (pdata->num_leds != chip->bits) {
dev_err(&client->dev,
- "board info claims %d LEDs on a %d-bit chip\n",
+ "board info claims %d LEDs on a %u-bit chip\n",
pdata->num_leds, chip->bits);
return -ENODEV;
}
@@ -694,8 +693,7 @@ static int pca955x_probe(struct i2c_client *client)
}
if (set_default_label) {
- snprintf(default_label, sizeof(default_label),
- "%d", i);
+ snprintf(default_label, sizeof(default_label), "%u", i);
init_data.default_label = default_label;
} else {
init_data.default_label = NULL;
@@ -739,7 +737,7 @@ static int pca955x_probe(struct i2c_client *client)
pca955x->gpio.label = "gpio-pca955x";
pca955x->gpio.direction_input = pca955x_gpio_direction_input;
pca955x->gpio.direction_output = pca955x_gpio_direction_output;
- pca955x->gpio.set = pca955x_gpio_set_value;
+ pca955x->gpio.set_rv = pca955x_gpio_set_value;
pca955x->gpio.get = pca955x_gpio_get_value;
pca955x->gpio.request = pca955x_gpio_request_pin;
pca955x->gpio.free = pca955x_gpio_free_pin;
diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
index 11c7bb69573e..6ad06ce2bf64 100644
--- a/drivers/leds/leds-pca995x.c
+++ b/drivers/leds/leds-pca995x.c
@@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(i2c, pca995x_id);
static const struct of_device_id pca995x_of_match[] = {
{ .compatible = "nxp,pca9952", .data = &pca9952_chipdef },
- { .compatible = "nxp,pca9955b", . data = &pca9955b_chipdef },
+ { .compatible = "nxp,pca9955b", .data = &pca9955b_chipdef },
{ .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef },
{},
};
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index acbd8169723c..89c165c8ee9c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -588,8 +588,8 @@ static int tca6507_blink_set(struct led_classdev *led_cdev,
}
#ifdef CONFIG_GPIOLIB
-static void tca6507_gpio_set_value(struct gpio_chip *gc,
- unsigned offset, int val)
+static int tca6507_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct tca6507_chip *tca = gpiochip_get_data(gc);
unsigned long flags;
@@ -604,13 +604,14 @@ static void tca6507_gpio_set_value(struct gpio_chip *gc,
spin_unlock_irqrestore(&tca->lock, flags);
if (tca->reg_set)
schedule_work(&tca->work);
+
+ return 0;
}
static int tca6507_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int val)
{
- tca6507_gpio_set_value(gc, offset, val);
- return 0;
+ return tca6507_gpio_set_value(gc, offset, val);
}
static int tca6507_probe_gpios(struct device *dev,
@@ -636,7 +637,7 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.base = -1;
tca->gpio.owner = THIS_MODULE;
tca->gpio.direction_output = tca6507_gpio_direction_output;
- tca->gpio.set = tca6507_gpio_set_value;
+ tca->gpio.set_rv = tca6507_gpio_set_value;
tca->gpio.parent = dev;
err = devm_gpiochip_add_data(dev, &tca->gpio, tca);
if (err) {
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 4fe1a9c0bc1b..25ee5c1eb820 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -361,7 +361,7 @@ static DEVICE_ATTR_RW(gamma_correction);
static struct attribute *omnia_led_controller_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_gamma_correction.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(omnia_led_controller);
@@ -527,7 +527,7 @@ static void omnia_leds_remove(struct i2c_client *client)
static const struct of_device_id of_omnia_leds_match[] = {
{ .compatible = "cznic,turris-omnia-leds", },
- {},
+ { }
};
MODULE_DEVICE_TABLE(of, of_omnia_leds_match);
diff --git a/drivers/leds/rgb/leds-mt6370-rgb.c b/drivers/leds/rgb/leds-mt6370-rgb.c
index ebd3ba878dd5..c5927d0eb830 100644
--- a/drivers/leds/rgb/leds-mt6370-rgb.c
+++ b/drivers/leds/rgb/leds-mt6370-rgb.c
@@ -199,17 +199,17 @@ static const struct reg_field mt6372_reg_fields[F_MAX_FIELDS] = {
/* Current unit: microamp, time unit: millisecond */
static const struct linear_range common_led_ranges[R_MAX_RANGES] = {
- [R_LED123_CURR] = { 4000, 1, 6, 4000 },
- [R_LED4_CURR] = { 2000, 1, 3, 2000 },
- [R_LED_TRFON] = { 125, 0, 15, 200 },
- [R_LED_TOFF] = { 250, 0, 15, 400 },
+ [R_LED123_CURR] = LINEAR_RANGE(4000, 1, 6, 4000),
+ [R_LED4_CURR] = LINEAR_RANGE(2000, 1, 3, 2000),
+ [R_LED_TRFON] = LINEAR_RANGE(125, 0, 15, 200),
+ [R_LED_TOFF] = LINEAR_RANGE(250, 0, 15, 400),
};
static const struct linear_range mt6372_led_ranges[R_MAX_RANGES] = {
- [R_LED123_CURR] = { 2000, 1, 14, 2000 },
- [R_LED4_CURR] = { 2000, 1, 14, 2000 },
- [R_LED_TRFON] = { 125, 0, 15, 250 },
- [R_LED_TOFF] = { 250, 0, 15, 500 },
+ [R_LED123_CURR] = LINEAR_RANGE(2000, 1, 14, 2000),
+ [R_LED4_CURR] = LINEAR_RANGE(2000, 1, 14, 2000),
+ [R_LED_TRFON] = LINEAR_RANGE(125, 0, 15, 250),
+ [R_LED_TOFF] = LINEAR_RANGE(250, 0, 15, 500),
};
static const unsigned int common_tfreqs[] = {
diff --git a/drivers/leds/rgb/leds-ncp5623.c b/drivers/leds/rgb/leds-ncp5623.c
index f18156683375..7c7d44623a9e 100644
--- a/drivers/leds/rgb/leds-ncp5623.c
+++ b/drivers/leds/rgb/leds-ncp5623.c
@@ -155,9 +155,9 @@ static int ncp5623_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct fwnode_handle *mc_node, *led_node;
struct led_init_data init_data = { };
- int num_subleds = 0;
struct ncp5623 *ncp;
struct mc_subled *subled_info;
+ unsigned int num_subleds;
u32 color_index;
u32 reg;
int ret;
@@ -172,8 +172,7 @@ static int ncp5623_probe(struct i2c_client *client)
if (!mc_node)
return -EINVAL;
- fwnode_for_each_child_node(mc_node, led_node)
- num_subleds++;
+ num_subleds = fwnode_get_child_node_count(mc_node);
subled_info = devm_kcalloc(dev, num_subleds, sizeof(*subled_info), GFP_KERNEL);
if (!subled_info) {
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index 1c7705bafdfc..e0d7d3c9215c 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -107,12 +107,12 @@ release_fwnode:
static int led_pwm_mc_probe(struct platform_device *pdev)
{
- struct fwnode_handle *mcnode, *fwnode;
+ struct fwnode_handle *mcnode;
struct led_init_data init_data = {};
struct led_classdev *cdev;
struct mc_subled *subled;
struct pwm_mc_led *priv;
- int count = 0;
+ unsigned int count;
int ret = 0;
mcnode = device_get_named_child_node(&pdev->dev, "multi-led");
@@ -121,8 +121,7 @@ static int led_pwm_mc_probe(struct platform_device *pdev)
"expected multi-led node\n");
/* count the nodes inside the multi-led node */
- fwnode_for_each_child_node(mcnode, fwnode)
- count++;
+ count = fwnode_get_child_node_count(mcnode);
priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, count),
GFP_KERNEL);
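The two hunks above (leds-ncp5623 and leds-pwm-multicolor) replace the same open-coded counting loop with fwnode_get_child_node_count(). Conceptually the helper is just the loop it replaces; a simplified sketch, not the exact in-tree implementation:

static unsigned int count_child_nodes(struct fwnode_handle *node)
{
	struct fwnode_handle *child;
	unsigned int n = 0;

	fwnode_for_each_child_node(node, child)
		n++;

	return n;
}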
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
index b3ee33aed36e..c973246a57f9 100644
--- a/drivers/leds/trigger/ledtrig-activity.c
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -32,8 +32,8 @@ struct activity_data {
static void led_activity_function(struct timer_list *t)
{
- struct activity_data *activity_data = from_timer(activity_data, t,
- timer);
+ struct activity_data *activity_data = timer_container_of(activity_data,
+ t, timer);
struct led_classdev *led_cdev = activity_data->led_cdev;
unsigned int target;
unsigned int usage;
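timer_container_of() is the tree-wide rename of from_timer(): the same container_of()-style lookup from the embedded timer_list back to the private structure. A minimal illustration with a made-up struct:

struct blink_state {
	struct timer_list timer;
	unsigned int ticks;
};

static void blink_timer_fn(struct timer_list *t)
{
	struct blink_state *s = timer_container_of(s, t, timer);

	s->ticks++;
	mod_timer(&s->timer, jiffies + HZ);
}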
diff --git a/drivers/leds/trigger/ledtrig-backlight.c b/drivers/leds/trigger/ledtrig-backlight.c
index 487577d22cfc..c1f0f5becaee 100644
--- a/drivers/leds/trigger/ledtrig-backlight.c
+++ b/drivers/leds/trigger/ledtrig-backlight.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
-#include <linux/fb.h>
#include <linux/leds.h>
#include "../leds.h"
@@ -21,29 +20,20 @@ struct bl_trig_notifier {
struct led_classdev *led;
int brightness;
int old_status;
- struct notifier_block notifier;
unsigned invert;
+
+ struct list_head entry;
};
-static int fb_notifier_callback(struct notifier_block *p,
- unsigned long event, void *data)
+static DEFINE_MUTEX(ledtrig_backlight_list_mutex);
+static LIST_HEAD(ledtrig_backlight_list);
+
+static void ledtrig_backlight_notify_blank(struct bl_trig_notifier *n, int new_status)
{
- struct bl_trig_notifier *n = container_of(p,
- struct bl_trig_notifier, notifier);
struct led_classdev *led = n->led;
- struct fb_event *fb_event = data;
- int *blank;
- int new_status;
-
- /* If we aren't interested in this event, skip it immediately ... */
- if (event != FB_EVENT_BLANK)
- return 0;
-
- blank = fb_event->data;
- new_status = *blank ? BLANK : UNBLANK;
if (new_status == n->old_status)
- return 0;
+ return;
if ((n->old_status == UNBLANK) ^ n->invert) {
n->brightness = led->brightness;
@@ -53,9 +43,19 @@ static int fb_notifier_callback(struct notifier_block *p,
}
n->old_status = new_status;
+}
- return 0;
+void ledtrig_backlight_blank(bool blank)
+{
+ struct bl_trig_notifier *n;
+ int new_status = blank ? BLANK : UNBLANK;
+
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+
+ list_for_each_entry(n, &ledtrig_backlight_list, entry)
+ ledtrig_backlight_notify_blank(n, new_status);
}
+EXPORT_SYMBOL(ledtrig_backlight_blank);
static ssize_t bl_trig_invert_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -100,8 +100,6 @@ ATTRIBUTE_GROUPS(bl_trig);
static int bl_trig_activate(struct led_classdev *led)
{
- int ret;
-
struct bl_trig_notifier *n;
n = kzalloc(sizeof(struct bl_trig_notifier), GFP_KERNEL);
@@ -112,11 +110,9 @@ static int bl_trig_activate(struct led_classdev *led)
n->led = led;
n->brightness = led->brightness;
n->old_status = UNBLANK;
- n->notifier.notifier_call = fb_notifier_callback;
- ret = fb_register_client(&n->notifier);
- if (ret)
- dev_err(led->dev, "unable to register backlight trigger\n");
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+ list_add(&n->entry, &ledtrig_backlight_list);
return 0;
}
@@ -125,7 +121,9 @@ static void bl_trig_deactivate(struct led_classdev *led)
{
struct bl_trig_notifier *n = led_get_trigger_data(led);
- fb_unregister_client(&n->notifier);
+ guard(mutex)(&ledtrig_backlight_list_mutex);
+ list_del(&n->entry);
+
kfree(n);
}
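With the fb notifier removed, the trigger no longer discovers blanking on its own; display code must call the newly exported ledtrig_backlight_blank() instead. A hypothetical caller (struct panel and panel_set_power() are invented for illustration):

void panel_set_power(struct panel *p, bool on)
{
	/* ... program the display hardware ... */

	/* Tell the backlight LED trigger about the new blank state. */
	ledtrig_backlight_blank(!on);
}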
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 393b3ae832f4..40eb61b6d54e 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -33,7 +33,7 @@ struct heartbeat_trig_data {
static void led_heartbeat_function(struct timer_list *t)
{
struct heartbeat_trig_data *heartbeat_data =
- from_timer(heartbeat_data, t, timer);
+ timer_container_of(heartbeat_data, t, timer);
struct led_classdev *led_cdev;
unsigned long brightness = LED_OFF;
unsigned long delay = 0;
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index 06d052957d37..9af3c18f14f4 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -150,7 +150,7 @@ static void pattern_trig_timer_common_function(struct pattern_trig_data *data)
static void pattern_trig_timer_function(struct timer_list *t)
{
- struct pattern_trig_data *data = from_timer(data, t, timer);
+ struct pattern_trig_data *data = timer_container_of(data, t, timer);
return pattern_trig_timer_common_function(data);
}
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index e103c7ed830b..20f1351464b1 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -32,7 +32,7 @@ struct transient_trig_data {
static void transient_timer_function(struct timer_list *t)
{
struct transient_trig_data *transient_data =
- from_timer(transient_data, t, timer);
+ timer_container_of(transient_data, t, timer);
struct led_classdev *led_cdev = transient_data->led_cdev;
transient_data->activate = 0;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ed52db272f4d..68eeed660a4a 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,16 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config CV1800_MBOX
+ tristate "cv1800 mailbox"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Mailbox driver implementation for Sophgo CV18XX SoCs. This driver
+ can be used to send messages between the different processors in
+ the SoC. Any processor can write data into a channel and set the
+ corresponding register to raise an interrupt to notify another
+ processor; a processor is also allowed to send data to itself.
+
config EXYNOS_MBOX
tristate "Exynos Mailbox"
depends on ARCH_EXYNOS || COMPILE_TEST
@@ -191,8 +201,8 @@ config POLARFIRE_SOC_MAILBOX
config MCHP_SBI_IPC_MBOX
tristate "Microchip Inter-processor Communication (IPC) SBI driver"
- depends on RISCV_SBI || COMPILE_TEST
- depends on ARCH_MICROCHIP
+ depends on RISCV_SBI
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
Mailbox implementation for Microchip devices with an
Inter-process communication (IPC) controller.
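From a consumer's point of view, the channel/interrupt scheme described in the CV1800_MBOX help text is hidden behind the generic mailbox client API. A minimal, hypothetical client sketch (all demo_* names are invented; only the mbox_* calls are real API):

#include <linux/mailbox_client.h>

struct demo_client {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

static void demo_rx_callback(struct mbox_client *cl, void *msg)
{
	dev_info(cl->dev, "received: %llx\n",
		 (unsigned long long)*(u64 *)msg);
}

static int demo_send(struct device *dev)
{
	struct demo_client *dc;
	u64 msg = 0xdeadbeef;
	int ret;

	dc = devm_kzalloc(dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->cl.dev = dev;
	dc->cl.rx_callback = demo_rx_callback;
	dc->cl.tx_block = true;
	dc->cl.tx_tout = 500;	/* ms */

	dc->chan = mbox_request_channel(&dc->cl, 0);
	if (IS_ERR(dc->chan))
		return PTR_ERR(dc->chan);

	/* The CV18XX driver copies MAILBOX_MSG_LEN (8) bytes per send. */
	ret = mbox_send_message(dc->chan, &msg);
	return ret < 0 ? ret : 0;
}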
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 9a1542b55539..13a3448b3271 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
+
obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
diff --git a/drivers/mailbox/cv1800-mailbox.c b/drivers/mailbox/cv1800-mailbox.c
new file mode 100644
index 000000000000..4761191acf78
--- /dev/null
+++ b/drivers/mailbox/cv1800-mailbox.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Yuntao Dai <d1581209858@live.com>
+ * Copyright (C) 2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RECV_CPU 1
+
+#define MAILBOX_MAX_CHAN 8
+#define MAILBOX_MSG_LEN 8
+
+#define MBOX_EN_REG(cpu) (cpu << 2)
+#define MBOX_DONE_REG(cpu) ((cpu << 2) + 2)
+#define MBOX_SET_CLR_REG(cpu) (0x10 + (cpu << 4))
+#define MBOX_SET_INT_REG(cpu) (0x18 + (cpu << 4))
+#define MBOX_SET_REG 0x60
+
+#define MAILBOX_CONTEXT_OFFSET 0x0400
+#define MAILBOX_CONTEXT_SIZE 0x0040
+
+#define MBOX_CONTEXT_BASE_INDEX(base, index) \
+ ((u64 __iomem *)(base + MAILBOX_CONTEXT_OFFSET) + index)
+
+/**
+ * struct cv1800_mbox_chan_priv - cv1800 mailbox channel private data
+ * @idx: index of channel
+ * @cpu: send to which processor
+ */
+struct cv1800_mbox_chan_priv {
+ int idx;
+ int cpu;
+};
+
+struct cv1800_mbox {
+ struct mbox_controller mbox;
+ struct cv1800_mbox_chan_priv priv[MAILBOX_MAX_CHAN];
+ struct mbox_chan chans[MAILBOX_MAX_CHAN];
+ u64 __iomem *content[MAILBOX_MAX_CHAN];
+ void __iomem *mbox_base;
+ int recvid;
+};
+
+static irqreturn_t cv1800_mbox_isr(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ size_t i;
+ u64 msg;
+ int ret = IRQ_NONE;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ if (mbox->content[i] && mbox->chans[i].cl) {
+ memcpy_fromio(&msg, mbox->content[i], MAILBOX_MSG_LEN);
+ mbox->content[i] = NULL;
+ mbox_chan_received_data(&mbox->chans[i], (void *)&msg);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t cv1800_mbox_irq(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ u8 set, valid;
+ size_t i;
+ int ret = IRQ_NONE;
+
+ set = readb(mbox->mbox_base + MBOX_SET_INT_REG(RECV_CPU));
+
+ if (!set)
+ return ret;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ valid = set & BIT(i);
+ if (valid) {
+ mbox->content[i] =
+ MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, i);
+ writeb(valid, mbox->mbox_base +
+ MBOX_SET_CLR_REG(RECV_CPU));
+ writeb(~valid, mbox->mbox_base + MBOX_EN_REG(RECV_CPU));
+ ret = IRQ_WAKE_THREAD;
+ }
+ }
+
+ return ret;
+}
+
+static int cv1800_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ int idx = priv->idx;
+ int cpu = priv->cpu;
+ u8 en, valid;
+
+ memcpy_toio(MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, idx),
+ data, MAILBOX_MSG_LEN);
+
+ valid = BIT(idx);
+ writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu));
+ en = readb(mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(valid, mbox->mbox_base + MBOX_SET_REG);
+
+ return 0;
+}
+
+static bool cv1800_last_tx_done(struct mbox_chan *chan)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ u8 en;
+
+ en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu));
+
+ return !(en & BIT(priv->idx));
+}
+
+static const struct mbox_chan_ops cv1800_mbox_chan_ops = {
+ .send_data = cv1800_mbox_send_data,
+ .last_tx_done = cv1800_last_tx_done,
+};
+
+static struct mbox_chan *cv1800_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct cv1800_mbox_chan_priv *priv;
+
+ int idx = spec->args[0];
+ int cpu = spec->args[1];
+
+ if (idx >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ priv = mbox->chans[idx].con_priv;
+ priv->cpu = cpu;
+
+ return &mbox->chans[idx];
+}
+
+static const struct of_device_id cv1800_mbox_of_match[] = {
+ { .compatible = "sophgo,cv1800b-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cv1800_mbox_of_match);
+
+static int cv1800_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800_mbox *mb;
+ int irq, idx, err;
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mb->mbox_base))
+ return dev_err_probe(dev, PTR_ERR(mb->mbox_base),
+ "Failed to map resource\n");
+
+ mb->mbox.dev = dev;
+ mb->mbox.chans = mb->chans;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.ops = &cv1800_mbox_chan_ops;
+ mb->mbox.num_chans = MAILBOX_MAX_CHAN;
+ mb->mbox.of_xlate = cv1800_mbox_xlate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_threaded_irq(dev, irq, cv1800_mbox_irq,
+ cv1800_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to register irq\n");
+
+ for (idx = 0; idx < MAILBOX_MAX_CHAN; idx++) {
+ mb->priv[idx].idx = idx;
+ mb->mbox.chans[idx].con_priv = &mb->priv[idx];
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ err = devm_mbox_controller_register(dev, &mb->mbox);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register mailbox\n");
+
+ return 0;
+}
+
+static struct platform_driver cv1800_mbox_driver = {
+ .driver = {
+ .name = "cv1800-mbox",
+ .of_match_table = cv1800_mbox_of_match,
+ },
+ .probe = cv1800_mbox_probe,
+};
+
+module_platform_driver(cv1800_mbox_driver);
+
+MODULE_DESCRIPTION("cv1800 mailbox driver");
+MODULE_LICENSE("GPL");
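For readers decoding the register macros in the new driver, the offsets for the receiving core (RECV_CPU == 1) work out as follows:

/*
 * MBOX_EN_REG(1)      = 1 << 2           = 0x04
 * MBOX_DONE_REG(1)    = (1 << 2) + 2     = 0x06
 * MBOX_SET_CLR_REG(1) = 0x10 + (1 << 4)  = 0x20
 * MBOX_SET_INT_REG(1) = 0x18 + (1 << 4)  = 0x28
 *
 * Channel i's 8-byte payload lives at mbox_base + 0x400 + 8 * i,
 * which is what MBOX_CONTEXT_BASE_INDEX(base, i) points at.
 */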
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 6ef8338add0d..6778afc64a04 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -226,7 +226,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
{
u32 *arg = data;
u32 val;
- int ret;
+ int ret, count;
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -240,11 +240,20 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
case IMX_MU_TYPE_TXDB_V2:
imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
priv->dcfg->xCR[IMX_MU_GCR]);
- ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
- !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
- 0, 1000);
- if (ret)
- dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
+ ret = -ETIMEDOUT;
+ count = 0;
+ while (ret && (count < 10)) {
+ ret =
+ readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
+ !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+ 0, 10000);
+
+ if (ret) {
+ dev_warn_ratelimited(priv->dev,
+ "channel type: %d timeout, %d times, retry\n",
+ cp->type, ++count);
+ }
+ }
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index 748128661892..17278c2571d3 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -130,7 +130,7 @@ static void altera_mbox_rx_data(struct mbox_chan *chan)
static void altera_mbox_poll_rx(struct timer_list *t)
{
- struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
+ struct altera_mbox *mbox = timer_container_of(mbox, t, rxpoll_timer);
altera_mbox_rx_data(mbox->chan);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 0593b4d03685..5cd8ae222073 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -6,6 +6,7 @@
* Author: Jassi Brar <jassisinghbrar@gmail.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -24,15 +25,12 @@ static DEFINE_MUTEX(con_mutex);
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ guard(spinlock_irqsave)(&chan->lock);
/* See if there is any space left */
- if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN)
return -ENOBUFS;
- }
idx = chan->msg_free;
chan->msg_data[idx] = mssg;
@@ -43,60 +41,53 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
else
chan->msg_free++;
- spin_unlock_irqrestore(&chan->lock, flags);
-
return idx;
}
static void msg_submit(struct mbox_chan *chan)
{
unsigned count, idx;
- unsigned long flags;
void *data;
int err = -EBUSY;
- spin_lock_irqsave(&chan->lock, flags);
-
- if (!chan->msg_count || chan->active_req)
- goto exit;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ if (!chan->msg_count || chan->active_req)
+ break;
- count = chan->msg_count;
- idx = chan->msg_free;
- if (idx >= count)
- idx -= count;
- else
- idx += MBOX_TX_QUEUE_LEN - count;
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
- data = chan->msg_data[idx];
+ data = chan->msg_data[idx];
- if (chan->cl->tx_prepare)
- chan->cl->tx_prepare(chan->cl, data);
- /* Try to submit a message to the MBOX controller */
- err = chan->mbox->ops->send_data(chan, data);
- if (!err) {
- chan->active_req = data;
- chan->msg_count--;
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
}
-exit:
- spin_unlock_irqrestore(&chan->lock, flags);
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
/* kick start the timer immediately to avoid delays */
- spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
- spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
}
static void tx_tick(struct mbox_chan *chan, int r)
{
- unsigned long flags;
void *mssg;
- spin_lock_irqsave(&chan->lock, flags);
- mssg = chan->active_req;
- chan->active_req = NULL;
- spin_unlock_irqrestore(&chan->lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ }
/* Submit next message */
msg_submit(chan);
@@ -118,7 +109,6 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
- unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
@@ -133,10 +123,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
}
if (resched) {
- spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
- if (!hrtimer_is_queued(hrtimer))
- hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
- spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ }
return HRTIMER_RESTART;
}
@@ -318,25 +308,23 @@ EXPORT_SYMBOL_GPL(mbox_flush);
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
struct device *dev = cl->dev;
- unsigned long flags;
int ret;
if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
- dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ dev_err(dev, "%s: mailbox not free\n", __func__);
return -EBUSY;
}
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
-
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+ }
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
@@ -370,13 +358,9 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
*/
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
- int ret;
-
- mutex_lock(&con_mutex);
- ret = __mbox_bind_client(chan, cl);
- mutex_unlock(&con_mutex);
+ guard(mutex)(&con_mutex);
- return ret;
+ return __mbox_bind_client(chan, cl);
}
EXPORT_SYMBOL_GPL(mbox_bind_client);
@@ -413,32 +397,29 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
index, &spec);
if (ret) {
- dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
return ERR_PTR(ret);
}
- mutex_lock(&con_mutex);
+ scoped_guard(mutex, &con_mutex) {
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
- chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
- }
+ of_node_put(spec.np);
- of_node_put(spec.np);
+ if (IS_ERR(chan))
+ return chan;
- if (IS_ERR(chan)) {
- mutex_unlock(&con_mutex);
- return chan;
+ ret = __mbox_bind_client(chan, cl);
+ if (ret)
+ chan = ERR_PTR(ret);
}
- ret = __mbox_bind_client(chan, cl);
- if (ret)
- chan = ERR_PTR(ret);
-
- mutex_unlock(&con_mutex);
return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
@@ -458,7 +439,7 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(index);
}
return mbox_request_channel(cl, index);
}
@@ -471,8 +452,6 @@ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
*/
void mbox_free_channel(struct mbox_chan *chan)
{
- unsigned long flags;
-
if (!chan || !chan->cl)
return;
@@ -480,14 +459,14 @@ void mbox_free_channel(struct mbox_chan *chan)
chan->mbox->ops->shutdown(chan);
/* The queued TX requests are simply aborted, no callbacks are made */
- spin_lock_irqsave(&chan->lock, flags);
- chan->cl = NULL;
- chan->active_req = NULL;
- if (chan->txdone_method == TXDONE_BY_ACK)
- chan->txdone_method = TXDONE_BY_POLL;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+ }
module_put(chan->mbox->dev->driver->owner);
- spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
@@ -547,9 +526,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
if (!mbox->of_xlate)
mbox->of_xlate = of_mbox_index_xlate;
- mutex_lock(&con_mutex);
- list_add_tail(&mbox->node, &mbox_cons);
- mutex_unlock(&con_mutex);
+ scoped_guard(mutex, &con_mutex)
+ list_add_tail(&mbox->node, &mbox_cons);
return 0;
}
@@ -566,17 +544,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
if (!mbox)
return;
- mutex_lock(&con_mutex);
-
- list_del(&mbox->node);
-
- for (i = 0; i < mbox->num_chans; i++)
- mbox_free_channel(&mbox->chans[i]);
+ scoped_guard(mutex, &con_mutex) {
+ list_del(&mbox->node);
- if (mbox->txdone_poll)
- hrtimer_cancel(&mbox->poll_hrt);
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
- mutex_unlock(&con_mutex);
+ if (mbox->txdone_poll)
+ hrtimer_cancel(&mbox->poll_hrt);
+ }
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);
@@ -587,16 +563,6 @@ static void __devm_mbox_controller_unregister(struct device *dev, void *res)
mbox_controller_unregister(*mbox);
}
-static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
-{
- struct mbox_controller **mbox = res;
-
- if (WARN_ON(!mbox || !*mbox))
- return 0;
-
- return *mbox == data;
-}
-
/**
* devm_mbox_controller_register() - managed mbox_controller_register()
* @dev: device owning the mailbox controller being registered
@@ -632,20 +598,3 @@ int devm_mbox_controller_register(struct device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
-
-/**
- * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
- * @dev: device owning the mailbox controller being unregistered
- * @mbox: mailbox controller being unregistered
- *
- * This function unregisters the mailbox controller and removes the device-
- * managed resource that was set up to automatically unregister the mailbox
- * controller on driver probe failure or driver removal. It's typically not
- * necessary to call this function.
- */
-void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
-{
- WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
- devm_mbox_controller_match, mbox));
-}
-EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
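The mailbox.c conversion leans on <linux/cleanup.h>: guard() holds a lock until the enclosing function scope ends and scoped_guard() until its block ends, so early returns need no unlock label. A minimal sketch of the idiom with a hypothetical queue:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t lock;
	unsigned int count, cap;
	void *slots[16];
};

static int demo_enqueue(struct demo_queue *q, void *item)
{
	guard(spinlock_irqsave)(&q->lock);

	if (q->count == q->cap)
		return -ENOBUFS;	/* lock released automatically */

	q->slots[q->count++] = item;
	return 0;			/* lock released automatically */
}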
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index d186865b8dce..ab4e8d1954a1 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -92,18 +92,6 @@ struct gce_plat {
u32 gce_num;
};
-static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
-{
- WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
- if (enable)
- writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
- else
- writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
-
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
-}
-
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
@@ -112,6 +100,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
+static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
+{
+ u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;
+
+ if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
+ return;
+
+ if (cmdq->pdata->sw_ddr_en && ddr_enable)
+ val |= GCE_DDR_EN;
+
+ writel(val, cmdq->base + GCE_GCTL_VALUE);
+}
+
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 status;
@@ -140,16 +141,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
static void cmdq_init(struct cmdq *cmdq)
{
int i;
- u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
- if (cmdq->pdata->control_by_sw)
- gctl_regval = GCE_CTRL_BY_SW;
- if (cmdq->pdata->sw_ddr_en)
- gctl_regval |= GCE_DDR_EN;
- if (gctl_regval)
- writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
+ cmdq_gctl_value_toggle(cmdq, true);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
@@ -315,14 +310,21 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
static int cmdq_runtime_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ int ret;
- return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ if (ret)
+ return ret;
+
+ cmdq_gctl_value_toggle(cmdq, true);
+ return 0;
}
static int cmdq_runtime_suspend(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ cmdq_gctl_value_toggle(cmdq, false);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
@@ -347,9 +349,6 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
return pm_runtime_force_suspend(dev);
}
@@ -360,9 +359,6 @@ static int cmdq_resume(struct device *dev)
WARN_ON(pm_runtime_force_resume(dev));
cmdq->suspended = false;
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, true);
-
return 0;
}
@@ -370,9 +366,6 @@ static void cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
if (!IS_ENABLED(CONFIG_PM))
cmdq_runtime_suspend(&pdev->dev);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 11c41e935a36..8b24ec0fa191 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -116,10 +116,18 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
}
if (apcs_data->clk_name) {
- apcs->clk = platform_device_register_data(&pdev->dev,
- apcs_data->clk_name,
- PLATFORM_DEVID_AUTO,
- NULL, 0);
+ struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
+ "clock-controller");
+ struct platform_device_info pdevinfo = {
+ .parent = &pdev->dev,
+ .name = apcs_data->clk_name,
+ .id = PLATFORM_DEVID_AUTO,
+ .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode,
+ .of_node_reused = !np,
+ };
+
+ apcs->clk = platform_device_register_full(&pdevinfo);
+ of_node_put(np);
if (IS_ERR(apcs->clk))
dev_err(&pdev->dev, "failed to register APCS clk\n");
}
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 0b17a38ea6bf..ea44ffb5ce1a 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -312,8 +312,8 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
- &qcom_ipcc_irq_ops, ipcc);
+ ipcc->irq_domain = irq_domain_create_tree(of_fwnode_handle(pdev->dev.of_node),
+ &qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ed40d8600656..1d0100677357 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -36,6 +36,7 @@
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
+#include <linux/sort.h>
#include <trace/events/bcache.h>
/*
@@ -88,8 +89,6 @@
* Test module load/unload
*/
-#define MAX_NEED_GC 64
-#define MAX_SAVE_PRIO 72
#define MAX_GC_TIMES 100
#define MIN_GC_NODES 100
#define GC_SLEEP_MS 100
@@ -559,8 +558,6 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
}
}
-#define cmp_int(l, r) ((l > r) - (l < r))
-
#ifdef CONFIG_PROVE_LOCKING
static int btree_lock_cmp_fn(const struct lockdep_map *_a,
const struct lockdep_map *_b)
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index d39dec34b7a3..0056106495a7 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -149,7 +149,7 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
static void scale_accounting(struct timer_list *t)
{
- struct cache_accounting *acc = from_timer(acc, t, timer);
+ struct cache_accounting *acc = timer_container_of(acc, t, timer);
#define move_stat(name) do { \
unsigned int t = atomic_xchg(&acc->collector.name, 0); \
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 813b38aec3e4..1efb768b2890 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -293,8 +293,7 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
bio->bi_iter.bi_sector = SB_SECTOR;
- __bio_add_page(bio, virt_to_page(out), SB_SIZE,
- offset_in_page(out));
+ bio_add_virt_nofail(bio, out, SB_SIZE);
out->offset = cpu_to_le64(sb->offset);
@@ -546,7 +545,8 @@ static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
- static const char zero_uuid[16] = { 0 };
+ static const char zero_uuid[16] __nonstring =
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
return uuid_find(c, zero_uuid);
}
@@ -1733,7 +1733,12 @@ static CLOSURE_CALLBACK(cache_set_flush)
mutex_unlock(&b->write_lock);
}
- if (ca->alloc_thread)
+ /*
+ * If the call to bch_cache_set_alloc() in register_cache_set()
+ * failed, ca was never assigned and an error was returned, so we
+ * must check that ca is not NULL during bch_cache_set_unregister().
+ */
+ if (ca && ca->alloc_thread)
kthread_stop(ca->alloc_thread);
if (c->journal.cur) {
@@ -2233,15 +2238,47 @@ static int cache_alloc(struct cache *ca)
bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
/*
- * when ca->sb.njournal_buckets is not zero, journal exists,
- * and in bch_journal_replay(), tree node may split,
- * so bucket of RESERVE_BTREE type is needed,
- * the worst situation is all journal buckets are valid journal,
- * and all the keys need to replay,
- * so the number of RESERVE_BTREE type buckets should be as much
- * as journal buckets
+ * When the cache disk is first registered, ca->sb.njournal_buckets
+ * is zero, and it is assigned in run_cache_set().
+ *
+ * When ca->sb.njournal_buckets is not zero, journal exists,
+ * and in bch_journal_replay() a tree node may split.
+ * The worst case is that all journal buckets hold valid journal
+ * entries and every key needs to be replayed, so there should be
+ * at least as many RESERVE_BTREE type buckets as journal buckets.
+ *
+ * If the number of RESERVE_BTREE type buckets is too small,
+ * bch_allocator_thread() may hang, unable to allocate a bucket.
+ * The situation is roughly as follows:
+ *
+ * 1. In bch_data_insert_keys(), if the operation is not op->replace,
+ * it will call bch_journal(), which increments the journal_ref
+ * counter. This counter is only decremented after bch_btree_insert
+ * completes.
+ *
+ * 2. When calling bch_btree_insert, if the btree needs to split,
+ * it will call btree_split() and btree_check_reserve() to check
+ * whether there are enough reserved buckets in the RESERVE_BTREE
+ * slot. If not enough, bcache_btree_root() will repeatedly retry.
+ *
+ * 3. Normally, the bch_allocator_thread is responsible for filling
+ * the reservation slots from the free_inc bucket list. When the
+ * free_inc bucket list is exhausted, the bch_allocator_thread
+ * will call invalidate_buckets() until free_inc is refilled.
+ * Then bch_allocator_thread calls bch_prio_write() once. and
+ * bch_prio_write() will call bch_journal_meta() and waits for
+ * the journal write to complete.
+ *
+ * 4. During journal_write, journal_write_unlocked() is called.
+ * If the journal becomes full, journal_reclaim() and btree_flush_write()
+ * are called sequentially, and then journal_write is retried.
+ *
+ * 5. When 2 and 4 occur together, IO hangs and cannot recover.
+ *
+ * Therefore, reserve more RESERVE_BTREE type buckets.
*/
- btree_buckets = ca->sb.njournal_buckets ?: 8;
+ btree_buckets = clamp_t(size_t, ca->sb.nbuckets >> 7,
+ 32, SB_JOURNAL_BUCKETS);
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
if (!free) {
ret = -EPERM;
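Assuming SB_JOURNAL_BUCKETS is 256 (its value in the bcache on-disk format), the new sizing scales with the device rather than with the journal; some sample values (my arithmetic):

/*
 * btree_buckets = clamp(nbuckets >> 7, 32, SB_JOURNAL_BUCKETS)
 *
 *   nbuckets =  2,000  ->  2000 >> 7 =  15  -> clamped up to 32
 *   nbuckets = 20,000  -> 20000 >> 7 = 156  -> used as-is
 *   nbuckets = 65,536  -> 65536 >> 7 = 512  -> clamped down to 256
 */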
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index f0b5a6931161..ec84ba5e93e5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -41,16 +41,6 @@
#define DM_BUFIO_LOW_WATERMARK_RATIO 16
/*
- * Check buffer ages in this interval (seconds)
- */
-#define DM_BUFIO_WORK_TIMER_SECS 30
-
-/*
- * Free buffers when they are older than this (seconds)
- */
-#define DM_BUFIO_DEFAULT_AGE_SECS 300
-
-/*
* The nr of bytes of cached data to keep around.
*/
#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
@@ -1057,10 +1047,8 @@ static unsigned long dm_bufio_cache_size_latch;
static DEFINE_SPINLOCK(global_spinlock);
-/*
- * Buffers are freed after this timeout
- */
-static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+static unsigned int dm_bufio_max_age; /* No longer does anything */
+
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
@@ -1088,7 +1076,6 @@ static LIST_HEAD(dm_bufio_all_clients);
static DEFINE_MUTEX(dm_bufio_clients_lock);
static struct workqueue_struct *dm_bufio_wq;
-static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;
@@ -1364,7 +1351,7 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
ptr = (char *)b->data + offset;
len = n_sectors << SECTOR_SHIFT;
- __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
+ bio_add_virt_nofail(bio, ptr, len);
submit_bio(bio);
}
@@ -2680,130 +2667,6 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
/*--------------------------------------------------------------*/
-static unsigned int get_max_age_hz(void)
-{
- unsigned int max_age = READ_ONCE(dm_bufio_max_age);
-
- if (max_age > UINT_MAX / HZ)
- max_age = UINT_MAX / HZ;
-
- return max_age * HZ;
-}
-
-static bool older_than(struct dm_buffer *b, unsigned long age_hz)
-{
- return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
-}
-
-struct evict_params {
- gfp_t gfp;
- unsigned long age_hz;
-
- /*
- * This gets updated with the largest last_accessed (ie. most
- * recently used) of the evicted buffers. It will not be reinitialised
- * by __evict_many(), so you can use it across multiple invocations.
- */
- unsigned long last_accessed;
-};
-
-/*
- * We may not be able to evict this buffer if IO pending or the client
- * is still using it.
- *
- * And if GFP_NOFS is used, we must not do any I/O because we hold
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
- * rerouted to different bufio client.
- */
-static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
-{
- struct evict_params *params = context;
-
- if (!(params->gfp & __GFP_FS) ||
- (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
- if (test_bit_acquire(B_READING, &b->state) ||
- test_bit(B_WRITING, &b->state) ||
- test_bit(B_DIRTY, &b->state))
- return ER_DONT_EVICT;
- }
-
- return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
-}
-
-static unsigned long __evict_many(struct dm_bufio_client *c,
- struct evict_params *params,
- int list_mode, unsigned long max_count)
-{
- unsigned long count;
- unsigned long last_accessed;
- struct dm_buffer *b;
-
- for (count = 0; count < max_count; count++) {
- b = cache_evict(&c->cache, list_mode, select_for_evict, params);
- if (!b)
- break;
-
- last_accessed = READ_ONCE(b->last_accessed);
- if (time_after_eq(params->last_accessed, last_accessed))
- params->last_accessed = last_accessed;
-
- __make_buffer_clean(b);
- __free_buffer_wake(b);
-
- cond_resched();
- }
-
- return count;
-}
-
-static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
-{
- struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
- unsigned long retain = get_retain_buffers(c);
- unsigned long count;
- LIST_HEAD(write_list);
-
- dm_bufio_lock(c);
-
- __check_watermark(c, &write_list);
- if (unlikely(!list_empty(&write_list))) {
- dm_bufio_unlock(c);
- __flush_write_list(&write_list);
- dm_bufio_lock(c);
- }
-
- count = cache_total(&c->cache);
- if (count > retain)
- __evict_many(c, &params, LIST_CLEAN, count - retain);
-
- dm_bufio_unlock(c);
-}
-
-static void cleanup_old_buffers(void)
-{
- unsigned long max_age_hz = get_max_age_hz();
- struct dm_bufio_client *c;
-
- mutex_lock(&dm_bufio_clients_lock);
-
- __cache_size_refresh();
-
- list_for_each_entry(c, &dm_bufio_all_clients, client_list)
- evict_old_buffers(c, max_age_hz);
-
- mutex_unlock(&dm_bufio_clients_lock);
-}
-
-static void work_fn(struct work_struct *w)
-{
- cleanup_old_buffers();
-
- queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
- DM_BUFIO_WORK_TIMER_SECS * HZ);
-}
-
-/*--------------------------------------------------------------*/
-
/*
* Global cleanup tries to evict the oldest buffers from across _all_
* the clients. It does this by repeatedly evicting a few buffers from
@@ -2841,27 +2704,51 @@ static void __insert_client(struct dm_bufio_client *new_client)
list_add_tail(&new_client->client_list, h);
}
+static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
+{
+ /* In no-sleep mode, we cannot wait on IO. */
+ if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) {
+ if (test_bit_acquire(B_READING, &b->state) ||
+ test_bit(B_WRITING, &b->state) ||
+ test_bit(B_DIRTY, &b->state))
+ return ER_DONT_EVICT;
+ }
+ return ER_EVICT;
+}
+
static unsigned long __evict_a_few(unsigned long nr_buffers)
{
- unsigned long count;
struct dm_bufio_client *c;
- struct evict_params params = {
- .gfp = GFP_KERNEL,
- .age_hz = 0,
- /* set to jiffies in case there are no buffers in this client */
- .last_accessed = jiffies
- };
+ unsigned long oldest_buffer = jiffies;
+ unsigned long last_accessed;
+ unsigned long count;
+ struct dm_buffer *b;
c = __pop_client();
if (!c)
return 0;
dm_bufio_lock(c);
- count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
+
+ for (count = 0; count < nr_buffers; count++) {
+ b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL);
+ if (!b)
+ break;
+
+ last_accessed = READ_ONCE(b->last_accessed);
+ if (time_after_eq(oldest_buffer, last_accessed))
+ oldest_buffer = last_accessed;
+
+ __make_buffer_clean(b);
+ __free_buffer_wake(b);
+
+ cond_resched();
+ }
+
dm_bufio_unlock(c);
if (count)
- c->oldest_buffer = params.last_accessed;
+ c->oldest_buffer = oldest_buffer;
__insert_client(c);
return count;
@@ -2944,10 +2831,7 @@ static int __init dm_bufio_init(void)
if (!dm_bufio_wq)
return -ENOMEM;
- INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
- queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
- DM_BUFIO_WORK_TIMER_SECS * HZ);
return 0;
}
@@ -2959,7 +2843,6 @@ static void __exit dm_bufio_exit(void)
{
int bug = 0;
- cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
destroy_workqueue(dm_bufio_wq);
if (dm_bufio_client_count) {
@@ -2996,7 +2879,7 @@ module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
-MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
+MODULE_PARM_DESC(max_age_seconds, "No longer does anything");
module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 3637761f3585..c889332e533b 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -141,6 +141,7 @@ struct mapped_device {
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_zones;
void *zone_revalidate_map;
+ struct task_struct *revalidate_map_task;
#endif
#ifdef CONFIG_IMA
@@ -162,9 +163,6 @@ struct mapped_device {
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
-void disable_discard(struct mapped_device *md);
-void disable_write_zeroes(struct mapped_device *md);
-
static inline sector_t dm_get_size(struct mapped_device *md)
{
return get_capacity(md->disk);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index d4cf0ac2a7aa..4bb6553278c7 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -14,11 +14,14 @@
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/delay.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "delay"
+#define SLEEP_SHIFT 3
+
struct delay_class {
struct dm_dev *dev;
sector_t start;
@@ -34,6 +37,7 @@ struct delay_c {
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
struct task_struct *worker;
+ unsigned int worker_sleep_us;
bool may_delay;
struct delay_class read;
@@ -52,7 +56,7 @@ struct dm_delay_info {
static void handle_delayed_timer(struct timer_list *t)
{
- struct delay_c *dc = from_timer(dc, t, delay_timer);
+ struct delay_c *dc = timer_container_of(dc, t, delay_timer);
queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}
@@ -136,6 +140,7 @@ static int flush_worker_fn(void *data)
schedule();
} else {
spin_unlock(&dc->delayed_bios_lock);
+ fsleep(dc->worker_sleep_us);
cond_resched();
}
}
@@ -212,7 +217,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct delay_c *dc;
int ret;
- unsigned int max_delay;
+ unsigned int max_delay, min_delay;
if (argc != 3 && argc != 6 && argc != 9) {
ti->error = "Requires exactly 3, 6 or 9 arguments";
@@ -235,7 +240,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ret = delay_class_ctr(ti, &dc->read, argv);
if (ret)
goto bad;
- max_delay = dc->read.delay;
+ min_delay = max_delay = dc->read.delay;
if (argc == 3) {
ret = delay_class_ctr(ti, &dc->write, argv);
@@ -251,6 +256,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret)
goto bad;
max_delay = max(max_delay, dc->write.delay);
+ min_delay = min_not_zero(min_delay, dc->write.delay);
if (argc == 6) {
ret = delay_class_ctr(ti, &dc->flush, argv + 3);
@@ -263,9 +269,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret)
goto bad;
max_delay = max(max_delay, dc->flush.delay);
+ min_delay = min_not_zero(min_delay, dc->flush.delay);
out:
if (max_delay < 50) {
+ if (min_delay >> SLEEP_SHIFT)
+ dc->worker_sleep_us = 1000;
+ else
+ dc->worker_sleep_us = (min_delay * 1000) >> SLEEP_SHIFT;
/*
* In case of small requested delays, use kthread instead of
* timers and workqueue to achieve better latency.
@@ -438,7 +449,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = delay_ctr,
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 1a33820c9f46..e75310232bbf 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -534,7 +534,9 @@ static void dust_status(struct dm_target *ti, status_type_t type,
}
}
-static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct dust_device *dd = ti->private;
struct dm_dev *dev = dd->dev;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index b19b0142a690..6abb31ca9662 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -415,7 +415,8 @@ static void ebs_status(struct dm_target *ti, status_type_t type,
}
}
-static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg, bool *forward)
{
struct ebs_c *ec = ti->private;
struct dm_dev *dev = ec->dev;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b690905ab89f..c711db6f8f5c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -47,14 +47,15 @@ enum feature_flag_bits {
};
struct per_bio_data {
- bool bio_submitted;
+ bool bio_can_corrupt;
+ struct bvec_iter saved_iter;
};
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
- int r;
- unsigned int argc;
+ int r = 0;
+ unsigned int argc = 0;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -65,14 +66,13 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
{0, PROBABILITY_BASE, "Invalid random corrupt argument"},
};
- /* No feature arguments supplied. */
- if (!as->argc)
- return 0;
-
- r = dm_read_arg_group(_args, as, &argc, &ti->error);
- if (r)
+ if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error)))
return r;
+ /* No feature arguments supplied. */
+ if (!argc)
+ goto error_all_io;
+
while (argc) {
arg_name = dm_shift_arg(as);
argc--;
@@ -128,8 +128,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
*/
if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
- if (!argc) {
- ti->error = "Feature corrupt_bio_byte requires parameters";
+ if (fc->corrupt_bio_byte) {
+ ti->error = "Feature corrupt_bio_byte duplicated";
+ return -EINVAL;
+ } else if (argc < 4) {
+ ti->error = "Feature corrupt_bio_byte requires 4 parameters";
return -EINVAL;
}
@@ -176,7 +179,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
}
if (!strcasecmp(arg_name, "random_read_corrupt")) {
- if (!argc) {
+ if (fc->random_read_corrupt) {
+ ti->error = "Feature random_read_corrupt duplicated";
+ return -EINVAL;
+ } else if (!argc) {
ti->error = "Feature random_read_corrupt requires a parameter";
return -EINVAL;
}
@@ -189,7 +195,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
}
if (!strcasecmp(arg_name, "random_write_corrupt")) {
- if (!argc) {
+ if (fc->random_write_corrupt) {
+ ti->error = "Feature random_write_corrupt duplicated";
+ return -EINVAL;
+ } else if (!argc) {
ti->error = "Feature random_write_corrupt requires a parameter";
return -EINVAL;
}
@@ -205,18 +214,25 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
return -EINVAL;
}
- if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
- ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ if (test_bit(DROP_WRITES, &fc->flags) &&
+ (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
return -EINVAL;
- } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
- ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ } else if (test_bit(ERROR_WRITES, &fc->flags) &&
+ (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) {
+ ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
+ } else if (test_bit(ERROR_READS, &fc->flags) &&
+ (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) {
+ ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set";
return -EINVAL;
}
if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) &&
!test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) &&
!fc->random_read_corrupt && !fc->random_write_corrupt) {
+error_all_io:
set_bit(ERROR_WRITES, &fc->flags);
set_bit(ERROR_READS, &fc->flags);
}
@@ -278,7 +294,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
+ r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error);
if (r)
goto bad;
@@ -339,7 +355,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
}
static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
- unsigned char corrupt_bio_value)
+ unsigned char corrupt_bio_value,
+ struct bvec_iter start)
{
struct bvec_iter iter;
struct bio_vec bvec;
@@ -348,7 +365,7 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
* Overwrite the Nth byte of the bio's data, on whichever page
* it falls.
*/
- bio_for_each_segment(bvec, bio, iter) {
+ __bio_for_each_segment(bvec, bio, iter, start) {
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
unsigned char *segment = bvec_kmap_local(&bvec);
segment[corrupt_bio_byte] = corrupt_bio_value;
@@ -357,36 +374,31 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte,
"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
bio, corrupt_bio_value, corrupt_bio_byte,
(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
- (unsigned long long)bio->bi_iter.bi_sector,
- bio->bi_iter.bi_size);
+ (unsigned long long)start.bi_sector,
+ start.bi_size);
break;
}
corrupt_bio_byte -= bio_iter_len(bio, iter);
}
}
-static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc,
+ struct bvec_iter start)
{
unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
- if (!bio_has_data(bio))
- return;
-
- corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value);
+ corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start);
}
-static void corrupt_bio_random(struct bio *bio)
+static void corrupt_bio_random(struct bio *bio, struct bvec_iter start)
{
unsigned int corrupt_byte;
unsigned char corrupt_value;
- if (!bio_has_data(bio))
- return;
-
- corrupt_byte = get_random_u32() % bio->bi_iter.bi_size;
+ corrupt_byte = get_random_u32() % start.bi_size;
corrupt_value = get_random_u8();
- corrupt_bio_common(bio, corrupt_byte, corrupt_value);
+ corrupt_bio_common(bio, corrupt_byte, corrupt_value, start);
}
static void clone_free(struct bio *clone)
@@ -481,7 +493,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
unsigned int elapsed;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- pb->bio_submitted = false;
+ pb->bio_can_corrupt = false;
if (op_is_zone_mgmt(bio_op(bio)))
goto map_bio;
@@ -490,14 +502,15 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
bool corrupt_fixed, corrupt_random;
- /*
- * Flag this bio as submitted while down.
- */
- pb->bio_submitted = true;
+
+ if (bio_has_data(bio)) {
+ pb->bio_can_corrupt = true;
+ pb->saved_iter = bio->bi_iter;
+ }
/*
- * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set.
- * Otherwise, flakey_end_io() will decide if the reads should be modified.
+ * If ERROR_READS isn't set flakey_end_io() will decide if the
+ * reads should be modified.
*/
if (bio_data_dir(bio) == READ) {
if (test_bit(ERROR_READS, &fc->flags))
@@ -516,6 +529,8 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+ if (!pb->bio_can_corrupt)
+ goto map_bio;
/*
* Corrupt matching writes.
*/
@@ -535,9 +550,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
struct bio *clone = clone_bio(ti, fc, bio);
if (clone) {
if (corrupt_fixed)
- corrupt_bio_data(clone, fc);
+ corrupt_bio_data(clone, fc,
+ clone->bi_iter);
if (corrupt_random)
- corrupt_bio_random(clone);
+ corrupt_bio_random(clone,
+ clone->bi_iter);
submit_bio(clone);
return DM_MAPIO_SUBMITTED;
}
@@ -559,28 +576,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
if (op_is_zone_mgmt(bio_op(bio)))
return DM_ENDIO_DONE;
- if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+ if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte) {
if ((fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
/*
* Corrupt successful matching READs while in down state.
*/
- corrupt_bio_data(bio, fc);
+ corrupt_bio_data(bio, fc, pb->saved_iter);
}
}
if (fc->random_read_corrupt) {
u64 rnd = get_random_u64();
u32 rem = do_div(rnd, PROBABILITY_BASE);
if (rem < fc->random_read_corrupt)
- corrupt_bio_random(bio);
- }
- if (test_bit(ERROR_READS, &fc->flags)) {
- /*
- * Error read during the down_interval if drop_writes
- * and error_writes were not configured.
- */
- *error = BLK_STS_IOERR;
+ corrupt_bio_random(bio, pb->saved_iter);
}
}
@@ -638,7 +648,9 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
}
}
-static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct flakey_c *fc = ti->private;
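The saved_iter plumbing in the dm-flakey hunks exists because, by the time the end_io hook runs, bio->bi_iter has been advanced past the data; corrupting a completed READ therefore needs the iterator snapshotted at map time. A condensed sketch of the pattern (hypothetical names, real iterator API):

struct demo_pb {
	struct bvec_iter saved_iter;	/* snapshot taken in ->map() */
};

static void demo_map(struct bio *bio, struct demo_pb *pb)
{
	if (bio_has_data(bio))
		pb->saved_iter = bio->bi_iter;	/* before submission */
}

static void demo_end_io(struct bio *bio, struct demo_pb *pb)
{
	struct bvec_iter iter;
	struct bio_vec bv;

	/* bio->bi_iter is exhausted here; walk from the snapshot. */
	__bio_for_each_segment(bv, bio, iter, pb->saved_iter) {
		/* inspect or modify the payload */
	}
}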
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index cc3d3897ef42..4395657fa583 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1540,7 +1540,8 @@ static void sleep_on_endio_wait(struct dm_integrity_c *ic)
static void autocommit_fn(struct timer_list *t)
{
- struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
+ struct dm_integrity_c *ic = timer_container_of(ic, t,
+ autocommit_timer);
if (likely(!dm_integrity_failed(ic)))
queue_work(ic->commit_wq, &ic->commit_work);
@@ -2557,14 +2558,8 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
char *mem;
outgoing_bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recheck_bios);
-
- r = bio_add_page(outgoing_bio, virt_to_page(outgoing_data), ic->sectors_per_block << SECTOR_SHIFT, 0);
- if (unlikely(r != (ic->sectors_per_block << SECTOR_SHIFT))) {
- bio_put(outgoing_bio);
- bio->bi_status = BLK_STS_RESOURCE;
- bio_endio(bio);
- return;
- }
+ bio_add_virt_nofail(outgoing_bio, outgoing_data,
+ ic->sectors_per_block << SECTOR_SHIFT);
bip = bio_integrity_alloc(outgoing_bio, GFP_NOIO, 1);
if (IS_ERR(bip)) {
@@ -3211,7 +3206,8 @@ next_chunk:
bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_READ, GFP_NOIO, &ic->recalc_bios);
bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
- __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+ bio_add_virt_nofail(bio, recalc_buffer,
+ range.n_sectors << SECTOR_SHIFT);
r = submit_bio_wait(bio);
bio_put(bio);
if (unlikely(r)) {
@@ -3228,7 +3224,8 @@ next_chunk:
bio = bio_alloc_bioset(ic->dev->bdev, 1, REQ_OP_WRITE, GFP_NOIO, &ic->recalc_bios);
bio->bi_iter.bi_sector = ic->start + SB_SECTORS + range.logical_sector;
- __bio_add_page(bio, virt_to_page(recalc_buffer), range.n_sectors << SECTOR_SHIFT, offset_in_page(recalc_buffer));
+ bio_add_virt_nofail(bio, recalc_buffer,
+ range.n_sectors << SECTOR_SHIFT);
bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
if (unlikely(IS_ERR(bip))) {
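
The bio_add_virt_nofail() conversions above drop the open-coded
virt_to_page()/offset_in_page() pair along with failure handling that
cannot trigger for a single freshly allocated segment. Sketched from my
reading of the helper in bio.h (signature assumed, verify before relying
on it):

static inline void bio_add_virt_nofail(struct bio *bio, void *vaddr,
				       unsigned int len)
{
	/* vaddr must be physically contiguous (e.g. kmalloc, not vmalloc) */
	__bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
}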
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index d42eac944eb5..4165fef4c170 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1885,6 +1885,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry},
{DM_DEV_ARM_POLL_CMD, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll},
{DM_GET_TARGET_VERSION_CMD, 0, get_target_version},
+ {DM_MPATH_PROBE_PATHS_CMD, 0, NULL}, /* block device ioctl */
};
if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 66318aba4bdb..15538ec58f8e 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -119,7 +119,9 @@ static void linear_status(struct dm_target *ti, status_type_t type,
}
}
-static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct linear_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8d7df8303d0a..d484e8e1d48a 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -818,7 +818,9 @@ static void log_writes_status(struct dm_target *ti, status_type_t type,
}
static int log_writes_prepare_ioctl(struct dm_target *ti,
- struct block_device **bdev)
+ struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct log_writes_c *lc = ti->private;
struct dm_dev *dev = lc->dev;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6c98f4ae5ea9..aaf4a0a4b0eb 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -79,6 +79,7 @@ struct multipath {
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
+ struct priority_group *last_probed_pg;
atomic_t nr_valid_paths; /* Total number of usable paths */
unsigned int nr_priority_groups;
@@ -87,6 +88,7 @@ struct multipath {
const char *hw_handler_name;
char *hw_handler_params;
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+ wait_queue_head_t probe_wait; /* Wait for probing paths */
unsigned int pg_init_retries; /* Number of times to retry pg_init */
unsigned int pg_init_delay_msecs; /* Number of msecs before pg_init retry */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
@@ -100,6 +102,7 @@ struct multipath {
struct bio_list queued_bios;
struct timer_list nopath_timer; /* Timeout for queue_if_no_path */
+ bool is_suspending;
};
/*
@@ -132,6 +135,8 @@ static void queue_if_no_path_timeout_work(struct timer_list *t);
#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
+#define MPATHF_DELAY_PG_SWITCH 7 /* Delay switching pg if it still has paths */
+#define MPATHF_NEED_PG_SWITCH 8 /* Need to switch pgs after the delay has ended */
static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
@@ -254,6 +259,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
atomic_set(&m->pg_init_count, 0);
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
init_waitqueue_head(&m->pg_init_wait);
+ init_waitqueue_head(&m->probe_wait);
return 0;
}
@@ -413,13 +419,21 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
goto failed;
}
+ /* Don't change PG until it has no remaining paths */
+ pg = READ_ONCE(m->current_pg);
+ if (pg) {
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath))
+ return pgpath;
+ }
+
/* Were we instructed to switch PG? */
if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
if (!pg) {
spin_unlock_irqrestore(&m->lock, flags);
- goto check_current_pg;
+ goto check_all_pgs;
}
m->next_pg = NULL;
spin_unlock_irqrestore(&m->lock, flags);
@@ -427,16 +441,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
if (!IS_ERR_OR_NULL(pgpath))
return pgpath;
}
-
- /* Don't change PG until it has no remaining paths */
-check_current_pg:
- pg = READ_ONCE(m->current_pg);
- if (pg) {
- pgpath = choose_path_in_pg(m, pg, nr_bytes);
- if (!IS_ERR_OR_NULL(pgpath))
- return pgpath;
- }
-
+check_all_pgs:
/*
* Loop through priority groups until we find a valid path.
* First time we skip PGs marked 'bypassed'.
@@ -612,7 +617,6 @@ static void multipath_queue_bio(struct multipath *m, struct bio *bio)
static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
struct pgpath *pgpath;
- unsigned long flags;
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
@@ -620,12 +624,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if (!pgpath) {
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
__multipath_queue_bio(m, bio);
pgpath = ERR_PTR(-EAGAIN);
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
@@ -688,7 +692,6 @@ static void process_queued_io_list(struct multipath *m)
static void process_queued_bios(struct work_struct *work)
{
int r;
- unsigned long flags;
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
@@ -697,16 +700,16 @@ static void process_queued_bios(struct work_struct *work)
bio_list_init(&bios);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (bio_list_empty(&m->queued_bios)) {
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
return;
}
bio_list_merge_init(&bios, &m->queued_bios);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
@@ -787,7 +790,7 @@ static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
*/
static void queue_if_no_path_timeout_work(struct timer_list *t)
{
- struct multipath *m = from_timer(m, t, nopath_timer);
+ struct multipath *m = timer_container_of(m, t, nopath_timer);
DMWARN("queue_if_no_path timeout on %s, failing queued IO",
dm_table_device_name(m->ti->table));
@@ -1190,7 +1193,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct dm_arg_set as;
unsigned int pg_count = 0;
unsigned int next_pg_num;
- unsigned long flags;
as.argc = argc;
as.argv = argv;
@@ -1255,9 +1257,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
enable_nopath_timeout(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
@@ -1292,23 +1294,21 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
if (m->hw_handler_name) {
- unsigned long flags;
-
if (!atomic_read(&m->pg_init_in_progress))
goto skip;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (atomic_read(&m->pg_init_in_progress) &&
!test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
skip:
if (m->queue_mode == DM_TYPE_BIO_BASED)
@@ -1370,11 +1370,10 @@ out:
static int reinstate_path(struct pgpath *pgpath)
{
int r = 0, run_queue = 0;
- unsigned long flags;
struct multipath *m = pgpath->pg->m;
unsigned int nr_valid_paths;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (pgpath->is_active)
goto out;
@@ -1404,7 +1403,7 @@ static int reinstate_path(struct pgpath *pgpath)
schedule_work(&m->trigger_event);
out:
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
if (run_queue) {
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
@@ -1439,15 +1438,19 @@ static int action_dev(struct multipath *m, dev_t dev, action_fn action)
* Temporarily try to avoid having to use the specified PG
*/
static void bypass_pg(struct multipath *m, struct priority_group *pg,
- bool bypassed)
+ bool bypassed, bool can_be_delayed)
{
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
pg->bypassed = bypassed;
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (can_be_delayed && test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
+ set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
+ else {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
spin_unlock_irqrestore(&m->lock, flags);
@@ -1461,7 +1464,6 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
{
struct priority_group *pg;
unsigned int pgnum;
- unsigned long flags;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
@@ -1470,17 +1472,21 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
return -EINVAL;
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
list_for_each_entry(pg, &m->priority_groups, list) {
pg->bypassed = false;
if (--pgnum)
continue;
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
+ set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
+ else {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
m->next_pg = pg;
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
schedule_work(&m->trigger_event);
return 0;
@@ -1507,7 +1513,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
break;
}
- bypass_pg(m, pg, bypassed);
+ bypass_pg(m, pg, bypassed, true);
return 0;
}
@@ -1561,7 +1567,7 @@ static void pg_init_done(void *data, int errors)
* Probably doing something like FW upgrade on the
* controller so try the other pg.
*/
- bypass_pg(m, pg, true);
+ bypass_pg(m, pg, true, false);
break;
case SCSI_DH_RETRY:
/* Wait before retrying. */
@@ -1742,6 +1748,9 @@ static void multipath_presuspend(struct dm_target *ti)
{
struct multipath *m = ti->private;
+ spin_lock_irq(&m->lock);
+ m->is_suspending = true;
+ spin_unlock_irq(&m->lock);
/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
queue_if_no_path(m, false, true, __func__);
@@ -1762,9 +1771,9 @@ static void multipath_postsuspend(struct dm_target *ti)
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
+ m->is_suspending = false;
if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
@@ -1775,7 +1784,7 @@ static void multipath_resume(struct dm_target *ti)
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
/*
@@ -1798,14 +1807,13 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{
int sz = 0, pg_counter, pgpath_counter;
- unsigned long flags;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
unsigned int pg_num;
char state;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
/* Features */
if (type == STATUSTYPE_INFO)
@@ -1845,10 +1853,10 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("%u ", m->nr_priority_groups);
- if (m->next_pg)
- pg_num = m->next_pg->pg_num;
- else if (m->current_pg)
+ if (m->current_pg)
pg_num = m->current_pg->pg_num;
+ else if (m->next_pg)
+ pg_num = m->next_pg->pg_num;
else
pg_num = (m->nr_priority_groups ? 1 : 0);
@@ -1951,7 +1959,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
break;
}
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
@@ -1961,7 +1969,6 @@ static int multipath_message(struct dm_target *ti, unsigned int argc, char **arg
dev_t dev;
struct multipath *m = ti->private;
action_fn action;
- unsigned long flags;
mutex_lock(&m->work_mutex);
@@ -1973,9 +1980,9 @@ static int multipath_message(struct dm_target *ti, unsigned int argc, char **arg
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, true, false, __func__);
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
enable_nopath_timeout(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, false, false, __func__);
@@ -2021,14 +2028,132 @@ out:
return r;
}
+/*
+ * Perform a minimal read from the given path to find out whether the
+ * path still works. If a path error occurs, fail it.
+ */
+static int probe_path(struct pgpath *pgpath)
+{
+ struct block_device *bdev = pgpath->path.dev->bdev;
+ unsigned int read_size = bdev_logical_block_size(bdev);
+ struct page *page;
+ struct bio *bio;
+ blk_status_t status;
+ int r = 0;
+
+ if (WARN_ON_ONCE(read_size > PAGE_SIZE))
+ return -EINVAL;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ /* Perform a minimal read: sector 0, length read_size */
+ bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
+ if (!bio) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ bio->bi_iter.bi_sector = 0;
+ __bio_add_page(bio, page, read_size, 0);
+ submit_bio_wait(bio);
+ status = bio->bi_status;
+ bio_put(bio);
+
+ if (status && blk_path_error(status))
+ fail_path(pgpath);
+
+out:
+ __free_page(page);
+ return r;
+}
+
+/*
+ * Probe all active paths in current_pg to find out whether they still work.
+ * Fail all paths that do not work.
+ *
+ * Return -ENOTCONN if no valid path is left (even outside of current_pg). We
+ * cannot probe paths in other pgs without switching current_pg, so if valid
+ * paths are only in different pgs, they may or may not work. Additionally
+ * we should not probe paths in a pathgroup that is in the process of
+ * Initializing. Userspace can submit a request and we'll switch and wait
+ * for the pathgroup to be initialized. If the request fails, it may need to
+ * probe again.
+ */
+static int probe_active_paths(struct multipath *m)
+{
+ struct pgpath *pgpath;
+ struct priority_group *pg = NULL;
+ int r = 0;
+
+ spin_lock_irq(&m->lock);
+ if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) {
+ wait_event_lock_irq(m->probe_wait,
+ !test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags),
+ m->lock);
+ /*
+ * If we waited because a probe was already in progress
+ * and it probed the current active pathgroup, don't
+ * reprobe. Just report whether any valid paths remain.
+ */
+ if (m->current_pg == m->last_probed_pg)
+ goto skip_probe;
+ }
+ if (!m->current_pg || m->is_suspending ||
+ test_bit(MPATHF_QUEUE_IO, &m->flags))
+ goto skip_probe;
+ set_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
+ pg = m->last_probed_pg = m->current_pg;
+ spin_unlock_irq(&m->lock);
+
+ list_for_each_entry(pgpath, &pg->pgpaths, list) {
+ if (pg != READ_ONCE(m->current_pg) ||
+ READ_ONCE(m->is_suspending))
+ goto out;
+ if (!pgpath->is_active)
+ continue;
+
+ r = probe_path(pgpath);
+ if (r < 0)
+ goto out;
+ }
+
+out:
+ spin_lock_irq(&m->lock);
+ clear_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
+ if (test_and_clear_bit(MPATHF_NEED_PG_SWITCH, &m->flags)) {
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
+skip_probe:
+ if (r == 0 && !atomic_read(&m->nr_valid_paths))
+ r = -ENOTCONN;
+ spin_unlock_irq(&m->lock);
+ if (pg)
+ wake_up(&m->probe_wait);
+ return r;
+}
+
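
Because the DM_MPATH_PROBE_PATHS_CMD table entry added earlier in this
diff has a NULL fn ("block device ioctl"), userspace issues the probe
directly against the mapped device node. A hypothetical caller, assuming
the ioctl is exported through <linux/dm-ioctl.h> and takes no argument:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	int fd = open("/dev/mapper/mpatha", O_RDONLY);	/* example node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, DM_MPATH_PROBE_PATHS) < 0)
		perror("probe");	/* ENOTCONN: no usable path remains */
	close(fd);
	return 0;
}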
static int multipath_prepare_ioctl(struct dm_target *ti,
- struct block_device **bdev)
+ struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct multipath *m = ti->private;
struct pgpath *pgpath;
- unsigned long flags;
int r;
+ if (_IOC_TYPE(cmd) == DM_IOCTL) {
+ *forward = false;
+ switch (cmd) {
+ case DM_MPATH_PROBE_PATHS:
+ return probe_active_paths(m);
+ default:
+ return -ENOTTY;
+ }
+ }
+
pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
pgpath = choose_pgpath(m, 0);
@@ -2044,10 +2169,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
} else {
/* No path is available */
r = -EIO;
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
r = -ENOTCONN;
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
}
if (r == -ENOTCONN) {
@@ -2055,10 +2180,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
- spin_lock_irqsave(&m->lock, flags);
+ spin_lock_irq(&m->lock);
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
(void) __pg_init_all_paths(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ spin_unlock_irq(&m->lock);
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
}
@@ -2180,7 +2305,7 @@ static int multipath_busy(struct dm_target *ti)
*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6adc55fd90d3..d296770478b2 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -14,6 +14,7 @@
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"
+#include "dm-core.h"
#include <linux/device-mapper.h>
@@ -1355,11 +1356,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
return -EINVAL;
}
- /*
- * In device-mapper, we specify things in sectors, but
- * MD records this value in kB
- */
- if (value < 0 || value / 2 > COUNTER_MAX) {
+ if (value < 0) {
rs->ti->error = "Max write-behind limit out of range";
return -EINVAL;
}
@@ -3308,6 +3305,7 @@ size_check:
/* Disable/enable discard support on raid set. */
configure_discard_support(rs);
+ rs->md.dm_gendisk = ti->table->md->disk;
mddev_unlock(&rs->md);
return 0;
@@ -3327,6 +3325,7 @@ static void raid_dtr(struct dm_target *ti)
mddev_lock_nointr(&rs->md);
md_stop(&rs->md);
+ rs->md.dm_gendisk = NULL;
mddev_unlock(&rs->md);
if (work_pending(&rs->md.event_work))
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9e615b4f1f5e..268f734ca9c3 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -103,7 +103,7 @@ static void wakeup_mirrord(void *context)
static void delayed_wake_fn(struct timer_list *t)
{
- struct mirror_set *ms = from_timer(ms, t, timer);
+ struct mirror_set *ms = timer_container_of(ms, t, timer);
clear_bit(0, &ms->timer_pending);
wakeup_mirrord(ms);
@@ -133,10 +133,9 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
spin_lock_irqsave(&ms->lock, flags);
should_wake = !(bl->head);
bio_list_add(bl, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
-
if (should_wake)
wakeup_mirrord(ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
}
static void dispatch_bios(void *context, struct bio_list *bio_list)
@@ -646,9 +645,9 @@ static void write_callback(unsigned long error, void *context)
if (!ms->failures.head)
should_wake = 1;
bio_list_add(&ms->failures, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
wakeup_mirrord(ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index e23076f7ece2..a6ca92049c10 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -217,10 +217,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
if (unlikely(error == BLK_STS_TARGET)) {
if (req_op(clone) == REQ_OP_DISCARD &&
!clone->q->limits.max_discard_sectors)
- disable_discard(tio->md);
+ blk_queue_disable_discard(tio->md->queue);
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
- disable_write_zeroes(tio->md);
+ blk_queue_disable_write_zeroes(tio->md->queue);
}
switch (r) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a1b7535c508a..a7dc04bd55e5 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -405,7 +405,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
unsigned int i;
- char major_minor[16];
+ char major_minor[22];
struct stripe_c *sc = ti->private;
if (!*error)
@@ -417,8 +417,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
if (*error == BLK_STS_NOTSUPP)
return DM_ENDIO_DONE;
- memset(major_minor, 0, sizeof(major_minor));
- sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
+ format_dev_t(major_minor, bio_dev(bio));
/*
* Test to see which stripe drive triggered the event
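
The buffer bump from 16 to 22 bytes is the worst case for format_dev_t():
"%u:%u" can print two full u32 values (10 + 1 + 10 characters) plus the
NUL terminator. The helper is, roughly (paraphrased from kdev_t.h):

#define format_dev_t(buffer, dev)					\
	({								\
		sprintf(buffer, "%u:%u", MAJOR(dev), MINOR(dev));	\
		buffer;							\
	})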
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index dfd9fb52a6f3..bb1a70b5a215 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -517,7 +517,9 @@ static void switch_status(struct dm_target *ti, status_type_t type,
*
* Passthrough all ioctls to the path for sector 0
*/
-static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct switch_ctx *sctx = ti->private;
unsigned int path_nr;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6b23e777e10e..24a857ff6d0b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -117,7 +117,6 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
n_targets = (struct dm_target *) (n_highs + num);
memset(n_highs, -1, sizeof(*n_highs) * num);
- kvfree(t->highs);
t->num_allocated = num;
t->highs = n_highs;
@@ -257,7 +256,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
if (bdev_is_zoned(bdev)) {
unsigned int zone_sectors = bdev_zone_sectors(bdev);
- if (start & (zone_sectors - 1)) {
+ if (!bdev_is_zone_aligned(bdev, start)) {
DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
@@ -274,7 +273,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* devices do not end up with a smaller zone in the middle of
* the sector range.
*/
- if (len & (zone_sectors - 1)) {
+ if (!bdev_is_zone_aligned(bdev, len)) {
DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
@@ -431,6 +430,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
+ mutex_lock(&q->limits_lock);
+ /*
+ * BLK_FEAT_ATOMIC_WRITES is not inherited from the bottom device in
+ * blk_stack_limits(), so do it manually.
+ */
+ limits->features |= (q->limits.features & BLK_FEAT_ATOMIC_WRITES);
+
if (blk_stack_limits(limits, &q->limits,
get_start_sect(bdev) + start) < 0)
DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
@@ -448,6 +454,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
*/
if (!dm_target_has_integrity(ti->type))
queue_limits_stack_integrity_bdev(limits, bdev);
+ mutex_unlock(&q->limits_lock);
return 0;
}
@@ -1189,6 +1196,176 @@ put_live_table:
return 0;
}
+enum dm_wrappedkey_op {
+ DERIVE_SW_SECRET,
+ IMPORT_KEY,
+ GENERATE_KEY,
+ PREPARE_KEY,
+};
+
+struct dm_wrappedkey_op_args {
+ enum dm_wrappedkey_op op;
+ int err;
+ union {
+ struct {
+ const u8 *eph_key;
+ size_t eph_key_size;
+ u8 *sw_secret;
+ } derive_sw_secret;
+ struct {
+ const u8 *raw_key;
+ size_t raw_key_size;
+ u8 *lt_key;
+ } import_key;
+ struct {
+ u8 *lt_key;
+ } generate_key;
+ struct {
+ const u8 *lt_key;
+ size_t lt_key_size;
+ u8 *eph_key;
+ } prepare_key;
+ };
+};
+
+static int dm_wrappedkey_op_callback(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_wrappedkey_op_args *args = data;
+ struct block_device *bdev = dev->bdev;
+ struct blk_crypto_profile *profile =
+ bdev_get_queue(bdev)->crypto_profile;
+ int err = -EOPNOTSUPP;
+
+ if (!args->err)
+ return 0;
+
+ switch (args->op) {
+ case DERIVE_SW_SECRET:
+ err = blk_crypto_derive_sw_secret(
+ bdev,
+ args->derive_sw_secret.eph_key,
+ args->derive_sw_secret.eph_key_size,
+ args->derive_sw_secret.sw_secret);
+ break;
+ case IMPORT_KEY:
+ err = blk_crypto_import_key(profile,
+ args->import_key.raw_key,
+ args->import_key.raw_key_size,
+ args->import_key.lt_key);
+ break;
+ case GENERATE_KEY:
+ err = blk_crypto_generate_key(profile,
+ args->generate_key.lt_key);
+ break;
+ case PREPARE_KEY:
+ err = blk_crypto_prepare_key(profile,
+ args->prepare_key.lt_key,
+ args->prepare_key.lt_key_size,
+ args->prepare_key.eph_key);
+ break;
+ }
+ args->err = err;
+
+ /* Try another device in case this fails. */
+ return 0;
+}
+
+static int dm_exec_wrappedkey_op(struct blk_crypto_profile *profile,
+ struct dm_wrappedkey_op_args *args)
+{
+ struct mapped_device *md =
+ container_of(profile, struct dm_crypto_profile, profile)->md;
+ struct dm_target *ti;
+ struct dm_table *t;
+ int srcu_idx;
+ int i;
+
+ args->err = -EOPNOTSUPP;
+
+ t = dm_get_live_table(md, &srcu_idx);
+ if (!t)
+ goto out;
+
+ /*
+ * blk-crypto currently has no support for multiple incompatible
+ * implementations of wrapped inline crypto keys on a single system.
+ * It was already checked earlier that support for wrapped keys was
+ * declared on all underlying devices. Thus, all the underlying devices
+ * should support all wrapped key operations and they should behave
+ * identically, i.e. work with the same keys. So, just executing the
+ * operation on the first device on which it works suffices for now.
+ */
+ for (i = 0; i < t->num_targets; i++) {
+ ti = dm_table_get_target(t, i);
+ if (!ti->type->iterate_devices)
+ continue;
+ ti->type->iterate_devices(ti, dm_wrappedkey_op_callback, args);
+ if (!args->err)
+ break;
+ }
+out:
+ dm_put_live_table(md, srcu_idx);
+ return args->err;
+}
+
+static int dm_derive_sw_secret(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = DERIVE_SW_SECRET,
+ .derive_sw_secret = {
+ .eph_key = eph_key,
+ .eph_key_size = eph_key_size,
+ .sw_secret = sw_secret,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = IMPORT_KEY,
+ .import_key = {
+ .raw_key = raw_key,
+ .raw_key_size = raw_key_size,
+ .lt_key = lt_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = GENERATE_KEY,
+ .generate_key = {
+ .lt_key = lt_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
+static int dm_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct dm_wrappedkey_op_args args = {
+ .op = PREPARE_KEY,
+ .prepare_key = {
+ .lt_key = lt_key,
+ .lt_key_size = lt_key_size,
+ .eph_key = eph_key,
+ },
+ };
+ return dm_exec_wrappedkey_op(profile, &args);
+}
+
static int
device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -1263,6 +1440,13 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
profile);
}
+ if (profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) {
+ profile->ll_ops.derive_sw_secret = dm_derive_sw_secret;
+ profile->ll_ops.import_key = dm_import_key;
+ profile->ll_ops.generate_key = dm_generate_key;
+ profile->ll_ops.prepare_key = dm_prepare_key;
+ }
+
if (t->md->queue &&
!blk_crypto_has_capabilities(profile,
t->md->queue->crypto_profile)) {
@@ -1490,6 +1674,18 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
return true;
}
+bool dm_table_is_wildcard(struct dm_table *t)
+{
+ for (unsigned int i = 0; i < t->num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(t, i);
+
+ if (!dm_target_is_wildcard(ti->type))
+ return false;
+ }
+
+ return true;
+}
+
static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1721,8 +1917,12 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *
sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
+ int b;
- return !q->limits.max_write_zeroes_sectors;
+ mutex_lock(&q->limits_lock);
+ b = !q->limits.max_write_zeroes_sectors;
+ mutex_unlock(&q->limits_lock);
+ return b;
}
static bool dm_table_supports_write_zeroes(struct dm_table *t)
@@ -1830,10 +2030,24 @@ static bool dm_table_supports_atomic_writes(struct dm_table *t)
return true;
}
+bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size,
+ sector_t new_size)
+{
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && dm_has_zone_plugs(t->md) &&
+ old_size != new_size) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot change size",
+ dm_device_name(t->md));
+ return false;
+ }
+ return true;
+}
+
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
int r;
+ struct queue_limits old_limits;
if (!dm_table_supports_nowait(t))
limits->features &= ~BLK_FEAT_NOWAIT;
@@ -1860,28 +2074,30 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_flush(t))
limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
- if (dm_table_supports_dax(t, device_not_dax_capable)) {
+ if (dm_table_supports_dax(t, device_not_dax_capable))
limits->features |= BLK_FEAT_DAX;
- if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
- set_dax_synchronous(t->md->dax_dev);
- } else
+ else
limits->features &= ~BLK_FEAT_DAX;
- if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
- dax_write_cache(t->md->dax_dev, true);
-
/* For a zoned table, setup the zone related queue attributes. */
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- (limits->features & BLK_FEAT_ZONED)) {
- r = dm_set_zones_restrictions(t, q, limits);
- if (r)
- return r;
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ if (limits->features & BLK_FEAT_ZONED) {
+ r = dm_set_zones_restrictions(t, q, limits);
+ if (r)
+ return r;
+ } else if (dm_has_zone_plugs(t->md)) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot switch to non-zoned table.",
+ dm_device_name(t->md));
+ return -EINVAL;
+ }
}
if (dm_table_supports_atomic_writes(t))
limits->features |= BLK_FEAT_ATOMIC_WRITES;
- r = queue_limits_set(q, limits);
+ old_limits = queue_limits_start_update(q);
+ r = queue_limits_commit_update(q, limits);
if (r)
return r;
@@ -1892,10 +2108,21 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
(limits->features & BLK_FEAT_ZONED)) {
r = dm_revalidate_zones(t, q);
- if (r)
+ if (r) {
+ queue_limits_set(q, &old_limits);
return r;
+ }
}
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ dm_finalize_zone_settings(t, limits);
+
+ if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
+ set_dax_synchronous(t->md->dax_dev);
+
+ if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
+ dax_write_cache(t->md->dax_dev, true);
+
dm_update_crypto_profile(q, t);
return 0;
}
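
The switch from queue_limits_set() to the start_update/commit_update pair
lets a later dm_revalidate_zones() failure roll the queue back to the
snapshot taken before the commit. A sketch of the pattern under my
reading of the blkdev.h API (apply_limits_with_rollback is a hypothetical
wrapper, not kernel code):

static int apply_limits_with_rollback(struct request_queue *q,
				      struct queue_limits *new_lim,
				      int (*post)(struct request_queue *q))
{
	/* Snapshot the current limits; this also takes q->limits_lock. */
	struct queue_limits old = queue_limits_start_update(q);
	/* Validate and apply new_lim, then drop q->limits_lock. */
	int r = queue_limits_commit_update(q, new_lim);

	if (r)
		return r;
	r = post ? post(q) : 0;
	if (r)
		queue_limits_set(q, &old);	/* restore the snapshot */
	return r;
}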
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index 3c58b941e067..4d983092a152 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -2337,7 +2337,7 @@ static void timeout_index_operations_callback(struct vdo_completion *completion)
static void timeout_index_operations(struct timer_list *t)
{
- struct hash_zone *zone = from_timer(zone, t, timer);
+ struct hash_zone *zone = timer_container_of(zone, t, timer);
if (change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING,
DEDUPE_QUERY_TIMER_FIRED))
diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c
index 655453bb276b..425b3a74f4db 100644
--- a/drivers/md/dm-vdo/indexer/volume.c
+++ b/drivers/md/dm-vdo/indexer/volume.c
@@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
u32 physical_page, struct cached_page **page_ptr)
{
struct cached_page *page;
+ unsigned int zone_number = request->zone_number;
get_page_from_cache(&volume->page_cache, physical_page, &page);
if (page != NULL) {
- if (request->zone_number == 0) {
+ if (zone_number == 0) {
/* Only one zone is allowed to update the LRU. */
make_page_most_recent(&volume->page_cache, page);
}
@@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
}
/* Prepare to enqueue a read for the page. */
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
mutex_lock(&volume->read_threads_mutex);
/*
@@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
* the order does not matter for correctness as it does below.
*/
mutex_unlock(&volume->read_threads_mutex);
- begin_pending_search(&volume->page_cache, physical_page,
- request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
return UDS_QUEUED;
}
@@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request *
* "search pending" state in careful order so no other thread can mess with the data before
* the caller gets to look at it.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
mutex_unlock(&volume->read_threads_mutex);
*page_ptr = page;
return UDS_SUCCESS;
@@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
{
int result;
struct cached_page *page = NULL;
+ unsigned int zone_number = request->zone_number;
u32 physical_page = map_to_physical_page(volume->geometry, chapter,
index_page_number);
@@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r
* invalidation by the reader thread, before the reader thread has noticed that the
* invalidate_counter has been incremented.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
result = get_volume_page_protected(volume, request, physical_page, &page);
if (result != UDS_SUCCESS) {
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
&request->record_name,
record_page_number);
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
@@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
{
struct cached_page *record_page;
struct index_geometry *geometry = volume->geometry;
+ unsigned int zone_number = request->zone_number;
int result;
u32 physical_page, page_number;
@@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
* invalidation by the reader thread, before the reader thread has noticed that the
* invalidate_counter has been incremented.
*/
- begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ begin_pending_search(&volume->page_cache, physical_page, zone_number);
result = get_volume_page_protected(volume, request, physical_page, &record_page);
if (result != UDS_SUCCESS) {
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return result;
}
@@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req
&request->record_name, geometry, &request->old_metadata))
*found = true;
- end_pending_search(&volume->page_cache, request->zone_number);
+ end_pending_search(&volume->page_cache, zone_number);
return UDS_SUCCESS;
}
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0c41949db784..631a887b487c 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -593,6 +593,10 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
(*argc)--;
if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
+ if (v->fec->dev) {
+ ti->error = "FEC device already specified";
+ return -EINVAL;
+ }
r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev);
if (r) {
ti->error = "FEC device lookup failed";
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 3c427f18a04b..81186bded1ce 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -682,7 +682,8 @@ static void verity_bh_work(struct work_struct *w)
static inline bool verity_use_bh(unsigned int bytes, unsigned short ioprio)
{
return ioprio <= IOPRIO_CLASS_IDLE &&
- bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]);
+ bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]) &&
+ !need_resched();
}
static void verity_end_io(struct bio *bio)
@@ -993,7 +994,9 @@ static void verity_status(struct dm_target *ti, status_type_t type,
}
}
-static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
{
struct dm_verity *v = ti->private;
@@ -1120,6 +1123,9 @@ static int verity_alloc_most_once(struct dm_verity *v)
{
struct dm_target *ti = v->ti;
+ if (v->validated_blocks)
+ return 0;
+
/* the bitset can only handle INT_MAX blocks */
if (v->data_blocks > INT_MAX) {
ti->error = "device too large to use check_at_most_once";
@@ -1143,6 +1149,9 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
struct dm_verity_io *io;
u8 *zero_data;
+ if (v->zero_digest)
+ return 0;
+
v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
if (!v->zero_digest)
@@ -1577,7 +1586,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- /* Root hash signature is a optional parameter*/
+ /* Root hash signature is an optional parameter */
r = verity_verify_root_hash(root_hash_digest_to_validate,
strlen(root_hash_digest_to_validate),
verify_args.sig,
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index a9e2c6c0a33c..d5261a0e4232 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -71,9 +71,14 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
const char *arg_name)
{
struct dm_target *ti = v->ti;
- int ret = 0;
+ int ret;
const char *sig_key = NULL;
+ if (v->signature_key_desc) {
+ ti->error = DM_VERITY_VERIFY_ERR("root_hash_sig_key_desc already specified");
+ return -EINVAL;
+ }
+
if (!*argc) {
ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified");
return -EINVAL;
@@ -83,14 +88,18 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
(*argc)--;
ret = verity_verify_get_sig_from_key(sig_key, sig_opts);
- if (ret < 0)
+ if (ret < 0) {
ti->error = DM_VERITY_VERIFY_ERR("Invalid key specified");
+ return ret;
+ }
v->signature_key_desc = kstrdup(sig_key, GFP_KERNEL);
- if (!v->signature_key_desc)
+ if (!v->signature_key_desc) {
+ ti->error = DM_VERITY_VERIFY_ERR("Could not allocate memory for signature key");
return -ENOMEM;
+ }
- return ret;
+ return 0;
}
/*
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index d6a04a57472d..a428e1cacf07 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -706,7 +706,7 @@ static inline void writecache_verify_watermark(struct dm_writecache *wc)
static void writecache_max_age_timer(struct timer_list *t)
{
- struct dm_writecache *wc = from_timer(wc, t, max_age_timer);
+ struct dm_writecache *wc = timer_container_of(wc, t, max_age_timer);
if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
queue_work(wc->writeback_wq, &wc->writeback_work);
@@ -866,7 +866,7 @@ static void writecache_flush_work(struct work_struct *work)
static void writecache_autocommit_timer(struct timer_list *t)
{
- struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
+ struct dm_writecache *wc = timer_container_of(wc, t, autocommit_timer);
if (!writecache_has_error(wc))
queue_work(wc->writeback_wq, &wc->flush_work);
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 20edd3fabbab..3d31b82e0730 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -56,24 +56,31 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
{
struct mapped_device *md = disk->private_data;
struct dm_table *map;
- int srcu_idx, ret;
+ struct dm_table *zone_revalidate_map = md->zone_revalidate_map;
+ int srcu_idx, ret = -EIO;
+ bool put_table = false;
- if (!md->zone_revalidate_map) {
- /* Regular user context */
+ if (!zone_revalidate_map || md->revalidate_map_task != current) {
+ /*
+ * Either regular user context, or zone revalidation during
+ * __bind() is in progress but this call comes from a
+ * different process.
+ */
if (dm_suspended_md(md))
return -EAGAIN;
map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- return -EIO;
+ put_table = true;
} else {
/* Zone revalidation during __bind() */
- map = md->zone_revalidate_map;
+ map = zone_revalidate_map;
}
- ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data);
+ if (map)
+ ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb,
+ data);
- if (!md->zone_revalidate_map)
+ if (put_table)
dm_put_live_table(md, srcu_idx);
return ret;
@@ -153,33 +160,36 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
{
struct mapped_device *md = t->md;
struct gendisk *disk = md->disk;
+ unsigned int nr_zones = disk->nr_zones;
int ret;
if (!get_capacity(disk))
return 0;
- /* Revalidate only if something changed. */
- if (!disk->nr_zones || disk->nr_zones != md->nr_zones) {
- DMINFO("%s using %s zone append",
- disk->disk_name,
- queue_emulates_zone_append(q) ? "emulated" : "native");
- md->nr_zones = 0;
- }
-
- if (md->nr_zones)
+ /*
+ * Do not revalidate if zone write plug resources have already
+ * been allocated.
+ */
+ if (dm_has_zone_plugs(md))
return 0;
+ DMINFO("%s using %s zone append", disk->disk_name,
+ queue_emulates_zone_append(q) ? "emulated" : "native");
+
/*
* Our table is not live yet. So the call to dm_get_live_table()
* in dm_blk_report_zones() will fail. Set a temporary pointer to
* our table for dm_blk_report_zones() to use directly.
*/
md->zone_revalidate_map = t;
+ md->revalidate_map_task = current;
ret = blk_revalidate_disk_zones(disk);
+ md->revalidate_map_task = NULL;
md->zone_revalidate_map = NULL;
if (ret) {
DMERR("Revalidate zones failed %d", ret);
+ disk->nr_zones = nr_zones;
return ret;
}
@@ -337,15 +347,15 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
/*
* Check if zone append is natively supported, and if not, set the
- * mapped device queue as needing zone append emulation.
+ * mapped device queue as needing zone append emulation. If zone
+ * append is natively supported, make sure that
+ * max_hw_zone_append_sectors is not set to 0.
*/
WARN_ON_ONCE(queue_is_mq(q));
- if (dm_table_supports_zone_append(t)) {
- clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- } else {
- set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ if (!dm_table_supports_zone_append(t))
lim->max_hw_zone_append_sectors = 0;
- }
+ else if (lim->max_hw_zone_append_sectors == 0)
+ lim->max_hw_zone_append_sectors = lim->max_zone_append_sectors;
/*
* Determine the max open and max active zone limits for the mapped
@@ -380,15 +390,28 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
lim->max_open_zones = 0;
lim->max_active_zones = 0;
lim->max_hw_zone_append_sectors = 0;
+ lim->max_zone_append_sectors = 0;
lim->zone_write_granularity = 0;
lim->chunk_sectors = 0;
lim->features &= ~BLK_FEAT_ZONED;
- clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- md->nr_zones = 0;
- disk->nr_zones = 0;
return 0;
}
+ if (get_capacity(disk) && dm_has_zone_plugs(t->md)) {
+ if (q->limits.chunk_sectors != lim->chunk_sectors) {
+ DMWARN("%s: device has zone write plug resources. "
+ "Cannot change zone size",
+ disk->disk_name);
+ return -EINVAL;
+ }
+ if (lim->max_hw_zone_append_sectors != 0 &&
+ !dm_table_is_wildcard(t)) {
+ DMWARN("%s: device has zone write plug resources. "
+ "New table must emulate zone append",
+ disk->disk_name);
+ return -EINVAL;
+ }
+ }
/*
* Warn once (when the capacity is not yet set) if the mapped device is
* partially using zone resources of the target devices as that leads to
@@ -408,6 +431,23 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
return 0;
}
+void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim)
+{
+ struct mapped_device *md = t->md;
+
+ if (lim->features & BLK_FEAT_ZONED) {
+ if (dm_table_supports_zone_append(t))
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ else
+ set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ } else {
+ clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
+ md->nr_zones = 0;
+ md->disk->nr_zones = 0;
+ }
+}
+
/*
* IO completion callback called from clone_endio().
*/
@@ -423,9 +463,9 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone)
*/
if (clone->bi_status == BLK_STS_OK &&
bio_op(clone) == REQ_OP_ZONE_APPEND) {
- sector_t mask = bdev_zone_sectors(disk->part0) - 1;
-
- orig_bio->bi_iter.bi_sector += clone->bi_iter.bi_sector & mask;
+ orig_bio->bi_iter.bi_sector +=
+ bdev_offset_from_zone_start(disk->part0,
+ clone->bi_iter.bi_sector);
}
return;
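
bdev_offset_from_zone_start() replaces the open-coded mask; for the
power-of-two zone sizes the block layer enforces, it is to my knowledge
the same computation the deleted lines performed:

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}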
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 6141fc25d842..5da3db06da10 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1015,7 +1015,8 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
/*
* Pass on ioctl to the backend device.
*/
-static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg, bool *forward)
{
struct dmz_target *dmz = ti->private;
struct dmz_dev *dev = &dmz->dev[0];
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5ab7574c0c76..1726f0f828cc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -411,7 +411,8 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
- struct block_device **bdev)
+ struct block_device **bdev, unsigned int cmd,
+ unsigned long arg, bool *forward)
{
struct dm_target *ti;
struct dm_table *map;
@@ -434,8 +435,8 @@ retry:
if (dm_suspended_md(md))
return -EAGAIN;
- r = ti->type->prepare_ioctl(ti, bdev);
- if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+ r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward);
+ if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) {
dm_put_live_table(md, *srcu_idx);
fsleep(10000);
goto retry;
@@ -454,9 +455,10 @@ static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
{
struct mapped_device *md = bdev->bd_disk->private_data;
int r, srcu_idx;
+ bool forward = true;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
- if (r < 0)
+ r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward);
+ if (!forward || r < 0)
goto out;
if (r > 0) {
@@ -1082,22 +1084,6 @@ static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
return &md->queue->limits;
}
-void disable_discard(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support DISCARD, disable it */
- limits->max_hw_discard_sectors = 0;
-}
-
-void disable_write_zeroes(struct mapped_device *md)
-{
- struct queue_limits *limits = dm_get_queue_limits(md);
-
- /* device doesn't really support WRITE ZEROES, disable it */
- limits->max_write_zeroes_sectors = 0;
-}
-
static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
@@ -1115,10 +1101,10 @@ static void clone_endio(struct bio *bio)
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bdev_max_discard_sectors(bio->bi_bdev))
- disable_discard(md);
+ blk_queue_disable_discard(md->queue);
else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bdev_write_zeroes_sectors(bio->bi_bdev))
- disable_write_zeroes(md);
+ blk_queue_disable_write_zeroes(md->queue);
}
if (static_branch_unlikely(&zoned_enabled) &&
@@ -2421,21 +2407,35 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{
struct dm_table *old_map;
- sector_t size;
+ sector_t size, old_size;
int ret;
lockdep_assert_held(&md->suspend_lock);
size = dm_table_get_size(t);
+ old_size = dm_get_size(md);
+
+ if (!dm_table_supports_size_change(t, old_size, size)) {
+ old_map = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ set_capacity(md->disk, size);
+
+ ret = dm_table_set_restrictions(t, md->queue, limits);
+ if (ret) {
+ set_capacity(md->disk, old_size);
+ old_map = ERR_PTR(ret);
+ goto out;
+ }
+
/*
* Wipe any geometry if the size of the table changed.
*/
- if (size != dm_get_size(md))
+ if (size != old_size)
memset(&md->geometry, 0, sizeof(md->geometry));
- set_capacity(md->disk, size);
-
dm_table_event_callback(t, event_callback, md);
if (dm_table_request_based(t)) {
@@ -2453,10 +2453,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* requests in the queue may refer to bio from the old bioset,
* so you must walk through the queue to unprep.
*/
- if (!md->mempools) {
+ if (!md->mempools)
md->mempools = t->mempools;
- t->mempools = NULL;
- }
+ else
+ dm_free_md_mempools(t->mempools);
} else {
/*
* The md may already have mempools that need changing.
@@ -2465,14 +2465,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
*/
dm_free_md_mempools(md->mempools);
md->mempools = t->mempools;
- t->mempools = NULL;
- }
-
- ret = dm_table_set_restrictions(t, md->queue, limits);
- if (ret) {
- old_map = ERR_PTR(ret);
- goto out;
}
+ t->mempools = NULL;
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, (void *)t);
@@ -3638,10 +3632,13 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
int r, srcu_idx;
+ bool forward = true;
- r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
+ /* Not a real ioctl, but targets must not interpret non-DM ioctls */
+ r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward);
if (r < 0)
goto out;
+ WARN_ON_ONCE(!forward);
ops = bdev->bd_disk->fops->pr_ops;
if (ops && ops->pr_clear)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a0a8ff119815..245f52b59215 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -58,6 +58,7 @@ void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
+bool dm_table_is_wildcard(struct dm_table *t);
int dm_calculate_queue_limits(struct dm_table *table,
struct queue_limits *limits);
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -72,6 +73,8 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
+bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size,
+ sector_t new_size);
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
@@ -102,6 +105,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *lim);
int dm_revalidate_zones(struct dm_table *t, struct request_queue *q);
+void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
@@ -110,12 +114,14 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
sector_t sector, unsigned int nr_zones,
unsigned long *need_reset);
+#define dm_has_zone_plugs(md) ((md)->disk->zone_wplugs_hash != NULL)
#else
#define dm_blk_report_zones NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
return false;
}
+#define dm_has_zone_plugs(md) false
#endif
/*
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 37b08f26c62f..bd694910b01b 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -105,9 +105,19 @@
*
*/
+typedef __u16 bitmap_counter_t;
+
#define PAGE_BITS (PAGE_SIZE << 3)
#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
+#define COUNTER_BITS 16
+#define COUNTER_BIT_SHIFT 4
+#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
+
+#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
+#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
+#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
+
#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
@@ -789,7 +799,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
* is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
*/
write_behind = bitmap->mddev->bitmap_info.max_write_behind;
- if (write_behind > COUNTER_MAX)
+ if (write_behind > COUNTER_MAX / 2)
write_behind = COUNTER_MAX / 2;
sb->write_behind = cpu_to_le32(write_behind);
bitmap->mddev->bitmap_info.max_write_behind = write_behind;
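
The clamp change above also brings the code in line with its own comment
("We choose COUNTER_MAX / 2 arbitrarily"); previously values up to
COUNTER_MAX slipped through unclamped. For concreteness, under the
defines introduced at the top of this file:

/* Illustrative only: the values implied by the counter layout above. */
static_assert(COUNTER_MAX == 16383);		/* (1 << 14) - 1 */
static_assert(COUNTER_MAX / 2 == 8191);		/* write-behind clamp */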
@@ -1672,13 +1682,13 @@ __acquires(bitmap->lock)
&(bitmap->bp[page].map[pageoff]);
}
-static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
- unsigned long sectors)
+static void bitmap_start_write(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
- return 0;
+ return;
while (sectors) {
sector_t blocks;
@@ -1688,7 +1698,7 @@ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
if (!bmc) {
spin_unlock_irq(&bitmap->counts.lock);
- return 0;
+ return;
}
if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
@@ -1724,11 +1734,10 @@ static int bitmap_startwrite(struct mddev *mddev, sector_t offset,
else
sectors = 0;
}
- return 0;
}
-static void bitmap_endwrite(struct mddev *mddev, sector_t offset,
- unsigned long sectors)
+static void bitmap_end_write(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -2205,9 +2214,9 @@ static struct bitmap *__bitmap_create(struct mddev *mddev, int slot)
return ERR_PTR(err);
}
-static int bitmap_create(struct mddev *mddev, int slot)
+static int bitmap_create(struct mddev *mddev)
{
- struct bitmap *bitmap = __bitmap_create(mddev, slot);
+ struct bitmap *bitmap = __bitmap_create(mddev, -1);
if (IS_ERR(bitmap))
return PTR_ERR(bitmap);
@@ -2670,7 +2679,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
}
mddev->bitmap_info.offset = offset;
- rv = bitmap_create(mddev, -1);
+ rv = bitmap_create(mddev);
if (rv)
goto out;
@@ -3003,8 +3012,8 @@ static struct bitmap_operations bitmap_ops = {
.end_behind_write = bitmap_end_behind_write,
.wait_behind_writes = bitmap_wait_behind_writes,
- .startwrite = bitmap_startwrite,
- .endwrite = bitmap_endwrite,
+ .start_write = bitmap_start_write,
+ .end_write = bitmap_end_write,
.start_sync = bitmap_start_sync,
.end_sync = bitmap_end_sync,
.cond_end_sync = bitmap_cond_end_sync,
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 31c93019c76b..59e9dd45cfde 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -9,15 +9,6 @@
#define BITMAP_MAGIC 0x6d746962
-typedef __u16 bitmap_counter_t;
-#define COUNTER_BITS 16
-#define COUNTER_BIT_SHIFT 4
-#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
-
-#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
-#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
-#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
-
/* use these for bitmap->flags and bitmap->sb->state bit-fields */
enum bitmap_state {
BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
@@ -72,7 +63,7 @@ struct md_bitmap_stats {
struct bitmap_operations {
bool (*enabled)(struct mddev *mddev);
- int (*create)(struct mddev *mddev, int slot);
+ int (*create)(struct mddev *mddev);
int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize,
bool init);
@@ -89,10 +80,10 @@ struct bitmap_operations {
void (*end_behind_write)(struct mddev *mddev);
void (*wait_behind_writes)(struct mddev *mddev);
- int (*startwrite)(struct mddev *mddev, sector_t offset,
+ void (*start_write)(struct mddev *mddev, sector_t offset,
+ unsigned long sectors);
+ void (*end_write)(struct mddev *mddev, sector_t offset,
unsigned long sectors);
- void (*endwrite)(struct mddev *mddev, sector_t offset,
- unsigned long sectors);
bool (*start_sync)(struct mddev *mddev, sector_t offset,
sector_t *blocks, bool degraded);
void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9daa78c5fe33..0f03b21e66e4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -111,32 +111,48 @@ static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
- * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
- * is 1000 KB/sec, so the extra system load does not show up that much.
- * Increase it if you want to have more _guaranteed_ speed. Note that
- * the RAID driver will use the maximum available bandwidth if the IO
- * subsystem is idle. There is also an 'absolute maximum' reconstruction
- * speed limit - in case reconstruction slows down your system despite
- * idle IO detection.
+ * Current RAID-1,4,5,6,10 parallel reconstruction 'guaranteed speed limit'
+ * is sysctl_speed_limit_min, 1000 KB/sec by default, so the extra system load
+ * does not show up that much. Increase it if you want to have more guaranteed
+ * speed. Note that the RAID driver will use the maximum bandwidth
+ * sysctl_speed_limit_max, 200 MB/sec by default, if the IO subsystem is idle.
*
- * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
- * or /sys/block/mdX/md/sync_speed_{min,max}
+ * Background sync IO speed control:
+ *
+ * - below speed min:
+ * no limit;
+ * - above speed min and below speed max:
+ * a) if mddev is idle, then no limit;
+ * b) if mddev is busy handling normal IO, then limit inflight sync IO
+ * to sync_io_depth;
+ * - above speed max:
+ * sync IO can't be issued;
+ *
+ * The following settings can be changed via /proc/sys/dev/raid/ for the
+ * whole system, or via /sys/block/mdX/md/ for one array.
*/
-
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
-static inline int speed_min(struct mddev *mddev)
+static int sysctl_sync_io_depth = 32;
+
+static int speed_min(struct mddev *mddev)
{
return mddev->sync_speed_min ?
mddev->sync_speed_min : sysctl_speed_limit_min;
}
-static inline int speed_max(struct mddev *mddev)
+static int speed_max(struct mddev *mddev)
{
return mddev->sync_speed_max ?
mddev->sync_speed_max : sysctl_speed_limit_max;
}
+static int sync_io_depth(struct mddev *mddev)
+{
+ return mddev->sync_io_depth ?
+ mddev->sync_io_depth : sysctl_sync_io_depth;
+}
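
Taken together, the policy described in the comment above boils down to the following decision (a sketch using the helpers introduced by this patch; sync_should_wait() is a hypothetical name, the real check lives in md_do_sync() further down):

	static bool sync_should_wait(struct mddev *mddev, int cur_kbps)
	{
		if (cur_kbps < speed_min(mddev))
			return false;		/* below speed min: no limit */
		if (cur_kbps > speed_max(mddev))
			return true;		/* above speed max: always wait */
		/*
		 * Between min and max: wait only while normal IO keeps the
		 * array busy and more than sync_io_depth worth of sync IO
		 * is already inflight.
		 */
		return !sync_io_within_limit(mddev) && !is_mddev_idle(mddev, 0);
	}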
+
static void rdev_uninit_serial(struct md_rdev *rdev)
{
if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
@@ -293,14 +309,21 @@ static const struct ctl_table raid_table[] = {
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
- .mode = S_IRUGO|S_IWUSR,
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
- .mode = S_IRUGO|S_IWUSR,
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sync_io_depth",
+ .data = &sysctl_sync_io_depth,
+ .maxlen = sizeof(int),
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
};
@@ -5091,7 +5114,7 @@ static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
- mddev->sync_speed_min ? "local": "system");
+ mddev->sync_speed_min ? "local" : "system");
}
static ssize_t
@@ -5100,7 +5123,7 @@ sync_min_store(struct mddev *mddev, const char *buf, size_t len)
unsigned int min;
int rv;
- if (strncmp(buf, "system", 6)==0) {
+ if (strncmp(buf, "system", 6) == 0) {
min = 0;
} else {
rv = kstrtouint(buf, 10, &min);
@@ -5120,7 +5143,7 @@ static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
- mddev->sync_speed_max ? "local": "system");
+ mddev->sync_speed_max ? "local" : "system");
}
static ssize_t
@@ -5129,7 +5152,7 @@ sync_max_store(struct mddev *mddev, const char *buf, size_t len)
unsigned int max;
int rv;
- if (strncmp(buf, "system", 6)==0) {
+ if (strncmp(buf, "system", 6) == 0) {
max = 0;
} else {
rv = kstrtouint(buf, 10, &max);
@@ -5146,6 +5169,35 @@ static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
+sync_io_depth_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%d (%s)\n", sync_io_depth(mddev),
+ mddev->sync_io_depth ? "local" : "system");
+}
+
+static ssize_t
+sync_io_depth_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned int max;
+ int rv;
+
+ if (strncmp(buf, "system", 6) == 0) {
+ max = 0;
+ } else {
+ rv = kstrtouint(buf, 10, &max);
+ if (rv < 0)
+ return rv;
+ if (max == 0)
+ return -EINVAL;
+ }
+ mddev->sync_io_depth = max;
+ return len;
+}
+
+static struct md_sysfs_entry md_sync_io_depth =
+__ATTR_RW(sync_io_depth);
+
+static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->degraded);
@@ -5671,6 +5723,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_mismatches.attr,
&md_sync_min.attr,
&md_sync_max.attr,
+ &md_sync_io_depth.attr,
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
@@ -5995,7 +6048,7 @@ static int add_named_array(const char *val, const struct kernel_param *kp)
static void md_safemode_timeout(struct timer_list *t)
{
- struct mddev *mddev = from_timer(mddev, t, safemode_timer);
+ struct mddev *mddev = timer_container_of(mddev, t, safemode_timer);
mddev->safemode = 1;
if (mddev->external)
@@ -6172,7 +6225,7 @@ int md_run(struct mddev *mddev)
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
- err = mddev->bitmap_ops->create(mddev, -1);
+ err = mddev->bitmap_ops->create(mddev);
if (err)
pr_warn("%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
@@ -7232,7 +7285,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
err = 0;
if (mddev->pers) {
if (fd >= 0) {
- err = mddev->bitmap_ops->create(mddev, -1);
+ err = mddev->bitmap_ops->create(mddev);
if (!err)
err = mddev->bitmap_ops->load(mddev);
@@ -7548,7 +7601,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
- rv = mddev->bitmap_ops->create(mddev, -1);
+ rv = mddev->bitmap_ops->create(mddev);
if (!rv)
rv = mddev->bitmap_ops->load(mddev);
@@ -8572,50 +8625,55 @@ void md_cluster_stop(struct mddev *mddev)
put_cluster_ops(mddev);
}
-static int is_mddev_idle(struct mddev *mddev, int init)
+static bool is_rdev_holder_idle(struct md_rdev *rdev, bool init)
{
+ unsigned long last_events = rdev->last_events;
+
+ if (!bdev_is_partition(rdev->bdev))
+ return true;
+
+ /*
+ * If the rdev is a partition, the array is still not idle when the user
+ * issues IO to other partitions on the same disk, even if no IO is
+ * issued to the array itself.
+ */
+ rdev->last_events = part_stat_read_accum(rdev->bdev->bd_disk->part0,
+ sectors) -
+ part_stat_read_accum(rdev->bdev, sectors);
+
+ return init || rdev->last_events <= last_events;
+}
+
+/*
+ * mddev is idle if all of the following have held since the last check:
+ * 1) no normal IO has completed on the mddev;
+ * 2) the mddev has no inflight normal IO;
+ * 3) if any member disk is a partition, no IO has completed on the other
+ *    partitions of that disk.
+ *
+ * Note that this check relies on IO accounting being enabled.
+ */
+static bool is_mddev_idle(struct mddev *mddev, int init)
+{
+ unsigned long last_events = mddev->normal_io_events;
+ struct gendisk *disk;
struct md_rdev *rdev;
- int idle;
- int curr_events;
+ bool idle = true;
- idle = 1;
- rcu_read_lock();
- rdev_for_each_rcu(rdev, mddev) {
- struct gendisk *disk = rdev->bdev->bd_disk;
+ disk = mddev_is_dm(mddev) ? mddev->dm_gendisk : mddev->gendisk;
+ if (!disk)
+ return true;
- if (!init && !blk_queue_io_stat(disk->queue))
- continue;
+ mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors);
+ if (!init && (mddev->normal_io_events > last_events ||
+ bdev_count_inflight(disk->part0)))
+ idle = false;
- curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
- atomic_read(&disk->sync_io);
- /* sync IO will cause sync_io to increase before the disk_stats
- * as sync_io is counted when a request starts, and
- * disk_stats is counted when it completes.
- * So resync activity will cause curr_events to be smaller than
- * when there was no such activity.
- * non-sync IO will cause disk_stat to increase without
- * increasing sync_io so curr_events will (eventually)
- * be larger than it was before. Once it becomes
- * substantially larger, the test below will cause
- * the array to appear non-idle, and resync will slow
- * down.
- * If there is a lot of outstanding resync activity when
- * we set last_event to curr_events, then all that activity
- * completing might cause the array to appear non-idle
- * and resync will be slowed down even though there might
- * not have been non-resync activity. This will only
- * happen once though. 'last_events' will soon reflect
- * the state where there is little or no outstanding
- * resync requests, and further resync activity will
- * always make curr_events less than last_events.
- *
- */
- if (init || curr_events - rdev->last_events > 64) {
- rdev->last_events = curr_events;
- idle = 0;
- }
- }
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+ if (!is_rdev_holder_idle(rdev, init))
+ idle = false;
rcu_read_unlock();
+
return idle;
}
@@ -8741,14 +8799,14 @@ static void md_bitmap_start(struct mddev *mddev,
mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
&md_io_clone->sectors);
- mddev->bitmap_ops->startwrite(mddev, md_io_clone->offset,
- md_io_clone->sectors);
+ mddev->bitmap_ops->start_write(mddev, md_io_clone->offset,
+ md_io_clone->sectors);
}
static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
{
- mddev->bitmap_ops->endwrite(mddev, md_io_clone->offset,
- md_io_clone->sectors);
+ mddev->bitmap_ops->end_write(mddev, md_io_clone->offset,
+ md_io_clone->sectors);
}
static void md_end_clone_io(struct bio *bio)
@@ -8927,6 +8985,23 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
}
}
+static bool sync_io_within_limit(struct mddev *mddev)
+{
+ int io_sectors;
+
+ /*
+ * For raid456, sync IO is one stripe (4k) per IO; for other levels,
+ * it's RESYNC_PAGES (64k) per IO.
+ */
+ if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6)
+ io_sectors = 8;
+ else
+ io_sectors = 128;
+
+ return atomic_read(&mddev->recovery_active) <
+ io_sectors * sync_io_depth(mddev);
+}
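
As a worked example (not part of the patch): with the default sync_io_depth of 32, this caps inflight sync IO at 8 * 32 = 256 sectors (128 KiB) for raid4/5/6, and at 128 * 32 = 4096 sectors (2 MiB) for the other levels.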
+
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
@@ -9195,7 +9270,8 @@ void md_do_sync(struct md_thread *thread)
msleep(500);
goto repeat;
}
- if (!is_mddev_idle(mddev, 0)) {
+ if (!sync_io_within_limit(mddev) &&
+ !is_mddev_idle(mddev, 0)) {
/*
* Give other IO more of a chance.
* The faster the devices, the less we wait.
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1cf00a04bcdd..d45a9e6ead80 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -132,7 +132,7 @@ struct md_rdev {
sector_t sectors; /* Device size (in 512bytes sectors) */
struct mddev *mddev; /* RAID array if running */
- int last_events; /* IO event timestamp */
+ unsigned long last_events; /* IO event timestamp */
/*
* If meta_bdev is non-NULL, it means that a separate device is
@@ -404,7 +404,8 @@ struct mddev {
* are happening, so run/
* takeover/stop are not safe
*/
- struct gendisk *gendisk;
+ struct gendisk *gendisk; /* mdraid gendisk */
+ struct gendisk *dm_gendisk; /* dm-raid gendisk */
struct kobject kobj;
int hold_active;
@@ -483,6 +484,7 @@ struct mddev {
/* if zero, use the system-wide default */
int sync_speed_min;
int sync_speed_max;
+ int sync_io_depth;
/* resync even though the same disks are shared among md-devices */
int parallel_resync;
@@ -518,6 +520,7 @@ struct mddev {
* adding a spare
*/
+ unsigned long normal_io_events; /* IO event timestamp */
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
sector_t recovery_cp;
@@ -714,17 +717,6 @@ static inline int mddev_trylock(struct mddev *mddev)
}
extern void mddev_unlock(struct mddev *mddev);
-static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
-{
- if (blk_queue_io_stat(bdev->bd_disk->queue))
- atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
-}
-
-static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
-{
- md_sync_acct(bio->bi_bdev, nr_sectors);
-}
-
struct md_personality
{
struct md_submodule_head head;
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index c7efd8aab675..b8b3a9069701 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -293,3 +293,13 @@ static inline bool raid1_should_read_first(struct mddev *mddev,
return false;
}
+
+/*
+ * A bio with REQ_RAHEAD or REQ_NOWAIT can fail at any time, even before such
+ * IO is submitted to the underlying disks; hence don't record badblocks or
+ * retry in this case.
+ */
+static inline bool raid1_should_handle_error(struct bio *bio)
+{
+ return !(bio->bi_opf & (REQ_RAHEAD | REQ_NOWAIT));
+}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index de9bccbe7337..19c5a0ce5a40 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -373,14 +373,16 @@ static void raid1_end_read_request(struct bio *bio)
*/
update_head_pos(r1_bio->read_disk, r1_bio);
- if (uptodate)
+ if (uptodate) {
set_bit(R1BIO_Uptodate, &r1_bio->state);
- else if (test_bit(FailFast, &rdev->flags) &&
- test_bit(R1BIO_FailFast, &r1_bio->state))
+ } else if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state)) {
/* This was a fail-fast read so we definitely
* want to retry */
;
- else {
+ } else if (!raid1_should_handle_error(bio)) {
+ uptodate = 1;
+ } else {
/* If all other devices have failed, we want to return
* the error upwards rather than fail the last device.
* Here we redefine "uptodate" to mean "Don't want to retry"
@@ -451,16 +453,15 @@ static void raid1_end_write_request(struct bio *bio)
struct bio *to_put = NULL;
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
- bool discard_error;
sector_t lo = r1_bio->sector;
sector_t hi = r1_bio->sector + r1_bio->sectors;
-
- discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
+ bool ignore_error = !raid1_should_handle_error(bio) ||
+ (bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
/*
* 'one mirror IO has finished' event handler:
*/
- if (bio->bi_status && !discard_error) {
+ if (bio->bi_status && !ignore_error) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
@@ -511,7 +512,7 @@ static void raid1_end_write_request(struct bio *bio)
/* Maybe we can clear some bad blocks. */
if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
- !discard_error) {
+ !ignore_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
@@ -2382,7 +2383,6 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
- md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
submit_bio_noacct(wbio);
}
@@ -3055,7 +3055,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
read_targets--;
- md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
submit_bio_noacct(bio);
@@ -3064,7 +3063,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
- md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
submit_bio_noacct(bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ba32bac975b8..b74780af4c22 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -399,6 +399,8 @@ static void raid10_end_read_request(struct bio *bio)
* wait for the 'master' bio.
*/
set_bit(R10BIO_Uptodate, &r10_bio->state);
+ } else if (!raid1_should_handle_error(bio)) {
+ uptodate = 1;
} else {
/* If all other devices that store this block have
* failed, we want to return the error upwards rather
@@ -456,9 +458,8 @@ static void raid10_end_write_request(struct bio *bio)
int slot, repl;
struct md_rdev *rdev = NULL;
struct bio *to_put = NULL;
- bool discard_error;
-
- discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
+ bool ignore_error = !raid1_should_handle_error(bio) ||
+ (bio->bi_status && bio_op(bio) == REQ_OP_DISCARD);
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
@@ -472,7 +473,7 @@ static void raid10_end_write_request(struct bio *bio)
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (bio->bi_status && !discard_error) {
+ if (bio->bi_status && !ignore_error) {
if (repl)
/* Never record new bad blocks to replacement,
* just fail it.
@@ -527,7 +528,7 @@ static void raid10_end_write_request(struct bio *bio)
/* Maybe we can clear some bad blocks. */
if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
r10_bio->sectors) &&
- !discard_error) {
+ !ignore_error) {
bio_put(bio);
if (repl)
r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
@@ -2426,7 +2427,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
- md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
@@ -2448,8 +2448,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_copy_data(tbio, fbio);
d = r10_bio->devs[i].devnum;
atomic_inc(&r10_bio->remaining);
- md_sync_acct(conf->mirrors[d].replacement->bdev,
- bio_sectors(tbio));
submit_bio_noacct(tbio);
}
@@ -2583,13 +2581,10 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[1].devnum;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
submit_bio_noacct(wbio);
}
if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
- md_sync_acct(conf->mirrors[d].replacement->bdev,
- bio_sectors(wbio2));
submit_bio_noacct(wbio2);
}
}
@@ -3757,7 +3752,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sectors = nr_sectors;
if (bio->bi_end_io == end_sync_read) {
- md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0;
submit_bio_noacct(bio);
}
@@ -4880,7 +4874,6 @@ read_more:
r10_bio->sectors = nr_sectors;
/* Now submit the read */
- md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
submit_bio_noacct(read_bio);
@@ -4940,7 +4933,6 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
atomic_inc(&rdev->nr_pending);
- md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
submit_bio_noacct(b);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 6389383166c0..ca5b0e8ba707 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1240,10 +1240,6 @@ again:
}
if (rdev) {
- if (s->syncing || s->expanding || s->expanded
- || s->replacing)
- md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf));
-
set_bit(STRIPE_IO_STARTED, &sh->state);
bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
@@ -1300,10 +1296,6 @@ again:
submit_bio_noacct(bi);
}
if (rrdev) {
- if (s->syncing || s->expanding || s->expanded
- || s->replacing)
- md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf));
-
set_bit(STRIPE_IO_STARTED, &sh->state);
bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 12b73ea0f31d..419b9a7abcce 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -298,6 +298,7 @@ struct cec_dmi_match {
static const char *const port_b_conns[] = { "Port B", NULL };
static const char *const port_db_conns[] = { "Port D", "Port B", NULL };
static const char *const port_ba_conns[] = { "Port B", "Port A", NULL };
+static const char *const port_ab_conns[] = { "Port A", "Port B", NULL };
static const char *const port_d_conns[] = { "Port D", NULL };
static const struct cec_dmi_match cec_dmi_match_table[] = {
@@ -329,6 +330,10 @@ static const struct cec_dmi_match cec_dmi_match_table[] = {
{ "Google", "Dexi", "0000:00:02.0", port_db_conns },
/* Google Dita */
{ "Google", "Dita", "0000:00:02.0", port_db_conns },
+ /* Google Dirks */
+ { "Google", "Dirks", "0000:00:02.0", port_ab_conns },
+ /* Google Moxie */
+ { "Google", "Moxie", "0000:00:02.0", port_b_conns },
};
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
index cfbfc4c1b2e6..41d019b01ec0 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
@@ -1002,8 +1002,8 @@ static int extron_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct extron_port *port = cec_get_drvdata(adap);
- char buf[CEC_MAX_MSG_SIZE * 3 + 1];
- char cmd[CEC_MAX_MSG_SIZE * 3 + 13];
+ char buf[(CEC_MAX_MSG_SIZE - 1) * 3 + 1];
+ char cmd[sizeof(buf) + 14];
unsigned int i;
if (port->disconnected)
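
For scale (a worked reading of the new bounds, not part of the patch): with CEC_MAX_MSG_SIZE == 16, buf shrinks to (16 - 1) * 3 + 1 = 46 bytes — three characters per hex-encoded byte for at most CEC_MAX_MSG_SIZE - 1 payload bytes, plus the terminating NUL — and cmd is sized as sizeof(buf) plus 14 bytes of command framing.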
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 2952678cce45..9d0362a75ecd 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -153,7 +153,7 @@ void saa7146_buffer_next(struct saa7146_dev *dev,
void saa7146_buffer_timeout(struct timer_list *t)
{
- struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
+ struct saa7146_dmaqueue *q = timer_container_of(q, t, timeout);
struct saa7146_dev *dev = q->dev;
unsigned long flags;
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c
index 6c324a683be9..1ffcc025d1a4 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/media/common/saa7146/saa7146_vbi.c
@@ -330,7 +330,7 @@ static void vbi_stop(struct saa7146_dev *dev)
static void vbi_read_timeout(struct timer_list *t)
{
- struct saa7146_vv *vv = from_timer(vv, t, vbi_read_timeout);
+ struct saa7146_vv *vv = timer_container_of(vv, t, vbi_read_timeout);
struct saa7146_dev *dev = vv->vbi_dmaq.dev;
DEB_VBI("dev:%p\n", dev);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index c6ddf2357c58..b3bf2173c14e 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -469,7 +469,7 @@ vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
struct vb2_dma_sg_buf *buf = dbuf->priv;
struct sg_table *sgt = buf->dma_sgt;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
return 0;
}
@@ -480,7 +480,7 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
struct vb2_dma_sg_buf *buf = dbuf->priv;
struct sg_table *sgt = buf->dma_sgt;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
return 0;
}
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 9201d854dbcc..1cd26faee503 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -903,6 +903,11 @@ EXPORT_SYMBOL_GPL(vb2_expbuf);
int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
+ /* vb2_memory should match with v4l2_memory */
+ BUILD_BUG_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP);
+ BUILD_BUG_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR);
+ BUILD_BUG_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF);
+
/*
* Sanity check
*/
@@ -916,12 +921,6 @@ int vb2_queue_init_name(struct vb2_queue *q, const char *name)
WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
- /* Warn that vb2_memory should match with v4l2_memory */
- if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
- || WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
- || WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
- return -EINVAL;
-
if (q->buf_struct_size == 0)
q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
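
Moving the checks to BUILD_BUG_ON() turns a vb2_memory/v4l2_memory mismatch into a compile-time failure instead of a runtime WARN_ON() followed by -EINVAL, so the sanity check can no longer trigger in the field.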
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 1e985f943944..151177e5a06d 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -352,7 +352,8 @@ static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
static void dvb_dmxdev_filter_timeout(struct timer_list *t)
{
- struct dmxdev_filter *dmxdevfilter = from_timer(dmxdevfilter, t, timer);
+ struct dmxdev_filter *dmxdevfilter = timer_container_of(dmxdevfilter,
+ t, timer);
dmxdevfilter->buffer.error = -ETIMEDOUT;
spin_lock_irq(&dmxdevfilter->dev->lock);
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index c5582d4fa5be..b40daf242046 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2630,7 +2630,7 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
dib7090_configMpegMux(state, 3, 1, 1);
dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
- dprintk("setting output mode TS_SERIAL using Smooth bloc\n");
+ dprintk("setting output mode TS_SERIAL using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (2<<6) | (0 << 1);
}
@@ -2654,7 +2654,7 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
outreg |= (1<<6);
break;
- case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux bloc */
+ case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux block */
dprintk("setting output mode TS_FIFO using Smooth block\n");
dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5<<6);
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index cfe59c3255f7..d90f1b0b2051 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -1584,7 +1584,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
dib8096p_configMpegMux(state, 3, 1, 1);
dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
} else {/* Use Smooth block */
- dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc\n");
+ dprintk("dib8096P setting output mode TS_SERIAL using Smooth block\n");
dib8096p_setHostBusMux(state,
DEMOUT_ON_HOSTBUS);
outreg |= (2 << 6) | (0 << 1);
@@ -1612,7 +1612,8 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
case OUTMODE_MPEG2_FIFO:
/* Using Smooth block because not supported
- by new Mpeg Mux bloc */
+ * by new Mpeg Mux block
+ */
dprintk("dib8096P setting output mode TS_FIFO using Smooth block\n");
dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
outreg |= (5 << 6);
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index e45ba127069f..e68202954a8f 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -217,6 +217,7 @@ config VIDEO_IMX319
config VIDEO_IMX334
tristate "Sony IMX334 sensor support"
depends on OF_GPIO
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX334 camera.
@@ -356,6 +357,26 @@ config VIDEO_OV02A10
To compile this driver as a module, choose M here: the
module will be called ov02a10.
+config VIDEO_OV02E10
+ tristate "OmniVision OV02E10 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV02E10 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov02e10.
+
+config VIDEO_OV02C10
+ tristate "OmniVision OV02C10 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV02C10 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov02c10.
+
config VIDEO_OV08D10
tristate "OmniVision OV08D10 sensor support"
help
@@ -691,6 +712,28 @@ config VIDEO_S5K6A3
This is a V4L2 sensor driver for Samsung S5K6A3 raw
camera sensor.
+config VIDEO_VD55G1
+ tristate "ST VD55G1 sensor support"
+ select V4L2_CCI_I2C
+ depends on GPIOLIB
+ help
+ This is a Video4Linux2 sensor driver for the ST VD55G1
+ camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vd55g1.
+
+config VIDEO_VD56G3
+ tristate "ST VD56G3 sensor support"
+ select V4L2_CCI_I2C
+ depends on GPIOLIB
+ help
+ This is a Video4Linux2 sensor driver for the ST VD56G3
+ camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vd56g3.
+
config VIDEO_VGXY61
tristate "ST VGXY61 sensor support"
select V4L2_CCI_I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 6c23a4463527..5873d29433ee 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -83,6 +83,8 @@ obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
+obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o
+obj-$(CONFIG_VIDEO_OV02E10) += ov02e10.o
obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o
obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o
obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
@@ -153,6 +155,8 @@ obj-$(CONFIG_VIDEO_TW9910) += tw9910.o
obj-$(CONFIG_VIDEO_UDA1342) += uda1342.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
+obj-$(CONFIG_VIDEO_VD55G1) += vd55g1.o
+obj-$(CONFIG_VIDEO_VD56G3) += vd56g3.o
obj-$(CONFIG_VIDEO_VGXY61) += vgxy61.o
obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index f95a99d85360..853c7806de92 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1370,9 +1370,9 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
case V4L2_COLORSPACE_BT2020:
c = HDMI_COLORIMETRY_EXTENDED;
if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
- ec = 5; /* Not yet available in hdmi.h */
+ ec = HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM;
else
- ec = 6; /* Not yet available in hdmi.h */
+ ec = HDMI_EXTENDED_COLORIMETRY_BT2020;
break;
default:
break;
diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
index 34ccda666524..4eb83636e102 100644
--- a/drivers/media/i2c/ccs-pll.c
+++ b/drivers/media/i2c/ccs-pll.c
@@ -123,10 +123,15 @@ static void print_pll(struct device *dev, const struct ccs_pll *pll)
pll->pixel_rate_pixel_array);
dev_dbg(dev, "pixel rate on CSI-2 bus:\t%u\n",
pll->pixel_rate_csi);
+}
- dev_dbg(dev, "flags%s%s%s%s%s%s%s%s%s\n",
+static void print_pll_flags(struct device *dev, struct ccs_pll *pll)
+{
+ dev_dbg(dev, "PLL flags%s%s%s%s%s%s%s%s%s%s%s\n",
+ pll->flags & PLL_FL(OP_PIX_CLOCK_PER_LANE) ? " op-pix-clock-per-lane" : "",
+ pll->flags & PLL_FL(EVEN_PLL_MULTIPLIER) ? " even-pll-multiplier" : "",
+ pll->flags & PLL_FL(NO_OP_CLOCKS) ? " no-op-clocks" : "",
pll->flags & PLL_FL(LANE_SPEED_MODEL) ? " lane-speed" : "",
- pll->flags & PLL_FL(LINK_DECOUPLED) ? " link-decoupled" : "",
pll->flags & PLL_FL(EXT_IP_PLL_DIVIDER) ?
" ext-ip-pll-divider" : "",
pll->flags & PLL_FL(FLEXIBLE_OP_PIX_CLK_DIV) ?
@@ -311,14 +316,24 @@ __ccs_pll_calculate_vt_tree(struct device *dev,
more_mul *= DIV_ROUND_UP(lim_fr->min_pll_multiplier, mul * more_mul);
dev_dbg(dev, "more_mul2: %u\n", more_mul);
- pll_fr->pll_multiplier = mul * more_mul;
+ if (pll->flags & CCS_PLL_FLAG_EVEN_PLL_MULTIPLIER &&
+ (mul & 1) && (more_mul & 1))
+ more_mul <<= 1;
- if (pll_fr->pll_multiplier * pll_fr->pll_ip_clk_freq_hz >
- lim_fr->max_pll_op_clk_freq_hz)
+ pll_fr->pll_multiplier = mul * more_mul;
+ if (pll_fr->pll_multiplier > lim_fr->max_pll_multiplier) {
+ dev_dbg(dev, "pll multiplier %u too high\n",
+ pll_fr->pll_multiplier);
return -EINVAL;
+ }
pll_fr->pll_op_clk_freq_hz =
pll_fr->pll_ip_clk_freq_hz * pll_fr->pll_multiplier;
+ if (pll_fr->pll_op_clk_freq_hz > lim_fr->max_pll_op_clk_freq_hz) {
+ dev_dbg(dev, "too high OP clock %u\n",
+ pll_fr->pll_op_clk_freq_hz);
+ return -EINVAL;
+ }
vt_div = div * more_mul;
@@ -397,6 +412,8 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
min_pre_pll_clk_div = max_t(u16, min_pre_pll_clk_div,
pll->ext_clk_freq_hz /
lim_fr->max_pll_ip_clk_freq_hz);
+ if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER))
+ min_pre_pll_clk_div = clk_div_even(min_pre_pll_clk_div);
dev_dbg(dev, "vt min/max_pre_pll_clk_div: %u,%u\n",
min_pre_pll_clk_div, max_pre_pll_clk_div);
@@ -432,10 +449,11 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
return 0;
}
+ dev_dbg(dev, "unable to compute VT pre_pll divisor\n");
return -EINVAL;
}
-static void
+static int
ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
const struct ccs_pll_branch_limits_bk *op_lim_bk,
struct ccs_pll *pll, struct ccs_pll_branch_fr *pll_fr,
@@ -558,6 +576,8 @@ ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
if (best_pix_div < SHRT_MAX >> 1)
break;
}
+ if (best_pix_div == SHRT_MAX >> 1)
+ return -EINVAL;
pll->vt_bk.sys_clk_div = DIV_ROUND_UP(vt_div, best_pix_div);
pll->vt_bk.pix_clk_div = best_pix_div;
@@ -570,6 +590,8 @@ ccs_pll_calculate_vt(struct device *dev, const struct ccs_pll_limits *lim,
out_calc_pixel_rate:
pll->pixel_rate_pixel_array =
pll->vt_bk.pix_clk_freq_hz * pll->vt_lanes;
+
+ return 0;
}
/*
@@ -659,6 +681,10 @@ ccs_pll_calculate_op(struct device *dev, const struct ccs_pll_limits *lim,
if (!is_one_or_even(i))
i <<= 1;
+ if (pll->flags & CCS_PLL_FLAG_EVEN_PLL_MULTIPLIER &&
+ mul & 1 && i & 1)
+ i <<= 1;
+
dev_dbg(dev, "final more_mul: %u\n", i);
if (i > more_mul_max) {
dev_dbg(dev, "final more_mul is bad, max %u\n", more_mul_max);
@@ -716,6 +742,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
u32 i;
int rval = -EINVAL;
+ print_pll_flags(dev, pll);
+
if (!(pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL)) {
pll->op_lanes = 1;
pll->vt_lanes = 1;
@@ -792,7 +820,7 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
op_lim_fr->min_pre_pll_clk_div, op_lim_fr->max_pre_pll_clk_div);
max_op_pre_pll_clk_div =
min_t(u16, op_lim_fr->max_pre_pll_clk_div,
- clk_div_even(pll->ext_clk_freq_hz /
+ DIV_ROUND_UP(pll->ext_clk_freq_hz,
op_lim_fr->min_pll_ip_clk_freq_hz));
min_op_pre_pll_clk_div =
max_t(u16, op_lim_fr->min_pre_pll_clk_div,
@@ -815,6 +843,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
one_or_more(
DIV_ROUND_UP(op_lim_fr->max_pll_op_clk_freq_hz,
pll->ext_clk_freq_hz))));
+ if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER))
+ min_op_pre_pll_clk_div = clk_div_even(min_op_pre_pll_clk_div);
dev_dbg(dev, "pll_op check: min / max op_pre_pll_clk_div: %u / %u\n",
min_op_pre_pll_clk_div, max_op_pre_pll_clk_div);
@@ -843,8 +873,10 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
if (pll->flags & CCS_PLL_FLAG_DUAL_PLL)
break;
- ccs_pll_calculate_vt(dev, lim, op_lim_bk, pll, op_pll_fr,
- op_pll_bk, cphy, phy_const);
+ rval = ccs_pll_calculate_vt(dev, lim, op_lim_bk, pll, op_pll_fr,
+ op_pll_bk, cphy, phy_const);
+ if (rval)
+ continue;
rval = check_bk_bounds(dev, lim, pll, PLL_VT);
if (rval)
@@ -857,8 +889,7 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
}
if (rval) {
- dev_dbg(dev, "unable to compute pre_pll divisor\n");
-
+ dev_dbg(dev, "unable to compute OP pre_pll divisor\n");
return rval;
}
diff --git a/drivers/media/i2c/ccs-pll.h b/drivers/media/i2c/ccs-pll.h
index 6eb1b1c68e1e..e22903931e72 100644
--- a/drivers/media/i2c/ccs-pll.h
+++ b/drivers/media/i2c/ccs-pll.h
@@ -18,19 +18,40 @@
#define CCS_PLL_BUS_TYPE_CSI2_DPHY 0x00
#define CCS_PLL_BUS_TYPE_CSI2_CPHY 0x01
-/* Old SMIA and implementation specific flags */
-/* op pix clock is for all lanes in total normally */
+/* Old SMIA and implementation specific flags. */
+/* OP PIX clock is for all lanes in total normally. */
#define CCS_PLL_FLAG_OP_PIX_CLOCK_PER_LANE BIT(0)
-#define CCS_PLL_FLAG_NO_OP_CLOCKS BIT(1)
+/* If set, the PLL multipliers are required to be even. */
+#define CCS_PLL_FLAG_EVEN_PLL_MULTIPLIER BIT(3)
+
/* CCS PLL flags */
+
+/* The sensor doesn't have OP clocks at all. */
+#define CCS_PLL_FLAG_NO_OP_CLOCKS BIT(1)
+/* System speed model if this flag is unset. */
#define CCS_PLL_FLAG_LANE_SPEED_MODEL BIT(2)
-#define CCS_PLL_FLAG_LINK_DECOUPLED BIT(3)
+/* If set, the pre-PLL divider may have odd values, too. */
#define CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER BIT(4)
+/*
+ * If set, the OP PIX clock doesn't have to exactly match the data rate; it
+ * may be higher. See "OP Domain Formulas" in the MIPI CCS 1.1 spec.
+ */
#define CCS_PLL_FLAG_FLEXIBLE_OP_PIX_CLK_DIV BIT(5)
+/* If set, the VT domain may run faster than the OP domain. */
#define CCS_PLL_FLAG_FIFO_DERATING BIT(6)
+/* If set, the VT domain may run slower than the OP domain. */
#define CCS_PLL_FLAG_FIFO_OVERRATING BIT(7)
+/* If set, the PLL tree has two PLLs instead of one. */
#define CCS_PLL_FLAG_DUAL_PLL BIT(8)
+/*
+ * If set, the OP SYS clock is a dual data rate clock, transferring two bits per
+ * cycle instead of one.
+ */
#define CCS_PLL_FLAG_OP_SYS_DDR BIT(9)
+/*
+ * If set, the OP PIX clock is a dual data rate clock, transferring two pixels
+ * per cycle instead of one.
+ */
#define CCS_PLL_FLAG_OP_PIX_DDR BIT(10)
/**
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 004d28c33287..487bcabb4a19 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -1354,8 +1354,10 @@ static int ccs_change_cci_addr(struct ccs_sensor *sensor)
client->addr = sensor->hwcfg.i2c_addr_dfl;
- rval = ccs_write(sensor, CCI_ADDRESS_CTRL,
- sensor->hwcfg.i2c_addr_alt << 1);
+ rval = read_poll_timeout(ccs_write, rval, !rval, CCS_RESET_DELAY_US,
+ CCS_RESET_TIMEOUT_US, false, sensor,
+ CCI_ADDRESS_CTRL,
+ sensor->hwcfg.i2c_addr_alt << 1);
if (rval)
return rval;
@@ -1575,44 +1577,38 @@ static int ccs_power_on(struct device *dev)
if (ccsdev->flags & CCS_DEVICE_FLAG_IS_SMIA)
sleep = SMIAPP_RESET_DELAY(sensor->hwcfg.ext_clk);
else
- sleep = 5000;
+ sleep = CCS_RESET_DELAY_US;
usleep_range(sleep, sleep);
}
/*
- * Failures to respond to the address change command have been noticed.
- * Those failures seem to be caused by the sensor requiring a longer
- * boot time than advertised. An additional 10ms delay seems to work
- * around the issue, but the SMIA++ I2C write retry hack makes the delay
- * unnecessary. The failures need to be investigated to find a proper
- * fix, and a delay will likely need to be added here if the I2C write
- * retry hack is reverted before the root cause of the boot time issue
- * is found.
+ * Some devices take longer than the spec-defined time to respond
+ * after reset. Keep trying until enough time has passed before flagging
+ * it as an error.
*/
-
if (!sensor->reset && !sensor->xshutdown) {
- u8 retry = 100;
u32 reset;
- rval = ccs_write(sensor, SOFTWARE_RESET, CCS_SOFTWARE_RESET_ON);
+ rval = read_poll_timeout(ccs_write, rval, !rval,
+ CCS_RESET_DELAY_US,
+ CCS_RESET_TIMEOUT_US,
+ false, sensor, SOFTWARE_RESET,
+ CCS_SOFTWARE_RESET_ON);
if (rval < 0) {
dev_err(dev, "software reset failed\n");
goto out_cci_addr_fail;
}
- do {
- rval = ccs_read(sensor, SOFTWARE_RESET, &reset);
- reset = !rval && reset == CCS_SOFTWARE_RESET_OFF;
- if (reset)
- break;
-
- usleep_range(1000, 2000);
- } while (--retry);
-
- if (!reset) {
- dev_err(dev, "software reset failed\n");
- rval = -EIO;
+ rval = read_poll_timeout(ccs_read, rval,
+ !rval &&
+ reset == CCS_SOFTWARE_RESET_OFF,
+ CCS_RESET_DELAY_US,
+ CCS_RESET_TIMEOUT_US, false, sensor,
+ SOFTWARE_RESET, &reset);
+ if (rval < 0) {
+ dev_err_probe(dev, rval,
+ "failed to respond after reset\n");
goto out_cci_addr_fail;
}
}
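
The conversions above rely on read_poll_timeout() from <linux/iopoll.h>, which replaces the open-coded retry loops. Roughly:

	/*
	 * read_poll_timeout(op, val, cond, sleep_us, timeout_us,
	 *                   sleep_before_read, args...)
	 * repeatedly evaluates val = op(args...), sleeping sleep_us between
	 * attempts, until cond becomes true or timeout_us expires; it
	 * returns 0 on success and -ETIMEDOUT on timeout. The false
	 * argument here means "don't sleep before the first attempt".
	 */

Here cond is !rval (the write succeeded) or the reset bit clearing, so the whole register access is retried until the sensor responds or CCS_RESET_TIMEOUT_US elapses.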
@@ -2857,10 +2853,6 @@ static int ccs_identify_module(struct ccs_sensor *sensor)
break;
}
- if (i >= ARRAY_SIZE(ccs_module_idents))
- dev_warn(&client->dev,
- "no quirks for this module; let's hope it's fully compliant\n");
-
dev_dbg(&client->dev, "the sensor is called %s\n", minfo->name);
return 0;
@@ -3131,8 +3123,6 @@ static int ccs_get_hwconfig(struct ccs_sensor *sensor, struct device *dev)
rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
- if (rval)
- dev_info(dev, "can't get clock-frequency\n");
dev_dbg(dev, "clk %u, mode %u\n", hwcfg->ext_clk,
hwcfg->csi_signalling_mode);
@@ -3451,7 +3441,6 @@ static int ccs_probe(struct i2c_client *client)
CCS_LIM(sensor, NUM_OF_VT_LANES) + 1;
sensor->pll.op_lanes =
CCS_LIM(sensor, NUM_OF_OP_LANES) + 1;
- sensor->pll.flags |= CCS_PLL_FLAG_LINK_DECOUPLED;
} else {
sensor->pll.vt_lanes = sensor->pll.csi2.lanes;
sensor->pll.op_lanes = sensor->pll.csi2.lanes;
diff --git a/drivers/media/i2c/ccs/ccs-quirk.c b/drivers/media/i2c/ccs/ccs-quirk.c
index e3d4c7a275bc..e48a4fa1f5dd 100644
--- a/drivers/media/i2c/ccs/ccs-quirk.c
+++ b/drivers/media/i2c/ccs/ccs-quirk.c
@@ -190,8 +190,7 @@ static int jt8ev1_post_streamoff(struct ccs_sensor *sensor)
static int jt8ev1_init(struct ccs_sensor *sensor)
{
- sensor->pll.flags |= CCS_PLL_FLAG_LANE_SPEED_MODEL |
- CCS_PLL_FLAG_LINK_DECOUPLED;
+ sensor->pll.flags |= CCS_PLL_FLAG_LANE_SPEED_MODEL;
sensor->pll.vt_lanes = 1;
sensor->pll.op_lanes = sensor->pll.csi2.lanes;
diff --git a/drivers/media/i2c/ccs/ccs-reg-access.c b/drivers/media/i2c/ccs/ccs-reg-access.c
index a696a0ec8ff5..fd36889ccc1d 100644
--- a/drivers/media/i2c/ccs/ccs-reg-access.c
+++ b/drivers/media/i2c/ccs/ccs-reg-access.c
@@ -210,7 +210,6 @@ int ccs_read_addr_noconv(struct ccs_sensor *sensor, u32 reg, u32 *val)
*/
int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val)
{
- unsigned int retries = 10;
int rval;
rval = ccs_call_quirk(sensor, reg_access, true, &reg, &val);
@@ -219,13 +218,7 @@ int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val)
if (rval < 0)
return rval;
- rval = 0;
- do {
- if (cci_write(sensor->regmap, reg, val, &rval))
- fsleep(1000);
- } while (rval && --retries);
-
- return rval;
+ return cci_write(sensor->regmap, reg, val, NULL);
}
#define MAX_WRITE_LEN 32U
diff --git a/drivers/media/i2c/ccs/ccs.h b/drivers/media/i2c/ccs/ccs.h
index 096573845a10..0726c4687f0f 100644
--- a/drivers/media/i2c/ccs/ccs.h
+++ b/drivers/media/i2c/ccs/ccs.h
@@ -43,6 +43,8 @@
#define SMIAPP_RESET_DELAY(clk) \
(1000 + (SMIAPP_RESET_DELAY_CLOCKS * 1000 \
+ (clk) / 1000 - 1) / ((clk) / 1000))
+#define CCS_RESET_DELAY_US 5000
+#define CCS_RESET_TIMEOUT_US 1000000
#define CCS_COLOUR_COMPONENTS 4
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index fd2d2d5272bf..6d3f8617ef13 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -12,7 +12,6 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/driver.h>
#include <linux/i2c-atr.h>
#include <linux/i2c.h>
@@ -119,44 +118,66 @@ static const struct ub913_format_info *ub913_find_format(u32 incode)
return NULL;
}
-static int ub913_read(const struct ub913_data *priv, u8 reg, u8 *val)
+static int ub913_read(const struct ub913_data *priv, u8 reg, u8 *val,
+ int *err)
{
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
ret = regmap_read(priv->regmap, reg, &v);
- if (ret < 0) {
+ if (ret) {
dev_err(&priv->client->dev,
"Cannot read register 0x%02x: %d!\n", reg, ret);
- return ret;
+ goto out;
}
*val = v;
- return 0;
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
}
-static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
+static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val,
+ int *err)
{
int ret;
+ if (err && *err)
+ return *err;
+
ret = regmap_write(priv->regmap, reg, val);
if (ret < 0)
dev_err(&priv->client->dev,
"Cannot write register 0x%02x: %d!\n", reg, ret);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub913_update_bits(const struct ub913_data *priv, u8 reg, u8 mask,
- u8 val)
+ u8 val, int *err)
{
int ret;
+ if (err && *err)
+ return *err;
+
ret = regmap_update_bits(priv->regmap, reg, mask, val);
if (ret < 0)
dev_err(&priv->client->dev,
"Cannot update register 0x%02x %d!\n", reg, ret);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
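
The new int *err parameter implements the same error-accumulation idiom as the cci_*() helpers: once a call fails, later chained calls return the stored error immediately, so a sequence of accesses needs only one final check. A minimal usage sketch (illustrative only — ub913_read_crc_errors() is a hypothetical helper; the real callers appear below in ub913_log_status()):

	static int ub913_read_crc_errors(const struct ub913_data *priv, u16 *crc)
	{
		u8 lsb, msb;
		int ret = 0;

		ub913_read(priv, UB913_REG_CRC_ERRORS_LSB, &lsb, &ret);
		ub913_read(priv, UB913_REG_CRC_ERRORS_MSB, &msb, &ret);
		if (ret)	/* a first failure short-circuits the second read */
			return ret;

		*crc = lsb | (msb << 8);
		return 0;
	}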
@@ -204,7 +225,7 @@ static int ub913_gpiochip_probe(struct ub913_data *priv)
int ret;
/* Initialize GPIOs 0 and 1 to local control, tri-state */
- ub913_write(priv, UB913_REG_GPIO_CFG(0), 0);
+ ub913_write(priv, UB913_REG_GPIO_CFG(0), 0, NULL);
gc->label = dev_name(dev);
gc->parent = dev;
@@ -450,10 +471,10 @@ static int ub913_set_fmt(struct v4l2_subdev *sd,
if (!fmt)
return -EINVAL;
- format->format.code = finfo->outcode;
-
*fmt = format->format;
+ fmt->code = finfo->outcode;
+
return 0;
}
@@ -482,25 +503,41 @@ static int ub913_log_status(struct v4l2_subdev *sd)
{
struct ub913_data *priv = sd_to_ub913(sd);
struct device *dev = &priv->client->dev;
- u8 v = 0, v1 = 0, v2 = 0;
+ u8 v, v1, v2;
+ int ret;
+
+ ret = ub913_read(priv, UB913_REG_MODE_SEL, &v, NULL);
+ if (ret)
+ return ret;
- ub913_read(priv, UB913_REG_MODE_SEL, &v);
dev_info(dev, "MODE_SEL %#02x\n", v);
- ub913_read(priv, UB913_REG_CRC_ERRORS_LSB, &v1);
- ub913_read(priv, UB913_REG_CRC_ERRORS_MSB, &v2);
+ ub913_read(priv, UB913_REG_CRC_ERRORS_LSB, &v1, &ret);
+ ub913_read(priv, UB913_REG_CRC_ERRORS_MSB, &v2, &ret);
+ if (ret)
+ return ret;
+
dev_info(dev, "CRC errors %u\n", v1 | (v2 << 8));
/* clear CRC errors */
- ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
+ ub913_read(priv, UB913_REG_GENERAL_CFG, &v, &ret);
ub913_write(priv, UB913_REG_GENERAL_CFG,
- v | UB913_REG_GENERAL_CFG_CRC_ERR_RESET);
- ub913_write(priv, UB913_REG_GENERAL_CFG, v);
+ v | UB913_REG_GENERAL_CFG_CRC_ERR_RESET, &ret);
+ ub913_write(priv, UB913_REG_GENERAL_CFG, v, &ret);
+
+ if (ret)
+ return ret;
+
+ ret = ub913_read(priv, UB913_REG_GENERAL_STATUS, &v, NULL);
+ if (ret)
+ return ret;
- ub913_read(priv, UB913_REG_GENERAL_STATUS, &v);
dev_info(dev, "GENERAL_STATUS %#02x\n", v);
- ub913_read(priv, UB913_REG_PLL_OVR, &v);
+ ret = ub913_read(priv, UB913_REG_PLL_OVR, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "PLL_OVR %#02x\n", v);
return 0;
@@ -656,11 +693,11 @@ static int ub913_i2c_master_init(struct ub913_data *priv)
scl_high = div64_u64((u64)scl_high * ref, 1000000000);
scl_low = div64_u64((u64)scl_low * ref, 1000000000);
- ret = ub913_write(priv, UB913_REG_SCL_HIGH_TIME, scl_high);
+ ret = ub913_write(priv, UB913_REG_SCL_HIGH_TIME, scl_high, NULL);
if (ret)
return ret;
- ret = ub913_write(priv, UB913_REG_SCL_LOW_TIME, scl_low);
+ ret = ub913_write(priv, UB913_REG_SCL_LOW_TIME, scl_low, NULL);
if (ret)
return ret;
@@ -670,6 +707,7 @@ static int ub913_i2c_master_init(struct ub913_data *priv)
static int ub913_add_i2c_adapter(struct ub913_data *priv)
{
struct device *dev = &priv->client->dev;
+ struct i2c_atr_adap_desc desc = { };
struct fwnode_handle *i2c_handle;
int ret;
@@ -677,8 +715,12 @@ static int ub913_add_i2c_adapter(struct ub913_data *priv)
if (!i2c_handle)
return 0;
- ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port,
- dev, i2c_handle);
+ desc.chan_id = priv->plat_data->port;
+ desc.parent = dev;
+ desc.bus_handle = i2c_handle;
+ desc.num_aliases = 0;
+
+ ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc);
fwnode_handle_put(i2c_handle);
@@ -729,7 +771,7 @@ static int ub913_hw_init(struct ub913_data *priv)
int ret;
u8 v;
- ret = ub913_read(priv, UB913_REG_MODE_SEL, &v);
+ ret = ub913_read(priv, UB913_REG_MODE_SEL, &v, NULL);
if (ret)
return ret;
@@ -750,7 +792,7 @@ static int ub913_hw_init(struct ub913_data *priv)
ret = ub913_update_bits(priv, UB913_REG_GENERAL_CFG,
UB913_REG_GENERAL_CFG_PCLK_RISING,
FIELD_PREP(UB913_REG_GENERAL_CFG_PCLK_RISING,
- priv->pclk_polarity_rising));
+ priv->pclk_polarity_rising), NULL);
if (ret)
return ret;
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index 46569381b332..59bd92388845 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -11,7 +11,6 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/driver.h>
#include <linux/i2c-atr.h>
#include <linux/i2c.h>
@@ -28,6 +27,8 @@
#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
+#include "ds90ub953.h"
+
#define UB953_PAD_SINK 0
#define UB953_PAD_SOURCE 1
@@ -35,89 +36,6 @@
#define UB953_DEFAULT_CLKOUT_RATE 25000000UL
-#define UB953_REG_RESET_CTL 0x01
-#define UB953_REG_RESET_CTL_DIGITAL_RESET_1 BIT(1)
-#define UB953_REG_RESET_CTL_DIGITAL_RESET_0 BIT(0)
-
-#define UB953_REG_GENERAL_CFG 0x02
-#define UB953_REG_GENERAL_CFG_CONT_CLK BIT(6)
-#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT 4
-#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_MASK GENMASK(5, 4)
-#define UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE BIT(1)
-#define UB953_REG_GENERAL_CFG_I2C_STRAP_MODE BIT(0)
-
-#define UB953_REG_MODE_SEL 0x03
-#define UB953_REG_MODE_SEL_MODE_DONE BIT(3)
-#define UB953_REG_MODE_SEL_MODE_OVERRIDE BIT(4)
-#define UB953_REG_MODE_SEL_MODE_MASK GENMASK(2, 0)
-
-#define UB953_REG_CLKOUT_CTRL0 0x06
-#define UB953_REG_CLKOUT_CTRL1 0x07
-
-#define UB953_REG_SCL_HIGH_TIME 0x0b
-#define UB953_REG_SCL_LOW_TIME 0x0c
-
-#define UB953_REG_LOCAL_GPIO_DATA 0x0d
-#define UB953_REG_LOCAL_GPIO_DATA_GPIO_RMTEN(n) BIT(4 + (n))
-#define UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(n) BIT(0 + (n))
-
-#define UB953_REG_GPIO_INPUT_CTRL 0x0e
-#define UB953_REG_GPIO_INPUT_CTRL_OUT_EN(n) BIT(4 + (n))
-#define UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(n) BIT(0 + (n))
-
-#define UB953_REG_BC_CTRL 0x49
-#define UB953_REG_BC_CTRL_CRC_ERR_CLR BIT(3)
-
-#define UB953_REG_REV_MASK_ID 0x50
-#define UB953_REG_GENERAL_STATUS 0x52
-
-#define UB953_REG_GPIO_PIN_STS 0x53
-#define UB953_REG_GPIO_PIN_STS_GPIO_STS(n) BIT(0 + (n))
-
-#define UB953_REG_BIST_ERR_CNT 0x54
-#define UB953_REG_CRC_ERR_CNT1 0x55
-#define UB953_REG_CRC_ERR_CNT2 0x56
-
-#define UB953_REG_CSI_ERR_CNT 0x5c
-#define UB953_REG_CSI_ERR_STATUS 0x5d
-#define UB953_REG_CSI_ERR_DLANE01 0x5e
-#define UB953_REG_CSI_ERR_DLANE23 0x5f
-#define UB953_REG_CSI_ERR_CLK_LANE 0x60
-#define UB953_REG_CSI_PKT_HDR_VC_ID 0x61
-#define UB953_REG_PKT_HDR_WC_LSB 0x62
-#define UB953_REG_PKT_HDR_WC_MSB 0x63
-#define UB953_REG_CSI_ECC 0x64
-
-#define UB953_REG_IND_ACC_CTL 0xb0
-#define UB953_REG_IND_ACC_ADDR 0xb1
-#define UB953_REG_IND_ACC_DATA 0xb2
-
-#define UB953_REG_FPD3_RX_ID(n) (0xf0 + (n))
-#define UB953_REG_FPD3_RX_ID_LEN 6
-
-/* Indirect register blocks */
-#define UB953_IND_TARGET_PAT_GEN 0x00
-#define UB953_IND_TARGET_FPD3_TX 0x01
-#define UB953_IND_TARGET_DIE_ID 0x02
-
-#define UB953_IND_PGEN_CTL 0x01
-#define UB953_IND_PGEN_CTL_PGEN_ENABLE BIT(0)
-#define UB953_IND_PGEN_CFG 0x02
-#define UB953_IND_PGEN_CSI_DI 0x03
-#define UB953_IND_PGEN_LINE_SIZE1 0x04
-#define UB953_IND_PGEN_LINE_SIZE0 0x05
-#define UB953_IND_PGEN_BAR_SIZE1 0x06
-#define UB953_IND_PGEN_BAR_SIZE0 0x07
-#define UB953_IND_PGEN_ACT_LPF1 0x08
-#define UB953_IND_PGEN_ACT_LPF0 0x09
-#define UB953_IND_PGEN_TOT_LPF1 0x0a
-#define UB953_IND_PGEN_TOT_LPF0 0x0b
-#define UB953_IND_PGEN_LINE_PD1 0x0c
-#define UB953_IND_PGEN_LINE_PD0 0x0d
-#define UB953_IND_PGEN_VBP 0x0e
-#define UB953_IND_PGEN_VFP 0x0f
-#define UB953_IND_PGEN_COLOR(n) (0x10 + (n)) /* n <= 15 */
-
/* Note: Only sync mode supported for now */
enum ub953_mode {
/* FPD-Link III CSI-2 synchronous mode */
@@ -185,11 +103,14 @@ static inline struct ub953_data *sd_to_ub953(struct v4l2_subdev *sd)
* HW Access
*/
-static int ub953_read(struct ub953_data *priv, u8 reg, u8 *val)
+static int ub953_read(struct ub953_data *priv, u8 reg, u8 *val, int *err)
{
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_read(priv->regmap, reg, &v);
@@ -204,13 +125,19 @@ static int ub953_read(struct ub953_data *priv, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub953_write(struct ub953_data *priv, u8 reg, u8 val)
+static int ub953_write(struct ub953_data *priv, u8 reg, u8 val, int *err)
{
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_write(priv->regmap, reg, val);
@@ -220,6 +147,9 @@ static int ub953_write(struct ub953_data *priv, u8 reg, u8 val)
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -244,11 +174,15 @@ static int ub953_select_ind_reg_block(struct ub953_data *priv, u8 block)
}
__maybe_unused
-static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
+static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val,
+ int *err)
{
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub953_select_ind_reg_block(priv, block);
@@ -258,7 +192,7 @@ static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_ADDR, reg);
if (ret) {
dev_err(&priv->client->dev,
- "Write to IND_ACC_ADDR failed when reading %u:%x02x: %d\n",
+ "Write to IND_ACC_ADDR failed when reading %u:0x%02x: %d\n",
block, reg, ret);
goto out_unlock;
}
@@ -266,7 +200,7 @@ static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
ret = regmap_read(priv->regmap, UB953_REG_IND_ACC_DATA, &v);
if (ret) {
dev_err(&priv->client->dev,
- "Write to IND_ACC_DATA failed when reading %u:%x02x: %d\n",
+ "Write to IND_ACC_DATA failed when reading %u:0x%02x: %d\n",
block, reg, ret);
goto out_unlock;
}
@@ -276,14 +210,21 @@ static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
__maybe_unused
-static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val)
+static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val,
+ int *err)
{
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub953_select_ind_reg_block(priv, block);
@@ -293,7 +234,7 @@ static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val)
ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_ADDR, reg);
if (ret) {
dev_err(&priv->client->dev,
- "Write to IND_ACC_ADDR failed when writing %u:%x02x: %d\n",
+ "Write to IND_ACC_ADDR failed when writing %u:0x%02x: %d\n",
block, reg, ret);
goto out_unlock;
}
@@ -301,13 +242,16 @@ static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val)
ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_DATA, val);
if (ret) {
dev_err(&priv->client->dev,
- "Write to IND_ACC_DATA failed when writing %u:%x02x\n: %d\n",
+ "Write to IND_ACC_DATA failed when writing %u:0x%02x: %d\n",
block, reg, ret);
}
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
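
The indirect accessors wrap a three-step sequence behind priv->reg_lock:
select the register block via UB953_REG_IND_ACC_CTL, write the target
offset to UB953_REG_IND_ACC_ADDR, then transfer the byte through
UB953_REG_IND_ACC_DATA. A hedged sketch of driving the pattern generator
through this path (ub953_example_enable_pgen() is hypothetical):

static int ub953_example_enable_pgen(struct ub953_data *priv)
{
	int ret = 0;

	/* Block select, address and data writes happen inside the helper */
	ub953_write_ind(priv, UB953_IND_TARGET_PAT_GEN, UB953_IND_PGEN_CTL,
			UB953_IND_PGEN_CTL_PGEN_ENABLE, &ret);

	return ret;
}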
@@ -320,7 +264,7 @@ static int ub953_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
int ret;
u8 v;
- ret = ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &v);
+ ret = ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &v, NULL);
if (ret)
return ret;
@@ -366,7 +310,7 @@ static int ub953_gpio_get(struct gpio_chip *gc, unsigned int offset)
int ret;
u8 v;
- ret = ub953_read(priv, UB953_REG_GPIO_PIN_STS, &v);
+ ret = ub953_read(priv, UB953_REG_GPIO_PIN_STS, &v, NULL);
if (ret)
return ret;
@@ -400,11 +344,11 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
int ret;
/* Set all GPIOs to local input mode */
- ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
+ ret = ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0, NULL);
if (ret)
return ret;
- ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
+ ret = ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf, NULL);
if (ret)
return ret;
@@ -607,23 +551,33 @@ static int ub953_log_status(struct v4l2_subdev *sd)
{
struct ub953_data *priv = sd_to_ub953(sd);
struct device *dev = &priv->client->dev;
- u8 v = 0, v1 = 0, v2 = 0;
- unsigned int i;
char id[UB953_REG_FPD3_RX_ID_LEN];
- u8 gpio_local_data = 0;
- u8 gpio_input_ctrl = 0;
- u8 gpio_pin_sts = 0;
+ u8 gpio_local_data;
+ u8 gpio_input_ctrl;
+ u8 gpio_pin_sts;
+ unsigned int i;
+ u8 v, v1, v2;
+ int ret;
- for (i = 0; i < sizeof(id); i++)
- ub953_read(priv, UB953_REG_FPD3_RX_ID(i), &id[i]);
+ for (i = 0; i < sizeof(id); i++) {
+ ret = ub953_read(priv, UB953_REG_FPD3_RX_ID(i), &id[i], NULL);
+ if (ret)
+ return ret;
+ }
dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
- ub953_read(priv, UB953_REG_GENERAL_STATUS, &v);
+ ret = ub953_read(priv, UB953_REG_GENERAL_STATUS, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "GENERAL_STATUS %#02x\n", v);
- ub953_read(priv, UB953_REG_CRC_ERR_CNT1, &v1);
- ub953_read(priv, UB953_REG_CRC_ERR_CNT2, &v2);
+ ub953_read(priv, UB953_REG_CRC_ERR_CNT1, &v1, &ret);
+ ub953_read(priv, UB953_REG_CRC_ERR_CNT2, &v2, &ret);
+ if (ret)
+ return ret;
+
dev_info(dev, "CRC error count %u\n", v1 | (v2 << 8));
/* Clear CRC error counter */
@@ -632,34 +586,60 @@ static int ub953_log_status(struct v4l2_subdev *sd)
UB953_REG_BC_CTRL_CRC_ERR_CLR,
UB953_REG_BC_CTRL_CRC_ERR_CLR);
- ub953_read(priv, UB953_REG_CSI_ERR_CNT, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_CNT, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI error count %u\n", v);
- ub953_read(priv, UB953_REG_CSI_ERR_STATUS, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_STATUS, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI_ERR_STATUS %#02x\n", v);
- ub953_read(priv, UB953_REG_CSI_ERR_DLANE01, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_DLANE01, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI_ERR_DLANE01 %#02x\n", v);
- ub953_read(priv, UB953_REG_CSI_ERR_DLANE23, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_DLANE23, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI_ERR_DLANE23 %#02x\n", v);
- ub953_read(priv, UB953_REG_CSI_ERR_CLK_LANE, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ERR_CLK_LANE, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI_ERR_CLK_LANE %#02x\n", v);
- ub953_read(priv, UB953_REG_CSI_PKT_HDR_VC_ID, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_PKT_HDR_VC_ID, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI packet header VC %u ID %u\n", v >> 6, v & 0x3f);
- ub953_read(priv, UB953_REG_PKT_HDR_WC_LSB, &v1);
- ub953_read(priv, UB953_REG_PKT_HDR_WC_MSB, &v2);
+ ub953_read(priv, UB953_REG_PKT_HDR_WC_LSB, &v1, &ret);
+ ub953_read(priv, UB953_REG_PKT_HDR_WC_MSB, &v2, &ret);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI packet header WC %u\n", (v2 << 8) | v1);
- ub953_read(priv, UB953_REG_CSI_ECC, &v);
+ ret = ub953_read(priv, UB953_REG_CSI_ECC, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "CSI ECC %#02x\n", v);
- ub953_read(priv, UB953_REG_LOCAL_GPIO_DATA, &gpio_local_data);
- ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &gpio_input_ctrl);
- ub953_read(priv, UB953_REG_GPIO_PIN_STS, &gpio_pin_sts);
+ ub953_read(priv, UB953_REG_LOCAL_GPIO_DATA, &gpio_local_data, &ret);
+ ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &gpio_input_ctrl, &ret);
+ ub953_read(priv, UB953_REG_GPIO_PIN_STS, &gpio_pin_sts, &ret);
+ if (ret)
+ return ret;
for (i = 0; i < UB953_NUM_GPIOS; i++) {
dev_info(dev,
@@ -843,11 +823,11 @@ static int ub953_i2c_master_init(struct ub953_data *priv)
scl_high = div64_u64((u64)scl_high * ref, 1000000000) - 5;
scl_low = div64_u64((u64)scl_low * ref, 1000000000) - 5;
- ret = ub953_write(priv, UB953_REG_SCL_HIGH_TIME, scl_high);
+ ret = ub953_write(priv, UB953_REG_SCL_HIGH_TIME, scl_high, NULL);
if (ret)
return ret;
- ret = ub953_write(priv, UB953_REG_SCL_LOW_TIME, scl_low);
+ ret = ub953_write(priv, UB953_REG_SCL_LOW_TIME, scl_low, NULL);
if (ret)
return ret;
@@ -986,11 +966,11 @@ static int ub953_write_clkout_regs(struct ub953_data *priv,
clkout_ctrl1 = clkout_data->n;
- ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0, NULL);
if (ret)
return ret;
- ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
+ ret = ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1, NULL);
if (ret)
return ret;
@@ -1009,13 +989,13 @@ static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
u64 rate;
int ret;
- ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL0, &ctrl0);
+ ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL0, &ctrl0, NULL);
if (ret) {
dev_err(dev, "Failed to read CLKOUT_CTRL0: %d\n", ret);
return 0;
}
- ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL1, &ctrl1);
+ ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL1, &ctrl1, NULL);
if (ret) {
dev_err(dev, "Failed to read CLKOUT_CTRL1: %d\n", ret);
return 0;
@@ -1122,6 +1102,7 @@ static int ub953_register_clkout(struct ub953_data *priv)
static int ub953_add_i2c_adapter(struct ub953_data *priv)
{
struct device *dev = &priv->client->dev;
+ struct i2c_atr_adap_desc desc = { };
struct fwnode_handle *i2c_handle;
int ret;
@@ -1129,8 +1110,12 @@ static int ub953_add_i2c_adapter(struct ub953_data *priv)
if (!i2c_handle)
return 0;
- ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port,
- dev, i2c_handle);
+ desc.chan_id = priv->plat_data->port;
+ desc.parent = dev;
+ desc.bus_handle = i2c_handle;
+ desc.num_aliases = 0;
+
+ ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc);
fwnode_handle_put(i2c_handle);
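
The i2c-atr adapter is now described by a struct rather than positional
arguments. The same setup written with designated initializers, based
only on the fields visible in this patch:

	struct i2c_atr_adap_desc desc = {
		.chan_id = priv->plat_data->port,
		.parent = dev,
		.bus_handle = i2c_handle,
		.num_aliases = 0,
	};

	ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc);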
@@ -1191,7 +1176,7 @@ static int ub953_hw_init(struct ub953_data *priv)
int ret;
u8 v;
- ret = ub953_read(priv, UB953_REG_MODE_SEL, &v);
+ ret = ub953_read(priv, UB953_REG_MODE_SEL, &v, NULL);
if (ret)
return ret;
@@ -1231,13 +1216,13 @@ static int ub953_hw_init(struct ub953_data *priv)
return dev_err_probe(dev, -EINVAL,
"clkin required for non-sync ext mode\n");
- ret = ub953_read(priv, UB953_REG_REV_MASK_ID, &v);
+ ret = ub953_read(priv, UB953_REG_REV_MASK_ID, &v, NULL);
if (ret)
return dev_err_probe(dev, ret, "Failed to read revision");
dev_info(dev, "Found %s rev/mask %#04x\n", priv->hw_data->model, v);
- ret = ub953_read(priv, UB953_REG_GENERAL_CFG, &v);
+ ret = ub953_read(priv, UB953_REG_GENERAL_CFG, &v, NULL);
if (ret)
return ret;
@@ -1254,11 +1239,16 @@ static int ub953_hw_init(struct ub953_data *priv)
UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT;
v |= UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE;
- ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v);
+ ret = ub953_write(priv, UB953_REG_GENERAL_CFG, v, NULL);
if (ret)
return ret;
- return 0;
+ v = 1U << UB953_REG_I2C_CONTROL2_SDA_OUTPUT_SETUP_SHIFT;
+ v |= UB953_REG_I2C_CONTROL2_BUS_SPEEDUP;
+
+ ret = ub953_write(priv, UB953_REG_I2C_CONTROL2, v, NULL);
+
+ return ret;
}
static int ub953_subdev_init(struct ub953_data *priv)
diff --git a/drivers/media/i2c/ds90ub953.h b/drivers/media/i2c/ds90ub953.h
new file mode 100644
index 000000000000..97a6b3af326e
--- /dev/null
+++ b/drivers/media/i2c/ds90ub953.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEDIA_I2C_DS90UB953_H__
+#define __MEDIA_I2C_DS90UB953_H__
+
+#include <linux/types.h>
+
+#define UB953_REG_RESET_CTL 0x01
+#define UB953_REG_RESET_CTL_DIGITAL_RESET_1 BIT(1)
+#define UB953_REG_RESET_CTL_DIGITAL_RESET_0 BIT(0)
+
+#define UB953_REG_GENERAL_CFG 0x02
+#define UB953_REG_GENERAL_CFG_CONT_CLK BIT(6)
+#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT 4
+#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_MASK GENMASK(5, 4)
+#define UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE BIT(1)
+#define UB953_REG_GENERAL_CFG_I2C_STRAP_MODE BIT(0)
+
+#define UB953_REG_MODE_SEL 0x03
+#define UB953_REG_MODE_SEL_MODE_DONE BIT(3)
+#define UB953_REG_MODE_SEL_MODE_OVERRIDE BIT(4)
+#define UB953_REG_MODE_SEL_MODE_MASK GENMASK(2, 0)
+
+#define UB953_REG_CLKOUT_CTRL0 0x06
+#define UB953_REG_CLKOUT_CTRL1 0x07
+
+#define UB953_REG_I2C_CONTROL2 0x0a
+#define UB953_REG_I2C_CONTROL2_SDA_OUTPUT_SETUP_SHIFT 4
+#define UB953_REG_I2C_CONTROL2_BUS_SPEEDUP BIT(1)
+
+#define UB953_REG_SCL_HIGH_TIME 0x0b
+#define UB953_REG_SCL_LOW_TIME 0x0c
+
+#define UB953_REG_LOCAL_GPIO_DATA 0x0d
+#define UB953_REG_LOCAL_GPIO_DATA_GPIO_RMTEN(n) BIT(4 + (n))
+#define UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(n) BIT(0 + (n))
+
+#define UB953_REG_GPIO_INPUT_CTRL 0x0e
+#define UB953_REG_GPIO_INPUT_CTRL_OUT_EN(n) BIT(4 + (n))
+#define UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(n) BIT(0 + (n))
+
+#define UB953_REG_BC_CTRL 0x49
+#define UB953_REG_BC_CTRL_CRC_ERR_CLR BIT(3)
+
+#define UB953_REG_REV_MASK_ID 0x50
+#define UB953_REG_GENERAL_STATUS 0x52
+
+#define UB953_REG_GPIO_PIN_STS 0x53
+#define UB953_REG_GPIO_PIN_STS_GPIO_STS(n) BIT(0 + (n))
+
+#define UB953_REG_BIST_ERR_CNT 0x54
+#define UB953_REG_CRC_ERR_CNT1 0x55
+#define UB953_REG_CRC_ERR_CNT2 0x56
+
+#define UB953_REG_CSI_ERR_CNT 0x5c
+#define UB953_REG_CSI_ERR_STATUS 0x5d
+#define UB953_REG_CSI_ERR_DLANE01 0x5e
+#define UB953_REG_CSI_ERR_DLANE23 0x5f
+#define UB953_REG_CSI_ERR_CLK_LANE 0x60
+#define UB953_REG_CSI_PKT_HDR_VC_ID 0x61
+#define UB953_REG_PKT_HDR_WC_LSB 0x62
+#define UB953_REG_PKT_HDR_WC_MSB 0x63
+#define UB953_REG_CSI_ECC 0x64
+
+#define UB953_REG_IND_ACC_CTL 0xb0
+#define UB953_REG_IND_ACC_ADDR 0xb1
+#define UB953_REG_IND_ACC_DATA 0xb2
+
+#define UB953_REG_FPD3_RX_ID(n) (0xf0 + (n))
+#define UB953_REG_FPD3_RX_ID_LEN 6
+
+/* Indirect register blocks */
+#define UB953_IND_TARGET_PAT_GEN 0x00
+#define UB953_IND_TARGET_ANALOG 0x01
+#define UB953_IND_TARGET_DIE_ID 0x02
+
+#define UB953_IND_PGEN_CTL 0x01
+#define UB953_IND_PGEN_CTL_PGEN_ENABLE BIT(0)
+#define UB953_IND_PGEN_CFG 0x02
+#define UB953_IND_PGEN_CSI_DI 0x03
+#define UB953_IND_PGEN_LINE_SIZE1 0x04
+#define UB953_IND_PGEN_LINE_SIZE0 0x05
+#define UB953_IND_PGEN_BAR_SIZE1 0x06
+#define UB953_IND_PGEN_BAR_SIZE0 0x07
+#define UB953_IND_PGEN_ACT_LPF1 0x08
+#define UB953_IND_PGEN_ACT_LPF0 0x09
+#define UB953_IND_PGEN_TOT_LPF1 0x0a
+#define UB953_IND_PGEN_TOT_LPF0 0x0b
+#define UB953_IND_PGEN_LINE_PD1 0x0c
+#define UB953_IND_PGEN_LINE_PD0 0x0d
+#define UB953_IND_PGEN_VBP 0x0e
+#define UB953_IND_PGEN_VFP 0x0f
+#define UB953_IND_PGEN_COLOR(n) (0x10 + (n)) /* n <= 15 */
+
+#define UB953_IND_ANA_TEMP_DYNAMIC_CFG 0x4b
+#define UB953_IND_ANA_TEMP_DYNAMIC_CFG_OV BIT(5)
+#define UB953_IND_ANA_TEMP_STATIC_CFG 0x4c
+#define UB953_IND_ANA_TEMP_STATIC_CFG_MASK GENMASK(6, 4)
+
+/* UB971 Registers */
+
+#define UB971_ENH_BC_CHK 0x4b
+
+#endif /* __MEDIA_I2C_DS90UB953_H__ */
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
index 5dde8452739b..082fc62b0f5b 100644
--- a/drivers/media/i2c/ds90ub960.c
+++ b/drivers/media/i2c/ds90ub960.c
@@ -27,6 +27,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fwnode.h>
@@ -52,6 +53,8 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+#include "ds90ub953.h"
+
#define MHZ(v) ((u32)((v) * HZ_PER_MHZ))
/*
@@ -243,13 +246,17 @@
#define UB960_RR_BIST_ERR_COUNT 0x57
#define UB960_RR_BCC_CONFIG 0x58
+#define UB960_RR_BCC_CONFIG_BC_ALWAYS_ON BIT(4)
+#define UB960_RR_BCC_CONFIG_AUTO_ACK_ALL BIT(5)
#define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH BIT(6)
#define UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK GENMASK(2, 0)
#define UB960_RR_DATAPATH_CTL1 0x59
#define UB960_RR_DATAPATH_CTL2 0x5a
#define UB960_RR_SER_ID 0x5b
+#define UB960_RR_SER_ID_FREEZE_DEVICE_ID BIT(0)
#define UB960_RR_SER_ALIAS_ID 0x5c
+#define UB960_RR_SER_ALIAS_ID_AUTO_ACK BIT(0)
/* For these two register sets: n < UB960_MAX_PORT_ALIASES */
#define UB960_RR_SLAVE_ID(n) (0x5d + (n))
@@ -307,8 +314,6 @@
#define UB960_XR_REFCLK_FREQ 0xa5 /* UB960 */
-#define UB960_RR_VC_ID_MAP(x) (0xa0 + (x)) /* UB9702 */
-
#define UB960_SR_IND_ACC_CTL 0xb0
#define UB960_SR_IND_ACC_CTL_IA_AUTO_INC BIT(1)
@@ -321,9 +326,6 @@
#define UB960_SR_FV_MIN_TIME 0xbc
#define UB960_SR_GPIO_PD_CTL 0xbe
-#define UB960_SR_FPD_RATE_CFG 0xc2 /* UB9702 */
-#define UB960_SR_CSI_PLL_DIV 0xc9 /* UB9702 */
-
#define UB960_RR_PORT_DEBUG 0xd0
#define UB960_RR_AEQ_CTL2 0xd2
#define UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR BIT(2)
@@ -354,15 +356,12 @@
#define UB960_RR_SEN_INT_RISE_STS 0xde
#define UB960_RR_SEN_INT_FALL_STS 0xdf
-#define UB960_RR_CHANNEL_MODE 0xe4 /* UB9702 */
#define UB960_SR_FPD3_RX_ID(n) (0xf0 + (n))
#define UB960_SR_FPD3_RX_ID_LEN 6
#define UB960_SR_I2C_RX_ID(n) (0xf8 + (n))
-#define UB9702_SR_REFCLK_FREQ 0x3d
-
/* Indirect register blocks */
#define UB960_IND_TARGET_PAT_GEN 0x00
#define UB960_IND_TARGET_RX_ANA(n) (0x01 + (n))
@@ -397,6 +396,49 @@
#define UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY BIT(3)
#define UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK GENMASK(2, 0)
+/* UB9702 Registers */
+
+#define UB9702_SR_CSI_EXCLUSIVE_FWD2 0x3c
+#define UB9702_SR_REFCLK_FREQ 0x3d
+#define UB9702_RR_RX_CTL_1 0x80
+#define UB9702_RR_RX_CTL_2 0x87
+#define UB9702_RR_VC_ID_MAP(x) (0xa0 + (x))
+#define UB9702_SR_FPD_RATE_CFG 0xc2
+#define UB9702_SR_CSI_PLL_DIV 0xc9
+#define UB9702_RR_RX_SM_SEL_2 0xd4
+#define UB9702_RR_CHANNEL_MODE 0xe4
+
+#define UB9702_IND_TARGET_SAR_ADC 0x0a
+
+#define UB9702_IR_RX_ANA_FPD_BC_CTL0 0x04
+#define UB9702_IR_RX_ANA_FPD_BC_CTL1 0x0d
+#define UB9702_IR_RX_ANA_FPD_BC_CTL2 0x1b
+#define UB9702_IR_RX_ANA_SYSTEM_INIT_REG0 0x21
+#define UB9702_IR_RX_ANA_AEQ_ALP_SEL6 0x27
+#define UB9702_IR_RX_ANA_AEQ_ALP_SEL7 0x28
+#define UB9702_IR_RX_ANA_AEQ_ALP_SEL10 0x2b
+#define UB9702_IR_RX_ANA_AEQ_ALP_SEL11 0x2c
+#define UB9702_IR_RX_ANA_EQ_ADAPT_CTRL 0x2e
+#define UB9702_IR_RX_ANA_AEQ_CFG_1 0x34
+#define UB9702_IR_RX_ANA_AEQ_CFG_2 0x4d
+#define UB9702_IR_RX_ANA_GAIN_CTRL_0 0x71
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_1 0x72
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_2 0x73
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_3 0x74
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_6 0x77
+#define UB9702_IR_RX_ANA_AEQ_CFG_3 0x79
+#define UB9702_IR_RX_ANA_AEQ_CFG_4 0x85
+#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_15 0x87
+#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_24 0x90
+#define UB9702_IR_RX_ANA_EQ_CTRL_SEL_38 0x9e
+#define UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5 0xa5
+#define UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1 0xa8
+#define UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL 0xf0
+#define UB9702_IR_RX_ANA_VGA_CTRL_SEL_8 0xf1
+
+#define UB9702_IR_CSI_ANA_CSIPLL_REG_1 0x92
+
/* EQ related */
#define UB960_MIN_AEQ_STROBE_POS -7
@@ -450,7 +492,9 @@ struct ub960_rxport {
struct fwnode_handle *fwnode;
struct i2c_client *client;
unsigned short alias; /* I2C alias (lower 7 bits) */
+ short addr; /* Local I2C address (lower 7 bits) */
struct ds90ub9xx_platform_data pdata;
+ struct regmap *regmap;
} ser;
enum ub960_rxport_mode rx_mode;
@@ -478,7 +522,9 @@ struct ub960_rxport {
};
} eq;
- const struct i2c_client *aliased_clients[UB960_MAX_PORT_ALIASES];
+ /* lock for aliased_addrs and associated registers */
+ struct mutex aliased_addrs_lock;
+ u16 aliased_addrs[UB960_MAX_PORT_ALIASES];
};
struct ub960_asd {
@@ -614,16 +660,76 @@ static const struct ub960_format_info *ub960_find_format(u32 code)
return NULL;
}
+struct ub960_rxport_iter {
+ unsigned int nport;
+ struct ub960_rxport *rxport;
+};
+
+enum ub960_iter_flags {
+ UB960_ITER_ACTIVE_ONLY = BIT(0),
+ UB960_ITER_FPD4_ONLY = BIT(1),
+};
+
+static struct ub960_rxport_iter ub960_iter_rxport(struct ub960_data *priv,
+ struct ub960_rxport_iter it,
+ enum ub960_iter_flags flags)
+{
+ for (; it.nport < priv->hw_data->num_rxports; it.nport++) {
+ it.rxport = priv->rxports[it.nport];
+
+ if ((flags & UB960_ITER_ACTIVE_ONLY) && !it.rxport)
+ continue;
+
+ if ((flags & UB960_ITER_FPD4_ONLY) &&
+ it.rxport->cdr_mode != RXPORT_CDR_FPD4)
+ continue;
+
+ return it;
+ }
+
+ it.rxport = NULL;
+
+ return it;
+}
+
+#define for_each_rxport(priv, it) \
+ for (struct ub960_rxport_iter it = \
+ ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
+ 0); \
+ it.nport < (priv)->hw_data->num_rxports; \
+ it.nport++, it = ub960_iter_rxport(priv, it, 0))
+
+#define for_each_active_rxport(priv, it) \
+ for (struct ub960_rxport_iter it = \
+ ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
+ UB960_ITER_ACTIVE_ONLY); \
+ it.nport < (priv)->hw_data->num_rxports; \
+ it.nport++, it = ub960_iter_rxport(priv, it, \
+ UB960_ITER_ACTIVE_ONLY))
+
+#define for_each_active_rxport_fpd4(priv, it) \
+ for (struct ub960_rxport_iter it = \
+ ub960_iter_rxport(priv, (struct ub960_rxport_iter){ 0 }, \
+ UB960_ITER_ACTIVE_ONLY | \
+ UB960_ITER_FPD4_ONLY); \
+ it.nport < (priv)->hw_data->num_rxports; \
+ it.nport++, it = ub960_iter_rxport(priv, it, \
+ UB960_ITER_ACTIVE_ONLY | \
+ UB960_ITER_FPD4_ONLY))
+
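
The iterators declare the loop variable themselves, so a caller only
names it; it.nport and it.rxport are both valid inside the body. A small
usage sketch (ub960_example_count_active() is hypothetical):

static unsigned int ub960_example_count_active(struct ub960_data *priv)
{
	unsigned int count = 0;

	for_each_active_rxport(priv, it)
		count++;	/* it.rxport is non-NULL here */

	return count;
}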
/* -----------------------------------------------------------------------------
* Basic device access
*/
-static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val)
+static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val, int *err)
{
struct device *dev = &priv->client->dev;
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_read(priv->regmap, reg, &v);
@@ -638,14 +744,20 @@ static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_write(struct ub960_data *priv, u8 reg, u8 val)
+static int ub960_write(struct ub960_data *priv, u8 reg, u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_write(priv->regmap, reg, val);
@@ -655,14 +767,21 @@ static int ub960_write(struct ub960_data *priv, u8 reg, u8 val)
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val)
+static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val,
+ int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_update_bits(priv->regmap, reg, mask, val);
@@ -672,15 +791,21 @@ static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val)
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val)
+static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val, int *err)
{
struct device *dev = &priv->client->dev;
__be16 __v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
@@ -695,6 +820,9 @@ static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -721,12 +849,16 @@ static int ub960_rxport_select(struct ub960_data *priv, u8 nport)
return 0;
}
-static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
+static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 *val, int *err)
{
struct device *dev = &priv->client->dev;
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_rxport_select(priv, nport);
@@ -745,14 +877,21 @@ static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
+static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_rxport_select(priv, nport);
@@ -767,15 +906,21 @@ static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
- u8 mask, u8 val)
+ u8 mask, u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_rxport_select(priv, nport);
@@ -790,16 +935,22 @@ static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
- u16 *val)
+ u16 *val, int *err)
{
struct device *dev = &priv->client->dev;
__be16 __v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_rxport_select(priv, nport);
@@ -818,6 +969,9 @@ static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -844,12 +998,16 @@ static int ub960_txport_select(struct ub960_data *priv, u8 nport)
return 0;
}
-static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
+static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 *val, int *err)
{
struct device *dev = &priv->client->dev;
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_txport_select(priv, nport);
@@ -868,14 +1026,21 @@ static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
+static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_txport_select(priv, nport);
@@ -890,15 +1055,21 @@ static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
- u8 mask, u8 val)
+ u8 mask, u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_txport_select(priv, nport);
@@ -913,6 +1084,9 @@ static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
@@ -938,12 +1112,16 @@ static int ub960_select_ind_reg_block(struct ub960_data *priv, u8 block)
return 0;
}
-static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val)
+static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val,
+ int *err)
{
struct device *dev = &priv->client->dev;
unsigned int v;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_select_ind_reg_block(priv, block);
@@ -971,14 +1149,21 @@ static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
-static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val)
+static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val,
+ int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_select_ind_reg_block(priv, block);
@@ -1004,15 +1189,21 @@ static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val)
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
return ret;
}
static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
- u8 mask, u8 val)
+ u8 mask, u8 val, int *err)
{
struct device *dev = &priv->client->dev;
int ret;
+ if (err && *err)
+ return *err;
+
mutex_lock(&priv->reg_lock);
ret = ub960_select_ind_reg_block(priv, block);
@@ -1039,6 +1230,36 @@ static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
out_unlock:
mutex_unlock(&priv->reg_lock);
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int ub960_reset(struct ub960_data *priv, bool reset_regs)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+ u8 bit;
+
+ bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
+ UB960_SR_RESET_DIGITAL_RESET0;
+
+ ret = ub960_write(priv, UB960_SR_RESET, bit, NULL);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
+ (v & bit) == 0, 2000, 100000);
+
+ mutex_unlock(&priv->reg_lock);
+
+ if (ret)
+ dev_err(dev, "reset failed: %d\n", ret);
+
return ret;
}
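
ub960_reset() writes one of the two self-clearing reset bits and polls
until the hardware clears it. Judging by the parameter name,
DIGITAL_RESET1 also restores register defaults while DIGITAL_RESET0
preserves them, which is why later init code applies configuration
changes without losing state:

	ret = ub960_reset(priv, false);	/* digital reset, registers kept */
	if (ret)
		return ret;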
@@ -1046,67 +1267,82 @@ out_unlock:
* I2C-ATR (address translator)
*/
-static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
- const struct i2c_client *client, u16 alias)
+static int ub960_atr_attach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr, u16 alias)
{
struct ub960_data *priv = i2c_atr_get_driver_data(atr);
struct ub960_rxport *rxport = priv->rxports[chan_id];
struct device *dev = &priv->client->dev;
unsigned int reg_idx;
+ int ret = 0;
+
+ guard(mutex)(&rxport->aliased_addrs_lock);
- for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
- if (!rxport->aliased_clients[reg_idx])
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
+ if (!rxport->aliased_addrs[reg_idx])
break;
}
- if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
+ if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport);
return -EADDRNOTAVAIL;
}
- rxport->aliased_clients[reg_idx] = client;
+ rxport->aliased_addrs[reg_idx] = addr;
ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
- client->addr << 1);
+ addr << 1, &ret);
ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
- alias << 1);
+ alias << 1, &ret);
+
+ if (ret)
+ return ret;
dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n",
- rxport->nport, client->addr, alias, reg_idx);
+ rxport->nport, addr, alias, reg_idx);
return 0;
}
-static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
- const struct i2c_client *client)
+static void ub960_atr_detach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr)
{
struct ub960_data *priv = i2c_atr_get_driver_data(atr);
struct ub960_rxport *rxport = priv->rxports[chan_id];
struct device *dev = &priv->client->dev;
unsigned int reg_idx;
+ int ret;
- for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
- if (rxport->aliased_clients[reg_idx] == client)
+ guard(mutex)(&rxport->aliased_addrs_lock);
+
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) {
+ if (rxport->aliased_addrs[reg_idx] == addr)
break;
}
- if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
+ if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) {
dev_err(dev, "rx%u: client 0x%02x is not mapped!\n",
- rxport->nport, client->addr);
+ rxport->nport, addr);
return;
}
- rxport->aliased_clients[reg_idx] = NULL;
+ rxport->aliased_addrs[reg_idx] = 0;
- ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx), 0);
+ ret = ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
+ 0, NULL);
+ if (ret) {
+ dev_err(dev, "rx%u: unable to fully unmap client 0x%02x: %d\n",
+ rxport->nport, addr, ret);
+ return;
+ }
dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport,
- client->addr, reg_idx);
+ addr, reg_idx);
}
static const struct i2c_atr_ops ub960_atr_ops = {
- .attach_client = ub960_atr_attach_client,
- .detach_client = ub960_atr_detach_client,
+ .attach_addr = ub960_atr_attach_addr,
+ .detach_addr = ub960_atr_detach_addr,
};
static int ub960_init_atr(struct ub960_data *priv)
@@ -1115,7 +1351,7 @@ static int ub960_init_atr(struct ub960_data *priv)
struct i2c_adapter *parent_adap = priv->client->adapter;
priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
- priv->hw_data->num_rxports);
+ priv->hw_data->num_rxports, 0);
if (IS_ERR(priv->atr))
return PTR_ERR(priv->atr);
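
With the new API the ATR core calls attach_addr()/detach_addr() as client
addresses appear on or leave a downstream bus, and the driver maps each
address into one of the UB960_MAX_PORT_ALIASES SLAVE_ID/SLAVE_ALIAS
register pairs. A hypothetical debugging helper showing how the per-port
table introduced above is meant to be accessed:

static void ub960_example_dump_aliases(struct ub960_rxport *rxport)
{
	/* Scoped lock, matching the guard() usage in attach/detach */
	guard(mutex)(&rxport->aliased_addrs_lock);

	for (unsigned int i = 0; i < ARRAY_SIZE(rxport->aliased_addrs); i++)
		if (rxport->aliased_addrs[i])
			pr_info("slot %u: addr 0x%02x\n", i,
				rxport->aliased_addrs[i]);
}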
@@ -1193,21 +1429,24 @@ err_free_txport:
return ret;
}
-static void ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
+static int ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
{
struct device *dev = &priv->client->dev;
u8 csi_tx_isr;
int ret;
- ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr);
+ ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr,
+ NULL);
if (ret)
- return;
+ return ret;
if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);
if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);
+
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -1216,25 +1455,25 @@ static void ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
static int ub960_rxport_enable_vpocs(struct ub960_data *priv)
{
- unsigned int nport;
+ unsigned int failed_nport;
int ret;
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport || !rxport->vpoc)
+ for_each_active_rxport(priv, it) {
+ if (!it.rxport->vpoc)
continue;
- ret = regulator_enable(rxport->vpoc);
- if (ret)
+ ret = regulator_enable(it.rxport->vpoc);
+ if (ret) {
+ failed_nport = it.nport;
goto err_disable_vpocs;
+ }
}
return 0;
err_disable_vpocs:
- while (nport--) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ while (failed_nport--) {
+ struct ub960_rxport *rxport = priv->rxports[failed_nport];
if (!rxport || !rxport->vpoc)
continue;
@@ -1247,40 +1486,44 @@ err_disable_vpocs:
static void ub960_rxport_disable_vpocs(struct ub960_data *priv)
{
- unsigned int nport;
-
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport || !rxport->vpoc)
+ for_each_active_rxport(priv, it) {
+ if (!it.rxport->vpoc)
continue;
- regulator_disable(rxport->vpoc);
+ regulator_disable(it.rxport->vpoc);
}
}
-static void ub960_rxport_clear_errors(struct ub960_data *priv,
- unsigned int nport)
+static int ub960_rxport_clear_errors(struct ub960_data *priv,
+ unsigned int nport)
{
+ int ret = 0;
u8 v;
- ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);
- ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
- ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v);
- ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v, &ret);
- ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v);
- ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v, &ret);
- ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v, &ret);
+
+ return ret;
}
-static void ub960_clear_rx_errors(struct ub960_data *priv)
+static int ub960_clear_rx_errors(struct ub960_data *priv)
{
- unsigned int nport;
+ int ret;
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++)
- ub960_rxport_clear_errors(priv, nport);
+ for_each_rxport(priv, it) {
+ ret = ub960_rxport_clear_errors(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
@@ -1290,25 +1533,29 @@ static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
u8 clk_delay, data_delay;
int ret;
- ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
- UB960_IR_RX_ANA_STROBE_SET_CLK, &v);
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB960_IR_RX_ANA_STROBE_SET_CLK, &v, NULL);
+ if (ret)
+ return ret;
clk_delay = (v & UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
- ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
- UB960_IR_RX_ANA_STROBE_SET_DATA, &v);
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB960_IR_RX_ANA_STROBE_SET_DATA, &v, NULL);
+ if (ret)
+ return ret;
data_delay = (v & UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ?
0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
- ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v, NULL);
if (ret)
return ret;
clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK;
- ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v, NULL);
if (ret)
return ret;
@@ -1319,10 +1566,11 @@ static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
return 0;
}
-static void ub960_rxport_set_strobe_pos(struct ub960_data *priv,
- unsigned int nport, s8 strobe_pos)
+static int ub960_rxport_set_strobe_pos(struct ub960_data *priv,
+ unsigned int nport, s8 strobe_pos)
{
u8 clk_delay, data_delay;
+ int ret = 0;
clk_delay = UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
data_delay = UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;
@@ -1337,22 +1585,25 @@ static void ub960_rxport_set_strobe_pos(struct ub960_data *priv,
data_delay = strobe_pos | UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;
ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
- UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay);
+ UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay, &ret);
ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
- UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay);
+ UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay, &ret);
+
+ return ret;
}
-static void ub960_rxport_set_strobe_range(struct ub960_data *priv,
- s8 strobe_min, s8 strobe_max)
+static int ub960_rxport_set_strobe_range(struct ub960_data *priv, s8 strobe_min,
+ s8 strobe_max)
{
/* Convert the signed strobe pos to positive zero based value */
strobe_min -= UB960_MIN_AEQ_STROBE_POS;
strobe_max -= UB960_MIN_AEQ_STROBE_POS;
- ub960_write(priv, UB960_XR_SFILTER_CFG,
- ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
- ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT));
+ return ub960_write(priv, UB960_XR_SFILTER_CFG,
+ ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
+ ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT),
+ NULL);
}
static int ub960_rxport_get_eq_level(struct ub960_data *priv,
@@ -1361,7 +1612,7 @@ static int ub960_rxport_get_eq_level(struct ub960_data *priv,
int ret;
u8 v;
- ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v, NULL);
if (ret)
return ret;
@@ -1371,11 +1622,12 @@ static int ub960_rxport_get_eq_level(struct ub960_data *priv,
return 0;
}
-static void ub960_rxport_set_eq_level(struct ub960_data *priv,
- unsigned int nport, u8 eq_level)
+static int ub960_rxport_set_eq_level(struct ub960_data *priv,
+ unsigned int nport, u8 eq_level)
{
u8 eq_stage_1_select_value, eq_stage_2_select_value;
const unsigned int eq_stage_max = 7;
+ int ret;
u8 v;
if (eq_level <= eq_stage_max) {
@@ -1386,7 +1638,9 @@ static void ub960_rxport_set_eq_level(struct ub960_data *priv,
eq_stage_2_select_value = eq_level - eq_stage_max;
}
- ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
+ if (ret)
+ return ret;
v &= ~(UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK |
UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK);
@@ -1394,67 +1648,102 @@ static void ub960_rxport_set_eq_level(struct ub960_data *priv,
v |= eq_stage_2_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT;
v |= UB960_RR_AEQ_BYPASS_ENABLE;
- ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v);
+ ret = ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
}
-static void ub960_rxport_set_eq_range(struct ub960_data *priv,
- unsigned int nport, u8 eq_min, u8 eq_max)
+static int ub960_rxport_set_eq_range(struct ub960_data *priv,
+ unsigned int nport, u8 eq_min, u8 eq_max)
{
+ int ret = 0;
+
ub960_rxport_write(priv, nport, UB960_RR_AEQ_MIN_MAX,
(eq_min << UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) |
- (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT));
+ (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT),
+ &ret);
/* Enable AEQ min setting */
ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_CTL2,
UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR,
- UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR);
+ UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR, &ret);
+
+ return ret;
}
-static void ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
+static int ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
{
struct ub960_rxport *rxport = priv->rxports[nport];
+ int ret;
/* We also set common settings here. Should be moved elsewhere. */
if (priv->strobe.manual) {
/* Disable AEQ_SFILTER_EN */
- ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
- UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0);
+ ret = ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
+ UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0,
+ NULL);
+ if (ret)
+ return ret;
} else {
/* Enable SFILTER and error control */
- ub960_write(priv, UB960_XR_AEQ_CTL1,
- UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
- UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN);
+ ret = ub960_write(priv, UB960_XR_AEQ_CTL1,
+ UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
+ UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN,
+ NULL);
+
+ if (ret)
+ return ret;
/* Set AEQ strobe range */
- ub960_rxport_set_strobe_range(priv, priv->strobe.min,
- priv->strobe.max);
+ ret = ub960_rxport_set_strobe_range(priv, priv->strobe.min,
+ priv->strobe.max);
+ if (ret)
+ return ret;
}
/* The rest are port specific */
if (priv->strobe.manual)
- ub960_rxport_set_strobe_pos(priv, nport, rxport->eq.strobe_pos);
+ ret = ub960_rxport_set_strobe_pos(priv, nport,
+ rxport->eq.strobe_pos);
else
- ub960_rxport_set_strobe_pos(priv, nport, 0);
+ ret = ub960_rxport_set_strobe_pos(priv, nport, 0);
+
+ if (ret)
+ return ret;
if (rxport->eq.manual_eq) {
- ub960_rxport_set_eq_level(priv, nport,
- rxport->eq.manual.eq_level);
+ ret = ub960_rxport_set_eq_level(priv, nport,
+ rxport->eq.manual.eq_level);
+ if (ret)
+ return ret;
/* Enable AEQ Bypass */
- ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
- UB960_RR_AEQ_BYPASS_ENABLE,
- UB960_RR_AEQ_BYPASS_ENABLE);
+ ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
+ UB960_RR_AEQ_BYPASS_ENABLE,
+ UB960_RR_AEQ_BYPASS_ENABLE,
+ NULL);
+ if (ret)
+ return ret;
} else {
- ub960_rxport_set_eq_range(priv, nport,
- rxport->eq.aeq.eq_level_min,
- rxport->eq.aeq.eq_level_max);
+ ret = ub960_rxport_set_eq_range(priv, nport,
+ rxport->eq.aeq.eq_level_min,
+ rxport->eq.aeq.eq_level_max);
+ if (ret)
+ return ret;
/* Disable AEQ Bypass */
- ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
- UB960_RR_AEQ_BYPASS_ENABLE, 0);
+ ret = ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
+ UB960_RR_AEQ_BYPASS_ENABLE, 0,
+ NULL);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
@@ -1469,7 +1758,7 @@ static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
bool errors;
ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
- &rx_port_sts1);
+ &rx_port_sts1, NULL);
if (ret)
return ret;
@@ -1479,25 +1768,27 @@ static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
}
ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
- &rx_port_sts2);
+ &rx_port_sts2, NULL);
if (ret)
return ret;
- ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts,
+ NULL);
if (ret)
return ret;
ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
- &csi_err_cnt);
+ &csi_err_cnt, NULL);
if (ret)
return ret;
- ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts,
+ NULL);
if (ret)
return ret;
ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
- &parity_errors);
+ &parity_errors, NULL);
if (ret)
return ret;
@@ -1512,6 +1803,23 @@ static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
return 0;
}
+static int ub960_rxport_lockup_wa_ub9702(struct ub960_data *priv)
+{
+ int ret;
+
+ /* Toggle PI_MODE to avoid possible FPD RX lockup */
+
+ ret = ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
+ 2 << 3, NULL);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 5000);
+
+ return ub960_update_bits(priv, UB9702_RR_CHANNEL_MODE, GENMASK(4, 3),
+ 0, NULL);
+}
+
/*
* Wait for the RX ports to lock, have no errors and have stable strobe position
* and EQ level.
@@ -1542,6 +1850,7 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
link_ok_mask = 0;
while (time_before(jiffies, timeout)) {
+ bool fpd4_wa = false;
missing = 0;
for_each_set_bit(nport, &port_mask,
@@ -1556,6 +1865,9 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
if (ret)
return ret;
+ if (!ok && rxport->cdr_mode == RXPORT_CDR_FPD4)
+ fpd4_wa = true;
+
/*
* We want the link to be ok for two consecutive loops,
* as a link could get established just before our test
@@ -1575,6 +1887,12 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
if (missing == 0)
break;
+ if (fpd4_wa) {
+ ret = ub960_rxport_lockup_wa_ub9702(priv);
+ if (ret)
+ return ret;
+ }
+
/*
* The sleep time of 10 ms was found by testing to give a lock
* with a few iterations. It can be decreased if on some setups
@@ -1600,7 +1918,11 @@ static int ub960_rxport_wait_locks(struct ub960_data *priv,
continue;
}
- ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
+ &v, NULL);
+
+ if (ret)
+ return ret;
if (priv->hw_data->is_ub9702) {
dev_dbg(dev, "\trx%u: locked, freq %llu Hz\n",
@@ -1676,13 +1998,188 @@ static unsigned long ub960_calc_bc_clk_rate_ub9702(struct ub960_data *priv,
}
}
+static int ub960_rxport_serializer_write(struct ub960_rxport *rxport, u8 reg,
+ u8 val, int *err)
+{
+ struct ub960_data *priv = rxport->priv;
+ struct device *dev = &priv->client->dev;
+ union i2c_smbus_data data;
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ data.byte = val;
+
+ ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias, 0,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, &data);
+ if (ret)
+ dev_err(dev,
+ "rx%u: cannot write serializer register 0x%02x (%d)!\n",
+ rxport->nport, reg, ret);
+
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int ub960_rxport_serializer_read(struct ub960_rxport *rxport, u8 reg,
+ u8 *val, int *err)
+{
+ struct ub960_data *priv = rxport->priv;
+ struct device *dev = &priv->client->dev;
+ union i2c_smbus_data data = { 0 };
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ ret = i2c_smbus_xfer(priv->client->adapter, rxport->ser.alias,
+ priv->client->flags, I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &data);
+ if (ret)
+ dev_err(dev,
+ "rx%u: cannot read serializer register 0x%02x (%d)!\n",
+ rxport->nport, reg, ret);
+ else
+ *val = data.byte;
+
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
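
These helpers reach the remote serializer directly with i2c_smbus_xfer()
on its programmed alias, so no remote i2c_client is needed; error
latching follows the same driver-wide convention. A hedged sketch
(ub960_example_ser_rev() is hypothetical; the register comes from the
shared ds90ub953.h header added by this patch):

static int ub960_example_ser_rev(struct ub960_rxport *rxport, u8 *rev)
{
	int ret = 0;

	/* SMBus byte read at rxport->ser.alias over the back channel */
	ub960_rxport_serializer_read(rxport, UB953_REG_REV_MASK_ID, rev, &ret);

	return ret;
}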
+static int ub960_serializer_temp_ramp(struct ub960_rxport *rxport)
+{
+ struct ub960_data *priv = rxport->priv;
+ short temp_dynamic_offset[] = {-1, -1, 0, 0, 1, 1, 1, 3};
+ u8 temp_dynamic_cfg;
+ u8 nport = rxport->nport;
+ u8 ser_temp_code;
+ int ret = 0;
+
+ /* Configure temp ramp only on UB953 */
+ if (!fwnode_device_is_compatible(rxport->ser.fwnode, "ti,ds90ub953-q1"))
+ return 0;
+
+ /* Read current serializer die temperature */
+ ub960_rxport_read(priv, nport, UB960_RR_SENSOR_STS_2, &ser_temp_code,
+ &ret);
+
+ /* Enable I2C passthrough on back channel */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);
+
+ if (ret)
+ return ret;
+
+ /* Select indirect page for analog regs on the serializer */
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_CTL,
+ UB953_IND_TARGET_ANALOG << 2, &ret);
+
+ /* Set temperature ramp dynamic and static config */
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
+ UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
+ ub960_rxport_serializer_read(rxport, UB953_REG_IND_ACC_DATA,
+ &temp_dynamic_cfg, &ret);
+
+ if (ret)
+ return ret;
+
+ temp_dynamic_cfg |= UB953_IND_ANA_TEMP_DYNAMIC_CFG_OV;
+ temp_dynamic_cfg += temp_dynamic_offset[ser_temp_code];
+
+ /* Update temp static config */
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
+ UB953_IND_ANA_TEMP_STATIC_CFG, &ret);
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
+ UB953_IND_ANA_TEMP_STATIC_CFG_MASK, &ret);
+
+ /* Update temperature ramp dynamic config */
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_ADDR,
+ UB953_IND_ANA_TEMP_DYNAMIC_CFG, &ret);
+
+ /* Enable I2C auto ack on BC before we set dynamic cfg and reset */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL, &ret);
+
+ ub960_rxport_serializer_write(rxport, UB953_REG_IND_ACC_DATA,
+ temp_dynamic_cfg, &ret);
+
+ if (ret)
+ return ret;
+
+ /* Soft reset to apply PLL updates */
+ ub960_rxport_serializer_write(rxport, UB953_REG_RESET_CTL,
+ UB953_REG_RESET_CTL_DIGITAL_RESET_0,
+ &ret);
+ msleep(20);
+
+ /* Disable I2C passthrough and auto-ack on BC */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ 0x0, &ret);
+
+ return ret;
+}
+
+static int ub960_rxport_bc_ser_config(struct ub960_rxport *rxport)
+{
+ struct ub960_data *priv = rxport->priv;
+ struct device *dev = &priv->client->dev;
+ u8 nport = rxport->nport;
+ int ret = 0;
+
+ /* Skip port if serializer's address is not known */
+ if (rxport->ser.addr < 0) {
+ dev_dbg(dev,
+ "rx%u: serializer address missing, skip configuration\n",
+ nport);
+ return 0;
+ }
+
+ /*
+ * Note: the code here probably only works for CSI-2 serializers in
+ * sync mode. To support other serializers the BC related configuration
+ * should be done before calling this function.
+ */
+
+ /* Enable I2C passthrough and auto-ack on BC */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ &ret);
+
+ if (ret)
+ return ret;
+
+ /* Disable BC alternate mode auto detect */
+ ub960_rxport_serializer_write(rxport, UB971_ENH_BC_CHK, 0x02, &ret);
+ /* Decrease link detect timer */
+ ub960_rxport_serializer_write(rxport, UB953_REG_BC_CTRL, 0x06, &ret);
+
+ /* Disable I2C passthrough and auto-ack on BC */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH |
+ UB960_RR_BCC_CONFIG_AUTO_ACK_ALL,
+ 0x0, &ret);
+
+ return ret;
+}
+
static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
{
struct ub960_rxport *rxport = priv->rxports[nport];
struct device *dev = &priv->client->dev;
struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata;
struct i2c_board_info ser_info = {
- .of_node = to_of_node(rxport->ser.fwnode),
.fwnode = rxport->ser.fwnode,
.platform_data = ser_pdata,
};
@@ -1726,30 +2223,27 @@ static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport)
/* Add serializer i2c devices for all initialized ports */
static int ub960_rxport_add_serializers(struct ub960_data *priv)
{
- unsigned int nport;
+ unsigned int failed_nport;
int ret;
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport)
- continue;
-
- ret = ub960_rxport_add_serializer(priv, nport);
- if (ret)
+ for_each_active_rxport(priv, it) {
+ ret = ub960_rxport_add_serializer(priv, it.nport);
+ if (ret) {
+ failed_nport = it.nport;
goto err_remove_sers;
+ }
}
return 0;
err_remove_sers:
- while (nport--) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ while (failed_nport--) {
+ struct ub960_rxport *rxport = priv->rxports[failed_nport];
if (!rxport)
continue;
- ub960_rxport_remove_serializer(priv, nport);
+ ub960_rxport_remove_serializer(priv, failed_nport);
}
return ret;
@@ -1757,20 +2251,12 @@ err_remove_sers:
static void ub960_rxport_remove_serializers(struct ub960_data *priv)
{
- unsigned int nport;
-
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport)
- continue;
-
- ub960_rxport_remove_serializer(priv, nport);
- }
+ for_each_active_rxport(priv, it)
+ ub960_rxport_remove_serializer(priv, it.nport);
}
-static void ub960_init_tx_port(struct ub960_data *priv,
- struct ub960_txport *txport)
+static int ub960_init_tx_port(struct ub960_data *priv,
+ struct ub960_txport *txport)
{
unsigned int nport = txport->nport;
u8 csi_ctl = 0;
@@ -1787,76 +2273,114 @@ static void ub960_init_tx_port(struct ub960_data *priv,
if (!txport->non_continous_clk)
csi_ctl |= UB960_TR_CSI_CTL_CSI_CONTS_CLOCK;
- ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl);
+ return ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl, NULL);
}
-static int ub960_init_tx_ports(struct ub960_data *priv)
+static int ub960_init_tx_ports_ub960(struct ub960_data *priv)
{
- unsigned int nport;
u8 speed_select;
- u8 pll_div;
-
- /* TX ports */
switch (priv->tx_data_rate) {
+ case MHZ(400):
+ speed_select = 3;
+ break;
+ case MHZ(800):
+ speed_select = 2;
+ break;
+ case MHZ(1200):
+ speed_select = 1;
+ break;
case MHZ(1600):
default:
speed_select = 0;
+ break;
+ }
+
+ return ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, NULL);
+}
+
+static int ub960_init_tx_ports_ub9702(struct ub960_data *priv)
+{
+ u8 speed_select;
+ u8 ana_pll_div;
+ u8 pll_div;
+ int ret = 0;
+
+ switch (priv->tx_data_rate) {
+ case MHZ(400):
+ speed_select = 3;
+ pll_div = 0x10;
+ ana_pll_div = 0xa2;
+ break;
+ case MHZ(800):
+ speed_select = 2;
pll_div = 0x10;
+ ana_pll_div = 0x92;
break;
case MHZ(1200):
speed_select = 1;
pll_div = 0x18;
+ ana_pll_div = 0x90;
break;
- case MHZ(800):
- speed_select = 2;
- pll_div = 0x10;
+ case MHZ(1500):
+ speed_select = 0;
+ pll_div = 0x0f;
+ ana_pll_div = 0x82;
break;
- case MHZ(400):
- speed_select = 3;
+ case MHZ(1600):
+ default:
+ speed_select = 0;
pll_div = 0x10;
+ ana_pll_div = 0x82;
+ break;
+ case MHZ(2500):
+ speed_select = 0x10;
+ pll_div = 0x19;
+ ana_pll_div = 0x80;
break;
}
- ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select);
+ ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select, &ret);
+ ub960_write(priv, UB9702_SR_CSI_PLL_DIV, pll_div, &ret);
+ ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA,
+ UB9702_IR_CSI_ANA_CSIPLL_REG_1, ana_pll_div, &ret);
- if (priv->hw_data->is_ub9702) {
- ub960_write(priv, UB960_SR_CSI_PLL_DIV, pll_div);
+ return ret;
+}
- switch (priv->tx_data_rate) {
- case MHZ(1600):
- default:
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x80);
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a);
- break;
- case MHZ(800):
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x90);
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4f, 0x2a);
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a);
- break;
- case MHZ(400):
- ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0xa0);
- break;
- }
- }
+static int ub960_init_tx_ports(struct ub960_data *priv)
+{
+ int ret;
- for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ if (priv->hw_data->is_ub9702)
+ ret = ub960_init_tx_ports_ub9702(priv);
+ else
+ ret = ub960_init_tx_ports_ub960(priv);
+
+ if (ret)
+ return ret;
+
+ for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
+ nport++) {
struct ub960_txport *txport = priv->txports[nport];
if (!txport)
continue;
- ub960_init_tx_port(priv, txport);
+ ret = ub960_init_tx_port(priv, txport);
+ if (ret)
+ return ret;
}
return 0;
}
-static void ub960_init_rx_port_ub960(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_init_rx_port_ub960(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
{
unsigned int nport = rxport->nport;
u32 bc_freq_val;
+ int ret = 0;
/*
* Back channel frequency select.
@@ -1885,306 +2409,870 @@ static void ub960_init_rx_port_ub960(struct ub960_data *priv,
break;
default:
- return;
+ return -EINVAL;
}
ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK,
- bc_freq_val);
+ bc_freq_val, &ret);
switch (rxport->rx_mode) {
case RXPORT_MODE_RAW10:
/* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */
ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG,
UB960_RR_PORT_CONFIG_FPD3_MODE_MASK,
- 0x3);
+ 0x3, &ret);
/*
* RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits
*/
ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK,
- 0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT);
+ 0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT,
+ &ret);
break;
case RXPORT_MODE_RAW12_HF:
case RXPORT_MODE_RAW12_LF:
/* Not implemented */
- return;
+ return -EINVAL;
case RXPORT_MODE_CSI2_SYNC:
case RXPORT_MODE_CSI2_NONSYNC:
/* CSI-2 Mode (DS90UB953-Q1 compatible) */
ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
- 0x0);
+ 0x0, &ret);
break;
}
/* LV_POLARITY & FV_POLARITY */
ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
- rxport->lv_fv_pol);
+ rxport->lv_fv_pol, &ret);
/* Enable all interrupt sources from this port */
- ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
- ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07, &ret);
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f, &ret);
/* Enable I2C_PASS_THROUGH */
ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
- UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH, &ret);
/* Enable I2C communication to the serializer via the alias addr */
ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
- rxport->ser.alias << 1);
+ rxport->ser.alias << 1, &ret);
/* Configure EQ related settings */
ub960_rxport_config_eq(priv, nport);
/* Enable RX port */
- ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport));
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
+
+ return ret;
}
-static void ub960_init_rx_port_ub9702_fpd3(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_init_rx_ports_ub960(struct ub960_data *priv)
{
- unsigned int nport = rxport->nport;
- u8 bc_freq_val;
- u8 fpd_func_mode;
+ struct device *dev = &priv->client->dev;
+ unsigned int port_lock_mask;
+ unsigned int port_mask;
+ int ret;
- switch (rxport->rx_mode) {
- case RXPORT_MODE_RAW10:
- bc_freq_val = 0;
- fpd_func_mode = 5;
- break;
+ for_each_active_rxport(priv, it) {
+ ret = ub960_init_rx_port_ub960(priv, it.rxport);
+ if (ret)
+ return ret;
+ }
- case RXPORT_MODE_RAW12_HF:
- bc_freq_val = 0;
- fpd_func_mode = 4;
- break;
+ ret = ub960_reset(priv, false);
+ if (ret)
+ return ret;
- case RXPORT_MODE_RAW12_LF:
- bc_freq_val = 0;
- fpd_func_mode = 6;
- break;
+ port_mask = 0;
- case RXPORT_MODE_CSI2_SYNC:
- bc_freq_val = 6;
- fpd_func_mode = 2;
- break;
+ for_each_active_rxport(priv, it)
+ port_mask |= BIT(it.nport);
- case RXPORT_MODE_CSI2_NONSYNC:
- bc_freq_val = 2;
- fpd_func_mode = 2;
- break;
+ ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
+ if (ret)
+ return ret;
- default:
- return;
+ if (port_mask != port_lock_mask) {
+ ret = -EIO;
+ dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
+ return ret;
}
- ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
- bc_freq_val);
- ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, fpd_func_mode);
+ /* Set temperature ramp on serializer */
+ for_each_active_rxport(priv, it) {
+ ret = ub960_serializer_temp_ramp(it.rxport);
+ if (ret)
+ return ret;
+
+ ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ &ret);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Clear any errors caused by switching the RX port settings while
+ * probing.
+ */
+ ret = ub960_clear_rx_errors(priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
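
The for_each_rxport() family used here is defined in an earlier hunk of this
patch. For reading these loops, one plausible shape of the active-port
variant -- an assumption, with the iterator type name invented here -- is:

struct ub960_rxport_iter {
        unsigned int nport;
        struct ub960_rxport *rxport;
};

/* Visit only the RX ports that were actually described in the DT */
#define for_each_active_rxport(priv, it)                                \
        for (struct ub960_rxport_iter it = { .nport = 0 };              \
             it.nport < (priv)->hw_data->num_rxports; it.nport++)       \
                if (!(it.rxport = (priv)->rxports[it.nport]))           \
                        continue;                                       \
                else

for_each_rxport() would be the same loop without the NULL filter (callers
check it.rxport themselves), and for_each_active_rxport_fpd4() would
additionally skip ports whose cdr_mode is not RXPORT_CDR_FPD4.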
+
+/*
+ * UB9702 specific initial RX port configuration
+ */
+
+static int ub960_turn_off_rxport_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ int ret = 0;
+
+ /* Disable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), 0, &ret);
+
+ /* Disable FPD Rx and FPD BC CMR */
+ ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_2, 0x1b, &ret);
- /* set serdes_eq_mode = 1 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa8, 0x80);
+ /* Disable FPD BC Tx */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, BIT(4), 0,
+ &ret);
- /* enable serdes driver */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x0d, 0x7f);
+ /* Disable internal RX blocks */
+ ub960_rxport_write(priv, nport, UB9702_RR_RX_CTL_1, 0x15, &ret);
- /* set serdes_eq_offset=4 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04);
+ /* Disable AEQ */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_CFG_2, 0x03, &ret);
+
+ /* PI disabled and oDAC disabled */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_CFG_4, 0x09, &ret);
- /* init default serdes_eq_max in 0xa9 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa9, 0x23);
+ /* AEQ configured for disabled link */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_CFG_1, 0x20, &ret);
- /* init serdes_eq_min in 0xaa */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xaa, 0);
+ /* disable AEQ clock and DFE */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_CFG_3, 0x45, &ret);
- /* serdes_driver_ctl2 control: DS90UB953-Q1/DS90UB933-Q1/DS90UB913A-Q1 */
- ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b,
- BIT(3), BIT(3));
+ /* Powerdown FPD3 CDR */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD3_CDR_CTRL_SEL_5, 0x82, &ret);
- /* RX port to half-rate */
- ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2),
- BIT(nport * 2));
+ return ret;
}
-static void ub960_init_rx_port_ub9702_fpd4_aeq(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_set_bc_drv_config_ub9702(struct ub960_data *priv,
+ unsigned int nport)
{
- unsigned int nport = rxport->nport;
- bool first_time_power_up = true;
-
- if (first_time_power_up) {
- u8 v;
+ u8 fpd_bc_ctl0;
+ u8 fpd_bc_ctl1;
+ u8 fpd_bc_ctl2;
+ int ret = 0;
- /* AEQ init */
- ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2c, &v);
+ if (priv->rxports[nport]->cdr_mode == RXPORT_CDR_FPD4) {
+ /* Set FPD PBC drv into FPD IV mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, v);
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, v + 1);
+ fpd_bc_ctl0 = 0;
+ fpd_bc_ctl1 = 0;
+ fpd_bc_ctl2 = 0;
+ } else {
+ /* Set FPD PBC drv into FPD III mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x00);
+ fpd_bc_ctl0 = 2;
+ fpd_bc_ctl1 = 1;
+ fpd_bc_ctl2 = 5;
}
- /* enable serdes_eq_ctl2 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x9e, 0x00);
+ ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD_BC_CTL0, GENMASK(7, 5),
+ fpd_bc_ctl0 << 5, &ret);
- /* enable serdes_eq_ctl1 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x90, 0x40);
+ ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD_BC_CTL1, BIT(6),
+ fpd_bc_ctl1 << 6, &ret);
- /* enable serdes_eq_en */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2e, 0x40);
+ ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD_BC_CTL2, GENMASK(6, 3),
+ fpd_bc_ctl2 << 3, &ret);
- /* disable serdes_eq_override */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xf0, 0x00);
+ return ret;
+}
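
The mask/shift pairs above (GENMASK(7, 5) with << 5, and so on) have to be
kept in sync by hand; FIELD_PREP() from <linux/bitfield.h> derives the shift
from the mask at compile time. A hypothetical equivalent for the first
write, purely to illustrate:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* FIELD_PREP() places the value into the mask's bit position */
static inline u8 demo_fpd_bc_ctl0_bits(u8 fpd_bc_ctl0)
{
        return FIELD_PREP(GENMASK(7, 5), fpd_bc_ctl0); /* == ctl0 << 5 */
}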
- /* disable serdes_gain_override */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x71, 0x00);
+static int ub960_set_fpd4_sync_mode_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ int ret = 0;
+
+ /* FPD4 Sync Mode */
+ ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x0, &ret);
+
+ /* BC_FREQ_SELECT = (PLL_FREQ/3200) Mbps */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 6, &ret);
+
+ if (ret)
+ return ret;
+
+ ret = ub960_set_bc_drv_config_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ /* Set AEQ timer to 400us/step */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0x2f, &ret);
+
+ /* Disable FPD4 Auto Recovery */
+ ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4), 0,
+ &ret);
+
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
+
+ /* Enable FPD4 Auto Recovery */
+ ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4),
+ BIT(4), &ret);
+
+ return ret;
}
-static void ub960_init_rx_port_ub9702_fpd4(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_set_fpd4_async_mode_ub9702(struct ub960_data *priv,
+ unsigned int nport)
{
- unsigned int nport = rxport->nport;
- u8 bc_freq_val;
+ int ret = 0;
- switch (rxport->rx_mode) {
- case RXPORT_MODE_RAW10:
- bc_freq_val = 0;
- break;
+ /* FPD4 ASync Mode */
+ ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x1, &ret);
- case RXPORT_MODE_RAW12_HF:
- bc_freq_val = 0;
- break;
+ /* 10Mbps w/ BC enabled */
+ /* BC_FREQ_SELECT=(PLL_FREQ/3200) Mbps */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 2, &ret);
- case RXPORT_MODE_RAW12_LF:
- bc_freq_val = 0;
- break;
+ if (ret)
+ return ret;
- case RXPORT_MODE_CSI2_SYNC:
- bc_freq_val = 6;
- break;
+ ret = ub960_set_bc_drv_config_ub9702(priv, nport);
+ if (ret)
+ return ret;
- case RXPORT_MODE_CSI2_NONSYNC:
- bc_freq_val = 2;
- break;
+ /* Set AEQ timer to 400us/step */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0x2f, &ret);
- default:
- return;
- }
+ /* Disable FPD4 Auto Recovery */
+ ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4), 0,
+ &ret);
- ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
- bc_freq_val);
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
- /* FPD4 Sync Mode */
- ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, 0);
+ /* Enable FPD4 Auto Recovery */
+ ub960_update_bits(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, GENMASK(5, 4),
+ BIT(4), &ret);
- /* add serdes_eq_offset of 4 */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04);
+ return ret;
+}
+
+static int ub960_set_fpd3_sync_mode_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ int ret = 0;
- /* FPD4 serdes_start_eq in 0x27: assign default */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, 0x0);
- /* FPD4 serdes_end_eq in 0x28: assign default */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, 0x23);
+ /* FPD3 Sync Mode */
+ ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x2, &ret);
- /* set serdes_driver_mode into FPD IV mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x04, 0x00);
- /* set FPD PBC drv into FPD IV mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b, 0x00);
+ /* BC_FREQ_SELECT=(PLL_FREQ/3200) Mbps */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 6, &ret);
- /* set serdes_system_init to 0x2f */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x21, 0x2f);
- /* set serdes_system_rst in reset mode */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0xc1);
+ /* Set AEQ_LOCK_MODE = 1 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1, BIT(7), &ret);
- /* RX port to 7.55G mode */
- ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2),
- 0 << (nport * 2));
+ if (ret)
+ return ret;
- ub960_init_rx_port_ub9702_fpd4_aeq(priv, rxport);
+ ret = ub960_set_bc_drv_config_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
+
+ return ret;
}
-static void ub960_init_rx_port_ub9702(struct ub960_data *priv,
- struct ub960_rxport *rxport)
+static int ub960_set_raw10_dvp_mode_ub9702(struct ub960_data *priv,
+ unsigned int nport)
{
- unsigned int nport = rxport->nport;
+ int ret = 0;
- if (rxport->cdr_mode == RXPORT_CDR_FPD3)
- ub960_init_rx_port_ub9702_fpd3(priv, rxport);
- else /* RXPORT_CDR_FPD4 */
- ub960_init_rx_port_ub9702_fpd4(priv, rxport);
+ /* FPD3 RAW10 Mode */
+ ub960_rxport_write(priv, nport, UB9702_RR_CHANNEL_MODE, 0x5, &ret);
- switch (rxport->rx_mode) {
- case RXPORT_MODE_RAW10:
- /*
- * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits
- * 0b10 : 8-bit processing using upper 8 bits
- */
- ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
- 0x3 << 6, 0x2 << 6);
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK, 0, &ret);
+
+ /* Set AEQ_LOCK_MODE = 1 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_FPD3_AEQ_CTRL_SEL_1, BIT(7), &ret);
+
+ /*
+ * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits
+ * 0b10 : 8-bit processing using upper 8 bits
+ */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3 << 6,
+ 0x2 << 6, &ret);
+
+ /* LV_POLARITY & FV_POLARITY */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
+ priv->rxports[nport]->lv_fv_pol, &ret);
+
+ if (ret)
+ return ret;
+
+ ret = ub960_set_bc_drv_config_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport),
+ &ret);
+
+ return ret;
+}
+
+static int ub960_configure_rx_port_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ int ret;
+
+ if (!rxport) {
+ ret = ub960_turn_off_rxport_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: disabled\n", nport);
+ return 0;
+ }
+
+ switch (rxport->cdr_mode) {
+ case RXPORT_CDR_FPD4:
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_CSI2_SYNC:
+ ret = ub960_set_fpd4_sync_mode_ub9702(priv, nport);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: FPD-Link IV SYNC mode\n", nport);
+ break;
+ case RXPORT_MODE_CSI2_NONSYNC:
+ ret = ub960_set_fpd4_async_mode_ub9702(priv, nport);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "rx%u: FPD-Link IV ASYNC mode\n", nport);
+ break;
+ default:
+ dev_err(dev, "rx%u: unsupported FPD4 mode %u\n", nport,
+ rxport->rx_mode);
+ return -EINVAL;
+ }
break;
- case RXPORT_MODE_RAW12_HF:
- case RXPORT_MODE_RAW12_LF:
- /* Not implemented */
- return;
+ case RXPORT_CDR_FPD3:
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_CSI2_SYNC:
+ ret = ub960_set_fpd3_sync_mode_ub9702(priv, nport);
+ if (ret)
+ return ret;
- case RXPORT_MODE_CSI2_SYNC:
- case RXPORT_MODE_CSI2_NONSYNC:
+ dev_dbg(dev, "rx%u: FPD-Link III SYNC mode\n", nport);
+ break;
+ case RXPORT_MODE_RAW10:
+ ret = ub960_set_raw10_dvp_mode_ub9702(priv, nport);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "rx%u: FPD-Link III RAW10 DVP mode\n",
+ nport);
+ break;
+ default:
+ dev_err(dev, "rx%u: unsupported FPD3 mode %u\n",
+ nport, rxport->rx_mode);
+ return -EINVAL;
+ }
break;
+
+ default:
+ dev_err(&priv->client->dev, "rx%u: unsupported CDR mode %u\n",
+ nport, rxport->cdr_mode);
+ return -EINVAL;
}
- /* LV_POLARITY & FV_POLARITY */
- ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
- rxport->lv_fv_pol);
+ return 0;
+}
- /* Enable all interrupt sources from this port */
- ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
- ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);
+static int ub960_lock_recovery_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+ /* Assume the maximum AEQ value stays under 16 */
+ const u8 rx_aeq_limit = 16;
+ u8 prev_aeq = 0xff;
+ bool rx_lock;
+
+ for (unsigned int retry = 0; retry < 3; ++retry) {
+ u8 port_sts1;
+ u8 rx_aeq;
+ int ret;
- /* Enable I2C_PASS_THROUGH */
- ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
- UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
- UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
+ &port_sts1, NULL);
+ if (ret)
+ return ret;
- /* Enable I2C communication to the serializer via the alias addr */
- ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
- rxport->ser.alias << 1);
+ rx_lock = port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS;
- /* Enable RX port */
- ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport));
+ if (!rx_lock) {
+ ret = ub960_rxport_lockup_wa_ub9702(priv);
+ if (ret)
+ return ret;
+
+ /* Restart AEQ by changing max to 0 --> 0x23 */
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0,
+ NULL);
+ if (ret)
+ return ret;
+
+ msleep(20);
+
+ /* AEQ Restart */
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
+ 0x23, NULL);
+
+ if (ret)
+ return ret;
+
+ msleep(20);
+ dev_dbg(dev, "rx%u: no lock, retry = %u\n", nport,
+ retry);
+
+ continue;
+ }
+
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &rx_aeq,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (rx_aeq < rx_aeq_limit) {
+ dev_dbg(dev,
+ "rx%u: locked and AEQ normal before setting AEQ window\n",
+ nport);
+ return 0;
+ }
+
+ if (rx_aeq != prev_aeq) {
+ ret = ub960_rxport_lockup_wa_ub9702(priv);
+ if (ret)
+ return ret;
+
+ /* Restart AEQ by changing max to 0 --> 0x23 */
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
+ 0, NULL);
+ if (ret)
+ return ret;
+
+ msleep(20);
- if (rxport->cdr_mode == RXPORT_CDR_FPD4) {
- /* unreset 960 AEQ */
- ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0x41);
+ /* AEQ Restart */
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
+ 0x23, NULL);
+ if (ret)
+ return ret;
+
+ msleep(20);
+
+ dev_dbg(dev,
+ "rx%u: high AEQ at initial check recovery loop, retry=%u\n",
+ nport, retry);
+
+ prev_aeq = rx_aeq;
+ } else {
+ dev_dbg(dev,
+ "rx%u: lossy cable detected, RX_AEQ %#x, RX_AEQ_LIMIT %#x, retry %u\n",
+ nport, rx_aeq, rx_aeq_limit, retry);
+ dev_dbg(dev,
+ "rx%u: will continue with initiation sequence but high AEQ\n",
+ nport);
+ return 0;
+ }
}
+
+ dev_err(dev, "rx%u: max number of retries: %s\n", nport,
+ rx_lock ? "unstable AEQ" : "no lock");
+
+ return -EIO;
}
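
Both branches of the recovery loop perform the same "AEQ restart" gesture:
clamp the AEQ ceiling (AEQ_ALP_SEL7) to 0, wait, then raise it back to 0x23
so the equalizer re-adapts from scratch. Factored out, it would look like
this hypothetical helper (the register writes match the code above; the
helper itself is not part of the patch):

static int ub960_demo_aeq_restart(struct ub960_data *priv, unsigned int nport)
{
        int ret;

        /* Drop the AEQ ceiling to force the EQ to give up its solution */
        ret = ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
                              UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0, NULL);
        if (ret)
                return ret;

        msleep(20);

        /* Restore the ceiling so the EQ re-adapts */
        ret = ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
                              UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0x23, NULL);
        if (ret)
                return ret;

        msleep(20);

        return 0;
}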
-static int ub960_init_rx_ports(struct ub960_data *priv)
+static int ub960_enable_aeq_lms_ub9702(struct ub960_data *priv,
+ unsigned int nport)
{
- unsigned int nport;
+ struct device *dev = &priv->client->dev;
+ u8 read_aeq_init;
+ int ret;
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &read_aeq_init,
+ NULL);
+ if (ret)
+ return ret;
- if (!rxport)
- continue;
+ dev_dbg(dev, "rx%u: initial AEQ = %#x\n", nport, read_aeq_init);
- if (priv->hw_data->is_ub9702)
- ub960_init_rx_port_ub9702(priv, rxport);
- else
- ub960_init_rx_port_ub960(priv, rxport);
+ /* Set AEQ Min */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL6, read_aeq_init, &ret);
+ /* Set AEQ Max */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7, read_aeq_init + 1, &ret);
+ /* Set AEQ offset to 0 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL10, 0x0, &ret);
+
+ /* Enable AEQ tap2 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_CTRL_SEL_38, 0x00, &ret);
+ /* Set VGA Gain 1 Gain 2 override to 0 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_8, 0x00, &ret);
+ /* Set VGA Initial Sweep Gain to 0 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_6, 0x80, &ret);
+ /* Set VGA_Adapt (VGA Gain) override to 0 (thermometer encoded) */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_3, 0x00, &ret);
+ /* Enable VGA_SWEEP */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_ADAPT_CTRL, 0x40, &ret);
+ /* Disable VGA_SWEEP_GAIN_OV, disable VGA_TUNE_OV */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_OVERRIDE_CTRL, 0x00, &ret);
+
+ /* Set VGA HIGH Threshold to 43 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_1, 0x2b, &ret);
+ /* Set VGA LOW Threshold to 18 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_VGA_CTRL_SEL_2, 0x12, &ret);
+ /* Set vga_sweep_th to 32 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_CTRL_SEL_15, 0x20, &ret);
+ /* Set AEQ timer to 400us/step and parity threshold to 7 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_SYSTEM_INIT_REG0, 0xef, &ret);
+
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: enable FPD-Link IV AEQ LMS\n", nport);
+
+ return 0;
+}
+
+static int ub960_enable_dfe_lms_ub9702(struct ub960_data *priv,
+ unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+ int ret = 0;
+
+ /* Enable DFE LMS */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_EQ_CTRL_SEL_24, 0x40, &ret);
+ /* Disable VGA Gain1 override */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_GAIN_CTRL_0, 0x20, &ret);
+
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 5000);
+
+ /* Disable VGA Gain2 override */
+ ret = ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB9702_IR_RX_ANA_GAIN_CTRL_0, 0x00, NULL);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: enabled FPD-Link IV DFE LMS", nport);
+
+ return 0;
+}
+
+static int ub960_init_rx_ports_ub9702(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int port_lock_mask;
+ unsigned int port_mask = 0;
+ bool have_fpd4 = false;
+ int ret;
+
+ for_each_active_rxport(priv, it) {
+ ret = ub960_rxport_update_bits(priv, it.nport,
+ UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_ALWAYS_ON,
+ UB960_RR_BCC_CONFIG_BC_ALWAYS_ON,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable FPD4 Auto Recovery */
+ ret = ub960_write(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, 0x0f, NULL);
+ if (ret)
+ return ret;
+
+ for_each_active_rxport(priv, it) {
+ if (it.rxport->ser.addr >= 0) {
+ /*
+ * Set serializer's I2C address if set in the dts file,
+ * and freeze it to prevent updates from the FC.
+ */
+ ub960_rxport_write(priv, it.nport, UB960_RR_SER_ID,
+ it.rxport->ser.addr << 1 |
+ UB960_RR_SER_ID_FREEZE_DEVICE_ID,
+ &ret);
+ }
+
+ /* Set serializer I2C alias with auto-ack */
+ ub960_rxport_write(priv, it.nport, UB960_RR_SER_ALIAS_ID,
+ it.rxport->ser.alias << 1 |
+ UB960_RR_SER_ALIAS_ID_AUTO_ACK, &ret);
+
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_rxport(priv, it) {
+ if (fwnode_device_is_compatible(it.rxport->ser.fwnode,
+ "ti,ds90ub971-q1")) {
+ ret = ub960_rxport_bc_ser_config(it.rxport);
+ if (ret)
+ return ret;
+ }
+ }
+
+ for_each_active_rxport_fpd4(priv, it) {
+ /* Hold state machine in reset */
+ ub960_rxport_write(priv, it.nport, UB9702_RR_RX_SM_SEL_2, 0x10,
+ &ret);
+
+ /* Set AEQ max to 0 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(it.nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7, 0, &ret);
+
+ if (ret)
+ return ret;
+
+ dev_dbg(dev,
+ "rx%u: holding state machine and adjusting AEQ max to 0",
+ it.nport);
+ }
+
+ for_each_active_rxport(priv, it) {
+ port_mask |= BIT(it.nport);
+
+ if (it.rxport->cdr_mode == RXPORT_CDR_FPD4)
+ have_fpd4 = true;
+ }
+
+ for_each_rxport(priv, it) {
+ ret = ub960_configure_rx_port_ub9702(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ ret = ub960_reset(priv, false);
+ if (ret)
+ return ret;
+
+ if (have_fpd4) {
+ for_each_active_rxport_fpd4(priv, it) {
+ /* Release state machine */
+ ret = ub960_rxport_write(priv, it.nport,
+ UB9702_RR_RX_SM_SEL_2, 0x0,
+ NULL);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: state machine released\n",
+ it.nport);
+ }
+
+ /* Wait for SM to resume */
+ fsleep(5000);
+
+ for_each_active_rxport_fpd4(priv, it) {
+ ret = ub960_write_ind(priv,
+ UB960_IND_TARGET_RX_ANA(it.nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL7,
+ 0x23, NULL);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: AEQ restart\n", it.nport);
+ }
+
+ /* Wait for lock */
+ fsleep(20000);
+
+ for_each_active_rxport_fpd4(priv, it) {
+ ret = ub960_lock_recovery_ub9702(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_rxport_fpd4(priv, it) {
+ ret = ub960_enable_aeq_lms_ub9702(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_rxport_fpd4(priv, it) {
+ /* Hold state machine in reset */
+ ret = ub960_rxport_write(priv, it.nport,
+ UB9702_RR_RX_SM_SEL_2, 0x10,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ ret = ub960_reset(priv, false);
+ if (ret)
+ return ret;
+
+ for_each_active_rxport_fpd4(priv, it) {
+ /* Release state machine */
+ ret = ub960_rxport_write(priv, it.nport,
+ UB9702_RR_RX_SM_SEL_2, 0,
+ NULL);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Wait time for stable lock */
+ fsleep(15000);
+
+ /* Set temperature ramp on serializer */
+ for_each_active_rxport(priv, it) {
+ ret = ub960_serializer_temp_ramp(it.rxport);
+ if (ret)
+ return ret;
+ }
+
+ for_each_active_rxport_fpd4(priv, it) {
+ ret = ub960_enable_dfe_lms_ub9702(priv, it.nport);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for DFE and LMS to adapt */
+ fsleep(5000);
+
+ ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
+ if (ret)
+ return ret;
+
+ if (port_mask != port_lock_mask) {
+ ret = -EIO;
+ dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
+ return ret;
+ }
+
+ for_each_active_rxport(priv, it) {
+ /* Enable all interrupt sources from this port */
+ ub960_rxport_write(priv, it.nport, UB960_RR_PORT_ICR_HI, 0x07,
+ &ret);
+ ub960_rxport_write(priv, it.nport, UB960_RR_PORT_ICR_LO, 0x7f,
+ &ret);
+
+ /* Clear serializer I2C alias auto-ack */
+ ub960_rxport_update_bits(priv, it.nport, UB960_RR_SER_ALIAS_ID,
+ UB960_RR_SER_ALIAS_ID_AUTO_ACK, 0,
+ &ret);
+
+ /* Enable I2C_PASS_THROUGH */
+ ub960_rxport_update_bits(priv, it.nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ &ret);
+
+ if (ret)
+ return ret;
}
+ /* Enable FPD4 Auto Recovery with the recovery loop active */
+ ret = ub960_write(priv, UB9702_SR_CSI_EXCLUSIVE_FWD2, 0x18, NULL);
+ if (ret)
+ return ret;
+
+ for_each_active_rxport_fpd4(priv, it) {
+ u8 final_aeq;
+
+ ret = ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(it.nport),
+ UB9702_IR_RX_ANA_AEQ_ALP_SEL11, &final_aeq,
+ NULL);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "rx%u: final AEQ = %#x\n", it.nport, final_aeq);
+ }
+
+ /*
+ * Clear any errors caused by switching the RX port settings while
+ * probing.
+ */
+ ret = ub960_clear_rx_errors(priv);
+ if (ret)
+ return ret;
+
return 0;
}
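
For orientation, the UB9702 bring-up above runs in stages; summarized as a
comment (a reading aid, not code from the patch):

/*
 * 1. Enable the back channel, freeze serializer IDs/aliases, and for FPD4
 *    ports hold the RX state machines in reset with AEQ max clamped to 0.
 * 2. Program each port's CDR/RX mode (or power unused ports down), then
 *    issue a digital reset.
 * 3. Release the state machines, raise AEQ max to 0x23 and wait for lock.
 * 4. Run lock recovery on ports that fail to lock (AEQ restart workaround).
 * 5. Enable AEQ LMS, cycling the state machines through one more reset.
 * 6. Temperature-ramp the serializers, enable DFE LMS, verify every port
 *    locked, then re-enable interrupts, I2C pass-through and auto recovery.
 */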
-static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
+static int ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
{
struct device *dev = &priv->client->dev;
u8 rx_port_sts1;
@@ -2194,27 +3282,21 @@ static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
int ret = 0;
/* Read interrupts (also clears most of them) */
- if (!ret)
- ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
- &rx_port_sts1);
- if (!ret)
- ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
- &rx_port_sts2);
- if (!ret)
- ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS,
- &csi_rx_sts);
- if (!ret)
- ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS,
- &bcc_sts);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &rx_port_sts1,
+ &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &rx_port_sts2,
+ &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts, &ret);
+ ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts, &ret);
if (ret)
- return;
+ return ret;
if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PARITY_ERROR) {
u16 v;
ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
- &v);
+ &v, NULL);
if (!ret)
dev_err(dev, "rx%u parity errors: %u\n", nport, v);
}
@@ -2273,7 +3355,8 @@ static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_CHG) {
u16 v;
- ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1,
+ &v, NULL);
if (!ret)
dev_dbg(dev, "rx%u line len changed: %u\n", nport, v);
}
@@ -2282,7 +3365,7 @@ static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
u16 v;
ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
- &v);
+ &v, NULL);
if (!ret)
dev_dbg(dev, "rx%u line count changed: %u\n", nport, v);
}
@@ -2302,6 +3385,8 @@ static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
"stable freq" :
"unstable freq");
}
+
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -2354,17 +3439,17 @@ static int ub960_enable_tx_port(struct ub960_data *priv, unsigned int nport)
return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
UB960_TR_CSI_CTL_CSI_ENABLE,
- UB960_TR_CSI_CTL_CSI_ENABLE);
+ UB960_TR_CSI_CTL_CSI_ENABLE, NULL);
}
-static void ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
+static int ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
{
struct device *dev = &priv->client->dev;
dev_dbg(dev, "disable TX port %u\n", nport);
- ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
- UB960_TR_CSI_CTL_CSI_ENABLE, 0);
+ return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
+ UB960_TR_CSI_CTL_CSI_ENABLE, 0, NULL);
}
static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
@@ -2375,19 +3460,19 @@ static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
/* Enable forwarding */
return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
- UB960_SR_FWD_CTL1_PORT_DIS(nport), 0);
+ UB960_SR_FWD_CTL1_PORT_DIS(nport), 0, NULL);
}
-static void ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
+static int ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
{
struct device *dev = &priv->client->dev;
dev_dbg(dev, "disable RX port %u\n", nport);
/* Disable forwarding */
- ub960_update_bits(priv, UB960_SR_FWD_CTL1,
- UB960_SR_FWD_CTL1_PORT_DIS(nport),
- UB960_SR_FWD_CTL1_PORT_DIS(nport));
+ return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
+ UB960_SR_FWD_CTL1_PORT_DIS(nport),
+ UB960_SR_FWD_CTL1_PORT_DIS(nport), NULL);
}
/*
@@ -2396,20 +3481,14 @@ static void ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
*/
static int ub960_validate_stream_vcs(struct ub960_data *priv)
{
- unsigned int nport;
- unsigned int i;
-
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ for_each_active_rxport(priv, it) {
struct v4l2_mbus_frame_desc desc;
int ret;
u8 vc;
- if (!rxport)
- continue;
-
- ret = v4l2_subdev_call(rxport->source.sd, pad, get_frame_desc,
- rxport->source.pad, &desc);
+ ret = v4l2_subdev_call(it.rxport->source.sd, pad,
+ get_frame_desc, it.rxport->source.pad,
+ &desc);
if (ret)
return ret;
@@ -2421,13 +3500,13 @@ static int ub960_validate_stream_vcs(struct ub960_data *priv)
vc = desc.entry[0].bus.csi2.vc;
- for (i = 1; i < desc.num_entries; i++) {
+ for (unsigned int i = 1; i < desc.num_entries; i++) {
if (vc == desc.entry[i].bus.csi2.vc)
continue;
dev_err(&priv->client->dev,
"rx%u: source with multiple virtual-channels is not supported\n",
- nport);
+ it.nport);
return -ENODEV;
}
}
@@ -2517,23 +3596,24 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
*/
fwd_ctl = GENMASK(7, 4);
- for (unsigned int nport = 0; nport < priv->hw_data->num_rxports;
- nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ for_each_active_rxport(priv, it) {
+ unsigned long nport = it.nport;
+
u8 vc = vc_map[nport];
if (rx_data[nport].num_streams == 0)
continue;
- switch (rxport->rx_mode) {
+ switch (it.rxport->rx_mode) {
case RXPORT_MODE_RAW10:
ub960_rxport_write(priv, nport, UB960_RR_RAW10_ID,
- rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT));
+ rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT),
+ &ret);
- ub960_rxport_write(priv, rxport->nport,
+ ub960_rxport_write(priv, nport,
UB960_RR_RAW_EMBED_DTYPE,
(rx_data[nport].meta_lines << UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT) |
- rx_data[nport].meta_dt);
+ rx_data[nport].meta_dt, &ret);
break;
@@ -2550,15 +3630,17 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
(vc << UB960_RR_CSI_VC_MAP_SHIFT(3)) |
(vc << UB960_RR_CSI_VC_MAP_SHIFT(2)) |
(vc << UB960_RR_CSI_VC_MAP_SHIFT(1)) |
- (vc << UB960_RR_CSI_VC_MAP_SHIFT(0)));
+ (vc << UB960_RR_CSI_VC_MAP_SHIFT(0)),
+ &ret);
} else {
unsigned int i;
/* Map all VCs from this port to VC(nport) */
for (i = 0; i < 8; i++)
ub960_rxport_write(priv, nport,
- UB960_RR_VC_ID_MAP(i),
- (nport << 4) | nport);
+ UB9702_RR_VC_ID_MAP(i),
+ (nport << 4) | nport,
+ &ret);
}
break;
@@ -2570,9 +3652,9 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
fwd_ctl &= ~BIT(nport); /* forward to TX0 */
}
- ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl);
+ ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl, &ret);
- return 0;
+ return ret;
}
static void ub960_update_streaming_status(struct ub960_data *priv)
@@ -2596,7 +3678,6 @@ static int ub960_enable_streams(struct v4l2_subdev *sd,
u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
struct v4l2_subdev_route *route;
unsigned int failed_port;
- unsigned int nport;
int ret;
if (!priv->streaming) {
@@ -2618,6 +3699,8 @@ static int ub960_enable_streams(struct v4l2_subdev *sd,
/* Collect sink streams per pad which we need to enable */
for_each_active_route(&state->routing, route) {
+ unsigned int nport;
+
if (route->source_pad != source_pad)
continue;
@@ -2629,7 +3712,9 @@ static int ub960_enable_streams(struct v4l2_subdev *sd,
sink_streams[nport] |= BIT_ULL(route->sink_stream);
}
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ for_each_rxport(priv, it) {
+ unsigned int nport = it.nport;
+
if (!sink_streams[nport])
continue;
@@ -2667,7 +3752,7 @@ static int ub960_enable_streams(struct v4l2_subdev *sd,
return 0;
err:
- for (nport = 0; nport < failed_port; nport++) {
+ for (unsigned int nport = 0; nport < failed_port; nport++) {
if (!sink_streams[nport])
continue;
@@ -2707,11 +3792,12 @@ static int ub960_disable_streams(struct v4l2_subdev *sd,
struct device *dev = &priv->client->dev;
u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
struct v4l2_subdev_route *route;
- unsigned int nport;
int ret;
/* Collect sink streams per pad which we need to disable */
for_each_active_route(&state->routing, route) {
+ unsigned int nport;
+
if (route->source_pad != source_pad)
continue;
@@ -2723,7 +3809,9 @@ static int ub960_disable_streams(struct v4l2_subdev *sd,
sink_streams[nport] |= BIT_ULL(route->sink_stream);
}
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ for_each_rxport(priv, it) {
+ unsigned int nport = it.nport;
+
if (!sink_streams[nport])
continue;
@@ -2975,8 +4063,8 @@ static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
.set_fmt = ub960_set_fmt,
};
-static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
- unsigned int nport)
+static int ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
+ unsigned int nport)
{
struct device *dev = &priv->client->dev;
u8 eq_level;
@@ -2986,18 +4074,18 @@ static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
/* Strobe */
- ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
+ ret = ub960_read(priv, UB960_XR_AEQ_CTL1, &v, NULL);
if (ret)
- return;
+ return ret;
dev_info(dev, "\t%s strobe\n",
(v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
"Manual");
if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
- ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
+ ret = ub960_read(priv, UB960_XR_SFILTER_CFG, &v, NULL);
if (ret)
- return;
+ return ret;
dev_info(dev, "\tStrobe range [%d, %d]\n",
((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
@@ -3006,32 +4094,38 @@ static void ub960_log_status_ub960_sp_eq(struct ub960_data *priv,
ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
if (ret)
- return;
+ return ret;
dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
/* EQ */
- ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v, NULL);
if (ret)
- return;
+ return ret;
dev_info(dev, "\t%s EQ\n",
(v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
"Adaptive");
if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
- ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v,
+ NULL);
if (ret)
- return;
+ return ret;
dev_info(dev, "\tEQ range [%u, %u]\n",
(v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
(v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
}
- if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
- dev_info(dev, "\tEQ level %u\n", eq_level);
+ ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "\tEQ level %u\n", eq_level);
+
+ return 0;
}
static int ub960_log_status(struct v4l2_subdev *sd)
@@ -3039,19 +4133,23 @@ static int ub960_log_status(struct v4l2_subdev *sd)
struct ub960_data *priv = sd_to_ub960(sd);
struct device *dev = &priv->client->dev;
struct v4l2_subdev_state *state;
- unsigned int nport;
u16 v16 = 0;
u8 v = 0;
u8 id[UB960_SR_FPD3_RX_ID_LEN];
+ int ret = 0;
state = v4l2_subdev_lock_and_get_active_state(sd);
- for (unsigned int i = 0; i < sizeof(id); i++)
- ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i]);
+ for (unsigned int i = 0; i < sizeof(id); i++) {
+ ret = ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i], NULL);
+ if (ret)
+ return ret;
+ }
dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
- for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ for (unsigned int nport = 0; nport < priv->hw_data->num_txports;
+ nport++) {
struct ub960_txport *txport = priv->txports[nport];
dev_info(dev, "TX %u\n", nport);
@@ -3061,34 +4159,56 @@ static int ub960_log_status(struct v4l2_subdev *sd)
continue;
}
- ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v);
+ ret = ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tsync %u, pass %u\n", v & (u8)BIT(1),
v & (u8)BIT(0));
- ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport), &v16);
+ ret = ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport),
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tframe counter %u\n", v16);
- ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport), &v16);
+ ret = ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport),
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tframe error counter %u\n", v16);
- ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport), &v16);
+ ret = ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport),
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tline counter %u\n", v16);
- ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport), &v16);
+ ret = ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport),
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tline error counter %u\n", v16);
}
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ for_each_rxport(priv, it) {
+ unsigned int nport = it.nport;
dev_info(dev, "RX %u\n", nport);
- if (!rxport) {
+ if (!it.rxport) {
dev_info(dev, "\tNot initialized\n");
continue;
}
- ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v,
+ NULL);
+ if (ret)
+ return ret;
if (v & UB960_RR_RX_PORT_STS1_LOCK_STS)
dev_info(dev, "\tLocked\n");
@@ -3096,26 +4216,53 @@ static int ub960_log_status(struct v4l2_subdev *sd)
dev_info(dev, "\tNot locked\n");
dev_info(dev, "\trx_port_sts1 %#02x\n", v);
- ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v,
+ NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\trx_port_sts2 %#02x\n", v);
- ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v16);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH,
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tlink freq %llu Hz\n", ((u64)v16 * HZ_PER_MHZ) >> 8);
- ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v16);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tparity errors %u\n", v16);
- ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI, &v16);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tlines per frame %u\n", v16);
- ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v16);
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1,
+ &v16, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tbytes per line %u\n", v16);
- ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
+ ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
+ &v, NULL);
+ if (ret)
+ return ret;
+
dev_info(dev, "\tcsi_err_counter %u\n", v);
- if (!priv->hw_data->is_ub9702)
- ub960_log_status_ub960_sp_eq(priv, nport);
+ if (!priv->hw_data->is_ub9702) {
+ ret = ub960_log_status_ub960_sp_eq(priv, nport);
+ if (ret)
+ return ret;
+ }
/* GPIOs */
for (unsigned int i = 0; i < UB960_NUM_BC_GPIOS; i++) {
@@ -3125,7 +4272,9 @@ static int ub960_log_status(struct v4l2_subdev *sd)
ctl_reg = UB960_RR_BC_GPIO_CTL(i / 2);
ctl_shift = (i % 2) * 4;
- ub960_rxport_read(priv, nport, ctl_reg, &v);
+ ret = ub960_rxport_read(priv, nport, ctl_reg, &v, NULL);
+ if (ret)
+ return ret;
dev_info(dev, "\tGPIO%u: mode %u\n", i,
(v >> ctl_shift) & 0xf);
@@ -3163,34 +4312,36 @@ static const struct media_entity_operations ub960_entity_ops = {
static irqreturn_t ub960_handle_events(int irq, void *arg)
{
struct ub960_data *priv = arg;
- unsigned int i;
u8 int_sts;
u8 fwd_sts;
int ret;
- ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts);
+ ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts, NULL);
if (ret || !int_sts)
return IRQ_NONE;
dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts);
- ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts);
+ ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts, NULL);
if (ret)
return IRQ_NONE;
dev_dbg(&priv->client->dev, "FWD_STS %#02x\n", fwd_sts);
- for (i = 0; i < priv->hw_data->num_txports; i++) {
- if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i))
- ub960_csi_handle_events(priv, i);
+ for (unsigned int i = 0; i < priv->hw_data->num_txports; i++) {
+ if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i)) {
+ ret = ub960_csi_handle_events(priv, i);
+ if (ret)
+ return IRQ_NONE;
+ }
}
- for (i = 0; i < priv->hw_data->num_rxports; i++) {
- if (!priv->rxports[i])
- continue;
-
- if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(i))
- ub960_rxport_handle_events(priv, i);
+ for_each_active_rxport(priv, it) {
+ if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(it.nport)) {
+ ret = ub960_rxport_handle_events(priv, it.nport);
+ if (ret)
+ return IRQ_NONE;
+ }
}
return IRQ_HANDLED;
@@ -3225,19 +4376,14 @@ static void ub960_txport_free_ports(struct ub960_data *priv)
static void ub960_rxport_free_ports(struct ub960_data *priv)
{
- unsigned int nport;
+ for_each_active_rxport(priv, it) {
+ fwnode_handle_put(it.rxport->source.ep_fwnode);
+ fwnode_handle_put(it.rxport->ser.fwnode);
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
+ mutex_destroy(&it.rxport->aliased_addrs_lock);
- if (!rxport)
- continue;
-
- fwnode_handle_put(rxport->source.ep_fwnode);
- fwnode_handle_put(rxport->ser.fwnode);
-
- kfree(rxport);
- priv->rxports[nport] = NULL;
+ kfree(it.rxport);
+ priv->rxports[it.nport] = NULL;
}
}
@@ -3253,6 +4399,7 @@ ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
s32 strobe_pos;
u32 eq_level;
u32 ser_i2c_alias;
+ u32 ser_i2c_addr;
int ret;
cdr_mode = RXPORT_CDR_FPD3;
@@ -3364,6 +4511,13 @@ ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
return -EINVAL;
}
+ ret = fwnode_property_read_u32(rxport->ser.fwnode, "reg",
+ &ser_i2c_addr);
+ if (ret)
+ rxport->ser.addr = -EINVAL;
+ else
+ rxport->ser.addr = ser_i2c_addr;
+
return 0;
}
@@ -3456,6 +4610,8 @@ static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport,
if (ret)
goto err_put_remote_fwnode;
+ mutex_init(&rxport->aliased_addrs_lock);
+
return 0;
err_put_remote_fwnode:
@@ -3496,7 +4652,6 @@ static int ub960_parse_dt_rxports(struct ub960_data *priv)
{
struct device *dev = &priv->client->dev;
struct fwnode_handle *links_fwnode;
- unsigned int nport;
int ret;
links_fwnode = fwnode_get_named_child_node(dev_fwnode(dev), "links");
@@ -3511,9 +4666,10 @@ static int ub960_parse_dt_rxports(struct ub960_data *priv)
priv->strobe.manual = fwnode_property_read_bool(links_fwnode, "ti,manual-strobe");
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ for_each_rxport(priv, it) {
struct fwnode_handle *link_fwnode;
struct fwnode_handle *ep_fwnode;
+ unsigned int nport = it.nport;
link_fwnode = ub960_fwnode_get_link_by_regs(links_fwnode, nport);
if (!link_fwnode)
@@ -3602,7 +4758,6 @@ static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
struct device *dev = &priv->client->dev;
u8 nport = rxport->nport;
- unsigned int i;
int ret;
ret = media_entity_get_fwnode_pad(&subdev->entity,
@@ -3627,8 +4782,8 @@ static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
return ret;
}
- for (i = 0; i < priv->hw_data->num_rxports; i++) {
- if (priv->rxports[i] && !priv->rxports[i]->source.sd) {
+ for_each_active_rxport(priv, it) {
+ if (!it.rxport->source.sd) {
dev_dbg(dev, "Waiting for more subdevs to be bound\n");
return 0;
}
@@ -3654,29 +4809,24 @@ static const struct v4l2_async_notifier_operations ub960_notify_ops = {
static int ub960_v4l2_notifier_register(struct ub960_data *priv)
{
struct device *dev = &priv->client->dev;
- unsigned int i;
int ret;
v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
- for (i = 0; i < priv->hw_data->num_rxports; i++) {
- struct ub960_rxport *rxport = priv->rxports[i];
+ for_each_active_rxport(priv, it) {
struct ub960_asd *asd;
- if (!rxport)
- continue;
-
asd = v4l2_async_nf_add_fwnode(&priv->notifier,
- rxport->source.ep_fwnode,
+ it.rxport->source.ep_fwnode,
struct ub960_asd);
if (IS_ERR(asd)) {
dev_err(dev, "Failed to add subdev for source %u: %pe",
- i, asd);
+ it.nport, asd);
v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(asd);
}
- asd->rxport = rxport;
+ asd->rxport = it.rxport;
}
priv->notifier.ops = &ub960_notify_ops;
@@ -3794,29 +4944,6 @@ static const struct regmap_config ub960_regmap_config = {
.disable_locking = true,
};
-static void ub960_reset(struct ub960_data *priv, bool reset_regs)
-{
- struct device *dev = &priv->client->dev;
- unsigned int v;
- int ret;
- u8 bit;
-
- bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
- UB960_SR_RESET_DIGITAL_RESET0;
-
- ub960_write(priv, UB960_SR_RESET, bit);
-
- mutex_lock(&priv->reg_lock);
-
- ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
- (v & bit) == 0, 2000, 100000);
-
- mutex_unlock(&priv->reg_lock);
-
- if (ret)
- dev_err(dev, "reset failed: %d\n", ret);
-}
-
static int ub960_get_hw_resources(struct ub960_data *priv)
{
struct device *dev = &priv->client->dev;
@@ -3873,10 +5000,12 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
fsleep(2000);
}
- ub960_reset(priv, true);
+ ret = ub960_reset(priv, true);
+ if (ret)
+ goto err_pd_gpio;
/* Runtime check register accessibility */
- ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask);
+ ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask, NULL);
if (ret) {
dev_err_probe(dev, ret, "Cannot read first register, abort\n");
goto err_pd_gpio;
@@ -3885,14 +5014,16 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
dev_dbg(dev, "Found %s (rev/mask %#04x)\n", priv->hw_data->model,
rev_mask);
- ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts);
+ ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts, NULL);
if (ret)
goto err_pd_gpio;
if (priv->hw_data->is_ub9702)
- ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq);
+ ret = ub960_read(priv, UB9702_SR_REFCLK_FREQ, &refclk_freq,
+ NULL);
else
- ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
+ ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq,
+ NULL);
if (ret)
goto err_pd_gpio;
@@ -3901,7 +5032,7 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
clk_get_rate(priv->refclk) / HZ_PER_MHZ);
/* Disable all RX ports by default */
- ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0);
+ ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0, NULL);
if (ret)
goto err_pd_gpio;
@@ -3909,7 +5040,8 @@ static int ub960_enable_core_hw(struct ub960_data *priv)
if (priv->hw_data->is_ub9702) {
ret = ub960_update_bits(priv, UB960_SR_RESET,
UB960_SR_RESET_GPIO_LOCK_RELEASE,
- UB960_SR_RESET_GPIO_LOCK_RELEASE);
+ UB960_SR_RESET_GPIO_LOCK_RELEASE,
+ NULL);
if (ret)
goto err_pd_gpio;
}
@@ -3936,9 +5068,6 @@ static int ub960_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ub960_data *priv;
- unsigned int port_lock_mask;
- unsigned int port_mask;
- unsigned int nport;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -3981,39 +5110,14 @@ static int ub960_probe(struct i2c_client *client)
if (ret)
goto err_free_ports;
- ret = ub960_init_rx_ports(priv);
- if (ret)
- goto err_disable_vpocs;
-
- ub960_reset(priv, false);
-
- port_mask = 0;
-
- for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
- struct ub960_rxport *rxport = priv->rxports[nport];
-
- if (!rxport)
- continue;
-
- port_mask |= BIT(nport);
- }
+ if (priv->hw_data->is_ub9702)
+ ret = ub960_init_rx_ports_ub9702(priv);
+ else
+ ret = ub960_init_rx_ports_ub960(priv);
- ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
if (ret)
goto err_disable_vpocs;
- if (port_mask != port_lock_mask) {
- ret = -EIO;
- dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
- goto err_disable_vpocs;
- }
-
- /*
- * Clear any errors caused by switching the RX port settings while
- * probing.
- */
- ub960_clear_rx_errors(priv);
-
ret = ub960_init_atr(priv);
if (ret)
goto err_disable_vpocs;
@@ -4033,9 +5137,9 @@ static int ub960_probe(struct i2c_client *client)
msecs_to_jiffies(UB960_POLL_TIME_MS));
#ifdef UB960_DEBUG_I2C_RX_ID
- for (unsigned int i = 0; i < priv->hw_data->num_rxports; i++)
- ub960_write(priv, UB960_SR_I2C_RX_ID(i),
- (UB960_DEBUG_I2C_RX_ID + i) << 1);
+ for_each_rxport(priv, it)
+ ub960_write(priv, UB960_SR_I2C_RX_ID(it.nport),
+ (UB960_DEBUG_I2C_RX_ID + it.nport) << 1, NULL);
#endif
return 0;
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 04262bbf6306..3b4f68543342 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -718,9 +718,11 @@ static int imx219_configure_lanes(struct imx219 *imx219)
ARRAY_SIZE(imx219_4lane_regs), NULL);
};
-static int imx219_start_streaming(struct imx219 *imx219,
- struct v4l2_subdev_state *state)
+static int imx219_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
+ struct imx219 *imx219 = to_imx219(sd);
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
int ret;
@@ -769,12 +771,16 @@ static int imx219_start_streaming(struct imx219 *imx219,
return 0;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
return ret;
}
-static void imx219_stop_streaming(struct imx219 *imx219)
+static int imx219_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
+ struct imx219 *imx219 = to_imx219(sd);
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
int ret;
@@ -787,23 +793,9 @@ static void imx219_stop_streaming(struct imx219 *imx219)
__v4l2_ctrl_grab(imx219->vflip, false);
__v4l2_ctrl_grab(imx219->hflip, false);
- pm_runtime_put(&client->dev);
-}
-
-static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
-{
- struct imx219 *imx219 = to_imx219(sd);
- struct v4l2_subdev_state *state;
- int ret = 0;
-
- state = v4l2_subdev_lock_and_get_active_state(sd);
-
- if (enable)
- ret = imx219_start_streaming(imx219, state);
- else
- imx219_stop_streaming(imx219);
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
- v4l2_subdev_unlock_state(state);
return ret;
}
@@ -995,7 +987,7 @@ static int imx219_init_state(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_video_ops imx219_video_ops = {
- .s_stream = imx219_set_stream,
+ .s_stream = v4l2_subdev_s_stream_helper,
};
static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
@@ -1004,6 +996,8 @@ static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
.set_fmt = imx219_set_pad_format,
.get_selection = imx219_get_selection,
.enum_frame_size = imx219_enum_frame_size,
+ .enable_streams = imx219_enable_streams,
+ .disable_streams = imx219_disable_streams,
};
static const struct v4l2_subdev_ops imx219_subdev_ops = {
@@ -1280,6 +1274,8 @@ static int imx219_probe(struct i2c_client *client)
}
pm_runtime_idle(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
return 0;
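
With autosuspend, the put paths above mark the device busy first, so the
sensor powers down only after the 1000 ms idle window set in probe rather
than on every stream stop. The usual get/put pairing, as an illustrative
sketch (demo function, not driver code):

static int demo_use_sensor(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);  /* wake the sensor */
        if (ret < 0)
                return ret;

        /* ... program registers, stream frames ... */

        pm_runtime_mark_last_busy(dev);         /* restart the idle timer */
        pm_runtime_put_autosuspend(dev);        /* may suspend after 1 s */

        return 0;
}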
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index beb9169f93ad..da618c8cbadc 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1082,7 +1082,7 @@ static int imx283_start_streaming(struct imx283 *imx283,
cci_write(imx283->cci, IMX283_REG_SVR, 0x00, &ret);
dev_dbg(imx283->dev, "Mode: Size %d x %d\n", mode->width, mode->height);
- dev_dbg(imx283->dev, "Analogue Crop (in the mode) %d,%d %dx%d\n",
+ dev_dbg(imx283->dev, "Analogue Crop (in the mode) (%d,%d)/%ux%u\n",
mode->crop.left,
mode->crop.top,
mode->crop.width,
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index a544fc3df39c..846b9928d4e8 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -12,77 +12,125 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
/* Streaming Mode */
-#define IMX334_REG_MODE_SELECT 0x3000
-#define IMX334_MODE_STANDBY 0x01
-#define IMX334_MODE_STREAMING 0x00
+#define IMX334_REG_MODE_SELECT CCI_REG8(0x3000)
+#define IMX334_MODE_STANDBY 0x01
+#define IMX334_MODE_STREAMING 0x00
/* Lines per frame */
-#define IMX334_REG_LPFR 0x3030
+#define IMX334_REG_VMAX CCI_REG24_LE(0x3030)
+
+#define IMX334_REG_HMAX CCI_REG16_LE(0x3034)
+
+#define IMX334_REG_OPB_SIZE_V CCI_REG8(0x304c)
+#define IMX334_REG_ADBIT CCI_REG8(0x3050)
+#define IMX334_REG_MDBIT CCI_REG8(0x319d)
+#define IMX334_REG_ADBIT1 CCI_REG16_LE(0x341c)
+#define IMX334_REG_Y_OUT_SIZE CCI_REG16_LE(0x3308)
+#define IMX334_REG_XVS_XHS_OUTSEL CCI_REG8(0x31a0)
+#define IMX334_REG_XVS_XHS_DRV CCI_REG8(0x31a1)
/* Chip ID */
-#define IMX334_REG_ID 0x3044
-#define IMX334_ID 0x1e
+#define IMX334_REG_ID CCI_REG8(0x3044)
+#define IMX334_ID 0x1e
/* Exposure control */
-#define IMX334_REG_SHUTTER 0x3058
-#define IMX334_EXPOSURE_MIN 1
-#define IMX334_EXPOSURE_OFFSET 5
-#define IMX334_EXPOSURE_STEP 1
-#define IMX334_EXPOSURE_DEFAULT 0x0648
+#define IMX334_REG_SHUTTER CCI_REG24_LE(0x3058)
+#define IMX334_EXPOSURE_MIN 1
+#define IMX334_EXPOSURE_OFFSET 5
+#define IMX334_EXPOSURE_STEP 1
+#define IMX334_EXPOSURE_DEFAULT 0x0648
+
+#define IMX334_REG_LANEMODE CCI_REG8(0x3a01)
+#define IMX334_CSI_4_LANE_MODE 3
+#define IMX334_CSI_8_LANE_MODE 7
+
+/* Window cropping Settings */
+#define IMX334_REG_AREA3_ST_ADR_1 CCI_REG16_LE(0x3074)
+#define IMX334_REG_AREA3_ST_ADR_2 CCI_REG16_LE(0x308e)
+#define IMX334_REG_UNREAD_PARAM5 CCI_REG16_LE(0x30b6)
+#define IMX334_REG_AREA3_WIDTH_1 CCI_REG16_LE(0x3076)
+#define IMX334_REG_AREA3_WIDTH_2 CCI_REG16_LE(0x3090)
+#define IMX334_REG_BLACK_OFSET_ADR CCI_REG16_LE(0x30c6)
+#define IMX334_REG_UNRD_LINE_MAX CCI_REG16_LE(0x30ce)
+#define IMX334_REG_UNREAD_ED_ADR CCI_REG16_LE(0x30d8)
+#define IMX334_REG_UNREAD_PARAM6 CCI_REG16_LE(0x3116)
+
+#define IMX334_REG_VREVERSE CCI_REG8(0x304f)
+#define IMX334_REG_HREVERSE CCI_REG8(0x304e)
+
+/* Binning Settings */
+#define IMX334_REG_HADD_VADD CCI_REG8(0x3199)
+#define IMX334_REG_VALID_EXPAND CCI_REG8(0x31dd)
+#define IMX334_REG_TCYCLE CCI_REG8(0x3300)
/* Analog gain control */
-#define IMX334_REG_AGAIN 0x30e8
-#define IMX334_AGAIN_MIN 0
-#define IMX334_AGAIN_MAX 240
-#define IMX334_AGAIN_STEP 1
-#define IMX334_AGAIN_DEFAULT 0
+#define IMX334_REG_AGAIN CCI_REG16_LE(0x30e8)
+#define IMX334_AGAIN_MIN 0
+#define IMX334_AGAIN_MAX 240
+#define IMX334_AGAIN_STEP 1
+#define IMX334_AGAIN_DEFAULT 0
/* Group hold register */
-#define IMX334_REG_HOLD 0x3001
+#define IMX334_REG_HOLD CCI_REG8(0x3001)
+
+#define IMX334_REG_MASTER_MODE CCI_REG8(0x3002)
+#define IMX334_REG_WINMODE CCI_REG8(0x3018)
+#define IMX334_REG_HTRIMMING_START CCI_REG16_LE(0x302c)
+#define IMX334_REG_HNUM CCI_REG16_LE(0x302e)
/* Input clock rate */
-#define IMX334_INCLK_RATE 24000000
+#define IMX334_INCLK_RATE 24000000
+
+/* INCK Setting Register */
+#define IMX334_REG_BCWAIT_TIME CCI_REG8(0x300c)
+#define IMX334_REG_CPWAIT_TIME CCI_REG8(0x300d)
+#define IMX334_REG_INCKSEL1 CCI_REG16_LE(0x314c)
+#define IMX334_REG_INCKSEL2 CCI_REG8(0x315a)
+#define IMX334_REG_INCKSEL3 CCI_REG8(0x3168)
+#define IMX334_REG_INCKSEL4 CCI_REG8(0x316a)
+#define IMX334_REG_SYS_MODE CCI_REG8(0x319e)
+
+#define IMX334_REG_TCLKPOST CCI_REG16_LE(0x3a18)
+#define IMX334_REG_TCLKPREPARE CCI_REG16_LE(0x3a1a)
+#define IMX334_REG_TCLKTRAIL CCI_REG16_LE(0x3a1c)
+#define IMX334_REG_TCLKZERO CCI_REG16_LE(0x3a1e)
+#define IMX334_REG_THSPREPARE CCI_REG16_LE(0x3a20)
+#define IMX334_REG_THSZERO CCI_REG16_LE(0x3a22)
+#define IMX334_REG_THSTRAIL CCI_REG16_LE(0x3a24)
+#define IMX334_REG_THSEXIT CCI_REG16_LE(0x3a26)
+#define IMX334_REG_TPLX CCI_REG16_LE(0x3a28)
/* CSI2 HW configuration */
-#define IMX334_LINK_FREQ_891M 891000000
-#define IMX334_LINK_FREQ_445M 445500000
-#define IMX334_NUM_DATA_LANES 4
+#define IMX334_LINK_FREQ_891M 891000000
+#define IMX334_LINK_FREQ_445M 445500000
+#define IMX334_NUM_DATA_LANES 4
-#define IMX334_REG_MIN 0x00
-#define IMX334_REG_MAX 0xfffff
+#define IMX334_REG_MIN 0x00
+#define IMX334_REG_MAX 0xfffff
/* Test Pattern Control */
-#define IMX334_REG_TP 0x329e
-#define IMX334_TP_COLOR_HBARS 0xA
-#define IMX334_TP_COLOR_VBARS 0xB
+#define IMX334_REG_TP CCI_REG8(0x329e)
+#define IMX334_TP_COLOR_HBARS 0xa
+#define IMX334_TP_COLOR_VBARS 0xb
-#define IMX334_TPG_EN_DOUT 0x329c
-#define IMX334_TP_ENABLE 0x1
-#define IMX334_TP_DISABLE 0x0
+#define IMX334_TPG_EN_DOUT CCI_REG8(0x329c)
+#define IMX334_TP_ENABLE 0x1
+#define IMX334_TP_DISABLE 0x0
-#define IMX334_TPG_COLORW 0x32a0
-#define IMX334_TPG_COLORW_120P 0x13
+#define IMX334_TPG_COLORW CCI_REG8(0x32a0)
+#define IMX334_TPG_COLORW_120P 0x13
-#define IMX334_TP_CLK_EN 0x3148
-#define IMX334_TP_CLK_EN_VAL 0x10
-#define IMX334_TP_CLK_DIS_VAL 0x0
+#define IMX334_TP_CLK_EN CCI_REG8(0x3148)
+#define IMX334_TP_CLK_EN_VAL 0x10
+#define IMX334_TP_CLK_DIS_VAL 0x0
-#define IMX334_DIG_CLP_MODE 0x3280
-
-/**
- * struct imx334_reg - imx334 sensor register
- * @address: Register address
- * @val: Register value
- */
-struct imx334_reg {
- u16 address;
- u8 val;
-};
+#define IMX334_DIG_CLP_MODE CCI_REG8(0x3280)
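
The CCI_REG8()/CCI_REG16_LE() wrappers used in the new defines encode the register width (and, for the _LE variants, little-endian byte order) into the address constant itself, so the v4l2-cci helpers know how many bytes each access must transfer. A minimal sketch of reading one of these registers, assuming only <media/v4l2-cci.h> and an already-initialised CCI regmap; the register name is hypothetical:

	#include <media/v4l2-cci.h>

	/* Hypothetical 16-bit little-endian register, mirroring the defines above. */
	#define SENSOR_REG_EXAMPLE	CCI_REG16_LE(0x3074)

	static int sensor_read_example(struct regmap *cci, struct device *dev)
	{
		u64 val;	/* cci_read() always fills a u64, whatever the width */
		int ret;

		ret = cci_read(cci, SENSOR_REG_EXAMPLE, &val, NULL);
		if (ret)
			return ret;

		dev_dbg(dev, "0x3074 = 0x%04llx\n", val);
		return 0;
	}
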
/**
* struct imx334_reg_list - imx334 sensor register list
@@ -91,7 +139,7 @@ struct imx334_reg {
*/
struct imx334_reg_list {
u32 num_of_regs;
- const struct imx334_reg *regs;
+ const struct cci_reg_sequence *regs;
};
/**
@@ -121,6 +169,7 @@ struct imx334_mode {
/**
* struct imx334 - imx334 sensor device structure
* @dev: Pointer to generic device
+ * @cci: CCI register map
* @client: Pointer to i2c client
* @sd: V4L2 sub-device
* @pad: Media pad. Only one pad supported
@@ -135,12 +184,12 @@ struct imx334_mode {
* @again_ctrl: Pointer to analog gain control
* @vblank: Vertical blanking in lines
* @cur_mode: Pointer to current selected sensor mode
- * @mutex: Mutex for serializing sensor controls
* @link_freq_bitmap: Menu bitmap for link_freq_ctrl
* @cur_code: current selected format code
*/
struct imx334 {
struct device *dev;
+ struct regmap *cci;
struct i2c_client *client;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -157,7 +206,6 @@ struct imx334 {
};
u32 vblank;
const struct imx334_mode *cur_mode;
- struct mutex mutex;
unsigned long link_freq_bitmap;
u32 cur_code;
};
@@ -167,283 +215,183 @@ static const s64 link_freq[] = {
IMX334_LINK_FREQ_445M,
};
+/* Sensor common mode registers values */
+static const struct cci_reg_sequence common_mode_regs[] = {
+ { IMX334_REG_MODE_SELECT, IMX334_MODE_STANDBY },
+ { IMX334_REG_WINMODE, 0x04 },
+ { IMX334_REG_VMAX, 0x0008ca },
+ { IMX334_REG_HMAX, 0x044c },
+ { IMX334_REG_BLACK_OFSET_ADR, 0x0000 },
+ { IMX334_REG_UNRD_LINE_MAX, 0x0000 },
+ { IMX334_REG_OPB_SIZE_V, 0x00 },
+ { IMX334_REG_HREVERSE, 0x00 },
+ { IMX334_REG_VREVERSE, 0x00 },
+ { IMX334_REG_UNREAD_PARAM5, 0x0000 },
+ { IMX334_REG_UNREAD_PARAM6, 0x0008 },
+ { IMX334_REG_XVS_XHS_OUTSEL, 0x20 },
+ { IMX334_REG_XVS_XHS_DRV, 0x0f },
+ { IMX334_REG_BCWAIT_TIME, 0x3b },
+ { IMX334_REG_CPWAIT_TIME, 0x2a },
+ { IMX334_REG_INCKSEL1, 0x0129 },
+ { IMX334_REG_INCKSEL2, 0x06 },
+ { IMX334_REG_INCKSEL3, 0xa0 },
+ { IMX334_REG_INCKSEL4, 0x7e },
+ { IMX334_REG_SYS_MODE, 0x02 },
+ { IMX334_REG_HADD_VADD, 0x00 },
+ { IMX334_REG_VALID_EXPAND, 0x03 },
+ { IMX334_REG_TCYCLE, 0x00 },
+ { IMX334_REG_TCLKPOST, 0x007f },
+ { IMX334_REG_TCLKPREPARE, 0x0037 },
+ { IMX334_REG_TCLKTRAIL, 0x0037 },
+ { IMX334_REG_TCLKZERO, 0x00f7 },
+ { IMX334_REG_THSPREPARE, 0x002f },
+ { CCI_REG8(0x3078), 0x02 },
+ { CCI_REG8(0x3079), 0x00 },
+ { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x307b), 0x00 },
+ { CCI_REG8(0x3080), 0x02 },
+ { CCI_REG8(0x3081), 0x00 },
+ { CCI_REG8(0x3082), 0x00 },
+ { CCI_REG8(0x3083), 0x00 },
+ { CCI_REG8(0x3088), 0x02 },
+ { CCI_REG8(0x3094), 0x00 },
+ { CCI_REG8(0x3095), 0x00 },
+ { CCI_REG8(0x3096), 0x00 },
+ { CCI_REG8(0x309b), 0x02 },
+ { CCI_REG8(0x309c), 0x00 },
+ { CCI_REG8(0x309d), 0x00 },
+ { CCI_REG8(0x309e), 0x00 },
+ { CCI_REG8(0x30a4), 0x00 },
+ { CCI_REG8(0x30a5), 0x00 },
+ { CCI_REG8(0x3288), 0x21 },
+ { CCI_REG8(0x328a), 0x02 },
+ { CCI_REG8(0x3414), 0x05 },
+ { CCI_REG8(0x3416), 0x18 },
+ { CCI_REG8(0x35ac), 0x0e },
+ { CCI_REG8(0x3648), 0x01 },
+ { CCI_REG8(0x364a), 0x04 },
+ { CCI_REG8(0x364c), 0x04 },
+ { CCI_REG8(0x3678), 0x01 },
+ { CCI_REG8(0x367c), 0x31 },
+ { CCI_REG8(0x367e), 0x31 },
+ { CCI_REG8(0x3708), 0x02 },
+ { CCI_REG8(0x3714), 0x01 },
+ { CCI_REG8(0x3715), 0x02 },
+ { CCI_REG8(0x3716), 0x02 },
+ { CCI_REG8(0x3717), 0x02 },
+ { CCI_REG8(0x371c), 0x3d },
+ { CCI_REG8(0x371d), 0x3f },
+ { CCI_REG8(0x372c), 0x00 },
+ { CCI_REG8(0x372d), 0x00 },
+ { CCI_REG8(0x372e), 0x46 },
+ { CCI_REG8(0x372f), 0x00 },
+ { CCI_REG8(0x3730), 0x89 },
+ { CCI_REG8(0x3731), 0x00 },
+ { CCI_REG8(0x3732), 0x08 },
+ { CCI_REG8(0x3733), 0x01 },
+ { CCI_REG8(0x3734), 0xfe },
+ { CCI_REG8(0x3735), 0x05 },
+ { CCI_REG8(0x375d), 0x00 },
+ { CCI_REG8(0x375e), 0x00 },
+ { CCI_REG8(0x375f), 0x61 },
+ { CCI_REG8(0x3760), 0x06 },
+ { CCI_REG8(0x3768), 0x1b },
+ { CCI_REG8(0x3769), 0x1b },
+ { CCI_REG8(0x376a), 0x1a },
+ { CCI_REG8(0x376b), 0x19 },
+ { CCI_REG8(0x376c), 0x18 },
+ { CCI_REG8(0x376d), 0x14 },
+ { CCI_REG8(0x376e), 0x0f },
+ { CCI_REG8(0x3776), 0x00 },
+ { CCI_REG8(0x3777), 0x00 },
+ { CCI_REG8(0x3778), 0x46 },
+ { CCI_REG8(0x3779), 0x00 },
+ { CCI_REG8(0x377a), 0x08 },
+ { CCI_REG8(0x377b), 0x01 },
+ { CCI_REG8(0x377c), 0x45 },
+ { CCI_REG8(0x377d), 0x01 },
+ { CCI_REG8(0x377e), 0x23 },
+ { CCI_REG8(0x377f), 0x02 },
+ { CCI_REG8(0x3780), 0xd9 },
+ { CCI_REG8(0x3781), 0x03 },
+ { CCI_REG8(0x3782), 0xf5 },
+ { CCI_REG8(0x3783), 0x06 },
+ { CCI_REG8(0x3784), 0xa5 },
+ { CCI_REG8(0x3788), 0x0f },
+ { CCI_REG8(0x378a), 0xd9 },
+ { CCI_REG8(0x378b), 0x03 },
+ { CCI_REG8(0x378c), 0xeb },
+ { CCI_REG8(0x378d), 0x05 },
+ { CCI_REG8(0x378e), 0x87 },
+ { CCI_REG8(0x378f), 0x06 },
+ { CCI_REG8(0x3790), 0xf5 },
+ { CCI_REG8(0x3792), 0x43 },
+ { CCI_REG8(0x3794), 0x7a },
+ { CCI_REG8(0x3796), 0xa1 },
+ { CCI_REG8(0x37b0), 0x37 },
+ { CCI_REG8(0x3e04), 0x0e },
+ { IMX334_REG_AGAIN, 0x0050 },
+ { IMX334_REG_MASTER_MODE, 0x00 },
+};
+
+/* Sensor mode registers for 640x480@30fps */
+static const struct cci_reg_sequence mode_640x480_regs[] = {
+ { IMX334_REG_HTRIMMING_START, 0x0670 },
+ { IMX334_REG_HNUM, 0x0280 },
+ { IMX334_REG_AREA3_ST_ADR_1, 0x0748 },
+ { IMX334_REG_AREA3_ST_ADR_2, 0x0749 },
+ { IMX334_REG_AREA3_WIDTH_1, 0x01e0 },
+ { IMX334_REG_AREA3_WIDTH_2, 0x01e0 },
+ { IMX334_REG_Y_OUT_SIZE, 0x01e0 },
+ { IMX334_REG_UNREAD_ED_ADR, 0x0b30 },
+};
+
+/* Sensor mode registers for 1280x720@30fps */
+static const struct cci_reg_sequence mode_1280x720_regs[] = {
+ { IMX334_REG_HTRIMMING_START, 0x0530 },
+ { IMX334_REG_HNUM, 0x0500 },
+ { IMX334_REG_AREA3_ST_ADR_1, 0x0384 },
+ { IMX334_REG_AREA3_ST_ADR_2, 0x0385 },
+ { IMX334_REG_AREA3_WIDTH_1, 0x02d0 },
+ { IMX334_REG_AREA3_WIDTH_2, 0x02d0 },
+ { IMX334_REG_Y_OUT_SIZE, 0x02d0 },
+ { IMX334_REG_UNREAD_ED_ADR, 0x0b30 },
+};
+
/* Sensor mode registers for 1920x1080@30fps */
-static const struct imx334_reg mode_1920x1080_regs[] = {
- {0x3000, 0x01},
- {0x3018, 0x04},
- {0x3030, 0xca},
- {0x3031, 0x08},
- {0x3032, 0x00},
- {0x3034, 0x4c},
- {0x3035, 0x04},
- {0x302c, 0xf0},
- {0x302d, 0x03},
- {0x302e, 0x80},
- {0x302f, 0x07},
- {0x3074, 0xcc},
- {0x3075, 0x02},
- {0x308e, 0xcd},
- {0x308f, 0x02},
- {0x3076, 0x38},
- {0x3077, 0x04},
- {0x3090, 0x38},
- {0x3091, 0x04},
- {0x3308, 0x38},
- {0x3309, 0x04},
- {0x30C6, 0x00},
- {0x30c7, 0x00},
- {0x30ce, 0x00},
- {0x30cf, 0x00},
- {0x30d8, 0x18},
- {0x30d9, 0x0a},
- {0x304c, 0x00},
- {0x304e, 0x00},
- {0x304f, 0x00},
- {0x3050, 0x00},
- {0x30b6, 0x00},
- {0x30b7, 0x00},
- {0x3116, 0x08},
- {0x3117, 0x00},
- {0x31a0, 0x20},
- {0x31a1, 0x0f},
- {0x300c, 0x3b},
- {0x300d, 0x29},
- {0x314c, 0x29},
- {0x314d, 0x01},
- {0x315a, 0x06},
- {0x3168, 0xa0},
- {0x316a, 0x7e},
- {0x319e, 0x02},
- {0x3199, 0x00},
- {0x319d, 0x00},
- {0x31dd, 0x03},
- {0x3300, 0x00},
- {0x341c, 0xff},
- {0x341d, 0x01},
- {0x3a01, 0x03},
- {0x3a18, 0x7f},
- {0x3a19, 0x00},
- {0x3a1a, 0x37},
- {0x3a1b, 0x00},
- {0x3a1c, 0x37},
- {0x3a1d, 0x00},
- {0x3a1e, 0xf7},
- {0x3a1f, 0x00},
- {0x3a20, 0x3f},
- {0x3a21, 0x00},
- {0x3a20, 0x6f},
- {0x3a21, 0x00},
- {0x3a20, 0x3f},
- {0x3a21, 0x00},
- {0x3a20, 0x5f},
- {0x3a21, 0x00},
- {0x3a20, 0x2f},
- {0x3a21, 0x00},
- {0x3078, 0x02},
- {0x3079, 0x00},
- {0x307a, 0x00},
- {0x307b, 0x00},
- {0x3080, 0x02},
- {0x3081, 0x00},
- {0x3082, 0x00},
- {0x3083, 0x00},
- {0x3088, 0x02},
- {0x3094, 0x00},
- {0x3095, 0x00},
- {0x3096, 0x00},
- {0x309b, 0x02},
- {0x309c, 0x00},
- {0x309d, 0x00},
- {0x309e, 0x00},
- {0x30a4, 0x00},
- {0x30a5, 0x00},
- {0x3288, 0x21},
- {0x328a, 0x02},
- {0x3414, 0x05},
- {0x3416, 0x18},
- {0x35Ac, 0x0e},
- {0x3648, 0x01},
- {0x364a, 0x04},
- {0x364c, 0x04},
- {0x3678, 0x01},
- {0x367c, 0x31},
- {0x367e, 0x31},
- {0x3708, 0x02},
- {0x3714, 0x01},
- {0x3715, 0x02},
- {0x3716, 0x02},
- {0x3717, 0x02},
- {0x371c, 0x3d},
- {0x371d, 0x3f},
- {0x372c, 0x00},
- {0x372d, 0x00},
- {0x372e, 0x46},
- {0x372f, 0x00},
- {0x3730, 0x89},
- {0x3731, 0x00},
- {0x3732, 0x08},
- {0x3733, 0x01},
- {0x3734, 0xfe},
- {0x3735, 0x05},
- {0x375d, 0x00},
- {0x375e, 0x00},
- {0x375f, 0x61},
- {0x3760, 0x06},
- {0x3768, 0x1b},
- {0x3769, 0x1b},
- {0x376a, 0x1a},
- {0x376b, 0x19},
- {0x376c, 0x18},
- {0x376d, 0x14},
- {0x376e, 0x0f},
- {0x3776, 0x00},
- {0x3777, 0x00},
- {0x3778, 0x46},
- {0x3779, 0x00},
- {0x377a, 0x08},
- {0x377b, 0x01},
- {0x377c, 0x45},
- {0x377d, 0x01},
- {0x377e, 0x23},
- {0x377f, 0x02},
- {0x3780, 0xd9},
- {0x3781, 0x03},
- {0x3782, 0xf5},
- {0x3783, 0x06},
- {0x3784, 0xa5},
- {0x3788, 0x0f},
- {0x378a, 0xd9},
- {0x378b, 0x03},
- {0x378c, 0xeb},
- {0x378d, 0x05},
- {0x378e, 0x87},
- {0x378f, 0x06},
- {0x3790, 0xf5},
- {0x3792, 0x43},
- {0x3794, 0x7a},
- {0x3796, 0xa1},
- {0x37b0, 0x37},
- {0x3e04, 0x0e},
- {0x30e8, 0x50},
- {0x30e9, 0x00},
- {0x3e04, 0x0e},
- {0x3002, 0x00},
+static const struct cci_reg_sequence mode_1920x1080_regs[] = {
+ { IMX334_REG_HTRIMMING_START, 0x03f0 },
+ { IMX334_REG_HNUM, 0x0780 },
+ { IMX334_REG_AREA3_ST_ADR_1, 0x02cc },
+ { IMX334_REG_AREA3_ST_ADR_2, 0x02cd },
+ { IMX334_REG_AREA3_WIDTH_1, 0x0438 },
+ { IMX334_REG_AREA3_WIDTH_2, 0x0438 },
+ { IMX334_REG_Y_OUT_SIZE, 0x0438 },
+ { IMX334_REG_UNREAD_ED_ADR, 0x0a18 },
};
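
The table rewrite above also collapses the old byte-pair writes into single wide accesses: the removed pair {0x3074, 0xcc} / {0x3075, 0x02} becomes the one entry { IMX334_REG_AREA3_ST_ADR_1, 0x02cc }, because CCI_REG16_LE(0x3074) makes the helper emit the low byte at 0x3074 and the high byte at 0x3075. As a sketch, the stand-alone equivalent write would be:

	cci_write(cci, CCI_REG16_LE(0x3074), 0x02cc, &ret);	/* was 0x3074=0xcc, 0x3075=0x02 */
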
/* Sensor mode registers for 3840x2160@30fps */
-static const struct imx334_reg mode_3840x2160_regs[] = {
- {0x3000, 0x01},
- {0x3002, 0x00},
- {0x3018, 0x04},
- {0x37b0, 0x36},
- {0x304c, 0x00},
- {0x300c, 0x3b},
- {0x300d, 0x2a},
- {0x3034, 0x26},
- {0x3035, 0x02},
- {0x314c, 0x29},
- {0x314d, 0x01},
- {0x315a, 0x02},
- {0x3168, 0xa0},
- {0x316a, 0x7e},
- {0x3288, 0x21},
- {0x328a, 0x02},
- {0x302c, 0x3c},
- {0x302d, 0x00},
- {0x302e, 0x00},
- {0x302f, 0x0f},
- {0x3076, 0x70},
- {0x3077, 0x08},
- {0x3090, 0x70},
- {0x3091, 0x08},
- {0x30d8, 0x20},
- {0x30d9, 0x12},
- {0x3308, 0x70},
- {0x3309, 0x08},
- {0x3414, 0x05},
- {0x3416, 0x18},
- {0x35ac, 0x0e},
- {0x3648, 0x01},
- {0x364a, 0x04},
- {0x364c, 0x04},
- {0x3678, 0x01},
- {0x367c, 0x31},
- {0x367e, 0x31},
- {0x3708, 0x02},
- {0x3714, 0x01},
- {0x3715, 0x02},
- {0x3716, 0x02},
- {0x3717, 0x02},
- {0x371c, 0x3d},
- {0x371d, 0x3f},
- {0x372c, 0x00},
- {0x372d, 0x00},
- {0x372e, 0x46},
- {0x372f, 0x00},
- {0x3730, 0x89},
- {0x3731, 0x00},
- {0x3732, 0x08},
- {0x3733, 0x01},
- {0x3734, 0xfe},
- {0x3735, 0x05},
- {0x375d, 0x00},
- {0x375e, 0x00},
- {0x375f, 0x61},
- {0x3760, 0x06},
- {0x3768, 0x1b},
- {0x3769, 0x1b},
- {0x376a, 0x1a},
- {0x376b, 0x19},
- {0x376c, 0x18},
- {0x376d, 0x14},
- {0x376e, 0x0f},
- {0x3776, 0x00},
- {0x3777, 0x00},
- {0x3778, 0x46},
- {0x3779, 0x00},
- {0x377a, 0x08},
- {0x377b, 0x01},
- {0x377c, 0x45},
- {0x377d, 0x01},
- {0x377e, 0x23},
- {0x377f, 0x02},
- {0x3780, 0xd9},
- {0x3781, 0x03},
- {0x3782, 0xf5},
- {0x3783, 0x06},
- {0x3784, 0xa5},
- {0x3788, 0x0f},
- {0x378a, 0xd9},
- {0x378b, 0x03},
- {0x378c, 0xeb},
- {0x378d, 0x05},
- {0x378e, 0x87},
- {0x378f, 0x06},
- {0x3790, 0xf5},
- {0x3792, 0x43},
- {0x3794, 0x7a},
- {0x3796, 0xa1},
- {0x3e04, 0x0e},
- {0x319e, 0x00},
- {0x3a00, 0x01},
- {0x3a18, 0xbf},
- {0x3a19, 0x00},
- {0x3a1a, 0x67},
- {0x3a1b, 0x00},
- {0x3a1c, 0x6f},
- {0x3a1d, 0x00},
- {0x3a1e, 0xd7},
- {0x3a1f, 0x01},
- {0x3a20, 0x6f},
- {0x3a21, 0x00},
- {0x3a22, 0xcf},
- {0x3a23, 0x00},
- {0x3a24, 0x6f},
- {0x3a25, 0x00},
- {0x3a26, 0xb7},
- {0x3a27, 0x00},
- {0x3a28, 0x5f},
- {0x3a29, 0x00},
+static const struct cci_reg_sequence mode_3840x2160_regs[] = {
+ { IMX334_REG_HMAX, 0x0226 },
+ { IMX334_REG_INCKSEL2, 0x02 },
+ { IMX334_REG_HTRIMMING_START, 0x003c },
+ { IMX334_REG_HNUM, 0x0f00 },
+ { IMX334_REG_AREA3_ST_ADR_1, 0x00b0 },
+ { IMX334_REG_AREA3_ST_ADR_2, 0x00b1 },
+ { IMX334_REG_UNREAD_ED_ADR, 0x1220 },
+ { IMX334_REG_AREA3_WIDTH_1, 0x0870 },
+ { IMX334_REG_AREA3_WIDTH_2, 0x0870 },
+ { IMX334_REG_Y_OUT_SIZE, 0x0870 },
+ { IMX334_REG_SYS_MODE, 0x00 },
+ { IMX334_REG_TCLKPOST, 0x00bf },
+ { IMX334_REG_TCLKPREPARE, 0x0067 },
+ { IMX334_REG_TCLKTRAIL, 0x006f },
+ { IMX334_REG_TCLKZERO, 0x01d7 },
+ { IMX334_REG_THSPREPARE, 0x006f },
+ { IMX334_REG_THSZERO, 0x00cf },
+ { IMX334_REG_THSTRAIL, 0x006f },
+ { IMX334_REG_THSEXIT, 0x00b7 },
+ { IMX334_REG_TPLX, 0x005f },
};
static const char * const imx334_test_pattern_menu[] = {
@@ -458,18 +406,16 @@ static const int imx334_test_pattern_val[] = {
IMX334_TP_COLOR_VBARS,
};
-static const struct imx334_reg raw10_framefmt_regs[] = {
- {0x3050, 0x00},
- {0x319d, 0x00},
- {0x341c, 0xff},
- {0x341d, 0x01},
+static const struct cci_reg_sequence raw10_framefmt_regs[] = {
+ { IMX334_REG_ADBIT, 0x00 },
+ { IMX334_REG_MDBIT, 0x00 },
+ { IMX334_REG_ADBIT1, 0x01ff },
};
-static const struct imx334_reg raw12_framefmt_regs[] = {
- {0x3050, 0x01},
- {0x319d, 0x01},
- {0x341c, 0x47},
- {0x341d, 0x00},
+static const struct cci_reg_sequence raw12_framefmt_regs[] = {
+ { IMX334_REG_ADBIT, 0x01 },
+ { IMX334_REG_MDBIT, 0x01 },
+ { IMX334_REG_ADBIT1, 0x0047 },
};
static const u32 imx334_mbus_codes[] = {
@@ -505,6 +451,32 @@ static const struct imx334_mode supported_modes[] = {
.num_of_regs = ARRAY_SIZE(mode_1920x1080_regs),
.regs = mode_1920x1080_regs,
},
+ }, {
+ .width = 1280,
+ .height = 720,
+ .hblank = 2480,
+ .vblank = 1170,
+ .vblank_min = 45,
+ .vblank_max = 132840,
+ .pclk = 297000000,
+ .link_freq_idx = 1,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_regs),
+ .regs = mode_1280x720_regs,
+ },
+ }, {
+ .width = 640,
+ .height = 480,
+ .hblank = 2480,
+ .vblank = 1170,
+ .vblank_min = 45,
+ .vblank_max = 132840,
+ .pclk = 297000000,
+ .link_freq_idx = 1,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_640x480_regs),
+ .regs = mode_640x480_regs,
+ },
},
};
@@ -520,101 +492,6 @@ static inline struct imx334 *to_imx334(struct v4l2_subdev *subdev)
}
/**
- * imx334_read_reg() - Read registers.
- * @imx334: pointer to imx334 device
- * @reg: register address
- * @len: length of bytes to read. Max supported bytes is 4
- * @val: pointer to register value to be filled.
- *
- * Big endian register addresses with little endian values.
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx334_read_reg(struct imx334 *imx334, u16 reg, u32 len, u32 *val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx334->sd);
- struct i2c_msg msgs[2] = {0};
- u8 addr_buf[2] = {0};
- u8 data_buf[4] = {0};
- int ret;
-
- if (WARN_ON(len > 4))
- return -EINVAL;
-
- put_unaligned_be16(reg, addr_buf);
-
- /* Write register address */
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = ARRAY_SIZE(addr_buf);
- msgs[0].buf = addr_buf;
-
- /* Read data from register */
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = data_buf;
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs))
- return -EIO;
-
- *val = get_unaligned_le32(data_buf);
-
- return 0;
-}
-
-/**
- * imx334_write_reg() - Write register
- * @imx334: pointer to imx334 device
- * @reg: register address
- * @len: length of bytes. Max supported bytes is 4
- * @val: register value
- *
- * Big endian register addresses with little endian values.
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx334_write_reg(struct imx334 *imx334, u16 reg, u32 len, u32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&imx334->sd);
- u8 buf[6] = {0};
-
- if (WARN_ON(len > 4))
- return -EINVAL;
-
- put_unaligned_be16(reg, buf);
- put_unaligned_le32(val, buf + 2);
- if (i2c_master_send(client, buf, len + 2) != len + 2)
- return -EIO;
-
- return 0;
-}
-
-/**
- * imx334_write_regs() - Write a list of registers
- * @imx334: pointer to imx334 device
- * @regs: list of registers to be written
- * @len: length of registers array
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx334_write_regs(struct imx334 *imx334,
- const struct imx334_reg *regs, u32 len)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < len; i++) {
- ret = imx334_write_reg(imx334, regs[i].address, 1, regs[i].val);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/**
* imx334_update_controls() - Update control ranges based on streaming mode
* @imx334: pointer to imx334 device
* @mode: pointer to imx334_mode sensor mode
@@ -659,30 +536,23 @@ static int imx334_update_controls(struct imx334 *imx334,
static int imx334_update_exp_gain(struct imx334 *imx334, u32 exposure, u32 gain)
{
u32 lpfr, shutter;
- int ret;
+ int ret_hold;
+ int ret = 0;
lpfr = imx334->vblank + imx334->cur_mode->height;
shutter = lpfr - exposure;
- dev_dbg(imx334->dev, "Set long exp %u analog gain %u sh0 %u lpfr %u",
+ dev_dbg(imx334->dev, "Set long exp %u analog gain %u sh0 %u lpfr %u\n",
exposure, gain, shutter, lpfr);
- ret = imx334_write_reg(imx334, IMX334_REG_HOLD, 1, 1);
- if (ret)
- return ret;
-
- ret = imx334_write_reg(imx334, IMX334_REG_LPFR, 3, lpfr);
- if (ret)
- goto error_release_group_hold;
-
- ret = imx334_write_reg(imx334, IMX334_REG_SHUTTER, 3, shutter);
- if (ret)
- goto error_release_group_hold;
-
- ret = imx334_write_reg(imx334, IMX334_REG_AGAIN, 1, gain);
+ cci_write(imx334->cci, IMX334_REG_HOLD, 1, &ret);
+ cci_write(imx334->cci, IMX334_REG_VMAX, lpfr, &ret);
+ cci_write(imx334->cci, IMX334_REG_SHUTTER, shutter, &ret);
+ cci_write(imx334->cci, IMX334_REG_AGAIN, gain, &ret);
-error_release_group_hold:
- imx334_write_reg(imx334, IMX334_REG_HOLD, 1, 0);
+ ret_hold = cci_write(imx334->cci, IMX334_REG_HOLD, 0, NULL);
+ if (ret_hold)
+ return ret_hold;
return ret;
}
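
The rewritten helper leans on the error-accumulation convention of the CCI API: when the final int *err argument is non-NULL and already holds an error, the call becomes a no-op, so a sequence of writes can run unchecked and report the first failure once. A minimal sketch of the pattern, with hypothetical register names:

	#define SENSOR_REG_A	CCI_REG8(0x3000)	/* hypothetical */
	#define SENSOR_REG_B	CCI_REG8(0x3001)	/* hypothetical */

	static int sensor_apply_pair(struct regmap *cci)
	{
		int ret = 0;

		/* Each call is skipped if ret is already non-zero. */
		cci_write(cci, SENSOR_REG_A, 0x01, &ret);
		cci_write(cci, SENSOR_REG_B, 0x02, &ret);

		return ret;	/* first error encountered, or 0 */
	}

Note that the group-hold release above deliberately passes NULL instead, so the hold register is written even after an earlier failure.
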
@@ -707,11 +577,10 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
u32 exposure;
int ret;
- switch (ctrl->id) {
- case V4L2_CID_VBLANK:
+ if (ctrl->id == V4L2_CID_VBLANK) {
imx334->vblank = imx334->vblank_ctrl->val;
- dev_dbg(imx334->dev, "Received vblank %u, new lpfr %u",
+ dev_dbg(imx334->dev, "Received vblank %u, new lpfr %u\n",
imx334->vblank,
imx334->vblank + imx334->cur_mode->height);
@@ -721,23 +590,32 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
imx334->cur_mode->height -
IMX334_EXPOSURE_OFFSET,
1, IMX334_EXPOSURE_DEFAULT);
+ if (ret)
+ return ret;
+ }
+
+ /* Set controls only if sensor is in power on state */
+ if (!pm_runtime_get_if_in_use(imx334->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ exposure = imx334->exp_ctrl->val;
+ analog_gain = imx334->again_ctrl->val;
+
+ ret = imx334_update_exp_gain(imx334, exposure, analog_gain);
+
break;
case V4L2_CID_EXPOSURE:
- /* Set controls only if sensor is in power on state */
- if (!pm_runtime_get_if_in_use(imx334->dev))
- return 0;
-
exposure = ctrl->val;
analog_gain = imx334->again_ctrl->val;
- dev_dbg(imx334->dev, "Received exp %u analog gain %u",
+ dev_dbg(imx334->dev, "Received exp %u analog gain %u\n",
exposure, analog_gain);
ret = imx334_update_exp_gain(imx334, exposure, analog_gain);
- pm_runtime_put(imx334->dev);
-
break;
case V4L2_CID_PIXEL_RATE:
case V4L2_CID_LINK_FREQ:
@@ -746,29 +624,31 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_TEST_PATTERN:
if (ctrl->val) {
- imx334_write_reg(imx334, IMX334_TP_CLK_EN, 1,
- IMX334_TP_CLK_EN_VAL);
- imx334_write_reg(imx334, IMX334_DIG_CLP_MODE, 1, 0x0);
- imx334_write_reg(imx334, IMX334_TPG_COLORW, 1,
- IMX334_TPG_COLORW_120P);
- imx334_write_reg(imx334, IMX334_REG_TP, 1,
- imx334_test_pattern_val[ctrl->val]);
- imx334_write_reg(imx334, IMX334_TPG_EN_DOUT, 1,
- IMX334_TP_ENABLE);
+ cci_write(imx334->cci, IMX334_TP_CLK_EN,
+ IMX334_TP_CLK_EN_VAL, NULL);
+ cci_write(imx334->cci, IMX334_DIG_CLP_MODE, 0x0, NULL);
+ cci_write(imx334->cci, IMX334_TPG_COLORW,
+ IMX334_TPG_COLORW_120P, NULL);
+ cci_write(imx334->cci, IMX334_REG_TP,
+ imx334_test_pattern_val[ctrl->val], NULL);
+ cci_write(imx334->cci, IMX334_TPG_EN_DOUT,
+ IMX334_TP_ENABLE, NULL);
} else {
- imx334_write_reg(imx334, IMX334_DIG_CLP_MODE, 1, 0x1);
- imx334_write_reg(imx334, IMX334_TP_CLK_EN, 1,
- IMX334_TP_CLK_DIS_VAL);
- imx334_write_reg(imx334, IMX334_TPG_EN_DOUT, 1,
- IMX334_TP_DISABLE);
+ cci_write(imx334->cci, IMX334_DIG_CLP_MODE, 0x1, NULL);
+ cci_write(imx334->cci, IMX334_TP_CLK_EN,
+ IMX334_TP_CLK_DIS_VAL, NULL);
+ cci_write(imx334->cci, IMX334_TPG_EN_DOUT,
+ IMX334_TP_DISABLE, NULL);
}
ret = 0;
break;
default:
- dev_err(imx334->dev, "Invalid control %d", ctrl->id);
+ dev_err(imx334->dev, "Invalid control %d\n", ctrl->id);
ret = -EINVAL;
}
+ pm_runtime_put(imx334->dev);
+
return ret;
}
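
The pm_runtime_get_if_in_use() guard only takes a runtime-PM reference when the sensor is already powered; returning 0 for a powered-down device is deliberate, since the control value stays cached in the handler and is flushed by __v4l2_ctrl_handler_setup() at the next stream-on. A sketch of the guard in isolation, assuming a hypothetical sensor_write_hw() helper:

	static int sensor_apply_ctrl(struct device *dev, u32 val)
	{
		int ret;

		/* Apply hardware state only while the device is powered. */
		if (!pm_runtime_get_if_in_use(dev))
			return 0;	/* cached now, written at stream-on */

		ret = sensor_write_hw(dev, val);	/* hypothetical */

		pm_runtime_put(dev);
		return ret;
	}
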
@@ -874,8 +754,6 @@ static int imx334_get_pad_format(struct v4l2_subdev *sd,
{
struct imx334 *imx334 = to_imx334(sd);
- mutex_lock(&imx334->mutex);
-
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
@@ -886,8 +764,6 @@ static int imx334_get_pad_format(struct v4l2_subdev *sd,
imx334_fill_pad_format(imx334, imx334->cur_mode, fmt);
}
- mutex_unlock(&imx334->mutex);
-
return 0;
}
@@ -907,8 +783,6 @@ static int imx334_set_pad_format(struct v4l2_subdev *sd,
const struct imx334_mode *mode;
int ret = 0;
- mutex_lock(&imx334->mutex);
-
mode = v4l2_find_nearest_size(supported_modes,
ARRAY_SIZE(supported_modes),
width, height,
@@ -929,8 +803,6 @@ static int imx334_set_pad_format(struct v4l2_subdev *sd,
imx334->cur_mode = mode;
}
- mutex_unlock(&imx334->mutex);
-
return ret;
}
@@ -949,8 +821,6 @@ static int imx334_init_state(struct v4l2_subdev *sd,
fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- mutex_lock(&imx334->mutex);
-
imx334_fill_pad_format(imx334, imx334->cur_mode, &fmt);
__v4l2_ctrl_modify_range(imx334->link_freq_ctrl, 0,
@@ -958,8 +828,6 @@ static int imx334_init_state(struct v4l2_subdev *sd,
~(imx334->link_freq_bitmap),
__ffs(imx334->link_freq_bitmap));
- mutex_unlock(&imx334->mutex);
-
return imx334_set_pad_format(sd, sd_state, &fmt);
}
@@ -967,109 +835,113 @@ static int imx334_set_framefmt(struct imx334 *imx334)
{
switch (imx334->cur_code) {
case MEDIA_BUS_FMT_SRGGB10_1X10:
- return imx334_write_regs(imx334, raw10_framefmt_regs,
- ARRAY_SIZE(raw10_framefmt_regs));
+ return cci_multi_reg_write(imx334->cci, raw10_framefmt_regs,
+ ARRAY_SIZE(raw10_framefmt_regs), NULL);
+
case MEDIA_BUS_FMT_SRGGB12_1X12:
- return imx334_write_regs(imx334, raw12_framefmt_regs,
- ARRAY_SIZE(raw12_framefmt_regs));
+ return cci_multi_reg_write(imx334->cci, raw12_framefmt_regs,
+ ARRAY_SIZE(raw12_framefmt_regs), NULL);
}
return -EINVAL;
}
/**
- * imx334_start_streaming() - Start sensor stream
- * @imx334: pointer to imx334 device
+ * imx334_enable_streams() - Enable specified streams for the sensor
+ * @sd: pointer to the V4L2 subdevice
+ * @state: pointer to the subdevice state
+ * @pad: pad number for which streams are enabled
+ * @streams_mask: bitmask specifying the streams to enable
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx334_start_streaming(struct imx334 *imx334)
+static int imx334_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
+ struct imx334 *imx334 = to_imx334(sd);
const struct imx334_reg_list *reg_list;
int ret;
+ ret = pm_runtime_resume_and_get(imx334->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = cci_multi_reg_write(imx334->cci, common_mode_regs,
+ ARRAY_SIZE(common_mode_regs), NULL);
+ if (ret) {
+ dev_err(imx334->dev, "fail to write common registers\n");
+ goto err_rpm_put;
+ }
+
/* Write sensor mode registers */
reg_list = &imx334->cur_mode->reg_list;
- ret = imx334_write_regs(imx334, reg_list->regs,
- reg_list->num_of_regs);
+ ret = cci_multi_reg_write(imx334->cci, reg_list->regs,
+ reg_list->num_of_regs, NULL);
if (ret) {
- dev_err(imx334->dev, "fail to write initial registers");
- return ret;
+ dev_err(imx334->dev, "fail to write initial registers\n");
+ goto err_rpm_put;
+ }
+
+ ret = cci_write(imx334->cci, IMX334_REG_LANEMODE,
+ IMX334_CSI_4_LANE_MODE, NULL);
+ if (ret) {
+ dev_err(imx334->dev, "failed to configure lanes\n");
+ goto err_rpm_put;
}
ret = imx334_set_framefmt(imx334);
if (ret) {
dev_err(imx334->dev, "%s failed to set frame format: %d\n",
__func__, ret);
- return ret;
+ goto err_rpm_put;
}
/* Setup handler will write actual exposure and gain */
ret = __v4l2_ctrl_handler_setup(imx334->sd.ctrl_handler);
if (ret) {
- dev_err(imx334->dev, "fail to setup handler");
- return ret;
+ dev_err(imx334->dev, "fail to setup handler\n");
+ goto err_rpm_put;
}
/* Start streaming */
- ret = imx334_write_reg(imx334, IMX334_REG_MODE_SELECT,
- 1, IMX334_MODE_STREAMING);
+ ret = cci_write(imx334->cci, IMX334_REG_MODE_SELECT,
+ IMX334_MODE_STREAMING, NULL);
if (ret) {
- dev_err(imx334->dev, "fail to start streaming");
- return ret;
+ dev_err(imx334->dev, "fail to start streaming\n");
+ goto err_rpm_put;
}
return 0;
-}
-/**
- * imx334_stop_streaming() - Stop sensor stream
- * @imx334: pointer to imx334 device
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx334_stop_streaming(struct imx334 *imx334)
-{
- return imx334_write_reg(imx334, IMX334_REG_MODE_SELECT,
- 1, IMX334_MODE_STANDBY);
+err_rpm_put:
+ pm_runtime_put(imx334->dev);
+ return ret;
}
/**
- * imx334_set_stream() - Enable sensor streaming
- * @sd: pointer to imx334 subdevice
- * @enable: set to enable sensor streaming
+ * imx334_disable_streams() - Disable specified streams for the sensor
+ * @sd: pointer to the V4L2 subdevice
+ * @state: pointer to the subdevice state
+ * @pad: pad number for which streams are disabled
+ * @streams_mask: bitmask specifying the streams to disable
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
+static int imx334_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
struct imx334 *imx334 = to_imx334(sd);
int ret;
- mutex_lock(&imx334->mutex);
-
- if (enable) {
- ret = pm_runtime_resume_and_get(imx334->dev);
- if (ret < 0)
- goto error_unlock;
-
- ret = imx334_start_streaming(imx334);
- if (ret)
- goto error_power_off;
- } else {
- imx334_stop_streaming(imx334);
- pm_runtime_put(imx334->dev);
- }
-
- mutex_unlock(&imx334->mutex);
-
- return 0;
+ ret = cci_write(imx334->cci, IMX334_REG_MODE_SELECT,
+ IMX334_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(imx334->dev, "%s failed to stop stream\n", __func__);
-error_power_off:
pm_runtime_put(imx334->dev);
-error_unlock:
- mutex_unlock(&imx334->mutex);
return ret;
}
@@ -1083,14 +955,14 @@ error_unlock:
static int imx334_detect(struct imx334 *imx334)
{
int ret;
- u32 val;
+ u64 val;
- ret = imx334_read_reg(imx334, IMX334_REG_ID, 2, &val);
+ ret = cci_read(imx334->cci, IMX334_REG_ID, &val, NULL);
if (ret)
return ret;
if (val != IMX334_ID) {
- dev_err(imx334->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx334->dev, "chip id mismatch: %x!=%llx\n",
IMX334_ID, val);
return -ENXIO;
}
@@ -1120,24 +992,20 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
/* Request optional reset pin */
imx334->reset_gpio = devm_gpiod_get_optional(imx334->dev, "reset",
GPIOD_OUT_LOW);
- if (IS_ERR(imx334->reset_gpio)) {
- dev_err(imx334->dev, "failed to get reset gpio %ld",
- PTR_ERR(imx334->reset_gpio));
- return PTR_ERR(imx334->reset_gpio);
- }
+ if (IS_ERR(imx334->reset_gpio))
+ return dev_err_probe(imx334->dev, PTR_ERR(imx334->reset_gpio),
+ "failed to get reset gpio\n");
/* Get sensor input clock */
imx334->inclk = devm_clk_get(imx334->dev, NULL);
- if (IS_ERR(imx334->inclk)) {
- dev_err(imx334->dev, "could not get inclk");
- return PTR_ERR(imx334->inclk);
- }
+ if (IS_ERR(imx334->inclk))
+ return dev_err_probe(imx334->dev, PTR_ERR(imx334->inclk),
+ "could not get inclk\n");
rate = clk_get_rate(imx334->inclk);
- if (rate != IMX334_INCLK_RATE) {
- dev_err(imx334->dev, "inclk frequency mismatch");
- return -EINVAL;
- }
+ if (rate != IMX334_INCLK_RATE)
+ return dev_err_probe(imx334->dev, -EINVAL,
+ "inclk frequency mismatch\n");
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
@@ -1150,7 +1018,7 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX334_NUM_DATA_LANES) {
dev_err(imx334->dev,
- "number of CSI2 data lanes %d is not supported",
+ "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto done_endpoint_free;
@@ -1169,7 +1037,7 @@ done_endpoint_free:
/* V4l2 subdevice ops */
static const struct v4l2_subdev_video_ops imx334_video_ops = {
- .s_stream = imx334_set_stream,
+ .s_stream = v4l2_subdev_s_stream_helper,
};
static const struct v4l2_subdev_pad_ops imx334_pad_ops = {
@@ -1177,6 +1045,8 @@ static const struct v4l2_subdev_pad_ops imx334_pad_ops = {
.enum_frame_size = imx334_enum_frame_size,
.get_fmt = imx334_get_pad_format,
.set_fmt = imx334_set_pad_format,
+ .enable_streams = imx334_enable_streams,
+ .disable_streams = imx334_disable_streams,
};
static const struct v4l2_subdev_ops imx334_subdev_ops = {
@@ -1204,7 +1074,7 @@ static int imx334_power_on(struct device *dev)
ret = clk_prepare_enable(imx334->inclk);
if (ret) {
- dev_err(imx334->dev, "fail to enable inclk");
+ dev_err(imx334->dev, "fail to enable inclk\n");
goto error_reset;
}
@@ -1253,9 +1123,6 @@ static int imx334_init_controls(struct imx334 *imx334)
if (ret)
return ret;
- /* Serialize controls with sensor device */
- ctrl_hdlr->lock = &imx334->mutex;
-
/* Initialize exposure and gain */
lpfr = mode->vblank + mode->height;
imx334->exp_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
@@ -1342,29 +1209,31 @@ static int imx334_probe(struct i2c_client *client)
return -ENOMEM;
imx334->dev = &client->dev;
+ imx334->cci = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(imx334->cci)) {
+ dev_err(imx334->dev, "Unable to initialize I2C\n");
+ return -ENODEV;
+ }
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx334->sd, client, &imx334_subdev_ops);
imx334->sd.internal_ops = &imx334_internal_ops;
ret = imx334_parse_hw_config(imx334);
- if (ret) {
- dev_err(imx334->dev, "HW configuration is not supported");
- return ret;
- }
-
- mutex_init(&imx334->mutex);
+ if (ret)
+ return dev_err_probe(imx334->dev, ret,
+ "HW configuration is not supported\n");
ret = imx334_power_on(imx334->dev);
if (ret) {
- dev_err(imx334->dev, "failed to power-on the sensor");
- goto error_mutex_destroy;
+ dev_err_probe(imx334->dev, ret, "failed to power-on the sensor\n");
+ return ret;
}
/* Check module identity */
ret = imx334_detect(imx334);
if (ret) {
- dev_err(imx334->dev, "failed to find sensor: %d", ret);
+ dev_err(imx334->dev, "failed to find sensor: %d\n", ret);
goto error_power_off;
}
@@ -1375,7 +1244,7 @@ static int imx334_probe(struct i2c_client *client)
ret = imx334_init_controls(imx334);
if (ret) {
- dev_err(imx334->dev, "failed to init controls: %d", ret);
+ dev_err(imx334->dev, "failed to init controls: %d\n", ret);
goto error_power_off;
}
@@ -1387,31 +1256,44 @@ static int imx334_probe(struct i2c_client *client)
imx334->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx334->sd.entity, 1, &imx334->pad);
if (ret) {
- dev_err(imx334->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx334->dev, "failed to init entity pads: %d\n", ret);
goto error_handler_free;
}
- ret = v4l2_async_register_subdev_sensor(&imx334->sd);
+ imx334->sd.state_lock = imx334->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&imx334->sd);
if (ret < 0) {
- dev_err(imx334->dev,
- "failed to register async subdev: %d", ret);
+ dev_err(imx334->dev, "subdev init error: %d\n", ret);
goto error_media_entity;
}
pm_runtime_set_active(imx334->dev);
pm_runtime_enable(imx334->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&imx334->sd);
+ if (ret < 0) {
+ dev_err(imx334->dev,
+ "failed to register async subdev: %d\n", ret);
+ goto error_subdev_cleanup;
+ }
+
pm_runtime_idle(imx334->dev);
return 0;
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&imx334->sd);
+ pm_runtime_disable(imx334->dev);
+ pm_runtime_set_suspended(imx334->dev);
+
error_media_entity:
media_entity_cleanup(&imx334->sd.entity);
+
error_handler_free:
v4l2_ctrl_handler_free(imx334->sd.ctrl_handler);
+
error_power_off:
imx334_power_off(imx334->dev);
-error_mutex_destroy:
- mutex_destroy(&imx334->mutex);
return ret;
}
@@ -1425,16 +1307,17 @@ error_mutex_destroy:
static void imx334_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx334 *imx334 = to_imx334(sd);
v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
- pm_runtime_suspended(&client->dev);
-
- mutex_destroy(&imx334->mutex);
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ imx334_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
}
static const struct dev_pm_ops imx334_pm_ops = {
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 0beb80b8c458..9b4db4cd4929 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -31,7 +31,7 @@
#define IMX335_REG_CPWAIT_TIME CCI_REG8(0x300d)
#define IMX335_REG_WINMODE CCI_REG8(0x3018)
#define IMX335_REG_HTRIMMING_START CCI_REG16_LE(0x302c)
-#define IMX335_REG_HNUM CCI_REG8(0x302e)
+#define IMX335_REG_HNUM CCI_REG16_LE(0x302e)
/* Lines per frame */
#define IMX335_REG_VMAX CCI_REG24_LE(0x3030)
@@ -660,7 +660,8 @@ static int imx335_enum_frame_size(struct v4l2_subdev *sd,
struct imx335 *imx335 = to_imx335(sd);
u32 code;
- if (fsize->index > ARRAY_SIZE(imx335_mbus_codes))
+ /* Only a single supported mode is available. */
+ if (fsize->index > 0)
return -EINVAL;
code = imx335_get_format_code(imx335, fsize->code);
diff --git a/drivers/media/i2c/lt6911uxe.c b/drivers/media/i2c/lt6911uxe.c
index c5b40bb58a37..24857d683fcf 100644
--- a/drivers/media/i2c/lt6911uxe.c
+++ b/drivers/media/i2c/lt6911uxe.c
@@ -605,10 +605,10 @@ static int lt6911uxe_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(lt6911uxe->reset_gpio),
"failed to get reset gpio\n");
- lt6911uxe->irq_gpio = devm_gpiod_get(dev, "readystat", GPIOD_IN);
+ lt6911uxe->irq_gpio = devm_gpiod_get(dev, "hpd", GPIOD_IN);
if (IS_ERR(lt6911uxe->irq_gpio))
return dev_err_probe(dev, PTR_ERR(lt6911uxe->irq_gpio),
- "failed to get ready_stat gpio\n");
+ "failed to get hpd gpio\n");
ret = lt6911uxe_fwnode_parse(lt6911uxe, dev);
if (ret)
diff --git a/drivers/media/i2c/max96714.c b/drivers/media/i2c/max96714.c
index 159753b13777..3cc1b1ae47d1 100644
--- a/drivers/media/i2c/max96714.c
+++ b/drivers/media/i2c/max96714.c
@@ -7,11 +7,11 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
-#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
index 9259d58ba734..3746729366ac 100644
--- a/drivers/media/i2c/max96717.c
+++ b/drivers/media/i2c/max96717.c
@@ -9,10 +9,10 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/driver.h>
#include <linux/i2c-mux.h>
#include <linux/i2c.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <media/v4l2-cci.h>
diff --git a/drivers/media/i2c/ov02c10.c b/drivers/media/i2c/ov02c10.c
new file mode 100644
index 000000000000..089a4fd9627c
--- /dev/null
+++ b/drivers/media/i2c/ov02c10.c
@@ -0,0 +1,1013 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2022 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV02C10_LINK_FREQ_400MHZ 400000000ULL
+#define OV02C10_MCLK 19200000
+#define OV02C10_RGB_DEPTH 10
+
+#define OV02C10_REG_CHIP_ID CCI_REG16(0x300a)
+#define OV02C10_CHIP_ID 0x5602
+
+#define OV02C10_REG_STREAM_CONTROL CCI_REG8(0x0100)
+
+#define OV02C10_REG_HTS CCI_REG16(0x380c)
+
+/* vertical-timings from sensor */
+#define OV02C10_REG_VTS CCI_REG16(0x380e)
+#define OV02C10_VTS_MAX 0xffff
+
+/* Exposure controls from sensor */
+#define OV02C10_REG_EXPOSURE CCI_REG16(0x3501)
+#define OV02C10_EXPOSURE_MIN 4
+#define OV02C10_EXPOSURE_MAX_MARGIN 8
+#define OV02C10_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV02C10_REG_ANALOG_GAIN CCI_REG16(0x3508)
+#define OV02C10_ANAL_GAIN_MIN 0x10
+#define OV02C10_ANAL_GAIN_MAX 0xf8
+#define OV02C10_ANAL_GAIN_STEP 1
+#define OV02C10_ANAL_GAIN_DEFAULT 0x10
+
+/* Digital gain controls from sensor */
+#define OV02C10_REG_DIGITAL_GAIN CCI_REG24(0x350a)
+#define OV02C10_DGTL_GAIN_MIN 0x0400
+#define OV02C10_DGTL_GAIN_MAX 0x3fff
+#define OV02C10_DGTL_GAIN_STEP 1
+#define OV02C10_DGTL_GAIN_DEFAULT 0x0400
+
+/* Rotate */
+#define OV02C10_ROTATE_CONTROL CCI_REG8(0x3820)
+#define OV02C10_ISP_X_WIN_CONTROL CCI_REG16(0x3810)
+#define OV02C10_ISP_Y_WIN_CONTROL CCI_REG16(0x3812)
+#define OV02C10_CONFIG_ROTATE 0x18
+
+/* Test Pattern Control */
+#define OV02C10_REG_TEST_PATTERN CCI_REG8(0x4503)
+#define OV02C10_TEST_PATTERN_ENABLE BIT(7)
+
+struct ov02c10_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Sensor register settings for this resolution */
+ const struct reg_sequence *reg_sequence;
+ const int sequence_length;
+ /* Sensor register settings for 1 or 2 lane config */
+ const struct reg_sequence *lane_settings[2];
+ const int lane_settings_length[2];
+};
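
The two lane_settings[] tables are indexed by lane count minus one, i.e. lane_settings[mipi_lanes - 1], as ov02c10_enable_streams() does further down.
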
+
+static const struct reg_sequence sensor_1928x1092_30fps_setting[] = {
+ {0x0301, 0x08},
+ {0x0303, 0x06},
+ {0x0304, 0x01},
+ {0x0305, 0xe0},
+ {0x0313, 0x40},
+ {0x031c, 0x4f},
+ {0x3020, 0x97},
+ {0x3022, 0x01},
+ {0x3026, 0xb4},
+ {0x303b, 0x00},
+ {0x303c, 0x4f},
+ {0x303d, 0xe6},
+ {0x303e, 0x00},
+ {0x303f, 0x03},
+ {0x3021, 0x23},
+ {0x3501, 0x04},
+ {0x3502, 0x6c},
+ {0x3504, 0x0c},
+ {0x3507, 0x00},
+ {0x3508, 0x08},
+ {0x3509, 0x00},
+ {0x350a, 0x01},
+ {0x350b, 0x00},
+ {0x350c, 0x41},
+ {0x3600, 0x84},
+ {0x3603, 0x08},
+ {0x3610, 0x57},
+ {0x3611, 0x1b},
+ {0x3613, 0x78},
+ {0x3623, 0x00},
+ {0x3632, 0xa0},
+ {0x3642, 0xe8},
+ {0x364c, 0x70},
+ {0x365f, 0x0f},
+ {0x3708, 0x30},
+ {0x3714, 0x24},
+ {0x3725, 0x02},
+ {0x3737, 0x08},
+ {0x3739, 0x28},
+ {0x3749, 0x32},
+ {0x374a, 0x32},
+ {0x374b, 0x32},
+ {0x374c, 0x32},
+ {0x374d, 0x81},
+ {0x374e, 0x81},
+ {0x374f, 0x81},
+ {0x3752, 0x36},
+ {0x3753, 0x36},
+ {0x3754, 0x36},
+ {0x3761, 0x00},
+ {0x376c, 0x81},
+ {0x3774, 0x18},
+ {0x3776, 0x08},
+ {0x377c, 0x81},
+ {0x377d, 0x81},
+ {0x377e, 0x81},
+ {0x37a0, 0x44},
+ {0x37a6, 0x44},
+ {0x37aa, 0x0d},
+ {0x37ae, 0x00},
+ {0x37cb, 0x03},
+ {0x37cc, 0x01},
+ {0x37d8, 0x02},
+ {0x37d9, 0x10},
+ {0x37e1, 0x10},
+ {0x37e2, 0x18},
+ {0x37e3, 0x08},
+ {0x37e4, 0x08},
+ {0x37e5, 0x02},
+ {0x37e6, 0x08},
+
+ /* 1928x1092 */
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x07},
+ {0x3805, 0x8f},
+ {0x3806, 0x04},
+ {0x3807, 0x47},
+ {0x3808, 0x07},
+ {0x3809, 0x88},
+ {0x380a, 0x04},
+ {0x380b, 0x44},
+ {0x3810, 0x00},
+ {0x3811, 0x02},
+ {0x3812, 0x00},
+ {0x3813, 0x02},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+
+ {0x3820, 0xb0},
+ {0x3821, 0x00},
+ {0x3822, 0x80},
+ {0x3823, 0x08},
+ {0x3824, 0x00},
+ {0x3825, 0x20},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x382a, 0x00},
+ {0x382b, 0x08},
+ {0x382d, 0x00},
+ {0x382e, 0x00},
+ {0x382f, 0x23},
+ {0x3834, 0x00},
+ {0x3839, 0x00},
+ {0x383a, 0xd1},
+ {0x383e, 0x03},
+ {0x393d, 0x29},
+ {0x393f, 0x6e},
+ {0x394b, 0x06},
+ {0x394c, 0x06},
+ {0x394d, 0x08},
+ {0x394f, 0x01},
+ {0x3950, 0x01},
+ {0x3951, 0x01},
+ {0x3952, 0x01},
+ {0x3953, 0x01},
+ {0x3954, 0x01},
+ {0x3955, 0x01},
+ {0x3956, 0x01},
+ {0x3957, 0x0e},
+ {0x3958, 0x08},
+ {0x3959, 0x08},
+ {0x395a, 0x08},
+ {0x395b, 0x13},
+ {0x395c, 0x09},
+ {0x395d, 0x05},
+ {0x395e, 0x02},
+ {0x395f, 0x00},
+ {0x395f, 0x00},
+ {0x3960, 0x00},
+ {0x3961, 0x00},
+ {0x3962, 0x00},
+ {0x3963, 0x00},
+ {0x3964, 0x00},
+ {0x3965, 0x00},
+ {0x3966, 0x00},
+ {0x3967, 0x00},
+ {0x3968, 0x01},
+ {0x3969, 0x01},
+ {0x396a, 0x01},
+ {0x396b, 0x01},
+ {0x396c, 0x10},
+ {0x396d, 0xf0},
+ {0x396e, 0x11},
+ {0x396f, 0x00},
+ {0x3970, 0x37},
+ {0x3971, 0x37},
+ {0x3972, 0x37},
+ {0x3973, 0x37},
+ {0x3974, 0x00},
+ {0x3975, 0x3c},
+ {0x3976, 0x3c},
+ {0x3977, 0x3c},
+ {0x3978, 0x3c},
+ {0x3c00, 0x0f},
+ {0x3c20, 0x01},
+ {0x3c21, 0x08},
+ {0x3f00, 0x8b},
+ {0x3f02, 0x0f},
+ {0x4000, 0xc3},
+ {0x4001, 0xe0},
+ {0x4002, 0x00},
+ {0x4003, 0x40},
+ {0x4008, 0x04},
+ {0x4009, 0x23},
+ {0x400a, 0x04},
+ {0x400b, 0x01},
+ {0x4077, 0x06},
+ {0x4078, 0x00},
+ {0x4079, 0x1a},
+ {0x407a, 0x7f},
+ {0x407b, 0x01},
+ {0x4080, 0x03},
+ {0x4081, 0x84},
+ {0x4308, 0x03},
+ {0x4309, 0xff},
+ {0x430d, 0x00},
+ {0x4806, 0x00},
+ {0x4813, 0x00},
+ {0x4837, 0x10},
+ {0x4857, 0x05},
+ {0x4500, 0x07},
+ {0x4501, 0x00},
+ {0x4503, 0x00},
+ {0x450a, 0x04},
+ {0x450e, 0x00},
+ {0x450f, 0x00},
+ {0x4900, 0x00},
+ {0x4901, 0x00},
+ {0x4902, 0x01},
+ {0x5001, 0x50},
+ {0x5006, 0x00},
+ {0x5080, 0x40},
+ {0x5181, 0x2b},
+ {0x5202, 0xa3},
+ {0x5206, 0x01},
+ {0x5207, 0x00},
+ {0x520a, 0x01},
+ {0x520b, 0x00},
+ {0x365d, 0x00},
+ {0x4815, 0x40},
+ {0x4816, 0x12},
+ {0x4f00, 0x01},
+};
+
+static const struct reg_sequence sensor_1928x1092_30fps_1lane_setting[] = {
+ {0x301b, 0xd2},
+ {0x3027, 0xe1},
+ {0x380c, 0x08},
+ {0x380d, 0xe8},
+ {0x380e, 0x04},
+ {0x380f, 0x8c},
+ {0x394e, 0x0b},
+ {0x4800, 0x24},
+ {0x5000, 0xf5},
+ /* plls */
+ {0x0303, 0x05},
+ {0x0305, 0x90},
+ {0x0316, 0x90},
+ {0x3016, 0x12},
+};
+
+static const struct reg_sequence sensor_1928x1092_30fps_2lane_setting[] = {
+ {0x301b, 0xf0},
+ {0x3027, 0xf1},
+ {0x380c, 0x04},
+ {0x380d, 0x74},
+ {0x380e, 0x09},
+ {0x380f, 0x18},
+ {0x394e, 0x0a},
+ {0x4041, 0x20},
+ {0x4884, 0x04},
+ {0x4800, 0x64},
+ {0x4d00, 0x03},
+ {0x4d01, 0xd8},
+ {0x4d02, 0xba},
+ {0x4d03, 0xa0},
+ {0x4d04, 0xb7},
+ {0x4d05, 0x34},
+ {0x4d0d, 0x00},
+ {0x5000, 0xfd},
+ {0x481f, 0x30},
+ /* plls */
+ {0x0303, 0x05},
+ {0x0305, 0x90},
+ {0x0316, 0x90},
+ {0x3016, 0x32},
+};
+
+static const char * const ov02c10_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Color Bar type 4",
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV02C10_LINK_FREQ_400MHZ,
+};
+
+static const struct ov02c10_mode supported_modes[] = {
+ {
+ .width = 1928,
+ .height = 1092,
+ .hts = 2280,
+ .vts_min = 1164,
+ .reg_sequence = sensor_1928x1092_30fps_setting,
+ .sequence_length = ARRAY_SIZE(sensor_1928x1092_30fps_setting),
+ .lane_settings = {
+ sensor_1928x1092_30fps_1lane_setting,
+ sensor_1928x1092_30fps_2lane_setting
+ },
+ .lane_settings_length = {
+ ARRAY_SIZE(sensor_1928x1092_30fps_1lane_setting),
+ ARRAY_SIZE(sensor_1928x1092_30fps_2lane_setting),
+ },
+ },
+};
+
+static const char * const ov02c10_supply_names[] = {
+ "dovdd", /* Digital I/O power */
+ "avdd", /* Analog power */
+ "dvdd", /* Digital core power */
+};
+
+struct ov02c10 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct regmap *regmap;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ struct clk *img_clk;
+ struct gpio_desc *reset;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov02c10_supply_names)];
+
+ /* MIPI lane info */
+ u32 link_freq_index;
+ u8 mipi_lanes;
+};
+
+static inline struct ov02c10 *to_ov02c10(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ov02c10, sd);
+}
+
+static int ov02c10_test_pattern(struct ov02c10 *ov02c10, int pattern)
+{
+ int ret = 0;
+
+ if (!pattern)
+ return cci_update_bits(ov02c10->regmap, OV02C10_REG_TEST_PATTERN,
+ BIT(7), 0, NULL);
+
+ cci_update_bits(ov02c10->regmap, OV02C10_REG_TEST_PATTERN,
+ 0x03, pattern - 1, &ret);
+ cci_update_bits(ov02c10->regmap, OV02C10_REG_TEST_PATTERN,
+ BIT(7), OV02C10_TEST_PATTERN_ENABLE, &ret);
+ return ret;
+}
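
cci_update_bits() is a read-modify-write under the same error-accumulation convention, which lets the two updates above share one accumulated return value. A rough open-coded equivalent, as a sketch:

	static int sensor_update_bits_sketch(struct regmap *cci, u32 reg,
					     u64 mask, u64 bits)
	{
		u64 val;
		int ret;

		ret = cci_read(cci, reg, &val, NULL);
		if (ret)
			return ret;

		val = (val & ~mask) | (bits & mask);

		return cci_write(cci, reg, val, NULL);
	}
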
+
+static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov02c10 *ov02c10 = container_of(ctrl->handler,
+ struct ov02c10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
+ const u32 height = supported_modes[0].height;
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = height + ctrl->val - OV02C10_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(ov02c10->exposure,
+ ov02c10->exposure->minimum,
+ exposure_max, ov02c10->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ cci_write(ov02c10->regmap, OV02C10_REG_ANALOG_GAIN,
+ ctrl->val << 4, &ret);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ cci_write(ov02c10->regmap, OV02C10_REG_DIGITAL_GAIN,
+ ctrl->val << 6, &ret);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ cci_write(ov02c10->regmap, OV02C10_REG_EXPOSURE,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_VBLANK:
+ cci_write(ov02c10->regmap, OV02C10_REG_VTS, height + ctrl->val,
+ &ret);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov02c10_test_pattern(ov02c10, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov02c10_ctrl_ops = {
+ .s_ctrl = ov02c10_set_ctrl,
+};
+
+static int ov02c10_init_controls(struct ov02c10 *ov02c10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr = &ov02c10->ctrl_handler;
+ const struct ov02c10_mode *mode = &supported_modes[0];
+ u32 vblank_min, vblank_max, vblank_default, vts_def;
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, h_blank, pixel_rate;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+
+ ov02c10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov02c10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ov02c10->link_freq_index, 0,
+ link_freq_menu_items);
+ if (ov02c10->link_freq)
+ ov02c10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* MIPI lanes are DDR -> use link-freq * 2 */
+ pixel_rate = div_u64(link_freq_menu_items[ov02c10->link_freq_index] *
+ 2 * ov02c10->mipi_lanes, OV02C10_RGB_DEPTH);
+
+ ov02c10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ pixel_rate, 1, pixel_rate);
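
With the DDR MIPI link, each lane carries twice the link frequency in bits per second, so for the single 400 MHz menu entry the computation works out to:

	pixel_rate = 400000000 * 2 * 2 / 10 = 160000000 pixels/s	(2-lane)
	pixel_rate = 400000000 * 2 * 1 / 10 =  80000000 pixels/s	(1-lane)
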
+
+ /*
+ * For the default, multiply the minimum VTS by the number of lanes to
+ * keep the default FPS the same independent of the lane count.
+ */
+ vts_def = mode->vts_min * ov02c10->mipi_lanes;
+
+ vblank_min = mode->vts_min - mode->height;
+ vblank_max = OV02C10_VTS_MAX - mode->height;
+ vblank_default = vts_def - mode->height;
+ ov02c10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_default);
+
+ h_blank = mode->hts - mode->width;
+ ov02c10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (ov02c10->hblank)
+ ov02c10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV02C10_ANAL_GAIN_MIN, OV02C10_ANAL_GAIN_MAX,
+ OV02C10_ANAL_GAIN_STEP, OV02C10_ANAL_GAIN_DEFAULT);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV02C10_DGTL_GAIN_MIN, OV02C10_DGTL_GAIN_MAX,
+ OV02C10_DGTL_GAIN_STEP, OV02C10_DGTL_GAIN_DEFAULT);
+ exposure_max = vts_def - OV02C10_EXPOSURE_MAX_MARGIN;
+ ov02c10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV02C10_EXPOSURE_MIN,
+ exposure_max,
+ OV02C10_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov02c10_test_pattern_menu) - 1,
+ 0, 0, ov02c10_test_pattern_menu);
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov02c10_ctrl_ops, &props);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov02c10->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov02c10_update_pad_format(const struct ov02c10_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov02c10_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ const struct ov02c10_mode *mode = &supported_modes[0];
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+ const struct reg_sequence *reg_sequence;
+ int ret, sequence_length;
+
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret)
+ return ret;
+
+ reg_sequence = mode->reg_sequence;
+ sequence_length = mode->sequence_length;
+ ret = regmap_multi_reg_write(ov02c10->regmap,
+ reg_sequence, sequence_length);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode\n");
+ goto out;
+ }
+
+ reg_sequence = mode->lane_settings[ov02c10->mipi_lanes - 1];
+ sequence_length = mode->lane_settings_length[ov02c10->mipi_lanes - 1];
+ ret = regmap_multi_reg_write(ov02c10->regmap,
+ reg_sequence, sequence_length);
+ if (ret) {
+ dev_err(&client->dev, "failed to write lane settings\n");
+ goto out;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov02c10->sd.ctrl_handler);
+ if (ret)
+ goto out;
+
+ ret = cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 1, NULL);
+out:
+ if (ret)
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static int ov02c10_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+
+ cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 0, NULL);
+ pm_runtime_put(&client->dev);
+
+ return 0;
+}
+
+/* This function tries to get power control resources */
+static int ov02c10_get_pm_resources(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+ int i;
+
+ ov02c10->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov02c10->reset))
+ return dev_err_probe(dev, PTR_ERR(ov02c10->reset),
+ "failed to get reset gpio\n");
+
+ for (i = 0; i < ARRAY_SIZE(ov02c10_supply_names); i++)
+ ov02c10->supplies[i].supply = ov02c10_supply_names[i];
+
+ return devm_regulator_bulk_get(dev, ARRAY_SIZE(ov02c10_supply_names),
+ ov02c10->supplies);
+}
+
+static int ov02c10_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+
+ gpiod_set_value_cansleep(ov02c10->reset, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(ov02c10_supply_names),
+ ov02c10->supplies);
+
+ clk_disable_unprepare(ov02c10->img_clk);
+
+ return 0;
+}
+
+static int ov02c10_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+ int ret;
+
+ ret = clk_prepare_enable(ov02c10->img_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable imaging clock: %d", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov02c10_supply_names),
+ ov02c10->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators: %d", ret);
+ clk_disable_unprepare(ov02c10->img_clk);
+ return ret;
+ }
+
+ if (ov02c10->reset) {
+ /* Assert reset for at least 2ms on back to back off-on */
+ usleep_range(2000, 2200);
+ gpiod_set_value_cansleep(ov02c10->reset, 0);
+ usleep_range(5000, 5100);
+ }
+
+ return 0;
+}
+
+static int ov02c10_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ const struct ov02c10_mode *mode = &supported_modes[0];
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
+ s32 vblank_def, h_blank;
+
+ ov02c10_update_pad_format(mode, &fmt->format);
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_min * ov02c10->mipi_lanes - mode->height;
+ __v4l2_ctrl_modify_range(ov02c10->vblank, mode->vts_min - mode->height,
+ OV02C10_VTS_MAX - mode->height, 1, vblank_def);
+ __v4l2_ctrl_s_ctrl(ov02c10->vblank, vblank_def);
+ h_blank = mode->hts - mode->width;
+ __v4l2_ctrl_modify_range(ov02c10->hblank, h_blank, h_blank, 1, h_blank);
+
+ return 0;
+}
+
+static int ov02c10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov02c10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov02c10_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ ov02c10_update_pad_format(&supported_modes[0],
+ v4l2_subdev_state_get_format(sd_state, 0));
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov02c10_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov02c10_pad_ops = {
+ .set_fmt = ov02c10_set_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enum_mbus_code = ov02c10_enum_mbus_code,
+ .enum_frame_size = ov02c10_enum_frame_size,
+ .enable_streams = ov02c10_enable_streams,
+ .disable_streams = ov02c10_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov02c10_subdev_ops = {
+ .video = &ov02c10_video_ops,
+ .pad = &ov02c10_pad_ops,
+};
+
+static const struct media_entity_operations ov02c10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov02c10_internal_ops = {
+ .init_state = ov02c10_init_state,
+};
+
+static int ov02c10_identify_module(struct ov02c10 *ov02c10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
+ u64 chip_id;
+ int ret;
+
+ ret = cci_read(ov02c10->regmap, OV02C10_REG_CHIP_ID, &chip_id, NULL);
+ if (ret)
+ return ret;
+
+ if (chip_id != OV02C10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%llx",
+ OV02C10_CHIP_ID, chip_id);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov02c10_check_hwcfg(struct device *dev, struct ov02c10 *ov02c10)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *ep, *fwnode = dev_fwnode(dev);
+ unsigned long link_freq_bitmap;
+ u32 mclk;
+ int ret;
+
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver,
+ * wait for this.
+ */
+ ep = fwnode_graph_get_endpoint_by_id(fwnode, 0, 0, 0);
+ if (!ep)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
+
+ ov02c10->img_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ov02c10->img_clk)) {
+ fwnode_handle_put(ep);
+ return dev_err_probe(dev, PTR_ERR(ov02c10->img_clk),
+ "failed to get imaging clock\n");
+ }
+
+ if (ov02c10->img_clk) {
+ mclk = clk_get_rate(ov02c10->img_clk);
+ } else {
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret) {
+ fwnode_handle_put(ep);
+ return dev_err_probe(dev, ret,
+ "reading clock-frequency property\n");
+ }
+ }
+
+ if (mclk != OV02C10_MCLK) {
+ fwnode_handle_put(ep);
+ return dev_err_probe(dev, -EINVAL,
+ "external clock %u is not supported\n",
+ mclk);
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
+
+ ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &link_freq_bitmap);
+ if (ret)
+ goto check_hwcfg_error;
+
+ /* v4l2_link_freq_to_bitmap() guarantees at least 1 bit is set */
+ ov02c10->link_freq_index = ffs(link_freq_bitmap) - 1;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 1 &&
+ bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "number of CSI2 data lanes %u is not supported\n",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ goto check_hwcfg_error;
+ }
+
+ ov02c10->mipi_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return ret;
+}
+
+static void ov02c10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
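+ /*
+ * Runtime PM is disabled now; if the device is still powered up,
+ * power it off manually and sync the runtime PM status.
+ */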
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ ov02c10_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
+}
+
+static int ov02c10_probe(struct i2c_client *client)
+{
+ struct ov02c10 *ov02c10;
+ int ret;
+
+ ov02c10 = devm_kzalloc(&client->dev, sizeof(*ov02c10), GFP_KERNEL);
+ if (!ov02c10)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ov02c10->sd, client, &ov02c10_subdev_ops);
+
+ /* Check HW config */
+ ret = ov02c10_check_hwcfg(&client->dev, ov02c10);
+ if (ret)
+ return ret;
+
+ ret = ov02c10_get_pm_resources(&client->dev);
+ if (ret)
+ return ret;
+
+ ov02c10->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(ov02c10->regmap))
+ return PTR_ERR(ov02c10->regmap);
+
+ ret = ov02c10_power_on(&client->dev);
+ if (ret) {
+ dev_err_probe(&client->dev, ret, "failed to power on\n");
+ return ret;
+ }
+
+ ret = ov02c10_identify_module(ov02c10);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto probe_error_power_off;
+ }
+
+ ret = ov02c10_init_controls(ov02c10);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov02c10->sd.internal_ops = &ov02c10_internal_ops;
+ ov02c10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov02c10->sd.entity.ops = &ov02c10_subdev_entity_ops;
+ ov02c10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov02c10->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov02c10->sd.entity, 1, &ov02c10->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov02c10->sd.state_lock = ov02c10->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&ov02c10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to init subdev: %d", ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&ov02c10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_v4l2_subdev_cleanup;
+ }
+
+ pm_runtime_idle(&client->dev);
+ return 0;
+
+probe_error_v4l2_subdev_cleanup:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ v4l2_subdev_cleanup(&ov02c10->sd);
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov02c10->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov02c10->sd.ctrl_handler);
+
+probe_error_power_off:
+ ov02c10_power_off(&client->dev);
+
+ return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ov02c10_pm_ops, ov02c10_power_off,
+ ov02c10_power_on, NULL);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov02c10_acpi_ids[] = {
+ { "OVTI02C1" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov02c10_acpi_ids);
+#endif
+
+static const struct of_device_id ov02c10_of_match[] = {
+ { .compatible = "ovti,ov02c10" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov02c10_of_match);
+
+static struct i2c_driver ov02c10_i2c_driver = {
+ .driver = {
+ .name = "ov02c10",
+ .pm = pm_sleep_ptr(&ov02c10_pm_ops),
+ .acpi_match_table = ACPI_PTR(ov02c10_acpi_ids),
+ .of_match_table = ov02c10_of_match,
+ },
+ .probe = ov02c10_probe,
+ .remove = ov02c10_remove,
+};
+
+module_i2c_driver(ov02c10_i2c_driver);
+
+MODULE_AUTHOR("Hao Yao <hao.yao@intel.com>");
+MODULE_AUTHOR("Heimir Thor Sverrisson <heimir.sverrisson@gmail.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("OmniVision OV02C10 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov02e10.c b/drivers/media/i2c/ov02e10.c
new file mode 100644
index 000000000000..d74dc62e189d
--- /dev/null
+++ b/drivers/media/i2c/ov02e10.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV02E10_LINK_FREQ_360MHZ 360000000ULL
+#define OV02E10_SCLK 36000000LL
+#define OV02E10_MCLK 19200000
+#define OV02E10_DATA_LANES 2
+#define OV02E10_RGB_DEPTH 10
+
+#define OV02E10_REG_PAGE_FLAG CCI_REG8(0xfd)
+#define OV02E10_PAGE_0 0x0
+#define OV02E10_PAGE_1 0x1
+#define OV02E10_PAGE_2 0x2
+#define OV02E10_PAGE_3 0x3
+#define OV02E10_PAGE_5 0x4
+#define OV02E10_PAGE_7 0x5
+#define OV02E10_PAGE_8 0x6
+#define OV02E10_PAGE_9 0xF
+#define OV02E10_PAGE_D 0x8
+#define OV02E10_PAGE_E 0x9
+#define OV02E10_PAGE_F 0xA
+
+#define OV02E10_REG_CHIP_ID CCI_REG32(0x00)
+#define OV02E10_CHIP_ID 0x45025610
+
+/* Horizontal and vertical flip */
+#define OV02E10_REG_ORIENTATION CCI_REG8(0x32)
+
+/* vertical-timings from sensor */
+#define OV02E10_REG_VTS CCI_REG16(0x35)
+#define OV02E10_VTS_DEF 2244
+#define OV02E10_VTS_MIN 2244
+#define OV02E10_VTS_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define OV02E10_REG_HTS CCI_REG16(0x37)
+
+/* Exposure controls from sensor */
+#define OV02E10_REG_EXPOSURE CCI_REG16(0x03)
+#define OV02E10_EXPOSURE_MIN 1
+#define OV02E10_EXPOSURE_MAX_MARGIN 2
+#define OV02E10_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV02E10_REG_ANALOG_GAIN CCI_REG8(0x24)
+#define OV02E10_ANAL_GAIN_MIN 0x10
+#define OV02E10_ANAL_GAIN_MAX 0xf8
+#define OV02E10_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OV02E10_REG_DIGITAL_GAIN CCI_REG16(0x21)
+#define OV02E10_DGTL_GAIN_MIN 256
+#define OV02E10_DGTL_GAIN_MAX 1020
+#define OV02E10_DGTL_GAIN_STEP 1
+#define OV02E10_DGTL_GAIN_DEFAULT 256
+
+/* Register update control */
+#define OV02E10_REG_COMMAND_UPDATE CCI_REG8(0xE7)
+#define OV02E10_COMMAND_UPDATE 0x00
+#define OV02E10_COMMAND_HOLD 0x01
+
+/* Test Pattern Control */
+#define OV02E10_REG_TEST_PATTERN CCI_REG8(0x12)
+#define OV02E10_TEST_PATTERN_ENABLE BIT(0)
+#define OV02E10_TEST_PATTERN_BAR_SHIFT 1
+
+struct reg_sequence_list {
+ u32 num_regs;
+ const struct reg_sequence *regs;
+};
+
+struct ov02e10_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Sensor register settings for this resolution */
+ const struct reg_sequence_list reg_list;
+};
+
+static const struct reg_sequence mode_1928x1088_30fps_2lane[] = {
+ { 0xfd, 0x00 },
+ { 0x20, 0x00 },
+ { 0x20, 0x0b },
+ { 0x21, 0x02 },
+ { 0x10, 0x23 },
+ { 0xc5, 0x04 },
+ { 0x21, 0x00 },
+ { 0x14, 0x96 },
+ { 0x17, 0x01 },
+ { 0xfd, 0x01 },
+ { 0x03, 0x00 },
+ { 0x04, 0x04 },
+ { 0x05, 0x04 },
+ { 0x06, 0x62 },
+ { 0x07, 0x01 },
+ { 0x22, 0x80 },
+ { 0x24, 0xff },
+ { 0x40, 0xc6 },
+ { 0x41, 0x18 },
+ { 0x45, 0x3f },
+ { 0x48, 0x0c },
+ { 0x4c, 0x08 },
+ { 0x51, 0x12 },
+ { 0x52, 0x10 },
+ { 0x57, 0x98 },
+ { 0x59, 0x06 },
+ { 0x5a, 0x04 },
+ { 0x5c, 0x38 },
+ { 0x5e, 0x10 },
+ { 0x67, 0x11 },
+ { 0x7b, 0x04 },
+ { 0x81, 0x12 },
+ { 0x90, 0x51 },
+ { 0x91, 0x09 },
+ { 0x92, 0x21 },
+ { 0x93, 0x28 },
+ { 0x95, 0x54 },
+ { 0x9d, 0x20 },
+ { 0x9e, 0x04 },
+ { 0xb1, 0x9a },
+ { 0xb2, 0x86 },
+ { 0xb6, 0x3f },
+ { 0xb9, 0x30 },
+ { 0xc1, 0x01 },
+ { 0xc5, 0xa0 },
+ { 0xc6, 0x73 },
+ { 0xc7, 0x04 },
+ { 0xc8, 0x25 },
+ { 0xc9, 0x05 },
+ { 0xca, 0x28 },
+ { 0xcb, 0x00 },
+ { 0xcf, 0x16 },
+ { 0xd2, 0xd0 },
+ { 0xd7, 0x3f },
+ { 0xd8, 0x40 },
+ { 0xd9, 0x40 },
+ { 0xda, 0x44 },
+ { 0xdb, 0x3d },
+ { 0xdc, 0x3d },
+ { 0xdd, 0x3d },
+ { 0xde, 0x3d },
+ { 0xdf, 0xf0 },
+ { 0xea, 0x0f },
+ { 0xeb, 0x04 },
+ { 0xec, 0x29 },
+ { 0xee, 0x47 },
+ { 0xfd, 0x01 },
+ { 0x31, 0x01 },
+ { 0x27, 0x00 },
+ { 0x2f, 0x41 },
+ { 0xfd, 0x02 },
+ { 0xa1, 0x01 },
+ { 0xfd, 0x02 },
+ { 0x9a, 0x03 },
+ { 0xfd, 0x03 },
+ { 0x9d, 0x0f },
+ { 0xfd, 0x07 },
+ { 0x42, 0x00 },
+ { 0x43, 0xad },
+ { 0x44, 0x00 },
+ { 0x45, 0xa8 },
+ { 0x46, 0x00 },
+ { 0x47, 0xa8 },
+ { 0x48, 0x00 },
+ { 0x49, 0xad },
+ { 0xfd, 0x00 },
+ { 0xc4, 0x01 },
+ { 0xfd, 0x01 },
+ { 0x33, 0x03 },
+ { 0xfd, 0x00 },
+ { 0x20, 0x1f },
+};
+
+static const char * const ov02e10_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bar",
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV02E10_LINK_FREQ_360MHZ,
+};
+
+static const struct ov02e10_mode supported_modes[] = {
+ {
+ .width = 1928,
+ .height = 1088,
+ .hts = 534,
+ .vts_def = 2244,
+ .vts_min = 2244,
+ .reg_list = {
+ .num_regs = ARRAY_SIZE(mode_1928x1088_30fps_2lane),
+ .regs = mode_1928x1088_30fps_2lane,
+ },
+ },
+};
+
+static const char * const ov02e10_supply_names[] = {
+ "dovdd", /* Digital I/O power */
+ "avdd", /* Analog power */
+ "dvdd", /* Digital core power */
+};
+
+struct ov02e10 {
+ struct regmap *regmap;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+
+ struct clk *img_clk;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov02e10_supply_names)];
+ struct gpio_desc *reset;
+
+ /* Current mode */
+ const struct ov02e10_mode *cur_mode;
+
+ /* MIPI lanes info */
+ u32 link_freq_index;
+ u8 mipi_lanes;
+};
+
+static inline struct ov02e10 *to_ov02e10(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ov02e10, sd);
+}
+
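+/*
+ * The CSI-2 link carries two bits per clock per lane (DDR), so the
+ * pixel rate is link_freq * 2 * nr_of_lanes / bits_per_sample.
+ */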
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV02E10_DATA_LANES;
+
+ do_div(pixel_rate, OV02E10_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
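+/* HTS is given in SCLK cycles; scale it to pixel-rate units for hblank */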
+static u64 to_pixels_per_line(u32 hts, u32 f_index)
+{
+ u64 ppl = hts * to_pixel_rate(f_index);
+
+ do_div(ppl, OV02E10_SCLK);
+
+ return ppl;
+}
+
+static void ov02e10_test_pattern(struct ov02e10 *ov02e10, u32 pattern, int *pret)
+{
+ if (pattern)
+ pattern = pattern << OV02E10_TEST_PATTERN_BAR_SHIFT |
+ OV02E10_TEST_PATTERN_ENABLE;
+
+ cci_write(ov02e10->regmap, OV02E10_REG_TEST_PATTERN, pattern, pret);
+}
+
+static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov02e10 *ov02e10 = container_of(ctrl->handler,
+ struct ov02e10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
+ s64 exposure_max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = ov02e10->cur_mode->height + ctrl->val -
+ OV02E10_EXPOSURE_MAX_MARGIN;
+ ret = __v4l2_ctrl_modify_range(ov02e10->exposure,
+ ov02e10->exposure->minimum,
+ exposure_max,
+ ov02e10->exposure->step,
+ exposure_max);
+ if (ret)
+ return ret;
+ }
+
+ /* V4L2 control values are only applied when the sensor is powered up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
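+ /*
+ * Hold back register updates while the control registers are
+ * written; the hold is released again after the switch below.
+ */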
+ ret = cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
+ OV02E10_COMMAND_HOLD, NULL);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_ANALOG_GAIN,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_DIGITAL_GAIN,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_EXPOSURE,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_ORIENTATION,
+ ov02e10->hflip->val | ov02e10->vflip->val << 1, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_VTS,
+ ov02e10->cur_mode->height + ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_1, &ret);
+ ov02e10_test_pattern(ov02e10, ctrl->val, &ret);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
+ OV02E10_COMMAND_UPDATE, &ret);
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov02e10_ctrl_ops = {
+ .s_ctrl = ov02e10_set_ctrl,
+};
+
+static int ov02e10_init_controls(struct ov02e10 *ov02e10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr = &ov02e10->ctrl_handler;
+ const struct ov02e10_mode *mode = ov02e10->cur_mode;
+ u32 vblank_min, vblank_max, vblank_def;
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, h_blank, pixel_rate;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 12);
+
+ ov02e10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov02e10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ov02e10->link_freq_index,
+ 0, link_freq_menu_items);
+ if (ov02e10->link_freq)
+ ov02e10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = to_pixel_rate(ov02e10->link_freq_index);
+ ov02e10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ pixel_rate, 1, pixel_rate);
+
+ vblank_min = mode->vts_min - mode->height;
+ vblank_max = OV02E10_VTS_MAX - mode->height;
+ vblank_def = mode->vts_def - mode->height;
+ ov02e10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_def);
+
+ h_blank = mode->hts - mode->width;
+ ov02e10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (ov02e10->hblank)
+ ov02e10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV02E10_ANAL_GAIN_MIN, OV02E10_ANAL_GAIN_MAX,
+ OV02E10_ANAL_GAIN_STEP, OV02E10_ANAL_GAIN_MIN);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV02E10_DGTL_GAIN_MIN, OV02E10_DGTL_GAIN_MAX,
+ OV02E10_DGTL_GAIN_STEP, OV02E10_DGTL_GAIN_DEFAULT);
+
+ exposure_max = mode->vts_def - OV02E10_EXPOSURE_MAX_MARGIN;
+ ov02e10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV02E10_EXPOSURE_MIN,
+ exposure_max,
+ OV02E10_EXPOSURE_STEP,
+ exposure_max);
+
+ ov02e10->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (ov02e10->hflip)
+ ov02e10->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ ov02e10->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ov02e10->vflip)
+ ov02e10->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov02e10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov02e10_test_pattern_menu) - 1,
+ 0, 0, ov02e10_test_pattern_menu);
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov02e10_ctrl_ops, &props);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov02e10->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov02e10_update_pad_format(const struct ov02e10_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
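+/*
+ * Start/stop streaming through the stream-control register 0xa0 on
+ * page 0, followed by an update-trigger write on page 1.
+ */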
+static int ov02e10_set_stream_mode(struct ov02e10 *ov02e10, u8 val)
+{
+ int ret = 0;
+
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG, OV02E10_PAGE_0, &ret);
+ cci_write(ov02e10->regmap, CCI_REG8(0xa0), val, &ret);
+ cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG, OV02E10_PAGE_1, &ret);
+ cci_write(ov02e10->regmap, CCI_REG8(0x01), 0x02, &ret);
+
+ return ret;
+}
+
+static int ov02e10_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+ const struct reg_sequence_list *reg_list;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret)
+ return ret;
+
+ reg_list = &ov02e10->cur_mode->reg_list;
+ ret = regmap_multi_reg_write(ov02e10->regmap, reg_list->regs,
+ reg_list->num_regs);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode\n");
+ goto out;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov02e10->sd.ctrl_handler);
+ if (ret)
+ goto out;
+
+ ret = ov02e10_set_stream_mode(ov02e10, 1);
+
+out:
+ if (ret)
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static int ov02e10_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+
+ ov02e10_set_stream_mode(ov02e10, 0);
+ pm_runtime_put(&client->dev);
+
+ return 0;
+}
+
+static int ov02e10_get_pm_resources(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+ int i;
+
+ ov02e10->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov02e10->reset))
+ return dev_err_probe(dev, PTR_ERR(ov02e10->reset),
+ "failed to get reset gpio\n");
+
+ for (i = 0; i < ARRAY_SIZE(ov02e10_supply_names); i++)
+ ov02e10->supplies[i].supply = ov02e10_supply_names[i];
+
+ return devm_regulator_bulk_get(dev, ARRAY_SIZE(ov02e10_supply_names),
+ ov02e10->supplies);
+}
+
+static int ov02e10_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+
+ if (ov02e10->reset)
+ gpiod_set_value_cansleep(ov02e10->reset, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(ov02e10_supply_names),
+ ov02e10->supplies);
+
+ clk_disable_unprepare(ov02e10->img_clk);
+
+ return 0;
+}
+
+static int ov02e10_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+ int ret;
+
+ ret = clk_prepare_enable(ov02e10->img_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable imaging clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov02e10_supply_names),
+ ov02e10->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators\n");
+ goto disable_clk;
+ }
+
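+ /*
+ * Give the supplies and clock time to settle before releasing
+ * reset, then let the sensor boot before the first I2C access.
+ */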
+ if (ov02e10->reset) {
+ usleep_range(5000, 5100);
+ gpiod_set_value_cansleep(ov02e10->reset, 0);
+ usleep_range(8000, 8100);
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(ov02e10->img_clk);
+
+ return ret;
+}
+
+static int ov02e10_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+ const struct ov02e10_mode *mode;
+ s32 vblank_def, h_blank;
+ int ret = 0;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height, fmt->format.width,
+ fmt->format.height);
+
+ ov02e10_update_pad_format(mode, &fmt->format);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
+ } else {
+ ov02e10->cur_mode = mode;
+ ret = __v4l2_ctrl_s_ctrl(ov02e10->link_freq,
+ ov02e10->link_freq_index);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl_int64(ov02e10->pixel_rate,
+ to_pixel_rate(ov02e10->link_freq_index));
+ if (ret)
+ return ret;
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ ret = __v4l2_ctrl_modify_range(ov02e10->vblank,
+ mode->vts_min - mode->height,
+ OV02E10_VTS_MAX - mode->height,
+ 1, vblank_def);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl(ov02e10->vblank, vblank_def);
+ if (ret)
+ return ret;
+
+ h_blank = to_pixels_per_line(mode->hts, ov02e10->link_freq_index);
+ h_blank -= mode->width;
+ ret = __v4l2_ctrl_modify_range(ov02e10->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ return ret;
+}
+
+static int ov02e10_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_state_get_format(sd_state, fmt->pad);
+ else
+ ov02e10_update_pad_format(ov02e10->cur_mode, &fmt->format);
+
+ return 0;
+}
+
+static int ov02e10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov02e10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov02e10_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ ov02e10_update_pad_format(&supported_modes[0],
+ v4l2_subdev_state_get_format(sd_state, 0));
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov02e10_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov02e10_pad_ops = {
+ .set_fmt = ov02e10_set_format,
+ .get_fmt = ov02e10_get_format,
+ .enum_mbus_code = ov02e10_enum_mbus_code,
+ .enum_frame_size = ov02e10_enum_frame_size,
+ .enable_streams = ov02e10_enable_streams,
+ .disable_streams = ov02e10_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov02e10_subdev_ops = {
+ .video = &ov02e10_video_ops,
+ .pad = &ov02e10_pad_ops,
+};
+
+static const struct media_entity_operations ov02e10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov02e10_internal_ops = {
+ .init_state = ov02e10_init_state,
+};
+
+static int ov02e10_identify_module(struct ov02e10 *ov02e10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
+ int ret;
+ u64 val;
+
+ ret = cci_write(ov02e10->regmap, OV02E10_REG_PAGE_FLAG,
+ OV02E10_PAGE_0, NULL);
+ cci_read(ov02e10->regmap, OV02E10_REG_CHIP_ID, &val, &ret);
+ if (ret)
+ return ret;
+
+ if (val != OV02E10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV02E10_CHIP_ID, (u32)val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov02e10_check_hwcfg(struct device *dev, struct ov02e10 *ov02e10)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ unsigned long link_freq_bitmap;
+ u32 ext_clk;
+ int ret;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
+
+ ov02e10->img_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ov02e10->img_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(ov02e10->img_clk),
+ "failed to get imaging clock\n");
+ goto out_err;
+ }
+
+ if (ov02e10->img_clk) {
+ ext_clk = clk_get_rate(ov02e10->img_clk);
+ } else {
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &ext_clk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency\n");
+ goto out_err;
+ }
+ }
+
+ if (ext_clk != OV02E10_MCLK) {
+ dev_err(dev, "external clock %d is not supported\n",
+ ext_clk);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV02E10_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &link_freq_bitmap);
+ if (ret)
+ goto out_err;
+
+ /* v4l2_link_freq_to_bitmap() guarantees at least 1 bit is set */
+ ov02e10->link_freq_index = ffs(link_freq_bitmap) - 1;
+ ov02e10->mipi_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+out_err:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static void ov02e10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ ov02e10_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
+}
+
+static int ov02e10_probe(struct i2c_client *client)
+{
+ struct ov02e10 *ov02e10;
+ int ret;
+
+ ov02e10 = devm_kzalloc(&client->dev, sizeof(*ov02e10), GFP_KERNEL);
+ if (!ov02e10)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ov02e10->sd, client, &ov02e10_subdev_ops);
+
+ /* Check HW config */
+ ret = ov02e10_check_hwcfg(&client->dev, ov02e10);
+ if (ret)
+ return ret;
+
+ /* Initialize the CCI register map */
+ ov02e10->regmap = devm_cci_regmap_init_i2c(client, 8);
+ if (IS_ERR(ov02e10->regmap))
+ return PTR_ERR(ov02e10->regmap);
+
+ ret = ov02e10_get_pm_resources(&client->dev);
+ if (ret)
+ return ret;
+
+ ret = ov02e10_power_on(&client->dev);
+ if (ret) {
+ dev_err_probe(&client->dev, ret, "failed to power on\n");
+ return ret;
+ }
+
+ /* Check module identity */
+ ret = ov02e10_identify_module(ov02e10);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ goto probe_error_power_off;
+ }
+
+ ov02e10->cur_mode = &supported_modes[0];
+ ret = ov02e10_init_controls(ov02e10);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d\n", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ /* Initialize subdev */
+ ov02e10->sd.internal_ops = &ov02e10_internal_ops;
+ ov02e10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov02e10->sd.entity.ops = &ov02e10_subdev_entity_ops;
+ ov02e10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ ov02e10->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov02e10->sd.entity, 1, &ov02e10->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov02e10->sd.state_lock = ov02e10->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&ov02e10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to init subdev: %d", ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&ov02e10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_v4l2_subdev_cleanup;
+ }
+
+ pm_runtime_idle(&client->dev);
+ return 0;
+
+probe_error_v4l2_subdev_cleanup:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ v4l2_subdev_cleanup(&ov02e10->sd);
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov02e10->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov02e10->sd.ctrl_handler);
+
+probe_error_power_off:
+ ov02e10_power_off(&client->dev);
+
+ return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ov02e10_pm_ops, ov02e10_power_off,
+ ov02e10_power_on, NULL);
+
+static const struct acpi_device_id ov02e10_acpi_ids[] = {
+ { "OVTI02E1" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov02e10_acpi_ids);
+
+static const struct of_device_id ov02e10_of_match[] = {
+ { .compatible = "ovti,ov02e10" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov02e10_of_match);
+
+static struct i2c_driver ov02e10_i2c_driver = {
+ .driver = {
+ .name = "ov02e10",
+ .pm = pm_sleep_ptr(&ov02e10_pm_ops),
+ .acpi_match_table = ov02e10_acpi_ids,
+ .of_match_table = ov02e10_of_match,
+ },
+ .probe = ov02e10_probe,
+ .remove = ov02e10_remove,
+};
+
+module_i2c_driver(ov02e10_i2c_driver);
+
+MODULE_AUTHOR("Jingjing Xiong <jingjing.xiong@intel.com>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_AUTHOR("Alan Stern <stern@rowland.harvard.edu>");
+MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@linaro.org>");
+MODULE_DESCRIPTION("OmniVision OV02E10 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index cf0e41fc3071..e0094305ca2a 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -107,6 +108,7 @@
enum {
OV08X40_LINK_FREQ_400MHZ_INDEX,
+ OV08X40_LINK_FREQ_749MHZ_INDEX,
};
struct ov08x40_reg {
@@ -150,67 +152,7 @@ struct ov08x40_mode {
u16 exposure_shift;
};
-static const struct ov08x40_reg mipi_data_rate_800mbps[] = {
- {0x0103, 0x01},
- {0x1000, 0x00},
- {0x1601, 0xd0},
- {0x1001, 0x04},
- {0x5004, 0x53},
- {0x5110, 0x00},
- {0x5111, 0x14},
- {0x5112, 0x01},
- {0x5113, 0x7b},
- {0x5114, 0x00},
- {0x5152, 0xa3},
- {0x5a52, 0x1f},
- {0x5a1a, 0x0e},
- {0x5a1b, 0x10},
- {0x5a1f, 0x0e},
- {0x5a27, 0x0e},
- {0x6002, 0x2e},
-};
-
-static const struct ov08x40_reg mode_3856x2416_regs[] = {
- {0x5000, 0x5d},
- {0x5001, 0x20},
- {0x5008, 0xb0},
- {0x50c1, 0x00},
- {0x53c1, 0x00},
- {0x5f40, 0x00},
- {0x5f41, 0x40},
- {0x0300, 0x3a},
- {0x0301, 0xc8},
- {0x0302, 0x31},
- {0x0303, 0x03},
- {0x0304, 0x01},
- {0x0305, 0xa1},
- {0x0306, 0x04},
- {0x0307, 0x01},
- {0x0308, 0x03},
- {0x0309, 0x03},
- {0x0310, 0x0a},
- {0x0311, 0x02},
- {0x0312, 0x01},
- {0x0313, 0x08},
- {0x0314, 0x66},
- {0x0315, 0x00},
- {0x0316, 0x34},
- {0x0320, 0x02},
- {0x0321, 0x03},
- {0x0323, 0x05},
- {0x0324, 0x01},
- {0x0325, 0xb8},
- {0x0326, 0x4a},
- {0x0327, 0x04},
- {0x0329, 0x00},
- {0x032a, 0x05},
- {0x032b, 0x00},
- {0x032c, 0x00},
- {0x032d, 0x00},
- {0x032e, 0x02},
- {0x032f, 0xa0},
- {0x0350, 0x00},
- {0x0360, 0x01},
+static const struct ov08x40_reg ov08x40_global_regs[] = {
{0x1216, 0x60},
{0x1217, 0x5b},
{0x1218, 0x00},
@@ -220,7 +162,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x198e, 0x00},
{0x198f, 0x01},
{0x3009, 0x04},
- {0x3012, 0x41},
{0x3015, 0x00},
{0x3016, 0xb0},
{0x3017, 0xf0},
@@ -245,11 +186,10 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3058, 0x80},
{0x3059, 0x00},
{0x3107, 0x86},
- {0x3400, 0x1c},
{0x3401, 0x80},
{0x3402, 0x8c},
- {0x3419, 0x13},
- {0x341a, 0x89},
+ {0x3404, 0x01},
+ {0x3407, 0x01},
{0x341b, 0x30},
{0x3420, 0x00},
{0x3421, 0x00},
@@ -257,7 +197,7 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3423, 0x00},
{0x3424, 0x00},
{0x3425, 0x00},
- {0x3426, 0x00},
+ {0x3426, 0x10},
{0x3427, 0x00},
{0x3428, 0x0f},
{0x3429, 0x00},
@@ -286,27 +226,28 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3455, 0x80},
{0x3456, 0x08},
{0x3500, 0x00},
- {0x3501, 0x02},
- {0x3502, 0x00},
+ {0x3502, 0x10},
{0x3504, 0x4c},
{0x3506, 0x30},
{0x3507, 0x00},
- {0x3508, 0x01},
- {0x3509, 0x00},
{0x350a, 0x01},
{0x350b, 0x00},
{0x350c, 0x00},
{0x3540, 0x00},
- {0x3541, 0x01},
- {0x3542, 0x00},
{0x3544, 0x4c},
{0x3546, 0x30},
{0x3547, 0x00},
- {0x3548, 0x01},
{0x3549, 0x00},
{0x354a, 0x01},
{0x354b, 0x00},
{0x354c, 0x00},
+ {0x3601, 0x40},
+ {0x3602, 0x90},
+ {0x3608, 0x0a},
+ {0x3609, 0x08},
+ {0x360f, 0x99},
+ {0x3680, 0xa4},
+ {0x3682, 0x80},
{0x3688, 0x02},
{0x368a, 0x2e},
{0x368e, 0x71},
@@ -316,9 +257,7 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x36a4, 0x00},
{0x36a6, 0x00},
{0x3711, 0x00},
- {0x3712, 0x51},
{0x3713, 0x00},
- {0x3714, 0x24},
{0x3716, 0x00},
{0x3718, 0x07},
{0x371a, 0x1c},
@@ -327,7 +266,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3725, 0x32},
{0x3727, 0x05},
{0x3760, 0x02},
- {0x3761, 0x17},
{0x3762, 0x02},
{0x3763, 0x02},
{0x3764, 0x02},
@@ -337,41 +275,19 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3768, 0x02},
{0x3769, 0x00},
{0x376b, 0x20},
- {0x376e, 0x03},
- {0x37b0, 0x00},
- {0x37b1, 0xab},
{0x37b2, 0x01},
- {0x37b3, 0x82},
- {0x37b4, 0x00},
- {0x37b5, 0xe4},
- {0x37b6, 0x01},
- {0x37b7, 0xee},
{0x3800, 0x00},
{0x3801, 0x00},
{0x3802, 0x00},
- {0x3803, 0x00},
{0x3804, 0x0f},
{0x3805, 0x1f},
{0x3806, 0x09},
- {0x3807, 0x7f},
- {0x3808, 0x0f},
- {0x3809, 0x10},
- {0x380a, 0x09},
- {0x380b, 0x70},
{0x380c, 0x02},
- {0x380d, 0x80},
- {0x380e, 0x13},
- {0x380f, 0x88},
{0x3810, 0x00},
- {0x3811, 0x08},
{0x3812, 0x00},
- {0x3813, 0x07},
{0x3814, 0x11},
{0x3815, 0x11},
- {0x3820, 0x00},
- {0x3821, 0x04},
{0x3822, 0x00},
- {0x3823, 0x04},
{0x3828, 0x0f},
{0x382a, 0x80},
{0x382e, 0x41},
@@ -386,7 +302,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x3847, 0x00},
{0x384a, 0x00},
{0x384c, 0x02},
- {0x384d, 0x80},
{0x3856, 0x50},
{0x3857, 0x30},
{0x3858, 0x80},
@@ -400,9 +315,45 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x388d, 0x00},
{0x388e, 0x00},
{0x388f, 0x00},
- {0x3894, 0x00},
{0x3895, 0x00},
+ {0x3911, 0x90},
+ {0x3913, 0x90},
+ {0x3921, 0x0f},
+ {0x3928, 0x15},
+ {0x3929, 0x2a},
+ {0x392c, 0x02},
+ {0x392e, 0x04},
+ {0x392f, 0x03},
+ {0x3931, 0x07},
+ {0x3932, 0x10},
+ {0x3938, 0x09},
+ {0x3a1f, 0x8a},
+ {0x3a22, 0x91},
+ {0x3a23, 0x15},
+ {0x3a25, 0x96},
+ {0x3a28, 0xb4},
+ {0x3a29, 0x26},
+ {0x3a2b, 0xba},
+ {0x3a2e, 0xbf},
+ {0x3a2f, 0x18},
+ {0x3a31, 0xc1},
+ {0x3a74, 0x84},
+ {0x3a99, 0x84},
+ {0x3ab9, 0xa6},
+ {0x3aba, 0xba},
+ {0x3b0a, 0x01},
+ {0x3b0b, 0x00},
+ {0x3b0e, 0x01},
+ {0x3b0f, 0x00},
+ {0x3b12, 0x84},
+ {0x3b14, 0xbb},
+ {0x3b15, 0xbf},
+ {0x3b1b, 0xc9},
+ {0x3b21, 0xc9},
+ {0x3b3f, 0x9d},
+ {0x3b45, 0x9d},
{0x3c84, 0x00},
+ {0x3d84, 0x04},
{0x3d85, 0x8b},
{0x3daa, 0x80},
{0x3dab, 0x14},
@@ -424,44 +375,21 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x4008, 0x00},
{0x4009, 0x05},
{0x400a, 0x00},
- {0x400b, 0x08},
{0x400c, 0x00},
- {0x400d, 0x08},
{0x400e, 0x14},
{0x4010, 0xf4},
{0x4011, 0x03},
{0x4012, 0x55},
{0x4015, 0x00},
- {0x4016, 0x2d},
{0x4017, 0x00},
{0x4018, 0x0f},
+ {0x4019, 0x00},
+ {0x401a, 0x40},
{0x401b, 0x08},
{0x401c, 0x00},
{0x401d, 0x10},
{0x401e, 0x02},
{0x401f, 0x00},
- {0x4050, 0x06},
- {0x4051, 0xff},
- {0x4052, 0xff},
- {0x4053, 0xff},
- {0x4054, 0xff},
- {0x4055, 0xff},
- {0x4056, 0xff},
- {0x4057, 0x7f},
- {0x4058, 0x00},
- {0x4059, 0x00},
- {0x405a, 0x00},
- {0x405b, 0x00},
- {0x405c, 0x07},
- {0x405d, 0xff},
- {0x405e, 0x07},
- {0x405f, 0xff},
- {0x4080, 0x78},
- {0x4081, 0x78},
- {0x4082, 0x78},
- {0x4083, 0x78},
- {0x4019, 0x00},
- {0x401a, 0x40},
{0x4020, 0x04},
{0x4021, 0x00},
{0x4022, 0x04},
@@ -486,6 +414,22 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x4045, 0x80},
{0x4046, 0x00},
{0x4047, 0x80},
+ {0x4050, 0x06},
+ {0x4051, 0xff},
+ {0x4052, 0xff},
+ {0x4053, 0xff},
+ {0x4054, 0xff},
+ {0x4055, 0xff},
+ {0x4056, 0xff},
+ {0x4057, 0x7f},
+ {0x4058, 0x00},
+ {0x4059, 0x00},
+ {0x405a, 0x00},
+ {0x405b, 0x00},
+ {0x405c, 0x07},
+ {0x405d, 0xff},
+ {0x405e, 0x07},
+ {0x405f, 0xff},
{0x4060, 0x00},
{0x4061, 0x00},
{0x4062, 0x00},
@@ -518,6 +462,10 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x407d, 0x00},
{0x407e, 0x00},
{0x407f, 0x00},
+ {0x4080, 0x78},
+ {0x4081, 0x78},
+ {0x4082, 0x78},
+ {0x4083, 0x78},
{0x40e0, 0x00},
{0x40e1, 0x00},
{0x40e2, 0x00},
@@ -560,7 +508,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x431b, 0x00},
{0x431c, 0x00},
{0x4500, 0x07},
- {0x4501, 0x00},
{0x4502, 0x00},
{0x4503, 0x0f},
{0x4504, 0x80},
@@ -573,7 +520,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x4510, 0x00},
{0x4523, 0x00},
{0x4526, 0x00},
- {0x4542, 0x00},
{0x4543, 0x00},
{0x4544, 0x00},
{0x4545, 0x00},
@@ -592,8 +538,6 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x480c, 0x80},
{0x480f, 0x32},
{0x4813, 0xe4},
- {0x4837, 0x14},
- {0x4850, 0x42},
{0x4884, 0x04},
{0x4c00, 0xf8},
{0x4c01, 0x44},
@@ -604,93 +548,37 @@ static const struct ov08x40_reg mode_3856x2416_regs[] = {
{0x4d05, 0x00},
{0x4d06, 0x0c},
{0x4d07, 0x00},
- {0x3d84, 0x04},
- {0x3680, 0xa4},
- {0x3682, 0x80},
- {0x3601, 0x40},
- {0x3602, 0x90},
- {0x3608, 0x0a},
- {0x3938, 0x09},
- {0x3a74, 0x84},
- {0x3a99, 0x84},
- {0x3ab9, 0xa6},
- {0x3aba, 0xba},
- {0x3b12, 0x84},
- {0x3b14, 0xbb},
- {0x3b15, 0xbf},
- {0x3a29, 0x26},
- {0x3a1f, 0x8a},
- {0x3a22, 0x91},
- {0x3a25, 0x96},
- {0x3a28, 0xb4},
- {0x3a2b, 0xba},
- {0x3a2e, 0xbf},
- {0x3a31, 0xc1},
- {0x3a20, 0x00},
- {0x3939, 0x9d},
- {0x3902, 0x0e},
- {0x3903, 0x0e},
- {0x3904, 0x0e},
- {0x3905, 0x0e},
- {0x3906, 0x07},
- {0x3907, 0x0d},
- {0x3908, 0x11},
- {0x3909, 0x12},
- {0x360f, 0x99},
- {0x390c, 0x33},
- {0x390d, 0x66},
- {0x390e, 0xaa},
- {0x3911, 0x90},
- {0x3913, 0x90},
- {0x3915, 0x90},
- {0x3917, 0x90},
- {0x3b3f, 0x9d},
- {0x3b45, 0x9d},
- {0x3b1b, 0xc9},
- {0x3b21, 0xc9},
- {0x3440, 0xa4},
- {0x3a23, 0x15},
- {0x3a26, 0x1d},
- {0x3a2c, 0x4a},
- {0x3a2f, 0x18},
- {0x3a32, 0x55},
- {0x3b0a, 0x01},
- {0x3b0b, 0x00},
- {0x3b0e, 0x01},
- {0x3b0f, 0x00},
- {0x392c, 0x02},
- {0x392d, 0x02},
- {0x392e, 0x04},
- {0x392f, 0x03},
- {0x3930, 0x08},
- {0x3931, 0x07},
- {0x3932, 0x10},
- {0x3933, 0x0c},
- {0x3609, 0x08},
- {0x3921, 0x0f},
- {0x3928, 0x15},
- {0x3929, 0x2a},
- {0x392a, 0x54},
- {0x392b, 0xa8},
- {0x3426, 0x10},
- {0x3407, 0x01},
- {0x3404, 0x01},
- {0x3500, 0x00},
- {0x3501, 0x10},
- {0x3502, 0x10},
- {0x3508, 0x0f},
- {0x3509, 0x80},
-};
-
-static const struct ov08x40_reg mode_1928x1208_regs[] = {
- {0x5000, 0x55},
- {0x5001, 0x00},
{0x5008, 0xb0},
{0x50c1, 0x00},
{0x53c1, 0x00},
{0x5f40, 0x00},
{0x5f41, 0x40},
- {0x0300, 0x3a},
+};
+
+static const struct ov08x40_reg_list ov08x40_global_setting = {
+ .num_of_regs = ARRAY_SIZE(ov08x40_global_regs),
+ .regs = ov08x40_global_regs,
+};
+
+static const struct ov08x40_reg mipi_data_rate_800mbps[] = {
+ {0x0103, 0x01},
+ {0x1000, 0x00},
+ {0x1601, 0xd0},
+ {0x1001, 0x04},
+ {0x5004, 0x53},
+ {0x5110, 0x00},
+ {0x5111, 0x14},
+ {0x5112, 0x01},
+ {0x5113, 0x7b},
+ {0x5114, 0x00},
+ {0x5152, 0xa3},
+ {0x5a52, 0x1f},
+ {0x5a1a, 0x0e},
+ {0x5a1b, 0x10},
+ {0x5a1f, 0x0e},
+ {0x5a27, 0x0e},
+ {0x6002, 0x2e},
+ {0x0300, 0x3a}, /* PLL CTRL */
{0x0301, 0xc8},
{0x0302, 0x31},
{0x0303, 0x03},
@@ -723,421 +611,522 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = {
{0x032f, 0xa0},
{0x0350, 0x00},
{0x0360, 0x01},
- {0x1216, 0x60},
- {0x1217, 0x5b},
- {0x1218, 0x00},
- {0x1220, 0x24},
- {0x198a, 0x00},
- {0x198b, 0x01},
- {0x198e, 0x00},
- {0x198f, 0x01},
- {0x3009, 0x04},
+ {0x3012, 0x41}, /* MIPI SC Lanes */
+};
+
+static const struct ov08x40_reg mipi_data_rate_1500mbps[] = {
+ {0x0103, 0x01},
+ {0x1000, 0x00},
+ {0x1601, 0xd0},
+ {0x1001, 0x04},
+ {0x5004, 0x53},
+ {0x5110, 0x00},
+ {0x5111, 0x14},
+ {0x5112, 0x01},
+ {0x5113, 0x7b},
+ {0x5114, 0x00},
+ {0x5152, 0xa3},
+ {0x5a52, 0x1f},
+ {0x5a1a, 0x0e},
+ {0x5a1b, 0x10},
+ {0x5a1f, 0x0e},
+ {0x5a27, 0x0e},
+ {0x6002, 0x2e},
+ {0x0300, 0x3a}, /* PLL */
+ {0x0301, 0x88},
+ {0x0302, 0x31},
+ {0x0303, 0x05},
+ {0x0304, 0x01},
+ {0x0305, 0x38},
+ {0x0306, 0x04},
+ {0x0307, 0x00},
+ {0x0308, 0x03},
+ {0x0309, 0x02},
+ {0x0310, 0x0a},
+ {0x0311, 0x02},
+ {0x0312, 0x01},
+ {0x0313, 0x08},
+ {0x0314, 0x00},
+ {0x0315, 0x00},
+ {0x0316, 0x2c},
+ {0x0320, 0x02},
+ {0x0321, 0x03},
+ {0x0323, 0x05},
+ {0x0324, 0x01},
+ {0x0325, 0xb8},
+ {0x0326, 0x4a},
+ {0x0327, 0x04},
+ {0x0329, 0x00},
+ {0x032a, 0x05},
+ {0x032b, 0x00},
+ {0x032c, 0x00},
+ {0x032d, 0x00},
+ {0x032e, 0x02},
+ {0x032f, 0xa0},
+ {0x0350, 0x00},
+ {0x0360, 0x01},
+ {0x3012, 0x21}, /* MIPI SC Lanes */
+};
+
+static const struct ov08x40_reg mode_3856x2176_regs_800mbps[] = {
+ {0x5000, 0x5d},
+ {0x5001, 0x20},
+ {0x3012, 0x41},
+ {0x3400, 0x1c},
+ {0x3419, 0x13},
+ {0x341a, 0x89},
+ {0x3426, 0x00},
+ {0x3501, 0x02},
+ {0x3502, 0x00},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3541, 0x01},
+ {0x3542, 0x00},
+ {0x3548, 0x01},
+ {0x3712, 0x51},
+ {0x3714, 0x24},
+ {0x3761, 0x17},
+ {0x376e, 0x03},
+ {0x37b0, 0x00},
+ {0x37b1, 0xab},
+ {0x37b3, 0x82},
+ {0x37b4, 0x00},
+ {0x37b5, 0xe4},
+ {0x37b6, 0x01},
+ {0x37b7, 0xee},
+ {0x3820, 0x00},
+ {0x3821, 0x04},
+ {0x3823, 0x04},
+ {0x384d, 0x80},
+ {0x3894, 0x00},
+ {0x400b, 0x08},
+ {0x400d, 0x08},
+ {0x4016, 0x2d},
+ {0x4501, 0x00},
+ {0x4542, 0x00},
+ {0x4837, 0x14},
+ {0x4850, 0x42},
+ {0x3a20, 0x00},
+ {0x3939, 0x9d},
+ {0x3902, 0x0e},
+ {0x3903, 0x0e},
+ {0x3904, 0x0e},
+ {0x3905, 0x0e},
+ {0x3906, 0x07},
+ {0x3907, 0x0d},
+ {0x3908, 0x11},
+ {0x3909, 0x12},
+ {0x390c, 0x33},
+ {0x390d, 0x66},
+ {0x390e, 0xaa},
+ {0x3915, 0x90},
+ {0x3917, 0x90},
+ {0x3440, 0xa4},
+ {0x3a26, 0x1d},
+ {0x3a2c, 0x4a},
+ {0x3a32, 0x55},
+ {0x392d, 0x02},
+ {0x3930, 0x08},
+ {0x3933, 0x0c},
+ {0x392a, 0x54},
+ {0x392b, 0xa8},
+ {0x380d, 0x80},
+ {0x380e, 0x13},
+ {0x380f, 0x88},
+ {0x3803, 0x70},
+ {0x3807, 0x0f},
+ {0x3808, 0x0f},
+ {0x3809, 0x10},
+ {0x380a, 0x08},
+ {0x380b, 0x80},
+ {0x3811, 0x08},
+ {0x3813, 0x10},
+ {0x3501, 0x10},
+ {0x3508, 0x0f},
+ {0x3509, 0x80},
+ {0x3813, 0x0f},
+};
+
+/* OV08X 1C 3856x2176_DPHY1500M-2L */
+static const struct ov08x40_reg mode_3856x2176_regs_1500mbps[] = {
+ {0x5000, 0x5d},
+ {0x5001, 0x20},
+ {0x3012, 0x21},
+ {0x3400, 0x1c},
+ {0x3419, 0x12},
+ {0x341a, 0x99},
+ {0x3426, 0x00},
+ {0x3501, 0x02},
+ {0x3502, 0x00},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3541, 0x01},
+ {0x3542, 0x00},
+ {0x3548, 0x01},
+ {0x3712, 0x51},
+ {0x3714, 0x24},
+ {0x3761, 0x17},
+ {0x376e, 0x03},
+ {0x37b0, 0x00},
+ {0x37b1, 0xab},
+ {0x37b3, 0x82},
+ {0x37b4, 0x00},
+ {0x37b5, 0xe4},
+ {0x37b6, 0x01},
+ {0x37b7, 0xee},
+ {0x3803, 0x70},
+ {0x3807, 0x0f},
+ {0x3808, 0x0f},
+ {0x3809, 0x10},
+ {0x380a, 0x08},
+ {0x380b, 0x80},
+ {0x380d, 0xa0},
+ {0x380e, 0x12},
+ {0x380f, 0x98},
+ {0x3811, 0x08},
+ {0x3813, 0x10},
+ {0x3820, 0x00},
+ {0x3821, 0x04},
+ {0x3823, 0x04},
+ {0x384d, 0xa0},
+ {0x3894, 0x00},
+ {0x400b, 0x08},
+ {0x400d, 0x08},
+ {0x4016, 0x2d},
+ {0x4501, 0x00},
+ {0x4542, 0x00},
+ {0x4837, 0x0a},
+ {0x4850, 0x47},
+ {0x3a20, 0x00},
+ {0x3939, 0x9d},
+ {0x3902, 0x0e},
+ {0x3903, 0x0e},
+ {0x3904, 0x0e},
+ {0x3905, 0x0e},
+ {0x3906, 0x07},
+ {0x3907, 0x0d},
+ {0x3908, 0x11},
+ {0x3909, 0x12},
+ {0x390c, 0x33},
+ {0x390d, 0x66},
+ {0x390e, 0xaa},
+ {0x3915, 0x90},
+ {0x3917, 0x90},
+ {0x3440, 0xa4},
+ {0x3a26, 0x1d},
+ {0x3a2c, 0x4a},
+ {0x3a32, 0x55},
+ {0x392d, 0x02},
+ {0x3930, 0x08},
+ {0x3933, 0x0c},
+ {0x392a, 0x54},
+ {0x392b, 0xa8},
+ {0x3501, 0x10},
+ {0x3508, 0x0f},
+ {0x3509, 0x80},
+ {0x3813, 0x0f},
+};
+
+/* OV08X 4C1stg 1928x1088_DPHY1500M-2L 30fps */
+static const struct ov08x40_reg mode_1928x1088_regs_1500mbps[] = {
+ {0x5000, 0x55},
+ {0x5001, 0x00},
+ {0x3012, 0x21},
+ {0x3400, 0x30},
+ {0x3419, 0x08},
+ {0x341a, 0x4f},
+ {0x3426, 0x00},
+ {0x3501, 0x02},
+ {0x3502, 0x00},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3541, 0x01},
+ {0x3542, 0x00},
+ {0x3548, 0x01},
+ {0x3712, 0x50},
+ {0x3714, 0x21},
+ {0x3761, 0x28},
+ {0x376e, 0x07},
+ {0x37b0, 0x01},
+ {0x37b1, 0x0f},
+ {0x37b3, 0xd6},
+ {0x37b4, 0x01},
+ {0x37b5, 0x48},
+ {0x37b6, 0x02},
+ {0x37b7, 0x40},
+ {0x3803, 0x78},
+ {0x3807, 0x07},
+ {0x3808, 0x07},
+ {0x3809, 0x88},
+ {0x380a, 0x04},
+ {0x380b, 0x40},
+ {0x380d, 0xf0},
+ {0x380e, 0x08},
+ {0x380f, 0x4e},
+ {0x3811, 0x04},
+ {0x3813, 0x03},
+ {0x3820, 0x02},
+ {0x3821, 0x14},
+ {0x3823, 0x84},
+ {0x384d, 0xf0},
+ {0x3894, 0x03},
+ {0x400b, 0x04},
+ {0x400d, 0x04},
+ {0x4016, 0x27},
+ {0x4501, 0x10},
+ {0x4542, 0x01},
+ {0x4837, 0x0a},
+ {0x4850, 0x47},
+ {0x4911, 0x00},
+ {0x4919, 0x00},
+ {0x491a, 0x40},
+ {0x4920, 0x04},
+ {0x4921, 0x00},
+ {0x4922, 0x04},
+ {0x4923, 0x00},
+ {0x4924, 0x04},
+ {0x4925, 0x00},
+ {0x4926, 0x04},
+ {0x4927, 0x00},
+ {0x4930, 0x00},
+ {0x4931, 0x00},
+ {0x4932, 0x00},
+ {0x4933, 0x00},
+ {0x4934, 0x00},
+ {0x4935, 0x00},
+ {0x4936, 0x00},
+ {0x4937, 0x00},
+ {0x4940, 0x00},
+ {0x4941, 0x80},
+ {0x4942, 0x00},
+ {0x4943, 0x80},
+ {0x4944, 0x00},
+ {0x4945, 0x80},
+ {0x4946, 0x00},
+ {0x4947, 0x80},
+ {0x4960, 0x00},
+ {0x4961, 0x00},
+ {0x4962, 0x00},
+ {0x4963, 0x00},
+ {0x4964, 0x00},
+ {0x4965, 0x00},
+ {0x4966, 0x00},
+ {0x4967, 0x00},
+ {0x4968, 0x00},
+ {0x4969, 0x00},
+ {0x496a, 0x00},
+ {0x496b, 0x00},
+ {0x496c, 0x00},
+ {0x496d, 0x00},
+ {0x496e, 0x00},
+ {0x496f, 0x00},
+ {0x4970, 0x00},
+ {0x4971, 0x00},
+ {0x4972, 0x00},
+ {0x4973, 0x00},
+ {0x4974, 0x00},
+ {0x4975, 0x00},
+ {0x4976, 0x00},
+ {0x4977, 0x00},
+ {0x4978, 0x00},
+ {0x4979, 0x00},
+ {0x497a, 0x00},
+ {0x497b, 0x00},
+ {0x497c, 0x00},
+ {0x497d, 0x00},
+ {0x497e, 0x00},
+ {0x497f, 0x00},
+ {0x49e0, 0x00},
+ {0x49e1, 0x00},
+ {0x49e2, 0x00},
+ {0x49e3, 0x00},
+ {0x49e4, 0x00},
+ {0x49e5, 0x00},
+ {0x49e6, 0x00},
+ {0x49e7, 0x00},
+ {0x49e8, 0x00},
+ {0x49e9, 0x80},
+ {0x49ea, 0x00},
+ {0x49eb, 0x80},
+ {0x49ec, 0x00},
+ {0x49ed, 0x80},
+ {0x49ee, 0x00},
+ {0x49ef, 0x80},
+ {0x49f0, 0x02},
+ {0x49f1, 0x04},
+ {0x3a20, 0x05},
+ {0x3939, 0x6b},
+ {0x3902, 0x10},
+ {0x3903, 0x10},
+ {0x3904, 0x10},
+ {0x3905, 0x10},
+ {0x3906, 0x01},
+ {0x3907, 0x0b},
+ {0x3908, 0x10},
+ {0x3909, 0x13},
+ {0x390b, 0x11},
+ {0x390c, 0x21},
+ {0x390d, 0x32},
+ {0x390e, 0x76},
+ {0x3a1a, 0x1c},
+ {0x3a26, 0x17},
+ {0x3a2c, 0x50},
+ {0x3a32, 0x4f},
+ {0x3ace, 0x01},
+ {0x3ad2, 0x01},
+ {0x3ad6, 0x01},
+ {0x3ada, 0x01},
+ {0x3ade, 0x01},
+ {0x3ae2, 0x01},
+ {0x3aee, 0x01},
+ {0x3af2, 0x01},
+ {0x3af6, 0x01},
+ {0x3afa, 0x01},
+ {0x3afe, 0x01},
+ {0x3b02, 0x01},
+ {0x3b06, 0x01},
+ {0x392d, 0x01},
+ {0x3930, 0x09},
+ {0x3933, 0x0d},
+ {0x392a, 0x52},
+ {0x392b, 0xa3},
+ {0x340b, 0x1b},
+ {0x3501, 0x01},
+ {0x3508, 0x0f},
+ {0x3509, 0x00},
+ {0x3541, 0x00},
+ {0x3542, 0x80},
+ {0x3548, 0x0f},
+ {0x3813, 0x03},
+};
+
+static const struct ov08x40_reg mode_3856x2416_regs[] = {
+ {0x5000, 0x5d},
+ {0x5001, 0x20},
+ {0x3012, 0x41},
+ {0x3400, 0x1c},
+ {0x3419, 0x13},
+ {0x341a, 0x89},
+ {0x3426, 0x00},
+ {0x3501, 0x02},
+ {0x3502, 0x00},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3541, 0x01},
+ {0x3542, 0x00},
+ {0x3548, 0x01},
+ {0x3712, 0x51},
+ {0x3714, 0x24},
+ {0x3761, 0x17},
+ {0x376e, 0x03},
+ {0x37b0, 0x00},
+ {0x37b1, 0xab},
+ {0x37b3, 0x82},
+ {0x37b4, 0x00},
+ {0x37b5, 0xe4},
+ {0x37b6, 0x01},
+ {0x37b7, 0xee},
+ {0x3803, 0x00},
+ {0x3807, 0x7f},
+ {0x3808, 0x0f},
+ {0x3809, 0x10},
+ {0x380a, 0x09},
+ {0x380b, 0x70},
+ {0x380d, 0x80},
+ {0x380e, 0x13},
+ {0x380f, 0x88},
+ {0x3811, 0x08},
+ {0x3813, 0x07},
+ {0x3820, 0x00},
+ {0x3821, 0x04},
+ {0x3823, 0x04},
+ {0x384d, 0x80},
+ {0x3894, 0x00},
+ {0x400b, 0x08},
+ {0x400d, 0x08},
+ {0x4016, 0x2d},
+ {0x4501, 0x00},
+ {0x4542, 0x00},
+ {0x4837, 0x14},
+ {0x4850, 0x42},
+ {0x3a20, 0x00},
+ {0x3939, 0x9d},
+ {0x3902, 0x0e},
+ {0x3903, 0x0e},
+ {0x3904, 0x0e},
+ {0x3905, 0x0e},
+ {0x3906, 0x07},
+ {0x3907, 0x0d},
+ {0x3908, 0x11},
+ {0x3909, 0x12},
+ {0x390c, 0x33},
+ {0x390d, 0x66},
+ {0x390e, 0xaa},
+ {0x3915, 0x90},
+ {0x3917, 0x90},
+ {0x3440, 0xa4},
+ {0x3a26, 0x1d},
+ {0x3a2c, 0x4a},
+ {0x3a32, 0x55},
+ {0x392d, 0x02},
+ {0x3930, 0x08},
+ {0x3933, 0x0c},
+ {0x392a, 0x54},
+ {0x392b, 0xa8},
+ {0x3501, 0x10},
+ {0x3508, 0x0f},
+ {0x3509, 0x80},
+};
+
+static const struct ov08x40_reg mode_1928x1208_regs[] = {
+ {0x5000, 0x55},
+ {0x5001, 0x00},
{0x3012, 0x41},
- {0x3015, 0x00},
- {0x3016, 0xb0},
- {0x3017, 0xf0},
- {0x3018, 0xf0},
- {0x3019, 0xd2},
- {0x301a, 0xb0},
- {0x301c, 0x81},
- {0x301d, 0x02},
- {0x301e, 0x80},
- {0x3022, 0xf0},
- {0x3025, 0x89},
- {0x3030, 0x03},
- {0x3044, 0xc2},
- {0x3050, 0x35},
- {0x3051, 0x60},
- {0x3052, 0x25},
- {0x3053, 0x00},
- {0x3054, 0x00},
- {0x3055, 0x02},
- {0x3056, 0x80},
- {0x3057, 0x80},
- {0x3058, 0x80},
- {0x3059, 0x00},
- {0x3107, 0x86},
{0x3400, 0x1c},
- {0x3401, 0x80},
- {0x3402, 0x8c},
{0x3419, 0x08},
{0x341a, 0xaf},
- {0x341b, 0x30},
- {0x3420, 0x00},
- {0x3421, 0x00},
- {0x3422, 0x00},
- {0x3423, 0x00},
- {0x3424, 0x00},
- {0x3425, 0x00},
{0x3426, 0x00},
- {0x3427, 0x00},
- {0x3428, 0x0f},
- {0x3429, 0x00},
- {0x342a, 0x00},
- {0x342b, 0x00},
- {0x342c, 0x00},
- {0x342d, 0x00},
- {0x342e, 0x00},
- {0x342f, 0x11},
- {0x3430, 0x11},
- {0x3431, 0x10},
- {0x3432, 0x00},
- {0x3433, 0x00},
- {0x3434, 0x00},
- {0x3435, 0x00},
- {0x3436, 0x00},
- {0x3437, 0x00},
- {0x3442, 0x02},
- {0x3443, 0x02},
- {0x3444, 0x07},
- {0x3450, 0x00},
- {0x3451, 0x00},
- {0x3452, 0x18},
- {0x3453, 0x18},
- {0x3454, 0x00},
- {0x3455, 0x80},
- {0x3456, 0x08},
- {0x3500, 0x00},
{0x3501, 0x02},
{0x3502, 0x00},
- {0x3504, 0x4c},
- {0x3506, 0x30},
- {0x3507, 0x00},
{0x3508, 0x01},
{0x3509, 0x00},
- {0x350a, 0x01},
- {0x350b, 0x00},
- {0x350c, 0x00},
- {0x3540, 0x00},
{0x3541, 0x01},
{0x3542, 0x00},
- {0x3544, 0x4c},
- {0x3546, 0x30},
- {0x3547, 0x00},
{0x3548, 0x01},
- {0x3549, 0x00},
- {0x354a, 0x01},
- {0x354b, 0x00},
- {0x354c, 0x00},
- {0x3688, 0x02},
- {0x368a, 0x2e},
- {0x368e, 0x71},
- {0x3696, 0xd1},
- {0x3699, 0x00},
- {0x369a, 0x00},
- {0x36a4, 0x00},
- {0x36a6, 0x00},
- {0x3711, 0x00},
{0x3712, 0x50},
- {0x3713, 0x00},
{0x3714, 0x21},
- {0x3716, 0x00},
- {0x3718, 0x07},
- {0x371a, 0x1c},
- {0x371b, 0x00},
- {0x3720, 0x08},
- {0x3725, 0x32},
- {0x3727, 0x05},
- {0x3760, 0x02},
{0x3761, 0x28},
- {0x3762, 0x02},
- {0x3763, 0x02},
- {0x3764, 0x02},
- {0x3765, 0x2c},
- {0x3766, 0x04},
- {0x3767, 0x2c},
- {0x3768, 0x02},
- {0x3769, 0x00},
- {0x376b, 0x20},
{0x376e, 0x07},
{0x37b0, 0x01},
{0x37b1, 0x0f},
- {0x37b2, 0x01},
{0x37b3, 0xd6},
{0x37b4, 0x01},
{0x37b5, 0x48},
{0x37b6, 0x02},
{0x37b7, 0x40},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
{0x3803, 0x00},
- {0x3804, 0x0f},
- {0x3805, 0x1f},
- {0x3806, 0x09},
{0x3807, 0x7f},
{0x3808, 0x07},
{0x3809, 0x88},
{0x380a, 0x04},
{0x380b, 0xb8},
- {0x380c, 0x02},
{0x380d, 0xd0},
{0x380e, 0x11},
{0x380f, 0x5c},
- {0x3810, 0x00},
{0x3811, 0x04},
- {0x3812, 0x00},
{0x3813, 0x03},
- {0x3814, 0x11},
- {0x3815, 0x11},
{0x3820, 0x02},
{0x3821, 0x14},
- {0x3822, 0x00},
{0x3823, 0x04},
- {0x3828, 0x0f},
- {0x382a, 0x80},
- {0x382e, 0x41},
- {0x3837, 0x08},
- {0x383a, 0x81},
- {0x383b, 0x81},
- {0x383c, 0x11},
- {0x383d, 0x11},
- {0x383e, 0x00},
- {0x383f, 0x38},
- {0x3840, 0x00},
- {0x3847, 0x00},
- {0x384a, 0x00},
- {0x384c, 0x02},
{0x384d, 0xd0},
- {0x3856, 0x50},
- {0x3857, 0x30},
- {0x3858, 0x80},
- {0x3859, 0x40},
- {0x3860, 0x00},
- {0x3888, 0x00},
- {0x3889, 0x00},
- {0x388a, 0x00},
- {0x388b, 0x00},
- {0x388c, 0x00},
- {0x388d, 0x00},
- {0x388e, 0x00},
- {0x388f, 0x00},
{0x3894, 0x00},
- {0x3895, 0x00},
- {0x3c84, 0x00},
- {0x3d85, 0x8b},
- {0x3daa, 0x80},
- {0x3dab, 0x14},
- {0x3dac, 0x80},
- {0x3dad, 0xc8},
- {0x3dae, 0x81},
- {0x3daf, 0x7b},
- {0x3f00, 0x10},
- {0x3f01, 0x11},
- {0x3f06, 0x0d},
- {0x3f07, 0x0b},
- {0x3f08, 0x0d},
- {0x3f09, 0x0b},
- {0x3f0a, 0x01},
- {0x3f0b, 0x11},
- {0x3f0c, 0x33},
- {0x4001, 0x07},
- {0x4007, 0x20},
- {0x4008, 0x00},
- {0x4009, 0x05},
- {0x400a, 0x00},
{0x400b, 0x04},
- {0x400c, 0x00},
{0x400d, 0x04},
- {0x400e, 0x14},
- {0x4010, 0xf4},
- {0x4011, 0x03},
- {0x4012, 0x55},
- {0x4015, 0x00},
{0x4016, 0x27},
- {0x4017, 0x00},
- {0x4018, 0x0f},
- {0x401b, 0x08},
- {0x401c, 0x00},
- {0x401d, 0x10},
- {0x401e, 0x02},
- {0x401f, 0x00},
- {0x4050, 0x06},
- {0x4051, 0xff},
- {0x4052, 0xff},
- {0x4053, 0xff},
- {0x4054, 0xff},
- {0x4055, 0xff},
- {0x4056, 0xff},
- {0x4057, 0x7f},
- {0x4058, 0x00},
- {0x4059, 0x00},
- {0x405a, 0x00},
- {0x405b, 0x00},
- {0x405c, 0x07},
- {0x405d, 0xff},
- {0x405e, 0x07},
- {0x405f, 0xff},
- {0x4080, 0x78},
- {0x4081, 0x78},
- {0x4082, 0x78},
- {0x4083, 0x78},
- {0x4019, 0x00},
- {0x401a, 0x40},
- {0x4020, 0x04},
- {0x4021, 0x00},
- {0x4022, 0x04},
- {0x4023, 0x00},
- {0x4024, 0x04},
- {0x4025, 0x00},
- {0x4026, 0x04},
- {0x4027, 0x00},
- {0x4030, 0x00},
- {0x4031, 0x00},
- {0x4032, 0x00},
- {0x4033, 0x00},
- {0x4034, 0x00},
- {0x4035, 0x00},
- {0x4036, 0x00},
- {0x4037, 0x00},
- {0x4040, 0x00},
- {0x4041, 0x80},
- {0x4042, 0x00},
- {0x4043, 0x80},
- {0x4044, 0x00},
- {0x4045, 0x80},
- {0x4046, 0x00},
- {0x4047, 0x80},
- {0x4060, 0x00},
- {0x4061, 0x00},
- {0x4062, 0x00},
- {0x4063, 0x00},
- {0x4064, 0x00},
- {0x4065, 0x00},
- {0x4066, 0x00},
- {0x4067, 0x00},
- {0x4068, 0x00},
- {0x4069, 0x00},
- {0x406a, 0x00},
- {0x406b, 0x00},
- {0x406c, 0x00},
- {0x406d, 0x00},
- {0x406e, 0x00},
- {0x406f, 0x00},
- {0x4070, 0x00},
- {0x4071, 0x00},
- {0x4072, 0x00},
- {0x4073, 0x00},
- {0x4074, 0x00},
- {0x4075, 0x00},
- {0x4076, 0x00},
- {0x4077, 0x00},
- {0x4078, 0x00},
- {0x4079, 0x00},
- {0x407a, 0x00},
- {0x407b, 0x00},
- {0x407c, 0x00},
- {0x407d, 0x00},
- {0x407e, 0x00},
- {0x407f, 0x00},
- {0x40e0, 0x00},
- {0x40e1, 0x00},
- {0x40e2, 0x00},
- {0x40e3, 0x00},
- {0x40e4, 0x00},
- {0x40e5, 0x00},
- {0x40e6, 0x00},
- {0x40e7, 0x00},
- {0x40e8, 0x00},
- {0x40e9, 0x80},
- {0x40ea, 0x00},
- {0x40eb, 0x80},
- {0x40ec, 0x00},
- {0x40ed, 0x80},
- {0x40ee, 0x00},
- {0x40ef, 0x80},
- {0x40f0, 0x02},
- {0x40f1, 0x04},
- {0x4300, 0x00},
- {0x4301, 0x00},
- {0x4302, 0x00},
- {0x4303, 0x00},
- {0x4304, 0x00},
- {0x4305, 0x00},
- {0x4306, 0x00},
- {0x4307, 0x00},
- {0x4308, 0x00},
- {0x4309, 0x00},
- {0x430a, 0x00},
- {0x430b, 0xff},
- {0x430c, 0xff},
- {0x430d, 0x00},
- {0x430e, 0x00},
- {0x4315, 0x00},
- {0x4316, 0x00},
- {0x4317, 0x00},
- {0x4318, 0x00},
- {0x4319, 0x00},
- {0x431a, 0x00},
- {0x431b, 0x00},
- {0x431c, 0x00},
- {0x4500, 0x07},
{0x4501, 0x10},
- {0x4502, 0x00},
- {0x4503, 0x0f},
- {0x4504, 0x80},
- {0x4506, 0x01},
- {0x4509, 0x05},
- {0x450c, 0x00},
- {0x450d, 0x20},
- {0x450e, 0x00},
- {0x450f, 0x00},
- {0x4510, 0x00},
- {0x4523, 0x00},
- {0x4526, 0x00},
{0x4542, 0x00},
- {0x4543, 0x00},
- {0x4544, 0x00},
- {0x4545, 0x00},
- {0x4546, 0x00},
- {0x4547, 0x10},
- {0x4602, 0x00},
- {0x4603, 0x15},
- {0x460b, 0x07},
- {0x4680, 0x11},
- {0x4686, 0x00},
- {0x4687, 0x00},
- {0x4700, 0x00},
- {0x4800, 0x64},
- {0x4806, 0x40},
- {0x480b, 0x10},
- {0x480c, 0x80},
- {0x480f, 0x32},
- {0x4813, 0xe4},
{0x4837, 0x14},
{0x4850, 0x42},
- {0x4884, 0x04},
- {0x4c00, 0xf8},
- {0x4c01, 0x44},
- {0x4c03, 0x00},
- {0x4d00, 0x00},
- {0x4d01, 0x16},
- {0x4d04, 0x10},
- {0x4d05, 0x00},
- {0x4d06, 0x0c},
- {0x4d07, 0x00},
- {0x3d84, 0x04},
- {0x3680, 0xa4},
- {0x3682, 0x80},
- {0x3601, 0x40},
- {0x3602, 0x90},
- {0x3608, 0x0a},
- {0x3938, 0x09},
- {0x3a74, 0x84},
- {0x3a99, 0x84},
- {0x3ab9, 0xa6},
- {0x3aba, 0xba},
- {0x3b12, 0x84},
- {0x3b14, 0xbb},
- {0x3b15, 0xbf},
- {0x3a29, 0x26},
- {0x3a1f, 0x8a},
- {0x3a22, 0x91},
- {0x3a25, 0x96},
- {0x3a28, 0xb4},
- {0x3a2b, 0xba},
- {0x3a2e, 0xbf},
- {0x3a31, 0xc1},
{0x3a20, 0x05},
{0x3939, 0x6b},
{0x3902, 0x10},
@@ -1148,22 +1137,13 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = {
{0x3907, 0x0b},
{0x3908, 0x10},
{0x3909, 0x13},
- {0x360f, 0x99},
{0x390b, 0x11},
{0x390c, 0x21},
{0x390d, 0x32},
{0x390e, 0x76},
- {0x3911, 0x90},
- {0x3913, 0x90},
- {0x3b3f, 0x9d},
- {0x3b45, 0x9d},
- {0x3b1b, 0xc9},
- {0x3b21, 0xc9},
{0x3a1a, 0x1c},
- {0x3a23, 0x15},
{0x3a26, 0x17},
{0x3a2c, 0x50},
- {0x3a2f, 0x18},
{0x3a32, 0x4f},
{0x3ace, 0x01},
{0x3ad2, 0x01},
@@ -1178,31 +1158,13 @@ static const struct ov08x40_reg mode_1928x1208_regs[] = {
{0x3afe, 0x01},
{0x3b02, 0x01},
{0x3b06, 0x01},
- {0x3b0a, 0x01},
- {0x3b0b, 0x00},
- {0x3b0e, 0x01},
- {0x3b0f, 0x00},
- {0x392c, 0x02},
{0x392d, 0x01},
- {0x392e, 0x04},
- {0x392f, 0x03},
{0x3930, 0x09},
- {0x3931, 0x07},
- {0x3932, 0x10},
{0x3933, 0x0d},
- {0x3609, 0x08},
- {0x3921, 0x0f},
- {0x3928, 0x15},
- {0x3929, 0x2a},
{0x392a, 0x52},
{0x392b, 0xa3},
{0x340b, 0x1b},
- {0x3426, 0x10},
- {0x3407, 0x01},
- {0x3404, 0x01},
- {0x3500, 0x00},
{0x3501, 0x08},
- {0x3502, 0x10},
{0x3508, 0x04},
{0x3509, 0x00},
};
@@ -1217,6 +1179,7 @@ static const char * const ov08x40_test_pattern_menu[] = {
/* Configurations for supported link frequencies */
#define OV08X40_LINK_FREQ_400MHZ 400000000ULL
+#define OV08X40_LINK_FREQ_749MHZ 749000000ULL
#define OV08X40_SCLK_96MHZ 96000000ULL
#define OV08X40_XVCLK 19200000
#define OV08X40_DATA_LANES 4
@@ -1236,6 +1199,7 @@ static u64 link_freq_to_pixel_rate(u64 f)
/* Menu items for LINK_FREQ V4L2 control */
static const s64 link_freq_menu_items[] = {
OV08X40_LINK_FREQ_400MHZ,
+ OV08X40_LINK_FREQ_749MHZ,
};
/* Link frequency configs */
@@ -1246,6 +1210,12 @@ static const struct ov08x40_link_freq_config link_freq_configs[] = {
.regs = mipi_data_rate_800mbps,
}
},
+ [OV08X40_LINK_FREQ_749MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_1500mbps),
+ .regs = mipi_data_rate_1500mbps,
+ }
+ },
};
/* Mode configs */
@@ -1266,6 +1236,22 @@ static const struct ov08x40_mode supported_modes[] = {
.exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
},
{
+ .width = 3856,
+ .height = 2176,
+ .vts_def = OV08X40_VTS_30FPS,
+ .vts_min = OV08X40_VTS_30FPS,
+ .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */
+ .lanes = 4,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3856x2176_regs_800mbps),
+ .regs = mode_3856x2176_regs_800mbps,
+ },
+ .link_freq_index = OV08X40_LINK_FREQ_400MHZ_INDEX,
+ .exposure_shift = 1,
+ .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
+ },
+
+ {
.width = 1928,
.height = 1208,
.vts_def = OV08X40_VTS_BIN_30FPS,
@@ -1280,6 +1266,36 @@ static const struct ov08x40_mode supported_modes[] = {
.exposure_shift = 0,
.exposure_margin = OV08X40_EXPOSURE_BIN_MAX_MARGIN,
},
+ {
+ .width = 3856,
+ .height = 2176,
+ .vts_def = OV08X40_VTS_30FPS,
+ .vts_min = OV08X40_VTS_30FPS,
+ .llp = 0x10aa, /* in normal mode, tline time = 2 * HTS / SCLK */
+ .lanes = 2,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3856x2176_regs_1500mbps),
+ .regs = mode_3856x2176_regs_1500mbps,
+ },
+ .link_freq_index = OV08X40_LINK_FREQ_749MHZ_INDEX,
+ .exposure_shift = 1,
+ .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
+ },
+ {
+ .width = 1928,
+ .height = 1088,
+ .vts_def = OV08X40_VTS_BIN_30FPS,
+ .vts_min = OV08X40_VTS_BIN_30FPS,
+ .llp = 0x960,
+ .lanes = 2,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1928x1088_regs_1500mbps),
+ .regs = mode_1928x1088_regs_1500mbps,
+ },
+ .link_freq_index = OV08X40_LINK_FREQ_749MHZ_INDEX,
+ .exposure_shift = 0,
+ .exposure_margin = OV08X40_EXPOSURE_MAX_MARGIN,
+ },
};
static const char * const ov08x40_supply_names[] = {
@@ -1310,8 +1326,13 @@ struct ov08x40 {
/* Mutex for serialized access */
struct mutex mutex;
+ /* data lanes */
+ u8 mipi_lanes;
+
/* True if the device has been identified */
bool identified;
+
+ unsigned long link_freq_bitmap;
};
#define to_ov08x40(_sd) container_of(_sd, struct ov08x40, sd)
@@ -1341,7 +1362,7 @@ static int ov08x40_power_on(struct device *dev)
}
gpiod_set_value_cansleep(ov08x->reset_gpio, 0);
- usleep_range(1500, 1800);
+ usleep_range(5000, 5500);
return 0;
@@ -1727,6 +1748,15 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
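+/*
+ * supported_modes[] mixes 2-lane and 4-lane entries; only the ones matching
+ * the lane count probed from the firmware endpoint are reported to userspace.
+ */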
+static bool filter_by_mipi_lanes(const void *array, size_t index,
+ const void *context)
+{
+ const struct ov08x40_mode *mode = array;
+ const struct ov08x40 *ov08x = context;
+
+ return mode->lanes == ov08x->mipi_lanes;
+}
+
static const struct v4l2_ctrl_ops ov08x40_ctrl_ops = {
.s_ctrl = ov08x40_set_ctrl,
};
@@ -1748,18 +1778,28 @@ static int ov08x40_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= ARRAY_SIZE(supported_modes))
- return -EINVAL;
+ struct ov08x40 *ov08x = to_ov08x40(sd);
+ size_t i, count = 0;
if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
return -EINVAL;
- fse->min_width = supported_modes[fse->index].width;
- fse->max_width = fse->min_width;
- fse->min_height = supported_modes[fse->index].height;
- fse->max_height = fse->min_height;
+ for (i = 0; i < ARRAY_SIZE(supported_modes); i++) {
+ if (!filter_by_mipi_lanes(&supported_modes[i], i, ov08x))
+ continue;
- return 0;
+ if (count == fse->index) {
+ fse->min_width = supported_modes[i].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[i].height;
+ fse->max_height = fse->min_height;
+ return 0;
+ }
+
+ count++;
+ }
+
+ return -EINVAL;
}
static void ov08x40_update_pad_format(const struct ov08x40_mode *mode,
@@ -1822,10 +1862,13 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10)
fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
- mode = v4l2_find_nearest_size(supported_modes,
- ARRAY_SIZE(supported_modes),
- width, height,
- fmt->format.width, fmt->format.height);
+ mode = v4l2_find_nearest_size_conditional(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width,
+ fmt->format.height,
+ filter_by_mipi_lanes,
+ ov08x);
ov08x40_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
@@ -1891,6 +1934,14 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
return ret;
}
+ reg_list = &ov08x40_global_setting;
+ ret = ov08x40_write_reg_list(ov08x, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set global setting\n",
+ __func__);
+ return ret;
+ }
+
/* Apply default values of current mode */
reg_list = &ov08x->cur_mode->reg_list;
ret = ov08x40_write_reg_list(ov08x, reg_list);
@@ -2038,7 +2089,6 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
s64 pixel_rate_min;
s64 pixel_rate_max;
const struct ov08x40_mode *mode;
- u32 max;
int ret;
ctrl_hdlr = &ov08x->ctrl_handler;
@@ -2048,12 +2098,11 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
mutex_init(&ov08x->mutex);
ctrl_hdlr->lock = &ov08x->mutex;
- max = ARRAY_SIZE(link_freq_menu_items) - 1;
ov08x->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
&ov08x40_ctrl_ops,
V4L2_CID_LINK_FREQ,
- max,
- 0,
+ __fls(ov08x->link_freq_bitmap),
+ __ffs(ov08x->link_freq_bitmap),
link_freq_menu_items);
if (ov08x->link_freq)
ov08x->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
@@ -2149,7 +2198,7 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
};
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
- unsigned int i, j;
+ unsigned int i;
int ret;
u32 xvclk_rate;
@@ -2207,7 +2256,12 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
goto out_err;
}
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV08X40_DATA_LANES) {
+ switch (bus_cfg.bus.mipi_csi2.num_data_lanes) {
+ case 2:
+ case 4:
+ ov08x->mipi_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+ break;
+ default:
dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
@@ -2219,21 +2273,11 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
ret = -EINVAL;
goto out_err;
}
-
- for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
- for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
- if (link_freq_menu_items[i] ==
- bus_cfg.link_frequencies[j])
- break;
- }
-
- if (j == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequency %lld supported\n",
- link_freq_menu_items[i]);
- ret = -EINVAL;
- goto out_err;
- }
- }
+ ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &ov08x->link_freq_bitmap);
out_err:
v4l2_fwnode_endpoint_free(&bus_cfg);
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index 73c844aa5697..e85c7d33a670 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -34,9 +34,6 @@
#define OV13B10_VTS_120FPS 0x0320
#define OV13B10_VTS_MAX 0x7fff
-/* HBLANK control - read only */
-#define OV13B10_PPL_560MHZ 4704
-
/* Exposure control */
#define OV13B10_REG_EXPOSURE 0x3500
#define OV13B10_EXPOSURE_MIN 4
@@ -95,7 +92,7 @@ struct ov13b10_reg_list {
/* Link frequency config */
struct ov13b10_link_freq_config {
- u32 pixels_per_line;
+ u64 link_freq;
/* registers for this link frequency */
struct ov13b10_reg_list reg_list;
@@ -114,6 +111,10 @@ struct ov13b10_mode {
/* Index of Link frequency config to be used */
u32 link_freq_index;
+
+ /* Pixels per line in current mode */
+ u32 ppl;
+
/* Default register values */
struct ov13b10_reg_list reg_list;
};
@@ -513,6 +514,52 @@ static const struct ov13b10_reg mode_1364x768_120fps_regs[] = {
{0x5001, 0x0d},
};
+static const struct ov13b10_reg mode_2lanes_2104x1560_60fps_regs[] = {
+ {0x3016, 0x32},
+ {0x3106, 0x29},
+ {0x0305, 0xaf},
+ {0x3501, 0x06},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x08},
+ {0x3809, 0x38},
+ {0x380a, 0x06},
+ {0x380b, 0x18},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x06},
+ {0x380f, 0x3e},
+ {0x3810, 0x00},
+ {0x3811, 0x07},
+ {0x3812, 0x00},
+ {0x3813, 0x05},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
static const char * const ov13b10_test_pattern_menu[] = {
"Disabled",
"Vertical Color Bar Type 1",
@@ -526,15 +573,16 @@ static const char * const ov13b10_test_pattern_menu[] = {
#define OV13B10_LINK_FREQ_INDEX_0 0
#define OV13B10_EXT_CLK 19200000
-#define OV13B10_DATA_LANES 4
+#define OV13B10_4_DATA_LANES 4
+#define OV13B10_2_DATA_LANES 2
/*
- * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
- * data rate => double data rate; number of lanes => 4; bits per pixel => 10
+ * pixel_rate = data_rate * nr_of_lanes / bits_per_pixel
+ * data_rate => link_freq * 2; number of lanes => 4 or 2; bits per pixel => 10
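+ * e.g. a 560 MHz link gives 560 MHz * 2 * 4 / 10 = 448 MPix/s on 4 lanes,
+ * or 224 MPix/s on 2 lanes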
*/
-static u64 link_freq_to_pixel_rate(u64 f)
+static u64 link_freq_to_pixel_rate(u64 f, u8 lanes)
{
- f *= 2 * OV13B10_DATA_LANES;
+ f *= 2 * lanes;
do_div(f, 10);
return f;
@@ -549,7 +597,7 @@ static const s64 link_freq_menu_items[] = {
static const struct ov13b10_link_freq_config
link_freq_configs[] = {
{
- .pixels_per_line = OV13B10_PPL_560MHZ,
+ .link_freq = OV13B10_LINK_FREQ_560MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_1120mbps),
.regs = mipi_data_rate_1120mbps,
@@ -558,12 +606,14 @@ static const struct ov13b10_link_freq_config
};
/* Mode configs */
-static const struct ov13b10_mode supported_modes[] = {
+static const struct ov13b10_mode supported_4_lanes_modes[] = {
+ /* 4 data lanes */
{
.width = 4208,
.height = 3120,
.vts_def = OV13B10_VTS_30FPS,
.vts_min = OV13B10_VTS_30FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_4208x3120_regs),
.regs = mode_4208x3120_regs,
@@ -575,6 +625,7 @@ static const struct ov13b10_mode supported_modes[] = {
.height = 3120,
.vts_def = OV13B10_VTS_30FPS,
.vts_min = OV13B10_VTS_30FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_4160x3120_regs),
.regs = mode_4160x3120_regs,
@@ -586,6 +637,7 @@ static const struct ov13b10_mode supported_modes[] = {
.height = 2340,
.vts_def = OV13B10_VTS_30FPS,
.vts_min = OV13B10_VTS_30FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_4160x2340_regs),
.regs = mode_4160x2340_regs,
@@ -597,6 +649,7 @@ static const struct ov13b10_mode supported_modes[] = {
.height = 1560,
.vts_def = OV13B10_VTS_60FPS,
.vts_min = OV13B10_VTS_60FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_2104x1560_regs),
.regs = mode_2104x1560_regs,
@@ -608,6 +661,7 @@ static const struct ov13b10_mode supported_modes[] = {
.height = 1170,
.vts_def = OV13B10_VTS_60FPS,
.vts_min = OV13B10_VTS_60FPS,
+ .ppl = 4704,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_2080x1170_regs),
.regs = mode_2080x1170_regs,
@@ -620,6 +674,7 @@ static const struct ov13b10_mode supported_modes[] = {
.vts_def = OV13B10_VTS_120FPS,
.vts_min = OV13B10_VTS_120FPS,
.link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ .ppl = 4664,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_1364x768_120fps_regs),
.regs = mode_1364x768_120fps_regs,
@@ -627,6 +682,23 @@ static const struct ov13b10_mode supported_modes[] = {
},
};
+static const struct ov13b10_mode supported_2_lanes_modes[] = {
+ /* 2 data lanes */
+ {
+ .width = 2104,
+ .height = 1560,
+ .vts_def = OV13B10_VTS_60FPS,
+ .vts_min = OV13B10_VTS_60FPS,
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ .ppl = 2352,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(mode_2lanes_2104x1560_60fps_regs),
+ .regs = mode_2lanes_2104x1560_60fps_regs,
+ },
+ },
+};
+
struct ov13b10 {
struct v4l2_subdev sd;
struct media_pad pad;
@@ -644,12 +716,20 @@ struct ov13b10 {
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
+ /* Supported modes */
+ const struct ov13b10_mode *supported_modes;
+
/* Current mode */
const struct ov13b10_mode *cur_mode;
/* Mutex for serialized access */
struct mutex mutex;
+ u8 supported_modes_num;
+
+ /* Data lanes used */
+ u8 data_lanes;
+
/* True if the device has been identified */
bool identified;
};
@@ -753,8 +833,8 @@ static int ov13b10_write_reg_list(struct ov13b10 *ov13b,
/* Open sub-device */
static int ov13b10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- const struct ov13b10_mode *default_mode = &supported_modes[0];
struct ov13b10 *ov13b = to_ov13b10(sd);
+ const struct ov13b10_mode *default_mode = ov13b->supported_modes;
struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_state_get_format(fh->state,
0);
@@ -973,7 +1053,10 @@ static int ov13b10_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= ARRAY_SIZE(supported_modes))
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ const struct ov13b10_mode *supported_modes = ov13b->supported_modes;
+
+ if (fse->index >= ov13b->supported_modes_num)
return -EINVAL;
if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
@@ -1033,6 +1116,7 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
{
struct ov13b10 *ov13b = to_ov13b10(sd);
const struct ov13b10_mode *mode;
+ const struct ov13b10_mode *supported_modes = ov13b->supported_modes;
struct v4l2_mbus_framefmt *framefmt;
s32 vblank_def;
s32 vblank_min;
@@ -1047,7 +1131,7 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
mode = v4l2_find_nearest_size(supported_modes,
- ARRAY_SIZE(supported_modes),
+ ov13b->supported_modes_num,
width, height,
fmt->format.width, fmt->format.height);
ov13b10_update_pad_format(mode, fmt);
@@ -1058,23 +1142,18 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
ov13b->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov13b->link_freq, mode->link_freq_index);
link_freq = link_freq_menu_items[mode->link_freq_index];
- pixel_rate = link_freq_to_pixel_rate(link_freq);
+ pixel_rate = link_freq_to_pixel_rate(link_freq,
+ ov13b->data_lanes);
__v4l2_ctrl_s_ctrl_int64(ov13b->pixel_rate, pixel_rate);
/* Update limits and set FPS to default */
- vblank_def = ov13b->cur_mode->vts_def -
- ov13b->cur_mode->height;
- vblank_min = ov13b->cur_mode->vts_min -
- ov13b->cur_mode->height;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
__v4l2_ctrl_modify_range(ov13b->vblank, vblank_min,
- OV13B10_VTS_MAX
- - ov13b->cur_mode->height,
- 1,
- vblank_def);
+ OV13B10_VTS_MAX - mode->height,
+ 1, vblank_def);
__v4l2_ctrl_s_ctrl(ov13b->vblank, vblank_def);
- h_blank =
- link_freq_configs[mode->link_freq_index].pixels_per_line
- - ov13b->cur_mode->width;
+ h_blank = mode->ppl - mode->width;
__v4l2_ctrl_modify_range(ov13b->hblank, h_blank,
h_blank, 1, h_blank);
}
@@ -1311,7 +1390,8 @@ static int ov13b10_init_controls(struct ov13b10 *ov13b)
if (ov13b->link_freq)
ov13b->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0],
+ ov13b->data_lanes);
pixel_rate_min = 0;
/* By default, PIXEL_RATE is read only */
ov13b->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
@@ -1328,8 +1408,7 @@ static int ov13b10_init_controls(struct ov13b10 *ov13b)
OV13B10_VTS_MAX - mode->height, 1,
vblank_def);
- hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
- mode->width;
+ hblank = mode->ppl - mode->width;
ov13b->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
V4L2_CID_HBLANK,
hblank, hblank, 1, hblank);
@@ -1423,7 +1502,7 @@ static int ov13b10_get_pm_resources(struct device *dev)
return 0;
}
-static int ov13b10_check_hwcfg(struct device *dev)
+static int ov13b10_check_hwcfg(struct device *dev, struct ov13b10 *ov13b)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
@@ -1433,6 +1512,7 @@ static int ov13b10_check_hwcfg(struct device *dev)
unsigned int i, j;
int ret;
u32 ext_clk;
+ u8 dlane;
if (!fwnode)
return -ENXIO;
@@ -1459,13 +1539,32 @@ static int ov13b10_check_hwcfg(struct device *dev)
if (ret)
return ret;
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV13B10_DATA_LANES) {
+ dlane = bus_cfg.bus.mipi_csi2.num_data_lanes;
+ switch (dlane) {
+ case OV13B10_4_DATA_LANES:
+ ov13b->supported_modes = supported_4_lanes_modes;
+ ov13b->supported_modes_num =
+ ARRAY_SIZE(supported_4_lanes_modes);
+ break;
+
+ case OV13B10_2_DATA_LANES:
+ ov13b->supported_modes = supported_2_lanes_modes;
+ ov13b->supported_modes_num =
+ ARRAY_SIZE(supported_2_lanes_modes);
+ break;
+
+ default:
dev_err(dev, "number of CSI2 data lanes %d is not supported",
- bus_cfg.bus.mipi_csi2.num_data_lanes);
+ dlane);
ret = -EINVAL;
goto out_err;
}
+ ov13b->data_lanes = dlane;
+ ov13b->cur_mode = ov13b->supported_modes;
+ dev_dbg(dev, "%u lanes with %u modes selected\n",
+ ov13b->data_lanes, ov13b->supported_modes_num);
+
if (!bus_cfg.nr_of_link_frequencies) {
dev_err(dev, "no link frequencies defined");
ret = -EINVAL;
@@ -1499,17 +1598,17 @@ static int ov13b10_probe(struct i2c_client *client)
bool full_power;
int ret;
+ ov13b = devm_kzalloc(&client->dev, sizeof(*ov13b), GFP_KERNEL);
+ if (!ov13b)
+ return -ENOMEM;
+
/* Check HW config */
- ret = ov13b10_check_hwcfg(&client->dev);
+ ret = ov13b10_check_hwcfg(&client->dev, ov13b);
if (ret) {
dev_err(&client->dev, "failed to check hwcfg: %d", ret);
return ret;
}
- ov13b = devm_kzalloc(&client->dev, sizeof(*ov13b), GFP_KERNEL);
- if (!ov13b)
- return -ENOMEM;
-
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
@@ -1533,9 +1632,6 @@ static int ov13b10_probe(struct i2c_client *client)
}
}
- /* Set default mode to max resolution */
- ov13b->cur_mode = &supported_modes[0];
-
ret = ov13b10_init_controls(ov13b);
if (ret)
goto error_power_off;
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 80d151e8ae29..6cf461e3373c 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1456,12 +1456,12 @@ static int ov2740_probe(struct i2c_client *client)
return 0;
probe_error_v4l2_subdev_cleanup:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
v4l2_subdev_cleanup(&ov2740->sd);
probe_error_media_entity_cleanup:
media_entity_cleanup(&ov2740->sd.entity);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler);
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index c1081deffc2f..e7aec281e9a4 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1295,11 +1295,8 @@ static int ov5675_probe(struct i2c_client *client)
return -ENOMEM;
ret = ov5675_get_hwcfg(ov5675, &client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
- ret);
+ if (ret)
return ret;
- }
v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index e6704d018248..4b6874d2a104 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -2276,8 +2276,8 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
if (!is_acpi_node(fwnode)) {
ov8856->xvclk = devm_clk_get(dev, "xvclk");
if (IS_ERR(ov8856->xvclk)) {
- dev_err(dev, "could not get xvclk clock (%pe)\n",
- ov8856->xvclk);
+ dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
+ "could not get xvclk clock\n");
return PTR_ERR(ov8856->xvclk);
}
@@ -2382,11 +2382,8 @@ static int ov8856_probe(struct i2c_client *client)
return -ENOMEM;
ret = ov8856_get_hwcfg(ov8856, &client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
- ret);
+ if (ret)
return ret;
- }
v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index b8bd8354d100..52e8e2620b4d 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -16,10 +16,10 @@
*/
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -575,10 +575,9 @@ static int rdacm20_probe(struct i2c_client *client)
dev->dev = &client->dev;
dev->serializer.client = client;
- ret = of_property_read_u32_array(client->dev.of_node, "reg",
- dev->addrs, 2);
+ ret = device_property_read_u32_array(dev->dev, "reg", dev->addrs, 2);
if (ret < 0) {
- dev_err(dev->dev, "Invalid DT reg property: %d\n", ret);
+ dev_err(dev->dev, "Invalid FW reg property: %d\n", ret);
return -EINVAL;
}
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index 3e22df36354f..bcab462708c7 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -11,10 +11,10 @@
*/
#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -551,10 +551,9 @@ static int rdacm21_probe(struct i2c_client *client)
dev->dev = &client->dev;
dev->serializer.client = client;
- ret = of_property_read_u32_array(client->dev.of_node, "reg",
- dev->addrs, 2);
+ ret = device_property_read_u32_array(dev->dev, "reg", dev->addrs, 2);
if (ret < 0) {
- dev_err(dev->dev, "Invalid DT reg property: %d\n", ret);
+ dev_err(dev->dev, "Invalid FW reg property: %d\n", ret);
return -EINVAL;
}
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 2d5f42f11158..3d6703b75bfa 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -313,6 +313,10 @@ static int tc358743_get_detected_timings(struct v4l2_subdev *sd,
memset(timings, 0, sizeof(struct v4l2_dv_timings));
+ /* If HPD is low, ignore any video */
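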
+ if (!(i2c_rd8(sd, HPD_CTL) & MASK_HPD_OUT0))
+ return -ENOLINK;
+
if (no_signal(sd)) {
v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
return -ENOLINK;
@@ -1501,7 +1505,7 @@ static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
static void tc358743_irq_poll_timer(struct timer_list *t)
{
- struct tc358743_state *state = from_timer(state, t, timer);
+ struct tc358743_state *state = timer_container_of(state, t, timer);
unsigned int msecs;
schedule_work(&state->work_i2c_poll);
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index 42115118a0bd..6267e9ad39c0 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -318,7 +318,7 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
static void chip_thread_wake(struct timer_list *t)
{
- struct CHIPSTATE *chip = from_timer(chip, t, wt);
+ struct CHIPSTATE *chip = timer_container_of(chip, t, wt);
wake_up_process(chip->thread);
}
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
new file mode 100644
index 000000000000..25e2fc88a036
--- /dev/null
+++ b/drivers/media/i2c/vd55g1.c
@@ -0,0 +1,1965 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the VD55G1 global shutter sensor family
+ *
+ * Copyright (C) 2025 STMicroelectronics SA
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* Register Map */
+#define VD55G1_REG_MODEL_ID CCI_REG32_LE(0x0000)
+#define VD55G1_MODEL_ID 0x53354731
+#define VD55G1_REG_REVISION CCI_REG16_LE(0x0004)
+#define VD55G1_REVISION_CCB 0x2020
+#define VD55G1_REG_FWPATCH_REVISION CCI_REG16_LE(0x0012)
+#define VD55G1_REG_FWPATCH_START_ADDR CCI_REG8(0x2000)
+#define VD55G1_REG_SYSTEM_FSM CCI_REG8(0x001c)
+#define VD55G1_SYSTEM_FSM_READY_TO_BOOT 0x01
+#define VD55G1_SYSTEM_FSM_SW_STBY 0x02
+#define VD55G1_SYSTEM_FSM_STREAMING 0x03
+#define VD55G1_REG_BOOT CCI_REG8(0x0200)
+#define VD55G1_BOOT_PATCH_SETUP 2
+#define VD55G1_REG_STBY CCI_REG8(0x0201)
+#define VD55G1_STBY_START_STREAM 1
+#define VD55G1_REG_STREAMING CCI_REG8(0x0202)
+#define VD55G1_STREAMING_STOP_STREAM 1
+#define VD55G1_REG_EXT_CLOCK CCI_REG32_LE(0x0220)
+#define VD55G1_REG_LINE_LENGTH CCI_REG16_LE(0x0300)
+#define VD55G1_REG_ORIENTATION CCI_REG8(0x0302)
+#define VD55G1_REG_FORMAT_CTRL CCI_REG8(0x030a)
+#define VD55G1_REG_OIF_CTRL CCI_REG16_LE(0x030c)
+#define VD55G1_REG_ISL_ENABLE CCI_REG16_LE(0x326)
+#define VD55G1_REG_OIF_IMG_CTRL CCI_REG8(0x030f)
+#define VD55G1_REG_MIPI_DATA_RATE CCI_REG32_LE(0x0224)
+#define VD55G1_REG_PATGEN_CTRL CCI_REG16_LE(0x0304)
+#define VD55G1_PATGEN_TYPE_SHIFT 4
+#define VD55G1_PATGEN_ENABLE BIT(0)
+#define VD55G1_REG_MANUAL_ANALOG_GAIN CCI_REG8(0x0501)
+#define VD55G1_REG_MANUAL_COARSE_EXPOSURE CCI_REG16_LE(0x0502)
+#define VD55G1_REG_MANUAL_DIGITAL_GAIN CCI_REG16_LE(0x0504)
+#define VD55G1_REG_APPLIED_COARSE_EXPOSURE CCI_REG16_LE(0x00e8)
+#define VD55G1_REG_APPLIED_ANALOG_GAIN CCI_REG16_LE(0x00ea)
+#define VD55G1_REG_APPLIED_DIGITAL_GAIN CCI_REG16_LE(0x00ec)
+#define VD55G1_REG_AE_FORCE_COLDSTART CCI_REG8(0x0308)
+#define VD55G1_REG_AE_COLDSTART_EXP_TIME CCI_REG32_LE(0x0374)
+#define VD55G1_REG_READOUT_CTRL CCI_REG8(0x052e)
+#define VD55G1_READOUT_CTRL_BIN_MODE_NORMAL 0
+#define VD55G1_READOUT_CTRL_BIN_MODE_DIGITAL_X2 1
+#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ea)
+#define VD55G1_DUSTER_ENABLE BIT(0)
+#define VD55G1_DUSTER_DISABLE 0
+#define VD55G1_DUSTER_DYN_ENABLE BIT(1)
+#define VD55G1_DUSTER_RING_ENABLE BIT(4)
+#define VD55G1_REG_AE_TARGET_PERCENTAGE CCI_REG8(0x0486)
+#define VD55G1_REG_NEXT_CTX CCI_REG16_LE(0x03e4)
+#define VD55G1_REG_EXPOSURE_USE_CASES CCI_REG8(0x0312)
+#define VD55G1_EXPOSURE_USE_CASES_MULTI_CONTEXT BIT(2)
+#define VD55G1_REG_EXPOSURE_MAX_COARSE CCI_REG16_LE(0x0372)
+#define VD55G1_EXPOSURE_MAX_COARSE_DEF 0x7fff
+#define VD55G1_EXPOSURE_MAX_COARSE_SUB 446
+#define VD55G1_REG_CTX_REPEAT_COUNT_CTX0 CCI_REG16_LE(0x03dc)
+#define VD55G1_REG_CTX_REPEAT_COUNT_CTX1 CCI_REG16_LE(0x03de)
+
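+/*
+ * Context registers are banked: the register for context N sits at the
+ * context-0 address plus N * VD55G1_CTX_OFFSET.
+ */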
+#define VD55G1_REG_EXP_MODE(ctx) \
+ CCI_REG8(0x0500 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_FRAME_LENGTH(ctx) \
+ CCI_REG32_LE(0x050c + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_X_START(ctx) \
+ CCI_REG16_LE(0x0514 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_X_WIDTH(ctx) \
+ CCI_REG16_LE(0x0516 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_Y_START(ctx) \
+ CCI_REG16_LE(0x0510 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_Y_HEIGHT(ctx) \
+ CCI_REG16_LE(0x0512 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_REG_GPIO_0_CTRL(ctx) \
+ CCI_REG8(0x051d + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_GPIO_MODE_FSYNC_OUT 0x00
+#define VD55G1_GPIO_MODE_IN 0x01
+#define VD55G1_GPIO_MODE_STROBE 0x02
+#define VD55G1_REG_VT_MODE(ctx) \
+ CCI_REG8(0x0536 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_VT_MODE_NORMAL 0
+#define VD55G1_VT_MODE_SUBTRACTION 1
+#define VD55G1_REG_MASK_FRAME_CTRL(ctx) \
+ CCI_REG8(0x0537 + VD55G1_CTX_OFFSET * (ctx))
+#define VD55G1_MASK_FRAME_CTRL_OUTPUT 0
+#define VD55G1_MASK_FRAME_CTRL_MASK 1
+#define VD55G1_REG_EXPOSURE_INSTANCE(ctx) \
+ CCI_REG32_LE(0x52d + VD55G1_CTX_OFFSET * (ctx))
+
+#define VD55G1_WIDTH 804
+#define VD55G1_HEIGHT 704
+#define VD55G1_DEFAULT_MODE 0
+#define VD55G1_NB_GPIOS 4
+#define VD55G1_MEDIA_BUS_FMT_DEF MEDIA_BUS_FMT_Y8_1X8
+#define VD55G1_DGAIN_DEF 256
+#define VD55G1_AGAIN_DEF 19
+#define VD55G1_EXPO_MAX_TERM 64
+#define VD55G1_EXPO_DEF 500
+#define VD55G1_LINE_LENGTH_MIN 1128
+#define VD55G1_LINE_LENGTH_SUB_MIN 1344
+#define VD55G1_VBLANK_MIN 86
+#define VD55G1_VBLANK_MAX 0xffff
+#define VD55G1_FRAME_LENGTH_DEF 1860 /* 60 fps */
+#define VD55G1_MIPI_MARGIN 900
+#define VD55G1_CTX_OFFSET 0x50
+#define VD55G1_FWPATCH_REVISION_MAJOR 2
+#define VD55G1_FWPATCH_REVISION_MINOR 9
+#define VD55G1_XCLK_FREQ_MIN (6 * HZ_PER_MHZ)
+#define VD55G1_XCLK_FREQ_MAX (27 * HZ_PER_MHZ)
+#define VD55G1_MIPI_RATE_MIN (250 * HZ_PER_MHZ)
+#define VD55G1_MIPI_RATE_MAX (1200 * HZ_PER_MHZ)
+
+static const u8 patch_array[] = {
+ 0x44, 0x03, 0x09, 0x02, 0xe6, 0x01, 0x42, 0x00, 0xea, 0x01, 0x42, 0x00,
+ 0xf0, 0x01, 0x42, 0x00, 0xe6, 0x01, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0xfa, 0x68, 0x40, 0x00, 0xe8,
+ 0x09, 0xbe, 0x4c, 0x08, 0x00, 0xf2, 0x93, 0xdd, 0x1c, 0x00, 0xc0, 0xe2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x00, 0xfa, 0x6b, 0x80, 0x98, 0x7f,
+ 0xfc, 0xef, 0x11, 0xc1, 0x0f, 0x82, 0x69, 0xbe, 0x0f, 0xac, 0x58, 0x40,
+ 0x00, 0xe8, 0x0c, 0x0c, 0x00, 0xf2, 0x93, 0xdd, 0x1c, 0x00, 0x40, 0xe3,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x84, 0xfa, 0x46, 0x0e, 0xe8, 0xe0,
+ 0x08, 0xde, 0x4a, 0x40, 0x84, 0xe0, 0xa5, 0x86, 0xa8, 0x7d, 0xfc, 0xef,
+ 0x6b, 0x80, 0x01, 0xbf, 0x28, 0x77, 0x0c, 0xef, 0x0b, 0x0e, 0x21, 0x78,
+ 0x06, 0xc0, 0x0b, 0xa5, 0xb5, 0x84, 0x06, 0x42, 0x98, 0xe1, 0x01, 0x81,
+ 0x01, 0x42, 0x38, 0xe0, 0x0c, 0xc4, 0x0e, 0x84, 0x46, 0x02, 0x84, 0xe0,
+ 0x0c, 0x84, 0x11, 0x81, 0x21, 0x81, 0x31, 0x81, 0x41, 0x81, 0x51, 0x81,
+ 0xc1, 0x81, 0x05, 0x83, 0x0c, 0x0c, 0x84, 0xf2, 0x93, 0xdd, 0x06, 0x40,
+ 0x98, 0xe1, 0xc8, 0x80, 0x58, 0x82, 0x48, 0xc0, 0x38, 0xc2, 0x29, 0x00,
+ 0x10, 0xe0, 0x19, 0x00, 0x14, 0xe0, 0x09, 0x00, 0x38, 0xe0, 0x5f, 0xb8,
+ 0x5f, 0xa8, 0x5f, 0xa6, 0x5f, 0xa4, 0x5f, 0xa2, 0x5f, 0xa0, 0x56, 0x41,
+ 0x98, 0xe1, 0x18, 0x82, 0x28, 0x80, 0x38, 0xc0, 0x5f, 0xa2, 0x19, 0x00,
+ 0x20, 0xf8, 0x5f, 0xa4, 0x28, 0xc2, 0x5f, 0xa6, 0x39, 0x00, 0x10, 0xe0,
+ 0x5f, 0xa2, 0x19, 0x00, 0x14, 0xe0, 0x5f, 0xa4, 0x29, 0x00, 0x18, 0xe0,
+ 0x5f, 0xa6, 0x39, 0x00, 0x40, 0xe0, 0x5f, 0xa2, 0x19, 0x00, 0x44, 0xe0,
+ 0x5f, 0xa4, 0x29, 0x00, 0x1c, 0xe0, 0x5f, 0xa6, 0x39, 0x00, 0x38, 0xe0,
+ 0x5f, 0xa2, 0x19, 0x00, 0x20, 0xe0, 0x5f, 0xa4, 0x29, 0x00, 0x24, 0xe0,
+ 0x5f, 0xa6, 0x39, 0x00, 0x28, 0xe0, 0x5f, 0xa2, 0x19, 0x00, 0x2c, 0xe0,
+ 0x5f, 0xa4, 0x29, 0x00, 0x30, 0xe0, 0x5f, 0xa6, 0x09, 0x00, 0x34, 0xe0,
+ 0x5f, 0xa2, 0x5f, 0xa4, 0x5f, 0xa0, 0x4a, 0x0a, 0xfc, 0xfb, 0xe5, 0x82,
+ 0x08, 0xde, 0x4a, 0x40, 0x88, 0xe0, 0xf6, 0x40, 0x00, 0xe0, 0x01, 0x4e,
+ 0x99, 0x78, 0x0a, 0xc0, 0x85, 0x80, 0x98, 0x40, 0x00, 0xe8, 0x35, 0x81,
+ 0xa8, 0x40, 0x00, 0xe8, 0x0b, 0x8c, 0x0c, 0x0c, 0x84, 0xf2, 0xd5, 0xed,
+ 0x83, 0xc1, 0x13, 0xc5, 0x93, 0xdd, 0xc3, 0xc1, 0x83, 0xc1, 0x13, 0xc3,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x4c, 0x04, 0x04, 0xfa, 0xc6, 0x0f, 0x94, 0xe0,
+ 0x19, 0x0e, 0xc9, 0x65, 0x01, 0xc0, 0x28, 0xde, 0x0a, 0x42, 0x80, 0xe0,
+ 0x24, 0x02, 0x00, 0xfc, 0x16, 0xde, 0xa5, 0x8a, 0x19, 0x00, 0xb8, 0xe0,
+ 0x10, 0x02, 0x0c, 0xec, 0x1d, 0xe6, 0x14, 0x02, 0x88, 0x80, 0x4e, 0x04,
+ 0x01, 0x00, 0x10, 0x80, 0x25, 0x02, 0x08, 0x9c, 0x86, 0x02, 0x00, 0x80,
+ 0x08, 0x44, 0x00, 0x98, 0x55, 0x81, 0x11, 0x85, 0x45, 0x81, 0x11, 0x89,
+ 0x25, 0x81, 0x11, 0x83, 0x2b, 0x00, 0x24, 0xe0, 0x64, 0xc2, 0x0b, 0x84,
+ 0x08, 0x51, 0x00, 0xef, 0x2b, 0x80, 0x01, 0x83, 0x1b, 0x8c, 0x38, 0x7d,
+ 0x5c, 0xef, 0x18, 0xde, 0x0b, 0xa1, 0x25, 0x82, 0x0b, 0x0e, 0x88, 0xf9,
+ 0x0a, 0x00, 0x00, 0xe8, 0x10, 0x42, 0x04, 0x9c, 0x11, 0x4e, 0x0c, 0x80,
+ 0x10, 0x40, 0x04, 0xf0, 0x4e, 0x05, 0x01, 0x60, 0x10, 0xc0, 0x06, 0x88,
+ 0x10, 0x40, 0xf8, 0xf3, 0x06, 0xde, 0x4c, 0x0c, 0x04, 0xf2, 0x93, 0xdd,
+ 0x0c, 0x04, 0x1c, 0xfe, 0xf6, 0x0f, 0x94, 0xe0, 0x38, 0x9c, 0x46, 0x51,
+ 0xfc, 0xe0, 0x46, 0x49, 0x38, 0xe2, 0x30, 0x46, 0xf8, 0xf3, 0x36, 0x9c,
+ 0xc6, 0x46, 0x0c, 0xe1, 0x34, 0x8c, 0x94, 0xa0, 0x4e, 0xa0, 0x39, 0x06,
+ 0x80, 0xe0, 0x4a, 0x46, 0x94, 0xe0, 0x05, 0x8c, 0x6a, 0x40, 0x80, 0xe0,
+ 0x2c, 0x0c, 0x00, 0xe2, 0x0b, 0x8c, 0xb8, 0x7c, 0x5c, 0xef, 0x0b, 0x8c,
+ 0x9e, 0xa0, 0xf8, 0x40, 0x60, 0xef, 0x0b, 0xa1, 0x5a, 0x40, 0x80, 0xe0,
+ 0x65, 0x88, 0x28, 0x02, 0x01, 0x40, 0x00, 0x80, 0x2a, 0x42, 0x9c, 0xe1,
+ 0x28, 0x49, 0x60, 0xef, 0x96, 0x4d, 0x9c, 0xe1, 0x01, 0x81, 0x06, 0x98,
+ 0xd5, 0x81, 0x09, 0x0e, 0xa1, 0x64, 0x01, 0xc0, 0x4a, 0x40, 0x88, 0xe0,
+ 0x85, 0x80, 0xb8, 0x77, 0xfc, 0xef, 0x35, 0x81, 0xc8, 0x77, 0xfc, 0xef,
+ 0x08, 0x98, 0x4a, 0x00, 0xfc, 0xfb, 0x55, 0xfc, 0xe8, 0x4a, 0x60, 0xef,
+ 0x1a, 0x44, 0x9c, 0xe1, 0x35, 0x81, 0x1a, 0x4e, 0x9c, 0xe9, 0x1c, 0x00,
+ 0x00, 0xe2, 0x0c, 0x0c, 0x1c, 0xf6, 0x93, 0xdd, 0x0d, 0xc3, 0x1a, 0x41,
+ 0x08, 0xe4, 0x0a, 0x40, 0x84, 0xe1, 0x0c, 0x00, 0x00, 0xe2, 0x93, 0xdd,
+ 0x4c, 0x04, 0x1c, 0xfa, 0x86, 0x52, 0xec, 0xe1, 0x08, 0xa6, 0x65, 0x12,
+ 0x24, 0xf8, 0x0e, 0x02, 0x99, 0x7a, 0x00, 0xc0, 0x00, 0x40, 0xa0, 0xf3,
+ 0x06, 0xa6, 0x0b, 0x8c, 0x08, 0x49, 0x00, 0xef, 0x85, 0x12, 0x28, 0xf8,
+ 0x02, 0x02, 0xfc, 0xed, 0xf6, 0x47, 0xfd, 0x6f, 0xe0, 0xff, 0x04, 0xe2,
+ 0x14, 0x04, 0xc0, 0xe0, 0x0f, 0x86, 0x2f, 0xa0, 0x0b, 0x8c, 0x2e, 0xe2,
+ 0x08, 0x48, 0x00, 0xef, 0x86, 0x02, 0x84, 0xfe, 0x0e, 0x05, 0x09, 0x7d,
+ 0x00, 0xc0, 0x05, 0x52, 0x08, 0xf8, 0x18, 0x7d, 0xfc, 0xef, 0x4a, 0x40,
+ 0x80, 0xe0, 0x09, 0x12, 0x04, 0xc0, 0x65, 0x12, 0x20, 0xf8, 0x00, 0x40,
+ 0x40, 0xdc, 0x01, 0x52, 0x04, 0xc0, 0x0e, 0x00, 0x41, 0x78, 0xf5, 0xc5,
+ 0x6d, 0xc0, 0xb5, 0x82, 0x05, 0x10, 0x10, 0xe0, 0x11, 0xf1, 0x0f, 0x82,
+ 0x05, 0x50, 0x10, 0xe0, 0x05, 0x10, 0x10, 0xe0, 0xfe, 0x02, 0xf0, 0xff,
+ 0x0f, 0x82, 0x85, 0x83, 0x15, 0x10, 0x10, 0xe0, 0x16, 0x00, 0x91, 0x6e,
+ 0x69, 0xcd, 0x21, 0xf1, 0x6d, 0xc1, 0x01, 0x83, 0x2f, 0x82, 0x26, 0x00,
+ 0x00, 0x80, 0x2f, 0xa0, 0x25, 0x50, 0x10, 0xe0, 0x05, 0x10, 0x10, 0xe0,
+ 0x11, 0xa1, 0xfe, 0x04, 0xf0, 0xff, 0x06, 0x42, 0x00, 0x80, 0x0f, 0x84,
+ 0x0f, 0xa2, 0x05, 0x50, 0x10, 0xe0, 0x16, 0x00, 0x91, 0x6e, 0x69, 0xcd,
+ 0x6d, 0xc1, 0x71, 0x8d, 0x16, 0x00, 0x79, 0x61, 0x2d, 0xcb, 0x86, 0x0e,
+ 0x00, 0x80, 0x6d, 0xc1, 0x56, 0x0e, 0x00, 0xc0, 0x0b, 0x8c, 0x1b, 0x8e,
+ 0x71, 0x52, 0x0c, 0xf8, 0x08, 0x43, 0x00, 0xef, 0x05, 0x52, 0x14, 0xf8,
+ 0x15, 0x10, 0x28, 0xe0, 0x70, 0x04, 0x04, 0xec, 0x31, 0xe1, 0x29, 0x9e,
+ 0x1f, 0x86, 0x1f, 0xa4, 0x15, 0x50, 0x28, 0xe0, 0x86, 0x42, 0x3c, 0xe0,
+ 0x0e, 0x04, 0x9d, 0x64, 0x9b, 0xc2, 0x05, 0x52, 0x1c, 0xf8, 0x78, 0xa6,
+ 0x48, 0x77, 0xfc, 0xef, 0x4a, 0x40, 0x80, 0xe0, 0x70, 0x4e, 0x10, 0xdc,
+ 0x1e, 0x00, 0x81, 0x70, 0xeb, 0xcb, 0x70, 0x4e, 0xec, 0x93, 0x6d, 0xc1,
+ 0x11, 0x85, 0x36, 0x02, 0x00, 0x80, 0x76, 0xa6, 0x11, 0x52, 0x10, 0xf8,
+ 0x05, 0x10, 0x40, 0xe0, 0xfe, 0x47, 0x0c, 0xff, 0x14, 0x04, 0xa0, 0xe0,
+ 0x0f, 0x86, 0x0f, 0xa4, 0x05, 0x50, 0x40, 0xe0, 0x05, 0x10, 0x28, 0xe0,
+ 0xfe, 0x47, 0xfd, 0x7f, 0xe3, 0xff, 0x14, 0x04, 0xd0, 0xe0, 0x0f, 0x86,
+ 0x2f, 0xa0, 0x20, 0x00, 0x01, 0x6c, 0x00, 0xd0, 0x05, 0x50, 0x28, 0xe0,
+ 0x0b, 0x8c, 0xf8, 0x7e, 0xfc, 0xee, 0x0e, 0x03, 0x59, 0x78, 0xf5, 0xc5,
+ 0x0d, 0xc2, 0x05, 0x52, 0x0c, 0xf8, 0x08, 0xa6, 0x46, 0x42, 0xb4, 0xe0,
+ 0x18, 0x84, 0x00, 0x40, 0xf4, 0x93, 0x00, 0x40, 0x08, 0xdc, 0x1b, 0xa1,
+ 0x06, 0xa6, 0x05, 0x10, 0x40, 0x80, 0x04, 0x00, 0x50, 0x9c, 0x65, 0x8a,
+ 0x05, 0x10, 0x44, 0xe0, 0xf6, 0x43, 0xfd, 0x6f, 0x00, 0xf8, 0x0f, 0x82,
+ 0x06, 0x02, 0x01, 0x60, 0x1e, 0xc0, 0x0f, 0xa2, 0x05, 0x50, 0x44, 0xe0,
+ 0x05, 0x10, 0x44, 0xe0, 0x0e, 0x02, 0x00, 0xf8, 0x0f, 0x82, 0x09, 0xf6,
+ 0x05, 0x50, 0x44, 0xe0, 0x05, 0x10, 0x40, 0xe0, 0x04, 0x00, 0x54, 0xfc,
+ 0x05, 0x50, 0x40, 0xe0, 0x05, 0x10, 0x40, 0xe0, 0x04, 0x00, 0xcc, 0xfc,
+ 0x05, 0x50, 0x40, 0xe0, 0x05, 0x10, 0x40, 0xe0, 0x04, 0x00, 0x4c, 0xfc,
+ 0x05, 0x50, 0x40, 0xe0, 0x05, 0x10, 0x40, 0xe0, 0x04, 0x00, 0xd0, 0xfc,
+ 0x05, 0x50, 0x40, 0xe0, 0x4c, 0x0c, 0x1c, 0xf2, 0x93, 0xdd, 0xc3, 0xc1,
+ 0xc6, 0x40, 0xfc, 0xe0, 0x04, 0x80, 0xc6, 0x44, 0x0c, 0xe1, 0x15, 0x04,
+ 0x0c, 0xf8, 0x0a, 0x80, 0x06, 0x07, 0x04, 0xe0, 0x03, 0x42, 0x48, 0xe1,
+ 0x46, 0x02, 0x40, 0xe2, 0x08, 0xc6, 0x44, 0x88, 0x06, 0x46, 0x0e, 0xe0,
+ 0x86, 0x01, 0x84, 0xe0, 0x33, 0x80, 0x39, 0x06, 0xd8, 0xef, 0x0a, 0x46,
+ 0x80, 0xe0, 0x31, 0xbf, 0x06, 0x06, 0x00, 0xc0, 0x31, 0x48, 0x60, 0xe0,
+ 0x34, 0x88, 0x49, 0x06, 0x40, 0xe1, 0x40, 0x48, 0x7c, 0xf3, 0x41, 0x46,
+ 0x40, 0xe1, 0x24, 0x8a, 0x39, 0x04, 0x10, 0xe0, 0x39, 0xc2, 0x31, 0x44,
+ 0x10, 0xe0, 0x14, 0xc4, 0x1b, 0xa5, 0x11, 0x83, 0x11, 0x40, 0x25, 0x6a,
+ 0x01, 0xc0, 0x08, 0x5c, 0x00, 0xda, 0x15, 0x00, 0xcc, 0xe0, 0x25, 0x00,
+ 0xf8, 0xe0, 0x1b, 0x85, 0x08, 0x5c, 0x00, 0x9a, 0x4e, 0x03, 0x01, 0x60,
+ 0x10, 0xc0, 0x29, 0x00, 0x1c, 0xe4, 0x18, 0x84, 0x20, 0x44, 0xf8, 0xf3,
+ 0x2f, 0xa2, 0x21, 0x40, 0x1c, 0xe4, 0x93, 0xdd, 0x0c, 0x00, 0x80, 0xfa,
+ 0x15, 0x00, 0x3c, 0xe0, 0x21, 0x81, 0x31, 0x85, 0x21, 0x42, 0x60, 0xe0,
+ 0x15, 0x00, 0x44, 0xe0, 0x31, 0x42, 0x40, 0xe1, 0x15, 0x00, 0x34, 0xe0,
+ 0x21, 0x42, 0x20, 0xe0, 0x15, 0x00, 0x34, 0xe0, 0xd6, 0x04, 0x10, 0xe0,
+ 0x23, 0x42, 0x30, 0xe0, 0x15, 0x00, 0x34, 0xe0, 0x86, 0x44, 0x04, 0xe0,
+ 0x23, 0x42, 0x38, 0xe0, 0x05, 0x00, 0x30, 0xe0, 0xc6, 0x02, 0x08, 0xe0,
+ 0x13, 0x40, 0x10, 0xe3, 0xe8, 0x56, 0x40, 0xef, 0x06, 0x40, 0x0c, 0xe1,
+ 0x04, 0x80, 0x06, 0x02, 0x94, 0xe0, 0x2b, 0x02, 0xc4, 0xea, 0x3b, 0x00,
+ 0x78, 0xe2, 0x20, 0x44, 0xfd, 0x73, 0x07, 0xc0, 0x30, 0x46, 0x01, 0x70,
+ 0xf8, 0xc0, 0x3f, 0xa4, 0x33, 0x40, 0x78, 0xe2, 0x0a, 0x84, 0x0c, 0x08,
+ 0x80, 0xf2, 0xf8, 0x3b, 0x3c, 0xff, 0xc3, 0xc1, 0x06, 0x40, 0x0c, 0xe1,
+ 0x04, 0x80, 0x1b, 0x00, 0x40, 0xe4, 0x19, 0xc2, 0x13, 0x40, 0x40, 0xe4,
+ 0x1b, 0x00, 0x40, 0xe4, 0x19, 0xc4, 0x13, 0x40, 0x40, 0xe4, 0x93, 0xdd,
+ 0xc6, 0x43, 0xec, 0xe0, 0x46, 0x41, 0xfc, 0xe0, 0x24, 0x84, 0x04, 0x80,
+ 0x31, 0x81, 0x4a, 0x44, 0x80, 0xe0, 0x86, 0x44, 0x0c, 0xe1, 0x09, 0x00,
+ 0x6c, 0xe0, 0xc4, 0x8a, 0x8e, 0x47, 0xfc, 0x9f, 0x01, 0x42, 0x51, 0x78,
+ 0x0c, 0xc0, 0x31, 0x58, 0x90, 0xe0, 0x34, 0x8a, 0x41, 0xbf, 0x06, 0x08,
+ 0x00, 0xc0, 0x41, 0x46, 0xa0, 0xe0, 0x34, 0x8a, 0x51, 0x81, 0xf6, 0x0b,
+ 0x00, 0xc0, 0x51, 0x46, 0xd0, 0xe0, 0x34, 0x8a, 0x01, 0xbf, 0x51, 0x46,
+ 0xe0, 0xe0, 0x44, 0x84, 0x0a, 0x48, 0x84, 0xe0, 0x75, 0x86, 0x54, 0xca,
+ 0x49, 0x88, 0x44, 0x06, 0x88, 0xe1, 0x36, 0x94, 0x4a, 0x46, 0x80, 0xe0,
+ 0x34, 0xca, 0x47, 0xc6, 0x11, 0x8d, 0x41, 0x46, 0xd0, 0xe0, 0x34, 0x88,
+ 0x76, 0x02, 0x00, 0xc0, 0x06, 0x00, 0x00, 0xc0, 0x16, 0x8c, 0x14, 0x88,
+ 0x01, 0x42, 0xc0, 0xe1, 0x01, 0x42, 0xe0, 0xe1, 0x01, 0x42, 0xf0, 0xe1,
+ 0x93, 0xdd, 0x34, 0xca, 0x41, 0x85, 0x46, 0x8c, 0x34, 0xca, 0x06, 0x48,
+ 0x00, 0xe0, 0x41, 0x46, 0xd0, 0xe0, 0x34, 0x88, 0x41, 0x83, 0x46, 0x8c,
+ 0x34, 0x88, 0x01, 0x46, 0xc0, 0xe1, 0x01, 0x46, 0xe0, 0xe1, 0x01, 0x46,
+ 0xf0, 0xe1, 0x09, 0x02, 0x20, 0xe0, 0x14, 0xca, 0x03, 0x42, 0x58, 0xe0,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x4c, 0x04, 0x04, 0xfa, 0x46, 0x4e, 0x08, 0xe1,
+ 0x06, 0x4c, 0x0c, 0xe1, 0x0a, 0x9e, 0x14, 0x98, 0x05, 0x42, 0x44, 0xe0,
+ 0x10, 0x00, 0xe1, 0x65, 0x03, 0xc0, 0x78, 0x41, 0x00, 0xe8, 0x08, 0x9c,
+ 0x0b, 0xa1, 0x04, 0x98, 0x06, 0x02, 0x10, 0x80, 0x13, 0x40, 0xf8, 0x86,
+ 0x65, 0x82, 0x00, 0x00, 0xe1, 0x65, 0x03, 0xc0, 0xa8, 0x40, 0x00, 0xe8,
+ 0x14, 0x98, 0x04, 0x00, 0xa0, 0xfc, 0x03, 0x42, 0x00, 0xe7, 0x4c, 0x0c,
+ 0x04, 0xf2, 0x93, 0xdd, 0x0a, 0x80, 0x93, 0xdd, 0x0c, 0x04, 0x00, 0xfa,
+ 0x06, 0x02, 0xec, 0xe1, 0x64, 0x84, 0x15, 0x0c, 0x2c, 0xe0, 0x14, 0x02,
+ 0xa0, 0xfc, 0x15, 0x4c, 0x2c, 0xe0, 0xd8, 0x40, 0x00, 0xe8, 0x14, 0xd8,
+ 0x09, 0x82, 0x14, 0x02, 0x00, 0xfc, 0x1f, 0xa0, 0x1e, 0xd8, 0x01, 0x85,
+ 0x0c, 0x0c, 0x00, 0xf2, 0xe8, 0x32, 0x2c, 0xff, 0x93, 0xdd, 0xc3, 0xc1,
+ 0x0c, 0x04, 0x00, 0xfa, 0x6b, 0x80, 0xf6, 0x01, 0x94, 0xe0, 0x08, 0x80,
+ 0x4a, 0x40, 0x80, 0xe0, 0x45, 0x86, 0x06, 0x40, 0x0c, 0xe1, 0x04, 0x80,
+ 0xc6, 0x02, 0x40, 0xe2, 0x09, 0x00, 0xd0, 0xe0, 0x14, 0x84, 0x1b, 0xa5,
+ 0x15, 0x84, 0x07, 0xc5, 0x09, 0x82, 0x18, 0x41, 0x00, 0xe8, 0x46, 0x43,
+ 0xfc, 0xe0, 0x14, 0x84, 0x19, 0x02, 0xd8, 0xe0, 0x19, 0x82, 0x0b, 0x83,
+ 0x16, 0x00, 0x00, 0xc0, 0x01, 0x4c, 0x00, 0xc0, 0x0c, 0x0c, 0x00, 0xf2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x4a, 0x00, 0x00, 0xe0, 0x0c, 0x00, 0x00, 0xe2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x46, 0x40, 0x84, 0xe0, 0x11, 0xaf, 0x13, 0x40,
+ 0x6c, 0xec, 0x11, 0xb3, 0x13, 0x40, 0x70, 0xec, 0xc6, 0x43, 0xf0, 0xe0,
+ 0x13, 0x40, 0xdc, 0xec, 0xc6, 0x02, 0x24, 0xe0, 0x1c, 0x80, 0x93, 0xdd,
+ 0x4c, 0x00, 0x00, 0xfa, 0xc8, 0x60, 0x7c, 0xef, 0xe8, 0x61, 0x7c, 0xef,
+ 0x28, 0x7e, 0x80, 0xef, 0xc6, 0x40, 0x98, 0xe1, 0x11, 0x83, 0x16, 0x80,
+ 0x46, 0x01, 0x10, 0xe1, 0x11, 0x81, 0x16, 0x80, 0x4c, 0x08, 0x00, 0xf2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x0c, 0xfa, 0x6b, 0x80, 0x04, 0x98,
+ 0x7b, 0x82, 0x56, 0x42, 0xb4, 0xe0, 0x88, 0x84, 0x05, 0x00, 0x10, 0xe0,
+ 0x09, 0x86, 0x0b, 0xa5, 0x46, 0x02, 0x00, 0x80, 0x06, 0x05, 0x00, 0x80,
+ 0x25, 0x82, 0x0b, 0xa3, 0xa5, 0x80, 0x0b, 0xa1, 0x06, 0x00, 0xf4, 0xef,
+ 0xd5, 0x84, 0x11, 0x85, 0x21, 0x91, 0x0b, 0x8e, 0x88, 0x74, 0x10, 0xef,
+ 0x0b, 0xa1, 0xf5, 0x82, 0x0a, 0x9e, 0x1a, 0x9c, 0x24, 0x98, 0x07, 0xe0,
+ 0x0f, 0xa2, 0x0e, 0xca, 0x0a, 0xde, 0x1a, 0xdc, 0x24, 0x98, 0x03, 0xb0,
+ 0x07, 0xe0, 0x0f, 0xa2, 0x0e, 0xc8, 0x01, 0x81, 0x0c, 0x0c, 0x0c, 0xf2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x7c, 0xfa, 0x46, 0x42, 0x9c, 0xe0,
+ 0x0b, 0x02, 0x04, 0xe3, 0xf0, 0x1e, 0x30, 0xec, 0x0b, 0xa3, 0x35, 0x96,
+ 0x8e, 0x01, 0x01, 0x60, 0x10, 0xc0, 0x0e, 0xfc, 0xc6, 0x05, 0xd0, 0xe1,
+ 0x0b, 0x82, 0x31, 0x81, 0x10, 0x16, 0x00, 0xe5, 0x20, 0x10, 0x20, 0xe7,
+ 0x0e, 0xbe, 0xb5, 0x85, 0x94, 0xfc, 0xa4, 0xbe, 0x82, 0x4c, 0x9c, 0xf0,
+ 0x05, 0x0c, 0x40, 0xe0, 0x11, 0x89, 0x93, 0x8e, 0xa3, 0x8e, 0x58, 0x44,
+ 0x00, 0xe8, 0x15, 0x0c, 0xc0, 0xf8, 0x04, 0x0c, 0x80, 0xfb, 0x0c, 0xed,
+ 0x0b, 0x82, 0x1b, 0x8c, 0x48, 0x44, 0x00, 0xe8, 0x15, 0x10, 0x1c, 0xfc,
+ 0x0e, 0xa8, 0x0b, 0x82, 0x1b, 0x8c, 0xd8, 0x43, 0x00, 0xe8, 0x71, 0x88,
+ 0x0e, 0xa4, 0x0a, 0x0e, 0x40, 0xe0, 0x35, 0xf8, 0x04, 0xbe, 0x14, 0xbc,
+ 0x81, 0xa0, 0x03, 0x8e, 0x0e, 0xbe, 0x04, 0xfc, 0x11, 0x82, 0x3b, 0x82,
+ 0x03, 0x8e, 0x0e, 0xfc, 0x3b, 0xa9, 0x06, 0x0e, 0x00, 0xc0, 0x35, 0x5e,
+ 0x00, 0xc0, 0xd5, 0xfa, 0xc6, 0x01, 0xd0, 0xe1, 0x7b, 0x80, 0x04, 0x9e,
+ 0x11, 0x91, 0x98, 0x41, 0x00, 0xe8, 0x24, 0x9c, 0x46, 0x42, 0x9c, 0xe0,
+ 0x6b, 0x82, 0x03, 0x4c, 0xc4, 0xe0, 0x11, 0x91, 0x0b, 0x84, 0xf8, 0x40,
+ 0x00, 0xe8, 0x19, 0x0e, 0x20, 0xe5, 0x03, 0x4c, 0xc0, 0xe0, 0x0b, 0x82,
+ 0x08, 0x72, 0xfc, 0xef, 0x01, 0x4c, 0x24, 0xf9, 0xf1, 0x98, 0x0c, 0x0c,
+ 0x7c, 0xf2, 0x93, 0xdd, 0x4c, 0x00, 0x00, 0xfa, 0x48, 0x65, 0x2c, 0xef,
+ 0x4c, 0x08, 0x00, 0xf2, 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x00, 0xfa,
+ 0x6b, 0x82, 0x78, 0x6e, 0xfc, 0xee, 0x46, 0x42, 0xec, 0xe0, 0x24, 0x84,
+ 0x24, 0x02, 0x80, 0xfa, 0x1d, 0xcc, 0x11, 0x83, 0xf5, 0x82, 0x24, 0x02,
+ 0xa0, 0xe1, 0x14, 0x02, 0x80, 0xfa, 0x1d, 0xcc, 0x11, 0x85, 0x15, 0x82,
+ 0x27, 0xe1, 0x24, 0x02, 0x80, 0xfa, 0x1d, 0xcc, 0x11, 0x89, 0x86, 0x02,
+ 0x00, 0x80, 0x0c, 0x0c, 0x00, 0xf2, 0x18, 0x17, 0xfc, 0xfe, 0xc3, 0xc1,
+ 0x0c, 0x04, 0x00, 0xfa, 0x06, 0x41, 0x8c, 0xe0, 0x1b, 0x00, 0xec, 0xe4,
+ 0x1b, 0xa3, 0x75, 0x84, 0x11, 0x81, 0x8e, 0x05, 0x01, 0x60, 0x10, 0xc0,
+ 0x00, 0x06, 0xc0, 0xe5, 0x95, 0x81, 0x44, 0x88, 0x1d, 0xee, 0x75, 0x80,
+ 0x4e, 0xc1, 0x25, 0x81, 0x4e, 0xcd, 0x21, 0x88, 0x11, 0x82, 0x0a, 0x02,
+ 0x40, 0xe0, 0xd5, 0xfc, 0x56, 0x00, 0x00, 0xe1, 0x18, 0x80, 0x1b, 0xa1,
+ 0xc5, 0x84, 0x08, 0x82, 0x4a, 0x00, 0xfc, 0xfb, 0x45, 0x84, 0x86, 0x4d,
+ 0x84, 0xe1, 0x04, 0x98, 0x05, 0x00, 0x10, 0xe0, 0x4a, 0x40, 0x80, 0xe0,
+ 0x45, 0x82, 0x11, 0x81, 0x0b, 0x8c, 0x58, 0x76, 0x28, 0xef, 0x0b, 0x8c,
+ 0x0c, 0x0c, 0x00, 0xf2, 0x88, 0x35, 0x28, 0xff, 0x0c, 0x0c, 0x00, 0xf2,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x46, 0x41, 0xfc, 0xe0, 0x04, 0x80, 0x09, 0x00,
+ 0x80, 0xe0, 0x09, 0x9e, 0x0b, 0xa3, 0x75, 0x82, 0x46, 0x41, 0x80, 0xe1,
+ 0x04, 0x80, 0xc6, 0x42, 0x8c, 0xe0, 0x04, 0xc2, 0x00, 0x40, 0x00, 0xf2,
+ 0x07, 0xcf, 0x06, 0x84, 0x06, 0x40, 0x84, 0xe0, 0x15, 0x00, 0x28, 0xe5,
+ 0x1c, 0xc2, 0x93, 0xdd, 0x0b, 0xa1, 0xc6, 0x00, 0xa0, 0xe1, 0x15, 0x00,
+ 0x04, 0xf8, 0x05, 0x84, 0x21, 0x8b, 0x2c, 0x84, 0x14, 0x80, 0x2c, 0x84,
+ 0x14, 0x82, 0x2c, 0x84, 0x15, 0x00, 0x10, 0xe0, 0x21, 0xa1, 0x21, 0x42,
+ 0x10, 0xe0, 0x05, 0x00, 0x14, 0xe0, 0x01, 0x88, 0x75, 0x83, 0x21, 0x85,
+ 0x2c, 0x84, 0x14, 0x80, 0x06, 0x46, 0x00, 0xe0, 0x2c, 0x84, 0x14, 0x82,
+ 0x2c, 0x84, 0x14, 0xc0, 0x21, 0xa1, 0x21, 0x42, 0x20, 0xe0, 0x14, 0xc2,
+ 0x31, 0x42, 0x20, 0xe0, 0x15, 0x00, 0x10, 0xe0, 0x21, 0x42, 0x20, 0xe0,
+ 0x05, 0x00, 0x14, 0xe0, 0x01, 0x90, 0x06, 0x42, 0x00, 0xe0, 0x16, 0x80,
+ 0x93, 0xdd, 0xc3, 0xc1, 0x0c, 0x04, 0x7c, 0xfa, 0x4a, 0x40, 0x80, 0xe0,
+ 0xf0, 0x1e, 0x30, 0xec, 0xe5, 0x82, 0xa6, 0x40, 0x00, 0xe1, 0x1a, 0x80,
+ 0x2a, 0xc0, 0x3a, 0xc2, 0x13, 0x40, 0x10, 0xe0, 0x1a, 0x82, 0x23, 0x40,
+ 0x18, 0xe0, 0x33, 0x40, 0x1c, 0xe0, 0x13, 0x40, 0x14, 0xe0, 0xf8, 0x61,
+ 0x68, 0xef, 0xc6, 0x13, 0x00, 0xe1, 0x15, 0x12, 0x28, 0xf8, 0x0b, 0x02,
+ 0x2c, 0xe0, 0x1b, 0x02, 0x24, 0xe0, 0x8a, 0x00, 0xa5, 0x64, 0x03, 0xc0,
+ 0x35, 0x82, 0x0a, 0x4e, 0x9c, 0xe1, 0x1a, 0x03, 0x11, 0x6f, 0x02, 0xc0,
+ 0xe8, 0x13, 0x01, 0x20, 0x00, 0xc0, 0x1f, 0xa0, 0x5a, 0x42, 0x80, 0xe0,
+ 0x0a, 0x4e, 0x9c, 0xe1, 0x68, 0x13, 0x00, 0xa0, 0x09, 0x12, 0x78, 0xf8,
+ 0xa1, 0x81, 0xf0, 0x02, 0x10, 0xe4, 0x07, 0xc4, 0x0c, 0xfc, 0xf0, 0x00,
+ 0x20, 0xe4, 0xa6, 0x91, 0xa8, 0x53, 0x74, 0xef, 0x05, 0x12, 0x30, 0xf8,
+ 0x25, 0x12, 0x28, 0xf8, 0x61, 0x87, 0x09, 0x00, 0x48, 0xe0, 0x81, 0x85,
+ 0x09, 0x86, 0x0b, 0xa7, 0x26, 0x0c, 0x00, 0xc0, 0x0b, 0xa1, 0x0b, 0x04,
+ 0x28, 0xe0, 0x16, 0x0c, 0x00, 0x80, 0x03, 0x52, 0x04, 0xf8, 0x0b, 0x04,
+ 0x20, 0xe0, 0x0c, 0xa6, 0x1b, 0x04, 0x2c, 0xe0, 0x3b, 0x04, 0x28, 0xe0,
+ 0x4b, 0x04, 0x20, 0xe0, 0x13, 0x86, 0x3b, 0x04, 0x24, 0xe0, 0x10, 0x0a,
+ 0x04, 0xec, 0x1a, 0xfc, 0x33, 0x88, 0x30, 0x06, 0x04, 0xec, 0x12, 0x4e,
+ 0x94, 0xf0, 0x32, 0x48, 0x84, 0xf0, 0x4c, 0xe4, 0x7c, 0xa4, 0xcb, 0x04,
+ 0x28, 0xe0, 0x14, 0x08, 0x84, 0xe1, 0xcd, 0xc9, 0xc2, 0x58, 0x90, 0x91,
+ 0x42, 0x4e, 0x94, 0x90, 0xc3, 0x52, 0x04, 0x98, 0x73, 0x52, 0x00, 0x80,
+ 0x5b, 0x04, 0x20, 0xe0, 0x5d, 0xc9, 0x52, 0x40, 0x90, 0x91, 0x42, 0x48,
+ 0x8c, 0x90, 0x03, 0x52, 0x04, 0x80, 0x43, 0x52, 0x08, 0x80, 0x3b, 0x04,
+ 0x2c, 0xe0, 0x49, 0x04, 0xb8, 0xe0, 0x33, 0x52, 0x1c, 0xf8, 0x2b, 0x04,
+ 0x24, 0xe0, 0x4b, 0xab, 0x23, 0x52, 0x18, 0xf8, 0x65, 0x8a, 0x4b, 0xa9,
+ 0xe5, 0x90, 0x4b, 0xa7, 0x22, 0x44, 0x84, 0xd0, 0x32, 0x46, 0x84, 0xd0,
+ 0x33, 0x52, 0x1c, 0xd8, 0x23, 0x52, 0x18, 0xd8, 0x95, 0x96, 0x20, 0x44,
+ 0xf9, 0x73, 0xff, 0xc0, 0x27, 0xc3, 0x23, 0x82, 0x23, 0x52, 0x18, 0xf8,
+ 0x24, 0x02, 0x80, 0xfb, 0x04, 0x00, 0x80, 0xfb, 0x2b, 0x8c, 0x58, 0x52,
+ 0x74, 0xef, 0x1b, 0x12, 0x1c, 0xf8, 0x2a, 0xfc, 0x0c, 0xe4, 0x17, 0xc3,
+ 0x13, 0x84, 0x13, 0x52, 0x1c, 0xf8, 0x0b, 0x12, 0x04, 0xf8, 0x14, 0x02,
+ 0x80, 0xfb, 0x2b, 0x8c, 0x68, 0x51, 0x74, 0xef, 0xc5, 0x87, 0x20, 0x44,
+ 0xe1, 0x73, 0xff, 0xc0, 0x27, 0xc7, 0x23, 0x82, 0x23, 0x52, 0x18, 0xf8,
+ 0x24, 0x02, 0x80, 0xfb, 0x04, 0x00, 0x80, 0xfb, 0x2b, 0x8c, 0x78, 0x57,
+ 0x74, 0xef, 0x1b, 0x12, 0x1c, 0xf8, 0x2a, 0xfc, 0x0c, 0xe4, 0x17, 0xc7,
+ 0x13, 0x84, 0x13, 0x52, 0x1c, 0xf8, 0x0b, 0x12, 0x04, 0xf8, 0x14, 0x02,
+ 0x80, 0xfb, 0x2b, 0x8c, 0x88, 0x56, 0x74, 0xef, 0xe5, 0x83, 0x20, 0x44,
+ 0xf1, 0x73, 0xff, 0xc0, 0x27, 0xc5, 0x23, 0x82, 0x23, 0x52, 0x18, 0xf8,
+ 0x24, 0x02, 0x80, 0xfb, 0x04, 0x00, 0x80, 0xfb, 0x2b, 0x8c, 0x18, 0x52,
+ 0x74, 0xef, 0x1b, 0x12, 0x1c, 0xf8, 0x2a, 0xfc, 0x0c, 0xe4, 0x17, 0xc5,
+ 0x13, 0x84, 0x13, 0x52, 0x1c, 0xf8, 0x0b, 0x12, 0x04, 0xf8, 0x14, 0x02,
+ 0x80, 0xfb, 0x2b, 0x8c, 0x28, 0x51, 0x74, 0xef, 0x7b, 0x80, 0x7c, 0xa4,
+ 0x08, 0x91, 0xa3, 0x52, 0x1c, 0xe0, 0xa3, 0x52, 0x24, 0xe0, 0x0b, 0xa1,
+ 0x83, 0x52, 0x1c, 0x80, 0x83, 0x52, 0x24, 0x80, 0x89, 0x12, 0x78, 0xf8,
+ 0xf6, 0x57, 0xfc, 0xef, 0x6b, 0x12, 0x1c, 0xf8, 0xab, 0x12, 0x18, 0xf8,
+ 0xd6, 0x57, 0xfc, 0x8f, 0x8b, 0xa3, 0xa0, 0x40, 0x00, 0x9c, 0xa5, 0x86,
+ 0x64, 0x00, 0x80, 0xfb, 0x1b, 0x90, 0xf8, 0x7d, 0xf8, 0xee, 0x6b, 0x80,
+ 0xa4, 0x00, 0x80, 0xfb, 0x1b, 0x90, 0x98, 0x7d, 0xf8, 0xee, 0x15, 0x12,
+ 0x28, 0xf8, 0x19, 0x02, 0xb8, 0xe0, 0x1b, 0xad, 0x95, 0x82, 0x1a, 0xa6,
+ 0xa0, 0x44, 0xf9, 0x73, 0xff, 0xc0, 0x27, 0xc3, 0x13, 0x94, 0x10, 0x02,
+ 0x08, 0xec, 0x1c, 0xe4, 0x23, 0x52, 0x18, 0xf8, 0x1b, 0x12, 0x04, 0xf8,
+ 0x03, 0x96, 0x03, 0x52, 0x28, 0xe0, 0x1c, 0xe6, 0x0a, 0xa6, 0x1a, 0xe4,
+ 0x63, 0x96, 0x63, 0x52, 0x20, 0xe0, 0x73, 0x52, 0x10, 0xe0, 0x03, 0x52,
+ 0x14, 0xe0, 0x13, 0x52, 0x18, 0xe0, 0x98, 0x52, 0x74, 0xef, 0x09, 0x12,
+ 0x8c, 0xe0, 0x0b, 0xa1, 0x01, 0x81, 0x01, 0x52, 0x90, 0xe0, 0x65, 0x82,
+ 0x05, 0x12, 0x30, 0xf8, 0x09, 0x00, 0xa8, 0xe0, 0x0a, 0x00, 0x0c, 0xf8,
+ 0x16, 0x00, 0x00, 0xc0, 0x01, 0x52, 0x90, 0xc0, 0x46, 0x41, 0x84, 0xe0,
+ 0x0a, 0x80, 0x0a, 0x4e, 0x9c, 0xe9, 0x1a, 0x00, 0x08, 0xe0, 0x38, 0x01,
+ 0x01, 0x20, 0x00, 0xc0, 0x0b, 0x12, 0x1c, 0xe0, 0x1b, 0x12, 0x24, 0xe0,
+ 0x2b, 0x12, 0x28, 0xe0, 0x03, 0x52, 0x2c, 0xe0, 0x0b, 0x12, 0x20, 0xe0,
+ 0x13, 0x52, 0x34, 0xe0, 0x23, 0x52, 0x38, 0xe0, 0x03, 0x52, 0x30, 0xe0,
+ 0x0c, 0x00, 0x00, 0xe2, 0xf1, 0x98, 0x0c, 0x0c, 0x7c, 0xf2, 0x93, 0xdd,
+ 0x13, 0xa9, 0x00, 0x00, 0xa8, 0xc1, 0x40, 0x00, 0x68, 0x04, 0xa0, 0xe0,
+ 0x40, 0x6c, 0x40, 0x00, 0xe8, 0x34, 0xc8, 0xe0, 0xfc, 0x91, 0x40, 0x00,
+ 0x68, 0x1f, 0xb8, 0xe0, 0x30, 0x16, 0x41, 0x00, 0x28, 0x39, 0x74, 0xe0,
+ 0xb0, 0x7e, 0x40, 0x00, 0xe8, 0x38, 0xc0, 0xe0, 0x30, 0x04, 0x41, 0x00,
+ 0x48, 0x1b, 0x80, 0xe0, 0x30, 0x2e, 0x40, 0x00, 0x88, 0x0c, 0xec, 0xe0,
+ 0x10, 0x9f, 0x40, 0x00, 0x88, 0x08, 0xb4, 0xe0, 0x10, 0x01, 0x41, 0x00,
+ 0x68, 0x01, 0x84, 0xe0, 0x54, 0xd6, 0x40, 0x00, 0xc8, 0x1a, 0x98, 0xe0,
+ 0xd0, 0xc8, 0x40, 0x00, 0x68, 0x08, 0xa0, 0xe0, 0x80, 0xdb, 0x40, 0x00,
+ 0xe8, 0x35, 0x94, 0xe0, 0x74, 0xff, 0x40, 0x00, 0xa8, 0x11, 0x80, 0xe0,
+ 0xf8, 0x89, 0x40, 0x00, 0x88, 0x16, 0xbc, 0xe0, 0x00, 0x90, 0x40, 0x00,
+ 0x08, 0x35, 0xb8, 0xe0, 0x7c, 0x73, 0x40, 0x00, 0x88, 0x1b, 0xc8, 0xe0,
+ 0xf4, 0xff, 0x40, 0x00, 0x68, 0x39, 0x80, 0xe0, 0xa4, 0xa4, 0x40, 0x00,
+ 0xa8, 0x16, 0xb0, 0xe0, 0x50, 0xc9, 0x40, 0x00, 0x28, 0x3a, 0x98, 0xe0,
+ 0x00, 0xb9, 0x00, 0x00, 0xb6, 0x85, 0x00, 0x00,
+};
+
+static const char * const vd55g1_tp_menu[] = {
+ "Disabled",
+ "Diagonal Grey Scale",
+ "Pseudo-random Noise",
+};
+
+static const s64 vd55g1_ev_bias_menu[] = {
+ -3000, -2500, -2000, -1500, -1000, -500,
+ 0,
+ 500, 1000, 1500, 2000, 2500, 3000,
+};
+
+static const char * const vd55g1_hdr_menu[] = {
+ "No HDR",
+ /*
+ * This mode acquires two frames on the sensor; the first is discarded
+ * and only used for auto exposure data, while the second is output to
+ * the host.
+ */
+ "Internal subtraction",
+};
+
+static const char * const vd55g1_supply_name[] = {
+ "vcore",
+ "vddio",
+ "vana",
+};
+
+enum vd55g1_hdr_mode {
+ VD55G1_NO_HDR,
+ VD55G1_HDR_SUB,
+};
+
+struct vd55g1_mode {
+ u32 width;
+ u32 height;
+};
+
+struct vd55g1_fmt_desc {
+ u32 code;
+ u8 bpp;
+ u8 data_type;
+};
+
+static const struct vd55g1_fmt_desc vd55g1_mbus_codes[] = {
+ {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .bpp = 8,
+ .data_type = MIPI_CSI2_DT_RAW8,
+ },
+ {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .bpp = 10,
+ .data_type = MIPI_CSI2_DT_RAW10,
+ },
+};
+
+static const struct vd55g1_mode vd55g1_supported_modes[] = {
+ {
+ .width = VD55G1_WIDTH,
+ .height = VD55G1_HEIGHT,
+ },
+ {
+ .width = 800,
+ .height = VD55G1_HEIGHT,
+ },
+ {
+ .width = 800,
+ .height = 600,
+ },
+ {
+ .width = 640,
+ .height = 480,
+ },
+ {
+ .width = 320,
+ .height = 240,
+ },
+};
+
+enum vd55g1_expo_state {
+ VD55G1_EXP_AUTO,
+ VD55G1_EXP_FREEZE,
+ VD55G1_EXP_MANUAL,
+ VD55G1_EXP_SINGLE_STEP,
+ VD55G1_EXP_BYPASS,
+};
+
+struct vd55g1_vblank_limits {
+ u16 min;
+ u16 def;
+ u16 max;
+};
+
+struct vd55g1 {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(vd55g1_supply_name)];
+ struct gpio_desc *reset_gpio;
+ struct clk *xclk;
+ struct regmap *regmap;
+ u32 xclk_freq;
+ u16 oif_ctrl;
+ u8 gpios[VD55G1_NB_GPIOS];
+ unsigned long ext_leds_mask;
+ u32 mipi_rate;
+ u32 pixel_clock;
+ u64 link_freq;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *pixel_rate_ctrl;
+ struct v4l2_ctrl *vblank_ctrl;
+ struct v4l2_ctrl *hblank_ctrl;
+ struct {
+ struct v4l2_ctrl *hflip_ctrl;
+ struct v4l2_ctrl *vflip_ctrl;
+ };
+ struct v4l2_ctrl *patgen_ctrl;
+ struct {
+ struct v4l2_ctrl *ae_ctrl;
+ struct v4l2_ctrl *expo_ctrl;
+ struct v4l2_ctrl *again_ctrl;
+ struct v4l2_ctrl *dgain_ctrl;
+ };
+ struct v4l2_ctrl *ae_lock_ctrl;
+ struct v4l2_ctrl *ae_bias_ctrl;
+ struct v4l2_ctrl *led_ctrl;
+ struct v4l2_ctrl *hdr_ctrl;
+};
+
+static inline struct vd55g1 *to_vd55g1(struct v4l2_subdev *sd)
+{
+ return container_of_const(sd, struct vd55g1, sd);
+}
+
+static inline struct vd55g1 *ctrl_to_vd55g1(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = &container_of_const(ctrl->handler,
+ struct vd55g1,
+ ctrl_handler)->sd;
+
+ return to_vd55g1(sd);
+}
+
+static const struct vd55g1_fmt_desc *vd55g1_get_fmt_desc(struct vd55g1 *sensor,
+ u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vd55g1_mbus_codes); i++) {
+ if (vd55g1_mbus_codes[i].code == code)
+ return &vd55g1_mbus_codes[i];
+ }
+
+ /* Should never happen */
+ dev_warn(sensor->dev, "Unsupported code %d, defaulting to 8 bpp\n", code);
+
+ return &vd55g1_mbus_codes[0];
+}
+
+static s32 vd55g1_get_pixel_rate(struct vd55g1 *sensor,
+ struct v4l2_mbus_framefmt *format)
+{
+ return sensor->mipi_rate /
+ vd55g1_get_fmt_desc(sensor, format->code)->bpp;
+}
+
+static unsigned int vd55g1_get_hblank_min(struct vd55g1 *sensor,
+ struct v4l2_mbus_framefmt *format,
+ struct v4l2_rect *crop)
+{
+ u32 mipi_req_line_time;
+ u32 mipi_req_line_length;
+ u32 min_line_length;
+
+ /* MIPI required time */
+ mipi_req_line_time = (crop->width *
+ vd55g1_get_fmt_desc(sensor, format->code)->bpp +
+ VD55G1_MIPI_MARGIN) /
+ (sensor->mipi_rate / MEGA);
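+ /* line time is in us, so scaling by the pixel clock in MHz gives pixels */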
+ mipi_req_line_length = mipi_req_line_time * sensor->pixel_clock /
+ HZ_PER_MHZ;
+
+ /* Absolute time required for ADCs to convert pixels */
+ min_line_length = VD55G1_LINE_LENGTH_MIN;
+ if (sensor->hdr_ctrl->val == VD55G1_HDR_SUB)
+ min_line_length = VD55G1_LINE_LENGTH_SUB_MIN;
+
+ /* Respect both constraints */
+ min_line_length = max(min_line_length, mipi_req_line_length);
+
+ return min_line_length - crop->width;
+}
+
+static void vd55g1_get_vblank_limits(struct vd55g1 *sensor,
+ struct v4l2_rect *crop,
+ struct vd55g1_vblank_limits *limits)
+{
+ limits->min = VD55G1_VBLANK_MIN;
+ limits->def = VD55G1_FRAME_LENGTH_DEF - crop->height;
+ limits->max = VD55G1_VBLANK_MAX - crop->height;
+}
+
+#define vd55g1_read(sensor, reg, val, err) \
+ cci_read((sensor)->regmap, reg, val, err)
+
+#define vd55g1_write(sensor, reg, val, err) \
+ cci_write((sensor)->regmap, reg, val, err)
+
+static int vd55g1_write_array(struct vd55g1 *sensor, u32 reg, unsigned int len,
+ const u8 *array, int *err)
+{
+ unsigned int chunk_sz = 1024;
+ unsigned int sz;
+ int ret = 0;
+
+ if (err && *err)
+ return *err;
+
+ /*
+ * This loop isn't strictly necessary, but under certain conditions
+ * (platform, CPU load, etc.) the bulk write has been observed to time
+ * out.
+ */
+ while (len) {
+ sz = min(len, chunk_sz);
+ ret = regmap_bulk_write(sensor->regmap, reg, array, sz);
+ if (ret < 0)
+ goto out;
+ len -= sz;
+ reg += sz;
+ array += sz;
+ }
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int vd55g1_poll_reg(struct vd55g1 *sensor, u32 reg, u8 poll_val,
+ int *err)
+{
+ unsigned int val = 0;
+ int ret;
+
+ if (err && *err)
+ return *err;
+
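+ /* Poll every 2 ms, for up to 500 ms */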
+ ret = regmap_read_poll_timeout(sensor->regmap, CCI_REG_ADDR(reg), val,
+ (val == poll_val), 2000,
+ 500 * USEC_PER_MSEC);
+
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int vd55g1_wait_state(struct vd55g1 *sensor, int state, int *err)
+{
+ return vd55g1_poll_reg(sensor, VD55G1_REG_SYSTEM_FSM, state, err);
+}
+
+static int vd55g1_prepare_clock_tree(struct vd55g1 *sensor)
+{
+ u32 sys_clk, mipi_div, pixel_div;
+
+ if (sensor->xclk_freq < VD55G1_XCLK_FREQ_MIN ||
+ sensor->xclk_freq > VD55G1_XCLK_FREQ_MAX) {
+ dev_err(sensor->dev,
+ "Only %luMHz-%luMHz clock range supported. Provided %lu MHz\n",
+ VD55G1_XCLK_FREQ_MIN / HZ_PER_MHZ,
+ VD55G1_XCLK_FREQ_MAX / HZ_PER_MHZ,
+ sensor->xclk_freq / HZ_PER_MHZ);
+ return -EINVAL;
+ }
+
+ /* MIPI bus is double data rate */
+ sensor->mipi_rate = sensor->link_freq * 2;
+
+ if (sensor->mipi_rate < VD55G1_MIPI_RATE_MIN ||
+ sensor->mipi_rate > VD55G1_MIPI_RATE_MAX) {
+ dev_err(sensor->dev,
+ "Only %luMbps-%luMbps data rate range supported. Provided %lu Mbps\n",
+ VD55G1_MIPI_RATE_MIN / MEGA,
+ VD55G1_MIPI_RATE_MAX / MEGA,
+ sensor->mipi_rate / MEGA);
+ return -EINVAL;
+ }
+
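+
+ /*
+ * Derive the internal clocks from the MIPI rate; e.g. a 600 Mbps link
+ * gives mipi_div = 2, hence sys_clk = 1200 MHz, pixel_div = 8 and a
+ * 150 MHz pixel clock.
+ */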
+ if (sensor->mipi_rate <= 300 * MEGA)
+ mipi_div = 4;
+ else if (sensor->mipi_rate <= 600 * MEGA)
+ mipi_div = 2;
+ else
+ mipi_div = 1;
+
+ sys_clk = sensor->mipi_rate * mipi_div;
+
+ if (sys_clk <= 780 * HZ_PER_MHZ)
+ pixel_div = 5;
+ else if (sys_clk <= 900 * HZ_PER_MHZ)
+ pixel_div = 6;
+ else
+ pixel_div = 8;
+
+ sensor->pixel_clock = sys_clk / pixel_div;
+
+ return 0;
+}
+
+static int vd55g1_update_patgen(struct vd55g1 *sensor, u32 patgen_index)
+{
+ static const u8 index2val[] = {
+ 0x0, 0x22, 0x28
+ };
+ u32 pattern = index2val[patgen_index];
+ u32 reg = pattern << VD55G1_PATGEN_TYPE_SHIFT;
+ u8 duster = VD55G1_DUSTER_RING_ENABLE | VD55G1_DUSTER_DYN_ENABLE |
+ VD55G1_DUSTER_ENABLE;
+ int ret = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(index2val) != ARRAY_SIZE(vd55g1_tp_menu));
+
+ if (pattern != 0) {
+ reg |= VD55G1_PATGEN_ENABLE;
+ /* Disable the duster so it does not mess up the test pattern output */
+ duster = VD55G1_DUSTER_DISABLE;
+ }
+
+ vd55g1_write(sensor, VD55G1_REG_DUSTER_CTRL, duster, &ret);
+ vd55g1_write(sensor, VD55G1_REG_PATGEN_CTRL, reg, &ret);
+
+ return ret;
+}
+
+static int vd55g1_update_expo_cluster(struct vd55g1 *sensor, bool is_auto)
+{
+ enum vd55g1_expo_state expo_state = is_auto ? VD55G1_EXP_AUTO :
+ VD55G1_EXP_MANUAL;
+ int ret = 0;
+
+ if (sensor->ae_ctrl->is_new)
+ vd55g1_write(sensor, VD55G1_REG_EXP_MODE(0), expo_state, &ret);
+
+ if (sensor->hdr_ctrl->val == VD55G1_HDR_SUB &&
+ sensor->hdr_ctrl->is_new) {
+ vd55g1_write(sensor, VD55G1_REG_EXP_MODE(1), VD55G1_EXP_BYPASS,
+ &ret);
+ if (ret)
+ return ret;
+ }
+
+ if (!is_auto && sensor->expo_ctrl->is_new)
+ vd55g1_write(sensor, VD55G1_REG_MANUAL_COARSE_EXPOSURE,
+ sensor->expo_ctrl->val, &ret);
+
+ if (!is_auto && sensor->again_ctrl->is_new)
+ vd55g1_write(sensor, VD55G1_REG_MANUAL_ANALOG_GAIN,
+ sensor->again_ctrl->val, &ret);
+
+ if (!is_auto && sensor->dgain_ctrl->is_new)
+ vd55g1_write(sensor, VD55G1_REG_MANUAL_DIGITAL_GAIN,
+ sensor->dgain_ctrl->val, &ret);
+
+ return ret;
+}
+
+static int vd55g1_lock_exposure(struct vd55g1 *sensor, u32 lock_val)
+{
+ bool ae_lock = lock_val & V4L2_LOCK_EXPOSURE;
+ enum vd55g1_expo_state expo_state = ae_lock ? VD55G1_EXP_FREEZE :
+ VD55G1_EXP_AUTO;
+ int ret = 0;
+
+ if (sensor->ae_ctrl->val == V4L2_EXPOSURE_AUTO)
+ vd55g1_write(sensor, VD55G1_REG_EXP_MODE(0), expo_state, &ret);
+
+ return ret;
+}
+
+static int vd55g1_read_expo_cluster(struct vd55g1 *sensor)
+{
+ u64 exposure = 0;
+ u64 again = 0;
+ u64 dgain = 0;
+ int ret = 0;
+
+ vd55g1_read(sensor, VD55G1_REG_APPLIED_COARSE_EXPOSURE, &exposure,
+ &ret);
+ vd55g1_read(sensor, VD55G1_REG_APPLIED_ANALOG_GAIN, &again, &ret);
+ vd55g1_read(sensor, VD55G1_REG_APPLIED_DIGITAL_GAIN, &dgain, &ret);
+ if (ret)
+ return ret;
+
+ sensor->expo_ctrl->cur.val = exposure;
+ sensor->again_ctrl->cur.val = again;
+ sensor->dgain_ctrl->cur.val = dgain;
+
+ return 0;
+}
+
+static int vd55g1_update_frame_length(struct vd55g1 *sensor,
+ unsigned int frame_length)
+{
+ int ret = 0;
+
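+ /* In HDR subtraction mode, keep context 1's frame length in sync */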
+ if (sensor->hdr_ctrl->val == VD55G1_HDR_SUB)
+ vd55g1_write(sensor, VD55G1_REG_FRAME_LENGTH(1), frame_length,
+ &ret);
+ vd55g1_write(sensor, VD55G1_REG_FRAME_LENGTH(0), frame_length, &ret);
+
+ return ret;
+}
+
+static int vd55g1_update_exposure_target(struct vd55g1 *sensor, int index)
+{
+ /*
+ * The auto exposure target is: default target exposure * 2^EV, the
+ * default target exposure being 27 for this sensor; e.g. an EV bias
+ * of +1 gives 54 and -1 gives 14 (rounded).
+ */
+ static const unsigned int index2exposure_target[] = {
+ 3, 5, 7, 10, 14, 19, 27, 38, 54, 76, 108, 153, 216,
+ };
+ int exposure_target = index2exposure_target[index];
+
+ return vd55g1_write(sensor, VD55G1_REG_AE_TARGET_PERCENTAGE,
+ exposure_target, NULL);
+}
+
+static int vd55g1_apply_cold_start(struct vd55g1 *sensor,
+ struct v4l2_rect *crop)
+{
+ /*
+ * The cold start register is a single register expressed as an
+ * exposure time in us. This differs from the status registers, which
+ * combine exposure, digital gain, and analog gain, hence the following
+ * format conversion.
+ */
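+ /*
+ * Analog gain is 32 / (32 - again_reg) and digital gain is a 5.8
+ * fixed-point value, hence the division by 256 below.
+ */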
+ unsigned int line_length = crop->width + sensor->hblank_ctrl->val;
+ unsigned int line_time_us = DIV_ROUND_UP(line_length * MEGA,
+ sensor->pixel_clock);
+ u8 d_gain = DIV_ROUND_CLOSEST(sensor->dgain_ctrl->val, 1 << 8);
+ u8 a_gain = DIV_ROUND_CLOSEST(32, (32 - sensor->again_ctrl->val));
+ unsigned int expo_us = sensor->expo_ctrl->val * d_gain * a_gain *
+ line_time_us;
+ int ret = 0;
+
+ vd55g1_write(sensor, VD55G1_REG_AE_FORCE_COLDSTART, 1, &ret);
+ vd55g1_write(sensor, VD55G1_REG_AE_COLDSTART_EXP_TIME, expo_us, &ret);
+
+ return ret;
+}
+
+static void vd55g1_update_img_pad_format(struct vd55g1 *sensor,
+ const struct vd55g1_mode *mode,
+ u32 code,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = code;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int vd55g1_update_hdr_mode(struct vd55g1 *sensor)
+{
+ int ret = 0;
+
+ switch (sensor->hdr_ctrl->val) {
+ case VD55G1_NO_HDR:
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_MAX_COARSE,
+ VD55G1_EXPOSURE_MAX_COARSE_DEF, &ret);
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_USE_CASES, 0, &ret);
+ vd55g1_write(sensor, VD55G1_REG_NEXT_CTX, 0x0, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_CTX_REPEAT_COUNT_CTX0, 0, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_VT_MODE(0),
+ VD55G1_VT_MODE_NORMAL, &ret);
+ vd55g1_write(sensor, VD55G1_REG_MASK_FRAME_CTRL(0),
+ VD55G1_MASK_FRAME_CTRL_OUTPUT, &ret);
+ break;
+ case VD55G1_HDR_SUB:
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_MAX_COARSE,
+ VD55G1_EXPOSURE_MAX_COARSE_SUB, &ret);
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_USE_CASES,
+ VD55G1_EXPOSURE_USE_CASES_MULTI_CONTEXT, &ret);
+ vd55g1_write(sensor, VD55G1_REG_NEXT_CTX, 0x0001, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_CTX_REPEAT_COUNT_CTX0, 1, &ret);
+ vd55g1_write(sensor, VD55G1_REG_CTX_REPEAT_COUNT_CTX1, 1, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_VT_MODE(0),
+ VD55G1_VT_MODE_NORMAL, &ret);
+ vd55g1_write(sensor, VD55G1_REG_MASK_FRAME_CTRL(0),
+ VD55G1_MASK_FRAME_CTRL_MASK, &ret);
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_INSTANCE(0), 0, &ret);
+ vd55g1_write(sensor, VD55G1_REG_VT_MODE(1),
+ VD55G1_VT_MODE_SUBTRACTION, &ret);
+ vd55g1_write(sensor, VD55G1_REG_MASK_FRAME_CTRL(1),
+ VD55G1_MASK_FRAME_CTRL_OUTPUT, &ret);
+ vd55g1_write(sensor, VD55G1_REG_EXPOSURE_INSTANCE(1), 1, &ret);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vd55g1_set_framefmt(struct vd55g1 *sensor,
+ struct v4l2_mbus_framefmt *format,
+ struct v4l2_rect *crop)
+{
+ u8 binning;
+ int ret = 0;
+
+ vd55g1_write(sensor, VD55G1_REG_FORMAT_CTRL,
+ vd55g1_get_fmt_desc(sensor, format->code)->bpp, &ret);
+ vd55g1_write(sensor, VD55G1_REG_OIF_IMG_CTRL,
+ vd55g1_get_fmt_desc(sensor, format->code)->data_type,
+ &ret);
+
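+ /* Derive the binning mode from the crop/output width ratio */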
+ switch (crop->width / format->width) {
+ case 1:
+ default:
+ binning = VD55G1_READOUT_CTRL_BIN_MODE_NORMAL;
+ break;
+ case 2:
+ binning = VD55G1_READOUT_CTRL_BIN_MODE_DIGITAL_X2;
+ break;
+ }
+ vd55g1_write(sensor, VD55G1_REG_READOUT_CTRL, binning, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_X_START(0), crop->left, &ret);
+ vd55g1_write(sensor, VD55G1_REG_X_WIDTH(0), crop->width, &ret);
+ vd55g1_write(sensor, VD55G1_REG_Y_START(0), crop->top, &ret);
+ vd55g1_write(sensor, VD55G1_REG_Y_HEIGHT(0), crop->height, &ret);
+
+ vd55g1_write(sensor, VD55G1_REG_X_START(1), crop->left, &ret);
+ vd55g1_write(sensor, VD55G1_REG_X_WIDTH(1), crop->width, &ret);
+ vd55g1_write(sensor, VD55G1_REG_Y_START(1), crop->top, &ret);
+ vd55g1_write(sensor, VD55G1_REG_Y_HEIGHT(1), crop->height, &ret);
+
+ return ret;
+}
+
+static int vd55g1_update_gpios(struct vd55g1 *sensor, unsigned long gpio_mask)
+{
+ unsigned long io;
+ u8 gpio_val;
+ int ret = 0;
+
+ for_each_set_bit(io, &gpio_mask, VD55G1_NB_GPIOS) {
+ gpio_val = sensor->gpios[io];
+
+ if (gpio_val == VD55G1_GPIO_MODE_STROBE &&
+ sensor->led_ctrl->val == V4L2_FLASH_LED_MODE_NONE) {
+ gpio_val = VD55G1_GPIO_MODE_IN;
+ if (sensor->hdr_ctrl->val == VD55G1_HDR_SUB) {
+ /* Propagate to its context 1 counterpart as well */
+ vd55g1_write(sensor,
+ VD55G1_REG_GPIO_0_CTRL(1) + io,
+ gpio_val, &ret);
+ }
+ }
+
+ vd55g1_write(sensor, VD55G1_REG_GPIO_0_CTRL(0) + io,
+ gpio_val, &ret);
+ }
+
+ return ret;
+}
+
+static int vd55g1_ro_ctrls_setup(struct vd55g1 *sensor, struct v4l2_rect *crop)
+{
+ return vd55g1_write(sensor, VD55G1_REG_LINE_LENGTH,
+ crop->width + sensor->hblank_ctrl->val, NULL);
+}
+
+static void vd55g1_grab_ctrls(struct vd55g1 *sensor, bool enable)
+{
+ /* These settings cannot change during stream */
+ v4l2_ctrl_grab(sensor->hflip_ctrl, enable);
+ v4l2_ctrl_grab(sensor->vflip_ctrl, enable);
+ v4l2_ctrl_grab(sensor->patgen_ctrl, enable);
+ v4l2_ctrl_grab(sensor->hdr_ctrl, enable);
+}
+
+static int vd55g1_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ struct v4l2_rect *crop =
+ v4l2_subdev_state_get_crop(state, 0);
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(sensor->dev);
+ if (ret < 0)
+ return ret;
+
+ vd55g1_write(sensor, VD55G1_REG_EXT_CLOCK, sensor->xclk_freq, &ret);
+
+ /* Configure output */
+ vd55g1_write(sensor, VD55G1_REG_MIPI_DATA_RATE,
+ sensor->mipi_rate, &ret);
+ vd55g1_write(sensor, VD55G1_REG_OIF_CTRL, sensor->oif_ctrl, &ret);
+ vd55g1_write(sensor, VD55G1_REG_ISL_ENABLE, 0, &ret);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = vd55g1_set_framefmt(sensor, format, crop);
+ if (ret)
+ goto err_rpm_put;
+
+ /* Set up default GPIO values; they can be overridden by V4L2 ctrl setup */
+ ret = vd55g1_update_gpios(sensor, GENMASK(VD55G1_NB_GPIOS - 1, 0));
+ if (ret)
+ goto err_rpm_put;
+
+ ret = vd55g1_apply_cold_start(sensor, crop);
+ if (ret)
+ goto err_rpm_put;
+
+ /* Apply settings from V4L2 ctrls */
+ ret = __v4l2_ctrl_handler_setup(&sensor->ctrl_handler);
+ if (ret)
+ goto err_rpm_put;
+
+ /* Also apply settings from read-only V4L2 ctrls */
+ ret = vd55g1_ro_ctrls_setup(sensor, crop);
+ if (ret)
+ goto err_rpm_put;
+
+ /* Start streaming */
+ vd55g1_write(sensor, VD55G1_REG_STBY, VD55G1_STBY_START_STREAM, &ret);
+ vd55g1_poll_reg(sensor, VD55G1_REG_STBY, 0, &ret);
+ vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_STREAMING, &ret);
+ if (ret)
+ goto err_rpm_put;
+
+ vd55g1_grab_ctrls(sensor, true);
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put(sensor->dev);
+ return ret;
+}
+
+static int vd55g1_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ int ret = 0;
+
+ /* Retrieve the expo cluster to enable AE cold start */
+ ret = vd55g1_read_expo_cluster(sensor);
+
+ vd55g1_write(sensor, VD55G1_REG_STREAMING, VD55G1_STREAMING_STOP_STREAM,
+ &ret);
+ vd55g1_poll_reg(sensor, VD55G1_REG_STREAMING, 0, &ret);
+ vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_SW_STBY, &ret);
+
+ if (ret)
+ dev_warn(sensor->dev, "Can't disable stream\n");
+
+ vd55g1_grab_ctrls(sensor, false);
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static int vd55g1_patch(struct vd55g1 *sensor)
+{
+ u64 patch;
+ int ret = 0;
+
+ vd55g1_write_array(sensor, VD55G1_REG_FWPATCH_START_ADDR,
+ sizeof(patch_array), patch_array, &ret);
+ vd55g1_write(sensor, VD55G1_REG_BOOT, VD55G1_BOOT_PATCH_SETUP, &ret);
+ vd55g1_poll_reg(sensor, VD55G1_REG_BOOT, 0, &ret);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to apply patch\n");
+ return ret;
+ }
+
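+ /* Patch revision: major in the high byte, minor in the low byte */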
+ vd55g1_read(sensor, VD55G1_REG_FWPATCH_REVISION, &patch, &ret);
+ if (patch != (VD55G1_FWPATCH_REVISION_MAJOR << 8) +
+ VD55G1_FWPATCH_REVISION_MINOR) {
+ dev_err(sensor->dev, "Bad patch version expected %d.%d got %d.%d\n",
+ VD55G1_FWPATCH_REVISION_MAJOR,
+ VD55G1_FWPATCH_REVISION_MINOR,
+ (u8)(patch >> 8), (u8)(patch & 0xff));
+ return -ENODEV;
+ }
+ dev_dbg(sensor->dev, "patch %d.%d applied\n",
+ (u8)(patch >> 8), (u8)(patch & 0xff));
+
+ return 0;
+}
+
+static int vd55g1_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ const struct v4l2_rect *crop = v4l2_subdev_state_get_crop(sd_state, 0);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *crop;
+ return 0;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = VD55G1_WIDTH;
+ sel->r.height = VD55G1_HEIGHT;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vd55g1_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(vd55g1_mbus_codes))
+ return -EINVAL;
+
+ code->code = vd55g1_mbus_codes[code->index].code;
+
+ return 0;
+}
+
+static int vd55g1_new_format_change_controls(struct vd55g1 *sensor,
+ struct v4l2_mbus_framefmt *format,
+ struct v4l2_rect *crop)
+{
+ struct vd55g1_vblank_limits vblank;
+ unsigned int hblank;
+ unsigned int frame_length = 0;
+ unsigned int expo_max;
+ int ret;
+
+ /* Reset vblank and frame length to default */
+ vd55g1_get_vblank_limits(sensor, crop, &vblank);
+ ret = __v4l2_ctrl_modify_range(sensor->vblank_ctrl, vblank.min,
+ vblank.max, 1, vblank.def);
+ if (ret)
+ return ret;
+
+ /* Max exposure changes with vblank */
+ frame_length = crop->height + sensor->vblank_ctrl->val;
+ expo_max = frame_length - VD55G1_EXPO_MAX_TERM;
+ ret = __v4l2_ctrl_modify_range(sensor->expo_ctrl, 0, expo_max, 1,
+ VD55G1_EXPO_DEF);
+ if (ret)
+ return ret;
+
+ /* Update pixel rate to reflect new bpp */
+ ret = __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_ctrl,
+ vd55g1_get_pixel_rate(sensor, format));
+ if (ret)
+ return ret;
+
+ /* Update hblank according to new width */
+ hblank = vd55g1_get_hblank_min(sensor, format, crop);
+ ret = __v4l2_ctrl_modify_range(sensor->hblank_ctrl, hblank, hblank, 1,
+ hblank);
+
+ return ret;
+}
+
+static int vd55g1_set_pad_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sd_fmt)
+{
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ const struct vd55g1_mode *new_mode;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect pad_crop;
+ unsigned int binning;
+
+ new_mode = v4l2_find_nearest_size(vd55g1_supported_modes,
+ ARRAY_SIZE(vd55g1_supported_modes),
+ width, height, sd_fmt->format.width,
+ sd_fmt->format.height);
+
+ vd55g1_update_img_pad_format(sensor, new_mode, sd_fmt->format.code,
+ &sd_fmt->format);
+
+ /*
+ * Use binning to maximize the crop rectangle size, and centre it in the
+ * sensor.
+ */
+ binning = min(VD55G1_WIDTH / sd_fmt->format.width,
+ VD55G1_HEIGHT / sd_fmt->format.height);
+ binning = min(binning, 2U);
+ pad_crop.width = sd_fmt->format.width * binning;
+ pad_crop.height = sd_fmt->format.height * binning;
+ pad_crop.left = (VD55G1_WIDTH - pad_crop.width) / 2;
+ pad_crop.top = (VD55G1_HEIGHT - pad_crop.height) / 2;
+
+ format = v4l2_subdev_state_get_format(sd_state, sd_fmt->pad);
+
+ *format = sd_fmt->format;
+
+ *v4l2_subdev_state_get_crop(sd_state, sd_fmt->pad) = pad_crop;
+ if (sd_fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return vd55g1_new_format_change_controls(sensor,
+ &sd_fmt->format,
+ &pad_crop);
+
+ return 0;
+}
+
+static int vd55g1_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int def_mode = VD55G1_DEFAULT_MODE;
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ struct v4l2_subdev_format fmt = { 0 };
+ struct v4l2_subdev_route routes[] = {
+ { .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE }
+ };
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+ int ret;
+
+ /* Needed by v4l2_subdev_s_stream_helper(), even with only one stream */
+ ret = v4l2_subdev_set_routing(sd, sd_state, &routing);
+ if (ret)
+ return ret;
+
+ vd55g1_update_img_pad_format(sensor, &vd55g1_supported_modes[def_mode],
+ VD55G1_MEDIA_BUS_FMT_DEF, &fmt.format);
+
+ return vd55g1_set_pad_fmt(sd, sd_state, &fmt);
+}
+
+static int vd55g1_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(vd55g1_supported_modes))
+ return -EINVAL;
+
+ fse->min_width = vd55g1_supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = vd55g1_supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops vd55g1_internal_ops = {
+ .init_state = vd55g1_init_state,
+};
+
+static const struct v4l2_subdev_pad_ops vd55g1_pad_ops = {
+ .enum_mbus_code = vd55g1_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = vd55g1_set_pad_fmt,
+ .get_selection = vd55g1_get_selection,
+ .enum_frame_size = vd55g1_enum_frame_size,
+ .enable_streams = vd55g1_enable_streams,
+ .disable_streams = vd55g1_disable_streams,
+};
+
+static const struct v4l2_subdev_video_ops vd55g1_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_ops vd55g1_subdev_ops = {
+ .video = &vd55g1_video_ops,
+ .pad = &vd55g1_pad_ops,
+};
+
+static int vd55g1_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vd55g1 *sensor = ctrl_to_vd55g1(ctrl);
+ int ret = 0;
+
+ /* Interact with HW only when it is powered ON */
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = vd55g1_read_expo_cluster(sensor);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static int vd55g1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vd55g1 *sensor = ctrl_to_vd55g1(ctrl);
+ unsigned int frame_length = 0;
+ unsigned int expo_max;
+ struct v4l2_subdev_state *state =
+ v4l2_subdev_get_locked_active_state(&sensor->sd);
+ struct v4l2_rect *crop =
+ v4l2_subdev_state_get_crop(state, 0);
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ unsigned int hblank = vd55g1_get_hblank_min(sensor, format, crop);
+ bool is_auto = false;
+ int ret = 0;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return 0;
+
+ /* Update control states, ranges, etc. regardless of the HW state */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ frame_length = crop->height + ctrl->val;
+ expo_max = frame_length - VD55G1_EXPO_MAX_TERM;
+ ret = __v4l2_ctrl_modify_range(sensor->expo_ctrl, 0, expo_max,
+ 1, VD55G1_EXPO_DEF);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ is_auto = (ctrl->val == V4L2_EXPOSURE_AUTO);
+ __v4l2_ctrl_grab(sensor->ae_lock_ctrl, !is_auto);
+ __v4l2_ctrl_grab(sensor->ae_bias_ctrl, !is_auto);
+ break;
+ case V4L2_CID_HDR_SENSOR_MODE:
+ /* Only react if userspace actually changed the control value */
+ if (ctrl->val != ctrl->cur.val) {
+ /* The fixed horizontal blanking changes with HDR mode */
+ ret = __v4l2_ctrl_modify_range(sensor->hblank_ctrl,
+ hblank, hblank, 1,
+ hblank);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Don't modify hardware if controls modification failed */
+ if (ret)
+ return ret;
+
+ /* Interact with HW only when it is powered ON */
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ret = vd55g1_write(sensor, VD55G1_REG_ORIENTATION,
+ sensor->hflip_ctrl->val |
+ (sensor->vflip_ctrl->val << 1),
+ NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = vd55g1_update_patgen(sensor, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = vd55g1_update_expo_cluster(sensor, is_auto);
+ break;
+ case V4L2_CID_3A_LOCK:
+ ret = vd55g1_lock_exposure(sensor, ctrl->val);
+ break;
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ /*
+ * Use the auto exposure target percentage register to control the
+ * exposure bias, for better precision.
+ */
+ ret = vd55g1_update_exposure_target(sensor, ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = vd55g1_update_frame_length(sensor, frame_length);
+ break;
+ case V4L2_CID_FLASH_LED_MODE:
+ ret = vd55g1_update_gpios(sensor, sensor->ext_leds_mask);
+ break;
+ case V4L2_CID_HDR_SENSOR_MODE:
+ ret = vd55g1_update_hdr_mode(sensor);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops vd55g1_ctrl_ops = {
+ .g_volatile_ctrl = vd55g1_g_volatile_ctrl,
+ .s_ctrl = vd55g1_s_ctrl,
+};
+
+static int vd55g1_init_ctrls(struct vd55g1 *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &vd55g1_ctrl_ops;
+ struct v4l2_ctrl_handler *hdl = &sensor->ctrl_handler;
+ struct v4l2_ctrl *ctrl;
+ struct v4l2_fwnode_device_properties fwnode_props;
+ struct vd55g1_vblank_limits vblank;
+ unsigned int hblank;
+ struct v4l2_subdev_state *state =
+ v4l2_subdev_lock_and_get_active_state(&sensor->sd);
+ struct v4l2_rect *crop =
+ v4l2_subdev_state_get_crop(state, 0);
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ s32 pixel_rate = vd55g1_get_pixel_rate(sensor, format);
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 16);
+
+ /* Flip cluster */
+ sensor->hflip_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ sensor->vflip_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_cluster(2, &sensor->hflip_ctrl);
+
+ /* Exposure cluster */
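+ /* The ~0x3 skip mask leaves only the AUTO and MANUAL menu entries */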
+ sensor->ae_ctrl = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO, 1,
+ ~0x3, V4L2_EXPOSURE_AUTO);
+ sensor->again_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
+ 0, 0x1c, 1, VD55G1_AGAIN_DEF);
+ sensor->dgain_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_DIGITAL_GAIN,
+ 256, 0xffff, 1,
+ VD55G1_DGAIN_DEF);
+ sensor->expo_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE, 0,
+ VD55G1_FRAME_LENGTH_DEF -
+ VD55G1_EXPO_MAX_TERM,
+ 1, VD55G1_EXPO_DEF);
+ v4l2_ctrl_auto_cluster(4, &sensor->ae_ctrl, V4L2_EXPOSURE_MANUAL, true);
+
+ sensor->patgen_ctrl =
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(vd55g1_tp_menu) - 1, 0,
+ 0, vd55g1_tp_menu);
+ ctrl = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
+ 0, 0, &sensor->link_freq);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->pixel_rate_ctrl = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1,
+ pixel_rate);
+ if (sensor->pixel_rate_ctrl)
+ sensor->pixel_rate_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->ae_lock_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_3A_LOCK,
+ 0, 1, 0, 0);
+ sensor->ae_bias_ctrl =
+ v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(vd55g1_ev_bias_menu) - 1,
+ ARRAY_SIZE(vd55g1_ev_bias_menu) / 2,
+ vd55g1_ev_bias_menu);
+ sensor->hdr_ctrl =
+ v4l2_ctrl_new_std_menu_items(hdl, ops,
+ V4L2_CID_HDR_SENSOR_MODE,
+ ARRAY_SIZE(vd55g1_hdr_menu) - 1, 0,
+ VD55G1_NO_HDR, vd55g1_hdr_menu);
+ hblank = vd55g1_get_hblank_min(sensor, format, crop);
+ sensor->hblank_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK,
+ hblank, hblank, 1, hblank);
+ if (sensor->hblank_ctrl)
+ sensor->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ vd55g1_get_vblank_limits(sensor, crop, &vblank);
+ sensor->vblank_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK,
+ vblank.min, vblank.max,
+ 1, vblank.def);
+
+ /* Additional controls based on device tree properties */
+ if (sensor->ext_leds_mask) {
+ sensor->led_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_FLASH, 0,
+ V4L2_FLASH_LED_MODE_NONE);
+ }
+
+ ret = v4l2_fwnode_device_parse(sensor->dev, &fwnode_props);
+ if (ret)
+ goto free_ctrls;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, ops, &fwnode_props);
+ if (ret)
+ goto free_ctrls;
+
+ sensor->sd.ctrl_handler = hdl;
+ goto unlock_state;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(hdl);
+unlock_state:
+ v4l2_subdev_unlock_state(state);
+ return ret;
+}
+
+static int vd55g1_detect(struct vd55g1 *sensor)
+{
+ u64 device_rev;
+ u64 id;
+ int ret;
+
+ ret = vd55g1_read(sensor, VD55G1_REG_MODEL_ID, &id, NULL);
+ if (ret)
+ return ret;
+
+ if (id != VD55G1_MODEL_ID) {
+ dev_warn(sensor->dev, "Unsupported sensor id %x\n", (u32)id);
+ return -ENODEV;
+ }
+
+ ret = vd55g1_read(sensor, VD55G1_REG_REVISION, &device_rev, NULL);
+ if (ret)
+ return ret;
+
+ if (device_rev != VD55G1_REVISION_CCB) {
+ dev_err(sensor->dev, "Unsupported sensor revision (0x%x)\n",
+ (u16)device_rev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int vd55g1_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(vd55g1_supply_name),
+ sensor->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulators %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sensor->xclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock %d\n", ret);
+ goto disable_bulk;
+ }
+
+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ usleep_range(5000, 10000);
+ ret = vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_READY_TO_BOOT, NULL);
+ if (ret) {
+ dev_err(dev, "Sensor reset failed %d\n", ret);
+ goto disable_clock;
+ }
+
+ ret = vd55g1_detect(sensor);
+ if (ret) {
+ dev_err(dev, "Sensor detect failed %d\n", ret);
+ goto disable_clock;
+ }
+
+ ret = vd55g1_patch(sensor);
+ if (ret) {
+ dev_err(dev, "Sensor patch failed %d\n", ret);
+ goto disable_clock;
+ }
+
+ ret = vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_SW_STBY, NULL);
+ if (ret) {
+ dev_err(dev, "Sensor waiting after patch failed %d\n",
+ ret);
+ goto disable_clock;
+ }
+
+ return 0;
+
+disable_clock:
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ clk_disable_unprepare(sensor->xclk);
+disable_bulk:
+ regulator_bulk_disable(ARRAY_SIZE(vd55g1_supply_name),
+ sensor->supplies);
+
+ return ret;
+}
+
+static int vd55g1_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct vd55g1 *sensor = to_vd55g1(sd);
+
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ clk_disable_unprepare(sensor->xclk);
+ regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+
+ return 0;
+}
+
+static int vd55g1_check_csi_conf(struct vd55g1 *sensor,
+ struct fwnode_handle *endpoint)
+{
+ struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ u8 n_lanes;
+ int ret;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep);
+ if (ret)
+ return -EINVAL;
+
+ /* Check the number of lanes */
+ n_lanes = ep.bus.mipi_csi2.num_data_lanes;
+ if (n_lanes != 1) {
+ dev_err(sensor->dev, "Sensor only supports 1 lane, found %d\n",
+ n_lanes);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Clock lane must be first */
+ if (ep.bus.mipi_csi2.clock_lane != 0) {
+ dev_err(sensor->dev, "Clock lane must be mapped to lane 0\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Handle lane polarities in the sensor configuration: bit 3 holds the
+ * clock lane polarity, bit 6 the data lane polarity.
+ */
+ sensor->oif_ctrl = (ep.bus.mipi_csi2.lane_polarities[0] << 3) |
+ (ep.bus.mipi_csi2.lane_polarities[1] << 6);
+
+ /* Check the link frequency set in device tree */
+ if (!ep.nr_of_link_frequencies) {
+ dev_err(sensor->dev, "link-frequency property not found in DT\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ if (ep.nr_of_link_frequencies != 1) {
+ dev_err(sensor->dev, "Multiple link frequencies not supported\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ sensor->link_freq = ep.link_frequencies[0];
+
+done:
+ v4l2_fwnode_endpoint_free(&ep);
+
+ return ret;
+}
+
+static int vd55g1_parse_dt_gpios_array(struct vd55g1 *sensor,
+ const char *prop_name, u32 *array, int *nb)
+{
+ unsigned int i;
+ int ret;
+
+ *nb = device_property_count_u32(sensor->dev, prop_name);
+ if (*nb == -EINVAL) {
+ /* Property not found */
+ *nb = 0;
+ return 0;
+ } else if (*nb < 0) {
+ /* Propagate other property read errors */
+ return *nb;
+ }
+
+ ret = device_property_read_u32_array(sensor->dev,
+ prop_name, array, *nb);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to read %s prop\n", prop_name);
+ return ret;
+ }
+ for (i = 0; i < *nb; i++) {
+ if (array[i] >= VD55G1_NB_GPIOS) {
+ dev_err(sensor->dev, "Invalid GPIO number %d\n",
+ array[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int vd55g1_parse_dt_gpios(struct vd55g1 *sensor)
+{
+ u32 led_gpios[VD55G1_NB_GPIOS];
+ int nb_gpios_leds;
+ unsigned int i;
+ int ret;
+
+ /* Initialize GPIOs to default */
+ for (i = 0; i < VD55G1_NB_GPIOS; i++)
+ sensor->gpios[i] = VD55G1_GPIO_MODE_IN;
+ sensor->ext_leds_mask = 0;
+
+ /* Take into account optional 'st,leds' output for GPIOs */
+ ret = vd55g1_parse_dt_gpios_array(sensor, "st,leds", led_gpios,
+ &nb_gpios_leds);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nb_gpios_leds; i++) {
+ sensor->gpios[led_gpios[i]] = VD55G1_GPIO_MODE_STROBE;
+ set_bit(led_gpios[i], &sensor->ext_leds_mask);
+ }
+
+ return 0;
+}
+
+static int vd55g1_parse_dt(struct vd55g1 *sensor)
+{
+ struct fwnode_handle *endpoint;
+ int ret;
+
+ endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(sensor->dev),
+ 0, 0, 0);
+ if (!endpoint) {
+ dev_err(sensor->dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = vd55g1_check_csi_conf(sensor, endpoint);
+ fwnode_handle_put(endpoint);
+ if (ret)
+ return ret;
+
+ return vd55g1_parse_dt_gpios(sensor);
+}
+
+static int vd55g1_subdev_init(struct vd55g1 *sensor)
+{
+ int ret;
+
+ /* Init sub device */
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.internal_ops = &vd55g1_internal_ops;
+
+ /* Init source pad */
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to init media entity: %d\n", ret);
+ return ret;
+ }
+
+ sensor->sd.state_lock = sensor->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&sensor->sd);
+ if (ret) {
+ dev_err(sensor->dev, "Subdev init error: %d\n", ret);
+ goto err_media;
+ }
+
+ /*
+ * Initialize controls after v4l2_subdev_init_finalize() to make sure
+ * active state is set
+ */
+ ret = vd55g1_init_ctrls(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "Controls initialization failed %d\n",
+ ret);
+ goto err_subdev;
+ }
+
+ return 0;
+
+err_subdev:
+ v4l2_subdev_cleanup(&sensor->sd);
+
+err_media:
+ media_entity_cleanup(&sensor->sd.entity);
+ return ret;
+}
+
+static void vd55g1_subdev_cleanup(struct vd55g1 *sensor)
+{
+ v4l2_async_unregister_subdev(&sensor->sd);
+ v4l2_subdev_cleanup(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(sensor->sd.ctrl_handler);
+}
+
+static int vd55g1_get_regulators(struct vd55g1 *sensor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vd55g1_supply_name); i++)
+ sensor->supplies[i].supply = vd55g1_supply_name[i];
+
+ return devm_regulator_bulk_get(sensor->dev,
+ ARRAY_SIZE(vd55g1_supply_name),
+ sensor->supplies);
+}
+
+static int vd55g1_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct vd55g1 *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+ sensor->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&sensor->sd, client, &vd55g1_subdev_ops);
+
+ ret = vd55g1_parse_dt(sensor);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to parse Device Tree\n");
+
+ /* Get (and check) resources : power regs, ext clock, reset gpio */
+ ret = vd55g1_get_regulators(sensor);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ sensor->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "Failed to get xclk\n");
+
+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
+ ret = vd55g1_prepare_clock_tree(sensor);
+ if (ret)
+ return ret;
+
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(sensor->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap))
+ return dev_err_probe(dev, PTR_ERR(sensor->regmap),
+ "Failed to init regmap\n");
+
+ /* Detect if sensor is present and if its revision is supported */
+ ret = vd55g1_power_on(dev);
+ if (ret)
+ return ret;
+
+ /* Enable pm_runtime and power off the sensor */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 4000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ ret = vd55g1_subdev_init(sensor);
+ if (ret) {
+ dev_err(dev, "V4l2 init failed: %d\n", ret);
+ goto err_power_off;
+ }
+
+ ret = v4l2_async_register_subdev(&sensor->sd);
+ if (ret) {
+ dev_err(dev, "async subdev register failed %d\n", ret);
+ goto err_subdev;
+ }
+
+ return 0;
+
+err_subdev:
+ vd55g1_subdev_cleanup(sensor);
+err_power_off:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ vd55g1_power_off(dev);
+
+ return ret;
+}
+
+static void vd55g1_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct vd55g1 *sensor = to_vd55g1(sd);
+
+ vd55g1_subdev_cleanup(sensor);
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ vd55g1_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_dont_use_autosuspend(&client->dev);
+}
+
+static const struct of_device_id vd55g1_dt_ids[] = {
+ { .compatible = "st,vd55g1" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vd55g1_dt_ids);
+
+static const struct dev_pm_ops vd55g1_pm_ops = {
+ SET_RUNTIME_PM_OPS(vd55g1_power_off, vd55g1_power_on, NULL)
+};
+
+static struct i2c_driver vd55g1_i2c_driver = {
+ .driver = {
+ .name = "vd55g1",
+ .of_match_table = vd55g1_dt_ids,
+ .pm = &vd55g1_pm_ops,
+ },
+ .probe = vd55g1_probe,
+ .remove = vd55g1_remove,
+};
+
+module_i2c_driver(vd55g1_i2c_driver);
+
+MODULE_AUTHOR("Benjamin Mugnier <benjamin.mugnier@foss.st.com>");
+MODULE_AUTHOR("Sylvain Petinot <sylvain.petinot@foss.st.com>");
+MODULE_DESCRIPTION("VD55G1 camera subdev driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/vd56g3.c b/drivers/media/i2c/vd56g3.c
new file mode 100644
index 000000000000..5d951ad0b478
--- /dev/null
+++ b/drivers/media/i2c/vd56g3.c
@@ -0,0 +1,1586 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A V4L2 driver for ST VD56G3 (Mono) and VD66GY (RGB) global shutter cameras.
+ * Copyright (C) 2024, STMicroelectronics SA
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* Register Map */
+#define VD56G3_REG_MODEL_ID CCI_REG16_LE(0x0000)
+#define VD56G3_MODEL_ID 0x5603
+#define VD56G3_REG_REVISION CCI_REG16_LE(0x0002)
+#define VD56G3_REVISION_CUT3 0x31
+#define VD56G3_REG_OPTICAL_REVISION CCI_REG8(0x001a)
+#define VD56G3_OPTICAL_REVISION_MONO 0
+#define VD56G3_OPTICAL_REVISION_BAYER 1
+#define VD56G3_REG_SYSTEM_FSM CCI_REG8(0x0028)
+#define VD56G3_SYSTEM_FSM_READY_TO_BOOT 0x01
+#define VD56G3_SYSTEM_FSM_SW_STBY 0x02
+#define VD56G3_SYSTEM_FSM_STREAMING 0x03
+#define VD56G3_REG_APPLIED_COARSE_EXPOSURE CCI_REG16_LE(0x0064)
+#define VD56G3_REG_APPLIED_ANALOG_GAIN CCI_REG8(0x0068)
+#define VD56G3_REG_APPLIED_DIGITAL_GAIN CCI_REG16_LE(0x006a)
+#define VD56G3_REG_BOOT CCI_REG8(0x0200)
+#define VD56G3_CMD_ACK 0
+#define VD56G3_CMD_BOOT 1
+#define VD56G3_REG_STBY CCI_REG8(0x0201)
+#define VD56G3_CMD_START_STREAM 1
+#define VD56G3_REG_STREAMING CCI_REG8(0x0202)
+#define VD56G3_CMD_STOP_STREAM 1
+#define VD56G3_REG_EXT_CLOCK CCI_REG32_LE(0x0220)
+#define VD56G3_REG_CLK_PLL_PREDIV CCI_REG8(0x0224)
+#define VD56G3_REG_CLK_SYS_PLL_MULT CCI_REG8(0x0226)
+#define VD56G3_REG_ORIENTATION CCI_REG8(0x0302)
+#define VD56G3_REG_FORMAT_CTRL CCI_REG8(0x030a)
+#define VD56G3_REG_OIF_CTRL CCI_REG16_LE(0x030c)
+#define VD56G3_REG_OIF_IMG_CTRL CCI_REG8(0x030f)
+#define VD56G3_REG_OIF_CSI_BITRATE CCI_REG16_LE(0x0312)
+#define VD56G3_REG_DUSTER_CTRL CCI_REG8(0x0318)
+#define VD56G3_DUSTER_DISABLE 0
+#define VD56G3_DUSTER_ENABLE_DEF_MODULES 0x13
+#define VD56G3_REG_ISL_ENABLE CCI_REG8(0x0333)
+#define VD56G3_REG_DARKCAL_CTRL CCI_REG8(0x0340)
+#define VD56G3_DARKCAL_ENABLE 1
+#define VD56G3_DARKCAL_DISABLE_DARKAVG 2
+#define VD56G3_REG_PATGEN_CTRL CCI_REG16_LE(0x0400)
+#define VD56G3_PATGEN_ENABLE 1
+#define VD56G3_PATGEN_TYPE_SHIFT 4
+#define VD56G3_REG_AE_COLDSTART_COARSE_EXPOSURE CCI_REG16_LE(0x042a)
+#define VD56G3_REG_AE_COLDSTART_ANALOG_GAIN CCI_REG8(0x042c)
+#define VD56G3_REG_AE_COLDSTART_DIGITAL_GAIN CCI_REG16_LE(0x042e)
+#define VD56G3_REG_AE_ROI_START_H CCI_REG16_LE(0x0432)
+#define VD56G3_REG_AE_ROI_START_V CCI_REG16_LE(0x0434)
+#define VD56G3_REG_AE_ROI_END_H CCI_REG16_LE(0x0436)
+#define VD56G3_REG_AE_ROI_END_V CCI_REG16_LE(0x0438)
+#define VD56G3_REG_AE_COMPENSATION CCI_REG16_LE(0x043a)
+#define VD56G3_REG_EXP_MODE CCI_REG8(0x044c)
+#define VD56G3_EXP_MODE_AUTO 0
+#define VD56G3_EXP_MODE_FREEZE 1
+#define VD56G3_EXP_MODE_MANUAL 2
+#define VD56G3_REG_MANUAL_ANALOG_GAIN CCI_REG8(0x044d)
+#define VD56G3_REG_MANUAL_COARSE_EXPOSURE CCI_REG16_LE(0x044e)
+#define VD56G3_REG_MANUAL_DIGITAL_GAIN_CH0 CCI_REG16_LE(0x0450)
+#define VD56G3_REG_MANUAL_DIGITAL_GAIN_CH1 CCI_REG16_LE(0x0452)
+#define VD56G3_REG_MANUAL_DIGITAL_GAIN_CH2 CCI_REG16_LE(0x0454)
+#define VD56G3_REG_MANUAL_DIGITAL_GAIN_CH3 CCI_REG16_LE(0x0456)
+#define VD56G3_REG_FRAME_LENGTH CCI_REG16_LE(0x0458)
+#define VD56G3_REG_Y_START CCI_REG16_LE(0x045a)
+#define VD56G3_REG_Y_END CCI_REG16_LE(0x045c)
+#define VD56G3_REG_OUT_ROI_X_START CCI_REG16_LE(0x045e)
+#define VD56G3_REG_OUT_ROI_X_END CCI_REG16_LE(0x0460)
+#define VD56G3_REG_OUT_ROI_Y_START CCI_REG16_LE(0x0462)
+#define VD56G3_REG_OUT_ROI_Y_END CCI_REG16_LE(0x0464)
+#define VD56G3_REG_GPIO_0_CTRL CCI_REG8(0x0467)
+#define VD56G3_GPIOX_GPIO_IN 0x01
+#define VD56G3_GPIOX_STROBE_MODE 0x02
+#define VD56G3_REG_READOUT_CTRL CCI_REG8(0x047e)
+#define READOUT_NORMAL 0x00
+#define READOUT_DIGITAL_BINNING_X2 0x01
+
+/* The VD56G3 is a portrait image sensor with native resolution of 1124x1364. */
+#define VD56G3_NATIVE_WIDTH 1124
+#define VD56G3_NATIVE_HEIGHT 1364
+#define VD56G3_DEFAULT_MODE 0
+
+/* PLL settings */
+#define VD56G3_TARGET_PLL 804000000UL
+#define VD56G3_VT_CLOCK_DIV 5
+
+/* External clock must be in [6MHz-27MHz] */
+#define VD56G3_XCLK_FREQ_MIN (6 * HZ_PER_MHZ)
+#define VD56G3_XCLK_FREQ_MAX (27 * HZ_PER_MHZ)
+
+/* Line length and frame length (settings are for standard 10-bit ADC mode) */
+#define VD56G3_LINE_LENGTH_MIN 1236
+#define VD56G3_VBLANK_MIN 110
+#define VD56G3_FRAME_LENGTH_DEF_60FPS 2168
+#define VD56G3_FRAME_LENGTH_MAX 0xffff
+
+/* Exposure settings */
+#define VD56G3_EXPOSURE_MARGIN 75
+#define VD56G3_EXPOSURE_MIN 5
+#define VD56G3_EXPOSURE_DEFAULT 1420
+
+/* Output Interface settings */
+#define VD56G3_MAX_CSI_DATA_LANES 2
+#define VD56G3_LINK_FREQ_DEF_1LANE 750000000UL
+#define VD56G3_LINK_FREQ_DEF_2LANES 402000000UL
+
+/* GPIOs */
+#define VD56G3_NB_GPIOS 8
+
+/* regulator supplies */
+static const char *const vd56g3_supply_names[] = {
+ "vcore",
+ "vddio",
+ "vana",
+};
+
+/* -----------------------------------------------------------------------------
+ * Models (VD56G3: Mono, VD66GY: Bayer RGB), Modes and formats
+ */
+
+enum vd56g3_models {
+ VD56G3_MODEL_VD56G3,
+ VD56G3_MODEL_VD66GY,
+};
+
+struct vd56g3_mode {
+ u32 width;
+ u32 height;
+};
+
+static const struct vd56g3_mode vd56g3_supported_modes[] = {
+ {
+ .width = VD56G3_NATIVE_WIDTH,
+ .height = VD56G3_NATIVE_HEIGHT,
+ },
+ {
+ .width = 1120,
+ .height = 1360,
+ },
+ {
+ .width = 1024,
+ .height = 1280,
+ },
+ {
+ .width = 1024,
+ .height = 768,
+ },
+ {
+ .width = 768,
+ .height = 1024,
+ },
+ {
+ .width = 720,
+ .height = 1280,
+ },
+ {
+ .width = 640,
+ .height = 480,
+ },
+ {
+ .width = 480,
+ .height = 640,
+ },
+ {
+ .width = 320,
+ .height = 240,
+ },
+};
+
+/*
+ * The sensor supports 8-bit and 10-bit output in both variants:
+ * - Monochrome
+ * - RGB (with all H/V flip variations)
+ */
+static const unsigned int vd56g3_mbus_codes[2][5] = {
+ {
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ },
+ {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ },
+};
+
+struct vd56g3 {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(vd56g3_supply_names)];
+ struct gpio_desc *reset_gpio;
+ struct clk *xclk;
+ struct regmap *regmap;
+ u32 xclk_freq;
+ u32 pll_prediv;
+ u32 pll_mult;
+ u32 pixel_clock;
+ u16 oif_ctrl;
+ u8 nb_of_lane;
+ u32 gpios[VD56G3_NB_GPIOS];
+ unsigned long ext_leds_mask;
+ bool is_mono;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *hblank_ctrl;
+ struct v4l2_ctrl *vblank_ctrl;
+ struct {
+ struct v4l2_ctrl *hflip_ctrl;
+ struct v4l2_ctrl *vflip_ctrl;
+ };
+ struct v4l2_ctrl *patgen_ctrl;
+ struct {
+ struct v4l2_ctrl *ae_ctrl;
+ struct v4l2_ctrl *expo_ctrl;
+ struct v4l2_ctrl *again_ctrl;
+ struct v4l2_ctrl *dgain_ctrl;
+ };
+ struct v4l2_ctrl *ae_lock_ctrl;
+ struct v4l2_ctrl *ae_bias_ctrl;
+ struct v4l2_ctrl *led_ctrl;
+};
+
+static inline struct vd56g3 *to_vd56g3(struct v4l2_subdev *sd)
+{
+ return container_of_const(sd, struct vd56g3, sd);
+}
+
+static inline struct vd56g3 *ctrl_to_vd56g3(struct v4l2_ctrl *ctrl)
+{
+ return container_of_const(ctrl->handler, struct vd56g3, ctrl_handler);
+}
+
+/* -----------------------------------------------------------------------------
+ * Additional i2c register helpers
+ */
+
+static int vd56g3_poll_reg(struct vd56g3 *sensor, u32 reg, u8 poll_val,
+ int *err)
+{
+ unsigned int val = 0;
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ /*
+ * The timeout must be longer than the longest frame duration. With
+ * the current blanking constraints, a frame can take up to 504 ms.
+ */
+ ret = regmap_read_poll_timeout(sensor->regmap, CCI_REG_ADDR(reg), val,
+ (val == poll_val), 2000,
+ 600 * USEC_PER_MSEC);
+
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+
+static int vd56g3_wait_state(struct vd56g3 *sensor, int state, int *err)
+{
+ return vd56g3_poll_reg(sensor, VD56G3_REG_SYSTEM_FSM, state, err);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls: definitions, helpers and handlers
+ */
+
+static const char *const vd56g3_tp_menu[] = { "Disabled",
+ "Solid Color",
+ "Vertical Color Bars",
+ "Horizontal Gray Scale",
+ "Vertical Gray Scale",
+ "Diagonal Gray Scale",
+ "Pseudo Random" };
+
+static const s64 vd56g3_ev_bias_qmenu[] = { -4000, -3500, -3000, -2500, -2000,
+ -1500, -1000, -500, 0, 500,
+ 1000, 1500, 2000, 2500, 3000,
+ 3500, 4000 };
+
+static const s64 vd56g3_link_freq_1lane[] = { VD56G3_LINK_FREQ_DEF_1LANE };
+
+static const s64 vd56g3_link_freq_2lanes[] = { VD56G3_LINK_FREQ_DEF_2LANES };
+
+static u8 vd56g3_get_bpp(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ default:
+ return 8;
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ return 10;
+ }
+}
+
+static u8 vd56g3_get_datatype(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ default:
+ return MIPI_CSI2_DT_RAW8;
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ return MIPI_CSI2_DT_RAW10;
+ }
+}
+
+static int vd56g3_read_expo_cluster(struct vd56g3 *sensor, bool force_cur_val)
+{
+ u64 exposure;
+ u64 again;
+ u64 dgain;
+ int ret = 0;
+
+ /*
+ * When 'force_cur_val' is enabled, save the ctrl value in 'cur.val'
+ * instead of the normal 'val'. This is used during power off to cache
+ * volatile ctrls and enable cold start.
+ */
+ cci_read(sensor->regmap, VD56G3_REG_APPLIED_COARSE_EXPOSURE, &exposure,
+ &ret);
+ cci_read(sensor->regmap, VD56G3_REG_APPLIED_ANALOG_GAIN, &again, &ret);
+ cci_read(sensor->regmap, VD56G3_REG_APPLIED_DIGITAL_GAIN, &dgain, &ret);
+ if (ret)
+ return ret;
+
+ if (force_cur_val) {
+ sensor->expo_ctrl->cur.val = exposure;
+ sensor->again_ctrl->cur.val = again;
+ sensor->dgain_ctrl->cur.val = dgain;
+ } else {
+ sensor->expo_ctrl->val = exposure;
+ sensor->again_ctrl->val = again;
+ sensor->dgain_ctrl->val = dgain;
+ }
+
+ return ret;
+}
+
+static int vd56g3_update_patgen(struct vd56g3 *sensor, u32 patgen_index)
+{
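+ /* Test pattern menu entries above index 2 map to register values 16+ */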
+ u32 pattern = patgen_index <= 2 ? patgen_index : patgen_index + 13;
+ u16 patgen = pattern << VD56G3_PATGEN_TYPE_SHIFT;
+ u8 duster = VD56G3_DUSTER_ENABLE_DEF_MODULES;
+ u8 darkcal = VD56G3_DARKCAL_ENABLE;
+ int ret = 0;
+
+ if (patgen_index) {
+ patgen |= VD56G3_PATGEN_ENABLE;
+ duster = VD56G3_DUSTER_DISABLE;
+ darkcal = VD56G3_DARKCAL_DISABLE_DARKAVG;
+ }
+
+ cci_write(sensor->regmap, VD56G3_REG_DUSTER_CTRL, duster, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_DARKCAL_CTRL, darkcal, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_PATGEN_CTRL, patgen, &ret);
+
+ return ret;
+}
+
+static int vd56g3_update_expo_cluster(struct vd56g3 *sensor, bool is_auto)
+{
+ u8 expo_state = is_auto ? VD56G3_EXP_MODE_AUTO : VD56G3_EXP_MODE_MANUAL;
+ int ret = 0;
+
+ if (sensor->ae_ctrl->is_new)
+ cci_write(sensor->regmap, VD56G3_REG_EXP_MODE, expo_state,
+ &ret);
+
+ /* In Auto expo, set coldstart parameters */
+ if (is_auto && sensor->ae_ctrl->is_new) {
+ cci_write(sensor->regmap,
+ VD56G3_REG_AE_COLDSTART_COARSE_EXPOSURE,
+ sensor->expo_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_COLDSTART_ANALOG_GAIN,
+ sensor->again_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_COLDSTART_DIGITAL_GAIN,
+ sensor->dgain_ctrl->val, &ret);
+ }
+
+ /* In Manual expo, set exposure, analog and digital gains */
+ if (!is_auto && sensor->expo_ctrl->is_new)
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_COARSE_EXPOSURE,
+ sensor->expo_ctrl->val, &ret);
+
+ if (!is_auto && sensor->again_ctrl->is_new)
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_ANALOG_GAIN,
+ sensor->again_ctrl->val, &ret);
+
+ if (!is_auto && sensor->dgain_ctrl->is_new) {
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_DIGITAL_GAIN_CH0,
+ sensor->dgain_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_DIGITAL_GAIN_CH1,
+ sensor->dgain_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_DIGITAL_GAIN_CH2,
+ sensor->dgain_ctrl->val, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_MANUAL_DIGITAL_GAIN_CH3,
+ sensor->dgain_ctrl->val, &ret);
+ }
+
+ return ret;
+}
+
+static int vd56g3_lock_exposure(struct vd56g3 *sensor, u32 lock_val)
+{
+ bool ae_lock = lock_val & V4L2_LOCK_EXPOSURE;
+ u8 expo_state = ae_lock ? VD56G3_EXP_MODE_FREEZE : VD56G3_EXP_MODE_AUTO;
+
+ if (sensor->ae_ctrl->val == V4L2_EXPOSURE_AUTO)
+ return cci_write(sensor->regmap, VD56G3_REG_EXP_MODE,
+ expo_state, NULL);
+
+ return 0;
+}
+
+static int vd56g3_write_gpiox(struct vd56g3 *sensor, unsigned long gpio_mask)
+{
+ unsigned long io;
+ u32 gpio_val;
+ int ret = 0;
+
+ for_each_set_bit(io, &gpio_mask, VD56G3_NB_GPIOS) {
+ gpio_val = sensor->gpios[io];
+
+ if (gpio_val == VD56G3_GPIOX_STROBE_MODE &&
+ sensor->led_ctrl->val == V4L2_FLASH_LED_MODE_NONE)
+ gpio_val = VD56G3_GPIOX_GPIO_IN;
+
+ cci_write(sensor->regmap, VD56G3_REG_GPIO_0_CTRL + io, gpio_val,
+ &ret);
+ }
+
+ return ret;
+}
+
+static int vd56g3_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vd56g3 *sensor = ctrl_to_vd56g3(ctrl);
+ int ret = 0;
+
+ /* Interact with HW only when it is powered ON */
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = vd56g3_read_expo_cluster(sensor, false);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static int vd56g3_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vd56g3 *sensor = ctrl_to_vd56g3(ctrl);
+ struct v4l2_subdev_state *state;
+ const struct v4l2_rect *crop;
+ unsigned int frame_length = 0;
+ unsigned int expo_max;
+ unsigned int ae_compensation;
+ bool is_auto = false;
+ int ret = 0;
+
+ state = v4l2_subdev_get_locked_active_state(&sensor->sd);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return 0;
+
+ /* Update control states, ranges, etc. regardless of the HW state */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ frame_length = crop->height + ctrl->val;
+ expo_max = frame_length - VD56G3_EXPOSURE_MARGIN;
+ ret = __v4l2_ctrl_modify_range(sensor->expo_ctrl,
+ VD56G3_EXPOSURE_MIN, expo_max, 1,
+ min(VD56G3_EXPOSURE_DEFAULT,
+ expo_max));
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ is_auto = (ctrl->val == V4L2_EXPOSURE_AUTO);
+ __v4l2_ctrl_grab(sensor->ae_lock_ctrl, !is_auto);
+ __v4l2_ctrl_grab(sensor->ae_bias_ctrl, !is_auto);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Interact with HW only when it is powered ON */
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ret = cci_write(sensor->regmap, VD56G3_REG_ORIENTATION,
+ sensor->hflip_ctrl->val |
+ (sensor->vflip_ctrl->val << 1),
+ NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = vd56g3_update_patgen(sensor, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = vd56g3_update_expo_cluster(sensor, is_auto);
+ break;
+ case V4L2_CID_3A_LOCK:
+ ret = vd56g3_lock_exposure(sensor, ctrl->val);
+ break;
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
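+ /* Convert the EV bias from 1/1000 EV units to 1/256 EV (8.8) */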
+ ae_compensation =
+ DIV_ROUND_CLOSEST((int)vd56g3_ev_bias_qmenu[ctrl->val] *
+ 256, 1000);
+ ret = cci_write(sensor->regmap, VD56G3_REG_AE_COMPENSATION,
+ ae_compensation, NULL);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = cci_write(sensor->regmap, VD56G3_REG_FRAME_LENGTH,
+ frame_length, NULL);
+ break;
+ case V4L2_CID_FLASH_LED_MODE:
+ ret = vd56g3_write_gpiox(sensor, sensor->ext_leds_mask);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops vd56g3_ctrl_ops = {
+ .g_volatile_ctrl = vd56g3_g_volatile_ctrl,
+ .s_ctrl = vd56g3_s_ctrl,
+};
+
+static int vd56g3_update_controls(struct vd56g3 *sensor)
+{
+ struct v4l2_subdev_state *state;
+ const struct v4l2_rect *crop;
+ unsigned int hblank;
+ unsigned int vblank_min, vblank, vblank_max;
+ unsigned int frame_length;
+ unsigned int expo_max;
+ int ret;
+
+ state = v4l2_subdev_get_locked_active_state(&sensor->sd);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+ hblank = VD56G3_LINE_LENGTH_MIN - crop->width;
+ vblank_min = VD56G3_VBLANK_MIN;
+ vblank = VD56G3_FRAME_LENGTH_DEF_60FPS - crop->height;
+ vblank_max = VD56G3_FRAME_LENGTH_MAX - crop->height;
+ frame_length = crop->height + vblank;
+ expo_max = frame_length - VD56G3_EXPOSURE_MARGIN;
+
+ /* Update blanking and exposure (ranges + values) */
+ ret = __v4l2_ctrl_modify_range(sensor->hblank_ctrl, hblank, hblank, 1,
+ hblank);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_modify_range(sensor->vblank_ctrl, vblank_min,
+ vblank_max, 1, vblank);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl(sensor->vblank_ctrl, vblank);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_modify_range(sensor->expo_ctrl, VD56G3_EXPOSURE_MIN,
+ expo_max, 1, VD56G3_EXPOSURE_DEFAULT);
+ if (ret)
+ return ret;
+
+ return __v4l2_ctrl_s_ctrl(sensor->expo_ctrl, VD56G3_EXPOSURE_DEFAULT);
+}
+
+static int vd56g3_init_controls(struct vd56g3 *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &vd56g3_ctrl_ops;
+ struct v4l2_ctrl_handler *hdl = &sensor->ctrl_handler;
+ struct v4l2_fwnode_device_properties fwnode_props;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 25);
+
+ /* Horizontal & vertical flips modify bayer code on RGB variant */
+ sensor->hflip_ctrl =
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (sensor->hflip_ctrl)
+ sensor->hflip_ctrl->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ sensor->vflip_ctrl =
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (sensor->vflip_ctrl)
+ sensor->vflip_ctrl->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ sensor->patgen_ctrl =
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(vd56g3_tp_menu) - 1, 0,
+ 0, vd56g3_tp_menu);
+
+ ctrl = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(vd56g3_link_freq_1lane) - 1, 0,
+ (sensor->nb_of_lane == 2) ?
+ vd56g3_link_freq_2lanes :
+ vd56g3_link_freq_1lane);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
+ sensor->pixel_clock, sensor->pixel_clock, 1,
+ sensor->pixel_clock);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ sensor->ae_ctrl = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+
+ sensor->ae_lock_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_3A_LOCK, 0,
+ GENMASK(2, 0), 0, 0);
+
+ sensor->ae_bias_ctrl =
+ v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(vd56g3_ev_bias_qmenu) - 1,
+ ARRAY_SIZE(vd56g3_ev_bias_qmenu) / 2,
+ vd56g3_ev_bias_qmenu);
+
+ /*
+ * Analog gain [1, 8] is computed as 32 / (32 - again_reg), with
+ * again_reg in the range [0, 28]; e.g. again_reg = 28 gives the
+ * maximum 8x gain. Digital gain [1.00, 8.00] is coded as a 5.8
+ * fixed-point value.
+ */
+ sensor->again_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
+ 0, 28, 1, 0);
+ sensor->dgain_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_DIGITAL_GAIN,
+ 0x100, 0x800, 1, 0x100);
+
+ /*
+ * Set the exposure, horizontal and vertical blanking ctrls
+ * to hardcoded values; they will be updated in vd56g3_update_controls().
+ * Exposure being part of an auto-cluster, set a meaningful value here.
+ */
+ sensor->expo_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ VD56G3_EXPOSURE_DEFAULT,
+ VD56G3_EXPOSURE_DEFAULT, 1,
+ VD56G3_EXPOSURE_DEFAULT);
+ sensor->hblank_ctrl =
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK, 1, 1, 1, 1);
+ if (sensor->hblank_ctrl)
+ sensor->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ sensor->vblank_ctrl =
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK, 1, 1, 1, 1);
+
+ /* Additional control based on device tree properties */
+ if (sensor->ext_leds_mask)
+ sensor->led_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_FLASH, 0,
+ V4L2_FLASH_LED_MODE_NONE);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto free_ctrls;
+ }
+
+ v4l2_ctrl_cluster(2, &sensor->hflip_ctrl);
+ v4l2_ctrl_auto_cluster(4, &sensor->ae_ctrl, V4L2_EXPOSURE_MANUAL, true);
+
+ /* Optional controls coming from fwnode (e.g. rotation, orientation). */
+ ret = v4l2_fwnode_device_parse(sensor->dev, &fwnode_props);
+ if (ret)
+ goto free_ctrls;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, ops, &fwnode_props);
+ if (ret)
+ goto free_ctrls;
+
+ sensor->sd.ctrl_handler = hdl;
+
+ return 0;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(hdl);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pad ops
+ */
+
+/*
+ * The media bus code depends on:
+ * - 8-bit or 10-bit output
+ * - variant: Mono or RGB
+ * - H/V flip settings in the RGB case
+ */
+static u32 vd56g3_get_mbus_code(struct vd56g3 *sensor, u32 code)
+{
+ unsigned int i_bpp;
+ unsigned int j;
+
+ for (i_bpp = 0; i_bpp < ARRAY_SIZE(vd56g3_mbus_codes); i_bpp++) {
+ for (j = 0; j < ARRAY_SIZE(vd56g3_mbus_codes[i_bpp]); j++) {
+ if (vd56g3_mbus_codes[i_bpp][j] == code)
+ goto endloops;
+ }
+ }
+
+endloops:
+ if (i_bpp >= ARRAY_SIZE(vd56g3_mbus_codes))
+ i_bpp = 0;
+
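+ /*
+ * On the RGB variant the flip combination selects the Bayer order:
+ * none -> GRBG, H -> RGGB, V -> BGGR, H+V -> GBRG.
+ */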
+ if (sensor->is_mono)
+ j = 0;
+ else
+ j = 1 + (sensor->hflip_ctrl->val ? 1 : 0) +
+ (sensor->vflip_ctrl->val ? 2 : 0);
+
+ return vd56g3_mbus_codes[i_bpp][j];
+}
+
+static int vd56g3_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vd56g3 *sensor = to_vd56g3(sd);
+
+ if (code->index >= ARRAY_SIZE(vd56g3_mbus_codes))
+ return -EINVAL;
+
+ code->code =
+ vd56g3_get_mbus_code(sensor, vd56g3_mbus_codes[code->index][0]);
+
+ return 0;
+}
+
+static int vd56g3_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(vd56g3_supported_modes))
+ return -EINVAL;
+
+ fse->min_width = vd56g3_supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = vd56g3_supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void vd56g3_update_img_pad_format(struct vd56g3 *sensor,
+ const struct vd56g3_mode *mode,
+ u32 mbus_code,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ mbus_fmt->width = mode->width;
+ mbus_fmt->height = mode->height;
+ mbus_fmt->code = vd56g3_get_mbus_code(sensor, mbus_code);
+ mbus_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ mbus_fmt->field = V4L2_FIELD_NONE;
+ mbus_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mbus_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ mbus_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+static int vd56g3_set_pad_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sd_fmt)
+{
+ struct vd56g3 *sensor = to_vd56g3(sd);
+ const struct vd56g3_mode *new_mode;
+ struct v4l2_rect pad_crop;
+ unsigned int binning;
+
+ new_mode = v4l2_find_nearest_size(vd56g3_supported_modes,
+ ARRAY_SIZE(vd56g3_supported_modes),
+ width, height, sd_fmt->format.width,
+ sd_fmt->format.height);
+
+ vd56g3_update_img_pad_format(sensor, new_mode, sd_fmt->format.code,
+ &sd_fmt->format);
+ *v4l2_subdev_state_get_format(sd_state, sd_fmt->pad) = sd_fmt->format;
+
+ /* Compute and update crop rectangle (maximized via binning) */
+ binning = min(VD56G3_NATIVE_WIDTH / sd_fmt->format.width,
+ VD56G3_NATIVE_HEIGHT / sd_fmt->format.height);
+ binning = min(binning, 2U);
+ pad_crop.width = sd_fmt->format.width * binning;
+ pad_crop.height = sd_fmt->format.height * binning;
+ pad_crop.left = (VD56G3_NATIVE_WIDTH - pad_crop.width) / 2;
+ pad_crop.top = (VD56G3_NATIVE_HEIGHT - pad_crop.height) / 2;
+ *v4l2_subdev_state_get_crop(sd_state, sd_fmt->pad) = pad_crop;
+
+	/* Update the controls when operating on the active state */
+ if (sd_fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return vd56g3_update_controls(sensor);
+
+ return 0;
+}
+
+static int vd56g3_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
+ break;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = VD56G3_NATIVE_WIDTH;
+ sel->r.height = VD56G3_NATIVE_HEIGHT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vd56g3_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct v4l2_subdev_state *state;
+ const struct v4l2_mbus_framefmt *format;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ format = v4l2_subdev_state_get_format(state, pad);
+ v4l2_subdev_unlock_state(state);
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+ fd->num_entries = 1;
+ fd->entry[0].pixelcode = format->code;
+ fd->entry[0].stream = 0;
+ fd->entry[0].bus.csi2.vc = 0;
+ fd->entry[0].bus.csi2.dt = vd56g3_get_datatype(format->code);
+
+ return 0;
+}
+
+static int vd56g3_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct vd56g3 *sensor = to_vd56g3(sd);
+ const struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ const struct v4l2_rect *crop = v4l2_subdev_state_get_crop(state, 0);
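+	/* D-PHY is double data rate: per-lane bit rate is twice the link freq */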
+ unsigned int csi_mbps = ((sensor->nb_of_lane == 2) ?
+ VD56G3_LINK_FREQ_DEF_2LANES :
+ VD56G3_LINK_FREQ_DEF_1LANE) *
+ 2 / MEGA;
+ unsigned int binning;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(sensor->dev);
+ if (ret < 0)
+ return ret;
+
+ /* configure clocks */
+ cci_write(sensor->regmap, VD56G3_REG_EXT_CLOCK, sensor->xclk_freq,
+ &ret);
+ cci_write(sensor->regmap, VD56G3_REG_CLK_PLL_PREDIV, sensor->pll_prediv,
+ &ret);
+ cci_write(sensor->regmap, VD56G3_REG_CLK_SYS_PLL_MULT, sensor->pll_mult,
+ &ret);
+
+ /* configure output */
+ cci_write(sensor->regmap, VD56G3_REG_FORMAT_CTRL,
+ vd56g3_get_bpp(format->code), &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OIF_CTRL, sensor->oif_ctrl, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OIF_CSI_BITRATE, csi_mbps, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OIF_IMG_CTRL,
+ vd56g3_get_datatype(format->code), &ret);
+ cci_write(sensor->regmap, VD56G3_REG_ISL_ENABLE, 0, &ret);
+
+ /* configure binning mode */
+ switch (crop->width / format->width) {
+ case 1:
+ default:
+ binning = READOUT_NORMAL;
+ break;
+ case 2:
+ binning = READOUT_DIGITAL_BINNING_X2;
+ break;
+ }
+ cci_write(sensor->regmap, VD56G3_REG_READOUT_CTRL, binning, &ret);
+
+ /* configure ROIs */
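+	/*
+	 * Vertical cropping is applied at readout (Y_START/Y_END), so the
+	 * output and AE ROIs use window-relative Y but absolute X.
+	 */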
+ cci_write(sensor->regmap, VD56G3_REG_Y_START, crop->top, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_Y_END,
+ crop->top + crop->height - 1, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OUT_ROI_X_START, crop->left, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OUT_ROI_X_END,
+ crop->left + crop->width - 1, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OUT_ROI_Y_START, 0, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_OUT_ROI_Y_END, crop->height - 1,
+ &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_ROI_START_H, crop->left, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_ROI_END_H,
+ crop->left + crop->width - 1, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_ROI_START_V, 0, &ret);
+ cci_write(sensor->regmap, VD56G3_REG_AE_ROI_END_V, crop->height - 1,
+ &ret);
+ if (ret)
+ goto rpm_put;
+
+	/* Set default GPIO values; the V4L2 ctrl setup may override them */
+ ret = vd56g3_write_gpiox(sensor, GENMASK(VD56G3_NB_GPIOS - 1, 0));
+ if (ret)
+ goto rpm_put;
+
+ /* Apply settings from V4L2 ctrls */
+ ret = __v4l2_ctrl_handler_setup(&sensor->ctrl_handler);
+ if (ret)
+ goto rpm_put;
+
+ /* start streaming */
+ cci_write(sensor->regmap, VD56G3_REG_STBY, VD56G3_CMD_START_STREAM,
+ &ret);
+ vd56g3_poll_reg(sensor, VD56G3_REG_STBY, VD56G3_CMD_ACK, &ret);
+ vd56g3_wait_state(sensor, VD56G3_SYSTEM_FSM_STREAMING, &ret);
+ if (ret)
+ goto rpm_put;
+
+ /* some controls are locked during streaming */
+ __v4l2_ctrl_grab(sensor->hflip_ctrl, true);
+ __v4l2_ctrl_grab(sensor->vflip_ctrl, true);
+ __v4l2_ctrl_grab(sensor->patgen_ctrl, true);
+
+ return ret;
+
+rpm_put:
+ dev_err(sensor->dev, "Failed to start streaming\n");
+ pm_runtime_put_sync(sensor->dev);
+
+ return ret;
+}
+
+static int vd56g3_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct vd56g3 *sensor = to_vd56g3(sd);
+ int ret;
+
+	/* Read back the expo cluster to enable AE cold-start */
+ ret = vd56g3_read_expo_cluster(sensor, true);
+
+ cci_write(sensor->regmap, VD56G3_REG_STREAMING, VD56G3_CMD_STOP_STREAM,
+ &ret);
+ vd56g3_poll_reg(sensor, VD56G3_REG_STREAMING, VD56G3_CMD_ACK, &ret);
+ vd56g3_wait_state(sensor, VD56G3_SYSTEM_FSM_SW_STBY, &ret);
+
+	/* Unlock the controls grabbed during streaming */
+ __v4l2_ctrl_grab(sensor->hflip_ctrl, false);
+ __v4l2_ctrl_grab(sensor->vflip_ctrl, false);
+ __v4l2_ctrl_grab(sensor->patgen_ctrl, false);
+
+ pm_runtime_mark_last_busy(sensor->dev);
+ pm_runtime_put_autosuspend(sensor->dev);
+
+ return ret;
+}
+
+static int vd56g3_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int def_mode = VD56G3_DEFAULT_MODE;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = vd56g3_mbus_codes[0][0],
+ .width = vd56g3_supported_modes[def_mode].width,
+ .height = vd56g3_supported_modes[def_mode].height,
+ },
+ };
+
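+	/* Apply the default mode through set_fmt so the crop is set up too */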
+ return vd56g3_set_pad_fmt(sd, sd_state, &fmt);
+}
+
+static const struct v4l2_subdev_video_ops vd56g3_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops vd56g3_pad_ops = {
+ .enum_mbus_code = vd56g3_enum_mbus_code,
+ .enum_frame_size = vd56g3_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = vd56g3_set_pad_fmt,
+ .get_selection = vd56g3_get_selection,
+ .get_frame_desc = vd56g3_get_frame_desc,
+ .enable_streams = vd56g3_enable_streams,
+ .disable_streams = vd56g3_disable_streams,
+};
+
+static const struct v4l2_subdev_ops vd56g3_subdev_ops = {
+ .video = &vd56g3_video_ops,
+ .pad = &vd56g3_pad_ops,
+};
+
+static const struct media_entity_operations vd56g3_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops vd56g3_internal_ops = {
+ .init_state = vd56g3_init_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+static int vd56g3_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct vd56g3 *sensor = to_vd56g3(sd);
+ int ret;
+
+ /* power on */
+ ret = regulator_bulk_enable(ARRAY_SIZE(sensor->supplies),
+ sensor->supplies);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sensor->xclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ goto disable_reg;
+ }
+
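+	/* Deassert reset and wait for the sensor to be ready to boot */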
+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ usleep_range(3500, 4000);
+ ret = vd56g3_wait_state(sensor, VD56G3_SYSTEM_FSM_READY_TO_BOOT, NULL);
+ if (ret) {
+ dev_err(dev, "Sensor reset failed: %d\n", ret);
+ goto disable_clock;
+ }
+
+ /* boot sensor */
+ cci_write(sensor->regmap, VD56G3_REG_BOOT, VD56G3_CMD_BOOT, &ret);
+ vd56g3_poll_reg(sensor, VD56G3_REG_BOOT, VD56G3_CMD_ACK, &ret);
+ vd56g3_wait_state(sensor, VD56G3_SYSTEM_FSM_SW_STBY, &ret);
+ if (ret) {
+ dev_err(dev, "Sensor boot failed: %d\n", ret);
+ goto disable_clock;
+ }
+
+ return 0;
+
+disable_clock:
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ clk_disable_unprepare(sensor->xclk);
+disable_reg:
+ regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+
+ return ret;
+}
+
+static int vd56g3_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct vd56g3 *sensor = to_vd56g3(sd);
+
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+ clk_disable_unprepare(sensor->xclk);
+ regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops vd56g3_pm_ops = {
+ SET_RUNTIME_PM_OPS(vd56g3_power_off, vd56g3_power_on, NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and initialization
+ */
+
+static int vd56g3_check_csi_conf(struct vd56g3 *sensor,
+ struct fwnode_handle *endpoint)
+{
+ struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ u32 phy_data_lanes[VD56G3_MAX_CSI_DATA_LANES] = { ~0, ~0 };
+ u8 n_lanes;
+ u64 frequency;
+ int p, l;
+ int ret = 0;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep);
+ if (ret)
+ return -EINVAL;
+
+	/* Check the number of data lanes */
+ n_lanes = ep.bus.mipi_csi2.num_data_lanes;
+ if (n_lanes != 1 && n_lanes != 2) {
+ dev_err(sensor->dev, "Invalid data lane number: %d\n", n_lanes);
+ ret = -EINVAL;
+ goto done;
+ }
+ sensor->nb_of_lane = n_lanes;
+
+ /* Clock lane must be first */
+ if (ep.bus.mipi_csi2.clock_lane != 0) {
+ dev_err(sensor->dev, "Clock lane must be mapped to lane 0\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+	/*
+	 * Prepare the Output Interface configuration from the lane settings:
+	 * logical to physical lane conversion (+ pad the remaining slots).
+	 */
+ for (l = 0; l < n_lanes; l++)
+ phy_data_lanes[ep.bus.mipi_csi2.data_lanes[l] - 1] = l;
+ for (p = 0; p < VD56G3_MAX_CSI_DATA_LANES; p++) {
+ if (phy_data_lanes[p] != ~0)
+ continue;
+ phy_data_lanes[p] = l;
+ l++;
+ }
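+	/*
+	 * OIF_CTRL packs the lane count together with the per-lane
+	 * polarities and the physical lane mapping computed above.
+	 */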
+ sensor->oif_ctrl = n_lanes |
+ (ep.bus.mipi_csi2.lane_polarities[0] << 3) |
+ ((phy_data_lanes[0]) << 4) |
+ (ep.bus.mipi_csi2.lane_polarities[1] << 6) |
+ ((phy_data_lanes[1]) << 7) |
+ (ep.bus.mipi_csi2.lane_polarities[2] << 9);
+
+ /* Check link frequency */
+ if (!ep.nr_of_link_frequencies) {
+		dev_err(sensor->dev, "link-frequencies not found in DT\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ frequency = (n_lanes == 2) ? VD56G3_LINK_FREQ_DEF_2LANES :
+ VD56G3_LINK_FREQ_DEF_1LANE;
+ if (ep.nr_of_link_frequencies != 1 ||
+ ep.link_frequencies[0] != frequency) {
+ dev_err(sensor->dev, "Link frequency not supported: %lld\n",
+ ep.link_frequencies[0]);
+ ret = -EINVAL;
+ goto done;
+ }
+
+done:
+ v4l2_fwnode_endpoint_free(&ep);
+
+ return ret;
+}
+
+static int vd56g3_parse_dt_gpios_array(struct vd56g3 *sensor, char *prop_name,
+ u32 *array, unsigned int *nb)
+{
+ struct device *dev = sensor->dev;
+ unsigned int i;
+ int ret;
+
+ if (!device_property_present(dev, prop_name)) {
+ *nb = 0;
+ return 0;
+ }
+
+ ret = device_property_count_u32(dev, prop_name);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read %s count\n", prop_name);
+ return ret;
+ }
+
+	if (ret > VD56G3_NB_GPIOS) {
+		dev_err(dev, "Too many GPIOs in %s prop\n", prop_name);
+		return -EINVAL;
+	}
+
+	*nb = ret;
+ ret = device_property_read_u32_array(dev, prop_name, array, *nb);
+ if (ret) {
+ dev_err(dev, "Failed to read %s prop\n", prop_name);
+ return ret;
+ }
+
+ for (i = 0; i < *nb; i++) {
+ if (array[i] >= VD56G3_NB_GPIOS) {
+ dev_err(dev, "Invalid GPIO: %d\n", array[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int vd56g3_parse_dt_gpios(struct vd56g3 *sensor)
+{
+ u32 led_gpios[VD56G3_NB_GPIOS];
+ unsigned int nb_gpios_leds;
+ unsigned int i;
+ int ret;
+
+ /* Initialize GPIOs to default */
+ for (i = 0; i < VD56G3_NB_GPIOS; i++)
+ sensor->gpios[i] = VD56G3_GPIOX_GPIO_IN;
+ sensor->ext_leds_mask = 0;
+
+	/* Optional 'st,leds' property: listed GPIOs become strobe outputs */
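+	/* e.g. 'st,leds = <0 2>;' makes GPIO0 and GPIO2 strobe outputs */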
+ ret = vd56g3_parse_dt_gpios_array(sensor, "st,leds", led_gpios,
+ &nb_gpios_leds);
+ if (ret)
+ return ret;
+ for (i = 0; i < nb_gpios_leds; i++) {
+ sensor->gpios[led_gpios[i]] = VD56G3_GPIOX_STROBE_MODE;
+ set_bit(led_gpios[i], &sensor->ext_leds_mask);
+ }
+
+ return 0;
+}
+
+static int vd56g3_parse_dt(struct vd56g3 *sensor)
+{
+ struct fwnode_handle *endpoint;
+ int ret;
+
+ endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(sensor->dev), 0,
+ 0, 0);
+ if (!endpoint) {
+ dev_err(sensor->dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = vd56g3_check_csi_conf(sensor, endpoint);
+ fwnode_handle_put(endpoint);
+ if (ret)
+ return ret;
+
+ return vd56g3_parse_dt_gpios(sensor);
+}
+
+static int vd56g3_get_regulators(struct vd56g3 *sensor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sensor->supplies); i++)
+ sensor->supplies[i].supply = vd56g3_supply_names[i];
+
+ return devm_regulator_bulk_get(sensor->dev,
+ ARRAY_SIZE(sensor->supplies),
+ sensor->supplies);
+}
+
+static int vd56g3_prepare_clock_tree(struct vd56g3 *sensor)
+{
+ const unsigned int predivs[] = { 1, 2, 4 };
+ u32 pll_out;
+ int i;
+
+	/* External clock must be in the [6 MHz, 27 MHz] range */
+	if (sensor->xclk_freq < VD56G3_XCLK_FREQ_MIN ||
+	    sensor->xclk_freq > VD56G3_XCLK_FREQ_MAX) {
+		dev_err(sensor->dev,
+			"Only the 6-27 MHz clock range is supported, got %lu MHz\n",
+			sensor->xclk_freq / HZ_PER_MHZ);
+		return -EINVAL;
+	}
+
+	/* PLL input must be in the [6 MHz, 12 MHz) range */
+ for (i = 0; i < ARRAY_SIZE(predivs); i++) {
+ sensor->pll_prediv = predivs[i];
+ if (sensor->xclk_freq / sensor->pll_prediv < 12 * HZ_PER_MHZ)
+ break;
+ }
+
+	/* PLL output clock must be as close as possible to 804 MHz */
+ sensor->pll_mult = (VD56G3_TARGET_PLL * sensor->pll_prediv +
+ sensor->xclk_freq / 2) /
+ sensor->xclk_freq;
+ pll_out = sensor->xclk_freq * sensor->pll_mult / sensor->pll_prediv;
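+	/*
+	 * e.g. xclk = 12 MHz, prediv = 2: pll_mult = (804 * 2 + 6) / 12 = 134,
+	 * hence pll_out = 12 * 134 / 2 = 804 MHz.
+	 */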
+
+	/* Target pixel clock for the standard 10-bit ADC mode: 160.8 MHz */
+ sensor->pixel_clock = pll_out / VD56G3_VT_CLOCK_DIV;
+
+ return 0;
+}
+
+static int vd56g3_detect(struct vd56g3 *sensor)
+{
+ struct device *dev = sensor->dev;
+ unsigned int model;
+ u64 model_id;
+ u64 device_revision;
+ u64 optical_revision;
+ int ret = 0;
+
+ model = (uintptr_t)device_get_match_data(dev);
+
+ ret = cci_read(sensor->regmap, VD56G3_REG_MODEL_ID, &model_id, NULL);
+ if (ret)
+ return ret;
+
+ if (model_id != VD56G3_MODEL_ID) {
+ dev_err(dev, "Unsupported sensor id: %x\n", (u16)model_id);
+ return -ENODEV;
+ }
+
+ ret = cci_read(sensor->regmap, VD56G3_REG_REVISION, &device_revision,
+ NULL);
+ if (ret)
+ return ret;
+
+ if ((device_revision >> 8) != VD56G3_REVISION_CUT3) {
+ dev_err(dev, "Unsupported version: %x\n", (u16)device_revision);
+ return -ENODEV;
+ }
+
+ ret = cci_read(sensor->regmap, VD56G3_REG_OPTICAL_REVISION,
+ &optical_revision, NULL);
+ if (ret)
+ return ret;
+
+ sensor->is_mono =
+ ((optical_revision & 1) == VD56G3_OPTICAL_REVISION_MONO);
+ if ((sensor->is_mono && model == VD56G3_MODEL_VD66GY) ||
+ (!sensor->is_mono && model == VD56G3_MODEL_VD56G3)) {
+ dev_err(dev, "Found %s sensor, while %s model is defined in DT\n",
+ (sensor->is_mono) ? "Mono" : "Bayer",
+ (model == VD56G3_MODEL_VD56G3) ? "vd56g3" : "vd66gy");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int vd56g3_subdev_init(struct vd56g3 *sensor)
+{
+ struct v4l2_subdev_state *state;
+ int ret;
+
+	/* Init the remaining subdev ops */
+ sensor->sd.internal_ops = &vd56g3_internal_ops;
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.entity.ops = &vd56g3_subdev_entity_ops;
+
+ /* Init source pad */
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to init media entity: %d\n", ret);
+ return ret;
+ }
+
+ /* Init controls */
+ ret = vd56g3_init_controls(sensor);
+ if (ret) {
+ dev_err(sensor->dev, "Controls initialization failed: %d\n",
+ ret);
+ goto err_media;
+ }
+
+	/* Finalize the subdev init: default resolution + RAW8 */
+ sensor->sd.state_lock = sensor->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&sensor->sd);
+ if (ret) {
+ dev_err(sensor->dev, "Subdev init failed: %d\n", ret);
+ goto err_ctrls;
+ }
+
+ /* Update controls according to the resolution set */
+ state = v4l2_subdev_lock_and_get_active_state(&sensor->sd);
+ ret = vd56g3_update_controls(sensor);
+ v4l2_subdev_unlock_state(state);
+ if (ret) {
+ dev_err(sensor->dev, "Controls update failed: %d\n", ret);
+ goto err_ctrls;
+ }
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(sensor->sd.ctrl_handler);
+
+err_media:
+ media_entity_cleanup(&sensor->sd.entity);
+
+ return ret;
+}
+
+static void vd56g3_subdev_cleanup(struct vd56g3 *sensor)
+{
+ v4l2_async_unregister_subdev(&sensor->sd);
+ v4l2_subdev_cleanup(&sensor->sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(sensor->sd.ctrl_handler);
+}
+
+static int vd56g3_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct vd56g3 *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&sensor->sd, client, &vd56g3_subdev_ops);
+ sensor->dev = dev;
+
+ ret = vd56g3_parse_dt(sensor);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to parse Device Tree\n");
+
+	/* Get (and check) resources: regulators, ext clock, reset gpio */
+ ret = vd56g3_get_regulators(sensor);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ sensor->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "Failed to get xclk\n");
+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
+ ret = vd56g3_prepare_clock_tree(sensor);
+ if (ret)
+ return ret;
+
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(sensor->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(sensor->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap))
+ return dev_err_probe(dev, PTR_ERR(sensor->regmap),
+ "Failed to init regmap\n");
+
+ /* Power ON */
+ ret = vd56g3_power_on(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Sensor power on failed\n");
+
+	/* Enable runtime PM with autosuspend (sensor is ON, so mark it active) */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ /* Check HW model/version */
+ ret = vd56g3_detect(sensor);
+ if (ret) {
+ dev_err(dev, "Sensor detect failed: %d\n", ret);
+ goto err_power_off;
+ }
+
+ /* Initialize & register subdev (v4l2_i2c subdev already initialized) */
+ ret = vd56g3_subdev_init(sensor);
+ if (ret) {
+ dev_err(dev, "V4l2 init failed: %d\n", ret);
+ goto err_power_off;
+ }
+
+ ret = v4l2_async_register_subdev(&sensor->sd);
+ if (ret) {
+ dev_err(dev, "Async subdev register failed: %d\n", ret);
+ goto err_subdev;
+ }
+
+	/* The sensor can now be powered off (after the autosuspend delay) */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+	dev_dbg(dev, "Successfully probed %s sensor\n",
+		(sensor->is_mono) ? "vd56g3" : "vd66gy");
+
+ return 0;
+
+err_subdev:
+ vd56g3_subdev_cleanup(sensor);
+err_power_off:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ vd56g3_power_off(dev);
+
+ return ret;
+}
+
+static void vd56g3_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct vd56g3 *sensor = to_vd56g3(sd);
+
+ vd56g3_subdev_cleanup(sensor);
+
+ pm_runtime_disable(sensor->dev);
+ if (!pm_runtime_status_suspended(sensor->dev))
+ vd56g3_power_off(sensor->dev);
+ pm_runtime_set_suspended(sensor->dev);
+ pm_runtime_dont_use_autosuspend(sensor->dev);
+}
+
+static const struct of_device_id vd56g3_dt_ids[] = {
+ { .compatible = "st,vd56g3", .data = (void *)VD56G3_MODEL_VD56G3 },
+ { .compatible = "st,vd66gy", .data = (void *)VD56G3_MODEL_VD66GY },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vd56g3_dt_ids);
+
+static struct i2c_driver vd56g3_i2c_driver = {
+ .driver = {
+ .name = "vd56g3",
+ .of_match_table = vd56g3_dt_ids,
+ .pm = &vd56g3_pm_ops,
+ },
+ .probe = vd56g3_probe,
+ .remove = vd56g3_remove,
+};
+
+module_i2c_driver(vd56g3_i2c_driver);
+
+MODULE_AUTHOR("Benjamin Mugnier <benjamin.mugnier@foss.st.com>");
+MODULE_AUTHOR("Mickael Guene <mickael.guene@st.com>");
+MODULE_AUTHOR("Sylvain Petinot <sylvain.petinot@foss.st.com>");
+MODULE_DESCRIPTION("ST VD56G3 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 7f65aa609388..eebb16c58f3d 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -15,7 +15,6 @@ if MEDIA_CAMERA_SUPPORT
source "drivers/media/pci/mgb4/Kconfig"
source "drivers/media/pci/solo6x10/Kconfig"
-source "drivers/media/pci/sta2x11/Kconfig"
source "drivers/media/pci/tw5864/Kconfig"
source "drivers/media/pci/tw68/Kconfig"
source "drivers/media/pci/tw686x/Kconfig"
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index f18c7e15abe3..02763ad88511 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -22,8 +22,6 @@ obj-y += ttpci/ \
# Please keep it alphabetically sorted by Kconfig name
# (e. g. LC_ALL=C sort Makefile)
-obj-$(CONFIG_STA2X11_VIP) += sta2x11/
-
obj-$(CONFIG_VIDEO_BT848) += bt8xx/
obj-$(CONFIG_VIDEO_COBALT) += cobalt/
obj-$(CONFIG_VIDEO_CX18) += cx18/
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 377a7e7f0499..9ce67f515843 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2798,7 +2798,7 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
static void bttv_irq_timeout(struct timer_list *t)
{
- struct bttv *btv = from_timer(btv, t, timeout);
+ struct bttv *btv = timer_container_of(btv, t, timeout);
struct bttv_buffer_set old,new;
struct bttv_buffer *ovbi;
struct bttv_buffer *item;
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 9eb7a5356b4c..84aa269248fd 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -126,7 +126,7 @@ void bttv_input_irq(struct bttv *btv)
static void bttv_input_timer(struct timer_list *t)
{
- struct bttv_ir *ir = from_timer(ir, t, timer);
+ struct bttv_ir *ir = timer_container_of(ir, t, timer);
struct bttv *btv = ir->btv;
if (btv->c.type == BTTV_BOARD_ENLTV_FM_2)
@@ -182,7 +182,7 @@ static u32 bttv_rc5_decode(unsigned int code)
static void bttv_rc5_timer_end(struct timer_list *t)
{
- struct bttv_ir *ir = from_timer(ir, t, timer);
+ struct bttv_ir *ir = timer_container_of(ir, t, timer);
ktime_t tv;
u32 gap, rc5, scancode;
u8 toggle, command, system;
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 7e742733391b..315577d71d95 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -628,7 +628,7 @@ __poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
void cx18_vb_timeout(struct timer_list *t)
{
- struct cx18_stream *s = from_timer(s, t, vb_timeout);
+ struct cx18_stream *s = timer_container_of(s, t, vb_timeout);
/*
* Return all of the buffers in error state, so the vbi/vid inode
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index 1cb745855600..83e682e1a4b7 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -66,6 +66,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
IPU_SENSOR_CONFIG("INT347E", 1, 319200000),
/* Hynix Hi-556 */
IPU_SENSOR_CONFIG("INT3537", 1, 437000000),
+ /* Lontium lt6911uxe */
+ IPU_SENSOR_CONFIG("INTC10C5", 0),
/* Omnivision OV01A10 / OV01A1S */
IPU_SENSOR_CONFIG("OVTI01A0", 1, 400000000),
IPU_SENSOR_CONFIG("OVTI01AS", 1, 400000000),
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 0c365eb59085..16fde96c9fb2 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1702,14 +1702,13 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
dev_info(dev, "device 0x%x (rev: 0x%x)\n",
pci_dev->device, pci_dev->revision);
- r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
+ cio2->base = pcim_iomap_region(pci_dev, CIO2_PCI_BAR, CIO2_NAME);
+ r = PTR_ERR_OR_ZERO(cio2->base);
if (r) {
dev_err(dev, "failed to remap I/O memory (%d)\n", r);
return -ENODEV;
}
- cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
-
pci_set_drvdata(pci_dev, cio2);
pci_set_master(pci_dev);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.c b/drivers/media/pci/intel/ipu6/ipu6-bus.c
index 37d88ddb6ee7..5cee2748983b 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-bus.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.c
@@ -82,7 +82,7 @@ static void ipu6_bus_release(struct device *dev)
struct ipu6_bus_device *
ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
- void *pdata, struct ipu6_buttress_ctrl *ctrl,
+ void *pdata, const struct ipu6_buttress_ctrl *ctrl,
char *name)
{
struct auxiliary_device *auxdev;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-bus.h b/drivers/media/pci/intel/ipu6/ipu6-bus.h
index bb4926dfdf08..a08c5468d536 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-bus.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-bus.h
@@ -15,8 +15,6 @@
struct firmware;
struct pci_dev;
-#define IPU6_BUS_NAME IPU6_NAME "-bus"
-
struct ipu6_buttress_ctrl;
struct ipu6_bus_device {
@@ -27,8 +25,7 @@ struct ipu6_bus_device {
void *pdata;
struct ipu6_mmu *mmu;
struct ipu6_device *isp;
- struct ipu6_buttress_ctrl *ctrl;
- u64 dma_mask;
+ const struct ipu6_buttress_ctrl *ctrl;
const struct firmware *fw;
struct sg_table fw_sgt;
u64 *pkg_dir;
@@ -50,7 +47,7 @@ struct ipu6_auxdrv_data {
struct ipu6_bus_device *
ipu6_bus_initialize_device(struct pci_dev *pdev, struct device *parent,
- void *pdata, struct ipu6_buttress_ctrl *ctrl,
+ void *pdata, const struct ipu6_buttress_ctrl *ctrl,
char *name);
int ipu6_bus_add_device(struct ipu6_bus_device *adev);
void ipu6_bus_del_devices(struct pci_dev *pdev);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.c b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
index d8db5aa5d528..103386c4f6ae 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-buttress.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.c
@@ -443,8 +443,8 @@ irqreturn_t ipu6_buttress_isr_threaded(int irq, void *isp_ptr)
return ret;
}
-int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
- bool on)
+int ipu6_buttress_power(struct device *dev,
+ const struct ipu6_buttress_ctrl *ctrl, bool on)
{
struct ipu6_device *isp = to_ipu6_bus_device(dev)->isp;
u32 pwr_sts, val;
@@ -478,8 +478,6 @@ int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
dev_err(&isp->pdev->dev,
"Change power status timeout with 0x%x\n", val);
- ctrl->started = !ret && on;
-
mutex_unlock(&isp->buttress.power_mutex);
return ret;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-buttress.h b/drivers/media/pci/intel/ipu6/ipu6-buttress.h
index 482978c2a09d..51e5ad48db82 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-buttress.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-buttress.h
@@ -26,7 +26,6 @@ struct ipu6_buttress_ctrl {
u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off;
unsigned int ratio;
unsigned int qos_floor;
- bool started;
};
struct ipu6_buttress_ipc {
@@ -66,8 +65,8 @@ int ipu6_buttress_map_fw_image(struct ipu6_bus_device *sys,
struct sg_table *sgt);
void ipu6_buttress_unmap_fw_image(struct ipu6_bus_device *sys,
struct sg_table *sgt);
-int ipu6_buttress_power(struct device *dev, struct ipu6_buttress_ctrl *ctrl,
- bool on);
+int ipu6_buttress_power(struct device *dev,
+ const struct ipu6_buttress_ctrl *ctrl, bool on);
bool ipu6_buttress_get_secure_mode(struct ipu6_device *isp);
int ipu6_buttress_authenticate(struct ipu6_device *isp);
int ipu6_buttress_reset_authentication(struct ipu6_device *isp);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
index 1ca60ca79dba..7296373d36b0 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
@@ -172,7 +172,7 @@ void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size,
count = PHYS_PFN(size);
iova = alloc_iova(&mmu->dmap->iovad, count,
- PHYS_PFN(dma_get_mask(dev)), 0);
+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
if (!iova)
goto out_kfree;
@@ -398,7 +398,7 @@ int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
nents, npages);
iova = alloc_iova(&mmu->dmap->iovad, npages,
- PHYS_PFN(dma_get_mask(dev)), 0);
+ PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
if (!iova)
return 0;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.h b/drivers/media/pci/intel/ipu6/ipu6-dma.h
index 2882850d9366..ae9b9a5df57f 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.h
@@ -4,9 +4,6 @@
#ifndef IPU6_DMA_H
#define IPU6_DMA_H
-#include <linux/dma-map-ops.h>
-#include <linux/dma-mapping.h>
-#include <linux/iova.h>
#include <linux/iova.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
index bc8594c94f99..ce8eed91065c 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.h
@@ -14,7 +14,6 @@ struct v4l2_mbus_frame_desc_entry;
struct ipu6_isys_video;
struct ipu6_isys;
-struct ipu6_isys_csi2_pdata;
struct ipu6_isys_stream;
#define NR_OF_CSI2_VC 16
@@ -37,7 +36,6 @@ struct ipu6_isys_stream;
struct ipu6_isys_csi2 {
struct ipu6_isys_subdev asd;
- struct ipu6_isys_csi2_pdata *pdata;
struct ipu6_isys *isys;
struct ipu6_isys_video av[NR_OF_CSI2_SRC_PADS];
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
index 72f5f987ef48..aa2cf7287477 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -652,10 +652,8 @@ static void stop_streaming(struct vb2_queue *q)
}
static unsigned int
-get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream,
- struct ipu6_fw_isys_resp_info_abi *info)
+get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream, u64 time)
{
- u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
struct ipu6_isys *isys = stream->isys;
struct device *dev = &isys->adev->auxdev.dev;
unsigned int i;
@@ -681,8 +679,7 @@ get_sof_sequence_by_timestamp(struct ipu6_isys_stream *stream,
return 0;
}
-static u64 get_sof_ns_delta(struct ipu6_isys_video *av,
- struct ipu6_fw_isys_resp_info_abi *info)
+static u64 get_sof_ns_delta(struct ipu6_isys_video *av, u64 timestamp)
{
struct ipu6_bus_device *adev = av->isys->adev;
struct ipu6_device *isp = adev->isp;
@@ -692,13 +689,13 @@ static u64 get_sof_ns_delta(struct ipu6_isys_video *av,
if (!tsc_now)
return 0;
- delta = tsc_now - ((u64)info->timestamp[1] << 32 | info->timestamp[0]);
+ delta = tsc_now - timestamp;
return ipu6_buttress_tsc_ticks_to_ns(delta, isp);
}
-void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
- struct ipu6_fw_isys_resp_info_abi *info)
+static void
+ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib, u64 time)
{
struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -709,8 +706,8 @@ void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
u64 ns;
u32 sequence;
- ns = ktime_get_ns() - get_sof_ns_delta(av, info);
- sequence = get_sof_sequence_by_timestamp(stream, info);
+ ns = ktime_get_ns() - get_sof_ns_delta(av, time);
+ sequence = get_sof_sequence_by_timestamp(stream, time);
vbuf->vb2_buf.timestamp = ns;
vbuf->sequence = sequence;
@@ -721,7 +718,7 @@ void ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
vbuf->vb2_buf.timestamp);
}
-void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib)
+static void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib)
{
struct vb2_buffer *vb = ipu6_isys_buffer_to_vb2_buffer(ib);
@@ -737,10 +734,11 @@ void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib)
}
}
-void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
- struct ipu6_fw_isys_resp_info_abi *info)
+static void
+ipu6_stream_buf_ready(struct ipu6_isys_stream *stream, u8 pin_id, u32 pin_addr,
+ u64 time, bool error_check)
{
- struct ipu6_isys_queue *aq = stream->output_pins[info->pin_id].aq;
+ struct ipu6_isys_queue *aq = stream->output_pins_queue[pin_id];
struct ipu6_isys *isys = stream->isys;
struct device *dev = &isys->adev->auxdev.dev;
struct ipu6_isys_buffer *ib;
@@ -766,7 +764,7 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
ivb = vb2_buffer_to_ipu6_isys_video_buffer(vvb);
addr = ivb->dma_addr;
- if (info->pin.addr != addr) {
+ if (pin_addr != addr) {
if (first)
dev_err(dev, "Unexpected buffer address %pad\n",
&addr);
@@ -774,8 +772,7 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
continue;
}
- if (info->error_info.error ==
- IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) {
+ if (error_check) {
/*
* Check for error message:
* 'IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO'
@@ -790,18 +787,27 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
list_del(&ib->head);
spin_unlock_irqrestore(&aq->lock, flags);
- ipu6_isys_buf_calc_sequence_time(ib, info);
+ ipu6_isys_buf_calc_sequence_time(ib, time);
ipu6_isys_queue_buf_done(ib);
return;
}
- dev_err(dev, "Failed to find a matching video buffer");
+ dev_err(dev, "Failed to find a matching video buffer\n");
spin_unlock_irqrestore(&aq->lock, flags);
}
+void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
+ struct ipu6_fw_isys_resp_info_abi *info)
+{
+ u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
+ bool err = info->error_info.error == IPU6_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO;
+
+ ipu6_stream_buf_ready(stream, info->pin_id, info->pin.addr, time, err);
+}
+
static const struct vb2_ops ipu6_isys_queue_ops = {
.queue_setup = ipu6_isys_queue_setup,
.buf_init = ipu6_isys_buf_init,
@@ -835,7 +841,6 @@ int ipu6_isys_queue_init(struct ipu6_isys_queue *aq)
if (ret)
return ret;
- aq->dev = &adev->auxdev.dev;
aq->vbq.dev = &adev->isp->pdev->dev;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->active);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
index fe8fc796a58f..844dfda15ab6 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.h
@@ -20,11 +20,7 @@ struct ipu6_isys_stream;
struct ipu6_isys_queue {
struct vb2_queue vbq;
struct list_head node;
- struct device *dev;
- /*
- * @lock: serialise access to queued and pre_streamon_queued
- */
- spinlock_t lock;
+ spinlock_t lock; /* Protects active and incoming lists */
struct list_head active;
struct list_head incoming;
unsigned int fw_output;
@@ -69,10 +65,6 @@ void
ipu6_isys_buf_to_fw_frame_buf(struct ipu6_fw_isys_frame_buff_set_abi *set,
struct ipu6_isys_stream *stream,
struct ipu6_isys_buffer_list *bl);
-void
-ipu6_isys_buf_calc_sequence_time(struct ipu6_isys_buffer *ib,
- struct ipu6_fw_isys_resp_info_abi *info);
-void ipu6_isys_queue_buf_done(struct ipu6_isys_buffer *ib);
void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
struct ipu6_fw_isys_resp_info_abi *info);
int ipu6_isys_queue_init(struct ipu6_isys_queue *aq);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
index 9ef8d95464f5..268dfa01e903 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.h
@@ -37,10 +37,6 @@ int ipu6_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum
*code);
-int ipu6_isys_subdev_link_validate(struct v4l2_subdev *sd,
- struct media_link *link,
- struct v4l2_subdev_format *source_fmt,
- struct v4l2_subdev_format *sink_fmt);
u32 ipu6_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad);
int ipu6_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream,
struct v4l2_mbus_framefmt *format);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index 959869a88556..24a2ef93474c 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -241,7 +241,7 @@ static void ipu6_isys_try_fmt_cap(struct ipu6_isys_video *av, u32 type,
else
*bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE);
- *bytesperline = ALIGN(*bytesperline, av->isys->line_align);
+ *bytesperline = ALIGN(*bytesperline, 64);
/*
* (height + 1) * bytesperline due to a hardware issue: the DMA unit
@@ -486,8 +486,7 @@ static int ipu6_isys_fw_pin_cfg(struct ipu6_isys_video *av,
output_pins = cfg->nof_output_pins++;
aq->fw_output = output_pins;
- stream->output_pins[output_pins].pin_ready = ipu6_isys_queue_buf_ready;
- stream->output_pins[output_pins].aq = aq;
+ stream->output_pins_queue[output_pins] = aq;
output_pin = &cfg->output_pins[output_pins];
output_pin->input_pin_id = input_pins;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.h b/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
index 1d945be2b879..1dd36f2a077e 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.h
@@ -37,12 +37,6 @@ struct sequence_info {
u64 timestamp;
};
-struct output_pin_data {
- void (*pin_ready)(struct ipu6_isys_stream *stream,
- struct ipu6_fw_isys_resp_info_abi *info);
- struct ipu6_isys_queue *aq;
-};
-
/*
* Align with firmware stream. Each stream represents a CSI virtual channel.
* May map to multiple video devices
@@ -68,7 +62,7 @@ struct ipu6_isys_stream {
struct completion stream_stop_completion;
struct ipu6_isys *isys;
- struct output_pin_data output_pins[IPU6_ISYS_OUTPUT_PINS];
+ struct ipu6_isys_queue *output_pins_queue[IPU6_ISYS_OUTPUT_PINS];
int error;
u8 vc;
};
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c
index 8df1d83a74b5..fc0ec0a4b8f5 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c
@@ -1089,7 +1089,6 @@ static int isys_probe(struct auxiliary_device *auxdev,
INIT_LIST_HEAD(&isys->framebuflist);
INIT_LIST_HEAD(&isys->framebuflist_fw);
- isys->line_align = IPU6_ISYS_2600_MEM_LINE_ALIGN;
isys->icache_prefetch = 0;
dev_set_drvdata(&auxdev->dev, isys);
@@ -1294,12 +1293,11 @@ static int isys_isr_one(struct ipu6_bus_device *adev)
*/
ipu6_put_fw_msg_buf(ipu6_bus_get_drvdata(adev), resp->buf_id);
if (resp->pin_id < IPU6_ISYS_OUTPUT_PINS &&
- stream->output_pins[resp->pin_id].pin_ready)
- stream->output_pins[resp->pin_id].pin_ready(stream,
- resp);
+ stream->output_pins_queue[resp->pin_id])
+ ipu6_isys_queue_buf_ready(stream, resp);
else
dev_warn(&adev->auxdev.dev,
- "%d:No data pin ready handler for pin id %d\n",
+ "%d:No queue for pin id %d\n",
resp->stream_handle, resp->pin_id);
if (csi2)
ipu6_isys_csi2_error(csi2);
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.h b/drivers/media/pci/intel/ipu6/ipu6-isys.h
index 610b60e69152..f488e782c26e 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys.h
@@ -29,8 +29,6 @@ struct ipu6_bus_device;
IPU6_ISYS_UNISPART_IRQ_CSI0 | \
IPU6_ISYS_UNISPART_IRQ_CSI1)
-#define IPU6_ISYS_2600_MEM_LINE_ALIGN 64
-
/*
* Current message queue configuration. These must be big enough
* so that they never gets full. Queues are located in system memory
@@ -118,7 +116,6 @@ struct sensor_async_sd {
* @streams: streams per firmware stream ID
* @fwcom: fw communication layer private pointer
* or optional external library private pointer
- * @line_align: line alignment in memory
* @phy_termcal_val: the termination calibration value, only used for DWC PHY
* @need_reset: Isys requires d0i0->i3 transition
* @ref_count: total number of callers fw open
@@ -140,7 +137,6 @@ struct ipu6_isys {
struct ipu6_isys_stream streams[IPU6_ISYS_MAX_STREAMS];
int streams_ref_count[IPU6_ISYS_MAX_STREAMS];
void *fwcom;
- unsigned int line_align;
u32 phy_termcal_val;
bool need_reset;
bool icache_prefetch;
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
index 277af7cda8ee..1f4f20b9c94d 100644
--- a/drivers/media/pci/intel/ipu6/ipu6.c
+++ b/drivers/media/pci/intel/ipu6/ipu6.c
@@ -464,11 +464,6 @@ static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
{
int ret;
- /* disable IPU6 PCI ATS on mtl ES2 */
- if (is_ipu6ep_mtl(hw_ver) && boot_cpu_data.x86_stepping == 0x2 &&
- pci_ats_supported(dev))
- pci_disable_ats(dev);
-
/* No PCI msi capability for IPU6EP */
if (is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver)) {
/* likely do nothing as msi not enabled by default */
@@ -525,11 +520,11 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
phys = pci_resource_start(pdev, IPU6_PCI_BAR);
dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);
- ret = pcim_iomap_regions(pdev, 1 << IPU6_PCI_BAR, pci_name(pdev));
- if (ret)
- return dev_err_probe(dev, ret, "Failed to I/O mem remapping\n");
+ isp->base = pcim_iomap_region(pdev, IPU6_PCI_BAR, IPU6_NAME);
+ if (IS_ERR(isp->base))
+ return dev_err_probe(dev, PTR_ERR(isp->base),
+ "Failed to I/O mem remapping\n");
- isp->base = pcim_iomap_table(pdev)[IPU6_PCI_BAR];
pci_set_drvdata(pdev, isp);
pci_set_master(pdev);
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index b3b670b6ef70..748c14e87963 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -1064,7 +1064,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
void ivtv_unfinished_dma(struct timer_list *t)
{
- struct ivtv *itv = from_timer(itv, t, dma_timer);
+ struct ivtv *itv = timer_container_of(itv, t, dma_timer);
if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
return;
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 434eaf0440e2..989e93f67f75 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -641,7 +641,14 @@ static int vidioc_query_dv_timings(struct file *file, void *fh,
static int vidioc_enum_dv_timings(struct file *file, void *fh,
struct v4l2_enum_dv_timings *timings)
{
- return v4l2_enum_dv_timings_cap(timings, &video_timings_cap, NULL, NULL);
+ struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+ if (timings->index != 0)
+ return -EINVAL;
+ if (get_timings(vindev, &timings->timings) < 0)
+ return -ENODATA;
+
+ return 0;
}
static int vidioc_dv_timings_cap(struct file *file, void *fh,
@@ -749,14 +756,14 @@ static void signal_change(struct work_struct *work)
u32 width = resolution >> 16;
u32 height = resolution & 0xFFFF;
- if (timings->width != width || timings->height != height) {
- static const struct v4l2_event ev = {
- .type = V4L2_EVENT_SOURCE_CHANGE,
- .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
- };
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
- v4l2_event_queue(&vindev->vdev, &ev);
+ v4l2_event_queue(&vindev->vdev, &ev);
+ if (timings->width != width || timings->height != height) {
if (vb2_is_streaming(&vindev->queue))
vb2_queue_error(&vindev->queue);
}
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 16338d13d9c8..9f2ac33cffa7 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -637,7 +637,7 @@ static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
static void netup_unidvb_dma_timeout(struct timer_list *t)
{
- struct netup_dma *dma = from_timer(dma, t, timeout);
+ struct netup_dma *dma = timer_container_of(dma, t, timeout);
struct netup_unidvb_dev *ndev = dma->ndev;
dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 246f73b8a9e7..c55aa782b72c 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -692,6 +692,7 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u8 rev;
u32 ver;
int i, ret;
+ void __iomem *iomem;
struct pt3_board *pt3;
struct i2c_adapter *i2c;
@@ -703,10 +704,6 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
- if (ret < 0)
- return ret;
-
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(&pdev->dev, "Failed to set DMA mask\n");
@@ -719,8 +716,16 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pt3);
pt3->pdev = pdev;
mutex_init(&pt3->lock);
- pt3->regs[0] = pcim_iomap_table(pdev)[0];
- pt3->regs[1] = pcim_iomap_table(pdev)[2];
+
+ iomem = pcim_iomap_region(pdev, 0, DRV_NAME);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+ pt3->regs[0] = iomem;
+
+ iomem = pcim_iomap_region(pdev, 2, DRV_NAME);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+ pt3->regs[1] = iomem;
ver = ioread32(pt3->regs[0] + REG_VERSION);
if ((ver >> 16) != 0x0301) {
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 84295bdb8ce4..537aa65acdc8 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -328,7 +328,7 @@ void saa7134_buffer_next(struct saa7134_dev *dev,
void saa7134_buffer_timeout(struct timer_list *t)
{
- struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
+ struct saa7134_dmaqueue *q = timer_container_of(q, t, timeout);
struct saa7134_dev *dev = q->dev;
unsigned long flags;
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index d7d97c7d4a2b..468dbe8d552f 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -431,7 +431,7 @@ void saa7134_input_irq(struct saa7134_dev *dev)
static void saa7134_input_timer(struct timer_list *t)
{
- struct saa7134_card_ir *ir = from_timer(ir, t, timer);
+ struct saa7134_card_ir *ir = timer_container_of(ir, t, timer);
struct saa7134_dev *dev = ir->dev->priv;
build_key(dev);
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 6ec1480a6d18..febb2c156cf6 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -477,10 +477,10 @@ static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_write_config_byte(pdev, 0x40, 0x00);
pci_write_config_byte(pdev, 0x41, 0x00);
- ret = pcim_iomap_regions(pdev, BIT(0), SOLO6X10_NAME);
+ solo_dev->reg_base = pcim_iomap_region(pdev, 0, SOLO6X10_NAME);
+ ret = PTR_ERR_OR_ZERO(solo_dev->reg_base);
if (ret)
goto fail_probe;
- solo_dev->reg_base = pcim_iomap_table(pdev)[0];
chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) &
SOLO_CHIP_ID_MASK;
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
deleted file mode 100644
index 118b922c08c3..000000000000
--- a/drivers/media/pci/sta2x11/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config STA2X11_VIP
- tristate "STA2X11 VIP Video For Linux"
- depends on PCI && VIDEO_DEV && I2C
- depends on STA2X11 || COMPILE_TEST
- select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
- select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
- select VIDEOBUF2_DMA_CONTIG
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- help
- Say Y for support for STA2X11 VIP (Video Input Port) capture
- device.
-
- To compile this driver as a module, choose M here: the
- module will be called sta2x11_vip.
diff --git a/drivers/media/pci/sta2x11/Makefile b/drivers/media/pci/sta2x11/Makefile
deleted file mode 100644
index bb684a7b6270..000000000000
--- a/drivers/media/pci/sta2x11/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_STA2X11_VIP) += sta2x11_vip.o
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
deleted file mode 100644
index 3049bad20f14..000000000000
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ /dev/null
@@ -1,1270 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * This is the driver for the STA2x11 Video Input Port.
- *
- * Copyright (C) 2012 ST Microelectronics
- * author: Federico Vaga <federico.vaga@gmail.com>
- * Copyright (C) 2010 WindRiver Systems, Inc.
- * authors: Andreas Kies <andreas.kies@windriver.com>
- * Vlad Lungu <vlad.lungu@windriver.com>
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/videodev2.h>
-#include <linux/kmod.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "sta2x11_vip.h"
-
-#define DRV_VERSION "1.3"
-
-#ifndef PCI_DEVICE_ID_STMICRO_VIP
-#define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D
-#endif
-
-#define MAX_FRAMES 4
-
-/*Register offsets*/
-#define DVP_CTL 0x00
-#define DVP_TFO 0x04
-#define DVP_TFS 0x08
-#define DVP_BFO 0x0C
-#define DVP_BFS 0x10
-#define DVP_VTP 0x14
-#define DVP_VBP 0x18
-#define DVP_VMP 0x1C
-#define DVP_ITM 0x98
-#define DVP_ITS 0x9C
-#define DVP_STA 0xA0
-#define DVP_HLFLN 0xA8
-#define DVP_RGB 0xC0
-#define DVP_PKZ 0xF0
-
-/*Register fields*/
-#define DVP_CTL_ENA 0x00000001
-#define DVP_CTL_RST 0x80000000
-#define DVP_CTL_DIS (~0x00040001)
-
-#define DVP_IT_VSB 0x00000008
-#define DVP_IT_VST 0x00000010
-#define DVP_IT_FIFO 0x00000020
-
-#define DVP_HLFLN_SD 0x00000001
-
-#define SAVE_COUNT 8
-#define AUX_COUNT 3
-#define IRQ_COUNT 1
-
-
-struct vip_buffer {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
- dma_addr_t dma;
-};
-static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
-{
- return container_of(vb2, struct vip_buffer, vb);
-}
-
-/**
- * struct sta2x11_vip - All internal data for one instance of device
- * @v4l2_dev: device registered in v4l layer
- * @video_dev: properties of our device
- * @pdev: PCI device
- * @adapter: contains I2C adapter information
- * @register_save_area: All relevant register are saved here during suspend
- * @decoder: contains information about video DAC
- * @ctrl_hdl: handler for control framework
- * @format: pixel format, fixed UYVY
- * @std: video standard (e.g. PAL/NTSC)
- * @input: input line for video signal ( 0 or 1 )
- * @disabled: Device is in power down state
- * @slock: for excluse access of registers
- * @vb_vidq: queue maintained by videobuf2 layer
- * @buffer_list: list of buffer in use
- * @sequence: sequence number of acquired buffer
- * @active: current active buffer
- * @lock: used in videobuf2 callback
- * @v4l_lock: serialize its video4linux ioctls
- * @tcount: Number of top frames
- * @bcount: Number of bottom frames
- * @overflow: Number of FIFO overflows
- * @iomem: hardware base address
- * @config: I2C and gpio config from platform
- *
- * All non-local data is accessed via this structure.
- */
-struct sta2x11_vip {
- struct v4l2_device v4l2_dev;
- struct video_device video_dev;
- struct pci_dev *pdev;
- struct i2c_adapter *adapter;
- unsigned int register_save_area[IRQ_COUNT + SAVE_COUNT + AUX_COUNT];
- struct v4l2_subdev *decoder;
- struct v4l2_ctrl_handler ctrl_hdl;
-
-
- struct v4l2_pix_format format;
- v4l2_std_id std;
- unsigned int input;
- int disabled;
- spinlock_t slock;
-
- struct vb2_queue vb_vidq;
- struct list_head buffer_list;
- unsigned int sequence;
- struct vip_buffer *active; /* current active buffer */
- spinlock_t lock; /* Used in videobuf2 callback */
- struct mutex v4l_lock;
-
- /* Interrupt counters */
- int tcount, bcount;
- int overflow;
-
- void __iomem *iomem; /* I/O Memory */
- struct vip_config *config;
-};
-
-static const unsigned int registers_to_save[AUX_COUNT] = {
- DVP_HLFLN, DVP_RGB, DVP_PKZ
-};
-
-static struct v4l2_pix_format formats_50[] = {
- { /*PAL interlaced */
- .width = 720,
- .height = 576,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_INTERLACED,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 576,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
- { /*PAL top */
- .width = 720,
- .height = 288,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_TOP,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 288,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
- { /*PAL bottom */
- .width = 720,
- .height = 288,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_BOTTOM,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 288,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
-
-};
-
-static struct v4l2_pix_format formats_60[] = {
- { /*NTSC interlaced */
- .width = 720,
- .height = 480,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_INTERLACED,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 480,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
- { /*NTSC top */
- .width = 720,
- .height = 240,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_TOP,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 240,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
- { /*NTSC bottom */
- .width = 720,
- .height = 240,
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .field = V4L2_FIELD_BOTTOM,
- .bytesperline = 720 * 2,
- .sizeimage = 720 * 2 * 240,
- .colorspace = V4L2_COLORSPACE_SMPTE170M},
-};
-
-/* Write VIP register */
-static inline void reg_write(struct sta2x11_vip *vip, unsigned int reg, u32 val)
-{
- iowrite32((val), (vip->iomem)+(reg));
-}
-/* Read VIP register */
-static inline u32 reg_read(struct sta2x11_vip *vip, unsigned int reg)
-{
- return ioread32((vip->iomem)+(reg));
-}
-/* Start DMA acquisition */
-static void start_dma(struct sta2x11_vip *vip, struct vip_buffer *vip_buf)
-{
- unsigned long offset = 0;
-
- if (vip->format.field == V4L2_FIELD_INTERLACED)
- offset = vip->format.width * 2;
-
- spin_lock_irq(&vip->slock);
- /* Enable acquisition */
- reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) | DVP_CTL_ENA);
- /* Set Top and Bottom Field memory address */
- reg_write(vip, DVP_VTP, (u32)vip_buf->dma);
- reg_write(vip, DVP_VBP, (u32)vip_buf->dma + offset);
- spin_unlock_irq(&vip->slock);
-}
-
-/* Fetch the next buffer to activate */
-static void vip_active_buf_next(struct sta2x11_vip *vip)
-{
- /* Get the next buffer */
- spin_lock(&vip->lock);
- if (list_empty(&vip->buffer_list)) {/* No available buffer */
- spin_unlock(&vip->lock);
- return;
- }
- vip->active = list_first_entry(&vip->buffer_list,
- struct vip_buffer,
- list);
- /* Reset Top and Bottom counter */
- vip->tcount = 0;
- vip->bcount = 0;
- spin_unlock(&vip->lock);
- if (vb2_is_streaming(&vip->vb_vidq)) { /* streaming is on */
- start_dma(vip, vip->active); /* start dma capture */
- }
-}
-
-
-/* Videobuf2 Operations */
-static int queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
-
- if (!(*nbuffers) || *nbuffers < MAX_FRAMES)
- *nbuffers = MAX_FRAMES;
-
- *nplanes = 1;
- sizes[0] = vip->format.sizeimage;
-
- vip->sequence = 0;
- vip->active = NULL;
- vip->tcount = 0;
- vip->bcount = 0;
-
- return 0;
-}
-static int buffer_init(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
-
- vip_buf->dma = vb2_dma_contig_plane_dma_addr(vb, 0);
- INIT_LIST_HEAD(&vip_buf->list);
- return 0;
-}
-
-static int buffer_prepare(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
- unsigned long size;
-
- size = vip->format.sizeimage;
- if (vb2_plane_size(vb, 0) < size) {
- v4l2_err(&vip->v4l2_dev, "buffer too small (%lu < %lu)\n",
- vb2_plane_size(vb, 0), size);
- return -EINVAL;
- }
-
- vb2_set_plane_payload(&vip_buf->vb.vb2_buf, 0, size);
-
- return 0;
-}
-static void buffer_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
-
- spin_lock(&vip->lock);
- list_add_tail(&vip_buf->list, &vip->buffer_list);
- if (!vip->active) { /* No active buffer, activate the first one */
- vip->active = list_first_entry(&vip->buffer_list,
- struct vip_buffer,
- list);
- if (vb2_is_streaming(&vip->vb_vidq)) /* streaming is on */
- start_dma(vip, vip_buf); /* start dma capture */
- }
- spin_unlock(&vip->lock);
-}
-static void buffer_finish(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct sta2x11_vip *vip = vb2_get_drv_priv(vb->vb2_queue);
- struct vip_buffer *vip_buf = to_vip_buffer(vbuf);
-
- /* Buffer handled, remove it from the list */
- spin_lock(&vip->lock);
- list_del_init(&vip_buf->list);
- spin_unlock(&vip->lock);
-
- if (vb2_is_streaming(vb->vb2_queue))
- vip_active_buf_next(vip);
-}
-
-static int start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
-
- spin_lock_irq(&vip->slock);
- /* Enable interrupt VSYNC Top and Bottom*/
- reg_write(vip, DVP_ITM, DVP_IT_VSB | DVP_IT_VST);
- spin_unlock_irq(&vip->slock);
-
- if (count)
- start_dma(vip, vip->active);
-
- return 0;
-}
-
-/* abort streaming and wait for last buffer */
-static void stop_streaming(struct vb2_queue *vq)
-{
- struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
- struct vip_buffer *vip_buf, *node;
-
- /* Disable acquisition */
- reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
- /* Disable all interrupts */
- reg_write(vip, DVP_ITM, 0);
-
- /* Release all active buffers */
- spin_lock(&vip->lock);
- list_for_each_entry_safe(vip_buf, node, &vip->buffer_list, list) {
- vb2_buffer_done(&vip_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- list_del(&vip_buf->list);
- }
- spin_unlock(&vip->lock);
-}
-
-static const struct vb2_ops vip_video_qops = {
- .queue_setup = queue_setup,
- .buf_init = buffer_init,
- .buf_prepare = buffer_prepare,
- .buf_finish = buffer_finish,
- .buf_queue = buffer_queue,
- .start_streaming = start_streaming,
- .stop_streaming = stop_streaming,
-};
-
-
-/* File Operations */
-static const struct v4l2_file_operations vip_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = vb2_fop_release,
- .unlocked_ioctl = video_ioctl2,
- .read = vb2_fop_read,
- .mmap = vb2_fop_mmap,
- .poll = vb2_fop_poll
-};
-
-
-/**
- * vidioc_querycap - return capabilities of device
- * @file: descriptor of device
- * @cap: contains return values
- * @priv: unused
- *
- * the capabilities of the device are returned
- *
- * return value: 0, no error.
- */
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
- strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
- return 0;
-}
-
-/**
- * vidioc_s_std - set video standard
- * @file: descriptor of device
- * @std: contains standard to be set
- * @priv: unused
- *
- * the video standard is set
- *
- * return value: 0, no error.
- *
- * -EIO, no input signal detected
- *
- * other, returned from video DAC.
- */
-static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id std)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- /*
- * This is here for backwards compatibility only.
- * The use of V4L2_STD_ALL to trigger a querystd is non-standard.
- */
- if (std == V4L2_STD_ALL) {
- v4l2_subdev_call(vip->decoder, video, querystd, &std);
- if (std == V4L2_STD_UNKNOWN)
- return -EIO;
- }
-
- if (vip->std != std) {
- vip->std = std;
- if (V4L2_STD_525_60 & std)
- vip->format = formats_60[0];
- else
- vip->format = formats_50[0];
- }
-
- return v4l2_subdev_call(vip->decoder, video, s_std, std);
-}
-
-/**
- * vidioc_g_std - get video standard
- * @file: descriptor of device
- * @priv: unused
- * @std: contains return values
- *
- * the current video standard is returned
- *
- * return value: 0, no error.
- */
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- *std = vip->std;
- return 0;
-}
-
-/**
- * vidioc_querystd - get possible video standards
- * @file: descriptor of device
- * @priv: unused
- * @std: contains return values
- *
- * all possible video standards are returned
- *
- * return value: delivered by video DAC routine.
- */
-static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- return v4l2_subdev_call(vip->decoder, video, querystd, std);
-}
-
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- if (inp->index > 1)
- return -EINVAL;
-
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- inp->std = V4L2_STD_ALL;
- sprintf(inp->name, "Camera %u", inp->index);
-
- return 0;
-}
-
-/**
- * vidioc_s_input - set input line
- * @file: descriptor of device
- * @priv: unused
- * @i: new input line number
- *
- * the current active input line is set
- *
- * return value: 0, no error.
- *
- * -EINVAL, line number out of range
- */
-static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
- int ret;
-
- if (i > 1)
- return -EINVAL;
- ret = v4l2_subdev_call(vip->decoder, video, s_routing, i, 0, 0);
-
- if (!ret)
- vip->input = i;
-
- return 0;
-}
-
-/**
- * vidioc_g_input - return input line
- * @file: descriptor of device
- * @priv: unused
- * @i: returned input line number
- *
- * the current active input line is returned
- *
- * return value: always 0.
- */
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- *i = vip->input;
- return 0;
-}
-
-/**
- * vidioc_enum_fmt_vid_cap - return video capture format
- * @file: descriptor of device
- * @priv: unused
- * @f: returned format information
- *
- * returns name and format of video capture
- * Only UYVY is supported by hardware.
- *
- * return value: always 0.
- */
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
-
- if (f->index != 0)
- return -EINVAL;
-
- f->pixelformat = V4L2_PIX_FMT_UYVY;
- return 0;
-}
-
-/**
- * vidioc_try_fmt_vid_cap - set video capture format
- * @file: descriptor of device
- * @priv: unused
- * @f: new format
- *
- * a new video format is set, which includes the width and
- * field type. The width is fixed to 720, no scaling.
- * Only UYVY is supported by this hardware.
- * The minimum height is 200, the maximum is 576 (PAL).
- *
- * return value: 0, no error
- *
- * -EINVAL, pixel or field format not supported
- *
- */
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
- int interlace_lim;
-
- if (V4L2_PIX_FMT_UYVY != f->fmt.pix.pixelformat) {
- v4l2_warn(&vip->v4l2_dev, "Invalid format, only UYVY supported\n");
- return -EINVAL;
- }
-
- if (V4L2_STD_525_60 & vip->std)
- interlace_lim = 240;
- else
- interlace_lim = 288;
-
- switch (f->fmt.pix.field) {
- default:
- case V4L2_FIELD_ANY:
- if (interlace_lim < f->fmt.pix.height)
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- else
- f->fmt.pix.field = V4L2_FIELD_BOTTOM;
- break;
- case V4L2_FIELD_TOP:
- case V4L2_FIELD_BOTTOM:
- if (interlace_lim < f->fmt.pix.height)
- f->fmt.pix.height = interlace_lim;
- break;
- case V4L2_FIELD_INTERLACED:
- break;
- }
-
- /* It is the only supported format */
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
- f->fmt.pix.height &= ~1;
- if (2 * interlace_lim < f->fmt.pix.height)
- f->fmt.pix.height = 2 * interlace_lim;
- if (200 > f->fmt.pix.height)
- f->fmt.pix.height = 200;
- f->fmt.pix.width = 720;
- f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
- f->fmt.pix.sizeimage = f->fmt.pix.width * 2 * f->fmt.pix.height;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- return 0;
-}
-
-/**
- * vidioc_s_fmt_vid_cap - set current video format parameters
- * @file: descriptor of device
- * @priv: unused
- * @f: new format parameters
- *
- * set new capture format
- * return value: 0, no error
- *
- * other, delivered by video DAC routine.
- */
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
- unsigned int t_stop, b_stop, pitch;
- int ret;
-
- ret = vidioc_try_fmt_vid_cap(file, priv, f);
- if (ret)
- return ret;
-
- if (vb2_is_busy(&vip->vb_vidq)) {
- /* Can't change format during acquisition */
- v4l2_err(&vip->v4l2_dev, "device busy\n");
- return -EBUSY;
- }
- vip->format = f->fmt.pix;
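- /*
- * The field stop registers pack the last line index in the upper
- * 16 bits and the last byte of a line in the lower 16 bits; for
- * interlaced capture both fields share one buffer, so the memory
- * pitch spans two lines (4 * width bytes).
- */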
- switch (vip->format.field) {
- case V4L2_FIELD_INTERLACED:
- t_stop = ((vip->format.height / 2 - 1) << 16) |
- (2 * vip->format.width - 1);
- b_stop = t_stop;
- pitch = 4 * vip->format.width;
- break;
- case V4L2_FIELD_TOP:
- t_stop = ((vip->format.height - 1) << 16) |
- (2 * vip->format.width - 1);
- b_stop = (0 << 16) | (2 * vip->format.width - 1);
- pitch = 2 * vip->format.width;
- break;
- case V4L2_FIELD_BOTTOM:
- t_stop = (0 << 16) | (2 * vip->format.width - 1);
- b_stop = (vip->format.height << 16) |
- (2 * vip->format.width - 1);
- pitch = 2 * vip->format.width;
- break;
- default:
- v4l2_err(&vip->v4l2_dev, "unknown field format\n");
- return -EINVAL;
- }
-
- spin_lock_irq(&vip->slock);
- /* Y-X Top Field Offset */
- reg_write(vip, DVP_TFO, 0);
- /* Y-X Bottom Field Offset */
- reg_write(vip, DVP_BFO, 0);
- /* Y-X Top Field Stop*/
- reg_write(vip, DVP_TFS, t_stop);
- /* Y-X Bottom Field Stop */
- reg_write(vip, DVP_BFS, b_stop);
- /* Video Memory Pitch */
- reg_write(vip, DVP_VMP, pitch);
- spin_unlock_irq(&vip->slock);
-
- return 0;
-}
-
-/**
- * vidioc_g_fmt_vid_cap - get current video format parameters
- * @file: descriptor of device
- * @priv: unused
- * @f: contains format information
- *
- * returns current video format parameters
- *
- * return value: 0, always successful
- */
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct sta2x11_vip *vip = video_drvdata(file);
-
- f->fmt.pix = vip->format;
-
- return 0;
-}
-
-static const struct v4l2_ioctl_ops vip_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- /* FMT handling */
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- /* Buffer handlers */
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- /* Stream on/off */
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
- /* Standard handling */
- .vidioc_g_std = vidioc_g_std,
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
- /* Input handling */
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- /* Log status ioctl */
- .vidioc_log_status = v4l2_ctrl_log_status,
- /* Event handling */
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct video_device video_dev_template = {
- .name = KBUILD_MODNAME,
- .release = video_device_release_empty,
- .fops = &vip_fops,
- .ioctl_ops = &vip_ioctl_ops,
- .tvnorms = V4L2_STD_ALL,
- .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING,
-};
-
-/**
- * vip_irq - interrupt routine
- * @irq: interrupt number (not used, the correct number is assumed)
- * @data: local data structure containing all information
- *
- * check for both frame interrupts set (top and bottom).
- * check FIFO overflow, but limit the number of log messages after open.
- * signal a complete buffer if done
- *
- * return value: IRQ_NONE, interrupt was not generated by VIP
- *
- * IRQ_HANDLED, interrupt done.
- */
-static irqreturn_t vip_irq(int irq, void *data)
-{
- struct sta2x11_vip *vip = data;
- unsigned int status;
-
- status = reg_read(vip, DVP_ITS);
-
- if (!status) /* No interrupt to handle */
- return IRQ_NONE;
-
- if (status & DVP_IT_FIFO)
- if (vip->overflow++ > 5)
- pr_info("VIP: fifo overflow\n");
-
- if ((status & DVP_IT_VST) && (status & DVP_IT_VSB)) {
- /* this is bad, we are too slow, hope the condition is gone
- * on the next frame */
- return IRQ_HANDLED;
- }
-
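- /*
- * A buffer completes on the second top-field VSYNC after it was
- * armed; bottom-field VSYNCs are only counted.
- */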
- if (status & DVP_IT_VST)
- if ((++vip->tcount) < 2)
- return IRQ_HANDLED;
- if (status & DVP_IT_VSB) {
- vip->bcount++;
- return IRQ_HANDLED;
- }
-
- if (vip->active) { /* Acquisition is over on this buffer */
- /* Disable acquisition */
- reg_write(vip, DVP_CTL, reg_read(vip, DVP_CTL) & ~DVP_CTL_ENA);
- /* Remove the active buffer from the list */
- vip->active->vb.vb2_buf.timestamp = ktime_get_ns();
- vip->active->vb.sequence = vip->sequence++;
- vb2_buffer_done(&vip->active->vb.vb2_buf, VB2_BUF_STATE_DONE);
- }
-
- return IRQ_HANDLED;
-}
-
-static void sta2x11_vip_init_register(struct sta2x11_vip *vip)
-{
- /* Register initialization */
- spin_lock_irq(&vip->slock);
- /* Clean interrupt */
- reg_read(vip, DVP_ITS);
- /* Enable Half Line per vertical */
- reg_write(vip, DVP_HLFLN, DVP_HLFLN_SD);
- /* Reset VIP control */
- reg_write(vip, DVP_CTL, DVP_CTL_RST);
- /* Clear VIP control */
- reg_write(vip, DVP_CTL, 0);
- spin_unlock_irq(&vip->slock);
-}
-static void sta2x11_vip_clear_register(struct sta2x11_vip *vip)
-{
- spin_lock_irq(&vip->slock);
- /* Disable interrupt */
- reg_write(vip, DVP_ITM, 0);
- /* Reset VIP Control */
- reg_write(vip, DVP_CTL, DVP_CTL_RST);
- /* Clear VIP Control */
- reg_write(vip, DVP_CTL, 0);
- /* Clean VIP Interrupt */
- reg_read(vip, DVP_ITS);
- spin_unlock_irq(&vip->slock);
-}
-static int sta2x11_vip_init_buffer(struct sta2x11_vip *vip)
-{
- int err;
-
- err = dma_set_coherent_mask(&vip->pdev->dev, DMA_BIT_MASK(29));
- if (err) {
- v4l2_err(&vip->v4l2_dev, "Cannot configure coherent mask");
- return err;
- }
- memset(&vip->vb_vidq, 0, sizeof(struct vb2_queue));
- vip->vb_vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- vip->vb_vidq.io_modes = VB2_MMAP | VB2_READ;
- vip->vb_vidq.drv_priv = vip;
- vip->vb_vidq.buf_struct_size = sizeof(struct vip_buffer);
- vip->vb_vidq.ops = &vip_video_qops;
- vip->vb_vidq.mem_ops = &vb2_dma_contig_memops;
- vip->vb_vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vip->vb_vidq.dev = &vip->pdev->dev;
- vip->vb_vidq.lock = &vip->v4l_lock;
- err = vb2_queue_init(&vip->vb_vidq);
- if (err)
- return err;
- INIT_LIST_HEAD(&vip->buffer_list);
- spin_lock_init(&vip->lock);
- return 0;
-}
-
-static int sta2x11_vip_init_controls(struct sta2x11_vip *vip)
-{
- /*
- * Initialize an empty control handler so the VIP can inherit
- * controls from the ADV7180
- */
- v4l2_ctrl_handler_init(&vip->ctrl_hdl, 0);
-
- vip->v4l2_dev.ctrl_handler = &vip->ctrl_hdl;
- if (vip->ctrl_hdl.error) {
- int err = vip->ctrl_hdl.error;
-
- v4l2_ctrl_handler_free(&vip->ctrl_hdl);
- return err;
- }
-
- return 0;
-}
-
-/**
- * vip_gpio_reserve - reserve gpio pin
- * @dev: device
- * @pin: GPIO pin number
- * @dir: direction, input or output
- * @name: GPIO pin name
- *
- */
-static int vip_gpio_reserve(struct device *dev, int pin, int dir,
- const char *name)
-{
- struct gpio_desc *desc = gpio_to_desc(pin);
- int ret = -ENODEV;
-
- if (!gpio_is_valid(pin))
- return ret;
-
- ret = gpio_request(pin, name);
- if (ret) {
- dev_err(dev, "Failed to allocate pin %d (%s)\n", pin, name);
- return ret;
- }
-
- ret = gpiod_direction_output(desc, dir);
- if (ret) {
- dev_err(dev, "Failed to set direction for pin %d (%s)\n",
- pin, name);
- gpio_free(pin);
- return ret;
- }
-
- ret = gpiod_export(desc, false);
- if (ret) {
- dev_err(dev, "Failed to export pin %d (%s)\n", pin, name);
- gpio_free(pin);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * vip_gpio_release - release gpio pin
- * @dev: device
- * @pin: GPIO pin number
- * @name: GPIO pin name
- *
- */
-static void vip_gpio_release(struct device *dev, int pin, const char *name)
-{
- if (gpio_is_valid(pin)) {
- struct gpio_desc *desc = gpio_to_desc(pin);
-
- dev_dbg(dev, "releasing pin %d (%s)\n", pin, name);
- gpiod_unexport(desc);
- gpio_free(pin);
- }
-}
-
-/**
- * sta2x11_vip_init_one - init one instance of video device
- * @pdev: PCI device
- * @ent: (not used)
- *
- * allocate reset pins for the DAC.
- * Reset the video DAC via its reset line.
- * allocate memory for managing device
- * request interrupt
- * map IO region
- * register device
- * find and initialize video DAC
- *
- * return value: 0, no error
- *
- * -ENOMEM, no memory
- *
- * -ENODEV, device could not be detected or registered
- */
-static int sta2x11_vip_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int ret;
- struct sta2x11_vip *vip;
- struct vip_config *config;
-
- /* Check if the hardware supports 26-bit DMA */
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(26))) {
- dev_err(&pdev->dev, "26-bit DMA addressing not available\n");
- return -EINVAL;
- }
- /* Enable PCI */
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- /* Get VIP platform data */
- config = dev_get_platdata(&pdev->dev);
- if (!config) {
- dev_info(&pdev->dev, "VIP slot disabled\n");
- ret = -EINVAL;
- goto disable;
- }
-
- /* Power configuration */
- ret = vip_gpio_reserve(&pdev->dev, config->pwr_pin, 0,
- config->pwr_name);
- if (ret)
- goto disable;
-
- ret = vip_gpio_reserve(&pdev->dev, config->reset_pin, 0,
- config->reset_name);
- if (ret) {
- vip_gpio_release(&pdev->dev, config->pwr_pin,
- config->pwr_name);
- goto disable;
- }
-
- if (gpio_is_valid(config->pwr_pin)) {
- /* Datasheet says 5ms between PWR and RST */
- usleep_range(5000, 25000);
- gpio_direction_output(config->pwr_pin, 1);
- }
-
- if (gpio_is_valid(config->reset_pin)) {
- /* Datasheet says 5ms between PWR and RST */
- usleep_range(5000, 25000);
- gpio_direction_output(config->reset_pin, 1);
- }
- usleep_range(5000, 25000);
-
- /* Allocate a new VIP instance */
- vip = kzalloc(sizeof(struct sta2x11_vip), GFP_KERNEL);
- if (!vip) {
- ret = -ENOMEM;
- goto release_gpios;
- }
- vip->pdev = pdev;
- vip->std = V4L2_STD_PAL;
- vip->format = formats_50[0];
- vip->config = config;
- mutex_init(&vip->v4l_lock);
-
- ret = sta2x11_vip_init_controls(vip);
- if (ret)
- goto free_mem;
- ret = v4l2_device_register(&pdev->dev, &vip->v4l2_dev);
- if (ret)
- goto free_mem;
-
- dev_dbg(&pdev->dev, "BAR #0 at 0x%lx 0x%lx irq %d\n",
- (unsigned long)pci_resource_start(pdev, 0),
- (unsigned long)pci_resource_len(pdev, 0), pdev->irq);
-
- pci_set_master(pdev);
-
- ret = pci_request_regions(pdev, KBUILD_MODNAME);
- if (ret)
- goto unreg;
-
- vip->iomem = pci_iomap(pdev, 0, 0x100);
- if (!vip->iomem) {
- ret = -ENOMEM;
- goto release;
- }
-
- pci_enable_msi(pdev);
-
- /* Initialize buffer */
- ret = sta2x11_vip_init_buffer(vip);
- if (ret)
- goto unmap;
-
- spin_lock_init(&vip->slock);
-
- ret = request_irq(pdev->irq, vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip);
- if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- ret = -ENODEV;
- goto release_buf;
- }
-
- /* Initialize and register video device */
- vip->video_dev = video_dev_template;
- vip->video_dev.v4l2_dev = &vip->v4l2_dev;
- vip->video_dev.queue = &vip->vb_vidq;
- vip->video_dev.lock = &vip->v4l_lock;
- video_set_drvdata(&vip->video_dev, vip);
-
- ret = video_register_device(&vip->video_dev, VFL_TYPE_VIDEO, -1);
- if (ret)
- goto vrelease;
-
- /* Get ADV7180 subdevice */
- vip->adapter = i2c_get_adapter(vip->config->i2c_id);
- if (!vip->adapter) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "no I2C adapter found\n");
- goto vunreg;
- }
-
- vip->decoder = v4l2_i2c_new_subdev(&vip->v4l2_dev, vip->adapter,
- "adv7180", vip->config->i2c_addr,
- NULL);
- if (!vip->decoder) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "no decoder found\n");
- goto vunreg;
- }
-
- i2c_put_adapter(vip->adapter);
- v4l2_subdev_call(vip->decoder, core, init, 0);
-
- sta2x11_vip_init_register(vip);
-
- dev_info(&pdev->dev, "STA2X11 Video Input Port (VIP) loaded\n");
- return 0;
-
-vunreg:
- video_set_drvdata(&vip->video_dev, NULL);
-vrelease:
- vb2_video_unregister_device(&vip->video_dev);
- free_irq(pdev->irq, vip);
-release_buf:
- pci_disable_msi(pdev);
-unmap:
- pci_iounmap(pdev, vip->iomem);
-release:
- pci_release_regions(pdev);
-unreg:
- v4l2_device_unregister(&vip->v4l2_dev);
-free_mem:
- kfree(vip);
-release_gpios:
- vip_gpio_release(&pdev->dev, config->reset_pin, config->reset_name);
- vip_gpio_release(&pdev->dev, config->pwr_pin, config->pwr_name);
-disable:
- /*
- * do not call pci_disable_device on sta2x11 because it breaks all
- * other bus masters on this EP
- */
- return ret;
-}
-
-/**
- * sta2x11_vip_remove_one - release device
- * @pdev: PCI device
- *
- * Undo everything done in .._init_one
- *
- * unregister video device
- * free interrupt
- * unmap I/O addresses
- * free memory
- * free GPIO pins
- */
-static void sta2x11_vip_remove_one(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
- struct sta2x11_vip *vip =
- container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
-
- sta2x11_vip_clear_register(vip);
-
- video_set_drvdata(&vip->video_dev, NULL);
- vb2_video_unregister_device(&vip->video_dev);
- free_irq(pdev->irq, vip);
- pci_disable_msi(pdev);
- pci_iounmap(pdev, vip->iomem);
- pci_release_regions(pdev);
-
- v4l2_device_unregister(&vip->v4l2_dev);
-
- vip_gpio_release(&pdev->dev, vip->config->pwr_pin,
- vip->config->pwr_name);
- vip_gpio_release(&pdev->dev, vip->config->reset_pin,
- vip->config->reset_name);
-
- kfree(vip);
- /*
- * do not call pci_disable_device on sta2x11 because it breaks all
- * other bus masters on this EP
- */
-}
-
-/**
- * sta2x11_vip_suspend - set device into power save mode
- * @dev_d: PCI device
- *
- * all relevant registers are saved and an attempt to set a new state is made.
- *
- * return value: 0 always indicates success,
- * even if the device could not be disabled (workaround for a hardware problem)
- */
-static int __maybe_unused sta2x11_vip_suspend(struct device *dev_d)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev_d);
- struct sta2x11_vip *vip =
- container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
- unsigned long flags;
- int i;
-
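- /*
- * Save-area layout: slot 0 holds DVP_CTL, slots 1..SAVE_COUNT-1
- * mirror the registers at offsets 4 * i, slot SAVE_COUNT holds the
- * interrupt mask and the last AUX_COUNT slots hold the registers
- * listed in registers_to_save[].
- */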
- spin_lock_irqsave(&vip->slock, flags);
- vip->register_save_area[0] = reg_read(vip, DVP_CTL);
- reg_write(vip, DVP_CTL, vip->register_save_area[0] & DVP_CTL_DIS);
- vip->register_save_area[SAVE_COUNT] = reg_read(vip, DVP_ITM);
- reg_write(vip, DVP_ITM, 0);
- for (i = 1; i < SAVE_COUNT; i++)
- vip->register_save_area[i] = reg_read(vip, 4 * i);
- for (i = 0; i < AUX_COUNT; i++)
- vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i] =
- reg_read(vip, registers_to_save[i]);
- spin_unlock_irqrestore(&vip->slock, flags);
-
- vip->disabled = 1;
-
- pr_info("VIP: suspend\n");
- return 0;
-}
-
-/**
- * sta2x11_vip_resume - resume device operation
- * @dev_d : PCI device
- *
- * return value: 0, no error.
- *
- * other, could not set device to power on state.
- */
-static int __maybe_unused sta2x11_vip_resume(struct device *dev_d)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev_d);
- struct sta2x11_vip *vip =
- container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
- unsigned long flags;
- int i;
-
- pr_info("VIP: resume\n");
-
- vip->disabled = 0;
-
- spin_lock_irqsave(&vip->slock, flags);
- for (i = 1; i < SAVE_COUNT; i++)
- reg_write(vip, 4 * i, vip->register_save_area[i]);
- for (i = 0; i < AUX_COUNT; i++)
- reg_write(vip, registers_to_save[i],
- vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i]);
- reg_write(vip, DVP_CTL, vip->register_save_area[0]);
- reg_write(vip, DVP_ITM, vip->register_save_area[SAVE_COUNT]);
- spin_unlock_irqrestore(&vip->slock, flags);
- return 0;
-}
-
-static const struct pci_device_id sta2x11_vip_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIP)},
- {0,}
-};
-
-static SIMPLE_DEV_PM_OPS(sta2x11_vip_pm_ops,
- sta2x11_vip_suspend,
- sta2x11_vip_resume);
-
-static struct pci_driver sta2x11_vip_driver = {
- .name = KBUILD_MODNAME,
- .probe = sta2x11_vip_init_one,
- .remove = sta2x11_vip_remove_one,
- .id_table = sta2x11_vip_pci_tbl,
- .driver.pm = &sta2x11_vip_pm_ops,
-};
-
-static int __init sta2x11_vip_init_module(void)
-{
- return pci_register_driver(&sta2x11_vip_driver);
-}
-
-static void __exit sta2x11_vip_exit_module(void)
-{
- pci_unregister_driver(&sta2x11_vip_driver);
-}
-
-#ifdef MODULE
-module_init(sta2x11_vip_init_module);
-module_exit(sta2x11_vip_exit_module);
-#else
-late_initcall_sync(sta2x11_vip_init_module);
-#endif
-
-MODULE_DESCRIPTION("STA2X11 Video Input Port driver");
-MODULE_AUTHOR("Wind River");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
-MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl);
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.h b/drivers/media/pci/sta2x11/sta2x11_vip.h
deleted file mode 100644
index de6000e7943e..000000000000
--- a/drivers/media/pci/sta2x11/sta2x11_vip.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2011 Wind River Systems, Inc.
- *
- * Author: Anders Wallin <anders.wallin@windriver.com>
- */
-
-#ifndef __STA2X11_VIP_H
-#define __STA2X11_VIP_H
-
-/**
- * struct vip_config - video input configuration data
- * @pwr_name: ADV powerdown name
- * @pwr_pin: ADV powerdown pin
- * @reset_name: ADV reset name
- * @reset_pin: ADV reset pin
- * @i2c_id: ADV i2c adapter ID
- * @i2c_addr: ADV i2c address
- */
-struct vip_config {
- const char *pwr_name;
- int pwr_pin;
- const char *reset_name;
- int reset_pin;
- int i2c_id;
- int i2c_addr;
-};
-
-#endif /* __STA2X11_VIP_H */
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
index 4d33caf83307..832788603f88 100644
--- a/drivers/media/pci/tw5864/tw5864-core.c
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -24,6 +24,8 @@
#include "tw5864.h"
#include "tw5864-reg.h"
+#define DRIVER_NAME "tw5864"
+
MODULE_DESCRIPTION("V4L2 driver module for tw5864-based multimedia capture & encoding devices");
MODULE_AUTHOR("Bluecherry Maintainers <maintainers@bluecherrydvr.com>");
MODULE_AUTHOR("Andrey Utkin <andrey.utkin@corp.bluecherry.net>");
@@ -246,7 +248,8 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
if (!dev)
return -ENOMEM;
- snprintf(dev->name, sizeof(dev->name), "tw5864:%s", pci_name(pci_dev));
+ snprintf(dev->name, sizeof(dev->name), "%s:%s", DRIVER_NAME,
+ pci_name(pci_dev));
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err)
@@ -269,12 +272,12 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
}
/* get mmio */
- err = pcim_iomap_regions(pci_dev, BIT(0), dev->name);
+ dev->mmio = pcim_iomap_region(pci_dev, 0, DRIVER_NAME);
+ err = PTR_ERR_OR_ZERO(dev->mmio);
if (err) {
dev_err(&dev->pci->dev, "Cannot request regions for MMIO\n");
goto unreg_v4l2;
}
- dev->mmio = pcim_iomap_table(pci_dev)[0];
spin_lock_init(&dev->slock);
@@ -290,7 +293,7 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
/* get irq */
err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw5864_isr,
- IRQF_SHARED, "tw5864", dev);
+ IRQF_SHARED, DRIVER_NAME, dev);
if (err < 0) {
dev_err(&dev->pci->dev, "can't get IRQ %d\n", pci_dev->irq);
goto fini_video;
@@ -324,7 +327,7 @@ static void tw5864_finidev(struct pci_dev *pci_dev)
}
static struct pci_driver tw5864_pci_driver = {
- .name = "tw5864",
+ .name = DRIVER_NAME,
.id_table = tw5864_pci_tbl,
.probe = tw5864_initdev,
.remove = tw5864_finidev,
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index 80bd268926cc..f39e0e34deb6 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -125,7 +125,7 @@ void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel)
*/
static void tw686x_dma_delay(struct timer_list *t)
{
- struct tw686x_dev *dev = from_timer(dev, t, dma_delay_timer);
+ struct tw686x_dev *dev = timer_container_of(dev, t, dma_delay_timer);
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index 3975fc1b2ee3..e31f9f19a48a 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -1202,7 +1202,7 @@ static int zoran_debugfs_show(struct seq_file *seq, void *v)
seq_printf(seq, "JPG ver_dcm %u\n", zr->jpg_settings.ver_dcm);
seq_printf(seq, "JPG tmp_dcm %u\n", zr->jpg_settings.tmp_dcm);
seq_printf(seq, "JPG odd_even %u\n", zr->jpg_settings.odd_even);
- seq_printf(seq, "JPG crop %dx%d %d %d\n",
+ seq_printf(seq, "JPG crop (%d,%d)/%dx%d\n",
zr->jpg_settings.img_x,
zr->jpg_settings.img_y,
zr->jpg_settings.img_width,
diff --git a/drivers/media/pci/zoran/zr36016.c b/drivers/media/pci/zoran/zr36016.c
index 4b328ad6083f..d2e136c48a1b 100644
--- a/drivers/media/pci/zoran/zr36016.c
+++ b/drivers/media/pci/zoran/zr36016.c
@@ -216,7 +216,7 @@ static int zr36016_set_video(struct videocodec *codec, const struct tvnorm *norm
struct zr36016 *ptr = (struct zr36016 *)codec->data;
struct zoran *zr = videocodec_to_zoran(codec);
- zrdev_dbg(zr, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n",
+ zrdev_dbg(zr, "%s: set_video %d.%d, (%u,%u)/%ux%u (0x%x) call\n",
ptr->name, norm->h_start, norm->v_start,
cap->x, cap->y, cap->width, cap->height,
cap->decimation);
diff --git a/drivers/media/pci/zoran/zr36050.c b/drivers/media/pci/zoran/zr36050.c
index b07d7e5c1b4a..c17965073557 100644
--- a/drivers/media/pci/zoran/zr36050.c
+++ b/drivers/media/pci/zoran/zr36050.c
@@ -547,7 +547,7 @@ static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm
struct zoran *zr = videocodec_to_zoran(codec);
int size;
- zrdev_dbg(zr, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n",
+ zrdev_dbg(zr, "%s: set_video %d.%d, (%u,%u)/%ux%u (0x%x) q%d call\n",
ptr->name, norm->h_start, norm->v_start,
cap->x, cap->y, cap->width, cap->height,
cap->decimation, cap->quality);
diff --git a/drivers/media/pci/zoran/zr36060.c b/drivers/media/pci/zoran/zr36060.c
index 75fd167603dc..d6c12efc5bb6 100644
--- a/drivers/media/pci/zoran/zr36060.c
+++ b/drivers/media/pci/zoran/zr36060.c
@@ -488,7 +488,7 @@ static int zr36060_set_video(struct videocodec *codec, const struct tvnorm *norm
u32 reg;
int size;
- zrdev_dbg(zr, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name,
+ zrdev_dbg(zr, "%s: set_video (%u,%u)/%ux%u (%%%d) call\n", ptr->name,
cap->x, cap->y, cap->width, cap->height, cap->decimation);
/* if () return -EINVAL;
diff --git a/drivers/media/platform/amlogic/Kconfig b/drivers/media/platform/amlogic/Kconfig
index 5014957404e9..458acf3d5fa8 100644
--- a/drivers/media/platform/amlogic/Kconfig
+++ b/drivers/media/platform/amlogic/Kconfig
@@ -2,4 +2,5 @@
comment "Amlogic media platform drivers"
+source "drivers/media/platform/amlogic/c3/Kconfig"
source "drivers/media/platform/amlogic/meson-ge2d/Kconfig"
diff --git a/drivers/media/platform/amlogic/Makefile b/drivers/media/platform/amlogic/Makefile
index d3cdb8fa4ddb..c744afcd1b9e 100644
--- a/drivers/media/platform/amlogic/Makefile
+++ b/drivers/media/platform/amlogic/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += c3/
obj-y += meson-ge2d/
diff --git a/drivers/media/platform/amlogic/c3/Kconfig b/drivers/media/platform/amlogic/c3/Kconfig
new file mode 100644
index 000000000000..d355d3a9358d
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+source "drivers/media/platform/amlogic/c3/isp/Kconfig"
+source "drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig"
+source "drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig"
diff --git a/drivers/media/platform/amlogic/c3/Makefile b/drivers/media/platform/amlogic/c3/Makefile
new file mode 100644
index 000000000000..14f305a493d2
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += isp/
+obj-y += mipi-adapter/
+obj-y += mipi-csi2/
diff --git a/drivers/media/platform/amlogic/c3/isp/Kconfig b/drivers/media/platform/amlogic/c3/isp/Kconfig
new file mode 100644
index 000000000000..02c62a50a5e8
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_C3_ISP
+ tristate "Amlogic C3 Image Signal Processor (ISP) driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on OF
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ help
+ Video4Linux2 driver for Amlogic C3 ISP pipeline.
+ The C3 ISP is used for processing raw images and
+	  outputting results to memory.
+
+	  To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/amlogic/c3/isp/Makefile b/drivers/media/platform/amlogic/c3/isp/Makefile
new file mode 100644
index 000000000000..b1b064170b57
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+c3-isp-objs := c3-isp-dev.o \
+ c3-isp-params.o \
+ c3-isp-stats.o \
+ c3-isp-capture.o \
+ c3-isp-core.o \
+ c3-isp-resizer.o
+
+obj-$(CONFIG_VIDEO_C3_ISP) += c3-isp.o
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
new file mode 100644
index 000000000000..11d85f5342f0
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
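+/*
+ * The three write memory interface (WRMIFX3) instances expose
+ * identical register banks at a 0x100 byte stride; this maps an
+ * instance 0 register offset to the bank of instance @id.
+ */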
+#define C3_ISP_WRMIFX3_REG(addr, id) ((addr) + (id) * 0x100)
+
+static const struct c3_isp_cap_format_info cap_formats[] = {
+ /* YUV formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_Y_ONLY,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV420,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_UV,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 2,
+ .vdiv = 2,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV420,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 2,
+ .vdiv = 2,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_NV16M,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV422,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_UV,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 1,
+ .vdiv = 2
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .fourcc = V4L2_PIX_FMT_NV61M,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV422,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT,
+ .hdiv = 1,
+ .vdiv = 2,
+ },
+ /* RAW formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .format = ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW,
+ .planes = ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1,
+ .ch0_pix_bits = ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS,
+ .uv_swap = ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU,
+ .in_bits = ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT,
+ .hdiv = 1,
+ .vdiv = 1,
+ },
+};
+
+/* Hardware configuration */
+
+/* Set the address of wrmifx3(write memory interface) */
+static void c3_isp_cap_wrmifx3_buff(struct c3_isp_capture *cap)
+{
+ dma_addr_t y_dma_addr;
+ dma_addr_t uv_dma_addr;
+
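+ /*
+ * With no buffer queued, point both planes at the dummy buffer so
+ * the write interface always has a valid DMA target.
+ */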
+ if (cap->buff) {
+ y_dma_addr = cap->buff->dma_addr[C3_ISP_PLANE_Y];
+ uv_dma_addr = cap->buff->dma_addr[C3_ISP_PLANE_UV];
+ } else {
+ y_dma_addr = cap->dummy_buff.dma_addr;
+ uv_dma_addr = cap->dummy_buff.dma_addr;
+ }
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH0_BADDR, cap->id),
+ ISP_WRMIFX3_0_CH0_BASE_ADDR(y_dma_addr));
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH1_BADDR, cap->id),
+ ISP_WRMIFX3_0_CH1_BASE_ADDR(uv_dma_addr));
+}
+
+static void c3_isp_cap_wrmifx3_format(struct c3_isp_capture *cap)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &cap->format.pix_mp;
+ const struct c3_isp_cap_format_info *info = cap->format.info;
+ u32 stride;
+ u32 chrom_h;
+ u32 chrom_v;
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_SIZE, cap->id),
+ ISP_WRMIFX3_0_FMT_SIZE_HSIZE(pix_mp->width) |
+ ISP_WRMIFX3_0_FMT_SIZE_VSIZE(pix_mp->height));
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_CTRL, cap->id),
+ ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_MASK, info->format);
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_CTRL, cap->id),
+ ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_MASK,
+ info->in_bits);
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_CTRL, cap->id),
+ ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_MASK, info->planes);
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_FMT_CTRL, cap->id),
+ ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_MASK,
+ info->uv_swap);
+
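+ /* Strides are programmed in 16 byte (C3_ISP_DMA_SIZE_ALIGN_BYTES) units */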
+ stride = DIV_ROUND_UP(pix_mp->plane_fmt[C3_ISP_PLANE_Y].bytesperline,
+ C3_ISP_DMA_SIZE_ALIGN_BYTES);
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH0_CTRL0, cap->id),
+ ISP_WRMIFX3_0_CH0_CTRL0_STRIDE_MASK,
+ ISP_WRMIFX3_0_CH0_CTRL0_STRIDE(stride));
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH0_CTRL1, cap->id),
+ ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_MODE_MASK,
+ info->ch0_pix_bits);
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_WIN_LUMA_H, cap->id),
+ ISP_WRMIFX3_0_WIN_LUMA_H_LUMA_HEND(pix_mp->width));
+
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_WIN_LUMA_V, cap->id),
+ ISP_WRMIFX3_0_WIN_LUMA_V_LUMA_VEND(pix_mp->height));
+
+ stride = DIV_ROUND_UP(pix_mp->plane_fmt[C3_ISP_PLANE_UV].bytesperline,
+ C3_ISP_DMA_SIZE_ALIGN_BYTES);
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH1_CTRL0, cap->id),
+ ISP_WRMIFX3_0_CH1_CTRL0_STRIDE_MASK,
+ ISP_WRMIFX3_0_CH1_CTRL0_STRIDE(stride));
+
+ c3_isp_update_bits(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_CH1_CTRL1, cap->id),
+ ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_MODE_MASK,
+ ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_16BITS);
+
+ chrom_h = DIV_ROUND_UP(pix_mp->width, info->hdiv);
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_WIN_CHROM_H, cap->id),
+ ISP_WRMIFX3_0_WIN_CHROM_H_CHROM_HEND(chrom_h));
+
+ chrom_v = DIV_ROUND_UP(pix_mp->height, info->vdiv);
+ c3_isp_write(cap->isp,
+ C3_ISP_WRMIFX3_REG(ISP_WRMIFX3_0_WIN_CHROM_V, cap->id),
+ ISP_WRMIFX3_0_WIN_CHROM_V_CHROM_VEND(chrom_v));
+}
+
+static int c3_isp_cap_dummy_buff_create(struct c3_isp_capture *cap)
+{
+ struct c3_isp_dummy_buffer *dummy_buff = &cap->dummy_buff;
+ struct v4l2_pix_format_mplane *pix_mp = &cap->format.pix_mp;
+
+ if (pix_mp->num_planes == 1)
+ dummy_buff->size = pix_mp->plane_fmt[C3_ISP_PLANE_Y].sizeimage;
+ else
+ dummy_buff->size =
+ max(pix_mp->plane_fmt[C3_ISP_PLANE_Y].sizeimage,
+ pix_mp->plane_fmt[C3_ISP_PLANE_UV].sizeimage);
+
+ /* The driver never accesses vaddr, so no kernel mapping is required */
+ dummy_buff->vaddr = dma_alloc_attrs(cap->isp->dev, dummy_buff->size,
+ &dummy_buff->dma_addr, GFP_KERNEL,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (!dummy_buff->vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void c3_isp_cap_dummy_buff_destroy(struct c3_isp_capture *cap)
+{
+ dma_free_attrs(cap->isp->dev, cap->dummy_buff.size,
+ cap->dummy_buff.vaddr, cap->dummy_buff.dma_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
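+/*
+ * Program the next pending buffer, or the dummy one, into the write
+ * interface and remove it from the pending list. Called with
+ * buff_lock held.
+ */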
+static void c3_isp_cap_cfg_buff(struct c3_isp_capture *cap)
+{
+ cap->buff = list_first_entry_or_null(&cap->pending,
+ struct c3_isp_cap_buffer, list);
+
+ c3_isp_cap_wrmifx3_buff(cap);
+
+ if (cap->buff)
+ list_del(&cap->buff->list);
+}
+
+static void c3_isp_cap_start(struct c3_isp_capture *cap)
+{
+ u32 mask;
+ u32 val;
+
+ scoped_guard(spinlock_irqsave, &cap->buff_lock)
+ c3_isp_cap_cfg_buff(cap);
+
+ c3_isp_cap_wrmifx3_format(cap);
+
+ if (cap->id == C3_ISP_CAP_DEV_0) {
+ mask = ISP_TOP_PATH_EN_WRMIF0_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF0_EN;
+ } else if (cap->id == C3_ISP_CAP_DEV_1) {
+ mask = ISP_TOP_PATH_EN_WRMIF1_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF1_EN;
+ } else {
+ mask = ISP_TOP_PATH_EN_WRMIF2_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF2_EN;
+ }
+
+ c3_isp_update_bits(cap->isp, ISP_TOP_PATH_EN, mask, val);
+}
+
+static void c3_isp_cap_stop(struct c3_isp_capture *cap)
+{
+ u32 mask;
+ u32 val;
+
+ if (cap->id == C3_ISP_CAP_DEV_0) {
+ mask = ISP_TOP_PATH_EN_WRMIF0_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF0_DIS;
+ } else if (cap->id == C3_ISP_CAP_DEV_1) {
+ mask = ISP_TOP_PATH_EN_WRMIF1_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF1_DIS;
+ } else {
+ mask = ISP_TOP_PATH_EN_WRMIF2_EN_MASK;
+ val = ISP_TOP_PATH_EN_WRMIF2_DIS;
+ }
+
+ c3_isp_update_bits(cap->isp, ISP_TOP_PATH_EN, mask, val);
+}
+
+static void c3_isp_cap_done(struct c3_isp_capture *cap)
+{
+ struct c3_isp_cap_buffer *buff = cap->buff;
+
+ guard(spinlock_irqsave)(&cap->buff_lock);
+
+ if (buff) {
+ buff->vb.sequence = cap->isp->frm_sequence;
+ buff->vb.vb2_buf.timestamp = ktime_get();
+ buff->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&buff->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ c3_isp_cap_cfg_buff(cap);
+}
+
+/* V4L2 video operations */
+
+static const struct c3_isp_cap_format_info *c3_cap_find_fmt(u32 fourcc)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(cap_formats); i++) {
+ if (cap_formats[i].fourcc == fourcc)
+ return &cap_formats[i];
+ }
+
+ return NULL;
+}
+
+static void c3_cap_try_fmt(struct v4l2_pix_format_mplane *pix_mp)
+{
+ const struct c3_isp_cap_format_info *fmt;
+ const struct v4l2_format_info *info;
+ struct v4l2_plane_pix_format *plane;
+
+ fmt = c3_cap_find_fmt(pix_mp->pixelformat);
+ if (!fmt)
+ fmt = &cap_formats[0];
+
+ pix_mp->width = clamp(pix_mp->width, C3_ISP_MIN_WIDTH,
+ C3_ISP_MAX_WIDTH);
+ pix_mp->height = clamp(pix_mp->height, C3_ISP_MIN_HEIGHT,
+ C3_ISP_MAX_HEIGHT);
+ pix_mp->pixelformat = fmt->fourcc;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
+ pix_mp->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ pix_mp->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+
+ info = v4l2_format_info(fmt->fourcc);
+ pix_mp->num_planes = info->mem_planes;
+ memset(pix_mp->plane_fmt, 0, sizeof(pix_mp->plane_fmt));
+
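+ /*
+ * Compute the per-plane line stride and size: the luma plane is
+ * never subsampled, chroma planes use the format's hdiv/vdiv
+ * factors, and every line is padded to the 16 byte DMA alignment.
+ */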
+ for (unsigned int i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+
+ plane = &pix_mp->plane_fmt[i];
+
+ plane->bytesperline = DIV_ROUND_UP(pix_mp->width, hdiv) *
+ info->bpp[i] / info->bpp_div[i];
+ plane->bytesperline = ALIGN(plane->bytesperline,
+ C3_ISP_DMA_SIZE_ALIGN_BYTES);
+ plane->sizeimage = plane->bytesperline *
+ DIV_ROUND_UP(pix_mp->height, vdiv);
+ }
+}
+
+static void c3_isp_cap_return_buffers(struct c3_isp_capture *cap,
+ enum vb2_buffer_state state)
+{
+ struct c3_isp_cap_buffer *buff;
+
+ guard(spinlock_irqsave)(&cap->buff_lock);
+
+ if (cap->buff) {
+ vb2_buffer_done(&cap->buff->vb.vb2_buf, state);
+ cap->buff = NULL;
+ }
+
+ while (!list_empty(&cap->pending)) {
+ buff = list_first_entry(&cap->pending,
+ struct c3_isp_cap_buffer, list);
+ list_del(&buff->list);
+ vb2_buffer_done(&buff->vb.vb2_buf, state);
+ }
+}
+
+static int c3_isp_cap_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, C3_ISP_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "AML C3 ISP", sizeof(cap->card));
+
+ return 0;
+}
+
+static int c3_isp_cap_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ const struct c3_isp_cap_format_info *fmt;
+ unsigned int index = 0;
+ unsigned int i;
+
+ if (!f->mbus_code) {
+ if (f->index >= ARRAY_SIZE(cap_formats))
+ return -EINVAL;
+
+ fmt = &cap_formats[f->index];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
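+ /* Enumerate only the pixel formats produced from the given mbus code */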
+ for (i = 0; i < ARRAY_SIZE(cap_formats); i++) {
+ fmt = &cap_formats[i];
+ if (f->mbus_code != fmt->mbus_code)
+ continue;
+
+ if (index++ == f->index) {
+ f->pixelformat = cap_formats[i].fourcc;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int c3_isp_cap_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct c3_isp_capture *cap = video_drvdata(file);
+
+ f->fmt.pix_mp = cap->format.pix_mp;
+
+ return 0;
+}
+
+static int c3_isp_cap_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct c3_isp_capture *cap = video_drvdata(file);
+
+ c3_cap_try_fmt(&f->fmt.pix_mp);
+
+ cap->format.pix_mp = f->fmt.pix_mp;
+ cap->format.info = c3_cap_find_fmt(f->fmt.pix_mp.pixelformat);
+
+ return 0;
+}
+
+static int c3_isp_cap_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ c3_cap_try_fmt(&f->fmt.pix_mp);
+
+ return 0;
+}
+
+static int c3_isp_cap_enum_frmsize(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct c3_isp_cap_format_info *fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fmt = c3_cap_find_fmt(fsize->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = C3_ISP_MIN_WIDTH;
+ fsize->stepwise.min_height = C3_ISP_MIN_HEIGHT;
+ fsize->stepwise.max_width = C3_ISP_MAX_WIDTH;
+ fsize->stepwise.max_height = C3_ISP_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 2;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops isp_cap_v4l2_ioctl_ops = {
+ .vidioc_querycap = c3_isp_cap_querycap,
+ .vidioc_enum_fmt_vid_cap = c3_isp_cap_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = c3_isp_cap_g_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = c3_isp_cap_s_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = c3_isp_cap_try_fmt_mplane,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_framesizes = c3_isp_cap_enum_frmsize,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations isp_cap_v4l2_fops = {
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static int c3_isp_cap_link_validate(struct media_link *link)
+{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct c3_isp_capture *cap = video_get_drvdata(vdev);
+ struct v4l2_subdev_format src_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = link->source->index,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call_state_active(sd, pad, get_fmt, &src_fmt);
+ if (ret)
+ return ret;
+
+ if (src_fmt.format.width != cap->format.pix_mp.width ||
+ src_fmt.format.height != cap->format.pix_mp.height ||
+ src_fmt.format.code != cap->format.info->mbus_code) {
+ dev_err(cap->isp->dev,
+ "link %s: %u -> %s: %u not valid: 0x%04x/%ux%u does not match 0x%04x/%ux%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index,
+ src_fmt.format.code, src_fmt.format.width,
+ src_fmt.format.height, cap->format.info->mbus_code,
+ cap->format.pix_mp.width, cap->format.pix_mp.height);
+
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations isp_cap_entity_ops = {
+ .link_validate = c3_isp_cap_link_validate,
+};
+
+static int c3_isp_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(q);
+ const struct v4l2_pix_format_mplane *pix_mp = &cap->format.pix_mp;
+ unsigned int i;
+
+ if (*num_planes) {
+ if (*num_planes != pix_mp->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < pix_mp->num_planes; i++)
+ if (sizes[i] < pix_mp->plane_fmt[i].sizeimage)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = pix_mp->num_planes;
+ for (i = 0; i < pix_mp->num_planes; i++)
+ sizes[i] = pix_mp->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static void c3_isp_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_cap_buffer *buf =
+ container_of(v4l2_buf, struct c3_isp_cap_buffer, vb);
+ struct c3_isp_capture *cap = vb2_get_drv_priv(vb->vb2_queue);
+
+ guard(spinlock_irqsave)(&cap->buff_lock);
+
+ list_add_tail(&buf->list, &cap->pending);
+}
+
+static int c3_isp_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+
+ for (unsigned int i = 0; i < cap->format.pix_mp.num_planes; i++) {
+ size = cap->format.pix_mp.plane_fmt[i].sizeimage;
+ if (vb2_plane_size(vb, i) < size) {
+ dev_err(cap->isp->dev,
+ "User buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static int c3_isp_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_cap_buffer *buf =
+ container_of(v4l2_buf, struct c3_isp_cap_buffer, vb);
+
+ for (unsigned int i = 0; i < cap->format.pix_mp.num_planes; i++)
+ buf->dma_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+ return 0;
+}
+
+static int c3_isp_vb2_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(q);
+ int ret;
+
+ ret = video_device_pipeline_start(&cap->vdev, &cap->isp->pipe);
+ if (ret) {
+ dev_err(cap->isp->dev,
+ "Failed to start cap%u pipeline: %d\n", cap->id, ret);
+ goto err_return_buffers;
+ }
+
+ ret = c3_isp_cap_dummy_buff_create(cap);
+ if (ret)
+ goto err_pipeline_stop;
+
+ ret = pm_runtime_resume_and_get(cap->isp->dev);
+ if (ret)
+ goto err_dummy_destroy;
+
+ c3_isp_cap_start(cap);
+
+ ret = v4l2_subdev_enable_streams(&cap->rsz->sd, C3_ISP_RSZ_PAD_SOURCE,
+ BIT(0));
+ if (ret)
+ goto err_pm_put;
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put(cap->isp->dev);
+err_dummy_destroy:
+ c3_isp_cap_dummy_buff_destroy(cap);
+err_pipeline_stop:
+ video_device_pipeline_stop(&cap->vdev);
+err_return_buffers:
+ c3_isp_cap_return_buffers(cap, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void c3_isp_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct c3_isp_capture *cap = vb2_get_drv_priv(q);
+
+ c3_isp_cap_stop(cap);
+
+ c3_isp_cap_return_buffers(cap, VB2_BUF_STATE_ERROR);
+
+ v4l2_subdev_disable_streams(&cap->rsz->sd, C3_ISP_RSZ_PAD_SOURCE,
+ BIT(0));
+
+ pm_runtime_put(cap->isp->dev);
+
+ c3_isp_cap_dummy_buff_destroy(cap);
+
+ video_device_pipeline_stop(&cap->vdev);
+}
+
+static const struct vb2_ops isp_video_vb2_ops = {
+ .queue_setup = c3_isp_vb2_queue_setup,
+ .buf_queue = c3_isp_vb2_buf_queue,
+ .buf_prepare = c3_isp_vb2_buf_prepare,
+ .buf_init = c3_isp_vb2_buf_init,
+ .start_streaming = c3_isp_vb2_start_streaming,
+ .stop_streaming = c3_isp_vb2_stop_streaming,
+};
+
+static int c3_isp_register_capture(struct c3_isp_capture *cap)
+{
+ struct video_device *vdev = &cap->vdev;
+ struct vb2_queue *vb2_q = &cap->vb2_q;
+ int ret;
+
+ snprintf(vdev->name, sizeof(vdev->name), "c3-isp-cap%u", cap->id);
+ vdev->fops = &isp_cap_v4l2_fops;
+ vdev->ioctl_ops = &isp_cap_v4l2_ioctl_ops;
+ vdev->v4l2_dev = &cap->isp->v4l2_dev;
+ vdev->entity.ops = &isp_cap_entity_ops;
+ vdev->lock = &cap->lock;
+ vdev->minor = -1;
+ vdev->queue = vb2_q;
+ vdev->release = video_device_release_empty;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_RX;
+ video_set_drvdata(vdev, cap);
+
+ vb2_q->drv_priv = cap;
+ vb2_q->mem_ops = &vb2_dma_contig_memops;
+ vb2_q->ops = &isp_video_vb2_ops;
+ vb2_q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ vb2_q->io_modes = VB2_DMABUF | VB2_MMAP;
+ vb2_q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vb2_q->buf_struct_size = sizeof(struct c3_isp_cap_buffer);
+ vb2_q->dev = cap->isp->dev;
+ vb2_q->lock = &cap->lock;
+
+ ret = vb2_queue_init(vb2_q);
+ if (ret)
+ goto err_destroy;
+
+ cap->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, &cap->pad);
+ if (ret)
+ goto err_queue_release;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(cap->isp->dev,
+ "Failed to register %s: %d\n", vdev->name, ret);
+ goto err_entity_cleanup;
+ }
+
+ return 0;
+
+err_entity_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_queue_release:
+ vb2_queue_release(vb2_q);
+err_destroy:
+ mutex_destroy(&cap->lock);
+ return ret;
+}
+
+int c3_isp_captures_register(struct c3_isp_device *isp)
+{
+ int ret;
+ unsigned int i;
+ struct c3_isp_capture *cap;
+
+ for (i = C3_ISP_CAP_DEV_0; i < C3_ISP_NUM_CAP_DEVS; i++) {
+ cap = &isp->caps[i];
+ memset(cap, 0, sizeof(*cap));
+
+ cap->format.pix_mp.width = C3_ISP_DEFAULT_WIDTH;
+ cap->format.pix_mp.height = C3_ISP_DEFAULT_HEIGHT;
+ cap->format.pix_mp.field = V4L2_FIELD_NONE;
+ cap->format.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
+ cap->format.pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
+ cap->format.info =
+ c3_cap_find_fmt(cap->format.pix_mp.pixelformat);
+
+ c3_cap_try_fmt(&cap->format.pix_mp);
+
+ cap->id = i;
+ cap->rsz = &isp->resizers[i];
+ cap->isp = isp;
+ INIT_LIST_HEAD(&cap->pending);
+ spin_lock_init(&cap->buff_lock);
+ mutex_init(&cap->lock);
+
+ ret = c3_isp_register_capture(cap);
+ if (ret) {
+ /*
+ * The mutex is already destroyed on the error paths of
+ * c3_isp_register_capture(); clear the isp pointer so
+ * that c3_isp_captures_unregister() skips this device.
+ */
+ cap->isp = NULL;
+ c3_isp_captures_unregister(isp);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void c3_isp_captures_unregister(struct c3_isp_device *isp)
+{
+ unsigned int i;
+ struct c3_isp_capture *cap;
+
+ for (i = C3_ISP_CAP_DEV_0; i < C3_ISP_NUM_CAP_DEVS; i++) {
+ cap = &isp->caps[i];
+
+ if (!cap->isp)
+ continue;
+ video_unregister_device(&cap->vdev);
+ media_entity_cleanup(&cap->vdev.entity);
+ vb2_queue_release(&cap->vb2_q);
+ mutex_destroy(&cap->lock);
+ }
+}
+
+void c3_isp_captures_isr(struct c3_isp_device *isp)
+{
+ c3_isp_cap_done(&isp->caps[C3_ISP_CAP_DEV_0]);
+ c3_isp_cap_done(&isp->caps[C3_ISP_CAP_DEV_1]);
+ c3_isp_cap_done(&isp->caps[C3_ISP_CAP_DEV_2]);
+}
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-common.h b/drivers/media/platform/amlogic/c3/isp/c3-isp-common.h
new file mode 100644
index 000000000000..cb470802e61e
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-common.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#ifndef __C3_ISP_COMMON_H__
+#define __C3_ISP_COMMON_H__
+
+#include <linux/clk.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define C3_ISP_DRIVER_NAME "c3-isp"
+#define C3_ISP_CLOCK_NUM_MAX 3
+
+#define C3_ISP_DEFAULT_WIDTH 1920
+#define C3_ISP_DEFAULT_HEIGHT 1080
+#define C3_ISP_MAX_WIDTH 2888
+#define C3_ISP_MAX_HEIGHT 2240
+#define C3_ISP_MIN_WIDTH 160
+#define C3_ISP_MIN_HEIGHT 120
+
+#define C3_ISP_DMA_SIZE_ALIGN_BYTES 16
+
+enum c3_isp_core_pads {
+ C3_ISP_CORE_PAD_SINK_VIDEO,
+ C3_ISP_CORE_PAD_SINK_PARAMS,
+ C3_ISP_CORE_PAD_SOURCE_STATS,
+ C3_ISP_CORE_PAD_SOURCE_VIDEO_0,
+ C3_ISP_CORE_PAD_SOURCE_VIDEO_1,
+ C3_ISP_CORE_PAD_SOURCE_VIDEO_2,
+ C3_ISP_CORE_PAD_MAX
+};
+
+enum c3_isp_resizer_ids {
+ C3_ISP_RSZ_0,
+ C3_ISP_RSZ_1,
+ C3_ISP_RSZ_2,
+ C3_ISP_NUM_RSZ
+};
+
+enum c3_isp_resizer_pads {
+ C3_ISP_RSZ_PAD_SINK,
+ C3_ISP_RSZ_PAD_SOURCE,
+ C3_ISP_RSZ_PAD_MAX
+};
+
+enum c3_isp_cap_devs {
+ C3_ISP_CAP_DEV_0,
+ C3_ISP_CAP_DEV_1,
+ C3_ISP_CAP_DEV_2,
+ C3_ISP_NUM_CAP_DEVS
+};
+
+enum c3_isp_planes {
+ C3_ISP_PLANE_Y,
+ C3_ISP_PLANE_UV,
+ C3_ISP_NUM_PLANES
+};
+
+/*
+ * struct c3_isp_cap_format_info - The image format of capture device
+ *
+ * @mbus_code: the mbus code
+ * @fourcc: the pixel format
+ * @format: defines the output format of hardware
+ * @planes: defines the number of hardware planes
+ * @ch0_pix_bits: defines the channel 0 pixel bits mode of hardware
+ * @uv_swap: defines the uv swap flag of hardware
+ * @in_bits: defines the input bits of hardware
+ * @hdiv: horizontal chroma subsampling factor of hardware
+ * @vdiv: vertical chroma subsampling factor of hardware
+ */
+struct c3_isp_cap_format_info {
+ u32 mbus_code;
+ u32 fourcc;
+ u32 format;
+ u32 planes;
+ u32 ch0_pix_bits;
+ u8 uv_swap;
+ u8 in_bits;
+ u8 hdiv;
+ u8 vdiv;
+};
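+
+/*
+ * Editor's illustration, not part of this patch: how the @hdiv and @vdiv
+ * subsampling factors above relate the image size to the chroma sample
+ * grid of a format such as NV12M (hdiv = 2, vdiv = 2). The helper name is
+ * hypothetical.
+ */
+static inline void c3_isp_cap_chroma_grid(const struct c3_isp_cap_format_info *info,
+ u32 width, u32 height,
+ u32 *c_width, u32 *c_height)
+{
+ /* Number of chroma samples per line and chroma lines per frame */
+ *c_width = width / info->hdiv;
+ *c_height = height / info->vdiv;
+}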
+
+/*
+ * struct c3_isp_cap_buffer - A container of a vb2 buffer used by the
+ * capture video devices
+ *
+ * @vb: vb2 buffer
+ * @dma_addr: buffer physical address
+ * @list: entry of the buffer in the queue
+ */
+struct c3_isp_cap_buffer {
+ struct vb2_v4l2_buffer vb;
+ dma_addr_t dma_addr[C3_ISP_NUM_PLANES];
+ struct list_head list;
+};
+
+/*
+ * struct c3_isp_stats_buffer - A container of a vb2 buffer used by the
+ * stats video device
+ *
+ * @vb: vb2 buffer
+ * @dma_addr: buffer physical address
+ * @list: entry of the buffer in the queue
+ */
+struct c3_isp_stats_buffer {
+ struct vb2_v4l2_buffer vb;
+ dma_addr_t dma_addr;
+ struct list_head list;
+};
+
+/*
+ * struct c3_isp_params_buffer - A container of a vb2 buffer used by the
+ * params video device
+ *
+ * @vb: vb2 buffer
+ * @cfg: scratch buffer used for caching the ISP configuration parameters
+ * @list: entry of the buffer in the queue
+ */
+struct c3_isp_params_buffer {
+ struct vb2_v4l2_buffer vb;
+ void *cfg;
+ struct list_head list;
+};
+
+/*
+ * struct c3_isp_dummy_buffer - A buffer to write the next frame to in case
+ * there are no vb2 buffers available.
+ *
+ * @vaddr: CPU virtual address returned by dma_alloc_attrs()
+ * @dma_addr: dma address of the buffer
+ * @size: size of the buffer
+ */
+struct c3_isp_dummy_buffer {
+ void *vaddr;
+ dma_addr_t dma_addr;
+ u32 size;
+};
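+
+/*
+ * Editor's sketch, not part of this patch: a typical allocation for a dummy
+ * buffer like the one above, showing how @vaddr, @dma_addr and @size relate.
+ * The driver's own create/destroy helpers are authoritative; this function
+ * name is hypothetical.
+ */
+static inline int c3_isp_dummy_buff_alloc(struct device *dev,
+ struct c3_isp_dummy_buffer *buf,
+ u32 size)
+{
+ buf->size = size;
+ /* A real implementation may pass DMA_ATTR_NO_KERNEL_MAPPING here */
+ buf->vaddr = dma_alloc_attrs(dev, size, &buf->dma_addr, GFP_KERNEL, 0);
+
+ return buf->vaddr ? 0 : -ENOMEM;
+}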
+
+/*
+ * struct c3_isp_core - ISP core subdev
+ *
+ * @sd: ISP sub-device
+ * @pads: ISP sub-device pads
+ * @src_pad: source sub-device pad
+ * @isp: pointer to c3_isp_device
+ */
+struct c3_isp_core {
+ struct v4l2_subdev sd;
+ struct media_pad pads[C3_ISP_CORE_PAD_MAX];
+ struct media_pad *src_pad;
+ struct c3_isp_device *isp;
+};
+
+/*
+ * struct c3_isp_resizer - ISP resizer subdev
+ *
+ * @id: resizer id
+ * @sd: resizer sub-device
+ * @pads: resizer sub-device pads
+ * @src_sd: source sub-device
+ * @isp: pointer to c3_isp_device
+ * @src_pad: the pad index on the source sub-device
+ */
+struct c3_isp_resizer {
+ enum c3_isp_resizer_ids id;
+ struct v4l2_subdev sd;
+ struct media_pad pads[C3_ISP_RSZ_PAD_MAX];
+ struct v4l2_subdev *src_sd;
+ struct c3_isp_device *isp;
+ u32 src_pad;
+};
+
+/*
+ * struct c3_isp_stats - ISP statistics device
+ *
+ * @vb2_q: vb2 buffer queue
+ * @vdev: video node
+ * @vfmt: v4l2_format of the metadata format
+ * @pad: media pad
+ * @lock: protects vb2_q, vdev
+ * @isp: pointer to c3_isp_device
+ * @buff: in use buffer
+ * @buff_lock: protects stats buffer
+ * @pending: stats buffer list head
+ */
+struct c3_isp_stats {
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct v4l2_format vfmt;
+ struct media_pad pad;
+
+ struct mutex lock; /* Protects vb2_q, vdev */
+ struct c3_isp_device *isp;
+
+ struct c3_isp_stats_buffer *buff;
+ spinlock_t buff_lock; /* Protects stats buffer */
+ struct list_head pending;
+};
+
+/*
+ * struct c3_isp_params - ISP parameters device
+ *
+ * @vb2_q: vb2 buffer queue
+ * @vdev: video node
+ * @vfmt: v4l2_format of the metadata format
+ * @pad: media pad
+ * @lock: protects vb2_q, vdev
+ * @isp: pointer to c3_isp_device
+ * @buff: in use buffer
+ * @buff_lock: protects params buffer
+ * @pending: params buffer list head
+ */
+struct c3_isp_params {
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct v4l2_format vfmt;
+ struct media_pad pad;
+
+ struct mutex lock; /* Protects vb2_q, vdev */
+ struct c3_isp_device *isp;
+
+ struct c3_isp_params_buffer *buff;
+ spinlock_t buff_lock; /* Protects params buffer */
+ struct list_head pending;
+};
+
+/*
+ * struct c3_isp_capture - ISP capture device
+ *
+ * @id: capture device ID
+ * @vb2_q: vb2 buffer queue
+ * @vdev: video node
+ * @pad: media pad
+ * @lock: protects vb2_q, vdev
+ * @isp: pointer to c3_isp_device
+ * @rsz: pointer to c3_isp_resizer
+ * @dummy_buff: dummy buffer written to when no vb2 buffer is available
+ * @buff: in use buffer
+ * @buff_lock: protects capture buffer
+ * @pending: capture buffer list head
+ * @format.info: a pointer to the c3_isp_cap_format_info of the pixel format
+ * @format.pix_mp: the V4L2 multi-planar buffer format
+ */
+struct c3_isp_capture {
+ enum c3_isp_cap_devs id;
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct media_pad pad;
+
+ struct mutex lock; /* Protects vb2_q, vdev */
+ struct c3_isp_device *isp;
+ struct c3_isp_resizer *rsz;
+
+ struct c3_isp_dummy_buffer dummy_buff;
+ struct c3_isp_cap_buffer *buff;
+ spinlock_t buff_lock; /* Protects stream buffer */
+ struct list_head pending;
+ struct {
+ const struct c3_isp_cap_format_info *info;
+ struct v4l2_pix_format_mplane pix_mp;
+ } format;
+};
+
+/**
+ * struct c3_isp_info - ISP information
+ *
+ * @clocks: array of ISP clock names
+ * @clock_num: actual clock number
+ */
+struct c3_isp_info {
+ char *clocks[C3_ISP_CLOCK_NUM_MAX];
+ u32 clock_num;
+};
+
+/**
+ * struct c3_isp_device - ISP platform device
+ *
+ * @dev: pointer to the struct device
+ * @base: base register address
+ * @clks: array of clocks
+ * @notifier: notifier to register on the v4l2-async API
+ * @v4l2_dev: v4l2_device variable
+ * @media_dev: media device variable
+ * @pipe: media pipeline
+ * @core: ISP core subdev
+ * @resizers: array of ISP resizer subdevs
+ * @stats: ISP stats device
+ * @params: ISP params device
+ * @caps: array of ISP capture devices
+ * @frm_sequence: the current frame sequence number
+ * @info: version-specific ISP information
+ */
+struct c3_isp_device {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data clks[C3_ISP_CLOCK_NUM_MAX];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct media_pipeline pipe;
+
+ struct c3_isp_core core;
+ struct c3_isp_resizer resizers[C3_ISP_NUM_RSZ];
+ struct c3_isp_stats stats;
+ struct c3_isp_params params;
+ struct c3_isp_capture caps[C3_ISP_NUM_CAP_DEVS];
+
+ u32 frm_sequence;
+ const struct c3_isp_info *info;
+};
+
+u32 c3_isp_read(struct c3_isp_device *isp, u32 reg);
+void c3_isp_write(struct c3_isp_device *isp, u32 reg, u32 val);
+void c3_isp_update_bits(struct c3_isp_device *isp, u32 reg, u32 mask, u32 val);
+
+void c3_isp_core_queue_sof(struct c3_isp_device *isp);
+int c3_isp_core_register(struct c3_isp_device *isp);
+void c3_isp_core_unregister(struct c3_isp_device *isp);
+int c3_isp_resizers_register(struct c3_isp_device *isp);
+void c3_isp_resizers_unregister(struct c3_isp_device *isp);
+int c3_isp_captures_register(struct c3_isp_device *isp);
+void c3_isp_captures_unregister(struct c3_isp_device *isp);
+void c3_isp_captures_isr(struct c3_isp_device *isp);
+void c3_isp_stats_pre_cfg(struct c3_isp_device *isp);
+int c3_isp_stats_register(struct c3_isp_device *isp);
+void c3_isp_stats_unregister(struct c3_isp_device *isp);
+void c3_isp_stats_isr(struct c3_isp_device *isp);
+void c3_isp_params_pre_cfg(struct c3_isp_device *isp);
+int c3_isp_params_register(struct c3_isp_device *isp);
+void c3_isp_params_unregister(struct c3_isp_device *isp);
+void c3_isp_params_isr(struct c3_isp_device *isp);
+
+#endif
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-core.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-core.c
new file mode 100644
index 000000000000..ff6413fff889
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-core.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/media/amlogic/c3-isp-config.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-event.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+#define C3_ISP_CORE_SUBDEV_NAME "c3-isp-core"
+
+#define C3_ISP_PHASE_OFFSET_0 0
+#define C3_ISP_PHASE_OFFSET_1 1
+#define C3_ISP_PHASE_OFFSET_NONE 0xff
+
+#define C3_ISP_CORE_DEF_SINK_PAD_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+#define C3_ISP_CORE_DEF_SRC_PAD_FMT MEDIA_BUS_FMT_YUV10_1X30
+
+/*
+ * struct c3_isp_core_format_info - ISP core format information
+ *
+ * @mbus_code: the mbus code
+ * @pads: bitmask detailing valid pads for this mbus_code
+ * @xofst: horizontal phase offset of hardware
+ * @yofst: vertical phase offset of hardware
+ * @is_raw: true if the mbus code describes a raw Bayer format
+ */
+struct c3_isp_core_format_info {
+ u32 mbus_code;
+ u32 pads;
+ u8 xofst;
+ u8 yofst;
+ bool is_raw;
+};
+
+static const struct c3_isp_core_format_info c3_isp_core_fmts[] = {
+ /* RAW formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_0,
+ .yofst = C3_ISP_PHASE_OFFSET_1,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_1,
+ .yofst = C3_ISP_PHASE_OFFSET_1,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_0,
+ .yofst = C3_ISP_PHASE_OFFSET_0,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_1,
+ .yofst = C3_ISP_PHASE_OFFSET_0,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_0,
+ .yofst = C3_ISP_PHASE_OFFSET_1,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_1,
+ .yofst = C3_ISP_PHASE_OFFSET_1,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_0,
+ .yofst = C3_ISP_PHASE_OFFSET_0,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pads = BIT(C3_ISP_CORE_PAD_SINK_VIDEO),
+ .xofst = C3_ISP_PHASE_OFFSET_1,
+ .yofst = C3_ISP_PHASE_OFFSET_0,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0)
+ | BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1)
+ | BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0)
+ | BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1)
+ | BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0)
+ | BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1)
+ | BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0)
+ | BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1)
+ | BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = true,
+ },
+ /* YUV formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .pads = BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_0) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_1) |
+ BIT(C3_ISP_CORE_PAD_SOURCE_VIDEO_2),
+ .xofst = C3_ISP_PHASE_OFFSET_NONE,
+ .yofst = C3_ISP_PHASE_OFFSET_NONE,
+ .is_raw = false,
+ },
+};
+
+static const struct c3_isp_core_format_info
+*core_find_format_by_code(u32 code, u32 pad)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_isp_core_fmts); i++) {
+ const struct c3_isp_core_format_info *info =
+ &c3_isp_core_fmts[i];
+
+ if (info->mbus_code == code && info->pads & BIT(pad))
+ return info;
+ }
+
+ return NULL;
+}
+
+static const struct c3_isp_core_format_info
+*core_find_format_by_index(u32 index, u32 pad)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_isp_core_fmts); i++) {
+ const struct c3_isp_core_format_info *info =
+ &c3_isp_core_fmts[i];
+
+ if (!(info->pads & BIT(pad)))
+ continue;
+
+ if (!index)
+ return info;
+
+ index--;
+ }
+
+ return NULL;
+}
+
+static void c3_isp_core_enable(struct c3_isp_device *isp)
+{
+ c3_isp_update_bits(isp, ISP_TOP_IRQ_EN, ISP_TOP_IRQ_EN_FRM_END_MASK,
+ ISP_TOP_IRQ_EN_FRM_END_EN);
+ c3_isp_update_bits(isp, ISP_TOP_IRQ_EN, ISP_TOP_IRQ_EN_FRM_RST_MASK,
+ ISP_TOP_IRQ_EN_FRM_RST_EN);
+
+ /* Enable image data to ISP core */
+ c3_isp_update_bits(isp, ISP_TOP_PATH_SEL, ISP_TOP_PATH_SEL_CORE_MASK,
+ ISP_TOP_PATH_SEL_CORE_MIPI_CORE);
+}
+
+static void c3_isp_core_disable(struct c3_isp_device *isp)
+{
+ /* Disable image data to ISP core */
+ c3_isp_update_bits(isp, ISP_TOP_PATH_SEL, ISP_TOP_PATH_SEL_CORE_MASK,
+ ISP_TOP_PATH_SEL_CORE_CORE_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_IRQ_EN, ISP_TOP_IRQ_EN_FRM_END_MASK,
+ ISP_TOP_IRQ_EN_FRM_END_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_IRQ_EN, ISP_TOP_IRQ_EN_FRM_RST_MASK,
+ ISP_TOP_IRQ_EN_FRM_RST_DIS);
+}
+
+/* Set the phase offset of blc, wb and lns */
+static void c3_isp_core_lswb_ofst(struct c3_isp_device *isp,
+ u8 xofst, u8 yofst)
+{
+ c3_isp_update_bits(isp, ISP_LSWB_BLC_PHSOFST,
+ ISP_LSWB_BLC_PHSOFST_HORIZ_OFST_MASK,
+ ISP_LSWB_BLC_PHSOFST_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_LSWB_BLC_PHSOFST,
+ ISP_LSWB_BLC_PHSOFST_VERT_OFST_MASK,
+ ISP_LSWB_BLC_PHSOFST_VERT_OFST(yofst));
+
+ c3_isp_update_bits(isp, ISP_LSWB_WB_PHSOFST,
+ ISP_LSWB_WB_PHSOFST_HORIZ_OFST_MASK,
+ ISP_LSWB_WB_PHSOFST_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_PHSOFST,
+ ISP_LSWB_WB_PHSOFST_VERT_OFST_MASK,
+ ISP_LSWB_WB_PHSOFST_VERT_OFST(yofst));
+
+ c3_isp_update_bits(isp, ISP_LSWB_LNS_PHSOFST,
+ ISP_LSWB_LNS_PHSOFST_HORIZ_OFST_MASK,
+ ISP_LSWB_LNS_PHSOFST_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_LSWB_LNS_PHSOFST,
+ ISP_LSWB_LNS_PHSOFST_VERT_OFST_MASK,
+ ISP_LSWB_LNS_PHSOFST_VERT_OFST(yofst));
+}
+
+/* Set the phase offset of af, ae and awb */
+static void c3_isp_core_3a_ofst(struct c3_isp_device *isp,
+ u8 xofst, u8 yofst)
+{
+ c3_isp_update_bits(isp, ISP_AF_CTRL, ISP_AF_CTRL_HORIZ_OFST_MASK,
+ ISP_AF_CTRL_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_AF_CTRL, ISP_AF_CTRL_VERT_OFST_MASK,
+ ISP_AF_CTRL_VERT_OFST(yofst));
+
+ c3_isp_update_bits(isp, ISP_AE_CTRL, ISP_AE_CTRL_HORIZ_OFST_MASK,
+ ISP_AE_CTRL_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_AE_CTRL, ISP_AE_CTRL_VERT_OFST_MASK,
+ ISP_AE_CTRL_VERT_OFST(yofst));
+
+ c3_isp_update_bits(isp, ISP_AWB_CTRL, ISP_AWB_CTRL_HORIZ_OFST_MASK,
+ ISP_AWB_CTRL_HORIZ_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_AWB_CTRL, ISP_AWB_CTRL_VERT_OFST_MASK,
+ ISP_AWB_CTRL_VERT_OFST(yofst));
+}
+
+/* Set the phase offset of demosaic */
+static void c3_isp_core_dms_ofst(struct c3_isp_device *isp,
+ u8 xofst, u8 yofst)
+{
+ c3_isp_update_bits(isp, ISP_DMS_COMMON_PARAM0,
+ ISP_DMS_COMMON_PARAM0_HORIZ_PHS_OFST_MASK,
+ ISP_DMS_COMMON_PARAM0_HORIZ_PHS_OFST(xofst));
+ c3_isp_update_bits(isp, ISP_DMS_COMMON_PARAM0,
+ ISP_DMS_COMMON_PARAM0_VERT_PHS_OFST_MASK,
+ ISP_DMS_COMMON_PARAM0_VERT_PHS_OFST(yofst));
+}
+
+static void c3_isp_core_cfg_format(struct c3_isp_device *isp,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ const struct c3_isp_core_format_info *isp_fmt;
+
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_CORE_PAD_SINK_VIDEO);
+ isp_fmt = core_find_format_by_code(fmt->code,
+ C3_ISP_CORE_PAD_SINK_VIDEO);
+
+ c3_isp_write(isp, ISP_TOP_INPUT_SIZE,
+ ISP_TOP_INPUT_SIZE_HORIZ_SIZE(fmt->width) |
+ ISP_TOP_INPUT_SIZE_VERT_SIZE(fmt->height));
+ c3_isp_write(isp, ISP_TOP_FRM_SIZE,
+ ISP_TOP_FRM_SIZE_CORE_HORIZ_SIZE(fmt->width) |
+ ISP_TOP_FRM_SIZE_CORE_VERT_SIZE(fmt->height));
+
+ c3_isp_update_bits(isp, ISP_TOP_HOLD_SIZE,
+ ISP_TOP_HOLD_SIZE_CORE_HORIZ_SIZE_MASK,
+ ISP_TOP_HOLD_SIZE_CORE_HORIZ_SIZE(fmt->width));
+
+ c3_isp_write(isp, ISP_AF_HV_SIZE,
+ ISP_AF_HV_SIZE_GLB_WIN_XSIZE(fmt->width) |
+ ISP_AF_HV_SIZE_GLB_WIN_YSIZE(fmt->height));
+ c3_isp_write(isp, ISP_AE_HV_SIZE,
+ ISP_AE_HV_SIZE_HORIZ_SIZE(fmt->width) |
+ ISP_AE_HV_SIZE_VERT_SIZE(fmt->height));
+ c3_isp_write(isp, ISP_AWB_HV_SIZE,
+ ISP_AWB_HV_SIZE_HORIZ_SIZE(fmt->width) |
+ ISP_AWB_HV_SIZE_VERT_SIZE(fmt->height));
+
+ c3_isp_core_lswb_ofst(isp, isp_fmt->xofst, isp_fmt->yofst);
+ c3_isp_core_3a_ofst(isp, isp_fmt->xofst, isp_fmt->yofst);
+ c3_isp_core_dms_ofst(isp, isp_fmt->xofst, isp_fmt->yofst);
+}
+
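+/*
+ * A capture pipeline may be started on each of the three source video
+ * pads. The core only reports ready once every enabled source video link
+ * has had its pipeline started, i.e. when the number of enabled links
+ * matches the media pipeline start count.
+ */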
+static bool c3_isp_core_streams_ready(struct c3_isp_core *core)
+{
+ unsigned int n_links = 0;
+ struct media_link *link;
+
+ for_each_media_entity_data_link(&core->sd.entity, link) {
+ if ((link->source->index == C3_ISP_CORE_PAD_SOURCE_VIDEO_0 ||
+ link->source->index == C3_ISP_CORE_PAD_SOURCE_VIDEO_1 ||
+ link->source->index == C3_ISP_CORE_PAD_SOURCE_VIDEO_2) &&
+ link->flags == MEDIA_LNK_FL_ENABLED)
+ n_links++;
+ }
+
+ return n_links == core->isp->pipe.start_count;
+}
+
+static int c3_isp_core_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_isp_core *core = v4l2_get_subdevdata(sd);
+ struct media_pad *sink_pad;
+ struct v4l2_subdev *src_sd;
+ int ret;
+
+ if (!c3_isp_core_streams_ready(core))
+ return 0;
+
+ core->isp->frm_sequence = 0;
+ c3_isp_core_cfg_format(core->isp, state);
+ c3_isp_core_enable(core->isp);
+
+ sink_pad = &core->pads[C3_ISP_CORE_PAD_SINK_VIDEO];
+ core->src_pad = media_pad_remote_pad_unique(sink_pad);
+ if (IS_ERR(core->src_pad)) {
+ dev_dbg(core->isp->dev,
+ "Failed to get source pad for ISP core\n");
+ return -EPIPE;
+ }
+
+ src_sd = media_entity_to_v4l2_subdev(core->src_pad->entity);
+
+ ret = v4l2_subdev_enable_streams(src_sd, core->src_pad->index, BIT(0));
+ if (ret) {
+ c3_isp_core_disable(core->isp);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int c3_isp_core_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_isp_core *core = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd;
+
+ if (core->isp->pipe.start_count != 1)
+ return 0;
+
+ if (core->src_pad) {
+ src_sd = media_entity_to_v4l2_subdev(core->src_pad->entity);
+ v4l2_subdev_disable_streams(src_sd, core->src_pad->index,
+ BIT(0));
+ }
+ core->src_pad = NULL;
+
+ c3_isp_core_disable(core->isp);
+
+ return 0;
+}
+
+static int c3_isp_core_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct c3_isp_core_format_info *info;
+
+ switch (code->pad) {
+ case C3_ISP_CORE_PAD_SINK_VIDEO:
+ case C3_ISP_CORE_PAD_SOURCE_VIDEO_0:
+ case C3_ISP_CORE_PAD_SOURCE_VIDEO_1:
+ case C3_ISP_CORE_PAD_SOURCE_VIDEO_2:
+ info = core_find_format_by_index(code->index, code->pad);
+ if (!info)
+ return -EINVAL;
+
+ code->code = info->mbus_code;
+
+ break;
+ case C3_ISP_CORE_PAD_SINK_PARAMS:
+ case C3_ISP_CORE_PAD_SOURCE_STATS:
+ if (code->index)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_METADATA_FIXED;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void c3_isp_core_set_sink_fmt(struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ const struct c3_isp_core_format_info *isp_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, format->pad);
+
+ isp_fmt = core_find_format_by_code(format->format.code, format->pad);
+ if (!isp_fmt)
+ sink_fmt->code = C3_ISP_CORE_DEF_SINK_PAD_FMT;
+ else
+ sink_fmt->code = format->format.code;
+
+ sink_fmt->width = clamp_t(u32, format->format.width,
+ C3_ISP_MIN_WIDTH, C3_ISP_MAX_WIDTH);
+ sink_fmt->height = clamp_t(u32, format->format.height,
+ C3_ISP_MIN_HEIGHT, C3_ISP_MAX_HEIGHT);
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ for (unsigned int i = C3_ISP_CORE_PAD_SOURCE_VIDEO_0;
+ i < C3_ISP_CORE_PAD_MAX; i++) {
+ src_fmt = v4l2_subdev_state_get_format(state, i);
+
+ src_fmt->width = sink_fmt->width;
+ src_fmt->height = sink_fmt->height;
+ }
+
+ format->format = *sink_fmt;
+}
+
+static void c3_isp_core_set_source_fmt(struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ const struct c3_isp_core_format_info *isp_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state,
+ C3_ISP_CORE_PAD_SINK_VIDEO);
+ src_fmt = v4l2_subdev_state_get_format(state, format->pad);
+
+ isp_fmt = core_find_format_by_code(format->format.code, format->pad);
+ if (!isp_fmt)
+ src_fmt->code = C3_ISP_CORE_DEF_SRC_PAD_FMT;
+ else
+ src_fmt->code = format->format.code;
+
+ src_fmt->width = sink_fmt->width;
+ src_fmt->height = sink_fmt->height;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (isp_fmt && isp_fmt->is_raw) {
+ src_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ src_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ } else {
+ src_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ src_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ }
+
+ format->format = *src_fmt;
+}
+
+static int c3_isp_core_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ if (format->pad == C3_ISP_CORE_PAD_SINK_VIDEO)
+ c3_isp_core_set_sink_fmt(state, format);
+ else if (format->pad == C3_ISP_CORE_PAD_SOURCE_VIDEO_0 ||
+ format->pad == C3_ISP_CORE_PAD_SOURCE_VIDEO_1 ||
+ format->pad == C3_ISP_CORE_PAD_SOURCE_VIDEO_2)
+ c3_isp_core_set_source_fmt(state, format);
+ else
+ format->format =
+ *v4l2_subdev_state_get_format(state, format->pad);
+
+ return 0;
+}
+
+static int c3_isp_core_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ /* Video sink pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_CORE_PAD_SINK_VIDEO);
+ fmt->width = C3_ISP_DEFAULT_WIDTH;
+ fmt->height = C3_ISP_DEFAULT_HEIGHT;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = C3_ISP_CORE_DEF_SINK_PAD_FMT;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ /* Video source pad */
+ for (unsigned int i = C3_ISP_CORE_PAD_SOURCE_VIDEO_0;
+ i < C3_ISP_CORE_PAD_MAX; i++) {
+ fmt = v4l2_subdev_state_get_format(state, i);
+ fmt->width = C3_ISP_DEFAULT_WIDTH;
+ fmt->height = C3_ISP_DEFAULT_HEIGHT;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = C3_ISP_CORE_DEF_SRC_PAD_FMT;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ }
+
+ /* Parameters pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_CORE_PAD_SINK_PARAMS);
+ fmt->width = 0;
+ fmt->height = 0;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = MEDIA_BUS_FMT_METADATA_FIXED;
+
+ /* Statistics pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_CORE_PAD_SOURCE_STATS);
+ fmt->width = 0;
+ fmt->height = 0;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = MEDIA_BUS_FMT_METADATA_FIXED;
+
+ return 0;
+}
+
+static int c3_isp_core_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_FRAME_SYNC)
+ return -EINVAL;
+
+ /* V4L2_EVENT_FRAME_SYNC doesn't use the id field, so it must be 0 */
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static const struct v4l2_subdev_pad_ops c3_isp_core_pad_ops = {
+ .enum_mbus_code = c3_isp_core_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = c3_isp_core_set_fmt,
+ .enable_streams = c3_isp_core_enable_streams,
+ .disable_streams = c3_isp_core_disable_streams,
+};
+
+static const struct v4l2_subdev_core_ops c3_isp_core_core_ops = {
+ .subscribe_event = c3_isp_core_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops c3_isp_core_subdev_ops = {
+ .core = &c3_isp_core_core_ops,
+ .pad = &c3_isp_core_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops c3_isp_core_internal_ops = {
+ .init_state = c3_isp_core_init_state,
+};
+
+static int c3_isp_core_link_validate(struct media_link *link)
+{
+ if (link->sink->index == C3_ISP_CORE_PAD_SINK_PARAMS)
+ return 0;
+
+ return v4l2_subdev_link_validate(link);
+}
+
+/* Media entity operations */
+static const struct media_entity_operations c3_isp_core_entity_ops = {
+ .link_validate = c3_isp_core_link_validate,
+};
+
+void c3_isp_core_queue_sof(struct c3_isp_device *isp)
+{
+ struct v4l2_event event = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ };
+
+ event.u.frame_sync.frame_sequence = isp->frm_sequence;
+ v4l2_event_queue(isp->core.sd.devnode, &event);
+}
+
+int c3_isp_core_register(struct c3_isp_device *isp)
+{
+ struct c3_isp_core *core = &isp->core;
+ struct v4l2_subdev *sd = &core->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &c3_isp_core_subdev_ops);
+ sd->owner = THIS_MODULE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->internal_ops = &c3_isp_core_internal_ops;
+ snprintf(sd->name, sizeof(sd->name), "%s", C3_ISP_CORE_SUBDEV_NAME);
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ sd->entity.ops = &c3_isp_core_entity_ops;
+
+ core->isp = isp;
+ sd->dev = isp->dev;
+ v4l2_set_subdevdata(sd, core);
+
+ core->pads[C3_ISP_CORE_PAD_SINK_VIDEO].flags = MEDIA_PAD_FL_SINK;
+ core->pads[C3_ISP_CORE_PAD_SINK_PARAMS].flags = MEDIA_PAD_FL_SINK;
+ core->pads[C3_ISP_CORE_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
+ core->pads[C3_ISP_CORE_PAD_SOURCE_VIDEO_0].flags = MEDIA_PAD_FL_SOURCE;
+ core->pads[C3_ISP_CORE_PAD_SOURCE_VIDEO_1].flags = MEDIA_PAD_FL_SOURCE;
+ core->pads[C3_ISP_CORE_PAD_SOURCE_VIDEO_2].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, C3_ISP_CORE_PAD_MAX,
+ core->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = v4l2_device_register_subdev(&isp->v4l2_dev, sd);
+ if (ret)
+ goto err_subdev_cleanup;
+
+ return 0;
+
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+void c3_isp_core_unregister(struct c3_isp_device *isp)
+{
+ struct c3_isp_core *core = &isp->core;
+ struct v4l2_subdev *sd = &core->sd;
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+}
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c
new file mode 100644
index 000000000000..c3b779f63088
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-dev.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+u32 c3_isp_read(struct c3_isp_device *isp, u32 reg)
+{
+ return readl(isp->base + reg);
+}
+
+void c3_isp_write(struct c3_isp_device *isp, u32 reg, u32 val)
+{
+ writel(val, isp->base + reg);
+}
+
+void c3_isp_update_bits(struct c3_isp_device *isp, u32 reg, u32 mask, u32 val)
+{
+ u32 orig, tmp;
+
+ orig = c3_isp_read(isp, reg);
+
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (tmp != orig)
+ c3_isp_write(isp, reg, tmp);
+}
+
+/* PM runtime suspend */
+static int c3_isp_runtime_suspend(struct device *dev)
+{
+ struct c3_isp_device *isp = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(isp->info->clock_num, isp->clks);
+
+ return 0;
+}
+
+/* PM runtime resume */
+static int c3_isp_runtime_resume(struct device *dev)
+{
+ struct c3_isp_device *isp = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(isp->info->clock_num, isp->clks);
+}
+
+static const struct dev_pm_ops c3_isp_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(c3_isp_runtime_suspend,
+ c3_isp_runtime_resume, NULL)
+};
+
+/* IRQ handling */
+static irqreturn_t c3_isp_irq_handler(int irq, void *dev)
+{
+ struct c3_isp_device *isp = dev;
+ u32 status;
+
+ /* Read and then clear the interrupt status */
+ status = c3_isp_read(isp, ISP_TOP_RO_IRQ_STAT);
+ c3_isp_write(isp, ISP_TOP_IRQ_CLR, status);
+
+ if (status & ISP_TOP_RO_IRQ_STAT_FRM_END_MASK) {
+ c3_isp_stats_isr(isp);
+ c3_isp_params_isr(isp);
+ c3_isp_captures_isr(isp);
+ isp->frm_sequence++;
+ }
+
+ if (status & ISP_TOP_RO_IRQ_STAT_FRM_RST_MASK)
+ c3_isp_core_queue_sof(isp);
+
+ return IRQ_HANDLED;
+}
+
+/* Subdev notifier register */
+static int c3_isp_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct c3_isp_device *isp =
+ container_of(notifier, struct c3_isp_device, notifier);
+ struct media_pad *sink =
+ &isp->core.sd.entity.pads[C3_ISP_CORE_PAD_SINK_VIDEO];
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static int c3_isp_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct c3_isp_device *isp =
+ container_of(notifier, struct c3_isp_device, notifier);
+
+ return v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations c3_isp_notify_ops = {
+ .bound = c3_isp_notify_bound,
+ .complete = c3_isp_notify_complete,
+};
+
+static int c3_isp_async_nf_register(struct c3_isp_device *isp)
+{
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ asc = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
+ struct v4l2_async_connection);
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asc))
+ return PTR_ERR(asc);
+
+ isp->notifier.ops = &c3_isp_notify_ops;
+ ret = v4l2_async_nf_register(&isp->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ return ret;
+}
+
+static void c3_isp_async_nf_unregister(struct c3_isp_device *isp)
+{
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+}
+
+static int c3_isp_media_register(struct c3_isp_device *isp)
+{
+ struct media_device *media_dev = &isp->media_dev;
+ struct v4l2_device *v4l2_dev = &isp->v4l2_dev;
+ int ret;
+
+ /* Initialize media device */
+ strscpy(media_dev->model, C3_ISP_DRIVER_NAME, sizeof(media_dev->model));
+ media_dev->dev = isp->dev;
+
+ media_device_init(media_dev);
+
+ /* Initialize v4l2 device */
+ v4l2_dev->mdev = media_dev;
+ strscpy(v4l2_dev->name, C3_ISP_DRIVER_NAME, sizeof(v4l2_dev->name));
+
+ ret = v4l2_device_register(isp->dev, v4l2_dev);
+ if (ret)
+ goto err_media_dev_cleanup;
+
+ ret = media_device_register(&isp->media_dev);
+ if (ret) {
+ dev_err(isp->dev, "Failed to register media device: %d\n", ret);
+ goto err_unreg_v4l2_dev;
+ }
+
+ return 0;
+
+err_unreg_v4l2_dev:
+ v4l2_device_unregister(&isp->v4l2_dev);
+err_media_dev_cleanup:
+ media_device_cleanup(media_dev);
+ return ret;
+}
+
+static void c3_isp_media_unregister(struct c3_isp_device *isp)
+{
+ media_device_unregister(&isp->media_dev);
+ v4l2_device_unregister(&isp->v4l2_dev);
+ media_device_cleanup(&isp->media_dev);
+}
+
+static void c3_isp_remove_links(struct c3_isp_device *isp)
+{
+ unsigned int i;
+
+ media_entity_remove_links(&isp->core.sd.entity);
+
+ for (i = 0; i < C3_ISP_NUM_RSZ; i++)
+ media_entity_remove_links(&isp->resizers[i].sd.entity);
+
+ for (i = 0; i < C3_ISP_NUM_CAP_DEVS; i++)
+ media_entity_remove_links(&isp->caps[i].vdev.entity);
+}
+
+static int c3_isp_create_links(struct c3_isp_device *isp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < C3_ISP_NUM_RSZ; i++) {
+ ret = media_create_pad_link(&isp->resizers[i].sd.entity,
+ C3_ISP_RSZ_PAD_SOURCE,
+ &isp->caps[i].vdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to link rsz %u and cap %u\n", i, i);
+ goto err_remove_links;
+ }
+
+ ret = media_create_pad_link(&isp->core.sd.entity,
+ C3_ISP_CORE_PAD_SOURCE_VIDEO_0 + i,
+ &isp->resizers[i].sd.entity,
+ C3_ISP_RSZ_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to link core and rsz %u\n", i);
+ goto err_remove_links;
+ }
+ }
+
+ ret = media_create_pad_link(&isp->core.sd.entity,
+ C3_ISP_CORE_PAD_SOURCE_STATS,
+ &isp->stats.vdev.entity,
+ 0, MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(isp->dev, "Failed to link core and stats\n");
+ goto err_remove_links;
+ }
+
+ ret = media_create_pad_link(&isp->params.vdev.entity, 0,
+ &isp->core.sd.entity,
+ C3_ISP_CORE_PAD_SINK_PARAMS,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(isp->dev, "Failed to link params and core\n");
+ goto err_remove_links;
+ }
+
+ return 0;
+
+err_remove_links:
+ c3_isp_remove_links(isp);
+ return ret;
+}
+
+static int c3_isp_videos_register(struct c3_isp_device *isp)
+{
+ int ret;
+
+ ret = c3_isp_captures_register(isp);
+ if (ret)
+ return ret;
+
+ ret = c3_isp_stats_register(isp);
+ if (ret)
+ goto err_captures_unregister;
+
+ ret = c3_isp_params_register(isp);
+ if (ret)
+ goto err_stats_unregister;
+
+ ret = c3_isp_create_links(isp);
+ if (ret)
+ goto err_params_unregister;
+
+ return 0;
+
+err_params_unregister:
+ c3_isp_params_unregister(isp);
+err_stats_unregister:
+ c3_isp_stats_unregister(isp);
+err_captures_unregister:
+ c3_isp_captures_unregister(isp);
+ return ret;
+}
+
+static void c3_isp_videos_unregister(struct c3_isp_device *isp)
+{
+ c3_isp_remove_links(isp);
+ c3_isp_params_unregister(isp);
+ c3_isp_stats_unregister(isp);
+ c3_isp_captures_unregister(isp);
+}
+
+static int c3_isp_get_clocks(struct c3_isp_device *isp)
+{
+ const struct c3_isp_info *info = isp->info;
+
+ for (unsigned int i = 0; i < info->clock_num; i++)
+ isp->clks[i].id = info->clocks[i];
+
+ return devm_clk_bulk_get(isp->dev, info->clock_num, isp->clks);
+}
+
+static int c3_isp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct c3_isp_device *isp;
+ int irq;
+ int ret;
+
+ isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->info = of_device_get_match_data(dev);
+ isp->dev = dev;
+
+ isp->base = devm_platform_ioremap_resource_byname(pdev, "isp");
+ if (IS_ERR(isp->base))
+ return dev_err_probe(dev, PTR_ERR(isp->base),
+ "Failed to ioremap resource\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = c3_isp_get_clocks(isp);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ platform_set_drvdata(pdev, isp);
+
+ pm_runtime_enable(dev);
+
+ ret = c3_isp_media_register(isp);
+ if (ret)
+ goto err_runtime_disable;
+
+ ret = c3_isp_core_register(isp);
+ if (ret)
+ goto err_v4l2_unregister;
+
+ ret = c3_isp_resizers_register(isp);
+ if (ret)
+ goto err_core_unregister;
+
+ ret = c3_isp_async_nf_register(isp);
+ if (ret)
+ goto err_resizers_unregister;
+
+ ret = devm_request_irq(dev, irq,
+ c3_isp_irq_handler, IRQF_SHARED,
+ dev_driver_string(dev), isp);
+ if (ret)
+ goto err_nf_unregister;
+
+ ret = c3_isp_videos_register(isp);
+ if (ret)
+ goto err_nf_unregister;
+
+ return 0;
+
+err_nf_unregister:
+ c3_isp_async_nf_unregister(isp);
+err_resizers_unregister:
+ c3_isp_resizers_unregister(isp);
+err_core_unregister:
+ c3_isp_core_unregister(isp);
+err_v4l2_unregister:
+ c3_isp_media_unregister(isp);
+err_runtime_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void c3_isp_remove(struct platform_device *pdev)
+{
+ struct c3_isp_device *isp = platform_get_drvdata(pdev);
+
+ c3_isp_videos_unregister(isp);
+ c3_isp_async_nf_unregister(isp);
+ c3_isp_core_unregister(isp);
+ c3_isp_resizers_unregister(isp);
+ c3_isp_media_unregister(isp);
+ pm_runtime_disable(isp->dev);
+}
+
+static const struct c3_isp_info isp_info = {
+ .clocks = {"vapb", "isp0"},
+ .clock_num = 2,
+};
+
+static const struct of_device_id c3_isp_of_match[] = {
+ { .compatible = "amlogic,c3-isp",
+ .data = &isp_info },
+ { },
+};
+MODULE_DEVICE_TABLE(of, c3_isp_of_match);
+
+static struct platform_driver c3_isp_driver = {
+ .probe = c3_isp_probe,
+ .remove = c3_isp_remove,
+ .driver = {
+ .name = "c3-isp",
+ .of_match_table = c3_isp_of_match,
+ .pm = pm_ptr(&c3_isp_pm_ops),
+ },
+};
+
+module_platform_driver(c3_isp_driver);
+
+MODULE_AUTHOR("Keke Li <keke.li@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic C3 ISP pipeline");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
new file mode 100644
index 000000000000..c80667dd7662
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/cleanup.h>
+#include <linux/media/amlogic/c3-isp-config.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+/*
+ * union c3_isp_params_block - Generalisation of a parameter block
+ *
+ * This union allows the driver to treat a parameter block as a generic
+ * struct and to safely access the header and the block-specific struct
+ * without resorting to casting. The header member is accessed first, and
+ * its type field is checked to determine which of the other members
+ * should be used.
+ *
+ * @header: The shared header struct embedded as the first member
+ * of all the possible other members. This member would be
+ * accessed first and the type field checked to determine
+ * which of the other members should be accessed.
+ * @awb_gains: For header.type == C3_ISP_PARAMS_BLOCK_AWB_GAINS
+ * @awb_cfg: For header.type == C3_ISP_PARAMS_BLOCK_AWB_CONFIG
+ * @ae_cfg: For header.type == C3_ISP_PARAMS_BLOCK_AE_CONFIG
+ * @af_cfg: For header.type == C3_ISP_PARAMS_BLOCK_AF_CONFIG
+ * @pst_gamma: For header.type == C3_ISP_PARAMS_BLOCK_PST_GAMMA
+ * @ccm: For header.type == C3_ISP_PARAMS_BLOCK_CCM
+ * @csc: For header.type == C3_ISP_PARAMS_BLOCK_CSC
+ * @blc: For header.type == C3_ISP_PARAMS_BLOCK_BLC
+ */
+union c3_isp_params_block {
+ struct c3_isp_params_block_header header;
+ struct c3_isp_params_awb_gains awb_gains;
+ struct c3_isp_params_awb_config awb_cfg;
+ struct c3_isp_params_ae_config ae_cfg;
+ struct c3_isp_params_af_config af_cfg;
+ struct c3_isp_params_pst_gamma pst_gamma;
+ struct c3_isp_params_ccm ccm;
+ struct c3_isp_params_csc csc;
+ struct c3_isp_params_blc blc;
+};
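+
+/*
+ * Editor's illustration, not part of this patch: how a client is expected
+ * to append one parameter block to the data buffer, assuming each block
+ * struct embeds the shared header as its first member (as documented
+ * above). The helper name and the example gain values are hypothetical.
+ */
+static inline void c3_isp_params_fill_awb_gains_example(struct c3_isp_params_cfg *cfg)
+{
+ struct c3_isp_params_awb_gains *gains =
+ (struct c3_isp_params_awb_gains *)&cfg->data[cfg->data_size];
+
+ gains->header.type = C3_ISP_PARAMS_BLOCK_AWB_GAINS;
+ gains->header.size = sizeof(*gains);
+ gains->header.flags = C3_ISP_PARAMS_BLOCK_FL_ENABLE;
+
+ gains->gr_gain = 256; /* assumed fixed-point encoding of 1.0 */
+ gains->r_gain = 384;
+ gains->b_gain = 320;
+ gains->gb_gain = 256;
+
+ cfg->data_size += sizeof(*gains);
+}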
+
+typedef void (*c3_isp_block_handler)(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block);
+
+struct c3_isp_params_handler {
+ size_t size;
+ c3_isp_block_handler handler;
+};
+
+#define to_c3_isp_params_buffer(vbuf) \
+ container_of(vbuf, struct c3_isp_params_buffer, vb)
+
+/* Hardware configuration */
+
+static void c3_isp_params_cfg_awb_gains(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_awb_gains *awb_gains = &block->awb_gains;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_WB_EN_MASK,
+ ISP_TOP_BEO_CTRL_WB_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN0,
+ ISP_LSWB_WB_GAIN0_GR_GAIN_MASK,
+ ISP_LSWB_WB_GAIN0_GR_GAIN(awb_gains->gr_gain));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN0,
+ ISP_LSWB_WB_GAIN0_R_GAIN_MASK,
+ ISP_LSWB_WB_GAIN0_R_GAIN(awb_gains->r_gain));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN1,
+ ISP_LSWB_WB_GAIN1_B_GAIN_MASK,
+ ISP_LSWB_WB_GAIN1_B_GAIN(awb_gains->b_gain));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN1,
+ ISP_LSWB_WB_GAIN1_GB_GAIN_MASK,
+ ISP_LSWB_WB_GAIN1_GB_GAIN(awb_gains->gb_gain));
+ c3_isp_update_bits(isp, ISP_LSWB_WB_GAIN2,
+ ISP_LSWB_WB_GAIN2_IR_GAIN_MASK,
+ ISP_LSWB_WB_GAIN2_IR_GAIN(awb_gains->gb_gain));
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_WB_EN_MASK,
+ ISP_TOP_BEO_CTRL_WB_EN);
+}
+
+static void c3_isp_params_awb_wt(struct c3_isp_device *isp,
+ const struct c3_isp_params_awb_config *cfg)
+{
+ unsigned int zones_num;
+ unsigned int base;
+ unsigned int data;
+ unsigned int i;
+
+ /* Set the weight address to 0 position */
+ c3_isp_write(isp, ISP_AWB_BLK_WT_ADDR, 0);
+
+ zones_num = cfg->horiz_zones_num * cfg->vert_zones_num;
+
+ /* Need to write 8 weights at once */
+ for (i = 0; i < zones_num / 8; i++) {
+ base = i * 8;
+ data = ISP_AWB_BLK_WT_DATA_WT(0, cfg->zone_weight[base + 0]) |
+ ISP_AWB_BLK_WT_DATA_WT(1, cfg->zone_weight[base + 1]) |
+ ISP_AWB_BLK_WT_DATA_WT(2, cfg->zone_weight[base + 2]) |
+ ISP_AWB_BLK_WT_DATA_WT(3, cfg->zone_weight[base + 3]) |
+ ISP_AWB_BLK_WT_DATA_WT(4, cfg->zone_weight[base + 4]) |
+ ISP_AWB_BLK_WT_DATA_WT(5, cfg->zone_weight[base + 5]) |
+ ISP_AWB_BLK_WT_DATA_WT(6, cfg->zone_weight[base + 6]) |
+ ISP_AWB_BLK_WT_DATA_WT(7, cfg->zone_weight[base + 7]);
+ c3_isp_write(isp, ISP_AWB_BLK_WT_DATA, data);
+ }
+
+ if (zones_num % 8 == 0)
+ return;
+
+ data = 0;
+ base = i * 8;
+
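+ /* Write the last weights data */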
+ for (i = 0; i < zones_num % 8; i++)
+ data |= ISP_AWB_BLK_WT_DATA_WT(i, cfg->zone_weight[base + i]);
+
+ c3_isp_write(isp, ISP_AWB_BLK_WT_DATA, data);
+}
+
+static void c3_isp_params_awb_cood(struct c3_isp_device *isp,
+ const struct c3_isp_params_awb_config *cfg)
+{
+ unsigned int max_point_num;
+
+ /* The number of coordinate points is one more than the number of zones */
+ max_point_num = max(cfg->horiz_zones_num, cfg->vert_zones_num) + 1;
+
+ /* Set the index address to 0 position */
+ c3_isp_write(isp, ISP_AWB_IDX_ADDR, 0);
+
+ for (unsigned int i = 0; i < max_point_num; i++)
+ c3_isp_write(isp, ISP_AWB_IDX_DATA,
+ ISP_AWB_IDX_DATA_HIDX_DATA(cfg->horiz_coord[i]) |
+ ISP_AWB_IDX_DATA_VIDX_DATA(cfg->vert_coord[i]));
+}
+
+static void c3_isp_params_cfg_awb_config(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_awb_config *awb_cfg = &block->awb_cfg;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AWB_POINT_MASK,
+ ISP_TOP_3A_STAT_CRTL_AWB_POINT(awb_cfg->tap_point));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_CTRL2,
+ ISP_AWB_STAT_CTRL2_SATUR_CTRL_MASK,
+ ISP_AWB_STAT_CTRL2_SATUR_CTRL(awb_cfg->satur_vald));
+
+ c3_isp_update_bits(isp, ISP_AWB_HV_BLKNUM,
+ ISP_AWB_HV_BLKNUM_H_NUM_MASK,
+ ISP_AWB_HV_BLKNUM_H_NUM(awb_cfg->horiz_zones_num));
+ c3_isp_update_bits(isp, ISP_AWB_HV_BLKNUM,
+ ISP_AWB_HV_BLKNUM_V_NUM_MASK,
+ ISP_AWB_HV_BLKNUM_V_NUM(awb_cfg->vert_zones_num));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_RG, ISP_AWB_STAT_RG_MIN_VALUE_MASK,
+ ISP_AWB_STAT_RG_MIN_VALUE(awb_cfg->rg_min));
+ c3_isp_update_bits(isp, ISP_AWB_STAT_RG, ISP_AWB_STAT_RG_MAX_VALUE_MASK,
+ ISP_AWB_STAT_RG_MAX_VALUE(awb_cfg->rg_max));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_BG, ISP_AWB_STAT_BG_MIN_VALUE_MASK,
+ ISP_AWB_STAT_BG_MIN_VALUE(awb_cfg->bg_min));
+ c3_isp_update_bits(isp, ISP_AWB_STAT_BG, ISP_AWB_STAT_BG_MAX_VALUE_MASK,
+ ISP_AWB_STAT_BG_MAX_VALUE(awb_cfg->bg_max));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_RG_HL,
+ ISP_AWB_STAT_RG_HL_LOW_VALUE_MASK,
+ ISP_AWB_STAT_RG_HL_LOW_VALUE(awb_cfg->rg_low));
+ c3_isp_update_bits(isp, ISP_AWB_STAT_RG_HL,
+ ISP_AWB_STAT_RG_HL_HIGH_VALUE_MASK,
+ ISP_AWB_STAT_RG_HL_HIGH_VALUE(awb_cfg->rg_high));
+
+ c3_isp_update_bits(isp, ISP_AWB_STAT_BG_HL,
+ ISP_AWB_STAT_BG_HL_LOW_VALUE_MASK,
+ ISP_AWB_STAT_BG_HL_LOW_VALUE(awb_cfg->bg_low));
+ c3_isp_update_bits(isp, ISP_AWB_STAT_BG_HL,
+ ISP_AWB_STAT_BG_HL_HIGH_VALUE_MASK,
+ ISP_AWB_STAT_BG_HL_HIGH_VALUE(awb_cfg->bg_high));
+
+ c3_isp_params_awb_wt(isp, awb_cfg);
+ c3_isp_params_awb_cood(isp, awb_cfg);
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN);
+}
+
+static void c3_isp_params_ae_wt(struct c3_isp_device *isp,
+ const struct c3_isp_params_ae_config *cfg)
+{
+ unsigned int zones_num;
+ unsigned int base;
+ unsigned int data;
+ unsigned int i;
+
+ /* Set the weight address to 0 position */
+ c3_isp_write(isp, ISP_AE_BLK_WT_ADDR, 0);
+
+ zones_num = cfg->horiz_zones_num * cfg->vert_zones_num;
+
+ /* Need to write 8 weights at once */
+ for (i = 0; i < zones_num / 8; i++) {
+ base = i * 8;
+ data = ISP_AE_BLK_WT_DATA_WT(0, cfg->zone_weight[base + 0]) |
+ ISP_AE_BLK_WT_DATA_WT(1, cfg->zone_weight[base + 1]) |
+ ISP_AE_BLK_WT_DATA_WT(2, cfg->zone_weight[base + 2]) |
+ ISP_AE_BLK_WT_DATA_WT(3, cfg->zone_weight[base + 3]) |
+ ISP_AE_BLK_WT_DATA_WT(4, cfg->zone_weight[base + 4]) |
+ ISP_AE_BLK_WT_DATA_WT(5, cfg->zone_weight[base + 5]) |
+ ISP_AE_BLK_WT_DATA_WT(6, cfg->zone_weight[base + 6]) |
+ ISP_AE_BLK_WT_DATA_WT(7, cfg->zone_weight[base + 7]);
+ c3_isp_write(isp, ISP_AE_BLK_WT_DATA, data);
+ }
+
+ if (zones_num % 8 == 0)
+ return;
+
+ data = 0;
+ base = i * 8;
+
+ /* Write the last weights data */
+ for (i = 0; i < zones_num % 8; i++)
+ data |= ISP_AE_BLK_WT_DATA_WT(i, cfg->zone_weight[base + i]);
+
+ c3_isp_write(isp, ISP_AE_BLK_WT_DATA, data);
+}
+
+static void c3_isp_params_ae_cood(struct c3_isp_device *isp,
+ const struct c3_isp_params_ae_config *cfg)
+{
+ unsigned int max_point_num;
+
+ /* The number of coordinate points is one more than the number of zones */
+ max_point_num = max(cfg->horiz_zones_num, cfg->vert_zones_num) + 1;
+
+ /* Set the index address to 0 position */
+ c3_isp_write(isp, ISP_AE_IDX_ADDR, 0);
+
+ for (unsigned int i = 0; i < max_point_num; i++)
+ c3_isp_write(isp, ISP_AE_IDX_DATA,
+ ISP_AE_IDX_DATA_HIDX_DATA(cfg->horiz_coord[i]) |
+ ISP_AE_IDX_DATA_VIDX_DATA(cfg->vert_coord[i]));
+}
+
+static void c3_isp_params_cfg_ae_config(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_ae_config *ae_cfg = &block->ae_cfg;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AE_POINT_MASK,
+ ISP_TOP_3A_STAT_CRTL_AE_POINT(ae_cfg->tap_point));
+
+ if (ae_cfg->tap_point == C3_ISP_AE_STATS_TAP_GE)
+ c3_isp_update_bits(isp, ISP_AE_CTRL,
+ ISP_AE_CTRL_INPUT_2LINE_MASK,
+ ISP_AE_CTRL_INPUT_2LINE_EN);
+ else
+ c3_isp_update_bits(isp, ISP_AE_CTRL,
+ ISP_AE_CTRL_INPUT_2LINE_MASK,
+ ISP_AE_CTRL_INPUT_2LINE_DIS);
+
+ c3_isp_update_bits(isp, ISP_AE_HV_BLKNUM,
+ ISP_AE_HV_BLKNUM_H_NUM_MASK,
+ ISP_AE_HV_BLKNUM_H_NUM(ae_cfg->horiz_zones_num));
+ c3_isp_update_bits(isp, ISP_AE_HV_BLKNUM,
+ ISP_AE_HV_BLKNUM_V_NUM_MASK,
+ ISP_AE_HV_BLKNUM_V_NUM(ae_cfg->vert_zones_num));
+
+ c3_isp_params_ae_wt(isp, ae_cfg);
+ c3_isp_params_ae_cood(isp, ae_cfg);
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_EN);
+}
+
+static void c3_isp_params_af_cood(struct c3_isp_device *isp,
+ const struct c3_isp_params_af_config *cfg)
+{
+ unsigned int max_point_num;
+
+ /* The number of coordinate points is one more than the number of zones */
+ max_point_num = max(cfg->horiz_zones_num, cfg->vert_zones_num) + 1;
+
+ /* Set the index address to 0 position */
+ c3_isp_write(isp, ISP_AF_IDX_ADDR, 0);
+
+ for (unsigned int i = 0; i < max_point_num; i++)
+ c3_isp_write(isp, ISP_AF_IDX_DATA,
+ ISP_AF_IDX_DATA_HIDX_DATA(cfg->horiz_coord[i]) |
+ ISP_AF_IDX_DATA_VIDX_DATA(cfg->vert_coord[i]));
+}
+
+static void c3_isp_params_cfg_af_config(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_af_config *af_cfg = &block->af_cfg;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AF_POINT_MASK,
+ ISP_TOP_3A_STAT_CRTL_AF_POINT(af_cfg->tap_point));
+
+ c3_isp_update_bits(isp, ISP_AF_HV_BLKNUM,
+ ISP_AF_HV_BLKNUM_H_NUM_MASK,
+ ISP_AF_HV_BLKNUM_H_NUM(af_cfg->horiz_zones_num));
+ c3_isp_update_bits(isp, ISP_AF_HV_BLKNUM,
+ ISP_AF_HV_BLKNUM_V_NUM_MASK,
+ ISP_AF_HV_BLKNUM_V_NUM(af_cfg->vert_zones_num));
+
+ c3_isp_params_af_cood(isp, af_cfg);
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_EN);
+}
+
+static void c3_isp_params_cfg_pst_gamma(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_pst_gamma *gm = &block->pst_gamma;
+ unsigned int base;
+ unsigned int i;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_PST_GAMMA_EN_MASK,
+ ISP_TOP_BED_CTRL_PST_GAMMA_DIS);
+ return;
+ }
+
+ /* R, G and B channels use the same gamma lut */
+ for (unsigned int j = 0; j < 3; j++) {
+ /* Set the channel lut address */
+ c3_isp_write(isp, ISP_PST_GAMMA_LUT_ADDR,
+ ISP_PST_GAMMA_LUT_ADDR_IDX_ADDR(j));
+
+ /* Need to write 2 lut values at once */
+ for (i = 0; i < ARRAY_SIZE(gm->lut) / 2; i++) {
+ base = i * 2;
+ c3_isp_write(isp, ISP_PST_GAMMA_LUT_DATA,
+ ISP_PST_GM_LUT_DATA0(gm->lut[base]) |
+ ISP_PST_GM_LUT_DATA1(gm->lut[base + 1]));
+ }
+
+ /* Write the last one */
+ if (ARRAY_SIZE(gm->lut) % 2) {
+ base = i * 2;
+ c3_isp_write(isp, ISP_PST_GAMMA_LUT_DATA,
+ ISP_PST_GM_LUT_DATA0(gm->lut[base]));
+ }
+ }
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_PST_GAMMA_EN_MASK,
+ ISP_TOP_BED_CTRL_PST_GAMMA_EN);
+}
+
+/* Configure 3 x 3 ccm matrix */
+static void c3_isp_params_cfg_ccm(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_ccm *ccm = &block->ccm;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_CCM_EN_MASK,
+ ISP_TOP_BED_CTRL_CCM_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_CCM_MTX_00_01,
+ ISP_CCM_MTX_00_01_MTX_00_MASK,
+ ISP_CCM_MTX_00_01_MTX_00(ccm->matrix[0][0]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_00_01,
+ ISP_CCM_MTX_00_01_MTX_01_MASK,
+ ISP_CCM_MTX_00_01_MTX_01(ccm->matrix[0][1]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_02_03,
+ ISP_CCM_MTX_02_03_MTX_02_MASK,
+ ISP_CCM_MTX_02_03_MTX_02(ccm->matrix[0][2]));
+
+ c3_isp_update_bits(isp, ISP_CCM_MTX_10_11,
+ ISP_CCM_MTX_10_11_MTX_10_MASK,
+ ISP_CCM_MTX_10_11_MTX_10(ccm->matrix[1][0]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_10_11,
+ ISP_CCM_MTX_10_11_MTX_11_MASK,
+ ISP_CCM_MTX_10_11_MTX_11(ccm->matrix[1][1]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_12_13,
+ ISP_CCM_MTX_12_13_MTX_12_MASK,
+ ISP_CCM_MTX_12_13_MTX_12(ccm->matrix[1][2]));
+
+ c3_isp_update_bits(isp, ISP_CCM_MTX_20_21,
+ ISP_CCM_MTX_20_21_MTX_20_MASK,
+ ISP_CCM_MTX_20_21_MTX_20(ccm->matrix[2][0]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_20_21,
+ ISP_CCM_MTX_20_21_MTX_21_MASK,
+ ISP_CCM_MTX_20_21_MTX_21(ccm->matrix[2][1]));
+ c3_isp_update_bits(isp, ISP_CCM_MTX_22_23_RS,
+ ISP_CCM_MTX_22_23_RS_MTX_22_MASK,
+ ISP_CCM_MTX_22_23_RS_MTX_22(ccm->matrix[2][2]));
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_CCM_EN_MASK,
+ ISP_TOP_BED_CTRL_CCM_EN);
+}
+
+/* Configure color space conversion matrix parameters */
+static void c3_isp_params_cfg_csc(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_csc *csc = &block->csc;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_CM0_EN_MASK,
+ ISP_TOP_BED_CTRL_CM0_DIS);
+ return;
+ }
+
+ c3_isp_update_bits(isp, ISP_CM0_COEF00_01,
+ ISP_CM0_COEF00_01_MTX_00_MASK,
+ ISP_CM0_COEF00_01_MTX_00(csc->matrix[0][0]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF00_01,
+ ISP_CM0_COEF00_01_MTX_01_MASK,
+ ISP_CM0_COEF00_01_MTX_01(csc->matrix[0][1]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF02_10,
+ ISP_CM0_COEF02_10_MTX_02_MASK,
+ ISP_CM0_COEF02_10_MTX_02(csc->matrix[0][2]));
+
+ c3_isp_update_bits(isp, ISP_CM0_COEF02_10,
+ ISP_CM0_COEF02_10_MTX_10_MASK,
+ ISP_CM0_COEF02_10_MTX_10(csc->matrix[1][0]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF11_12,
+ ISP_CM0_COEF11_12_MTX_11_MASK,
+ ISP_CM0_COEF11_12_MTX_11(csc->matrix[1][1]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF11_12,
+ ISP_CM0_COEF11_12_MTX_12_MASK,
+ ISP_CM0_COEF11_12_MTX_12(csc->matrix[1][2]));
+
+ c3_isp_update_bits(isp, ISP_CM0_COEF20_21,
+ ISP_CM0_COEF20_21_MTX_20_MASK,
+ ISP_CM0_COEF20_21_MTX_20(csc->matrix[2][0]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF20_21,
+ ISP_CM0_COEF20_21_MTX_21_MASK,
+ ISP_CM0_COEF20_21_MTX_21(csc->matrix[2][1]));
+ c3_isp_update_bits(isp, ISP_CM0_COEF22_OUP_OFST0,
+ ISP_CM0_COEF22_OUP_OFST0_MTX_22_MASK,
+ ISP_CM0_COEF22_OUP_OFST0_MTX_22(csc->matrix[2][2]));
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_CM0_EN_MASK,
+ ISP_TOP_BED_CTRL_CM0_EN);
+}
+
+/* Set the BLC offset of each color channel */
+static void c3_isp_params_cfg_blc(struct c3_isp_device *isp,
+ const union c3_isp_params_block *block)
+{
+ const struct c3_isp_params_blc *blc = &block->blc;
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_DISABLE) {
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_BLC_EN_MASK,
+ ISP_TOP_BEO_CTRL_BLC_DIS);
+ return;
+ }
+
+ c3_isp_write(isp, ISP_LSWB_BLC_OFST0,
+ ISP_LSWB_BLC_OFST0_R_OFST(blc->r_ofst) |
+ ISP_LSWB_BLC_OFST0_GR_OFST(blc->gr_ofst));
+ c3_isp_write(isp, ISP_LSWB_BLC_OFST1,
+ ISP_LSWB_BLC_OFST1_GB_OFST(blc->gb_ofst) |
+ ISP_LSWB_BLC_OFST1_B_OFST(blc->b_ofst));
+
+ if (block->header.flags & C3_ISP_PARAMS_BLOCK_FL_ENABLE)
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_BLC_EN_MASK,
+ ISP_TOP_BEO_CTRL_BLC_EN);
+}
+
+static const struct c3_isp_params_handler c3_isp_params_handlers[] = {
+ [C3_ISP_PARAMS_BLOCK_AWB_GAINS] = {
+ .size = sizeof(struct c3_isp_params_awb_gains),
+ .handler = c3_isp_params_cfg_awb_gains,
+ },
+ [C3_ISP_PARAMS_BLOCK_AWB_CONFIG] = {
+ .size = sizeof(struct c3_isp_params_awb_config),
+ .handler = c3_isp_params_cfg_awb_config,
+ },
+ [C3_ISP_PARAMS_BLOCK_AE_CONFIG] = {
+ .size = sizeof(struct c3_isp_params_ae_config),
+ .handler = c3_isp_params_cfg_ae_config,
+ },
+ [C3_ISP_PARAMS_BLOCK_AF_CONFIG] = {
+ .size = sizeof(struct c3_isp_params_af_config),
+ .handler = c3_isp_params_cfg_af_config,
+ },
+ [C3_ISP_PARAMS_BLOCK_PST_GAMMA] = {
+ .size = sizeof(struct c3_isp_params_pst_gamma),
+ .handler = c3_isp_params_cfg_pst_gamma,
+ },
+ [C3_ISP_PARAMS_BLOCK_CCM] = {
+ .size = sizeof(struct c3_isp_params_ccm),
+ .handler = c3_isp_params_cfg_ccm,
+ },
+ [C3_ISP_PARAMS_BLOCK_CSC] = {
+ .size = sizeof(struct c3_isp_params_csc),
+ .handler = c3_isp_params_cfg_csc,
+ },
+ [C3_ISP_PARAMS_BLOCK_BLC] = {
+ .size = sizeof(struct c3_isp_params_blc),
+ .handler = c3_isp_params_cfg_blc,
+ },
+};
+
+static void c3_isp_params_cfg_blocks(struct c3_isp_params *params)
+{
+ struct c3_isp_params_cfg *config = params->buff->cfg;
+ size_t block_offset = 0;
+
+ if (WARN_ON(!config))
+ return;
+
+ /* Walk the list of parameter blocks and process them */
+ while (block_offset < config->data_size) {
+ const struct c3_isp_params_handler *block_handler;
+ const union c3_isp_params_block *block;
+
+ block = (const union c3_isp_params_block *)
+ &config->data[block_offset];
+
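+		/* Type and size were validated in c3_isp_params_vb2_buf_prepare() */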
+ block_handler = &c3_isp_params_handlers[block->header.type];
+ block_handler->handler(params->isp, block);
+
+ block_offset += block->header.size;
+ }
+}
+
+void c3_isp_params_pre_cfg(struct c3_isp_device *isp)
+{
+ struct c3_isp_params *params = &isp->params;
+
+ /* Disable some unused modules */
+ c3_isp_update_bits(isp, ISP_TOP_FEO_CTRL0,
+ ISP_TOP_FEO_CTRL0_INPUT_FMT_EN_MASK,
+ ISP_TOP_FEO_CTRL0_INPUT_FMT_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_FEO_CTRL1_0,
+ ISP_TOP_FEO_CTRL1_0_DPC_EN_MASK,
+ ISP_TOP_FEO_CTRL1_0_DPC_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FEO_CTRL1_0,
+ ISP_TOP_FEO_CTRL1_0_OG_EN_MASK,
+ ISP_TOP_FEO_CTRL1_0_OG_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL, ISP_TOP_FED_CTRL_PDPC_EN_MASK,
+ ISP_TOP_FED_CTRL_PDPC_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL,
+ ISP_TOP_FED_CTRL_RAWCNR_EN_MASK,
+ ISP_TOP_FED_CTRL_RAWCNR_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL, ISP_TOP_FED_CTRL_SNR1_EN_MASK,
+ ISP_TOP_FED_CTRL_SNR1_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL, ISP_TOP_FED_CTRL_TNR0_EN_MASK,
+ ISP_TOP_FED_CTRL_TNR0_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL,
+ ISP_TOP_FED_CTRL_CUBIC_CS_EN_MASK,
+ ISP_TOP_FED_CTRL_CUBIC_CS_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL, ISP_TOP_FED_CTRL_SQRT_EN_MASK,
+ ISP_TOP_FED_CTRL_SQRT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_FED_CTRL,
+ ISP_TOP_FED_CTRL_DGAIN_EN_MASK,
+ ISP_TOP_FED_CTRL_DGAIN_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL,
+ ISP_TOP_BEO_CTRL_INV_DGAIN_EN_MASK,
+ ISP_TOP_BEO_CTRL_INV_DGAIN_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BEO_CTRL, ISP_TOP_BEO_CTRL_EOTF_EN_MASK,
+ ISP_TOP_BEO_CTRL_EOTF_DIS);
+
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_YHS_STAT_EN_MASK,
+ ISP_TOP_BED_CTRL_YHS_STAT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_GRPH_STAT_EN_MASK,
+ ISP_TOP_BED_CTRL_GRPH_STAT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_FMETER_EN_MASK,
+ ISP_TOP_BED_CTRL_FMETER_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL, ISP_TOP_BED_CTRL_BSC_EN_MASK,
+ ISP_TOP_BED_CTRL_BSC_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL, ISP_TOP_BED_CTRL_CNR2_EN_MASK,
+ ISP_TOP_BED_CTRL_CNR2_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL, ISP_TOP_BED_CTRL_CM1_EN_MASK,
+ ISP_TOP_BED_CTRL_CM1_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_LUT3D_EN_MASK,
+ ISP_TOP_BED_CTRL_LUT3D_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL,
+ ISP_TOP_BED_CTRL_PST_TNR_LITE_EN_MASK,
+ ISP_TOP_BED_CTRL_PST_TNR_LITE_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_BED_CTRL, ISP_TOP_BED_CTRL_AMCM_EN_MASK,
+ ISP_TOP_BED_CTRL_AMCM_DIS);
+
+	/*
+	 * Disable the AE, AF and AWB statistics modules. The userspace
+	 * algorithm must configure the corresponding parameters if these
+	 * modules need to be enabled.
+	 */
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AE_STAT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AWB_STAT_DIS);
+ c3_isp_update_bits(isp, ISP_TOP_3A_STAT_CRTL,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_EN_MASK,
+ ISP_TOP_3A_STAT_CRTL_AF_STAT_DIS);
+
+ c3_isp_write(isp, ISP_LSWB_WB_LIMIT0,
+ ISP_LSWB_WB_LIMIT0_WB_LIMIT_R_MAX |
+ ISP_LSWB_WB_LIMIT0_WB_LIMIT_GR_MAX);
+ c3_isp_write(isp, ISP_LSWB_WB_LIMIT1,
+ ISP_LSWB_WB_LIMIT1_WB_LIMIT_GB_MAX |
+ ISP_LSWB_WB_LIMIT1_WB_LIMIT_B_MAX);
+
+ guard(spinlock_irqsave)(&params->buff_lock);
+
+	/* Only use the first buffer to initialize the ISP */
+ params->buff =
+ list_first_entry_or_null(&params->pending,
+ struct c3_isp_params_buffer, list);
+ if (params->buff)
+ c3_isp_params_cfg_blocks(params);
+}
+
+/* V4L2 video operations */
+
+static int c3_isp_params_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, C3_ISP_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "AML C3 ISP", sizeof(cap->card));
+
+ return 0;
+}
+
+static int c3_isp_params_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_META_FMT_C3ISP_PARAMS;
+
+ return 0;
+}
+
+static int c3_isp_params_g_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct c3_isp_params *params = video_drvdata(file);
+
+ f->fmt.meta = params->vfmt.fmt.meta;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops isp_params_v4l2_ioctl_ops = {
+ .vidioc_querycap = c3_isp_params_querycap,
+ .vidioc_enum_fmt_meta_out = c3_isp_params_enum_fmt,
+ .vidioc_g_fmt_meta_out = c3_isp_params_g_fmt,
+ .vidioc_s_fmt_meta_out = c3_isp_params_g_fmt,
+ .vidioc_try_fmt_meta_out = c3_isp_params_g_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations isp_params_v4l2_fops = {
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static int c3_isp_params_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ if (*num_planes) {
+ if (*num_planes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < sizeof(struct c3_isp_params_cfg))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = 1;
+ sizes[0] = sizeof(struct c3_isp_params_cfg);
+
+ return 0;
+}
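The two branches above follow the vb2 queue_setup contract: *num_planes is zero when invoked through VIDIOC_REQBUFS, so the driver fills in the plane count and size, and non-zero when invoked through VIDIOC_CREATE_BUFS, where the driver only validates the caller's request. A hedged userspace sketch of the latter path (the file descriptor fd is assumed open on the params video node):

	struct v4l2_create_buffers cbufs = {
		.count = 2,
		.memory = V4L2_MEMORY_MMAP,
		.format.type = V4L2_BUF_TYPE_META_OUTPUT,
		.format.fmt.meta = {
			.dataformat = V4L2_META_FMT_C3ISP_PARAMS,
			.buffersize = sizeof(struct c3_isp_params_cfg),
		},
	};

	ioctl(fd, VIDIOC_CREATE_BUFS, &cbufs);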
+
+static void c3_isp_params_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(v4l2_buf);
+ struct c3_isp_params *params = vb2_get_drv_priv(vb->vb2_queue);
+
+ guard(spinlock_irqsave)(&params->buff_lock);
+
+ list_add_tail(&buf->list, &params->pending);
+}
+
+static int c3_isp_params_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(vbuf);
+ struct c3_isp_params *params = vb2_get_drv_priv(vb->vb2_queue);
+ struct c3_isp_params_cfg *cfg = buf->cfg;
+ struct c3_isp_params_cfg *usr_cfg = vb2_plane_vaddr(vb, 0);
+ size_t payload_size = vb2_get_plane_payload(vb, 0);
+ size_t header_size = offsetof(struct c3_isp_params_cfg, data);
+ size_t block_offset = 0;
+ size_t cfg_size;
+
+ /* Payload size can't be greater than the destination buffer size */
+ if (payload_size > params->vfmt.fmt.meta.buffersize) {
+ dev_dbg(params->isp->dev,
+ "Payload size is too large: %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ /* Payload size can't be smaller than the header size */
+ if (payload_size < header_size) {
+ dev_dbg(params->isp->dev,
+ "Payload size is too small: %zu\n", payload_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Use the internal scratch buffer to avoid userspace modifying
+ * the buffer content while the driver is processing it.
+ */
+ memcpy(cfg, usr_cfg, payload_size);
+
+ /* Only v0 is supported at the moment */
+ if (cfg->version != C3_ISP_PARAMS_BUFFER_V0) {
+ dev_dbg(params->isp->dev,
+ "Invalid params buffer version: %u\n", cfg->version);
+ return -EINVAL;
+ }
+
+ /* Validate the size reported in the parameter buffer header */
+ cfg_size = header_size + cfg->data_size;
+ if (cfg_size != payload_size) {
+ dev_dbg(params->isp->dev,
+ "Data size %zu and payload size %zu are different\n",
+ cfg_size, payload_size);
+ return -EINVAL;
+ }
+
+ /* Walk the list of parameter blocks and validate them */
+ cfg_size = cfg->data_size;
+ while (cfg_size >= sizeof(struct c3_isp_params_block_header)) {
+ const struct c3_isp_params_block_header *block;
+ const struct c3_isp_params_handler *handler;
+
+ block = (struct c3_isp_params_block_header *)
+ &cfg->data[block_offset];
+
+ if (block->type >= ARRAY_SIZE(c3_isp_params_handlers)) {
+ dev_dbg(params->isp->dev,
+ "Invalid params block type\n");
+ return -EINVAL;
+ }
+
+ if (block->size > cfg_size) {
+ dev_dbg(params->isp->dev,
+ "Block size is greater than cfg size\n");
+ return -EINVAL;
+ }
+
+ if ((block->flags & (C3_ISP_PARAMS_BLOCK_FL_ENABLE |
+ C3_ISP_PARAMS_BLOCK_FL_DISABLE)) ==
+ (C3_ISP_PARAMS_BLOCK_FL_ENABLE |
+ C3_ISP_PARAMS_BLOCK_FL_DISABLE)) {
+ dev_dbg(params->isp->dev,
+ "Invalid parameters block flags\n");
+ return -EINVAL;
+ }
+
+ handler = &c3_isp_params_handlers[block->type];
+ if (block->size != handler->size) {
+ dev_dbg(params->isp->dev,
+ "Invalid params block size\n");
+ return -EINVAL;
+ }
+
+ block_offset += block->size;
+ cfg_size -= block->size;
+ }
+
+ if (cfg_size) {
+ dev_dbg(params->isp->dev,
+ "Unexpected data after the params buffer end\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
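Putting the checks above together, a hedged sketch of how userspace might assemble a minimal parameters buffer; the AWB gains payload fields are elided, and each block struct is assumed to embed struct c3_isp_params_block_header as its first member, as the driver's union access implies:

	struct c3_isp_params_cfg *cfg = buffer;	/* mmap()ed buffer plane */
	struct c3_isp_params_awb_gains gains = {
		.header = {
			.type = C3_ISP_PARAMS_BLOCK_AWB_GAINS,
			.size = sizeof(gains),
			.flags = C3_ISP_PARAMS_BLOCK_FL_ENABLE,
		},
		/* gain values elided */
	};

	cfg->version = C3_ISP_PARAMS_BUFFER_V0;
	cfg->data_size = sizeof(gains);
	memcpy(cfg->data, &gains, sizeof(gains));

The plane payload set at queue time must then equal offsetof(struct c3_isp_params_cfg, data) + cfg->data_size, or buf_prepare() rejects the buffer.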
+
+static int c3_isp_params_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_params *params = vb2_get_drv_priv(vb->vb2_queue);
+ struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(v4l2_buf);
+
+ buf->cfg = kvmalloc(params->vfmt.fmt.meta.buffersize, GFP_KERNEL);
+ if (!buf->cfg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void c3_isp_params_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(v4l2_buf);
+
+ kvfree(buf->cfg);
+ buf->cfg = NULL;
+}
+
+static void c3_isp_params_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct c3_isp_params *params = vb2_get_drv_priv(q);
+ struct c3_isp_params_buffer *buff;
+
+ guard(spinlock_irqsave)(&params->buff_lock);
+
+ while (!list_empty(&params->pending)) {
+ buff = list_first_entry(&params->pending,
+ struct c3_isp_params_buffer, list);
+ list_del(&buff->list);
+ vb2_buffer_done(&buff->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops isp_params_vb2_ops = {
+ .queue_setup = c3_isp_params_vb2_queue_setup,
+ .buf_queue = c3_isp_params_vb2_buf_queue,
+ .buf_prepare = c3_isp_params_vb2_buf_prepare,
+ .buf_init = c3_isp_params_vb2_buf_init,
+ .buf_cleanup = c3_isp_params_vb2_buf_cleanup,
+ .stop_streaming = c3_isp_params_vb2_stop_streaming,
+};
+
+int c3_isp_params_register(struct c3_isp_device *isp)
+{
+ struct c3_isp_params *params = &isp->params;
+ struct video_device *vdev = &params->vdev;
+ struct vb2_queue *vb2_q = &params->vb2_q;
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+ params->vfmt.fmt.meta.dataformat = V4L2_META_FMT_C3ISP_PARAMS;
+ params->vfmt.fmt.meta.buffersize = sizeof(struct c3_isp_params_cfg);
+ params->isp = isp;
+ INIT_LIST_HEAD(&params->pending);
+ spin_lock_init(&params->buff_lock);
+ mutex_init(&params->lock);
+
+ snprintf(vdev->name, sizeof(vdev->name), "c3-isp-params");
+ vdev->fops = &isp_params_v4l2_fops;
+ vdev->ioctl_ops = &isp_params_v4l2_ioctl_ops;
+ vdev->v4l2_dev = &isp->v4l2_dev;
+ vdev->lock = &params->lock;
+ vdev->minor = -1;
+ vdev->queue = vb2_q;
+ vdev->release = video_device_release_empty;
+ vdev->device_caps = V4L2_CAP_META_OUTPUT | V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_TX;
+ video_set_drvdata(vdev, params);
+
+ vb2_q->drv_priv = params;
+ vb2_q->mem_ops = &vb2_vmalloc_memops;
+ vb2_q->ops = &isp_params_vb2_ops;
+ vb2_q->type = V4L2_BUF_TYPE_META_OUTPUT;
+ vb2_q->io_modes = VB2_DMABUF | VB2_MMAP;
+ vb2_q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vb2_q->buf_struct_size = sizeof(struct c3_isp_params_buffer);
+ vb2_q->dev = isp->dev;
+ vb2_q->lock = &params->lock;
+ vb2_q->min_queued_buffers = 1;
+
+ ret = vb2_queue_init(vb2_q);
+ if (ret)
+		goto err_destroy;
+
+ params->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vdev->entity, 1, &params->pad);
+ if (ret)
+ goto err_queue_release;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(isp->dev,
+ "Failed to register %s: %d\n", vdev->name, ret);
+ goto err_entity_cleanup;
+ }
+
+ return 0;
+
+err_entity_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_queue_release:
+ vb2_queue_release(vb2_q);
+err_destroy:
+ mutex_destroy(&params->lock);
+ return ret;
+}
+
+void c3_isp_params_unregister(struct c3_isp_device *isp)
+{
+ struct c3_isp_params *params = &isp->params;
+
+ vb2_queue_release(&params->vb2_q);
+ media_entity_cleanup(&params->vdev.entity);
+ video_unregister_device(&params->vdev);
+ mutex_destroy(&params->lock);
+}
+
+void c3_isp_params_isr(struct c3_isp_device *isp)
+{
+ struct c3_isp_params *params = &isp->params;
+
+ guard(spinlock_irqsave)(&params->buff_lock);
+
+ params->buff =
+ list_first_entry_or_null(&params->pending,
+ struct c3_isp_params_buffer, list);
+ if (!params->buff)
+ return;
+
+ list_del(&params->buff->list);
+
+ c3_isp_params_cfg_blocks(params);
+
+ params->buff->vb.sequence = params->isp->frm_sequence;
+ params->buff->vb.vb2_buf.timestamp = ktime_get();
+ params->buff->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&params->buff->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h b/drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h
new file mode 100644
index 000000000000..fa249985a771
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-regs.h
@@ -0,0 +1,618 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#ifndef __C3_ISP_REGS_H__
+#define __C3_ISP_REGS_H__
+
+#define ISP_TOP_INPUT_SIZE 0x0000
+#define ISP_TOP_INPUT_SIZE_VERT_SIZE_MASK GENMASK(15, 0)
+#define ISP_TOP_INPUT_SIZE_VERT_SIZE(x) ((x) << 0)
+#define ISP_TOP_INPUT_SIZE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_TOP_INPUT_SIZE_HORIZ_SIZE(x) ((x) << 16)
+
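Each register below pairs field masks with value macros; an illustrative use with the driver's register helpers (not part of the patch):

	/* Program both fields in a single write... */
	c3_isp_write(isp, ISP_TOP_INPUT_SIZE,
		     ISP_TOP_INPUT_SIZE_HORIZ_SIZE(width) |
		     ISP_TOP_INPUT_SIZE_VERT_SIZE(height));

	/* ...or update one field via read-modify-write */
	c3_isp_update_bits(isp, ISP_TOP_INPUT_SIZE,
			   ISP_TOP_INPUT_SIZE_VERT_SIZE_MASK,
			   ISP_TOP_INPUT_SIZE_VERT_SIZE(height));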
+#define ISP_TOP_FRM_SIZE 0x0004
+#define ISP_TOP_FRM_SIZE_CORE_VERT_SIZE_MASK GENMASK(15, 0)
+#define ISP_TOP_FRM_SIZE_CORE_VERT_SIZE(x) ((x) << 0)
+#define ISP_TOP_FRM_SIZE_CORE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_TOP_FRM_SIZE_CORE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_TOP_HOLD_SIZE 0x0008
+#define ISP_TOP_HOLD_SIZE_CORE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_TOP_HOLD_SIZE_CORE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_TOP_PATH_EN 0x0010
+#define ISP_TOP_PATH_EN_DISP0_EN_MASK BIT(0)
+#define ISP_TOP_PATH_EN_DISP0_EN BIT(0)
+#define ISP_TOP_PATH_EN_DISP0_DIS (0 << 0)
+#define ISP_TOP_PATH_EN_DISP1_EN_MASK BIT(1)
+#define ISP_TOP_PATH_EN_DISP1_EN BIT(1)
+#define ISP_TOP_PATH_EN_DISP1_DIS (0 << 1)
+#define ISP_TOP_PATH_EN_DISP2_EN_MASK BIT(2)
+#define ISP_TOP_PATH_EN_DISP2_EN BIT(2)
+#define ISP_TOP_PATH_EN_DISP2_DIS (0 << 2)
+#define ISP_TOP_PATH_EN_WRMIF0_EN_MASK BIT(8)
+#define ISP_TOP_PATH_EN_WRMIF0_EN BIT(8)
+#define ISP_TOP_PATH_EN_WRMIF0_DIS (0 << 8)
+#define ISP_TOP_PATH_EN_WRMIF1_EN_MASK BIT(9)
+#define ISP_TOP_PATH_EN_WRMIF1_EN BIT(9)
+#define ISP_TOP_PATH_EN_WRMIF1_DIS (0 << 9)
+#define ISP_TOP_PATH_EN_WRMIF2_EN_MASK BIT(10)
+#define ISP_TOP_PATH_EN_WRMIF2_EN BIT(10)
+#define ISP_TOP_PATH_EN_WRMIF2_DIS (0 << 10)
+
+#define ISP_TOP_PATH_SEL 0x0014
+#define ISP_TOP_PATH_SEL_CORE_MASK GENMASK(18, 16)
+#define ISP_TOP_PATH_SEL_CORE_CORE_DIS (0 << 16)
+#define ISP_TOP_PATH_SEL_CORE_MIPI_CORE BIT(16)
+
+#define ISP_TOP_DISPIN_SEL 0x0018
+#define ISP_TOP_DISPIN_SEL_DISP0_MASK GENMASK(3, 0)
+#define ISP_TOP_DISPIN_SEL_DISP0_CORE_OUT (0 << 0)
+#define ISP_TOP_DISPIN_SEL_DISP0_MIPI_OUT (2 << 0)
+#define ISP_TOP_DISPIN_SEL_DISP1_MASK GENMASK(7, 4)
+#define ISP_TOP_DISPIN_SEL_DISP1_CORE_OUT (0 << 4)
+#define ISP_TOP_DISPIN_SEL_DISP1_MIPI_OUT (2 << 4)
+#define ISP_TOP_DISPIN_SEL_DISP2_MASK GENMASK(11, 8)
+#define ISP_TOP_DISPIN_SEL_DISP2_CORE_OUT (0 << 8)
+#define ISP_TOP_DISPIN_SEL_DISP2_MIPI_OUT (2 << 8)
+
+#define ISP_TOP_IRQ_EN 0x0080
+#define ISP_TOP_IRQ_EN_FRM_END_MASK BIT(0)
+#define ISP_TOP_IRQ_EN_FRM_END_EN BIT(0)
+#define ISP_TOP_IRQ_EN_FRM_END_DIS (0 << 0)
+#define ISP_TOP_IRQ_EN_FRM_RST_MASK BIT(1)
+#define ISP_TOP_IRQ_EN_FRM_RST_EN BIT(1)
+#define ISP_TOP_IRQ_EN_FRM_RST_DIS (0 << 1)
+#define ISP_TOP_IRQ_EN_3A_DMA_ERR_MASK BIT(5)
+#define ISP_TOP_IRQ_EN_3A_DMA_ERR_EN BIT(5)
+#define ISP_TOP_IRQ_EN_3A_DMA_ERR_DIS (0 << 5)
+
+#define ISP_TOP_IRQ_CLR 0x0084
+#define ISP_TOP_RO_IRQ_STAT 0x01c4
+#define ISP_TOP_RO_IRQ_STAT_FRM_END_MASK BIT(0)
+#define ISP_TOP_RO_IRQ_STAT_FRM_RST_MASK BIT(1)
+#define ISP_TOP_RO_IRQ_STAT_3A_DMA_ERR_MASK BIT(5)
+
+#define ISP_TOP_MODE_CTRL 0x0400
+#define ISP_TOP_FEO_CTRL0 0x040c
+#define ISP_TOP_FEO_CTRL0_INPUT_FMT_EN_MASK BIT(8)
+#define ISP_TOP_FEO_CTRL0_INPUT_FMT_DIS (0 << 8)
+#define ISP_TOP_FEO_CTRL0_INPUT_FMT_EN BIT(8)
+
+#define ISP_TOP_FEO_CTRL1_0 0x0410
+#define ISP_TOP_FEO_CTRL1_0_DPC_EN_MASK BIT(3)
+#define ISP_TOP_FEO_CTRL1_0_DPC_DIS (0 << 3)
+#define ISP_TOP_FEO_CTRL1_0_DPC_EN BIT(3)
+#define ISP_TOP_FEO_CTRL1_0_OG_EN_MASK BIT(5)
+#define ISP_TOP_FEO_CTRL1_0_OG_DIS (0 << 5)
+#define ISP_TOP_FEO_CTRL1_0_OG_EN BIT(5)
+
+#define ISP_TOP_FED_CTRL 0x0418
+#define ISP_TOP_FED_CTRL_PDPC_EN_MASK BIT(1)
+#define ISP_TOP_FED_CTRL_PDPC_DIS (0 << 1)
+#define ISP_TOP_FED_CTRL_PDPC_EN BIT(1)
+#define ISP_TOP_FED_CTRL_RAWCNR_EN_MASK GENMASK(6, 5)
+#define ISP_TOP_FED_CTRL_RAWCNR_DIS (0 << 5)
+#define ISP_TOP_FED_CTRL_RAWCNR_EN BIT(5)
+#define ISP_TOP_FED_CTRL_SNR1_EN_MASK BIT(9)
+#define ISP_TOP_FED_CTRL_SNR1_DIS (0 << 9)
+#define ISP_TOP_FED_CTRL_SNR1_EN BIT(9)
+#define ISP_TOP_FED_CTRL_TNR0_EN_MASK BIT(11)
+#define ISP_TOP_FED_CTRL_TNR0_DIS (0 << 11)
+#define ISP_TOP_FED_CTRL_TNR0_EN BIT(11)
+#define ISP_TOP_FED_CTRL_CUBIC_CS_EN_MASK BIT(12)
+#define ISP_TOP_FED_CTRL_CUBIC_CS_DIS (0 << 12)
+#define ISP_TOP_FED_CTRL_CUBIC_CS_EN BIT(12)
+#define ISP_TOP_FED_CTRL_SQRT_EN_MASK BIT(14)
+#define ISP_TOP_FED_CTRL_SQRT_DIS (0 << 14)
+#define ISP_TOP_FED_CTRL_SQRT_EN BIT(14)
+#define ISP_TOP_FED_CTRL_DGAIN_EN_MASK BIT(16)
+#define ISP_TOP_FED_CTRL_DGAIN_DIS (0 << 16)
+#define ISP_TOP_FED_CTRL_DGAIN_EN BIT(16)
+
+#define ISP_TOP_BEO_CTRL 0x041c
+#define ISP_TOP_BEO_CTRL_WB_EN_MASK BIT(6)
+#define ISP_TOP_BEO_CTRL_WB_DIS (0 << 6)
+#define ISP_TOP_BEO_CTRL_WB_EN BIT(6)
+#define ISP_TOP_BEO_CTRL_BLC_EN_MASK BIT(7)
+#define ISP_TOP_BEO_CTRL_BLC_DIS (0 << 7)
+#define ISP_TOP_BEO_CTRL_BLC_EN BIT(7)
+#define ISP_TOP_BEO_CTRL_INV_DGAIN_EN_MASK BIT(8)
+#define ISP_TOP_BEO_CTRL_INV_DGAIN_DIS (0 << 8)
+#define ISP_TOP_BEO_CTRL_INV_DGAIN_EN BIT(8)
+#define ISP_TOP_BEO_CTRL_EOTF_EN_MASK BIT(9)
+#define ISP_TOP_BEO_CTRL_EOTF_DIS (0 << 9)
+#define ISP_TOP_BEO_CTRL_EOTF_EN BIT(9)
+
+#define ISP_TOP_BED_CTRL 0x0420
+#define ISP_TOP_BED_CTRL_YHS_STAT_EN_MASK GENMASK(1, 0)
+#define ISP_TOP_BED_CTRL_YHS_STAT_DIS (0 << 0)
+#define ISP_TOP_BED_CTRL_YHS_STAT_EN BIT(0)
+#define ISP_TOP_BED_CTRL_GRPH_STAT_EN_MASK BIT(2)
+#define ISP_TOP_BED_CTRL_GRPH_STAT_DIS (0 << 2)
+#define ISP_TOP_BED_CTRL_GRPH_STAT_EN BIT(2)
+#define ISP_TOP_BED_CTRL_FMETER_EN_MASK BIT(3)
+#define ISP_TOP_BED_CTRL_FMETER_DIS (0 << 3)
+#define ISP_TOP_BED_CTRL_FMETER_EN BIT(3)
+#define ISP_TOP_BED_CTRL_BSC_EN_MASK BIT(10)
+#define ISP_TOP_BED_CTRL_BSC_DIS (0 << 10)
+#define ISP_TOP_BED_CTRL_BSC_EN BIT(10)
+#define ISP_TOP_BED_CTRL_CNR2_EN_MASK BIT(11)
+#define ISP_TOP_BED_CTRL_CNR2_DIS (0 << 11)
+#define ISP_TOP_BED_CTRL_CNR2_EN BIT(11)
+#define ISP_TOP_BED_CTRL_CM1_EN_MASK BIT(13)
+#define ISP_TOP_BED_CTRL_CM1_DIS (0 << 13)
+#define ISP_TOP_BED_CTRL_CM1_EN BIT(13)
+#define ISP_TOP_BED_CTRL_CM0_EN_MASK BIT(14)
+#define ISP_TOP_BED_CTRL_CM0_DIS (0 << 14)
+#define ISP_TOP_BED_CTRL_CM0_EN BIT(14)
+#define ISP_TOP_BED_CTRL_PST_GAMMA_EN_MASK BIT(16)
+#define ISP_TOP_BED_CTRL_PST_GAMMA_DIS (0 << 16)
+#define ISP_TOP_BED_CTRL_PST_GAMMA_EN BIT(16)
+#define ISP_TOP_BED_CTRL_LUT3D_EN_MASK BIT(17)
+#define ISP_TOP_BED_CTRL_LUT3D_DIS (0 << 17)
+#define ISP_TOP_BED_CTRL_LUT3D_EN BIT(17)
+#define ISP_TOP_BED_CTRL_CCM_EN_MASK BIT(18)
+#define ISP_TOP_BED_CTRL_CCM_DIS (0 << 18)
+#define ISP_TOP_BED_CTRL_CCM_EN BIT(18)
+#define ISP_TOP_BED_CTRL_PST_TNR_LITE_EN_MASK BIT(21)
+#define ISP_TOP_BED_CTRL_PST_TNR_LITE_DIS (0 << 21)
+#define ISP_TOP_BED_CTRL_PST_TNR_LITE_EN BIT(21)
+#define ISP_TOP_BED_CTRL_AMCM_EN_MASK BIT(25)
+#define ISP_TOP_BED_CTRL_AMCM_DIS (0 << 25)
+#define ISP_TOP_BED_CTRL_AMCM_EN BIT(25)
+
+#define ISP_TOP_3A_STAT_CRTL 0x0424
+#define ISP_TOP_3A_STAT_CRTL_AE_STAT_EN_MASK BIT(0)
+#define ISP_TOP_3A_STAT_CRTL_AE_STAT_DIS (0 << 0)
+#define ISP_TOP_3A_STAT_CRTL_AE_STAT_EN BIT(0)
+#define ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN_MASK BIT(1)
+#define ISP_TOP_3A_STAT_CRTL_AWB_STAT_DIS (0 << 1)
+#define ISP_TOP_3A_STAT_CRTL_AWB_STAT_EN BIT(1)
+#define ISP_TOP_3A_STAT_CRTL_AF_STAT_EN_MASK BIT(2)
+#define ISP_TOP_3A_STAT_CRTL_AF_STAT_DIS (0 << 2)
+#define ISP_TOP_3A_STAT_CRTL_AF_STAT_EN BIT(2)
+#define ISP_TOP_3A_STAT_CRTL_AWB_POINT_MASK GENMASK(6, 4)
+#define ISP_TOP_3A_STAT_CRTL_AWB_POINT(x) ((x) << 4)
+#define ISP_TOP_3A_STAT_CRTL_AE_POINT_MASK GENMASK(9, 8)
+#define ISP_TOP_3A_STAT_CRTL_AE_POINT(x) ((x) << 8)
+#define ISP_TOP_3A_STAT_CRTL_AF_POINT_MASK GENMASK(13, 12)
+#define ISP_TOP_3A_STAT_CRTL_AF_POINT(x) ((x) << 12)
+
+#define ISP_LSWB_BLC_OFST0 0x4028
+#define ISP_LSWB_BLC_OFST0_R_OFST_MASK GENMASK(15, 0)
+#define ISP_LSWB_BLC_OFST0_R_OFST(x) ((x) << 0)
+#define ISP_LSWB_BLC_OFST0_GR_OFST_MASK GENMASK(31, 16)
+#define ISP_LSWB_BLC_OFST0_GR_OFST(x) ((x) << 16)
+
+#define ISP_LSWB_BLC_OFST1 0x402c
+#define ISP_LSWB_BLC_OFST1_GB_OFST_MASK GENMASK(15, 0)
+#define ISP_LSWB_BLC_OFST1_GB_OFST(x) ((x) << 0)
+#define ISP_LSWB_BLC_OFST1_B_OFST_MASK GENMASK(31, 16)
+#define ISP_LSWB_BLC_OFST1_B_OFST(x) ((x) << 16)
+
+#define ISP_LSWB_BLC_PHSOFST 0x4034
+#define ISP_LSWB_BLC_PHSOFST_VERT_OFST_MASK GENMASK(1, 0)
+#define ISP_LSWB_BLC_PHSOFST_VERT_OFST(x) ((x) << 0)
+#define ISP_LSWB_BLC_PHSOFST_HORIZ_OFST_MASK GENMASK(3, 2)
+#define ISP_LSWB_BLC_PHSOFST_HORIZ_OFST(x) ((x) << 2)
+
+#define ISP_LSWB_WB_GAIN0 0x4038
+#define ISP_LSWB_WB_GAIN0_R_GAIN_MASK GENMASK(11, 0)
+#define ISP_LSWB_WB_GAIN0_R_GAIN(x) ((x) << 0)
+#define ISP_LSWB_WB_GAIN0_GR_GAIN_MASK GENMASK(27, 16)
+#define ISP_LSWB_WB_GAIN0_GR_GAIN(x) ((x) << 16)
+
+#define ISP_LSWB_WB_GAIN1 0x403c
+#define ISP_LSWB_WB_GAIN1_GB_GAIN_MASK GENMASK(11, 0)
+#define ISP_LSWB_WB_GAIN1_GB_GAIN(x) ((x) << 0)
+#define ISP_LSWB_WB_GAIN1_B_GAIN_MASK GENMASK(27, 16)
+#define ISP_LSWB_WB_GAIN1_B_GAIN(x) ((x) << 16)
+
+#define ISP_LSWB_WB_GAIN2 0x4040
+#define ISP_LSWB_WB_GAIN2_IR_GAIN_MASK GENMASK(11, 0)
+#define ISP_LSWB_WB_GAIN2_IR_GAIN(x) ((x) << 0)
+
+#define ISP_LSWB_WB_LIMIT0 0x4044
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_R_MASK GENMASK(15, 0)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_R(x) ((x) << 0)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_R_MAX (0x8fff << 0)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_GR_MASK GENMASK(31, 16)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_GR(x) ((x) << 16)
+#define ISP_LSWB_WB_LIMIT0_WB_LIMIT_GR_MAX (0x8fff << 16)
+
+#define ISP_LSWB_WB_LIMIT1 0x4048
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_GB_MASK GENMASK(15, 0)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_GB(x) ((x) << 0)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_GB_MAX (0x8fff << 0)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_B_MASK GENMASK(31, 16)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_B(x) ((x) << 16)
+#define ISP_LSWB_WB_LIMIT1_WB_LIMIT_B_MAX (0x8fff << 16)
+
+#define ISP_LSWB_WB_PHSOFST 0x4050
+#define ISP_LSWB_WB_PHSOFST_VERT_OFST_MASK GENMASK(1, 0)
+#define ISP_LSWB_WB_PHSOFST_VERT_OFST(x) ((x) << 0)
+#define ISP_LSWB_WB_PHSOFST_HORIZ_OFST_MASK GENMASK(3, 2)
+#define ISP_LSWB_WB_PHSOFST_HORIZ_OFST(x) ((x) << 2)
+
+#define ISP_LSWB_LNS_PHSOFST 0x4054
+#define ISP_LSWB_LNS_PHSOFST_VERT_OFST_MASK GENMASK(1, 0)
+#define ISP_LSWB_LNS_PHSOFST_VERT_OFST(x) ((x) << 0)
+#define ISP_LSWB_LNS_PHSOFST_HORIZ_OFST_MASK GENMASK(3, 2)
+#define ISP_LSWB_LNS_PHSOFST_HORIZ_OFST(x) ((x) << 2)
+
+#define ISP_DMS_COMMON_PARAM0 0x5000
+#define ISP_DMS_COMMON_PARAM0_VERT_PHS_OFST_MASK GENMASK(1, 0)
+#define ISP_DMS_COMMON_PARAM0_VERT_PHS_OFST(x) ((x) << 0)
+#define ISP_DMS_COMMON_PARAM0_HORIZ_PHS_OFST_MASK GENMASK(3, 2)
+#define ISP_DMS_COMMON_PARAM0_HORIZ_PHS_OFST(x) ((x) << 2)
+
+#define ISP_CM0_COEF00_01 0x6048
+#define ISP_CM0_COEF00_01_MTX_00_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF00_01_MTX_00(x) ((x) << 0)
+#define ISP_CM0_COEF00_01_MTX_01_MASK GENMASK(28, 16)
+#define ISP_CM0_COEF00_01_MTX_01(x) ((x) << 16)
+
+#define ISP_CM0_COEF02_10 0x604c
+#define ISP_CM0_COEF02_10_MTX_02_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF02_10_MTX_02(x) ((x) << 0)
+#define ISP_CM0_COEF02_10_MTX_10_MASK GENMASK(28, 16)
+#define ISP_CM0_COEF02_10_MTX_10(x) ((x) << 16)
+
+#define ISP_CM0_COEF11_12 0x6050
+#define ISP_CM0_COEF11_12_MTX_11_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF11_12_MTX_11(x) ((x) << 0)
+#define ISP_CM0_COEF11_12_MTX_12_MASK GENMASK(28, 16)
+#define ISP_CM0_COEF11_12_MTX_12(x) ((x) << 16)
+
+#define ISP_CM0_COEF20_21 0x6054
+#define ISP_CM0_COEF20_21_MTX_20_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF20_21_MTX_20(x) ((x) << 0)
+#define ISP_CM0_COEF20_21_MTX_21_MASK GENMASK(28, 16)
+#define ISP_CM0_COEF20_21_MTX_21(x) ((x) << 16)
+
+#define ISP_CM0_COEF22_OUP_OFST0 0x6058
+#define ISP_CM0_COEF22_OUP_OFST0_MTX_22_MASK GENMASK(12, 0)
+#define ISP_CM0_COEF22_OUP_OFST0_MTX_22(x) ((x) << 0)
+
+#define ISP_CCM_MTX_00_01 0x6098
+#define ISP_CCM_MTX_00_01_MTX_00_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_00_01_MTX_00(x) ((x) << 0)
+#define ISP_CCM_MTX_00_01_MTX_01_MASK GENMASK(28, 16)
+#define ISP_CCM_MTX_00_01_MTX_01(x) ((x) << 16)
+
+#define ISP_CCM_MTX_02_03 0x609c
+#define ISP_CCM_MTX_02_03_MTX_02_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_02_03_MTX_02(x) ((x) << 0)
+
+#define ISP_CCM_MTX_10_11 0x60A0
+#define ISP_CCM_MTX_10_11_MTX_10_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_10_11_MTX_10(x) ((x) << 0)
+#define ISP_CCM_MTX_10_11_MTX_11_MASK GENMASK(28, 16)
+#define ISP_CCM_MTX_10_11_MTX_11(x) ((x) << 16)
+
+#define ISP_CCM_MTX_12_13 0x60A4
+#define ISP_CCM_MTX_12_13_MTX_12_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_12_13_MTX_12(x) ((x) << 0)
+
+#define ISP_CCM_MTX_20_21 0x60A8
+#define ISP_CCM_MTX_20_21_MTX_20_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_20_21_MTX_20(x) ((x) << 0)
+#define ISP_CCM_MTX_20_21_MTX_21_MASK GENMASK(28, 16)
+#define ISP_CCM_MTX_20_21_MTX_21(x) ((x) << 16)
+
+#define ISP_CCM_MTX_22_23_RS 0x60Ac
+#define ISP_CCM_MTX_22_23_RS_MTX_22_MASK GENMASK(12, 0)
+#define ISP_CCM_MTX_22_23_RS_MTX_22(x) ((x) << 0)
+
+#define ISP_PST_GAMMA_LUT_ADDR 0x60cc
+#define ISP_PST_GAMMA_LUT_ADDR_IDX_ADDR(x) ((x) << 7)
+
+#define ISP_PST_GAMMA_LUT_DATA 0x60d0
+#define ISP_PST_GM_LUT_DATA0(x) (((x) & GENMASK(15, 0)) << 0)
+#define ISP_PST_GM_LUT_DATA1(x) (((x) & GENMASK(15, 0)) << 16)
+
+#define DISP0_TOP_TOP_CTRL 0x8000
+#define DISP0_TOP_TOP_CTRL_CROP2_EN_MASK BIT(5)
+#define DISP0_TOP_TOP_CTRL_CROP2_EN BIT(5)
+#define DISP0_TOP_TOP_CTRL_CROP2_DIS (0 << 5)
+
+#define DISP0_TOP_CRP2_START 0x8004
+#define DISP0_TOP_CRP2_START_V_START_MASK GENMASK(15, 0)
+#define DISP0_TOP_CRP2_START_V_START(x) ((x) << 0)
+#define DISP0_TOP_CRP2_START_H_START_MASK GENMASK(31, 16)
+#define DISP0_TOP_CRP2_START_H_START(x) ((x) << 16)
+
+#define DISP0_TOP_CRP2_SIZE 0x8008
+#define DISP0_TOP_CRP2_SIZE_V_SIZE_MASK GENMASK(15, 0)
+#define DISP0_TOP_CRP2_SIZE_V_SIZE(x) ((x) << 0)
+#define DISP0_TOP_CRP2_SIZE_H_SIZE_MASK GENMASK(31, 16)
+#define DISP0_TOP_CRP2_SIZE_H_SIZE(x) ((x) << 16)
+
+#define DISP0_TOP_OUT_SIZE 0x800c
+#define DISP0_TOP_OUT_SIZE_SCL_OUT_HEIGHT_MASK GENMASK(12, 0)
+#define DISP0_TOP_OUT_SIZE_SCL_OUT_HEIGHT(x) ((x) << 0)
+#define DISP0_TOP_OUT_SIZE_SCL_OUT_WIDTH_MASK GENMASK(28, 16)
+#define DISP0_TOP_OUT_SIZE_SCL_OUT_WIDTH(x) ((x) << 16)
+
+#define ISP_DISP0_TOP_IN_SIZE 0x804c
+#define ISP_DISP0_TOP_IN_SIZE_VSIZE_MASK GENMASK(12, 0)
+#define ISP_DISP0_TOP_IN_SIZE_VSIZE(x) ((x) << 0)
+#define ISP_DISP0_TOP_IN_SIZE_HSIZE_MASK GENMASK(28, 16)
+#define ISP_DISP0_TOP_IN_SIZE_HSIZE(x) ((x) << 16)
+
+#define DISP0_PPS_SCALE_EN 0x8200
+#define DISP0_PPS_SCALE_EN_VSC_TAP_NUM_MASK GENMASK(3, 0)
+#define DISP0_PPS_SCALE_EN_VSC_TAP_NUM(x) ((x) << 0)
+#define DISP0_PPS_SCALE_EN_HSC_TAP_NUM_MASK GENMASK(7, 4)
+#define DISP0_PPS_SCALE_EN_HSC_TAP_NUM(x) ((x) << 4)
+#define DISP0_PPS_SCALE_EN_PREVSC_FLT_NUM_MASK GENMASK(11, 8)
+#define DISP0_PPS_SCALE_EN_PREVSC_FLT_NUM(x) ((x) << 8)
+#define DISP0_PPS_SCALE_EN_PREHSC_FLT_NUM_MASK GENMASK(15, 12)
+#define DISP0_PPS_SCALE_EN_PREHSC_FLT_NUM(x) ((x) << 12)
+#define DISP0_PPS_SCALE_EN_PREVSC_RATE_MASK GENMASK(17, 16)
+#define DISP0_PPS_SCALE_EN_PREVSC_RATE(x) ((x) << 16)
+#define DISP0_PPS_SCALE_EN_PREHSC_RATE_MASK GENMASK(19, 18)
+#define DISP0_PPS_SCALE_EN_PREHSC_RATE(x) ((x) << 18)
+#define DISP0_PPS_SCALE_EN_HSC_EN_MASK BIT(20)
+#define DISP0_PPS_SCALE_EN_HSC_EN(x) ((x) << 20)
+#define DISP0_PPS_SCALE_EN_HSC_DIS (0 << 20)
+#define DISP0_PPS_SCALE_EN_VSC_EN_MASK BIT(21)
+#define DISP0_PPS_SCALE_EN_VSC_EN(x) ((x) << 21)
+#define DISP0_PPS_SCALE_EN_VSC_DIS (0 << 21)
+#define DISP0_PPS_SCALE_EN_PREVSC_EN_MASK BIT(22)
+#define DISP0_PPS_SCALE_EN_PREVSC_EN(x) ((x) << 22)
+#define DISP0_PPS_SCALE_EN_PREVSC_DIS (0 << 22)
+#define DISP0_PPS_SCALE_EN_PREHSC_EN_MASK BIT(23)
+#define DISP0_PPS_SCALE_EN_PREHSC_EN(x) ((x) << 23)
+#define DISP0_PPS_SCALE_EN_PREHSC_DIS (0 << 23)
+#define DISP0_PPS_SCALE_EN_HSC_NOR_RS_BITS_MASK GENMASK(27, 24)
+#define DISP0_PPS_SCALE_EN_HSC_NOR_RS_BITS(x) ((x) << 24)
+#define DISP0_PPS_SCALE_EN_VSC_NOR_RS_BITS_MASK GENMASK(31, 28)
+#define DISP0_PPS_SCALE_EN_VSC_NOR_RS_BITS(x) ((x) << 28)
+
+#define DISP0_PPS_VSC_START_PHASE_STEP 0x8224
+#define DISP0_PPS_VSC_START_PHASE_STEP_VERT_FRAC_MASK GENMASK(23, 0)
+#define DISP0_PPS_VSC_START_PHASE_STEP_VERT_FRAC(x) ((x) << 0)
+#define DISP0_PPS_VSC_START_PHASE_STEP_VERT_INTE_MASK GENMASK(27, 24)
+#define DISP0_PPS_VSC_START_PHASE_STEP_VERT_INTE(x) ((x) << 24)
+
+#define DISP0_PPS_HSC_START_PHASE_STEP 0x8230
+#define DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_FRAC_MASK GENMASK(23, 0)
+#define DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_FRAC(x) ((x) << 0)
+#define DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_INTE_MASK GENMASK(27, 24)
+#define DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_INTE(x) ((x) << 24)
+
+#define DISP0_PPS_444TO422 0x823c
+#define DISP0_PPS_444TO422_EN_MASK BIT(0)
+#define DISP0_PPS_444TO422_EN(x) ((x) << 0)
+
+#define ISP_SCALE0_COEF_IDX_LUMA 0x8240
+#define ISP_SCALE0_COEF_IDX_LUMA_COEF_S11_MODE_MASK BIT(9)
+#define ISP_SCALE0_COEF_IDX_LUMA_COEF_S11_MODE_EN BIT(9)
+#define ISP_SCALE0_COEF_IDX_LUMA_COEF_S11_MODE_DIS (0 << 9)
+#define ISP_SCALE0_COEF_IDX_LUMA_CTYPE_MASK GENMASK(12, 10)
+#define ISP_SCALE0_COEF_IDX_LUMA_CTYPE(x) ((x) << 10)
+
+#define ISP_SCALE0_COEF_LUMA 0x8244
+#define ISP_SCALE0_COEF_LUMA_DATA1(x) (((x) & GENMASK(10, 0)) << 0)
+#define ISP_SCALE0_COEF_LUMA_DATA0(x) (((x) & GENMASK(10, 0)) << 16)
+
+#define ISP_SCALE0_COEF_IDX_CHRO 0x8248
+#define ISP_SCALE0_COEF_IDX_CHRO_COEF_S11_MODE_MASK BIT(9)
+#define ISP_SCALE0_COEF_IDX_CHRO_COEF_S11_MODE_EN BIT(9)
+#define ISP_SCALE0_COEF_IDX_CHRO_COEF_S11_MODE_DIS (0 << 9)
+#define ISP_SCALE0_COEF_IDX_CHRO_CTYPE_MASK GENMASK(12, 10)
+#define ISP_SCALE0_COEF_IDX_CHRO_CTYPE(x) ((x) << 10)
+
+#define ISP_SCALE0_COEF_CHRO 0x824c
+#define ISP_SCALE0_COEF_CHRO_DATA1(x) (((x) & GENMASK(10, 0)) << 0)
+#define ISP_SCALE0_COEF_CHRO_DATA0(x) (((x) & GENMASK(10, 0)) << 16)
+
+#define ISP_AF_CTRL 0xa044
+#define ISP_AF_CTRL_VERT_OFST_MASK GENMASK(15, 14)
+#define ISP_AF_CTRL_VERT_OFST(x) ((x) << 14)
+#define ISP_AF_CTRL_HORIZ_OFST_MASK GENMASK(17, 16)
+#define ISP_AF_CTRL_HORIZ_OFST(x) ((x) << 16)
+
+#define ISP_AF_HV_SIZE 0xa04c
+#define ISP_AF_HV_SIZE_GLB_WIN_YSIZE_MASK GENMASK(15, 0)
+#define ISP_AF_HV_SIZE_GLB_WIN_YSIZE(x) ((x) << 0)
+#define ISP_AF_HV_SIZE_GLB_WIN_XSIZE_MASK GENMASK(31, 16)
+#define ISP_AF_HV_SIZE_GLB_WIN_XSIZE(x) ((x) << 16)
+
+#define ISP_AF_HV_BLKNUM 0xa050
+#define ISP_AF_HV_BLKNUM_V_NUM_MASK GENMASK(5, 0)
+#define ISP_AF_HV_BLKNUM_V_NUM(x) ((x) << 0)
+#define ISP_AF_HV_BLKNUM_H_NUM_MASK GENMASK(21, 16)
+#define ISP_AF_HV_BLKNUM_H_NUM(x) ((x) << 16)
+
+#define ISP_AF_EN_CTRL 0xa054
+#define ISP_AF_EN_CTRL_STAT_SEL_MASK BIT(21)
+#define ISP_AF_EN_CTRL_STAT_SEL_OLD (0 << 21)
+#define ISP_AF_EN_CTRL_STAT_SEL_NEW BIT(21)
+
+#define ISP_AF_IDX_ADDR 0xa1c0
+#define ISP_AF_IDX_DATA 0xa1c4
+#define ISP_AF_IDX_DATA_VIDX_DATA(x) (((x) & GENMASK(15, 0)) << 0)
+#define ISP_AF_IDX_DATA_HIDX_DATA(x) (((x) & GENMASK(15, 0)) << 16)
+
+#define ISP_AE_CTRL 0xa448
+#define ISP_AE_CTRL_INPUT_2LINE_MASK BIT(7)
+#define ISP_AE_CTRL_INPUT_2LINE_EN BIT(7)
+#define ISP_AE_CTRL_INPUT_2LINE_DIS (0 << 7)
+#define ISP_AE_CTRL_LUMA_MODE_MASK GENMASK(9, 8)
+#define ISP_AE_CTRL_LUMA_MODE_CUR (0 << 8)
+#define ISP_AE_CTRL_LUMA_MODE_MAX BIT(8)
+#define ISP_AE_CTRL_LUMA_MODE_FILTER (2 << 8)
+#define ISP_AE_CTRL_VERT_OFST_MASK GENMASK(25, 24)
+#define ISP_AE_CTRL_VERT_OFST(x) ((x) << 24)
+#define ISP_AE_CTRL_HORIZ_OFST_MASK GENMASK(27, 26)
+#define ISP_AE_CTRL_HORIZ_OFST(x) ((x) << 26)
+
+#define ISP_AE_HV_SIZE 0xa464
+#define ISP_AE_HV_SIZE_VERT_SIZE_MASK GENMASK(15, 0)
+#define ISP_AE_HV_SIZE_VERT_SIZE(x) ((x) << 0)
+#define ISP_AE_HV_SIZE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_AE_HV_SIZE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_AE_HV_BLKNUM 0xa468
+#define ISP_AE_HV_BLKNUM_V_NUM_MASK GENMASK(6, 0)
+#define ISP_AE_HV_BLKNUM_V_NUM(x) ((x) << 0)
+#define ISP_AE_HV_BLKNUM_H_NUM_MASK GENMASK(22, 16)
+#define ISP_AE_HV_BLKNUM_H_NUM(x) ((x) << 16)
+
+#define ISP_AE_IDX_ADDR 0xa600
+#define ISP_AE_IDX_DATA 0xa604
+#define ISP_AE_IDX_DATA_VIDX_DATA(x) (((x) & GENMASK(15, 0)) << 0)
+#define ISP_AE_IDX_DATA_HIDX_DATA(x) (((x) & GENMASK(15, 0)) << 16)
+
+#define ISP_AE_BLK_WT_ADDR 0xa608
+#define ISP_AE_BLK_WT_DATA 0xa60c
+#define ISP_AE_BLK_WT_DATA_WT(i, x) (((x) & GENMASK(3, 0)) << ((i) * 4))
+
+#define ISP_AWB_CTRL 0xa834
+#define ISP_AWB_CTRL_VERT_OFST_MASK GENMASK(1, 0)
+#define ISP_AWB_CTRL_VERT_OFST(x) ((x) << 0)
+#define ISP_AWB_CTRL_HORIZ_OFST_MASK GENMASK(3, 2)
+#define ISP_AWB_CTRL_HORIZ_OFST(x) ((x) << 2)
+
+#define ISP_AWB_HV_SIZE 0xa83c
+#define ISP_AWB_HV_SIZE_VERT_SIZE_MASK GENMASK(15, 0)
+#define ISP_AWB_HV_SIZE_VERT_SIZE(x) ((x) << 0)
+#define ISP_AWB_HV_SIZE_HORIZ_SIZE_MASK GENMASK(31, 16)
+#define ISP_AWB_HV_SIZE_HORIZ_SIZE(x) ((x) << 16)
+
+#define ISP_AWB_HV_BLKNUM 0xa840
+#define ISP_AWB_HV_BLKNUM_V_NUM_MASK GENMASK(5, 0)
+#define ISP_AWB_HV_BLKNUM_V_NUM(x) ((x) << 0)
+#define ISP_AWB_HV_BLKNUM_H_NUM_MASK GENMASK(21, 16)
+#define ISP_AWB_HV_BLKNUM_H_NUM(x) ((x) << 16)
+
+#define ISP_AWB_STAT_RG 0xa848
+#define ISP_AWB_STAT_RG_MIN_VALUE_MASK GENMASK(11, 0)
+#define ISP_AWB_STAT_RG_MIN_VALUE(x) ((x) << 0)
+#define ISP_AWB_STAT_RG_MAX_VALUE_MASK GENMASK(27, 16)
+#define ISP_AWB_STAT_RG_MAX_VALUE(x) ((x) << 16)
+
+#define ISP_AWB_STAT_BG 0xa84c
+#define ISP_AWB_STAT_BG_MIN_VALUE_MASK GENMASK(11, 0)
+#define ISP_AWB_STAT_BG_MIN_VALUE(x) ((x) << 0)
+#define ISP_AWB_STAT_BG_MAX_VALUE_MASK GENMASK(27, 16)
+#define ISP_AWB_STAT_BG_MAX_VALUE(x) ((x) << 16)
+
+#define ISP_AWB_STAT_RG_HL 0xa850
+#define ISP_AWB_STAT_RG_HL_LOW_VALUE_MASK GENMASK(11, 0)
+#define ISP_AWB_STAT_RG_HL_LOW_VALUE(x) ((x) << 0)
+#define ISP_AWB_STAT_RG_HL_HIGH_VALUE_MASK GENMASK(27, 16)
+#define ISP_AWB_STAT_RG_HL_HIGH_VALUE(x) ((x) << 16)
+
+#define ISP_AWB_STAT_BG_HL 0xa854
+#define ISP_AWB_STAT_BG_HL_LOW_VALUE_MASK GENMASK(11, 0)
+#define ISP_AWB_STAT_BG_HL_LOW_VALUE(x) ((x) << 0)
+#define ISP_AWB_STAT_BG_HL_HIGH_VALUE_MASK GENMASK(27, 16)
+#define ISP_AWB_STAT_BG_HL_HIGH_VALUE(x) ((x) << 16)
+
+#define ISP_AWB_STAT_CTRL2 0xa858
+#define ISP_AWB_STAT_CTRL2_SATUR_CTRL_MASK BIT(0)
+#define ISP_AWB_STAT_CTRL2_SATUR_CTRL(x) ((x) << 0)
+
+#define ISP_AWB_IDX_ADDR 0xaa00
+#define ISP_AWB_IDX_DATA 0xaa04
+#define ISP_AWB_IDX_DATA_VIDX_DATA(x) (((x) & GENMASK(15, 0)) << 0)
+#define ISP_AWB_IDX_DATA_HIDX_DATA(x) (((x) & GENMASK(15, 0)) << 16)
+
+#define ISP_AWB_BLK_WT_ADDR 0xaa08
+#define ISP_AWB_BLK_WT_DATA 0xaa0c
+#define ISP_AWB_BLK_WT_DATA_WT(i, x) (((x) & GENMASK(3, 0)) << ((i) * 4))
+
+#define ISP_WRMIFX3_0_CH0_CTRL0 0xc400
+#define ISP_WRMIFX3_0_CH0_CTRL0_STRIDE_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_CH0_CTRL0_STRIDE(x) ((x) << 16)
+
+#define ISP_WRMIFX3_0_CH0_CTRL1 0xc404
+#define ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_MODE_MASK GENMASK(30, 27)
+#define ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_8BITS BIT(27)
+#define ISP_WRMIFX3_0_CH0_CTRL1_PIX_BITS_16BITS (2 << 27)
+
+#define ISP_WRMIFX3_0_CH1_CTRL0 0xc408
+#define ISP_WRMIFX3_0_CH1_CTRL0_STRIDE_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_CH1_CTRL0_STRIDE(x) ((x) << 16)
+
+#define ISP_WRMIFX3_0_CH1_CTRL1 0xc40c
+#define ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_MODE_MASK GENMASK(30, 27)
+#define ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_8BITS BIT(27)
+#define ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_16BITS (2 << 27)
+#define ISP_WRMIFX3_0_CH1_CTRL1_PIX_BITS_32BITS (3 << 27)
+
+#define ISP_WRMIFX3_0_WIN_LUMA_H 0xc420
+#define ISP_WRMIFX3_0_WIN_LUMA_H_LUMA_HEND_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_WIN_LUMA_H_LUMA_HEND(x) (((x) - 1) << 16)
+
+#define ISP_WRMIFX3_0_WIN_LUMA_V 0xc424
+#define ISP_WRMIFX3_0_WIN_LUMA_V_LUMA_VEND_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_WIN_LUMA_V_LUMA_VEND(x) (((x) - 1) << 16)
+
+#define ISP_WRMIFX3_0_WIN_CHROM_H 0xc428
+#define ISP_WRMIFX3_0_WIN_CHROM_H_CHROM_HEND_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_WIN_CHROM_H_CHROM_HEND(x) (((x) - 1) << 16)
+
+#define ISP_WRMIFX3_0_WIN_CHROM_V 0xc42c
+#define ISP_WRMIFX3_0_WIN_CHROM_V_CHROM_VEND_MASK GENMASK(28, 16)
+#define ISP_WRMIFX3_0_WIN_CHROM_V_CHROM_VEND(x) (((x) - 1) << 16)
+
+#define ISP_WRMIFX3_0_CH0_BADDR 0xc440
+#define ISP_WRMIFX3_0_CH0_BASE_ADDR(x) ((x) >> 4)
+
+#define ISP_WRMIFX3_0_CH1_BADDR 0xc444
+#define ISP_WRMIFX3_0_CH1_BASE_ADDR(x) ((x) >> 4)
+
+#define ISP_WRMIFX3_0_FMT_SIZE 0xc464
+#define ISP_WRMIFX3_0_FMT_SIZE_HSIZE_MASK GENMASK(15, 0)
+#define ISP_WRMIFX3_0_FMT_SIZE_HSIZE(x) ((x) << 0)
+#define ISP_WRMIFX3_0_FMT_SIZE_VSIZE_MASK GENMASK(31, 16)
+#define ISP_WRMIFX3_0_FMT_SIZE_VSIZE(x) ((x) << 16)
+
+#define ISP_WRMIFX3_0_FMT_CTRL 0xc468
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_MASK GENMASK(1, 0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_8BIT (0 << 0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_10BIT BIT(0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_12BIT (2 << 0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_IBITS_16BIT (3 << 0)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_MASK BIT(2)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_VU (0 << 2)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_UV_SWAP_UV BIT(2)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_MASK GENMASK(5, 4)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X1 (0 << 4)
+#define ISP_WRMIFX3_0_FMT_CTRL_MTX_PLANE_X2 BIT(4)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_MASK GENMASK(18, 16)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV422 BIT(16)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_YUV420 (2 << 16)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_Y_ONLY (3 << 16)
+#define ISP_WRMIFX3_0_FMT_CTRL_MODE_OUT_RAW (4 << 16)
+
+#define VIU_DMAWR_BADDR0 0xc840
+#define VIU_DMAWR_BADDR0_AF_STATS_BASE_ADDR_MASK GENMASK(27, 0)
+#define VIU_DMAWR_BADDR0_AF_STATS_BASE_ADDR(x) ((x) >> 4)
+
+#define VIU_DMAWR_BADDR1 0xc844
+#define VIU_DMAWR_BADDR1_AWB_STATS_BASE_ADDR_MASK GENMASK(27, 0)
+#define VIU_DMAWR_BADDR1_AWB_STATS_BASE_ADDR(x) ((x) >> 4)
+
+#define VIU_DMAWR_BADDR2 0xc848
+#define VIU_DMAWR_BADDR2_AE_STATS_BASE_ADDR_MASK GENMASK(27, 0)
+#define VIU_DMAWR_BADDR2_AE_STATS_BASE_ADDR(x) ((x) >> 4)
+
+#define VIU_DMAWR_SIZE0 0xc854
+#define VIU_DMAWR_SIZE0_AF_STATS_SIZE_MASK GENMASK(15, 0)
+#define VIU_DMAWR_SIZE0_AF_STATS_SIZE(x) ((x) << 0)
+#define VIU_DMAWR_SIZE0_AWB_STATS_SIZE_MASK GENMASK(31, 16)
+#define VIU_DMAWR_SIZE0_AWB_STATS_SIZE(x) ((x) << 16)
+
+#define VIU_DMAWR_SIZE1 0xc858
+#define VIU_DMAWR_SIZE1_AE_STATS_SIZE_MASK GENMASK(15, 0)
+#define VIU_DMAWR_SIZE1_AE_STATS_SIZE(x) ((x) << 0)
+
+#endif
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c
new file mode 100644
index 000000000000..453a889e0b27
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-resizer.c
@@ -0,0 +1,892 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+#define C3_ISP_RSZ_DEF_PAD_FMT MEDIA_BUS_FMT_YUV10_1X30
+#define C3_ISP_DISP_REG(base, id) ((base) + (id) * 0x400)
+#define C3_ISP_PPS_LUT_H_NUM 33
+#define C3_ISP_PPS_LUT_CTYPE_0 0
+#define C3_ISP_PPS_LUT_CTYPE_2 2
+#define C3_ISP_SCL_EN 1
+#define C3_ISP_SCL_DIS 0
+
+/*
+ * struct c3_isp_rsz_format_info - ISP resizer format information
+ *
+ * @mbus_code: the mbus code
+ * @pads: bitmask detailing valid pads for this mbus_code
+ * @is_raw: true if the mbus code is a RAW format
+ */
+struct c3_isp_rsz_format_info {
+ u32 mbus_code;
+ u32 pads;
+ bool is_raw;
+};
+
+static const struct c3_isp_rsz_format_info c3_isp_rsz_fmts[] = {
+ /* RAW formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = true,
+ }, {
+ .mbus_code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = true,
+ },
+ /* YUV formats */
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUV10_1X30,
+ .pads = BIT(C3_ISP_RSZ_PAD_SINK)
+ | BIT(C3_ISP_RSZ_PAD_SOURCE),
+ .is_raw = false,
+ },
+};
+
+/*
+ * struct c3_isp_pps_io_size - ISP scaler input and output size
+ *
+ * @thsize: input horizontal size after preprocessing
+ * @tvsize: input vertical size after preprocessing
+ * @ohsize: output horizontal size
+ * @ovsize: output vertical size
+ * @ihsize: input horizontal size
+ * @max_hsize: maximum horizontal size
+ */
+struct c3_isp_pps_io_size {
+ u32 thsize;
+ u32 tvsize;
+ u32 ohsize;
+ u32 ovsize;
+ u32 ihsize;
+ u32 max_hsize;
+};
+
+/* The default (normalized) filter coefficients of the PPS module */
+static const int c3_isp_pps_lut[C3_ISP_PPS_LUT_H_NUM][4] = {
+ { 0, 511, 0, 0}, { -5, 511, 5, 0}, {-10, 511, 11, 0},
+ {-14, 510, 17, -1}, {-18, 508, 23, -1}, {-22, 506, 29, -1},
+ {-25, 503, 36, -2}, {-28, 500, 43, -3}, {-32, 496, 51, -3},
+ {-34, 491, 59, -4}, {-37, 487, 67, -5}, {-39, 482, 75, -6},
+ {-41, 476, 84, -7}, {-42, 470, 92, -8}, {-44, 463, 102, -9},
+ {-45, 456, 111, -10}, {-45, 449, 120, -12}, {-47, 442, 130, -13},
+ {-47, 434, 140, -15}, {-47, 425, 151, -17}, {-47, 416, 161, -18},
+ {-47, 407, 172, -20}, {-47, 398, 182, -21}, {-47, 389, 193, -23},
+ {-46, 379, 204, -25}, {-45, 369, 215, -27}, {-44, 358, 226, -28},
+ {-43, 348, 237, -30}, {-43, 337, 249, -31}, {-41, 326, 260, -33},
+ {-40, 316, 271, -35}, {-39, 305, 282, -36}, {-37, 293, 293, -37}
+};
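Each row is one 4-tap filter phase whose coefficients sum to 511 or 512, matching the 9-bit normalization shift programmed via DISP0_PPS_SCALE_EN_HSC/VSC_NOR_RS_BITS(9) further down. A throwaway sketch of that invariant, not part of the driver:

	for (unsigned int i = 0; i < C3_ISP_PPS_LUT_H_NUM; i++) {
		int sum = c3_isp_pps_lut[i][0] + c3_isp_pps_lut[i][1] +
			  c3_isp_pps_lut[i][2] + c3_isp_pps_lut[i][3];

		WARN_ON(sum < 511 || sum > 512);
	}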
+
+static const struct c3_isp_rsz_format_info
+*rsz_find_format_by_code(u32 code, u32 pad)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_isp_rsz_fmts); i++) {
+ const struct c3_isp_rsz_format_info *info = &c3_isp_rsz_fmts[i];
+
+ if (info->mbus_code == code && info->pads & BIT(pad))
+ return info;
+ }
+
+ return NULL;
+}
+
+static const struct c3_isp_rsz_format_info
+*rsz_find_format_by_index(u32 index, u32 pad)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_isp_rsz_fmts); i++) {
+ const struct c3_isp_rsz_format_info *info = &c3_isp_rsz_fmts[i];
+
+ if (!(info->pads & BIT(pad)))
+ continue;
+
+ if (!index)
+ return info;
+
+ index--;
+ }
+
+ return NULL;
+}
+
+static void c3_isp_rsz_pps_size(struct c3_isp_resizer *rsz,
+ struct c3_isp_pps_io_size *io_size)
+{
+ int thsize = io_size->thsize;
+ int tvsize = io_size->tvsize;
+ u32 ohsize = io_size->ohsize;
+ u32 ovsize = io_size->ovsize;
+ u32 ihsize = io_size->ihsize;
+ u32 max_hsize = io_size->max_hsize;
+ int h_int;
+ int v_int;
+ int h_fract;
+ int v_fract;
+ int yuv444to422_en;
+
+	/* Calculate the integer part of horizontal scaler step */
+ h_int = thsize / ohsize;
+
+	/* Calculate the integer part of vertical scaler step */
+ v_int = tvsize / ovsize;
+
+ /*
+	 * Calculate the fraction part of horizontal scaler step.
+ * step_h_fraction = (source / dest) * 2^24,
+ * so step_h_fraction = ((source << 12) / dest) << 12.
+ */
+ h_fract = ((thsize << 12) / ohsize) << 12;
+
+ /*
+	 * Calculate the fraction part of vertical scaler step.
+ * step_v_fraction = (source / dest) * 2^24,
+ * so step_v_fraction = ((source << 12) / dest) << 12.
+ */
+ v_fract = ((tvsize << 12) / ovsize) << 12;
+
+ yuv444to422_en = ihsize > (max_hsize / 2) ? 1 : 0;
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_444TO422, rsz->id),
+ DISP0_PPS_444TO422_EN_MASK,
+ DISP0_PPS_444TO422_EN(yuv444to422_en));
+
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_VSC_START_PHASE_STEP, rsz->id),
+ DISP0_PPS_VSC_START_PHASE_STEP_VERT_FRAC(v_fract) |
+ DISP0_PPS_VSC_START_PHASE_STEP_VERT_INTE(v_int));
+
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_HSC_START_PHASE_STEP, rsz->id),
+ DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_FRAC(h_fract) |
+ DISP0_PPS_HSC_START_PHASE_STEP_HORIZ_INTE(h_int));
+}
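A worked example of the fixed-point step encoding above, with assumed sizes thsize = 1920 and ohsize = 1280:

	h_int   = 1920 / 1280                 = 1
	h_fract = ((1920 << 12) / 1280) << 12 = 0x1800000

The 24-bit HORIZ_FRAC field keeps bits 23:0 of h_fract, i.e. 0x800000 = 0.5 * 2^24, while HORIZ_INTE holds 1: a step of 1.5 input pixels per output pixel, as expected for a 1920-to-1280 downscale.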
+
+static void c3_isp_rsz_pps_lut(struct c3_isp_resizer *rsz, u32 ctype)
+{
+ unsigned int i;
+
+	/*
+	 * The default value of this register is 0, so only
+	 * SCALE_LUMA_COEF_S11_MODE and SCALE_LUMA_CTYPE need to be set. This
+	 * register must be written in a single access.
+	 */
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_IDX_LUMA, rsz->id),
+ ISP_SCALE0_COEF_IDX_LUMA_COEF_S11_MODE_EN |
+ ISP_SCALE0_COEF_IDX_LUMA_CTYPE(ctype));
+
+ for (i = 0; i < C3_ISP_PPS_LUT_H_NUM; i++) {
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_LUMA, rsz->id),
+ ISP_SCALE0_COEF_LUMA_DATA0(c3_isp_pps_lut[i][0]) |
+ ISP_SCALE0_COEF_LUMA_DATA1(c3_isp_pps_lut[i][1]));
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_LUMA, rsz->id),
+ ISP_SCALE0_COEF_LUMA_DATA0(c3_isp_pps_lut[i][2]) |
+ ISP_SCALE0_COEF_LUMA_DATA1(c3_isp_pps_lut[i][3]));
+ }
+
+	/*
+	 * The default value of this register is 0, so only
+	 * SCALE_CHRO_COEF_S11_MODE and SCALE_CHRO_CTYPE need to be set. This
+	 * register must be written in a single access.
+	 */
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_IDX_CHRO, rsz->id),
+ ISP_SCALE0_COEF_IDX_CHRO_COEF_S11_MODE_EN |
+ ISP_SCALE0_COEF_IDX_CHRO_CTYPE(ctype));
+
+ for (i = 0; i < C3_ISP_PPS_LUT_H_NUM; i++) {
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_CHRO, rsz->id),
+ ISP_SCALE0_COEF_CHRO_DATA0(c3_isp_pps_lut[i][0]) |
+ ISP_SCALE0_COEF_CHRO_DATA1(c3_isp_pps_lut[i][1]));
+ c3_isp_write(rsz->isp,
+ C3_ISP_DISP_REG(ISP_SCALE0_COEF_CHRO, rsz->id),
+ ISP_SCALE0_COEF_CHRO_DATA0(c3_isp_pps_lut[i][2]) |
+ ISP_SCALE0_COEF_CHRO_DATA1(c3_isp_pps_lut[i][3]));
+ }
+}
+
+static void c3_isp_rsz_pps_disable(struct c3_isp_resizer *rsz)
+{
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_HSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_HSC_DIS);
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_VSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_VSC_DIS);
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREVSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_PREVSC_DIS);
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREHSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_PREHSC_DIS);
+}
+
+static int c3_isp_rsz_pps_enable(struct c3_isp_resizer *rsz,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_rect *crop;
+ struct v4l2_rect *cmps;
+ int max_hsize;
+ int hsc_en, vsc_en;
+ int preh_en, prev_en;
+ u32 prehsc_rate;
+ u32 prevsc_flt_num;
+ int pre_vscale_max_hsize;
+ u32 ihsize_after_pre_hsc;
+ u32 ihsize_after_pre_hsc_alt;
+ u32 vsc_tap_num_alt;
+ u32 ihsize;
+ u32 ivsize;
+ struct c3_isp_pps_io_size io_size;
+
+ crop = v4l2_subdev_state_get_crop(state, C3_ISP_RSZ_PAD_SINK);
+ cmps = v4l2_subdev_state_get_compose(state, C3_ISP_RSZ_PAD_SINK);
+
+ ihsize = crop->width;
+ ivsize = crop->height;
+
+ hsc_en = (ihsize == cmps->width) ? C3_ISP_SCL_DIS : C3_ISP_SCL_EN;
+ vsc_en = (ivsize == cmps->height) ? C3_ISP_SCL_DIS : C3_ISP_SCL_EN;
+
+	/* Disable the PPS when no scaling is needed */
+ if (!hsc_en && !vsc_en) {
+ c3_isp_rsz_pps_disable(rsz);
+ return 0;
+ }
+
+	/* Pre-scaling needs to be enabled if the downscaling factor exceeds 4 */
+ preh_en = (ihsize > cmps->width * 4) ? C3_ISP_SCL_EN : C3_ISP_SCL_DIS;
+ prev_en = (ivsize > cmps->height * 4) ? C3_ISP_SCL_EN : C3_ISP_SCL_DIS;
+
+ if (rsz->id == C3_ISP_RSZ_2) {
+ max_hsize = C3_ISP_MAX_WIDTH;
+
+ /* Set vertical tap number */
+ prevsc_flt_num = 4;
+
+ /* Set the max hsize of pre-vertical scale */
+ pre_vscale_max_hsize = max_hsize / 2;
+ } else {
+ max_hsize = C3_ISP_DEFAULT_WIDTH;
+
+		/* Set vertical tap number and the max hsize of pre-vertical scale */
+ if (ihsize > (max_hsize / 2) &&
+ ihsize <= max_hsize && prev_en) {
+ prevsc_flt_num = 2;
+ pre_vscale_max_hsize = max_hsize;
+ } else {
+ prevsc_flt_num = 4;
+ pre_vscale_max_hsize = max_hsize / 2;
+ }
+ }
+
+	/*
+	 * Set the pre-horizontal scale rate and the hsize after
+	 * pre-horizontal scaling.
+	 */
+ if (preh_en) {
+ prehsc_rate = 1;
+ ihsize_after_pre_hsc = DIV_ROUND_UP(ihsize, 2);
+ } else {
+ prehsc_rate = 0;
+ ihsize_after_pre_hsc = ihsize;
+ }
+
+	/* Change the pre-horizontal scale rate */
+ if (prev_en && ihsize_after_pre_hsc >= pre_vscale_max_hsize)
+ prehsc_rate += 1;
+
+	/* Set the actual hsize after pre-horizontal scaling */
+ if (preh_en)
+ ihsize_after_pre_hsc_alt =
+ DIV_ROUND_UP(ihsize, 1 << prehsc_rate);
+ else
+ ihsize_after_pre_hsc_alt = ihsize;
+
+ /* Set vertical scaler bank length */
+ if (ihsize_after_pre_hsc_alt <= (max_hsize / 2))
+ vsc_tap_num_alt = 4;
+ else if (ihsize_after_pre_hsc_alt <= max_hsize)
+ vsc_tap_num_alt = prev_en ? 2 : 4;
+ else
+ vsc_tap_num_alt = prev_en ? 4 : 2;
+
+ io_size.thsize = ihsize_after_pre_hsc_alt;
+ io_size.tvsize = prev_en ? DIV_ROUND_UP(ivsize, 2) : ivsize;
+ io_size.ohsize = cmps->width;
+ io_size.ovsize = cmps->height;
+ io_size.ihsize = ihsize;
+ io_size.max_hsize = max_hsize;
+
+ c3_isp_rsz_pps_size(rsz, &io_size);
+ c3_isp_rsz_pps_lut(rsz, C3_ISP_PPS_LUT_CTYPE_0);
+ c3_isp_rsz_pps_lut(rsz, C3_ISP_PPS_LUT_CTYPE_2);
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_VSC_TAP_NUM_MASK,
+ DISP0_PPS_SCALE_EN_VSC_TAP_NUM(vsc_tap_num_alt));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREVSC_FLT_NUM_MASK,
+ DISP0_PPS_SCALE_EN_PREVSC_FLT_NUM(prevsc_flt_num));
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREVSC_RATE_MASK,
+ DISP0_PPS_SCALE_EN_PREVSC_RATE(prev_en));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREHSC_RATE_MASK,
+ DISP0_PPS_SCALE_EN_PREHSC_RATE(prehsc_rate));
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_HSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_HSC_EN(hsc_en));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_VSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_VSC_EN(vsc_en));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREVSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_PREVSC_EN(prev_en));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_PREHSC_EN_MASK,
+ DISP0_PPS_SCALE_EN_PREHSC_EN(preh_en));
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_HSC_NOR_RS_BITS_MASK,
+ DISP0_PPS_SCALE_EN_HSC_NOR_RS_BITS(9));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_PPS_SCALE_EN, rsz->id),
+ DISP0_PPS_SCALE_EN_VSC_NOR_RS_BITS_MASK,
+ DISP0_PPS_SCALE_EN_VSC_NOR_RS_BITS(9));
+
+ return 0;
+}
+
+static void c3_isp_rsz_start(struct c3_isp_resizer *rsz,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ const struct c3_isp_rsz_format_info *rsz_fmt;
+ struct v4l2_rect *sink_crop;
+ u32 mask;
+ u32 val;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SINK);
+ sink_crop = v4l2_subdev_state_get_crop(state, C3_ISP_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SOURCE);
+ rsz_fmt = rsz_find_format_by_code(sink_fmt->code, C3_ISP_RSZ_PAD_SINK);
+
+ if (rsz->id == C3_ISP_RSZ_0) {
+ mask = ISP_TOP_DISPIN_SEL_DISP0_MASK;
+ val = rsz_fmt->is_raw ? ISP_TOP_DISPIN_SEL_DISP0_MIPI_OUT
+ : ISP_TOP_DISPIN_SEL_DISP0_CORE_OUT;
+ } else if (rsz->id == C3_ISP_RSZ_1) {
+ mask = ISP_TOP_DISPIN_SEL_DISP1_MASK;
+ val = rsz_fmt->is_raw ? ISP_TOP_DISPIN_SEL_DISP1_MIPI_OUT
+ : ISP_TOP_DISPIN_SEL_DISP1_CORE_OUT;
+ } else {
+ mask = ISP_TOP_DISPIN_SEL_DISP2_MASK;
+ val = rsz_fmt->is_raw ? ISP_TOP_DISPIN_SEL_DISP2_MIPI_OUT
+ : ISP_TOP_DISPIN_SEL_DISP2_CORE_OUT;
+ }
+
+ c3_isp_update_bits(rsz->isp, ISP_TOP_DISPIN_SEL, mask, val);
+
+ c3_isp_write(rsz->isp, C3_ISP_DISP_REG(ISP_DISP0_TOP_IN_SIZE, rsz->id),
+ ISP_DISP0_TOP_IN_SIZE_HSIZE(sink_fmt->width) |
+ ISP_DISP0_TOP_IN_SIZE_VSIZE(sink_fmt->height));
+
+ c3_isp_write(rsz->isp, C3_ISP_DISP_REG(DISP0_TOP_CRP2_START, rsz->id),
+ DISP0_TOP_CRP2_START_V_START(sink_crop->top) |
+ DISP0_TOP_CRP2_START_H_START(sink_crop->left));
+
+ c3_isp_write(rsz->isp, C3_ISP_DISP_REG(DISP0_TOP_CRP2_SIZE, rsz->id),
+ DISP0_TOP_CRP2_SIZE_V_SIZE(sink_crop->height) |
+ DISP0_TOP_CRP2_SIZE_H_SIZE(sink_crop->width));
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_TOP_TOP_CTRL, rsz->id),
+ DISP0_TOP_TOP_CTRL_CROP2_EN_MASK,
+ DISP0_TOP_TOP_CTRL_CROP2_EN);
+
+ if (!rsz_fmt->is_raw)
+ c3_isp_rsz_pps_enable(rsz, state);
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_TOP_OUT_SIZE, rsz->id),
+ DISP0_TOP_OUT_SIZE_SCL_OUT_HEIGHT_MASK,
+ DISP0_TOP_OUT_SIZE_SCL_OUT_HEIGHT(src_fmt->height));
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_TOP_OUT_SIZE, rsz->id),
+ DISP0_TOP_OUT_SIZE_SCL_OUT_WIDTH_MASK,
+ DISP0_TOP_OUT_SIZE_SCL_OUT_WIDTH(src_fmt->width));
+
+ if (rsz->id == C3_ISP_RSZ_0) {
+ mask = ISP_TOP_PATH_EN_DISP0_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP0_EN;
+ } else if (rsz->id == C3_ISP_RSZ_1) {
+ mask = ISP_TOP_PATH_EN_DISP1_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP1_EN;
+ } else {
+ mask = ISP_TOP_PATH_EN_DISP2_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP2_EN;
+ }
+
+ c3_isp_update_bits(rsz->isp, ISP_TOP_PATH_EN, mask, val);
+}
+
+static void c3_isp_rsz_stop(struct c3_isp_resizer *rsz)
+{
+ u32 mask;
+ u32 val;
+
+ if (rsz->id == C3_ISP_RSZ_0) {
+ mask = ISP_TOP_PATH_EN_DISP0_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP0_DIS;
+ } else if (rsz->id == C3_ISP_RSZ_1) {
+ mask = ISP_TOP_PATH_EN_DISP1_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP1_DIS;
+ } else {
+ mask = ISP_TOP_PATH_EN_DISP2_EN_MASK;
+ val = ISP_TOP_PATH_EN_DISP2_DIS;
+ }
+
+ c3_isp_update_bits(rsz->isp, ISP_TOP_PATH_EN, mask, val);
+
+ c3_isp_update_bits(rsz->isp,
+ C3_ISP_DISP_REG(DISP0_TOP_TOP_CTRL, rsz->id),
+ DISP0_TOP_TOP_CTRL_CROP2_EN_MASK,
+ DISP0_TOP_TOP_CTRL_CROP2_DIS);
+
+ c3_isp_rsz_pps_disable(rsz);
+}
+
+static int c3_isp_rsz_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_isp_resizer *rsz = v4l2_get_subdevdata(sd);
+
+ c3_isp_rsz_start(rsz, state);
+
+ c3_isp_params_pre_cfg(rsz->isp);
+ c3_isp_stats_pre_cfg(rsz->isp);
+
+ return v4l2_subdev_enable_streams(rsz->src_sd, rsz->src_pad, BIT(0));
+}
+
+static int c3_isp_rsz_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_isp_resizer *rsz = v4l2_get_subdevdata(sd);
+
+ c3_isp_rsz_stop(rsz);
+
+ return v4l2_subdev_disable_streams(rsz->src_sd, rsz->src_pad, BIT(0));
+}
+
+static int c3_isp_rsz_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct c3_isp_rsz_format_info *info;
+
+ info = rsz_find_format_by_index(code->index, code->pad);
+ if (!info)
+ return -EINVAL;
+
+ code->code = info->mbus_code;
+
+ return 0;
+}
+
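+/*
+ * Setting the sink format resets the sink crop and compose rectangles
+ * to the full frame and propagates the result to the source pad: for
+ * example (illustrative values, assuming the size lies within the
+ * supported limits), a 1280x720 sink format yields a 1280x720 crop and
+ * compose rectangle at (0, 0) and a 1280x720 source format.
+ */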
+static void c3_isp_rsz_set_sink_fmt(struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_rect *sink_crop;
+ struct v4l2_rect *sink_cmps;
+ const struct c3_isp_rsz_format_info *rsz_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, format->pad);
+ sink_crop = v4l2_subdev_state_get_crop(state, format->pad);
+ sink_cmps = v4l2_subdev_state_get_compose(state, format->pad);
+ src_fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SOURCE);
+
+ rsz_fmt = rsz_find_format_by_code(format->format.code, format->pad);
+ if (rsz_fmt)
+ sink_fmt->code = format->format.code;
+ else
+ sink_fmt->code = C3_ISP_RSZ_DEF_PAD_FMT;
+
+ sink_fmt->width = clamp_t(u32, format->format.width,
+ C3_ISP_MIN_WIDTH, C3_ISP_MAX_WIDTH);
+ sink_fmt->height = clamp_t(u32, format->format.height,
+ C3_ISP_MIN_HEIGHT, C3_ISP_MAX_HEIGHT);
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (rsz_fmt && rsz_fmt->is_raw) {
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ } else {
+ sink_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ }
+
+ sink_crop->width = sink_fmt->width;
+ sink_crop->height = sink_fmt->height;
+ sink_crop->left = 0;
+ sink_crop->top = 0;
+
+ sink_cmps->width = sink_crop->width;
+ sink_cmps->height = sink_crop->height;
+ sink_cmps->left = 0;
+ sink_cmps->top = 0;
+
+ src_fmt->code = sink_fmt->code;
+ src_fmt->width = sink_cmps->width;
+ src_fmt->height = sink_cmps->height;
+
+ format->format = *sink_fmt;
+}
+
+static void c3_isp_rsz_set_source_fmt(struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *sink_cmps;
+ const struct c3_isp_rsz_format_info *rsz_fmt;
+
+ src_fmt = v4l2_subdev_state_get_format(state, format->pad);
+ sink_fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SINK);
+ sink_cmps = v4l2_subdev_state_get_compose(state, C3_ISP_RSZ_PAD_SINK);
+
+ src_fmt->code = sink_fmt->code;
+ src_fmt->width = sink_cmps->width;
+ src_fmt->height = sink_cmps->height;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ rsz_fmt = rsz_find_format_by_code(src_fmt->code, format->pad);
+ if (rsz_fmt->is_raw) {
+ src_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ src_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ } else {
+ src_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ src_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ }
+
+ format->format = *src_fmt;
+}
+
+static int c3_isp_rsz_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ if (format->pad == C3_ISP_RSZ_PAD_SINK)
+ c3_isp_rsz_set_sink_fmt(state, format);
+ else
+ c3_isp_rsz_set_source_fmt(state, format);
+
+ return 0;
+}
+
+static int c3_isp_rsz_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *cmps;
+
+ if (sel->pad == C3_ISP_RSZ_PAD_SOURCE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ fmt = v4l2_subdev_state_get_format(state, sel->pad);
+ sel->r.width = fmt->width;
+ sel->r.height = fmt->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ sel->r = *crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ cmps = v4l2_subdev_state_get_compose(state, sel->pad);
+ sel->r = *cmps;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
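+/*
+ * Selection rectangles are clamped against the sink format and the
+ * C3_ISP_MIN_* limits and then rounded up to even dimensions: e.g.
+ * (illustrative values) a requested 641x479 crop becomes 642x480 after
+ * ALIGN(x, 2). For the crop target the compose rectangle and the
+ * source pad format track the adjusted crop.
+ */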
+static int c3_isp_rsz_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *cmps;
+
+ if (sel->pad == C3_ISP_RSZ_PAD_SOURCE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ fmt = v4l2_subdev_state_get_format(state, sel->pad);
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ cmps = v4l2_subdev_state_get_compose(state, sel->pad);
+ src_fmt = v4l2_subdev_state_get_format(state,
+ C3_ISP_RSZ_PAD_SOURCE);
+
+ sel->r.left = clamp_t(s32, sel->r.left, 0, fmt->width - 1);
+ sel->r.top = clamp_t(s32, sel->r.top, 0, fmt->height - 1);
+ sel->r.width = clamp(sel->r.width, C3_ISP_MIN_WIDTH,
+ fmt->width - sel->r.left);
+ sel->r.height = clamp(sel->r.height, C3_ISP_MIN_HEIGHT,
+ fmt->height - sel->r.top);
+
+ crop->width = ALIGN(sel->r.width, 2);
+ crop->height = ALIGN(sel->r.height, 2);
+ crop->left = sel->r.left;
+ crop->top = sel->r.top;
+
+ *cmps = *crop;
+
+ src_fmt->code = fmt->code;
+ src_fmt->width = cmps->width;
+ src_fmt->height = cmps->height;
+
+ sel->r = *crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ cmps = v4l2_subdev_state_get_compose(state, sel->pad);
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = clamp(sel->r.width, C3_ISP_MIN_WIDTH,
+ crop->width);
+ sel->r.height = clamp(sel->r.height, C3_ISP_MIN_HEIGHT,
+ crop->height);
+
+ cmps->width = ALIGN(sel->r.width, 2);
+ cmps->height = ALIGN(sel->r.height, 2);
+ cmps->left = sel->r.left;
+ cmps->top = sel->r.top;
+
+ sel->r = *cmps;
+
+ fmt = v4l2_subdev_state_get_format(state,
+ C3_ISP_RSZ_PAD_SOURCE);
+ fmt->width = cmps->width;
+ fmt->height = cmps->height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int c3_isp_rsz_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *cmps;
+
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SINK);
+ fmt->width = C3_ISP_DEFAULT_WIDTH;
+ fmt->height = C3_ISP_DEFAULT_HEIGHT;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = C3_ISP_RSZ_DEF_PAD_FMT;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+
+ crop = v4l2_subdev_state_get_crop(state, C3_ISP_RSZ_PAD_SINK);
+ crop->width = C3_ISP_DEFAULT_WIDTH;
+ crop->height = C3_ISP_DEFAULT_HEIGHT;
+ crop->left = 0;
+ crop->top = 0;
+
+ cmps = v4l2_subdev_state_get_compose(state, C3_ISP_RSZ_PAD_SINK);
+ cmps->width = C3_ISP_DEFAULT_WIDTH;
+ cmps->height = C3_ISP_DEFAULT_HEIGHT;
+ cmps->left = 0;
+ cmps->top = 0;
+
+ fmt = v4l2_subdev_state_get_format(state, C3_ISP_RSZ_PAD_SOURCE);
+ fmt->width = cmps->width;
+ fmt->height = cmps->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->code = C3_ISP_RSZ_DEF_PAD_FMT;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops c3_isp_rsz_pad_ops = {
+ .enum_mbus_code = c3_isp_rsz_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = c3_isp_rsz_set_fmt,
+ .get_selection = c3_isp_rsz_get_selection,
+ .set_selection = c3_isp_rsz_set_selection,
+ .enable_streams = c3_isp_rsz_enable_streams,
+ .disable_streams = c3_isp_rsz_disable_streams,
+};
+
+static const struct v4l2_subdev_ops c3_isp_rsz_subdev_ops = {
+ .pad = &c3_isp_rsz_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops c3_isp_rsz_internal_ops = {
+ .init_state = c3_isp_rsz_init_state,
+};
+
+/* Media entity operations */
+static const struct media_entity_operations c3_isp_rsz_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int c3_isp_rsz_register(struct c3_isp_resizer *rsz)
+{
+ struct v4l2_subdev *sd = &rsz->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &c3_isp_rsz_subdev_ops);
+ sd->owner = THIS_MODULE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &c3_isp_rsz_internal_ops;
+ snprintf(sd->name, sizeof(sd->name), "c3-isp-resizer%u", rsz->id);
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ sd->entity.ops = &c3_isp_rsz_entity_ops;
+
+ sd->dev = rsz->isp->dev;
+ v4l2_set_subdevdata(sd, rsz);
+
+ rsz->pads[C3_ISP_RSZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ rsz->pads[C3_ISP_RSZ_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, C3_ISP_RSZ_PAD_MAX,
+ rsz->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = v4l2_device_register_subdev(&rsz->isp->v4l2_dev, sd);
+ if (ret)
+ goto err_subdev_cleanup;
+
+ return 0;
+
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+static void c3_isp_rsz_unregister(struct c3_isp_resizer *rsz)
+{
+ struct v4l2_subdev *sd = &rsz->sd;
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+}
+
+int c3_isp_resizers_register(struct c3_isp_device *isp)
+{
+ int ret;
+
+ for (unsigned int i = C3_ISP_RSZ_0; i < C3_ISP_NUM_RSZ; i++) {
+ struct c3_isp_resizer *rsz = &isp->resizers[i];
+
+ rsz->id = i;
+ rsz->isp = isp;
+ rsz->src_sd = &isp->core.sd;
+ rsz->src_pad = C3_ISP_CORE_PAD_SOURCE_VIDEO_0 + i;
+
+ ret = c3_isp_rsz_register(rsz);
+ if (ret) {
+ rsz->isp = NULL;
+ c3_isp_resizers_unregister(isp);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void c3_isp_resizers_unregister(struct c3_isp_device *isp)
+{
+ for (unsigned int i = C3_ISP_RSZ_0; i < C3_ISP_NUM_RSZ; i++) {
+ struct c3_isp_resizer *rsz = &isp->resizers[i];
+
+ if (rsz->isp)
+ c3_isp_rsz_unregister(rsz);
+ }
+}
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c
new file mode 100644
index 000000000000..8a5d7e1a30c9
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/cleanup.h>
+#include <linux/media/amlogic/c3-isp-config.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "c3-isp-common.h"
+#include "c3-isp-regs.h"
+
+/* Hardware configuration */
+
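+/*
+ * The AWB, AE and AF statistics are packed back to back in a single
+ * capture buffer, so only one DMA base address needs to be tracked:
+ *
+ *	buff->dma_addr                          -> AWB statistics
+ *	  + sizeof(struct c3_isp_awb_stats)     -> AE statistics
+ *	  + sizeof(struct c3_isp_ae_stats)      -> AF statistics
+ */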
+static void c3_isp_stats_cfg_dmawr_addr(struct c3_isp_stats *stats)
+{
+ u32 awb_dma_size = sizeof(struct c3_isp_awb_stats);
+ u32 ae_dma_size = sizeof(struct c3_isp_ae_stats);
+ u32 awb_dma_addr = stats->buff->dma_addr;
+ u32 af_dma_addr;
+ u32 ae_dma_addr;
+
+ ae_dma_addr = awb_dma_addr + awb_dma_size;
+ af_dma_addr = ae_dma_addr + ae_dma_size;
+
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_BADDR0,
+ VIU_DMAWR_BADDR0_AF_STATS_BASE_ADDR_MASK,
+ VIU_DMAWR_BADDR0_AF_STATS_BASE_ADDR(af_dma_addr));
+
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_BADDR1,
+ VIU_DMAWR_BADDR1_AWB_STATS_BASE_ADDR_MASK,
+ VIU_DMAWR_BADDR1_AWB_STATS_BASE_ADDR(awb_dma_addr));
+
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_BADDR2,
+ VIU_DMAWR_BADDR2_AE_STATS_BASE_ADDR_MASK,
+ VIU_DMAWR_BADDR2_AE_STATS_BASE_ADDR(ae_dma_addr));
+}
+
+static void c3_isp_stats_cfg_buff(struct c3_isp_stats *stats)
+{
+ stats->buff =
+ list_first_entry_or_null(&stats->pending,
+ struct c3_isp_stats_buffer, list);
+ if (stats->buff) {
+ c3_isp_stats_cfg_dmawr_addr(stats);
+ list_del(&stats->buff->list);
+ }
+}
+
+void c3_isp_stats_pre_cfg(struct c3_isp_device *isp)
+{
+ struct c3_isp_stats *stats = &isp->stats;
+ u32 dma_size;
+
+ c3_isp_update_bits(stats->isp, ISP_AF_EN_CTRL,
+ ISP_AF_EN_CTRL_STAT_SEL_MASK,
+ ISP_AF_EN_CTRL_STAT_SEL_NEW);
+ c3_isp_update_bits(stats->isp, ISP_AE_CTRL,
+ ISP_AE_CTRL_LUMA_MODE_MASK,
+ ISP_AE_CTRL_LUMA_MODE_FILTER);
+
+	/* The hardware expects dma_size in units of 16 bytes */
+ dma_size = sizeof(struct c3_isp_af_stats) / C3_ISP_DMA_SIZE_ALIGN_BYTES;
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_SIZE0,
+ VIU_DMAWR_SIZE0_AF_STATS_SIZE_MASK,
+ VIU_DMAWR_SIZE0_AF_STATS_SIZE(dma_size));
+
+ dma_size = sizeof(struct c3_isp_awb_stats) /
+ C3_ISP_DMA_SIZE_ALIGN_BYTES;
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_SIZE0,
+ VIU_DMAWR_SIZE0_AWB_STATS_SIZE_MASK,
+ VIU_DMAWR_SIZE0_AWB_STATS_SIZE(dma_size));
+
+ dma_size = sizeof(struct c3_isp_ae_stats) / C3_ISP_DMA_SIZE_ALIGN_BYTES;
+ c3_isp_update_bits(stats->isp, VIU_DMAWR_SIZE1,
+ VIU_DMAWR_SIZE1_AE_STATS_SIZE_MASK,
+ VIU_DMAWR_SIZE1_AE_STATS_SIZE(dma_size));
+
+ guard(spinlock_irqsave)(&stats->buff_lock);
+
+ c3_isp_stats_cfg_buff(stats);
+}
+
+static int c3_isp_stats_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, C3_ISP_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "AML C3 ISP", sizeof(cap->card));
+
+ return 0;
+}
+
+static int c3_isp_stats_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct c3_isp_stats *stats = video_drvdata(file);
+
+ if (f->index > 0 || f->type != stats->vb2_q.type)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_META_FMT_C3ISP_STATS;
+
+ return 0;
+}
+
+static int c3_isp_stats_g_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct c3_isp_stats *stats = video_drvdata(file);
+
+ f->fmt.meta = stats->vfmt.fmt.meta;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops isp_stats_v4l2_ioctl_ops = {
+ .vidioc_querycap = c3_isp_stats_querycap,
+ .vidioc_enum_fmt_meta_cap = c3_isp_stats_enum_fmt,
+ .vidioc_g_fmt_meta_cap = c3_isp_stats_g_fmt,
+ .vidioc_s_fmt_meta_cap = c3_isp_stats_g_fmt,
+ .vidioc_try_fmt_meta_cap = c3_isp_stats_g_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations isp_stats_v4l2_fops = {
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
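+/*
+ * Per the vb2 queue_setup contract: when *num_planes is already set the
+ * call originates from VIDIOC_CREATE_BUFS and the requested plane
+ * layout only needs to be validated; otherwise the driver fills in its
+ * single fixed-size plane.
+ */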
+static int c3_isp_stats_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ if (*num_planes) {
+ if (*num_planes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < sizeof(struct c3_isp_stats_info))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = 1;
+ sizes[0] = sizeof(struct c3_isp_stats_info);
+
+ return 0;
+}
+
+static void c3_isp_stats_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_stats_buffer *buf =
+ container_of(v4l2_buf, struct c3_isp_stats_buffer, vb);
+ struct c3_isp_stats *stats = vb2_get_drv_priv(vb->vb2_queue);
+
+ guard(spinlock_irqsave)(&stats->buff_lock);
+
+ list_add_tail(&buf->list, &stats->pending);
+}
+
+static int c3_isp_stats_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct c3_isp_stats *stats = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = stats->vfmt.fmt.meta.buffersize;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(stats->isp->dev,
+ "User buffer too small (%ld < %u)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static int c3_isp_stats_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ struct c3_isp_stats_buffer *buf =
+ container_of(v4l2_buf, struct c3_isp_stats_buffer, vb);
+
+ buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ return 0;
+}
+
+static void c3_isp_stats_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct c3_isp_stats *stats = vb2_get_drv_priv(q);
+
+ guard(spinlock_irqsave)(&stats->buff_lock);
+
+ if (stats->buff) {
+ vb2_buffer_done(&stats->buff->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ stats->buff = NULL;
+ }
+
+ while (!list_empty(&stats->pending)) {
+ struct c3_isp_stats_buffer *buff;
+
+ buff = list_first_entry(&stats->pending,
+ struct c3_isp_stats_buffer, list);
+ list_del(&buff->list);
+ vb2_buffer_done(&buff->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops isp_stats_vb2_ops = {
+ .queue_setup = c3_isp_stats_vb2_queue_setup,
+ .buf_queue = c3_isp_stats_vb2_buf_queue,
+ .buf_prepare = c3_isp_stats_vb2_buf_prepare,
+ .buf_init = c3_isp_stats_vb2_buf_init,
+ .stop_streaming = c3_isp_stats_vb2_stop_streaming,
+};
+
+int c3_isp_stats_register(struct c3_isp_device *isp)
+{
+ struct c3_isp_stats *stats = &isp->stats;
+ struct video_device *vdev = &stats->vdev;
+ struct vb2_queue *vb2_q = &stats->vb2_q;
+ int ret;
+
+ memset(stats, 0, sizeof(*stats));
+ stats->vfmt.fmt.meta.dataformat = V4L2_META_FMT_C3ISP_STATS;
+ stats->vfmt.fmt.meta.buffersize = sizeof(struct c3_isp_stats_info);
+ stats->isp = isp;
+ INIT_LIST_HEAD(&stats->pending);
+ spin_lock_init(&stats->buff_lock);
+
+ mutex_init(&stats->lock);
+
+ snprintf(vdev->name, sizeof(vdev->name), "c3-isp-stats");
+ vdev->fops = &isp_stats_v4l2_fops;
+ vdev->ioctl_ops = &isp_stats_v4l2_ioctl_ops;
+ vdev->v4l2_dev = &isp->v4l2_dev;
+ vdev->lock = &stats->lock;
+ vdev->minor = -1;
+ vdev->queue = vb2_q;
+ vdev->release = video_device_release_empty;
+ vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_RX;
+ video_set_drvdata(vdev, stats);
+
+ vb2_q->drv_priv = stats;
+ vb2_q->mem_ops = &vb2_dma_contig_memops;
+ vb2_q->ops = &isp_stats_vb2_ops;
+ vb2_q->type = V4L2_BUF_TYPE_META_CAPTURE;
+ vb2_q->io_modes = VB2_DMABUF | VB2_MMAP;
+ vb2_q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vb2_q->buf_struct_size = sizeof(struct c3_isp_stats_buffer);
+ vb2_q->dev = isp->dev;
+ vb2_q->lock = &stats->lock;
+ vb2_q->min_queued_buffers = 2;
+
+ ret = vb2_queue_init(vb2_q);
+ if (ret)
+ goto err_destroy;
+
+ stats->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, &stats->pad);
+ if (ret)
+ goto err_queue_release;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to register %s: %d\n", vdev->name, ret);
+ goto err_entity_cleanup;
+ }
+
+ return 0;
+
+err_entity_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_queue_release:
+ vb2_queue_release(vb2_q);
+err_destroy:
+ mutex_destroy(&stats->lock);
+ return ret;
+}
+
+void c3_isp_stats_unregister(struct c3_isp_device *isp)
+{
+ struct c3_isp_stats *stats = &isp->stats;
+
+ vb2_queue_release(&stats->vb2_q);
+ media_entity_cleanup(&stats->vdev.entity);
+ video_unregister_device(&stats->vdev);
+ mutex_destroy(&stats->lock);
+}
+
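+/*
+ * Frame completion: the buffer currently owned by the hardware is
+ * returned to userspace with its sequence number and timestamp filled
+ * in, and the next pending buffer, if any, is programmed as the DMA
+ * target for the following frame.
+ */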
+void c3_isp_stats_isr(struct c3_isp_device *isp)
+{
+ struct c3_isp_stats *stats = &isp->stats;
+
+ guard(spinlock_irqsave)(&stats->buff_lock);
+
+ if (stats->buff) {
+ stats->buff->vb.sequence = stats->isp->frm_sequence;
+ stats->buff->vb.vb2_buf.timestamp = ktime_get();
+ stats->buff->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&stats->buff->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ c3_isp_stats_cfg_buff(stats);
+}
diff --git a/drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig b/drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig
new file mode 100644
index 000000000000..bf19059b3543
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-adapter/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_C3_MIPI_ADAPTER
+ tristate "Amlogic C3 MIPI adapter"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on OF
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+	  Video4Linux2 driver for the Amlogic C3 MIPI adapter.
+	  The C3 MIPI adapter is mainly responsible for organizing
+	  MIPI data and sending the raw data to the ISP pipeline.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called c3-mipi-adap.
diff --git a/drivers/media/platform/amlogic/c3/mipi-adapter/Makefile b/drivers/media/platform/amlogic/c3/mipi-adapter/Makefile
new file mode 100644
index 000000000000..216fc310c5b4
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-adapter/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_VIDEO_C3_MIPI_ADAPTER) += c3-mipi-adap.o
diff --git a/drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c b/drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c
new file mode 100644
index 000000000000..4bd98fb9c7e9
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-adapter/c3-mipi-adap.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * Adapter Block Diagram
+ * ---------------------
+ *
+ * +--------------------------------------------+
+ * | Adapter |
+ * |--------------------------------------------|
+ * +------------+ | | | | | +-----+
+ * | MIPI CSI-2 |--->| Frontend -> DDR_RD0 -> PIXEL0 -> ALIGNMENT |--->| ISP |
+ * +------------+ | | | | | +-----+
+ * +--------------------------------------------+
+ *
+ */
+
+/* C3 adapter submodule definition */
+enum {
+ SUBMD_TOP,
+ SUBMD_FD,
+ SUBMD_RD,
+};
+
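+/*
+ * Register constants encode their sub-module in bits [17:16] and the
+ * register offset in bits [15:0]: ADAP_REG_R(0x80), for instance,
+ * expands to (SUBMD_RD << 16) | 0x80. c3_mipi_adap_update_bits()
+ * splits the constant again with ADAP_SUBMD() and ADAP_REG_ADDR() to
+ * select the matching ioremapped region.
+ */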
+#define ADAP_SUBMD_MASK GENMASK(17, 16)
+#define ADAP_SUBMD_SHIFT 16
+#define ADAP_SUBMD(x) (((x) & (ADAP_SUBMD_MASK)) >> (ADAP_SUBMD_SHIFT))
+#define ADAP_REG_ADDR_MASK GENMASK(15, 0)
+#define ADAP_REG_ADDR(x) ((x) & (ADAP_REG_ADDR_MASK))
+#define ADAP_REG_T(x) ((SUBMD_TOP << ADAP_SUBMD_SHIFT) | (x))
+#define ADAP_REG_F(x) ((SUBMD_FD << ADAP_SUBMD_SHIFT) | (x))
+#define ADAP_REG_R(x) ((SUBMD_RD << ADAP_SUBMD_SHIFT) | (x))
+
+#define MIPI_ADAP_CLOCK_NUM_MAX 3
+#define MIPI_ADAP_SUBDEV_NAME "c3-mipi-adapter"
+
+/* C3 MIPI adapter TOP register */
+#define MIPI_TOP_CTRL0 ADAP_REG_T(0x00)
+#define MIPI_TOP_CTRL0_RST_ADAPTER_MASK BIT(1)
+#define MIPI_TOP_CTRL0_RST_ADAPTER_APPLY BIT(1)
+#define MIPI_TOP_CTRL0_RST_ADAPTER_EXIT (0 << 1)
+
+#define MIPI_ADAPT_DE_CTRL0 ADAP_REG_T(0x40)
+#define MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_MASK BIT(3)
+#define MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_EN BIT(3)
+#define MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_DIS (0 << 3)
+#define MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_MASK BIT(7)
+#define MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_EN BIT(7)
+#define MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_DIS (0 << 7)
+
+/* C3 MIPI adapter FRONTEND register */
+#define CSI2_CLK_RESET ADAP_REG_F(0x00)
+#define CSI2_CLK_RESET_SW_RESET_MASK BIT(0)
+#define CSI2_CLK_RESET_SW_RESET_APPLY BIT(0)
+#define CSI2_CLK_RESET_SW_RESET_RELEASE (0 << 0)
+#define CSI2_CLK_RESET_CLK_ENABLE_MASK BIT(1)
+#define CSI2_CLK_RESET_CLK_ENABLE_EN BIT(1)
+#define CSI2_CLK_RESET_CLK_ENABLE_DIS (0 << 1)
+
+#define CSI2_GEN_CTRL0 ADAP_REG_F(0x04)
+#define CSI2_GEN_CTRL0_VC0_MASK BIT(0)
+#define CSI2_GEN_CTRL0_VC0_EN BIT(0)
+#define CSI2_GEN_CTRL0_VC0_DIS (0 << 0)
+#define CSI2_GEN_CTRL0_ENABLE_PACKETS_MASK GENMASK(20, 16)
+#define CSI2_GEN_CTRL0_ENABLE_PACKETS_RAW BIT(16)
+#define CSI2_GEN_CTRL0_ENABLE_PACKETS_YUV (2 << 16)
+
+#define CSI2_X_START_END_ISP ADAP_REG_F(0x0c)
+#define CSI2_X_START_END_ISP_X_START_MASK GENMASK(15, 0)
+#define CSI2_X_START_END_ISP_X_START(x) ((x) << 0)
+#define CSI2_X_START_END_ISP_X_END_MASK GENMASK(31, 16)
+#define CSI2_X_START_END_ISP_X_END(x) (((x) - 1) << 16)
+
+#define CSI2_Y_START_END_ISP ADAP_REG_F(0x10)
+#define CSI2_Y_START_END_ISP_Y_START_MASK GENMASK(15, 0)
+#define CSI2_Y_START_END_ISP_Y_START(x) ((x) << 0)
+#define CSI2_Y_START_END_ISP_Y_END_MASK GENMASK(31, 16)
+#define CSI2_Y_START_END_ISP_Y_END(x) (((x) - 1) << 16)
+
+#define CSI2_VC_MODE ADAP_REG_F(0x1c)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_MASK GENMASK(19, 16)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_0 BIT(16)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_1 (2 << 16)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_2 (4 << 16)
+#define CSI2_VC_MODE_VS_ISP_SEL_VC_3 (8 << 16)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_MASK GENMASK(23, 20)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_0 BIT(20)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_1 (2 << 20)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_2 (4 << 20)
+#define CSI2_VC_MODE_HS_ISP_SEL_VC_3 (8 << 20)
+
+/* C3 MIPI adapter READER register */
+#define MIPI_ADAPT_DDR_RD0_CNTL0 ADAP_REG_R(0x00)
+#define MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_EN_MASK BIT(0)
+#define MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_EN BIT(0)
+#define MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_DIS (0 << 0)
+
+#define MIPI_ADAPT_DDR_RD0_CNTL1 ADAP_REG_R(0x04)
+#define MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_MASK GENMASK(31, 30)
+#define MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_DIRECT_MODE (0 << 30)
+#define MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_DDR_MODE BIT(30)
+
+#define MIPI_ADAPT_PIXEL0_CNTL0 ADAP_REG_R(0x80)
+#define MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_MASK GENMASK(17, 16)
+#define MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_RAW_DDR (0 << 16)
+#define MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_RAW_DIRECT BIT(16)
+#define MIPI_ADAPT_PIXEL0_CNTL0_DATA_TYPE_MASK GENMASK(25, 20)
+#define MIPI_ADAPT_PIXEL0_CNTL0_DATA_TYPE(x) ((x) << 20)
+#define MIPI_ADAPT_PIXEL0_CNTL0_START_EN_MASK BIT(31)
+#define MIPI_ADAPT_PIXEL0_CNTL0_START_EN BIT(31)
+
+#define MIPI_ADAPT_ALIG_CNTL0 ADAP_REG_R(0x100)
+#define MIPI_ADAPT_ALIG_CNTL0_H_NUM_MASK GENMASK(15, 0)
+#define MIPI_ADAPT_ALIG_CNTL0_H_NUM(x) ((x) << 0)
+#define MIPI_ADAPT_ALIG_CNTL0_V_NUM_MASK GENMASK(31, 16)
+#define MIPI_ADAPT_ALIG_CNTL0_V_NUM(x) ((x) << 16)
+
+#define MIPI_ADAPT_ALIG_CNTL1 ADAP_REG_R(0x104)
+#define MIPI_ADAPT_ALIG_CNTL1_HPE_NUM_MASK GENMASK(31, 16)
+#define MIPI_ADAPT_ALIG_CNTL1_HPE_NUM(x) ((x) << 16)
+
+#define MIPI_ADAPT_ALIG_CNTL2 ADAP_REG_R(0x108)
+#define MIPI_ADAPT_ALIG_CNTL2_VPE_NUM_MASK GENMASK(31, 16)
+#define MIPI_ADAPT_ALIG_CNTL2_VPE_NUM(x) ((x) << 16)
+
+#define MIPI_ADAPT_ALIG_CNTL6 ADAP_REG_R(0x118)
+#define MIPI_ADAPT_ALIG_CNTL6_PATH0_EN_MASK BIT(0)
+#define MIPI_ADAPT_ALIG_CNTL6_PATH0_EN BIT(0)
+#define MIPI_ADAPT_ALIG_CNTL6_PATH0_DIS (0 << 0)
+#define MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_MASK BIT(4)
+#define MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_DDR (0 << 4)
+#define MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_DIRECT BIT(4)
+#define MIPI_ADAPT_ALIG_CNTL6_DATA0_EN_MASK BIT(12)
+#define MIPI_ADAPT_ALIG_CNTL6_DATA0_EN BIT(12)
+#define MIPI_ADAPT_ALIG_CNTL6_DATA0_DIS (0 << 12)
+
+#define MIPI_ADAPT_ALIG_CNTL8 ADAP_REG_R(0x120)
+#define MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_MASK BIT(5)
+#define MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_EN BIT(5)
+#define MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_DIS (0 << 5)
+#define MIPI_ADAPT_ALIG_CNTL8_EXCEED_DIS_MASK BIT(12)
+#define MIPI_ADAPT_ALIG_CNTL8_EXCEED_HOLD (0 << 12)
+#define MIPI_ADAPT_ALIG_CNTL8_EXCEED_NOT_HOLD BIT(12)
+#define MIPI_ADAPT_ALIG_CNTL8_START_EN_MASK BIT(31)
+#define MIPI_ADAPT_ALIG_CNTL8_START_EN BIT(31)
+
+#define MIPI_ADAP_MAX_WIDTH 2888
+#define MIPI_ADAP_MIN_WIDTH 160
+#define MIPI_ADAP_MAX_HEIGHT 2240
+#define MIPI_ADAP_MIN_HEIGHT 120
+#define MIPI_ADAP_DEFAULT_WIDTH 1920
+#define MIPI_ADAP_DEFAULT_HEIGHT 1080
+#define MIPI_ADAP_DEFAULT_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+
+/* C3 MIPI adapter pad list */
+enum {
+ C3_MIPI_ADAP_PAD_SINK,
+ C3_MIPI_ADAP_PAD_SRC,
+ C3_MIPI_ADAP_PAD_MAX
+};
+
+/*
+ * struct c3_adap_info - mipi adapter information
+ *
+ * @clocks: array of mipi adapter clock names
+ * @clock_num: actual clock number
+ */
+struct c3_adap_info {
+ char *clocks[MIPI_ADAP_CLOCK_NUM_MAX];
+ u32 clock_num;
+};
+
+/*
+ * struct c3_adap_device - mipi adapter platform device
+ *
+ * @dev: pointer to the struct device
+ * @top: mipi adapter top register address
+ * @fd: mipi adapter frontend register address
+ * @rd: mipi adapter reader register address
+ * @clks: array of MIPI adapter clocks
+ * @sd: mipi adapter sub-device
+ * @pads: mipi adapter sub-device pads
+ * @notifier: notifier to register on the v4l2-async API
+ * @src_pad: source sub-device pad
+ * @info: version-specific MIPI adapter information
+ */
+struct c3_adap_device {
+ struct device *dev;
+ void __iomem *top;
+ void __iomem *fd;
+ void __iomem *rd;
+ struct clk_bulk_data clks[MIPI_ADAP_CLOCK_NUM_MAX];
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[C3_MIPI_ADAP_PAD_MAX];
+ struct v4l2_async_notifier notifier;
+ struct media_pad *src_pad;
+
+ const struct c3_adap_info *info;
+};
+
+/* Format helpers */
+
+struct c3_adap_pix_format {
+ u32 code;
+ u8 type;
+};
+
+static const struct c3_adap_pix_format c3_mipi_adap_formats[] = {
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MIPI_CSI2_DT_RAW10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MIPI_CSI2_DT_RAW10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MIPI_CSI2_DT_RAW10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MIPI_CSI2_DT_RAW10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, MIPI_CSI2_DT_RAW12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, MIPI_CSI2_DT_RAW12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, MIPI_CSI2_DT_RAW12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, MIPI_CSI2_DT_RAW12 },
+};
+
+static const struct c3_adap_pix_format *c3_mipi_adap_find_format(u32 code)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(c3_mipi_adap_formats); i++)
+ if (code == c3_mipi_adap_formats[i].code)
+ return &c3_mipi_adap_formats[i];
+
+ return NULL;
+}
+
+/* Hardware configuration */
+
+static void c3_mipi_adap_update_bits(struct c3_adap_device *adap, u32 reg,
+ u32 mask, u32 val)
+{
+ void __iomem *addr;
+ u32 orig, tmp;
+
+ switch (ADAP_SUBMD(reg)) {
+ case SUBMD_TOP:
+ addr = adap->top + ADAP_REG_ADDR(reg);
+ break;
+ case SUBMD_FD:
+ addr = adap->fd + ADAP_REG_ADDR(reg);
+ break;
+ case SUBMD_RD:
+ addr = adap->rd + ADAP_REG_ADDR(reg);
+ break;
+ default:
+ dev_err(adap->dev,
+ "Invalid sub-module: %lu\n", ADAP_SUBMD(reg));
+ return;
+ }
+
+ orig = readl(addr);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (tmp != orig)
+ writel(tmp, addr);
+}
+
+/* Configure adapter top sub module */
+static void c3_mipi_adap_cfg_top(struct c3_adap_device *adap)
+{
+ /* Reset adapter */
+ c3_mipi_adap_update_bits(adap, MIPI_TOP_CTRL0,
+ MIPI_TOP_CTRL0_RST_ADAPTER_MASK,
+ MIPI_TOP_CTRL0_RST_ADAPTER_APPLY);
+ c3_mipi_adap_update_bits(adap, MIPI_TOP_CTRL0,
+ MIPI_TOP_CTRL0_RST_ADAPTER_MASK,
+ MIPI_TOP_CTRL0_RST_ADAPTER_EXIT);
+
+ /* Bypass decompress */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_DE_CTRL0,
+ MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_MASK,
+ MIPI_ADAPT_DE_CTRL0_RD_BUS_BYPASS_EN);
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_DE_CTRL0,
+ MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_MASK,
+ MIPI_ADAPT_DE_CTRL0_WR_BUS_BYPASS_EN);
+}
+
+/* Configure adapter frontend sub module */
+static void c3_mipi_adap_cfg_frontend(struct c3_adap_device *adap,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ /* Reset frontend module */
+ c3_mipi_adap_update_bits(adap, CSI2_CLK_RESET,
+ CSI2_CLK_RESET_SW_RESET_MASK,
+ CSI2_CLK_RESET_SW_RESET_APPLY);
+ c3_mipi_adap_update_bits(adap, CSI2_CLK_RESET,
+ CSI2_CLK_RESET_SW_RESET_MASK,
+ CSI2_CLK_RESET_SW_RESET_RELEASE);
+ c3_mipi_adap_update_bits(adap, CSI2_CLK_RESET,
+ CSI2_CLK_RESET_CLK_ENABLE_MASK,
+ CSI2_CLK_RESET_CLK_ENABLE_EN);
+
+ c3_mipi_adap_update_bits(adap, CSI2_X_START_END_ISP,
+ CSI2_X_START_END_ISP_X_START_MASK,
+ CSI2_X_START_END_ISP_X_START(0));
+ c3_mipi_adap_update_bits(adap, CSI2_X_START_END_ISP,
+ CSI2_X_START_END_ISP_X_END_MASK,
+ CSI2_X_START_END_ISP_X_END(fmt->width));
+
+ c3_mipi_adap_update_bits(adap, CSI2_Y_START_END_ISP,
+ CSI2_Y_START_END_ISP_Y_START_MASK,
+ CSI2_Y_START_END_ISP_Y_START(0));
+ c3_mipi_adap_update_bits(adap, CSI2_Y_START_END_ISP,
+ CSI2_Y_START_END_ISP_Y_END_MASK,
+ CSI2_Y_START_END_ISP_Y_END(fmt->height));
+
+ /* Select VS and HS signal for direct path */
+ c3_mipi_adap_update_bits(adap, CSI2_VC_MODE,
+ CSI2_VC_MODE_VS_ISP_SEL_VC_MASK,
+ CSI2_VC_MODE_VS_ISP_SEL_VC_0);
+ c3_mipi_adap_update_bits(adap, CSI2_VC_MODE,
+ CSI2_VC_MODE_HS_ISP_SEL_VC_MASK,
+ CSI2_VC_MODE_HS_ISP_SEL_VC_0);
+
+	/* Enable reception of RAW packets */
+ c3_mipi_adap_update_bits(adap, CSI2_GEN_CTRL0,
+ CSI2_GEN_CTRL0_ENABLE_PACKETS_MASK,
+ CSI2_GEN_CTRL0_ENABLE_PACKETS_RAW);
+
+ /* Enable virtual channel 0 */
+ c3_mipi_adap_update_bits(adap, CSI2_GEN_CTRL0,
+ CSI2_GEN_CTRL0_VC0_MASK,
+ CSI2_GEN_CTRL0_VC0_EN);
+}
+
+static void c3_mipi_adap_cfg_rd0(struct c3_adap_device *adap)
+{
+	/* Select direct mode for the DDR_RD0 module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_DDR_RD0_CNTL1,
+ MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_MASK,
+ MIPI_ADAPT_DDR_RD0_CNTL1_PORT_SEL_DIRECT_MODE);
+
+ /* Data can't bypass DDR_RD0 in direct mode, so enable DDR_RD0 here */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_DDR_RD0_CNTL0,
+ MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_EN_MASK,
+ MIPI_ADAPT_DDR_RD0_CNTL0_MODULE_EN);
+}
+
+static void c3_mipi_adap_cfg_pixel0(struct c3_adap_device *adap,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ const struct c3_adap_pix_format *pix;
+
+ pix = c3_mipi_adap_find_format(fmt->code);
+
+ /* Set work mode and data type for PIXEL0 module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_PIXEL0_CNTL0,
+ MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_MASK,
+ MIPI_ADAPT_PIXEL0_CNTL0_WORK_MODE_RAW_DIRECT);
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_PIXEL0_CNTL0,
+ MIPI_ADAPT_PIXEL0_CNTL0_DATA_TYPE_MASK,
+ MIPI_ADAPT_PIXEL0_CNTL0_DATA_TYPE(pix->type));
+
+ /* Start PIXEL0 module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_PIXEL0_CNTL0,
+ MIPI_ADAPT_PIXEL0_CNTL0_START_EN_MASK,
+ MIPI_ADAPT_PIXEL0_CNTL0_START_EN);
+}
+
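+/*
+ * The alignment block is programmed with a padded window around the
+ * active frame; with the default 1920x1080 format this works out to a
+ * 1984-cycle line (1920 + 64) and a 1120-line frame (1080 + 40), while
+ * HPE_NUM/VPE_NUM keep marking the end of the active 1920x1080 area.
+ */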
+static void c3_mipi_adap_cfg_alig(struct c3_adap_device *adap,
+ struct v4l2_mbus_framefmt *fmt)
+{
+	/*
+	 * The ISP hardware requires more than 64 cycles of horizontal
+	 * blanking, so add 64 to the line length here.
+	 */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL0,
+ MIPI_ADAPT_ALIG_CNTL0_H_NUM_MASK,
+ MIPI_ADAPT_ALIG_CNTL0_H_NUM(fmt->width + 64));
+
+	/*
+	 * The ISP hardware requires more than 40 lines of vertical
+	 * blanking, so add 40 to the frame height here.
+	 */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL0,
+ MIPI_ADAPT_ALIG_CNTL0_V_NUM_MASK,
+ MIPI_ADAPT_ALIG_CNTL0_V_NUM(fmt->height + 40));
+
+ /* End pixel in a line */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL1,
+ MIPI_ADAPT_ALIG_CNTL1_HPE_NUM_MASK,
+ MIPI_ADAPT_ALIG_CNTL1_HPE_NUM(fmt->width));
+
+ /* End line in a frame */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL2,
+ MIPI_ADAPT_ALIG_CNTL2_VPE_NUM_MASK,
+ MIPI_ADAPT_ALIG_CNTL2_VPE_NUM(fmt->height));
+
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL6,
+ MIPI_ADAPT_ALIG_CNTL6_PATH0_EN_MASK,
+ MIPI_ADAPT_ALIG_CNTL6_PATH0_EN);
+
+ /* Select direct mode for ALIG module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL6,
+ MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_MASK,
+ MIPI_ADAPT_ALIG_CNTL6_PIX0_DATA_MODE_DIRECT);
+
+	/* Enable raw data output */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL6,
+ MIPI_ADAPT_ALIG_CNTL6_DATA0_EN_MASK,
+ MIPI_ADAPT_ALIG_CNTL6_DATA0_EN);
+
+ /* Set continue mode and disable hold counter */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL8,
+				 MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_MASK,
+				 MIPI_ADAPT_ALIG_CNTL8_FRAME_CONTINUE_EN);
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL8,
+ MIPI_ADAPT_ALIG_CNTL8_EXCEED_DIS_MASK,
+ MIPI_ADAPT_ALIG_CNTL8_EXCEED_NOT_HOLD);
+
+ /* Start ALIG module */
+ c3_mipi_adap_update_bits(adap, MIPI_ADAPT_ALIG_CNTL8,
+ MIPI_ADAPT_ALIG_CNTL8_START_EN_MASK,
+ MIPI_ADAPT_ALIG_CNTL8_START_EN);
+}
+
+/* V4L2 subdev operations */
+
+static int c3_mipi_adap_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_adap_device *adap = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ struct media_pad *sink_pad;
+ struct v4l2_subdev *src_sd;
+ int ret;
+
+ sink_pad = &adap->pads[C3_MIPI_ADAP_PAD_SINK];
+ adap->src_pad = media_pad_remote_pad_unique(sink_pad);
+ if (IS_ERR(adap->src_pad)) {
+ dev_dbg(adap->dev, "Failed to get source pad for MIPI adap\n");
+ return -EPIPE;
+ }
+
+ src_sd = media_entity_to_v4l2_subdev(adap->src_pad->entity);
+
+ pm_runtime_resume_and_get(adap->dev);
+
+ fmt = v4l2_subdev_state_get_format(state, C3_MIPI_ADAP_PAD_SINK);
+
+ c3_mipi_adap_cfg_top(adap);
+ c3_mipi_adap_cfg_frontend(adap, fmt);
+ c3_mipi_adap_cfg_rd0(adap);
+ c3_mipi_adap_cfg_pixel0(adap, fmt);
+ c3_mipi_adap_cfg_alig(adap, fmt);
+
+ ret = v4l2_subdev_enable_streams(src_sd, adap->src_pad->index, BIT(0));
+ if (ret) {
+ pm_runtime_put(adap->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int c3_mipi_adap_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_adap_device *adap = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd;
+
+ if (adap->src_pad) {
+ src_sd = media_entity_to_v4l2_subdev(adap->src_pad->entity);
+ v4l2_subdev_disable_streams(src_sd, adap->src_pad->index,
+ BIT(0));
+ }
+ adap->src_pad = NULL;
+
+ pm_runtime_put(adap->dev);
+
+ return 0;
+}
+
+static int c3_mipi_adap_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ switch (code->pad) {
+ case C3_MIPI_ADAP_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(c3_mipi_adap_formats))
+ return -EINVAL;
+
+ code->code = c3_mipi_adap_formats[code->index].code;
+ break;
+ case C3_MIPI_ADAP_PAD_SRC:
+ if (code->index)
+ return -EINVAL;
+
+ fmt = v4l2_subdev_state_get_format(state, code->pad);
+ code->code = fmt->code;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int c3_mipi_adap_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ const struct c3_adap_pix_format *pix_format;
+
+ if (format->pad != C3_MIPI_ADAP_PAD_SINK)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ pix_format = c3_mipi_adap_find_format(format->format.code);
+ if (!pix_format)
+ pix_format = &c3_mipi_adap_formats[0];
+
+ fmt = v4l2_subdev_state_get_format(state, format->pad);
+ fmt->code = pix_format->code;
+ fmt->width = clamp_t(u32, format->format.width,
+ MIPI_ADAP_MIN_WIDTH, MIPI_ADAP_MAX_WIDTH);
+ fmt->height = clamp_t(u32, format->format.height,
+ MIPI_ADAP_MIN_HEIGHT, MIPI_ADAP_MAX_HEIGHT);
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ format->format = *fmt;
+
+	/* Propagate the format to the source pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_MIPI_ADAP_PAD_SRC);
+ *fmt = format->format;
+
+ return 0;
+}
+
+static int c3_mipi_adap_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, C3_MIPI_ADAP_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(state, C3_MIPI_ADAP_PAD_SRC);
+
+ sink_fmt->width = MIPI_ADAP_DEFAULT_WIDTH;
+ sink_fmt->height = MIPI_ADAP_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = MIPI_ADAP_DEFAULT_FMT;
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ *src_fmt = *sink_fmt;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops c3_mipi_adap_pad_ops = {
+ .enum_mbus_code = c3_mipi_adap_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = c3_mipi_adap_set_fmt,
+ .enable_streams = c3_mipi_adap_enable_streams,
+ .disable_streams = c3_mipi_adap_disable_streams,
+};
+
+static const struct v4l2_subdev_ops c3_mipi_adap_subdev_ops = {
+ .pad = &c3_mipi_adap_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops c3_mipi_adap_internal_ops = {
+ .init_state = c3_mipi_adap_init_state,
+};
+
+/* Media entity operations */
+static const struct media_entity_operations c3_mipi_adap_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* PM runtime */
+
+static int c3_mipi_adap_runtime_suspend(struct device *dev)
+{
+ struct c3_adap_device *adap = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(adap->info->clock_num, adap->clks);
+
+ return 0;
+}
+
+static int c3_mipi_adap_runtime_resume(struct device *dev)
+{
+ struct c3_adap_device *adap = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(adap->info->clock_num, adap->clks);
+}
+
+static const struct dev_pm_ops c3_mipi_adap_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(c3_mipi_adap_runtime_suspend,
+ c3_mipi_adap_runtime_resume, NULL)
+};
+
+/* Probe/remove & platform driver */
+
+static int c3_mipi_adap_subdev_init(struct c3_adap_device *adap)
+{
+ struct v4l2_subdev *sd = &adap->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &c3_mipi_adap_subdev_ops);
+ sd->owner = THIS_MODULE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &c3_mipi_adap_internal_ops;
+ snprintf(sd->name, sizeof(sd->name), "%s", MIPI_ADAP_SUBDEV_NAME);
+
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->entity.ops = &c3_mipi_adap_entity_ops;
+
+ sd->dev = adap->dev;
+ v4l2_set_subdevdata(sd, adap);
+
+ adap->pads[C3_MIPI_ADAP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ adap->pads[C3_MIPI_ADAP_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, C3_MIPI_ADAP_PAD_MAX,
+ adap->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret) {
+ media_entity_cleanup(&sd->entity);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void c3_mipi_adap_subdev_deinit(struct c3_adap_device *adap)
+{
+ v4l2_subdev_cleanup(&adap->sd);
+ media_entity_cleanup(&adap->sd.entity);
+}
+
+/* Subdev notifier register */
+static int c3_mipi_adap_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct c3_adap_device *adap = v4l2_get_subdevdata(notifier->sd);
+ struct media_pad *sink = &adap->sd.entity.pads[C3_MIPI_ADAP_PAD_SINK];
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static const struct v4l2_async_notifier_operations c3_mipi_adap_notify_ops = {
+ .bound = c3_mipi_adap_notify_bound,
+};
+
+static int c3_mipi_adap_async_register(struct c3_adap_device *adap)
+{
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_subdev_nf_init(&adap->notifier, &adap->sd);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(adap->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ asc = v4l2_async_nf_add_fwnode_remote(&adap->notifier, ep,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
+ goto err_put_handle;
+ }
+
+ adap->notifier.ops = &c3_mipi_adap_notify_ops;
+ ret = v4l2_async_nf_register(&adap->notifier);
+ if (ret)
+ goto err_cleanup_nf;
+
+ ret = v4l2_async_register_subdev(&adap->sd);
+ if (ret)
+ goto err_unregister_nf;
+
+ fwnode_handle_put(ep);
+
+ return 0;
+
+err_unregister_nf:
+ v4l2_async_nf_unregister(&adap->notifier);
+err_cleanup_nf:
+ v4l2_async_nf_cleanup(&adap->notifier);
+err_put_handle:
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static void c3_mipi_adap_async_unregister(struct c3_adap_device *adap)
+{
+ v4l2_async_unregister_subdev(&adap->sd);
+ v4l2_async_nf_unregister(&adap->notifier);
+ v4l2_async_nf_cleanup(&adap->notifier);
+}
+
+static int c3_mipi_adap_ioremap_resource(struct c3_adap_device *adap)
+{
+ struct device *dev = adap->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ adap->top = devm_platform_ioremap_resource_byname(pdev, "top");
+ if (IS_ERR(adap->top))
+ return PTR_ERR(adap->top);
+
+ adap->fd = devm_platform_ioremap_resource_byname(pdev, "fd");
+ if (IS_ERR(adap->fd))
+ return PTR_ERR(adap->fd);
+
+ adap->rd = devm_platform_ioremap_resource_byname(pdev, "rd");
+ if (IS_ERR(adap->rd))
+ return PTR_ERR(adap->rd);
+
+ return 0;
+}
+
+static int c3_mipi_adap_get_clocks(struct c3_adap_device *adap)
+{
+ const struct c3_adap_info *info = adap->info;
+
+ for (unsigned int i = 0; i < info->clock_num; i++)
+ adap->clks[i].id = info->clocks[i];
+
+ return devm_clk_bulk_get(adap->dev, info->clock_num, adap->clks);
+}
+
+static int c3_mipi_adap_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct c3_adap_device *adap;
+ int ret;
+
+ adap = devm_kzalloc(dev, sizeof(*adap), GFP_KERNEL);
+ if (!adap)
+ return -ENOMEM;
+
+ adap->info = of_device_get_match_data(dev);
+ adap->dev = dev;
+
+ ret = c3_mipi_adap_ioremap_resource(adap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to ioremap resource\n");
+
+ ret = c3_mipi_adap_get_clocks(adap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ platform_set_drvdata(pdev, adap);
+
+ pm_runtime_enable(dev);
+
+ ret = c3_mipi_adap_subdev_init(adap);
+ if (ret)
+ goto err_disable_runtime_pm;
+
+ ret = c3_mipi_adap_async_register(adap);
+ if (ret)
+ goto err_deinit_subdev;
+
+ return 0;
+
+err_deinit_subdev:
+ c3_mipi_adap_subdev_deinit(adap);
+err_disable_runtime_pm:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void c3_mipi_adap_remove(struct platform_device *pdev)
+{
+ struct c3_adap_device *adap = platform_get_drvdata(pdev);
+
+ c3_mipi_adap_async_unregister(adap);
+ c3_mipi_adap_subdev_deinit(adap);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct c3_adap_info c3_mipi_adap_info = {
+ .clocks = {"vapb", "isp0"},
+ .clock_num = 2
+};
+
+static const struct of_device_id c3_mipi_adap_of_match[] = {
+ {
+ .compatible = "amlogic,c3-mipi-adapter",
+ .data = &c3_mipi_adap_info
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, c3_mipi_adap_of_match);
+
+static struct platform_driver c3_mipi_adap_driver = {
+ .probe = c3_mipi_adap_probe,
+ .remove = c3_mipi_adap_remove,
+ .driver = {
+ .name = "c3-mipi-adapter",
+ .of_match_table = c3_mipi_adap_of_match,
+ .pm = pm_ptr(&c3_mipi_adap_pm_ops),
+ },
+};
+
+module_platform_driver(c3_mipi_adap_driver);
+
+MODULE_AUTHOR("Keke Li <keke.li@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic C3 MIPI adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig b/drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig
new file mode 100644
index 000000000000..0d7b2e203273
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_C3_MIPI_CSI2
+ tristate "Amlogic C3 MIPI CSI-2 receiver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on OF
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+	  Video4Linux2 driver for the Amlogic C3 MIPI CSI-2 receiver,
+	  which is used to receive MIPI data from the image sensor.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called c3-mipi-csi2.
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/Makefile b/drivers/media/platform/amlogic/c3/mipi-csi2/Makefile
new file mode 100644
index 000000000000..cc08fc722bfd
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_VIDEO_C3_MIPI_CSI2) += c3-mipi-csi2.o
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
new file mode 100644
index 000000000000..1011ab3ebac7
--- /dev/null
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR MIT)
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+/* C3 CSI-2 submodule definition */
+enum {
+ SUBMD_APHY,
+ SUBMD_DPHY,
+ SUBMD_HOST,
+};
+
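+/*
+ * As in the MIPI adapter driver, bits [17:16] of a register constant
+ * select the sub-module (APHY, DPHY or HOST) and bits [15:0] the
+ * register offset; c3_mipi_csi_write() splits the constant again with
+ * CSI2_SUBMD() and CSI2_REG_ADDR() to address the right ioremapped
+ * region.
+ */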
+#define CSI2_SUBMD_MASK GENMASK(17, 16)
+#define CSI2_SUBMD_SHIFT 16
+#define CSI2_SUBMD(x) (((x) & (CSI2_SUBMD_MASK)) >> (CSI2_SUBMD_SHIFT))
+#define CSI2_REG_ADDR_MASK GENMASK(15, 0)
+#define CSI2_REG_ADDR(x) ((x) & (CSI2_REG_ADDR_MASK))
+#define CSI2_REG_A(x) ((SUBMD_APHY << CSI2_SUBMD_SHIFT) | (x))
+#define CSI2_REG_D(x) ((SUBMD_DPHY << CSI2_SUBMD_SHIFT) | (x))
+#define CSI2_REG_H(x) ((SUBMD_HOST << CSI2_SUBMD_SHIFT) | (x))
+
+#define MIPI_CSI2_CLOCK_NUM_MAX 3
+#define MIPI_CSI2_SUBDEV_NAME "c3-mipi-csi2"
+
+/* C3 CSI-2 APHY register */
+#define CSI_PHY_CNTL0 CSI2_REG_A(0x44)
+#define CSI_PHY_CNTL0_HS_LP_BIAS_EN BIT(10)
+#define CSI_PHY_CNTL0_HS_RX_TRIM_11 (11 << 11)
+#define CSI_PHY_CNTL0_LP_LOW_VTH_2 (2 << 16)
+#define CSI_PHY_CNTL0_LP_HIGH_VTH_4 (4 << 20)
+#define CSI_PHY_CNTL0_DATA_LANE0_HS_DIG_EN BIT(24)
+#define CSI_PHY_CNTL0_DATA_LANE1_HS_DIG_EN BIT(25)
+#define CSI_PHY_CNTL0_CLK0_LANE_HS_DIG_EN BIT(26)
+#define CSI_PHY_CNTL0_DATA_LANE2_HS_DIG_EN BIT(27)
+#define CSI_PHY_CNTL0_DATA_LANE3_HS_DIG_EN BIT(28)
+
+#define CSI_PHY_CNTL1 CSI2_REG_A(0x48)
+#define CSI_PHY_CNTL1_HS_EQ_CAP_SMALL (2 << 16)
+#define CSI_PHY_CNTL1_HS_EQ_CAP_BIG (3 << 16)
+#define CSI_PHY_CNTL1_HS_EQ_RES_MIN (3 << 18)
+#define CSI_PHY_CNTL1_HS_EQ_RES_MED (2 << 18)
+#define CSI_PHY_CNTL1_HS_EQ_RES_MAX BIT(18)
+#define CSI_PHY_CNTL1_CLK_CHN_EQ_MAX_GAIN BIT(20)
+#define CSI_PHY_CNTL1_DATA_CHN_EQ_MAX_GAIN BIT(21)
+#define CSI_PHY_CNTL1_COM_BG_EN BIT(24)
+#define CSI_PHY_CNTL1_HS_SYNC_EN BIT(25)
+
+/* C3 CSI-2 DPHY register */
+#define MIPI_PHY_CTRL CSI2_REG_D(0x00)
+#define MIPI_PHY_CTRL_DATA_LANE0_EN (0 << 0)
+#define MIPI_PHY_CTRL_DATA_LANE0_DIS BIT(0)
+#define MIPI_PHY_CTRL_DATA_LANE1_EN (0 << 1)
+#define MIPI_PHY_CTRL_DATA_LANE1_DIS BIT(1)
+#define MIPI_PHY_CTRL_DATA_LANE2_EN (0 << 2)
+#define MIPI_PHY_CTRL_DATA_LANE2_DIS BIT(2)
+#define MIPI_PHY_CTRL_DATA_LANE3_EN (0 << 3)
+#define MIPI_PHY_CTRL_DATA_LANE3_DIS BIT(3)
+#define MIPI_PHY_CTRL_CLOCK_LANE_EN (0 << 4)
+#define MIPI_PHY_CTRL_CLOCK_LANE_DIS BIT(4)
+
+#define MIPI_PHY_CLK_LANE_CTRL CSI2_REG_D(0x04)
+#define MIPI_PHY_CLK_LANE_CTRL_FORCE_ULPS_ENTER BIT(0)
+#define MIPI_PHY_CLK_LANE_CTRL_FORCE_ULPS_EXIT BIT(1)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS (0 << 3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_2 BIT(3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_4 (2 << 3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_8 (3 << 3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_16 (4 << 3)
+#define MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_EN BIT(6)
+#define MIPI_PHY_CLK_LANE_CTRL_LPEN_DIS BIT(7)
+#define MIPI_PHY_CLK_LANE_CTRL_END_EN BIT(8)
+#define MIPI_PHY_CLK_LANE_CTRL_HS_RX_EN BIT(9)
+
+#define MIPI_PHY_DATA_LANE_CTRL1 CSI2_REG_D(0x0c)
+#define MIPI_PHY_DATA_LANE_CTRL1_INSERT_ERRESC BIT(0)
+#define MIPI_PHY_DATA_LANE_CTRL1_HS_SYNC_CHK_EN BIT(1)
+#define MIPI_PHY_DATA_LANE_CTRL1_PIPE_MASK GENMASK(6, 2)
+#define MIPI_PHY_DATA_LANE_CTRL1_PIPE_ALL_EN (0x1f << 2)
+#define MIPI_PHY_DATA_LANE_CTRL1_PIPE_DELAY_MASK GENMASK(9, 7)
+#define MIPI_PHY_DATA_LANE_CTRL1_PIPE_DELAY_3 (3 << 7)
+
+#define MIPI_PHY_TCLK_MISS CSI2_REG_D(0x10)
+#define MIPI_PHY_TCLK_MISS_CYCLES_MASK GENMASK(7, 0)
+#define MIPI_PHY_TCLK_MISS_CYCLES_9 (9 << 0)
+
+#define MIPI_PHY_TCLK_SETTLE CSI2_REG_D(0x14)
+#define MIPI_PHY_TCLK_SETTLE_CYCLES_MASK GENMASK(7, 0)
+#define MIPI_PHY_TCLK_SETTLE_CYCLES_31 (31 << 0)
+
+#define MIPI_PHY_THS_EXIT CSI2_REG_D(0x18)
+#define MIPI_PHY_THS_EXIT_CYCLES_MASK GENMASK(7, 0)
+#define MIPI_PHY_THS_EXIT_CYCLES_8 (8 << 0)
+
+#define MIPI_PHY_THS_SKIP CSI2_REG_D(0x1c)
+#define MIPI_PHY_THS_SKIP_CYCLES_MASK GENMASK(7, 0)
+#define MIPI_PHY_THS_SKIP_CYCLES_10 (10 << 0)
+
+#define MIPI_PHY_THS_SETTLE CSI2_REG_D(0x20)
+#define MIPI_PHY_THS_SETTLE_CYCLES_MASK GENMASK(7, 0)
+
+#define MIPI_PHY_TINIT CSI2_REG_D(0x24)
+#define MIPI_PHY_TINIT_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TINIT_CYCLES_20000 (20000 << 0)
+
+#define MIPI_PHY_TULPS_C CSI2_REG_D(0x28)
+#define MIPI_PHY_TULPS_C_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TULPS_C_CYCLES_4096 (4096 << 0)
+
+#define MIPI_PHY_TULPS_S CSI2_REG_D(0x2c)
+#define MIPI_PHY_TULPS_S_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TULPS_S_CYCLES_256 (256 << 0)
+
+#define MIPI_PHY_TMBIAS CSI2_REG_D(0x30)
+#define MIPI_PHY_TMBIAS_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TMBIAS_CYCLES_256 (256 << 0)
+
+#define MIPI_PHY_TLP_EN_W CSI2_REG_D(0x34)
+#define MIPI_PHY_TLP_EN_W_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TLP_EN_W_CYCLES_12 (12 << 0)
+
+#define MIPI_PHY_TLPOK CSI2_REG_D(0x38)
+#define MIPI_PHY_TLPOK_CYCLES_MASK GENMASK(31, 0)
+#define MIPI_PHY_TLPOK_CYCLES_256 (256 << 0)
+
+#define MIPI_PHY_TWD_INIT CSI2_REG_D(0x3c)
+#define MIPI_PHY_TWD_INIT_DOG_MASK GENMASK(31, 0)
+#define MIPI_PHY_TWD_INIT_DOG_0X400000 (0x400000 << 0)
+
+#define MIPI_PHY_TWD_HS CSI2_REG_D(0x40)
+#define MIPI_PHY_TWD_HS_DOG_MASK GENMASK(31, 0)
+#define MIPI_PHY_TWD_HS_DOG_0X400000 (0x400000 << 0)
+
+#define MIPI_PHY_MUX_CTRL0 CSI2_REG_D(0x284)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_MASK GENMASK(3, 0)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE0 (0 << 0)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE1 BIT(0)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE2 (2 << 0)
+#define MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE3 (3 << 0)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_MASK GENMASK(7, 4)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE0 (0 << 4)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE1 BIT(4)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE2 (2 << 4)
+#define MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE3 (3 << 4)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_MASK GENMASK(11, 8)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE0 (0 << 8)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE1 BIT(8)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE2 (2 << 8)
+#define MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE3 (3 << 8)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_MASK GENMASK(14, 12)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE0 (0 << 12)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE1 BIT(12)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE2 (2 << 12)
+#define MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE3 (3 << 12)
+
+#define MIPI_PHY_MUX_CTRL1 CSI2_REG_D(0x288)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_MASK GENMASK(3, 0)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN0 (0 << 0)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN1 BIT(0)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN2 (2 << 0)
+#define MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN3 (3 << 0)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_MASK GENMASK(7, 4)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN0 (0 << 4)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN1 BIT(4)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN2 (2 << 4)
+#define MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN3 (3 << 4)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_MASK GENMASK(11, 8)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN0 (0 << 8)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN1 BIT(8)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN2 (2 << 8)
+#define MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN3 (3 << 8)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_MASK GENMASK(14, 12)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN0 (0 << 12)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN1 (1 << 12)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN2 (2 << 12)
+#define MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN3 (3 << 12)
+
+/* C3 CSI-2 HOST register */
+#define CSI2_HOST_N_LANES CSI2_REG_H(0x04)
+#define CSI2_HOST_N_LANES_MASK GENMASK(1, 0)
+#define CSI2_HOST_N_LANES_1 (0 << 0)
+#define CSI2_HOST_N_LANES_2 (1 << 0)
+#define CSI2_HOST_N_LANES_3 (2 << 0)
+#define CSI2_HOST_N_LANES_4 (3 << 0)
+
+#define CSI2_HOST_CSI2_RESETN CSI2_REG_H(0x10)
+#define CSI2_HOST_CSI2_RESETN_MASK BIT(0)
+#define CSI2_HOST_CSI2_RESETN_ACTIVE (0 << 0)
+#define CSI2_HOST_CSI2_RESETN_EXIT BIT(0)
+
+#define C3_MIPI_CSI2_MAX_WIDTH 2888
+#define C3_MIPI_CSI2_MIN_WIDTH 160
+#define C3_MIPI_CSI2_MAX_HEIGHT 2240
+#define C3_MIPI_CSI2_MIN_HEIGHT 120
+#define C3_MIPI_CSI2_DEFAULT_WIDTH 1920
+#define C3_MIPI_CSI2_DEFAULT_HEIGHT 1080
+#define C3_MIPI_CSI2_DEFAULT_FMT MEDIA_BUS_FMT_SRGGB10_1X10
+
+/* C3 CSI-2 pad list */
+enum {
+ C3_MIPI_CSI2_PAD_SINK,
+ C3_MIPI_CSI2_PAD_SRC,
+ C3_MIPI_CSI2_PAD_MAX
+};
+
+/*
+ * struct c3_csi_info - MIPI CSI2 information
+ *
+ * @clocks: array of MIPI CSI2 clock names
+ * @clock_num: number of clock names actually used in @clocks
+ */
+struct c3_csi_info {
+ char *clocks[MIPI_CSI2_CLOCK_NUM_MAX];
+ u32 clock_num;
+};
+
+/*
+ * struct c3_csi_device - MIPI CSI2 platform device
+ *
+ * @dev: pointer to the struct device
+ * @aphy: MIPI CSI2 aphy register address
+ * @dphy: MIPI CSI2 dphy register address
+ * @host: MIPI CSI2 host register address
+ * @clks: array of MIPI CSI2 clocks
+ * @sd: MIPI CSI2 sub-device
+ * @pads: MIPI CSI2 sub-device pads
+ * @notifier: notifier to register on the v4l2-async API
+ * @src_pad: source sub-device pad
+ * @bus: MIPI CSI2 bus information
+ * @info: version-specific MIPI CSI2 information
+ */
+struct c3_csi_device {
+ struct device *dev;
+ void __iomem *aphy;
+ void __iomem *dphy;
+ void __iomem *host;
+ struct clk_bulk_data clks[MIPI_CSI2_CLOCK_NUM_MAX];
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[C3_MIPI_CSI2_PAD_MAX];
+ struct v4l2_async_notifier notifier;
+ struct media_pad *src_pad;
+ struct v4l2_mbus_config_mipi_csi2 bus;
+
+ const struct c3_csi_info *info;
+};
+
+static const u32 c3_mipi_csi_formats[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+};
+
+/* Hardware configuration */
+
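+/*
+ * The CSI2_REG_D()/CSI2_REG_H() macros pack the owning sub-module into
+ * the register cookie; CSI2_SUBMD() and CSI2_REG_ADDR() unpack it here
+ * to pick the matching MMIO base.
+ */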
+static void c3_mipi_csi_write(struct c3_csi_device *csi, u32 reg, u32 val)
+{
+ void __iomem *addr;
+
+ switch (CSI2_SUBMD(reg)) {
+ case SUBMD_APHY:
+ addr = csi->aphy + CSI2_REG_ADDR(reg);
+ break;
+ case SUBMD_DPHY:
+ addr = csi->dphy + CSI2_REG_ADDR(reg);
+ break;
+ case SUBMD_HOST:
+ addr = csi->host + CSI2_REG_ADDR(reg);
+ break;
+ default:
+ dev_err(csi->dev, "Invalid sub-module: %lu\n", CSI2_SUBMD(reg));
+ return;
+ }
+
+ writel(val, addr);
+}
+
+static void c3_mipi_csi_cfg_aphy(struct c3_csi_device *csi)
+{
+ c3_mipi_csi_write(csi, CSI_PHY_CNTL0,
+ CSI_PHY_CNTL0_HS_LP_BIAS_EN |
+ CSI_PHY_CNTL0_HS_RX_TRIM_11 |
+ CSI_PHY_CNTL0_LP_LOW_VTH_2 |
+ CSI_PHY_CNTL0_LP_HIGH_VTH_4 |
+ CSI_PHY_CNTL0_DATA_LANE0_HS_DIG_EN |
+ CSI_PHY_CNTL0_DATA_LANE1_HS_DIG_EN |
+ CSI_PHY_CNTL0_CLK0_LANE_HS_DIG_EN |
+ CSI_PHY_CNTL0_DATA_LANE2_HS_DIG_EN |
+ CSI_PHY_CNTL0_DATA_LANE3_HS_DIG_EN);
+
+ c3_mipi_csi_write(csi, CSI_PHY_CNTL1,
+ CSI_PHY_CNTL1_HS_EQ_CAP_SMALL |
+ CSI_PHY_CNTL1_HS_EQ_RES_MED |
+ CSI_PHY_CNTL1_CLK_CHN_EQ_MAX_GAIN |
+ CSI_PHY_CNTL1_DATA_CHN_EQ_MAX_GAIN |
+ CSI_PHY_CNTL1_COM_BG_EN |
+ CSI_PHY_CNTL1_HS_SYNC_EN);
+}
+
+static void c3_mipi_csi_cfg_dphy(struct c3_csi_device *csi, s64 rate)
+{
+ u32 val;
+ u32 settle;
+
+	/* Calculate the high-speed settle; val is the UI (bit period) in ns */
+ val = DIV_ROUND_UP_ULL(1000000000, rate);
+ settle = (16 * val + 230) / 10;
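+	/* e.g. a 800 Mbit/s lane rate gives val = 2, so settle = (16 * 2 + 230) / 10 = 26 */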
+
+ c3_mipi_csi_write(csi, MIPI_PHY_CLK_LANE_CTRL,
+ MIPI_PHY_CLK_LANE_CTRL_HS_RX_EN |
+ MIPI_PHY_CLK_LANE_CTRL_END_EN |
+ MIPI_PHY_CLK_LANE_CTRL_LPEN_DIS |
+ MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_EN |
+ MIPI_PHY_CLK_LANE_CTRL_TCLK_ZERO_HS_8);
+
+ c3_mipi_csi_write(csi, MIPI_PHY_TCLK_MISS, MIPI_PHY_TCLK_MISS_CYCLES_9);
+ c3_mipi_csi_write(csi, MIPI_PHY_TCLK_SETTLE,
+ MIPI_PHY_TCLK_SETTLE_CYCLES_31);
+ c3_mipi_csi_write(csi, MIPI_PHY_THS_EXIT, MIPI_PHY_THS_EXIT_CYCLES_8);
+ c3_mipi_csi_write(csi, MIPI_PHY_THS_SKIP, MIPI_PHY_THS_SKIP_CYCLES_10);
+ c3_mipi_csi_write(csi, MIPI_PHY_THS_SETTLE, settle);
+ c3_mipi_csi_write(csi, MIPI_PHY_TINIT, MIPI_PHY_TINIT_CYCLES_20000);
+ c3_mipi_csi_write(csi, MIPI_PHY_TMBIAS, MIPI_PHY_TMBIAS_CYCLES_256);
+ c3_mipi_csi_write(csi, MIPI_PHY_TULPS_C, MIPI_PHY_TULPS_C_CYCLES_4096);
+ c3_mipi_csi_write(csi, MIPI_PHY_TULPS_S, MIPI_PHY_TULPS_S_CYCLES_256);
+ c3_mipi_csi_write(csi, MIPI_PHY_TLP_EN_W, MIPI_PHY_TLP_EN_W_CYCLES_12);
+ c3_mipi_csi_write(csi, MIPI_PHY_TLPOK, MIPI_PHY_TLPOK_CYCLES_256);
+ c3_mipi_csi_write(csi, MIPI_PHY_TWD_INIT,
+ MIPI_PHY_TWD_INIT_DOG_0X400000);
+ c3_mipi_csi_write(csi, MIPI_PHY_TWD_HS, MIPI_PHY_TWD_HS_DOG_0X400000);
+
+ c3_mipi_csi_write(csi, MIPI_PHY_DATA_LANE_CTRL1,
+ MIPI_PHY_DATA_LANE_CTRL1_INSERT_ERRESC |
+ MIPI_PHY_DATA_LANE_CTRL1_HS_SYNC_CHK_EN |
+ MIPI_PHY_DATA_LANE_CTRL1_PIPE_ALL_EN |
+ MIPI_PHY_DATA_LANE_CTRL1_PIPE_DELAY_3);
+
+ /* Set the order of lanes */
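+	/* Identity mapping: each SFEN taps its same-numbered lane and vice versa */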
+ c3_mipi_csi_write(csi, MIPI_PHY_MUX_CTRL0,
+ MIPI_PHY_MUX_CTRL0_SFEN3_SRC_LANE3 |
+ MIPI_PHY_MUX_CTRL0_SFEN2_SRC_LANE2 |
+ MIPI_PHY_MUX_CTRL0_SFEN1_SRC_LANE1 |
+ MIPI_PHY_MUX_CTRL0_SFEN0_SRC_LANE0);
+
+ c3_mipi_csi_write(csi, MIPI_PHY_MUX_CTRL1,
+ MIPI_PHY_MUX_CTRL1_LANE3_SRC_SFEN3 |
+ MIPI_PHY_MUX_CTRL1_LANE2_SRC_SFEN2 |
+ MIPI_PHY_MUX_CTRL1_LANE1_SRC_SFEN1 |
+ MIPI_PHY_MUX_CTRL1_LANE0_SRC_SFEN0);
+
+ /* Enable digital data and clock lanes */
+ c3_mipi_csi_write(csi, MIPI_PHY_CTRL,
+ MIPI_PHY_CTRL_DATA_LANE0_EN |
+ MIPI_PHY_CTRL_DATA_LANE1_EN |
+ MIPI_PHY_CTRL_DATA_LANE2_EN |
+ MIPI_PHY_CTRL_DATA_LANE3_EN |
+ MIPI_PHY_CTRL_CLOCK_LANE_EN);
+}
+
+static void c3_mipi_csi_cfg_host(struct c3_csi_device *csi)
+{
+ /* Reset CSI-2 controller output */
+ c3_mipi_csi_write(csi, CSI2_HOST_CSI2_RESETN,
+ CSI2_HOST_CSI2_RESETN_ACTIVE);
+ c3_mipi_csi_write(csi, CSI2_HOST_CSI2_RESETN,
+ CSI2_HOST_CSI2_RESETN_EXIT);
+
+ /* Set data lane number */
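+	/* The register encodes lanes minus one, e.g. 2 data lanes -> CSI2_HOST_N_LANES_2 (1) */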
+ c3_mipi_csi_write(csi, CSI2_HOST_N_LANES, csi->bus.num_data_lanes - 1);
+}
+
+static int c3_mipi_csi_start_stream(struct c3_csi_device *csi,
+ struct v4l2_subdev *src_sd)
+{
+ s64 link_freq;
+ s64 lane_rate;
+
+ link_freq = v4l2_get_link_freq(src_sd->ctrl_handler, 0, 0);
+ if (link_freq < 0) {
+ dev_err(csi->dev,
+ "Unable to obtain link frequency: %lld\n", link_freq);
+ return link_freq;
+ }
+
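+	/* D-PHY transfers data on both clock edges, so the per-lane bit rate is twice the link frequency */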
+ lane_rate = link_freq * 2;
+ if (lane_rate > 1500000000) {
+ dev_err(csi->dev, "Invalid lane rate: %lld\n", lane_rate);
+ return -EINVAL;
+ }
+
+ c3_mipi_csi_cfg_aphy(csi);
+ c3_mipi_csi_cfg_dphy(csi, lane_rate);
+ c3_mipi_csi_cfg_host(csi);
+
+ return 0;
+}
+
+static int c3_mipi_csi_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_csi_device *csi = v4l2_get_subdevdata(sd);
+ struct media_pad *sink_pad;
+ struct v4l2_subdev *src_sd;
+ int ret;
+
+ sink_pad = &csi->pads[C3_MIPI_CSI2_PAD_SINK];
+	sink_pad = &csi->pads[C3_MIPI_CSI2_PAD_SINK];
+	csi->src_pad = media_pad_remote_pad_unique(sink_pad);
+	if (IS_ERR(csi->src_pad)) {
+		dev_dbg(csi->dev, "Failed to get source pad for MIPI CSI-2\n");
+		csi->src_pad = NULL;
+		return -EPIPE;
+	}
+
+ src_sd = media_entity_to_v4l2_subdev(csi->src_pad->entity);
+
+	ret = pm_runtime_resume_and_get(csi->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = c3_mipi_csi_start_stream(csi, src_sd);
+	if (ret) {
+		pm_runtime_put(csi->dev);
+		return ret;
+	}
+
+	ret = v4l2_subdev_enable_streams(src_sd, csi->src_pad->index, BIT(0));
+	if (ret) {
+		pm_runtime_put(csi->dev);
+		return ret;
+	}
+
+ return 0;
+}
+
+static int c3_mipi_csi_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct c3_csi_device *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd;
+
+ if (csi->src_pad) {
+ src_sd = media_entity_to_v4l2_subdev(csi->src_pad->entity);
+ v4l2_subdev_disable_streams(src_sd, csi->src_pad->index,
+ BIT(0));
+ }
+ csi->src_pad = NULL;
+
+ pm_runtime_put(csi->dev);
+
+ return 0;
+}
+
+static int c3_mipi_csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ switch (code->pad) {
+ case C3_MIPI_CSI2_PAD_SINK:
+ if (code->index >= ARRAY_SIZE(c3_mipi_csi_formats))
+ return -EINVAL;
+
+ code->code = c3_mipi_csi_formats[code->index];
+ break;
+ case C3_MIPI_CSI2_PAD_SRC:
+ if (code->index)
+ return -EINVAL;
+
+ fmt = v4l2_subdev_state_get_format(state, code->pad);
+ code->code = fmt->code;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int c3_mipi_csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ unsigned int i;
+
+ if (format->pad != C3_MIPI_CSI2_PAD_SINK)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ fmt = v4l2_subdev_state_get_format(state, format->pad);
+
+ for (i = 0; i < ARRAY_SIZE(c3_mipi_csi_formats); i++) {
+ if (format->format.code == c3_mipi_csi_formats[i]) {
+ fmt->code = c3_mipi_csi_formats[i];
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(c3_mipi_csi_formats))
+ fmt->code = c3_mipi_csi_formats[0];
+
+ fmt->width = clamp_t(u32, format->format.width,
+ C3_MIPI_CSI2_MIN_WIDTH, C3_MIPI_CSI2_MAX_WIDTH);
+ fmt->height = clamp_t(u32, format->format.height,
+ C3_MIPI_CSI2_MIN_HEIGHT, C3_MIPI_CSI2_MAX_HEIGHT);
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ format->format = *fmt;
+
+ /* Synchronize the format to source pad */
+ fmt = v4l2_subdev_state_get_format(state, C3_MIPI_CSI2_PAD_SRC);
+ *fmt = format->format;
+
+ return 0;
+}
+
+static int c3_mipi_csi_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ sink_fmt = v4l2_subdev_state_get_format(state, C3_MIPI_CSI2_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(state, C3_MIPI_CSI2_PAD_SRC);
+
+ sink_fmt->width = C3_MIPI_CSI2_DEFAULT_WIDTH;
+ sink_fmt->height = C3_MIPI_CSI2_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = C3_MIPI_CSI2_DEFAULT_FMT;
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ *src_fmt = *sink_fmt;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops c3_mipi_csi_pad_ops = {
+ .enum_mbus_code = c3_mipi_csi_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = c3_mipi_csi_set_fmt,
+ .enable_streams = c3_mipi_csi_enable_streams,
+ .disable_streams = c3_mipi_csi_disable_streams,
+};
+
+static const struct v4l2_subdev_ops c3_mipi_csi_subdev_ops = {
+ .pad = &c3_mipi_csi_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops c3_mipi_csi_internal_ops = {
+ .init_state = c3_mipi_csi_init_state,
+};
+
+/* Media entity operations */
+static const struct media_entity_operations c3_mipi_csi_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* PM runtime */
+
+static int c3_mipi_csi_runtime_suspend(struct device *dev)
+{
+ struct c3_csi_device *csi = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(csi->info->clock_num, csi->clks);
+
+ return 0;
+}
+
+static int c3_mipi_csi_runtime_resume(struct device *dev)
+{
+ struct c3_csi_device *csi = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(csi->info->clock_num, csi->clks);
+}
+
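+/*
+ * pm_runtime_force_suspend()/resume() reuse the runtime PM callbacks
+ * above for system-wide suspend and resume.
+ */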
+static const struct dev_pm_ops c3_mipi_csi_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(c3_mipi_csi_runtime_suspend,
+ c3_mipi_csi_runtime_resume, NULL)
+};
+
+/* Probe/remove & platform driver */
+
+static int c3_mipi_csi_subdev_init(struct c3_csi_device *csi)
+{
+ struct v4l2_subdev *sd = &csi->sd;
+ int ret;
+
+ v4l2_subdev_init(sd, &c3_mipi_csi_subdev_ops);
+ sd->owner = THIS_MODULE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &c3_mipi_csi_internal_ops;
+ snprintf(sd->name, sizeof(sd->name), "%s", MIPI_CSI2_SUBDEV_NAME);
+
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->entity.ops = &c3_mipi_csi_entity_ops;
+
+ sd->dev = csi->dev;
+ v4l2_set_subdevdata(sd, csi);
+
+ csi->pads[C3_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csi->pads[C3_MIPI_CSI2_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&sd->entity, C3_MIPI_CSI2_PAD_MAX,
+ csi->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret) {
+ media_entity_cleanup(&sd->entity);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void c3_mipi_csi_subdev_deinit(struct c3_csi_device *csi)
+{
+ v4l2_subdev_cleanup(&csi->sd);
+ media_entity_cleanup(&csi->sd.entity);
+}
+
+/* Subdev notifier register */
+static int c3_mipi_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct c3_csi_device *csi = v4l2_get_subdevdata(notifier->sd);
+ struct media_pad *sink = &csi->sd.entity.pads[C3_MIPI_CSI2_PAD_SINK];
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static const struct v4l2_async_notifier_operations c3_mipi_csi_notify_ops = {
+ .bound = c3_mipi_csi_notify_bound,
+};
+
+static int c3_mipi_csi_async_register(struct c3_csi_device *csi)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_subdev_nf_init(&csi->notifier, &csi->sd);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_put_handle;
+
+ csi->bus = vep.bus.mipi_csi2;
+
+ asc = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
+ goto err_put_handle;
+ }
+
+ csi->notifier.ops = &c3_mipi_csi_notify_ops;
+ ret = v4l2_async_nf_register(&csi->notifier);
+ if (ret)
+ goto err_cleanup_nf;
+
+ ret = v4l2_async_register_subdev(&csi->sd);
+ if (ret)
+ goto err_unregister_nf;
+
+ fwnode_handle_put(ep);
+
+ return 0;
+
+err_unregister_nf:
+ v4l2_async_nf_unregister(&csi->notifier);
+err_cleanup_nf:
+ v4l2_async_nf_cleanup(&csi->notifier);
+err_put_handle:
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static void c3_mipi_csi_async_unregister(struct c3_csi_device *csi)
+{
+ v4l2_async_unregister_subdev(&csi->sd);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+}
+
+static int c3_mipi_csi_ioremap_resource(struct c3_csi_device *csi)
+{
+ struct device *dev = csi->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ csi->aphy = devm_platform_ioremap_resource_byname(pdev, "aphy");
+ if (IS_ERR(csi->aphy))
+ return PTR_ERR(csi->aphy);
+
+ csi->dphy = devm_platform_ioremap_resource_byname(pdev, "dphy");
+ if (IS_ERR(csi->dphy))
+ return PTR_ERR(csi->dphy);
+
+ csi->host = devm_platform_ioremap_resource_byname(pdev, "host");
+ if (IS_ERR(csi->host))
+ return PTR_ERR(csi->host);
+
+ return 0;
+}
+
+static int c3_mipi_csi_get_clocks(struct c3_csi_device *csi)
+{
+ const struct c3_csi_info *info = csi->info;
+
+ for (unsigned int i = 0; i < info->clock_num; i++)
+ csi->clks[i].id = info->clocks[i];
+
+ return devm_clk_bulk_get(csi->dev, info->clock_num, csi->clks);
+}
+
+static int c3_mipi_csi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct c3_csi_device *csi;
+ int ret;
+
+ csi = devm_kzalloc(dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->info = of_device_get_match_data(dev);
+ csi->dev = dev;
+
+ ret = c3_mipi_csi_ioremap_resource(csi);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to ioremap resource\n");
+
+ ret = c3_mipi_csi_get_clocks(csi);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ platform_set_drvdata(pdev, csi);
+
+ pm_runtime_enable(dev);
+
+ ret = c3_mipi_csi_subdev_init(csi);
+ if (ret)
+ goto err_disable_runtime_pm;
+
+ ret = c3_mipi_csi_async_register(csi);
+ if (ret)
+ goto err_deinit_subdev;
+
+ return 0;
+
+err_deinit_subdev:
+ c3_mipi_csi_subdev_deinit(csi);
+err_disable_runtime_pm:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void c3_mipi_csi_remove(struct platform_device *pdev)
+{
+ struct c3_csi_device *csi = platform_get_drvdata(pdev);
+
+ c3_mipi_csi_async_unregister(csi);
+ c3_mipi_csi_subdev_deinit(csi);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct c3_csi_info c3_mipi_csi_info = {
+ .clocks = {"vapb", "phy0"},
+ .clock_num = 2
+};
+
+static const struct of_device_id c3_mipi_csi_of_match[] = {
+ {
+ .compatible = "amlogic,c3-mipi-csi2",
+ .data = &c3_mipi_csi_info,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, c3_mipi_csi_of_match);
+
+static struct platform_driver c3_mipi_csi_driver = {
+ .probe = c3_mipi_csi_probe,
+ .remove = c3_mipi_csi_remove,
+ .driver = {
+ .name = "c3-mipi-csi2",
+ .of_match_table = c3_mipi_csi_of_match,
+ .pm = pm_ptr(&c3_mipi_csi_pm_ops),
+ },
+};
+
+module_platform_driver(c3_mipi_csi_driver);
+
+MODULE_AUTHOR("Keke Li <keke.li@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic C3 MIPI CSI-2 receiver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 6a38a0fa0e2d..85d518823159 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -805,7 +805,7 @@ static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
cur_fmt = vpu_get_format(inst, inst->cap_format.type);
vbuf = &vpu_buf->m2m_buf.vb;
if (vbuf->vb2_buf.index != frame->id)
- dev_err(inst->dev, "[%d] buffer id(%d, %d) dismatch\n",
+ dev_err(inst->dev, "[%d] buffer id(%d, %d) mismatch\n",
inst->id, vbuf->vb2_buf.index, frame->id);
if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_READY && vdec->params.display_delay_enable)
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index 22f0da26ccec..1451549c9dd2 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -162,7 +162,6 @@ struct vpu_core {
struct delayed_work msg_delayed_work;
struct kfifo msg_fifo;
void *msg_buffer;
- unsigned int msg_buffer_size;
struct vpu_dev *vpu;
void *iface;
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
index 8df85c14ab3f..da00f5fc0e5d 100644
--- a/drivers/media/platform/amphion/vpu_core.c
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -250,6 +250,7 @@ static void vpu_core_get_vpu(struct vpu_core *core)
static int vpu_core_register(struct device *dev, struct vpu_core *core)
{
struct vpu_dev *vpu = dev_get_drvdata(dev);
+ unsigned int buffer_size;
int ret = 0;
dev_dbg(core->dev, "register core %s\n", vpu_core_type_desc(core->type));
@@ -263,14 +264,14 @@ static int vpu_core_register(struct device *dev, struct vpu_core *core)
}
INIT_WORK(&core->msg_work, vpu_msg_run_work);
INIT_DELAYED_WORK(&core->msg_delayed_work, vpu_msg_delayed_work);
- core->msg_buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
- core->msg_buffer = vzalloc(core->msg_buffer_size);
+ buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
+ core->msg_buffer = vzalloc(buffer_size);
if (!core->msg_buffer) {
dev_err(core->dev, "failed allocate buffer for fifo\n");
ret = -ENOMEM;
goto error;
}
- ret = kfifo_init(&core->msg_fifo, core->msg_buffer, core->msg_buffer_size);
+ ret = kfifo_init(&core->msg_fifo, core->msg_buffer, buffer_size);
if (ret) {
dev_err(core->dev, "failed init kfifo\n");
goto error;
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index 4769c053c6c2..feca7d4220ed 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -3,6 +3,7 @@
* Copyright 2020-2021 NXP
*/
+#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
@@ -25,6 +26,10 @@
#include "vpu_imx8q.h"
#include "vpu_malone.h"
+static bool low_latency;
+module_param(low_latency, bool, 0644);
+MODULE_PARM_DESC(low_latency, "Set low latency frame flush mode: 0 (disable) or 1 (enable)");
+
#define CMD_SIZE 25600
#define MSG_SIZE 25600
#define CODEC_SIZE 0x1000
@@ -68,6 +73,12 @@
#define MALONE_DEC_FMT_RV_MASK BIT(21)
+#define MALONE_VERSION_MASK 0xFFFFF
+#define MALONE_VERSION(maj, min, inc) \
+ (FIELD_PREP(0xF0000, maj) | FIELD_PREP(0xFF00, min) | FIELD_PREP(0xFF, inc))
+#define CHECK_VERSION(iface, maj, min) \
+ (FIELD_GET(MALONE_VERSION_MASK, (iface)->fw_version) >= MALONE_VERSION(maj, min, 0))
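+/* e.g. CHECK_VERSION(iface, 1, 9) is true for firmware version 1.9.0 and newer */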
+
enum vpu_malone_stream_input_mode {
INVALID_MODE = 0,
FRAME_LVL,
@@ -332,6 +343,8 @@ struct vpu_dec_ctrl {
u32 buf_addr[VID_API_NUM_STREAMS];
};
+static const struct malone_padding_scode *get_padding_scode(u32 type, u32 fmt);
+
u32 vpu_malone_get_data_size(void)
{
return sizeof(struct vpu_dec_ctrl);
@@ -654,9 +667,15 @@ static int vpu_malone_set_params(struct vpu_shared_addr *shared,
hc->jpg[instance].jpg_mjpeg_interlaced = 0;
}
- hc->codec_param[instance].disp_imm = params->display_delay_enable ? 1 : 0;
- if (malone_format != MALONE_FMT_AVC)
+ if (params->display_delay_enable &&
+ get_padding_scode(SCODE_PADDING_BUFFLUSH, params->codec_format))
+ hc->codec_param[instance].disp_imm = 1;
+ else
+ hc->codec_param[instance].disp_imm = 0;
+
+ if (params->codec_format == V4L2_PIX_FMT_HEVC && !CHECK_VERSION(iface, 1, 9))
hc->codec_param[instance].disp_imm = 0;
+
hc->codec_param[instance].dbglog_enable = 0;
iface->dbglog_desc.level = 0;
@@ -1023,6 +1042,7 @@ static const struct malone_padding_scode padding_scodes[] = {
{SCODE_PADDING_EOS, V4L2_PIX_FMT_JPEG, {0x0, 0x0}},
{SCODE_PADDING_BUFFLUSH, V4L2_PIX_FMT_H264, {0x15010000, 0x0}},
{SCODE_PADDING_BUFFLUSH, V4L2_PIX_FMT_H264_MVC, {0x15010000, 0x0}},
+ {SCODE_PADDING_BUFFLUSH, V4L2_PIX_FMT_HEVC, {0x3e010000, 0x20}},
};
static const struct malone_padding_scode padding_scode_dft = {0x0, 0x0};
@@ -1057,8 +1077,11 @@ static int vpu_malone_add_padding_scode(struct vpu_buffer *stream_buffer,
int ret;
ps = get_padding_scode(scode_type, pixelformat);
- if (!ps)
+ if (!ps) {
+ if (scode_type == SCODE_PADDING_BUFFLUSH)
+ return 0;
return -EINVAL;
+ }
wptr = readl(&str_buf->wptr);
if (wptr < stream_buffer->phys || wptr > stream_buffer->phys + stream_buffer->length)
@@ -1562,7 +1585,15 @@ static int vpu_malone_input_frame_data(struct vpu_malone_str_buffer __iomem *str
vpu_malone_update_wptr(str_buf, wptr);
- if (disp_imm && !vpu_vb_is_codecconfig(vbuf)) {
+	/*
+	 * Append a flush scode after each frame when the display delay
+	 * is set to 0 (disp_imm) or when the low_latency module
+	 * parameter is enabled.
+	 * The flush mode requires padding data to be appended to each
+	 * frame, but no padding may be inserted between the sequence
+	 * header and the frame, so codec-config buffers are skipped.
+	 * This mode is currently only supported for the H264 and HEVC
+	 * formats; for other formats, vpu_malone_add_scode() returns 0.
+	 */
+ if ((disp_imm || low_latency) && !vpu_vb_is_codecconfig(vbuf)) {
ret = vpu_malone_add_scode(inst->core->iface,
inst->id,
&inst->stream_buffer,
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 0d1c39347529..a05a744cbb75 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1072,16 +1072,12 @@ static int isi_formats_init(struct atmel_isi *isi)
return -ENXIO;
isi->num_user_formats = num_fmts;
- isi->user_formats = devm_kcalloc(isi->dev,
- num_fmts, sizeof(struct isi_format *),
- GFP_KERNEL);
+ isi->user_formats = devm_kmemdup_array(isi->dev, isi_fmts, num_fmts,
+ sizeof(*isi_fmts), GFP_KERNEL);
if (!isi->user_formats)
return -ENOMEM;
- memcpy(isi->user_formats, isi_fmts,
- num_fmts * sizeof(struct isi_format *));
isi->current_fmt = isi->user_formats[0];
-
return 0;
}
diff --git a/drivers/media/platform/imagination/e5010-jpeg-enc.c b/drivers/media/platform/imagination/e5010-jpeg-enc.c
index c194f830577f..ae868d9f73e1 100644
--- a/drivers/media/platform/imagination/e5010-jpeg-enc.c
+++ b/drivers/media/platform/imagination/e5010-jpeg-enc.c
@@ -1057,8 +1057,11 @@ static int e5010_probe(struct platform_device *pdev)
e5010->vdev->lock = &e5010->mutex;
ret = v4l2_device_register(dev, &e5010->v4l2_dev);
- if (ret)
- return dev_err_probe(dev, ret, "failed to register v4l2 device\n");
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register v4l2 device\n");
+ goto fail_after_video_device_alloc;
+ }
+
e5010->m2m_dev = v4l2_m2m_init(&e5010_m2m_ops);
if (IS_ERR(e5010->m2m_dev)) {
@@ -1118,6 +1121,8 @@ fail_after_video_register_device:
v4l2_m2m_release(e5010->m2m_dev);
fail_after_v4l2_register:
v4l2_device_unregister(&e5010->v4l2_dev);
+fail_after_video_device_alloc:
+ video_device_release(e5010->vdev);
return ret;
}
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 834d2a354692..7eb12449b63a 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -1026,6 +1026,7 @@ static void mtk_jpeg_dec_device_run(void *priv)
spin_lock_irqsave(&jpeg->hw_lock, flags);
mtk_jpeg_dec_reset(jpeg->reg_base);
mtk_jpeg_dec_set_config(jpeg->reg_base,
+ jpeg->variant->support_34bit,
&jpeg_src_buf->dec_param,
jpeg_src_buf->bs_size,
&bs,
@@ -1570,7 +1571,8 @@ static irqreturn_t mtk_jpeg_enc_done(struct mtk_jpeg_dev *jpeg)
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base);
+ result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base,
+ jpeg->variant->support_34bit);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, result_size);
buf_state = VB2_BUF_STATE_DONE;
@@ -1770,6 +1772,7 @@ retry_select:
ctx->total_frame_num++;
mtk_jpeg_dec_reset(comp_jpeg[hw_id]->reg_base);
mtk_jpeg_dec_set_config(comp_jpeg[hw_id]->reg_base,
+ jpeg->variant->support_34bit,
&jpeg_src_buf->dec_param,
jpeg_src_buf->bs_size,
&bs,
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
index 8877eb39e807..02ed0ed5b736 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
@@ -34,6 +34,8 @@
#define MTK_JPEG_MAX_EXIF_SIZE (64 * 1024)
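+
+/* Holds bits 33:32 of a 34-bit DMA address, written to the *_EXT registers */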
+#define MTK_JPEG_ADDR_MASK GENMASK(1, 0)
+
/**
* enum mtk_jpeg_ctx_state - states of the context state machine
* @MTK_JPEG_INIT: current state is initialized
@@ -62,6 +64,7 @@ enum mtk_jpeg_ctx_state {
* @cap_q_default_fourcc: capture queue default fourcc
* @multi_core: mark jpeg hw is multi_core or not
* @jpeg_worker: jpeg dec or enc worker
+ * @support_34bit: flag to check support for 34-bit DMA address
*/
struct mtk_jpeg_variant {
struct clk_bulk_data *clks;
@@ -78,6 +81,7 @@ struct mtk_jpeg_variant {
u32 cap_q_default_fourcc;
bool multi_core;
void (*jpeg_worker)(struct work_struct *work);
+ bool support_34bit;
};
struct mtk_jpeg_src_buf {
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
index 2c5d74939d0a..e78e1d11093c 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
@@ -5,6 +5,8 @@
* Rick Chang <rick.chang@mediatek.com>
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -279,23 +281,43 @@ static void mtk_jpeg_dec_set_brz_factor(void __iomem *base, u8 yscale_w,
writel(val, base + JPGDEC_REG_BRZ_FACTOR);
}
-static void mtk_jpeg_dec_set_dst_bank0(void __iomem *base, u32 addr_y,
- u32 addr_u, u32 addr_v)
+static void mtk_jpeg_dec_set_dst_bank0(void __iomem *base, bool support_34bit,
+ dma_addr_t addr_y, dma_addr_t addr_u, dma_addr_t addr_v)
{
+ u32 val;
+
mtk_jpeg_verify_align(addr_y, 16, JPGDEC_REG_DEST_ADDR0_Y);
- writel(addr_y, base + JPGDEC_REG_DEST_ADDR0_Y);
+ writel(lower_32_bits(addr_y), base + JPGDEC_REG_DEST_ADDR0_Y);
mtk_jpeg_verify_align(addr_u, 16, JPGDEC_REG_DEST_ADDR0_U);
- writel(addr_u, base + JPGDEC_REG_DEST_ADDR0_U);
+ writel(lower_32_bits(addr_u), base + JPGDEC_REG_DEST_ADDR0_U);
mtk_jpeg_verify_align(addr_v, 16, JPGDEC_REG_DEST_ADDR0_V);
- writel(addr_v, base + JPGDEC_REG_DEST_ADDR0_V);
+ writel(lower_32_bits(addr_v), base + JPGDEC_REG_DEST_ADDR0_V);
+ if (support_34bit) {
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_y));
+ writel(val, base + JPGDEC_REG_DEST_ADDR0_Y_EXT);
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_u));
+ writel(val, base + JPGDEC_REG_DEST_ADDR0_U_EXT);
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_v));
+ writel(val, base + JPGDEC_REG_DEST_ADDR0_V_EXT);
+ }
}
-static void mtk_jpeg_dec_set_dst_bank1(void __iomem *base, u32 addr_y,
- u32 addr_u, u32 addr_v)
+static void mtk_jpeg_dec_set_dst_bank1(void __iomem *base, bool support_34bit,
+ dma_addr_t addr_y, dma_addr_t addr_u, dma_addr_t addr_v)
{
- writel(addr_y, base + JPGDEC_REG_DEST_ADDR1_Y);
- writel(addr_u, base + JPGDEC_REG_DEST_ADDR1_U);
- writel(addr_v, base + JPGDEC_REG_DEST_ADDR1_V);
+ u32 val;
+
+ writel(lower_32_bits(addr_y), base + JPGDEC_REG_DEST_ADDR1_Y);
+ writel(lower_32_bits(addr_u), base + JPGDEC_REG_DEST_ADDR1_U);
+ writel(lower_32_bits(addr_v), base + JPGDEC_REG_DEST_ADDR1_V);
+ if (support_34bit) {
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_y));
+ writel(val, base + JPGDEC_REG_DEST_ADDR1_Y_EXT);
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_u));
+ writel(val, base + JPGDEC_REG_DEST_ADDR1_U_EXT);
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr_v));
+ writel(val, base + JPGDEC_REG_DEST_ADDR1_V_EXT);
+ }
}
static void mtk_jpeg_dec_set_mem_stride(void __iomem *base, u32 stride_y,
@@ -322,18 +344,30 @@ static void mtk_jpeg_dec_set_dec_mode(void __iomem *base, u32 mode)
writel(mode & 0x03, base + JPGDEC_REG_OPERATION_MODE);
}
-static void mtk_jpeg_dec_set_bs_write_ptr(void __iomem *base, u32 ptr)
+static void mtk_jpeg_dec_set_bs_write_ptr(void __iomem *base, bool support_34bit, dma_addr_t ptr)
{
+ u32 val;
+
mtk_jpeg_verify_align(ptr, 16, JPGDEC_REG_FILE_BRP);
- writel(ptr, base + JPGDEC_REG_FILE_BRP);
+ writel(lower_32_bits(ptr), base + JPGDEC_REG_FILE_BRP);
+ if (support_34bit) {
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(ptr));
+ writel(val, base + JPGDEC_REG_FILE_BRP_EXT);
+ }
}
-static void mtk_jpeg_dec_set_bs_info(void __iomem *base, u32 addr, u32 size,
- u32 bitstream_size)
+static void mtk_jpeg_dec_set_bs_info(void __iomem *base, bool support_34bit,
+ dma_addr_t addr, u32 size, u32 bitstream_size)
{
+ u32 val;
+
mtk_jpeg_verify_align(addr, 16, JPGDEC_REG_FILE_ADDR);
mtk_jpeg_verify_align(size, 128, JPGDEC_REG_FILE_TOTAL_SIZE);
- writel(addr, base + JPGDEC_REG_FILE_ADDR);
+ writel(lower_32_bits(addr), base + JPGDEC_REG_FILE_ADDR);
+ if (support_34bit) {
+ val = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(addr));
+ writel(val, base + JPGDEC_REG_FILE_ADDR_EXT);
+ }
writel(size, base + JPGDEC_REG_FILE_TOTAL_SIZE);
writel(bitstream_size, base + JPGDEC_REG_BIT_STREAM_SIZE);
}
@@ -404,6 +438,7 @@ static void mtk_jpeg_dec_set_sampling_factor(void __iomem *base, u32 comp_num,
}
void mtk_jpeg_dec_set_config(void __iomem *base,
+ bool support_34bits,
struct mtk_jpeg_dec_param *cfg,
u32 bitstream_size,
struct mtk_jpeg_bs *bs,
@@ -413,8 +448,8 @@ void mtk_jpeg_dec_set_config(void __iomem *base,
mtk_jpeg_dec_set_dec_mode(base, 0);
mtk_jpeg_dec_set_comp0_du(base, cfg->unit_num);
mtk_jpeg_dec_set_total_mcu(base, cfg->total_mcu);
- mtk_jpeg_dec_set_bs_info(base, bs->str_addr, bs->size, bitstream_size);
- mtk_jpeg_dec_set_bs_write_ptr(base, bs->end_addr);
+ mtk_jpeg_dec_set_bs_info(base, support_34bits, bs->str_addr, bs->size, bitstream_size);
+ mtk_jpeg_dec_set_bs_write_ptr(base, support_34bits, bs->end_addr);
mtk_jpeg_dec_set_du_membership(base, cfg->membership, 1,
(cfg->comp_num == 1) ? 1 : 0);
mtk_jpeg_dec_set_comp_id(base, cfg->comp_id[0], cfg->comp_id[1],
@@ -432,9 +467,9 @@ void mtk_jpeg_dec_set_config(void __iomem *base,
cfg->mem_stride[1]);
mtk_jpeg_dec_set_img_stride(base, cfg->img_stride[0],
cfg->img_stride[1]);
- mtk_jpeg_dec_set_dst_bank0(base, fb->plane_addr[0],
+ mtk_jpeg_dec_set_dst_bank0(base, support_34bits, fb->plane_addr[0],
fb->plane_addr[1], fb->plane_addr[2]);
- mtk_jpeg_dec_set_dst_bank1(base, 0, 0, 0);
+ mtk_jpeg_dec_set_dst_bank1(base, support_34bits, 0, 0, 0);
mtk_jpeg_dec_set_dma_group(base, cfg->dma_mcu, cfg->dma_group,
cfg->dma_last_mcu);
mtk_jpeg_dec_set_pause_mcu_idx(base, cfg->total_mcu);
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h
index 8c31c6b12417..2948c9c300a4 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.h
@@ -71,6 +71,7 @@ int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param);
u32 mtk_jpeg_dec_get_int_status(void __iomem *dec_reg_base);
u32 mtk_jpeg_dec_enum_result(u32 irq_result);
void mtk_jpeg_dec_set_config(void __iomem *base,
+ bool support_34bits,
struct mtk_jpeg_dec_param *cfg,
u32 bitstream_size,
struct mtk_jpeg_bs *bs,
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h
index 27b7711ca341..e94f52de7c69 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_reg.h
@@ -46,5 +46,13 @@
#define JPGDEC_REG_INTERRUPT_STATUS 0x0274
#define JPGDEC_REG_STATUS 0x0278
#define JPGDEC_REG_BIT_STREAM_SIZE 0x0344
+#define JPGDEC_REG_DEST_ADDR0_Y_EXT 0x0360
+#define JPGDEC_REG_DEST_ADDR0_U_EXT 0x0364
+#define JPGDEC_REG_DEST_ADDR0_V_EXT 0x0368
+#define JPGDEC_REG_DEST_ADDR1_Y_EXT 0x036c
+#define JPGDEC_REG_DEST_ADDR1_U_EXT 0x0370
+#define JPGDEC_REG_DEST_ADDR1_V_EXT 0x0374
+#define JPGDEC_REG_FILE_ADDR_EXT 0x0378
+#define JPGDEC_REG_FILE_BRP_EXT 0x037c
#endif /* _MTK_JPEG_REG_H */
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
index f8fa3b841ccf..9ab27aee302a 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
@@ -5,6 +5,8 @@
*
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -62,9 +64,9 @@ void mtk_jpeg_enc_reset(void __iomem *base)
}
EXPORT_SYMBOL_GPL(mtk_jpeg_enc_reset);
-u32 mtk_jpeg_enc_get_file_size(void __iomem *base)
+u32 mtk_jpeg_enc_get_file_size(void __iomem *base, bool support_34bit)
{
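+	/* 34-bit variants store the DMA pointer right-shifted by two, so shift it back */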
- return readl(base + JPEG_ENC_DMA_ADDR0) -
+ return (readl(base + JPEG_ENC_DMA_ADDR0) << ((support_34bit) ? 2 : 0)) -
readl(base + JPEG_ENC_DST_ADDR0);
}
EXPORT_SYMBOL_GPL(mtk_jpeg_enc_get_file_size);
@@ -84,14 +86,24 @@ void mtk_jpeg_set_enc_src(struct mtk_jpeg_ctx *ctx, void __iomem *base,
{
int i;
dma_addr_t dma_addr;
+ u32 addr_ext;
+ bool support_34bit = ctx->jpeg->variant->support_34bit;
for (i = 0; i < src_buf->num_planes; i++) {
dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, i) +
src_buf->planes[i].data_offset;
- if (!i)
- writel(dma_addr, base + JPEG_ENC_SRC_LUMA_ADDR);
+ if (i == 0)
+ writel(lower_32_bits(dma_addr), base + JPEG_ENC_SRC_LUMA_ADDR);
else
- writel(dma_addr, base + JPEG_ENC_SRC_CHROMA_ADDR);
+ writel(lower_32_bits(dma_addr), base + JPEG_ENC_SRC_CHROMA_ADDR);
+
+ if (support_34bit) {
+ addr_ext = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(dma_addr));
+ if (i == 0)
+ writel(addr_ext, base + JPEG_ENC_SRC_LUMA_ADDR_EXT);
+ else
+ writel(addr_ext, base + JPEG_ENC_SRC_CHRO_ADDR_EXT);
+ }
}
}
EXPORT_SYMBOL_GPL(mtk_jpeg_set_enc_src);
@@ -103,6 +115,8 @@ void mtk_jpeg_set_enc_dst(struct mtk_jpeg_ctx *ctx, void __iomem *base,
size_t size;
u32 dma_addr_offset;
u32 dma_addr_offsetmask;
+ u32 addr_ext;
+ bool support_34bit = ctx->jpeg->variant->support_34bit;
dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
dma_addr_offset = ctx->enable_exif ? MTK_JPEG_MAX_EXIF_SIZE : 0;
@@ -113,6 +127,12 @@ void mtk_jpeg_set_enc_dst(struct mtk_jpeg_ctx *ctx, void __iomem *base,
writel(dma_addr_offsetmask & 0xf, base + JPEG_ENC_BYTE_OFFSET_MASK);
writel(dma_addr & ~0xf, base + JPEG_ENC_DST_ADDR0);
writel((dma_addr + size) & ~0xf, base + JPEG_ENC_STALL_ADDR0);
+
+ if (support_34bit) {
+ addr_ext = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(dma_addr));
+ writel(addr_ext, base + JPEG_ENC_DEST_ADDR0_EXT);
+		addr_ext = FIELD_PREP(MTK_JPEG_ADDR_MASK, upper_32_bits(dma_addr + size));
+		writel(addr_ext, base + JPEG_ENC_STALL_ADDR0_EXT);
+ }
}
EXPORT_SYMBOL_GPL(mtk_jpeg_set_enc_dst);
@@ -278,7 +298,8 @@ static irqreturn_t mtk_jpegenc_hw_irq_handler(int irq, void *priv)
if (!(irq_status & JPEG_ENC_INT_STATUS_DONE))
dev_warn(jpeg->dev, "Jpg Enc occurs unknown Err.");
- result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base);
+ result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base,
+ ctx->jpeg->variant->support_34bit);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, result_size);
buf_state = VB2_BUF_STATE_DONE;
v4l2_m2m_buf_done(src_buf, buf_state);
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h
index 61c60e4e58ea..31ec9030ae88 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.h
@@ -68,6 +68,11 @@
#define JPEG_ENC_DCM_CTRL 0x300
#define JPEG_ENC_CODEC_SEL 0x314
#define JPEG_ENC_ULTRA_THRES 0x318
+#define JPEG_ENC_SRC_LUMA_ADDR_EXT 0x584
+#define JPEG_ENC_SRC_CHRO_ADDR_EXT 0x588
+#define JPEG_ENC_Q_TBL_ADDR_EXT 0x58C
+#define JPEG_ENC_DEST_ADDR0_EXT 0x590
+#define JPEG_ENC_STALL_ADDR0_EXT 0x594
/**
* struct mtk_jpeg_enc_qlt - JPEG encoder quality data
@@ -80,7 +85,7 @@ struct mtk_jpeg_enc_qlt {
};
void mtk_jpeg_enc_reset(void __iomem *base);
-u32 mtk_jpeg_enc_get_file_size(void __iomem *base);
+u32 mtk_jpeg_enc_get_file_size(void __iomem *base, bool support_34bit);
void mtk_jpeg_enc_start(void __iomem *enc_reg_base);
void mtk_jpeg_set_enc_src(struct mtk_jpeg_ctx *ctx, void __iomem *base,
struct vb2_buffer *src_buf);
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
index 28c998bd3a81..d0fd77dcf8e2 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
@@ -342,7 +342,7 @@ static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type,
if (r->left & 1)
r->left -= 1;
- mtk_mdp_dbg(2, "[%d] crop l,t,w,h:%d,%d,%d,%d, max:%dx%d", ctx->id,
+ mtk_mdp_dbg(2, "[%d] crop (%d,%d)/%ux%u, max:%dx%d", ctx->id,
r->left, r->top, r->width,
r->height, max_w, max_h);
return 0;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
index 935ae9825728..222611e03a06 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
@@ -12,8 +12,6 @@
#include <linux/soc/mediatek/mtk-cmdq.h>
#include "mtk-img-ipi.h"
-struct platform_device *mdp_get_plat_device(struct platform_device *pdev);
-
struct mdp_cmdq_param {
struct img_config *config;
struct img_ipi_frameparam *param;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index f571f561f070..8de2c8e4d333 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -79,25 +79,6 @@ static struct platform_device *__get_pdev_by_id(struct platform_device *pdev,
return mdp_pdev;
}
-struct platform_device *mdp_get_plat_device(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *mdp_node;
- struct platform_device *mdp_pdev;
-
- mdp_node = of_parse_phandle(dev->of_node, MDP_PHANDLE_NAME, 0);
- if (!mdp_node) {
- dev_err(dev, "can't get node %s\n", MDP_PHANDLE_NAME);
- return NULL;
- }
-
- mdp_pdev = of_find_device_by_node(mdp_node);
- of_node_put(mdp_node);
-
- return mdp_pdev;
-}
-EXPORT_SYMBOL_GPL(mdp_get_plat_device);
-
int mdp_vpu_get_locked(struct mdp_dev *mdp)
{
int ret = 0;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
index 657356f87743..644b223b2877 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
@@ -236,7 +236,7 @@ int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
u32 framew, frameh, walign, halign;
int ret;
- dev_dbg(dev, "%d target:%d, set:(%d,%d) %ux%u", ctx->id,
+ dev_dbg(dev, "%d target:%d, set:(%d,%d)/%ux%u", ctx->id,
s->target, s->r.left, s->r.top, s->r.width, s->r.height);
left = s->r.left;
@@ -275,7 +275,7 @@ int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
r->width = right - left;
r->height = bottom - top;
- dev_dbg(dev, "%d crop:(%d,%d) %ux%u", ctx->id,
+ dev_dbg(dev, "%d crop:(%d,%d)/%ux%u", ctx->id,
r->left, r->top, r->width, r->height);
return 0;
}
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index ac568ed14fa2..aececca7ecf8 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -17,6 +17,7 @@
#define IS_VDEC_LAT_ARCH(hw_arch) ((hw_arch) >= MTK_VDEC_LAT_SINGLE_CORE)
#define IS_VDEC_INNER_RACING(capability) ((capability) & MTK_VCODEC_INNER_RACING)
+#define IS_VDEC_SUPPORT_EXT(capability) ((capability) & MTK_VDEC_IS_SUPPORT_EXT)
enum mtk_vcodec_dec_chip_name {
MTK_VDEC_INVAL = 0,
@@ -42,6 +43,7 @@ enum mtk_vdec_format_types {
MTK_VDEC_FORMAT_HEVC_FRAME = 0x1000,
MTK_VCODEC_INNER_RACING = 0x20000,
MTK_VDEC_IS_SUPPORT_10BIT = 0x40000,
+ MTK_VDEC_IS_SUPPORT_EXT = 0x80000,
};
/*
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
index afa224da0f41..d873159b9b30 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
@@ -152,8 +152,6 @@ static const struct mtk_stateless_control mtk_stateless_controls[] = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
.def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
.max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
- .menu_skip_mask =
- BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE),
},
.codec_type = V4L2_PIX_FMT_HEVC_SLICE,
},
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
index 1ed0ccec5665..5b25e1679b51 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
@@ -30,6 +30,7 @@ enum vdec_h264_core_dec_err_type {
/**
* struct vdec_h264_slice_lat_dec_param - parameters for decode current frame
+ * (shared data between host and firmware)
*
* @sps: h264 sps syntax parameters
* @pps: h264 pps syntax parameters
@@ -48,7 +49,7 @@ struct vdec_h264_slice_lat_dec_param {
};
/**
- * struct vdec_h264_slice_info - decode information
+ * struct vdec_h264_slice_info - decode information (shared data between host and firmware)
*
* @nal_info: nal info of current picture
* @timeout: Decode timeout: 1 timeout, 0 no timeout
@@ -72,23 +73,22 @@ struct vdec_h264_slice_info {
/**
* struct vdec_h264_slice_vsi - shared memory for decode information exchange
- * between SCP and Host.
+ * between SCP and Host (shared data between host and firmware).
*
- * @wdma_err_addr: wdma error dma address
- * @wdma_start_addr: wdma start dma address
- * @wdma_end_addr: wdma end dma address
- * @slice_bc_start_addr: slice bc start dma address
- * @slice_bc_end_addr: slice bc end dma address
- * @row_info_start_addr: row info start dma address
- * @row_info_end_addr: row info end dma address
- * @trans_start: trans start dma address
- * @trans_end: trans end dma address
- * @wdma_end_addr_offset: wdma end address offset
+ * @wdma_err_addr: wdma error dma address
+ * @wdma_start_addr: wdma start dma address
+ * @wdma_end_addr: wdma end dma address
+ * @slice_bc_start_addr: slice bc start dma address
+ * @slice_bc_end_addr: slice bc end dma address
+ * @row_info_start_addr: row info start dma address
+ * @row_info_end_addr: row info end dma address
+ * @trans_start: trans start dma address
+ * @trans_end: trans end dma address
+ * @wdma_end_addr_offset: wdma end address offset
*
- * @mv_buf_dma: HW working motion vector buffer
- * dma address (AP-W, VPU-R)
- * @dec: decode information (AP-R, VPU-W)
- * @h264_slice_params: decode parameters for hw used
+ * @mv_buf_dma: HW working motion vector buffer
+ * @dec: decode information (AP-R, VPU-W)
+ * @h264_slice_params: decode parameters for hw used
*/
struct vdec_h264_slice_vsi {
/* LAT dec addr */
@@ -112,12 +112,12 @@ struct vdec_h264_slice_vsi {
* struct vdec_h264_slice_share_info - shared information used to exchange
* message between lat and core
*
- * @sps: sequence header information from user space
- * @dec_params: decoder params from user space
- * @h264_slice_params: decoder params used for hardware
- * @trans_start: trans start dma address
- * @trans_end: trans end dma address
- * @nal_info: nal info of current picture
+ * @sps: sequence header information from user space
+ * @dec_params: decoder params from user space
+ * @h264_slice_params: decoder params used for hardware
+ * @trans_start: trans start dma address
+ * @trans_end: trans end dma address
+ * @nal_info: nal info of current picture
*/
struct vdec_h264_slice_share_info {
struct v4l2_ctrl_h264_sps sps;
@@ -128,6 +128,86 @@ struct vdec_h264_slice_share_info {
u16 nal_info;
};
+/*
+ * struct vdec_h264_slice_mem - memory address and size
+ * (shared data between host and firmware)
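+ *
+ * The unions let the same layout carry either a plain buffer
+ * (dma_addr + size) or an address range (dma_addr + dma_addr_end).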
+ */
+struct vdec_h264_slice_mem {
+ union {
+ u64 buf;
+ u64 dma_addr;
+ };
+ union {
+ size_t size;
+ u64 dma_addr_end;
+ };
+};
+
+/**
+ * struct vdec_h264_slice_fb - frame buffer for decoding
+ * (shared data between host and firmware)
+ *
+ * @y: current luma buffer address info
+ * @c: current chroma buffer address info
+ */
+struct vdec_h264_slice_fb {
+ struct vdec_h264_slice_mem y;
+ struct vdec_h264_slice_mem c;
+};
+
+/**
+ * struct vdec_h264_slice_info_ext - extend decode information
+ * (shared data between host and firmware)
+ *
+ * @wdma_end_addr_offset: offset from buffer start
+ * @nal_info: nal info of current picture
+ * @timeout: whether the decode operation timed out
+ * @reserved: reserved
+ * @vdec_fb_va: vdec frame buffer struct virtual address
+ * @crc: hardware checksum used to validate the decoded result
+ */
+struct vdec_h264_slice_info_ext {
+ u64 wdma_end_addr_offset;
+ u16 nal_info;
+ u16 timeout;
+ u32 reserved;
+ u64 vdec_fb_va;
+ u32 crc[8];
+};
+
+/**
+ * struct vdec_h264_slice_vsi_ext - extend shared memory for decode information exchange
+ * between SCP and Host (shared data between host and firmware).
+ *
+ * @bs: input buffer info
+ * @fb: current y/c buffer
+ *
+ * @ube: buffer used to share data between the lat and core decoders
+ * @trans: transcoded buffer used for core decode
+ * @row_info: row info buffer
+ * @err_map: error map buffer
+ * @slice_bc: slice buffer
+ *
+ * @mv_buf_dma: store hardware motion vector data
+ * @dec: decode information (AP-R, VPU-W)
+ * @h264_slice_params: decode parameters used for the hw
+ */
+struct vdec_h264_slice_vsi_ext {
+ /* LAT dec addr */
+ struct vdec_h264_slice_mem bs;
+ struct vdec_h264_slice_fb fb;
+
+ struct vdec_h264_slice_mem ube;
+ struct vdec_h264_slice_mem trans;
+ struct vdec_h264_slice_mem row_info;
+ struct vdec_h264_slice_mem err_map;
+ struct vdec_h264_slice_mem slice_bc;
+
+ struct vdec_h264_slice_mem mv_buf_dma[H264_MAX_MV_NUM];
+ struct vdec_h264_slice_info_ext dec;
+ struct vdec_h264_slice_lat_dec_param h264_slice_params;
+};
+
/**
* struct vdec_h264_slice_inst - h264 decoder instance
*
@@ -138,17 +218,21 @@ struct vdec_h264_slice_share_info {
* @vpu: VPU instance
* @vsi: vsi used for lat
* @vsi_core: vsi used for core
- *
- * @vsi_ctx: Local VSI data for this decoding context
+ * @vsi_ctx: vsi data for this decoding context
+ * @vsi_ext: extended vsi used for lat
+ * @vsi_core_ext: extended vsi used for core
+ * @vsi_ctx_ext: extended vsi data for this decoding context
* @h264_slice_param: the parameters that hardware use to decode
*
- * @resolution_changed:resolution changed
+ * @resolution_changed: resolution changed
* @realloc_mv_buf: reallocate mv buffer
* @cap_num_planes: number of capture queue plane
*
* @dpb: decoded picture buffer used to store reference
* buffer information
- *@is_field_bitstream: is field bitstream
+ * @is_field_bitstream: set when the bitstream is field coded, which is not supported (frame only)
+ *
+ * @decode: lat decoder pointer for different architectures
*/
struct vdec_h264_slice_inst {
unsigned int slice_dec_num;
@@ -156,10 +240,18 @@ struct vdec_h264_slice_inst {
struct mtk_vcodec_mem pred_buf;
struct mtk_vcodec_mem mv_buf[H264_MAX_MV_NUM];
struct vdec_vpu_inst vpu;
- struct vdec_h264_slice_vsi *vsi;
- struct vdec_h264_slice_vsi *vsi_core;
-
- struct vdec_h264_slice_vsi vsi_ctx;
+ union {
+ struct {
+ struct vdec_h264_slice_vsi *vsi;
+ struct vdec_h264_slice_vsi *vsi_core;
+ struct vdec_h264_slice_vsi vsi_ctx;
+ };
+ struct {
+ struct vdec_h264_slice_vsi_ext *vsi_ext;
+ struct vdec_h264_slice_vsi_ext *vsi_core_ext;
+ struct vdec_h264_slice_vsi_ext vsi_ctx_ext;
+ };
+ };
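+	/*
+	 * Presumably only one view of the union is active per instance:
+	 * the ext one on hardware flagged with MTK_VDEC_IS_SUPPORT_EXT.
+	 */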
struct vdec_h264_slice_lat_dec_param h264_slice_param;
unsigned int resolution_changed;
@@ -168,12 +260,15 @@ struct vdec_h264_slice_inst {
struct v4l2_h264_dpb_entry dpb[16];
bool is_field_bitstream;
+
+ int (*decode)(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *unused, bool *res_chg);
};
static int vdec_h264_slice_fill_decode_parameters(struct vdec_h264_slice_inst *inst,
- struct vdec_h264_slice_share_info *share_info)
+ struct vdec_h264_slice_share_info *share_info,
+ struct vdec_h264_slice_lat_dec_param *slice_param)
{
- struct vdec_h264_slice_lat_dec_param *slice_param = &inst->vsi->h264_slice_params;
const struct v4l2_ctrl_h264_decode_params *dec_params;
const struct v4l2_ctrl_h264_scaling_matrix *src_matrix;
const struct v4l2_ctrl_h264_sps *sps;
@@ -266,9 +361,6 @@ static int get_vdec_sig_decode_parameters(struct vdec_h264_slice_inst *inst)
mtk_vdec_h264_get_ref_list(b0_reflist, v4l2_b0_reflist, reflist_builder.num_valid);
mtk_vdec_h264_get_ref_list(b1_reflist, v4l2_b1_reflist, reflist_builder.num_valid);
- memcpy(&inst->vsi_ctx.h264_slice_params, slice_param,
- sizeof(inst->vsi_ctx.h264_slice_params));
-
return 0;
}
@@ -392,68 +484,148 @@ static void vdec_h264_slice_get_crop_info(struct vdec_h264_slice_inst *inst,
cr->left, cr->top, cr->width, cr->height);
}
-static int vdec_h264_slice_init(struct mtk_vcodec_dec_ctx *ctx)
+static void vdec_h264_slice_setup_lat_buffer_ext(struct vdec_h264_slice_inst *inst,
+ struct mtk_vcodec_mem *bs,
+ struct vdec_lat_buf *lat_buf)
{
- struct vdec_h264_slice_inst *inst;
- int err, vsi_size;
+ struct mtk_vcodec_mem *mem;
+ int i;
- inst = kzalloc(sizeof(*inst), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
+ inst->vsi_ext->bs.dma_addr = (u64)bs->dma_addr;
+ inst->vsi_ext->bs.size = bs->size;
- inst->ctx = ctx;
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ inst->vsi_ext->mv_buf_dma[i].dma_addr = mem->dma_addr;
+ inst->vsi_ext->mv_buf_dma[i].size = mem->size;
+ }
+ inst->vsi_ext->ube.dma_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
+ inst->vsi_ext->ube.size = lat_buf->ctx->msg_queue.wdma_addr.size;
- inst->vpu.id = SCP_IPI_VDEC_LAT;
- inst->vpu.core_id = SCP_IPI_VDEC_CORE;
- inst->vpu.ctx = ctx;
- inst->vpu.codec_type = ctx->current_codec;
- inst->vpu.capture_type = ctx->capture_fourcc;
+ inst->vsi_ext->row_info.dma_addr = 0;
+ inst->vsi_ext->row_info.size = 0;
- err = vpu_dec_init(&inst->vpu);
- if (err) {
- mtk_vdec_err(ctx, "vdec_h264 init err=%d", err);
- goto error_free_inst;
+ inst->vsi_ext->err_map.dma_addr = lat_buf->wdma_err_addr.dma_addr;
+ inst->vsi_ext->err_map.size = lat_buf->wdma_err_addr.size;
+
+ inst->vsi_ext->slice_bc.dma_addr = lat_buf->slice_bc_addr.dma_addr;
+ inst->vsi_ext->slice_bc.size = lat_buf->slice_bc_addr.size;
+
+ inst->vsi_ext->trans.dma_addr_end = inst->ctx->msg_queue.wdma_rptr_addr;
+ inst->vsi_ext->trans.dma_addr = inst->ctx->msg_queue.wdma_wptr_addr;
+}
+
+static int vdec_h264_slice_setup_core_buffer_ext(struct vdec_h264_slice_inst *inst,
+ struct vdec_h264_slice_share_info *share_info,
+ struct vdec_lat_buf *lat_buf)
+{
+ struct mtk_vcodec_mem *mem;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct vdec_fb *fb;
+ u64 y_fb_dma, c_fb_dma = 0;
+ int i;
+
+ fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
+ if (!fb) {
+		mtk_vdec_err(ctx, "Unable to get a CAPTURE buffer: the CAPTURE queue is empty");
+ return -EBUSY;
}
- vsi_size = round_up(sizeof(struct vdec_h264_slice_vsi), VCODEC_DEC_ALIGNED_64);
- inst->vsi = inst->vpu.vsi;
- inst->vsi_core =
- (struct vdec_h264_slice_vsi *)(((char *)inst->vpu.vsi) + vsi_size);
- inst->resolution_changed = true;
- inst->realloc_mv_buf = true;
+ y_fb_dma = (u64)fb->base_y.dma_addr;
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
+ c_fb_dma = y_fb_dma + ctx->picinfo.fb_sz[0];
+ else
+ c_fb_dma = (u64)fb->base_c.dma_addr;
- mtk_vdec_debug(ctx, "lat struct size = %d,%d,%d,%d vsi: %d\n",
- (int)sizeof(struct mtk_h264_sps_param),
- (int)sizeof(struct mtk_h264_pps_param),
- (int)sizeof(struct vdec_h264_slice_lat_dec_param),
- (int)sizeof(struct mtk_h264_dpb_info),
- vsi_size);
- mtk_vdec_debug(ctx, "lat H264 instance >> %p, codec_type = 0x%x",
- inst, inst->vpu.codec_type);
+ mtk_vdec_debug(ctx, "[h264-core] y/c addr = 0x%llx 0x%llx", y_fb_dma, c_fb_dma);
- ctx->drv_handle = inst;
- return 0;
+ inst->vsi_core_ext->fb.y.dma_addr = y_fb_dma;
+ inst->vsi_core_ext->fb.y.size = ctx->picinfo.fb_sz[0];
+ inst->vsi_core_ext->fb.c.dma_addr = c_fb_dma;
+ inst->vsi_core_ext->fb.c.size = ctx->picinfo.fb_sz[1];
-error_free_inst:
- kfree(inst);
- return err;
+ inst->vsi_core_ext->dec.vdec_fb_va = (unsigned long)fb;
+ inst->vsi_core_ext->dec.nal_info = share_info->nal_info;
+
+ inst->vsi_core_ext->ube.dma_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
+ inst->vsi_core_ext->ube.size = lat_buf->ctx->msg_queue.wdma_addr.size;
+
+ inst->vsi_core_ext->err_map.dma_addr = lat_buf->wdma_err_addr.dma_addr;
+ inst->vsi_core_ext->err_map.size = lat_buf->wdma_err_addr.size;
+
+ inst->vsi_core_ext->slice_bc.dma_addr = lat_buf->slice_bc_addr.dma_addr;
+ inst->vsi_core_ext->slice_bc.size = lat_buf->slice_bc_addr.size;
+
+ inst->vsi_core_ext->row_info.dma_addr = 0;
+ inst->vsi_core_ext->row_info.size = 0;
+
+ inst->vsi_core_ext->trans.dma_addr = share_info->trans_start;
+ inst->vsi_core_ext->trans.dma_addr_end = share_info->trans_end;
+
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ inst->vsi_core_ext->mv_buf_dma[i].dma_addr = mem->dma_addr;
+ inst->vsi_core_ext->mv_buf_dma[i].size = mem->size;
+ }
+
+ vb2_v4l2 = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ v4l2_m2m_buf_copy_metadata(&lat_buf->ts_info, vb2_v4l2, true);
+
+ return 0;
}
-static void vdec_h264_slice_deinit(void *h_vdec)
+static int vdec_h264_slice_core_decode_ext(struct vdec_lat_buf *lat_buf)
{
- struct vdec_h264_slice_inst *inst = h_vdec;
+ int err, timeout;
+ struct mtk_vcodec_dec_ctx *ctx = lat_buf->ctx;
+ struct vdec_h264_slice_inst *inst = ctx->drv_handle;
+ struct vdec_h264_slice_share_info *share_info = lat_buf->private_data;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
- vpu_dec_deinit(&inst->vpu);
- vdec_h264_slice_free_mv_buf(inst);
- vdec_msg_queue_deinit(&inst->ctx->msg_queue, inst->ctx);
+ memcpy(&inst->vsi_core_ext->h264_slice_params, &share_info->h264_slice_params,
+ sizeof(share_info->h264_slice_params));
- kfree(inst);
+ err = vdec_h264_slice_setup_core_buffer_ext(inst, share_info, lat_buf);
+ if (err)
+ goto vdec_dec_end;
+
+ vdec_h264_slice_fill_decode_reflist(inst, &inst->vsi_core_ext->h264_slice_params,
+ share_info);
+ err = vpu_dec_core(vpu);
+ if (err) {
+ mtk_vdec_err(ctx, "core decode err=%d", err);
+ goto vdec_dec_end;
+ }
+
+ /* wait decoder done interrupt */
+ timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
+ if (timeout)
+ mtk_vdec_err(ctx, "core decode timeout: pic_%d", ctx->decoded_frame_cnt);
+ inst->vsi_core_ext->dec.timeout = !!timeout;
+
+ vpu_dec_core_end(vpu);
+
+ /* The CRC is a hardware checksum that can be used to verify the decoded result. */
+ mtk_vdec_debug(ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ ctx->decoded_frame_cnt,
+ inst->vsi_core_ext->dec.crc[0], inst->vsi_core_ext->dec.crc[1],
+ inst->vsi_core_ext->dec.crc[2], inst->vsi_core_ext->dec.crc[3],
+ inst->vsi_core_ext->dec.crc[4], inst->vsi_core_ext->dec.crc[5],
+ inst->vsi_core_ext->dec.crc[6], inst->vsi_core_ext->dec.crc[7]);
+
+vdec_dec_end:
+ vdec_msg_queue_update_ube_rptr(&lat_buf->ctx->msg_queue, share_info->trans_end);
+ ctx->dev->vdec_pdata->cap_to_disp(ctx, !!err, lat_buf->src_buf_req);
+ mtk_vdec_debug(ctx, "core decode done err=%d", err);
+ ctx->decoded_frame_cnt++;
+ return 0;
}
static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
{
struct vdec_fb *fb;
- u64 vdec_fb_va;
u64 y_fb_dma, c_fb_dma;
int err, timeout, i;
struct mtk_vcodec_dec_ctx *ctx = lat_buf->ctx;
@@ -463,22 +635,19 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
struct mtk_vcodec_mem *mem;
struct vdec_vpu_inst *vpu = &inst->vpu;
- mtk_vdec_debug(ctx, "[h264-core] vdec_h264 core decode");
memcpy(&inst->vsi_core->h264_slice_params, &share_info->h264_slice_params,
sizeof(share_info->h264_slice_params));
fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
if (!fb) {
err = -EBUSY;
- mtk_vdec_err(ctx, "fb buffer is NULL");
+ mtk_vdec_err(ctx, "Unable to get a CAPTURE buffer: the CAPTURE queue is empty.");
goto vdec_dec_end;
}
- vdec_fb_va = (unsigned long)fb;
y_fb_dma = (u64)fb->base_y.dma_addr;
if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
- c_fb_dma =
- y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
+ c_fb_dma = y_fb_dma + ctx->picinfo.fb_sz[0];
else
c_fb_dma = (u64)fb->base_c.dma_addr;
@@ -486,7 +655,7 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
inst->vsi_core->dec.y_fb_dma = y_fb_dma;
inst->vsi_core->dec.c_fb_dma = c_fb_dma;
- inst->vsi_core->dec.vdec_fb_va = vdec_fb_va;
+ inst->vsi_core->dec.vdec_fb_va = (unsigned long)fb;
inst->vsi_core->dec.nal_info = share_info->nal_info;
inst->vsi_core->wdma_start_addr =
lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
@@ -524,6 +693,8 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
inst->vsi_core->dec.timeout = !!timeout;
vpu_dec_core_end(vpu);
+
+ /* The CRC is a hardware checksum that can be used to verify the decoded result. */
mtk_vdec_debug(ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
ctx->decoded_frame_cnt,
inst->vsi_core->dec.crc[0], inst->vsi_core->dec.crc[1],
@@ -536,6 +707,7 @@ vdec_dec_end:
ctx->dev->vdec_pdata->cap_to_disp(ctx, !!err, lat_buf->src_buf_req);
mtk_vdec_debug(ctx, "core decode done err=%d", err);
ctx->decoded_frame_cnt++;
+
return 0;
}
@@ -562,6 +734,128 @@ static void vdec_h264_insert_startcode(struct mtk_vcodec_dec_dev *vcodec_dev, un
(*bs_size) += 4;
}
+static int vdec_h264_slice_lat_decode_ext(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ struct mtk_video_dec_buf *src_buf_info;
+ int err, timeout = 0;
+ unsigned int data[2];
+ struct vdec_lat_buf *lat_buf;
+ struct vdec_h264_slice_share_info *share_info;
+
+ if (vdec_msg_queue_init(&inst->ctx->msg_queue, inst->ctx,
+ vdec_h264_slice_core_decode_ext,
+ sizeof(*share_info)))
+ return -ENOMEM;
+
+ /* bs NULL means flush decoder */
+ if (!bs) {
+ vdec_msg_queue_wait_lat_buf_full(&inst->ctx->msg_queue);
+ return vpu_dec_reset(vpu);
+ }
+
+ if (inst->is_field_bitstream)
+ return -EINVAL;
+
+ lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
+ if (!lat_buf) {
+ mtk_vdec_debug(inst->ctx, "failed to get lat buffer");
+ return -EAGAIN;
+ }
+ share_info = lat_buf->private_data;
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+
+ lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
+ v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info, true);
+
+ err = vdec_h264_slice_fill_decode_parameters(inst, share_info,
+ &inst->vsi_ext->h264_slice_params);
+ if (err)
+ goto err_free_fb_out;
+
+ vdec_h264_insert_startcode(inst->ctx->dev, bs->va, &bs->size,
+ &share_info->h264_slice_params.pps);
+
+ *res_chg = inst->resolution_changed;
+ if (inst->resolution_changed) {
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
+ if (inst->realloc_mv_buf) {
+ err = vdec_h264_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
+ inst->realloc_mv_buf = false;
+ if (err)
+ goto err_free_fb_out;
+ }
+ inst->resolution_changed = false;
+ }
+
+ vdec_h264_slice_setup_lat_buffer_ext(inst, bs, lat_buf);
+ mtk_vdec_debug(inst->ctx, "lat:trans(0x%llx 0x%lx) err:0x%llx",
+ inst->vsi_ext->ube.dma_addr, (unsigned long)inst->vsi_ext->ube.size,
+ inst->vsi_ext->err_map.dma_addr);
+
+ mtk_vdec_debug(inst->ctx, "slice(0x%llx 0x%lx) rprt((0x%llx 0x%llx))",
+ inst->vsi_ext->slice_bc.dma_addr,
+ (unsigned long)inst->vsi_ext->slice_bc.size,
+ inst->vsi_ext->trans.dma_addr, inst->vsi_ext->trans.dma_addr_end);
+
+ err = vpu_dec_start(vpu, data, 2);
+ if (err) {
+ mtk_vdec_debug(inst->ctx, "lat decode err: %d", err);
+ goto err_free_fb_out;
+ }
+
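+ /*
+ * Record where this slice's transcoded data begins and ends in the UBE
+ * so the core stage can locate it later.
+ */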
+ share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+ inst->vsi_ext->dec.wdma_end_addr_offset;
+
+ share_info->trans_start = inst->ctx->msg_queue.wdma_wptr_addr;
+ share_info->nal_info = inst->vsi_ext->dec.nal_info;
+
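+ /*
+ * In inner-racing mode the core stage is queued before the LAT
+ * interrupt arrives, so the slice parameters must be copied now.
+ */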
+ if (IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
+ memcpy(&share_info->h264_slice_params, &inst->vsi_ext->h264_slice_params,
+ sizeof(share_info->h264_slice_params));
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
+ }
+
+ /* wait decoder done interrupt */
+ timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
+ if (timeout)
+ mtk_vdec_err(inst->ctx, "lat decode timeout: pic_%d", inst->slice_dec_num);
+ inst->vsi_ext->dec.timeout = !!timeout;
+
+ err = vpu_dec_end(vpu);
+ if (err == SLICE_HEADER_FULL || err == TRANS_BUFFER_FULL) {
+ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ inst->slice_dec_num++;
+ mtk_vdec_err(inst->ctx, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
+ return -EINVAL;
+ }
+
+ share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
+ inst->vsi_ext->dec.wdma_end_addr_offset;
+
+ vdec_msg_queue_update_ube_wptr(&lat_buf->ctx->msg_queue, share_info->trans_end);
+
+ if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
+ memcpy(&share_info->h264_slice_params, &inst->vsi_ext->h264_slice_params,
+ sizeof(share_info->h264_slice_params));
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
+ }
+ mtk_vdec_debug(inst->ctx, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
+ inst->vsi_ext->dec.crc[0], inst->vsi_ext->dec.crc[1],
+ inst->vsi_ext->dec.crc[2]);
+
+ inst->slice_dec_num++;
+ return 0;
+err_free_fb_out:
+ vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
+ mtk_vdec_err(inst->ctx, "slice dec number: %d err: %d", inst->slice_dec_num, err);
+ return err;
+}
+
static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg)
{
@@ -608,7 +902,8 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info, true);
- err = vdec_h264_slice_fill_decode_parameters(inst, share_info);
+ err = vdec_h264_slice_fill_decode_parameters(inst, share_info,
+ &inst->vsi->h264_slice_params);
if (err)
goto err_free_fb_out;
@@ -706,6 +1001,101 @@ err_free_fb_out:
return err;
}
+static int vdec_h264_slice_single_decode_ext(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *unused, bool *res_chg)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ struct mtk_video_dec_buf *src_buf_info, *dst_buf_info;
+ struct vdec_fb *fb;
+ unsigned int data[2], i;
+ u64 y_fb_dma, c_fb_dma;
+ struct mtk_vcodec_mem *mem;
+ int err;
+
+ /* bs NULL means flush decoder */
+ if (!bs)
+ return vpu_dec_reset(vpu);
+
+ fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
+ if (!fb) {
+ mtk_vdec_err(inst->ctx,
+ "Unable to get a CAPTURE buffer: the CAPTURE queue is empty.");
+ return -ENOMEM;
+ }
+
+ src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+ dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+
+ y_fb_dma = fb->base_y.dma_addr;
+ c_fb_dma = fb->base_c.dma_addr;
+ mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
+ inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
+
+ inst->vsi_ctx_ext.bs.dma_addr = (u64)bs->dma_addr;
+ inst->vsi_ctx_ext.bs.size = bs->size;
+ inst->vsi_ctx_ext.fb.y.dma_addr = y_fb_dma;
+ inst->vsi_ctx_ext.fb.c.dma_addr = c_fb_dma;
+ inst->vsi_ctx_ext.dec.vdec_fb_va = (u64)(uintptr_t)fb;
+
+ v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb,
+ &dst_buf_info->m2m_buf.vb, true);
+ err = get_vdec_sig_decode_parameters(inst);
+ if (err)
+ goto err_free_fb_out;
+
+ memcpy(&inst->vsi_ctx_ext.h264_slice_params, &inst->h264_slice_param,
+ sizeof(inst->vsi_ctx_ext.h264_slice_params));
+
+ *res_chg = inst->resolution_changed;
+ if (inst->resolution_changed) {
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
+ if (inst->realloc_mv_buf) {
+ err = vdec_h264_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
+ inst->realloc_mv_buf = false;
+ if (err)
+ goto err_free_fb_out;
+ }
+ inst->resolution_changed = false;
+
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ inst->vsi_ctx_ext.mv_buf_dma[i].dma_addr = mem->dma_addr;
+ }
+ }
+
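+ /* Sync the local VSI copy into the shared VPU memory before starting. */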
+ memcpy(inst->vpu.vsi, &inst->vsi_ctx_ext, sizeof(inst->vsi_ctx_ext));
+ err = vpu_dec_start(vpu, data, 2);
+ if (err)
+ goto err_free_fb_out;
+
+ /* wait decoder done interrupt */
+ err = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
+ if (err)
+ mtk_vdec_err(inst->ctx, "decode timeout: pic_%d", inst->ctx->decoded_frame_cnt);
+
+ inst->vsi_ext->dec.timeout = !!err;
+ err = vpu_dec_end(vpu);
+ if (err)
+ goto err_free_fb_out;
+
+ memcpy(&inst->vsi_ctx_ext, inst->vpu.vsi, sizeof(inst->vsi_ctx_ext));
+ mtk_vdec_debug(inst->ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ inst->ctx->decoded_frame_cnt,
+ inst->vsi_ctx_ext.dec.crc[0], inst->vsi_ctx_ext.dec.crc[1],
+ inst->vsi_ctx_ext.dec.crc[2], inst->vsi_ctx_ext.dec.crc[3],
+ inst->vsi_ctx_ext.dec.crc[4], inst->vsi_ctx_ext.dec.crc[5],
+ inst->vsi_ctx_ext.dec.crc[6], inst->vsi_ctx_ext.dec.crc[7]);
+
+ inst->ctx->decoded_frame_cnt++;
+ return 0;
+
+err_free_fb_out:
+ mtk_vdec_err(inst->ctx, "dec frame number: %d err: %d", inst->ctx->decoded_frame_cnt, err);
+ return err;
+}
+
static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *unused, bool *res_chg)
{
@@ -725,7 +1115,8 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
if (!fb) {
- mtk_vdec_err(inst->ctx, "fb buffer is NULL");
+ mtk_vdec_err(inst->ctx,
+ "Unable to get a CAPTURE buffer: the CAPTURE queue is empty.");
return -ENOMEM;
}
@@ -749,6 +1140,9 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
if (err)
goto err_free_fb_out;
+ memcpy(&inst->vsi_ctx.h264_slice_params, &inst->h264_slice_param,
+ sizeof(inst->vsi_ctx.h264_slice_params));
+
buf = (unsigned char *)bs->va;
nal_start_idx = mtk_vdec_h264_find_start_code(buf, bs->size);
if (nal_start_idx < 0) {
@@ -806,21 +1200,95 @@ err_free_fb_out:
return err;
}
+static int vdec_h264_slice_init(struct mtk_vcodec_dec_ctx *ctx)
+{
+ struct vdec_h264_slice_inst *inst;
+ int err, vsi_size;
+ unsigned char *temp;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = SCP_IPI_VDEC_LAT;
+ inst->vpu.core_id = SCP_IPI_VDEC_CORE;
+ inst->vpu.ctx = ctx;
+ inst->vpu.codec_type = ctx->current_codec;
+ inst->vpu.capture_type = ctx->capture_fourcc;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vdec_err(ctx, "vdec_h264 init err=%d", err);
+ goto error_free_inst;
+ }
+
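+ /*
+ * Firmware with the extended capability uses the larger VSI layout;
+ * select the matching VSI views and decode entry points up front.
+ */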
+ if (IS_VDEC_SUPPORT_EXT(ctx->dev->dec_capability)) {
+ vsi_size = sizeof(struct vdec_h264_slice_vsi_ext);
+
+ vsi_size = round_up(vsi_size, VCODEC_DEC_ALIGNED_64);
+ inst->vsi_ext = inst->vpu.vsi;
+ temp = (unsigned char *)inst->vsi_ext;
+ inst->vsi_core_ext = (struct vdec_h264_slice_vsi_ext *)(temp + vsi_size);
+
+ if (inst->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
+ inst->decode = vdec_h264_slice_single_decode_ext;
+ else
+ inst->decode = vdec_h264_slice_lat_decode_ext;
+ } else {
+ vsi_size = sizeof(struct vdec_h264_slice_vsi);
+
+ vsi_size = round_up(vsi_size, VCODEC_DEC_ALIGNED_64);
+ inst->vsi = inst->vpu.vsi;
+ temp = (unsigned char *)inst->vsi;
+ inst->vsi_core = (struct vdec_h264_slice_vsi *)(temp + vsi_size);
+
+ if (inst->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
+ inst->decode = vdec_h264_slice_single_decode;
+ else
+ inst->decode = vdec_h264_slice_lat_decode;
+ }
+ inst->resolution_changed = true;
+ inst->realloc_mv_buf = true;
+
+ mtk_vdec_debug(ctx, "lat struct size = %d,%d,%d,%d vsi: %d\n",
+ (int)sizeof(struct mtk_h264_sps_param),
+ (int)sizeof(struct mtk_h264_pps_param),
+ (int)sizeof(struct vdec_h264_slice_lat_dec_param),
+ (int)sizeof(struct mtk_h264_dpb_info),
+ vsi_size);
+ mtk_vdec_debug(ctx, "lat H264 instance >> %p, codec_type = 0x%x",
+ inst, inst->vpu.codec_type);
+
+ ctx->drv_handle = inst;
+ return 0;
+
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_h264_slice_deinit(void *h_vdec)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+
+ vpu_dec_deinit(&inst->vpu);
+ vdec_h264_slice_free_mv_buf(inst);
+ vdec_msg_queue_deinit(&inst->ctx->msg_queue, inst->ctx);
+
+ kfree(inst);
+}
+
static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_fb *unused, bool *res_chg)
{
struct vdec_h264_slice_inst *inst = h_vdec;
- int ret;
if (!h_vdec)
return -EINVAL;
- if (inst->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
- ret = vdec_h264_slice_single_decode(h_vdec, bs, unused, res_chg);
- else
- ret = vdec_h264_slice_lat_decode(h_vdec, bs, unused, res_chg);
-
- return ret;
+ return inst->decode(h_vdec, bs, unused, res_chg);
}
static int vdec_h264_slice_get_param(void *h_vdec, enum vdec_get_param_type type,
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
index aa721cc43647..2725db882e5b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
@@ -821,7 +821,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
inst->vsi_core->fb.y.dma_addr = y_fb_dma;
inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[0];
inst->vsi_core->fb.c.dma_addr = c_fb_dma;
- inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[1];
+ inst->vsi_core->fb.c.size = ctx->picinfo.fb_sz[1];
inst->vsi_core->dec.vdec_fb_va = (unsigned long)fb;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
index 8522f71fc901..0f63657d8bad 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
@@ -515,7 +515,7 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
struct venc_frame_info frame_info;
struct mtk_vcodec_enc_ctx *ctx = inst->ctx;
- mtk_venc_debug(ctx, "frm_cnt = %d\n ", inst->frm_cnt);
+ mtk_venc_debug(ctx, "frm_cnt = %d\n", inst->frm_cnt);
if (MTK_ENC_IOVA_IS_34BIT(ctx)) {
gop_size = inst->vsi_34->config.gop_size;
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
index 7a9d8928ae40..44e904e61801 100644
--- a/drivers/media/platform/nuvoton/npcm-video.c
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -578,7 +578,7 @@ static unsigned int npcm_video_hres(struct npcm_video *video)
regmap_read(gfxi, HVCNTL, &hvcntl);
apb_hor_res = (((hvcnth & HVCNTH_MASK) << 8) + (hvcntl & HVCNTL_MASK) + 1);
- return apb_hor_res;
+ return (apb_hor_res > MAX_WIDTH) ? MAX_WIDTH : apb_hor_res;
}
static unsigned int npcm_video_vres(struct npcm_video *video)
@@ -591,7 +591,7 @@ static unsigned int npcm_video_vres(struct npcm_video *video)
apb_ver_res = (((vvcnth & VVCNTH_MASK) << 8) + (vvcntl & VVCNTL_MASK));
- return apb_ver_res;
+ return (apb_ver_res > MAX_HEIGHT) ? MAX_HEIGHT : apb_ver_res;
}
static int npcm_video_capres(struct npcm_video *video, unsigned int hor_res,
@@ -863,7 +863,6 @@ static void npcm_video_detect_resolution(struct npcm_video *video)
struct regmap *gfxi = video->gfx_regmap;
unsigned int dispst;
- video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
det->width = npcm_video_hres(video);
det->height = npcm_video_vres(video);
@@ -892,12 +891,16 @@ static void npcm_video_detect_resolution(struct npcm_video *video)
clear_bit(VIDEO_RES_CHANGING, &video->flags);
}
- if (det->width && det->height)
+ if (det->width && det->height) {
video->v4l2_input_status = 0;
-
- dev_dbg(video->dev, "Got resolution[%dx%d] -> [%dx%d], status %d\n",
- act->width, act->height, det->width, det->height,
- video->v4l2_input_status);
+ dev_dbg(video->dev, "Got resolution[%dx%d] -> [%dx%d], status %d\n",
+ act->width, act->height, det->width, det->height,
+ video->v4l2_input_status);
+ } else {
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+ dev_err(video->dev, "Got invalid resolution[%dx%d]\n", det->width,
+ det->height);
+ }
}
static int npcm_video_set_resolution(struct npcm_video *video,
diff --git a/drivers/media/platform/nxp/dw100/dw100.c b/drivers/media/platform/nxp/dw100/dw100.c
index 66582e7f92fc..3d1db1121bf9 100644
--- a/drivers/media/platform/nxp/dw100/dw100.c
+++ b/drivers/media/platform/nxp/dw100/dw100.c
@@ -961,9 +961,9 @@ static int dw100_s_selection(struct file *file, void *fh,
src_q_data = dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
dev_dbg(&ctx->dw_dev->pdev->dev,
- ">>> Buffer Type: %u Target: %u Rect: %ux%u@%d.%d\n",
+ ">>> Buffer Type: %u Target: %u Rect: (%d,%d)/%ux%u\n",
sel->type, sel->target,
- sel->r.width, sel->r.height, sel->r.left, sel->r.top);
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
@@ -1025,9 +1025,9 @@ static int dw100_s_selection(struct file *file, void *fh,
}
dev_dbg(&ctx->dw_dev->pdev->dev,
- "<<< Buffer Type: %u Target: %u Rect: %ux%u@%d.%d\n",
+ "<<< Buffer Type: %u Target: %u Rect: (%d,%d)/%ux%u\n",
sel->type, sel->target,
- sel->r.width, sel->r.height, sel->r.left, sel->r.top);
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
return 0;
}
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
index d579c804b047..adb93e977be9 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
@@ -89,6 +89,7 @@
/* SLOT_STATUS fields for slots 0..3 */
#define SLOT_STATUS_FRMDONE (0x1 << 3)
#define SLOT_STATUS_ENC_CONFIG_ERR (0x1 << 8)
+#define SLOT_STATUS_ONGOING (0x1 << 31)
/* SLOT_IRQ_EN fields TBD */
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 1221b309a916..5c17bc58181e 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -535,7 +535,18 @@ static const unsigned char jpeg_sos_maximal[] = {
};
static const unsigned char jpeg_image_red[] = {
- 0xFC, 0x5F, 0xA2, 0xBF, 0xCA, 0x73, 0xFE, 0xFE,
+ 0xF9, 0xFE, 0x8A, 0xFC, 0x34, 0xFD, 0xC4, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x0F, 0xFF, 0xD0, 0xF9,
+ 0xFE, 0x8A, 0xFC, 0x34, 0xFD, 0xC4, 0x28, 0xA0,
0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
@@ -545,7 +556,7 @@ static const unsigned char jpeg_image_red[] = {
0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
- 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00
+ 0x00, 0x28, 0xA0, 0x0F
};
static const unsigned char jpeg_eoi[] = {
@@ -752,6 +763,39 @@ static int mxc_get_free_slot(struct mxc_jpeg_slot_data *slot_data)
return -1;
}
+static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
+{
+ /* free descriptor for decoding/encoding phase */
+ dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
+ jpeg->slot_data.desc,
+ jpeg->slot_data.desc_handle);
+ jpeg->slot_data.desc = NULL;
+ jpeg->slot_data.desc_handle = 0;
+
+ /* free descriptor for encoder configuration phase / decoder DHT */
+ dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
+ jpeg->slot_data.cfg_desc,
+ jpeg->slot_data.cfg_desc_handle);
+ jpeg->slot_data.cfg_desc_handle = 0;
+ jpeg->slot_data.cfg_desc = NULL;
+
+ /* free configuration stream */
+ dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
+ jpeg->slot_data.cfg_stream_vaddr,
+ jpeg->slot_data.cfg_stream_handle);
+ jpeg->slot_data.cfg_stream_vaddr = NULL;
+ jpeg->slot_data.cfg_stream_handle = 0;
+
+ dma_free_coherent(jpeg->dev, jpeg->slot_data.cfg_dec_size,
+ jpeg->slot_data.cfg_dec_vaddr,
+ jpeg->slot_data.cfg_dec_daddr);
+ jpeg->slot_data.cfg_dec_size = 0;
+ jpeg->slot_data.cfg_dec_vaddr = NULL;
+ jpeg->slot_data.cfg_dec_daddr = 0;
+
+ jpeg->slot_data.used = false;
+}
+
static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg)
{
struct mxc_jpeg_desc *desc;
@@ -788,36 +832,25 @@ static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg)
goto err;
jpeg->slot_data.cfg_stream_vaddr = cfg_stm;
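+ /* Scratch output buffer for the configuration-phase (DHT) decode. */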
+ jpeg->slot_data.cfg_dec_size = MXC_JPEG_PATTERN_WIDTH * MXC_JPEG_PATTERN_HEIGHT * 2;
+ jpeg->slot_data.cfg_dec_vaddr = dma_alloc_coherent(jpeg->dev,
+ jpeg->slot_data.cfg_dec_size,
+ &jpeg->slot_data.cfg_dec_daddr,
+ GFP_ATOMIC);
+ if (!jpeg->slot_data.cfg_dec_vaddr)
+ goto err;
+
skip_alloc:
jpeg->slot_data.used = true;
return true;
err:
dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", jpeg->slot_data.slot);
+ mxc_jpeg_free_slot_data(jpeg);
return false;
}
-static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
-{
- /* free descriptor for decoding/encoding phase */
- dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
- jpeg->slot_data.desc,
- jpeg->slot_data.desc_handle);
-
- /* free descriptor for encoder configuration phase / decoder DHT */
- dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
- jpeg->slot_data.cfg_desc,
- jpeg->slot_data.cfg_desc_handle);
-
- /* free configuration stream */
- dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
- jpeg->slot_data.cfg_stream_vaddr,
- jpeg->slot_data.cfg_stream_handle);
-
- jpeg->slot_data.used = false;
-}
-
static void mxc_jpeg_check_and_set_last_buffer(struct mxc_jpeg_ctx *ctx,
struct vb2_v4l2_buffer *src_buf,
struct vb2_v4l2_buffer *dst_buf)
@@ -877,6 +910,34 @@ static u32 mxc_jpeg_get_plane_size(struct mxc_jpeg_q_data *q_data, u32 plane_no)
return size;
}
+static bool mxc_dec_is_ongoing(struct mxc_jpeg_ctx *ctx)
+{
+ struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
+ u32 curr_desc;
+ u32 slot_status;
+
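+ /* Still fetching the configuration descriptor: the decode is ongoing. */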
+ curr_desc = readl(jpeg->base_reg + MXC_SLOT_OFFSET(ctx->slot, SLOT_CUR_DESCPT_PTR));
+ if (curr_desc == jpeg->slot_data.cfg_desc_handle)
+ return true;
+
+ slot_status = readl(jpeg->base_reg + MXC_SLOT_OFFSET(ctx->slot, SLOT_STATUS));
+ if (slot_status & SLOT_STATUS_ONGOING)
+ return true;
+
+ /*
+ * The curr_desc register is updated when next_descpt_ptr is loaded,
+ * while the ongoing bit of slot_status is only set once the 32-byte
+ * descriptor has been loaded, so there is a short window in which a
+ * single read can return a false negative. Because a register read is
+ * slow compared with the IP fetching 32 bytes from memory, reading
+ * slot_status twice is enough to avoid this race.
+ */
+ slot_status = readl(jpeg->base_reg + MXC_SLOT_OFFSET(ctx->slot, SLOT_STATUS));
+ if (slot_status & SLOT_STATUS_ONGOING)
+ return true;
+
+ return false;
+}
+
static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
{
struct mxc_jpeg_dev *jpeg = priv;
@@ -946,7 +1007,8 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
mxc_jpeg_enc_mode_go(dev, reg, mxc_jpeg_is_extended_sequential(q_data->fmt));
goto job_unlock;
}
- if (jpeg->mode == MXC_JPEG_DECODE && jpeg_src_buf->dht_needed) {
+ if (jpeg->mode == MXC_JPEG_DECODE && jpeg_src_buf->dht_needed &&
+ mxc_dec_is_ongoing(ctx)) {
jpeg_src_buf->dht_needed = false;
dev_dbg(dev, "Decoder DHT cfg finished. Start decoding...\n");
goto job_unlock;
@@ -1209,14 +1271,14 @@ static void mxc_jpeg_config_dec_desc(struct vb2_buffer *out_buf,
*/
*cfg_size = mxc_jpeg_setup_cfg_stream(cfg_stream_vaddr,
V4L2_PIX_FMT_YUYV,
- MXC_JPEG_MIN_WIDTH,
- MXC_JPEG_MIN_HEIGHT);
+ MXC_JPEG_PATTERN_WIDTH,
+ MXC_JPEG_PATTERN_HEIGHT);
cfg_desc->next_descpt_ptr = desc_handle | MXC_NXT_DESCPT_EN;
- cfg_desc->buf_base0 = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ cfg_desc->buf_base0 = jpeg->slot_data.cfg_dec_daddr;
cfg_desc->buf_base1 = 0;
- cfg_desc->imgsize = MXC_JPEG_MIN_WIDTH << 16;
- cfg_desc->imgsize |= MXC_JPEG_MIN_HEIGHT;
- cfg_desc->line_pitch = MXC_JPEG_MIN_WIDTH * 2;
+ cfg_desc->imgsize = MXC_JPEG_PATTERN_WIDTH << 16;
+ cfg_desc->imgsize |= MXC_JPEG_PATTERN_HEIGHT;
+ cfg_desc->line_pitch = MXC_JPEG_PATTERN_WIDTH * 2;
cfg_desc->stm_ctrl = STM_CTRL_IMAGE_FORMAT(MXC_JPEG_YUV422);
cfg_desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
cfg_desc->stm_bufbase = cfg_stream_handle;
@@ -1918,9 +1980,19 @@ static void mxc_jpeg_buf_queue(struct vb2_buffer *vb)
jpeg_src_buf = vb2_to_mxc_buf(vb);
jpeg_src_buf->jpeg_parse_error = false;
ret = mxc_jpeg_parse(ctx, vb);
- if (ret)
+ if (ret) {
jpeg_src_buf->jpeg_parse_error = true;
+ /*
+ * If the capture queue is not set up, device_run() won't be
+ * scheduled; drop the erroneous buffer so that decoding can continue.
+ */
+ if (!vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx))) {
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ return;
+ }
+ }
+
end:
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
index 86e324b21aed..fdde45f7e163 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
@@ -28,6 +28,8 @@
#define MXC_JPEG_W_ALIGN 3
#define MXC_JPEG_MAX_SIZEIMAGE 0xFFFFFC00
#define MXC_JPEG_MAX_PLANES 2
+#define MXC_JPEG_PATTERN_WIDTH 128
+#define MXC_JPEG_PATTERN_HEIGHT 64
enum mxc_jpeg_enc_state {
MXC_JPEG_ENCODING = 0, /* jpeg encode phase */
@@ -117,6 +119,9 @@ struct mxc_jpeg_slot_data {
dma_addr_t desc_handle;
dma_addr_t cfg_desc_handle; // configuration descriptor dma address
dma_addr_t cfg_stream_handle; // configuration bitstream dma address
+ dma_addr_t cfg_dec_size;
+ void *cfg_dec_vaddr;
+ dma_addr_t cfg_dec_daddr;
};
struct mxc_jpeg_dev {
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
index 794050a6a919..22e49d3a1287 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
@@ -43,6 +43,7 @@ struct mxc_isi_m2m_ctx_queue_data {
struct v4l2_pix_format_mplane format;
const struct mxc_isi_format_info *info;
u32 sequence;
+ bool streaming;
};
struct mxc_isi_m2m_ctx {
@@ -484,15 +485,18 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
struct mxc_isi_m2m *m2m = ctx->m2m;
bool bypass;
-
int ret;
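+
+ /* STREAMON may be issued repeatedly; only the first call takes effect. */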
+ if (q->streaming)
+ return 0;
+
mutex_lock(&m2m->lock);
if (m2m->usage_count == INT_MAX) {
@@ -545,6 +549,8 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh,
goto unchain;
}
+ q->streaming = true;
+
return 0;
unchain:
@@ -567,10 +573,14 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
struct mxc_isi_m2m *m2m = ctx->m2m;
v4l2_m2m_ioctl_streamoff(file, fh, type);
+ if (!q->streaming)
+ return 0;
+
mutex_lock(&m2m->lock);
/*
@@ -596,6 +606,8 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
mutex_unlock(&m2m->lock);
+ q->streaming = false;
+
return 0;
}
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
index f6db5b3b5ace..d26a9c24a430 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -6,6 +6,7 @@ qcom-camss-objs += \
camss-csid.o \
camss-csid-4-1.o \
camss-csid-4-7.o \
+ camss-csid-680.o \
camss-csid-gen2.o \
camss-csid-780.o \
camss-csiphy-2ph-1-0.o \
@@ -17,6 +18,7 @@ qcom-camss-objs += \
camss-vfe-4-8.o \
camss-vfe-17x.o \
camss-vfe-480.o \
+ camss-vfe-680.o \
camss-vfe-780.o \
camss-vfe-gen1.o \
camss-vfe.o \
diff --git a/drivers/media/platform/qcom/camss/camss-csid-680.c b/drivers/media/platform/qcom/camss/camss-csid-680.c
new file mode 100644
index 000000000000..3ad3a174bcfb
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid-680.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
+ *
+ * Copyright (C) 2020-2025 Linaro Ltd.
+ */
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "camss.h"
+#include "camss-csid.h"
+#include "camss-csid-gen2.h"
+
+#define CSID_TOP_IO_PATH_CFG0(csid) (0x4 * (csid))
+#define CSID_TOP_IO_PATH_CFG0_INTERNAL_CSID BIT(0)
+#define CSID_TOP_IO_PATH_CFG0_SFE_0 BIT(1)
+#define CSID_TOP_IO_PATH_CFG0_SFE_1 GENMASK(1, 0)
+#define CSID_TOP_IO_PATH_CFG0_SBI_0 BIT(4)
+#define CSID_TOP_IO_PATH_CFG0_SBI_1 GENMASK(3, 0)
+#define CSID_TOP_IO_PATH_CFG0_SBI_2 GENMASK(3, 1)
+#define CSID_TOP_IO_PATH_CFG0_OUTPUT_IFE_EN BIT(8)
+#define CSID_TOP_IO_PATH_CFG0_SFE_OFFLINE_EN BIT(12)
+
+#define CSID_RESET_CMD 0x10
+#define CSID_RESET_CMD_HW_RESET BIT(0)
+#define CSID_RESET_CMD_SW_RESET BIT(1)
+#define CSID_RESET_CMD_IRQ_CTRL BIT(2)
+
+#define CSID_IRQ_CMD 0x14
+#define CSID_IRQ_CMD_CLEAR BIT(0)
+#define CSID_IRQ_CMD_SET BIT(4)
+
+#define CSID_REG_UPDATE_CMD 0x18
+
+#define CSID_CSI2_RDIN_IRQ_STATUS(rdi) (0xec + 0x10 * (rdi))
+#define CSID_CSI2_RDIN_CCIF_VIOLATION BIT(29)
+#define CSID_CSI2_RDIN_SENSOR_SWITCH_OUT_OF_SYNC_FRAME_DROP BIT(28)
+#define CSID_CSI2_RDIN_ERROR_REC_WIDTH_VIOLATION BIT(27)
+#define CSID_CSI2_RDIN_ERROR_REC_HEIGHT_VIOLATION BIT(26)
+#define CSID_CSI2_RDIN_BATCH_END_MISSING_VIOLATION BIT(25)
+#define CSID_CSI2_RDIN_ILLEGAL_BATCH_ID_IRQ BIT(24)
+#define CSID_CSI2_RDIN_RUP_DONE BIT(23)
+#define CSID_CSI2_RDIN_CAMIF_EPOCH_1_IRQ BIT(22)
+#define CSID_CSI2_RDIN_CAMIF_EPOCH_0_IRQ BIT(21)
+#define CSID_CSI2_RDIN_ERROR_REC_OVERFLOW_IRQ BIT(19)
+#define CSID_CSI2_RDIN_ERROR_REC_FRAME_DROP BIT(18)
+#define CSID_CSI2_RDIN_VCDT_GRP_CHANG BIT(17)
+#define CSID_CSI2_RDIN_VCDT_GRP_0_SEL BIT(16)
+#define CSID_CSI2_RDIN_VCDT_GRP_1_SEL BIT(15)
+#define CSID_CSI2_RDIN_ERROR_LINE_COUNT BIT(14)
+#define CSID_CSI2_RDIN_ERROR_PIX_COUNT BIT(13)
+#define CSID_CSI2_RDIN_INFO_INPUT_SOF BIT(12)
+#define CSID_CSI2_RDIN_INFO_INPUT_SOL BIT(11)
+#define CSID_CSI2_RDIN_INFO_INPUT_EOL BIT(10)
+#define CSID_CSI2_RDIN_INFO_INPUT_EOF BIT(9)
+#define CSID_CSI2_RDIN_INFO_FRAME_DROP_SOF BIT(8)
+#define CSID_CSI2_RDIN_INFO_FRAME_DROP_SOL BIT(7)
+#define CSID_CSI2_RDIN_INFO_FRAME_DROP_EOL BIT(6)
+#define CSID_CSI2_RDIN_INFO_FRAME_DROP_EOF BIT(5)
+#define CSID_CSI2_RDIN_INFO_CAMIF_SOF BIT(4)
+#define CSID_CSI2_RDIN_INFO_CAMIF_EOF BIT(3)
+#define CSID_CSI2_RDIN_INFO_FIFO_OVERFLOW BIT(2)
+#define CSID_CSI2_RDIN_RES1 BIT(1)
+#define CSID_CSI2_RDIN_RES0 BIT(0)
+
+#define CSID_CSI2_RDIN_IRQ_MASK(rdi) (0xf0 + 0x10 * (rdi))
+#define CSID_CSI2_RDIN_IRQ_CLEAR(rdi) (0xf4 + 0x10 * (rdi))
+#define CSID_CSI2_RDIN_IRQ_SET(rdi) (0xf8 + 0x10 * (rdi))
+
+#define CSID_TOP_IRQ_STATUS 0x7c
+#define CSID_TOP_IRQ_MASK 0x80
+#define CSID_TOP_IRQ_CLEAR 0x84
+#define CSID_TOP_IRQ_RESET BIT(0)
+#define CSID_TOP_IRQ_RX BIT(2)
+#define CSID_TOP_IRQ_LONG_PKT(rdi) (BIT(8) << (rdi))
+#define CSID_TOP_IRQ_BUF_DONE BIT(13)
+
+#define CSID_BUF_DONE_IRQ_STATUS 0x8c
+#define BUF_DONE_IRQ_STATUS_RDI_OFFSET (csid_is_lite(csid) ? 1 : 14)
+#define CSID_BUF_DONE_IRQ_MASK 0x90
+#define CSID_BUF_DONE_IRQ_CLEAR 0x94
+
+#define CSID_CSI2_RX_IRQ_STATUS 0x9c
+#define CSID_CSI2_RX_IRQ_MASK 0xa0
+#define CSID_CSI2_RX_IRQ_CLEAR 0xa4
+
+#define CSID_RESET_CFG 0xc
+#define CSID_RESET_CFG_MODE_IMMEDIATE BIT(0)
+#define CSID_RESET_CFG_LOCATION_COMPLETE BIT(4)
+
+#define CSID_CSI2_RDI_IRQ_STATUS(rdi) (0xec + 0x10 * (rdi))
+#define CSID_CSI2_RDI_IRQ_MASK(rdi) (0xf0 + 0x10 * (rdi))
+#define CSID_CSI2_RDI_IRQ_CLEAR(rdi) (0xf4 + 0x10 * (rdi))
+
+#define CSID_CSI2_RX_CFG0 0x200
+#define CSI2_RX_CFG0_NUM_ACTIVE_LANES 0
+#define CSI2_RX_CFG0_DL0_INPUT_SEL 4
+#define CSI2_RX_CFG0_DL1_INPUT_SEL 8
+#define CSI2_RX_CFG0_DL2_INPUT_SEL 12
+#define CSI2_RX_CFG0_DL3_INPUT_SEL 16
+#define CSI2_RX_CFG0_PHY_NUM_SEL 20
+#define CSI2_RX_CFG0_PHY_SEL_BASE_IDX 1
+#define CSI2_RX_CFG0_PHY_TYPE_SEL 24
+
+#define CSID_CSI2_RX_CFG1 0x204
+#define CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN BIT(0)
+#define CSI2_RX_CFG1_DE_SCRAMBLE_EN BIT(1)
+#define CSI2_RX_CFG1_VC_MODE BIT(2)
+#define CSI2_RX_CFG1_COMPLETE_STREAM_EN BIT(4)
+#define CSI2_RX_CFG1_COMPLETE_STREAM_FRAME_TIMING BIT(5)
+#define CSI2_RX_CFG1_MISR_EN BIT(6)
+#define CSI2_RX_CFG1_CGC_MODE BIT(7)
+
+#define CSID_CSI2_RX_CAPTURE_CTRL 0x208
+#define CSI2_RX_CAPTURE_CTRL_LONG_PKT_EN BIT(0)
+#define CSI2_RX_CAPTURE_CTRL_SHORT_PKT_EN BIT(1)
+#define CSI2_RX_CAPTURE_CTRL_CPHY_PKT_EN BIT(2)
+#define CSI2_RX_CAPTURE_CTRL_LONG_PKT_DT GENMASK(9, 4)
+#define CSI2_RX_CAPTURE_CTRL_LONG_PKT_VC GENMASK(14, 10)
+#define CSI2_RX_CAPTURE_CTRL_SHORT_PKT_VC GENMASK(19, 15)
+#define CSI2_RX_CAPTURE_CTRL_CPHY_PKT_DT GENMASK(25, 20)
+#define CSI2_RX_CAPTURE_CTRL_CPHY_PKT_VC GENMASK(30, 26)
+
+#define CSID_CSI2_RX_TOTAL_PKTS_RCVD 0x240
+#define CSID_CSI2_RX_STATS_ECC 0x244
+#define CSID_CSI2_RX_CRC_ERRORS 0x248
+
+#define CSID_RDI_CFG0(rdi) (0x500 + 0x100 * (rdi))
+#define RDI_CFG0_DECODE_FORMAT 12
+#define RDI_CFG0_DATA_TYPE 16
+#define RDI_CFG0_VIRTUAL_CHANNEL 22
+#define RDI_CFG0_DT_ID 27
+#define RDI_CFG0_ENABLE BIT(31)
+
+#define CSID_RDI_CTRL(rdi) (0x504 + 0x100 * (rdi))
+#define CSID_RDI_CTRL_HALT_CMD_HALT_AT_FRAME_BOUNDARY 0
+#define CSID_RDI_CTRL_HALT_CMD_RESUME_AT_FRAME_BOUNDARY 1
+
+#define CSID_RDI_CFG1(rdi) (0x510 + 0x100 * (rdi))
+#define RDI_CFG1_TIMESTAMP_STB_FRAME BIT(0)
+#define RDI_CFG1_TIMESTAMP_STB_IRQ BIT(1)
+#define RDI_CFG1_BYTE_CNTR_EN BIT(2)
+#define RDI_CFG1_TIMESTAMP_EN BIT(4)
+#define RDI_CFG1_DROP_H_EN BIT(5)
+#define RDI_CFG1_DROP_V_EN BIT(6)
+#define RDI_CFG1_CROP_H_EN BIT(7)
+#define RDI_CFG1_CROP_V_EN BIT(8)
+#define RDI_CFG1_MISR_EN BIT(9)
+#define RDI_CFG1_PLAIN_ALIGN_MSB BIT(11)
+#define RDI_CFG1_EARLY_EOF_EN BIT(14)
+#define RDI_CFG1_PACKING_MIPI BIT(15)
+
+#define CSID_RDI_ERR_RECOVERY_CFG0(rdi) (0x514 + 0x100 * (rdi))
+#define CSID_RDI_EPOCH_IRQ_CFG(rdi) (0x52c + 0x100 * (rdi))
+#define CSID_RDI_FRM_DROP_PATTERN(rdi) (0x540 + 0x100 * (rdi))
+#define CSID_RDI_FRM_DROP_PERIOD(rdi) (0x544 + 0x100 * (rdi))
+#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) (0x548 + 0x100 * (rdi))
+#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) (0x54c + 0x100 * (rdi))
+#define CSID_RDI_PIX_DROP_PATTERN(rdi) (0x558 + 0x100 * (rdi))
+#define CSID_RDI_PIX_DROP_PERIOD(rdi) (0x55c + 0x100 * (rdi))
+#define CSID_RDI_LINE_DROP_PATTERN(rdi) (0x560 + 0x100 * (rdi))
+#define CSID_RDI_LINE_DROP_PERIOD(rdi) (0x564 + 0x100 * (rdi))
+
+static inline int reg_update_rdi(struct csid_device *csid, int n)
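+/* Each RDI pairs one bit at offset 4 and one at offset 20 in REG_UPDATE_CMD. */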
+{
+ return BIT(4 + n) + BIT(20 + n);
+}
+
+static void csid_reg_update(struct csid_device *csid, int port_id)
+{
+ csid->reg_update |= reg_update_rdi(csid, port_id);
+ writel(csid->reg_update, csid->base + CSID_REG_UPDATE_CMD);
+}
+
+static inline void csid_reg_update_clear(struct csid_device *csid,
+ int port_id)
+{
+ csid->reg_update &= ~reg_update_rdi(csid, port_id);
+ writel(csid->reg_update, csid->base + CSID_REG_UPDATE_CMD);
+}
+
+static void __csid_configure_rx(struct csid_device *csid,
+ struct csid_phy_config *phy, int vc)
+{
+ u32 val;
+
+ val = (phy->lane_cnt - 1) << CSI2_RX_CFG0_NUM_ACTIVE_LANES;
+ val |= phy->lane_assign << CSI2_RX_CFG0_DL0_INPUT_SEL;
+ val |= (phy->csiphy_id + CSI2_RX_CFG0_PHY_SEL_BASE_IDX) << CSI2_RX_CFG0_PHY_NUM_SEL;
+
+ writel(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
+ if (vc > 3)
+ val |= CSI2_RX_CFG1_VC_MODE;
+ writel(val, csid->base + CSID_CSI2_RX_CFG1);
+}
+
+static void __csid_ctrl_rdi(struct csid_device *csid, int enable, u8 rdi)
+{
+ u32 val;
+
+ if (enable)
+ val = CSID_RDI_CTRL_HALT_CMD_RESUME_AT_FRAME_BOUNDARY;
+ else
+ val = CSID_RDI_CTRL_HALT_CMD_HALT_AT_FRAME_BOUNDARY;
+
+ writel(val, csid->base + CSID_RDI_CTRL(rdi));
+}
+
+static void __csid_configure_top(struct csid_device *csid)
+{
+ u32 val;
+
+ val = CSID_TOP_IO_PATH_CFG0_OUTPUT_IFE_EN | CSID_TOP_IO_PATH_CFG0_INTERNAL_CSID;
+ writel(val, csid->camss->csid_wrapper_base +
+ CSID_TOP_IO_PATH_CFG0(csid->id));
+}
+
+static void __csid_configure_rdi_stream(struct csid_device *csid, u8 enable, u8 vc)
+{
+ struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
+ const struct csid_format_info *format = csid_get_fmt_entry(csid->res->formats->formats,
+ csid->res->formats->nformats,
+ input_format->code);
+ u8 lane_cnt = csid->phy.lane_cnt;
+ u8 dt_id;
+ u32 val;
+
+ if (!lane_cnt)
+ lane_cnt = 4;
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(vc));
+
+ /*
+ * DT_ID is a two-bit field that is concatenated with the four
+ * least significant bits of the five-bit VC field to generate an
+ * internal CID value.
+ *
+ * CSID_RDI_CFG0(vc)
+ * DT_ID : 28:27
+ * VC : 26:22
+ * DT : 21:16
+ *
+ * CID : VC 3:0 << 2 | DT_ID 1:0
+ */
+ dt_id = vc & 0x03;
+
+ /* note: for non-RDI path, this should be format->decode_format */
+ val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT;
+ val |= format->data_type << RDI_CFG0_DATA_TYPE;
+ val |= vc << RDI_CFG0_VIRTUAL_CHANNEL;
+ val |= dt_id << RDI_CFG0_DT_ID;
+ writel(val, csid->base + CSID_RDI_CFG0(vc));
+
+ val = RDI_CFG1_TIMESTAMP_STB_FRAME;
+ val |= RDI_CFG1_BYTE_CNTR_EN;
+ val |= RDI_CFG1_TIMESTAMP_EN;
+ val |= RDI_CFG1_DROP_H_EN;
+ val |= RDI_CFG1_DROP_V_EN;
+ val |= RDI_CFG1_CROP_H_EN;
+ val |= RDI_CFG1_CROP_V_EN;
+ val |= RDI_CFG1_PACKING_MIPI;
+
+ writel(val, csid->base + CSID_RDI_CFG1(vc));
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc));
+
+ val = 1;
+ writel(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc));
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_CTRL(vc));
+
+ val = readl(csid->base + CSID_RDI_CFG0(vc));
+ if (enable)
+ val |= RDI_CFG0_ENABLE;
+ else
+ val &= ~RDI_CFG0_ENABLE;
+ writel(val, csid->base + CSID_RDI_CFG0(vc));
+}
+
+static void csid_configure_stream(struct csid_device *csid, u8 enable)
+{
+ int i;
+
+ __csid_configure_top(csid);
+
+ /* Loop through all enabled VCs and configure stream for each */
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) {
+ if (csid->phy.en_vc & BIT(i)) {
+ __csid_configure_rdi_stream(csid, enable, i);
+ __csid_configure_rx(csid, &csid->phy, i);
+ __csid_ctrl_rdi(csid, enable, i);
+ }
+ }
+}
+
+/*
+ * csid_reset - Trigger reset on CSID module and wait to complete
+ * @csid: CSID device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_reset(struct csid_device *csid)
+{
+ unsigned long time;
+ u32 val;
+ int i;
+
+ reinit_completion(&csid->reset_complete);
+
+ writel(CSID_IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+
+ /* preserve registers */
+ val = CSID_RESET_CFG_MODE_IMMEDIATE | CSID_RESET_CFG_LOCATION_COMPLETE;
+ writel(val, csid->base + CSID_RESET_CFG);
+
+ val = CSID_RESET_CMD_HW_RESET | CSID_RESET_CMD_SW_RESET;
+ writel(val, csid->base + CSID_RESET_CMD);
+
+ time = wait_for_completion_timeout(&csid->reset_complete,
+ msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(csid->camss->dev, "CSID reset timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) {
+ /* Enable RUP done for the client port */
+ writel(CSID_CSI2_RDIN_RUP_DONE, csid->base + CSID_CSI2_RDIN_IRQ_MASK(i));
+ }
+
+ /* Clear RDI status */
+ writel(~0u, csid->base + CSID_BUF_DONE_IRQ_CLEAR);
+
+ /* Enable BUF_DONE bit for all write-master client ports */
+ writel(~0u, csid->base + CSID_BUF_DONE_IRQ_MASK);
+
+ /* Unmask all TOP interrupts */
+ writel(~0u, csid->base + CSID_TOP_IRQ_MASK);
+
+ return 0;
+}
+
+static void csid_rup_complete(struct csid_device *csid, int rdi)
+{
+ csid_reg_update_clear(csid, rdi);
+}
+
+/*
+ * csid_isr - CSID module interrupt service routine
+ * @irq: Interrupt line
+ * @dev: CSID device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ u32 buf_done_val, val, val_top;
+ int i;
+
+ /* Latch and clear TOP status */
+ val_top = readl(csid->base + CSID_TOP_IRQ_STATUS);
+ writel(val_top, csid->base + CSID_TOP_IRQ_CLEAR);
+
+ /* Latch and clear CSID_CSI2 status */
+ val = readl(csid->base + CSID_CSI2_RX_IRQ_STATUS);
+ writel(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR);
+
+ /* Latch and clear top level BUF_DONE status */
+ buf_done_val = readl(csid->base + CSID_BUF_DONE_IRQ_STATUS);
+ writel(buf_done_val, csid->base + CSID_BUF_DONE_IRQ_CLEAR);
+
+ /* Process state for each RDI channel */
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) {
+ val = readl(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i));
+ if (val)
+ writel(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i));
+
+ if (val & CSID_CSI2_RDIN_RUP_DONE)
+ csid_rup_complete(csid, i);
+
+ if (buf_done_val & BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i))
+ camss_buf_done(csid->camss, csid->id, i);
+ }
+
+ /* Issue clear command */
+ writel(CSID_IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+
+ /* Reset complete */
+ if (val_top & CSID_TOP_IRQ_RESET)
+ complete(&csid->reset_complete);
+
+ return IRQ_HANDLED;
+}
+
+static void csid_subdev_reg_update(struct csid_device *csid, int port_id, bool is_clear)
+{
+ if (is_clear)
+ csid_reg_update_clear(csid, port_id);
+ else
+ csid_reg_update(csid, port_id);
+}
+
+static void csid_subdev_init(struct csid_device *csid) {}
+
+const struct csid_hw_ops csid_ops_680 = {
+ .configure_testgen_pattern = NULL,
+ .configure_stream = csid_configure_stream,
+ .hw_version = csid_hw_version,
+ .isr = csid_isr,
+ .reset = csid_reset,
+ .src_pad_code = csid_src_pad_code,
+ .subdev_init = csid_subdev_init,
+ .reg_update = csid_subdev_reg_update,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index d08117f46f3b..5284b5857368 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -613,8 +613,8 @@ u32 csid_hw_version(struct csid_device *csid)
hw_gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
hw_rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
hw_step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
- dev_info(csid->camss->dev, "CSID:%d HW Version = %u.%u.%u\n",
- csid->id, hw_gen, hw_rev, hw_step);
+ dev_dbg(csid->camss->dev, "CSID:%d HW Version = %u.%u.%u\n",
+ csid->id, hw_gen, hw_rev, hw_step);
return hw_version;
}
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 90b8fc5852be..9dc826d8c8f6 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -213,6 +213,7 @@ extern const struct csid_formats csid_formats_gen2;
extern const struct csid_hw_ops csid_ops_4_1;
extern const struct csid_hw_ops csid_ops_4_7;
+extern const struct csid_hw_ops csid_ops_680;
extern const struct csid_hw_ops csid_ops_gen2;
extern const struct csid_hw_ops csid_ops_780;
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index a6cc957b986e..f732a76de93e 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -55,11 +55,12 @@
#define CSIPHY_DNP_PARAMS 4
#define CSIPHY_2PH_REGS 5
#define CSIPHY_3PH_REGS 6
+#define CSIPHY_SKEW_CAL 7
struct csiphy_lane_regs {
s32 reg_addr;
s32 reg_data;
- s32 delay;
+ u32 delay_us;
u32 csiphy_param_type;
};
@@ -423,6 +424,123 @@ csiphy_lane_regs lane_regs_sm8550[] = {
{0x0C64, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
};
+/* 4nm 2PH v 2.1.2 2p5Gbps 4 lane DPHY mode */
+static const struct
+csiphy_lane_regs lane_regs_x1e80100[] = {
+ /* Power up lanes 2ph mode */
+ {0x1014, 0xD5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x101C, 0x7A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x1018, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0094, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0090, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0094, 0xD7, 0x00, CSIPHY_SKEW_CAL},
+ {0x005C, 0x00, 0x00, CSIPHY_SKEW_CAL},
+ {0x0060, 0xBD, 0x00, CSIPHY_SKEW_CAL},
+ {0x0064, 0x7F, 0x00, CSIPHY_SKEW_CAL},
+
+ {0x0E94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0EA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0490, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0xD7, 0x00, CSIPHY_SKEW_CAL},
+ {0x045C, 0x00, 0x00, CSIPHY_SKEW_CAL},
+ {0x0460, 0xBD, 0x00, CSIPHY_SKEW_CAL},
+ {0x0464, 0x7F, 0x00, CSIPHY_SKEW_CAL},
+
+ {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0890, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0xD7, 0x00, CSIPHY_SKEW_CAL},
+ {0x085C, 0x00, 0x00, CSIPHY_SKEW_CAL},
+ {0x0860, 0xBD, 0x00, CSIPHY_SKEW_CAL},
+ {0x0864, 0x7F, 0x00, CSIPHY_SKEW_CAL},
+
+ {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C90, 0x0f, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0xD7, 0x00, CSIPHY_SKEW_CAL},
+ {0x0C5C, 0x00, 0x00, CSIPHY_SKEW_CAL},
+ {0x0C60, 0xBD, 0x00, CSIPHY_SKEW_CAL},
+ {0x0C64, 0x7F, 0x00, CSIPHY_SKEW_CAL},
+};
+
static void csiphy_hw_version_read(struct csiphy_device *csiphy,
struct device *dev)
{
@@ -593,6 +711,9 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
case CSIPHY_SETTLE_CNT_LOWER_BYTE:
val = settle_cnt & 0xff;
break;
+ case CSIPHY_SKEW_CAL:
+ /* TODO: support application of skew from dt flag */
+ continue;
case CSIPHY_DNP_PARAMS:
continue;
default:
@@ -600,6 +721,8 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
break;
}
writel_relaxed(val, csiphy->base + r->reg_addr);
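+ /* Honour the optional per-register settle delay. */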
+ if (r->delay_us)
+ udelay(r->delay_us);
}
}
@@ -626,6 +749,7 @@ static bool csiphy_is_gen2(u32 version)
case CAMSS_8280XP:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_X1E80100:
ret = true;
break;
}
@@ -714,6 +838,11 @@ static int csiphy_init(struct csiphy_device *csiphy)
regs->lane_regs = &lane_regs_sc8280xp[0];
regs->lane_array_size = ARRAY_SIZE(lane_regs_sc8280xp);
break;
+ case CAMSS_X1E80100:
+ regs->lane_regs = &lane_regs_x1e80100[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_x1e80100);
+ regs->offset = 0x1000;
+ break;
case CAMSS_8550:
regs->lane_regs = &lane_regs_sm8550[0];
regs->lane_array_size = ARRAY_SIZE(lane_regs_sm8550);
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index c053616558a7..c622efcc92ff 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -586,7 +586,7 @@ int msm_csiphy_subdev_init(struct camss *camss,
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
- int i, j, k;
+ int i, j;
int ret;
csiphy->camss = camss;
@@ -680,23 +680,21 @@ int msm_csiphy_subdev_init(struct camss *camss,
for (j = 0; j < clock->nfreqs; j++)
clock->freq[j] = res->clock_rate[i][j];
- for (k = 0; k < camss->res->csiphy_num; k++) {
- csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
- "csiphy%d_timer", k);
- if (csiphy->rate_set[i])
- break;
-
- if (camss->res->version == CAMSS_660) {
- csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
- "csi%d_phy", k);
- if (csiphy->rate_set[i])
- break;
- }
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
+ "csiphy%d_timer",
+ csiphy->id);
+ if (csiphy->rate_set[i])
+ continue;
- csiphy->rate_set[i] = csiphy_match_clock_name(clock->name, "csiphy%d", k);
+ if (camss->res->version == CAMSS_660) {
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
+ "csi%d_phy",
+ csiphy->id);
if (csiphy->rate_set[i])
- break;
+ continue;
}
+
+ csiphy->rate_set[i] = csiphy_match_clock_name(clock->name, "csiphy%d", csiphy->id);
}
/* CSIPHY supplies */
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index 86b98b37838e..ab91273303b9 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -81,6 +81,7 @@ struct csiphy_hw_ops {
};
struct csiphy_subdev_resources {
+ u8 id;
const struct csiphy_hw_ops *hw_ops;
const struct csiphy_formats *formats;
};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-680.c b/drivers/media/platform/qcom/camss/camss-vfe-680.c
new file mode 100644
index 000000000000..99036e7c1e76
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-680.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * camss-vfe-680.c
+ *
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v680
+ *
+ * Copyright (C) 2025 Linaro Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss.h"
+#include "camss-vfe.h"
+
+#define VFE_TOP_IRQn_STATUS(vfe, n) ((vfe_is_lite(vfe) ? 0x1c : 0x44) + (n) * 4)
+#define VFE_TOP_IRQn_MASK(vfe, n) ((vfe_is_lite(vfe) ? 0x24 : 0x34) + (n) * 4)
+#define VFE_TOP_IRQn_CLEAR(vfe, n) ((vfe_is_lite(vfe) ? 0x2c : 0x3c) + (n) * 4)
+#define VFE_IRQ1_SOF(vfe, n) ((vfe_is_lite(vfe) ? BIT(2) : BIT(8)) << ((n) * 2))
+#define VFE_IRQ1_EOF(vfe, n) ((vfe_is_lite(vfe) ? BIT(3) : BIT(9)) << ((n) * 2))
+#define VFE_TOP_IRQ_CMD(vfe) (vfe_is_lite(vfe) ? 0x38 : 0x30)
+#define VFE_TOP_IRQ_CMD_GLOBAL_CLEAR BIT(0)
+#define VFE_TOP_DIAG_CONFIG (vfe_is_lite(vfe) ? 0x40 : 0x50)
+
+#define VFE_TOP_DEBUG_11(vfe) (vfe_is_lite(vfe) ? 0x40 : 0xcc)
+#define VFE_TOP_DEBUG_12(vfe) (vfe_is_lite(vfe) ? 0x40 : 0xd0)
+#define VFE_TOP_DEBUG_13(vfe) (vfe_is_lite(vfe) ? 0x40 : 0xd4)
+
+#define VFE_BUS_IRQn_MASK(vfe, n) ((vfe_is_lite(vfe) ? 0x218 : 0xc18) + (n) * 4)
+#define VFE_BUS_IRQn_CLEAR(vfe, n) ((vfe_is_lite(vfe) ? 0x220 : 0xc20) + (n) * 4)
+#define VFE_BUS_IRQn_STATUS(vfe, n) ((vfe_is_lite(vfe) ? 0x228 : 0xc28) + (n) * 4)
+#define VFE_BUS_IRQ_GLOBAL_CLEAR(vfe) (vfe_is_lite(vfe) ? 0x230 : 0xc30)
+#define VFE_BUS_WR_VIOLATION_STATUS(vfe) (vfe_is_lite(vfe) ? 0x264 : 0xc64)
+#define VFE_BUS_WR_OVERFLOW_STATUS(vfe) (vfe_is_lite(vfe) ? 0x268 : 0xc68)
+#define VFE_BUS_WR_IMAGE_VIOLATION_STATUS(vfe) (vfe_is_lite(vfe) ? 0x270 : 0xc70)
+
+#define VFE_BUS_WRITE_CLIENT_CFG(vfe, c) ((vfe_is_lite(vfe) ? 0x400 : 0xe00) + (c) * 0x100)
+#define VFE_BUS_WRITE_CLIENT_CFG_EN BIT(0)
+#define VFE_BUS_IMAGE_ADDR(vfe, c) ((vfe_is_lite(vfe) ? 0x404 : 0xe04) + (c) * 0x100)
+#define VFE_BUS_FRAME_INCR(vfe, c) ((vfe_is_lite(vfe) ? 0x408 : 0xe08) + (c) * 0x100)
+#define VFE_BUS_IMAGE_CFG0(vfe, c) ((vfe_is_lite(vfe) ? 0x40c : 0xe0c) + (c) * 0x100)
+#define VFE_BUS_IMAGE_CFG0_DATA(h, s) (((h) << 16) | ((s) >> 4))
+#define WM_IMAGE_CFG_0_DEFAULT_WIDTH (0xFFFF)
+
+#define VFE_BUS_IMAGE_CFG1(vfe, c) ((vfe_is_lite(vfe) ? 0x410 : 0xe10) + (c) * 0x100)
+#define VFE_BUS_IMAGE_CFG2(vfe, c) ((vfe_is_lite(vfe) ? 0x414 : 0xe14) + (c) * 0x100)
+#define VFE_BUS_PACKER_CFG(vfe, c) ((vfe_is_lite(vfe) ? 0x418 : 0xe18) + (c) * 0x100)
+#define VFE_BUS_IRQ_SUBSAMPLE_PERIOD(vfe, c) ((vfe_is_lite(vfe) ? 0x430 : 0xe30) + (c) * 0x100)
+#define VFE_BUS_IRQ_SUBSAMPLE_PATTERN(vfe, c) ((vfe_is_lite(vfe) ? 0x434 : 0xe34) + (c) * 0x100)
+#define VFE_BUS_FRAMEDROP_PERIOD(vfe, c) ((vfe_is_lite(vfe) ? 0x438 : 0xe38) + (c) * 0x100)
+#define VFE_BUS_FRAMEDROP_PATTERN(vfe, c) ((vfe_is_lite(vfe) ? 0x43c : 0xe3c) + (c) * 0x100)
+#define VFE_BUS_MMU_PREFETCH_CFG(vfe, c) ((vfe_is_lite(vfe) ? 0x460 : 0xe60) + (c) * 0x100)
+#define VFE_BUS_MMU_PREFETCH_CFG_EN BIT(0)
+#define VFE_BUS_MMU_PREFETCH_MAX_OFFSET(vfe, c) ((vfe_is_lite(vfe) ? 0x464 : 0xe64) + (c) * 0x100)
+#define VFE_BUS_ADDR_STATUS0(vfe, c) ((vfe_is_lite(vfe) ? 0x470 : 0xe70) + (c) * 0x100)
+
+/*
+ * TODO: differentiate the port id based on the requested type of RDI, BHIST, etc.
+ *
+ * IFE write master IDs
+ *
+ * VIDEO_FULL_Y 0
+ * VIDEO_FULL_C 1
+ * VIDEO_DS_4:1 2
+ * VIDEO_DS_16:1 3
+ * DISPLAY_FULL_Y 4
+ * DISPLAY_FULL_C 5
+ * DISPLAY_DS_4:1 6
+ * DISPLAY_DS_16:1 7
+ * FD_Y 8
+ * FD_C 9
+ * PIXEL_RAW 10
+ * STATS_BE0 11
+ * STATS_BHIST0 12
+ * STATS_TINTLESS_BG 13
+ * STATS_AWB_BG 14
+ * STATS_AWB_BFW 15
+ * STATS_BAF 16
+ * STATS_BHIST 17
+ * STATS_RS 18
+ * STATS_IHIST 19
+ * SPARSE_PD 20
+ * PDAF_V2.0_PD_DATA 21
+ * PDAF_V2.0_SAD 22
+ * LCR 23
+ * RDI0 24
+ * RDI1 25
+ * RDI2 26
+ * LTM_STATS 27
+ *
+ * IFE Lite write master IDs
+ *
+ * RDI0 0
+ * RDI1 1
+ * RDI2 2
+ * RDI3 3
+ * GAMMA 4
+ * BE 5
+ */
+
+/* TODO: assign an ENUM in resources and use the provided master
+ * id directly for RDI, STATS, AWB_BG, BHIST.
+ * This macro only works because RDI is all we support right now.
+ */
+#define RDI_WM(n) ((vfe_is_lite(vfe) ? 0 : 24) + (n))
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ /* VFE680 has no global reset; simply report completion */
+ complete(&vfe->reset_complete);
+}
+
+/*
+ * vfe_isr - VFE module interrupt handler
+ * @irq: Interrupt line
+ * @dev: VFE device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ return IRQ_HANDLED;
+}
+
+/*
+ * vfe_halt - Trigger halt on VFE module and wait to complete
+ * @vfe: VFE device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int vfe_halt(struct vfe_device *vfe)
+{
+ /* rely on vfe_disable_output() to stop the VFE */
+ return 0;
+}
+
+static void vfe_disable_irq(struct vfe_device *vfe)
+{
+ writel(0u, vfe->base + VFE_TOP_IRQn_MASK(vfe, 0));
+ writel(0u, vfe->base + VFE_TOP_IRQn_MASK(vfe, 1));
+ writel(0u, vfe->base + VFE_BUS_IRQn_MASK(vfe, 0));
+ writel(0u, vfe->base + VFE_BUS_IRQn_MASK(vfe, 1));
+}
+
+static void vfe_wm_update(struct vfe_device *vfe, u8 rdi, u32 addr,
+ struct vfe_line *line)
+{
+ u8 wm = RDI_WM(rdi);
+
+ writel(addr, vfe->base + VFE_BUS_IMAGE_ADDR(vfe, wm));
+}
+
+static void vfe_wm_start(struct vfe_device *vfe, u8 rdi, struct vfe_line *line)
+{
+ struct v4l2_pix_format_mplane *pix =
+ &line->video_out.active_fmt.fmt.pix_mp;
+ u32 stride = pix->plane_fmt[0].bytesperline;
+ u32 cfg;
+ u8 wm;
+
+ cfg = VFE_BUS_IMAGE_CFG0_DATA(pix->height, stride);
+ wm = RDI_WM(rdi);
+
+ writel(cfg, vfe->base + VFE_BUS_IMAGE_CFG0(vfe, wm));
+ writel(0, vfe->base + VFE_BUS_IMAGE_CFG1(vfe, wm));
+ writel(stride, vfe->base + VFE_BUS_IMAGE_CFG2(vfe, wm));
+ writel(0, vfe->base + VFE_BUS_PACKER_CFG(vfe, wm));
+
+ /* Set total frame increment value */
+ writel(pix->plane_fmt[0].bytesperline * pix->height,
+ vfe->base + VFE_BUS_FRAME_INCR(vfe, wm));
+
+ /* MMU */
+ writel(VFE_BUS_MMU_PREFETCH_CFG_EN, vfe->base + VFE_BUS_MMU_PREFETCH_CFG(vfe, wm));
+ writel(~0u, vfe->base + VFE_BUS_MMU_PREFETCH_MAX_OFFSET(vfe, wm));
+
+ /* no dropped frames, one irq per frame */
+ writel(1, vfe->base + VFE_BUS_FRAMEDROP_PATTERN(vfe, wm));
+ writel(0, vfe->base + VFE_BUS_FRAMEDROP_PERIOD(vfe, wm));
+ writel(1, vfe->base + VFE_BUS_IRQ_SUBSAMPLE_PATTERN(vfe, wm));
+ writel(0, vfe->base + VFE_BUS_IRQ_SUBSAMPLE_PERIOD(vfe, wm));
+
+ /* We don't process IRQs for VFE in RDI mode at the moment */
+ vfe_disable_irq(vfe);
+
+ /* Enable WM */
+ writel(VFE_BUS_WRITE_CLIENT_CFG_EN,
+ vfe->base + VFE_BUS_WRITE_CLIENT_CFG(vfe, wm));
+
+ dev_dbg(vfe->camss->dev, "RDI%d WM:%d width %d height %d stride %d\n",
+ rdi, wm, pix->width, pix->height, stride);
+}
+
+static void vfe_wm_stop(struct vfe_device *vfe, u8 rdi)
+{
+ u8 wm = RDI_WM(rdi);
+
+ writel(0, vfe->base + VFE_BUS_WRITE_CLIENT_CFG(vfe, wm));
+}
+
+static const struct camss_video_ops vfe_video_ops_680 = {
+ .queue_buffer = vfe_queue_buffer_v2,
+ .flush_buffers = vfe_flush_buffers,
+};
+
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+ vfe->video_ops = vfe_video_ops_680;
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ int port_id = line_id;
+
+ camss_reg_update(vfe->camss, vfe->id, port_id, false);
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ int port_id = line_id;
+
+ camss_reg_update(vfe->camss, vfe->id, port_id, true);
+}
+
+const struct vfe_hw_ops vfe_ops_680 = {
+ .global_reset = vfe_global_reset,
+ .hw_version = vfe_hw_version,
+ .isr = vfe_isr,
+ .pm_domain_off = vfe_pm_domain_off,
+ .pm_domain_on = vfe_pm_domain_on,
+ .subdev_init = vfe_subdev_init,
+ .vfe_disable = vfe_disable,
+ .vfe_enable = vfe_enable_v2,
+ .vfe_halt = vfe_halt,
+ .vfe_wm_start = vfe_wm_start,
+ .vfe_wm_stop = vfe_wm_stop,
+ .vfe_buf_done = vfe_buf_done,
+ .vfe_wm_update = vfe_wm_update,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+};
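As a worked example of the RDI_WM() mapping defined near the top of this file: RDI write masters start at index 24 on a full IFE but at 0 on an IFE Lite, per the master-ID tables above. A hypothetical standalone restatement of the same arithmetic:

	/* Hypothetical illustration, not part of the patch: RDI1 maps to
	 * WM 25 on a full IFE and to WM 1 on an IFE Lite.
	 */
	static inline unsigned int rdi_wm_index(bool is_lite, unsigned int rdi)
	{
		return (is_lite ? 0 : 24) + rdi;
	}

vfe_wm_start() then programs that write master directly: IMAGE_CFG0 packs the height into the upper half-word and stride/16 into the lower one, and FRAME_INCR is bytesperline * height, i.e. exactly one frame per buffer.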
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index cf0e8f5c004a..4bca6c3abaff 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -346,6 +346,7 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
case CAMSS_8280XP:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_X1E80100:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
{
@@ -428,8 +429,8 @@ u32 vfe_hw_version(struct vfe_device *vfe)
u32 rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
u32 step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
- dev_info(vfe->camss->dev, "VFE:%d HW Version = %u.%u.%u\n",
- vfe->id, gen, rev, step);
+ dev_dbg(vfe->camss->dev, "VFE:%d HW Version = %u.%u.%u\n",
+ vfe->id, gen, rev, step);
return hw_version;
}
@@ -1973,6 +1974,7 @@ static int vfe_bpl_align(struct vfe_device *vfe)
case CAMSS_8280XP:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_X1E80100:
ret = 16;
break;
default:
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 9dec5bc0d1b1..a23f666be753 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -243,6 +243,7 @@ extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
extern const struct vfe_hw_ops vfe_ops_170;
extern const struct vfe_hw_ops vfe_ops_480;
+extern const struct vfe_hw_ops vfe_ops_680;
extern const struct vfe_hw_ops vfe_ops_780;
int vfe_get(struct vfe_device *vfe);
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 6791dfea91b1..06f42875702f 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -46,6 +46,7 @@ static const struct camss_subdev_resources csiphy_res_8x16[] = {
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_2ph_1_0,
.formats = &csiphy_formats_8x16
}
@@ -62,6 +63,7 @@ static const struct camss_subdev_resources csiphy_res_8x16[] = {
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_2ph_1_0,
.formats = &csiphy_formats_8x16
}
@@ -318,6 +320,7 @@ static const struct camss_subdev_resources csiphy_res_8x96[] = {
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -334,6 +337,7 @@ static const struct camss_subdev_resources csiphy_res_8x96[] = {
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -350,6 +354,7 @@ static const struct camss_subdev_resources csiphy_res_8x96[] = {
.reg = { "csiphy2", "csiphy2_clk_mux" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -524,6 +529,7 @@ static const struct camss_subdev_resources csiphy_res_660[] = {
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -542,6 +548,7 @@ static const struct camss_subdev_resources csiphy_res_660[] = {
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -560,6 +567,7 @@ static const struct camss_subdev_resources csiphy_res_660[] = {
.reg = { "csiphy2", "csiphy2_clk_mux" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_8x96
}
@@ -751,6 +759,7 @@ static const struct camss_subdev_resources csiphy_res_670[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -768,6 +777,7 @@ static const struct camss_subdev_resources csiphy_res_670[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -785,6 +795,7 @@ static const struct camss_subdev_resources csiphy_res_670[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -935,6 +946,7 @@ static const struct camss_subdev_resources csiphy_res_845[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -957,6 +969,7 @@ static const struct camss_subdev_resources csiphy_res_845[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -979,6 +992,7 @@ static const struct camss_subdev_resources csiphy_res_845[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1001,6 +1015,7 @@ static const struct camss_subdev_resources csiphy_res_845[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1179,6 +1194,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1192,6 +1208,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1205,6 +1222,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1218,6 +1236,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1231,6 +1250,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy4" },
.interrupt = { "csiphy4" },
.csiphy = {
+ .id = 4,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1244,6 +1264,7 @@ static const struct camss_subdev_resources csiphy_res_8250[] = {
.reg = { "csiphy5" },
.interrupt = { "csiphy5" },
.csiphy = {
+ .id = 5,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1458,6 +1479,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1472,6 +1494,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1486,6 +1509,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1500,6 +1524,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1514,6 +1539,7 @@ static const struct camss_subdev_resources csiphy_res_7280[] = {
.reg = { "csiphy4" },
.interrupt = { "csiphy4" },
.csiphy = {
+ .id = 4,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sc7280
}
@@ -1766,6 +1792,7 @@ static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1779,6 +1806,7 @@ static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1792,6 +1820,7 @@ static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -1805,6 +1834,7 @@ static const struct camss_subdev_resources csiphy_res_sc8280xp[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2134,6 +2164,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy0" },
.interrupt = { "csiphy0" },
.csiphy = {
+ .id = 0,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2147,6 +2178,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy1" },
.interrupt = { "csiphy1" },
.csiphy = {
+ .id = 1,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2160,6 +2192,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy2" },
.interrupt = { "csiphy2" },
.csiphy = {
+ .id = 2,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2173,6 +2206,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy3" },
.interrupt = { "csiphy3" },
.csiphy = {
+ .id = 3,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2186,6 +2220,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy4" },
.interrupt = { "csiphy4" },
.csiphy = {
+ .id = 4,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2199,6 +2234,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy5" },
.interrupt = { "csiphy5" },
.csiphy = {
+ .id = 5,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2212,6 +2248,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy6" },
.interrupt = { "csiphy6" },
.csiphy = {
+ .id = 6,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2225,6 +2262,7 @@ static const struct camss_subdev_resources csiphy_res_8550[] = {
.reg = { "csiphy7" },
.interrupt = { "csiphy7" },
.csiphy = {
+ .id = 7,
.hw_ops = &csiphy_ops_3ph_1_0,
.formats = &csiphy_formats_sdm845
}
@@ -2445,6 +2483,299 @@ static const struct resources_icc icc_res_sm8550[] = {
},
};
+static const struct camss_subdev_resources csiphy_res_x1e80100[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdd-csiphy-0p8-supply",
+ "vdd-csiphy-1p2-supply" },
+ .clock = { "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 300000000, 400000000, 480000000 },
+ { 266666667, 400000000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .id = 0,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ },
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdd-csiphy-0p8-supply",
+ "vdd-csiphy-1p2-supply" },
+ .clock = { "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 300000000, 400000000, 480000000 },
+ { 266666667, 400000000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .id = 1,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ },
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdd-csiphy-0p8-supply",
+ "vdd-csiphy-1p2-supply" },
+ .clock = { "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 300000000, 400000000, 480000000 },
+ { 266666667, 400000000 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .id = 2,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ },
+ },
+ /* CSIPHY4 */
+ {
+ .regulators = { "vdd-csiphy-0p8-supply",
+ "vdd-csiphy-1p2-supply" },
+ .clock = { "csiphy4", "csiphy4_timer" },
+ .clock_rate = { { 300000000, 400000000, 480000000 },
+ { 266666667, 400000000 } },
+ .reg = { "csiphy4" },
+ .interrupt = { "csiphy4" },
+ .csiphy = {
+ .id = 4,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ },
+ },
+};
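Note that the X1E80100 table wires up CSIPHY 0, 1, 2 and 4 only — there is no CSIPHY3 entry — which is exactly why each resource now carries an explicit .id rather than letting msm_csiphy_subdev_init() reuse the array index (see the camss_init_subdevices() hunk further down).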
+
+static const struct camss_subdev_resources csid_res_x1e80100[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ },
+ },
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ },
+ },
+ /* CSID2 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .csid = {
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ },
+ },
+ /* CSID_LITE0 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid_lite0" },
+ .interrupt = { "csid_lite0" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID_LITE1 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "gcc_axi_sf", "cpas_ahb",
+ "cpas_fast_ahb", "csid", "csid_csiphy_rx" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 64000000, 80000000 },
+ { 80000000, 100000000, 200000000,
+ 300000000, 400000000 },
+ { 300000000, 400000000, 480000000 },
+ { 300000000, 400000000, 480000000 }, },
+ .reg = { "csid_lite1" },
+ .interrupt = { "csid_lite1" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_680,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+};
+
+static const struct camss_subdev_resources vfe_res_x1e80100[] = {
+ /* IFE0 */
+ {
+ .regulators = {},
+ .clock = {"camnoc_rt_axi", "camnoc_nrt_axi", "cpas_ahb",
+ "cpas_fast_ahb", "cpas_vfe0", "vfe0_fast_ahb",
+ "vfe0" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 345600000, 432000000, 594000000, 675000000,
+ 727000000 }, },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 4,
+ .pd_name = "ife0",
+ .hw_ops = &vfe_ops_680,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ },
+ },
+ /* IFE1 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_rt_axi", "camnoc_nrt_axi", "cpas_ahb",
+ "cpas_fast_ahb", "cpas_vfe1", "vfe1_fast_ahb",
+ "vfe1" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 345600000, 432000000, 594000000, 675000000,
+ 727000000 }, },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 4,
+ .pd_name = "ife1",
+ .hw_ops = &vfe_ops_680,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ },
+ },
+ /* IFE_LITE_0 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_rt_axi", "camnoc_nrt_axi", "cpas_ahb",
+ "vfe_lite_ahb", "cpas_vfe_lite", "vfe_lite",
+ "vfe_lite_csid" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 266666667, 400000000, 480000000 },
+ { 266666667, 400000000, 480000000 }, },
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .vfe = {
+ .is_lite = true,
+ .line_num = 4,
+ .hw_ops = &vfe_ops_680,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ },
+ },
+ /* IFE_LITE_1 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_rt_axi", "camnoc_nrt_axi", "cpas_ahb",
+ "vfe_lite_ahb", "cpas_vfe_lite", "vfe_lite",
+ "vfe_lite_csid" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 0 },
+ { 266666667, 400000000, 480000000 },
+ { 266666667, 400000000, 480000000 }, },
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .vfe = {
+ .is_lite = true,
+ .line_num = 4,
+ .hw_ops = &vfe_ops_680,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ },
+ },
+};
+
+static const struct resources_icc icc_res_x1e80100[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 150000,
+ .icc_bw_tbl.peak = 300000,
+ },
+ {
+ .name = "hf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "sf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "sf_icp_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
+static const struct resources_wrapper csid_wrapper_res_x1e80100 = {
+ .reg = "csid_wrapper",
+};
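For scale, assuming the kBps units the interconnect framework conventionally uses for these fields: each MNoC path requests 2097152 kBps average and peak, i.e. 2 GiB/s (2097152 = 2 x 1024 x 1024), while the AHB path asks for a comparatively modest 150000/300000 kBps.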
+
/*
* camss_add_clock_margin - Add margin to clock frequency rate
* @rate: Clock frequency rate
@@ -2663,6 +2994,15 @@ static int camss_of_parse_endpoint_node(struct device *dev,
if (ret)
return ret;
+ /*
+ * Most SoCs support both D-PHY and C-PHY standards, but currently only
+ * D-PHY is supported in the driver.
+ */
+ if (vep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "Unsupported bus type %d\n", vep.bus_type);
+ return -EINVAL;
+ }
+
csd->interface.csiphy_id = vep.base.port;
mipi_csi2 = &vep.bus.mipi_csi2;
@@ -2749,7 +3089,8 @@ static int camss_init_subdevices(struct camss *camss)
for (i = 0; i < camss->res->csiphy_num; i++) {
ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i],
- &res->csiphy_res[i], i);
+ &res->csiphy_res[i],
+ res->csiphy_res[i].csiphy.id);
if (ret < 0) {
dev_err(camss->dev,
"Failed to init csiphy%d sub-device: %d\n",
@@ -3505,6 +3846,21 @@ static const struct camss_resources sm8550_resources = {
.link_entities = camss_link_entities
};
+static const struct camss_resources x1e80100_resources = {
+ .version = CAMSS_X1E80100,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_x1e80100,
+ .csid_res = csid_res_x1e80100,
+ .vfe_res = vfe_res_x1e80100,
+ .csid_wrapper_res = &csid_wrapper_res_x1e80100,
+ .icc_res = icc_res_x1e80100,
+ .icc_path_num = ARRAY_SIZE(icc_res_x1e80100),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_x1e80100),
+ .csid_num = ARRAY_SIZE(csid_res_x1e80100),
+ .vfe_num = ARRAY_SIZE(vfe_res_x1e80100),
+ .link_entities = camss_link_entities
+};
+
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8953-camss", .data = &msm8953_resources },
@@ -3516,6 +3872,7 @@ static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
{ .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
{ .compatible = "qcom,sm8550-camss", .data = &sm8550_resources },
+ { .compatible = "qcom,x1e80100-camss", .data = &x1e80100_resources },
{ }
};
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index b284b910ce42..63c0afee154a 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -86,6 +86,7 @@ enum camss_version {
CAMSS_8280XP,
CAMSS_845,
CAMSS_8550,
+ CAMSS_X1E80100,
};
enum icc_count {
diff --git a/drivers/media/platform/qcom/iris/Makefile b/drivers/media/platform/qcom/iris/Makefile
index 35390534534e..e86d00ee6f15 100644
--- a/drivers/media/platform/qcom/iris/Makefile
+++ b/drivers/media/platform/qcom/iris/Makefile
@@ -10,7 +10,7 @@ qcom-iris-objs += \
iris_hfi_gen2_packet.o \
iris_hfi_gen2_response.o \
iris_hfi_queue.o \
- iris_platform_sm8550.o \
+ iris_platform_gen2.o \
iris_power.o \
iris_probe.o \
iris_resources.o \
@@ -20,7 +20,7 @@ qcom-iris-objs += \
iris_vb2.o \
iris_vdec.o \
iris_vpu2.o \
- iris_vpu3.o \
+ iris_vpu3x.o \
iris_vpu_buffer.o \
iris_vpu_common.o \
diff --git a/drivers/media/platform/qcom/iris/iris_core.h b/drivers/media/platform/qcom/iris/iris_core.h
index 37fb4919fecc..aeeac32a1f6d 100644
--- a/drivers/media/platform/qcom/iris/iris_core.h
+++ b/drivers/media/platform/qcom/iris/iris_core.h
@@ -43,6 +43,7 @@ struct icc_info {
* @clock_tbl: table of iris clocks
* @clk_count: count of iris clocks
* @resets: table of iris reset clocks
+ * @controller_resets: table of controller reset clocks
* @iris_platform_data: a structure for platform data
* @state: current state of core
* @iface_q_table_daddr: device address for interface queue table memory
@@ -82,6 +83,7 @@ struct iris_core {
struct clk_bulk_data *clock_tbl;
u32 clk_count;
struct reset_control_bulk_data *resets;
+ struct reset_control_bulk_data *controller_resets;
const struct iris_platform_data *iris_platform_data;
enum iris_core_state state;
dma_addr_t iface_q_table_daddr;
diff --git a/drivers/media/platform/qcom/iris/iris_firmware.c b/drivers/media/platform/qcom/iris/iris_firmware.c
index 7c493b4a75db..f1b5cd56db32 100644
--- a/drivers/media/platform/qcom/iris/iris_firmware.c
+++ b/drivers/media/platform/qcom/iris/iris_firmware.c
@@ -53,8 +53,10 @@ static int iris_load_fw_to_memory(struct iris_core *core, const char *fw_name)
}
mem_virt = memremap(mem_phys, res_size, MEMREMAP_WC);
- if (!mem_virt)
+ if (!mem_virt) {
+ ret = -ENOMEM;
goto err_release_fw;
+ }
ret = qcom_mdt_load(dev, firmware, fw_name,
pas_id, mem_virt, mem_phys, res_size, NULL);
diff --git a/drivers/media/platform/qcom/iris/iris_platform_common.h b/drivers/media/platform/qcom/iris/iris_platform_common.h
index f6b15d2805fb..ac76d9e1ef9c 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_common.h
+++ b/drivers/media/platform/qcom/iris/iris_platform_common.h
@@ -33,8 +33,10 @@ enum pipe_type {
PIPE_4 = 4,
};
+extern struct iris_platform_data qcs8300_data;
extern struct iris_platform_data sm8250_data;
extern struct iris_platform_data sm8550_data;
+extern struct iris_platform_data sm8650_data;
enum platform_clk_type {
IRIS_AXI_CLK,
@@ -156,6 +158,8 @@ struct iris_platform_data {
unsigned int clk_tbl_size;
const char * const *clk_rst_tbl;
unsigned int clk_rst_tbl_size;
+ const char * const *controller_rst_tbl;
+ unsigned int controller_rst_tbl_size;
u64 dma_mask;
const char *fwname;
u32 pas_id;
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8550.c b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
index 35d278996c43..1e69ba15db0f 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_sm8550.c
+++ b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
@@ -10,6 +10,9 @@
#include "iris_platform_common.h"
#include "iris_vpu_common.h"
+#include "iris_platform_qcs8300.h"
+#include "iris_platform_sm8650.h"
+
#define VIDEO_ARCH_LX 1
static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
@@ -264,3 +267,119 @@ struct iris_platform_data sm8550_data = {
.dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
};
+
+/*
+ * Shares most of SM8550 data except:
+ * - vpu_ops to iris_vpu33_ops
+ * - clk_rst_tbl to sm8650_clk_reset_table
+ * - controller_rst_tbl to sm8650_controller_reset_table
+ * - fwname to "qcom/vpu/vpu33_p4.mbn"
+ */
+struct iris_platform_data sm8650_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .vpu_ops = &iris_vpu33_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8550_icc_table),
+ .clk_rst_tbl = sm8650_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8650_clk_reset_table),
+ .controller_rst_tbl = sm8650_controller_reset_table,
+ .controller_rst_tbl_size = ARRAY_SIZE(sm8650_controller_reset_table),
+ .bw_tbl_dec = sm8550_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8550_bw_table_dec),
+ .pmdomain_tbl = sm8550_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8550_pmdomain_table),
+ .opp_pd_tbl = sm8550_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8550_opp_pd_table),
+ .clk_tbl = sm8550_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8550_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu/vpu33_p4.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_sm8550,
+ .inst_fw_caps = inst_fw_cap_sm8550,
+ .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8550),
+ .tz_cp_config_data = &tz_cp_config_sm8550,
+ .core_arch = VIDEO_ARCH_LX,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .ubwc_config = &ubwc_config_sm8550,
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
+ .max_core_mbpf = ((8192 * 4352) / 256) * 2,
+ .input_config_params =
+ sm8550_vdec_input_config_params,
+ .input_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .output_config_params =
+ sm8550_vdec_output_config_params,
+ .output_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_output_config_params),
+ .dec_input_prop = sm8550_vdec_subscribe_input_properties,
+ .dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
+ .dec_output_prop = sm8550_vdec_subscribe_output_properties,
+ .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+
+ .dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+};
+
+/*
+ * Shares most of SM8550 data except:
+ * - inst_caps to platform_inst_cap_qcs8300
+ * - inst_fw_caps to inst_fw_cap_qcs8300
+ */
+struct iris_platform_data qcs8300_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .vpu_ops = &iris_vpu3_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8550_icc_table),
+ .clk_rst_tbl = sm8550_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8550_clk_reset_table),
+ .bw_tbl_dec = sm8550_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8550_bw_table_dec),
+ .pmdomain_tbl = sm8550_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8550_pmdomain_table),
+ .opp_pd_tbl = sm8550_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8550_opp_pd_table),
+ .clk_tbl = sm8550_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8550_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu/vpu30_p4_s6.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_qcs8300,
+ .inst_fw_caps = inst_fw_cap_qcs8300,
+ .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_qcs8300),
+ .tz_cp_config_data = &tz_cp_config_sm8550,
+ .core_arch = VIDEO_ARCH_LX,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .ubwc_config = &ubwc_config_sm8550,
+ .num_vpp_pipe = 2,
+ .max_session_count = 16,
+ .max_core_mbpf = ((4096 * 2176) / 256) * 4,
+ .input_config_params =
+ sm8550_vdec_input_config_params,
+ .input_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .output_config_params =
+ sm8550_vdec_output_config_params,
+ .output_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_output_config_params),
+ .dec_input_prop = sm8550_vdec_subscribe_input_properties,
+ .dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
+ .dec_output_prop = sm8550_vdec_subscribe_output_properties,
+ .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+
+ .dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+};
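Both new entries deliberately reuse the SM8550 tables and override only the fields their leading comments call out. Should the delta list keep growing, a shared designated-initializer base could keep the structs in lockstep; a purely hypothetical sketch, not something this patch introduces:

	/* Hypothetical refactor sketch; shared field list abbreviated. */
	#define IRIS_GEN2_COMMON_DATA						\
		.get_instance = iris_hfi_gen2_get_instance,			\
		.init_hfi_command_ops = iris_hfi_gen2_command_ops_init,		\
		.init_hfi_response_ops = iris_hfi_gen2_response_ops_init,	\
		.icc_tbl = sm8550_icc_table,					\
		.icc_tbl_size = ARRAY_SIZE(sm8550_icc_table)

	static struct iris_platform_data sm8650_data_sketch = {
		IRIS_GEN2_COMMON_DATA,
		.vpu_ops = &iris_vpu33_ops,
		.fwname = "qcom/vpu/vpu33_p4.mbn",
	};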
diff --git a/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
new file mode 100644
index 000000000000..f82355d72fcf
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+static struct platform_inst_fw_cap inst_fw_cap_qcs8300[] = {
+ {
+ .cap_id = PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = LEVEL,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_6_1,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = INPUT_BUF_HOST_MAX_COUNT,
+ .min = DEFAULT_MAX_HOST_BUF_COUNT,
+ .max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
+ .step_or_mask = 1,
+ .value = DEFAULT_MAX_HOST_BUF_COUNT,
+ .hfi_id = HFI_PROP_BUFFER_HOST_MAX_COUNT,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROP_STAGE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = PIPE,
+ .min = PIPE_1,
+ .max = PIPE_2,
+ .step_or_mask = 1,
+ .value = PIPE_2,
+ .hfi_id = HFI_PROP_PIPE,
+ .set = iris_set_pipe,
+ },
+ {
+ .cap_id = POC,
+ .min = 0,
+ .max = 2,
+ .step_or_mask = 1,
+ .value = 1,
+ .hfi_id = HFI_PROP_PIC_ORDER_CNT_TYPE,
+ },
+ {
+ .cap_id = CODED_FRAMES,
+ .min = CODED_FRAMES_PROGRESSIVE,
+ .max = CODED_FRAMES_PROGRESSIVE,
+ .step_or_mask = 0,
+ .value = CODED_FRAMES_PROGRESSIVE,
+ .hfi_id = HFI_PROP_CODED_FRAMES,
+ },
+ {
+ .cap_id = BIT_DEPTH,
+ .min = BIT_DEPTH_8,
+ .max = BIT_DEPTH_8,
+ .step_or_mask = 1,
+ .value = BIT_DEPTH_8,
+ .hfi_id = HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
+ },
+ {
+ .cap_id = RAP_FRAME,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ .hfi_id = HFI_PROP_DEC_START_FROM_RAP_FRAME,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+};
+
+static struct platform_inst_caps platform_inst_cap_qcs8300 = {
+ .min_frame_width = 96,
+ .max_frame_width = 4096,
+ .min_frame_height = 96,
+ .max_frame_height = 4096,
+ .max_mbpf = (4096 * 2176) / 256,
+ .mb_cycles_vpp = 200,
+ .mb_cycles_fw = 326389,
+ .mb_cycles_fw_vpp = 44156,
+ .num_comv = 0,
+};
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8650.h b/drivers/media/platform/qcom/iris/iris_platform_sm8650.h
new file mode 100644
index 000000000000..75e9d572e788
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8650.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_PLATFORM_SM8650_H__
+#define __IRIS_PLATFORM_SM8650_H__
+
+static const char * const sm8650_clk_reset_table[] = { "bus", "core" };
+
+static const char * const sm8650_controller_reset_table[] = { "xo" };
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_probe.c b/drivers/media/platform/qcom/iris/iris_probe.c
index aca442dcc153..9a7ce142f700 100644
--- a/drivers/media/platform/qcom/iris/iris_probe.c
+++ b/drivers/media/platform/qcom/iris/iris_probe.c
@@ -91,25 +91,40 @@ static int iris_init_clocks(struct iris_core *core)
return 0;
}
-static int iris_init_resets(struct iris_core *core)
+static int iris_init_reset_table(struct iris_core *core,
+ struct reset_control_bulk_data **resets,
+ const char * const *rst_tbl, u32 rst_tbl_size)
{
- const char * const *rst_tbl;
- u32 rst_tbl_size;
u32 i = 0;
- rst_tbl = core->iris_platform_data->clk_rst_tbl;
- rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
-
- core->resets = devm_kzalloc(core->dev,
- sizeof(*core->resets) * rst_tbl_size,
- GFP_KERNEL);
- if (!core->resets)
+ *resets = devm_kzalloc(core->dev,
+ sizeof(struct reset_control_bulk_data) * rst_tbl_size,
+ GFP_KERNEL);
+ if (!*resets)
return -ENOMEM;
for (i = 0; i < rst_tbl_size; i++)
- core->resets[i].id = rst_tbl[i];
+ (*resets)[i].id = rst_tbl[i];
+
+ return devm_reset_control_bulk_get_exclusive(core->dev, rst_tbl_size, *resets);
+}
+
+static int iris_init_resets(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_init_reset_table(core, &core->resets,
+ core->iris_platform_data->clk_rst_tbl,
+ core->iris_platform_data->clk_rst_tbl_size);
+ if (ret)
+ return ret;
- return devm_reset_control_bulk_get_exclusive(core->dev, rst_tbl_size, core->resets);
+ if (!core->iris_platform_data->controller_rst_tbl_size)
+ return 0;
+
+ return iris_init_reset_table(core, &core->controller_resets,
+ core->iris_platform_data->controller_rst_tbl,
+ core->iris_platform_data->controller_rst_tbl_size);
}
static int iris_init_resources(struct iris_core *core)
@@ -321,15 +336,23 @@ static const struct dev_pm_ops iris_pm_ops = {
static const struct of_device_id iris_dt_match[] = {
{
- .compatible = "qcom,sm8550-iris",
- .data = &sm8550_data,
+ .compatible = "qcom,qcs8300-iris",
+ .data = &qcs8300_data,
},
#if (!IS_ENABLED(CONFIG_VIDEO_QCOM_VENUS))
- {
- .compatible = "qcom,sm8250-venus",
- .data = &sm8250_data,
- },
+ {
+ .compatible = "qcom,sm8250-venus",
+ .data = &sm8250_data,
+ },
#endif
+ {
+ .compatible = "qcom,sm8550-iris",
+ .data = &sm8550_data,
+ },
+ {
+ .compatible = "qcom,sm8650-iris",
+ .data = &sm8650_data,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, iris_dt_match);
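Tying the probe changes together: iris_init_resets() now builds up to two bulk tables, and the optional controller table is consumed only by the VPU3.3 power-off sequence added below in iris_vpu3x.c. A condensed sketch of that consumption, using the names from this patch:

	/* Condensed from iris_vpu33_power_off_controller() below. */
	reset_control_bulk_reset(core->iris_platform_data->clk_rst_tbl_size,
				 core->resets);		/* pulse clock resets */

	reset_control_bulk_assert(core->iris_platform_data->controller_rst_tbl_size,
				  core->controller_resets);	/* hold "xo" */
	usleep_range(80, 100);
	reset_control_bulk_deassert(core->iris_platform_data->controller_rst_tbl_size,
				    core->controller_resets);	/* release "xo" */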
diff --git a/drivers/media/platform/qcom/iris/iris_vpu2.c b/drivers/media/platform/qcom/iris/iris_vpu2.c
index 8f502aed43ce..7cf1bfc352d3 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu2.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu2.c
@@ -34,5 +34,6 @@ static u64 iris_vpu2_calc_freq(struct iris_inst *inst, size_t data_size)
const struct vpu_ops iris_vpu2_ops = {
.power_off_hw = iris_vpu_power_off_hw,
+ .power_off_controller = iris_vpu_power_off_controller,
.calc_freq = iris_vpu2_calc_freq,
};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu3.c b/drivers/media/platform/qcom/iris/iris_vpu3.c
deleted file mode 100644
index b484638e6105..000000000000
--- a/drivers/media/platform/qcom/iris/iris_vpu3.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#include <linux/iopoll.h>
-
-#include "iris_instance.h"
-#include "iris_vpu_common.h"
-#include "iris_vpu_register_defines.h"
-
-#define AON_MVP_NOC_RESET 0x0001F000
-
-#define WRAPPER_CORE_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x88)
-#define CORE_CLK_RUN 0x0
-
-#define CPU_CS_AHB_BRIDGE_SYNC_RESET (CPU_CS_BASE_OFFS + 0x160)
-#define CORE_BRIDGE_SW_RESET BIT(0)
-#define CORE_BRIDGE_HW_RESET_DISABLE BIT(1)
-
-#define AON_WRAPPER_MVP_NOC_RESET_REQ (AON_MVP_NOC_RESET + 0x000)
-#define VIDEO_NOC_RESET_REQ (BIT(0) | BIT(1))
-
-#define AON_WRAPPER_MVP_NOC_RESET_ACK (AON_MVP_NOC_RESET + 0x004)
-
-#define VCODEC_SS_IDLE_STATUSN (VCODEC_BASE_OFFS + 0x70)
-
-static bool iris_vpu3_hw_power_collapsed(struct iris_core *core)
-{
- u32 value, pwr_status;
-
- value = readl(core->reg_base + WRAPPER_CORE_POWER_STATUS);
- pwr_status = value & BIT(1);
-
- return pwr_status ? false : true;
-}
-
-static void iris_vpu3_power_off_hardware(struct iris_core *core)
-{
- u32 reg_val = 0, value, i;
- int ret;
-
- if (iris_vpu3_hw_power_collapsed(core))
- goto disable_power;
-
- dev_err(core->dev, "video hw is power on\n");
-
- value = readl(core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
- if (value)
- writel(CORE_CLK_RUN, core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
-
- for (i = 0; i < core->iris_platform_data->num_vpp_pipe; i++) {
- ret = readl_poll_timeout(core->reg_base + VCODEC_SS_IDLE_STATUSN + 4 * i,
- reg_val, reg_val & 0x400000, 2000, 20000);
- if (ret)
- goto disable_power;
- }
-
- writel(VIDEO_NOC_RESET_REQ, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
-
- ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
- reg_val, reg_val & 0x3, 200, 2000);
- if (ret)
- goto disable_power;
-
- writel(0x0, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
-
- ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
- reg_val, !(reg_val & 0x3), 200, 2000);
- if (ret)
- goto disable_power;
-
- writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
- core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
- writel(CORE_BRIDGE_HW_RESET_DISABLE, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
- writel(0x0, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
-
-disable_power:
- iris_vpu_power_off_hw(core);
-}
-
-static u64 iris_vpu3_calculate_frequency(struct iris_inst *inst, size_t data_size)
-{
- struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
- struct v4l2_format *inp_f = inst->fmt_src;
- u32 height, width, mbs_per_second, mbpf;
- u64 fw_cycles, fw_vpp_cycles;
- u64 vsp_cycles, vpp_cycles;
- u32 fps = DEFAULT_FPS;
-
- width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
- height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
-
- mbpf = NUM_MBS_PER_FRAME(height, width);
- mbs_per_second = mbpf * fps;
-
- fw_cycles = fps * caps->mb_cycles_fw;
- fw_vpp_cycles = fps * caps->mb_cycles_fw_vpp;
-
- vpp_cycles = mult_frac(mbs_per_second, caps->mb_cycles_vpp, (u32)inst->fw_caps[PIPE].value);
- /* 21 / 20 is minimum overhead factor */
- vpp_cycles += max(div_u64(vpp_cycles, 20), fw_vpp_cycles);
-
- /* 1.059 is multi-pipe overhead */
- if (inst->fw_caps[PIPE].value > 1)
- vpp_cycles += div_u64(vpp_cycles * 59, 1000);
-
- vsp_cycles = fps * data_size * 8;
- vsp_cycles = div_u64(vsp_cycles, 2);
- /* VSP FW overhead 1.05 */
- vsp_cycles = div_u64(vsp_cycles * 21, 20);
-
- if (inst->fw_caps[STAGE].value == STAGE_1)
- vsp_cycles = vsp_cycles * 3;
-
- return max3(vpp_cycles, vsp_cycles, fw_cycles);
-}
-
-const struct vpu_ops iris_vpu3_ops = {
- .power_off_hw = iris_vpu3_power_off_hardware,
- .calc_freq = iris_vpu3_calculate_frequency,
-};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu3x.c b/drivers/media/platform/qcom/iris/iris_vpu3x.c
new file mode 100644
index 000000000000..9b7c9a1495ee
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu3x.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "iris_instance.h"
+#include "iris_vpu_common.h"
+#include "iris_vpu_register_defines.h"
+
+#define WRAPPER_TZ_BASE_OFFS 0x000C0000
+#define AON_BASE_OFFS 0x000E0000
+#define AON_MVP_NOC_RESET 0x0001F000
+
+#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x54)
+#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS (WRAPPER_BASE_OFFS + 0x58)
+#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x5C)
+#define REQ_POWER_DOWN_PREP BIT(0)
+#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS (WRAPPER_BASE_OFFS + 0x60)
+#define WRAPPER_CORE_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x88)
+#define CORE_CLK_RUN 0x0
+
+#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG (WRAPPER_TZ_BASE_OFFS + 0x14)
+#define CTL_AXI_CLK_HALT BIT(0)
+#define CTL_CLK_HALT BIT(1)
+
+#define WRAPPER_TZ_QNS4PDXFIFO_RESET (WRAPPER_TZ_BASE_OFFS + 0x18)
+#define RESET_HIGH BIT(0)
+
+#define CPU_CS_AHB_BRIDGE_SYNC_RESET (CPU_CS_BASE_OFFS + 0x160)
+#define CORE_BRIDGE_SW_RESET BIT(0)
+#define CORE_BRIDGE_HW_RESET_DISABLE BIT(1)
+
+#define CPU_CS_X2RPMH (CPU_CS_BASE_OFFS + 0x168)
+#define MSK_SIGNAL_FROM_TENSILICA BIT(0)
+#define MSK_CORE_POWER_ON BIT(1)
+
+#define AON_WRAPPER_MVP_NOC_RESET_REQ (AON_MVP_NOC_RESET + 0x000)
+#define VIDEO_NOC_RESET_REQ (BIT(0) | BIT(1))
+
+#define AON_WRAPPER_MVP_NOC_RESET_ACK (AON_MVP_NOC_RESET + 0x004)
+
+#define VCODEC_SS_IDLE_STATUSN (VCODEC_BASE_OFFS + 0x70)
+
+#define AON_WRAPPER_MVP_NOC_LPI_CONTROL (AON_BASE_OFFS)
+#define AON_WRAPPER_MVP_NOC_LPI_STATUS (AON_BASE_OFFS + 0x4)
+
+#define AON_WRAPPER_MVP_NOC_CORE_SW_RESET (AON_BASE_OFFS + 0x18)
+#define SW_RESET BIT(0)
+#define AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL (AON_BASE_OFFS + 0x20)
+#define NOC_HALT BIT(0)
+#define AON_WRAPPER_SPARE (AON_BASE_OFFS + 0x28)
+
+static bool iris_vpu3x_hw_power_collapsed(struct iris_core *core)
+{
+ u32 value, pwr_status;
+
+ value = readl(core->reg_base + WRAPPER_CORE_POWER_STATUS);
+ pwr_status = value & BIT(1);
+
+ return !pwr_status;
+}
+
+static void iris_vpu3_power_off_hardware(struct iris_core *core)
+{
+ u32 reg_val = 0, value, i;
+ int ret;
+
+ if (iris_vpu3x_hw_power_collapsed(core))
+ goto disable_power;
+
+ dev_err(core->dev, "video hw is power on\n");
+
+ value = readl(core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+ if (value)
+ writel(CORE_CLK_RUN, core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+
+ for (i = 0; i < core->iris_platform_data->num_vpp_pipe; i++) {
+ ret = readl_poll_timeout(core->reg_base + VCODEC_SS_IDLE_STATUSN + 4 * i,
+ reg_val, reg_val & 0x400000, 2000, 20000);
+ if (ret)
+ goto disable_power;
+ }
+
+ writel(VIDEO_NOC_RESET_REQ, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
+ reg_val, reg_val & 0x3, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0x0, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
+ reg_val, !(reg_val & 0x3), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
+ core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(CORE_BRIDGE_HW_RESET_DISABLE, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(0x0, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+
+disable_power:
+ iris_vpu_power_off_hw(core);
+}
+
+static void iris_vpu33_power_off_hardware(struct iris_core *core)
+{
+ u32 reg_val = 0, value, i;
+ int ret;
+
+ if (iris_vpu3x_hw_power_collapsed(core))
+ goto disable_power;
+
+ dev_err(core->dev, "video hw is power on\n");
+
+ value = readl(core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+ if (value)
+ writel(CORE_CLK_RUN, core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+
+ for (i = 0; i < core->iris_platform_data->num_vpp_pipe; i++) {
+ ret = readl_poll_timeout(core->reg_base + VCODEC_SS_IDLE_STATUSN + 4 * i,
+ reg_val, reg_val & 0x400000, 2000, 20000);
+ if (ret)
+ goto disable_power;
+ }
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
+ reg_val, reg_val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ /* set MNoC to low power, set PD_NOC_QREQ (bit 0) */
+ writel(BIT(0), core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+
+ writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
+ core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(CORE_BRIDGE_HW_RESET_DISABLE, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(0x0, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+
+disable_power:
+ iris_vpu_power_off_hw(core);
+}
+
+static int iris_vpu33_power_off_controller(struct iris_core *core)
+{
+ u32 xo_rst_tbl_size = core->iris_platform_data->controller_rst_tbl_size;
+ u32 clk_rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
+ u32 val = 0;
+ int ret;
+
+ writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);
+
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0x0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
+ val, val == 0, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(CTL_AXI_CLK_HALT | CTL_CLK_HALT,
+ core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
+ writel(RESET_HIGH, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
+ writel(0x0, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
+ writel(0x0, core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
+
+ reset_control_bulk_reset(clk_rst_tbl_size, core->resets);
+
+ /* Disable MVP NoC clock */
+ val = readl(core->reg_base + AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL);
+ val |= NOC_HALT;
+ writel(val, core->reg_base + AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL);
+
+ /* enable MVP NoC reset */
+ val = readl(core->reg_base + AON_WRAPPER_MVP_NOC_CORE_SW_RESET);
+ val |= SW_RESET;
+ writel(val, core->reg_base + AON_WRAPPER_MVP_NOC_CORE_SW_RESET);
+
+ /* poll AON spare register bit0 to become zero with 50ms timeout */
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_SPARE,
+ val, (val & BIT(0)) == 0, 1000, 50000);
+ if (ret)
+ goto disable_power;
+
+ /* enable bit(1) to avoid cvp noc xo reset */
+ val = readl(core->reg_base + AON_WRAPPER_SPARE);
+ val |= BIT(1);
+ writel(val, core->reg_base + AON_WRAPPER_SPARE);
+
+ reset_control_bulk_assert(xo_rst_tbl_size, core->controller_resets);
+
+ /* De-assert MVP NoC reset */
+ val = readl(core->reg_base + AON_WRAPPER_MVP_NOC_CORE_SW_RESET);
+ val &= ~SW_RESET;
+ writel(val, core->reg_base + AON_WRAPPER_MVP_NOC_CORE_SW_RESET);
+
+ usleep_range(80, 100);
+
+ reset_control_bulk_deassert(xo_rst_tbl_size, core->controller_resets);
+
+ /* reset AON spare register */
+ writel(0, core->reg_base + AON_WRAPPER_SPARE);
+
+ /* Enable MVP NoC clock */
+ val = readl(core->reg_base + AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL);
+ val &= ~NOC_HALT;
+ writel(val, core->reg_base + AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL);
+
+ iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
+
+disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+
+ return 0;
+}
+
+static u64 iris_vpu3x_calculate_frequency(struct iris_inst *inst, size_t data_size)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 height, width, mbs_per_second, mbpf;
+ u64 fw_cycles, fw_vpp_cycles;
+ u64 vsp_cycles, vpp_cycles;
+ u32 fps = DEFAULT_FPS;
+
+ width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
+ height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
+
+ mbpf = NUM_MBS_PER_FRAME(height, width);
+ mbs_per_second = mbpf * fps;
+
+ fw_cycles = fps * caps->mb_cycles_fw;
+ fw_vpp_cycles = fps * caps->mb_cycles_fw_vpp;
+
+ vpp_cycles = mult_frac(mbs_per_second, caps->mb_cycles_vpp, (u32)inst->fw_caps[PIPE].value);
+ /* 21 / 20 is minimum overhead factor */
+ vpp_cycles += max(div_u64(vpp_cycles, 20), fw_vpp_cycles);
+
+ /* 1.059 is multi-pipe overhead */
+ if (inst->fw_caps[PIPE].value > 1)
+ vpp_cycles += div_u64(vpp_cycles * 59, 1000);
+
+ vsp_cycles = fps * data_size * 8;
+ vsp_cycles = div_u64(vsp_cycles, 2);
+ /* VSP FW overhead 1.05 */
+ vsp_cycles = div_u64(vsp_cycles * 21, 20);
+
+ if (inst->fw_caps[STAGE].value == STAGE_1)
+ vsp_cycles = vsp_cycles * 3;
+
+ return max3(vpp_cycles, vsp_cycles, fw_cycles);
+}
+
+const struct vpu_ops iris_vpu3_ops = {
+ .power_off_hw = iris_vpu3_power_off_hardware,
+ .power_off_controller = iris_vpu_power_off_controller,
+ .calc_freq = iris_vpu3x_calculate_frequency,
+};
+
+const struct vpu_ops iris_vpu33_ops = {
+ .power_off_hw = iris_vpu33_power_off_hardware,
+ .power_off_controller = iris_vpu33_power_off_controller,
+ .calc_freq = iris_vpu3x_calculate_frequency,
+};
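Putting the two ops tables in context: VPU3 and VPU3.3 differ only in their power-off hooks; the frequency model is shared. The dispatch point, per the iris_vpu_common.c hunk below, reduces to:

	/* Sketch of the dispatch added below in iris_vpu_power_off(). */
	dev_pm_opp_set_rate(core->dev, 0);
	core->iris_platform_data->vpu_ops->power_off_hw(core);
	core->iris_platform_data->vpu_ops->power_off_controller(core);
	iris_unset_icc_bw(core);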
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.c b/drivers/media/platform/qcom/iris/iris_vpu_common.c
index fe9896d66848..268e45acaa7c 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_common.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.c
@@ -211,7 +211,7 @@ skip_power_off:
return -EAGAIN;
}
-static int iris_vpu_power_off_controller(struct iris_core *core)
+int iris_vpu_power_off_controller(struct iris_core *core)
{
u32 val = 0;
int ret;
@@ -264,7 +264,7 @@ void iris_vpu_power_off(struct iris_core *core)
{
dev_pm_opp_set_rate(core->dev, 0);
core->iris_platform_data->vpu_ops->power_off_hw(core);
- iris_vpu_power_off_controller(core);
+ core->iris_platform_data->vpu_ops->power_off_controller(core);
iris_unset_icc_bw(core);
if (!iris_vpu_watchdog(core, core->intr_status))
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.h b/drivers/media/platform/qcom/iris/iris_vpu_common.h
index 63fa1fa5a498..93b7fa27be3b 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_common.h
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.h
@@ -10,9 +10,11 @@ struct iris_core;
extern const struct vpu_ops iris_vpu2_ops;
extern const struct vpu_ops iris_vpu3_ops;
+extern const struct vpu_ops iris_vpu33_ops;
struct vpu_ops {
void (*power_off_hw)(struct iris_core *core);
+ int (*power_off_controller)(struct iris_core *core);
u64 (*calc_freq)(struct iris_inst *inst, size_t data_size);
};
@@ -22,6 +24,7 @@ void iris_vpu_clear_interrupt(struct iris_core *core);
int iris_vpu_watchdog(struct iris_core *core, u32 intr_status);
int iris_vpu_prepare_pc(struct iris_core *core);
int iris_vpu_power_on(struct iris_core *core);
+int iris_vpu_power_off_controller(struct iris_core *core);
void iris_vpu_power_off_hw(struct iris_core *core);
void iris_vpu_power_off(struct iris_core *core);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 77d48578ecd2..d305d74bb152 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -438,7 +438,7 @@ static int venus_probe(struct platform_device *pdev)
ret = v4l2_device_register(dev, &core->v4l2_dev);
if (ret)
- goto err_core_deinit;
+ goto err_hfi_destroy;
platform_set_drvdata(pdev, core);
@@ -476,24 +476,24 @@ static int venus_probe(struct platform_device *pdev)
ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
if (ret)
- goto err_venus_shutdown;
+ goto err_core_deinit;
ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
if (ret)
- goto err_venus_shutdown;
+ goto err_core_deinit;
ret = pm_runtime_put_sync(dev);
if (ret) {
pm_runtime_get_noresume(dev);
- goto err_dev_unregister;
+ goto err_core_deinit;
}
venus_dbgfs_init(core);
return 0;
-err_dev_unregister:
- v4l2_device_unregister(&core->v4l2_dev);
+err_core_deinit:
+ hfi_core_deinit(core, false);
err_venus_shutdown:
venus_shutdown(core);
err_firmware_deinit:
@@ -506,9 +506,9 @@ err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
+ v4l2_device_unregister(&core->v4l2_dev);
+err_hfi_destroy:
hfi_destroy(core);
-err_core_deinit:
- hfi_core_deinit(core, false);
err_core_put:
if (core->pm_ops->core_put)
core->pm_ops->core_put(core);
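
The reordered error labels in venus_probe() above restore the rule that teardown mirrors setup: each failure jumps to the label that undoes only what has been acquired so far, and the labels run in reverse order of acquisition. A generic sketch of the idiom, using placeholder functions rather than Venus APIs:

#include <stdio.h>

/* Placeholder resources, not Venus APIs. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }	/* force a failure */
static void release_b(void) { puts("undo b"); }
static void release_a(void) { puts("undo a"); }

static int example_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	ret = acquire_c();
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
	return ret;
}

int main(void)
{
	return example_probe() ? 1 : 0;	/* prints "undo b" then "undo a" */
}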
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index abeeafa86697..b412e0c5515a 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -172,6 +172,7 @@ struct venus_format {
* @venus_ver: the venus firmware version
* @dump_core: a flag indicating that a core dump is required
* @ocs: OF changeset pointer
+ * @hwmode_dev: a flag indicating that HW_CTRL_TRIGGER is used in the clock driver
*/
struct venus_core {
void __iomem *base;
@@ -235,6 +236,7 @@ struct venus_core {
} venus_ver;
unsigned long dump_core;
struct of_changeset *ocs;
+ bool hwmode_dev;
};
struct vdec_controls {
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 33a5a659c0ad..409aa9bd0b5d 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -412,9 +412,17 @@ static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable)
u32 val;
int ret;
- if (IS_V6(core))
- return dev_pm_genpd_set_hwmode(core->pmdomains->pd_devs[coreid], !enable);
- else if (coreid == VIDC_CORE_ID_1) {
+ ret = dev_pm_genpd_set_hwmode(core->pmdomains->pd_devs[coreid], !enable);
+ if (ret == -EOPNOTSUPP) {
+ core->hwmode_dev = false;
+ goto legacy;
+ }
+
+ core->hwmode_dev = true;
+ return ret;
+
+legacy:
+ if (coreid == VIDC_CORE_ID_1) {
ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
} else {
@@ -450,7 +458,7 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
vcodec_clks_disable(core, core->vcodec0_clks);
- if (!IS_V6(core)) {
+ if (!core->hwmode_dev) {
ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
if (ret)
return ret;
@@ -468,7 +476,7 @@ static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
vcodec_clks_disable(core, core->vcodec1_clks);
- if (!IS_V6(core)) {
+ if (!core->hwmode_dev) {
ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
if (ret)
return ret;
@@ -491,11 +499,9 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
if (ret < 0)
return ret;
- if (!IS_V6(core)) {
- ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
- if (ret)
- return ret;
- }
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
+ if (ret)
+ return ret;
ret = vcodec_clks_enable(core, core->vcodec0_clks);
if (ret)
@@ -511,11 +517,9 @@ static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
if (ret < 0)
return ret;
- if (!IS_V6(core)) {
- ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
- if (ret)
- return ret;
- }
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
+ if (ret)
+ return ret;
ret = vcodec_clks_enable(core, core->vcodec1_clks);
if (ret)
@@ -811,7 +815,7 @@ static int vdec_power_v4(struct device *dev, int on)
else
vcodec_clks_disable(core, core->vcodec0_clks);
- vcodec_control_v4(core, VIDC_CORE_ID_1, false);
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
return ret;
}
@@ -856,7 +860,7 @@ static int venc_power_v4(struct device *dev, int on)
else
vcodec_clks_disable(core, core->vcodec1_clks);
- vcodec_control_v4(core, VIDC_CORE_ID_2, false);
+ ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
return ret;
}
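
The vcodec_control_v4() change above probes dev_pm_genpd_set_hwmode() once and, on -EOPNOTSUPP, latches core->hwmode_dev so later power paths take the legacy register sequence. A minimal sketch of that probe-then-fall-back idiom, with stubbed stand-ins rather than kernel APIs:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool hwmode_dev;

/* Stubs standing in for dev_pm_genpd_set_hwmode() and the MMIO path. */
static int try_hw_control(bool enable) { (void)enable; return -EOPNOTSUPP; }
static int legacy_control(bool enable) { (void)enable; return 0; }

static int codec_control(bool enable)
{
	int ret = try_hw_control(enable);

	if (ret == -EOPNOTSUPP) {
		hwmode_dev = false;	/* remember to take the legacy path */
		return legacy_control(enable);
	}

	hwmode_dev = true;
	return ret;
}

int main(void)
{
	printf("ret=%d hwmode=%d\n", codec_control(true), hwmode_dev);
	return 0;
}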
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 9f82882b77bc..99ce5fd41577 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -154,14 +154,14 @@ find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type)
return NULL;
for (i = 0; i < size; i++) {
- bool valid;
+ bool valid = false;
if (fmt[i].type != type)
continue;
if (V4L2_TYPE_IS_OUTPUT(type)) {
valid = venus_helper_check_codec(inst, fmt[i].pixfmt);
- } else if (V4L2_TYPE_IS_CAPTURE(type)) {
+ } else {
valid = venus_helper_check_format(inst, fmt[i].pixfmt);
if (fmt[i].pixfmt == V4L2_PIX_FMT_QC10C &&
@@ -1110,10 +1110,20 @@ static int vdec_start_output(struct venus_inst *inst)
if (inst->codec_state == VENUS_DEC_STATE_SEEK) {
ret = venus_helper_process_initial_out_bufs(inst);
- if (inst->next_buf_last)
+ if (ret)
+ return ret;
+
+ if (inst->next_buf_last) {
inst->codec_state = VENUS_DEC_STATE_DRC;
- else
+ } else {
inst->codec_state = VENUS_DEC_STATE_DECODING;
+
+ if (inst->streamon_cap) {
+ ret = venus_helper_queue_dpb_bufs(inst);
+ if (ret)
+ return ret;
+ }
+ }
goto done;
}
diff --git a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
index 69a5f23e7954..fcadb2143c88 100644
--- a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
+++ b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
@@ -12,7 +12,6 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/media/platform/renesas/Kconfig b/drivers/media/platform/renesas/Kconfig
index c7fc718a30a5..27a54fa79083 100644
--- a/drivers/media/platform/renesas/Kconfig
+++ b/drivers/media/platform/renesas/Kconfig
@@ -30,23 +30,6 @@ config VIDEO_RCAR_CSI2
To compile this driver as a module, choose M here: the
module will be called rcar-csi2.
-config VIDEO_RCAR_ISP
- tristate "R-Car Image Signal Processor (ISP)"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV && OF
- depends on ARCH_RENESAS || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select RESET_CONTROLLER
- select V4L2_FWNODE
- help
- Support for Renesas R-Car Image Signal Processor (ISP).
- Enable this to support the Renesas R-Car Image Signal
- Processor (ISP).
-
- To compile this driver as a module, choose M here: the
- module will be called rcar-isp.
-
config VIDEO_SH_VOU
tristate "SuperH VOU video output driver"
depends on V4L_PLATFORM_DRIVERS
@@ -56,6 +39,7 @@ config VIDEO_SH_VOU
help
Support for the Video Output Unit (VOU) on SuperH SoCs.
+source "drivers/media/platform/renesas/rcar-isp/Kconfig"
source "drivers/media/platform/renesas/rcar-vin/Kconfig"
source "drivers/media/platform/renesas/rzg2l-cru/Kconfig"
diff --git a/drivers/media/platform/renesas/Makefile b/drivers/media/platform/renesas/Makefile
index 50774a20330c..1127259c09d6 100644
--- a/drivers/media/platform/renesas/Makefile
+++ b/drivers/media/platform/renesas/Makefile
@@ -3,13 +3,13 @@
# Makefile for the Renesas capture/playback device drivers.
#
+obj-y += rcar-isp/
obj-y += rcar-vin/
obj-y += rzg2l-cru/
obj-y += vsp1/
obj-$(CONFIG_VIDEO_RCAR_CSI2) += rcar-csi2.o
obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
-obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o
obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
diff --git a/drivers/media/platform/renesas/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c
index 38a3149f9724..9979de4f6ef1 100644
--- a/drivers/media/platform/renesas/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-csi2.c
@@ -1075,16 +1075,10 @@ static int rcsi2_start_receiver_gen3(struct rcar_csi2 *priv,
vcdt2 |= vcdt_part << ((i % 2) * 16);
}
- if (fmt->field == V4L2_FIELD_ALTERNATE) {
+ if (fmt->field == V4L2_FIELD_ALTERNATE)
fld = FLD_DET_SEL(1) | FLD_FLD_EN4 | FLD_FLD_EN3 | FLD_FLD_EN2
| FLD_FLD_EN;
- if (fmt->height == 240)
- fld |= FLD_FLD_NUM(0);
- else
- fld |= FLD_FLD_NUM(1);
- }
-
/*
* Get the number of active data lanes inspecting the remote mbus
* configuration.
diff --git a/drivers/media/platform/renesas/rcar-isp/Kconfig b/drivers/media/platform/renesas/rcar-isp/Kconfig
new file mode 100644
index 000000000000..242f6a23851f
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-isp/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_RCAR_ISP
+ tristate "R-Car Image Signal Processor (ISP)"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+ Support for Renesas R-Car Image Signal Processor (ISP).
+ Enable this to support the Renesas R-Car Image Signal
+ Processor (ISP).
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-isp.
diff --git a/drivers/media/platform/renesas/rcar-isp/Makefile b/drivers/media/platform/renesas/rcar-isp/Makefile
new file mode 100644
index 000000000000..b542118c831e
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-isp/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+rcar-isp-objs = csisp.o
+
+obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp/csisp.c
index 4bc89d4757fa..1eb29a0b774a 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp/csisp.c
@@ -159,7 +159,7 @@ enum rcar_isp_pads {
struct rcar_isp {
struct device *dev;
- void __iomem *base;
+ void __iomem *csbase;
struct reset_control *rstc;
enum rcar_isp_input csi_input;
@@ -184,14 +184,14 @@ static inline struct rcar_isp *notifier_to_isp(struct v4l2_async_notifier *n)
return container_of(n, struct rcar_isp, notifier);
}
-static void risp_write(struct rcar_isp *isp, u32 offset, u32 value)
+static void risp_write_cs(struct rcar_isp *isp, u32 offset, u32 value)
{
- iowrite32(value, isp->base + offset);
+ iowrite32(value, isp->csbase + offset);
}
-static u32 risp_read(struct rcar_isp *isp, u32 offset)
+static u32 risp_read_cs(struct rcar_isp *isp, u32 offset)
{
- return ioread32(isp->base + offset);
+ return ioread32(isp->csbase + offset);
}
static int risp_power_on(struct rcar_isp *isp)
@@ -245,31 +245,31 @@ static int risp_start(struct rcar_isp *isp, struct v4l2_subdev_state *state)
if (isp->csi_input == RISP_CSI_INPUT1)
sel_csi = ISPINPUTSEL0_SEL_CSI0;
- risp_write(isp, ISPINPUTSEL0_REG,
- risp_read(isp, ISPINPUTSEL0_REG) | sel_csi);
+ risp_write_cs(isp, ISPINPUTSEL0_REG,
+ risp_read_cs(isp, ISPINPUTSEL0_REG) | sel_csi);
/* Configure Channel Selector. */
for (vc = 0; vc < 4; vc++) {
u8 ch = vc + 4;
u8 dt = format->datatype;
- risp_write(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
- risp_write(isp, ISPCS_DT_CODE03_CH_REG(ch),
- ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
- ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
- ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
- ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
+ risp_write_cs(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
+ risp_write_cs(isp, ISPCS_DT_CODE03_CH_REG(ch),
+ ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
+ ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
+ ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
+ ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
}
/* Setup processing method. */
- risp_write(isp, ISPPROCMODE_DT_REG(format->datatype),
- ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
- ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
- ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
- ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));
+ risp_write_cs(isp, ISPPROCMODE_DT_REG(format->datatype),
+ ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));
/* Start ISP. */
- risp_write(isp, ISPSTART_REG, ISPSTART_START);
+ risp_write_cs(isp, ISPSTART_REG, ISPSTART_START);
ret = v4l2_subdev_enable_streams(isp->remote, isp->remote_pad,
BIT_ULL(0));
@@ -284,7 +284,7 @@ static void risp_stop(struct rcar_isp *isp)
v4l2_subdev_disable_streams(isp->remote, isp->remote_pad, BIT_ULL(0));
/* Stop ISP. */
- risp_write(isp, ISPSTART_REG, ISPSTART_STOP);
+ risp_write_cs(isp, ISPSTART_REG, ISPSTART_STOP);
risp_power_off(isp);
}
@@ -465,9 +465,20 @@ static const struct media_entity_operations risp_entity_ops = {
static int risp_probe_resources(struct rcar_isp *isp,
struct platform_device *pdev)
{
- isp->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(isp->base))
- return PTR_ERR(isp->base);
+ struct resource *res;
+
+ /*
+ * For backward compatibility, allow the cs base to be the only reg if
+ * no reg-names are set in DT.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
+ if (!res)
+ isp->csbase = devm_platform_ioremap_resource(pdev, 0);
+ else
+ isp->csbase = devm_ioremap_resource(&pdev->dev, res);
+
+ if (IS_ERR(isp->csbase))
+ return PTR_ERR(isp->csbase);
isp->rstc = devm_reset_control_get(&pdev->dev, NULL);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index ddfb18e6e7a4..846ae7989b1d 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -1080,13 +1080,11 @@ static int __maybe_unused rvin_suspend(struct device *dev)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
- if (vin->state != RUNNING)
+ if (!vin->running)
return 0;
rvin_stop_streaming(vin);
- vin->state = SUSPENDED;
-
return 0;
}
@@ -1094,7 +1092,7 @@ static int __maybe_unused rvin_resume(struct device *dev)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
- if (vin->state != SUSPENDED)
+ if (!vin->running)
return 0;
/*
@@ -1275,7 +1273,7 @@ static const struct rvin_info rcar_info_r8a77995 = {
};
static const struct rvin_info rcar_info_gen4 = {
- .model = RCAR_GEN3,
+ .model = RCAR_GEN4,
.use_mc = true,
.use_isp = true,
.nv12 = true,
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 8de871240440..5c08ee2c9807 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -94,6 +94,7 @@
#define VNMC_INF_YUV16 (5 << 16)
#define VNMC_INF_RGB888 (6 << 16)
#define VNMC_INF_RGB666 (7 << 16)
+#define VNMC_EXINF_RAW8 (1 << 12) /* Gen4 specific */
#define VNMC_VUP (1 << 10)
#define VNMC_IM_ODD (0 << 3)
#define VNMC_IM_ODD_EVEN (1 << 3)
@@ -642,8 +643,6 @@ void rvin_scaler_gen3(struct rvin_dev *vin)
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
clip_size |= vin->compose.height / 2;
break;
default:
@@ -679,22 +678,6 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
stride = vin->format.bytesperline / fmt->bpp;
-
- /* For RAW8 format bpp is 1, but the hardware process RAW8
- * format in 2 pixel unit hence configure VNIS_REG as stride / 2.
- */
- switch (vin->format.pixelformat) {
- case V4L2_PIX_FMT_SBGGR8:
- case V4L2_PIX_FMT_SGBRG8:
- case V4L2_PIX_FMT_SGRBG8:
- case V4L2_PIX_FMT_SRGGB8:
- case V4L2_PIX_FMT_GREY:
- stride /= 2;
- break;
- default:
- break;
- }
-
rvin_write(vin, stride, VNIS_REG);
}
@@ -727,8 +710,6 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_INTERLACED_BT:
vnmc = VNMC_IM_FULL | VNMC_FOC;
break;
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
@@ -791,6 +772,8 @@ static int rvin_setup(struct rvin_dev *vin)
case MEDIA_BUS_FMT_SRGGB8_1X8:
case MEDIA_BUS_FMT_Y8_1X8:
vnmc |= VNMC_INF_RAW8;
+ if (vin->info->model == RCAR_GEN4)
+ vnmc |= VNMC_EXINF_RAW8;
break;
case MEDIA_BUS_FMT_SBGGR10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
@@ -802,31 +785,8 @@ static int rvin_setup(struct rvin_dev *vin)
break;
}
- /* Make sure input interface and input format is valid. */
- if (vin->info->model == RCAR_GEN3) {
- switch (vnmc & VNMC_INF_MASK) {
- case VNMC_INF_YUV8_BT656:
- case VNMC_INF_YUV10_BT656:
- case VNMC_INF_YUV16:
- case VNMC_INF_RGB666:
- if (vin->is_csi) {
- vin_err(vin, "Invalid setting in MIPI CSI2\n");
- return -EINVAL;
- }
- break;
- case VNMC_INF_RAW8:
- if (!vin->is_csi) {
- vin_err(vin, "Invalid setting in Digital Pins\n");
- return -EINVAL;
- }
- break;
- default:
- break;
- }
- }
-
/* Enable VSYNC Field Toggle mode after one VSYNC input */
- if (vin->info->model == RCAR_GEN3)
+ if (vin->info->model == RCAR_GEN3 || vin->info->model == RCAR_GEN4)
dmr2 = VNDMR2_FTEV;
else
dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
@@ -910,7 +870,7 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_PIX_FMT_SGBRG10:
case V4L2_PIX_FMT_SGRBG10:
case V4L2_PIX_FMT_SRGGB10:
- dmr = VNDMR_RMODE_RAW10 | VNDMR_YC_THR;
+ dmr = VNDMR_RMODE_RAW10;
break;
default:
vin_err(vin, "Invalid pixelformat (0x%x)\n",
@@ -926,7 +886,7 @@ static int rvin_setup(struct rvin_dev *vin)
if (input_is_yuv == output_is_yuv)
vnmc |= VNMC_BPS;
- if (vin->info->model == RCAR_GEN3) {
+ if (vin->info->model == RCAR_GEN3 || vin->info->model == RCAR_GEN4) {
/* Select between CSI-2 and parallel input */
if (vin->is_csi)
vnmc &= ~VNMC_DPINE;
@@ -1021,33 +981,13 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
struct rvin_buffer *buf;
struct vb2_v4l2_buffer *vbuf;
dma_addr_t phys_addr;
- int prev;
/* An already populated slot shall never be overwritten. */
if (WARN_ON(vin->buf_hw[slot].buffer))
return;
- prev = (slot == 0 ? HW_BUFFER_NUM : slot) - 1;
-
- if (vin->buf_hw[prev].type == HALF_TOP) {
- vbuf = vin->buf_hw[prev].buffer;
- vin->buf_hw[slot].buffer = vbuf;
- vin->buf_hw[slot].type = HALF_BOTTOM;
- switch (vin->format.pixelformat) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
- phys_addr = vin->buf_hw[prev].phys +
- vin->format.sizeimage / 4;
- break;
- default:
- phys_addr = vin->buf_hw[prev].phys +
- vin->format.sizeimage / 2;
- break;
- }
- } else if ((vin->state != STOPPED && vin->state != RUNNING) ||
- list_empty(&vin->buf_list)) {
+ if (list_empty(&vin->buf_list)) {
vin->buf_hw[slot].buffer = NULL;
- vin->buf_hw[slot].type = FULL;
phys_addr = vin->scratch_phys;
} else {
/* Keep track of buffer we give to HW */
@@ -1056,16 +996,12 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
list_del_init(to_buf_list(vbuf));
vin->buf_hw[slot].buffer = vbuf;
- vin->buf_hw[slot].type =
- V4L2_FIELD_IS_SEQUENTIAL(vin->format.field) ?
- HALF_TOP : FULL;
-
/* Setup DMA */
phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
}
- vin_dbg(vin, "Filling HW slot: %d type: %d buffer: %p\n",
- slot, vin->buf_hw[slot].type, vin->buf_hw[slot].buffer);
+ vin_dbg(vin, "Filling HW slot: %d buffer: %p\n",
+ slot, vin->buf_hw[slot].buffer);
vin->buf_hw[slot].phys = phys_addr;
rvin_set_slot_addr(vin, slot, phys_addr);
@@ -1073,15 +1009,12 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
static int rvin_capture_start(struct rvin_dev *vin)
{
- int slot, ret;
+ int ret;
- for (slot = 0; slot < HW_BUFFER_NUM; slot++) {
+ for (unsigned int slot = 0; slot < HW_BUFFER_NUM; slot++) {
vin->buf_hw[slot].buffer = NULL;
- vin->buf_hw[slot].type = FULL;
- }
-
- for (slot = 0; slot < HW_BUFFER_NUM; slot++)
rvin_fill_hw_slot(vin, slot);
+ }
ret = rvin_setup(vin);
if (ret)
@@ -1094,8 +1027,6 @@ static int rvin_capture_start(struct rvin_dev *vin)
/* Continuous Frame Capture Mode */
rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
- vin->state = STARTING;
-
return 0;
}
@@ -1136,9 +1067,9 @@ static irqreturn_t rvin_irq(int irq, void *data)
if (!(int_status & VNINTS_FIS))
goto done;
- /* Nothing to do if capture status is 'STOPPED' */
- if (vin->state == STOPPED) {
- vin_dbg(vin, "IRQ while state stopped\n");
+ /* Nothing to do if not running. */
+ if (!vin->running) {
+ vin_dbg(vin, "IRQ while not running, ignoring\n");
goto done;
}
@@ -1150,28 +1081,17 @@ static irqreturn_t rvin_irq(int irq, void *data)
* To hand buffers back in a known order to userspace start
* to capture first from slot 0.
*/
- if (vin->state == STARTING) {
+ if (!vin->sequence) {
if (slot != 0) {
vin_dbg(vin, "Starting sync slot: %d\n", slot);
goto done;
}
vin_dbg(vin, "Capture start synced!\n");
- vin->state = RUNNING;
}
/* Capture frame */
if (vin->buf_hw[slot].buffer) {
- /*
- * Nothing to do but refill the hardware slot if
- * capture only filled first half of vb2 buffer.
- */
- if (vin->buf_hw[slot].type == HALF_TOP) {
- vin->buf_hw[slot].buffer = NULL;
- rvin_fill_hw_slot(vin, slot);
- goto done;
- }
-
vin->buf_hw[slot].buffer->field =
rvin_get_active_field(vin, vnms);
vin->buf_hw[slot].buffer->sequence = vin->sequence;
@@ -1322,8 +1242,6 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
/* Supported natively */
break;
case V4L2_FIELD_ALTERNATE:
@@ -1336,8 +1254,6 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
/* Use VIN hardware to combine the two fields */
fmt.format.height *= 2;
break;
@@ -1351,7 +1267,7 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
if (rvin_scaler_needed(vin)) {
/* Gen3/Gen4 can't scale NV12 */
- if (vin->info->model == RCAR_GEN3 &&
+ if ((vin->info->model == RCAR_GEN3 || vin->info->model == RCAR_GEN4) &&
vin->format.pixelformat == V4L2_PIX_FMT_NV12)
return -EPIPE;
@@ -1434,6 +1350,8 @@ int rvin_start_streaming(struct rvin_dev *vin)
if (ret)
rvin_set_stream(vin, 0);
+ vin->running = true;
+
spin_unlock_irqrestore(&vin->qlock, flags);
return ret;
@@ -1466,44 +1384,21 @@ err_scratch:
void rvin_stop_streaming(struct rvin_dev *vin)
{
- unsigned int i, retries;
unsigned long flags;
- bool buffersFreed;
spin_lock_irqsave(&vin->qlock, flags);
- if (vin->state == STOPPED) {
+ if (!vin->running) {
spin_unlock_irqrestore(&vin->qlock, flags);
return;
}
- vin->state = STOPPING;
-
- /* Wait until only scratch buffer is used, max 3 interrupts. */
- retries = 0;
- while (retries++ < RVIN_RETRIES) {
- buffersFreed = true;
- for (i = 0; i < HW_BUFFER_NUM; i++)
- if (vin->buf_hw[i].buffer)
- buffersFreed = false;
-
- if (buffersFreed)
- break;
-
- spin_unlock_irqrestore(&vin->qlock, flags);
- msleep(RVIN_TIMEOUT_MS);
- spin_lock_irqsave(&vin->qlock, flags);
- }
-
/* Wait for streaming to stop */
- retries = 0;
- while (retries++ < RVIN_RETRIES) {
-
+ for (unsigned int i = 0; i < RVIN_RETRIES; i++) {
rvin_capture_stop(vin);
/* Check if HW is stopped */
if (!rvin_capture_active(vin)) {
- vin->state = STOPPED;
break;
}
@@ -1512,32 +1407,25 @@ void rvin_stop_streaming(struct rvin_dev *vin)
spin_lock_irqsave(&vin->qlock, flags);
}
- if (!buffersFreed || vin->state != STOPPED) {
- /*
- * If this happens something have gone horribly wrong.
- * Set state to stopped to prevent the interrupt handler
- * to make things worse...
- */
- vin_err(vin, "Failed stop HW, something is seriously broken\n");
- vin->state = STOPPED;
- }
+ if (rvin_capture_active(vin))
+ vin_err(vin, "Hardware did not stop\n");
- spin_unlock_irqrestore(&vin->qlock, flags);
+ vin->running = false;
- /* If something went wrong, free buffers with an error. */
- if (!buffersFreed) {
- return_unused_buffers(vin, VB2_BUF_STATE_ERROR);
- for (i = 0; i < HW_BUFFER_NUM; i++) {
- if (vin->buf_hw[i].buffer)
- vb2_buffer_done(&vin->buf_hw[i].buffer->vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- }
+ spin_unlock_irqrestore(&vin->qlock, flags);
rvin_set_stream(vin, 0);
/* disable interrupts */
rvin_disable_interrupts(vin);
+
+ /* Return unprocessed buffers from hardware. */
+ for (unsigned int i = 0; i < HW_BUFFER_NUM; i++) {
+ if (vin->buf_hw[i].buffer)
+ vb2_buffer_done(&vin->buf_hw[i].buffer->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
}
static void rvin_stop_streaming_vq(struct vb2_queue *vq)
@@ -1583,8 +1471,6 @@ int rvin_dma_register(struct rvin_dev *vin, int irq)
spin_lock_init(&vin->qlock);
- vin->state = STOPPED;
-
for (i = 0; i < HW_BUFFER_NUM; i++)
vin->buf_hw[i].buffer = NULL;
@@ -1687,7 +1573,7 @@ void rvin_set_alpha(struct rvin_dev *vin, unsigned int alpha)
vin->alpha = alpha;
- if (vin->state == STOPPED)
+ if (!vin->running)
goto out;
switch (vin->format.pixelformat) {
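
The rcar-dma.c hunks above replace the five-state DMA enum with a single running flag: STOPPED/RUNNING collapse into the bool, and the STARTING phase is detected by the frame sequence counter still being zero. A small sketch of that simplification (illustrative only, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool running;
static unsigned int sequence;

static void irq_frame(int slot)
{
	if (!running)			/* was: state == STOPPED */
		return;

	if (!sequence && slot != 0)	/* was: state == STARTING */
		return;			/* wait until slot 0 comes around */

	printf("frame %u from slot %d\n", sequence++, slot);
}

int main(void)
{
	running = true;
	irq_frame(1);	/* ignored: still syncing to slot 0 */
	irq_frame(0);	/* frame 0 */
	irq_frame(1);	/* frame 1 */
	return 0;
}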
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
index 756fdfdbce61..db091af57c19 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
@@ -88,19 +88,19 @@ static const struct rvin_video_format rvin_formats[] = {
},
{
.fourcc = V4L2_PIX_FMT_SBGGR10,
- .bpp = 4,
+ .bpp = 2,
},
{
.fourcc = V4L2_PIX_FMT_SGBRG10,
- .bpp = 4,
+ .bpp = 2,
},
{
.fourcc = V4L2_PIX_FMT_SGRBG10,
- .bpp = 4,
+ .bpp = 2,
},
{
.fourcc = V4L2_PIX_FMT_SRGGB10,
- .bpp = 4,
+ .bpp = 2,
},
};
@@ -161,9 +161,6 @@ static u32 rvin_format_bytesperline(struct rvin_dev *vin,
break;
}
- if (V4L2_FIELD_IS_SEQUENTIAL(pix->field))
- align = 0x80;
-
return ALIGN(pix->width, align) * fmt->bpp;
}
@@ -194,8 +191,6 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
case V4L2_FIELD_ALTERNATE:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
break;
default:
pix->field = RVIN_DEFAULT_FIELD;
@@ -504,8 +499,6 @@ static int rvin_remote_rectangle(struct rvin_dev *vin, struct v4l2_rect *rect)
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
rect->height *= 2;
break;
}
@@ -591,8 +584,8 @@ static int rvin_s_selection(struct file *file, void *fh,
vin->crop = s->r = r;
- vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
- r.width, r.height, r.left, r.top,
+ vin_dbg(vin, "Cropped (%d,%d)/%ux%u of %dx%d\n",
+ r.left, r.top, r.width, r.height,
max_rect.width, max_rect.height);
break;
case V4L2_SEL_TGT_COMPOSE:
@@ -616,8 +609,8 @@ static int rvin_s_selection(struct file *file, void *fh,
vin->compose = s->r = r;
- vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
- r.width, r.height, r.left, r.top,
+ vin_dbg(vin, "Compose (%d,%d)/%ux%u in %dx%d\n",
+ r.left, r.top, r.width, r.height,
vin->format.width, vin->format.height);
break;
default:
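
The bpp correction above matters because rvin_format_bytesperline() multiplies the aligned width by bpp, and the 10-bit Bayer formats are stored as two bytes per pixel, not four. A worked example; the 16-pixel alignment is an assumption for illustration:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int width = 1920;
	unsigned int align = 16;	/* assumed HW alignment */

	/* V4L2_PIX_FMT_SRGGB10 and friends: 10-bit samples in 16-bit cells */
	printf("old (bpp=4): %u bytes/line\n", ALIGN_UP(width, align) * 4); /* 7680 */
	printf("new (bpp=2): %u bytes/line\n", ALIGN_UP(width, align) * 2); /* 3840 */
	return 0;
}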
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
index f87d4bc9e53e..83d1b2734c41 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
@@ -39,6 +39,7 @@ enum model_id {
RCAR_M1,
RCAR_GEN2,
RCAR_GEN3,
+ RCAR_GEN4,
};
enum rvin_csi_id {
@@ -62,39 +63,6 @@ enum rvin_isp_id {
(unsigned int)RVIN_CSI_MAX : (unsigned int)RVIN_ISP_MAX)
/**
- * enum rvin_dma_state - DMA states
- * @STOPPED: No operation in progress
- * @STARTING: Capture starting up
- * @RUNNING: Operation in progress have buffers
- * @STOPPING: Stopping operation
- * @SUSPENDED: Capture is suspended
- */
-enum rvin_dma_state {
- STOPPED = 0,
- STARTING,
- RUNNING,
- STOPPING,
- SUSPENDED,
-};
-
-/**
- * enum rvin_buffer_type
- *
- * Describes how a buffer is given to the hardware. To be able
- * to capture SEQ_TB/BT it's needed to capture to the same vb2
- * buffer twice so the type of buffer needs to be kept.
- *
- * @FULL: One capture fills the whole vb2 buffer
- * @HALF_TOP: One capture fills the top half of the vb2 buffer
- * @HALF_BOTTOM: One capture fills the bottom half of the vb2 buffer
- */
-enum rvin_buffer_type {
- FULL,
- HALF_TOP,
- HALF_BOTTOM,
-};
-
-/**
* struct rvin_video_format - Data format stored in memory
* @fourcc: Pixelformat
* @bpp: Bytes per pixel
@@ -194,11 +162,11 @@ struct rvin_info {
* @scratch: cpu address for scratch buffer
* @scratch_phys: physical address of the scratch buffer
*
- * @qlock: protects @buf_hw, @buf_list, @sequence and @state
+ * @qlock: Protects @buf_hw, @buf_list, @sequence and @running
* @buf_hw: Keeps track of buffers given to HW slot
* @buf_list: list of queued buffers
* @sequence: V4L2 buffers sequence number
- * @state: keeps track of operation state
+ * @running: Keeps track of whether the VIN is running
*
* @is_csi: flag to mark the VIN as using a CSI-2 subdevice
* @chsel: Cached value of the current CSI-2 channel selection
@@ -237,12 +205,11 @@ struct rvin_dev {
spinlock_t qlock;
struct {
struct vb2_v4l2_buffer *buffer;
- enum rvin_buffer_type type;
dma_addr_t phys;
} buf_hw[HW_BUFFER_NUM];
struct list_head buf_list;
unsigned int sequence;
- enum rvin_dma_state state;
+ bool running;
bool is_csi;
unsigned int chsel;
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
index 89be584a4988..5fa73ab2db53 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
@@ -22,6 +22,7 @@
#include <media/v4l2-mc.h>
#include "rzg2l-cru.h"
+#include "rzg2l-cru-regs.h"
static inline struct rzg2l_cru_dev *notifier_to_cru(struct v4l2_async_notifier *n)
{
@@ -240,10 +241,11 @@ static int rzg2l_cru_media_init(struct rzg2l_cru_dev *cru)
static int rzg2l_cru_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rzg2l_cru_dev *cru;
int irq, ret;
- cru = devm_kzalloc(&pdev->dev, sizeof(*cru), GFP_KERNEL);
+ cru = devm_kzalloc(dev, sizeof(*cru), GFP_KERNEL);
if (!cru)
return -ENOMEM;
@@ -251,32 +253,32 @@ static int rzg2l_cru_probe(struct platform_device *pdev)
if (IS_ERR(cru->base))
return PTR_ERR(cru->base);
- cru->presetn = devm_reset_control_get_shared(&pdev->dev, "presetn");
+ cru->presetn = devm_reset_control_get_shared(dev, "presetn");
if (IS_ERR(cru->presetn))
- return dev_err_probe(&pdev->dev, PTR_ERR(cru->presetn),
+ return dev_err_probe(dev, PTR_ERR(cru->presetn),
"Failed to get cpg presetn\n");
- cru->aresetn = devm_reset_control_get_exclusive(&pdev->dev, "aresetn");
+ cru->aresetn = devm_reset_control_get_exclusive(dev, "aresetn");
if (IS_ERR(cru->aresetn))
- return dev_err_probe(&pdev->dev, PTR_ERR(cru->aresetn),
+ return dev_err_probe(dev, PTR_ERR(cru->aresetn),
"Failed to get cpg aresetn\n");
- cru->vclk = devm_clk_get(&pdev->dev, "video");
+ cru->vclk = devm_clk_get(dev, "video");
if (IS_ERR(cru->vclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(cru->vclk),
+ return dev_err_probe(dev, PTR_ERR(cru->vclk),
"Failed to get video clock\n");
- cru->dev = &pdev->dev;
- cru->info = of_device_get_match_data(&pdev->dev);
+ cru->dev = dev;
+ cru->info = of_device_get_match_data(dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = devm_request_irq(&pdev->dev, irq, rzg2l_cru_irq, 0,
+ ret = devm_request_irq(dev, irq, cru->info->irq_handler, 0,
KBUILD_MODNAME, cru);
if (ret)
- return dev_err_probe(&pdev->dev, ret, "failed to request irq\n");
+ return dev_err_probe(dev, ret, "failed to request irq\n");
platform_set_drvdata(pdev, cru);
@@ -285,8 +287,10 @@ static int rzg2l_cru_probe(struct platform_device *pdev)
return ret;
cru->num_buf = RZG2L_CRU_HW_BUFFER_DEFAULT;
- pm_suspend_ignore_children(&pdev->dev, true);
- pm_runtime_enable(&pdev->dev);
+ pm_suspend_ignore_children(dev, true);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ goto error_dma_unregister;
ret = rzg2l_cru_media_init(cru);
if (ret)
@@ -296,7 +300,6 @@ static int rzg2l_cru_probe(struct platform_device *pdev)
error_dma_unregister:
rzg2l_cru_dma_unregister(cru);
- pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -305,8 +308,6 @@ static void rzg2l_cru_remove(struct platform_device *pdev)
{
struct rzg2l_cru_dev *cru = platform_get_drvdata(pdev);
- pm_runtime_disable(&pdev->dev);
-
v4l2_async_nf_unregister(&cru->notifier);
v4l2_async_nf_cleanup(&cru->notifier);
@@ -317,8 +318,112 @@ static void rzg2l_cru_remove(struct platform_device *pdev)
rzg2l_cru_dma_unregister(cru);
}
+static const u16 rzg3e_cru_regs[] = {
+ [CRUnCTRL] = 0x0,
+ [CRUnIE] = 0x4,
+ [CRUnIE2] = 0x8,
+ [CRUnINTS] = 0xc,
+ [CRUnINTS2] = 0x10,
+ [CRUnRST] = 0x18,
+ [AMnMB1ADDRL] = 0x40,
+ [AMnMB1ADDRH] = 0x44,
+ [AMnMB2ADDRL] = 0x48,
+ [AMnMB2ADDRH] = 0x4c,
+ [AMnMB3ADDRL] = 0x50,
+ [AMnMB3ADDRH] = 0x54,
+ [AMnMB4ADDRL] = 0x58,
+ [AMnMB4ADDRH] = 0x5c,
+ [AMnMB5ADDRL] = 0x60,
+ [AMnMB5ADDRH] = 0x64,
+ [AMnMB6ADDRL] = 0x68,
+ [AMnMB6ADDRH] = 0x6c,
+ [AMnMB7ADDRL] = 0x70,
+ [AMnMB7ADDRH] = 0x74,
+ [AMnMB8ADDRL] = 0x78,
+ [AMnMB8ADDRH] = 0x7c,
+ [AMnMBVALID] = 0x88,
+ [AMnMADRSL] = 0x8c,
+ [AMnMADRSH] = 0x90,
+ [AMnAXIATTR] = 0xec,
+ [AMnFIFOPNTR] = 0xf8,
+ [AMnAXISTP] = 0x110,
+ [AMnAXISTPACK] = 0x114,
+ [AMnIS] = 0x128,
+ [ICnEN] = 0x1f0,
+ [ICnSVCNUM] = 0x1f8,
+ [ICnSVC] = 0x1fc,
+ [ICnIPMC_C0] = 0x200,
+ [ICnMS] = 0x2d8,
+ [ICnDMR] = 0x304,
+};
+
+static const struct rzg2l_cru_info rzg3e_cru_info = {
+ .max_width = 4095,
+ .max_height = 4095,
+ .image_conv = ICnIPMC_C0,
+ .has_stride = true,
+ .regs = rzg3e_cru_regs,
+ .irq_handler = rzg3e_cru_irq,
+ .enable_interrupts = rzg3e_cru_enable_interrupts,
+ .disable_interrupts = rzg3e_cru_disable_interrupts,
+ .fifo_empty = rzg3e_fifo_empty,
+ .csi_setup = rzg3e_cru_csi2_setup,
+};
+
+static const u16 rzg2l_cru_regs[] = {
+ [CRUnCTRL] = 0x0,
+ [CRUnIE] = 0x4,
+ [CRUnINTS] = 0x8,
+ [CRUnRST] = 0xc,
+ [AMnMB1ADDRL] = 0x100,
+ [AMnMB1ADDRH] = 0x104,
+ [AMnMB2ADDRL] = 0x108,
+ [AMnMB2ADDRH] = 0x10c,
+ [AMnMB3ADDRL] = 0x110,
+ [AMnMB3ADDRH] = 0x114,
+ [AMnMB4ADDRL] = 0x118,
+ [AMnMB4ADDRH] = 0x11c,
+ [AMnMB5ADDRL] = 0x120,
+ [AMnMB5ADDRH] = 0x124,
+ [AMnMB6ADDRL] = 0x128,
+ [AMnMB6ADDRH] = 0x12c,
+ [AMnMB7ADDRL] = 0x130,
+ [AMnMB7ADDRH] = 0x134,
+ [AMnMB8ADDRL] = 0x138,
+ [AMnMB8ADDRH] = 0x13c,
+ [AMnMBVALID] = 0x148,
+ [AMnMBS] = 0x14c,
+ [AMnAXIATTR] = 0x158,
+ [AMnFIFOPNTR] = 0x168,
+ [AMnAXISTP] = 0x174,
+ [AMnAXISTPACK] = 0x178,
+ [ICnEN] = 0x200,
+ [ICnMC] = 0x208,
+ [ICnMS] = 0x254,
+ [ICnDMR] = 0x26c,
+};
+
+static const struct rzg2l_cru_info rzg2l_cru_info = {
+ .max_width = 2800,
+ .max_height = 4095,
+ .image_conv = ICnMC,
+ .regs = rzg2l_cru_regs,
+ .irq_handler = rzg2l_cru_irq,
+ .enable_interrupts = rzg2l_cru_enable_interrupts,
+ .disable_interrupts = rzg2l_cru_disable_interrupts,
+ .fifo_empty = rzg2l_fifo_empty,
+ .csi_setup = rzg2l_cru_csi2_setup,
+};
+
static const struct of_device_id rzg2l_cru_of_id_table[] = {
- { .compatible = "renesas,rzg2l-cru", },
+ {
+ .compatible = "renesas,r9a09g047-cru",
+ .data = &rzg3e_cru_info,
+ },
+ {
+ .compatible = "renesas,rzg2l-cru",
+ .data = &rzg2l_cru_info,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_cru_of_id_table);
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h
index 1c9f22118a5d..a5a57369ef0e 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru-regs.h
@@ -10,71 +10,102 @@
/* HW CRU Registers Definition */
-/* CRU Control Register */
-#define CRUnCTRL 0x0
#define CRUnCTRL_VINSEL(x) ((x) << 0)
-/* CRU Interrupt Enable Register */
-#define CRUnIE 0x4
#define CRUnIE_EFE BIT(17)
-/* CRU Interrupt Status Register */
-#define CRUnINTS 0x8
+#define CRUnIE2_FSxE(x) BIT(((x) * 3))
+#define CRUnIE2_FExE(x) BIT(((x) * 3) + 1)
+
#define CRUnINTS_SFS BIT(16)
-/* CRU Reset Register */
-#define CRUnRST 0xc
+#define CRUnINTS2_FSxS(x) BIT(((x) * 3))
+
#define CRUnRST_VRESETN BIT(0)
/* Memory Bank Base Address (Lower) Register for CRU Image Data */
-#define AMnMBxADDRL(x) (0x100 + ((x) * 8))
+#define AMnMBxADDRL(x) (AMnMB1ADDRL + (x) * 2)
/* Memory Bank Base Address (Higher) Register for CRU Image Data */
-#define AMnMBxADDRH(x) (0x104 + ((x) * 8))
+#define AMnMBxADDRH(x) (AMnMB1ADDRH + (x) * 2)
-/* Memory Bank Enable Register for CRU Image Data */
-#define AMnMBVALID 0x148
#define AMnMBVALID_MBVALID(x) GENMASK(x, 0)
-/* Memory Bank Status Register for CRU Image Data */
-#define AMnMBS 0x14c
#define AMnMBS_MBSTS 0x7
-/* AXI Master Transfer Setting Register for CRU Image Data */
-#define AMnAXIATTR 0x158
#define AMnAXIATTR_AXILEN_MASK GENMASK(3, 0)
#define AMnAXIATTR_AXILEN (0xf)
-/* AXI Master FIFO Pointer Register for CRU Image Data */
-#define AMnFIFOPNTR 0x168
#define AMnFIFOPNTR_FIFOWPNTR GENMASK(7, 0)
+#define AMnFIFOPNTR_FIFOWPNTR_B0 AMnFIFOPNTR_FIFOWPNTR
+#define AMnFIFOPNTR_FIFOWPNTR_B1 GENMASK(15, 8)
#define AMnFIFOPNTR_FIFORPNTR_Y GENMASK(23, 16)
+#define AMnFIFOPNTR_FIFORPNTR_B0 AMnFIFOPNTR_FIFORPNTR_Y
+#define AMnFIFOPNTR_FIFORPNTR_B1 GENMASK(31, 24)
+
+#define AMnIS_IS_MASK GENMASK(14, 7)
+#define AMnIS_IS(x) ((x) << 7)
-/* AXI Master Transfer Stop Register for CRU Image Data */
-#define AMnAXISTP 0x174
#define AMnAXISTP_AXI_STOP BIT(0)
-/* AXI Master Transfer Stop Status Register for CRU Image Data */
-#define AMnAXISTPACK 0x178
#define AMnAXISTPACK_AXI_STOP_ACK BIT(0)
-/* CRU Image Processing Enable Register */
-#define ICnEN 0x200
#define ICnEN_ICEN BIT(0)
-/* CRU Image Processing Main Control Register */
-#define ICnMC 0x208
+#define ICnSVC_SVC0(x) (x)
+#define ICnSVC_SVC1(x) ((x) << 4)
+#define ICnSVC_SVC2(x) ((x) << 8)
+#define ICnSVC_SVC3(x) ((x) << 12)
+
#define ICnMC_CSCTHR BIT(5)
#define ICnMC_INF(x) ((x) << 16)
#define ICnMC_VCSEL(x) ((x) << 22)
#define ICnMC_INF_MASK GENMASK(21, 16)
-/* CRU Module Status Register */
-#define ICnMS 0x254
#define ICnMS_IA BIT(2)
-/* CRU Data Output Mode Register */
-#define ICnDMR 0x26c
#define ICnDMR_YCMODE_UYVY (1 << 4)
+enum rzg2l_cru_common_regs {
+ CRUnCTRL, /* CRU Control */
+ CRUnIE, /* CRU Interrupt Enable */
+ CRUnIE2, /* CRU Interrupt Enable(2) */
+ CRUnINTS, /* CRU Interrupt Status */
+ CRUnINTS2, /* CRU Interrupt Status(2) */
+ CRUnRST, /* CRU Reset */
+ AMnMB1ADDRL, /* Bank 1 Address (Lower) for CRU Image Data */
+ AMnMB1ADDRH, /* Bank 1 Address (Higher) for CRU Image Data */
+ AMnMB2ADDRL, /* Bank 2 Address (Lower) for CRU Image Data */
+ AMnMB2ADDRH, /* Bank 2 Address (Higher) for CRU Image Data */
+ AMnMB3ADDRL, /* Bank 3 Address (Lower) for CRU Image Data */
+ AMnMB3ADDRH, /* Bank 3 Address (Higher) for CRU Image Data */
+ AMnMB4ADDRL, /* Bank 4 Address (Lower) for CRU Image Data */
+ AMnMB4ADDRH, /* Bank 4 Address (Higher) for CRU Image Data */
+ AMnMB5ADDRL, /* Bank 5 Address (Lower) for CRU Image Data */
+ AMnMB5ADDRH, /* Bank 5 Address (Higher) for CRU Image Data */
+ AMnMB6ADDRL, /* Bank 6 Address (Lower) for CRU Image Data */
+ AMnMB6ADDRH, /* Bank 6 Address (Higher) for CRU Image Data */
+ AMnMB7ADDRL, /* Bank 7 Address (Lower) for CRU Image Data */
+ AMnMB7ADDRH, /* Bank 7 Address (Higher) for CRU Image Data */
+ AMnMB8ADDRL, /* Bank 8 Address (Lower) for CRU Image Data */
+ AMnMB8ADDRH, /* Bank 8 Address (Higher) for CRU Image Data */
+ AMnMBVALID, /* Memory Bank Enable for CRU Image Data */
+ AMnMBS, /* Memory Bank Status for CRU Image Data */
+ AMnMADRSL, /* VD Memory Address Lower Status Register */
+ AMnMADRSH, /* VD Memory Address Higher Status Register */
+ AMnAXIATTR, /* AXI Master Transfer Setting Register for CRU Image Data */
+ AMnFIFOPNTR, /* AXI Master FIFO Pointer for CRU Image Data */
+ AMnAXISTP, /* AXI Master Transfer Stop for CRU Image Data */
+ AMnAXISTPACK, /* AXI Master Transfer Stop Status for CRU Image Data */
+ AMnIS, /* Image Stride Setting Register */
+ ICnEN, /* CRU Image Processing Enable */
+ ICnSVCNUM, /* CRU SVC Number Register */
+ ICnSVC, /* CRU VC Select Register */
+ ICnMC, /* CRU Image Processing Main Control */
+ ICnIPMC_C0, /* CRU Image Converter Main Control 0 */
+ ICnMS, /* CRU Module Status */
+ ICnDMR, /* CRU Data Output Mode */
+ RZG2L_CRU_MAX_REG,
+};
+
#endif /* __RZG2L_CRU_REGS_H__ */
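
With the offsets moved out of the header, every access now goes through a logical index into a per-SoC u16 table, as the rzg3e_cru_regs and rzg2l_cru_regs arrays above show. A minimal sketch of the indirection; names and offsets here are hypothetical:

#include <stdio.h>

enum reg_idx { REG_CTRL, REG_IE, REG_IDX_MAX };	/* hypothetical registers */

static const unsigned short soc_a_regs[REG_IDX_MAX] = {
	[REG_CTRL] = 0x0, [REG_IE] = 0x4,
};

static const unsigned short soc_b_regs[REG_IDX_MAX] = {
	[REG_CTRL] = 0x0, [REG_IE] = 0x8,	/* same logical register, new offset */
};

int main(void)
{
	/* one driver body, per-SoC offsets resolved through the table */
	printf("IE: SoC A @0x%x, SoC B @0x%x\n",
	       soc_a_regs[REG_IE], soc_b_regs[REG_IE]);
	return 0;
}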
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
index 8b898ce05b84..c30f3b281284 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
@@ -27,15 +27,15 @@
#define RZG2L_CRU_CSI2_VCHANNEL 4
#define RZG2L_CRU_MIN_INPUT_WIDTH 320
-#define RZG2L_CRU_MAX_INPUT_WIDTH 2800
#define RZG2L_CRU_MIN_INPUT_HEIGHT 240
-#define RZG2L_CRU_MAX_INPUT_HEIGHT 4095
enum rzg2l_csi2_pads {
RZG2L_CRU_IP_SINK = 0,
RZG2L_CRU_IP_SOURCE,
};
+struct rzg2l_cru_dev;
+
/**
* enum rzg2l_cru_dma_state - DMA states
* @RZG2L_CRU_DMA_STOPPED: No operation in progress
@@ -80,6 +80,21 @@ struct rzg2l_cru_ip_format {
bool yuv;
};
+struct rzg2l_cru_info {
+ unsigned int max_width;
+ unsigned int max_height;
+ u16 image_conv;
+ const u16 *regs;
+ bool has_stride;
+ irqreturn_t (*irq_handler)(int irq, void *data);
+ void (*enable_interrupts)(struct rzg2l_cru_dev *cru);
+ void (*disable_interrupts)(struct rzg2l_cru_dev *cru);
+ bool (*fifo_empty)(struct rzg2l_cru_dev *cru);
+ void (*csi_setup)(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc);
+};
+
/**
* struct rzg2l_cru_dev - Renesas CRU device structure
* @dev: (OF) device
@@ -94,6 +109,8 @@ struct rzg2l_cru_ip_format {
* @vdev: V4L2 video device associated with CRU
* @v4l2_dev: V4L2 device
* @num_buf: Holds the current number of buffers enabled
+ * @svc_channel: SVC0/1/2/3 to use for RZ/G3E
+ * @buf_addr: Memory addresses where current video data is written.
* @notifier: V4L2 asynchronous subdevs notifier
*
* @ip: Image processing subdev info
@@ -130,6 +147,9 @@ struct rzg2l_cru_dev {
struct v4l2_device v4l2_dev;
u8 num_buf;
+ u8 svc_channel;
+ dma_addr_t buf_addr[RZG2L_CRU_HW_BUFFER_DEFAULT];
+
struct v4l2_async_notifier notifier;
struct rzg2l_cru_ip ip;
@@ -161,6 +181,7 @@ void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru);
int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru);
void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru);
irqreturn_t rzg2l_cru_irq(int irq, void *data);
+irqreturn_t rzg3e_cru_irq(int irq, void *data);
const struct v4l2_format_info *rzg2l_cru_format_from_pixel(u32 format);
@@ -172,4 +193,18 @@ const struct rzg2l_cru_ip_format *rzg2l_cru_ip_code_to_fmt(unsigned int code);
const struct rzg2l_cru_ip_format *rzg2l_cru_ip_format_to_fmt(u32 format);
const struct rzg2l_cru_ip_format *rzg2l_cru_ip_index_to_fmt(u32 index);
+void rzg2l_cru_enable_interrupts(struct rzg2l_cru_dev *cru);
+void rzg2l_cru_disable_interrupts(struct rzg2l_cru_dev *cru);
+void rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev *cru);
+void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru);
+
+bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru);
+bool rzg3e_fifo_empty(struct rzg2l_cru_dev *cru);
+void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc);
+void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc);
+
#endif
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index 881e910dce02..9243306e2aa9 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -85,6 +85,15 @@
CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(1) | \
CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(1))
+/* DPHY registers on RZ/V2H(P) SoC */
+#define CRUm_S_TIMCTL 0x41c
+#define CRUm_S_TIMCTL_S_HSSETTLECTL(x) ((x) << 8)
+
+#define CRUm_S_DPHYCTL_MSB 0x434
+#define CRUm_S_DPHYCTL_MSB_DESKEW BIT(1)
+
+#define CRUm_SWAPCTL 0x438
+
#define VSRSTS_RETRIES 20
#define RZG2L_CSI2_MIN_WIDTH 320
@@ -107,6 +116,7 @@ struct rzg2l_csi2 {
void __iomem *base;
struct reset_control *presetn;
struct reset_control *cmn_rstb;
+ const struct rzg2l_csi2_info *info;
struct clk *sysclk;
struct clk *vclk;
unsigned long vclk_rate;
@@ -123,6 +133,12 @@ struct rzg2l_csi2 {
bool dphy_enabled;
};
+struct rzg2l_csi2_info {
+ int (*dphy_enable)(struct rzg2l_csi2 *csi2);
+ int (*dphy_disable)(struct rzg2l_csi2 *csi2);
+ bool has_system_clk;
+};
+
struct rzg2l_csi2_timings {
u32 t_init;
u32 tclk_miss;
@@ -133,6 +149,30 @@ struct rzg2l_csi2_timings {
u32 max_hsfreq;
};
+struct rzv2h_csi2_s_hssettlectl {
+ unsigned int hsfreq;
+ u16 s_hssettlectl;
+};
+
+static const struct rzv2h_csi2_s_hssettlectl rzv2h_s_hssettlectl[] = {
+ { 90, 1 }, { 130, 2 }, { 180, 3 },
+ { 220, 4 }, { 270, 5 }, { 310, 6 },
+ { 360, 7 }, { 400, 8 }, { 450, 9 },
+ { 490, 10 }, { 540, 11 }, { 580, 12 },
+ { 630, 13 }, { 670, 14 }, { 720, 15 },
+ { 760, 16 }, { 810, 17 }, { 850, 18 },
+ { 900, 19 }, { 940, 20 }, { 990, 21 },
+ { 1030, 22 }, { 1080, 23 }, { 1120, 24 },
+ { 1170, 25 }, { 1220, 26 }, { 1260, 27 },
+ { 1310, 28 }, { 1350, 29 }, { 1400, 30 },
+ { 1440, 31 }, { 1490, 32 }, { 1530, 33 },
+ { 1580, 34 }, { 1620, 35 }, { 1670, 36 },
+ { 1710, 37 }, { 1760, 38 }, { 1800, 39 },
+ { 1850, 40 }, { 1890, 41 }, { 1940, 42 },
+ { 1980, 43 }, { 2030, 44 }, { 2070, 45 },
+ { 2100, 46 },
+};
+
static const struct rzg2l_csi2_timings rzg2l_csi2_global_timings[] = {
{
.max_hsfreq = 80,
@@ -355,14 +395,20 @@ static int rzg2l_csi2_dphy_enable(struct rzg2l_csi2 *csi2)
return ret;
}
+static const struct rzg2l_csi2_info rzg2l_csi2_info = {
+ .dphy_enable = rzg2l_csi2_dphy_enable,
+ .dphy_disable = rzg2l_csi2_dphy_disable,
+ .has_system_clk = true,
+};
+
static int rzg2l_csi2_dphy_setting(struct v4l2_subdev *sd, bool on)
{
struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
if (on)
- return rzg2l_csi2_dphy_enable(csi2);
+ return csi2->info->dphy_enable(csi2);
- return rzg2l_csi2_dphy_disable(csi2);
+ return csi2->info->dphy_disable(csi2);
}
static int rzg2l_csi2_mipi_link_enable(struct rzg2l_csi2 *csi2)
@@ -421,6 +467,64 @@ static int rzg2l_csi2_mipi_link_disable(struct rzg2l_csi2 *csi2)
return 0;
}
+static int rzv2h_csi2_dphy_disable(struct rzg2l_csi2 *csi2)
+{
+ int ret;
+
+ /* Reset the CRU (D-PHY) */
+ ret = reset_control_assert(csi2->cmn_rstb);
+ if (ret)
+ return ret;
+
+ csi2->dphy_enabled = false;
+
+ return 0;
+}
+
+static int rzv2h_csi2_dphy_enable(struct rzg2l_csi2 *csi2)
+{
+ unsigned int i;
+ u16 hssettle;
+ int mbps;
+
+ mbps = rzg2l_csi2_calc_mbps(csi2);
+ if (mbps < 0)
+ return mbps;
+
+ csi2->hsfreq = mbps;
+
+ for (i = 0; i < ARRAY_SIZE(rzv2h_s_hssettlectl); i++) {
+ if (csi2->hsfreq <= rzv2h_s_hssettlectl[i].hsfreq)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(rzv2h_s_hssettlectl))
+ return -EINVAL;
+
+ rzg2l_csi2_write(csi2, CRUm_SWAPCTL, 0);
+
+ hssettle = rzv2h_s_hssettlectl[i].s_hssettlectl;
+ rzg2l_csi2_write(csi2, CRUm_S_TIMCTL,
+ CRUm_S_TIMCTL_S_HSSETTLECTL(hssettle));
+
+ if (csi2->hsfreq > 1500)
+ rzg2l_csi2_set(csi2, CRUm_S_DPHYCTL_MSB,
+ CRUm_S_DPHYCTL_MSB_DESKEW);
+ else
+ rzg2l_csi2_clr(csi2, CRUm_S_DPHYCTL_MSB,
+ CRUm_S_DPHYCTL_MSB_DESKEW);
+
+ csi2->dphy_enabled = true;
+
+ return 0;
+}
+
+static const struct rzg2l_csi2_info rzv2h_csi2_info = {
+ .dphy_enable = rzv2h_csi2_dphy_enable,
+ .dphy_disable = rzv2h_csi2_dphy_disable,
+ .has_system_clk = false,
+};
+
static int rzg2l_csi2_mipi_link_setting(struct v4l2_subdev *sd, bool on)
{
struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
@@ -764,39 +868,46 @@ static const struct media_entity_operations rzg2l_csi2_entity_ops = {
static int rzg2l_csi2_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rzg2l_csi2 *csi2;
int ret;
- csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
+ csi2 = devm_kzalloc(dev, sizeof(*csi2), GFP_KERNEL);
if (!csi2)
return -ENOMEM;
+ csi2->info = of_device_get_match_data(dev);
+ if (!csi2->info)
+ return dev_err_probe(dev, -EINVAL, "Failed to get OF match data\n");
+
csi2->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi2->base))
return PTR_ERR(csi2->base);
- csi2->cmn_rstb = devm_reset_control_get_exclusive(&pdev->dev, "cmn-rstb");
+ csi2->cmn_rstb = devm_reset_control_get_exclusive(dev, "cmn-rstb");
if (IS_ERR(csi2->cmn_rstb))
- return dev_err_probe(&pdev->dev, PTR_ERR(csi2->cmn_rstb),
+ return dev_err_probe(dev, PTR_ERR(csi2->cmn_rstb),
"Failed to get cpg cmn-rstb\n");
- csi2->presetn = devm_reset_control_get_shared(&pdev->dev, "presetn");
+ csi2->presetn = devm_reset_control_get_shared(dev, "presetn");
if (IS_ERR(csi2->presetn))
- return dev_err_probe(&pdev->dev, PTR_ERR(csi2->presetn),
+ return dev_err_probe(dev, PTR_ERR(csi2->presetn),
"Failed to get cpg presetn\n");
- csi2->sysclk = devm_clk_get(&pdev->dev, "system");
- if (IS_ERR(csi2->sysclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(csi2->sysclk),
- "Failed to get system clk\n");
+ if (csi2->info->has_system_clk) {
+ csi2->sysclk = devm_clk_get(dev, "system");
+ if (IS_ERR(csi2->sysclk))
+ return dev_err_probe(dev, PTR_ERR(csi2->sysclk),
+ "Failed to get system clk\n");
+ }
- csi2->vclk = devm_clk_get(&pdev->dev, "video");
+ csi2->vclk = devm_clk_get(dev, "video");
if (IS_ERR(csi2->vclk))
- return dev_err_probe(&pdev->dev, PTR_ERR(csi2->vclk),
+ return dev_err_probe(dev, PTR_ERR(csi2->vclk),
"Failed to get video clock\n");
csi2->vclk_rate = clk_get_rate(csi2->vclk);
- csi2->dev = &pdev->dev;
+ csi2->dev = dev;
platform_set_drvdata(pdev, csi2);
@@ -804,18 +915,20 @@ static int rzg2l_csi2_probe(struct platform_device *pdev)
if (ret)
return ret;
- pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
ret = rzg2l_validate_csi2_lanes(csi2);
if (ret)
- goto error_pm;
+ return ret;
- csi2->subdev.dev = &pdev->dev;
+ csi2->subdev.dev = dev;
v4l2_subdev_init(&csi2->subdev, &rzg2l_csi2_subdev_ops);
csi2->subdev.internal_ops = &rzg2l_csi2_internal_ops;
- v4l2_set_subdevdata(&csi2->subdev, &pdev->dev);
+ v4l2_set_subdevdata(&csi2->subdev, dev);
snprintf(csi2->subdev.name, sizeof(csi2->subdev.name),
- "csi-%s", dev_name(&pdev->dev));
+ "csi-%s", dev_name(dev));
csi2->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
csi2->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
@@ -833,7 +946,7 @@ static int rzg2l_csi2_probe(struct platform_device *pdev)
ret = media_entity_pads_init(&csi2->subdev.entity, ARRAY_SIZE(csi2->pads),
csi2->pads);
if (ret)
- goto error_pm;
+ return ret;
ret = v4l2_subdev_init_finalize(&csi2->subdev);
if (ret < 0)
@@ -851,8 +964,6 @@ error_async:
v4l2_async_nf_unregister(&csi2->notifier);
v4l2_async_nf_cleanup(&csi2->notifier);
media_entity_cleanup(&csi2->subdev.entity);
-error_pm:
- pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -866,7 +977,6 @@ static void rzg2l_csi2_remove(struct platform_device *pdev)
v4l2_async_unregister_subdev(&csi2->subdev);
v4l2_subdev_cleanup(&csi2->subdev);
media_entity_cleanup(&csi2->subdev.entity);
- pm_runtime_disable(&pdev->dev);
}
static int rzg2l_csi2_pm_runtime_suspend(struct device *dev)
@@ -891,7 +1001,14 @@ static const struct dev_pm_ops rzg2l_csi2_pm_ops = {
};
static const struct of_device_id rzg2l_csi2_of_table[] = {
- { .compatible = "renesas,rzg2l-csi2", },
+ {
+ .compatible = "renesas,r9a09g057-csi2",
+ .data = &rzv2h_csi2_info,
+ },
+ {
+ .compatible = "renesas,rzg2l-csi2",
+ .data = &rzg2l_csi2_info,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table);
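
rzv2h_csi2_dphy_enable() above settles the D-PHY by scanning an ascending table for the first entry whose hsfreq bound covers the measured link rate, and enables deskew above 1500 Mbps. The lookup in isolation, as a standalone sketch over an abridged copy of the table:

#include <stdio.h>

struct settle { unsigned int hsfreq; unsigned short val; };

/* Abridged version of rzv2h_s_hssettlectl[], ascending hsfreq bounds. */
static const struct settle tbl[] = {
	{ 90, 1 }, { 130, 2 }, { 180, 3 }, { 2100, 46 },
};

static int lookup_settle(unsigned int mbps)
{
	for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (mbps <= tbl[i].hsfreq)
			return tbl[i].val;

	return -1;	/* out of range, mirrors the -EINVAL above */
}

int main(void)
{
	printf("150 Mbps -> %d, 3000 Mbps -> %d\n",
	       lookup_settle(150), lookup_settle(3000));	/* 3, -1 */
	return 0;
}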
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
index 76a2b451f1da..7836c7cd53dc 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
@@ -148,6 +148,8 @@ static int rzg2l_cru_ip_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *fmt)
{
+ struct rzg2l_cru_dev *cru = v4l2_get_subdevdata(sd);
+ const struct rzg2l_cru_info *info = cru->info;
struct v4l2_mbus_framefmt *src_format;
struct v4l2_mbus_framefmt *sink_format;
@@ -170,9 +172,9 @@ static int rzg2l_cru_ip_set_format(struct v4l2_subdev *sd,
sink_format->ycbcr_enc = fmt->format.ycbcr_enc;
sink_format->quantization = fmt->format.quantization;
sink_format->width = clamp_t(u32, fmt->format.width,
- RZG2L_CRU_MIN_INPUT_WIDTH, RZG2L_CRU_MAX_INPUT_WIDTH);
+ RZG2L_CRU_MIN_INPUT_WIDTH, info->max_width);
sink_format->height = clamp_t(u32, fmt->format.height,
- RZG2L_CRU_MIN_INPUT_HEIGHT, RZG2L_CRU_MAX_INPUT_HEIGHT);
+ RZG2L_CRU_MIN_INPUT_HEIGHT, info->max_height);
fmt->format = *sink_format;
@@ -197,6 +199,9 @@ static int rzg2l_cru_ip_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct rzg2l_cru_dev *cru = v4l2_get_subdevdata(sd);
+ const struct rzg2l_cru_info *info = cru->info;
+
if (fse->index != 0)
return -EINVAL;
@@ -205,8 +210,8 @@ static int rzg2l_cru_ip_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = RZG2L_CRU_MIN_INPUT_WIDTH;
fse->min_height = RZG2L_CRU_MIN_INPUT_HEIGHT;
- fse->max_width = RZG2L_CRU_MAX_INPUT_WIDTH;
- fse->max_height = RZG2L_CRU_MAX_INPUT_HEIGHT;
+ fse->max_width = info->max_width;
+ fse->max_height = info->max_height;
return 0;
}
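
The rzg2l-video.c diff below wraps register accesses in a __builtin_constant_p() dispatch: constant register indices are validated at compile time (BUILD_BUG_ON), variable ones at runtime (WARN_ON). A minimal sketch of the technique; it relies on the GCC/Clang builtin, and the names and table are illustrative:

#include <stdio.h>

#define REG_MAX 4
static const unsigned short regs[REG_MAX] = { 0x0, 0x4, 0x8, 0xc };

static unsigned int read_runtime(unsigned int idx)
{
	if (idx >= REG_MAX)		/* runtime check, like WARN_ON() */
		return 0;
	return regs[idx];		/* stands in for ioread32() */
}

static inline unsigned int read_constant(unsigned int idx)
{
	/* the kernel version uses BUILD_BUG_ON(idx >= REG_MAX) here */
	return regs[idx];
}

#define reg_read(idx) \
	(__builtin_constant_p(idx) ? read_constant(idx) : read_runtime(idx))

int main(void)
{
	unsigned int i = 2;

	printf("0x%x 0x%x\n", reg_read(1), reg_read(i));
	return 0;
}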
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index cd69c8a686d3..067c6af14e95 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -31,6 +31,9 @@
#define RZG2L_CRU_DEFAULT_FIELD V4L2_FIELD_NONE
#define RZG2L_CRU_DEFAULT_COLORSPACE V4L2_COLORSPACE_SRGB
+#define RZG2L_CRU_STRIDE_MAX 32640
+#define RZG2L_CRU_STRIDE_ALIGN 128
+
struct rzg2l_cru_buffer {
struct vb2_v4l2_buffer vb;
struct list_head list;
@@ -42,16 +45,66 @@ struct rzg2l_cru_buffer {
/* -----------------------------------------------------------------------------
* DMA operations
*/
-static void rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
+static void __rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
{
- iowrite32(value, cru->base + offset);
+ const u16 *regs = cru->info->regs;
+
+ /*
+ * CRUnCTRL is the first register on all supported CRU SoCs, so
+ * validate that the remaining registers have a valid offset set in
+ * cru->info->regs.
+ */
+ if (WARN_ON(offset >= RZG2L_CRU_MAX_REG) ||
+ WARN_ON(offset != CRUnCTRL && regs[offset] == 0))
+ return;
+
+ iowrite32(value, cru->base + regs[offset]);
}
-static u32 rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
+static u32 __rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
{
- return ioread32(cru->base + offset);
+ const u16 *regs = cru->info->regs;
+
+ /*
+ * CRUnCTRL is the first register on all supported CRU SoCs, so
+ * validate that the remaining registers have a valid offset set in
+ * cru->info->regs.
+ */
+ if (WARN_ON(offset >= RZG2L_CRU_MAX_REG) ||
+ WARN_ON(offset != CRUnCTRL && regs[offset] == 0))
+ return 0;
+
+ return ioread32(cru->base + regs[offset]);
}
+static __always_inline void
+__rzg2l_cru_write_constant(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
+{
+ const u16 *regs = cru->info->regs;
+
+ BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);
+
+ iowrite32(value, cru->base + regs[offset]);
+}
+
+static __always_inline u32
+__rzg2l_cru_read_constant(struct rzg2l_cru_dev *cru, u32 offset)
+{
+ const u16 *regs = cru->info->regs;
+
+ BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);
+
+ return ioread32(cru->base + regs[offset]);
+}
+
+#define rzg2l_cru_write(cru, offset, value) \
+ (__builtin_constant_p(offset) ? \
+ __rzg2l_cru_write_constant(cru, offset, value) : \
+ __rzg2l_cru_write(cru, offset, value))
+
+#define rzg2l_cru_read(cru, offset) \
+ (__builtin_constant_p(offset) ? \
+ __rzg2l_cru_read_constant(cru, offset) : \
+ __rzg2l_cru_read(cru, offset))
+
/* Need to hold qlock before calling */
static void return_unused_buffers(struct rzg2l_cru_dev *cru,
enum vb2_buffer_state state)
@@ -134,6 +187,8 @@ static void rzg2l_cru_set_slot_addr(struct rzg2l_cru_dev *cru,
/* Currently, we just use the buffer in 32 bits address */
rzg2l_cru_write(cru, AMnMBxADDRL(slot), addr);
rzg2l_cru_write(cru, AMnMBxADDRH(slot), 0);
+
+ cru->buf_addr[slot] = addr;
}
/*
@@ -174,6 +229,7 @@ static void rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev *cru, int slot)
static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
{
+ const struct rzg2l_cru_info *info = cru->info;
unsigned int slot;
u32 amnaxiattr;
@@ -186,35 +242,64 @@ static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
for (slot = 0; slot < cru->num_buf; slot++)
rzg2l_cru_fill_hw_slot(cru, slot);
+ if (info->has_stride) {
+ u32 stride = cru->format.bytesperline;
+ u32 amnis;
+
+ stride /= RZG2L_CRU_STRIDE_ALIGN;
+ amnis = rzg2l_cru_read(cru, AMnIS) & ~AMnIS_IS_MASK;
+ rzg2l_cru_write(cru, AMnIS, amnis | AMnIS_IS(stride));
+ }
+
/* Set AXI burst max length to recommended setting */
amnaxiattr = rzg2l_cru_read(cru, AMnAXIATTR) & ~AMnAXIATTR_AXILEN_MASK;
amnaxiattr |= AMnAXIATTR_AXILEN;
rzg2l_cru_write(cru, AMnAXIATTR, amnaxiattr);
}
-static void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc)
+void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc)
{
+ const struct rzg2l_cru_info *info = cru->info;
u32 icnmc = ICnMC_INF(ip_fmt->datatype);
- icnmc |= (rzg2l_cru_read(cru, ICnMC) & ~ICnMC_INF_MASK);
+ icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
/* Set virtual channel CSI2 */
icnmc |= ICnMC_VCSEL(csi_vc);
- rzg2l_cru_write(cru, ICnMC, icnmc);
+ rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
+ rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
+ ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
+ rzg2l_cru_write(cru, info->image_conv, icnmc);
+}
+
+void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc)
+{
+ const struct rzg2l_cru_info *info = cru->info;
+ u32 icnmc = ICnMC_INF(ip_fmt->datatype);
+
+ icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
+
+ /* Set virtual channel CSI2 */
+ icnmc |= ICnMC_VCSEL(csi_vc);
+
+ rzg2l_cru_write(cru, info->image_conv, icnmc);
}
static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
struct v4l2_mbus_framefmt *ip_sd_fmt,
u8 csi_vc)
{
+ const struct rzg2l_cru_info *info = cru->info;
const struct rzg2l_cru_ip_format *cru_video_fmt;
const struct rzg2l_cru_ip_format *cru_ip_fmt;
cru_ip_fmt = rzg2l_cru_ip_code_to_fmt(ip_sd_fmt->code);
- rzg2l_cru_csi2_setup(cru, cru_ip_fmt, csi_vc);
+ info->csi_setup(cru, cru_ip_fmt, csi_vc);
/* Output format */
cru_video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
@@ -226,11 +311,11 @@ static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
/* If input and output use same colorspace, do bypass mode */
if (cru_ip_fmt->yuv == cru_video_fmt->yuv)
- rzg2l_cru_write(cru, ICnMC,
- rzg2l_cru_read(cru, ICnMC) | ICnMC_CSCTHR);
+ rzg2l_cru_write(cru, info->image_conv,
+ rzg2l_cru_read(cru, info->image_conv) | ICnMC_CSCTHR);
else
- rzg2l_cru_write(cru, ICnMC,
- rzg2l_cru_read(cru, ICnMC) & (~ICnMC_CSCTHR));
+ rzg2l_cru_write(cru, info->image_conv,
+ rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_CSCTHR);
/* Set output data format */
rzg2l_cru_write(cru, ICnDMR, cru_video_fmt->icndmr);
@@ -238,9 +323,36 @@ static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
return 0;
}
-void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
+bool rz3e_fifo_empty(struct rzg2l_cru_dev *cru)
+{
+ u32 amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
+
+ if ((((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B1) >> 24) ==
+ ((amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B1) >> 8)) &&
+ (((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B0) >> 16) ==
+ (amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B0)))
+ return true;
+
+ return false;
+}
+
+bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru)
{
u32 amnfifopntr, amnfifopntr_w, amnfifopntr_r_y;
+
+ amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
+
+ amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
+ amnfifopntr_r_y =
+ (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
+	return amnfifopntr_w == amnfifopntr_r_y;
+}
+
+void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
+{
unsigned int retries = 0;
unsigned long flags;
u32 icnms;
@@ -248,8 +360,7 @@ void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
spin_lock_irqsave(&cru->qlock, flags);
/* Disable and clear the interrupt */
- rzg2l_cru_write(cru, CRUnIE, 0);
- rzg2l_cru_write(cru, CRUnINTS, 0x001F0F0F);
+ cru->info->disable_interrupts(cru);
/* Stop the operation of image conversion */
rzg2l_cru_write(cru, ICnEN, 0);
@@ -269,12 +380,7 @@ void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
/* Wait until the FIFO becomes empty */
for (retries = 5; retries > 0; retries--) {
- amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
-
- amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
- amnfifopntr_r_y =
- (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
- if (amnfifopntr_w == amnfifopntr_r_y)
+ if (cru->info->fifo_empty(cru))
break;
usleep_range(10, 20);
@@ -341,6 +447,31 @@ static int rzg2l_cru_get_virtual_channel(struct rzg2l_cru_dev *cru)
return fd.entry[0].bus.csi2.vc;
}
+void rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
+{
+	rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FSxE(cru->svc_channel) |
+				      CRUnIE2_FExE(cru->svc_channel));
+}
+
+void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
+{
+ rzg2l_cru_write(cru, CRUnIE, 0);
+ rzg2l_cru_write(cru, CRUnIE2, 0);
+ rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));
+ rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
+}
+
+void rzg2l_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
+{
+ rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
+}
+
+void rzg2l_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
+{
+ rzg2l_cru_write(cru, CRUnIE, 0);
+ rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);
+}
+
int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
{
struct v4l2_mbus_framefmt *fmt = rzg2l_cru_ip_get_src_fmt(cru);
@@ -352,6 +483,7 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
if (ret < 0)
return ret;
csi_vc = ret;
+ cru->svc_channel = csi_vc;
spin_lock_irqsave(&cru->qlock, flags);
@@ -362,8 +494,7 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
rzg2l_cru_write(cru, CRUnRST, CRUnRST_VRESETN);
/* Disable and clear the interrupt before using */
- rzg2l_cru_write(cru, CRUnIE, 0);
- rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);
+ cru->info->disable_interrupts(cru);
/* Initialize the AXI master */
rzg2l_cru_initialize_axi(cru);
@@ -376,7 +507,7 @@ int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
}
/* Enable interrupt */
- rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
+ cru->info->enable_interrupts(cru);
/* Enable image processing reception */
rzg2l_cru_write(cru, ICnEN, ICnEN_ICEN);
@@ -531,6 +662,104 @@ done:
return IRQ_RETVAL(handled);
}
+static int rzg3e_cru_get_current_slot(struct rzg2l_cru_dev *cru)
+{
+ u64 amnmadrs;
+ int slot;
+
+ /*
+	 * Reading AMnMADRSL also latches the higher-order address bits
+	 * into AMnMADRSH, so AMnMADRSH must be read after AMnMADRSL.
+ */
+ amnmadrs = rzg2l_cru_read(cru, AMnMADRSL);
+ amnmadrs |= (u64)rzg2l_cru_read(cru, AMnMADRSH) << 32;
+
+	/* Find the slot whose buffer contains amnmadrs. */
+ for (slot = 0; slot < cru->num_buf; slot++) {
+ if (amnmadrs >= cru->buf_addr[slot] &&
+ amnmadrs < cru->buf_addr[slot] + cru->format.sizeimage)
+ return slot;
+ }
+
+ dev_err(cru->dev, "Invalid MB address 0x%llx (out of range)\n", amnmadrs);
+ return -EINVAL;
+}
+
+irqreturn_t rzg3e_cru_irq(int irq, void *data)
+{
+ struct rzg2l_cru_dev *cru = data;
+ u32 irq_status;
+ int slot;
+
+ scoped_guard(spinlock, &cru->qlock) {
+ irq_status = rzg2l_cru_read(cru, CRUnINTS2);
+ if (!irq_status)
+ return IRQ_NONE;
+
+ dev_dbg(cru->dev, "CRUnINTS2 0x%x\n", irq_status);
+
+ rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
+
+ /* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
+ if (cru->state == RZG2L_CRU_DMA_STOPPED) {
+ dev_dbg(cru->dev, "IRQ while state stopped\n");
+ return IRQ_HANDLED;
+ }
+
+ if (cru->state == RZG2L_CRU_DMA_STOPPING) {
+ if (irq_status & CRUnINTS2_FSxS(0) ||
+ irq_status & CRUnINTS2_FSxS(1) ||
+ irq_status & CRUnINTS2_FSxS(2) ||
+ irq_status & CRUnINTS2_FSxS(3))
+ dev_dbg(cru->dev, "IRQ while state stopping\n");
+ return IRQ_HANDLED;
+ }
+
+ slot = rzg3e_cru_get_current_slot(cru);
+ if (slot < 0)
+ return IRQ_HANDLED;
+
+ dev_dbg(cru->dev, "Current written slot: %d\n", slot);
+ cru->buf_addr[slot] = 0;
+
+ /*
+		 * To hand buffers back to userspace in a known order, start
+		 * capturing from slot 0.
+ */
+ if (cru->state == RZG2L_CRU_DMA_STARTING) {
+ if (slot != 0) {
+ dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
+ return IRQ_HANDLED;
+ }
+ dev_dbg(cru->dev, "Capture start synced!\n");
+ cru->state = RZG2L_CRU_DMA_RUNNING;
+ }
+
+ /* Capture frame */
+ if (cru->queue_buf[slot]) {
+ struct vb2_v4l2_buffer *buf = cru->queue_buf[slot];
+
+ buf->field = cru->format.field;
+ buf->sequence = cru->sequence;
+ buf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&buf->vb2_buf, VB2_BUF_STATE_DONE);
+ cru->queue_buf[slot] = NULL;
+ } else {
+ /* Scratch buffer was used, dropping frame. */
+ dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
+ }
+
+ cru->sequence++;
+
+ /* Prepare for next frame */
+ rzg2l_cru_fill_hw_slot(cru, slot);
+ }
+
+ return IRQ_HANDLED;
+}
+
static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count)
{
struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
@@ -686,6 +915,7 @@ error:
static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
struct v4l2_pix_format *pix)
{
+ const struct rzg2l_cru_info *info = cru->info;
const struct rzg2l_cru_ip_format *fmt;
fmt = rzg2l_cru_ip_format_to_fmt(pix->pixelformat);
@@ -708,10 +938,17 @@ static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
}
/* Limit to CRU capabilities */
- v4l_bound_align_image(&pix->width, 320, RZG2L_CRU_MAX_INPUT_WIDTH, 1,
- &pix->height, 240, RZG2L_CRU_MAX_INPUT_HEIGHT, 2, 0);
+ v4l_bound_align_image(&pix->width, 320, info->max_width, 1,
+ &pix->height, 240, info->max_height, 2, 0);
+
+ if (info->has_stride) {
+ u32 stride = clamp(pix->bytesperline, pix->width * fmt->bpp,
+ RZG2L_CRU_STRIDE_MAX);
+ pix->bytesperline = round_up(stride, RZG2L_CRU_STRIDE_ALIGN);
+ } else {
+ pix->bytesperline = pix->width * fmt->bpp;
+ }
- pix->bytesperline = pix->width * fmt->bpp;
pix->sizeimage = pix->bytesperline * pix->height;
dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n",
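The accessor rework above pairs a per-SoC offset table with a __builtin_constant_p() dispatch: offsets known at compile time are validated by BUILD_BUG_ON() at zero runtime cost, while variable offsets fall back to the WARN_ON()-guarded runtime check. A minimal user-space sketch of the same dispatch pattern, with hypothetical names and a plain array standing in for the ioremap()ed register window:

#include <stdint.h>
#include <stdio.h>

#define MAX_REG 4

/* Per-SoC offset table: logical index -> byte offset (0 = not implemented). */
static const uint16_t soc_regs[MAX_REG] = { 0x00, 0x10, 0x00, 0x24 };

static uint32_t mmio[64]; /* stands in for the MMIO register window */

static void reg_write_const(unsigned int idx, uint32_t val)
{
	/* In the kernel this path is guarded by BUILD_BUG_ON(idx >= MAX_REG). */
	mmio[soc_regs[idx] / 4] = val;
}

static void reg_write_var(unsigned int idx, uint32_t val)
{
	/* Runtime path: reject unknown or unimplemented registers. */
	if (idx >= MAX_REG || (idx != 0 && soc_regs[idx] == 0))
		return;
	mmio[soc_regs[idx] / 4] = val;
}

#define reg_write(idx, val) \
	(__builtin_constant_p(idx) ? reg_write_const(idx, val) \
				   : reg_write_var(idx, val))

int main(void)
{
	unsigned int runtime_idx = 2; /* not implemented on this "SoC" */

	reg_write(1, 0xcafef00d);   /* constant index: static path */
	reg_write(runtime_idx, 1);  /* variable index: validated and dropped */
	printf("reg1=0x%x dropped=0x%x\n", mmio[0x10 / 4], mmio[0]);
	return 0;
}

Both branches of the ternary return void, so the macro remains usable in statement position exactly like the function it replaces.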
diff --git a/drivers/media/platform/renesas/vsp1/Makefile b/drivers/media/platform/renesas/vsp1/Makefile
index 4bb4dcbef7b5..de8c802e1d1a 100644
--- a/drivers/media/platform/renesas/vsp1/Makefile
+++ b/drivers/media/platform/renesas/vsp1/Makefile
@@ -5,6 +5,6 @@ vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
vsp1-y += vsp1_clu.o vsp1_hsit.o vsp1_lut.o
vsp1-y += vsp1_brx.o vsp1_sru.o vsp1_uds.o
vsp1-y += vsp1_hgo.o vsp1_hgt.o vsp1_histo.o
-vsp1-y += vsp1_lif.o vsp1_uif.o
+vsp1-y += vsp1_iif.o vsp1_lif.o vsp1_uif.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/renesas/vsp1/vsp1.h b/drivers/media/platform/renesas/vsp1/vsp1.h
index 2f6f0c6ae555..f97a1a31bfab 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1.h
@@ -32,6 +32,7 @@ struct vsp1_clu;
struct vsp1_hgo;
struct vsp1_hgt;
struct vsp1_hsit;
+struct vsp1_iif;
struct vsp1_lif;
struct vsp1_lut;
struct vsp1_rwpf;
@@ -56,6 +57,8 @@ struct vsp1_uif;
#define VSP1_HAS_BRS BIT(9)
#define VSP1_HAS_EXT_DL BIT(10)
#define VSP1_HAS_NON_ZERO_LBA BIT(11)
+#define VSP1_HAS_IIF BIT(12)
+#define VSP1_HAS_HSIT BIT(13)
struct vsp1_device_info {
u32 version;
@@ -91,6 +94,7 @@ struct vsp1_device {
struct vsp1_hgt *hgt;
struct vsp1_hsit *hsi;
struct vsp1_hsit *hst;
+ struct vsp1_iif *iif;
struct vsp1_lif *lif[VSP1_MAX_LIF];
struct vsp1_lut *lut;
struct vsp1_rwpf *rpf[VSP1_MAX_RPF];
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_brx.c b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
index 5dee0490c593..5fc2e5a3bb30 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_brx.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
@@ -15,6 +15,7 @@
#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_dl.h"
+#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"
@@ -108,6 +109,8 @@ static void brx_try_format(struct vsp1_brx *brx,
if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ vsp1_entity_adjust_color_space(fmt);
break;
default:
@@ -115,13 +118,17 @@ static void brx_try_format(struct vsp1_brx *brx,
format = v4l2_subdev_state_get_format(sd_state,
BRX_PAD_SINK(0));
fmt->code = format->code;
+
+ fmt->colorspace = format->colorspace;
+ fmt->xfer_func = format->xfer_func;
+ fmt->ycbcr_enc = format->ycbcr_enc;
+ fmt->quantization = format->quantization;
break;
}
fmt->width = clamp(fmt->width, BRX_MIN_SIZE, BRX_MAX_SIZE);
fmt->height = clamp(fmt->height, BRX_MIN_SIZE, BRX_MAX_SIZE);
fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
static int brx_set_format(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_dl.c b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
index ad3fa1c9cc73..bb8228b19824 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
@@ -1099,7 +1099,12 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
return NULL;
dlm->index = index;
- dlm->singleshot = vsp1->info->uapi;
+ /*
+ * uapi = single shot mode;
+ * DRM = continuous mode;
+ * VSPX = single shot mode;
+ */
+ dlm->singleshot = vsp1->info->uapi || vsp1->iif;
dlm->vsp1 = vsp1;
spin_lock_init(&dlm->lock);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
index b5d1f238f7be..fe55e8747b05 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
@@ -118,26 +118,26 @@ static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
struct vsp1_entity *uif,
unsigned int brx_input)
{
+ const struct vsp1_drm_input *input = &vsp1->drm->inputs[rpf->entity.index];
struct v4l2_subdev_selection sel = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- const struct v4l2_rect *crop;
int ret;
/*
* Configure the format on the RPF sink pad and propagate it up to the
* BRx sink pad.
*/
- crop = &vsp1->drm->inputs[rpf->entity.index].crop;
-
format.pad = RWPF_PAD_SINK;
- format.format.width = crop->width + crop->left;
- format.format.height = crop->height + crop->top;
+ format.format.width = input->crop.width + input->crop.left;
+ format.format.height = input->crop.height + input->crop.top;
format.format.code = rpf->fmtinfo->mbus;
format.format.field = V4L2_FIELD_NONE;
+ format.format.ycbcr_enc = input->ycbcr_enc;
+ format.format.quantization = input->quantization;
ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
&format);
@@ -151,7 +151,7 @@ static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
sel.pad = RWPF_PAD_SINK;
sel.target = V4L2_SEL_TGT_CROP;
- sel.r = *crop;
+ sel.r = input->crop;
ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL,
&sel);
@@ -593,8 +593,8 @@ static int vsp1_du_pipeline_set_rwpf_format(struct vsp1_device *vsp1,
fmtinfo = vsp1_get_format_info(vsp1, pixelformat);
if (!fmtinfo) {
- dev_dbg(vsp1->dev, "Unsupported pixel format %08x\n",
- pixelformat);
+ dev_dbg(vsp1->dev, "Unsupported pixel format %p4cc\n",
+ &pixelformat);
return -EINVAL;
}
@@ -826,12 +826,14 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
+ struct vsp1_drm_input *input;
struct vsp1_rwpf *rpf;
int ret;
if (rpf_index >= vsp1->info->rpf_count)
return -EINVAL;
+ input = &vsp1->drm->inputs[rpf_index];
rpf = vsp1->rpf[rpf_index];
if (!cfg) {
@@ -849,11 +851,11 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
}
dev_dbg(vsp1->dev,
- "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad, %pad } zpos %u\n",
+ "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%p4cc), pitch %u dma { %pad, %pad, %pad } zpos %u\n",
__func__, rpf_index,
cfg->src.left, cfg->src.top, cfg->src.width, cfg->src.height,
cfg->dst.left, cfg->dst.top, cfg->dst.width, cfg->dst.height,
- cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
+ &cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
&cfg->mem[2], cfg->zpos);
/*
@@ -873,9 +875,11 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
rpf->format.flags = cfg->premult ? V4L2_PIX_FMT_FLAG_PREMUL_ALPHA : 0;
- vsp1->drm->inputs[rpf_index].crop = cfg->src;
- vsp1->drm->inputs[rpf_index].compose = cfg->dst;
- vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
+ input->crop = cfg->src;
+ input->compose = cfg->dst;
+ input->zpos = cfg->zpos;
+ input->ycbcr_enc = cfg->color_encoding;
+ input->quantization = cfg->color_range;
drm_pipe->pipe.inputs[rpf_index] = rpf;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.h b/drivers/media/platform/renesas/vsp1/vsp1_drm.h
index 3fd95b53f27e..07a5d0adbd08 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.h
@@ -52,17 +52,19 @@ struct vsp1_drm_pipeline {
* struct vsp1_drm - State for the API exposed to the DRM driver
* @pipe: the VSP1 DRM pipeline used for display
* @lock: protects the BRU and BRS allocation
- * @inputs: source crop rectangle, destination compose rectangle and z-order
- * position for every input (indexed by RPF index)
+ * @inputs: source crop rectangle, destination compose rectangle, z-order
+ * position and colorspace for every input (indexed by RPF index)
*/
struct vsp1_drm {
struct vsp1_drm_pipeline pipe[VSP1_MAX_LIF];
struct mutex lock;
- struct {
+ struct vsp1_drm_input {
struct v4l2_rect crop;
struct v4l2_rect compose;
unsigned int zpos;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
} inputs[VSP1_MAX_RPF];
};
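The two new members are fed from the display side through vsp1_du_atomic_update(), as seen in the vsp1_drm.c hunk above. A sketch of a hypothetical caller, assuming the struct vsp1_du_atomic_config layout from include/media/vsp1.h:

#include <linux/videodev2.h>
#include <media/vsp1.h>

/* Hypothetical helper: queue one NV12 plane with explicit colour state. */
static int example_push_plane(struct device *vsp1_dev, dma_addr_t luma,
			      dma_addr_t chroma)
{
	struct vsp1_du_atomic_config cfg = {
		.pixelformat = V4L2_PIX_FMT_NV12M,
		.pitch = 1920,
		.mem = { luma, chroma },
		.src = { .left = 0, .top = 0, .width = 1920, .height = 1080 },
		.dst = { .left = 0, .top = 0, .width = 1920, .height = 1080 },
		.alpha = 255,
		.zpos = 0,
		/* New in this series: propagate the plane's colour state. */
		.color_encoding = V4L2_YCBCR_ENC_709,
		.color_range = V4L2_QUANTIZATION_LIM_RANGE,
	};

	/* Pipe 0, RPF 0. */
	return vsp1_du_atomic_update(vsp1_dev, 0, 0, &cfg);
}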
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index 9fc6bf624a52..8270a9d207cb 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -29,6 +29,7 @@
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_hsit.h"
+#include "vsp1_iif.h"
#include "vsp1_lif.h"
#include "vsp1_lut.h"
#include "vsp1_pipe.h"
@@ -302,22 +303,6 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->clu->entity.list_dev, &vsp1->entities);
}
- vsp1->hsi = vsp1_hsit_create(vsp1, true);
- if (IS_ERR(vsp1->hsi)) {
- ret = PTR_ERR(vsp1->hsi);
- goto done;
- }
-
- list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
-
- vsp1->hst = vsp1_hsit_create(vsp1, false);
- if (IS_ERR(vsp1->hst)) {
- ret = PTR_ERR(vsp1->hst);
- goto done;
- }
-
- list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
-
if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
vsp1->hgo = vsp1_hgo_create(vsp1);
if (IS_ERR(vsp1->hgo)) {
@@ -340,6 +325,34 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
&vsp1->entities);
}
+ if (vsp1_feature(vsp1, VSP1_HAS_IIF)) {
+ vsp1->iif = vsp1_iif_create(vsp1);
+ if (IS_ERR(vsp1->iif)) {
+ ret = PTR_ERR(vsp1->iif);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->iif->entity.list_dev, &vsp1->entities);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HSIT)) {
+ vsp1->hsi = vsp1_hsit_create(vsp1, true);
+ if (IS_ERR(vsp1->hsi)) {
+ ret = PTR_ERR(vsp1->hsi);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
+
+ vsp1->hst = vsp1_hsit_create(vsp1, false);
+ if (IS_ERR(vsp1->hst)) {
+ ret = PTR_ERR(vsp1->hst);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
+ }
+
/*
* The LIFs are only supported when used in conjunction with the DU, in
* which case the userspace API is disabled. If the userspace API is
@@ -683,8 +696,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.model = "VSP1-S",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
- | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
- | VSP1_HAS_WPF_VFLIP,
+ | VSP1_HAS_HGT | VSP1_HAS_HSIT | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 3,
.wpf_count = 4,
@@ -694,7 +707,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPR_H2,
.model = "VSP1-R",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_HSIT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 3,
.wpf_count = 4,
@@ -704,7 +718,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
.model = "VSP1-D",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT,
+ .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_HSIT
+ | VSP1_HAS_LUT,
.lif_count = 1,
.rpf_count = 4,
.uds_count = 1,
@@ -716,8 +731,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.model = "VSP1-S",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
- | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
- | VSP1_HAS_WPF_VFLIP,
+ | VSP1_HAS_HGT | VSP1_HAS_HSIT | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 1,
.wpf_count = 4,
@@ -727,8 +742,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPS_V2H,
.model = "VSP1V-S",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
- | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HSIT
+ | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 4,
.uds_count = 1,
.wpf_count = 4,
@@ -738,7 +753,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_V2H,
.model = "VSP1V-D",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HSIT
+ | VSP1_HAS_LUT,
.lif_count = 1,
.rpf_count = 4,
.uds_count = 1,
@@ -750,8 +766,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.model = "VSP2-I",
.gen = 3,
.features = VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT
- | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_HFLIP
- | VSP1_HAS_WPF_VFLIP,
+ | VSP1_HAS_HSIT | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_HFLIP | VSP1_HAS_WPF_VFLIP,
.rpf_count = 1,
.uds_count = 1,
.wpf_count = 1,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.c b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
index 8b8945bd8f10..a6680d531872 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
@@ -63,9 +63,14 @@ void vsp1_entity_route_setup(struct vsp1_entity *entity,
/*
* The ILV and BRS share the same data path route. The extra BRSSEL bit
* selects between the ILV and BRS.
+ *
+ * The BRU and IIF share the same data path route. The extra IIFSEL bit
+ * selects between the IIF and BRU.
*/
if (source->type == VSP1_ENTITY_BRS)
route |= VI6_DPR_ROUTE_BRSSEL;
+ else if (source->type == VSP1_ENTITY_IIF)
+ route |= VI6_DPR_ROUTE_IIFSEL;
vsp1_dl_body_write(dlb, source->route->reg, route);
}
@@ -99,6 +104,20 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
dl, dlb);
}
+void vsp1_entity_adjust_color_space(struct v4l2_mbus_framefmt *format)
+{
+ u8 xfer_func = format->xfer_func;
+ u8 ycbcr_enc = format->ycbcr_enc;
+ u8 quantization = format->quantization;
+
+ vsp1_adjust_color_space(format->code, &format->colorspace, &xfer_func,
+ &ycbcr_enc, &quantization);
+
+ format->xfer_func = xfer_func;
+ format->ycbcr_enc = ycbcr_enc;
+ format->quantization = quantization;
+}
+
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
@@ -329,7 +348,13 @@ int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
format->height = clamp_t(unsigned int, fmt->format.height,
min_height, max_height);
format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->colorspace = fmt->format.colorspace;
+ format->xfer_func = fmt->format.xfer_func;
+ format->ycbcr_enc = fmt->format.ycbcr_enc;
+ format->quantization = fmt->format.quantization;
+
+ vsp1_entity_adjust_color_space(format);
fmt->format = *format;
@@ -528,6 +553,9 @@ struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad)
{ VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) }
static const struct vsp1_route vsp1_routes[] = {
+ { VSP1_ENTITY_IIF, 0, VI6_DPR_BRU_ROUTE,
+ { VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1),
+ VI6_DPR_NODE_BRU_IN(3) }, VI6_DPR_NODE_WPF(0) },
{ VSP1_ENTITY_BRS, 0, VI6_DPR_ILV_BRS_ROUTE,
{ VI6_DPR_NODE_BRS_IN(0), VI6_DPR_NODE_BRS_IN(1) }, 0 },
{ VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.h b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
index 1bcc9e27dfdc..b7c72d0b7f8e 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
@@ -28,6 +28,7 @@ enum vsp1_entity_type {
VSP1_ENTITY_HGT,
VSP1_ENTITY_HSI,
VSP1_ENTITY_HST,
+ VSP1_ENTITY_IIF,
VSP1_ENTITY_LIF,
VSP1_ENTITY_LUT,
VSP1_ENTITY_RPF,
@@ -170,6 +171,8 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb);
+void vsp1_entity_adjust_color_space(struct v4l2_mbus_framefmt *format);
+
struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad);
int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hsit.c b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
index 8ba2a7c7305c..1fcd1967d3b2 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
@@ -14,6 +14,7 @@
#include "vsp1.h"
#include "vsp1_dl.h"
+#include "vsp1_entity.h"
#include "vsp1_hsit.h"
#define HSIT_MIN_SIZE 4U
@@ -96,7 +97,13 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
format->height = clamp_t(unsigned int, fmt->format.height,
HSIT_MIN_SIZE, HSIT_MAX_SIZE);
format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->colorspace = fmt->format.colorspace;
+ format->xfer_func = fmt->format.xfer_func;
+ format->ycbcr_enc = fmt->format.ycbcr_enc;
+ format->quantization = fmt->format.quantization;
+
+ vsp1_entity_adjust_color_space(format);
fmt->format = *format;
@@ -106,6 +113,8 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
: MEDIA_BUS_FMT_AHSV8888_1X32;
+ vsp1_entity_adjust_color_space(format);
+
done:
mutex_unlock(&hsit->entity.lock);
return ret;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_iif.c b/drivers/media/platform/renesas/vsp1/vsp1_iif.c
new file mode 100644
index 000000000000..5dd62bebbe8c
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_iif.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_iif.c -- R-Car VSP1 IIF (ISP Interface)
+ *
+ * Copyright (C) 2025 Ideas On Board Oy
+ * Copyright (C) 2025 Renesas Corporation
+ */
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_iif.h"
+
+#define IIF_MIN_WIDTH 128U
+#define IIF_MIN_HEIGHT 32U
+#define IIF_MAX_WIDTH 5120U
+#define IIF_MAX_HEIGHT 4096U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_iif_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const unsigned int iif_codes[] = {
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12,
+ MEDIA_BUS_FMT_Y16_1X16,
+ MEDIA_BUS_FMT_METADATA_FIXED
+};
+
+static int iif_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, iif_codes,
+ ARRAY_SIZE(iif_codes));
+}
+
+static int iif_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ IIF_MIN_WIDTH, IIF_MIN_HEIGHT,
+ IIF_MAX_WIDTH, IIF_MAX_HEIGHT);
+}
+
+static int iif_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, iif_codes,
+ ARRAY_SIZE(iif_codes),
+ IIF_MIN_WIDTH, IIF_MIN_HEIGHT,
+ IIF_MAX_WIDTH, IIF_MAX_HEIGHT);
+}
+
+static const struct v4l2_subdev_pad_ops iif_pad_ops = {
+ .enum_mbus_code = iif_enum_mbus_code,
+ .enum_frame_size = iif_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = iif_set_format,
+};
+
+static const struct v4l2_subdev_ops iif_ops = {
+ .pad = &iif_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void iif_configure_stream(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *state,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ vsp1_iif_write(dlb, VI6_IIF_CTRL, VI6_IIF_CTRL_CTRL);
+}
+
+static const struct vsp1_entity_operations iif_entity_ops = {
+ .configure_stream = iif_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_iif *vsp1_iif_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_iif *iif;
+ int ret;
+
+ iif = devm_kzalloc(vsp1->dev, sizeof(*iif), GFP_KERNEL);
+ if (!iif)
+ return ERR_PTR(-ENOMEM);
+
+ iif->entity.ops = &iif_entity_ops;
+ iif->entity.type = VSP1_ENTITY_IIF;
+
+ /*
+ * The IIF is never exposed to userspace, but media entity registration
+ * requires a function to be set. Use PROC_VIDEO_PIXEL_FORMATTER just to
+	 * avoid triggering a WARN_ON(); the value won't be seen anywhere.
+ */
+ ret = vsp1_entity_init(vsp1, &iif->entity, "iif", 3, &iif_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return iif;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_iif.h b/drivers/media/platform/renesas/vsp1/vsp1_iif.h
new file mode 100644
index 000000000000..46f327851c35
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_iif.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_iif.h -- R-Car VSP1 IIF (ISP Interface)
+ *
+ * Copyright (C) 2025 Ideas On Board Oy
+ * Copyright (C) 2025 Renesas Corporation
+ */
+#ifndef __VSP1_IIF_H__
+#define __VSP1_IIF_H__
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+#define VSPX_IIF_SINK_PAD_IMG 0
+#define VSPX_IIF_SINK_PAD_CONFIG 2
+
+struct vsp1_iif {
+ struct vsp1_entity entity;
+};
+
+static inline struct vsp1_iif *to_iif(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_iif, entity.subdev);
+}
+
+struct vsp1_iif *vsp1_iif_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_IIF_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
index bb0739f684f3..3cbb768cf6ad 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
@@ -138,14 +138,6 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 32, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
- VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 24, 0, 0 }, false, false, 1, 1, false },
- { V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
- VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 32, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_RGBX1010102, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_RGB10_RGB10A2_A2RGB10,
VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
@@ -162,10 +154,6 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 16, 0, 0 }, false, false, 2, 1, false },
- { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
- VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
- VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
- 1, { 16, 0, 0 }, false, true, 2, 1, false },
{ V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
@@ -222,6 +210,24 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
1, { 32, 0, 0 }, false, false, 2, 1, false },
};
+static const struct vsp1_format_info vsp1_video_gen2_formats[] = {
+ { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, true, 2, 1, false },
+};
+
+static const struct vsp1_format_info vsp1_video_hsit_formats[] = {
+ { V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+};
+
/**
* vsp1_get_format_info - Retrieve format information for a 4CC
* @vsp1: the VSP1 device
@@ -235,26 +241,164 @@ const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
{
unsigned int i;
- /* Special case, the VYUY and HSV formats are supported on Gen2 only. */
- if (vsp1->info->gen != 2) {
- switch (fourcc) {
- case V4L2_PIX_FMT_VYUY:
- case V4L2_PIX_FMT_HSV24:
- case V4L2_PIX_FMT_HSV32:
- return NULL;
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
+ const struct vsp1_format_info *info = &vsp1_video_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
+ }
+
+ if (vsp1->info->gen == 2) {
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_gen2_formats); ++i) {
+ const struct vsp1_format_info *info =
+ &vsp1_video_gen2_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
+ }
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HSIT)) {
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_hsit_formats); ++i) {
+ const struct vsp1_format_info *info =
+ &vsp1_video_hsit_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
}
}
+ return NULL;
+}
+
+/**
+ * vsp1_get_format_info_by_index - Enumerate format information
+ * @vsp1: the VSP1 device
+ * @index: the format index
+ * @code: media bus code to limit enumeration
+ *
+ * Return a pointer to the format information structure corresponding to the
+ * given index, or NULL if the index exceeds the supported formats list. If the
+ * @code parameter is not zero, only formats compatible with the media bus code
+ * will be enumerated.
+ */
+const struct vsp1_format_info *
+vsp1_get_format_info_by_index(struct vsp1_device *vsp1, unsigned int index,
+ u32 code)
+{
+ unsigned int i;
+
+ if (!code) {
+ if (index < ARRAY_SIZE(vsp1_video_formats))
+ return &vsp1_video_formats[index];
+
+		index -= ARRAY_SIZE(vsp1_video_formats);
+
+		if (vsp1->info->gen == 2) {
+			if (index < ARRAY_SIZE(vsp1_video_gen2_formats))
+				return &vsp1_video_gen2_formats[index];
+			index -= ARRAY_SIZE(vsp1_video_gen2_formats);
+		}
+
+		if (vsp1_feature(vsp1, VSP1_HAS_HSIT)) {
+			if (index < ARRAY_SIZE(vsp1_video_hsit_formats))
+				return &vsp1_video_hsit_formats[index];
+		}
+
+ return NULL;
+ }
+
for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
const struct vsp1_format_info *info = &vsp1_video_formats[i];
- if (info->fourcc == fourcc)
- return info;
+ if (info->mbus == code) {
+ if (!index)
+ return info;
+ index--;
+ }
+ }
+
+ if (vsp1->info->gen == 2) {
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_gen2_formats); ++i) {
+ const struct vsp1_format_info *info =
+ &vsp1_video_gen2_formats[i];
+
+ if (info->mbus == code) {
+ if (!index)
+ return info;
+ index--;
+ }
+ }
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HSIT)) {
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_hsit_formats); ++i) {
+ const struct vsp1_format_info *info =
+ &vsp1_video_hsit_formats[i];
+
+ if (info->mbus == code) {
+ if (!index)
+ return info;
+ index--;
+ }
+ }
}
return NULL;
}
+/**
+ * vsp1_adjust_color_space - Adjust color space fields in a format
+ * @code: the media bus code
+ * @colorspace: the colorspace
+ * @xfer_func: the transfer function
+ * @encoding: the encoding
+ * @quantization: the quantization
+ *
+ * This function adjusts all color space fields of a video device or subdev
+ * format structure, taking into account the requested format, the requested
+ * color space and the limitations of the VSP1. It should be used in the video
+ * device and subdev set format handlers.
+ *
+ * The colorspace and xfer_func fields are freely configurable, as they are out
+ * of scope for VSP processing. The encoding and quantization are hardcoded for
+ * non-YUV formats, and can be configured for YUV formats.
+ */
+void vsp1_adjust_color_space(u32 code, u32 *colorspace, u8 *xfer_func,
+ u8 *encoding, u8 *quantization)
+{
+ if (*colorspace == V4L2_COLORSPACE_DEFAULT ||
+ *colorspace >= V4L2_COLORSPACE_LAST)
+ *colorspace = code == MEDIA_BUS_FMT_AYUV8_1X32
+ ? V4L2_COLORSPACE_SMPTE170M
+ : V4L2_COLORSPACE_SRGB;
+
+ if (*xfer_func == V4L2_XFER_FUNC_DEFAULT ||
+ *xfer_func >= V4L2_XFER_FUNC_LAST)
+ *xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(*colorspace);
+
+ switch (code) {
+ case MEDIA_BUS_FMT_ARGB8888_1X32:
+ default:
+ *encoding = V4L2_YCBCR_ENC_601;
+ *quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ break;
+
+ case MEDIA_BUS_FMT_AHSV8888_1X32:
+ *encoding = V4L2_HSV_ENC_256;
+ *quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ break;
+
+ case MEDIA_BUS_FMT_AYUV8_1X32:
+ if (*encoding != V4L2_YCBCR_ENC_601 &&
+ *encoding != V4L2_YCBCR_ENC_709)
+ *encoding = V4L2_YCBCR_ENC_601;
+ if (*quantization != V4L2_QUANTIZATION_FULL_RANGE &&
+ *quantization != V4L2_QUANTIZATION_LIM_RANGE)
+ *quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ break;
+ }
+}
+
/* -----------------------------------------------------------------------------
* Pipeline Management
*/
@@ -286,6 +430,7 @@ void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
pipe->brx = NULL;
pipe->hgo = NULL;
pipe->hgt = NULL;
+ pipe->iif = NULL;
pipe->lif = NULL;
pipe->uds = NULL;
}
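Both the video device and subdev set-format handlers funnel their colour fields through vsp1_adjust_color_space(). A minimal sketch of the call pattern, resolving the defaults for a YUV media bus code (the wrapper function is illustrative):

/* Illustrative only: resolve the V4L2 colour defaults for an AYUV format. */
static void example_resolve_defaults(void)
{
	u32 colorspace = V4L2_COLORSPACE_DEFAULT;
	u8 xfer_func = V4L2_XFER_FUNC_DEFAULT;
	u8 ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	u8 quantization = V4L2_QUANTIZATION_DEFAULT;

	vsp1_adjust_color_space(MEDIA_BUS_FMT_AYUV8_1X32, &colorspace,
				&xfer_func, &ycbcr_enc, &quantization);

	/*
	 * For AYUV the defaults resolve to SMPTE 170M with BT.601 encoding
	 * and limited range; an ARGB code resolves to sRGB and full range.
	 */
}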
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
index 1ba7bdbad5a8..7f623b8cbe5c 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
@@ -119,6 +119,7 @@ struct vsp1_pipeline {
struct vsp1_entity *brx;
struct vsp1_entity *hgo;
struct vsp1_entity *hgt;
+ struct vsp1_entity *iif;
struct vsp1_entity *lif;
struct vsp1_entity *uds;
struct vsp1_entity *uds_input;
@@ -179,5 +180,10 @@ void vsp1_pipeline_calculate_partition(struct vsp1_pipeline *pipe,
const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
u32 fourcc);
+const struct vsp1_format_info *
+vsp1_get_format_info_by_index(struct vsp1_device *vsp1, unsigned int index,
+ u32 code);
+void vsp1_adjust_color_space(u32 code, u32 *colorspace, u8 *xfer_func,
+ u8 *encoding, u8 *quantization);
#endif /* __VSP1_PIPE_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_regs.h b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
index 7eca82e0ba7e..86e47c2d991f 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
@@ -253,6 +253,13 @@
#define VI6_RPF_BRDITH_CTRL_CBRM BIT(0)
/* -----------------------------------------------------------------------------
+ * IIF Control Registers
+ */
+
+#define VI6_IIF_CTRL 0x0608
+#define VI6_IIF_CTRL_CTRL 0x13
+
+/* -----------------------------------------------------------------------------
* WPF Control Registers
*/
@@ -388,6 +395,7 @@
#define VI6_DPR_HST_ROUTE 0x2044
#define VI6_DPR_HSI_ROUTE 0x2048
#define VI6_DPR_BRU_ROUTE 0x204c
+#define VI6_DPR_ROUTE_IIFSEL BIT(28)
#define VI6_DPR_ILV_BRS_ROUTE 0x2050
#define VI6_DPR_ROUTE_BRSSEL BIT(28)
#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
index 5c8b3ba1bd3c..811f2b7c5cc5 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
@@ -84,7 +84,7 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
sink_format = v4l2_subdev_state_get_format(state, RWPF_PAD_SINK);
source_format = v4l2_subdev_state_get_format(state, RWPF_PAD_SOURCE);
- infmt = VI6_RPF_INFMT_CIPM
+ infmt = (pipe->iif ? 0 : VI6_RPF_INFMT_CIPM)
| (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);
if (fmtinfo->swap_yc)
@@ -92,12 +92,44 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
if (fmtinfo->swap_uv)
infmt |= VI6_RPF_INFMT_SPUVS;
- if (sink_format->code != source_format->code)
- infmt |= VI6_RPF_INFMT_CSC;
+ if (sink_format->code != source_format->code) {
+ u16 ycbcr_enc;
+ u16 quantization;
+ u32 rdtm;
+
+ if (sink_format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
+ ycbcr_enc = sink_format->ycbcr_enc;
+ quantization = sink_format->quantization;
+ } else {
+ ycbcr_enc = source_format->ycbcr_enc;
+ quantization = source_format->quantization;
+ }
+
+ if (ycbcr_enc == V4L2_YCBCR_ENC_601 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE)
+ rdtm = VI6_RPF_INFMT_RDTM_BT601;
+ else if (ycbcr_enc == V4L2_YCBCR_ENC_601 &&
+ quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ rdtm = VI6_RPF_INFMT_RDTM_BT601_EXT;
+ else if (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE)
+ rdtm = VI6_RPF_INFMT_RDTM_BT709;
+ else
+ rdtm = VI6_RPF_INFMT_RDTM_BT709_EXT;
+
+ infmt |= VI6_RPF_INFMT_CSC | rdtm;
+ }
vsp1_rpf_write(rpf, dlb, VI6_RPF_INFMT, infmt);
vsp1_rpf_write(rpf, dlb, VI6_RPF_DSWAP, fmtinfo->swap);
+ /* No further configuration for VSPX. */
+ if (pipe->iif) {
+ /* VSPX wants alpha_sel to be set to 0. */
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_ALPH_SEL, 0);
+ return;
+ }
+
if (entity->vsp1->info->gen == 4) {
u32 ext_infmt0;
u32 ext_infmt1;
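The RDTM ladder above enumerates the four encoding/range combinations that vsp1_adjust_color_space() permits, so an equivalent table-driven lookup is possible. A sketch of that alternative, reusing the ycbcr_enc and quantization locals from the hunk (not the driver's actual code):

	/* Rows: BT.601, BT.709. Columns: limited range, full range. */
	static const u32 rdtm_table[2][2] = {
		{ VI6_RPF_INFMT_RDTM_BT601, VI6_RPF_INFMT_RDTM_BT601_EXT },
		{ VI6_RPF_INFMT_RDTM_BT709, VI6_RPF_INFMT_RDTM_BT709_EXT },
	};
	bool bt709 = ycbcr_enc == V4L2_YCBCR_ENC_709;
	bool full = quantization == V4L2_QUANTIZATION_FULL_RANGE;

	infmt |= VI6_RPF_INFMT_CSC | rdtm_table[bt709][full];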
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
index 9d38203e73d0..9c8085d5d306 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
@@ -10,6 +10,7 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_entity.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"
@@ -35,6 +36,11 @@ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
code->code = codes[code->index];
+ if (code->pad == RWPF_PAD_SOURCE &&
+ code->code == MEDIA_BUS_FMT_AYUV8_1X32)
+ code->flags = V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC
+ | V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION;
+
return 0;
}
@@ -76,12 +82,45 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
format = v4l2_subdev_state_get_format(state, fmt->pad);
if (fmt->pad == RWPF_PAD_SOURCE) {
+ const struct v4l2_mbus_framefmt *sink_format =
+ v4l2_subdev_state_get_format(state, RWPF_PAD_SINK);
+ u16 flags = fmt->format.flags & V4L2_MBUS_FRAMEFMT_SET_CSC;
+ bool csc;
+
/*
* The RWPF performs format conversion but can't scale, only the
- * format code can be changed on the source pad.
+ * format code, encoding and quantization can be changed on the
+ * source pad when converting between RGB and YUV.
+ */
+ if (sink_format->code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32)
+ format->code = fmt->format.code;
+ else
+ format->code = sink_format->code;
+
+ /*
+	 * Encoding and quantization can only be configured when YCbCr
+	 * <-> RGB conversion is enabled, and the V4L2 API requires
+	 * userspace to set the V4L2_MBUS_FRAMEFMT_SET_CSC flag. If
+	 * either of these conditions is not met, use the encoding and
+	 * quantization values from the sink pad.
*/
- format->code = fmt->format.code;
+ csc = (format->code == MEDIA_BUS_FMT_AYUV8_1X32) !=
+ (sink_format->code == MEDIA_BUS_FMT_AYUV8_1X32);
+
+ if (csc && (flags & V4L2_MBUS_FRAMEFMT_SET_CSC)) {
+ format->ycbcr_enc = fmt->format.ycbcr_enc;
+ format->quantization = fmt->format.quantization;
+ } else {
+ format->ycbcr_enc = sink_format->ycbcr_enc;
+ format->quantization = sink_format->quantization;
+ }
+
+ vsp1_entity_adjust_color_space(format);
+
fmt->format = *format;
+ fmt->format.flags = flags;
+
goto done;
}
@@ -91,7 +130,13 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
format->height = clamp_t(unsigned int, fmt->format.height,
RWPF_MIN_HEIGHT, rwpf->max_height);
format->field = V4L2_FIELD_NONE;
- format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->colorspace = fmt->format.colorspace;
+ format->xfer_func = fmt->format.xfer_func;
+ format->ycbcr_enc = fmt->format.ycbcr_enc;
+ format->quantization = fmt->format.quantization;
+
+ vsp1_entity_adjust_color_space(format);
fmt->format = *format;
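Per the logic above, userspace opts into explicit encoding and quantization by setting V4L2_MBUS_FRAMEFMT_SET_CSC on the source pad while requesting a format conversion. A user-space sketch of such a caller (the subdev node path and pad number are assumptions):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media-bus-format.h>
#include <linux/v4l2-subdev.h>

/* Pad 1 is assumed to be the RWPF source pad (RWPF_PAD_SOURCE). */
static int example_set_csc(const char *subdev_path)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 1,
	};
	int ret, fd = open(subdev_path, O_RDWR);

	if (fd < 0)
		return -1;

	ret = ioctl(fd, VIDIOC_SUBDEV_G_FMT, &fmt);
	if (!ret) {
		/* Convert to YUV with an explicit encoding and range. */
		fmt.format.code = MEDIA_BUS_FMT_AYUV8_1X32;
		fmt.format.ycbcr_enc = V4L2_YCBCR_ENC_709;
		fmt.format.quantization = V4L2_QUANTIZATION_FULL_RANGE;
		fmt.format.flags = V4L2_MBUS_FRAMEFMT_SET_CSC;

		ret = ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt);
	}

	close(fd);
	return ret;
}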
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_sru.c b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
index 1759ce642e6e..bba2872afaf2 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
@@ -14,6 +14,7 @@
#include "vsp1.h"
#include "vsp1_dl.h"
+#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_sru.h"
@@ -178,6 +179,8 @@ static void sru_try_format(struct vsp1_sru *sru,
fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+ vsp1_entity_adjust_color_space(fmt);
+
fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
break;
@@ -187,6 +190,11 @@ static void sru_try_format(struct vsp1_sru *sru,
format = v4l2_subdev_state_get_format(sd_state, SRU_PAD_SINK);
fmt->code = format->code;
+ fmt->colorspace = format->colorspace;
+ fmt->xfer_func = format->xfer_func;
+ fmt->ycbcr_enc = format->ycbcr_enc;
+ fmt->quantization = format->quantization;
+
/*
* We can upscale by 2 in both direction, but not independently.
* Compare the input and output rectangles areas (avoiding
@@ -211,7 +219,6 @@ static void sru_try_format(struct vsp1_sru *sru,
}
fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
static int sru_set_format(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_uds.c b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
index c5a38478cf8c..2db473b6f83c 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
@@ -14,6 +14,7 @@
#include "vsp1.h"
#include "vsp1_dl.h"
+#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_uds.h"
@@ -177,6 +178,8 @@ static void uds_try_format(struct vsp1_uds *uds,
fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+ vsp1_entity_adjust_color_space(fmt);
+
fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE);
fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE);
break;
@@ -186,6 +189,11 @@ static void uds_try_format(struct vsp1_uds *uds,
format = v4l2_subdev_state_get_format(sd_state, UDS_PAD_SINK);
fmt->code = format->code;
+ fmt->colorspace = format->colorspace;
+ fmt->xfer_func = format->xfer_func;
+ fmt->ycbcr_enc = format->ycbcr_enc;
+ fmt->quantization = format->quantization;
+
uds_output_limits(format->width, &minimum, &maximum);
fmt->width = clamp(fmt->width, minimum, maximum);
uds_output_limits(format->height, &minimum, &maximum);
@@ -194,7 +202,6 @@ static void uds_try_format(struct vsp1_uds *uds,
}
fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
static int uds_set_format(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index 03f4efd6b82b..bc66fbdde3cc 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -127,12 +127,24 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);
pix->pixelformat = info->fourcc;
- pix->colorspace = V4L2_COLORSPACE_SRGB;
pix->field = V4L2_FIELD_NONE;
- if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
- info->fourcc == V4L2_PIX_FMT_HSV32)
- pix->hsv_enc = V4L2_HSV_ENC_256;
+ /*
+ * Adjust the colour space fields. On capture devices, userspace needs
+	 * to set the V4L2_PIX_FMT_FLAG_SET_CSC flag to override the defaults.
+	 * Reset
+ * all fields to *_DEFAULT if the flag isn't set, to then handle
+ * capture and output devices in the same way.
+ */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ !(pix->flags & V4L2_PIX_FMT_FLAG_SET_CSC)) {
+ pix->colorspace = V4L2_COLORSPACE_DEFAULT;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ vsp1_adjust_color_space(info->mbus, &pix->colorspace, &pix->xfer_func,
+ &pix->ycbcr_enc, &pix->quantization);
memset(pix->reserved, 0, sizeof(pix->reserved));
@@ -888,16 +900,36 @@ vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
- | V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_IO_MC | V4L2_CAP_VIDEO_CAPTURE_MPLANE
| V4L2_CAP_VIDEO_OUTPUT_MPLANE;
-
strscpy(cap->driver, "vsp1", sizeof(cap->driver));
strscpy(cap->card, video->video.name, sizeof(cap->card));
return 0;
}
+static int vsp1_video_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ const struct vsp1_format_info *info;
+
+ info = vsp1_get_format_info_by_index(video->vsp1, f->index, f->mbus_code);
+ if (!info)
+ return -EINVAL;
+
+ f->pixelformat = info->fourcc;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ info->mbus == MEDIA_BUS_FMT_AYUV8_1X32)
+ f->flags = V4L2_FMT_FLAG_CSC_YCBCR_ENC
+ | V4L2_FMT_FLAG_CSC_QUANTIZATION;
+
+ return 0;
+}
+
static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
@@ -1013,6 +1045,8 @@ err_pipe:
static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
.vidioc_querycap = vsp1_video_querycap,
+ .vidioc_enum_fmt_vid_cap = vsp1_video_enum_format,
+ .vidioc_enum_fmt_vid_out = vsp1_video_enum_format,
.vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format,
.vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format,
.vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format,
@@ -1207,14 +1241,14 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
video->pad.flags = MEDIA_PAD_FL_SOURCE;
video->video.vfl_dir = VFL_DIR_TX;
video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
- V4L2_CAP_STREAMING;
+ V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
} else {
direction = "output";
video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
video->pad.flags = MEDIA_PAD_FL_SINK;
video->video.vfl_dir = VFL_DIR_RX;
video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_STREAMING;
+ V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
}
mutex_init(&video->lock);
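With V4L2_CAP_IO_MC advertised, VIDIOC_ENUM_FMT honours the mbus_code field of struct v4l2_fmtdesc, letting userspace enumerate only the 4CCs reachable from a given media bus format. A small user-space sketch (the device path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media-bus-format.h>
#include <linux/videodev2.h>

/* List the 4CCs a capture video node supports for one media bus code. */
static void example_enum_formats(const char *video_path, unsigned int code)
{
	struct v4l2_fmtdesc fmt;
	int fd = open(video_path, O_RDWR);

	if (fd < 0)
		return;

	for (unsigned int i = 0; ; i++) {
		memset(&fmt, 0, sizeof(fmt));
		fmt.index = i;
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		fmt.mbus_code = code;	/* filter, honoured with V4L2_CAP_IO_MC */

		if (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) < 0)
			break;

		printf("format %u: %.4s\n", i, (const char *)&fmt.pixelformat);
	}

	close(fd);
}

/* e.g. example_enum_formats("/dev/video0", MEDIA_BUS_FMT_AYUV8_1X32); */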
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
index f176750ccd98..30662cfdf837 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
@@ -133,6 +133,7 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
{
struct vsp1_device *vsp1 = wpf->entity.vsp1;
unsigned int num_flip_ctrls;
+ int ret;
spin_lock_init(&wpf->flip.lock);
@@ -156,7 +157,9 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
num_flip_ctrls = 0;
}
- vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);
+ ret = vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);
+ if (ret < 0)
+ return ret;
if (num_flip_ctrls >= 1) {
wpf->flip.ctrls.vflip =
@@ -174,11 +177,8 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
v4l2_ctrl_cluster(3, &wpf->flip.ctrls.vflip);
}
- if (wpf->ctrls.error) {
- dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
- wpf->entity.index);
+ if (wpf->ctrls.error)
return wpf->ctrls.error;
- }
return 0;
}
@@ -247,8 +247,11 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
sink_format = v4l2_subdev_state_get_format(state, RWPF_PAD_SINK);
source_format = v4l2_subdev_state_get_format(state, RWPF_PAD_SOURCE);
- /* Format */
- if (!pipe->lif || wpf->writeback) {
+ /*
+ * Format configuration. Skip for IIF (VSPX) or if the pipe doesn't
+ * write to memory.
+ */
+ if (!pipe->iif && (!pipe->lif || wpf->writeback)) {
const struct v4l2_pix_format_mplane *format = &wpf->format;
const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
@@ -279,8 +282,33 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
(256 << VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT));
}
- if (sink_format->code != source_format->code)
- outfmt |= VI6_WPF_OUTFMT_CSC;
+ if (sink_format->code != source_format->code) {
+ u16 ycbcr_enc;
+ u16 quantization;
+ u32 wrtm;
+
+ if (sink_format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
+ ycbcr_enc = sink_format->ycbcr_enc;
+ quantization = sink_format->quantization;
+ } else {
+ ycbcr_enc = source_format->ycbcr_enc;
+ quantization = source_format->quantization;
+ }
+
+ if (ycbcr_enc == V4L2_YCBCR_ENC_601 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE)
+ wrtm = VI6_WPF_OUTFMT_WRTM_BT601;
+ else if (ycbcr_enc == V4L2_YCBCR_ENC_601 &&
+ quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ wrtm = VI6_WPF_OUTFMT_WRTM_BT601_EXT;
+ else if (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE)
+ wrtm = VI6_WPF_OUTFMT_WRTM_BT709;
+ else
+ wrtm = VI6_WPF_OUTFMT_WRTM_BT709_EXT;
+
+ outfmt |= VI6_WPF_OUTFMT_CSC | wrtm;
+ }
wpf->outfmt = outfmt;
@@ -291,7 +319,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
* Sources. If the pipeline has a single input and BRx is not used,
* configure it as the master layer. Otherwise configure all
* inputs as sub-layers and select the virtual RPF as the master
- * layer.
+ * layer. For VSPX configure the enabled sources as masters.
*/
for (i = 0; i < vsp1->info->rpf_count; ++i) {
struct vsp1_rwpf *input = pipe->inputs[i];
@@ -299,7 +327,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
if (!input)
continue;
- srcrpf |= (!pipe->brx && pipe->num_inputs == 1)
+ srcrpf |= (pipe->iif || (!pipe->brx && pipe->num_inputs == 1))
? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
: VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
}
@@ -316,6 +344,9 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
VI6_WPF_IRQ_ENB_DFEE);
+ if (pipe->iif)
+ return;
+
/*
* Configure writeback for display pipelines (the wpf writeback flag is
* never set for memory-to-memory pipelines). Start by adding a chained
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index d94917211828..8c29a1c9309a 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -881,7 +881,7 @@ static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- dev_dbg(isp->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+ dev_dbg(isp->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%ux%u\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index bf0260600a19..139177db9c6d 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -327,13 +327,6 @@
#define RKISP1_CIF_IMG_EFF_CTRL_CFG_UPD BIT(4)
#define RKISP1_CIF_IMG_EFF_CTRL_YCBCR_FULL BIT(5)
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE_SHIFT 0
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE_SHIFT 1
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA_SHIFT 2
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL_SHIFT 3
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS_SHIFT 4
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH_SHIFT 5
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_SHARPEN_SHIFT 6
#define RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK 0xe
/* IMG_EFF_COLOR_SEL */
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index f073e72a0d37..8e6b753d3081 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -600,7 +600,7 @@ static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
if (sel->target != V4L2_SEL_TGT_CROP || sel->pad == RKISP1_RSZ_PAD_SRC)
return -EINVAL;
- dev_dbg(rsz->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
+ dev_dbg(rsz->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%ux%u\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
index c3c2e474a18a..5b412afd7d60 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
@@ -700,7 +700,7 @@ static void fimc_capture_try_selection(struct fimc_ctx *ctx,
r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height);
r->left = round_down(r->left, var->hor_offs_align);
- dbg("target %#x: (%d,%d)/%dx%d, sink fmt: %dx%d",
+ dbg("target %#x: (%d,%d)/%ux%u, sink fmt: %dx%d",
target, r->left, r->top, r->width, r->height,
sink->f_width, sink->f_height);
}
@@ -1622,7 +1622,7 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
r->height = f->height;
}
- dbg("target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+ dbg("target %#x: (%d,%d)/%ux%u, f_w: %d, f_h: %d",
sel->pad, r->left, r->top, r->width, r->height,
f->f_width, f->f_height);
@@ -1671,7 +1671,7 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
spin_unlock_irqrestore(&fimc->slock, flags);
}
- dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
+ dbg("target %#x: (%d,%d)/%ux%u", sel->target, r->left, r->top,
r->width, r->height);
mutex_unlock(&fimc->lock);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c
index 366e6393817d..5f9c44e825a5 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c
@@ -164,6 +164,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is)
if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
return -EINVAL;
+ fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(is->setfile.sub_index, is, MCUCTL_REG_ISSR(2));
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
index f23e51e3da2f..0ce293b0718b 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
@@ -611,7 +611,7 @@ static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
r->left = round_down(r->left, fimc->dd->win_hor_offs_align);
r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height);
- v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d\n",
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%ux%u, sink fmt: %dx%d\n",
r->left, r->top, r->width, r->height,
frame->f_width, frame->f_height);
}
@@ -631,7 +631,7 @@ static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
r->left = round_down(r->left, fimc->dd->out_hor_offs_align);
r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
- v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d\n",
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%ux%u, source fmt: %dx%d\n",
r->left, r->top, r->width, r->height,
frame->f_width, frame->f_height);
}
@@ -1140,7 +1140,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&fimc->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d)/%ux%u, f_w: %d, f_h: %d\n",
__func__, f->rect.left, f->rect.top, f->rect.width,
f->rect.height, f->f_width, f->f_height);
@@ -1174,7 +1174,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&fimc->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d\n",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d)/%ux%u, f_w: %d, f_h: %d\n",
__func__, f->rect.left, f->rect.top, f->rect.width,
f->rect.height, f->f_width, f->f_height);
diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.h b/drivers/media/platform/samsung/exynos4-is/media-dev.h
index a50e58ab7ef7..ea496670d4b5 100644
--- a/drivers/media/platform/samsung/exynos4-is/media-dev.h
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.h
@@ -179,8 +179,8 @@ int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
static inline bool fimc_md_is_isp_available(struct device_node *node)
{
struct device_node *child __free(device_node) =
- of_get_child_by_name(node, FIMC_IS_OF_NODE_NAME);
- return child ? of_device_is_available(child) : false;
+ of_get_available_child_by_name(node, FIMC_IS_OF_NODE_NAME);
+ return child;
}
#else
#define fimc_md_is_isp_available(node) (false)
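of_get_available_child_by_name() only returns children whose status is "okay", so the separate of_device_is_available() check disappears, and the __free(device_node) annotation from <linux/cleanup.h> puts the node reference automatically at scope exit. A minimal sketch of the same pattern, with a hypothetical caller:

	#include <linux/cleanup.h>
	#include <linux/of.h>

	static bool example_has_available_child(struct device_node *np,
						const char *name)
	{
		/* Reference is put automatically when child goes out of scope. */
		struct device_node *child __free(device_node) =
			of_get_available_child_by_name(np, name);

		return child != NULL;
	}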
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
index bd1149e8abc2..3e566b65f417 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
@@ -1030,9 +1030,9 @@ static int s3c_camif_s_selection(struct file *file, void *priv,
vp->state |= ST_VP_CONFIG;
spin_unlock_irqrestore(&camif->slock, flags);
- pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%dx%d\n",
- sel->type, sel->target, sel->flags,
- sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+ pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%ux%u\n",
+ sel->type, sel->target, sel->flags,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height);
return 0;
}
@@ -1372,7 +1372,7 @@ static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
mutex_unlock(&camif->lock);
- v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d) %dx%d, size: %ux%u\n",
+ v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d)/%ux%u, size: %ux%u\n",
__func__, crop->left, crop->top, crop->width,
crop->height, mf->width, mf->height);
@@ -1424,7 +1424,7 @@ static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
}
}
- v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%dx%d, fmt: %ux%u\n",
+ v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%ux%u, fmt: %ux%u\n",
r->left, r->top, r->width, r->height, mf->width, mf->height);
}
@@ -1464,7 +1464,7 @@ static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
}
mutex_unlock(&camif->lock);
- v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %u, f_h: %u\n",
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d)/%ux%u, f_w: %u, f_h: %u\n",
__func__, crop->left, crop->top, crop->width, crop->height,
camif->mbus_fmt.width, camif->mbus_fmt.height);
diff --git a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h
index fa49fe580e1a..075a58b50b8c 100644
--- a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v6.h
@@ -45,6 +45,7 @@
#define S5P_FIMV_H2R_CMD_WAKEUP_V6 8
#define S5P_FIMV_CH_LAST_FRAME_V6 9
#define S5P_FIMV_H2R_CMD_FLUSH_V6 10
+#define S5P_FIMV_H2R_CMD_NAL_ABORT_V6 11
/* RMVME: REALLOC used? */
#define S5P_FIMV_CH_FRAME_START_REALLOC_V6 5
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index c8e0ee383af3..73fdcd362265 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -143,7 +143,7 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
static void s5p_mfc_watchdog(struct timer_list *t)
{
- struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
+ struct s5p_mfc_dev *dev = timer_container_of(dev, t, watchdog_timer);
if (test_bit(0, &dev->hw_lock))
atomic_inc(&dev->watchdog_cnt);
@@ -739,6 +739,20 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
ctx->state = MFCINST_RUNNING;
goto irq_cleanup_hw;
+ case S5P_MFC_R2H_CMD_ENC_BUFFER_FUL_RET:
+ ctx->state = MFCINST_NAL_ABORT;
+ s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ set_work_bit(ctx);
+ WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ break;
+
+ case S5P_MFC_R2H_CMD_NAL_ABORT_RET:
+ ctx->state = MFCINST_ERROR;
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ goto irq_cleanup_hw;
+
default:
mfc_debug(2, "Unknown int reason\n");
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
index 3cc2a4f5c40a..86c316c1ff8f 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
@@ -141,6 +141,7 @@ enum s5p_mfc_inst_state {
MFCINST_RES_CHANGE_INIT,
MFCINST_RES_CHANGE_FLUSH,
MFCINST_RES_CHANGE_END,
+ MFCINST_NAL_ABORT,
};
/*
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
index 0c636090d723..98f8292b3173 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
@@ -2229,6 +2229,11 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
case MFCINST_HEAD_PRODUCED:
ret = s5p_mfc_run_init_enc_buffers(ctx);
break;
+ case MFCINST_NAL_ABORT:
+ mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
+ s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc,
+ dev, S5P_FIMV_H2R_CMD_NAL_ABORT_V6, NULL);
+ break;
default:
ret = -EAGAIN;
}
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-debug.c b/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
index a27f638df11c..f9348aeacc11 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
@@ -455,11 +455,11 @@ static int last_request_show(struct seq_file *s, void *data)
seq_printf(s, "Format: %s\t\t\t%s\n",
bdisp_fmt_to_str(src), bdisp_fmt_to_str(dst));
- seq_printf(s, "Crop area: %dx%d @ %d,%d ==>\t%dx%d @ %d,%d\n",
- src.crop.width, src.crop.height,
+ seq_printf(s, "Crop area: (%d,%d)/%ux%u ==>\t(%d,%d)/%ux%u\n",
src.crop.left, src.crop.top,
- dst.crop.width, dst.crop.height,
- dst.crop.left, dst.crop.top);
+ src.crop.width, src.crop.height,
+ dst.crop.left, dst.crop.top,
+ dst.crop.width, dst.crop.height);
seq_printf(s, "Buff size: %dx%d\t\t%dx%d\n\n",
src.width, src.height, dst.width, dst.height);
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
index 73ad66ed20f2..1eb934490c0b 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -953,8 +953,8 @@ static int bdisp_s_selection(struct file *file, void *fh,
if ((out.left < 0) || (out.left >= frame->width) ||
(out.top < 0) || (out.top >= frame->height)) {
dev_err(ctx->bdisp_dev->dev,
- "Invalid crop: %dx%d@(%d,%d) vs frame: %dx%d\n",
- out.width, out.height, out.left, out.top,
+ "Invalid crop: (%d,%d)/%ux%u vs frame: %dx%d\n",
+ out.left, out.top, out.width, out.height,
frame->width, frame->height);
return -EINVAL;
}
@@ -966,8 +966,8 @@ static int bdisp_s_selection(struct file *file, void *fh,
if (((out.left + out.width) > frame->width) ||
((out.top + out.height) > frame->height)) {
dev_err(ctx->bdisp_dev->dev,
- "Invalid crop: %dx%d@(%d,%d) vs frame: %dx%d\n",
- out.width, out.height, out.left, out.top,
+ "Invalid crop: (%d,%d)/%ux%u vs frame: %dx%d\n",
+ out.left, out.top, out.width, out.height,
frame->width, frame->height);
return -EINVAL;
}
@@ -982,9 +982,9 @@ static int bdisp_s_selection(struct file *file, void *fh,
if ((out.left != in->left) || (out.top != in->top) ||
(out.width != in->width) || (out.height != in->height)) {
dev_dbg(ctx->bdisp_dev->dev,
- "%s crop updated: %dx%d@(%d,%d) -> %dx%d@(%d,%d)\n",
- __func__, in->width, in->height, in->left, in->top,
- out.width, out.height, out.left, out.top);
+ "%s crop updated: (%d,%d)/%ux%u -> (%d,%d)/%ux%u\n",
+ __func__, in->left, in->top, in->width, in->height,
+ out.left, out.top, out.width, out.height);
*in = out;
}
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 87a817dda4a9..602c37cbe177 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -62,7 +62,7 @@ static int load_c8sectpfe_fw(struct c8sectpfei *fei);
static void c8sectpfe_timer_interrupt(struct timer_list *t)
{
- struct c8sectpfei *fei = from_timer(fei, t, timer);
+ struct c8sectpfei *fei = timer_container_of(fei, t, timer);
struct channel_info *channel;
int chan_num;
diff --git a/drivers/media/platform/st/sti/delta/delta-debug.c b/drivers/media/platform/st/sti/delta/delta-debug.c
index 4b2eb6b63aa2..6acf46913cda 100644
--- a/drivers/media/platform/st/sti/delta/delta-debug.c
+++ b/drivers/media/platform/st/sti/delta/delta-debug.c
@@ -16,14 +16,14 @@ char *delta_streaminfo_str(struct delta_streaminfo *s, char *str,
return NULL;
snprintf(str, len,
- "%4.4s %dx%d %s %s dpb=%d %s %s %s%dx%d@(%d,%d) %s%d/%d",
+ "%4.4s %dx%d %s %s dpb=%d %s %s %s(%d,%d)/%ux%u %s%d/%d",
(char *)&s->streamformat, s->width, s->height,
s->profile, s->level, s->dpb,
(s->field == V4L2_FIELD_NONE) ? "progressive" : "interlaced",
s->other,
s->flags & DELTA_STREAMINFO_FLAG_CROP ? "crop=" : "",
- s->crop.width, s->crop.height,
s->crop.left, s->crop.top,
+ s->crop.width, s->crop.height,
s->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT ? "par=" : "",
s->pixelaspect.numerator,
s->pixelaspect.denominator);
@@ -38,13 +38,13 @@ char *delta_frameinfo_str(struct delta_frameinfo *f, char *str,
return NULL;
snprintf(str, len,
- "%4.4s %dx%d aligned %dx%d %s %s%dx%d@(%d,%d) %s%d/%d",
+ "%4.4s %dx%d aligned %dx%d %s %s(%d,%d)/%ux%u %s%d/%d",
(char *)&f->pixelformat, f->width, f->height,
f->aligned_width, f->aligned_height,
(f->field == V4L2_FIELD_NONE) ? "progressive" : "interlaced",
f->flags & DELTA_STREAMINFO_FLAG_CROP ? "crop=" : "",
- f->crop.width, f->crop.height,
f->crop.left, f->crop.top,
+ f->crop.width, f->crop.height,
f->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT ? "par=" : "",
f->pixelaspect.numerator,
f->pixelaspect.denominator);
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index 9b699ee2b1e0..d94c61b8569d 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -388,9 +388,9 @@ static void dcmi_set_crop(struct stm32_dcmi *dcmi)
((dcmi->crop.left << 1));
reg_write(dcmi->regs, DCMI_CWSTRT, start);
- dev_dbg(dcmi->dev, "Cropping to %ux%u@%u:%u\n",
- dcmi->crop.width, dcmi->crop.height,
- dcmi->crop.left, dcmi->crop.top);
+ dev_dbg(dcmi->dev, "Cropping to (%d,%d)/%ux%u\n",
+ dcmi->crop.left, dcmi->crop.top,
+ dcmi->crop.width, dcmi->crop.height);
/* Enable crop */
reg_set(dcmi->regs, DCMI_CR, CR_CROP);
@@ -1292,8 +1292,8 @@ static int dcmi_s_selection(struct file *file, void *priv,
/* Crop if request is different than sensor resolution */
dcmi->do_crop = true;
dcmi->crop = r;
- dev_dbg(dcmi->dev, "s_selection: crop %ux%u@(%u,%u) from %ux%u\n",
- r.width, r.height, r.left, r.top,
+ dev_dbg(dcmi->dev, "s_selection: crop (%d,%d)/%ux%u from %ux%u\n",
+ r.left, r.top, r.width, r.height,
pix.width, pix.height);
} else {
/* Disable crop */
@@ -1682,18 +1682,14 @@ static int dcmi_formats_init(struct stm32_dcmi *dcmi)
return -ENXIO;
dcmi->num_of_sd_formats = num_fmts;
- dcmi->sd_formats = devm_kcalloc(dcmi->dev,
- num_fmts, sizeof(struct dcmi_format *),
- GFP_KERNEL);
+ dcmi->sd_formats = devm_kmemdup_array(dcmi->dev, sd_fmts, num_fmts,
+ sizeof(*sd_fmts), GFP_KERNEL);
if (!dcmi->sd_formats) {
dev_err(dcmi->dev, "Could not allocate memory\n");
return -ENOMEM;
}
- memcpy(dcmi->sd_formats, sd_fmts,
- num_fmts * sizeof(struct dcmi_format *));
dcmi->sd_format = dcmi->sd_formats[0];
-
return 0;
}
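devm_kmemdup_array() collapses the devm_kcalloc() plus memcpy() pair into one allocate-and-copy call on device-managed memory. A hedged sketch, with src, n, and the helper name as hypothetical stand-ins:

	static int example_dup_formats(struct device *dev,
				       const struct dcmi_format **src, size_t n,
				       const struct dcmi_format ***out)
	{
		/* One call allocates and copies; freed automatically on unbind. */
		const struct dcmi_format **dup =
			devm_kmemdup_array(dev, src, n, sizeof(*src), GFP_KERNEL);

		if (!dup)
			return -ENOMEM;

		*out = dup;
		return 0;
	}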
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
index 3c742a546441..db76a02a1848 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
@@ -373,8 +373,8 @@ static int dcmipp_byteproc_set_selection(struct v4l2_subdev *sd,
mf->width = s->r.width;
mf->height = s->r.height;
- dev_dbg(byteproc->dev, "s_selection: crop %ux%u@(%u,%u)\n",
- crop->width, crop->height, crop->left, crop->top);
+ dev_dbg(byteproc->dev, "s_selection: crop (%d,%d)/%ux%u\n",
+ crop->left, crop->top, crop->width, crop->height);
break;
case V4L2_SEL_TGT_COMPOSE:
mf = v4l2_subdev_state_get_format(sd_state, 0);
@@ -386,9 +386,9 @@ static int dcmipp_byteproc_set_selection(struct v4l2_subdev *sd,
mf->width = s->r.width;
mf->height = s->r.height;
- dev_dbg(byteproc->dev, "s_selection: compose %ux%u@(%u,%u)\n",
- compose->width, compose->height,
- compose->left, compose->top);
+ dev_dbg(byteproc->dev, "s_selection: compose (%d,%d)/%ux%u\n",
+ compose->left, compose->top,
+ compose->width, compose->height);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
index 3d2913de9a86..7af6765532e3 100644
--- a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
@@ -114,7 +114,7 @@ struct hdmirx_stream {
spinlock_t vbq_lock; /* to lock video buffer queue */
bool stopping;
wait_queue_head_t wq_stopped;
- u32 frame_idx;
+ u32 sequence;
u32 line_flag_int_cnt;
u32 irq_stat;
};
@@ -1540,7 +1540,7 @@ static int hdmirx_start_streaming(struct vb2_queue *queue, unsigned int count)
int line_flag;
mutex_lock(&hdmirx_dev->stream_lock);
- stream->frame_idx = 0;
+ stream->sequence = 0;
stream->line_flag_int_cnt = 0;
stream->curr_buf = NULL;
stream->next_buf = NULL;
@@ -1948,7 +1948,7 @@ static void dma_idle_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
if (vb_done) {
vb_done->vb2_buf.timestamp = ktime_get_ns();
- vb_done->sequence = stream->frame_idx;
+ vb_done->sequence = stream->sequence;
if (bt->interlaced)
vb_done->field = V4L2_FIELD_INTERLACED_TB;
@@ -1956,10 +1956,6 @@ static void dma_idle_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
vb_done->field = V4L2_FIELD_NONE;
hdmirx_vb_done(stream, vb_done);
- stream->frame_idx++;
- if (stream->frame_idx == 30)
- v4l2_dbg(1, debug, v4l2_dev,
- "rcv frames\n");
}
stream->curr_buf = NULL;
@@ -1971,6 +1967,10 @@ static void dma_idle_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
v4l2_dbg(3, debug, v4l2_dev,
"%s: next_buf NULL, skip vb_done\n", __func__);
}
+
+ stream->sequence++;
+ if (stream->sequence == 30)
+ v4l2_dbg(1, debug, v4l2_dev, "rcv frames\n");
}
DMA_IDLE_OUT:
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c
index 44cdccb89377..1ca559df7e59 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c
@@ -2030,7 +2030,7 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
vpfe->fmt.fmt.pix.height;
- vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
+ vpfe_dbg(1, vpfe, "cropped (%d,%d)/%ux%u of %dx%d\n",
r.left, r.top, r.width, r.height, cr.width, cr.height);
return 0;
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 9cc875665695..00a71dac0ff4 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -49,21 +49,38 @@ static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy)
{
struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2;
u32 num_lanes = mipi_csi2->num_data_lanes;
- const struct cal_format_info *fmtinfo;
struct v4l2_subdev_state *state;
- struct v4l2_mbus_framefmt *fmt;
u32 bpp;
s64 freq;
+ /*
+ * v4l2_get_link_freq() uses V4L2_CID_LINK_FREQ first, and falls back
+ * to V4L2_CID_PIXEL_RATE if V4L2_CID_LINK_FREQ is not available.
+ *
+ * With multistream input there is no single pixel rate, and thus we
+ * cannot use V4L2_CID_PIXEL_RATE, so we pass 0 as the bpp which
+ * causes v4l2_get_link_freq() to return an error if it falls back to
+ * V4L2_CID_PIXEL_RATE.
+ */
+
state = v4l2_subdev_get_locked_active_state(&phy->subdev);
- fmt = v4l2_subdev_state_get_format(state, CAL_CAMERARX_PAD_SINK);
+ if (state->routing.num_routes > 1) {
+ bpp = 0;
+ } else {
+ struct v4l2_subdev_route *route = &state->routing.routes[0];
+ const struct cal_format_info *fmtinfo;
+ struct v4l2_mbus_framefmt *fmt;
- fmtinfo = cal_format_by_code(fmt->code);
- if (!fmtinfo)
- return -EINVAL;
+ fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
+ route->sink_stream);
- bpp = fmtinfo->bpp;
+ fmtinfo = cal_format_by_code(fmt->code);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ bpp = fmtinfo->bpp;
+ }
freq = v4l2_get_link_freq(&phy->source->entity.pads[phy->source_pad],
bpp, 2 * num_lanes);
@@ -285,15 +302,32 @@ static void cal_camerarx_ppi_disable(struct cal_camerarx *phy)
0, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
}
-static int cal_camerarx_start(struct cal_camerarx *phy)
+static int cal_camerarx_start(struct cal_camerarx *phy, u32 sink_stream)
{
+ struct media_pad *remote_pad;
s64 link_freq;
u32 sscounter;
u32 val;
int ret;
+ remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);
+
+ /*
+ * We need to enable the PHY hardware when enabling the first stream,
+ * but for the following streams we just propagate the enable_streams
+ * to the source.
+ */
+
if (phy->enable_count > 0) {
+ ret = v4l2_subdev_enable_streams(phy->source, remote_pad->index,
+ BIT(sink_stream));
+ if (ret) {
+ phy_err(phy, "enable streams failed in source: %d\n", ret);
+ return ret;
+ }
+
phy->enable_count++;
+
return 0;
}
@@ -395,7 +429,8 @@ static int cal_camerarx_start(struct cal_camerarx *phy)
* Start the source to enable the CSI-2 HS clock. We can now wait for
* CSI-2 PHY reset to complete.
*/
- ret = v4l2_subdev_call(phy->source, video, s_stream, 1);
+ ret = v4l2_subdev_enable_streams(phy->source, remote_pad->index,
+ BIT(sink_stream));
if (ret) {
v4l2_subdev_call(phy->source, core, s_power, 0);
cal_camerarx_disable_irqs(phy);
@@ -426,12 +461,22 @@ static int cal_camerarx_start(struct cal_camerarx *phy)
return 0;
}
-static void cal_camerarx_stop(struct cal_camerarx *phy)
+static void cal_camerarx_stop(struct cal_camerarx *phy, u32 sink_stream)
{
+ struct media_pad *remote_pad;
int ret;
- if (--phy->enable_count > 0)
+ remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);
+
+ if (--phy->enable_count > 0) {
+ ret = v4l2_subdev_disable_streams(phy->source,
+ remote_pad->index,
+ BIT(sink_stream));
+ if (ret)
+ phy_err(phy, "stream off failed in subdev\n");
+
return;
+ }
cal_camerarx_ppi_disable(phy);
@@ -451,7 +496,9 @@ static void cal_camerarx_stop(struct cal_camerarx *phy)
/* Disable the phy */
cal_camerarx_disable(phy);
- if (v4l2_subdev_call(phy->source, video, s_stream, 0))
+ ret = v4l2_subdev_disable_streams(phy->source, remote_pad->index,
+ BIT(sink_stream));
+ if (ret)
phy_err(phy, "stream off failed in subdev\n");
ret = v4l2_subdev_call(phy->source, core, s_power, 0);
@@ -600,22 +647,50 @@ static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd)
return container_of(sd, struct cal_camerarx, subdev);
}
-static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable)
+struct cal_camerarx *
+cal_camerarx_get_phy_from_entity(struct media_entity *entity)
+{
+ struct v4l2_subdev *sd;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ if (!sd)
+ return NULL;
+
+ return to_cal_camerarx(sd);
+}
+
+static int cal_camerarx_sd_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
- struct v4l2_subdev_state *state;
- int ret = 0;
+ u32 sink_stream;
+ int ret;
- state = v4l2_subdev_lock_and_get_active_state(sd);
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, 0,
+ NULL, &sink_stream);
+ if (ret)
+ return ret;
- if (enable)
- ret = cal_camerarx_start(phy);
- else
- cal_camerarx_stop(phy);
+ return cal_camerarx_start(phy, sink_stream);
+}
- v4l2_subdev_unlock_state(state);
+static int cal_camerarx_sd_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct cal_camerarx *phy = to_cal_camerarx(sd);
+ u32 sink_stream;
+ int ret;
- return ret;
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, 0,
+ NULL, &sink_stream);
+ if (ret)
+ return ret;
+
+ cal_camerarx_stop(phy, sink_stream);
+
+ return 0;
}
static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
@@ -629,8 +704,12 @@ static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- fmt = v4l2_subdev_state_get_format(state,
- CAL_CAMERARX_PAD_SINK);
+ fmt = v4l2_subdev_state_get_opposite_stream_format(state,
+ code->pad,
+ code->stream);
+ if (!fmt)
+ return -EINVAL;
+
code->code = fmt->code;
} else {
if (code->index >= cal_num_formats)
@@ -655,8 +734,12 @@ static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
if (cal_rx_pad_is_source(fse->pad)) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_state_get_format(state,
- CAL_CAMERARX_PAD_SINK);
+ fmt = v4l2_subdev_state_get_opposite_stream_format(state,
+ fse->pad,
+ fse->stream);
+ if (!fmt)
+ return -EINVAL;
+
if (fse->code != fmt->code)
return -EINVAL;
@@ -712,36 +795,77 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
/* Store the format and propagate it to the source pad. */
- fmt = v4l2_subdev_state_get_format(state, CAL_CAMERARX_PAD_SINK);
+ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
+ if (!fmt)
+ return -EINVAL;
+
*fmt = format->format;
- fmt = v4l2_subdev_state_get_format(state,
- CAL_CAMERARX_PAD_FIRST_SOURCE);
+ fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
*fmt = format->format;
return 0;
}
+static int cal_camerarx_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ static const struct v4l2_mbus_framefmt format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+ int ret;
+
+ ret = v4l2_subdev_routing_validate(sd, routing,
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
+ V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cal_camerarx_sd_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ return cal_camerarx_set_routing(sd, state, routing);
+}
+
static int cal_camerarx_sd_init_state(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state)
{
- struct v4l2_subdev_format format = {
- .which = state ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
- .pad = CAL_CAMERARX_PAD_SINK,
- .format = {
- .width = 640,
- .height = 480,
- .code = MEDIA_BUS_FMT_UYVY8_1X16,
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .ycbcr_enc = V4L2_YCBCR_ENC_601,
- .quantization = V4L2_QUANTIZATION_LIM_RANGE,
- .xfer_func = V4L2_XFER_FUNC_SRGB,
- },
+ struct v4l2_subdev_route routes[] = { {
+ .sink_pad = 0,
+ .sink_stream = 0,
+ .source_pad = 1,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ } };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = 1,
+ .routes = routes,
};
- return cal_camerarx_sd_set_fmt(sd, state, &format);
+	/* Initialize routing to a single route to the first source pad */
+ return cal_camerarx_set_routing(sd, state, &routing);
}
static int cal_camerarx_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
@@ -750,48 +874,71 @@ static int cal_camerarx_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
struct cal_camerarx *phy = to_cal_camerarx(sd);
struct v4l2_mbus_frame_desc remote_desc;
const struct media_pad *remote_pad;
+ struct v4l2_subdev_state *state;
+ u32 sink_stream;
+ unsigned int i;
int ret;
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+ pad, 0,
+ NULL, &sink_stream);
+ if (ret)
+ goto out_unlock;
+
remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);
- if (!remote_pad)
- return -EPIPE;
+ if (!remote_pad) {
+ ret = -EPIPE;
+ goto out_unlock;
+ }
ret = v4l2_subdev_call(phy->source, pad, get_frame_desc,
remote_pad->index, &remote_desc);
if (ret)
- return ret;
+ goto out_unlock;
if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
cal_err(phy->cal,
"Frame descriptor does not describe CSI-2 link");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock;
}
- if (remote_desc.num_entries > 1)
- cal_err(phy->cal,
- "Multiple streams not supported in remote frame descriptor, using the first one\n");
+ for (i = 0; i < remote_desc.num_entries; i++) {
+ if (remote_desc.entry[i].stream == sink_stream)
+ break;
+ }
+
+ if (i == remote_desc.num_entries) {
+ cal_err(phy->cal, "Stream %u not found in remote frame desc\n",
+ sink_stream);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
fd->num_entries = 1;
- fd->entry[0] = remote_desc.entry[0];
+ fd->entry[0] = remote_desc.entry[i];
- return 0;
-}
+out_unlock:
+ v4l2_subdev_unlock_state(state);
-static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = {
- .s_stream = cal_camerarx_sd_s_stream,
-};
+ return ret;
+}
static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = {
+ .enable_streams = cal_camerarx_sd_enable_streams,
+ .disable_streams = cal_camerarx_sd_disable_streams,
.enum_mbus_code = cal_camerarx_sd_enum_mbus_code,
.enum_frame_size = cal_camerarx_sd_enum_frame_size,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = cal_camerarx_sd_set_fmt,
+ .set_routing = cal_camerarx_sd_set_routing,
.get_frame_desc = cal_camerarx_get_frame_desc,
};
static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = {
- .video = &cal_camerarx_video_ops,
.pad = &cal_camerarx_pad_ops,
};
@@ -801,6 +948,7 @@ static const struct v4l2_subdev_internal_ops cal_camerarx_internal_ops = {
static const struct media_entity_operations cal_camerarx_media_ops = {
.link_validate = v4l2_subdev_link_validate,
+ .has_pad_interdep = v4l2_subdev_has_pad_interdep,
};
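The PHY conversion above moves from the all-or-nothing .s_stream() video op to the pad-level stream APIs, where a 64-bit mask selects individual streams on a multiplexed pad. A minimal sketch of the calling convention, with a hypothetical helper:

	#include <linux/bits.h>
	#include <media/v4l2-subdev.h>

	static int example_toggle_stream(struct v4l2_subdev *src, u32 pad,
					 u32 stream, bool enable)
	{
		u64 mask = BIT_ULL(stream);	/* one bit per stream on this pad */

		if (enable)
			return v4l2_subdev_enable_streams(src, pad, mask);

		return v4l2_subdev_disable_streams(src, pad, mask);
	}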
/* ------------------------------------------------------------------
@@ -852,7 +1000,7 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
v4l2_subdev_init(sd, &cal_camerarx_subdev_ops);
sd->internal_ops = &cal_camerarx_internal_ops;
sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
snprintf(sd->name, sizeof(sd->name), "CAMERARX%u", instance);
sd->dev = cal->dev;
diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
index e29743ae61e2..d40e24ab1127 100644
--- a/drivers/media/platform/ti/cal/cal-video.c
+++ b/drivers/media/platform/ti/cal/cal-video.c
@@ -25,20 +25,6 @@
#include "cal.h"
-/* Print Four-character-code (FOURCC) */
-static char *fourcc_to_str(u32 fmt)
-{
- static char code[5];
-
- code[0] = (unsigned char)(fmt & 0xff);
- code[1] = (unsigned char)((fmt >> 8) & 0xff);
- code[2] = (unsigned char)((fmt >> 16) & 0xff);
- code[3] = (unsigned char)((fmt >> 24) & 0xff);
- code[4] = '\0';
-
- return code;
-}
-
/* ------------------------------------------------------------------
* V4L2 Common IOCTLs
* ------------------------------------------------------------------
@@ -122,9 +108,10 @@ static int __subdev_get_format(struct cal_ctx *ctx,
.pad = 0,
};
struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+ struct v4l2_subdev *sd = ctx->phy->source;
int ret;
- ret = v4l2_subdev_call(ctx->phy->source, pad, get_fmt, NULL, &sd_fmt);
+ ret = v4l2_subdev_call_state_active(sd, pad, get_fmt, &sd_fmt);
if (ret)
return ret;
@@ -144,11 +131,12 @@ static int __subdev_set_format(struct cal_ctx *ctx,
.pad = 0,
};
struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+ struct v4l2_subdev *sd = ctx->phy->source;
int ret;
*mbus_fmt = *fmt;
- ret = v4l2_subdev_call(ctx->phy->source, pad, set_fmt, NULL, &sd_fmt);
+ ret = v4l2_subdev_call_state_active(sd, pad, set_fmt, &sd_fmt);
if (ret)
return ret;
@@ -180,8 +168,8 @@ static void cal_calc_format_size(struct cal_ctx *ctx,
f->fmt.pix.sizeimage = f->fmt.pix.height *
f->fmt.pix.bytesperline;
- ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
- __func__, fourcc_to_str(f->fmt.pix.pixelformat),
+ ctx_dbg(3, ctx, "%s: fourcc: %p4cc size: %dx%d bpl:%d img_size:%d\n",
+ __func__, &f->fmt.pix.pixelformat,
f->fmt.pix.width, f->fmt.pix.height,
f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
}
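The %p4cc specifier renders a FOURCC directly from a pointer to the 32-bit value, so the static (and non-reentrant) buffer of the removed fourcc_to_str() helper is no longer needed. A minimal sketch:

	u32 pixelformat = V4L2_PIX_FMT_YUYV;

	/* Prints the four characters plus an endianness annotation. */
	pr_debug("fourcc: %p4cc\n", &pixelformat);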
@@ -190,6 +178,7 @@ static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cal_ctx *ctx = video_drvdata(file);
+ struct v4l2_subdev *sd = ctx->phy->source;
const struct cal_format_info *fmtinfo;
struct v4l2_subdev_frame_size_enum fse = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -215,8 +204,8 @@ static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv,
for (fse.index = 0; ; fse.index++) {
int ret;
- ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size,
- NULL, &fse);
+ ret = v4l2_subdev_call_state_active(sd, pad, enum_frame_size,
+ &fse);
if (ret)
break;
@@ -252,6 +241,7 @@ static int cal_legacy_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cal_ctx *ctx = video_drvdata(file);
+ struct v4l2_subdev *sd = &ctx->phy->subdev;
struct vb2_queue *q = &ctx->vb_vidq;
struct v4l2_subdev_format sd_fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -291,7 +281,7 @@ static int cal_legacy_s_fmt_vid_cap(struct file *file, void *priv,
ctx->v_fmt.fmt.pix.field = sd_fmt.format.field;
cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);
- v4l2_subdev_call(&ctx->phy->subdev, pad, set_fmt, NULL, &sd_fmt);
+ v4l2_subdev_call_state_active(sd, pad, set_fmt, &sd_fmt);
ctx->fmtinfo = fmtinfo;
*f = ctx->v_fmt;
@@ -303,6 +293,7 @@ static int cal_legacy_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct cal_ctx *ctx = video_drvdata(file);
+ struct v4l2_subdev *sd = ctx->phy->source;
const struct cal_format_info *fmtinfo;
struct v4l2_subdev_frame_size_enum fse = {
.index = fsize->index,
@@ -321,8 +312,7 @@ static int cal_legacy_enum_framesizes(struct file *file, void *fh,
fse.code = fmtinfo->code;
- ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size, NULL,
- &fse);
+ ret = v4l2_subdev_call_state_active(sd, pad, enum_frame_size, &fse);
if (ret)
return ret;
@@ -364,6 +354,7 @@ static int cal_legacy_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fival)
{
struct cal_ctx *ctx = video_drvdata(file);
+ struct v4l2_subdev *sd = ctx->phy->source;
const struct cal_format_info *fmtinfo;
struct v4l2_subdev_frame_interval_enum fie = {
.index = fival->index,
@@ -378,8 +369,8 @@ static int cal_legacy_enum_frameintervals(struct file *file, void *priv,
return -EINVAL;
fie.code = fmtinfo->code;
- ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_interval,
- NULL, &fie);
+
+ ret = v4l2_subdev_call_state_active(sd, pad, enum_frame_interval, &fie);
if (ret)
return ret;
fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
@@ -446,6 +437,9 @@ static int cal_mc_enum_fmt_vid_cap(struct file *file, void *priv,
idx = 0;
for (i = 0; i < cal_num_formats; ++i) {
+ if (cal_formats[i].meta)
+ continue;
+
if (f->mbus_code && cal_formats[i].code != f->mbus_code)
continue;
@@ -473,7 +467,7 @@ static void cal_mc_try_fmt(struct cal_ctx *ctx, struct v4l2_format *f,
* supported.
*/
fmtinfo = cal_format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmtinfo)
+ if (!fmtinfo || fmtinfo->meta)
fmtinfo = &cal_formats[0];
/*
@@ -509,8 +503,8 @@ static void cal_mc_try_fmt(struct cal_ctx *ctx, struct v4l2_format *f,
if (info)
*info = fmtinfo;
- ctx_dbg(3, ctx, "%s: %s %ux%u (bytesperline %u sizeimage %u)\n",
- __func__, fourcc_to_str(format->pixelformat),
+ ctx_dbg(3, ctx, "%s: %p4cc %ux%u (bytesperline %u sizeimage %u)\n",
+ __func__, &format->pixelformat,
format->width, format->height,
format->bytesperline, format->sizeimage);
}
@@ -689,16 +683,16 @@ static int cal_video_check_format(struct cal_ctx *ctx)
{
const struct v4l2_mbus_framefmt *format;
struct v4l2_subdev_state *state;
- struct media_pad *remote_pad;
+ struct media_pad *phy_source_pad;
int ret = 0;
- remote_pad = media_pad_remote_pad_first(&ctx->pad);
- if (!remote_pad)
+ phy_source_pad = media_pad_remote_pad_first(&ctx->pad);
+ if (!phy_source_pad)
return -ENODEV;
state = v4l2_subdev_lock_and_get_active_state(&ctx->phy->subdev);
- format = v4l2_subdev_state_get_format(state, remote_pad->index);
+ format = v4l2_subdev_state_get_format(state, phy_source_pad->index, 0);
if (!format) {
ret = -EINVAL;
goto out;
@@ -721,16 +715,28 @@ out:
static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ struct media_pad *phy_source_pad;
struct cal_buffer *buf;
dma_addr_t addr;
int ret;
+ phy_source_pad = media_pad_remote_pad_first(&ctx->pad);
+ if (!phy_source_pad) {
+ ctx_err(ctx, "Context not connected\n");
+ ret = -ENODEV;
+ goto error_release_buffers;
+ }
+
ret = video_device_pipeline_alloc_start(&ctx->vdev);
if (ret < 0) {
ctx_err(ctx, "Failed to start media pipeline: %d\n", ret);
goto error_release_buffers;
}
+ /* Find the PHY connected to this video device */
+ if (cal_mc_api)
+ ctx->phy = cal_camerarx_get_phy_from_entity(phy_source_pad->entity);
+
/*
* Verify that the currently configured format matches the output of
* the connected CAMERARX.
@@ -758,12 +764,13 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
ret = pm_runtime_resume_and_get(ctx->cal->dev);
if (ret < 0)
- goto error_pipeline;
+ goto error_unprepare;
cal_ctx_set_dma_addr(ctx, addr);
cal_ctx_start(ctx);
- ret = v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 1);
+ ret = v4l2_subdev_enable_streams(&ctx->phy->subdev,
+ phy_source_pad->index, BIT(0));
if (ret)
goto error_stop;
@@ -775,8 +782,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
error_stop:
cal_ctx_stop(ctx);
pm_runtime_put_sync(ctx->cal->dev);
+error_unprepare:
cal_ctx_unprepare(ctx);
-
error_pipeline:
video_device_pipeline_stop(&ctx->vdev);
error_release_buffers:
@@ -788,10 +795,14 @@ error_release_buffers:
static void cal_stop_streaming(struct vb2_queue *vq)
{
struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+ struct media_pad *phy_source_pad;
cal_ctx_stop(ctx);
- v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 0);
+ phy_source_pad = media_pad_remote_pad_first(&ctx->pad);
+
+ v4l2_subdev_disable_streams(&ctx->phy->subdev, phy_source_pad->index,
+ BIT(0));
pm_runtime_put_sync(ctx->cal->dev);
@@ -800,6 +811,9 @@ static void cal_stop_streaming(struct vb2_queue *vq)
cal_release_buffers(ctx, VB2_BUF_STATE_ERROR);
video_device_pipeline_stop(&ctx->vdev);
+
+ if (cal_mc_api)
+ ctx->phy = NULL;
}
static const struct vb2_ops cal_video_qops = {
@@ -826,6 +840,7 @@ static const struct v4l2_file_operations cal_fops = {
static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
{
+ struct v4l2_subdev *sd = ctx->phy->source;
struct v4l2_mbus_framefmt mbus_fmt;
const struct cal_format_info *fmtinfo;
unsigned int i, j, k;
@@ -845,20 +860,20 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- ret = v4l2_subdev_call(ctx->phy->source, pad, enum_mbus_code,
- NULL, &mbus_code);
+ ret = v4l2_subdev_call_state_active(sd, pad, enum_mbus_code,
+ &mbus_code);
if (ret == -EINVAL)
break;
if (ret) {
ctx_err(ctx, "Error enumerating mbus codes in subdev %s: %d\n",
- ctx->phy->source->name, ret);
+ sd->name, ret);
return ret;
}
ctx_dbg(2, ctx,
"subdev %s: code: %04x idx: %u\n",
- ctx->phy->source->name, mbus_code.code, j);
+ sd->name, mbus_code.code, j);
for (k = 0; k < cal_num_formats; k++) {
fmtinfo = &cal_formats[k];
@@ -866,8 +881,8 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
if (mbus_code.code == fmtinfo->code) {
ctx->active_fmt[i] = fmtinfo;
ctx_dbg(2, ctx,
- "matched fourcc: %s: code: %04x idx: %u\n",
- fourcc_to_str(fmtinfo->fourcc),
+ "matched fourcc: %p4cc: code: %04x idx: %u\n",
+ &fmtinfo->fourcc,
fmtinfo->code, i);
ctx->num_active_fmt = ++i;
}
@@ -876,7 +891,7 @@ static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
if (i == 0) {
ctx_err(ctx, "No suitable format reported by subdev %s\n",
- ctx->phy->source->name);
+ sd->name);
return -EINVAL;
}
@@ -962,16 +977,52 @@ int cal_ctx_v4l2_register(struct cal_ctx *ctx)
return ret;
}
- ret = media_create_pad_link(&ctx->phy->subdev.entity,
- CAL_CAMERARX_PAD_FIRST_SOURCE,
- &vfd->entity, 0,
- MEDIA_LNK_FL_IMMUTABLE |
- MEDIA_LNK_FL_ENABLED);
- if (ret) {
- ctx_err(ctx, "Failed to create media link for context %u\n",
- ctx->dma_ctx);
- video_unregister_device(vfd);
- return ret;
+ if (cal_mc_api) {
+ u16 phy_idx;
+ u16 pad_idx;
+
+ /* Create links from all video nodes to all PHYs */
+
+ for (phy_idx = 0; phy_idx < ctx->cal->data->num_csi2_phy;
+ ++phy_idx) {
+ struct media_entity *phy_entity =
+ &ctx->cal->phy[phy_idx]->subdev.entity;
+
+ for (pad_idx = 1; pad_idx < CAL_CAMERARX_NUM_PADS;
+ ++pad_idx) {
+ /*
+ * Enable only links from video0 to PHY0 pad 1,
+ * and video1 to PHY1 pad 1.
+ */
+ bool enable = (ctx->dma_ctx == 0 &&
+ phy_idx == 0 && pad_idx == 1) ||
+ (ctx->dma_ctx == 1 &&
+ phy_idx == 1 && pad_idx == 1);
+
+ ret = media_create_pad_link(phy_entity, pad_idx,
+ &vfd->entity, 0,
+ enable ? MEDIA_LNK_FL_ENABLED : 0);
+ if (ret) {
+ ctx_err(ctx,
+ "Failed to create media link for context %u\n",
+ ctx->dma_ctx);
+ video_unregister_device(vfd);
+ return ret;
+ }
+ }
+ }
+ } else {
+ ret = media_create_pad_link(&ctx->phy->subdev.entity,
+ CAL_CAMERARX_PAD_FIRST_SOURCE,
+ &vfd->entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ ctx_err(ctx,
+ "Failed to create media link for context %u\n",
+ ctx->dma_ctx);
+ video_unregister_device(vfd);
+ return ret;
+ }
}
ctx_info(ctx, "V4L2 device registered as %s\n",
diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
index 6cb3e5f49686..b644ed890412 100644
--- a/drivers/media/platform/ti/cal/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -481,8 +481,9 @@ int cal_ctx_prepare(struct cal_ctx *ctx)
ctx->vc = 0;
ctx->datatype = CAL_CSI2_CTX_DT_ANY;
} else if (!ret) {
- ctx_dbg(2, ctx, "Framedesc: len %u, vc %u, dt %#x\n",
- entry.length, entry.bus.csi2.vc, entry.bus.csi2.dt);
+ ctx_dbg(2, ctx, "Framedesc: stream %u, len %u, vc %u, dt %#x\n",
+ entry.stream, entry.length, entry.bus.csi2.vc,
+ entry.bus.csi2.dt);
ctx->vc = entry.bus.csi2.vc;
ctx->datatype = entry.bus.csi2.dt;
@@ -490,7 +491,7 @@ int cal_ctx_prepare(struct cal_ctx *ctx)
return ret;
}
- ctx->use_pix_proc = !ctx->fmtinfo->meta;
+ ctx->use_pix_proc = ctx->vb_vidq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ctx->use_pix_proc) {
ret = cal_reserve_pix_proc(ctx->cal);
@@ -1016,7 +1017,6 @@ static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
return NULL;
ctx->cal = cal;
- ctx->phy = cal->phy[inst];
ctx->dma_ctx = inst;
ctx->csi2_ctx = inst;
ctx->cport = inst;
@@ -1228,18 +1228,37 @@ static int cal_probe(struct platform_device *pdev)
}
/* Create contexts. */
- for (i = 0; i < cal->data->num_csi2_phy; ++i) {
- if (!cal->phy[i]->source_node)
- continue;
+ if (!cal_mc_api) {
+ for (i = 0; i < cal->data->num_csi2_phy; ++i) {
+ struct cal_ctx *ctx;
+
+ if (!cal->phy[i]->source_node)
+ continue;
+
+ ctx = cal_ctx_create(cal, i);
+ if (!ctx) {
+ cal_err(cal, "Failed to create context %u\n", cal->num_contexts);
+ ret = -ENODEV;
+ goto error_context;
+ }
+
+ ctx->phy = cal->phy[i];
- cal->ctx[cal->num_contexts] = cal_ctx_create(cal, i);
- if (!cal->ctx[cal->num_contexts]) {
- cal_err(cal, "Failed to create context %u\n", cal->num_contexts);
- ret = -ENODEV;
- goto error_context;
+ cal->ctx[cal->num_contexts++] = ctx;
}
+ } else {
+ for (i = 0; i < ARRAY_SIZE(cal->ctx); ++i) {
+ struct cal_ctx *ctx;
+
+ ctx = cal_ctx_create(cal, i);
+ if (!ctx) {
+ cal_err(cal, "Failed to create context %u\n", i);
+ ret = -ENODEV;
+ goto error_context;
+ }
- cal->num_contexts++;
+ cal->ctx[cal->num_contexts++] = ctx;
+ }
}
/* Register the media device. */
diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h
index 72a246a64d9e..33b1885aafe1 100644
--- a/drivers/media/platform/ti/cal/cal.h
+++ b/drivers/media/platform/ti/cal/cal.h
@@ -45,7 +45,7 @@
#define CAL_CAMERARX_PAD_SINK 0
#define CAL_CAMERARX_PAD_FIRST_SOURCE 1
-#define CAL_CAMERARX_NUM_SOURCE_PADS 1
+#define CAL_CAMERARX_NUM_SOURCE_PADS 8
#define CAL_CAMERARX_NUM_PADS (1 + CAL_CAMERARX_NUM_SOURCE_PADS)
static inline bool cal_rx_pad_is_sink(u32 pad)
@@ -320,6 +320,7 @@ const struct cal_format_info *cal_format_by_code(u32 code);
void cal_quickdump_regs(struct cal_dev *cal);
+struct cal_camerarx *cal_camerarx_get_phy_from_entity(struct media_entity *entity);
void cal_camerarx_disable(struct cal_camerarx *phy);
void cal_camerarx_i913_errata(struct cal_camerarx *phy);
struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
diff --git a/drivers/media/platform/ti/davinci/vpif.c b/drivers/media/platform/ti/davinci/vpif.c
index a81719702a22..969d623fc842 100644
--- a/drivers/media/platform/ti/davinci/vpif.c
+++ b/drivers/media/platform/ti/davinci/vpif.c
@@ -504,7 +504,7 @@ static int vpif_probe(struct platform_device *pdev)
pdev_display = kzalloc(sizeof(*pdev_display), GFP_KERNEL);
if (!pdev_display) {
ret = -ENOMEM;
- goto err_put_pdev_capture;
+ goto err_del_pdev_capture;
}
pdev_display->name = "vpif_display";
@@ -527,6 +527,8 @@ static int vpif_probe(struct platform_device *pdev)
err_put_pdev_display:
platform_device_put(pdev_display);
+err_del_pdev_capture:
+ platform_device_del(pdev_capture);
err_put_pdev_capture:
platform_device_put(pdev_capture);
err_put_rpm:
diff --git a/drivers/media/platform/ti/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c
index dd375c4e180d..7d0c723dcd11 100644
--- a/drivers/media/platform/ti/omap3isp/ispccdc.c
+++ b/drivers/media/platform/ti/omap3isp/ispccdc.c
@@ -446,8 +446,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
if (ret < 0)
goto done;
- dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
- req->table.sgt.nents, DMA_TO_DEVICE);
+ dma_sync_sgtable_for_cpu(isp->dev, &req->table.sgt,
+ DMA_TO_DEVICE);
if (copy_from_user(req->table.addr, config->lsc,
req->config.size)) {
@@ -455,8 +455,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
goto done;
}
- dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
- req->table.sgt.nents, DMA_TO_DEVICE);
+ dma_sync_sgtable_for_device(isp->dev, &req->table.sgt,
+ DMA_TO_DEVICE);
}
spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
diff --git a/drivers/media/platform/ti/omap3isp/ispstat.c b/drivers/media/platform/ti/omap3isp/ispstat.c
index 359a846205b0..d3da68408ecb 100644
--- a/drivers/media/platform/ti/omap3isp/ispstat.c
+++ b/drivers/media/platform/ti/omap3isp/ispstat.c
@@ -161,8 +161,7 @@ static void isp_stat_buf_sync_for_device(struct ispstat *stat,
if (ISP_STAT_USES_DMAENGINE(stat))
return;
- dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
- buf->sgt.nents, DMA_FROM_DEVICE);
+ dma_sync_sgtable_for_device(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE);
}
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
@@ -171,8 +170,7 @@ static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
if (ISP_STAT_USES_DMAENGINE(stat))
return;
- dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
- buf->sgt.nents, DMA_FROM_DEVICE);
+ dma_sync_sgtable_for_cpu(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE);
}
static void isp_stat_buf_clear(struct ispstat *stat)
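The dma_sync_sgtable_*() helpers take the whole struct sg_table rather than an sgl/nents pair, removing the recurring pitfall of passing nents where orig_nents was meant. A minimal sketch:

	#include <linux/dma-mapping.h>

	static void example_sync(struct device *dev, struct sg_table *sgt)
	{
		/* Hand ownership of the buffer to the CPU before reading it. */
		dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
	}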
diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c
index 5c9aa80023fd..78e30298c7ad 100644
--- a/drivers/media/platform/ti/omap3isp/ispvideo.c
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.c
@@ -480,29 +480,11 @@ static int isp_video_start_streaming(struct vb2_queue *queue,
return 0;
}
-static void omap3isp_wait_prepare(struct vb2_queue *vq)
-{
- struct isp_video_fh *vfh = vb2_get_drv_priv(vq);
- struct isp_video *video = vfh->video;
-
- mutex_unlock(&video->queue_lock);
-}
-
-static void omap3isp_wait_finish(struct vb2_queue *vq)
-{
- struct isp_video_fh *vfh = vb2_get_drv_priv(vq);
- struct isp_video *video = vfh->video;
-
- mutex_lock(&video->queue_lock);
-}
-
static const struct vb2_ops isp_video_queue_ops = {
.queue_setup = isp_video_queue_setup,
.buf_prepare = isp_video_buffer_prepare,
.buf_queue = isp_video_buffer_queue,
.start_streaming = isp_video_start_streaming,
- .wait_prepare = omap3isp_wait_prepare,
- .wait_finish = omap3isp_wait_finish,
};
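With vb2_queue::lock set (as isp_video_open() now does below), the videobuf2 core releases and re-takes the mutex around blocking waits itself, which is exactly what the removed wait_prepare()/wait_finish() callbacks did by hand. A minimal sketch, assuming a hypothetical init path:

	#include <media/videobuf2-core.h>

	static int example_queue_init(struct vb2_queue *q, struct mutex *lock)
	{
		/* vb2 drops and re-acquires this mutex around blocking waits. */
		q->lock = lock;

		return vb2_queue_init(q);
	}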
/*
@@ -1338,6 +1320,7 @@ static int isp_video_open(struct file *file)
queue->buf_struct_size = sizeof(struct isp_buffer);
queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue->dev = video->isp->dev;
+ queue->lock = &video->queue_lock;
ret = vb2_queue_init(&handle->queue);
if (ret < 0) {
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index c435a393e0cb..9f559a13d409 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -250,8 +250,10 @@ int hantro_postproc_init(struct hantro_ctx *ctx)
for (i = 0; i < num_buffers; i++) {
ret = hantro_postproc_alloc(ctx, i);
- if (ret)
+ if (ret) {
+ hantro_postproc_free(ctx);
return ret;
+ }
}
return 0;
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 2bce940a5822..7c3515cf7d64 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -77,6 +77,7 @@ int hantro_get_format_depth(u32 fourcc)
switch (fourcc) {
case V4L2_PIX_FMT_P010:
case V4L2_PIX_FMT_P010_4L4:
+ case V4L2_PIX_FMT_NV15:
case V4L2_PIX_FMT_NV15_4L4:
return 10;
default:
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
index 69b5d9e12926..e4703bb6be7c 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
@@ -2202,6 +2202,10 @@ static void rockchip_vpu981_postproc_enable(struct hantro_ctx *ctx)
case V4L2_PIX_FMT_NV12:
hantro_reg_write(vpu, &av1_pp_out_format, 3);
break;
+ case V4L2_PIX_FMT_NV15:
+		/* this mapping is RK-specific */
+ hantro_reg_write(vpu, &av1_pp_out_format, 10);
+ break;
default:
hantro_reg_write(vpu, &av1_pp_out_format, 0);
}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
index 964122e7c355..acd29fa41d2d 100644
--- a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
@@ -85,10 +85,24 @@ static const struct hantro_fmt rockchip_vpu981_postproc_fmts[] = {
.postprocessed = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
- .max_width = FMT_UHD_WIDTH,
+ .max_width = FMT_4K_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
- .max_height = FMT_UHD_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV15,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_4K_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_4K_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -99,10 +113,10 @@ static const struct hantro_fmt rockchip_vpu981_postproc_fmts[] = {
.postprocessed = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
- .max_width = FMT_UHD_WIDTH,
+ .max_width = FMT_4K_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
- .max_height = FMT_UHD_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -318,10 +332,10 @@ static const struct hantro_fmt rockchip_vpu981_dec_fmts[] = {
.match_depth = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
- .max_width = FMT_UHD_WIDTH,
+ .max_width = FMT_4K_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
- .max_height = FMT_UHD_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -331,10 +345,10 @@ static const struct hantro_fmt rockchip_vpu981_dec_fmts[] = {
.match_depth = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
- .max_width = FMT_UHD_WIDTH,
+ .max_width = FMT_4K_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
- .max_height = FMT_UHD_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
.step_height = MB_DIM,
},
},
@@ -344,10 +358,10 @@ static const struct hantro_fmt rockchip_vpu981_dec_fmts[] = {
.max_depth = 2,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
- .max_width = FMT_UHD_WIDTH,
+ .max_width = FMT_4K_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
- .max_height = FMT_UHD_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
.step_height = MB_DIM,
},
},
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 2ddf1dfa0522..5110754e1a31 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -284,7 +284,7 @@ static bool cadet_has_rds_data(struct cadet *dev)
static void cadet_handler(struct timer_list *t)
{
- struct cadet *dev = from_timer(dev, t, readtimer);
+ struct cadet *dev = timer_container_of(dev, t, readtimer);
/* Service the RDS fifo */
if (mutex_trylock(&dev->lock)) {
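This and the following timer hunks apply the tree-wide rename of from_timer() to timer_container_of(); both resolve the structure embedding the timer via container_of(). A minimal sketch with a hypothetical device type:

	#include <linux/printk.h>
	#include <linux/timer.h>

	struct example_dev {
		struct timer_list readtimer;
	};

	static void example_timer_fn(struct timer_list *t)
	{
		/* Recover the example_dev embedding this timer_list. */
		struct example_dev *dev = timer_container_of(dev, t, readtimer);

		pr_debug("timer fired for %p\n", dev);
	}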
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 9435cba3f4d9..d6c54a3bccc2 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -659,7 +659,7 @@ exit:
/* timer to simulate tx done interrupt */
static void ene_tx_irqsim(struct timer_list *t)
{
- struct ene_device *dev = from_timer(dev, t, tx_sim_timer);
+ struct ene_device *dev = timer_container_of(dev, t, tx_sim_timer);
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index bfe86588c69b..e034c93d57cf 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -131,7 +131,7 @@ static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
static void igorplugusb_timer(struct timer_list *t)
{
- struct igorplugusb *ir = from_timer(ir, t, timer);
+ struct igorplugusb *ir = timer_container_of(ir, t, timer);
igorplugusb_cmd(ir, GET_INFRACODE);
}
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index da89ddf771c3..63f6f5b36838 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -865,7 +865,7 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
/* timer function to end waiting for repeat. */
static void img_ir_end_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = from_timer(priv, t, hw.end_timer);
+ struct img_ir_priv *priv = timer_container_of(priv, t, hw.end_timer);
spin_lock_irq(&priv->lock);
img_ir_end_repeat(priv);
@@ -879,7 +879,8 @@ static void img_ir_end_timer(struct timer_list *t)
*/
static void img_ir_suspend_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = from_timer(priv, t, hw.suspend_timer);
+ struct img_ir_priv *priv = timer_container_of(priv, t,
+ hw.suspend_timer);
spin_lock_irq(&priv->lock);
/*
diff --git a/drivers/media/rc/img-ir/img-ir-raw.c b/drivers/media/rc/img-ir/img-ir-raw.c
index 669f3309e237..92fb7b555a0f 100644
--- a/drivers/media/rc/img-ir/img-ir-raw.c
+++ b/drivers/media/rc/img-ir/img-ir-raw.c
@@ -65,7 +65,7 @@ void img_ir_isr_raw(struct img_ir_priv *priv, u32 irq_status)
*/
static void img_ir_echo_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = from_timer(priv, t, raw.timer);
+ struct img_ir_priv *priv = timer_container_of(priv, t, raw.timer);
spin_lock_irq(&priv->lock);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index cb6f36ebe5c8..f5221b018808 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1091,7 +1091,7 @@ static void usb_tx_callback(struct urb *urb)
*/
static void imon_touch_display_timeout(struct timer_list *t)
{
- struct imon_context *ictx = from_timer(ictx, t, ttimer);
+ struct imon_context *ictx = timer_container_of(ictx, t, ttimer);
if (ictx->display_type != IMON_DISPLAY_TYPE_VGA)
return;
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 817030fb50c9..bb2d7c37c263 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -109,7 +109,8 @@ static unsigned char kbd_keycodes[256] = {
static void mce_kbd_rx_timeout(struct timer_list *t)
{
- struct ir_raw_event_ctrl *raw = from_timer(raw, t, mce_kbd.rx_timeout);
+ struct ir_raw_event_ctrl *raw = timer_container_of(raw, t,
+ mce_kbd.rx_timeout);
unsigned char maskcode;
unsigned long flags;
int i;
diff --git a/drivers/media/rc/keymaps/rc-hauppauge.c b/drivers/media/rc/keymaps/rc-hauppauge.c
index d7156774aa0e..9e64c0b2d18e 100644
--- a/drivers/media/rc/keymaps/rc-hauppauge.c
+++ b/drivers/media/rc/keymaps/rc-hauppauge.c
@@ -261,6 +261,48 @@ static struct rc_map_table rc5_hauppauge_new[] = {
{ 0x001e, KEY_RED }, /* Reserved */
{ 0x0000, KEY_NUMERIC_0 },
{ 0x0026, KEY_SLEEP }, /* Minimize */
+
+ /*
+ * Keycodes for the black Credit Card Remote Control shipped with, for
+ * example, the WinTV-dualHD tuner.
+ * Keycodes start with address = 0x19
+ */
+ { 0x190a, KEY_LAST }, /* <- */
+ { 0x192f, KEY_MENU }, /* List */
+ { 0x1910, KEY_CHANNELUP },
+ { 0x192e, KEY_CHANNELDOWN },
+ { 0x192c, KEY_OK },
+
+ { 0x1911, KEY_TV },
+ { 0x190c, KEY_POWER },
+
+ { 0x1900, KEY_NUMERIC_0 },
+ { 0x1938, KEY_NUMERIC_1 },
+ { 0x1920, KEY_NUMERIC_2 },
+ { 0x1901, KEY_NUMERIC_3 },
+ { 0x1902, KEY_NUMERIC_4 },
+ { 0x1904, KEY_NUMERIC_5 },
+ { 0x1905, KEY_NUMERIC_6 },
+ { 0x1907, KEY_NUMERIC_7 },
+ { 0x1908, KEY_NUMERIC_8 },
+ { 0x190f, KEY_NUMERIC_9 },
+
+ { 0x1921, KEY_VOLUMEUP },
+ { 0x1903, KEY_VOLUMEDOWN },
+ { 0x1906, KEY_MUTE },
+
+ { 0x1909, KEY_CAMERA }, /* Snap */
+ { 0x1922, KEY_SUBTITLE }, /* CC */
+ { 0x192b, KEY_INFO },
+
+ { 0x1929, KEY_END }, /* Skip to live TV */
+ { 0x190d, KEY_PLAYPAUSE },
+ { 0x1926, KEY_STOP },
+ { 0x192a, KEY_RECORD },
+ { 0x193a, KEY_PREVIOUS }, /* |< */
+ { 0x193b, KEY_REWIND }, /* << */
+ { 0x193c, KEY_FASTFORWARD }, /* >> */
+ { 0x193d, KEY_NEXT }, /* >| */
};
static struct rc_map_list rc5_hauppauge_new_map = {
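
Every new entry above carries the remote's RC-5 address in the high byte (0x19) and the command in the low byte; 0x1910 is address 0x19, command 0x10 (KEY_CHANNELUP). A hedged sketch of the composition, with a hypothetical helper that is not part of the keymap code:

    #include <linux/types.h>

    /* Hypothetical helper: RC-5 address in bits 15:8, command in bits 7:0. */
    static inline u32 rc5_scancode(u8 address, u8 command)
    {
        return ((u32)address << 8) | command;
    }

    /* rc5_scancode(0x19, 0x0a) == 0x190a, the KEY_LAST entry above. */
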
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index aa4ac43c66fa..5dafe11f61c6 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -552,7 +552,8 @@ EXPORT_SYMBOL(ir_raw_encode_scancode);
*/
static void ir_raw_edge_handle(struct timer_list *t)
{
- struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
+ struct ir_raw_event_ctrl *raw = timer_container_of(raw, t,
+ edge_handle);
struct rc_dev *dev = raw->dev;
unsigned long flags;
ktime_t interval;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index e46358fb8ac0..b9bf5cdcde4a 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -674,7 +674,7 @@ EXPORT_SYMBOL_GPL(rc_keyup);
*/
static void ir_timer_keyup(struct timer_list *t)
{
- struct rc_dev *dev = from_timer(dev, t, timer_keyup);
+ struct rc_dev *dev = timer_container_of(dev, t, timer_keyup);
unsigned long flags;
/*
@@ -703,7 +703,7 @@ static void ir_timer_keyup(struct timer_list *t)
*/
static void ir_timer_repeat(struct timer_list *t)
{
- struct rc_dev *dev = from_timer(dev, t, timer_repeat);
+ struct rc_dev *dev = timer_container_of(dev, t, timer_repeat);
struct input_dev *input = dev->input_dev;
unsigned long flags;
diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c
index 7838e6272712..f3023e91b3eb 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_channel.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c
@@ -497,7 +497,7 @@ free_sdt:
vidtv_psi_sdt_table_destroy(m->si.sdt);
free_pat:
vidtv_psi_pat_table_destroy(m->si.pat);
- return 0;
+ return -EINVAL;
}
void vidtv_channel_si_destroy(struct vidtv_mux *m)
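
The one-line change matters because the function unwinds through free_sdt/free_pat on failure but previously fell through to return 0, so callers treated a failed table build as success. The intended shape is the usual goto-unwind idiom; a generic sketch with hypothetical allocator names:

    static int si_init(struct mux *m)
    {
        m->pat = pat_create();          /* hypothetical allocators */
        if (!m->pat)
            return -ENOMEM;

        m->sdt = sdt_create();
        if (!m->sdt)
            goto free_pat;

        return 0;

    free_pat:
        pat_destroy(m->pat);
        return -EINVAL;                 /* report failure, never 0 */
    }
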
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index 0fe97e208c02..1d1a9e768505 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -26,6 +26,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-common.h>
MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
@@ -42,6 +43,10 @@ static unsigned int default_transtime = 40; /* Max 25 fps */
module_param(default_transtime, uint, 0644);
MODULE_PARM_DESC(default_transtime, "default transaction time in ms");
+static unsigned int multiplanar = 1;
+module_param(multiplanar, uint, 0644);
+MODULE_PARM_DESC(multiplanar, "1 (default) creates a single-planar device, 2 creates a multiplanar device.");
+
#define MIN_W 32
#define MIN_H 32
#define MAX_W 640
@@ -134,7 +139,8 @@ static struct vim2m_fmt formats[] = {
struct vim2m_q_data {
unsigned int width;
unsigned int height;
- unsigned int sizeimage;
+ unsigned int num_mem_planes;
+ unsigned int sizeimage[VIDEO_MAX_PLANES];
unsigned int sequence;
struct vim2m_fmt *fmt;
};
@@ -193,6 +199,7 @@ struct vim2m_dev {
struct mutex dev_mutex;
struct v4l2_m2m_dev *m2m_dev;
+ bool multiplanar;
};
struct vim2m_ctx {
@@ -237,8 +244,10 @@ static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
{
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
return &ctx->q_data[V4L2_M2M_SRC];
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return &ctx->q_data[V4L2_M2M_DST];
default:
return NULL;
@@ -249,8 +258,10 @@ static const char *type_name(enum v4l2_buf_type type)
{
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
return "Output";
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return "Capture";
default:
return "Invalid";
@@ -720,6 +731,7 @@ static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
{
struct vb2_queue *vq;
struct vim2m_q_data *q_data;
+ int ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
@@ -729,12 +741,12 @@ static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
if (!q_data)
return -EINVAL;
- f->fmt.pix.width = q_data->width;
- f->fmt.pix.height = q_data->height;
+ ret = v4l2_fill_pixfmt(&f->fmt.pix, q_data->fmt->fourcc,
+ q_data->width, q_data->height);
+ if (ret)
+ return ret;
+
f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.pixelformat = q_data->fmt->fourcc;
- f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
- f->fmt.pix.sizeimage = q_data->sizeimage;
f->fmt.pix.colorspace = ctx->colorspace;
f->fmt.pix.xfer_func = ctx->xfer_func;
f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
@@ -743,43 +755,102 @@ static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
return 0;
}
+static int vidioc_g_fmt_mplane(struct vim2m_ctx *ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct vim2m_q_data *q_data;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ ret = v4l2_fill_pixfmt_mp(&f->fmt.pix_mp, q_data->fmt->fourcc,
+ q_data->width, q_data->height);
+ if (ret)
+ return ret;
+
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = ctx->colorspace;
+ f->fmt.pix_mp.xfer_func = ctx->xfer_func;
+ f->fmt.pix_mp.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix_mp.quantization = ctx->quant;
+
+ return 0;
+}
+
static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+
return vidioc_g_fmt(file2ctx(file), f);
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+
return vidioc_g_fmt(file2ctx(file), f);
}
-static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt)
+static int vidioc_g_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
{
- int walign, halign;
- /*
- * V4L2 specification specifies the driver corrects the
- * format struct if any of the dimensions is unsupported
- */
- if (f->fmt.pix.height < MIN_H)
- f->fmt.pix.height = MIN_H;
- else if (f->fmt.pix.height > MAX_H)
- f->fmt.pix.height = MAX_H;
-
- if (f->fmt.pix.width < MIN_W)
- f->fmt.pix.width = MIN_W;
- else if (f->fmt.pix.width > MAX_W)
- f->fmt.pix.width = MAX_W;
-
- get_alignment(f->fmt.pix.pixelformat, &walign, &halign);
- f->fmt.pix.width &= ~(walign - 1);
- f->fmt.pix.height &= ~(halign - 1);
- f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
- f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+
+ return vidioc_g_fmt_mplane(file2ctx(file), f);
+}
+
+static int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+
+ return vidioc_g_fmt_mplane(file2ctx(file), f);
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, bool is_mplane)
+{
+ int walign, halign, ret;
+ int width = (is_mplane) ? f->fmt.pix_mp.width : f->fmt.pix.width;
+ int height = (is_mplane) ? f->fmt.pix_mp.height : f->fmt.pix.height;
+ u32 pixfmt = (is_mplane) ? f->fmt.pix_mp.pixelformat :
+ f->fmt.pix.pixelformat;
+
+ width = clamp(width, MIN_W, MAX_W);
+ height = clamp(height, MIN_H, MAX_H);
+
+ get_alignment(pixfmt, &walign, &halign);
+ width = ALIGN(width, walign);
+ height = ALIGN(height, halign);
+
f->fmt.pix.field = V4L2_FIELD_NONE;
- return 0;
+ if (is_mplane) {
+ ret = v4l2_fill_pixfmt_mp(&f->fmt.pix_mp, pixfmt, width,
+ height);
+ } else {
+ ret = v4l2_fill_pixfmt(&f->fmt.pix, pixfmt, width, height);
+ }
+ return ret;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
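
The rewritten vidioc_try_fmt() swaps the open-coded range checks for clamp() and the down-masking (`width &= ~(walign - 1)`) for ALIGN(), which rounds up, then defers bytesperline/sizeimage to v4l2_fill_pixfmt{,_mp}(). Rounding up stays within the clamped range as long as the MIN/MAX bounds are themselves multiples of the alignment, which holds for the bounds above. A runnable plain-C sketch of the arithmetic, with hypothetical request and alignment values:

    #include <stdio.h>

    #define MIN_W 32
    #define MAX_W 640
    /* Round x up to a multiple of a (a must be a power of two). */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int width = 637, walign = 8;    /* hypothetical */

        width = width < MIN_W ? MIN_W : width > MAX_W ? MAX_W : width;
        width = ALIGN_UP(width, walign);    /* 637 rounds up to 640 */
        printf("width = %d\n", width);
        return 0;
    }
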
@@ -787,6 +858,10 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct vim2m_fmt *fmt;
struct vim2m_ctx *ctx = file2ctx(file);
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
fmt = find_format(f->fmt.pix.pixelformat);
if (!fmt) {
@@ -804,7 +879,36 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
f->fmt.pix.quantization = ctx->quant;
- return vidioc_try_fmt(f, fmt);
+ return vidioc_try_fmt(f, false);
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vim2m_fmt *fmt;
+ struct vim2m_ctx *ctx = file2ctx(file);
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+
+ fmt = find_format(f->fmt.pix_mp.pixelformat);
+ if (!fmt) {
+ f->fmt.pix_mp.pixelformat = formats[0].fourcc;
+ fmt = find_format(f->fmt.pix_mp.pixelformat);
+ }
+ if (!(fmt->types & MEM2MEM_CAPTURE)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix_mp.pixelformat);
+ return -EINVAL;
+ }
+ f->fmt.pix_mp.colorspace = ctx->colorspace;
+ f->fmt.pix_mp.xfer_func = ctx->xfer_func;
+ f->fmt.pix_mp.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix_mp.quantization = ctx->quant;
+
+ return vidioc_try_fmt(f, true);
}
static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
@@ -812,6 +916,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
{
struct vim2m_fmt *fmt;
struct vim2m_ctx *ctx = file2ctx(file);
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
fmt = find_format(f->fmt.pix.pixelformat);
if (!fmt) {
@@ -827,13 +935,45 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
if (!f->fmt.pix.colorspace)
f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
- return vidioc_try_fmt(f, fmt);
+ return vidioc_try_fmt(f, false);
+}
+
+static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vim2m_fmt *fmt;
+ struct vim2m_ctx *ctx = file2ctx(file);
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+
+ fmt = find_format(f->fmt.pix_mp.pixelformat);
+ if (!fmt) {
+ f->fmt.pix_mp.pixelformat = formats[0].fourcc;
+ fmt = find_format(f->fmt.pix_mp.pixelformat);
+ }
+ if (!(fmt->types & MEM2MEM_OUTPUT)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "Fourcc format (0x%08x) invalid.\n",
+ f->fmt.pix_mp.pixelformat);
+ return -EINVAL;
+ }
+ if (!f->fmt.pix_mp.colorspace)
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
+
+ return vidioc_try_fmt(f, true);
}
static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
{
struct vim2m_q_data *q_data;
struct vb2_queue *vq;
+ unsigned int i;
+ bool is_mplane = ctx->dev->multiplanar;
+ u32 pixfmt = (is_mplane) ? f->fmt.pix_mp.pixelformat : f->fmt.pix.pixelformat;
+ u32 width = (is_mplane) ? f->fmt.pix_mp.width : f->fmt.pix.width;
+ u32 height = (is_mplane) ? f->fmt.pix_mp.height : f->fmt.pix.height;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
@@ -848,11 +988,17 @@ static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
return -EBUSY;
}
- q_data->fmt = find_format(f->fmt.pix.pixelformat);
- q_data->width = f->fmt.pix.width;
- q_data->height = f->fmt.pix.height;
- q_data->sizeimage = q_data->width * q_data->height
- * q_data->fmt->depth >> 3;
+ q_data->fmt = find_format(pixfmt);
+ q_data->width = width;
+ q_data->height = height;
+ if (is_mplane) {
+ q_data->num_mem_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++)
+ q_data->sizeimage[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ } else {
+ q_data->sizeimage[0] = f->fmt.pix.sizeimage;
+ q_data->num_mem_planes = 1;
+ }
dprintk(ctx->dev, 1,
"Format for type %s: %dx%d (%d bpp), fmt: %c%c%c%c\n",
@@ -870,6 +1016,10 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
int ret;
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret)
@@ -878,12 +1028,32 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
return vidioc_s_fmt(file2ctx(file), f);
}
+static int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+ struct vim2m_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+
+ ret = vidioc_try_fmt_vid_cap_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(file2ctx(file), f);
+}
+
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vim2m_ctx *ctx = file2ctx(file);
+ struct vim2m_dev *dev = video_drvdata(file);
int ret;
+ if (dev->multiplanar)
+ return -ENOTTY;
+
ret = vidioc_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
@@ -898,6 +1068,30 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
}
+static int vidioc_s_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vim2m_ctx *ctx = file2ctx(file);
+ struct vim2m_dev *dev = video_drvdata(file);
+ int ret;
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+
+ ret = vidioc_try_fmt_vid_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = vidioc_s_fmt(file2ctx(file), f);
+ if (!ret) {
+ ctx->colorspace = f->fmt.pix_mp.colorspace;
+ ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ ctx->quant = f->fmt.pix_mp.quantization;
+ }
+ return ret;
+}
+
static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vim2m_ctx *ctx =
@@ -948,11 +1142,17 @@ static const struct v4l2_ioctl_ops vim2m_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out_mplane,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
@@ -981,23 +1181,32 @@ static int vim2m_queue_setup(struct vb2_queue *vq,
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(vq);
struct vim2m_q_data *q_data;
- unsigned int size, count = *nbuffers;
+ unsigned int size, p, count = *nbuffers;
q_data = get_q_data(ctx, vq->type);
if (!q_data)
return -EINVAL;
- size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
+ size = 0;
+ for (p = 0; p < q_data->num_mem_planes; p++)
+ size += q_data->sizeimage[p];
while (size * count > MEM2MEM_VID_MEM_LIMIT)
(count)--;
*nbuffers = count;
- if (*nplanes)
- return sizes[0] < size ? -EINVAL : 0;
-
- *nplanes = 1;
- sizes[0] = size;
+ if (*nplanes) {
+ if (*nplanes != q_data->num_mem_planes)
+ return -EINVAL;
+ for (p = 0; p < q_data->num_mem_planes; p++) {
+ if (sizes[p] < q_data->sizeimage[p])
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = q_data->num_mem_planes;
+ for (p = 0; p < q_data->num_mem_planes; p++)
+ sizes[p] = q_data->sizeimage[p];
+ }
dprintk(ctx->dev, 1, "%s: get %d buffer(s) of size %d each.\n",
type_name(vq->type), count, size);
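
The reworked vim2m_queue_setup() implements the two halves of the vb2 .queue_setup() contract: when *nplanes is already set (the VIDIOC_CREATE_BUFS path) the driver may only validate the caller's plane count and sizes, and when it is zero (the VIDIOC_REQBUFS path) it announces them. A userspace sketch of the call that drives the second path, assuming an already-open and already-configured fd:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Request four MMAP buffers; the driver fills in planes and sizes. */
    static int request_capture_buffers(int fd)
    {
        struct v4l2_requestbuffers req;

        memset(&req, 0, sizeof(req));
        req.count = 4;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        req.memory = V4L2_MEMORY_MMAP;

        return ioctl(fd, VIDIOC_REQBUFS, &req);    /* 0 on success */
    }
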
@@ -1024,21 +1233,24 @@ static int vim2m_buf_prepare(struct vb2_buffer *vb)
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vim2m_q_data *q_data;
+ unsigned int p;
dprintk(ctx->dev, 2, "type: %s\n", type_name(vb->vb2_queue->type));
q_data = get_q_data(ctx, vb->vb2_queue->type);
if (!q_data)
return -EINVAL;
- if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
- dprintk(ctx->dev, 1,
- "%s data will not fit into plane (%lu < %lu)\n",
- __func__, vb2_plane_size(vb, 0),
- (long)q_data->sizeimage);
- return -EINVAL;
- }
- vb2_set_plane_payload(vb, 0, q_data->sizeimage);
+ for (p = 0; p < q_data->num_mem_planes; p++) {
+ if (vb2_plane_size(vb, p) < q_data->sizeimage[p]) {
+ dprintk(ctx->dev, 1,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, p),
+ (long)q_data->sizeimage[p]);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, p, q_data->sizeimage[p]);
+ }
return 0;
}
@@ -1109,7 +1321,8 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vim2m_ctx *ctx = priv;
int ret;
- src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->type = (ctx->dev->multiplanar) ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
@@ -1123,7 +1336,8 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
if (ret)
return ret;
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->type = (ctx->dev->multiplanar) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
@@ -1197,10 +1411,11 @@ static int vim2m_open(struct file *file)
ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
ctx->q_data[V4L2_M2M_SRC].width = 640;
ctx->q_data[V4L2_M2M_SRC].height = 480;
- ctx->q_data[V4L2_M2M_SRC].sizeimage =
+ ctx->q_data[V4L2_M2M_SRC].sizeimage[0] =
ctx->q_data[V4L2_M2M_SRC].width *
ctx->q_data[V4L2_M2M_SRC].height *
(ctx->q_data[V4L2_M2M_SRC].fmt->depth >> 3);
+ ctx->q_data[V4L2_M2M_SRC].num_mem_planes = 1;
ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
ctx->colorspace = V4L2_COLORSPACE_REC709;
@@ -1277,7 +1492,7 @@ static const struct video_device vim2m_videodev = {
.ioctl_ops = &vim2m_ioctl_ops,
.minor = -1,
.release = vim2m_device_release,
- .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+ .device_caps = V4L2_CAP_STREAMING,
};
static const struct v4l2_m2m_ops m2m_ops = {
@@ -1308,10 +1523,14 @@ static int vim2m_probe(struct platform_device *pdev)
atomic_set(&dev->num_inst, 0);
mutex_init(&dev->dev_mutex);
+ dev->multiplanar = (multiplanar == 2);
+
dev->vfd = vim2m_videodev;
vfd = &dev->vfd;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->device_caps |= (dev->multiplanar) ? V4L2_CAP_VIDEO_M2M_MPLANE :
+ V4L2_CAP_VIDEO_M2M;
video_set_drvdata(vfd, dev);
platform_set_drvdata(pdev, dev);
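
Because device_caps is now split between the template (V4L2_CAP_STREAMING) and probe (the mem2mem flavour selected by the multiplanar parameter), userspace can tell the two device variants apart from VIDIOC_QUERYCAP alone. A hedged sketch, assuming an open fd on the vim2m node:

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* 1 for a multiplanar vim2m node, 0 for single-planar, -1 on error. */
    static int is_mplane_m2m(int fd)
    {
        struct v4l2_capability cap;

        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
            return -1;

        return !!(cap.device_caps & V4L2_CAP_VIDEO_M2M_MPLANE);
    }
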
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
index 273e8ed8c2a9..d845e1644649 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
@@ -165,13 +165,13 @@ static void vivid_precalc_copy_rects(struct vivid_dev *dev, struct vivid_dev *ou
v4l2_rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);
dprintk(dev, 1,
- "loop_vid_copy: %dx%d@%dx%d loop_vid_out: %dx%d@%dx%d loop_vid_cap: %dx%d@%dx%d\n",
- dev->loop_vid_copy.width, dev->loop_vid_copy.height,
+ "loop_vid_copy: (%d,%d)/%ux%u loop_vid_out: (%d,%d)/%ux%u loop_vid_cap: (%d,%d)/%ux%u\n",
dev->loop_vid_copy.left, dev->loop_vid_copy.top,
- dev->loop_vid_out.width, dev->loop_vid_out.height,
+ dev->loop_vid_copy.width, dev->loop_vid_copy.height,
dev->loop_vid_out.left, dev->loop_vid_out.top,
- dev->loop_vid_cap.width, dev->loop_vid_cap.height,
- dev->loop_vid_cap.left, dev->loop_vid_cap.top);
+ dev->loop_vid_out.width, dev->loop_vid_out.height,
+ dev->loop_vid_cap.left, dev->loop_vid_cap.top,
+ dev->loop_vid_cap.width, dev->loop_vid_cap.height);
v4l2_rect_intersect(&r_overlay, &r_fb, &r_overlay);
@@ -190,13 +190,13 @@ static void vivid_precalc_copy_rects(struct vivid_dev *dev, struct vivid_dev *ou
v4l2_rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);
dprintk(dev, 1,
- "loop_fb_copy: %dx%d@%dx%d loop_vid_overlay: %dx%d@%dx%d loop_vid_overlay_cap: %dx%d@%dx%d\n",
- dev->loop_fb_copy.width, dev->loop_fb_copy.height,
+ "loop_fb_copy: (%d,%d)/%ux%u loop_vid_overlay: (%d,%d)/%ux%u loop_vid_overlay_cap: (%d,%d)/%ux%u\n",
dev->loop_fb_copy.left, dev->loop_fb_copy.top,
- dev->loop_vid_overlay.width, dev->loop_vid_overlay.height,
+ dev->loop_fb_copy.width, dev->loop_fb_copy.height,
dev->loop_vid_overlay.left, dev->loop_vid_overlay.top,
- dev->loop_vid_overlay_cap.width, dev->loop_vid_overlay_cap.height,
- dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top);
+ dev->loop_vid_overlay.width, dev->loop_vid_overlay.height,
+ dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top,
+ dev->loop_vid_overlay_cap.width, dev->loop_vid_overlay_cap.height);
}
static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index b166d90177c6..84e9155b5815 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -25,16 +25,18 @@
/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[] = {
{ 320, 180 },
+ { 320, 240 },
{ 640, 360 },
{ 640, 480 },
{ 1280, 720 },
+ { 1280, 960 },
+ { 1600, 1200 },
{ 1920, 1080 },
{ 3840, 2160 },
};
/*
- * Intervals must be in increasing order and there must be twice as many
- * elements in this array as there are in webcam_sizes.
+ * Intervals must be in increasing order.
*/
static const struct v4l2_fract webcam_intervals[] = {
{ 1, 1 },
@@ -946,8 +948,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
if (dev->has_compose_cap) {
v4l2_rect_set_min_size(compose, &min_rect);
v4l2_rect_set_max_size(compose, &max_rect);
- v4l2_rect_map_inside(compose, &fmt);
}
+ v4l2_rect_map_inside(compose, &fmt);
dev->fmt_cap_rect = fmt;
tpg_s_buf_height(&dev->tpg, fmt.height);
} else if (dev->has_compose_cap) {
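
Hoisting v4l2_rect_map_inside() out of the has_compose_cap branch guarantees the compose rectangle is re-fitted to the new format even on devices without compose support, where it previously kept stale coordinates. A simplified one-dimensional model of what the helper guarantees (a sketch, not the v4l2 implementation):

    /* Shrink the span to fit, then shift it inside [b_left, b_left + b_width). */
    static void map_inside_1d(int *left, int *width, int b_left, int b_width)
    {
        if (*width > b_width)
            *width = b_width;
        if (*left < b_left)
            *left = b_left;
        if (*left + *width > b_left + b_width)
            *left = b_left + b_width - *width;
    }
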
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 3666f4452d11..5d0447ff7d06 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -97,7 +97,7 @@ static void au0828_restart_dvb_streaming(struct work_struct *work);
static void au0828_bulk_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);
+ struct au0828_dev *dev = timer_container_of(dev, t, bulk_timeout);
dprintk(1, "%s called\n", __func__);
dev->bulk_timeout_running = 0;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 33d1fad0f7b8..e5dff969ed57 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -948,7 +948,7 @@ int au0828_analog_unregister(struct au0828_dev *dev)
such as tvtime from hanging) */
static void au0828_vid_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = from_timer(dev, t, vid_timeout);
+ struct au0828_dev *dev = timer_container_of(dev, t, vid_timeout);
struct au0828_dmaqueue *dma_q = &dev->vidq;
struct au0828_buffer *buf;
unsigned char *vid_data;
@@ -972,7 +972,7 @@ static void au0828_vid_buffer_timeout(struct timer_list *t)
static void au0828_vbi_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = from_timer(dev, t, vbi_timeout);
+ struct au0828_dev *dev = timer_container_of(dev, t, vbi_timeout);
struct au0828_dmaqueue *dma_q = &dev->vbiq;
struct au0828_buffer *buf;
unsigned char *vbi_data;
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index f44529b40989..d0501c1e81d6 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -119,9 +119,8 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)
o[0] = GPIO_TUNER;
o[1] = onoff;
- cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
- if (i != 0x01)
+ if (!cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1) && i != 0x01)
dev_info(&d->udev->dev, "gpio_write failed.\n");
st->gpio_write_state[GPIO_TUNER] = onoff;
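
cxusb_ctrl_msg() returns 0 on success, so the combined test only inspects the status byte when the transfer actually completed; before the change a failed control message could leave the status byte uninitialized yet still compared against 0x01. A sketch of the pattern in a hypothetical wrapper (not part of the driver):

    static void gpio_write_checked(struct dvb_usb_device *d, u8 *out, u8 len)
    {
        u8 status = 0;

        /* Trust the status byte only if the transfer returned 0. */
        if (!cxusb_ctrl_msg(d, CMD_GPIO_WRITE, out, len, &status, 1) &&
            status != 0x01)
            dev_info(&d->udev->dev, "gpio_write failed.\n");
    }
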
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 66c09bc6d59e..2dfa3242a7ab 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -264,7 +264,7 @@ static void em28xx_capture_area_set(struct em28xx *dev, u8 hstart, u8 vstart,
u8 overflow = (height >> 9 & 0x02) | (width >> 10 & 0x01);
/* NOTE: size limit: 2047x1023 = 2MPix */
- em28xx_videodbg("capture area set to (%d,%d): %dx%d\n",
+ em28xx_videodbg("capture area set to (%u,%u)/%ux%u\n",
hstart, vstart,
((overflow & 2) << 9 | cwidth << 2),
((overflow & 1) << 10 | cheight << 2));
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
index 5a47dcbf1c8e..303b055fefea 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_hdcs.c
@@ -520,12 +520,13 @@ static int hdcs_init(struct sd *sd)
static int hdcs_dump(struct sd *sd)
{
u16 reg, val;
+ int err = 0;
pr_info("Dumping sensor registers:\n");
- for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH; reg++) {
- stv06xx_read_sensor(sd, reg, &val);
+ for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH && !err; reg++) {
+ err = stv06xx_read_sensor(sd, reg, &val);
pr_info("reg 0x%02x = 0x%02x\n", reg, val);
}
- return 0;
+ return (err < 0) ? err : 0;
}
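
The dump loop now latches the first stv06xx_read_sensor() failure, stops iterating, and propagates the error instead of unconditionally returning 0. One quirk remains: the pr_info() still runs for the failed iteration, printing whatever `val` last held. A stricter variant would skip that print; a sketch with hypothetical register bounds and accessor:

    static int dump_regs(struct sd *sd)
    {
        u16 reg, val;
        int err = 0;

        for (reg = FIRST_REG; reg <= LAST_REG; reg++) {
            err = read_sensor(sd, reg, &val);    /* hypothetical accessor */
            if (err < 0)
                break;              /* don't print a stale value */
            pr_info("reg 0x%02x = 0x%02x\n", reg, val);
        }
        return err < 0 ? err : 0;
    }
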
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 9a583eeaa329..f21c2806eb9f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -3562,7 +3562,7 @@ struct hdw_timer {
static void pvr2_ctl_timeout(struct timer_list *t)
{
- struct hdw_timer *timer = from_timer(timer, t, timer);
+ struct hdw_timer *timer = timer_container_of(timer, t, timer);
struct pvr2_hdw *hdw = timer->hdw;
if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
@@ -3806,7 +3806,7 @@ status);
if ((status < 0) && (!probe_fl)) {
pvr2_hdw_render_useless(hdw);
}
- destroy_timer_on_stack(&timer.timer);
+ timer_destroy_on_stack(&timer.timer);
return status;
}
@@ -4421,7 +4421,7 @@ static int state_eval_encoder_run(struct pvr2_hdw *hdw)
/* Timeout function for quiescent timer. */
static void pvr2_hdw_quiescent_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = from_timer(hdw, t, quiescent_timer);
+ struct pvr2_hdw *hdw = timer_container_of(hdw, t, quiescent_timer);
hdw->state_decoder_quiescent = !0;
trace_stbit("state_decoder_quiescent",hdw->state_decoder_quiescent);
hdw->state_stale = !0;
@@ -4432,7 +4432,8 @@ static void pvr2_hdw_quiescent_timeout(struct timer_list *t)
/* Timeout function for decoder stabilization timer. */
static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = from_timer(hdw, t, decoder_stabilization_timer);
+ struct pvr2_hdw *hdw = timer_container_of(hdw, t,
+ decoder_stabilization_timer);
hdw->state_decoder_ready = !0;
trace_stbit("state_decoder_ready", hdw->state_decoder_ready);
hdw->state_stale = !0;
@@ -4443,7 +4444,7 @@ static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *t)
/* Timeout function for encoder wait timer. */
static void pvr2_hdw_encoder_wait_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_wait_timer);
+ struct pvr2_hdw *hdw = timer_container_of(hdw, t, encoder_wait_timer);
hdw->state_encoder_waitok = !0;
trace_stbit("state_encoder_waitok",hdw->state_encoder_waitok);
hdw->state_stale = !0;
@@ -4454,7 +4455,7 @@ static void pvr2_hdw_encoder_wait_timeout(struct timer_list *t)
/* Timeout function for encoder run timer. */
static void pvr2_hdw_encoder_run_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_run_timer);
+ struct pvr2_hdw *hdw = timer_container_of(hdw, t, encoder_run_timer);
if (!hdw->state_encoder_runok) {
hdw->state_encoder_runok = !0;
trace_stbit("state_encoder_runok",hdw->state_encoder_runok);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index e7ab41401577..81c994e62241 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -212,173 +212,6 @@ unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
}
-// Template data for possible enumerated video standards. Here we group
-// standards which share common frame rates and resolution.
-static struct v4l2_standard generic_standards[] = {
- {
- .id = (TSTD_B|TSTD_B1|
- TSTD_D|TSTD_D1|
- TSTD_G|
- TSTD_H|
- TSTD_I|
- TSTD_K|TSTD_K1|
- TSTD_L|
- V4L2_STD_SECAM_LC |
- TSTD_N|TSTD_Nc),
- .frameperiod =
- {
- .numerator = 1,
- .denominator= 25
- },
- .framelines = 625,
- .reserved = {0,0,0,0}
- }, {
- .id = (TSTD_M|
- V4L2_STD_NTSC_M_JP|
- V4L2_STD_NTSC_M_KR),
- .frameperiod =
- {
- .numerator = 1001,
- .denominator= 30000
- },
- .framelines = 525,
- .reserved = {0,0,0,0}
- }, { // This is a total wild guess
- .id = (TSTD_60),
- .frameperiod =
- {
- .numerator = 1001,
- .denominator= 30000
- },
- .framelines = 525,
- .reserved = {0,0,0,0}
- }, { // This is total wild guess
- .id = V4L2_STD_NTSC_443,
- .frameperiod =
- {
- .numerator = 1001,
- .denominator= 30000
- },
- .framelines = 525,
- .reserved = {0,0,0,0}
- }
-};
-
-static struct v4l2_standard *match_std(v4l2_std_id id)
-{
- unsigned int idx;
- for (idx = 0; idx < ARRAY_SIZE(generic_standards); idx++) {
- if (generic_standards[idx].id & id) {
- return generic_standards + idx;
- }
- }
- return NULL;
-}
-
-static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id)
-{
- struct v4l2_standard *template;
- int idx;
- unsigned int bcnt;
- template = match_std(id);
- if (!template) return 0;
- idx = std->index;
- memcpy(std,template,sizeof(*template));
- std->index = idx;
- std->id = id;
- bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id);
- std->name[bcnt] = 0;
- pvr2_trace(PVR2_TRACE_STD,"Set up standard idx=%u name=%s",
- std->index,std->name);
- return !0;
-}
-
-/* These are special cases of combined standards that we should enumerate
- separately if the component pieces are present. */
-static v4l2_std_id std_mixes[] = {
- V4L2_STD_PAL_B | V4L2_STD_PAL_G,
- V4L2_STD_PAL_D | V4L2_STD_PAL_K,
- V4L2_STD_SECAM_B | V4L2_STD_SECAM_G,
- V4L2_STD_SECAM_D | V4L2_STD_SECAM_K,
-};
-
-struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
- v4l2_std_id id)
-{
- unsigned int std_cnt = 0;
- unsigned int idx,bcnt,idx2;
- v4l2_std_id idmsk,cmsk,fmsk;
- struct v4l2_standard *stddefs;
-
- if (pvrusb2_debug & PVR2_TRACE_STD) {
- char buf[100];
- bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id);
- pvr2_trace(
- PVR2_TRACE_STD,"Mapping standards mask=0x%x (%.*s)",
- (int)id,bcnt,buf);
- }
-
- *countptr = 0;
- std_cnt = 0;
- fmsk = 0;
- for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) {
- if (!(idmsk & cmsk)) continue;
- cmsk &= ~idmsk;
- if (match_std(idmsk)) {
- std_cnt++;
- continue;
- }
- fmsk |= idmsk;
- }
-
- for (idx2 = 0; idx2 < ARRAY_SIZE(std_mixes); idx2++) {
- if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++;
- }
-
- /* Don't complain about ATSC standard values */
- fmsk &= ~CSTD_ATSC;
-
- if (fmsk) {
- char buf[100];
- bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
- pvr2_trace(
- PVR2_TRACE_ERROR_LEGS,
- "***WARNING*** Failed to classify the following standard(s): %.*s",
- bcnt,buf);
- }
-
- pvr2_trace(PVR2_TRACE_STD,"Setting up %u unique standard(s)",
- std_cnt);
- if (!std_cnt) return NULL; // paranoia
-
- stddefs = kcalloc(std_cnt, sizeof(struct v4l2_standard),
- GFP_KERNEL);
- if (!stddefs)
- return NULL;
-
- for (idx = 0; idx < std_cnt; idx++)
- stddefs[idx].index = idx;
-
- idx = 0;
-
- /* Enumerate potential special cases */
- for (idx2 = 0; (idx2 < ARRAY_SIZE(std_mixes)) && (idx < std_cnt);
- idx2++) {
- if (!(id & std_mixes[idx2])) continue;
- if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++;
- }
- /* Now enumerate individual pieces */
- for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) {
- if (!(idmsk & cmsk)) continue;
- cmsk &= ~idmsk;
- if (!pvr2_std_fill(stddefs+idx,idmsk)) continue;
- idx++;
- }
-
- *countptr = std_cnt;
- return stddefs;
-}
-
v4l2_std_id pvr2_std_get_usable(void)
{
return CSTD_ALL;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.h b/drivers/media/usb/pvrusb2/pvrusb2-std.h
index d8b4c6dc72fe..74b05ecb9708 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.h
@@ -23,12 +23,6 @@ int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr,
unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
v4l2_std_id id);
-// Create an array of suitable v4l2_standard structures given a bit mask of
-// video standards to support. The array is allocated from the heap, and
-// the number of elements is returned in the first argument.
-struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
- v4l2_std_id id);
-
// Return mask of which video standard bits are valid
v4l2_std_id pvr2_std_get_usable(void);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 899a7a67e2ba..8332f2c5aed7 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -471,7 +471,7 @@ static void s2255_reset_dsppower(struct s2255_dev *dev)
*/
static void s2255_timer(struct timer_list *t)
{
- struct s2255_dev *dev = from_timer(dev, t, timer);
+ struct s2255_dev *dev = timer_container_of(dev, t, timer);
struct s2255_fw *data = dev->fw_data;
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
pr_err("s2255: can't submit urb\n");
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index cbf19aa1d823..44b6513c5264 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1812,38 +1812,49 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
}
-static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
- struct uvc_fh *new_handle)
+static int uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
+ struct uvc_fh *new_handle)
{
lockdep_assert_held(&handle->chain->ctrl_mutex);
if (new_handle) {
+ int ret;
+
if (ctrl->handle)
dev_warn_ratelimited(&handle->stream->dev->udev->dev,
"UVC non compliance: Setting an async control with a pending operation.");
if (new_handle == ctrl->handle)
- return;
+ return 0;
if (ctrl->handle) {
WARN_ON(!ctrl->handle->pending_async_ctrls);
if (ctrl->handle->pending_async_ctrls)
ctrl->handle->pending_async_ctrls--;
+ ctrl->handle = new_handle;
+ handle->pending_async_ctrls++;
+ return 0;
}
+ ret = uvc_pm_get(handle->chain->dev);
+ if (ret)
+ return ret;
+
ctrl->handle = new_handle;
handle->pending_async_ctrls++;
- return;
+ return 0;
}
/* Cannot clear the handle for a control not owned by us.*/
if (WARN_ON(ctrl->handle != handle))
- return;
+ return -EINVAL;
ctrl->handle = NULL;
if (WARN_ON(!handle->pending_async_ctrls))
- return;
+ return -EINVAL;
handle->pending_async_ctrls--;
+ uvc_pm_put(handle->chain->dev);
+ return 0;
}
void uvc_ctrl_status_event(struct uvc_video_chain *chain,
@@ -1943,7 +1954,9 @@ static bool uvc_ctrl_xctrls_has_control(const struct v4l2_ext_control *xctrls,
}
static void uvc_ctrl_send_events(struct uvc_fh *handle,
- const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
+ struct uvc_entity *entity,
+ const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count)
{
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
@@ -1955,6 +1968,9 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
s32 value;
ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+ if (ctrl->entity != entity)
+ continue;
+
if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
/* Notification will be sent from an Interrupt event. */
continue;
@@ -2090,15 +2106,20 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
return mutex_lock_interruptible(&chain->ctrl_mutex) ? -ERESTARTSYS : 0;
}
+/*
+ * Returns the number of UVC controls that were successfully set, or a
+ * negative error code on failure.
+ */
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
struct uvc_fh *handle,
struct uvc_entity *entity,
int rollback,
struct uvc_control **err_ctrl)
{
+ unsigned int processed_ctrls = 0;
struct uvc_control *ctrl;
unsigned int i;
- int ret;
+ int ret = 0;
if (entity == NULL)
return 0;
@@ -2127,8 +2148,9 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
dev->intfnum, ctrl->info.selector,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
ctrl->info.size);
- else
- ret = 0;
+
+ if (!ret)
+ processed_ctrls++;
if (rollback || ret < 0)
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
@@ -2137,18 +2159,25 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
ctrl->dirty = 0;
- if (ret < 0) {
+ if (!rollback && handle && !ret &&
+ ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+ ret = uvc_ctrl_set_handle(handle, ctrl, handle);
+
+ if (ret < 0 && !rollback) {
if (err_ctrl)
*err_ctrl = ctrl;
- return ret;
+ /*
+ * If we fail to set a control, we need to roll back
+ * the next ones.
+ */
+ rollback = 1;
}
-
- if (!rollback && handle &&
- ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
- uvc_ctrl_set_handle(handle, ctrl, handle);
}
- return 0;
+ if (ret)
+ return ret;
+
+ return processed_ctrls;
}
static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
@@ -2178,7 +2207,8 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
struct uvc_video_chain *chain = handle->chain;
struct uvc_control *err_ctrl;
struct uvc_entity *entity;
- int ret = 0;
+ int ret_out = 0;
+ int ret;
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
@@ -2189,15 +2219,23 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
ctrls->error_idx =
uvc_ctrl_find_ctrl_idx(entity, ctrls,
err_ctrl);
- goto done;
+ /*
+ * When we fail to commit an entity, we need to
+ * restore the UVC_CTRL_DATA_BACKUP for all the
+ * controls in the other entities, otherwise our cache
+ * and the hardware will be out of sync.
+ */
+ rollback = 1;
+
+ ret_out = ret;
+ } else if (ret > 0 && !rollback) {
+ uvc_ctrl_send_events(handle, entity,
+ ctrls->controls, ctrls->count);
}
}
- if (!rollback)
- uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
-done:
mutex_unlock(&chain->ctrl_mutex);
- return ret;
+ return ret_out;
}
static int uvc_mapping_get_xctrl_compound(struct uvc_video_chain *chain,
@@ -3222,6 +3260,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
{
struct uvc_entity *entity;
+ int i;
guard(mutex)(&handle->chain->ctrl_mutex);
@@ -3236,7 +3275,11 @@ void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
}
}
- WARN_ON(handle->pending_async_ctrls);
+ if (!WARN_ON(handle->pending_async_ctrls))
+ return;
+
+ for (i = 0; i < handle->pending_async_ctrls; i++)
+ uvc_pm_put(handle->stream->dev);
}
/*
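
The asynchronous-control bookkeeping above pairs one uvc_pm_get() with each control a file handle takes ownership of and one uvc_pm_put() when the event is delivered or ownership is cleared; the cleanup loop releases whatever references a closing handle still holds so the camera can autosuspend. A hedged sketch of the pairing (not driver code; error handling of uvc_pm_get() elided):

    /* Take ownership of a pending async control: keep the device powered. */
    static void ctrl_take(struct uvc_fh *handle, struct uvc_device *dev)
    {
        uvc_pm_get(dev);
        handle->pending_async_ctrls++;
    }

    /* Event delivered (or ownership cleared): drop the matching reference. */
    static void ctrl_release(struct uvc_fh *handle, struct uvc_device *dev)
    {
        handle->pending_async_ctrls--;
        uvc_pm_put(dev);
    }
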
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 107e0fafd80f..da24a655ab68 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1299,8 +1299,13 @@ static int uvc_gpio_parse(struct uvc_device *dev)
gpio_privacy = devm_gpiod_get_optional(&dev->intf->dev, "privacy",
GPIOD_IN);
- if (IS_ERR_OR_NULL(gpio_privacy))
- return PTR_ERR_OR_ZERO(gpio_privacy);
+ if (!gpio_privacy)
+ return 0;
+
+ if (IS_ERR(gpio_privacy))
+ return dev_err_probe(&dev->intf->dev,
+ PTR_ERR(gpio_privacy),
+ "Can't get privacy GPIO\n");
irq = gpiod_to_irq(gpio_privacy);
if (irq < 0)
@@ -2232,16 +2237,17 @@ static int uvc_probe(struct usb_interface *intf,
#endif
/* Parse the Video Class control descriptor. */
- if (uvc_parse_control(dev) < 0) {
+ ret = uvc_parse_control(dev);
+ if (ret < 0) {
+ ret = -ENODEV;
uvc_dbg(dev, PROBE, "Unable to parse UVC descriptors\n");
goto error;
}
/* Parse the associated GPIOs. */
- if (uvc_gpio_parse(dev) < 0) {
- uvc_dbg(dev, PROBE, "Unable to parse UVC GPIOs\n");
+ ret = uvc_gpio_parse(dev);
+ if (ret < 0)
goto error;
- }
dev_info(&dev->udev->dev, "Found UVC %u.%02x device %s (%04x:%04x)\n",
dev->uvc_version >> 8, dev->uvc_version & 0xff,
@@ -2264,24 +2270,32 @@ static int uvc_probe(struct usb_interface *intf,
}
/* Register the V4L2 device. */
- if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
+ ret = v4l2_device_register(&intf->dev, &dev->vdev);
+ if (ret < 0)
goto error;
/* Scan the device for video chains. */
- if (uvc_scan_device(dev) < 0)
+ if (uvc_scan_device(dev) < 0) {
+ ret = -ENODEV;
goto error;
+ }
/* Initialize controls. */
- if (uvc_ctrl_init_device(dev) < 0)
+ if (uvc_ctrl_init_device(dev) < 0) {
+ ret = -ENODEV;
goto error;
+ }
/* Register video device nodes. */
- if (uvc_register_chains(dev) < 0)
+ if (uvc_register_chains(dev) < 0) {
+ ret = -ENODEV;
goto error;
+ }
#ifdef CONFIG_MEDIA_CONTROLLER
/* Register the media device node */
- if (media_device_register(&dev->mdev) < 0)
+ ret = media_device_register(&dev->mdev);
+ if (ret < 0)
goto error;
#endif
/* Save our data pointer in the interface data. */
@@ -2315,7 +2329,7 @@ static int uvc_probe(struct usb_interface *intf,
error:
uvc_unregister_video(dev);
kref_put(&dev->ref, uvc_delete);
- return -ENODEV;
+ return ret;
}
static void uvc_disconnect(struct usb_interface *intf)
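
uvc_probe() now records every failure in ret so the USB core receives the real errno rather than a blanket -ENODEV; steps whose helpers do not return a meaningful code still set -ENODEV explicitly. The resulting shape, reduced to a generic sketch with hypothetical step names:

    static int probe(struct usb_interface *intf)
    {
        int ret;

        ret = parse_descriptors(dev);       /* hypothetical step */
        if (ret < 0) {
            ret = -ENODEV;                  /* keep the historical errno */
            goto error;
        }

        ret = register_device(dev);         /* hypothetical step */
        if (ret < 0)
            goto error;                     /* propagate the real errno */

        return 0;

    error:
        unwind(dev);
        return ret;                         /* no longer hard-coded */
    }
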
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 39065db44e86..668a4e9d772c 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -26,6 +26,27 @@
#include "uvcvideo.h"
+int uvc_pm_get(struct uvc_device *dev)
+{
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret)
+ return ret;
+
+ ret = uvc_status_get(dev);
+ if (ret)
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
+void uvc_pm_put(struct uvc_device *dev)
+{
+ uvc_status_put(dev);
+ usb_autopm_put_interface(dev->intf);
+}
+
static int uvc_acquire_privileges(struct uvc_fh *handle);
static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain,
@@ -637,28 +658,14 @@ static int uvc_v4l2_open(struct file *file)
{
struct uvc_streaming *stream;
struct uvc_fh *handle;
- int ret = 0;
stream = video_drvdata(file);
uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
- ret = usb_autopm_get_interface(stream->dev->intf);
- if (ret < 0)
- return ret;
-
/* Create the device handle. */
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (handle == NULL) {
- usb_autopm_put_interface(stream->dev->intf);
+ if (!handle)
return -ENOMEM;
- }
-
- ret = uvc_status_get(stream->dev);
- if (ret) {
- usb_autopm_put_interface(stream->dev->intf);
- kfree(handle);
- return ret;
- }
v4l2_fh_init(&handle->vfh, &stream->vdev);
v4l2_fh_add(&handle->vfh);
@@ -683,6 +690,9 @@ static int uvc_v4l2_release(struct file *file)
if (uvc_has_privileges(handle))
uvc_queue_release(&stream->queue);
+ if (handle->is_streaming)
+ uvc_pm_put(stream->dev);
+
/* Release the file handle. */
uvc_dismiss_privileges(handle);
v4l2_fh_del(&handle->vfh);
@@ -690,9 +700,6 @@ static int uvc_v4l2_release(struct file *file)
kfree(handle);
file->private_data = NULL;
- uvc_status_put(stream->dev);
-
- usb_autopm_put_interface(stream->dev->intf);
return 0;
}
@@ -841,11 +848,23 @@ static int uvc_ioctl_streamon(struct file *file, void *fh,
if (!uvc_has_privileges(handle))
return -EBUSY;
- mutex_lock(&stream->mutex);
+ guard(mutex)(&stream->mutex);
+
+ if (handle->is_streaming)
+ return 0;
+
ret = uvc_queue_streamon(&stream->queue, type);
- mutex_unlock(&stream->mutex);
+ if (ret)
+ return ret;
- return ret;
+ ret = uvc_pm_get(stream->dev);
+ if (ret) {
+ uvc_queue_streamoff(&stream->queue, type);
+ return ret;
+ }
+ handle->is_streaming = true;
+
+ return 0;
}
static int uvc_ioctl_streamoff(struct file *file, void *fh,
@@ -857,9 +876,13 @@ static int uvc_ioctl_streamoff(struct file *file, void *fh,
if (!uvc_has_privileges(handle))
return -EBUSY;
- mutex_lock(&stream->mutex);
+ guard(mutex)(&stream->mutex);
+
uvc_queue_streamoff(&stream->queue, type);
- mutex_unlock(&stream->mutex);
+ if (handle->is_streaming) {
+ handle->is_streaming = false;
+ uvc_pm_put(stream->dev);
+ }
return 0;
}
@@ -1358,9 +1381,11 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
#define UVCIOC_CTRL_MAP32 _IOWR('u', 0x20, struct uvc_xu_control_mapping32)
#define UVCIOC_CTRL_QUERY32 _IOWR('u', 0x21, struct uvc_xu_control_query32)
+DEFINE_FREE(uvc_pm_put, struct uvc_device *, if (_T) uvc_pm_put(_T))
static long uvc_v4l2_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct uvc_device *uvc_device __free(uvc_pm_put) = NULL;
struct uvc_fh *handle = file->private_data;
union {
struct uvc_xu_control_mapping xmap;
@@ -1369,6 +1394,12 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
void __user *up = compat_ptr(arg);
long ret;
+ ret = uvc_pm_get(handle->stream->dev);
+ if (ret)
+ return ret;
+
+ uvc_device = handle->stream->dev;
+
switch (cmd) {
case UVCIOC_CTRL_MAP32:
ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
@@ -1403,6 +1434,42 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
}
#endif
+static long uvc_v4l2_unlocked_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct uvc_fh *handle = file->private_data;
+ int ret;
+
+ /* The following IOCTLs do not need to turn on the camera. */
+ switch (cmd) {
+ case VIDIOC_CREATE_BUFS:
+ case VIDIOC_DQBUF:
+ case VIDIOC_ENUM_FMT:
+ case VIDIOC_ENUM_FRAMEINTERVALS:
+ case VIDIOC_ENUM_FRAMESIZES:
+ case VIDIOC_ENUMINPUT:
+ case VIDIOC_EXPBUF:
+ case VIDIOC_G_FMT:
+ case VIDIOC_G_PARM:
+ case VIDIOC_G_SELECTION:
+ case VIDIOC_QBUF:
+ case VIDIOC_QUERYCAP:
+ case VIDIOC_REQBUFS:
+ case VIDIOC_SUBSCRIBE_EVENT:
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return video_ioctl2(file, cmd, arg);
+ }
+
+ ret = uvc_pm_get(handle->stream->dev);
+ if (ret)
+ return ret;
+
+ ret = video_ioctl2(file, cmd, arg);
+
+ uvc_pm_put(handle->stream->dev);
+ return ret;
+}
+
static ssize_t uvc_v4l2_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
@@ -1487,7 +1554,7 @@ const struct v4l2_file_operations uvc_fops = {
.owner = THIS_MODULE,
.open = uvc_v4l2_open,
.release = uvc_v4l2_release,
- .unlocked_ioctl = video_ioctl2,
+ .unlocked_ioctl = uvc_v4l2_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl32 = uvc_v4l2_compat_ioctl32,
#endif
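
Two scope-based cleanup helpers from <linux/cleanup.h> carry this refactor: guard(mutex)(...) releases the lock on every return path, and DEFINE_FREE()/__free() runs the named cleanup (here uvc_pm_put()) when the annotated pointer leaves scope, which is what lets uvc_v4l2_compat_ioctl32() lose its unlock/put ladders. A minimal sketch of the idiom with a hypothetical resource type:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    DEFINE_FREE(res_put, struct res *, if (_T) res_put(_T))    /* hypothetical */

    static int do_op(struct ctx *c)
    {
        struct res *r __free(res_put) = NULL;

        guard(mutex)(&c->lock);     /* unlocked automatically on return */

        r = res_get(c);             /* hypothetical acquire */
        if (!r)
            return -ENODEV;         /* r is NULL, cleanup is a no-op */

        return use(r);              /* res_put(r) runs at scope exit */
    }
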
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index b4ee701835fc..b9f8eb62ba1d 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -630,6 +630,7 @@ struct uvc_fh {
struct uvc_streaming *stream;
enum uvc_handle_state state;
unsigned int pending_async_ctrls;
+ bool is_streaming;
};
/* ------------------------------------------------------------------------
@@ -767,6 +768,10 @@ void uvc_status_suspend(struct uvc_device *dev);
int uvc_status_get(struct uvc_device *dev);
void uvc_status_put(struct uvc_device *dev);
+/* PM */
+int uvc_pm_get(struct uvc_device *dev);
+void uvc_pm_put(struct uvc_device *dev);
+
/* Controls */
extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index e4b2de3833ee..bd160a8c9efe 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -154,13 +154,18 @@ void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
const void *
-__v4l2_find_nearest_size(const void *array, size_t array_size,
- size_t entry_size, size_t width_offset,
- size_t height_offset, s32 width, s32 height)
+__v4l2_find_nearest_size_conditional(const void *array, size_t array_size,
+ size_t entry_size, size_t width_offset,
+ size_t height_offset, s32 width,
+ s32 height,
+ bool (*func)(const void *array,
+ size_t index,
+ const void *context),
+ const void *context)
{
u32 error, min_error = U32_MAX;
const void *best = NULL;
- unsigned int i;
+ size_t i;
if (!array)
return NULL;
@@ -169,6 +174,9 @@ __v4l2_find_nearest_size(const void *array, size_t array_size,
const u32 *entry_width = array + width_offset;
const u32 *entry_height = array + height_offset;
+ if (func && !func(array, i, context))
+ continue;
+
error = abs(*entry_width - width) + abs(*entry_height - height);
if (error > min_error)
continue;
@@ -181,7 +189,7 @@ __v4l2_find_nearest_size(const void *array, size_t array_size,
return best;
}
-EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
+EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size_conditional);
int v4l2_g_parm_cap(struct video_device *vdev,
struct v4l2_subdev *sd, struct v4l2_streamparm *a)
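
The _conditional variant threads a caller predicate through the nearest-size search so entries can be excluded without duplicating the array; a NULL func keeps the old behaviour. A hedged sketch of a caller-side predicate matching the new signature, with a hypothetical constraint:

    #include <linux/types.h>
    #include <linux/videodev2.h>

    /* Accept only entries a hypothetical pipeline can scale: <= 1920 wide. */
    static bool fits_pipeline(const void *array, size_t index,
                              const void *context)
    {
        const struct v4l2_frmsize_discrete *sizes = array;

        return sizes[index].width <= 1920;
    }
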
@@ -250,6 +258,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565X, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
@@ -277,8 +286,10 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
/* YUV planar formats */
{ .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV15, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV20, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
@@ -357,6 +368,34 @@ static inline unsigned int v4l2_format_block_height(const struct v4l2_format_inf
return info->block_h[plane];
}
+static inline unsigned int v4l2_format_plane_stride(const struct v4l2_format_info *info, int plane,
+ unsigned int width)
+{
+ unsigned int hdiv = plane ? info->hdiv : 1;
+ unsigned int aligned_width =
+ ALIGN(width, v4l2_format_block_width(info, plane));
+
+ return DIV_ROUND_UP(aligned_width, hdiv) *
+ info->bpp[plane] / info->bpp_div[plane];
+}
+
+static inline unsigned int v4l2_format_plane_height(const struct v4l2_format_info *info, int plane,
+ unsigned int height)
+{
+ unsigned int vdiv = plane ? info->vdiv : 1;
+ unsigned int aligned_height =
+ ALIGN(height, v4l2_format_block_height(info, plane));
+
+ return DIV_ROUND_UP(aligned_height, vdiv);
+}
+
+static inline unsigned int v4l2_format_plane_size(const struct v4l2_format_info *info, int plane,
+ unsigned int width, unsigned int height)
+{
+ return v4l2_format_plane_stride(info, plane, width) *
+ v4l2_format_plane_height(info, plane, height);
+}
+
void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
const struct v4l2_frmsize_stepwise *frmsize)
{
@@ -392,37 +431,19 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
if (info->mem_planes == 1) {
plane = &pixfmt->plane_fmt[0];
- plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
+ plane->bytesperline = v4l2_format_plane_stride(info, 0, width);
plane->sizeimage = 0;
- for (i = 0; i < info->comp_planes; i++) {
- unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
- unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
- unsigned int aligned_width;
- unsigned int aligned_height;
-
- aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
- aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
-
- plane->sizeimage += info->bpp[i] *
- DIV_ROUND_UP(aligned_width, hdiv) *
- DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
- }
+ for (i = 0; i < info->comp_planes; i++)
+ plane->sizeimage +=
+ v4l2_format_plane_size(info, i, width, height);
} else {
for (i = 0; i < info->comp_planes; i++) {
- unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
- unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
- unsigned int aligned_width;
- unsigned int aligned_height;
-
- aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
- aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
-
plane = &pixfmt->plane_fmt[i];
plane->bytesperline =
- info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i];
- plane->sizeimage =
- plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
+ v4l2_format_plane_stride(info, i, width);
+ plane->sizeimage = plane->bytesperline *
+ v4l2_format_plane_height(info, i, height);
}
}
return 0;
@@ -446,22 +467,12 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
pixfmt->width = width;
pixfmt->height = height;
pixfmt->pixelformat = pixelformat;
- pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
+ pixfmt->bytesperline = v4l2_format_plane_stride(info, 0, width);
pixfmt->sizeimage = 0;
- for (i = 0; i < info->comp_planes; i++) {
- unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
- unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
- unsigned int aligned_width;
- unsigned int aligned_height;
-
- aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
- aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
-
- pixfmt->sizeimage += info->bpp[i] *
- DIV_ROUND_UP(aligned_width, hdiv) *
- DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
- }
+ for (i = 0; i < info->comp_planes; i++)
+ pixfmt->sizeimage +=
+ v4l2_format_plane_size(info, i, width, height);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
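Both fill helpers now reduce to the same three-helper core. A minimal usage sketch, with pixel format and geometry chosen purely for illustration:

	/* Illustrative caller, not from the patch. */
	struct v4l2_pix_format pix = {};
	int ret = v4l2_fill_pixfmt(&pix, V4L2_PIX_FMT_NV12, 1280, 720);

	/* On success: pix.bytesperline == 1280 and
	 * pix.sizeimage == 1280 * 720 * 3 / 2 == 1382400. */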
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index b40c08ce909d..c369235113d9 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -1054,25 +1054,25 @@ int __video_register_device(struct video_device *vdev,
vdev->dev.class = &video_class;
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
vdev->dev.parent = vdev->dev_parent;
+ vdev->dev.release = v4l2_device_release;
dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
+
+ /* Increase v4l2_device refcount */
+ v4l2_device_get(vdev->v4l2_dev);
+
mutex_lock(&videodev_lock);
ret = device_register(&vdev->dev);
if (ret < 0) {
mutex_unlock(&videodev_lock);
pr_err("%s: device_register failed\n", __func__);
- goto cleanup;
+ put_device(&vdev->dev);
+ return ret;
}
- /* Register the release callback that will be called when the last
- reference to the device goes away. */
- vdev->dev.release = v4l2_device_release;
if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
pr_warn("%s: requested %s%d, got %s\n", __func__,
name_base, nr, video_device_node_name(vdev));
- /* Increase v4l2_device refcount */
- v4l2_device_get(vdev->v4l2_dev);
-
/* Part 5: Register the entity. */
ret = video_register_media_controller(vdev);
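Moving the release callback and the v4l2_device refcount ahead of device_register() fixes the error path: once registration has been attempted, the kobject may be live, so the device must be disposed of with put_device() — which only works if dev.release is already valid. A sketch of the resulting ordering (only the names visible in this hunk are real):

	vdev->dev.release = v4l2_device_release; /* set before registration */
	v4l2_device_get(vdev->v4l2_dev);         /* balanced by the release cb */

	ret = device_register(&vdev->dev);
	if (ret < 0) {
		/* Never free a possibly-live kobject directly; dropping
		 * the last reference invokes ->release for cleanup. */
		put_device(&vdev->dev);
		return ret;
	}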
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index a16fb44c7246..650dc1956f73 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1363,8 +1363,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV48_12: descr = "12-bit YUV 4:4:4 Packed"; break;
case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break;
case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break;
+ case V4L2_PIX_FMT_NV15: descr = "10-bit Y/UV 4:2:0 (Packed)"; break;
case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break;
case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break;
+ case V4L2_PIX_FMT_NV20: descr = "10-bit Y/UV 4:2:2 (Packed)"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break;
case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break;
@@ -1462,6 +1464,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break;
case V4L2_META_FMT_RK_ISP1_EXT_PARAMS: descr = "Rockchip ISP1 Ext 3A Params"; break;
+ case V4L2_META_FMT_C3ISP_PARAMS: descr = "Amlogic C3 ISP Parameters"; break;
+ case V4L2_META_FMT_C3ISP_STATS: descr = "Amlogic C3 ISP Statistics"; break;
case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break;
@@ -2833,8 +2837,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
p->rangelow = m.rangelow;
p->rangehigh = m.rangehigh;
- p->modulation = (type == V4L2_TUNER_RADIO) ?
- V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
+ p->modulation = V4L2_BAND_MODULATION_FM;
return 0;
}
return -ENOTTY;
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index c82d8d8a16ea..79df0d22e218 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -32,7 +32,7 @@ config ARM_PL172_MPMC
config ATMEL_EBI
bool "Atmel EBI driver"
- default y if ARCH_AT91
+ default ARCH_AT91
depends on ARCH_AT91 || COMPILE_TEST
depends on OF
select MFD_SYSCON
@@ -147,7 +147,7 @@ config FPGA_DFL_EMIF
config MVEBU_DEVBUS
bool "Marvell EBU Device Bus Controller"
- default y if PLAT_ORION
+ default PLAT_ORION
depends on PLAT_ORION || COMPILE_TEST
depends on OF
help
@@ -198,7 +198,7 @@ config DA8XX_DDRCTL
config PL353_SMC
tristate "ARM PL35X Static Memory Controller(SMC) driver"
- default y if ARM
+ default ARM
depends on ARM || COMPILE_TEST
depends on ARM_AMBA
help
@@ -225,6 +225,23 @@ config STM32_FMC2_EBI
devices (like SRAM, ethernet adapters, FPGAs, LCD displays, ...) on
SOCs containing the FMC2 External Bus Interface.
+config STM32_OMM
+ tristate "STM32 Octo Memory Manager"
+ depends on SPI_STM32_OSPI || COMPILE_TEST
+ help
+ This driver manages the muxing between the 2 OSPI buses and
+ the 2 output ports. There are 4 possible muxing configurations:
+ - direct mode (no multiplexing): OSPI1 output is on port 1 and OSPI2
+ output is on port 2
+ - OSPI1 and OSPI2 are multiplexed over the same output port 1
+ - swapped mode (no multiplexing): OSPI1 output is on port 2 and
+ OSPI2 output is on port 1
+ - OSPI1 and OSPI2 are multiplexed over the same output port 2
+ It also manages:
+ - the split of the memory area shared between the 2 OSPI instances
+ - chip select selection override
+ - the time between 2 transactions in multiplexed mode
+
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index d2e6ca9abbe0..c1959661bf63 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
obj-$(CONFIG_PL353_SMC) += pl353-smc.o
obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o
obj-$(CONFIG_STM32_FMC2_EBI) += stm32-fmc2-ebi.o
+obj-$(CONFIG_STM32_OMM) += stm32_omm.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c
index 78bd71b203f2..0fd96abc172a 100644
--- a/drivers/memory/bt1-l2-ctl.c
+++ b/drivers/memory/bt1-l2-ctl.c
@@ -222,7 +222,7 @@ static ssize_t l2_ctl_latency_show(struct device *dev,
if (ret)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%u\n", data);
+ return sysfs_emit(buf, "%u\n", data);
}
static ssize_t l2_ctl_latency_store(struct device *dev,
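sysfs_emit() is the preferred form for sysfs show() callbacks: it bakes in the PAGE_SIZE bound and warns on misuse instead of silently truncating. The generic shape after such a conversion — names illustrative:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		u32 data = 42;	/* read from the hardware in a real driver */

		return sysfs_emit(buf, "%u\n", data);
	}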
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index a8f5467d6b31..c086c22511f7 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -283,6 +283,43 @@ static int mtk_smi_larb_config_port_gen2_general(struct device *dev)
return 0;
}
+static const u8 mtk_smi_larb_mt6893_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x2, 0x6, 0x2, 0x2, 0x2, 0x28, 0x18, 0x18, 0x1, 0x1, 0x1, 0x8,
+ 0x8, 0x1, 0x3f},
+ [1] = {0x2, 0x6, 0x2, 0x2, 0x2, 0x28, 0x18, 0x18, 0x1, 0x1, 0x1, 0x8,
+ 0x8, 0x1, 0x3f},
+ [2] = {0x5, 0x5, 0x5, 0x5, 0x1, 0x3f},
+ [3] = {0x5, 0x5, 0x5, 0x5, 0x1, 0x3f},
+ [4] = {0x28, 0x19, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x1},
+ [5] = {0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x16},
+ [6] = {},
+ [7] = {0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x1,
+ 0x4, 0x1, 0xa, 0x6, 0x1, 0xa, 0x6, 0x1, 0x1, 0x1, 0x1, 0x5,
+ 0x3, 0x3, 0x4},
+ [8] = {0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x1,
+ 0x4, 0x1, 0xa, 0x6, 0x1, 0xa, 0x6, 0x1, 0x1, 0x1, 0x1, 0x5,
+ 0x3, 0x3, 0x4},
+ [9] = {0x9, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4,
+ 0x9, 0x3, 0x4, 0xe, 0x1, 0x7, 0x8, 0x7, 0x7, 0x1, 0x6, 0x2,
+ 0xf, 0x8, 0x1, 0x1, 0x1},
+ [10] = {},
+ [11] = {0x9, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4,
+ 0x9, 0x3, 0x4, 0xe, 0x1, 0x7, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1, 0x1},
+ [12] = {},
+ [13] = {0x2, 0xc, 0xc, 0xe, 0x6, 0x6, 0x6, 0x6, 0x6, 0x12, 0x6, 0x1},
+ [14] = {0x2, 0xc, 0xc, 0x28, 0x12, 0x6},
+ [15] = {0x28, 0x1, 0x2, 0x28, 0x1},
+ [16] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4},
+ [17] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4},
+ [18] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4},
+ [19] = {0x2, 0x2, 0x4, 0x2},
+ [20] = {0x9, 0x9, 0x5, 0x5, 0x1, 0x1},
+};
+
static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x02, 0x18, 0x22, 0x22, 0x01, 0x02, 0x0a,},
[1] = {0x12, 0x02, 0x14, 0x14, 0x01, 0x18, 0x0a,},
@@ -429,6 +466,12 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
/* DUMMY | IPU0 | IPU1 | CCU | MDLA */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt6893 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG,
+ .ostd = mtk_smi_larb_mt6893_ostd,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
/* mt8167 do not need the port in larb */
.config_port = mtk_smi_larb_config_port_mt8167,
@@ -474,6 +517,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712},
{.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779},
{.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173},
+ {.compatible = "mediatek,mt6893-smi-larb", .data = &mtk_smi_larb_mt6893},
{.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
{.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
@@ -694,6 +738,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = {
.init = mtk_smi_common_mt6795_init,
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt6893 = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+ F_MMU1_LARB(5) | F_MMU1_LARB(7),
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
.type = MTK_SMI_GEN2,
.has_gals = true,
@@ -756,6 +807,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779},
{.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795},
+ {.compatible = "mediatek,mt6893-smi-common", .data = &mtk_smi_common_mt6893},
{.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 53f1888cc84f..9c96eed00194 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1455,10 +1455,8 @@ static int gpmc_setup_irq(struct gpmc_device *gpmc)
gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
- gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
- gpmc->nirqs,
- &gpmc_irq_domain_ops,
- gpmc);
+ gpmc_irq_domain = irq_domain_create_linear(of_fwnode_handle(gpmc->dev->of_node),
+ gpmc->nirqs, &gpmc_irq_domain_ops, gpmc);
if (!gpmc_irq_domain) {
dev_err(gpmc->dev, "IRQ domain add failed\n");
return -ENODEV;
@@ -2376,7 +2374,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- return 1; /* we're input only */
+ return GPIO_LINE_DIRECTION_IN; /* we're input only */
}
static int gpmc_gpio_direction_input(struct gpio_chip *chip,
@@ -2385,17 +2383,6 @@ static int gpmc_gpio_direction_input(struct gpio_chip *chip,
return 0; /* we're input only */
}
-static int gpmc_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- return -EINVAL; /* we're input only */
-}
-
-static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
-}
-
static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
u32 reg;
@@ -2417,8 +2404,6 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
- gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
- gpmc->gpio_chip.set = gpmc_gpio_set;
gpmc->gpio_chip.get = gpmc_gpio_get;
gpmc->gpio_chip.base = -1;
diff --git a/drivers/memory/renesas-rpc-if-regs.h b/drivers/memory/renesas-rpc-if-regs.h
new file mode 100644
index 000000000000..e6b33f7c40a8
--- /dev/null
+++ b/drivers/memory/renesas-rpc-if-regs.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car RPC Interface Registers Definitions
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#ifndef __RENESAS_RPC_IF_REGS_H__
+#define __RENESAS_RPC_IF_REGS_H__
+
+#include <linux/bits.h>
+
+#define RPCIF_CMNCR 0x0000 /* R/W */
+#define RPCIF_CMNCR_MD BIT(31)
+#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
+#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
+#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
+#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
+#define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \
+ RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val))
+#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */
+#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */
+#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
+#define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \
+ RPCIF_CMNCR_IO3FV(val))
+#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0)
+
+#define RPCIF_SSLDR 0x0004 /* R/W */
+#define RPCIF_SSLDR_SPNDL(d) (((d) & 0x7) << 16)
+#define RPCIF_SSLDR_SLNDL(d) (((d) & 0x7) << 8)
+#define RPCIF_SSLDR_SCKDL(d) (((d) & 0x7) << 0)
+
+#define RPCIF_DRCR 0x000C /* R/W */
+#define RPCIF_DRCR_SSLN BIT(24)
+#define RPCIF_DRCR_RBURST(v) ((((v) - 1) & 0x1F) << 16)
+#define RPCIF_DRCR_RCF BIT(9)
+#define RPCIF_DRCR_RBE BIT(8)
+#define RPCIF_DRCR_SSLE BIT(0)
+
+#define RPCIF_DRCMR 0x0010 /* R/W */
+#define RPCIF_DRCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPCIF_DRCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPCIF_DREAR 0x0014 /* R/W */
+#define RPCIF_DREAR_EAV(c) (((c) & 0xF) << 16)
+#define RPCIF_DREAR_EAC(c) (((c) & 0x7) << 0)
+
+#define RPCIF_DROPR 0x0018 /* R/W */
+
+#define RPCIF_DRENR 0x001C /* R/W */
+#define RPCIF_DRENR_CDB(o) (((u32)((o) & 0x3)) << 30)
+#define RPCIF_DRENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPCIF_DRENR_ADB(o) (((o) & 0x3) << 24)
+#define RPCIF_DRENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPCIF_DRENR_DRDB(o) (((o) & 0x3) << 16)
+#define RPCIF_DRENR_DME BIT(15)
+#define RPCIF_DRENR_CDE BIT(14)
+#define RPCIF_DRENR_OCDE BIT(12)
+#define RPCIF_DRENR_ADE(v) (((v) & 0xF) << 8)
+#define RPCIF_DRENR_OPDE(v) (((v) & 0xF) << 4)
+
+#define RPCIF_SMCR 0x0020 /* R/W */
+#define RPCIF_SMCR_SSLKP BIT(8)
+#define RPCIF_SMCR_SPIRE BIT(2)
+#define RPCIF_SMCR_SPIWE BIT(1)
+#define RPCIF_SMCR_SPIE BIT(0)
+
+#define RPCIF_SMCMR 0x0024 /* R/W */
+#define RPCIF_SMCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPCIF_SMCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPCIF_SMADR 0x0028 /* R/W */
+
+#define RPCIF_SMOPR 0x002C /* R/W */
+#define RPCIF_SMOPR_OPD3(o) (((o) & 0xFF) << 24)
+#define RPCIF_SMOPR_OPD2(o) (((o) & 0xFF) << 16)
+#define RPCIF_SMOPR_OPD1(o) (((o) & 0xFF) << 8)
+#define RPCIF_SMOPR_OPD0(o) (((o) & 0xFF) << 0)
+
+#define RPCIF_SMENR 0x0030 /* R/W */
+#define RPCIF_SMENR_CDB(o) (((o) & 0x3) << 30)
+#define RPCIF_SMENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPCIF_SMENR_ADB(o) (((o) & 0x3) << 24)
+#define RPCIF_SMENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPCIF_SMENR_SPIDB(o) (((o) & 0x3) << 16)
+#define RPCIF_SMENR_DME BIT(15)
+#define RPCIF_SMENR_CDE BIT(14)
+#define RPCIF_SMENR_OCDE BIT(12)
+#define RPCIF_SMENR_ADE(v) (((v) & 0xF) << 8)
+#define RPCIF_SMENR_OPDE(v) (((v) & 0xF) << 4)
+#define RPCIF_SMENR_SPIDE(v) (((v) & 0xF) << 0)
+
+#define RPCIF_SMRDR0 0x0038 /* R */
+#define RPCIF_SMRDR1 0x003C /* R */
+#define RPCIF_SMWDR0 0x0040 /* W */
+#define RPCIF_SMWDR1 0x0044 /* W */
+
+#define RPCIF_CMNSR 0x0048 /* R */
+#define RPCIF_CMNSR_SSLF BIT(1)
+#define RPCIF_CMNSR_TEND BIT(0)
+
+#define RPCIF_DRDMCR 0x0058 /* R/W */
+#define RPCIF_DMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0)
+
+#define RPCIF_DRDRENR 0x005C /* R/W */
+#define RPCIF_DRDRENR_HYPE(v) (((v) & 0x7) << 12)
+#define RPCIF_DRDRENR_ADDRE BIT(8)
+#define RPCIF_DRDRENR_OPDRE BIT(4)
+#define RPCIF_DRDRENR_DRDRE BIT(0)
+
+#define RPCIF_SMDMCR 0x0060 /* R/W */
+#define RPCIF_SMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0)
+
+#define RPCIF_SMDRENR 0x0064 /* R/W */
+#define RPCIF_SMDRENR_HYPE(v) (((v) & 0x7) << 12)
+#define RPCIF_SMDRENR_ADDRE BIT(8)
+#define RPCIF_SMDRENR_OPDRE BIT(4)
+#define RPCIF_SMDRENR_SPIDRE BIT(0)
+
+#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
+#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
+
+#define RPCIF_PHYCNT 0x007C /* R/W */
+#define RPCIF_PHYCNT_CAL BIT(31)
+#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22)
+#define RPCIF_PHYCNT_EXDS BIT(21)
+#define RPCIF_PHYCNT_OCT BIT(20)
+#define RPCIF_PHYCNT_DDRCAL BIT(19)
+#define RPCIF_PHYCNT_HS BIT(18)
+#define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */
+#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15 | ((v) & 0x8) << 24) /* valid for R-Car and RZ/G2{E,H,M,N} */
+
+#define RPCIF_PHYCNT_WBUF2 BIT(4)
+#define RPCIF_PHYCNT_WBUF BIT(2)
+#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0)
+#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0)
+
+#define RPCIF_PHYOFFSET1 0x0080 /* R/W */
+#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28)
+
+#define RPCIF_PHYOFFSET2 0x0084 /* R/W */
+#define RPCIF_PHYOFFSET2_OCTTMG(v) (((v) & 0x7) << 8)
+
+#define RPCIF_PHYINT 0x0088 /* R/W */
+#define RPCIF_PHYINT_WPVAL BIT(1)
+
+#endif /* __RENESAS_RPC_IF_REGS_H__ */
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 15b4706aafee..4a417b693080 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -18,139 +18,8 @@
#include <memory/renesas-rpc-if.h>
-#define RPCIF_CMNCR 0x0000 /* R/W */
-#define RPCIF_CMNCR_MD BIT(31)
-#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
-#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
-#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
-#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
-#define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \
- RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val))
-#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */
-#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */
-#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
-#define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \
- RPCIF_CMNCR_IO3FV(val))
-#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0)
-
-#define RPCIF_SSLDR 0x0004 /* R/W */
-#define RPCIF_SSLDR_SPNDL(d) (((d) & 0x7) << 16)
-#define RPCIF_SSLDR_SLNDL(d) (((d) & 0x7) << 8)
-#define RPCIF_SSLDR_SCKDL(d) (((d) & 0x7) << 0)
-
-#define RPCIF_DRCR 0x000C /* R/W */
-#define RPCIF_DRCR_SSLN BIT(24)
-#define RPCIF_DRCR_RBURST(v) ((((v) - 1) & 0x1F) << 16)
-#define RPCIF_DRCR_RCF BIT(9)
-#define RPCIF_DRCR_RBE BIT(8)
-#define RPCIF_DRCR_SSLE BIT(0)
-
-#define RPCIF_DRCMR 0x0010 /* R/W */
-#define RPCIF_DRCMR_CMD(c) (((c) & 0xFF) << 16)
-#define RPCIF_DRCMR_OCMD(c) (((c) & 0xFF) << 0)
-
-#define RPCIF_DREAR 0x0014 /* R/W */
-#define RPCIF_DREAR_EAV(c) (((c) & 0xF) << 16)
-#define RPCIF_DREAR_EAC(c) (((c) & 0x7) << 0)
-
-#define RPCIF_DROPR 0x0018 /* R/W */
-
-#define RPCIF_DRENR 0x001C /* R/W */
-#define RPCIF_DRENR_CDB(o) (u32)((((o) & 0x3) << 30))
-#define RPCIF_DRENR_OCDB(o) (((o) & 0x3) << 28)
-#define RPCIF_DRENR_ADB(o) (((o) & 0x3) << 24)
-#define RPCIF_DRENR_OPDB(o) (((o) & 0x3) << 20)
-#define RPCIF_DRENR_DRDB(o) (((o) & 0x3) << 16)
-#define RPCIF_DRENR_DME BIT(15)
-#define RPCIF_DRENR_CDE BIT(14)
-#define RPCIF_DRENR_OCDE BIT(12)
-#define RPCIF_DRENR_ADE(v) (((v) & 0xF) << 8)
-#define RPCIF_DRENR_OPDE(v) (((v) & 0xF) << 4)
-
-#define RPCIF_SMCR 0x0020 /* R/W */
-#define RPCIF_SMCR_SSLKP BIT(8)
-#define RPCIF_SMCR_SPIRE BIT(2)
-#define RPCIF_SMCR_SPIWE BIT(1)
-#define RPCIF_SMCR_SPIE BIT(0)
-
-#define RPCIF_SMCMR 0x0024 /* R/W */
-#define RPCIF_SMCMR_CMD(c) (((c) & 0xFF) << 16)
-#define RPCIF_SMCMR_OCMD(c) (((c) & 0xFF) << 0)
-
-#define RPCIF_SMADR 0x0028 /* R/W */
-
-#define RPCIF_SMOPR 0x002C /* R/W */
-#define RPCIF_SMOPR_OPD3(o) (((o) & 0xFF) << 24)
-#define RPCIF_SMOPR_OPD2(o) (((o) & 0xFF) << 16)
-#define RPCIF_SMOPR_OPD1(o) (((o) & 0xFF) << 8)
-#define RPCIF_SMOPR_OPD0(o) (((o) & 0xFF) << 0)
-
-#define RPCIF_SMENR 0x0030 /* R/W */
-#define RPCIF_SMENR_CDB(o) (((o) & 0x3) << 30)
-#define RPCIF_SMENR_OCDB(o) (((o) & 0x3) << 28)
-#define RPCIF_SMENR_ADB(o) (((o) & 0x3) << 24)
-#define RPCIF_SMENR_OPDB(o) (((o) & 0x3) << 20)
-#define RPCIF_SMENR_SPIDB(o) (((o) & 0x3) << 16)
-#define RPCIF_SMENR_DME BIT(15)
-#define RPCIF_SMENR_CDE BIT(14)
-#define RPCIF_SMENR_OCDE BIT(12)
-#define RPCIF_SMENR_ADE(v) (((v) & 0xF) << 8)
-#define RPCIF_SMENR_OPDE(v) (((v) & 0xF) << 4)
-#define RPCIF_SMENR_SPIDE(v) (((v) & 0xF) << 0)
-
-#define RPCIF_SMRDR0 0x0038 /* R */
-#define RPCIF_SMRDR1 0x003C /* R */
-#define RPCIF_SMWDR0 0x0040 /* W */
-#define RPCIF_SMWDR1 0x0044 /* W */
-
-#define RPCIF_CMNSR 0x0048 /* R */
-#define RPCIF_CMNSR_SSLF BIT(1)
-#define RPCIF_CMNSR_TEND BIT(0)
-
-#define RPCIF_DRDMCR 0x0058 /* R/W */
-#define RPCIF_DMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0)
-
-#define RPCIF_DRDRENR 0x005C /* R/W */
-#define RPCIF_DRDRENR_HYPE(v) (((v) & 0x7) << 12)
-#define RPCIF_DRDRENR_ADDRE BIT(8)
-#define RPCIF_DRDRENR_OPDRE BIT(4)
-#define RPCIF_DRDRENR_DRDRE BIT(0)
-
-#define RPCIF_SMDMCR 0x0060 /* R/W */
-#define RPCIF_SMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0)
-
-#define RPCIF_SMDRENR 0x0064 /* R/W */
-#define RPCIF_SMDRENR_HYPE(v) (((v) & 0x7) << 12)
-#define RPCIF_SMDRENR_ADDRE BIT(8)
-#define RPCIF_SMDRENR_OPDRE BIT(4)
-#define RPCIF_SMDRENR_SPIDRE BIT(0)
-
-#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
-#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
-
-#define RPCIF_PHYCNT 0x007C /* R/W */
-#define RPCIF_PHYCNT_CAL BIT(31)
-#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22)
-#define RPCIF_PHYCNT_EXDS BIT(21)
-#define RPCIF_PHYCNT_OCT BIT(20)
-#define RPCIF_PHYCNT_DDRCAL BIT(19)
-#define RPCIF_PHYCNT_HS BIT(18)
-#define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */
-#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15 | ((v) & 0x8) << 24) /* valid for R-Car and RZ/G2{E,H,M,N} */
-
-#define RPCIF_PHYCNT_WBUF2 BIT(4)
-#define RPCIF_PHYCNT_WBUF BIT(2)
-#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0)
-#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0)
-
-#define RPCIF_PHYOFFSET1 0x0080 /* R/W */
-#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28)
-
-#define RPCIF_PHYOFFSET2 0x0084 /* R/W */
-#define RPCIF_PHYOFFSET2_OCTTMG(v) (((v) & 0x7) << 8)
-
-#define RPCIF_PHYINT 0x0088 /* R/W */
-#define RPCIF_PHYINT_WPVAL BIT(1)
+#include "renesas-rpc-if-regs.h"
+#include "renesas-xspi-if-regs.h"
static const struct regmap_range rpcif_volatile_ranges[] = {
regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1),
@@ -163,7 +32,31 @@ static const struct regmap_access_table rpcif_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
};
+static const struct regmap_range xspi_volatile_ranges[] = {
+ regmap_reg_range(XSPI_CDD0BUF0, XSPI_CDD0BUF0),
+};
+
+static const struct regmap_access_table xspi_volatile_table = {
+ .yes_ranges = xspi_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(xspi_volatile_ranges),
+};
+
+struct rpcif_priv;
+
+struct rpcif_impl {
+ int (*hw_init)(struct rpcif_priv *rpc, bool hyperflash);
+ void (*prepare)(struct rpcif_priv *rpc, const struct rpcif_op *op,
+ u64 *offs, size_t *len);
+ int (*manual_xfer)(struct rpcif_priv *rpc);
+ size_t (*dirmap_read)(struct rpcif_priv *rpc, u64 offs, size_t len,
+ void *buf);
+ u32 status_reg;
+ u32 status_mask;
+};
+
struct rpcif_info {
+ const struct regmap_config *regmap_config;
+ const struct rpcif_impl *impl;
enum rpcif_type type;
u8 strtim;
};
@@ -180,6 +73,8 @@ struct rpcif_priv {
enum rpcif_data_dir dir;
u8 bus_size;
u8 xfer_size;
+ u8 addr_nbytes; /* Specified for xSPI */
+ u32 proto; /* Specified for xSPI */
void *buffer;
u32 xferlen;
u32 smcr;
@@ -191,26 +86,6 @@ struct rpcif_priv {
u32 ddr; /* DRDRENR or SMDRENR */
};
-static const struct rpcif_info rpcif_info_r8a7796 = {
- .type = RPCIF_RCAR_GEN3,
- .strtim = 6,
-};
-
-static const struct rpcif_info rpcif_info_gen3 = {
- .type = RPCIF_RCAR_GEN3,
- .strtim = 7,
-};
-
-static const struct rpcif_info rpcif_info_rz_g2l = {
- .type = RPCIF_RZ_G2L,
- .strtim = 7,
-};
-
-static const struct rpcif_info rpcif_info_gen4 = {
- .type = RPCIF_RCAR_GEN4,
- .strtim = 15,
-};
-
/*
* Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
* proper width. Requires rpcif_priv.xfer_size to be correctly set before!
@@ -300,6 +175,33 @@ static const struct regmap_config rpcif_regmap_config = {
.volatile_table = &rpcif_volatile_table,
};
+static int xspi_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct rpcif_priv *xspi = context;
+
+ *val = readl(xspi->base + reg);
+ return 0;
+}
+
+static int xspi_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct rpcif_priv *xspi = context;
+
+ writel(val, xspi->base + reg);
+ return 0;
+}
+
+static const struct regmap_config xspi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_read = xspi_reg_read,
+ .reg_write = xspi_reg_write,
+ .fast_io = true,
+ .max_register = XSPI_INTE,
+ .volatile_table = &xspi_volatile_table,
+};
+
int rpcif_sw_init(struct rpcif *rpcif, struct device *dev)
{
struct rpcif_priv *rpc = dev_get_drvdata(dev);
@@ -307,6 +209,7 @@ int rpcif_sw_init(struct rpcif *rpcif, struct device *dev)
rpcif->dev = dev;
rpcif->dirmap = rpc->dirmap;
rpcif->size = rpc->size;
+ rpcif->xspi = rpc->info->type == XSPI_RZ_G3E;
return 0;
}
EXPORT_SYMBOL(rpcif_sw_init);
@@ -325,16 +228,11 @@ static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc)
regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
}
-int rpcif_hw_init(struct device *dev, bool hyperflash)
+static int rpcif_hw_init_impl(struct rpcif_priv *rpc, bool hyperflash)
{
- struct rpcif_priv *rpc = dev_get_drvdata(dev);
u32 dummy;
int ret;
- ret = pm_runtime_resume_and_get(dev);
- if (ret)
- return ret;
-
if (rpc->info->type == RPCIF_RZ_G2L) {
ret = reset_control_reset(rpc->rstc);
if (ret)
@@ -382,21 +280,62 @@ int rpcif_hw_init(struct device *dev, bool hyperflash)
regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) |
RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7));
- pm_runtime_put(dev);
-
rpc->bus_size = hyperflash ? 2 : 1;
return 0;
}
+
+static int xspi_hw_init_impl(struct rpcif_priv *xspi, bool hyperflash)
+{
+ int ret;
+
+ ret = reset_control_reset(xspi->rstc);
+ if (ret)
+ return ret;
+
+ regmap_write(xspi->regmap, XSPI_WRAPCFG, 0x0);
+
+ regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0,
+ XSPI_LIOCFG_PRTMD(0x3ff) | XSPI_LIOCFG_CSMIN(0xf) |
+ XSPI_LIOCFG_CSASTEX | XSPI_LIOCFG_CSNEGEX,
+ XSPI_LIOCFG_PRTMD(0) | XSPI_LIOCFG_CSMIN(0) |
+ XSPI_LIOCFG_CSASTEX | XSPI_LIOCFG_CSNEGEX);
+
+ regmap_update_bits(xspi->regmap, XSPI_CCCTL0CS0, XSPI_CCCTL0_CAEN, 0);
+
+ regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
+ XSPI_CDCTL0_TRREQ | XSPI_CDCTL0_CSSEL, 0);
+
+ regmap_update_bits(xspi->regmap, XSPI_INTE, XSPI_INTE_CMDCMPE,
+ XSPI_INTE_CMDCMPE);
+
+ return 0;
+}
+
+int rpcif_hw_init(struct device *dev, bool hyperflash)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = rpc->info->impl->hw_init(rpc, hyperflash);
+
+ pm_runtime_put(dev);
+
+ return ret;
+}
EXPORT_SYMBOL(rpcif_hw_init);
static int wait_msg_xfer_end(struct rpcif_priv *rpc)
{
u32 sts;
- return regmap_read_poll_timeout(rpc->regmap, RPCIF_CMNSR, sts,
- sts & RPCIF_CMNSR_TEND, 0,
- USEC_PER_SEC);
+ return regmap_read_poll_timeout(rpc->regmap, rpc->info->impl->status_reg,
+ sts, sts & rpc->info->impl->status_mask,
+ 0, USEC_PER_SEC);
}
static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes)
@@ -412,11 +351,9 @@ static u8 rpcif_bit_size(u8 buswidth)
return buswidth > 4 ? 2 : ilog2(buswidth);
}
-void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
- size_t *len)
+static void rpcif_prepare_impl(struct rpcif_priv *rpc, const struct rpcif_op *op,
+ u64 *offs, size_t *len)
{
- struct rpcif_priv *rpc = dev_get_drvdata(dev);
-
rpc->smcr = 0;
rpc->smadr = 0;
rpc->enable = 0;
@@ -497,18 +434,76 @@ void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
rpc->enable |= RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
}
}
-EXPORT_SYMBOL(rpcif_prepare);
-int rpcif_manual_xfer(struct device *dev)
+static void xspi_prepare_impl(struct rpcif_priv *xspi, const struct rpcif_op *op,
+ u64 *offs, size_t *len)
+{
+ xspi->smadr = 0;
+ xspi->addr_nbytes = 0;
+ xspi->command = 0;
+ xspi->option = 0;
+ xspi->dummy = 0;
+ xspi->xferlen = 0;
+ xspi->proto = 0;
+
+ if (op->cmd.buswidth)
+ xspi->command = op->cmd.opcode;
+
+ if (op->ocmd.buswidth)
+ xspi->command = (xspi->command << 8) | op->ocmd.opcode;
+
+ if (op->addr.buswidth) {
+ xspi->addr_nbytes = op->addr.nbytes;
+ if (offs && len)
+ xspi->smadr = *offs;
+ else
+ xspi->smadr = op->addr.val;
+ }
+
+ if (op->dummy.buswidth)
+ xspi->dummy = op->dummy.ncycles;
+
+ xspi->dir = op->data.dir;
+ if (op->data.buswidth) {
+ u32 nbytes;
+
+ xspi->buffer = op->data.buf.in;
+
+ if (offs && len)
+ nbytes = *len;
+ else
+ nbytes = op->data.nbytes;
+ xspi->xferlen = nbytes;
+ }
+
+ if (op->cmd.buswidth == 1) {
+ if (op->addr.buswidth == 2 || op->data.buswidth == 2)
+ xspi->proto = PROTO_1S_2S_2S;
+ else if (op->addr.buswidth == 4 || op->data.buswidth == 4)
+ xspi->proto = PROTO_1S_4S_4S;
+ } else if (op->cmd.buswidth == 2 &&
+ (op->addr.buswidth == 2 || op->data.buswidth == 2)) {
+ xspi->proto = PROTO_2S_2S_2S;
+ } else if (op->cmd.buswidth == 4 &&
+ (op->addr.buswidth == 4 || op->data.buswidth == 4)) {
+ xspi->proto = PROTO_4S_4S_4S;
+ }
+}
+
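For reference, a worked pass through the protocol-mode selection above; the operation is illustrative, shaped like a quad-output fast read:

	/* Illustrative only; field names match struct rpcif_op as used above. */
	struct rpcif_op op = {
		.cmd  = { .buswidth = 1, .opcode = 0x6b },       /* 1-line command */
		.addr = { .buswidth = 4, .nbytes = 3 },          /* 4-line address */
		.data = { .buswidth = 4, .dir = RPCIF_DATA_IN }, /* 4-line data   */
	};

	/* cmd.buswidth == 1 with addr.buswidth == 4 selects
	 * xspi->proto = PROTO_1S_4S_4S (0x090), later programmed into
	 * XSPI_LIOCFGCS0 through XSPI_LIOCFG_PRTMD(). */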
+void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
+ size_t *len)
{
struct rpcif_priv *rpc = dev_get_drvdata(dev);
+
+ rpc->info->impl->prepare(rpc, op, offs, len);
+}
+EXPORT_SYMBOL(rpcif_prepare);
+
+static int rpcif_manual_xfer_impl(struct rpcif_priv *rpc)
+{
u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
int ret = 0;
- ret = pm_runtime_resume_and_get(dev);
- if (ret < 0)
- return ret;
-
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL);
regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
@@ -616,15 +611,169 @@ int rpcif_manual_xfer(struct device *dev)
goto err_out;
}
-exit:
- pm_runtime_put(dev);
return ret;
err_out:
if (reset_control_reset(rpc->rstc))
- dev_err(dev, "Failed to reset HW\n");
- rpcif_hw_init(dev, rpc->bus_size == 2);
- goto exit;
+ dev_err(rpc->dev, "Failed to reset HW\n");
+ rpcif_hw_init_impl(rpc, rpc->bus_size == 2);
+ return ret;
+}
+
+static int xspi_manual_xfer_impl(struct rpcif_priv *xspi)
+{
+ u32 pos = 0, max = 8;
+ int ret = 0;
+
+ regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRNUM(0x3),
+ XSPI_CDCTL0_TRNUM(0));
+
+ regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRREQ, 0);
+
+ regmap_write(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_CMDSIZE(0x1) | XSPI_CDTBUF_CMD_FIELD(xspi->command));
+
+ regmap_write(xspi->regmap, XSPI_CDABUF0, 0);
+
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, XSPI_CDTBUF_ADDSIZE(0x7),
+ XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes));
+
+ regmap_write(xspi->regmap, XSPI_CDABUF0, xspi->smadr);
+
+ regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, XSPI_LIOCFG_PRTMD(0x3ff),
+ XSPI_LIOCFG_PRTMD(xspi->proto));
+
+ switch (xspi->dir) {
+ case RPCIF_DATA_OUT:
+ while (pos < xspi->xferlen) {
+ u32 bytes_left = xspi->xferlen - pos;
+ u32 nbytes, data[2], *p = data;
+
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_TRTYPE, XSPI_CDTBUF_TRTYPE);
+
+ nbytes = bytes_left >= max ? max : bytes_left;
+
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_DATASIZE(0xf),
+ XSPI_CDTBUF_DATASIZE(nbytes));
+
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_ADDSIZE(0x7),
+ XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes));
+
+ memcpy(data, xspi->buffer + pos, nbytes);
+
+ if (nbytes > 4) {
+ regmap_write(xspi->regmap, XSPI_CDD0BUF0, *p++);
+ regmap_write(xspi->regmap, XSPI_CDD1BUF0, *p);
+ } else {
+ regmap_write(xspi->regmap, XSPI_CDD0BUF0, *p);
+ }
+
+ regmap_write(xspi->regmap, XSPI_CDABUF0, xspi->smadr + pos);
+
+ regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
+ XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ);
+
+ ret = wait_msg_xfer_end(xspi);
+ if (ret)
+ goto err_out;
+
+ regmap_update_bits(xspi->regmap, XSPI_INTC,
+ XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC);
+
+ pos += nbytes;
+ }
+ regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRREQ, 0);
+ break;
+ case RPCIF_DATA_IN:
+ while (pos < xspi->xferlen) {
+ u32 bytes_left = xspi->xferlen - pos;
+ u32 nbytes, data[2], *p = data;
+
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_TRTYPE,
+ ~(u32)XSPI_CDTBUF_TRTYPE);
+
+ /* nbytes can be up to 8 bytes */
+ nbytes = bytes_left >= max ? max : bytes_left;
+
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_DATASIZE(0xf),
+ XSPI_CDTBUF_DATASIZE(nbytes));
+
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_ADDSIZE(0x7),
+ XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes));
+
+ if (xspi->addr_nbytes)
+ regmap_write(xspi->regmap, XSPI_CDABUF0,
+ xspi->smadr + pos);
+
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_LATE(0x1f),
+ XSPI_CDTBUF_LATE(xspi->dummy));
+
+ regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
+ XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ);
+
+ ret = wait_msg_xfer_end(xspi);
+ if (ret)
+ goto err_out;
+
+ if (nbytes > 4) {
+ regmap_read(xspi->regmap, XSPI_CDD0BUF0, p++);
+ regmap_read(xspi->regmap, XSPI_CDD1BUF0, p);
+ } else {
+ regmap_read(xspi->regmap, XSPI_CDD0BUF0, p);
+ }
+
+ memcpy(xspi->buffer + pos, data, nbytes);
+
+ regmap_update_bits(xspi->regmap, XSPI_INTC,
+ XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC);
+
+ pos += nbytes;
+ }
+ regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
+ XSPI_CDCTL0_TRREQ, 0);
+ break;
+ default:
+ regmap_update_bits(xspi->regmap, XSPI_CDTBUF0,
+ XSPI_CDTBUF_TRTYPE, XSPI_CDTBUF_TRTYPE);
+ regmap_update_bits(xspi->regmap, XSPI_CDCTL0,
+ XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ);
+
+ ret = wait_msg_xfer_end(xspi);
+ if (ret)
+ goto err_out;
+
+ regmap_update_bits(xspi->regmap, XSPI_INTC,
+ XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC);
+ }
+
+ return ret;
+
+err_out:
+ xspi_hw_init_impl(xspi, false);
+ return ret;
+}
+
+int rpcif_manual_xfer(struct device *dev)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = rpc->info->impl->manual_xfer(rpc);
+
+ pm_runtime_put(dev);
+
+ return ret;
}
EXPORT_SYMBOL(rpcif_manual_xfer);
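rpcif_hw_init(), rpcif_manual_xfer() and rpcif_dirmap_read() now share one shape: the exported wrapper owns the runtime-PM reference while the controller-specific work sits behind rpc->info->impl. Roughly — a sketch of the pattern, with rpcif_foo() as a placeholder name:

	int rpcif_foo(struct device *dev)
	{
		struct rpcif_priv *rpc = dev_get_drvdata(dev);
		int ret;

		ret = pm_runtime_resume_and_get(dev);	/* keep the IP clocked */
		if (ret)
			return ret;

		ret = rpc->info->impl->foo(rpc);	/* RPC-IF or xSPI variant */

		pm_runtime_put(dev);
		return ret;
	}

The same table also parameterises wait_msg_xfer_end(), which polls impl->status_reg against impl->status_mask instead of hard-coding RPCIF_CMNSR/TEND.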
@@ -670,20 +819,15 @@ static void memcpy_fromio_readw(void *to,
}
}
-ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf)
+static size_t rpcif_dirmap_read_impl(struct rpcif_priv *rpc, u64 offs,
+ size_t len, void *buf)
{
- struct rpcif_priv *rpc = dev_get_drvdata(dev);
loff_t from = offs & (rpc->size - 1);
size_t size = rpc->size - from;
- int ret;
if (len > size)
len = size;
- ret = pm_runtime_resume_and_get(dev);
- if (ret < 0)
- return ret;
-
regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0);
regmap_write(rpc->regmap, RPCIF_DRCR, 0);
regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command);
@@ -700,12 +844,129 @@ ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf)
else
memcpy_fromio(buf, rpc->dirmap + from, len);
- pm_runtime_put(dev);
+ return len;
+}
+
+static size_t xspi_dirmap_read_impl(struct rpcif_priv *xspi, u64 offs,
+ size_t len, void *buf)
+{
+ loff_t from = offs & (xspi->size - 1);
+ size_t size = xspi->size - from;
+ u8 addsize = xspi->addr_nbytes - 1;
+
+ if (len > size)
+ len = size;
+
+ regmap_update_bits(xspi->regmap, XSPI_CMCFG0CS0,
+ XSPI_CMCFG0_FFMT(0x3) | XSPI_CMCFG0_ADDSIZE(0x3),
+ XSPI_CMCFG0_FFMT(0) | XSPI_CMCFG0_ADDSIZE(addsize));
+
+ regmap_update_bits(xspi->regmap, XSPI_CMCFG1CS0,
+ XSPI_CMCFG1_RDCMD(0xffff) | XSPI_CMCFG1_RDLATE(0x1f),
+ XSPI_CMCFG1_RDCMD_UPPER_BYTE(xspi->command) |
+ XSPI_CMCFG1_RDLATE(xspi->dummy));
+
+ regmap_update_bits(xspi->regmap, XSPI_BMCTL0, XSPI_BMCTL0_CS0ACC(0xff),
+ XSPI_BMCTL0_CS0ACC(0x01));
+
+ regmap_update_bits(xspi->regmap, XSPI_BMCFG,
+ XSPI_BMCFG_WRMD | XSPI_BMCFG_MWRCOMB |
+ XSPI_BMCFG_MWRSIZE(0xff) | XSPI_BMCFG_PREEN,
+ 0 | XSPI_BMCFG_MWRCOMB | XSPI_BMCFG_MWRSIZE(0x0f) |
+ XSPI_BMCFG_PREEN);
+
+ regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, XSPI_LIOCFG_PRTMD(0x3ff),
+ XSPI_LIOCFG_PRTMD(xspi->proto));
+
+ memcpy_fromio(buf, xspi->dirmap + from, len);
return len;
}
+
+ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf)
+{
+ struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ size_t read;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ read = rpc->info->impl->dirmap_read(rpc, offs, len, buf);
+
+ pm_runtime_put(dev);
+
+ return read;
+}
EXPORT_SYMBOL(rpcif_dirmap_read);
+/**
+ * xspi_dirmap_write - Write data to xspi memory.
+ * @dev: xspi device
+ * @offs: offset within the xspi memory map
+ * @len: number of bytes to be written
+ * @buf: buffer holding the write data
+ *
+ * This function writes data into xspi memory.
+ *
+ * Returns: the number of bytes written on success, else a negative errno.
+ */
+ssize_t xspi_dirmap_write(struct device *dev, u64 offs, size_t len, const void *buf)
+{
+ struct rpcif_priv *xspi = dev_get_drvdata(dev);
+ loff_t from = offs & (xspi->size - 1);
+ u8 addsize = xspi->addr_nbytes - 1;
+ size_t size = xspi->size - from;
+ ssize_t writebytes;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ if (len > size)
+ len = size;
+
+ if (len > MWRSIZE_MAX)
+ writebytes = MWRSIZE_MAX;
+ else
+ writebytes = len;
+
+ regmap_update_bits(xspi->regmap, XSPI_CMCFG0CS0,
+ XSPI_CMCFG0_FFMT(0x3) | XSPI_CMCFG0_ADDSIZE(0x3),
+ XSPI_CMCFG0_FFMT(0) | XSPI_CMCFG0_ADDSIZE(addsize));
+
+ regmap_update_bits(xspi->regmap, XSPI_CMCFG2CS0,
+ XSPI_CMCFG2_WRCMD_UPPER(0xff) | XSPI_CMCFG2_WRLATE(0x1f),
+ XSPI_CMCFG2_WRCMD_UPPER(xspi->command) |
+ XSPI_CMCFG2_WRLATE(xspi->dummy));
+
+ regmap_update_bits(xspi->regmap, XSPI_BMCTL0,
+ XSPI_BMCTL0_CS0ACC(0xff), XSPI_BMCTL0_CS0ACC(0x03));
+
+ regmap_update_bits(xspi->regmap, XSPI_BMCFG,
+ XSPI_BMCFG_WRMD | XSPI_BMCFG_MWRCOMB |
+ XSPI_BMCFG_MWRSIZE(0xff) | XSPI_BMCFG_PREEN,
+ 0 | XSPI_BMCFG_MWRCOMB | XSPI_BMCFG_MWRSIZE(0x0f) |
+ XSPI_BMCFG_PREEN);
+
+ regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, XSPI_LIOCFG_PRTMD(0x3ff),
+ XSPI_LIOCFG_PRTMD(xspi->proto));
+
+ memcpy_toio(xspi->dirmap + from, buf, writebytes);
+
+ /* Request to push the pending data */
+ if (writebytes < MWRSIZE_MAX)
+ regmap_update_bits(xspi->regmap, XSPI_BMCTL1,
+ XSPI_BMCTL1_MWRPUSH, XSPI_BMCTL1_MWRPUSH);
+
+ pm_runtime_put(dev);
+
+ return writebytes;
+}
+EXPORT_SYMBOL_GPL(xspi_dirmap_write);
+
static int rpcif_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -740,8 +1001,8 @@ static int rpcif_probe(struct platform_device *pdev)
rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
if (IS_ERR(rpc->base))
return PTR_ERR(rpc->base);
-
- rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config);
+ rpc->info = of_device_get_match_data(dev);
+ rpc->regmap = devm_regmap_init(dev, NULL, rpc, rpc->info->regmap_config);
if (IS_ERR(rpc->regmap)) {
dev_err(dev, "failed to init regmap for rpcif, error %ld\n",
PTR_ERR(rpc->regmap));
@@ -754,11 +1015,29 @@ static int rpcif_probe(struct platform_device *pdev)
return PTR_ERR(rpc->dirmap);
rpc->size = resource_size(res);
- rpc->info = of_device_get_match_data(dev);
- rpc->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ rpc->rstc = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(rpc->rstc))
return PTR_ERR(rpc->rstc);
+ /*
+ * Enabling/disabling the spi/spix2 clocks at runtime leads to flash
+ * write failures. So, enable these clocks during probe() and disable
+ * them in remove().
+ */
+ if (rpc->info->type == XSPI_RZ_G3E) {
+ struct clk *spi_clk;
+
+ spi_clk = devm_clk_get_enabled(dev, "spix2");
+ if (IS_ERR(spi_clk))
+ return dev_err_probe(dev, PTR_ERR(spi_clk),
+ "cannot get enabled spix2 clk\n");
+
+ spi_clk = devm_clk_get_enabled(dev, "spi");
+ if (IS_ERR(spi_clk))
+ return dev_err_probe(dev, PTR_ERR(spi_clk),
+ "cannot get enabled spi clk\n");
+ }
+
vdev = platform_device_alloc(name, pdev->id);
if (!vdev)
return -ENOMEM;
@@ -784,8 +1063,61 @@ static void rpcif_remove(struct platform_device *pdev)
platform_device_unregister(rpc->vdev);
}
+static const struct rpcif_impl rpcif_impl = {
+ .hw_init = rpcif_hw_init_impl,
+ .prepare = rpcif_prepare_impl,
+ .manual_xfer = rpcif_manual_xfer_impl,
+ .dirmap_read = rpcif_dirmap_read_impl,
+ .status_reg = RPCIF_CMNSR,
+ .status_mask = RPCIF_CMNSR_TEND,
+};
+
+static const struct rpcif_impl xspi_impl = {
+ .hw_init = xspi_hw_init_impl,
+ .prepare = xspi_prepare_impl,
+ .manual_xfer = xspi_manual_xfer_impl,
+ .dirmap_read = xspi_dirmap_read_impl,
+ .status_reg = XSPI_INTS,
+ .status_mask = XSPI_INTS_CMDCMP,
+};
+
+static const struct rpcif_info rpcif_info_r8a7796 = {
+ .regmap_config = &rpcif_regmap_config,
+ .impl = &rpcif_impl,
+ .type = RPCIF_RCAR_GEN3,
+ .strtim = 6,
+};
+
+static const struct rpcif_info rpcif_info_gen3 = {
+ .regmap_config = &rpcif_regmap_config,
+ .impl = &rpcif_impl,
+ .type = RPCIF_RCAR_GEN3,
+ .strtim = 7,
+};
+
+static const struct rpcif_info rpcif_info_rz_g2l = {
+ .regmap_config = &rpcif_regmap_config,
+ .impl = &rpcif_impl,
+ .type = RPCIF_RZ_G2L,
+ .strtim = 7,
+};
+
+static const struct rpcif_info rpcif_info_gen4 = {
+ .regmap_config = &rpcif_regmap_config,
+ .impl = &rpcif_impl,
+ .type = RPCIF_RCAR_GEN4,
+ .strtim = 15,
+};
+
+static const struct rpcif_info xspi_info_r9a09g047 = {
+ .regmap_config = &xspi_regmap_config,
+ .impl = &xspi_impl,
+ .type = XSPI_RZ_G3E,
+};
+
static const struct of_device_id rpcif_of_match[] = {
{ .compatible = "renesas,r8a7796-rpc-if", .data = &rpcif_info_r8a7796 },
+ { .compatible = "renesas,r9a09g047-xspi", .data = &xspi_info_r9a09g047 },
{ .compatible = "renesas,rcar-gen3-rpc-if", .data = &rpcif_info_gen3 },
{ .compatible = "renesas,rcar-gen4-rpc-if", .data = &rpcif_info_gen4 },
{ .compatible = "renesas,rzg2l-rpc-if", .data = &rpcif_info_rz_g2l },
diff --git a/drivers/memory/renesas-xspi-if-regs.h b/drivers/memory/renesas-xspi-if-regs.h
new file mode 100644
index 000000000000..53f801d591f2
--- /dev/null
+++ b/drivers/memory/renesas-xspi-if-regs.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * RZ xSPI Interface Registers Definitions
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#ifndef __RENESAS_XSPI_IF_REGS_H__
+#define __RENESAS_XSPI_IF_REGS_H__
+
+#include <linux/bits.h>
+
+/* xSPI Wrapper Configuration Register */
+#define XSPI_WRAPCFG 0x0000
+
+/* xSPI Bridge Configuration Register */
+#define XSPI_BMCFG 0x0008
+#define XSPI_BMCFG_WRMD BIT(0)
+#define XSPI_BMCFG_MWRCOMB BIT(7)
+#define XSPI_BMCFG_MWRSIZE(val) (((val) & 0xff) << 8)
+#define XSPI_BMCFG_PREEN BIT(16)
+
+/* xSPI Command Map Configuration Register 0 CS0 */
+#define XSPI_CMCFG0CS0 0x0010
+#define XSPI_CMCFG0_FFMT(val) (((val) & 0x03) << 0)
+#define XSPI_CMCFG0_ADDSIZE(val) (((val) & 0x03) << 2)
+
+/* xSPI Command Map Configuration Register 1 CS0 */
+#define XSPI_CMCFG1CS0 0x0014
+#define XSPI_CMCFG1_RDCMD(val) (((val) & 0xffff) << 0)
+#define XSPI_CMCFG1_RDCMD_UPPER_BYTE(val) (((val) & 0xff) << 8)
+#define XSPI_CMCFG1_RDLATE(val) (((val) & 0x1f) << 16)
+
+/* xSPI Command Map Configuration Register 2 CS0 */
+#define XSPI_CMCFG2CS0 0x0018
+#define XSPI_CMCFG2_WRCMD(val) (((val) & 0xffff) << 0)
+#define XSPI_CMCFG2_WRCMD_UPPER(val) (((val) & 0xff) << 8)
+#define XSPI_CMCFG2_WRLATE(val) (((val) & 0x1f) << 16)
+
+/* xSPI Link I/O Configuration Register CS0 */
+#define XSPI_LIOCFGCS0 0x0050
+#define XSPI_LIOCFG_PRTMD(val) (((val) & 0x3ff) << 0)
+#define XSPI_LIOCFG_CSMIN(val) (((val) & 0x0f) << 16)
+#define XSPI_LIOCFG_CSASTEX BIT(20)
+#define XSPI_LIOCFG_CSNEGEX BIT(21)
+
+/* xSPI Bridge Map Control Register 0 */
+#define XSPI_BMCTL0 0x0060
+#define XSPI_BMCTL0_CS0ACC(val) (((val) & 0x03) << 0)
+
+/* xSPI Bridge Map Control Register 1 */
+#define XSPI_BMCTL1 0x0064
+#define XSPI_BMCTL1_MWRPUSH BIT(8)
+
+/* xSPI Command Manual Control Register 0 */
+#define XSPI_CDCTL0 0x0070
+#define XSPI_CDCTL0_TRREQ BIT(0)
+#define XSPI_CDCTL0_CSSEL BIT(3)
+#define XSPI_CDCTL0_TRNUM(val) (((val) & 0x03) << 4)
+
+/* xSPI Command Manual Type Buf */
+#define XSPI_CDTBUF0 0x0080
+#define XSPI_CDTBUF_CMDSIZE(val) (((val) & 0x03) << 0)
+#define XSPI_CDTBUF_ADDSIZE(val) (((val) & 0x07) << 2)
+#define XSPI_CDTBUF_DATASIZE(val) (((val) & 0x0f) << 5)
+#define XSPI_CDTBUF_LATE(val) (((val) & 0x1f) << 9)
+#define XSPI_CDTBUF_TRTYPE BIT(15)
+#define XSPI_CDTBUF_CMD(val) (((val) & 0xffff) << 16)
+#define XSPI_CDTBUF_CMD_FIELD(val) (((val) & 0xff) << 24)
+
+/* xSPI Command Manual Address Buff */
+#define XSPI_CDABUF0 0x0084
+
+/* xSPI Command Manual Data 0 Buf */
+#define XSPI_CDD0BUF0 0x0088
+
+/* xSPI Command Manual Data 1 Buf */
+#define XSPI_CDD1BUF0 0x008c
+
+/* xSPI Command Calibration Control Register 0 CS0 */
+#define XSPI_CCCTL0CS0 0x0130
+#define XSPI_CCCTL0_CAEN BIT(0)
+
+/* xSPI Interrupt Status Register */
+#define XSPI_INTS 0x0190
+#define XSPI_INTS_CMDCMP BIT(0)
+
+/* xSPI Interrupt Clear Register */
+#define XSPI_INTC 0x0194
+#define XSPI_INTC_CMDCMPC BIT(0)
+
+/* xSPI Interrupt Enable Register */
+#define XSPI_INTE 0x0198
+#define XSPI_INTE_CMDCMPE BIT(0)
+
+/* Maximum data size of MWRSIZE */
+#define MWRSIZE_MAX 64
+
+/* xSPI Protocol mode */
+#define PROTO_1S_2S_2S 0x48
+#define PROTO_2S_2S_2S 0x49
+#define PROTO_1S_4S_4S 0x090
+#define PROTO_4S_4S_4S 0x092
+
+#endif /* __RENESAS_XSPI_IF_REGS_H__ */
diff --git a/drivers/memory/stm32_omm.c b/drivers/memory/stm32_omm.c
new file mode 100644
index 000000000000..79ceb1635698
--- /dev/null
+++ b/drivers/memory/stm32_omm.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author(s): Patrice Chotard <patrice.chotard@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define OMM_CR 0
+#define CR_MUXEN BIT(0)
+#define CR_MUXENMODE_MASK GENMASK(1, 0)
+#define CR_CSSEL_OVR_EN BIT(4)
+#define CR_CSSEL_OVR_MASK GENMASK(6, 5)
+#define CR_REQ2ACK_MASK GENMASK(23, 16)
+
+#define OMM_CHILD_NB 2
+#define OMM_CLK_NB 3
+
+struct stm32_omm {
+ struct resource *mm_res;
+ struct clk_bulk_data clk_bulk[OMM_CLK_NB];
+ struct reset_control *child_reset[OMM_CHILD_NB];
+ void __iomem *io_base;
+ u32 cr;
+ u8 nb_child;
+ bool restore_omm;
+};
+
+static int stm32_omm_set_amcr(struct device *dev, bool set)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ resource_size_t mm_ospi2_size = 0;
+ static const char * const mm_name[] = { "ospi1", "ospi2" };
+ struct regmap *syscfg_regmap;
+ struct device_node *node;
+ struct resource res, res1;
+ u32 amcr_base, amcr_mask;
+ int ret, idx;
+ unsigned int i, amcr, read_amcr;
+
+ for (i = 0; i < omm->nb_child; i++) {
+ idx = of_property_match_string(dev->of_node,
+ "memory-region-names",
+ mm_name[i]);
+ if (idx < 0)
+ continue;
+
+ /* res1 only used on second loop iteration */
+ res1.start = res.start;
+ res1.end = res.end;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", idx);
+ if (!node)
+ continue;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ of_node_put(node);
+ dev_err(dev, "unable to resolve memory region\n");
+ return ret;
+ }
+
+ /* check that memory region fits inside OMM memory map area */
+ if (!resource_contains(omm->mm_res, &res)) {
+ dev_err(dev, "%s doesn't fit inside OMM memory map area\n",
+ mm_name[i]);
+ dev_err(dev, "%pR doesn't fit inside %pR\n", &res, omm->mm_res);
+ of_node_put(node);
+
+ return -EFAULT;
+ }
+
+ if (i == 1) {
+ mm_ospi2_size = resource_size(&res);
+
+ /* check that OMM memory region 1 doesn't overlap memory region 2 */
+ if (resource_overlaps(&res, &res1)) {
+ dev_err(dev, "OMM memory-region %s overlaps memory region %s\n",
+ mm_name[0], mm_name[1]);
+ dev_err(dev, "%pR overlaps %pR\n", &res1, &res);
+ of_node_put(node);
+
+ return -EFAULT;
+ }
+ }
+ of_node_put(node);
+ }
+
+ syscfg_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "st,syscfg-amcr");
+ if (IS_ERR(syscfg_regmap))
+ return dev_err_probe(dev, PTR_ERR(syscfg_regmap),
+ "Failed to get st,syscfg-amcr property\n");
+
+ ret = of_property_read_u32_index(dev->of_node, "st,syscfg-amcr", 1,
+ &amcr_base);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(dev->of_node, "st,syscfg-amcr", 2,
+ &amcr_mask);
+ if (ret)
+ return ret;
+
+ amcr = mm_ospi2_size / SZ_64M;
+
+ if (set)
+ regmap_update_bits(syscfg_regmap, amcr_base, amcr_mask, amcr);
+
+ /* read AMCR and check coherency with memory-map areas defined in DT */
+ regmap_read(syscfg_regmap, amcr_base, &read_amcr);
+ read_amcr = read_amcr >> (ffs(amcr_mask) - 1);
+
+ if (amcr != read_amcr) {
+ dev_err(dev, "AMCR value not coherent with DT memory-map areas\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
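The AMCR value is the ospi2 region size expressed in 64 MiB granules, amcr = mm_ospi2_size / SZ_64M. A worked example with illustrative numbers:

	/* Illustrative sizing, not from the driver. */
	resource_size_t mm_ospi2_size = SZ_64M;	/* ospi2 memory-region in DT */
	u32 amcr = mm_ospi2_size / SZ_64M;	/* 1 granule: 64 MiB for OSPI2 */

	/* With no ospi2 region, mm_ospi2_size stays 0 and amcr == 0. */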
+static int stm32_omm_toggle_child_clock(struct device *dev, bool enable)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ int i, ret;
+
+ for (i = 0; i < omm->nb_child; i++) {
+ if (enable) {
+ ret = clk_prepare_enable(omm->clk_bulk[i + 1].clk);
+ if (ret) {
+ dev_err(dev, "Can not enable clock\n");
+ goto clk_error;
+ }
+ } else {
+ clk_disable_unprepare(omm->clk_bulk[i + 1].clk);
+ }
+ }
+
+ return 0;
+
+clk_error:
+ while (i--)
+ clk_disable_unprepare(omm->clk_bulk[i + 1].clk);
+
+ return ret;
+}
+
+static int stm32_omm_disable_child(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ struct reset_control *reset;
+ int ret;
+ u8 i;
+
+ ret = stm32_omm_toggle_child_clock(dev, true);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < omm->nb_child; i++) {
+ /* reset OSPI to ensure CR_EN bit is set to 0 */
+ reset = omm->child_reset[i];
+ ret = reset_control_acquire(reset);
+ if (ret) {
+ stm32_omm_toggle_child_clock(dev, false);
+ dev_err(dev, "Can not acquire reset %d\n", ret);
+ return ret;
+ }
+
+ reset_control_assert(reset);
+ udelay(2);
+ reset_control_deassert(reset);
+
+ reset_control_release(reset);
+ }
+
+ return stm32_omm_toggle_child_clock(dev, false);
+}
+
+static int stm32_omm_configure(struct device *dev)
+{
+ static const char * const clocks_name[] = {"omm", "ospi1", "ospi2"};
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ unsigned long clk_rate_max = 0;
+ u32 mux = 0;
+ u32 cssel_ovr = 0;
+ u32 req2ack = 0;
+ struct reset_control *rstc;
+ unsigned long clk_rate;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < OMM_CLK_NB; i++)
+ omm->clk_bulk[i].id = clocks_name[i];
+
+ /* retrieve OMM, OSPI1 and OSPI2 clocks */
+ ret = devm_clk_bulk_get(dev, OMM_CLK_NB, omm->clk_bulk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get OMM/OSPI's clocks\n");
+
+ /* Ensure both OSPI instances are disabled before configuring OMM */
+ ret = stm32_omm_disable_child(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ /* parse the children's clock rates */
+ for (i = 1; i <= omm->nb_child; i++) {
+ clk_rate = clk_get_rate(omm->clk_bulk[i].clk);
+ if (!clk_rate) {
+ dev_err(dev, "Invalid clock rate\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (clk_rate > clk_rate_max)
+ clk_rate_max = clk_rate;
+ }
+
+ rstc = devm_reset_control_get_exclusive(dev, "omm");
+ if (IS_ERR(rstc)) {
+ ret = dev_err_probe(dev, PTR_ERR(rstc), "reset get failed\n");
+ goto error;
+ }
+
+ reset_control_assert(rstc);
+ udelay(2);
+ reset_control_deassert(rstc);
+
+ omm->cr = readl_relaxed(omm->io_base + OMM_CR);
+ /* optional */
+ ret = of_property_read_u32(dev->of_node, "st,omm-mux", &mux);
+ if (!ret) {
+ if (mux & CR_MUXEN) {
+ ret = of_property_read_u32(dev->of_node, "st,omm-req2ack-ns",
+ &req2ack);
+ if (!ret && req2ack) {
+ req2ack = DIV_ROUND_UP(req2ack, NSEC_PER_SEC / clk_rate_max) - 1;
+
+ if (req2ack > 256)
+ req2ack = 256;
+ }
+
+ req2ack = FIELD_PREP(CR_REQ2ACK_MASK, req2ack);
+
+ omm->cr &= ~CR_REQ2ACK_MASK;
+ omm->cr |= req2ack;
+
+ /*
+ * If the mux is enabled, the 2 OSPI clocks must remain
+ * enabled at all times
+ */
+ ret = stm32_omm_toggle_child_clock(dev, true);
+ if (ret)
+ goto error;
+ }
+
+ omm->cr &= ~CR_MUXENMODE_MASK;
+ omm->cr |= FIELD_PREP(CR_MUXENMODE_MASK, mux);
+ }
+
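The req2ack conversion above rounds the DT nanosecond figure up to whole cycles of the fastest OSPI kernel clock; the trailing -1 suggests the register field counts cycles minus one. Worked numbers, chosen for illustration only:

	/* Illustrative: st,omm-req2ack-ns = 10, clk_rate_max = 200 MHz. */
	u32 req2ack = 10;					/* ns, from DT */
	req2ack = DIV_ROUND_UP(req2ack, NSEC_PER_SEC / 200000000UL) - 1;

	/* 200 MHz -> 5 ns per cycle; DIV_ROUND_UP(10, 5) - 1 == 1. */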
+ /* optional */
+ ret = of_property_read_u32(dev->of_node, "st,omm-cssel-ovr", &cssel_ovr);
+ if (!ret) {
+ omm->cr &= ~CR_CSSEL_OVR_MASK;
+ omm->cr |= FIELD_PREP(CR_CSSEL_OVR_MASK, cssel_ovr);
+ omm->cr |= CR_CSSEL_OVR_EN;
+ }
+
+ omm->restore_omm = true;
+ writel_relaxed(omm->cr, omm->io_base + OMM_CR);
+
+ ret = stm32_omm_set_amcr(dev, true);
+
+error:
+ pm_runtime_put_sync_suspend(dev);
+
+ return ret;
+}
+
+static int stm32_omm_check_access(struct device_node *np)
+{
+ struct stm32_firewall firewall;
+ int ret;
+
+ ret = stm32_firewall_get_firewall(np, &firewall, 1);
+ if (ret)
+ return ret;
+
+ return stm32_firewall_grant_access(&firewall);
+}
+
+static int stm32_omm_probe(struct platform_device *pdev)
+{
+ static const char * const resets_name[] = {"ospi1", "ospi2"};
+ struct device *dev = &pdev->dev;
+ u8 child_access_granted = 0;
+ struct stm32_omm *omm;
+ int i, ret;
+
+ omm = devm_kzalloc(dev, sizeof(*omm), GFP_KERNEL);
+ if (!omm)
+ return -ENOMEM;
+
+ omm->io_base = devm_platform_ioremap_resource_byname(pdev, "regs");
+ if (IS_ERR(omm->io_base))
+ return PTR_ERR(omm->io_base);
+
+ omm->mm_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory_map");
+ if (!omm->mm_res)
+ return -ENODEV;
+
+ /* check child's access */
+ for_each_child_of_node_scoped(dev->of_node, child) {
+ if (omm->nb_child >= OMM_CHILD_NB) {
+ dev_err(dev, "Bad DT, found too much children\n");
+ return -E2BIG;
+ }
+
+ ret = stm32_omm_check_access(child);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ if (!ret)
+ child_access_granted++;
+
+ omm->nb_child++;
+ }
+
+ if (omm->nb_child != OMM_CHILD_NB)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, omm);
+
+ devm_pm_runtime_enable(dev);
+
+ /* check if OMM's resource access is granted */
+ ret = stm32_omm_check_access(dev->of_node);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ for (i = 0; i < omm->nb_child; i++) {
+ omm->child_reset[i] = devm_reset_control_get_exclusive_released(dev,
+ resets_name[i]);
+
+ if (IS_ERR(omm->child_reset[i]))
+ return dev_err_probe(dev, PTR_ERR(omm->child_reset[i]),
+ "Can't get %s reset\n", resets_name[i]);
+ }
+
+ if (!ret && child_access_granted == OMM_CHILD_NB) {
+ ret = stm32_omm_configure(dev);
+ if (ret)
+ return ret;
+ } else {
+ dev_dbg(dev, "Octo Memory Manager resource's access not granted\n");
+ /*
+ * AMCR can't be set, so check if current value is coherent
+ * with memory-map areas defined in DT
+ */
+ ret = stm32_omm_set_amcr(dev, false);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ if (omm->cr & CR_MUXEN)
+ stm32_omm_toggle_child_clock(&pdev->dev, false);
+
+ return dev_err_probe(dev, ret, "Failed to create Octo Memory Manager child\n");
+ }
+
+ return 0;
+}
+
+static void stm32_omm_remove(struct platform_device *pdev)
+{
+ struct stm32_omm *omm = platform_get_drvdata(pdev);
+
+ if (omm->cr & CR_MUXEN)
+ stm32_omm_toggle_child_clock(&pdev->dev, false);
+}
+
+static const struct of_device_id stm32_omm_of_match[] = {
+ { .compatible = "st,stm32mp25-omm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_omm_of_match);
+
+static int __maybe_unused stm32_omm_runtime_suspend(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(omm->clk_bulk[0].clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_omm_runtime_resume(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(omm->clk_bulk[0].clk);
+}
+
+static int __maybe_unused stm32_omm_suspend(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+
+ if (omm->restore_omm && omm->cr & CR_MUXEN)
+ stm32_omm_toggle_child_clock(dev, false);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int __maybe_unused stm32_omm_resume(struct device *dev)
+{
+ struct stm32_omm *omm = dev_get_drvdata(dev);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ if (!omm->restore_omm)
+ return 0;
+
+ /* Ensure both OSPI instances are disabled before configuring OMM */
+ ret = stm32_omm_disable_child(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(omm->cr, omm->io_base + OMM_CR);
+ ret = stm32_omm_set_amcr(dev, true);
+ pm_runtime_put_sync_suspend(dev);
+ if (ret)
+ return ret;
+
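+ /* Child clocks were gated on suspend, so re-enable them if muxing is on */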
+ if (omm->cr & CR_MUXEN)
+ ret = stm32_omm_toggle_child_clock(dev, true);
+
+ return ret;
+}
+
+static const struct dev_pm_ops stm32_omm_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_omm_runtime_suspend,
+ stm32_omm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_omm_suspend, stm32_omm_resume)
+};
+
+static struct platform_driver stm32_omm_driver = {
+ .probe = stm32_omm_probe,
+ .remove = stm32_omm_remove,
+ .driver = {
+ .name = "stm32-omm",
+ .of_match_table = stm32_omm_of_match,
+ .pm = &stm32_omm_pm_ops,
+ },
+};
+module_platform_driver(stm32_omm_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics Octo Memory Manager driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index 3fe83d7c2bf8..fc5a27791826 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config TEGRA_MC
bool "NVIDIA Tegra Memory Controller support"
- default y
+ default ARCH_TEGRA
depends on ARCH_TEGRA || (COMPILE_TEST && COMMON_CLK)
select INTERCONNECT
help
@@ -12,7 +12,7 @@ if TEGRA_MC
config TEGRA20_EMC
tristate "NVIDIA Tegra20 External Memory Controller driver"
- default y
+ default ARCH_TEGRA_2x_SOC
depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ
@@ -25,7 +25,7 @@ config TEGRA20_EMC
config TEGRA30_EMC
tristate "NVIDIA Tegra30 External Memory Controller driver"
- default y
+ default ARCH_TEGRA_3x_SOC
depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
select PM_OPP
select DDR
@@ -37,7 +37,7 @@ config TEGRA30_EMC
config TEGRA124_EMC
tristate "NVIDIA Tegra124 External Memory Controller driver"
- default y
+ default ARCH_TEGRA_124_SOC
depends on ARCH_TEGRA_124_SOC || COMPILE_TEST
select TEGRA124_CLK_EMC if ARCH_TEGRA
select PM_OPP
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index e63f62690571..e96ca4157d48 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -558,7 +558,7 @@ tegra210_emc_table_register_offsets = {
static void tegra210_emc_train(struct timer_list *timer)
{
- struct tegra210_emc *emc = from_timer(emc, timer, training);
+ struct tegra210_emc *emc = timer_container_of(emc, timer, training);
unsigned long flags;
if (!emc->last)
@@ -614,7 +614,8 @@ static unsigned int tegra210_emc_get_temperature(struct tegra210_emc *emc)
static void tegra210_emc_poll_refresh(struct timer_list *timer)
{
- struct tegra210_emc *emc = from_timer(emc, timer, refresh_timer);
+ struct tegra210_emc *emc = timer_container_of(emc, timer,
+ refresh_timer);
unsigned int temperature;
if (!emc->debugfs.temperature)
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 7dc2c9987982..d34892782f6e 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1498,7 +1498,7 @@ static int msb_ftl_scan(struct msb_data *msb)
static void msb_cache_flush_timer(struct timer_list *t)
{
- struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
+ struct msb_data *msb = timer_container_of(msb, t, cache_flush_timer);
msb->need_flush_cache = true;
queue_work(msb->io_queue, &msb->io_work);
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index a5a9bb3f16be..cddddb3a5a27 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -590,7 +590,7 @@ static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id)
static void jmb38x_ms_abort(struct timer_list *t)
{
- struct jmb38x_ms_host *host = from_timer(host, t, timer);
+ struct jmb38x_ms_host *host = timer_container_of(host, t, timer);
struct memstick_host *msh = host->msh;
unsigned long flags;
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 488ef8eecb26..605b2265536f 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -614,7 +614,7 @@ static void r592_update_card_detect(struct r592_device *dev)
/* Timer routine that fires 1 second after last card detection event, */
static void r592_detect_timer(struct timer_list *t)
{
- struct r592_device *dev = from_timer(dev, t, detect_timer);
+ struct r592_device *dev = timer_container_of(dev, t, detect_timer);
r592_update_card_detect(dev);
memstick_detect_change(dev->host);
}
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 676348eee015..db7f3a088fb0 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -535,7 +535,7 @@ static int tifm_ms_set_param(struct memstick_host *msh,
static void tifm_ms_abort(struct timer_list *t)
{
- struct tifm_ms *host = from_timer(host, t, timer);
+ struct tifm_ms *host = timer_container_of(host, t, timer);
dev_dbg(&host->dev->dev, "status %x\n",
readl(host->dev->addr + SOCK_MS_STATUS));
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 8e68b64bd7f8..488e346047c1 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -624,8 +624,8 @@ static int device_irq_init(struct pm860x_chip *chip,
ret = -EBUSY;
goto out;
}
- irq_domain_add_legacy(node, nr_irqs, chip->irq_base, 0,
- &pm860x_irq_domain_ops, chip);
+ irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, chip->irq_base, 0,
+ &pm860x_irq_domain_ops, chip);
chip->core_irq = i2c->irq;
if (!chip->core_irq)
goto out;
diff --git a/drivers/mfd/88pm886.c b/drivers/mfd/88pm886.c
index 891fdce5d8c1..39dd9a818b0f 100644
--- a/drivers/mfd/88pm886.c
+++ b/drivers/mfd/88pm886.c
@@ -16,11 +16,11 @@ static const struct regmap_config pm886_regmap_config = {
.max_register = PM886_REG_RTC_SPARE6,
};
-static struct regmap_irq pm886_regmap_irqs[] = {
+static const struct regmap_irq pm886_regmap_irqs[] = {
REGMAP_IRQ_REG(PM886_IRQ_ONKEY, 0, PM886_INT_ENA1_ONKEY),
};
-static struct regmap_irq_chip pm886_regmap_irq_chip = {
+static const struct regmap_irq_chip pm886_regmap_irq_chip = {
.name = "88pm886",
.irqs = pm886_regmap_irqs,
.num_irqs = ARRAY_SIZE(pm886_regmap_irqs),
@@ -30,11 +30,11 @@ static struct regmap_irq_chip pm886_regmap_irq_chip = {
.unmask_base = PM886_REG_INT_ENA_1,
};
-static struct resource pm886_onkey_resources[] = {
+static const struct resource pm886_onkey_resources[] = {
DEFINE_RES_IRQ_NAMED(PM886_IRQ_ONKEY, "88pm886-onkey"),
};
-static struct mfd_cell pm886_devs[] = {
+static const struct mfd_cell pm886_devs[] = {
MFD_CELL_RES("88pm886-onkey", pm886_onkey_resources),
MFD_CELL_NAME("88pm886-regulator"),
MFD_CELL_NAME("88pm886-rtc"),
@@ -124,7 +124,11 @@ static int pm886_probe(struct i2c_client *client)
if (err)
return dev_err_probe(dev, err, "Failed to register power off handler\n");
- device_init_wakeup(dev, device_property_read_bool(dev, "wakeup-source"));
+ if (device_property_read_bool(dev, "wakeup-source")) {
+ err = devm_device_init_wakeup(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to init wakeup\n");
+ }
return 0;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 22b936310039..6fb3768e3d71 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -943,6 +943,26 @@ config MFD_MAX77714
drivers must be enabled in order to use each functionality of the
device.
+config MFD_MAX77759
+ tristate "Maxim Integrated MAX77759 PMIC"
+ depends on I2C
+ depends on OF
+ select IRQ_DOMAIN
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Say yes here to add support for Maxim Integrated MAX77759.
+ This is a companion Power Management IC for USB Type-C applications
+ with Battery Charger, Fuel Gauge, temperature sensors, USB Type-C
+ Port Controller (TCPC), NVMEM, and additional GPIO interfaces.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max77759.
+
config MFD_MAX77843
bool "Maxim Semiconductor MAX77843 PMIC Support"
depends on I2C=y
@@ -1292,21 +1312,42 @@ config MFD_RN5T618
functionality of the device.
config MFD_SEC_CORE
- tristate "Samsung Electronics PMIC Series Support"
+ tristate
+ select MFD_CORE
+ select REGMAP_IRQ
+
+config MFD_SEC_ACPM
+ tristate "Samsung Electronics S2MPG1x PMICs"
+ depends on EXYNOS_ACPM_PROTOCOL
+ depends on OF
+ select MFD_SEC_CORE
+ help
+ Support for the Samsung Electronics PMICs with the ACPM interface.
+ These are Power Management ICs for mobile applications with buck
+ converters, various LDOs, power meters, an RTC, clock outputs, and
+ additional GPIO interfaces.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ To compile this driver as a module, choose M here: the module will be
+ called sec-acpm.
+
+config MFD_SEC_I2C
+ tristate "Samsung Electronics S2MPA/S2MPS1X/S2MPU/S5M series PMICs"
depends on I2C=y
depends on OF
- select MFD_CORE
+ select MFD_SEC_CORE
select REGMAP_I2C
- select REGMAP_IRQ
help
- Support for the Samsung Electronics PMIC devices coming
- usually along with Samsung Exynos SoC chipset.
+ Support for the Samsung Electronics PMIC devices with an I2C interface,
+ usually found alongside Samsung Exynos SoC chipsets.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
- of the device
+ of the device.
To compile this driver as a module, choose M here: the
- module will be called sec-core.
+ module will be called sec-i2c.
Have in mind that important core drivers (like regulators) depend
on this driver so building this as a module might require proper
initial ramdisk or might not boot up as well in certain scenarios.
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 948cbdf42a18..79495f9f3457 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -169,6 +169,7 @@ obj-$(CONFIG_MFD_MAX77686) += max77686.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o
obj-$(CONFIG_MFD_MAX77705) += max77705.o
obj-$(CONFIG_MFD_MAX77714) += max77714.o
+obj-$(CONFIG_MFD_MAX77759) += max77759.o
obj-$(CONFIG_MFD_MAX77843) += max77843.o
obj-$(CONFIG_MFD_MAX8907) += max8907.o
max8925-objs := max8925-core.o max8925-i2c.o
@@ -228,7 +229,10 @@ obj-$(CONFIG_MFD_RK8XX) += rk8xx-core.o
obj-$(CONFIG_MFD_RK8XX_I2C) += rk8xx-i2c.o
obj-$(CONFIG_MFD_RK8XX_SPI) += rk8xx-spi.o
obj-$(CONFIG_MFD_RN5T618) += rn5t618.o
-obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o
+sec-core-objs := sec-common.o sec-irq.o
+obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o
+obj-$(CONFIG_MFD_SEC_ACPM) += sec-acpm.o
+obj-$(CONFIG_MFD_SEC_I2C) += sec-i2c.o
obj-$(CONFIG_MFD_SYSCON) += syscon.o
obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
obj-$(CONFIG_MFD_VEXPRESS_SYSREG) += vexpress-sysreg.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 8ef510e84688..34d66ba9646a 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -320,9 +320,7 @@ static const struct file_operations aat2870_reg_fops = {
static void aat2870_init_debugfs(struct aat2870_data *aat2870)
{
- aat2870->dentry_root = debugfs_create_dir("aat2870", NULL);
-
- debugfs_create_file("regs", 0644, aat2870->dentry_root, aat2870,
+ debugfs_create_file("regs", 0644, aat2870->client->debugfs, aat2870,
&aat2870_reg_fops);
}
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 15c95828b09a..049abcbd71ce 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -580,9 +580,9 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
num_irqs = AB8500_NR_IRQS;
/* If ->irq_base is zero this will give a linear mapping */
- ab8500->domain = irq_domain_add_simple(ab8500->dev->of_node,
- num_irqs, 0,
- &ab8500_irq_ops, ab8500);
+ ab8500->domain = irq_domain_create_simple(of_fwnode_handle(ab8500->dev->of_node),
+ num_irqs, 0,
+ &ab8500_irq_ops, ab8500);
if (!ab8500->domain) {
dev_err(ab8500->dev, "Failed to create irqdomain\n");
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index d919ae9691e2..ac2139597fab 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -312,8 +312,7 @@ int arizona_irq_init(struct arizona *arizona)
flags |= arizona->pdata.irq_flags;
/* Allocate a virtual IRQ domain to distribute to the regmap domains */
- arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops,
- arizona);
+ arizona->virq = irq_domain_create_linear(NULL, 2, &arizona_domain_ops, arizona);
if (!arizona->virq) {
dev_err(arizona->dev, "Failed to add core IRQ domain\n");
ret = -EINVAL;
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 6c0d89b0c7e3..7ab6fcc9c27c 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -394,7 +394,9 @@ static int as3722_i2c_probe(struct i2c_client *i2c)
return ret;
}
- device_init_wakeup(as3722->dev, true);
+ ret = devm_device_init_wakeup(as3722->dev);
+ if (ret)
+ return dev_err_probe(as3722->dev, ret, "Failed to init wakeup\n");
dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n");
return 0;
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 8b56786d85d0..5a8456bbd63f 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -17,6 +17,15 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+/* Under primary I2C address: */
+#define BCM590XX_REG_PMUID 0x1e
+
+#define BCM590XX_REG_PMUREV 0x1f
+#define BCM590XX_PMUREV_DIG_MASK 0xF
+#define BCM590XX_PMUREV_DIG_SHIFT 0
+#define BCM590XX_PMUREV_ANA_MASK 0xF0
+#define BCM590XX_PMUREV_ANA_SHIFT 4
+
static const struct mfd_cell bcm590xx_devs[] = {
{
.name = "bcm590xx-vregs",
@@ -37,6 +46,47 @@ static const struct regmap_config bcm590xx_regmap_config_sec = {
.cache_type = REGCACHE_MAPLE,
};
+/* Map PMU ID value to model name string */
+static const char * const bcm590xx_names[] = {
+ [BCM590XX_PMUID_BCM59054] = "BCM59054",
+ [BCM590XX_PMUID_BCM59056] = "BCM59056",
+};
+
+static int bcm590xx_parse_version(struct bcm590xx *bcm590xx)
+{
+ unsigned int id, rev;
+ int ret;
+
+ /* Get the PMU ID and verify that it matches the compatible string */
+ ret = regmap_read(bcm590xx->regmap_pri, BCM590XX_REG_PMUID, &id);
+ if (ret) {
+ dev_err(bcm590xx->dev, "failed to read PMU ID: %d\n", ret);
+ return ret;
+ }
+
+ if (id != bcm590xx->pmu_id) {
+ dev_err(bcm590xx->dev, "Incorrect ID for %s: expected %x, got %x.\n",
+ bcm590xx_names[bcm590xx->pmu_id], bcm590xx->pmu_id, id);
+ return -ENODEV;
+ }
+
+ /* Get PMU revision and store it in the info struct */
+ ret = regmap_read(bcm590xx->regmap_pri, BCM590XX_REG_PMUREV, &rev);
+ if (ret) {
+ dev_err(bcm590xx->dev, "failed to read PMU revision: %d\n", ret);
+ return ret;
+ }
+
+ bcm590xx->rev_digital = (rev & BCM590XX_PMUREV_DIG_MASK) >> BCM590XX_PMUREV_DIG_SHIFT;
+
+ bcm590xx->rev_analog = (rev & BCM590XX_PMUREV_ANA_MASK) >> BCM590XX_PMUREV_ANA_SHIFT;
+
+ dev_dbg(bcm590xx->dev, "PMU ID 0x%x (%s), revision: digital %d, analog %d",
+ id, bcm590xx_names[id], bcm590xx->rev_digital, bcm590xx->rev_analog);
+
+ return 0;
+}
+
static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
{
struct bcm590xx *bcm590xx;
@@ -50,6 +100,8 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
bcm590xx->dev = &i2c_pri->dev;
bcm590xx->i2c_pri = i2c_pri;
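+ /* The expected PMU ID comes from the OF match data (see bcm590xx_of_match) */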
+ bcm590xx->pmu_id = (uintptr_t) of_device_get_match_data(bcm590xx->dev);
+
bcm590xx->regmap_pri = devm_regmap_init_i2c(i2c_pri,
&bcm590xx_regmap_config_pri);
if (IS_ERR(bcm590xx->regmap_pri)) {
@@ -76,6 +128,10 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri)
goto err;
}
+ ret = bcm590xx_parse_version(bcm590xx);
+ if (ret)
+ goto err;
+
ret = devm_mfd_add_devices(&i2c_pri->dev, -1, bcm590xx_devs,
ARRAY_SIZE(bcm590xx_devs), NULL, 0, NULL);
if (ret < 0) {
@@ -91,12 +147,20 @@ err:
}
static const struct of_device_id bcm590xx_of_match[] = {
- { .compatible = "brcm,bcm59056" },
+ {
+ .compatible = "brcm,bcm59054",
+ .data = (void *)BCM590XX_PMUID_BCM59054,
+ },
+ {
+ .compatible = "brcm,bcm59056",
+ .data = (void *)BCM590XX_PMUID_BCM59056,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bcm590xx_of_match);
static const struct i2c_device_id bcm590xx_i2c_id[] = {
+ { "bcm59054" },
{ "bcm59056" },
{ }
};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5b3e355e78f6..21e68a382b11 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2607,9 +2607,9 @@ static int db8500_irq_init(struct device_node *np)
{
int i;
- db8500_irq_domain = irq_domain_add_simple(
- np, NUM_PRCMU_WAKEUPS, 0,
- &db8500_irq_ops, NULL);
+ db8500_irq_domain = irq_domain_create_simple(of_fwnode_handle(np),
+ NUM_PRCMU_WAKEUPS, 0,
+ &db8500_irq_ops, NULL);
if (!db8500_irq_domain) {
pr_err("Failed to create irqdomain\n");
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index 6a585173230b..44797001a432 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -104,11 +104,22 @@ static const struct regmap_config exynos_lpass_reg_conf = {
.fast_io = true,
};
+static void exynos_lpass_disable_lpass(void *data)
+{
+ struct platform_device *pdev = data;
+ struct exynos_lpass *lpass = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
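+ /* Only touch the hardware if runtime PM has not already suspended it */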
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ exynos_lpass_disable(lpass);
+}
+
static int exynos_lpass_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_lpass *lpass;
void __iomem *base_top;
+ int ret;
lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL);
if (!lpass)
@@ -122,8 +133,8 @@ static int exynos_lpass_probe(struct platform_device *pdev)
if (IS_ERR(lpass->sfr0_clk))
return PTR_ERR(lpass->sfr0_clk);
- lpass->top = regmap_init_mmio(dev, base_top,
- &exynos_lpass_reg_conf);
+ lpass->top = devm_regmap_init_mmio(dev, base_top,
+ &exynos_lpass_reg_conf);
if (IS_ERR(lpass->top)) {
dev_err(dev, "LPASS top regmap initialization failed\n");
return PTR_ERR(lpass->top);
@@ -134,18 +145,11 @@ static int exynos_lpass_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
exynos_lpass_enable(lpass);
- return devm_of_platform_populate(dev);
-}
-
-static void exynos_lpass_remove(struct platform_device *pdev)
-{
- struct exynos_lpass *lpass = platform_get_drvdata(pdev);
+ ret = devm_add_action_or_reset(dev, exynos_lpass_disable_lpass, pdev);
+ if (ret)
+ return ret;
- exynos_lpass_disable(lpass);
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- exynos_lpass_disable(lpass);
- regmap_exit(lpass->top);
+ return devm_of_platform_populate(dev);
}
static int __maybe_unused exynos_lpass_suspend(struct device *dev)
@@ -185,7 +189,6 @@ static struct platform_driver exynos_lpass_driver = {
.of_match_table = exynos_lpass_of_match,
},
.probe = exynos_lpass_probe,
- .remove = exynos_lpass_remove,
};
module_platform_driver(exynos_lpass_driver);
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 6fe388da6fb6..d47152467951 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -65,15 +65,14 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
struct mx25_tsadc *tsadc)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
int irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops,
- tsadc);
+ tsadc->domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node), 2, 0,
+ &mx25_tsadc_domain_ops, tsadc);
if (!tsadc->domain) {
dev_err(dev, "Failed to add irq domain\n");
return -ENOMEM;
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index 39006297f3d2..ea0fdf7a4b6e 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -161,7 +161,7 @@ int lp8788_irq_init(struct lp8788 *lp, int irq)
return -ENOMEM;
irqd->lp = lp;
- irqd->domain = irq_domain_add_linear(lp->dev->of_node, LP8788_INT_MAX,
+ irqd->domain = irq_domain_create_linear(of_fwnode_handle(lp->dev->of_node), LP8788_INT_MAX,
&lp8788_domain_ops, irqd);
if (!irqd->domain) {
dev_err(lp->dev, "failed to add irq domain err\n");
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6fce79ec2dc6..7e7e8af9af22 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -456,6 +456,7 @@ static void max14577_i2c_remove(struct i2c_client *i2c)
{
struct max14577 *max14577 = i2c_get_clientdata(i2c);
+ device_init_wakeup(max14577->dev, false);
mfd_remove_devices(max14577->dev);
regmap_del_irq_chip(max14577->irq, max14577->irq_data);
if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
diff --git a/drivers/mfd/max77541.c b/drivers/mfd/max77541.c
index d77c31c86e43..f91b4f5373ce 100644
--- a/drivers/mfd/max77541.c
+++ b/drivers/mfd/max77541.c
@@ -152,7 +152,7 @@ static int max77541_pmic_setup(struct device *dev)
if (ret)
return dev_err_probe(dev, ret, "Failed to initialize IRQ\n");
- ret = device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
if (ret)
return dev_err_probe(dev, ret, "Unable to init wakeup\n");
diff --git a/drivers/mfd/max77705.c b/drivers/mfd/max77705.c
index 60c457c21d95..6b263bacb8c2 100644
--- a/drivers/mfd/max77705.c
+++ b/drivers/mfd/max77705.c
@@ -131,7 +131,9 @@ static int max77705_i2c_probe(struct i2c_client *i2c)
if (ret)
return dev_err_probe(dev, ret, "Failed to register child devices\n");
- device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
return 0;
}
diff --git a/drivers/mfd/max77759.c b/drivers/mfd/max77759.c
new file mode 100644
index 000000000000..6cf6306c4a3b
--- /dev/null
+++ b/drivers/mfd/max77759.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Core driver for Maxim MAX77759 companion PMIC for USB Type-C
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/completion.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77759.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+#include <linux/regmap.h>
+
+/* Chip ID as per MAX77759_PMIC_REG_PMIC_ID */
+enum {
+ MAX77759_CHIP_ID = 59,
+};
+
+enum max77759_i2c_subdev_id {
+ /*
+ * These are arbitrary and simply used to match struct
+ * max77759_i2c_subdev entries to the regmap pointers in struct
+ * max77759 during probe().
+ */
+ MAX77759_I2C_SUBDEV_ID_MAXQ,
+ MAX77759_I2C_SUBDEV_ID_CHARGER,
+};
+
+struct max77759_i2c_subdev {
+ enum max77759_i2c_subdev_id id;
+ const struct regmap_config *cfg;
+ u16 i2c_address;
+};
+
+static const struct regmap_range max77759_top_registers[] = {
+ regmap_reg_range(0x00, 0x02), /* PMIC_ID / PMIC_REVISION / OTP_REVISION */
+ regmap_reg_range(0x22, 0x24), /* INTSRC / INTSRCMASK / TOPSYS_INT */
+ regmap_reg_range(0x26, 0x26), /* TOPSYS_INT_MASK */
+ regmap_reg_range(0x40, 0x40), /* I2C_CNFG */
+ regmap_reg_range(0x50, 0x51), /* SWRESET / CONTROL_FG */
+};
+
+static const struct regmap_range max77759_top_ro_registers[] = {
+ regmap_reg_range(0x00, 0x02),
+ regmap_reg_range(0x22, 0x22),
+};
+
+static const struct regmap_range max77759_top_volatile_registers[] = {
+ regmap_reg_range(0x22, 0x22),
+ regmap_reg_range(0x24, 0x24),
+};
+
+static const struct regmap_access_table max77759_top_wr_table = {
+ .yes_ranges = max77759_top_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_top_registers),
+ .no_ranges = max77759_top_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(max77759_top_ro_registers),
+};
+
+static const struct regmap_access_table max77759_top_rd_table = {
+ .yes_ranges = max77759_top_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_top_registers),
+};
+
+static const struct regmap_access_table max77759_top_volatile_table = {
+ .yes_ranges = max77759_top_volatile_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_top_volatile_registers),
+};
+
+static const struct regmap_config max77759_regmap_config_top = {
+ .name = "top",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77759_PMIC_REG_CONTROL_FG,
+ .wr_table = &max77759_top_wr_table,
+ .rd_table = &max77759_top_rd_table,
+ .volatile_table = &max77759_top_volatile_table,
+ .num_reg_defaults_raw = MAX77759_PMIC_REG_CONTROL_FG + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range max77759_maxq_registers[] = {
+ regmap_reg_range(0x60, 0x73), /* Device ID, Rev, INTx, STATUSx, MASKx */
+ regmap_reg_range(0x81, 0xa1), /* AP_DATAOUTx */
+ regmap_reg_range(0xb1, 0xd1), /* AP_DATAINx */
+ regmap_reg_range(0xe0, 0xe0), /* UIC_SWRST */
+};
+
+static const struct regmap_range max77759_maxq_ro_registers[] = {
+ regmap_reg_range(0x60, 0x63), /* Device ID, Rev */
+ regmap_reg_range(0x68, 0x6f), /* STATUSx */
+ regmap_reg_range(0xb1, 0xd1),
+};
+
+static const struct regmap_range max77759_maxq_volatile_registers[] = {
+ regmap_reg_range(0x64, 0x6f), /* INTx, STATUSx */
+ regmap_reg_range(0xb1, 0xd1),
+ regmap_reg_range(0xe0, 0xe0),
+};
+
+static const struct regmap_access_table max77759_maxq_wr_table = {
+ .yes_ranges = max77759_maxq_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_maxq_registers),
+ .no_ranges = max77759_maxq_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(max77759_maxq_ro_registers),
+};
+
+static const struct regmap_access_table max77759_maxq_rd_table = {
+ .yes_ranges = max77759_maxq_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_maxq_registers),
+};
+
+static const struct regmap_access_table max77759_maxq_volatile_table = {
+ .yes_ranges = max77759_maxq_volatile_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_maxq_volatile_registers),
+};
+
+static const struct regmap_config max77759_regmap_config_maxq = {
+ .name = "maxq",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77759_MAXQ_REG_UIC_SWRST,
+ .wr_table = &max77759_maxq_wr_table,
+ .rd_table = &max77759_maxq_rd_table,
+ .volatile_table = &max77759_maxq_volatile_table,
+ .num_reg_defaults_raw = MAX77759_MAXQ_REG_UIC_SWRST + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range max77759_charger_registers[] = {
+ regmap_reg_range(0xb0, 0xcc),
+};
+
+static const struct regmap_range max77759_charger_ro_registers[] = {
+ regmap_reg_range(0xb4, 0xb8), /* INT_OK, DETAILS_0x */
+};
+
+static const struct regmap_range max77759_charger_volatile_registers[] = {
+ regmap_reg_range(0xb0, 0xb1), /* INTx */
+ regmap_reg_range(0xb4, 0xb8),
+};
+
+static const struct regmap_access_table max77759_charger_wr_table = {
+ .yes_ranges = max77759_charger_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_charger_registers),
+ .no_ranges = max77759_charger_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(max77759_charger_ro_registers),
+};
+
+static const struct regmap_access_table max77759_charger_rd_table = {
+ .yes_ranges = max77759_charger_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_charger_registers),
+};
+
+static const struct regmap_access_table max77759_charger_volatile_table = {
+ .yes_ranges = max77759_charger_volatile_registers,
+ .n_yes_ranges = ARRAY_SIZE(max77759_charger_volatile_registers),
+};
+
+static const struct regmap_config max77759_regmap_config_charger = {
+ .name = "charger",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77759_CHGR_REG_CHG_CNFG_19,
+ .wr_table = &max77759_charger_wr_table,
+ .rd_table = &max77759_charger_rd_table,
+ .volatile_table = &max77759_charger_volatile_table,
+ .num_reg_defaults_raw = MAX77759_CHGR_REG_CHG_CNFG_19 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+/*
+ * Interrupts - with the following interrupt hierarchy:
+ * pmic IRQs (INTSRC)
+ * - MAXQ_INT: MaxQ IRQs
+ * - UIC_INT1
+ * - APCmdResI
+ * - SysMsgI
+ * - GPIOxI
+ * - TOPSYS_INT: topsys
+ * - TOPSYS_INT
+ * - TSHDN_INT
+ * - SYSOVLO_INT
+ * - SYSUVLO_INT
+ * - FSHIP_NOT_RD
+ * - CHGR_INT: charger
+ * - CHG_INT
+ * - CHG_INT2
+ */
+enum {
+ MAX77759_INT_MAXQ,
+ MAX77759_INT_TOPSYS,
+ MAX77759_INT_CHGR,
+};
+
+enum {
+ MAX77759_TOPSYS_INT_TSHDN,
+ MAX77759_TOPSYS_INT_SYSOVLO,
+ MAX77759_TOPSYS_INT_SYSUVLO,
+ MAX77759_TOPSYS_INT_FSHIP_NOT_RD,
+};
+
+enum {
+ MAX77759_MAXQ_INT_APCMDRESI,
+ MAX77759_MAXQ_INT_SYSMSGI,
+ MAX77759_MAXQ_INT_GPIO,
+ MAX77759_MAXQ_INT_UIC1,
+ MAX77759_MAXQ_INT_UIC2,
+ MAX77759_MAXQ_INT_UIC3,
+ MAX77759_MAXQ_INT_UIC4,
+};
+
+enum {
+ MAX77759_CHARGER_INT_1,
+ MAX77759_CHARGER_INT_2,
+};
+
+static const struct regmap_irq max77759_pmic_irqs[] = {
+ REGMAP_IRQ_REG(MAX77759_INT_MAXQ, 0, MAX77759_PMIC_REG_INTSRC_MAXQ),
+ REGMAP_IRQ_REG(MAX77759_INT_TOPSYS, 0, MAX77759_PMIC_REG_INTSRC_TOPSYS),
+ REGMAP_IRQ_REG(MAX77759_INT_CHGR, 0, MAX77759_PMIC_REG_INTSRC_CHGR),
+};
+
+static const struct regmap_irq max77759_maxq_irqs[] = {
+ REGMAP_IRQ_REG(MAX77759_MAXQ_INT_APCMDRESI, 0, MAX77759_MAXQ_REG_UIC_INT1_APCMDRESI),
+ REGMAP_IRQ_REG(MAX77759_MAXQ_INT_SYSMSGI, 0, MAX77759_MAXQ_REG_UIC_INT1_SYSMSGI),
+ REGMAP_IRQ_REG(MAX77759_MAXQ_INT_GPIO, 0, GENMASK(1, 0)),
+ REGMAP_IRQ_REG(MAX77759_MAXQ_INT_UIC1, 0, GENMASK(5, 2)),
+ REGMAP_IRQ_REG(MAX77759_MAXQ_INT_UIC2, 1, GENMASK(7, 0)),
+ REGMAP_IRQ_REG(MAX77759_MAXQ_INT_UIC3, 2, GENMASK(7, 0)),
+ REGMAP_IRQ_REG(MAX77759_MAXQ_INT_UIC4, 3, GENMASK(7, 0)),
+};
+
+static const struct regmap_irq max77759_topsys_irqs[] = {
+ REGMAP_IRQ_REG(MAX77759_TOPSYS_INT_TSHDN, 0, MAX77759_PMIC_REG_TOPSYS_INT_TSHDN),
+ REGMAP_IRQ_REG(MAX77759_TOPSYS_INT_SYSOVLO, 0, MAX77759_PMIC_REG_TOPSYS_INT_SYSOVLO),
+ REGMAP_IRQ_REG(MAX77759_TOPSYS_INT_SYSUVLO, 0, MAX77759_PMIC_REG_TOPSYS_INT_SYSUVLO),
+ REGMAP_IRQ_REG(MAX77759_TOPSYS_INT_FSHIP_NOT_RD, 0, MAX77759_PMIC_REG_TOPSYS_INT_FSHIP),
+};
+
+static const struct regmap_irq max77759_chgr_irqs[] = {
+ REGMAP_IRQ_REG(MAX77759_CHARGER_INT_1, 0, GENMASK(7, 0)),
+ REGMAP_IRQ_REG(MAX77759_CHARGER_INT_2, 1, GENMASK(7, 0)),
+};
+
+static const struct regmap_irq_chip max77759_pmic_irq_chip = {
+ .name = "max77759-pmic",
+ /* INTSRC is read-only and doesn't require clearing */
+ .status_base = MAX77759_PMIC_REG_INTSRC,
+ .mask_base = MAX77759_PMIC_REG_INTSRCMASK,
+ .num_regs = 1,
+ .irqs = max77759_pmic_irqs,
+ .num_irqs = ARRAY_SIZE(max77759_pmic_irqs),
+};
+
+/*
+ * We can let regmap-irq auto-ack the topsys interrupt bits as required, but
+ * for all others the individual drivers need to know exactly which interrupt
+ * bit is set inside their interrupt handlers, and therefore we cannot set
+ * .ack_base for those.
+ */
+static const struct regmap_irq_chip max77759_maxq_irq_chip = {
+ .name = "max77759-maxq",
+ .domain_suffix = "MAXQ",
+ .status_base = MAX77759_MAXQ_REG_UIC_INT1,
+ .mask_base = MAX77759_MAXQ_REG_UIC_INT1_M,
+ .num_regs = 4,
+ .irqs = max77759_maxq_irqs,
+ .num_irqs = ARRAY_SIZE(max77759_maxq_irqs),
+};
+
+static const struct regmap_irq_chip max77759_topsys_irq_chip = {
+ .name = "max77759-topsys",
+ .domain_suffix = "TOPSYS",
+ .status_base = MAX77759_PMIC_REG_TOPSYS_INT,
+ .mask_base = MAX77759_PMIC_REG_TOPSYS_INT_MASK,
+ .ack_base = MAX77759_PMIC_REG_TOPSYS_INT,
+ .num_regs = 1,
+ .irqs = max77759_topsys_irqs,
+ .num_irqs = ARRAY_SIZE(max77759_topsys_irqs),
+};
+
+static const struct regmap_irq_chip max77759_chrg_irq_chip = {
+ .name = "max77759-chgr",
+ .domain_suffix = "CHGR",
+ .status_base = MAX77759_CHGR_REG_CHG_INT,
+ .mask_base = MAX77759_CHGR_REG_CHG_INT_MASK,
+ .num_regs = 2,
+ .irqs = max77759_chgr_irqs,
+ .num_irqs = ARRAY_SIZE(max77759_chgr_irqs),
+};
+
+static const struct max77759_i2c_subdev max77759_i2c_subdevs[] = {
+ {
+ .id = MAX77759_I2C_SUBDEV_ID_MAXQ,
+ .cfg = &max77759_regmap_config_maxq,
+ /* I2C address is the same as for sub-block 'top' */
+ },
+ {
+ .id = MAX77759_I2C_SUBDEV_ID_CHARGER,
+ .cfg = &max77759_regmap_config_charger,
+ .i2c_address = 0x69,
+ },
+};
+
+static const struct resource max77759_gpio_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MAX77759_MAXQ_INT_GPIO, "GPI"),
+};
+
+static const struct resource max77759_charger_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MAX77759_CHARGER_INT_1, "INT1"),
+ DEFINE_RES_IRQ_NAMED(MAX77759_CHARGER_INT_2, "INT2"),
+};
+
+static const struct mfd_cell max77759_cells[] = {
+ MFD_CELL_OF("max77759-nvmem", NULL, NULL, 0, 0,
+ "maxim,max77759-nvmem"),
+};
+
+static const struct mfd_cell max77759_maxq_cells[] = {
+ MFD_CELL_OF("max77759-gpio", max77759_gpio_resources, NULL, 0, 0,
+ "maxim,max77759-gpio"),
+};
+
+static const struct mfd_cell max77759_charger_cells[] = {
+ MFD_CELL_RES("max77759-charger", max77759_charger_resources),
+};
+
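+/*
+ * max77759_maxq_command() - send an opcode command to the MaxQ microcontroller
+ * @max77759: MFD core data
+ * @cmd: opcode and payload to send (cmd->length bytes)
+ * @rsp: caller-provided response buffer, or NULL if only completion matters
+ *
+ * Illustrative call from a child driver that already holds a struct max77759
+ * pointer; the 2-byte command below is hypothetical and only shows the
+ * calling convention:
+ *
+ *   DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 2);
+ *
+ *   cmd->cmd[0] = 0x0a;
+ *   cmd->cmd[1] = 0x01;
+ *   ret = max77759_maxq_command(max77759, cmd, NULL);
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */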
+int max77759_maxq_command(struct max77759 *max77759,
+ const struct max77759_maxq_command *cmd,
+ struct max77759_maxq_response *rsp)
+{
+ DEFINE_FLEX(struct max77759_maxq_response, _rsp, rsp, length, 1);
+ struct device *dev = regmap_get_device(max77759->regmap_maxq);
+ static const unsigned int timeout_ms = 200;
+ int ret;
+
+ if (cmd->length > MAX77759_MAXQ_OPCODE_MAXLENGTH)
+ return -EINVAL;
+
+ /*
+ * As a convenience for API users when issuing simple commands, rsp is
+ * allowed to be NULL. In that case we need a temporary here to write
+ * the response to, as we need to verify that the command was indeed
+ * completed correctly.
+ */
+ if (!rsp)
+ rsp = _rsp;
+
+ if (!rsp->length || rsp->length > MAX77759_MAXQ_OPCODE_MAXLENGTH)
+ return -EINVAL;
+
+ guard(mutex)(&max77759->maxq_lock);
+
+ reinit_completion(&max77759->cmd_done);
+
+ /*
+ * MaxQ latches the message when the DATAOUT32 register is written. If
+ * cmd->length is shorter, we still need to write 0 to it.
+ */
+ ret = regmap_bulk_write(max77759->regmap_maxq,
+ MAX77759_MAXQ_REG_AP_DATAOUT0, cmd->cmd,
+ cmd->length);
+ if (!ret && cmd->length < MAX77759_MAXQ_OPCODE_MAXLENGTH)
+ ret = regmap_write(max77759->regmap_maxq,
+ MAX77759_MAXQ_REG_AP_DATAOUT32, 0);
+ if (ret) {
+ dev_err(dev, "writing command failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for response from MaxQ */
+ if (!wait_for_completion_timeout(&max77759->cmd_done,
+ msecs_to_jiffies(timeout_ms))) {
+ dev_err(dev, "timed out waiting for response\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = regmap_bulk_read(max77759->regmap_maxq,
+ MAX77759_MAXQ_REG_AP_DATAIN0,
+ rsp->rsp, rsp->length);
+ if (ret) {
+ dev_err(dev, "reading response failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * As per the protocol, the first byte of the reply will match the
+ * request.
+ */
+ if (cmd->cmd[0] != rsp->rsp[0]) {
+ dev_err(dev, "unexpected opcode response for %#.2x: %*ph\n",
+ cmd->cmd[0], (int)rsp->length, rsp->rsp);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(max77759_maxq_command);
+
+static irqreturn_t apcmdres_irq_handler(int irq, void *irq_data)
+{
+ struct max77759 *max77759 = irq_data;
+
+ regmap_write(max77759->regmap_maxq, MAX77759_MAXQ_REG_UIC_INT1,
+ MAX77759_MAXQ_REG_UIC_INT1_APCMDRESI);
+
+ complete(&max77759->cmd_done);
+
+ return IRQ_HANDLED;
+}
+
+static int max77759_create_i2c_subdev(struct i2c_client *client,
+ struct max77759 *max77759,
+ const struct max77759_i2c_subdev *sd)
+{
+ struct i2c_client *sub;
+ struct regmap *regmap;
+ int ret;
+
+ /*
+ * If 'sd' has an I2C address, 'sub' will be assigned a new 'dummy'
+ * device; otherwise the main I2C client is used as-is.
+ */
+ sub = client;
+ if (sd->i2c_address) {
+ sub = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ sd->i2c_address);
+
+ if (IS_ERR(sub))
+ return dev_err_probe(&client->dev, PTR_ERR(sub),
+ "failed to claim I2C device %s\n",
+ sd->cfg->name);
+ }
+
+ regmap = devm_regmap_init_i2c(sub, sd->cfg);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&sub->dev, PTR_ERR(regmap),
+ "regmap init for '%s' failed\n",
+ sd->cfg->name);
+
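+ /* Home the regmap on the core device rather than the (possibly dummy) sub-client */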
+ ret = regmap_attach_dev(&client->dev, regmap, sd->cfg);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "regmap attach of '%s' failed\n",
+ sd->cfg->name);
+
+ if (sd->id == MAX77759_I2C_SUBDEV_ID_MAXQ)
+ max77759->regmap_maxq = regmap;
+ else if (sd->id == MAX77759_I2C_SUBDEV_ID_CHARGER)
+ max77759->regmap_charger = regmap;
+
+ return 0;
+}
+
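+/*
+ * Look up the virtual IRQ that @parent mapped for @pirq and register @chip
+ * as a chained regmap-irq controller behind it.
+ */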
+static int max77759_add_chained_irq_chip(struct device *dev,
+ struct regmap *regmap,
+ int pirq,
+ struct regmap_irq_chip_data *parent,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ int irq, ret;
+
+ irq = regmap_irq_get_virq(parent, pirq);
+ if (irq < 0)
+ return dev_err_probe(dev, irq,
+ "failed to get parent vIRQ(%d) for chip %s\n",
+ pirq, chip->name);
+
+ ret = devm_regmap_add_irq_chip(dev, regmap, irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0, chip,
+ data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add %s IRQ chip\n",
+ chip->name);
+
+ return 0;
+}
+
+static int max77759_add_chained_maxq(struct i2c_client *client,
+ struct max77759 *max77759,
+ struct regmap_irq_chip_data *parent)
+{
+ struct regmap_irq_chip_data *irq_chip_data;
+ int apcmdres_irq;
+ int ret;
+
+ ret = max77759_add_chained_irq_chip(&client->dev,
+ max77759->regmap_maxq,
+ MAX77759_INT_MAXQ,
+ parent,
+ &max77759_maxq_irq_chip,
+ &irq_chip_data);
+ if (ret)
+ return ret;
+
+ init_completion(&max77759->cmd_done);
+ apcmdres_irq = regmap_irq_get_virq(irq_chip_data,
+ MAX77759_MAXQ_INT_APCMDRESI);
+
+ ret = devm_request_threaded_irq(&client->dev, apcmdres_irq,
+ NULL, apcmdres_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(&client->dev), max77759);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "MAX77759_MAXQ_INT_APCMDRESI failed\n");
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
+ max77759_maxq_cells,
+ ARRAY_SIZE(max77759_maxq_cells),
+ NULL, 0,
+ regmap_irq_get_domain(irq_chip_data));
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to add child devices (MaxQ)\n");
+
+ return 0;
+}
+
+static int max77759_add_chained_topsys(struct i2c_client *client,
+ struct max77759 *max77759,
+ struct regmap_irq_chip_data *parent)
+{
+ struct regmap_irq_chip_data *irq_chip_data;
+ int ret;
+
+ ret = max77759_add_chained_irq_chip(&client->dev,
+ max77759->regmap_top,
+ MAX77759_INT_TOPSYS,
+ parent,
+ &max77759_topsys_irq_chip,
+ &irq_chip_data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int max77759_add_chained_charger(struct i2c_client *client,
+ struct max77759 *max77759,
+ struct regmap_irq_chip_data *parent)
+{
+ struct regmap_irq_chip_data *irq_chip_data;
+ int ret;
+
+ ret = max77759_add_chained_irq_chip(&client->dev,
+ max77759->regmap_charger,
+ MAX77759_INT_CHGR,
+ parent,
+ &max77759_chrg_irq_chip,
+ &irq_chip_data);
+ if (ret)
+ return ret;
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
+ max77759_charger_cells,
+ ARRAY_SIZE(max77759_charger_cells),
+ NULL, 0,
+ regmap_irq_get_domain(irq_chip_data));
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to add child devices (charger)\n");
+
+ return 0;
+}
+
+static int max77759_probe(struct i2c_client *client)
+{
+ struct regmap_irq_chip_data *irq_chip_data_pmic;
+ struct irq_data *irq_data;
+ struct max77759 *max77759;
+ unsigned long irq_flags;
+ unsigned int pmic_id;
+ int ret;
+
+ max77759 = devm_kzalloc(&client->dev, sizeof(*max77759), GFP_KERNEL);
+ if (!max77759)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, max77759);
+
+ max77759->regmap_top = devm_regmap_init_i2c(client,
+ &max77759_regmap_config_top);
+ if (IS_ERR(max77759->regmap_top))
+ return dev_err_probe(&client->dev, PTR_ERR(max77759->regmap_top),
+ "regmap init for '%s' failed\n",
+ max77759_regmap_config_top.name);
+
+ ret = regmap_read(max77759->regmap_top,
+ MAX77759_PMIC_REG_PMIC_ID, &pmic_id);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "unable to read device ID\n");
+
+ if (pmic_id != MAX77759_CHIP_ID)
+ return dev_err_probe(&client->dev, -ENODEV,
+ "unsupported device ID %#.2x (%d)\n",
+ pmic_id, pmic_id);
+
+ ret = devm_mutex_init(&client->dev, &max77759->maxq_lock);
+ if (ret)
+ return ret;
+
+ for (int i = 0; i < ARRAY_SIZE(max77759_i2c_subdevs); i++) {
+ ret = max77759_create_i2c_subdev(client, max77759,
+ &max77759_i2c_subdevs[i]);
+ if (ret)
+ return ret;
+ }
+
+ irq_data = irq_get_irq_data(client->irq);
+ if (!irq_data)
+ return dev_err_probe(&client->dev, -EINVAL,
+ "invalid IRQ: %d\n", client->irq);
+
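+ /* Keep the trigger type that firmware/DT described for the shared IRQ line */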
+ irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ irq_flags |= irqd_get_trigger_type(irq_data);
+
+ ret = devm_regmap_add_irq_chip(&client->dev, max77759->regmap_top,
+ client->irq, irq_flags, 0,
+ &max77759_pmic_irq_chip,
+ &irq_chip_data_pmic);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to add IRQ chip '%s'\n",
+ max77759_pmic_irq_chip.name);
+
+ ret = max77759_add_chained_maxq(client, max77759, irq_chip_data_pmic);
+ if (ret)
+ return ret;
+
+ ret = max77759_add_chained_topsys(client, max77759, irq_chip_data_pmic);
+ if (ret)
+ return ret;
+
+ ret = max77759_add_chained_charger(client, max77759, irq_chip_data_pmic);
+ if (ret)
+ return ret;
+
+ return devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
+ max77759_cells, ARRAY_SIZE(max77759_cells),
+ NULL, 0,
+ regmap_irq_get_domain(irq_chip_data_pmic));
+}
+
+static const struct i2c_device_id max77759_i2c_id[] = {
+ { "max77759" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max77759_i2c_id);
+
+static const struct of_device_id max77759_of_id[] = {
+ { .compatible = "maxim,max77759", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77759_of_id);
+
+static struct i2c_driver max77759_i2c_driver = {
+ .driver = {
+ .name = "max77759",
+ .of_match_table = max77759_of_id,
+ },
+ .probe = max77759_probe,
+ .id_table = max77759_i2c_id,
+};
+module_i2c_driver(max77759_i2c_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("Maxim MAX77759 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 105d79b91493..78b16c67a5fc 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -682,8 +682,8 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
return -EBUSY;
}
- irq_domain_add_legacy(node, MAX8925_NR_IRQS, chip->irq_base, 0,
- &max8925_irq_domain_ops, chip);
+ irq_domain_create_legacy(of_fwnode_handle(node), MAX8925_NR_IRQS, chip->irq_base, 0,
+ &max8925_irq_domain_ops, chip);
/* request irq handler for pmic main irq*/
chip->core_irq = irq;
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 556aea7ec0a0..ab19ff0c7867 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -201,6 +201,7 @@ static void max8925_remove(struct i2c_client *client)
struct max8925_chip *chip = i2c_get_clientdata(client);
max8925_device_exit(chip);
+ device_init_wakeup(&client->dev, false);
i2c_unregister_device(chip->adc);
i2c_unregister_device(chip->rtc);
}
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 92e348df03d1..cc87571c9af5 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -327,8 +327,8 @@ int max8997_irq_init(struct max8997_dev *max8997)
true : false;
}
- domain = irq_domain_add_linear(NULL, MAX8997_IRQ_NR,
- &max8997_irq_domain_ops, max8997);
+ domain = irq_domain_create_linear(NULL, MAX8997_IRQ_NR,
+ &max8997_irq_domain_ops, max8997);
if (!domain) {
dev_err(max8997->dev, "could not create irq domain\n");
return -ENODEV;
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 83b6f510bc05..b0773fa6e07f 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -230,7 +230,7 @@ int max8998_irq_init(struct max8998_dev *max8998)
max8998_write_reg(max8998->i2c, MAX8998_REG_STATUSM1, 0xff);
max8998_write_reg(max8998->i2c, MAX8998_REG_STATUSM2, 0xff);
- domain = irq_domain_add_simple(NULL, MAX8998_IRQ_NR,
+ domain = irq_domain_create_simple(NULL, MAX8998_IRQ_NR,
max8998->irq_base, &max8998_irq_domain_ops, max8998);
if (!domain) {
dev_err(max8998->dev, "could not create irq domain\n");
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index 49830b526ee8..9f0bcc3ad7a1 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -272,9 +272,9 @@ int mt6358_irq_init(struct mt6397_chip *chip)
irqd->pmic_ints[i].en_reg_shift * j, 0);
}
- chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
- irqd->num_pmic_irqs,
- &mt6358_irq_domain_ops, chip);
+ chip->irq_domain = irq_domain_create_linear(of_fwnode_handle(chip->dev->of_node),
+ irqd->num_pmic_irqs,
+ &mt6358_irq_domain_ops, chip);
if (!chip->irq_domain) {
dev_err(chip->dev, "Could not create IRQ domain\n");
return -ENODEV;
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index 1310665200ed..badc614b4345 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -216,10 +216,8 @@ int mt6397_irq_init(struct mt6397_chip *chip)
regmap_write(chip->regmap, chip->int_con[2], 0x0);
chip->pm_nb.notifier_call = mt6397_irq_pm_notifier;
- chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
- MT6397_IRQ_NR,
- &mt6397_irq_domain_ops,
- chip);
+ chip->irq_domain = irq_domain_create_linear(of_fwnode_handle(chip->dev->of_node),
+ MT6397_IRQ_NR, &mt6397_irq_domain_ops, chip);
if (!chip->irq_domain) {
dev_err(chip->dev, "could not create irq domain\n");
return -ENOMEM;
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index f9ebdf5845b8..c96ea6fbede8 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -559,10 +559,8 @@ static int pm8xxx_probe(struct platform_device *pdev)
chip->pm_irq_data = data;
spin_lock_init(&chip->pm_irq_lock);
- chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
- data->num_irqs,
- &pm8xxx_irq_domain_ops,
- chip);
+ chip->irqdomain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
+ data->num_irqs, &pm8xxx_irq_domain_ops, chip);
if (!chip->irqdomain)
return -ENODEV;
diff --git a/drivers/mfd/rohm-bd96801.c b/drivers/mfd/rohm-bd96801.c
index 60ec8db790a7..66fa017ad568 100644
--- a/drivers/mfd/rohm-bd96801.c
+++ b/drivers/mfd/rohm-bd96801.c
@@ -38,108 +38,172 @@
#include <linux/types.h>
#include <linux/mfd/rohm-bd96801.h>
+#include <linux/mfd/rohm-bd96802.h>
#include <linux/mfd/rohm-generic.h>
-static const struct resource regulator_errb_irqs[] = {
- DEFINE_RES_IRQ_NAMED(BD96801_OTP_ERR_STAT, "bd96801-otp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_DBIST_ERR_STAT, "bd96801-dbist-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_EEP_ERR_STAT, "bd96801-eep-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_ABIST_ERR_STAT, "bd96801-abist-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_PRSTB_ERR_STAT, "bd96801-prstb-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_DRMOS1_ERR_STAT, "bd96801-drmoserr1"),
- DEFINE_RES_IRQ_NAMED(BD96801_DRMOS2_ERR_STAT, "bd96801-drmoserr2"),
- DEFINE_RES_IRQ_NAMED(BD96801_SLAVE_ERR_STAT, "bd96801-slave-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_VREF_ERR_STAT, "bd96801-vref-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_TSD_ERR_STAT, "bd96801-tsd"),
- DEFINE_RES_IRQ_NAMED(BD96801_UVLO_ERR_STAT, "bd96801-uvlo-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_OVLO_ERR_STAT, "bd96801-ovlo-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_OSC_ERR_STAT, "bd96801-osc-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_PON_ERR_STAT, "bd96801-pon-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_POFF_ERR_STAT, "bd96801-poff-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_CMD_SHDN_ERR_STAT, "bd96801-cmd-shdn-err"),
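+/* Per-variant data: IRQ resources and chips, regmap config, cells, unlock info */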
+struct bd968xx {
+ const struct resource *errb_irqs;
+ const struct resource *intb_irqs;
+ int num_errb_irqs;
+ int num_intb_irqs;
+ const struct regmap_irq_chip *errb_irq_chip;
+ const struct regmap_irq_chip *intb_irq_chip;
+ const struct regmap_config *regmap_config;
+ struct mfd_cell *cells;
+ int num_cells;
+ int unlock_reg;
+ int unlock_val;
+};
+
+static const struct resource bd96801_reg_errb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96801_OTP_ERR_STAT, "otp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DBIST_ERR_STAT, "dbist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_EEP_ERR_STAT, "eep-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_ABIST_ERR_STAT, "abist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_PRSTB_ERR_STAT, "prstb-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DRMOS1_ERR_STAT, "drmoserr1"),
+ DEFINE_RES_IRQ_NAMED(BD96801_DRMOS2_ERR_STAT, "drmoserr2"),
+ DEFINE_RES_IRQ_NAMED(BD96801_SLAVE_ERR_STAT, "slave-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_VREF_ERR_STAT, "vref-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_TSD_ERR_STAT, "tsd"),
+ DEFINE_RES_IRQ_NAMED(BD96801_UVLO_ERR_STAT, "uvlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_OVLO_ERR_STAT, "ovlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_OSC_ERR_STAT, "osc-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_PON_ERR_STAT, "pon-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_POFF_ERR_STAT, "poff-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_CMD_SHDN_ERR_STAT, "cmd-shdn-err"),
DEFINE_RES_IRQ_NAMED(BD96801_INT_PRSTB_WDT_ERR, "bd96801-prstb-wdt-err"),
DEFINE_RES_IRQ_NAMED(BD96801_INT_CHIP_IF_ERR, "bd96801-chip-if-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_INT_SHDN_ERR_STAT, "bd96801-int-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_PVIN_ERR_STAT, "bd96801-buck1-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVP_ERR_STAT, "bd96801-buck1-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVP_ERR_STAT, "bd96801-buck1-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_SHDN_ERR_STAT, "bd96801-buck1-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_PVIN_ERR_STAT, "bd96801-buck2-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVP_ERR_STAT, "bd96801-buck2-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVP_ERR_STAT, "bd96801-buck2-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_SHDN_ERR_STAT, "bd96801-buck2-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_PVIN_ERR_STAT, "bd96801-buck3-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVP_ERR_STAT, "bd96801-buck3-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVP_ERR_STAT, "bd96801-buck3-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_SHDN_ERR_STAT, "bd96801-buck3-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_PVIN_ERR_STAT, "bd96801-buck4-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVP_ERR_STAT, "bd96801-buck4-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVP_ERR_STAT, "bd96801-buck4-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_SHDN_ERR_STAT, "bd96801-buck4-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_PVIN_ERR_STAT, "bd96801-ldo5-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVP_ERR_STAT, "bd96801-ldo5-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVP_ERR_STAT, "bd96801-ldo5-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_SHDN_ERR_STAT, "bd96801-ldo5-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_PVIN_ERR_STAT, "bd96801-ldo6-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVP_ERR_STAT, "bd96801-ldo6-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVP_ERR_STAT, "bd96801-ldo6-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_SHDN_ERR_STAT, "bd96801-ldo6-shdn-err"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_PVIN_ERR_STAT, "bd96801-ldo7-pvin-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVP_ERR_STAT, "bd96801-ldo7-ovp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVP_ERR_STAT, "bd96801-ldo7-uvp-err"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_SHDN_ERR_STAT, "bd96801-ldo7-shdn-err"),
-};
-
-static const struct resource regulator_intb_irqs[] = {
- DEFINE_RES_IRQ_NAMED(BD96801_TW_STAT, "bd96801-core-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPH_STAT, "bd96801-buck1-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPL_STAT, "bd96801-buck1-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPN_STAT, "bd96801-buck1-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVD_STAT, "bd96801-buck1-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVD_STAT, "bd96801-buck1-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_TW_CH_STAT, "bd96801-buck1-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPH_STAT, "bd96801-buck2-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPL_STAT, "bd96801-buck2-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPN_STAT, "bd96801-buck2-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVD_STAT, "bd96801-buck2-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVD_STAT, "bd96801-buck2-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_TW_CH_STAT, "bd96801-buck2-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPH_STAT, "bd96801-buck3-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPL_STAT, "bd96801-buck3-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPN_STAT, "bd96801-buck3-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVD_STAT, "bd96801-buck3-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVD_STAT, "bd96801-buck3-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_TW_CH_STAT, "bd96801-buck3-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPH_STAT, "bd96801-buck4-overcurr-h"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPL_STAT, "bd96801-buck4-overcurr-l"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPN_STAT, "bd96801-buck4-overcurr-n"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVD_STAT, "bd96801-buck4-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVD_STAT, "bd96801-buck4-undervolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_TW_CH_STAT, "bd96801-buck4-thermal"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OCPH_STAT, "bd96801-ldo5-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVD_STAT, "bd96801-ldo5-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVD_STAT, "bd96801-ldo5-undervolt"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OCPH_STAT, "bd96801-ldo6-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVD_STAT, "bd96801-ldo6-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVD_STAT, "bd96801-ldo6-undervolt"),
-
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OCPH_STAT, "bd96801-ldo7-overcurr"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVD_STAT, "bd96801-ldo7-overvolt"),
- DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVD_STAT, "bd96801-ldo7-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_INT_SHDN_ERR_STAT, "int-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_PVIN_ERR_STAT, "buck1-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVP_ERR_STAT, "buck1-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVP_ERR_STAT, "buck1-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_SHDN_ERR_STAT, "buck1-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_PVIN_ERR_STAT, "buck2-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVP_ERR_STAT, "buck2-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVP_ERR_STAT, "buck2-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_SHDN_ERR_STAT, "buck2-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_PVIN_ERR_STAT, "buck3-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVP_ERR_STAT, "buck3-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVP_ERR_STAT, "buck3-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_SHDN_ERR_STAT, "buck3-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_PVIN_ERR_STAT, "buck4-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVP_ERR_STAT, "buck4-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVP_ERR_STAT, "buck4-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_SHDN_ERR_STAT, "buck4-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_PVIN_ERR_STAT, "ldo5-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVP_ERR_STAT, "ldo5-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVP_ERR_STAT, "ldo5-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_SHDN_ERR_STAT, "ldo5-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_PVIN_ERR_STAT, "ldo6-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVP_ERR_STAT, "ldo6-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVP_ERR_STAT, "ldo6-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_SHDN_ERR_STAT, "ldo6-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_PVIN_ERR_STAT, "ldo7-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVP_ERR_STAT, "ldo7-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVP_ERR_STAT, "ldo7-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_SHDN_ERR_STAT, "ldo7-shdn-err"),
+};
+
+static const struct resource bd96802_reg_errb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96802_OTP_ERR_STAT, "otp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_DBIST_ERR_STAT, "dbist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_EEP_ERR_STAT, "eep-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_ABIST_ERR_STAT, "abist-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_PRSTB_ERR_STAT, "prstb-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_DRMOS1_ERR_STAT, "drmoserr1"),
+ DEFINE_RES_IRQ_NAMED(BD96802_DRMOS1_ERR_STAT, "drmoserr2"),
+ DEFINE_RES_IRQ_NAMED(BD96802_SLAVE_ERR_STAT, "slave-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_VREF_ERR_STAT, "vref-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_TSD_ERR_STAT, "tsd"),
+ DEFINE_RES_IRQ_NAMED(BD96802_UVLO_ERR_STAT, "uvlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_OVLO_ERR_STAT, "ovlo-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_OSC_ERR_STAT, "osc-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_PON_ERR_STAT, "pon-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_POFF_ERR_STAT, "poff-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_CMD_SHDN_ERR_STAT, "cmd-shdn-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_INT_SHDN_ERR_STAT, "int-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_PVIN_ERR_STAT, "buck1-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OVP_ERR_STAT, "buck1-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_UVP_ERR_STAT, "buck1-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_SHDN_ERR_STAT, "buck1-shdn-err"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_PVIN_ERR_STAT, "buck2-pvin-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OVP_ERR_STAT, "buck2-ovp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_UVP_ERR_STAT, "buck2-uvp-err"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_SHDN_ERR_STAT, "buck2-shdn-err"),
+};
+
+static const struct resource bd96801_reg_intb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96801_TW_STAT, "core-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPH_STAT, "buck1-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPL_STAT, "buck1-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OCPN_STAT, "buck1-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_OVD_STAT, "buck1-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_UVD_STAT, "buck1-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK1_TW_CH_STAT, "buck1-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPH_STAT, "buck2-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPL_STAT, "buck2-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OCPN_STAT, "buck2-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_OVD_STAT, "buck2-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_UVD_STAT, "buck2-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK2_TW_CH_STAT, "buck2-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPH_STAT, "buck3-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPL_STAT, "buck3-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OCPN_STAT, "buck3-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_OVD_STAT, "buck3-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_UVD_STAT, "buck3-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK3_TW_CH_STAT, "buck3-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPH_STAT, "buck4-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPL_STAT, "buck4-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OCPN_STAT, "buck4-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_OVD_STAT, "buck4-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_UVD_STAT, "buck4-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_BUCK4_TW_CH_STAT, "buck4-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OCPH_STAT, "ldo5-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_OVD_STAT, "ldo5-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO5_UVD_STAT, "ldo5-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OCPH_STAT, "ldo6-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_OVD_STAT, "ldo6-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO6_UVD_STAT, "ldo6-undervolt"),
+
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OCPH_STAT, "ldo7-overcurr"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_OVD_STAT, "ldo7-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96801_LDO7_UVD_STAT, "ldo7-undervolt"),
+};
+
+static const struct resource bd96802_reg_intb_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD96802_TW_STAT, "core-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPH_STAT, "buck1-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPL_STAT, "buck1-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OCPN_STAT, "buck1-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_OVD_STAT, "buck1-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_UVD_STAT, "buck1-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK1_TW_CH_STAT, "buck1-thermal"),
+
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPH_STAT, "buck2-overcurr-h"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPL_STAT, "buck2-overcurr-l"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OCPN_STAT, "buck2-overcurr-n"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_OVD_STAT, "buck2-overvolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_UVD_STAT, "buck2-undervolt"),
+ DEFINE_RES_IRQ_NAMED(BD96802_BUCK2_TW_CH_STAT, "buck2-thermal"),
};
enum {
@@ -152,6 +216,20 @@ static struct mfd_cell bd96801_cells[] = {
[REGULATOR_CELL] = { .name = "bd96801-regulator", },
};
+static struct mfd_cell bd96802_cells[] = {
+ [WDG_CELL] = { .name = "bd96801-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96802-regulator", },
+};
+static struct mfd_cell bd96805_cells[] = {
+ [WDG_CELL] = { .name = "bd96801-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96805-regulator", },
+};
+
+static struct mfd_cell bd96806_cells[] = {
+ [WDG_CELL] = { .name = "bd96806-wdt", },
+ [REGULATOR_CELL] = { .name = "bd96806-regulator", },
+};
+
static const struct regmap_range bd96801_volatile_ranges[] = {
/* Status registers */
regmap_reg_range(BD96801_REG_WD_FEED, BD96801_REG_WD_FAILCOUNT),
@@ -169,11 +247,28 @@ static const struct regmap_range bd96801_volatile_ranges[] = {
regmap_reg_range(BD96801_LDO5_VOL_LVL_REG, BD96801_LDO7_VOL_LVL_REG),
};
-static const struct regmap_access_table volatile_regs = {
+static const struct regmap_range bd96802_volatile_ranges[] = {
+ /* Status regs */
+ regmap_reg_range(BD96801_REG_WD_FEED, BD96801_REG_WD_FAILCOUNT),
+ regmap_reg_range(BD96801_REG_WD_ASK, BD96801_REG_WD_ASK),
+ regmap_reg_range(BD96801_REG_WD_STATUS, BD96801_REG_WD_STATUS),
+ regmap_reg_range(BD96801_REG_PMIC_STATE, BD96801_REG_INT_BUCK2_ERRB),
+ regmap_reg_range(BD96801_REG_INT_SYS_INTB, BD96801_REG_INT_BUCK2_INTB),
+ /* Registers which do not update their value unless the PMIC is in STBY */
+ regmap_reg_range(BD96801_REG_SSCG_CTRL, BD96801_REG_SHD_INTB),
+ regmap_reg_range(BD96801_REG_BUCK_OVP, BD96801_REG_BOOT_OVERTIME),
+};
+
+static const struct regmap_access_table bd96801_volatile_regs = {
.yes_ranges = bd96801_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(bd96801_volatile_ranges),
};
+static const struct regmap_access_table bd96802_volatile_regs = {
+ .yes_ranges = bd96802_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bd96802_volatile_ranges),
+};
+
/*
* For ERRB we need main register bit mapping as bit(0) indicates active IRQ
* in one of the first 3 sub IRQ registers, For INTB we can use default 1 to 1
@@ -188,7 +283,7 @@ static unsigned int bit5_offsets[] = {7}; /* LDO 5 stat */
static unsigned int bit6_offsets[] = {8}; /* LDO 6 stat */
static unsigned int bit7_offsets[] = {9}; /* LDO 7 stat */
-static const struct regmap_irq_sub_irq_map errb_sub_irq_offsets[] = {
+static const struct regmap_irq_sub_irq_map bd96801_errb_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
@@ -199,6 +294,12 @@ static const struct regmap_irq_sub_irq_map errb_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit7_offsets),
};
+static const struct regmap_irq_sub_irq_map bd96802_errb_sub_irq_offsets[] = {
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
+ REGMAP_IRQ_MAIN_REG_OFFSET(bit2_offsets),
+};
+
static const struct regmap_irq bd96801_errb_irqs[] = {
/* Reg 0x52 Fatal ERRB1 */
REGMAP_IRQ_REG(BD96801_OTP_ERR_STAT, 0, BD96801_OTP_ERR_MASK),
@@ -259,6 +360,39 @@ static const struct regmap_irq bd96801_errb_irqs[] = {
REGMAP_IRQ_REG(BD96801_LDO7_SHDN_ERR_STAT, 9, BD96801_OUT_SHDN_ERR_MASK),
};
+static const struct regmap_irq bd96802_errb_irqs[] = {
+ /* Reg 0x52 Fatal ERRB1 */
+ REGMAP_IRQ_REG(BD96802_OTP_ERR_STAT, 0, BD96801_OTP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DBIST_ERR_STAT, 0, BD96801_DBIST_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_EEP_ERR_STAT, 0, BD96801_EEP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_ABIST_ERR_STAT, 0, BD96801_ABIST_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_PRSTB_ERR_STAT, 0, BD96801_PRSTB_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DRMOS1_ERR_STAT, 0, BD96801_DRMOS1_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_DRMOS2_ERR_STAT, 0, BD96801_DRMOS2_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_SLAVE_ERR_STAT, 0, BD96801_SLAVE_ERR_MASK),
+ /* 0x53 Fatal ERRB2 */
+ REGMAP_IRQ_REG(BD96802_VREF_ERR_STAT, 1, BD96801_VREF_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_TSD_ERR_STAT, 1, BD96801_TSD_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_UVLO_ERR_STAT, 1, BD96801_UVLO_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_OVLO_ERR_STAT, 1, BD96801_OVLO_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_OSC_ERR_STAT, 1, BD96801_OSC_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_PON_ERR_STAT, 1, BD96801_PON_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_POFF_ERR_STAT, 1, BD96801_POFF_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_CMD_SHDN_ERR_STAT, 1, BD96801_CMD_SHDN_ERR_MASK),
+ /* 0x54 Fatal INTB shadowed to ERRB */
+ REGMAP_IRQ_REG(BD96802_INT_SHDN_ERR_STAT, 2, BD96801_INT_SHDN_ERR_MASK),
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ REGMAP_IRQ_REG(BD96802_BUCK1_PVIN_ERR_STAT, 3, BD96801_OUT_PVIN_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OVP_ERR_STAT, 3, BD96801_OUT_OVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_UVP_ERR_STAT, 3, BD96801_OUT_UVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_SHDN_ERR_STAT, 3, BD96801_OUT_SHDN_ERR_MASK),
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ REGMAP_IRQ_REG(BD96802_BUCK2_PVIN_ERR_STAT, 4, BD96801_OUT_PVIN_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OVP_ERR_STAT, 4, BD96801_OUT_OVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_UVP_ERR_STAT, 4, BD96801_OUT_UVP_ERR_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_SHDN_ERR_STAT, 4, BD96801_OUT_SHDN_ERR_MASK),
+};
+
static const struct regmap_irq bd96801_intb_irqs[] = {
/* STATUS SYSTEM INTB */
REGMAP_IRQ_REG(BD96801_TW_STAT, 0, BD96801_TW_STAT_MASK),
@@ -307,6 +441,69 @@ static const struct regmap_irq bd96801_intb_irqs[] = {
REGMAP_IRQ_REG(BD96801_LDO7_UVD_STAT, 7, BD96801_LDO_UVD_STAT_MASK),
};
+static const struct regmap_irq bd96802_intb_irqs[] = {
+ /* STATUS SYSTEM INTB */
+ REGMAP_IRQ_REG(BD96802_TW_STAT, 0, BD96801_TW_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_WDT_ERR_STAT, 0, BD96801_WDT_ERR_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_I2C_ERR_STAT, 0, BD96801_I2C_ERR_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_CHIP_IF_ERR_STAT, 0, BD96801_CHIP_IF_ERR_STAT_MASK),
+ /* STATUS BUCK1 INTB */
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPH_STAT, 1, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPL_STAT, 1, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OCPN_STAT, 1, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_OVD_STAT, 1, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_UVD_STAT, 1, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK1_TW_CH_STAT, 1, BD96801_BUCK_TW_CH_STAT_MASK),
+ /* BUCK 2 INTB */
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPH_STAT, 2, BD96801_BUCK_OCPH_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPL_STAT, 2, BD96801_BUCK_OCPL_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OCPN_STAT, 2, BD96801_BUCK_OCPN_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_OVD_STAT, 2, BD96801_BUCK_OVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_UVD_STAT, 2, BD96801_BUCK_UVD_STAT_MASK),
+ REGMAP_IRQ_REG(BD96802_BUCK2_TW_CH_STAT, 2, BD96801_BUCK_TW_CH_STAT_MASK),
+};
+
+/*
+ * The IRQ handling is a bit hairy. The BD96801 / BD96802 provide two
+ * physical IRQ lines called INTB and ERRB. They share the same main status
+ * register.
+ *
+ * For ERRB, the mapping from main status to sub-status is such that the
+ * 'global' faults are mapped to the first 3 sub-status registers - and
+ * indicated by the first bit[0] in the main status reg.
+ *
+ * The rest of the status registers indicate faults for the individual
+ * regulators, 1 sub register / regulator and 1 main status register bit /
+ * regulator, starting from bit[1].
+ *
+ * E.g., the regulator specific faults have a 1 to 1 mapping from main-status
+ * to sub registers, but the 'global' ERRB IRQs require mapping from main
+ * status bit[0] to 3 status registers.
+ *
+ * Furthermore, the BD96801 has 7 regulators whereas the BD96802 has only 2.
+ *
+ * INTB has only 1 sub status register for 'global' events and then its own
+ * sub status register for each of the regulators. So, for INTB we have a
+ * direct 1 to 1 mapping - the BD96801 just has 5 registers and 5 main
+ * status bits more than the BD96802.
+ *
+ * Sharing the main status bits could be a problem if we had both INTB and
+ * ERRB IRQs asserted but for different sub-status offsets. This might lead
+ * the IRQ controller code to read a sub status register which indicates no
+ * active IRQs. I assume this occurring repeatedly might lead the IRQ to be
+ * disabled by the core as a result of repeatedly returned IRQ_NONEs.
+ *
+ * I don't consider this a fatal problem for now because:
+ *    a) Having ERRB asserted leads to a PMIC fault state which will kill
+ *       the SoC powered by the PMIC. (So, this is relevant only for the
+ *       potential case of not powering the processor with this PMIC).
+ *    b) Having ERRB set without the respective INTB is unlikely
+ *       (haven't actually verified this).
+ *
+ * So, let's proceed with the main status enabled for both INTB and ERRB. We
+ * can later disable main-status usage on systems where this ever proves to
+ * be a problem.
+ */
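The mapping described above boils down to a handful of regmap_irq_sub_irq_map
entries. A minimal sketch of the idea, not part of the patch - the array names
here are invented, while REGMAP_IRQ_MAIN_REG_OFFSET() and the struct are the
real regmap API:

    /* Main status bit[0] fans out to three sub registers (offsets 0..2)... */
    static unsigned int global_offsets[] = {0, 1, 2};
    /* ...while bit[1] maps 1:1 to the single sub register at offset 3 */
    static unsigned int reg1_offsets[] = {3};

    static const struct regmap_irq_sub_irq_map example_sub_irq_offsets[] = {
            REGMAP_IRQ_MAIN_REG_OFFSET(global_offsets),
            REGMAP_IRQ_MAIN_REG_OFFSET(reg1_offsets),
    };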
+
static const struct regmap_irq_chip bd96801_irq_chip_errb = {
.name = "bd96801-irq-errb",
.domain_suffix = "errb",
@@ -320,7 +517,23 @@ static const struct regmap_irq_chip bd96801_irq_chip_errb = {
.init_ack_masked = true,
.num_regs = 10,
.irq_reg_stride = 1,
- .sub_reg_offsets = &errb_sub_irq_offsets[0],
+ .sub_reg_offsets = &bd96801_errb_sub_irq_offsets[0],
+};
+
+static const struct regmap_irq_chip bd96802_irq_chip_errb = {
+ .name = "bd96802-irq-errb",
+ .domain_suffix = "errb",
+ .main_status = BD96801_REG_INT_MAIN,
+ .num_main_regs = 1,
+ .irqs = &bd96802_errb_irqs[0],
+ .num_irqs = ARRAY_SIZE(bd96802_errb_irqs),
+ .status_base = BD96801_REG_INT_SYS_ERRB1,
+ .mask_base = BD96801_REG_MASK_SYS_ERRB,
+ .ack_base = BD96801_REG_INT_SYS_ERRB1,
+ .init_ack_masked = true,
+ .num_regs = 5,
+ .irq_reg_stride = 1,
+ .sub_reg_offsets = &bd96802_errb_sub_irq_offsets[0],
};
static const struct regmap_irq_chip bd96801_irq_chip_intb = {
@@ -338,25 +551,124 @@ static const struct regmap_irq_chip bd96801_irq_chip_intb = {
.irq_reg_stride = 1,
};
+static const struct regmap_irq_chip bd96802_irq_chip_intb = {
+ .name = "bd96802-irq-intb",
+ .domain_suffix = "intb",
+ .main_status = BD96801_REG_INT_MAIN,
+ .num_main_regs = 1,
+ .irqs = &bd96802_intb_irqs[0],
+ .num_irqs = ARRAY_SIZE(bd96802_intb_irqs),
+ .status_base = BD96801_REG_INT_SYS_INTB,
+ .mask_base = BD96801_REG_MASK_SYS_INTB,
+ .ack_base = BD96801_REG_INT_SYS_INTB,
+ .init_ack_masked = true,
+ .num_regs = 3,
+ .irq_reg_stride = 1,
+};
+
static const struct regmap_config bd96801_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .volatile_table = &volatile_regs,
+ .volatile_table = &bd96801_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct regmap_config bd96802_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &bd96802_volatile_regs,
.cache_type = REGCACHE_MAPLE,
};
+static const struct bd968xx bd96801_data = {
+ .errb_irqs = bd96801_reg_errb_irqs,
+ .intb_irqs = bd96801_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96801_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96801_reg_intb_irqs),
+ .errb_irq_chip = &bd96801_irq_chip_errb,
+ .intb_irq_chip = &bd96801_irq_chip_intb,
+ .regmap_config = &bd96801_regmap_config,
+ .cells = bd96801_cells,
+ .num_cells = ARRAY_SIZE(bd96801_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96802_data = {
+ .errb_irqs = bd96802_reg_errb_irqs,
+ .intb_irqs = bd96802_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96802_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96802_reg_intb_irqs),
+ .errb_irq_chip = &bd96802_irq_chip_errb,
+ .intb_irq_chip = &bd96802_irq_chip_intb,
+ .regmap_config = &bd96802_regmap_config,
+ .cells = bd96802_cells,
+ .num_cells = ARRAY_SIZE(bd96802_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96805_data = {
+ .errb_irqs = bd96801_reg_errb_irqs,
+ .intb_irqs = bd96801_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96801_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96801_reg_intb_irqs),
+ .errb_irq_chip = &bd96801_irq_chip_errb,
+ .intb_irq_chip = &bd96801_irq_chip_intb,
+ .regmap_config = &bd96801_regmap_config,
+ .cells = bd96805_cells,
+ .num_cells = ARRAY_SIZE(bd96805_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
+static const struct bd968xx bd96806_data = {
+ .errb_irqs = bd96802_reg_errb_irqs,
+ .intb_irqs = bd96802_reg_intb_irqs,
+ .num_errb_irqs = ARRAY_SIZE(bd96802_reg_errb_irqs),
+ .num_intb_irqs = ARRAY_SIZE(bd96802_reg_intb_irqs),
+ .errb_irq_chip = &bd96802_irq_chip_errb,
+ .intb_irq_chip = &bd96802_irq_chip_intb,
+ .regmap_config = &bd96802_regmap_config,
+ .cells = bd96806_cells,
+ .num_cells = ARRAY_SIZE(bd96806_cells),
+ .unlock_reg = BD96801_LOCK_REG,
+ .unlock_val = BD96801_UNLOCK,
+};
+
static int bd96801_i2c_probe(struct i2c_client *i2c)
{
struct regmap_irq_chip_data *intb_irq_data, *errb_irq_data;
struct irq_domain *intb_domain, *errb_domain;
+ const struct bd968xx *ddata;
const struct fwnode_handle *fwnode;
struct resource *regulator_res;
struct resource wdg_irq;
struct regmap *regmap;
- int intb_irq, errb_irq, num_intb, num_errb = 0;
+ int intb_irq, errb_irq, num_errb = 0;
int num_regu_irqs, wdg_irq_no;
+ unsigned int chip_type;
int i, ret;
+ chip_type = (unsigned int)(uintptr_t)device_get_match_data(&i2c->dev);
+ switch (chip_type) {
+ case ROHM_CHIP_TYPE_BD96801:
+ ddata = &bd96801_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96802:
+ ddata = &bd96802_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96805:
+ ddata = &bd96805_data;
+ break;
+ case ROHM_CHIP_TYPE_BD96806:
+ ddata = &bd96806_data;
+ break;
+ default:
+ dev_err(&i2c->dev, "Unknown IC\n");
+ return -EINVAL;
+ }
+
fwnode = dev_fwnode(&i2c->dev);
if (!fwnode)
return dev_err_probe(&i2c->dev, -EINVAL, "Failed to find fwnode\n");
@@ -365,34 +677,32 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
if (intb_irq < 0)
return dev_err_probe(&i2c->dev, intb_irq, "INTB IRQ not configured\n");
- num_intb = ARRAY_SIZE(regulator_intb_irqs);
-
/* ERRB may be omitted if processor is powered by the PMIC */
errb_irq = fwnode_irq_get_byname(fwnode, "errb");
- if (errb_irq < 0)
- errb_irq = 0;
+ if (errb_irq == -EPROBE_DEFER)
+ return errb_irq;
- if (errb_irq)
- num_errb = ARRAY_SIZE(regulator_errb_irqs);
+ if (errb_irq > 0)
+ num_errb = ddata->num_errb_irqs;
- num_regu_irqs = num_intb + num_errb;
+ num_regu_irqs = ddata->num_intb_irqs + num_errb;
regulator_res = devm_kcalloc(&i2c->dev, num_regu_irqs,
sizeof(*regulator_res), GFP_KERNEL);
if (!regulator_res)
return -ENOMEM;
- regmap = devm_regmap_init_i2c(i2c, &bd96801_regmap_config);
+ regmap = devm_regmap_init_i2c(i2c, ddata->regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(&i2c->dev, PTR_ERR(regmap),
"Regmap initialization failed\n");
- ret = regmap_write(regmap, BD96801_LOCK_REG, BD96801_UNLOCK);
+ ret = regmap_write(regmap, ddata->unlock_reg, ddata->unlock_val);
if (ret)
return dev_err_probe(&i2c->dev, ret, "Failed to unlock PMIC\n");
ret = devm_regmap_add_irq_chip(&i2c->dev, regmap, intb_irq,
- IRQF_ONESHOT, 0, &bd96801_irq_chip_intb,
+ IRQF_ONESHOT, 0, ddata->intb_irq_chip,
&intb_irq_data);
if (ret)
return dev_err_probe(&i2c->dev, ret, "Failed to add INTB IRQ chip\n");
@@ -404,24 +714,25 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
* has two domains so we do IRQ mapping here and provide the
* already mapped IRQ numbers to sub-devices.
*/
- for (i = 0; i < num_intb; i++) {
+ for (i = 0; i < ddata->num_intb_irqs; i++) {
struct resource *res = &regulator_res[i];
- *res = regulator_intb_irqs[i];
+ *res = ddata->intb_irqs[i];
res->start = res->end = irq_create_mapping(intb_domain,
res->start);
}
wdg_irq_no = irq_create_mapping(intb_domain, BD96801_WDT_ERR_STAT);
wdg_irq = DEFINE_RES_IRQ_NAMED(wdg_irq_no, "bd96801-wdg");
- bd96801_cells[WDG_CELL].resources = &wdg_irq;
- bd96801_cells[WDG_CELL].num_resources = 1;
+
+ ddata->cells[WDG_CELL].resources = &wdg_irq;
+ ddata->cells[WDG_CELL].num_resources = 1;
if (!num_errb)
goto skip_errb;
ret = devm_regmap_add_irq_chip(&i2c->dev, regmap, errb_irq, IRQF_ONESHOT,
- 0, &bd96801_irq_chip_errb, &errb_irq_data);
+ 0, ddata->errb_irq_chip, &errb_irq_data);
if (ret)
return dev_err_probe(&i2c->dev, ret,
"Failed to add ERRB IRQ chip\n");
@@ -429,18 +740,17 @@ static int bd96801_i2c_probe(struct i2c_client *i2c)
errb_domain = regmap_irq_get_domain(errb_irq_data);
for (i = 0; i < num_errb; i++) {
- struct resource *res = &regulator_res[num_intb + i];
+ struct resource *res = &regulator_res[ddata->num_intb_irqs + i];
- *res = regulator_errb_irqs[i];
+ *res = ddata->errb_irqs[i];
res->start = res->end = irq_create_mapping(errb_domain, res->start);
}
skip_errb:
- bd96801_cells[REGULATOR_CELL].resources = regulator_res;
- bd96801_cells[REGULATOR_CELL].num_resources = num_regu_irqs;
-
- ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, bd96801_cells,
- ARRAY_SIZE(bd96801_cells), NULL, 0, NULL);
+ ddata->cells[REGULATOR_CELL].resources = regulator_res;
+ ddata->cells[REGULATOR_CELL].num_resources = num_regu_irqs;
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO, ddata->cells,
+ ddata->num_cells, NULL, 0, NULL);
if (ret)
dev_err_probe(&i2c->dev, ret, "Failed to create subdevices\n");
@@ -448,7 +758,10 @@ skip_errb:
}
static const struct of_device_id bd96801_of_match[] = {
- { .compatible = "rohm,bd96801", },
+ { .compatible = "rohm,bd96801", .data = (void *)ROHM_CHIP_TYPE_BD96801 },
+ { .compatible = "rohm,bd96802", .data = (void *)ROHM_CHIP_TYPE_BD96802 },
+ { .compatible = "rohm,bd96805", .data = (void *)ROHM_CHIP_TYPE_BD96805 },
+ { .compatible = "rohm,bd96806", .data = (void *)ROHM_CHIP_TYPE_BD96806 },
{ }
};
MODULE_DEVICE_TABLE(of, bd96801_of_match);
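For reference, a minimal sketch (not part of the patch) of how the chip type
travels through the of_device_id .data pointer and back out of
device_get_match_data(); the enum and the compatibles are invented, the cast
through uintptr_t mirrors the probe code above:

    enum example_chip { EXAMPLE_CHIP_A = 1, EXAMPLE_CHIP_B };

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,chip-a", .data = (void *)EXAMPLE_CHIP_A },
            { .compatible = "vendor,chip-b", .data = (void *)EXAMPLE_CHIP_B },
            { }
    };

    static int example_probe(struct device *dev)
    {
            /* The integer survives the round-trip through the void pointer */
            unsigned int chip = (unsigned int)(uintptr_t)device_get_match_data(dev);

            return chip ? 0 : -EINVAL;
    }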
@@ -476,5 +789,5 @@ static void __exit bd96801_i2c_exit(void)
module_exit(bd96801_i2c_exit);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("ROHM BD96801 Power Management IC driver");
+MODULE_DESCRIPTION("ROHM BD9680X Power Management IC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index 84ebc96f58e4..2204bf1c5a51 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -98,7 +98,11 @@ static int rt5033_i2c_probe(struct i2c_client *i2c)
return ret;
}
- device_init_wakeup(rt5033->dev, rt5033->wakeup);
+ if (rt5033->wakeup) {
+ ret = devm_device_init_wakeup(rt5033->dev);
+ if (ret)
+ return dev_err_probe(rt5033->dev, ret, "Failed to init wakeup\n");
+ }
return 0;
}
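devm_device_init_wakeup() is the managed variant of device_init_wakeup(). A
rough sketch of what it is assumed to do under the hood - here
disable_wakeup_action is a hypothetical helper name that would call
device_init_wakeup(dev, false):

    /* Sketch only: arm wakeup and undo it automatically on driver unbind */
    ret = device_init_wakeup(dev, true);
    if (ret)
            return ret;
    return devm_add_action_or_reset(dev, disable_wakeup_action, dev);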
diff --git a/drivers/mfd/sec-acpm.c b/drivers/mfd/sec-acpm.c
new file mode 100644
index 000000000000..8b31c816d65b
--- /dev/null
+++ b/drivers/mfd/sec-acpm.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung S2MPG1x ACPM driver
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/rtc.h>
+#include <linux/mfd/samsung/s2mpg10.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+#define ACPM_ADDR_BITS 8
+#define ACPM_MAX_BULK_DATA 8
+
+struct sec_pmic_acpm_platform_data {
+ int device_type;
+
+ unsigned int acpm_chan_id;
+ u8 speedy_channel;
+
+ const struct regmap_config *regmap_cfg_common;
+ const struct regmap_config *regmap_cfg_pmic;
+ const struct regmap_config *regmap_cfg_rtc;
+ const struct regmap_config *regmap_cfg_meter;
+};
+
+static const struct regmap_range s2mpg10_common_registers[] = {
+ regmap_reg_range(0x00, 0x02), /* CHIP_ID_M, INT, INT_MASK */
+ regmap_reg_range(0x0a, 0x0c), /* Speedy control */
+ regmap_reg_range(0x1a, 0x2a), /* Debug */
+};
+
+static const struct regmap_range s2mpg10_common_ro_registers[] = {
+ regmap_reg_range(0x00, 0x01), /* CHIP_ID_M, INT */
+ regmap_reg_range(0x28, 0x2a), /* Debug */
+};
+
+static const struct regmap_range s2mpg10_common_nonvolatile_registers[] = {
+ regmap_reg_range(0x00, 0x00), /* CHIP_ID_M */
+ regmap_reg_range(0x02, 0x02), /* INT_MASK */
+ regmap_reg_range(0x0a, 0x0c), /* Speedy control */
+};
+
+static const struct regmap_range s2mpg10_common_precious_registers[] = {
+ regmap_reg_range(0x01, 0x01), /* INT */
+};
+
+static const struct regmap_access_table s2mpg10_common_wr_table = {
+ .yes_ranges = s2mpg10_common_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_registers),
+ .no_ranges = s2mpg10_common_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_common_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_rd_table = {
+ .yes_ranges = s2mpg10_common_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_volatile_table = {
+ .no_ranges = s2mpg10_common_nonvolatile_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_common_nonvolatile_registers),
+};
+
+static const struct regmap_access_table s2mpg10_common_precious_table = {
+ .yes_ranges = s2mpg10_common_precious_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_common_precious_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_common = {
+ .name = "common",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_COMMON_SPD_DEBUG4,
+ .wr_table = &s2mpg10_common_wr_table,
+ .rd_table = &s2mpg10_common_rd_table,
+ .volatile_table = &s2mpg10_common_volatile_table,
+ .precious_table = &s2mpg10_common_precious_table,
+ .num_reg_defaults_raw = S2MPG10_COMMON_SPD_DEBUG4 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
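The access tables compose as yes-minus-no: a register is writable only if it
falls inside a yes_range of the wr_table and outside every no_range. A
standalone sketch of the pattern with invented registers:

    static const struct regmap_range example_rw_registers[] = {
            regmap_reg_range(0x00, 0x0f),
    };

    static const struct regmap_range example_ro_registers[] = {
            regmap_reg_range(0x00, 0x01), /* read-only status */
    };

    /* 0x02..0x0f end up writable; 0x00..0x01 remain readable only */
    static const struct regmap_access_table example_wr_table = {
            .yes_ranges = example_rw_registers,
            .n_yes_ranges = ARRAY_SIZE(example_rw_registers),
            .no_ranges = example_ro_registers,
            .n_no_ranges = ARRAY_SIZE(example_ro_registers),
    };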
+
+static const struct regmap_range s2mpg10_pmic_registers[] = {
+ regmap_reg_range(0x00, 0xf6), /* All PMIC registers */
+};
+
+static const struct regmap_range s2mpg10_pmic_ro_registers[] = {
+ regmap_reg_range(0x00, 0x05), /* INTx */
+ regmap_reg_range(0x0c, 0x0f), /* STATUSx PWRONSRC OFFSRC */
+ regmap_reg_range(0xc7, 0xc7), /* GPIO input */
+};
+
+static const struct regmap_range s2mpg10_pmic_nonvolatile_registers[] = {
+ regmap_reg_range(0x06, 0x0b), /* INTxM */
+};
+
+static const struct regmap_range s2mpg10_pmic_precious_registers[] = {
+ regmap_reg_range(0x00, 0x05), /* INTx */
+};
+
+static const struct regmap_access_table s2mpg10_pmic_wr_table = {
+ .yes_ranges = s2mpg10_pmic_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_registers),
+ .no_ranges = s2mpg10_pmic_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_pmic_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_rd_table = {
+ .yes_ranges = s2mpg10_pmic_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_volatile_table = {
+ .no_ranges = s2mpg10_pmic_nonvolatile_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_pmic_nonvolatile_registers),
+};
+
+static const struct regmap_access_table s2mpg10_pmic_precious_table = {
+ .yes_ranges = s2mpg10_pmic_precious_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_pmic_precious_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_pmic = {
+ .name = "pmic",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_PMIC_LDO_SENSE4,
+ .wr_table = &s2mpg10_pmic_wr_table,
+ .rd_table = &s2mpg10_pmic_rd_table,
+ .volatile_table = &s2mpg10_pmic_volatile_table,
+ .precious_table = &s2mpg10_pmic_precious_table,
+ .num_reg_defaults_raw = S2MPG10_PMIC_LDO_SENSE4 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range s2mpg10_rtc_registers[] = {
+ regmap_reg_range(0x00, 0x2b), /* All RTC registers */
+};
+
+static const struct regmap_range s2mpg10_rtc_volatile_registers[] = {
+ regmap_reg_range(0x01, 0x01), /* RTC_UPDATE */
+ regmap_reg_range(0x05, 0x0c), /* Time / date */
+};
+
+static const struct regmap_access_table s2mpg10_rtc_rd_table = {
+ .yes_ranges = s2mpg10_rtc_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_rtc_registers),
+};
+
+static const struct regmap_access_table s2mpg10_rtc_volatile_table = {
+ .yes_ranges = s2mpg10_rtc_volatile_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_rtc_volatile_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_rtc = {
+ .name = "rtc",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_RTC_OSC_CTRL,
+ .rd_table = &s2mpg10_rtc_rd_table,
+ .volatile_table = &s2mpg10_rtc_volatile_table,
+ .num_reg_defaults_raw = S2MPG10_RTC_OSC_CTRL + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_range s2mpg10_meter_registers[] = {
+ regmap_reg_range(0x00, 0x21), /* Meter config */
+ regmap_reg_range(0x40, 0x8a), /* Meter data */
+ regmap_reg_range(0xee, 0xee), /* Offset */
+ regmap_reg_range(0xf1, 0xf1), /* Trim */
+};
+
+static const struct regmap_range s2mpg10_meter_ro_registers[] = {
+ regmap_reg_range(0x40, 0x8a), /* Meter data */
+};
+
+static const struct regmap_access_table s2mpg10_meter_wr_table = {
+ .yes_ranges = s2mpg10_meter_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_registers),
+ .no_ranges = s2mpg10_meter_ro_registers,
+ .n_no_ranges = ARRAY_SIZE(s2mpg10_meter_ro_registers),
+};
+
+static const struct regmap_access_table s2mpg10_meter_rd_table = {
+ .yes_ranges = s2mpg10_meter_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_registers),
+};
+
+static const struct regmap_access_table s2mpg10_meter_volatile_table = {
+ .yes_ranges = s2mpg10_meter_ro_registers,
+ .n_yes_ranges = ARRAY_SIZE(s2mpg10_meter_ro_registers),
+};
+
+static const struct regmap_config s2mpg10_regmap_config_meter = {
+ .name = "meter",
+ .reg_bits = ACPM_ADDR_BITS,
+ .val_bits = 8,
+ .max_register = S2MPG10_METER_BUCK_METER_TRIM3,
+ .wr_table = &s2mpg10_meter_wr_table,
+ .rd_table = &s2mpg10_meter_rd_table,
+ .volatile_table = &s2mpg10_meter_volatile_table,
+ .num_reg_defaults_raw = S2MPG10_METER_BUCK_METER_TRIM3 + 1,
+ .cache_type = REGCACHE_FLAT,
+};
+
+struct sec_pmic_acpm_shared_bus_context {
+ const struct acpm_handle *acpm;
+ unsigned int acpm_chan_id;
+ u8 speedy_channel;
+};
+
+enum sec_pmic_acpm_accesstype {
+ SEC_PMIC_ACPM_ACCESSTYPE_COMMON = 0x00,
+ SEC_PMIC_ACPM_ACCESSTYPE_PMIC = 0x01,
+ SEC_PMIC_ACPM_ACCESSTYPE_RTC = 0x02,
+ SEC_PMIC_ACPM_ACCESSTYPE_METER = 0x0a,
+ SEC_PMIC_ACPM_ACCESSTYPE_WLWP = 0x0b,
+ SEC_PMIC_ACPM_ACCESSTYPE_TRIM = 0x0f,
+};
+
+struct sec_pmic_acpm_bus_context {
+ struct sec_pmic_acpm_shared_bus_context *shared;
+ enum sec_pmic_acpm_accesstype type;
+};
+
+static int sec_pmic_acpm_bus_write(void *context, const void *data,
+ size_t count)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+ size_t val_count = count - BITS_TO_BYTES(ACPM_ADDR_BITS);
+ const u8 *d = data;
+ const u8 *vals = &d[BITS_TO_BYTES(ACPM_ADDR_BITS)];
+ u8 reg;
+
+ if (val_count < 1 || val_count > ACPM_MAX_BULK_DATA)
+ return -EINVAL;
+
+ reg = d[0];
+
+ return pmic_ops->bulk_write(acpm, ctx->shared->acpm_chan_id, ctx->type, reg,
+ ctx->shared->speedy_channel, val_count, vals);
+}
+
+static int sec_pmic_acpm_bus_read(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+ const u8 *r = reg_buf;
+ u8 reg;
+
+ if (reg_size != BITS_TO_BYTES(ACPM_ADDR_BITS) || !val_size ||
+ val_size > ACPM_MAX_BULK_DATA)
+ return -EINVAL;
+
+ reg = r[0];
+
+ return pmic_ops->bulk_read(acpm, ctx->shared->acpm_chan_id, ctx->type, reg,
+ ctx->shared->speedy_channel, val_size, val_buf);
+}
+
+static int sec_pmic_acpm_bus_reg_update_bits(void *context, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ struct sec_pmic_acpm_bus_context *ctx = context;
+ const struct acpm_handle *acpm = ctx->shared->acpm;
+ const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+
+ return pmic_ops->update_reg(acpm, ctx->shared->acpm_chan_id, ctx->type, reg & 0xff,
+ ctx->shared->speedy_channel, val, mask);
+}
+
+static const struct regmap_bus sec_pmic_acpm_regmap_bus = {
+ .write = sec_pmic_acpm_bus_write,
+ .read = sec_pmic_acpm_bus_read,
+ .reg_update_bits = sec_pmic_acpm_bus_reg_update_bits,
+ .max_raw_read = ACPM_MAX_BULK_DATA,
+ .max_raw_write = ACPM_MAX_BULK_DATA,
+};
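With 8-bit register addressing, regmap hands .write() a single buffer holding
the register address byte followed by the payload. A worked example of what
the callback above sees for a two-byte bulk write (register number invented):

    /*
     * regmap_bulk_write(map, 0x10, vals, 2) arrives as:
     *
     *   data  = { 0x10, vals[0], vals[1] }
     *   count = 3
     *
     * so reg = d[0] = 0x10 and val_count = count - 1 = 2,
     * with vals pointing at d[1].
     */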
+
+static struct regmap *sec_pmic_acpm_regmap_init(struct device *dev,
+ struct sec_pmic_acpm_shared_bus_context *shared_ctx,
+ enum sec_pmic_acpm_accesstype type,
+ const struct regmap_config *cfg, bool do_attach)
+{
+ struct sec_pmic_acpm_bus_context *ctx;
+ struct regmap *regmap;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->shared = shared_ctx;
+ ctx->type = type;
+
+ regmap = devm_regmap_init(dev, &sec_pmic_acpm_regmap_bus, ctx, cfg);
+ if (IS_ERR(regmap))
+ return dev_err_cast_probe(dev, regmap, "regmap init (%s) failed\n", cfg->name);
+
+ if (do_attach) {
+ int ret;
+
+ ret = regmap_attach_dev(dev, regmap, cfg);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret, "regmap attach (%s) failed\n",
+ cfg->name);
+ }
+
+ return regmap;
+}
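Attaching the rtc and meter regmaps to the device (do_attach) is presumably
what lets the MFD children find them later by config name; a sketch of the
lookup side, assuming the child device's parent is this MFD device:

    /* In a child driver (sketch; "rtc" matches the config name above) */
    struct regmap *rtc_regmap = dev_get_regmap(pdev->dev.parent, "rtc");

    if (!rtc_regmap)
            return -ENODEV;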
+
+static void sec_pmic_acpm_mask_common_irqs(void *regmap_common)
+{
+ regmap_write(regmap_common, S2MPG10_COMMON_INT_MASK, S2MPG10_COMMON_INT_SRC);
+}
+
+static int sec_pmic_acpm_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap_common, *regmap_pmic, *regmap;
+ const struct sec_pmic_acpm_platform_data *pdata;
+ struct sec_pmic_acpm_shared_bus_context *shared_ctx;
+ const struct acpm_handle *acpm;
+ struct device *dev = &pdev->dev;
+ int ret, irq;
+
+ pdata = device_get_match_data(dev);
+ if (!pdata)
+ return dev_err_probe(dev, -ENODEV, "unsupported device type\n");
+
+ acpm = devm_acpm_get_by_node(dev, dev->parent->of_node);
+ if (IS_ERR(acpm))
+ return dev_err_probe(dev, PTR_ERR(acpm), "failed to get acpm\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ shared_ctx = devm_kzalloc(dev, sizeof(*shared_ctx), GFP_KERNEL);
+ if (!shared_ctx)
+ return -ENOMEM;
+
+ shared_ctx->acpm = acpm;
+ shared_ctx->acpm_chan_id = pdata->acpm_chan_id;
+ shared_ctx->speedy_channel = pdata->speedy_channel;
+
+ regmap_common = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_COMMON,
+ pdata->regmap_cfg_common, false);
+ if (IS_ERR(regmap_common))
+ return PTR_ERR(regmap_common);
+
+ /* Mask all interrupts from 'common' block, until successful init */
+ ret = regmap_write(regmap_common, S2MPG10_COMMON_INT_MASK, S2MPG10_COMMON_INT_SRC);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to mask common block interrupts\n");
+
+ regmap_pmic = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_PMIC,
+ pdata->regmap_cfg_pmic, false);
+ if (IS_ERR(regmap_pmic))
+ return PTR_ERR(regmap_pmic);
+
+ regmap = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_RTC,
+ pdata->regmap_cfg_rtc, true);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ regmap = sec_pmic_acpm_regmap_init(dev, shared_ctx, SEC_PMIC_ACPM_ACCESSTYPE_METER,
+ pdata->regmap_cfg_meter, true);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = sec_pmic_probe(dev, pdata->device_type, irq, regmap_pmic, NULL);
+ if (ret)
+ return ret;
+
+ if (device_property_read_bool(dev, "wakeup-source"))
+ devm_device_init_wakeup(dev);
+
+ /* Unmask PMIC interrupt from 'common' block, now that everything is in place. */
+ ret = regmap_clear_bits(regmap_common, S2MPG10_COMMON_INT_MASK,
+ S2MPG10_COMMON_INT_SRC_PMIC);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to unmask PMIC interrupt\n");
+
+ /* Mask all interrupts from 'common' block on shutdown */
+ ret = devm_add_action_or_reset(dev, sec_pmic_acpm_mask_common_irqs, regmap_common);
+ if (ret)
+ return ret;
+
+ return 0;
+}
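devm_add_action_or_reset() runs the action immediately when the registration
itself fails, so the common-block IRQs end up masked on every exit path.
Open-coded, the tail of the probe above is roughly equivalent to this sketch:

    ret = devm_add_action(dev, sec_pmic_acpm_mask_common_irqs, regmap_common);
    if (ret) {
            /* the _or_reset variant fires the action right away on failure */
            sec_pmic_acpm_mask_common_irqs(regmap_common);
            return ret;
    }

    return 0;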
+
+static void sec_pmic_acpm_shutdown(struct platform_device *pdev)
+{
+ sec_pmic_shutdown(&pdev->dev);
+}
+
+static const struct sec_pmic_acpm_platform_data s2mpg10_data = {
+ .device_type = S2MPG10,
+ .acpm_chan_id = 2,
+ .speedy_channel = 0,
+ .regmap_cfg_common = &s2mpg10_regmap_config_common,
+ .regmap_cfg_pmic = &s2mpg10_regmap_config_pmic,
+ .regmap_cfg_rtc = &s2mpg10_regmap_config_rtc,
+ .regmap_cfg_meter = &s2mpg10_regmap_config_meter,
+};
+
+static const struct of_device_id sec_pmic_acpm_of_match[] = {
+ { .compatible = "samsung,s2mpg10-pmic", .data = &s2mpg10_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sec_pmic_acpm_of_match);
+
+static struct platform_driver sec_pmic_acpm_driver = {
+ .driver = {
+ .name = "sec-pmic-acpm",
+ .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
+ .of_match_table = sec_pmic_acpm_of_match,
+ },
+ .probe = sec_pmic_acpm_probe,
+ .shutdown = sec_pmic_acpm_shutdown,
+};
+module_platform_driver(sec_pmic_acpm_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("ACPM driver for the Samsung S2MPG1x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-common.c b/drivers/mfd/sec-common.c
new file mode 100644
index 000000000000..42d55e70e34c
--- /dev/null
+++ b/drivers/mfd/sec-common.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM core driver
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps13.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+static const struct mfd_cell s5m8767_devs[] = {
+ MFD_CELL_NAME("s5m8767-pmic"),
+ MFD_CELL_NAME("s5m-rtc"),
+ MFD_CELL_OF("s5m8767-clk", NULL, NULL, 0, 0, "samsung,s5m8767-clk"),
+};
+
+static const struct mfd_cell s2dos05_devs[] = {
+ MFD_CELL_NAME("s2dos05-regulator"),
+};
+
+static const struct mfd_cell s2mpg10_devs[] = {
+ MFD_CELL_NAME("s2mpg10-meter"),
+ MFD_CELL_NAME("s2mpg10-regulator"),
+ MFD_CELL_NAME("s2mpg10-rtc"),
+ MFD_CELL_OF("s2mpg10-clk", NULL, NULL, 0, 0, "samsung,s2mpg10-clk"),
+ MFD_CELL_OF("s2mpg10-gpio", NULL, NULL, 0, 0, "samsung,s2mpg10-gpio"),
+};
+
+static const struct mfd_cell s2mps11_devs[] = {
+ MFD_CELL_NAME("s2mps11-regulator"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+ MFD_CELL_OF("s2mps11-clk", NULL, NULL, 0, 0, "samsung,s2mps11-clk"),
+};
+
+static const struct mfd_cell s2mps13_devs[] = {
+ MFD_CELL_NAME("s2mps13-regulator"),
+ MFD_CELL_NAME("s2mps13-rtc"),
+ MFD_CELL_OF("s2mps13-clk", NULL, NULL, 0, 0, "samsung,s2mps13-clk"),
+};
+
+static const struct mfd_cell s2mps14_devs[] = {
+ MFD_CELL_NAME("s2mps14-regulator"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+ MFD_CELL_OF("s2mps14-clk", NULL, NULL, 0, 0, "samsung,s2mps14-clk"),
+};
+
+static const struct mfd_cell s2mps15_devs[] = {
+ MFD_CELL_NAME("s2mps15-regulator"),
+ MFD_CELL_NAME("s2mps15-rtc"),
+ MFD_CELL_OF("s2mps13-clk", NULL, NULL, 0, 0, "samsung,s2mps13-clk"),
+};
+
+static const struct mfd_cell s2mpa01_devs[] = {
+ MFD_CELL_NAME("s2mpa01-pmic"),
+ MFD_CELL_NAME("s2mps14-rtc"),
+};
+
+static const struct mfd_cell s2mpu02_devs[] = {
+ MFD_CELL_NAME("s2mpu02-regulator"),
+};
+
+static const struct mfd_cell s2mpu05_devs[] = {
+ MFD_CELL_NAME("s2mpu05-regulator"),
+ MFD_CELL_NAME("s2mps15-rtc"),
+};
+
+static void sec_pmic_dump_rev(struct sec_pmic_dev *sec_pmic)
+{
+ unsigned int val;
+
+ /* For s2mpg1x, the revision is in a different regmap */
+ if (sec_pmic->device_type == S2MPG10)
+ return;
+
+ /* For each device type, the REG_ID is always the first register */
+ if (!regmap_read(sec_pmic->regmap_pmic, S2MPS11_REG_ID, &val))
+ dev_dbg(sec_pmic->dev, "Revision: 0x%x\n", val);
+}
+
+static void sec_pmic_configure(struct sec_pmic_dev *sec_pmic)
+{
+ int err;
+
+ if (sec_pmic->device_type != S2MPS13X)
+ return;
+
+ if (sec_pmic->pdata->disable_wrstbi) {
+ /*
+ * If WRSTBI pin is pulled down this feature must be disabled
+ * because each Suspend to RAM will trigger buck voltage reset
+ * to default values.
+ */
+ err = regmap_update_bits(sec_pmic->regmap_pmic,
+ S2MPS13_REG_WRSTBI,
+ S2MPS13_REG_WRSTBI_MASK, 0x0);
+ if (err)
+ dev_warn(sec_pmic->dev,
+ "Cannot initialize WRSTBI config: %d\n",
+ err);
+ }
+}
+
+/*
+ * Only the common platform data elements for s5m8767 are parsed here from the
+ * device tree. Other sub-modules of s5m8767 such as pmic, rtc, charger and
+ * others have to parse their own platform data elements from the device tree.
+ *
+ * The s5m8767 platform data structure is instantiated here and the drivers for
+ * the sub-modules need not instantiate another instance while parsing their
+ * platform data.
+ */
+static struct sec_platform_data *
+sec_pmic_parse_dt_pdata(struct device *dev)
+{
+ struct sec_platform_data *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->manual_poweroff = of_property_read_bool(dev->of_node,
+ "samsung,s2mps11-acokb-ground");
+ pd->disable_wrstbi = of_property_read_bool(dev->of_node,
+ "samsung,s2mps11-wrstbi-ground");
+ return pd;
+}
+
+int sec_pmic_probe(struct device *dev, int device_type, unsigned int irq,
+ struct regmap *regmap, struct i2c_client *client)
+{
+ struct sec_platform_data *pdata;
+ const struct mfd_cell *sec_devs;
+ struct sec_pmic_dev *sec_pmic;
+ int ret, num_sec_devs;
+
+ sec_pmic = devm_kzalloc(dev, sizeof(*sec_pmic), GFP_KERNEL);
+ if (!sec_pmic)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, sec_pmic);
+ sec_pmic->dev = dev;
+ sec_pmic->device_type = device_type;
+ sec_pmic->i2c = client;
+ sec_pmic->irq = irq;
+ sec_pmic->regmap_pmic = regmap;
+
+ pdata = sec_pmic_parse_dt_pdata(sec_pmic->dev);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ return ret;
+ }
+
+ sec_pmic->pdata = pdata;
+
+ ret = sec_irq_init(sec_pmic);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_active(sec_pmic->dev);
+
+ switch (sec_pmic->device_type) {
+ case S5M8767X:
+ sec_devs = s5m8767_devs;
+ num_sec_devs = ARRAY_SIZE(s5m8767_devs);
+ break;
+ case S2DOS05:
+ sec_devs = s2dos05_devs;
+ num_sec_devs = ARRAY_SIZE(s2dos05_devs);
+ break;
+ case S2MPA01:
+ sec_devs = s2mpa01_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpa01_devs);
+ break;
+ case S2MPG10:
+ sec_devs = s2mpg10_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpg10_devs);
+ break;
+ case S2MPS11X:
+ sec_devs = s2mps11_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps11_devs);
+ break;
+ case S2MPS13X:
+ sec_devs = s2mps13_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps13_devs);
+ break;
+ case S2MPS14X:
+ sec_devs = s2mps14_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps14_devs);
+ break;
+ case S2MPS15X:
+ sec_devs = s2mps15_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps15_devs);
+ break;
+ case S2MPU02:
+ sec_devs = s2mpu02_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpu02_devs);
+ break;
+ case S2MPU05:
+ sec_devs = s2mpu05_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpu05_devs);
+ break;
+ default:
+ return dev_err_probe(sec_pmic->dev, -EINVAL,
+ "Unsupported device type %d\n",
+ sec_pmic->device_type);
+ }
+ ret = devm_mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs,
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ sec_pmic_configure(sec_pmic);
+ sec_pmic_dump_rev(sec_pmic);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sec_pmic_probe);
+
+void sec_pmic_shutdown(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+ unsigned int reg, mask;
+
+ if (!sec_pmic->pdata->manual_poweroff)
+ return;
+
+ switch (sec_pmic->device_type) {
+ case S2MPS11X:
+ reg = S2MPS11_REG_CTRL1;
+ mask = S2MPS11_CTRL1_PWRHOLD_MASK;
+ break;
+ default:
+ /*
+ * Currently only one board with S2MPS11 needs this, so just
+ * ignore the rest.
+ */
+ dev_warn(sec_pmic->dev,
+ "Unsupported device %d for manual power off\n",
+ sec_pmic->device_type);
+ return;
+ }
+
+ regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, 0);
+}
+EXPORT_SYMBOL_GPL(sec_pmic_shutdown);
+
+static int sec_pmic_suspend(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(sec_pmic->irq);
+ /*
+ * PMIC IRQ must be disabled during suspend for RTC alarm
+ * to work properly.
+ * When the device is woken up from suspend, an
+ * interrupt occurs before the I2C bus controller is resumed.
+ * The interrupt is handled by regmap_irq_thread which tries
+ * to read RTC registers. This read fails (I2C is still
+ * suspended) and RTC Alarm interrupt is disabled.
+ */
+ disable_irq(sec_pmic->irq);
+
+ return 0;
+}
+
+static int sec_pmic_resume(struct device *dev)
+{
+ struct sec_pmic_dev *sec_pmic = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(sec_pmic->irq);
+ enable_irq(sec_pmic->irq);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
+EXPORT_SYMBOL_GPL(sec_pmic_pm_ops);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("Core driver for the Samsung S5M");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
deleted file mode 100644
index 3e9b65c988a7..000000000000
--- a/drivers/mfd/sec-core.c
+++ /dev/null
@@ -1,481 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// Copyright (c) 2012 Samsung Electronics Co., Ltd
-// http://www.samsung.com
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/of.h>
-#include <linux/interrupt.h>
-#include <linux/pm_runtime.h>
-#include <linux/mutex.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/samsung/core.h>
-#include <linux/mfd/samsung/irq.h>
-#include <linux/mfd/samsung/s2mpa01.h>
-#include <linux/mfd/samsung/s2mps11.h>
-#include <linux/mfd/samsung/s2mps13.h>
-#include <linux/mfd/samsung/s2mps14.h>
-#include <linux/mfd/samsung/s2mps15.h>
-#include <linux/mfd/samsung/s2mpu02.h>
-#include <linux/mfd/samsung/s5m8767.h>
-#include <linux/regmap.h>
-
-static const struct mfd_cell s5m8767_devs[] = {
- { .name = "s5m8767-pmic", },
- { .name = "s5m-rtc", },
- {
- .name = "s5m8767-clk",
- .of_compatible = "samsung,s5m8767-clk",
- },
-};
-
-static const struct mfd_cell s2dos05_devs[] = {
- { .name = "s2dos05-regulator", },
-};
-
-static const struct mfd_cell s2mps11_devs[] = {
- { .name = "s2mps11-regulator", },
- { .name = "s2mps14-rtc", },
- {
- .name = "s2mps11-clk",
- .of_compatible = "samsung,s2mps11-clk",
- },
-};
-
-static const struct mfd_cell s2mps13_devs[] = {
- { .name = "s2mps13-regulator", },
- { .name = "s2mps13-rtc", },
- {
- .name = "s2mps13-clk",
- .of_compatible = "samsung,s2mps13-clk",
- },
-};
-
-static const struct mfd_cell s2mps14_devs[] = {
- { .name = "s2mps14-regulator", },
- { .name = "s2mps14-rtc", },
- {
- .name = "s2mps14-clk",
- .of_compatible = "samsung,s2mps14-clk",
- },
-};
-
-static const struct mfd_cell s2mps15_devs[] = {
- { .name = "s2mps15-regulator", },
- { .name = "s2mps15-rtc", },
- {
- .name = "s2mps13-clk",
- .of_compatible = "samsung,s2mps13-clk",
- },
-};
-
-static const struct mfd_cell s2mpa01_devs[] = {
- { .name = "s2mpa01-pmic", },
- { .name = "s2mps14-rtc", },
-};
-
-static const struct mfd_cell s2mpu02_devs[] = {
- { .name = "s2mpu02-regulator", },
-};
-
-static const struct mfd_cell s2mpu05_devs[] = {
- { .name = "s2mpu05-regulator", },
- { .name = "s2mps15-rtc", },
-};
-
-static const struct of_device_id sec_dt_match[] = {
- {
- .compatible = "samsung,s5m8767-pmic",
- .data = (void *)S5M8767X,
- }, {
- .compatible = "samsung,s2dos05",
- .data = (void *)S2DOS05,
- }, {
- .compatible = "samsung,s2mps11-pmic",
- .data = (void *)S2MPS11X,
- }, {
- .compatible = "samsung,s2mps13-pmic",
- .data = (void *)S2MPS13X,
- }, {
- .compatible = "samsung,s2mps14-pmic",
- .data = (void *)S2MPS14X,
- }, {
- .compatible = "samsung,s2mps15-pmic",
- .data = (void *)S2MPS15X,
- }, {
- .compatible = "samsung,s2mpa01-pmic",
- .data = (void *)S2MPA01,
- }, {
- .compatible = "samsung,s2mpu02-pmic",
- .data = (void *)S2MPU02,
- }, {
- .compatible = "samsung,s2mpu05-pmic",
- .data = (void *)S2MPU05,
- }, {
- /* Sentinel */
- },
-};
-MODULE_DEVICE_TABLE(of, sec_dt_match);
-
-static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPA01_REG_INT1M:
- case S2MPA01_REG_INT2M:
- case S2MPA01_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static bool s2mps11_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPS11_REG_INT1M:
- case S2MPS11_REG_INT2M:
- case S2MPS11_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static bool s2mpu02_volatile(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S2MPU02_REG_INT1M:
- case S2MPU02_REG_INT2M:
- case S2MPU02_REG_INT3M:
- return false;
- default:
- return true;
- }
-}
-
-static const struct regmap_config sec_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
-static const struct regmap_config s2mpa01_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPA01_REG_LDO_OVCB4,
- .volatile_reg = s2mpa01_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps11_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS11_REG_L38CTRL,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps13_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS13_REG_LDODSCH5,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps14_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS14_REG_LDODSCH3,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mps15_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPS15_REG_LDODSCH4,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s2mpu02_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S2MPU02_REG_DVSDATA,
- .volatile_reg = s2mpu02_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static const struct regmap_config s5m8767_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = S5M8767_REG_LDO28CTRL,
- .volatile_reg = s2mps11_volatile,
- .cache_type = REGCACHE_FLAT,
-};
-
-static void sec_pmic_dump_rev(struct sec_pmic_dev *sec_pmic)
-{
- unsigned int val;
-
- /* For each device type, the REG_ID is always the first register */
- if (!regmap_read(sec_pmic->regmap_pmic, S2MPS11_REG_ID, &val))
- dev_dbg(sec_pmic->dev, "Revision: 0x%x\n", val);
-}
-
-static void sec_pmic_configure(struct sec_pmic_dev *sec_pmic)
-{
- int err;
-
- if (sec_pmic->device_type != S2MPS13X)
- return;
-
- if (sec_pmic->pdata->disable_wrstbi) {
- /*
- * If WRSTBI pin is pulled down this feature must be disabled
- * because each Suspend to RAM will trigger buck voltage reset
- * to default values.
- */
- err = regmap_update_bits(sec_pmic->regmap_pmic,
- S2MPS13_REG_WRSTBI,
- S2MPS13_REG_WRSTBI_MASK, 0x0);
- if (err)
- dev_warn(sec_pmic->dev,
- "Cannot initialize WRSTBI config: %d\n",
- err);
- }
-}
-
-/*
- * Only the common platform data elements for s5m8767 are parsed here from the
- * device tree. Other sub-modules of s5m8767 such as pmic, rtc , charger and
- * others have to parse their own platform data elements from device tree.
- *
- * The s5m8767 platform data structure is instantiated here and the drivers for
- * the sub-modules need not instantiate another instance while parsing their
- * platform data.
- */
-static struct sec_platform_data *
-sec_pmic_i2c_parse_dt_pdata(struct device *dev)
-{
- struct sec_platform_data *pd;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
-
- pd->manual_poweroff = of_property_read_bool(dev->of_node,
- "samsung,s2mps11-acokb-ground");
- pd->disable_wrstbi = of_property_read_bool(dev->of_node,
- "samsung,s2mps11-wrstbi-ground");
- return pd;
-}
-
-static int sec_pmic_probe(struct i2c_client *i2c)
-{
- const struct regmap_config *regmap;
- struct sec_platform_data *pdata;
- const struct mfd_cell *sec_devs;
- struct sec_pmic_dev *sec_pmic;
- int ret, num_sec_devs;
-
- sec_pmic = devm_kzalloc(&i2c->dev, sizeof(struct sec_pmic_dev),
- GFP_KERNEL);
- if (sec_pmic == NULL)
- return -ENOMEM;
-
- i2c_set_clientdata(i2c, sec_pmic);
- sec_pmic->dev = &i2c->dev;
- sec_pmic->i2c = i2c;
- sec_pmic->irq = i2c->irq;
-
- pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- return ret;
- }
-
- sec_pmic->device_type = (unsigned long)of_device_get_match_data(sec_pmic->dev);
- sec_pmic->pdata = pdata;
-
- switch (sec_pmic->device_type) {
- case S2MPA01:
- regmap = &s2mpa01_regmap_config;
- break;
- case S2MPS11X:
- regmap = &s2mps11_regmap_config;
- break;
- case S2MPS13X:
- regmap = &s2mps13_regmap_config;
- break;
- case S2MPS14X:
- regmap = &s2mps14_regmap_config;
- break;
- case S2MPS15X:
- regmap = &s2mps15_regmap_config;
- break;
- case S5M8767X:
- regmap = &s5m8767_regmap_config;
- break;
- case S2MPU02:
- regmap = &s2mpu02_regmap_config;
- break;
- default:
- regmap = &sec_regmap_config;
- break;
- }
-
- sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap);
- if (IS_ERR(sec_pmic->regmap_pmic)) {
- ret = PTR_ERR(sec_pmic->regmap_pmic);
- dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
- ret);
- return ret;
- }
-
- sec_irq_init(sec_pmic);
-
- pm_runtime_set_active(sec_pmic->dev);
-
- switch (sec_pmic->device_type) {
- case S5M8767X:
- sec_devs = s5m8767_devs;
- num_sec_devs = ARRAY_SIZE(s5m8767_devs);
- break;
- case S2DOS05:
- sec_devs = s2dos05_devs;
- num_sec_devs = ARRAY_SIZE(s2dos05_devs);
- break;
- case S2MPA01:
- sec_devs = s2mpa01_devs;
- num_sec_devs = ARRAY_SIZE(s2mpa01_devs);
- break;
- case S2MPS11X:
- sec_devs = s2mps11_devs;
- num_sec_devs = ARRAY_SIZE(s2mps11_devs);
- break;
- case S2MPS13X:
- sec_devs = s2mps13_devs;
- num_sec_devs = ARRAY_SIZE(s2mps13_devs);
- break;
- case S2MPS14X:
- sec_devs = s2mps14_devs;
- num_sec_devs = ARRAY_SIZE(s2mps14_devs);
- break;
- case S2MPS15X:
- sec_devs = s2mps15_devs;
- num_sec_devs = ARRAY_SIZE(s2mps15_devs);
- break;
- case S2MPU02:
- sec_devs = s2mpu02_devs;
- num_sec_devs = ARRAY_SIZE(s2mpu02_devs);
- break;
- case S2MPU05:
- sec_devs = s2mpu05_devs;
- num_sec_devs = ARRAY_SIZE(s2mpu05_devs);
- break;
- default:
- dev_err(&i2c->dev, "Unsupported device type (%lu)\n",
- sec_pmic->device_type);
- return -ENODEV;
- }
- ret = devm_mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs,
- NULL, 0, NULL);
- if (ret)
- return ret;
-
- sec_pmic_configure(sec_pmic);
- sec_pmic_dump_rev(sec_pmic);
-
- return ret;
-}
-
-static void sec_pmic_shutdown(struct i2c_client *i2c)
-{
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
- unsigned int reg, mask;
-
- if (!sec_pmic->pdata->manual_poweroff)
- return;
-
- switch (sec_pmic->device_type) {
- case S2MPS11X:
- reg = S2MPS11_REG_CTRL1;
- mask = S2MPS11_CTRL1_PWRHOLD_MASK;
- break;
- default:
- /*
- * Currently only one board with S2MPS11 needs this, so just
- * ignore the rest.
- */
- dev_warn(sec_pmic->dev,
- "Unsupported device %lu for manual power off\n",
- sec_pmic->device_type);
- return;
- }
-
- regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, 0);
-}
-
-static int sec_pmic_suspend(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(sec_pmic->irq);
- /*
- * PMIC IRQ must be disabled during suspend for RTC alarm
- * to work properly.
- * When device is woken up from suspend, an
- * interrupt occurs before resuming I2C bus controller.
- * The interrupt is handled by regmap_irq_thread which tries
- * to read RTC registers. This read fails (I2C is still
- * suspended) and RTC Alarm interrupt is disabled.
- */
- disable_irq(sec_pmic->irq);
-
- return 0;
-}
-
-static int sec_pmic_resume(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(sec_pmic->irq);
- enable_irq(sec_pmic->irq);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops,
- sec_pmic_suspend, sec_pmic_resume);
-
-static struct i2c_driver sec_pmic_driver = {
- .driver = {
- .name = "sec_pmic",
- .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
- .of_match_table = sec_dt_match,
- },
- .probe = sec_pmic_probe,
- .shutdown = sec_pmic_shutdown,
-};
-module_i2c_driver(sec_pmic_driver);
-
-MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("Core support for the S5M MFD");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-core.h b/drivers/mfd/sec-core.h
new file mode 100644
index 000000000000..92c7558ab8b0
--- /dev/null
+++ b/drivers/mfd/sec-core.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM core driver internal data
+ */
+
+#ifndef __SEC_CORE_INT_H
+#define __SEC_CORE_INT_H
+
+struct i2c_client;
+
+extern const struct dev_pm_ops sec_pmic_pm_ops;
+
+int sec_pmic_probe(struct device *dev, int device_type, unsigned int irq,
+ struct regmap *regmap, struct i2c_client *client);
+void sec_pmic_shutdown(struct device *dev);
+
+int sec_irq_init(struct sec_pmic_dev *sec_pmic);
+
+#endif /* __SEC_CORE_INT_H */
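
The point of this split is that the core stops knowing about I2C entirely:
any transport able to hand the core a regmap, an IRQ number and a device
type can reuse it through sec-core.h. As a minimal sketch (not part of this
series), a hypothetical SPI glue driver could look as follows; everything
except sec_pmic_probe() and the sec-core.h prototypes is invented for
illustration:

	/* Hypothetical SPI transport reusing the split-out sec core. */
	#include <linux/err.h>
	#include <linux/regmap.h>
	#include <linux/spi/spi.h>
	#include "sec-core.h"

	static const struct regmap_config sec_spi_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	static int sec_pmic_spi_probe(struct spi_device *spi)
	{
		struct regmap *regmap;

		regmap = devm_regmap_init_spi(spi, &sec_spi_regmap_config);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		/* There is no i2c_client on SPI; NULL is assumed acceptable */
		return sec_pmic_probe(&spi->dev, S2MPS11X, spi->irq,
				      regmap, NULL);
	}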
diff --git a/drivers/mfd/sec-i2c.c b/drivers/mfd/sec-i2c.c
new file mode 100644
index 000000000000..3132b849b4bc
--- /dev/null
+++ b/drivers/mfd/sec-i2c.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Samsung SxM I2C driver
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s2mpa01.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps13.h>
+#include <linux/mfd/samsung/s2mps14.h>
+#include <linux/mfd/samsung/s2mps15.h>
+#include <linux/mfd/samsung/s2mpu02.h>
+#include <linux/mfd/samsung/s5m8767.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+struct sec_pmic_i2c_platform_data {
+ const struct regmap_config *regmap_cfg;
+ int device_type;
+};
+
+static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPA01_REG_INT1M:
+ case S2MPA01_REG_INT2M:
+ case S2MPA01_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool s2mps11_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPS11_REG_INT1M:
+ case S2MPS11_REG_INT2M:
+ case S2MPS11_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool s2mpu02_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPU02_REG_INT1M:
+ case S2MPU02_REG_INT2M:
+ case S2MPU02_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config s2dos05_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct regmap_config s2mpa01_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPA01_REG_LDO_OVCB4,
+ .volatile_reg = s2mpa01_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps11_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS11_REG_L38CTRL,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps13_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS13_REG_LDODSCH5,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps14_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS14_REG_LDODSCH3,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mps15_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPS15_REG_LDODSCH4,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mpu02_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPU02_REG_DVSDATA,
+ .volatile_reg = s2mpu02_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config s2mpu05_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct regmap_config s5m8767_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S5M8767_REG_LDO28CTRL,
+ .volatile_reg = s2mps11_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int sec_pmic_i2c_probe(struct i2c_client *client)
+{
+ const struct sec_pmic_i2c_platform_data *pdata;
+ struct regmap *regmap_pmic;
+
+ pdata = device_get_match_data(&client->dev);
+ if (!pdata)
+ return dev_err_probe(&client->dev, -ENODEV,
+ "Unsupported device type\n");
+
+ regmap_pmic = devm_regmap_init_i2c(client, pdata->regmap_cfg);
+ if (IS_ERR(regmap_pmic))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap_pmic),
+ "regmap init failed\n");
+
+ return sec_pmic_probe(&client->dev, pdata->device_type, client->irq,
+ regmap_pmic, client);
+}
+
+static void sec_pmic_i2c_shutdown(struct i2c_client *i2c)
+{
+ sec_pmic_shutdown(&i2c->dev);
+}
+
+static const struct sec_pmic_i2c_platform_data s2dos05_data = {
+ .regmap_cfg = &s2dos05_regmap_config,
+	.device_type = S2DOS05,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpa01_data = {
+ .regmap_cfg = &s2mpa01_regmap_config,
+ .device_type = S2MPA01,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps11_data = {
+ .regmap_cfg = &s2mps11_regmap_config,
+ .device_type = S2MPS11X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps13_data = {
+ .regmap_cfg = &s2mps13_regmap_config,
+ .device_type = S2MPS13X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps14_data = {
+ .regmap_cfg = &s2mps14_regmap_config,
+ .device_type = S2MPS14X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mps15_data = {
+ .regmap_cfg = &s2mps15_regmap_config,
+ .device_type = S2MPS15X,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpu02_data = {
+ .regmap_cfg = &s2mpu02_regmap_config,
+ .device_type = S2MPU02,
+};
+
+static const struct sec_pmic_i2c_platform_data s2mpu05_data = {
+ .regmap_cfg = &s2mpu05_regmap_config,
+ .device_type = S2MPU05,
+};
+
+static const struct sec_pmic_i2c_platform_data s5m8767_data = {
+ .regmap_cfg = &s5m8767_regmap_config,
+ .device_type = S5M8767X,
+};
+
+static const struct of_device_id sec_pmic_i2c_of_match[] = {
+ { .compatible = "samsung,s2dos05", .data = &s2dos05_data, },
+ { .compatible = "samsung,s2mpa01-pmic", .data = &s2mpa01_data, },
+ { .compatible = "samsung,s2mps11-pmic", .data = &s2mps11_data, },
+ { .compatible = "samsung,s2mps13-pmic", .data = &s2mps13_data, },
+ { .compatible = "samsung,s2mps14-pmic", .data = &s2mps14_data, },
+ { .compatible = "samsung,s2mps15-pmic", .data = &s2mps15_data, },
+ { .compatible = "samsung,s2mpu02-pmic", .data = &s2mpu02_data, },
+ { .compatible = "samsung,s2mpu05-pmic", .data = &s2mpu05_data, },
+ { .compatible = "samsung,s5m8767-pmic", .data = &s5m8767_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sec_pmic_i2c_of_match);
+
+static struct i2c_driver sec_pmic_i2c_driver = {
+ .driver = {
+ .name = "sec-pmic-i2c",
+ .pm = pm_sleep_ptr(&sec_pmic_pm_ops),
+ .of_match_table = sec_pmic_i2c_of_match,
+ },
+ .probe = sec_pmic_i2c_probe,
+ .shutdown = sec_pmic_i2c_shutdown,
+};
+module_i2c_driver(sec_pmic_i2c_driver);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("I2C driver for the Samsung S5M");
+MODULE_LICENSE("GPL");
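
Both error paths in sec_pmic_i2c_probe() above rely on dev_err_probe(),
which logs the message, returns the error unchanged, and keeps quiet for
-EPROBE_DEFER (recording a deferral reason instead). For comparison, the
open-coded equivalent it replaces (minus the deferral handling):

	if (IS_ERR(regmap_pmic)) {
		ret = PTR_ERR(regmap_pmic);
		dev_err(&client->dev, "regmap init failed: %d\n", ret);
		return ret;
	}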
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 047fc065fcf1..c5c80b1ba104 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -3,227 +3,139 @@
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
// http://www.samsung.com
-#include <linux/device.h>
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/dev_printk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/s2mpg10.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps14.h>
#include <linux/mfd/samsung/s2mpu02.h>
#include <linux/mfd/samsung/s2mpu05.h>
#include <linux/mfd/samsung/s5m8767.h>
+#include <linux/regmap.h>
+#include "sec-core.h"
+
+static const struct regmap_irq s2mpg10_irqs[] = {
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRONF, 0, S2MPG10_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRONR, 0, S2MPG10_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_JIGONBF, 0, S2MPG10_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_JIGONBR, 0, S2MPG10_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_ACOKBF, 0, S2MPG10_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_ACOKBR, 0, S2MPG10_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWRON1S, 0, S2MPG10_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_MRB, 0, S2MPG10_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTC60S, 1, S2MPG10_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTCA1, 1, S2MPG10_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTCA0, 1, S2MPG10_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_RTC1S, 1, S2MPG10_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR_COLDRST, 1, S2MPG10_IRQ_WTSR_COLDRST_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR, 1, S2MPG10_IRQ_WTSR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WRST, 1, S2MPG10_IRQ_WRST_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SMPL, 1, S2MPG10_IRQ_SMPL_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_120C, 2, S2MPG10_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_140C, 2, S2MPG10_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_TSD, 2, S2MPG10_IRQ_TSD_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PIF_TIMEOUT1, 2, S2MPG10_IRQ_PIF_TIMEOUT1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PIF_TIMEOUT2, 2, S2MPG10_IRQ_PIF_TIMEOUT2_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_PARITY_ERR, 2, S2MPG10_IRQ_SPD_PARITY_ERR_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_ABNORMAL_STOP, 2, S2MPG10_IRQ_SPD_ABNORMAL_STOP_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PMETER_OVERF, 2, S2MPG10_IRQ_PMETER_OVERF_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B1M, 3, S2MPG10_IRQ_OCP_B1M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B2M, 3, S2MPG10_IRQ_OCP_B2M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B3M, 3, S2MPG10_IRQ_OCP_B3M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B4M, 3, S2MPG10_IRQ_OCP_B4M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B5M, 3, S2MPG10_IRQ_OCP_B5M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B6M, 3, S2MPG10_IRQ_OCP_B6M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B7M, 3, S2MPG10_IRQ_OCP_B7M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B8M, 3, S2MPG10_IRQ_OCP_B8M_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B9M, 4, S2MPG10_IRQ_OCP_B9M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B10M, 4, S2MPG10_IRQ_OCP_B10M_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WLWP_ACC, 4, S2MPG10_IRQ_WLWP_ACC_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SMPL_TIMEOUT, 4, S2MPG10_IRQ_SMPL_TIMEOUT_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR_TIMEOUT, 4, S2MPG10_IRQ_WTSR_TIMEOUT_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_SPD_SRP_PKT_RST, 4, S2MPG10_IRQ_SPD_SRP_PKT_RST_MASK),
+
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH0, 5, S2MPG10_IRQ_PWR_WARN_CH0_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH1, 5, S2MPG10_IRQ_PWR_WARN_CH1_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH2, 5, S2MPG10_IRQ_PWR_WARN_CH2_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH3, 5, S2MPG10_IRQ_PWR_WARN_CH3_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH4, 5, S2MPG10_IRQ_PWR_WARN_CH4_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH5, 5, S2MPG10_IRQ_PWR_WARN_CH5_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH6, 5, S2MPG10_IRQ_PWR_WARN_CH6_MASK),
+ REGMAP_IRQ_REG(S2MPG10_IRQ_PWR_WARN_CH7, 5, S2MPG10_IRQ_PWR_WARN_CH7_MASK),
+};
static const struct regmap_irq s2mps11_irqs[] = {
- [S2MPS11_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPS11_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPS11_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPS11_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPS11_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPS11_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPS11_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPS11_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPS11_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPS11_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPS11_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPS11_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPS11_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPS11_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPS11_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPS11_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPS11_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPS11_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
};
static const struct regmap_irq s2mps14_irqs[] = {
- [S2MPS14_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPS14_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPS14_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPS14_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPS14_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPS14_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPS14_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPS14_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPS14_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPS14_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPS14_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPS14_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPS14_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPS14_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPS14_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPS14_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
- [S2MPS14_IRQ_TSD] = {
- .reg_offset = 2,
- .mask = S2MPS14_IRQ_TSD_MASK,
- },
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPS14_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPS14_IRQ_TSD, 2, S2MPS14_IRQ_TSD_MASK),
};
static const struct regmap_irq s2mpu02_irqs[] = {
- [S2MPU02_IRQ_PWRONF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONF_MASK,
- },
- [S2MPU02_IRQ_PWRONR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRONR_MASK,
- },
- [S2MPU02_IRQ_JIGONBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBF_MASK,
- },
- [S2MPU02_IRQ_JIGONBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_JIGONBR_MASK,
- },
- [S2MPU02_IRQ_ACOKBF] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBF_MASK,
- },
- [S2MPU02_IRQ_ACOKBR] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_ACOKBR_MASK,
- },
- [S2MPU02_IRQ_PWRON1S] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_PWRON1S_MASK,
- },
- [S2MPU02_IRQ_MRB] = {
- .reg_offset = 0,
- .mask = S2MPS11_IRQ_MRB_MASK,
- },
- [S2MPU02_IRQ_RTC60S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC60S_MASK,
- },
- [S2MPU02_IRQ_RTCA1] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA1_MASK,
- },
- [S2MPU02_IRQ_RTCA0] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTCA0_MASK,
- },
- [S2MPU02_IRQ_SMPL] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_SMPL_MASK,
- },
- [S2MPU02_IRQ_RTC1S] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_RTC1S_MASK,
- },
- [S2MPU02_IRQ_WTSR] = {
- .reg_offset = 1,
- .mask = S2MPS11_IRQ_WTSR_MASK,
- },
- [S2MPU02_IRQ_INT120C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT120C_MASK,
- },
- [S2MPU02_IRQ_INT140C] = {
- .reg_offset = 2,
- .mask = S2MPS11_IRQ_INT140C_MASK,
- },
- [S2MPU02_IRQ_TSD] = {
- .reg_offset = 2,
- .mask = S2MPS14_IRQ_TSD_MASK,
- },
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRONR, 0, S2MPS11_IRQ_PWRONR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_JIGONBF, 0, S2MPS11_IRQ_JIGONBF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_JIGONBR, 0, S2MPS11_IRQ_JIGONBR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_ACOKBF, 0, S2MPS11_IRQ_ACOKBF_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_ACOKBR, 0, S2MPS11_IRQ_ACOKBR_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_PWRON1S, 0, S2MPS11_IRQ_PWRON1S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_MRB, 0, S2MPS11_IRQ_MRB_MASK),
+
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTC60S, 1, S2MPS11_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTCA1, 1, S2MPS11_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTCA0, 1, S2MPS11_IRQ_RTCA0_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_SMPL, 1, S2MPS11_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_RTC1S, 1, S2MPS11_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_WTSR, 1, S2MPS11_IRQ_WTSR_MASK),
+
+ REGMAP_IRQ_REG(S2MPU02_IRQ_INT120C, 2, S2MPS11_IRQ_INT120C_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_INT140C, 2, S2MPS11_IRQ_INT140C_MASK),
+ REGMAP_IRQ_REG(S2MPU02_IRQ_TSD, 2, S2MPS14_IRQ_TSD_MASK),
};
static const struct regmap_irq s2mpu05_irqs[] = {
@@ -247,74 +159,35 @@ static const struct regmap_irq s2mpu05_irqs[] = {
};
static const struct regmap_irq s5m8767_irqs[] = {
- [S5M8767_IRQ_PWRR] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWRR_MASK,
- },
- [S5M8767_IRQ_PWRF] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWRF_MASK,
- },
- [S5M8767_IRQ_PWR1S] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_PWR1S_MASK,
- },
- [S5M8767_IRQ_JIGR] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_JIGR_MASK,
- },
- [S5M8767_IRQ_JIGF] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_JIGF_MASK,
- },
- [S5M8767_IRQ_LOWBAT2] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_LOWBAT2_MASK,
- },
- [S5M8767_IRQ_LOWBAT1] = {
- .reg_offset = 0,
- .mask = S5M8767_IRQ_LOWBAT1_MASK,
- },
- [S5M8767_IRQ_MRB] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_MRB_MASK,
- },
- [S5M8767_IRQ_DVSOK2] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK2_MASK,
- },
- [S5M8767_IRQ_DVSOK3] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK3_MASK,
- },
- [S5M8767_IRQ_DVSOK4] = {
- .reg_offset = 1,
- .mask = S5M8767_IRQ_DVSOK4_MASK,
- },
- [S5M8767_IRQ_RTC60S] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTC60S_MASK,
- },
- [S5M8767_IRQ_RTCA1] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTCA1_MASK,
- },
- [S5M8767_IRQ_RTCA2] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTCA2_MASK,
- },
- [S5M8767_IRQ_SMPL] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_SMPL_MASK,
- },
- [S5M8767_IRQ_RTC1S] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_RTC1S_MASK,
- },
- [S5M8767_IRQ_WTSR] = {
- .reg_offset = 2,
- .mask = S5M8767_IRQ_WTSR_MASK,
- },
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWRR, 0, S5M8767_IRQ_PWRR_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWRF, 0, S5M8767_IRQ_PWRF_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_PWR1S, 0, S5M8767_IRQ_PWR1S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_JIGR, 0, S5M8767_IRQ_JIGR_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_JIGF, 0, S5M8767_IRQ_JIGF_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_LOWBAT2, 0, S5M8767_IRQ_LOWBAT2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_LOWBAT1, 0, S5M8767_IRQ_LOWBAT1_MASK),
+
+ REGMAP_IRQ_REG(S5M8767_IRQ_MRB, 1, S5M8767_IRQ_MRB_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK2, 1, S5M8767_IRQ_DVSOK2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK3, 1, S5M8767_IRQ_DVSOK3_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_DVSOK4, 1, S5M8767_IRQ_DVSOK4_MASK),
+
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTC60S, 2, S5M8767_IRQ_RTC60S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTCA1, 2, S5M8767_IRQ_RTCA1_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTCA2, 2, S5M8767_IRQ_RTCA2_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_SMPL, 2, S5M8767_IRQ_SMPL_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_RTC1S, 2, S5M8767_IRQ_RTC1S_MASK),
+ REGMAP_IRQ_REG(S5M8767_IRQ_WTSR, 2, S5M8767_IRQ_WTSR_MASK),
+};
+
+/* All S2MPG10 interrupt sources are read-only and don't require clearing */
+static const struct regmap_irq_chip s2mpg10_irq_chip = {
+ .name = "s2mpg10",
+ .irqs = s2mpg10_irqs,
+ .num_irqs = ARRAY_SIZE(s2mpg10_irqs),
+ .num_regs = 6,
+ .status_base = S2MPG10_PMIC_INT1,
+ .mask_base = S2MPG10_PMIC_INT1M,
};
static const struct regmap_irq_chip s2mps11_irq_chip = {
@@ -382,23 +255,21 @@ static const struct regmap_irq_chip s5m8767_irq_chip = {
int sec_irq_init(struct sec_pmic_dev *sec_pmic)
{
- int ret = 0;
- int type = sec_pmic->device_type;
const struct regmap_irq_chip *sec_irq_chip;
+ int ret;
- if (!sec_pmic->irq) {
- dev_warn(sec_pmic->dev,
- "No interrupt specified, no interrupts\n");
- return 0;
- }
-
- switch (type) {
+ switch (sec_pmic->device_type) {
case S5M8767X:
sec_irq_chip = &s5m8767_irq_chip;
break;
+ case S2DOS05:
+ return 0;
case S2MPA01:
sec_irq_chip = &s2mps14_irq_chip;
break;
+ case S2MPG10:
+ sec_irq_chip = &s2mpg10_irq_chip;
+ break;
case S2MPS11X:
sec_irq_chip = &s2mps11_irq_chip;
break;
@@ -418,18 +289,24 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
sec_irq_chip = &s2mpu05_irq_chip;
break;
default:
- dev_err(sec_pmic->dev, "Unknown device type %lu\n",
- sec_pmic->device_type);
- return -EINVAL;
+ return dev_err_probe(sec_pmic->dev, -EINVAL,
+ "Unsupported device type %d\n",
+ sec_pmic->device_type);
+ }
+
+ if (!sec_pmic->irq) {
+ dev_warn(sec_pmic->dev,
+ "No interrupt specified, no interrupts\n");
+ return 0;
}
ret = devm_regmap_add_irq_chip(sec_pmic->dev, sec_pmic->regmap_pmic,
sec_pmic->irq, IRQF_ONESHOT,
0, sec_irq_chip, &sec_pmic->irq_data);
- if (ret != 0) {
- dev_err(sec_pmic->dev, "Failed to register IRQ chip: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(sec_pmic->dev, ret,
+ "Failed to add %s IRQ chip\n",
+ sec_irq_chip->name);
/*
* The rtc-s5m driver requests S2MPS14_IRQ_RTCA0 also for S2MPS11
@@ -439,10 +316,3 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return 0;
}
-EXPORT_SYMBOL_GPL(sec_irq_init);
-
-MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_DESCRIPTION("Interrupt support for the S5M MFD");
-MODULE_LICENSE("GPL");
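
The sec-irq.c conversion is mechanical: REGMAP_IRQ_REG() from
<linux/regmap.h> is defined (roughly) as below, so each one-line entry
expands back to the designated-initializer form it replaces:

	#define REGMAP_IRQ_REG(_irq, _off, _mask)		\
		[_irq] = { .reg_offset = (_off), .mask = (_mask) }

	/* i.e. this ... */
	REGMAP_IRQ_REG(S2MPS11_IRQ_PWRONF, 0, S2MPS11_IRQ_PWRONF_MASK),
	/* ... is equivalent to: */
	[S2MPS11_IRQ_PWRONF] = {
		.reg_offset = 0,
		.mask = S2MPS11_IRQ_PWRONF_MASK,
	},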
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 7ee293b09f62..a5f9241fa3f2 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -631,49 +631,6 @@ unsigned long sm501_set_clock(struct device *dev,
EXPORT_SYMBOL_GPL(sm501_set_clock);
-/* sm501_find_clock
- *
- * finds the closest available frequency for a given clock
-*/
-
-unsigned long sm501_find_clock(struct device *dev,
- int clksrc,
- unsigned long req_freq)
-{
- struct sm501_devdata *sm = dev_get_drvdata(dev);
- unsigned long sm501_freq; /* the frequency achieveable by the 501 */
- struct sm501_clock to;
-
- switch (clksrc) {
- case SM501_CLOCK_P2XCLK:
- if (sm->rev >= 0xC0) {
- /* SM502 -> use the programmable PLL */
- sm501_freq = (sm501_calc_pll(2 * req_freq,
- &to, 5) / 2);
- } else {
- sm501_freq = (sm501_select_clock(2 * req_freq,
- &to, 5) / 2);
- }
- break;
-
- case SM501_CLOCK_V2XCLK:
- sm501_freq = (sm501_select_clock(2 * req_freq, &to, 3) / 2);
- break;
-
- case SM501_CLOCK_MCLK:
- case SM501_CLOCK_M1XCLK:
- sm501_freq = sm501_select_clock(req_freq, &to, 3);
- break;
-
- default:
- sm501_freq = 0; /* error */
- }
-
- return sm501_freq;
-}
-
-EXPORT_SYMBOL_GPL(sm501_find_clock);
-
static struct sm501_device *to_sm_device(struct platform_device *pdev)
{
return container_of(pdev, struct sm501_device, pdev);
@@ -915,7 +872,8 @@ static void sm501_gpio_ensure_gpio(struct sm501_gpio_chip *smchip,
}
}
-static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int sm501_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
@@ -939,6 +897,8 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
sm501_gpio_ensure_gpio(smchip, bit);
spin_unlock_irqrestore(&smgpio->lock, save);
+
+ return 0;
}
static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
@@ -1005,7 +965,7 @@ static const struct gpio_chip gpio_chip_template = {
.ngpio = 32,
.direction_input = sm501_gpio_input,
.direction_output = sm501_gpio_output,
- .set = sm501_gpio_set,
+ .set_rv = sm501_gpio_set,
.get = sm501_gpio_get,
};
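
The sm501 GPIO change tracks the gpiolib conversion from the void ::set
callback to ::set_rv, which returns 0 or a negative errno so setter
failures can propagate. The new callback shape, as a sketch:

	static int example_gpio_set(struct gpio_chip *chip, unsigned int offset,
				    int value)
	{
		/* write the hardware register for this line here */
		return 0;	/* or a negative errno on failure */
	}

	/* hooked up through .set_rv = example_gpio_set in the gpio_chip */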
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 7186e2108108..d6b4350779e6 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -210,7 +210,10 @@ static int sprd_pmic_probe(struct spi_device *spi)
return ret;
}
- device_init_wakeup(&spi->dev, true);
+ ret = devm_device_init_wakeup(&spi->dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to init wakeup\n");
+
return 0;
}
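
devm_device_init_wakeup() is the device-managed counterpart of
device_init_wakeup(dev, true): the wakeup capability is torn down
automatically on unbind, so no cleanup call is needed in a remove path.
The unmanaged pattern it replaces, for contrast:

	device_init_wakeup(&spi->dev, true);	/* in probe */
	/* ... */
	device_init_wakeup(&spi->dev, false);	/* required in remove */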
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index b2704a9809c7..09073dbc9c80 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -6,6 +6,7 @@
* Inspired by Benjamin Gaignard's stm32-timers driver
*/
+#include <linux/bitfield.h>
#include <linux/mfd/stm32-lptimer.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -49,6 +50,36 @@ static int stm32_lptimer_detect_encoder(struct stm32_lptimer *ddata)
return 0;
}
+static int stm32_lptimer_detect_hwcfgr(struct stm32_lptimer *ddata)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_VERR, &ddata->version);
+ if (ret)
+ return ret;
+
+ /* Try to guess parameters from HWCFGR: e.g. encoder mode (STM32MP15) */
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR1, &val);
+ if (ret)
+ return ret;
+
+ /* Fallback to legacy init if HWCFGR isn't present */
+ if (!val)
+ return stm32_lptimer_detect_encoder(ddata);
+
+ ddata->has_encoder = FIELD_GET(STM32_LPTIM_HWCFGR1_ENCODER, val);
+
+ ret = regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR2, &val);
+ if (ret)
+ return ret;
+
+ /* Number of capture/compare channels */
+ ddata->num_cc_chans = FIELD_GET(STM32_LPTIM_HWCFGR2_CHAN_NUM, val);
+
+ return 0;
+}
+
static int stm32_lptimer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -73,7 +104,7 @@ static int stm32_lptimer_probe(struct platform_device *pdev)
if (IS_ERR(ddata->clk))
return PTR_ERR(ddata->clk);
- ret = stm32_lptimer_detect_encoder(ddata);
+ ret = stm32_lptimer_detect_hwcfgr(ddata);
if (ret)
return ret;
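
FIELD_GET() pairs a GENMASK()-defined field with an implicit shift: it
masks the register value and shifts it down to the field's least
significant bit, avoiding hand-written shift constants. A self-contained
illustration with an invented field layout:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define DEMO_CHAN_NUM	GENMASK(7, 4)	/* hypothetical bits 7:4 */

	u32 hwcfgr = 0xa5;
	u32 nchans = FIELD_GET(DEMO_CHAN_NUM, hwcfgr);	/* == 0xa */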
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index f391c2ccaa72..823b1d29389e 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -269,7 +269,7 @@ static int stmfx_irq_init(struct i2c_client *client)
u32 irqoutpin = 0, irqtrigger;
int ret;
- stmfx->irq_domain = irq_domain_add_simple(stmfx->dev->of_node,
+ stmfx->irq_domain = irq_domain_create_simple(of_fwnode_handle(stmfx->dev->of_node),
STMFX_REG_IRQ_SRC_MAX, 0,
&stmfx_irq_ops, stmfx);
if (!stmfx->irq_domain) {
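
The stmfx hunk is one instance of a conversion repeated across the rest of
this section: the irq_domain_add_*() helpers taking a struct device_node
are replaced by irq_domain_create_*() equivalents taking a struct
fwnode_handle, with of_fwnode_handle() bridging the two. The mapping is
one-to-one:

	/* before */
	domain = irq_domain_add_simple(np, nr_irqs, 0, &ops, priv);
	/* after */
	domain = irq_domain_create_simple(of_fwnode_handle(np), nr_irqs, 0,
					  &ops, priv);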
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 792236f56399..b9cc85ea2c40 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -129,7 +129,7 @@ static const struct spi_device_id stmpe_spi_id[] = {
{ "stmpe2403", STMPE2403 },
{ }
};
-MODULE_DEVICE_TABLE(spi, stmpe_id);
+MODULE_DEVICE_TABLE(spi, stmpe_spi_id);
static struct spi_driver stmpe_spi_driver = {
.driver = {
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 9c3cf58457a7..819d19dc9b4a 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1219,8 +1219,8 @@ static int stmpe_irq_init(struct stmpe *stmpe, struct device_node *np)
int base = 0;
int num_irqs = stmpe->variant->num_irqs;
- stmpe->domain = irq_domain_add_simple(np, num_irqs, base,
- &stmpe_irq_ops, stmpe);
+ stmpe->domain = irq_domain_create_simple(of_fwnode_handle(np), num_irqs,
+ base, &stmpe_irq_ops, stmpe);
if (!stmpe->domain) {
dev_err(stmpe->dev, "Failed to create irqdomain\n");
return -ENOSYS;
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index ef953ee73145..2d4eb771e230 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -234,9 +234,9 @@ static const struct irq_domain_ops tc3589x_irq_ops = {
static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
{
- tc3589x->domain = irq_domain_add_simple(
- np, TC3589x_NR_INTERNAL_IRQS, 0,
- &tc3589x_irq_ops, tc3589x);
+ tc3589x->domain = irq_domain_create_simple(of_fwnode_handle(np),
+ TC3589x_NR_INTERNAL_IRQS, 0,
+ &tc3589x_irq_ops, tc3589x);
if (!tc3589x->domain) {
dev_err(tc3589x->dev, "Failed to create irqdomain\n");
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 00fb12c4f491..03bd5cd66798 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -446,7 +446,7 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
* offsets 4..5 == LED1/nPG, LED2 (we set one of the non-BLINK modes)
* offset 6 == vibrator motor driver
*/
-static void
+static int
tps65010_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
if (offset < 4)
@@ -455,6 +455,8 @@ tps65010_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
tps65010_set_led(offset - 3, value ? ON : OFF);
else
tps65010_set_vib(value);
+
+ return 0;
}
static int
@@ -512,7 +514,6 @@ static void tps65010_remove(struct i2c_client *client)
if (client->irq > 0)
free_irq(client->irq, tps);
cancel_delayed_work_sync(&tps->work);
- debugfs_remove(tps->file);
the_tps = NULL;
}
@@ -608,7 +609,7 @@ static int tps65010_probe(struct i2c_client *client)
tps65010_work(&tps->work.work);
- tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, NULL,
+ tps->file = debugfs_create_file(DRIVER_NAME, S_IRUGO, client->debugfs,
tps, DEBUG_FOPS);
/* optionally register GPIOs */
@@ -619,7 +620,7 @@ static int tps65010_probe(struct i2c_client *client)
tps->chip.parent = &client->dev;
tps->chip.owner = THIS_MODULE;
- tps->chip.set = tps65010_gpio_set;
+ tps->chip.set_rv = tps65010_gpio_set;
tps->chip.direction_output = tps65010_output;
/* NOTE: only partial support for inputs; nyet IRQs */
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 029ecc32f078..4e9669d327b4 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -158,7 +158,7 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq)
tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK,
TPS65217_INT_MASK, TPS65217_PROTECT_NONE);
- tps->irq_domain = irq_domain_add_linear(tps->dev->of_node,
+ tps->irq_domain = irq_domain_create_linear(of_fwnode_handle(tps->dev->of_node),
TPS65217_NUM_IRQ, &tps65217_irq_domain_ops, tps);
if (!tps->irq_domain) {
dev_err(tps->dev, "Could not create IRQ domain\n");
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 82714899efb2..853c48286071 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -363,7 +363,7 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
new_irq_base = 0;
}
- tps6586x->irq_domain = irq_domain_add_simple(tps6586x->dev->of_node,
+ tps6586x->irq_domain = irq_domain_create_simple(of_fwnode_handle(tps6586x->dev->of_node),
irq_num, new_irq_base, &tps6586x_domain_ops,
tps6586x);
if (!tps6586x->irq_domain) {
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 87496c1cb8bc..232c2bfe8c18 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -691,8 +691,8 @@ int twl4030_init_irq(struct device *dev, int irq_num)
return irq_base;
}
- irq_domain_add_legacy(node, nr_irqs, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
irq_end = irq_base + TWL4030_CORE_NR_IRQS;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3c03681c124c..00b14cef1dfb 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -364,7 +364,6 @@ static const struct of_device_id twl6030_of_match[] __maybe_unused = {
int twl6030_init_irq(struct device *dev, int irq_num)
{
- struct device_node *node = dev->of_node;
int nr_irqs;
int status;
u8 mask[3];
@@ -412,8 +411,8 @@ int twl6030_init_irq(struct device *dev, int irq_num)
twl6030_irq->irq_mapping_tbl = of_id->data;
twl6030_irq->irq_domain =
- irq_domain_add_linear(node, nr_irqs,
- &twl6030_irq_domain_ops, twl6030_irq);
+ irq_domain_create_linear(of_fwnode_handle(dev->of_node), nr_irqs,
+ &twl6030_irq_domain_ops, twl6030_irq);
if (!twl6030_irq->irq_domain) {
dev_err(dev, "Can't add irq_domain\n");
return -ENOMEM;
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index fc4d4c844a81..fd71ba29f6b5 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -104,7 +104,8 @@ unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
return ucb1x00_reg_read(ucb, UCB_IO_DATA);
}
-static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int ucb1x00_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ucb1x00 *ucb = gpiochip_get_data(chip);
unsigned long flags;
@@ -119,6 +120,8 @@ static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
ucb1x00_disable(ucb);
spin_unlock_irqrestore(&ucb->io_lock, flags);
+
+ return 0;
}
static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -567,7 +570,7 @@ static int ucb1x00_probe(struct mcp *mcp)
ucb->gpio.owner = THIS_MODULE;
ucb->gpio.base = pdata->gpio_base;
ucb->gpio.ngpio = 10;
- ucb->gpio.set = ucb1x00_gpio_set;
+ ucb->gpio.set_rv = ucb1x00_gpio_set;
ucb->gpio.get = ucb1x00_gpio_get;
ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index f1f58e3149ae..b3883fa5dd9f 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -587,16 +587,13 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
}
if (irq_base)
- domain = irq_domain_add_legacy(wm831x->dev->of_node,
- ARRAY_SIZE(wm831x_irqs),
- irq_base, 0,
- &wm831x_irq_domain_ops,
- wm831x);
+ domain = irq_domain_create_legacy(of_fwnode_handle(wm831x->dev->of_node),
+ ARRAY_SIZE(wm831x_irqs), irq_base, 0,
+ &wm831x_irq_domain_ops, wm831x);
else
- domain = irq_domain_add_linear(wm831x->dev->of_node,
- ARRAY_SIZE(wm831x_irqs),
- &wm831x_irq_domain_ops,
- wm831x);
+ domain = irq_domain_create_linear(of_fwnode_handle(wm831x->dev->of_node),
+ ARRAY_SIZE(wm831x_irqs), &wm831x_irq_domain_ops,
+ wm831x);
if (!domain) {
dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n");
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 651a028bc519..1475b1ac6983 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -213,9 +213,7 @@ int wm8994_irq_init(struct wm8994 *wm8994)
return ret;
}
- wm8994->edge_irq = irq_domain_add_linear(NULL, 1,
- &wm8994_edge_irq_ops,
- wm8994);
+ wm8994->edge_irq = irq_domain_create_linear(NULL, 1, &wm8994_edge_irq_ops, wm8994);
ret = regmap_add_irq_chip(wm8994->regmap,
irq_create_mapping(wm8994->edge_irq,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6b37d61150ee..b9ca56930003 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -114,6 +114,18 @@ config RPMB
If unsure, select N.
+config TI_FPC202
+ tristate "TI FPC202 Dual Port Controller"
+ depends on I2C
+ select GPIOLIB
+ select I2C_ATR
+ help
+ If you say yes here you get support for the Texas Instruments FPC202
+ Dual Port Controller.
+
+ This driver can also be built as a module. If so, the module will be
+	  called ti_fpc202.
+
config TIFM_CORE
tristate "TI Flash Media interface support"
depends on PCI
@@ -640,7 +652,6 @@ source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/genwqe/Kconfig"
-source "drivers/misc/echo/Kconfig"
source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/bcm-vk/Kconfig"
source "drivers/misc/cardreader/Kconfig"
@@ -648,4 +659,5 @@ source "drivers/misc/uacce/Kconfig"
source "drivers/misc/pvpanic/Kconfig"
source "drivers/misc/mchp_pci1xxxx/Kconfig"
source "drivers/misc/keba/Kconfig"
+source "drivers/misc/amd-sbi/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d6c917229c45..917b9a7183aa 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm/
+obj-$(CONFIG_TI_FPC202) += ti_fpc202.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
@@ -49,7 +50,6 @@ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
obj-$(CONFIG_GENWQE) += genwqe/
-obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_DW_XDATA_PCIE) += dw-xdata-pcie.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
@@ -74,3 +74,4 @@ lan966x-pci-objs := lan966x_pci.o
lan966x-pci-objs += lan966x_pci.dtbo.o
obj-$(CONFIG_MCHP_LAN966X_PCI) += lan966x-pci.o
obj-y += keba/
+obj-y += amd-sbi/
diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig
new file mode 100644
index 000000000000..4840831c84ca
--- /dev/null
+++ b/drivers/misc/amd-sbi/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config AMD_SBRMI_I2C
+ tristate "AMD side band RMI support"
+ depends on I2C
+ help
+ Side band RMI over I2C support for AMD out of band management.
+
+ This driver can also be built as a module. If so, the module will
+ be called sbrmi-i2c.
+
+config AMD_SBRMI_HWMON
+ bool "SBRMI hardware monitoring"
+ depends on AMD_SBRMI_I2C && HWMON
+ depends on !(AMD_SBRMI_I2C=y && HWMON=m)
+ help
+ This provides support for RMI device hardware monitoring. If enabled,
+ a hardware monitoring device will be created for each socket in
+ the system.
diff --git a/drivers/misc/amd-sbi/Makefile b/drivers/misc/amd-sbi/Makefile
new file mode 100644
index 000000000000..38eaaa651fd9
--- /dev/null
+++ b/drivers/misc/amd-sbi/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+sbrmi-i2c-objs += rmi-i2c.o rmi-core.o
+sbrmi-i2c-$(CONFIG_AMD_SBRMI_HWMON) += rmi-hwmon.o
+obj-$(CONFIG_AMD_SBRMI_I2C) += sbrmi-i2c.o
diff --git a/drivers/misc/amd-sbi/rmi-core.c b/drivers/misc/amd-sbi/rmi-core.c
new file mode 100644
index 000000000000..b653a21a909e
--- /dev/null
+++ b/drivers/misc/amd-sbi/rmi-core.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * rmi-core.c - file implementing the SB-RMI protocols for
+ * compliant AMD SoC devices.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include "rmi-core.h"
+
+/* Mask for Status Register bit[1] */
+#define SW_ALERT_MASK 0x2
+/* Mask to check H/W Alert status bit */
+#define HW_ALERT_MASK 0x80
+
+/* Software interrupt values used to trigger a mailbox command */
+#define START_CMD 0x80
+#define TRIGGER_MAILBOX 0x01
+
+/* Default message lengths as per APML command protocol */
+/* CPUID */
+#define CPUID_RD_DATA_LEN 0x8
+#define CPUID_WR_DATA_LEN 0x8
+#define CPUID_RD_REG_LEN 0xa
+#define CPUID_WR_REG_LEN 0x9
+/* MSR */
+#define MSR_RD_REG_LEN 0xa
+#define MSR_WR_REG_LEN 0x8
+#define MSR_RD_DATA_LEN 0x8
+#define MSR_WR_DATA_LEN 0x7
+
+/* CPUID MSR Command Ids */
+#define CPUID_MCA_CMD 0x73
+#define RD_CPUID_CMD 0x91
+#define RD_MCA_CMD 0x86
+
+/* CPUID MCAMSR mask & index */
+#define CPUID_MCA_THRD_MASK GENMASK(15, 0)
+#define CPUID_MCA_THRD_INDEX 32
+#define CPUID_MCA_FUNC_MASK GENMASK(31, 0)
+#define CPUID_EXT_FUNC_INDEX 56
+
+/* input for bulk write to CPUID protocol */
+struct cpu_msr_indata {
+ u8 wr_len; /* const value */
+ u8 rd_len; /* const value */
+ u8 proto_cmd; /* const value */
+ u8 thread; /* thread number */
+ union {
+ u8 reg_offset[4]; /* input value */
+ u32 value;
+ } __packed;
+ u8 ext; /* extended function */
+};
+
+/* output for bulk read from CPUID protocol */
+struct cpu_msr_outdata {
+	u8 num_bytes;	/* number of bytes returned */
+ u8 status; /* Protocol status code */
+ union {
+ u64 value;
+ u8 reg_data[8];
+ } __packed;
+};
+
+static inline void prepare_cpuid_input_message(struct cpu_msr_indata *input,
+ u8 thread_id, u32 func,
+ u8 ext_func)
+{
+ input->rd_len = CPUID_RD_DATA_LEN;
+ input->wr_len = CPUID_WR_DATA_LEN;
+ input->proto_cmd = RD_CPUID_CMD;
+ input->thread = thread_id << 1;
+ input->value = func;
+ input->ext = ext_func;
+}
+
+static inline void prepare_mca_msr_input_message(struct cpu_msr_indata *input,
+ u8 thread_id, u32 data_in)
+{
+ input->rd_len = MSR_RD_DATA_LEN;
+ input->wr_len = MSR_WR_DATA_LEN;
+ input->proto_cmd = RD_MCA_CMD;
+ input->thread = thread_id << 1;
+ input->value = data_in;
+}
+
+static int sbrmi_get_rev(struct sbrmi_data *data)
+{
+ unsigned int rev;
+ u16 offset = SBRMI_REV;
+ int ret;
+
+ ret = regmap_read(data->regmap, offset, &rev);
+ if (ret < 0)
+ return ret;
+
+ data->rev = rev;
+ return 0;
+}
+
+/* Read CPUID function protocol */
+static int rmi_cpuid_read(struct sbrmi_data *data,
+ struct apml_cpuid_msg *msg)
+{
+ struct cpu_msr_indata input = {0};
+ struct cpu_msr_outdata output = {0};
+ int val = 0;
+ int ret, hw_status;
+ u16 thread;
+
+ mutex_lock(&data->lock);
+	/* Cache the rev value to check whether the protocol is supported */
+ if (!data->rev) {
+ ret = sbrmi_get_rev(data);
+ if (ret < 0)
+ goto exit_unlock;
+ }
+	/* CPUID protocol for REV 0x10 is not supported */
+ if (data->rev == 0x10) {
+ ret = -EOPNOTSUPP;
+ goto exit_unlock;
+ }
+
+	thread = msg->cpu_in_out >> CPUID_MCA_THRD_INDEX & CPUID_MCA_THRD_MASK;
+
+	/* For thread > 127, the Thread128 CS register bit must be set to 1 */
+ if (thread > 127) {
+ thread -= 128;
+ val = 1;
+ }
+ ret = regmap_write(data->regmap, SBRMI_THREAD128CS, val);
+ if (ret < 0)
+ goto exit_unlock;
+
+ prepare_cpuid_input_message(&input, thread,
+ msg->cpu_in_out & CPUID_MCA_FUNC_MASK,
+ msg->cpu_in_out >> CPUID_EXT_FUNC_INDEX);
+
+ ret = regmap_bulk_write(data->regmap, CPUID_MCA_CMD,
+ &input, CPUID_WR_REG_LEN);
+ if (ret < 0)
+ goto exit_unlock;
+
+ /*
+	 * RMI Rev 0x20 introduces a new h/w status bit, which firmware
+	 * uses to indicate completion of commands (0x71, 0x72, 0x73).
+	 * Wait for the status bit to be set by the hardware before
+ * reading the data out.
+ */
+ ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, hw_status,
+ hw_status & HW_ALERT_MASK, 500, 2000000);
+ if (ret)
+ goto exit_unlock;
+
+ ret = regmap_bulk_read(data->regmap, CPUID_MCA_CMD,
+ &output, CPUID_RD_REG_LEN);
+ if (ret < 0)
+ goto exit_unlock;
+
+ ret = regmap_write(data->regmap, SBRMI_STATUS,
+ HW_ALERT_MASK);
+ if (ret < 0)
+ goto exit_unlock;
+
+ if (output.num_bytes != CPUID_RD_REG_LEN - 1) {
+ ret = -EMSGSIZE;
+ goto exit_unlock;
+ }
+ if (output.status) {
+ ret = -EPROTOTYPE;
+ msg->fw_ret_code = output.status;
+ goto exit_unlock;
+ }
+ msg->cpu_in_out = output.value;
+exit_unlock:
+ if (ret < 0)
+ msg->cpu_in_out = 0;
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+/* MCA MSR protocol */
+static int rmi_mca_msr_read(struct sbrmi_data *data,
+ struct apml_mcamsr_msg *msg)
+{
+ struct cpu_msr_outdata output = {0};
+ struct cpu_msr_indata input = {0};
+ int ret, val = 0;
+ int hw_status;
+ u16 thread;
+
+ mutex_lock(&data->lock);
+	/* Cache the rev value to check whether the protocol is supported */
+ if (!data->rev) {
+ ret = sbrmi_get_rev(data);
+ if (ret < 0)
+ goto exit_unlock;
+ }
+	/* MCA MSR protocol for REV 0x10 is not supported */
+ if (data->rev == 0x10) {
+ ret = -EOPNOTSUPP;
+ goto exit_unlock;
+ }
+
+	thread = msg->mcamsr_in_out >> CPUID_MCA_THRD_INDEX & CPUID_MCA_THRD_MASK;
+
+	/* For thread > 127, the Thread128 CS register bit must be set to 1 */
+ if (thread > 127) {
+ thread -= 128;
+ val = 1;
+ }
+ ret = regmap_write(data->regmap, SBRMI_THREAD128CS, val);
+ if (ret < 0)
+ goto exit_unlock;
+
+ prepare_mca_msr_input_message(&input, thread,
+ msg->mcamsr_in_out & CPUID_MCA_FUNC_MASK);
+
+ ret = regmap_bulk_write(data->regmap, CPUID_MCA_CMD,
+ &input, MSR_WR_REG_LEN);
+ if (ret < 0)
+ goto exit_unlock;
+
+ /*
+	 * RMI Rev 0x20 introduces a new h/w status bit, which firmware
+	 * uses to indicate completion of commands (0x71, 0x72, 0x73).
+	 * Wait for the status bit to be set by the hardware before
+ * reading the data out.
+ */
+ ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, hw_status,
+ hw_status & HW_ALERT_MASK, 500, 2000000);
+ if (ret)
+ goto exit_unlock;
+
+ ret = regmap_bulk_read(data->regmap, CPUID_MCA_CMD,
+ &output, MSR_RD_REG_LEN);
+ if (ret < 0)
+ goto exit_unlock;
+
+ ret = regmap_write(data->regmap, SBRMI_STATUS,
+ HW_ALERT_MASK);
+ if (ret < 0)
+ goto exit_unlock;
+
+ if (output.num_bytes != MSR_RD_REG_LEN - 1) {
+ ret = -EMSGSIZE;
+ goto exit_unlock;
+ }
+ if (output.status) {
+ ret = -EPROTOTYPE;
+ msg->fw_ret_code = output.status;
+ goto exit_unlock;
+ }
+ msg->mcamsr_in_out = output.value;
+
+exit_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+int rmi_mailbox_xfer(struct sbrmi_data *data,
+ struct apml_mbox_msg *msg)
+{
+ unsigned int bytes, ec;
+ int i, ret;
+ int sw_status;
+ u8 byte;
+
+ mutex_lock(&data->lock);
+
+ msg->fw_ret_code = 0;
+
+ /* Indicate firmware a command is to be serviced */
+ ret = regmap_write(data->regmap, SBRMI_INBNDMSG7, START_CMD);
+ if (ret < 0)
+ goto exit_unlock;
+
+ /* Write the command to SBRMI::InBndMsg_inst0 */
+ ret = regmap_write(data->regmap, SBRMI_INBNDMSG0, msg->cmd);
+ if (ret < 0)
+ goto exit_unlock;
+
+ /*
+	 * For both read and write, the initiator (BMC) writes
+ * Command Data In[31:0] to SBRMI::InBndMsg_inst[4:1]
+ * SBRMI_x3C(MSB):SBRMI_x39(LSB)
+ */
+ for (i = 0; i < AMD_SBI_MB_DATA_SIZE; i++) {
+ byte = (msg->mb_in_out >> i * 8) & 0xff;
+ ret = regmap_write(data->regmap, SBRMI_INBNDMSG1 + i, byte);
+ if (ret < 0)
+ goto exit_unlock;
+ }
+
+ /*
+ * Write 0x01 to SBRMI::SoftwareInterrupt to notify firmware to
+ * perform the requested read or write command
+ */
+ ret = regmap_write(data->regmap, SBRMI_SW_INTERRUPT, TRIGGER_MAILBOX);
+ if (ret < 0)
+ goto exit_unlock;
+
+ /*
+ * Firmware will write SBRMI::Status[SwAlertSts]=1 to generate
+ * an ALERT (if enabled) to initiator (BMC) to indicate completion
+ * of the requested command
+ */
+ ret = regmap_read_poll_timeout(data->regmap, SBRMI_STATUS, sw_status,
+ sw_status & SW_ALERT_MASK, 500, 2000000);
+ if (ret)
+ goto exit_unlock;
+
+ ret = regmap_read(data->regmap, SBRMI_OUTBNDMSG7, &ec);
+ if (ret || ec)
+ goto exit_clear_alert;
+ /*
+ * For a read operation, the initiator (BMC) reads the firmware
+ * response Command Data Out[31:0] from SBRMI::OutBndMsg_inst[4:1]
+ * {SBRMI_x34(MSB):SBRMI_x31(LSB)}.
+ */
+ for (i = 0; i < AMD_SBI_MB_DATA_SIZE; i++) {
+ ret = regmap_read(data->regmap,
+ SBRMI_OUTBNDMSG1 + i, &bytes);
+ if (ret < 0)
+ break;
+ msg->mb_in_out |= bytes << i * 8;
+ }
+
+exit_clear_alert:
+ /*
+ * BMC must write 1'b1 to SBRMI::Status[SwAlertSts] to clear the
+ * ALERT to initiator
+ */
+ ret = regmap_write(data->regmap, SBRMI_STATUS,
+ sw_status | SW_ALERT_MASK);
+ if (ec) {
+ ret = -EPROTOTYPE;
+ msg->fw_ret_code = ec;
+ }
+exit_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int apml_rmi_reg_xfer(struct sbrmi_data *data,
+ struct apml_reg_xfer_msg __user *arg)
+{
+ struct apml_reg_xfer_msg msg = { 0 };
+ unsigned int data_read;
+ int ret;
+
+ /* Copy the structure from user */
+ if (copy_from_user(&msg, arg, sizeof(struct apml_reg_xfer_msg)))
+ return -EFAULT;
+
+ mutex_lock(&data->lock);
+ if (msg.rflag) {
+ ret = regmap_read(data->regmap, msg.reg_addr, &data_read);
+ if (!ret)
+ msg.data_in_out = data_read;
+ } else {
+ ret = regmap_write(data->regmap, msg.reg_addr, msg.data_in_out);
+ }
+
+ mutex_unlock(&data->lock);
+
+ if (msg.rflag && !ret)
+ return copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg));
+ return ret;
+}
+
+static int apml_mailbox_xfer(struct sbrmi_data *data, struct apml_mbox_msg __user *arg)
+{
+ struct apml_mbox_msg msg = { 0 };
+ int ret;
+
+ /* Copy the structure from user */
+ if (copy_from_user(&msg, arg, sizeof(struct apml_mbox_msg)))
+ return -EFAULT;
+
+ /* Mailbox protocol */
+ ret = rmi_mailbox_xfer(data, &msg);
+ if (ret && ret != -EPROTOTYPE)
+ return ret;
+
+ return copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg));
+}
+
+static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user *arg)
+{
+ struct apml_cpuid_msg msg = { 0 };
+ int ret;
+
+ /* Copy the structure from user */
+ if (copy_from_user(&msg, arg, sizeof(struct apml_cpuid_msg)))
+ return -EFAULT;
+
+ /* CPUID Protocol */
+ ret = rmi_cpuid_read(data, &msg);
+ if (ret && ret != -EPROTOTYPE)
+ return ret;
+
+ return copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg));
+}
+
+static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __user *arg)
+{
+ struct apml_mcamsr_msg msg = { 0 };
+ int ret;
+
+ /* Copy the structure from user */
+ if (copy_from_user(&msg, arg, sizeof(struct apml_mcamsr_msg)))
+ return -EFAULT;
+
+ /* MCAMSR Protocol */
+ ret = rmi_mca_msr_read(data, &msg);
+ if (ret && ret != -EPROTOTYPE)
+ return ret;
+
+ return copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg));
+}
+
+static long sbrmi_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct sbrmi_data *data;
+
+ data = container_of(fp->private_data, struct sbrmi_data, sbrmi_misc_dev);
+ switch (cmd) {
+ case SBRMI_IOCTL_MBOX_CMD:
+ return apml_mailbox_xfer(data, argp);
+ case SBRMI_IOCTL_CPUID_CMD:
+ return apml_cpuid_xfer(data, argp);
+ case SBRMI_IOCTL_MCAMSR_CMD:
+ return apml_mcamsr_xfer(data, argp);
+ case SBRMI_IOCTL_REG_XFER_CMD:
+ return apml_rmi_reg_xfer(data, argp);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations sbrmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sbrmi_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+int create_misc_rmi_device(struct sbrmi_data *data,
+ struct device *dev)
+{
+ data->sbrmi_misc_dev.name = devm_kasprintf(dev,
+ GFP_KERNEL,
+ "sbrmi-%x",
+ data->dev_static_addr);
+ data->sbrmi_misc_dev.minor = MISC_DYNAMIC_MINOR;
+ data->sbrmi_misc_dev.fops = &sbrmi_fops;
+ data->sbrmi_misc_dev.parent = dev;
+ data->sbrmi_misc_dev.nodename = devm_kasprintf(dev,
+ GFP_KERNEL,
+ "sbrmi-%x",
+ data->dev_static_addr);
+ data->sbrmi_misc_dev.mode = 0600;
+
+ return misc_register(&data->sbrmi_misc_dev);
+}
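
The misc device registered by create_misc_rmi_device() exposes the mailbox,
CPUID, MCA MSR and raw register protocols to user space via ioctls on
/dev/sbrmi-<addr>. A user-space sketch of a mailbox read, assuming the
uapi header is installed, a device at static address 0x3c, and the raw
command value 0x1 (SBRMI_READ_PKG_PWR_CONSUMPTION is kernel-internal):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <misc/amd-apml.h>	/* struct apml_mbox_msg et al. */

	int main(void)
	{
		struct apml_mbox_msg msg = { .cmd = 0x1 };	/* pkg power */
		int fd = open("/dev/sbrmi-3c", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, SBRMI_IOCTL_MBOX_CMD, &msg) == 0)
			printf("package power: %llu mW\n",
			       (unsigned long long)msg.mb_in_out);
		else
			fprintf(stderr, "fw ret code: %u\n",
				(unsigned int)msg.fw_ret_code);
		close(fd);
		return 0;
	}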
diff --git a/drivers/misc/amd-sbi/rmi-core.h b/drivers/misc/amd-sbi/rmi-core.h
new file mode 100644
index 000000000000..975ae858e9fd
--- /dev/null
+++ b/drivers/misc/amd-sbi/rmi-core.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _SBRMI_CORE_H_
+#define _SBRMI_CORE_H_
+
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <uapi/misc/amd-apml.h>
+
+/* SB-RMI registers */
+enum sbrmi_reg {
+ SBRMI_REV,
+ SBRMI_CTRL,
+ SBRMI_STATUS,
+ SBRMI_OUTBNDMSG0 = 0x30,
+ SBRMI_OUTBNDMSG1,
+ SBRMI_OUTBNDMSG2,
+ SBRMI_OUTBNDMSG3,
+ SBRMI_OUTBNDMSG4,
+ SBRMI_OUTBNDMSG5,
+ SBRMI_OUTBNDMSG6,
+ SBRMI_OUTBNDMSG7,
+ SBRMI_INBNDMSG0,
+ SBRMI_INBNDMSG1,
+ SBRMI_INBNDMSG2,
+ SBRMI_INBNDMSG3,
+ SBRMI_INBNDMSG4,
+ SBRMI_INBNDMSG5,
+ SBRMI_INBNDMSG6,
+ SBRMI_INBNDMSG7,
+ SBRMI_SW_INTERRUPT,
+ SBRMI_THREAD128CS = 0x4b,
+};
+
+/*
+ * SB-RMI supports soft mailbox service request to MP1 (power management
+ * firmware) through SBRMI inbound/outbound message registers.
+ * SB-RMI message IDs
+ */
+enum sbrmi_msg_id {
+ SBRMI_READ_PKG_PWR_CONSUMPTION = 0x1,
+ SBRMI_WRITE_PKG_PWR_LIMIT,
+ SBRMI_READ_PKG_PWR_LIMIT,
+ SBRMI_READ_PKG_MAX_PWR_LIMIT,
+};
+
+/* Each client has this additional data */
+struct sbrmi_data {
+ struct miscdevice sbrmi_misc_dev;
+ struct regmap *regmap;
+ /* Mutex locking */
+ struct mutex lock;
+ u32 pwr_limit_max;
+ u8 dev_static_addr;
+ u8 rev;
+};
+
+int rmi_mailbox_xfer(struct sbrmi_data *data, struct apml_mbox_msg *msg);
+#ifdef CONFIG_AMD_SBRMI_HWMON
+int create_hwmon_sensor_device(struct device *dev, struct sbrmi_data *data);
+#else
+static inline int create_hwmon_sensor_device(struct device *dev, struct sbrmi_data *data)
+{
+ return 0;
+}
+#endif
+int create_misc_rmi_device(struct sbrmi_data *data, struct device *dev);
+#endif /*_SBRMI_CORE_H_*/
diff --git a/drivers/misc/amd-sbi/rmi-hwmon.c b/drivers/misc/amd-sbi/rmi-hwmon.c
new file mode 100644
index 000000000000..f4f015605daa
--- /dev/null
+++ b/drivers/misc/amd-sbi/rmi-hwmon.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * rmi-hwmon.c - hwmon sensor support for side band RMI
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <uapi/misc/amd-apml.h>
+#include "rmi-core.h"
+
+/* Do not allow setting negative power limit */
+#define SBRMI_PWR_MIN 0
+
+static int sbrmi_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct sbrmi_data *data = dev_get_drvdata(dev);
+ struct apml_mbox_msg msg = { 0 };
+ int ret;
+
+ if (!data)
+ return -ENODEV;
+
+ if (type != hwmon_power)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_power_input:
+ msg.cmd = SBRMI_READ_PKG_PWR_CONSUMPTION;
+ ret = rmi_mailbox_xfer(data, &msg);
+ break;
+ case hwmon_power_cap:
+ msg.cmd = SBRMI_READ_PKG_PWR_LIMIT;
+ ret = rmi_mailbox_xfer(data, &msg);
+ break;
+ case hwmon_power_cap_max:
+ msg.mb_in_out = data->pwr_limit_max;
+ ret = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret < 0)
+ return ret;
+ /* hwmon power attributes are in microWatt */
+ *val = (long)msg.mb_in_out * 1000;
+ return ret;
+}
+
+static int sbrmi_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct sbrmi_data *data = dev_get_drvdata(dev);
+ struct apml_mbox_msg msg = { 0 };
+
+ if (!data)
+ return -ENODEV;
+
+	if (type != hwmon_power || attr != hwmon_power_cap)
+ return -EINVAL;
+ /*
+ * hwmon power attributes are in microWatt
+ * mailbox read/write is in mWatt
+ */
+ val /= 1000;
+
+ val = clamp_val(val, SBRMI_PWR_MIN, data->pwr_limit_max);
+
+ msg.cmd = SBRMI_WRITE_PKG_PWR_LIMIT;
+ msg.mb_in_out = val;
+
+ return rmi_mailbox_xfer(data, &msg);
+}
+
+static umode_t sbrmi_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_cap_max:
+ return 0444;
+ case hwmon_power_cap:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info * const sbrmi_info[] = {
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_CAP | HWMON_P_CAP_MAX),
+ NULL
+};
+
+static const struct hwmon_ops sbrmi_hwmon_ops = {
+ .is_visible = sbrmi_is_visible,
+ .read = sbrmi_read,
+ .write = sbrmi_write,
+};
+
+static const struct hwmon_chip_info sbrmi_chip_info = {
+ .ops = &sbrmi_hwmon_ops,
+ .info = sbrmi_info,
+};
+
+int create_hwmon_sensor_device(struct device *dev, struct sbrmi_data *data)
+{
+ struct device *hwmon_dev;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "sbrmi", data,
+ &sbrmi_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
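Assuming the attributes register under the usual hwmon names for a single power channel (power1_input, power1_cap, power1_cap_max — the sysfs path and hwmon index below are illustrative, not guaranteed), a user-space reader might look like:

    /* Hypothetical user-space consumer of the sysfs attributes */
    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_input", "r");
    	long uw;

    	if (!f)
    		return 1;
    	if (fscanf(f, "%ld", &uw) != 1) {
    		fclose(f);
    		return 1;
    	}
    	fclose(f);
    	printf("package power: %ld uW (%ld mW)\n", uw, uw / 1000);
    	return 0;
    }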
diff --git a/drivers/misc/amd-sbi/rmi-i2c.c b/drivers/misc/amd-sbi/rmi-i2c.c
new file mode 100644
index 000000000000..f891f5af4bc6
--- /dev/null
+++ b/drivers/misc/amd-sbi/rmi-i2c.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * rmi-i2c.c - Side band RMI over I2C support for AMD
+ * out-of-band management
+ *
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include "rmi-core.h"
+
+static int sbrmi_enable_alert(struct sbrmi_data *data)
+{
+ int ctrl, ret;
+
+ /*
+ * Enable the SB-RMI software alert status
+ * by writing 0 to bit 4 of the control register (0x1)
+ */
+ ret = regmap_read(data->regmap, SBRMI_CTRL, &ctrl);
+ if (ret < 0)
+ return ret;
+
+ if (ctrl & 0x10) {
+ ctrl &= ~0x10;
+ return regmap_write(data->regmap, SBRMI_CTRL, ctrl);
+ }
+
+ return 0;
+}
+
+static int sbrmi_get_max_pwr_limit(struct sbrmi_data *data)
+{
+ struct apml_mbox_msg msg = { 0 };
+ int ret;
+
+ msg.cmd = SBRMI_READ_PKG_MAX_PWR_LIMIT;
+ ret = rmi_mailbox_xfer(data, &msg);
+ if (ret < 0)
+ return ret;
+ data->pwr_limit_max = msg.mb_in_out;
+
+ return ret;
+}
+
+static int sbrmi_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct sbrmi_data *data;
+ struct regmap_config sbrmi_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(struct sbrmi_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->lock);
+
+ data->regmap = devm_regmap_init_i2c(client, &sbrmi_i2c_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ /* Enable alert for SB-RMI sequence */
+ ret = sbrmi_enable_alert(data);
+ if (ret < 0)
+ return ret;
+
+ /* Cache maximum power limit */
+ ret = sbrmi_get_max_pwr_limit(data);
+ if (ret < 0)
+ return ret;
+
+ data->dev_static_addr = client->addr;
+ dev_set_drvdata(dev, data);
+
+ ret = create_hwmon_sensor_device(dev, data);
+ if (ret < 0)
+ return ret;
+ return create_misc_rmi_device(data, dev);
+}
+
+static void sbrmi_i2c_remove(struct i2c_client *client)
+{
+ struct sbrmi_data *data = dev_get_drvdata(&client->dev);
+
+ misc_deregister(&data->sbrmi_misc_dev);
+ /* Clear fops and parent of the misc device */
+ data->sbrmi_misc_dev.fops = NULL;
+ data->sbrmi_misc_dev.parent = NULL;
+ dev_info(&client->dev, "Removed sbrmi-i2c driver\n");
+}
+
+static const struct i2c_device_id sbrmi_id[] = {
+ {"sbrmi-i2c"},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, sbrmi_id);
+
+static const struct of_device_id __maybe_unused sbrmi_of_match[] = {
+ {
+ .compatible = "amd,sbrmi",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sbrmi_of_match);
+
+static struct i2c_driver sbrmi_driver = {
+ .driver = {
+ .name = "sbrmi-i2c",
+ .of_match_table = of_match_ptr(sbrmi_of_match),
+ },
+ .probe = sbrmi_i2c_probe,
+ .remove = sbrmi_i2c_remove,
+ .id_table = sbrmi_id,
+};
+
+module_i2c_driver(sbrmi_driver);
+
+MODULE_AUTHOR("Akshay Gupta <akshay.gupta@amd.com>");
+MODULE_AUTHOR("Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>");
+MODULE_DESCRIPTION("Hwmon driver for AMD SB-RMI emulated sensor");
+MODULE_LICENSE("GPL");
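On boards without device tree, the device could be declared from platform code; the 7-bit address below is purely illustrative (SB-RMI addresses are platform specific):

    /* Hypothetical board code; 0x3c is an example address */
    static const struct i2c_board_info sbrmi_board_info = {
    	I2C_BOARD_INFO("sbrmi-i2c", 0x3c),
    };

The entry would then be handed to i2c_new_client_device() on the appropriate adapter, matching the "sbrmi-i2c" id_table entry above.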
diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h
index 386884c2a263..9344c2366a4b 100644
--- a/drivers/misc/bcm-vk/bcm_vk.h
+++ b/drivers/misc/bcm-vk/bcm_vk.h
@@ -311,7 +311,6 @@ struct bcm_vk_peer_log {
u32 wr_idx;
u32 buf_size;
u32 mask;
- char data[];
};
/* max buf size allowed */
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
index 44a2dd80054d..e6c42b772e96 100644
--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -43,7 +43,7 @@ struct bcm_vk_tty_chan {
static void bcm_vk_tty_poll(struct timer_list *t)
{
- struct bcm_vk *vk = from_timer(vk, t, serial_timer);
+ struct bcm_vk *vk = timer_container_of(vk, t, serial_timer);
queue_work(vk->tty_wq_thread, &vk->tty_wq_work);
mod_timer(&vk->serial_timer, jiffies + SERIAL_TIMER_VALUE);
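timer_container_of() is the renamed from_timer() helper: it resolves the structure that embeds the timer_list, in the spirit of container_of(). A sketch of the equivalent open-coded form, using the names above:

    struct bcm_vk *vk = container_of(t, struct bcm_vk, serial_timer);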
diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c
index a5549eaf52d0..8e7ea2c9142d 100644
--- a/drivers/misc/cardreader/alcor_pci.c
+++ b/drivers/misc/cardreader/alcor_pci.c
@@ -121,23 +121,23 @@ static int alcor_pci_probe(struct pci_dev *pdev,
priv->cfg = cfg;
priv->irq = pdev->irq;
- ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI);
+ ret = pcim_request_all_regions(pdev, DRV_NAME_ALCOR_PCI);
if (ret) {
dev_err(&pdev->dev, "Cannot request region\n");
- ret = -ENOMEM;
+ ret = -EBUSY;
goto error_free_ida;
}
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
ret = -ENODEV;
- goto error_release_regions;
+ goto error_free_ida;
}
priv->iobase = pcim_iomap(pdev, bar, 0);
if (!priv->iobase) {
ret = -ENOMEM;
- goto error_release_regions;
+ goto error_free_ida;
}
/* make sure irqs are disabled */
@@ -147,7 +147,7 @@ static int alcor_pci_probe(struct pci_dev *pdev,
ret = dma_set_mask_and_coherent(priv->dev, AU6601_SDMA_MASK);
if (ret) {
dev_err(priv->dev, "Failed to set DMA mask\n");
- goto error_release_regions;
+ goto error_free_ida;
}
pci_set_master(pdev);
@@ -169,8 +169,6 @@ static int alcor_pci_probe(struct pci_dev *pdev,
error_clear_drvdata:
pci_clear_master(pdev);
pci_set_drvdata(pdev, NULL);
-error_release_regions:
- pci_release_regions(pdev);
error_free_ida:
ida_free(&alcor_pci_idr, priv->id);
return ret;
@@ -186,7 +184,6 @@ static void alcor_pci_remove(struct pci_dev *pdev)
ida_free(&alcor_pci_idr, priv->id);
- pci_release_regions(pdev);
pci_clear_master(pdev);
pci_set_drvdata(pdev, NULL);
}
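Since pcim_request_all_regions() is a managed (devres) request, the regions are released automatically on driver detach, which is why the error path and remove() above drop their explicit pci_release_regions() calls. A sketch of the resulting probe idiom, assuming this managed style:

    ret = pcim_request_all_regions(pdev, DRV_NAME_ALCOR_PCI);
    if (ret)
    	return ret;	/* nothing to unwind: devres releases on detach */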
diff --git a/drivers/misc/cardreader/rts5264.c b/drivers/misc/cardreader/rts5264.c
index 8be4ed7d9d47..06d7a8a95fd6 100644
--- a/drivers/misc/cardreader/rts5264.c
+++ b/drivers/misc/cardreader/rts5264.c
@@ -605,6 +605,22 @@ static int rts5264_extra_init_hw(struct rtsx_pcr *pcr)
return 0;
}
+static int rts5264_optimize_phy(struct rtsx_pcr *pcr)
+{
+ u16 subvendor, subdevice, val;
+
+ subvendor = pcr->pci->subsystem_vendor;
+ subdevice = pcr->pci->subsystem_device;
+
+ if ((subvendor == 0x1028) && (subdevice == 0x0CE1)) {
+ rtsx_pci_read_phy_register(pcr, _PHY_REV0, &val);
+ if ((val & 0xFE00) > 0x3800)
+ rtsx_pci_update_phy(pcr, _PHY_REV0, 0x1FF, 0x3800);
+ }
+
+ return 0;
+}
+
static void rts5264_enable_aspm(struct rtsx_pcr *pcr, bool enable)
{
u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
@@ -682,6 +698,7 @@ static const struct pcr_ops rts5264_pcr_ops = {
.turn_on_led = rts5264_turn_on_led,
.turn_off_led = rts5264_turn_off_led,
.extra_init_hw = rts5264_extra_init_hw,
+ .optimize_phy = rts5264_optimize_phy,
.enable_auto_blink = rts5264_enable_auto_blink,
.disable_auto_blink = rts5264_disable_auto_blink,
.card_power_on = rts5264_card_power_on,
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index be3d4e0e50cc..a7b066c48740 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -420,25 +420,6 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
pcr->sgi++;
}
-int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int num_sg, bool read, int timeout)
-{
- int err = 0, count;
-
- pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
- count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
- if (count < 1)
- return -EINVAL;
- pcr_dbg(pcr, "DMA mapping count: %d\n", count);
-
- err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
-
- rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
-
int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read)
{
@@ -1197,33 +1178,6 @@ void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
}
-int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
-{
- rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
- MS_CLK_EN | SD40_CLK_EN, 0);
- rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
- rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
-
- msleep(50);
-
- rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
-
- return 0;
-}
-
-int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
-{
- rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
- MS_CLK_EN | SD40_CLK_EN, 0);
-
- rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
-
- rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
- rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
-
- return 0;
-}
-
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 9215d66de00c..8e5951b61143 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -127,7 +127,5 @@ int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr);
void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr);
-int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
-int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
#endif
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 7314c8d9ae75..148107a4547c 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -31,7 +31,7 @@ static const struct mfd_cell rtsx_usb_cells[] = {
static void rtsx_usb_sg_timed_out(struct timer_list *t)
{
- struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
+ struct rtsx_ucr *ucr = timer_container_of(ucr, t, sg_timer);
dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
usb_sg_cancel(&ucr->current_sg);
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index 18fc1aaa5cdd..2b6778d8d166 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/cs5535.h>
#include <linux/slab.h>
+#include <asm/msr.h>
#define DRV_NAME "cs5535-mfgpt"
diff --git a/drivers/misc/echo/Kconfig b/drivers/misc/echo/Kconfig
deleted file mode 100644
index ce0a37a47fc1..000000000000
--- a/drivers/misc/echo/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config ECHO
- tristate "Line Echo Canceller support"
- help
- This driver provides line echo cancelling support for mISDN and
- Zaptel drivers.
-
- To compile this driver as a module, choose M here. The module
- will be called echo.
diff --git a/drivers/misc/echo/Makefile b/drivers/misc/echo/Makefile
deleted file mode 100644
index 5b97467ffb7d..000000000000
--- a/drivers/misc/echo/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ECHO) += echo.o
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
deleted file mode 100644
index 3c4eaba86576..000000000000
--- a/drivers/misc/echo/echo.c
+++ /dev/null
@@ -1,589 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SpanDSP - a series of DSP components for telephony
- *
- * echo.c - A line echo canceller. This code is being developed
- * against and partially complies with G168.
- *
- * Written by Steve Underwood <steveu@coppice.org>
- * and David Rowe <david_at_rowetel_dot_com>
- *
- * Copyright (C) 2001, 2003 Steve Underwood, 2007 David Rowe
- *
- * Based on a bit from here, a bit from there, eye of toad, ear of
- * bat, 15 years of failed attempts by David and a few fried brain
- * cells.
- *
- * All rights reserved.
- */
-
-/*! \file */
-
-/* Implementation Notes
- David Rowe
- April 2007
-
- This code started life as Steve's NLMS algorithm with a tap
- rotation algorithm to handle divergence during double talk. I
- added a Geigel Double Talk Detector (DTD) [2] and performed some
- G168 tests. However I had trouble meeting the G168 requirements,
- especially for double talk - there were always cases where my DTD
- failed, for example where near end speech was under the 6dB
- threshold required for declaring double talk.
-
- So I tried a two path algorithm [1], which has so far given better
- results. The original tap rotation/Geigel algorithm is available
- in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit.
- It's probably possible to make it work if someone wants to put some
- serious work into it.
-
- At present no special treatment is provided for tones, which
- generally cause NLMS algorithms to diverge. Initial runs of a
- subset of the G168 tests for tones (e.g. ./echo_test 6) show the
- current algorithm is passing OK, which is kind of surprising. The
- full set of tests needs to be performed to confirm this result.
-
- One other interesting change is that I have managed to get the NLMS
- code to work with 16 bit coefficients, rather than the original 32
- bit coefficients. This reduces the MIPs and storage required.
- I evaluated the 16 bit port using g168_tests.sh and listening tests
- on 4 real-world samples.
-
- I also attempted the implementation of a block based NLMS update
- [2] but although this passes g168_tests.sh it didn't converge well
- on the real-world samples. I have no idea why, perhaps a scaling
- problem. The block based code is also available in SVN
- http://svn.rowetel.com/software/oslec/tags/before_16bit. If this
- code can be debugged, it will lead to further reduction in MIPS, as
- the block update code maps nicely onto DSP instruction sets (it's a
- dot product) compared to the current sample-by-sample update.
-
- Steve also has some nice notes on echo cancellers in echo.h
-
- References:
-
- [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo
- Path Models", IEEE Transactions on communications, COM-25,
- No. 6, June
- 1977.
- https://www.rowetel.com/images/echo/dual_path_paper.pdf
-
- [2] The classic, very useful paper that tells you how to
- actually build a real world echo canceller:
- Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice
- Echo Canceller with a TMS320020,
- https://www.rowetel.com/images/echo/spra129.pdf
-
- [3] I have written a series of blog posts on this work, here is
- Part 1: http://www.rowetel.com/blog/?p=18
-
- [4] The source code http://svn.rowetel.com/software/oslec/
-
- [5] A nice reference on LMS filters:
- https://en.wikipedia.org/wiki/Least_mean_squares_filter
-
- Credits:
-
- Thanks to Steve Underwood, Jean-Marc Valin, and Ramakrishnan
- Muthukrishnan for their suggestions and email discussions. Thanks
- also to those people who collected echo samples for me such as
- Mark, Pawel, and Pavel.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "echo.h"
-
-#define MIN_TX_POWER_FOR_ADAPTION 64
-#define MIN_RX_POWER_FOR_ADAPTION 64
-#define DTD_HANGOVER 600 /* 600 samples, or 75ms */
-#define DC_LOG2BETA 3 /* log2() of DC filter Beta */
-
-/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */
-
-static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift)
-{
- int i;
-
- int offset1;
- int offset2;
- int factor;
- int exp;
-
- if (shift > 0)
- factor = clean << shift;
- else
- factor = clean >> -shift;
-
- /* Update the FIR taps */
-
- offset2 = ec->curr_pos;
- offset1 = ec->taps - offset2;
-
- for (i = ec->taps - 1; i >= offset1; i--) {
- exp = (ec->fir_state_bg.history[i - offset1] * factor);
- ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
- }
- for (; i >= 0; i--) {
- exp = (ec->fir_state_bg.history[i + offset2] * factor);
- ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
- }
-}
-
-static inline int top_bit(unsigned int bits)
-{
- if (bits == 0)
- return -1;
- else
- return (int)fls((int32_t) bits) - 1;
-}
-
-struct oslec_state *oslec_create(int len, int adaption_mode)
-{
- struct oslec_state *ec;
- int i;
- const int16_t *history;
-
- ec = kzalloc(sizeof(*ec), GFP_KERNEL);
- if (!ec)
- return NULL;
-
- ec->taps = len;
- ec->log2taps = top_bit(len);
- ec->curr_pos = ec->taps - 1;
-
- ec->fir_taps16[0] =
- kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
- if (!ec->fir_taps16[0])
- goto error_oom_0;
-
- ec->fir_taps16[1] =
- kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
- if (!ec->fir_taps16[1])
- goto error_oom_1;
-
- history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps);
- if (!history)
- goto error_state;
- history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps);
- if (!history)
- goto error_state_bg;
-
- for (i = 0; i < 5; i++)
- ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
-
- ec->cng_level = 1000;
- oslec_adaption_mode(ec, adaption_mode);
-
- ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
- if (!ec->snapshot)
- goto error_snap;
-
- ec->cond_met = 0;
- ec->pstates = 0;
- ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0;
- ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0;
- ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
- ec->lbgn = ec->lbgn_acc = 0;
- ec->lbgn_upper = 200;
- ec->lbgn_upper_acc = ec->lbgn_upper << 13;
-
- return ec;
-
-error_snap:
- fir16_free(&ec->fir_state_bg);
-error_state_bg:
- fir16_free(&ec->fir_state);
-error_state:
- kfree(ec->fir_taps16[1]);
-error_oom_1:
- kfree(ec->fir_taps16[0]);
-error_oom_0:
- kfree(ec);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(oslec_create);
-
-void oslec_free(struct oslec_state *ec)
-{
- int i;
-
- fir16_free(&ec->fir_state);
- fir16_free(&ec->fir_state_bg);
- for (i = 0; i < 2; i++)
- kfree(ec->fir_taps16[i]);
- kfree(ec->snapshot);
- kfree(ec);
-}
-EXPORT_SYMBOL_GPL(oslec_free);
-
-void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode)
-{
- ec->adaption_mode = adaption_mode;
-}
-EXPORT_SYMBOL_GPL(oslec_adaption_mode);
-
-void oslec_flush(struct oslec_state *ec)
-{
- int i;
-
- ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0;
- ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0;
- ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
-
- ec->lbgn = ec->lbgn_acc = 0;
- ec->lbgn_upper = 200;
- ec->lbgn_upper_acc = ec->lbgn_upper << 13;
-
- ec->nonupdate_dwell = 0;
-
- fir16_flush(&ec->fir_state);
- fir16_flush(&ec->fir_state_bg);
- ec->fir_state.curr_pos = ec->taps - 1;
- ec->fir_state_bg.curr_pos = ec->taps - 1;
- for (i = 0; i < 2; i++)
- memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t));
-
- ec->curr_pos = ec->taps - 1;
- ec->pstates = 0;
-}
-EXPORT_SYMBOL_GPL(oslec_flush);
-
-void oslec_snapshot(struct oslec_state *ec)
-{
- memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t));
-}
-EXPORT_SYMBOL_GPL(oslec_snapshot);
-
-/* Dual Path Echo Canceller */
-
-int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
-{
- int32_t echo_value;
- int clean_bg;
- int tmp;
- int tmp1;
-
- /*
- * Input scaling was found to be required to prevent problems when tx
- * starts clipping. Another possible way to handle this would be
- * filter coefficient scaling.
- */
-
- ec->tx = tx;
- ec->rx = rx;
- tx >>= 1;
- rx >>= 1;
-
- /*
- * Filter DC, 3dB point is 160Hz (I think), note 32 bit precision
- * required otherwise values do not track down to 0. Zero at DC, Pole
- * at (1-Beta) on real axis. Some chip sets (like Si labs) don't
- * need this, but something like a $10 X100P card does. Any DC really
- * slows down convergence.
- *
- * Note: removes some low frequency from the signal, this reduces the
- * speech quality when listening to samples through headphones but may
- * not be obvious through a telephone handset.
- *
- * Note that the 3dB frequency in radians is approx Beta, e.g. for Beta
- * = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
- */
-
- if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
- tmp = rx << 15;
-
- /*
- * Make sure the gain of the HPF is 1.0. This can still
- * saturate a little under impulse conditions, and it might
- * roll to 32768 and need clipping on sustained peak level
- * signals. However, the scale of such clipping is small, and
- * the error due to any saturation should not markedly affect
- * the downstream processing.
- */
- tmp -= (tmp >> 4);
-
- ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2;
-
- /*
- * hard limit filter to prevent clipping. Note that at this
- * stage rx should be limited to +/- 16383 due to right shift
- * above
- */
- tmp1 = ec->rx_1 >> 15;
- if (tmp1 > 16383)
- tmp1 = 16383;
- if (tmp1 < -16383)
- tmp1 = -16383;
- rx = tmp1;
- ec->rx_2 = tmp;
- }
-
- /* Block average of power in the filter states. Used for
- adaption power calculation. */
-
- {
- int new, old;
-
- /* efficient "out with the old and in with the new" algorithm so
- we don't have to recalculate over the whole block of
- samples. */
- new = (int)tx * (int)tx;
- old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
- (int)ec->fir_state.history[ec->fir_state.curr_pos];
- ec->pstates +=
- ((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps;
- if (ec->pstates < 0)
- ec->pstates = 0;
- }
-
- /* Calculate short term average levels using simple single pole IIRs */
-
- ec->ltxacc += abs(tx) - ec->ltx;
- ec->ltx = (ec->ltxacc + (1 << 4)) >> 5;
- ec->lrxacc += abs(rx) - ec->lrx;
- ec->lrx = (ec->lrxacc + (1 << 4)) >> 5;
-
- /* Foreground filter */
-
- ec->fir_state.coeffs = ec->fir_taps16[0];
- echo_value = fir16(&ec->fir_state, tx);
- ec->clean = rx - echo_value;
- ec->lcleanacc += abs(ec->clean) - ec->lclean;
- ec->lclean = (ec->lcleanacc + (1 << 4)) >> 5;
-
- /* Background filter */
-
- echo_value = fir16(&ec->fir_state_bg, tx);
- clean_bg = rx - echo_value;
- ec->lclean_bgacc += abs(clean_bg) - ec->lclean_bg;
- ec->lclean_bg = (ec->lclean_bgacc + (1 << 4)) >> 5;
-
- /* Background Filter adaption */
-
- /* Almost always adapt bg filter, just simple DT and energy
- detection to minimise adaption in cases of strong double talk.
- However this is not critical for the dual path algorithm.
- */
- ec->factor = 0;
- ec->shift = 0;
- if (!ec->nonupdate_dwell) {
- int p, logp, shift;
-
- /* Determine:
-
- f = Beta * clean_bg_rx/P ------ (1)
-
- where P is the total power in the filter states.
-
- The Boffins have shown that if we obey (1) we converge
- quickly and avoid instability.
-
- The correct factor f must be in Q30, as this is the fixed
- point format required by the lms_adapt_bg() function,
- therefore the scaled version of (1) is:
-
- (2^30) * f = (2^30) * Beta * clean_bg_rx/P
- factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
-
- We have chosen Beta = 0.25 by experiment, so:
-
- factor = (2^30) * (2^-2) * clean_bg_rx/P
-
- (30 - 2 - log2(P))
- factor = clean_bg_rx 2 ----- (3)
-
- To avoid a divide we approximate log2(P) as top_bit(P),
- which returns the position of the highest non-zero bit in
- P. This approximation introduces an error as large as a
- factor of 2, but the algorithm seems to handle it OK.
-
- Come to think of it a divide may not be a big deal on a
- modern DSP, so it's probably worth checking out the cycles
- for a divide versus a top_bit() implementation.
- */
-
- p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates;
- logp = top_bit(p) + ec->log2taps;
- shift = 30 - 2 - logp;
- ec->shift = shift;
-
- lms_adapt_bg(ec, clean_bg, shift);
- }
-
- /* very simple DTD to make sure we don't try to adapt with strong
- near end speech */
-
- ec->adapt = 0;
- if ((ec->lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->lrx > ec->ltx))
- ec->nonupdate_dwell = DTD_HANGOVER;
- if (ec->nonupdate_dwell)
- ec->nonupdate_dwell--;
-
- /* Transfer logic */
-
- /* These conditions are from the dual path paper [1], I messed with
- them a bit to improve performance. */
-
- if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
- (ec->nonupdate_dwell == 0) &&
- /* (ec->Lclean_bg < 0.875*ec->Lclean) */
- (8 * ec->lclean_bg < 7 * ec->lclean) &&
- /* (ec->Lclean_bg < 0.125*ec->Ltx) */
- (8 * ec->lclean_bg < ec->ltx)) {
- if (ec->cond_met == 6) {
- /*
- * BG filter has had better results for 6 consecutive
- * samples
- */
- ec->adapt = 1;
- memcpy(ec->fir_taps16[0], ec->fir_taps16[1],
- ec->taps * sizeof(int16_t));
- } else
- ec->cond_met++;
- } else
- ec->cond_met = 0;
-
- /* Non-Linear Processing */
-
- ec->clean_nlp = ec->clean;
- if (ec->adaption_mode & ECHO_CAN_USE_NLP) {
- /*
- * Non-linear processor - a fancy way to say "zap small
- * signals, to avoid residual echo due to (uLaw/ALaw)
- * non-linearity in the channel.".
- */
-
- if ((16 * ec->lclean < ec->ltx)) {
- /*
- * Our e/c has improved echo by at least 24 dB (each
- * factor of 2 is 6dB, so 2*2*2*2=16 is the same as
- * 6+6+6+6=24dB)
- */
- if (ec->adaption_mode & ECHO_CAN_USE_CNG) {
- ec->cng_level = ec->lbgn;
-
- /*
- * Very elementary comfort noise generation.
- * Just random numbers rolled off very vaguely
- * Hoth-like. DR: This noise doesn't sound
- * quite right to me - I suspect there are some
- * overflow issues in the filtering as it's too
- * "crackly".
- * TODO: debug this, maybe just play noise at
- * high level or look at spectrum.
- */
-
- ec->cng_rndnum =
- 1664525U * ec->cng_rndnum + 1013904223U;
- ec->cng_filter =
- ((ec->cng_rndnum & 0xFFFF) - 32768 +
- 5 * ec->cng_filter) >> 3;
- ec->clean_nlp =
- (ec->cng_filter * ec->cng_level * 8) >> 14;
-
- } else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) {
- /* This sounds much better than CNG */
- if (ec->clean_nlp > ec->lbgn)
- ec->clean_nlp = ec->lbgn;
- if (ec->clean_nlp < -ec->lbgn)
- ec->clean_nlp = -ec->lbgn;
- } else {
- /*
- * just mute the residual, doesn't sound very
- * good, used mainly in G168 tests
- */
- ec->clean_nlp = 0;
- }
- } else {
- /*
- * Background noise estimator. I tried a few
- * algorithms here without much luck. This very simple
- * one seems to work best, we just average the level
- * using a slow (1 sec time const) filter if the
- current level is less than an (experimentally
- derived) constant. This means we don't include high
- * level signals like near end speech. When combined
- * with CNG or especially CLIP seems to work OK.
- */
- if (ec->lclean < 40) {
- ec->lbgn_acc += abs(ec->clean) - ec->lbgn;
- ec->lbgn = (ec->lbgn_acc + (1 << 11)) >> 12;
- }
- }
- }
-
- /* Roll around the taps buffer */
- if (ec->curr_pos <= 0)
- ec->curr_pos = ec->taps;
- ec->curr_pos--;
-
- if (ec->adaption_mode & ECHO_CAN_DISABLE)
- ec->clean_nlp = rx;
-
- /* Output scaled back up again to match input scaling */
-
- return (int16_t) ec->clean_nlp << 1;
-}
-EXPORT_SYMBOL_GPL(oslec_update);
-
-/* This function is separate from the echo canceller, as it is usually called
- as part of the tx process. See rx HP (DC blocking) filter above, it's
- the same design.
-
- Some soft phones send speech signals with a lot of low frequency
- energy, e.g. down to 20Hz. This can make the hybrid non-linear
- which causes the echo canceller to fall over. This filter can help
- by removing any low frequency before it gets to the tx port of the
- hybrid.
-
- It can also help by removing any DC in the tx signal. DC is bad
- for LMS algorithms.
-
- This is one of the classic DC removal filters, adjusted to provide
- sufficient bass rolloff to meet the above requirement to protect hybrids
- from things that upset them. The difference between successive samples
- produces a lousy HPF, and then a suitably placed pole flattens things out.
- The final result is a nicely rolled off bass end. The filtering is
- implemented with extended fractional precision, which noise shapes things,
- giving very clean DC removal.
-*/
-
-int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx)
-{
- int tmp;
- int tmp1;
-
- if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
- tmp = tx << 15;
-
- /*
- * Make sure the gain of the HPF is 1.0. The first can still
- * saturate a little under impulse conditions, and it might
- * roll to 32768 and need clipping on sustained peak level
- * signals. However, the scale of such clipping is small, and
- * the error due to any saturation should not markedly affect
- * the downstream processing.
- */
- tmp -= (tmp >> 4);
-
- ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2;
- tmp1 = ec->tx_1 >> 15;
- if (tmp1 > 32767)
- tmp1 = 32767;
- if (tmp1 < -32767)
- tmp1 = -32767;
- tx = tmp1;
- ec->tx_2 = tmp;
- }
-
- return tx;
-}
-EXPORT_SYMBOL_GPL(oslec_hpf_tx);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Rowe");
-MODULE_DESCRIPTION("Open Source Line Echo Canceller");
-MODULE_VERSION("0.3.0");
diff --git a/drivers/misc/echo/echo.h b/drivers/misc/echo/echo.h
deleted file mode 100644
index 56b4b95fd020..000000000000
--- a/drivers/misc/echo/echo.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SpanDSP - a series of DSP components for telephony
- *
- * echo.c - A line echo canceller. This code is being developed
- * against and partially complies with G168.
- *
- * Written by Steve Underwood <steveu@coppice.org>
- * and David Rowe <david_at_rowetel_dot_com>
- *
- * Copyright (C) 2001 Steve Underwood and 2007 David Rowe
- *
- * All rights reserved.
- */
-
-#ifndef __ECHO_H
-#define __ECHO_H
-
-/*
-Line echo cancellation for voice
-
-What does it do?
-
-This module aims to provide G.168-2002 compliant echo cancellation, to remove
-electrical echoes (e.g. from 2-4 wire hybrids) from voice calls.
-
-How does it work?
-
-The heart of the echo canceller is a FIR filter. This is adapted to match the
-echo impulse response of the telephone line. It must be long enough to
-adequately cover the duration of that impulse response. The signal transmitted
-to the telephone line is passed through the FIR filter. Once the FIR is
-properly adapted, the resulting output is an estimate of the echo signal
-received from the line. This is subtracted from the received signal. The result
-is an estimate of the signal which originated at the far end of the line, free
-from echos of our own transmitted signal.
-
-The least mean squares (LMS) algorithm is attributed to Widrow and Hoff, and
-was introduced in 1960. It is the commonest form of filter adaption used in
-things like modem line equalisers and line echo cancellers. There it works very
-well. However, it only works well for signals of constant amplitude. It works
-very poorly for things like speech echo cancellation, where the signal level
-varies widely. This is quite easy to fix. If the signal level is normalised -
-similar to applying AGC - LMS can work as well for a signal of varying
-amplitude as it does for a modem signal. This normalised least mean squares
-(NLMS) algorithm is the commonest one used for speech echo cancellation. Many
-other algorithms exist - e.g. RLS (essentially the same as Kalman filtering),
-FAP, etc. Some perform significantly better than NLMS. However, factors such
-as computational complexity and patents favour the use of NLMS.
-
-A simple refinement to NLMS can improve its performance with speech. NLMS tends
-to adapt best to the strongest parts of a signal. If the signal is white noise,
-the NLMS algorithm works very well. However, speech has more low frequency than
-high frequency content. Pre-whitening (i.e. filtering the signal to flatten its
-spectrum) the echo signal improves the adapt rate for speech, and ensures the
-final residual signal is not heavily biased towards high frequencies. A very
-low complexity filter is adequate for this, so pre-whitening adds little to the
-compute requirements of the echo canceller.
-
-An FIR filter adapted using pre-whitened NLMS performs well, provided certain
-conditions are met:
-
- - The transmitted signal has poor self-correlation.
- - There is no signal being generated within the environment being
- cancelled.
-
-The difficulty is that neither of these can be guaranteed.
-
-If the adaption is performed while transmitting noise (or something fairly
-noise like, such as voice) the adaption works very well. If the adaption is
-performed while transmitting something highly correlative (typically narrow
-band energy such as signalling tones or DTMF), the adaption can go seriously
-wrong. The reason is there is only one solution for the adaption on a near
-random signal - the impulse response of the line. For a repetitive signal,
-there are any number of solutions which converge the adaption, and nothing
-guides the adaption to choose the generalised one. Allowing an untrained
-canceller to converge on this kind of narrowband energy is probably a good
-thing, since at least it cancels the tones. Allowing a well converged
-canceller to continue converging on such energy is just a way to ruin its
-generalised adaption. A narrowband detector is needed, so adaption can be
-suspended at appropriate times.
-
-The adaption process is based on trying to eliminate the received signal. When
-there is any signal from within the environment being cancelled it may upset
-the adaption process. Similarly, if the signal we are transmitting is small,
-noise may dominate and disturb the adaption process. If we can ensure that the
-adaption is only performed when we are transmitting a significant signal level,
-and the environment is not, things will be OK. Clearly, it is easy to tell when
-we are sending a significant signal. Telling whether the environment is
-generating a significant signal, and doing so quickly enough that the adaption
-will not have diverged too much before we stop it, is a little harder.
-
-The key problem in detecting when the environment is sourcing significant
-energy is that we must do this very quickly. Given a reasonably long sample of
-the received signal, there are a number of strategies which may be used to
-assess whether that signal contains a strong far end component. However, by the
-time that assessment is complete the far end signal will have already caused
-major mis-convergence in the adaption process. An assessment algorithm is
-needed which produces a fairly accurate result from a very short burst of far
-end energy.
-
-How do I use it?
-
-The echo canceller processes both the transmit and receive streams sample by
-sample. The processing function is not declared inline. Unfortunately,
-cancellation requires many operations per sample, so the call overhead is only
-a minor burden.
-*/
-
-#include "fir.h"
-#include "oslec.h"
-
-/*
- G.168 echo canceller descriptor. This defines the working state for a line
- echo canceller.
-*/
-struct oslec_state {
- int16_t tx;
- int16_t rx;
- int16_t clean;
- int16_t clean_nlp;
-
- int nonupdate_dwell;
- int curr_pos;
- int taps;
- int log2taps;
- int adaption_mode;
-
- int cond_met;
- int32_t pstates;
- int16_t adapt;
- int32_t factor;
- int16_t shift;
-
- /* Average levels and averaging filter states */
- int ltxacc;
- int lrxacc;
- int lcleanacc;
- int lclean_bgacc;
- int ltx;
- int lrx;
- int lclean;
- int lclean_bg;
- int lbgn;
- int lbgn_acc;
- int lbgn_upper;
- int lbgn_upper_acc;
-
- /* foreground and background filter states */
- struct fir16_state_t fir_state;
- struct fir16_state_t fir_state_bg;
- int16_t *fir_taps16[2];
-
- /* DC blocking filter states */
- int tx_1;
- int tx_2;
- int rx_1;
- int rx_2;
-
- /* optional High Pass Filter states */
- int32_t xvtx[5];
- int32_t yvtx[5];
- int32_t xvrx[5];
- int32_t yvrx[5];
-
- /* Parameters for the optional Hoth noise generator */
- int cng_level;
- int cng_rndnum;
- int cng_filter;
-
- /* snapshot sample of coeffs used for development */
- int16_t *snapshot;
-};
-
-#endif /* __ECHO_H */
diff --git a/drivers/misc/echo/fir.h b/drivers/misc/echo/fir.h
deleted file mode 100644
index 4d0821025223..000000000000
--- a/drivers/misc/echo/fir.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SpanDSP - a series of DSP components for telephony
- *
- * fir.h - General telephony FIR routines
- *
- * Written by Steve Underwood <steveu@coppice.org>
- *
- * Copyright (C) 2002 Steve Underwood
- *
- * All rights reserved.
- */
-
-#if !defined(_FIR_H_)
-#define _FIR_H_
-
-/*
- Ideas for improvement:
-
- 1/ Rewrite filter for dual MAC inner loop. The issue here is handling
- history sample offsets that are 16 bit aligned - the dual MAC needs
- 32 bit alignment. There are some good examples in libbfdsp.
-
- 2/ Use the hardware circular buffer facility to halve memory usage.
-
- 3/ Consider using internal memory.
-
- Using less memory might also improve speed as cache misses will be
- reduced. A drop in MIPs and memory approaching 50% should be
- possible.
-
- The foreground and background filters currently use a total of
- about 10 MIPs/ch as measured with speedtest.c on a 256 TAP echo
- can.
-*/
-
-/*
- * 16 bit integer FIR descriptor. This defines the working state for a single
- * instance of an FIR filter using 16 bit integer coefficients.
- */
-struct fir16_state_t {
- int taps;
- int curr_pos;
- const int16_t *coeffs;
- int16_t *history;
-};
-
-/*
- * 32 bit integer FIR descriptor. This defines the working state for a single
- * instance of an FIR filter using 32 bit integer coefficients, and filtering
- * 16 bit integer data.
- */
-struct fir32_state_t {
- int taps;
- int curr_pos;
- const int32_t *coeffs;
- int16_t *history;
-};
-
-/*
- * Floating point FIR descriptor. This defines the working state for a single
- * instance of an FIR filter using floating point coefficients and data.
- */
-struct fir_float_state_t {
- int taps;
- int curr_pos;
- const float *coeffs;
- float *history;
-};
-
-static inline const int16_t *fir16_create(struct fir16_state_t *fir,
- const int16_t *coeffs, int taps)
-{
- fir->taps = taps;
- fir->curr_pos = taps - 1;
- fir->coeffs = coeffs;
- fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
- return fir->history;
-}
-
-static inline void fir16_flush(struct fir16_state_t *fir)
-{
- memset(fir->history, 0, fir->taps * sizeof(int16_t));
-}
-
-static inline void fir16_free(struct fir16_state_t *fir)
-{
- kfree(fir->history);
-}
-
-static inline int16_t fir16(struct fir16_state_t *fir, int16_t sample)
-{
- int32_t y;
- int i;
- int offset1;
- int offset2;
-
- fir->history[fir->curr_pos] = sample;
-
- offset2 = fir->curr_pos;
- offset1 = fir->taps - offset2;
- y = 0;
- for (i = fir->taps - 1; i >= offset1; i--)
- y += fir->coeffs[i] * fir->history[i - offset1];
- for (; i >= 0; i--)
- y += fir->coeffs[i] * fir->history[i + offset2];
- if (fir->curr_pos <= 0)
- fir->curr_pos = fir->taps;
- fir->curr_pos--;
- return (int16_t) (y >> 15);
-}
-
-static inline const int16_t *fir32_create(struct fir32_state_t *fir,
- const int32_t *coeffs, int taps)
-{
- fir->taps = taps;
- fir->curr_pos = taps - 1;
- fir->coeffs = coeffs;
- fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
- return fir->history;
-}
-
-static inline void fir32_flush(struct fir32_state_t *fir)
-{
- memset(fir->history, 0, fir->taps * sizeof(int16_t));
-}
-
-static inline void fir32_free(struct fir32_state_t *fir)
-{
- kfree(fir->history);
-}
-
-static inline int16_t fir32(struct fir32_state_t *fir, int16_t sample)
-{
- int i;
- int32_t y;
- int offset1;
- int offset2;
-
- fir->history[fir->curr_pos] = sample;
- offset2 = fir->curr_pos;
- offset1 = fir->taps - offset2;
- y = 0;
- for (i = fir->taps - 1; i >= offset1; i--)
- y += fir->coeffs[i] * fir->history[i - offset1];
- for (; i >= 0; i--)
- y += fir->coeffs[i] * fir->history[i + offset2];
- if (fir->curr_pos <= 0)
- fir->curr_pos = fir->taps;
- fir->curr_pos--;
- return (int16_t) (y >> 15);
-}
-
-#endif
diff --git a/drivers/misc/echo/oslec.h b/drivers/misc/echo/oslec.h
deleted file mode 100644
index f1adac143b90..000000000000
--- a/drivers/misc/echo/oslec.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * OSLEC - A line echo canceller. This code is being developed
- * against and partially complies with G168. Using code from SpanDSP
- *
- * Written by Steve Underwood <steveu@coppice.org>
- * and David Rowe <david_at_rowetel_dot_com>
- *
- * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe
- *
- * All rights reserved.
- */
-
-#ifndef __OSLEC_H
-#define __OSLEC_H
-
-/* Mask bits for the adaption mode */
-#define ECHO_CAN_USE_ADAPTION 0x01
-#define ECHO_CAN_USE_NLP 0x02
-#define ECHO_CAN_USE_CNG 0x04
-#define ECHO_CAN_USE_CLIP 0x08
-#define ECHO_CAN_USE_TX_HPF 0x10
-#define ECHO_CAN_USE_RX_HPF 0x20
-#define ECHO_CAN_DISABLE 0x40
-
-/**
- * oslec_state: G.168 echo canceller descriptor.
- *
- * This defines the working state for a line echo canceller.
- */
-struct oslec_state;
-
-/**
- * oslec_create - Create a voice echo canceller context.
- * @len: The length of the canceller, in samples.
- * @return: The new canceller context, or NULL if the canceller could not be
- * created.
- */
-struct oslec_state *oslec_create(int len, int adaption_mode);
-
-/**
- * oslec_free - Free a voice echo canceller context.
- * @ec: The echo canceller context.
- */
-void oslec_free(struct oslec_state *ec);
-
-/**
- * oslec_flush - Flush (reinitialise) a voice echo canceller context.
- * @ec: The echo canceller context.
- */
-void oslec_flush(struct oslec_state *ec);
-
-/**
- * oslec_adaption_mode - set the adaption mode of a voice echo canceller context.
- * @ec: The echo canceller context.
- * @adaption_mode: The mode.
- */
-void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode);
-
-void oslec_snapshot(struct oslec_state *ec);
-
-/**
- * oslec_update: Process a sample through a voice echo canceller.
- * @ec: The echo canceller context.
- * @tx: The transmitted audio sample.
- * @rx: The received audio sample.
- *
- * The return value is the clean (echo cancelled) received sample.
- */
-int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx);
-
-/**
- * oslec_hpf_tx: High pass filter the tx signal.
- * @ec: The echo canceller context.
- * @tx: The transmitted audio sample.
- *
- * The return value is the HP filtered transmit sample; send this to your D/A.
- */
-int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
-
-#endif /* __OSLEC_H */
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 1fc632ebf22f..60c42170d147 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -61,11 +61,6 @@ MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("T-platforms");
/*
- * csr_dbgdir - CSR read/write operations Debugfs directory
- */
-static struct dentry *csr_dbgdir;
-
-/*
* struct idt_89hpesx_dev - IDT 89HPESx device data structure
* @eesize: Size of EEPROM in bytes (calculated from "idt,eecompatible")
* @eero: EEPROM Read-only flag
@@ -1325,35 +1320,6 @@ static void idt_remove_sysfs_files(struct idt_89hpesx_dev *pdev)
}
/*
- * idt_create_dbgfs_files() - create debugfs files
- * @pdev: Pointer to the driver data
- */
-#define CSRNAME_LEN ((size_t)32)
-static void idt_create_dbgfs_files(struct idt_89hpesx_dev *pdev)
-{
- struct i2c_client *cli = pdev->client;
- char fname[CSRNAME_LEN];
-
- /* Create Debugfs directory for CSR file */
- snprintf(fname, CSRNAME_LEN, "%d-%04hx", cli->adapter->nr, cli->addr);
- pdev->csr_dir = debugfs_create_dir(fname, csr_dbgdir);
-
- /* Create Debugfs file for CSR read/write operations */
- debugfs_create_file(cli->name, 0600, pdev->csr_dir, pdev,
- &csr_dbgfs_ops);
-}
-
-/*
- * idt_remove_dbgfs_files() - remove debugfs files
- * @pdev: Pointer to the driver data
- */
-static void idt_remove_dbgfs_files(struct idt_89hpesx_dev *pdev)
-{
- /* Remove the CSR directory and its sysfs node */
- debugfs_remove_recursive(pdev->csr_dir);
-}
-
-/*
* idt_probe() - IDT 89HPESx driver probe() callback method
*/
static int idt_probe(struct i2c_client *client)
@@ -1382,7 +1348,7 @@ static int idt_probe(struct i2c_client *client)
goto err_free_pdev;
/* Create debugfs files */
- idt_create_dbgfs_files(pdev);
+ debugfs_create_file(pdev->client->name, 0600, client->debugfs, pdev,
+ &csr_dbgfs_ops);
return 0;
@@ -1399,9 +1365,6 @@ static void idt_remove(struct i2c_client *client)
{
struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client);
- /* Remove debugfs files first */
- idt_remove_dbgfs_files(pdev);
-
/* Remove sysfs files */
idt_remove_sysfs_files(pdev);
@@ -1550,38 +1513,4 @@ static struct i2c_driver idt_driver = {
.remove = idt_remove,
.id_table = idt_ids,
};
-
-/*
- * idt_init() - IDT 89HPESx driver init() callback method
- */
-static int __init idt_init(void)
-{
- int ret;
-
- /* Create Debugfs directory first */
- if (debugfs_initialized())
- csr_dbgdir = debugfs_create_dir("idt_csr", NULL);
-
- /* Add new i2c-device driver */
- ret = i2c_add_driver(&idt_driver);
- if (ret) {
- debugfs_remove_recursive(csr_dbgdir);
- return ret;
- }
-
- return 0;
-}
-module_init(idt_init);
-
-/*
- * idt_exit() - IDT 89HPESx driver exit() callback method
- */
-static void __exit idt_exit(void)
-{
- /* Discard debugfs directory and all files if any */
- debugfs_remove_recursive(csr_dbgdir);
-
- /* Unregister i2c-device driver */
- i2c_del_driver(&idt_driver);
-}
-module_exit(idt_exit);
+module_i2c_driver(idt_driver);
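module_i2c_driver() generates the init/exit boilerplate that the deleted idt_init()/idt_exit() used to spell out; the per-client debugfs directory is now created by the I2C core, so no module-level directory is needed. Roughly, the macro expands to:

    static int __init idt_driver_init(void)
    {
    	return i2c_add_driver(&idt_driver);
    }
    module_init(idt_driver_init);

    static void __exit idt_driver_exit(void)
    {
    	i2c_del_driver(&idt_driver);
    }
    module_exit(idt_driver_exit);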
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 7b7a22c91fe4..378923594f02 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -2313,7 +2313,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
rmem = of_reserved_mem_lookup(rmem_node);
if (!rmem) {
err = -EINVAL;
- goto fdev_error;
+ goto err_free_data;
}
src_perms = BIT(QCOM_SCM_VMID_HLOS);
@@ -2334,7 +2334,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
data->unsigned_support = false;
err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
if (err)
- goto fdev_error;
+ goto err_free_data;
break;
case CDSP_DOMAIN_ID:
case CDSP1_DOMAIN_ID:
@@ -2342,15 +2342,15 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
/* Create both device nodes so that we can allow both Signed and Unsigned PD */
err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
if (err)
- goto fdev_error;
+ goto err_free_data;
err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
if (err)
- goto populate_error;
+ goto err_deregister_fdev;
break;
default:
err = -EINVAL;
- goto fdev_error;
+ goto err_free_data;
}
kref_init(&data->refcount);
@@ -2367,17 +2367,17 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
if (err)
- goto populate_error;
+ goto err_deregister_fdev;
return 0;
-populate_error:
+err_deregister_fdev:
if (data->fdevice)
misc_deregister(&data->fdevice->miscdev);
if (data->secure_fdevice)
misc_deregister(&data->secure_fdevice->miscdev);
-fdev_error:
+err_free_data:
kfree(data);
return err;
}
diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c
index 69ee4f39af2a..187c5bc91e31 100644
--- a/drivers/misc/hi6421v600-irq.c
+++ b/drivers/misc/hi6421v600-irq.c
@@ -254,8 +254,9 @@ static int hi6421v600_irq_probe(struct platform_device *pdev)
if (!priv->irqs)
return -ENOMEM;
- priv->domain = irq_domain_add_simple(np, PMIC_IRQ_LIST_MAX, 0,
- &hi6421v600_domain_ops, priv);
+ priv->domain = irq_domain_create_simple(of_fwnode_handle(np),
+ PMIC_IRQ_LIST_MAX, 0,
+ &hi6421v600_domain_ops, priv);
if (!priv->domain) {
dev_err(dev, "Failed to create IRQ domain\n");
return -ENODEV;
diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig
index bb2fec4b5880..56005243a230 100644
--- a/drivers/misc/lis3lv02d/Kconfig
+++ b/drivers/misc/lis3lv02d/Kconfig
@@ -10,7 +10,7 @@ config SENSORS_LIS3_SPI
help
This driver provides support for the LIS3LV02Dx accelerometer connected
via SPI. The accelerometer data is readable via
- /sys/devices/platform/lis3lv02d.
+ /sys/devices/faux/lis3lv02d.
This driver also provides an absolute input class device, allowing
the laptop to act as a pinball machine-esque joystick.
@@ -26,7 +26,7 @@ config SENSORS_LIS3_I2C
help
This driver provides support for the LIS3LV02Dx accelerometer connected
via I2C. The accelerometer data is readable via
- /sys/devices/platform/lis3lv02d.
+ /sys/devices/faux/lis3lv02d.
This driver also provides an absolute input class device, allowing
the device to act as a pinball machine-esque joystick.
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index b1b316f99703..c1a05b935894 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -355,23 +355,12 @@ static void lkdtm_SLAB_FREE_PAGE(void)
free_page(p);
}
-/*
- * We have constructors to keep the caches distinctly separated without
- * needing to boot with "slab_nomerge".
- */
-static void ctor_double_free(void *region)
-{ }
-static void ctor_a(void *region)
-{ }
-static void ctor_b(void *region)
-{ }
-
void __init lkdtm_heap_init(void)
{
double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
- 64, 0, 0, ctor_double_free);
- a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
- b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
+ 64, 0, SLAB_NO_MERGE, NULL);
+ a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, SLAB_NO_MERGE, NULL);
+ b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, SLAB_NO_MERGE, NULL);
}
void __exit lkdtm_heap_exit(void)
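The deleted constructors existed only because the slab allocator never merges a cache that has a constructor; SLAB_NO_MERGE states that intent directly. A minimal sketch (the cache name is illustrative):

    struct kmem_cache *c = kmem_cache_create("example-cache", 64, 0,
    					     SLAB_NO_MERGE, NULL);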
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index 98d3d123004c..ff8f4404d10f 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -7,12 +7,14 @@
#include <linux/gpio/driver.h>
#include <linux/bio.h>
#include <linux/mutex.h>
+#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include "mchp_pci1xxxx_gp.h"
#define PCI1XXXX_NR_PINS 93
+#define PCI_DEV_REV_OFFSET 0x08
#define PERI_GEN_RESET 0
#define OUT_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400)
#define INP_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x10)
@@ -40,9 +42,27 @@ struct pci1xxxx_gpio {
raw_spinlock_t wa_lock;
struct gpio_chip gpio;
spinlock_t lock;
+ u32 gpio_wake_mask[3];
int irq_base;
+ u8 dev_rev;
};
+static int pci1xxxx_gpio_get_device_revision(struct pci1xxxx_gpio *priv)
+{
+ struct device *parent = priv->aux_dev->dev.parent;
+ struct pci_dev *pcidev = to_pci_dev(parent);
+ int ret;
+ u32 val;
+
+ ret = pci_read_config_dword(pcidev, PCI_DEV_REV_OFFSET, &val);
+ if (ret)
+ return ret;
+
+ priv->dev_rev = val;
+
+ return 0;
+}
+
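PCI_DEV_REV_OFFSET (0x08) is the standard class/revision dword, whose low byte is the revision ID, so assigning the u32 to the u8 dev_rev deliberately keeps only that byte. An equivalent single-byte read, assuming the generic config accessors:

    u8 rev;

    ret = pci_read_config_byte(pcidev, PCI_REVISION_ID, &rev);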
static int pci1xxxx_gpio_get_direction(struct gpio_chip *gpio, unsigned int nr)
{
struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
@@ -115,8 +135,7 @@ static int pci1xxxx_gpio_direction_output(struct gpio_chip *gpio,
return 0;
}
-static void pci1xxxx_gpio_set(struct gpio_chip *gpio,
- unsigned int nr, int val)
+static int pci1xxxx_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
unsigned long flags;
@@ -124,6 +143,8 @@ static void pci1xxxx_gpio_set(struct gpio_chip *gpio,
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, OUT_OFFSET(nr), (nr % 32), val);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
@@ -253,6 +274,22 @@ static int pci1xxxx_gpio_set_type(struct irq_data *data, unsigned int trigger_ty
return true;
}
+static int pci1xxxx_gpio_set_wake(struct irq_data *data, unsigned int enable)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned int bitpos = gpio % 32;
+ unsigned int bank = gpio / 32;
+
+ if (enable)
+ priv->gpio_wake_mask[bank] |= (1 << bitpos);
+ else
+ priv->gpio_wake_mask[bank] &= ~(1 << bitpos);
+
+ return 0;
+}
+
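The .irq_set_wake callback above is invoked by the IRQ core when a consumer arms the line as a wake source; the recorded bank bit is then consumed by the suspend/resume paths below. A sketch of the consumer side (my_gpiod is an assumption):

    int irq = gpiod_to_irq(my_gpiod);

    if (irq >= 0)
    	enable_irq_wake(irq);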
static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
{
struct pci1xxxx_gpio *priv = dev_id;
@@ -300,6 +337,7 @@ static const struct irq_chip pci1xxxx_gpio_irqchip = {
.irq_mask = pci1xxxx_gpio_irq_mask,
.irq_unmask = pci1xxxx_gpio_irq_unmask,
.irq_set_type = pci1xxxx_gpio_set_type,
+ .irq_set_wake = pci1xxxx_gpio_set_wake,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
@@ -307,32 +345,83 @@ static const struct irq_chip pci1xxxx_gpio_irqchip = {
static int pci1xxxx_gpio_suspend(struct device *dev)
{
struct pci1xxxx_gpio *priv = dev_get_drvdata(dev);
+ struct device *parent = priv->aux_dev->dev.parent;
+ struct pci_dev *pcidev = to_pci_dev(parent);
+ unsigned int gpio_bank_base;
+ unsigned int wake_mask;
+ unsigned int gpiobank;
unsigned long flags;
+ for (gpiobank = 0; gpiobank < 3; gpiobank++) {
+ wake_mask = priv->gpio_wake_mask[gpiobank];
+
+ if (wake_mask) {
+ gpio_bank_base = gpiobank * 32;
+
+ pci1xxx_assign_bit(priv->reg_base,
+ PIO_PCI_CTRL_REG_OFFSET, 0, true);
+ writel(~wake_mask, priv->reg_base +
+ WAKEMASK_OFFSET(gpio_bank_base));
+ }
+ }
+
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
16, true);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
17, false);
pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, true);
+
+ if (priv->dev_rev >= 0xC0)
+ pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 17, true);
+
spin_unlock_irqrestore(&priv->lock, flags);
+ device_set_wakeup_enable(&pcidev->dev, true);
+ pci_wake_from_d3(pcidev, true);
+
return 0;
}
static int pci1xxxx_gpio_resume(struct device *dev)
{
struct pci1xxxx_gpio *priv = dev_get_drvdata(dev);
+ struct device *parent = priv->aux_dev->dev.parent;
+ struct pci_dev *pcidev = to_pci_dev(parent);
+ unsigned int gpio_bank_base;
+ unsigned int wake_mask;
+ unsigned int gpiobank;
unsigned long flags;
+ for (gpiobank = 0; gpiobank < 3; gpiobank++) {
+ wake_mask = priv->gpio_wake_mask[gpiobank];
+
+ if (wake_mask) {
+ gpio_bank_base = gpiobank * 32;
+
+ writel(wake_mask, priv->reg_base +
+ INTR_STAT_OFFSET(gpio_bank_base));
+ pci1xxx_assign_bit(priv->reg_base,
+ PIO_PCI_CTRL_REG_OFFSET, 0, false);
+ writel(0xffffffff, priv->reg_base +
+ WAKEMASK_OFFSET(gpio_bank_base));
+ }
+ }
+
spin_lock_irqsave(&priv->lock, flags);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
17, true);
pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
16, false);
pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, false);
+
+ if (priv->dev_rev >= 0xC0)
+ pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 17, false);
+
spin_unlock_irqrestore(&priv->lock, flags);
+ pci_wake_from_d3(pcidev, false);
+
return 0;
}
@@ -349,7 +438,7 @@ static int pci1xxxx_gpio_setup(struct pci1xxxx_gpio *priv, int irq)
gchip->direction_output = pci1xxxx_gpio_direction_output;
gchip->get_direction = pci1xxxx_gpio_get_direction;
gchip->get = pci1xxxx_gpio_get;
- gchip->set = pci1xxxx_gpio_set;
+ gchip->set_rv = pci1xxxx_gpio_set;
gchip->set_config = pci1xxxx_gpio_set_config;
gchip->dbg_show = NULL;
gchip->base = -1;
@@ -412,6 +501,10 @@ static int pci1xxxx_gpio_probe(struct auxiliary_device *aux_dev,
if (retval < 0)
return retval;
+ retval = pci1xxxx_gpio_get_device_revision(priv);
+ if (retval)
+ return retval;
+
dev_set_drvdata(&aux_dev->dev, priv);
return devm_gpiochip_add_data(&aux_dev->dev, &priv->gpio, priv);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index b09b79fedaba..c484f416fae4 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -133,7 +133,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
break;
case MEI_EXT_HDR_GSC:
gsc_f2h = (struct mei_ext_hdr_gsc_f2h *)ext;
- cb->ext_hdr = kzalloc(sizeof(*gsc_f2h), GFP_KERNEL);
+ cb->ext_hdr = (struct mei_ext_hdr *)kzalloc(sizeof(*gsc_f2h), GFP_KERNEL);
if (!cb->ext_hdr) {
cb->status = -ENOMEM;
goto discard;
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index da26a080916c..267d0de5fade 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -324,7 +324,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
guard(mutex)(&tp->mutex);
/* rom xfer is big endian */
- cpu_to_be32_array((u32 *)tp->tx_buf, obuf, words);
+ cpu_to_be32_array((__be32 *)tp->tx_buf, obuf, words);
ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
@@ -340,7 +340,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
return ret;
if (ibuf)
- be32_to_cpu_array(ibuf, (u32 *)tp->rx_buf, words);
+ be32_to_cpu_array(ibuf, (__be32 *)tp->rx_buf, words);
return ret;
}
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 697a008c14d3..9fe816bf3957 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -164,7 +164,8 @@ struct xpc_arch_operations xpc_arch_ops;
static void
xpc_timeout_partition_disengage(struct timer_list *t)
{
- struct xpc_partition *part = from_timer(part, t, disengage_timer);
+ struct xpc_partition *part = timer_container_of(part, t,
+ disengage_timer);
DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c
new file mode 100644
index 000000000000..f7cde245ac95
--- /dev/null
+++ b/drivers/misc/ti_fpc202.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ti_fpc202.c - FPC202 Dual Port Controller driver
+ *
+ * Copyright (C) 2024 Bootlin
+ *
+ */
+
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-atr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+
+#define FPC202_NUM_PORTS 2
+#define FPC202_ALIASES_PER_PORT 2
+
+/*
+ * GPIO: port mapping
+ *
+ * 0: P0_S0_IN_A
+ * 1: P0_S1_IN_A
+ * 2: P1_S0_IN_A
+ * 3: P1_S1_IN_A
+ * 4: P0_S0_IN_B
+ * ...
+ * 8: P0_S0_IN_C
+ * ...
+ * 12: P0_S0_OUT_A
+ * ...
+ * 16: P0_S0_OUT_B
+ * ...
+ * 19: P1_S1_OUT_B
+ *
+ */
+
+#define FPC202_GPIO_COUNT 20
+#define FPC202_GPIO_P0_S0_IN_B 4
+#define FPC202_GPIO_P0_S0_OUT_A 12
+
+#define FPC202_REG_IN_A_INT 0x6
+#define FPC202_REG_IN_C_IN_B 0x7
+#define FPC202_REG_OUT_A_OUT_B 0x8
+
+#define FPC202_REG_OUT_A_OUT_B_VAL 0xa
+
+#define FPC202_REG_MOD_DEV(port, dev) (0xb4 + ((port) * 4) + (dev))
+#define FPC202_REG_AUX_DEV(port, dev) (0xb6 + ((port) * 4) + (dev))
+
+/*
+ * The FPC202 doesn't support turning off address translation on a single port.
+ * So just set an invalid I2C address as the translation target when no client
+ * address is attached.
+ */
+#define FPC202_REG_DEV_INVALID 0
+
+/* Even aliases are assigned to device 0 and odd aliases to device 1 */
+#define fpc202_dev_num_from_alias(alias) ((alias) % 2)
+
+struct fpc202_priv {
+ struct i2c_client *client;
+ struct i2c_atr *atr;
+ struct gpio_desc *en_gpio;
+ struct gpio_chip gpio;
+
+ /* Lock REG_MOD/AUX_DEV and addr_caches during attach/detach */
+ struct mutex reg_dev_lock;
+
+ /* Cached device addresses for both ports and their devices */
+ u8 addr_caches[2][2];
+
+ /* Keep track of which ports were probed */
+ DECLARE_BITMAP(probed_ports, FPC202_NUM_PORTS);
+};
+
+static void fpc202_fill_alias_table(struct i2c_client *client, u16 *aliases, int port_id)
+{
+ u16 first_alias;
+ int i;
+
+ /*
+ * There is a predefined list of aliases for each FPC202 I2C
+ * self-address. This allows daisy-chained FPC202 units to
+ * automatically take on different sets of aliases.
+ * Each port of an FPC202 unit is assigned two aliases from this list.
+ */
+ first_alias = 0x10 + 4 * port_id + 8 * ((u16)client->addr - 2);
+
+ for (i = 0; i < FPC202_ALIASES_PER_PORT; i++)
+ aliases[i] = first_alias + i;
+}
+
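For illustration, here is how the alias arithmetic above works out for one hypothetical configuration (the address and port are made up to exercise the formula, not taken from the patch):

    an FPC202 at 7-bit address 0x03, port 1:
        first_alias = 0x10 + 4 * 1 + 8 * (0x03 - 2) = 0x1c
    so this port claims aliases 0x1c and 0x1d, port 0 of the same chip
    claims 0x18 and 0x19, and a daisy-chained unit at address 0x04 starts
    at 0x20, so no two ports ever overlap.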
+static int fpc202_gpio_get_dir(int offset)
+{
+ return offset < FPC202_GPIO_P0_S0_OUT_A ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+static int fpc202_read(struct fpc202_priv *priv, u8 reg)
+{
+ return i2c_smbus_read_byte_data(priv->client, reg);
+}
+
+static int fpc202_write(struct fpc202_priv *priv, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(priv->client, reg, value);
+}
+
+static void fpc202_set_enable(struct fpc202_priv *priv, int enable)
+{
+ if (!priv->en_gpio)
+ return;
+
+ gpiod_set_value(priv->en_gpio, enable);
+}
+
+static void fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct fpc202_priv *priv = gpiochip_get_data(chip);
+ int ret;
+ u8 val;
+
+ if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN)
+ return;
+
+ ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B_VAL);
+ if (ret < 0) {
+ dev_err(&priv->client->dev, "Failed to set GPIO %d value! err %d\n", offset, ret);
+ return;
+ }
+
+ val = (u8)ret;
+
+ if (value)
+ val |= BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
+ else
+ val &= ~BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
+
+ fpc202_write(priv, FPC202_REG_OUT_A_OUT_B_VAL, val);
+}
+
+static int fpc202_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct fpc202_priv *priv = gpiochip_get_data(chip);
+ u8 reg, bit;
+ int ret;
+
+ if (offset < FPC202_GPIO_P0_S0_IN_B) {
+ reg = FPC202_REG_IN_A_INT;
+ bit = BIT(4 + offset);
+ } else if (offset < FPC202_GPIO_P0_S0_OUT_A) {
+ reg = FPC202_REG_IN_C_IN_B;
+ bit = BIT(offset - FPC202_GPIO_P0_S0_IN_B);
+ } else {
+ reg = FPC202_REG_OUT_A_OUT_B_VAL;
+ bit = BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
+ }
+
+ ret = fpc202_read(priv, reg);
+ if (ret < 0)
+ return ret;
+
+ return !!(((u8)ret) & bit);
+}
+
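As a worked example of the offset decoding above, using the port mapping comment at the top of the file (offsets chosen arbitrarily for illustration):

    offset 2  (P1_S0_IN_A)  -> FPC202_REG_IN_A_INT,        bit BIT(6)
    offset 9  (P0_S1_IN_C)  -> FPC202_REG_IN_C_IN_B,       bit BIT(5)
    offset 13 (P0_S1_OUT_A) -> FPC202_REG_OUT_A_OUT_B_VAL, bit BIT(1)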
+static int fpc202_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_OUT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fpc202_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct fpc202_priv *priv = gpiochip_get_data(chip);
+ int ret;
+ u8 val;
+
+ if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN)
+ return -EINVAL;
+
+ fpc202_gpio_set(chip, offset, value);
+
+ ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B);
+ if (ret < 0)
+ return ret;
+
+ val = (u8)ret | BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
+
+ return fpc202_write(priv, FPC202_REG_OUT_A_OUT_B, val);
+}
+
+/*
+ * Set the translation table entry associated with a port and device number.
+ *
+ * Each downstream port of the FPC202 has two fixed aliases corresponding to
+ * device numbers 0 and 1. If one of these aliases is found in an incoming I2C
+ * transfer, it will be translated to the address given by the corresponding
+ * translation table entry.
+ */
+static int fpc202_write_dev_addr(struct fpc202_priv *priv, u32 port_id, int dev_num, u16 addr)
+{
+ int ret, reg_mod, reg_aux;
+ u8 val;
+
+ guard(mutex)(&priv->reg_dev_lock);
+
+ reg_mod = FPC202_REG_MOD_DEV(port_id, dev_num);
+ reg_aux = FPC202_REG_AUX_DEV(port_id, dev_num);
+ val = addr & 0x7f;
+
+ ret = fpc202_write(priv, reg_mod, val);
+ if (ret)
+ return ret;
+
+ /*
+ * The FPC202 datasheet is unclear about the role of the AUX registers.
+ * Empirically, writing to them as well seems to be necessary for
+ * address translation to function properly.
+ */
+ ret = fpc202_write(priv, reg_aux, val);
+
+ priv->addr_caches[port_id][dev_num] = val;
+
+ return ret;
+}
+
+static int fpc202_attach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr, u16 alias)
+{
+ struct fpc202_priv *priv = i2c_atr_get_driver_data(atr);
+
+ dev_dbg(&priv->client->dev, "attaching address 0x%02x to alias 0x%02x\n", addr, alias);
+
+ return fpc202_write_dev_addr(priv, chan_id, fpc202_dev_num_from_alias(alias), addr);
+}
+
+static void fpc202_detach_addr(struct i2c_atr *atr, u32 chan_id,
+ u16 addr)
+{
+ struct fpc202_priv *priv = i2c_atr_get_driver_data(atr);
+ int dev_num, reg_mod, val;
+
+ for (dev_num = 0; dev_num < 2; dev_num++) {
+ reg_mod = FPC202_REG_MOD_DEV(chan_id, dev_num);
+
+ mutex_lock(&priv->reg_dev_lock);
+
+ val = priv->addr_caches[chan_id][dev_num];
+
+ mutex_unlock(&priv->reg_dev_lock);
+
+ if (val < 0) {
+ dev_err(&priv->client->dev, "failed to read register 0x%x while detaching address 0x%02x\n",
+ reg_mod, addr);
+ return;
+ }
+
+ if (val == (addr & 0x7f)) {
+ fpc202_write_dev_addr(priv, chan_id, dev_num, FPC202_REG_DEV_INVALID);
+ return;
+ }
+ }
+}
+
+static const struct i2c_atr_ops fpc202_atr_ops = {
+ .attach_addr = fpc202_attach_addr,
+ .detach_addr = fpc202_detach_addr,
+};
+
+static int fpc202_probe_port(struct fpc202_priv *priv, struct device_node *i2c_handle, int port_id)
+{
+ u16 aliases[FPC202_ALIASES_PER_PORT] = { };
+ struct device *dev = &priv->client->dev;
+ struct i2c_atr_adap_desc desc = { };
+ int ret = 0;
+
+ desc.chan_id = port_id;
+ desc.parent = dev;
+ desc.bus_handle = of_node_to_fwnode(i2c_handle);
+ desc.num_aliases = FPC202_ALIASES_PER_PORT;
+
+ fpc202_fill_alias_table(priv->client, aliases, port_id);
+ desc.aliases = aliases;
+
+ ret = i2c_atr_add_adapter(priv->atr, &desc);
+ if (ret)
+ return ret;
+
+ set_bit(port_id, priv->probed_ports);
+
+ ret = fpc202_write_dev_addr(priv, port_id, 0, FPC202_REG_DEV_INVALID);
+ if (ret)
+ return ret;
+
+ return fpc202_write_dev_addr(priv, port_id, 1, FPC202_REG_DEV_INVALID);
+}
+
+static void fpc202_remove_port(struct fpc202_priv *priv, int port_id)
+{
+ i2c_atr_del_adapter(priv->atr, port_id);
+ clear_bit(port_id, priv->probed_ports);
+}
+
+static int fpc202_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *i2c_handle;
+ struct fpc202_priv *priv;
+ int ret, port_id;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->reg_dev_lock);
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->en_gpio)) {
+ ret = PTR_ERR(priv->en_gpio);
+ dev_err(dev, "failed to fetch enable GPIO! err %d\n", ret);
+ goto destroy_mutex;
+ }
+
+ priv->gpio.label = "gpio-fpc202";
+ priv->gpio.base = -1;
+ priv->gpio.direction_input = fpc202_gpio_direction_input;
+ priv->gpio.direction_output = fpc202_gpio_direction_output;
+ priv->gpio.set = fpc202_gpio_set;
+ priv->gpio.get = fpc202_gpio_get;
+ priv->gpio.ngpio = FPC202_GPIO_COUNT;
+ priv->gpio.parent = dev;
+ priv->gpio.owner = THIS_MODULE;
+
+ ret = gpiochip_add_data(&priv->gpio, priv);
+ if (ret) {
+ priv->gpio.parent = NULL;
+ dev_err(dev, "failed to add gpiochip err %d\n", ret);
+ goto disable_gpio;
+ }
+
+ priv->atr = i2c_atr_new(client->adapter, dev, &fpc202_atr_ops, 2, 0);
+ if (IS_ERR(priv->atr)) {
+ ret = PTR_ERR(priv->atr);
+ dev_err(dev, "failed to create i2c atr err %d\n", ret);
+ goto disable_gpio;
+ }
+
+ i2c_atr_set_driver_data(priv->atr, priv);
+
+ bitmap_zero(priv->probed_ports, FPC202_NUM_PORTS);
+
+ for_each_child_of_node(dev->of_node, i2c_handle) {
+ ret = of_property_read_u32(i2c_handle, "reg", &port_id);
+ if (ret) {
+ if (ret == -EINVAL)
+ continue;
+
+ dev_err(dev, "failed to read 'reg' property of child node, err %d\n", ret);
+ goto unregister_chans;
+ }
+
+ if (port_id >= FPC202_NUM_PORTS) {
+ dev_err(dev, "port ID %d is out of range!\n", port_id);
+ ret = -EINVAL;
+ goto unregister_chans;
+ }
+
+ ret = fpc202_probe_port(priv, i2c_handle, port_id);
+ if (ret) {
+ dev_err(dev, "Failed to probe port %d, err %d\n", port_id, ret);
+ goto unregister_chans;
+ }
+ }
+
+ goto out;
+
+unregister_chans:
+ for_each_set_bit(port_id, priv->probed_ports, FPC202_NUM_PORTS)
+ fpc202_remove_port(priv, port_id);
+
+ i2c_atr_delete(priv->atr);
+disable_gpio:
+ fpc202_set_enable(priv, 0);
+ gpiochip_remove(&priv->gpio);
+destroy_mutex:
+ mutex_destroy(&priv->reg_dev_lock);
+out:
+ return ret;
+}
+
+static void fpc202_remove(struct i2c_client *client)
+{
+ struct fpc202_priv *priv = i2c_get_clientdata(client);
+ int port_id;
+
+ for_each_set_bit(port_id, priv->probed_ports, FPC202_NUM_PORTS)
+ fpc202_remove_port(priv, port_id);
+
+ mutex_destroy(&priv->reg_dev_lock);
+
+ i2c_atr_delete(priv->atr);
+
+ fpc202_set_enable(priv, 0);
+ gpiochip_remove(&priv->gpio);
+}
+
+static const struct of_device_id fpc202_of_match[] = {
+ { .compatible = "ti,fpc202" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fpc202_of_match);
+
+static struct i2c_driver fpc202_driver = {
+ .driver = {
+ .name = "fpc202",
+ .of_match_table = fpc202_of_match,
+ },
+ .probe = fpc202_probe,
+ .remove = fpc202_remove,
+};
+
+module_i2c_driver(fpc202_driver);
+
+MODULE_AUTHOR("Romain Gantois <romain.gantois@bootlin.com>");
+MODULE_DESCRIPTION("TI FPC202 Dual Port Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("I2C_ATR");
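A hypothetical device tree fragment matching what fpc202_probe() parses; the property names are inferred from devm_gpiod_get_optional(dev, "enable", ...) and the per-child "reg" lookup, not taken from a binding document, so treat this as a sketch only:

    fpc202@2 {
        compatible = "ti,fpc202";
        reg = <0x02>;
        enable-gpios = <&gpio 5 GPIO_ACTIVE_HIGH>;

        i2c@0 {
            reg = <0>;  /* devices behind downstream port 0 */
        };

        i2c@1 {
            reg = <1>;  /* devices behind downstream port 1 */
        };
    };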
diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c
index 0a24ce44cc37..6db1c9d48f8f 100644
--- a/drivers/misc/tps6594-pfsm.c
+++ b/drivers/misc/tps6594-pfsm.c
@@ -281,6 +281,9 @@ static int tps6594_pfsm_probe(struct platform_device *pdev)
pfsm->miscdev.minor = MISC_DYNAMIC_MINOR;
pfsm->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x",
tps->chip_id, tps->reg);
+ if (!pfsm->miscdev.name)
+ return -ENOMEM;
+
pfsm->miscdev.fops = &tps6594_pfsm_fops;
pfsm->miscdev.parent = dev->parent;
pfsm->chip_id = tps->chip_id;
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index bdc2e6fda782..42e7d2a2a90c 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -465,40 +465,6 @@ static void uacce_release(struct device *dev)
kfree(uacce);
}
-static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags)
-{
- int ret;
-
- if (!(flags & UACCE_DEV_SVA))
- return flags;
-
- flags &= ~UACCE_DEV_SVA;
-
- ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF);
- if (ret) {
- dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret));
- return flags;
- }
-
- ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA);
- if (ret) {
- dev_err(parent, "failed to enable SVA feature! ret = %pe\n", ERR_PTR(ret));
- iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF);
- return flags;
- }
-
- return flags | UACCE_DEV_SVA;
-}
-
-static void uacce_disable_sva(struct uacce_device *uacce)
-{
- if (!(uacce->flags & UACCE_DEV_SVA))
- return;
-
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA);
- iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF);
-}
-
/**
* uacce_alloc() - alloc an accelerator
* @parent: pointer of uacce parent device
@@ -518,8 +484,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
if (!uacce)
return ERR_PTR(-ENOMEM);
- flags = uacce_enable_sva(parent, flags);
-
uacce->parent = parent;
uacce->flags = flags;
uacce->ops = interface->ops;
@@ -542,7 +506,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
return uacce;
err_with_uacce:
- uacce_disable_sva(uacce);
kfree(uacce);
return ERR_PTR(ret);
}
@@ -605,9 +568,6 @@ void uacce_remove(struct uacce_device *uacce)
unmap_mapping_range(q->mapping, 0, 0, 1);
}
- /* disable sva now since no opened queues */
- uacce_disable_sva(uacce);
-
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
xa_erase(&uacce_xa, uacce->dev_id);
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index abe79f6fd2a7..b64944367ac5 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -227,6 +227,7 @@ static int drv_cp_harray_to_user(void __user *user_buf_uva,
static int vmci_host_setup_notify(struct vmci_ctx *context,
unsigned long uva)
{
+ struct page *page;
int retval;
if (context->notify_page) {
@@ -243,13 +244,11 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
/*
* Lock physical page backing a given user VA.
*/
- retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
- if (retval != 1) {
- context->notify_page = NULL;
+ retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page);
+ if (retval != 1)
return VMCI_ERROR_GENERIC;
- }
- if (context->notify_page == NULL)
- return VMCI_ERROR_UNAVAILABLE;
+
+ context->notify_page = page;
/*
* Map the locked page and set up notify pointer.
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 4830628510e6..9cc47bf94804 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1220,7 +1220,7 @@ static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
int err = 0;
blk_status_t status = BLK_STS_OK;
- if (!mmc_can_erase(card)) {
+ if (!mmc_card_can_erase(card)) {
status = BLK_STS_NOTSUPP;
goto fail;
}
@@ -1276,7 +1276,7 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
int err = 0, type = MMC_BLK_SECDISCARD;
blk_status_t status = BLK_STS_OK;
- if (!(mmc_can_secure_erase_trim(card))) {
+ if (!(mmc_card_can_secure_erase_trim(card))) {
status = BLK_STS_NOTSUPP;
goto out;
}
@@ -1284,7 +1284,7 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ if (mmc_card_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
arg = MMC_SECURE_TRIM1_ARG;
else
arg = MMC_SECURE_ERASE_ARG;
@@ -2278,7 +2278,7 @@ void mmc_blk_mq_recovery(struct mmc_queue *mq)
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
struct request **prev_req)
{
- if (mmc_host_done_complete(mq->card->host))
+ if (mmc_host_can_done_complete(mq->card->host))
return;
mutex_lock(&mq->complete_lock);
@@ -2317,7 +2317,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
struct mmc_host *host = mq->card->host;
unsigned long flags;
- if (!mmc_host_done_complete(host)) {
+ if (!mmc_host_can_done_complete(host)) {
bool waiting;
/*
@@ -2430,7 +2430,7 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
mq->rw_wait = false;
/* Release re-tuning here where there is no synchronization required */
- if (err || mmc_host_done_complete(host))
+ if (err || mmc_host_can_done_complete(host))
mmc_retune_release(host);
out_post_req:
@@ -2618,7 +2618,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
md->read_only = mmc_blk_readonly(card);
- if (mmc_host_cmd23(card->host)) {
+ if (mmc_host_can_cmd23(card->host)) {
if ((mmc_card_mmc(card) &&
card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
(mmc_card_sd(card) && !mmc_card_ult_capacity(card) &&
@@ -2655,7 +2655,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->private_data = md;
md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
- if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
+ if (area_type & MMC_BLK_DATA_AREA_RPMB)
md->disk->flags |= GENHD_FL_NO_PART;
/*
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 3205feb1e8ff..9cbdd240c3a7 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -89,6 +89,7 @@ struct mmc_fixup {
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
#define CID_MANFID_APACER 0x27
+#define CID_MANFID_SWISSBIT 0x5D
#define CID_MANFID_KINGSTON 0x70
#define CID_MANFID_HYNIX 0x90
#define CID_MANFID_KINGSTON_SD 0x9F
@@ -294,4 +295,9 @@ static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY;
}
+static inline int mmc_card_no_uhs_ddr50_tuning(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING;
+}
+
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ce08e0ea7fc1..a0e2dce70434 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1837,52 +1837,44 @@ int mmc_erase(struct mmc_card *card, sector_t from, unsigned int nr,
}
EXPORT_SYMBOL(mmc_erase);
-int mmc_can_erase(struct mmc_card *card)
+bool mmc_card_can_erase(struct mmc_card *card)
{
- if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
- return 1;
- return 0;
+ return (card->csd.cmdclass & CCC_ERASE && card->erase_size);
}
-EXPORT_SYMBOL(mmc_can_erase);
+EXPORT_SYMBOL(mmc_card_can_erase);
-int mmc_can_trim(struct mmc_card *card)
+bool mmc_card_can_trim(struct mmc_card *card)
{
- if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
- (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
- return 1;
- return 0;
+ return ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
+ (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)));
}
-EXPORT_SYMBOL(mmc_can_trim);
+EXPORT_SYMBOL(mmc_card_can_trim);
-int mmc_can_discard(struct mmc_card *card)
+bool mmc_card_can_discard(struct mmc_card *card)
{
/*
* As there's no way to detect the discard support bit at v4.5
 * use the s/w feature support field.
*/
- if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
- return 1;
- return 0;
+ return (card->ext_csd.feature_support & MMC_DISCARD_FEATURE);
}
-EXPORT_SYMBOL(mmc_can_discard);
+EXPORT_SYMBOL(mmc_card_can_discard);
-int mmc_can_sanitize(struct mmc_card *card)
+bool mmc_card_can_sanitize(struct mmc_card *card)
{
- if (!mmc_can_trim(card) && !mmc_can_erase(card))
- return 0;
+ if (!mmc_card_can_trim(card) && !mmc_card_can_erase(card))
+ return false;
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
- return 1;
- return 0;
+ return true;
+ return false;
}
-int mmc_can_secure_erase_trim(struct mmc_card *card)
+bool mmc_card_can_secure_erase_trim(struct mmc_card *card)
{
- if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
- !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
- return 1;
- return 0;
+ return ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN));
}
-EXPORT_SYMBOL(mmc_can_secure_erase_trim);
+EXPORT_SYMBOL(mmc_card_can_secure_erase_trim);
int mmc_erase_group_aligned(struct mmc_card *card, sector_t from,
unsigned int nr)
@@ -1987,7 +1979,7 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
return card->pref_erase;
max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
- if (mmc_can_trim(card)) {
+ if (mmc_card_can_trim(card)) {
max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
if (max_trim < max_discard || max_discard == 0)
max_discard = max_trim;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index fc9c066e6468..622085cd766f 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -118,11 +118,11 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
int mmc_erase(struct mmc_card *card, sector_t from, unsigned int nr, unsigned int arg);
-int mmc_can_erase(struct mmc_card *card);
-int mmc_can_trim(struct mmc_card *card);
-int mmc_can_discard(struct mmc_card *card);
-int mmc_can_sanitize(struct mmc_card *card);
-int mmc_can_secure_erase_trim(struct mmc_card *card);
+bool mmc_card_can_erase(struct mmc_card *card);
+bool mmc_card_can_trim(struct mmc_card *card);
+bool mmc_card_can_discard(struct mmc_card *card);
+bool mmc_card_can_sanitize(struct mmc_card *card);
+bool mmc_card_can_secure_erase_trim(struct mmc_card *card);
int mmc_erase_group_aligned(struct mmc_card *card, sector_t from, unsigned int nr);
unsigned int mmc_calc_max_discard(struct mmc_card *card);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index dacb5bd9bb71..f14671ea5716 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -212,7 +212,7 @@ out:
static void mmc_retune_timer(struct timer_list *t)
{
- struct mmc_host *host = from_timer(host, t, retune_timer);
+ struct mmc_host *host = timer_container_of(host, t, retune_timer);
mmc_retune_needed(host);
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 48c4952512a5..5941d68ff989 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -39,22 +39,22 @@ static inline void mmc_retune_recheck(struct mmc_host *host)
host->retune_now = 1;
}
-static inline int mmc_host_cmd23(struct mmc_host *host)
+static inline int mmc_host_can_cmd23(struct mmc_host *host)
{
return host->caps & MMC_CAP_CMD23;
}
-static inline bool mmc_host_done_complete(struct mmc_host *host)
+static inline bool mmc_host_can_done_complete(struct mmc_host *host)
{
return host->caps & MMC_CAP_DONE_COMPLETE;
}
-static inline int mmc_boot_partition_access(struct mmc_host *host)
+static inline int mmc_host_can_access_boot(struct mmc_host *host)
{
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
}
-static inline int mmc_host_uhs(struct mmc_host *host)
+static inline int mmc_host_can_uhs(struct mmc_host *host)
{
return host->caps &
(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 1522fd2b517d..5be9b42d5057 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -33,6 +33,12 @@
#define MIN_CACHE_EN_TIMEOUT_MS 1600
#define CACHE_FLUSH_TIMEOUT_MS 30000 /* 30s */
+enum mmc_poweroff_type {
+ MMC_POWEROFF_SUSPEND,
+ MMC_POWEROFF_SHUTDOWN,
+ MMC_POWEROFF_UNBIND,
+};
+
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -453,7 +459,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
* There are two boot regions of equal size, defined in
* multiples of 128K.
*/
- if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
+ if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_host_can_access_boot(card->host)) {
for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
mmc_part_add(card, part_size,
@@ -572,7 +578,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
* RPMB regions are defined in multiples of 128K.
*/
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
- if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
+ if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_can_cmd23(card->host)) {
mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
EXT_CSD_PART_CONFIG_ACC_RPMB,
"rpmb", 0, false,
@@ -674,7 +680,7 @@ static int mmc_read_ext_csd(struct mmc_card *card)
u8 *ext_csd;
int err;
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
return 0;
err = mmc_get_ext_csd(card, &ext_csd);
@@ -953,7 +959,7 @@ static int mmc_select_powerclass(struct mmc_card *card)
int err, ddr;
/* Power class selection is supported for versions >= 4.0 */
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
return 0;
bus_width = host->ios.bus_width;
@@ -1016,7 +1022,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
unsigned idx, bus_width = 0;
int err = 0;
- if (!mmc_can_ext_csd(card) ||
+ if (!mmc_card_can_ext_csd(card) ||
!(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
@@ -1537,7 +1543,7 @@ static int mmc_select_timing(struct mmc_card *card)
{
int err = 0;
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
goto bus_speed;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES) {
@@ -1798,9 +1804,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/* set erase_arg */
- if (mmc_can_discard(card))
+ if (mmc_card_can_discard(card))
card->erase_arg = MMC_DISCARD_ARG;
- else if (mmc_can_trim(card))
+ else if (mmc_card_can_trim(card))
card->erase_arg = MMC_TRIM_ARG;
else
card->erase_arg = MMC_ERASE_ARG;
@@ -1949,7 +1955,7 @@ err:
return err;
}
-static int mmc_can_sleep(struct mmc_card *card)
+static bool mmc_card_can_sleep(struct mmc_card *card)
{
return card->ext_csd.rev >= 3;
}
@@ -2007,13 +2013,26 @@ out_release:
return err;
}
-static int mmc_can_poweroff_notify(const struct mmc_card *card)
+static bool mmc_card_can_poweroff_notify(const struct mmc_card *card)
{
return card &&
mmc_card_mmc(card) &&
(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
}
+static bool mmc_host_can_poweroff_notify(const struct mmc_host *host,
+ enum mmc_poweroff_type pm_type)
+{
+ if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE)
+ return true;
+
+ if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND &&
+ pm_type == MMC_POWEROFF_SUSPEND)
+ return true;
+
+ return pm_type == MMC_POWEROFF_SHUTDOWN;
+}
+
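Spelled out, the new helper resolves as follows (a sketch of the three checks in order, assuming only the caps shown are set):

    caps2 has MMC_CAP2_FULL_PWR_CYCLE            -> true for any pm_type
    caps2 has MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND -> true for MMC_POWEROFF_SUSPEND
    neither cap set                              -> true only for MMC_POWEROFF_SHUTDOWN,
                                                    so both MMC_POWEROFF_SUSPEND and
                                                    MMC_POWEROFF_UNBIND return false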
static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
{
unsigned int timeout = card->ext_csd.generic_cmd6_time;
@@ -2037,15 +2056,6 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
}
/*
- * Host is being removed. Free up the current card.
- */
-static void mmc_remove(struct mmc_host *host)
-{
- mmc_remove_card(host->card);
- host->card = NULL;
-}
-
-/*
* Card detection - card is alive.
*/
static int mmc_alive(struct mmc_host *host)
@@ -2070,7 +2080,8 @@ static void mmc_detect(struct mmc_host *host)
mmc_put_card(host->card, NULL);
if (err) {
- mmc_remove(host);
+ mmc_remove_card(host->card);
+ host->card = NULL;
mmc_claim_host(host);
mmc_detach_bus(host);
@@ -2108,11 +2119,13 @@ static int _mmc_flush_cache(struct mmc_host *host)
return err;
}
-static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
+static int _mmc_suspend(struct mmc_host *host, enum mmc_poweroff_type pm_type)
{
+ unsigned int notify_type = EXT_CSD_POWER_OFF_SHORT;
int err = 0;
- unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
- EXT_CSD_POWER_OFF_LONG;
+
+ if (pm_type == MMC_POWEROFF_SHUTDOWN)
+ notify_type = EXT_CSD_POWER_OFF_LONG;
mmc_claim_host(host);
@@ -2123,11 +2136,10 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
if (err)
goto out;
- if (mmc_can_poweroff_notify(host->card) &&
- ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend ||
- (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND)))
+ if (mmc_card_can_poweroff_notify(host->card) &&
+ mmc_host_can_poweroff_notify(host, pm_type))
err = mmc_poweroff_notify(host->card, notify_type);
- else if (mmc_can_sleep(host->card))
+ else if (mmc_card_can_sleep(host->card))
err = mmc_sleep(host);
else if (!mmc_host_is_spi(host))
err = mmc_deselect_cards(host);
@@ -2142,13 +2154,27 @@ out:
}
/*
+ * Host is being removed. Free up the current card and do a graceful power-off.
+ */
+static void mmc_remove(struct mmc_host *host)
+{
+ get_device(&host->card->dev);
+ mmc_remove_card(host->card);
+
+ _mmc_suspend(host, MMC_POWEROFF_UNBIND);
+
+ put_device(&host->card->dev);
+ host->card = NULL;
+}
+
+/*
* Suspend callback
*/
static int mmc_suspend(struct mmc_host *host)
{
int err;
- err = _mmc_suspend(host, true);
+ err = _mmc_suspend(host, MMC_POWEROFF_SUSPEND);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
@@ -2187,15 +2213,16 @@ static int mmc_shutdown(struct mmc_host *host)
int err = 0;
/*
- * In a specific case for poweroff notify, we need to resume the card
- * before we can shutdown it properly.
+ * If the card remains suspended at this point and it was done by using
+ * the sleep-cmd (CMD5), we may need to re-initialize it first, to allow
+ * us to send the preferred poweroff-notification cmd at shutdown.
*/
- if (mmc_can_poweroff_notify(host->card) &&
- !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
+ if (mmc_card_can_poweroff_notify(host->card) &&
+ !mmc_host_can_poweroff_notify(host, MMC_POWEROFF_SUSPEND))
err = _mmc_resume(host);
if (!err)
- err = _mmc_suspend(host, false);
+ err = _mmc_suspend(host, MMC_POWEROFF_SHUTDOWN);
return err;
}
@@ -2219,7 +2246,7 @@ static int mmc_runtime_suspend(struct mmc_host *host)
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
- err = _mmc_suspend(host, true);
+ err = _mmc_suspend(host, MMC_POWEROFF_SUSPEND);
if (err)
pr_err("%s: error %d doing aggressive suspend\n",
mmc_hostname(host), err);
@@ -2242,14 +2269,12 @@ static int mmc_runtime_resume(struct mmc_host *host)
return 0;
}
-static int mmc_can_reset(struct mmc_card *card)
+static bool mmc_card_can_reset(struct mmc_card *card)
{
u8 rst_n_function;
rst_n_function = card->ext_csd.rst_n_function;
- if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
- return 0;
- return 1;
+ return ((rst_n_function & EXT_CSD_RST_N_EN_MASK) == EXT_CSD_RST_N_ENABLED);
}
static int _mmc_hw_reset(struct mmc_host *host)
@@ -2263,7 +2288,7 @@ static int _mmc_hw_reset(struct mmc_host *host)
_mmc_flush_cache(host);
if ((host->caps & MMC_CAP_HW_RESET) && host->ops->card_hw_reset &&
- mmc_can_reset(card)) {
+ mmc_card_can_reset(card)) {
/* If the card accept RST_n signal, send it. */
mmc_set_clock(host, host->f_init);
host->ops->card_hw_reset(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 5c8e62e8f331..66283825513c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -383,7 +383,7 @@ int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
if (!card || !new_ext_csd)
return -EINVAL;
- if (!mmc_can_ext_csd(card))
+ if (!mmc_card_can_ext_csd(card))
return -EOPNOTSUPP;
/*
@@ -944,7 +944,7 @@ out:
return err;
}
-int mmc_can_ext_csd(struct mmc_card *card)
+bool mmc_card_can_ext_csd(struct mmc_card *card)
{
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
@@ -1046,7 +1046,7 @@ int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
struct mmc_host *host = card->host;
int err;
- if (!mmc_can_sanitize(card)) {
+ if (!mmc_card_can_sanitize(card)) {
pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
return -EOPNOTSUPP;
}
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 0df3ebd900d1..514c40ff4b4e 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -37,7 +37,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
-int mmc_can_ext_csd(struct mmc_card *card);
+bool mmc_card_can_ext_csd(struct mmc_card *card);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms);
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 4f4286b8e0f2..80e5d87a5e50 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -191,7 +191,7 @@ static void mmc_test_prepare_sbc(struct mmc_test_card *test,
{
struct mmc_card *card = test->card;
- if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
+ if (!mrq->sbc || !mmc_host_can_cmd23(card->host) ||
!mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
(card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
mrq->sbc = NULL;
@@ -1510,7 +1510,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)
{
struct mmc_test_area *t = &test->area;
- if (!mmc_can_erase(test->card))
+ if (!mmc_card_can_erase(test->card))
return 0;
return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
@@ -1746,10 +1746,10 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
struct timespec64 ts1, ts2;
int ret;
- if (!mmc_can_trim(test->card))
+ if (!mmc_card_can_trim(test->card))
return RESULT_UNSUP_CARD;
- if (!mmc_can_erase(test->card))
+ if (!mmc_card_can_erase(test->card))
return RESULT_UNSUP_HOST;
for (sz = 512; sz < t->max_sz; sz <<= 1) {
@@ -1863,10 +1863,10 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
struct timespec64 ts1, ts2;
int ret;
- if (!mmc_can_trim(test->card))
+ if (!mmc_card_can_trim(test->card))
return RESULT_UNSUP_CARD;
- if (!mmc_can_erase(test->card))
+ if (!mmc_card_can_erase(test->card))
return RESULT_UNSUP_HOST;
for (sz = 512; sz <= t->max_sz; sz <<= 1) {
@@ -2114,7 +2114,7 @@ static int mmc_test_rw_multiple(struct mmc_test_card *test,
return 0;
/* prepare test area */
- if (mmc_can_erase(test->card) &&
+ if (mmc_card_can_erase(test->card) &&
tdata->prepare & MMC_TEST_PREP_ERASE) {
ret = mmc_erase(test->card, dev_addr,
size / 512, test->card->erase_arg);
@@ -2390,7 +2390,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
512, write);
if (use_sbc && t->blocks > 1 && !mrq->sbc) {
- ret = mmc_host_cmd23(host) ?
+ ret = mmc_host_can_cmd23(host) ?
RESULT_UNSUP_CARD :
RESULT_UNSUP_HOST;
goto out_free;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 3ba62f825b84..284856c8f655 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -184,9 +184,9 @@ static void mmc_queue_setup_discard(struct mmc_card *card,
return;
lim->max_hw_discard_sectors = max_discard;
- if (mmc_can_secure_erase_trim(card))
+ if (mmc_card_can_secure_erase_trim(card))
lim->max_secure_erase_sectors = max_discard;
- if (mmc_can_trim(card) && card->erased_byte == 0)
+ if (mmc_card_can_trim(card) && card->erased_byte == 0)
lim->max_write_zeroes_sectors = max_discard;
/* granularity must not be greater than max. discard */
@@ -352,7 +352,7 @@ static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
};
struct gendisk *disk;
- if (mmc_can_erase(card))
+ if (mmc_card_can_erase(card))
mmc_queue_setup_discard(card, &lim);
lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 89b512905be1..7f893bafaa60 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -34,6 +34,16 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY,
EXT_CSD_REV_ANY),
+ /*
+ * Swissbit series S46-u cards throw I/O errors during tuning requests
+ * after the initial tuning request times out, as expected. This has
+ * only been observed on cards manufactured on 01/2019 that are using
+ * Bay Trail host controllers.
+ */
+ _FIXUP_EXT("0016G", CID_MANFID_SWISSBIT, 0x5342, 2019, 1,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 8eba697d3d86..ec02067f03c5 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -455,7 +455,7 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!mmc_host_uhs(card->host)) {
+ if (!mmc_host_can_uhs(card->host)) {
card->sd_bus_speed = 0;
return;
}
@@ -618,6 +618,29 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
}
/*
+ * Determine if the card should tune or not.
+ */
+static bool mmc_sd_use_tuning(struct mmc_card *card)
+{
+ /*
+ * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (mmc_host_is_spi(card->host))
+ return false;
+
+ switch (card->host->ios.timing) {
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ return true;
+ case MMC_TIMING_UHS_DDR50:
+ return !mmc_card_no_uhs_ddr50_tuning(card);
+ }
+
+ return false;
+}
+
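The net effect per bus timing (SPI hosts always skip tuning, since SPI mode does not define CMD19):

    MMC_TIMING_UHS_SDR50  -> tune
    MMC_TIMING_UHS_SDR104 -> tune (mandatory per the SD spec)
    MMC_TIMING_UHS_DDR50  -> tune unless MMC_QUIRK_NO_UHS_DDR50_TUNING is set
    any other timing      -> no tuning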
+/*
* UHS-I specific initialization procedure
*/
static int mmc_sd_init_uhs_card(struct mmc_card *card)
@@ -660,14 +683,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
if (err)
goto out;
- /*
- * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
- * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
- */
- if (!mmc_host_is_spi(card->host) &&
- (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
- card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
- card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
+ if (mmc_sd_use_tuning(card)) {
err = mmc_execute_tuning(card);
/*
@@ -851,7 +867,7 @@ try_again:
* to switch to 1.8V signaling level. If the card has failed
* repeatedly to switch however, skip this.
*/
- if (retries && mmc_host_uhs(host))
+ if (retries && mmc_host_can_uhs(host))
ocr |= SD_OCR_S18R;
/*
@@ -1493,7 +1509,7 @@ retry:
* signaling. Detect that situation and try to initialize a UHS-I (1.8V)
* transfer mode.
*/
- if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
+ if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_can_uhs(host) &&
mmc_sd_card_using_v18(card) &&
host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
if (mmc_host_set_uhs_voltage(host) ||
@@ -1508,7 +1524,7 @@ retry:
}
/* Initialization sequence for UHS-I cards */
- if (rocr & SD_ROCR_S18A && mmc_host_uhs(host)) {
+ if (rocr & SD_ROCR_S18A && mmc_host_can_uhs(host)) {
err = mmc_sd_init_uhs_card(card);
if (err)
goto free_card;
@@ -1597,15 +1613,6 @@ free_card:
}
/*
- * Host is being removed. Free up the current card.
- */
-static void mmc_sd_remove(struct mmc_host *host)
-{
- mmc_remove_card(host->card);
- host->card = NULL;
-}
-
-/*
* Card detection - card is alive.
*/
static int mmc_sd_alive(struct mmc_host *host)
@@ -1630,7 +1637,8 @@ static void mmc_sd_detect(struct mmc_host *host)
mmc_put_card(host->card, NULL);
if (err) {
- mmc_sd_remove(host);
+ mmc_remove_card(host->card);
+ host->card = NULL;
mmc_claim_host(host);
mmc_detach_bus(host);
@@ -1731,6 +1739,19 @@ out:
}
/*
+ * Host is being removed. Free up the current card and do a graceful power-off.
+ */
+static void mmc_sd_remove(struct mmc_host *host)
+{
+ get_device(&host->card->dev);
+ mmc_remove_card(host->card);
+
+ _mmc_sd_suspend(host);
+
+ put_device(&host->card->dev);
+ host->card = NULL;
+}
+
+/*
* Callback for suspend
*/
static int mmc_sd_suspend(struct mmc_host *host)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4b19b8a16b09..0f753367aec1 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -198,7 +198,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
if (ret)
goto out;
- if (mmc_host_uhs(card->host)) {
+ if (mmc_host_can_uhs(card->host)) {
if (data & SDIO_UHS_DDR50)
card->sw_caps.sd3_bus_mode
|= SD_MODE_UHS_DDR50 | SD_MODE_UHS_SDR50
@@ -527,7 +527,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
- if (!mmc_host_uhs(card->host))
+ if (!mmc_host_can_uhs(card->host))
return 0;
bus_speed = SDIO_SPEED_SDR12;
@@ -669,7 +669,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
WARN_ON(!host->claimed);
/* to query card if 1.8V signalling is supported */
- if (mmc_host_uhs(host))
+ if (mmc_host_can_uhs(host))
ocr |= R4_18V_PRESENT;
try_again:
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 5fd455816393..c5bc6268803e 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -228,13 +228,13 @@ int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config)
}
EXPORT_SYMBOL(mmc_gpiod_set_cd_config);
-bool mmc_can_gpio_cd(struct mmc_host *host)
+bool mmc_host_can_gpio_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
return ctx->cd_gpio ? true : false;
}
-EXPORT_SYMBOL(mmc_can_gpio_cd);
+EXPORT_SYMBOL(mmc_host_can_gpio_cd);
/**
* mmc_gpiod_request_ro - request a gpio descriptor for write protection
@@ -275,10 +275,10 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
-bool mmc_can_gpio_ro(struct mmc_host *host)
+bool mmc_host_can_gpio_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
return ctx->ro_gpio ? true : false;
}
-EXPORT_SYMBOL(mmc_can_gpio_ro);
+EXPORT_SYMBOL(mmc_host_can_gpio_ro);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 264e11fa58ea..c3f0f41a426d 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -250,6 +250,20 @@ config MMC_SDHCI_OF_DWCMSHC
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+config MMC_SDHCI_OF_K1
+ tristate "SDHCI OF support for the SpacemiT K1 SoC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ depends on COMMON_CLK
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ found in the SpacemiT K1 SoC.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_OF_SPARX5
tristate "SDHCI OF support for the MCHP Sparx5 SoC"
depends on MMC_SDHCI_PLTFM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 5147467ec825..75bafc7b162b 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o
+obj-$(CONFIG_MMC_SDHCI_OF_K1) += sdhci-of-k1.o
obj-$(CONFIG_MMC_SDHCI_OF_SPARX5) += sdhci-of-sparx5.o
obj-$(CONFIG_MMC_SDHCI_OF_MA35D1) += sdhci-of-ma35d1.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index b6b6dd677ae5..24abd3a93da9 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -208,7 +209,7 @@ static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
len = min(host->sg_miter.length, blksize);
dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
- read ? "read" : "write", blksize);
+ str_read_write(read), blksize);
host->sg_miter.consumed = len;
host->blocks--;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 14e981b834b6..0e0666c0bb6e 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -714,7 +714,7 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
static void atmci_timeout_timer(struct timer_list *t)
{
- struct atmel_mci *host = from_timer(host, t, timer);
+ struct atmel_mci *host = timer_container_of(host, t, timer);
struct device *dev = host->dev;
dev_dbg(dev, "software timeout\n");
@@ -1652,7 +1652,8 @@ static void atmci_command_complete(struct atmel_mci *host,
static void atmci_detect_change(struct timer_list *t)
{
- struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
+ struct atmel_mci_slot *slot = timer_container_of(slot, t,
+ detect_timer);
bool present;
bool present_old;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index e5f151d092cd..def054ddd256 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -44,6 +44,7 @@
#include <linux/scatterlist.h>
#include <linux/time.h>
#include <linux/workqueue.h>
+#include <linux/string_choices.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -391,8 +392,7 @@ static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
if (time_after(jiffies, wait_max)) {
dev_err(dev, "PIO %s timeout - EDM %08x\n",
- is_read ? "read" : "write",
- edm);
+ str_read_write(is_read), edm);
hsts = SDHSTS_REW_TIME_OUT;
break;
}
@@ -435,12 +435,12 @@ static void bcm2835_transfer_pio(struct bcm2835_host *host)
SDHSTS_CRC7_ERROR |
SDHSTS_FIFO_ERROR)) {
dev_err(dev, "%s transfer error - HSTS %08x\n",
- is_read ? "read" : "write", sdhsts);
+ str_read_write(is_read), sdhsts);
host->data->error = -EILSEQ;
} else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
SDHSTS_REW_TIME_OUT))) {
dev_err(dev, "%s timeout error - HSTS %08x\n",
- is_read ? "read" : "write", sdhsts);
+ str_read_write(is_read), sdhsts);
host->data->error = -ETIMEDOUT;
}
}
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 2e2ff984f0b3..1373deb3f531 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -72,7 +72,7 @@ static int thunder_mmc_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ ret = pcim_request_all_regions(pdev, KBUILD_MODNAME);
if (ret)
return ret;
@@ -164,7 +164,6 @@ error:
}
}
clk_disable_unprepare(host->clk);
- pci_release_regions(pdev);
return ret;
}
@@ -183,7 +182,6 @@ static void thunder_mmc_remove(struct pci_dev *pdev)
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
clk_disable_unprepare(host->clk);
- pci_release_regions(pdev);
}
static const struct pci_device_id thunder_mmc_id_table[] = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 578290015e5b..988492237707 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3179,7 +3179,7 @@ no_dma:
static void dw_mci_cmd11_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, cmd11_timer);
+ struct dw_mci *host = timer_container_of(host, t, cmd11_timer);
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -3193,7 +3193,7 @@ static void dw_mci_cmd11_timer(struct timer_list *t)
static void dw_mci_cto_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, cto_timer);
+ struct dw_mci *host = timer_container_of(host, t, cto_timer);
unsigned long irqflags;
u32 pending;
@@ -3248,7 +3248,7 @@ exit:
static void dw_mci_dto_timer(struct timer_list *t)
{
- struct dw_mci *host = from_timer(host, t, dto_timer);
+ struct dw_mci *host = timer_container_of(host, t, dto_timer);
unsigned long irqflags;
u32 pending;
@@ -3622,7 +3622,7 @@ int dw_mci_runtime_suspend(struct device *dev)
clk_disable_unprepare(host->ciu_clk);
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
@@ -3636,7 +3636,7 @@ int dw_mci_runtime_resume(struct device *dev)
struct dw_mci *host = dev_get_drvdata(dev);
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc))) {
ret = clk_prepare_enable(host->biu_clk);
if (ret)
@@ -3690,7 +3690,7 @@ int dw_mci_runtime_resume(struct device *dev)
err:
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
+ (mmc_host_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index bd1662e275d4..0fbbf57db52e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -641,7 +641,8 @@ poll_timeout:
static void jz4740_mmc_timeout(struct timer_list *t)
{
- struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
+ struct jz4740_mmc_host *host = timer_container_of(host, t,
+ timeout_timer);
if (!test_and_clear_bit(0, &host->waiting))
return;
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index e0ae5a0c9670..8a49c32fd3f9 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -467,7 +467,8 @@ static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
static void meson_mx_mmc_timeout(struct timer_list *t)
{
- struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
+ struct meson_mx_mmc_host *host = timer_container_of(host, t,
+ cmd_timeout);
unsigned long irqflags;
u32 irqc;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 345ea91629e0..31eb90536bce 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -5,6 +5,7 @@
*/
#include <linux/module.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -83,6 +84,7 @@
#define EMMC51_CFG0 0x204
#define EMMC50_CFG0 0x208
#define EMMC50_CFG1 0x20c
+#define EMMC50_CFG2 0x21c
#define EMMC50_CFG3 0x220
#define SDC_FIFO_CFG 0x228
#define CQHCI_SETTING 0x7fc
@@ -233,7 +235,9 @@
/* MSDC_PATCH_BIT mask */
#define MSDC_PATCH_BIT_ODDSUPP BIT(1) /* RW */
+#define MSDC_PATCH_BIT_DIS_WRMON BIT(2) /* RW */
#define MSDC_PATCH_BIT_RD_DAT_SEL BIT(3) /* RW */
+#define MSDC_PATCH_BIT_DESCUP_SEL BIT(6) /* RW */
#define MSDC_INT_DAT_LATCH_CK_SEL GENMASK(9, 7)
#define MSDC_CKGEN_MSDC_DLY_SEL GENMASK(14, 10)
#define MSDC_PATCH_BIT_IODSSEL BIT(16) /* RW */
@@ -246,10 +250,22 @@
#define MSDC_PATCH_BIT_SPCPUSH BIT(29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO BIT(30) /* RW */
-#define MSDC_PATCH_BIT1_CMDTA GENMASK(5, 3) /* RW */
+/* MSDC_PATCH_BIT1 mask */
+#define MSDC_PB1_WRDAT_CRC_TACNTR GENMASK(2, 0) /* RW */
+#define MSDC_PATCH_BIT1_CMDTA GENMASK(5, 3) /* RW */
#define MSDC_PB1_BUSY_CHECK_SEL BIT(7) /* RW */
#define MSDC_PATCH_BIT1_STOP_DLY GENMASK(11, 8) /* RW */
-
+#define MSDC_PB1_DDR_CMD_FIX_SEL BIT(14) /* RW */
+#define MSDC_PB1_SINGLE_BURST BIT(16) /* RW */
+#define MSDC_PB1_RSVD20 GENMASK(18, 17) /* RW */
+#define MSDC_PB1_AUTO_SYNCST_CLR BIT(19) /* RW */
+#define MSDC_PB1_MARK_POP_WATER BIT(20) /* RW */
+#define MSDC_PB1_LP_DCM_EN BIT(21) /* RW */
+#define MSDC_PB1_RSVD3 BIT(22) /* RW */
+#define MSDC_PB1_AHB_GDMA_HCLK BIT(23) /* RW */
+#define MSDC_PB1_MSDC_CLK_ENFEAT GENMASK(31, 24) /* RW */
+
+/* MSDC_PATCH_BIT2 mask */
#define MSDC_PATCH_BIT2_CFGRESP BIT(15) /* RW */
#define MSDC_PATCH_BIT2_CFGCRCSTS BIT(28) /* RW */
#define MSDC_PB2_SUPPORT_64G BIT(1) /* RW */
@@ -291,7 +307,10 @@
/* EMMC50_CFG1 mask */
#define EMMC50_CFG1_DS_CFG BIT(28) /* RW */
-#define EMMC50_CFG3_OUTS_WR GENMASK(4, 0) /* RW */
+/* EMMC50_CFG2 mask */
+#define EMMC50_CFG2_AXI_SET_LEN GENMASK(27, 24) /* RW */
+
+#define EMMC50_CFG3_OUTS_WR GENMASK(4, 0) /* RW */
#define SDC_FIFO_CFG_WRVALIDSEL BIT(24) /* RW */
#define SDC_FIFO_CFG_RDVALIDSEL BIT(25) /* RW */
@@ -927,15 +946,15 @@ static int msdc_ungate_clock(struct msdc_host *host)
static void msdc_new_tx_setting(struct msdc_host *host)
{
+ u32 val;
+
if (!host->top_base)
return;
- sdr_set_bits(host->top_base + LOOP_TEST_CONTROL,
- TEST_LOOP_DSCLK_MUX_SEL);
- sdr_set_bits(host->top_base + LOOP_TEST_CONTROL,
- TEST_LOOP_LATCH_MUX_SEL);
- sdr_clr_bits(host->top_base + LOOP_TEST_CONTROL,
- TEST_HS400_CMD_LOOP_MUX_SEL);
+ val = readl(host->top_base + LOOP_TEST_CONTROL);
+ val |= TEST_LOOP_DSCLK_MUX_SEL;
+ val |= TEST_LOOP_LATCH_MUX_SEL;
+ val &= ~TEST_HS400_CMD_LOOP_MUX_SEL;
switch (host->timing) {
case MMC_TIMING_LEGACY:
@@ -945,19 +964,18 @@ static void msdc_new_tx_setting(struct msdc_host *host)
case MMC_TIMING_UHS_SDR25:
case MMC_TIMING_UHS_DDR50:
case MMC_TIMING_MMC_DDR52:
- sdr_clr_bits(host->top_base + LOOP_TEST_CONTROL,
- LOOP_EN_SEL_CLK);
+ val &= ~LOOP_EN_SEL_CLK;
break;
case MMC_TIMING_UHS_SDR50:
case MMC_TIMING_UHS_SDR104:
case MMC_TIMING_MMC_HS200:
case MMC_TIMING_MMC_HS400:
- sdr_set_bits(host->top_base + LOOP_TEST_CONTROL,
- LOOP_EN_SEL_CLK);
+ val |= LOOP_EN_SEL_CLK;
break;
default:
break;
}
+ writel(val, host->top_base + LOOP_TEST_CONTROL);
}
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
@@ -1816,7 +1834,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_init_hw(struct msdc_host *host)
{
- u32 val;
+ u32 val, pb1_val, pb2_val;
u32 tune_reg = host->dev_comp->pad_tune_reg;
struct mmc_host *mmc = mmc_from_priv(host);
@@ -1869,71 +1887,115 @@ static void msdc_init_hw(struct msdc_host *host)
}
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
- writel(0x403c0046, host->base + MSDC_PATCH_BIT);
- sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
- writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
- sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+
+ /*
+ * Patch bit 0 and 1 are completely rewritten, but for patch bit 2
+ * defaults are retained and, if necessary, only some bits are fixed
+ * up: read the PB2 register here for later usage in this function.
+ */
+ pb2_val = readl(host->base + MSDC_PATCH_BIT2);
+
+ /* Enable odd number support for 8-bit data bus */
+ val = MSDC_PATCH_BIT_ODDSUPP;
+
+ /* Disable SD command register write monitor */
+ val |= MSDC_PATCH_BIT_DIS_WRMON;
+
+ /* Issue transfer done interrupt after GPD update */
+ val |= MSDC_PATCH_BIT_DESCUP_SEL;
+
+ /* Extend R1B busy detection delay (in clock cycles) */
+ val |= FIELD_PREP(MSDC_PATCH_BIT_BUSYDLY, 15);
+
+ /* Enable CRC phase timeout during data write operation */
+ val |= MSDC_PATCH_BIT_DECRCTMO;
+
+ /* Set CKGEN delay to one stage */
+ val |= FIELD_PREP(MSDC_CKGEN_MSDC_DLY_SEL, 1);
+
+ /* First MSDC_PATCH_BIT setup is done: pull the trigger! */
+ writel(val, host->base + MSDC_PATCH_BIT);
+
+ /* Set wr data, crc status, cmd response turnaround period for UHS104 */
+ pb1_val = FIELD_PREP(MSDC_PB1_WRDAT_CRC_TACNTR, 1);
+ pb1_val |= FIELD_PREP(MSDC_PATCH_BIT1_CMDTA, 1);
+ pb1_val |= MSDC_PB1_DDR_CMD_FIX_SEL;
+
+ /* Support 'single' burst type only when AXI_LEN is 0 */
+ sdr_get_field(host->base + EMMC50_CFG2, EMMC50_CFG2_AXI_SET_LEN, &val);
+ if (!val)
+ pb1_val |= MSDC_PB1_SINGLE_BURST;
+
+ /* Set auto sync state clear, block gap stop clk */
+ pb1_val |= MSDC_PB1_RSVD20 | MSDC_PB1_AUTO_SYNCST_CLR | MSDC_PB1_MARK_POP_WATER;
+
+ /* Set low power DCM, use HCLK for GDMA, use MSDC CLK for everything else */
+ pb1_val |= MSDC_PB1_LP_DCM_EN | MSDC_PB1_RSVD3 |
+ MSDC_PB1_AHB_GDMA_HCLK | MSDC_PB1_MSDC_CLK_ENFEAT;
+
+ /* If needed, enable R1b command busy check at controller init time */
+ if (!host->dev_comp->busy_check)
+ pb1_val |= MSDC_PB1_BUSY_CHECK_SEL;
if (host->dev_comp->stop_clk_fix) {
if (host->dev_comp->stop_dly_sel)
- sdr_set_field(host->base + MSDC_PATCH_BIT1,
- MSDC_PATCH_BIT1_STOP_DLY,
- host->dev_comp->stop_dly_sel);
+ pb1_val |= FIELD_PREP(MSDC_PATCH_BIT1_STOP_DLY,
+ host->dev_comp->stop_dly_sel);
- if (host->dev_comp->pop_en_cnt)
- sdr_set_field(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_POP_EN_CNT,
- host->dev_comp->pop_en_cnt);
+ if (host->dev_comp->pop_en_cnt) {
+ pb2_val &= ~MSDC_PB2_POP_EN_CNT;
+ pb2_val |= FIELD_PREP(MSDC_PB2_POP_EN_CNT,
+ host->dev_comp->pop_en_cnt);
+ }
- sdr_clr_bits(host->base + SDC_FIFO_CFG,
- SDC_FIFO_CFG_WRVALIDSEL);
- sdr_clr_bits(host->base + SDC_FIFO_CFG,
- SDC_FIFO_CFG_RDVALIDSEL);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG, SDC_FIFO_CFG_WRVALIDSEL);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG, SDC_FIFO_CFG_RDVALIDSEL);
}
- if (host->dev_comp->busy_check)
- sdr_clr_bits(host->base + MSDC_PATCH_BIT1, BIT(7));
-
if (host->dev_comp->async_fifo) {
- sdr_set_field(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_RESPWAIT, 3);
- if (host->dev_comp->enhance_rx) {
- if (host->top_base)
- sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
- SDC_RX_ENH_EN);
- else
- sdr_set_bits(host->base + SDC_ADV_CFG0,
- SDC_RX_ENHANCE_EN);
+ /* Set CMD response timeout multiplier to 65 + (16 * 3) cycles */
+ pb2_val &= ~MSDC_PB2_RESPWAIT;
+ pb2_val |= FIELD_PREP(MSDC_PB2_RESPWAIT, 3);
+
+ /* eMMC4.5: Select async FIFO path for CMD resp and CRC status */
+ pb2_val &= ~MSDC_PATCH_BIT2_CFGRESP;
+ pb2_val |= MSDC_PATCH_BIT2_CFGCRCSTS;
+
+ if (!host->dev_comp->enhance_rx) {
+ /* eMMC4.5: Delay 2T for CMD resp and CRC status EN signals */
+ pb2_val &= ~(MSDC_PB2_RESPSTSENSEL | MSDC_PB2_CRCSTSENSEL);
+ pb2_val |= FIELD_PREP(MSDC_PB2_RESPSTSENSEL, 2);
+ pb2_val |= FIELD_PREP(MSDC_PB2_CRCSTSENSEL, 2);
+ } else if (host->top_base) {
+ sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, SDC_RX_ENH_EN);
} else {
- sdr_set_field(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_RESPSTSENSEL, 2);
- sdr_set_field(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_CRCSTSENSEL, 2);
+ sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_RX_ENHANCE_EN);
}
- /* use async fifo, then no need tune internal delay */
- sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
- MSDC_PATCH_BIT2_CFGRESP);
- sdr_set_bits(host->base + MSDC_PATCH_BIT2,
- MSDC_PATCH_BIT2_CFGCRCSTS);
}
if (host->dev_comp->support_64g)
- sdr_set_bits(host->base + MSDC_PATCH_BIT2,
- MSDC_PB2_SUPPORT_64G);
+ pb2_val |= MSDC_PB2_SUPPORT_64G;
+
+ /* Patch Bit 1/2 setup is done: pull the trigger! */
+ writel(pb1_val, host->base + MSDC_PATCH_BIT1);
+ writel(pb2_val, host->base + MSDC_PATCH_BIT2);
+ sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+
if (host->dev_comp->data_tune) {
if (host->top_base) {
- sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY_SEL);
- sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
- DATA_K_VALUE_SEL);
- sdr_set_bits(host->top_base + EMMC_TOP_CMD,
- PAD_CMD_RD_RXDLY_SEL);
+ u32 top_ctl_val = readl(host->top_base + EMMC_TOP_CONTROL);
+ u32 top_cmd_val = readl(host->top_base + EMMC_TOP_CMD);
+
+ top_cmd_val |= PAD_CMD_RD_RXDLY_SEL;
+ top_ctl_val |= PAD_DAT_RD_RXDLY_SEL;
+ top_ctl_val &= ~DATA_K_VALUE_SEL;
if (host->tuning_step > PAD_DELAY_HALF) {
- sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY2_SEL);
- sdr_set_bits(host->top_base + EMMC_TOP_CMD,
- PAD_CMD_RD_RXDLY2_SEL);
+ top_cmd_val |= PAD_CMD_RD_RXDLY2_SEL;
+ top_ctl_val |= PAD_DAT_RD_RXDLY2_SEL;
}
+
+ writel(top_ctl_val, host->top_base + EMMC_TOP_CONTROL);
+ writel(top_cmd_val, host->top_base + EMMC_TOP_CMD);
} else {
sdr_set_bits(host->base + tune_reg,
MSDC_PAD_TUNE_RD_SEL |
@@ -2143,15 +2205,17 @@ static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
u32 tune_reg = host->dev_comp->pad_tune_reg;
if (host->top_base) {
+ u32 regval = readl(host->top_base + EMMC_TOP_CMD);
+
+ regval &= ~(PAD_CMD_RXDLY | PAD_CMD_RXDLY2);
+
if (value < PAD_DELAY_HALF) {
- sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, value);
- sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 0);
+ regval |= FIELD_PREP(PAD_CMD_RXDLY, value);
} else {
- sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
- PAD_DELAY_HALF - 1);
- sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2,
- value - PAD_DELAY_HALF);
+ regval |= FIELD_PREP(PAD_CMD_RXDLY, PAD_DELAY_HALF - 1);
+ regval |= FIELD_PREP(PAD_CMD_RXDLY2, value - PAD_DELAY_HALF);
}
+ writel(regval, host->top_base + EMMC_TOP_CMD);
} else {
if (value < PAD_DELAY_HALF) {
sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value);
@@ -2171,17 +2235,18 @@ static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
u32 tune_reg = host->dev_comp->pad_tune_reg;
if (host->top_base) {
+ u32 regval = readl(host->top_base + EMMC_TOP_CONTROL);
+
+ regval &= ~(PAD_DAT_RD_RXDLY | PAD_DAT_RD_RXDLY2);
+
if (value < PAD_DELAY_HALF) {
- sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY, value);
- sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY2, 0);
+ regval |= FIELD_PREP(PAD_DAT_RD_RXDLY, value);
} else {
- sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1);
- sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF);
+ regval |= FIELD_PREP(PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1);
+ regval |= FIELD_PREP(PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF);
}
+ writel(regval, host->top_base + EMMC_TOP_CONTROL);
} else {
if (value < PAD_DELAY_HALF) {
sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value);
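	Both delay helpers above split one logical delay value across two half-range register fields: values below PAD_DELAY_HALF go entirely into the first stage, while larger values saturate the first stage and carry the remainder into the second. A minimal sketch of the split, assuming PAD_DELAY_HALF is 32 as in the mainline header (hypothetical helper, not driver code):

	#include <linux/types.h>

	/* value 10 -> dly1 = 10, dly2 = 0; value 40 -> dly1 = 31, dly2 = 8 */
	static void split_pad_delay(u32 value, u32 *dly1, u32 *dly2)
	{
		if (value < 32) {
			*dly1 = value;
			*dly2 = 0;
		} else {
			*dly1 = 32 - 1;		/* saturate the first delay stage */
			*dly2 = value - 32;	/* remainder goes to the second stage */
		}
	}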
@@ -2977,7 +3042,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
- !mmc_can_gpio_cd(mmc) &&
+ !mmc_host_can_gpio_cd(mmc) &&
host->dev_comp->use_internal_cd) {
/*
* Is removable but no GPIO declared, so
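	The mtk-sd init rework above trades many sdr_set_field()/sdr_set_bits() calls, each a full readl/writel round trip, for one value accumulated in a local variable and written out once. A minimal sketch of that accumulate-then-write pattern, using hypothetical FOO_* masks rather than the driver's real registers:

	#include <linux/bitfield.h>
	#include <linux/io.h>

	#define FOO_MODE	GENMASK(3, 0)	/* hypothetical field */
	#define FOO_EN		BIT(8)		/* hypothetical flag */

	static void foo_setup(void __iomem *reg)
	{
		u32 val = readl(reg);		/* one read... */

		val &= ~FOO_MODE;		/* clear the field before FIELD_PREP */
		val |= FIELD_PREP(FOO_MODE, 5);
		val |= FOO_EN;

		writel(val, reg);		/* ...one write, not three RMW cycles */
	}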
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 912ffacbad88..101f36de7b63 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -509,7 +509,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
static void mvsd_timeout_timer(struct timer_list *t)
{
- struct mvsd_host *host = from_timer(host, t, timer);
+ struct mvsd_host *host = timer_container_of(host, t, timer);
void __iomem *iobase = host->base;
struct mmc_request *mrq;
unsigned long flags;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 95d8d40a06a8..e588e24256cc 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -955,7 +955,7 @@ static bool filter(struct dma_chan *chan, void *param)
static void mxcmci_watchdog(struct timer_list *t)
{
- struct mxcmci_host *host = from_timer(host, t, watchdog);
+ struct mxcmci_host *host = timer_container_of(host, t, watchdog);
struct mmc_request *req = host->req;
unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index c50617d03709..c2be0f04439d 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -639,7 +639,8 @@ static void mmc_omap_abort_command(struct work_struct *work)
static void
mmc_omap_cmd_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
+ struct mmc_omap_host *host = timer_container_of(host, t,
+ cmd_abort_timer);
unsigned long flags;
spin_lock_irqsave(&host->slot_lock, flags);
@@ -655,7 +656,7 @@ mmc_omap_cmd_timer(struct timer_list *t)
static void
mmc_omap_clk_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = from_timer(host, t, clk_timer);
+ struct mmc_omap_host *host = timer_container_of(host, t, clk_timer);
mmc_omap_fclk_enable(host, 0);
}
@@ -879,7 +880,7 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
static void mmc_omap_cover_timer(struct timer_list *t)
{
- struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
+ struct mmc_omap_slot *slot = timer_container_of(slot, t, cover_timer);
queue_work(system_bh_wq, &slot->cover_bh_work);
}
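	The from_timer() to timer_container_of() conversions throughout this series are mechanical renames: both resolve the structure that embeds a struct timer_list from the callback's timer pointer. A minimal sketch with a hypothetical host structure:

	#include <linux/timer.h>

	struct foo_host {
		int state;
		struct timer_list timer;	/* embedded timer */
	};

	static void foo_timeout(struct timer_list *t)
	{
		/* recover the embedding struct from the timer_list pointer */
		struct foo_host *host = timer_container_of(host, t, timer);

		host->state = -1;
	}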
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 8c83e203c516..e6fa3ed42560 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -1112,7 +1112,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
/* For some SoC, we disable internal WP. GPIO may override this */
- if (mmc_can_gpio_ro(host->mmc))
+ if (mmc_host_can_gpio_ro(host->mmc))
mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
/* SDR speeds are only available on Gen2+ */
@@ -1166,12 +1166,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ret)
goto efree;
- rcfg.of_node = of_get_child_by_name(dev->of_node, "vqmmc-regulator");
- if (!of_device_is_available(rcfg.of_node)) {
- of_node_put(rcfg.of_node);
- rcfg.of_node = NULL;
- }
-
+ rcfg.of_node = of_get_available_child_by_name(dev->of_node, "vqmmc-regulator");
if (rcfg.of_node) {
rcfg.driver_data = priv->host;
rdev = devm_regulator_register(dev, &renesas_sdhi_vqmmc_regulator, &rcfg);
@@ -1240,15 +1235,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
- num_irqs = platform_irq_count(pdev);
- if (num_irqs < 0) {
- ret = num_irqs;
- goto edisclk;
- }
-
/* There must be at least one IRQ source */
- if (!num_irqs) {
- ret = -ENXIO;
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs <= 0) {
+ ret = num_irqs ?: -ENOENT;
goto edisclk;
}
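	The reworked IRQ-count check above folds the two error paths into one test by using the GNU ?: extension: a negative count propagates unchanged, while a zero count maps to -ENOENT. The same idiom in isolation (fragment of a hypothetical probe):

	int num_irqs = platform_irq_count(pdev);

	/* num_irqs < 0: propagate the error; num_irqs == 0: no IRQ source */
	if (num_irqs <= 0)
		return num_irqs ?: -ENOENT;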
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index ff78a7c6a04c..ac187a8798b7 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -31,7 +31,9 @@
#include "cqhci.h"
#define ESDHC_SYS_CTRL_DTOCV_MASK GENMASK(19, 16)
+#define ESDHC_SYS_CTRL_RST_FIFO BIT(22)
#define ESDHC_SYS_CTRL_IPP_RST_N BIT(23)
+#define ESDHC_SYS_CTRL_RESET_TUNING BIT(28)
#define ESDHC_CTRL_D3CD 0x08
#define ESDHC_BURST_LEN_EN_INCR (1 << 27)
/* VENDOR SPEC register */
@@ -81,7 +83,11 @@
#define ESDHC_TUNE_CTRL_STEP 1
#define ESDHC_TUNE_CTRL_MIN 0
#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1)
-
+#define ESDHC_TUNE_CTRL_STATUS_TAP_SEL_MASK GENMASK(30, 16)
+#define ESDHC_TUNE_CTRL_STATUS_TAP_SEL_PRE_MASK GENMASK(30, 24)
+#define ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_PRE_MASK GENMASK(14, 8)
+#define ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_OUT_MASK GENMASK(7, 4)
+#define ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_POST_MASK GENMASK(3, 0)
/* strobe dll register */
#define ESDHC_STROBE_DLL_CTRL 0x70
#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0)
@@ -104,6 +110,7 @@
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
+#define ESDHC_TUNING_WINDOW_MASK GENMASK(22, 20)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
#define ESDHC_TUNING_START_TAP_MASK 0x7f
@@ -205,6 +212,8 @@
/* The IP does not have GPIO CD wake capabilities */
#define ESDHC_FLAG_SKIP_CD_WAKE BIT(18)
+#define ESDHC_AUTO_TUNING_WINDOW 3
+
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
@@ -235,6 +244,8 @@ struct esdhc_platform_data {
unsigned int tuning_step; /* The delay cell steps in tuning procedure */
unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */
+ unsigned int saved_tuning_delay_cell; /* save the value of tuning delay cell */
+ unsigned int saved_auto_tuning_window; /* save the auto tuning window width */
};
struct esdhc_soc_data {
@@ -264,35 +275,35 @@ static const struct esdhc_soc_data usdhc_imx6q_data = {
};
static const struct esdhc_soc_data usdhc_imx6sl_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
| ESDHC_FLAG_HS200
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
static const struct esdhc_soc_data usdhc_imx6sll_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
static const struct esdhc_soc_data usdhc_imx6sx_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
static const struct esdhc_soc_data usdhc_imx6ull_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_ERR010450
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
static const struct esdhc_soc_data usdhc_imx7d_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
@@ -308,7 +319,7 @@ static struct esdhc_soc_data usdhc_s32g2_data = {
};
static struct esdhc_soc_data usdhc_imx7ulp_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
@@ -321,7 +332,7 @@ static struct esdhc_soc_data usdhc_imxrt1050_data = {
};
static struct esdhc_soc_data usdhc_imx8qxp_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
@@ -330,7 +341,7 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
};
static struct esdhc_soc_data usdhc_imx8mm_data = {
- .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
@@ -870,6 +881,11 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
esdhc_clrset_le(host, mask, new_val, reg);
return;
+ case SDHCI_TIMEOUT_CONTROL:
+ esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK,
+ FIELD_PREP(ESDHC_SYS_CTRL_DTOCV_MASK, val),
+ ESDHC_SYSTEM_CONTROL);
+ return;
case SDHCI_SOFTWARE_RESET:
if (val & SDHCI_RESET_DATA)
new_val = readl(host->ioaddr + SDHCI_HOST_CONTROL);
@@ -1057,7 +1073,7 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
- u32 ctrl;
+ u32 ctrl, tuning_ctrl, sys_ctrl;
int ret;
/* Reset the tuning circuit */
@@ -1071,6 +1087,21 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
+ /*
+ * Enable standard tuning in case it was cleared in
+ * sdhc_esdhc_tuning_restore().
+ */
+ tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ if (!(tuning_ctrl & ESDHC_STD_TUNING_EN)) {
+ tuning_ctrl |= ESDHC_STD_TUNING_EN;
+ writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
+ }
+
+ /* set the reset tuning bit */
+ sys_ctrl = readl(host->ioaddr + ESDHC_SYSTEM_CONTROL);
+ sys_ctrl |= ESDHC_SYS_CTRL_RESET_TUNING;
+ writel(sys_ctrl, host->ioaddr + ESDHC_SYSTEM_CONTROL);
+
ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
@@ -1130,7 +1161,7 @@ static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
{
- u32 reg;
+ u32 reg, sys_ctrl;
u8 sw_rst;
int ret;
@@ -1149,10 +1180,21 @@ static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
ESDHC_MIX_CTRL_FBCLK_SEL;
writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
- writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ writel(FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_PRE_MASK, val),
+ host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
dev_dbg(mmc_dev(host->mmc),
"tuning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
+
+ /* set RST_FIFO to reset the async FIFO, and wait for it to self-clear */
+ sys_ctrl = readl(host->ioaddr + ESDHC_SYSTEM_CONTROL);
+ sys_ctrl |= ESDHC_SYS_CTRL_RST_FIFO;
+ writel(sys_ctrl, host->ioaddr + ESDHC_SYSTEM_CONTROL);
+ ret = readl_poll_timeout(host->ioaddr + ESDHC_SYSTEM_CONTROL, sys_ctrl,
+ !(sys_ctrl & ESDHC_SYS_CTRL_RST_FIFO), 10, 100);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "RST_FIFO did not self-clear within 100us\n");
}
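	readl_poll_timeout(), as used above for the self-clearing RST_FIFO bit, re-reads a register until a condition holds or the timeout expires, returning -ETIMEDOUT on failure. A minimal sketch of waiting on a hypothetical self-clearing reset bit:

	#include <linux/io.h>
	#include <linux/iopoll.h>

	#define FOO_RST		BIT(22)		/* hypothetical self-clearing bit */

	static int foo_fifo_reset(void __iomem *reg)
	{
		u32 val;

		writel(readl(reg) | FOO_RST, reg);

		/* poll every 10us, time out after 100us; val holds the last read */
		return readl_poll_timeout(reg, val, !(val & FOO_RST), 10, 100);
	}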
static void esdhc_post_tuning(struct sdhci_host *host)
@@ -1172,9 +1214,10 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
{
int min, max, avg, ret;
int win_length, target_min, target_max, target_win_length;
+ u32 clk_tune_ctrl_status, temp;
- min = ESDHC_TUNE_CTRL_MIN;
- max = ESDHC_TUNE_CTRL_MIN;
+ min = target_min = ESDHC_TUNE_CTRL_MIN;
+ max = target_max = ESDHC_TUNE_CTRL_MIN;
target_win_length = 0;
while (max < ESDHC_TUNE_CTRL_MAX) {
/* find the minimum delay first which can pass tuning */
@@ -1211,6 +1254,30 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
/* use average delay to get the best timing */
avg = (target_min + target_max) / 2;
esdhc_prepare_tuning(host, avg);
+
+ /*
+ * Adjust the delay according to the tuning window in preparation
+ * for the auto-tuning logic. Per the hardware recommendation, the
+ * auto-tuning window width must be configured to 3 so that the
+ * auto-tuning logic has enough margin to handle the sample point
+ * shift caused by temperature change.
+ */
+ clk_tune_ctrl_status = FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_PRE_MASK,
+ avg - ESDHC_AUTO_TUNING_WINDOW) |
+ FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_OUT_MASK,
+ ESDHC_AUTO_TUNING_WINDOW) |
+ FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_POST_MASK,
+ ESDHC_AUTO_TUNING_WINDOW);
+
+ writel(clk_tune_ctrl_status, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ ret = readl_poll_timeout(host->ioaddr + ESDHC_TUNE_CTRL_STATUS, temp,
+ clk_tune_ctrl_status ==
+ FIELD_GET(ESDHC_TUNE_CTRL_STATUS_TAP_SEL_MASK, temp),
+ 1, 10);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "clock tuning control status not set in 10us\n");
+
ret = mmc_send_tuning(host->mmc, opcode, NULL);
esdhc_post_tuning(host);
@@ -1385,17 +1452,6 @@ static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27;
}
-static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
-
- /* use maximum timeout counter */
- esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK,
- esdhc_is_usdhc(imx_data) ? 0xF0000 : 0xE0000,
- ESDHC_SYSTEM_CONTROL);
-}
-
static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
int cmd_error = 0;
@@ -1432,7 +1488,6 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.get_min_clock = esdhc_pltfm_get_min_clock,
.get_max_timeout_count = esdhc_get_max_timeout_count,
.get_ro = esdhc_pltfm_get_ro,
- .set_timeout = esdhc_set_timeout,
.set_bus_width = esdhc_pltfm_set_bus_width,
.set_uhs_signaling = esdhc_set_uhs_signaling,
.reset = esdhc_reset,
@@ -1529,6 +1584,16 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
<< ESDHC_TUNING_STEP_SHIFT;
}
+ /*
+ * Configure the tuning window to the hardware-suggested value 3.
+ * This window is used by the auto-tuning logic. The default is 2;
+ * widening it to 3 gives the auto-tuning logic enough margin to
+ * handle the sample point shift caused by temperature change.
+ */
+ tmp &= ~ESDHC_TUNING_WINDOW_MASK;
+ tmp |= FIELD_PREP(ESDHC_TUNING_WINDOW_MASK, ESDHC_AUTO_TUNING_WINDOW);
+
/* Disable the CMD CRC check for tuning, if not, need to
* add some delay after every tuning command, because
* hardware standard tuning logic will directly go to next
@@ -1569,6 +1634,63 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
}
}
+#ifdef CONFIG_PM_SLEEP
+static void sdhc_esdhc_tuning_save(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /*
+ * SD/eMMC does not need this tuning save because it re-initializes
+ * after system resume.
+ * Save the tuning delay value here for SDIO devices, since they may
+ * keep power during system PM. On usdhc, only the SDR50 and SDR104
+ * modes of an SDIO device need tuning, and thus save/restore.
+ */
+ if (host->timing == MMC_TIMING_UHS_SDR50 ||
+ host->timing == MMC_TIMING_UHS_SDR104) {
+ reg = readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ reg = FIELD_GET(ESDHC_TUNE_CTRL_STATUS_TAP_SEL_PRE_MASK, reg);
+ imx_data->boarddata.saved_tuning_delay_cell = reg;
+ }
+}
+
+static void sdhc_esdhc_tuning_restore(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ if (host->timing == MMC_TIMING_UHS_SDR50 ||
+ host->timing == MMC_TIMING_UHS_SDR104) {
+ /*
+ * Restoring the tuning delay value is effectively a
+ * manual tuning method, so clear the standard
+ * tuning enable bit here. ESDHC_STD_TUNING_EN is
+ * set back in esdhc_reset_tuning() when re-tuning
+ * is triggered.
+ */
+ reg = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ reg &= ~ESDHC_STD_TUNING_EN;
+ writel(reg, host->ioaddr + ESDHC_TUNING_CTRL);
+
+ reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
+ reg |= ESDHC_MIX_CTRL_SMPCLK_SEL | ESDHC_MIX_CTRL_FBCLK_SEL;
+ writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
+
+ writel(FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_PRE_MASK,
+ imx_data->boarddata.saved_tuning_delay_cell) |
+ FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_OUT_MASK,
+ ESDHC_AUTO_TUNING_WINDOW) |
+ FIELD_PREP(ESDHC_TUNE_CTRL_STATUS_DLY_CELL_SET_POST_MASK,
+ ESDHC_AUTO_TUNING_WINDOW),
+ host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
+ }
+}
+#endif
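	The save/restore pair above depends on FIELD_GET() and FIELD_PREP() being inverses over the same GENMASK(): the value is saved right-aligned and shifted back into position on restore. A minimal sketch of that round trip with a hypothetical field and register offset:

	#include <linux/bitfield.h>
	#include <linux/io.h>

	#define FOO_REG		0x170		/* hypothetical register offset */
	#define FOO_TAP		GENMASK(30, 24)	/* hypothetical 7-bit delay field */

	static u32 foo_save(void __iomem *base)
	{
		/* extract the field, right-aligned */
		return FIELD_GET(FOO_TAP, readl(base + FOO_REG));
	}

	static void foo_restore(void __iomem *base, u32 saved)
	{
		/* shift the saved value back into bits 24..30 */
		writel(FIELD_PREP(FOO_TAP, saved), base + FOO_REG);
	}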
+
static void esdhc_cqe_enable(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1777,6 +1899,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
* to distinguish the card type.
*/
host->mmc_host_ops.init_card = usdhc_init_card;
+
+ host->max_timeout_count = 0xF;
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
@@ -1885,11 +2009,14 @@ static int sdhci_esdhc_suspend(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
- if (host->mmc->caps2 & MMC_CAP2_CQE) {
- ret = cqhci_suspend(host->mmc);
- if (ret)
- return ret;
- }
+ /*
+ * Runtime-resume the device here for two reasons:
+ * 1. Registers are accessed below (e.g. the wakeup control register),
+ * so the ipg clock must be ungated.
+ * 2. It makes sure pm_runtime_force_resume() in sdhci_esdhc_resume()
+ * really invokes the ->runtime_resume callback (needs_force_resume = 1).
+ */
+ pm_runtime_get_sync(dev);
if ((imx_data->socdata->flags & ESDHC_FLAG_STATE_LOST_IN_LPMODE) &&
(host->tuning_mode != SDHCI_TUNING_MODE_1)) {
@@ -1897,12 +2024,22 @@ static int sdhci_esdhc_suspend(struct device *dev)
mmc_retune_needed(host->mmc);
}
- if (host->tuning_mode != SDHCI_TUNING_MODE_3)
- mmc_retune_needed(host->mmc);
-
- ret = sdhci_suspend_host(host);
- if (ret)
- return ret;
+ /*
+ * For devices that keep power during system PM, save the
+ * tuning delay value in case the usdhc loses power during
+ * system PM.
+ */
+ if (mmc_card_keep_power(host->mmc) && mmc_card_wake_sdio_irq(host->mmc) &&
+ esdhc_is_usdhc(imx_data))
+ sdhc_esdhc_tuning_save(host);
+
+ if (device_may_wakeup(dev)) {
+ /* The irqs of imx are not shared. It is safe to disable */
+ disable_irq(host->irq);
+ ret = sdhci_enable_irq_wakeups(host);
+ if (!ret)
+ dev_warn(dev, "Failed to enable irq wakeup\n");
+ }
ret = pinctrl_pm_select_sleep_state(dev);
if (ret)
@@ -1910,30 +2047,46 @@ static int sdhci_esdhc_suspend(struct device *dev)
ret = mmc_gpio_set_cd_wake(host->mmc, true);
+ /*
+ * Make sure runtime_suspend is invoked, to gate off the clock.
+ * The uSDHC IP supports in-band SDIO wakeup even without a clock.
+ */
+ pm_runtime_force_suspend(dev);
+
return ret;
}
static int sdhci_esdhc_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
- ret = pinctrl_pm_select_default_state(dev);
+ pm_runtime_force_resume(dev);
+
+ ret = mmc_gpio_set_cd_wake(host->mmc, false);
if (ret)
return ret;
/* re-initialize hw state in case it's lost in low power mode */
sdhci_esdhc_imx_hwinit(host);
- ret = sdhci_resume_host(host);
- if (ret)
- return ret;
+ if (host->irq_wake_enabled) {
+ sdhci_disable_irq_wakeups(host);
+ enable_irq(host->irq);
+ }
- if (host->mmc->caps2 & MMC_CAP2_CQE)
- ret = cqhci_resume(host->mmc);
+ /*
+ * Restore the saved tuning delay value for devices that keep
+ * power during system PM.
+ */
+ if (mmc_card_keep_power(host->mmc) && mmc_card_wake_sdio_irq(host->mmc) &&
+ esdhc_is_usdhc(imx_data))
+ sdhc_esdhc_tuning_restore(host);
- if (!ret)
- ret = mmc_gpio_set_cd_wake(host->mmc, false);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
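	The suspend path above is restructured around runtime PM: pm_runtime_get_sync() ungates clocks so the wakeup registers can be touched, and pm_runtime_force_suspend()/pm_runtime_force_resume() reuse the driver's runtime callbacks for the actual clock gating. A minimal sketch of that shape, details elided:

	#include <linux/pm_runtime.h>

	static int foo_suspend(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* ungate clocks for register access */

		/* ...save state, arm wakeup sources... */

		return pm_runtime_force_suspend(dev);	/* gate via ->runtime_suspend() */
	}

	static int foo_resume(struct device *dev)
	{
		pm_runtime_force_resume(dev);	/* ungate via ->runtime_resume() */

		/* ...restore state... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* balance the suspend-side get */
		return 0;
	}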
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 57bd49eea777..66c0d1ba2a33 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1882,6 +1882,11 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (IS_ERR_OR_NULL(ice))
return PTR_ERR_OR_ZERO(ice);
+ if (qcom_ice_get_supported_key_type(ice) != BLK_CRYPTO_KEY_TYPE_RAW) {
+ dev_warn(dev, "Wrapped keys not supported. Disabling inline encryption support.\n");
+ return 0;
+ }
+
msm_host->ice = ice;
/* Initialize the blk_crypto_profile */
@@ -1962,16 +1967,7 @@ static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile,
struct sdhci_msm_host *msm_host =
sdhci_msm_host_from_crypto_profile(profile);
- /* Only AES-256-XTS has been tested so far. */
- if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
- return -EOPNOTSUPP;
-
- return qcom_ice_program_key(msm_host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->bytes,
- key->crypto_cfg.data_unit_size / 512,
- slot);
+ return qcom_ice_program_key(msm_host->ice, slot, key);
}
static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 09b9ab15e499..a20d03fdd6a9 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -745,6 +746,29 @@ static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv
}
}
+static void dwcmshc_rk3576_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
+{
+ struct device *dev = mmc_dev(host->mmc);
+ int ret;
+
+ /*
+ * This works around the design of the RK3576's power domains, which
+ * makes the PD_NVM power domain, which the sdhci controller on the
+ * RK3576 is in, never come back the same way after it has been
+ * runtime-suspended. This can happen during early kernel boot if no driver
+ * is using either PD_NVM or its child power domain PD_SDGMAC for a
+ * short moment, leading to it being turned off to save power. By
+ * keeping it on, sdhci suspending won't lead to PD_NVM becoming a
+ * candidate for getting turned off.
+ */
+ ret = dev_pm_genpd_rpm_always_on(dev, true);
+ if (ret && ret != -EOPNOTSUPP)
+ dev_warn(dev, "failed to set PD rpm always on, SoC may hang later: %pe\n",
+ ERR_PTR(ret));
+
+ dwcmshc_rk35xx_postinit(host, dwc_priv);
+}
+
static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1176,6 +1200,18 @@ static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
.postinit = dwcmshc_rk35xx_postinit,
};
+static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk3576_pdata = {
+ .pdata = {
+ .ops = &sdhci_dwcmshc_rk35xx_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+ },
+ .init = dwcmshc_rk35xx_init,
+ .postinit = dwcmshc_rk3576_postinit,
+};
+
static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = {
.pdata = {
.ops = &sdhci_dwcmshc_th1520_ops,
@@ -1275,6 +1311,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
.data = &sdhci_dwcmshc_rk35xx_pdata,
},
{
+ .compatible = "rockchip,rk3576-dwcmshc",
+ .data = &sdhci_dwcmshc_rk3576_pdata,
+ },
+ {
.compatible = "rockchip,rk3568-dwcmshc",
.data = &sdhci_dwcmshc_rk35xx_pdata,
},
diff --git a/drivers/mmc/host/sdhci-of-k1.c b/drivers/mmc/host/sdhci-of-k1.c
new file mode 100644
index 000000000000..6880d3e9ab62
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-k1.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (c) 2025 Yixun Lan <dlan@gentoo.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/init.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define SDHC_MMC_CTRL_REG 0x114
+#define MISC_INT_EN BIT(1)
+#define MISC_INT BIT(2)
+#define ENHANCE_STROBE_EN BIT(8)
+#define MMC_HS400 BIT(9)
+#define MMC_HS200 BIT(10)
+#define MMC_CARD_MODE BIT(12)
+
+#define SDHC_TX_CFG_REG 0x11C
+#define TX_INT_CLK_SEL BIT(30)
+#define TX_MUX_SEL BIT(31)
+
+#define SDHC_PHY_CTRL_REG 0x160
+#define PHY_FUNC_EN BIT(0)
+#define PHY_PLL_LOCK BIT(1)
+#define HOST_LEGACY_MODE BIT(31)
+
+#define SDHC_PHY_FUNC_REG 0x164
+#define PHY_TEST_EN BIT(7)
+#define HS200_USE_RFIFO BIT(15)
+
+#define SDHC_PHY_DLLCFG 0x168
+#define DLL_PREDLY_NUM GENMASK(3, 2)
+#define DLL_FULLDLY_RANGE GENMASK(5, 4)
+#define DLL_VREG_CTRL GENMASK(7, 6)
+#define DLL_ENABLE BIT(31)
+
+#define SDHC_PHY_DLLCFG1 0x16C
+#define DLL_REG1_CTRL GENMASK(7, 0)
+#define DLL_REG2_CTRL GENMASK(15, 8)
+#define DLL_REG3_CTRL GENMASK(23, 16)
+#define DLL_REG4_CTRL GENMASK(31, 24)
+
+#define SDHC_PHY_DLLSTS 0x170
+#define DLL_LOCK_STATE BIT(0)
+
+#define SDHC_PHY_PADCFG_REG 0x178
+#define PHY_DRIVE_SEL GENMASK(2, 0)
+#define RX_BIAS_CTRL BIT(5)
+
+struct spacemit_sdhci_host {
+ struct clk *clk_core;
+ struct clk *clk_io;
+};
+
+/* All helper functions update the given clr/set bits while preserving the rest */
+static inline void spacemit_sdhci_setbits(struct sdhci_host *host, u32 val, int reg)
+{
+ sdhci_writel(host, sdhci_readl(host, reg) | val, reg);
+}
+
+static inline void spacemit_sdhci_clrbits(struct sdhci_host *host, u32 val, int reg)
+{
+ sdhci_writel(host, sdhci_readl(host, reg) & ~val, reg);
+}
+
+static inline void spacemit_sdhci_clrsetbits(struct sdhci_host *host, u32 clr, u32 set, int reg)
+{
+ u32 val = sdhci_readl(host, reg);
+
+ val = (val & ~clr) | set;
+ sdhci_writel(host, val, reg);
+}
+
+static void spacemit_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ sdhci_reset(host, mask);
+
+ if (mask != SDHCI_RESET_ALL)
+ return;
+
+ spacemit_sdhci_setbits(host, PHY_FUNC_EN | PHY_PLL_LOCK, SDHC_PHY_CTRL_REG);
+
+ spacemit_sdhci_clrsetbits(host, PHY_DRIVE_SEL,
+ RX_BIAS_CTRL | FIELD_PREP(PHY_DRIVE_SEL, 4),
+ SDHC_PHY_PADCFG_REG);
+
+ if (!(host->mmc->caps2 & MMC_CAP2_NO_MMC))
+ spacemit_sdhci_setbits(host, MMC_CARD_MODE, SDHC_MMC_CTRL_REG);
+}
+
+static void spacemit_sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+{
+ if (timing == MMC_TIMING_MMC_HS200)
+ spacemit_sdhci_setbits(host, MMC_HS200, SDHC_MMC_CTRL_REG);
+
+ if (timing == MMC_TIMING_MMC_HS400)
+ spacemit_sdhci_setbits(host, MMC_HS400, SDHC_MMC_CTRL_REG);
+
+ sdhci_set_uhs_signaling(host, timing);
+
+ if (!(host->mmc->caps2 & MMC_CAP2_NO_SDIO))
+ spacemit_sdhci_setbits(host, SDHCI_CTRL_VDD_180, SDHCI_HOST_CONTROL2);
+}
+
+static void spacemit_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (mmc->ios.timing <= MMC_TIMING_UHS_SDR50)
+ spacemit_sdhci_setbits(host, TX_INT_CLK_SEL, SDHC_TX_CFG_REG);
+ else
+ spacemit_sdhci_clrbits(host, TX_INT_CLK_SEL, SDHC_TX_CFG_REG);
+
+ sdhci_set_clock(host, clock);
+};
+
+static void spacemit_sdhci_phy_dll_init(struct sdhci_host *host)
+{
+ u32 state;
+ int ret;
+
+ spacemit_sdhci_clrsetbits(host, DLL_PREDLY_NUM | DLL_FULLDLY_RANGE | DLL_VREG_CTRL,
+ FIELD_PREP(DLL_PREDLY_NUM, 1) |
+ FIELD_PREP(DLL_FULLDLY_RANGE, 1) |
+ FIELD_PREP(DLL_VREG_CTRL, 1),
+ SDHC_PHY_DLLCFG);
+
+ spacemit_sdhci_clrsetbits(host, DLL_REG1_CTRL,
+ FIELD_PREP(DLL_REG1_CTRL, 0x92),
+ SDHC_PHY_DLLCFG1);
+
+ spacemit_sdhci_setbits(host, DLL_ENABLE, SDHC_PHY_DLLCFG);
+
+ ret = readl_poll_timeout(host->ioaddr + SDHC_PHY_DLLSTS, state,
+ state & DLL_LOCK_STATE, 2, 100);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc), "failed to lock PHY DLL within 100us!\n");
+}
+
+static void spacemit_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!ios->enhanced_strobe) {
+ spacemit_sdhci_clrbits(host, ENHANCE_STROBE_EN, SDHC_MMC_CTRL_REG);
+ return;
+ }
+
+ spacemit_sdhci_setbits(host, ENHANCE_STROBE_EN, SDHC_MMC_CTRL_REG);
+ spacemit_sdhci_phy_dll_init(host);
+}
+
+static unsigned int spacemit_sdhci_clk_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+
+static int spacemit_sdhci_pre_select_hs400(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ spacemit_sdhci_setbits(host, MMC_HS400, SDHC_MMC_CTRL_REG);
+ host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+
+ return 0;
+}
+
+static void spacemit_sdhci_post_select_hs400(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ spacemit_sdhci_phy_dll_init(host);
+ host->mmc->caps &= ~MMC_CAP_WAIT_WHILE_BUSY;
+}
+
+static void spacemit_sdhci_pre_hs400_to_hs200(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ spacemit_sdhci_clrbits(host, PHY_FUNC_EN | PHY_PLL_LOCK, SDHC_PHY_CTRL_REG);
+ spacemit_sdhci_clrbits(host, MMC_HS400 | MMC_HS200 | ENHANCE_STROBE_EN, SDHC_MMC_CTRL_REG);
+ spacemit_sdhci_clrbits(host, HS200_USE_RFIFO, SDHC_PHY_FUNC_REG);
+
+ udelay(5);
+
+ spacemit_sdhci_setbits(host, PHY_FUNC_EN | PHY_PLL_LOCK, SDHC_PHY_CTRL_REG);
+}
+
+static inline int spacemit_sdhci_get_clocks(struct device *dev,
+ struct sdhci_pltfm_host *pltfm_host)
+{
+ struct spacemit_sdhci_host *sdhst = sdhci_pltfm_priv(pltfm_host);
+
+ sdhst->clk_core = devm_clk_get_enabled(dev, "core");
+ if (IS_ERR(sdhst->clk_core))
+ return -EINVAL;
+
+ sdhst->clk_io = devm_clk_get_enabled(dev, "io");
+ if (IS_ERR(sdhst->clk_io))
+ return -EINVAL;
+
+ pltfm_host->clk = sdhst->clk_io;
+
+ return 0;
+}
+
+static const struct sdhci_ops spacemit_sdhci_ops = {
+ .get_max_clock = spacemit_sdhci_clk_get_max_clock,
+ .reset = spacemit_sdhci_reset,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_clock = spacemit_sdhci_set_clock,
+ .set_uhs_signaling = spacemit_sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data spacemit_sdhci_k1_pdata = {
+ .ops = &spacemit_sdhci_ops,
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
+static const struct of_device_id spacemit_sdhci_of_match[] = {
+ { .compatible = "spacemit,k1-sdhci" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, spacemit_sdhci_of_match);
+
+static int spacemit_sdhci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spacemit_sdhci_host *sdhst;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ struct mmc_host_ops *mops;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &spacemit_sdhci_k1_pdata, sizeof(*sdhst));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_pltfm;
+
+ sdhci_get_of_property(pdev);
+
+ if (!(host->mmc->caps2 & MMC_CAP2_NO_MMC)) {
+ mops = &host->mmc_host_ops;
+ mops->hs400_prepare_ddr = spacemit_sdhci_pre_select_hs400;
+ mops->hs400_complete = spacemit_sdhci_post_select_hs400;
+ mops->hs400_downgrade = spacemit_sdhci_pre_hs400_to_hs200;
+ mops->hs400_enhanced_strobe = spacemit_sdhci_hs400_enhanced_strobe;
+ }
+
+ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
+ ret = spacemit_sdhci_get_clocks(dev, pltfm_host);
+ if (ret)
+ goto err_pltfm;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_pltfm;
+
+ return 0;
+
+err_pltfm:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static struct platform_driver spacemit_sdhci_driver = {
+ .driver = {
+ .name = "sdhci-spacemit",
+ .of_match_table = spacemit_sdhci_of_match,
+ },
+ .probe = spacemit_sdhci_probe,
+ .remove = sdhci_pltfm_remove,
+};
+module_platform_driver(spacemit_sdhci_driver);
+
+MODULE_DESCRIPTION("SpacemiT SDHCI platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 26a9a8b5682a..8897839ab2aa 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1270,7 +1270,7 @@ static int sdhci_omap_probe(struct platform_device *pdev)
mmc->f_max = 48000000;
}
- if (!mmc_can_gpio_ro(mmc))
+ if (!mmc_host_can_gpio_ro(mmc))
mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
pltfm_host->clk = devm_clk_get(dev, "fck");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 5f78be7ae16d..f008167d1863 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -158,7 +158,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
u32 present;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
+ !mmc_card_is_removable(host->mmc) || mmc_host_can_gpio_cd(host->mmc))
return;
if (enable) {
@@ -2571,7 +2571,7 @@ int sdhci_get_ro(struct mmc_host *mmc)
is_readonly = 0;
} else if (host->ops->get_ro) {
is_readonly = host->ops->get_ro(host);
- } else if (mmc_can_gpio_ro(mmc)) {
+ } else if (mmc_host_can_gpio_ro(mmc)) {
is_readonly = mmc_gpio_get_ro(mmc);
/* Do not invert twice */
allow_invert = !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
@@ -3245,7 +3245,7 @@ static void sdhci_timeout_timer(struct timer_list *t)
struct sdhci_host *host;
unsigned long flags;
- host = from_timer(host, t, timer);
+ host = timer_container_of(host, t, timer);
spin_lock_irqsave(&host->lock, flags);
@@ -3267,7 +3267,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
struct sdhci_host *host;
unsigned long flags;
- host = from_timer(host, t, data_timer);
+ host = timer_container_of(host, t, data_timer);
spin_lock_irqsave(&host->lock, flags);
@@ -3744,7 +3744,7 @@ static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
{
return mmc_card_is_removable(host->mmc) &&
!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
- !mmc_can_gpio_cd(host->mmc);
+ !mmc_host_can_gpio_cd(host->mmc);
}
/*
@@ -3755,7 +3755,7 @@ static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
* sdhci_disable_irq_wakeups() since it will be set by
* sdhci_enable_card_detection() or sdhci_init().
*/
-static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
+bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
SDHCI_WAKE_ON_INT;
@@ -3787,8 +3787,9 @@ static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
return host->irq_wake_enabled;
}
+EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
-static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
+void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
u8 val;
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
@@ -3802,6 +3803,7 @@ static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
host->irq_wake_enabled = false;
}
+EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index cd0e35a80542..f9d65dd0f2b2 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -875,6 +875,8 @@ void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len, unsigned int cmd);
#ifdef CONFIG_PM
+bool sdhci_enable_irq_wakeups(struct sdhci_host *host);
+void sdhci_disable_irq_wakeups(struct sdhci_host *host);
int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
int sdhci_runtime_suspend_host(struct sdhci_host *host);
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index f75c31815ab0..73385ff4c0f3 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -155,6 +155,7 @@ struct sdhci_am654_data {
u32 tuning_loop;
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
+#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
};
struct window {
@@ -166,6 +167,7 @@ struct window {
struct sdhci_am654_driver_data {
const struct sdhci_pltfm_data *pdata;
u32 flags;
+ u32 quirks;
#define IOMUX_PRESENT (1 << 0)
#define FREQSEL_2_BIT (1 << 1)
#define STRBSEL_4_BIT (1 << 2)
@@ -356,6 +358,29 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
sdhci_set_clock(host, clock);
}
+static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) &&
+ ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+ if (ret < 0) {
+ pr_err("%s: Switching to 1.8V signalling voltage failed\n",
+ mmc_hostname(mmc));
+ return -EIO;
+ }
+ }
+ return 0;
+ }
+
+ return sdhci_start_signal_voltage_switch(mmc, ios);
+}
+
static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg)
{
writeb(val, host->ioaddr + reg);
@@ -650,6 +675,12 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.flags = IOMUX_PRESENT,
};
+static const struct sdhci_am654_driver_data sdhci_am62_4bit_drvdata = {
+ .pdata = &sdhci_j721e_4bit_pdata,
+ .flags = IOMUX_PRESENT,
+ .quirks = SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA,
+};
+
static const struct soc_device_attribute sdhci_am654_devices[] = {
{ .family = "AM65X",
.revision = "SR1.0",
@@ -872,7 +903,7 @@ static const struct of_device_id sdhci_am654_of_match[] = {
},
{
.compatible = "ti,am62-sdhci",
- .data = &sdhci_j721e_4bit_drvdata,
+ .data = &sdhci_am62_4bit_drvdata,
},
{ /* sentinel */ }
};
@@ -906,6 +937,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
sdhci_am654->flags = drvdata->flags;
+ sdhci_am654->quirks = drvdata->quirks;
clk_xin = devm_clk_get(dev, "clk_xin");
if (IS_ERR(clk_xin)) {
@@ -940,6 +972,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto err_pltfm_free;
}
+ host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
pm_runtime_get_noresume(dev);
diff --git a/drivers/mmc/host/sunplus-mmc.c b/drivers/mmc/host/sunplus-mmc.c
index 1cddea615a27..63279760239c 100644
--- a/drivers/mmc/host/sunplus-mmc.c
+++ b/drivers/mmc/host/sunplus-mmc.c
@@ -791,7 +791,7 @@ static int spmmc_get_cd(struct mmc_host *mmc)
{
int ret = 0;
- if (mmc_can_gpio_cd(mmc))
+ if (mmc_host_can_gpio_cd(mmc))
ret = mmc_gpio_get_cd(mmc);
if (ret < 0)
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 713223f2d377..5e5ec92f80e6 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -777,7 +777,7 @@ static void tifm_sd_end_cmd(struct work_struct *t)
static void tifm_sd_abort(struct timer_list *t)
{
- struct tifm_sd *host = from_timer(host, t, timer);
+ struct tifm_sd *host = timer_container_of(host, t, timer);
pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 04c1c54df791..b71241f55df5 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1176,14 +1176,14 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
dma_max_mapping_size(&pdev->dev));
mmc->max_seg_size = mmc->max_req_size;
- if (mmc_can_gpio_ro(mmc))
+ if (mmc_host_can_gpio_ro(mmc))
_host->ops.get_ro = mmc_gpio_get_ro;
- if (mmc_can_gpio_cd(mmc))
+ if (mmc_host_can_gpio_cd(mmc))
_host->ops.get_cd = mmc_gpio_get_cd;
/* must be set before tmio_mmc_reset() */
- _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
+ _host->native_hotplug = !(mmc_host_can_gpio_cd(mmc) ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 909d80a02824..9903966c2f54 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -937,7 +937,7 @@ static void via_sdc_timeout(struct timer_list *t)
struct via_crdr_mmc_host *sdhost;
unsigned long flags;
- sdhost = from_timer(sdhost, t, timer);
+ sdhost = timer_container_of(sdhost, t, timer);
spin_lock_irqsave(&sdhost->lock, flags);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index dd71e5b8e1a5..f498fe11ecdf 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -740,8 +740,8 @@ static void vub300_deadwork_thread(struct work_struct *work)
static void vub300_inactivity_timer_expired(struct timer_list *t)
{ /* softirq */
- struct vub300_mmc_host *vub300 = from_timer(vub300, t,
- inactivity_timer);
+ struct vub300_mmc_host *vub300 = timer_container_of(vub300, t,
+ inactivity_timer);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
} else if (vub300->cmd) {
@@ -1180,8 +1180,8 @@ static void send_command(struct vub300_mmc_host *vub300)
*/
static void vub300_sg_timed_out(struct timer_list *t)
{
- struct vub300_mmc_host *vub300 = from_timer(vub300, t,
- sg_transfer_timer);
+ struct vub300_mmc_host *vub300 = timer_container_of(vub300, t,
+ sg_transfer_timer);
vub300->usb_timed_out = 1;
usb_sg_cancel(&vub300->sg_request);
usb_unlink_urb(vub300->command_out_urb);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index d5974b355a5a..2ae787d966de 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -947,7 +947,7 @@ static const struct mmc_host_ops wbsd_ops = {
static void wbsd_reset_ignore(struct timer_list *t)
{
- struct wbsd_host *host = from_timer(host, t, ignore_timer);
+ struct wbsd_host *host = timer_container_of(host, t, ignore_timer);
BUG_ON(host == NULL);
diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c
index 2199ba821922..cf5be9c449a5 100644
--- a/drivers/most/most_usb.c
+++ b/drivers/most/most_usb.c
@@ -667,7 +667,7 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
*/
static void link_stat_timer_handler(struct timer_list *t)
{
- struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
+ struct most_dev *mdev = timer_container_of(mdev, t, link_stat_timer);
schedule_work(&mdev->poll_work_obj);
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index ff2f9e55ef28..aed653ce8fa2 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -98,7 +98,7 @@ config MTD_MCHP48L640
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
This enable SNOR support on SPEAR platforms using SMI controller
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
index 56b56f726b99..1bf9a5a64b87 100644
--- a/drivers/mtd/nand/ecc-mxic.c
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
- int nents, step, ret;
+ int nents, step, ret = 0;
if (req->mode == MTD_OPS_RAW)
return 0;
diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
index e0ed25b5afea..4dc4d65e7d32 100644
--- a/drivers/mtd/nand/qpic_common.c
+++ b/drivers/mtd/nand/qpic_common.c
@@ -236,21 +236,21 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
int i, ret;
struct bam_cmd_element *bam_ce_buffer;
struct bam_transaction *bam_txn = nandc->bam_txn;
+ u32 offset;
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
/* fill the command desc */
for (i = 0; i < size; i++) {
+ offset = nandc->props->bam_offset + reg_off + 4 * i;
if (read)
bam_prep_ce(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_READ_COMMAND,
+ offset, BAM_READ_COMMAND,
reg_buf_dma_addr(nandc,
(__le32 *)vaddr + i));
else
bam_prep_ce_le32(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_WRITE_COMMAND,
+ offset, BAM_WRITE_COMMAND,
*((__le32 *)vaddr + i));
}
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index b8035df8f732..4b99d9c422c3 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -34,7 +34,7 @@ config MTD_NAND_DENALI_DT
config MTD_NAND_AMS_DELTA
tristate "Amstrad E3 NAND controller"
depends on MACH_AMS_DELTA || COMPILE_TEST
- default y
+ default MACH_AMS_DELTA
help
Support for NAND flash on Amstrad E3 (Delta).
@@ -462,6 +462,13 @@ config MTD_NAND_NUVOTON_MA35
Enables support for the NAND controller found on
the Nuvoton MA35 series SoCs.
+config MTD_NAND_LOONGSON1
+ tristate "Loongson1 NAND controller"
+ depends on LOONGSON1_APB_DMA || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Enables support for NAND controller on Loongson1 SoCs.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 99e79c448847..711d043ad4f8 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
+obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 6487dfc64258..e532c3535b16 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -171,6 +171,7 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
u32 code = 0;
+ int rc;
if (cmd == NAND_CMD_NONE)
return;
@@ -182,7 +183,9 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
if (cmd != NAND_CMD_RESET)
code |= NCTL_CSA;
- bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+ rc = bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+ if (rc)
+ pr_err("ctl_cmd failed with error %d\n", rc);
}
/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 17f6d9723df9..62bdda3be92f 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -65,6 +65,7 @@ module_param(wp_on, int, 0444);
#define CMD_PARAMETER_READ 0x0e
#define CMD_PARAMETER_CHANGE_COL 0x0f
#define CMD_LOW_LEVEL_OP 0x10
+#define CMD_NOT_SUPPORTED 0xff
struct brcm_nand_dma_desc {
u32 next_desc;
@@ -101,7 +102,7 @@ struct brcm_nand_dma_desc {
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
-#define NAND_POLL_STATUS_TIMEOUT_MS 100
+#define NAND_POLL_STATUS_TIMEOUT_MS 500
#define EDU_CMD_WRITE 0x00
#define EDU_CMD_READ 0x01
@@ -199,6 +200,30 @@ static const u16 flash_dma_regs_v4[] = {
[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};
+/* Native command conversion for legacy controllers (< v5.0) */
+static const u8 native_cmd_conv[] = {
+ [NAND_CMD_READ0] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READ1] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDOUT] = CMD_PARAMETER_CHANGE_COL,
+ [NAND_CMD_PAGEPROG] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READOOB] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_ERASE1] = CMD_BLOCK_ERASE,
+ [NAND_CMD_STATUS] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_SEQIN] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDIN] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READID] = CMD_DEVICE_ID_READ,
+ [NAND_CMD_ERASE2] = CMD_NULL,
+ [NAND_CMD_PARAM] = CMD_PARAMETER_READ,
+ [NAND_CMD_GET_FEATURES] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_SET_FEATURES] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RESET] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READSTART] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READCACHESEQ] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_READCACHEEND] = CMD_NOT_SUPPORTED,
+ [NAND_CMD_RNDOUTSTART] = CMD_NULL,
+ [NAND_CMD_CACHEDPROG] = CMD_NOT_SUPPORTED,
+};
+
/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -237,6 +262,12 @@ struct brcmnand_controller {
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
+ /* Functions to be called from exec_op */
+ int (*check_instr)(struct nand_chip *chip,
+ const struct nand_operation *op);
+ int (*exec_instr)(struct nand_chip *chip,
+ const struct nand_operation *op);
+
/* EDU info, per-transaction */
const u16 *edu_offsets;
void __iomem *edu_base;
@@ -310,9 +341,6 @@ struct brcmnand_host {
struct platform_device *pdev;
int cs;
- unsigned int last_cmd;
- unsigned int last_byte;
- u64 last_addr;
struct brcmnand_cfg hwcfg;
struct brcmnand_controller *ctrl;
};
@@ -2233,14 +2261,11 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
- return brcmnand_read(mtd, chip, host->last_addr,
- mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ return brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
+ (u32 *)buf, oob);
}
static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
@@ -2252,11 +2277,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int ret;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
brcmnand_set_ecc_enabled(host, 0);
- ret = brcmnand_read(mtd, chip, host->last_addr,
- mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ ret = brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
+ (u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
}
@@ -2363,13 +2386,10 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
u64 addr = (u64)page << chip->page_shift;
- host->last_addr = addr;
-
- return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ return brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
@@ -2381,9 +2401,8 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
u64 addr = (u64)page << chip->page_shift;
int ret = 0;
- host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
- ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ ret = brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
@@ -2490,18 +2509,190 @@ static int brcmnand_op_is_reset(const struct nand_operation *op)
return 0;
}
+static int brcmnand_check_instructions(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ return 0;
+}
+
+static int brcmnand_exec_instructions(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = brcmnand_exec_instr(host, i, op);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmnand_check_instructions_legacy(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ unsigned int i;
+ u8 cmd;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+ if (cmd == CMD_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ break;
+ case NAND_OP_ADDR_INSTR:
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_WAITRDY_INSTR:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int brcmnand_exec_instructions_legacy(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ const struct nand_op_instr *instr;
+ unsigned int i, j;
+ u8 cmd = CMD_NULL, last_cmd = CMD_NULL;
+ int ret = 0;
+ u64 last_addr;
+
+ for (i = 0; i < op->ninstrs; i++) {
+ instr = &op->instrs[i];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+ if (cmd == CMD_NOT_SUPPORTED) {
+ dev_err(ctrl->dev, "unsupported cmd=%d\n",
+ instr->ctx.cmd.opcode);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ } else if (instr->type == NAND_OP_ADDR_INSTR) {
+ u64 addr = 0;
+
+ if (cmd == CMD_NULL)
+ continue;
+
+ if (instr->ctx.addr.naddrs > 8) {
+ dev_err(ctrl->dev, "unsupported naddrs=%u\n",
+ instr->ctx.addr.naddrs);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ for (j = 0; j < instr->ctx.addr.naddrs; j++)
+ addr |= (instr->ctx.addr.addrs[j]) << (j << 3);
+
+ if (cmd == CMD_BLOCK_ERASE)
+ addr <<= chip->page_shift;
+ else if (cmd == CMD_PARAMETER_CHANGE_COL)
+ addr &= ~((u64)(FC_BYTES - 1));
+
+ brcmnand_set_cmd_addr(mtd, addr);
+ brcmnand_send_cmd(host, cmd);
+ last_addr = addr;
+ last_cmd = cmd;
+ cmd = CMD_NULL;
+ brcmnand_waitfunc(chip);
+
+ if (last_cmd == CMD_PARAMETER_READ ||
+ last_cmd == CMD_PARAMETER_CHANGE_COL) {
+ /* Copy flash cache word-wise */
+ u32 *flash_cache = (u32 *)ctrl->flash_cache;
+
+ brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+ /*
+ * Must cache the FLASH_CACHE now, since changes in
+ * SECTOR_SIZE_1K may invalidate it
+ */
+ for (j = 0; j < FC_WORDS; j++)
+ /*
+ * Flash cache is big endian for parameter pages, at
+ * least on STB SoCs
+ */
+ flash_cache[j] = be32_to_cpu(brcmnand_read_fc(ctrl, j));
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+ }
+ } else if (instr->type == NAND_OP_DATA_IN_INSTR) {
+ u8 *in = instr->ctx.data.buf.in;
+
+ if (last_cmd == CMD_DEVICE_ID_READ) {
+ u32 val;
+
+ if (instr->ctx.data.len > 8) {
+ dev_err(ctrl->dev, "unsupported len=%u\n",
+ instr->ctx.data.len);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ for (j = 0; j < instr->ctx.data.len; j++) {
+ if (j == 0)
+ val = brcmnand_read_reg(ctrl, BRCMNAND_ID);
+ else if (j == 4)
+ val = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT);
+
+ in[j] = (val >> (24 - ((j % 4) << 3))) & 0xff;
+ }
+ } else if (last_cmd == CMD_PARAMETER_READ ||
+ last_cmd == CMD_PARAMETER_CHANGE_COL) {
+ u64 addr;
+ u32 offs;
+
+ for (j = 0; j < instr->ctx.data.len; j++) {
+ addr = last_addr + j;
+ offs = addr & (FC_BYTES - 1);
+
+ if (j > 0 && offs == 0)
+ nand_change_read_column_op(chip, addr, NULL, 0,
+ false);
+
+ in[j] = ctrl->flash_cache[offs];
+ }
+ }
+ } else if (instr->type == NAND_OP_WAITRDY_INSTR) {
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ if (ret)
+ break;
+ } else {
+ dev_err(ctrl->dev, "unsupported instruction type: %d\n", instr->type);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static int brcmnand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *status;
- unsigned int i;
int ret = 0;
if (check_only)
- return 0;
+ return ctrl->check_instr(chip, op);
if (brcmnand_op_is_status(op)) {
status = op->instrs[1].ctx.data.buf.in;
@@ -2525,11 +2716,7 @@ static int brcmnand_exec_op(struct nand_chip *chip,
if (op->deassert_wp)
brcmnand_wp(mtd, 0);
- for (i = 0; i < op->ninstrs; i++) {
- ret = brcmnand_exec_instr(host, i, op);
- if (ret)
- break;
- }
+ ret = ctrl->exec_instr(chip, op);
if (op->deassert_wp)
brcmnand_wp(mtd, 1);
@@ -3142,6 +3329,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
if (ret)
goto err;
+ /* Only v5.0+ controllers have low level ops support */
+ if (ctrl->nand_version >= 0x0500) {
+ ctrl->check_instr = brcmnand_check_instructions;
+ ctrl->exec_instr = brcmnand_exec_instructions;
+ } else {
+ ctrl->check_instr = brcmnand_check_instructions_legacy;
+ ctrl->exec_instr = brcmnand_exec_instructions_legacy;
+ }
+
/*
* Most chips have this cache at a fixed offset within 'nand' block.
* Some must specify this region separately.
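
The brcmnand hunks above replace the open-coded instruction loop in brcmnand_exec_op() with two per-controller hooks, chosen at probe time: v5.0+ controllers get the generic checker plus the brcmnand_exec_instr() loop, while older controllers are routed through the legacy helpers, which only accept opcodes that native_cmd_conv[] can translate. The header hunk adding the hooks is not shown here; judging by the call sites, the new struct brcmnand_controller members would look roughly like this (a sketch inferred from the diff, not the verbatim header change):

	struct brcmnand_controller {
		/* ... existing fields ... */

		/* low-level op hooks, selected in brcmnand_probe() by version */
		int (*check_instr)(struct nand_chip *chip,
				   const struct nand_operation *op);
		int (*exec_instr)(struct nand_chip *chip,
				  const struct nand_operation *op);
	};

With that split, the check_only path of exec_op becomes a real capability query instead of unconditionally returning 0.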
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index 341318024a19..ec95d787001b 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -351,20 +351,20 @@ static int __init cs553x_init(void)
return -ENXIO;
/* If it doesn't have the CS553[56], abort */
- rdmsrl(MSR_DIVIL_GLD_CAP, val);
+ rdmsrq(MSR_DIVIL_GLD_CAP, val);
val &= ~0xFFULL;
if (val != CAP_CS5535 && val != CAP_CS5536)
return -ENXIO;
/* If it doesn't have the NAND controller enabled, abort */
- rdmsrl(MSR_DIVIL_BALL_OPTS, val);
+ rdmsrq(MSR_DIVIL_BALL_OPTS, val);
if (val & PIN_OPT_IDE) {
pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
return -ENXIO;
}
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
- rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
+ rdmsrq(MSR_DIVIL_LBAR_FLSH0 + i, val);
if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
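
The cs553x change is mechanical: the x86 MSR accessors were renamed from rdmsrl()/wrmsrl() to rdmsrq()/wrmsrq(), "q" for the 64-bit quadword they read or write. The calling convention is unchanged, for example:

	u64 val;

	rdmsrq(MSR_DIVIL_GLD_CAP, val);	/* read the full 64-bit MSR into val */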
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index e22094e39546..97fa32d73441 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -68,7 +68,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
- ret = pci_request_regions(dev, DENALI_NAND_NAME);
+ ret = pcim_request_all_regions(dev, DENALI_NAND_NAME);
if (ret) {
dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
return ret;
@@ -77,20 +77,18 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto regions_release;
+ return -ENOMEM;
}
denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
- ret = -ENOMEM;
- goto regions_release;
+ return -ENOMEM;
}
ret = denali_init(denali);
if (ret)
- goto regions_release;
+ return ret;
nsels = denali->nbanks;
@@ -118,8 +116,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_remove_denali:
denali_remove(denali);
-regions_release:
- pci_release_regions(dev);
return ret;
}
@@ -127,7 +123,6 @@ static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_controller *denali = pci_get_drvdata(dev);
- pci_release_regions(dev);
denali_remove(denali);
}
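
The denali_pci conversion moves from pci_request_regions() to the device-managed pcim_request_all_regions(), so the BARs are released automatically on unbind and both the error-path label and the pci_release_regions() call in remove can be dropped. The resulting probe shape, as a minimal sketch (the foo_* names are placeholders):

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		ret = pcim_request_all_regions(pdev, "foo");	/* devres-managed */
		if (ret)
			return ret;

		/* later failures can simply return; no unwind label needed */
		return 0;
	}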
diff --git a/drivers/mtd/nand/raw/loongson1-nand-controller.c b/drivers/mtd/nand/raw/loongson1-nand-controller.c
new file mode 100644
index 000000000000..ef8e4f9ce287
--- /dev/null
+++ b/drivers/mtd/nand/raw/loongson1-nand-controller.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NAND Controller Driver for Loongson-1 SoC
+ *
+ * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+/* Loongson-1 NAND Controller Registers */
+#define LS1X_NAND_CMD 0x0
+#define LS1X_NAND_ADDR1 0x4
+#define LS1X_NAND_ADDR2 0x8
+#define LS1X_NAND_TIMING 0xc
+#define LS1X_NAND_IDL 0x10
+#define LS1X_NAND_IDH_STATUS 0x14
+#define LS1X_NAND_PARAM 0x18
+#define LS1X_NAND_OP_NUM 0x1c
+
+/* NAND Command Register Bits */
+#define LS1X_NAND_CMD_OP_DONE BIT(10)
+#define LS1X_NAND_CMD_OP_SPARE BIT(9)
+#define LS1X_NAND_CMD_OP_MAIN BIT(8)
+#define LS1X_NAND_CMD_STATUS BIT(7)
+#define LS1X_NAND_CMD_RESET BIT(6)
+#define LS1X_NAND_CMD_READID BIT(5)
+#define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4)
+#define LS1X_NAND_CMD_ERASE BIT(3)
+#define LS1X_NAND_CMD_WRITE BIT(2)
+#define LS1X_NAND_CMD_READ BIT(1)
+#define LS1X_NAND_CMD_VALID BIT(0)
+
+#define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0)
+#define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8)
+#define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8)
+
+#define LS1X_NAND_COL_ADDR_CYC 2U
+#define LS1X_NAND_MAX_ADDR_CYC 5U
+
+#define BITS_PER_WORD (4 * BITS_PER_BYTE)
+
+struct ls1x_nand_host;
+
+struct ls1x_nand_op {
+ char addrs[LS1X_NAND_MAX_ADDR_CYC];
+ unsigned int naddrs;
+ unsigned int addrs_offset;
+ unsigned int aligned_offset;
+ unsigned int cmd_reg;
+ unsigned int row_start;
+ unsigned int rdy_timeout_ms;
+ unsigned int orig_len;
+ bool is_readid;
+ bool is_erase;
+ bool is_write;
+ bool is_read;
+ bool is_change_column;
+ size_t len;
+ char *buf;
+};
+
+struct ls1x_nand_data {
+ unsigned int status_field;
+ unsigned int op_scope_field;
+ unsigned int hold_cycle;
+ unsigned int wait_cycle;
+ void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op);
+};
+
+struct ls1x_nand_host {
+ struct device *dev;
+ struct nand_chip chip;
+ struct nand_controller controller;
+ const struct ls1x_nand_data *data;
+ void __iomem *reg_base;
+ struct regmap *regmap;
+ /* DMA Engine stuff */
+ dma_addr_t dma_base;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
+ struct completion dma_complete;
+};
+
+static const struct regmap_config ls1x_nand_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+
+ op->row_start = chip->page_shift + 1;
+
+ /* The controller abstracts the following NAND operations. */
+ switch (opcode) {
+ case NAND_CMD_STATUS:
+ op->cmd_reg = LS1X_NAND_CMD_STATUS;
+ break;
+ case NAND_CMD_RESET:
+ op->cmd_reg = LS1X_NAND_CMD_RESET;
+ break;
+ case NAND_CMD_READID:
+ op->is_readid = true;
+ op->cmd_reg = LS1X_NAND_CMD_READID;
+ break;
+ case NAND_CMD_ERASE1:
+ op->is_erase = true;
+ op->addrs_offset = LS1X_NAND_COL_ADDR_CYC;
+ break;
+ case NAND_CMD_ERASE2:
+ if (!op->is_erase)
+ return -EOPNOTSUPP;
+ /* During erasing, row_start differs from the default value. */
+ op->row_start = chip->page_shift;
+ op->cmd_reg = LS1X_NAND_CMD_ERASE;
+ break;
+ case NAND_CMD_SEQIN:
+ op->is_write = true;
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (!op->is_write)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_WRITE;
+ break;
+ case NAND_CMD_READ0:
+ op->is_read = true;
+ break;
+ case NAND_CMD_READSTART:
+ if (!op->is_read)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_READ;
+ break;
+ case NAND_CMD_RNDOUT:
+ op->is_change_column = true;
+ break;
+ case NAND_CMD_RNDOUTSTART:
+ if (!op->is_change_column)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LS1X_NAND_CMD_READ;
+ break;
+ default:
+ dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ls1x_nand_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop, struct ls1x_nand_op *op)
+{
+ unsigned int op_id;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ if (naddrs > LS1X_NAND_MAX_ADDR_CYC)
+ return -EOPNOTSUPP;
+ op->naddrs = naddrs;
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ op->orig_len = nand_subop_get_data_len(subop, op_id);
+ if (instr->type == NAND_OP_DATA_IN_INSTR)
+ op->buf = instr->ctx.data.buf.in + offset;
+ else if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ op->buf = (void *)instr->ctx.data.buf.out + offset;
+
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ int i;
+
+ for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LS1X_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ mask &= GENMASK(chip->page_shift, 0);
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+
+ if (i == 4) {
+ mask = (u32)0xff >> (BITS_PER_WORD - shift);
+ val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
+ }
+ }
+ }
+}
+
+static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ int i;
+
+ for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LS1X_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
+ }
+ }
+}
+
+static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int col0 = op->addrs[0];
+ short col;
+
+ if (!IS_ALIGNED(col0, chip->buf_align)) {
+ col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
+ op->aligned_offset = op->addrs[0] - col0;
+ op->addrs[0] = col0;
+ }
+
+ if (host->data->set_addr)
+ host->data->set_addr(host, op);
+
+ /* set operation length */
+ if (op->is_write || op->is_read || op->is_change_column)
+ op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
+ else if (op->is_erase)
+ op->len = 1;
+ else
+ op->len = op->orig_len;
+
+ writel(op->len, host->reg_base + LS1X_NAND_OP_NUM);
+
+ /* set operation area and scope */
+ col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
+ if (op->orig_len && !op->is_readid) {
+ unsigned int op_scope = 0;
+
+ if (col < mtd->writesize) {
+ op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN;
+ op_scope = mtd->writesize;
+ }
+
+ op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE;
+ op_scope += mtd->oobsize;
+
+ op_scope <<= __ffs(host->data->op_scope_field);
+ regmap_update_bits(host->regmap, LS1X_NAND_PARAM,
+ host->data->op_scope_field, op_scope);
+ }
+
+ /* set command */
+ writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD);
+
+ /* trigger operation */
+ regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID);
+}
+
+static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ unsigned int val;
+ int ret = 0;
+
+ if (op->rdy_timeout_ms) {
+ ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD,
+ val, val & LS1X_NAND_CMD_OP_DONE,
+ 0, op->rdy_timeout_ms * MSEC_PER_SEC);
+ if (ret)
+ dev_err(host->dev, "operation failed\n");
+ }
+
+ return ret;
+}
+
+static void ls1x_nand_dma_callback(void *data)
+{
+ struct ls1x_nand_host *host = (struct ls1x_nand_host *)data;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
+ if (likely(status == DMA_COMPLETE)) {
+ dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
+ complete(&host->dma_complete);
+ } else {
+ dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
+ }
+}
+
+static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ void *buf = op->buf;
+ char *dma_buf = NULL;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
+ IS_ALIGNED(op->orig_len, chip->buf_align)) {
+ dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "failed to map DMA buffer\n");
+ return -ENXIO;
+ }
+ } else if (!op->is_write) {
+ dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+ } else {
+ dev_err(dev, "subpage writing not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "failed to prepare DMA descriptor\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ desc->callback = ls1x_nand_dma_callback;
+ desc->callback_param = host;
+
+ host->dma_cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(host->dma_cookie);
+ if (ret) {
+ dev_err(dev, "failed to submit DMA descriptor\n");
+ goto err;
+ }
+
+ dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
+ dma_async_issue_pending(chan);
+
+ if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
+ dmaengine_terminate_sync(chan);
+ reinit_completion(&host->dma_complete);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (dma_buf)
+ memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
+err:
+ if (dma_buf)
+ dma_free_coherent(dev, op->len, dma_buf, dma_addr);
+ else
+ dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);
+
+ return ret;
+}
+
+static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int ret;
+
+ ret = ls1x_nand_parse_instructions(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ ls1x_nand_trigger_op(host, &op);
+
+ ret = ls1x_nand_dma_transfer(host, &op);
+ if (ret)
+ return ret;
+
+ return ls1x_nand_wait_for_op_done(host, &op);
+}
+
+static int ls1x_nand_misc_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop, struct ls1x_nand_op *op)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ ret = ls1x_nand_parse_instructions(chip, subop, op);
+ if (ret)
+ return ret;
+
+ ls1x_nand_trigger_op(host, op);
+
+ return ls1x_nand_wait_for_op_done(host, op);
+}
+
+static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_op op = {};
+
+ return ls1x_nand_misc_type_exec(chip, subop, &op);
+}
+
+static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int i, ret;
+ union {
+ char ids[5];
+ struct {
+ int idl;
+ char idh;
+ };
+ } nand_id;
+
+ ret = ls1x_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL);
+ nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS);
+
+ for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++)
+ op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i];
+
+ return ret;
+}
+
+static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ struct ls1x_nand_op op = {};
+ int val, ret;
+
+ ret = ls1x_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ val = readl(host->reg_base + LS1X_NAND_IDH_STATUS);
+ val &= ~host->data->status_field;
+ op.buf[0] = val << ffs(host->data->status_field);
+
+ return ret;
+}
+
+static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_read_status_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(
+ ls1x_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
+
+static int ls1x_nand_is_valid_cmd(u8 opcode)
+{
+ if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
+{
+ if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
+ return 0;
+
+ if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &op->instrs[op_id];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ if (!instr1)
+ instr1 = instr;
+ else if (!instr2)
+ instr2 = instr;
+ else
+ break;
+ }
+ }
+
+ if (!instr1)
+ return -EOPNOTSUPP;
+
+ if (!instr2)
+ return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
+
+ return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
+}
+
+static int ls1x_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ if (check_only)
+ return ls1x_nand_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only);
+}
+
+static int ls1x_nand_attach_chip(struct nand_chip *chip)
+{
+ struct ls1x_nand_host *host = nand_get_controller_data(chip);
+ u64 chipsize = nanddev_target_size(&chip->base);
+ int cell_size = 0;
+
+ switch (chipsize) {
+ case SZ_128M:
+ cell_size = 0x0;
+ break;
+ case SZ_256M:
+ cell_size = 0x1;
+ break;
+ case SZ_512M:
+ cell_size = 0x2;
+ break;
+ case SZ_1G:
+ cell_size = 0x3;
+ break;
+ case SZ_2G:
+ cell_size = 0x4;
+ break;
+ case SZ_4G:
+ cell_size = 0x5;
+ break;
+ case SZ_8G:
+ cell_size = 0x6;
+ break;
+ case SZ_16G:
+ cell_size = 0x7;
+ break;
+ default:
+ dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize);
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ break;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set cell size */
+ regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK,
+ FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size));
+
+ regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK,
+ FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));
+
+ regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK,
+ FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));
+
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+ return 0;
+}
+
+static const struct nand_controller_ops ls1x_nand_controller_ops = {
+ .exec_op = ls1x_nand_exec_op,
+ .attach_chip = ls1x_nand_attach_chip,
+};
+
+static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host)
+{
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+}
+
+static int ls1x_nand_controller_init(struct ls1x_nand_host *host)
+{
+ struct device *dev = host->dev;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg = {};
+ int ret;
+
+ host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config);
+ if (IS_ERR(host->regmap))
+ return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");
+
+ chan = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(chan))
+ return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
+ host->dma_chan = chan;
+
+ cfg.src_addr = host->dma_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr = host->dma_base;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ret = dmaengine_slave_config(host->dma_chan, &cfg);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to config DMA channel\n");
+
+ init_completion(&host->dma_complete);
+
+ return 0;
+}
+
+static int ls1x_nand_chip_init(struct ls1x_nand_host *host)
+{
+ struct device *dev = host->dev;
+ int nchips = of_get_child_count(dev->of_node);
+ struct device_node *chip_np;
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (nchips != 1)
+ return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n");
+
+ chip_np = of_get_next_child(dev->of_node, NULL);
+ if (!chip_np)
+ return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");
+
+ nand_set_flash_node(chip, chip_np);
+ of_node_put(chip_np);
+ if (!mtd->name)
+ return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");
+
+ nand_set_controller_data(chip, host);
+ chip->controller = &host->controller;
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
+ chip->buf_align = 16;
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to scan NAND chip\n");
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return dev_err_probe(dev, ret, "failed to register MTD device\n");
+ }
+
+ return 0;
+}
+
+static int ls1x_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct ls1x_nand_data *data;
+ struct ls1x_nand_host *host;
+ struct resource *res;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->reg_base))
+ return PTR_ERR(host->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
+ if (!res)
+ return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");
+
+ host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, host->dma_base))
+ return -ENXIO;
+
+ host->dev = dev;
+ host->data = data;
+ host->controller.ops = &ls1x_nand_controller_ops;
+
+ nand_controller_init(&host->controller);
+
+ ret = ls1x_nand_controller_init(host);
+ if (ret)
+ goto err;
+
+ ret = ls1x_nand_chip_init(host);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+err:
+ ls1x_nand_controller_cleanup(host);
+
+ return ret;
+}
+
+static void ls1x_nand_remove(struct platform_device *pdev)
+{
+ struct ls1x_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ ls1x_nand_controller_cleanup(host);
+}
+
+static const struct ls1x_nand_data ls1b_nand_data = {
+ .status_field = GENMASK(15, 8),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .set_addr = ls1b_nand_set_addr,
+};
+
+static const struct ls1x_nand_data ls1c_nand_data = {
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct of_device_id ls1x_nand_match[] = {
+ {
+ .compatible = "loongson,ls1b-nand-controller",
+ .data = &ls1b_nand_data,
+ },
+ {
+ .compatible = "loongson,ls1c-nand-controller",
+ .data = &ls1c_nand_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls1x_nand_match);
+
+static struct platform_driver ls1x_nand_driver = {
+ .probe = ls1x_nand_probe,
+ .remove = ls1x_nand_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ls1x_nand_match,
+ },
+};
+
+module_platform_driver(ls1x_nand_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver");
+MODULE_LICENSE("GPL");
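
One detail of the new Loongson-1 driver worth spelling out: the controller transfers data in chip->buf_align (16-byte) units, so ls1x_nand_trigger_op() and ls1x_nand_dma_transfer() cooperate to handle unaligned columns with a rounded-down start address and a coherent bounce buffer. In outline (a sketch of the arithmetic, not additional driver code):

	/* controller-visible column, rounded down to the 16-byte unit */
	col0           = ALIGN_DOWN(col, chip->buf_align);
	aligned_offset = col - col0;		/* skew inside the first unit */

	/* transfer length, rounded up to cover the caller's range */
	len = ALIGN(orig_len + aligned_offset, chip->buf_align);

	/* reads land in a bounce buffer; the caller's bytes are carved out */
	memcpy(user_buf, bounce_buf + aligned_offset, orig_len);

Unaligned writes take the -EOPNOTSUPP branch instead, which is consistent with the NAND_NO_SUBPAGE_WRITE flag set at chip init.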
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 5eaa0be367cd..1003cf118c01 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1863,7 +1863,12 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
const struct nand_op_instr *instr = NULL;
unsigned int op_id = 0;
unsigned int len = 0;
- int ret;
+ int ret, reg_base;
+
+ reg_base = NAND_READ_LOCATION_0;
+
+ if (nandc->props->qpic_version2)
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
@@ -1915,14 +1920,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
- nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+ if (nandc->props->qpic_version2)
+ nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
+ else
+ nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
if (!nandc->props->qpic_version2) {
qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
- nandc->buf_count = len;
+ nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
config_nand_single_cw_page_read(chip, false, 0);
@@ -2360,6 +2368,7 @@ static const struct qcom_nandc_props ipq806x_nandc_props = {
.supports_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
@@ -2367,6 +2376,7 @@ static const struct qcom_nandc_props ipq4019_nandc_props = {
.supports_bam = true,
.nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
@@ -2374,6 +2384,7 @@ static const struct qcom_nandc_props ipq8074_nandc_props = {
.supports_bam = true,
.nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
@@ -2382,6 +2393,7 @@ static const struct qcom_nandc_props sdx55_nandc_props = {
.nandc_part_of_qpic = true,
.qpic_version2 = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
/*
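
Two things change for qcom above: parameter-page reads now pick the read-location register bank by controller generation (NAND_READ_LOCATION_LAST_CW_0 on QPIC v2, NAND_READ_LOCATION_0 otherwise) and always clear a full 512-byte codeword buffer rather than only the requested length, and each SoC's qcom_nandc_props gains an explicit .bam_offset in place of a hardcoded BAM register base. A consumer of the new field would presumably do something like the following (hypothetical; the actual lookup lives in a hunk not shown here, and the field names around it are assumptions):

	/* hypothetical use of the per-SoC BAM offset */
	nandc->bam_base = nandc->base + nandc->props->bam_offset;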
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index fab371e3e9b7..162cd5f4f234 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
@@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
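
The sunxi fix is an ordering one: sunxi_nfc_randomizer_config() must program the per-page randomizer seed before each sunxi_nfc_randomizer_enable() in the chunked read and write paths, otherwise the engine can be switched on with a stale seed from a previous page and (de)scramble the chunk with the wrong sequence. Schematically:

	sunxi_nfc_randomizer_config(nand, page, false);	/* select this page's seed */
	sunxi_nfc_randomizer_enable(nand);		/* only then switch it on */
	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
	       nfc->regs + NFC_REG_CMD);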
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
index 6046c73f8424..2ee498230ec1 100644
--- a/drivers/mtd/nand/spi/alliancememory.c
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -17,20 +17,20 @@
#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int am_get_eccsize(struct mtd_info *mtd)
{
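
All of the SPI-NAND hunks that follow are one tree-wide rename: the op macros now spell out the bus width of each transaction phase (command_address_data), with nS meaning n data lines in single-data-rate mode and nD the DTR variant, replacing the old ad-hoc suffixes. The mapping, as it appears in these diffs:

	SPINAND_PAGE_READ_FROM_CACHE_OP        -> SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP
	SPINAND_PAGE_READ_FROM_CACHE_FAST_OP   -> SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP
	SPINAND_PAGE_READ_FROM_CACHE_X2_OP     -> SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP
	SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP -> SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP
	SPINAND_PAGE_READ_FROM_CACHE_X4_OP     -> SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP
	SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP -> SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP
	SPINAND_PROG_LOAD                      -> SPINAND_PROG_LOAD_1S_1S_1S_OP
	SPINAND_PROG_LOAD_X4                   -> SPINAND_PROG_LOAD_1S_1S_4S_OP

So 1S_4S_4S reads "single-wire command, quad address, quad data" (the old QUADIO), while 1S_1S_4S (the old X4) only widens the data phase.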
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
index bb5298911137..2b4df1d917ac 100644
--- a/drivers/mtd/nand/spi/ato.c
+++ b/drivers/mtd/nand/spi/ato.c
@@ -14,17 +14,17 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index d16e42cf8fae..c411fe9be3ef 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -22,7 +22,7 @@
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
int ret;
@@ -36,7 +36,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
@@ -362,7 +362,7 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
static int spinand_write_enable_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -372,7 +372,7 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+ struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -519,7 +519,7 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
- struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -529,7 +529,7 @@ static int spinand_erase_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
- struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -549,8 +549,8 @@ static int spinand_erase_op(struct spinand_device *spinand,
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
unsigned long poll_delay_us, u8 *s)
{
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
- spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS,
+ spinand->scratchbuf);
u8 status;
int ret;
@@ -583,7 +583,7 @@ out:
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
- struct spi_mem_op op = SPINAND_READID_OP(
+ struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
@@ -596,7 +596,7 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
- struct spi_mem_op op = SPINAND_RESET_OP;
+ struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);
@@ -1585,6 +1585,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ nanddev_ecc_engine_cleanup(nand);
nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);
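
The one functional change in the spinand core is in spinand_cleanup(): the on-die ECC engine is now torn down ahead of the generic nanddev cleanup, releasing whatever the engine allocated at init instead of leaking it. Teardown order, schematically (init in reverse):

	nanddev_ecc_engine_cleanup(nand);	/* undo nanddev_ecc_engine_init() */
	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);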
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index a164d821464d..9e286612a296 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -18,18 +18,18 @@
(CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* OOB spare area map (64 bytes)
@@ -137,8 +137,8 @@ static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
- struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
- struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
u8 status;
int ret;
@@ -199,7 +199,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
- 0x7f, 0x7f),
+ 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
index ecd5f6bffa33..7c61644bfb10 100644
--- a/drivers/mtd/nand/spi/foresee.c
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -12,18 +12,18 @@
#define SPINAND_MFR_FORESEE 0xCD
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index d620bb02a20a..cb1d316fc4d8 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -24,44 +24,44 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -185,7 +185,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
spinand->scratchbuf);
int ret;
@@ -228,7 +228,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
spinand->scratchbuf);
int ret;
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 1ef08ad850a2..eeaf5bf9f082 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -28,18 +28,18 @@ struct macronix_priv {
};
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -148,8 +148,8 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
static int macronix_set_read_retry(struct spinand_device *spinand,
unsigned int retry_mode)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
- spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+ spinand->scratchbuf);
*spinand->scratchbuf = retry_mode;
return spi_mem_exec_op(spinand->spimem, &op);
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 691f8a2e0791..8281c9d3f4f7 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -35,33 +35,33 @@
(CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x4_update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x1_update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -137,7 +137,7 @@ static const struct mtd_ooblayout_ops micron_4_ooblayout = {
static int micron_select_target(struct spinand_device *spinand,
unsigned int target)
{
- struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MICRON_DIE_SELECT_REG,
spinand->scratchbuf);
if (target > 1)
@@ -251,8 +251,8 @@ static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
- struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
- struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
u8 status;
int ret;
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 6e7cc6995380..4670bac41245 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -22,20 +22,20 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
index 961df0d74984..51d61785df61 100644
--- a/drivers/mtd/nand/spi/skyhigh.c
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -17,20 +17,20 @@
#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 2e2106b2705f..4c6923047aeb 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -15,28 +15,28 @@
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_x4_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* Backward compatibility for 1st generation Serial NAND devices
* which don't support Quad Program Load operation.
*/
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 8394a1b1fb0c..b7a28f001a38 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -23,34 +23,50 @@
* "X4" in the core is equivalent to "quad output" in the datasheets.
*/
-static SPINAND_OP_VARIANTS(read_cache_dtr_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
+static SPINAND_OP_VARIANTS(read_cache_octal_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 162 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_8S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_octal_variants,
+ SPINAND_PROG_LOAD_1S_8S_8S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -141,12 +157,47 @@ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.free = w25n02kv_ooblayout_free,
};
+static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 12;
+ region->length = 4;
+
+ return 0;
+}
+
+static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = 16 * section;
+ region->length = 12;
+
+ /* Extract BBM */
+ if (!section) {
+ region->offset += 2;
+ region->length -= 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w35n01jw_ooblayout = {
+ .ecc = w35n01jw_ooblayout_ecc,
+ .free = w35n01jw_ooblayout_free,
+};
+
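A worked example of the layout the two callbacks above describe, assuming the 4096+128 byte pages of the W35N parts added below: the 128-byte OOB area splits into eight 16-byte sections, each ending in 4 ECC bytes, with section 0 giving up its first 2 bytes to the bad-block marker.

    /*
     * Resulting map (sketch):
     *   section 0: free bytes  2..11  (OOB bytes 0-1 hold the BBM)
     *   section 1: free bytes 16..27
     *   section n: ECC bytes at 16*n + 12 .. 16*n + 15
     *
     * e.g. mtd_ooblayout_ecc(mtd, 1, &region) yields
     * region->offset == 28 and region->length == 4.
     */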
static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
- struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -213,7 +264,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
@@ -227,6 +278,33 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
+ SPINAND_INFO("W35N01JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc, 0x21),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_INFO("W35N02JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 2, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+ SPINAND_INFO("W35N04JW", /* 1.8V */
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 4, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+ &write_cache_octal_variants,
+ &update_cache_octal_variants),
+ 0,
+ SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
/* 2G-bit densities */
SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
@@ -242,7 +320,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 3f539ca0de86..37336d5958a9 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -23,20 +23,20 @@
#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
- SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
- SPINAND_PROG_LOAD(true, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
- SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
- SPINAND_PROG_LOAD(false, 0, NULL, 0));
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index d28d4f1790f5..abc7b186353f 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -993,7 +993,7 @@ restart:
/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
- struct sm_ftl *ftl = from_timer(ftl, t, timer);
+ struct sm_ftl *ftl = timer_container_of(ftl, t, timer);
queue_work(cache_flush_workqueue, &ftl->flush_work);
}
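This rename (repeated for arcnet, grcan and kvaser_pciefd later in this diff) keeps the container_of() semantics of the old from_timer(); the assumed definition:

    #define timer_container_of(var, callback_timer, timer_fieldname) \
            container_of(callback_timer, typeof(*var), timer_fieldname)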
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 55644a3cd88c..e97f5cbd9aad 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -58,6 +58,31 @@ macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
return 0;
}
+static int
+mx25l3255e_late_init_fixups(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ /*
+ * The SFDP of the MX25L3255E is JESD216, which does not include the
+ * Quad Enable Requirements field in the BFPT. As a result, BFPT
+ * parsing does not set the quad_enable method to
+ * spi_nor_sr1_bit6_quad_enable, so it must be corrected in late_init.
+ */
+ params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+
+ /*
+ * The MX25L3255E also supports 1-4-4 page program in 3-byte address
+ * mode. Since that variant is not described by SFDP, it too must be
+ * configured in late_init.
+ */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_4_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4, SNOR_PROTO_1_4_4);
+
+ return 0;
+}
+
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
@@ -67,6 +92,10 @@ static const struct spi_nor_fixups macronix_qpp4b_fixups = {
.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
};
+static const struct spi_nor_fixups mx25l3255e_fixups = {
+ .late_init = mx25l3255e_late_init_fixups,
+};
+
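A sketch of the assumed hook ordering that makes this work (illustrative, not the exact spi-nor core code): BFPT/SFDP parsing runs first, and late_init runs once parsing is complete, so a JESD216-only part like the MX25L3255E can patch in what the older SFDP revision cannot express.

    static void apply_fixups_sketch(struct spi_nor *nor)
    {
            const struct spi_nor_fixups *fixups = nor->info->fixups;

            /* ... BFPT/SFDP parsing has already populated nor->params ... */
            if (fixups && fixups->late_init)
                    fixups->late_init(nor);
    }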
static const struct flash_info macronix_nor_parts[] = {
{
.id = SNOR_ID(0xc2, 0x20, 0x10),
@@ -88,10 +117,8 @@ static const struct flash_info macronix_nor_parts[] = {
.name = "mx25l8005",
.size = SZ_1M,
}, {
+ /* MX25L1606E */
.id = SNOR_ID(0xc2, 0x20, 0x15),
- .name = "mx25l1606e",
- .size = SZ_2M,
- .no_sfdp_flags = SECT_4K,
}, {
.id = SNOR_ID(0xc2, 0x20, 0x16),
.name = "mx25l3205d",
@@ -103,29 +130,21 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_8M,
.no_sfdp_flags = SECT_4K,
}, {
+ /* MX25L12805D */
.id = SNOR_ID(0xc2, 0x20, 0x18),
- .name = "mx25l12805d",
- .size = SZ_16M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP,
- .no_sfdp_flags = SECT_4K,
}, {
+ /* MX25L25635E, MX25L25645G */
.id = SNOR_ID(0xc2, 0x20, 0x19),
- .name = "mx25l25635e",
- .size = SZ_32M,
- .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &mx25l25635_fixups
}, {
+ /* MX66L51235F */
.id = SNOR_ID(0xc2, 0x20, 0x1a),
- .name = "mx66l51235f",
- .size = SZ_64M,
- .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
+ /* MX66L1G45G */
.id = SNOR_ID(0xc2, 0x20, 0x1b),
- .name = "mx66l1g45g",
- .size = SZ_128M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &macronix_qpp4b_fixups,
}, {
/* MX66L2G45G */
@@ -167,29 +186,16 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_16M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* MX25U51245G */
.id = SNOR_ID(0xc2, 0x25, 0x3a),
- .name = "mx25u51245g",
- .size = SZ_64M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
- .fixups = &macronix_qpp4b_fixups,
- }, {
- .id = SNOR_ID(0xc2, 0x25, 0x3a),
- .name = "mx66u51235f",
- .size = SZ_64M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
/* MX66U1G45G */
.id = SNOR_ID(0xc2, 0x25, 0x3b),
.fixups = &macronix_qpp4b_fixups,
}, {
+ /* MX66U2G45G */
.id = SNOR_ID(0xc2, 0x25, 0x3c),
- .name = "mx66u2g45g",
- .size = SZ_256M,
- .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
- .fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x26, 0x18),
@@ -215,15 +221,14 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_4M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* MX25UW51245G */
.id = SNOR_ID(0xc2, 0x81, 0x3a),
- .name = "mx25uw51245g",
.n_banks = 4,
.flags = SPI_NOR_RWW,
}, {
+ /* MX25L3255E */
.id = SNOR_ID(0xc2, 0x9e, 0x16),
- .name = "mx25l3255e",
- .size = SZ_4M,
- .no_sfdp_flags = SECT_4K,
+ .fixups = &mx25l3255e_fixups,
},
/*
* This spares us from adding new flash entries for flashes that can be
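The entries above shed .name, .size and .no_sfdp_flags wherever SFDP supplies that data at runtime, so a table entry shrinks to the ID plus any quirks. A minimal illustrative entry (hypothetical, mirroring the MX25L1606E change above):

    static const struct flash_info example_sfdp_part = {
            /* MX25L1606E */
            .id = SNOR_ID(0xc2, 0x20, 0x15),
            /* geometry and capabilities are discovered via SFDP */
    };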
diff --git a/drivers/mux/adg792a.c b/drivers/mux/adg792a.c
index 4da5aecb9fc6..a5afe29e3cf1 100644
--- a/drivers/mux/adg792a.c
+++ b/drivers/mux/adg792a.c
@@ -141,7 +141,7 @@ MODULE_DEVICE_TABLE(of, adg792a_of_match);
static struct i2c_driver adg792a_driver = {
.driver = {
.name = "adg792a",
- .of_match_table = of_match_ptr(adg792a_of_match),
+ .of_match_table = adg792a_of_match,
},
.probe = adg792a_probe,
.id_table = adg792a_id,
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
index 22ed051eb1a4..5eaf07d09ac9 100644
--- a/drivers/mux/adgs1408.c
+++ b/drivers/mux/adgs1408.c
@@ -59,9 +59,7 @@ static int adgs1408_probe(struct spi_device *spi)
s32 idle_state;
int ret;
- chip_id = (enum adgs1408_chip_id)device_get_match_data(dev);
- if (!chip_id)
- chip_id = spi_get_device_id(spi)->driver_data;
+ chip_id = (kernel_ulong_t)spi_get_device_match_data(spi);
mux_chip = devm_mux_chip_alloc(dev, 1, 0);
if (IS_ERR(mux_chip))
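The spi_get_device_match_data() call above is assumed to collapse the firmware-node lookup and the legacy SPI id-table fallback into one helper; a sketch of the equivalent logic:

    static const void *match_data_sketch(struct spi_device *spi)
    {
            const void *data = device_get_match_data(&spi->dev);

            if (!data)
                    data = (const void *)spi_get_device_id(spi)->driver_data;

            return data;
    }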
diff --git a/drivers/mux/gpio.c b/drivers/mux/gpio.c
index 5710879cd47f..4cc3202c58f3 100644
--- a/drivers/mux/gpio.c
+++ b/drivers/mux/gpio.c
@@ -15,6 +15,7 @@
#include <linux/mux/driver.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regulator/consumer.h>
struct mux_gpio {
struct gpio_descs *gpios;
@@ -80,6 +81,10 @@ static int mux_gpio_probe(struct platform_device *pdev)
mux_chip->mux->idle_state = idle_state;
}
+ ret = devm_regulator_get_enable_optional(dev, "mux");
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get/enable mux supply\n");
+
ret = devm_mux_chip_register(dev, mux_chip);
if (ret < 0)
return ret;
diff --git a/drivers/mux/mmio.c b/drivers/mux/mmio.c
index 30a952c34365..9993ce38a818 100644
--- a/drivers/mux/mmio.c
+++ b/drivers/mux/mmio.c
@@ -33,6 +33,12 @@ static const struct of_device_id mux_mmio_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids);
+static const struct regmap_config mux_mmio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int mux_mmio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -40,6 +46,7 @@ static int mux_mmio_probe(struct platform_device *pdev)
struct regmap_field **fields;
struct mux_chip *mux_chip;
struct regmap *regmap;
+ void __iomem *base;
int num_fields;
int ret;
int i;
@@ -47,7 +54,11 @@ static int mux_mmio_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "mmio-mux")) {
regmap = syscon_node_to_regmap(np->parent);
} else {
- regmap = device_node_to_regmap(np);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ regmap = ERR_PTR(-ENODEV);
+ else
+ regmap = regmap_init_mmio(dev, base, &mux_mmio_regmap_cfg);
/* Fallback to checking the parent node on "real" errors. */
if (IS_ERR(regmap) && regmap != ERR_PTR(-EPROBE_DEFER)) {
regmap = dev_get_regmap(dev->parent, NULL);
@@ -107,7 +118,7 @@ static int mux_mmio_probe(struct platform_device *pdev)
fields[i] = devm_regmap_field_alloc(dev, regmap, field);
if (IS_ERR(fields[i])) {
ret = PTR_ERR(fields[i]);
- dev_err(dev, "bitfield %d: failed allocate: %d\n",
+ dev_err(dev, "bitfield %d: failed to allocate: %d\n",
i, ret);
return ret;
}
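With the 32-bit, stride-4 config above, each mux control is a bitfield inside one MMIO register, programmed through the regmap_field objects the probe function allocates. A hypothetical field definition and write (illustrative names):

    /* Illustrative only: a 2-bit mux control at bits [5:4] of reg 0x10 */
    static const struct reg_field example_field = REG_FIELD(0x10, 4, 5);

    /* regmap_field_write() applies the mask and shift internally */
    ret = regmap_field_write(fields[i], state);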
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 271520510b5f..b29628d46be9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -115,6 +115,21 @@ config WIREGUARD_DEBUG
Say N here unless you know what you're doing.
+config OVPN
+ tristate "OpenVPN data channel offload"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select DST_CACHE
+ select NET_UDP_TUNNEL
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ select CRYPTO_CHACHA20POLY1305
+ select STREAM_PARSER
+ help
+ This module enhances the performance of the OpenVPN userspace software
+ by offloading the data channel processing to kernelspace.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
help
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 75333251a01a..73bc63ecd65f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_WIREGUARD) += wireguard/
+obj-$(CONFIG_OVPN) += ovpn/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 602e6e1adf00..882972604c82 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -382,7 +382,7 @@ static void arcdev_setup(struct net_device *dev)
static void arcnet_timer(struct timer_list *t)
{
- struct arcnet_local *lp = from_timer(lp, t, timer);
+ struct arcnet_local *lp = timer_container_of(lp, t, timer);
struct net_device *dev = lp->dev;
spin_lock_irq(&lp->lock);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index d1473c5f8eef..a9dffdcac805 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -777,27 +777,19 @@ static __net_init int bareudp_init_net(struct net *net)
return 0;
}
-static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit bareudp_exit_rtnl_net(struct net *net,
+ struct list_head *dev_kill_list)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *bareudp, *next;
list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
- unregister_netdevice_queue(bareudp->dev, head);
-}
-
-static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_kill_list)
-{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, dev_kill_list);
+ bareudp_dellink(bareudp->dev, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch_rtnl = bareudp_exit_batch_rtnl,
+ .exit_rtnl = bareudp_exit_rtnl_net,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
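The new .exit_rtnl hook lets the pernet core take rtnl once and batch the device teardown itself, so drivers handle one netns at a time; the bonding conversion later in this diff follows the same shape. A sketch of the assumed calling convention (illustrative, not the exact net core code):

    static void ops_exit_rtnl_sketch(const struct pernet_operations *ops,
                                     struct list_head *net_list)
    {
            struct net *net;
            LIST_HEAD(dev_kill_list);

            rtnl_lock();
            list_for_each_entry(net, net_list, exit_list)
                    ops->exit_rtnl(net, &dev_kill_list);
            unregister_netdevice_many(&dev_kill_list);
            rtnl_unlock();
    }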
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7edf0fd58c34..2d37b07c8215 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1035,7 +1035,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
*/
memcpy(ss.__data, addr, len);
ss.ss_family = dev->type;
- if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
+ if (dev_set_mac_address(dev, &ss, NULL)) {
slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
return -EOPNOTSUPP;
}
@@ -1273,8 +1273,7 @@ unwind:
break;
bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
rollback_slave->dev->addr_len);
- dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(rollback_slave->dev, &ss, NULL);
dev_addr_set(rollback_slave->dev, tmp_addr);
}
@@ -1763,8 +1762,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
bond->dev->addr_len);
ss.ss_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
- dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
- NULL);
+ dev_set_mac_address(new_slave->dev, &ss, NULL);
dev_addr_set(new_slave->dev, tmp_addr);
}
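The dropped casts here and throughout bond_main.c below imply that dev_set_mac_address() now takes the storage type directly; the assumed prototype:

    int dev_set_mac_address(struct net_device *dev,
                            struct sockaddr_storage *ss,
                            struct netlink_ext_ack *extack);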
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8ea183da8d53..c4d53e8e7c15 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -453,13 +453,14 @@ static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
/**
* bond_ipsec_add_sa - program device with a security association
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int bond_ipsec_add_sa(struct xfrm_state *xs,
+static int bond_ipsec_add_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
netdevice_tracker tracker;
struct bond_ipsec *ipsec;
@@ -495,9 +496,9 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
goto out;
}
- xs->xso.real_dev = real_dev;
- err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+ err = real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, xs, extack);
if (!err) {
+ xs->xso.real_dev = real_dev;
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
mutex_lock(&bond->ipsec_lock);
@@ -539,11 +540,25 @@ static void bond_ipsec_add_sa_all(struct bonding *bond)
if (ipsec->xs->xso.real_dev == real_dev)
continue;
- ipsec->xs->xso.real_dev = real_dev;
- if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
+ if (real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev,
+ ipsec->xs, NULL)) {
slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
- ipsec->xs->xso.real_dev = NULL;
+ continue;
}
+
+ spin_lock_bh(&ipsec->xs->lock);
+ /* xs might have been killed by the user during the migration
+ * to the new dev, but bond_ipsec_del_sa() should have done
+ * nothing, as xso.real_dev is NULL.
+ * Delete it from the device we just added it to. The pending
+ * bond_ipsec_free_sa() call will do the rest of the cleanup.
+ */
+ if (ipsec->xs->km.state == XFRM_STATE_DEAD &&
+ real_dev->xfrmdev_ops->xdo_dev_state_delete)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ ipsec->xs->xso.real_dev = real_dev;
+ spin_unlock_bh(&ipsec->xs->lock);
}
out:
mutex_unlock(&bond->ipsec_lock);
@@ -551,54 +566,27 @@ out:
/**
* bond_ipsec_del_sa - clear out this specific SA
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
**/
-static void bond_ipsec_del_sa(struct xfrm_state *xs)
+static void bond_ipsec_del_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- netdevice_tracker tracker;
- struct bond_ipsec *ipsec;
- struct bonding *bond;
- struct slave *slave;
- if (!bond_dev)
+ if (!bond_dev || !xs->xso.real_dev)
return;
- rcu_read_lock();
- bond = netdev_priv(bond_dev);
- slave = rcu_dereference(bond->curr_active_slave);
- real_dev = slave ? slave->dev : NULL;
- netdev_hold(real_dev, &tracker, GFP_ATOMIC);
- rcu_read_unlock();
-
- if (!slave)
- goto out;
-
- if (!xs->xso.real_dev)
- goto out;
-
- WARN_ON(xs->xso.real_dev != real_dev);
+ real_dev = xs->xso.real_dev;
if (!real_dev->xfrmdev_ops ||
!real_dev->xfrmdev_ops->xdo_dev_state_delete ||
netif_is_bond_master(real_dev)) {
slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
- goto out;
+ return;
}
- real_dev->xfrmdev_ops->xdo_dev_state_delete(xs);
-out:
- netdev_put(real_dev, &tracker);
- mutex_lock(&bond->ipsec_lock);
- list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- if (ipsec->xs == xs) {
- list_del(&ipsec->list);
- kfree(ipsec);
- break;
- }
- }
- mutex_unlock(&bond->ipsec_lock);
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, xs);
}
static void bond_ipsec_del_sa_all(struct bonding *bond)
@@ -624,46 +612,55 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_delete\n",
__func__);
- } else {
- real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
- if (real_dev->xfrmdev_ops->xdo_dev_state_free)
- real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
+ continue;
}
+
+ spin_lock_bh(&ipsec->xs->lock);
+ ipsec->xs->xso.real_dev = NULL;
+ /* Don't double delete states killed by the user. */
+ if (ipsec->xs->km.state != XFRM_STATE_DEAD)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ spin_unlock_bh(&ipsec->xs->lock);
+
+ if (real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev,
+ ipsec->xs);
}
mutex_unlock(&bond->ipsec_lock);
}
-static void bond_ipsec_free_sa(struct xfrm_state *xs)
+static void bond_ipsec_free_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- netdevice_tracker tracker;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
- struct slave *slave;
if (!bond_dev)
return;
- rcu_read_lock();
bond = netdev_priv(bond_dev);
- slave = rcu_dereference(bond->curr_active_slave);
- real_dev = slave ? slave->dev : NULL;
- netdev_hold(real_dev, &tracker, GFP_ATOMIC);
- rcu_read_unlock();
-
- if (!slave)
- goto out;
+ mutex_lock(&bond->ipsec_lock);
if (!xs->xso.real_dev)
goto out;
- WARN_ON(xs->xso.real_dev != real_dev);
+ real_dev = xs->xso.real_dev;
- if (real_dev && real_dev->xfrmdev_ops &&
+ xs->xso.real_dev = NULL;
+ if (real_dev->xfrmdev_ops &&
real_dev->xfrmdev_ops->xdo_dev_state_free)
- real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, xs);
out:
- netdev_put(real_dev, &tracker);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (ipsec->xs == xs) {
+ list_del(&ipsec->list);
+ kfree(ipsec);
+ break;
+ }
+ }
+ mutex_unlock(&bond->ipsec_lock);
}
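The reworked offload callbacks receive the offloading device explicitly instead of reading it back from xs->xso.dev; the assumed shape of the ops, inferred from the call sites above:

    struct xfrmdev_ops_sketch {
            int  (*xdo_dev_state_add)(struct net_device *dev,
                                      struct xfrm_state *xs,
                                      struct netlink_ext_ack *extack);
            void (*xdo_dev_state_delete)(struct net_device *dev,
                                         struct xfrm_state *xs);
            void (*xdo_dev_state_free)(struct net_device *dev,
                                       struct xfrm_state *xs);
    };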
/**
@@ -1115,8 +1112,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
ss.ss_family = bond->dev->type;
}
- rv = dev_set_mac_address(new_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(new_active->dev, &ss, NULL);
if (rv) {
slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
-rv);
@@ -1130,8 +1126,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
new_active->dev->addr_len);
ss.ss_family = old_active->dev->type;
- rv = dev_set_mac_address(old_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(old_active->dev, &ss, NULL);
if (rv)
slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
-rv);
@@ -2118,15 +2113,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
* set the master's mac address to that of the first slave
*/
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
- ss.ss_family = slave_dev->type;
- res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
- extack);
- if (res) {
- slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
- goto err_restore_mtu;
- }
+ } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ /* Set the slave to a random address to avoid a duplicate MAC
+ * address on a later failover.
+ */
+ eth_random_addr(ss.__data);
+ } else {
+ goto skip_mac_set;
+ }
+
+ ss.ss_family = slave_dev->type;
+ res = dev_set_mac_address(slave_dev, &ss, extack);
+ if (res) {
+ slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
+ goto err_restore_mtu;
}
+skip_mac_set:
+
/* set no_addrconf flag before open to prevent IPv6 addrconf */
slave_dev->priv_flags |= IFF_NO_ADDRCONF;
@@ -2447,7 +2453,7 @@ err_restore_mac:
bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
new_slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
err_restore_mtu:
@@ -2641,7 +2647,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
if (unregister) {
@@ -4928,8 +4934,7 @@ unwind:
if (rollback_slave == slave)
break;
- tmp_res = dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&tmp_ss, NULL);
+ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_ss, NULL);
if (tmp_res) {
slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
__func__, tmp_res);
@@ -6563,7 +6568,7 @@ static int __net_init bond_net_init(struct net *net)
/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
* race condition in bond unloading") we need to remove sysfs files
- * before we remove our devices (done later in bond_net_exit_batch_rtnl())
+ * before we remove our devices (done later in bond_net_exit_rtnl())
*/
static void __net_exit bond_net_pre_exit(struct net *net)
{
@@ -6572,25 +6577,20 @@ static void __net_exit bond_net_pre_exit(struct net *net)
bond_destroy_sysfs(bn);
}
-static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_kill_list)
+static void __net_exit bond_net_exit_rtnl(struct net *net,
+ struct list_head *dev_kill_list)
{
- struct bond_net *bn;
- struct net *net;
+ struct bond_net *bn = net_generic(net, bond_net_id);
+ struct bonding *bond, *tmp_bond;
/* Kill off any bonds created after unregistering bond rtnl ops */
- list_for_each_entry(net, net_list, exit_list) {
- struct bonding *bond, *tmp_bond;
-
- bn = net_generic(net, bond_net_id);
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, dev_kill_list);
- }
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
}
/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
* only after all bonds are gone") bond_destroy_proc_dir() is called
- * after bond_net_exit_batch_rtnl() has completed.
+ * after bond_net_exit_rtnl() has completed.
*/
static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
@@ -6606,7 +6606,7 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
.pre_exit = bond_net_pre_exit,
- .exit_batch_rtnl = bond_net_exit_batch_rtnl,
+ .exit_rtnl = bond_net_exit_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index f65c1a1e05cc..bf6398772960 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -275,7 +275,7 @@ static int ctucan_set_bittiming(struct net_device *ndev)
static int ctucan_set_data_bittiming(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
/* Note that dbt may be modified here */
return ctucan_set_btr(ndev, dbt, false);
@@ -290,7 +290,7 @@ static int ctucan_set_data_bittiming(struct net_device *ndev)
static int ctucan_set_secondary_sample_point(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
int ssp_offset = 0;
u32 ssp_cfg = 0; /* No SSP by default */
@@ -1358,12 +1358,12 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
priv->ntxbufs = ntxbufs;
priv->dev = dev;
priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
- priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.fd.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
priv->can.do_set_mode = ctucan_do_set_mode;
/* Needed for timing adjustment to be performed as soon as possible */
priv->can.do_set_bittiming = ctucan_set_bittiming;
- priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = ctucan_set_data_bittiming;
priv->can.do_get_berr_counter = ctucan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
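This and the CAN driver hunks that follow (dev.c, netlink.c, flexcan, ifi_canfd, kvaser_pciefd, m_can, tcan4x5x) move every FD-specific field behind an fd sub-struct in struct can_priv; the assumed grouping, inferred from the accesses in this series:

    struct can_fd_params_sketch {
            struct can_bittiming data_bittiming;
            const struct can_bittiming_const *data_bittiming_const;
            const u32 *data_bitrate_const;
            unsigned int data_bitrate_const_cnt;
            int (*do_set_data_bittiming)(struct net_device *dev);
            int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
            struct can_tdc tdc;
            const struct can_tdc_const *tdc_const;
    };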
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 5ec3170b896a..ea8c807af4d8 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -404,8 +404,8 @@ int open_candev(struct net_device *dev)
/* For CAN FD the data bitrate has to be >= the arbitration bitrate */
if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ (!priv->fd.data_bittiming.bitrate ||
+ priv->fd.data_bittiming.bitrate < priv->bittiming.bitrate)) {
netdev_err(dev, "incorrect/missing data bit-timing\n");
return -EINVAL;
}
@@ -543,16 +543,16 @@ int register_candev(struct net_device *dev)
if (!priv->bitrate_const != !priv->bitrate_const_cnt)
return -EINVAL;
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ if (!priv->fd.data_bitrate_const != !priv->fd.data_bitrate_const_cnt)
return -EINVAL;
/* We only support either fixed bit rates or bit timing const. */
- if ((priv->bitrate_const || priv->data_bitrate_const) &&
- (priv->bittiming_const || priv->data_bittiming_const))
+ if ((priv->bitrate_const || priv->fd.data_bitrate_const) &&
+ (priv->bittiming_const || priv->fd.data_bittiming_const))
return -EINVAL;
if (!can_bittiming_const_valid(priv->bittiming_const) ||
- !can_bittiming_const_valid(priv->data_bittiming_const))
+ !can_bittiming_const_valid(priv->fd.data_bittiming_const))
return -EINVAL;
if (!priv->termination_const) {
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index f1db9b7ffd4d..a36842ace084 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -141,7 +141,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
int err;
if (!tdc_const || !can_tdc_is_enabled(priv))
@@ -179,7 +179,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
tdc.tdcf = tdcf;
}
- priv->tdc = tdc;
+ priv->fd.tdc = tdc;
return 0;
}
@@ -228,10 +228,10 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = CANFD_MTU;
} else {
dev->mtu = CAN_MTU;
- memset(&priv->data_bittiming, 0,
- sizeof(priv->data_bittiming));
+ memset(&priv->fd.data_bittiming, 0,
+ sizeof(priv->fd.data_bittiming));
priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
- memset(&priv->tdc, 0, sizeof(priv->tdc));
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
@@ -312,16 +312,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
- !priv->data_bitrate_const)
+ if (!priv->fd.data_bittiming_const && !priv->fd.do_set_data_bittiming &&
+ !priv->fd.data_bitrate_const)
return -EOPNOTSUPP;
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
sizeof(dbt));
err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt,
+ priv->fd.data_bittiming_const,
+ priv->fd.data_bitrate_const,
+ priv->fd.data_bitrate_const_cnt,
extack);
if (err)
return err;
@@ -333,7 +333,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return -EINVAL;
}
- memset(&priv->tdc, 0, sizeof(priv->tdc));
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
if (data[IFLA_CAN_TDC]) {
/* TDC parameters are provided: use them */
err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
@@ -346,17 +346,17 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
- can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
+ can_calc_tdco(&priv->fd.tdc, priv->fd.tdc_const, &dbt,
&priv->ctrlmode, priv->ctrlmode_supported);
} /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
* turned off. TDC is disabled: do nothing
*/
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+ memcpy(&priv->fd.data_bittiming, &dbt, sizeof(dbt));
- if (priv->do_set_data_bittiming) {
+ if (priv->fd.do_set_data_bittiming) {
/* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
+ err = priv->fd.do_set_data_bittiming(dev);
if (err)
return err;
}
@@ -394,7 +394,7 @@ static size_t can_tdc_get_size(const struct net_device *dev)
struct can_priv *priv = netdev_priv(dev);
size_t size;
- if (!priv->tdc_const)
+ if (!priv->fd.tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
@@ -404,17 +404,17 @@ static size_t can_tdc_get_size(const struct net_device *dev)
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
- if (priv->tdc_const->tdcf_max) {
+ if (priv->fd.tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
if (can_tdc_is_enabled(priv)) {
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
- priv->do_get_auto_tdcv)
+ priv->fd.do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
- if (priv->tdc_const->tdcf_max)
+ if (priv->fd.tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
@@ -442,9 +442,9 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ if (priv->fd.data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ if (priv->fd.data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
@@ -454,9 +454,9 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
+ if (priv->fd.data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->fd.data_bitrate_const) *
+ priv->fd.data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
@@ -468,8 +468,8 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
- struct can_tdc *tdc = &priv->tdc;
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ struct can_tdc *tdc = &priv->fd.tdc;
+ const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
if (!tdc_const)
return 0;
@@ -497,8 +497,8 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
tdcv = tdc->tdcv;
err = 0;
- } else if (priv->do_get_auto_tdcv) {
- err = priv->do_get_auto_tdcv(dev, &tdcv);
+ } else if (priv->fd.do_get_auto_tdcv) {
+ err = priv->fd.do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
@@ -564,14 +564,14 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
!priv->do_get_berr_counter(dev, &bec) &&
nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->data_bittiming.bitrate &&
+ (priv->fd.data_bittiming.bitrate &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+ sizeof(priv->fd.data_bittiming), &priv->fd.data_bittiming)) ||
- (priv->data_bittiming_const &&
+ (priv->fd.data_bittiming_const &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
+ sizeof(*priv->fd.data_bittiming_const),
+ priv->fd.data_bittiming_const)) ||
(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
@@ -586,11 +586,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
priv->bitrate_const_cnt,
priv->bitrate_const)) ||
- (priv->data_bitrate_const &&
+ (priv->fd.data_bitrate_const &&
nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
+ sizeof(*priv->fd.data_bitrate_const) *
+ priv->fd.data_bitrate_const_cnt,
+ priv->fd.data_bitrate_const)) ||
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 6d80c341b26f..06d5d35fc1b5 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -1226,7 +1226,7 @@ static void flexcan_set_bittiming_cbt(const struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_cbt, reg_fdctrl;
@@ -2239,7 +2239,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
priv->can.bittiming_const = &flexcan_fd_bittiming_const;
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&flexcan_fd_data_bittiming_const;
} else {
priv->can.bittiming_const = &flexcan_bittiming_const;
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index adf3970f070f..c5784d9779ef 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -806,7 +806,7 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id)
*/
static void grcan_running_reset(struct timer_list *t)
{
- struct grcan_priv *priv = from_timer(priv, t, rr_timer);
+ struct grcan_priv *priv = timer_container_of(priv, t, rr_timer);
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -897,7 +897,7 @@ static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
/* Disable channels and schedule a running reset */
static void grcan_initiate_running_reset(struct timer_list *t)
{
- struct grcan_priv *priv = from_timer(priv, t, hang_timer);
+ struct grcan_priv *priv = timer_container_of(priv, t, hang_timer);
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index c86b57d47085..2eeee65f606f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -669,7 +669,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
@@ -1000,10 +1000,10 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
- priv->can.bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.data_bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.do_set_mode = ifi_canfd_set_mode;
- priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
+ priv->can.bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.fd.data_bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.do_set_mode = ifi_canfd_set_mode;
+ priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
/* IFI CANFD can do both Bosch FD and ISO FD */
priv->can.ctrlmode = CAN_CTRLMODE_FD;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index cf0d51805272..09510663988c 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <net/netdev_queues.h>
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@@ -410,10 +411,13 @@ struct kvaser_pciefd_can {
void __iomem *reg_base;
struct can_berr_counter bec;
u8 cmd_seq;
+ u8 tx_max_count;
+ u8 tx_idx;
+ u8 ack_idx;
int err_rep_cnt;
- int echo_idx;
+ unsigned int completed_tx_pkts;
+ unsigned int completed_tx_bytes;
spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
- spinlock_t echo_lock; /* Locks the message echo buffer */
struct timer_list bec_poll_timer;
struct completion start_comp, flush_comp;
};
@@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ can->tx_idx = 0;
+ can->ack_idx = 0;
+
ret = open_candev(netdev);
if (ret)
return ret;
@@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
timer_delete(&can->bec_poll_timer);
}
can->can.state = CAN_STATE_STOPPED;
+ netdev_reset_queue(netdev);
close_candev(netdev);
return ret;
}
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
+{
+ return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
+}
+
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
- struct kvaser_pciefd_can *can,
+ struct can_priv *can, u8 seq,
struct sk_buff *skb)
{
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
int packet_size;
- int seq = can->echo_idx;
memset(p, 0, sizeof(*p));
- if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
if (cf->can_id & CAN_RTR_FLAG)
@@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
} else {
p->header[1] |=
FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
- can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
+ can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
}
p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet;
+ unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
+ unsigned int frame_len;
int nr_words;
- u8 count;
if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
+ if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+ return NETDEV_TX_BUSY;
- nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+ nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
- spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx, 0);
-
- /* Move echo index to the next slot */
- can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+ WRITE_ONCE(can->can.echo_skb[seq], NULL);
+ frame_len = can_skb_get_frame_len(skb);
+ can_put_echo_skb(skb, netdev, seq, frame_len);
+ netdev_sent_queue(netdev, frame_len);
+ WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
/* Write header to fifo */
iowrite32(packet.header[0],
@@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
}
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
- /* No room for a new message, stop the queue until at least one
- * successful transmit
- */
- if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+ netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
return NETDEV_TX_OK;
}
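The TX path now pairs a lock-free ring (tx_idx/ack_idx over a power-of-two echo_skb_max) with byte queue limits. A sketch of the accounting, with small illustrative numbers:

    /*
     * xmit:        netdev_sent_queue(netdev, frame_len);
     *              WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
     * completion:  accumulate pkts/bytes per DMA buffer, then
     *              netif_subqueue_completed_wake(dev, 0, pkts, bytes,
     *                                            kvaser_pciefd_tx_avail(can), 1);
     *
     * e.g. tx_max_count == 16, tx_idx == 5, ack_idx == 3:
     * two frames in flight, tx_avail() == 14, queue keeps running.
     */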
@@ -856,7 +863,7 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
struct can_bittiming *bt;
if (data)
- bt = &can->can.data_bittiming;
+ bt = &can->can.fd.data_bittiming;
else
bt = &can->can.bittiming;
@@ -930,7 +937,8 @@ static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
- struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
+ struct kvaser_pciefd_can *can = timer_container_of(can, data,
+ bec_poll_timer);
kvaser_pciefd_enable_err_gen(can);
kvaser_pciefd_request_status(can);
@@ -959,7 +967,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
u32 status, tx_nr_packets_max;
netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
- KVASER_PCIEFD_CAN_TX_MAX_COUNT);
+ roundup_pow_of_two(KVASER_PCIEFD_CAN_TX_MAX_COUNT));
if (!netdev)
return -ENOMEM;
@@ -970,6 +978,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;
@@ -983,17 +993,15 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
tx_nr_packets_max =
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq;
- can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
- can->echo_idx = 0;
- spin_lock_init(&can->echo_lock);
spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
- can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
+ can->can.fd.data_bittiming_const = &kvaser_pciefd_bittiming_const;
can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
- can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
+ can->can.fd.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
can->can.do_set_mode = kvaser_pciefd_set_mode;
can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
@@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
cf->len = can_fd_dlc2len(dlc);
@@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
}
@@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
priv->dev->stats.rx_packets++;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- return netif_rx(skb);
+ netif_rx(skb);
+
+ return 0;
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
@@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
- int len;
- u8 count;
+ unsigned int len, frame_len = 0;
struct sk_buff *skb;
+ if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
+ return 0;
skb = can->can.echo_skb[echo_idx];
- if (skb)
- kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ if (!skb)
+ return 0;
+ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+ len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
- if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
- netif_wake_queue(can->can.dev);
+ /* Pairs with barrier in kvaser_pciefd_start_xmit() */
+ smp_store_release(&can->ack_idx, can->ack_idx + 1);
+ can->completed_tx_pkts++;
+ can->completed_tx_bytes += frame_len;
if (!one_shot_fail) {
can->can.dev->stats.tx_bytes += len;
@@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
int pos = 0;
int res = 0;
+ unsigned int i;
do {
res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+ /* Report this buffer's ACKs to BQL in one batch so its completion periods stay accurate */
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
+
+ if (!can->completed_tx_pkts)
+ continue;
+ netif_subqueue_completed_wake(can->can.dev, 0,
+ can->completed_tx_pkts,
+ can->completed_tx_bytes,
+ kvaser_pciefd_tx_avail(can), 1);
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
+ }
+
return res;
}
-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
+ void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
+ }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
+ }
if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
-
- iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
- u32 srb_irq = 0;
- u32 srb_release = 0;
int i;
if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
+ iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
if (pci_irq & irq_mask->kcan_rx0)
- srb_irq = kvaser_pciefd_receive_irq(pcie);
+ kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
-
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
-
- if (srb_release)
- iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
return IRQ_HANDLED;
}
@@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
}
}
+static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
+{
+ unsigned int i;
+
+ /* Masking PCI_IRQ is insufficient, as a running ISR will unmask it */
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+ for (i = 0; i < pcie->nr_channels; ++i)
+ iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
+
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
- void __iomem *irq_en_base;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
/* Enable PCI interrupts */
- irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
- iowrite32(irq_mask->all, irq_en_base);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
@@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
- /* Disable PCI interrupts */
- iowrite32(0, irq_en_base);
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
err_pci_free_irq_vectors:
@@ -1844,35 +1875,26 @@ err_disable_pci:
return ret;
}
-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
-{
- int i;
-
- for (i = 0; i < pcie->nr_channels; i++) {
- struct kvaser_pciefd_can *can = pcie->can[i];
-
- if (can) {
- iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- unregister_candev(can->can.dev);
- timer_delete(&can->bec_poll_timer);
- kvaser_pciefd_pwm_stop(can);
- free_candev(can->can.dev);
- }
- }
-}
-
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+ unsigned int i;
- kvaser_pciefd_remove_all_ctrls(pcie);
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
- /* Disable interrupts */
- iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
- iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+ unregister_candev(can->can.dev);
+ timer_delete(&can->bec_poll_timer);
+ kvaser_pciefd_pwm_stop(can);
+ }
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
pci_free_irq_vectors(pcie->pci);
+
+ for (i = 0; i < pcie->nr_channels; ++i)
+ free_candev(pcie->can[i]->can.dev);
+
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index c2c116ce1087..6c656bfdb323 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1372,7 +1372,7 @@ static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
const struct can_bittiming *bt = &cdev->can.bittiming;
- const struct can_bittiming *dbt = &cdev->can.data_bittiming;
+ const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -1738,7 +1738,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_30X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
@@ -1746,13 +1746,13 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
niso = m_can_niso_supported(cdev);
if (niso < 0)
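
The m_can hunks above only relocate the data-phase bit timing into the new can_priv fd sub-struct; the register math in m_can_set_bittiming() is unchanged. A hedged sketch of where the two timing sets now live and the usual minus-one register bias, with the actual NBTP/DBTP field packing omitted; example_timing_fields() is a hypothetical name:

static void example_timing_fields(struct m_can_classdev *cdev)
{
	const struct can_bittiming *bt = &cdev->can.bittiming;		/* nominal */
	const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming;	/* data phase */
	u16 brp = bt->brp - 1;
	u16 sjw = bt->sjw - 1;
	u16 tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	u16 tseg2 = bt->phase_seg2 - 1;

	/* ... pack into the NBTP register, then repeat with dbt for DBTP ... */
}
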
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index e5c162f8c589..8edaa339d590 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -411,10 +411,11 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv = cdev_to_priv(mcan_class);
priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
- if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto out_m_can_class_free_dev;
- } else {
+ if (IS_ERR(priv->power)) {
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_m_can_class_free_dev;
+ }
priv->power = NULL;
}
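
The rewritten branch distinguishes a probe deferral from a genuinely absent supply: only -EPROBE_DEFER propagates, every other error means "no regulator fitted". A short sketch of the pattern, assuming only devm_regulator_get_optional(); get_optional_supply() is a hypothetical helper name:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int get_optional_supply(struct device *dev, struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, "vsup");

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		reg = NULL;		/* supply is genuinely optional */
	}
	*out = reg;
	return 0;
}
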
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 28f3fd805273..77292afaed22 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -624,7 +624,7 @@ static int peak_canfd_set_data_bittiming(struct net_device *ndev)
{
struct peak_canfd_priv *priv = netdev_priv(ndev);
- return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
+ return pucan_set_timing_fast(priv, &priv->can.fd.data_bittiming);
}
static int peak_canfd_close(struct net_device *ndev)
@@ -813,12 +813,12 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
/* complete now socket-can initialization side */
priv->can.state = CAN_STATE_STOPPED;
priv->can.bittiming_const = &peak_canfd_nominal_const;
- priv->can.data_bittiming_const = &peak_canfd_data_const;
+ priv->can.fd.data_bittiming_const = &peak_canfd_data_const;
priv->can.do_set_mode = peak_canfd_set_mode;
priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
priv->can.do_set_bittiming = peak_canfd_set_bittiming;
- priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = peak_canfd_set_data_bittiming;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES |
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index aa3df0d05b85..7f10213738e5 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,6 +21,7 @@
* wherever it is modified to a readable name.
*/
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/can/dev.h>
@@ -74,33 +75,24 @@
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
-#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
-#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch))
+#define RCANFD_GERFL_EEF GENMASK(23, 16)
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
#define RCANFD_GERFL_THLES BIT(2)
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
#define RCANFD_GERFL_ERR(gpriv, x) \
- ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \
- RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
- RCANFD_GERFL_MES | \
- ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ ((x) & ((FIELD_PREP(RCANFD_GERFL_EEF, (_gpriv)->channels_mask)) | \
+ RCANFD_GERFL_MES | ((_gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0))); \
+})
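
The rewritten macro evaluates gpriv once via the local _gpriv copy and uses FIELD_PREP() to place channels_mask into the EEF field. A small standalone demo of the same placement; GENMASK() and FIELD_PREP() are re-implemented here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	 ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

#define RCANFD_GERFL_EEF GENMASK(23, 16)

int main(void)
{
	uint32_t channels_mask = 0x3;	/* channels 0 and 1 enabled */

	/* Prints 0x00030000, i.e. BIT(16) | BIT(17): the ECC error
	 * flags for the two enabled channels.
	 */
	printf("0x%08x\n", FIELD_PREP(RCANFD_GERFL_EEF, channels_mask));
	return 0;
}
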
/* AFL Rx rules registers */
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
- (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \
- (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8)))
-
-#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
- (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \
- reg_gen4(gpriv, 0x1ff, 0xff))
-
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f))
+#define RCANFD_GAFLECTR_AFLPN(gpriv, page_num) ((page_num) & (gpriv)->info->max_aflpn)
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -118,13 +110,13 @@
/* RSCFDnCFDCmNCFG - CAN FD only */
#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24))
+ (((x) & ((gpriv)->info->nom_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->ntseg2)
#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16))
+ (((x) & ((gpriv)->info->nom_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->ntseg1)
#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11))
+ (((x) & ((gpriv)->info->nom_bittiming->sjw_max - 1)) << (gpriv)->info->sh->nsjw)
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
@@ -186,13 +178,13 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24)
+#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & ((gpriv)->info->data_bittiming->sjw_max - 1)) << 24)
#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20))
+ (((x) & ((gpriv)->info->data_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->dtseg2)
#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16))
+ (((x) & ((gpriv)->info->data_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->dtseg1)
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
@@ -233,11 +225,14 @@
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
-#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16))
+#define RCANFD_CFCC_CFTML(gpriv, cftml) \
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ (((cftml) & (_gpriv)->info->max_cftml) << (_gpriv)->info->sh->cftml); \
+})
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << (gpriv)->info->sh->cfm)
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8))
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << (gpriv)->info->sh->cfdc)
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -298,14 +293,14 @@
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
-#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2)))
+#define RCANFD_GAFLCFG(w) (0x009c + (0x04 * (w)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) ((gpriv)->info->regs->rfcc + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
@@ -315,13 +310,13 @@
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
#define RCANFD_CFCC(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfcc + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
#define RCANFD_CFSTS(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfsts + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
#define RCANFD_CFPCTR(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfpctr + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -437,7 +432,7 @@
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m)))
+#define RCANFD_F_DCFG(gpriv, m) ((gpriv)->info->regs->f_dcfg + (0x20 * (m)))
#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
@@ -453,7 +448,7 @@
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFOFFSET(gpriv) ((gpriv)->info->regs->rfoffset)
#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
@@ -461,7 +456,7 @@
(RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400)
+#define RCANFD_F_CFOFFSET(gpriv) ((gpriv)->info->regs->cfoffset)
#define RCANFD_F_CFID(gpriv, ch, idx) \
(RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
@@ -510,12 +505,43 @@
struct rcar_canfd_global;
+struct rcar_canfd_regs {
+ u16 rfcc; /* RX FIFO Configuration/Control Register */
+ u16 cfcc; /* Common FIFO Configuration/Control Register */
+ u16 cfsts; /* Common FIFO Status Register */
+ u16 cfpctr; /* Common FIFO Pointer Control Register */
+ u16 f_dcfg; /* Global FD Configuration Register */
+ u16 rfoffset; /* Receive FIFO buffer access ID register */
+ u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */
+};
+
+struct rcar_canfd_shift_data {
+ u8 ntseg2; /* Nominal Bit Rate Time Segment 2 Control */
+ u8 ntseg1; /* Nominal Bit Rate Time Segment 1 Control */
+ u8 nsjw; /* Nominal Bit Rate Resynchronization Jump Width Control */
+ u8 dtseg2; /* Data Bit Rate Time Segment 2 Control */
+ u8 dtseg1; /* Data Bit Rate Time Segment 1 Control */
+ u8 cftml; /* Common FIFO TX Message Buffer Link */
+ u8 cfm; /* Common FIFO Mode */
+ u8 cfdc; /* Common FIFO Depth Configuration */
+};
+
struct rcar_canfd_hw_info {
+ const struct can_bittiming_const *nom_bittiming;
+ const struct can_bittiming_const *data_bittiming;
+ const struct rcar_canfd_regs *regs;
+ const struct rcar_canfd_shift_data *sh;
+ u8 rnc_field_width;
+ u8 max_aflpn;
+ u8 max_cftml;
u8 max_channels;
u8 postdiv;
/* hardware features */
unsigned shared_global_irqs:1; /* Has shared global irqs */
unsigned multi_channel_irqs:1; /* Has multiple channel irqs */
+ unsigned ch_interface_mode:1; /* Has channel interface mode */
+ unsigned shared_can_regs:1; /* Has shared classical can registers */
+ unsigned external_clk:1; /* Has external clock */
};
/* Channel priv data */
@@ -548,7 +574,7 @@ struct rcar_canfd_global {
};
/* CAN FD mode nominal rate constants */
-static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_nom_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 128,
@@ -560,8 +586,20 @@ static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_nom_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
/* CAN FD mode data rate constants */
-static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_data_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 16,
@@ -573,6 +611,18 @@ static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_data_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 32,
+ .tseg2_min = 2,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/* Classical CAN mode bitrate constants */
static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.name = RCANFD_DRV_NAME,
@@ -586,36 +636,113 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.brp_inc = 1,
};
+static const struct rcar_canfd_regs rcar_gen3_regs = {
+ .rfcc = 0x00b8,
+ .cfcc = 0x0118,
+ .cfsts = 0x0178,
+ .cfpctr = 0x01d8,
+ .f_dcfg = 0x0500,
+ .rfoffset = 0x3000,
+ .cfoffset = 0x3400,
+};
+
+static const struct rcar_canfd_regs rcar_gen4_regs = {
+ .rfcc = 0x00c0,
+ .cfcc = 0x0120,
+ .cfsts = 0x01e0,
+ .cfpctr = 0x0240,
+ .f_dcfg = 0x1400,
+ .rfoffset = 0x6000,
+ .cfoffset = 0x6400,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen3_shift_data = {
+ .ntseg2 = 24,
+ .ntseg1 = 16,
+ .nsjw = 11,
+ .dtseg2 = 20,
+ .dtseg1 = 16,
+ .cftml = 20,
+ .cfm = 16,
+ .cfdc = 8,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen4_shift_data = {
+ .ntseg2 = 25,
+ .ntseg1 = 17,
+ .nsjw = 10,
+ .dtseg2 = 16,
+ .dtseg1 = 8,
+ .cftml = 16,
+ .cfm = 8,
+ .cfdc = 21,
+};
+
static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
.max_channels = 2,
.postdiv = 2,
.shared_global_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
};
static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 127,
+ .max_cftml = 31,
.max_channels = 8,
.postdiv = 2,
.shared_global_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 1,
};
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
.max_channels = 2,
.postdiv = 1,
.multi_channel_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
};
-/* Helper functions */
-static inline bool is_gen4(struct rcar_canfd_global *gpriv)
-{
- return gpriv->info == &rcar_gen4_hw_info;
-}
-
-static inline u32 reg_gen4(struct rcar_canfd_global *gpriv,
- u32 gen4, u32 not_gen4)
-{
- return is_gen4(gpriv) ? gen4 : not_gen4;
-}
+static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 63,
+ .max_cftml = 31,
+ .max_channels = 6,
+ .postdiv = 1,
+ .multi_channel_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 0,
+};
+/* Helper functions */
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
u32 data = readl(reg);
@@ -681,9 +808,20 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
+static void rcar_canfd_setrnc(struct rcar_canfd_global *gpriv, unsigned int ch,
+ unsigned int num_rules)
+{
+ unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width;
+ unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width;
+ unsigned int w = ch / rnc_stride;
+ u32 rnc = num_rules << shift;
+
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc);
+}
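
rcar_canfd_setrnc() packs one rule count per channel into 32-bit GAFLCFG words, most significant field first. A standalone check of the index arithmetic for the gen3 case (8-bit RNC fields, so four channels share one word):

#include <stdio.h>

int main(void)
{
	unsigned int field_width = 8;	/* gen3; gen4 uses 16 */
	unsigned int stride = 32 / field_width;
	unsigned int ch;

	for (ch = 0; ch < 4; ch++) {
		unsigned int shift = 32 - (ch % stride + 1) * field_width;
		unsigned int w = ch / stride;

		printf("ch%u -> GAFLCFG(%u), shift %u\n", ch, w, shift);
	}
	return 0;	/* ch0: shift 24 ... ch3: shift 0, all in word 0 */
}
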
+
static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
{
- if (is_gen4(gpriv)) {
+ if (gpriv->info->ch_interface_mode) {
u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
: RCANFD_GEN4_FDCFG_CLOE;
@@ -789,7 +927,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
u32 ch, u32 rule_entry)
{
- int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ unsigned int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -800,9 +938,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
- RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
- if (is_gen4(gpriv))
+ rcar_canfd_setrnc(gpriv, ch, num_rules);
+ if (gpriv->info->shared_can_regs)
offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
@@ -942,7 +1079,7 @@ static void rcar_canfd_global_error(struct net_device *ndev)
u32 ridx = ch + RCANFD_RFFIFO_IDX;
gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if (gerfl & RCANFD_GERFL_EEF(ch)) {
+ if (gerfl & FIELD_PREP(RCANFD_GERFL_EEF, BIT(ch))) {
netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch);
stats->tx_dropped++;
}
@@ -1304,7 +1441,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
struct rcar_canfd_channel *priv = netdev_priv(dev);
struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 cfg;
u32 ch = priv->channel;
@@ -1338,7 +1475,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- if (is_gen4(gpriv)) {
+ if (gpriv->info->shared_can_regs) {
cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
RCANFD_NCFG_NBRP(brp) |
RCANFD_NCFG_NSJW(gpriv, sjw) |
@@ -1503,7 +1640,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
rcar_canfd_write(priv->base,
RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
@@ -1562,7 +1699,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
@@ -1613,7 +1750,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
- else if (is_gen4(gpriv))
+ else if (gpriv->info->shared_can_regs)
rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
@@ -1736,16 +1873,19 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
if (info->multi_channel_irqs) {
char *irq_name;
+ char name[10];
int err_irq;
int tx_irq;
- err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err");
+ scnprintf(name, sizeof(name), "ch%u_err", ch);
+ err_irq = platform_get_irq_byname(pdev, name);
if (err_irq < 0) {
err = err_irq;
goto fail;
}
- tx_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_trx" : "ch1_trx");
+ scnprintf(name, sizeof(name), "ch%u_trx", ch);
+ tx_irq = platform_get_irq_byname(pdev, name);
if (tx_irq < 0) {
err = tx_irq;
goto fail;
@@ -1782,9 +1922,8 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
}
if (gpriv->fdmode) {
- priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
- priv->can.data_bittiming_const =
- &rcar_canfd_data_bittiming_const;
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming;
/* Controller starts in CAN FD only mode */
err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
@@ -1846,6 +1985,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
u32 rule_entry = 0;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
+ struct clk *clk_ram;
int i;
info = of_device_get_match_data(dev);
@@ -1855,13 +1995,13 @@ static int rcar_canfd_probe(struct platform_device *pdev)
for (i = 0; i < info->max_channels; ++i) {
name[7] = '0' + i;
- of_child = of_get_child_by_name(dev->of_node, name);
- if (of_child && of_device_is_available(of_child)) {
+ of_child = of_get_available_child_by_name(dev->of_node, name);
+ if (of_child) {
channels_mask |= BIT(i);
transceivers[i] = devm_of_phy_optional_get(dev,
of_child, NULL);
+ of_node_put(of_child);
}
- of_node_put(of_child);
if (IS_ERR(transceivers[i]))
return PTR_ERR(transceivers[i]);
}
@@ -1932,9 +2072,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv;
} else {
fcan_freq = clk_get_rate(gpriv->can_clk);
- gpriv->extclk = true;
+ gpriv->extclk = gpriv->info->external_clk;
}
+ clk_ram = devm_clk_get_optional_enabled(dev, "ram_clk");
+ if (IS_ERR(clk_ram))
+ return dev_err_probe(dev, PTR_ERR(clk_ram),
+ "cannot get enabled ram clock\n");
+
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
@@ -2097,6 +2242,7 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
+ { .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info },
{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
{ .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index c3fb3176ce42..046f0a0ae4d4 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -118,7 +118,7 @@ static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv)
static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv)
{
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_nbt, reg_dbt, reg_tdc;
u32 tdco;
@@ -899,7 +899,7 @@ static int rkcanfd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->can.clock.freq = clk_get_rate(priv->clks[0].clk);
priv->can.bittiming_const = &rkcanfd_bittiming_const;
- priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &rkcanfd_data_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
priv->can.do_set_mode = rkcanfd_set_mode;
diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
index 43d4b5721812..fa85a75be65a 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
@@ -39,7 +39,7 @@ static void rkcanfd_timestamp_work(struct work_struct *work)
void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
{
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
const struct can_bittiming *bt = &priv->can.bittiming;
struct cyclecounter *cc = &priv->cc;
u32 bitrate, div, reg, rate;
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 6c7b1c58f85f..ce18e9e56059 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -374,7 +374,7 @@ static inline void pcan_set_can_power(struct pcan_pccard *card, int onoff)
*/
static void pcan_led_timer(struct timer_list *t)
{
- struct pcan_pccard *card = from_timer(card, t, led_timer);
+ struct pcan_pccard *card = timer_container_of(card, t, led_timer);
struct net_device *netdev;
int i, up_count = 0;
u8 ccr;
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index 24c6622d36bd..58ff2ec1d975 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
#define SLCAN_CMD_LEN 1
#define SLCAN_SFF_ID_LEN 3
#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_DATA_LENGTH_LEN 1
+#define SLCAN_ERROR_LEN 1
#define SLCAN_STATE_LEN 1
#define SLCAN_STATE_BE_RXCNT_LEN 3
#define SLCAN_STATE_BE_TXCNT_LEN 3
-#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
- SLCAN_STATE_BE_RXCNT_LEN + \
- SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \
+ SLCAN_STATE_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_ERROR_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
+#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_SFF_ID_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
struct slcan {
struct can_priv can;
@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
+ if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
+ return;
+
skb = alloc_can_skb(sl->dev, &cf);
if (unlikely(!skb)) {
sl->dev->stats.rx_dropped++;
@@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl)
return;
}
- if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
+ if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
return;
cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
@@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl)
bool rx_errors = false, tx_errors = false, rx_over_errors = false;
int i, len;
+ if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
+ return;
+
/* get len from sanitized ASCII value */
len = cmd[1];
if (len >= '0' && len < '9')
@@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl)
static void slcan_unesc(struct slcan *sl, unsigned char s)
{
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount > 4)
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
slcan_bump(sl);
sl->rcount = 0;
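
The new minimum-length constants let each parser reject short PDUs itself, replacing the blanket rcount > 4 test removed from slcan_unesc(). A standalone check of the frame minimum (command byte + standard ID + DLC digit):

#include <stdio.h>

#define SLCAN_CMD_LEN		1
#define SLCAN_SFF_ID_LEN	3
#define SLCAN_DATA_LENGTH_LEN	1

#define SLCAN_FRAME_MSG_LEN_MIN	(SLCAN_CMD_LEN + \
				 SLCAN_SFF_ID_LEN + \
				 SLCAN_DATA_LENGTH_LEN)

int main(void)
{
	/* "t1230" = cmd 't', SFF id "123", DLC '0': exactly the minimum */
	printf("min frame msg len = %d\n", SLCAN_FRAME_MSG_LEN_MIN);	/* 5 */
	return 0;
}
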
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index c30b04f8fc0d..7450ea42c1ea 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -527,7 +527,7 @@ static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u32 tdcmod, val = 0;
int err;
@@ -600,8 +600,8 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco);
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.fd.tdc.tdcv) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.fd.tdc.tdco);
return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
@@ -2104,8 +2104,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
- priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
- priv->can.tdc_const = &mcp251xfd_tdc_const;
+ priv->can.fd.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.fd.tdc_const = &mcp251xfd_tdc_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 03ad10b01867..27a3818885c2 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -1098,7 +1098,7 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const;
struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *nom_bt = &priv->can.bittiming;
- struct can_bittiming *data_bt = &priv->can.data_bittiming;
+ struct can_bittiming *data_bt = &priv->can.fd.data_bittiming;
struct esd_usb_3_set_baudrate_msg_x *baud_x;
union esd_usb_msg *msg;
u16 flags = 0;
@@ -1218,9 +1218,9 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
- priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &esd_usb_3_data_bittiming_const;
priv->can.do_set_bittiming = esd_usb_3_set_bittiming;
- priv->can.do_set_data_bittiming = esd_usb_3_set_bittiming;
+ priv->can.fd.do_set_data_bittiming = esd_usb_3_set_bittiming;
break;
case ESD_USB_CANUSBM_PRODUCT_ID:
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 71f24dc0a927..db1acf6d504c 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -2059,8 +2059,8 @@ static int es58x_init_priv(struct es58x_device *es58x_dev,
can->bittiming_const = param->bittiming_const;
if (param->ctrlmode_supported & CAN_CTRLMODE_FD) {
- can->data_bittiming_const = param->data_bittiming_const;
- can->tdc_const = param->tdc_const;
+ can->fd.data_bittiming_const = param->data_bittiming_const;
+ can->fd.tdc_const = param->tdc_const;
}
can->bitrate_max = param->bitrate_max;
can->clock = param->clock;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index 84ffa1839bac..d924b053677b 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -427,12 +427,12 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
if (tx_conf_msg.canfd_enabled) {
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
- &priv->can.data_bittiming);
+ &priv->can.fd.data_bittiming);
if (can_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
- tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
- tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
+ tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco);
+ tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf);
}
conf_len = ES58X_FD_CANFD_CONF_LEN;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 3ccac6781b98..bb6335278e46 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -728,7 +728,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
static int gs_usb_set_data_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
struct gs_device_bittiming dbt = {
.prop_seg = cpu_to_le32(bt->prop_seg),
.phase_seg1 = cpu_to_le32(bt->phase_seg1),
@@ -1300,8 +1300,8 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* The data bit timing will be overwritten, if
* GS_CAN_FEATURE_BT_CONST_EXT is set.
*/
- dev->can.data_bittiming_const = &dev->bt_const;
- dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = &dev->bt_const;
+ dev->can.fd.do_set_data_bittiming = gs_usb_set_data_bittiming;
}
if (feature & GS_CAN_FEATURE_TERMINATION) {
@@ -1381,7 +1381,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
- dev->can.data_bittiming_const = &dev->data_bt_const;
+ dev->can.fd.data_bittiming_const = &dev->data_bt_const;
}
can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 078496d9b7ba..f6c77eca9f43 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -137,7 +137,7 @@ struct kvaser_usb_net_priv {
* @dev_set_mode: used for can.do_set_mode
* @dev_set_bittiming: used for can.do_set_bittiming
* @dev_get_busparams: readback arbitration busparams
- * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_set_data_bittiming: used for can.fd.do_set_data_bittiming
* @dev_get_data_busparams: readback data busparams
* @dev_get_berr_counter: used for can.do_get_berr_counter
*
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index dcb0bcbe0565..daf42080f942 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -592,7 +592,7 @@ static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct kvaser_usb_busparams busparams;
int tseg1 = dbt->prop_seg + dbt->phase_seg1;
int tseg2 = dbt->phase_seg2;
@@ -842,8 +842,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
- priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
+ priv->can.fd.data_bittiming_const = dev->cfg->data_bittiming_const;
+ priv->can.fd.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index c75df1755b3b..6b293a9056c2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -319,7 +319,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
*/
static void pcan_usb_restart(struct timer_list *t)
{
- struct pcan_usb *pdev = from_timer(pdev, t, restart_timer);
+ struct pcan_usb *pdev = timer_container_of(pdev, t, restart_timer);
struct peak_usb_device *dev = &pdev->dev;
/* notify candev and netdev */
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 59f7cd8ceb39..117637b9b995 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -770,7 +770,7 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
const struct peak_usb_adapter *pa = dev->adapter;
if (pa->dev_set_data_bittiming) {
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
int err = pa->dev_set_data_bittiming(dev, bt);
if (err)
@@ -954,8 +954,8 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev->can.clock = peak_usb_adapter->clock;
dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
- dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
- dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
+ dev->can.fd.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 436c0e4b0344..3f2e378199ab 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -481,7 +481,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u32 btr0, btr1;
u32 is_config_mode;
@@ -517,10 +517,10 @@ static int xcan_set_bittiming(struct net_device *ndev)
btr0 = dbt->brp - 1;
if (can_tdc_is_enabled(&priv->can)) {
if (priv->devtype.cantype == XAXI_CANFD)
- btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
else
- btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
}
@@ -1967,22 +1967,22 @@ static int xcan_probe(struct platform_device *pdev)
goto err_free;
if (devtype->cantype == XAXI_CANFD) {
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
- priv->can.tdc_const = &xcan_tdc_const_canfd;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd;
}
if (devtype->cantype == XAXI_CANFD_2_0) {
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
- priv->can.tdc_const = &xcan_tdc_const_canfd2;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd2;
}
if (devtype->cantype == XAXI_CANFD ||
devtype->cantype == XAXI_CANFD_2_0) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_TDC_AUTO;
- priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ priv->can.fd.do_get_auto_tdcv = xcan_get_auto_tdcv;
}
priv->reg_base = addr;
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 9eb39cfa5fb2..dc2f4adac9bc 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -21,6 +21,8 @@
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
@@ -326,6 +328,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
}
}
+static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
+{
+ u64 eap_conf;
+
+ if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
+ return;
+
+ b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
+
+ if (is63xx(dev)) {
+ eap_conf &= ~EAP_MODE_MASK_63XX;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
+ } else {
+ eap_conf &= ~EAP_MODE_MASK;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT;
+ }
+
+ b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
+}
+
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
u8 mgmt;
@@ -586,6 +608,13 @@ int b53_setup_port(struct dsa_switch *ds, int port)
b53_port_set_mcast_flood(dev, port, true);
b53_port_set_learning(dev, port, false);
+ /* Force all traffic to go to the CPU port to prevent the ASIC from
+ * trying to forward to bridged ports on matching FDB entries, then
+ * dropping frames because it isn't allowed to forward there.
+ */
+ if (dsa_is_user_port(ds, port))
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
return 0;
}
EXPORT_SYMBOL(b53_setup_port);
@@ -1175,6 +1204,10 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_vlan_aware_bridge_pvid = true;
+ /* Ageing time is set in seconds */
+ ds->ageing_time_min = 1 * 1000;
+ ds->ageing_time_max = AGE_TIME_MAX * 1000;
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
@@ -1290,41 +1323,17 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- u8 rgmii_ctrl = 0, off;
-
- if (port == dev->imp_port)
- off = B53_RGMII_CTRL_IMP;
- else
- off = B53_RGMII_CTRL_P(port);
+ u8 rgmii_ctrl = 0;
- b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
-
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC);
- rgmii_ctrl |= RGMII_CTRL_DLL_RXC;
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC);
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- default:
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- break;
- }
+ b53_read8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
- if (port != dev->imp_port) {
- if (is63268(dev))
- rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
+ if (is63268(dev))
+ rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
- rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- }
+ rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
- b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+ b53_write8(dev, B53_CTRL_PAGE, B53_RGMII_CTRL_P(port), rgmii_ctrl);
dev_dbg(ds->dev, "Configured port %d for %s\n", port,
phy_modes(interface));
@@ -1345,8 +1354,7 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
* tx_clk aligned timing (restoring to reset defaults)
*/
b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
- RGMII_CTRL_TIMING_SEL);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
/* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
* sure that we enable the port TX clock internal delay to
@@ -1366,7 +1374,10 @@ static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
if (interface == PHY_INTERFACE_MODE_RGMII)
rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
- rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+
+ if (dev->chip_id != BCM53115_DEVICE_ID)
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+
b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
dev_info(ds->dev, "Configured port %d for %s\n", port,
@@ -1430,6 +1441,10 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
+ /* BCM63xx RGMII ports support RGMII */
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ phy_interface_set_rgmii(config->supported_interfaces);
+
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100;
@@ -1469,7 +1484,7 @@ static void b53_phylink_mac_config(struct phylink_config *config,
struct b53_device *dev = ds->priv;
int port = dp->index;
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
b53_adjust_63xx_rgmii(ds, port, interface);
if (mode == MLO_AN_FIXED) {
@@ -2019,9 +2034,6 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
b53_get_vlan_entry(dev, pvid, vl);
vl->members &= ~BIT(port);
- if (vl->members == BIT(cpu_port))
- vl->members &= ~BIT(cpu_port);
- vl->untag = vl->members;
b53_set_vlan_entry(dev, pvid, vl);
}
@@ -2042,6 +2054,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
pvlan |= BIT(i);
}
+ /* Disable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
+
/* Configure the local port VLAN control membership to include
* remote ports and update the local port bitmask
*/
@@ -2077,6 +2092,9 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
pvlan &= ~BIT(i);
}
+ /* Enable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
@@ -2094,8 +2112,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
}
b53_get_vlan_entry(dev, pvid, vl);
- vl->members |= BIT(port) | BIT(cpu_port);
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->members |= BIT(port);
b53_set_vlan_entry(dev, pvid, vl);
}
}
@@ -2315,6 +2332,9 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
+ if (!b53_support_eee(ds, port))
+ return 0;
+
ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2329,7 +2349,7 @@ bool b53_support_eee(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- return !is5325(dev) && !is5365(dev);
+ return !is5325(dev) && !is5365(dev) && !is63xx(dev);
}
EXPORT_SYMBOL(b53_support_eee);
@@ -2373,6 +2393,28 @@ static int b53_get_max_mtu(struct dsa_switch *ds, int port)
return B53_MAX_MTU;
}
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct b53_device *dev = ds->priv;
+ u32 atc;
+ int reg;
+
+ if (is63xx(dev))
+ reg = B53_AGING_TIME_CONTROL_63XX;
+ else
+ reg = B53_AGING_TIME_CONTROL;
+
+ atc = DIV_ROUND_CLOSEST(msecs, 1000);
+
+ if (!is5325(dev) && !is5365(dev))
+ atc |= AGE_CHANGE;
+
+ b53_write32(dev, B53_MGMT_PAGE, reg, atc);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(b53_set_ageing_time);
+
static const struct phylink_mac_ops b53_phylink_mac_ops = {
.mac_select_pcs = b53_phylink_mac_select_pcs,
.mac_config = b53_phylink_mac_config,
@@ -2396,6 +2438,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_disable = b53_disable_port,
.support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
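
b53_set_ageing_time() converts the millisecond value handed down by the bridge layer to whole seconds and, on chips other than 5325/5365, sets AGE_CHANGE to latch the new value. A standalone check of the conversion; DIV_ROUND_CLOSEST() is re-implemented here for illustration only:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define AGE_CHANGE		(1u << 20)

int main(void)
{
	unsigned int msecs = 300000;	/* bridge layer default: 300 s */
	unsigned int atc = DIV_ROUND_CLOSEST(msecs, 1000) | AGE_CHANGE;

	printf("ATC = 0x%08x -> %u s\n", atc, atc & 0x7ffff);
	return 0;
}
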
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 2cf3e6a81e37..a5ef7071ba07 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -343,6 +343,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index bfbcb66bef66..1fbc5a204bc7 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -50,6 +50,9 @@
/* Jumbo Frame Registers */
#define B53_JUMBO_PAGE 0x40
+/* EAP Registers */
+#define B53_EAP_PAGE 0x42
+
/* EEE Control Registers Page */
#define B53_EEE_PAGE 0x92
@@ -217,6 +220,13 @@
#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
#define BRCM_HDR_P7_EN BIT(2) /* Enable tagging on port 7 */
+/* Aging Time control register (32 bit) */
+#define B53_AGING_TIME_CONTROL 0x06
+#define B53_AGING_TIME_CONTROL_63XX 0x08
+#define AGE_CHANGE BIT(20)
+#define AGE_TIME_MASK 0x7ffff
+#define AGE_TIME_MAX 1048575
+
/* Mirror capture control register (16 bit) */
#define B53_MIR_CAP_CTL 0x10
#define CAP_PORT_MASK 0xf
@@ -481,6 +491,17 @@
#define JMS_MAX_SIZE 9724
/*************************************************************************
+ * EAP Page Registers
+ *************************************************************************/
+#define B53_PORT_EAP_CONF(i) (0x20 + 8 * (i))
+#define EAP_MODE_SHIFT 51
+#define EAP_MODE_SHIFT_63XX 50
+#define EAP_MODE_MASK (0x3ull << EAP_MODE_SHIFT)
+#define EAP_MODE_MASK_63XX (0x3ull << EAP_MODE_SHIFT_63XX)
+#define EAP_MODE_BASIC 0
+#define EAP_MODE_SIMPLIFIED 3
+
+/*************************************************************************
* EEE Configuration Page Registers
*************************************************************************/
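
The EAP mode field sits in the top bits of a 64-bit per-port register, hence the 0x3ull masks above. A standalone check of the read-modify-write performed by b53_set_eap_mode() for the non-63xx layout:

#include <stdint.h>
#include <stdio.h>

#define EAP_MODE_SHIFT		51
#define EAP_MODE_MASK		(0x3ull << EAP_MODE_SHIFT)
#define EAP_MODE_SIMPLIFIED	3

int main(void)
{
	uint64_t eap_conf = 0;

	eap_conf &= ~EAP_MODE_MASK;
	eap_conf |= (uint64_t)EAP_MODE_SIMPLIFIED << EAP_MODE_SHIFT;
	/* Prints 0x0018000000000000: mode 3 in bits 52:51 */
	printf("0x%016llx\n", (unsigned long long)eap_conf);
	return 0;
}
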
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 454a8c7fd7ee..960685596093 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1235,6 +1235,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.port_disable = bcm_sf2_port_disable,
.support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index adbab544c60f..d8a35f25a4c8 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -405,7 +405,7 @@ static int __init dsa_loop_init(void)
unsigned int i, ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
- phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
+ phydevs[i] = fixed_phy_register(&status, NULL);
ret = mdio_driver_register(&dsa_loop_drv);
if (ret)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9c2ed2ba79da..bebf0d3ff330 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -244,7 +244,7 @@ struct hellcreek_port_hwtstamp {
struct sk_buff *tx_skb;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
struct hellcreek_port {
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index ca2500aba96f..99941ff1ebf9 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -40,7 +40,7 @@ int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
* the user requested what is actually available or not
*/
static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek_port_hwtstamp *ps =
&hellcreek->ports[port].port_hwtstamp;
@@ -110,41 +110,35 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
}
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config config;
int err;
ps = &hellcreek->ports[port].port_hwtstamp;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config *config;
ps = &hellcreek->ports[port].port_hwtstamp;
- config = &ps->tstamp_config;
+ *config = ps->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
index 7d88da2134f2..388821c4aa10 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -38,9 +38,10 @@
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config);
bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
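
With kernel_hwtstamp_config, the DSA core now performs the user-space copies and drivers only validate and cache the configuration, which is why the copy_from_user()/copy_to_user() calls disappear from each driver above. A hedged sketch of the new hook shape; example_hwtstamp_set() is a hypothetical name:

#include <linux/net_tstamp.h>
#include <net/dsa.h>

static int example_hwtstamp_set(struct dsa_switch *ds, int port,
				struct kernel_hwtstamp_config *config,
				struct netlink_ext_ack *extack)
{
	/* Validate config->tx_type / config->rx_filter, program the
	 * hardware, and cache the result; the core copies it back to
	 * user space, so no copy_to_user() here.
	 */
	return 0;
}
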
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 12a86585a77f..c71d3fd5dfeb 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -6,6 +6,7 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
select NET_DSA_TAG_NONE
select NET_IEEE8021Q_HELPERS
select DCB
+ select PCS_XPCS
help
This driver adds support for Microchip KSZ8, KSZ9 and
LAN937X series switch chips, being KSZ8863/8873,
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 29fe79ea74cd..d747ea1c41a7 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@@ -161,6 +161,190 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
10, 1000);
}
+static void port_sgmii_s(struct ksz_device *dev, uint port, u16 devid, u16 reg)
+{
+ u32 data;
+
+ data = (devid & MII_MMD_CTRL_DEVAD_MASK) << 16;
+ data |= reg;
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_ADDR__4, data);
+}
+
+static void port_sgmii_r(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 *buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pread16(dev, port, REG_PORT_SGMII_DATA__4 + 2, buf);
+}
+
+static void port_sgmii_w(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_DATA__4, buf);
+}
+
+static int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+ u16 val;
+
+ port_sgmii_r(dev, port, mmd, reg, &val);
+
+	/* Simulate ID values to activate the special handling in the XPCS
+	 * driver, if supported.
+	 */
+ if (mmd == MDIO_MMD_PMAPMD) {
+ if (reg == MDIO_DEVID1)
+ val = 0x9477;
+ else if (reg == MDIO_DEVID2)
+ val = 0x22 << 10;
+ } else if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+		/* The MII_BMCR register needs to be updated with the exact
+		 * speed and duplex mode when running in SGMII mode, as this
+		 * register is used to detect the connected speed in that
+		 * mode.
+		 */
+ if (reg == MMD_SR_MII_AUTO_NEG_STATUS) {
+ int duplex, speed;
+
+ if (val & SR_MII_STAT_LINK_UP) {
+ speed = (val >> SR_MII_STAT_S) & SR_MII_STAT_M;
+ if (speed == SR_MII_STAT_1000_MBPS)
+ speed = SPEED_1000;
+ else if (speed == SR_MII_STAT_100_MBPS)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if (val & SR_MII_STAT_FULL_DUPLEX)
+ duplex = DUPLEX_FULL;
+ else
+ duplex = DUPLEX_HALF;
+
+ if (!p->phydev.link ||
+ p->phydev.speed != speed ||
+ p->phydev.duplex != duplex) {
+ u16 ctrl;
+
+ p->phydev.link = 1;
+ p->phydev.speed = speed;
+ p->phydev.duplex = duplex;
+ port_sgmii_r(dev, port, mmd, MII_BMCR,
+ &ctrl);
+ ctrl &= BMCR_ANENABLE;
+ ctrl |= mii_bmcr_encode_fixed(speed,
+ duplex);
+ port_sgmii_w(dev, port, mmd, MII_BMCR,
+ ctrl);
+ }
+ } else {
+ p->phydev.link = 0;
+ }
+ } else if (reg == MII_BMSR) {
+ p->phydev.link = (val & BMSR_LSTATUS);
+ }
+ }
+
+ return val;
+}
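
For reference, the two simulated DEVID words combine into one 32-bit identifier on the XPCS side, assuming the usual clause-45 composition of (DEVID1 << 16) | DEVID2:

	u32 devid1 = 0x9477;		/* returned for MDIO_DEVID1 */
	u32 devid2 = 0x22 << 10;	/* 0x8800, returned for MDIO_DEVID2 */
	u32 id = (devid1 << 16) | devid2;	/* 0x94778800 */
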
+
+static int ksz9477_pcs_write(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+
+ if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+ if (reg == MMD_SR_MII_AUTO_NEG_CTRL) {
+ u16 sgmii_mode = SR_MII_PCS_SGMII << SR_MII_PCS_MODE_S;
+
+ /* Need these bits for 1000BASE-X mode to work with
+ * AN on.
+ */
+ if (!(val & sgmii_mode))
+ val |= SR_MII_SGMII_LINK_UP |
+ SR_MII_TX_CFG_PHY_MASTER;
+
+			/* The SGMII interrupt in the port cannot be masked,
+			 * so make sure the interrupt is not enabled, as it
+			 * is not handled.
+			 */
+ val &= ~SR_MII_AUTO_NEG_COMPLETE_INTR;
+ } else if (reg == MII_BMCR) {
+			/* The MII_ADVERTISE register needs to be written once
+			 * before auto-negotiation is started so that the
+			 * correct config_word is sent out after reset.
+			 */
+ if ((val & BMCR_ANENABLE) && !p->sgmii_adv_write) {
+ u16 adv;
+
+ /* The SGMII port cannot disable flow control
+ * so it is better to just advertise symmetric
+ * pause.
+ */
+ port_sgmii_r(dev, port, mmd, MII_ADVERTISE,
+ &adv);
+ adv |= ADVERTISE_1000XPAUSE;
+ adv &= ~ADVERTISE_1000XPSE_ASYM;
+ port_sgmii_w(dev, port, mmd, MII_ADVERTISE,
+ adv);
+ p->sgmii_adv_write = 1;
+ } else if (val & BMCR_RESET) {
+ p->sgmii_adv_write = 0;
+ }
+ } else if (reg == MII_ADVERTISE) {
+			/* The XPCS driver writes to this register itself, so
+			 * there is no need to update it for the errata.
+			 */
+ p->sgmii_adv_write = 1;
+ }
+ }
+ port_sgmii_w(dev, port, mmd, reg, val);
+
+ return 0;
+}
+
+int ksz9477_pcs_create(struct ksz_device *dev)
+{
+	/* This chip has an SGMII port. */
+ if (ksz_has_sgmii_port(dev)) {
+ int port = ksz_get_sgmii_port(dev);
+ struct ksz_port *p = &dev->ports[port];
+ struct phylink_pcs *pcs;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(dev->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "ksz_pcs_mdio_bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(dev->dev));
+ bus->read_c45 = &ksz9477_pcs_read;
+ bus->write_c45 = &ksz9477_pcs_write;
+ bus->parent = dev->dev;
+ bus->phy_mask = ~0;
+ bus->priv = dev;
+
+ ret = devm_mdiobus_register(dev->dev, bus);
+ if (ret)
+ return ret;
+
+ pcs = xpcs_create_pcs_mdiodev(bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+ p->pcs = pcs;
+ }
+
+ return 0;
+}
+
int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
@@ -978,6 +1162,14 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
if (dev->info->gbit_capable[port])
config->mac_capabilities |= MAC_1000FD;
+
+ if (ksz_is_sgmii_port(dev, port)) {
+ struct ksz_port *p = &dev->ports[port];
+
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ p->pcs->supported_interfaces);
+ }
}
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index d2166b0d881e..0d1a6dfda23e 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 series Header file
*
- * Copyright (C) 2017-2022 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_H
@@ -97,4 +97,6 @@ void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port,
u16 ethtype, u8 *src_mac, u8 *dst_mac,
unsigned long cookie, u32 prio);
+int ksz9477_pcs_create(struct ksz_device *dev);
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 89f0796894af..7c142c17b3f6 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,7 +2,7 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/delay.h>
@@ -265,16 +265,70 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface);
+/**
+ * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ *
+ * This function is a dummy handler. See ksz_phylink_mac_enable_tx_lpi() for
+ * a detailed explanation of EEE/LPI handling in KSZ switches.
+ */
+static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+}
+
+/**
+ * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ * @timer: timer value before entering LPI (unused)
+ * @tx_clock_stop: whether to stop the TX clock in LPI mode (unused)
+ *
+ * This function signals to phylink that the driver architecture supports
+ * LPI management, enabling phylink to control EEE advertisement during
+ * negotiation according to IEEE Std 802.3 (Clause 78).
+ *
+ * Hardware Management of EEE/LPI State:
+ * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
+ * observation and testing suggest that the actual EEE / Low Power Idle (LPI)
+ * state transitions are managed autonomously by the hardware based on
+ * the auto-negotiation results. (Note: While the datasheet describes EEE
+ * operation based on negotiation, it doesn't explicitly detail the internal
+ * MAC/PHY interaction, so autonomous hardware management of the MAC state
+ * for LPI is inferred from observed behavior).
+ * This hardware control, consistent with the switch's ability to operate
+ * autonomously via strapping, means MAC-level software intervention is not
+ * required or exposed for managing the LPI state once EEE is negotiated.
+ * (Ref: KSZ9893R Data Sheet DS00002420D, primarily Section 4.7.5 explaining
+ * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
+ * Straps).
+ *
+ * Additionally, ports configured as MAC interfaces (e.g., KSZ9893R port 3)
+ * lack documented MAC-level LPI control.
+ *
+ * Therefore, this callback performs no action and serves primarily to inform
+ * phylink of LPI awareness and to document the inferred hardware behavior.
+ *
+ * Returns: 0 (Always success)
+ */
+static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ return 0;
+}
+
static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
.mac_config = ksz88x3_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops ksz88xx_dev_ops = {
@@ -354,10 +408,29 @@ static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
int speed, int duplex, bool tx_pause,
bool rx_pause);
+static struct phylink_pcs *
+ksz_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ struct ksz_port *p = &dev->ports[dp->index];
+
+ if (ksz_is_sgmii_port(dev, dp->index) &&
+ (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX))
+ return p->pcs;
+
+ return NULL;
+}
+
static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+ .mac_select_pcs = ksz_phylink_mac_select_pcs,
};
static const struct ksz_dev_ops ksz9477_dev_ops = {
@@ -395,12 +468,15 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.reset = ksz9477_reset_switch,
.init = ksz9477_switch_init,
.exit = ksz9477_switch_exit,
+ .pcs_create = ksz9477_pcs_create,
};
static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops lan937x_dev_ops = {
@@ -1035,8 +1111,7 @@ static const struct regmap_range ksz9477_valid_regs[] = {
regmap_reg_range(0x701b, 0x701b),
regmap_reg_range(0x701f, 0x7020),
regmap_reg_range(0x7030, 0x7030),
- regmap_reg_range(0x7200, 0x7203),
- regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7200, 0x7207),
regmap_reg_range(0x7300, 0x7301),
regmap_reg_range(0x7400, 0x7401),
regmap_reg_range(0x7403, 0x7403),
@@ -1552,6 +1627,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
.ptp_capable = true,
+ .sgmii_port = 7,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -1944,6 +2020,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .sgmii_port = 7,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -2016,6 +2093,18 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
+
+ if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) {
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+
+ config->lpi_capabilities = MAC_100FD;
+ if (dev->info->gbit_capable[port])
+ config->lpi_capabilities |= MAC_1000FD;
+
+ /* EEE is fully operational */
+ config->eee_enabled_default = true;
+ }
}
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
@@ -2067,7 +2156,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
spin_unlock(&mib->stats64_lock);
- if (dev->info->phy_errata_9477) {
+ if (dev->info->phy_errata_9477 && !ksz_is_sgmii_port(dev, port)) {
ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
if (ret)
dev_err(dev->dev, "Failed to monitor transmission halt\n");
@@ -2697,8 +2786,9 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
kirq->dev = dev;
kirq->masked = ~0;
- kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
- &ksz_irq_domain_ops, kirq);
+ kirq->domain = irq_domain_create_simple(of_fwnode_handle(dev->dev->of_node),
+ kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
if (!kirq->domain)
return -ENOMEM;
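
This is part of the tree-wide move from the OF-specific irq_domain_add_*() helpers to the fwnode-based irq_domain_create_*() ones; of_fwnode_handle() wraps a struct device_node into the struct fwnode_handle the new API expects. The conversion pattern, sketched:

	/* before */
	domain = irq_domain_add_simple(np, nirqs, 0, &ops, priv);
	/* after */
	domain = irq_domain_create_simple(of_fwnode_handle(np), nirqs, 0,
					  &ops, priv);
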
@@ -2775,6 +2865,12 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret)
return ret;
+ if (ksz_has_sgmii_port(dev) && dev->dev_ops->pcs_create) {
+ ret = dev->dev_ops->pcs_create(dev);
+ if (ret)
+ return ret;
+ }
+
/* set broadcast storm protection 10% rate */
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
BROADCAST_STORM_RATE,
@@ -3008,31 +3104,6 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
if (!port)
return MICREL_KSZ8_P1_ERRATA;
break;
- case KSZ8567_CHIP_ID:
- /* KSZ8567R Errata DS80000752C Module 4 */
- case KSZ8765_CHIP_ID:
- case KSZ8794_CHIP_ID:
- case KSZ8795_CHIP_ID:
- /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
- case KSZ9477_CHIP_ID:
- /* KSZ9477S Errata DS80000754A Module 4 */
- case KSZ9567_CHIP_ID:
- /* KSZ9567S Errata DS80000756A Module 4 */
- case KSZ9896_CHIP_ID:
- /* KSZ9896C Errata DS80000757A Module 3 */
- case KSZ9897_CHIP_ID:
- case LAN9646_CHIP_ID:
- /* KSZ9897R Errata DS80000758C Module 4 */
- /* Energy Efficient Ethernet (EEE) feature select must be manually disabled
- * The EEE feature is enabled by default, but it is not fully
- * operational. It must be manually disabled through register
- * controls. If not disabled, the PHY ports can auto-negotiate
- * to enable EEE, and this feature can cause link drops when
- * linked to another device supporting EEE.
- *
- * The same item appears in the errata for all switches above.
- */
- return MICREL_NO_EEE;
}
return 0;
@@ -3466,6 +3537,20 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
return -EOPNOTSUPP;
}
+/**
+ * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
+ * port
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number to check
+ *
+ * This function also documents devices where EEE was initially advertised but
+ * later withdrawn due to reliability issues, as described in official errata
+ * documents. These devices are explicitly listed to record known limitations,
+ * even if there is no technical necessity for runtime checks.
+ *
+ * Returns: true if the internal PHY on the given port supports fully
+ * operational EEE, false otherwise.
+ */
static bool ksz_support_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
@@ -3475,15 +3560,35 @@ static bool ksz_support_eee(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ return true;
case KSZ8567_CHIP_ID:
+ /* KSZ8567R Errata DS80000752C Module 4 */
+ case KSZ8765_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8795_CHIP_ID:
+ /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
case KSZ9477_CHIP_ID:
- case KSZ9563_CHIP_ID:
+ /* KSZ9477S Errata DS80000754A Module 4 */
case KSZ9567_CHIP_ID:
- case KSZ9893_CHIP_ID:
+ /* KSZ9567S Errata DS80000756A Module 4 */
case KSZ9896_CHIP_ID:
+ /* KSZ9896C Errata DS80000757A Module 3 */
case KSZ9897_CHIP_ID:
case LAN9646_CHIP_ID:
- return true;
+ /* KSZ9897R Errata DS80000758C Module 4 */
+ /* Energy Efficient Ethernet (EEE) feature select must be
+ * manually disabled
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ *
+ * The same item appears in the errata for all switches above.
+ */
+ break;
}
return false;
@@ -3613,6 +3718,10 @@ static void ksz_phylink_mac_config(struct phylink_config *config,
if (dev->info->internal_phy[port])
return;
+ /* No need to configure XMII control register when using SGMII. */
+ if (ksz_is_sgmii_port(dev, port))
+ return;
+
if (phylink_autoneg_inband(mode)) {
dev_err(dev->dev, "In-band AN not supported!\n");
return;
@@ -3999,6 +4108,89 @@ static int ksz_ets_band_to_queue(struct tc_ets_qopt_offload_replace_params *p,
return p->bands - 1 - band;
}
+/**
+ * ksz88x3_tc_ets_add - Configure ETS (Enhanced Transmission Selection)
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to configure
+ * @p: Pointer to offload replace parameters describing ETS bands and mapping
+ *
+ * The KSZ88x3 supports two scheduling modes: Strict Priority and
+ * Weighted Fair Queuing (WFQ). Both modes have fixed behavior:
+ * - No configurable queue-to-priority mapping
+ * - No weight adjustment in WFQ mode
+ *
+ * This function configures the switch to use strict priority mode by
+ * clearing the WFQ enable bit for all queues associated with ETS bands.
+ * If strict priority is not explicitly requested, the switch will default
+ * to WFQ mode.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_add(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int ret, band;
+
+ /* Only strict priority mode is supported for now.
+ * WFQ is implicitly enabled when strict mode is disabled.
+ */
+ for (band = 0; band < p->bands; band++) {
+ int queue = ksz_ets_band_to_queue(p, band);
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+
+ /* Clear WFQ enable bit to select strict priority scheduling */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz88x3_tc_ets_del - Reset ETS (Enhanced Transmission Selection) config
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to reset
+ *
+ * The KSZ88x3 supports only fixed scheduling modes: Strict Priority or
+ * Weighted Fair Queuing (WFQ), with no reconfiguration of weights or
+ * queue mapping. This function resets the port's scheduling mode to
+ * the default, which is WFQ, by enabling the WFQ bit for all queues.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_del(struct ksz_device *dev, int port)
+{
+ int ret, queue;
+
+ /* Iterate over all transmit queues for this port */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+
+ /* Set WFQ enable bit to revert back to default scheduling
+ * mode
+ */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE,
+ KSZ8873_TXQ_WFQ_ENABLE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ksz_queue_set_strict(struct ksz_device *dev, int port, int queue)
{
int ret;
@@ -4080,6 +4272,7 @@ static int ksz_tc_ets_del(struct ksz_device *dev, int port)
for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
ret = ksz_queue_set_wrr(dev, port, queue,
KSZ9477_DEFAULT_WRR_WEIGHT);
+
if (ret)
return ret;
}
@@ -4132,7 +4325,7 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
int ret;
- if (is_ksz8(dev))
+ if (is_ksz8(dev) && !ksz_is_ksz88x3(dev))
return -EOPNOTSUPP;
if (qopt->parent != TC_H_ROOT) {
@@ -4146,9 +4339,16 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
if (ret)
return ret;
- return ksz_tc_ets_add(dev, port, &qopt->replace_params);
+ if (ksz_is_ksz88x3(dev))
+ return ksz88x3_tc_ets_add(dev, port,
+ &qopt->replace_params);
+ else
+ return ksz_tc_ets_add(dev, port, &qopt->replace_params);
case TC_ETS_DESTROY:
- return ksz_tc_ets_del(dev, port);
+ if (ksz_is_ksz88x3(dev))
+ return ksz88x3_tc_ets_del(dev, port);
+ else
+ return ksz_tc_ets_del(dev, port);
case TC_ETS_STATS:
case TC_ETS_GRAFT:
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index af17a9c030d4..a08417df2ca4 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
@@ -10,6 +10,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
@@ -93,6 +94,7 @@ struct ksz_chip_data {
bool internal_phy[KSZ_MAX_NUM_PORTS];
bool gbit_capable[KSZ_MAX_NUM_PORTS];
bool ptp_capable;
+ u8 sgmii_port;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
};
@@ -132,6 +134,7 @@ struct ksz_port {
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
+ u32 sgmii_adv_write:1;
struct ksz_port_mib mib;
phy_interface_t interface;
@@ -141,8 +144,9 @@ struct ksz_port {
void *acl_priv;
struct ksz_irq pirq;
u8 num;
+ struct phylink_pcs *pcs;
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
bool hwts_tx_en;
bool hwts_rx_en;
struct ksz_irq ptpirq;
@@ -440,6 +444,8 @@ struct ksz_dev_ops {
int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
+
+ int (*pcs_create)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
@@ -731,6 +737,21 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4;
}
+static inline int ksz_get_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port - 1;
+}
+
+static inline bool ksz_has_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port > 0;
+}
+
+static inline bool ksz_is_sgmii_port(struct ksz_device *dev, int port)
+{
+ return dev->info->sgmii_port == port + 1;
+}
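
A worked example of the 1-based convention used by these helpers: the KSZ9477 chip data above sets .sgmii_port = 7, while 0 (the implicit default) means the chip has no SGMII port.

	ksz_has_sgmii_port(dev);	/* true: 7 > 0 */
	ksz_get_sgmii_port(dev);	/* 6, the DSA port index */
	ksz_is_sgmii_port(dev, 6);	/* true: 7 == 6 + 1 */
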
+
/* STP State Defines */
#define PORT_TX_ENABLE BIT(2)
#define PORT_RX_ENABLE BIT(1)
@@ -836,6 +857,25 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
+/* TXQ Split Control Register for per-port, per-queue configuration.
+ * Register 0xAF is TXQ Split for Q3 on Port 1.
+ * Register offset formula: 0xAF + (port * 4) + (3 - queue)
+ * where: port = 0..2, queue = 0..3
+ */
+#define KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue) \
+ (0xAF + ((port) * 4) + (3 - (queue)))
+
+/* Bit 7 selects between:
+ * 0 = Strict priority mode (highest-priority queue first)
+ * 1 = Weighted Fair Queuing (WFQ) mode:
+ * Queue weights: Q3:Q2:Q1:Q0 = 8:4:2:1
+ * If any queues are empty, weight is redistributed.
+ *
+ * Note: This is referred to as "Weighted Fair Queuing" (WFQ) in KSZ8863/8873
+ * documentation, and as "Weighted Round Robin" (WRR) in KSZ9477 family docs.
+ */
+#define KSZ8873_TXQ_WFQ_ENABLE BIT(7)
+
#define KSZ9477_REG_PORT_OUT_RATE_0 0x0420
#define KSZ9477_OUT_RATE_NO_LIMIT 0
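
A worked example of the register formula above, with hypothetical values: for port 1, queue 2 the TXQ Split Control register is 0xAF + (1 * 4) + (3 - 2) = 0xB4. Clearing bit 7 there selects strict priority for that queue; setting it restores the default WFQ mode with the fixed 8:4:2:1 weights.

	u8 reg = KSZ8873_TXQ_SPLIT_CTRL_REG(1, 2);	/* 0xAF + 4 + 1 = 0xB4 */

	ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE, 0);	/* strict priority */
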
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 22fb9ef4645c..8ab664e85f13 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -319,22 +319,21 @@ int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_in
return 0;
}
-int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct ksz_device *dev = ds->priv;
- struct hwtstamp_config *config;
struct ksz_port *prt;
prt = &dev->ports[port];
- config = &prt->tstamp_config;
+ *config = prt->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
struct ksz_port *prt,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
int ret;
@@ -404,26 +403,21 @@ static int ksz_set_hwtstamp_config(struct ksz_device *dev,
return ksz_ptp_enable_mode(dev);
}
-int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
- struct hwtstamp_config config;
struct ksz_port *prt;
int ret;
prt = &dev->ports[port];
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret = ksz_set_hwtstamp_config(dev, prt, &config);
+ ret = ksz_set_hwtstamp_config(dev, prt, config);
if (ret)
return ret;
- memcpy(&prt->tstamp_config, &config, sizeof(config));
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ prt->tstamp_config = *config;
return 0;
}
@@ -1136,8 +1130,8 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
init_completion(&port->tstamp_msg_comp);
- ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
- &ksz_ptp_irq_domain_ops, ptpirq);
+ ptpirq->domain = irq_domain_create_linear(of_fwnode_handle(dev->dev->of_node),
+ ptpirq->nirqs, &ksz_ptp_irq_domain_ops, ptpirq);
if (!ptpirq->domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
index 2f1783c0d723..3086e519b1b6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.h
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -39,8 +39,11 @@ void ksz_ptp_clock_unregister(struct dsa_switch *ds);
int ksz_get_ts_info(struct dsa_switch *ds, int port,
struct kernel_ethtool_ts_info *ts);
-int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
-int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
void ksz_port_deferred_xmit(struct kthread_work *work);
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index 5f2db4317dd3..842d74268e77 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -11,6 +11,7 @@
#include "mt7530.h"
static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "airoha,an7583-switch", .data = &mt753x_table[ID_AN7583], },
{ .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
{ .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
{ /* sentinel */ },
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index c5d6628d7980..df213c37b4fe 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -32,47 +32,15 @@ static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
- MIB_DESC(1, 0x00, "TxDrop"),
- MIB_DESC(1, 0x04, "TxCrcErr"),
- MIB_DESC(1, 0x08, "TxUnicast"),
- MIB_DESC(1, 0x0c, "TxMulticast"),
- MIB_DESC(1, 0x10, "TxBroadcast"),
- MIB_DESC(1, 0x14, "TxCollision"),
- MIB_DESC(1, 0x18, "TxSingleCollision"),
- MIB_DESC(1, 0x1c, "TxMultipleCollision"),
- MIB_DESC(1, 0x20, "TxDeferred"),
- MIB_DESC(1, 0x24, "TxLateCollision"),
- MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
- MIB_DESC(1, 0x2c, "TxPause"),
- MIB_DESC(1, 0x30, "TxPktSz64"),
- MIB_DESC(1, 0x34, "TxPktSz65To127"),
- MIB_DESC(1, 0x38, "TxPktSz128To255"),
- MIB_DESC(1, 0x3c, "TxPktSz256To511"),
- MIB_DESC(1, 0x40, "TxPktSz512To1023"),
- MIB_DESC(1, 0x44, "Tx1024ToMax"),
- MIB_DESC(2, 0x48, "TxBytes"),
- MIB_DESC(1, 0x60, "RxDrop"),
- MIB_DESC(1, 0x64, "RxFiltering"),
- MIB_DESC(1, 0x68, "RxUnicast"),
- MIB_DESC(1, 0x6c, "RxMulticast"),
- MIB_DESC(1, 0x70, "RxBroadcast"),
- MIB_DESC(1, 0x74, "RxAlignErr"),
- MIB_DESC(1, 0x78, "RxCrcErr"),
- MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
- MIB_DESC(1, 0x80, "RxFragErr"),
- MIB_DESC(1, 0x84, "RxOverSzErr"),
- MIB_DESC(1, 0x88, "RxJabberErr"),
- MIB_DESC(1, 0x8c, "RxPause"),
- MIB_DESC(1, 0x90, "RxPktSz64"),
- MIB_DESC(1, 0x94, "RxPktSz65To127"),
- MIB_DESC(1, 0x98, "RxPktSz128To255"),
- MIB_DESC(1, 0x9c, "RxPktSz256To511"),
- MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
- MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
- MIB_DESC(2, 0xa8, "RxBytes"),
- MIB_DESC(1, 0xb0, "RxCtrlDrop"),
- MIB_DESC(1, 0xb4, "RxIngressDrop"),
- MIB_DESC(1, 0xb8, "RxArlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_DROP, "TxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_CRC_ERR, "TxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_COLLISION, "TxCollision"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_DROP, "RxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_FILTERING, "RxFiltering"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CRC_ERR, "RxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CTRL_DROP, "RxCtrlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_INGRESS_DROP, "RxIngressDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_ARL_DROP, "RxArlDrop"),
};
static void
@@ -790,23 +758,33 @@ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
}
static void
+mt7530_read_port_stats(struct mt7530_priv *priv, int port,
+ u32 offset, u8 size, uint64_t *data)
+{
+ u32 val, reg = MT7530_PORT_MIB_COUNTER(port) + offset;
+
+ val = mt7530_read(priv, reg);
+ *data = val;
+
+ if (size == 2) {
+ val = mt7530_read(priv, reg + 4);
+ *data |= (u64)val << 32;
+ }
+}
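
The size parameter selects between 32-bit and 64-bit MIB counters: size 2 reads the low word at the given offset and the high word 4 bytes later. For example, for the 64-bit TxBytes counter (offset 0x48, defined below as MT7530_PORT_MIB_TX_BYTES):

	u64 tx_bytes;

	/* reads 0x48 (low 32 bits) and 0x4c (high 32 bits) for this port */
	mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
			       &tx_bytes);
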
+
+static void
mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct mt7530_priv *priv = ds->priv;
const struct mt7530_mib_desc *mib;
- u32 reg, i;
- u64 hi;
+ int i;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
mib = &mt7530_mib[i];
- reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
- data[i] = mt7530_read(priv, reg);
- if (mib->size == 2) {
- hi = mt7530_read(priv, reg + 4);
- data[i] |= hi << 32;
- }
+ mt7530_read_port_stats(priv, port, mib->offset, mib->size,
+ data + i);
}
}
@@ -819,6 +797,172 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(mt7530_mib);
}
+static void mt7530_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+	/* The MIB counters don't provide a FramesTransmittedOK counter but
+	 * instead provide stats for Unicast, Broadcast and Multicast frames
+	 * separately. To simulate a global frame counter, read Unicast here
+	 * and add Multicast and Broadcast to it later.
+	 */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &mac_stats->FramesTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_SINGLE_COLLISION, 1,
+ &mac_stats->SingleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTIPLE_COLLISION, 1,
+ &mac_stats->MultipleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &mac_stats->FramesReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &mac_stats->OctetsTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_ALIGN_ERR, 1,
+ &mac_stats->AlignmentErrors);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DEFERRED, 1,
+ &mac_stats->FramesWithDeferredXmissions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_LATE_COLLISION, 1,
+ &mac_stats->LateCollisions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION, 1,
+ &mac_stats->FramesAbortedDueToXSColls);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &mac_stats->OctetsReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &mac_stats->MulticastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->MulticastFramesXmittedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->BroadcastFramesXmittedOK;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &mac_stats->MulticastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->MulticastFramesReceivedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->BroadcastFramesReceivedOK;
+}
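
The net effect of the summation above, with hypothetical counter values: if the port transmitted 100 unicast, 10 multicast and 5 broadcast frames, ethtool reports the aggregate, and likewise for the receive direction.

	/* FramesTransmittedOK = TxUnicast + TxMulticast + TxBroadcast
	 *                     = 100 + 10 + 5 = 115
	 */
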
+
+static const struct ethtool_rmon_hist_range mt7530_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, MT7530_MAX_MTU },
+ {}
+};
+
+static void mt7530_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNDER_SIZE_ERR, 1,
+ &rmon_stats->undersize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_OVER_SZ_ERR, 1,
+ &rmon_stats->oversize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_FRAG_ERR, 1,
+ &rmon_stats->fragments);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_JABBER_ERR, 1,
+ &rmon_stats->jabbers);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_64, 1,
+ &rmon_stats->hist[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist[5]);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_64, 1,
+ &rmon_stats->hist_tx[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist_tx[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist_tx[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist_tx[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist_tx[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist_tx[5]);
+
+ *ranges = mt7530_rmon_ranges;
+}
+
+static void mt7530_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mt7530_priv *priv = ds->priv;
+ uint64_t data;
+
+	/* The MIB counters don't provide global packet counters but instead
+	 * provide stats for Unicast, Broadcast and Multicast frames
+	 * separately. To simulate them, read Unicast here and add Multicast
+	 * and Broadcast to it later.
+	 */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &storage->rx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &storage->multicast);
+ storage->rx_packets += storage->multicast;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &data);
+ storage->rx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &storage->tx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &data);
+ storage->tx_packets += data;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &data);
+ storage->tx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &storage->rx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &storage->tx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_DROP, 1,
+ &storage->rx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DROP, 1,
+ &storage->tx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_CRC_ERR, 1,
+ &storage->rx_crc_errors);
+}
+
+static void mt7530_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesTransmitted);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesReceived);
+}
+
static int
mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
@@ -1154,7 +1298,7 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
* is affine to the inbound user port.
*/
if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
- priv->id == ID_EN7581)
+ priv->id == ID_EN7581 || priv->id == ID_AN7583)
mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
@@ -2468,7 +2612,7 @@ mt7531_setup_common(struct dsa_switch *ds)
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
/* Enable Special Tag for rx frames */
- if (priv->id == ID_EN7581)
+ if (priv->id == ID_EN7581 || priv->id == ID_AN7583)
mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
@@ -3092,6 +3236,16 @@ static int mt7988_setup(struct dsa_switch *ds)
reset_control_deassert(priv->rstc);
usleep_range(20, 50);
+	/* AN7583 requires an additional tweak to CONN_CFG */
+ if (priv->id == ID_AN7583)
+ mt7530_rmw(priv, AN7583_GEPHY_CONN_CFG,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ AN7583_CSR_ETHER_AFE_PWD,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ FIELD_PREP(AN7583_CSR_ETHER_AFE_PWD, 0));
+
/* Reset the switch PHYs */
mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
@@ -3105,6 +3259,10 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
+ .get_eth_mac_stats = mt7530_get_eth_mac_stats,
+ .get_rmon_stats = mt7530_get_rmon_stats,
+ .get_eth_ctrl_stats = mt7530_get_eth_ctrl_stats,
+ .get_stats64 = mt7530_get_stats64,
.set_ageing_time = mt7530_set_ageing_time,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
@@ -3196,6 +3354,16 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c45 = mt7531_ind_c45_phy_write,
.mac_port_get_caps = en7581_mac_port_get_caps,
},
+ [ID_AN7583] = {
+ .id = ID_AN7583,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
};
EXPORT_SYMBOL_GPL(mt753x_table);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index c3ea403d7acf..7e47cd9af256 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -20,6 +20,7 @@ enum mt753x_id {
ID_MT7531 = 2,
ID_MT7988 = 3,
ID_EN7581 = 4,
+ ID_AN7583 = 5,
};
#define NUM_TRGMII_CTRL 5
@@ -66,7 +67,8 @@ enum mt753x_id {
#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_CFC : MT753X_MFC)
#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
@@ -76,19 +78,22 @@ enum mt753x_id {
#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_MASK : \
MT7530_MIRROR_PORT_MASK)
#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_GET(val) : \
MT7530_MIRROR_PORT_GET(val))
#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_SET(val) : \
MT7530_MIRROR_PORT_SET(val))
@@ -423,6 +428,48 @@ enum mt7530_vlan_port_acc_frm {
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+/* Each define is an offset of MT7530_PORT_MIB_COUNTER */
+#define MT7530_PORT_MIB_TX_DROP 0x00
+#define MT7530_PORT_MIB_TX_CRC_ERR 0x04
+#define MT7530_PORT_MIB_TX_UNICAST 0x08
+#define MT7530_PORT_MIB_TX_MULTICAST 0x0c
+#define MT7530_PORT_MIB_TX_BROADCAST 0x10
+#define MT7530_PORT_MIB_TX_COLLISION 0x14
+#define MT7530_PORT_MIB_TX_SINGLE_COLLISION 0x18
+#define MT7530_PORT_MIB_TX_MULTIPLE_COLLISION 0x1c
+#define MT7530_PORT_MIB_TX_DEFERRED 0x20
+#define MT7530_PORT_MIB_TX_LATE_COLLISION 0x24
+#define MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION 0x28
+#define MT7530_PORT_MIB_TX_PAUSE 0x2c
+#define MT7530_PORT_MIB_TX_PKT_SZ_64 0x30
+#define MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127 0x34
+#define MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255 0x38
+#define MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511 0x3c
+#define MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023 0x40
+#define MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX 0x44
+#define MT7530_PORT_MIB_TX_BYTES 0x48 /* 64 bytes */
+#define MT7530_PORT_MIB_RX_DROP 0x60
+#define MT7530_PORT_MIB_RX_FILTERING 0x64
+#define MT7530_PORT_MIB_RX_UNICAST 0x68
+#define MT7530_PORT_MIB_RX_MULTICAST 0x6c
+#define MT7530_PORT_MIB_RX_BROADCAST 0x70
+#define MT7530_PORT_MIB_RX_ALIGN_ERR 0x74
+#define MT7530_PORT_MIB_RX_CRC_ERR 0x78
+#define MT7530_PORT_MIB_RX_UNDER_SIZE_ERR 0x7c
+#define MT7530_PORT_MIB_RX_FRAG_ERR 0x80
+#define MT7530_PORT_MIB_RX_OVER_SZ_ERR 0x84
+#define MT7530_PORT_MIB_RX_JABBER_ERR 0x88
+#define MT7530_PORT_MIB_RX_PAUSE 0x8c
+#define MT7530_PORT_MIB_RX_PKT_SZ_64 0x90
+#define MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127 0x94
+#define MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255 0x98
+#define MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511 0x9c
+#define MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023 0xa0
+#define MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX 0xa4
+#define MT7530_PORT_MIB_RX_BYTES 0xa8 /* 64 bytes */
+#define MT7530_PORT_MIB_RX_CTRL_DROP 0xb0
+#define MT7530_PORT_MIB_RX_INGRESS_DROP 0xb4
+#define MT7530_PORT_MIB_RX_ARL_DROP 0xb8
#define MT7530_MIB_CCR 0x4fe0
#define CCR_MIB_ENABLE BIT(31)
#define CCR_RX_OCT_CNT_GOOD BIT(7)
@@ -631,6 +678,11 @@ enum mt7531_xtal_fsel {
#define CPORT_SW2FE_STAG_EN BIT(1)
#define CPORT_FE2SW_STAG_EN BIT(0)
+#define AN7583_GEPHY_CONN_CFG 0x7c14
+#define AN7583_CSR_DPHY_CKIN_SEL BIT(31)
+#define AN7583_CSR_PHY_CORE_REG_CLK_SEL BIT(30)
+#define AN7583_CSR_ETHER_AFE_PWD GENMASK(28, 24)
+
/* Registers for LED GPIO control (MT7530 only)
* All registers follow this pattern:
* [ 2: 0] port 0
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 08db846cda8d..2281d6ab8c9a 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -297,7 +297,7 @@ static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
u16 reg, mask;
chip->g1_irq.nirqs = chip->info->g1_irqs;
- chip->g1_irq.domain = irq_domain_add_simple(
+ chip->g1_irq.domain = irq_domain_create_simple(
NULL, chip->g1_irq.nirqs, 0,
&mv88e6xxx_g1_irq_domain_ops, chip);
if (!chip->g1_irq.domain)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 86bf113c9bfa..7d00482f53a3 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -241,7 +241,7 @@ struct mv88e6xxx_port_hwtstamp {
u16 tx_seq_id;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
enum mv88e6xxx_policy_mapping {
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index b2b5f6ba438f..aaf97c1e3167 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1154,8 +1154,10 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- chip->g2_irq.domain = irq_domain_add_simple(
- chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
+ chip->g2_irq.domain = irq_domain_create_simple(of_fwnode_handle(chip->dev->of_node),
+ 16, 0,
+ &mv88e6xxx_g2_irq_domain_ops,
+ chip);
if (!chip->g2_irq.domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 49e6e1355142..f663799b0b3b 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -89,7 +89,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
@@ -169,42 +169,38 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config config;
int err;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
+ err = mv88e6xxx_set_hwtstamp_config(chip, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later. */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config *config = &ps->tstamp_config;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = ps->tstamp_config;
+
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp,
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index 85acc758e3eb..22e4acc957f0 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -111,9 +111,10 @@
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg);
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
@@ -132,14 +133,17 @@ int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static inline int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index b6d249eb64e7..4e7827ee684a 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -182,7 +182,7 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
- struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
+ struct mv88e6xxx_chip *chip = timer_container_of(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index aed4a4b07f34..1d3b2c94c53e 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -332,13 +332,6 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -566,6 +559,10 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+ chip->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
if (ptp_ops->set_ptp_cpu_port) {
struct dsa_port *dp;
int upstream = 0;
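
Declaring supported_extts_flags up front lets the PTP core validate PTP_EXTTS_REQUEST flags generically, which is why the open-coded rejection at the top of mv88e6352_ptp_enable_extts() could be dropped: a request carrying an unsupported flag (say PTP_EXT_OFFSET) is now rejected before the driver's .enable callback runs, assuming the core-side check tied to this field. A minimal sketch:

	static const struct ptp_clock_info foo_ptp_info = {
		/* other fields omitted in this sketch */
		.supported_extts_flags = PTP_RISING_EDGE |
					 PTP_FALLING_EDGE |
					 PTP_STRICT_FLAGS,
	};
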
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 0a4e682a55ef..2dd4e56e1cf1 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1774,22 +1774,25 @@ static void felix_teardown(struct dsa_switch *ds)
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_hwstamp_get(ocelot, port, ifr);
+ ocelot_hwstamp_get(ocelot, port, config);
+
+ return 0;
}
static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
bool using_tag_8021q;
int err;
- err = ocelot_hwstamp_set(ocelot, port, ifr);
+ err = ocelot_hwstamp_set(ocelot, port, config, extack);
if (err)
return err;
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index e9f2c67bc15f..79a29676ca6f 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -821,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return ret;
}
- priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(np), 1,
+ &ar9331_sw_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(dev, "failed to create IRQ domain\n");
return -EINVAL;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 7e96355c28bd..964a56ee16cc 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1719,8 +1719,8 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv)
goto out_put_node;
}
- priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
- &rtl8365mb_irqdomain_ops, priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index f54771cab56d..8bdb52b5fdcb 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -550,10 +550,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- priv->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 31ea8130a495..df7466d4fe8f 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -337,8 +337,9 @@ static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
bool set)
{
- u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
- A5PSW_MCAST_DEF_MASK};
+ static const u8 offsets[] = {
+ A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK, A5PSW_MCAST_DEF_MASK
+ };
int i;
for (i = 0; i < ARRAY_SIZE(offsets); i++)
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index f8454f3b6f9c..f674c400f05b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2081,6 +2081,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
/* From UM10944 description of DRPDTAG (why put this there?):
* "Management traffic flows to the port regardless of the state
 * of the INGRESS flag". So BPDUs are still allowed to pass.
@@ -2090,11 +2091,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
- case BR_STATE_LISTENING:
- mac[port].ingress = true;
- mac[port].egress = false;
- mac[port].dyn_learn = false;
- break;
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 198e787e8560..fefe46e2a5e6 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,19 +58,17 @@ enum sja1105_ptp_clk_mode {
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
unsigned long hwts_tx_en, hwts_rx_en;
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
hwts_tx_en = priv->hwts_tx_en;
hwts_rx_en = priv->hwts_rx_en;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
hwts_tx_en &= ~BIT(port);
break;
@@ -81,7 +79,7 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
hwts_rx_en &= ~BIT(port);
break;
@@ -92,32 +90,28 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
return -ERANGE;
}
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
priv->hwts_tx_en = hwts_tx_en;
priv->hwts_rx_en = hwts_rx_en;
return 0;
}
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- config.flags = 0;
+ config->flags = 0;
if (priv->hwts_tx_en & BIT(port))
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
if (priv->hwts_rx_en & BIT(port))
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
@@ -737,10 +731,6 @@ static int sja1105_per_out_enable(struct sja1105_private *priv,
if (perout->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (perout->flags)
- return -EOPNOTSUPP;
-
mutex_lock(&ptp_data->lock);
rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
@@ -820,13 +810,6 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (extts->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (extts->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* We can only enable time stamping on both edges, sadly. */
if ((extts->flags & PTP_STRICT_FLAGS) &&
(extts->flags & PTP_ENABLE_FEATURE) &&
@@ -912,6 +895,9 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.n_pins = 1,
.n_ext_ts = 1,
.n_per_out = 1,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
};
/* Only used on SJA1105 */
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 8add2bd5f728..325e3777ea07 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -112,9 +112,12 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
void sja1105_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
struct ptp_system_timestamp *sts);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 5889759b8d83..9ba10efd3794 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -143,7 +143,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
static void eql_timer(struct timer_list *t)
{
- equalizer_t *eql = from_timer(eql, t, timer);
+ equalizer_t *eql = timer_container_of(eql, t, timer);
struct list_head *this, *tmp, *head;
spin_lock(&eql->queue.lock);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 8ba2ed87fe7c..ecdea58e6a21 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -859,7 +859,7 @@ static int corkscrew_open(struct net_device *dev)
static void corkscrew_timer(struct timer_list *t)
{
#ifdef AUTOMEDIA
- struct corkscrew_private *vp = from_timer(vp, t, timer);
+ struct corkscrew_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->our_dev;
int ioaddr = dev->base_addr;
unsigned long flags;
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index b295d528a237..1f2070497a75 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -858,7 +858,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
*/
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
unsigned long flags;
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index ff331a3bde73..ea49be43b8c3 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -685,7 +685,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
static void media_check(struct timer_list *t)
{
- struct el3_private *lp = from_timer(lp, t, media);
+ struct el3_private *lp = timer_container_of(lp, t, media);
struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
u16 media, errs;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 1a10f5dbc4d7..8c9cc97efd4e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1783,7 +1783,7 @@ out:
static void
vortex_timer(struct timer_list *t)
{
- struct vortex_private *vp = from_timer(vp, t, timer);
+ struct vortex_private *vp = timer_container_of(vp, t, timer);
struct net_device *dev = vp->mii.dev;
void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index e5be5044e1d4..7c8213011b5c 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -550,7 +550,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct axnet_dev *info = from_timer(info, t, watchdog);
+ struct axnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + AXNET_MII_EEP;
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index a326f25dda09..19f9c5db3f3b 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1018,7 +1018,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
static void ei_watchdog(struct timer_list *t)
{
- struct pcnet_dev *info = from_timer(info, t, watchdog);
+ struct pcnet_dev *info = timer_container_of(info, t, watchdog);
struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + DLINK_GPIO;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index b398adacda91..678eddb36172 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3076,7 +3076,8 @@ err_out:
*/
static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
+ struct et131x_adapter *adapter = timer_container_of(adapter, t,
+ error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
index 1a4cf6a259f6..ad3ce501e7a5 100644
--- a/drivers/net/ethernet/airoha/Kconfig
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -24,4 +24,11 @@ config NET_AIROHA
This driver supports the gigabit ethernet MACs in the
Airoha SoC family.
+config NET_AIROHA_FLOW_STATS
+ default y
+ bool "Airoha flow stats"
+ depends on NET_AIROHA && NET_AIROHA_NPU
+ help
+ Enable Airoha flowtable statistics counters.
+
endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index d748dc6de923..06dea3a13e77 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -5,6 +5,7 @@
*/
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
@@ -34,46 +35,40 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
return val;
}
-static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
- u32 clear, u32 set)
+static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
+ int index, u32 clear, u32 set)
{
+ struct airoha_qdma *qdma = irq_bank->qdma;
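+ /* derive the bank index from the irq_bank position in qdma->irq_banks[] */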
+ int bank = irq_bank - &qdma->irq_banks[0];
unsigned long flags;
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
return;
- spin_lock_irqsave(&qdma->irq_lock, flags);
+ spin_lock_irqsave(&irq_bank->irq_lock, flags);
- qdma->irqmask[index] &= ~clear;
- qdma->irqmask[index] |= set;
- airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
+ irq_bank->irqmask[index] &= ~clear;
+ irq_bank->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
+ irq_bank->irqmask[index]);
/* Read irq_enable register in order to guarantee the update above
* completes in the spinlock critical section.
*/
- airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
- spin_unlock_irqrestore(&qdma->irq_lock, flags);
+ spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}
-static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
- u32 mask)
+static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
{
- airoha_qdma_set_irqmask(qdma, index, 0, mask);
+ airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}
-static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
- u32 mask)
+static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
{
- airoha_qdma_set_irqmask(qdma, index, mask, 0);
-}
-
-static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
-{
- /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
- * GDM{2,3,4} can be used as wan port connected to an external
- * phy module.
- */
- return port->id == 1;
+ airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}
static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
@@ -89,6 +84,8 @@ static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
+
+ airoha_ppe_init_upd_mem(port);
}
static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
@@ -527,6 +524,25 @@ static int airoha_fe_init(struct airoha_eth *eth)
/* disable IFC by default */
airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0),
+ FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1));
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1),
+ FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2));
+
/* enable 1:N vlan action, init vlan table */
airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
@@ -614,7 +630,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
- dma_addr_t dma_addr = le32_to_cpu(desc->addr);
struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
struct airoha_gdm_port *port;
@@ -623,22 +638,16 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
- if (!dma_addr)
- break;
-
- len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
- if (!len)
- break;
-
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- dma_sync_single_for_cpu(eth->dev, dma_addr,
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
+ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
data_len = q->skb ? q->buf_size
: SKB_WITH_OVERHEAD(q->buf_size);
- if (data_len < len)
+ if (!len || data_len < len)
goto free_frag;
p = airoha_qdma_get_gdm_port(eth, desc);
@@ -694,16 +703,19 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- airoha_ppe_check_skb(eth->ppe, hash);
+ airoha_ppe_check_skb(eth->ppe, q->skb, hash);
done++;
napi_gro_receive(&q->napi, q->skb);
q->skb = NULL;
continue;
free_frag:
- page_pool_put_full_page(q->page_pool, page, true);
- dev_kfree_skb(q->skb);
- q->skb = NULL;
+ if (q->skb) {
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
+ } else {
+ page_pool_put_full_page(q->page_pool, page, true);
+ }
}
airoha_qdma_fill_rx_queue(q);
@@ -720,9 +732,20 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
done += cur;
} while (cur && done < budget);
- if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
- RX_DONE_INT_MASK);
+ if (done < budget && napi_complete(napi)) {
+ struct airoha_qdma *qdma = q->qdma;
+ int i, qid = q - &qdma->q_rx[0];
+ int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
+ : QDMA_INT_REG_IDX2;
+
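+ /* re-enable the RX-done interrupt only on the banks this queue is pinned to */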
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
+ BIT(qid % RX_DONE_HIGH_OFFSET));
+ }
+ }
return done;
}
@@ -925,7 +948,7 @@ unlock:
}
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(id));
return done;
@@ -1042,37 +1065,64 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
+ int size, index, num_desc = HW_DSCP_NUM;
struct airoha_eth *eth = qdma->eth;
+ int id = qdma - &eth->qdma[0];
+ u32 status, buf_size;
dma_addr_t dma_addr;
- u32 status;
- int size;
+ const char *name;
- size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
- qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!qdma->hfwd.desc)
+ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
+ if (!name)
return -ENOMEM;
- airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
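+ /* QDMA1 uses half-size payload buffers (see the payload-size setting below) */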
+ buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
+ index = of_property_match_string(eth->dev->of_node,
+ "memory-region-names", name);
+ if (index >= 0) {
+ struct reserved_mem *rmem;
+ struct device_node *np;
- size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
- qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!qdma->hfwd.q)
- return -ENOMEM;
+ /* Consume reserved memory for the hw forwarding buffer queue if
+ * available in the DTS.
+ */
+ np = of_parse_phandle(eth->dev->of_node, "memory-region",
+ index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+ dma_addr = rmem->base;
+ /* Compute the number of hw descriptors according to the
+ * reserved memory size and the payload buffer size
+ */
+ num_desc = div_u64(rmem->size, buf_size);
+ } else {
+ size = buf_size * num_desc;
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL))
+ return -ENOMEM;
+ }
airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+ size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ /* QDMA0: 2KB. QDMA1: 1KB */
airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
- FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
HW_FWD_DESC_NUM_MASK,
- FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
- LMGR_INIT_START);
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
return read_poll_timeout(airoha_qdma_rr, status,
!(status & LMGR_INIT_START), USEC_PER_MSEC,
@@ -1155,14 +1205,24 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
- /* clear pending irqs */
- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ /* clear pending irqs */
airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-
- /* setup irqs */
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+ /* setup rx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
+ INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
+ INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
+ INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
+ INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ }
+ /* setup tx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
+ TX_COHERENT_HIGH_INT_MASK);
/* setup irq binding */
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
@@ -1207,30 +1267,39 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
- struct airoha_qdma *qdma = dev_instance;
- u32 intr[ARRAY_SIZE(qdma->irqmask)];
+ struct airoha_irq_bank *irq_bank = dev_instance;
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
+ u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
int i;
- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+ for (i = 0; i < ARRAY_SIZE(intr); i++) {
intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
- intr[i] &= qdma->irqmask[i];
+ intr[i] &= irq_bank->irqmask[i];
airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
- if (intr[1] & RX_DONE_INT_MASK) {
- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
- RX_DONE_INT_MASK);
+ rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
+ if (rx_intr1) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
+ rx_intr_mask |= rx_intr1;
+ }
- for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
- if (!qdma->q_rx[i].ndesc)
- continue;
+ rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
+ if (rx_intr2) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
+ rx_intr_mask |= (rx_intr2 << 16);
+ }
- if (intr[1] & BIT(i))
- napi_schedule(&qdma->q_rx[i].napi);
- }
+ for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ if (rx_intr_mask & BIT(i))
+ napi_schedule(&qdma->q_rx[i].napi);
}
if (intr[0] & INT_TX_MASK) {
@@ -1238,7 +1307,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
napi_schedule(&qdma->q_tx_irq[i].napi);
}
@@ -1247,6 +1316,39 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
+ struct airoha_qdma *qdma)
+{
+ struct airoha_eth *eth = qdma->eth;
+ int i, id = qdma - &eth->qdma[0];
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
+ int err, irq_index = 4 * id + i;
+ const char *name;
+
+ spin_lock_init(&irq_bank->irq_lock);
+ irq_bank->qdma = qdma;
+
+ irq_bank->irq = platform_get_irq(pdev, irq_index);
+ if (irq_bank->irq < 0)
+ return irq_bank->irq;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL,
+ KBUILD_MODNAME ".%d", irq_index);
+ if (!name)
+ return -ENOMEM;
+
+ err = devm_request_irq(eth->dev, irq_bank->irq,
+ airoha_irq_handler, IRQF_SHARED, name,
+ irq_bank);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int airoha_qdma_init(struct platform_device *pdev,
struct airoha_eth *eth,
struct airoha_qdma *qdma)
@@ -1254,9 +1356,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
int err, id = qdma - &eth->qdma[0];
const char *res;
- spin_lock_init(&qdma->irq_lock);
qdma->eth = eth;
-
res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
if (!res)
return -ENOMEM;
@@ -1266,12 +1366,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
"failed to iomap qdma%d regs\n", id);
- qdma->irq = platform_get_irq(pdev, 4 * id);
- if (qdma->irq < 0)
- return qdma->irq;
-
- err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, qdma);
+ err = airoha_qdma_init_irq_banks(pdev, qdma);
if (err)
return err;
@@ -1631,7 +1726,6 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
if (port->id == 3) {
/* FIXME: handle XSI_PCE1_PORT */
- airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500);
airoha_fe_rmw(eth, REG_FE_WAN_PORT,
WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
@@ -2109,6 +2203,125 @@ static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
}
}
+static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
+ REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 val)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 config = RATE_LIMIT_PARAM_RW_MASK |
+ FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, bool enable, u32 enable_mask)
+{
+ u32 val;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ &val, NULL);
+ if (err)
+ return err;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ val);
+}
+
+static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
+ int queue_id, u32 rate_val,
+ u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_MISC_MODE, &config, NULL);
+ if (err)
+ return err;
+
+ val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
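+ /* split the configured rate into integer and fractional token-rate fields */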
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_TOKEN_RATE_MODE, rate);
+ if (err)
+ return err;
+
+ val = bucket_size;
+ if (!(config & TRTCM_PKT_MODE))
+ val = max_t(u32, val, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_BUCKETSIZE_SHIFT_MODE, val);
+}
+
+static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
+ bool enable, enum trtcm_unit_type unit)
+{
+ bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
+ enum trtcm_param mode = TRTCM_METER_MODE;
+ int err;
+
+ mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
+ err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ enable, mode);
+ if (err)
+ return err;
+
+ return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ tick_sel, TRTCM_TICK_SEL);
+}
+
static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
u32 addr, enum trtcm_param_type param,
enum trtcm_mode_type mode,
@@ -2273,10 +2486,142 @@ static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
return 0;
}
+static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
+ u32 rate, u32 bucket_size,
+ enum trtcm_unit_type unit_type)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
+ bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
+{
+ const struct flow_action *actions = &f->rule->action;
+ const struct flow_action_entry *act;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "filter run with no actions");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "only once action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &actions->entries[0];
+ if (act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid exceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid notexceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(actions, act)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "action accept must be last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps || act->police.avrate ||
+ act->police.overhead || act->police.mtu) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "peakrate/avrate/overhead/mtu unsupported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *f)
+{
+ enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 rate = 0, bucket_size = 0;
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE: {
+ const struct flow_action_entry *act;
+ int err;
+
+ err = airoha_tc_matchall_act_validate(f);
+ if (err)
+ return err;
+
+ act = &f->rule->action.entries[0];
+ if (act->police.rate_pkt_ps) {
+ rate = act->police.rate_pkt_ps;
+ bucket_size = act->police.burst_pkt;
+ unit_type = TRTCM_PACKET_UNIT;
+ } else {
+ rate = div_u64(act->police.rate_bytes_ps, 1000);
+ rate = rate << 3; /* Kbps */
+ bucket_size = act->police.burst;
+ }
+ fallthrough;
+ }
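+ /* on DESTROY, rate is still zero here, which disables the meter */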
+ case TC_CLSMATCHALL_DESTROY:
+ return airoha_qdma_set_rx_meter(port, rate, bucket_size,
+ unit_type);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return airoha_ppe_setup_tc_block_cb(dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return airoha_dev_tc_matchall(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
struct flow_block_offload *f)
{
- flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
+ flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
static LIST_HEAD(block_cb_list);
struct flow_block_cb *block_cb;
@@ -2515,7 +2860,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
dev->features |= dev->hw_features;
dev->vlan_features = dev->hw_features;
dev->dev.of_node = np;
- dev->irq = qdma->irq;
+ dev->irq = qdma->irq_banks[0].irq;
SET_NETDEV_DEV(dev, eth->dev);
/* reserve hw queues for HTB offloading */
@@ -2545,7 +2890,15 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
if (err)
return err;
- return register_netdev(dev);
+ err = register_netdev(dev);
+ if (err)
+ goto free_metadata_dst;
+
+ return 0;
+
+free_metadata_dst:
+ airoha_metadata_dst_free(port);
+ return err;
}
static int airoha_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index ec8908f904c6..a970b789cf23 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -17,6 +17,7 @@
#define AIROHA_MAX_NUM_GDM_PORTS 4
#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_NUM_IRQ_BANKS 4
#define AIROHA_MAX_DSA_PORTS 7
#define AIROHA_MAX_NUM_RSTS 3
#define AIROHA_MAX_NUM_XSI_RSTS 5
@@ -49,6 +50,14 @@
#define PPE_NUM 2
#define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
+#ifdef CONFIG_NET_AIROHA_FLOW_STATS
+#define PPE1_STATS_NUM_ENTRIES (4 * 1024)
+#else
+#define PPE1_STATS_NUM_ENTRIES 0
+#endif /* CONFIG_NET_AIROHA_FLOW_STATS */
+#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES)
+#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES)
+#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES)
#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
@@ -127,6 +136,11 @@ enum tx_sched_mode {
TC_SCH_WRR2,
};
+enum trtcm_unit_type {
+ TRTCM_BYTE_UNIT,
+ TRTCM_PACKET_UNIT,
+};
+
enum trtcm_param_type {
TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
TRTCM_TOKEN_RATE_MODE,
@@ -255,6 +269,8 @@ struct airoha_foe_mac_info {
u16 pppoe_id;
u16 src_mac_lo;
+
+ u32 meter;
};
#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
@@ -290,6 +306,11 @@ struct airoha_foe_mac_info {
#define AIROHA_FOE_TUNNEL BIT(6)
#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16)
+#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9)
+#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5)
+#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0)
+
struct airoha_foe_bridge {
u32 dest_mac_hi;
@@ -373,6 +394,8 @@ struct airoha_foe_ipv6 {
u32 ib2;
struct airoha_foe_mac_info_common l2;
+
+ u32 meter;
};
struct airoha_foe_entry {
@@ -391,6 +414,16 @@ struct airoha_foe_entry {
};
};
+struct airoha_foe_stats {
+ u32 bytes;
+ u32 packets;
+};
+
+struct airoha_foe_stats64 {
+ u64 bytes;
+ u64 packets;
+};
+
struct airoha_flow_data {
struct ethhdr eth;
@@ -422,37 +455,64 @@ struct airoha_flow_data {
} pppoe;
};
+enum airoha_flow_entry_type {
+ FLOW_TYPE_L4,
+ FLOW_TYPE_L2,
+ FLOW_TYPE_L2_SUBFLOW,
+};
+
struct airoha_flow_table_entry {
- struct hlist_node list;
+ union {
+ struct hlist_node list; /* PPE L3 flow entry */
+ struct {
+ struct rhash_head l2_node; /* L2 flow entry */
+ struct hlist_head l2_flows; /* PPE L2 subflows list */
+ };
+ };
struct airoha_foe_entry data;
+ struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
u32 hash;
+ struct airoha_foe_stats64 stats;
+ enum airoha_flow_entry_type type;
+
struct rhash_head node;
unsigned long cookie;
};
-struct airoha_qdma {
- struct airoha_eth *eth;
- void __iomem *regs;
+/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
+#define RX_IRQ0_BANK_PIN_MASK 0x839f
+#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
+#define RX_IRQ2_BANK_PIN_MASK 0x20
+#define RX_IRQ3_BANK_PIN_MASK 0x40
+#define RX_IRQ_BANK_PIN_MASK(_n) \
+ (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
+ ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
+ ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
+ RX_IRQ0_BANK_PIN_MASK)
+
+struct airoha_irq_bank {
+ struct airoha_qdma *qdma;
/* protect concurrent irqmask accesses */
spinlock_t irq_lock;
u32 irqmask[QDMA_INT_REG_MAX];
int irq;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
atomic_t users;
+ struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
+
struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
};
struct airoha_gdm_port {
@@ -480,9 +540,14 @@ struct airoha_ppe {
void *foe;
dma_addr_t foe_dma;
+ struct rhashtable l2_flows;
+
struct hlist_head *foe_flow;
u16 foe_check_time[PPE_NUM_ENTRIES];
+ struct airoha_foe_stats *foe_stats;
+ dma_addr_t foe_stats_dma;
+
struct dentry *debugfs_dir;
};
@@ -532,16 +597,28 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
#define airoha_qdma_clear(qdma, offset, val) \
airoha_rmw((qdma)->regs, (offset), (val), 0)
+static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+{
+ /* The GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
+ * GDM{2,3,4} can be used as WAN ports connected to an external
+ * PHY module.
+ */
+ return port->id == 1;
+}
+
bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
struct airoha_gdm_port *port);
-void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
-int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+ u16 hash);
+int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
int airoha_ppe_init(struct airoha_eth *eth);
void airoha_ppe_deinit(struct airoha_eth *eth);
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
u32 hash);
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats);
#ifdef CONFIG_DEBUG_FS
int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index ead0625e781f..0e5b8c21b9aa 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -12,6 +12,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/regmap.h>
+#include "airoha_eth.h"
#include "airoha_npu.h"
#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
@@ -72,6 +73,7 @@ enum {
PPE_FUNC_SET_WAIT_HWNAT_INIT,
PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
PPE_FUNC_SET_WAIT_API,
+ PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP,
};
enum {
@@ -115,6 +117,10 @@ struct ppe_mbox_data {
u32 size;
u32 data;
} set_info;
+ struct {
+ u32 npu_stats_addr;
+ u32 foe_stats_addr;
+ } stats_info;
};
};
@@ -124,17 +130,12 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
u16 core = 0; /* FIXME */
u32 val, offset = core << 4;
dma_addr_t dma_addr;
- void *addr;
int ret;
- addr = kmemdup(p, size, GFP_ATOMIC);
- if (!addr)
- return -ENOMEM;
-
- dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
+ dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
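+ /* callers now pass heap-allocated buffers, so map them directly */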
ret = dma_mapping_error(npu->dev, dma_addr);
if (ret)
- goto out;
+ return ret;
spin_lock_bh(&npu->cores[core].lock);
@@ -155,8 +156,6 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
spin_unlock_bh(&npu->cores[core].lock);
dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
-out:
- kfree(addr);
return ret;
}
@@ -261,79 +260,137 @@ static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
static int airoha_npu_ppe_init(struct airoha_npu *npu)
{
- struct ppe_mbox_data ppe_data = {
- .func_type = NPU_OP_SET,
- .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
- .init_info = {
- .ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
- .wan_mode = QDMA_WAN_ETHER,
- },
- };
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
+ ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
+ ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
}
static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
{
- struct ppe_mbox_data ppe_data = {
- .func_type = NPU_OP_SET,
- .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
- };
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
}
static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
dma_addr_t foe_addr,
int sram_num_entries)
{
- struct ppe_mbox_data ppe_data = {
- .func_type = NPU_OP_SET,
- .func_id = PPE_FUNC_SET_WAIT_API,
- .set_info = {
- .func_id = PPE_SRAM_RESET_VAL,
- .data = foe_addr,
- .size = sram_num_entries,
- },
- };
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = sram_num_entries;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ return err;
}
static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
dma_addr_t foe_addr,
u32 entry_size, u32 hash, bool ppe2)
{
- struct ppe_mbox_data ppe_data = {
- .func_type = NPU_OP_SET,
- .func_id = PPE_FUNC_SET_WAIT_API,
- .set_info = {
- .data = foe_addr,
- .size = entry_size,
- },
- };
+ struct ppe_mbox_data *ppe_data;
int err;
- ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
- : PPE_SRAM_SET_ENTRY;
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = entry_size;
+ ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
- err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
if (err)
- return err;
+ goto out;
+
+ ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data->set_info.data = hash;
+ ppe_data->set_info.size = sizeof(u32);
- ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
- ppe_data.set_info.data = hash;
- ppe_data.set_info.size = sizeof(u32);
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+out:
+ kfree(ppe_data);
- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ return err;
}
-struct airoha_npu *airoha_npu_get(struct device *dev)
+static int airoha_npu_stats_setup(struct airoha_npu *npu,
+ dma_addr_t foe_stats_addr)
+{
+ int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats);
+ struct ppe_mbox_data *ppe_data;
+
+ if (!size) /* flow stats are disabled */
+ return 0;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP;
+ ppe_data->stats_info.foe_stats_addr = foe_stats_addr;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
+ npu->stats = devm_ioremap(npu->dev,
+ ppe_data->stats_info.npu_stats_addr,
+ size);
+ if (!npu->stats)
+ err = -ENOMEM;
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
{
struct platform_device *pdev;
struct device_node *np;
@@ -371,6 +428,17 @@ struct airoha_npu *airoha_npu_get(struct device *dev)
goto error_module_put;
}
+ if (stats_addr) {
+ int err;
+
+ err = airoha_npu_stats_setup(npu, *stats_addr);
+ if (err) {
+ dev_err(dev, "failed to allocate npu stats buffer\n");
+ npu = ERR_PTR(err);
+ goto error_module_put;
+ }
+ }
+
return npu;
error_module_put:
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
index a2b8ae4d9473..98ec3be74ce4 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.h
+++ b/drivers/net/ethernet/airoha/airoha_npu.h
@@ -17,6 +17,8 @@ struct airoha_npu {
struct work_struct wdt_work;
} cores[NPU_NUM_CORES];
+ struct airoha_foe_stats __iomem *stats;
+
struct {
int (*ppe_init)(struct airoha_npu *npu);
int (*ppe_deinit)(struct airoha_npu *npu);
@@ -30,5 +32,5 @@ struct airoha_npu {
} ops;
};
-struct airoha_npu *airoha_npu_get(struct device *dev);
+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
void airoha_npu_put(struct airoha_npu *npu);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index f10dab935cab..0e217acfc5ef 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -24,6 +24,13 @@ static const struct rhashtable_params airoha_flow_table_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params airoha_l2_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
+ .key_len = 2 * ETH_ALEN,
+ .automatic_shrinking = true,
+};
+
static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
{
return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
@@ -77,6 +84,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_TB_CFG_KEEPALIVE_MASK |
PPE_TB_ENTRY_SIZE_MASK,
FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
@@ -95,7 +103,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
if (airoha_ppe2_is_enabled(eth)) {
sram_num_entries =
- PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
PPE_SRAM_TB_NUM_ENTRY_MASK |
PPE_DRAM_TB_NUM_ENTRY_MASK,
@@ -112,7 +120,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
dram_num_entries));
} else {
sram_num_entries =
- PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
PPE_SRAM_TB_NUM_ENTRY_MASK |
PPE_DRAM_TB_NUM_ENTRY_MASK,
@@ -197,6 +205,15 @@ static int airoha_get_dsa_port(struct net_device **dev)
#endif
}
+static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
+ struct ethhdr *eh)
+{
+ br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
+ br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
+ br->src_mac_hi = get_unaligned_be16(eh->h_source);
+ br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
+}
+
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
struct airoha_foe_entry *hwe,
struct net_device *dev, int type,
@@ -206,6 +223,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
int dsa_port = airoha_get_dsa_port(&dev);
struct airoha_foe_mac_info_common *l2;
u32 qdata, ports_pad, val;
+ u8 smac_id = 0xf;
memset(hwe, 0, sizeof(*hwe));
@@ -234,6 +252,14 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
else
pse_port = 2; /* uplink relies on GDM2 loopback */
val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
+
+ /* For downlink traffic, consume SRAM memory for the hw forwarding
+ * descriptor queue.
+ */
+ if (airhoa_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
+
+ smac_id = port->id;
}
if (is_multicast_ether_addr(data->eth.h_dest))
@@ -247,13 +273,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
if (type == PPE_PKT_TYPE_BRIDGE) {
- hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
- hwe->bridge.dest_mac_lo =
- get_unaligned_be16(data->eth.h_dest + 4);
- hwe->bridge.src_mac_hi =
- get_unaligned_be16(data->eth.h_source);
- hwe->bridge.src_mac_lo =
- get_unaligned_be32(data->eth.h_source + 2);
+ airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
hwe->bridge.data = qdata;
hwe->bridge.ib2 = val;
l2 = &hwe->bridge.l2.common;
@@ -274,7 +294,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
hwe->ipv4.l2.src_mac_lo =
get_unaligned_be16(data->eth.h_source + 4);
} else {
- l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
}
if (data->vlan.num) {
@@ -378,6 +398,19 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
hv3 ^= hwe->ipv6.src_ip[0];
break;
+ case PPE_PKT_TYPE_BRIDGE: {
+ struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
+
+ hv1 = l2->common.src_mac_hi & 0xffff;
+ hv1 = hv1 << 16 | l2->src_mac_lo;
+
+ hv2 = l2->common.dest_mac_lo;
+ hv2 = hv2 << 16;
+ hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
+
+ hv3 = l2->common.dest_mac_hi;
+ break;
+ }
case PPE_PKT_TYPE_IPV4_DSLITE:
case PPE_PKT_TYPE_IPV6_6RD:
default:
@@ -394,6 +427,77 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
return hash;
}
+static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
+{
+ if (!airoha_ppe2_is_enabled(ppe->eth))
+ return hash;
+
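+ /* remap hashes from the second PPE into the shared flow-stats table */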
+ return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
+ : hash;
+}
+
+static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ int index)
+{
+ memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
+ memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
+}
+
+static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i;
+
+ for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
+}
+
+static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ struct airoha_foe_entry *hwe,
+ u32 hash)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 index, pse_port, val, *data, *ib2, *meter;
+ u8 nbq;
+
+ index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
+ if (index >= PPE_STATS_NUM_ENTRIES)
+ return;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ data = &hwe->bridge.data;
+ ib2 = &hwe->bridge.ib2;
+ meter = &hwe->bridge.l2.meter;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = &hwe->ipv6.data;
+ ib2 = &hwe->ipv6.ib2;
+ meter = &hwe->ipv6.meter;
+ } else {
+ data = &hwe->ipv4.data;
+ ib2 = &hwe->ipv4.ib2;
+ meter = &hwe->ipv4.l2.meter;
+ }
+
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
+
+ val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
+ *data = (*data & ~AIROHA_FOE_ACTDP) |
+ FIELD_PREP(AIROHA_FOE_ACTDP, val);
+
+ val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
+ *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
+
+ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ nbq = pse_port == 1 ? 6 : 5;
+ *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS);
+ *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
+ FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
+}
+
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
u32 hash)
{
@@ -447,6 +551,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
u32 ts = airoha_ppe_get_timestamp(ppe);
struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+ int err = 0;
memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
wmb();
@@ -455,31 +561,133 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
hwe->ib1 = e->ib1;
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (!npu) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+
if (hash < PPE_SRAM_NUM_ENTRIES) {
dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
bool ppe2 = airoha_ppe2_is_enabled(eth) &&
hash >= PPE1_SRAM_NUM_ENTRIES;
- struct airoha_npu *npu;
- int err = -ENODEV;
- rcu_read_lock();
- npu = rcu_dereference(eth->npu);
- if (npu)
- err = npu->ops.ppe_foe_commit_entry(npu, addr,
- sizeof(*hwe), hash,
- ppe2);
- rcu_read_unlock();
+ err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
+ hash, ppe2);
+ }
+unlock:
+ rcu_read_unlock();
- return err;
+ return err;
+}
+
+static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ e->hash = 0xffff;
+ }
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ hlist_del_init(&e->l2_subflow_node);
+ kfree(e);
+ }
+}
+
+static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct hlist_head *head = &e->l2_flows;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
+ airoha_ppe_foe_remove_flow(ppe, e);
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2)
+ airoha_ppe_foe_remove_l2_flow(ppe, e);
+ else
+ airoha_ppe_foe_remove_flow(ppe, e);
+
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e,
+ u32 hash)
+{
+ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
+ struct airoha_foe_entry *hwe_p, hwe;
+ struct airoha_flow_table_entry *f;
+ int type;
+
+ hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
+ if (!hwe_p)
+ return -EINVAL;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return -ENOMEM;
+
+ hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
+ f->type = FLOW_TYPE_L2_SUBFLOW;
+ f->hash = hash;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
+
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+ hwe.ipv6.ib2 = e->data.bridge.ib2;
+ /* Setting smac_id to 0xf instructs the hw to keep the original
+ * source mac address.
+ */
+ hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
+ 0xf);
+ } else {
+ memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+ sizeof(hwe.bridge.l2));
+ hwe.bridge.ib2 = e->data.bridge.ib2;
+ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+ sizeof(hwe.ipv4.new_tuple));
}
+ hwe.bridge.data = e->data.bridge.data;
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
+
return 0;
}
-static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
+ struct sk_buff *skb,
+ u32 hash)
{
struct airoha_flow_table_entry *e;
+ struct airoha_foe_bridge br = {};
struct airoha_foe_entry *hwe;
+ bool commit_done = false;
struct hlist_node *n;
u32 index, state;
@@ -495,21 +703,68 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
index = airoha_ppe_foe_get_entry_hash(hwe);
hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
- if (airoha_ppe_foe_compare_entry(e, hwe)) {
- airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
- e->hash = hash;
- break;
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ e->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, e);
+ }
+ continue;
}
+
+ if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
+ e->hash = 0xffff;
+ continue;
+ }
+
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ commit_done = true;
+ e->hash = hash;
}
+
+ if (commit_done)
+ goto unlock;
+
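+ /* no direct match: look up an L2 flow for this MAC pair and commit the
+ * hash as a subflow of it
+ */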
+ airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
+ e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
+ airoha_l2_flow_table_params);
+ if (e)
+ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
unlock:
spin_unlock_bh(&ppe_lock);
}
+static int
+airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_flow_table_entry *prev;
+
+ e->type = FLOW_TYPE_L2;
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ if (!prev)
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &e->l2_node,
+ airoha_l2_flow_table_params);
+}
+
static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *e)
{
- u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ u32 hash;
+
+ if (type == PPE_PKT_TYPE_BRIDGE)
+ return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
+ hash = airoha_ppe_foe_get_entry_hash(&e->data);
+ e->type = FLOW_TYPE_L4;
e->hash = 0xffff;
spin_lock_bh(&ppe_lock);
@@ -519,23 +774,100 @@ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
return 0;
}
-static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
+{
+ u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
+ int idle;
+
+ if (state == AIROHA_FOE_STATE_BIND) {
+ ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
+ ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ } else {
+ ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
+ now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
+ ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
+ }
+ idle = now - ts;
+
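+ /* account for timestamp counter wraparound */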
+ return idle < 0 ? idle + ts_mask + 1 : idle;
+}
+
+static void
+airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+ struct airoha_flow_table_entry *iter;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
+ struct airoha_foe_entry *hwe;
+ u32 ib1, state;
+ int idle;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
+ if (!hwe)
+ continue;
+
+ ib1 = READ_ONCE(hwe->ib1);
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ iter->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, iter);
+ continue;
+ }
+
+ idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
+ if (idle >= min_idle)
+ continue;
+
+ min_idle = idle;
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *e)
{
+ struct airoha_foe_entry *hwe_p, hwe = {};
+
spin_lock_bh(&ppe_lock);
- hlist_del_init(&e->list);
- if (e->hash != 0xffff) {
- e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
- e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
- AIROHA_FOE_STATE_INVALID);
- airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ if (e->type == FLOW_TYPE_L2) {
+ airoha_ppe_foe_flow_l2_entry_update(ppe, e);
+ goto unlock;
+ }
+
+ if (e->hash == 0xffff)
+ goto unlock;
+
+ hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
+ if (!hwe_p)
+ goto unlock;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
e->hash = 0xffff;
+ goto unlock;
}
+ e->data.ib1 = hwe.ib1;
+unlock:
spin_unlock_bh(&ppe_lock);
}
+static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ airoha_ppe_foe_flow_entry_update(ppe, e);
+
+ return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+}
+
static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
struct flow_cls_offload *f)
{
@@ -751,6 +1083,60 @@ static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
return 0;
}
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats)
+{
+ u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
+ struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+
+ if (index >= PPE_STATS_NUM_ENTRIES)
+ return;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ u64 packets = ppe->foe_stats[index].packets;
+ u64 bytes = ppe->foe_stats[index].bytes;
+ struct airoha_foe_stats npu_stats;
+
+ memcpy_fromio(&npu_stats, &npu->stats[index],
+ sizeof(*npu->stats));
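+ /* sw counters hold the upper 32 bits, NPU counters the lower 32 */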
+ stats->packets = packets << 32 | npu_stats.packets;
+ stats->bytes = bytes << 32 | npu_stats.bytes;
+ }
+
+ rcu_read_unlock();
+}
+
+static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ u32 idle;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ idle = airoha_ppe_entry_idle_time(eth->ppe, e);
+ f->stats.lastused = jiffies - idle * HZ;
+
+ if (e->hash != 0xffff) {
+ struct airoha_foe_stats64 stats = {};
+
+ airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
+ f->stats.pkts += (stats.packets - e->stats.packets);
+ f->stats.bytes += (stats.bytes - e->stats.bytes);
+ e->stats = stats;
+ }
+
+ return 0;
+}
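FLOW_CLS_STATS expects per-call deltas, so the handler reports the difference against the snapshot cached in e->stats and then refreshes the snapshot; reporting the absolute hardware counters would double-count across calls. The pattern in isolation (sketch; struct stats64 is a stand-in for the driver type):

#include <stdint.h>

struct stats64 {
	uint64_t packets;
	uint64_t bytes;
};

/* Report only what accumulated since the previous query. */
static void report_delta(struct stats64 *out, struct stats64 *cached,
			 const struct stats64 *hw)
{
	out->packets += hw->packets - cached->packets;
	out->bytes += hw->bytes - cached->bytes;
	*cached = *hw; /* remember the new baseline */
}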
+
static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
struct flow_cls_offload *f)
{
@@ -759,6 +1145,8 @@ static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
return airoha_ppe_flow_offload_replace(port, f);
case FLOW_CLS_DESTROY:
return airoha_ppe_flow_offload_destroy(port, f);
+ case FLOW_CLS_STATS:
+ return airoha_ppe_flow_offload_stats(port, f);
default:
break;
}
@@ -784,11 +1172,12 @@ static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
- struct airoha_npu *npu = airoha_npu_get(eth->dev);
+ struct airoha_npu *npu = airoha_npu_get(eth->dev,
+ &eth->ppe->foe_stats_dma);
if (IS_ERR(npu)) {
request_module("airoha-npu");
- npu = airoha_npu_get(eth->dev);
+ npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
}
return npu;
@@ -811,6 +1200,8 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
if (err)
goto error_npu_put;
+ airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
+
rcu_assign_pointer(eth->npu, npu);
synchronize_rcu();
@@ -822,18 +1213,13 @@ error_npu_put:
return err;
}
-int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
+int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
{
- struct flow_cls_offload *cls = type_data;
- struct net_device *dev = cb_priv;
struct airoha_gdm_port *port = netdev_priv(dev);
+ struct flow_cls_offload *cls = type_data;
struct airoha_eth *eth = port->qdma->eth;
int err = 0;
- if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
- return -EOPNOTSUPP;
-
mutex_lock(&flow_offload_mutex);
if (!eth->npu)
@@ -846,7 +1232,8 @@ int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return err;
}
-void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+ u16 hash)
{
u16 now, diff;
@@ -859,7 +1246,28 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
return;
ppe->foe_check_time[hash] = now;
- airoha_ppe_foe_insert_entry(ppe, hash);
+ airoha_ppe_foe_insert_entry(ppe, skb, hash);
+}
+
+void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct net_device *dev = port->dev;
+ const u8 *addr = dev->dev_addr;
+ u32 val;
+
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+
+ val = (addr[0] << 8) | addr[1];
+ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
+ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
+ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
+ FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
}
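The station address goes into UPDMEM as two words: bytes 2-5 packed big-endian-style into word 0, and bytes 0-1 into the low half of the word written at OFFSET 1. For 00:11:22:33:44:55 that yields 0x22334455 and 0x0011. The packing in isolation (sketch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t lo, hi;

	/* Word 0: the four low bytes of the station address. */
	lo = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
	/* Word at UPDMEM offset 1: the two high bytes. */
	hi = (addr[0] << 8) | addr[1];

	printf("%08x %04x\n", lo, hi); /* 22334455 0011 */
	return 0;
}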
int airoha_ppe_init(struct airoha_eth *eth)
@@ -886,13 +1294,33 @@ int airoha_ppe_init(struct airoha_eth *eth)
if (!ppe->foe_flow)
return -ENOMEM;
+ foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
+ if (foe_size) {
+ ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
+ &ppe->foe_stats_dma,
+ GFP_KERNEL);
+ if (!ppe->foe_stats)
+ return -ENOMEM;
+ }
+
err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
if (err)
return err;
+ err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
+ if (err)
+ goto error_flow_table_destroy;
+
err = airoha_ppe_debugfs_init(ppe);
if (err)
- rhashtable_destroy(&eth->flow_table);
+ goto error_l2_flow_table_destroy;
+
+ return 0;
+
+error_l2_flow_table_destroy:
+ rhashtable_destroy(&ppe->l2_flows);
+error_flow_table_destroy:
+ rhashtable_destroy(&eth->flow_table);
return err;
}
@@ -909,6 +1337,7 @@ void airoha_ppe_deinit(struct airoha_eth *eth)
}
rcu_read_unlock();
+ rhashtable_destroy(&eth->ppe->l2_flows);
rhashtable_destroy(&eth->flow_table);
debugfs_remove(eth->ppe->debugfs_dir);
}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
index 3cdc6fd53fc7..05a756233f6a 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -61,6 +61,7 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
u16 *src_port = NULL, *dest_port = NULL;
struct airoha_foe_mac_info_common *l2;
unsigned char h_source[ETH_ALEN] = {};
+ struct airoha_foe_stats64 stats = {};
unsigned char h_dest[ETH_ALEN];
struct airoha_foe_entry *hwe;
u32 type, state, ib2, data;
@@ -144,14 +145,18 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
}
+ airoha_ppe_foe_entry_get_stats(ppe, i, &stats);
+
*((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
*((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
*((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
- " vlan=%d,%d ib1=%08x ib2=%08x\n",
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
h_source, h_dest, l2->etype, data,
- l2->vlan1, l2->vlan2, hwe->ib1, ib2);
+ l2->vlan1, l2->vlan2, hwe->ib1, ib2,
+ stats.packets, stats.bytes);
}
return 0;
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index 8146cde4e8ba..150c85995cc1 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -283,6 +283,7 @@
#define PPE_HASH_SEED 0x12345678
#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+#define DFT_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
@@ -312,6 +313,16 @@
#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+#define REG_UPDMEM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
+#define PPE_UPDMEM_ACK_MASK BIT(31)
+#define PPE_UPDMEM_ADDR_MASK GENMASK(11, 8)
+#define PPE_UPDMEM_OFFSET_MASK GENMASK(7, 4)
+#define PPE_UPDMEM_SEL_MASK GENMASK(3, 2)
+#define PPE_UPDMEM_WR_MASK BIT(1)
+#define PPE_UPDMEM_REQ_MASK BIT(0)
+
+#define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
+
#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
@@ -422,11 +433,12 @@
((_n) == 2) ? 0x0720 : \
((_n) == 1) ? 0x0024 : 0x0020)
-#define REG_INT_ENABLE(_n) \
- (((_n) == 4) ? 0x0750 : \
- ((_n) == 3) ? 0x0744 : \
- ((_n) == 2) ? 0x0740 : \
- ((_n) == 1) ? 0x002c : 0x0028)
+#define REG_INT_ENABLE(_b, _n) \
+ (((_n) == 4) ? 0x0750 + ((_b) << 5) : \
+ ((_n) == 3) ? 0x0744 + ((_b) << 5) : \
+ ((_n) == 2) ? 0x0740 + ((_b) << 5) : \
+ ((_n) == 1) ? 0x002c + ((_b) << 3) : \
+ 0x0028 + ((_b) << 3))
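REG_INT_ENABLE now takes a bank index _b alongside the register set _n: sets 0-1 sit 8 bytes apart per bank ((_b) << 3), sets 2-4 sit 32 bytes apart ((_b) << 5). So (_b=1, _n=0) resolves to 0x0028 + 0x8 = 0x0030, and (_b=1, _n=2) to 0x0740 + 0x20 = 0x0760. A quick check of the same arithmetic (sketch mirroring the macro):

#include <stdio.h>

static unsigned int reg_int_enable(unsigned int b, unsigned int n)
{
	/* Same address map as the driver macro. */
	return n == 4 ? 0x0750 + (b << 5) :
	       n == 3 ? 0x0744 + (b << 5) :
	       n == 2 ? 0x0740 + (b << 5) :
	       n == 1 ? 0x002c + (b << 3) :
			0x0028 + (b << 3);
}

int main(void)
{
	printf("%#x\n", reg_int_enable(1, 0)); /* 0x30 */
	printf("%#x\n", reg_int_enable(1, 2)); /* 0x760 */
	return 0;
}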
/* QDMA_CSR_INT_ENABLE1 */
#define RX15_COHERENT_INT_MASK BIT(31)
@@ -461,6 +473,26 @@
#define IRQ0_FULL_INT_MASK BIT(1)
#define IRQ0_INT_MASK BIT(0)
+#define RX_COHERENT_LOW_INT_MASK \
+ (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
+ RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
+ RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \
+ RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \
+ RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \
+ RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \
+ RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \
+ RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
+
+#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK)
+#define INT_RX0_MASK(_n) \
+ (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
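INT_RX0_MASK translates a logical per-queue bitmap (bit i == RX queue i) into the register's layout: the shift, derived from the mask itself via __ffs(), relocates the logical bits to wherever the coherent field actually sits, so the macro keeps working if the field moves. The mapping in isolation (sketch; the field position here is hypothetical, merely analogous to RX_COHERENT_LOW_INT_MASK):

#include <stdio.h>

/* Hypothetical field occupying bits 16..31; __ffs(mask) == 16. */
#define FIELD_MASK 0xffff0000u
#define FIELD_OFF  16

/* Logical bitmap (bit i == queue i) -> hardware register bits. */
static unsigned int to_hw(unsigned int logical)
{
	return (logical << FIELD_OFF) & FIELD_MASK;
}

int main(void)
{
	printf("%#x\n", to_hw(0x5)); /* queues 0 and 2 -> 0x50000 */
	return 0;
}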
+
+#define TX_COHERENT_LOW_INT_MASK \
+ (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \
+ TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \
+ TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \
+ TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
+
#define TX_DONE_INT_MASK(_n) \
((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
: IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
@@ -469,17 +501,6 @@
(IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-#define INT_IDX0_MASK \
- (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
- TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
- TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
- TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
- RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
- RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
- RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
- RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
- RX15_COHERENT_INT_MASK | INT_TX_MASK)
-
/* QDMA_CSR_INT_ENABLE2 */
#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
@@ -514,19 +535,122 @@
#define RX1_DONE_INT_MASK BIT(1)
#define RX0_DONE_INT_MASK BIT(0)
-#define RX_DONE_INT_MASK \
- (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
- RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
- RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
- RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
- RX15_DONE_INT_MASK)
-#define INT_IDX1_MASK \
- (RX_DONE_INT_MASK | \
- RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
- RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
- RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
- RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
- RX15_NO_CPU_DSCP_INT_MASK)
+#define RX_NO_CPU_DSCP_LOW_INT_MASK \
+ (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
+ RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
+ RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \
+ RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \
+ RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \
+ RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \
+ RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \
+ RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_LOW_INT_MASK \
+ (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \
+ RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \
+ RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \
+ RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \
+ RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \
+ RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \
+ RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \
+ RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
+
+#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
+#define INT_RX1_MASK(_n) \
+ ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
+ (RX_DONE_LOW_INT_MASK & (_n)))
+
+/* QDMA_CSR_INT_ENABLE3 */
+#define RX31_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX30_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX29_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX28_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX27_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX26_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX25_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX24_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX23_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX22_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX21_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX20_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX19_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX18_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX17_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX16_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX31_DONE_INT_MASK BIT(15)
+#define RX30_DONE_INT_MASK BIT(14)
+#define RX29_DONE_INT_MASK BIT(13)
+#define RX28_DONE_INT_MASK BIT(12)
+#define RX27_DONE_INT_MASK BIT(11)
+#define RX26_DONE_INT_MASK BIT(10)
+#define RX25_DONE_INT_MASK BIT(9)
+#define RX24_DONE_INT_MASK BIT(8)
+#define RX23_DONE_INT_MASK BIT(7)
+#define RX22_DONE_INT_MASK BIT(6)
+#define RX21_DONE_INT_MASK BIT(5)
+#define RX20_DONE_INT_MASK BIT(4)
+#define RX19_DONE_INT_MASK BIT(3)
+#define RX18_DONE_INT_MASK BIT(2)
+#define RX17_DONE_INT_MASK BIT(1)
+#define RX16_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_HIGH_INT_MASK \
+ (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \
+ RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \
+ RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \
+ RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \
+ RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \
+ RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \
+ RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \
+ RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_HIGH_INT_MASK \
+ (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \
+ RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \
+ RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \
+ RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \
+ RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \
+ RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \
+ RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
+ RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
+
+#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
+#define RX_DONE_INT_MASK \
+ ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
+
+#define INT_RX2_MASK(_n) \
+ ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
+ (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
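RX_DONE_INT_MASK composes one 32-queue logical view from two registers: queues 0-15 come from ENABLE2's DONE field, queues 16-31 from ENABLE3's low half, shifted up by fls(RX_DONE_HIGH_INT_MASK) (16) when building the logical mask and shifted back down in INT_RX2_MASK when programming the hardware. The round trip in plain C (sketch):

#include <stdint.h>

#define HIGH_OFF 16 /* fls() of the 16-bit DONE field */

/* Logical mask: bit q set == RX queue q done-interrupt enabled. */
static uint32_t to_logical(uint16_t low_reg, uint16_t high_reg)
{
	return ((uint32_t)high_reg << HIGH_OFF) | low_reg;
}

/* Back to the ENABLE3 field: keep only queues 16..31. */
static uint16_t to_enable3(uint32_t logical)
{
	return logical >> HIGH_OFF;
}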
+
+/* QDMA_CSR_INT_ENABLE4 */
+#define RX31_COHERENT_INT_MASK BIT(31)
+#define RX30_COHERENT_INT_MASK BIT(30)
+#define RX29_COHERENT_INT_MASK BIT(29)
+#define RX28_COHERENT_INT_MASK BIT(28)
+#define RX27_COHERENT_INT_MASK BIT(27)
+#define RX26_COHERENT_INT_MASK BIT(26)
+#define RX25_COHERENT_INT_MASK BIT(25)
+#define RX24_COHERENT_INT_MASK BIT(24)
+#define RX23_COHERENT_INT_MASK BIT(23)
+#define RX22_COHERENT_INT_MASK BIT(22)
+#define RX21_COHERENT_INT_MASK BIT(21)
+#define RX20_COHERENT_INT_MASK BIT(20)
+#define RX19_COHERENT_INT_MASK BIT(19)
+#define RX18_COHERENT_INT_MASK BIT(18)
+#define RX17_COHERENT_INT_MASK BIT(17)
+#define RX16_COHERENT_INT_MASK BIT(16)
+
+#define RX_COHERENT_HIGH_INT_MASK \
+ (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \
+ RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \
+ RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \
+ RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \
+ RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \
+ RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK | \
+ RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \
+ RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
+
+#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n))
/* QDMA_CSR_INT_ENABLE5 */
#define TX31_COHERENT_INT_MASK BIT(31)
@@ -554,19 +678,19 @@
#define TX9_COHERENT_INT_MASK BIT(9)
#define TX8_COHERENT_INT_MASK BIT(8)
-#define INT_IDX4_MASK \
- (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
- TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
- TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
- TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
- TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
- TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
- TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
- TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
- TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
- TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
- TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
- TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
+#define TX_COHERENT_HIGH_INT_MASK \
+ (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \
+ TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \
+ TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \
+ TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \
+ TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \
+ TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \
+ TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \
+ TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \
+ TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \
+ TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \
+ TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \
+ TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
@@ -691,6 +815,12 @@
#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+#define RATE_LIMIT_PARAM_RW_MASK BIT(31)
+#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30)
+#define RATE_LIMIT_PARAM_TYPE_MASK GENMASK(29, 28)
+#define RATE_LIMIT_METER_GROUP_MASK GENMASK(27, 26)
+#define RATE_LIMIT_PARAM_INDEX_MASK GENMASK(23, 16)
+
#define REG_TXWRR_MODE_CFG 0x1020
#define TWRR_WEIGHT_SCALE_MASK BIT(31)
#define TWRR_WEIGHT_BASE_MASK BIT(3)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 897720fdf5d8..86fd08f375df 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1777,7 +1777,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
if (ENA_IS_XDP_INDEX(adapter, i))
napi_handler = ena_xdp_io_poll;
- netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
+ netif_napi_add_config(adapter->netdev, &napi->napi, napi_handler, i);
if (!ENA_IS_XDP_INDEX(adapter, i))
napi->rx_ring = rx_ring;
@@ -3667,7 +3667,8 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
+ struct ena_adapter *adapter = timer_container_of(adapter, t,
+ timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index b923ad9e1581..ce9445425045 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -636,7 +636,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
lance_set_multicast(lp->dev);
}
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 86522e8574cb..76e8c13d5985 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1641,7 +1641,8 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d)
static void amd8111e_config_ipg(struct timer_list *t)
{
- struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
+ struct amd8111e_priv *lp = timer_container_of(lp, t,
+ ipg_data.ipg_timer);
struct ipg_info *ipg_data = &lp->ipg_data;
void __iomem *mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index b072ca5930fc..8d05a0c5f2d5 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1004,7 +1004,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e5adafecc686..9eaefa0f5e80 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -2905,7 +2905,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
static void pcnet32_watchdog(struct timer_list *t)
{
- struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
+ struct pcnet32_private *lp = timer_container_of(lp, t, watchdog_timer);
struct net_device *dev = lp->dev;
unsigned long flags;
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index 506f682d15c1..097bb092bdb8 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -225,7 +225,7 @@ int pdsc_adminq_post(struct pdsc *pdsc,
union pds_core_adminq_comp *comp,
bool fast_poll)
{
- unsigned long poll_interval = 1;
+ unsigned long poll_interval = 200;
unsigned long poll_jiffies;
unsigned long time_limit;
unsigned long time_start;
@@ -252,7 +252,7 @@ int pdsc_adminq_post(struct pdsc *pdsc,
time_limit = time_start + HZ * pdsc->devcmd_timeout;
do {
/* Timeslice the actual wait to catch IO errors etc early */
- poll_jiffies = msecs_to_jiffies(poll_interval);
+ poll_jiffies = usecs_to_jiffies(poll_interval);
remaining = wait_for_completion_timeout(wc, poll_jiffies);
if (remaining)
break;
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 9512aa4083f0..076dfe2910c7 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -402,6 +402,7 @@ err_out_uninit:
static struct pdsc_viftype pdsc_viftype_defaults[] = {
[PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
+ .enabled = true,
.vif_id = PDS_DEV_TYPE_FWCTL,
.dl_id = -1 },
[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
@@ -414,7 +415,8 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
{
enum pds_core_vif_types vt;
- pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
+ pdsc->viftype_status = kcalloc(ARRAY_SIZE(pdsc_viftype_defaults),
+ sizeof(*pdsc->viftype_status),
GFP_KERNEL);
if (!pdsc->viftype_status)
return -ENOMEM;
@@ -431,9 +433,6 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
/* See what the Core device has for support */
vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
- if (vt == PDS_DEV_TYPE_FWCTL)
- pdsc->viftype_status[vt].enabled = true;
-
dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
pdsc->viftype_status[vt].name,
vt_support ? "" : "not ");
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 4843f9249a31..9b81e1c260c2 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -23,7 +23,7 @@ MODULE_DEVICE_TABLE(pci, pdsc_id_table);
static void pdsc_wdtimer_cb(struct timer_list *t)
{
- struct pdsc *pdsc = from_timer(pdsc, t, wdtimer);
+ struct pdsc *pdsc = timer_container_of(pdsc, t, wdtimer);
dev_dbg(pdsc->dev, "%s: jiffies %ld\n", __func__, jiffies);
mod_timer(&pdsc->wdtimer,
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 3cd31855a5f6..0b273327f5a6 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1246,7 +1246,7 @@ static void lance_set_multicast(struct net_device *dev)
static void lance_set_multicast_retry(struct timer_list *t)
{
- struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct lance_private *lp = timer_container_of(lp, t, multicast_timer);
struct net_device *dev = lp->dev;
lance_set_multicast(dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 3b70f6737633..e1296cbf4ff3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1,117 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_COMMON_H__
@@ -900,6 +791,11 @@
#define PCS_V2_RV_WINDOW_SELECT 0x1064
#define PCS_V2_YC_WINDOW_DEF 0x18060
#define PCS_V2_YC_WINDOW_SELECT 0x18064
+#define PCS_V3_RN_WINDOW_DEF 0xf8078
+#define PCS_V3_RN_WINDOW_SELECT 0xf807c
+
+#define PCS_RN_SMN_BASE_ADDR 0x11e00000
+#define PCS_RN_PORT_ADDR_SIZE 0x100000
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index c68ace804e37..1474df5544fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b35808d3d07f..d9157c4acde9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/debugfs.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index d41b58fad37b..7c8a19988a52 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include "xgbe.h"
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f1b0fb02b3cd..466b5f6e5578 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/phy.h>
@@ -120,9 +11,11 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
+#include <linux/pci.h>
#include "xgbe.h"
#include "xgbe-common.h"
+#include "xgbe-smn.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
@@ -1162,18 +1055,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
return 0;
}
-static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg)
+static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
+ int mmd_reg)
{
- unsigned long flags;
- unsigned int mmd_address, index, offset;
- int mmd_data;
-
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
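+	/*
+	 * XGBE_ADDR_C45 marks an mmd_reg that already encodes a complete
+	 * Clause-45 address; otherwise the address is composed from the
+	 * MDIO MMD in the upper 16 bits and the register in the lower 16.
+	 */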
+ return (mmd_reg & XGBE_ADDR_C45) ?
+ mmd_reg & ~XGBE_ADDR_C45 :
+ (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+}
+static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
+ unsigned int mmd_address,
+ unsigned int *index,
+ unsigned int *offset)
+{
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing the PCS registers in two
@@ -1184,8 +1078,98 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
* offset 1 bit and reading 16 bits of data.
*/
mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ *index = mmd_address & ~pdata->xpcs_window_mask;
+ *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+}
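+
+/*
+ * Worked example (illustrative, assuming a 4 KiB window and a window mask
+ * of 0xfff): mmd_address 0x1234 becomes byte address 0x2468 after the
+ * shift, so *index = 0x2000 selects the window and *offset lands at
+ * xpcs_window + 0x468 within it.
+ */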
+
+static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ u32 smn_address;
+	u32 mmd_data;
+ int ret;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret)
+ return ret;
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
+ if (ret)
+ return ret;
+
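+	/*
+	 * The SMN read returns a 32-bit word holding two adjacent 16-bit
+	 * PCS registers; offset % 4 selects the high or low half-word.
+	 */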
+ mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
+ FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int pci_mmd_data, hi_mask, lo_mask;
+ unsigned int mmd_address, index, offset;
+ struct pci_dev *dev;
+ u32 smn_address;
+ int ret;
+
+ dev = pdata->pcidev;
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+		pci_err(dev, "Failed to write window index 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
+ if (ret) {
+		pci_err(dev, "Failed to read data at offset 0x%x\n", offset);
+ return;
+ }
+
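+	/*
+	 * Read-modify-write: update only the 16-bit lane being written and
+	 * carry the other half of the 32-bit SMN word across unchanged.
+	 */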
+ if (offset % 4) {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
+ } else {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
+ FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+ }
+
+ pci_mmd_data = hi_mask | lo_mask;
+
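+	/*
+	 * The window is re-selected before the write-back; presumably a
+	 * defensive measure in case the selection register changed between
+	 * the read and the write (editorial assumption).
+	 */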
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+		pci_err(dev, "Failed to write window index 0x%x\n", index);
+ return;
+ }
+
+	ret = amd_smn_write(0, pdata->smn_base + offset, pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
+ return;
+ }
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ unsigned long flags;
+ int mmd_data;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1201,23 +1185,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
- /* The PCS registers are accessed using mmio. The underlying
- * management interface uses indirect addressing to access the MMD
- * register sets. This requires accessing of the PCS register in two
- * phases, an address phase and a data phase.
- *
- * The mmio interface is based on 16-bit offsets and values. All
- * register offsets must therefore be adjusted by left shifting the
- * offset 1 bit and writing 16 bits of data.
- */
- mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1232,10 +1202,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1260,10 +1227,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1290,6 +1254,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
}
}
@@ -1300,6 +1267,9 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V1:
return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
+
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 8e09ad8fa022..65447f9a0a59 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
@@ -643,7 +534,8 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
static void xgbe_tx_timer(struct timer_list *t)
{
- struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
+ struct xgbe_channel *channel = timer_container_of(channel, t,
+ tx_timer);
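+	/*
+	 * timer_container_of() is the renamed from_timer() helper: a
+	 * container_of() lookup of the structure embedding the timer.
+	 */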
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
@@ -681,7 +573,8 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
- struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_prv_data *pdata = timer_container_of(pdata, t,
+ service_timer);
struct xgbe_channel *channel;
unsigned int i;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 4431ab1c18b3..be0d2c7d08dc 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 7a833894f52a..d40011e8ddf2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 0e8698928e4d..4ebdd123c435 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 07f4f3418d01..71449edbb76d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index c636999a6a84..097ec5e4f261 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -1,123 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>
+#include "xgbe-smn.h"
#include "xgbe.h"
#include "xgbe-common.h"
@@ -207,14 +99,14 @@ out:
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct xgbe_prv_data *pdata;
- struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
- struct pci_dev *rdev;
+ unsigned int port_addr_size, reg;
+ struct device *dev = &pdev->dev;
+ struct xgbe_prv_data *pdata;
unsigned int ma_lo, ma_hi;
- unsigned int reg;
- int bar_mask;
- int ret;
+ struct pci_dev *rdev;
+ int bar_mask, ret;
+ u32 address;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
@@ -274,20 +166,31 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the PCS indirect addressing definition registers */
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
- if (rdev &&
- (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
- pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
- } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
- (rdev->device == 0x14b5)) {
- pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
-
- /* Yellow Carp devices do not need cdr workaround */
- pdata->vdata->an_cdr_workaround = 0;
-
- /* Yellow Carp devices do not need rrc */
- pdata->vdata->enable_rrc = 0;
+ if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
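+		/*
+		 * The root-complex device ID identifies the SoC generation
+		 * and therefore which PCS window definition/select registers
+		 * this xgbe instance must use.
+		 */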
+ switch (rdev->device) {
+ case XGBE_RV_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ break;
+ case XGBE_YC_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need cdr workaround */
+ pdata->vdata->an_cdr_workaround = 0;
+
+ /* Yellow Carp devices do not need rrc */
+ pdata->vdata->enable_rrc = 0;
+ break;
+ case XGBE_RN_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
+ break;
+ default:
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ break;
+ }
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -295,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_dev_put(rdev);
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ port_addr_size = PCS_RN_PORT_ADDR_SIZE *
+ XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;
+
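+		/*
+		 * Per-port SMN base: each port's PCS block sits
+		 * PCS_RN_PORT_ADDR_SIZE bytes apart, indexed by the PORT_ID
+		 * field read back from XP_PROP_0.
+		 */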
+		address = pdata->smn_base + pdata->xpcs_window_def_reg;
+ ret = amd_smn_read(0, address, &reg);
+ if (ret) {
+			pci_err(pdata->pcidev, "Failed to read PCS window definition\n");
+ goto err_pci_enable;
+ }
+ } else {
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ }
+
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
@@ -473,6 +391,22 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
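+/*
+ * V3 version data: same PHY implementation hooks as V2, but PCS access
+ * goes through SMN (XGBE_XPCS_ACCESS_V3) and neither the CDR workaround
+ * nor RRC is enabled.
+ */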
+static struct xgbe_version_data xgbe_v3 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V3,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 0,
+ .enable_rrc = 0,
+};
+
static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
@@ -510,6 +444,8 @@ static const struct pci_device_id xgbe_pci_table[] = {
.driver_data = (kernel_ulong_t)&xgbe_v2a },
{ PCI_VDEVICE(AMD, 0x1459),
.driver_data = (kernel_ulong_t)&xgbe_v2b },
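+	/* new device ID dispatched to the V3 (SMN-based) PCS access path */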
+ { PCI_VDEVICE(AMD, 0x1641),
+ .driver_data = (kernel_ulong_t)&xgbe_v3 },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
index d16eae415f72..2e6b8ffe785c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 268399dfcf22..7a4dfa4e19c7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4365bd62942c..47d53e59ccf6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 7051bd7cf6dc..978c4dd01fa0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/clk.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-smn.h b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
new file mode 100644
index 000000000000..c6ae127ced03
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#ifndef __SMN_H__
+#define __SMN_H__
+
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd/nb.h>
+
+#else
+
+static inline int amd_smn_write(u16 node, u32 address, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+ return -ENODEV;
+}
+
+#endif
+#endif
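
Note on the new xgbe-smn.h: when CONFIG_AMD_NB is not set, the inline
stubs above let SMN-dependent code compile and fail cleanly at runtime.
A minimal caller sketch (the function name and node id are illustrative,
not part of the patch):

static int xgbe_smn_probe_check(u16 node, u32 address)
{
        u32 val;

        /* With CONFIG_AMD_NB=n this resolves to the -ENODEV stub
         * above, so SMN-dependent setup can bail out gracefully. */
        return amd_smn_read(node, address, &val);
}
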
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ed5d43c16d0e..6359bb87dc13 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1,117 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_H__
@@ -347,6 +238,15 @@
(_src)->link_modes._sname, \
__ETHTOOL_LINK_MODE_MASK_NBITS)
+/* XGBE PCI device IDs */
+#define XGBE_RV_PCI_DEVICE_ID 0x15d0
+#define XGBE_YC_PCI_DEVICE_ID 0x14b5
+#define XGBE_RN_PCI_DEVICE_ID 0x1630
+
+/* Generic low and high masks */
+#define XGBE_GEN_HI_MASK GENMASK(31, 16)
+#define XGBE_GEN_LO_MASK GENMASK(15, 0)
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -565,6 +465,7 @@ enum xgbe_speed {
enum xgbe_xpcs_access {
XGBE_XPCS_ACCESS_V1 = 0,
XGBE_XPCS_ACCESS_V2,
+ XGBE_XPCS_ACCESS_V3,
};
enum xgbe_an_mode {
@@ -1060,6 +961,7 @@ struct xgbe_prv_data {
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
+ unsigned int smn_base;
/* Version related data */
struct xgbe_version_data *vdata;
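
A sketch of how the new generic masks and smn_base are meant to combine
(illustrative only; FIELD_GET() comes from <linux/bitfield.h>, and the
helper name and node 0 are assumptions, not from this patch):

#include <linux/bitfield.h>

static int xgbe_read_smn_halves(struct xgbe_prv_data *pdata,
                                u32 reg, u16 *hi, u16 *lo)
{
        u32 val;
        int ret;

        ret = amd_smn_read(0, pdata->smn_base + reg, &val);
        if (ret)
                return ret;

        *hi = FIELD_GET(XGBE_GEN_HI_MASK, val);  /* bits 31:16 */
        *lo = FIELD_GET(XGBE_GEN_LO_MASK, val);  /* bits 15:0 */
        return 0;
}
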
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index b9fdd61f1fdb..b3bf8d6f88e8 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
-#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
@@ -796,59 +795,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
}
#ifndef SUNHME_MULTICAST
-/* Real fast bit-reversal algorithm, 6-bit values */
-static int reverse6[64] = {
- 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
- 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
- 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
- 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
- 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
- 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
- 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
- 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
-};
-
-static unsigned int
-crc416(unsigned int curval, unsigned short nxtval)
-{
- unsigned int counter, cur = curval, next = nxtval;
- int high_crc_set, low_data_set;
-
- /* Swap bytes */
- next = ((next & 0x00FF) << 8) | (next >> 8);
-
- /* Compute bit-by-bit */
- for (counter = 0; counter < 16; ++counter) {
- /* is high CRC bit set? */
- if ((cur & 0x80000000) == 0) high_crc_set = 0;
- else high_crc_set = 1;
-
- cur = cur << 1;
-
- if ((next & 0x0001) == 0) low_data_set = 0;
- else low_data_set = 1;
-
- next = next >> 1;
-
- /* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
- }
- return cur;
-}
-
-static unsigned int
-bmac_crc(unsigned short *address)
-{
- unsigned int newcrc;
-
- XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
- newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
- newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
- newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
-
- return(newcrc);
-}
-
/*
* Add requested mcast addr to BMac's hash table filter.
*
@@ -861,8 +807,7 @@ bmac_addhash(struct bmac_data *bp, unsigned char *addr)
unsigned short mask;
if (!(*addr)) return;
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc]++) return; /* This bit is already set */
mask = crc % 16;
mask = (unsigned char)1 << mask;
@@ -876,8 +821,7 @@ bmac_removehash(struct bmac_data *bp, unsigned char *addr)
unsigned char mask;
/* Now, delete the address from the filter copy, as indicated */
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
mask = crc % 16;
@@ -1466,7 +1410,7 @@ bmac_output(struct sk_buff *skb, struct net_device *dev)
static void bmac_tx_timeout(struct timer_list *t)
{
- struct bmac_data *bp = from_timer(bp, t, tx_timeout);
+ struct bmac_data *bp = timer_container_of(bp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(bp->mdev);
volatile struct dbdma_regs __iomem *td = bp->tx_dma;
volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
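
The bmac conversion above drops roughly fifty lines of open-coded
big-endian CRC plus the reverse6[] fixup; per the patch, the top six
bits of the kernel's little-endian crc32() select the same one of 64
hash bins. Condensed sketch of the resulting hash-index computation
(helper name illustrative):

#include <linux/crc32.h>
#include <linux/etherdevice.h>

static unsigned int bmac_hash_index(const u8 *addr)
{
        /* crc32() is CRC-32/LE; >> 26 keeps the six most-significant
         * bits, which the patch treats as equivalent to
         * reverse6[bmac_crc(addr) & 0x3f] in the old code. */
        return crc32(~0, addr, ETH_ALEN) >> 26;
}
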
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 1fed112f4e68..af26905e44e3 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -805,7 +805,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
static void mace_tx_timeout(struct timer_list *t)
{
- struct mace_data *mp = from_timer(mp, t, tx_timeout);
+ struct mace_data *mp = timer_container_of(mp, t, tx_timeout);
struct net_device *dev = macio_get_drvdata(mp->mdev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
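
The from_timer() -> timer_container_of() hunks here, and in the
aquantia/atheros files below, are a mechanical rename: both resolve the
enclosing structure from the struct timer_list pointer, container_of()
style. The shape of the pattern, with illustrative names:

struct foo_adapter {
        struct timer_list watchdog_timer;
};

static void foo_watchdog(struct timer_list *t)
{
        struct foo_adapter *adapter =
                timer_container_of(adapter, t, watchdog_timer);

        /* adapter now points at the structure embedding t */
}
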
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index c1d1673c5749..b565189e5913 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
}
#endif
- skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index bf3aa46887a1..b24eaa5283fa 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -254,7 +254,7 @@ static void aq_nic_service_task(struct work_struct *work)
static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, service_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, service_timer);
mod_timer(&self->service_timer,
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
@@ -264,7 +264,7 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = from_timer(self, t, polling_timer);
+ struct aq_nic_s *self = timer_container_of(self, t, polling_timer);
unsigned int i = 0U;
for (i = 0U; self->aq_vecs > i; ++i)
@@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
+ skb_tx_timestamp(skb);
+
if (likely(frags)) {
err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
ring, frags);
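
The two aquantia hunks move skb_tx_timestamp() out of the
ndo_start_xmit() wrapper and into aq_nic_xmit(), after the skb is
mapped, presumably so the software timestamp lands as close as possible
to the actual hand-off to hardware. Condensed from the hunks above:

        frags = aq_nic_map_skb(self, skb, ring);

        skb_tx_timestamp(skb);  /* taken just before hw_ring_tx_xmit() */

        if (likely(frags))
                err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
                                                       ring, frags);
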
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 67b654889cae..d8e6f23e1432 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1563,7 +1563,7 @@ err_drop:
static void ag71xx_oom_timer_handler(struct timer_list *t)
{
- struct ag71xx *ag = from_timer(ag, t, oom_timer);
+ struct ag71xx *ag = timer_container_of(ag, t, oom_timer);
napi_schedule(&ag->napi);
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 82137f9deae9..ef1a51347351 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -231,8 +231,8 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
*/
static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1c_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index f664a0edbc49..40290028580b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -115,8 +115,8 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
*/
static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1e_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 38cd84b7677c..cfdb546a09e7 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2556,8 +2556,8 @@ static irqreturn_t atl1_intr(int irq, void *data)
*/
static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl1_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 88f65f8cf4d3..280e2f5f4aa5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1010,7 +1010,8 @@ static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
*/
static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
@@ -1035,8 +1036,8 @@ static void atl2_watchdog(struct timer_list *t)
*/
static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = from_timer(adapter, t,
- phy_config_timer);
+ struct atl2_adapter *adapter = timer_container_of(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 1bd4313215d7..81a74e07464f 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -96,7 +96,6 @@ config BNX2
config CNIC
tristate "QLogic CNIC support"
depends on PCI && (IPV6 || IPV6=n)
- depends on MMU
select BNX2
select UIO
help
@@ -123,6 +122,7 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select PHYLIB
help
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index a68fab1b05f0..fd35f4b4dc50 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -141,7 +141,7 @@ void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
return;
}
- rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
+ rx_ctrl_core_wl(priv, mask, ASP_RX_CTRL_FLUSH);
}
static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
@@ -518,7 +518,7 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
int ret, i;
/* Write all filters to HW */
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
/* If the filter does not match the port, skip programming. */
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
@@ -551,7 +551,7 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -577,7 +577,7 @@ int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
struct bcmasp_priv *priv = intf->parent;
int cnt = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -602,7 +602,7 @@ bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
size_t fs_size = 0;
int i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -670,7 +670,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
int i, open_index = -1;
/* Check whether we exceed the filter table capacity */
- if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
+ if (loc != RX_CLS_LOC_ANY && loc >= priv->num_net_filters)
return ERR_PTR(-EINVAL);
/* If the filter location is busy (already claimed) and we are initializing
@@ -686,7 +686,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
/* Initialize the loop index based on the desired location or from 0 */
i = loc == RX_CLS_LOC_ANY ? 0 : loc;
- for ( ; i < NUM_NET_FILTERS; i++) {
+ for ( ; i < priv->num_net_filters; i++) {
/* Found matching network filter */
if (!init &&
priv->net_filters[i].claimed &&
@@ -779,7 +779,7 @@ static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
priv->mda_filters[i].en = en;
priv->mda_filters[i].port = intf->port;
- rx_filter_core_wl(priv, ((intf->channel + 8) |
+ rx_filter_core_wl(priv, ((intf->channel + priv->tx_chan_offset) |
(en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
ASP_RX_FILTER_MDA_CFG(i));
@@ -865,7 +865,7 @@ void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
res_count = bcmasp_total_res_mda_cnt(intf->parent);
/* Disable all filters held by this port */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en &&
priv->mda_filters[i].port == intf->port)
bcmasp_en_mda_filter(intf, 0, i);
@@ -909,7 +909,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
res_count = bcmasp_total_res_mda_cnt(intf->parent);
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
/* If filter not enabled or belongs to another port skip */
if (!priv->mda_filters[i].en ||
priv->mda_filters[i].port != intf->port)
@@ -924,7 +924,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
}
/* Create new filter if possible */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en)
continue;
@@ -944,12 +944,12 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* Disable all filters and reset software view since the HW
* can lose context while in deep sleep suspend states
*/
- for (i = 0; i < NUM_MDA_FILTERS; i++) {
+ for (i = 0; i < priv->num_mda_filters; i++) {
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
priv->mda_filters[i].en = 0;
}
- for (i = 0; i < NUM_NET_FILTERS; i++)
+ for (i = 0; i < priv->num_net_filters; i++)
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
/* Top level filter enable bit should be enabled at all times, set
@@ -966,18 +966,8 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* ASP core initialization */
static void bcmasp_core_init(struct bcmasp_priv *priv)
{
- tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
- rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);
-
- rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
- ASP_EDPKT_HDR_CFG);
- rx_edpkt_core_wl(priv,
- (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
- ASP_EDPKT_ENDI);
-
rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
- rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);
rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);
@@ -1020,6 +1010,18 @@ static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
}
+static void bcmasp_core_clock_select_one_ctrl2(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+}
+
static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
{
u32 reg;
@@ -1108,7 +1110,7 @@ static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
return irq;
}
-static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
+static void bcmasp_init_wol(struct bcmasp_priv *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
@@ -1125,7 +1127,7 @@ static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
device_set_wakeup_capable(&pdev->dev, 1);
}
-static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en)
{
struct bcmasp_priv *priv = intf->parent;
struct device *dev = &priv->pdev->dev;
@@ -1154,54 +1156,12 @@ static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
}
}
-static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
+static void bcmasp_wol_irq_destroy(struct bcmasp_priv *priv)
{
if (priv->wol_irq > 0)
free_irq(priv->wol_irq, priv);
}
-static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
-{
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
- struct bcmasp_intf *intf;
- int irq;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
- if (irq < 0) {
- dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
- intf->port, irq);
- continue;
- }
-
- intf->wol_irq = irq;
- intf->wol_irq_enabled = false;
- device_set_wakeup_capable(&pdev->dev, 1);
- }
-}
-
-static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
-{
- struct device *dev = &intf->parent->pdev->dev;
-
- if (en ^ intf->wol_irq_enabled)
- irq_set_irq_wake(intf->wol_irq, en);
-
- intf->wol_irq_enabled = en;
- device_set_wakeup_enable(dev, en);
-}
-
-static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
-{
- struct bcmasp_intf *intf;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- if (intf->wol_irq > 0)
- free_irq(intf->wol_irq, priv);
- }
-}
-
static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
{
u32 reg, phy_lpi_overwrite;
@@ -1220,70 +1180,53 @@ static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
usleep_range(50, 100);
}
-static struct bcmasp_hw_info v20_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
- .umac2fb = UMAC2FB_OFFSET,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
- .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
-};
-
-static const struct bcmasp_plat_data v20_plat_data = {
- .init_wol = bcmasp_init_wol_per_intf,
- .enable_wol = bcmasp_enable_wol_per_intf,
- .destroy_wol = bcmasp_wol_irq_destroy_per_intf,
- .core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v20_hw_info,
-};
-
-static struct bcmasp_hw_info v21_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
- .umac2fb = UMAC2FB_OFFSET_2_1,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_filt_out_frame_count =
- ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
-};
-
static const struct bcmasp_plat_data v21_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v21_hw_info,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
};
static const struct bcmasp_plat_data v22_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_many,
- .hw_info = &v21_hw_info,
.eee_fixup = bcmasp_eee_fixup,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
+};
+
+static const struct bcmasp_plat_data v30_plat_data = {
+ .core_clock_select = bcmasp_core_clock_select_one_ctrl2,
+ .num_mda_filters = 20,
+ .num_net_filters = 16,
+ .tx_chan_offset = 0,
+ .rx_ctrl_offset = 0x10000,
};
static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
{
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
priv->core_clock_select = pdata->core_clock_select;
priv->eee_fixup = pdata->eee_fixup;
- priv->hw_info = pdata->hw_info;
+ priv->num_mda_filters = pdata->num_mda_filters;
+ priv->num_net_filters = pdata->num_net_filters;
+ priv->tx_chan_offset = pdata->tx_chan_offset;
+ priv->rx_ctrl_offset = pdata->rx_ctrl_offset;
}
static const struct of_device_id bcmasp_of_match[] = {
- { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
{ .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
+ { .compatible = "brcm,asp-v3.0", .data = &v30_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
- { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
- { .compatible = "brcm,asp-v2.0-mdio", },
+ { .compatible = "brcm,asp-v2.2-mdio", },
+ { .compatible = "brcm,asp-v3.0-mdio", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
@@ -1365,6 +1308,17 @@ static int bcmasp_probe(struct platform_device *pdev)
* how many interfaces come up.
*/
bcmasp_core_init(priv);
+
+ priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters,
+ sizeof(*priv->mda_filters), GFP_KERNEL);
+ if (!priv->mda_filters)
+ return -ENOMEM;
+
+ priv->net_filters = devm_kcalloc(dev, priv->num_net_filters,
+ sizeof(*priv->net_filters), GFP_KERNEL);
+ if (!priv->net_filters)
+ return -ENOMEM;
+
bcmasp_core_init_filters(priv);
ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
@@ -1387,7 +1341,7 @@ static int bcmasp_probe(struct platform_device *pdev)
}
/* Check and enable WoL */
- priv->init_wol(priv);
+ bcmasp_init_wol(priv);
/* Drop the clock reference count now and let ndo_open()/ndo_close()
* manage it for us from now on.
@@ -1404,7 +1358,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (ret) {
netdev_err(intf->ndev,
"failed to register net_device: %d\n", ret);
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
goto of_put_exit;
}
@@ -1425,7 +1379,7 @@ static void bcmasp_remove(struct platform_device *pdev)
if (!priv)
return;
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
}
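
Net effect of the bcmasp refactor above: per-SoC differences (filter
counts, channel offset, rx_ctrl register base) move from function
pointers and a fixed-size hw_info into plain bcmasp_plat_data fields,
and the filter tables are sized at probe time instead of being fixed
arrays. Condensed from the probe-path hunks (the wrapper function is
illustrative):

static int bcmasp_alloc_filters(struct device *dev, struct bcmasp_priv *priv)
{
        priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters,
                                         sizeof(*priv->mda_filters),
                                         GFP_KERNEL);
        if (!priv->mda_filters)
                return -ENOMEM;

        priv->net_filters = devm_kcalloc(dev, priv->num_net_filters,
                                         sizeof(*priv->net_filters),
                                         GFP_KERNEL);
        if (!priv->net_filters)
                return -ENOMEM;

        return 0;
}
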
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index 8fc75bcedb70..74adfdb50e11 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -53,22 +53,15 @@
#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14
#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18
#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c
-/* asp2.1 diverges offsets here */
-/* ASP2.0 */
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24
-#define ASP_RX_CTRL_FLUSH 0x28
-#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
-#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
-#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30
-/* ASP2.1 */
-#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20
-#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c
-#define ASP_RX_CTRL_FLUSH_2_1 0x30
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38
+#define ASP_RX_CTRL_FB_9_FRAME_COUNT 0x20
+#define ASP_RX_CTRL_FB_10_FRAME_COUNT 0x24
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x28
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x2c
+#define ASP_RX_CTRL_FLUSH 0x30
+#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
+#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
+#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x38
#define ASP_RX_FILTER_OFFSET 0x80000
#define ASP_RX_FILTER_BLK_CTRL 0x0
@@ -345,9 +338,6 @@ struct bcmasp_intf {
u32 wolopts;
u8 sopass[SOPASS_MAX];
- /* Used if per intf wol irq */
- int wol_irq;
- unsigned int wol_irq_enabled:1;
};
#define NUM_NET_FILTERS 32
@@ -370,21 +360,13 @@ struct bcmasp_mda_filter {
u8 mask[ETH_ALEN];
};
-struct bcmasp_hw_info {
- u32 rx_ctrl_flush;
- u32 umac2fb;
- u32 rx_ctrl_fb_out_frame_count;
- u32 rx_ctrl_fb_filt_out_frame_count;
- u32 rx_ctrl_fb_rx_fifo_depth;
-};
-
struct bcmasp_plat_data {
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
- struct bcmasp_hw_info *hw_info;
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
};
struct bcmasp_priv {
@@ -399,18 +381,18 @@ struct bcmasp_priv {
int wol_irq;
unsigned long wol_irq_enabled_mask;
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
void __iomem *base;
- struct bcmasp_hw_info *hw_info;
struct list_head intfs;
- struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS];
+ struct bcmasp_mda_filter *mda_filters;
/* MAC destination address filters lock */
spinlock_t mda_lock;
@@ -418,7 +400,7 @@ struct bcmasp_priv {
/* Protects accesses to ASP_CTRL_CLOCK_CTRL */
spinlock_t clk_lock;
- struct bcmasp_net_filter net_filters[NUM_NET_FILTERS];
+ struct bcmasp_net_filter *net_filters;
/* Network filter lock */
struct mutex net_lock;
@@ -508,8 +490,8 @@ BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg);
#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21)
#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19)
#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16)
-#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15)
-#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14)
+#define PKT_OFFLOAD_EPKT_CSUM_L4 BIT(15)
+#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(14)
#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12)
#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10)
#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8)
@@ -541,12 +523,27 @@ BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
-BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
+#define BCMASP_CORE_IO_MACRO_OFFSET(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + priv->name##_offset + \
+ (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + priv->name##_offset + \
+ (offset) + off); \
+}
+BCMASP_CORE_IO_MACRO_OFFSET(rx_ctrl, ASP_RX_CTRL_OFFSET);
+
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -599,4 +596,5 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en);
#endif
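/* For reference, the offset-aware I/O macro above generates accessors that
 * add a per-SoC block base from priv at run time; its expansion for rx_ctrl
 * is effectively:
 *
 *	static inline u32 rx_ctrl_core_rl(struct bcmasp_priv *priv, u32 off)
 *	{
 *		return readl_relaxed(priv->base + priv->rx_ctrl_offset +
 *				     ASP_RX_CTRL_OFFSET + off);
 *	}
 *
 * so ASP 2.1 parts can shift the whole RX_CTRL block via rx_ctrl_offset
 * instead of carrying per-register _2_1 defines.
 */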
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index a537c121d3e2..4381a4cfd8c6 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -10,7 +10,6 @@
#include "bcmasp_intf_defs.h"
enum bcmasp_stat_type {
- BCMASP_STAT_RX_EDPKT,
BCMASP_STAT_RX_CTRL,
BCMASP_STAT_RX_CTRL_PER_INTF,
BCMASP_STAT_SOFT,
@@ -33,8 +32,6 @@ struct bcmasp_stats {
.reg_offset = offset, \
}
-#define STAT_BCMASP_RX_EDPKT(str, offset) \
- STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset)
#define STAT_BCMASP_RX_CTRL(str, offset) \
STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset)
#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \
@@ -42,11 +39,6 @@ struct bcmasp_stats {
/* Must match the order of struct bcmasp_mib_counters */
static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
- /* EDPKT counters */
- STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER),
- STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT),
/* ASP RX control */
STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac",
ASP_RX_CTRL_UMAC_0_FRAME_COUNT),
@@ -71,23 +63,6 @@ static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats)
-static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf,
- const struct bcmasp_stats *s)
-{
- struct bcmasp_priv *priv = intf->parent;
-
- if (!strcmp("Frames Out(Buffer)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_out_frame_count;
-
- if (!strcmp("Frames Out(Filters)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_filt_out_frame_count;
-
- if (!strcmp("RX Buffer FIFO Depth", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_rx_fifo_depth;
-
- return s->reg_offset;
-}
-
static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
@@ -126,13 +101,10 @@ static void bcmasp_update_mib_counters(struct bcmasp_intf *intf)
char *p;
s = &bcmasp_gstrings_stats[i];
- offset = bcmasp_stat_fixup_offset(intf, s);
+ offset = s->reg_offset;
switch (s->type) {
case BCMASP_STAT_SOFT:
continue;
- case BCMASP_STAT_RX_EDPKT:
- val = rx_edpkt_core_rl(intf->parent, offset);
- break;
case BCMASP_STAT_RX_CTRL:
val = rx_ctrl_core_rl(intf->parent, offset);
break;
@@ -215,7 +187,7 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass));
mutex_lock(&priv->wol_lock);
- priv->enable_wol(intf, !!intf->wolopts);
+ bcmasp_enable_wol(intf, !!intf->wolopts);
mutex_unlock(&priv->wol_lock);
return 0;
@@ -289,7 +261,7 @@ static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
return 0;
}
@@ -336,7 +308,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLALL:
err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
break;
default:
err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 45ec1a9214a2..0d61b8580d72 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -180,14 +180,14 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
case htons(ETH_P_IP):
header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(0);
ip_proto = ip_hdr(skb)->protocol;
header_cnt += 2;
break;
case htons(ETH_P_IPV6):
header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(1);
ip_proto = ipv6_hdr(skb)->nexthdr;
header_cnt += 2;
break;
@@ -198,12 +198,12 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
switch (ip_proto) {
case IPPROTO_TCP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
- epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
case IPPROTO_UDP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
- epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
default:
@@ -818,9 +818,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
/* Tx SPB */
tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
TX_SPB_CTRL_XF_CTRL2);
- tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
- tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
@@ -1185,7 +1183,7 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
/* Per port */
intf->res.umac = priv->base + UMC_OFFSET(intf);
- intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
+ intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset +
(intf->port * 0x4));
intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
@@ -1200,7 +1198,6 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}
-#define MAX_IRQ_STR_LEN 64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i)
{
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
index ad742612895f..af7418348e81 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
@@ -118,8 +118,7 @@
#define UMC_PSW_MS 0x624
#define UMC_PSW_LS 0x628
-#define UMAC2FB_OFFSET_2_1 0x9f044
-#define UMAC2FB_OFFSET 0x9f03c
+#define UMAC2FB_OFFSET 0x9f044
#define UMAC2FB_CFG 0x0
#define UMAC2FB_CFG_OPUT_EN BIT(0)
#define UMAC2FB_CFG_VLAN_EN BIT(1)
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index c91884373429..8267417b3750 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -575,7 +575,7 @@ static void b44_check_phy(struct b44 *bp)
static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = from_timer(bp, t, timer);
+ struct b44 *bp = timer_container_of(bp, t, timer);
spin_lock_irq(&bp->lock);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 19611bdd86e6..92204fea1f08 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -286,7 +286,7 @@ static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
*/
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, rx_timeout);
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
@@ -2001,7 +2001,7 @@ static inline int bcm_enet_port_is_rgmii(int portid)
*/
static void swphy_poll_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
+ struct bcm_enet_priv *priv = timer_container_of(priv, t, swphy_poll);
unsigned int i;
for (i = 0; i < priv->num_ports; i++) {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a461ec612e95..3e9c57196a39 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1446,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phy_dev = fixed_phy_register(&fphy_status, NULL);
if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return PTR_ERR(phy_dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ec0c9584f3bb..cb1011f6fd30 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6163,7 +6163,7 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
static void
bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = from_timer(bp, t, timer);
+ struct bnx2 *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f522ca8ff66b..c9a1a1d504c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5783,7 +5783,7 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = from_timer(bp, t, timer);
+ struct bnx2x *bp = timer_container_of(bp, t, timer);
if (!netif_running(bp->dev))
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 86a5de44b6f3..2cb3185c442c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -893,9 +893,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
-static bool bnxt_separate_head_pool(void)
+static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
- return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+ return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}
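/* Passing rxr makes the decision per-ring: need_head_pool is set in
 * bnxt_alloc_rx_page_pool() from page_pool_is_unreadable(pool), so when the
 * agg ring's pool is backed by unreadable netmem, packet headers are still
 * carved from a separate host-readable head pool.
 */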
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -919,6 +919,20 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
return page;
}
+static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem);
+ return netmem;
+}
+
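/* Usage sketch for the agg-ring refill (this is how bnxt_alloc_rx_netmem()
 * below consumes the helper); the pool is created with PP_FLAG_DMA_MAP, so
 * the DMA address comes from the pool rather than an explicit
 * dma_map_page() call:
 *
 *	dma_addr_t mapping;
 *	netmem_ref netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr,
 *						   GFP_ATOMIC);
 *	if (!netmem)
 *		return -ENOMEM;
 */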
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
@@ -999,21 +1013,19 @@ static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
return next;
}
-static inline int bnxt_alloc_rx_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
- struct page *page;
- dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
+ dma_addr_t mapping;
+ netmem_ref netmem;
- page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
- if (!page)
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
+ if (!netmem)
return -ENOMEM;
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -1023,7 +1035,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
- rx_agg_buf->page = page;
+ rx_agg_buf->netmem = netmem;
rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1067,11 +1079,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
- u16 cons;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
struct rx_bd *prod_bd;
- struct page *page;
+ netmem_ref netmem;
+ u16 cons;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
@@ -1088,11 +1100,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
cons_rx_buf = &rxr->rx_agg_ring[cons];
/* It is possible for sw_prod to be equal to cons, so
- * set cons_rx_buf->page to NULL first.
+ * set cons_rx_buf->netmem to 0 first.
*/
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
- prod_rx_buf->page = page;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
@@ -1218,29 +1230,35 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct skb_shared_info *shinfo,
- u16 idx, u32 agg_bufs, bool tpa,
- struct xdp_buff *xdp)
+static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u16 prod = rxr->rx_agg_prod;
+ struct skb_shared_info *shinfo;
+ struct bnxt_rx_ring_info *rxr;
u32 i, total_frag_len = 0;
bool p5_tpa = false;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
+ if (skb)
+ shinfo = skb_shinfo(skb);
+ else
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+
for (i = 0; i < agg_bufs; i++) {
- skb_frag_t *frag = &shinfo->frags[i];
- u16 cons, frag_len;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
- struct page *page;
- dma_addr_t mapping;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
@@ -1251,27 +1269,41 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_frag_fill_page_desc(frag, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
- shinfo->nr_frags = i + 1;
+ if (skb) {
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNXT_RX_PAGE_SIZE);
+ } else {
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len);
+ shinfo->nr_frags = i + 1;
+ }
__clear_bit(cons, rxr->rx_agg_bmap);
- /* It is possible for bnxt_alloc_rx_page() to allocate
+ /* It is possible for bnxt_alloc_rx_netmem() to allocate
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = cons_rx_buf->mapping;
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
- if (xdp && page_is_pfmemalloc(page))
+ if (xdp && netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (skb) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNXT_RX_PAGE_SIZE;
+ }
+
--shinfo->nr_frags;
- cons_rx_buf->page = page;
+ cons_rx_buf->netmem = netmem;
- /* Update prod since possibly some pages have been
+ /* Update prod since possibly some netmems have been
* allocated already.
*/
rxr->rx_agg_prod = prod;
@@ -1279,8 +1311,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return 0;
}
- dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- bp->rx_dir);
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNXT_RX_PAGE_SIZE);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -1289,32 +1321,28 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return total_frag_len;
}
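/* When 'skb' is non-NULL, skb_add_rx_frag_netmem() already updates
 * skb->len, skb->data_len and skb->truesize per fragment (hence the
 * explicit rollback in the allocation-failure path above), which is why the
 * manual accounting is dropped from bnxt_rx_agg_netmems_skb() below.
 */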
-static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 total_frag_len = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
- agg_bufs, tpa, NULL);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ skb, NULL);
if (!total_frag_len) {
skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
- skb->data_len += total_frag_len;
- skb->len += total_frag_len;
- skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
return skb;
}
-static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct xdp_buff *xdp, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
u32 total_frag_len = 0;
@@ -1322,8 +1350,8 @@ static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
if (!xdp_buff_has_frags(xdp))
shinfo->nr_frags = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
- idx, agg_bufs, tpa, xdp);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ NULL, xdp);
if (total_frag_len) {
xdp_buff_set_frags_flag(xdp);
shinfo->nr_frags = agg_bufs;
@@ -1895,7 +1923,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
+ true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
cpr->sw_stats->rx.rx_oom_discards += 1;
@@ -2176,9 +2205,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (bnxt_xdp_attached(bp, rxr)) {
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
- u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
- cp_cons, agg_bufs,
- false);
+ u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
+ cp_cons,
+ agg_bufs,
+ false);
if (!frag_len)
goto oom_next_rx;
@@ -2230,7 +2260,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (agg_bufs) {
if (!xdp_active) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
+ agg_bufs, false);
if (!skb)
goto oom_next_rx;
} else {
@@ -3449,15 +3480,15 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
- struct page *page = rx_agg_buf->page;
+ netmem_ref netmem = rx_agg_buf->netmem;
- if (!page)
+ if (!netmem)
continue;
- rx_agg_buf->page = NULL;
+ rx_agg_buf->netmem = 0;
__clear_bit(i, rxr->rx_agg_bmap);
- page_pool_recycle_direct(rxr->page_pool, page);
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
}
}
@@ -3750,7 +3781,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_destroy(rxr->head_pool);
rxr->page_pool = rxr->head_pool = NULL;
@@ -3781,15 +3812,19 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
- pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
return PTR_ERR(pool);
rxr->page_pool = pool;
- if (bnxt_separate_head_pool()) {
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnxt_separate_head_pool(rxr)) {
pp.pool_size = max(bp->rx_ring_size, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
goto err_destroy_pp;
@@ -4201,6 +4236,8 @@ static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
rxr->page_pool->p.napi = NULL;
rxr->page_pool = NULL;
+ rxr->head_pool->p.napi = NULL;
+ rxr->head_pool = NULL;
memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
ring = &rxr->rx_ring_struct;
@@ -4325,16 +4362,16 @@ static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
rxr->rx_prod = prod;
}
-static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- int ring_nr)
+static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
u32 prod;
int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -4375,7 +4412,7 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
+ bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
if (rxr->rx_tpa) {
rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
@@ -10077,7 +10114,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input *req;
u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc, len;
+ int rc, len, max_tmo_secs;
rc = hwrm_req_init(bp, req, HWRM_VER_GET);
if (rc)
@@ -10150,9 +10187,14 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
if (!bp->hwrm_cmd_max_timeout)
bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
- else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
- netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
- bp->hwrm_cmd_max_timeout / 1000);
+ max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
+#ifdef CONFIG_DETECT_HUNG_TASK
+ if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
+ max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
+ max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
+ }
+#endif
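/* CONFIG_DEFAULT_HUNG_TASK_TIMEOUT is in seconds and only defined when
 * CONFIG_DETECT_HUNG_TASK is enabled, hence the #ifdef. Example: a device
 * requesting the new 60000 ms HWRM_CMD_MAX_TIMEOUT yields
 * max_tmo_secs = 60, so the warning fires only when the device asks for
 * more than the driver cap or more than the kernel's configured hung-task
 * timeout (typically 120 s by default).
 */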
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -10738,6 +10780,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bp->num_rss_ctx--;
}
+static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int rxr_id)
+{
+ u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ int i, vnic_rx;
+
+	/* The ntuple VNIC always spans all the rx rings, so any ring id
+	 * change must be applied to it because a future filter may use it.
+ */
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ return true;
+
+ for (i = 0; i < tbl_size; i++) {
+ if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+ else
+ vnic_rx = bp->rss_indir_tbl[i];
+
+ if (rxr_id == vnic_rx)
+ return true;
+ }
+
+ return false;
+}
+
+static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u16 mru, int rxr_id)
+{
+ int rc;
+
+ if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
+ return 0;
+
+ if (mru) {
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ }
+ vnic->mru = mru;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+ return 0;
+}
+
+static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+ int rc;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
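/* Queue restart flow with the two helpers above (sketch of the call chain
 * in bnxt_queue_start()/bnxt_queue_stop() further down):
 *
 *	mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
 *	for each default VNIC:
 *		bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
 *	bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
 *
 * Both helpers return early via bnxt_vnic_has_rx_ring() when the VNIC's RSS
 * indirection table never maps to ring 'idx', avoiding needless RSS and MRU
 * updates on unaffected VNICs.
 */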
static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
{
bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
@@ -13953,7 +14061,7 @@ fw_reset:
static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = from_timer(bp, t, timer);
+ struct bnxt *bp = timer_container_of(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
@@ -14013,13 +14121,28 @@ static void bnxt_unlock_sp(struct bnxt *bp)
netdev_unlock(bp->dev);
}
+/* Same as bnxt_lock_sp() with additional rtnl_lock */
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+{
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
+ netdev_lock(bp->dev);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
+}
+
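/* Lock ordering note: the rtnl lock is taken before the per-netdev instance
 * lock and released after it, matching the core rtnl -> netdev_lock order.
 */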
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_lock_sp(bp);
+ bnxt_rtnl_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_unlock_sp(bp);
+ bnxt_rtnl_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -14027,9 +14150,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_lock_sp(bp);
+ bnxt_rtnl_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_unlock_sp(bp);
+ bnxt_rtnl_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -14068,7 +14191,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_unlock_sp(bp);
+ bnxt_rtnl_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -14960,15 +15083,17 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!netdev_trylock(bp->dev)) {
+ while (!rtnl_trylock()) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
+ netdev_lock(bp->dev);
rc = bnxt_open(bp->dev);
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
netdev_unlock(bp->dev);
+ rtnl_unlock();
goto ulp_start;
}
@@ -14988,6 +15113,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_status_update(bp, true);
}
netdev_unlock(bp->dev);
+ rtnl_unlock();
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
netdev_lock(bp->dev);
@@ -15711,6 +15837,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
clone->rx_agg_prod = 0;
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
+ clone->need_head_pool = false;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
@@ -15753,7 +15880,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+ bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
if (bp->flags & BNXT_FLAG_TPA)
bnxt_alloc_one_tpa_info_data(bp, clone);
@@ -15769,7 +15896,7 @@ err_rxq_info_unreg:
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
page_pool_destroy(clone->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(clone))
page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
clone->head_pool = NULL;
@@ -15788,7 +15915,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
rxr->head_pool = NULL;
@@ -15866,6 +15993,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt_vnic_info *vnic;
struct bnxt_napi *bnapi;
int i, rc;
+ u16 mru;
rxr = &bp->rx_ring[idx];
clone = qmem;
@@ -15879,6 +16007,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
rxr->page_pool = clone->page_pool;
rxr->head_pool = clone->head_pool;
rxr->xdp_rxq = clone->xdp_rxq;
+ rxr->need_head_pool = clone->need_head_pool;
bnxt_copy_rx_ring(bp, rxr, clone);
@@ -15915,28 +16044,22 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
- vnic->vnic_id, rc);
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
+ if (rc)
return rc;
- }
- vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
- bnxt_hwrm_vnic_update(bp, vnic,
- VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
}
-
- return 0;
+ return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
err_reset:
netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
rc);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- bnxt_reset_task(bp, true);
+ netif_close(dev);
return rc;
}
@@ -15951,10 +16074,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- vnic->mru = 0;
- bnxt_hwrm_vnic_update(bp, vnic,
- VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+ bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
}
+ bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
/* Make sure NAPI sees that the VNIC is disabled */
synchronize_net();
rxr = &bp->rx_ring[idx];
@@ -15964,7 +16087,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
page_pool_disable_direct_recycling(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_disable_direct_recycling(rxr->head_pool);
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -16752,6 +16875,7 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ rtnl_lock();
netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
@@ -16796,6 +16920,7 @@ static int bnxt_resume(struct device *device)
resume_exit:
netdev_unlock(bp->dev);
+ rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -16961,6 +17086,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
+ rtnl_lock();
netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
@@ -16978,6 +17104,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
netdev_unlock(netdev);
+ rtnl_unlock();
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bc8b3b7e915d..fda0d3cc6227 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -903,7 +903,7 @@ struct bnxt_sw_rx_bd {
};
struct bnxt_sw_rx_agg_bd {
- struct page *page;
+ netmem_ref netmem;
unsigned int offset;
dma_addr_t mapping;
};
@@ -1106,6 +1106,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index a000d3f630bd..ce97befd3cb3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -506,9 +506,16 @@ err:
start_utc, coredump.total_segs + 1,
rc);
kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
+ if (!rc) {
+ *dump_len += sizeof(struct bnxt_coredump_record);
+		/* The actual coredump length can be smaller than the length
+		 * the FW reported earlier; use the length ethtool provided.
+ */
+ if (buf_len)
+ *dump_len = buf_len;
+ } else if (rc == -ENOBUFS) {
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ }
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 15ca51b5d204..fb5f5b063c3d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -58,7 +58,7 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000U
+#define HWRM_CMD_MAX_TIMEOUT 60000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index a8e930d5dbb0..2450a369b792 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -20,6 +20,7 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
+#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -148,7 +149,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- int i = 0;
ulp = edev->ulp_tbl;
netdev_lock(dev);
@@ -164,10 +164,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
synchronize_rcu();
ulp->max_async_event_id = 0;
ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
mutex_unlock(&edev->en_dev_lock);
netdev_unlock(dev);
return;
@@ -235,10 +231,9 @@ void bnxt_ulp_stop(struct bnxt *bp)
return;
mutex_lock(&edev->en_dev_lock);
- if (!bnxt_ulp_registered(edev)) {
- mutex_unlock(&edev->en_dev_lock);
- return;
- }
+ if (!bnxt_ulp_registered(edev) ||
+ (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_stop_exit;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
@@ -254,6 +249,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ulp_stop_exit:
mutex_unlock(&edev->en_dev_lock);
}
@@ -262,19 +258,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- if (!edev)
- return;
-
- edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
-
- if (err)
+ if (!edev || err)
return;
mutex_lock(&edev->en_dev_lock);
- if (!bnxt_ulp_registered(edev)) {
- mutex_unlock(&edev->en_dev_lock);
- return;
- }
+ if (!bnxt_ulp_registered(edev) ||
+ !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_start_exit;
if (edev->ulp_tbl->msix_requested)
bnxt_fill_msix_vecs(bp, edev->msix_entries);
@@ -291,6 +281,8 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
adrv->resume(adev);
}
}
+ulp_start_exit:
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
mutex_unlock(&edev->en_dev_lock);
}
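/* With the BNXT_EN_FLAG_ULP_STOPPED bookkeeping above, stop/start become
 * idempotent and safely pairable (sketch):
 *
 *	bnxt_ulp_stop(bp);	suspends the aux device, sets ULP_STOPPED
 *	bnxt_ulp_stop(bp);	no-op, flag already set
 *	bnxt_ulp_start(bp, 0);	resumes and clears ULP_STOPPED
 *
 * Note bnxt_ulp_start() clears the flag on every exit path via the
 * ulp_start_exit label, even when the ULP is not registered.
 */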
@@ -309,14 +301,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
- netdev_lock(bp->dev);
- ops = rcu_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_stop)
return;
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
ops->ulp_irq_stop(ulp->handle, reset);
- netdev_unlock(bp->dev);
}
}
@@ -335,8 +325,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
- netdev_lock(bp->dev);
- ops = rcu_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -348,7 +337,6 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
bnxt_fill_msix_vecs(bp, ent);
}
ops->ulp_irq_restart(ulp->handle, ent);
- netdev_unlock(bp->dev);
kfree(ent);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 7fa3b8d1ebd2..7b9dd8ebe4bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -10,9 +10,6 @@
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
-#define BNXT_ROCE_ULP 0
-#define BNXT_MAX_ULP 1
-
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
@@ -50,7 +47,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- atomic_t ref_count;
};
struct bnxt_en_dev {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index e675611777b5..4a6d8cb9f970 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -425,9 +425,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
- xdp_features_set_redirect_target(dev, true);
+ xdp_features_set_redirect_target_locked(dev, true);
} else {
- xdp_features_clear_redirect_target(dev);
+ xdp_features_clear_redirect_target_locked(dev);
bnxt_set_rx_skb_mode(bp, false);
}
bp->tx_nr_rings_xdp = tx_xdp;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 73d78dcb774d..fa0077bc67b7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -969,12 +969,13 @@ static int bcmgenet_set_pauseparam(struct net_device *dev,
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
- BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_RTNL = -1,
BCMGENET_STAT_MIB_RX,
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
BCMGENET_STAT_SOFT,
+ BCMGENET_STAT_SOFT64,
};
struct bcmgenet_stats {
@@ -984,13 +985,15 @@ struct bcmgenet_stats {
enum bcmgenet_stat_type type;
/* reg offset from UMAC base for misc counters */
u16 reg_offset;
+ /* sync for u64 stats counters */
+ int syncp_offset;
};
-#define STAT_NETDEV(m) { \
+#define STAT_RTNL(m) { \
.stat_string = __stringify(m), \
- .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
- .stat_offset = offsetof(struct net_device_stats, m), \
- .type = BCMGENET_STAT_NETDEV, \
+ .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m), \
+ .type = BCMGENET_STAT_RTNL, \
}
#define STAT_GENET_MIB(str, m, _type) { \
@@ -1000,6 +1003,14 @@ struct bcmgenet_stats {
.type = _type, \
}
+#define STAT_GENET_SOFT_MIB64(str, s, m) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, s.m), \
+ .type = BCMGENET_STAT_SOFT64, \
+ .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \
+}
+
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
@@ -1014,18 +1025,38 @@ struct bcmgenet_stats {
}
#define STAT_GENET_Q(num) \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
- tx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
- tx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
- rx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
- rx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
- rx_rings[num].errors), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
- rx_rings[num].dropped)
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \
+ tx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \
+ tx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \
+ tx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \
+ tx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \
+ rx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \
+ rx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \
+ rx_rings[num].stats64, multicast), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \
+ rx_rings[num].stats64, missed), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \
+ rx_rings[num].stats64, length_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \
+ rx_rings[num].stats64, over_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \
+ rx_rings[num].stats64, crc_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \
+ rx_rings[num].stats64, frame_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \
+ rx_rings[num].stats64, fragmented_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \
+ rx_rings[num].stats64, broadcast)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -1037,15 +1068,20 @@ struct bcmgenet_stats {
*/
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
/* general stats */
- STAT_NETDEV(rx_packets),
- STAT_NETDEV(tx_packets),
- STAT_NETDEV(rx_bytes),
- STAT_NETDEV(tx_bytes),
- STAT_NETDEV(rx_errors),
- STAT_NETDEV(tx_errors),
- STAT_NETDEV(rx_dropped),
- STAT_NETDEV(tx_dropped),
- STAT_NETDEV(multicast),
+ STAT_RTNL(rx_packets),
+ STAT_RTNL(tx_packets),
+ STAT_RTNL(rx_bytes),
+ STAT_RTNL(tx_bytes),
+ STAT_RTNL(rx_errors),
+ STAT_RTNL(tx_errors),
+ STAT_RTNL(rx_dropped),
+ STAT_RTNL(tx_dropped),
+ STAT_RTNL(multicast),
+ STAT_RTNL(rx_missed_errors),
+ STAT_RTNL(rx_length_errors),
+ STAT_RTNL(rx_over_errors),
+ STAT_RTNL(rx_crc_errors),
+ STAT_RTNL(rx_frame_errors),
/* UniMAC RSV counters */
STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
@@ -1133,6 +1169,20 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
+#define BCMGENET_STATS64_ADD(stats, m, v) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_add(&stats->m, v); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
+#define BCMGENET_STATS64_INC(stats, m) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_inc(&stats->m); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
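/* Writer/reader pairing for the helpers above (sketch): writers in the
 * datapath use the macros, readers use the u64_stats retry loop:
 *
 *	BCMGENET_STATS64_INC(stats, dropped);	(NAPI/xmit context)
 *
 *	unsigned int start;
 *	u64 dropped;
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		dropped = u64_stats_read(&stats->dropped);
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 */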
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1216,8 +1266,9 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
- case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_RTNL:
case BCMGENET_STAT_SOFT:
+ case BCMGENET_STAT_SOFT64:
continue;
case BCMGENET_STAT_RUNT:
offset += BCMGENET_STAT_OFFSET;
@@ -1255,28 +1306,40 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
u64 *data)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync *syncp;
+ unsigned int start;
int i;
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
- dev->netdev_ops->ndo_get_stats(dev);
+ dev_get_stats(dev, &stats64);
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
s = &bcmgenet_gstrings_stats[i];
- if (s->type == BCMGENET_STAT_NETDEV)
- p = (char *)&dev->stats;
- else
- p = (char *)priv;
- p += s->stat_offset;
- if (sizeof(unsigned long) != sizeof(u32) &&
- s->stat_sizeof == sizeof(unsigned long))
- data[i] = *(unsigned long *)p;
- else
- data[i] = *(u32 *)p;
+ p = (char *)priv;
+
+ if (s->type == BCMGENET_STAT_SOFT64) {
+ syncp = (struct u64_stats_sync *)(p + s->syncp_offset);
+ do {
+ start = u64_stats_fetch_begin(syncp);
+ data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset));
+ } while (u64_stats_fetch_retry(syncp, start));
+ } else {
+ if (s->type == BCMGENET_STAT_RTNL)
+ p = (char *)&stats64;
+
+ p += s->stat_offset;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
@@ -1856,6 +1919,7 @@ static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
@@ -1896,8 +1960,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = c_index;
- ring->packets += pkts_compl;
- ring->bytes += bytes_compl;
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->packets, pkts_compl);
+ u64_stats_add(&stats->bytes, bytes_compl);
+ u64_stats_update_end(&stats->syncp);
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
@@ -1983,8 +2049,10 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
* the transmit checksum offsets in the descriptors
*/
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
@@ -2001,7 +2069,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
if (!new_skb) {
dev_kfree_skb_any(skb);
priv->mib.tx_realloc_tsb_failed++;
- dev->stats.tx_dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
return NULL;
}
dev_consume_skb_any(skb);
@@ -2089,7 +2157,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
GENET_CB(skb)->bytes_sent = skb->len;
/* add the Transmit Status Block */
- skb = bcmgenet_add_tsb(dev, skb);
+ skb = bcmgenet_add_tsb(dev, skb, ring);
if (!skb) {
ret = NETDEV_TX_OK;
goto out;
@@ -2231,6 +2299,7 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int budget)
{
+ struct bcmgenet_rx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct enet_cb *cb;
@@ -2253,7 +2322,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- ring->errors += discards;
+ BCMGENET_STATS64_ADD(stats, missed, discards);
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -2279,7 +2348,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- ring->dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
goto next;
}
@@ -2306,8 +2375,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(len > RX_BUF_LENGTH)) {
netif_err(priv, rx_status, dev, "oversized packet\n");
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ BCMGENET_STATS64_INC(stats, length_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2315,7 +2383,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- ring->errors++;
+ BCMGENET_STATS64_INC(stats, fragmented_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2328,15 +2396,22 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_RX_RXER))) {
netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
(unsigned int)dma_flag);
+ u64_stats_update_begin(&stats->syncp);
if (dma_flag & DMA_RX_CRC_ERROR)
- dev->stats.rx_crc_errors++;
+ u64_stats_inc(&stats->crc_errors);
if (dma_flag & DMA_RX_OV)
- dev->stats.rx_over_errors++;
+ u64_stats_inc(&stats->over_errors);
if (dma_flag & DMA_RX_NO)
- dev->stats.rx_frame_errors++;
+ u64_stats_inc(&stats->frame_errors);
if (dma_flag & DMA_RX_LG)
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ u64_stats_inc(&stats->length_errors);
+ if ((dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER)) == DMA_RX_RXER)
+ u64_stats_inc(&stats->errors);
+ u64_stats_update_end(&stats->syncp);
dev_kfree_skb_any(skb);
goto next;
} /* error packet */
@@ -2356,10 +2431,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/* Finish setting up the received SKB and send it to the kernel */
skb->protocol = eth_type_trans(skb, priv->dev);
- ring->packets++;
- ring->bytes += len;
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->packets);
+ u64_stats_add(&stats->bytes, len);
if (dma_flag & DMA_RX_MULT)
- dev->stats.multicast++;
+ u64_stats_inc(&stats->multicast);
+ else if (dma_flag & DMA_RX_BRDCAST)
+ u64_stats_inc(&stats->broadcast);
+ u64_stats_update_end(&stats->syncp);
/* Notify kernel */
napi_gro_receive(&ring->napi, skb);
@@ -3420,7 +3500,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
netif_trans_update(dev);
- dev->stats.tx_errors++;
+ BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors);
netif_tx_wake_all_queues(dev);
}
@@ -3509,39 +3589,72 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+static void bcmgenet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long tx_bytes = 0, tx_packets = 0;
- unsigned long rx_bytes = 0, rx_packets = 0;
- unsigned long rx_errors = 0, rx_dropped = 0;
- struct bcmgenet_tx_ring *tx_ring;
- struct bcmgenet_rx_ring *rx_ring;
+ struct bcmgenet_tx_stats64 *tx_stats;
+ struct bcmgenet_rx_stats64 *rx_stats;
+ u64 rx_length_errors, rx_over_errors;
+ u64 rx_missed, rx_fragmented_errors;
+ u64 rx_crc_errors, rx_frame_errors;
+ u64 tx_errors, tx_dropped;
+ u64 rx_errors, rx_dropped;
+ u64 tx_bytes, tx_packets;
+ u64 rx_bytes, rx_packets;
+ unsigned int start;
unsigned int q;
+ u64 multicast;
for (q = 0; q <= priv->hw_params->tx_queues; q++) {
- tx_ring = &priv->tx_rings[q];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
+ tx_stats = &priv->tx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
+ tx_bytes = u64_stats_read(&tx_stats->bytes);
+ tx_packets = u64_stats_read(&tx_stats->packets);
+ tx_errors = u64_stats_read(&tx_stats->errors);
+ tx_dropped = u64_stats_read(&tx_stats->dropped);
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
+
+ stats->tx_bytes += tx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_errors += tx_errors;
+ stats->tx_dropped += tx_dropped;
}
for (q = 0; q <= priv->hw_params->rx_queues; q++) {
- rx_ring = &priv->rx_rings[q];
-
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
+ rx_stats = &priv->rx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
+ rx_bytes = u64_stats_read(&rx_stats->bytes);
+ rx_packets = u64_stats_read(&rx_stats->packets);
+ rx_errors = u64_stats_read(&rx_stats->errors);
+ rx_dropped = u64_stats_read(&rx_stats->dropped);
+ rx_missed = u64_stats_read(&rx_stats->missed);
+ rx_length_errors = u64_stats_read(&rx_stats->length_errors);
+ rx_over_errors = u64_stats_read(&rx_stats->over_errors);
+ rx_crc_errors = u64_stats_read(&rx_stats->crc_errors);
+ rx_frame_errors = u64_stats_read(&rx_stats->frame_errors);
+ rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors);
+ multicast = u64_stats_read(&rx_stats->multicast);
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
+
+ rx_errors += rx_length_errors;
+ rx_errors += rx_crc_errors;
+ rx_errors += rx_frame_errors;
+ rx_errors += rx_fragmented_errors;
+
+ stats->rx_bytes += rx_bytes;
+ stats->rx_packets += rx_packets;
+ stats->rx_errors += rx_errors;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_missed_errors += rx_missed;
+ stats->rx_length_errors += rx_length_errors;
+ stats->rx_over_errors += rx_over_errors;
+ stats->rx_crc_errors += rx_crc_errors;
+ stats->rx_frame_errors += rx_frame_errors;
+ stats->multicast += multicast;
}
-
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
- dev->stats.rx_bytes = rx_bytes;
- dev->stats.rx_packets = rx_packets;
- dev->stats.rx_errors = rx_errors;
- dev->stats.rx_missed_errors = rx_errors;
- dev->stats.rx_dropped = rx_dropped;
- return &dev->stats;
}
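/* Note that rx_errors as reported here aggregates the per-ring base error
 * count with the length, CRC, frame and fragmented error counters, while
 * missed frames now feed rx_missed_errors instead of being folded into
 * rx_errors as before.
 */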
static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
@@ -3569,7 +3682,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
- .ndo_get_stats = bcmgenet_get_stats,
+ .ndo_get_stats64 = bcmgenet_get_stats64,
.ndo_change_carrier = bcmgenet_change_carrier,
};
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 10c631bbe964..5ec3979779ec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -155,6 +155,30 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
+struct bcmgenet_tx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+};
+
+struct bcmgenet_rx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t bytes;
+ u64_stats_t packets;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+ u64_stats_t multicast;
+ u64_stats_t broadcast;
+ u64_stats_t missed;
+ u64_stats_t length_errors;
+ u64_stats_t over_errors;
+ u64_stats_t crc_errors;
+ u64_stats_t frame_errors;
+ u64_stats_t fragmented_errors;
+};
+
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
@@ -515,8 +539,7 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
- unsigned long packets;
- unsigned long bytes;
+ struct bcmgenet_tx_stats64 stats64;
unsigned int index; /* ring index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
@@ -540,10 +563,7 @@ struct bcmgenet_net_dim {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
- unsigned long bytes;
- unsigned long packets;
- unsigned long errors;
- unsigned long dropped;
+ struct bcmgenet_rx_stats64 stats64;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 71c619d2bea5..b6437ba7a2eb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -625,7 +625,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
.asym_pause = 0,
};
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phydev = fixed_phy_register(&fphy_status, NULL);
if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return PTR_ERR(phydev);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d1f541af4e3b..91104cc2c238 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,7 +54,7 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <linux/crc32poly.h>
+#include <linux/crc32.h>
#include <linux/dmi.h>
#include <net/checksum.h>
@@ -9809,26 +9809,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
static inline u32 calc_crc(unsigned char *buf, int len)
{
- u32 reg;
- u32 tmp;
- int j, k;
-
- reg = 0xffffffff;
-
- for (j = 0; j < len; j++) {
- reg ^= buf[j];
-
- for (k = 0; k < 8; k++) {
- tmp = reg & 0x01;
-
- reg >>= 1;
-
- if (tmp)
- reg ^= CRC32_POLY_LE;
- }
- }
-
- return ~reg;
+ return ~crc32(~0, buf, len);
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
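
The deleted loop in calc_crc() was an open-coded little-endian (bit-reflected) CRC-32: seed ~0, conditional XOR with CRC32_POLY_LE (0xedb88320, the reflected form of 0x04c11db7), and a final inversion. That is exactly what ~crc32(~0, buf, len) evaluates to, since crc32() maps to crc32_le(). A hedged sketch of the typical multicast-hash use (function name hypothetical):

#include <linux/crc32.h>
#include <linux/if_ether.h>

/* Same value the removed loop produced for a 6-byte MAC address. */
static u32 demo_mcast_crc(const u8 *addr)
{
	return ~crc32(~0, addr, ETH_ALEN);
}
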
@@ -11081,7 +11062,7 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = from_timer(tp, t, timer);
+ struct tg3 *tp = timer_container_of(tp, t, timer);
spin_lock(&tp->lock);
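
from_timer() and timer_container_of() perform the same container_of() lookup; the tree-wide rename only changes the spelling, so every such conversion in this series is mechanical. A minimal sketch with a hypothetical device structure:

#include <linux/timer.h>

struct demo_adapter {
	struct timer_list watchdog;
};

static void demo_watchdog_fn(struct timer_list *t)
{
	/* Recover the enclosing structure from the embedded timer_list. */
	struct demo_adapter *adap = timer_container_of(adap, t, watchdog);

	/* ... service the device, e.g. mod_timer(&adap->watchdog, ...); */
}
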
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index a03eff3d4425..9bed33295839 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1688,7 +1688,8 @@ err_return:
static void
bnad_ioc_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.ioc_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1699,7 +1700,8 @@ bnad_ioc_timeout(struct timer_list *t)
static void
bnad_ioc_hb_check(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.hb_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1710,7 +1712,8 @@ bnad_ioc_hb_check(struct timer_list *t)
static void
bnad_iocpf_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.iocpf_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1721,7 +1724,8 @@ bnad_iocpf_timeout(struct timer_list *t)
static void
bnad_iocpf_sem_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
+ struct bnad *bnad = timer_container_of(bnad, t,
+ bna.ioceth.ioc.sem_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1735,7 +1739,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
* Time CPU m CPU n
* 0 1 = test_bit
* 1 clear_bit
- * 2 del_timer_sync
+ * 2 timer_delete_sync
* 3 mod_timer
*/
@@ -1743,7 +1747,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
static void
bnad_dim_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, dim_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, dim_timer);
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
int i, j;
@@ -1776,7 +1780,7 @@ bnad_dim_timeout(struct timer_list *t)
static void
bnad_stats_timeout(struct timer_list *t)
{
- struct bnad *bnad = from_timer(bnad, t, stats_timer);
+ struct bnad *bnad = timer_container_of(bnad, t, stats_timer);
unsigned long flags;
if (!netif_running(bnad->netdev) ||
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1fe8ec37491b..d1f1ae5ea161 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -997,22 +997,15 @@ static void macb_update_stats(struct macb *bp)
static int macb_halt_tx(struct macb *bp)
{
- unsigned long halt_time, timeout;
- u32 status;
+ u32 status;
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
- do {
- halt_time = jiffies;
- status = macb_readl(bp, TSR);
- if (!(status & MACB_BIT(TGO)))
- return 0;
-
- udelay(250);
- } while (time_before(halt_time, timeout));
-
- return -ETIMEDOUT;
+ /* Poll TSR until TGO is cleared or timeout. */
+ return read_poll_timeout_atomic(macb_readl, status,
+ !(status & MACB_BIT(TGO)),
+ 250, MACB_HALT_TIMEOUT, false,
+ bp, TSR);
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
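
read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, delay_before_read, args...) busy-waits with udelay(), evaluates cond after each op(args...) read, and returns 0 or -ETIMEDOUT, replacing the open-coded jiffies bookkeeping macb_halt_tx() used. A minimal sketch against a hypothetical MMIO status register (bit and names illustrative):

#include <linux/iopoll.h>
#include <linux/bits.h>
#include <linux/io.h>

#define DEMO_BUSY	BIT(0)	/* illustrative bit, not a macb register */

static int demo_wait_idle(void __iomem *status_reg)
{
	u32 val;

	/* Poll every 10us, give up after 1000us. */
	return read_poll_timeout_atomic(readl, val, !(val & DEMO_BUSY),
					10, 1000, false, status_reg);
}
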
@@ -5290,7 +5283,11 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_out_free_netdev;
+ }
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06397cc8bb36..5211759bfe47 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1389,11 +1389,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
- /* Check if timestamp is requested */
- if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- skb_tx_timestamp(skb);
+ /* Check if hw timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
return;
- }
/* Tx timestamping not supported along with TSO, so ignore request */
if (skb_shinfo(skb)->gso_size)
@@ -1472,6 +1470,8 @@ static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 608cc6af5af1..3b7ad744b2dd 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1605,10 +1605,10 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
- goto err_disable_device;
+ goto err_zero_drv_data;
}
/* MAP configuration registers */
@@ -1616,7 +1616,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bgx->reg_base) {
dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_zero_drv_data;
}
set_max_bgx_per_node(pdev);
@@ -1688,10 +1688,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_enable:
bgx_vnic[bgx->bgx_id] = NULL;
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
-err_release_regions:
- pci_release_regions(pdev);
-err_disable_device:
- pci_disable_device(pdev);
+err_zero_drv_data:
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1710,8 +1707,6 @@ static void bgx_remove(struct pci_dev *pdev)
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
bgx_vnic[bgx->bgx_id] = NULL;
- pci_release_regions(pdev);
- pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
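
The bgx_probe() hunks above lean on devres: pcim_enable_device() and pcim_request_all_regions() register their undo actions with the device, which is why both the error unwinding and bgx_remove() drop the explicit pci_release_regions()/pci_disable_device() calls. A hedged sketch of the resulting probe shape (names illustrative):

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *base;
	int err;

	err = pcim_enable_device(pdev);		/* undone on driver detach */
	if (err)
		return err;

	err = pcim_request_all_regions(pdev, "demo");	/* released on detach */
	if (err)
		return err;

	base = pcim_iomap(pdev, 0, 0);		/* unmapped on detach */
	if (!base)
		return -ENOMEM;

	/* ... device setup; plain error returns need no unwinding labels ... */
	return 0;
}
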
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 3b7068832f95..4a0e2d2eb60a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -351,7 +351,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
adapter->msg_enable = val;
}
-static const char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] __nonstring_array = {
"TxOctetsOK",
"TxOctetsBad",
"TxUnicastFramesOK",
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index a10923c7e25c..5f354cf62cdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1922,7 +1922,7 @@ send:
static void sge_tx_reclaim_cb(struct timer_list *t)
{
int i;
- struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
+ struct sge *sge = timer_container_of(sge, t, tx_reclaim_timer);
for (i = 0; i < SGE_CMDQ_N; ++i) {
struct cmdQ *q = &sge->cmdQ[i];
@@ -2017,7 +2017,7 @@ void t1_sge_start(struct sge *sge)
*/
static void espibug_workaround_t204(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
unsigned int nports = adapter->params.nports;
u32 seop[MAX_NPORTS];
@@ -2060,7 +2060,7 @@ static void espibug_workaround_t204(struct timer_list *t)
static void espibug_workaround(struct timer_list *t)
{
- struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct sge *sge = timer_container_of(sge, t, espibug_timer);
struct adapter *adapter = sge->adapter;
if (netif_running(adapter->port[0].dev)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index bd5c3b3fa5e3..b59735d0e065 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2906,7 +2906,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
*/
static void sge_timer_tx(struct timer_list *t)
{
- struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, tx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
@@ -2947,7 +2947,7 @@ static void sge_timer_tx(struct timer_list *t)
static void sge_timer_rx(struct timer_list *t)
{
spinlock_t *lock;
- struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
+ struct sge_qset *qs = timer_container_of(qs, t, rx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
u32 status;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 551c279dc14b..51395c96b2e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6480,10 +6480,11 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x,
+static int cxgb4_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
@@ -6494,7 +6495,8 @@ static int cxgb4_xfrm_add_state(struct xfrm_state *x,
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x,
+ extack);
out_unlock:
mutex_unlock(&uld_mutex);
@@ -6502,9 +6504,9 @@ out_unlock:
return ret;
}
-static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+static void cxgb4_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6514,15 +6516,15 @@ static void cxgb4_xfrm_del_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
}
-static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+static void cxgb4_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6532,7 +6534,7 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
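
All three cxgb4 wrappers change for the same reason: the xfrmdev_ops callbacks now receive the offloading net_device explicitly instead of each driver reading it back out of x->xso.dev. A minimal sketch of the reworked prototypes, with stub bodies and hypothetical names:

#include <net/xfrm.h>

static int demo_xdo_dev_state_add(struct net_device *dev, struct xfrm_state *x,
				  struct netlink_ext_ack *extack)
{
	/* program the SA into "dev" */
	return 0;
}

static void demo_xdo_dev_state_delete(struct net_device *dev,
				      struct xfrm_state *x)
{
}

static void demo_xdo_dev_state_free(struct net_device *dev,
				    struct xfrm_state *x)
{
}

static const struct xfrmdev_ops demo_xfrmdev_ops = {
	.xdo_dev_state_add	= demo_xdo_dev_state_add,
	.xdo_dev_state_delete	= demo_xdo_dev_state_delete,
	.xdo_dev_state_free	= demo_xdo_dev_state_free,
};
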
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 69d045d769c4..0765d000eaef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -1051,7 +1051,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
static void ch_flower_stats_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, flower_stats_timer);
+ struct adapter *adap = timer_container_of(adap, t, flower_stats_timer);
schedule_work(&adap->flower_stats_work);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index b08356060fb4..7bab8da8f6e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -29,7 +29,7 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+static const struct thermal_zone_device_ops cxgb4_thermal_ops = {
.get_temp = cxgb4_thermal_get_temp,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f991a28a71c3..64402e3646b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1533,7 +1533,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
q = &adap->sge.ethtxq[qidx + pi->first_qset];
}
- skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
@@ -1706,6 +1705,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
+ skb_tx_timestamp(skb);
+
if (immediate) {
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
@@ -2268,7 +2269,6 @@ static int ethofld_hard_xmit(struct net_device *dev,
d = &eosw_txq->desc[eosw_txq->last_pidx];
skb = d->skb;
- skb_tx_timestamp(skb);
wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
@@ -2373,6 +2373,7 @@ write_wr_headers:
eohw_txq->vlan_ins++;
txq_advance(&eohw_txq->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
@@ -4233,7 +4234,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
{
unsigned long m;
unsigned int i;
- struct adapter *adap = from_timer(adap, t, sge.rx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.rx_timer);
struct sge *s = &adap->sge;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -4268,7 +4269,7 @@ done:
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adap = from_timer(adap, t, sge.tx_timer);
+ struct adapter *adap = timer_container_of(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
unsigned long m, period;
unsigned int i, budget;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f42af01f4114..4e6ecb9c8dcc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2062,7 +2062,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
*/
static void sge_rx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.rx_timer);
struct sge *s = &adapter->sge;
unsigned int i;
@@ -2121,7 +2121,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
*/
static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
+ struct adapter *adapter = timer_container_of(adapter, t, sge.tx_timer);
struct sge *s = &adapter->sge;
unsigned int i, budget;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index baba96883f48..ecd9a0bd5e18 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -75,9 +75,12 @@ static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x);
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
@@ -223,7 +226,8 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
@@ -302,14 +306,16 @@ out:
return res;
}
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index e8e460a92e0e..4e2096e49684 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1640,6 +1640,7 @@ static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
return 0;
}
@@ -1903,7 +1904,6 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
th = tcp_hdr(nskb);
skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
- skb_tx_timestamp(nskb);
if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
goto out;
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 9c12e967e9f1..301b3f3114af 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -26,6 +26,7 @@
#define ENIC_WQ_MAX 256
#define ENIC_RQ_MAX 256
+#define ENIC_RQ_MIN_DEFAULT 8
#define ENIC_WQ_NAPI_BUDGET 256
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 9900993b6aea..837f954873ee 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -125,7 +125,7 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
#ifdef CONFIG_RFS_ACCEL
void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
+ struct enic *enic = timer_container_of(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c753c35b26eb..773f5ad972a2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1510,7 +1510,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = from_timer(enic, t, notify_timer);
+ struct enic *enic = timer_container_of(enic, t, notify_timer);
enic_notify_check(enic);
@@ -2296,7 +2296,8 @@ static int enic_adjust_resources(struct enic *enic)
* used based on which resource is the most constrained
*/
wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
- rq_default = netif_get_num_default_rss_queues();
+ rq_default = max(netif_get_num_default_rss_queues(),
+ ENIC_RQ_MIN_DEFAULT);
rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
max_queues = min(enic->cq_avail,
enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 517a15904fb0..6a2004bbe87f 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1144,6 +1144,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
+ bool tcp = false;
void *buffer;
u16 mss;
int ret;
@@ -1151,6 +1152,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word1 = skb->len;
word3 = SOF_BIT;
+ /* Determine if we are doing TCP */
+ if (skb->protocol == htons(ETH_P_IP))
+ tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else
+ /* IPv6 */
+ tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
+
mss = skb_shinfo(skb)->gso_size;
if (mss) {
/* This means we are dealing with TCP and skb->len is the
@@ -1163,8 +1171,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
mss, skb->len);
word1 |= TSS_MTU_ENABLE_BIT;
word3 |= mss;
+ } else if (tcp) {
+ /* Even if we are not using TSO, use the hardware offloader
+ * for transferring the TCP frame: this hardware has partial
+ * TCP awareness (called TOE - TCP Offload Engine) and will
+ * according to the datasheet put packets belonging to the
+ * same TCP connection in the same queue for the TOE/TSO
+ * engine to process. The engine will deal with chopping
+ * up frames that exceed ETH_DATA_LEN which the
+ * checksumming engine cannot handle (see below) into
+ * manageable chunks. It flawlessly deals with quite big
+ * frames and frames containing custom DSA EtherTypes.
+ */
+ mss = netdev->mtu + skb_tcp_all_headers(skb);
+ mss = min(mss, skb->len);
+ netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
+ skb->len, netdev->mtu, mss);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
} else if (skb->len >= ETH_FRAME_LEN) {
- /* Hardware offloaded checksumming isn't working on frames
+ /* Hardware offloaded checksumming isn't working on non-TCP frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
@@ -1181,21 +1207,16 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int tcp = 0;
-
/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
word1 |= TSS_IP_CHKSUM_BIT;
- tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
- } else { /* IPv6 */
+ else
word1 |= TSS_IPV6_ENABLE_BIT;
- tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
- }
word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
}
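
The TOE/TSO branch above sizes mss so the engine chops oversized frames at MTU boundaries while smaller frames pass through whole: with a 1500-byte MTU and, say, 66 bytes of Ethernet/IP/TCP headers, mss = min(1566, skb->len). A one-function sketch of that arithmetic (hypothetical helper, mirroring the hunk):

#include <linux/tcp.h>
#include <linux/netdevice.h>

static unsigned int demo_toe_mss(const struct net_device *netdev,
				 const struct sk_buff *skb)
{
	/* MTU plus L2..L4 header bytes, capped at the frame length. */
	unsigned int mss = netdev->mtu + skb_tcp_all_headers(skb);

	return min(mss, skb->len);
}
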
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index f9339d0772b5..f9504f340c4a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -963,7 +963,7 @@ static void de_next_media (struct de_private *de, const u32 *media,
static void de21040_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1044,7 +1044,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
static void de21041_media_timer (struct timer_list *t)
{
- struct de_private *de = from_timer(de, t, media_timer);
+ struct de_private *de = timer_container_of(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index ae34b95ed676..2d3bd343b6e6 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1115,7 +1115,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void dmfe_timer(struct timer_list *t)
{
- struct dmfe_board_info *db = from_timer(db, t, timer);
+ struct dmfe_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
void __iomem *ioaddr = db->ioaddr;
u32 tmp_cr8;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 2d926a26fbb9..0a12cb9b3ba7 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -104,7 +104,7 @@ int tulip_refill_rx(struct net_device *dev)
void oom_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, oom_timer);
+ struct tulip_private *tp = timer_container_of(tp, t, oom_timer);
napi_schedule(&tp->napi);
}
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 653bde48ef44..1de5ed967070 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -86,7 +86,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
void pnic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 2e3bdc0fcdc0..39c410bf224e 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -78,7 +78,7 @@
void pnic2_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 642e9dfc5451..ca0c509b601c 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -139,7 +139,7 @@ void tulip_media_task(struct work_struct *work)
void mxic_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -156,7 +156,7 @@ void mxic_timer(struct timer_list *t)
void comet_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
int next_tick = 2*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c8c53121557f..5b7e6eb080f3 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -114,7 +114,7 @@ int tulip_debug = 1;
static void tulip_timer(struct timer_list *t)
{
- struct tulip_private *tp = from_timer(tp, t, timer);
+ struct tulip_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (netif_running(dev))
@@ -1411,7 +1411,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
return -ENODEV;
ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 3f1bd670700b..6e4d8d31aba9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1014,7 +1014,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static void uli526x_timer(struct timer_list *t)
{
- struct uli526x_board_info *db = from_timer(db, t, timer);
+ struct uli526x_board_info *db = timer_container_of(db, t, timer);
struct net_device *dev = pci_get_drvdata(db->pdev);
struct uli_phy_ops *phy = &db->phy;
void __iomem *ioaddr = db->ioaddr;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 5930cdec6f2f..a24a25a5f73d 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -375,7 +375,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
goto err_out_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
@@ -763,7 +763,7 @@ static inline void update_csr6(struct net_device *dev, int new)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pci_dev);
void __iomem *ioaddr = np->base_addr;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 232e839a9d07..da9b7715df05 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -146,6 +146,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
+
+ spin_lock_init(&np->stats_lock);
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);
@@ -648,7 +650,7 @@ static int rio_open(struct net_device *dev)
static void
rio_timer (struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = pci_get_drvdata(np->pdev);
unsigned int entry;
int next_tick = 1*HZ;
@@ -865,7 +867,6 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
- dev->stats.tx_errors++;
/* Transmit Underrun */
if (tx_status & 0x10) {
dev->stats.tx_fifo_errors++;
@@ -902,9 +903,15 @@ tx_error (struct net_device *dev, int tx_status)
rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}
+
+ spin_lock(&np->stats_lock);
/* Maximum Collisions */
if (tx_status & 0x08)
dev->stats.collisions++;
+
+ dev->stats.tx_errors++;
+ spin_unlock(&np->stats_lock);
+
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -1073,7 +1080,9 @@ get_stats (struct net_device *dev)
int i;
#endif
unsigned int stat_reg;
+ unsigned long flags;
+ spin_lock_irqsave(&np->stats_lock, flags);
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1123,6 +1132,9 @@ get_stats (struct net_device *dev)
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
+
+ spin_unlock_irqrestore(&np->stats_lock, flags);
+
return &dev->stats;
}
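
The new stats_lock encodes a classic locking rule: tx_error() runs from the interrupt handler and may take the plain spin_lock(), while get_stats() runs in process context and must use spin_lock_irqsave() so the interrupt-side writer cannot deadlock against it on the same CPU. A minimal sketch (structure and names hypothetical):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct demo_priv {
	spinlock_t stats_lock;
	struct net_device_stats stats;
};

/* Runs in hardirq context: interrupts are already off on this CPU. */
static void demo_irq_writer(struct demo_priv *np)
{
	spin_lock(&np->stats_lock);
	np->stats.tx_errors++;
	spin_unlock(&np->stats_lock);
}

/* Runs in process context: must block out the interrupt-side writer. */
static void demo_task_reader(struct demo_priv *np, unsigned long *errors)
{
	unsigned long flags;

	spin_lock_irqsave(&np->stats_lock, flags);
	*errors = np->stats.tx_errors;
	spin_unlock_irqrestore(&np->stats_lock, flags);
}
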
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 0e33e2eaae96..ba679025e866 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -329,18 +329,18 @@ enum _pcs_anlpar {
};
typedef struct t_SROM {
- u16 config_param; /* 0x00 */
- u16 asic_ctrl; /* 0x02 */
- u16 sub_vendor_id; /* 0x04 */
- u16 sub_system_id; /* 0x06 */
- u16 pci_base_1; /* 0x08 (IP1000A only) */
- u16 pci_base_2; /* 0x0a (IP1000A only) */
+ __le16 config_param; /* 0x00 */
+ __le16 asic_ctrl; /* 0x02 */
+ __le16 sub_vendor_id; /* 0x04 */
+ __le16 sub_system_id; /* 0x06 */
+ __le16 pci_base_1; /* 0x08 (IP1000A only) */
+ __le16 pci_base_2; /* 0x0a (IP1000A only) */
__le16 led_mode; /* 0x0c (IP1000A only) */
- u16 reserved1[9]; /* 0x0e-0x1f */
+ __le16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
u8 sib[204]; /* 0x30-0xfb */
- u32 crc; /* 0xfc-0xff */
+ __le32 crc; /* 0xfc-0xff */
} SROM_t, *PSROM_t;
/* Ioctl custom data */
@@ -372,6 +372,8 @@ struct netdev_private {
struct pci_dev *pdev;
void __iomem *ioaddr;
void __iomem *eeprom_addr;
+	/* Serialize dev->stats updates between interrupt and process context. */
+ spinlock_t stats_lock;
spinlock_t tx_lock;
spinlock_t rx_lock;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
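
Retyping the SROM fields as __le16/__le32 documents that the EEPROM image is little-endian and lets sparse flag any access that skips the byte-order helpers. A minimal sketch of a conforming accessor over the SROM_t above (helper name hypothetical; assumes the usual kernel byteorder helpers are in scope):

/* Assumes the SROM_t layout from dl2k.h above. */
static u16 demo_srom_subvendor(const SROM_t *srom)
{
	/* __le16 fields must go through le16_to_cpu(); sparse checks this. */
	return le16_to_cpu(srom->sub_vendor_id);
}
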
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 51b8377edd1d..d730af4a50c7 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
/* version 1 of the cmd is not supported only by BE2 */
if (BE2_chip(adapter))
hdr->version = 0;
- if (BE3_chip(adapter) || lancer_chip(adapter))
+ else if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
else
hdr->version = 2;
@@ -2615,7 +2615,11 @@ err:
return status;
}
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+/*
+ * Since the cookie is text, split the string literal so the complete
+ * cookie never appears verbatim in storage holding this source file.
+ */
+static const char flash_cookie[32] __nonstring = "*** SE FLAS" "H DIRECTORY *** ";
static bool phy_flashing_required(struct be_adapter *adapter)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d70818f06be7..5e2d3ddb5d43 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1415,7 +1415,7 @@ struct flash_section_entry {
} __packed;
struct flash_section_info {
- u8 cookie[32];
+ u8 cookie[32] __nonstring;
struct flash_section_hdr fsec_hdr;
struct flash_section_entry fsec_entry[32];
} __packed;
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 625245b0845c..eba73246f986 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -67,6 +67,8 @@
#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK BIT(12)
+#define TSNEP_TX_TYPE_TSTAMP BIT(13)
+#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
@@ -386,8 +388,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
@@ -479,7 +480,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
return mapped;
}
-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
+ bool do_tstamp)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
@@ -505,6 +507,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
entry->type = TSNEP_TX_TYPE_SKB_INLINE;
mapped = 0;
}
+
+ if (do_tstamp)
+ entry->type |= TSNEP_TX_TYPE_TSTAMP;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -558,11 +563,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- int count = 1;
struct tsnep_tx_entry *entry;
+ bool do_tstamp = false;
+ int count = 1;
int length;
- int i;
int retval;
+ int i;
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
@@ -579,7 +585,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry = &tx->entry[tx->write];
entry->skb = skb;
- retval = tsnep_tx_map(skb, tx, count);
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_tstamp = true;
+ }
+
+ retval = tsnep_tx_map(skb, tx, count, do_tstamp);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
@@ -591,9 +603,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
}
length = retval;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
@@ -844,8 +853,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
length = tsnep_tx_unmap(tx, tx->read, count);
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
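
TSNEP_TX_TYPE_SKB_TSTAMP is a two-bit composite, so both tests above use mask-and-compare; a plain bitwise AND would also accept entries carrying only one of the two bits. A tiny sketch of the idiom (bit values illustrative, not the driver's):

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_TYPE_SKB		BIT(0)
#define DEMO_TYPE_TSTAMP	BIT(13)
#define DEMO_TYPE_SKB_TSTAMP	(DEMO_TYPE_SKB | DEMO_TYPE_TSTAMP)

static bool demo_wants_hw_tstamp(u32 type)
{
	/* True only when *both* bits are set. */
	return (type & DEMO_TYPE_SKB_TSTAMP) == DEMO_TYPE_SKB_TSTAMP;
}
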
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index c699bd6bcbb9..474073c7f94d 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -31,6 +31,7 @@ config FTGMAC100
depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select FIXED_PHY
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 17ec35e75a65..a98d5af3f9e3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1906,7 +1906,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_phy_connect;
}
- phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np);
+ phydev = fixed_phy_register(&ncsi_phy_status, np);
if (IS_ERR(phydev)) {
dev_err(&pdev->dev, "failed to register fixed PHY device\n");
err = PTR_ERR(phydev);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 670b68201376..6ac8547ef9b8 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1074,7 +1074,7 @@ static void allocate_rx_buffers(struct net_device *dev)
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->mii.dev;
void __iomem *ioaddr = np->mem;
int old_crvalue = np->crvalue;
@@ -1163,7 +1163,7 @@ static void enable_rxtx(struct net_device *dev)
static void reset_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, reset_timer);
+ struct netdev_private *np = timer_container_of(np, t, reset_timer);
struct net_device *dev = np->mii.dev;
unsigned long flags;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a2d7300925a8..bbef47c3480c 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -71,7 +71,6 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
select PHYLIB
depends on OF
- select MDIO_DEVRES
select OF_MDIO
help
This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 4948b4906584..23c23cca2620 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -3089,15 +3089,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
return nxmit;
}
-static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct dpaa_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
- switch (config.tx_type) {
+ return 0;
+}
+
+static int dpaa_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
@@ -3112,7 +3122,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
*/
@@ -3121,28 +3131,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- int ret = -EINVAL;
struct dpaa_priv *priv = netdev_priv(net_dev);
- if (cmd == SIOCGMIIREG) {
- if (net_dev->phydev)
- return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
- cmd);
- }
-
- if (cmd == SIOCSHWTSTAMP)
- return dpaa_ts_ioctl(net_dev, rq, cmd);
-
- return ret;
+ return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd);
}
static const struct net_device_ops dpaa_ops = {
@@ -3160,6 +3159,8 @@ static const struct net_device_ops dpaa_ops = {
.ndo_change_mtu = dpaa_change_mtu,
.ndo_bpf = dpaa_xdp,
.ndo_xdp_xmit = dpaa_xdp_xmit,
+ .ndo_hwtstamp_get = dpaa_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa_hwtstamp_set,
};
static int dpaa_napi_add(struct net_device *net_dev)
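
The dpaa conversion above (and the dpaa2/enetc ones that follow) moves SIOC[GS]HWTSTAMP handling from a hand-rolled ioctl to the ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks, where the core copies struct kernel_hwtstamp_config to and from user space. A hedged sketch of the pattern (private struct and fields hypothetical):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

struct demo_priv {
	bool tx_tstamp;
	bool rx_tstamp;
};

static int demo_hwtstamp_get(struct net_device *dev,
			     struct kernel_hwtstamp_config *config)
{
	struct demo_priv *priv = netdev_priv(dev);

	config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
					      HWTSTAMP_FILTER_NONE;
	return 0;
}

static int demo_hwtstamp_set(struct net_device *dev,
			     struct kernel_hwtstamp_config *config,
			     struct netlink_ext_ack *extack)
{
	struct demo_priv *priv = netdev_priv(dev);

	priv->tx_tstamp = config->tx_type == HWTSTAMP_TX_ON;
	priv->rx_tstamp = config->rx_filter != HWTSTAMP_FILTER_NONE;
	if (priv->rx_tstamp)	/* hardware stamps every frame type */
		config->rx_filter = HWTSTAMP_FILTER_ALL;
	return 0;
}
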
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 29886a8ba73f..2ec2c3dab250 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2585,40 +2585,52 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
return 0;
}
-static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa2_eth_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
if (!dpaa2_ptp)
return -EINVAL;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->tx_tstamp_type = config.tx_type;
+ priv->tx_tstamp_type = config->tx_type;
break;
default:
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
priv->rx_tstamp = false;
} else {
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
dpaa2_ptp_onestep_reg_update_method(priv);
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
+}
+
+static int dpaa2_eth_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
+ config->tx_type = priv->tx_tstamp_type;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2626,9 +2638,6 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct dpaa2_eth_priv *priv = netdev_priv(dev);
int err;
- if (cmd == SIOCSHWTSTAMP)
- return dpaa2_eth_ts_ioctl(dev, rq, cmd);
-
mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv)) {
@@ -3034,7 +3043,9 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_xsk_wakeup = dpaa2_xsk_wakeup,
.ndo_setup_tc = dpaa2_eth_setup_tc,
.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
- .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid,
+ .ndo_hwtstamp_get = dpaa2_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa2_eth_hwtstamp_set,
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 6c2779047dcd..54b0f0a5a6bb 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC_CORE
tristate
+ select NXP_NETC_LIB if NXP_NTMP
help
This module supports common functionality between the PF and VF
drivers for the NXP ENETC controller.
@@ -15,10 +16,19 @@ config NXP_ENETC_PF_COMMON
If compiled as module (M), the module name is nxp-enetc-pf-common.
+config NXP_NETC_LIB
+ tristate
+ help
+	  This module provides functionality shared by the ENETC and the NETC
+	  Switch, such as the NETC Table Management Protocol (NTMP) 2.0 and
+	  common tc flower and debugfs interfaces.
+
+config NXP_NTMP
+ bool
+
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI_MSI
- select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
@@ -36,10 +46,10 @@ config FSL_ENETC
config NXP_ENETC4
tristate "ENETC4 PF driver"
depends on PCI_MSI
- select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select NXP_ENETC_PF_COMMON
+ select NXP_NTMP
select PHYLINK
select DIMLIB
help
@@ -73,7 +83,7 @@ config FSL_ENETC_IERB
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && MDIO_DEVRES && MDIO_BUS
+ depends on PCI && PHYLIB
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 6fd27ee4fcd1..f1c5ad45fd76 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -6,6 +6,9 @@ fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o
nxp-enetc-pf-common-y := enetc_pf_common.o
+obj-$(CONFIG_NXP_NETC_LIB) += nxp-netc-lib.o
+nxp-netc-lib-y := ntmp.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
@@ -13,6 +16,7 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o
nxp-enetc4-y := enetc4_pf.o
+nxp-enetc4-$(CONFIG_DEBUG_FS) += enetc4_debugfs.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 3ee52f4b1166..dcc3fbac3481 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -36,6 +36,42 @@ static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
enetc_mm_commit_preemptible_tcs(priv);
}
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
+
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter);
+
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter);
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
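
enetc_mac_addr_hash_idx() byte-reverses the MAC into a 48-bit value and, for each output bit i, takes the parity of input bits i, i+6, ..., i+42. That is equivalent to XOR-folding the eight 6-bit chunks of the reversed address into one 6-bit hash-table index; a compact sketch of the same computation (hypothetical helper, not the driver's code):

#include <linux/etherdevice.h>
#include <linux/swab.h>

static int demo_mac_hash_idx(const u8 *addr)
{
	/* Byte-reverse the 6-byte MAC into the low 48 bits. */
	u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
	int res = 0;
	int i;

	/* XOR-fold eight 6-bit chunks; each res bit is a parity. */
	for (i = 0; i < 8; i++)
		res ^= (fold >> (i * 6)) & 0x3f;

	return res;
}
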
@@ -2379,7 +2415,7 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
for (i = 0; i < si->num_rss; i++)
rss_table[i] = i % num_groups;
- enetc_set_rss_table(si, rss_table, si->num_rss);
+ si->ops->set_rss_table(si, rss_table, si->num_rss);
kfree(rss_table);
@@ -2394,6 +2430,20 @@ static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
}
+static void enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -2410,13 +2460,13 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
if (si->hw_features & ENETC_SI_F_LSO)
enetc_set_lso_flags_mask(hw);
- /* TODO: RSS support for i.MX95 will be supported later, and the
- * is_enetc_rev1() condition will be removed
- */
- if (si->num_rss && is_enetc_rev1(si)) {
+ if (si->num_rss) {
err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
if (err)
return err;
+
+ if (priv->ndev->features & NETIF_F_RXHASH)
+ enetc_set_rss(priv->ndev, true);
}
return 0;
@@ -3209,22 +3259,6 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(enetc_get_stats);
-static int enetc_set_rss(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- u32 reg;
-
- enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
-
- reg = enetc_rd(hw, ENETC_SIMR);
- reg &= ~ENETC_SIMR_RSSE;
- reg |= (en) ? ENETC_SIMR_RSSE : 0;
- enetc_wr(hw, ENETC_SIMR, reg);
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -3262,16 +3296,17 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
}
EXPORT_SYMBOL_GPL(enetc_set_features);
-static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err, new_offloads = priv->active_offloads;
- struct hwtstamp_config config;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ return -EOPNOTSUPP;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
@@ -3290,13 +3325,13 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
new_offloads |= ENETC_F_RX_TSTAMP;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
@@ -3309,42 +3344,36 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
priv->active_offloads = new_offloads;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_set);
-static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- config.flags = 0;
+ if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ return -EOPNOTSUPP;
if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_get);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
- if (cmd == SIOCSHWTSTAMP)
- return enetc_hwtstamp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return enetc_hwtstamp_get(ndev, rq);
- }
-
if (!priv->phylink)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 4ad4eb5c5a74..872d2cbd088b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <linux/fsl/ntmp.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>
@@ -22,6 +23,18 @@
#define ENETC_CBD_DATA_MEM_ALIGN 64
+#define ENETC_MADDR_HASH_TBL_SZ 64
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -266,6 +279,19 @@ struct enetc_platform_info {
const struct enetc_drvdata *data;
};
+struct enetc_si;
+
+/*
+ * This structure defines common hooks for the ENETC PSI and VSI.
+ * In addition, since the VSI only uses struct enetc_si as its private
+ * driver data, this structure also defines some hooks specific to the
+ * VSI. VSI-specific hooks use the vf_*() naming format.
+ */
+struct enetc_si_ops {
+ int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
+ int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
+};
+
/* PCI IEP device data */
struct enetc_si {
struct pci_dev *pdev;
@@ -274,7 +300,10 @@ struct enetc_si {
struct net_device *ndev; /* back ref. */
- struct enetc_cbdr cbd_ring;
+ union {
+ struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */
+ struct ntmp_user ntmp_user; /* ENETC 4.1 and later */
+ };
int num_rx_rings; /* how many rings are available in the SI */
int num_tx_rings;
@@ -284,6 +313,11 @@ struct enetc_si {
u16 revision;
int hw_features;
const struct enetc_drvdata *drvdata;
+ const struct enetc_si_ops *ops;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct rx_mode_task;
+ struct dentry *debugfs_root;
};
#define ENETC_SI_ALIGN 32
@@ -466,6 +500,9 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);
int enetc_get_driver_data(struct enetc_si *si);
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr);
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter);
int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
@@ -481,6 +518,12 @@ int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config);
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
/* ethtool */
extern const struct ethtool_ops enetc_pf_ethtool_ops;
extern const struct ethtool_ops enetc4_pf_ethtool_ops;
@@ -493,15 +536,19 @@ void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si);
+void enetc4_teardown_cbdr(struct enetc_si *si);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index);
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
struct enetc_cbd *cbd,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
new file mode 100644
index 000000000000..1b1591dce73d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2025 NXP */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+
+#include "enetc_pf.h"
+#include "enetc4_debugfs.h"
+
+static void enetc_show_si_mac_hash_filter(struct seq_file *s, int i)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ u32 hash_h, hash_l;
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIUMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIUMHFR1(i));
+ seq_printf(s, "SI %d unicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIMMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIMMHFR1(i));
+ seq_printf(s, "SI %d multicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+}
+
+static int enetc_mac_filter_show(struct seq_file *s, void *data)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ struct maft_entry_data maft;
+ struct enetc_pf *pf;
+ int i, err, num_si;
+ u32 val;
+
+ pf = enetc_si_priv(si);
+ num_si = pf->caps.num_vsi + 1;
+
+ val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+ for (i = 0; i < num_si; i++) {
+ seq_printf(s, "SI %d Unicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_UP(i) & val));
+ seq_printf(s, "SI %d Multicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_MP(i) & val));
+ }
+
+ /* MAC hash filter table */
+ for (i = 0; i < num_si; i++)
+ enetc_show_si_mac_hash_filter(s, i);
+
+ if (!pf->num_mfe)
+ return 0;
+
+ /* MAC address filter table */
+ seq_puts(s, "MAC address filter table\n");
+ for (i = 0; i < pf->num_mfe; i++) {
+ memset(&maft, 0, sizeof(maft));
+ err = ntmp_maft_query_entry(&si->ntmp_user, i, &maft);
+ if (err)
+ return err;
+
+ seq_printf(s, "Entry %d, MAC: %pM, SI bitmap: 0x%04x\n", i,
+ maft.keye.mac_addr, le16_to_cpu(maft.cfge.si_bitmap));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(enetc_mac_filter);
+
+void enetc_create_debugfs(struct enetc_si *si)
+{
+ struct net_device *ndev = si->ndev;
+ struct dentry *root;
+
+ root = debugfs_create_dir(netdev_name(ndev), NULL);
+ if (IS_ERR(root))
+ return;
+
+ si->debugfs_root = root;
+
+ debugfs_create_file("mac_filter", 0444, root, si, &enetc_mac_filter_fops);
+}
+
+void enetc_remove_debugfs(struct enetc_si *si)
+{
+ debugfs_remove(si->debugfs_root);
+ si->debugfs_root = NULL;
+}
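Since the directory is created at the debugfs root using the netdev name,
the new file typically appears as /sys/kernel/debug/&lt;ifname&gt;/mac_filter
(assuming debugfs is mounted in its usual location); each read re-reports
the promiscuous bits, the per-SI hash filters and, via
ntmp_maft_query_entry(), any exact-match MAFT entries.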
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
new file mode 100644
index 000000000000..96caca35f79d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+
+#ifndef __ENETC4_DEBUGFS_H
+#define __ENETC4_DEBUGFS_H
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void enetc_create_debugfs(struct enetc_si *si);
+void enetc_remove_debugfs(struct enetc_si *si);
+#else
+static inline void enetc_create_debugfs(struct enetc_si *si)
+{
+}
+
+static inline void enetc_remove_debugfs(struct enetc_si *si)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index 695cb07c74bc..aa25b445d301 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -99,6 +99,18 @@
#define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018)
#define PSICFGR2_NUM_MSIX GENMASK(5, 0)
+/* Port station interface a unicast MAC hash filter register 0/1 */
+#define ENETC4_PSIUMHFR0(a) ((a) * 0x80 + 0x2050)
+#define ENETC4_PSIUMHFR1(a) ((a) * 0x80 + 0x2054)
+
+/* Port station interface a multicast MAC hash filter register 0/1 */
+#define ENETC4_PSIMMHFR0(a) ((a) * 0x80 + 0x2058)
+#define ENETC4_PSIMMHFR1(a) ((a) * 0x80 + 0x205c)
+
+/* Port station interface a VLAN hash filter register 0/1 */
+#define ENETC4_PSIVHFR0(a) ((a) * 0x80 + 0x2060)
+#define ENETC4_PSIVHFR1(a) ((a) * 0x80 + 0x2064)
+
#define ENETC4_PMCAPR 0x4004
#define PMCAPR_HD BIT(8)
#define PMCAPR_FP GENMASK(10, 9)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index 73ac8c6afb3a..b3dc1afeefd1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -8,9 +8,19 @@
#include <linux/unaligned.h>
#include "enetc_pf_common.h"
+#include "enetc4_debugfs.h"
#define ENETC_SI_MAX_RING_NUM 8
+#define ENETC_MAC_FILTER_TYPE_UC BIT(0)
+#define ENETC_MAC_FILTER_TYPE_MC BIT(1)
+#define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \
+ ENETC_MAC_FILTER_TYPE_MC)
+
+struct enetc_mac_addr {
+ u8 addr[ETH_ALEN];
+};
+
static void enetc4_get_port_caps(struct enetc_pf *pf)
{
struct enetc_hw *hw = &pf->si->hw;
@@ -26,6 +36,9 @@ static void enetc4_get_port_caps(struct enetc_pf *pf)
val = enetc_port_rd(hw, ENETC4_PMCAPR);
pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;
+
+ val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
+ pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
}
static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
@@ -56,6 +69,200 @@ static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
put_unaligned_le16(lower, addr + 4);
}
+static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
+ bool uc_promisc, bool mc_promisc)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+
+ if (uc_promisc)
+ val |= PSIPMMR_SI_MAC_UP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_UP(si);
+
+ if (mc_promisc)
+ val |= PSIPMMR_SI_MAC_MP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_MP(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPMMR, val);
+}
+
+static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
+ /* Default to select MAC level loopback mode if loopback is enabled. */
+ val = u32_replace_bits(val, en ? LPBCK_MODE_MAC_LEVEL : 0,
+ PM_CMD_CFG_LPBK_MODE);
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_mfe; i++)
+ ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
+
+ pf->num_mfe = 0;
+}
+
+static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
+ struct enetc_mac_addr *mac,
+ int mac_cnt)
+{
+ struct maft_entry_data maft = {};
+ u16 si_bit = BIT(0);
+ int i, err;
+
+ maft.cfge.si_bitmap = cpu_to_le16(si_bit);
+ for (i = 0; i < mac_cnt; i++) {
+ ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
+ err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
+ if (unlikely(err)) {
+ pf->num_mfe = i;
+ goto clear_maft_entries;
+ }
+ }
+
+ pf->num_mfe = mac_cnt;
+
+ return 0;
+
+clear_maft_entries:
+ enetc4_pf_clear_maft_entries(pf);
+
+ return err;
+}
+
+static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
+{
+ int max_num_mfe = pf->caps.mac_filter_num;
+ struct enetc_mac_filter mac_filter = {};
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_mac_addr *mac_tbl;
+ struct netdev_hw_addr *ha;
+ int i = 0, err;
+ int mac_cnt;
+
+ netif_addr_lock_bh(ndev);
+
+ mac_cnt = netdev_uc_count(ndev);
+ if (!mac_cnt) {
+ netif_addr_unlock_bh(ndev);
+ /* clear both MAC hash and exact filters */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+
+ return 0;
+ }
+
+ if (mac_cnt > max_num_mfe) {
+ err = -ENOSPC;
+ goto unlock_netif_addr;
+ }
+
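+ /* The addr_list lock is held with BHs disabled here, so the
+ * temporary snapshot must use an atomic allocation.
+ */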
+ mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC);
+ if (!mac_tbl) {
+ err = -ENOMEM;
+ goto unlock_netif_addr;
+ }
+
+ netdev_for_each_uc_addr(ha, ndev) {
+ enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
+ ether_addr_copy(mac_tbl[i++].addr, ha->addr);
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Install a temporary unicast hash filter to avoid Rx loss while
+ * the MAC address filter table is being updated
+ */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
+ enetc4_pf_clear_maft_entries(pf);
+
+ if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+
+ kfree(mac_tbl);
+
+ return 0;
+
+unlock_netif_addr:
+ netif_addr_unlock_bh(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
+{
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_mac_filter *mac_filter;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(ndev);
+ if (type & ENETC_MAC_FILTER_TYPE_UC) {
+ mac_filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_uc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_uc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC) {
+ mac_filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_mc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_mc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+ netif_addr_unlock_bh(ndev);
+}
+
+static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
+{
+ /* Currently the MAC address filter table (MAFT) only has 4 entries,
+ * and the default network configuration already installs several
+ * multicast filter addresses, so the MAFT is reserved for unicast
+ * filtering. If the number of unicast addresses exceeds the table
+ * capacity, the driver falls back to the MAC hash filter.
+ */
+ if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
+ /* Fall back to the MAC hash filter */
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
+ /* Clear the old MAC exact filter */
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC)
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
+}
+
static const struct enetc_pf_ops enetc4_pf_ops = {
.set_si_primary_mac = enetc4_pf_set_si_primary_mac,
.get_si_primary_mac = enetc4_pf_get_si_primary_mac,
@@ -226,24 +433,6 @@ static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
enetc4_pf_reset_tc_msdu(&si->hw);
}
-static void enetc4_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
-{
- int i;
-
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC4_PRSSKR(i), ((u32 *)bytes)[i]);
-}
-
-static void enetc4_set_default_rss_key(struct enetc_pf *pf)
-{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
- struct enetc_hw *hw = &pf->si->hw;
-
- /* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc4_set_rss_key(hw, hash_key);
-}
-
static void enetc4_enable_trx(struct enetc_pf *pf)
{
struct enetc_hw *hw = &pf->si->hw;
@@ -256,10 +445,25 @@ static void enetc4_configure_port(struct enetc_pf *pf)
{
enetc4_configure_port_si(pf);
enetc4_set_trx_frame_size(pf);
- enetc4_set_default_rss_key(pf);
+ enetc_set_default_rss_key(pf);
enetc4_enable_trx(pf);
}
+static int enetc4_init_ntmp_user(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ /* For ENETC 4.1, all table versions are 0 */
+ memset(&user->tbl, 0, sizeof(user->tbl));
+
+ return enetc4_setup_cbdr(si);
+}
+
+static void enetc4_free_ntmp_user(struct enetc_si *si)
+{
+ enetc4_teardown_cbdr(si);
+}
+
static int enetc4_pf_init(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
@@ -272,17 +476,99 @@ static int enetc4_pf_init(struct enetc_pf *pf)
return err;
}
+ err = enetc4_init_ntmp_user(pf->si);
+ if (err) {
+ dev_err(dev, "Failed to init CBDR\n");
+ return err;
+ }
+
enetc4_configure_port(pf);
return 0;
}
+static void enetc4_pf_free(struct enetc_pf *pf)
+{
+ enetc4_free_ntmp_user(pf->si);
+}
+
+static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
+{
+ struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct net_device *ndev = si->ndev;
+ struct enetc_hw *hw = &si->hw;
+ bool uc_promisc = false;
+ bool mc_promisc = false;
+ int type = 0;
+
+ rtnl_lock();
+
+ if (ndev->flags & IFF_PROMISC) {
+ uc_promisc = true;
+ mc_promisc = true;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ mc_promisc = true;
+ type = ENETC_MAC_FILTER_TYPE_UC;
+ } else {
+ type = ENETC_MAC_FILTER_TYPE_ALL;
+ }
+
+ enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);
+
+ if (uc_promisc) {
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (mc_promisc)
+ enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);
+
+ /* Set new MAC filter */
+ enetc4_pf_set_mac_filter(pf, type);
+
+ rtnl_unlock();
+}
+
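+/* .ndo_set_rx_mode runs in atomic context with the addr_list lock held,
+ * while enetc4_psi_do_set_rx_mode() above needs the RTNL lock and issues
+ * NTMP commands, so the work is deferred to the SI's workqueue.
+ */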
+static void enetc4_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ queue_work(si->workqueue, &si->rx_mode_task);
+}
+
+static int enetc4_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+
+ enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
static const struct net_device_ops enetc4_ndev_ops = {
.ndo_open = enetc_open,
.ndo_stop = enetc_close,
.ndo_start_xmit = enetc_xmit,
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc4_pf_set_rx_mode,
+ .ndo_set_features = enetc4_pf_set_features,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
};
static struct phylink_pcs *
@@ -617,6 +903,19 @@ static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
enetc_mdiobus_destroy(pf);
}
+static int enetc4_psi_wq_task_init(struct enetc_si *si)
+{
+ char wq_name[24];
+
+ INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
+ snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
+ si->workqueue = create_singlethread_workqueue(wq_name);
+ if (!si->workqueue)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int enetc4_pf_netdev_create(struct enetc_si *si)
{
struct device *dev = &si->pdev->dev;
@@ -657,6 +956,12 @@ static int enetc4_pf_netdev_create(struct enetc_si *si)
if (err)
goto err_link_init;
+ err = enetc4_psi_wq_task_init(si);
+ if (err) {
+ dev_err(dev, "Failed to init workqueue\n");
+ goto err_wq_init;
+ }
+
err = register_netdev(ndev);
if (err) {
dev_err(dev, "Failed to register netdev\n");
@@ -666,6 +971,8 @@ static int enetc4_pf_netdev_create(struct enetc_si *si)
return 0;
err_reg_netdev:
+ destroy_workqueue(si->workqueue);
+err_wq_init:
enetc4_link_deinit(priv);
err_link_init:
enetc_free_msix(priv);
@@ -683,11 +990,18 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si)
struct net_device *ndev = si->ndev;
unregister_netdev(ndev);
+ cancel_work(&si->rx_mode_task);
+ destroy_workqueue(si->workqueue);
enetc4_link_deinit(priv);
enetc_free_msix(priv);
free_netdev(ndev);
}
+static const struct enetc_si_ops enetc4_psi_ops = {
+ .get_rss_table = enetc4_get_rss_table,
+ .set_rss_table = enetc4_set_rss_table,
+};
+
static int enetc4_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -712,6 +1026,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
"Couldn't map PF only space\n");
si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc4_psi_ops;
err = enetc_get_driver_data(si);
if (err)
return dev_err_probe(dev, err,
@@ -728,14 +1043,28 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
enetc_get_si_caps(si);
- return enetc4_pf_netdev_create(si);
+ err = enetc4_pf_netdev_create(si);
+ if (err)
+ goto err_netdev_create;
+
+ enetc_create_debugfs(si);
+
+ return 0;
+
+err_netdev_create:
+ enetc4_pf_free(pf);
+
+ return err;
}
static void enetc4_pf_remove(struct pci_dev *pdev)
{
struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ enetc_remove_debugfs(si);
enetc4_pf_netdev_destroy(si);
+ enetc4_pf_free(pf);
}
static const struct pci_device_id enetc4_pf_id_table[] = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 20bfdf7fb4b4..3d5f31879d5c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -60,6 +60,44 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct netc_cbdr_regs regs;
+
+ user->cbdr_num = 1;
+ user->dev = dev;
+ user->ring = devm_kcalloc(dev, user->cbdr_num,
+ sizeof(struct netc_cbdr), GFP_KERNEL);
+ if (!user->ring)
+ return -ENOMEM;
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ regs.pir = hw->reg + ENETC_SICBDRPIR;
+ regs.cir = hw->reg + ENETC_SICBDRCIR;
+ regs.mr = hw->reg + ENETC_SICBDRMR;
+ regs.bar0 = hw->reg + ENETC_SICBDRBAR0;
+ regs.bar1 = hw->reg + ENETC_SICBDRBAR1;
+ regs.lenr = hw->reg + ENETC_SICBDRLENR;
+
+ return ntmp_init_cbdr(user->ring, dev, &regs);
+}
+EXPORT_SYMBOL_GPL(enetc4_setup_cbdr);
+
+void enetc4_teardown_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ ntmp_free_cbdr(user->ring);
+ user->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr);
+
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
struct enetc_cbd *dest_cbd;
@@ -256,3 +294,15 @@ int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);
+
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_get_rss_table);
+
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_set_rss_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index ece3ae28ba82..d38cd36be4a6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -141,7 +141,7 @@ static const struct {
static const struct {
int reg;
- char name[ETH_GSTRING_LEN];
+ char name[ETH_GSTRING_LEN] __nonstring;
} enetc_port_counters[] = {
{ ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
{ ENETC_PM_RALN(0), "MAC rx alignment errors" },
@@ -264,7 +264,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
- ethtool_puts(&data, enetc_port_counters[i].name);
+ ethtool_cpy(&data, enetc_port_counters[i].name);
break;
}
@@ -625,6 +625,29 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
return 0;
}
+/* i.MX95 ENETC does not support the RFS table, but the ingress port
+ * filter table can be used to implement a Wake-on-LAN filter or to drop
+ * matched flows, so the implementation differs from enetc_get_rxnfc()
+ * and enetc_set_rxnfc(). Therefore, add enetc4_get_rxnfc() for the
+ * ENETC v4 PF.
+ */
+static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->num_rx_rings;
+ break;
+ case ETHTOOL_GRXFH:
+ return enetc_get_rsshash(rxnfc);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -677,36 +700,53 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
+static int enetc_get_rss_key_base(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return ENETC_PRSSK(0);
+
+ return ENETC4_PRSSKR(0);
+}
+
+static void enetc_get_rss_key(struct enetc_si *si, u8 *key)
+{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, base + i * 4);
+}
+
static int enetc_get_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- int err = 0, i;
+ struct enetc_si *si = priv->si;
+ int err = 0;
/* return hash function */
rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
- if (rxfh->key && hw->port)
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- ((u32 *)rxfh->key)[i] = enetc_port_rd(hw,
- ENETC_PRSSK(i));
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_get_rss_key(si, rxfh->key);
/* return RSS table */
if (rxfh->indir)
- err = enetc_get_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->get_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes)
{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
int i;
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+ enetc_port_wr(hw, base + i * 4, ((u32 *)bytes)[i]);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);
@@ -715,17 +755,16 @@ static int enetc_set_rxfh(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
int err = 0;
/* set hash key, if PF */
- if (rxfh->key && hw->port)
- enetc_set_rss_key(hw, rxfh->key);
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_set_rss_key(si, rxfh->key);
/* set RSS table */
if (rxfh->indir)
- err = enetc_set_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->set_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
@@ -1240,6 +1279,11 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = {
.set_wol = enetc_set_wol,
.get_pauseparam = enetc_get_pauseparam,
.set_pauseparam = enetc_set_pauseparam,
+ .get_rxnfc = enetc4_get_rxnfc,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 203862ec1114..f63a29e2e031 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -72,30 +72,6 @@ static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
}
-static int enetc_mac_addr_hash_idx(const u8 *addr)
-{
- u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
- u64 mask = 0;
- int res = 0;
- int i;
-
- for (i = 0; i < 8; i++)
- mask |= BIT_ULL(i * 6);
-
- for (i = 0; i < 6; i++)
- res |= (hweight64(fold & (mask << i)) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
-{
- filter->mac_addr_cnt = 0;
-
- bitmap_zero(filter->mac_hash_table,
- ENETC_MADDR_HASH_TBL_SZ);
-}
-
static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
const unsigned char *addr)
{
@@ -104,16 +80,6 @@ static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
filter->mac_addr_cnt++;
}
-static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
- const unsigned char *addr)
-{
- int idx = enetc_mac_addr_hash_idx(addr);
-
- /* add hash table entry */
- __set_bit(idx, filter->mac_hash_table);
- filter->mac_addr_cnt++;
-}
-
static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
@@ -250,67 +216,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
}
-static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
- unsigned long hash)
-{
- enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
- enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
-}
-
-static int enetc_vid_hash_idx(unsigned int vid)
-{
- int res = 0;
- int i;
-
- for (i = 0; i < 6; i++)
- res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
-{
- int i;
-
- if (rehash) {
- bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
- int hidx = enetc_vid_hash_idx(i);
-
- __set_bit(hidx, pf->vlan_ht_filter);
- }
- }
-
- enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
-}
-
-static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- int idx;
-
- __set_bit(vid, pf->active_vlans);
-
- idx = enetc_vid_hash_idx(vid);
- if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
- enetc_sync_vlan_ht_filter(pf, false);
-
- return 0;
-}
-
-static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
-
- __clear_bit(vid, pf->active_vlans);
- enetc_sync_vlan_ht_filter(pf, true);
-
- return 0;
-}
-
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -549,7 +454,6 @@ static void enetc_mac_enable(struct enetc_si *si, bool en)
static void enetc_configure_port(struct enetc_pf *pf)
{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
enetc_configure_port_mac(pf->si);
@@ -557,8 +461,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
enetc_port_si_configure(pf->si);
/* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc_set_rss_key(hw, hash_key);
+ enetc_set_default_rss_key(pf);
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
@@ -728,6 +631,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static struct phylink_pcs *
@@ -939,6 +844,11 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
return enetc_ierb_register_pf(ierb_pdev, pdev);
}
+static const struct enetc_si_ops enetc_psi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
{
struct enetc_si *si;
@@ -958,6 +868,7 @@ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
}
si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc_psi_ops;
err = enetc_get_driver_data(si);
if (err) {
dev_err(&pdev->dev, "Could not get PF driver data\n");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index a26a12863855..ae407e9e9ee7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -5,19 +5,8 @@
#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
-
-enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
-#define ENETC_MADDR_HASH_TBL_SZ 64
-struct enetc_mac_filter {
- union {
- char mac_addr[ETH_ALEN];
- DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
- };
- int mac_addr_cnt;
-};
-
#define ENETC_VLAN_HT_SIZE 64
enum enetc_vf_flags {
@@ -34,6 +23,7 @@ struct enetc_port_caps {
int num_msix;
int num_rx_bdr;
int num_tx_bdr;
+ int mac_filter_num;
};
struct enetc_pf;
@@ -71,6 +61,8 @@ struct enetc_pf {
struct enetc_port_caps caps;
const struct enetc_pf_ops *ops;
+
+ int num_mfe; /* number of MAC address filter table entries */
};
#define phylink_to_enetc_pf(config) \
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
index 3fd9b0727875..edf14a95cab7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -128,14 +128,14 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
if (si->hw_features & ENETC_SI_F_LSO)
priv->active_offloads |= ENETC_F_LSO;
- /* TODO: currently, i.MX95 ENETC driver does not support advanced features */
- if (!is_enetc_rev1(si)) {
- ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK);
- goto end;
+ if (si->num_rss) {
+ ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
}
- if (si->num_rss)
- ndev->hw_features |= NETIF_F_RXHASH;
+ /* TODO: currently, i.MX95 ENETC driver does not support advanced features */
+ if (!is_enetc_rev1(si))
+ goto end;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
@@ -341,5 +341,86 @@ void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
}
EXPORT_SYMBOL_GPL(enetc_phylink_destroy);
+void enetc_set_default_rss_key(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(pf->si, hash_key);
+}
+EXPORT_SYMBOL_GPL(enetc_set_default_rss_key);
+
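+/* Hash bit i is the parity of VID bits i and i+6: e.g. VID 1 (bit 0)
+ * and VID 64 (bit 6) both map to index 1, while VID 65, with both bits
+ * set, maps to index 0.
+ */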
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
+
+static void enetc_refresh_vlan_ht_filter(struct enetc_pf *pf)
+{
+ int i;
+
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+}
+
+static void enetc_set_si_vlan_ht_filter(struct enetc_si *si,
+ int si_id, u64 hash)
+{
+ struct enetc_hw *hw = &si->hw;
+ int high_reg_off, low_reg_off;
+
+ if (is_enetc_rev1(si)) {
+ low_reg_off = ENETC_PSIVHFR0(si_id);
+ high_reg_off = ENETC_PSIVHFR1(si_id);
+ } else {
+ low_reg_off = ENETC4_PSIVHFR0(si_id);
+ high_reg_off = ENETC4_PSIVHFR1(si_id);
+ }
+
+ enetc_port_wr(hw, low_reg_off, lower_32_bits(hash));
+ enetc_port_wr(hw, high_reg_off, upper_32_bits(hash));
+}
+
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_add_vid);
+
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (__test_and_clear_bit(vid, pf->active_vlans)) {
+ enetc_refresh_vlan_ht_filter(pf);
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_del_vid);
+
MODULE_DESCRIPTION("NXP ENETC PF common functionality driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
index 48f55ee743ad..96d4840a3107 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
@@ -12,6 +12,9 @@ void enetc_mdiobus_destroy(struct enetc_pf *pf);
int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
const struct phylink_mac_ops *ops);
void enetc_phylink_destroy(struct enetc_ndev_priv *priv);
+void enetc_set_default_rss_key(struct enetc_pf *pf);
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid);
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid);
static inline u16 enetc_get_ip_revision(struct enetc_hw *hw)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 3768752b6008..6c4b374bcb0e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -121,6 +121,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_vf_setup_tc,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
@@ -155,13 +157,20 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- if (si->num_rss)
+ if (si->num_rss) {
ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
/* pick up primary MAC address from SI */
enetc_load_primary_mac_addr(&si->hw, ndev);
}
+static const struct enetc_si_ops enetc_vsi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static int enetc_vf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -176,6 +185,7 @@ static int enetc_vf_probe(struct pci_dev *pdev,
si = pci_get_drvdata(pdev);
si->revision = ENETC_REV_1_0;
+ si->ops = &enetc_vsi_ops;
err = enetc_get_driver_data(si);
if (err) {
dev_err_probe(&pdev->dev, err,
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
new file mode 100644
index 000000000000..ba32c1bbd9e1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NETC NTMP (NETC Table Management Protocol) 2.0 Library
+ * Copyright 2025 NXP
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/iopoll.h>
+
+#include "ntmp_private.h"
+
+#define NETC_CBDR_TIMEOUT 1000 /* us */
+#define NETC_CBDR_DELAY_US 10
+#define NETC_CBDR_MR_EN BIT(31)
+
+#define NTMP_BASE_ADDR_ALIGN 128
+#define NTMP_DATA_ADDR_ALIGN 32
+
+/* Define NTMP Table ID */
+#define NTMP_MAFT_ID 1
+#define NTMP_RSST_ID 3
+
+/* Generic Update Actions for most tables */
+#define NTMP_GEN_UA_CFGEU BIT(0)
+#define NTMP_GEN_UA_STSEU BIT(1)
+
+#define NTMP_ENTRY_ID_SIZE 4
+#define RSST_ENTRY_NUM 64
+#define RSST_STSE_DATA_SIZE(n) ((n) * 8)
+#define RSST_CFGE_DATA_SIZE(n) (n)
+
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ int cbd_num = NETC_CBDR_BD_NUM;
+ size_t size;
+
+ size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
+ cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
+ GFP_KERNEL);
+ if (!cbdr->addr_base)
+ return -ENOMEM;
+
+ cbdr->dma_size = size;
+ cbdr->bd_num = cbd_num;
+ cbdr->regs = *regs;
+ cbdr->dev = dev;
+
+ /* The base address of the Control BD Ring must be 128 bytes aligned */
+ cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
+ cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
+ NTMP_BASE_ADDR_ALIGN);
+
+ cbdr->next_to_clean = 0;
+ cbdr->next_to_use = 0;
+ spin_lock_init(&cbdr->ring_lock);
+
+ /* Step 1: Configure the base address of the Control BD Ring */
+ netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
+ netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
+
+ /* Step 2: Configure the producer index register */
+ netc_write(cbdr->regs.pir, cbdr->next_to_clean);
+
+ /* Step 3: Configure the consumer index register */
+ netc_write(cbdr->regs.cir, cbdr->next_to_use);
+
+ /* Step 4: Configure the number of BDs of the Control BD Ring */
+ netc_write(cbdr->regs.lenr, cbdr->bd_num);
+
+ /* Step 5: Enable the Control BD Ring */
+ netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ntmp_init_cbdr);
+
+void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+ /* Disable the Control BD Ring */
+ netc_write(cbdr->regs.mr, 0);
+ dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
+ cbdr->dma_base);
+ memset(cbdr, 0, sizeof(*cbdr));
+}
+EXPORT_SYMBOL_GPL(ntmp_free_cbdr);
+
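+/* One BD is kept in reserve so that equal producer and consumer indices
+ * always mean "empty": e.g. with bd_num = 256 and both indices equal,
+ * 255 BDs are reported free.
+ */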
+static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
+{
+ return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
+ cbdr->bd_num) % cbdr->bd_num;
+}
+
+static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
+{
+ return &((union netc_cbd *)(cbdr->addr_base_align))[index];
+}
+
+static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
+{
+ union netc_cbd *cbd;
+ int i;
+
+ i = cbdr->next_to_clean;
+ while (netc_read(cbdr->regs.cir) != i) {
+ cbd = ntmp_get_cbd(cbdr, i);
+ memset(cbd, 0, sizeof(*cbd));
+ i = (i + 1) % cbdr->bd_num;
+ }
+
+ cbdr->next_to_clean = i;
+}
+
+static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
+{
+ union netc_cbd *cur_cbd;
+ struct netc_cbdr *cbdr;
+ int i, err;
+ u16 status;
+ u32 val;
+
+ /* Currently only i.MX95 ENETC is supported, and it only has one
+ * command BD ring
+ */
+ cbdr = &user->ring[0];
+
+ spin_lock_bh(&cbdr->ring_lock);
+
+ if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
+ ntmp_clean_cbdr(cbdr);
+
+ i = cbdr->next_to_use;
+ cur_cbd = ntmp_get_cbd(cbdr, i);
+ *cur_cbd = *cbd;
+ dma_wmb();
+
+ /* Update producer index of both software and hardware */
+ i = (i + 1) % cbdr->bd_num;
+ cbdr->next_to_use = i;
+ netc_write(cbdr->regs.pir, i);
+
+ err = read_poll_timeout_atomic(netc_read, val, val == i,
+ NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
+ true, cbdr->regs.cir);
+ if (unlikely(err))
+ goto cbdr_unlock;
+
+ dma_rmb();
+ /* Get the writeback command BD, because the caller may need
+ * to check some other fields of the response header.
+ */
+ *cbd = *cur_cbd;
+
+ /* Check the writeback error status */
+ status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
+ if (unlikely(status)) {
+ err = -EIO;
+ dev_err(user->dev, "Command BD error: 0x%04x\n", status);
+ }
+
+ ntmp_clean_cbdr(cbdr);
+ dma_wmb();
+
+cbdr_unlock:
+ spin_unlock_bh(&cbdr->ring_lock);
+
+ return err;
+}
+
+static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
+{
+ void *buf;
+
+ buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ &data->dma, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ data->buf = buf;
+ *buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
+
+ return 0;
+}
+
+static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
+{
+ dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ data->buf, data->dma);
+}
+
+static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
+ int len, int table_id, int cmd,
+ int access_method)
+{
+ dma_addr_t dma_align;
+
+ memset(cbd, 0, sizeof(*cbd));
+ dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
+ cbd->req_hdr.addr = cpu_to_le64(dma_align);
+ cbd->req_hdr.len = cpu_to_le32(len);
+ cbd->req_hdr.cmd = cmd;
+ cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
+ access_method);
+ cbd->req_hdr.table_id = table_id;
+ cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
+ NTMP_HDR_VER2);
+ /* For NTMP version 2.0 or later */
+ cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
+}
+
+static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
+ u8 qa, u16 ua)
+{
+ crd->update_act = cpu_to_le16(ua);
+ crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
+}
+
+static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
+ u8 qa, u16 ua, u32 entry_id)
+{
+ ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
+ rbe->entry_id = cpu_to_le32(entry_id);
+}
+
+static const char *ntmp_table_name(int tbl_id)
+{
+ switch (tbl_id) {
+ case NTMP_MAFT_ID:
+ return "MAC Address Filter Table";
+ case NTMP_RSST_ID:
+ return "RSS Table";
+ default:
+ return "Unknown Table";
+ }
+}
+
+static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u8 tbl_ver, u32 entry_id, u32 req_len,
+ u32 resp_len)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = max(req_len, resp_len),
+ };
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
+ tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev,
+ "Failed to delete entry 0x%x of %s, err: %pe",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+
+static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u32 len, struct ntmp_req_by_eid *req,
+ dma_addr_t dma, bool compare_eid)
+{
+ struct ntmp_cmn_resp_query *resp;
+ int cmd = NTMP_CMD_QUERY;
+ union netc_cbd cbd;
+ u32 entry_id;
+ int err;
+
+ entry_id = le32_to_cpu(req->entry_id);
+ if (le16_to_cpu(req->crd.update_act))
+ cmd = NTMP_CMD_QU;
+
+ /* Request header */
+ ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev,
+ "Failed to query entry 0x%x of %s, err: %pe\n",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+ return err;
+ }
+
+ /* For a few tables, the first field of the response data is not
+ * entry_id, so return success directly without comparing.
+ */
+ if (!compare_eid)
+ return 0;
+
+ resp = (struct ntmp_cmn_resp_query *)req;
+ if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
+ dev_err(user->dev,
+ "%s: query EID 0x%x doesn't match response EID 0x%x\n",
+ ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_req_add),
+ };
+ struct maft_req_add *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set mac address filter table request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
+ req->keye = maft->keye;
+ req->cfge = maft->cfge;
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
+ entry_id, ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);
+
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_resp_query),
+ };
+ struct maft_resp_query *resp;
+ struct ntmp_req_by_eid *req;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
+ err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
+ NTMP_LEN(sizeof(*req), data.size),
+ req, data.dma, true);
+ if (err)
+ goto end;
+
+ resp = (struct maft_resp_query *)req;
+ maft->keye = resp->keye;
+ maft->cfge = resp->cfge;
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);
+
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
+ entry_id, NTMP_EID_REQ_LEN, 0);
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);
+
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct rsst_req_update *req;
+ union netc_cbd cbd;
+ int err, i;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only accepts a full 64-entry table */
+ return -EINVAL;
+
+ data.size = struct_size(req, groups, count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
+ NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
+ for (i = 0; i < count; i++)
+ req->groups[i] = (u8)(table[i]);
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
+ ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);
+
+int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err, i;
+ u8 *group;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only accepts a full 64-entry table */
+ return -EINVAL;
+
+ data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
+ RSST_CFGE_DATA_SIZE(count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
+ NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
+ ERR_PTR(err));
+ goto end;
+ }
+
+ group = (u8 *)req;
+ group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
+ for (i = 0; i < count; i++)
+ table[i] = group[i];
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);
+
+MODULE_DESCRIPTION("NXP NETC Library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
new file mode 100644
index 000000000000..34394e40fddd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * NTMP table request and response data buffer formats
+ * Copyright 2025 NXP
+ */
+
+#ifndef __NTMP_PRIVATE_H
+#define __NTMP_PRIVATE_H
+
+#include <linux/bitfield.h>
+#include <linux/fsl/ntmp.h>
+
+#define NTMP_EID_REQ_LEN 8
+#define NETC_CBDR_BD_NUM 256
+
+union netc_cbd {
+ struct {
+ __le64 addr;
+ __le32 len;
+#define NTMP_RESP_LEN GENMASK(19, 0)
+#define NTMP_REQ_LEN GENMASK(31, 20)
+#define NTMP_LEN(req, resp) (FIELD_PREP(NTMP_REQ_LEN, (req)) | \
+ ((resp) & NTMP_RESP_LEN))
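+/* e.g. NTMP_LEN(8, 256) == 0x00800100: request length 8 in bits 31:20,
+ * response length 256 in bits 19:0
+ */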
+ u8 cmd;
+#define NTMP_CMD_DELETE BIT(0)
+#define NTMP_CMD_UPDATE BIT(1)
+#define NTMP_CMD_QUERY BIT(2)
+#define NTMP_CMD_ADD BIT(3)
+#define NTMP_CMD_QU (NTMP_CMD_QUERY | NTMP_CMD_UPDATE)
+ u8 access_method;
+#define NTMP_ACCESS_METHOD GENMASK(7, 4)
+#define NTMP_AM_ENTRY_ID 0
+#define NTMP_AM_EXACT_KEY 1
+#define NTMP_AM_SEARCH 2
+#define NTMP_AM_TERNARY_KEY 3
+ u8 table_id;
+ u8 ver_cci_rr;
+#define NTMP_HDR_VERSION GENMASK(5, 0)
+#define NTMP_HDR_VER2 2
+#define NTMP_CCI BIT(6)
+#define NTMP_RR BIT(7)
+ __le32 resv[3];
+ __le32 npf;
+#define NTMP_NPF BIT(15)
+ } req_hdr; /* NTMP Request Message Header Format */
+
+ struct {
+ __le32 resv0[3];
+ __le16 num_matched;
+ __le16 error_rr;
+#define NTMP_RESP_ERROR GENMASK(11, 0)
+#define NTMP_RESP_RR BIT(15)
+ __le32 resv1[4];
+ } resp_hdr; /* NTMP Response Message Header Format */
+};
+
+struct ntmp_dma_buf {
+ struct device *dev;
+ size_t size;
+ void *buf;
+ dma_addr_t dma;
+};
+
+struct ntmp_cmn_req_data {
+ __le16 update_act;
+ u8 dbg_opt;
+ u8 tblv_qact;
+#define NTMP_QUERY_ACT GENMASK(3, 0)
+#define NTMP_TBL_VER GENMASK(7, 4)
+#define NTMP_TBLV_QACT(v, a) (FIELD_PREP(NTMP_TBL_VER, (v)) | \
+ ((a) & NTMP_QUERY_ACT))
+};
+
+struct ntmp_cmn_resp_query {
+ __le32 entry_id;
+};
+
+/* Generic structure for request data by entry ID */
+struct ntmp_req_by_eid {
+ struct ntmp_cmn_req_data crd;
+ __le32 entry_id;
+};
+
+/* MAC Address Filter Table Request Data Buffer Format of Add action */
+struct maft_req_add {
+ struct ntmp_req_by_eid rbe;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* MAC Address Filter Table Response Data Buffer Format of Query action */
+struct maft_resp_query {
+ __le32 entry_id;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* RSS Table Request Data Buffer Format of Update action */
+struct rsst_req_update {
+ struct ntmp_req_by_eid rbe;
+ u8 groups[];
+};
+
+#endif
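Both views of union netc_cbd above work out to 32 bytes (8 + 4 + 4 x 1 +
12 + 4 for the request header; 12 + 2 + 2 + 16 for the response header),
so a ring of NETC_CBDR_BD_NUM (256) descriptors occupies 8 KiB plus the
128-byte alignment slack allocated in ntmp_init_cbdr(). A build-time check
along these lines (illustrative, not part of the patch) would pin the
layout down:

static_assert(sizeof(union netc_cbd) == 32, "NTMP CBD must be 32 bytes");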
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index deb35b38c976..bcbcad613512 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2061,15 +2061,13 @@ static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&priv->reset_task);
}
-static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
break;
@@ -2082,7 +2080,7 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
priv->hwts_rx_en = 0;
@@ -2096,44 +2094,23 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
priv->hwts_rx_en = 1;
reset_gfar(netdev);
}
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- config.flags = 0;
- config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->hwts_rx_en ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
+ config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_set(dev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return gfar_hwtstamp_get(dev, rq);
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
+ return 0;
}
/* Interrupt Handler for Transmit complete */
@@ -3174,7 +3151,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
- .ndo_eth_ioctl = gfar_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
@@ -3182,6 +3159,8 @@ static const struct net_device_ops gfar_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
#endif
+ .ndo_hwtstamp_get = gfar_hwtstamp_get,
+ .ndo_hwtstamp_set = gfar_hwtstamp_set,
};
/* Set up the ethernet device structure, private data,
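With the timestamping commands routed through the new ndo hooks, all
gfar_ioctl() still did was forward MII requests, which the generic
phy_do_ioctl_running() helper already covers. Roughly (paraphrased from
drivers/net/phy/phy_device.c; treat the exact body as an assumption):

int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -ENODEV;	/* gfar_ioctl() returned -EINVAL here */

	return phy_do_ioctl(dev, ifr, cmd); /* -ENODEV without a PHY */
}

One behavioral nuance of the substitution is that a down interface now
reports -ENODEV instead of -EINVAL.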
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index eae1a7595a69..3c1da0cf3f61 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -67,7 +67,7 @@ static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
-static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array = {
"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
@@ -113,7 +113,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
i);
for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
- ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
+ ethtool_cpy(&s, gve_gstrings_adminq_stats[i]);
break;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index c3791cf23c87..dc35a23ec47f 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -268,7 +268,8 @@ static void gve_stats_report_schedule(struct gve_priv *priv)
static void gve_stats_report_timer(struct timer_list *t)
{
- struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
+ struct gve_priv *priv = timer_container_of(priv, t,
+ stats_report_timer);
mod_timer(&priv->stats_report_timer,
round_jiffies(jiffies +
@@ -1830,7 +1831,7 @@ static void gve_turndown(struct gve_priv *priv)
/* Stop tx queues */
netif_tx_disable(priv->dev);
- xdp_features_clear_redirect_target(priv->dev);
+ xdp_features_clear_redirect_target_locked(priv->dev);
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
@@ -1902,7 +1903,7 @@ static void gve_turnup(struct gve_priv *priv)
}
if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
- xdp_features_set_redirect_target(priv->dev, false);
+ xdp_features_set_redirect_target_locked(priv->dev, false);
gve_set_napi_enabled(priv);
}
@@ -2153,7 +2154,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
};
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
- .value = cpu_to_be64(priv->rx[0].fill_cnt),
+ .value = cpu_to_be64(priv->rx[idx].fill_cnt),
.queue_id = cpu_to_be32(idx),
};
}
@@ -2185,7 +2186,7 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
xdp_features = 0;
}
- xdp_set_features_flag(priv->dev, xdp_features);
+ xdp_set_features_flag_locked(priv->dev, xdp_features);
}
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
@@ -2659,6 +2660,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto abort_with_wq;
+ if (!gve_is_gqi(priv) && !gve_is_qpl(priv))
+ dev->netmem_tx = true;
+
err = register_netdev(dev);
if (err)
goto abort_with_gve_init;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 2eba868d8037..9d705d94b065 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -660,7 +660,8 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
goto err;
dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
- dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), pkt,
+ dma[pkt->num_bufs], addr);
++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
@@ -763,6 +764,9 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
s16 completion_tag;
pkt = gve_alloc_pending_packet(tx);
+ if (!pkt)
+ return -ENOMEM;
+
pkt->skb = skb;
completion_tag = pkt - tx->dqo.pending_packets;
@@ -1038,8 +1042,9 @@ static void gve_unmap_packet(struct device *dev,
dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
for (i = 1; i < pkt->num_bufs; i++) {
- dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE, 0);
}
pkt->num_bufs = 0;
}
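[Editor's note] The netmem_dma_unmap_* conversions above make the DQO TX unmap path safe for netmem frags: as I understand the netmem TX support, net_iov-backed frags are owned by the memory provider and are not DMA-mapped by the driver, so unmap must be skipped for them. A conceptual sketch of that check (not the kernel implementation; the real helpers decide from the stored unmap state rather than a per-call test like this):

static void unmap_tx_frag(struct device *dev, netmem_ref netmem,
			  dma_addr_t dma, u32 len)
{
	/* only unmap what the driver itself mapped; leave net_iov
	 * (provider-owned memory) alone
	 */
	if (!netmem_is_net_iov(netmem))
		dma_unmap_page_attrs(dev, dma, len, DMA_TO_DEVICE, 0);
}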
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index a0bcfb5a713d..ff3295b60a69 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -61,6 +61,8 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
return -EBUSY;
}
+ netif_device_detach(priv->netdev);
+
priv->reset_type = type;
set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
@@ -91,6 +93,8 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
return ret;
}
+ netif_device_attach(priv->netdev);
+
dev_info(&priv->pdev->dev, "reset done\n");
return ret;
}
@@ -117,16 +121,13 @@ void hbg_err_reset(struct hbg_priv *priv)
if (running)
dev_close(priv->netdev);
- hbg_reset(priv);
-
- /* in hbg_pci_err_detected(), we will detach first,
- * so we need to attach before open
- */
- if (!netif_device_present(priv->netdev))
- netif_device_attach(priv->netdev);
+ if (hbg_reset(priv))
+ goto err_unlock;
if (running)
dev_open(priv->netdev, NULL);
+
+err_unlock:
rtnl_unlock();
}
@@ -160,7 +161,6 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
hbg_err_reset(priv);
- netif_device_attach(netdev);
return PCI_ERS_RESULT_RECOVERED;
}
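[Editor's note] The hbg_err.c changes pair netif_device_detach() in reset-prepare with netif_device_attach() in reset-done, so hbg_err_reset() no longer needs the ad-hoc attach and can simply skip dev_open() when the reset fails. The generic bracket looks like this (do_hw_reset() is a hypothetical stand-in):

static int reset_device(struct net_device *netdev)
{
	int err;

	netif_device_detach(netdev);	/* mark absent, freeze TX */

	err = do_hw_reset(netdev);	/* hypothetical HW reset */
	if (err)
		return err;		/* stay detached on failure */

	netif_device_attach(netdev);	/* resume if it was running */
	return 0;
}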
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index 8f1107b85fbb..55520053270a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -317,6 +317,9 @@ static void hbg_update_stats_by_info(struct hbg_priv *priv,
const struct hbg_ethtool_stats *stats;
u32 i;
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
for (i = 0; i < info_len; i++) {
stats = &info[i];
if (!stats->reg)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d98f8d3ce7c8..e905f10b894e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2075,7 +2075,7 @@ static void hns_nic_task_schedule(struct hns_nic_priv *priv)
static void hns_nic_service_timer(struct timer_list *t)
{
- struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
+ struct hns_nic_priv *priv = timer_container_of(priv, t, service_timer);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3e28a08934ab..a7de67699a01 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -4503,7 +4503,7 @@ static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
static void hclge_reset_timer(struct timer_list *t)
{
- struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclge_dev *hdev = timer_container_of(hdev, t, reset_timer);
/* if default_reset_request has no value, it means that this reset
 * request has already been handled, so just return here
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index dada42e7e0ec..c4f35e8e2177 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2057,7 +2057,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
static void hclgevf_reset_timer(struct timer_list *t)
{
- struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+ struct hclgevf_dev *hdev = timer_container_of(hdev, t, reset_timer);
hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
hclgevf_reset_task_schedule(hdev);
diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig
index c05fce15eb51..7d0feb1da158 100644
--- a/drivers/net/ethernet/huawei/Kconfig
+++ b/drivers/net/ethernet/huawei/Kconfig
@@ -16,5 +16,6 @@ config NET_VENDOR_HUAWEI
if NET_VENDOR_HUAWEI
source "drivers/net/ethernet/huawei/hinic/Kconfig"
+source "drivers/net/ethernet/huawei/hinic3/Kconfig"
endif # NET_VENDOR_HUAWEI
diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile
index 2549ad5afe6d..59865b882879 100644
--- a/drivers/net/ethernet/huawei/Makefile
+++ b/drivers/net/ethernet/huawei/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_HINIC) += hinic/
+obj-$(CONFIG_HINIC3) += hinic3/
diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig
new file mode 100644
index 000000000000..ce4331d1387b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Huawei driver configuration
+#
+
+config HINIC3
+ tristate "Huawei 3rd generation network adapters (HINIC3) support"
+ # Fields of HW and management structures are little endian and are
+ # currently not converted
+ depends on !CPU_BIG_ENDIAN
+ depends on X86 || ARM64 || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ select AUXILIARY_BUS
+ select PAGE_POOL
+ help
+ This driver supports HiNIC 3rd gen Network Adapter (HINIC3).
+ The driver is supported on X86_64 and ARM64 little-endian platforms.
+
+ To compile this driver as a module, choose M here.
+ The module will be called hinic3.
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
new file mode 100644
index 000000000000..509dfbfb0e96
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+obj-$(CONFIG_HINIC3) += hinic3.o
+
+hinic3-objs := hinic3_common.o \
+ hinic3_hw_cfg.o \
+ hinic3_hw_comm.o \
+ hinic3_hwdev.o \
+ hinic3_hwif.o \
+ hinic3_irq.o \
+ hinic3_lld.o \
+ hinic3_main.o \
+ hinic3_mbox.o \
+ hinic3_netdev_ops.o \
+ hinic3_nic_cfg.o \
+ hinic3_nic_io.o \
+ hinic3_queue_common.o \
+ hinic3_rx.o \
+ hinic3_tx.o \
+ hinic3_wq.o
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
new file mode 100644
index 000000000000..0aa42068728c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include "hinic3_common.h"
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_addr_t paddr, align_paddr;
+ void *vaddr, *align_vaddr;
+ u32 real_size = size;
+
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ if (align_paddr == paddr) {
+ align_vaddr = vaddr;
+ goto out;
+ }
+
+ dma_free_coherent(dev, real_size, vaddr, paddr);
+
+ /* reallocate with padding so the buffer can be realigned */
+ real_size = size + align;
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ align_vaddr = vaddr + (align_paddr - paddr);
+
+out:
+ mem_align->real_size = real_size;
+ mem_align->ori_vaddr = vaddr;
+ mem_align->ori_paddr = paddr;
+ mem_align->align_vaddr = align_vaddr;
+ mem_align->align_paddr = align_paddr;
+
+ return 0;
+}
+
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_free_coherent(dev, mem_align->real_size,
+ mem_align->ori_vaddr, mem_align->ori_paddr);
+}
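[Editor's note] hinic3_dma_zalloc_coherent_align() first tries a plain allocation and only pays the extra align bytes when the returned DMA address happens to be misaligned; the aligned pair is then derived by rounding the DMA address up and shifting the virtual pointer by the same offset. The arithmetic, as a standalone check:

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t paddr = 0x1004;	/* example misaligned DMA address */
	uintptr_t align = 0x100;
	uintptr_t align_paddr = ALIGN_UP(paddr, align);

	assert(align_paddr == 0x1100);
	/* the virtual pointer moves by the same offset */
	assert(align_paddr - paddr == 0xfc);
	return 0;
}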
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
new file mode 100644
index 000000000000..bb795dace04c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_COMMON_H_
+#define _HINIC3_COMMON_H_
+
+#include <linux/device.h>
+
+#define HINIC3_MIN_PAGE_SIZE 0x1000
+
+struct hinic3_dma_addr_align {
+ u32 real_size;
+
+ void *ori_vaddr;
+ dma_addr_t ori_paddr;
+
+ void *align_vaddr;
+ dma_addr_t align_paddr;
+};
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align);
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
new file mode 100644
index 000000000000..87d9450c30ca
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.supp_svcs_bitmap &
+ BIT(HINIC3_SERVICE_T_NIC);
+}
+
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs;
+}
+
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.port_id;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
new file mode 100644
index 000000000000..e017b1ae9f05
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_CFG_H_
+#define _HINIC3_HW_CFG_H_
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+struct hinic3_hwdev;
+
+struct hinic3_irq {
+ u32 irq_id;
+ u16 msix_entry_idx;
+ bool allocated;
+};
+
+struct hinic3_irq_info {
+ struct hinic3_irq *irq;
+ u16 num_irq;
+ /* device max irq number */
+ u16 num_irq_hw;
+ /* protect irq alloc and free */
+ struct mutex irq_mutex;
+};
+
+struct hinic3_nic_service_cap {
+ u16 max_sqs;
+};
+
+/* Device capabilities */
+struct hinic3_dev_cap {
+ /* Bitmasks of services supported by device */
+ u16 supp_svcs_bitmap;
+ /* Physical port */
+ u8 port_id;
+ struct hinic3_nic_service_cap nic_svc_cap;
+};
+
+struct hinic3_cfg_mgmt_info {
+ struct hinic3_irq_info irq_info;
+ struct hinic3_dev_cap cap;
+};
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num);
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id);
+
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev);
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev);
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
new file mode 100644
index 000000000000..434696ce7dc2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
+{
+ struct comm_cmd_func_reset func_reset = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ func_reset.func_id = func_id;
+ func_reset.reset_flag = reset_flag;
+
+ mgmt_msg_params_init_default(&msg_params, &func_reset,
+ sizeof(func_reset));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FUNC_RESET, &msg_params);
+ if (err || func_reset.head.status) {
+ dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n",
+ reset_flag, err, func_reset.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
new file mode 100644
index 000000000000..c33a1c77da9c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_COMM_H_
+#define _HINIC3_HW_COMM_H_
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_hwdev;
+
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
new file mode 100644
index 000000000000..22c84093efa2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_INTF_H_
+#define _HINIC3_HW_INTF_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define MGMT_MSG_CMD_OP_SET 1
+#define MGMT_MSG_CMD_OP_GET 0
+
+#define MGMT_STATUS_PF_SET_VF_ALREADY 0x4
+#define MGMT_STATUS_EXIST 0x6
+#define MGMT_STATUS_CMD_UNSUPPORTED 0xFF
+
+#define MGMT_MSG_POLLING_TIMEOUT 0
+
+struct mgmt_msg_head {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct mgmt_msg_params {
+ const void *buf_in;
+ u32 in_size;
+ void *buf_out;
+ u32 expected_out_size;
+ u32 timeout_ms;
+};
+
+/* CMDQ MODULE_TYPE */
+enum mgmt_mod_type {
+ /* HW communication module */
+ MGMT_MOD_COMM = 0,
+ /* L2NIC module */
+ MGMT_MOD_L2NIC = 1,
+ /* Configuration module */
+ MGMT_MOD_CFGM = 7,
+ MGMT_MOD_HILINK = 14,
+};
+
+static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params,
+ void *inout_buf, u32 buf_size)
+{
+ msg_params->buf_in = inout_buf;
+ msg_params->buf_out = inout_buf;
+ msg_params->in_size = buf_size;
+ msg_params->expected_out_size = buf_size;
+ msg_params->timeout_ms = 0;
+}
+
+/* COMM commands between driver and firmware */
+enum comm_cmd {
+ /* Commands for clearing FLR and resources */
+ COMM_CMD_FUNC_RESET = 0,
+ COMM_CMD_FEATURE_NEGO = 1,
+ COMM_CMD_FLUSH_DOORBELL = 2,
+ COMM_CMD_START_FLUSH = 3,
+ COMM_CMD_GET_GLOBAL_ATTR = 5,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE = 7,
+
+ /* Driver Configuration Commands */
+ COMM_CMD_SET_CMDQ_CTXT = 20,
+ COMM_CMD_SET_VAT = 21,
+ COMM_CMD_CFG_PAGESIZE = 22,
+ COMM_CMD_CFG_MSIX_CTRL_REG = 23,
+ COMM_CMD_SET_CEQ_CTRL_REG = 24,
+ COMM_CMD_SET_DMA_ATTR = 25,
+};
+
+enum comm_func_reset_bits {
+ COMM_FUNC_RESET_BIT_FLUSH = BIT(0),
+ COMM_FUNC_RESET_BIT_MQM = BIT(1),
+ COMM_FUNC_RESET_BIT_SMF = BIT(2),
+ COMM_FUNC_RESET_BIT_PF_BW_CFG = BIT(3),
+
+ COMM_FUNC_RESET_BIT_COMM = BIT(10),
+ /* clear mbox and aeq; COMM_FUNC_RESET_BIT_COMM must also be set */
+ COMM_FUNC_RESET_BIT_COMM_MGMT_CH = BIT(11),
+ /* clear cmdq and ceq; COMM_FUNC_RESET_BIT_COMM must also be set */
+ COMM_FUNC_RESET_BIT_COMM_CMD_CH = BIT(12),
+ COMM_FUNC_RESET_BIT_NIC = BIT(13),
+};
+
+struct comm_cmd_func_reset {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+ u64 reset_flag;
+};
+
+#define COMM_MAX_FEATURE_QWORD 4
+struct comm_cmd_feature_nego {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[COMM_MAX_FEATURE_QWORD];
+};
+
+/* Services supported by HW. HW uses these values when delivering events.
+ * HW supports multiple services that are not yet supported by the driver
+ * (e.g. RoCE).
+ */
+enum hinic3_service_type {
+ HINIC3_SERVICE_T_NIC = 0,
+ /* MAX is only used by SW for array sizes. */
+ HINIC3_SERVICE_T_MAX = 1,
+};
+
+#endif
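[Editor's note] mgmt_msg_params_init_default() wires one struct as both request and reply, which fits commands whose firmware response reuses the request layout (every command in this series). For a command with a distinct reply, the fields can be filled directly; a sketch with hypothetical request/reply structs:

struct example_req { struct mgmt_msg_head head; u16 func_id; };
struct example_rsp { struct mgmt_msg_head head; u32 value; };

static void example_params(struct mgmt_msg_params *p,
			   struct example_req *req,
			   struct example_rsp *rsp)
{
	p->buf_in = req;
	p->in_size = sizeof(*req);
	p->buf_out = rsp;
	p->expected_out_size = sizeof(*rsp);
	p->timeout_ms = MGMT_MSG_POLLING_TIMEOUT;	/* 0, matching init_default */
}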
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
new file mode 100644
index 000000000000..6e8788a64925
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+int hinic3_init_hwdev(struct pci_dev *pdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
new file mode 100644
index 000000000000..62e2745e9316
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWDEV_H_
+#define _HINIC3_HWDEV_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_cmdqs;
+struct hinic3_hwif;
+
+enum hinic3_event_service_type {
+ HINIC3_EVENT_SRV_COMM = 0,
+ HINIC3_EVENT_SRV_NIC = 1
+};
+
+#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type))
+
+/* driver-specific data of pci_dev */
+struct hinic3_pcidev {
+ struct pci_dev *pdev;
+ struct hinic3_hwdev *hwdev;
+ /* Auxiliary devices */
+ struct hinic3_adev *hadev[HINIC3_SERVICE_T_MAX];
+
+ void __iomem *cfg_reg_base;
+ void __iomem *intr_reg_base;
+ void __iomem *db_base;
+ u64 db_dwqe_len;
+ u64 db_base_phy;
+
+ /* lock for attach/detach uld */
+ struct mutex pdev_mutex;
+ unsigned long state;
+};
+
+struct hinic3_hwdev {
+ struct hinic3_pcidev *adapter;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int dev_id;
+ struct hinic3_hwif *hwif;
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ struct hinic3_aeqs *aeqs;
+ struct hinic3_ceqs *ceqs;
+ struct hinic3_mbox *mbox;
+ struct hinic3_cmdqs *cmdqs;
+ struct workqueue_struct *workq;
+ /* protect channel init and uninit */
+ spinlock_t channel_lock;
+ u64 features[COMM_MAX_FEATURE_QWORD];
+ u32 wq_page_size;
+ u8 max_cmdq;
+ ulong func_state;
+};
+
+struct hinic3_event_info {
+ /* enum hinic3_event_service_type */
+ u16 service;
+ u16 type;
+ u8 event_data[104];
+};
+
+struct hinic3_adev {
+ struct auxiliary_device adev;
+ struct hinic3_hwdev *hwdev;
+ enum hinic3_service_type svc_type;
+
+ void (*event)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event);
+};
+
+int hinic3_init_hwdev(struct pci_dev *pdev);
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev);
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev);
+
+#endif
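[Editor's note] HINIC3_SRV_EVENT_TYPE() packs the service in bits 31:16 and the event type in bits 15:0, so event dispatch can switch on a single scalar (as hinic3_nic_event() does in hinic3_main.c below). A quick check of the packing:

static_assert(HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC, 0) == 0x10000);
static_assert(HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC, 1) == 0x10001);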
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
new file mode 100644
index 000000000000..0865453bf0e7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag)
+{
+ /* Completed by later submission due to LoC limit. */
+}
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.func_global_idx;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
new file mode 100644
index 000000000000..513c9680e6b6
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWIF_H_
+#define _HINIC3_HWIF_H_
+
+#include <linux/build_bug.h>
+#include <linux/spinlock_types.h>
+
+struct hinic3_hwdev;
+
+enum hinic3_func_type {
+ HINIC3_FUNC_TYPE_VF = 1,
+};
+
+struct hinic3_db_area {
+ unsigned long *db_bitmap_array;
+ u32 db_max_areas;
+ /* protect doorbell area alloc and free */
+ spinlock_t idx_lock;
+};
+
+struct hinic3_func_attr {
+ enum hinic3_func_type func_type;
+ u16 func_global_idx;
+ u16 global_vf_id_of_pf;
+ u16 num_irqs;
+ u16 num_sq;
+ u8 port_to_port_idx;
+ u8 pci_intf_idx;
+ u8 ppf_idx;
+ u8 num_aeqs;
+ u8 num_ceqs;
+ u8 msix_flex_en;
+};
+
+static_assert(sizeof(struct hinic3_func_attr) == 20);
+
+struct hinic3_hwif {
+ u8 __iomem *cfg_regs_base;
+ u64 db_base_phy;
+ u64 db_dwqe_len;
+ u8 __iomem *db_base;
+ struct hinic3_db_area db_area;
+ struct hinic3_func_attr attr;
+};
+
+enum hinic3_msix_state {
+ HINIC3_MSIX_ENABLE,
+ HINIC3_MSIX_DISABLE,
+};
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag);
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
new file mode 100644
index 000000000000..8b92eed25edf
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+static int hinic3_poll(struct napi_struct *napi, int budget)
+{
+ struct hinic3_irq_cfg *irq_cfg =
+ container_of(napi, struct hinic3_irq_cfg, napi);
+ struct hinic3_nic_dev *nic_dev;
+ bool busy = false;
+ int work_done;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
+
+ busy |= hinic3_tx_poll(irq_cfg->txq, budget);
+
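+ /* budget == 0 means TX-only polling (e.g. netpoll); do no RX work */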
+ if (unlikely(!budget))
+ return 0;
+
+ work_done = hinic3_rx_poll(irq_cfg->rxq, budget);
+ busy |= work_done >= budget;
+
+ if (busy)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return work_done;
+}
+
+void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
+ netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
+ napi_enable(&irq_cfg->napi);
+}
+
+void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ napi_disable(&irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
+ netif_napi_del(&irq_cfg->napi);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
new file mode 100644
index 000000000000..4827326e6a59
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_lld.h"
+#include "hinic3_mgmt.h"
+
+#define HINIC3_VF_PCI_CFG_REG_BAR 0
+#define HINIC3_PCI_INTR_REG_BAR 2
+#define HINIC3_PCI_DB_BAR 4
+
+#define HINIC3_EVENT_POLL_SLEEP_US 1000
+#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000
+
+static struct hinic3_adev_device {
+ const char *name;
+} hinic3_adev_devices[HINIC3_SERVICE_T_MAX] = {
+ [HINIC3_SERVICE_T_NIC] = {
+ .name = "nic",
+ },
+};
+
+static bool hinic3_adev_svc_supported(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ switch (svc_type) {
+ case HINIC3_SERVICE_T_NIC:
+ return hinic3_support_nic(hwdev);
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void hinic3_comm_adev_release(struct device *dev)
+{
+ struct hinic3_adev *hadev = container_of(dev, struct hinic3_adev,
+ adev.dev);
+
+ kfree(hadev);
+}
+
+static struct hinic3_adev *hinic3_add_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_adev *hadev;
+ const char *svc_name;
+ int ret;
+
+ hadev = kzalloc(sizeof(*hadev), GFP_KERNEL);
+ if (!hadev)
+ return NULL;
+
+ svc_name = hinic3_adev_devices[svc_type].name;
+ hadev->adev.name = svc_name;
+ hadev->adev.id = hwdev->dev_id;
+ hadev->adev.dev.parent = hwdev->dev;
+ hadev->adev.dev.release = hinic3_comm_adev_release;
+ hadev->svc_type = svc_type;
+ hadev->hwdev = hwdev;
+
+ ret = auxiliary_device_init(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed init adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ kfree(hadev);
+ return NULL;
+ }
+
+ ret = auxiliary_device_add(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed to add adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ auxiliary_device_uninit(&hadev->adev);
+ return NULL;
+ }
+
+ return hadev;
+}
+
+static void hinic3_del_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_adev *hadev;
+ int timeout;
+ bool state;
+
+ timeout = read_poll_timeout(test_and_set_bit, state, !state,
+ HINIC3_EVENT_POLL_SLEEP_US,
+ HINIC3_EVENT_POLL_TIMEOUT_US,
+ false, svc_type, &pci_adapter->state);
+
+ hadev = pci_adapter->hadev[svc_type];
+ auxiliary_device_delete(&hadev->adev);
+ auxiliary_device_uninit(&hadev->adev);
+ pci_adapter->hadev[svc_type] = NULL;
+ if (!timeout)
+ clear_bit(svc_type, &pci_adapter->state);
+}
+
+static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ enum hinic3_service_type svc_type;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+
+ for (svc_type = 0; svc_type < HINIC3_SERVICE_T_MAX; svc_type++) {
+ if (!hinic3_adev_svc_supported(hwdev, svc_type))
+ continue;
+
+ pci_adapter->hadev[svc_type] = hinic3_add_one_adev(hwdev,
+ svc_type);
+ if (!pci_adapter->hadev[svc_type])
+ goto err_del_adevs;
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+ return 0;
+
+err_del_adevs:
+ while (svc_type > 0) {
+ svc_type--;
+ if (pci_adapter->hadev[svc_type]) {
+ hinic3_del_one_adev(hwdev, svc_type);
+ pci_adapter->hadev[svc_type] = NULL;
+ }
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+ return -ENOMEM;
+}
+
+static void hinic3_detach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ int i;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+ for (i = 0; i < ARRAY_SIZE(hinic3_adev_devices); i++) {
+ if (pci_adapter->hadev[i])
+ hinic3_del_one_adev(hwdev, i);
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+}
+
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ return hadev->hwdev;
+}
+
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event))
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = event_handler;
+}
+
+void hinic3_adev_event_unregister(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = NULL;
+}
+
+static int hinic3_mapping_bar(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_VF_PCI_CFG_REG_BAR);
+ if (!pci_adapter->cfg_reg_base) {
+ dev_err(&pdev->dev, "Failed to map configuration regs\n");
+ return -ENOMEM;
+ }
+
+ pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_PCI_INTR_REG_BAR);
+ if (!pci_adapter->intr_reg_base) {
+ dev_err(&pdev->dev, "Failed to map interrupt regs\n");
+ goto err_unmap_cfg_reg_base;
+ }
+
+ pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR);
+ if (!pci_adapter->db_base) {
+ dev_err(&pdev->dev, "Failed to map doorbell regs\n");
+ goto err_unmap_intr_reg_base;
+ }
+
+ return 0;
+
+err_unmap_intr_reg_base:
+ iounmap(pci_adapter->intr_reg_base);
+
+err_unmap_cfg_reg_base:
+ iounmap(pci_adapter->cfg_reg_base);
+
+ return -ENOMEM;
+}
+
+static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter)
+{
+ iounmap(pci_adapter->db_base);
+ iounmap(pci_adapter->intr_reg_base);
+ iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int hinic3_pci_init(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+ if (!pci_adapter)
+ return -ENOMEM;
+
+ pci_adapter->pdev = pdev;
+ mutex_init(&pci_adapter->pdev_mutex);
+
+ pci_set_drvdata(pdev, pci_adapter);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ goto err_free_pci_adapter;
+ }
+
+ err = pci_request_regions(pdev, HINIC3_NIC_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request regions\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+err_free_pci_adapter:
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+
+ return err;
+}
+
+static void hinic3_pci_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+}
+
+static int hinic3_func_init(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ int err;
+
+ err = hinic3_init_hwdev(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize hardware device\n");
+ return err;
+ }
+
+ err = hinic3_attach_aux_devices(pci_adapter->hwdev);
+ if (err)
+ goto err_free_hwdev;
+
+ return 0;
+
+err_free_hwdev:
+ hinic3_free_hwdev(pci_adapter->hwdev);
+
+ return err;
+}
+
+static void hinic3_func_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_detach_aux_devices(pci_adapter->hwdev);
+ hinic3_free_hwdev(pci_adapter->hwdev);
+}
+
+static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+ int err;
+
+ err = hinic3_mapping_bar(pdev, pci_adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map bar\n");
+ goto err_out;
+ }
+
+ err = hinic3_func_init(pdev, pci_adapter);
+ if (err)
+ goto err_unmap_bar;
+
+ return 0;
+
+err_unmap_bar:
+ hinic3_unmapping_bar(pci_adapter);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe function failed\n");
+ return err;
+}
+
+static void hinic3_remove_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+
+ hinic3_func_uninit(pdev);
+ hinic3_unmapping_bar(pci_adapter);
+}
+
+static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ err = hinic3_pci_init(pdev);
+ if (err)
+ goto err_out;
+
+ pci_adapter = pci_get_drvdata(pdev);
+ err = hinic3_probe_func(pci_adapter);
+ if (err)
+ goto err_uninit_pci;
+
+ return 0;
+
+err_uninit_pci:
+ hinic3_pci_uninit(pdev);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe failed\n");
+ return err;
+}
+
+static void hinic3_remove(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_remove_func(pci_adapter);
+ hinic3_pci_uninit(pdev);
+}
+
+static const struct pci_device_id hinic3_pci_table[] = {
+ /* Completed by later submission due to LoC limit. */
+ {0, 0}
+};
+
+MODULE_DEVICE_TABLE(pci, hinic3_pci_table);
+
+static void hinic3_shutdown(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_disable_device(pdev);
+
+ if (pci_adapter)
+ hinic3_set_api_stop(pci_adapter->hwdev);
+}
+
+static struct pci_driver hinic3_driver = {
+ .name = HINIC3_NIC_DRV_NAME,
+ .id_table = hinic3_pci_table,
+ .probe = hinic3_probe,
+ .remove = hinic3_remove,
+ .shutdown = hinic3_shutdown,
+ .sriov_configure = pci_sriov_configure_simple
+};
+
+int hinic3_lld_init(void)
+{
+ return pci_register_driver(&hinic3_driver);
+}
+
+void hinic3_lld_exit(void)
+{
+ pci_unregister_driver(&hinic3_driver);
+}
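[Editor's note] The read_poll_timeout(test_and_set_bit, ...) construct in hinic3_del_one_adev() is a claim-with-timeout: keep trying to take the per-service bit, give up after HINIC3_EVENT_POLL_TIMEOUT_US, and clear the bit afterwards only if it was actually claimed. Unrolled, it behaves roughly like the following (simplified; the real macro in linux/iopoll.h uses ktime and usleep_range):

static int claim_bit_timeout(int nr, unsigned long *addr,
			     unsigned long sleep_us, unsigned long timeout_us)
{
	unsigned long waited = 0;

	for (;;) {
		if (!test_and_set_bit(nr, addr))
			return 0;		/* bit claimed */
		if (waited >= timeout_us)
			return -ETIMEDOUT;	/* someone else holds it */
		usleep_range(sleep_us, sleep_us + sleep_us / 4);
		waited += sleep_us;
	}
}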
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
new file mode 100644
index 000000000000..322b44803476
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_LLD_H_
+#define _HINIC3_LLD_H_
+
+#include <linux/auxiliary_bus.h>
+
+struct hinic3_event_info;
+
+#define HINIC3_NIC_DRV_NAME "hinic3"
+
+int hinic3_lld_init(void);
+void hinic3_lld_exit(void);
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event));
+void hinic3_adev_event_unregister(struct auxiliary_device *adev);
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
new file mode 100644
index 000000000000..497f2a36f35d
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_lld.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver"
+
+#define HINIC3_RX_BUF_LEN 2048
+#define HINIC3_LRO_REPLENISH_THLD 256
+#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
+
+#define HINIC3_SQ_DEPTH 1024
+#define HINIC3_RQ_DEPTH 1024
+
+static int hinic3_alloc_txrxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_alloc_txqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc txqs\n");
+ return err;
+ }
+
+ err = hinic3_alloc_rxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc rxqs\n");
+ goto err_free_txqs;
+ }
+
+ return 0;
+
+err_free_txqs:
+ hinic3_free_txqs(netdev);
+
+ return err;
+}
+
+static void hinic3_free_txrxqs(struct net_device *netdev)
+{
+ hinic3_free_rxqs(netdev);
+ hinic3_free_txqs(netdev);
+}
+
+static int hinic3_init_nic_dev(struct net_device *netdev,
+ struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = hwdev->pdev;
+
+ nic_dev->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nic_dev->hwdev = hwdev;
+ nic_dev->pdev = pdev;
+
+ nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN;
+ nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
+ nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
+
+ return 0;
+}
+
+static int hinic3_sw_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
+ nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+
+ /* The VF driver always uses a random MAC address. During VM migration to
+ * a new device, the new device should learn the VM's old MAC rather than
+ * provide its own. The product design assumes that every VF is
+ * susceptible to migration, so the device avoids offering a MAC address
+ * to VFs.
+ */
+ eth_hw_addr_random(netdev);
+ err = hinic3_set_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set default MAC\n");
+ return err;
+ }
+
+ err = hinic3_alloc_txrxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc qps\n");
+ goto err_del_mac;
+ }
+
+ return 0;
+
+err_del_mac:
+ hinic3_del_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+
+ return err;
+}
+
+static void hinic3_sw_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxqs(netdev);
+ hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+}
+
+static void hinic3_assign_netdev_ops(struct net_device *netdev)
+{
+ hinic3_set_netdev_ops(netdev);
+}
+
+static void netdev_feature_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ netdev_features_t cso_fts = 0;
+ netdev_features_t tso_fts = 0;
+ netdev_features_t dft_fts;
+
+ dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_CSUM))
+ cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_SCTP_CRC))
+ cso_fts |= NETIF_F_SCTP_CRC;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO))
+ tso_fts |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ netdev->features |= dft_fts | cso_fts | tso_fts;
+}
+
+static int hinic3_set_default_hw_feature(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_set_nic_feature_to_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set nic features\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_link_status_change(struct net_device *netdev,
+ bool link_status_up)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (link_status_up) {
+ if (netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = true;
+ netif_carrier_on(netdev);
+ netdev_dbg(netdev, "Link is up\n");
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = false;
+ netif_carrier_off(netdev);
+ netdev_dbg(netdev, "Link is down\n");
+ }
+}
+
+static void hinic3_nic_event(struct auxiliary_device *adev,
+ struct hinic3_event_info *event)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ netdev = nic_dev->netdev;
+
+ switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) {
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_UP):
+ hinic3_link_status_change(netdev, true);
+ break;
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_DOWN):
+ hinic3_link_status_change(netdev, false);
+ break;
+ default:
+ break;
+ }
+}
+
+static int hinic3_nic_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct hinic3_hwdev *hwdev = hinic3_adev_get_hwdev(adev);
+ struct pci_dev *pdev = hwdev->pdev;
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ u16 max_qps, glb_func_id;
+ int err;
+
+ if (!hinic3_support_nic(hwdev)) {
+ dev_dbg(&adev->dev, "HW doesn't support nic\n");
+ return 0;
+ }
+
+ hinic3_adev_event_register(adev, hinic3_nic_event);
+
+ glb_func_id = hinic3_global_func_id(hwdev);
+ err = hinic3_func_reset(hwdev, glb_func_id, COMM_FUNC_RESET_BIT_NIC);
+ if (err) {
+ dev_err(&adev->dev, "Failed to reset function\n");
+ goto err_unregister_adev_event;
+ }
+
+ max_qps = hinic3_func_max_qnum(hwdev);
+ netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
+ if (!netdev) {
+ dev_err(&adev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_unregister_adev_event;
+ }
+
+ nic_dev = netdev_priv(netdev);
+ dev_set_drvdata(&adev->dev, nic_dev);
+ err = hinic3_init_nic_dev(netdev, hwdev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_init_nic_io(nic_dev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_sw_init(netdev);
+ if (err)
+ goto err_free_nic_io;
+
+ hinic3_assign_netdev_ops(netdev);
+
+ netdev_feature_init(netdev);
+ err = hinic3_set_default_hw_feature(netdev);
+ if (err)
+ goto err_uninit_sw;
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_uninit_nic_feature;
+
+ return 0;
+
+err_uninit_nic_feature:
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+
+err_uninit_sw:
+ hinic3_sw_uninit(netdev);
+
+err_free_nic_io:
+ hinic3_free_nic_io(nic_dev);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+err_unregister_adev_event:
+ hinic3_adev_event_unregister(adev);
+ dev_err(&pdev->dev, "NIC service probe failed\n");
+
+ return err;
+}
+
+static void hinic3_nic_remove(struct auxiliary_device *adev)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ if (!hinic3_support_nic(nic_dev->hwdev))
+ return;
+
+ netdev = nic_dev->netdev;
+ unregister_netdev(netdev);
+
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+ hinic3_sw_uninit(netdev);
+
+ hinic3_free_nic_io(nic_dev);
+
+ free_netdev(netdev);
+}
+
+static const struct auxiliary_device_id hinic3_nic_id_table[] = {
+ {
+ .name = HINIC3_NIC_DRV_NAME ".nic",
+ },
+ {}
+};
+
+static struct auxiliary_driver hinic3_nic_driver = {
+ .probe = hinic3_nic_probe,
+ .remove = hinic3_nic_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ .name = "nic",
+ .id_table = hinic3_nic_id_table,
+};
+
+static __init int hinic3_nic_lld_init(void)
+{
+ int err;
+
+ err = hinic3_lld_init();
+ if (err)
+ return err;
+
+ err = auxiliary_driver_register(&hinic3_nic_driver);
+ if (err) {
+ hinic3_lld_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static __exit void hinic3_nic_lld_exit(void)
+{
+ auxiliary_driver_unregister(&hinic3_nic_driver);
+
+ hinic3_lld_exit();
+}
+
+module_init(hinic3_nic_lld_init);
+module_exit(hinic3_nic_lld_exit);
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION(HINIC3_NIC_DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
new file mode 100644
index 000000000000..e74d1eb09730
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
new file mode 100644
index 000000000000..d7a6c37b7eff
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MBOX_H_
+#define _HINIC3_MBOX_H_
+
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+
+struct hinic3_hwdev;
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
new file mode 100644
index 000000000000..4edabeb32112
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_H_
+#define _HINIC3_MGMT_H_
+
+#include <linux/types.h>
+
+struct hinic3_hwdev;
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
new file mode 100644
index 000000000000..c4434efdc7f7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_INTERFACE_H_
+#define _HINIC3_MGMT_INTERFACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/if_ether.h>
+
+#include "hinic3_hw_intf.h"
+
+struct l2nic_cmd_feature_nego {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[4];
+};
+
+enum l2nic_func_tbl_cfg_bitmap {
+ L2NIC_FUNC_TBL_CFG_INIT = 0,
+ L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE = 1,
+ L2NIC_FUNC_TBL_CFG_MTU = 2,
+};
+
+struct l2nic_func_tbl_cfg {
+ u16 rx_wqe_buf_size;
+ u16 mtu;
+ u32 rsvd[9];
+};
+
+struct l2nic_cmd_set_func_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd;
+ u32 cfg_bitmap;
+ struct l2nic_func_tbl_cfg tbl_cfg;
+};
+
+struct l2nic_cmd_set_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_update_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_force_pkt_drop {
+ struct mgmt_msg_head msg_head;
+ u8 port;
+ u8 rsvd1[3];
+};
+
+/* NIC commands between driver and firmware */
+enum l2nic_cmd {
+ /* FUNC CFG */
+ L2NIC_CMD_SET_FUNC_TBL = 5,
+ L2NIC_CMD_SET_VPORT_ENABLE = 6,
+ L2NIC_CMD_SET_SQ_CI_ATTR = 8,
+ L2NIC_CMD_CLEAR_QP_RESOURCE = 11,
+ L2NIC_CMD_FEATURE_NEGO = 15,
+ L2NIC_CMD_SET_MAC = 21,
+ L2NIC_CMD_DEL_MAC = 22,
+ L2NIC_CMD_UPDATE_MAC = 23,
+ L2NIC_CMD_CFG_RSS = 60,
+ L2NIC_CMD_CFG_RSS_HASH_KEY = 63,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64,
+ L2NIC_CMD_SET_RSS_CTX_TBL = 65,
+ L2NIC_CMD_QOS_DCB_STATE = 110,
+ L2NIC_CMD_FORCE_PKT_DROP = 113,
+ L2NIC_CMD_MAX = 256,
+};
+
+enum hinic3_nic_feature_cap {
+ HINIC3_NIC_F_CSUM = BIT(0),
+ HINIC3_NIC_F_SCTP_CRC = BIT(1),
+ HINIC3_NIC_F_TSO = BIT(2),
+ HINIC3_NIC_F_LRO = BIT(3),
+ HINIC3_NIC_F_UFO = BIT(4),
+ HINIC3_NIC_F_RSS = BIT(5),
+ HINIC3_NIC_F_RX_VLAN_FILTER = BIT(6),
+ HINIC3_NIC_F_RX_VLAN_STRIP = BIT(7),
+ HINIC3_NIC_F_TX_VLAN_INSERT = BIT(8),
+ HINIC3_NIC_F_VXLAN_OFFLOAD = BIT(9),
+ HINIC3_NIC_F_FDIR = BIT(11),
+ HINIC3_NIC_F_PROMISC = BIT(12),
+ HINIC3_NIC_F_ALLMULTI = BIT(13),
+ HINIC3_NIC_F_RATE_LIMIT = BIT(16),
+};
+
+#define HINIC3_NIC_F_ALL_MASK 0x33bff
+#define HINIC3_NIC_DRV_DEFAULT_FEATURE 0x3f03f
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
new file mode 100644
index 000000000000..71104a6b8bef
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+static int hinic3_open(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+static int hinic3_close(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int err;
+
+ err = hinic3_set_port_mtu(netdev, new_mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to change port mtu to %d\n",
+ new_mtu);
+ return err;
+ }
+
+ netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
+ return 0;
+}
+
+static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr,
+ saddr->sa_data, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+
+ if (err)
+ return err;
+
+ eth_hw_addr_set(netdev, saddr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops hinic3_netdev_ops = {
+ .ndo_open = hinic3_open,
+ .ndo_stop = hinic3_close,
+ .ndo_change_mtu = hinic3_change_mtu,
+ .ndo_set_mac_address = hinic3_set_mac_addr,
+ .ndo_start_xmit = hinic3_xmit_frame,
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev)
+{
+ netdev->netdev_ops = &hinic3_netdev_ops;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
new file mode 100644
index 000000000000..5b1a91a18c67
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct l2nic_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate nic feature, err:%d, status: 0x%x\n",
+ err, feature_nego.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64));
+
+ return 0;
+}
+
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits)
+{
+ return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits;
+}
+
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap)
+{
+ nic_dev->nic_io->feature_cap = feature_cap;
+}
+
+static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap,
+ const struct l2nic_func_tbl_cfg *cfg)
+{
+ struct l2nic_cmd_set_func_tbl cmd_func_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmd_func_tbl.func_id = hinic3_global_func_id(hwdev);
+ cmd_func_tbl.cfg_bitmap = cfg_bitmap;
+ cmd_func_tbl.tbl_cfg = *cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &cmd_func_tbl,
+ sizeof(cmd_func_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_FUNC_TBL, &msg_params);
+ if (err || cmd_func_tbl.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x\n",
+ cfg_bitmap, err, cmd_func_tbl.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ func_tbl_cfg.mtu = new_mtu;
+ return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU),
+ &func_tbl_cfg);
+}
+
+static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status,
+ u16 vlan_id)
+{
+ if ((status && status != MGMT_STATUS_EXIST) ||
+ ((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) {
+ dev_warn(hwdev->dev, "PF has already set VF mac, Ignore set operation\n");
+ return 0;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_EXIST) {
+ dev_warn(hwdev->dev, "MAC is repeated. Ignore update operation\n");
+ return 0;
+ }
+
+ return 0;
+}
+
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_DEL_MAC, &msg_params);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to delete MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct l2nic_cmd_update_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.old_mac, old_mac);
+ ether_addr_copy(mac_info.new_mac, new_mac);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_UPDATE_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+ return 0;
+}
+
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_force_pkt_drop pkt_drop = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ pkt_drop.port = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &pkt_drop, sizeof(pkt_drop));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FORCE_PKT_DROP, &msg_params);
+	if (err || (pkt_drop.msg_head.status &&
+		    pkt_drop.msg_head.status != MGMT_STATUS_CMD_UNSUPPORTED)) {
+ dev_err(hwdev->dev,
+ "Failed to set force tx packets drop, err: %d, status: 0x%x\n",
+ err, pkt_drop.msg_head.status);
+ return -EFAULT;
+ }
+
+ return pkt_drop.msg_head.status;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
new file mode 100644
index 000000000000..bf9ce51dc401
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_CFG_H_
+#define _HINIC3_NIC_CFG_H_
+
+#include <linux/types.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_mgmt_interface.h"
+
+struct hinic3_hwdev;
+struct hinic3_nic_dev;
+
+#define HINIC3_MIN_MTU_SIZE 256
+#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600
+
+#define HINIC3_VLAN_ID_MASK 0x7FFF
+
+enum hinic3_nic_event_type {
+ HINIC3_NIC_EVENT_LINK_DOWN = 0,
+ HINIC3_NIC_EVENT_LINK_UP = 1,
+};
+
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits);
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id);
+
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
new file mode 100644
index 000000000000..c994fc9b6ee0
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_DEV_H_
+#define _HINIC3_NIC_DEV_H_
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_mgmt_interface.h"
+
+enum hinic3_flags {
+ HINIC3_RSS_ENABLE,
+};
+
+enum hinic3_rss_hash_type {
+ HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0,
+ HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1,
+};
+
+struct hinic3_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+struct hinic3_irq_cfg {
+ struct net_device *netdev;
+ u16 msix_entry_idx;
+ /* provided by OS */
+ u32 irq_id;
+ char irq_name[IFNAMSIZ + 16];
+ struct napi_struct napi;
+ cpumask_t affinity_mask;
+ struct hinic3_txq *txq;
+ struct hinic3_rxq *rxq;
+};
+
+struct hinic3_dyna_txrxq_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_dyna_txq_res *txqs_res;
+ struct hinic3_dyna_rxq_res *rxqs_res;
+ struct hinic3_irq_cfg *irq_cfg;
+};
+
+struct hinic3_nic_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_nic_io *nic_io;
+
+ u16 max_qps;
+ u16 rx_buf_len;
+ u32 lro_replenish_thld;
+ unsigned long flags;
+ struct hinic3_nic_service_cap nic_svc_cap;
+
+ struct hinic3_dyna_txrxq_params q_params;
+ struct hinic3_txq *txqs;
+ struct hinic3_rxq *rxqs;
+
+ u16 num_qp_irq;
+ struct msix_entry *qps_msix_entries;
+
+ bool link_status_up;
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev);
+
+/* Temporary prototypes. Functions become static in later submission. */
+void qp_add_napi(struct hinic3_irq_cfg *irq_cfg);
+void qp_del_napi(struct hinic3_irq_cfg *irq_cfg);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
new file mode 100644
index 000000000000..34a1f5bd5ac1
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hw_intf.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
new file mode 100644
index 000000000000..865ba6878c48
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_IO_H_
+#define _HINIC3_NIC_IO_H_
+
+#include <linux/bitfield.h>
+
+#include "hinic3_wq.h"
+
+struct hinic3_nic_dev;
+
+#define HINIC3_SQ_WQEBB_SHIFT 4
+#define HINIC3_RQ_WQEBB_SHIFT 3
+#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT)
+
+/* ******************** RQ_CTRL ******************** */
+enum hinic3_rq_wqe_type {
+ HINIC3_NORMAL_RQ_WQE = 1,
+};
+
+/* ******************** SQ_CTRL ******************** */
+#define HINIC3_TX_MSS_DEFAULT 0x3E00
+#define HINIC3_TX_MSS_MIN 0x50
+#define HINIC3_MAX_SQ_SGE 18
+
+struct hinic3_io_queue {
+ struct hinic3_wq wq;
+ u8 owner;
+ u16 q_id;
+ u16 msix_entry_idx;
+ u8 __iomem *db_addr;
+ u16 *cons_idx_addr;
+} ____cacheline_aligned;
+
+static inline u16 hinic3_get_sq_local_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->cons_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->prod_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return READ_ONCE(*sq->cons_idx_addr) & wq->idx_mask;
+}
+
+/* ******************** DB INFO ******************** */
+#define DB_INFO_QID_MASK GENMASK(12, 0)
+#define DB_INFO_CFLAG_MASK BIT(23)
+#define DB_INFO_COS_MASK GENMASK(26, 24)
+#define DB_INFO_TYPE_MASK GENMASK(31, 27)
+#define DB_INFO_SET(val, member) \
+ FIELD_PREP(DB_INFO_##member##_MASK, val)
+
+#define DB_PI_LOW_MASK 0xFFU
+#define DB_PI_HIGH_MASK 0xFFU
+#define DB_PI_HI_SHIFT 8
+#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK)
+#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK)
+#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi))
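+
+/* Example of the producer index split (illustrative value): for
+ * pi = 0x1A2B, DB_PI_LOW(pi) = 0x2B selects the 64-bit doorbell slot
+ * within the queue's doorbell page, while DB_PI_HIGH(pi) = 0x1A is
+ * carried in the doorbell record itself (hinic3_nic_db.pi_hi below).
+ */
+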
+#define DB_SRC_TYPE 1
+
+/* CFLAG_DATA_PATH */
+#define DB_CFLAG_DP_SQ 0
+#define DB_CFLAG_DP_RQ 1
+
+struct hinic3_nic_db {
+ u32 db_info;
+ u32 pi_hi;
+};
+
+static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
+ u8 cflag, u16 pi)
+{
+ struct hinic3_nic_db db;
+
+ db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) |
+ DB_INFO_SET(cflag, CFLAG) |
+ DB_INFO_SET(cos, COS) |
+ DB_INFO_SET(queue->q_id, QID);
+ db.pi_hi = DB_PI_HIGH(pi);
+
+ writeq(*((u64 *)&db), DB_ADDR(queue, pi));
+}
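+
+/* Usage sketch (mirrors the TX path later in this series): ring the SQ
+ * doorbell for the current local producer index with cos 0:
+ *
+ *   u16 pi = hinic3_get_sq_local_pi(sq);
+ *
+ *   hinic3_write_db(sq, 0, DB_CFLAG_DP_SQ, pi);
+ */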
+
+struct hinic3_nic_io {
+ struct hinic3_io_queue *sq;
+ struct hinic3_io_queue *rq;
+
+ u16 num_qps;
+ u16 max_qps;
+
+ /* Base address for consumer index of all tx queues. Each queue is
+ * given a full cache line to hold its consumer index. HW updates
+ * current consumer index as it consumes tx WQEs.
+ */
+ void *ci_vaddr_base;
+ dma_addr_t ci_dma_base;
+
+ u8 __iomem *sqs_db_addr;
+ u8 __iomem *rqs_db_addr;
+
+ u16 rx_buf_len;
+ u64 feature_cap;
+};
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
new file mode 100644
index 000000000000..fab9011de9ad
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_queue_common.h"
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size)
+{
+ u32 elem_per_page;
+
+ elem_per_page = min(page_size / elem_size, q_depth);
+
+ qpages->pages = NULL;
+ qpages->page_size = page_size;
+ qpages->num_pages = max(q_depth / elem_per_page, 1);
+ qpages->elem_size_shift = ilog2(elem_size);
+ qpages->elem_per_pg_shift = ilog2(elem_per_page);
+}
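+
+/* Worked example (illustrative numbers): q_depth = 1024, page_size =
+ * 4096 and elem_size = 16 give elem_per_page = 256, so num_pages = 4,
+ * elem_size_shift = 4 and elem_per_pg_shift = 8.
+ */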
+
+static void __queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 pg_cnt)
+{
+ while (pg_cnt > 0) {
+ pg_cnt--;
+ hinic3_dma_free_coherent_align(hwdev->dev,
+ qpages->pages + pg_cnt);
+ }
+ kfree(qpages->pages);
+ qpages->pages = NULL;
+}
+
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages)
+{
+ __queue_pages_free(hwdev, qpages, qpages->num_pages);
+}
+
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align)
+{
+ u32 pg_idx;
+ int err;
+
+ qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]),
+ GFP_KERNEL);
+ if (!qpages->pages)
+ return -ENOMEM;
+
+ if (align == 0)
+ align = qpages->page_size;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ err = hinic3_dma_zalloc_coherent_align(hwdev->dev,
+ qpages->page_size,
+ align,
+ GFP_KERNEL,
+ qpages->pages + pg_idx);
+ if (err) {
+ __queue_pages_free(hwdev, qpages, pg_idx);
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
new file mode 100644
index 000000000000..ec4cae0a0929
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_QUEUE_COMMON_H_
+#define _HINIC3_QUEUE_COMMON_H_
+
+#include <linux/types.h>
+
+#include "hinic3_common.h"
+
+struct hinic3_hwdev;
+
+struct hinic3_queue_pages {
+ /* Array of DMA-able pages that actually holds the queue entries. */
+ struct hinic3_dma_addr_align *pages;
+ /* Page size in bytes. */
+ u32 page_size;
+ /* Number of pages, must be power of 2. */
+ u16 num_pages;
+ u8 elem_size_shift;
+ u8 elem_per_pg_shift;
+};
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size);
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align);
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages);
+
+/* Get pointer to queue entry at the specified index. Index does not have to be
+ * masked to queue depth, only least significant bits will be used. Also
+ * provides remaining elements in same page (including the first one) in case
+ * caller needs multiple entries.
+ */
+static inline void *get_q_element(const struct hinic3_queue_pages *qpages,
+ u32 idx, u32 *remaining_in_page)
+{
+ const struct hinic3_dma_addr_align *page;
+ u32 page_idx, elem_idx, elem_per_pg, ofs;
+ u8 shift;
+
+ shift = qpages->elem_per_pg_shift;
+ page_idx = (idx >> shift) & (qpages->num_pages - 1);
+ elem_per_pg = 1 << shift;
+ elem_idx = idx & (elem_per_pg - 1);
+ if (remaining_in_page)
+ *remaining_in_page = elem_per_pg - elem_idx;
+ ofs = elem_idx << qpages->elem_size_shift;
+ page = qpages->pages + page_idx;
+ return (char *)page->align_vaddr + ofs;
+}
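+
+/* Example (illustrative): with 256 elements per page and 4 pages,
+ * idx = 300 resolves to page_idx = 1, elem_idx = 44, and
+ * *remaining_in_page = 212.
+ */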
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
new file mode 100644
index 000000000000..860163e9d66c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <net/gro.h>
+#include <net/page_pool/helpers.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+
+#define HINIC3_RX_HDR_SIZE 256
+#define HINIC3_RX_BUFFER_WRITE 16
+
+#define HINIC3_RX_TCP_PKT 0x3
+#define HINIC3_RX_UDP_PKT 0x4
+#define HINIC3_RX_SCTP_PKT 0x7
+
+#define HINIC3_RX_IPV4_PKT 0
+#define HINIC3_RX_IPV6_PKT 1
+#define HINIC3_RX_INVALID_IP_TYPE 2
+
+#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0
+#define HINIC3_RX_PKT_FORMAT_VXLAN 1
+
+#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66
+#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86
+#define HINIC3_LRO_PKT_HDR_LEN(cqe) \
+	(RQ_CQE_OFFLOAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \
+ HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
+ HINIC3_LRO_PKT_HDR_LEN_IPV4)
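+/* The two constants above match the largest LRO-eligible headers:
+ * 14-byte Ethernet + 20-byte IPv4 (or 40-byte IPv6) + 32-byte TCP
+ * header with options, i.e. 66 or 86 bytes. This breakdown is inferred
+ * from the values rather than stated by a hardware spec.
+ */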
+
+int hinic3_alloc_rxqs(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_rxqs(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
+
+static int rx_alloc_mapped_page(struct page_pool *page_pool,
+ struct hinic3_rx_info *rx_info, u16 buf_len)
+{
+ struct page *page;
+ u32 page_offset;
+
+ page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx_info->page = page;
+ rx_info->page_offset = page_offset;
+
+ return 0;
+}
+
+static void rq_wqe_buf_set(struct hinic3_io_queue *rq, u32 wqe_idx,
+ dma_addr_t dma_addr, u16 len)
+{
+ struct hinic3_rq_wqe *rq_wqe;
+
+ rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
+ rq_wqe->buf_hi_addr = upper_32_bits(dma_addr);
+ rq_wqe->buf_lo_addr = lower_32_bits(dma_addr);
+}
+
+static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
+{
+ u32 i, free_wqebbs = rxq->delta - 1;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t dma_addr;
+ int err;
+
+ for (i = 0; i < free_wqebbs; i++) {
+ rx_info = &rxq->rx_info[rxq->next_to_update];
+
+ err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
+ rxq->buf_len);
+ if (unlikely(err))
+ break;
+
+ dma_addr = page_pool_get_dma_addr(rx_info->page) +
+ rx_info->page_offset;
+ rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
+ rxq->buf_len);
+ rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
+ }
+
+ if (likely(i)) {
+ hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
+ rxq->next_to_update << HINIC3_NORMAL_RQ_WQE);
+ rxq->delta -= i;
+ rxq->next_to_alloc = rxq->next_to_update;
+ }
+
+ return i;
+}
+
+static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
+ struct hinic3_rx_info *rx_info,
+ struct sk_buff *skb, u32 size)
+{
+ struct page *page;
+ u8 *va;
+
+ page = rx_info->page;
+ va = (u8 *)page_address(page) + rx_info->page_offset;
+ net_prefetch(va);
+
+ page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset,
+ rxq->buf_len);
+
+ if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+ memcpy(__skb_put(skb, size), va,
+ ALIGN(size, sizeof(long)));
+ page_pool_put_full_page(rxq->page_pool, page, false);
+
+ return;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_info->page_offset, size, rxq->buf_len);
+ skb_mark_for_recycle(skb);
+}
+
+static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb,
+ u32 sge_num, u32 pkt_len)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 temp_pkt_len = pkt_len;
+ u32 temp_sge_num = sge_num;
+ u32 sw_ci;
+ u32 size;
+
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ while (temp_sge_num) {
+ rx_info = &rxq->rx_info[sw_ci];
+ sw_ci = (sw_ci + 1) & rxq->q_mask;
+ if (unlikely(temp_pkt_len > rxq->buf_len)) {
+ size = rxq->buf_len;
+ temp_pkt_len -= rxq->buf_len;
+ } else {
+ size = temp_pkt_len;
+ }
+
+ hinic3_add_rx_frag(rxq, rx_info, skb, size);
+
+ /* clear contents of buffer_info */
+ rx_info->page = NULL;
+ temp_sge_num--;
+ }
+}
+
+static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len)
+{
+ u32 sge_num;
+
+ sge_num = pkt_len >> rxq->buf_len_shift;
+ sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0;
+
+ return sge_num;
+}
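+
+/* Example (illustrative): with buf_len = 2048 (buf_len_shift = 11), a
+ * 5000-byte packet spans sge_num = (5000 >> 11) + 1 = 3 receive buffers.
+ */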
+
+static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
+ u32 pkt_len)
+{
+ struct sk_buff *skb;
+ u32 sge_num;
+
+ skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ sge_num = hinic3_get_sge_num(rxq, pkt_len);
+
+ net_prefetchw(skb->data);
+ packaging_skb(rxq, skb, sge_num, pkt_len);
+
+ rxq->cons_idx += sge_num;
+ rxq->delta += sge_num;
+
+ return skb;
+}
+
+static void hinic3_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ va = skb_frag_address(frag);
+
+	/* We need the header to contain at least ETH_HLEN bytes, or 60
+	 * bytes if skb->len is less than 60, so that skb_pad can work.
+	 */
+ pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type,
+ u32 status, struct sk_buff *skb)
+{
+	u32 pkt_fmt = RQ_CQE_OFFLOAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT);
+	u32 pkt_type = RQ_CQE_OFFLOAD_TYPE_GET(offload_type, PKT_TYPE);
+	u32 ip_type = RQ_CQE_OFFLOAD_TYPE_GET(offload_type, IP_TYPE);
+ u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR);
+ struct net_device *netdev = rxq->netdev;
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(csum_err)) {
+ /* pkt type is recognized by HW, and csum is wrong */
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ if (ip_type == HINIC3_RX_INVALID_IP_TYPE ||
+ !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL ||
+ pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ switch (pkt_type) {
+ case HINIC3_RX_TCP_PKT:
+ case HINIC3_RX_UDP_PKT:
+ case HINIC3_RX_SCTP_PKT:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+}
+
+static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ __be16 proto;
+
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb),
+ num_lro);
+ skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ?
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = num_lro;
+}
+
+static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
+ u32 pkt_len, u32 vlan_len, u32 status)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct sk_buff *skb;
+ u32 offload_type;
+ u16 num_lro;
+
+ skb = hinic3_fetch_rx_buffer(rxq, pkt_len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ hinic3_pull_tail(skb);
+
+ offload_type = rx_cqe->offload_type;
+ hinic3_rx_csum(rxq, offload_type, status, skb);
+
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ hinic3_lro_set_gso_params(skb, num_lro);
+
+ skb_record_rx_queue(skb, rxq->q_id);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb_has_frag_list(skb)) {
+ napi_gro_flush(&rxq->irq_cfg->napi, false);
+ netif_receive_skb(skb);
+ } else {
+ napi_gro_receive(&rxq->irq_cfg->napi, skb);
+ }
+
+ return 0;
+}
+
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u32 sw_ci, status, pkt_len, vlan_len;
+ struct hinic3_rq_cqe *rx_cqe;
+ u32 num_wqe = 0;
+ int nr_pkts = 0;
+ u16 num_lro;
+
+ while (likely(nr_pkts < budget)) {
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ rx_cqe = rxq->cqe_arr + sw_ci;
+ status = rx_cqe->status;
+ if (!RQ_CQE_STATUS_GET(status, RXDONE))
+ break;
+
+ /* make sure we read rx_done before packet length */
+ rmb();
+
+ vlan_len = rx_cqe->vlan_len;
+ pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
+ if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
+ break;
+
+ nr_pkts++;
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ num_wqe += hinic3_get_sge_num(rxq, pkt_len);
+
+ rx_cqe->status = 0;
+
+ if (num_wqe >= nic_dev->lro_replenish_thld)
+ break;
+ }
+
+ if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
+ hinic3_rx_fill_buffers(rxq);
+
+ return nr_pkts;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
new file mode 100644
index 000000000000..1cca21858d40
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RX_H_
+#define _HINIC3_RX_H_
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+
+#define RQ_CQE_OFFLOAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0)
+#define RQ_CQE_OFFLOAD_TYPE_IP_TYPE_MASK GENMASK(6, 5)
+#define RQ_CQE_OFFLOAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8)
+#define RQ_CQE_OFFLOAD_TYPE_VLAN_EN_MASK BIT(21)
+#define RQ_CQE_OFFLOAD_TYPE_GET(val, member) \
+	FIELD_GET(RQ_CQE_OFFLOAD_TYPE_##member##_MASK, val)
+
+#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0)
+#define RQ_CQE_SGE_LEN_MASK GENMASK(31, 16)
+#define RQ_CQE_SGE_GET(val, member) \
+ FIELD_GET(RQ_CQE_SGE_##member##_MASK, val)
+
+#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0)
+#define RQ_CQE_STATUS_NUM_LRO_MASK GENMASK(23, 16)
+#define RQ_CQE_STATUS_RXDONE_MASK BIT(31)
+#define RQ_CQE_STATUS_GET(val, member) \
+ FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
+
+/* RX Completion information that is provided by HW for a specific RX WQE */
+struct hinic3_rq_cqe {
+ u32 status;
+ u32 vlan_len;
+ u32 offload_type;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 rsvd6;
+ u32 pkt_info;
+};
+
+struct hinic3_rq_wqe {
+ u32 buf_hi_addr;
+ u32 buf_lo_addr;
+ u32 cqe_hi_addr;
+ u32 cqe_lo_addr;
+};
+
+struct hinic3_rx_info {
+ struct page *page;
+ u32 page_offset;
+};
+
+struct hinic3_rxq {
+ struct net_device *netdev;
+
+ u16 q_id;
+ u32 q_depth;
+ u32 q_mask;
+
+ u16 buf_len;
+ u32 buf_len_shift;
+
+ u32 cons_idx;
+ u32 delta;
+
+ u32 irq_id;
+ u16 msix_entry_idx;
+
+ /* cqe_arr and rx_info are arrays of rq_depth elements. Each element is
+ * statically associated (by index) to a specific rq_wqe.
+ */
+ struct hinic3_rq_cqe *cqe_arr;
+ struct hinic3_rx_info *rx_info;
+ struct page_pool *page_pool;
+
+ struct hinic3_io_queue *rq;
+
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 next_to_alloc;
+ u16 next_to_update;
+ struct device *dev; /* device for DMA mapping */
+
+ dma_addr_t cqe_start_paddr;
+} ____cacheline_aligned;
+
+int hinic3_alloc_rxqs(struct net_device *netdev);
+void hinic3_free_rxqs(struct net_device *netdev);
+
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
new file mode 100644
index 000000000000..ae08257dd1d2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <net/ip6_checksum.h>
+#include <net/ipv6.h>
+#include <net/netdev_queues.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_tx.h"
+#include "hinic3_wq.h"
+
+#define MIN_SKB_LEN 32
+
+int hinic3_alloc_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id, num_txqs = nic_dev->max_qps;
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_txq *txq;
+
+ if (!num_txqs) {
+ dev_err(hwdev->dev, "Cannot allocate zero size txqs\n");
+ return -EINVAL;
+ }
+
+ nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL);
+ if (!nic_dev->txqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_txqs; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ txq->netdev = netdev;
+ txq->q_id = q_id;
+ txq->q_depth = nic_dev->q_params.sq_depth;
+ txq->q_mask = nic_dev->q_params.sq_depth - 1;
+ txq->dev = &pdev->dev;
+ }
+
+ return 0;
+}
+
+void hinic3_free_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->txqs);
+}
+
+static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs,
+ dma_addr_t addr, u32 len)
+{
+ buf_descs->hi_addr = upper_32_bits(addr);
+ buf_descs->lo_addr = lower_32_bits(addr);
+ buf_descs->len = len;
+}
+
+static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct hinic3_txq *txq,
+ struct hinic3_tx_info *tx_info,
+ struct hinic3_sq_wqe_combo *wqe_combo)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+ struct hinic3_sq_bufdesc *buf_desc = wqe_combo->bds_head;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dma_info *dma_info = tx_info->dma_info;
+ struct pci_dev *pdev = nic_dev->pdev;
+ skb_frag_t *frag;
+ u32 i, idx;
+ int err;
+
+ dma_info[0].dma = dma_map_single(&pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[0].dma))
+ return -EFAULT;
+
+ dma_info[0].len = skb_headlen(skb);
+
+ wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma);
+ wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma);
+
+ wqe_desc->ctrl_len = dma_info[0].len;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &(skb_shinfo(skb)->frags[i]);
+ if (unlikely(i == wqe_combo->first_bds_num))
+ buf_desc = wqe_combo->bds_sec2;
+
+ idx = i + 1;
+ dma_info[idx].dma = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[idx].dma)) {
+ err = -EFAULT;
+ goto err_unmap_page;
+ }
+ dma_info[idx].len = skb_frag_size(frag);
+
+ hinic3_set_buf_desc(buf_desc, dma_info[idx].dma,
+ dma_info[idx].len);
+ buf_desc++;
+ }
+
+ return 0;
+
+err_unmap_page:
+ while (idx > 1) {
+ idx--;
+ dma_unmap_page(&pdev->dev, dma_info[idx].dma,
+ dma_info[idx].len, DMA_TO_DEVICE);
+ }
+ dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len,
+ DMA_TO_DEVICE);
+ return err;
+}
+
+static void hinic3_tx_unmap_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct hinic3_dma_info *dma_info)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ int i;
+
+	for (i = 1; i <= skb_shinfo(skb)->nr_frags; i++)
+		dma_unmap_page(&pdev->dev, dma_info[i].dma,
+			       dma_info[i].len, DMA_TO_DEVICE);
+
+ dma_unmap_single(&pdev->dev, dma_info[0].dma,
+ dma_info[0].len, DMA_TO_DEVICE);
+}
+
+union hinic3_ip {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic3_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+enum hinic3_l3_type {
+ HINIC3_L3_UNKNOWN = 0,
+ HINIC3_L3_IP6_PKT = 1,
+ HINIC3_L3_IP4_PKT_NO_CSUM = 2,
+ HINIC3_L3_IP4_PKT_CSUM = 3,
+};
+
+enum hinic3_l4_offload_type {
+ HINIC3_L4_OFFLOAD_DISABLE = 0,
+ HINIC3_L4_OFFLOAD_TCP = 1,
+ HINIC3_L4_OFFLOAD_STCP = 2,
+ HINIC3_L4_OFFLOAD_UDP = 3,
+};
+
+/* initialize l4 offset and offload */
+static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4,
+ u8 l4_proto, u32 *offset,
+ enum hinic3_l4_offload_type *l4_offload)
+{
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = HINIC3_L4_OFFLOAD_TCP;
+		/* As with TSO, the payload offset begins at the L4 payload */
+ *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) +
+ TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = HINIC3_L4_OFFLOAD_UDP;
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+ default:
+ *l4_offload = HINIC3_L4_OFFLOAD_DISABLE;
+ *offset = 0;
+ }
+}
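+
+/* Example (illustrative): a TCP header with doff = 8 (32 bytes, e.g.
+ * options present) at transport offset 34 yields
+ * *offset = (8 << TCP_HDR_DATA_OFF_UNIT_SHIFT) + 34 = 66, the start of
+ * the L4 payload relative to skb->data.
+ */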
+
+static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
+ struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ union hinic3_ip ip;
+ u8 l4_proto;
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+
+ ip.hdr = skb_network_header(skb);
+ if (ip.v4->version == 4) {
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ union hinic3_l4 l4;
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
+ l4_proto = IPPROTO_RAW;
+ }
+
+ if (l4_proto != IPPROTO_UDP ||
+ ((struct udphdr *)skb_transport_header(skb))->dest !=
+		    VXLAN_OFFLOAD_PORT_BE) {
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
+ }
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+
+ return 1;
+}
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip,
+ union hinic3_l4 *l4,
+ enum hinic3_l3_type *l3_type, u8 *l4_proto)
+{
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ if (ip->v4->version == 4) {
+ *l3_type = HINIC3_L3_IP4_PKT_CSUM;
+ *l4_proto = ip->v4->protocol;
+ } else if (ip->v4->version == 6) {
+ *l3_type = HINIC3_L3_IP6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ l4_proto, &frag_off);
+ }
+ } else {
+ *l3_type = HINIC3_L3_UNKNOWN;
+ *l4_proto = 0;
+ }
+}
+
+static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info,
+ enum hinic3_l4_offload_type l4_offload,
+ u32 offset, u32 mss)
+{
+ if (l4_offload == HINIC3_L4_OFFLOAD_TCP) {
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) {
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ }
+
+ /* enable L3 calculation */
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN);
+
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF);
+
+ /* set MSS value */
+ *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ enum hinic3_l4_offload_type l4_offload;
+ enum hinic3_l3_type l3_type;
+ union hinic3_ip ip;
+ union hinic3_l4 l4;
+ u8 l4_proto;
+ u32 offset;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->encapsulation) {
+		u32 gso_type = skb_shinfo(skb)->gso_type;
+
+		/* L3 checksum is always enabled */
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+
+ l4.hdr = skb_transport_header(skb);
+ ip.hdr = skb_network_header(skb);
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN);
+ }
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto);
+
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload);
+
+ hinic3_set_tso_info(task, queue_info, l4_offload, offset,
+ skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task,
+ u16 vlan_tag, u8 vlan_tpid)
+{
+ /* vlan_tpid: 0=select TPID0 in IPSU, 1=select TPID1 in IPSU
+ * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU,
+ * 4=select TPID4 in IPSU
+ */
+ task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
+ SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID);
+}
+
+static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
+ u32 *queue_info, struct hinic3_txq *txq)
+{
+ u32 offload = 0;
+ int tso_cs_en;
+
+ task->pkt_info0 = 0;
+ task->ip_identify = 0;
+ task->rsvd = 0;
+ task->vlan_offload = 0;
+
+ tso_cs_en = hinic3_tso(task, queue_info, skb);
+ if (tso_cs_en < 0) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ } else if (tso_cs_en) {
+ offload |= HINIC3_TX_OFFLOAD_TSO;
+ } else {
+ tso_cs_en = hinic3_tx_csum(txq, task, skb);
+ if (tso_cs_en)
+ offload |= HINIC3_TX_OFFLOAD_CSUM;
+ }
+
+#define VLAN_INSERT_MODE_MAX 5
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ /* select vlan insert mode by qid, default 802.1Q Tag type */
+ hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb),
+ txq->q_id % VLAN_INSERT_MODE_MAX);
+ offload |= HINIC3_TX_OFFLOAD_VLAN;
+ }
+
+ if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) >
+ SQ_CTRL_MAX_PLDOFF)) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ }
+
+ return offload;
+}
+
+static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq,
+ u16 curr_pi, u16 wqebb_cnt)
+{
+ u16 owner = sq->owner;
+
+ if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth))
+ sq->owner = !sq->owner;
+
+ return owner;
+}
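+
+/* Example of the owner-bit protocol (inferred from the code above):
+ * with q_depth = 256, posting 3 WQEBBs at curr_pi = 254 wraps the ring,
+ * so the current owner value is returned for this WQE and flipped for
+ * later posts; the flip lets hardware tell new WQEs from stale ones.
+ */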
+
+static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
+ struct hinic3_sq_wqe_combo *wqe_combo,
+ u32 offload, u16 num_sge, u16 *curr_pi)
+{
+ struct hinic3_sq_bufdesc *first_part_wqebbs, *second_part_wqebbs;
+ u16 first_part_wqebbs_num, tmp_pi;
+
+ wqe_combo->ctrl_bd0 = hinic3_wq_get_one_wqebb(&txq->sq->wq, curr_pi);
+ if (!offload && num_sge == 1) {
+ wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE;
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, 1);
+ }
+
+ wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE;
+
+ if (offload) {
+ wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq,
+ &tmp_pi);
+ wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES;
+ } else {
+ wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS;
+ }
+
+ if (num_sge > 1) {
+		/* first wqebb contains bd0, and bd size equals sq wqebb
+		 * size, so we use (num_sge - 1) as the wanted wqebb_cnt
+		 */
+ hinic3_wq_get_multi_wqebbs(&txq->sq->wq, num_sge - 1, &tmp_pi,
+ &first_part_wqebbs,
+ &second_part_wqebbs,
+ &first_part_wqebbs_num);
+ wqe_combo->bds_head = first_part_wqebbs;
+ wqe_combo->bds_sec2 = second_part_wqebbs;
+ wqe_combo->first_bds_num = first_part_wqebbs_num;
+ }
+
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi,
+ num_sge + !!offload);
+}
+
+static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+ u32 queue_info, int nr_descs, u16 owner)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+
+ if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
+ wqe_desc->ctrl_len |=
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER);
+
+		/* a compact wqe's queue_info is still sent to the chip, so clear it */
+ wqe_desc->queue_info = 0;
+ return;
+ }
+
+ wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
+ SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER);
+
+ wqe_desc->queue_info = queue_info;
+ wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC);
+
+ if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
+ wqe_desc->queue_info |=
+ SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS);
+ } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) <
+ HINIC3_TX_MSS_MIN) {
+ /* mss should not be less than 80 */
+ wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ wqe_desc->queue_info |=
+ SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS);
+ }
+}
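+
+/* Example of the MSS clamp above (illustrative values): an MSS field of
+ * 0 is replaced with HINIC3_TX_MSS_DEFAULT (0x3E00), and an MSS of 64
+ * is raised to HINIC3_TX_MSS_MIN (0x50, i.e. 80 bytes).
+ */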
+
+static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct hinic3_txq *txq)
+{
+ struct hinic3_sq_wqe_combo wqe_combo = {};
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_txq *tx_q = txq;
+ u32 offload, queue_info = 0;
+ struct hinic3_sq_task task;
+ u16 wqebb_cnt, num_sge;
+ u16 saved_wq_prod_idx;
+ u16 owner, pi = 0;
+ u8 saved_sq_owner;
+ int err;
+
+ if (unlikely(skb->len < MIN_SKB_LEN)) {
+ if (skb_pad(skb, MIN_SKB_LEN - skb->len))
+ goto err_out;
+
+ skb->len = MIN_SKB_LEN;
+ }
+
+ num_sge = skb_shinfo(skb)->nr_frags + 1;
+ /* assume normal wqe format + 1 wqebb for task info */
+ wqebb_cnt = num_sge + 1;
+
+ if (unlikely(hinic3_wq_free_wqebbs(&txq->sq->wq) < wqebb_cnt)) {
+ if (likely(wqebb_cnt > txq->tx_stop_thrs))
+ txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);
+
+ netif_subqueue_try_stop(netdev, tx_q->sq->q_id,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_start_thrs);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ offload = hinic3_tx_offload(skb, &task, &queue_info, txq);
+ if (unlikely(offload == HINIC3_TX_OFFLOAD_INVALID)) {
+ goto err_drop_pkt;
+ } else if (!offload) {
+ wqebb_cnt -= 1;
+ if (unlikely(num_sge == 1 &&
+			     skb->len > HINIC3_COMPACT_WQE_SKB_MAX_LEN))
+ goto err_drop_pkt;
+ }
+
+ saved_wq_prod_idx = txq->sq->wq.prod_idx;
+ saved_sq_owner = txq->sq->owner;
+
+ owner = hinic3_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi);
+ if (offload)
+ *wqe_combo.task = task;
+
+ tx_info = &txq->tx_info[pi];
+ tx_info->skb = skb;
+ tx_info->wqebb_cnt = wqebb_cnt;
+
+ err = hinic3_tx_map_skb(netdev, skb, txq, tx_info, &wqe_combo);
+ if (err) {
+ /* Rollback work queue to reclaim the wqebb we did not use */
+ txq->sq->wq.prod_idx = saved_wq_prod_idx;
+ txq->sq->owner = saved_sq_owner;
+ goto err_drop_pkt;
+ }
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id),
+ skb->len);
+ netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_stop_thrs,
+ tx_q->tx_start_thrs);
+
+ hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
+ hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ,
+ hinic3_get_sq_local_pi(txq->sq));
+
+ return NETDEV_TX_OK;
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+
+err_out:
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id = skb_get_queue_mapping(skb);
+
+ if (unlikely(!netif_carrier_ok(netdev)))
+ goto err_drop_pkt;
+
+ if (unlikely(q_id >= nic_dev->q_params.num_qps))
+ goto err_drop_pkt;
+
+ return hinic3_send_one_skb(skb, netdev, &nic_dev->txqs[q_id]);
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq)
+{
+ u16 sw_pi, hw_ci;
+
+ sw_pi = hinic3_get_sq_local_pi(sq);
+ hw_ci = hinic3_get_sq_hw_ci(sq);
+
+ return sw_pi == hw_ci;
+}
+
+#define HINIC3_FLUSH_QUEUE_POLL_SLEEP_US 10000
+#define HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US 10000000
+static int hinic3_stop_sq(struct hinic3_txq *txq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ int err, rc;
+
+ err = read_poll_timeout(hinic3_force_drop_tx_pkt, rc,
+ is_hw_complete_sq_process(txq->sq) || rc,
+ HINIC3_FLUSH_QUEUE_POLL_SLEEP_US,
+ HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US,
+ true, nic_dev->hwdev);
+	if (rc)
+		return rc;
+
+	return err;
+}
+
+/* packet transmission should be stopped before calling this function */
+void hinic3_flush_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 qid;
+ int err;
+
+ for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) {
+ err = hinic3_stop_sq(&nic_dev->txqs[qid]);
+ netdev_tx_reset_subqueue(netdev, qid);
+ if (err)
+ netdev_err(netdev, "Failed to stop sq%u\n", qid);
+ }
+}
+
+#define HINIC3_BDS_PER_SQ_WQEBB \
+ (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc))
+
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
+{
+ struct net_device *netdev = txq->netdev;
+ u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_txq *tx_q = txq;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts = 0;
+ u16 wqebb_cnt = 0;
+
+ hw_ci = hinic3_get_sq_hw_ci(txq->sq);
+ dma_rmb();
+ sw_ci = hinic3_get_sq_local_ci(txq->sq);
+
+ do {
+ tx_info = &txq->tx_info[sw_ci];
+
+ /* Did all wqebb of this wqe complete? */
+ if (hw_ci == sw_ci ||
+ ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt)
+ break;
+
+ sw_ci = (sw_ci + tx_info->wqebb_cnt) & txq->q_mask;
+ net_prefetch(&txq->tx_info[sw_ci]);
+
+ wqebb_cnt += tx_info->wqebb_cnt;
+ bytes_compl += tx_info->skb->len;
+ pkts++;
+
+ hinic3_tx_unmap_skb(netdev, tx_info->skb, tx_info->dma_info);
+ napi_consume_skb(tx_info->skb, budget);
+ tx_info->skb = NULL;
+ } while (likely(pkts < HINIC3_TX_POLL_WEIGHT));
+
+ hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt);
+
+ netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_start_thrs);
+
+ return pkts == HINIC3_TX_POLL_WEIGHT;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
new file mode 100644
index 000000000000..9e505cc19dd5
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_TX_H_
+#define _HINIC3_TX_H_
+
+#include <linux/bitops.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/checksum.h>
+
+#define VXLAN_OFFLOAD_PORT_BE cpu_to_be16(4789)
+#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((l4_hdr) - (skb)->data)
+
+#define HINIC3_COMPACT_WQE_SKB_MAX_LEN 16383
+#define HINIC3_TX_POLL_WEIGHT 64
+#define HINIC3_DEFAULT_STOP_THRS 6
+#define HINIC3_DEFAULT_START_THRS 24
+
+enum sq_wqe_data_format {
+ SQ_NORMAL_WQE = 0,
+};
+
+enum sq_wqe_ec_type {
+ SQ_WQE_COMPACT_TYPE = 0,
+ SQ_WQE_EXTENDED_TYPE = 1,
+};
+
+enum sq_wqe_tasksect_len_type {
+ SQ_WQE_TASKSECT_46BITS = 0,
+ SQ_WQE_TASKSECT_16BYTES = 1,
+};
+
+enum hinic3_tx_offload_type {
+ HINIC3_TX_OFFLOAD_TSO = BIT(0),
+ HINIC3_TX_OFFLOAD_CSUM = BIT(1),
+ HINIC3_TX_OFFLOAD_VLAN = BIT(2),
+ HINIC3_TX_OFFLOAD_INVALID = BIT(3),
+ HINIC3_TX_OFFLOAD_ESP = BIT(4),
+};
+
+#define SQ_CTRL_BUFDESC_NUM_MASK GENMASK(26, 19)
+#define SQ_CTRL_TASKSECT_LEN_MASK BIT(27)
+#define SQ_CTRL_DATA_FORMAT_MASK BIT(28)
+#define SQ_CTRL_EXTENDED_MASK BIT(30)
+#define SQ_CTRL_OWNER_MASK BIT(31)
+#define SQ_CTRL_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_##member##_MASK, val)
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK GENMASK(9, 2)
+#define SQ_CTRL_QUEUE_INFO_UFO_MASK BIT(10)
+#define SQ_CTRL_QUEUE_INFO_TSO_MASK BIT(11)
+#define SQ_CTRL_QUEUE_INFO_MSS_MASK GENMASK(26, 13)
+#define SQ_CTRL_QUEUE_INFO_UC_MASK BIT(28)
+
+#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
+ FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+
+#define SQ_CTRL_MAX_PLDOFF 221
+
+#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK BIT(19)
+#define SQ_TASK_INFO0_INNER_L4_EN_MASK BIT(24)
+#define SQ_TASK_INFO0_INNER_L3_EN_MASK BIT(25)
+#define SQ_TASK_INFO0_OUT_L4_EN_MASK BIT(27)
+#define SQ_TASK_INFO0_OUT_L3_EN_MASK BIT(28)
+#define SQ_TASK_INFO0_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val)
+
+#define SQ_TASK_INFO3_VLAN_TAG_MASK GENMASK(15, 0)
+#define SQ_TASK_INFO3_VLAN_TPID_MASK GENMASK(18, 16)
+#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK BIT(19)
+#define SQ_TASK_INFO3_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val)
+
+struct hinic3_sq_wqe_desc {
+ u32 ctrl_len;
+ u32 queue_info;
+ u32 hi_addr;
+ u32 lo_addr;
+};
+
+struct hinic3_sq_task {
+ u32 pkt_info0;
+ u32 ip_identify;
+ u32 rsvd;
+ u32 vlan_offload;
+};
+
+struct hinic3_sq_wqe_combo {
+ struct hinic3_sq_wqe_desc *ctrl_bd0;
+ struct hinic3_sq_task *task;
+ struct hinic3_sq_bufdesc *bds_head;
+ struct hinic3_sq_bufdesc *bds_sec2;
+ u16 first_bds_num;
+ u32 wqe_type;
+ u32 task_type;
+};
+
+struct hinic3_dma_info {
+ dma_addr_t dma;
+ u32 len;
+};
+
+struct hinic3_tx_info {
+ struct sk_buff *skb;
+ u16 wqebb_cnt;
+ struct hinic3_dma_info *dma_info;
+};
+
+struct hinic3_txq {
+ struct net_device *netdev;
+ struct device *dev;
+
+ u16 q_id;
+ u16 tx_stop_thrs;
+ u16 tx_start_thrs;
+ u32 q_mask;
+ u32 q_depth;
+
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_io_queue *sq;
+} ____cacheline_aligned;
+
+int hinic3_alloc_txqs(struct net_device *netdev);
+void hinic3_free_txqs(struct net_device *netdev);
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);
+void hinic3_flush_txqs(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
new file mode 100644
index 000000000000..2ac7efcd1365
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_wq.h"
+
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num)
+{
+ u32 idx, remaining;
+
+ idx = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx += num_wqebbs;
+ *prod_idx = idx;
+ *first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining);
+ if (likely(remaining >= num_wqebbs)) {
+ *first_part_wqebbs_num = num_wqebbs;
+ *second_part_wqebbs = NULL;
+ } else {
+ *first_part_wqebbs_num = remaining;
+ idx += remaining;
+ *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
+ }
+}
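+
+/* Example of the two-part split (illustrative): with 256 WQEBBs per
+ * page, requesting 4 WQEBBs at idx = 254 returns the last 2 WQEBBs of
+ * one page via *first_part_wqebbs and the remaining 2 at the start of
+ * the next page via *second_part_wqebbs.
+ */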
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
new file mode 100644
index 000000000000..ab37893efd7e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_WQ_H_
+#define _HINIC3_WQ_H_
+
+#include <linux/io.h>
+
+#include "hinic3_queue_common.h"
+
+struct hinic3_sq_bufdesc {
+ /* 31-bits Length, L2NIC only uses length[17:0] */
+ u32 len;
+ u32 rsvd;
+ u32 hi_addr;
+ u32 lo_addr;
+};
+
+/* Work queue is used to submit elements (tx, rx, cmd) to hw.
+ * Driver is the producer that advances prod_idx. cons_idx is advanced when
+ * HW reports completions of previously submitted elements.
+ */
+struct hinic3_wq {
+ struct hinic3_queue_pages qpages;
+ /* Unmasked producer/consumer indices that are advanced to natural
+ * integer overflow regardless of queue depth.
+ */
+ u16 cons_idx;
+ u16 prod_idx;
+
+ u32 q_depth;
+ u16 idx_mask;
+
+ /* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical
+ * Address (CLA) using 1 of 2 levels:
+ * level 0 - direct mapping of single wq page
+ * level 1 - indirect mapping of multiple pages via additional page
+ * table.
+ * When wq uses level 1, wq_block will hold the allocated indirection
+ * table.
+ */
+ dma_addr_t wq_block_paddr;
+ __be64 *wq_block_vaddr;
+} ____cacheline_aligned;
+
+/* Get number of elements in work queue that are in-use. */
+static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq)
+{
+ return READ_ONCE(wq->prod_idx) - READ_ONCE(wq->cons_idx);
+}
+
+static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq)
+{
+ /* Don't allow queue to become completely full, report (free - 1). */
+ return wq->q_depth - hinic3_wq_get_used(wq) - 1;
+}
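+
+/* Example (illustrative): with q_depth = 256, prod_idx = 300 and
+ * cons_idx = 60, 240 WQEBBs are in use and 15 are reported free; the
+ * reserved slot keeps a full ring distinguishable from an empty one
+ * (prod_idx == cons_idx).
+ */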
+
+static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi)
+{
+ *pi = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx++;
+ return get_q_element(&wq->qpages, *pi, NULL);
+}
+
+static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs)
+{
+ wq->cons_idx += num_wqebbs;
+}
+
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num);
+
+#endif
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index c0c112d95b89..4f4b23465c47 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -27,6 +27,19 @@ config IBMVETH
To compile this driver as a module, choose M here. The module will
be called ibmveth.
+config IBMVETH_KUNIT_TEST
+ bool "KUnit test for IBM LAN Virtual Ethernet support" if !KUNIT_ALL_TESTS
+	depends on KUNIT=y
+	depends on IBMVETH=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the IBM LAN Virtual Ethernet driver.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 04192190beba..24046fe16634 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,8 +39,6 @@
#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
- bool reuse);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
@@ -231,7 +229,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
index = pool->free_map[free_index];
skb = NULL;
- BUG_ON(index == IBM_VETH_INVALID_MAP);
+ if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+ schedule_work(&adapter->work);
+ goto bad_index_failure;
+ }
/* are we allocating a new buffer or recycling an old one */
if (pool->skbuff[index])
@@ -300,6 +301,7 @@ failure:
DMA_FROM_DEVICE);
dev_kfree_skb_any(pool->skbuff[index]);
pool->skbuff[index] = NULL;
+bad_index_failure:
adapter->replenish_add_buff_failure++;
mb();
@@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
}
}
-/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
- u64 correlator, bool reuse)
+/**
+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
+ * @adapter: adapter instance
+ * @correlator: identifies pool and index
+ * @reuse: whether to reuse buffer
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - correlator maps to pool or index out of range
+ * * %-EFAULT - pool and index map to null skb
+ */
+static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
unsigned int free_index;
struct sk_buff *skb;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return -EINVAL;
+ }
skb = adapter->rx_buff_pool[pool].skbuff[index];
- BUG_ON(skb == NULL);
+ if (WARN_ON(!skb)) {
+ schedule_work(&adapter->work);
+ return -EFAULT;
+ }
/* if we are going to reuse the buffer then keep the pointers around
* but mark index as available. replenish will see the skb pointer and
@@ -411,6 +429,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
mb();
atomic_dec(&(adapter->rx_buff_pool[pool].available));
+
+ return 0;
}
/* get the current buffer on the rx queue */
@@ -420,24 +440,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return NULL;
+ }
return adapter->rx_buff_pool[pool].skbuff[index];
}
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
- bool reuse)
+/**
+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
+ *
+ * @adapter: pointer to adapter
+ * @reuse: whether to reuse buffer
+ *
+ * Context: called from ibmveth_poll
+ *
+ * Return:
+ * * %0 - success
+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
+ */
+static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse)
{
u64 cor;
+ int rc;
cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
- ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ if (unlikely(rc))
+ return rc;
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
+
+ return 0;
}
static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
@@ -709,6 +749,35 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
+/**
+ * ibmveth_reset - Handle scheduled reset work
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
+ * ibmveth_close. It can't be called directly in a context that has
+ * already acquired rtnl_mutex or disabled its NAPI, or directly from
+ * a poll routine.
+ *
+ * Return: void
+ */
+static void ibmveth_reset(struct work_struct *w)
+{
+ struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
+ struct net_device *netdev = adapter->netdev;
+
+ netdev_dbg(netdev, "reset starting\n");
+
+ rtnl_lock();
+
+ dev_close(adapter->netdev);
+ dev_open(adapter->netdev, NULL);
+
+ rtnl_unlock();
+
+ netdev_dbg(netdev, "reset complete\n");
+}
+
static int ibmveth_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
@@ -1324,7 +1393,8 @@ restart_poll:
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
- ibmveth_rxq_harvest_buffer(adapter, true);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@@ -1334,6 +1404,8 @@ restart_poll:
__sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
+ if (unlikely(!skb))
+ break;
/* if the large packet bit is set in the rx queue
* descriptor, the mss will be written by PHYP eight
@@ -1357,10 +1429,12 @@ restart_poll:
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
- ibmveth_rxq_harvest_buffer(adapter, true);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
skb = new_skb;
} else {
- ibmveth_rxq_harvest_buffer(adapter, false);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
+ break;
skb_reserve(skb, offset);
}
@@ -1407,7 +1481,10 @@ restart_poll:
* then check once more to make sure we are done.
*/
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ if (WARN_ON(lpar_rc != H_SUCCESS)) {
+ schedule_work(&adapter->work);
+ goto out;
+ }
if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1428,7 +1505,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ WARN_ON(lpar_rc != H_SUCCESS);
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1670,6 +1747,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
+ INIT_WORK(&adapter->work, ibmveth_reset);
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
ibmveth_init_link_settings(netdev);
@@ -1762,6 +1840,8 @@ static void ibmveth_remove(struct vio_dev *dev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;
+ cancel_work_sync(&adapter->work);
+
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
kobject_put(&adapter->rx_buff_pool[i].kobj);
@@ -1791,6 +1871,26 @@ static ssize_t veth_pool_show(struct kobject *kobj,
return 0;
}
+/**
+ * veth_pool_store - sysfs store handler for pool attributes
+ * @kobj: kobject embedded in pool
+ * @attr: attribute being changed
+ * @buf: value being stored
+ * @count: length of @buf in bytes
+ *
+ * Stores new value in pool attribute. Verifies the range of the new value for
+ * size and buff_size. Verifies that at least one pool remains available to
+ * receive MTU-sized packets.
+ *
+ * Context: Process context.
+ * Takes and releases rtnl_mutex to ensure correct ordering of close
+ * and open calls.
+ * Return:
+ * %-EPERM - Not allowed to disable all MTU-sized buffer pools
+ * * %-EINVAL - New pool size or buffer size is out of range
+ * * count - Return count for success
+ * * other - Return value from a failed ibmveth_open call
+ */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
@@ -1800,28 +1900,30 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
+ bool change = false;
+ u32 newbuff_size;
+ u32 oldbuff_size;
+ int newactive;
+ int oldactive;
+ u32 newsize;
+ u32 oldsize;
long rc;
rtnl_lock();
+ oldbuff_size = pool->buff_size;
+ oldactive = pool->active;
+ oldsize = pool->size;
+
+ newbuff_size = oldbuff_size;
+ newactive = oldactive;
+ newsize = oldsize;
+
if (attr == &veth_active_attr) {
- if (value && !pool->active) {
- if (netif_running(netdev)) {
- if (ibmveth_alloc_buffer_pool(pool)) {
- netdev_err(netdev,
- "unable to alloc pool\n");
- rc = -ENOMEM;
- goto unlock_err;
- }
- pool->active = 1;
- ibmveth_close(netdev);
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->active = 1;
- }
- } else if (!value && pool->active) {
+ if (value && !oldactive) {
+ newactive = 1;
+ change = true;
+ } else if (!value && oldactive) {
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
int i;
/* Make sure there is a buffer pool with buffers that
@@ -1841,43 +1943,44 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
goto unlock_err;
}
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->active = 0;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- }
- pool->active = 0;
+ newactive = 0;
+ change = true;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
rc = -EINVAL;
goto unlock_err;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->size = value;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->size = value;
- }
+ }
+ if (value != oldsize) {
+ newsize = value;
+ change = true;
}
} else if (attr == &veth_size_attr) {
if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
rc = -EINVAL;
goto unlock_err;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->buff_size = value;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->buff_size = value;
+ }
+ if (value != oldbuff_size) {
+ newbuff_size = value;
+ change = true;
+ }
+ }
+
+ if (change) {
+ if (netif_running(netdev))
+ ibmveth_close(netdev);
+
+ pool->active = newactive;
+ pool->buff_size = newbuff_size;
+ pool->size = newsize;
+
+ if (netif_running(netdev)) {
+ rc = ibmveth_open(netdev);
+ if (rc) {
+ pool->active = oldactive;
+ pool->buff_size = oldbuff_size;
+ pool->size = oldsize;
+ goto unlock_err;
}
}
}
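
The rewritten store handler above consolidates three near-identical
close/modify/reopen sequences into a single apply step with rollback. A
hedged sketch of the pattern in isolation (names are illustrative, not
from the driver):

	static int apply_setting(struct net_device *netdev,
				 struct ibmveth_buff_pool *pool,
				 u32 newsize, u32 oldsize)
	{
		int rc = 0;

		if (netif_running(netdev))
			ibmveth_close(netdev);

		pool->size = newsize;	/* apply while the device is down */

		if (netif_running(netdev)) {
			rc = ibmveth_open(netdev);
			if (rc)
				pool->size = oldsize;	/* roll back on reopen failure */
		}
		return rc;
	}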
@@ -1962,3 +2065,132 @@ static void __exit ibmveth_module_exit(void)
module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);
+
+#ifdef CONFIG_IBMVETH_KUNIT_TEST
+#include <kunit/test.h>
+
+/**
+ * ibmveth_reset_kunit - reset routine for running in KUnit environment
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: Called in the KUnit environment. Does nothing.
+ *
+ * Return: void
+ */
+static void ibmveth_reset_kunit(struct work_struct *w)
+{
+ netdev_dbg(NULL, "reset_kunit starting\n");
+ netdev_dbg(NULL, "reset_kunit complete\n");
+}
+
+/**
+ * ibmveth_remove_buffer_from_pool_test - unit test for the error paths
+ * of ibmveth_remove_buffer_from_pool
+ * @test: pointer to kunit structure
+ *
+ * Tests the error returns from ibmveth_remove_buffer_from_pool.
+ * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be
+ * checked to see that these warnings happened.
+ *
+ * Return: void
+ */
+static void ibmveth_remove_buffer_from_pool_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+ u64 correlator;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = (u64)0 | 0;
+ pool->skbuff[0] = NULL;
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ flush_work(&adapter->work);
+}
+
+/**
+ * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
+ * @test: pointer to kunit structure
+ *
+ * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for
+ * the NULL returns, so dmesg should be checked to see that these warnings
+ * happened.
+ *
+ * Return: void
+ */
+static void ibmveth_rxq_get_buffer_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ adapter->rx_queue.queue_len = 1;
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ pool->skbuff[0] = skb;
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter));
+
+ flush_work(&adapter->work);
+}
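
Both tests hand-build the 64-bit correlator carried by rx queue entries:
the pool index lives in the upper 32 bits and the buffer index in the
lower 32. A sketch of that encoding as a helper (hypothetical; the tests
above construct it inline):

	static u64 ibmveth_make_correlator(u32 pool_index, u32 buff_index)
	{
		return ((u64)pool_index << 32) | buff_index;
	}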
+
+static struct kunit_case ibmveth_test_cases[] = {
+ KUNIT_CASE(ibmveth_remove_buffer_from_pool_test),
+ KUNIT_CASE(ibmveth_rxq_get_buffer_test),
+ {}
+};
+
+static struct kunit_suite ibmveth_test_suite = {
+ .name = "ibmveth-kunit-test",
+ .test_cases = ibmveth_test_cases,
+};
+
+kunit_test_suite(ibmveth_test_suite);
+#endif
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 8468e2c59d7a..b0a2460ec9f9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -134,38 +134,39 @@ struct ibmveth_rx_q {
};
struct ibmveth_adapter {
- struct vio_dev *vdev;
- struct net_device *netdev;
- struct napi_struct napi;
- unsigned int mcastFilterSize;
- void * buffer_list_addr;
- void * filter_list_addr;
- void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
- unsigned int tx_ltb_size;
- dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
- dma_addr_t buffer_list_dma;
- dma_addr_t filter_list_dma;
- struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
- struct ibmveth_rx_q rx_queue;
- int rx_csum;
- int large_send;
- bool is_active_trunk;
-
- u64 fw_ipv6_csum_support;
- u64 fw_ipv4_csum_support;
- u64 fw_large_send_support;
- /* adapter specific stats */
- u64 replenish_task_cycles;
- u64 replenish_no_mem;
- u64 replenish_add_buff_failure;
- u64 replenish_add_buff_success;
- u64 rx_invalid_buffer;
- u64 rx_no_buffer;
- u64 tx_map_failed;
- u64 tx_send_failed;
- u64 tx_large_packets;
- u64 rx_large_packets;
- /* Ethtool settings */
+ struct vio_dev *vdev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct work_struct work;
+ unsigned int mcastFilterSize;
+ void *buffer_list_addr;
+ void *filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+ dma_addr_t buffer_list_dma;
+ dma_addr_t filter_list_dma;
+ struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+ struct ibmveth_rx_q rx_queue;
+ int rx_csum;
+ int large_send;
+ bool is_active_trunk;
+
+ u64 fw_ipv6_csum_support;
+ u64 fw_ipv4_csum_support;
+ u64 fw_large_send_support;
+ /* adapter specific stats */
+ u64 replenish_task_cycles;
+ u64 replenish_no_mem;
+ u64 replenish_add_buff_failure;
+ u64 replenish_add_buff_success;
+ u64 rx_invalid_buffer;
+ u64 rx_no_buffer;
+ u64 tx_map_failed;
+ u64 tx_send_failed;
+ u64 tx_large_packets;
+ u64 rx_large_packets;
+ /* Ethtool settings */
u8 duplex;
u32 speed;
};
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1640d2f27833..5a331c1c76cb 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -147,6 +147,8 @@ config IXGBE
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
+ select NET_DEVLINK
+ select PLDMFW
select PHYLIB
help
This driver supports Intel(R) 10GbE PCI Express family of
@@ -367,6 +369,7 @@ config IGC
default n
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index c0ead54ea186..5c56c1edd492 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1682,7 +1682,7 @@ static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
static void e100_watchdog(struct timer_list *t)
{
- struct nic *nic = from_timer(nic, t, watchdog);
+ struct nic *nic = timer_container_of(nic, t, watchdog);
struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
u32 speed;
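
This is the first of several mechanical conversions in this series (also
e1000e, fm10k and i40e below): timer_container_of() is the successor to
from_timer(), recovering the structure that embeds the timer_list. A
sketch under that assumption:

	static void example_watchdog(struct timer_list *t)
	{
		/* 'watchdog' names the timer_list member inside struct nic */
		struct nic *nic = timer_container_of(nic, t, watchdog);

		mod_timer(&nic->watchdog, jiffies + HZ);	/* re-arm */
	}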
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3f089c3d47b2..d8595e84326d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -477,10 +477,6 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
-
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
@@ -1266,6 +1262,10 @@ static void e1000_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
+
e1000_phy_hw_reset(hw);
kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba9c19e6994c..952898151565 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -319,7 +319,7 @@ struct e1000_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
unsigned long tx_hwtstamp_start;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8ebcb6a7d608..7719e15813ee 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3534,9 +3534,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
@@ -3552,6 +3549,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
adapter->cc.shift = shift;
}
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ /* System firmware can misreport this value, so set it to a
+ * stable 38400KHz frequency.
+ */
+ incperiod = INCPERIOD_38400KHZ;
+ incvalue = INCVALUE_38400KHZ;
+ shift = INCVALUE_SHIFT_38400KHZ;
+ adapter->cc.shift = shift;
+ break;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
@@ -3574,6 +3582,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
* @adapter: board private structure
* @config: timestamp configuration
+ * @extack: netlink extended ACK for error reporting
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -3587,7 +3596,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* exception of "all V2 events regardless of level 2 or 4".
**/
static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
@@ -3598,8 +3608,10 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l2 = false;
u32 regval;
- if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
+ NL_SET_ERR_MSG(extack, "No HW timestamp support");
return -EINVAL;
+ }
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -3608,6 +3620,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
case HWTSTAMP_TX_ON:
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported TX HW timestamp type");
return -ERANGE;
}
@@ -3681,6 +3694,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported RX HW timestamp filter");
return -ERANGE;
}
@@ -3693,7 +3707,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
ew32(TSYNCTXCTL, regval);
if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
(regval & E1000_TSYNCTXCTL_ENABLED)) {
- e_err("Timesync Tx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Tx Control register not set as expected");
return -EAGAIN;
}
@@ -3706,7 +3721,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
E1000_TSYNCRXCTL_TYPE_MASK)) !=
(regval & (E1000_TSYNCRXCTL_ENABLED |
E1000_TSYNCRXCTL_TYPE_MASK))) {
- e_err("Timesync Rx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Rx Control register not set as expected");
return -EAGAIN;
}
@@ -3901,6 +3917,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
{
struct ptp_clock_info *info = &adapter->ptp_clock_info;
struct e1000_hw *hw = &adapter->hw;
+ struct netlink_ext_ack extack = {};
unsigned long flags;
u32 timinca;
s32 ret_val;
@@ -3932,7 +3949,12 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->systim_lock, flags);
/* restore the previous hwtstamp configuration settings */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ ret_val = e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config,
+ &extack);
+ if (ret_val) {
+ if (extack._msg)
+ e_err("%s\n", extack._msg);
+ }
}
/**
@@ -4839,7 +4861,8 @@ static void e1000e_update_phy_task(struct work_struct *work)
**/
static void e1000_update_phy_info(struct timer_list *t)
{
- struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct e1000_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -5175,7 +5198,8 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
**/
static void e1000_watchdog(struct timer_list *t)
{
- struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct e1000_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -6079,8 +6103,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(ifr);
@@ -6140,7 +6163,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
/**
* e1000e_hwtstamp_set - control hardware time stamping
* @netdev: network interface device structure
- * @ifr: interface request
+ * @config: timestamp configuration
+ * @extack: netlink extended ACK for error reporting
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -6153,20 +6177,18 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int ret_val;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret_val = e1000e_config_hwtstamp(adapter, &config);
+ ret_val = e1000e_config_hwtstamp(adapter, config, extack);
if (ret_val)
return ret_val;
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -6178,38 +6200,23 @@ static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
* by hardware so notify the caller the requested packets plus
* some others are time stamped.
*/
- config.rx_filter = HWTSTAMP_FILTER_SOME;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
break;
default:
break;
}
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *kernel_config)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
- sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
-}
+ *kernel_config = adapter->hwtstamp_config;
-static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return e1000_mii_ioctl(netdev, ifr, cmd);
- case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_set(netdev, ifr);
- case SIOCGHWTSTAMP:
- return e1000e_hwtstamp_get(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
@@ -7346,9 +7353,11 @@ static const struct net_device_ops e1000e_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e1000_netpoll,
#endif
- .ndo_set_features = e1000_set_features,
- .ndo_fix_features = e1000_fix_features,
+ .ndo_set_features = e1000_set_features,
+ .ndo_fix_features = e1000_fix_features,
.ndo_features_check = passthru_features_check,
+ .ndo_hwtstamp_get = e1000e_hwtstamp_get,
+ .ndo_hwtstamp_set = e1000e_hwtstamp_set,
};
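
With the .ndo_hwtstamp_get/.ndo_hwtstamp_set callbacks wired up, the
networking core performs the SIOCGHWTSTAMP/SIOCSHWTSTAMP user-space
copies, so the driver only reads or fills the kernel_hwtstamp_config it
is handed. A minimal sketch of the get side under that contract (the
function name is illustrative):

	static int example_hwtstamp_get(struct net_device *netdev,
					struct kernel_hwtstamp_config *cfg)
	{
		struct e1000_adapter *adapter = netdev_priv(netdev);

		*cfg = adapter->hwtstamp_config;	/* no copy_to_user() here */
		return 0;
	}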
/**
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 89d57dd911dc..ea3c3eb2ef20 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -295,15 +295,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
else
adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 21267ab603ef..ae5fe34659cf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -199,8 +199,8 @@ static void fm10k_start_service_event(struct fm10k_intfc *interface)
**/
static void fm10k_service_timer(struct timer_list *t)
{
- struct fm10k_intfc *interface = from_timer(interface, t,
- service_timer);
+ struct fm10k_intfc *interface = timer_container_of(interface, t,
+ service_timer);
/* Reset the timer */
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 370b4bddee44..b11c35e307ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -817,10 +817,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
void i40e_clear_hw(struct i40e_hw *hw)
{
u32 num_queues, base_queue;
- u32 num_pf_int;
- u32 num_vf_int;
+ s32 num_pf_int;
+ s32 num_vf_int;
u32 num_vfs;
- u32 i, j;
+ s32 i;
+ u32 j;
u32 val;
u32 eol = 0x7ff;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 120d68654e3f..f1c9e575703e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11412,7 +11412,7 @@ static void i40e_service_task(struct work_struct *work)
**/
static void i40e_service_timer(struct timer_list *t)
{
- struct i40e_pf *pf = from_timer(pf, t, service_timer);
+ struct i40e_pf *pf = timer_container_of(pf, t, service_timer);
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1120f8e4bb67..88e6bef69342 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1546,8 +1546,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is in reset, resets successfully, or resets
- * are disabled and false otherwise.
+ * Return: true if the reset was performed successfully or resets are
+ * disabled; false if a reset is already in progress.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1566,7 +1566,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* If VF is being reset already we don't need to continue. */
if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- return true;
+ return false;
i40e_trigger_vf_reset(vf, flr);
@@ -4328,7 +4328,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & BIT(bit_idx))
/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
- i40e_reset_vf(vf, true);
+ if (!i40e_reset_vf(vf, true)) {
+ /* At least one VF did not finish resetting, retry next time */
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
return 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 9de3e0ba3731..f7a98ff43a57 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -268,7 +268,6 @@ struct iavf_adapter {
struct list_head vlan_filter_list;
int num_vlan_filters;
struct list_head mac_filter_list;
- struct mutex crit_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 288bb5b2e72e..2b2b315205b5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -4,6 +4,8 @@
#include <linux/bitfield.h>
#include <linux/uaccess.h>
+#include <net/netdev_lock.h>
+
/* ethtool support for iavf */
#include "iavf.h"
@@ -1256,9 +1258,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct iavf_fdir_fltr *fltr;
- int count = 50;
int err;
+ netdev_assert_locked(adapter->netdev);
+
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
@@ -1277,14 +1280,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (!fltr)
return -ENOMEM;
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(fltr);
- return -EINVAL;
- }
- udelay(1);
- }
-
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
if (!err)
err = iavf_fdir_add_fltr(adapter, fltr);
@@ -1292,7 +1287,6 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (err)
kfree(fltr);
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -1435,11 +1429,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
{
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
- int count = 50, err = 0;
bool symm = false;
u64 hash_flds;
+ int err = 0;
u32 hdrs;
+ netdev_assert_locked(adapter->netdev);
+
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
@@ -1463,15 +1459,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
return -EINVAL;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(rss_new);
- return -EINVAL;
- }
-
- udelay(1);
- }
-
spin_lock_bh(&adapter->adv_rss_lock);
rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
if (rss_old) {
@@ -1500,8 +1487,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!err)
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
- mutex_unlock(&adapter->crit_lock);
-
if (!rss_new_add)
kfree(rss_new);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6d7ba4d67a19..81d7249d1149 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1287,11 +1287,11 @@ static void iavf_configure(struct iavf_adapter *adapter)
/**
* iavf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
static void iavf_up_complete(struct iavf_adapter *adapter)
{
+ netdev_assert_locked(adapter->netdev);
+
iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -1410,13 +1410,13 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
/**
* iavf_down - Shutdown the connection processing
* @adapter: board private structure
- *
- * Expects to be called while holding crit_lock.
- **/
+ */
void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ netdev_assert_locked(netdev);
+
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -2025,22 +2025,21 @@ err:
* iavf_finish_config - do all netdev work that needs RTNL
* @work: our work_struct
*
- * Do work that needs both RTNL and crit_lock.
- **/
+ * Do work that needs RTNL.
+ */
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
- bool locks_released = false;
+ bool netdev_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
/* Always take RTNL first to prevent circular lock dependency;
- * The dev->lock is needed to update the queue number
+ * the dev->lock (== netdev lock) is needed to update the queue number.
*/
rtnl_lock();
netdev_lock(adapter->netdev);
- mutex_lock(&adapter->crit_lock);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
adapter->netdev->reg_state == NETREG_REGISTERED &&
@@ -2059,22 +2058,21 @@ static void iavf_finish_config(struct work_struct *work)
netif_set_real_num_tx_queues(adapter->netdev, pairs);
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(adapter->netdev);
- locks_released = true;
+ netdev_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again. */
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(adapter->netdev);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
goto out;
}
}
@@ -2090,10 +2088,8 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- if (!locks_released) {
- mutex_unlock(&adapter->crit_lock);
+ if (!netdev_released)
netdev_unlock(adapter->netdev);
- }
rtnl_unlock();
}
@@ -2911,28 +2907,15 @@ err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
-/**
- * iavf_watchdog_task - Periodic call-back task
- * @work: pointer to work_struct
- **/
-static void iavf_watchdog_task(struct work_struct *work)
+static const int IAVF_NO_RESCHED = -1;
+
+/* Return: delay in msec before requeueing itself, or IAVF_NO_RESCHED */
+static int iavf_watchdog_step(struct iavf_adapter *adapter)
{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- watchdog_task.work);
- struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- netdev_lock(netdev);
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE) {
- netdev_unlock(netdev);
- return;
- }
-
- goto restart_watchdog;
- }
+ netdev_assert_locked(adapter->netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
@@ -2940,39 +2923,19 @@ static void iavf_watchdog_task(struct work_struct *work)
switch (adapter->state) {
case __IAVF_STARTUP:
iavf_startup(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(30));
- return;
+ return 30;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(1));
- return;
+ return 1;
case __IAVF_INIT_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -2980,27 +2943,18 @@ static void iavf_watchdog_task(struct work_struct *work)
* watchdog task, iavf_remove should handle this state
* as it can loop forever
*/
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- return;
+ return IAVF_NO_RESCHED;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, (5 * HZ));
- return;
+ return 5000;
}
/* Try again from failed step */
iavf_change_state(adapter, adapter->last_state);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
- return;
+ return 1000;
case __IAVF_COMM_FAILED:
if (test_bit(__IAVF_IN_REMOVE_TASK,
&adapter->crit_section)) {
@@ -3010,9 +2964,7 @@ static void iavf_watchdog_task(struct work_struct *work)
*/
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- return;
+ return IAVF_NO_RESCHED;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -3030,18 +2982,9 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task,
- msecs_to_jiffies(10));
- return;
+ return 10;
case __IAVF_RESETTING:
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
- return;
+ return 2000;
case __IAVF_DOWN:
case __IAVF_DOWN_PENDING:
case __IAVF_TESTING:
@@ -3068,9 +3011,7 @@ static void iavf_watchdog_task(struct work_struct *work)
break;
case __IAVF_REMOVE:
default:
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- return;
+ return IAVF_NO_RESCHED;
}
/* check for hw reset */
@@ -3080,24 +3021,29 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
- mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
- queue_delayed_work(adapter->wq,
- &adapter->watchdog_task, HZ * 2);
- return;
}
- mutex_unlock(&adapter->crit_lock);
-restart_watchdog:
- netdev_unlock(netdev);
+ return adapter->aq_required ? 20 : 2000;
+}
+
+static void iavf_watchdog_task(struct work_struct *work)
+{
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
+ watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
+ int msec_delay;
+
+ netdev_lock(netdev);
+ msec_delay = iavf_watchdog_step(adapter);
+	/* note that we schedule a different work item (the adminq task) */
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
- if (adapter->aq_required)
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(20));
- else
+
+ if (msec_delay != IAVF_NO_RESCHED)
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- HZ * 2);
+ msecs_to_jiffies(msec_delay));
+ netdev_unlock(netdev);
}
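
The refactor above separates policy from mechanics: iavf_watchdog_step()
returns how long to wait before the next pass, while the work function
owns the locking and requeueing. A hedged illustration of the contract a
state handler now follows (names are illustrative):

	static int example_state_step(struct iavf_adapter *adapter)
	{
		if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
			return 5000;		/* re-run in 5 seconds */

		return IAVF_NO_RESCHED;		/* stop requeueing */
	}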
/**
@@ -3105,14 +3051,15 @@ restart_watchdog:
* @adapter: board private structure
*
* Set communication failed flag and free all resources.
- * NOTE: This function is expected to be called with crit_lock being held.
- **/
+ */
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *fv, *fvtmp;
struct iavf_cloud_filter *cf, *cftmp;
+ netdev_assert_locked(adapter->netdev);
+
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
/* We don't use netif_running() because it may be true prior to
@@ -3212,17 +3159,7 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
- /* When device is being removed it doesn't make sense to run the reset
- * task, just return in such a case.
- */
netdev_lock(netdev);
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state != __IAVF_REMOVE)
- queue_work(adapter->wq, &adapter->reset_task);
-
- netdev_unlock(netdev);
- return;
- }
iavf_misc_irq_disable(adapter);
if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
@@ -3267,12 +3204,22 @@ static void iavf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
continue_reset:
+ /* If we are still early in the state machine, just restart. */
+ if (adapter->state <= __IAVF_INIT_FAILED) {
+ iavf_shutdown_adminq(hw);
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ iavf_startup(adapter);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ netdev_unlock(netdev);
+ return;
+ }
+
/* We don't use netif_running() because it may be true prior to
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
@@ -3411,7 +3358,6 @@ continue_reset:
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
wake_up(&adapter->reset_waitqueue);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
return;
@@ -3422,7 +3368,6 @@ reset_err:
}
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3435,6 +3380,7 @@ static void iavf_adminq_task(struct work_struct *work)
{
struct iavf_adapter *adapter =
container_of(work, struct iavf_adapter, adminq_task);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
enum virtchnl_ops v_op;
@@ -3442,13 +3388,7 @@ static void iavf_adminq_task(struct work_struct *work)
u32 val, oldval;
u16 pending;
- if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
- return;
-
- queue_work(adapter->wq, &adapter->adminq_task);
- goto out;
- }
+ netdev_lock(netdev);
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto unlock;
@@ -3515,8 +3455,7 @@ static void iavf_adminq_task(struct work_struct *work)
freedom:
kfree(event.msg_buf);
unlock:
- mutex_unlock(&adapter->crit_lock);
-out:
+ netdev_unlock(netdev);
/* re-enable Admin queue interrupt cause */
iavf_misc_irq_enable(adapter);
}
@@ -4209,8 +4148,8 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
- struct iavf_cloud_filter *filter = NULL;
- int err = -EINVAL, count = 50;
+ struct iavf_cloud_filter *filter;
+ int err;
if (tc < 0) {
dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
@@ -4220,17 +4159,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
-
- while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0) {
- kfree(filter);
- return err;
- }
- udelay(1);
- }
-
filter->cookie = cls_flower->cookie;
+ netdev_lock(adapter->netdev);
+
/* bail out here if filter already exists */
spin_lock_bh(&adapter->cloud_filter_list_lock);
if (iavf_find_cf(adapter, &cls_flower->cookie)) {
@@ -4264,7 +4196,7 @@ err:
if (err)
kfree(filter);
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(adapter->netdev);
return err;
}
@@ -4568,28 +4500,13 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock)) {
- /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
- * is already taken and iavf_open is called from an upper
- * device's notifier reacting on NETDEV_REGISTER event.
- * We have to leave here to avoid dead lock.
- */
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
- return -EBUSY;
-
- usleep_range(500, 1000);
- }
-
- if (adapter->state != __IAVF_DOWN) {
- err = -EBUSY;
- goto err_unlock;
- }
+ if (adapter->state != __IAVF_DOWN)
+ return -EBUSY;
if (adapter->state == __IAVF_RUNNING &&
!test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
- err = 0;
- goto err_unlock;
+ return 0;
}
/* allocate transmit descriptors */
@@ -4608,9 +4525,7 @@ static int iavf_open(struct net_device *netdev)
goto err_req_irq;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
iavf_add_filter(adapter, adapter->hw.mac.addr);
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* Restore filters that were removed with IFF_DOWN */
@@ -4623,8 +4538,6 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
- mutex_unlock(&adapter->crit_lock);
-
return 0;
err_req_irq:
@@ -4634,8 +4547,6 @@ err_setup_rx:
iavf_free_all_rx_resources(adapter);
err_setup_tx:
iavf_free_all_tx_resources(adapter);
-err_unlock:
- mutex_unlock(&adapter->crit_lock);
return err;
}
@@ -4659,12 +4570,8 @@ static int iavf_close(struct net_device *netdev)
netdev_assert_locked(netdev);
- mutex_lock(&adapter->crit_lock);
-
- if (adapter->state <= __IAVF_DOWN_PENDING) {
- mutex_unlock(&adapter->crit_lock);
+ if (adapter->state <= __IAVF_DOWN_PENDING)
return 0;
- }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
@@ -4695,7 +4602,6 @@ static int iavf_close(struct net_device *netdev)
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
/* We explicitly don't free resources here because the hardware is
@@ -4714,11 +4620,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
-
netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
+
adapter->aq_required |= aq_to_restore;
- mutex_unlock(&adapter->crit_lock);
+
return 0;
}
@@ -5227,15 +5132,16 @@ iavf_shaper_set(struct net_shaper_binding *binding,
struct iavf_adapter *adapter = netdev_priv(binding->netdev);
const struct net_shaper_handle *handle = &shaper->handle;
struct iavf_ring *tx_ring;
- int ret = 0;
+ int ret;
+
+ netdev_assert_locked(adapter->netdev);
- mutex_lock(&adapter->crit_lock);
if (handle->id >= adapter->num_active_queues)
- goto unlock;
+ return 0;
ret = iavf_verify_shaper(binding, shaper, extack);
if (ret)
- goto unlock;
+ return ret;
tx_ring = &adapter->tx_rings[handle->id];
@@ -5245,9 +5151,7 @@ iavf_shaper_set(struct net_shaper_binding *binding,
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
-unlock:
- mutex_unlock(&adapter->crit_lock);
- return ret;
+ return 0;
}
static int iavf_shaper_del(struct net_shaper_binding *binding,
@@ -5257,9 +5161,10 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
struct iavf_adapter *adapter = netdev_priv(binding->netdev);
struct iavf_ring *tx_ring;
- mutex_lock(&adapter->crit_lock);
+ netdev_assert_locked(adapter->netdev);
+
if (handle->id >= adapter->num_active_queues)
- goto unlock;
+ return 0;
tx_ring = &adapter->tx_rings[handle->id];
tx_ring->q_shaper.bw_min = 0;
@@ -5268,8 +5173,6 @@ static int iavf_shaper_del(struct net_shaper_binding *binding,
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
-unlock:
- mutex_unlock(&adapter->crit_lock);
return 0;
}
@@ -5530,10 +5433,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_alloc_qos_cap;
}
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&adapter->crit_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -5596,22 +5495,24 @@ static int iavf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
+ bool running;
netif_device_detach(netdev);
+ running = netif_running(netdev);
+ if (running)
+ rtnl_lock();
netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
- if (netif_running(netdev)) {
- rtnl_lock();
+ if (running)
iavf_down(adapter);
- rtnl_unlock();
- }
+
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
- mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
+ if (running)
+ rtnl_unlock();
return 0;
}
@@ -5688,20 +5589,20 @@ static void iavf_remove(struct pci_dev *pdev)
* There are flows where register/unregister netdev may race.
*/
while (1) {
- mutex_lock(&adapter->crit_lock);
+ netdev_lock(netdev);
if (adapter->state == __IAVF_RUNNING ||
adapter->state == __IAVF_DOWN ||
adapter->state == __IAVF_INIT_FAILED) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
break;
}
/* Simply return if we already went through iavf_shutdown */
if (adapter->state == __IAVF_REMOVE) {
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
- mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
usleep_range(500, 1000);
}
cancel_delayed_work_sync(&adapter->watchdog_task);
@@ -5711,7 +5612,6 @@ static void iavf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
netdev_lock(netdev);
- mutex_lock(&adapter->crit_lock);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5727,9 +5627,11 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
+ netdev_unlock(netdev);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->adminq_task);
+ netdev_lock(netdev);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -5747,8 +5649,6 @@ static void iavf_remove(struct pci_dev *pdev)
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
mutex_destroy(&hw->aq.asq_mutex);
- mutex_unlock(&adapter->crit_lock);
- mutex_destroy(&adapter->crit_lock);
netdev_unlock(netdev);
iounmap(hw->hw_addr);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index a6f0e5990be2..07f0d0a0f1e2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -79,6 +79,23 @@ iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
return iavf_status_to_errno(status);
received_op =
(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+
+ if (received_op == VIRTCHNL_OP_EVENT) {
+ struct iavf_adapter *adapter = hw->back;
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event->msg_buf;
+
+ if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
+ continue;
+
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
+ iavf_schedule_reset(adapter,
+ IAVF_FLAG_RESET_PENDING);
+
+ return -EIO;
+ }
+
if (op_to_poll == received_op)
break;
}
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index fcb199efbea5..4af60e2f37df 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -1339,8 +1339,13 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);
return 0;
}
@@ -1350,19 +1355,24 @@ static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool roce_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!roce_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return ret;
}
@@ -1373,11 +1383,16 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1390,8 +1405,13 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);
return 0;
}
@@ -1401,19 +1421,24 @@ static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool iw_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!iw_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return ret;
}
@@ -1428,7 +1453,7 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index fd083647c14a..ddd0ad68185b 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -193,8 +193,6 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
-#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
-
enum ice_feature {
ICE_F_DSCP,
ICE_F_PHY_RCLK,
@@ -401,7 +399,6 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
- u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
@@ -515,6 +512,7 @@ enum ice_pf_flags {
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
+ ICE_FLAG_LLDP_AQ_FLTR,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -557,7 +555,6 @@ struct ice_pf {
struct devlink_port devlink_port;
/* OS reserved IRQ details */
- struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
struct ice_virt_irq_tracker virt_irq_tracker;
@@ -592,7 +589,6 @@ struct ice_pf {
struct gnss_serial *gnss_serial;
struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
- u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
@@ -625,14 +621,12 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
- u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
- struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
/* count of tc_flower filters specific to channel (aka where filter
@@ -664,6 +658,7 @@ struct ice_pf {
struct ice_dplls dplls;
struct device *hwmon_dev;
struct ice_health health_reporters;
+ struct iidc_rdma_core_dev_info *cdev_info;
u8 num_quanta_prof_used;
};
@@ -1045,4 +1040,62 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
}
extern const struct xdp_metadata_ops ice_xdp_md_ops;
+
+/**
+ * ice_is_dual - Check if given config is multi-NAC
+ * @hw: pointer to HW structure
+ *
+ * Return: true if the device is running in multi-NAC (Network
+ * Acceleration Complex) configuration variant, false otherwise
+ * (always false for non-E825 devices).
+ */
+static inline bool ice_is_dual(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
+}
+
+/**
+ * ice_is_primary - Check if given device belongs to the primary complex
+ * @hw: pointer to HW structure
+ *
+ * Check if given PF/HW is running on primary complex in multi-NAC
+ * configuration.
+ *
+ * Return: true if the device runs on the primary complex, false
+ * otherwise (always true for non-E825 devices).
+ */
+static inline bool ice_is_primary(struct ice_hw *hw)
+{
+ return hw->mac_type != ICE_MAC_GENERIC_3K_E825 ||
+ !ice_is_dual(hw) ||
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
+}
+
+/**
+ * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF
+ * @pf: pointer to PF structure
+ *
+ * Return: true if PF owns primary timer, false otherwise.
+ */
+static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf)
+{
+ return pf->hw.func_caps.ts_func_info.src_tmr_owned &&
+ ice_is_primary(&pf->hw);
+}
+
+/**
+ * ice_get_primary_hw - Get pointer to primary ice_hw structure
+ * @pf: pointer to PF structure
+ *
+ * Return: A pointer to ice_hw structure with access to timesync
+ * register space.
+ */
+static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf)
+{
+ if (!pf->adapter->ctrl_pf)
+ return &pf->hw;
+ else
+ return &pf->adapter->ctrl_pf->hw;
+}
#endif /* _ICE_H_ */
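
A hedged usage sketch of the new helpers: on a dual-NAC E825 only the
primary complex owns the source timer, so timesync code checks ownership
and routes register access through the primary HW instance (the function
name is illustrative):

	static void example_src_tmr_access(struct ice_pf *pf)
	{
		struct ice_hw *hw = ice_get_primary_hw(pf);

		if (ice_pf_src_tmr_owned(pf))
			dev_dbg(ice_pf_to_dev(pf),
				"PF owns the source timer; using hw %p\n", hw);
	}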
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 2bc5c7f59844..1f7834c03550 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -378,6 +378,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
}
/**
+ * ice_arfs_cmp - Check if aRFS filter matches this flow.
+ * @fltr_info: filter info of the saved ARFS entry.
+ * @fk: flow dissector keys.
+ * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
+ * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
+ *
+ * Since this function assumes limited values for n_proto and ip_proto, it
+ * is meant to be called only from ice_rx_flow_steer().
+ *
+ * Return:
+ * * true - fltr_info refers to the same flow as fk.
+ * * false - fltr_info and fk refer to different flows.
+ */
+static bool
+ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
+ __be16 n_proto, u8 ip_proto)
+{
+ /* Determine if the filter is for IPv4 or IPv6 based on flow_type,
+ * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
+ */
+ bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+	/* The following checks are ordered so the quickest and most
+	 * discriminative fields are compared first, for early failure.
+ */
+ if (is_v4)
+ return n_proto == htons(ETH_P_IP) &&
+ fltr_info->ip.v4.src_port == fk->ports.src &&
+ fltr_info->ip.v4.dst_port == fk->ports.dst &&
+ fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
+ fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
+ fltr_info->ip.v4.proto == ip_proto;
+
+ return fltr_info->ip.v6.src_port == fk->ports.src &&
+ fltr_info->ip.v6.dst_port == fk->ports.dst &&
+ fltr_info->ip.v6.proto == ip_proto &&
+ !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+}
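For orientation, a sketch of how ice_arfs_cmp() slots into the flow-steering lookup below; skb_flow_dissect_flow_keys() is the real kernel API, while the hash-list walk is a simplified stand-in for the driver's internal table:

struct flow_keys fk;

if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
	return -EPROTONOSUPPORT;

hlist_for_each_entry(arfs_entry, head, list_entry) {
	if (!ice_arfs_cmp(&arfs_entry->fltr_info, &fk, n_proto, ip_proto))
		continue;
	/* matched: reuse the existing filter's ID */
	ret = arfs_entry->fltr_info.fltr_id;
	break;
}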
+
+/**
* ice_rx_flow_steer - steer the Rx flow to where application is being run
* @netdev: ptr to the netdev being adjusted
* @skb: buffer with required header information
@@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
continue;
fltr_info = &arfs_entry->fltr_info;
+
+ if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
+ continue;
+
ret = fltr_info->fltr_id;
if (fltr_info->q_index == rxq_idx ||
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 59df31c2c83f..4fedf0181c4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1135,6 +1135,8 @@ int ice_init_hw(struct ice_hw *hw)
}
}
+ hw->lane_num = ice_get_phy_lane_number(hw);
+
return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
@@ -3434,7 +3436,7 @@ int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
msg.msg_addr_low = lower_16_bits(reg_offset);
msg.msg_addr_high = receiver_id;
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, flag);
if (err)
@@ -4082,10 +4084,12 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
continue;
if (hw->pf_id == lport) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ ice_is_dual(hw) && !ice_is_primary(hw))
+ lane += ICE_PORTS_PER_QUAD;
kfree(options);
return lane;
}
-
lport++;
}
@@ -6011,15 +6015,21 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
/**
* ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
* @hw: pointer to HW struct
- * @vsi_num: absolute HW index for VSI
+ * @vsi: VSI to add the filter to
* @add: boolean for if adding or removing a filter
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed
+ * with this HW or VSI, otherwise an error corresponding to
+ * the AQ transaction result.
*/
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
struct ice_aq_desc desc;
+ if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
+ return -EOPNOTSUPP;
+
cmd = &desc.params.lldp_filter_ctrl;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
@@ -6029,7 +6039,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
else
cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
- cmd->vsi_num = cpu_to_le16(vsi_num);
+ cmd->vsi_num = cpu_to_le16(vsi->vsi_num);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 9b00aa0ddf10..64c530b39191 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -290,8 +290,7 @@ int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add);
int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 74418c445cc4..64737fc62306 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1288,7 +1288,7 @@ ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
/* bytes 0 - 63 - IPv4 DSCP2UP LUT */
- for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ for (i = 0; i < DSCP_MAX; i++) {
/* IPv4 mapping */
buf[i] = dcbcfg->dscp_map[i];
/* IPv6 mapping */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a7c510832824..533eb8930aa8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -352,8 +352,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
+ struct iidc_rdma_event *event;
int ret = ICE_DCB_NO_HW_CHG;
- struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -405,7 +405,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
- set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
@@ -740,7 +740,9 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- struct iidc_event *event;
+ struct iidc_rdma_priv_dev_info *privd;
+ struct iidc_rdma_core_dev_info *cdev;
+ struct iidc_rdma_event *event;
u8 tc_map = 0;
int v, ret;
@@ -783,13 +785,17 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- if (!locked) {
+
+ cdev = pf->cdev_info;
+ if (cdev && !locked) {
+ privd = cdev->iidc_priv;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
/* Notify the AUX drivers that TC change is finished */
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -846,7 +852,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- ice_cfg_sw_lldp(pf_vsi, false, true);
+ ice_cfg_sw_rx_lldp(pf, true);
pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
@@ -945,6 +951,37 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
/**
+ * ice_setup_dcb_qos_info - Setup DCB QoS information
+ * @pf: ptr to ice_pf
+ * @qos_info: QoS param instance
+ */
+void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ struct ice_dcbx_cfg *dcbx_cfg;
+ unsigned int i;
+ u32 up2tc;
+
+ if (!pf || !qos_info)
+ return;
+
+ dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+
+ qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
+
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos_info->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < DSCP_MAX; i++)
+ qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
+}
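The up2tc decode above unpacks PRTDCB_TUP2TC, which carries one 3-bit TC index per user priority. A worked example with an assumed register value:

u32 up2tc = 0x688;	/* assumed: UP0->TC0, UP1->TC1, UP2->TC2, UP3->TC3 */
u8 tc3 = (up2tc >> (3 * 3)) & 0x7;	/* UP3: (0x688 >> 9) & 0x7 == 3 */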
+
+/**
* ice_dcb_is_mib_change_pending - Check if MIB change is pending
* @state: MIB change state
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 800879a88c5e..da9ba814b4e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -31,6 +31,9 @@ void
ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
+ice_setup_dcb_qos_info(struct ice_pf *pf,
+ struct iidc_rdma_qos_params *qos_info);
+void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
/**
@@ -134,5 +137,11 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
+static inline void
+ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ qos_info->num_tc = 1;
+ qos_info->tc_info[0].rel_bw = 100;
+}
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 6d50b90a7359..a10c1c8d8697 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -754,7 +754,7 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
if (!ice_is_feature_supported(pf, ICE_F_DSCP))
return -EOPNOTSUPP;
- if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ if (app->protocol >= DSCP_MAX) {
netdev_err(netdev, "DSCP value 0x%04X out of range\n",
app->protocol);
return -EINVAL;
@@ -931,7 +931,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
/* if the last DSCP mapping just got deleted, need to switch
* to L2 VLAN QoS mode
*/
- if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) &&
new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
ret = ice_aq_set_pfc_mode(&pf->hw,
ICE_AQC_PFC_VLAN_BASED_PFC,
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index ed21d7f55ac1..2e4f0969035f 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -29,6 +29,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, false);
netif_addr_lock_bh(netdev);
__dev_uc_unsync(netdev, NULL);
@@ -245,6 +246,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
u64 cd_cmd, dst_vsi;
if (!dst) {
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+
+ if (unlikely(eth->h_proto == htons(ETH_P_LLDP)))
+ return;
cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
@@ -278,6 +283,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, true);
}
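The Tx-path check added to ice_eswitch_set_target_vsi() above keys off the LLDP ethertype; h_proto is already in network byte order, so comparing against htons(ETH_P_LLDP) folds to a constant at compile time. The same test as a standalone sketch:

static bool skb_is_lldp(const struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb_mac_header(skb);

	return eth->h_proto == htons(ETH_P_LLDP);
}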
/**
@@ -502,10 +508,14 @@ err_create_repr:
*/
int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_repr *repr;
int err;
+ if (!ice_is_eswitch_mode_switchdev(pf))
+ return 0;
+
+ repr = ice_repr_create_vf(vf);
if (IS_ERR(repr))
return PTR_ERR(repr);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 7c2dc347e4e5..bbf9e6fd315b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1818,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
- ice_cfg_sw_lldp(vsi, false, false);
+ ice_cfg_sw_rx_lldp(vsi->back, false);
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
@@ -3964,11 +3964,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- if (pf->adev) {
+ if (pf->cdev_info && pf->cdev_info->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&pf->cdev_info->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (pf->cdev_info->adev->dev.driver) {
netdev_err(dev, "Cannot change channels when RDMA is active\n");
ret = -EBUSY;
goto adev_unlock;
@@ -3987,7 +3987,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&pf->cdev_info->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 1d118171de37..aceec184e89b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1605,7 +1605,7 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
- const enum ice_fltr_ptype dflt_rules[] = {
+ static const enum ice_fltr_ptype dflt_rules[] = {
ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index bab3e81cad5d..6ab53e430f91 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -9,22 +9,25 @@
static DEFINE_XARRAY_ALLOC1(ice_aux_id);
/**
- * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
- * @pf: pointer to PF struct
+ * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
*
* This function has to be called with a device_lock on the
- * pf->adev.dev to avoid race conditions.
+ * cdev->adev.dev to avoid race conditions.
+ *
+ * Return: pointer to the matched auxiliary driver struct, or NULL.
*/
-static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+static struct iidc_rdma_core_auxiliary_drv *
+ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev)
{
struct auxiliary_device *adev;
- adev = pf->adev;
+ adev = cdev->adev;
if (!adev || !adev->dev.driver)
return NULL;
- return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
- adrv.driver);
+ return container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv, adrv.driver);
}
/**
@@ -32,44 +35,54 @@ static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
* @pf: pointer to PF struct
* @event: event struct
*/
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event)
{
- struct iidc_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_dev_info *cdev;
if (WARN_ON_ONCE(!in_task()))
return;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return;
+
mutex_lock(&pf->adev_mutex);
- if (!pf->adev)
+ if (!cdev->adev)
goto finish;
- device_lock(&pf->adev->dev);
- iadrv = ice_get_auxiliary_drv(pf);
+ device_lock(&cdev->adev->dev);
+ iadrv = ice_get_auxiliary_drv(cdev);
if (iadrv && iadrv->event_handler)
- iadrv->event_handler(pf, event);
- device_unlock(&pf->adev->dev);
+ iadrv->event_handler(cdev, event);
+ device_unlock(&cdev->adev->dev);
finish:
mutex_unlock(&pf->adev_mutex);
}
/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be allocated
+ *
+ * Return: Zero on success or error code encountered
*/
-int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
struct ice_vsi *vsi;
struct device *dev;
+ struct ice_pf *pf;
u32 qset_teid;
u16 qs_handle;
int status;
int i;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
dev = ice_pf_to_dev(pf);
if (!ice_is_rdma_ena(pf))
@@ -100,7 +113,6 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev_err(dev, "Failed VSI RDMA Qset enable\n");
return status;
}
- vsi->qset_handle[qset->tc] = qset->qs_handle;
qset->teid = qset_teid;
return 0;
@@ -109,18 +121,23 @@ EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
/**
* ice_del_rdma_qset - Delete leaf node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be freed
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
u32 teid;
u16 q_id;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, qset->vport_id);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
@@ -130,36 +147,36 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
q_id = qset->qs_handle;
teid = qset->teid;
- vsi->qset_handle[qset->tc] = 0;
-
return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
/**
* ice_rdma_request_reset - accept request from RDMA to perform a reset
- * @pf: struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @reset_type: type of reset
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type)
{
enum ice_reset_req reset;
+ struct ice_pf *pf;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
+
switch (reset_type) {
- case IIDC_PFR:
+ case IIDC_FUNC_RESET:
reset = ICE_RESET_PFR;
break;
- case IIDC_CORER:
+ case IIDC_DEV_RESET:
reset = ICE_RESET_CORER;
break;
- case IIDC_GLOBR:
- reset = ICE_RESET_GLOBR;
- break;
default:
- dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
return -EINVAL;
}
@@ -169,18 +186,23 @@ EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
/**
* ice_rdma_update_vsi_filter - update main VSI filters for RDMA
- * @pf: pointer to struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @vsi_id: VSI HW idx to update filter on
* @enable: bool whether to enable or disable filters
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev,
+ u16 vsi_id, bool enable)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
int status;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, vsi_id);
if (!vsi)
return -EINVAL;
@@ -201,37 +223,23 @@ int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
/**
- * ice_get_qos_params - parse QoS params for RDMA consumption
- * @pf: pointer to PF struct
- * @qos: set of QoS values
+ * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be filled with the allocated vector
+ *
+ * Return: Zero on success, error code on failure
*/
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- struct ice_dcbx_cfg *dcbx_cfg;
- unsigned int i;
- u32 up2tc;
-
- dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
-
- qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
-
- qos->pfc_mode = dcbx_cfg->pfc_mode;
- if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
- for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
- qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
-}
-EXPORT_SYMBOL_GPL(ice_get_qos_params);
+ struct msi_map map;
+ struct ice_pf *pf;
-int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
-{
- struct msi_map map = ice_alloc_irq(pf, true);
+ if (WARN_ON(!cdev))
+ return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
+ map = ice_alloc_irq(pf, true);
if (map.index < 0)
return -ENOMEM;
@@ -244,12 +252,19 @@ EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
- * @pf: board private structure to initialize
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @entry: MSI-X entry to be removed
*/
-void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
struct msi_map map;
+ struct ice_pf *pf;
+
+ if (WARN_ON(!cdev || !entry))
+ return;
+
+ pf = pci_get_drvdata(cdev->pdev);
map.index = entry->entry;
map.virq = entry->vector;
@@ -263,19 +278,23 @@ EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
*/
static void ice_adev_release(struct device *dev)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
- iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev,
+ adev.dev);
kfree(iadev);
}
/**
* ice_plug_aux_dev - allocate and register AUX device
* @pf: pointer to pf struct
+ *
+ * Return: Zero on success, error code on failure
*/
int ice_plug_aux_dev(struct ice_pf *pf)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ struct iidc_rdma_core_dev_info *cdev;
struct auxiliary_device *adev;
int ret;
@@ -285,17 +304,22 @@ int ice_plug_aux_dev(struct ice_pf *pf)
if (!ice_is_rdma_ena(pf))
return 0;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
adev = &iadev->adev;
- iadev->pf = pf;
+ iadev->cdev_info = cdev;
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
+ adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -310,7 +334,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
}
mutex_lock(&pf->adev_mutex);
- pf->adev = adev;
+ cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
return 0;
@@ -324,8 +348,8 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
struct auxiliary_device *adev;
mutex_lock(&pf->adev_mutex);
- adev = pf->adev;
- pf->adev = NULL;
+ adev = pf->cdev_info->adev;
+ pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
if (adev) {
@@ -340,7 +364,9 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
*/
int ice_init_rdma(struct ice_pf *pf)
{
+ struct iidc_rdma_priv_dev_info *privd;
struct device *dev = &pf->pdev->dev;
+ struct iidc_rdma_core_dev_info *cdev;
int ret;
if (!ice_is_rdma_ena(pf)) {
@@ -348,22 +374,50 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
}
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ pf->cdev_info = cdev;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ ret = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ privd->pf_id = pf->hw.pf_id;
ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
GFP_KERNEL);
if (ret) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_alloc_xa;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->iidc_priv = privd;
+ privd->netdev = pf->vsi[0]->netdev;
+
+ privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
+ cdev->pdev = pf->pdev;
+ privd->vport_id = pf->vsi[0]->vsi_num;
+
+ pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
ret = ice_plug_aux_dev(pf);
if (ret)
goto err_plug_aux_dev;
return 0;
err_plug_aux_dev:
- pf->adev = NULL;
+ pf->cdev_info->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
+err_alloc_xa:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev);
+ pf->cdev_info = NULL;
+
return ret;
}
@@ -378,4 +432,7 @@ void ice_deinit_rdma(struct ice_pf *pf)
ice_unplug_aux_dev(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
+ kfree(pf->cdev_info->iidc_priv);
+ kfree(pf->cdev_info);
+ pf->cdev_info = NULL;
}
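On the consumer side, an RDMA auxiliary driver recovers the handover structure from the auxiliary device in its probe callback. A hypothetical sketch (names are illustrative, not the actual irdma code):

static int my_rdma_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct iidc_rdma_core_auxiliary_dev *iadev;
	struct iidc_rdma_core_dev_info *cdev;

	iadev = container_of(adev, struct iidc_rdma_core_auxiliary_dev, adev);
	cdev = iadev->cdev_info;

	/* cdev->pdev, cdev->iidc_priv and cdev->rdma_protocol now carry
	 * what ice_plug_aux_dev() and ice_init_rdma() filled in.
	 */
	return 0;
}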
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index 4b0c86757df9..17dbfcfb6a2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -4,10 +4,11 @@
#ifndef _ICE_IDC_INT_H_
#define _ICE_IDC_INT_H_
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
struct ice_pf;
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event);
#endif /* !_ICE_IDC_INT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 22371011c249..2410aee59fb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
if (!primary_lag) {
lag->primary = true;
+ if (!ice_is_switchdev_running(lag->pf))
+ return;
+
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
} else {
u16 swid;
+ if (!ice_is_switchdev_running(primary_lag->pf))
+ return;
+
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0bcf9d127ac9..03bb16191237 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2065,12 +2065,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
}
/**
- * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
+ * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
* @create: bool to determine create or remove Rule
+ *
+ * Adding an ethertype Tx rule to the uplink VSI results in it being
+ * applied to the whole port, so LLDP transmission for VFs will be
+ * blocked too.
*/
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
enum ice_sw_fwd_act_type act);
@@ -2085,19 +2088,59 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
} else {
- if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
- status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
- create);
- } else {
+ if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
ICE_FWD_TO_VSI);
+ if (!status || !create)
+ goto report;
+
+ dev_info(dev,
+ "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
+ vsi->vsi_num, status);
}
+
+ status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
+ if (!status)
+ set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
+
}
+report:
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
- create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
+ dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
+ create ? "add" : "remove", tx ? "Tx" : "Rx",
+ vsi->vsi_num, status);
+}
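The Rx branch above is now a try-then-fall-back sequence: attempt the generic switch rule first and, if adding it fails, switch to the firmware's dedicated AQ filter control, caching that choice in pf->flags so later calls skip the failing path. Reduced to a sketch (generic_rx_rule() stands in for the eth_fltr callback):

if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
	status = generic_rx_rule(vsi);		/* hypothetical helper */
	if (!status || !create)
		goto report;
	/* add failed: fall back to the AQ-based filter */
}
status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
if (!status)
	set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);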
+
+/**
+ * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
+ * @pf: the PF being configured
+ * @enable: enable or disable
+ *
+ * Configure switch rules to enable/disable LLDP handling by software
+ * across PF.
+ */
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
+{
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ vsi = ice_get_main_vsi(pf);
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+
+ if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ return;
+
+ ice_for_each_vf(pf, bkt, vf) {
+ vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ continue;
+
+ if (ice_vf_is_lldp_ena(vf))
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+ }
}
/**
@@ -2528,7 +2571,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
+ ice_vsi_cfg_sw_lldp(vsi, true, true);
}
if (!vsi->agg_node)
@@ -2825,9 +2868,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
+ if (!ice_is_safe_mode(pf) &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) &&
+ (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_lldp_ena(vsi->vf))))
+ ice_vsi_cfg_sw_lldp(vsi, false, false);
ice_vsi_decfg(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index b4c9cb28a016..654516c5fc3e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -29,7 +29,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable);
int ice_set_link(struct ice_vsi *vsi, bool ena);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d390157b59fe..0a11b4281092 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1743,7 +1743,7 @@ static void ice_service_task_restart(struct ice_pf *pf)
*/
static void ice_service_timer(struct timer_list *t)
{
- struct ice_pf *pf = from_timer(pf, t, serv_tmr);
+ struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);
mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
ice_service_task_schedule(pf);
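The from_timer() -> timer_container_of() switch above follows the tree-wide rename; semantics are unchanged. A minimal sketch with a hypothetical driver struct:

struct my_dev {
	struct timer_list serv_tmr;
};

static void my_timer_fn(struct timer_list *t)
{
	/* resolve the enclosing structure from the timer pointer */
	struct my_dev *dev = timer_container_of(dev, t, serv_tmr);

	(void)dev;
}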
@@ -2401,11 +2401,11 @@ static void ice_service_task(struct work_struct *work)
}
if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
/* report the entire OICR value to AUX driver */
swap(event->reg, pf->oicr_err_reg);
ice_send_event_to_aux(pf, event);
@@ -2424,11 +2424,11 @@ static void ice_service_task(struct work_struct *work)
ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -2741,6 +2741,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
}
/**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.tx_ring = ring;
+ }
+}
+
+/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
@@ -2803,7 +2824,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
if (status) {
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
status);
- goto clear_xdp_rings;
+ goto unmap_xdp_rings;
}
/* assign the prog only when it's not already present on VSI;
@@ -2819,6 +2840,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
+unmap_xdp_rings:
+ ice_unmap_xdp_rings(vsi);
clear_xdp_rings:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
@@ -2835,6 +2858,8 @@ err_map_xdp:
mutex_unlock(&pf->avail_q_mutex);
devm_kfree(dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
return -ENOMEM;
}
@@ -2850,7 +2875,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
- int i, v_idx;
+ int i;
/* q_vectors are freed in reset path so there's no point in detaching
* rings
@@ -2858,17 +2883,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx)
- if (!ring->tx_buf || !ice_ring_is_xdp(ring))
- break;
-
- /* restore the value of last node prior to XDP setup */
- q_vector->tx.tx_ring = ring;
- }
+ ice_unmap_xdp_rings(vsi);
free_qmap:
mutex_lock(&pf->avail_q_mutex);
@@ -3013,11 +3028,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ goto resume_if;
} else {
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
ICE_XDP_CFG_FULL);
- if (xdp_ring_err)
+ if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ goto resume_if;
+ }
}
xdp_features_set_redirect_target(vsi->netdev, true);
/* reallocate Rx queues that are used for zero-copy */
@@ -3035,6 +3053,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
}
+resume_if:
if (if_running)
ret = ice_up(vsi);
@@ -8330,11 +8349,16 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @np: net device to configure
* @filter_dev: device on which filter is added
* @cls_flower: offload data
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added or deleted,
+ * negative error code otherwise.
*/
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
struct net_device *filter_dev,
- struct flow_cls_offload *cls_flower)
+ struct flow_cls_offload *cls_flower,
+ bool ingress)
{
struct ice_vsi *vsi = np->vsi;
@@ -8343,7 +8367,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(vsi, cls_flower);
default:
@@ -8352,20 +8376,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
}
/**
- * ice_setup_tc_block_cb - callback handler registered for TC block
+ * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
+ */
+static int
+ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb_egress - callback handler for egress TC block
* @type: TC SETUP type
* @type_data: TC flower offload data that contains user input
* @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
*/
static int
-ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
struct ice_netdev_priv *np = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, np->vsi->netdev,
- type_data);
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -9310,27 +9360,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ enum flow_block_binder_type binder_type;
+ struct iidc_rdma_core_dev_info *cdev;
struct ice_pf *pf = np->vsi->back;
+ flow_setup_cb_t *flower_handler;
bool locked = false;
int err;
switch (type) {
case TC_SETUP_BLOCK:
+ binder_type =
+ ((struct flow_block_offload *)type_data)->binder_type;
+
+ switch (binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ flower_handler = ice_setup_tc_block_cb_ingress;
+ break;
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ flower_handler = ice_setup_tc_block_cb_egress;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
return flow_block_cb_setup_simple(type_data,
&ice_block_cb_list,
- ice_setup_tc_block_cb,
- np, np, true);
+ flower_handler,
+ np, np, false);
case TC_SETUP_QDISC_MQPRIO:
if (ice_is_eswitch_mode_switchdev(pf)) {
netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
return -EOPNOTSUPP;
}
- if (pf->adev) {
+ cdev = pf->cdev_info;
+ if (cdev && cdev->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&cdev->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (cdev->adev->dev.driver) {
netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
err = -EBUSY;
goto adev_unlock;
@@ -9344,7 +9412,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&cdev->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return err;
@@ -9380,7 +9448,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, priv->netdev,
(struct flow_cls_offload *)
- type_data);
+ type_data, false);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 1fd1ae03eb90..55cad824c5b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -305,6 +305,9 @@ u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
u32 hi, lo, lo2;
u8 tmr_idx;
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
tmr_idx = ice_get_ptp_src_clock_index(hw);
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
@@ -1624,14 +1627,6 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
int pin_desc_idx;
u8 tmr_idx;
- /* Reject requests with unsupported flags */
-
- if (rq->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
chan = rq->index;
@@ -1802,9 +1797,6 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
struct ice_hw *hw = &pf->hw;
int pin_desc_idx;
- if (rq->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
if (pin_desc_idx < 0)
return -EIO;
@@ -2307,6 +2299,7 @@ static int ice_capture_crosststamp(ktime_t *device,
ts = ((u64)ts_hi << 32) | ts_lo;
system->cycles = ts;
system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
/* Read Device source clock time */
ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
@@ -2737,6 +2730,11 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
+ info->supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ info->supported_perout_flags = PTP_PEROUT_PHASE;
+
switch (pf->hw.mac_type) {
case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
@@ -2986,6 +2984,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
+ * @pf: Board private structure
+ * @rebuild: rebuild if true, prepare if false
+ * @reset_type: the reset type being performed
+ */
+static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
+ enum ice_reset_req reset_type)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->adapter->ports.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_node);
+ struct ice_pf *peer_pf = ptp_port_to_pf(port);
+
+ if (!ice_is_primary(&peer_pf->hw)) {
+ if (rebuild)
+ ice_ptp_rebuild(peer_pf, reset_type);
+ else
+ ice_ptp_prepare_for_reset(peer_pf, reset_type);
+ }
+ }
+}
+
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
* @reset_type: the reset type being performed
@@ -2993,6 +3017,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
u8 src_tmr;
if (ptp->state != ICE_PTP_READY)
@@ -3008,6 +3033,9 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (reset_type == ICE_RESET_PFR)
return;
+ if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
+
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
/* Disable periodic outputs */
@@ -3129,13 +3157,6 @@ err:
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
-static bool ice_is_primary(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) :
- true;
-}
-
static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
@@ -3355,17 +3376,16 @@ void ice_ptp_init(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
- int lane_num, err;
+ int err;
ptp->state = ICE_PTP_INITIALIZING;
- lane_num = ice_get_phy_lane_number(hw);
- if (lane_num < 0) {
- err = lane_num;
+ if (hw->lane_num < 0) {
+ err = hw->lane_num;
goto err_exit;
}
+ ptp->port.port_num = hw->lane_num;
- ptp->port.port_num = (u8)lane_num;
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 89bb8461284a..ccac84eb34c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -240,7 +240,7 @@ static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg = {
.opcode = ice_sbq_msg_rd,
- .dest_dev = cgu,
+ .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr
};
int err;
@@ -272,7 +272,7 @@ static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
.opcode = ice_sbq_msg_wr,
- .dest_dev = cgu,
+ .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
.data = val
};
@@ -874,8 +874,12 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
*/
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -891,6 +895,9 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
@@ -919,16 +926,24 @@ static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
*
* Return: destination sideband queue PHY device.
*/
-static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
- u8 port)
+static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
{
- /* On a single complex E825, PHY 0 is always destination device phy_0
+ u8 curr_phy, tgt_phy;
+
+ tgt_phy = port >= hw->ptp.ports_per_phy;
+ curr_phy = hw->lane_num >= hw->ptp.ports_per_phy;
+ /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY.
+ * On a single complex E825C, PHY 0 is always destination device phy_0
* and PHY 1 is phy_0_peer.
+ * On dual complex E825C, device phy_0 points to the PHY on the
+ * current complex and phy_0_peer to the PHY on the other complex.
*/
- if (port >= hw->ptp.ports_per_phy)
- return eth56g_phy_1;
+ if ((!ice_is_dual(hw) && tgt_phy == 1) ||
+ (ice_is_dual(hw) && tgt_phy != curr_phy))
+ return ice_sbq_dev_phy_0_peer;
else
- return eth56g_phy_0;
+ return ice_sbq_dev_phy_0;
}
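A worked pass through the logic above with assumed values (ports_per_phy = 4): a caller on lane 1 (curr_phy = 0) targeting port 6 (tgt_phy = 1) resolves to the peer device on both single and dual complexes, while port 2 on a dual complex stays local:

u8 port = 6, ports_per_phy = 4, lane_num = 1;
bool tgt_phy = port >= ports_per_phy;		/* true  */
bool curr_phy = lane_num >= ports_per_phy;	/* false */

/* single complex: tgt_phy             -> ice_sbq_dev_phy_0_peer
 * dual complex:   tgt_phy != curr_phy -> ice_sbq_dev_phy_0_peer
 * (port = 2 on dual: tgt_phy == curr_phy -> ice_sbq_dev_phy_0)
 */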
/**
@@ -2417,6 +2432,7 @@ int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
u64 *phy_time, u64 *phc_time)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u64 tx_time, rx_time;
u32 zo, lo;
u8 tmr_idx;
@@ -2436,8 +2452,13 @@ static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
- zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
- lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ if (ice_is_primary(hw)) {
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ } else {
+ zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx));
+ }
*phc_time = (u64)lo << 32 | zo;
/* Read the captured PHY time from the PHY shadow registers */
@@ -2574,6 +2595,7 @@ int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
*/
int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 lo, hi;
u64 incval;
u8 tmr_idx;
@@ -2599,8 +2621,13 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
if (err)
return err;
- lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
- hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ if (ice_is_primary(hw)) {
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ } else {
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx));
+ }
incval = (u64)hi << 32 | lo;
err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
@@ -2631,25 +2658,6 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
}
/**
- * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access
- * @hw: pointer to HW struct
- * @enable: Enable or disable access
- *
- * Enable sideband devices (PHY and others) access.
- */
-static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
-{
- u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
-
- if (enable)
- val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
- else
- val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
-
- wr32(hw, PF_SB_REM_DEV_CTL, val);
-}
-
-/**
* ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization
* @hw: pointer to HW struct
*
@@ -2659,8 +2667,6 @@ static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
*/
static int ice_ptp_init_phc_e825(struct ice_hw *hw)
{
- ice_sb_access_ena_eth56g(hw, true);
-
/* Initialize the Clock Generation Unit */
return ice_init_cgu_e82x(hw);
}
@@ -2747,8 +2753,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
-
- ice_sb_access_ena_eth56g(hw, true);
}
/* E822 family functions
@@ -2781,7 +2785,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- msg->dest_dev = rmn_0;
+ msg->dest_dev = ice_sbq_dev_phy_0;
}
/**
@@ -3104,7 +3108,7 @@ static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
- msg->dest_dev = rmn_0;
+ msg->dest_dev = ice_sbq_dev_phy_0;
if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
@@ -4823,7 +4827,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
@@ -4853,7 +4857,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_wr;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
msg.data = val;
err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index e5925ccc2613..83f20fa7ace7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -444,11 +444,6 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
}
}
-static inline bool ice_is_dual(struct ice_hw *hw)
-{
- return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
-}
-
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index fb7a1b9a4313..cb08746556a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -219,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
{
switch (flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower,
+ true);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(repr->src_vsi, flower);
default:
@@ -336,6 +337,7 @@ void ice_repr_destroy(struct ice_repr *repr)
static void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
@@ -417,6 +419,10 @@ static int ice_repr_add_vf(struct ice_repr *repr)
if (err)
goto err_netdev;
+ err = ice_drop_vf_tx_lldp(repr->src_vsi, true);
+ if (err)
+ goto err_drop_lldp;
+
err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
if (err)
goto err_cfg_vsi;
@@ -429,6 +435,8 @@ static int ice_repr_add_vf(struct ice_repr *repr)
return 0;
err_cfg_vsi:
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
+err_drop_lldp:
unregister_netdev(repr->netdev);
err_netdev:
ice_devlink_destroy_vf_port(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 3b0054faf70c..183dd5457d6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -46,13 +46,10 @@ struct ice_sbq_evt_desc {
u8 data[24];
};
-enum ice_sbq_msg_dev {
- eth56g_phy_0 = 0x02,
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06,
- eth56g_phy_1 = 0x0D,
+enum ice_sbq_dev_id {
+ ice_sbq_dev_phy_0 = 0x02,
+ ice_sbq_dev_cgu = 0x06,
+ ice_sbq_dev_phy_0_peer = 0x0D,
};
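A sketch of a sideband read built on the renamed enum, mirroring the ice_read_cgu_reg_e82x() change earlier in this patch (addr is a caller-supplied CGU register offset):

struct ice_sbq_msg_input msg = {
	.opcode = ice_sbq_msg_rd,
	.dest_dev = ice_sbq_dev_cgu,
	.msg_addr_low = addr,
};
int err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);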
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 6ca13c5dcb14..d9d09296d1d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -85,6 +85,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
+ * ice_sched_find_next_vsi_node - find the next node for a given VSI
+ * @vsi_node: VSI support node to start search with
+ *
+ * Return: Next VSI support node, or NULL.
+ *
+ * The function returns a pointer to the next node from the VSI layer
+ * assigned to the given VSI, or NULL if there is no such node.
+ */
+static struct ice_sched_node *
+ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
+{
+ unsigned int vsi_handle = vsi_node->vsi_handle;
+
+ while ((vsi_node = vsi_node->sibling) != NULL)
+ if (vsi_node->vsi_handle == vsi_handle)
+ break;
+
+ return vsi_node;
+}
+
+/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
@@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children;
} else {
- /* This parent is full, try the next sibling */
- parent = parent->sibling;
+ /* This parent is full,
+ * try the next available sibling.
+ */
+ parent = ice_sched_find_next_vsi_node(parent);
/* Don't modify the first node TEID memory if the
* first node was added already in the above call.
* Instead send some temp memory for all other
@@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
+ struct ice_sched_node *next_vsi_node;
+
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < max_children &&
qgrp_node->owner == owner)
break;
qgrp_node = qgrp_node->sibling;
+ if (qgrp_node)
+ continue;
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+ if (!next_vsi_node)
+ break;
+
+ vsi_node = next_vsi_node;
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
}
/* Select the best queue group */
@@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the HW struct
- * @num_qs: number of queues
+ * @num_new_qs: number of new queues that will be added to the tree
* @num_nodes: num nodes array
*
* This function calculates the number of VSI child nodes based on the
* number of queues.
*/
static void
-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
{
- u16 num = num_qs;
+ u16 num = num_new_qs;
u8 i, qgl, vsil;
qgl = ice_sched_get_qgrp_layer(hw);
@@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!parent)
return -EIO;
- if (i == vsil)
+ /* Do not modify the VSI handle for already existing VSI nodes
+ * (i.e. if no new VSI node was added to the tree).
+ * Assign the VSI handle only to newly added VSI nodes.
+ */
+ if (i == vsil && num_added)
parent->vsi_handle = vsi_handle;
}
@@ -1813,6 +1851,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
}
/**
+ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
+ * @hw: pointer to the HW struct
+ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
+ * @new_numqs: new number of queues that has to be handled by the VSI
+ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
+ *
+ * This function recalculates the number of supported nodes that need to
+ * be added after adding more Tx queues for a given VSI.
+ * The number of new VSI support nodes that shall be added will be saved
+ * to the @new_num_nodes table for the VSI layer.
+ */
+static void
+ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
+ struct ice_sched_node *vsi_node,
+ unsigned int new_numqs, u16 *new_num_nodes)
+{
+ u32 vsi_nodes_cnt = 1;
+ u32 max_queue_cnt = 1;
+ u32 qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ for (u32 i = vsil; i <= qgl; i++)
+ max_queue_cnt *= hw->max_children[i];
+
+ while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
+ vsi_nodes_cnt++;
+
+ if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
+ new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
+ vsi_nodes_cnt;
+}
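The capacity math above, worked with assumed values: with max_children = 8 at both the VSI and queue-group layers, each VSI support node covers 64 queues, so growing to 200 queues with two support nodes already present requires two more:

u32 max_queue_cnt = 8 * 8;	/* product of hw->max_children[vsil..qgl] */
u32 vsi_nodes_cnt = 2;		/* support nodes already in the tree */
u32 new_numqs = 200;
u16 to_add = DIV_ROUND_UP(new_numqs, max_queue_cnt) - vsi_nodes_cnt;
				/* DIV_ROUND_UP(200, 64) - 2 == 2 */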
+
+/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
return status;
}
- if (new_numqs)
- ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
- /* Keep the max number of queue configuration all the time. Update the
- * tree only if number of queues > previous number of queues. This may
+ ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
+ new_numqs, new_num_nodes);
+ ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
+ new_num_nodes);
+
+ /* Never decrease the number of queues in the tree. Update the tree
+ * only if number of queues > previous number of queues. This may
* leave some extra nodes in the tree if number of queues < previous
* number but that wouldn't harm anything. Removing those extra nodes
* may complicate the code if those nodes are part of SRL or
* individually rate limited.
+ * Also, add the required VSI support nodes if the existing ones cannot
+ * handle the requested new number of queues.
*/
+ status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes);
+ if (status)
+ return status;
+
status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
@@ -2013,6 +2096,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
}
/**
+ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
+ * @pi: port information structure
+ * @vsi_node: pointer to the leftmost node of the VSI to be removed
+ * @owner: LAN or RDMA
+ * @tc: TC number
+ *
+ * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC.
+ *
+ * This function removes all the VSI support nodes associated with a given VSI
+ * and its LAN or RDMA child nodes from the scheduler tree.
+ */
+static int
+ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node, u8 owner, u8 tc)
+{
+ u16 vsi_handle = vsi_node->vsi_handle;
+ bool all_vsi_nodes_removed = true;
+ int j = 0;
+
+ while (vsi_node) {
+ struct ice_sched_node *next_vsi_node;
+
+ if (ice_sched_is_leaf_node_present(vsi_node)) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
+ return -EBUSY;
+ }
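+
+ /* No need to reset j after a removal here: this relies on
+ * ice_free_sched_node() compacting the children array so the next
+ * child slides into slot j.
+ */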
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner)
+ ice_free_sched_node(pi, vsi_node->children[j]);
+ else
+ j++;
+ }
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children)
+ ice_free_sched_node(pi, vsi_node);
+ else
+ all_vsi_nodes_removed = false;
+
+ vsi_node = next_vsi_node;
+ }
+
+ /* clean up aggregator related VSI info if any */
+ if (all_vsi_nodes_removed)
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+
+ return 0;
+}
+
+/**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
- u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_node)
continue;
- if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = -EBUSY;
+ status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
+ if (status)
goto exit_sched_rm_vsi_cfg;
- }
- while (j < vsi_node->num_children) {
- if (vsi_node->children[j]->owner == owner) {
- ice_free_sched_node(pi, vsi_node->children[j]);
- /* reset the counter again since the num
- * children will be updated after node removal
- */
- j = 0;
- } else {
- j++;
- }
- }
- /* remove the VSI if it has no children */
- if (!vsi_node->num_children) {
- ice_free_sched_node(pi, vsi_node);
- vsi_ctx->sched.vsi_node[i] = NULL;
+ vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up aggregator related VSI info if any */
- ice_sched_rm_agg_vsi_info(pi, vsi_handle);
- }
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index f1648cf103b7..0e4dc1a5cff0 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -63,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
if (vf->lan_vsi_idx != ICE_NO_VSI) {
ice_vf_vsi_release(vf);
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
}
last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
@@ -1402,6 +1403,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
mutex_lock(&vf->cfg_lock);
+ while (!trusted && vf->num_mac_lldp)
+ ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false);
+
vf->trusted = trusted;
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 4a91e0aaf0a5..9d9a7edd3618 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3146,7 +3146,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -5978,7 +5978,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index ea39b999a0d0..fb9ea7f8ef44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -12,14 +12,11 @@
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
- * @headers: Pointer to TC flower filter header structure
* @fltr: Pointer to outer TC filter structure
*
- * Determine lookup count based on TC flower input for switch filter.
+ * Return: lookup count based on TC flower input for a switch filter.
*/
-static int
-ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
- struct ice_tc_flower_fltr *fltr)
+static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
{
int lkups_cnt = 1; /* 0th lookup is metadata */
@@ -684,26 +681,26 @@ static int ice_tc_setup_action(struct net_device *filter_dev,
fltr->action.fltr_act = action;
if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
+ ice_tc_is_dev_uplink(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(filter_dev);
fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -716,13 +713,11 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
{
fltr->action.fltr_act = ICE_DROP_PACKET;
- if (ice_is_port_repr_netdev(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
+ if (!ice_tc_is_dev_uplink(filter_dev) &&
+ !(ice_is_port_repr_netdev(filter_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -767,10 +762,157 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
return 0;
}
+static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP);
+}
+
+static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ if (!ice_is_switchdev_running(vsi->back))
+ return false;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET &&
+ ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+}
+
+static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == uplink;
+}
+
+static struct ice_tc_flower_fltr *
+ice_find_pf_tx_lldp_fltr(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ return fltr;
+
+ return NULL;
+}
+
+static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ ice_for_each_vf(pf, bkt, vf)
+ if (vf->lldp_tx_ena)
+ return true;
+
+ return false;
+}
+
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit)
+{
+ struct ice_rule_query_data remove_entry = {
+ .rid = vsi->vf->lldp_recipe_id,
+ .rule_id = vsi->vf->lldp_rule_id,
+ .vsi_handle = vsi->idx,
+ };
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (vsi->vf->lldp_tx_ena)
+ return 0;
+
+ if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
+ return -EINVAL;
+
+ if (!deinit && ice_any_vf_lldp_tx_ena(pf))
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);
+ if (!err)
+ vsi->vf->lldp_tx_ena = true;
+
+ return err;
+}
+
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init)
+{
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_rule_info rinfo = {
+ .priority = 7,
+ .src_vsi = vsi->idx,
+ .sw_act = {
+ .src = vsi->idx,
+ .flag = ICE_FLTR_TX,
+ .fltr_act = ICE_DROP_PACKET,
+ .vsi_handle = vsi->idx,
+ },
+ .flags_info.act_valid = true,
+ };
+ struct ice_adv_lkup_elem list[3];
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (!init && !vsi->vf->lldp_tx_ena)
+ return 0;
+
+ memset(list, 0, sizeof(list));
+ ice_rule_add_direction_metadata(&list[0]);
+ ice_rule_add_src_vsi_metadata(&list[1]);
+ list[2].type = ICE_ETYPE_OL;
+ list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP);
+ list[2].m_u.ethertype.ethtype_id = htons(0xFFFF);
+
+ err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo,
+ &rule_added);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to add an LLDP rule to VSI 0x%X: %d\n",
+ vsi->idx, err);
+ } else {
+ vsi->vf->lldp_recipe_id = rule_added.rid;
+ vsi->vf->lldp_rule_id = rule_added.rule_id;
+ vsi->vf->lldp_tx_ena = false;
+ }
+
+ return err;
+}
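+
+/* Together these two helpers toggle a VF's Tx LLDP drop rule: the rule
+ * added by ice_drop_vf_tx_lldp() (matching Tx direction, source VSI and
+ * the LLDP ethertype) is removed again by ice_pass_vf_tx_lldp(), with
+ * the current state tracked in vf->lldp_tx_ena.
+ */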
+
+static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) {
+ if (!ice_is_fltr_vf_tx_lldp(fltr))
+ continue;
+ ice_pass_vf_tx_lldp(fltr->src_vsi, true);
+ break;
+ }
+}
+
+static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf)
+{
+ int i;
+
+ /* Make the VF LLDP fwd to uplink rule dormant */
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vf_vsi = pf->vsi[i];
+
+ if (vf_vsi && vf_vsi->type == ICE_VSI_VF)
+ ice_drop_vf_tx_lldp(vf_vsi, false);
+ }
+}
+
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data rule_added;
struct ice_hw *hw = &vsi->back->hw;
@@ -785,7 +927,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
return -EOPNOTSUPP;
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_pass_vf_tx_lldp(vsi, false);
+
+ lkups_cnt = ice_tc_count_lkups(flags, fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -814,6 +959,11 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) {
+ /* PF to Uplink */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
/* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
@@ -846,11 +996,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
goto exit;
}
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_add_pf_lldp_drop_rule(vsi);
+
/* store the output params, which are needed later for removing
* advanced switch filter
*/
@@ -985,7 +1141,6 @@ static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_tc_flower_fltr *tc_fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
struct ice_adv_rule_info rule_info = {0};
struct ice_rule_query_data rule_added;
struct ice_adv_lkup_elem *list;
@@ -1021,7 +1176,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return PTR_ERR(dest_vsi);
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -1056,8 +1211,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
case ICE_DROP_PACKET:
- rule_info.sw_act.flag |= ICE_FLTR_RX;
- rule_info.sw_act.src = hw->pf_id;
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ }
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
@@ -1071,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
"Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
@@ -1463,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
* @filter_dev: Pointer to device on which filter is being added
* @f: Pointer to struct flow_cls_offload
* @fltr: Pointer to filter structure
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was parsed successfully, -EINVAL if the flower
+ * cannot be parsed, -EOPNOTSUPP if such filter cannot be configured
+ * for the given VSI.
*/
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr *fltr)
+ struct ice_tc_flower_fltr *fltr, bool ingress)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -1551,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
+ if (!ingress) {
+ bool switchdev =
+ ice_is_eswitch_mode_switchdev(vsi->back);
+
+ if (switchdev != (n_proto_key == ETH_P_LLDP)) {
+ NL_SET_ERR_MSG_FMT_MOD(fltr->extack,
+ "%sLLDP filtering is not supported on egress in %s mode",
+ switchdev ? "Non-" : "",
+ switchdev ? "switchdev" :
+ "legacy");
+ return -EOPNOTSUPP;
+ }
+ }
+
headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
headers->l3_key.ip_proto = match.key->ip_proto;
@@ -1726,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
}
+
+ /* Ingress filter on representor results in an egress filter in HW
+ * and vice versa
+ */
+ ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
+ fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
+ ICE_ESWITCH_FLTR_EGRESS;
+
return 0;
}
@@ -1939,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_pf *pf = vsi->back;
int err;
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_del_pf_lldp_drop_rule(pf);
+
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_drop_vf_tx_lldp(vsi, false);
+
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
rule_rem.vsi_handle = fltr->dest_vsi_handle;
@@ -1975,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
* @vsi: Pointer to VSI
* @f: Pointer to flower offload structure
* @__fltr: Pointer to struct ice_tc_flower_fltr
+ * @ingress: if the rule is added to an ingress block
*
* This function parses TC-flower input fields, parses action,
* and adds a filter.
+ *
+ * Return: 0 if the filter was successfully added,
+ * negative error code otherwise.
*/
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr **__fltr)
+ struct ice_tc_flower_fltr **__fltr, bool ingress)
{
struct ice_tc_flower_fltr *fltr;
int err;
@@ -1999,7 +2200,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
fltr->src_vsi = vsi;
INIT_HLIST_NODE(&fltr->tc_flower_node);
- err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress);
if (err < 0)
goto err;
@@ -2042,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
* @netdev: Pointer to filter device
* @vsi: Pointer to VSI
* @cls_flower: Pointer to flower offload structure
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added,
+ * negative error code otherwise.
*/
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower)
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress)
{
struct netlink_ext_ack *extack = cls_flower->common.extack;
struct net_device *vsi_netdev = vsi->netdev;
@@ -2080,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
}
/* prep and add TC-flower filter in HW */
- err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index d84f153517ec..8a3ab2f22af9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -211,13 +211,14 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
}
struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue);
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower);
-int
-ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress);
+int ice_del_cls_flower(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init);
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit);
static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 1e4f6f6ee449..0e5107fe62ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2440,19 +2440,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
eth = (struct ethhdr *)skb_mac_header(skb);
- if (unlikely((skb->priority == TC_PRIO_CONTROL ||
- eth->h_proto == htons(ETH_P_LLDP)) &&
- vsi->type == ICE_VSI_PF &&
- vsi->port_info->qos_cfg.is_sw_lldp))
- offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
- ICE_TX_CTX_DESC_SWTCH_UPLINK <<
- ICE_TXD_CTX_QW1_CMD_S);
- ice_tstamp(tx_ring, skb, first, &offload);
if ((ice_is_switchdev_running(vsi->back) ||
ice_lag_is_switchdev_running(vsi->back)) &&
vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
+ else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->qos_cfg.is_sw_lldp))
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ ICE_TX_CTX_DESC_SWTCH_UPLINK <<
+ ICE_TXD_CTX_QW1_CMD_S);
+
+ ice_tstamp(tx_ring, skb, first, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 0aab21113cc4..3d68f465952d 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -19,6 +19,7 @@
#include "ice_vlan_mode.h"
#include "ice_fwlog.h"
#include <linux/wait.h>
+#include <net/dscp.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -695,7 +696,6 @@ struct ice_dcb_app_priority_table {
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 64
-#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -718,9 +718,9 @@ struct ice_dcbx_cfg {
u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
/* when DSCP mapping defined by user set its bit to 1 */
- DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ DECLARE_BITMAP(dscp_mapped, DSCP_MAX);
/* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
- u8 dscp_map[ICE_DSCP_NUM_VAL];
+ u8 dscp_map[DSCP_MAX];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -970,6 +970,7 @@ struct ice_hw {
u8 intrl_gran;
struct ice_ptp_hw ptp;
+ s8 lane_num;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 815ad0bfe832..48cd533e93b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
vsi->num_vlan = 0;
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
@@ -1401,3 +1402,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
rcu_read_unlock();
return ctrl_vsi;
}
+
+/**
+ * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
+ * @vf: a VF to add the address to
+ * @vsi: the corresponding VSI
+ * @incr: true if an LLDP address was added, false if one was removed
+ */
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr)
+{
+ bool lldp_by_fw;
+ bool was_ena;
+ bool is_ena;
+
+ if (WARN_ON(!vsi)) {
+ vf->num_mac_lldp = 0;
+ return;
+ }
+
+ lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
+ was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+
+ vf->num_mac_lldp += incr ? 1 : -1;
+ is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+
+ if (was_ena != is_ena)
+ ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 799b2c1f1184..482f4285fd35 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -124,6 +124,7 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+ u8 lldp_tx_ena:1;
u32 ptp_caps;
@@ -134,6 +135,7 @@ struct ice_vf {
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
+ u16 num_mac_lldp;
u16 num_vf_qs; /* num of queue configured per VF */
u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
@@ -149,6 +151,9 @@ struct ice_vf {
/* devlink port data */
struct devlink_port devlink_port;
+ u16 lldp_recipe_id;
+ u16 lldp_rule_id;
+
u16 num_msix; /* num of MSI-X configured on this VF */
struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
@@ -180,6 +185,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
return vf->port_vlan_info.tpid;
}
+static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
+{
+ return vf->num_mac_lldp && vf->trusted;
+}
+
/* VF Hash Table access functions
*
* These functions provide abstraction for interacting with the VF hash table.
@@ -245,6 +255,8 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int ice_reset_vf(struct ice_vf *vf, u32 flags);
void ice_reset_all_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr);
#else /* CONFIG_PCI_IOV */
static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 7c3006eb68dd..eeeb9968e477 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2266,6 +2266,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
}
/**
+ * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
+ * @mac: address to check
+ *
+ * Return: true if the address is one of the three possible LLDP multicast
+ * addresses, false otherwise.
+ */
+static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
+{
+ const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
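+ /* Per IEEE 802.1AB, the three LLDP destination addresses are
+ * 01:80:c2:00:00:0e (nearest bridge), 01:80:c2:00:00:03 (nearest
+ * non-TPMR bridge) and 01:80:c2:00:00:00 (nearest customer bridge).
+ */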
+
+ if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
+ return false;
+
+ return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
+}
+
+/**
+ * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
+ * @vf: a VF to add the address to
+ * @mac: address to check
+ *
+ * Return: true if the VF is allowed to add such a MAC address, false otherwise.
+ */
+static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+
+ if (is_unicast_ether_addr(mac) &&
+ !ice_can_vf_change_mac((struct ice_vf *)vf)) {
+ dev_err(dev,
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return false;
+ }
+
+ if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
+ dev_warn(dev,
+ "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
+ vf->vf_id);
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
@@ -2283,10 +2328,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
- if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
- dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ if (!ice_vc_can_add_mac(vf, mac_addr))
return -EPERM;
- }
ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (ret == -EEXIST) {
@@ -2301,6 +2344,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return ret;
} else {
vf->num_mac++;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, true);
}
ice_vfhw_mac_add(vf, vc_ether_addr);
@@ -2395,6 +2440,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
ice_vfhw_mac_del(vf, vc_ether_addr);
vf->num_mac--;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, false);
return 0;
}
@@ -4275,7 +4322,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
}
ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 1cca9b2262e8..ae83c3914e29 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -1450,7 +1450,8 @@ err_free_pkt:
*/
static void ice_vf_fdir_timer(struct timer_list *t)
{
- struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
+ struct ice_vf_fdir_ctx *ctx_irq = timer_container_of(ctx_irq, t,
+ rx_tmr);
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir *fdir;
unsigned long flags;
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
index 1addd663acad..2c359a8551c7 100644
--- a/drivers/net/ethernet/intel/idpf/Kconfig
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -4,6 +4,7 @@
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
select LIBETH
help
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 2ce01a0b5898..83ac5e296382 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -17,3 +17,6 @@ idpf-y := \
idpf_vf_dev.o
idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
+
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index aef0e9775a33..1e812c3f62f9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -143,6 +143,7 @@ enum idpf_vport_state {
* @vport_id: Vport identifier
* @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
+ * @max_tx_hdr_size: Max header length hardware can support
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -153,6 +154,7 @@ struct idpf_netdev_priv {
u32 vport_id;
u32 link_speed_mbps;
u16 vport_idx;
+ u16 max_tx_hdr_size;
enum idpf_vport_state state;
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
@@ -189,6 +191,7 @@ struct idpf_vport_max_q {
* @mb_intr_reg_init: Mailbox interrupt register initialization
* @reset_reg_init: Reset register initialization
* @trigger_reset: Trigger a reset to occur
+ * @ptp_reg_init: PTP register initialization
*/
struct idpf_reg_ops {
void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
@@ -197,6 +200,7 @@ struct idpf_reg_ops {
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
enum idpf_flags trig_cause);
+ void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};
/**
@@ -290,6 +294,9 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @sw_marker_wq: workqueue for marker packets
+ * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
+ * @tstamp_config: The Tx tstamp config
+ * @tstamp_task: Tx timestamping task
*/
struct idpf_vport {
u16 num_txq;
@@ -334,6 +341,10 @@ struct idpf_vport {
bool link_up;
wait_queue_head_t sw_marker_wq;
+
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct work_struct tstamp_task;
};
/**
@@ -478,6 +489,13 @@ struct idpf_vport_config {
struct idpf_vc_xn_manager;
+#define idpf_for_each_vport(adapter, iter) \
+ for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
+ *iter = (adapter)->max_vports ? *__##iter : NULL; \
+ iter; \
+ iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
+ *__##iter : NULL)
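+
+/* Illustrative usage sketch (hypothetical caller, not part of the patch):
+ * the iterator yields each vport pointer in turn and stops at the first
+ * NULL slot or at max_vports, so no index bookkeeping is needed:
+ *
+ *	idpf_for_each_vport(adapter, vport) {
+ *		if (!vport->link_up)
+ *			continue;
+ *		...
+ *	}
+ */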
+
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -530,6 +548,7 @@ struct idpf_vc_xn_manager;
* @vector_lock: Lock to protect vector distribution
* @queue_lock: Lock to protect queue distribution
* @vc_buf_lock: Lock to protect virtchnl buffer
+ * @ptp: Storage for PTP-related data
*/
struct idpf_adapter {
struct pci_dev *pdev;
@@ -587,6 +606,8 @@ struct idpf_adapter {
struct mutex vector_lock;
struct mutex queue_lock;
struct mutex vc_buf_lock;
+
+ struct idpf_ptp *ptp;
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index e8e046ef2f0d..9642494a67d8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -123,9 +123,12 @@ struct idpf_ctlq_info {
/**
* enum idpf_mbx_opc - PF/VF mailbox commands
* @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP
+ * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to
+ * any peer driver
*/
enum idpf_mbx_opc {
idpf_mbq_opc_send_msg_to_cp = 0x0801,
+ idpf_mbq_opc_send_msg_to_peer_drv = 0x0804,
};
/* API supported for control queue management */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 41e4bd49402a..3fae81f1f988 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -4,6 +4,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
@@ -149,6 +150,18 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter,
}
/**
+ * idpf_ptp_reg_init - Initialize required registers
+ * @adapter: Driver specific private structure
+ *
+ * Set the bits required for enabling shtime and cmd execution
+ */
+static void idpf_ptp_reg_init(const struct idpf_adapter *adapter)
+{
+ adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M;
+ adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M;
+}
+
+/**
* idpf_reg_ops_init - Initialize register API function pointers
* @adapter: Driver specific private structure
*/
@@ -159,6 +172,7 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter)
adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init;
adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init;
adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset;
+ adapter->dev_ops.reg_ops.ptp_reg_init = idpf_ptp_reg_init;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 59b1a1a09996..9bdb309b668e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
/**
* idpf_get_rxnfc - command to get RX flow classification rules
@@ -1312,6 +1313,71 @@ static int idpf_get_link_ksettings(struct net_device *netdev,
return 0;
}
+/**
+ * idpf_get_timestamp_filters - Get the supported timestamping mode
+ * @vport: Virtual port structure
+ * @info: ethtool timestamping info structure
+ *
+ * Get the Tx/Rx timestamp filters.
+ */
+static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
+ struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ if (!vport->tx_tstamp_caps ||
+ vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
+ return;
+
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE;
+
+ info->tx_types |= BIT(HWTSTAMP_TX_ON);
+}
+
+/**
+ * idpf_get_ts_info - Get device PHC association
+ * @netdev: network interface device structure
+ * @info: ethtool timestamping info structure
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int err = 0;
+
+ if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
+ return -EBUSY;
+
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->adapter->ptp) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
+ vport->adapter->ptp->clock) {
+ info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
+ idpf_get_timestamp_filters(vport, info);
+ } else {
+ pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
+ err = ethtool_op_get_ts_info(netdev, info);
+ }
+
+unlock:
+ mutex_unlock(&np->adapter->vport_ctrl_lock);
+
+ return err;
+}
+
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1336,6 +1402,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_ringparam = idpf_get_ringparam,
.set_ringparam = idpf_set_ringparam,
.get_link_ksettings = idpf_get_link_ksettings,
+ .get_ts_info = idpf_get_ts_info,
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
index 24edb8a6ec2e..cc9aa2b6a14a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
@@ -53,6 +53,10 @@
#define PF_FW_ATQH_ATQH_M GENMASK(9, 0)
#define PF_FW_ATQT (PF_FW_BASE + 0x24)
+/* Timesync registers */
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0)
+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(2)
+
/* Interrupts */
#define PF_GLINT_BASE 0x08900000
#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index 8c7f8ef8f1a1..7492d1713243 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -282,7 +282,18 @@ struct idpf_flex_tx_tso_ctx_qw {
u8 flex;
};
-struct idpf_flex_tx_ctx_desc {
+union idpf_flex_tx_ctx_desc {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */
+ struct {
+ __le64 qw0;
+#define IDPF_TX_CTX_L2TAG2_M GENMASK_ULL(47, 32)
+ __le64 qw1;
+#define IDPF_TX_CTX_DTYPE_M GENMASK_ULL(3, 0)
+#define IDPF_TX_CTX_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TX_CTX_TSYN_REG_M GENMASK_ULL(47, 30)
+#define IDPF_TX_CTX_MSS_M GENMASK_ULL(63, 50)
+ } tsyn;
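+
+ /* Illustrative sketch (not part of this patch): a caller could pack
+ * qw1 for a timestamp request with latch index 'idx' roughly as
+ * qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX, IDPF_TX_CTX_DTYPE_M) |
+ * le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
+ * using le64_encode_bits() from <linux/bitfield.h>; the dtype name is
+ * assumed from the 0x01 dtype noted above.
+ */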
+
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 82f09b4030bc..4eb20ec2accb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
static const struct net_device_ops idpf_netdev_ops;
@@ -144,22 +145,6 @@ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
}
/**
- * idpf_set_mb_vec_id - Set vector index for mailbox
- * @adapter: adapter structure to access the vector chunks
- *
- * The first vector id in the requested vector chunks from the CP is for
- * the mailbox
- */
-static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
-{
- if (adapter->req_vec_chunks)
- adapter->mb_vector.v_idx =
- le16_to_cpu(adapter->caps.mailbox_vector_id);
- else
- adapter->mb_vector.v_idx = 0;
-}
-
-/**
* idpf_mb_intr_init - Initialize the mailbox interrupt
* @adapter: adapter structure to store the mailbox vector
*/
@@ -349,7 +334,7 @@ int idpf_intr_req(struct idpf_adapter *adapter)
goto free_irq;
}
- idpf_set_mb_vec_id(adapter);
+ adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
@@ -723,6 +708,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport = vport;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
vport->netdev = netdev;
return idpf_init_mac_addr(vport, netdev);
@@ -740,6 +726,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->adapter = adapter;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
spin_lock_init(&np->stats_lock);
@@ -1814,11 +1801,19 @@ void idpf_vc_event_task(struct work_struct *work)
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
return;
- if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
- test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
- set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
- idpf_init_hard_reset(adapter);
- }
+ if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags))
+ goto func_reset;
+
+ if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags))
+ goto drv_load;
+
+ return;
+
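+/* A function reset additionally shuts down outstanding virtchnl
+ * transactions before falling through to the common hard-reset path.
+ */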
+func_reset:
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+drv_load:
+ set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ idpf_init_hard_reset(adapter);
}
/**
@@ -2203,8 +2198,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ u16 max_tx_hdr_size = np->max_tx_hdr_size;
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -2227,7 +2222,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
goto unsupported;
len = skb_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
if (!skb->encapsulation)
@@ -2240,7 +2235,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
/* IPLEN can support at most 127 dwords */
len = skb_inner_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
/* No need to validate L4LEN as TCP is the only protocol with a
@@ -2342,6 +2337,60 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
mem->pa = 0;
}
+static int idpf_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EOPNOTSUPP;
+ }
+
+ err = idpf_ptp_set_timestamp_mode(vport, config);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+static int idpf_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return 0;
+ }
+
+ *config = vport->tstamp_config;
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
+
static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
@@ -2354,4 +2403,6 @@ static const struct net_device_ops idpf_netdev_ops = {
.ndo_get_stats64 = idpf_get_stats64,
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
+ .ndo_hwtstamp_get = idpf_hwtstamp_get,
+ .ndo_hwtstamp_set = idpf_hwtstamp_set,
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index b35713036a54..0efd9c0c7a90 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -168,6 +168,10 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free;
}
+ err = pci_enable_ptm(pdev, NULL);
+ if (err)
+ pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n");
+
/* set up for high or low dma */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err) {
@@ -199,9 +203,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_serv_wq_alloc;
}
- adapter->mbx_wq = alloc_workqueue("%s-%s-mbx",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
- dev_driver_string(dev),
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI,
+ 0, dev_driver_string(dev),
dev_name(dev));
if (!adapter->mbx_wq) {
dev_err(dev, "Failed to allocate mailbox workqueue\n");
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
new file mode 100644
index 000000000000..4f8725c85332
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -0,0 +1,873 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+
+/**
+ * idpf_ptp_get_access - Determine the access type of the PTP features
+ * @adapter: Driver specific private structure
+ * @direct: Capability that indicates the direct access
+ * @mailbox: Capability that indicates the mailbox access
+ *
+ * Return: the type of supported access for the PTP feature.
+ */
+static enum idpf_ptp_access
+idpf_ptp_get_access(const struct idpf_adapter *adapter, u32 direct, u32 mailbox)
+{
+ if (adapter->ptp->caps & direct)
+ return IDPF_PTP_DIRECT;
+ else if (adapter->ptp->caps & mailbox)
+ return IDPF_PTP_MAILBOX;
+ else
+ return IDPF_PTP_NONE;
+}
+
+/**
+ * idpf_ptp_get_features_access - Determine the access type of PTP features
+ * @adapter: Driver specific private structure
+ *
+ * Fill the adapter structure with the type of access supported for each
+ * PTP feature.
+ */
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 direct, mailbox;
+
+ /* Get the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB;
+ ptp->get_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Set the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB;
+ ptp->set_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Adjust the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK;
+ mailbox = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB;
+ ptp->adj_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Tx timestamping */
+ direct = VIRTCHNL2_CAP_PTP_TX_TSTAMPS;
+ mailbox = VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB;
+ ptp->tx_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+}
+
+/**
+ * idpf_ptp_enable_shtime - Enable shadow time and execute a command
+ * @adapter: Driver specific private structure
+ */
+static void idpf_ptp_enable_shtime(struct idpf_adapter *adapter)
+{
+ u32 shtime_enable, exec_cmd;
+
+ /* Get offsets */
+ shtime_enable = adapter->ptp->cmd.shtime_enable_mask;
+ exec_cmd = adapter->ptp->cmd.exec_cmd_mask;
+
+ /* Set the shtime enable and the sync field */
+ writel(shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+ writel(exec_cmd | shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_direct - Read directly the main timer value
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ *
+ * Return: the device clock time.
+ */
+static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 hi, lo;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ return ((u64)hi << 32) | lo;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_mailbox - Read the main timer value through mailbox
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ * @src_clk: Returned main timer value in nanoseconds
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg_mailbox(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts,
+ u64 *src_clk)
+{
+ struct idpf_ptp_dev_timers clk_time;
+ int err;
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ err = idpf_ptp_get_dev_clk_time(adapter, &clk_time);
+ if (err)
+ return err;
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ *src_clk = clk_time.dev_clk_time_ns;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg - Read the main timer value
+ * @adapter: Driver specific private structure
+ * @src_clk: Returned main timer value in nanoseconds
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success (the main timer value is stored in @src_clk),
+ * -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk,
+ struct ptp_system_timestamp *sts)
+{
+ switch (adapter->ptp->get_dev_clk_time_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_MAILBOX:
+ return idpf_ptp_read_src_clk_reg_mailbox(adapter, sts, src_clk);
+ case IDPF_PTP_DIRECT:
+ *src_clk = idpf_ptp_read_src_clk_reg_direct(adapter, sts);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_gettimex64 - Get the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success, with the device clock value converted into a
+ * timespec64 struct, -errno otherwise.
+ */
+static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ u64 time_ns;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &time_ns, sts);
+ if (err)
+ return -EACCES;
+
+ *ts = ns_to_timespec64(time_ns);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
+ * queue group.
+ * @grp: receive queue group in which Rx timestamp is enabled
+ * @split: Indicates whether the queue model is split or single queue
+ * @systime: Cached system time
+ */
+static void
+idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split,
+ u64 systime)
+{
+ struct idpf_rx_queue *rxq;
+ u16 i;
+
+ if (!split) {
+ for (i = 0; i < grp->singleq.num_rxq; i++) {
+ rxq = grp->singleq.rxqs[i];
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ } else {
+ for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
+ rxq = &grp->splitq.rxq_sets[i]->rxq;
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_update_cached_phctime - Update the cached PHC time values
+ * @adapter: Driver specific private structure
+ *
+ * This function updates the system time values which are cached in the adapter
+ * structure and the Rx queues.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
+{
+ u64 systime;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &systime, NULL);
+ if (err)
+ return -EACCES;
+
+ /* Update the cached PHC time stored in the adapter structure.
+ * These values are used to extend Tx timestamp values to 64 bit
+ * expected by the stack.
+ */
+ WRITE_ONCE(adapter->ptp->cached_phc_time, systime);
+ WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
+
+ idpf_for_each_vport(adapter, vport) {
+ bool split;
+
+ if (!vport || !vport->rxq_grps)
+ continue;
+
+ split = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_settime64 - Set the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+ u64 ns;
+
+ access = adapter->ptp->set_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ ns = timespec64_to_ns(ts);
+
+ err = idpf_ptp_set_dev_clk_time(adapter, ns);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to set the time, err: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+{
+ struct timespec64 now, then;
+ int err;
+
+ err = idpf_ptp_gettimex64(info, &now, NULL);
+ if (err)
+ return err;
+
+ then = ns_to_timespec64(delta);
+ now = timespec64_add(now, then);
+
+ return idpf_ptp_settime64(info, &now);
+}
+
+/**
+ * idpf_ptp_adjtime - Adjust the time of the clock by the indicated delta
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ /* Hardware only supports atomic adjustments using signed 32-bit
+ * integers. For any adjustment outside this range, perform
+ * a non-atomic get->adjust->set flow.
+ */
+ if (delta > S32_MAX || delta < S32_MIN)
+ return idpf_ptp_adjtime_nonatomic(info, delta);
+
+ err = idpf_ptp_adj_dev_clk_time(adapter, delta);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to adjust the clock with delta %lld err: %pe\n",
+ delta, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ u64 incval, diff;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ incval = adapter->ptp->base_incval;
+
+ diff = adjust_by_scaled_ppm(incval, scaled_ppm);
+ err = idpf_ptp_adj_dev_clk_fine(adapter, diff);
+ if (err)
+ pci_err(adapter->pdev, "Failed to adjust clock increment rate for scaled ppm %ld %pe\n",
+ scaled_ppm, ERR_PTR(err));
+
+ return err;
+}
+
+/**
+ * idpf_ptp_verify_pin - Verify if pin supports requested pin function
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Return: -EOPNOTSUPP, as this is not supported yet.
+ */
+static int idpf_ptp_verify_pin(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_gpio_enable - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_gpio_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_tstamp_extend_32b_to_64b - Convert a 32b nanoseconds Tx or Rx
+ * timestamp value to 64b.
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_timestamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64-bit timestamps that the stack expects.
+ *
+ * Return: timestamp value extended to 64 bits based on the cached PHC time.
+ */
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp)
+{
+ u32 delta, phc_time_lo;
+ u64 ns;
+
+ /* Extract the lower 32 bits of the PHC time */
+ phc_time_lo = (u32)cached_phc_time;
+
+ /* Calculate the delta between the lower 32bits of the cached PHC
+ * time and the in_timestamp value.
+ */
+ delta = in_timestamp - phc_time_lo;
+
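+ /* A delta larger than half the 32-bit range means the timestamp most
+ * likely predates the cached PHC time, so treat the delta as negative.
+ */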
+ if (delta > U32_MAX / 2) {
+ /* Reverse the delta calculation here */
+ delta = phc_time_lo - in_timestamp;
+ ns = cached_phc_time - delta;
+ } else {
+ ns = cached_phc_time + delta;
+ }
+
+ return ns;
+}
+
+/**
+ * idpf_ptp_extend_ts - Convert a 40b timestamp to 64b nanoseconds
+ * @vport: Virtual port structure
+ * @in_tstamp: Ingress/egress timestamp value
+ *
+ * It is assumed that the caller verifies the timestamp is valid prior to
+ * calling this function.
+ *
+ * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
+ * time stored in the device private PTP structure as the basis for timestamp
+ * extension.
+ *
+ * Return: Tx timestamp value extended to 64 bits, or 0 when the cached PHC
+ * time is too stale to extend it reliably.
+ */
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ unsigned long discard_time;
+
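+ /* A 32-bit nanosecond timestamp wraps about every 4.3 seconds (2^32 ns),
+ * so the cached PHC time must be at most ~2 seconds old for the
+ * extension to be unambiguous.
+ */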
+ discard_time = ptp->cached_phc_jiffies + 2 * HZ;
+
+ if (time_is_before_jiffies(discard_time))
+ return 0;
+
+ return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time,
+ lower_32_bits(in_tstamp));
+}
+
+/**
+ * idpf_ptp_request_ts - Request an available Tx timestamp index
+ * @tx_q: Transmit queue on which the Tx timestamp is requested
+ * @skb: The SKB to associate with this timestamp request
+ * @idx: Index of the Tx timestamp latch
+ *
+ * Request a Tx timestamp index from the pool negotiated during PTP init; the
+ * index is to be programmed into the Tx descriptor.
+ *
+ * Return: 0 on success, with the latch index to be written into the Tx
+ * descriptor stored in @idx; -errno otherwise.
+ */
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ struct list_head *head;
+
+ /* Get the index from the free latches list */
+ spin_lock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ head = &tx_q->cached_tstamp_caps->latches_free;
+ if (list_empty(head)) {
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+ return -ENOBUFS;
+ }
+
+ ptp_tx_tstamp = list_first_entry(head, struct idpf_ptp_tx_tstamp,
+ list_member);
+ list_del(&ptp_tx_tstamp->list_member);
+
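+ /* Take an skb reference so the skb stays valid until the timestamp
+ * request is completed.
+ */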
+ ptp_tx_tstamp->skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Move the element to the used latches list */
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_q->cached_tstamp_caps->latches_in_use);
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ *idx = ptp_tx_tstamp->idx;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
+ * @vport: Virtual port structure
+ * @rx_filter: Receive timestamp filter
+ */
+static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
+{
+ bool enable = true, splitq;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE) {
+ enable = false;
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ } else {
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
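+ /* Propagate the requested state to every Rx queue of the vport */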
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ struct idpf_rx_queue *rx_queue;
+ u16 j, num_rxq;
+
+ if (splitq)
+ num_rxq = grp->splitq.num_rxq_sets;
+ else
+ num_rxq = grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (splitq)
+ rx_queue = &grp->splitq.rxq_sets[j]->rxq;
+ else
+ rx_queue = grp->singleq.rxqs[j];
+
+ if (enable)
+ idpf_queue_set(PTP, rx_queue);
+ else
+ idpf_queue_clear(PTP, rx_queue);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_set_timestamp_mode - Set up driver for requested timestamp mode
+ * @vport: Virtual port structure
+ * @config: Hwtstamp settings requested or saved
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vport->tstamp_config.tx_type = config->tx_type;
+ idpf_ptp_set_rx_tstamp(vport, config->rx_filter);
+ *config = vport->tstamp_config;
+
+ return 0;
+}
+
+/**
+ * idpf_tstamp_task - Delayed task to handle Tx tstamps
+ * @work: work_struct handle
+ */
+void idpf_tstamp_task(struct work_struct *work)
+{
+ struct idpf_vport *vport;
+
+ vport = container_of(work, struct idpf_vport, tstamp_task);
+
+ idpf_ptp_get_tx_tstamp(vport);
+}
+
+/**
+ * idpf_ptp_do_aux_work - Do PTP periodic work
+ * @info: Driver's PTP info structure
+ *
+ * Return: Number of jiffies until the next run of the periodic work.
+ */
+static long idpf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ idpf_ptp_update_cached_phctime(adapter);
+
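+ /* Re-run in 500 ms to keep the cached PHC time fresh enough for
+ * timestamp extension.
+ */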
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * idpf_ptp_set_caps - Set PTP capabilities
+ * @adapter: Driver specific private structure
+ *
+ * This function fills in the PTP clock capabilities and callbacks.
+ */
+static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp->info;
+
+ snprintf(info->name, sizeof(info->name), "%s-%s-clk",
+ KBUILD_MODNAME, pci_name(adapter->pdev));
+
+ info->owner = THIS_MODULE;
+ info->max_adj = adapter->ptp->max_adj;
+ info->gettimex64 = idpf_ptp_gettimex64;
+ info->settime64 = idpf_ptp_settime64;
+ info->adjfine = idpf_ptp_adjfine;
+ info->adjtime = idpf_ptp_adjtime;
+ info->verify = idpf_ptp_verify_pin;
+ info->enable = idpf_ptp_gpio_enable;
+ info->do_aux_work = idpf_ptp_do_aux_work;
+}
+
+/**
+ * idpf_ptp_create_clock - Create PTP clock device for userspace
+ * @adapter: Driver specific private structure
+ *
+ * This function creates a new PTP clock device.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_create_clock(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock *clock;
+
+ idpf_ptp_set_caps(adapter);
+
+ /* Attempt to register the clock before enabling the hardware. */
+ clock = ptp_clock_register(&adapter->ptp->info,
+ &adapter->pdev->dev);
+ if (IS_ERR(clock)) {
+ pci_err(adapter->pdev, "PTP clock creation failed: %pe\n",
+ clock);
+ return PTR_ERR(clock);
+ }
+
+ adapter->ptp->clock = clock;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_release_vport_tstamp - Release the Tx timestamp trackers for a
+ * given vport.
+ * @vport: Virtual port structure
+ *
+ * Remove the queues and delete lists that track Tx timestamp entries for a
+ * given vport.
+ */
+static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct list_head *head;
+
+ cancel_work_sync(&vport->tstamp_task);
+
+ /* Remove list with free latches */
+ spin_lock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ head = &vport->tx_tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ /* Remove list with latches in use */
+ head = &vport->tx_tstamp_caps->latches_in_use;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ kfree(vport->tx_tstamp_caps);
+ vport->tx_tstamp_caps = NULL;
+}
+
+/**
+ * idpf_ptp_release_tstamp - Release the Tx timestamp trackers
+ * @adapter: Driver specific private structure
+ *
+ * Remove the queues and delete lists that track Tx timestamp entries.
+ */
+static void idpf_ptp_release_tstamp(struct idpf_adapter *adapter)
+{
+ idpf_for_each_vport(adapter, vport) {
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ continue;
+
+ idpf_ptp_release_vport_tstamp(vport);
+ }
+}
+
+/**
+ * idpf_ptp_get_txq_tstamp_capability - Verify the timestamping capability
+ * for a given Tx queue.
+ * @txq: Transmit queue
+ *
+ * Timestamping requires both reading the device clock value and support from
+ * the Control Plane; the function checks both conditions and reports whether
+ * timestamping is supported.
+ *
+ * Return: true if the timestamping is supported, false otherwise.
+ */
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ return txq && txq->cached_tstamp_caps &&
+        txq->cached_tstamp_caps->access;
+}
+
+/**
+ * idpf_ptp_init - Initialize PTP hardware clock support
+ * @adapter: Driver specific private structure
+ *
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions. The function allocates and registers a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ struct timespec64 ts;
+ int err;
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP)) {
+ pci_dbg(adapter->pdev, "PTP capability is not detected\n");
+ return -EOPNOTSUPP;
+ }
+
+ adapter->ptp = kzalloc(sizeof(*adapter->ptp), GFP_KERNEL);
+ if (!adapter->ptp)
+ return -ENOMEM;
+
+ /* add a back pointer to adapter */
+ adapter->ptp->adapter = adapter;
+
+ if (adapter->dev_ops.reg_ops.ptp_reg_init)
+ adapter->dev_ops.reg_ops.ptp_reg_init(adapter);
+
+ err = idpf_ptp_get_caps(adapter);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to get PTP caps err %d\n", err);
+ goto free_ptp;
+ }
+
+ err = idpf_ptp_create_clock(adapter);
+ if (err)
+ goto free_ptp;
+
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_schedule_worker(adapter->ptp->clock, 0);
+
+ /* Write the default increment time value if the clock adjustments
+ * are enabled.
+ */
+ if (adapter->ptp->adj_dev_clk_time_access != IDPF_PTP_NONE) {
+ err = idpf_ptp_adj_dev_clk_fine(adapter,
+ adapter->ptp->base_incval);
+ if (err)
+ goto remove_clock;
+ }
+
+ /* Write the initial time value if the set time operation is enabled */
+ if (adapter->ptp->set_dev_clk_time_access != IDPF_PTP_NONE) {
+ ts = ktime_to_timespec64(ktime_get_real());
+ err = idpf_ptp_settime64(&adapter->ptp->info, &ts);
+ if (err)
+ goto remove_clock;
+ }
+
+ spin_lock_init(&adapter->ptp->read_dev_clk_lock);
+
+ pci_dbg(adapter->pdev, "PTP init successful\n");
+
+ return 0;
+
+remove_clock:
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(adapter->ptp->clock);
+ adapter->ptp->clock = NULL;
+
+free_ptp:
+ kfree(adapter->ptp);
+ adapter->ptp = NULL;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_release - Clear PTP hardware clock support
+ * @adapter: Driver specific private structure
+ */
+void idpf_ptp_release(struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->tx_tstamp_access != IDPF_PTP_NONE &&
+ ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ idpf_ptp_release_tstamp(adapter);
+
+ if (ptp->clock) {
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(ptp->clock);
+ }
+
+ kfree(ptp);
+ adapter->ptp = NULL;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
new file mode 100644
index 000000000000..a876749d6116
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_PTP_H
+#define _IDPF_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct idpf_ptp_cmd - PTP command masks
+ * @exec_cmd_mask: mask to trigger command execution
+ * @shtime_enable_mask: mask to enable shadow time
+ */
+struct idpf_ptp_cmd {
+ u32 exec_cmd_mask;
+ u32 shtime_enable_mask;
+};
+
+/**
+ * struct idpf_ptp_dev_clk_regs - PTP device registers
+ * @dev_clk_ns_l: low part of the device clock register
+ * @dev_clk_ns_h: high part of the device clock register
+ * @phy_clk_ns_l: low part of the PHY clock register
+ * @phy_clk_ns_h: high part of the PHY clock register
+ * @incval_l: low part of the increment value register
+ * @incval_h: high part of the increment value register
+ * @shadj_l: low part of the shadow adjust register
+ * @shadj_h: high part of the shadow adjust register
+ * @phy_incval_l: low part of the PHY increment value register
+ * @phy_incval_h: high part of the PHY increment value register
+ * @phy_shadj_l: low part of the PHY shadow adjust register
+ * @phy_shadj_h: high part of the PHY shadow adjust register
+ * @cmd: PTP command register
+ * @phy_cmd: PHY command register
+ * @cmd_sync: PTP command synchronization register
+ */
+struct idpf_ptp_dev_clk_regs {
+ /* Main clock */
+ void __iomem *dev_clk_ns_l;
+ void __iomem *dev_clk_ns_h;
+
+ /* PHY timer */
+ void __iomem *phy_clk_ns_l;
+ void __iomem *phy_clk_ns_h;
+
+ /* Main timer adjustments */
+ void __iomem *incval_l;
+ void __iomem *incval_h;
+ void __iomem *shadj_l;
+ void __iomem *shadj_h;
+
+ /* PHY timer adjustments */
+ void __iomem *phy_incval_l;
+ void __iomem *phy_incval_h;
+ void __iomem *phy_shadj_l;
+ void __iomem *phy_shadj_h;
+
+ /* Command */
+ void __iomem *cmd;
+ void __iomem *phy_cmd;
+ void __iomem *cmd_sync;
+};
+
+/**
+ * enum idpf_ptp_access - the type of access to PTP operations
+ * @IDPF_PTP_NONE: no access
+ * @IDPF_PTP_DIRECT: direct access through BAR registers
+ * @IDPF_PTP_MAILBOX: access through mailbox messages
+ */
+enum idpf_ptp_access {
+ IDPF_PTP_NONE = 0,
+ IDPF_PTP_DIRECT,
+ IDPF_PTP_MAILBOX,
+};
+
+/**
+ * struct idpf_ptp_secondary_mbx - PTP secondary mailbox
+ * @peer_mbx_q_id: PTP mailbox queue ID
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @valid: indicates whether the secondary mailbox is supported by the Control
+ * Plane
+ */
+struct idpf_ptp_secondary_mbx {
+ u16 peer_mbx_q_id;
+ u16 peer_id;
+ bool valid:1;
+};
+
+/**
+ * enum idpf_ptp_tx_tstamp_state - Tx timestamp states
+ * @IDPF_PTP_FREE: Tx timestamp index free to use
+ * @IDPF_PTP_REQUEST: Tx timestamp index set to the Tx descriptor
+ * @IDPF_PTP_READ_VALUE: Tx timestamp value ready to be read
+ */
+enum idpf_ptp_tx_tstamp_state {
+ IDPF_PTP_FREE,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE,
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp_status - Parameters to track Tx timestamp
+ * @skb: the pointer to the SKB that received the completion tag
+ * @state: the state of the Tx timestamp
+ */
+struct idpf_ptp_tx_tstamp_status {
+ struct sk_buff *skb;
+ enum idpf_ptp_tx_tstamp_state state;
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp - Parameters for Tx timestamping
+ * @list_member: the list member structure
+ * @tx_latch_reg_offset_l: Tx tstamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx tstamp latch high register offset
+ * @skb: the pointer to the SKB for this timestamp request
+ * @tstamp: the Tx tstamp value
+ * @idx: the index of the Tx tstamp
+ */
+struct idpf_ptp_tx_tstamp {
+ struct list_head list_member;
+ u32 tx_latch_reg_offset_l;
+ u32 tx_latch_reg_offset_h;
+ struct sk_buff *skb;
+ u64 tstamp;
+ u32 idx;
+};
+
+/**
+ * struct idpf_ptp_vport_tx_tstamp_caps - Tx timestamp capabilities
+ * @vport_id: the vport id
+ * @num_entries: the number of negotiated Tx timestamp entries
+ * @tstamp_ns_lo_bit: first bit for nanosecond part of the timestamp
+ * @latches_lock: the lock to the lists of free/used timestamp indexes
+ * @status_lock: the lock to the status tracker
+ * @access: indicates an access to Tx timestamp
+ * @latches_free: the list of the free Tx timestamps latches
+ * @latches_in_use: the list of the used Tx timestamps latches
+ * @tx_tstamp_status: Tx tstamp status tracker
+ */
+struct idpf_ptp_vport_tx_tstamp_caps {
+ u32 vport_id;
+ u16 num_entries;
+ u16 tstamp_ns_lo_bit;
+ spinlock_t latches_lock;
+ spinlock_t status_lock;
+ bool access:1;
+ struct list_head latches_free;
+ struct list_head latches_in_use;
+ struct idpf_ptp_tx_tstamp_status tx_tstamp_status[];
+};
+
+/**
+ * struct idpf_ptp - PTP parameters
+ * @info: structure defining PTP hardware capabilities
+ * @clock: pointer to registered PTP clock device
+ * @adapter: back pointer to the adapter
+ * @base_incval: base increment value of the PTP clock
+ * @max_adj: maximum adjustment of the PTP clock
+ * @cmd: HW specific command masks
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
+ * @dev_clk_regs: the set of registers to access the device clock
+ * @caps: PTP capabilities negotiated with the Control Plane
+ * @get_dev_clk_time_access: access type for getting the device clock time
+ * @set_dev_clk_time_access: access type for setting the device clock time
+ * @adj_dev_clk_time_access: access type for adjusting the device clock
+ * @tx_tstamp_access: access type for the Tx timestamp value read
+ * @rsv: reserved bits
+ * @secondary_mbx: parameters for using dedicated PTP mailbox
+ * @read_dev_clk_lock: spinlock protecting access to the device clock read
+ * operation executed through the HW latch
+ */
+struct idpf_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct idpf_adapter *adapter;
+ u64 base_incval;
+ u64 max_adj;
+ struct idpf_ptp_cmd cmd;
+ u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
+ struct idpf_ptp_dev_clk_regs dev_clk_regs;
+ u32 caps;
+ enum idpf_ptp_access get_dev_clk_time_access:2;
+ enum idpf_ptp_access set_dev_clk_time_access:2;
+ enum idpf_ptp_access adj_dev_clk_time_access:2;
+ enum idpf_ptp_access tx_tstamp_access:2;
+ u8 rsv;
+ struct idpf_ptp_secondary_mbx secondary_mbx;
+ spinlock_t read_dev_clk_lock;
+};
+
+/**
+ * idpf_ptp_info_to_adapter - get driver adapter struct from ptp_clock_info
+ * @info: pointer to ptp_clock_info struct
+ *
+ * Return: pointer to the corresponding adapter struct
+ */
+static inline struct idpf_adapter *
+idpf_ptp_info_to_adapter(const struct ptp_clock_info *info)
+{
+ const struct idpf_ptp *ptp = container_of_const(info, struct idpf_ptp,
+ info);
+ return ptp->adapter;
+}
+
+/**
+ * struct idpf_ptp_dev_timers - System time and device time values
+ * @sys_time_ns: system time value expressed in nanoseconds
+ * @dev_clk_time_ns: device clock time value expressed in nanoseconds
+ */
+struct idpf_ptp_dev_timers {
+ u64 sys_time_ns;
+ u64 dev_clk_time_ns;
+};
+
+/**
+ * idpf_ptp_is_vport_tx_tstamp_ena - Verify the Tx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * Tx timestamp capabilities are negotiated with the Control Plane only if the
+ * device clock value can be read, the Tx timestamp access type is other than
+ * NONE, and the PTP clock for the adapter is created. When all those conditions
+ * are satisfied, the Tx timestamp feature is enabled and tx_tstamp_caps is
+ * allocated and populated.
+ *
+ * Return: true if the Tx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_tx_tstamp_ena(struct idpf_vport *vport)
+{
+ return !!vport->tx_tstamp_caps;
+}
+
+/**
+ * idpf_ptp_is_vport_rx_tstamp_ena - Verify the Rx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * The Rx timestamp feature is enabled if the PTP clock for the adapter is
+ * created and it is possible to read the value of the device clock. The latter
+ * requirement comes from the need to extend the Rx timestamp value to 64 bits
+ * based on the current device clock time.
+ *
+ * Return: true if the Rx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_rx_tstamp_ena(struct idpf_vport *vport)
+{
+ return vport->adapter->ptp &&
+        vport->adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE;
+}
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int idpf_ptp_init(struct idpf_adapter *adapter);
+void idpf_ptp_release(struct idpf_adapter *adapter);
+int idpf_ptp_get_caps(struct idpf_adapter *adapter);
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time);
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport);
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport);
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config);
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp);
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp);
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx);
+void idpf_tstamp_task(struct work_struct *work);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ return 0;
+}
+
+static inline void idpf_ptp_release(struct idpf_adapter *adapter) { }
+
+static inline int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+idpf_ptp_get_features_access(const struct idpf_adapter *adapter) { }
+
+static inline bool
+idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ return false;
+}
+
+static inline int
+idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
+ u64 time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter,
+ u64 incval)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter,
+ s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ return 0;
+}
+
+static inline u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time,
+ u32 in_timestamp)
+{
+ return 0;
+}
+
+static inline int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q,
+ struct sk_buff *skb, u32 *idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void idpf_tstamp_task(struct work_struct *work) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
+#endif /* _IDPF_PTP_H */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index eae1b6f474e6..993c354aa27a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
+ int csum, tso, needed;
unsigned int count;
__be16 protocol;
- int csum, tso;
count = idpf_tx_desc_count_required(tx_q, skb);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
- if (idpf_tx_maybe_stop_common(tx_q,
- count + IDPF_TX_DESCS_PER_CACHE_LINE +
- IDPF_TX_DESCS_FOR_CTX)) {
+ needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX;
+ if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ IDPF_DESC_UNUSED(tx_q),
+ needed, needed)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
u64_stats_update_begin(&tx_q->stats_sync);
@@ -891,7 +892,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
- * @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -901,21 +901,20 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
*/
static void
idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
- struct libeth_rqe_info *fields, u32 *ptype)
+ struct libeth_rqe_info *fields)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
- *ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
- * @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -925,12 +924,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
*/
static void
idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
- struct libeth_rqe_info *fields, u32 *ptype)
+ struct libeth_rqe_info *fields)
{
fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
- *ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
- le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -938,18 +937,17 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
* @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
- * @ptype: pointer that will store packet type
*
*/
static void
idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
- struct libeth_rqe_info *fields, u32 *ptype)
+ struct libeth_rqe_info *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields);
else
- idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
/**
@@ -972,7 +970,6 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
- u32 ptype;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rx_q->rx[ntc];
@@ -993,7 +990,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
*/
dma_rmb();
- idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype);
+ idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
rx_buf = &rx_q->rx_buf[ntc];
if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
@@ -1037,7 +1034,8 @@ skip_data:
total_rx_bytes += skb->len;
/* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc,
+ fields.ptype);
/* send completed skb up the stack */
napi_gro_receive(rx_q->pp->p.napi, skb);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index bdf52cef3891..5cf440e09d0a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -5,6 +5,7 @@
#include <net/libeth/tx.h>
#include "idpf.h"
+#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
struct idpf_tx_stash {
@@ -1107,6 +1108,8 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
*/
static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
+ struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
+ struct work_struct *tstamp_task = &vport->tstamp_task;
int i, j, k = 0;
vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
@@ -1121,6 +1124,12 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
for (j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
+
+ if (!caps)
+ continue;
+
+ vport->txqs[k]->cached_tstamp_caps = caps;
+ vport->txqs[k]->tstamp_task = tstamp_task;
}
}
@@ -1655,6 +1664,40 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
}
/**
+ * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
+ * @txq: queue to read the timestamp from
+ * @skb: socket buffer to provide Tx timestamp value
+ *
+ * Schedule a work to read Tx timestamp value generated once the packet is
+ * transmitted.
+ */
+static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
+{
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
+
+ tx_tstamp_caps = txq->cached_tstamp_caps;
+ spin_lock_bh(&tx_tstamp_caps->status_lock);
+
+ for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
+ tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
+ if (tx_tstamp_status->state != IDPF_PTP_FREE)
+ continue;
+
+ tx_tstamp_status->skb = skb;
+ tx_tstamp_status->state = IDPF_PTP_REQUEST;
+
+ /* Fetch timestamp from completion descriptor through
+ * virtchnl msg to report to stack.
+ */
+ queue_work(system_unbound_wq, txq->tstamp_task);
+ break;
+ }
+
+ spin_unlock_bh(&tx_tstamp_caps->status_lock);
+}
+
+/**
* idpf_tx_clean_stashed_bufs - clean bufs that were stored for
* out of order completions
* @txq: queue to clean
@@ -1682,6 +1725,11 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
continue;
hash_del(&stash->hlist);
+
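+ /* Report a pending Tx timestamp before completing the stashed skb */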
+ if (stash->buf.type == LIBETH_SQE_SKB &&
+ (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS))
+ idpf_tx_read_tstamp(txq, stash->buf.skb);
+
libeth_tx_complete(&stash->buf, &cp);
/* Push shadow buf back onto stack */
@@ -1876,8 +1924,12 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
return false;
- if (tx_buf->type == LIBETH_SQE_SKB)
+ if (tx_buf->type == LIBETH_SQE_SKB) {
+ if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ idpf_tx_read_tstamp(txq, tx_buf->skb);
+
libeth_tx_complete(tx_buf, &cp);
+ }
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
@@ -2127,11 +2179,24 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size)
{
- desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
+ *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
+/* Check all conditions that determine whether the txq (and related
+ * resources) has room for "size" more descriptors.
+ */
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+{
+ if (IDPF_DESC_UNUSED(tx_q) < size ||
+ IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+ IDPF_TX_BUF_RSV_LOW(tx_q))
+ return 0;
+ return 1;
+}
+
/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
@@ -2142,29 +2207,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
- if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto out;
-
- /* If there are too many outstanding completions expected on the
- * completion queue, stop the TX queue to give the device some time to
- * catch up
- */
- if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
- IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
- goto splitq_stop;
-
- /* Also check for available book keeping buffers; if we are low, stop
- * the queue to wait for more completions
- */
- if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
- goto splitq_stop;
-
- return 0;
-
-splitq_stop:
- netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ idpf_txq_has_room(tx_q, descs_needed),
+ 1, 1))
+ return 0;
-out:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
@@ -2190,12 +2237,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
- }
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2296,7 +2337,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
* descriptor. Reset that here.
*/
tx_desc = &txq->flex_tx[idx];
- memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
+ memset(tx_desc, 0, sizeof(*tx_desc));
if (idx == 0)
idx = txq->desc_count;
idx--;
@@ -2699,10 +2740,10 @@ static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
* Since the TX buffer rings mimics the descriptor ring, update the tx buffer
* ring entry to reflect that this index is a context descriptor
*/
-static struct idpf_flex_tx_ctx_desc *
+static union idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
- struct idpf_flex_tx_ctx_desc *desc;
+ union idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
txq->tx_buf[i].type = LIBETH_SQE_CTX;
@@ -2732,6 +2773,73 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+/**
+ * idpf_tx_tstamp - set up context descriptor for hardware timestamp
+ * @tx_q: queue to send buffer on
+ * @skb: pointer to the SKB we're sending
+ * @off: pointer to the offload struct
+ *
+ * Return: index of the reserved Tx timestamp latch on success, -1 otherwise.
+ */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ u32 idx;
+ int err;
+
+ /* only timestamp the outbound packet if the user has requested it */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return -1;
+
+ if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
+ return -1;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (off->tx_flags & IDPF_TX_FLAGS_TSO)
+ return -1;
+
+ /* Grab an open timestamp slot */
+ err = idpf_ptp_request_ts(tx_q, skb, &idx);
+ if (err) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ return -1;
+ }
+
+ off->tx_flags |= IDPF_TX_FLAGS_TSYN;
+
+ return idx;
+}
+
+/**
+ * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
+ * PHY Tx timestamp
+ * @ctx_desc: Context descriptor
+ * @idx: Index of the Tx timestamp latch
+ */
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{
+ ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
+ IDPF_TX_CTX_DTYPE_M) |
+ le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
+ IDPF_TX_CTX_CMD_M) |
+ le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
+}
+#else /* CONFIG_PTP_1588_CLOCK */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ return -1;
+}
+
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
* @skb: send buffer
@@ -2743,9 +2851,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
{
struct idpf_tx_splitq_params tx_params = { };
+ union idpf_flex_tx_ctx_desc *ctx_desc;
struct idpf_tx_buf *first;
unsigned int count;
- int tso;
+ int tso, idx;
count = idpf_tx_desc_count_required(tx_q, skb);
if (unlikely(!count))
@@ -2765,8 +2874,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
if (tso) {
/* If tso is needed, set up context desc */
- struct idpf_flex_tx_ctx_desc *ctx_desc =
- idpf_tx_splitq_get_ctx_desc(tx_q);
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
ctx_desc->tso.qw1.cmd_dtype =
cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
@@ -2784,6 +2892,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
u64_stats_update_end(&tx_q->stats_sync);
}
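+ /* If a Tx timestamp latch was granted, emit a context descriptor that
+ * carries the latch index.
+ */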
+ idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
+ if (idx != -1) {
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
+ idpf_tx_set_tstamp_desc(ctx_desc, idx);
+ }
+
/* record the location of the first descriptor for this packet */
first = &tx_q->tx_buf[tx_q->next_to_use];
first->skb = skb;
@@ -3046,6 +3160,33 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
}
/**
+ * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
+ * @rxq: pointer to the rx queue that receives the timestamp
+ * @rx_desc: pointer to rx descriptor containing timestamp
+ * @skb: skb to put timestamp in
+ */
+static void
+idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 cached_time, ts_ns;
+ u32 ts_high;
+
+ if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+ return;
+
+ cached_time = READ_ONCE(rxq->cached_phc_time);
+
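+ /* ts_high carries the 32 bits of nominal nanoseconds; extend them to a
+ * full 64-bit timestamp using the cached PHC time.
+ */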
+ ts_high = le32_to_cpu(rx_desc->ts_high);
+ ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ts_ns),
+ };
+}
+
+/**
* idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
* @rxq: Rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being populated
@@ -3070,6 +3211,9 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, decoded);
+ if (idpf_queue_has(PTP, rxq))
+ idpf_rx_hwtstamp(rxq, rx_desc, skb);
+
skb->protocol = eth_type_trans(skb, rxq->netdev);
skb_record_rx_queue(skb, rxq->idx);
@@ -4025,6 +4169,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
+ /* Switch to poll mode in the tear-down path after sending disable
+ * queues virtchnl message, as the interrupts will be disabled after
+ * that.
+ */
+ if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
+ q_vector->tx[0])))
+ return budget;
+
work_done = min_t(int, work_done, budget - 1);
/* Exit the polling mode, but don't re-enable interrupts if stack might
@@ -4035,15 +4187,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
else
idpf_vport_intr_set_wb_on_itr(q_vector);
- /* Switch to poll mode in the tear-down path after sending disable
- * queues virtchnl message, as the interrupts will be disabled after
- * that
- */
- if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
- q_vector->tx[0])))
- return budget;
- else
- return work_done;
+ return work_done;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index b029f566e57c..36a0f828a6f8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -142,6 +142,7 @@ do { \
#define IDPF_TX_FLAGS_IPV4 BIT(1)
#define IDPF_TX_FLAGS_IPV6 BIT(2)
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
+#define IDPF_TX_FLAGS_TSYN BIT(4)
union idpf_tx_flex_desc {
struct idpf_flex_tx_desc q; /* queue based scheduling */
@@ -289,6 +290,8 @@ struct idpf_ptype_state {
* @__IDPF_Q_POLL_MODE: Enable poll mode
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
+ * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
+ * queue
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
@@ -299,6 +302,7 @@ enum idpf_queue_flags_t {
__IDPF_Q_POLL_MODE,
__IDPF_Q_CRC_EN,
__IDPF_Q_HSPLIT_EN,
+ __IDPF_Q_PTP,
__IDPF_Q_FLAGS_NBITS,
};
@@ -443,6 +447,7 @@ struct idpf_tx_queue_stats {
u64_stats_t q_busy;
u64_stats_t skb_drops;
u64_stats_t dma_map_errs;
+ u64_stats_t tstamp_skipped;
};
#define IDPF_ITR_DYNAMIC 1
@@ -494,6 +499,7 @@ struct idpf_txq_stash {
* @next_to_alloc: RX buffer to allocate at
* @skb: Pointer to the skb
* @truesize: data buffer truesize in singleq
+ * @cached_phc_time: Cached PHC time for the Rx queue
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_rx_queue_stats
* @q_id: Queue id
@@ -541,6 +547,7 @@ struct idpf_rx_queue {
struct sk_buff *skb;
u32 truesize;
+ u64 cached_phc_time;
struct u64_stats_sync stats_sync;
struct idpf_rx_queue_stats q_stats;
@@ -560,7 +567,7 @@ struct idpf_rx_queue {
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
- 80 + sizeof(struct u64_stats_sync),
+ 88 + sizeof(struct u64_stats_sync),
32);
/**
@@ -617,6 +624,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
+ * @tstamp_task: Work that handles Tx timestamp read
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_tx_queue_stats
* @q_id: Queue id
@@ -630,7 +639,7 @@ struct idpf_tx_queue {
struct idpf_base_tx_desc *base_tx;
struct idpf_base_tx_ctx_desc *base_ctx;
union idpf_tx_flex_desc *flex_tx;
- struct idpf_flex_tx_ctx_desc *flex_ctx;
+ union idpf_flex_tx_ctx_desc *flex_ctx;
void *desc_ring;
};
@@ -666,6 +675,9 @@ struct idpf_tx_queue {
u16 compl_tag_cur_gen;
u16 compl_tag_gen_max;
+ struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
+ struct work_struct *tstamp_task;
+
struct u64_stats_sync stats_sync;
struct idpf_tx_queue_stats q_stats;
__cacheline_group_end_aligned(read_write);
@@ -679,7 +691,7 @@ struct idpf_tx_queue {
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 88 + sizeof(struct u64_stats_sync),
+ 112 + sizeof(struct u64_stats_sync),
24);
/**
@@ -1037,12 +1049,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
-static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
- u32 needed)
-{
- return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
- IDPF_DESC_UNUSED(tx_q),
- needed, needed);
-}
-
#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 3d2413b8684f..24febaaa8fbb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -5,88 +5,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
-
-#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
-#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
-#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
-#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
-#define IDPF_VC_XN_RING_LEN U8_MAX
-
-/**
- * enum idpf_vc_xn_state - Virtchnl transaction status
- * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
- * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
- * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
- * buffer updated
- * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
- * was an error, buffer not updated
- * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
- * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
- * return context; a callback may be provided to handle
- * return
- */
-enum idpf_vc_xn_state {
- IDPF_VC_XN_IDLE = 1,
- IDPF_VC_XN_WAITING,
- IDPF_VC_XN_COMPLETED_SUCCESS,
- IDPF_VC_XN_COMPLETED_FAILED,
- IDPF_VC_XN_SHUTDOWN,
- IDPF_VC_XN_ASYNC,
-};
-
-struct idpf_vc_xn;
-/* Callback for asynchronous messages */
-typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
- const struct idpf_ctlq_msg *);
-
-/**
- * struct idpf_vc_xn - Data structure representing virtchnl transactions
- * @completed: virtchnl event loop uses that to signal when a reply is
- * available, uses kernel completion API
- * @state: virtchnl event loop stores the data below, protected by the
- * completion's lock.
- * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
- * truncated on its way to the receiver thread according to
- * reply_buf.iov_len.
- * @reply: Reference to the buffer(s) where the reply data should be written
- * to. May be 0-length (then NULL address permitted) if the reply data
- * should be ignored.
- * @async_handler: if sent asynchronously, a callback can be provided to handle
- * the reply when it's received
- * @vc_op: corresponding opcode sent with this transaction
- * @idx: index used as retrieval on reply receive, used for cookie
- * @salt: changed every message to make unique, used for cookie
- */
-struct idpf_vc_xn {
- struct completion completed;
- enum idpf_vc_xn_state state;
- size_t reply_sz;
- struct kvec reply;
- async_vc_cb async_handler;
- u32 vc_op;
- u8 idx;
- u8 salt;
-};
-
-/**
- * struct idpf_vc_xn_params - Parameters for executing transaction
- * @send_buf: kvec for send buffer
- * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
- * @timeout_ms: timeout to wait for reply
- * @async: send message asynchronously, will not wait on completion
- * @async_handler: If sent asynchronously, optional callback handler. The user
- * must be careful when using async handlers as the memory for
- * the recv_buf _cannot_ be on stack if this is async.
- * @vc_op: virtchnl op to send
- */
-struct idpf_vc_xn_params {
- struct kvec send_buf;
- struct kvec recv_buf;
- int timeout_ms;
- bool async;
- async_vc_cb async_handler;
- u32 vc_op;
-};
+#include "idpf_ptp.h"
/**
* struct idpf_vc_xn_manager - Manager for tracking transactions
@@ -235,6 +154,55 @@ err_kfree:
return err;
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * idpf_ptp_is_mb_msg - Check if the message is PTP-related
+ * @op: virtchnl opcode
+ *
+ * Return: true if msg is PTP-related, false otherwise.
+ */
+static bool idpf_ptp_is_mb_msg(u32 op)
+{
+ switch (op) {
+ case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
+ case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_prepare_ptp_mb_msg - Prepare PTP related message
+ *
+ * @adapter: Driver specific private structure
+ * @ctlq_msg: Corresponding control queue message
+ */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ /* If the message is PTP-related and the secondary mailbox is available,
+ * send the message through the secondary mailbox.
+ */
+ if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
+ return;
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
+ ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
+ ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
+}
+#else /* !CONFIG_PTP_1588_CLOCK */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_send_mb_msg - Send message over mailbox
* @adapter: Driver specific private structure
@@ -278,6 +246,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
ctlq_msg->func_id = 0;
+
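+ /* PTP messages may be redirected to a dedicated secondary mailbox */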
+ idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
+
ctlq_msg->data_len = msg_size;
ctlq_msg->cookie.mbx.chnl_opcode = op;
ctlq_msg->cookie.mbx.chnl_retval = 0;
@@ -376,7 +347,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
* All waiting threads will be woken-up and their transaction aborted. Further
* operations on that object will fail.
*/
-static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
int i;
@@ -449,8 +420,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
* >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for
* error.
*/
-static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
- const struct idpf_vc_xn_params *params)
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
{
const struct kvec *send_buf = &params->send_buf;
struct idpf_vc_xn *xn;
@@ -900,7 +871,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_MACFILTER |
VIRTCHNL2_CAP_SPLITQ_QSCHED |
VIRTCHNL2_CAP_PROMISC |
- VIRTCHNL2_CAP_LOOPBACK);
+ VIRTCHNL2_CAP_LOOPBACK |
+ VIRTCHNL2_CAP_PTP);
xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
xn_params.send_buf.iov_base = &caps;
@@ -3029,6 +3001,11 @@ restart:
goto err_intr_req;
}
+ err = idpf_ptp_init(adapter);
+ if (err)
+ pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
+ ERR_PTR(err));
+
idpf_init_avail_queues(adapter);
/* Skew the delay for init tasks for each function based on fn number
@@ -3091,6 +3068,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
if (!remove_in_prog)
idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+ idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
@@ -3158,6 +3136,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
u16 rx_itr[] = {2, 8, 32, 96, 128};
struct idpf_rss_data *rss_data;
u16 idx = vport->idx;
+ int err;
vport_config = adapter->vport_config[idx];
rss_data = &vport_config->user_config.rss_data;
@@ -3192,6 +3171,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_alloc_vec_indexes(vport);
vport->crc_enable = adapter->crc_enable;
+
+ if (!(vport_msg->vport_flags &
+ cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
+ return;
+
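+ /* Tx timestamp capabilities are negotiated only for the uplink vport */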
+ err = idpf_ptp_get_vport_tstamps_caps(vport);
+ if (err) {
+ pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
+ return;
+ }
+
+ INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 83da5d8da56b..77578206bada 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -4,6 +4,88 @@
#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer
+ * updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ * available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
+ * truncated on its way to the receiver thread according to
+ * reply_buf.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used as retrieval on reply receive, used for cookie
+ * @salt: changed every message to make unique, used for cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
struct idpf_adapter;
struct idpf_netdev_priv;
struct idpf_vec_regs;
@@ -11,6 +93,8 @@ struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_vport_user_config_data;
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params);
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
@@ -66,5 +150,6 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
new file mode 100644
index 000000000000..bdcc54a5fb56
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
+
+/**
+ * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl get PTP capabilities message.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
+ struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
+ .caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
+ .send_buf.iov_base = &send_ptp_caps_msg,
+ .send_buf.iov_len = sizeof(send_ptp_caps_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+ struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
+ struct idpf_ptp_secondary_mbx *scnd_mbx;
+ struct idpf_ptp *ptp = adapter->ptp;
+ enum idpf_ptp_access access_type;
+ u32 temp_offset;
+ int reply_sz;
+
+ recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),
+ GFP_KERNEL);
+ if (!recv_ptp_caps_msg)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
+ xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ else if (reply_sz != sizeof(*recv_ptp_caps_msg))
+ return -EIO;
+
+ ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
+ ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
+ ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);
+
+ scnd_mbx = &ptp->secondary_mbx;
+ scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);
+
+ /* If peer_mbx_q_id holds the invalid value (0xffff), the secondary
+ * mailbox is not supported.
+ */
+ scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
+ if (scnd_mbx->valid)
+ scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;
+
+ /* Determine the access type for the PTP features */
+ idpf_ptp_get_features_access(adapter);
+
+ access_type = ptp->get_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto discipline_clock;
+
+ clock_offsets = recv_ptp_caps_msg->clk_offsets;
+
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
+ ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
+ ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
+ ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
+ ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
+discipline_clock:
+ access_type = ptp->adj_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ return 0;
+
+ clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;
+
+ /* Device clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
+ ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
+ ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
+ ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
+ ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
+ ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ /* PHY clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
+ ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
+ ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
+ ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
+ ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
+ ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ return 0;
+}
+
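
The recv_ptp_caps_msg allocation in idpf_ptp_get_caps() uses the scope-based
cleanup helper from <linux/cleanup.h>: __free(kfree) runs kfree()
automatically when the variable goes out of scope, which is why none of the
early returns above needs an explicit free. A reduced sketch of the pattern,
with a hypothetical struct name:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct example_reply {
	u32 data;
};

static int example_scoped_alloc(void)
{
	struct example_reply *buf __free(kfree) = NULL;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... any return path from here on frees buf automatically ... */

	return 0;
}
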
+/**
+ * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
+ * @adapter: Driver specific private structure
+ * @dev_clk_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get time message to get the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
+ .send_buf.iov_base = &get_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .recv_buf.iov_base = &get_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+ u64 dev_time;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(get_dev_clk_time_msg))
+ return -EIO;
+
+ dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
+ dev_clk_time->dev_clk_time_ns = dev_time;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
+ * @adapter: Driver specific private structure
+ * @time: New time value
+ *
+ * Send virtchnl set time message to set the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
+{
+ struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {
+ .dev_time_ns = cpu_to_le64(time),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
+ .send_buf.iov_base = &set_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .recv_buf.iov_base = &set_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(set_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
+ * @adapter: Driver specific private structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Send virtchnl adj time message to adjust the clock by the indicated delta.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
+{
+ struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {
+ .delta = cpu_to_le64(delta),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
+ .send_buf.iov_base = &adj_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .recv_buf.iov_base = &adj_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message
+ * @adapter: Driver specific private structure
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Send virtchnl adj fine message to adjust the frequency of the clock by
+ * incval.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
+{
+ struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {
+ .incval = cpu_to_le64(incval),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
+ .send_buf.iov_base = &adj_dev_clk_fine_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .recv_buf.iov_base = &adj_dev_clk_fine_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_fine_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get vport tstamps caps message to receive the set of tstamp
+ * capabilities per vport.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
+ struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
+ .send_buf.iov_base = &send_tx_tstamp_caps,
+ .send_buf.iov_len = sizeof(send_tx_tstamp_caps),
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ enum idpf_ptp_access tstamp_access, get_dev_clk_access;
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ struct list_head *head;
+ int err = 0, reply_sz;
+ u16 num_latches;
+ u32 size;
+
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ tstamp_access = ptp->tx_tstamp_access;
+ get_dev_clk_access = ptp->get_dev_clk_time_access;
+ if (tstamp_access == IDPF_PTP_NONE ||
+ get_dev_clk_access == IDPF_PTP_NONE)
+ return -EOPNOTSUPP;
+
+ rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcv_tx_tstamp_caps)
+ return -ENOMEM;
+
+ send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
+ xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto get_tstamp_caps_out;
+ }
+
+ num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
+ size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
+ if (reply_sz != size) {
+ err = -EIO;
+ goto get_tstamp_caps_out;
+ }
+
+ size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
+ tstamp_caps = kzalloc(size, GFP_KERNEL);
+ if (!tstamp_caps) {
+ err = -ENOMEM;
+ goto get_tstamp_caps_out;
+ }
+
+ tstamp_caps->access = true;
+ tstamp_caps->num_entries = num_latches;
+
+ INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
+ INIT_LIST_HEAD(&tstamp_caps->latches_free);
+
+ spin_lock_init(&tstamp_caps->latches_lock);
+ spin_lock_init(&tstamp_caps->status_lock);
+
+ tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
+ __le32 offset_l, offset_h;
+
+ ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL);
+ if (!ptp_tx_tstamp) {
+ err = -ENOMEM;
+ goto err_free_ptp_tx_stamp_list;
+ }
+
+ tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];
+
+ if (tstamp_access != IDPF_PTP_DIRECT)
+ goto skip_offsets;
+
+ offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
+ offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
+ ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
+ ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);
+
+skip_offsets:
+ ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tstamp_caps->latches_free);
+
+ tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
+ }
+
+ vport->tx_tstamp_caps = tstamp_caps;
+ kfree(rcv_tx_tstamp_caps);
+
+ return 0;
+
+err_free_ptp_tx_stamp_list:
+ head = &tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ kfree(tstamp_caps);
+get_tstamp_caps_out:
+ kfree(rcv_tx_tstamp_caps);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on
+ * the skb compatibility.
+ * @caps: Tx timestamp capabilities that monitor the latch status
+ * @skb: skb for which the tstamp value is returned through virtchnl message
+ * @current_state: Current state of the Tx timestamp latch
+ * @expected_state: Expected state of the Tx timestamp latch
+ *
+ * Find the skb tracker for which the Tx timestamp was received and change
+ * its state to the expected value.
+ *
+ * Return: true if the tracker has been found and updated, false otherwise.
+ */
+static bool
+idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
+ struct sk_buff *skb,
+ enum idpf_ptp_tx_tstamp_state current_state,
+ enum idpf_ptp_tx_tstamp_state expected_state)
+{
+ bool updated = false;
+
+ spin_lock(&caps->status_lock);
+ for (u16 i = 0; i < caps->num_entries; i++) {
+ struct idpf_ptp_tx_tstamp_status *status;
+
+ status = &caps->tx_tstamp_status[i];
+
+ if (skb == status->skb && status->state == current_state) {
+ status->state = expected_state;
+ updated = true;
+ break;
+ }
+ }
+ spin_unlock(&caps->status_lock);
+
+ return updated;
+}
+
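
For readability, the latch state machine driven by this helper and the
functions below can be summarized as follows (the FREE-to-REQUEST transition
happens in the Tx path, outside this hunk):

/*
 *   IDPF_PTP_FREE        --Tx requests a timestamp-->       IDPF_PTP_REQUEST
 *   IDPF_PTP_REQUEST     --idpf_ptp_get_tx_tstamp()-->      IDPF_PTP_READ_VALUE
 *   IDPF_PTP_READ_VALUE  --idpf_ptp_get_tstamp_value()-->   IDPF_PTP_FREE
 */
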
+/**
+ * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
+ * back to the skb.
+ * @vport: Virtual port structure
+ * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
+ * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
+ *
+ * Read the value of the Tx timestamp for a given latch received from the
+ * Control Plane, extend it to 64 bits and provide it back to the skb.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
+ struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
+{
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct skb_shared_hwtstamps shhwtstamps;
+ bool state_upd = false;
+ u8 tstamp_ns_lo_bit;
+ u64 tstamp;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
+ ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_READ_VALUE,
+ IDPF_PTP_FREE);
+ if (!state_upd)
+ return -EINVAL;
+
+ tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
+ consume_skb(ptp_tx_tstamp->skb);
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_tstamp_caps->latches_free);
+
+ return 0;
+}
+
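
idpf_ptp_extend_ts() itself is not part of this hunk. The usual technique in
related Intel drivers is to splice the latched low bits onto a cached PHC
reading and correct for wrap; an adapted sketch assuming a 32-bit latched
value, not the driver's exact implementation:

static u64 example_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_lo = lower_32_bits(cached_phc_time);
	u32 delta = in_tstamp - phc_lo;

	/* A huge forward delta really means the latch predates the cached
	 * reading and the low word wrapped in between.
	 */
	if (delta > (U32_MAX / 2))
		return cached_phc_time - (phc_lo - in_tstamp);

	return cached_phc_time + delta;
}
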
+/**
+ * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
+ * @adapter: Driver specific private structure
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * Read the Tx tstamp values from a received message and put them directly
+ * to the skb. The number of timestamps to read is specified by the
+ * virtchnl message.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
+ struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
+ struct idpf_vport *tstamp_vport = NULL;
+ struct list_head *head;
+ u16 num_latches;
+ u32 vport_id;
+ int err = 0;
+
+ recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
+ vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);
+
+ idpf_for_each_vport(adapter, vport) {
+ if (!vport)
+ continue;
+
+ if (vport->vport_id == vport_id) {
+ tstamp_vport = vport;
+ break;
+ }
+ }
+
+ if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
+ return -EINVAL;
+
+ tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
+ num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ head = &tx_tstamp_caps->latches_in_use;
+
+ for (u16 i = 0; i < num_latches; i++) {
+ tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];
+
+ if (!tstamp_latch.valid)
+ continue;
+
+ if (list_empty(head)) {
+ err = -ENOBUFS;
+ goto unlock;
+ }
+
+ list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
+ if (tstamp_latch.index == tx_tstamp->idx) {
+ list_del(&tx_tstamp->list_member);
+ err = idpf_ptp_get_tstamp_value(tstamp_vport,
+ &tstamp_latch,
+ tx_tstamp);
+ if (err)
+ goto unlock;
+
+ break;
+ }
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
+ * The message contains a list of indexes set in the Tx descriptors.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ .async = true,
+ .async_handler = idpf_ptp_get_tx_tstamp_async_handler,
+ };
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ int reply_sz, size, msg_size;
+ struct list_head *head;
+ bool state_upd;
+ u16 id = 0;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ head = &tx_tstamp_caps->latches_in_use;
+
+ size = struct_size(send_tx_tstamp_msg, tstamp_latches,
+ tx_tstamp_caps->num_entries);
+ send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
+ if (!send_tx_tstamp_msg)
+ return -ENOMEM;
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ list_for_each_entry(ptp_tx_tstamp, head, list_member) {
+ u8 idx;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE);
+ if (!state_upd)
+ continue;
+
+ idx = ptp_tx_tstamp->idx;
+ send_tx_tstamp_msg->tstamp_latches[id].index = idx;
+ id++;
+ }
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
+ send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
+ send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
+ xn_params.send_buf.iov_base = send_tx_tstamp_msg;
+ xn_params.send_buf.iov_len = msg_size;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ kfree(send_tx_tstamp_msg);
+
+ return min(reply_sz, 0);
+}
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 63deb120359c..11b8f6f05799 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -68,6 +68,16 @@ enum virtchnl2_op {
VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
+
+ /* TimeSync opcodes */
+ VIRTCHNL2_OP_PTP_GET_CAPS = 541,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP = 542,
+ VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME = 543,
+ VIRTCHNL2_OP_PTP_GET_CROSS_TIME = 544,
+ VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME = 545,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
};
/**
@@ -560,6 +570,14 @@ struct virtchnl2_queue_reg_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
/**
+ * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
+ * @VIRTCHNL2_VPORT_UPLINK_PORT: Vport is a representative of the underlying
+ * physical port
+ */
+enum virtchnl2_vport_flags {
+ VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+};
+
+/**
* struct virtchnl2_create_vport - Create vport config info.
* @vport_type: See enum virtchnl2_vport_type.
* @txq_model: See virtchnl2_queue_model.
@@ -577,7 +595,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
* @max_mtu: Max MTU. CP populates this field on response.
* @vport_id: Vport id. CP populates this field on response.
* @default_mac_addr: Default MAC address.
- * @pad: Padding.
+ * @vport_flags: See enum virtchnl2_vport_flags.
* @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
* @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
* @pad1: Padding.
@@ -610,7 +628,7 @@ struct virtchnl2_create_vport {
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[ETH_ALEN];
- __le16 pad;
+ __le16 vport_flags;
__le64 rx_desc_ids;
__le64 tx_desc_ids;
u8 pad1[72];
@@ -1270,4 +1288,296 @@ struct virtchnl2_promisc_info {
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
+/**
+ * enum virtchnl2_ptp_caps - PTP capabilities
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
+ * clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
+ *
+ * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
+ * There are two access methods - mailbox (_MB) and direct.
+ * The PTP capabilities enable Main Timer operations: get/set/adjust the Main
+ * Timer, cross timestamping and Tx timestamping.
+ */
+enum virtchnl2_ptp_caps {
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME = BIT(0),
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB = BIT(1),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME = BIT(2),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB = BIT(3),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME = BIT(4),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB = BIT(5),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK = BIT(6),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB = BIT(7),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS = BIT(8),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB = BIT(9),
+};
+
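
idpf_ptp_get_features_access(), called from idpf_ptp_get_caps() above but not
shown in this diff, maps these bits to a per-feature access method. A hedged
sketch of that mapping, assuming an IDPF_PTP_MAILBOX enumerator exists
alongside the IDPF_PTP_DIRECT and IDPF_PTP_NONE values used earlier:

static enum idpf_ptp_access example_dev_clk_access(u32 caps)
{
	if (caps & VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME)
		return IDPF_PTP_DIRECT;		/* MMIO register reads */
	if (caps & VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB)
		return IDPF_PTP_MAILBOX;	/* virtchnl round trip */

	return IDPF_PTP_NONE;
}
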
+/**
+ * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks
+ * registers.
+ * @dev_clk_ns_l: Device clock low register offset
+ * @dev_clk_ns_h: Device clock high register offset
+ * @phy_clk_ns_l: PHY clock low register offset
+ * @phy_clk_ns_h: PHY clock high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_clk_reg_offsets {
+ __le32 dev_clk_ns_l;
+ __le32 dev_clk_ns_h;
+ __le32 phy_clk_ns_l;
+ __le32 phy_clk_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
+ * time registers.
+ * @sys_time_ns_l: System time low register offset
+ * @sys_time_ns_h: System time high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_cross_time_reg_offsets {
+ __le32 sys_time_ns_l;
+ __le32 sys_time_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks
+ * adjustments registers.
+ * @dev_clk_cmd_type: Device clock command type register offset
+ * @dev_clk_incval_l: Device clock increment value low register offset
+ * @dev_clk_incval_h: Device clock increment value high registers offset
+ * @dev_clk_shadj_l: Device clock shadow adjust low register offset
+ * @dev_clk_shadj_h: Device clock shadow adjust high register offset
+ * @phy_clk_cmd_type: PHY timer command type register offset
+ * @phy_clk_incval_l: PHY timer increment value low register offset
+ * @phy_clk_incval_h: PHY timer increment value high register offset
+ * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
+ * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
+ */
+struct virtchnl2_ptp_clk_adj_reg_offsets {
+ __le32 dev_clk_cmd_type;
+ __le32 dev_clk_incval_l;
+ __le32 dev_clk_incval_h;
+ __le32 dev_clk_shadj_l;
+ __le32 dev_clk_shadj_h;
+ __le32 phy_clk_cmd_type;
+ __le32 phy_clk_incval_l;
+ __le32 phy_clk_incval_h;
+ __le32 phy_clk_shadj_l;
+ __le32 phy_clk_shadj_h;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
+ * capabilities.
+ * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
+ * @index: Latch index provided to the Tx descriptor
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch_caps {
+ __le32 tx_latch_reg_offset_l;
+ __le32 tx_latch_reg_offset_h;
+ u8 index;
+ u8 pad[7];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
+ * tstamp entries.
+ * @vport_id: Vport number
+ * @num_latches: Total number of latches
+ * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
+ * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
+ * @pad: Padding for future tstamp granularity extensions
+ * @tstamp_latches: Capabilities of Tx timestamp entries
+ *
+ * PF/VF sends this message to negotiate the Tx timestamp latches for each
+ * Vport.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 tstamp_ns_lo_bit;
+ u8 tstamp_ns_hi_bit;
+ u8 pad[8];
+
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
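
The __counted_by_le() annotation ties the flexible array's bounds to the
little-endian num_latches member for compiler bounds checking; allocations
size the message with struct_size(), as the driver code earlier in this patch
does. An illustrative allocator:

static struct virtchnl2_ptp_get_vport_tx_tstamp_caps *
example_alloc_tstamp_caps(u16 num_latches)
{
	struct virtchnl2_ptp_get_vport_tx_tstamp_caps *caps;

	/* struct_size() computes header + array with overflow protection */
	caps = kzalloc(struct_size(caps, tstamp_latches, num_latches),
		       GFP_KERNEL);
	if (caps)
		caps->num_latches = cpu_to_le16(num_latches);

	return caps;
}
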
+
+/**
+ * struct virtchnl2_ptp_get_caps - Get PTP capabilities
+ * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
+ * @max_adj: The maximum possible frequency adjustment
+ * @base_incval: The default timer increment value
+ * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @secondary_mbx: Indicates to the driver that it should create a secondary
+ * mailbox to interact with the control plane for PTP
+ * @pad: Padding for future extensions
+ * @clk_offsets: Main timer and PHY registers offsets
+ * @cross_time_offsets: Cross time registers offsets
+ * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
+ *
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates the
+ * bitmap with supported features and fills in the appropriate structures.
+ * If HW uses the primary MBX for PTP, secondary_mbx is set to false.
+ * If HW uses a secondary MBX for PTP, secondary_mbx is set to true; the
+ * Control Plane then has 2 MBX while the driver has 1 MBX, and the "send to
+ * peer driver" mechanism may be used to send a message using the valid
+ * ptp_peer_mb_q_id and ptp_peer_id.
+ * If HW does not use send to peer driver, secondary_mbx is a don't-care
+ * field and peer_mbx_q_id holds the invalid value (0xFFFF).
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
+ */
+struct virtchnl2_ptp_get_caps {
+ __le32 caps;
+ __le32 max_adj;
+ __le64 base_incval;
+ __le16 peer_mbx_q_id;
+ u8 peer_id;
+ u8 secondary_mbx;
+ u8 pad[4];
+
+ struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes the Tx
+ * tstamp value, index and validity.
+ * @tstamp: Timestamp value
+ * @index: Timestamp index from which the value is read
+ * @valid: Timestamp validity
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch {
+ __le64 tstamp;
+ u8 index;
+ u8 valid;
+ u8 pad[6];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
+ * associated with the vport.
+ * @vport_id: Number of vport that requests the timestamp
+ * @num_latches: Number of latches
+ * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
+ * @pad: Padding for future extensions
+ * @device_time: device time if get_devtime_with_txtstmp was set in request
+ * @tstamp_latches: PTP TX timestamp latch
+ *
+ * PF/VF sends this message to receive a specified number of timestamps
+ * entries.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 get_devtime_with_txtstmp;
+ u8 pad[1];
+ __le64 device_time;
+
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
+
+/**
+ * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
+ * VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
+ * @dev_time_ns: Device clock time value in nanoseconds
+ *
+ * PF/VF sends this message to receive the time from the main timer.
+ */
+struct virtchnl2_ptp_get_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_get_cross_time: Associated with message
+ * VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
+ * @sys_time_ns: System counter value expressed in nanoseconds, read
+ * synchronously with device time
+ * @dev_time_ns: Device clock time value expressed in nanoseconds
+ *
+ * PF/VF sends this message to receive the cross time.
+ */
+struct virtchnl2_ptp_get_cross_time {
+ __le64 sys_time_ns;
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
+
+/**
+ * struct virtchnl2_ptp_set_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
+ * @dev_time_ns: Device time value expressed in nanoseconds to set
+ *
+ * PF/VF sends this message to set the time of the main timer.
+ */
+struct virtchnl2_ptp_set_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
+ * @incval: Source timer increment value per clock cycle
+ *
+ * PF/VF sends this message to adjust the frequency of the main timer by the
+ * indicated increment value.
+ */
+struct virtchnl2_ptp_adj_dev_clk_fine {
+ __le64 incval;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME.
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * PF/VF sends this message to adjust the time of the main timer by the delta.
+ */
+struct virtchnl2_ptp_adj_dev_clk_time {
+ __le64 delta;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
+
#endif /* _VIRTCHNL_2_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 02f340280d20..f34ead8243e9 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -391,7 +391,8 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG,
- IGB_RING_FLAG_TX_DISABLED
+ IGB_RING_FLAG_TX_DISABLED,
+ IGB_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
@@ -722,6 +723,8 @@ enum igb_boards {
extern char igb_driver_name[];
+void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx,
+ struct napi_struct *napi);
int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *ring,
struct xdp_frame *xdpf);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c646c71915f0..b76a154e635e 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -947,6 +947,9 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igb_configure_msix(adapter);
@@ -1194,7 +1197,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
+ netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll,
+ v_idx);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -2096,6 +2100,22 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
+void igb_set_queue_napi(struct igb_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2103,6 +2123,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
int igb_up(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i;
/* hardware has been reset, we need to reload some things */
@@ -2110,8 +2131,11 @@ int igb_up(struct igb_adapter *adapter)
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
@@ -2181,6 +2205,7 @@ void igb_down(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igb_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
@@ -4113,8 +4138,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err;
int i;
@@ -4166,8 +4192,11 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(E1000_TSICR);
@@ -5436,7 +5465,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)
*/
static void igb_update_phy_info(struct timer_list *t)
{
- struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct igb_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
igb_get_phy_info(&adapter->hw);
}
@@ -5526,7 +5556,8 @@ static void igb_check_lvmmc(struct igb_adapter *adapter)
**/
static void igb_watchdog(struct timer_list *t)
{
- struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igb_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -5726,11 +5757,29 @@ no_wait:
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(E1000_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ struct igb_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(E1000_EICS, eics);
} else {
- wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ struct igb_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
}
igb_spoof_check(adapter);
@@ -9061,6 +9110,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -9120,6 +9170,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9136,6 +9187,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
__free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9674,8 +9726,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
+ rtnl_lock();
if (netif_running(netdev))
igb_down(adapter);
+ rtnl_unlock();
+
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -9734,16 +9789,21 @@ static void igb_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
if (netif_running(netdev)) {
if (!test_bit(__IGB_DOWN, &adapter->state)) {
dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
+ rtnl_unlock();
return;
}
+
if (igb_up(adapter)) {
dev_err(&pdev->dev, "igb_up failed after reset\n");
+ rtnl_unlock();
return;
}
}
+ rtnl_unlock();
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f323e1c1989f..793c96016288 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -502,13 +502,6 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Both the rising and falling edge are timestamped */
if (rq->extts.flags & PTP_STRICT_FLAGS &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -658,13 +651,6 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -1356,6 +1342,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
@@ -1378,6 +1367,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
index 157d43787fa0..5cf67ba29269 100644
--- a/drivers/net/ethernet/intel/igb/igb_xsk.c
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -415,6 +415,7 @@ int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index beb01248600f..e55dd9345833 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1900,7 +1900,8 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
**/
static void igbvf_watchdog(struct timer_list *t)
{
- struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igbvf_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 2f265c0959c7..859a15e4ccba 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -40,6 +40,11 @@ void igc_ethtool_set_ops(struct net_device *);
#define IGC_MAX_TX_TSTAMP_REGS 4
+struct igc_fpe_t {
+ struct ethtool_mmsv mmsv;
+ u32 tx_min_frag_size;
+};
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -333,6 +338,8 @@ struct igc_adapter {
struct timespec64 period;
} perout[IGC_N_PEROUT];
+ struct igc_fpe_t fpe;
+
/* LEDs */
struct mutex led_mutex;
struct igc_led_classdev *leds;
@@ -387,11 +394,11 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
-#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19)
+#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
- IGC_FLAG_TSN_LEGACY_ENABLED)
+ IGC_FLAG_TSN_PREEMPT_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -736,7 +743,10 @@ struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
u32 location);
int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
-
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter);
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter);
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu);
+void igc_flush_tx_descriptors(struct igc_ring *ring);
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index bf8cdfbba9ff..6320eabb72fe 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -49,6 +49,7 @@ struct igc_adv_tx_context_desc {
#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IGC_ADVTXD_PAYLEN_MASK 0XFFFFC000 /* Adv desc PAYLEN mask */
#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IGC_RAR_ENTRIES 16
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index d19325b0e6e0..7189dfc389ad 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -308,6 +308,8 @@
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_POPTS_SMD_MASK 0x3000 /* Indicates whether it's SMD-V or SMD-R */
+
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
@@ -363,6 +365,8 @@
#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_SMD_TYPE_V 0x01 /* SMD-V Packet */
+#define IGC_RXD_STAT_SMD_TYPE_R 0x02 /* SMD-R Packet */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
@@ -372,7 +376,8 @@
#define IGC_RXDEXT_STATERR_LB 0x00040000
/* Advanced Receive Descriptor bit definitions */
-#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_SMD_TYPE_MASK 0x06000
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
@@ -396,11 +401,47 @@
#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
-#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
-#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
-
-#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
+/* Mask for RX packet buffer size */
+#define IGC_RXPBSIZE_EXP_MASK GENMASK(5, 0)
+#define IGC_BMC2OSPBSIZE_MASK GENMASK(11, 6)
+#define IGC_RXPBSIZE_BE_MASK GENMASK(17, 12)
+/* Mask for timestamp in RX buffer */
+#define IGC_RXPBS_CFG_TS_EN_MASK GENMASK(31, 31)
+/* High-priority RX packet buffer size (KB). Used for Express traffic when preemption is enabled */
+#define IGC_RXPBSIZE_EXP(x) FIELD_PREP(IGC_RXPBSIZE_EXP_MASK, (x))
+/* BMC to OS packet buffer size in KB */
+#define IGC_BMC2OSPBSIZE(x) FIELD_PREP(IGC_BMC2OSPBSIZE_MASK, (x))
+/* Low-priority RX packet buffer size (KB). Used for BE traffic when preemption is enabled */
+#define IGC_RXPBSIZE_BE(x) FIELD_PREP(IGC_RXPBSIZE_BE_MASK, (x))
+/* Enable RX packet buffer for timestamp descriptor, saving 16 bytes per packet if set */
+#define IGC_RXPBS_CFG_TS_EN FIELD_PREP(IGC_RXPBS_CFG_TS_EN_MASK, 1)
+/* Default value following I225/I226 SW User Manual Section 8.3.1 */
+#define IGC_RXPBSIZE_EXP_BMC_DEFAULT ( \
+ IGC_RXPBSIZE_EXP(34) | IGC_BMC2OSPBSIZE(2))
+#define IGC_RXPBSIZE_EXP_BMC_BE_TSN ( \
+ IGC_RXPBSIZE_EXP(15) | IGC_BMC2OSPBSIZE(2) | IGC_RXPBSIZE_BE(15))
+
+/* Mask for TX packet buffer size */
+#define IGC_TXPB0SIZE_MASK GENMASK(5, 0)
+#define IGC_TXPB1SIZE_MASK GENMASK(11, 6)
+#define IGC_TXPB2SIZE_MASK GENMASK(17, 12)
+#define IGC_TXPB3SIZE_MASK GENMASK(23, 18)
+/* Mask for OS to BMC packet buffer size */
+#define IGC_OS2BMCPBSIZE_MASK GENMASK(29, 24)
+/* TX Packet buffer size in KB */
+#define IGC_TXPB0SIZE(x) FIELD_PREP(IGC_TXPB0SIZE_MASK, (x))
+#define IGC_TXPB1SIZE(x) FIELD_PREP(IGC_TXPB1SIZE_MASK, (x))
+#define IGC_TXPB2SIZE(x) FIELD_PREP(IGC_TXPB2SIZE_MASK, (x))
+#define IGC_TXPB3SIZE(x) FIELD_PREP(IGC_TXPB3SIZE_MASK, (x))
+/* OS to BMC packet buffer size in KB */
+#define IGC_OS2BMCPBSIZE(x) FIELD_PREP(IGC_OS2BMCPBSIZE_MASK, (x))
+/* Default value following I225/I226 SW User Manual Section 8.3.2 */
+#define IGC_TXPBSIZE_DEFAULT ( \
+ IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \
+ IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4))
+#define IGC_TXPBSIZE_TSN ( \
+ IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \
+ IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4))
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
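
As a sanity check on the field-based encoding, the new defaults compose to
the same raw values as the constants they replace, while the TSN TX layout
grows each queue's buffer from 5 KB to 7 KB:

/*
 * IGC_RXPBSIZE_EXP(34) = 34 << 0 = 0x022
 * IGC_BMC2OSPBSIZE(2)  =  2 << 6 = 0x080
 *   => IGC_RXPBSIZE_EXP_BMC_DEFAULT = 0x0A2  (old I225_RXPBSIZE_DEFAULT)
 *
 * IGC_TXPB0SIZE(20) | IGC_OS2BMCPBSIZE(4) = 20 | (4 << 24) = 0x04000014
 *   => IGC_TXPBSIZE_DEFAULT matches the old I225_TXPBSIZE_DEFAULT
 *
 * IGC_TXPBSIZE_TSN = 7 | 7 << 6 | 7 << 12 | 7 << 18 | (4 << 24) = 0x041C71C7
 *   (the old constant 0x04145145 encoded 5 KB per queue)
 */
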
@@ -539,8 +580,10 @@
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
+#define IGC_TQAVCTRL_PREEMPT_ENA 0x00000002
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080
+#define IGC_TQAVCTRL_MIN_FRAG_MASK 0x0000C000
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 817838677817..3fc1eded9605 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -8,6 +8,7 @@
#include "igc.h"
#include "igc_diag.h"
+#include "igc_tsn.h"
/* forward declaration */
struct igc_stats {
@@ -1781,6 +1782,83 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return 0;
}
+static int igc_ethtool_get_mm(struct net_device *netdev,
+ struct ethtool_mm_state *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ ethtool_mmsv_get_mm(&fpe->mmsv, cmd);
+ cmd->tx_min_frag_size = fpe->tx_min_frag_size;
+ cmd->rx_min_frag_size = IGC_RX_MIN_FRAG_SIZE;
+
+ return 0;
+}
+
+static int igc_ethtool_set_mm(struct net_device *netdev,
+ struct ethtool_mm_cfg *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ fpe->tx_min_frag_size = igc_fpe_get_supported_frag_size(cmd->tx_min_frag_size);
+ if (fpe->tx_min_frag_size != cmd->tx_min_frag_size)
+ NL_SET_ERR_MSG_MOD(extack,
+ "tx-min-frag-size value set is unsupported. Rounded up to supported value (64, 128, 192, 256)");
+
+ if (fpe->mmsv.pmac_enabled != cmd->pmac_enabled) {
+ if (cmd->pmac_enabled)
+ static_branch_inc(&igc_fpe_enabled);
+ else
+ static_branch_dec(&igc_fpe_enabled);
+ }
+
+ ethtool_mmsv_set_mm(&fpe->mmsv, cmd);
+
+ return igc_tsn_offload_apply(adapter);
+}
+
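
The static_branch_inc()/static_branch_dec() pair in igc_ethtool_set_mm()
toggles a static key so the RX/TX hot paths can test for frame preemption
with a patched branch instead of a load and compare. The key's definition is
not in this hunk; the usual pattern looks like this, with illustrative names:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(example_fpe_enabled);

static inline bool example_fpe_is_pmac_enabled(void)
{
	/* Compiles to a NOP until static_branch_inc() flips the key */
	return static_branch_unlikely(&example_fpe_enabled);
}
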
+/**
+ * igc_ethtool_get_frame_ass_error - Get the frame assembly error count.
+ * @reg_value: Register value for IGC_PRMEXCPRCNT
+ * Return: The count of frame assembly errors.
+ */
+static u64 igc_ethtool_get_frame_ass_error(u32 reg_value)
+{
+ /* Out of order statistics */
+ u32 ooo_frame_cnt, ooo_frag_cnt;
+ u32 miss_frame_frag_cnt;
+
+ ooo_frame_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, reg_value);
+ ooo_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, reg_value);
+ miss_frame_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT,
+ reg_value);
+
+ return ooo_frame_cnt + ooo_frag_cnt + miss_frame_frag_cnt;
+}
+
+static u64 igc_ethtool_get_frame_smd_error(u32 reg_value)
+{
+ return FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, reg_value);
+}
+
+static void igc_ethtool_get_mm_stats(struct net_device *dev,
+ struct ethtool_mm_stats *stats)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 reg_value;
+
+ reg_value = rd32(IGC_PRMEXCPRCNT);
+
+ stats->MACMergeFrameAssErrorCount = igc_ethtool_get_frame_ass_error(reg_value);
+ stats->MACMergeFrameSmdErrorCount = igc_ethtool_get_frame_smd_error(reg_value);
+ stats->MACMergeFrameAssOkCount = rd32(IGC_PRMPTDRCNT);
+ stats->MACMergeFragCountRx = rd32(IGC_PRMEVNTRCNT);
+ stats->MACMergeFragCountTx = rd32(IGC_PRMEVNTTCNT);
+}
+
static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -2076,6 +2154,9 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_link_ksettings = igc_ethtool_get_link_ksettings,
.set_link_ksettings = igc_ethtool_set_link_ksettings,
.self_test = igc_ethtool_diag_test,
+ .get_mm = igc_ethtool_get_mm,
+ .get_mm_stats = igc_ethtool_get_mm_stats,
+ .set_mm = igc_ethtool_set_mm,
};
void igc_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index b1669d7cf435..686793c539f2 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2464,8 +2464,7 @@ unmap:
return -ENOMEM;
}
-static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
- int cpu)
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu)
{
int index = cpu;
@@ -2489,7 +2488,7 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
if (unlikely(!xdpf))
return -EFAULT;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2549,7 +2548,7 @@ out:
}
/* This function assumes __netif_tx_lock is held by the caller. */
-static void igc_flush_tx_descriptors(struct igc_ring *ring)
+void igc_flush_tx_descriptors(struct igc_ring *ring)
{
/* Once tail pointer is updated, hardware can fetch the descriptors
* any time so we issue a write membar here to ensure all memory
@@ -2566,7 +2565,7 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
struct igc_ring *ring;
if (status & IGC_XDP_TX) {
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2638,6 +2637,14 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
size -= IGC_TS_HDR_LEN;
}
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf)) {
+ /* Advance the ring next-to-clean */
+ igc_is_non_eop(rx_ring, rx_desc);
+ cleaned_count++;
+ continue;
+ }
+
if (!skb) {
xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
@@ -3145,6 +3152,11 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
break;
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_transmitted_smd_v(tx_desc))
+ ethtool_mmsv_event_handle(&adapter->fpe.mmsv,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
+
/* Hold the completions while there's a pending tx hardware
* timestamp request from XDP Tx metadata.
*/
@@ -4037,6 +4049,30 @@ static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
}
/**
+ * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address
+ * @adapter: Pointer to the igc_adapter structure.
+ *
+ * Frame preemption verification requires that packets with the all-zeroes
+ * MAC address are allowed to be received by the driver. This function adds the
+ * all-zeroes destination address to the list of acceptable addresses.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1);
+}
+
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty);
+}
+
+/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -5311,6 +5347,9 @@ void igc_down(struct igc_adapter *adapter)
igc_disable_all_tx_rings_hw(adapter);
igc_clean_all_tx_rings(adapter);
igc_clean_all_rx_rings(adapter);
+
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_stop(&adapter->fpe.mmsv);
}
void igc_reinit_locked(struct igc_adapter *adapter)
@@ -5710,7 +5749,8 @@ static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
*/
static void igc_update_phy_info(struct timer_list *t)
{
- struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+ struct igc_adapter *adapter = timer_container_of(adapter, t,
+ phy_info_timer);
igc_get_phy_info(&adapter->hw);
}
@@ -5752,7 +5792,8 @@ bool igc_has_link(struct igc_adapter *adapter)
*/
static void igc_watchdog(struct timer_list *t)
{
- struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ struct igc_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -5835,6 +5876,10 @@ static void igc_watchdog_task(struct work_struct *work)
*/
igc_tsn_adjust_txtime_offset(adapter);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ true);
+
if (adapter->link_speed != SPEED_1000)
goto no_wait;
@@ -5870,6 +5915,10 @@ no_wait:
netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ false);
+
/* link state has changed, schedule phy info update */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
@@ -6439,6 +6488,10 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
+ /* Preemptible TCs are not supported yet */
+ if (qopt->mqprio.preemptible_tcs)
+ return -EOPNOTSUPP;
+
igc_ptp_read(adapter, &now);
if (igc_tsn_is_taprio_activated_by_user(adapter) &&
@@ -6679,13 +6732,14 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio)
{
struct igc_hw *hw = &adapter->hw;
- int i;
+ int err, i;
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
if (!mqprio->qopt.num_tc) {
adapter->strict_priority_enable = false;
+ netdev_reset_tc(adapter->netdev);
goto apply;
}
@@ -6716,6 +6770,21 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
mqprio->qopt.offset);
+ err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ err = netdev_set_tc_queue(adapter->netdev, i, 1,
+ adapter->queue_per_tc[i]);
+ if (err)
+ return err;
+ }
+
+ /* In case the card is configured with fewer than four queues. */
+ for (; i < IGC_MAX_TX_QUEUES; i++)
+ adapter->queue_per_tc[i] = i;
+
mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
apply:
@@ -6779,7 +6848,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -7125,6 +7194,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ /* enable HW vlan tag insertion/stripping by default */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -7157,8 +7229,8 @@ static int igc_probe(struct pci_dev *pdev,
}
/* configure RXPBSIZE and TXPBSIZE */
- wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_RXPBS, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
@@ -7190,6 +7262,8 @@ static int igc_probe(struct pci_dev *pdev,
igc_tsn_clear_schedule(adapter);
+ igc_fpe_init(adapter);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index efc7b30e4211..f4f5c28615d3 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -257,13 +257,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -300,10 +293,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags)
- return -EOPNOTSUPP;
-
if (on) {
pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
@@ -1162,6 +1151,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS;
adapter->ptp_caps.n_per_out = IGC_N_PEROUT;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.n_pins = IGC_N_SDP;
adapter->ptp_caps.verify = igc_ptp_verify_pin;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 12ddc5793651..f343c6bfc6be 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -222,6 +222,22 @@
#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+/* Time sync registers - preemption statistics */
+#define IGC_PRMPTDRCNT 0x04284 /* Good RX Preempted Packets */
+#define IGC_PRMEVNTTCNT 0x04298 /* TX Preemption event counter */
+#define IGC_PRMEVNTRCNT 0x0429C /* RX Preemption event counter */
+
+/* Preemption Exception Counter */
+#define IGC_PRMEXCPRCNT 0x42A0
+/* Received out of order packets with SMD-C */
+#define IGC_PRMEXCPRCNT_OOO_SMDC 0x000000FF
+/* Received out of order packets with SMD-C and wrong Frame CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAME_CNT 0x0000FF00
+/* Received out of order packets with SMD-C and wrong Frag CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAG_CNT 0x00FF0000
+/* Received packets with SMD-S and wrong Frag CNT and Frame CNT */
+#define IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT 0xFF000000
+
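Each IGC_PRMEXCPRCNT_* mask above selects one 8-bit exception counter packed into the 32-bit register. A hedged sketch of unpacking them, using the driver's usual rd32()/FIELD_GET() idiom (the helper itself is hypothetical, not part of this patch):

/* Hypothetical helper: split IGC_PRMEXCPRCNT into its four counters. */
static void igc_read_preempt_exceptions(struct igc_hw *hw, u8 cnt[4])
{
	u32 val = rd32(IGC_PRMEXCPRCNT);

	cnt[0] = FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, val);
	cnt[1] = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, val);
	cnt[2] = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, val);
	cnt[3] = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT, val);
}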
/* Transmit Scheduling Registers */
#define IGC_TQAVCTRL 0x3570
#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 1e44374ca1ff..f22cc4d4f459 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,9 +2,143 @@
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
+#include "igc_base.h"
#include "igc_hw.h"
#include "igc_tsn.h"
+#define MIN_MULTPLIER_TX_MIN_FRAG 0
+#define MAX_MULTPLIER_TX_MIN_FRAG 3
+/* Frag size is based on Section 8.12.2 of the SW User Manual */
+#define TX_MIN_FRAG_SIZE 64
+#define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \
+ (MAX_MULTPLIER_TX_MIN_FRAG + 1))
+
+DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+static int igc_fpe_init_smd_frame(struct igc_ring *ring,
+ struct igc_tx_buffer *buffer,
+ struct sk_buff *skb)
+{
+ dma_addr_t dma = dma_map_single(ring->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->protocol = 0;
+ buffer->bytecount = skb->len;
+ buffer->gso_segs = 1;
+ buffer->time_stamp = jiffies;
+ dma_unmap_len_set(buffer, len, skb->len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ return 0;
+}
+
+static int igc_fpe_init_tx_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ enum igc_txd_popts_type type)
+{
+ u32 cmd_type, olinfo_status = 0;
+ struct igc_tx_buffer *buffer;
+ union igc_adv_tx_desc *desc;
+ int err;
+
+ if (!igc_desc_unused(ring))
+ return -EBUSY;
+
+ buffer = &ring->tx_buffer_info[ring->next_to_use];
+ err = igc_fpe_init_smd_frame(ring, buffer, skb);
+ if (err)
+ return err;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ buffer->bytecount;
+
+ olinfo_status |= FIELD_PREP(IGC_ADVTXD_PAYLEN_MASK, buffer->bytecount);
+
+ switch (type) {
+ case SMD_V:
+ case SMD_R:
+ olinfo_status |= FIELD_PREP(IGC_TXD_POPTS_SMD_MASK, type);
+ break;
+ }
+
+ desc = IGC_TX_DESC(ring, ring->next_to_use);
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+
+ netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+ buffer->next_to_watch = desc;
+ ring->next_to_use = (ring->next_to_use + 1) % ring->count;
+
+ return 0;
+}
+
+static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
+ enum igc_txd_popts_type type)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+ struct sk_buff *skb;
+ int err;
+
+ ring = igc_get_tx_ring(adapter, cpu);
+ nq = txring_txq(ring);
+
+ skb = alloc_skb(SMD_FRAME_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_zero(skb, SMD_FRAME_SIZE);
+
+ __netif_tx_lock(nq, cpu);
+
+ err = igc_fpe_init_tx_descriptor(ring, skb, type);
+ igc_flush_tx_descriptors(ring);
+
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
+static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+ int err;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+
+ if (type == ETHTOOL_MPACKET_VERIFY) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_V);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-V frame\n");
+ } else if (type == ETHTOOL_MPACKET_RESPONSE) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_R);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-R frame\n");
+ }
+}
+
+static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+ .send_mpacket = igc_fpe_send_mpacket,
+};
+
+void igc_fpe_init(struct igc_adapter *adapter)
+{
+ adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+ ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
+}
+
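For context, the verification handshake itself is driven by the shared ethtool_mmsv core; the driver only supplies the .send_mpacket() hook above and feeds events back from its Tx and Rx paths. Roughly (a hedged summary of the framework, not code from this patch):

/* link up  -> ethtool_mmsv_link_state_handle(mmsv, true)
 *             core requests an SMD-V via .send_mpacket()
 * Tx completion of our SMD-V -> ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET
 * Rx of the peer's SMD-V     -> ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET
 *                               (core answers with an SMD-R)
 * Rx of the peer's SMD-R     -> ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET
 *                               (verification complete)
 */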
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
@@ -37,17 +171,16 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
- if (adapter->taprio_offload_enable)
- new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
- if (is_any_launchtime(adapter))
+ if (adapter->taprio_offload_enable || is_any_launchtime(adapter) ||
+ adapter->strict_priority_enable)
new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
- if (adapter->strict_priority_enable)
- new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED;
return new_flags;
}
@@ -125,6 +258,29 @@ static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
wr32(IGC_TXARB, txarb);
}
+/**
+ * igc_tsn_set_rxpbsize - Set the receive packet buffer size
+ * @adapter: Pointer to the igc_adapter structure
+ * @rxpbs_exp_bmc_be: Value to set the receive packet buffer size, including
+ * express buffer, BMC buffer, and Best Effort buffer
+ *
+ * The IGC_RXPBS register value may include allocations for the Express buffer,
+ * BMC buffer, Best Effort buffer, and the timestamp descriptor buffer
+ * (IGC_RXPBS_CFG_TS_EN).
+ */
+static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
+ u32 rxpbs_exp_bmc_be)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 rxpbs = rd32(IGC_RXPBS);
+
+ rxpbs &= ~(IGC_RXPBSIZE_EXP_MASK | IGC_BMC2OSPBSIZE_MASK |
+ IGC_RXPBSIZE_BE_MASK);
+ rxpbs |= rxpbs_exp_bmc_be;
+
+ wr32(IGC_RXPBS, rxpbs);
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@@ -136,15 +292,18 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
int i;
wr32(IGC_GTXOFFSET, 0);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+
if (igc_is_device_id_i226(hw))
igc_tsn_restore_retx_default(adapter);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
- IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
wr32(IGC_TQAVCTRL, tqavctrl);
@@ -157,16 +316,12 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
- /* Reset mqprio TC configuration. */
- netdev_reset_tc(adapter->netdev);
-
/* Restore the default Tx arbitration: Priority 0 has the highest
* priority and is assigned to queue 0 and so on and so forth.
*/
igc_tsn_tx_arb(adapter, queue_per_tc);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
- adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED;
return 0;
}
@@ -190,53 +345,51 @@ static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
wr32(IGC_RETX_CTL, retxctl);
}
+static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
+{
+ u8 mult = (fpe->tx_min_frag_size / TX_MIN_FRAG_SIZE) - 1;
+
+ return clamp_t(u8, mult, MIN_MULTPLIER_TX_MIN_FRAG,
+ MAX_MULTPLIER_TX_MIN_FRAG);
+}
+
+u32 igc_fpe_get_supported_frag_size(u32 frag_size)
+{
+ const u32 supported_sizes[] = {64, 128, 192, 256};
+
+ /* Find the smallest supported size that is >= frag_size */
+ for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
+ if (frag_size <= supported_sizes[i])
+ return supported_sizes[i];
+ }
+
+ /* Should not happen */
+ return TX_MAX_FRAG_SIZE;
+}
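A worked example of the two fragment-size helpers (sizes per the 64/128/192/256 table above; the calling sequence is a hedged reconstruction of how ethtool configuration is expected to flow):

/* ethtool requests tx-min-frag-size = 100:
 *   igc_fpe_get_supported_frag_size(100) -> 128, the next supported
 *       size, assumed to be stored in fpe->tx_min_frag_size;
 *   igc_fpe_get_frag_size_mult()         -> 128 / 64 - 1 = 1, clamped
 *       to [MIN_MULTPLIER_TX_MIN_FRAG, MAX_MULTPLIER_TX_MIN_FRAG].
 * igc_tsn_enable_offload() then programs the multiplier with
 * FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, 1).
 */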
+
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
+ u32 frag_size_mult;
int i;
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_BE_TSN);
+
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
if (adapter->strict_priority_enable) {
- int err;
-
- err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
- if (err)
- return err;
-
- for (i = 0; i < adapter->num_tc; i++) {
- err = netdev_set_tc_queue(adapter->netdev, i, 1,
- adapter->queue_per_tc[i]);
- if (err)
- return err;
- }
-
- /* In case the card is configured with less than four queues. */
- for (; i < IGC_MAX_TX_QUEUES; i++)
- adapter->queue_per_tc[i] = i;
-
/* Configure queue priorities according to the user provided
* mapping.
*/
igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
-
- /* Enable legacy TSN mode which will do strict priority without
- * any other TSN features.
- */
- tqavctrl = rd32(IGC_TQAVCTRL);
- tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN;
- tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV;
- wr32(IGC_TQAVCTRL, tqavctrl);
-
- return 0;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -361,10 +514,16 @@ skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
- tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
-
+ tqavctrl = rd32(IGC_TQAVCTRL) & ~(IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ tqavctrl |= IGC_TQAVCTRL_PREEMPT_ENA;
+
+ frag_size_mult = igc_fpe_get_frag_size_mult(&adapter->fpe);
+ tqavctrl |= FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, frag_size_mult);
+
adapter->qbv_count++;
cycle = adapter->cycle_time;
@@ -425,6 +584,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
unsigned int new_flags;
int err = 0;
+ if (adapter->fpe.mmsv.pmac_enabled) {
+ err = igc_enable_empty_addr_recv(adapter);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error adding empty address to MAC filter\n");
+ } else {
+ igc_disable_empty_addr_recv(adapter);
+ }
+
new_flags = igc_tsn_new_flags(adapter);
if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED))
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index 98ec845a86bf..c2a77229207b 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -4,9 +4,61 @@
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
+#define IGC_RX_MIN_FRAG_SIZE 60
+#define SMD_FRAME_SIZE 60
+
+enum igc_txd_popts_type {
+ SMD_V = 0x01,
+ SMD_R = 0x02,
+};
+
+DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+void igc_fpe_init(struct igc_adapter *adapter);
+u32 igc_fpe_get_supported_frag_size(u32 frag_size);
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
+static inline bool igc_fpe_is_pmac_enabled(struct igc_adapter *adapter)
+{
+ return static_branch_unlikely(&igc_fpe_enabled) &&
+ adapter->fpe.mmsv.pmac_enabled;
+}
+
+static inline bool igc_fpe_handle_mpacket(struct igc_adapter *adapter,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size, void *pktbuf)
+{
+ u32 status_error = le32_to_cpu(rx_desc->wb.upper.status_error);
+ int smd;
+
+ smd = FIELD_GET(IGC_RXDADV_STAT_SMD_TYPE_MASK, status_error);
+ if (smd != IGC_RXD_STAT_SMD_TYPE_V && smd != IGC_RXD_STAT_SMD_TYPE_R)
+ return false;
+
+ if (size == SMD_FRAME_SIZE && mem_is_zero(pktbuf, SMD_FRAME_SIZE)) {
+ struct ethtool_mmsv *mmsv = &adapter->fpe.mmsv;
+ enum ethtool_mmsv_event event;
+
+ if (smd == IGC_RXD_STAT_SMD_TYPE_V)
+ event = ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET;
+ else
+ event = ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET;
+
+ ethtool_mmsv_event_handle(mmsv, event);
+ }
+
+ return true;
+}
+
+static inline bool igc_fpe_transmitted_smd_v(union igc_adv_tx_desc *tx_desc)
+{
+ u32 olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
+ u8 smd = FIELD_GET(IGC_TXD_POPTS_SMD_MASK, olinfo_status);
+
+ return smd == SMD_V;
+}
+
#endif /* _IGC_TSN_H_ */
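igc_fpe_is_pmac_enabled() pairs the igc_fpe_enabled static branch with the per-adapter pmac_enabled flag, so the Tx/Rx hot paths cost a single patched no-op while preemption is disabled everywhere. The toggle site is outside this excerpt; a minimal sketch of the usual static-key pattern (an assumption, not taken from the patch):

/* Hypothetical toggle, e.g. from the ethtool set-mm handler: bump the
 * key when any adapter enables the pMAC, drop it again on disable.
 */
static void igc_fpe_set_pmac_enabled(bool enable)
{
	if (enable)
		static_branch_inc(&igc_fpe_enabled);
	else
		static_branch_dec(&igc_fpe_enabled);
}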
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index b456d102655a..2e7738f41c58 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -4,12 +4,14 @@
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o ixgbe_e610.o
+ ixgbe_xsk.o ixgbe_e610.o devlink/devlink.o ixgbe_fw_update.o \
+ devlink/region.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
new file mode 100644
index 000000000000..54f1b83dfe42
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_info_ctx {
+ char buf[128];
+ struct ixgbe_orom_info pending_orom;
+ struct ixgbe_nvm_info pending_nvm;
+ struct ixgbe_netlist_info pending_netlist;
+ struct ixgbe_hw_dev_caps dev_caps;
+};
+
+enum ixgbe_devlink_version_type {
+ IXGBE_DL_VERSION_RUNNING,
+ IXGBE_DL_VERSION_STORED
+};
+
+static void ixgbe_info_get_dsn(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ u8 dsn[8];
+
+ /* Copy the DSN into an array in big-endian format */
+ put_unaligned_be64(pci_get_dsn(adapter->pdev), dsn);
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
+}
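"%8phD" is the kernel's hex byte-dump specifier with '-' separators, so the big-endian DSN prints in the familiar PCI serial-number form. Illustrative only, with a made-up value:

/* put_unaligned_be64(0x0123456789abcdefULL, dsn);
 * snprintf(..., "%8phD", dsn) -> "01-23-45-67-89-ab-cd-ef"
 */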
+
+static void ixgbe_info_orom_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ctx->buf[0] = '\0';
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x",
+ nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+
+ return;
+ }
+
+ ixgbe_get_orom_version(hw, &nvm_ver);
+ if (nvm_ver.or_valid)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%d.%d.%d",
+ nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
+}
+
+static void ixgbe_info_eetrack(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ u32 eetrack = hw->flash.nvm.eetrack;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ eetrack = ctx->pending_nvm.eetrack;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", eetrack);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+
+ /* No ETRACK version for OEM */
+ if (nvm_ver.oem_valid) {
+ ctx->buf[0] = '\0';
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm_ver.etk_id);
+}
+
+static void ixgbe_info_fw_api(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->api_maj_ver, hw->api_min_ver, hw->api_patch);
+}
+
+static void ixgbe_info_fw_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
+}
+
+static void ixgbe_info_fw_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", nvm->srev);
+}
+
+static void ixgbe_info_orom_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", orom->srev);
+}
+
+static void ixgbe_info_nvm_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
+}
+
+static void ixgbe_info_netlist_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ /* The netlist version fields are BCD formatted */
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
+}
+
+static void ixgbe_info_netlist_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+}
+
+static int ixgbe_set_ctx_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_info_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ bool *pending_orom, *pending_nvm, *pending_netlist;
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to discover device capabilities");
+ return err;
+ }
+
+ pending_orom = &ctx->dev_caps.common_cap.nvm_update_pending_orom;
+ pending_nvm = &ctx->dev_caps.common_cap.nvm_update_pending_nvm;
+ pending_netlist = &ctx->dev_caps.common_cap.nvm_update_pending_netlist;
+
+ if (*pending_orom) {
+ err = ixgbe_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err)
+ *pending_orom = false;
+ }
+
+ if (*pending_nvm) {
+ err = ixgbe_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err)
+ *pending_nvm = false;
+ }
+
+ if (*pending_netlist) {
+ err = ixgbe_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err)
+ *pending_netlist = false;
+ }
+
+ return 0;
+}
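These pending_* capability bits are what allow the 'stored' version entries to report the inactive flash bank: after an image is flashed but not yet activated, devlink exposes both trees. A hypothetical excerpt of "devlink dev info" output in that state:

/* versions:
 *   running:
 *     fw.psid.api 1.20
 *   stored:
 *     fw.psid.api 1.30
 */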
+
+static int ixgbe_devlink_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_fw_api(adapter, ctx);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_build(adapter, ctx);
+ err = devlink_info_version_running_put(req, "fw.mgmt.build", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ return devlink_info_version_running_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int
+ixgbe_devlink_pending_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ return devlink_info_version_stored_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int ixgbe_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_info_ctx *ctx;
+ int err;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
+
+ ixgbe_info_get_dsn(adapter, ctx);
+ err = devlink_info_serial_number_put(req, ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ err = hw->eeprom.ops.read_pba_string(hw, ctx->buf, sizeof(ctx->buf));
+ if (err)
+ goto free_ctx;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err || hw->mac.type != ixgbe_mac_e610)
+ goto free_ctx;
+
+ err = ixgbe_set_ctx_dev_caps(hw, ctx, extack);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_info_get_e610(adapter, req, ctx);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_pending_info_get_e610(adapter, req, ctx);
+free_ctx:
+ kfree(ctx);
+ return err;
+}
+
+/**
+ * ixgbe_devlink_reload_empr_start - Start EMP reset to activate new firmware
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
+ * @limit: limits on what the reload should do, such as not resetting the device
+ * @extack: netlink extended ACK structure
+ *
+ * Allow user to activate new Embedded Management Processor firmware by
+ * issuing device specific EMP reset. Called in response to
+ * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
+ *
+ * Note that teardown and rebuild of the driver state happens automatically as
+ * part of an interrupt and watchdog task. This is because all physical
+ * functions on the device must be able to reset when an EMP reset occurs from
+ * any source.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_devlink_reload_empr_start(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* Pending is a bitmask of which flash banks have a pending update,
+ * including the main NVM bank, the Option ROM bank, and the netlist
+ * bank. If any of these bits are set, then there is a pending update
+ * waiting to be activated.
+ */
+ if (!pending) {
+ NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
+ return -ECANCELED;
+ }
+
+ if (adapter->fw_emp_reset_disabled) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
+ return -ECANCELED;
+ }
+
+ err = ixgbe_aci_nvm_update_empr(hw);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to trigger EMP device reset to reload firmware");
+
+ return err;
+}
+
+/* Wait up to 10 sec in 0.5 sec ticks; an EMPR takes no less than half a second. */
+#define IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC 20
+
+/**
+ * ixgbe_devlink_reload_empr_finish - finishes EMP reset
+ * @devlink: pointer to the devlink instance
+ * @action: the action to perform.
+ * @limit: limits on what reload should do
+ * @actions_performed: actions performed
+ * @extack: netlink extended ACK structure
+ *
+ * Wait for new NVM to be loaded during EMP reset.
+ *
+ * Return: -ETIME when the timeout is exceeded, 0 on success.
+ */
+static int ixgbe_devlink_reload_empr_finish(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i = 0;
+ u32 fwsm;
+
+ do {
+ /* Immediately after triggering the EMP reset, the FWSM register
+ * may not be cleared yet, so start the loop with a delay to
+ * avoid reading a stale value.
+ */
+ mdelay(500);
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+
+ if (i++ >= IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC)
+ return -ETIME;
+
+ } while (!(fwsm & IXGBE_FWSM_FW_VAL_BIT));
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+
+ adapter->flags2 &= ~(IXGBE_FLAG2_API_MISMATCH |
+ IXGBE_FLAG2_FW_ROLLBACK);
+
+ return 0;
+}
+
+static const struct devlink_ops ixgbe_devlink_ops = {
+ .info_get = ixgbe_devlink_info_get,
+ .supported_flash_update_params =
+ DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .flash_update = ixgbe_flash_pldm_image,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_down = ixgbe_devlink_reload_empr_start,
+ .reload_up = ixgbe_devlink_reload_empr_finish,
+};
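With these ops registered, activating staged firmware from user space is the standard devlink reload flow (bus address hypothetical):

/*   devlink dev reload pci/0000:03:00.0 action fw_activate
 *
 * reload_down (ixgbe_devlink_reload_empr_start) triggers the EMP reset;
 * reload_up (ixgbe_devlink_reload_empr_finish) then polls FWSM until the
 * new NVM image reports valid, or gives up with -ETIME after about 10 s.
 */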
+
+/**
+ * ixgbe_allocate_devlink - Allocate devlink instance
+ * @dev: device to allocate devlink for
+ *
+ * Allocate a devlink instance for this physical function.
+ *
+ * Return: pointer to the device adapter structure on success,
+ * ERR_PTR(-ENOMEM) when allocation failed.
+ */
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev)
+{
+ struct ixgbe_adapter *adapter;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ixgbe_devlink_ops, sizeof(*adapter), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ adapter = devlink_priv(devlink);
+ adapter->devlink = devlink;
+
+ return adapter;
+}
+
+/**
+ * ixgbe_devlink_set_switch_id - Set unique switch ID based on PCI DSN
+ * @adapter: pointer to the device adapter structure
+ * @ppid: struct with switch id information
+ */
+static void ixgbe_devlink_set_switch_id(struct ixgbe_adapter *adapter,
+ struct netdev_phys_item_id *ppid)
+{
+ u64 id = pci_get_dsn(adapter->pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+/**
+ * ixgbe_devlink_register_port - Register devlink port
+ * @adapter: pointer to the device adapter structure
+ *
+ * Create and register a devlink_port for this physical function.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)
+{
+ struct devlink_port *devlink_port = &adapter->devlink_port;
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = adapter->hw.bus.func;
+ ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register(devlink, devlink_port, 0);
+ if (err) {
+ dev_err(dev,
+ "devlink port registration failed, err %d\n", err);
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
new file mode 100644
index 000000000000..381558058048
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _IXGBE_DEVLINK_H_
+#define _IXGBE_DEVLINK_H_
+
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev);
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/region.c b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
new file mode 100644
index 000000000000..76f6571c3c34
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+
+#define IXGBE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops;
+static const struct devlink_region_ops ixgbe_sram_region_ops;
+
+static int ixgbe_devlink_parse_region(struct ixgbe_hw *hw,
+ const struct devlink_region_ops *ops,
+ bool *read_shadow_ram, u32 *nvm_size)
+{
+ if (ops == &ixgbe_nvm_region_ops) {
+ *read_shadow_ram = false;
+ *nvm_size = hw->flash.flash_size;
+ } else if (ops == &ixgbe_sram_region_ops) {
+ *read_shadow_ram = true;
+ *nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_snapshot - Capture a snapshot of the NVM content
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW cmd.
+ *
+ * Capture a snapshot of the whole requested NVM region.
+ *
+ * No need to worry about freeing @data; the devlink core takes care of it.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ENOMEM when memory allocation fails, and
+ * -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u8 *nvm_data, *buf;
+ u32 nvm_size, left;
+ u8 num_blks;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ nvm_data = kvzalloc(nvm_size, GFP_KERNEL);
+ if (!nvm_data)
+ return -ENOMEM;
+
+ num_blks = DIV_ROUND_UP(nvm_size, IXGBE_DEVLINK_READ_BLK_SIZE);
+ buf = nvm_data;
+ left = nvm_size;
+
+ for (int i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, IXGBE_DEVLINK_READ_BLK_SIZE, left);
+
+ /* The NVM lock must be acquired on each loop iteration because
+ * reading the whole NVM takes longer than the maximum period the
+ * lock may be held, as defined by IXGBE_NVM_TIMEOUT.
+ */
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire NVM semaphore");
+ kvfree(nvm_data);
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, i * IXGBE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, buf, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read RAM content");
+ ixgbe_release_nvm(hw);
+ kvfree(nvm_data);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+
+ buf += read_sz;
+ left -= read_sz;
+ }
+
+ *data = nvm_data;
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_devcaps_snapshot - Capture a snapshot of device capabilities
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW for
+ * the device-caps devlink region.
+ *
+ * Capture a snapshot of the device capabilities reported by firmware.
+ *
+ * No need to worry about freeing @data; the devlink core takes care of it.
+ *
+ * Return: 0 on success, -ENOMEM when memory allocation fails, or the
+ * return code of the read operation.
+ */
+static int ixgbe_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_aci_cmd_list_caps_elem *caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ caps = kvzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_list_caps(hw, caps, IXGBE_ACI_MAX_BUFFER_SIZE, NULL,
+ ixgbe_aci_opc_list_dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read device capabilities");
+ kvfree(caps);
+ return err;
+ }
+
+ *data = (u8 *)caps;
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_read - Read a portion of NVM flash content
+ * @devlink: the devlink instance
+ * @ops: the devlink region to snapshot
+ * @extack: extended ACK response structure
+ * @offset: the offset to start at
+ * @size: the amount to read
+ * @data: the data buffer to read into
+ *
+ * This function is called in response to DEVLINK_CMD_REGION_READ to directly
+ * read a section of the NVM contents.
+ *
+ * Read from either the nvm-flash region or the shadow-ram region.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ERANGE when the read exceeds the region
+ * size, and -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_read(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u32 nvm_size;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ if (offset + size > nvm_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
+ return -ERANGE;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, (u32)offset, &size, data, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ixgbe_release_nvm(hw);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+ return 0;
+}
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops = {
+ .name = "nvm-flash",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_sram_region_ops = {
+ .name = "shadow-ram",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_devcaps_region_ops = {
+ .name = "device-caps",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_devcaps_snapshot,
+};
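The regions are consumed through the generic devlink region commands; a hedged example with a hypothetical bus address:

/*   devlink region new  pci/0000:03:00.0/nvm-flash snapshot id 1
 *   devlink region dump pci/0000:03:00.0/nvm-flash snapshot 1
 *
 * 'new' invokes ixgbe_devlink_nvm_snapshot() above; 'dump' streams the
 * captured snapshot back to user space.
 */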
+
+/**
+ * ixgbe_devlink_init_regions - Initialize devlink regions
+ * @adapter: adapter instance
+ *
+ * Create devlink regions that provide access to the contents of the
+ * device's flash memory.
+ */
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ u64 nvm_size, sram_size;
+
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ nvm_size = adapter->hw.flash.flash_size;
+ adapter->nvm_region = devl_region_create(devlink, &ixgbe_nvm_region_ops,
+ 1, nvm_size);
+ if (IS_ERR(adapter->nvm_region)) {
+ dev_err(dev,
+ "Failed to create NVM devlink region, err %ld\n",
+ PTR_ERR(adapter->nvm_region));
+ adapter->nvm_region = NULL;
+ }
+
+ sram_size = adapter->hw.flash.sr_words * 2u;
+ adapter->sram_region = devl_region_create(devlink, &ixgbe_sram_region_ops,
+ 1, sram_size);
+ if (IS_ERR(adapter->sram_region)) {
+ dev_err(dev,
+ "Failed to create shadow-ram devlink region, err %ld\n",
+ PTR_ERR(adapter->sram_region));
+ adapter->sram_region = NULL;
+ }
+
+ adapter->devcaps_region = devl_region_create(devlink,
+ &ixgbe_devcaps_region_ops,
+ 10, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (IS_ERR(adapter->devcaps_region)) {
+ dev_err(dev,
+ "Failed to create device-caps devlink region, err %ld\n",
+ PTR_ERR(adapter->devcaps_region));
+ adapter->devcaps_region = NULL;
+ }
+}
+
+/**
+ * ixgbe_devlink_destroy_regions - Destroy devlink regions
+ * @adapter: adapter instance
+ *
+ * Remove previously created regions for this adapter instance.
+ */
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ if (adapter->nvm_region)
+ devl_region_destroy(adapter->nvm_region);
+
+ if (adapter->sram_region)
+ devl_region_destroy(adapter->sram_region);
+
+ if (adapter->devcaps_region)
+ devl_region_destroy(adapter->devcaps_region);
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e6a380d4929b..47311b134a7a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -17,6 +17,8 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/devlink.h>
+
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
@@ -612,6 +614,11 @@ struct ixgbe_adapter {
struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
struct mii_bus *mii_bus;
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+ struct devlink_region *nvm_region;
+ struct devlink_region *sram_region;
+ struct devlink_region *devcaps_region;
unsigned long state;
@@ -667,6 +674,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
#define IXGBE_FLAG2_NO_MEDIA BIT(21)
#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
+#define IXGBE_FLAG2_API_MISMATCH BIT(23)
+#define IXGBE_FLAG2_FW_ROLLBACK BIT(24)
/* Tx fast path data */
int num_tx_queues;
@@ -755,6 +764,8 @@ struct ixgbe_adapter {
u32 atr_sample_rate;
spinlock_t fdir_perfect_lock;
+ bool fw_emp_reset_disabled;
+
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
@@ -830,6 +841,17 @@ struct ixgbe_adapter {
spinlock_t vfs_lock;
};
+struct ixgbe_netdevice_priv {
+ struct ixgbe_adapter *adapter;
+};
+
+static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
+{
+ struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);
+
+ return priv->adapter;
+}
+
static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
if (static_key_enabled(&ixgbe_xdp_locking_key))
@@ -945,6 +967,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter);
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4aaaea3b5f8f..444da982593f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1169,6 +1169,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 964988b4d58b..d5b1b974b4a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -2230,6 +2230,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 7beaf6ea57f9..5784d5d1896e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -332,6 +332,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* Devices in the second generation:
* 82599
* X540
+ * E610
**/
int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 19d6b6fa8fb3..3dd5a16a14df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -118,14 +118,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
}
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
@@ -142,7 +142,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i, j;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
@@ -167,7 +167,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
@@ -184,7 +184,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
}
@@ -193,7 +193,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
}
@@ -219,7 +219,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
@@ -230,7 +230,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
}
@@ -239,7 +239,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
@@ -250,7 +250,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
}
@@ -258,7 +258,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
u8 setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
@@ -269,14 +269,14 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
u8 *setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
struct ixgbe_hw *hw = &adapter->hw;
int ret = DCB_NO_HW_CHG;
@@ -383,7 +383,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (capid) {
case DCB_CAP_ATTR_PG:
@@ -420,7 +420,7 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -447,14 +447,14 @@ static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->dcb_cfg.pfc_mode_enable;
}
static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.pfc_mode_enable = state;
}
@@ -471,7 +471,7 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
*/
static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
@@ -486,7 +486,7 @@ static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
@@ -506,7 +506,7 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err;
__u8 max_tc = 0;
@@ -559,7 +559,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
int i;
@@ -584,7 +584,7 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 *prio_tc;
int err;
@@ -616,7 +616,7 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -661,7 +661,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -705,13 +705,13 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
return adapter->dcbx_cap;
}
static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
int err = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index 00935747c8c5..71ea25de1bac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -344,6 +344,40 @@ void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
}
/**
+ * ixgbe_aci_get_fw_ver - Get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_ver *resp;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = le32_to_cpu(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return err;
+}
+
+/**
* ixgbe_aci_req_res - request a common resource
* @hw: pointer to the HW struct
* @res: resource ID
@@ -554,6 +588,20 @@ static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
break;
case IXGBE_ACI_CAPS_NVM_VER:
break;
+ case IXGBE_ACI_CAPS_PENDING_NVM_VER:
+ caps->nvm_update_pending_nvm = true;
+ break;
+ case IXGBE_ACI_CAPS_PENDING_OROM_VER:
+ caps->nvm_update_pending_orom = true;
+ break;
+ case IXGBE_ACI_CAPS_PENDING_NET_VER:
+ caps->nvm_update_pending_netlist = true;
+ break;
+ case IXGBE_ACI_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ !!(number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT);
+ break;
case IXGBE_ACI_CAPS_MAX_MTU:
caps->max_mtu = number;
break;
@@ -1411,6 +1459,61 @@ int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
}
/**
+ * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Get firmware version and start the hardware using the generic
+ * start_hw() and ixgbe_start_hw_gen2() functions.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_aci_get_fw_ver(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_start_hw_generic(hw);
+ if (err)
+ return err;
+
+ ixgbe_start_hw_gen2(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: true to restore the original LED mode, false to blink
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
* ixgbe_get_media_type_e610 - Gets media type
* @hw: pointer to the HW struct
*
@@ -1743,6 +1846,38 @@ void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_fw_recovery_mode_e610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM recovery mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+static bool ixgbe_fw_recovery_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_e610 - Check FW NVM rollback mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM rollback mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in rollback mode, otherwise false.
+ */
+static bool ixgbe_fw_rollback_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_ROLLBACK_M);
+}
+
+/**
* ixgbe_init_phy_ops_e610 - PHY specific init
* @hw: pointer to hardware structure
*
@@ -2226,6 +2361,131 @@ int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
}
/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ __le16 len;
+ int err;
+
+ /* Read the module length from the Shadow RAM: pass module_typeid 0,
+ * convert the word offset of the module size to a byte offset, and
+ * set both last_command and read_shadow_ram to true.
+ */
+ err = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (err)
+ return err;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.nvm;
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_U_MASK, offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dump the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate 1-byte
+ * flag values in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.nvm;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_A_MASK,
+ cmd_flags);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return err;
+}
+
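As the kernel-doc above notes, the 16-bit cmd_flags value is split across two one-byte descriptor fields. A standalone sketch of that split (plain C; the high-byte mask is assumed to be 0xFF00, standing in for IXGBE_ACI_NVM_OFFSET_HI_A_MASK):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t cmd_flags = 0x1234;	/* hypothetical activate flags */

	/* Low byte goes into cmd->cmd_flags, high byte into cmd->offset_high. */
	uint8_t desc_cmd_flags = cmd_flags & 0xFF;
	uint8_t desc_offset_high = (cmd_flags & 0xFF00) >> 8;

	printf("cmd_flags=0x%02x offset_high=0x%02x\n",
	       desc_cmd_flags, desc_offset_high);
	return 0;
}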
+/**
* ixgbe_nvm_validate_checksum - validate checksum
* @hw: pointer to the HW struct
*
@@ -2267,6 +2527,955 @@ int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_discover_flash_size - Discover the available flash size
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (err == -EIO &&
+ hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ err = 0;
+ max_size = offset;
+ } else if (!err) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
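The bisection above maintains the invariant that offsets below min_size are readable and offsets at or above max_size are not, so the loop converges on the first inaccessible offset. A self-contained sketch of the same search against a mocked flash (the 16MB limit and the 8MB part size are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define NVM_MAX_OFFSET 0xFFFFFF	/* stand-in for IXGBE_ACI_NVM_MAX_OFFSET */

static const uint32_t real_size = 8u * 1024 * 1024;	/* hypothetical 8MB part */

/* Stand-in for ixgbe_read_flat_nvm(): reads past the end of flash fail. */
static int read_byte(uint32_t offset)
{
	return offset < real_size ? 0 : -1;
}

int main(void)
{
	uint32_t min_size = 0, max_size = NVM_MAX_OFFSET + 1;

	while (max_size - min_size > 1) {
		uint32_t offset = (max_size + min_size) / 2;

		if (read_byte(offset))
			max_size = offset;	/* offset is inaccessible */
		else
			min_size = offset;	/* offset is readable */
	}

	printf("discovered flash size: %u bytes\n", (unsigned)max_size);
	return 0;
}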
+/**
+ * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset,
+ u32 *pointer)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K;
+ else
+ *pointer = value * sizeof(u16);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * SZ_4K;
+
+ return 0;
+}
+
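A minimal sketch of the two unit conversions above (plain C; the 4KB-units flag is assumed to live in the top bit of the pointer word, mirroring IXGBE_SR_NVM_PTR_4KB_UNITS):

#include <stdio.h>
#include <stdint.h>

#define SR_PTR_4KB_UNITS 0x8000	/* assumed top-bit flag */
#define SZ_4K 4096

/* Pointer words: the top bit selects 4KB sectors vs. 16-bit words. */
static uint32_t ptr_to_bytes(uint16_t value)
{
	if (value & SR_PTR_4KB_UNITS)
		return (uint32_t)(value & ~SR_PTR_4KB_UNITS) * SZ_4K;
	return (uint32_t)value * sizeof(uint16_t);
}

/* Area-size words are always in 4KB sectors. */
static uint32_t size_to_bytes(uint16_t value)
{
	return (uint32_t)value * SZ_4K;
}

int main(void)
{
	printf("%u\n", (unsigned)ptr_to_bytes(0x8002));	/* 2 sectors -> 8192 */
	printf("%u\n", (unsigned)ptr_to_bytes(0x0100));	/* 256 words -> 512 */
	printf("%u\n", (unsigned)size_to_bytes(3));	/* 3 sectors -> 12288 */
	return 0;
}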
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD,
+ &ctrl_word);
+ if (err)
+ return err;
+
+ if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) !=
+ IXGBE_SR_CTRL_WORD_VALID)
+ return -ENODATA;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, lookup the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case IXGBE_E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
+
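The offset arithmetic for dual banks is easy to invert by mistake, so here is the same selection logic as a standalone sketch (plain C, hypothetical base/size values):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum bank_select { ACTIVE_BANK, INACTIVE_BANK };

/* The second bank is stored immediately after the first, same size. */
static uint32_t bank_offset(uint32_t base, uint32_t size,
			    bool second_bank_active, enum bank_select want)
{
	if (want == ACTIVE_BANK)
		return base + (second_bank_active ? size : 0);
	return base + (second_bank_active ? 0 : size);
}

int main(void)
{
	uint32_t base = 0x10000, size = 0x8000;	/* hypothetical module layout */

	/* With the 2nd bank active, the active copy is the second one. */
	printf("active:   0x%x\n",
	       (unsigned)bank_offset(base, size, true, ACTIVE_BANK));
	printf("inactive: 0x%x\n",
	       (unsigned)bank_offset(base, size, true, INACTIVE_BANK));
	return 0;
}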
+/**
+ * ixgbe_read_flash_module - Read a word from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the word read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that
+ * module.
+ *
+ * The word will be read using flat NVM access, and relies on the
+ * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ u32 start;
+ int err;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start)
+ return -EINVAL;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local,
+ sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_orom_module - Read from the active Option ROM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive OROM module
+ * @offset: offset into the OROM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active Option ROM module of the flash.
+ * Note that unlike the NVM module, the CSS data is stored at the end of the
+ * module instead of at the beginning.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_orom_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the
+ * Authentication header size, and then convert to words.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (err)
+ return err;
+
+ /* CSS header length is in DWORDs, so convert to words and add
+ * authentication header size.
+ */
+ hdr_len_dword = (hdr_len_h << 16) | hdr_len_l;
+ *hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
+
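Numerically, the conversion above is the CSS length in DWORDs, times two words per DWORD, plus the authentication header length in words. A tiny worked sketch (the auth-header constant is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define NVM_AUTH_HEADER_LEN 0x40	/* hypothetical, in words */

int main(void)
{
	/* Example CSS header length words as read from the module. */
	uint16_t hdr_len_l = 0x0129, hdr_len_h = 0x0000;

	uint32_t dwords = ((uint32_t)hdr_len_h << 16) | hdr_len_l;
	uint32_t words = dwords * 2 + NVM_AUTH_HEADER_LEN;

	printf("CSS header: %u dwords -> %u words\n",
	       (unsigned)dwords, (unsigned)words);
	return 0;
}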
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active NVM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (err)
+ return err;
+
+ *srev = (srev_h << 16) | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @civd: storage for the Option ROM CIVD data.
+ *
+ * Searches through the Option ROM flash contents to locate the CIVD data for
+ * the image.
+ *
+ * Return: the exit code of the operation.
+ */
+static int
+ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
+ struct ixgbe_orom_civd_info *civd)
+{
+ struct ixgbe_orom_civd_info tmp;
+ u32 offset;
+ int err;
+
+ /* The CIVD section is located in the Option ROM aligned to 512 bytes.
+ * The first 4 bytes must contain the ASCII characters "$CIV".
+ * A simple modulo 256 sum of all of the bytes of the structure must
+ * equal 0.
+ */
+ for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
+ offset += SZ_512) {
+ u8 sum = 0;
+ u32 i;
+
+ err = ixgbe_read_flash_module(hw, bank,
+ IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ offset,
+ (u8 *)&tmp, sizeof(tmp));
+ if (err)
+ return err;
+
+ /* Skip forward until we find a matching signature */
+ if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
+ sizeof(tmp.signature)))
+ continue;
+
+ /* Verify that the simple checksum is zero */
+ for (i = 0; i < sizeof(tmp); i++)
+ sum += ((u8 *)&tmp)[i];
+
+ if (sum)
+ return -EDOM;
+
+ *civd = tmp;
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
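A self-contained sketch of the "$CIV" signature match and the modulo-256 checksum rule described above (plain C; the struct layout is a simplified stand-in for ixgbe_orom_civd_info):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct civd {	/* simplified stand-in layout */
	uint8_t signature[4];
	uint8_t combo_ver[4];
	uint8_t csum;
};

static int civd_valid(const struct civd *c)
{
	uint8_t sum = 0;
	size_t i;

	if (memcmp(c->signature, "$CIV", sizeof(c->signature)))
		return 0;
	/* Every byte, including the checksum byte, must sum to 0 mod 256. */
	for (i = 0; i < sizeof(*c); i++)
		sum += ((const uint8_t *)c)[i];
	return sum == 0;
}

int main(void)
{
	struct civd c = { .signature = "$CIV" };
	uint8_t sum = 0;
	size_t i;

	/* Pick the checksum byte so the whole struct sums to zero. */
	for (i = 0; i < sizeof(c) - 1; i++)
		sum += ((uint8_t *)&c)[i];
	c.csum = (uint8_t)(0u - sum);

	printf("valid: %d\n", civd_valid(&c));
	return 0;
}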
+/**
+ * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from active or inactive flash module
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active OROM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *srev)
+{
+ u32 orom_size_word = hw->flash.banks.orom_size / 2;
+ u32 css_start, hdr_len;
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ if (orom_size_word < hdr_len)
+ return -EINVAL;
+
+ /* Calculate how far into the Option ROM the CSS header starts. Note
+ * that ixgbe_read_orom_module takes a word offset.
+ */
+ css_start = orom_size_word - hdr_len;
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_L,
+ &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_H,
+ &srev_h);
+ if (err)
+ return err;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @orom: pointer to Option ROM info structure
+ *
+ * Read Option ROM version and security revision from the Option ROM flash
+ * section.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_orom_info *orom)
+{
+ struct ixgbe_orom_civd_info civd;
+ u32 combo_ver;
+ int err;
+
+ err = ixgbe_get_orom_civd_data(hw, bank, &civd);
+ if (err)
+ return err;
+
+ combo_ver = le32_to_cpu(civd.combo_ver);
+
+ orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
+ orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
+ orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver);
+
+ return ixgbe_get_orom_srev(hw, bank, &orom->srev);
+}
+
+/**
+ * ixgbe_get_inactive_orom_ver - Read Option ROM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @orom: storage for Option ROM version information
+ *
+ * Read the Option ROM version and security revision data for the inactive
+ * section of flash. Used to access version data for a pending update that has
+ * not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom)
+{
+ return ixgbe_get_orom_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, orom);
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ int err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank,
+ IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (err)
+ return err;
+
+ nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver);
+ nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver);
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (err)
+ return err;
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Read the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Read the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ int err;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (err)
+ return err;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (err)
+ return err;
+
+ /* Sanity check that we have at least enough words to store the
+ * netlist ID block.
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (err)
+ return err;
+
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
+ if (!id_blk)
+ return -ENOMEM;
+
+ /* Read out the entire Netlist ID Block at once. */
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
+ sizeof(*id_blk), (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE *
+ sizeof(*id_blk));
+ if (err)
+ goto free_id_blk;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the leftmost 4 bytes of the SHA hash */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+free_id_blk:
+ kfree(id_blk);
+ return err;
+}
+
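Each 32-bit field of the ID block is stored as a high/low pair of little-endian words, converted in place and then reassembled. A standalone sketch of that reassembly (hypothetical byte values):

#include <stdio.h>
#include <stdint.h>

/* Portable stand-in for le16_to_cpu(): assemble from explicit bytes. */
static uint16_t le16_load(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Hypothetical on-flash bytes: major version 0x00010203 stored as
	 * two little-endian words, high word first.
	 */
	uint8_t raw[4] = { 0x01, 0x00, 0x03, 0x02 };

	uint16_t hi = le16_load(raw);
	uint16_t lo = le16_load(raw + 2);
	uint32_t major = ((uint32_t)hi << 16) | lo;

	printf("major: 0x%08x\n", (unsigned)major);
	return 0;
}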
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_get_flash_data - get flash data
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate flash data such as Shadow RAM size,
+ * max_timeout and blank_nvm_mode
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_flash_data(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ u8 sr_size;
+ int err;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Convert to words (sr_size is the power-of-2 exponent of the size in KB) */
+ flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16));
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if (fla & IXGBE_GLNVM_FLA_LOCKED_M) {
+ flash->blank_nvm_mode = false;
+ } else {
+ flash->blank_nvm_mode = true;
+ return -EIO;
+ }
+
+ err = ixgbe_discover_flash_size(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_determine_active_flash_banks(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (err)
+ return err;
+
+ err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->orom);
+ if (err)
+ return err;
+
+ err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+ return err;
+}
+
+/**
+ * ixgbe_aci_nvm_update_empr - update NVM using EMPR
+ * @hw: pointer to the HW struct
+ *
+ * Force EMP reset using ACI command (0x0709). This command allows SW to
+ * request an EMPR to activate new FW.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_update_empr);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_nvm_set_pkg_data - NVM set package data
+ * @hw: pointer to the HW struct
+ * @del_pkg_data_flag: if set, the pkg_data currently stored by FW is
+ * deleted; in that case the buffer should have size 0
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ *
+ * Set package data using ACI command (0x070A).
+ * This command is equivalent to the reception of
+ * a PLDM FW Update GetPackageData cmd. This command should be sent
+ * as part of the NVM update as the first cmd in the flow.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length)
+{
+ struct ixgbe_aci_cmd_nvm_pkg_data *cmd;
+ struct ixgbe_aci_desc desc;
+
+ if (length != 0 && !data)
+ return -EINVAL;
+
+ cmd = &desc.params.pkg_data;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_pkg_data);
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ if (del_pkg_data_flag)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_PKG_DELETE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_pass_component_tbl - NVM pass component table
+ * @hw: pointer to the HW struct
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @transfer_flag: parameter for determining stage of the update
+ * @comp_response: a pointer to the response from the 0x070B ACI command
+ * @comp_response_code: a pointer to the response code from the 0x070B ACI command
+ *
+ * Pass component table using ACI command (0x070B). This command is equivalent
+ * to the reception of a PLDM FW Update PassComponentTable cmd.
+ * This command should be sent once per component. It can be only sent after
+ * Set Package Data cmd and before actual update. FW will assume these
+ * commands are going to be sent until the TransferFlag is set to End or
+ * StartAndEnd.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code)
+{
+ struct ixgbe_aci_cmd_nvm_pass_comp_tbl *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!data || !comp_response || !comp_response_code)
+ return -EINVAL;
+
+ cmd = &desc.params.pass_comp_tbl;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_pass_component_tbl);
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ cmd->transfer_flag = transfer_flag;
+ err = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ if (!err) {
+ *comp_response = cmd->component_response;
+ *comp_response_code = cmd->component_response_code;
+ }
+
+ return err;
+}
+
+/**
* ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -2485,7 +3694,7 @@ int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
if (err)
return err;
- err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD,
&tmp_checksum);
ixgbe_release_nvm(hw);
@@ -2580,9 +3789,129 @@ reset_hw_out:
return err;
}
+/**
+ * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Find the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len);
+ if (err)
+ return err;
+
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (err)
+ break;
+
+ /* Read TLV length */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len);
+ if (err)
+ break;
+
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return -EIO;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length).
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return -ENODATA;
+}
+
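The PFA walk above is a plain type/length scan over Shadow RAM words. A standalone sketch against a mocked word array (TLV types, lengths, and values are hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Mock Shadow RAM: word 0 is the PFA length (in words), followed by
 * TLVs laid out as [type][length][value words...].
 */
static const uint16_t sr[] = {
	7,				/* PFA length */
	0x0016, 1, 0xAAAA,		/* TLV type 0x16, one value word */
	0x0017, 2, 0x1111, 0x2222,	/* TLV type 0x17, two value words */
};

static int find_tlv(uint16_t type, uint16_t *tlv, uint16_t *len)
{
	uint16_t pfa_ptr = 0;
	uint16_t next = pfa_ptr + 1;
	uint16_t end = pfa_ptr + sr[pfa_ptr];

	while (next < end) {
		if (sr[next] == type && sr[next + 1]) {
			*tlv = next;
			*len = sr[next + 1];
			return 0;
		}
		/* Skip the value words plus the type and length words. */
		next = next + sr[next + 1] + 2;
	}
	return -1;	/* module does not exist */
}

int main(void)
{
	uint16_t tlv, len;

	if (!find_tlv(0x0017, &tlv, &len))
		printf("type 0x17 at word %u, %u value words\n",
		       (unsigned)tlv, (unsigned)len);
	return 0;
}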
+/**
+ * ixgbe_read_pba_string_e610 - Read PBA string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Read the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ int err;
+
+ *pba_num = '\0';
+
+ err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ IXGBE_E610_SR_PBA_BLOCK_PTR);
+ if (err)
+ return err;
+
+ /* pba_size is the next word */
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size);
+ if (err)
+ return err;
+
+ if (pba_tlv_len < pba_size)
+ return -EINVAL;
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size).
+ */
+ pba_size--;
+
+ if (pba_num_size < (((u32)pba_size * 2) + 1))
+ return -EINVAL;
+
+ for (u16 i = 0; i < pba_size; i++) {
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (err)
+ return err;
+
+ pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK,
+ pba_word);
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+
+ pba_num[(pba_size * 2)] = '\0';
+
+ return err;
+}
+
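Each PBA value word packs two ASCII characters, high byte first (the driver extracts the high byte via IXGBE_E610_SR_PBA_BLOCK_MASK). A standalone decoding sketch with hypothetical word values spelling "PBA123":

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical PBA value words, two ASCII characters per word. */
	const uint16_t words[] = { 0x5042, 0x4131, 0x3233 };	/* "PBA123" */
	char pba[sizeof(words) + 1];	/* two chars per word plus NUL */
	size_t i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
		pba[i * 2] = (char)(words[i] >> 8);	/* high byte first */
		pba[i * 2 + 1] = (char)(words[i] & 0xFF);
	}
	pba[sizeof(pba) - 1] = '\0';

	printf("PBA: %s\n", pba);
	return 0;
}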
static const struct ixgbe_mac_operations mac_ops_e610 = {
.init_hw = ixgbe_init_hw_generic,
- .start_hw = ixgbe_start_hw_X540,
+ .start_hw = ixgbe_start_hw_e610,
.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic,
.enable_rx_dma = ixgbe_enable_rx_dma_generic,
.get_mac_addr = ixgbe_get_mac_addr_generic,
@@ -2621,8 +3950,12 @@ static const struct ixgbe_mac_operations mac_ops_e610 = {
.led_off = ixgbe_led_off_generic,
.init_led_link_act = ixgbe_init_led_link_act_generic,
.reset_hw = ixgbe_reset_hw_e610,
+ .get_fw_ver = ixgbe_aci_get_fw_ver,
.get_media_type = ixgbe_get_media_type_e610,
.setup_link = ixgbe_setup_link_e610,
+ .fw_recovery_mode = ixgbe_fw_recovery_mode_e610,
+ .fw_rollback_mode = ixgbe_fw_rollback_mode_e610,
+ .get_nvm_ver = ixgbe_get_active_nvm_ver,
.get_link_capabilities = ixgbe_get_link_capabilities_e610,
.get_bus_info = ixgbe_get_bus_info_generic,
.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540,
@@ -2647,6 +3980,8 @@ static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
.read = ixgbe_read_ee_aci_e610,
.read_buffer = ixgbe_read_ee_aci_buffer_e610,
.validate_checksum = ixgbe_validate_eeprom_checksum_e610,
+ .read_pba_string = ixgbe_read_pba_string_e610,
+ .init_params = ixgbe_init_eeprom_params_e610,
};
const struct ixgbe_info ixgbe_e610_info = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
index ba8c06b73810..bb31d65bd1c8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -36,6 +36,7 @@ int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
struct ixgbe_link_status *link);
int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait);
@@ -67,6 +68,11 @@ int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command,
bool read_shadow_ram);
int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom);
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist);
int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
u8 *data, bool read_shadow_ram);
@@ -77,5 +83,18 @@ int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val);
int ixgbe_reset_hw_e610(struct ixgbe_hw *hw);
+int ixgbe_get_flash_data(struct ixgbe_hw *hw);
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw);
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length);
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code);
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f03925c1f521..d8a919ab7027 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -213,7 +213,7 @@ static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
static int ixgbe_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
@@ -458,7 +458,7 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
static int ixgbe_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
int err = 0;
@@ -535,7 +535,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
static void ixgbe_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw_stats *hwstats = &adapter->stats;
stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
@@ -545,7 +545,7 @@ static void ixgbe_get_pause_stats(struct net_device *netdev,
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (ixgbe_device_supports_autoneg_fc(hw) &&
@@ -564,10 +564,26 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
}
+static void ixgbe_set_pauseparam_finalize(struct net_device *netdev,
+ struct ixgbe_fc_info *fc)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* If the flow control settings changed, apply them and reinitialize. */
+ if (memcmp(fc, &hw->fc, sizeof(*fc))) {
+ hw->fc = *fc;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+}
+
static int ixgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fc_info fc = hw->fc;
@@ -592,27 +608,52 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.requested_mode = ixgbe_fc_none;
- /* if the thing changed then we'll update and use new autoneg */
- if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
- hw->fc = fc;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
+ return 0;
+}
+
+static int ixgbe_set_pauseparam_e610(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fc_info fc = hw->fc;
+
+ if (!ixgbe_device_supports_autoneg_fc(hw))
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_DISABLE) {
+ netdev_info(netdev,
+ "Cannot disable autonegotiation on this device.\n");
+ return -EOPNOTSUPP;
}
+ fc.disable_fc_autoneg = false;
+
+ if (pause->rx_pause && pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_full;
+ else if (pause->rx_pause)
+ fc.requested_mode = ixgbe_fc_rx_pause;
+ else if (pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_tx_pause;
+ else
+ fc.requested_mode = ixgbe_fc_none;
+
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->msg_enable;
}
static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->msg_enable = data;
}
@@ -627,7 +668,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
static void ixgbe_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u8 i;
@@ -994,14 +1035,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
int first_word, last_word, eeprom_len;
@@ -1037,7 +1078,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
static int ixgbe_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
void *ptr;
@@ -1104,10 +1145,22 @@ err:
return ret_val;
}
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbe_get_flash_data(hw);
+ ixgbe_set_fw_version_e610(adapter);
+}
+
static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ /* Need to refresh info for e610 in case FW reloads at runtime. */
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
@@ -1161,7 +1214,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
@@ -1176,7 +1229,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *temp_ring;
int i, j, err = 0;
u32 new_rx_count, new_tx_count;
@@ -1336,7 +1389,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *net_stats;
unsigned int start;
@@ -1710,7 +1763,7 @@ static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
@@ -2183,7 +2236,7 @@ out:
static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
bool if_running = netif_running(netdev);
if (ixgbe_removed(adapter->hw.hw_addr)) {
@@ -2306,7 +2359,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
static void ixgbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC;
@@ -2328,7 +2381,7 @@ static void ixgbe_get_wol(struct net_device *netdev,
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
WAKE_FILTER))
@@ -2353,9 +2406,53 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static int ixgbe_set_wol_acpi(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 grc;
+
+ if (ixgbe_wol_exclusion(adapter, wol))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* disable APM wakeup */
+ grc = IXGBE_READ_REG(hw, IXGBE_GRC_X550EM_a);
+ grc &= ~IXGBE_GRC_APME;
+ IXGBE_WRITE_REG(hw, IXGBE_GRC_X550EM_a, grc);
+
+ /* erase existing filters */
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IXGBE_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IXGBE_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IXGBE_WUFC_BC;
+
+ IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_PME_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wol);
+
+ hw->wol_enabled = adapter->wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int ixgbe_set_wol_e610(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+ return ixgbe_set_wol_acpi(netdev, wol);
+ else
+ return ixgbe_set_wol(netdev, wol);
+}
+
static int ixgbe_nway_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -2366,7 +2463,7 @@ static int ixgbe_nway_reset(struct net_device *netdev)
static int ixgbe_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
@@ -2394,12 +2491,32 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 0;
}
+static int ixgbe_set_phys_id_e610(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ bool led_active;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ led_active = true;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ led_active = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ixgbe_aci_set_port_id_led(&adapter->hw, !led_active);
+}
+
static int ixgbe_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* only valid if in constant ITR mode */
if (adapter->rx_itr_setting <= 1)
@@ -2455,7 +2572,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_q_vector *q_vector;
int i;
u16 tx_itr_param, rx_itr_param, tx_itr_prev;
@@ -2681,7 +2798,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
@@ -3069,7 +3186,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
@@ -3096,7 +3213,7 @@ static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return ixgbe_rss_indir_tbl_entries(adapter);
}
@@ -3116,7 +3233,7 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
static int ixgbe_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -3134,7 +3251,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
@@ -3176,7 +3293,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
static int ixgbe_get_ts_info(struct net_device *dev,
struct kernel_ethtool_ts_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* we always support timestamping disabled */
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
@@ -3252,7 +3369,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
static void ixgbe_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* report maximum channels */
ch->max_combined = ixgbe_max_channels(adapter);
@@ -3289,7 +3406,7 @@ static void ixgbe_get_channels(struct net_device *dev,
static int ixgbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
unsigned int count = ch->combined_count;
u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
@@ -3327,7 +3444,7 @@ static int ixgbe_set_channels(struct net_device *dev,
static int ixgbe_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -3373,7 +3490,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
int status = -EFAULT;
u8 databyte = 0xFF;
@@ -3469,7 +3586,7 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
@@ -3483,7 +3600,7 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ethtool_keee eee_data;
int ret_val;
@@ -3538,7 +3655,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
u32 priv_flags = 0;
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
@@ -3555,7 +3672,7 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
unsigned int flags2 = adapter->flags2;
unsigned int i;
@@ -3638,7 +3755,57 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_link_ksettings = ixgbe_set_link_ksettings,
};
+static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = ixgbe_get_drvinfo,
+ .get_regs_len = ixgbe_get_regs_len,
+ .get_regs = ixgbe_get_regs,
+ .get_wol = ixgbe_get_wol,
+ .set_wol = ixgbe_set_wol_e610,
+ .nway_reset = ixgbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = ixgbe_get_eeprom_len,
+ .get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
+ .get_ringparam = ixgbe_get_ringparam,
+ .set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
+ .get_pauseparam = ixgbe_get_pauseparam,
+ .set_pauseparam = ixgbe_set_pauseparam_e610,
+ .get_msglevel = ixgbe_get_msglevel,
+ .set_msglevel = ixgbe_set_msglevel,
+ .self_test = ixgbe_diag_test,
+ .get_strings = ixgbe_get_strings,
+ .set_phys_id = ixgbe_set_phys_id_e610,
+ .get_sset_count = ixgbe_get_sset_count,
+ .get_ethtool_stats = ixgbe_get_ethtool_stats,
+ .get_coalesce = ixgbe_get_coalesce,
+ .set_coalesce = ixgbe_set_coalesce,
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
+ .get_rxfh_indir_size = ixgbe_rss_indir_size,
+ .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
+ .get_rxfh = ixgbe_get_rxfh,
+ .set_rxfh = ixgbe_set_rxfh,
+ .get_eee = ixgbe_get_eee,
+ .set_eee = ixgbe_set_eee,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
+ .get_priv_flags = ixgbe_get_priv_flags,
+ .set_priv_flags = ixgbe_set_priv_flags,
+ .get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
+};
+
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &ixgbe_ethtool_ops;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
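+	/* E610 uses a dedicated ops table with E610-specific WoL, pause and
+	 * PHY identify handlers.
+	 */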
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ netdev->ethtool_ops = &ixgbe_ethtool_ops_e610;
+ else
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 955dced844a9..7dcf6ecd157b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -56,7 +56,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
if (xid >= netdev->fcoe_ddp_xid)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
@@ -153,7 +153,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
if (!netdev || !sgl)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
if (xid >= netdev->fcoe_ddp_xid) {
e_warn(drv, "xid=0x%x out-of-range\n", xid);
return 0;
@@ -834,7 +834,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
atomic_inc(&fcoe->refcnt);
@@ -881,7 +881,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
return -EINVAL;
@@ -927,7 +927,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
u16 prefix = 0xffff;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
switch (type) {
@@ -967,7 +967,7 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u64 dsn;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
new file mode 100644
index 000000000000..49d3b66add7e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2025 Intel Corporation. */
+
+#include <linux/crc32.h>
+#include <linux/pldmfw.h>
+#include <linux/uuid.h>
+
+#include "ixgbe.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_fwu_priv {
+ struct pldmfw context;
+
+ struct ixgbe_adapter *adapter;
+ struct netlink_ext_ack *extack;
+
+ /* Track which NVM banks to activate at the end of the update */
+ u8 activate_flags;
+ bool emp_reset_available;
+};
+
+/**
+ * ixgbe_send_package_data - Send record package data to firmware
+ * @context: PLDM fw update structure
+ * @data: pointer to the package data
+ * @length: length of the package data
+ *
+ * Send a copy of the package data associated with the PLDM record matching
+ * this device to the firmware.
+ *
+ * Note that this function sends an AdminQ command that will fail unless the
+ * NVM resource has been acquired.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ixgbe_send_package_data(struct pldmfw *context,
+ const u8 *data, u16 length)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *package_data;
+ int err;
+
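+	/* The ACI request takes a writable buffer, so duplicate the const
+	 * package data.
+	 */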
+ package_data = kmemdup(data, length, GFP_KERNEL);
+ if (!package_data)
+ return -ENOMEM;
+
+ err = ixgbe_nvm_set_pkg_data(hw, false, package_data, length);
+
+ kfree(package_data);
+
+ return err;
+}
+
+/**
+ * ixgbe_check_component_response - Report firmware response to a component
+ * @adapter: device private data structure
+ * @response: indicates whether this component can be updated
+ * @code: code indicating reason for response
+ * @extack: netlink extended ACK structure
+ *
+ * Check whether firmware indicates this component can be updated. Report
+ * a suitable error message over the netlink extended ACK if the component
+ * cannot be updated.
+ *
+ * Return: 0 if the component can be updated, or -ECANCELED if the
+ * firmware indicates the component cannot be updated.
+ */
+static int ixgbe_check_component_response(struct ixgbe_adapter *adapter,
+ u8 response, u8 code,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (response) {
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED:
+ /* Firmware indicated this update is good to proceed. */
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware recommends not updating, as it may result in a downgrade. Continuing anyways");
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
+ NL_SET_ERR_MSG_MOD(extack, "Firmware has rejected updating.");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (hw->mac.ops.fw_recovery_mode &&
+ hw->mac.ops.fw_recovery_mode(hw))
+ return 0;
+ break;
+ }
+
+ switch (code) {
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is lower than running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is invalid");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component table conflict occurred");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component not supported");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete component image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is lower than the running image");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Received unexpected response code from firmware");
+ break;
+ }
+
+ return -ECANCELED;
+}
+
+/**
+ * ixgbe_send_component_table - Send PLDM component table to firmware
+ * @context: PLDM fw update structure
+ * @component: the component to process
+ * @transfer_flag: relative transfer order of this component
+ *
+ * Read relevant data from the component and forward it to the device
+ * firmware. Check the response to determine if the firmware indicates that
+ * the update can proceed.
+ *
+ * This function sends ACI commands related to the NVM, and assumes that
+ * the NVM resource has been acquired.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_send_component_table(struct pldmfw *context,
+ struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_aci_cmd_nvm_comp_tbl *comp_tbl;
+ u8 comp_response, comp_response_code;
+ struct ixgbe_hw *hw = &adapter->hw;
+ size_t length;
+ int err;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ case NVM_COMP_ID_NVM:
+ case NVM_COMP_ID_NETLIST:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to update due to unknown firmware component");
+ return -EOPNOTSUPP;
+ }
+
+ length = struct_size(comp_tbl, cvs, component->version_len);
+ comp_tbl = kzalloc(length, GFP_KERNEL);
+ if (!comp_tbl)
+ return -ENOMEM;
+
+ comp_tbl->comp_class = cpu_to_le16(component->classification);
+ comp_tbl->comp_id = cpu_to_le16(component->identifier);
+ comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE;
+ comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp);
+ comp_tbl->cvs_type = component->version_type;
+ comp_tbl->cvs_len = component->version_len;
+
+ memcpy(comp_tbl->cvs, component->version_string,
+ component->version_len);
+
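+	/* The transfer flag tells firmware whether this component table is
+	 * the first, a middle, or the last one in the update sequence.
+	 */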
+ err = ixgbe_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
+ transfer_flag, &comp_response,
+ &comp_response_code);
+
+ kfree(comp_tbl);
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transfer component table to firmware");
+ return -EIO;
+ }
+
+ return ixgbe_check_component_response(adapter,
+ comp_response,
+ comp_response_code, extack);
+}
+
+/**
+ * ixgbe_write_one_nvm_block - Write an NVM block and await completion response
+ * @adapter: the PF data structure
+ * @module: the module to write to
+ * @offset: offset in bytes
+ * @block_size: size of the block to write, up to 4k
+ * @block: pointer to block of data to write
+ * @last_cmd: whether this is the last command
+ * @extack: netlink extended ACK structure
+ *
+ * Write a block of data to a flash module, and wait for the completion
+ * response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * On successful completion, the firmware response indicates the device
+ * reset level required to complete the update:
+ *
+ * 0 - IXGBE_ACI_NVM_POR_FLAG - A full power on is required
+ * 1 - IXGBE_ACI_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - IXGBE_ACI_NVM_EMPR_FLAG - An EMP reset is required
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_one_nvm_block(struct ixgbe_adapter *adapter,
+ u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ return ixgbe_aci_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0);
+}
+
+/**
+ * ixgbe_write_nvm_module - Write data to an NVM module
+ * @adapter: the PF driver structure
+ * @module: the module id to program
+ * @component: the name of the component being updated
+ * @image: buffer of image data to write to the NVM
+ * @length: length of the buffer
+ * @extack: netlink extended ACK structure
+ *
+ * Loop over the data for a given NVM module and program it in 4 KB
+ * blocks, notifying the devlink core of progress after each block is
+ * programmed.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component, const u8 *image,
+ u32 length,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ u32 offset = 0;
+ bool last_cmd;
+ u8 *block;
+ int err;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, 0, length);
+
+ block = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ do {
+ u32 block_size;
+
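+		/* Program at most IXGBE_ACI_MAX_BUFFER_SIZE (4 KB) per command. */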
+ block_size = min_t(u32, IXGBE_ACI_MAX_BUFFER_SIZE,
+ length - offset);
+		last_cmd = offset + block_size >= length;
+
+ memcpy(block, image + offset, block_size);
+
+ err = ixgbe_write_one_nvm_block(adapter, module, offset,
+ block_size, block, last_cmd,
+ extack);
+ if (err)
+ break;
+
+ offset += block_size;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, offset, length);
+ } while (!last_cmd);
+
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ component, length, length);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, length, length);
+
+ kfree(block);
+
+ return err;
+}
+
+/* Length in seconds to wait before timing out when erasing a flash module.
+ * Yes, erasing really can take minutes to complete.
+ */
+#define IXGBE_FW_ERASE_TIMEOUT 300
+
+/**
+ * ixgbe_erase_nvm_module - Erase an NVM module and await firmware completion
+ * @adapter: the PF data structure
+ * @module: the module to erase
+ * @component: name of the component being updated
+ * @extack: netlink extended ACK structure
+ *
+ * Erase the inactive NVM bank associated with this module, and wait for
+ * a completion response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_erase_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ devlink_flash_update_timeout_notify(devlink, "Erasing", component,
+ IXGBE_FW_ERASE_TIMEOUT);
+
+ err = ixgbe_aci_erase_nvm(hw, module);
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Erasing failed",
+ component, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Erasing done",
+ component, 0, 0);
+
+ return err;
+}
+
+/**
+ * ixgbe_switch_flash_banks - Tell firmware to switch NVM banks
+ * @adapter: Pointer to the PF data structure
+ * @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
+ * @extack: netlink extended ACK structure
+ *
+ * Notify firmware to activate the newly written flash banks, and wait for the
+ * firmware response.
+ *
+ * Return: 0 on success or an error code on failure.
+ */
+static int ixgbe_switch_flash_banks(struct ixgbe_adapter *adapter,
+ u8 activate_flags,
+ bool *emp_reset_available,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 response_flags;
+ int err;
+
+ err = ixgbe_nvm_write_activate(hw, activate_flags, &response_flags);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to switch active flash banks");
+ return err;
+ }
+
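+	/* When firmware supports reset restriction, honor its report of
+	 * whether an EMP reset is permitted; otherwise assume it is available.
+	 */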
+ if (emp_reset_available) {
+ if (hw->dev_caps.common_cap.reset_restrict_support)
+ *emp_reset_available =
+ response_flags & IXGBE_ACI_NVM_EMPR_ENA;
+ else
+ *emp_reset_available = true;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_flash_component - Flash a component of the NVM
+ * @context: PLDM fw update structure
+ * @component: the component table to program
+ *
+ * Program the flash contents for a given component. First, determine the
+ * module id. Then, erase the secondary bank for this module. Finally, write
+ * the contents of the component to the NVM.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_adapter *adapter = priv->adapter;
+ const char *name;
+ u16 module;
+ int err;
+ u8 flag;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ module = IXGBE_E610_SR_1ST_OROM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ name = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ module = IXGBE_E610_SR_1ST_NVM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ name = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ module = IXGBE_E610_SR_NETLIST_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ name = "fw.netlist";
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Mark this component for activating at the end. */
+ priv->activate_flags |= flag;
+
+ err = ixgbe_erase_nvm_module(adapter, module, name, extack);
+ if (err)
+ return err;
+
+ return ixgbe_write_nvm_module(adapter, module, name,
+ component->component_data,
+ component->component_size, extack);
+}
+
+/**
+ * ixgbe_finalize_update - Perform last steps to complete device update
+ * @context: PLDM fw update structure
+ *
+ * Called as the last step of the update process. Complete the update by
+ * telling the firmware to switch active banks, and perform a reset if
+ * configured.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ixgbe_finalize_update(struct pldmfw *context)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct devlink *devlink = adapter->devlink;
+ int err;
+
+ /* Finally, notify firmware to activate the written NVM banks */
+ err = ixgbe_switch_flash_banks(adapter, priv->activate_flags,
+ &priv->emp_reset_available, extack);
+ if (err)
+ return err;
+
+ adapter->fw_emp_reset_disabled = !priv->emp_reset_available;
+
+ if (!adapter->fw_emp_reset_disabled)
+ devlink_flash_update_status_notify(devlink,
+ "Suggested is to activate new firmware by devlink reload, if it doesn't work then a power cycle is required",
+ NULL, 0, 0);
+
+ return 0;
+}
+
+static const struct pldmfw_ops ixgbe_fwu_ops_e610 = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ixgbe_send_package_data,
+ .send_component_table = &ixgbe_send_component_table,
+ .flash_component = &ixgbe_flash_component,
+ .finalize_update = &ixgbe_finalize_update,
+};
+
+/**
+ * ixgbe_get_pending_updates - Check whether any flash component has a pending update
+ * @adapter: the PF driver structure
+ * @pending: on return, bitmap of updates pending
+ * @extack: Netlink extended ACK
+ *
+ * Check if the device has any pending updates on any flash components.
+ *
+ * Return: 0 on success, or a negative error code on failure. On success,
+ * *pending is set to the bitmap of pending updates.
+ */
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw_dev_caps *dev_caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
+ if (!dev_caps)
+ return -ENOMEM;
+
+ err = ixgbe_discover_dev_caps(hw, dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read device capabilities");
+ kfree(dev_caps);
+ return -EIO;
+ }
+
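+	/* Translate the pending-update capability bits into activation flags. */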
+ *pending = 0;
+
+ if (dev_caps->common_cap.nvm_update_pending_nvm)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+
+ if (dev_caps->common_cap.nvm_update_pending_orom)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+
+ if (dev_caps->common_cap.nvm_update_pending_netlist)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+
+ kfree(dev_caps);
+
+ return 0;
+}
+
+/**
+ * ixgbe_cancel_pending_update - Cancel any pending update for a component
+ * @adapter: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Cancel any pending update for the specified component. If component is
+ * NULL, all device updates will be canceled.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_cancel_pending_update(struct ixgbe_adapter *adapter,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* If the flash_update request is for a specific component, ignore all
+ * of the other components.
+ */
+ if (component) {
+ if (strcmp(component, "fw.mgmt") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ else if (strcmp(component, "fw.undi") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ else if (strcmp(component, "fw.netlist") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ else
+ return -EINVAL;
+ }
+
+ /* There is no previous pending update, so this request may continue */
+ if (!pending)
+ return 0;
+
+ /* In order to allow overwriting a previous pending update, notify
+ * firmware to cancel that update by issuing the appropriate command.
+ */
+ devlink_flash_update_status_notify(devlink,
+ "Canceling previous pending update",
+ component, 0, 0);
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
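+	/* Cancelling is done by issuing the bank switch command again with
+	 * the revert flag set for the pending banks.
+	 */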
+ pending |= IXGBE_ACI_NVM_REVERT_LAST_ACTIV;
+ err = ixgbe_switch_flash_banks(adapter, pending, NULL, extack);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_flash_pldm_image - Write a PLDM-formatted firmware image to the device
+ * @devlink: pointer to devlink associated with the device to update
+ * @params: devlink flash update parameters
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct device *dev = &adapter->pdev->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fwu_priv priv;
+ u8 preservation;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ switch (params->overwrite_mask) {
+ case 0:
+ /* preserve all settings and identifiers */
+ preservation = IXGBE_ACI_NVM_PRESERVE_ALL;
+ break;
+ case DEVLINK_FLASH_OVERWRITE_SETTINGS:
+ /* Overwrite settings, but preserve vital information such as
+ * device identifiers.
+ */
+ preservation = IXGBE_ACI_NVM_PRESERVE_SELECTED;
+ break;
+ case (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS):
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = IXGBE_ACI_NVM_NO_PRESERVATION;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested overwrite mask is not supported");
+ return -EOPNOTSUPP;
+ }
+
+	/* Capabilities cannot be read in recovery mode, so a missing
+	 * nvm_unified_update bit must not be treated as an error there.
+	 */
+ if (!hw->dev_caps.common_cap.nvm_unified_update &&
+ (hw->mac.ops.fw_recovery_mode &&
+ !hw->mac.ops.fw_recovery_mode(hw))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ixgbe_fwu_ops_e610;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.adapter = adapter;
+ priv.activate_flags = preservation;
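+	/* Component activation bits are OR'd in as each component is flashed. */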
+
+ devlink_flash_update_status_notify(devlink,
+ "Preparing to flash", NULL, 0, 0);
+
+ err = ixgbe_cancel_pending_update(adapter, NULL, extack);
+ if (err)
+ return err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ err = pldmfw_flash_image(&priv.context, params->fw);
+ if (err == -ENOENT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image has no record matching this device");
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to flash PLDM image");
+ }
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
new file mode 100644
index 000000000000..abdd708c93df
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2025 Intel Corporation. */
+
+#ifndef _IXGBE_FW_UPDATE_H_
+#define _IXGBE_FW_UPDATE_H_
+
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack);
+#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 07ea1954a276..d1f4073b36f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -9,7 +9,7 @@
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs);
/**
* ixgbe_ipsec_set_tx_sa - set the Tx SA registers
@@ -321,7 +321,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (r->used) {
if (r->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(r->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, r->xs);
else
ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
r->key, r->salt,
@@ -330,7 +330,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (t->used) {
if (t->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(t->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, t->xs);
else
ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
}
@@ -417,6 +417,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
/**
* ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -424,10 +425,10 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbe_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -473,12 +474,13 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
+ * @dev: pointer to net device
* @xs: pointer to transformer state struct
**/
-static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
+static int ixgbe_ipsec_check_mgmt_ip(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 mfval, manc, reg;
int num_filters = 4;
@@ -556,14 +558,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
/**
* ixgbe_ipsec_add_sa - program device with a security association
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbe_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
int checked, match, first;
@@ -581,7 +584,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
return -EINVAL;
}
- if (ixgbe_ipsec_check_mgmt_ip(xs)) {
+ if (ixgbe_ipsec_check_mgmt_ip(dev, xs)) {
NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
return -EINVAL;
}
@@ -615,7 +618,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, rsa.key, &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -724,7 +727,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, tsa.key, &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -752,12 +755,12 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbe_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
u32 zerobuf[4] = {0, 0, 0, 0};
@@ -841,7 +844,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->rx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->rx_tbl[i].xs);
}
/* search tx sa table */
@@ -850,7 +854,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->tx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->tx_tbl[i].xs);
}
}
@@ -930,7 +935,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
- err = ixgbe_ipsec_add_sa(xs, NULL);
+ err = ixgbe_ipsec_add_sa(adapter->netdev, xs, NULL);
if (err)
goto err_aead;
@@ -1034,7 +1039,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
xs = ipsec->tx_tbl[sa_idx].xs;
}
- ixgbe_ipsec_del_sa(xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, xs);
/* remove the xs that was made-up in the add request */
kfree_sensitive(xs);
@@ -1052,7 +1057,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
{
- struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(tx_ring->netdev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
struct sec_path *sp;
@@ -1142,7 +1147,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(rx_ring->netdev);
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a2718218963e..cba860f0e1f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -49,6 +49,7 @@
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
+#include "devlink/devlink.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -1095,7 +1096,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 bcnrc_val = ixgbe_link_mbps(adapter);
@@ -4678,7 +4679,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
@@ -4737,7 +4738,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
@@ -4962,7 +4963,7 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
**/
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!netif_running(netdev))
@@ -5138,7 +5139,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret;
ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
@@ -5148,7 +5149,7 @@ static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
@@ -5166,7 +5167,7 @@ static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
**/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
netdev_features_t features = netdev->features;
@@ -5268,7 +5269,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
struct udp_tunnel_info ti;
@@ -6600,7 +6601,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
**/
static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Do the reset outside of interrupt context */
ixgbe_tx_timeout_reset(adapter);
@@ -6849,7 +6850,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
/* initialize eeprom parameters */
- if (ixgbe_init_eeprom_params_generic(hw)) {
+ if (hw->eeprom.ops.init_params(hw)) {
e_dev_err("EEPROM initialization failed\n");
return -EIO;
}
@@ -7165,7 +7166,7 @@ static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
**/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (ixgbe_enabled_xdp_adapter(adapter)) {
int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
@@ -7212,7 +7213,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
**/
int ixgbe_open(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
@@ -7316,7 +7317,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
**/
int ixgbe_close(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_ptp_stop(adapter);
@@ -8309,7 +8310,8 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
**/
static void ixgbe_service_timer(struct timer_list *t)
{
- struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+ struct ixgbe_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
unsigned long next_event_offset;
/* poll faster when waiting for link */
@@ -8364,6 +8366,34 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
rtnl_unlock();
}
+static int ixgbe_check_fw_api_mismatch(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return 0;
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ return 0;
+
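+	/* A newer major API version is fatal; any other version drift outside
+	 * the allowed range only triggers a warning.
+	 */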
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ e_dev_err("The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ return -EOPNOTSUPP;
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ }
+
+ return 0;
+}
+
/**
* ixgbe_check_fw_error - Check firmware for errors
* @adapter: the adapter private structure
@@ -8374,12 +8404,14 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 fwsm;
+ int err;
/* read fwsm.ext_err_ind register and log errors */
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+	/* Skip the warning while the E610's FW is reloading, as it would be misleading */
if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
- !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
+ (!(fwsm & IXGBE_FWSM_FW_VAL_BIT) && !(hw->mac.type == ixgbe_mac_e610)))
e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
fwsm);
@@ -8387,10 +8419,53 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
return true;
}
+ if (!(adapter->flags2 & IXGBE_FLAG2_API_MISMATCH)) {
+ err = ixgbe_check_fw_api_mismatch(adapter);
+ if (err)
+ return true;
+ }
+
+ /* return here if FW rollback mode has been already detected */
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ROLLBACK)
+ return false;
+
+ if (hw->mac.ops.fw_rollback_mode && hw->mac.ops.fw_rollback_mode(hw)) {
+ struct ixgbe_nvm_info *nvm_info = &adapter->hw.flash.nvm;
+ char ver_buff[64] = "";
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ goto no_version;
+
+ if (hw->mac.ops.get_nvm_ver &&
+ hw->mac.ops.get_nvm_ver(hw, nvm_info))
+ goto no_version;
+
+ snprintf(ver_buff, sizeof(ver_buff),
+ "Current version is NVM:%x.%x.%x, FW:%d.%d. ",
+ nvm_info->major, nvm_info->minor, nvm_info->eetrack,
+			 hw->fw_maj_ver, hw->fw_min_ver);
+no_version:
+ e_dev_warn("Firmware rollback mode detected. %sDevice may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode.",
+ ver_buff);
+
+ adapter->flags2 |= IXGBE_FLAG2_FW_ROLLBACK;
+ }
return false;
}
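+/* Minimal service loop used while firmware recovery mode is active: only
+ * firmware events are serviced, polled every 100 ms.
+ */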
+static void ixgbe_recovery_service_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ service_task);
+
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_service_event_complete(adapter);
+
+ mod_timer(&adapter->service_timer, jiffies + msecs_to_jiffies(100));
+}
+
/**
* ixgbe_service_task - manages and runs subtasks
* @work: pointer to work_struct containing our data
@@ -8410,8 +8485,13 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
if (ixgbe_check_fw_error(adapter)) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ if (adapter->mii_bus) {
+ mdiobus_unregister(adapter->mii_bus);
+ adapter->mii_bus = NULL;
+ }
unregister_netdev(adapter->netdev);
+ }
ixgbe_service_event_complete(adapter);
return;
}
@@ -9001,7 +9081,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_FCOE):
case htons(ETH_P_FIP):
- adapter = netdev_priv(dev);
+ adapter = ixgbe_from_netdev(dev);
if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
@@ -9260,7 +9340,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
struct net_device *netdev,
struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring;
/*
@@ -9292,7 +9372,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
**/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
@@ -9310,7 +9390,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 value;
int rc;
@@ -9336,7 +9416,7 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
u16 addr, u16 value)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (adapter->mii_bus) {
@@ -9356,7 +9436,7 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (cmd) {
case SIOCSHWTSTAMP:
@@ -9382,7 +9462,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
if (is_valid_ether_addr(hw->mac.san_addr)) {
@@ -9406,7 +9486,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
if (is_valid_ether_addr(mac->san_addr)) {
@@ -9437,7 +9517,7 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
static void ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
rcu_read_lock();
@@ -9480,7 +9560,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
struct ifla_vf_stats *vf_stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf < 0 || vf >= adapter->num_vfs)
return -EINVAL;
@@ -9597,7 +9677,7 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct netdev_nested_priv priv = {
.data = (void *)adapter,
};
@@ -9618,7 +9698,7 @@ static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
/* Hardware supports up to 8 traffic classes */
@@ -10176,7 +10256,7 @@ static LIST_HEAD(ixgbe_block_cb_list);
static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (type) {
case TC_SETUP_BLOCK:
@@ -10204,7 +10284,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
#endif
void ixgbe_do_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -10215,7 +10295,7 @@ void ixgbe_do_reset(struct net_device *netdev)
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
@@ -10252,7 +10332,7 @@ static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
netdev_features_t changed = netdev->features ^ features;
bool need_reset = false;
@@ -10328,7 +10408,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u16 pool = VMDQ_P(0);
if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
@@ -10416,7 +10496,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct nlattr *attr, *br_spec;
int rem;
@@ -10444,7 +10524,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask, int nlflags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
@@ -10456,7 +10536,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
int pool, err;
@@ -10553,7 +10633,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
unsigned int rxbase = accel->rx_base_queue;
unsigned int i;
@@ -10631,7 +10711,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct bpf_prog *old_prog;
bool need_reset;
int num_queues;
@@ -10703,7 +10783,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -10738,7 +10818,7 @@ void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
int nxmit = 0;
int i;
@@ -11146,7 +11226,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
* format to display. The FW version is taken from the EEPROM/NVM.
*
*/
-static void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
{
struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
@@ -11197,6 +11277,66 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_recovery_probe - Handle FW recovery mode during probe
+ * @adapter: the adapter private structure
+ *
+ * Perform limited driver initialization when FW error is detected.
+ *
+ * Return: 0 on successful probe for E610, -EIO if recovery mode is detected
+ * on a non-E610 adapter, or another negative error code otherwise.
+ */
+static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool disable_dev;
+ int err = -EIO;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ goto clean_up_probe;
+
+ ixgbe_get_hw_control(adapter);
+ mutex_init(&hw->aci.lock);
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto shutdown_aci;
+
+ timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
+ INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task);
+ set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+ pci_set_drvdata(pdev, adapter);
+	/* Create the devlink interface so the NIC can still be managed,
+	 * e.g. so a new NVM image can be loaded.
+	 */
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev,
+ &adapter->devlink_port);
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
+
+ return 0;
+shutdown_aci:
+ mutex_destroy(&adapter->hw.aci.lock);
+ ixgbe_release_hw_control(adapter);
+ devlink_free(adapter->devlink);
+clean_up_probe:
+ disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
+ free_netdev(netdev);
+ pci_release_mem_regions(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -11210,6 +11350,7 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
+ struct ixgbe_netdevice_priv *netdev_priv_wrapper;
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
@@ -11263,7 +11404,13 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
indices = IXGBE_MAX_RSS_INDICES_X550;
}
- netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+ adapter = ixgbe_allocate_devlink(&pdev->dev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto err_devlink;
+ }
+
+ netdev = alloc_etherdev_mq(sizeof(*netdev_priv_wrapper), indices);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -11271,7 +11418,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
+ netdev_priv_wrapper = netdev_priv(netdev);
+ netdev_priv_wrapper->adapter = adapter;
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -11287,11 +11435,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
- netdev->netdev_ops = &ixgbe_netdev_ops;
- ixgbe_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
-
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
@@ -11321,15 +11464,31 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
+ netdev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbe_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
/* setup the private structure */
err = ixgbe_sw_init(adapter, ii);
if (err)
goto err_sw_init;
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
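+	/* Fall back to the limited recovery probe path on firmware error. */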
+ if (ixgbe_check_fw_error(adapter))
+ return ixgbe_recovery_probe(adapter);
+
if (adapter->hw.mac.type == ixgbe_mac_e610) {
err = ixgbe_get_caps(&adapter->hw);
if (err)
dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
+
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto err_sw_init;
}
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
@@ -11348,10 +11507,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* Make sure the SWFW semaphore is in a valid state */
- if (hw->mac.ops.init_swfw_sync)
- hw->mac.ops.init_swfw_sync(hw);
-
 /* Make it possible for the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -11504,11 +11659,6 @@ skip_sriov:
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
- if (ixgbe_check_fw_error(adapter)) {
- err = -EIO;
- goto err_sw_init;
- }
-
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n");
@@ -11591,7 +11741,7 @@ skip_sriov:
if (expected_gts > 0)
ixgbe_check_minimum_link(adapter, expected_gts);
- err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
+ err = hw->eeprom.ops.read_pba_string(hw, part_str, sizeof(part_str));
if (err)
strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
@@ -11617,6 +11767,11 @@ skip_sriov:
}
strcpy(netdev->name, "eth%d");
pci_set_drvdata(pdev, adapter);
+
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev, &adapter->devlink_port);
+
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -11671,11 +11826,16 @@ skip_sriov:
if (err)
goto err_netdev;
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
return 0;
err_netdev:
unregister_netdev(netdev);
err_register:
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
if (hw->mac.type == ixgbe_mac_e610)
@@ -11692,7 +11852,9 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
+err_devlink:
err_pci_reg:
err_dma:
if (!adapter || disable_dev)
@@ -11721,6 +11883,9 @@ static void ixgbe_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
+ devl_lock(adapter->devlink);
+ devl_unregister(adapter->devlink);
+ ixgbe_devlink_destroy_regions(adapter);
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
@@ -11756,6 +11921,10 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
+ devlink_free(adapter->devlink);
+
ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 0a03a8bb5f88..2d54828bdfbb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 1;
+ int max_retry = 3;
int retry = 0;
u8 reg_high;
u8 csum;
@@ -2285,7 +2285,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- u32 max_retry = 1;
+ u32 max_retry = 3;
u32 retry = 0;
int status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index ccdce80edd14..0dbbd2befd4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int retval;
if (vf >= adapter->num_vfs)
@@ -1526,7 +1526,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 vlan_proto)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
@@ -1644,7 +1644,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int link_speed;
/* verify VF is active */
@@ -1679,7 +1679,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (vf >= adapter->num_vfs)
@@ -1757,7 +1757,7 @@ void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
**/
int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret = 0;
if (vf < 0 || vf >= adapter->num_vfs) {
@@ -1794,7 +1794,7 @@ int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* This operation is currently supported only for 82599 and x540
* devices.
@@ -1813,7 +1813,7 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
@@ -1836,7 +1836,7 @@ int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
ivi->vf = vf;
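The netdev_priv() -> ixgbe_from_netdev() conversions in this file (and in the
xsk path below) follow from the probe change earlier in the patch: the netdev
private area now holds only a small wrapper whose single member points at the
separately allocated adapter. A minimal sketch of the accessor, with the layout
inferred from the netdev_priv_wrapper->adapter assignment in ixgbe_probe();
its exact home (likely ixgbe.h) is an assumption, not quoted from the patch:

	/* Sketch only: inferred from the ixgbe_probe() hunk above. */
	struct ixgbe_netdevice_priv {
		struct ixgbe_adapter *adapter;
	};

	static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
	{
		struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);

		return priv->adapter;
	}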
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 5fdf32d79d82..892fa6c1f879 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3446,6 +3446,8 @@ struct ixgbe_eeprom_operations {
int (*validate_checksum)(struct ixgbe_hw *, u16 *);
int (*update_checksum)(struct ixgbe_hw *);
int (*calc_checksum)(struct ixgbe_hw *);
+ int (*read_pba_string)(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
};
struct ixgbe_mac_operations {
@@ -3454,6 +3456,7 @@ struct ixgbe_mac_operations {
int (*start_hw)(struct ixgbe_hw *);
int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ int (*get_fw_ver)(struct ixgbe_hw *hw);
int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_device_caps)(struct ixgbe_hw *, u16 *);
@@ -3522,6 +3525,8 @@ struct ixgbe_mac_operations {
int (*get_thermal_sensor_data)(struct ixgbe_hw *);
int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ int (*get_nvm_ver)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index 617e07878e4f..09df67f03cf4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -10,11 +10,75 @@
#define IXGBE_MAX_VSI 768
/* Checksum and Shadow RAM pointers */
-#define E610_SR_SW_CHECKSUM_WORD 0x3F
+#define IXGBE_E610_SR_NVM_CTRL_WORD 0x00
+#define IXGBE_E610_SR_PBA_BLOCK_PTR 0x16
+#define IXGBE_E610_SR_PBA_BLOCK_MASK GENMASK(15, 8)
+#define IXGBE_E610_SR_NVM_DEV_STARTER_VER 0x18
+#define IXGBE_E610_SR_NVM_EETRACK_LO 0x2D
+#define IXGBE_E610_SR_NVM_EETRACK_HI 0x2E
+#define IXGBE_E610_NVM_VER_LO_MASK GENMASK(7, 0)
+#define IXGBE_E610_NVM_VER_HI_MASK GENMASK(15, 12)
+#define IXGBE_E610_SR_SW_CHECKSUM_WORD 0x3F
+#define IXGBE_E610_SR_PFA_PTR 0x40
+#define IXGBE_E610_SR_1ST_NVM_BANK_PTR 0x42
+#define IXGBE_E610_SR_NVM_BANK_SIZE 0x43
+#define IXGBE_E610_SR_1ST_OROM_BANK_PTR 0x44
+#define IXGBE_E610_SR_OROM_BANK_SIZE 0x45
+#define IXGBE_E610_SR_NETLIST_BANK_PTR 0x46
+#define IXGBE_E610_SR_NETLIST_BANK_SIZE 0x47
+
+/* The OROM version topology */
+#define IXGBE_OROM_VER_PATCH_MASK GENMASK_ULL(7, 0)
+#define IXGBE_OROM_VER_BUILD_MASK GENMASK_ULL(23, 8)
+#define IXGBE_OROM_VER_MASK GENMASK_ULL(31, 24)
+
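As a quick illustration of how these masks partition a combo version value
(field placement assumed from the mask widths alone; requires
linux/bitfield.h), a hedged decode sketch:

	u64 combo_ver = 0x01020304;	/* example value only */
	u8  patch = FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver); /* 0x04 */
	u16 build = FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver); /* 0x0203 */
	u8  major = FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);       /* 0x01 */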
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+#define IXGBE_HDR_LEN_ROUNDUP 32
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
/* Shadow RAM related */
#define IXGBE_SR_WORDS_IN_1KB 512
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M GENMASK_ULL(9, 0)
+
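Putting the two comments above together: the module begins with a type word and
a length word, each node entry is two words, and the ID block follows the last
node. A hedged sketch of locating the ID block, assuming a hypothetical
read_word() accessor over the netlist module (the real driver reads through
the flash bank helpers):

	/* read_word() is a hypothetical accessor returning one netlist word. */
	u16 count_word = read_word(IXGBE_LINK_TOPO_NODE_COUNT);
	u16 node_count = FIELD_GET(IXGBE_LINK_TOPO_NODE_COUNT_M, count_word);
	/* The ID block starts after the node entries, two words per node. */
	u32 id_blk_start = IXGBE_NETLIST_ID_BLK_OFFSET(node_count);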
/* Firmware Status Register (GL_FWSTS) */
#define GL_FWSTS 0x00083048 /* Reset Source: POR */
#define GL_FWSTS_EP_PF0 BIT(24)
@@ -24,11 +88,23 @@
#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5)
+#define IXGBE_GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define IXGBE_GL_MNG_FWSM_RECOVERY_M BIT(1)
+#define IXGBE_GL_MNG_FWSM_ROLLBACK_M BIT(2)
+
/* Flash Access Register */
#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */
#define IXGBE_GLNVM_FLA_LOCKED_S 6
#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6)
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_M GENMASK(7, 6)
+#define IXGBE_SR_CTRL_WORD_VALID BIT(0)
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
/* Admin Command Interface (ACI) registers */
#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4))
#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
@@ -40,6 +116,10 @@
#define IXGBE_PF_HICR_SV BIT(2)
#define IXGBE_PF_HICR_EV BIT(3)
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
#define IXGBE_ACI_DESC_SIZE 32
#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
@@ -143,6 +223,7 @@ enum ixgbe_aci_opc {
ixgbe_aci_opc_write_mdio = 0x06E5,
ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
ixgbe_aci_opc_set_gpio = 0x06EC,
ixgbe_aci_opc_get_gpio = 0x06ED,
ixgbe_aci_opc_sff_eeprom = 0x06EE,
@@ -728,6 +809,18 @@ struct ixgbe_aci_cmd_get_link_topo_pin {
u8 rsvd[7];
};
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 ident_mode;
+ u8 rsvd[13];
+};
+
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+
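A minimal sketch of driving the new opcode; the descriptor-fill helper and
ixgbe_aci_send_cmd() are assumed to follow the same pattern as the other E610
ACI commands (both names are assumptions here, not quoted from this patch):

	/* Sketch only: helper names modelled on the other E610 ACI paths. */
	static int ixgbe_set_port_id_led_example(struct ixgbe_hw *hw, bool blink)
	{
		struct ixgbe_aci_desc desc;
		struct ixgbe_aci_cmd_set_port_id_led *cmd =
			&desc.params.set_port_id_led;

		ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
		cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
		cmd->ident_mode = blink ? IXGBE_ACI_PORT_IDENT_LED_BLINK :
					  IXGBE_ACI_PORT_IDENT_LED_ORIG;

		return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
	}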
/* Read/Write SFF EEPROM command (indirect 0x06EE) */
struct ixgbe_aci_cmd_sff_eeprom {
u8 lport_num;
@@ -761,6 +854,8 @@ struct ixgbe_aci_cmd_nvm {
#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
__le16 offset_low;
u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+#define IXGBE_ACI_NVM_OFFSET_HI_A_MASK GENMASK(15, 8)
+#define IXGBE_ACI_NVM_OFFSET_HI_U_MASK GENMASK(23, 16)
u8 cmd_flags;
#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
@@ -776,6 +871,9 @@ struct ixgbe_aci_cmd_nvm {
#define IXGBE_ACI_NVM_PERST_FLAG 1
#define IXGBE_ACI_NVM_EMPR_FLAG 2
#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+#define IXGBE_ACI_NVM_NO_PRESERVATION 0x0
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED 0x6
+
/* For Write Activate, several flags are sent as part of a separate
* flags2 field using a separate byte. For simplicity of the software
* interface, we pass the flags as a 16 bit value so these flags are
@@ -805,6 +903,63 @@ struct ixgbe_aci_cmd_nvm_checksum {
u8 rsvd2[12];
};
+/* Used for NVM Set Package Data command - 0x070A */
+struct ixgbe_aci_cmd_nvm_pkg_data {
+ u8 reserved[3];
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_PKG_DELETE BIT(0) /* used for command call */
+
+ u32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Used for Pass Component Table command - 0x070B */
+struct ixgbe_aci_cmd_nvm_pass_comp_tbl {
+ u8 component_response; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK 0x3
+ u8 component_response_code; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3
+#define IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE 0x4
+#define IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5
+#define IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6
+#define IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7
+#define IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB
+ u8 reserved;
+ u8 transfer_flag;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ixgbe_aci_cmd_nvm_comp_tbl {
+ __le16 comp_class;
+#define NVM_COMP_CLASS_ALL_FW 0x000A
+
+ __le16 comp_id;
+#define NVM_COMP_ID_OROM 0x5
+#define NVM_COMP_ID_NVM 0x6
+#define NVM_COMP_ID_NETLIST 0x8
+
+ u8 comp_class_idx;
+#define FWU_COMP_CLASS_IDX_NOT_USE 0x0
+
+ __le32 comp_cmp_stamp;
+ u8 cvs_type;
+#define NVM_CVS_TYPE_ASCII 0x1
+
+ u8 cvs_len;
+ u8 cvs[]; /* Component Version String */
+} __packed;
+
/**
* struct ixgbe_aci_desc - Admin Command (AC) descriptor
* @flags: IXGBE_ACI_FLAG_* flags
@@ -843,11 +998,14 @@ struct ixgbe_aci_desc {
struct ixgbe_aci_cmd_restart_an restart_an;
struct ixgbe_aci_cmd_get_link_status get_link_status;
struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
struct ixgbe_aci_cmd_get_link_topo get_link_topo;
struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
struct ixgbe_aci_cmd_nvm nvm;
struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ struct ixgbe_aci_cmd_nvm_pkg_data pkg_data;
+ struct ixgbe_aci_cmd_nvm_pass_comp_tbl pass_comp_tbl;
} params;
};
@@ -984,6 +1142,16 @@ struct ixgbe_hw_caps {
#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
} __packed;
+#define IXGBE_OROM_CIV_SIGNATURE "$CIV"
+
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+};
+
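The checksum rule stated in the struct comment above can be verified with a
plain byte sum over the block as it sits in flash; a sketch, assuming the
block has been read into a contiguous byte buffer (the real driver may read
it field by field):

	/* Returns true when the modulo-256 sum of every byte, including the
	 * checksum byte itself, is zero, as the $CIV rule requires.
	 */
	static bool civd_checksum_ok(const u8 *buf, size_t len)
	{
		u8 sum = 0;
		size_t i;

		for (i = 0; i < len; i++)
			sum += buf[i];

		return sum == 0;
	}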
/* Function specific capabilities */
struct ixgbe_hw_func_caps {
u32 num_allocd_vfs; /* Number of allocated VFs */
@@ -1015,6 +1183,11 @@ struct ixgbe_aci_info {
enum ixgbe_aci_err last_status; /* last status of sent admin command */
};
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
/* Option ROM version information */
struct ixgbe_orom_info {
u8 major; /* Major version of OROM */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 1fc821fb351a..f1ab95aa8c83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -894,6 +894,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
.validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 277ceaf8a793..1d2acdb64f45 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -3959,6 +3959,7 @@ static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
.validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
+ .read_pba_string = &ixgbe_read_pba_string_generic, \
static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 3e3b471e53f0..ac58964b2f08 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -508,7 +508,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
if (test_bit(__IXGBE_DOWN, &adapter->state))
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 8ba037e3d9c2..65580b9cb06f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -201,6 +201,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
/**
* ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device to program
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -208,10 +209,10 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
 * This copies the protocol keys and salt into our own data tables. The
 * 82599 family supports only one algorithm.
**/
-static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbevf_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -256,13 +257,14 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_add_sa - program device with a security association
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
 * @extack: extack pointer to fill failure reason
**/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbevf_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
@@ -310,7 +312,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, rsa.key,
+ &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -363,7 +366,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, tsa.key,
+ &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -388,11 +392,12 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbevf_ipsec_del_sa(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a217c5c04804..535d0f71f521 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3176,8 +3176,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_service_timer(struct timer_list *t)
{
- struct ixgbevf_adapter *adapter = from_timer(adapter, t,
- service_timer);
+ struct ixgbevf_adapter *adapter = timer_container_of(adapter, t,
+ service_timer);
/* Reset the timer */
mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
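The from_timer() -> timer_container_of() conversions here and in the drivers
below are a mechanical rename of the same container_of-style helper; behaviour
is unchanged. The pattern, with hypothetical names:

	struct my_priv {
		struct timer_list service_timer;
	};

	static void my_service_timer(struct timer_list *t)
	{
		/* Recover the enclosing private struct from its embedded timer. */
		struct my_priv *priv = timer_container_of(priv, t, service_timer);

		mod_timer(&priv->service_timer, jiffies + 2 * HZ);
	}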
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 1e2ac1a5f099..891a94d89f4b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,7 +900,8 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media)
static void korina_poll_media(struct timer_list *t)
{
- struct korina_private *lp = from_timer(lp, t, media_check_timer);
+ struct korina_private *lp = timer_container_of(lp, t,
+ media_check_timer);
struct net_device *dev = lp->dev;
korina_check_media(dev, 0);
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 837295fecd17..50f7c59e8a04 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -34,7 +34,6 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
depends on HAS_IOMEM
- select MDIO_DEVRES
select PHYLIB
help
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 8cc888bf6094..0ab52c57c648 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1333,7 +1333,8 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
static void mib_counters_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
+ struct mv643xx_eth_private *mp = timer_container_of(mp, t,
+ mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2306,7 +2307,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
static inline void oom_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
+ struct mv643xx_eth_private *mp = timer_container_of(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 416a926a8281..a7872d14a49d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5173,38 +5173,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = dev->stats.tx_dropped;
}
-static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct mvpp2_port *port = netdev_priv(dev);
void __iomem *ptp;
u32 gcr, int_mask;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
int_mask = gcr = 0;
- if (config.tx_type != HWTSTAMP_TX_OFF) {
+ if (config->tx_type != HWTSTAMP_TX_OFF) {
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
MVPP22_PTP_INT_MASK_QUEUE0;
}
/* It seems we must also release the TX reset when enabling the TSU */
- if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE)
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET;
if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
mvpp22_tai_start(port->priv->tai);
- if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
mvpp2_modify(ptp + MVPP22_PTP_GCR,
MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET |
@@ -5225,26 +5227,22 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
mvpp22_tai_stop(port->priv->tai);
- port->tx_hwtstamp_type = config.tx_type;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ port->tx_hwtstamp_type = config->tx_type;
return 0;
}
-static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
-
- memset(&config, 0, sizeof(config));
+ struct mvpp2_port *port = netdev_priv(dev);
- config.tx_type = port->tx_hwtstamp_type;
- config.rx_filter = port->rx_hwtstamp ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ config->tx_type = port->tx_hwtstamp_type;
+ config->rx_filter = port->rx_hwtstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
return 0;
}
@@ -5274,18 +5272,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvpp2_port *port = netdev_priv(dev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_set_ts_config(port, ifr);
- break;
-
- case SIOCGHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_get_ts_config(port, ifr);
- break;
- }
-
if (!port->phylink)
return -ENOTSUPP;
@@ -5799,6 +5785,8 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_set_features = mvpp2_set_features,
.ndo_bpf = mvpp2_xdp,
.ndo_xdp_xmit = mvpp2_xdp_xmit,
+ .ndo_hwtstamp_get = mvpp2_hwtstamp_get,
+ .ndo_hwtstamp_set = mvpp2_hwtstamp_set,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index ccb69bc5c952..420c3f4cf741 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -18,8 +18,6 @@
#include "octep_vf_config.h"
#include "octep_vf_main.h"
-struct workqueue_struct *octep_vf_wq;
-
/* Supported Devices */
static const struct pci_device_id octep_vf_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index 1a352f41f823..b9f13506f462 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -320,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
-extern struct workqueue_struct *octep_vf_wq;
-
int octep_vf_device_setup(struct octep_vf_device *oct);
int octep_vf_setup_iqs(struct octep_vf_device *oct);
void octep_vf_free_iqs(struct octep_vf_device *oct);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 0b27a695008b..971993586fb4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -717,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
+ /* pass lmac_id as 0 for CGX_CMR_RX_STAT9-12 (global counters) */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 1e5aa5397504..7d21905deed8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -188,14 +188,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
- struct device *sender = &mbox->pdev->dev;
while (!time_after(jiffies, timeout)) {
if (mdev->num_msgs == mdev->msgs_acked)
return 0;
usleep_range(800, 1000);
}
- dev_dbg(sender, "timed out while waiting for rsp\n");
+ trace_otx2_msg_wait_rsp(mbox->pdev);
return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
@@ -219,6 +218,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ struct mbox_msghdr *msg;
u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
@@ -251,7 +251,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
- trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+ msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size,
+ msg->id, msg->pcifunc);
spin_unlock(&mdev->mbox_lock);
@@ -445,6 +448,14 @@ const char *otx2_mbox_id2name(u16 id)
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CGX_MESSAGES
+#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CPT_MESSAGES
+#undef M
default:
return "INVALID ID";
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 005ca8a056c0..a213b2663583 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -524,6 +524,8 @@ struct get_hw_cap_rsp {
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
u8 npc_hash_extract; /* Is hash extract supported */
+#define HW_CAP_MACSEC BIT_ULL(1)
+ u64 hw_caps;
};
/* CGX mbox message formats */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index 655dd4726d36..0277d226293e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
mutex_unlock(&rvu->mbox_lock);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 6575c422635b..a8025f0486c9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1393,8 +1393,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
- if (blktype == BLKTYPE_NIX)
- rvu_nix_reset_mac(pfvf, pcifunc);
block = &hw->block[blkaddr];
@@ -1407,6 +1405,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (lf < 0) /* This should never happen */
continue;
+ if (blktype == BLKTYPE_NIX) {
+ rvu_nix_reset_mac(pfvf, pcifunc);
+ rvu_npc_clear_ucast_entry(rvu, pcifunc, lf);
+ }
/* Disable the LF */
rvu_write64(rvu, blkaddr, block->lfcfg_reg |
(lf << block->lfshift), 0x00ULL);
@@ -2031,6 +2033,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_shaping = hw->cap.nix_shaping;
rsp->npc_hash_extract = hw->cap.npc_hash_extract;
+ if (rvu->mcs_blk_cnt)
+ rsp->hw_caps = HW_CAP_MACSEC;
+
return 0;
}
@@ -2173,7 +2178,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
- trace_otx2_msg_process(mbox->pdev, _id, err); \
+ trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 60f085b00a8c..48f66292ad5c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -969,8 +969,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable);
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -996,6 +994,8 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf);
+
bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 992fa0b82e8d..d0331b0e0bfd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \
return req; \
}
@@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+
mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7fa98aeb3663..4a3370a40dd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -13,19 +13,26 @@
/* RVU LMTST */
#define LMT_TBL_OP_READ 0
#define LMT_TBL_OP_WRITE 1
-#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
+#define LMT_MAX_VFS 256
+
+#define LMT_MAP_ENTRY_ENA BIT_ULL(20)
+#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16)
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
- u64 tbl_base;
+ u64 tbl_base, cfg;
+ int pfs, vfs;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ vfs = 1 << (cfg & 0xF);
+ pfs = 1 << ((cfg >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
return -ENOMEM;
@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));
+
+ cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
+ /* 2048 LMTLINES */
+ cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
+
+ writeq(cfg, (lmt_map_base + (index + 8)));
+
/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
* changes effective. Write 1 for flush and read is being used as a
* barrier and sets up a data dependency. Write to 0 after a write
@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
- return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
- pf = rvu_get_pf(pcifunc) & 0x1F;
+ pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
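For a sense of the sizing change in lmtst_map_table_ops() above: the low
nibble of APR_AF_LMT_CFG encodes log2 of the per-PF function count and the
next three bits encode log2 of the PF count, so the mapping now scales with
the actual configuration instead of a fixed 128 KiB. A worked example with
hypothetical field values:

	int vfs = 1 << 8;	/* cfg & 0xF == 8 -> 256 functions per PF */
	int pfs = 1 << 5;	/* (cfg >> 4) & 0x7 == 5 -> 32 PFs */
	size_t map_size = pfs * vfs * LMT_MAPTBL_ENTRY_SIZE;
	/* 32 * 256 * 16 B == 131072 B == 128 KiB, the old fixed size. */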
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a1f9ec03c2ce..c827da626471 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
u64 lmt_addr, val, tbl_base;
int pf, vf, num_vfs, hw_vfs;
void __iomem *lmt_map_base;
+ int apr_pfs, apr_vfs;
int buf_size = 10240;
size_t off = 0;
int index = 0;
@@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ apr_vfs = 1 << (val & 0xF);
+ apr_pfs = 1 << ((val >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
+ LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
kfree(buf);
@@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
pf);
- index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
(tbl_base + index));
lmt_addr = readq(lmt_map_base + index);
@@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
/* Reading num of VFs per PF */
rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
for (vf = 0; vf < num_vfs; vf++) {
- index = (pf * rvu->hw->total_vfs * 16) +
+ index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
off += scnprintf(&buf[off], buf_size - 1 - off,
"PF%d:VF%d \t\t", pf, vf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 821fe242f821..da15bb451178 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -820,24 +820,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
- return;
-
- /* Get 'pcifunc' of PF device */
- pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
-
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
- NIXLF_BCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
-}
-
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan)
{
@@ -1125,6 +1107,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
int index, blkaddr;
@@ -1133,9 +1116,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
return;
/* Ucast MCAM match entry of this PF/VF */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC),
+ pfvf->nix_rx_intf)) {
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ }
 /* Nothing to do for VFs on platforms where pkt replication
 * is not supported
@@ -3588,3 +3574,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
return 0;
}
+
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int ucast_idx, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false);
+
+ npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0);
+
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == ucast_idx) {
+ list_del(&rule->list);
+ kfree(rule);
+ break;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
index 052ae5923e3a..32953cca108c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -60,6 +60,8 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
mutex_unlock(&rvu->mbox_lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
index 775fd4c35794..19e0d16b12f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -11,3 +11,5 @@
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status);
+EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index 5704520f9b02..4cd0fc4b0d20 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -18,33 +18,42 @@
#include "mbox.h"
TRACE_EVENT(otx2_msg_alloc,
- TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
- TP_ARGS(pdev, id, size),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc),
+ TP_ARGS(pdev, id, size, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(u64, size)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->size = size;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size,
+ __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_send,
- TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
- TP_ARGS(pdev, num_msgs, msg_size),
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size,
+ u16 id, u16 pcifunc),
+ TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, num_msgs)
__field(u64, msg_size)
+ __field(u16, id)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->num_msgs = num_msgs;
__entry->msg_size = msg_size;
+ __entry->id = id;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
- __entry->num_msgs, __entry->msg_size)
+ TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n",
+ __get_str(dev), __entry->num_msgs, __entry->msg_size,
+ otx2_mbox_id2name(__entry->id), __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_check,
@@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt,
);
TRACE_EVENT(otx2_msg_process,
- TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
- TP_ARGS(pdev, id, err),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc),
+ TP_ARGS(pdev, id, err, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(int, err)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->err = err;
+ __entry->pcifunc = pcifunc;
+ ),
+ TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id),
+ __entry->err, __entry->pcifunc)
+);
+
+TRACE_EVENT(otx2_msg_wait_rsp,
+ TP_PROTO(const struct pci_dev *pdev),
+ TP_ARGS(pdev),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ ),
+ TP_fast_assign(__assign_str(dev)
+ ),
+ TP_printk("[%s] timed out while waiting for response\n",
+ __get_str(dev))
+);
+
+TRACE_EVENT(otx2_msg_status,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs),
+ TP_ARGS(pdev, msg, num_msgs),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u16, num_msgs)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->num_msgs = num_msgs;
+ ),
+ TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev),
+ __get_str(str), __entry->num_msgs)
+);
+
+TRACE_EVENT(otx2_parse_dump,
+ TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word),
+ TP_ARGS(pdev, msg, word),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, w0)
+ __field(u64, w1)
+ __field(u64, w2)
+ __field(u64, w3)
+ __field(u64, w4)
+ __field(u64, w5)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->w0 = *(word + 0);
+ __entry->w1 = *(word + 1);
+ __entry->w2 = *(word + 2);
+ __entry->w3 = *(word + 3);
+ __entry->w4 = *(word + 4);
+ __entry->w5 = *(word + 5);
),
- TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->err)
+ TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n",
+ __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2,
+ __entry->w3, __entry->w4, __entry->w5)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index c3b6e0f60a79..7f6a435ac680 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -357,9 +357,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
mutex_lock(&pfvf->mbox.lock);
/* Remove RQ's policer mapping */
- for (qidx = 0; qidx < hw->rx_queues; qidx++)
- cn10k_map_unmap_rq_policer(pfvf, qidx,
- hw->matchall_ipolicer, false);
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false);
+ if (rc)
+ dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).",
+ qidx, rc);
+ }
rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index fc59e50bafce..a6500e3673f2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -663,10 +663,10 @@ static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
return -EOPNOTSUPP;
}
-static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
+static int cn10k_ipsec_outb_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.dev;
struct cn10k_tx_sa_s *sa_entry;
struct qmem *sa_info;
struct otx2_nic *pf;
@@ -676,7 +676,7 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
if (err)
return err;
- pf = netdev_priv(netdev);
+ pf = netdev_priv(dev);
err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
if (err)
@@ -700,18 +700,18 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
return 0;
}
-static int cn10k_ipsec_add_state(struct xfrm_state *x,
+static int cn10k_ipsec_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
return cn10k_ipsec_inb_add_state(x, extack);
else
- return cn10k_ipsec_outb_add_state(x, extack);
+ return cn10k_ipsec_outb_add_state(dev, x, extack);
}
-static void cn10k_ipsec_del_state(struct xfrm_state *x)
+static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x)
{
- struct net_device *netdev = x->xso.dev;
struct cn10k_tx_sa_s *sa_entry;
struct qmem *sa_info;
struct otx2_nic *pf;
@@ -720,7 +720,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x)
if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
return;
- pf = netdev_priv(netdev);
+ pf = netdev_priv(dev);
sa_info = (struct qmem *)x->xso.offload_handle;
sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
@@ -732,7 +732,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x)
err = cn10k_outb_write_sa(pf, sa_info);
if (err)
- netdev_err(netdev, "Error (%d) deleting SA\n", err);
+ netdev_err(dev, "Error (%d) deleting SA\n", err);
x->xso.offload_handle = 0;
qmem_free(pf->dev, sa_info);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index f3b9daffaec3..4c7e0f345cb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -531,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
if (sw_tx_sc->encrypt)
sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
- policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
+ pfvf->netdev->mtu + OTX2_ETH_HLEN);
/* Write SecTag excluding AN bits(1..0) */
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 84cd029a85aa..6b5c9536d26d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1822,7 +1822,7 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
req->bpid_per_chan = 1;
} else {
- req->chan_cnt = 1;
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
req->bpid_per_chan = 0;
}
@@ -1847,7 +1847,7 @@ int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
req->bpid_per_chan = 1;
} else {
- req->chan_cnt = 1;
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
req->bpid_per_chan = 0;
}
@@ -2055,6 +2055,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t
}
EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct get_hw_cap_rsp *rsp;
+ struct msg_req *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_get_hw_cap(mbox);
+ if (!req)
+ goto fail;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (rsp->hw_caps & HW_CAP_MACSEC)
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 1e88422825be..ca0e6ab12ceb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -356,6 +356,7 @@ struct otx2_flow_config {
struct list_head flow_list_tc;
u8 ucast_flt_cnt;
bool ntuple;
+ u16 ntuple_cnt;
};
struct dev_hw_ops {
@@ -631,9 +632,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
}
-
- if (is_dev_cn10kb(pfvf->pdev))
- __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -871,6 +869,7 @@ static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
{ \
struct _req_type *req; \
+ u16 pcifunc = mbox->pfvf->pcifunc; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&mbox->mbox, 0, sizeof(struct _req_type), \
@@ -879,7 +878,8 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
+ req->hdr.pcifunc = pcifunc; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
return req; \
}
@@ -1043,6 +1043,7 @@ void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1107,6 +1108,8 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
u64 iova, int len, u16 qidx, u16 flags);
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 33ec9a7f7c03..e13ae5484c19 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
if (!pfvf->flow_cfg)
return 0;
+ pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 010385b29988..45b8c9230184 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -315,7 +315,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_pause_frm_cfg *req, *rsp;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return;
mutex_lock(&pfvf->mbox.lock);
@@ -347,7 +347,7 @@ static int otx2_set_pauseparam(struct net_device *netdev,
if (pause->autoneg)
return -EOPNOTSUPP;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return -EOPNOTSUPP;
if (pause->rx_pause)
@@ -941,8 +941,8 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- /* LBK link is internal and always UP */
- if (is_otx2_lbkvf(pfvf->pdev))
+ /* LBK and SDP links are internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 1;
return pfvf->linfo.link_up;
}
@@ -1413,7 +1413,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- if (is_otx2_lbkvf(pfvf->pdev)) {
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) {
cmd->base.duplex = DUPLEX_FULL;
cmd->base.speed = SPEED_100000;
} else {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 47bfd1fb37d4..64c6d9162ef6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
- count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
@@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+ pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index cfed9ec5b157..db7c466fdc39 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -465,6 +465,9 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)",
+ vf_mbox->num_msgs);
+
for (id = 0; id < vf_mbox->num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
offset);
@@ -473,7 +476,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
goto inval_msg;
/* Set VF's number in each of the msg */
- msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc &= ~RVU_PFVF_FUNC_MASK;
msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
offset = msg->next_msgoff;
}
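The one-character change above is a real fix: the old msg->pcifunc &= RVU_PFVF_FUNC_MASK kept only the function bits and wiped the PF bits, while the intent is the opposite, clearing the function field before OR-ing in the VF number. A quick userspace check, assuming the RVU layout where FUNC occupies the low 10 bits (mask 0x3ff):

#include <stdio.h>
#include <stdint.h>

#define RVU_PFVF_FUNC_MASK 0x3ff  /* assumed RVU FUNC field mask */

int main(void)
{
	uint16_t pcifunc = 0x0407;  /* PF bits 0x0400, stale func 0x007 */
	int vf_idx = 2;

	uint16_t wrong = (pcifunc & RVU_PFVF_FUNC_MASK) |
			 ((vf_idx + 1) & RVU_PFVF_FUNC_MASK);  /* PF bits lost */
	uint16_t fixed = (pcifunc & ~RVU_PFVF_FUNC_MASK) |
			 ((vf_idx + 1) & RVU_PFVF_FUNC_MASK);  /* PF bits kept */

	printf("wrong=0x%04x fixed=0x%04x\n", wrong, fixed);
	return 0;
}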
@@ -503,6 +506,9 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)",
+ vf_mbox->up_num_msgs);
+
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
msg = mdev->mbase + offset;
@@ -819,6 +825,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
+ trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
@@ -974,6 +983,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
@@ -1023,6 +1035,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)",
+ hdr->num_msgs);
}
if (mbox_data & MBOX_DOWN_MSG) {
@@ -1039,6 +1054,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)",
+ hdr->num_msgs);
}
return IRQ_HANDLED;
@@ -3048,7 +3066,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -3057,7 +3075,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -3067,10 +3085,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -3128,6 +3144,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ otx2_set_hw_capabilities(pf);
+
err = cn10k_mcs_init(pf);
if (err)
goto err_del_mcam_entries;
@@ -3246,8 +3264,6 @@ err_detach_rsrc:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -3289,6 +3305,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req = (struct cgx_link_info_msg *)msghdr;
req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = pf->pcifunc;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
@@ -3447,8 +3464,6 @@ static void otx2_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pf->pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2_pf_driver = {
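The probe/remove changes in this file (and the matching otx2_vf.c, rep.c, and prestera hunks below) switch to the managed pcim_request_all_regions(), so the manual pci_release_regions() calls and the err_release_regions labels become dead weight. A kernel-style sketch of the resulting probe shape, not standalone, with a hypothetical demo driver:

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);  /* managed enable */
	if (err)
		return err;

	/* Managed regions: released automatically on any failure below
	 * and on driver unbind, so remove() needs no release call.
	 */
	err = pcim_request_all_regions(pdev, "demo");
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err)
		return err;  /* devres unwinds the regions */

	return 0;
}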
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 0a6bb346ba45..99ace381cc78 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -335,6 +335,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ u64 *word = (u64 *)parse;
void *end, *start;
u32 metasize = 0;
u64 *seg_addr;
@@ -342,9 +343,12 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
int seg;
if (unlikely(parse->errlev || parse->errcode)) {
- if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) {
+ trace_otx2_parse_dump(pfvf->pdev, "Err:", word);
return;
+ }
}
+ trace_otx2_parse_dump(pfvf->pdev, "", word);
if (pfvf->xdp_prog)
if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
@@ -1410,9 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq,
- struct xdp_frame *xdpf,
- u64 dma_addr, int len, int *offset, u16 flags)
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 7ef3ba477d49..8a8b598bd389 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -136,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
+ rsp->hdr.pcifunc = req->pcifunc;
rsp->hdr.rc = 0;
err = otx2_mbox_up_handler_cgx_link_event(
vf, (struct cgx_link_info_msg *)req, rsp);
@@ -548,7 +548,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -557,7 +557,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -565,10 +565,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qcount = num_online_cpus();
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -729,9 +727,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
#ifdef CONFIG_DCB
- err = otx2_dcbnl_set_ops(netdev);
- if (err)
- goto err_free_zc_bmap;
+ /* Priority flow control is not supported for LBK and SDP VFs */
+ if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_free_zc_bmap;
+ }
#endif
otx2_qos_init(vf, qos_txqs);
@@ -765,8 +766,6 @@ err_free_irq_vectors:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -815,8 +814,6 @@ static void otx2vf_remove(struct pci_dev *pdev)
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2vf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
index ce10caea8511..b328aae23d73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -11,6 +11,7 @@
#include <net/xdp.h>
#include "otx2_common.h"
+#include "otx2_struct.h"
#include "otx2_xsk.h"
int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
@@ -196,11 +197,39 @@ void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int
sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
}
+static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+ u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset;
+
+ sq = &pfvf->qset.sq[qidx];
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+}
+
void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
int queue, int budget)
{
struct xdp_desc *xdp_desc = pool->tx_descs;
- int err, i, work_done = 0, batch;
+ int i, batch;
budget = min(budget, otx2_read_free_sqe(pfvf, queue));
batch = xsk_tx_peek_release_desc_batch(pool, budget);
@@ -211,15 +240,6 @@ void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
dma_addr_t dma_addr;
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
- err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len,
- queue, OTX2_AF_XDP_FRAME);
- if (!err) {
- netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet err%d\n", err);
- break;
- }
- work_done++;
+ otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue);
}
-
- if (work_done)
- xsk_tx_release(pool);
}
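otx2_xsk_sq_append_pkt() above sizes the descriptor through sizem1, the SQE footprint in 16-byte units minus one. A userspace check of that encoding, assuming (as in this driver) a 16-byte nix_sqe_hdr_s followed by one 16-byte SG subdescriptor/iova pair:

#include <stdio.h>

int main(void)
{
	int offset = 16 + 16;  /* nix_sqe_hdr_s + SG subdesc/iova pair */

	printf("SQE bytes=%d sizem1=%d\n", offset, offset / 16 - 1);
	return 0;
}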
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 35acc07bd964..5765bac119f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -1638,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
if (!node->is_static)
dwrr_del_node = true;
+ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
/* destroy the leaf node */
otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
@@ -1682,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
}
kfree(new_cfg);
- /* update tx_real_queues */
- otx2_qos_update_tx_netdev_queues(pfvf);
-
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index c5dbae0e513b..58d572ce08ef 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -256,6 +256,26 @@ out:
return err;
}
+static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
+{
+ struct ndc_sync_op *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->nix_lf_tx_sync = true;
+ req->npa_lf_sync = true;
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -285,6 +305,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
otx2_qos_sqb_flush(pfvf, sq_idx);
otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
+ /* NIX/NPA NDC sync */
+ otx2_qos_nix_npa_ndc_sync(pfvf);
otx2_cleanup_tx_cqes(pfvf, cq);
mutex_lock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 7153a71dfc86..2cd3da3b6843 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -765,7 +765,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -774,7 +774,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ goto err_set_drv_data;
}
pci_set_master(pdev);
@@ -782,7 +782,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
err = -ENOMEM;
- goto err_release_regions;
+ goto err_set_drv_data;
}
pci_set_drvdata(pdev, priv);
@@ -799,7 +799,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = otx2_init_rsrc(pdev, priv);
if (err)
- goto err_release_regions;
+ goto err_set_drv_data;
priv->iommu_domain = iommu_get_domain_for_dev(dev);
@@ -822,9 +822,8 @@ err_detach_rsrc:
otx2_disable_mbox_intr(priv);
otx2_pfaf_mbox_destroy(priv);
pci_free_irq_vectors(pdev);
-err_release_regions:
+err_set_drv_data:
pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
return err;
}
@@ -844,7 +843,6 @@ static void rvu_rep_remove(struct pci_dev *pdev)
otx2_pfaf_mbox_destroy(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
}
static struct pci_driver rvu_rep_driver = {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
index 4cd53a2dae46..634f4543c1d7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
static void prestera_counter_stats_work(struct work_struct *work)
{
- struct delayed_work *dl_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *dl_work = to_delayed_work(work);
struct prestera_counter *counter =
container_of(dl_work, struct prestera_counter, stats_dw);
struct prestera_counter_block *block;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 35857dc19542..c45d108b2f6d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_pci_enable_device;
}
- err = pci_request_regions(pdev, driver_name);
+ err = pcim_request_all_regions(pdev, driver_name);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
goto err_pci_request_regions;
}
@@ -938,7 +938,6 @@ err_pci_dev_alloc:
err_pp_ioremap:
err_mem_ioremap:
err_dma_mask:
- pci_release_regions(pdev);
err_pci_request_regions:
err_pci_enable_device:
return err;
@@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
destroy_workqueue(fw->wq);
prestera_fw_uninit(fw);
- pci_release_regions(pdev);
}
static const struct pci_device_id prestera_pci_devices[] = {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 72c1967768f4..e4cfdc8bc055 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -353,7 +353,7 @@ static void rxq_refill(struct net_device *dev)
static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
- struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
+ struct pxa168_eth_private *pep = timer_container_of(pep, t, timeout);
napi_schedule(&pep->napi);
}
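This hunk, like the skge, sky2, and mlx4 ones below, is the mechanical from_timer() -> timer_container_of() rename; both helpers are container_of() on the embedded timer. A compilable userspace sketch (GCC/Clang typeof) with stand-in types:

#include <stdio.h>
#include <stddef.h>

struct timer_list { int dummy; };
struct pxa168_eth_private {
	int napi_state;
	struct timer_list timeout;
};

/* same shape as the kernel helper: recover the enclosing struct */
#define timer_container_of(var, timer, field) \
	((typeof(var))((char *)(timer) - offsetof(typeof(*(var)), field)))

int main(void)
{
	struct pxa168_eth_private storage = { .napi_state = 42 };
	struct timer_list *t = &storage.timeout;
	struct pxa168_eth_private *pep = NULL;

	pep = timer_container_of(pep, t, timeout);
	printf("recovered napi_state=%d\n", pep->napi_state);
	return 0;
}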
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index b2081d6e34f0..05349a0b2db1 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1494,7 +1494,7 @@ static int xm_check_link(struct net_device *dev)
*/
static void xm_link_timer(struct timer_list *t)
{
- struct skge_port *skge = from_timer(skge, t, link_timer);
+ struct skge_port *skge = timer_container_of(skge, t, link_timer);
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e2a9aae8bc9b..3831f533b9db 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2961,7 +2961,7 @@ static int sky2_rx_hung(struct net_device *dev)
static void sky2_watchdog(struct timer_list *t)
{
- struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
+ struct sky2_hw *hw = timer_container_of(hw, t, watchdog_timer);
/* Check for lost IRQ once a second */
if (sky2_read32(hw, B0_ISRC)) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 7c27a19c4d8f..b4c01e2878f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -14,7 +14,7 @@
struct mtk_eth_muxc {
const char *name;
- int cap_bit;
+ u64 cap_bit;
int (*set_path)(struct mtk_eth *eth, u64 path);
};
@@ -31,6 +31,8 @@ static const char *mtk_eth_path_name(u64 path)
return "gmac2_rgmii";
case MTK_ETH_PATH_GMAC2_SGMII:
return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_2P5GPHY:
+ return "gmac2_2p5gphy";
case MTK_ETH_PATH_GMAC2_GEPHY:
return "gmac2_gephy";
case MTK_ETH_PATH_GDM1_ESW:
@@ -127,6 +129,29 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
return 0;
}
+static int set_mux_gmac2_to_2p5gphy(struct mtk_eth *eth, u64 path)
+{
+ int ret;
+
+ if (path == MTK_ETH_PATH_GMAC2_2P5GPHY) {
+ ret = regmap_clear_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_GMAC2_V2);
+ if (ret)
+ return ret;
+
+ /* Setup mux to 2p5g PHY */
+ ret = regmap_clear_bits(eth->infra, TOP_MISC_NETSYS_PCS_MUX,
+ MUX_G2_USXGMII_SEL);
+ if (ret)
+ return ret;
+
+ dev_dbg(eth->dev, "path %s in %s updated\n",
+ mtk_eth_path_name(path), __func__);
+ }
+
+ return 0;
+}
+
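Both register updates in set_mux_gmac2_to_2p5gphy() above are read-modify-write clears, which regmap_clear_bits() performs under the regmap's own locking. A userspace sketch of the bit math; MUX_G2_USXGMII_SEL matches the BIT(1) define added in mtk_eth_soc.h below, while the SYSCFG0 bit value is only an assumption for the demo:

#include <stdio.h>
#include <stdint.h>

#define SYSCFG0_SGMII_GMAC2_V2 (1u << 9)  /* demo assumption */
#define MUX_G2_USXGMII_SEL     (1u << 1)  /* from mtk_eth_soc.h */

static void clear_bits(uint32_t *reg, uint32_t mask) { *reg &= ~mask; }

int main(void)
{
	uint32_t syscfg0 = SYSCFG0_SGMII_GMAC2_V2;
	uint32_t pcs_mux = MUX_G2_USXGMII_SEL;

	clear_bits(&syscfg0, SYSCFG0_SGMII_GMAC2_V2);  /* drop SGMII mapping */
	clear_bits(&pcs_mux, MUX_G2_USXGMII_SEL);      /* PCS mux -> 2.5G PHY */
	printf("syscfg0=0x%x pcs_mux=0x%x\n", syscfg0, pcs_mux);
	return 0;
}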
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
@@ -210,6 +235,10 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
.cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
.set_path = set_mux_u3_gmac2_to_qphy,
}, {
+ .name = "mux_gmac2_to_2p5gphy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_TO_2P5GPHY,
+ .set_path = set_mux_gmac2_to_2p5gphy,
+ }, {
.name = "mux_gmac1_gmac2_to_sgmii_rgmii",
.cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
.set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
@@ -260,6 +289,20 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
return mtk_eth_mux_setup(eth, path);
}
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ u64 path = 0;
+
+ if (mac_id == MTK_GMAC2_ID)
+ path = MTK_ETH_PATH_GMAC2_2P5GPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ return mtk_eth_mux_setup(eth, path);
+}
+
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
u64 path = 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 22a532695fb0..b38e4f2de674 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -503,7 +503,7 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
/* Force Port1 XGMAC Link Up */
- mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
+ mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
MTK_XGMAC_STS(MTK_GMAC1_ID));
/* Adjust GSW bridge IPG to 11 */
@@ -532,6 +532,26 @@ static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
return NULL;
}
+static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ if (mtk_interface_mode_is_xgmii(eth, iface) &&
+ mac->id != MTK_GMAC1_ID) {
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
+ XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
+ MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
+ }
+
+ return 0;
+}
+
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -573,6 +593,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
break;
case PHY_INTERFACE_MODE_INTERNAL:
+ if (mac->id == MTK_GMAC2_ID &&
+ MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
+ err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
break;
default:
goto err_phy;
@@ -644,12 +670,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
/* Setup gmac */
- if (mtk_is_netsys_v3_or_greater(eth) &&
- mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
- mtk_setup_bridge_switch(eth);
+ if (mac->id == MTK_GMAC1_ID)
+ mtk_setup_bridge_switch(eth);
}
return;
@@ -696,10 +722,19 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
- mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
+ /* GMAC modes */
+ mtk_m32(mac->hw,
+ MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
+ MTK_MAC_MCR(mac->id));
+ } else if (mac->id != MTK_GMAC1_ID) {
+ /* XGMAC except for built-in switch */
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
+ MTK_XMAC_MCR(mac->id));
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
+ MTK_XGMAC_STS(mac->id));
+ }
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
@@ -771,13 +806,12 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
-static void mtk_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex, bool tx_pause, bool rx_pause)
+static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
@@ -811,6 +845,56 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ u32 mcr;
+
+ if (mac->id == MTK_GMAC1_ID)
+ return;
+
+ /* Eliminate the interference (before link-up) caused by PHY noise */
+ mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
+ mdelay(20);
+ mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
+ MTK_XMAC_CNT_CTRL(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
+
+ mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
+ mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
+ XMAC_MCR_TRX_DISABLE);
+ /* Configure pause modes -
+ * phylink will avoid these for half duplex
+ */
+ if (tx_pause)
+ mcr |= XMAC_MCR_FORCE_TX_FC;
+ if (rx_pause)
+ mcr |= XMAC_MCR_FORCE_RX_FC;
+
+ mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
+}
+
+static void mtk_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+
+ if (mtk_interface_mode_is_xgmii(mac->hw, interface))
+ mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+ else
+ mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+}
+
static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
@@ -828,6 +912,9 @@ static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
struct mtk_eth *eth = mac->hw;
u32 val;
+ if (mtk_interface_mode_is_xgmii(eth, mac->interface))
+ return -EOPNOTSUPP;
+
/* Tx idle timer in ms */
timer = DIV_ROUND_UP(timer, 1000);
@@ -858,6 +945,7 @@ static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
}
static const struct phylink_mac_ops mtk_phylink_ops = {
+ .mac_prepare = mtk_mac_prepare,
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
@@ -4748,7 +4836,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
if (mtk_is_netsys_v3_or_greater(mac->hw) &&
- MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |
@@ -4768,6 +4856,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink = phylink;
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
+ id == MTK_GMAC2_ID)
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ mac->phylink_config.supported_interfaces);
+
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 88ef2e9c50fc..6f72a8c8ae1e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -431,7 +431,8 @@
/* XMAC status registers */
#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
-#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_MODE(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(27) : BIT(11))
#define MTK_USXGMII_PCS_LINK BIT(8)
#define MTK_XGMAC_RX_FC BIT(5)
#define MTK_XGMAC_TX_FC BIT(4)
@@ -524,6 +525,21 @@
#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100 0
+/* XFI Mac control registers */
+#define MTK_XMAC_BASE(x) (0x12000 + (((x) - 1) * 0x1000))
+#define MTK_XMAC_MCR(x) (MTK_XMAC_BASE(x))
+#define XMAC_MCR_TRX_DISABLE 0xf
+#define XMAC_MCR_FORCE_TX_FC BIT(5)
+#define XMAC_MCR_FORCE_RX_FC BIT(4)
+
+/* XFI Mac logic reset registers */
+#define MTK_XMAC_LOGIC_RST(x) (MTK_XMAC_BASE(x) + 0x10)
+#define XMAC_LOGIC_RST BIT(0)
+
+/* XFI Mac count global control */
+#define MTK_XMAC_CNT_CTRL(x) (MTK_XMAC_BASE(x) + 0x100)
+#define XMAC_GLB_CNTCLR BIT(0)
+
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -587,6 +603,10 @@
#define GEPHY_MAC_SEL BIT(1)
/* Top misc registers */
+#define TOP_MISC_NETSYS_PCS_MUX 0x0
+#define NETSYS_PCS_MUX_MASK GENMASK(1, 0)
+#define MUX_G2_USXGMII_SEL BIT(1)
+
#define USB_PHY_SWITCH_REG 0x218
#define QPHY_SEL_MASK GENMASK(1, 0)
#define SGMII_QPHY_SEL 0x2
@@ -951,6 +971,7 @@ enum mkt_eth_capabilities {
MTK_RGMII_BIT = 0,
MTK_TRGMII_BIT,
MTK_SGMII_BIT,
+ MTK_2P5GPHY_BIT,
MTK_ESW_BIT,
MTK_GEPHY_BIT,
MTK_MUX_BIT,
@@ -971,6 +992,7 @@ enum mkt_eth_capabilities {
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT,
MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
@@ -980,6 +1002,7 @@ enum mkt_eth_capabilities {
MTK_ETH_PATH_GMAC1_SGMII_BIT,
MTK_ETH_PATH_GMAC2_RGMII_BIT,
MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_2P5GPHY_BIT,
MTK_ETH_PATH_GMAC2_GEPHY_BIT,
MTK_ETH_PATH_GDM1_ESW_BIT,
};
@@ -988,6 +1011,7 @@ enum mkt_eth_capabilities {
#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_2P5GPHY BIT_ULL(MTK_2P5GPHY_BIT)
#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
@@ -1010,6 +1034,8 @@ enum mkt_eth_capabilities {
BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC2_TO_2P5GPHY \
+ BIT_ULL(MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
@@ -1021,6 +1047,7 @@ enum mkt_eth_capabilities {
#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_2P5GPHY BIT_ULL(MTK_ETH_PATH_GMAC2_2P5GPHY_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
@@ -1030,6 +1057,7 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GMAC2_2P5GPHY (MTK_ETH_PATH_GMAC2_2P5GPHY | MTK_2P5GPHY)
#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
/* MUXes present on SoCs */
@@ -1049,6 +1077,10 @@ enum mkt_eth_capabilities {
(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
MTK_SHARED_SGMII)
+/* 2: GMAC2 -> 2P5GPHY */
+#define MTK_MUX_GMAC2_TO_2P5GPHY \
+ (MTK_ETH_MUX_GMAC2_TO_2P5GPHY | MTK_MUX | MTK_INFRA)
+
/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
@@ -1084,8 +1116,9 @@ enum mkt_eth_capabilities {
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_RSTCTRL_PPE1 | MTK_SRAM)
-#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
- MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)
+#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_GMAC2_2P5GPHY | \
+ MTK_MUX_GMAC2_TO_2P5GPHY | MTK_QDMA | MTK_RSTCTRL_PPE1 | \
+ MTK_RSTCTRL_PPE2 | MTK_SRAM)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@@ -1145,7 +1178,7 @@ struct mtk_reg_map {
};
/* struct mtk_eth_data - This is the structure holding all differences
- * among various plaforms
+ * among various platforms
* @reg_map Soc register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
@@ -1245,7 +1278,7 @@ struct mtk_soc_data {
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
* @state: Initialization and runtime state of the device
- * @soc: Holding specific data among vaious SoCs
+ * @soc: Holding specific data among various SoCs
*/
struct mtk_eth {
@@ -1437,6 +1470,23 @@ static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
return MTK_FOE_IB2_MULTICAST;
}
+static inline bool mtk_interface_mode_is_xgmii(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ return false;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_5GBASER:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
@@ -1445,6 +1495,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
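The new XFI MAC block above decodes to one 0x1000 register window per MAC starting at 0x12000. A quick userspace check of the layout, assuming the driver's ids MTK_GMAC2_ID = 1 and MTK_GMAC3_ID = 2:

#include <stdio.h>

#define MTK_XMAC_BASE(x)      (0x12000 + (((x) - 1) * 0x1000))
#define MTK_XMAC_MCR(x)       (MTK_XMAC_BASE(x))
#define MTK_XMAC_LOGIC_RST(x) (MTK_XMAC_BASE(x) + 0x10)
#define MTK_XMAC_CNT_CTRL(x)  (MTK_XMAC_BASE(x) + 0x100)

int main(void)
{
	for (int id = 1; id <= 2; id++)
		printf("gmac%d: MCR=0x%05x RST=0x%05x CNT=0x%05x\n", id + 1,
		       MTK_XMAC_MCR(id), MTK_XMAC_LOGIC_RST(id),
		       MTK_XMAC_CNT_CTRL(id));
	return 0;
}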
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index b175119a6a7d..b83886a41121 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1463,6 +1463,8 @@ static __maybe_unused int mtk_star_suspend(struct device *dev)
if (netif_running(ndev))
mtk_star_disable(ndev);
+ netif_device_detach(ndev);
+
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
return 0;
@@ -1487,6 +1489,8 @@ static __maybe_unused int mtk_star_resume(struct device *dev)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+ netif_device_attach(ndev);
+
return ret;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index e212a4ba9275..351dd152f4f3 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -2000,7 +2000,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
if (mtk_wed_is_v3_or_greater(dev->hw))
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
- /* initail tx interrupt trigger */
+ /* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
@@ -2011,7 +2011,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
dev->wlan.tx_tbit[1]));
- /* initail txfree interrupt trigger */
+ /* initial txfree interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 33ba0a5c38ac..edcc6f662618 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -236,7 +236,7 @@ static void dump_err_buf(struct mlx4_dev *dev)
static void poll_catas(struct timer_list *t)
{
- struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+ struct mlx4_priv *priv = timer_container_of(priv, t, catas_err.timer);
struct mlx4_dev *dev = &priv->dev;
u32 slave_read;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index cd754cd76bde..d73a2044dc26 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -249,7 +249,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
- u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 max_val_cycles = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC;
u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
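The 1000ULL suffix above is an integer-overflow fix: freq_khz * 1000 is evaluated in 32-bit arithmetic first, and only the already-wrapped product was widened for the assignment. A userspace demonstration with a hypothetical 5 GHz clock, assuming the wrap-around constant is 10:

#include <stdio.h>
#include <stdint.h>

#define MLX4_EN_WRAP_AROUND_SEC 10UL  /* assumed value for the demo */

int main(void)
{
	uint16_t freq = 5000;             /* hypothetical 5 GHz clock, MHz */
	uint32_t freq_khz = freq * 1000;  /* 5,000,000: still fits in u32 */

	/* old: freq_khz * 1000 wraps in 32-bit before being widened */
	uint64_t wrong = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
	/* new: 1000ULL forces the whole product into 64-bit */
	uint64_t fixed = freq_khz * 1000ULL * MLX4_EN_WRAP_AROUND_SEC;

	printf("wrong=%llu fixed=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)fixed);
	return 0;
}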
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index cd17a3f4faf8..a68cd3f0304c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1897,6 +1897,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d7444782bfdd..698a5d1f0d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -106,7 +106,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 568bbe5f83f5..d292e6a9e22c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -154,7 +154,8 @@ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
steering/hws/vport.o \
steering/hws/bwc_complex.o \
steering/hws/fs_hws_pools.o \
- steering/hws/fs_hws.o
+ steering/hws/fs_hws.o \
+ steering/hws/action_ste_pool.o
#
# SF device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e53dbdc0a7a1..b1aeea7c4a91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -927,8 +927,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
static void cb_timeout_handler(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_cmd_work_ent *ent = container_of(dwork,
struct mlx5_cmd_work_ent,
cb_timeout_work);
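This hunk and the prestera_counter.c one above replace the open-coded container_of() with the to_delayed_work() helper; the two are identical, so the conversions are behavior-preserving. A compilable userspace sketch with stand-in structures:

#include <stdio.h>
#include <stddef.h>

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; int timer; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define to_delayed_work(w) container_of(w, struct delayed_work, work)

int main(void)
{
	struct delayed_work dw = { .timer = 7 };
	struct work_struct *w = &dw.work;  /* what the workqueue hands back */

	printf("timer=%d\n", to_delayed_work(w)->timer);
	return 0;
}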
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 32ed4963b8ad..5b0d03b3efe8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -520,6 +520,12 @@ struct mlx5e_xdpsq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
+struct mlx5e_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+};
+
struct mlx5e_ktls_resync_resp;
struct mlx5e_icosq {
@@ -716,6 +722,7 @@ struct mlx5e_rq {
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
+ struct mlx5e_xdp_buff mxbuf;
/* AF_XDP zero-copy */
struct xsk_buff_pool *xsk_pool;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 81523825faa2..cb972b2d46e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -114,6 +114,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
int err = 0;
rtnl_lock();
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
@@ -123,6 +124,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
out:
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 131ed97ca997..5d0014129a7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -8,6 +8,7 @@
#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <net/netdev_lock.h>
struct mlx5e_ptp_fs {
struct mlx5_flow_handle *l2_rule;
@@ -449,8 +450,22 @@ static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
struct mlx5e_ptpsq *ptpsq =
container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ /* Recovering the PTP SQ means re-enabling NAPI, which requires the
+ * netdev instance lock. However, SQ closing has to wait for this work
+ * task to finish while also holding the same lock. So either get the
+ * lock or find that the SQ is no longer enabled and thus this work is
+ * not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
+ netdev_unlock(sq->netdev);
}
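The trylock loop above (repeated for mlx5e_tx_err_cqe_work() in en_main.c below) resolves a lock-order inversion: recovery needs the netdev instance lock, while the SQ closer holds that lock and flushes this very work. A kernel-style sketch of the pattern, not standalone; the demo_* names are hypothetical:

static void demo_recover_work(struct work_struct *work)
{
	struct demo_sq *sq = container_of(work, struct demo_sq, recover_work);

	while (!netdev_trylock(sq->netdev)) {
		/* The closer holds the lock and waits for this work to
		 * finish; once it has disabled the SQ, recovery is moot
		 * and returning here breaks the deadlock.
		 */
		if (!test_bit(DEMO_SQ_ENABLED, &sq->state))
			return;
		msleep(20);
	}
	demo_recover(sq);  /* hypothetical recovery step */
	netdev_unlock(sq->netdev);
}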
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
@@ -892,7 +907,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (err)
goto err_free;
- netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);
+ netif_napi_add_locked(netdev, &c->napi, mlx5e_ptp_napi_poll);
mlx5e_ptp_build_params(c, cparams, params);
@@ -910,7 +925,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
return 0;
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
err_free:
kvfree(cparams);
kvfree(c);
@@ -920,7 +935,7 @@ err_free:
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
mlx5e_ptp_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -929,7 +944,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
@@ -957,7 +972,7 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
}
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index dbd9482359e1..c3bda4612fa9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -107,9 +107,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
- rtnl_lock();
mlx5e_activate_txqsq(sq);
- rtnl_unlock();
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
@@ -176,7 +174,6 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
priv = ptpsq->txqsq.priv;
- rtnl_lock();
mutex_lock(&priv->state_lock);
chs = &priv->channels;
netdev = priv->netdev;
@@ -196,7 +193,6 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
netif_carrier_on(netdev);
mutex_unlock(&priv->state_lock);
- rtnl_unlock();
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 140606fcd23b..b5c19396e096 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -149,7 +149,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
- netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll);
+ netif_napi_add_locked(netdev, &t->napi, mlx5e_trap_napi_poll);
err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
@@ -164,7 +164,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
err_close_trap_rq:
mlx5e_close_trap_rq(&t->rq);
err_napi_del:
- netif_napi_del(&t->napi);
+ netif_napi_del_locked(&t->napi);
kvfree(t);
return ERR_PTR(err);
}
@@ -173,13 +173,13 @@ void mlx5e_close_trap(struct mlx5e_trap *trap)
{
mlx5e_tir_destroy(&trap->tir);
mlx5e_close_trap_rq(&trap->rq);
- netif_napi_del(&trap->napi);
+ netif_napi_del_locked(&trap->napi);
kvfree(trap);
}
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
- napi_enable(&trap->napi);
+ napi_enable_locked(&trap->napi);
mlx5e_activate_rq(&trap->rq);
mlx5e_trigger_napi_sched(&trap->napi);
}
@@ -189,7 +189,7 @@ void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
struct mlx5e_trap *trap = priv->en_trap;
mlx5e_deactivate_rq(&trap->rq);
- napi_disable(&trap->napi);
+ napi_disable_locked(&trap->napi);
}
static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
@@ -285,6 +285,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ netdev_lock(priv->netdev);
switch (trap_ctx->action) {
case DEVLINK_TRAP_ACTION_TRAP:
err = mlx5e_handle_action_trap(priv, trap_ctx->id);
@@ -297,6 +298,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
trap_ctx->action);
err = -EINVAL;
}
+ netdev_unlock(priv->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index f803e1c93590..5ce1b463b7a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -707,8 +707,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
page = xdpi.page.page;
- /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
- * as we know this is a page_pool page.
+ /* No need to check page_pool_page_is_pp() as we
+ * know this is a page_pool page.
*/
page_pool_recycle_direct(page->pp, page);
} while (++n < num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 446e492c6bb8..46ab0a9e8cdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -45,12 +45,6 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
-struct mlx5e_xdp_buff {
- struct xdp_buff xdp;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
-};
-
/* XDP packets can be transmitted in different ways. On completion, we need to
* distinguish between them to clean up things in a proper way.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 2dd842aac6fc..77f61cd28a79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -259,8 +259,7 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
- struct xfrm_state *x = sa_entry->x;
- struct net_device *netdev;
+ struct net_device *netdev = sa_entry->dev;
struct neighbour *n;
u8 addr[ETH_ALEN];
const void *pkey;
@@ -270,8 +269,6 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->type != XFRM_DEV_OFFLOAD_PACKET)
return;
- netdev = x->xso.real_dev;
-
mlx5_query_mac_address(mdev, addr);
switch (attrs->dir) {
case XFRM_DEV_OFFLOAD_IN:
@@ -692,17 +689,17 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
return 0;
}
-static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+static int mlx5e_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
- struct net_device *netdev = x->xso.real_dev;
struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
gfp_t gfp;
int err;
- priv = netdev_priv(netdev);
+ priv = netdev_priv(dev);
if (!priv->ipsec)
return -EOPNOTSUPP;
@@ -713,6 +710,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
return -ENOMEM;
sa_entry->x = x;
+ sa_entry->dev = dev;
sa_entry->ipsec = ipsec;
/* Check if this SA is originated from acquire flow temporary SA */
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
@@ -809,7 +807,7 @@ err_xfrm:
return err;
}
-static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+static void mlx5e_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -822,7 +820,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
WARN_ON(old != sa_entry);
}
-static void mlx5e_xfrm_free_state(struct xfrm_state *x)
+static void mlx5e_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -855,8 +853,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec *ipsec;
struct neighbour *n = ptr;
- struct net_device *netdev;
- struct xfrm_state *x;
unsigned long idx;
if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
@@ -876,11 +872,9 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
continue;
}
- x = sa_entry->x;
- netdev = x->xso.real_dev;
data = sa_entry->work->data;
- neigh_ha_snapshot(data->addr, n, netdev);
+ neigh_ha_snapshot(data->addr, n, sa_entry->dev);
queue_work(ipsec->wq, &sa_entry->work->work);
}
@@ -996,8 +990,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
size_t headers;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
+ lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&net->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
@@ -1170,7 +1164,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xdo.real_dev;
+ struct net_device *netdev = x->xdo.dev;
struct mlx5e_ipsec_pol_entry *pol_entry;
struct mlx5e_priv *priv;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index a63c2289f8af..ffcd0cdeb775 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -274,6 +274,7 @@ struct mlx5e_ipsec_limits {
struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_esn_state esn_state;
struct xfrm_state *x;
+ struct net_device *dev;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8705cffc747f..5fe016e477b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1147,6 +1147,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
bool reset = true;
int err;
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
@@ -1162,6 +1163,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
&trust_state, reset);
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index fdf9e9bb99ac..3cb8d3bf9044 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -43,7 +43,6 @@
#include "en/fs_ethtool.h"
#define LANES_UNKNOWN 0
-#define MAX_LANES 8
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -1098,10 +1097,8 @@ static void get_link_properties(struct net_device *netdev,
speed = info->speed;
lanes = info->lanes;
duplex = DUPLEX_FULL;
- } else if (data_rate_oper) {
+ } else if (data_rate_oper)
speed = 100 * data_rate_oper;
- lanes = MAX_LANES;
- }
out:
link_ksettings->base.duplex = duplex;
@@ -1689,6 +1686,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
return 0;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -2059,14 +2057,9 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
if (err)
return err;
- dev_hold(dev);
- rtnl_unlock();
-
err = mlx5_firmware_flash(mdev, fw, NULL);
release_firmware(fw);
- rtnl_lock();
- dev_put(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 05058710d2c7..04a969128161 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -484,7 +484,9 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
}
/* Need to fix some features.. */
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
return err;
}
@@ -521,7 +523,9 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
clear_bit(vid, fs->vlan->active_svlans);
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 3506024c2453..ea822c69d137 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,7 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
@@ -1903,7 +1904,20 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
+ /* Recovering queues means re-enabling NAPI, which requires the netdev
+ * instance lock. However, SQ closing flows have to wait for work tasks
+ * to finish while also holding the netdev instance lock. So either get
+ * the lock or find that the SQ is no longer enabled and thus this work
+ * is not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
+
mlx5e_reporter_tx_err_cqe(sq);
+ netdev_unlock(sq->netdev);
}
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
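
This trylock loop exists because of a lock-versus-flush inversion, not as an optimization. A schematic of the two sides (the close path shown here is abbreviated, not the exact mlx5 sequence):

	/*
	 *   close path                          recover work
	 *   ----------                          ------------
	 *   netdev_lock(dev)
	 *   clear_bit(MLX5E_SQ_STATE_ENABLED)
	 *   cancel_work_sync(&recover_work)  ... netdev_lock(dev) would block
	 *
	 * Because the closer clears the ENABLED bit before flushing the
	 * work, the work can poll netdev_trylock() and bail out once it
	 * observes the bit clear, instead of deadlocking against the
	 * lock holder that is waiting for it.
	 */
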
@@ -2705,8 +2719,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
- netif_napi_add_config(netdev, &c->napi, mlx5e_napi_poll, ix);
- netif_napi_set_irq(&c->napi, irq);
+ netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
+ netif_napi_set_irq_locked(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2728,7 +2742,7 @@ err_close_queues:
mlx5e_close_queues(c);
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
err_free:
kvfree(cparam);
@@ -2741,7 +2755,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
@@ -2773,7 +2787,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_txqsq(&c->sq[tc]);
mlx5e_qos_deactivate_queues(c);
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2782,7 +2796,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
mlx5e_qos_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -4276,7 +4290,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
if (!netdev->netdev_ops->ndo_bpf ||
params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
- xdp_clear_features_flag(netdev);
+ xdp_set_features_flag_locked(netdev, 0);
return;
}
@@ -4285,7 +4299,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
NETDEV_XDP_ACT_RX_SG |
NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG;
- xdp_set_features_flag(netdev, val);
+ xdp_set_features_flag_locked(netdev, val);
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
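
xdp_set_features_flag_locked() is the variant for callers that already hold the instance lock, which mlx5e_set_xdp_feature() now does on every path; clearing becomes publishing an empty mask. A sketch with arbitrarily chosen feature bits:

	static void example_set_xdp_feature(struct net_device *netdev, bool on)
	{
		xdp_features_t val = on ? NETDEV_XDP_ACT_BASIC |
					  NETDEV_XDP_ACT_REDIRECT : 0;

		/* caller is assumed to hold netdev->lock already */
		xdp_set_features_flag_locked(netdev, val);
	}
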
@@ -4349,6 +4363,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_MACSEC;
+ if (netdev->features & NETIF_F_HW_MACSEC)
+ netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
+
return features;
}
@@ -4964,21 +4982,19 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
- * through this flow. However, channel closing flows have to wait for
- * this work to finish while holding rtnl lock too. So either get the
- * lock or find that channels are being closed for other reason and
- * this work is not relevant anymore.
+ /* Recovering the TX queues implies re-enabling NAPI, which requires
+ * the netdev instance lock.
+ * However, channel closing flows have to wait for this work to finish
+ * while holding the same lock. So either get the lock or find that
+ * channels are being closed for another reason and this work is not
+ * relevant anymore.
*/
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(netdev)) {
if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
return;
msleep(20);
}
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
-
for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue =
netdev_get_tx_queue(netdev, i);
@@ -4992,8 +5008,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
break;
}
-unlock:
- rtnl_unlock();
+ netdev_unlock(netdev);
}
static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -5317,7 +5332,6 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
struct mlx5e_rq_stats *xskrq_stats;
struct mlx5e_rq_stats *rq_stats;
- ASSERT_RTNL();
if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
return;
@@ -5337,7 +5351,6 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_sq_stats *sq_stats;
- ASSERT_RTNL();
if (!priv->stats_nch)
return;
@@ -5358,7 +5371,6 @@ static void mlx5e_get_base_stats(struct net_device *dev,
struct mlx5e_ptp *ptp_channel;
int i, tc;
- ASSERT_RTNL();
if (!mlx5e_is_uplink_rep(priv)) {
rx->packets = 0;
rx->bytes = 0;
@@ -5454,6 +5466,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
mlx5e_dcbnl_build_netdev(netdev);
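
Setting request_ops_lock is the opt-in that makes the conversions in this series safe: the core then acquires the instance lock around the device's ops, so driver paths can assume it is held. A sketch of the registration-time setup (example_build_netdev() is a hypothetical wrapper):

	static void example_build_netdev(struct net_device *netdev)
	{
		/* core takes netdev->lock around this device's ops */
		netdev->request_ops_lock = true;
		/* per-device lockdep classes, so stacked devices (vlan,
		 * bond) do not trip false-positive nesting reports
		 */
		netdev_lockdep_set_classes(netdev);
	}
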
@@ -5835,9 +5849,11 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_nic_set_rx_mode(priv);
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -5850,9 +5866,16 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_dcbnl_delete_app(priv);
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ if (priv->en_trap) {
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+ }
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_nic_set_rx_mode(priv);
@@ -5862,11 +5885,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_blocking_events(priv);
- if (priv->en_trap) {
- mlx5e_deactivate_trap(priv);
- mlx5e_close_trap(priv->en_trap);
- priv->en_trap = NULL;
- }
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -6121,7 +6139,9 @@ static void mlx5e_update_features(struct net_device *netdev)
return; /* features will be updated on netdev registration */
rtnl_lock();
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
}
@@ -6132,7 +6152,7 @@ static void mlx5e_reset_channels(struct net_device *netdev)
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
- const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
+ const bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED;
const struct mlx5e_profile *profile = priv->profile;
int max_nch;
int err;
@@ -6174,15 +6194,19 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
* 2. Set our default XPS cpumask.
* 3. Build the RQT.
*
- * rtnl_lock is required by netif_set_real_num_*_queues in case the
+ * Locking is required by netif_set_real_num_*_queues in case the
* netdev has been registered by this point (if this function was called
* in the reload or resume flow).
*/
- if (take_rtnl)
+ if (need_lock) {
rtnl_lock();
+ netdev_lock(priv->netdev);
+ }
err = mlx5e_num_channels_changed(priv);
- if (take_rtnl)
+ if (need_lock) {
+ netdev_unlock(priv->netdev);
rtnl_unlock();
+ }
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2abab241f03b..63a7a788fb0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -33,6 +33,7 @@
#include <linux/dim.h>
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
@@ -803,6 +804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -885,6 +887,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
{
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
eth_hw_addr_random(netdev);
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
@@ -1344,9 +1348,11 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
netdev->wanted_features |= NETIF_F_HW_TC;
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -1356,9 +1362,11 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_rep_bridge_cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5fd70b4d55be..84b1ab8233b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1684,17 +1684,17 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf))
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1713,11 +1713,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
struct mlx5e_wqe_frag_info *head_wi = wi;
u16 rx_headroom = rq->buff.headroom;
struct mlx5e_frag_page *frag_page;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -1737,8 +1737,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetch(va + rx_headroom);
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- frag_consumed_bytes, &mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ frag_consumed_bytes, mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1750,8 +1750,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
- wi->offset, frag_consumed_bytes);
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, wi->offset,
+ frag_consumed_bytes);
truesize += frag_info->frag_stride;
cqe_bcnt -= frag_consumed_bytes;
@@ -1760,7 +1761,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_wqe_frag_info *pwi;
@@ -1770,21 +1771,23 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
- mxbuf.xdp.data_end - mxbuf.xdp.data,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
+ mxbuf->xdp.data_end - mxbuf->xdp.data,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb))
return NULL;
skb_mark_for_recycle(skb);
head_wi->frag_page->frags++;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
pwi->frag_page->frags++;
@@ -1984,10 +1987,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
u32 frag_offset = head_offset;
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
unsigned int truesize = 0;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -2033,9 +2036,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
}
- mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
+ linear_data_len, mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
@@ -2046,7 +2050,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
else
truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, frag_offset,
pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
@@ -2054,7 +2059,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
@@ -2067,10 +2072,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
- linear_frame_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq, &wi->linear_page);
return NULL;
@@ -2080,13 +2085,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
wi->linear_page.frags++;
mlx5e_page_release_fragmented(rq, &wi->linear_page);
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, frag_page - head_page,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
pagep = head_page;
do
@@ -2097,12 +2103,13 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
} else {
dma_addr_t addr;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
xdp_update_skb_shared_info(skb, sinfo->nr_frags,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
pagep = frag_page - sinfo->nr_frags;
do
@@ -2152,20 +2159,20 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
frag_page->frags++;
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
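
All three skb-build variants in this file switch from a stack-local mlx5e_xdp_buff to one embedded in the RQ. This is safe because NAPI serializes all RX processing on a given RQ, and it lets every helper in the path (fill, XDP handling, skb build, frag accounting) see the same buffer state without passing copies around. A hedged structural sketch, not the full mlx5e_rq layout:

	struct example_rq {
		struct bpf_prog __rcu *xdp_prog;
		struct mlx5e_xdp_buff mxbuf;	/* scratch, refilled per CQE;
						 * safe: one NAPI ctx per RQ
						 */
	};
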
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1c121b435016..19664fa7f217 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -2424,8 +2424,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- ethtool_sprintf(data, ptp_rq_stats_desc[i].format,
- MLX5E_PTP_CHANNEL_IX);
+ ethtool_puts(data, ptp_rq_stats_desc[i].format);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 8de6fcbd3a03..def5dea1463d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -54,7 +54,7 @@
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
-#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq0_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f1d908f61134..fef418e1ed1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2028,9 +2028,8 @@ err_out:
return err;
}
-static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+static bool mlx5_flow_has_geneve_opt(struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -2069,7 +2068,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
complete_all(&flow->del_hw_done);
- if (mlx5_flow_has_geneve_opt(flow))
+ if (mlx5_flow_has_geneve_opt(&attr->parse_attr->spec))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
if (flow->decap_route)
@@ -2574,12 +2573,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
if (err) {
- kvfree(tmp_spec);
NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
- return err;
+ } else {
+ err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
}
- err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ if (mlx5_flow_has_geneve_opt(tmp_spec))
+ mlx5_geneve_tlv_option_del(priv->mdev->geneve);
kvfree(tmp_spec);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 4fd853d19e31..55a8629f0792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -337,10 +337,11 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
};
}
-static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
+static void mlx5e_tx_skb_update_ts_flags(struct sk_buff *skb)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
}
static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
@@ -392,7 +393,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
sq->pc += wi->num_wqebbs;
@@ -625,7 +626,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
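
The rename pairs with the SOF_TIMESTAMPING_TX_SOFTWARE bit added in the en_ethtool.c hunk earlier: a driver advertising software TX timestamps must stamp every skb as late as possible before posting the descriptor. The helper now covers both flavors; a sketch of the contract, mirroring the code above:

	static void example_update_ts_flags(struct sk_buff *skb)
	{
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		/* no-op unless a software timestamp was requested */
		skb_tx_timestamp(skb);
	}
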
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7fb8a3381f84..4917d185d0c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1295,12 +1295,15 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
- enabled_events);
- if (ret)
- goto ec_vf_err;
- }
+ }
+
+ /* Enable ECVF vports */
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs,
+ enabled_events);
+ if (ret)
+ goto ec_vf_err;
}
/* Enable VF vports */
@@ -1331,9 +1334,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs);
+
if (mlx5_ecpf_vport_exists(esw->dev)) {
- if (mlx5_core_ec_sriov_enabled(esw->dev))
- mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
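
Beyond the restructuring, this hunk fixes a real bug visible in the removed line: unload passed esw_funcs.num_vfs where the EC-VF count belongs, and EC-VF unload was only reachable when an ECPF vport existed. The teardown now mirrors setup, schematically (statements shown out of their enclosing function):

	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_core_ec_sriov_enabled(esw->dev))	/* gate on own cap */
		mlx5_eswitch_unload_ec_vf_vports(esw,
						 esw->esw_funcs.num_ec_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
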
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a47c29571f64..1af76da8b132 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -527,7 +527,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
- int reformat_id = 0;
+ u32 reformat_id = 0;
unsigned int inlen;
int dst_cnt_size;
u32 *in, action;
@@ -580,23 +580,21 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, action, action);
if (!extended_dest && fte->act_dests.action.pkt_reformat) {
- struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
-
- if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
- reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
- if (reformat_id < 0) {
- mlx5_core_err(dev,
- "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
- pkt_reformat->reformat_type);
- err = reformat_id;
- goto err_out;
- }
- } else {
- reformat_id = fte->act_dests.action.pkt_reformat->id;
+ struct mlx5_pkt_reformat *pkt_reformat =
+ fte->act_dests.action.pkt_reformat;
+
+ err = mlx5_fs_get_packet_reformat_id(pkt_reformat,
+ &reformat_id);
+ if (err) {
+ mlx5_core_err(dev,
+ "Unsupported pkt_reformat type (%d)\n",
+ pkt_reformat->reformat_type);
+ goto err_out;
}
}
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ reformat_id);
if (fte->act_dests.action.modify_hdr) {
if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 6163bc98d94a..a8046200d376 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1830,14 +1830,35 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id)
+{
+ switch (pkt_reformat->owner) {
+ case MLX5_FLOW_RESOURCE_OWNER_FW:
+ *id = pkt_reformat->id;
+ return 0;
+ case MLX5_FLOW_RESOURCE_OWNER_SW:
+ return mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, id);
+ case MLX5_FLOW_RESOURCE_OWNER_HWS:
+ return mlx5_fs_hws_action_get_pkt_reformat_id(pkt_reformat, id);
+ default:
+ return -EINVAL;
+ }
+}
+
static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
struct mlx5_pkt_reformat *p2)
{
- return p1->owner == p2->owner &&
- (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
- p1->id == p2->id :
- mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
- mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+ int err1, err2;
+ u32 id1, id2;
+
+ if (p1->owner != p2->owner)
+ return false;
+
+ err1 = mlx5_fs_get_packet_reformat_id(p1, &id1);
+ err2 = mlx5_fs_get_packet_reformat_id(p2, &id2);
+
+ return !err1 && !err2 && id1 == id2;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
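
The helper centralizes owner dispatch (FW, SW steering, and the new HWS backend) and also untangles an error-handling wart visible in the fs_cmd.c hunk: there, reformat_id was a signed int so that a negative value could double as an error, which stops working once ids may use the full u32 range. Usage sketch:

	u32 id;
	int err;

	err = mlx5_fs_get_packet_reformat_id(pkt_reformat, &id);
	if (err)	/* unknown owner or backend lookup failure */
		return err;
	MLX5_SET(flow_context, in_flow_context, packet_reformat_id, id);
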
@@ -2207,6 +2228,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct mlx5_flow_handle *rule;
struct match_list *iter;
bool take_write = false;
+ bool try_again = false;
struct fs_fte *fte;
u64 version = 0;
int err;
@@ -2271,6 +2293,7 @@ skip_search:
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
if (!g->node.active) {
+ try_again = true;
up_write_ref_node(&g->node, false);
continue;
}
@@ -2292,7 +2315,8 @@ skip_search:
tree_put_node(&fte->node, false);
return rule;
}
- rule = ERR_PTR(-ENOENT);
+ err = try_again ? -EAGAIN : -ENOENT;
+ rule = ERR_PTR(err);
out:
kmem_cache_free(steering->ftes_cache, fte);
return rule;
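
The distinction matters to the caller: -ENOENT still means no matching FTE exists and a new one should be created, while -EAGAIN now means a candidate group was skipped because it was racing deactivation, so the search is worth repeating. A simplified sketch of the expected caller pattern (assumption: the real retry also refreshes the version it scans against):

	do {
		rule = try_add_to_existing_fg(ft, &match_head.list, spec,
					      flow_act, dest, dest_num,
					      version);
	} while (IS_ERR(rule) && PTR_ERR(rule) == -EAGAIN);

	if (IS_ERR(rule) && PTR_ERR(rule) == -ENOENT)
		rule = add_new_fte_to_group(ft, spec);	/* hypothetical */
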
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 0767239f651c..500826229b0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -58,6 +58,7 @@ struct mlx5_flow_definer {
enum mlx5_flow_resource_owner {
MLX5_FLOW_RESOURCE_OWNER_FW,
MLX5_FLOW_RESOURCE_OWNER_SW,
+ MLX5_FLOW_RESOURCE_OWNER_HWS,
};
struct mlx5_modify_hdr {
@@ -386,6 +387,9 @@ u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id);
+
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 5442a02c4097..69933addd921 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -278,7 +278,8 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
static void poll_sync_reset(struct timer_list *t)
{
- struct mlx5_fw_reset *fw_reset = from_timer(fw_reset, t, timer);
+ struct mlx5_fw_reset *fw_reset = timer_container_of(fw_reset, t,
+ timer);
struct mlx5_core_dev *dev = fw_reset->dev;
u32 fatal_error;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 624452ddebc0..cf7a1edd0530 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -779,7 +779,8 @@ static void mlx5_health_log_ts_update(struct work_struct *work)
static void poll_health(struct timer_list *t)
{
- struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
+ struct mlx5_core_dev *dev = timer_container_of(dev, t,
+ priv.health.timer);
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
u32 fatal_error;
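
Both hunks are the same mechanical migration: timer_container_of() is the current spelling of from_timer() and reduces to container_of() keyed on the timer_list member. Minimal sketch (re-arm interval illustrative):

	static void example_poll(struct timer_list *t)
	{
		struct mlx5_core_health *health =
			timer_container_of(health, t, timer);

		/* inspect *health, then re-arm */
		mod_timer(&health->timer, jiffies + HZ);
	}
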
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0979d672d47f..79ae3a51a4b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -32,6 +32,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
@@ -102,6 +103,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
netdev->netdev_ops = &mlx5i_netdev_ops;
netdev->ethtool_ops = &mlx5i_ethtool_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 65a94e46edcf..cec18efadc73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -643,13 +643,6 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
int pin = -1;
int err = 0;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -820,12 +813,6 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
return 0;
}
-static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
-{
- return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
- (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
-}
-
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
@@ -861,12 +848,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
goto unlock;
}
- /* Reject requests with unsupported flags */
- if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) {
- err = -EOPNOTSUPP;
- goto unlock;
- }
-
if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
@@ -1034,6 +1015,13 @@ static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
+ clock->ptp_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
+ if (mlx5_npps_real_time_supported(mdev))
+ clock->ptp_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
+
for (i = 0; i < clock->ptp_info.n_pins; i++) {
snprintf(clock->ptp_info.pin_config[i].name,
sizeof(clock->ptp_info.pin_config[i].name),
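
Instead of each driver filtering PTP_EXTTS/PTP_PEROUT request flags inside ->enable(), the driver now declares what it supports and the PTP core is expected to reject everything else before the callback runs; mlx5 keeps the duty-cycle flag conditional on nPPS support. Declarative sketch:

	static const struct ptp_clock_info example_ptp_info = {
		.owner			= THIS_MODULE,
		.name			= "example",
		/* core validates request flags against these masks */
		.supported_extts_flags	= PTP_RISING_EDGE |
					  PTP_FALLING_EDGE |
					  PTP_STRICT_FLAGS,
		.supported_perout_flags	= PTP_PEROUT_DUTY_CYCLE,
	};
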
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 972e8e9df585..9bc9bd83c232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -291,7 +291,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
- int nid = dev_to_node(device);
+ int nid = dev->priv.numa_node;
struct page *page;
u64 zero_addr = 1;
u64 addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 2c5f850c31f6..40024cfa3099 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -148,7 +148,7 @@ out:
* Free the IRQ and other resources such as rmap from the system.
* BUT doesn't free or remove reference from mlx5.
* This function is very important for the shutdown flow, where we need to
- * cleanup system resoruces but keep mlx5 objects alive,
+ * cleanup system resources but keep mlx5 objects alive,
* see mlx5_irq_table_free_irqs().
*/
static void mlx5_system_free_irq(struct mlx5_irq *irq)
@@ -588,7 +588,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
struct mlx5_irq *irq;
unsigned long index;
- /* There are cases in which we are destrying the irq_table before
+ /* There are cases in which we are destroying the irq_table before
* freeing all the IRQs, fast teardown for example. Hence, free the irqs
* which might not have been freed.
*/
@@ -617,7 +617,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec,
if (!mlx5_sf_max_functions(dev))
return 0;
if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
- mlx5_core_dbg(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+ mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index b5332c54d4fb..447ea3f8722c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -72,6 +72,11 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
return action->type;
}
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action)
+{
+ return action->ctx->mdev;
+}
+
static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
enum mlx5hws_context_shared_stc_type stc_type,
u8 tbl_type)
@@ -238,6 +243,7 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
enum mlx5hws_table_type table_type,
bool is_mirror)
{
+ struct mlx5hws_pool *pool;
bool use_fixup = false;
u32 fw_tbl_type;
u32 base_id;
@@ -253,13 +259,11 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
use_fixup = true;
break;
}
+ pool = stc_attr->ste_table.ste_pool;
if (!is_mirror)
- base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
- &stc_attr->ste_table.ste);
+ base_id = mlx5hws_pool_get_base_id(pool);
else
- base_id =
- mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
- &stc_attr->ste_table.ste);
+ base_id = mlx5hws_pool_get_base_mirror_id(pool);
*fixup_stc_attr = *stc_attr;
fixup_stc_attr->ste_table.ste_obj_id = base_id;
@@ -337,7 +341,7 @@ __must_hold(&ctx->ctrl_lock)
if (!mlx5hws_context_cap_dynamic_reparse(ctx))
stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ obj_0_id = mlx5hws_pool_get_base_id(stc_pool);
/* According to table/action limitation change the stc_attr */
use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
@@ -353,7 +357,7 @@ __must_hold(&ctx->ctrl_lock)
if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
u32 obj_1_id;
- obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ obj_1_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
&fixup_stc_attr,
@@ -393,11 +397,11 @@ __must_hold(&ctx->ctrl_lock)
stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
stc_attr.stc_offset = stc->offset;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
}
@@ -1186,14 +1190,15 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
struct mlx5hws_action_mh_pattern *pattern,
u32 log_bulk_size)
{
+ u16 num_actions, max_mh_actions = 0, hw_max_actions;
struct mlx5hws_context *ctx = action->ctx;
- u16 num_actions, max_mh_actions = 0;
int i, ret, size_in_bytes;
u32 pat_id, arg_id = 0;
__be64 *new_pattern;
size_t pat_max_sz;
pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ hw_max_actions = pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE;
size_in_bytes = pat_max_sz * sizeof(__be64);
new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
if (!new_pattern)
@@ -1203,16 +1208,20 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
for (i = 0; i < num_of_patterns; i++) {
size_t new_num_actions;
size_t cur_num_actions;
- u32 nope_location;
+ u32 nop_locations;
cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
- mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
- pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
- &new_num_actions, &nope_location,
- &new_pattern[i * pat_max_sz]);
+ ret = mlx5hws_pat_calc_nop(pattern[i].data, cur_num_actions,
+ hw_max_actions, &new_num_actions,
+ &nop_locations,
+ &new_pattern[i * pat_max_sz]);
+ if (ret) {
+ mlx5hws_err(ctx, "Too many actions after nop insertion\n");
+ goto free_new_pat;
+ }
- action[i].modify_header.nope_locations = nope_location;
+ action[i].modify_header.nop_locations = nop_locations;
action[i].modify_header.num_of_actions = new_num_actions;
max_mh_actions = max(max_mh_actions, new_num_actions);
@@ -1259,7 +1268,7 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
MLX5_GET(set_action_in, pattern[i].data, action_type);
} else {
/* Multiple modify actions require a pattern */
- if (unlikely(action[i].modify_header.nope_locations)) {
+ if (unlikely(action[i].modify_header.nop_locations)) {
size_t pattern_sz;
pattern_sz = action[i].modify_header.num_of_actions *
@@ -1361,8 +1370,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
struct mlx5hws_cmd_forward_tbl *fw_island;
struct mlx5hws_action *action;
- u32 i /*, packet_reformat_id*/;
- int ret;
+ int ret, last_dest_idx = -1;
+ u32 i;
if (num_dest <= 1) {
mlx5hws_err(ctx, "Action must have multiple dests\n");
@@ -1392,11 +1401,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = ignore_flow_level;
- /* ToDo: In SW steering we have a handling of 'go to WIRE'
- * destination here by upper layer setting 'is_wire_ft' flag
- * if the destination is wire.
- * This is because uplink should be last dest in the list.
- */
+ if (dests[i].is_wire_ft)
+ last_dest_idx = i;
break;
case MLX5HWS_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -1420,6 +1426,9 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
}
}
+ if (last_dest_idx != -1)
+ swap(dest_list[last_dest_idx], dest_list[num_dest - 1]);
+
fte_attr.dests_num = num_dest;
fte_attr.dests = dest_list;
@@ -1575,17 +1584,15 @@ hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
return definer;
}
-static struct mlx5hws_matcher_action_ste *
+static struct mlx5hws_range_action_table *
hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
struct mlx5hws_definer *definer,
u32 miss_ft_id)
{
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
- struct mlx5hws_action_default_stc *default_stc;
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_pool_attr pool_attr = {0};
struct mlx5hws_pool *ste_pool, *stc_pool;
- struct mlx5hws_pool_chunk *ste;
u32 *rtc_0_id, *rtc_1_id;
u32 obj_id;
int ret;
@@ -1604,7 +1611,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
pool_attr.alloc_log_sz = 1;
table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
if (!table_ste->pool) {
@@ -1616,8 +1622,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_0_id = &table_ste->rtc_0_id;
rtc_1_id = &table_ste->rtc_1_id;
ste_pool = table_ste->pool;
- ste = &table_ste->ste;
- ste->order = 1;
rtc_attr.log_size = 0;
rtc_attr.log_depth = 0;
@@ -1629,18 +1633,16 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_attr.fw_gen_wqe = true;
rtc_attr.is_scnd_range = true;
- obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_id(ste_pool);
rtc_attr.pd = ctx->pd_num;
rtc_attr.ste_base = obj_id;
- rtc_attr.ste_offset = ste->offset;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
/* STC is a single resource (obj_id), use any STC for the ID */
stc_pool = ctx->stc_pool;
- default_stc = ctx->common_res.default_stc;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
rtc_attr.stc_base = obj_id;
ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
@@ -1650,11 +1652,11 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
}
/* Create mirror RTC */
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
rtc_attr.ste_base = obj_id;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
rtc_attr.stc_base = obj_id;
ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
@@ -1677,9 +1679,9 @@ free_ste:
return NULL;
}
-static void
-hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
- struct mlx5hws_matcher_action_ste *table_ste)
+static void hws_action_destroy_dest_match_range_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste)
{
mutex_lock(&ctx->ctrl_lock);
@@ -1691,12 +1693,11 @@ hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
mutex_unlock(&ctx->ctrl_lock);
}
-static int
-hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
- struct mlx5hws_matcher_action_ste *table_ste,
- struct mlx5hws_action *hit_ft_action,
- struct mlx5hws_definer *range_definer,
- u32 min, u32 max)
+static int hws_action_create_dest_match_range_fill_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer, u32 min, u32 max)
{
struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
@@ -1792,7 +1793,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
u32 min, u32 max, u32 flags)
{
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_action *hit_ft_action;
struct mlx5hws_definer *definer;
struct mlx5hws_action *action;
@@ -1837,7 +1838,6 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- stc_attr.ste_table.ste = table_ste->ste;
stc_attr.ste_table.ste_pool = table_ste->pool;
stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
@@ -2110,21 +2110,23 @@ static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
u32 arg_idx,
u8 *arg_data,
u16 num_of_actions,
- u32 nope_locations)
+ u32 nop_locations)
{
u8 *new_arg_data = NULL;
int i, j;
- if (unlikely(nope_locations)) {
+ if (unlikely(nop_locations)) {
new_arg_data = kcalloc(num_of_actions,
MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
if (unlikely(!new_arg_data))
return;
- for (i = 0, j = 0; i < num_of_actions; i++, j++) {
- memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
- if (BIT(i) & nope_locations)
+ for (i = 0, j = 0; j < num_of_actions; i++, j++) {
+ if (BIT(i) & nop_locations)
j++;
+ memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE],
+ &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE],
+ MLX5HWS_MODIFY_ACTION_SIZE);
}
}
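
The corrected loop advances i over source slots and j over destination slots, leaving a pre-zeroed destination slot behind wherever nop_locations has a bit set; bounding on j keeps the copy inside the NOP-inflated output. Worked trace: with source [A, B], nop_locations = BIT(1) and num_of_actions = 3, i=0 copies A to slot 0, then at i=1 the NOP bit bumps j to 2 before B lands in slot 2, giving [A, 0, B]. The old loop bounded i by the output count, always copied from the start of arg_data, and used j as a byte rather than a slot offset. A standalone model of the fixed bookkeeping (assumes, as pattern construction guarantees, that a NOP never needs the final slot):

	static void remap_skipping_nops(const u8 *src, u8 *dst, int out_slots,
					u32 nop_locations, size_t slot_sz)
	{
		int i, j;

		for (i = 0, j = 0; j < out_slots; i++, j++) {
			if (BIT(i) & nop_locations)
				j++;	/* skip a pre-zeroed NOP slot */
			memcpy(&dst[j * slot_sz], &src[i * slot_sz], slot_sz);
		}
	}
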
@@ -2220,6 +2222,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
struct mlx5hws_action *action;
u32 arg_sz, arg_idx;
u8 *single_action;
+ u8 max_actions;
__be32 stc_idx;
rule_action = &apply->rule_action[setter->idx_double];
@@ -2247,21 +2250,23 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
*(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
- } else {
- /* Argument offset multiple with number of args per these actions */
- arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
- arg_idx = rule_action->modify_header.offset * arg_sz;
-
- apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
-
- if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
- apply->require_dep = 1;
- hws_action_modify_write(apply->queue,
- action->modify_header.arg_id + arg_idx,
- rule_action->modify_header.data,
- action->modify_header.num_of_actions,
- action->modify_header.nope_locations);
- }
+ return;
+ }
+
+ /* Scale the argument offset by the number of args these actions use */
+ max_actions = action->modify_header.max_num_of_actions;
+ arg_sz = mlx5hws_arg_get_arg_size(max_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nop_locations);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
index 64b76075f7f8..55a079fdd08f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -118,6 +118,12 @@ struct mlx5hws_action_template {
u8 only_term;
};
+struct mlx5hws_range_action_table {
+ struct mlx5hws_pool *pool;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+};
+
struct mlx5hws_action {
u8 type;
u8 flags;
@@ -130,7 +136,7 @@ struct mlx5hws_action {
u32 pat_id;
u32 arg_id;
__be64 single_action;
- u32 nope_locations;
+ u32 nop_locations;
u8 num_of_patterns;
u8 single_action_type;
u8 num_of_actions;
@@ -186,7 +192,7 @@ struct mlx5hws_action {
size_t size;
} remove_header;
struct {
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_action *hit_ft_action;
struct mlx5hws_definer *definer;
} range;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
new file mode 100644
index 000000000000..5766a9c82f96
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+static const char *
+hws_pool_opt_to_str(enum mlx5hws_pool_optimize opt)
+{
+ switch (opt) {
+ case MLX5HWS_POOL_OPTIMIZE_NONE:
+ return "rx-and-tx";
+ case MLX5HWS_POOL_OPTIMIZE_ORIG:
+ return "rx-only";
+ case MLX5HWS_POOL_OPTIMIZE_MIRROR:
+ return "tx-only";
+ default:
+ return "unknown";
+ }
+}
+
+static int
+hws_action_ste_table_create_pool(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ struct mlx5hws_pool_attr pool_attr = { 0 };
+
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.flags = MLX5HWS_POOL_FLAG_BUDDY;
+ pool_attr.opt_type = opt;
+ pool_attr.alloc_log_sz = log_sz;
+
+ action_tbl->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_tbl->pool) {
+ mlx5hws_err(ctx, "Failed to allocate STE pool\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hws_action_ste_table_create_single_rtc(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz, bool tx)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = { 0 };
+ u32 *rtc_id;
+
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* Action STEs use the default always hit definer. */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+
+ if (tx) {
+ rtc_attr.table_type = FS_FT_FDB_TX;
+ rtc_attr.ste_base =
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+ rtc_attr.stc_base =
+ mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_1_id;
+ } else {
+ rtc_attr.table_type = FS_FT_FDB_RX;
+ rtc_attr.ste_base = mlx5hws_pool_get_base_id(action_tbl->pool);
+ rtc_attr.stc_base = mlx5hws_pool_get_base_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_0_id;
+ }
+
+ return mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_id);
+}
+
+static int
+hws_action_ste_table_create_rtcs(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ int err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, false);
+ if (err)
+ return err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, true);
+ if (err) {
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, action_tbl->rtc_0_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+hws_action_ste_table_destroy_rtcs(struct mlx5hws_action_ste_table *action_tbl)
+{
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_0_id);
+}
+
+static int
+hws_action_ste_table_create_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = { 0 };
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste_pool = action_tbl->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ return mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+}
+
+static struct mlx5hws_action_ste_table *
+hws_action_ste_table_alloc(struct mlx5hws_action_ste_pool_element *parent_elem)
+{
+ enum mlx5hws_pool_optimize opt = parent_elem->opt;
+ struct mlx5hws_context *ctx = parent_elem->ctx;
+ struct mlx5hws_action_ste_table *action_tbl;
+ size_t log_sz;
+ int err;
+
+ log_sz = min(parent_elem->log_sz ?
+ parent_elem->log_sz +
+ MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ :
+ MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ,
+ MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ);
+
+ action_tbl = kzalloc(sizeof(*action_tbl), GFP_KERNEL);
+ if (!action_tbl)
+ return ERR_PTR(-ENOMEM);
+
+ err = hws_action_ste_table_create_pool(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto free_tbl;
+
+ err = hws_action_ste_table_create_rtcs(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto destroy_pool;
+
+ err = hws_action_ste_table_create_stc(ctx, action_tbl);
+ if (err)
+ goto destroy_rtcs;
+
+ action_tbl->parent_elem = parent_elem;
+ INIT_LIST_HEAD(&action_tbl->list_node);
+ action_tbl->last_used = jiffies;
+ list_add(&action_tbl->list_node, &parent_elem->available);
+ parent_elem->log_sz = log_sz;
+
+ mlx5hws_dbg(ctx,
+ "Allocated %s action STE table log_sz %zu; STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(opt), log_sz,
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ return action_tbl;
+
+destroy_rtcs:
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+destroy_pool:
+ mlx5hws_pool_destroy(action_tbl->pool);
+free_tbl:
+ kfree(action_tbl);
+
+ return ERR_PTR(err);
+}
+
+static void
+hws_action_ste_table_destroy(struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_context *ctx = action_tbl->parent_elem->ctx;
+
+ mlx5hws_dbg(ctx,
+ "Destroying %s action STE table: STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(action_tbl->parent_elem->opt),
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+ mlx5hws_pool_destroy(action_tbl->pool);
+
+ list_del(&action_tbl->list_node);
+ kfree(action_tbl);
+}
+
+static int
+hws_action_ste_pool_element_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool_element *elem,
+ enum mlx5hws_pool_optimize opt)
+{
+ elem->ctx = ctx;
+ elem->opt = opt;
+ INIT_LIST_HEAD(&elem->available);
+ INIT_LIST_HEAD(&elem->full);
+
+ return 0;
+}
+
+static void hws_action_ste_pool_element_destroy(
+ struct mlx5hws_action_ste_pool_element *elem)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ /* This should be empty, but attempt to free its elements anyway. */
+ list_for_each_entry_safe(action_tbl, p, &elem->full, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static int hws_action_ste_pool_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ enum mlx5hws_pool_optimize opt;
+ int err;
+
+ mutex_init(&pool->lock);
+
+ /* Rules which are added for both RX and TX must use the same action STE
+ * indices for both. If we were to use a single table, then RX-only and
+ * TX-only rules would waste the unused entries. Thus, we use separate
+ * table sets for the three cases.
+ */
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ err = hws_action_ste_pool_element_init(ctx, &pool->elems[opt],
+ opt);
+ if (err)
+ goto destroy_elems;
+ pool->elems[opt].parent_pool = pool;
+ }
+
+ return 0;
+
+destroy_elems:
+ while (opt-- > MLX5HWS_POOL_OPTIMIZE_NONE)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+
+ return err;
+}
+
+static void hws_action_ste_pool_destroy(struct mlx5hws_action_ste_pool *pool)
+{
+ int opt;
+
+ for (opt = MLX5HWS_POOL_OPTIMIZE_MAX - 1;
+ opt >= MLX5HWS_POOL_OPTIMIZE_NONE; opt--)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+}
+
+static void hws_action_ste_pool_element_collect_stale(
+ struct mlx5hws_action_ste_pool_element *elem, struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+ unsigned long expire_time, now;
+
+ expire_time = secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS);
+ now = jiffies;
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node) {
+ if (mlx5hws_pool_full(action_tbl->pool) &&
+ time_before(action_tbl->last_used + expire_time, now))
+ list_move(&action_tbl->list_node, cleanup);
+ }
+}
+
+static void hws_action_ste_table_cleanup_list(struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ list_for_each_entry_safe(action_tbl, p, cleanup, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static void hws_action_ste_pool_cleanup(struct work_struct *work)
+{
+ enum mlx5hws_pool_optimize opt;
+ struct mlx5hws_context *ctx;
+ LIST_HEAD(cleanup);
+ int i;
+
+ ctx = container_of(work, struct mlx5hws_context,
+ action_ste_cleanup.work);
+
+ for (i = 0; i < ctx->queues; i++) {
+ struct mlx5hws_action_ste_pool *p = &ctx->action_ste_pool[i];
+
+ mutex_lock(&p->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE;
+ opt < MLX5HWS_POOL_OPTIMIZE_MAX; opt++)
+ hws_action_ste_pool_element_collect_stale(
+ &p->elems[opt], &cleanup);
+ mutex_unlock(&p->lock);
+ }
+
+ hws_action_ste_table_cleanup_list(&cleanup);
+
+ schedule_delayed_work(&ctx->action_ste_cleanup,
+ secs_to_jiffies(
+ MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+}
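The staleness test in hws_action_ste_pool_element_collect_stale() pairs a fully-free pool with a wraparound-safe time comparison. A minimal sketch of the time check alone, under the same expiry constant (hws_sketch_is_expired() is a hypothetical name):

	/* True once a full expiry interval has passed since last use. */
	static bool hws_sketch_is_expired(unsigned long last_used)
	{
		unsigned long expire =
			secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS);

		/* time_before() is safe across jiffies wraparound. */
		return time_before(last_used + expire, jiffies);
	}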
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_ste_pool *pool;
+ size_t queues = ctx->queues;
+ int i, err;
+
+ pool = kcalloc(queues, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ for (i = 0; i < queues; i++) {
+ err = hws_action_ste_pool_init(ctx, &pool[i]);
+ if (err)
+ goto free_pool;
+ }
+
+ ctx->action_ste_pool = pool;
+
+ INIT_DELAYED_WORK(&ctx->action_ste_cleanup,
+ hws_action_ste_pool_cleanup);
+ schedule_delayed_work(
+ &ctx->action_ste_cleanup,
+ secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+
+ return 0;
+
+free_pool:
+ while (i--)
+ hws_action_ste_pool_destroy(&pool[i]);
+ kfree(pool);
+
+ return err;
+}
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx)
+{
+ size_t queues = ctx->queues;
+ int i;
+
+ cancel_delayed_work_sync(&ctx->action_ste_cleanup);
+
+ for (i = 0; i < queues; i++)
+ hws_action_ste_pool_destroy(&ctx->action_ste_pool[i]);
+
+ kfree(ctx->action_ste_pool);
+}
+
+static struct mlx5hws_action_ste_pool_element *
+hws_action_ste_choose_elem(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx)
+{
+ if (skip_rx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_MIRROR];
+
+ if (skip_tx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_ORIG];
+
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_NONE];
+}
+
+static int
+hws_action_ste_table_chunk_alloc(struct mlx5hws_action_ste_table *action_tbl,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ int err;
+
+ err = mlx5hws_pool_chunk_alloc(action_tbl->pool, &chunk->ste);
+ if (err)
+ return err;
+
+ chunk->action_tbl = action_tbl;
+ action_tbl->last_used = jiffies;
+
+ return 0;
+}
+
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mlx5hws_action_ste_pool_element *elem;
+ struct mlx5hws_action_ste_table *action_tbl;
+ bool found;
+ int err;
+
+ if (skip_rx && skip_tx)
+ return -EINVAL;
+
+ mutex_lock(&pool->lock);
+
+ elem = hws_action_ste_choose_elem(pool, skip_rx, skip_tx);
+
+ mlx5hws_dbg(elem->ctx,
+ "Allocating action STEs skip_rx %d skip_tx %d order %d\n",
+ skip_rx, skip_tx, chunk->ste.order);
+
+ found = false;
+ list_for_each_entry(action_tbl, &elem->available, list_node) {
+ if (!hws_action_ste_table_chunk_alloc(action_tbl, chunk)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ action_tbl = hws_action_ste_table_alloc(elem);
+ if (IS_ERR(action_tbl)) {
+ err = PTR_ERR(action_tbl);
+ goto out;
+ }
+
+ err = hws_action_ste_table_chunk_alloc(action_tbl, chunk);
+ if (err)
+ goto out;
+ }
+
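+ /* No free chunks remain in this table - park it on the 'full' list. */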
+ if (mlx5hws_pool_empty(action_tbl->pool))
+ list_move(&action_tbl->list_node, &elem->full);
+
+ err = 0;
+
+out:
+ mutex_unlock(&pool->lock);
+
+ return err;
+}
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mutex *lock = &chunk->action_tbl->parent_elem->parent_pool->lock;
+
+ mlx5hws_dbg(chunk->action_tbl->pool->ctx,
+ "Freeing action STEs offset %d order %d\n",
+ chunk->ste.offset, chunk->ste.order);
+
+ mutex_lock(lock);
+ mlx5hws_pool_chunk_free(chunk->action_tbl->pool, &chunk->ste);
+ chunk->action_tbl->last_used = jiffies;
+ list_move(&chunk->action_tbl->list_node,
+ &chunk->action_tbl->parent_elem->available);
+ mutex_unlock(lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
new file mode 100644
index 000000000000..a8ba97359e31
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef ACTION_STE_POOL_H_
+#define ACTION_STE_POOL_H_
+
+#define MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ 10
+#define MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ 1
+#define MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ 20
+
+#define MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS 300
+#define MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS 300
+
+struct mlx5hws_action_ste_pool_element;
+
+struct mlx5hws_action_ste_table {
+ struct mlx5hws_action_ste_pool_element *parent_elem;
+ /* Wraps the RTC and STE range for this given action. */
+ struct mlx5hws_pool *pool;
+ /* Match STEs use this STC to jump to this pool's RTC. */
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct list_head list_node;
+ unsigned long last_used;
+};
+
+struct mlx5hws_action_ste_pool_element {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_action_ste_pool *parent_pool;
+ size_t log_sz; /* Log of the size of the largest table so far. */
+ enum mlx5hws_pool_optimize opt;
+ struct list_head available;
+ struct list_head full;
+};
+
+/* Central repository of action STEs. The context contains one of these pools
+ * per queue.
+ */
+struct mlx5hws_action_ste_pool {
+ /* Protects the entire pool. We have one pool per queue and only one
+ * operation can be active per rule at a given time. Thus this lock
+ * protects solely against concurrent garbage collection and we expect
+ * very little contention.
+ */
+ struct mutex lock;
+ struct mlx5hws_action_ste_pool_element elems[MLX5HWS_POOL_OPTIMIZE_MAX];
+};
+
+/* A chunk of STEs and the table it was allocated from. Used by rules. */
+struct mlx5hws_action_ste_chunk {
+ struct mlx5hws_action_ste_table *action_tbl;
+ struct mlx5hws_pool_chunk ste;
+};
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx);
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx);
+
+/* Callers are expected to fill chunk->ste.order. On success, this function
+ * populates chunk->action_tbl and chunk->ste.offset.
+ */
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk);
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk);
+
+#endif /* ACTION_STE_POOL_H_ */
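Putting the header's contract together, here is a minimal caller sketch. The pool pointer and the use_action_stes() consumer are hypothetical stand-ins for illustration:

	struct mlx5hws_action_ste_chunk chunk = {};
	int err;

	chunk.ste.order = 3; /* caller fills the order: 2^3 = 8 action STEs */
	err = mlx5hws_action_ste_chunk_alloc(pool, false, false, &chunk);
	if (err)
		return err;

	/* chunk.action_tbl and chunk.ste.offset are now populated. */
	use_action_stes(chunk.action_tbl->rtc_0_id, chunk.ste.offset); /* hypothetical */

	mlx5hws_action_ste_chunk_free(&chunk);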
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 19dce1ba512d..9e057f808ea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -46,10 +46,14 @@ static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
}
}
-static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 priority,
- u8 size_log)
+ u8 size_log,
+ struct mlx5hws_matcher_attr *attr)
{
+ struct mlx5hws_bwc_matcher *first_matcher =
+ bwc_matcher->complex_first_bwc_matcher;
+
memset(attr, 0, sizeof(*attr));
attr->priority = priority;
@@ -61,6 +65,9 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
attr->rule.num_log = size_log;
attr->resizable = true;
attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+
+ attr->isolated_matcher_end_ft_id =
+ first_matcher ? first_matcher->matcher->end_ft_id : 0;
}
int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
@@ -83,20 +90,27 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
for (i = 0; i < bwc_queues; i++)
INIT_LIST_HEAD(&bwc_matcher->rules[i]);
- hws_bwc_matcher_init_attr(&attr,
+ hws_bwc_matcher_init_attr(bwc_matcher,
priority,
- MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+ MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG,
+ &attr);
bwc_matcher->priority = priority;
bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+ bwc_matcher->at = kcalloc(bwc_matcher->size_of_at_array,
+ sizeof(*bwc_matcher->at), GFP_KERNEL);
+ if (!bwc_matcher->at)
+ goto free_bwc_matcher_rules;
+
/* create dummy action template */
bwc_matcher->at[0] =
mlx5hws_action_template_create(action_types ?
action_types : init_action_types);
if (!bwc_matcher->at[0]) {
mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
- goto free_bwc_matcher_rules;
+ goto free_bwc_matcher_at_array;
}
bwc_matcher->num_of_at = 1;
@@ -126,6 +140,8 @@ free_mt:
mlx5hws_match_template_destroy(bwc_matcher->mt);
free_at:
mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_at_array:
+ kfree(bwc_matcher->at);
free_bwc_matcher_rules:
kfree(bwc_matcher->rules);
err:
@@ -153,6 +169,7 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
return NULL;
atomic_set(&bwc_matcher->num_of_rules, 0);
+ atomic_set(&bwc_matcher->rehash_required, false);
/* Check if the required match params can be all matched
* in single STE, otherwise complex matcher is needed.
@@ -192,6 +209,7 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
for (i = 0; i < bwc_matcher->num_of_at; i++)
mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+ kfree(bwc_matcher->at);
mlx5hws_match_template_destroy(bwc_matcher->mt);
kfree(bwc_matcher->rules);
@@ -208,16 +226,19 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
"BWC matcher destroy: matcher still has %d rules\n",
num_of_rules);
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ if (bwc_matcher->complex)
+ mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
+ else
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
kfree(bwc_matcher);
return 0;
}
-static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
- u16 queue_id,
- u32 *pending_rules,
- bool drain)
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
{
unsigned long timeout = jiffies +
secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
@@ -320,16 +341,12 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- atomic_inc(&bwc_matcher->num_of_rules);
bwc_rule->bwc_queue_idx = idx;
list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
}
static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-
- atomic_dec(&bwc_matcher->num_of_rules);
list_del_init(&bwc_rule->list_node);
}
@@ -352,7 +369,8 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
if (unlikely(ret))
return ret;
@@ -382,6 +400,7 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
mutex_lock(queue_lock);
ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+ atomic_dec(&bwc_matcher->num_of_rules);
hws_bwc_rule_list_remove(bwc_rule);
mutex_unlock(queue_lock);
@@ -391,9 +410,13 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
{
- int ret;
+ bool is_complex = !!bwc_rule->bwc_matcher->complex;
+ int ret = 0;
- ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (is_complex)
+ ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule);
+ else
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
mlx5hws_bwc_rule_free(bwc_rule);
return ret;
@@ -433,9 +456,8 @@ hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
-
- return ret;
+ return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
}
static int
@@ -456,7 +478,8 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
if (unlikely(ret))
mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
@@ -469,21 +492,9 @@ hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
/* check the match RTC size */
- if ((bwc_matcher->size_log +
- MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
- MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
- (caps->ste_alloc_log_max - 1))
- return true;
-
- /* check the action RTC size */
- if ((bwc_matcher->size_log +
- MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP +
- ilog2(roundup_pow_of_two(bwc_matcher->matcher->action_ste.max_stes)) +
- MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT) >
- (caps->ste_alloc_log_max - 1))
- return true;
-
- return false;
+ return (bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
+ (caps->ste_alloc_log_max - 1);
}
static bool
@@ -520,6 +531,23 @@ hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_rule_action rule_actions[])
{
enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+ void *p;
+
+ if (unlikely(bwc_matcher->num_of_at >= bwc_matcher->size_of_at_array)) {
+ if (bwc_matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+ bwc_matcher->size_of_at_array *= 2;
+ p = krealloc(bwc_matcher->at,
+ bwc_matcher->size_of_at_array *
+ sizeof(*bwc_matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ bwc_matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ bwc_matcher->at = p;
+ }
hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
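The array growth above doubles size_of_at_array and rolls it back if krealloc() fails, leaving the old buffer intact. The same pattern in isolation, as a generic sketch with hypothetical names:

	/* Double *cap up to max_cap; on failure keep the old buffer and size. */
	static int grow_array(void **buf, u8 *cap, size_t elem_sz, u8 max_cap)
	{
		void *p;

		if (*cap >= max_cap)
			return -ENOMEM;

		*cap *= 2;
		p = krealloc(*buf, *cap * elem_sz, __GFP_ZERO | GFP_KERNEL);
		if (!p) {
			*cap /= 2; /* roll back; krealloc left the old buffer alone */
			return -ENOMEM;
		}

		*buf = p;

		return 0;
	}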
@@ -582,100 +610,79 @@ hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
+ bool move_error = false, poll_error = false, drain_error = false;
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
- struct mlx5hws_bwc_rule **bwc_rules;
struct mlx5hws_rule_attr rule_attr;
- u32 *pending_rules;
- int i, j, ret = 0;
- bool all_done;
- u16 burst_th;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5hws_send_engine *queue;
+ struct list_head *rules_list;
+ u32 pending_rules;
+ int i, ret = 0;
mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
- pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
- if (!pending_rules)
- return -ENOMEM;
-
- bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
- if (!bwc_rules) {
- ret = -ENOMEM;
- goto free_pending_rules;
- }
-
for (i = 0; i < bwc_queues; i++) {
if (list_empty(&bwc_matcher->rules[i]))
- bwc_rules[i] = NULL;
- else
- bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
- struct mlx5hws_bwc_rule,
- list_node);
- }
-
- do {
- all_done = true;
+ continue;
- for (i = 0; i < bwc_queues; i++) {
- rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
- burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+ pending_rules = 0;
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ rules_list = &bwc_matcher->rules[i];
- for (j = 0; j < burst_th && bwc_rules[i]; j++) {
- rule_attr.burst = !!((j + 1) % burst_th);
- ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
- bwc_rules[i]->rule,
- &rule_attr);
- if (unlikely(ret)) {
- mlx5hws_err(ctx,
- "Moving BWC rule failed during rehash (%d)\n",
- ret);
- goto free_bwc_rules;
- }
-
- all_done = false;
- pending_rules[i]++;
- bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
- &bwc_matcher->rules[i]) ?
- NULL : list_next_entry(bwc_rules[i], list_node);
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ bwc_rule->rule,
+ &rule_attr);
+ if (unlikely(ret && !move_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = true;
+ }
- ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
- &pending_rules[i], false);
- if (unlikely(ret)) {
- mlx5hws_err(ctx,
- "Moving BWC rule failed during rehash (%d)\n",
- ret);
- goto free_bwc_rules;
- }
+ pending_rules++;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ false);
+ if (unlikely(ret && !poll_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = true;
}
}
- } while (!all_done);
- /* drain all the bwc queues */
- for (i = 0; i < bwc_queues; i++) {
- if (pending_rules[i]) {
- u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
-
- mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
- ret = hws_bwc_queue_poll(ctx, queue_id,
- &pending_rules[i], true);
- if (unlikely(ret)) {
+ if (pending_rules) {
+ queue = &ctx->send_queue[rule_attr.queue_id];
+ mlx5hws_send_engine_flush_queue(queue);
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ true);
+ if (unlikely(ret && !drain_error)) {
mlx5hws_err(ctx,
- "Moving BWC rule failed during rehash (%d)\n", ret);
- goto free_bwc_rules;
+ "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
+ ret);
+ drain_error = true;
}
}
}
-free_bwc_rules:
- kfree(bwc_rules);
-free_pending_rules:
- kfree(pending_rules);
+ if (move_error || poll_error || drain_error)
+ ret = -EINVAL;
return ret;
}
static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- return hws_bwc_matcher_move_all_simple(bwc_matcher);
+ if (!bwc_matcher->complex)
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+
+ return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
}
static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
@@ -686,9 +693,10 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
struct mlx5hws_matcher *new_matcher;
int ret;
- hws_bwc_matcher_init_attr(&matcher_attr,
+ hws_bwc_matcher_init_attr(bwc_matcher,
bwc_matcher->priority,
- bwc_matcher->size_log);
+ bwc_matcher->size_log,
+ &matcher_attr);
old_matcher = bwc_matcher->matcher;
new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
@@ -708,15 +716,18 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
}
ret = hws_bwc_matcher_move_all(bwc_matcher);
- if (ret) {
- mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
- return -ENOMEM;
- }
+ if (ret)
+ mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
+
+ /* An error during rehash can't be rolled back.
+ * The best option here is to let the rehash complete and remove the
+ * old matcher - the matcher can't be left in the 'in_resize' state.
+ */
bwc_matcher->matcher = new_matcher;
mlx5hws_matcher_destroy(old_matcher);
- return 0;
+ return ret;
}
static int
@@ -733,9 +744,9 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
/* It is possible that other rule has already performed rehash.
* Need to check again if we really need rehash.
- * If the reason for rehash was size, but not any more - skip rehash.
*/
- if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher,
+ if (!atomic_read(&bwc_matcher->rehash_required) &&
+ !hws_bwc_matcher_rehash_size_needed(bwc_matcher,
atomic_read(&bwc_matcher->num_of_rules)))
return 0;
@@ -746,6 +757,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* - destroy the old matcher
*/
+ atomic_set(&bwc_matcher->rehash_required, false);
+
ret = hws_bwc_matcher_extend_size(bwc_matcher);
if (ret)
return ret;
@@ -753,17 +766,51 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
return hws_bwc_matcher_move(bwc_matcher);
}
-static int
-hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
{
- /* Rehash by action template doesn't require any additional checking.
- * The bwc_matcher already contains the new action template.
- * Just do the usual rehash:
- * - create new matcher
- * - move all the rules to the new matcher
- * - destroy the old matcher
- */
- return hws_bwc_matcher_move(bwc_matcher);
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx >= 0))
+ return at_idx;
+
+ /* we need to extend BWC matcher action templates array */
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ /* Check again - perhaps another thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (at_idx >= 0)
+ goto out;
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
+ }
+
+out:
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ return at_idx;
}
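hws_bwc_rule_get_at_idx() uses a classic lock-promotion dance: drop the per-queue lock, take the all-queues lock, and re-check before doing the exclusive work. The shape of the idiom, as a sketch with hypothetical locks and helpers:

	/* Caller holds the small (per-queue) lock on entry and on exit. */
	mutex_unlock(&small_lock);
	mutex_lock(&big_lock);

	/* Re-check: another thread may have done the work while unlocked. */
	if (work_already_done())
		goto out;

	do_exclusive_work();

out:
	mutex_unlock(&big_lock);
	mutex_lock(&small_lock);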
int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
@@ -786,50 +833,16 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
mutex_lock(queue_lock);
- /* check if rehash needed due to missing action template */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx);
if (unlikely(at_idx < 0)) {
- /* we need to extend BWC matcher action templates array */
mutex_unlock(queue_lock);
- hws_bwc_lock_all_queues(ctx);
-
- ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
- if (unlikely(ret)) {
- hws_bwc_unlock_all_queues(ctx);
- return ret;
- }
-
- /* action templates array was extended, we need the last idx */
- at_idx = bwc_matcher->num_of_at - 1;
-
- ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
- bwc_matcher->at[at_idx]);
- if (unlikely(ret)) {
- /* Action template attach failed, possibly due to
- * requiring more action STEs.
- * Need to attempt creating new matcher with all
- * the action templates, including the new one.
- */
- ret = hws_bwc_matcher_rehash_at(bwc_matcher);
- if (unlikely(ret)) {
- mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
- bwc_matcher->at[at_idx] = NULL;
- bwc_matcher->num_of_at--;
-
- hws_bwc_unlock_all_queues(ctx);
-
- mlx5hws_err(ctx,
- "BWC rule insertion: rehash AT failed (%d)\n", ret);
- return ret;
- }
- }
-
- hws_bwc_unlock_all_queues(ctx);
- mutex_lock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)",
+ ret);
+ return -EINVAL;
}
/* check if number of rules requires rehash */
- num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+ num_of_rules = atomic_inc_return(&bwc_matcher->num_of_rules);
if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
mutex_unlock(queue_lock);
@@ -843,6 +856,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
bwc_matcher->size_log,
ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -867,6 +881,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
* Try rehash by size and insert rule again - last chance.
*/
+ atomic_set(&bwc_matcher->rehash_required, true);
mutex_unlock(queue_lock);
hws_bwc_lock_all_queues(ctx);
@@ -875,6 +890,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (ret) {
mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -890,6 +906,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret)) {
mutex_unlock(queue_lock);
mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -921,11 +938,18 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
- ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
- params->match_buf,
- rule_actions,
- flow_source,
- bwc_queue_idx);
+ if (bwc_matcher->complex)
+ ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
+ params,
+ flow_source,
+ rule_actions,
+ bwc_queue_idx);
+ else
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
if (unlikely(ret)) {
mlx5hws_bwc_rule_free(bwc_rule);
return NULL;
@@ -952,52 +976,11 @@ hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
mutex_lock(queue_lock);
- /* check if rehash needed due to missing action template */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx);
if (unlikely(at_idx < 0)) {
- /* we need to extend BWC matcher action templates array */
mutex_unlock(queue_lock);
- hws_bwc_lock_all_queues(ctx);
-
- /* check again - perhaps other thread already did extend_at */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
- if (likely(at_idx < 0)) {
- ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
- if (unlikely(ret)) {
- hws_bwc_unlock_all_queues(ctx);
- mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
- return -EINVAL;
- }
-
- /* action templates array was extended, we need the last idx */
- at_idx = bwc_matcher->num_of_at - 1;
-
- ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
- bwc_matcher->at[at_idx]);
- if (unlikely(ret)) {
- /* Action template attach failed, possibly due to
- * requiring more action STEs.
- * Need to attempt creating new matcher with all
- * the action templates, including the new one.
- */
- ret = hws_bwc_matcher_rehash_at(bwc_matcher);
- if (unlikely(ret)) {
- mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
- bwc_matcher->at[at_idx] = NULL;
- bwc_matcher->num_of_at--;
-
- hws_bwc_unlock_all_queues(ctx);
-
- mlx5hws_err(ctx,
- "BWC rule update: rehash AT failed (%d)\n",
- ret);
- return ret;
- }
- }
- }
-
- hws_bwc_unlock_all_queues(ctx);
- mutex_lock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule update: failed getting AT\n");
+ return -EINVAL;
}
ret = hws_bwc_rule_update_sync(bwc_rule,
@@ -1023,5 +1006,10 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
return -EINVAL;
}
- return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+ /* For a complex rule, the update happens on the second (isolated) matcher */
+ if (bwc_rule->isolated_bwc_rule)
+ return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule,
+ rule_actions);
+ else
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 47f7ed141553..d21fc247a510 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -10,9 +10,7 @@
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
/* Max number of AT attach operations for the same matcher.
- * When the limit is reached, next attempt to attach new AT
- * will result in creation of a new matcher and moving all
- * the rules to this matcher.
+ * When the limit is reached, a larger buffer is allocated for the ATs.
*/
#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
@@ -20,20 +18,27 @@
#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+struct mlx5hws_bwc_matcher_complex_data;
struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
- struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
- u32 priority;
+ struct mlx5hws_action_template **at;
+ struct mlx5hws_bwc_matcher_complex_data *complex;
+ struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
u8 num_of_at;
+ u8 size_of_at_array;
u8 size_log;
+ u32 priority;
atomic_t num_of_rules;
+ atomic_t rehash_required;
struct list_head *rules;
};
struct mlx5hws_bwc_rule {
struct mlx5hws_bwc_matcher *bwc_matcher;
struct mlx5hws_rule *rule;
+ struct mlx5hws_bwc_rule *isolated_bwc_rule;
+ struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
u16 bwc_queue_idx;
struct list_head list_node;
};
@@ -65,6 +70,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 flow_source,
struct mlx5hws_rule_attr *rule_attr);
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain);
+
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index 9fb059a6511f..ca7501c57468 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -3,6 +3,22 @@
#include "internal.h"
+#define HWS_CLEAR_MATCH_PARAM(mask, field) \
+ MLX5_SET(fte_match_param, (mask)->match_buf, field, 0)
+
+#define HWS_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
+
+static const struct rhashtable_params hws_refcount_hash = {
+ .key_len = sizeof_field(struct mlx5hws_bwc_complex_rule_hash_node,
+ match_buf),
+ .key_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
+ match_buf),
+ .head_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
+ hash_node),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
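hws_refcount_hash keys each node on its full match buffer, so identical match parameters map to one refcounted node. A sketch of an insert-or-reuse flow with these parameters; the ht table and the node allocation are assumptions for illustration:

	struct mlx5hws_bwc_complex_rule_hash_node *node, *old;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	memcpy(node->match_buf, match_buf, sizeof(node->match_buf));

	old = rhashtable_lookup_get_insert_fast(&ht, &node->hash_node,
						hws_refcount_hash);
	if (IS_ERR(old)) {
		kfree(node);
		return PTR_ERR(old);
	}
	if (old) {
		/* Same match params already tracked - reuse the existing node. */
		kfree(node);
		node = old;
	}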
+
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
@@ -48,20 +64,1093 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
return is_complex;
}
+static void
+hws_bwc_matcher_complex_params_clear_fld(struct mlx5hws_context *ctx,
+ enum mlx5hws_definer_fname fname,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ switch (fname) {
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+ case MLX5HWS_DEFINER_FNAME_IP_VERSION_O:
+ case MLX5HWS_DEFINER_FNAME_IP_VERSION_I:
+ /* IP address matching comes with strict requirements that also
+ * mandate matching on ethertype/ip_version, so don't clear
+ * these fields - keep them in both parts of the complex matcher
+ */
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_CFI_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_CFI_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_ID_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_ID_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_second_cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_second_svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_second_cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_second_svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_IHL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ipv4_ihl);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_IHL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ipv4_ihl);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_DSCP_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_dscp);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_DSCP_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_dscp);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_ECN_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_ecn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_ECN_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_ecn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_TTL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ttl_hoplimit);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_TTL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ttl_hoplimit);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_DST_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_SRC_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_DST_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_SRC_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_FRAG_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.frag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_FRAG_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.frag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_ipv6_flow_label);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_ipv6_flow_label);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_SPORT_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_sport);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_sport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_SPORT_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_sport);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_sport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_DPORT_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_dport);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_dport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_DPORT_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM:
+ case MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_tcp_seq_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_tcp_ack_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.inner_tcp_seq_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.inner_tcp_ack_num);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTP_TEID:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_teid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.gtpu_first_ext_dw_0);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTPU_DW2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_2.outer_first_mpls_over_gre);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_2.outer_first_mpls_over_udp);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.geneve_tlv_option_0_data);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_0);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_0);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_1);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_2);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_2);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_3);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.vxlan_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0:
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_next_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_opt_len);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_OAM:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_oam);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_PROTO:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.geneve_protocol_type);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_SOURCE_QP:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_sqn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_SOURCE_GVMI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_port);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_0:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_1);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_3:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_4:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_4);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_5:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_5);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_7:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_7);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_A:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_a);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_C:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_c_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_K:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_k_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_S:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_s_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_key.key);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ICMP_DW1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_header_data);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_code);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.icmpv6_header_data);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_code);
+ break;
+ case MLX5HWS_DEFINER_FNAME_MPLS0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.outer_first_mpls);
+ break;
+ case MLX5HWS_DEFINER_FNAME_MPLS0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.inner_first_mpls);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_0:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_1);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_3:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK:
+ /* assuming this is flex parser for geneve option */
+ if ((fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK &&
+ caps->flex_parser_id_geneve_tlv_option_0 != 0) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK &&
+ caps->flex_parser_id_geneve_tlv_option_0 != 1) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK &&
+ caps->flex_parser_id_geneve_tlv_option_0 != 2) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK &&
+ caps->flex_parser_id_geneve_tlv_option_0 != 3) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK &&
+ caps->flex_parser_id_geneve_tlv_option_0 != 4) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK &&
+ caps->flex_parser_id_geneve_tlv_option_0 != 5) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK &&
+ caps->flex_parser_id_geneve_tlv_option_0 != 6) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK &&
+ caps->flex_parser_id_geneve_tlv_option_0 != 7)) {
+ mlx5hws_err(ctx,
+ "Complex params: unsupported field %s (%d), flex parser ID for geneve is %d\n",
+ mlx5hws_definer_fname_to_str(fname), fname,
+ caps->flex_parser_id_geneve_tlv_option_0);
+ break;
+ }
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.geneve_tlv_option_0_exist);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_6:
+ default:
+ mlx5hws_err(ctx, "Complex params: unsupported field %s (%d)\n",
+ mlx5hws_definer_fname_to_str(fname), fname);
+ break;
+ }
+}
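HWS_CLEAR_MATCH_PARAM is simply MLX5_SET with a zero value; clearing the outer source MAC low bits, for instance, expands to:

	MLX5_SET(fte_match_param, (mask)->match_buf, outer_headers.smac_15_0, 0);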
+
+static bool
+hws_bwc_matcher_complex_params_comb_is_valid(struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 combination_num)
+{
+ bool m1[MLX5HWS_DEFINER_FNAME_MAX] = {0};
+ bool m2[MLX5HWS_DEFINER_FNAME_MAX] = {0};
+ bool is_first_matcher;
+ int i;
+
+ for (i = 0; i < fc_sz; i++) {
+ is_first_matcher = !(combination_num & BIT(i));
+ if (is_first_matcher)
+ m1[fc[i].fname] = true;
+ else
+ m2[fc[i].fname] = true;
+ }
+
+ /* Not all fields can be split into separate matchers; some must
+ * stay together on the same matcher. For example, the whole IPv6
+ * address must be on one matcher so that we can deduce whether the
+ * address is IPv6 or IPv4.
+ */
+ if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
+ (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
+ return false;
+
+ if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
+ (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
+ return false;
+
+ if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
+ (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
+ return false;
+
+ if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
+ (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
+ return false;
+
+ /* Don't split outer IPv6 dest address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]))
+ return false;
+
+ /* Don't split outer IPv6 source address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]))
+ return false;
+
+ /* Don't split inner IPv6 dest address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]))
+ return false;
+
+ /* Don't split inner IPv6 source address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]))
+ return false;
+
+ /* Don't split GRE parameters. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_GRE_C] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_K] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_S] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_GRE_C] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_K] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_S] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]))
+ return false;
+
+ /* Don't split TCP ack/seq numbers. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
+ m1[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
+ m2[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]))
+ return false;
+
+ /* Don't split flex parser. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]))
+ return false;
+
+ return true;
+}
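To make the bit encoding concrete, a worked example of one combination, written as a comment:

	/* Example: fc_sz = 3, combination_num = 0b101:
	 *   bit 0 set   -> fc[0] goes to the second matcher (m2)
	 *   bit 1 clear -> fc[1] stays on the first matcher (m1)
	 *   bit 2 set   -> fc[2] goes to the second matcher (m2)
	 * The caller tries all 2^fc_sz assignments until one passes this check.
	 */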
+
+static void
+hws_bwc_matcher_complex_params_comb_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_parameters *m,
+ struct mlx5hws_match_parameters *m1,
+ struct mlx5hws_match_parameters *m2,
+ struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 combination_num)
+{
+ bool is_first_matcher;
+ int i;
+
+ memcpy(m1->match_buf, m->match_buf, m->match_sz);
+ memcpy(m2->match_buf, m->match_buf, m->match_sz);
+
+ for (i = 0; i < fc_sz; i++) {
+ is_first_matcher = !(combination_num & BIT(i));
+ hws_bwc_matcher_complex_params_clear_fld(ctx,
+ fc[i].fname,
+ is_first_matcher ?
+ m2 : m1);
+ }
+
+ MLX5_SET(fte_match_param, m2->match_buf,
+ misc_parameters_2.metadata_reg_c_6, -1);
+}
+
+static void
+hws_bwc_matcher_complex_params_destroy(struct mlx5hws_match_parameters *mask_1,
+ struct mlx5hws_match_parameters *mask_2)
+{
+ kfree(mask_1->match_buf);
+ kfree(mask_2->match_buf);
+}
+
+static int
+hws_bwc_matcher_complex_params_create(struct mlx5hws_context *ctx,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask,
+ struct mlx5hws_match_parameters *mask_1,
+ struct mlx5hws_match_parameters *mask_2)
+{
+ struct mlx5hws_definer_fc *fc;
+ u32 num_of_combinations;
+ int fc_sz = 0;
+ int res = 0;
+ u32 i;
+
+ if (MLX5_GET(fte_match_param, mask->match_buf,
+ misc_parameters_2.metadata_reg_c_6)) {
+ mlx5hws_err(ctx, "Complex matcher: REG_C_6 matching is reserved\n");
+ res = -EINVAL;
+ goto out;
+ }
+
+ mask_1->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ mask_2->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ if (!mask_1->match_buf || !mask_2->match_buf) {
+ mlx5hws_err(ctx, "Complex matcher: failed to allocate match_param\n");
+ res = -ENOMEM;
+ goto free_params;
+ }
+
+ mask_1->match_sz = mask->match_sz;
+ mask_2->match_sz = mask->match_sz;
+
+ fc = mlx5hws_definer_conv_match_params_to_compressed_fc(ctx,
+ match_criteria,
+ mask->match_buf,
+ &fc_sz);
+ if (!fc) {
+ res = -ENOMEM;
+ goto free_params;
+ }
+
+ if (fc_sz >= sizeof(num_of_combinations) * BITS_PER_BYTE) {
+ mlx5hws_err(ctx,
+ "Complex matcher: too many match parameters (%d)\n",
+ fc_sz);
+ res = -EINVAL;
+ goto free_fc;
+ }
+
+ /* We have list of all the match fields from the match parameter.
+ * Now try all the possibilities of splitting them into two match
+ * buffers and look for the supported combination.
+ */
+ num_of_combinations = 1 << fc_sz;
+
+ /* Start from combination 1 - combination 0 keeps all fields in the first matcher, which we already know is too complex */
+ for (i = 1; i < num_of_combinations; i++) {
+ if (!hws_bwc_matcher_complex_params_comb_is_valid(fc, fc_sz, i))
+ continue;
+
+ hws_bwc_matcher_complex_params_comb_create(ctx,
+ mask, mask_1, mask_2,
+ fc, fc_sz, i);
+ /* We now have two separate sets of match params.
+ * Check if each of them can be used in its own matcher.
+ */
+ if (!mlx5hws_bwc_match_params_is_complex(ctx,
+ match_criteria,
+ mask_1) &&
+ !mlx5hws_bwc_match_params_is_complex(ctx,
+ match_criteria,
+ mask_2))
+ break;
+ }
+
+ if (i == num_of_combinations) {
+ /* We've scanned all the combinations, but to no avail */
+ mlx5hws_err(ctx, "Complex matcher: couldn't find match params combination\n");
+ res = -EINVAL;
+ goto free_fc;
+ }
+
+ kfree(fc);
+ return 0;
+
+free_fc:
+ kfree(fc);
+free_params:
+ hws_bwc_matcher_complex_params_destroy(mask_1, mask_2);
+out:
+ return res;
+}
+
+static int
+hws_bwc_isolated_table_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ struct mlx5hws_table_attr tbl_attr = {0};
+ struct mlx5hws_table *isolated_tbl;
+ int ret = 0;
+
+ tbl_attr.type = table->type;
+ tbl_attr.level = table->level;
+
+ bwc_matcher->complex->isolated_tbl =
+ mlx5hws_table_create(ctx, &tbl_attr);
+ isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ if (!isolated_tbl)
+ return -EINVAL;
+
+ /* Set the default miss of the isolated table to
+ * point to the end anchor of the original matcher.
+ */
+ mlx5hws_cmd_set_attr_connect_miss_tbl(ctx,
+ isolated_tbl->fw_ft_type,
+ isolated_tbl->type,
+ &ft_attr);
+ ft_attr.table_miss_id = bwc_matcher->matcher->end_ft_id;
+
+ ret = mlx5hws_cmd_flow_table_modify(ctx->mdev,
+ &ft_attr,
+ isolated_tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed setting isolated tbl default miss\n");
+ goto destroy_tbl;
+ }
+
+ return 0;
+
+destroy_tbl:
+ mlx5hws_table_destroy(isolated_tbl);
+ return ret;
+}
+
+static void hws_bwc_isolated_table_destroy(struct mlx5hws_table *isolated_tbl)
+{
+ /* This table is isolated: no other table points to it, so there is
+ * no need to disconnect it from anywhere, and destroying it won't
+ * affect any other table's miss behavior.
+ */
+ mlx5hws_table_destroy(isolated_tbl);
+}
+
+static int
+hws_bwc_isolated_matcher_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_context *ctx = table->ctx;
+ int ret;
+
+ isolated_bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+ if (!isolated_bwc_matcher)
+ return -ENOMEM;
+
+ bwc_matcher->complex->isolated_bwc_matcher = isolated_bwc_matcher;
+
+ /* Isolated BWC matcher needs access to the first BWC matcher */
+ isolated_bwc_matcher->complex_first_bwc_matcher = bwc_matcher;
+
+ /* Isolated matcher needs to match on REG_C_6,
+ * so make sure its criteria bit is on.
+ */
+ match_criteria_enable |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+
+ ret = mlx5hws_bwc_matcher_create_simple(isolated_bwc_matcher,
+ isolated_tbl,
+ 0,
+ match_criteria_enable,
+ mask,
+ NULL);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated BWC matcher\n");
+ goto free_matcher;
+ }
+
+ return 0;
+
+free_matcher:
+ kfree(bwc_matcher->complex->isolated_bwc_matcher);
+ return ret;
+}
+
+static void
+hws_bwc_isolated_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ kfree(bwc_matcher);
+}
+
+static int
+hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table)
+{
+ struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ struct mlx5hws_action_mh_pattern ptrn;
+ int ret = 0;
+
+ /* Create action to jump to isolated table */
+
+ bwc_matcher->complex->action_go_to_tbl =
+ mlx5hws_action_create_dest_table(ctx,
+ isolated_tbl,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_go_to_tbl) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create go-to-tbl action\n");
+ return -EINVAL;
+ }
+
+ /* Create modify header action to set REG_C_6 */
+
+ MLX5_SET(set_action_in, modify_hdr_action,
+ action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ field, MLX5_MODI_META_REG_C_6);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ length, 0); /* zero means length of 32 */
+ MLX5_SET(set_action_in, modify_hdr_action, offset, 0);
+ MLX5_SET(set_action_in, modify_hdr_action, data, 0);
+
+ ptrn.data = (void *)modify_hdr_action;
+ ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE;
+
+ bwc_matcher->complex->action_metadata =
+ mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_metadata) {
+ ret = -EINVAL;
+ goto destroy_action_go_to_tbl;
+ }
+
+ /* Create last action */
+
+ bwc_matcher->complex->action_last =
+ mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_last) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create last action\n");
+ ret = -EINVAL;
+ goto destroy_action_metadata;
+ }
+
+ return 0;
+
+destroy_action_metadata:
+ mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
+destroy_action_go_to_tbl:
+ mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+ return ret;
+}
+
+static void
+hws_bwc_isolated_actions_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_action_destroy(bwc_matcher->complex->action_last);
+ mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
+ mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+}
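
One detail in the set_action_in encoding above is easy to miss: the length field of the SET action cannot represent 32 in its bit width, so the value 0 is reserved to mean a full 32-bit write (here, of REG_C_6). A tiny self-contained illustration of that convention - not mlx5's actual layout, which is defined in mlx5_ifc.h:

#include <stdint.h>

/* Hypothetical 5-bit length field: 1..31 encode themselves, and 0 is
 * reused to mean "write all 32 bits", as noted in the comments above.
 */
static inline uint32_t set_action_len(uint32_t len_field)
{
	return len_field ? len_field : 32;
}
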
+
int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_table *table,
u32 priority,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
{
- mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
- return -EOPNOTSUPP;
+ enum mlx5hws_action_type complex_init_action_types[3];
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_match_parameters mask_1 = {0};
+ struct mlx5hws_match_parameters mask_2 = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ int ret;
+
+ ret = hws_bwc_matcher_complex_params_create(table->ctx,
+ match_criteria_enable,
+ mask, &mask_1, &mask_2);
+ if (ret)
+ goto err;
+
+ bwc_matcher->complex =
+ kzalloc(sizeof(*bwc_matcher->complex), GFP_KERNEL);
+ if (!bwc_matcher->complex) {
+ ret = -ENOMEM;
+ goto free_masks;
+ }
+
+ ret = rhashtable_init(&bwc_matcher->complex->refcount_hash,
+ &hws_refcount_hash);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed to initialize rhashtable\n");
+ goto free_complex;
+ }
+
+ mutex_init(&bwc_matcher->complex->hash_lock);
+ ida_init(&bwc_matcher->complex->metadata_ida);
+
+	/* Create the initial action template for the first matcher.
+	 * Usually the initial AT is just a dummy, but in the case of a
+	 * complex matcher we know exactly which actions it should have.
+	 */
+
+ complex_init_action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ complex_init_action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ complex_init_action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ /* Create the first matcher */
+
+ ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ &mask_1,
+ complex_init_action_types);
+ if (ret)
+ goto destroy_ida;
+
+ /* Create isolated table to hold the second isolated matcher */
+
+ ret = hws_bwc_isolated_table_create(bwc_matcher, table);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated table\n");
+ goto destroy_first_matcher;
+ }
+
+ /* Now create the second BWC matcher - the isolated one */
+
+ ret = hws_bwc_isolated_matcher_create(bwc_matcher, table,
+ match_criteria_enable, &mask_2);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated matcher\n");
+ goto destroy_isolated_tbl;
+ }
+
+ /* Create action for isolated matcher's rules */
+
+ ret = hws_bwc_isolated_actions_create(bwc_matcher, table);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated actions\n");
+ goto destroy_isolated_matcher;
+ }
+
+ hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
+ return 0;
+
+destroy_isolated_matcher:
+ isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
+ hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
+destroy_isolated_tbl:
+ hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
+destroy_first_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+destroy_ida:
+ ida_destroy(&bwc_matcher->complex->metadata_ida);
+ mutex_destroy(&bwc_matcher->complex->hash_lock);
+ rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
+free_complex:
+ kfree(bwc_matcher->complex);
+ bwc_matcher->complex = NULL;
+free_masks:
+ hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
+err:
+ return ret;
}
void
mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- /* nothing to do here */
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher =
+ bwc_matcher->complex->isolated_bwc_matcher;
+
+ hws_bwc_isolated_actions_destroy(bwc_matcher);
+ hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
+ hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ ida_destroy(&bwc_matcher->complex->metadata_ida);
+ mutex_destroy(&bwc_matcher->complex->hash_lock);
+ rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
+ kfree(bwc_matcher->complex);
+ bwc_matcher->complex = NULL;
+}
+
+static void
+hws_bwc_matcher_complex_hash_lock(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mutex_lock(&bwc_matcher->complex->hash_lock);
+}
+
+static void
+hws_bwc_matcher_complex_hash_unlock(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mutex_unlock(&bwc_matcher->complex->hash_lock);
+}
+
+static int
+hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node;
+ struct rhashtable *refcount_hash;
+ int ret, i;
+
+ bwc_rule->complex_hash_node = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node))
+ return -ENOMEM;
+
+ ret = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free_node;
+ node->tag = ret;
+
+ refcount_set(&node->refcount, 1);
+
+	/* Clear the match buffer - turn off all the unrelated fields
+	 * in accordance with the match params mask of the first
+	 * matcher out of the two parts of the complex matcher.
+	 * The resulting masked buffer is the key for the hash.
+	 */
+ for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
+ node->match_buf[i] = params->match_buf[i] &
+ bwc_matcher->mt->match_param[i];
+
+ refcount_hash = &bwc_matcher->complex->refcount_hash;
+ old_node = rhashtable_lookup_get_insert_fast(refcount_hash,
+ &node->hash_node,
+ hws_refcount_hash);
+ if (IS_ERR(old_node)) {
+ ret = PTR_ERR(old_node);
+ goto err_free_ida;
+ }
+
+ if (old_node) {
+ /* Rule with the same tag already exists - update refcount */
+ refcount_inc(&old_node->refcount);
+		/* Let the new rule use the same tag as the existing rule.
+		 * Note that the rule creation process has no indication that
+		 * a rule with similar matching params already exists - no
+		 * harm is done when this rule is overwritten by the same STE.
+		 * There would be some performance advantage in skipping such
+		 * cases, so this is left for future optimization.
+		 */
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+ kfree(node);
+ node = old_node;
+ }
+
+ bwc_rule->complex_hash_node = node;
+ return 0;
+
+err_free_ida:
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+err_free_node:
+ kfree(node);
+ return ret;
+}
+
+static void
+hws_bwc_rule_complex_hash_node_put(struct mlx5hws_bwc_rule *bwc_rule,
+ bool *is_last_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_complex_rule_hash_node *node;
+
+ if (is_last_rule)
+ *is_last_rule = false;
+
+ node = bwc_rule->complex_hash_node;
+ if (refcount_dec_and_test(&node->refcount)) {
+ rhashtable_remove_fast(&bwc_matcher->complex->refcount_hash,
+ &node->hash_node,
+ hws_refcount_hash);
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+ kfree(node);
+ if (is_last_rule)
+ *is_last_rule = true;
+ }
+
+ bwc_rule->complex_hash_node = NULL;
}
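
The get/put pair above is a refcounted get-or-insert keyed by the masked match buffer: the first rule with a given key allocates a tag, duplicates just bump the refcount, and the last put frees both the node and the tag. A self-contained single-threaded sketch of the same shape (the driver serializes callers with hash_lock and uses an rhashtable instead of this toy linked list):

#include <stdlib.h>

struct tag_node {
	unsigned int key;
	unsigned int refcount;
	struct tag_node *next;
};

static struct tag_node *nodes;

static struct tag_node *tag_node_get(unsigned int key)
{
	struct tag_node *n;

	for (n = nodes; n; n = n->next) {
		if (n->key == key) {
			n->refcount++;	/* duplicate: share the tag */
			return n;
		}
	}

	n = calloc(1, sizeof(*n));	/* first user: allocate a node */
	if (!n)
		return NULL;
	n->key = key;
	n->refcount = 1;
	n->next = nodes;
	nodes = n;
	return n;
}

static int tag_node_put(struct tag_node *node)
{
	struct tag_node **pp;

	if (--node->refcount)
		return 0;		/* still referenced */

	for (pp = &nodes; *pp; pp = &(*pp)->next) {
		if (*pp == node) {
			*pp = node->next;	/* unlink the last user */
			break;
		}
	}
	free(node);
	return 1;			/* this was the last rule */
}
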
int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
@@ -70,19 +1159,271 @@ int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_rule_action rule_actions[],
u16 bwc_queue_idx)
{
- mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
- "Complex rule is not supported yet\n");
- return -EOPNOTSUPP;
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_rule_action rule_actions_1[3] = {0};
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ u32 *match_buf_2;
+ u32 metadata_val;
+ int ret = 0;
+
+ isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
+ bwc_rule->isolated_bwc_rule =
+ mlx5hws_bwc_rule_alloc(isolated_bwc_matcher);
+ if (unlikely(!bwc_rule->isolated_bwc_rule))
+ return -ENOMEM;
+
+ hws_bwc_matcher_complex_hash_lock(bwc_matcher);
+
+	/* Get a new hash node for this complex rule.
+	 * If this is a unique set of match params for the first matcher,
+	 * we will get a new hash node with a newly allocated IDA tag.
+	 * Otherwise we get an existing node with an updated refcount.
+	 */
+ ret = hws_bwc_rule_complex_hash_node_get(bwc_rule, params);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "Complex rule: failed getting RHT node for this rule\n");
+ goto free_isolated_rule;
+ }
+
+	/* There is no need to clear the match buffer's fields according
+	 * to what will actually be matched on the first and second
+	 * matchers. Both matchers were created with the appropriate masks,
+	 * and each of them holds the appropriate field copy array, so rule
+	 * creation will use only the fields that will be copied in
+	 * accordance with the setters in the field copy array.
+	 * We do, however, need to temporarily allocate a match buffer for
+	 * the second (isolated) rule in order to not modify the user's
+	 * match params buffer.
+	 */
+
+ match_buf_2 = kmemdup(params->match_buf,
+ MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ if (unlikely(!match_buf_2)) {
+ mlx5hws_err(ctx, "Complex rule: failed allocating match_buf\n");
+ ret = -ENOMEM;
+ goto hash_node_put;
+ }
+
+ /* On 2nd matcher, use unique 32-bit ID as a matching tag */
+ metadata_val = bwc_rule->complex_hash_node->tag;
+ MLX5_SET(fte_match_param, match_buf_2,
+ misc_parameters_2.metadata_reg_c_6, metadata_val);
+
+ /* Isolated rule's rule_actions contain all the original actions */
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule->isolated_bwc_rule,
+ match_buf_2,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
+ kfree(match_buf_2);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Complex rule: failed creating isolated BWC rule (%d)\n",
+ ret);
+ goto hash_node_put;
+ }
+
+ /* First rule's rule_actions contain setting metadata and
+ * jump to isolated table that contains the second matcher.
+ * Set metadata value to a unique value for this rule.
+ */
+
+ MLX5_SET(set_action_in, modify_hdr_action,
+ action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ field, MLX5_MODI_META_REG_C_6);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ length, 0); /* zero means length of 32 */
+ MLX5_SET(set_action_in, modify_hdr_action,
+ offset, 0);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ data, metadata_val);
+
+ rule_actions_1[0].action = bwc_matcher->complex->action_metadata;
+ rule_actions_1[0].modify_header.offset = 0;
+ rule_actions_1[0].modify_header.data = modify_hdr_action;
+
+ rule_actions_1[1].action = bwc_matcher->complex->action_go_to_tbl;
+ rule_actions_1[2].action = bwc_matcher->complex->action_last;
+
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions_1,
+ flow_source,
+ bwc_queue_idx);
+
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Complex rule: failed creating first BWC rule (%d)\n",
+ ret);
+ goto destroy_isolated_rule;
+ }
+
+ hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
+
+ return 0;
+
+destroy_isolated_rule:
+ mlx5hws_bwc_rule_destroy_simple(bwc_rule->isolated_bwc_rule);
+hash_node_put:
+ hws_bwc_rule_complex_hash_node_put(bwc_rule, NULL);
+free_isolated_rule:
+ hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
+ mlx5hws_bwc_rule_free(bwc_rule->isolated_bwc_rule);
+ return ret;
}
int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
{
- return 0;
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_rule *isolated_bwc_rule;
+ int ret_isolated, ret;
+ bool is_last_rule;
+
+ hws_bwc_matcher_complex_hash_lock(bwc_rule->bwc_matcher);
+
+ hws_bwc_rule_complex_hash_node_put(bwc_rule, &is_last_rule);
+ bwc_rule->rule->skip_delete = !is_last_rule;
+
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "BWC complex rule: failed destroying first rule\n");
+
+ isolated_bwc_rule = bwc_rule->isolated_bwc_rule;
+ ret_isolated = mlx5hws_bwc_rule_destroy_simple(isolated_bwc_rule);
+ if (unlikely(ret_isolated))
+ mlx5hws_err(ctx, "BWC complex rule: failed destroying second (isolated) rule\n");
+
+ hws_bwc_matcher_complex_hash_unlock(bwc_rule->bwc_matcher);
+
+ mlx5hws_bwc_rule_free(isolated_bwc_rule);
+
+ return ret || ret_isolated;
}
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+static void
+hws_bwc_matcher_clear_hash_rtcs(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
- "Moving complex rule is not supported yet\n");
- return -EOPNOTSUPP;
+ struct mlx5hws_bwc_complex_rule_hash_node *node;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&bwc_matcher->complex->refcount_hash, &iter);
+ rhashtable_walk_start(&iter);
+
+ while ((node = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(node))
+ continue;
+ node->rtc_valid = false;
+ }
+
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+int
+mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ bool move_error = false, poll_error = false;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *tmp_bwc_rule;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mlx5hws_table *isolated_tbl;
+ struct mlx5hws_rule *tmp_rule;
+ struct list_head *rules_list;
+ u32 expected_completions = 1;
+ u32 end_ft_id;
+ int i, ret;
+
+ /* We are rehashing the matcher that is the first part of the complex
+ * matcher. Need to update the isolated matcher to point to the end_ft
+ * of this new matcher. This needs to be done before moving any rules
+ * to prevent possible steering loops.
+ */
+ isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
+ ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl, end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed updating end_ft of isolated matcher (%d)\n",
+ ret);
+ return ret;
+ }
+
+ hws_bwc_matcher_clear_hash_rtcs(bwc_matcher);
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
+ continue;
+
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ list_for_each_entry(tmp_bwc_rule, rules_list, list_node) {
+			/* Check if a rule with the same tag has already
+			 * been moved.
+			 */
+ if (tmp_bwc_rule->complex_hash_node->rtc_valid) {
+				/* This rule is a duplicate of a rule with
+				 * the same tag that was moved earlier.
+				 * Just update this rule's RTCs.
+				 */
+ tmp_bwc_rule->rule->rtc_0 =
+ tmp_bwc_rule->complex_hash_node->rtc_0;
+ tmp_bwc_rule->rule->rtc_1 =
+ tmp_bwc_rule->complex_hash_node->rtc_1;
+ tmp_bwc_rule->rule->matcher =
+ tmp_bwc_rule->rule->matcher->resize_dst;
+ continue;
+ }
+
+			/* First time we're moving a rule with this tag.
+ * Move it for real.
+ */
+ tmp_rule = tmp_bwc_rule->rule;
+ tmp_rule->skip_delete = false;
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ tmp_rule,
+ &rule_attr);
+ if (unlikely(ret && !move_error)) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = true;
+ }
+
+ expected_completions = 1;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &expected_completions,
+ true);
+ if (unlikely(ret && !poll_error)) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = true;
+ }
+
+ /* Done moving the rule to the new matcher,
+ * now update RTCs for all the duplicated rules.
+ */
+ tmp_bwc_rule->complex_hash_node->rtc_0 =
+ tmp_bwc_rule->rule->rtc_0;
+ tmp_bwc_rule->complex_hash_node->rtc_1 =
+ tmp_bwc_rule->rule->rtc_1;
+
+ tmp_bwc_rule->complex_hash_node->rtc_valid = true;
+ }
+ }
+
+ if (move_error || poll_error)
+ ret = -EINVAL;
+
+ return ret;
}
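
The duplicate handling in the move loop deserves a standalone illustration: only the first rule carrying a given tag is physically moved, and every later duplicate just adopts the destination cached in the shared hash node. A self-contained sketch, assuming each rule points at a shared per-tag state:

#include <stdbool.h>
#include <stddef.h>

struct tag_state {
	bool moved;	/* rtc_valid analogue */
	int dest;	/* cached rtc_0/rtc_1 analogue */
};

struct rule {
	struct tag_state *tag;
	int dest;
};

static void move_all(struct rule *rules, size_t n,
		     int (*move_one)(struct rule *r))
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct rule *r = &rules[i];

		if (r->tag->moved) {
			/* Duplicate of an already-moved rule: adopt
			 * the cached destination, no hardware work.
			 */
			r->dest = r->tag->dest;
			continue;
		}

		r->dest = move_one(r);	/* real (slow) move */
		r->tag->dest = r->dest;
		r->tag->moved = true;
	}
}
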
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
index 340f0688e394..a6887c7e39d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
@@ -4,6 +4,27 @@
#ifndef HWS_BWC_COMPLEX_H_
#define HWS_BWC_COMPLEX_H_
+struct mlx5hws_bwc_complex_rule_hash_node {
+ u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM];
+ u32 tag;
+ refcount_t refcount;
+ bool rtc_valid;
+ u32 rtc_0;
+ u32 rtc_1;
+ struct rhash_head hash_node;
+};
+
+struct mlx5hws_bwc_matcher_complex_data {
+ struct mlx5hws_table *isolated_tbl;
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_action *action_metadata;
+ struct mlx5hws_action *action_go_to_tbl;
+ struct mlx5hws_action *action_last;
+ struct rhashtable refcount_hash;
+ struct mutex hash_lock; /* Protect the refcount rhashtable */
+ struct ida metadata_ida;
+};
+
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index e8f98c109b99..9c83753e4592 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -406,7 +406,6 @@ int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
- MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 51d9e0291ac1..fa6bff210266 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -70,7 +70,6 @@ struct mlx5hws_cmd_rtc_create_attr {
u32 pd;
u32 stc_base;
u32 ste_base;
- u32 ste_offset;
u32 miss_ft_id;
bool fw_gen_wqe;
u8 update_index_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index 9cda2774fd64..428dae869706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -34,7 +34,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
/* Create an STC pool per FT type */
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
@@ -159,10 +158,16 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto pools_uninit;
+ ret = mlx5hws_action_ste_pool_init(ctx);
+ if (ret)
+ goto close_queues;
+
INIT_LIST_HEAD(&ctx->tbl_list);
return 0;
+close_queues:
+ mlx5hws_send_queues_close(ctx);
pools_uninit:
hws_context_pools_uninit(ctx);
uninit_pd:
@@ -175,6 +180,7 @@ static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
return;
+ mlx5hws_action_ste_pool_uninit(ctx);
mlx5hws_send_queues_close(ctx);
hws_context_pools_uninit(ctx);
hws_context_uninit_pd(ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 38c3647444ad..3f8938c73dc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -39,6 +39,8 @@ struct mlx5hws_context {
struct mlx5hws_cmd_query_caps *caps;
u32 pd_num;
struct mlx5hws_pool *stc_pool;
+ struct mlx5hws_action_ste_pool *action_ste_pool; /* One per queue */
+ struct delayed_work action_ste_cleanup;
struct mlx5hws_context_common_res common_res;
struct mlx5hws_pattern_cache *pattern_cache;
struct mlx5hws_definer_cache *definer_cache;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 696275fd0ce2..91568d6c1dac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -118,7 +118,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
{
enum mlx5hws_table_type tbl_type = matcher->tbl->type;
struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
- struct mlx5hws_pool_chunk *ste;
struct mlx5hws_pool *ste_pool;
u64 icm_addr_0 = 0;
u64 icm_addr_1 = 0;
@@ -134,12 +133,11 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->end_ft_id,
matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
- ste = &matcher->match_ste.ste;
ste_pool = matcher->match_ste.pool;
if (ste_pool) {
- ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ ste_0_id = mlx5hws_pool_get_base_id(ste_pool);
if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ ste_1_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
}
seq_printf(f, ",%d,%d,%d,%d",
@@ -148,19 +146,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->match_ste.rtc_1_id,
(int)ste_1_id);
- ste = &matcher->action_ste.ste;
- ste_pool = matcher->action_ste.pool;
- if (ste_pool) {
- ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
- if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
- else
- ste_1_id = -1;
- } else {
- ste_0_id = -1;
- ste_1_id = -1;
- }
-
ft_attr.type = matcher->tbl->fw_ft_type;
ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
matcher->end_ft_id,
@@ -170,10 +155,7 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
if (ret)
return ret;
- seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
- matcher->action_ste.rtc_0_id, (int)ste_0_id,
- matcher->action_ste.rtc_1_id, (int)ste_1_id,
- 0,
+ seq_printf(f, ",-1,-1,-1,-1,0,0x%llx,0x%llx\n",
mlx5hws_debug_icm_to_idx(icm_addr_0),
mlx5hws_debug_icm_to_idx(icm_addr_1));
@@ -387,14 +369,17 @@ static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context
if (!stc_pool)
return 0;
- if (stc_pool->resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->resource[0]);
+ if (stc_pool->resource) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx,
+ stc_pool->resource);
if (ret)
return ret;
}
- if (stc_pool->mirror_resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->mirror_resource[0]);
+ if (stc_pool->mirror_resource) {
+ struct mlx5hws_pool_resource *res = stc_pool->mirror_resource;
+
+ ret = hws_debug_dump_context_stc_resource(f, ctx, res);
if (ret)
return ret;
}
@@ -402,10 +387,41 @@ static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context
return 0;
}
+static void
+hws_debug_dump_action_ste_table(struct seq_file *f,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ int ste_0_id = mlx5hws_pool_get_base_id(action_tbl->pool);
+ int ste_1_id = mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE,
+ HWS_PTR_TO_ID(action_tbl),
+ action_tbl->rtc_0_id, ste_0_id,
+ action_tbl->rtc_1_id, ste_1_id);
+}
+
+static void hws_debug_dump_action_ste_pool(struct seq_file *f,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ struct mlx5hws_action_ste_table *action_tbl;
+ enum mlx5hws_pool_optimize opt;
+
+ mutex_lock(&pool->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ list_for_each_entry(action_tbl, &pool->elems[opt].available,
+ list_node) {
+ hws_debug_dump_action_ste_table(f, action_tbl);
+ }
+ }
+ mutex_unlock(&pool->lock);
+}
+
static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
{
struct mlx5hws_table *tbl;
- int ret;
+ int ret, i;
ret = hws_debug_dump_context_info(f, ctx);
if (ret)
@@ -425,6 +441,9 @@ static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ct
return ret;
}
+ for (i = 0; i < ctx->queues; i++)
+ hws_debug_dump_action_ste_pool(f, &ctx->action_ste_pool[i]);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
index e44e7ae28f93..89c396f9f266 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
@@ -26,6 +26,8 @@ enum mlx5hws_debug_res_type {
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE = 4300,
};
static inline u64
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index c8cc0c8115f5..d45e1145d197 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -158,6 +158,218 @@ struct mlx5hws_definer_conv_data {
u32 match_flags;
};
+#define HWS_DEFINER_ENTRY(name)[MLX5HWS_DEFINER_FNAME_##name] = #name
+
+static const char * const hws_definer_fname_to_str[] = {
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_TYPE_I),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_ID_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_I),
+ HWS_DEFINER_ENTRY(IPV4_IHL_O),
+ HWS_DEFINER_ENTRY(IPV4_IHL_I),
+ HWS_DEFINER_ENTRY(IP_DSCP_O),
+ HWS_DEFINER_ENTRY(IP_DSCP_I),
+ HWS_DEFINER_ENTRY(IP_ECN_O),
+ HWS_DEFINER_ENTRY(IP_ECN_I),
+ HWS_DEFINER_ENTRY(IP_TTL_O),
+ HWS_DEFINER_ENTRY(IP_TTL_I),
+ HWS_DEFINER_ENTRY(IPV4_DST_O),
+ HWS_DEFINER_ENTRY(IPV4_DST_I),
+ HWS_DEFINER_ENTRY(IPV4_SRC_O),
+ HWS_DEFINER_ENTRY(IPV4_SRC_I),
+ HWS_DEFINER_ENTRY(IP_VERSION_O),
+ HWS_DEFINER_ENTRY(IP_VERSION_I),
+ HWS_DEFINER_ENTRY(IP_FRAG_O),
+ HWS_DEFINER_ENTRY(IP_FRAG_I),
+ HWS_DEFINER_ENTRY(IP_LEN_O),
+ HWS_DEFINER_ENTRY(IP_LEN_I),
+ HWS_DEFINER_ENTRY(IP_TOS_O),
+ HWS_DEFINER_ENTRY(IP_TOS_I),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_O),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_I),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_O),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_I),
+ HWS_DEFINER_ENTRY(L4_SPORT_O),
+ HWS_DEFINER_ENTRY(L4_SPORT_I),
+ HWS_DEFINER_ENTRY(L4_DPORT_O),
+ HWS_DEFINER_ENTRY(L4_DPORT_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_O),
+ HWS_DEFINER_ENTRY(TCP_SEQ_NUM),
+ HWS_DEFINER_ENTRY(TCP_ACK_NUM),
+ HWS_DEFINER_ENTRY(GTP_TEID),
+ HWS_DEFINER_ENTRY(GTP_MSG_TYPE),
+ HWS_DEFINER_ENTRY(GTP_EXT_FLAG),
+ HWS_DEFINER_ENTRY(GTP_NEXT_EXT_HDR),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_PDU),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_QFI),
+ HWS_DEFINER_ENTRY(GTPU_DW0),
+ HWS_DEFINER_ENTRY(GTPU_FIRST_EXT_DW0),
+ HWS_DEFINER_ENTRY(GTPU_DW2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_0),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_1),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_3),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_4),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_5),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_6),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_7),
+ HWS_DEFINER_ENTRY(VPORT_REG_C_0),
+ HWS_DEFINER_ENTRY(VXLAN_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD0),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_PROTO),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_LEN),
+ HWS_DEFINER_ENTRY(GENEVE_OAM),
+ HWS_DEFINER_ENTRY(GENEVE_PROTO),
+ HWS_DEFINER_ENTRY(GENEVE_VNI),
+ HWS_DEFINER_ENTRY(SOURCE_QP),
+ HWS_DEFINER_ENTRY(SOURCE_GVMI),
+ HWS_DEFINER_ENTRY(REG_0),
+ HWS_DEFINER_ENTRY(REG_1),
+ HWS_DEFINER_ENTRY(REG_2),
+ HWS_DEFINER_ENTRY(REG_3),
+ HWS_DEFINER_ENTRY(REG_4),
+ HWS_DEFINER_ENTRY(REG_5),
+ HWS_DEFINER_ENTRY(REG_6),
+ HWS_DEFINER_ENTRY(REG_7),
+ HWS_DEFINER_ENTRY(REG_8),
+ HWS_DEFINER_ENTRY(REG_9),
+ HWS_DEFINER_ENTRY(REG_10),
+ HWS_DEFINER_ENTRY(REG_11),
+ HWS_DEFINER_ENTRY(REG_A),
+ HWS_DEFINER_ENTRY(REG_B),
+ HWS_DEFINER_ENTRY(GRE_KEY_PRESENT),
+ HWS_DEFINER_ENTRY(GRE_C),
+ HWS_DEFINER_ENTRY(GRE_K),
+ HWS_DEFINER_ENTRY(GRE_S),
+ HWS_DEFINER_ENTRY(GRE_PROTOCOL),
+ HWS_DEFINER_ENTRY(GRE_OPT_KEY),
+ HWS_DEFINER_ENTRY(GRE_OPT_SEQ),
+ HWS_DEFINER_ENTRY(GRE_OPT_CHECKSUM),
+ HWS_DEFINER_ENTRY(INTEGRITY_O),
+ HWS_DEFINER_ENTRY(INTEGRITY_I),
+ HWS_DEFINER_ENTRY(ICMP_DW1),
+ HWS_DEFINER_ENTRY(ICMP_DW2),
+ HWS_DEFINER_ENTRY(ICMP_DW3),
+ HWS_DEFINER_ENTRY(IPSEC_SPI),
+ HWS_DEFINER_ENTRY(IPSEC_SEQUENCE_NUMBER),
+ HWS_DEFINER_ENTRY(IPSEC_SYNDROME),
+ HWS_DEFINER_ENTRY(MPLS0_O),
+ HWS_DEFINER_ENTRY(MPLS1_O),
+ HWS_DEFINER_ENTRY(MPLS2_O),
+ HWS_DEFINER_ENTRY(MPLS3_O),
+ HWS_DEFINER_ENTRY(MPLS4_O),
+ HWS_DEFINER_ENTRY(MPLS0_I),
+ HWS_DEFINER_ENTRY(MPLS1_I),
+ HWS_DEFINER_ENTRY(MPLS2_I),
+ HWS_DEFINER_ENTRY(MPLS3_I),
+ HWS_DEFINER_ENTRY(MPLS4_I),
+ HWS_DEFINER_ENTRY(FLEX_PARSER0_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER1_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER2_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER3_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER4_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER5_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER6_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER7_OK),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_I),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_7),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_7),
+ HWS_DEFINER_ENTRY(IB_L4_OPCODE),
+ HWS_DEFINER_ENTRY(IB_L4_QPN),
+ HWS_DEFINER_ENTRY(IB_L4_A),
+ HWS_DEFINER_ENTRY(RANDOM_NUM),
+ HWS_DEFINER_ENTRY(PTYPE_L2_O),
+ HWS_DEFINER_ENTRY(PTYPE_L2_I),
+ HWS_DEFINER_ENTRY(PTYPE_L3_O),
+ HWS_DEFINER_ENTRY(PTYPE_L3_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_I),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_O),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_I),
+ HWS_DEFINER_ENTRY(TNL_HDR_0),
+ HWS_DEFINER_ENTRY(TNL_HDR_1),
+ HWS_DEFINER_ENTRY(TNL_HDR_2),
+ HWS_DEFINER_ENTRY(TNL_HDR_3),
+ [MLX5HWS_DEFINER_FNAME_MAX] = "DEFINER_FNAME_UNKNOWN",
+};
+
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname)
+{
+ if (fname > MLX5HWS_DEFINER_FNAME_MAX)
+ fname = MLX5HWS_DEFINER_FNAME_MAX;
+ return hws_definer_fname_to_str[fname];
+}
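
The lookup above follows the usual kernel idiom for enum-to-string tables: designated initializers plus a clamp of out-of-range values onto a terminal "UNKNOWN" entry. A generic self-contained version of the pattern for reference:

#include <stdio.h>

enum fruit { APPLE, PEAR, PLUM, FRUIT_MAX };

static const char * const fruit_str[] = {
	[APPLE] = "APPLE",
	[PEAR] = "PEAR",
	[PLUM] = "PLUM",
	[FRUIT_MAX] = "UNKNOWN",	/* clamp target for bad input */
};

static const char *fruit_to_str(enum fruit f)
{
	if (f > FRUIT_MAX)	/* out-of-range values map to UNKNOWN */
		f = FRUIT_MAX;
	return fruit_str[f];
}

int main(void)
{
	printf("%s\n", fruit_to_str(PEAR));
	return 0;
}
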
+
static void
hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
void *match_param,
@@ -509,7 +721,7 @@ static int
hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
u32 *match_param)
{
- bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
u32 *s_ipv6, *d_ipv6;
@@ -521,6 +733,20 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
return -EINVAL;
}
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
/* L2 Check ethertype */
HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
outer_headers.ethertype,
@@ -559,6 +785,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
outer_headers.ip_protocol,
eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_O,
+ outer_headers.ip_version,
+ eth_l3_outer.ip_version);
HWS_SET_HDR(fc, match_param, IP_TTL_O,
outer_headers.ttl_hoplimit,
eth_l3_outer.time_to_live_hop_limit);
@@ -570,10 +799,16 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
/* Assume IPv6 is used if ipv6 bits are set */
- is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
- is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
- if (is_s_ipv6) {
+ if (is_ipv6) {
/* Handle IPv6 source address */
HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -587,13 +822,6 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_src_outer.ipv6_address_31_0);
- } else {
- /* Handle IPv4 source address */
- HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
- ipv4_src_dest_outer.source_address);
- }
- if (is_d_ipv6) {
/* Handle IPv6 destination address */
HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -608,6 +836,10 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_dst_outer.ipv6_address_31_0);
} else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
/* Handle IPv4 destination address */
HWS_SET_HDR(fc, match_param, IPV4_DST_O,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
@@ -665,7 +897,7 @@ static int
hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
u32 *match_param)
{
- bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
u32 *s_ipv6, *d_ipv6;
@@ -677,6 +909,20 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
return -EINVAL;
}
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
/* L2 Check ethertype */
HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
inner_headers.ethertype,
@@ -728,10 +974,16 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
/* Assume IPv6 is used if ipv6 bits are set */
- is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
- is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
- if (is_s_ipv6) {
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
+
+ if (is_ipv6) {
/* Handle IPv6 source address */
HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -745,13 +997,6 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_src_inner.ipv6_address_31_0);
- } else {
- /* Handle IPv4 source address */
- HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
- ipv4_src_dest_inner.source_address);
- }
- if (is_d_ipv6) {
/* Handle IPv6 destination address */
HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -766,6 +1011,10 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_dst_inner.ipv6_address_31_0);
} else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
/* Handle IPv4 destination address */
HWS_SET_HDR(fc, match_param, IPV4_DST_I,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 5c1a2086efba..62da55389331 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -831,4 +831,6 @@ mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
u32 *match_param,
int *fc_sz);
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);
+
#endif /* HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 1b787cd66e6f..bf4643d0ce17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -966,6 +966,9 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
switch (attr->type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ if (dst->dest_attr.ft->flags &
+ MLX5_FLOW_TABLE_UPLINK_VPORT)
+ dest_actions[num_dest_actions].is_wire_ft = true;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
@@ -1081,13 +1084,8 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5hws_bwc_rule *rule;
int err = 0;
- if (mlx5_fs_cmd_is_fw_term_table(ft)) {
- /* Packet reformat on terminamtion table not supported yet */
- if (fte->act_dests.action.action &
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
- return -EOPNOTSUPP;
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
- }
err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
if (err)
@@ -1362,7 +1360,8 @@ mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
pkt_reformat->fs_hws_action.pr_data = pr_data;
}
- pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ mutex_init(&pkt_reformat->fs_hws_action.lock);
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
pkt_reformat->fs_hws_action.hws_action = hws_action;
return 0;
@@ -1380,6 +1379,15 @@ static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace
struct mlx5_fs_hws_pr *pr_data;
struct mlx5_fs_pool *pr_pool;
+ if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+
+ fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
+ mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_dealloc(ns, &fw_pkt_reformat);
+ pkt_reformat->fs_hws_action.fw_reformat_id = 0;
+ }
+
if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
return;
@@ -1532,6 +1540,58 @@ static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *
modify_hdr->fs_hws_action.mh_data = NULL;
}
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
+ struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
+ u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+ struct mlx5_pkt_reformat_params params = { 0 };
+ struct mlx5_flow_root_namespace *ns;
+ struct mlx5_core_dev *dev;
+ int ret;
+
+ mutex_lock(lock);
+
+ if (*id != 0) {
+ *reformat_id = *id;
+ ret = 0;
+ goto unlock;
+ }
+
+ dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
+ if (!dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ns = mlx5_get_root_namespace(dev, ns_type);
+ if (!ns) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ params.type = pkt_reformat->reformat_type;
+ params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
+ params.data = pkt_reformat->fs_hws_action.pr_data->data;
+
+ ret = mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_alloc(ns, &params, ns_type, &fw_pkt_reformat);
+ if (ret)
+ goto unlock;
+
+ *id = fw_pkt_reformat.id;
+ *reformat_id = *id;
+ ret = 0;
+
+unlock:
+ mutex_unlock(lock);
+
+ return ret;
+}
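
The helper above is a lazily-initialized, mutex-protected ID cache: the first caller pays for the FW allocation and every later caller reuses the cached id, with 0 doubling as the "not yet allocated" sentinel. A minimal pthread sketch of the same shape, with alloc_id() as a hypothetical stand-in for the FW packet_reformat_alloc call:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int cached_id;	/* 0 means "not allocated yet" */

static int get_id(unsigned int *out, int (*alloc_id)(unsigned int *id))
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!cached_id)
		ret = alloc_id(&cached_id);	/* first caller allocates */
	if (!ret)
		*out = cached_id;
	pthread_mutex_unlock(&lock);
	return ret;
}
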
+
static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
index 8b56298288da..b92d55b2d147 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -41,6 +41,11 @@ struct mlx5_fs_hws_action {
struct mlx5_fs_pool *fs_pool;
struct mlx5_fs_hws_pr *pr_data;
struct mlx5_fs_hws_mh *mh_data;
+ u32 fw_reformat_id;
+ /* Protect `fw_reformat_id` against being initialized from multiple
+ * threads.
+ */
+ struct mutex lock;
};
struct mlx5_fs_hws_matcher {
@@ -84,12 +89,23 @@ void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
#ifdef CONFIG_MLX5_HW_STEERING
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
+
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
#else
+static inline int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ return -EOPNOTSUPP;
+}
+
static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
{
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
index 30ccd635b505..21279d503117 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -17,6 +17,7 @@
#include "context.h"
#include "table.h"
#include "send.h"
+#include "action_ste_pool.h"
#include "rule.h"
#include "cmd.h"
#include "action.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index b61864b32053..ce28ee1c0e41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -3,25 +3,6 @@
#include "internal.h"
-enum mlx5hws_matcher_rtc_type {
- HWS_MATCHER_RTC_TYPE_MATCH,
- HWS_MATCHER_RTC_TYPE_STE_ARRAY,
- HWS_MATCHER_RTC_TYPE_MAX,
-};
-
-static const char * const mlx5hws_matcher_rtc_type_str[] = {
- [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
- [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
- [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
-};
-
-static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
-{
- if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
- rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
- return mlx5hws_matcher_rtc_type_str[rtc_type];
-}
-
static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
{
/* Collision table concatenation is done only for large rule tables */
@@ -42,19 +23,199 @@ static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
}
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_matcher *tmp_matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return -EINVAL;
+
+ /* Update isolated_matcher_end_ft_id attribute for all
+	 * the matchers in the isolated table.
+ */
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node)
+ tmp_matcher->attr.isolated_matcher_end_ft_id = miss_ft_id;
+
+ tmp_matcher = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ return mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tmp_matcher->end_ft_id,
+ tbl->fw_ft_type,
+ miss_ft_id);
+}
+
+static int hws_matcher_connect_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 end_ft_id;
+ int ret;
+
+ /* Reset end_ft next RTCs */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ /* Connect isolated matcher's end_ft to the complex matcher's end FT */
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ end_ft_id);
+
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
+ return ret;
+ }
+
+ ret = hws_matcher_connect_end_ft_isolated(matcher);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to connect end FT\n");
+ goto destroy_default_ft;
+ }
+
+ return 0;
+
+destroy_default_ft:
+ mlx5hws_table_destroy_default_ft(tbl, matcher->end_ft_id);
+ return ret;
+}
+
static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_table *tbl = matcher->tbl;
int ret;
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+ if (mlx5hws_matcher_is_isolated(matcher))
+ ret = hws_matcher_create_end_ft_isolated(matcher);
+ else
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,
+ &matcher->end_ft_id);
+
if (ret) {
mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
return ret;
}
+
return 0;
}
+static int hws_matcher_connect_isolated_first(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ /* Isolated matcher's end_ft is already pointing to the end_ft
+	 * of the complex matcher - it was set when the end_ft was created,
+ * so no need to connect it.
+ * We still need to connect the isolated table's start FT to
+ * this matcher's RTC.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to connect start FT to match RTC\n");
+ return ret;
+ }
+
+ /* Reset table's FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset table ft default miss\n");
+ return ret;
+ }
+
+ list_add(&matcher->list_node, &tbl->matchers_list);
+
+ return ret;
+}
+
+static int hws_matcher_connect_isolated_last(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *last;
+ int ret;
+
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ /* New matcher's end_ft is already pointing to the end_ft of
+ * the complex matcher.
+	 * Connect the previous matcher's end_ft to this new matcher's RTC.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ last->end_ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Isolated matcher: failed to connect matcher end_ft to new match RTC\n");
+ return ret;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, last->end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset matcher ft default miss\n");
+ return ret;
+ }
+
+ /* Insert after the last matcher */
+ list_add(&matcher->list_node, &last->list_node);
+
+ return 0;
+}
+
+static int hws_matcher_connect_isolated(struct mlx5hws_matcher *matcher)
+{
+ /* Isolated matcher is expected to be the only one in its table.
+	 * However, it can have a collision matcher, and it can go through
+	 * the rehash process, in which case we will temporarily have both
+	 * the old and the new matchers in the isolated table.
+ * Check if this is the first matcher in the isolated table.
+ */
+ if (list_empty(&matcher->tbl->matchers_list))
+ return hws_matcher_connect_isolated_first(matcher);
+
+ /* If this wasn't the first matcher, then we have 3 possible cases:
+ * - this is a collision matcher for the first matcher
+ * - this is a new rehash dest matcher
+ * - this is a collision matcher for the new rehash dest matcher
+	 * The logic to add a new matcher is the same for all these cases.
+ */
+ return hws_matcher_connect_isolated_last(matcher);
+}
+
static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_table *tbl = matcher->tbl;
@@ -64,6 +225,9 @@ static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
struct mlx5hws_matcher *tmp_matcher;
int ret;
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_connect_isolated(matcher);
+
/* Find location in matcher list */
if (list_empty(&tbl->matchers_list)) {
list_add(&matcher->list_node, &tbl->matchers_list);
@@ -140,6 +304,92 @@ remove_from_list:
return ret;
}
+static int hws_matcher_disconnect_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *first, *last, *prev, *next;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 end_ft_id;
+ int ret;
+
+ first = list_first_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ prev = list_prev_entry(matcher, list_node);
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (first == last) {
+ /* This was the only matcher in the list.
+ * Reset isolated table FT next RTCs and connect it
+ * to the whole complex matcher end FT instead.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* At this point we know that there are more matchers in the list */
+
+ if (matcher == first) {
+ /* We've disconnected the first matcher.
+ * Now update isolated table default FT.
+ */
+ if (!next)
+ return -EINVAL;
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ }
+
+ if (matcher == last) {
+ /* If we've disconnected the last matcher - update prev
+ * matcher's end_ft to point to the complex matcher end_ft.
+ */
+ if (!prev)
+ return -EINVAL;
+ return hws_matcher_connect_end_ft_isolated(prev);
+ }
+
+ /* This wasn't the first or the last matcher, which means that it has
+ * both prev and next matchers. Note that this only happens when we're
+ * disconnecting the collision matcher of the old matcher during rehash.
+ */
+ if (!prev || !next ||
+ !(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ return -EINVAL;
+
+ /* Update prev end FT to point to next match RTC */
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ prev->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+}
+
static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher *next = NULL, *prev = NULL;
@@ -147,6 +397,9 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
u32 prev_ft_id = tbl->ft_id;
int ret;
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_disconnect_isolated(matcher);
+
if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
prev = list_prev_entry(matcher, list_node);
prev_ft_id = prev->end_ft_id;
@@ -197,149 +450,96 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
- enum mlx5hws_matcher_rtc_type rtc_type,
bool is_mirror)
{
- struct mlx5hws_pool_chunk *ste = &matcher->action_ste.ste;
enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
- bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
(flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
/* Optimize FDB RTC */
rtc_attr->log_size = 0;
rtc_attr->log_depth = 0;
- } else {
- /* Keep original values */
- rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
- rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
}
}
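
A hedged note on the optimization above: when optimize_flow_src is set, the FDB RTC for the direction that can never see the traffic is created with zero size, while the other RTC keeps the sizes already set by the caller. For example:

	/*
	 * Example (assumed): optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT
	 *   non-mirror RTC: shrunk to log_size = 0, log_depth = 0
	 *   mirror RTC:     keeps the caller-provided log_size/log_depth
	 */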
-static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type)
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
struct mlx5hws_match_template *mt = matcher->mt;
struct mlx5hws_context *ctx = matcher->tbl->ctx;
- struct mlx5hws_action_default_stc *default_stc;
- struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool *ste_pool, *stc_pool;
- struct mlx5hws_pool_chunk *ste;
- u32 *rtc_0_id, *rtc_1_id;
u32 obj_id;
int ret;
- switch (rtc_type) {
- case HWS_MATCHER_RTC_TYPE_MATCH:
- rtc_0_id = &matcher->match_ste.rtc_0_id;
- rtc_1_id = &matcher->match_ste.rtc_1_id;
- ste_pool = matcher->match_ste.pool;
- ste = &matcher->match_ste.ste;
- ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
-
- rtc_attr.log_size = attr->table.sz_row_log;
- rtc_attr.log_depth = attr->table.sz_col_log;
- rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
- rtc_attr.is_scnd_range = 0;
- rtc_attr.miss_ft_id = matcher->end_ft_id;
-
- if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
- /* The usual Hash Table */
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
-
- /* The first mt is used since all share the same definer */
- rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
- } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
- rtc_attr.num_hash_definer = 1;
-
- if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
- /* Hash Split Table */
- rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
- rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
- } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
- /* Linear Lookup Table */
- rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
- rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
- }
- }
-
- /* Match pool requires implicit allocation */
- ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
- hws_matcher_rtc_type_to_str(rtc_type));
- return ret;
+ rtc_attr.log_size = attr->table.sz_row_log;
+ rtc_attr.log_depth = attr->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 =
+ mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 =
+ ctx->caps->linear_match_definer;
}
- break;
-
- case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste;
-
- rtc_0_id = &action_ste->rtc_0_id;
- rtc_1_id = &action_ste->rtc_1_id;
- ste_pool = action_ste->pool;
- ste = &action_ste->ste;
- /* Action RTC size calculation:
- * log((max number of rules in matcher) *
- * (max number of action STEs per rule) *
- * (2 to support writing new STEs for update rule))
- */
- ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- attr->table.sz_row_log +
- MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
- rtc_attr.log_size = ste->order;
- rtc_attr.log_depth = 0;
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
- /* The action STEs use the default always hit definer */
- rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
- rtc_attr.is_frst_jumbo = false;
- rtc_attr.miss_ft_id = 0;
- break;
-
- default:
- mlx5hws_err(ctx, "HWS Invalid RTC type\n");
- return -EINVAL;
}
- obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_id(matcher->match_ste.pool);
rtc_attr.pd = ctx->pd_num;
rtc_attr.ste_base = obj_id;
- rtc_attr.ste_offset = ste->offset;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
- hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool;
- default_stc = ctx->common_res.default_stc;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_id(ctx->stc_pool);
rtc_attr.stc_base = obj_id;
- ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_0_id);
if (ret) {
- mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
- hws_matcher_rtc_type_to_str(rtc_type));
- goto free_ste;
+ mlx5hws_err(ctx, "Failed to create matcher RTC\n");
+ return ret;
}
if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_mirror_id(
+ matcher->match_ste.pool);
rtc_attr.ste_base = obj_id;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
rtc_attr.stc_base = obj_id;
- hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, true);
- ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
- hws_matcher_rtc_type_to_str(rtc_type));
+ mlx5hws_err(ctx, "Failed to create mirror matcher RTC\n");
goto destroy_rtc_0;
}
}
@@ -347,46 +547,18 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
return 0;
destroy_rtc_0:
- mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
-free_ste:
- if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
- mlx5hws_pool_chunk_free(ste_pool, ste);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, matcher->match_ste.rtc_0_id);
return ret;
}
-static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type)
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher)
{
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool_chunk *ste;
- struct mlx5hws_pool *ste_pool;
- u32 rtc_0_id, rtc_1_id;
-
- switch (rtc_type) {
- case HWS_MATCHER_RTC_TYPE_MATCH:
- rtc_0_id = matcher->match_ste.rtc_0_id;
- rtc_1_id = matcher->match_ste.rtc_1_id;
- ste_pool = matcher->match_ste.pool;
- ste = &matcher->match_ste.ste;
- break;
- case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste;
- rtc_0_id = action_ste->rtc_0_id;
- rtc_1_id = action_ste->rtc_1_id;
- ste_pool = action_ste->pool;
- ste = &action_ste->ste;
- break;
- default:
- return;
- }
+ struct mlx5_core_dev *mdev = matcher->tbl->ctx->mdev;
- if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
- if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
- mlx5hws_pool_chunk_free(ste_pool, ste);
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_0_id);
}
static int
@@ -454,85 +626,17 @@ static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
return 0;
}
-static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
-{
- struct mlx5hws_matcher_resize_data *resize_data;
-
- resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
- if (!resize_data)
- return -ENOMEM;
-
- resize_data->max_stes = src_matcher->action_ste.max_stes;
-
- resize_data->stc = src_matcher->action_ste.stc;
- resize_data->rtc_0_id = src_matcher->action_ste.rtc_0_id;
- resize_data->rtc_1_id = src_matcher->action_ste.rtc_1_id;
- resize_data->pool = src_matcher->action_ste.max_stes ?
- src_matcher->action_ste.pool : NULL;
-
- /* Place the new resized matcher on the dst matcher's list */
- list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
-
- /* Move all the previous resized matchers to the dst matcher's list */
- while (!list_empty(&src_matcher->resize_data)) {
- resize_data = list_first_entry(&src_matcher->resize_data,
- struct mlx5hws_matcher_resize_data,
- list_node);
- list_del_init(&resize_data->list_node);
- list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
- }
-
- return 0;
-}
-
-static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
-{
- struct mlx5hws_matcher_resize_data *resize_data;
-
- if (!mlx5hws_matcher_is_resizable(matcher))
- return;
-
- while (!list_empty(&matcher->resize_data)) {
- resize_data = list_first_entry(&matcher->resize_data,
- struct mlx5hws_matcher_resize_data,
- list_node);
- list_del_init(&resize_data->list_node);
-
- if (resize_data->max_stes) {
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->stc);
-
- if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->rtc_1_id);
-
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->rtc_0_id);
-
- if (resize_data->pool)
- mlx5hws_pool_destroy(resize_data->pool);
- }
-
- kfree(resize_data);
- }
-}
-
static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
{
bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool_attr pool_attr = {0};
- struct mlx5hws_context *ctx = tbl->ctx;
- u32 required_stes;
- u8 max_stes = 0;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 required_stes, max_stes;
int i, ret;
if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
return 0;
+ max_stes = 0;
for (i = 0; i < matcher->num_of_at; i++) {
struct mlx5hws_action_template *at = &matcher->at[i];
@@ -548,75 +652,33 @@ static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
/* Future: Optimize reparse */
}
- /* There are no additional STEs required for matcher */
- if (!max_stes)
- return 0;
-
- matcher->action_ste.max_stes = max_stes;
-
- action_ste = &matcher->action_ste;
-
- /* Allocate action STE mempool */
- pool_attr.table_type = tbl->type;
- pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
- /* Pool size is similar to action RTC size */
- pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- matcher->attr.table.sz_row_log +
- MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
- hws_matcher_set_pool_attr(&pool_attr, matcher);
- action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
- if (!action_ste->pool) {
- mlx5hws_err(ctx, "Failed to create action ste pool\n");
- return -EINVAL;
- }
-
- /* Allocate action RTC */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
- if (ret) {
- mlx5hws_err(ctx, "Failed to create action RTC\n");
- goto free_ste_pool;
- }
-
- /* Allocate STC for jumps to STE */
- stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
- stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
- stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- stc_attr.ste_table.ste = action_ste->ste;
- stc_attr.ste_table.ste_pool = action_ste->pool;
- stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
-
- ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
- &action_ste->stc);
- if (ret) {
- mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
- goto free_rtc;
- }
+ matcher->num_of_action_stes = max_stes;
return 0;
-
-free_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
-free_ste_pool:
- mlx5hws_pool_destroy(action_ste->pool);
- return ret;
}
-static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+static void hws_matcher_set_ip_version_match(struct mlx5hws_matcher *matcher)
{
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
-
- action_ste = &matcher->action_ste;
-
- if (!action_ste->max_stes ||
- matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
- mlx5hws_matcher_is_in_resize(matcher))
- return;
+ int i;
- mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
- mlx5hws_pool_destroy(action_ste->pool);
+ for (i = 0; i < matcher->mt->fc_sz; i++) {
+ switch (matcher->mt->fc[i].fname) {
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+ matcher->matches_outer_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+ matcher->matches_outer_ip_version = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+ matcher->matches_inner_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+ matcher->matches_inner_ip_version = 1;
+ break;
+ default:
+ break;
+ }
+ }
}
static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
@@ -635,10 +697,11 @@ static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
}
}
+ hws_matcher_set_ip_version_match(matcher);
+
/* Create an STE pool per matcher */
pool_attr.table_type = matcher->tbl->type;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
matcher->attr.table.sz_row_log;
hws_matcher_set_pool_attr(&pool_attr, matcher);
@@ -740,6 +803,8 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+ matcher->flags |= attr->isolated_matcher_end_ft_id ?
+ MLX5HWS_MATCHER_FLAGS_ISOLATED : 0;
return hws_matcher_check_attr_sz(caps, matcher);
}
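
For illustration only, a hedged sketch of how a caller is assumed to opt a matcher into isolation; end_ft_id is a hypothetical flow table id:

	struct mlx5hws_matcher_attr attr = {0};

	attr.isolated_matcher_end_ft_id = end_ft_id; /* hypothetical FT id */
	/* hws_matcher_process_attr() then sets MLX5HWS_MATCHER_FLAGS_ISOLATED,
	 * routing connect/disconnect through the isolated paths above.
	 */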
@@ -761,10 +826,10 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
/* Create matcher end flow table anchor */
ret = hws_matcher_create_end_ft(matcher);
if (ret)
- goto unbind_at;
+ goto unbind_mt;
/* Allocate the RTC for the new matcher */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
+ ret = hws_matcher_create_rtc(matcher);
if (ret)
goto destroy_end_ft;
@@ -776,11 +841,9 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
return 0;
destroy_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
+ hws_matcher_destroy_rtc(matcher);
destroy_end_ft:
hws_matcher_destroy_end_ft(matcher);
-unbind_at:
- hws_matcher_unbind_at(matcher);
unbind_mt:
hws_matcher_unbind_mt(matcher);
return ret;
@@ -788,11 +851,9 @@ unbind_mt:
static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
{
- hws_matcher_resize_uninit(matcher);
hws_matcher_disconnect(matcher);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
+ hws_matcher_destroy_rtc(matcher);
hws_matcher_destroy_end_ft(matcher);
- hws_matcher_unbind_at(matcher);
hws_matcher_unbind_mt(matcher);
}
@@ -814,8 +875,6 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
if (!col_matcher)
return -ENOMEM;
- INIT_LIST_HEAD(&col_matcher->resize_data);
-
col_matcher->tbl = matcher->tbl;
col_matcher->mt = matcher->mt;
col_matcher->at = matcher->at;
@@ -832,6 +891,8 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+ col_matcher->attr.isolated_matcher_end_ft_id =
+ matcher->attr.isolated_matcher_end_ft_id;
ret = hws_matcher_process_attr(ctx->caps, col_matcher);
if (ret)
@@ -869,8 +930,6 @@ static int hws_matcher_init(struct mlx5hws_matcher *matcher)
struct mlx5hws_context *ctx = matcher->tbl->ctx;
int ret;
- INIT_LIST_HEAD(&matcher->resize_data);
-
mutex_lock(&ctx->ctrl_lock);
/* Allocate matcher resource and connect to the packet pipe */
@@ -905,18 +964,44 @@ static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
return 0;
}
+static int hws_matcher_grow_at_array(struct mlx5hws_matcher *matcher)
+{
+ void *p;
+
+ if (matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+
+ matcher->size_of_at_array *= 2;
+ p = krealloc(matcher->at,
+ matcher->size_of_at_array * sizeof(*matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ matcher->at = p;
+
+ return 0;
+}
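
A short worked example of the doubling policy above, assuming the array starts with 4 entries:

	/*
	 * size_of_at_array: 4 -> 8 -> 16 -> 32 -> 64 -> 128
	 *
	 * Once MLX5HWS_MATCHER_MAX_AT (128) is reached, further calls fail
	 * with -ENOMEM and the attach attempt is rejected. On allocation
	 * failure the size is rolled back, so the matcher stays consistent.
	 */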
+
int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
struct mlx5hws_action_template *at)
{
bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_context *ctx = matcher->tbl->ctx;
u32 required_stes;
int ret;
- if (!matcher->attr.max_num_of_at_attach) {
- mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n",
- matcher->num_of_at);
- return -EOPNOTSUPP;
+ if (unlikely(matcher->num_of_at >= matcher->size_of_at_array)) {
+ ret = hws_matcher_grow_at_array(matcher);
+ if (ret)
+ return ret;
+
+ if (matcher->col_matcher) {
+ ret = hws_matcher_grow_at_array(matcher->col_matcher);
+ if (ret)
+ return ret;
+ }
}
ret = hws_matcher_check_and_process_at(matcher, at);
@@ -924,15 +1009,11 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- if (matcher->action_ste.max_stes < required_stes) {
- mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
- required_stes, matcher->action_ste.max_stes);
- return -ENOMEM;
- }
+ if (matcher->num_of_action_stes < required_stes)
+ matcher->num_of_action_stes = required_stes;
matcher->at[matcher->num_of_at] = *at;
matcher->num_of_at += 1;
- matcher->attr.max_num_of_at_attach -= 1;
if (matcher->col_matcher)
matcher->col_matcher->num_of_at = matcher->num_of_at;
@@ -960,8 +1041,9 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
if (!matcher->mt)
return -ENOMEM;
- matcher->at = kvcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
- sizeof(*matcher->at),
+ matcher->size_of_at_array =
+ num_of_at + matcher->attr.max_num_of_at_attach;
+ matcher->at = kvcalloc(matcher->size_of_at_array, sizeof(*matcher->at),
GFP_KERNEL);
if (!matcher->at) {
mlx5hws_err(ctx, "Failed to allocate action template array\n");
@@ -1110,7 +1192,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
return -EINVAL;
}
- if (src_matcher->action_ste.max_stes > dst_matcher->action_ste.max_stes) {
+ if (src_matcher->num_of_action_stes > dst_matcher->num_of_action_stes) {
mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
return -EINVAL;
}
@@ -1139,10 +1221,6 @@ int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
src_matcher->resize_dst = dst_matcher;
- ret = hws_matcher_resize_init(src_matcher);
- if (ret)
- src_matcher->resize_dst = NULL;
-
out:
mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 020de70270c5..32e83cddcd60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -23,6 +23,9 @@
*/
#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1
+/* Maximum number of action templates that can be attached to a matcher. */
+#define MLX5HWS_MATCHER_MAX_AT 128
+
enum mlx5hws_matcher_offset {
MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
@@ -31,6 +34,7 @@ enum mlx5hws_matcher_offset {
enum mlx5hws_matcher_flags {
MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+ MLX5HWS_MATCHER_FLAGS_ISOLATED = 1 << 4,
};
struct mlx5hws_match_template {
@@ -42,28 +46,15 @@ struct mlx5hws_match_template {
};
struct mlx5hws_matcher_match_ste {
- struct mlx5hws_pool_chunk ste;
u32 rtc_0_id;
u32 rtc_1_id;
struct mlx5hws_pool *pool;
};
-struct mlx5hws_matcher_action_ste {
- struct mlx5hws_pool_chunk ste;
- struct mlx5hws_pool_chunk stc;
- u32 rtc_0_id;
- u32 rtc_1_id;
- struct mlx5hws_pool *pool;
- u8 max_stes;
-};
-
-struct mlx5hws_matcher_resize_data {
- struct mlx5hws_pool_chunk stc;
- u32 rtc_0_id;
- u32 rtc_1_id;
- struct mlx5hws_pool *pool;
- u8 max_stes;
- struct list_head list_node;
+enum {
+ MLX5HWS_MATCHER_IPV_UNSET = 0,
+ MLX5HWS_MATCHER_IPV_4 = 1,
+ MLX5HWS_MATCHER_IPV_6 = 2,
};
struct mlx5hws_matcher {
@@ -72,16 +63,22 @@ struct mlx5hws_matcher {
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at;
u8 num_of_at;
+ u8 size_of_at_array;
u8 num_of_mt;
+ u8 num_of_action_stes;
/* enum mlx5hws_matcher_flags */
u8 flags;
+ u8 matches_outer_ethertype:1;
+ u8 matches_outer_ip_version:1;
+ u8 matches_inner_ethertype:1;
+ u8 matches_inner_ip_version:1;
+ u8 outer_ip_version:2;
+ u8 inner_ip_version:2;
u32 end_ft_id;
struct mlx5hws_matcher *col_matcher;
struct mlx5hws_matcher *resize_dst;
struct mlx5hws_matcher_match_ste match_ste;
- struct mlx5hws_matcher_action_ste action_ste;
struct list_head list_node;
- struct list_head resize_data;
};
static inline bool
@@ -100,9 +97,17 @@ static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
return !!matcher->resize_dst;
}
+static inline bool mlx5hws_matcher_is_isolated(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED);
+}
+
static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
{
return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
}
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id);
+
#endif /* HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index 5121951f2778..d8ac6c196211 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -119,6 +119,8 @@ struct mlx5hws_matcher_attr {
};
/* Optional AT attach configuration - Max number of additional AT */
u8 max_num_of_at_attach;
+ /* Optional end FT (miss FT ID) for match RTC (for isolated matcher) */
+ u32 isolated_matcher_end_ft_id;
};
struct mlx5hws_rule_attr {
@@ -211,6 +213,7 @@ struct mlx5hws_action_dest_attr {
struct mlx5hws_action *dest;
/* Optional reformat action */
struct mlx5hws_action *reformat;
+ bool is_wire_ft;
};
/**
@@ -502,6 +505,15 @@ enum mlx5hws_action_type
mlx5hws_action_get_type(struct mlx5hws_action *action);
/**
+ * mlx5hws_action_get_dev - Get mlx5 core device.
+ *
+ * @action: The action to get the device from.
+ *
+ * Return: mlx5 core device.
+ */
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action);
+
+/**
* mlx5hws_action_create_dest_drop - Create a direct rule drop action.
*
* @ctx: The context in which the new action will be created.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index f51ed24526b9..51e4c551e0ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -490,8 +490,8 @@ hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
switch (action_type) {
case MLX5_ACTION_TYPE_SET:
case MLX5_ACTION_TYPE_ADD:
- *src_field = MLX5_GET(set_action_in, pattern, field);
- *dst_field = INVALID_FIELD;
+ *src_field = INVALID_FIELD;
+ *dst_field = MLX5_GET(set_action_in, pattern, field);
break;
case MLX5_ACTION_TYPE_COPY:
*src_field = MLX5_GET(copy_action_in, pattern, src_field);
@@ -522,57 +522,59 @@ bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], s
return true;
}
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
- size_t max_actions, size_t *new_size,
- u32 *nope_location, __be64 *new_pat)
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat)
{
- u16 prev_src_field = 0, prev_dst_field = 0;
+ u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
u16 src_field, dst_field;
u8 action_type;
+ bool dependent;
size_t i, j;
*new_size = num_actions;
- *nope_location = 0;
+ *nop_locations = 0;
if (num_actions == 1)
- return;
+ return 0;
for (i = 0, j = 0; i < num_actions; i++, j++) {
- action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+ if (j >= max_actions)
+ return -EINVAL;
+ action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
hws_action_modify_get_target_fields(action_type, &pattern[i],
&src_field, &dst_field);
- if (i % 2) {
- if (action_type == MLX5_ACTION_TYPE_COPY &&
- (prev_src_field == src_field ||
- prev_dst_field == dst_field)) {
- /* need Nope */
- *new_size += 1;
- *nope_location |= BIT(i);
- memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
- MLX5_SET(set_action_in, &new_pat[j],
- action_type,
- MLX5_MODIFICATION_TYPE_NOP);
- j++;
- } else if (prev_src_field == src_field) {
- /* need Nope*/
- *new_size += 1;
- *nope_location |= BIT(i);
- MLX5_SET(set_action_in, &new_pat[j],
- action_type,
- MLX5_MODIFICATION_TYPE_NOP);
- j++;
- }
- }
- memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
- /* check if no more space */
- if (j > max_actions) {
- *new_size = num_actions;
- *nope_location = 0;
- return;
+
+ /* For every action, look at it and the previous one. The two
+ * actions are dependent if:
+ */
+ dependent =
+ (i > 0) &&
+ /* At least one of the actions is a write and */
+ (dst_field != INVALID_FIELD ||
+ prev_dst_field != INVALID_FIELD) &&
+ /* One reads from the other's source */
+ (dst_field == prev_src_field ||
+ src_field == prev_dst_field ||
+ /* Or both write to the same destination */
+ dst_field == prev_dst_field);
+
+ if (dependent) {
+ *new_size += 1;
+ *nop_locations |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j], action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ if (j >= max_actions)
+ return -EINVAL;
}
+ memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
prev_src_field = src_field;
prev_dst_field = dst_field;
}
+
+ return 0;
}
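
A hedged worked example of the dependency rule above; the field names f1 and f2 are hypothetical:

	/*
	 * in:  [ SET f1, SET f1, SET f2 ]       num_actions = 3
	 * out: [ SET f1, NOP, SET f1, SET f2 ]  *new_size = 4,
	 *                                       *nop_locations = BIT(1)
	 *
	 * The second SET f1 writes the same destination as the first, so a
	 * NOP is inserted between them; SET f2 touches a different field
	 * and needs no NOP.
	 */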
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
index 8ddb51980044..7fbd8dc7aa18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
@@ -96,6 +96,7 @@ int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
u8 *arg_data,
size_t data_size);
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
- size_t *new_size, u32 *nope_location, __be64 *new_pat);
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat);
#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index 50a81d360bb2..7e37d6e9eb83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -20,15 +20,14 @@ static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
kfree(resource);
}
-static void hws_pool_resource_free(struct mlx5hws_pool *pool,
- int resource_idx)
+static void hws_pool_resource_free(struct mlx5hws_pool *pool)
{
- hws_pool_free_one_resource(pool->resource[resource_idx]);
- pool->resource[resource_idx] = NULL;
+ hws_pool_free_one_resource(pool->resource);
+ pool->resource = NULL;
if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
- hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
- pool->mirror_resource[resource_idx] = NULL;
+ hws_pool_free_one_resource(pool->mirror_resource);
+ pool->mirror_resource = NULL;
}
}
@@ -61,10 +60,8 @@ hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
ret = -EINVAL;
}
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+ if (ret)
goto free_resource;
- }
resource->pool = pool;
resource->range = 1 << log_range;
@@ -77,199 +74,114 @@ free_resource:
return NULL;
}
-static int
-hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+static int hws_pool_resource_alloc(struct mlx5hws_pool *pool)
{
struct mlx5hws_pool_resource *resource;
u32 fw_ft_type, opt_log_range;
fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
- opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ?
+ 0 : pool->alloc_log_sz;
resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
if (!resource) {
- mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+ mlx5hws_err(pool->ctx, "Failed to allocate resource\n");
return -EINVAL;
}
- pool->resource[idx] = resource;
+ pool->resource = resource;
if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
struct mlx5hws_pool_resource *mirror_resource;
fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
- opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ?
+ 0 : pool->alloc_log_sz;
mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
if (!mirror_resource) {
- mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+ mlx5hws_err(pool->ctx, "Failed to allocate mirrored resource\n");
hws_pool_free_one_resource(resource);
- pool->resource[idx] = NULL;
+ pool->resource = NULL;
return -EINVAL;
}
- pool->mirror_resource[idx] = mirror_resource;
+ pool->mirror_resource = mirror_resource;
}
return 0;
}
-static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
-{
- unsigned long *cur_bmp;
-
- cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
- if (!cur_bmp)
- return NULL;
-
- bitmap_fill(cur_bmp, 1 << log_range);
-
- return cur_bmp;
-}
-
-static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static int hws_pool_buddy_init(struct mlx5hws_pool *pool)
{
struct mlx5hws_buddy_mem *buddy;
- buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+ buddy = mlx5hws_buddy_create(pool->alloc_log_sz);
if (!buddy) {
- mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
- return;
- }
-
- mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
-}
-
-static struct mlx5hws_buddy_mem *
-hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
- u32 order, bool *is_new_buddy)
-{
- static struct mlx5hws_buddy_mem *buddy;
- u32 new_buddy_size;
-
- buddy = pool->db.buddy_manager->buddies[idx];
- if (buddy)
- return buddy;
-
- new_buddy_size = max(pool->alloc_log_sz, order);
- *is_new_buddy = true;
- buddy = mlx5hws_buddy_create(new_buddy_size);
- if (!buddy) {
- mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
- new_buddy_size, idx);
- return NULL;
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %zu\n",
+ pool->alloc_log_sz);
+ return -ENOMEM;
}
- if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, new_buddy_size, idx);
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
+ pool->type, pool->alloc_log_sz);
mlx5hws_buddy_cleanup(buddy);
- return NULL;
+ return -ENOMEM;
}
- pool->db.buddy_manager->buddies[idx] = buddy;
+ pool->db.buddy = buddy;
- return buddy;
+ return 0;
}
-static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
- int order,
- u32 *buddy_idx,
- int *seg)
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- struct mlx5hws_buddy_mem *buddy;
- bool new_mem = false;
- int ret = 0;
- int i;
-
- *seg = -1;
-
- /* Find the next free place from the buddy array */
- while (*seg < 0) {
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- buddy = hws_pool_buddy_get_next_buddy(pool, i,
- order,
- &new_mem);
- if (!buddy) {
- ret = -ENOMEM;
- goto out;
- }
-
- *seg = mlx5hws_buddy_alloc_mem(buddy, order);
- if (*seg >= 0)
- goto found;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
- mlx5hws_err(pool->ctx,
- "Fail to allocate seg for one resource pool\n");
- ret = -ENOMEM;
- goto out;
- }
-
- if (new_mem) {
- /* We have new memory pool, should be place for us */
- mlx5hws_err(pool->ctx,
- "No memory for order: %d with buddy no: %d\n",
- order, i);
- ret = -ENOMEM;
- goto out;
- }
- }
+ struct mlx5hws_buddy_mem *buddy = pool->db.buddy;
+
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return -EINVAL;
}
-found:
- *buddy_idx = i;
-out:
- return ret;
+ chunk->offset = mlx5hws_buddy_alloc_mem(buddy, chunk->order);
+ if (chunk->offset >= 0)
+ return 0;
+
+ return -ENOMEM;
}
-static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- int ret = 0;
+ struct mlx5hws_buddy_mem *buddy;
- /* Go over the buddies and find next free slot */
- ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
+ buddy = pool->db.buddy;
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return;
+ }
- return ret;
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
}
static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
{
struct mlx5hws_buddy_mem *buddy;
- int i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- buddy = pool->db.buddy_manager->buddies[i];
- if (buddy) {
- mlx5hws_buddy_cleanup(buddy);
- kfree(buddy);
- pool->db.buddy_manager->buddies[i] = NULL;
- }
- }
- kfree(pool->db.buddy_manager);
+ buddy = pool->db.buddy;
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy = NULL;
+ }
}
-static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool)
{
- pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
- if (!pool->db.buddy_manager)
- return -ENOMEM;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
- bool new_buddy;
+ int ret;
- if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
- mlx5hws_err(pool->ctx,
- "Failed allocating memory on create log_sz: %d\n", log_range);
- kfree(pool->db.buddy_manager);
- return -ENOMEM;
- }
- }
+ ret = hws_pool_buddy_init(pool);
+ if (ret)
+ return ret;
pool->p_db_uninit = &hws_pool_buddy_db_uninit;
pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
@@ -278,261 +190,105 @@ static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
return 0;
}
-static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
- u32 alloc_size, int idx)
-{
- int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
-
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- return ret;
- }
-
- return 0;
-}
-
-static struct mlx5hws_pool_elements *
-hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
{
- struct mlx5hws_pool_elements *elem;
- u32 alloc_size;
-
- alloc_size = pool->alloc_log_sz;
+ unsigned long *bitmap;
- elem = kzalloc(sizeof(*elem), GFP_KERNEL);
- if (!elem)
+ bitmap = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!bitmap)
return NULL;
- /* Sharing the same resource, also means that all the elements are with size 1 */
- if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
- !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
- /* Currently all chunks in size 1 */
- elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
- if (!elem->bitmap) {
- mlx5hws_err(pool->ctx,
- "Failed to create bitmap type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- goto free_elem;
- }
-
- elem->log_size = alloc_size - order;
- }
-
- if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- goto free_db;
- }
-
- pool->db.element_manager->elements[idx] = elem;
-
- return elem;
+ bitmap_fill(bitmap, 1 << log_range);
-free_db:
- bitmap_free(elem->bitmap);
-free_elem:
- kfree(elem);
- return NULL;
+ return bitmap;
}
-static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+static int hws_pool_bitmap_init(struct mlx5hws_pool *pool)
{
- unsigned int segment, size;
-
- size = 1 << elem->log_size;
+ unsigned long *bitmap;
- segment = find_first_bit(elem->bitmap, size);
- if (segment >= size) {
- elem->is_full = true;
+ bitmap = hws_pool_create_and_init_bitmap(pool->alloc_log_sz);
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Failed to create bitmap order: %zu\n",
+ pool->alloc_log_sz);
return -ENOMEM;
}
- bitmap_clear(elem->bitmap, segment, 1);
- *seg = segment;
- return 0;
-}
-
-static int
-hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
- u32 *idx, int *seg)
-{
- struct mlx5hws_pool_elements *elem;
-
- elem = pool->db.element_manager->elements[0];
- if (!elem)
- elem = hws_pool_element_create_new_elem(pool, order, 0);
- if (!elem)
- goto err_no_elem;
-
- if (hws_pool_element_find_seg(elem, seg) != 0) {
- mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %zu\n",
+ pool->type, pool->alloc_log_sz);
+ bitmap_free(bitmap);
return -ENOMEM;
}
- *idx = 0;
- elem->num_of_elements++;
- return 0;
+ pool->db.bitmap = bitmap;
-err_no_elem:
- mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
- return -ENOMEM;
-}
-
-static int
-hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
- u32 *idx, int *seg)
-{
- int ret, i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- if (!pool->resource[i]) {
- ret = hws_pool_create_resource_on_index(pool, order, i);
- if (ret)
- goto err_no_res;
- *idx = i;
- *seg = 0; /* One memory slot in that element */
- return 0;
- }
- }
-
- mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
- return -ENOMEM;
-
-err_no_res:
- mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
- return -ENOMEM;
+ return 0;
}
-static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static int hws_pool_bitmap_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- int ret;
-
- /* Go over all memory elements and find/allocate free slot */
- ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
+ unsigned long *bitmap, size;
- return ret;
-}
+ if (chunk->order != 0) {
+ mlx5hws_err(pool->ctx, "Pool only supports order 0 allocs\n");
+ return -EINVAL;
+ }
-static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- if (unlikely(!pool->resource[chunk->resource_idx]))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
+ return -EINVAL;
+ }
- if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
- hws_pool_resource_free(pool, chunk->resource_idx);
-}
+ size = 1 << pool->alloc_log_sz;
-static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
-{
- (void)pool;
-}
+ chunk->offset = find_first_bit(bitmap, size);
+ if (chunk->offset >= size)
+ return -ENOMEM;
-/* This memory management works as the following:
- * - At start doesn't allocate no mem at all.
- * - When new request for chunk arrived:
- * allocate resource and give it.
- * - When free that chunk:
- * the resource is freed.
- */
-static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
-{
- pool->p_db_uninit = &hws_pool_general_element_db_uninit;
- pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
- pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+ bitmap_clear(bitmap, chunk->offset, 1);
return 0;
}
-static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_elements *elem,
- struct mlx5hws_pool_chunk *chunk)
-{
- if (unlikely(!pool->resource[chunk->resource_idx]))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- hws_pool_resource_free(pool, chunk->resource_idx);
- kfree(elem);
- pool->db.element_manager->elements[chunk->resource_idx] = NULL;
-}
-
-static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_bitmap_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- struct mlx5hws_pool_elements *elem;
+ unsigned long *bitmap;
- if (unlikely(chunk->resource_idx))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- elem = pool->db.element_manager->elements[chunk->resource_idx];
- if (!elem) {
- mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
return;
}
- bitmap_set(elem->bitmap, chunk->offset, 1);
- elem->is_full = false;
- elem->num_of_elements--;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
- !elem->num_of_elements)
- hws_onesize_element_db_destroy_element(pool, elem, chunk);
+ bitmap_set(bitmap, chunk->offset, 1);
}
-static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_bitmap_db_uninit(struct mlx5hws_pool *pool)
{
- int ret = 0;
+ unsigned long *bitmap;
- /* Go over all memory elements and find/allocate free slot */
- ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
-
- return ret;
-}
-
-static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
-{
- struct mlx5hws_pool_elements *elem;
- int i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- elem = pool->db.element_manager->elements[i];
- if (elem) {
- bitmap_free(elem->bitmap);
- kfree(elem);
- pool->db.element_manager->elements[i] = NULL;
- }
+ bitmap = pool->db.bitmap;
+ if (bitmap) {
+ bitmap_free(bitmap);
+ pool->db.bitmap = NULL;
}
- kfree(pool->db.element_manager);
}
-/* This memory management works as the following:
- * - At start doesn't allocate no mem at all.
- * - When new request for chunk arrived:
- * aloocate the first and only slot of memory/resource
- * when it ended return error.
- */
-static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+static int hws_pool_bitmap_db_init(struct mlx5hws_pool *pool)
{
- pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
- if (!pool->db.element_manager)
- return -ENOMEM;
+ int ret;
- pool->p_db_uninit = &hws_onesize_element_db_uninit;
- pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
- pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+ ret = hws_pool_bitmap_init(pool);
+ if (ret)
+ return ret;
+
+ pool->p_db_uninit = &hws_pool_bitmap_db_uninit;
+ pool->p_get_chunk = &hws_pool_bitmap_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_bitmap_db_put_chunk;
return 0;
}
@@ -542,15 +298,14 @@ static int hws_pool_db_init(struct mlx5hws_pool *pool,
{
int ret;
- if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
- ret = hws_pool_general_element_db_init(pool);
- else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
- ret = hws_pool_onesize_element_db_init(pool);
+ if (db_type == MLX5HWS_POOL_DB_TYPE_BITMAP)
+ ret = hws_pool_bitmap_db_init(pool);
else
- ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+ ret = hws_pool_buddy_db_init(pool);
if (ret) {
- mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret);
+ mlx5hws_err(pool->ctx, "Failed to init pool db type: %d (ret: %d)\n",
+ db_type, ret);
return ret;
}
@@ -569,6 +324,8 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
mutex_lock(&pool->lock);
ret = pool->p_get_chunk(pool, chunk);
+ if (ret == 0)
+ pool->available_elems -= 1 << chunk->order;
mutex_unlock(&pool->lock);
return ret;
@@ -579,6 +336,7 @@ void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
{
mutex_lock(&pool->lock);
pool->p_put_chunk(pool, chunk);
+ pool->available_elems += 1 << chunk->order;
mutex_unlock(&pool->lock);
}
@@ -599,17 +357,13 @@ mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_
pool->tbl_type = pool_attr->table_type;
pool->opt_type = pool_attr->opt_type;
- /* Support general db */
- if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
- res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
- else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
- res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
- else
+ if (pool->flags & MLX5HWS_POOL_FLAG_BUDDY)
res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BITMAP;
pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->available_elems = 1 << pool_attr->alloc_log_sz;
if (hws_pool_db_init(pool, res_db_type))
goto free_pool;
@@ -623,18 +377,17 @@ free_pool:
return NULL;
}
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
{
- int i;
-
mutex_destroy(&pool->lock);
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
- if (pool->resource[i])
- hws_pool_resource_free(pool, i);
+ if (pool->available_elems != 1 << pool->alloc_log_sz)
+ mlx5hws_err(pool->ctx, "Attempting to destroy non-empty pool\n");
+
+ if (pool->resource)
+ hws_pool_resource_free(pool);
hws_pool_db_unint(pool);
kfree(pool);
- return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
index 621298b352b2..33e33d5f1fb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
@@ -6,16 +6,12 @@
#define MLX5HWS_POOL_STC_LOG_SZ 15
-#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
-
enum mlx5hws_pool_type {
MLX5HWS_POOL_TYPE_STE,
MLX5HWS_POOL_TYPE_STC,
};
struct mlx5hws_pool_chunk {
- u32 resource_idx;
- /* Internal offset, relative to base index */
int offset;
int order;
};
@@ -27,35 +23,17 @@ struct mlx5hws_pool_resource {
};
enum mlx5hws_pool_flags {
- /* Only a one resource in that pool */
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
- MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
- /* No sharing resources between chunks */
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
- /* All objects are in the same size */
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
- /* Managed by buddy allocator */
- MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
- /* Allocate pool_type memory on pool creation */
- MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
-
- /* These values should be used by the caller */
- MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
- MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
- MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
- MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
- MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+ /* Managed by a buddy allocator. If this is not set, only
+ * allocations of order 0 are supported.
+ */
+ MLX5HWS_POOL_FLAG_BUDDY = BIT(0),
};
enum mlx5hws_pool_optimize {
MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+ MLX5HWS_POOL_OPTIMIZE_MAX = 0x3,
};
struct mlx5hws_pool_attr {
@@ -68,34 +46,17 @@ struct mlx5hws_pool_attr {
};
enum mlx5hws_db_type {
- /* Uses for allocating chunk of big memory, each element has its own resource in the FW*/
- MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
- /* One resource only, all the elements are with same one size */
- MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
- /* Many resources, the memory allocated with buddy mechanism */
+ /* Uses a bitmap, supports only allocations of order 0. */
+ MLX5HWS_POOL_DB_TYPE_BITMAP,
+ /* Entries are managed using a buddy mechanism. */
MLX5HWS_POOL_DB_TYPE_BUDDY,
};
-struct mlx5hws_buddy_manager {
- struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
-struct mlx5hws_pool_elements {
- u32 num_of_elements;
- unsigned long *bitmap;
- u32 log_size;
- bool is_full;
-};
-
-struct mlx5hws_element_manager {
- struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
struct mlx5hws_pool_db {
enum mlx5hws_db_type type;
union {
- struct mlx5hws_element_manager *element_manager;
- struct mlx5hws_buddy_manager *buddy_manager;
+ unsigned long *bitmap;
+ struct mlx5hws_buddy_mem *buddy;
};
};
@@ -111,11 +72,11 @@ struct mlx5hws_pool {
enum mlx5hws_pool_flags flags;
struct mutex lock; /* protect the pool */
size_t alloc_log_sz;
+ size_t available_elems;
enum mlx5hws_table_type tbl_type;
enum mlx5hws_pool_optimize opt_type;
- struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
- struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
- /* DB */
+ struct mlx5hws_pool_resource *resource;
+ struct mlx5hws_pool_resource *mirror_resource;
struct mlx5hws_pool_db db;
/* Functions */
mlx5hws_pool_unint_db p_db_uninit;
@@ -127,7 +88,7 @@ struct mlx5hws_pool *
mlx5hws_pool_create(struct mlx5hws_context *ctx,
struct mlx5hws_pool_attr *pool_attr);
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
struct mlx5hws_pool_chunk *chunk);
@@ -135,17 +96,37 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
struct mlx5hws_pool_chunk *chunk);
-static inline u32
-mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static inline u32 mlx5hws_pool_get_base_id(struct mlx5hws_pool *pool)
{
- return pool->resource[chunk->resource_idx]->base_id;
+ return pool->resource->base_id;
}
-static inline u32
-mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static inline u32 mlx5hws_pool_get_base_mirror_id(struct mlx5hws_pool *pool)
{
- return pool->mirror_resource[chunk->resource_idx]->base_id;
+ return pool->mirror_resource->base_id;
+}
+
+static inline bool
+mlx5hws_pool_empty(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == 0;
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+static inline bool
+mlx5hws_pool_full(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == (1 << pool->alloc_log_sz);
+ mutex_unlock(&pool->lock);
+
+ return ret;
}
#endif /* MLX5HWS_POOL_H_ */
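
A minimal usage sketch of the new element accounting (assumed semantics, not taken from the patch): a pool starts full, an allocation of order k consumes 1 << k elements, and freeing returns them:

	struct mlx5hws_pool_chunk chunk = { .order = 0 };

	WARN_ON(!mlx5hws_pool_full(pool));      /* freshly created pool */
	if (!mlx5hws_pool_chunk_alloc(pool, &chunk)) {
		/* available_elems dropped by 1 << chunk.order */
		mlx5hws_pool_chunk_free(pool, &chunk);
	}
	WARN_ON(!mlx5hws_pool_full(pool));      /* accounting restored */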
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index a27a2d5ffc7b..5342a4cc7194 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -195,44 +195,30 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule,
}
}
-static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule)
+static int mlx5hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+ u16 queue_id, bool skip_rx,
+ bool skip_tx)
{
struct mlx5hws_matcher *matcher = rule->matcher;
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_pool_chunk ste = {0};
- int ret;
-
- action_ste = &matcher->action_ste;
- ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
- ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
- if (unlikely(ret)) {
- mlx5hws_err(matcher->tbl->ctx,
- "Failed to allocate STE for rule actions");
- return ret;
- }
-
- rule->action_ste.pool = matcher->action_ste.pool;
- rule->action_ste.num_stes = matcher->action_ste.max_stes;
- rule->action_ste.index = ste.offset;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
- return 0;
+ rule->action_ste.ste.order =
+ ilog2(roundup_pow_of_two(matcher->num_of_action_stes));
+ return mlx5hws_action_ste_chunk_alloc(&ctx->action_ste_pool[queue_id],
+ skip_rx, skip_tx,
+ &rule->action_ste);
}
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste)
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste)
{
- struct mlx5hws_pool_chunk ste = {0};
-
- if (!action_ste->num_stes)
+ if (!action_ste->action_tbl)
return;
- ste.order = ilog2(roundup_pow_of_two(action_ste->num_stes));
- ste.offset = action_ste->index;
-
/* This release is safe only when the rule match STE was deleted
* (when the rule is being deleted) or replaced with the new STE that
* isn't pointing to old action STEs (when the rule is being updated).
*/
- mlx5hws_pool_chunk_free(action_ste->pool, &ste);
+ mlx5hws_action_ste_chunk_free(action_ste);
}
static void hws_rule_create_init(struct mlx5hws_rule *rule,
@@ -250,22 +236,15 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
rule->rtc_0 = 0;
rule->rtc_1 = 0;
- rule->action_ste.pool = NULL;
- rule->action_ste.num_stes = 0;
- rule->action_ste.index = -1;
-
rule->status = MLX5HWS_RULE_STATUS_CREATING;
} else {
rule->status = MLX5HWS_RULE_STATUS_UPDATING;
+ /* Save the old action STE info so we can free it after writing
+ * new action STEs and a corresponding match STE.
+ */
+ rule->old_action_ste = rule->action_ste;
}
- /* Initialize the old action STE info - shallow-copy action_ste.
- * In create flow this will set old_action_ste fields to initial values.
- * In update flow this will save the existing action STE info,
- * so that we will later use it to free old STEs.
- */
- rule->old_action_ste = rule->action_ste;
-
rule->pending_wqes = 0;
/* Init default send STE attributes */
@@ -277,7 +256,6 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* Init default action apply */
apply->tbl_type = tbl->type;
apply->common_res = &ctx->common_res;
- apply->jump_to_action_stc = matcher->action_ste.stc.offset;
apply->require_dep = 0;
}
@@ -353,17 +331,24 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (action_stes) {
/* Allocate action STEs for rules that need more than match STE */
- ret = hws_rule_alloc_action_ste(rule);
+ ret = mlx5hws_rule_alloc_action_ste(rule, attr->queue_id,
+ !!ste_attr.rtc_0,
+ !!ste_attr.rtc_1);
if (ret) {
mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
mlx5hws_send_abort_new_dep_wqe(queue);
return ret;
}
+ apply.jump_to_action_stc =
+ rule->action_ste.action_tbl->stc.offset;
/* Skip RX/TX based on the dep_wqe init */
- ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0_id : 0;
- ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1_id : 0;
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ?
+ rule->action_ste.action_tbl->rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ?
+ rule->action_ste.action_tbl->rtc_1_id : 0;
/* Action STEs are written to a specific index last to first */
- ste_attr.direct_index = rule->action_ste.index + action_stes;
+ ste_attr.direct_index =
+ rule->action_ste.ste.offset + action_stes;
apply.next_direct_idx = ste_attr.direct_index;
} else {
apply.next_direct_idx = 0;
@@ -670,6 +655,124 @@ int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
return 0;
}
+static u8 hws_rule_ethertype_to_matcher_ipv(u32 ethertype)
+{
+ switch (ethertype) {
+ case ETH_P_IP:
+ return MLX5HWS_MATCHER_IPV_4;
+ case ETH_P_IPV6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static u8 hws_rule_ip_version_to_matcher_ipv(u32 ip_version)
+{
+ switch (ip_version) {
+ case 4:
+ return MLX5HWS_MATCHER_IPV_4;
+ case 6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static int hws_rule_check_outer_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 outer_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv, ver;
+
+ if (matcher->matches_outer_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ethertype);
+ outer_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_outer_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ip_version);
+ outer_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ether != outer_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent outer ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ outer_ipv = outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ outer_ipv_ether : outer_ipv_ip;
+ if (outer_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->outer_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv != matcher->outer_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on outer IP version\n");
+ return -EINVAL;
+ }
+ matcher->outer_ip_version = outer_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_inner_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 inner_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv, ver;
+
+ if (matcher->matches_inner_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ethertype);
+ inner_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_inner_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ip_version);
+ inner_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ether != inner_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent inner ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ inner_ipv = inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ inner_ipv_ether : inner_ipv_ip;
+ if (inner_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->inner_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv != matcher->inner_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on inner IP version\n");
+ return -EINVAL;
+ }
+ matcher->inner_ip_version = inner_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ int ret;
+
+ ret = hws_rule_check_outer_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_check_inner_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
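The outer and inner checks are structurally identical; a hedged sketch of the common core both functions implement (hws_ipv_reconcile is an illustrative name, not part of the patch):

static int hws_ipv_reconcile(u8 ipv_ether, u8 ipv_ip, u8 *matcher_ipv)
{
	u8 ipv;

	/* Ethertype and ip_version, when both matched on, must agree. */
	if (ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
	    ipv_ip != MLX5HWS_MATCHER_IPV_UNSET && ipv_ether != ipv_ip)
		return -EINVAL;

	/* The rule's version must also agree with what the matcher saw. */
	ipv = ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ? ipv_ether : ipv_ip;
	if (ipv != MLX5HWS_MATCHER_IPV_UNSET &&
	    *matcher_ipv != MLX5HWS_MATCHER_IPV_UNSET && ipv != *matcher_ipv)
		return -EINVAL;

	*matcher_ipv = ipv;
	return 0;
}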
int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
u8 mt_idx,
u32 *match_param,
@@ -680,6 +783,10 @@ int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
{
int ret;
+ ret = hws_rule_check_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
rule_handle->matcher = matcher;
ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index b5ee94ac449b..1c47a9c11572 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -43,12 +43,6 @@ struct mlx5hws_rule_match_tag {
};
};
-struct mlx5hws_rule_action_ste_info {
- struct mlx5hws_pool *pool;
- int index; /* STE array index */
- u8 num_stes;
-};
-
struct mlx5hws_rule_resize_info {
u32 rtc_0;
u32 rtc_1;
@@ -64,8 +58,8 @@ struct mlx5hws_rule {
struct mlx5hws_rule_match_tag tag;
struct mlx5hws_rule_resize_info *resize_info;
};
- struct mlx5hws_rule_action_ste_info action_ste;
- struct mlx5hws_rule_action_ste_info old_action_ste;
+ struct mlx5hws_action_ste_chunk action_ste;
+ struct mlx5hws_action_ste_chunk old_action_ste;
u32 rtc_0; /* The RTC into which the STE was inserted */
u32 rtc_1; /* The RTC into which the STE was inserted */
u8 status; /* enum mlx5hws_rule_status */
@@ -75,7 +69,7 @@ struct mlx5hws_rule {
*/
};
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste);
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste);
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
void *queue, void *user_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index cb6abc4ab7df..c4b22be19a9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -344,18 +344,133 @@ hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
}
}
+static void hws_send_engine_dump_error_cqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5_cqe64 *cqe)
+{
+ u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0;
+ struct mlx5hws_context *ctx = priv->rule->matcher->tbl->ctx;
+ u32 opcode = cqe ? get_cqe_opcode(cqe) : 0;
+ struct mlx5hws_rule *rule = priv->rule;
+
+ /* If something bad happens and lots of rules are failing, we don't
+ * want to pollute dmesg. Print only the first bad cqe per engine,
+ * the one that started the avalanche.
+ */
+ if (queue->error_cqe_printed)
+ return;
+
+ queue->error_cqe_printed = true;
+
+ if (mlx5hws_rule_move_in_progress(rule))
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion moving rule: phase %s, wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_WRITING ? "WRITING" :
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_DELETING ? "DELETING" :
+ "UNKNOWN",
+ rule->pending_wqes);
+ else
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion %s (%d), wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->status ==
+ MLX5HWS_RULE_STATUS_CREATING ? "CREATING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_DELETING ? "DELETING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_FAILING ? "FAILING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_UPDATING ? "UPDATING" : "NA",
+ rule->status,
+ rule->pending_wqes);
+
+ mlx5hws_err(ctx, " rule 0x%08llx: matcher 0x%llx %s\n",
+ HWS_PTR_TO_ID(rule),
+ HWS_PTR_TO_ID(rule->matcher),
+ (rule->matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED) ?
+ "(isolated)" : "");
+
+ if (!cqe) {
+ mlx5hws_err(ctx, " rule 0x%08llx: no CQE\n",
+ HWS_PTR_TO_ID(rule));
+ return;
+ }
+
+ mlx5hws_err(ctx, " rule 0x%08llx: cqe->opcode = %d %s\n",
+ HWS_PTR_TO_ID(rule), opcode,
+ opcode == MLX5_CQE_REQ ? "(MLX5_CQE_REQ)" :
+ opcode == MLX5_CQE_REQ_ERR ? "(MLX5_CQE_REQ_ERR)" : " ");
+
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_error_syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[16]);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_syndrome_type = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[17] >> 4);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- vendor_err_synd = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->vendor_err_synd);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->syndrome);
+ }
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->byte_cnt = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->byte_cnt));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-- UPDATE STATUS = %s\n",
+ HWS_PTR_TO_ID(rule),
+ (be32_to_cpu(cqe->byte_cnt) & 0x80000000) ?
+ "FAILURE" : "SUCCESS");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------- SYNDROME = %s\n",
+ HWS_PTR_TO_ID(rule),
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 1) ?
+ "SET_FLOW_FAIL" :
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 2) ?
+ "DISABLE_FLOW_FAIL" : "UNKNOWN");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->sop_drop_qpn = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->sop_drop_qpn));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-send wqe opcode = 0x%02x %s\n",
+ HWS_PTR_TO_ID(rule), wqe_opcode,
+ wqe_opcode == MLX5HWS_WQE_OPCODE_TBL_ACCESS ?
+ "(MLX5HWS_WQE_OPCODE_TBL_ACCESS)" : "(UNKNOWN)");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------------ qpn = 0x%06x\n",
+ HWS_PTR_TO_ID(rule),
+ be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff);
+}
+
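For reference, the dump helper above unpacks two values from the big-endian sop_drop_qpn word; the extraction, shown standalone:

u32 sop_drop_qpn = be32_to_cpu(cqe->sop_drop_qpn);
u8 wqe_opcode = sop_drop_qpn >> 24;	/* opcode of the send WQE */
u32 qpn = sop_drop_qpn & 0xffffff;	/* QP number in the low 24 bits */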
static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
struct mlx5hws_send_ring_priv *priv,
u16 wqe_cnt,
- enum mlx5hws_flow_op_status *status)
+ enum mlx5hws_flow_op_status *status,
+ struct mlx5_cqe64 *cqe)
{
priv->rule->pending_wqes--;
- if (*status == MLX5HWS_FLOW_OP_ERROR) {
+ if (unlikely(*status == MLX5HWS_FLOW_OP_ERROR)) {
if (priv->retry_id) {
+ /* If there is a retry_id, then it's not an error yet:
+ * retry inserting this rule in the collision RTC.
+ */
hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
return;
}
+ hws_send_engine_dump_error_cqe(queue, priv, cqe);
/* Some part of the rule failed */
priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
*priv->used_id = 0;
@@ -420,7 +535,8 @@ static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
if (priv->user_data) {
if (priv->rule) {
- hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+ hws_send_engine_update_rule(queue, priv, wqe_cnt,
+ &status, cqe);
/* Completion is provided on the last rule WQE */
if (priv->rule->pending_wqes)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index f833092235c1..3fb8e99309b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -140,6 +140,7 @@ struct mlx5hws_send_engine {
u16 used_entries;
u16 num_entries;
bool err;
+ bool error_cqe_printed;
struct mutex lock; /* Protects the send engine */
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index ab1297531232..568f691733f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -342,10 +342,10 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
}
-static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
- u32 ft_id,
- u32 fw_ft_type,
- u32 next_ft_id)
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
{
struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
@@ -389,10 +389,10 @@ int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
if (dst_tbl) {
if (list_empty(&dst_tbl->matchers_list)) {
/* Connect src_tbl last_ft to dst_tbl start anchor */
- ret = hws_table_ft_set_next_ft(src_tbl->ctx,
- last_ft_id,
- src_tbl->fw_ft_type,
- dst_tbl->ft_id);
+ ret = mlx5hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
index dd50420eec9e..0400cce0c317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
@@ -65,4 +65,9 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
u32 rtc_0_id,
u32 rtc_1_id);
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id);
+
#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 8007d3f523c9..f367997ab61e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -833,15 +833,21 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
return steering_caps;
}
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
+ struct mlx5dr_action *dr_action;
+
switch (pkt_reformat->reformat_type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
case MLX5_REFORMAT_TYPE_INSERT_HDR:
- return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action);
+ dr_action = pkt_reformat->fs_dr_action.dr_action;
+ *reformat_id = mlx5dr_action_get_pkt_reformat_id(dr_action);
+ return 0;
}
return -EOPNOTSUPP;
}
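Callers now receive the reformat id through an out parameter instead of a bare u32 return, so failures are distinguishable from a valid id of 0. A hypothetical call site:

u32 reformat_id;
int err;

err = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, &reformat_id);
if (err)
	return err;	/* -EOPNOTSUPP for reformat types with no id */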
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
index 99a3b2eff6b8..f869f2daefbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
@@ -38,7 +38,9 @@ struct mlx5_fs_dr_table {
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
@@ -49,9 +51,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
return NULL;
}
-static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+static inline int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
- return 0;
+ return -EOPNOTSUPP;
}
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d10d4c396040..da5c24fc7b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -465,19 +465,22 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
@@ -519,19 +522,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index fb2e5b844c15..d76d7a945899 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -447,8 +447,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
- if (phy_irq < 0) {
- dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
+ if (phy_irq == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out;
+ } else if (phy_irq < 0) {
phy_irq = PHY_POLL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index e746cd9c68ed..eac9a14a6058 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -205,11 +205,11 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_params mlxsw_thermal_params = {
+static const struct thermal_zone_params mlxsw_thermal_params = {
.no_hwmon = true,
};
-static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_ops = {
.should_bind = mlxsw_thermal_should_bind,
.get_temp = mlxsw_thermal_get_temp,
};
@@ -252,7 +252,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_module_temp_get,
};
@@ -280,7 +280,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3080ea032e7f..618957d65663 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1159,63 +1159,31 @@ static int mlxsw_sp_set_features(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
- int err;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
+ config, extack);
}
-static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
- int err;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
+ config);
}
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct hwtstamp_config config = {0};
-
- mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
-}
-
-static int
-mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct kernel_hwtstamp_config config = {};
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
- case SIOCGHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config,
+ NULL);
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1232,7 +1200,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
+ .ndo_hwtstamp_get = mlxsw_sp_port_hwtstamp_get,
+ .ndo_hwtstamp_set = mlxsw_sp_port_hwtstamp_set,
};
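With ndo_hwtstamp_get/set, the core performs the user-space copies that the old SIOC[SG]HWTSTAMP ioctl handler did by hand. A minimal sketch of the new contract for a hypothetical driver (the foo_* names and priv layout are illustrative only):

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

struct foo_priv {
	struct kernel_hwtstamp_config hwtstamp_config;
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(dev);

	*config = priv->hwtstamp_config;	/* core copies it back to user space */
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tx_type");
		return -ERANGE;
	}

	priv->hwtstamp_config = *config;
	return 0;
}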
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 37cd1d002b3b..b03ff9e044f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -233,9 +233,10 @@ struct mlxsw_sp_ptp_ops {
u16 local_port);
int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
@@ -351,7 +352,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_flow_block *eg_flow_block;
struct {
struct delayed_work shaper_dw;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
u16 ing_types;
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 3f64cdbabfa3..0a8fb9c842d3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -262,7 +262,7 @@ err_port_pause_configure:
}
struct mlxsw_sp_port_hw_stats {
- char str[ETH_GSTRING_LEN];
+ char str[ETH_GSTRING_LEN] __nonstring;
u64 (*getter)(const char *payload);
bool cells_bytes;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index ca8b9d18fbb9..e8182dd76c7d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,7 +46,7 @@ struct mlxsw_sp2_ptp_state {
refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
* enabled.
*/
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
struct mutex lock; /* Protects 'config' and HW configuration. */
};
@@ -1083,14 +1083,14 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
}
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
*config = mlxsw_sp_port->ptp.hwtstamp_config;
return 0;
}
static int
-mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp1_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1246,7 +1246,8 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
}
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
enum hwtstamp_rx_filters rx_filter;
u16 ing_types;
@@ -1270,7 +1271,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
return 0;
@@ -1451,7 +1452,7 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
@@ -1465,7 +1466,7 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp2_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1542,7 +1543,7 @@ static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1556,7 +1557,7 @@ static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
}
static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1571,7 +1572,7 @@ static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
u16 ing_types, u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1592,7 +1593,7 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1614,11 +1615,12 @@ err_ptp_disable:
}
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
+ struct kernel_hwtstamp_config new_config;
struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
- struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
@@ -1652,7 +1654,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->ptp.ing_types = new_ing_types;
mlxsw_sp_port->ptp.egr_types = new_egr_types;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
mutex_unlock(&ptp_state->lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 102db9060135..df37f1470830 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -34,10 +34,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u64 timestamp);
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
@@ -65,10 +66,11 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb, u16 local_port);
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
@@ -117,14 +119,15 @@ mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
static inline int
mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -181,14 +184,15 @@ static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
static inline int
mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 464821dd492d..a2033837182e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
.rif = rif,
};
+ if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
+ return 0;
+
neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
if (rms.err)
goto err_arp;
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index 831921b9d4d5..3ba527514f1e 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -27,6 +27,7 @@ config FBNIC
select NET_DEVLINK
select PAGE_POOL
select PHYLINK
+ select PLDMFW
help
This driver supports Meta Platforms Host Network Interface.
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index de6b1a340f55..65815d4f379e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -19,6 +19,7 @@
struct fbnic_napi_vector;
#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MBX_CMPL_SLOTS 4
struct fbnic_dev {
struct device *dev;
@@ -42,7 +43,7 @@ struct fbnic_dev {
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
- struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_fw_completion *cmpl_data[FBNIC_MBX_CMPL_SLOTS];
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -81,6 +82,9 @@ struct fbnic_dev {
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
+
+ /* Lock protecting access to hw_stats */
+ spinlock_t hw_stats_lock;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 51bee8072420..36393a17d92d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -397,6 +397,15 @@ enum {
#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+#define FBNIC_TCE_TTI_CM_DROP_PKTS 0x0403e /* 0x100f8 */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_L 0x0403f /* 0x100fc */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_H 0x04040 /* 0x10100 */
+#define FBNIC_TCE_TTI_FRAME_DROP_PKTS 0x04041 /* 0x10104 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_L 0x04042 /* 0x10108 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_H 0x04043 /* 0x1010c */
+#define FBNIC_TCE_TBI_DROP_PKTS 0x04044 /* 0x10110 */
+#define FBNIC_TCE_TBI_DROP_BYTE_L 0x04045 /* 0x10114 */
+
#define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */
#define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0)
enum {
@@ -432,6 +441,11 @@ enum {
#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
+#define FBNIC_TMI_DROP_PKTS 0x04402 /* 0x11008 */
+#define FBNIC_TMI_DROP_BYTE_L 0x04403 /* 0x1100c */
+#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */
+#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */
+#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */
#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
/* Precision Time Protocol Registers */
@@ -485,6 +499,14 @@ enum {
FBNIC_RXB_FIFO_INDICES = 8
};
+enum {
+ FBNIC_RXB_INTF_NET = 0,
+ FBNIC_RXB_INTF_RBT = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_INTF_INDICES = 4
+};
+
#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */
#define FBNIC_RXB_CT_SIZE_CNT 8
#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0)
@@ -866,6 +888,12 @@ enum {
#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */
#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */
+/* Tx Work Queue Statistics Registers */
+#define FBNIC_QUEUE_TWQ0_PKT_CNT 0x062 /* 0x188 */
+#define FBNIC_QUEUE_TWQ0_ERR_CNT 0x063 /* 0x18c */
+#define FBNIC_QUEUE_TWQ1_PKT_CNT 0x072 /* 0x1c8 */
+#define FBNIC_QUEUE_TWQ1_ERR_CNT 0x073 /* 0x1cc */
+
/* Tx Completion Queue Registers */
#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */
#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0)
@@ -955,6 +983,12 @@ enum {
FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2,
};
+/* Rx Per CQ Statistics Counters */
+#define FBNIC_QUEUE_RDE_PKT_CNT 0x2a2 /* 0xa88 */
+#define FBNIC_QUEUE_RDE_PKT_ERR_CNT 0x2a3 /* 0xa8c */
+#define FBNIC_QUEUE_RDE_CQ_DROP_CNT 0x2a4 /* 0xa90 */
+#define FBNIC_QUEUE_RDE_BDQ_DROP_CNT 0x2a5 /* 0xa94 */
+
/* Rx Interrupt Manager Registers */
#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */
#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index 0072d612215e..4c4938eedd7b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -3,10 +3,12 @@
#include <linux/unaligned.h>
#include <linux/pci.h>
+#include <linux/pldmfw.h>
#include <linux/types.h>
#include <net/devlink.h>
#include "fbnic.h"
+#include "fbnic_tlv.h"
#define FBNIC_SN_STR_LEN 24
@@ -109,8 +111,262 @@ static int fbnic_devlink_info_get(struct devlink *devlink,
return 0;
}
+static bool
+fbnic_pldm_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pldmfw_desc_tlv *desc;
+ u32 anti_rollback_ver = 0;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+
+ /* First, use the standard PCI matching function */
+ if (!pldmfw_op_pci_match_record(context, record))
+ return false;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ /* If PCI match is successful, check for vendor-specific descriptors */
+ list_for_each_entry(desc, &record->descs, entry) {
+ if (desc->type != PLDM_DESC_ID_VENDOR_DEFINED)
+ continue;
+
+ if (desc->size < 21 || desc->data[0] != 1 ||
+ desc->data[1] != 15)
+ continue;
+
+ if (memcmp(desc->data + 2, "AntiRollbackVer", 15) != 0)
+ continue;
+
+ anti_rollback_ver = get_unaligned_le32(desc->data + 17);
+ break;
+ }
+
+ /* Compare versions and return error if they do not match */
+ if (anti_rollback_ver < fbd->fw_cap.anti_rollback_version) {
+ char buf[128];
+
+ snprintf(buf, sizeof(buf),
+ "New firmware anti-rollback version (0x%x) is older than device version (0x%x)!",
+ anti_rollback_ver, fbd->fw_cap.anti_rollback_version);
+ devlink_flash_update_status_notify(devlink, buf,
+ "Anti-Rollback", 0, 0);
+
+ return false;
+ }
+
+ return true;
+}
+
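The magic numbers in the vendor-descriptor walk encode this assumed layout (reconstructed from the checks above, not from a spec):

/*
 * Vendor-defined PLDM descriptor, as matched above:
 *   data[0]       title string type, 1 = ASCII
 *   data[1]       title string length, 15
 *   data[2..16]   "AntiRollbackVer"
 *   data[17..20]  anti-rollback version, little-endian u32
 * hence the desc->size < 21 minimum-length check.
 */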
+static int
+fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
+{
+ struct fbnic_fw_completion *cmpl;
+ int err;
+
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl,
+ component->identifier,
+ component->component_size);
+ if (err)
+ goto cmpl_free;
+
+ /* Wait for the firmware to ack the upgrade start */
+ if (wait_for_completion_timeout(&cmpl->done, 10 * HZ))
+ err = cmpl->result;
+ else
+ err = -ETIMEDOUT;
+
+ fbnic_fw_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static int
+fbnic_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ const u8 *data = component->component_data;
+ const u32 size = component->component_size;
+ struct fbnic_fw_completion *cmpl;
+ const char *component_name;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+ u32 offset = 0;
+ u32 length = 0;
+ char buf[32];
+ int err;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ switch (component->identifier) {
+ case QSPI_SECTION_CMRT:
+ component_name = "boot1";
+ break;
+ case QSPI_SECTION_CONTROL_FW:
+ component_name = "boot2";
+ break;
+ case QSPI_SECTION_OPTION_ROM:
+ component_name = "option-rom";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "Unknown component ID %u!",
+ component->identifier);
+ devlink_flash_update_status_notify(devlink, buf, NULL, 0,
+ size);
+ return -EINVAL;
+ }
+
+ /* Once firmware receives the request to start upgrading it responds
+ * with two messages:
+ * 1. An ACK that it received the message and possible error code
+ * indicating that an upgrade is not currently possible.
+ * 2. A request for the first chunk of data
+ *
+ * Set up completions for the write before issuing the start message so
+ * the driver can catch both messages.
+ */
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_mbx_set_cmpl(fbd, cmpl);
+ if (err)
+ goto cmpl_free;
+
+ devlink_flash_update_timeout_notify(devlink, "Initializing",
+ component_name, 15);
+ err = fbnic_flash_start(fbd, component);
+ if (err)
+ goto err_no_msg;
+
+ while (offset < size) {
+ if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ err = cmpl->result;
+ if (err)
+ break;
+
+ /* Verify firmware is requesting the next chunk in the sequence. */
+ if (cmpl->u.fw_update.offset != offset + length) {
+ err = -EFAULT;
+ break;
+ }
+
+ offset = cmpl->u.fw_update.offset;
+ length = cmpl->u.fw_update.length;
+
+ if (length > TLV_MAX_DATA || offset + length > size) {
+ err = -EFAULT;
+ break;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component_name,
+ offset, size);
+
+ /* Mailbox will set length to 0 once it receives the finish
+ * message.
+ */
+ if (!length)
+ continue;
+
+ reinit_completion(&cmpl->done);
+ err = fbnic_fw_xmit_fw_write_chunk(fbd, data, offset, length,
+ 0);
+ if (err)
+ break;
+ }
+
+ if (err) {
+ fbnic_fw_xmit_fw_write_chunk(fbd, NULL, 0, 0, err);
+err_no_msg:
+ snprintf(buf, sizeof(buf), "Mailbox encountered error %d!",
+ err);
+ devlink_flash_update_status_notify(devlink, buf,
+ component_name, 0, 0);
+ }
+
+ fbnic_fw_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
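In outline, the handshake implemented above is firmware-driven; after the start request the host only ever answers requests:

/*
 * Host side of the flash loop (sketch):
 *
 *   send FW_START_UPGRADE_REQ(section, size)
 *   while (offset < size) {
 *           wait for FW_WRITE_CHUNK_REQ(offset, length);
 *           if (length == 0)
 *                   continue;   // finish message; loop ends at offset == size
 *           send FW_WRITE_CHUNK_RESP(data + offset, length);
 *   }
 *   on any error: send FW_WRITE_CHUNK_RESP carrying the error code
 */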
+static const struct pldmfw_ops fbnic_pldmfw_ops = {
+ .match_record = fbnic_pldm_match_record,
+ .flash_component = fbnic_flash_component,
+};
+
+static int
+fbnic_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ const struct firmware *fw = params->fw;
+ struct device *dev = fbd->dev;
+ struct pldmfw context;
+ char *err_msg;
+ int err;
+
+ context.ops = &fbnic_pldmfw_ops;
+ context.dev = dev;
+
+ err = pldmfw_flash_image(&context, fw);
+ if (err) {
+ switch (err) {
+ case -EINVAL:
+ err_msg = "Invalid image";
+ break;
+ case -EOPNOTSUPP:
+ err_msg = "Unsupported image";
+ break;
+ case -ENOMEM:
+ err_msg = "Out of memory";
+ break;
+ case -EFAULT:
+ err_msg = "Invalid header";
+ break;
+ case -ENOENT:
+ err_msg = "No matching record";
+ break;
+ case -ENODEV:
+ err_msg = "No matching device";
+ break;
+ case -ETIMEDOUT:
+ err_msg = "Timed out waiting for reply";
+ break;
+ default:
+ err_msg = "Unknown error";
+ break;
+ }
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to flash PLDM Image: %s (error: %d)",
+ err_msg, err);
+ }
+
+ return err;
+}
+
static const struct devlink_ops fbnic_devlink_ops = {
- .info_get = fbnic_devlink_info_get,
+ .info_get = fbnic_devlink_info_get,
+ .flash_update = fbnic_devlink_flash_update,
};
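With .flash_update wired up, an image can be pushed from user space through the standard devlink interface, e.g. (device address and file name illustrative):

  # devlink dev flash pci/0000:01:00.0 file fbnic_fw.pldm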
void fbnic_devlink_free(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 0a751a2aaf73..5c7556c8c4c5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -27,6 +27,19 @@ struct fbnic_stat {
FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
+ /* TTI */
+ FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
+ FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
+ FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
+ FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
+ FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
+ FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),
+
+ /* TMI */
+ FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
+ FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
+ FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),
+
/* RPC */
FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
@@ -39,7 +52,64 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
};
#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
-#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN
+
+#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
+ FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),
+
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
+};
+
+#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
+
+#define FBNIC_RXB_FIFO_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
+};
+
+#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
+
+#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
+};
+
+#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
+
+#define FBNIC_HW_Q_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)
+
+static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
+ FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
+};
+
+#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
+#define FBNIC_HW_STATS_LEN \
+ (FBNIC_HW_FIXED_STATS_LEN + \
+ FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
+ FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
+ FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
+ FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
static void
fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -298,31 +368,125 @@ err_free_clone:
return err;
}
-static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_enqueue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_fifo_stats;
+ for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
{
+ const struct fbnic_stat *stat;
int i;
+ stat = fbnic_gstrings_rxb_dequeue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ const struct fbnic_stat *stat;
+ int i, idx;
+
switch (sset) {
case ETH_SS_STATS:
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++)
+ for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
+ fbnic_get_rxb_enqueue_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
+ fbnic_get_rxb_dequeue_strings(&data, i);
+
+ for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) {
+ stat = fbnic_gstrings_hw_q_stats;
+
+ for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
+ ethtool_sprintf(&data, stat->string, idx);
+ }
break;
}
}
+static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
+ const void *base, int len, u64 **data)
+{
+ while (len--) {
+ u8 *curr = (u8 *)base + stat->offset;
+
+ **data = *(u64 *)curr;
+
+ stat++;
+ (*data)++;
+ }
+}
+
static void fbnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct fbnic_net *fbn = netdev_priv(dev);
- const struct fbnic_stat *stat;
+ struct fbnic_dev *fbd = fbn->fbd;
int i;
fbnic_get_hw_stats(fbn->fbd);
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
- stat = &fbnic_gstrings_hw_stats[i];
- data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
+ FBNIC_HW_FIXED_STATS_LEN, &data);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_enqueue_stats *enq;
+
+ enq = &fbd->hw_stats.rxb.enq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
+ enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
+ const struct fbnic_rxb_fifo_stats *fifo;
+
+ fifo = &fbd->hw_stats.rxb.fifo[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
+ fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_dequeue_stats *deq;
+
+ deq = &fbd->hw_stats.rxb.deq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
+ deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_MAX_QUEUES; i++) {
+ const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];
+
+ fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
+ FBNIC_HW_Q_STATS_LEN, &data);
}
+ spin_unlock(&fbd->hw_stats_lock);
}
static int fbnic_get_sset_count(struct net_device *dev, int sset)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 3d9636a6c968..4521d0483d18 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -127,11 +127,8 @@ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
return -EBUSY;
addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
- if (dma_mapping_error(fbd->dev, addr)) {
- free_page((unsigned long)msg);
-
+ if (dma_mapping_error(fbd->dev, addr))
return -ENOSPC;
- }
mbx->buf_info[tail].msg = msg;
mbx->buf_info[tail].addr = addr;
@@ -237,6 +234,44 @@ static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
return err;
}
+static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ int free = -EXFULL;
+ int i;
+
+ if (!tx_mbx->ready)
+ return -ENODEV;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (!fbd->cmpl_data[i])
+ free = i;
+ else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
+ return -EEXIST;
+ }
+
+ if (free == -EXFULL)
+ return -EXFULL;
+
+ fbd->cmpl_data[free] = cmpl_data;
+
+ return 0;
+}
+
+static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] == cmpl_data) {
+ fbd->cmpl_data[i] = NULL;
+ break;
+ }
+ }
+}
+
static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
@@ -258,6 +293,19 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
tx_mbx->head = head;
}
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
struct fbnic_tlv_msg *msg,
struct fbnic_fw_completion *cmpl_data)
@@ -266,23 +314,20 @@ static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
int err;
spin_lock_irqsave(&fbd->fw_tx_lock, flags);
-
- /* If we are already waiting on a completion then abort */
- if (cmpl_data && fbd->cmpl_data) {
- err = -EBUSY;
- goto unlock_mbx;
+ if (cmpl_data) {
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ if (err)
+ goto unlock_mbx;
}
- /* Record completion location and submit request */
- if (cmpl_data)
- fbd->cmpl_data = cmpl_data;
-
err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
- /* If msg failed then clear completion data for next caller */
+ /* If we successfully reserved a completion slot and mapping the
+ * message failed, clear the completion data for the next caller.
+ */
if (err && cmpl_data)
- fbd->cmpl_data = NULL;
+ fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);
unlock_mbx:
spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
@@ -304,12 +349,18 @@ fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
{
struct fbnic_fw_completion *cmpl_data = NULL;
unsigned long flags;
+ int i;
spin_lock_irqsave(&fbd->fw_tx_lock, flags);
- if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) {
- cmpl_data = fbd->cmpl_data;
- kref_get(&fbd->cmpl_data->ref_count);
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] &&
+ fbd->cmpl_data[i]->msg_type == msg_type) {
+ cmpl_data = fbd->cmpl_data[i];
+ kref_get(&cmpl_data->ref_count);
+ break;
+ }
}
+
spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
return cmpl_data;
@@ -464,6 +515,7 @@ static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
FBNIC_TLV_ATTR_LAST
};
@@ -586,6 +638,9 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
fbd->fw_cap.all_multi = all_multi;
+ fbd->fw_cap.anti_rollback_version =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);
+
return 0;
}
@@ -708,6 +763,188 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ if (!len)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
+ len);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
+ if (!msg)
+ return -ENOMEM;
+
+ /* Report error to FW to cancel upgrade */
+ if (cancel_error) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
+ cancel_error);
+ if (err)
+ goto free_message;
+ }
+
+ if (data) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
+ data + offset, length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ u32 offset;
+ u32 length;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Pull length/offset pair and mark it as complete */
+ offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
+ cmpl_data->u.fw_update.offset = offset;
+ cmpl_data->u.fw_update.length = length;
+
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);
+
+ /* Close out the update by incrementing the offset by the length, which
+ * should match the total size of the component. Set the length to 0
+ * since no new chunks will be requested.
+ */
+ cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
+ cmpl_data->u.fw_update.length = 0;
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
/**
* fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
* @fbd: FBNIC device structure
@@ -792,6 +1029,15 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
+ fbnic_fw_start_upgrade_resp_index,
+ fbnic_fw_parse_fw_start_upgrade_resp),
+ FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
+ fbnic_fw_write_chunk_req_index,
+ fbnic_fw_parse_fw_write_chunk_req),
+ FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
+ fbnic_fw_finish_upgrade_req_index,
+ fbnic_fw_parse_fw_finish_upgrade_req),
FBNIC_TLV_PARSER(TSENE_READ_RESP,
fbnic_tsene_read_resp_index,
fbnic_fw_parse_tsene_read_resp),
@@ -921,10 +1167,16 @@ static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
{
- if (fbd->cmpl_data) {
- __fbnic_fw_evict_cmpl(fbd->cmpl_data);
- fbd->cmpl_data = NULL;
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
+
+ if (cmpl_data)
+ __fbnic_fw_evict_cmpl(cmpl_data);
}
+
+ memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
}
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
@@ -977,20 +1229,28 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fw_version, str_sz);
}
-void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl,
- u32 msg_type)
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
{
- fw_cmpl->msg_type = msg_type;
- init_completion(&fw_cmpl->done);
- kref_init(&fw_cmpl->ref_count);
+ struct fbnic_fw_completion *cmpl;
+
+ cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
+ if (!cmpl)
+ return NULL;
+
+ cmpl->msg_type = msg_type;
+ init_completion(&cmpl->done);
+ kref_init(&cmpl->ref_count);
+
+ return cmpl;
}
-void fbnic_fw_clear_compl(struct fbnic_dev *fbd)
+void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *fw_cmpl)
{
unsigned long flags;
spin_lock_irqsave(&fbd->fw_tx_lock, flags);
- fbd->cmpl_data = NULL;
+ fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
}
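
The alloc/clear/put trio replaces the old embedded-completion scheme; fbnic_mac_get_sensor_asic() further down in this patch shows the canonical pairing. Distilled into a sketch (error handling trimmed, timeout illustrative):

struct fbnic_fw_completion *cmpl;

cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
if (!cmpl)
	return -ENOMEM;

err = fbnic_fw_xmit_tsene_read_msg(fbd, cmpl);	/* installs cmpl in a slot */
if (!err)
	wait_for_completion_timeout(&cmpl->done, 2 * HZ);

fbnic_fw_clear_cmpl(fbd, cmpl);	/* release the mailbox slot */
fbnic_fw_put_cmpl(cmpl);	/* drop our kref; freed once the parser drops its own */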
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index a3618e7826c2..08bc4b918de7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -42,6 +42,7 @@ struct fbnic_fw_cap {
u8 all_multi : 1;
u8 link_speed;
u8 link_fec;
+ u32 anti_rollback_version;
};
struct fbnic_fw_completion {
@@ -51,6 +52,10 @@ struct fbnic_fw_completion {
int result;
union {
struct {
+ u32 offset;
+ u32 length;
+ } fw_update;
+ struct {
s32 millivolts;
s32 millidegrees;
} tsene;
@@ -59,17 +64,25 @@ struct fbnic_fw_completion {
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len);
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error);
int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data);
-void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data,
- u32 msg_type);
-void fbnic_fw_clear_compl(struct fbnic_dev *fbd);
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
+void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
@@ -86,6 +99,15 @@ do { \
#define fbnic_mk_fw_ver_str(_rev_id, _str) \
fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str))
+enum {
+ QSPI_SECTION_CMRT = 0,
+ QSPI_SECTION_CONTROL_FW = 1,
+ QSPI_SECTION_UCODE = 2,
+ QSPI_SECTION_OPTION_ROM = 3,
+ QSPI_SECTION_USER = 4,
+ QSPI_SECTION_INVALID,
+};
+
#define FW_HEARTBEAT_PERIOD (10 * HZ)
enum {
@@ -95,6 +117,12 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29,
FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
};
@@ -122,6 +150,7 @@ enum {
FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10,
FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11,
FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12,
+ FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION = 0x15,
FBNIC_FW_CAP_RESP_MSG_MAX
};
@@ -149,4 +178,25 @@ enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
+
+enum {
+ FBNIC_FW_START_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_START_UPGRADE_SECTION = 0x1,
+ FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2,
+ FBNIC_FW_START_UPGRADE_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_WRITE_CHUNK_OFFSET = 0x0,
+ FBNIC_FW_WRITE_CHUNK_LENGTH = 0x1,
+ FBNIC_FW_WRITE_CHUNK_DATA = 0x2,
+ FBNIC_FW_WRITE_CHUNK_ERROR = 0x3,
+ FBNIC_FW_WRITE_CHUNK_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_FINISH_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_FINISH_UPGRADE_MSG_MAX
+};
+
#endif /* _FBNIC_FW_H_ */
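
Taken together, the pattern for adding a message type to this mailbox is small; a sketch of the three pieces, following the upgrade messages above (names hypothetical):

/* Illustrative only: wiring for a new TLV message, mirroring the
 * FW_START_UPGRADE_RESP handling. FBNIC_FW_EXAMPLE_ERROR is made up.
 */
static const struct fbnic_tlv_index fbnic_fw_example_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_EXAMPLE_ERROR),	/* attribute layout */
	FBNIC_TLV_ATTR_LAST
};

static int fbnic_fw_parse_example_resp(void *opaque,
				       struct fbnic_tlv_msg **results)
{
	/* fta_get_sint()/fta_get_uint() pull attributes out of results[],
	 * then the handler signals whoever is waiting on a completion.
	 */
	return 0;
}

/* ...plus one FBNIC_TLV_PARSER(EXAMPLE_RESP, ...) entry in
 * fbnic_fw_tlv_parser[] to dispatch on the message ID.
 */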
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
index 89ac6bc8c7fc..4223d8100e64 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -70,6 +70,100 @@ static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset,
stat->u.old_reg_value_64 = new_reg_value;
}
+static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+}
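
The rd32/rd64 helpers used throughout these hunks are delta accumulators: they stash the previous register value in stat->u.old_reg_value_* and add the difference into stat->value, so a periodic 32-bit read survives counter rollover as long as it happens at least once per wrap. A minimal sketch of the idea, not the driver's actual helper:

/* Sketch of a rollover-safe 32-bit counter accumulate; the real
 * fbnic_hw_stat_rd32() is defined earlier in this file.
 */
static void hw_stat_rd32_sketch(u32 reg_value, struct fbnic_stat_counter *stat)
{
	/* Unsigned subtraction yields the delta even across a wrap */
	stat->value += reg_value - stat->u.old_reg_value_32;
	stat->u.old_reg_value_32 = reg_value;
}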
+
+static void fbnic_reset_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
+static void fbnic_get_tti_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+}
+
+static void fbnic_get_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd,
struct fbnic_rpc_stats *rpc)
{
@@ -117,6 +211,221 @@ static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd,
&rpc->ovr_size_err);
}
+static void fbnic_reset_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.u.old_reg_value_32 = 0;
+}
+
+static void fbnic_reset_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_reset_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+}
+
+static void fbnic_reset_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_reset_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_reset_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_reset_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_fifo_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.value = rd32(fbd, FBNIC_RXB_PBUF_FIFO_LEVEL(i));
+}
+
+static void fbnic_get_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_get_rxb_fifo_stats32(fbd, i, fifo);
+}
+
+static void fbnic_get_rxb_enq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_get_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_get_rxb_enq_stats32(fbd, i, enq);
+}
+
+static void fbnic_get_rxb_deq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+}
+
+static void fbnic_get_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+
+ fbnic_get_rxb_deq_stats32(fbd, i, deq);
+}
+
+static void fbnic_get_rxb_stats32(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats32(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats32(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats32(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_get_hw_rxq_stats32(fbd, hw_q);
+ spin_unlock(&fbd->hw_stats_lock);
+}
+
static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
struct fbnic_pcie_stats *pcie)
{
@@ -203,18 +512,40 @@ static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
{
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti);
fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
+ fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
+ fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats_lock);
}
-void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
+ fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti);
fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
+ fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb);
+ fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q);
+}
+
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats_lock);
+ __fbnic_get_hw_stats32(fbd);
+ spin_unlock(&fbd->hw_stats_lock);
}
void fbnic_get_hw_stats(struct fbnic_dev *fbd)
{
- fbnic_get_hw_stats32(fbd);
+ spin_lock(&fbd->hw_stats_lock);
+ __fbnic_get_hw_stats32(fbd);
+ fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti);
+ fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats_lock);
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 78df56b87745..07e54bb75bf3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -17,6 +17,11 @@ struct fbnic_stat_counter {
bool reported;
};
+struct fbnic_hw_stat {
+ struct fbnic_stat_counter frames;
+ struct fbnic_stat_counter bytes;
+};
+
struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FramesTransmittedOK;
struct fbnic_stat_counter FramesReceivedOK;
@@ -37,12 +42,49 @@ struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
};
+struct fbnic_tmi_stats {
+ struct fbnic_hw_stat drop;
+ struct fbnic_stat_counter ptp_illegal_req, ptp_good_ts, ptp_bad_ts;
+};
+
+struct fbnic_tti_stats {
+ struct fbnic_hw_stat cm_drop, frame_drop, tbi_drop;
+};
+
struct fbnic_rpc_stats {
struct fbnic_stat_counter unkn_etype, unkn_ext_hdr;
struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp;
struct fbnic_stat_counter tcp_opt_err, out_of_hdr_err, ovr_size_err;
};
+struct fbnic_rxb_enqueue_stats {
+ struct fbnic_hw_stat drbo;
+ struct fbnic_stat_counter integrity_err, mac_err;
+ struct fbnic_stat_counter parser_err, frm_err;
+};
+
+struct fbnic_rxb_fifo_stats {
+ struct fbnic_hw_stat drop, trunc;
+ struct fbnic_stat_counter trans_drop, trans_ecn;
+ struct fbnic_stat_counter level;
+};
+
+struct fbnic_rxb_dequeue_stats {
+ struct fbnic_hw_stat intf, pbuf;
+};
+
+struct fbnic_rxb_stats {
+ struct fbnic_rxb_enqueue_stats enq[FBNIC_RXB_ENQUEUE_INDICES];
+ struct fbnic_rxb_fifo_stats fifo[FBNIC_RXB_FIFO_INDICES];
+ struct fbnic_rxb_dequeue_stats deq[FBNIC_RXB_DEQUEUE_INDICES];
+};
+
+struct fbnic_hw_q_stats {
+ struct fbnic_stat_counter rde_pkt_err;
+ struct fbnic_stat_counter rde_pkt_cq_drop;
+ struct fbnic_stat_counter rde_pkt_bdq_drop;
+};
+
struct fbnic_pcie_stats {
struct fbnic_stat_counter ob_rd_tlp, ob_rd_dword;
struct fbnic_stat_counter ob_wr_tlp, ob_wr_dword;
@@ -55,13 +97,19 @@ struct fbnic_pcie_stats {
struct fbnic_hw_stats {
struct fbnic_mac_stats mac;
+ struct fbnic_tmi_stats tmi;
+ struct fbnic_tti_stats tti;
struct fbnic_rpc_stats rpc;
+ struct fbnic_rxb_stats rxb;
+ struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];
struct fbnic_pcie_stats pcie;
};
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
void fbnic_reset_hw_stats(struct fbnic_dev *fbd);
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q);
void fbnic_get_hw_stats32(struct fbnic_dev *fbd);
void fbnic_get_hw_stats(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index dde4a37116e2..10e108c1fcd0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -687,13 +687,10 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
int err = 0, retries = 5;
s32 *sensor;
- fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL);
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
if (!fw_cmpl)
return -ENOMEM;
- /* Initialize completion and queue it for FW to process */
- fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
-
switch (id) {
case FBNIC_SENSOR_TEMP:
sensor = &fw_cmpl->u.tsene.millidegrees;
@@ -744,7 +741,7 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
*val = *sensor;
exit_cleanup:
- fbnic_fw_clear_compl(fbd);
+ fbnic_fw_clear_cmpl(fbd, fw_cmpl);
exit_free:
fbnic_fw_put_cmpl(fw_cmpl);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 2524d9b88d59..aa812c63d5af 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -404,12 +404,16 @@ static int fbnic_hwtstamp_set(struct net_device *netdev,
static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
+ u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
u64 tx_bytes, tx_packets, tx_dropped = 0;
- u64 rx_bytes, rx_packets, rx_dropped = 0;
struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
+ u64 rx_over = 0, rx_missed = 0;
unsigned int start, i;
+ fbnic_get_hw_stats(fbd);
+
stats = &fbn->tx_stats;
tx_bytes = stats->bytes;
@@ -420,6 +424,12 @@ static void fbnic_get_stats64(struct net_device *dev,
stats64->tx_packets = tx_packets;
stats64->tx_dropped = tx_dropped;
+ /* Record drops from Tx HW Datapath */
+ tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
+ fbd->hw_stats.tti.cm_drop.frames.value +
+ fbd->hw_stats.tti.frame_drop.frames.value +
+ fbd->hw_stats.tti.tbi_drop.frames.value;
+
for (i = 0; i < fbn->num_tx_queues; i++) {
struct fbnic_ring *txr = fbn->tx[i];
@@ -445,9 +455,34 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_packets = stats->packets;
rx_dropped = stats->dropped;
+ spin_lock(&fbd->hw_stats_lock);
+ /* Record drops for the two host-bound FIFOs
+ * (FIFO 4: network to host, FIFO 6: BMC to host).
+ * Exclude the BMC and MC FIFOs, as their drop counts may include
+ * unrelated events such as TCAM misses. They remain accessible
+ * through the ethtool stats.
+ */
+ i = FBNIC_RXB_FIFO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+ i = FBNIC_RXB_FIFO_BMC_TO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+
+ for (i = 0; i < fbd->max_num_queues; i++) {
+ /* Report packets dropped due to CQ/BDQ being full/empty */
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;
+
+ /* Report packets with errors */
+ rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
+ }
+ spin_unlock(&fbd->hw_stats_lock);
+
stats64->rx_bytes = rx_bytes;
stats64->rx_packets = rx_packets;
stats64->rx_dropped = rx_dropped;
+ stats64->rx_over_errors = rx_over;
+ stats64->rx_errors = rx_errors;
+ stats64->rx_missed_errors = rx_missed;
for (i = 0; i < fbn->num_rx_queues; i++) {
struct fbnic_ring *rxr = fbn->rx[i];
@@ -487,6 +522,7 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
{
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
+ struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
u64 bytes, packets, alloc_fail;
u64 csum_complete, csum_none;
@@ -510,6 +546,15 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
rx->alloc_fail = alloc_fail;
rx->csum_complete = csum_complete;
rx->csum_none = csum_none;
+
+ fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
+
+ spin_lock(&fbd->hw_stats_lock);
+ rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
+ fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
+ rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
+ rx->hw_drop_overruns;
+ spin_unlock(&fbd->hw_stats_lock);
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 4e8595239c0f..249d3ef862d5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
+#include <net/devlink.h>
#include "fbnic.h"
#include "fbnic_drvinfo.h"
@@ -292,6 +293,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fbnic_devlink_register(fbd);
fbnic_dbg_fbd_init(fbd);
+ spin_lock_init(&fbd->hw_stats_lock);
/* Capture snapshot of hardware stats so netdev can calculate delta */
fbnic_reset_hw_stats(fbd);
@@ -387,8 +389,12 @@ static int fbnic_pm_suspend(struct device *dev)
rtnl_unlock();
null_uc_addr:
+ devl_lock(priv_to_devlink(fbd));
+
fbnic_fw_free_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
+
/* Free the IRQs so they aren't trying to occupy sleeping CPUs */
fbnic_free_irqs(fbd);
@@ -419,11 +425,15 @@ static int __fbnic_pm_resume(struct device *dev)
fbd->mac->init_regs(fbd);
+ devl_lock(priv_to_devlink(fbd));
+
/* Re-enable mailbox */
err = fbnic_fw_request_mbx(fbd);
if (err)
goto err_free_irqs;
+ devl_unlock(priv_to_devlink(fbd));
+
/* No netdev means there isn't a network interface to bring up */
if (fbnic_init_failure(fbd))
return 0;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 1302aa8e0853..cdde19b8edc4 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6304,7 +6304,8 @@ static void mib_read_work(struct work_struct *work)
static void mib_monitor(struct timer_list *t)
{
- struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
+ struct dev_info *hw_priv = timer_container_of(hw_priv, t,
+ mib_timer_info.timer);
mib_read_work(&hw_priv->mib_read);
@@ -6331,7 +6332,8 @@ static void mib_monitor(struct timer_list *t)
*/
static void dev_monitor(struct timer_list *t)
{
- struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
+ struct dev_priv *priv = timer_container_of(priv, t,
+ monitor_timer_info.timer);
struct net_device *dev = priv->mii_if.dev;
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 1459acfb1e61..64a3b953cc17 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -18,6 +18,8 @@
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE (512)
#define MAX_OTP_SIZE (1024)
+#define MAX_HS_OTP_SIZE (8 * 1024)
+#define MAX_HS_EEPROM_SIZE (64 * 1024)
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
@@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);
if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
- return MAX_OTP_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE;
- return MAX_EEPROM_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE;
}
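
One nit worth flagging on the new range checks above: with u32 offset and length, offset + length can wrap around and slip past the limit. A wrap-proof form, should it ever matter here (illustrative, using the OTP limit as the example):

/* Illustrative: reject the pair without risking u32 wraparound */
if (offset > MAX_HS_OTP_SIZE || length > MAX_HS_OTP_SIZE - offset)
	return -EINVAL;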
static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e2d6bfb5d693..9d70b51ca91d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1330,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
}
/* PHY */
-static int lan743x_phy_reset(struct lan743x_adapter *adapter)
+static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter)
{
u32 data;
@@ -1346,11 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static int lan743x_phy_init(struct lan743x_adapter *adapter)
-{
- return lan743x_phy_reset(adapter);
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1729,6 +1724,7 @@ int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
default:
return -ERANGE;
}
+ adapter->rx_tstamp_filter = rx_filter;
return 0;
}
@@ -2499,8 +2495,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head,
- GFP_ATOMIC | GFP_DMA)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head, GFP_ATOMIC)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@ -3352,8 +3347,6 @@ static int lan743x_netdev_ioctl(struct net_device *netdev,
if (!netif_running(netdev))
return -EINVAL;
- if (cmd == SIOCSHWTSTAMP)
- return lan743x_ptp_ioctl(netdev, ifr, cmd);
return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
@@ -3448,6 +3441,8 @@ static const struct net_device_ops lan743x_netdev_ops = {
.ndo_change_mtu = lan743x_netdev_change_mtu,
.ndo_get_stats64 = lan743x_netdev_get_stats64,
.ndo_set_mac_address = lan743x_netdev_set_mac_address,
+ .ndo_hwtstamp_get = lan743x_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = lan743x_ptp_hwtstamp_set,
};
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
@@ -3495,6 +3490,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
struct lan743x_tx *tx;
+ u32 sgmii_ctl;
int index;
int ret;
@@ -3507,6 +3503,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
pci11x1x_set_rfe_rd_fifo_threshold(adapter);
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ } else {
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ }
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
@@ -3524,10 +3529,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
- ret = lan743x_phy_init(adapter);
- if (ret)
- return ret;
-
ret = lan743x_ptp_init(adapter);
if (ret)
return ret;
@@ -3558,7 +3559,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
- u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -3570,10 +3570,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
adapter->mdiobus->priv = (void *)adapter;
if (adapter->is_pci11x1x) {
if (adapter->is_sgmii_en) {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
adapter->mdiobus->read = lan743x_mdiobus_read_c22;
@@ -3584,10 +3580,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus-c45\n");
} else {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"RGMII operation\n");
// Only C22 support when RGMII I/F
@@ -3673,6 +3665,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
if (ret)
goto cleanup_pci;
+ ret = lan743x_hw_reset_phy(adapter);
+ if (ret)
+ goto cleanup_pci;
+
ret = lan743x_hardware_init(adapter, pdev);
if (ret)
goto cleanup_pci;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index db5fc73e41cc..02a28b709163 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -1087,6 +1087,7 @@ struct lan743x_adapter {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
+ int rx_tstamp_filter;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 0be44dcb3393..a3b48388b3fd 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -463,10 +463,6 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
struct lan743x_ptp_perout *perout = &ptp->perout[index];
int ret = 0;
- /* Reject requests with unsupported flags */
- if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
if (on) {
perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
perout_request->index);
@@ -942,12 +938,6 @@ static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
extts = &ptp->extts[index];
- if (extts_request->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
if (on) {
extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
if (extts_pin < 0)
@@ -1543,6 +1533,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
ptp->ptp_clock_info.n_pins = n_pins;
ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
+ ptp->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ ptp->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
@@ -1742,23 +1736,32 @@ void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
lan743x_ptp_tx_ts_complete(adapter);
}
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
- int ret = 0;
- int index;
+ struct lan743x_tx *tx = &adapter->tx[0];
- if (!ifr) {
- netif_err(adapter, drv, adapter->netdev,
- "SIOCSHWTSTAMP, ifr == NULL\n");
- return -EINVAL;
- }
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED)
+ config->tx_type = HWTSTAMP_TX_ON;
+ else
+ config->tx_type = HWTSTAMP_TX_OFF;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->rx_filter = adapter->rx_tstamp_filter;
- switch (config.tx_type) {
+ return 0;
+}
+
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int index;
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
for (index = 0; index < adapter->used_tx_channels;
index++)
@@ -1782,19 +1785,12 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
lan743x_ptp_set_sync_ts_insert(adapter, true);
break;
case HWTSTAMP_TX_ONESTEP_P2P:
- ret = -ERANGE;
- break;
+ return -ERANGE;
default:
netif_warn(adapter, drv, adapter->netdev,
- " tx_type = %d, UNKNOWN\n", config.tx_type);
- ret = -EINVAL;
- break;
+ " tx_type = %d, UNKNOWN\n", config->tx_type);
+ return -EINVAL;
}
- ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter);
-
- if (!ret)
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
- return ret;
+ return lan743x_rx_set_tstamp_mode(adapter, config->rx_filter);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 0d29914cd460..f33dc83c5700 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -18,9 +18,9 @@
*/
#define LAN743X_PTP_N_EVENT_CHAN 2
#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
-#define LAN743X_PTP_N_EXTTS 4
-#define LAN743X_PTP_N_PPS 0
#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
+#define LAN743X_PTP_N_EXTTS PCI11X1X_PTP_IO_MAX_CHANNELS
+#define LAN743X_PTP_N_PPS 0
#define PTP_CMD_CTL_TIMEOUT_CNT 50
struct lan743x_adapter;
@@ -51,8 +51,11 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter);
void lan743x_ptp_close(struct lan743x_adapter *adapter);
void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
u32 link_speed);
-
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 0af143ec0f86..7001584f1b7a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
}
+static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type)
+{
+ lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE);
+}
+
static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
{
lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
@@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
return err;
lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type);
lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
}
@@ -873,6 +879,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
lan966x_vlan_port_set_vlan_aware(port, 0);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 1efa584e7107..4f75f0688369 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -75,6 +75,10 @@
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
+#define IFH_PDU_TYPE_NONE 0
+#define IFH_PDU_TYPE_IPV4 7
+#define IFH_PDU_TYPE_IPV6 8
+
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
@@ -254,6 +258,7 @@ struct lan966x_phc {
struct lan966x_skb_cb {
u8 rew_op;
+ u8 pdu_type;
u16 ts_id;
unsigned long jiffies;
};
@@ -492,6 +497,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port);
bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
bool vlan_aware);
+void lan966x_vlan_port_rew_host(struct lan966x_port *port);
int lan966x_vlan_port_set_vid(struct lan966x_port *port,
u16 vid,
bool pvid,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 63905bb5a63a..b4377b8613c3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -322,34 +322,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
*cfg = phc->hwtstamp_config;
}
-static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type)
{
struct ptp_header *header;
u8 msgtype;
int type;
- if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
- return IFH_REW_OP_NOOP;
+ if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
type = ptp_classify_raw(skb);
- if (type == PTP_CLASS_NONE)
- return IFH_REW_OP_NOOP;
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
header = ptp_parse_header(skb, type);
- if (!header)
- return IFH_REW_OP_NOOP;
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
- if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
- return IFH_REW_OP_TWO_STEP_PTP;
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6;
+
+ if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
/* If it is a sync frame and one-step mode is enabled, set the
 * one-step operation; otherwise fall back to two-step
 */
msgtype = ptp_get_msgtype(header, type);
- if ((msgtype & 0xf) == 0)
- return IFH_REW_OP_ONE_STEP_PTP;
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
- return IFH_REW_OP_TWO_STEP_PTP;
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
}
static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
@@ -374,10 +395,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
{
struct lan966x *lan966x = port->lan966x;
unsigned long flags;
+ u8 pdu_type;
u8 rew_op;
- rew_op = lan966x_ptp_classify(port, skb);
+ lan966x_ptp_classify(port, skb, &rew_op, &pdu_type);
LAN966X_SKB_CB(skb)->rew_op = rew_op;
+ LAN966X_SKB_CB(skb)->pdu_type = pdu_type;
if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
return 0;
@@ -815,10 +838,6 @@ static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
bool pps = false;
int pin;
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -917,12 +936,6 @@ static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
if (lan966x->ptp_ext_irq <= 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -978,6 +991,10 @@ static struct ptp_clock_info lan966x_ptp_clock_info = {
.n_per_out = LAN966X_PHC_PINS_NUM,
.n_ext_ts = LAN966X_PHC_PINS_NUM,
.n_pins = LAN966X_PHC_PINS_NUM,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index 1c88120eb291..bcb4db76b75c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -297,6 +297,7 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_set_vlan_aware(port, false);
lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
lan966x_vlan_port_apply(port);
+ lan966x_vlan_port_rew_host(port);
}
int lan966x_port_changeupper(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index fa34a739c748..7da22520724c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -149,6 +149,27 @@ void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
port->vlan_aware = vlan_aware;
}
+/* When the interface is in host mode, it should not be VLAN aware, but it
+ * should still insert any tag it gets from the network stack. The tag is
+ * carried in the skb rather than in the frame data, and the IFH is already
+ * configured to pick it up. All that is needed is to update the rewriter to
+ * insert the VLAN tag for every frame whose VID is different from 0.
+ */
+void lan966x_vlan_port_rew_host(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ /* Tag all frames except when VID = 0 */
+ val = REW_TAG_CFG_TAG_CFG_SET(2);
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+}
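
For reference, TAG_CFG selects the egress tagging policy in the rewriter. Only the meaning of value 2 is confirmed by this patch; the rest of the encoding below is an assumption based on how this switch family is usually documented, and should be checked against the datasheet:

/* Assumed REW_TAG_CFG.TAG_CFG encoding (verify against the datasheet):
 * 0: port tagging disabled
 * 1: tag all frames except VID 0 and the port VID
 * 2: tag all frames except VID 0   <-- used here
 * 3: tag all frames, including VID 0
 */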
+
void lan966x_vlan_port_apply(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 4ffaf7588885..3504507477c6 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -391,6 +391,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
case GDMA_EQE_HWC_INIT_DATA:
case GDMA_EQE_HWC_INIT_DONE:
+ case GDMA_EQE_HWC_SOC_SERVICE:
case GDMA_EQE_RNIC_QP_FATAL:
if (!eq->eq.callback)
break;
@@ -964,6 +965,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
err, resp.hdr.status);
return err ? err : -EPROTO;
}
+ gc->pf_cap_flags1 = resp.pf_cap_flags1;
if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
if (err) {
@@ -1004,7 +1006,6 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_register_device, "NET_MANA");
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -1035,7 +1036,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_deregister_device, "NET_MANA");
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1469,10 +1469,14 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_gd_init_registers(pdev);
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
+ gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0);
+ if (!gc->service_wq)
+ return -ENOMEM;
+
err = mana_gd_setup_irqs(pdev);
if (err) {
dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
- return err;
+ goto free_workqueue;
}
err = mana_hwc_create_channel(gc);
@@ -1498,6 +1502,8 @@ destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+free_workqueue:
+ destroy_workqueue(gc->service_wq);
dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1509,6 +1515,8 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+
+ destroy_workqueue(gc->service_wq);
dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
@@ -1578,8 +1586,14 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto cleanup_gd;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ goto cleanup_mana;
+
return 0;
+cleanup_mana:
+ mana_remove(&gc->mana, false);
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
@@ -1607,6 +1621,7 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, false);
mana_gd_cleanup(pdev);
@@ -1630,6 +1645,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
@@ -1654,6 +1670,10 @@ static int mana_gd_resume(struct pci_dev *pdev)
if (err)
return err;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ return err;
+
return 0;
}
@@ -1664,6 +1684,7 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 1ba49602089b..a8c4d8db75a5 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -112,11 +112,13 @@ out:
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
struct gdma_event *event)
{
+ union hwc_init_soc_service_type service_data;
struct hw_channel_context *hwc = ctx;
struct gdma_dev *gd = hwc->gdma_dev;
union hwc_init_type_data type_data;
union hwc_init_eq_id_db eq_db;
u32 type, val;
+ int ret;
switch (event->type) {
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
@@ -199,7 +201,24 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
}
break;
+ case GDMA_EQE_HWC_SOC_SERVICE:
+ service_data.as_uint32 = event->details[0];
+ type = service_data.type;
+ switch (type) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ ret = mana_rdma_service_event(gd->gdma_context, type);
+ if (ret)
+ dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
+ ret);
+ break;
+ default:
+ dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
+ break;
+ }
+
+ break;
default:
dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
/* Ignore unknown events, which should never happen. */
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 2bac6be8f6a0..ccd2885c939e 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -921,7 +921,7 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
- u16 *max_num_vports)
+ u16 *max_num_vports, u8 *bm_hostmode)
{
struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_device_cfg_resp resp = {};
@@ -932,7 +932,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
sizeof(req), sizeof(resp));
- req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.proto_major_ver = proto_major_ver;
req.proto_minor_ver = proto_minor_ver;
@@ -956,11 +956,16 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
*max_num_vports = resp.max_num_vports;
- if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
gc->adapter_mtu = resp.adapter_mtu;
else
gc->adapter_mtu = ETH_FRAME_LEN;
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
+ *bm_hostmode = resp.bm_hostmode;
+ else
+ *bm_hostmode = 0;
+
debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
return 0;
@@ -2441,7 +2446,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
mana_destroy_txq(apc);
mana_uncfg_vport(apc);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_hw_vport(apc);
}
@@ -2453,7 +2458,7 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_hw_vport(apc);
if (err)
return err;
@@ -2689,7 +2694,7 @@ int mana_alloc_queues(struct net_device *ndev)
goto destroy_vport;
}
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_filter(apc);
if (err)
goto destroy_vport;
@@ -2751,7 +2756,7 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_filter(apc);
/* No packet can be transmitted now since apc->port_is_up is false.
@@ -2945,7 +2950,7 @@ static void remove_adev(struct gdma_dev *gd)
gd->adev = NULL;
}
-static int add_adev(struct gdma_dev *gd)
+static int add_adev(struct gdma_dev *gd, const char *name)
{
struct auxiliary_device *adev;
struct mana_adev *madev;
@@ -2961,7 +2966,7 @@ static int add_adev(struct gdma_dev *gd)
goto idx_fail;
adev->id = ret;
- adev->name = "rdma";
+ adev->name = name;
adev->dev.parent = gd->gdma_context->dev;
adev->dev.release = adev_release;
madev->mdev = gd;
@@ -2993,11 +2998,76 @@ idx_fail:
return ret;
}
+static void mana_rdma_service_handle(struct work_struct *work)
+{
+ struct mana_service_work *serv_work =
+ container_of(work, struct mana_service_work, work);
+ struct gdma_dev *gd = serv_work->gdma_dev;
+ struct device *dev = gd->gdma_context->dev;
+ int ret;
+
+ if (READ_ONCE(gd->rdma_teardown))
+ goto out;
+
+ switch (serv_work->event) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ if (!gd->adev || gd->is_suspended)
+ break;
+
+ remove_adev(gd);
+ gd->is_suspended = true;
+ break;
+
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ if (!gd->is_suspended)
+ break;
+
+ ret = add_adev(gd, "rdma");
+ if (ret)
+ dev_err(dev, "Failed to add adev on resume: %d\n", ret);
+ else
+ gd->is_suspended = false;
+ break;
+
+ default:
+ dev_warn(dev, "unknown adev service event %u\n",
+ serv_work->event);
+ break;
+ }
+
+out:
+ kfree(serv_work);
+}
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
+{
+ struct gdma_dev *gd = &gc->mana_ib;
+ struct mana_service_work *serv_work;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return 0;
+ }
+
+ serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC);
+ if (!serv_work)
+ return -ENOMEM;
+
+ serv_work->event = event;
+ serv_work->gdma_dev = gd;
+
+ INIT_WORK(&serv_work->work, mana_rdma_service_handle);
+ queue_work(gc->service_wq, &serv_work->work);
+
+ return 0;
+}
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
+ u8 bm_hostmode = 0;
u16 num_ports = 0;
int err;
int i;
@@ -3026,10 +3096,12 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
- MANA_MICRO_VERSION, &num_ports);
+ MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
if (err)
goto out;
+ ac->bm_hostmode = bm_hostmode;
+
if (!resuming) {
ac->num_ports = num_ports;
} else {
@@ -3077,7 +3149,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
}
- err = add_adev(gd);
+ err = add_adev(gd, "eth");
out:
if (err) {
mana_remove(gd, false);
@@ -3151,6 +3223,44 @@ out:
dev_dbg(dev, "%s succeeded\n", __func__);
}
+int mana_rdma_probe(struct gdma_dev *gd)
+{
+ int err = 0;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return err;
+ }
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ err = add_adev(gd, "rdma");
+ if (err)
+ mana_gd_deregister_device(gd);
+
+ return err;
+}
+
+void mana_rdma_remove(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device is not detected on pci */
+ return;
+ }
+
+ WRITE_ONCE(gd->rdma_teardown, true);
+ flush_workqueue(gc->service_wq);
+
+ if (gd->adev)
+ remove_adev(gd);
+
+ mana_gd_deregister_device(gd);
+}
+
struct net_device *mana_get_primary_netdev(struct mana_context *ac,
u32 port_index,
netdevice_tracker *tracker)
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 7663d196eaf8..469784d3a1a6 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -870,23 +870,30 @@ static int ocelot_set_features(struct net_device *dev,
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
+static int ocelot_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
- /* If the attached PHY device isn't capable of timestamping operations,
- * use our own (when possible).
- */
- if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(ocelot, port, ifr);
- case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(ocelot, port, ifr);
- }
- }
+ ocelot_hwstamp_get(ocelot, port, cfg);
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return 0;
+}
+
+static int ocelot_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ return ocelot_hwstamp_set(ocelot, port, cfg, extack);
}
static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
@@ -917,6 +924,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_eth_ioctl = ocelot_ioctl,
+ .ndo_hwtstamp_get = ocelot_port_hwtstamp_get,
+ .ndo_hwtstamp_set = ocelot_port_hwtstamp_set,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
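The ocelot conversion above moves hardware timestamping out of ndo_eth_ioctl and into the dedicated ndo_hwtstamp_get/set callbacks, which take a kernel_hwtstamp_config instead of a raw ifreq; the copy_from_user()/copy_to_user() that ocelot_hwstamp_get/set used to perform now happens once in generic code. A standalone sketch of that split, with local stand-in types rather than the kernel's:

#include <stdio.h>
#include <string.h>

struct kcfg { int tx_type; int rx_filter; };

/* driver side: mirrors ocelot_port_hwtstamp_get() above */
static int drv_hwtstamp_get(struct kcfg *cfg)
{
        cfg->tx_type = 1;       /* e.g. HWTSTAMP_TX_ON */
        cfg->rx_filter = 0;     /* e.g. HWTSTAMP_FILTER_NONE */
        return 0;
}

/* core side: one place that serializes the struct for userspace */
static int core_ioctl_get(void *user_buf)
{
        struct kcfg cfg = {};
        int err = drv_hwtstamp_get(&cfg);

        if (err)
                return err;
        memcpy(user_buf, &cfg, sizeof(cfg));    /* stands in for copy_to_user() */
        return 0;
}

int main(void)
{
        struct kcfg out;

        core_ioctl_get(&out);
        printf("tx_type=%d rx_filter=%d\n", out.tx_type, out.rx_filter);
        return 0;
}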
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index cc1088988da0..88b5422cc2a0 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -211,11 +211,6 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
if (pin == 0)
@@ -519,47 +514,42 @@ static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd)
return 0;
}
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+void ocelot_hwstamp_get(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct hwtstamp_config cfg = {};
switch (ocelot_port->ptp_cmd) {
case IFH_REW_OP_TWO_STEP_PTP:
- cfg.tx_type = HWTSTAMP_TX_ON;
+ cfg->tx_type = HWTSTAMP_TX_ON;
break;
case IFH_REW_OP_ORIGIN_PTP:
- cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ cfg->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
break;
default:
- cfg.tx_type = HWTSTAMP_TX_OFF;
+ cfg->tx_type = HWTSTAMP_TX_OFF;
break;
}
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
}
EXPORT_SYMBOL(ocelot_hwstamp_get);
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- int ptp_cmd, old_ptp_cmd = ocelot_port->ptp_cmd;
bool l2 = false, l4 = false;
- struct hwtstamp_config cfg;
- bool old_l2, old_l4;
+ int ptp_cmd;
int err;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
/* Tx type sanity check */
- err = ocelot_ptp_tx_type_to_cmd(cfg.tx_type, &ptp_cmd);
+ err = ocelot_ptp_tx_type_to_cmd(cfg->tx_type, &ptp_cmd);
if (err)
return err;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -582,27 +572,15 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
return -ERANGE;
}
- old_l2 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L2;
- old_l4 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L4;
-
err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
if (err)
return err;
ocelot_port->ptp_cmd = ptp_cmd;
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
-
- if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) {
- err = -EFAULT;
- goto out_restore_ptp_traps;
- }
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
return 0;
-out_restore_ptp_traps:
- ocelot_setup_ptp_traps(ocelot, port, old_l2, old_l4);
- ocelot_port->ptp_cmd = old_ptp_cmd;
- return err;
}
EXPORT_SYMBOL(ocelot_hwstamp_set);
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 055b55651a49..498eec8ae61d 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -108,6 +108,8 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.n_ext_ts = 0,
.n_per_out = OCELOT_PTP_PINS_NUM,
.n_pins = OCELOT_PTP_PINS_NUM,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
.pps = 0,
.gettime64 = ocelot_ptp_gettime64,
.settime64 = ocelot_ptp_settime64,
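Declaring supported_perout_flags in the ptp_clock_info above is what allows the flag check to be deleted from ocelot_ptp_enable(): the PTP core can reject unsupported periodic-output flags before the driver callback ever runs. A minimal sketch of that centralized test; the flag values are copied from the PTP UAPI, everything else is illustrative:

#include <stdio.h>

#define PTP_PEROUT_DUTY_CYCLE   (1 << 1)
#define PTP_PEROUT_PHASE        (1 << 2)
#define EOPNOTSUPP              95

static int check_perout_flags(unsigned int req_flags, unsigned int supported)
{
        /* any requested flag outside the advertised set is refused */
        return (req_flags & ~supported) ? -EOPNOTSUPP : 0;
}

int main(void)
{
        unsigned int supported = PTP_PEROUT_DUTY_CYCLE | PTP_PEROUT_PHASE;

        printf("%d\n", check_perout_flags(PTP_PEROUT_PHASE, supported)); /* 0 */
        printf("%d\n", check_perout_flags(1u << 5, supported));          /* -95 */
        return 0;
}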
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 7c501a758325..e611ff7fa3fa 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3478,7 +3478,7 @@ static void myri10ge_watchdog_timer(struct timer_list *t)
u32 rx_pause_cnt;
u16 cmd;
- mgp = from_timer(mgp, t, watchdog_timer);
+ mgp = timer_container_of(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
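The from_timer() to timer_container_of() conversions here and in the drivers that follow are a rename of the same helper; both expand to container_of(), recovering the enclosing private structure from a pointer to its embedded timer_list. A standalone sketch of that pointer arithmetic (struct names are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int expires; };

struct priv {
        int stats;
        struct timer_list watchdog_timer;   /* embedded member */
};

/* the callback only receives a pointer to the embedded timer */
static void watchdog(struct timer_list *t)
{
        struct priv *p = container_of(t, struct priv, watchdog_timer);

        printf("stats=%d\n", p->stats);
}

int main(void)
{
        struct priv p = { .stats = 7 };

        watchdog(&p.watchdog_timer);        /* prints stats=7 */
        return 0;
}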
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 05606692e631..b253734dbc80 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -846,7 +846,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- i = pci_request_regions(pdev, DRV_NAME);
+ i = pcim_request_all_regions(pdev, DRV_NAME);
if (i)
goto err_pci_request_regions;
@@ -1786,7 +1786,7 @@ static void init_registers(struct net_device *dev)
*/
static void netdev_timer(struct timer_list *t)
{
- struct netdev_private *np = from_timer(np, t, timer);
+ struct netdev_private *np = timer_container_of(np, t, timer);
struct net_device *dev = np->dev;
void __iomem * ioaddr = ns_ioaddr(dev);
int next_tick = NATSEMI_TIMER_FREQ;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index bf0347715a05..56d5464222d9 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1587,7 +1587,7 @@ static void ns83820_tx_timeout(struct net_device *ndev, unsigned int txqueue)
static void ns83820_tx_watch(struct timer_list *t)
{
- struct ns83820 *dev = from_timer(dev, t, tx_watchdog);
+ struct ns83820 *dev = timer_container_of(dev, t, tx_watchdog);
struct net_device *ndev = dev->ndev;
#if defined(DEBUG)
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 3e55e8dc664c..27443e346f9f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4195,7 +4195,7 @@ pci_map_failed:
static void
s2io_alarm_handle(struct timer_list *t)
{
- struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
+ struct s2io_nic *sp = timer_container_of(sp, t, alarm_timer);
struct net_device *dev = sp->dev;
s2io_handle_errors(dev);
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 671af5d4c5d2..9e7c285eaa6b 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -266,17 +266,17 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
}
}
-static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+static int nfp_net_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.real_dev;
struct nfp_ipsec_cfg_mssg msg = {};
int i, key_len, trunc_len, err = 0;
struct nfp_ipsec_cfg_add_sa *cfg;
struct nfp_net *nn;
unsigned int saidx;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
cfg = &msg.cfg_add_sa;
/* General */
@@ -546,17 +546,16 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
return 0;
}
-static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+static void nfp_net_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct nfp_ipsec_cfg_mssg msg = {
.cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
.sa_idx = x->xso.offload_handle - 1,
};
- struct net_device *netdev = x->xso.real_dev;
struct nfp_net *nn;
int err;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
sizeof(msg), nfp_net_ipsec_cfg);
if (err)
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index f1c6c47564b1..08086eb76996 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -779,7 +779,7 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ebeb6ab4465c..ab3cd06ed63e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -779,7 +779,7 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
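Both nfp hunks replace the private __get_unaligned_cpu32() with the generic get_unaligned(), whose contract is a load that is safe at any alignment. A userspace equivalent using the portable memcpy idiom, which compilers lower to a single load where that is legal (byte values chosen for a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_u32(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));       /* no alignment assumption */
        return v;
}

int main(void)
{
        unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

        printf("0x%08x\n", get_unaligned_u32(buf + 1)); /* 0x12345678 on LE */
        return 0;
}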
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 95514fabadf2..932f59d70f41 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -159,7 +159,7 @@ static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
static void nfp_net_reconfig_timer(struct timer_list *t)
{
- struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
+ struct nfp_net *nn = timer_container_of(nn, t, reconfig_timer);
spin_lock_bh(&nn->reconfig_lock);
@@ -2538,7 +2538,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
nn->dp.num_r_vecs, num_online_cpus());
nn->max_r_vecs = nn->dp.num_r_vecs;
- nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
+ nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(*nn->dp.xsk_pools),
GFP_KERNEL);
if (!nn->dp.xsk_pools) {
err = -ENOMEM;
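The kcalloc() change above is a robustness fix rather than a live bug: xsk_pools is an array of pointers, so sizeof(pointer) and sizeof(element) happen to agree, but sizeof(*nn->dp.xsk_pools) stays correct if the element type ever changes. The general hazard, standalone:

#include <stdio.h>

struct big { char buf[64]; };

int main(void)
{
        struct big *arr = NULL;

        /* sizeof(arr) is 8 on LP64; sizeof(*arr) is 64 -- allocating with
         * the former would undersize the array by a factor of eight. */
        printf("%zu vs %zu\n", sizeof(arr), sizeof(*arr));
        return 0;
}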
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 29cb74ccb25a..19aa1f1538aa 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1891,7 +1891,7 @@ packet_dropped:
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
static void nv_do_rx_refill(struct timer_list *t)
{
- struct fe_priv *np = from_timer(np, t, oom_kick);
+ struct fe_priv *np = timer_container_of(np, t, oom_kick);
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
@@ -4140,7 +4140,7 @@ static void nv_free_irq(struct net_device *dev)
static void nv_do_nic_poll(struct timer_list *t)
{
- struct fe_priv *np = from_timer(np, t, nic_poll);
+ struct fe_priv *np = timer_container_of(np, t, nic_poll);
struct net_device *dev = np->dev;
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
@@ -4259,7 +4259,7 @@ static void nv_do_stats_poll(struct timer_list *t)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
{
- struct fe_priv *np = from_timer(np, t, stats_poll);
+ struct fe_priv *np = timer_container_of(np, t, stats_poll);
struct net_device *dev = np->dev;
/* If lock is currently taken, the stats are being refreshed
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 1651df8a7c21..e5a6f59af0b6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1014,8 +1014,8 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
*/
static void pch_gbe_watchdog(struct timer_list *t)
{
- struct pch_gbe_adapter *adapter = from_timer(adapter, t,
- watchdog_timer);
+ struct pch_gbe_adapter *adapter = timer_container_of(adapter, t,
+ watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 26bc8b3b1ec8..b0de7e9f12a5 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1025,7 +1025,7 @@ static inline int hamachi_tx(struct net_device *dev)
static void hamachi_timer(struct timer_list *t)
{
- struct hamachi_private *hmp = from_timer(hmp, t, timer);
+ struct hamachi_private *hmp = timer_container_of(hmp, t, timer);
struct net_device *dev = hmp->mii_if.dev;
void __iomem *ioaddr = hmp->base;
int next_tick = 10*HZ;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 21b760e65d73..1e25ac13a7d8 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -652,7 +652,7 @@ err_free_irq:
static void yellowfin_timer(struct timer_list *t)
{
- struct yellowfin_private *yp = from_timer(yp, t, timer);
+ struct yellowfin_private *yp = timer_container_of(yp, t, timer);
struct net_device *dev = pci_get_drvdata(yp->pci_dev);
void __iomem *ioaddr = yp->base;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 801380729046..fe58024b5901 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -934,7 +934,8 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
static void pasemi_mac_tx_timer(struct timer_list *t)
{
- struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
+ struct pasemi_mac_txring *txring = timer_container_of(txring, t,
+ clean_timer);
struct pasemi_mac *mac = txring->mac;
pasemi_mac_clean_tx(txring);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 57edcde9e6f8..18b9c8a810ae 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -13,7 +13,7 @@
static void ionic_watchdog_cb(struct timer_list *t)
{
- struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+ struct ionic *ionic = timer_container_of(ionic, t, watchdog_timer);
struct ionic_lif *lif = ionic->lif;
struct ionic_deferred_work *work;
int hb;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index a2d4336d2766..92f30ff2d631 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -948,64 +948,20 @@ static int ionic_get_tunable(struct net_device *netdev,
return 0;
}
-static int ionic_get_module_info(struct net_device *netdev,
- struct ethtool_modinfo *modinfo)
-
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- struct sfp_eeprom_base *sfp;
-
- xcvr = &idev->port_info->status.xcvr;
- sfp = (struct sfp_eeprom_base *) xcvr->sprom;
-
- /* report the module data type and length */
- switch (sfp->phys_id) {
- case SFF8024_ID_SFP:
- modinfo->type = ETH_MODULE_SFF_8079;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- case SFF8024_ID_QSFP_8436_8636:
- case SFF8024_ID_QSFP28_8636:
- case SFF8024_ID_QSFP_PLUS_CMIS:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- break;
- default:
- netdev_info(netdev, "unknown xcvr type 0x%02x\n",
- xcvr->sprom[0]);
- modinfo->type = 0;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- }
-
- return 0;
-}
-
-static int ionic_get_module_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *ee,
- u8 *data)
+static int ionic_do_module_copy(u8 *dst, u8 *src, u32 len)
{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- char tbuf[sizeof(xcvr->sprom)];
+ char tbuf[sizeof_field(struct ionic_xcvr_status, sprom)];
int count = 10;
- u32 len;
/* The NIC keeps the module prom up-to-date in the DMA space
* so we can simply copy the module bytes into the data buffer.
*/
- xcvr = &idev->port_info->status.xcvr;
- len = min_t(u32, sizeof(xcvr->sprom), ee->len);
-
do {
- memcpy(data, &xcvr->sprom[ee->offset], len);
- memcpy(tbuf, &xcvr->sprom[ee->offset], len);
+ memcpy(dst, src, len);
+ memcpy(tbuf, src, len);
/* Let's make sure we got a consistent copy */
- if (!memcmp(data, tbuf, len))
+ if (!memcmp(dst, tbuf, len))
break;
} while (--count);
@@ -1016,6 +972,48 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int ionic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
+ int err;
+ u8 *src;
+
+ if (!page_data->length)
+ return -EINVAL;
+
+ if (page_data->bank != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Only bank 0 is supported");
+ return -EINVAL;
+ }
+
+ switch (page_data->page) {
+ case 0:
+ src = &idev->port_info->status.xcvr.sprom[page_data->offset];
+ break;
+ case 1:
+ src = &idev->port_info->sprom_page1[page_data->offset - 128];
+ break;
+ case 2:
+ src = &idev->port_info->sprom_page2[page_data->offset - 128];
+ break;
+ case 17:
+ src = &idev->port_info->sprom_page17[page_data->offset - 128];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ memset(page_data->data, 0, page_data->length);
+ err = ionic_do_module_copy(page_data->data, src, page_data->length);
+ if (err)
+ return err;
+
+ return page_data->length;
+}
+
static int ionic_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
@@ -1161,8 +1159,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.set_rxfh = ionic_set_rxfh,
.get_tunable = ionic_get_tunable,
.set_tunable = ionic_set_tunable,
- .get_module_info = ionic_get_module_info,
- .get_module_eeprom = ionic_get_module_eeprom,
+ .get_module_eeprom_by_page = ionic_get_module_eeprom_by_page,
.get_pauseparam = ionic_get_pauseparam,
.set_pauseparam = ionic_set_pauseparam,
.get_fecparam = ionic_get_fecparam,
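The ionic_do_module_copy() helper factored out above implements a lock-free consistency check: the NIC may rewrite the module EEPROM shadow via DMA at any time, so the driver copies twice and retries until both copies agree, up to ten attempts. A standalone sketch of the same read-until-stable loop; the "device" here is just a plain buffer:

#include <stdio.h>
#include <string.h>

#define LEN 8

/* stands in for the DMA region the device may rewrite at any time */
static unsigned char dev_shadow[LEN] = "sfp-data";

static int read_stable(unsigned char *dst, int len)
{
        unsigned char tbuf[LEN];
        int count = 10;

        do {
                memcpy(dst, dev_shadow, len);
                memcpy(tbuf, dev_shadow, len);
                if (!memcmp(dst, tbuf, len))    /* both copies agree */
                        return 0;
        } while (--count);

        return -1;                              /* the driver uses -ETIMEDOUT */
}

int main(void)
{
        unsigned char out[LEN];

        if (!read_stable(out, LEN))
                printf("%.8s\n", (const char *)out);
        return 0;
}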
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 830c8adbfbee..f1ddbe9994a3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -2839,6 +2839,10 @@ union ionic_port_identity {
* @status: Port status data
* @stats: Port statistics data
* @mgmt_stats: Port management statistics data
+ * @sprom_epage: Extended Transceiver sprom
+ * @sprom_page1: Extended Transceiver sprom, page 1
+ * @sprom_page2: Extended Transceiver sprom, page 2
+ * @sprom_page17: Extended Transceiver sprom, page 17
* @rsvd: reserved byte(s)
* @pb_stats: uplink pb drop stats
*/
@@ -2849,8 +2853,17 @@ struct ionic_port_info {
struct ionic_port_stats stats;
struct ionic_mgmt_port_stats mgmt_stats;
};
- /* room for pb_stats to start at 2k offset */
- u8 rsvd[760];
+ union {
+ u8 sprom_epage[384];
+ struct {
+ u8 sprom_page1[128];
+ u8 sprom_page2[128];
+ u8 sprom_page17[128];
+ };
+ };
+ u8 rsvd[376];
+
+ /* pb_stats must start at 2k offset */
struct ionic_port_pb_stats pb_stats;
};
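The ionic_if.h hunk above carves three 128-byte transceiver pages out of what was reserved space while keeping pb_stats at its 2k offset: 1288 bytes of existing fields, plus 384 bytes of pages, plus 376 reserved bytes equals 2048. A standalone layout check; the 1288-byte header is a stand-in for the config/status/stats fields:

#include <assert.h>
#include <stddef.h>

struct port_info_sketch {
        char hdr[1288];                 /* config/status/stats stand-in */
        union {
                unsigned char sprom_epage[384];
                struct {
                        unsigned char sprom_page1[128];
                        unsigned char sprom_page2[128];
                        unsigned char sprom_page17[128];
                };
        };
        unsigned char rsvd[376];
        char pb_stats;                  /* must land on the 2k boundary */
};

static_assert(offsetof(struct port_info_sketch, pb_stats) == 2048,
              "pb_stats must start at 2k offset");
static_assert(offsetof(struct port_info_sketch, sprom_page17) ==
              offsetof(struct port_info_sketch, sprom_epage) + 256,
              "page 17 aliases the last 128 bytes of sprom_epage");

int main(void) { return 0; }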
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index daf1e82cb76b..0e60a6bef99a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -516,9 +516,9 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
unsigned long start_time;
unsigned long max_wait;
unsigned long duration;
- int done = 0;
bool fw_up;
int opcode;
+ bool done;
int err;
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
@@ -526,6 +526,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
+ done = false;
opcode = idev->opcode;
start_time = jiffies;
for (fw_up = ionic_is_fw_running(idev);
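The __ionic_dev_cmd_wait() change above fixes a retry-loop hazard: `done` is now reset to false on every pass through try_again, so a stale value left over from a previous attempt cannot terminate the new wait early. The corrected shape, standalone:

#include <stdbool.h>
#include <stdio.h>

static bool poll_once(int attempt) { return attempt == 2; }

int main(void)
{
        int attempt = 0;
        bool done;

try_again:
        done = false;                   /* the fix: reset per attempt */
        for (int i = 0; i < 3 && !done; i++)
                done = poll_once(attempt);

        if (!done && ++attempt < 3)
                goto try_again;

        printf("done=%d after attempt %d\n", done, attempt);
        return 0;
}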
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b7def3b54937..016b575861b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -939,7 +939,6 @@ u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
index f6cd1b3efdfd..27e91d0d39f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -1305,37 +1305,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
char *results_buf);
/**
- * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MVP trace dump buffer, starting from the header.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MCP trace dump buffer, starting from the header.
- * @num_dumped_bytes: Number of bytes that were dumped.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
* qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
* Should be called after continuous MCP Trace parsing.
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 464a72afb758..9c3d3dd2f847 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7614,31 +7614,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
results_buf, &parsed_buf_size, true);
}
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
- &parsed_buf_size, false);
-}
-
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf)
-{
- u32 parsed_results_bytes;
-
- return qed_parse_mcp_trace_buf(p_hwfn,
- dump_buf,
- num_dumped_bytes,
- 0,
- num_dumped_bytes,
- results_buf, &parsed_results_bytes);
-}
-
/* Frees the specified MCP Trace meta data */
void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 86a93cac2647..9659ce5b0712 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -255,25 +255,6 @@ static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
p_hwfn->db_recovery_info.db_recovery_counter = 0;
}
-/* Print the content of the doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
-{
- struct qed_db_recovery_entry *db_entry = NULL;
-
- DP_NOTICE(p_hwfn,
- "Displaying doorbell recovery database. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
-
- /* Protect the list */
- spin_lock_bh(&p_hwfn->db_recovery_info.lock);
- list_for_each_entry(db_entry,
- &p_hwfn->db_recovery_info.list, list_entry) {
- qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
- }
-
- spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
-}
-
/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
struct qed_db_recovery_entry *db_entry)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index ed1a84542ad2..10e355397cee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -2666,58 +2666,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
-/**
- * qed_calc_session_ctx_validation(): Calcualte validation byte for
- * session context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @cid: Context cid.
- *
- * Return: Void.
- */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid);
-
-/**
- * qed_calc_task_ctx_validation(): Calcualte validation byte for task
- * context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @tid: Context tid.
- *
- * Return: Void.
- */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid);
-
-/**
- * qed_memset_session_ctx(): Memset session context to 0 while
- * preserving validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Size to initialzie.
- * @ctx_type: Context type.
- *
- * Return: Void.
- */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
-/**
- * qed_memset_task_ctx(): Memset task context to 0 while preserving
- * validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: size to initialzie.
- * @ctx_type: context type.
- *
- * Return: Void.
- */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
#define NUM_STORMS 6
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 9e5f0dbc8a07..9907973399dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -69,17 +69,6 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
return 0;
}
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
-{
- struct qed_ptt *p_ptt;
- int i;
-
- for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
- p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
- p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
- }
-}
-
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_ptt_pool);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e535983ce21b..3c98f58a184f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -62,15 +62,6 @@ enum _dmae_cmd_crc_mask {
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * qed_ptt_invalidate(): Forces all ptt entries to be re-configured
- *
- * @p_hwfn: HW device data.
- *
- * Return: Void.
- */
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
-
-/**
* qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
* @p_hwfn: HW device data.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 407029a36fa1..aa20bb8caa9a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -18,16 +18,6 @@
#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
- {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
-};
-
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
- {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
-};
-
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
@@ -1576,134 +1566,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
-DECLARE_CRC8_TABLE(cdu_crc8_table);
-
-/* Calculate and return CDU validation byte per connection type/region/cid */
-static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
-{
- const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
- u8 crc, validation_byte = 0;
- static u8 crc8_table_valid; /* automatically initialized to 0 */
- u32 validation_string = 0;
- __be32 data_to_crc;
-
- if (!crc8_table_valid) {
- crc8_populate_msb(cdu_crc8_table, 0x07);
- crc8_table_valid = 1;
- }
-
- /* The CRC is calculated on the String-to-compress:
- * [31:8] = {CID[31:20],CID[11:0]}
- * [7:4] = Region
- * [3:0] = Type
- */
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
- validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
- validation_string |= ((region & 0xF) << 4);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
- validation_string |= (conn_type & 0xF);
-
- /* Convert to big-endian and calculate CRC8 */
- data_to_crc = cpu_to_be32(validation_string);
- crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
- CRC8_INIT_VALUE);
-
- /* The validation byte [7:0] is composed:
- * for type A validation
- * [7] = active configuration bit
- * [6:0] = crc[6:0]
- *
- * for type B validation
- * [7] = active configuration bit
- * [6:3] = connection_type[3:0]
- * [2:0] = crc[2:0]
- */
- validation_byte |=
- ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
-
- if ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
- validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
- else
- validation_byte |= crc & 0x7F;
-
- return validation_byte;
-}
-
-/* Calcualte and set validation bytes for session context */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
-}
-
-/* Calcualte and set validation bytes for task context */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
-{
- u8 *p_ctx, *region1_val_ptr;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
-}
-
-/* Memset session context to 0 while preserving validation bytes */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
- u8 x_val, t_val, u_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- x_val = *x_val_ptr;
- t_val = *t_val_ptr;
- u_val = *u_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = x_val;
- *t_val_ptr = t_val;
- *u_val_ptr = u_val;
-}
-
-/* Memset task context to 0 while preserving validation bytes */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *p_ctx, *region1_val_ptr;
- u8 region1_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- region1_val = *region1_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = region1_val;
-}
-
/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 99df00c30b8c..b5d744d2586f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
};
static struct qed_eth_cb_ops qede_ll_ops = {
- {
+ .common = {
#ifdef CONFIG_RFS_ACCEL
.arfs_filter_op = qede_arfs_filter_op,
#endif
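The qede change swaps a bare positional brace for an explicit .common designator; both initialize the first member of qed_eth_cb_ops, but the named form survives field reordering and reads unambiguously. A standalone illustration with stand-in types:

#include <stdio.h>

struct common_ops { int (*op)(void); };
struct eth_cb_ops { struct common_ops common; int extra; };

static int my_op(void) { return 42; }

static struct eth_cb_ops ops = {
        .common = {                     /* was: bare positional `{` */
                .op = my_op,
        },
};

int main(void) { printf("%d\n", ops.common.op()); return 0; }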
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index bf5bf8c95c85..aee4e63b4b82 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3735,7 +3735,7 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
static void ql3xxx_timer(struct timer_list *t)
{
- struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
+ struct ql3_adapter *qdev = timer_container_of(qdev, t, adapter_timer);
queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 28d24d59efb8..d57b976b9040 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1484,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
}
cmd_op = (cmd.rsp.arg[0] & 0xff);
- if (cmd.rsp.arg[0] >> 25 == 2)
- return 2;
+ if (cmd.rsp.arg[0] >> 25 == 2) {
+ ret = 2;
+ goto out;
+ }
+
if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
set_bit(QLC_BC_VF_STATE, &vf->state);
else
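The qlcnic fix converts an early `return 2` into `goto out` so the function's unwind path still runs; presumably the out: label (not shown in the hunk) releases the mailbox resources. The single-exit cleanup idiom, standalone, with a plain buffer standing in for the mailbox arguments:

#include <stdio.h>
#include <stdlib.h>

static int channel_cfg(int rsp)
{
        int ret = 0;
        char *args = malloc(32);        /* mailbox-args stand-in */

        if (!args)
                return -12;             /* -ENOMEM: nothing to free yet */

        if (rsp >> 25 == 2) {           /* was: early `return 2` */
                ret = 2;
                goto out;               /* cleanup below still runs */
        }

        /* ... normal command handling would go here ... */
out:
        free(args);
        return ret;
}

int main(void)
{
        printf("%d\n", channel_cfg(2 << 25));   /* 2, with args freed */
        return 0;
}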
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9210ff360fdc..a4434eb38950 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -52,7 +52,6 @@ config QCOM_EMAC
depends on HAS_DMA && HAS_IOMEM
select CRC32
select PHYLIB
- select MDIO_DEVRES
help
This driver supports the Qualcomm Technologies, Inc. Gigabit
Ethernet Media Access Controller (EMAC). The controller
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index c73a57e4a144..0d65434982a2 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -717,7 +717,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
problem where the adapter forgets its ethernet address. */
static void atp_timed_checker(struct timer_list *t)
{
- struct net_local *lp = from_timer(lp, t, timer);
+ struct net_local *lp = timer_container_of(lp, t, timer);
struct net_device *dev = lp->dev;
long ioaddr = dev->base_addr;
int tickssofar = jiffies - lp->last_rx_time;
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 7a194a8ab989..2c1a0c21af8d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -64,16 +64,15 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_50 has been removed */
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
- RTL_GIGA_MAC_VER_53,
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_VER_64,
- RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_VER_66,
RTL_GIGA_MAC_VER_70,
- RTL_GIGA_MAC_VER_71,
- RTL_GIGA_MAC_NONE
+ RTL_GIGA_MAC_VER_80,
+ RTL_GIGA_MAC_NONE,
+ RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1
};
struct rtl8169_private;
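The new RTL_GIGA_MAC_VER_LAST sentinel, derived from RTL_GIGA_MAC_NONE, is what lets the driver's many `case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:` ranges below become `... RTL_GIGA_MAC_VER_LAST:` and stop needing edits whenever a version is appended before _NONE. A standalone model of the pattern (case ranges are the GCC/Clang extension the driver already relies on):

#include <stdio.h>

enum mac_version {
        VER_02,
        VER_40,
        VER_80,
        VER_NONE,
        VER_LAST = VER_NONE - 1,        /* tracks the newest real version */
};

static const char *bucket(enum mac_version v)
{
        switch (v) {
        case VER_40 ... VER_LAST:       /* never needs updating */
                return "modern";
        default:
                return "legacy";
        }
}

int main(void)
{
        printf("%s %s\n", bucket(VER_02), bucket(VER_80));
        return 0;
}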
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 4eebd9cb40a3..43170500d566 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -60,6 +60,7 @@
#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
+#define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -91,61 +92,117 @@
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
-static const struct {
+static const struct rtl_chip_info {
+ u16 mask;
+ u16 val;
+ enum mac_version mac_version;
const char *name;
const char *fw_name;
} rtl_chip_infos[] = {
- /* PCI devices. */
- [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
- [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
- [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
- [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
- [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
- /* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
- [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
- [RTL_GIGA_MAC_VER_14] = {"RTL8401" },
- [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
- [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
- [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
- [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
- [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
- [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
- [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
- [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
- [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
- [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
- [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
- [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
- [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
- [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
- [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
- [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
- [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
- [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
- [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
- [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
- /* reserve 62 for CFG_METHOD_4 in the vendor driver */
- [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
- [RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1},
- [RTL_GIGA_MAC_VER_65] = {"RTL8125D", FIRMWARE_8125D_2},
- [RTL_GIGA_MAC_VER_66] = {"RTL8125BP", FIRMWARE_8125BP_2},
- [RTL_GIGA_MAC_VER_70] = {"RTL8126A", FIRMWARE_8126A_2},
- [RTL_GIGA_MAC_VER_71] = {"RTL8126A", FIRMWARE_8126A_3},
+ /* 8127A family. */
+ { 0x7cf, 0x6c9, RTL_GIGA_MAC_VER_80, "RTL8127A", FIRMWARE_8127A_1 },
+
+ /* 8126A family. */
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_3 },
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_2 },
+
+ /* 8125BP family. */
+ { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 },
+
+ /* 8125D family. */
+ { 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 },
+ { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 },
+
+ /* 8125B family. */
+ { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63, "RTL8125B", FIRMWARE_8125B_2 },
+
+ /* 8125A family. */
+ { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61, "RTL8125A", FIRMWARE_8125A_3 },
+
+ /* RTL8117 */
+ { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117" },
+ { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117",
+ FIRMWARE_8168FP_3 },
+
+ /* 8168EP family. */
+ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51, "RTL8168ep/8111ep" },
+
+ /* 8168H family. */
+ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46, "RTL8168h/8111h",
+ FIRMWARE_8168H_2 },
+ /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
+ { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46, "RTL8168M", FIRMWARE_8168H_2 },
+
+ /* 8168G family. */
+ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44, "RTL8411b", FIRMWARE_8411_2 },
+ { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42, "RTL8168gu/8111gu",
+ FIRMWARE_8168G_3 },
+ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40, "RTL8168g/8111g",
+ FIRMWARE_8168G_2 },
+
+ /* 8168F family. */
+ { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38, "RTL8411", FIRMWARE_8411_1 },
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36, "RTL8168f/8111f",
+ FIRMWARE_8168F_2 },
+ { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35, "RTL8168f/8111f",
+ FIRMWARE_8168F_1 },
+
+ /* 8168E family. */
+ { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34, "RTL8168evl/8111evl",
+ FIRMWARE_8168E_3 },
+ { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32, "RTL8168e/8111e",
+ FIRMWARE_8168E_1 },
+ { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33, "RTL8168e/8111e",
+ FIRMWARE_8168E_2 },
+
+ /* 8168D family. */
+ { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25, "RTL8168d/8111d",
+ FIRMWARE_8168D_1 },
+ { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26, "RTL8168d/8111d",
+ FIRMWARE_8168D_2 },
+
+ /* 8168DP family. */
+ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28, "RTL8168dp/8111dp" },
+ { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31, "RTL8168dp/8111dp" },
+
+ /* 8168C family. */
+ { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18, "RTL8168cp/8111cp" },
+ { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21, "RTL8168c/8111c" },
+ { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22, "RTL8168c/8111c" },
+
+ /* 8168B family. */
+ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17, "RTL8168b/8111b" },
+ /* This one is very old and rare, support has been removed.
+ * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11, "RTL8168b/8111b" },
+ */
+
+ /* 8101 family. */
+ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39, "RTL8106e", FIRMWARE_8106E_1 },
+ { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37, "RTL8402", FIRMWARE_8402_1 },
+ { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14, "RTL8401" },
+ { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10, "RTL8101e/RTL8100e" },
+
+ /* 8110 family. */
+ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04, "RTL8169sb/8110sb" },
+ { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03, "RTL8110s" },
+ { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02, "RTL8169s" },
+
+ /* Catch-all */
+ { 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -169,8 +226,10 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
{ PCI_VDEVICE(REALTEK, 0x8126) },
+ { PCI_VDEVICE(REALTEK, 0x8127) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
{ PCI_VDEVICE(REALTEK, 0x5000) },
+ { PCI_VDEVICE(REALTEK, 0x0e10) },
{}
};
@@ -716,6 +775,7 @@ MODULE_FIRMWARE(FIRMWARE_8125D_2);
MODULE_FIRMWARE(FIRMWARE_8125BP_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
+MODULE_FIRMWARE(FIRMWARE_8127A_1);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -777,7 +837,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_53;
+ tp->mac_version <= RTL_GIGA_MAC_VER_52;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -945,9 +1005,7 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (type == ERIAR_OOB &&
- (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
- tp->mac_version == RTL_GIGA_MAC_VER_53))
+ if (type == ERIAR_OOB && tp->mac_version == RTL_GIGA_MAC_VER_52)
*cmd |= 0xf70 << 18;
}
@@ -1236,7 +1294,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1251,7 +1309,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1447,7 +1505,7 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return RTL_DASH_DP;
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
return RTL_DASH_EP;
case RTL_GIGA_MAC_VER_66:
return RTL_DASH_25_BP;
@@ -1603,7 +1661,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_LAST:
r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
@@ -2076,7 +2134,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2265,151 +2323,30 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats,
};
-static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
+static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
{
- /*
- * The driver currently handles the 8168Bf and the 8168Be identically
- * but they can be identified more specifically through the test below
- * if needed:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
- *
- * Same thing for the 8101Eb and the 8101Ec:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
- */
- static const struct rtl_mac_info {
- u16 mask;
- u16 val;
- enum mac_version ver;
- } mac_info[] = {
- /* 8126A family. */
- { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_71 },
- { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70 },
-
- /* 8125BP family. */
- { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66 },
-
- /* 8125D family. */
- { 0x7cf, 0x689, RTL_GIGA_MAC_VER_65 },
- { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 },
-
- /* 8125B family. */
- { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
-
- /* 8125A family. */
- { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 },
- /* It seems only XID 609 made it to the mass market.
- * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
- * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
- */
-
- /* RTL8117 */
- { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
- { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
-
- /* 8168EP family. */
- { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
- * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
- */
-
- /* 8168H family. */
- { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
- */
- /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
- { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 },
-
- /* 8168G family. */
- { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
- { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
- */
- { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
-
- /* 8168F family. */
- { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
- { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
-
- /* 8168E family. */
- { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
- { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
- { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
-
- /* 8168D family. */
- { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
- { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
-
- /* 8168DP family. */
- /* It seems this early RTL8168dp version never made it to
- * the wild. Support has been removed.
- * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
- */
- { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
- { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
-
- /* 8168C family. */
- { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
- { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
- { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
- { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
- { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
- { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
- { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
-
- /* 8168B family. */
- { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- /* This one is very old and rare, support has been removed.
- * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
- */
-
- /* 8101 family. */
- { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
- { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
- { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
- { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
- { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
- { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 },
-
- /* 8110 family. */
- { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
- { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
- { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
- { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
- { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
-
- /* Catch-all */
- { 0x000, 0x000, RTL_GIGA_MAC_NONE }
+ /* Chips combining a 1Gbps MAC with a 100Mbps PHY */
+ static const struct rtl_chip_info rtl8106eus_info = {
+ .mac_version = RTL_GIGA_MAC_VER_43,
+ .name = "RTL8106eus",
+ .fw_name = FIRMWARE_8106E_2,
};
- const struct rtl_mac_info *p = mac_info;
- enum mac_version ver;
+ static const struct rtl_chip_info rtl8107e_info = {
+ .mac_version = RTL_GIGA_MAC_VER_48,
+ .name = "RTL8107e",
+ .fw_name = FIRMWARE_8107E_2,
+ };
+ const struct rtl_chip_info *p = rtl_chip_infos;
while ((xid & p->mask) != p->val)
p++;
- ver = p->ver;
- if (ver != RTL_GIGA_MAC_NONE && !gmii) {
- if (ver == RTL_GIGA_MAC_VER_42)
- ver = RTL_GIGA_MAC_VER_43;
- else if (ver == RTL_GIGA_MAC_VER_46)
- ver = RTL_GIGA_MAC_VER_48;
- }
+ if (p->mac_version == RTL_GIGA_MAC_VER_42 && !gmii)
+ return &rtl8106eus_info;
+ if (p->mac_version == RTL_GIGA_MAC_VER_46 && !gmii)
+ return &rtl8107e_info;
- return ver;
+ return p;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
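The rewritten rtl8169_get_chip_version() above merges the old mac_info[] match table and the separate name/firmware table into one rtl_chip_infos[], scanned most-specific-first; the {0x000, 0x000, RTL_GIGA_MAC_NONE} catch-all guarantees termination because (xid & 0) == 0 matches anything. A standalone model of that lookup:

#include <stdio.h>

struct chip { unsigned short mask, val; const char *name; };

static const struct chip chips[] = {
        { 0x7cf, 0x6c9, "RTL8127A" },
        { 0x7cf, 0x641, "RTL8125B" },
        { 0x000, 0x000, "unknown" },    /* catch-all sentinel */
};

static const struct chip *lookup(unsigned short xid)
{
        const struct chip *p = chips;

        while ((xid & p->mask) != p->val)
                p++;                    /* sentinel ensures this stops */
        return p;
}

int main(void)
{
        printf("%s\n", lookup(0x641)->name);    /* RTL8125B */
        printf("%s\n", lookup(0x123)->name);    /* unknown */
        return 0;
}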
@@ -2553,13 +2490,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2684,14 +2621,14 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2852,10 +2789,23 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_csi_mod(struct rtl8169_private *tp, int addr,
+ u32 mask, u32 set)
+{
+ u32 val;
+
+ WARN(addr % 4, "Invalid CSI address %#x\n", addr);
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+
+ val = rtl_csi_read(tp, addr);
+ rtl_csi_write(tp, addr, (val & ~mask) | set);
+}
+
static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
- u32 csi;
int rc;
u8 val;
@@ -2872,16 +2822,12 @@ static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
}
}
- netdev_notice_once(tp->dev,
- "No native access to PCI extended config space, falling back to CSI\n");
- csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
- rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
+ rtl_csi_mod(tp, RTL_GEN3_RELATED_OFF, RTL_GEN3_ZRXDC_NONCOMPL, 0);
}
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
- u32 csi;
/* According to Realtek the value at config space address 0x070f
* controls the L0s/L1 entrance latency. We try standard ECAM access
@@ -2893,10 +2839,7 @@ static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
return;
- netdev_notice_once(tp->dev,
- "No native access to PCI extended config space, falling back to CSI\n");
- csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
- rtl_csi_write(tp, 0x070c, csi | val << 24);
+ rtl_csi_mod(tp, 0x070c, 0xff000000, val << 24);
}
static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
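The rtl_csi_mod() helper introduced above is a standard read-modify-write factoring: clear `mask`, set `set`, and collapse the duplicated read/write plus fallback notice into one call per register. The bare idiom, standalone:

#include <stdio.h>

static unsigned int reg;        /* stands in for the CSI register */

static unsigned int csi_read(void) { return reg; }
static void csi_write(unsigned int v) { reg = v; }

static void csi_mod(unsigned int mask, unsigned int set)
{
        csi_write((csi_read() & ~mask) | set);
}

int main(void)
{
        reg = 0x00abcdef;
        csi_mod(0xff000000, 0x42u << 24);       /* replace latency byte */
        printf("0x%08x\n", reg);                /* 0x42abcdef */
        return 0;
}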
@@ -2960,7 +2903,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2974,7 +2917,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -3001,7 +2944,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_70:
- case RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3012,7 +2955,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -3024,7 +2967,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -3033,7 +2976,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_70:
- case RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3754,11 +3697,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
- tp->mac_version == RTL_GIGA_MAC_VER_71)
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
- tp->mac_version == RTL_GIGA_MAC_VER_71)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0f00, 0x0f00);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_70)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3777,7 +3721,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
- tp->mac_version == RTL_GIGA_MAC_VER_71)
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3855,6 +3799,12 @@ static void rtl_hw_start_8126a(struct rtl8169_private *tp)
rtl_hw_start_8125_common(tp);
}
+static void rtl_hw_start_8127a(struct rtl8169_private *tp)
+{
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_config(struct rtl8169_private *tp)
{
static const rtl_generic_fct hw_configs[] = {
@@ -3893,14 +3843,12 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
- [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
- [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8125d,
[RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d,
[RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a,
- [RTL_GIGA_MAC_VER_71] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_80] = rtl_hw_start_8127a,
};
if (hw_configs[tp->mac_version])
@@ -3917,14 +3865,15 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_64:
- case RTL_GIGA_MAC_VER_65:
case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_80:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
break;
case RTL_GIGA_MAC_VER_63:
case RTL_GIGA_MAC_VER_70:
- case RTL_GIGA_MAC_VER_71:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4156,7 +4105,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4313,7 +4262,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -5101,10 +5050,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
- pci_wake_from_d3(pdev, tp->saved_wolopts);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled)
+ pci_prepare_to_sleep(pdev);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -5358,13 +5305,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
rtl_hw_init_8125(tp);
break;
default:
@@ -5389,7 +5336,7 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
/* RTL8125/8126/8127 */
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
return JUMBO_16K;
default:
return JUMBO_9K;
@@ -5434,9 +5381,9 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct rtl_chip_info *chip;
struct rtl8169_private *tp;
int jumbo_max, region, rc;
- enum mac_version chipset;
struct net_device *dev;
u32 txconfig;
u16 xid;
@@ -5486,12 +5433,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
xid = (txconfig >> 20) & 0xfcf;
/* Identify chip attached to board */
- chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
- if (chipset == RTL_GIGA_MAC_NONE)
+ chip = rtl8169_get_chip_version(xid, tp->supports_gmii);
+ if (chip->mac_version == RTL_GIGA_MAC_NONE)
return dev_err_probe(&pdev->dev, -ENODEV,
"unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
xid);
- tp->mac_version = chipset;
+ tp->mac_version = chip->mac_version;
+ tp->fw_name = chip->fw_name;
/* Disable ASPM L1 as that causes random device stop working
* problems as well as full system hangs for some PCIe device users.
@@ -5596,8 +5544,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_set_irq_mask(tp);
- tp->fw_name = rtl_chip_infos[chipset].fw_name;
-
tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
&tp->counters_phys_addr,
GFP_KERNEL);
@@ -5622,7 +5568,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
+ chip->name, dev->dev_addr, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index cf95e579c65d..032d9d2cfa2a 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -50,6 +50,15 @@ static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
phy_restore_page(phydev, oldpage, 0);
}
+static void rtl8125_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ phy_lock_mdio_bus(phydev);
+ __phy_write_mmd(phydev, MDIO_MMD_VEND2, 0xb87c, parm);
+ __phy_modify_mmd(phydev, MDIO_MMD_VEND2, 0xb87e, mask, val);
+ phy_unlock_mdio_bus(phydev);
+}
+
struct phy_reg {
u16 reg;
u16 val;
@@ -1004,12 +1013,8 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80a2);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x16, 0x809c);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153);
+ rtl8125_phy_param(phydev, 0x809c, 0xffff, 0x0153);
phy_write(phydev, 0x1f, 0x0a43);
phy_write(phydev, 0x13, 0x81B3);
@@ -1061,14 +1066,9 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090);
phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80f5);
- phy_write(phydev, 0x17, 0x760e);
- phy_write(phydev, 0x16, 0x8107);
- phy_write(phydev, 0x17, 0x360e);
- phy_write(phydev, 0x16, 0x8551);
- phy_modify(phydev, 0x17, 0xff00, 0x0800);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80f5, 0xffff, 0x760e);
+ rtl8125_phy_param(phydev, 0x8107, 0xffff, 0x360e);
+ rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800);
phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000);
phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300);
@@ -1110,12 +1110,8 @@ static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp,
r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x8088);
- phy_modify(phydev, 0x17, 0xff00, 0x9000);
- phy_write(phydev, 0x16, 0x808f);
- phy_modify(phydev, 0x17, 0xff00, 0x9000);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x8088, 0xff00, 0x9000);
+ rtl8125_phy_param(phydev, 0x808f, 0xff00, 0x9000);
r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800);
@@ -1134,6 +1130,171 @@ static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
rtl8125_common_config_eee_phy(phydev);
}
+static void rtl8127a_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8415, 0xff00, 0x9300);
+ r8168g_phy_param(phydev, 0x81a3, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81ae, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81b9, 0xff00, 0xb900);
+ rtl8125_phy_param(phydev, 0x83b0, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83C5, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83da, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83ef, 0x0e00, 0x0000);
+ phy_modify_paged(phydev, 0x0bf3, 0x14, 0x01f0, 0x0160);
+ phy_modify_paged(phydev, 0x0bf3, 0x15, 0x001f, 0x0014);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x16, 0xc000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x1fff, 0x0187);
+ phy_modify_paged(phydev, 0x0bf2, 0x15, 0x003f, 0x0003);
+
+ r8168g_phy_param(phydev, 0x8173, 0xffff, 0x8620);
+ r8168g_phy_param(phydev, 0x8175, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x817c, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8187, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8192, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x819d, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x81a8, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81b3, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81be, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x817d, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8188, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8193, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x819e, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x81a9, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81b4, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81bf, 0xff00, 0xa600);
+
+ phy_modify_paged(phydev, 0x0aea, 0x15, 0x0028, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f0, 0xffff, 0x201c);
+ rtl8125_phy_param(phydev, 0x84f2, 0xffff, 0x3117);
+
+ phy_write_paged(phydev, 0x0aec, 0x13, 0x0000);
+ phy_write_paged(phydev, 0x0ae2, 0x10, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x17, 0xffff);
+ phy_write_paged(phydev, 0x0aed, 0x11, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x14, 0x0000);
+ phy_modify_paged(phydev, 0x0aed, 0x10, 0x0001, 0x0000);
+ phy_write_paged(phydev, 0x0adb, 0x14, 0x0150);
+ rtl8125_phy_param(phydev, 0x8197, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x8231, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cb, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cd, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8233, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8199, 0xff00, 0x5700);
+
+ rtl8125_phy_param(phydev, 0x815a, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81f4, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x828e, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81b1, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x824b, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x82e5, 0xffff, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f7, 0xff00, 0x2800);
+ phy_modify_paged(phydev, 0x0aec, 0x11, 0x0000, 0x1000);
+ rtl8125_phy_param(phydev, 0x81b3, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x824d, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x82e7, 0xff00, 0xad00);
+ phy_modify_paged(phydev, 0x0ae4, 0x17, 0x000f, 0x0001);
+ rtl8125_phy_param(phydev, 0x82ce, 0xf000, 0x4000);
+
+ rtl8125_phy_param(phydev, 0x84ac, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84ae, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84b0, 0xffff, 0xf818);
+ rtl8125_phy_param(phydev, 0x84b2, 0xff00, 0x6000);
+
+ rtl8125_phy_param(phydev, 0x8ffc, 0xffff, 0x6008);
+ rtl8125_phy_param(phydev, 0x8ffe, 0xffff, 0xf450);
+
+ rtl8125_phy_param(phydev, 0x8015, 0x0000, 0x0200);
+ rtl8125_phy_param(phydev, 0x8016, 0x0800, 0x0000);
+ rtl8125_phy_param(phydev, 0x8fe6, 0xff00, 0x0800);
+ rtl8125_phy_param(phydev, 0x8fe4, 0xffff, 0x2114);
+
+ rtl8125_phy_param(phydev, 0x8647, 0xffff, 0xa7b1);
+ rtl8125_phy_param(phydev, 0x8649, 0xffff, 0xbbca);
+ rtl8125_phy_param(phydev, 0x864b, 0xff00, 0xdc00);
+
+ rtl8125_phy_param(phydev, 0x8154, 0xc000, 0x4000);
+ rtl8125_phy_param(phydev, 0x8158, 0xc000, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x826c, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x826e, 0xffff, 0xffff);
+
+ rtl8125_phy_param(phydev, 0x8872, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x0800);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x4000);
+ phy_modify_paged(phydev, 0x0b57, 0x13, 0x0000, 0x0001);
+ r8168g_phy_param(phydev, 0x834a, 0xff00, 0x0700);
+ rtl8125_phy_param(phydev, 0x8217, 0x3f00, 0x2a00);
+ r8168g_phy_param(phydev, 0x81b1, 0xff00, 0x0b00);
+ rtl8125_phy_param(phydev, 0x8fed, 0xff00, 0x4e00);
+
+ rtl8125_phy_param(phydev, 0x88ac, 0xff00, 0x2300);
+ phy_modify_paged(phydev, 0x0bf0, 0x16, 0x0000, 0x3800);
+ rtl8125_phy_param(phydev, 0x88de, 0xff00, 0x0000);
+ rtl8125_phy_param(phydev, 0x80b4, 0xffff, 0x5195);
+
+ r8168g_phy_param(phydev, 0x8370, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x8372, 0xffff, 0x86c8);
+
+ r8168g_phy_param(phydev, 0x8401, 0xffff, 0x86c8);
+ r8168g_phy_param(phydev, 0x8403, 0xffff, 0x86da);
+ r8168g_phy_param(phydev, 0x8406, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8408, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840a, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840c, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840e, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8410, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8412, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8414, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8416, 0x1800, 0x1000);
+
+ r8168g_phy_param(phydev, 0x82bd, 0xffff, 0x1f40);
+
+ phy_modify_paged(phydev, 0x0bfb, 0x12, 0x07ff, 0x0328);
+ phy_write_paged(phydev, 0x0bfb, 0x13, 0x3e14);
+
+ r8168g_phy_param(phydev, 0x81c4, 0xffff, 0x003b);
+ r8168g_phy_param(phydev, 0x81c6, 0xffff, 0x0086);
+ r8168g_phy_param(phydev, 0x81c8, 0xffff, 0x00b7);
+ r8168g_phy_param(phydev, 0x81ca, 0xffff, 0x00db);
+ r8168g_phy_param(phydev, 0x81cc, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81ce, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d0, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d2, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d4, 0xffff, 0x00c3);
+ r8168g_phy_param(phydev, 0x81d6, 0xffff, 0x0078);
+ r8168g_phy_param(phydev, 0x81d8, 0xffff, 0x0047);
+ r8168g_phy_param(phydev, 0x81da, 0xffff, 0x0023);
+
+ rtl8125_phy_param(phydev, 0x88d7, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x88d9, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x8ffa, 0xffff, 0x002a);
+
+ rtl8125_phy_param(phydev, 0x8fee, 0xffff, 0xffdf);
+ rtl8125_phy_param(phydev, 0x8ff0, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x8ff2, 0xffff, 0x0a4a);
+ rtl8125_phy_param(phydev, 0x8ff4, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x8ff6, 0xffff, 0x0a4a);
+
+ rtl8125_phy_param(phydev, 0x8ff8, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x88d5, 0xff00, 0x0200);
+
+ r8168g_phy_param(phydev, 0x84bb, 0xff00, 0x0a00);
+ r8168g_phy_param(phydev, 0x84c0, 0xff00, 0x1600);
+
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x0003);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_common_config_eee_phy(phydev);
+}
+
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver)
{
@@ -1180,14 +1341,12 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
- [RTL_GIGA_MAC_VER_65] = rtl8125d_hw_phy_config,
[RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config,
[RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config,
- [RTL_GIGA_MAC_VER_71] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_80] = rtl8127a_1_hw_phy_config,
};
if (phy_configs[ver])
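For reference, one rtl8125_phy_param() call such as
rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153) expands to the sequence
below; holding the MDIO bus lock keeps the parameter select and the value
update atomic (register pair 0xb87c/0xb87e in the VEND2 MMD, as in the
helper introduced above):

	phy_lock_mdio_bus(phydev);
	__phy_write_mmd(phydev, MDIO_MMD_VEND2, 0xb87c, 0x80a2);          /* select parameter */
	__phy_modify_mmd(phydev, MDIO_MMD_VEND2, 0xb87e, 0xffff, 0x0153); /* update its value */
	phy_unlock_mdio_bus(phydev);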
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 2bbfcad613ab..498cfe4d0cac 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -170,6 +170,7 @@ enum rtase_registers {
#define RTASE_TC_MODE_MASK GENMASK(11, 10)
RTASE_TOKSEL = 0x2046,
+ RTASE_TXQCRDT_0 = 0x2500,
RTASE_RFIFONFULL = 0x4406,
RTASE_INT_MITI_TX = 0x0A00,
RTASE_INT_MITI_RX = 0x0A80,
@@ -259,6 +260,12 @@ union rtase_rx_desc {
#define RTASE_VLAN_TAG_MASK GENMASK(15, 0)
#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0)
+/* txqos hardware definitions */
+#define RTASE_1T_CLOCK 64
+#define RTASE_1T_POWER 10000000
+#define RTASE_IDLESLOPE_INT_SHIFT 25
+#define RTASE_IDLESLOPE_INT_MASK GENMASK(31, 25)
+
#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10)
struct rtase_int_vector {
@@ -294,6 +301,13 @@ struct rtase_ring {
u64 alloc_fail;
};
+struct rtase_txqos {
+ int hicredit;
+ int locredit;
+ int idleslope;
+ int sendslope;
+};
+
struct rtase_stats {
u64 tx_dropped;
u64 rx_dropped;
@@ -313,6 +327,7 @@ struct rtase_private {
struct page_pool *page_pool;
struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE];
+ struct rtase_txqos tx_qos[RTASE_NUM_TX_QUEUE];
struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE];
struct rtase_counters *tally_vaddr;
dma_addr_t tally_paddr;
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 55b8d3666153..4d37217e9a14 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1114,7 +1114,7 @@ static int rtase_open(struct net_device *dev)
/* request other interrupts to handle multiqueue */
for (i = 1; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
- snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
+ snprintf(ivec->name, sizeof(ivec->name), "%s_int%u",
tp->dev->name, i);
ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
ivec->name, ivec);
@@ -1661,6 +1661,65 @@ static void rtase_get_stats64(struct net_device *dev,
stats->rx_length_errors = tp->stats.rx_length_errors;
}
+static void rtase_set_hw_cbs(const struct rtase_private *tp, u32 queue)
+{
+ u32 idle = tp->tx_qos[queue].idleslope * RTASE_1T_CLOCK;
+ u32 val, i;
+
+ val = u32_encode_bits(idle / RTASE_1T_POWER, RTASE_IDLESLOPE_INT_MASK);
+ idle %= RTASE_1T_POWER;
+
+ for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) {
+ idle *= 2;
+ if ((idle / RTASE_1T_POWER) == 1)
+ val |= BIT(RTASE_IDLESLOPE_INT_SHIFT - i);
+
+ idle %= RTASE_1T_POWER;
+ }
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, val);
+}
+
+static int rtase_setup_tc_cbs(struct rtase_private *tp,
+ const struct tc_cbs_qopt_offload *qopt)
+{
+ int queue = qopt->queue;
+
+ if (queue < 0 || queue >= tp->func_tx_queue_num)
+ return -EINVAL;
+
+ if (!qopt->enable) {
+ tp->tx_qos[queue].hicredit = 0;
+ tp->tx_qos[queue].locredit = 0;
+ tp->tx_qos[queue].idleslope = 0;
+ tp->tx_qos[queue].sendslope = 0;
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, 0);
+ } else {
+ tp->tx_qos[queue].hicredit = qopt->hicredit;
+ tp->tx_qos[queue].locredit = qopt->locredit;
+ tp->tx_qos[queue].idleslope = qopt->idleslope;
+ tp->tx_qos[queue].sendslope = qopt->sendslope;
+
+ rtase_set_hw_cbs(tp, queue);
+ }
+
+ return 0;
+}
+
+static int rtase_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return rtase_setup_tc_cbs(tp, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static netdev_features_t rtase_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1696,6 +1755,7 @@ static const struct net_device_ops rtase_netdev_ops = {
.ndo_change_mtu = rtase_change_mtu,
.ndo_tx_timeout = rtase_tx_timeout,
.ndo_get_stats64 = rtase_get_stats64,
+ .ndo_setup_tc = rtase_setup_tc,
.ndo_fix_features = rtase_fix_features,
.ndo_set_features = rtase_set_features,
};
@@ -1923,7 +1983,7 @@ static u16 rtase_calc_time_mitigation(u32 time_us)
u8 msb, time_count, time_unit;
u16 int_miti;
- time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
+ time_us = min(time_us, RTASE_MITI_MAX_TIME);
if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
msb = fls(time_us);
@@ -1945,7 +2005,7 @@ static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
u8 msb, pkt_num_count, pkt_num_unit;
u16 int_miti;
- pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM);
+ pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM);
if (pkt_num > 60) {
pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
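Worked numbers for the credit-based shaper encoding in rtase_set_hw_cbs()
above (the idleslope value is chosen purely for illustration): with
idleslope = 234375,

	idle      = 234375 * RTASE_1T_CLOCK(64)       = 15000000
	integer   = 15000000 / RTASE_1T_POWER         = 1  -> bits 31:25
	remainder = 5000000; doubled once: 10000000 / RTASE_1T_POWER = 1
	                                              -> bit 24 (the 0.5 bit) set
	val       = (1 << 25) | (1 << 24)

i.e. the loop performs binary long division, emitting one fractional bit of
idle/RTASE_1T_POWER per iteration into bits 24..0.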
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index b4365906669f..226c6c0ab945 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -176,12 +176,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
- /* Reject requests with unsupported flags */
- if (req->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE))
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -212,10 +206,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
- /* Reject requests with unsupported flags */
- if (req->flags)
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -287,6 +277,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
.n_per_out = N_PER_OUT,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE,
.adjfine = ravb_ptp_adjfine,
.adjtime = ravb_ptp_adjtime,
.gettime64 = ravb_ptp_gettime64,
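The ravb hunks above follow the declarative flag-validation pattern: instead
of open-coding the check in the extts handler, the driver advertises the edge
flags it supports and the PTP core rejects anything else before calling into
the driver (the always-valid PTP_ENABLE_FEATURE bit need not be listed). A
minimal sketch with hypothetical "demo" names:

	static const struct ptp_clock_info demo_ptp_info = {
		.owner = THIS_MODULE,
		.name = "demo",
		.n_ext_ts = 1,
		/* core filters req->flags against this set for us */
		.supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE,
		/* .enable = demo_enable, etc. */
	};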
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index d5db26103d82..61e50517c05b 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1933,7 +1933,7 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
- if (!found != !removing) {
+ if (!found == removing) {
kfree(fdb);
if (!found && removing)
return 0;
@@ -1982,7 +1982,7 @@ err_out:
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
- struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
+ struct ofdpa *ofdpa = timer_container_of(ofdpa, t, fdb_cleanup_timer);
struct ofdpa_port *ofdpa_port;
struct ofdpa_fdb_tbl_entry *entry;
struct hlist_node *tmp;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 36b63bf343a9..75bad561b352 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -104,7 +104,8 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
*/
static void sxgbe_eee_ctrl_timer(struct timer_list *t)
{
- struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);
+ struct sxgbe_priv_data *priv = timer_container_of(priv, t,
+ eee_ctrl_timer);
sxgbe_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
@@ -1012,7 +1013,7 @@ static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
*/
static void sxgbe_tx_timer(struct timer_list *t)
{
- struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);
+ struct sxgbe_tx_queue *p = timer_container_of(p, t, txtimer);
sxgbe_tx_queue_clean(p);
}
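All of the timer callback conversions in this series share one shape:
timer_container_of() recovers the structure that embeds the timer_list, with
container_of() semantics identical to the old from_timer(). A self-contained
sketch using a hypothetical structure:

	struct demo_priv {
		struct timer_list timer;	/* embedded timer */
		unsigned long events;
	};

	static void demo_timer_fn(struct timer_list *t)
	{
		struct demo_priv *p = timer_container_of(p, t, timer);

		p->events++;	/* p points at the enclosing demo_priv */
	}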
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 1d65113fab76..20dad39b5ab9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -170,7 +170,7 @@ ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
*/
static void ether3_ledoff(struct timer_list *t)
{
- struct dev_priv *private = from_timer(private, t, timer);
+ struct dev_priv *private = timer_container_of(private, t, timer);
struct net_device *dev = private->dev;
ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index b865275beb66..c44df8e4dd30 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -1453,8 +1453,8 @@ static void falcon_stats_complete(struct ef4_nic *efx)
static void falcon_stats_timer_func(struct timer_list *t)
{
- struct falcon_nic_data *nic_data = from_timer(nic_data, t,
- stats_timer);
+ struct falcon_nic_data *nic_data = timer_container_of(nic_data, t,
+ stats_timer);
struct ef4_nic *efx = nic_data->efx;
spin_lock(&efx->stats_lock);
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 38ad7ac07726..f69fcf6caca8 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -382,7 +382,8 @@ void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
void ef4_rx_slow_fill(struct timer_list *t)
{
- struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct ef4_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
ef4_nic_generate_fill_event(rx_queue);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index dcef0588be96..5e9b8def5e42 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -605,7 +605,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+ struct efx_mcdi_iface *mcdi = timer_container_of(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 8eb272ba674b..f4f75299dfa9 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -352,7 +352,8 @@ void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
void efx_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
index 99ab5f294691..c8f0fb43e285 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi.c
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -609,7 +609,7 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+ struct efx_mcdi_iface *mcdi = timer_container_of(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index ab493e529d5c..98d27174015d 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -349,7 +349,8 @@ void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
void efx_siena_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+ struct efx_rx_queue *rx_queue = timer_container_of(rx_queue, t,
+ slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 7196e1c607f3..39731069d99e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -570,7 +570,7 @@ static inline void ioc3_setup_duplex(struct ioc3_private *ip)
static void ioc3_timer(struct timer_list *t)
{
- struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);
+ struct ioc3_private *ip = timer_container_of(ip, t, ioc3_timer);
/* Print the link status if it has changed */
mii_check_media(&ip->mii, 1, 0);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index d10b14787607..15e46e6ac262 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1023,7 +1023,7 @@ out_unlock:
static void sis190_phy_timer(struct timer_list *t)
{
- struct sis190_private *tp = from_timer(tp, t, timer);
+ struct sis190_private *tp = timer_container_of(tp, t, timer);
struct net_device *dev = tp->dev;
if (likely(netif_running(dev)))
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 332cbd725900..b461918dc5f4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
/* We do a request_region() to register /proc/ioports info. */
- ret = pci_request_regions(pci_dev, "sis900");
+ ret = pcim_request_all_regions(pci_dev, "sis900");
if (ret)
goto err_out;
@@ -1314,7 +1314,8 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
static void sis900_timer(struct timer_list *t)
{
- struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
+ struct sis900_private *sis_priv = timer_container_of(sis_priv, t,
+ timer);
struct net_device *net_dev = sis_priv->mii_info.dev;
struct mii_phy *mii_phy = sis_priv->mii;
static const int next_tick = 5*HZ;
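The sis900 hunk above moves to the device-managed request: with
pcim_request_all_regions() the BAR regions are released automatically when
the driver unbinds, so no matching pci_release_regions() is needed in the
error or remove paths. A hedged usage sketch:

	/* devres-managed; nothing to undo on failure or removal */
	ret = pcim_request_all_regions(pci_dev, "sis900");
	if (ret)
		goto err_out;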
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index ca0ab3a35b73..45f703fe0e5a 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -850,7 +850,7 @@ static void check_media(struct net_device *dev)
static void epic_timer(struct timer_list *t)
{
- struct epic_private *ep = from_timer(ep, t, timer);
+ struct epic_private *ep = timer_container_of(ep, t, timer);
struct net_device *dev = ep->mii.dev;
void __iomem *ioaddr = ep->ioaddr;
int next_tick = 5*HZ;
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6fa957fb523b..cc0c75694351 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1713,7 +1713,7 @@ static void smc_reset(struct net_device *dev)
static void media_check(struct timer_list *t)
{
- struct smc_private *smc = from_timer(smc, t, media);
+ struct smc_private *smc = timer_container_of(smc, t, media);
struct net_device *dev = smc->mii_if.dev;
unsigned int ioaddr = dev->base_addr;
u_short i, media, saved_bank;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 3c820ef56775..67fa879b1e52 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,6 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
select MII
select PCS_XPCS
select PAGE_POOL
@@ -131,6 +132,17 @@ config DWMAC_QCOM_ETHQOS
This selects the Qualcomm ETHQOS glue layer support for the
stmmac device driver.
+config DWMAC_RENESAS_GBETH
+ tristate "Renesas RZ/V2H(P) GBETH support"
+ default ARCH_RENESAS
+ depends on OF && (ARCH_RENESAS || COMPILE_TEST)
+ help
+ Support for Gigabit Ethernet Interface (GBETH) on Renesas
+ RZ/V2H(P) SoCs.
+
This selects the Renesas RZ/V2H(P) SoC-specific glue layer support
+ for the stmmac device driver.
+
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 594883fb4164..b591d93f8503 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o stmmac_est.o stmmac_fpe.o \
+ stmmac_xdp.o stmmac_est.o stmmac_fpe.o stmmac_vlan.o \
$(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
@@ -20,6 +20,7 @@ obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
+obj-$(CONFIG_DWMAC_RENESAS_GBETH) += dwmac-renesas-gbeth.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 412b07e77945..ea5da5793362 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -602,6 +602,7 @@ struct mac_device_info {
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
const struct stmmac_est_ops *est;
+ const struct stmmac_vlan_ops *vlan;
struct dw_xpcs *xpcs;
struct phylink_pcs *phylink_pcs;
struct mii_regs mii; /* MII register Addresses */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 37fe7c288878..84072c8ed741 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -65,13 +65,12 @@ anarion_config_dt(struct platform_device *pdev,
{
struct anarion_gmac *gmac;
void __iomem *ctl_block;
- int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
- err = PTR_ERR(ctl_block);
- dev_err(&pdev->dev, "Cannot get reset region (%d)!\n", err);
- return ERR_PTR(err);
+ dev_err(&pdev->dev, "Cannot get reset region (%pe)!\n",
+ ctl_block);
+ return ERR_CAST(ctl_block);
}
gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
@@ -80,17 +79,11 @@ anarion_config_dt(struct platform_device *pdev,
gmac->ctl_block = ctl_block;
- switch (plat_dat->phy_interface) {
- case PHY_INTERFACE_MODE_RGMII:
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(plat_dat->phy_interface)) {
gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII;
- break;
- default:
- dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
- plat_dat->phy_interface);
+ } else {
+ dev_err(&pdev->dev, "Unsupported phy-mode (%s)\n",
+ phy_modes(plat_dat->phy_interface));
return ERR_PTR(-ENOTSUPP);
}
@@ -118,10 +111,9 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
plat_dat->init = anarion_gmac_init;
plat_dat->exit = anarion_gmac_exit;
- anarion_gmac_init(pdev, gmac);
plat_dat->bsp_priv = gmac;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id anarion_dwmac_match[] = {
@@ -132,7 +124,6 @@ MODULE_DEVICE_TABLE(of, anarion_dwmac_match);
static struct platform_driver anarion_dwmac_driver = {
.probe = anarion_dwmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "anarion-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index cd431f84f34f..09ae16e026eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -29,21 +29,10 @@ struct tegra_eqos {
void __iomem *regs;
struct reset_control *rst;
- struct clk *clk_slave;
struct gpio_desc *reset;
};
-static struct clk *dwc_eth_find_clk(struct plat_stmmacenet_data *plat_dat,
- const char *name)
-{
- for (int i = 0; i < plat_dat->num_clks; i++)
- if (strcmp(plat_dat->clks[i].id, name) == 0)
- return plat_dat->clks[i].clk;
-
- return NULL;
-}
-
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
{
@@ -132,7 +121,7 @@ static int dwc_qos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
{
- plat_dat->pclk = dwc_eth_find_clk(plat_dat, "phy_ref_clk");
+ plat_dat->pclk = stmmac_pltfr_find_clk(plat_dat, "phy_ref_clk");
return 0;
}
@@ -147,10 +136,11 @@ static int dwc_qos_probe(struct platform_device *pdev,
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
+static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode)
{
- struct tegra_eqos *eqos = priv;
+ struct tegra_eqos *eqos = bsp_priv;
bool needs_calibration = false;
+ struct stmmac_priv *priv;
u32 value;
int err;
@@ -169,6 +159,11 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
}
if (needs_calibration) {
+ priv = netdev_priv(dev_get_drvdata(eqos->dev));
+
+ /* Calibration should be done with the MDIO bus idle */
+ mutex_lock(&priv->mii->mdio_lock);
+
/* calibrate */
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
@@ -202,6 +197,8 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ mutex_unlock(&priv->mii->mdio_lock);
} else {
value = readl(eqos->regs + AUTO_CAL_CONFIG);
value &= ~AUTO_CAL_CONFIG_ENABLE;
@@ -209,20 +206,6 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
}
}
-static int tegra_eqos_init(struct platform_device *pdev, void *priv)
-{
- struct tegra_eqos *eqos = priv;
- unsigned long rate;
- u32 value;
-
- rate = clk_get_rate(eqos->clk_slave);
-
- value = (rate / 1000000) - 1;
- writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
-
- return 0;
-}
-
static int tegra_eqos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
@@ -237,12 +220,11 @@ static int tegra_eqos_probe(struct platform_device *pdev,
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
- eqos->clk_slave = plat_dat->stmmac_clk;
if (!is_of_node(dev->fwnode))
goto bypass_clk_reset_gpio;
- plat_dat->clk_tx_i = dwc_eth_find_clk(plat_dat, "tx");
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
if (IS_ERR(eqos->reset)) {
@@ -277,17 +259,12 @@ static int tegra_eqos_probe(struct platform_device *pdev,
bypass_clk_reset_gpio:
plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
- plat_dat->init = tegra_eqos_init;
plat_dat->bsp_priv = eqos;
- plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
-
- err = tegra_eqos_init(pdev, eqos);
- if (err < 0)
- goto reset;
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
return 0;
-reset:
- reset_control_assert(eqos->rst);
+
reset_phy:
gpiod_set_value(eqos->reset, 1);
@@ -362,8 +339,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
- plat_dat->stmmac_clk = dwc_eth_find_clk(plat_dat,
- data->stmmac_clk_name);
+ plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
+ data->stmmac_clk_name);
if (data->probe)
ret = data->probe(pdev, plat_dat, &stmmac_res);
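stmmac_pltfr_find_clk() takes over from the dwc_eth_find_clk() helper removed
above; as the removed code shows, it is a name lookup over the platform
data's clk_bulk_data array. A re-sketch of that logic under the same
assumptions (hypothetical helper name):

	static struct clk *find_bulk_clk(struct clk_bulk_data *clks, int num,
					 const char *name)
	{
		for (int i = 0; i < num; i++)
			if (!strcmp(clks[i].id, name))
				return clks[i].clk;

		return NULL;	/* absent clock is treated as optional */
	}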
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 5d279fa54b3e..889e2bb6f7f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -379,10 +379,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = imx_dwmac_init(pdev, dwmac);
- if (ret)
- goto err_dwmac_init;
-
if (dwmac->ops->fix_mac_speed) {
plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
} else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) {
@@ -392,16 +388,10 @@ static int imx_dwmac_probe(struct platform_device *pdev)
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
- goto err_drv_probe;
-
- return 0;
+ imx_dwmac_clks_config(dwmac, false);
-err_drv_probe:
- imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dwmac_init:
- imx_dwmac_clks_config(dwmac, false);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 066783d66422..15abe214131f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -56,6 +56,7 @@ enum ingenic_mac_version {
struct ingenic_mac {
const struct ingenic_soc_info *soc_info;
+ struct plat_stmmacenet_data *plat_dat;
struct device *dev;
struct regmap *regmap;
@@ -70,13 +71,13 @@ struct ingenic_soc_info {
int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
};
-static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat)
+static int ingenic_mac_init(struct platform_device *pdev, void *bsp_priv)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
+ struct ingenic_mac *mac = bsp_priv;
int ret;
if (mac->soc_info->set_mode) {
- ret = mac->soc_info->set_mode(plat_dat);
+ ret = mac->soc_info->set_mode(mac->plat_dat);
if (ret)
return ret;
}
@@ -284,44 +285,14 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->soc_info = data;
mac->dev = &pdev->dev;
+ mac->plat_dat = plat_dat;
plat_dat->bsp_priv = mac;
+ plat_dat->init = ingenic_mac_init;
- ret = ingenic_mac_init(plat_dat);
- if (ret)
- return ret;
-
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ingenic_mac_suspend(struct device *dev)
-{
- int ret;
-
- ret = stmmac_suspend(dev);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int ingenic_mac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret;
-
- ret = ingenic_mac_init(priv->plat);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume);
-
static struct ingenic_soc_info jz4775_soc_info = {
.version = ID_JZ4775,
.mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
@@ -370,10 +341,9 @@ MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
static struct platform_driver ingenic_mac_driver = {
.probe = ingenic_mac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "ingenic-mac",
- .pm = pm_ptr(&ingenic_mac_pm_ops),
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = ingenic_mac_of_matches,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index 599def7b3a64..4ea7b0a803d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -113,16 +113,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
plat_dat->clk_tx_i = dwmac->tx_clk;
plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
-
plat_dat->bsp_priv = dwmac;
- plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
-
- if (plat_dat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER);
- }
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index c8bb9265bbb4..9a47015254bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -284,28 +284,28 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
}
-static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
{
- struct intel_priv_data *intel_priv = intel_data;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int serdes_phy_addr = 0;
- u32 data = 0;
-
- serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+ struct intel_priv_data *intel_priv = bsp_priv;
+ phy_interface_t interface;
+ int data;
/* Determine the link speed mode: 2.5Gbps/1Gbps */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR);
+ data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR);
+ if (data < 0)
+ return;
- if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
- SERDES_LINK_MODE_2G5) {
+ if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) {
dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
priv->plat->mdio_bus_data->default_an_inband = false;
+ interface = PHY_INTERFACE_MODE_2500BASEX;
} else {
- priv->plat->max_speed = 1000;
+ interface = PHY_INTERFACE_MODE_SGMII;
}
+
+ __set_bit(interface, interfaces);
+ priv->plat->phy_interface = interface;
}
/* Program PTP Clock Frequency for different variant of
@@ -682,7 +682,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_blen[2] = 16;
plat->ptp_max_adj = plat->clk_ptp_rate;
- plat->eee_usecs_rate = plat->clk_ptp_rate;
/* Set system clock */
sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
@@ -933,7 +932,7 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 204800000;
- plat->speed_mode_2500 = intel_speed_mode_2500;
+ plat->get_interfaces = tgl_get_interfaces;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 0;
@@ -952,7 +951,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -966,7 +964,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -980,7 +977,6 @@ static int adls_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -995,7 +991,6 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -1313,13 +1308,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- if (plat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
- }
-
ret = stmmac_config_multi_msi(pdev, plat, &res);
if (ret) {
ret = stmmac_config_single_msi(pdev, plat, &res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index a12f8e65f89f..7511c224b312 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -21,7 +21,6 @@
#define SERDES_RATE_MASK GENMASK(9, 8)
#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */
#define SERDES_LINK_MODE_MASK GENMASK(2, 1)
-#define SERDES_LINK_MODE_SHIFT 1
#define SERDES_PWR_ST_SHIFT 4
#define SERDES_PWR_ST_P0 0x0
#define SERDES_PWR_ST_P3 0x3
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 1a93787056a7..e1591e6217d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -66,12 +66,14 @@
DMA_STATUS_TPS | DMA_STATUS_TI | \
DMA_STATUS_MSK_COMMON_LOONGSON)
-#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC1 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC2 0x7a23
#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13
-#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */
-#define CHANNEL_NUM 8
+#define DWMAC_CORE_MULTICHAN_V1 0x10 /* Loongson custom ID 0x10 */
+#define DWMAC_CORE_MULTICHAN_V2 0x12 /* Loongson custom ID 0x12 */
struct loongson_data {
+ u32 multichan;
u32 loongson_id;
struct device *dev;
};
@@ -83,6 +85,8 @@ struct stmmac_pci_info {
static void loongson_default_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct loongson_data *ld = plat->bsp_priv;
+
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
@@ -116,31 +120,37 @@ static void loongson_default_data(struct pci_dev *pdev,
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
-}
-
-static int loongson_gmac_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- struct loongson_data *ld;
- int i;
-
- ld = plat->bsp_priv;
- loongson_default_data(pdev, plat);
-
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
- plat->rx_queues_to_use = CHANNEL_NUM;
- plat->tx_queues_to_use = CHANNEL_NUM;
+ switch (ld->loongson_id) {
+ case DWMAC_CORE_MULTICHAN_V1:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
/* Only channel 0 supports checksum,
* so turn off checksum to enable multiple channels.
*/
- for (i = 1; i < CHANNEL_NUM; i++)
+ for (int i = 1; i < 8; i++)
plat->tx_queues_cfg[i].coe_unsupported = 1;
- } else {
+
+ break;
+ case DWMAC_CORE_MULTICHAN_V2:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 4;
+ plat->tx_queues_to_use = 4;
+ break;
+ default:
+ ld->multichan = 0;
plat->tx_queues_to_use = 1;
plat->rx_queues_to_use = 1;
+ break;
}
+}
+
+static int loongson_gmac_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ loongson_default_data(pdev, plat);
plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
@@ -172,27 +182,8 @@ static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode)
static int loongson_gnet_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- struct loongson_data *ld;
- int i;
-
- ld = plat->bsp_priv;
-
loongson_default_data(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
- plat->rx_queues_to_use = CHANNEL_NUM;
- plat->tx_queues_to_use = CHANNEL_NUM;
-
- /* Only channel 0 supports checksum,
- * so turn off checksum to enable multiple channels.
- */
- for (i = 1; i < CHANNEL_NUM; i++)
- plat->tx_queues_cfg[i].coe_unsupported = 1;
- } else {
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
- }
-
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
plat->fix_mac_speed = loongson_gnet_fix_speed;
@@ -350,14 +341,14 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
return NULL;
/* The Loongson GMAC and GNET devices are based on the DW GMAC
- * v3.50a and v3.73a IP-cores. But the HW designers have changed the
- * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the
- * network controllers with the multi-channels feature
+ * v3.50a and v3.73a IP-cores. But the HW designers have changed
+ * the GMAC_VERSION.SNPSVER field to the custom 0x10/0x12 value
+ * on the network controllers with the multi-channels feature
* available to emphasize the differences: multiple DMA-channels,
* AV feature and GMAC_INT_STATUS CSR flags layout. Get back the
* original value so the correct HW-interface would be selected.
*/
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ if (ld->multichan) {
priv->synopsys_id = DWMAC_CORE_3_70;
*dma = dwmac1000_dma_ops;
dma->init_chan = loongson_dwmac_dma_init_channel;
@@ -378,13 +369,13 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- /* Loongson GMAC doesn't support the flow control. LS2K2000
- * GNET doesn't support the half-duplex link mode.
+ /* Loongson GMAC doesn't support the flow control. Loongson GNET
+ * without multi-channel doesn't support the half-duplex link mode.
*/
- if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
+ if (pdev->device != PCI_DEVICE_ID_LOONGSON_GNET) {
mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
} else {
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
else
@@ -413,9 +404,11 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
- int i, ret, vecs;
+ int i, ch_num, ret, vecs;
+
+ ch_num = min(plat->tx_queues_to_use, plat->rx_queues_to_use);
- vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
+ vecs = roundup_pow_of_two(ch_num * 2 + 1);
ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
if (ret < 0) {
dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
@@ -424,14 +417,12 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, 0);
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- res->rx_irq[CHANNEL_NUM - 1 - i] =
- pci_irq_vector(pdev, 1 + i * 2);
+ for (i = 0; i < ch_num; i++) {
+ res->rx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 1 + i * 2);
}
- for (i = 0; i < plat->tx_queues_to_use; i++) {
- res->tx_irq[CHANNEL_NUM - 1 - i] =
- pci_irq_vector(pdev, 2 + i * 2);
+ for (i = 0; i < ch_num; i++) {
+ res->tx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 2 + i * 2);
}
plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
@@ -593,7 +584,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
goto err_disable_device;
/* Use the common MAC IRQ if per-channel MSI allocation failed */
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_config(pdev, plat, &res);
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
@@ -605,7 +596,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
err_plat_clear:
if (dev_of_node(&pdev->dev))
loongson_dwmac_dt_clear(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_clear(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -624,7 +615,7 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
if (dev_of_node(&pdev->dev))
loongson_dwmac_dt_clear(pdev, priv->plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_clear(pdev);
pci_disable_device(pdev);
@@ -669,7 +660,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
loongson_dwmac_resume);
static const struct pci_device_id loongson_dwmac_id_table[] = {
- { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC1, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC2, &loongson_gmac_pci_info) },
{ PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
{}
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index d178d5ddc7c7..39421d6a34e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -581,7 +581,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
int i;
priv_plat->phy_mode = plat->phy_interface;
- plat->mac_interface = priv_plat->phy_mode;
if (priv_plat->mac_wol)
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 0e4da216f942..e30bdf72331a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -106,12 +106,11 @@ struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
void __iomem *mac_base;
- int (*configure_func)(struct qcom_ethqos *ethqos);
+ int (*configure_func)(struct qcom_ethqos *ethqos, int speed);
unsigned int link_clk_rate;
struct clk *link_clk;
struct phy *serdes_phy;
- int speed;
int serdes_speed;
phy_interface_t phy_mode;
@@ -385,7 +384,7 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
return 0;
}
-static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
+static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
int phase_shift;
@@ -412,7 +411,7 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
0, RGMII_IO_MACRO_CONFIG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_1000:
rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
@@ -532,14 +531,14 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
loopback, RGMII_IO_MACRO_CONFIG);
break;
default:
- dev_err(dev, "Invalid speed %d\n", ethqos->speed);
+ dev_err(dev, "Invalid speed %d\n", speed);
return -EINVAL;
}
return 0;
}
-static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
volatile unsigned int dll_lock;
@@ -562,7 +561,7 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
if (ethqos->has_emac_ge_3) {
- if (ethqos->speed == SPEED_1000) {
+ if (speed == SPEED_1000) {
rgmii_writel(ethqos, 0x1800000, SDCC_TEST_CTL);
rgmii_writel(ethqos, 0x2C010800, SDCC_USR_CTL);
rgmii_writel(ethqos, 0xA001, SDCC_HC_REG_DLL_CONFIG2);
@@ -580,7 +579,7 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
SDCC_HC_REG_DLL_CONFIG);
- if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
+ if (speed != SPEED_100 && speed != SPEED_10) {
/* Set DLL_EN */
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
@@ -607,10 +606,10 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
dev_err(dev, "Timeout while waiting for DLL lock\n");
}
- if (ethqos->speed == SPEED_1000)
+ if (speed == SPEED_1000)
ethqos_dll_configure(ethqos);
- ethqos_rgmii_macro_init(ethqos);
+ ethqos_rgmii_macro_init(ethqos, speed);
return 0;
}
@@ -626,7 +625,7 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
/* On interface toggle, MAC registers get reset.
 * Configure the MAC block for SGMII on ethernet PHY link up
*/
-static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
{
struct net_device *dev = platform_get_drvdata(ethqos->pdev);
struct stmmac_priv *priv = netdev_priv(dev);
@@ -634,7 +633,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
val = readl(ethqos->mac_base + MAC_CTRL_REG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_2500:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
@@ -673,17 +672,9 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
return val;
}
-static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data)
+static int ethqos_configure(struct qcom_ethqos *ethqos, int speed)
{
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
-}
-
-static int ethqos_configure(struct qcom_ethqos *ethqos)
-{
- return ethqos->configure_func(ethqos);
+ return ethqos->configure_func(ethqos, speed);
}
static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
@@ -691,9 +682,8 @@ static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
struct qcom_ethqos *ethqos = priv;
qcom_ethqos_set_sgmii_loopback(ethqos, false);
- ethqos->speed = speed;
ethqos_update_link_clk(ethqos, speed);
- ethqos_configure(ethqos);
+ ethqos_configure(ethqos, speed);
}
static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
@@ -709,7 +699,7 @@ static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
if (ret)
return ret;
- return phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
}
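Two related changes meet in the hunks above: the negotiated speed now travels down the configure_func() call chain as a parameter instead of living in a cached ethqos->speed field, and SerDes power-up restores serdes_speed, the rate the lane was actually last programmed for, rather than the MAC link speed. A minimal sketch of the shape of that refactor, with illustrative type and field names rather than the driver's own:

struct eq {
	int (*configure)(struct eq *e, int speed);
	int serdes_speed;	/* what the SerDes was last set to */
};

static void fix_mac_speed(struct eq *e, int speed)
{
	/* no stale cached copy: each call sees the speed phylink
	 * just reported */
	e->configure(e, speed);
}

static int serdes_powerup(struct eq *e, int (*set_speed)(int))
{
	return set_speed(e->serdes_speed);	/* not the MAC link speed */
}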
static void qcom_ethqos_serdes_powerdown(struct net_device *ndev, void *priv)
@@ -803,8 +793,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->configure_func = ethqos_configure_rgmii;
break;
case PHY_INTERFACE_MODE_2500BASEX:
- plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500;
- fallthrough;
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
@@ -847,7 +835,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy),
"Failed to get serdes phy\n");
- ethqos->speed = SPEED_1000;
ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
new file mode 100644
index 000000000000..9a774046455b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dwmac-renesas-gbeth.c - DWMAC Specific Glue layer for Renesas GBETH
+ *
+ * The Rx and Tx clocks are supplied as follows for the GBETH IP.
+ *
+ * Rx / Tx
+ * -------+------------- on / off -------
+ * |
+ * | Rx-180 / Tx-180
+ * +---- not ---- on / off -------
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "stmmac_platform.h"
+
+struct renesas_gbeth {
+ struct plat_stmmacenet_data *plat_dat;
+ struct reset_control *rstc;
+ struct device *dev;
+};
+
+static const char *const renesas_gbeth_clks[] = {
+ "tx", "tx-180", "rx", "rx-180",
+};
+
+static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ ret = reset_control_deassert(gbeth->rstc);
+ if (ret) {
+ dev_err(gbeth->dev, "Reset deassert failed\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks,
+ plat_dat->clks);
+ if (ret)
+ reset_control_assert(gbeth->rstc);
+
+ return ret;
+}
+
+static void renesas_gbeth_exit(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
+
+ ret = reset_control_assert(gbeth->rstc);
+ if (ret)
+ dev_err(gbeth->dev, "Reset assert failed\n");
+}
+
+static int renesas_gbeth_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct renesas_gbeth *gbeth;
+ unsigned int i;
+ int err;
+
+ err = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to get resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ gbeth = devm_kzalloc(dev, sizeof(*gbeth), GFP_KERNEL);
+ if (!gbeth)
+ return -ENOMEM;
+
+ plat_dat->num_clks = ARRAY_SIZE(renesas_gbeth_clks);
+ plat_dat->clks = devm_kcalloc(dev, plat_dat->num_clks,
+ sizeof(*plat_dat->clks), GFP_KERNEL);
+ if (!plat_dat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < plat_dat->num_clks; i++)
+ plat_dat->clks[i].id = renesas_gbeth_clks[i];
+
+ err = devm_clk_bulk_get(dev, plat_dat->num_clks, plat_dat->clks);
+ if (err < 0)
+ return err;
+
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
+ if (!plat_dat->clk_tx_i)
+ return dev_err_probe(dev, -EINVAL,
+ "error finding tx clock\n");
+
+ gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(gbeth->rstc))
+ return PTR_ERR(gbeth->rstc);
+
+ gbeth->dev = dev;
+ gbeth->plat_dat = plat_dat;
+ plat_dat->bsp_priv = gbeth;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->init = renesas_gbeth_init;
+ plat_dat->exit = renesas_gbeth_exit;
+ plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ STMMAC_FLAG_SPH_DISABLE;
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id renesas_gbeth_match[] = {
+ { .compatible = "renesas,rzv2h-gbeth", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_gbeth_match);
+
+static struct platform_driver renesas_gbeth_driver = {
+ .probe = renesas_gbeth_probe,
+ .driver = {
+ .name = "renesas-gbeth",
+ .of_match_table = renesas_gbeth_match,
+ },
+};
+module_platform_driver(renesas_gbeth_driver);
+
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas GBETH DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
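The probe above builds its clock table once from a static name list and hands it to the core, which then runs clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() from the init/exit hooks. A hedged kernel-style sketch of that clk_bulk pattern in isolation, assuming an ordinary platform-device probe context (names here are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static const char *const my_clks[] = { "tx", "tx-180", "rx", "rx-180" };

static int get_bulk_clocks(struct device *dev,
			   struct clk_bulk_data **out, int *num)
{
	struct clk_bulk_data *clks;
	int i;

	*num = ARRAY_SIZE(my_clks);
	clks = devm_kcalloc(dev, *num, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for (i = 0; i < *num; i++)
		clks[i].id = my_clks[i];	/* names match the DT binding */

	*out = clks;
	return devm_clk_bulk_get(dev, *num, clks);	/* all-or-nothing */
}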
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 116855658559..72b50f6d72f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -50,6 +50,7 @@ struct socfpga_dwmac {
u32 reg_offset;
u32 reg_shift;
struct device *dev;
+ struct plat_stmmacenet_data *plat_dat;
struct regmap *sys_mgr_base_addr;
struct reset_control *stmmac_rst;
struct reset_control *stmmac_ocp_rst;
@@ -58,17 +59,15 @@ struct socfpga_dwmac {
void __iomem *sgmii_adapter_base;
bool f2h_ptp_ref_clk;
const struct socfpga_dwmac_ops *ops;
- struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode)
+static void socfpga_dwmac_fix_mac_speed(void *bsp_priv, int speed,
+ unsigned int mode)
{
- struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+ struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)bsp_priv;
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dwmac->dev));
void __iomem *splitter_base = dwmac->splitter_base;
void __iomem *sgmii_adapter_base = dwmac->sgmii_adapter_base;
- struct device *dev = dwmac->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct phy_device *phy_dev = ndev->phydev;
u32 val;
if (sgmii_adapter_base)
@@ -95,7 +94,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (phy_dev && sgmii_adapter_base)
+ if ((priv->plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ priv->plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) &&
+ sgmii_adapter_base)
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
}
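The predicate change above is subtle: the old code keyed the SGMII adapter enable on ndev->phydev being non-NULL, which is never true for 1000BASE-X links (no PHY device is attached), so the adapter stayed disabled there. The new test asks the question that actually matters. A small sketch of it, assuming the usual phy_interface_t constants from <linux/phy.h>:

#include <linux/phy.h>
#include <linux/types.h>

static bool needs_sgmii_adapter(phy_interface_t iface)
{
	/* the adapter sits on the data path for both modes, with or
	 * without an attached PHY device */
	return iface == PHY_INTERFACE_MODE_SGMII ||
	       iface == PHY_INTERFACE_MODE_1000BASEX;
}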
@@ -233,10 +234,7 @@ err_node_put:
static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
{
- struct net_device *ndev = dev_get_drvdata(dwmac->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return priv->plat->mac_interface;
+ return dwmac->plat_dat->mac_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
@@ -258,6 +256,7 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
*val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
break;
case PHY_INTERFACE_MODE_RMII:
@@ -435,6 +434,13 @@ static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
return priv->hw->phylink_pcs;
}
+static int socfpga_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+{
+ struct socfpga_dwmac *dwmac = bsp_priv;
+
+ return dwmac->ops->set_phy_mode(dwmac);
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -442,8 +448,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
struct socfpga_dwmac *dwmac;
- struct net_device *ndev;
- struct stmmac_priv *stpriv;
const struct socfpga_dwmac_ops *ops;
ops = device_get_match_data(&pdev->dev);
@@ -479,9 +483,17 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ /* The socfpga driver needs to control the stmmac reset to set the phy
+ * mode. Create a copy of the core reset handle so it can be used by
+ * the driver later.
+ */
+ dwmac->stmmac_rst = plat_dat->stmmac_rst;
dwmac->ops = ops;
+ dwmac->plat_dat = plat_dat;
+
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+ plat_dat->init = socfpga_dwmac_init;
plat_dat->pcs_init = socfpga_dwmac_pcs_init;
plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
plat_dat->select_pcs = socfpga_dwmac_select_pcs;
@@ -489,67 +501,9 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->riwt_off = 1;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- return ret;
-
- ndev = platform_get_drvdata(pdev);
- stpriv = netdev_priv(ndev);
-
- /* The socfpga driver needs to control the stmmac reset to set the phy
- * mode. Create a copy of the core reset handle so it can be used by
- * the driver later.
- */
- dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
-
- ret = ops->set_phy_mode(dwmac);
- if (ret)
- goto err_dvr_remove;
-
- return 0;
-
-err_dvr_remove:
- stmmac_dvr_remove(&pdev->dev);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-#ifdef CONFIG_PM_SLEEP
-static int socfpga_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev);
-
- dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- stmmac_bus_clks_config(priv, false);
-
- return 0;
-}
-
-static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return stmmac_bus_clks_config(priv, true);
-}
-
-static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
- SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
-};
-
static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
.set_phy_mode = socfpga_gen5_set_phy_mode,
};
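The probe rewrite above is what lets the driver delete its hand-rolled resume handler and PM ops: once set_phy_mode() runs from plat_dat->init, the generic stmmac platform resume path re-runs it automatically, which is exactly what the removed socfpga_dwmac_resume() used to do by hand. A hedged sketch of that shape, with illustrative names:

struct glue_ops {
	int (*set_phy_mode)(void *glue);
};

struct glue {
	const struct glue_ops *ops;
};

static int glue_init(void *pdev, void *bsp_priv)
{
	struct glue *g = bsp_priv;

	/* invoked at probe time and again on every resume */
	return g->ops->set_phy_mode(g);
}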
@@ -567,10 +521,9 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
.probe = socfpga_dwmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
- .pm = &socfpga_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = socfpga_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index be57c6c12c1c..53d5ce1f6dc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -23,12 +23,7 @@
#define DWMAC_50MHZ 50000000
-#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
- iface == PHY_INTERFACE_MODE_RGMII_ID || \
- iface == PHY_INTERFACE_MODE_RGMII_RXID || \
- iface == PHY_INTERFACE_MODE_RGMII_TXID)
-
-#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+#define IS_PHY_IF_MODE_GBIT(iface) (phy_interface_mode_is_rgmii(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
/* STiH4xx register definitions (STiH407/STiH410 families)
@@ -148,7 +143,7 @@ static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode)
src = TX_RETIME_SRC_CLKGEN;
freq = DWMAC_50MHZ;
}
- } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ } else if (phy_interface_mode_is_rgmii(dwmac->interface)) {
/* On GiGa clk source can be either ext or from clkgen */
freq = rgmii_clock(spd);
@@ -238,6 +233,29 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
+static int sti_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+ int ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = sti_dwmac_set_mode(dwmac);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+static void sti_dwmac_exit(struct platform_device *pdev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+
+ clk_disable_unprepare(dwmac->clk);
+}
+
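sti_dwmac_init() above uses the enable-then-configure rollback idiom: if the mode setup fails, the clock reference taken just before is dropped, so init stays balanced with exit. The same idiom in isolation, as a kernel-style sketch:

#include <linux/clk.h>

static int init_with_rollback(struct clk *clk,
			      int (*set_mode)(void *ctx), void *ctx)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	ret = set_mode(ctx);
	if (ret)
		clk_disable_unprepare(clk);	/* undo on failure */

	return ret;
}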
static int sti_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -274,59 +292,12 @@ static int sti_dwmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = data->fix_retime_src;
+ plat_dat->init = sti_dwmac_init;
+ plat_dat->exit = sti_dwmac_exit;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
-
- ret = sti_dwmac_set_mode(dwmac);
- if (ret)
- goto disable_clk;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto disable_clk;
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static void sti_dwmac_remove(struct platform_device *pdev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-
- stmmac_dvr_remove(&pdev->dev);
-
- clk_disable_unprepare(dwmac->clk);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int sti_dwmac_suspend(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
-
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static int sti_dwmac_resume(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-
- clk_prepare_enable(dwmac->clk);
- sti_dwmac_set_mode(dwmac);
-
- return stmmac_resume(dev);
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
- sti_dwmac_resume);
-
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
};
@@ -339,10 +310,9 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
.probe = sti_dwmac_probe,
- .remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = pm_sleep_ptr(&sti_dwmac_pm_ops),
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = sti_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index c3d321192581..1eb16eec9c0d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -119,7 +119,7 @@ struct stm32_ops {
u32 syscfg_clr_off;
};
-static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
+static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac)
{
int ret;
@@ -127,11 +127,9 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
if (ret)
goto err_clk_tx;
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) {
- ret = clk_prepare_enable(dwmac->clk_rx);
- if (ret)
- goto err_clk_rx;
- }
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_rx;
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
@@ -148,15 +146,14 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
err_clk_eth_ck:
clk_disable_unprepare(dwmac->syscfg_clk);
err_syscfg_clk:
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume)
- clk_disable_unprepare(dwmac->clk_rx);
+ clk_disable_unprepare(dwmac->clk_rx);
err_clk_rx:
clk_disable_unprepare(dwmac->clk_tx);
err_clk_tx:
return ret;
}
-static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
int ret;
@@ -167,7 +164,7 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
return ret;
}
- return stm32_dwmac_clk_enable(dwmac, resume);
+ return stm32_dwmac_clk_enable(dwmac);
}
static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
@@ -382,12 +379,10 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
SYSCFG_MCU_ETH_MASK, val << 23);
}
-static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend)
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
{
clk_disable_unprepare(dwmac->clk_tx);
- if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend)
- clk_disable_unprepare(dwmac->clk_rx);
-
+ clk_disable_unprepare(dwmac->clk_rx);
clk_disable_unprepare(dwmac->syscfg_clk);
if (dwmac->enable_eth_ck)
clk_disable_unprepare(dwmac->clk_eth_ck);
@@ -541,18 +536,32 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
- ret = stm32_dwmac_init(plat_dat, false);
+ ret = stm32_dwmac_init(plat_dat);
if (ret)
return ret;
+ /* If this platform requires the clock to be running in suspend,
+ * prepare and enable the receive clock an additional time to keep
+ * it running.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend) {
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_disable;
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_clk_disable;
+ goto err_clk_disable_suspend;
return 0;
+err_clk_disable_suspend:
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
err_clk_disable:
- stm32_dwmac_clk_disable(dwmac, false);
+ stm32_dwmac_clk_disable(dwmac);
return ret;
}
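The extra clk_prepare_enable() above is a refcounting trick: a clock only truly stops when its enable count hits zero, so holding one additional reference from probe means the suspend path, which drops exactly one reference via stm32_dwmac_clk_disable(), leaves the Rx clock ticking. A toy model of the counting:

#include <stdio.h>

static int enable_count;	/* stands in for the clk core's counter */

int main(void)
{
	enable_count++;		/* stm32_dwmac_init() at probe */
	enable_count++;		/* extra suspend-proof reference, probe only */

	enable_count--;		/* suspend: stm32_dwmac_clk_disable() */
	printf("in suspend: count=%d -> clock %s\n", enable_count,
	       enable_count ? "still running" : "stopped");

	enable_count++;		/* resume: stm32_dwmac_init() */
	enable_count -= 2;	/* remove: balance both references */
	return 0;
}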
@@ -565,7 +574,15 @@ static void stm32_dwmac_remove(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
- stm32_dwmac_clk_disable(dwmac, false);
+ /* If this platform requires the clock to be running in suspend,
+ * we need to disable and unprepare the receive clock an additional
+ * time to balance the extra clk_prepare_enable() in the probe
+ * function.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
+ stm32_dwmac_clk_disable(dwmac);
if (dwmac->irq_pwr_wakeup >= 0) {
dev_pm_clear_wake_irq(&pdev->dev);
@@ -596,7 +613,7 @@ static int stm32_dwmac_suspend(struct device *dev)
if (ret)
return ret;
- stm32_dwmac_clk_disable(dwmac, true);
+ stm32_dwmac_clk_disable(dwmac);
if (dwmac->ops->suspend)
ret = dwmac->ops->suspend(dwmac);
@@ -614,7 +631,7 @@ static int stm32_dwmac_resume(struct device *dev)
if (dwmac->ops->resume)
dwmac->ops->resume(dwmac);
- ret = stm32_dwmac_init(priv->plat, true);
+ ret = stm32_dwmac_init(priv->plat);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 85723a78793a..2796dc426943 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
* address. No need to mask it again.
*/
- reg |= 1 << H3_EPHY_ADDR_SHIFT;
+ reg |= ret << H3_EPHY_ADDR_SHIFT;
} else {
/* For SoCs without internal PHY the PHY selection bit should be
* set to 0 (external PHY).
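The one-character fix a few lines up matters more than it looks: `ret` holds the PHY address that of_mdio_parse_addr() just returned, while the old code shifted the literal 1, so any internal PHY not at MDIO address 1 was programmed into the syscon incorrectly. A standalone illustration; the shift value is assumed to match the driver's H3_EPHY_ADDR_SHIFT definition:

#include <stdio.h>
#include <stdint.h>

#define EPHY_ADDR_SHIFT 20	/* assumed H3_EPHY_ADDR_SHIFT value */

int main(void)
{
	int phy_addr = 5;	/* example: internal PHY at MDIO address 5 */

	printf("old field: %#x, fixed field: %#x\n",
	       1u << EPHY_ADDR_SHIFT,
	       (uint32_t)phy_addr << EPHY_ADDR_SHIFT);
	return 0;
}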
@@ -1239,14 +1239,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
goto dwmac_syscon;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto dwmac_exit;
-
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
@@ -1283,9 +1279,7 @@ dwmac_mux:
clk_put(gmac->ephy_clk);
dwmac_remove:
pm_runtime_put_noidle(&pdev->dev);
- stmmac_dvr_remove(&pdev->dev);
-dwmac_exit:
- sun8i_dwmac_exit(pdev, gmac);
+ stmmac_pltfr_remove(pdev);
dwmac_syscon:
sun8i_dwmac_unset_syscon(gmac);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 9f098ff0ff05..1eadcf5d1ad6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -72,28 +72,28 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, int speed, unsigned int mode)
+static int sun7i_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct sunxi_priv_data *gmac = priv;
-
- /* only GMII mode requires us to reconfigure the clock lines */
- if (gmac->interface != PHY_INTERFACE_MODE_GMII)
- return;
-
- if (gmac->clk_enabled) {
- clk_disable(gmac->tx_clk);
- gmac->clk_enabled = 0;
- }
- clk_unprepare(gmac->tx_clk);
-
- if (speed == 1000) {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
- clk_prepare_enable(gmac->tx_clk);
- gmac->clk_enabled = 1;
- } else {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
- clk_prepare(gmac->tx_clk);
+ struct sunxi_priv_data *gmac = bsp_priv;
+
+ if (interface == PHY_INTERFACE_MODE_GMII) {
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
}
+ return 0;
}
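The conversion above swaps a void fix_mac_speed() callback for set_clk_tx_rate(), which both receives the interface mode (so the GMII guard becomes a plain condition) and can return a status. The rate choice itself is unchanged; a standalone restatement, with the rate values assumed from the driver's SUN7I_GMAC_* definitions:

#include <stdio.h>

#define SUN7I_GMAC_GMII_RGMII_RATE	125000000
#define SUN7I_GMAC_MII_RATE		25000000

int main(void)
{
	int speed = 1000;
	long rate = (speed == 1000) ? SUN7I_GMAC_GMII_RGMII_RATE
				    : SUN7I_GMAC_MII_RATE;

	/* the clock is also left enabled only in the gigabit case */
	printf("tx clk for %d Mb/s: %ld Hz\n", speed, rate);
	return 0;
}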
static int sun7i_gmac_probe(struct platform_device *pdev)
@@ -140,24 +140,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = gmac;
plat_dat->init = sun7i_gmac_init;
plat_dat->exit = sun7i_gmac_exit;
- plat_dat->fix_mac_speed = sun7i_fix_speed;
+ plat_dat->set_clk_tx_rate = sun7i_set_clk_tx_rate;
plat_dat->tx_fifo_size = 4096;
plat_dat->rx_fifo_size = 16384;
- ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_exit;
-
- return 0;
-
-err_gmac_exit:
- sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id sun7i_dwmac_match[] = {
@@ -168,7 +155,6 @@ MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
.probe = sun7i_gmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 33cf99797df5..5e6ac82a89b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -51,21 +51,14 @@ struct visconti_eth {
u32 phy_intf_sel;
struct clk *phy_ref_clk;
struct device *dev;
- spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
+static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct visconti_eth *dwmac = priv;
+ struct visconti_eth *dwmac = bsp_priv;
struct net_device *netdev = dev_get_drvdata(dwmac->dev);
unsigned int val, clk_sel_val = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&dwmac->lock, flags);
-
- /* adjust link */
- val = readl(dwmac->reg + MAC_CTRL_REG);
- val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES);
switch (speed) {
case SPEED_1000:
@@ -77,24 +70,19 @@ static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
- val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
break;
case SPEED_10:
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
- val |= GMAC_CONFIG_PS;
break;
default:
/* No bit control */
netdev_err(netdev, "Unsupported speed request (%d)", speed);
- spin_unlock_irqrestore(&dwmac->lock, flags);
- return;
+ return -EINVAL;
}
- writel(val, dwmac->reg + MAC_CTRL_REG);
-
/* Stop internal clock */
val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
@@ -136,7 +124,7 @@ static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
break;
}
- spin_unlock_irqrestore(&dwmac->lock, flags);
+ return 0;
}
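With the conversion above, an unsupported speed now propagates -EINVAL instead of silently returning, and the PS/FES port-select writes (and the spinlock guarding them) are gone because the core's own link-up path programs MAC_CTRL_REG. A toy restatement of the RMII divider selection, with symbolic return values standing in for the ETHER_CLK_SEL_* bits:

#include <stdio.h>

static int rmii_div_sel(int speed)
{
	switch (speed) {
	case 100:
		return 2;	/* ETHER_CLK_SEL_DIV_SEL_2 */
	case 10:
		return 20;	/* ETHER_CLK_SEL_DIV_SEL_20 */
	default:
		return -1;	/* -> -EINVAL in the real callback */
	}
}

int main(void)
{
	printf("div@100M=%d div@10M=%d div@1G=%d\n",
	       rmii_div_sel(100), rmii_div_sel(10), rmii_div_sel(1000));
	return 0;
}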
static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
@@ -228,11 +216,10 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = visconti_eth_set_clk_tx_rate;
ret = visconti_eth_clock_probe(pdev, plat_dat);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 42fe29a4e300..f4694fd576f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -17,11 +17,7 @@
#define GMAC_EXT_CONFIG 0x00000004
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
-#define GMAC_VLAN_TAG 0x00000050
-#define GMAC_VLAN_TAG_DATA 0x00000054
-#define GMAC_VLAN_HASH_TABLE 0x00000058
#define GMAC_RX_FLOW_CTRL 0x00000090
-#define GMAC_VLAN_INCL 0x00000060
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
#define GMAC_TXQ_PRTY_MAP0 0x98
#define GMAC_TXQ_PRTY_MAP1 0x9C
@@ -31,7 +27,6 @@
#define GMAC_RXQ_CTRL3 0x000000ac
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
-#define GMAC_1US_TIC_COUNTER 0x000000dc
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
@@ -82,42 +77,6 @@
#define GMAC_MAX_PERFECT_ADDRESSES 128
-/* MAC VLAN */
-#define GMAC_VLAN_EDVLP BIT(26)
-#define GMAC_VLAN_VTHM BIT(25)
-#define GMAC_VLAN_DOVLTC BIT(20)
-#define GMAC_VLAN_ESVL BIT(18)
-#define GMAC_VLAN_ETV BIT(16)
-#define GMAC_VLAN_VID GENMASK(15, 0)
-#define GMAC_VLAN_VLTI BIT(20)
-#define GMAC_VLAN_CSVL BIT(19)
-#define GMAC_VLAN_VLC GENMASK(17, 16)
-#define GMAC_VLAN_VLC_SHIFT 16
-#define GMAC_VLAN_VLHT GENMASK(15, 0)
-
-/* MAC VLAN Tag */
-#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_ETV BIT(16)
-
-/* MAC VLAN Tag Control */
-#define GMAC_VLAN_TAG_CTRL_OB BIT(0)
-#define GMAC_VLAN_TAG_CTRL_CT BIT(1)
-#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
-#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2
-#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
-#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21
-#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24)
-
-#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-
-/* MAC VLAN Tag Data/Filter */
-#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_DATA_VEN BIT(16)
-#define GMAC_VLAN_TAG_DATA_ETV BIT(17)
-
/* MAC RX Queue Enable */
#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index cc4ddf608652..9c2549d4100f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -18,6 +18,7 @@
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_pcs.h"
+#include "stmmac_vlan.h"
#include "dwmac4.h"
#include "dwmac5.h"
@@ -448,165 +449,6 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
-static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- u32 val;
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~GMAC_VLAN_TAG_VID;
- val |= GMAC_VLAN_TAG_ETV | vid;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-}
-
-static int dwmac4_write_vlan_filter(struct net_device *dev,
- struct mac_device_info *hw,
- u8 index, u32 data)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- int ret;
- u32 val;
-
- if (index >= hw->num_vlan)
- return -EINVAL;
-
- writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
- GMAC_VLAN_TAG_CTRL_CT |
- GMAC_VLAN_TAG_CTRL_OB);
- val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-
- ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
- !(val & GMAC_VLAN_TAG_CTRL_OB),
- 1000, 500000);
- if (ret) {
- netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int index = -1;
- u32 val = 0;
- int i, ret;
-
- if (vid > 4095)
- return -EINVAL;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- /* For single VLAN filter, VID 0 means VLAN promiscuous */
- if (vid == 0) {
- netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
- return -EPERM;
- }
-
- if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
- netdev_err(dev, "Only single VLAN ID supported\n");
- return -EPERM;
- }
-
- hw->vlan_filter[0] = vid;
- dwmac4_write_single_vlan(dev, vid);
-
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
-
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] == val)
- return 0;
- else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
- index = i;
- }
-
- if (index == -1) {
- netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
- hw->num_vlan);
- return -EPERM;
- }
-
- ret = dwmac4_write_vlan_filter(dev, hw, index, val);
-
- if (!ret)
- hw->vlan_filter[index] = val;
-
- return ret;
-}
-
-static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int i, ret = 0;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
- hw->vlan_filter[0] = 0;
- dwmac4_write_single_vlan(dev, 0);
- }
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
- ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
-
- if (!ret)
- hw->vlan_filter[i] = 0;
- else
- return ret;
- }
- }
-
- return ret;
-}
-
-static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- u32 hash;
- u32 val;
- int i;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
- return;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
- val = hw->vlan_filter[i];
- dwmac4_write_vlan_filter(dev, hw, i, val);
- }
- }
-
- hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
- if (hash & GMAC_VLAN_VLHT) {
- value = readl(ioaddr + GMAC_VLAN_TAG);
- value |= GMAC_VLAN_VTHM;
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
@@ -965,45 +807,6 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
-
- value = readl(ioaddr + GMAC_VLAN_TAG);
-
- if (hash) {
- value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = GMAC_VLAN_ETV;
-
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
- } else {
- value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
- value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
- value &= ~GMAC_VLAN_DOVLTC;
- value &= ~GMAC_VLAN_VID;
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + GMAC_CONFIG);
@@ -1014,19 +817,6 @@ static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + GMAC_VLAN_INCL);
- value |= GMAC_VLAN_VLTI;
- value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~GMAC_VLAN_VLC;
- value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
- writel(value, ioaddr + GMAC_VLAN_INCL);
-}
-
static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
u32 addr)
{
@@ -1143,35 +933,6 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return 0;
}
-static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
- struct dma_desc *rx_desc, struct sk_buff *skb)
-{
- if (hw->desc->get_rx_vlan_valid(rx_desc)) {
- u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
-
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
- }
-}
-
-static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
- value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
-
- if (hw->hw_vlan_en)
- /* Always strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_ALL;
- else
- /* Do not strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_NONE;
-
- /* Enable outer VLAN Tag in Rx DMA descriptor */
- value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
- writel(value, ioaddr + GMAC_VLAN_TAG);
-}
-
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.update_caps = dwmac4_update_caps,
@@ -1201,17 +962,10 @@ const struct stmmac_ops dwmac4_ops = {
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac410_ops = {
@@ -1244,18 +998,11 @@ const struct stmmac_ops dwmac410_ops = {
.set_filter = dwmac4_set_filter,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1292,51 +1039,13 @@ const struct stmmac_ops dwmac510_ops = {
.rxp_config = dwmac5_rxp_config,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
-static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
-{
- u32 val, num_vlan;
-
- val = readl(ioaddr + GMAC_HW_FEATURE3);
- switch (val & GMAC_HW_FEAT_NRVF) {
- case 0:
- num_vlan = 1;
- break;
- case 1:
- num_vlan = 4;
- break;
- case 2:
- num_vlan = 8;
- break;
- case 3:
- num_vlan = 16;
- break;
- case 4:
- num_vlan = 24;
- break;
- case 5:
- num_vlan = 32;
- break;
- default:
- num_vlan = 1;
- }
-
- return num_vlan;
-}
-
int dwmac4_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1368,7 +1077,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(20, 16);
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
- mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a03f5d771566..0d408ee17f33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -57,19 +57,6 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
-#define XGMAC_VLAN_TAG 0x00000050
-#define XGMAC_VLAN_EDVLP BIT(26)
-#define XGMAC_VLAN_VTHM BIT(25)
-#define XGMAC_VLAN_DOVLTC BIT(20)
-#define XGMAC_VLAN_ESVL BIT(18)
-#define XGMAC_VLAN_ETV BIT(16)
-#define XGMAC_VLAN_VID GENMASK(15, 0)
-#define XGMAC_VLAN_HASH_TABLE 0x00000058
-#define XGMAC_VLAN_INCL 0x00000060
-#define XGMAC_VLAN_VLTI BIT(20)
-#define XGMAC_VLAN_CSVL BIT(19)
-#define XGMAC_VLAN_VLC GENMASK(17, 16)
-#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -477,6 +464,7 @@
#define XGMAC_RDES3_RSV BIT(26)
#define XGMAC_RDES3_L34T GENMASK(23, 20)
#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_RDES3_ET_LT GENMASK(19, 16)
#define XGMAC_L34T_IP4TCP 0x1
#define XGMAC_L34T_IP4UDP 0x2
#define XGMAC_L34T_IP6TCP 0x9
@@ -486,6 +474,17 @@
#define XGMAC_RDES3_TSD BIT(6)
#define XGMAC_RDES3_TSA BIT(4)
+/* RDES0 (write back format) */
+#define XGMAC_RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* Error Type or L2 Type(ET/LT) Field Number */
+#define XGMAC_ET_LT_VLAN_STAG 8
+#define XGMAC_ET_LT_VLAN_CTAG 9
+#define XGMAC_ET_LT_DVLAN_CTAG_CTAG 10
+#define XGMAC_ET_LT_DVLAN_STAG_STAG 11
+#define XGMAC_ET_LT_DVLAN_CTAG_STAG 12
+#define XGMAC_ET_LT_DVLAN_STAG_CTAG 13
+
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index a6d395c6bacd..6cadf8de4fdf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -10,6 +10,7 @@
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
+#include "stmmac_vlan.h"
#include "dwxlgmac2.h"
#include "dwxgmac2.h"
@@ -614,76 +615,6 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
return 0;
}
-static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
-
- writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
-
- if (hash) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~XGMAC_VLAN_VTHM;
- value |= XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
- } else {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value &= ~XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
- value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
- value &= ~XGMAC_VLAN_DOVLTC;
- value &= ~XGMAC_VLAN_VID;
-
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- }
-}
-
struct dwxgmac3_error_desc {
bool valid;
const char *desc;
@@ -1300,19 +1231,6 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
-static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + XGMAC_VLAN_INCL);
- value |= XGMAC_VLAN_VLTI;
- value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~XGMAC_VLAN_VLC;
- value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
- writel(value, ioaddr + XGMAC_VLAN_INCL);
-}
-
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -1534,12 +1452,10 @@ const struct stmmac_ops dwxgmac210_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
@@ -1590,12 +1506,10 @@ const struct stmmac_ops dwxlgmac2_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
@@ -1638,6 +1552,7 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(15, 0);
mac->mii.clk_csr_shift = 19;
mac->mii.clk_csr_mask = GENMASK(21, 19);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 389aad7b5c1e..a2980482fcce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -4,6 +4,7 @@
* stmmac XGMAC support.
*/
+#include <linux/bitfield.h>
#include <linux/stmmac.h>
#include "common.h"
#include "dwxgmac2.h"
@@ -69,6 +70,21 @@ static int dwxgmac2_get_tx_ls(struct dma_desc *p)
return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
}
+static u16 dwxgmac2_wrback_get_rx_vlan_tci(struct dma_desc *p)
+{
+ return le32_to_cpu(p->des0) & XGMAC_RDES0_VLAN_TAG_MASK;
+}
+
+static bool dwxgmac2_wrback_get_rx_vlan_valid(struct dma_desc *p)
+{
+ u32 et_lt;
+
+ et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, le32_to_cpu(p->des3));
+
+ return et_lt >= XGMAC_ET_LT_VLAN_STAG &&
+ et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG;
+}
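The validity test above leans on the new ET/LT field: in the write-back descriptor, RDES3 bits 19:16 encode an error-or-L2 type, and values 8..13 are exactly the single and double S/C-VLAN combinations listed in the header. A userspace restatement of the decode; FIELD_GET() on GENMASK(19, 16) is just the shift-and-mask below:

#include <stdio.h>
#include <stdint.h>

#define ET_LT_SHIFT	16
#define ET_LT_MASK	0xfu	/* GENMASK(19, 16) >> 16 */

int main(void)
{
	uint32_t des3 = 9u << ET_LT_SHIFT;	/* 9 = CTAG VLAN packet */
	uint32_t et_lt = (des3 >> ET_LT_SHIFT) & ET_LT_MASK;

	printf("et_lt=%u, vlan tagged: %s\n", et_lt,
	       (et_lt >= 8 && et_lt <= 13) ? "yes" : "no");
	return 0;
}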
+
static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
@@ -351,6 +367,8 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_tx_owner = dwxgmac2_set_tx_owner,
.set_rx_owner = dwxgmac2_set_rx_owner,
.get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_vlan_tci = dwxgmac2_wrback_get_rx_vlan_tci,
+ .get_rx_vlan_valid = dwxgmac2_wrback_get_rx_vlan_valid,
.get_rx_frame_len = dwxgmac2_get_rx_frame_len,
.enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
.get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 31bdbab9a46c..99635b37044a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -9,6 +9,7 @@
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
+#include "stmmac_vlan.h"
#include "dwmac4_descs.h"
#include "dwxgmac2.h"
@@ -120,6 +121,7 @@ static const struct stmmac_hwif_entry {
const void *tc;
const void *mmc;
const void *est;
+ const void *vlan;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
@@ -175,6 +177,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac4_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
@@ -197,6 +200,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -219,6 +223,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -241,6 +246,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac510_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -264,6 +270,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxgmac210_ops,
+ .vlan = &dwxgmac210_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
@@ -287,6 +294,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxlgmac2_ops,
+ .vlan = &dwxlgmac2_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
@@ -368,6 +376,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->tc = mac->tc ? : entry->tc;
mac->mmc = mac->mmc ? : entry->mmc;
mac->est = mac->est ? : entry->est;
+ mac->vlan = mac->vlan ? : entry->vlan;
priv->hw = mac;
priv->fpe_cfg.reg = entry->regs.fpe_reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 27c63a9fc163..ae4efffb785f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -398,21 +398,6 @@ struct stmmac_ops {
/* RSS */
int (*rss_configure)(struct mac_device_info *hw,
struct stmmac_rss *cfg, u32 num_rxq);
- /* VLAN */
- void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double);
- void (*enable_vlan)(struct mac_device_info *hw, u32 type);
- void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
- struct sk_buff *skb);
- void (*set_hw_vlan_mode)(struct mac_device_info *hw);
- int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
/* Source Address Insertion / Replacement */
@@ -498,20 +483,6 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
#define stmmac_rss_configure(__priv, __args...) \
stmmac_do_callback(__priv, mac, rss_configure, __args)
-#define stmmac_update_vlan_hash(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
-#define stmmac_enable_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
-#define stmmac_rx_hw_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
-#define stmmac_set_hw_vlan_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
-#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
-#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
-#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
#define stmmac_sarc_configure(__priv, __args...) \
@@ -659,6 +630,39 @@ struct stmmac_est_ops {
#define stmmac_est_irq_status(__priv, __args...) \
stmmac_do_void_callback(__priv, est, irq_status, __args)
+struct stmmac_vlan_ops {
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
+ struct sk_buff *skb);
+ void (*set_hw_vlan_mode)(struct mac_device_info *hw);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw);
+};
+
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, enable_vlan, __args)
+#define stmmac_rx_hw_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, rx_hw_vlan, __args)
+#define stmmac_set_hw_vlan_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, set_hw_vlan_mode, __args)
+#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, add_hw_vlan_rx_fltr, __args)
+#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, del_hw_vlan_rx_fltr, __args)
+#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, restore_hw_vlan_rx_fltr, __args)
+
struct stmmac_regs_off {
const struct stmmac_fpe_reg *fpe_reg;
u32 ptp_off;
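
The VLAN callbacks move verbatim out of struct stmmac_ops into the new stmmac_vlan_ops table, and the accessor macros now dispatch through the "vlan" module slot instead of "mac". A minimal sketch of what the macro indirection resolves to, assuming the usual hwif convention that the table hangs off mac_device_info as hw->vlan (that field is added elsewhere in the patch, not in this hunk):

/* Hedged sketch: stmmac_add_hw_vlan_rx_fltr(priv, ...) expands to
 * roughly this NULL-checked indirect call. */
static int example_add_vid(struct stmmac_priv *priv, __be16 proto, u16 vid)
{
	if (!priv->hw->vlan || !priv->hw->vlan->add_hw_vlan_rx_fltr)
		return -EINVAL;

	return priv->hw->vlan->add_hw_vlan_rx_fltr(priv->dev, priv->hw,
						   proto, vid);
}
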
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index bddfa0f4aa21..cda09cf5dcca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -149,21 +149,9 @@ struct stmmac_channel {
};
struct stmmac_fpe_cfg {
- /* Serialize access to MAC Merge state between ethtool requests
- * and link state updates.
- */
- spinlock_t lock;
-
+ struct ethtool_mmsv mmsv;
const struct stmmac_fpe_reg *reg;
- u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
-
- enum ethtool_mm_verify_status status;
- struct timer_list verify_timer;
- bool verify_enabled;
- int verify_retries;
- bool pmac_enabled;
- u32 verify_time;
- bool tx_enabled;
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
};
struct stmmac_tc_entry {
@@ -313,7 +301,7 @@ struct stmmac_priv {
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index c9693f77e1f6..ac6f2e3a3fcd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -32,6 +32,11 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
int i, ret = 0;
u32 ctrl;
+ if (!ptp_rate) {
+		netdev_warn(priv->dev, "Invalid PTP rate\n");
+ return -EINVAL;
+ }
+
ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
ret |= est_write(est_addr, EST_TER, cfg->ter, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 918a32f8fda8..f702f7b7bf9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -37,7 +37,7 @@
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int sizeof_stat;
int stat_offset;
};
@@ -1210,36 +1210,16 @@ static int stmmac_get_mm(struct net_device *ndev,
struct ethtool_mm_state *state)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long flags;
u32 frag_size;
if (!stmmac_fpe_supported(priv))
return -EOPNOTSUPP;
- spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
-
- state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- state->verify_enabled = priv->fpe_cfg.verify_enabled;
- state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
- state->verify_time = priv->fpe_cfg.verify_time;
- state->tx_enabled = priv->fpe_cfg.tx_enabled;
- state->verify_status = priv->fpe_cfg.status;
state->rx_min_frag_size = ETH_ZLEN;
-
- /* FPE active if common tx_enabled and
- * (verification success or disabled(forced))
- */
- if (state->tx_enabled &&
- (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
- state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
- state->tx_active = true;
- else
- state->tx_active = false;
-
frag_size = stmmac_fpe_get_add_frag_size(priv);
state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
- spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+ ethtool_mmsv_get_mm(&priv->fpe_cfg.mmsv, state);
return 0;
}
@@ -1248,8 +1228,6 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
u32 frag_size;
int err;
@@ -1258,23 +1236,8 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
if (err)
return err;
- /* Wait for the verification that's currently in progress to finish */
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- fpe_cfg->verify_enabled = cfg->verify_enabled;
- fpe_cfg->pmac_enabled = cfg->pmac_enabled;
- fpe_cfg->verify_time = cfg->verify_time;
- fpe_cfg->tx_enabled = cfg->tx_enabled;
-
- if (!cfg->verify_enabled)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
-
stmmac_fpe_set_add_frag_size(priv, frag_size);
- stmmac_fpe_apply(priv);
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+ ethtool_mmsv_set_mm(&priv->fpe_cfg.mmsv, cfg);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
index 3a4bee029c7f..75b470ee621a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
@@ -27,12 +27,6 @@
#define STMMAC_MAC_FPE_CTRL_STS_SVER BIT(1)
#define STMMAC_MAC_FPE_CTRL_STS_EFPE BIT(0)
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
struct stmmac_fpe_reg {
const u32 mac_fpe_reg; /* offset of MAC_FPE_CTRL_STS */
const u32 mtl_fpe_reg; /* offset of MTL_FPE_CTRL_STS */
@@ -48,10 +42,10 @@ bool stmmac_fpe_supported(struct stmmac_priv *priv)
priv->hw->mac->fpe_map_preemption_class;
}
-static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
- bool pmac_enable)
+static void stmmac_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
{
- struct stmmac_fpe_cfg *cfg = &priv->fpe_cfg;
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
const struct stmmac_fpe_reg *reg = cfg->reg;
u32 num_rxq = priv->plat->rx_queues_to_use;
void __iomem *ioaddr = priv->ioaddr;
@@ -68,6 +62,15 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
cfg->fpe_csr = 0;
}
writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);
+}
+
+static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enable)
+{
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
value = readl(ioaddr + reg->int_en_reg);
@@ -85,47 +88,45 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
writel(value, ioaddr + reg->int_en_reg);
}
-static void stmmac_fpe_send_mpacket(struct stmmac_priv *priv,
- enum stmmac_mpacket_type type)
+static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
{
- const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
void __iomem *ioaddr = priv->ioaddr;
- u32 value = priv->fpe_cfg.fpe_csr;
+ u32 value = cfg->fpe_csr;
- if (type == MPACKET_VERIFY)
+ if (type == ETHTOOL_MPACKET_VERIFY)
value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
- else if (type == MPACKET_RESPONSE)
+ else if (type == ETHTOOL_MPACKET_RESPONSE)
value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;
writel(value, ioaddr + reg->mac_fpe_reg);
}
+static const struct ethtool_mmsv_ops stmmac_mmsv_ops = {
+ .configure_tx = stmmac_fpe_configure_tx,
+ .configure_pmac = stmmac_fpe_configure_pmac,
+ .send_mpacket = stmmac_fpe_send_mpacket,
+};
+
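+
Each of the three ops above opens with the same pair of container_of() hops from the embedded ethtool_mmsv back to the driver private data; a small helper would capture the pattern (the name mmsv_to_priv is hypothetical, not in the patch):

static struct stmmac_priv *mmsv_to_priv(struct ethtool_mmsv *mmsv)
{
	/* Unwrap: ethtool_mmsv is embedded in stmmac_fpe_cfg, which is
	 * embedded in stmmac_priv. */
	struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg,
						  mmsv);

	return container_of(cfg, struct stmmac_priv, fpe_cfg);
}
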
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ struct ethtool_mmsv *mmsv = &fpe_cfg->mmsv;
- /* This is interrupt context, just spin_lock() */
- spin_lock(&fpe_cfg->lock);
-
- if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
- goto unlock_out;
+ if (status == FPE_EVENT_UNKNOWN)
+ return;
- /* LP has sent verify mPacket */
if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
- stmmac_fpe_send_mpacket(priv, MPACKET_RESPONSE);
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET);
- /* Local has sent verify mPacket */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
- /* LP has sent response mPacket */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
- fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
-
-unlock_out:
- spin_unlock(&fpe_cfg->lock);
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET);
}
void stmmac_fpe_irq_status(struct stmmac_priv *priv)
@@ -164,119 +165,16 @@ void stmmac_fpe_irq_status(struct stmmac_priv *priv)
stmmac_fpe_event_status(priv, status);
}
-/**
- * stmmac_fpe_verify_timer - Timer for MAC Merge verification
- * @t: timer_list struct containing private info
- *
- * Verify the MAC Merge capability in the local TX direction, by
- * transmitting Verify mPackets up to 3 times. Wait until link
- * partner responds with a Response mPacket, otherwise fail.
- */
-static void stmmac_fpe_verify_timer(struct timer_list *t)
-{
- struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
- struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
- fpe_cfg);
- unsigned long flags;
- bool rearm = false;
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- switch (fpe_cfg->status) {
- case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
- case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
- if (fpe_cfg->verify_retries != 0) {
- stmmac_fpe_send_mpacket(priv, MPACKET_VERIFY);
- rearm = true;
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
- }
-
- fpe_cfg->verify_retries--;
- break;
-
- case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
- stmmac_fpe_configure(priv, true, true);
- break;
-
- default:
- break;
- }
-
- if (rearm) {
- mod_timer(&fpe_cfg->verify_timer,
- jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
-static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
-{
- if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
- fpe_cfg->verify_enabled &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
- timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
- mod_timer(&fpe_cfg->verify_timer, jiffies);
- }
-}
-
void stmmac_fpe_init(struct stmmac_priv *priv)
{
- priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
- priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
- timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
- spin_lock_init(&priv->fpe_cfg.lock);
+ ethtool_mmsv_init(&priv->fpe_cfg.mmsv, priv->dev,
+ &stmmac_mmsv_ops);
if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
priv->dma_cap.fpesel)
dev_info(priv->device, "FPE is not supported by driver.\n");
}
-void stmmac_fpe_apply(struct stmmac_priv *priv)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
-
- /* If verification is disabled, configure FPE right away.
- * Otherwise let the timer code do it.
- */
- if (!fpe_cfg->verify_enabled) {
- stmmac_fpe_configure(priv, fpe_cfg->tx_enabled,
- fpe_cfg->pmac_enabled);
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
- fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
-
- if (netif_running(priv->dev))
- stmmac_fpe_verify_timer_arm(fpe_cfg);
- }
-}
-
-void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
-
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- if (is_up && fpe_cfg->pmac_enabled) {
- /* VERIFY process requires pmac enabled when NIC comes up */
- stmmac_fpe_configure(priv, false, true);
-
- /* New link => maybe new partner => new verification process */
- stmmac_fpe_apply(priv);
- } else {
- /* No link => turn off EFPE */
- stmmac_fpe_configure(priv, false, false);
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
{
const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
index b884eac7142d..3fc46acf7001 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
@@ -9,15 +9,10 @@
#include <linux/types.h>
#include <linux/netdevice.h>
-#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
-#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
-
struct stmmac_priv;
-void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up);
bool stmmac_fpe_supported(struct stmmac_priv *priv);
void stmmac_fpe_init(struct stmmac_priv *priv);
-void stmmac_fpe_apply(struct stmmac_priv *priv);
void stmmac_fpe_irq_status(struct stmmac_priv *priv);
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv);
void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 59d07d0d3369..b948df1bff9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -481,7 +481,7 @@ static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
*/
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
+ struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
stmmac_try_to_start_sw_lpi(priv);
}
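
This is the first of many mechanical from_timer() to timer_container_of() conversions in this diff (cassini, niu, sunbmac, sungem, sunhme, sunvnet, xlgmac and cpsw_ale below follow the same pattern). To the best of my reading, both helpers are the same container_of() wrapper, roughly:

/* Hedged sketch of the equivalence; the real definitions live in
 * include/linux/timer.h. */
#define timer_container_of(var, timer, field) \
	container_of(timer, typeof(*var), field)
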
@@ -568,18 +568,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
/**
* stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
+ * @extack: netlink extended ack structure for error reporting.
* Description:
 * This function configures the MAC to enable/disable both outgoing (TX)
 * and incoming (RX) packet timestamping based on user input.
* Return Value:
* 0 on success and an appropriate -ve integer on failure.
*/
-static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
u32 ptp_over_ipv4_udp = 0;
@@ -590,34 +591,36 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
- netdev_alert(priv->dev, "No support for HW time stamping\n");
+ NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
return -EOPNOTSUPP;
}
- if (copy_from_user(&config, ifr->ifr_data,
- sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change timestamping configuration while down");
+ return -ENODEV;
+ }
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
- __func__, config.flags, config.tx_type, config.rx_filter);
+ __func__, config->flags, config->tx_type, config->rx_filter);
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
if (priv->adv_ts) {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* time stamp no incoming packet at all */
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* 'xmac' hardware can support Sync, Pdelay_Req and
* Pdelay_resp by setting bit14 and bits17/16 to 01
* This leaves Delay_Req timestamps out.
@@ -631,7 +634,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
/* PTP v1, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -641,7 +644,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
/* PTP v1, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -652,7 +655,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
/* PTP v2, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -663,7 +666,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
/* PTP v2, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -674,7 +677,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
/* PTP v2, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -686,7 +689,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
/* PTP v2/802.AS1 any layer, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id < DWMAC_CORE_4_10)
@@ -698,7 +701,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
/* PTP v2/802.AS1, any layer, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -710,7 +713,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* PTP v2/802.AS1, any layer, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -724,7 +727,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
tstamp_all = PTP_TCR_TSENALL;
break;
@@ -732,18 +735,18 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
} else {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
default:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
}
}
- priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
- priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+ priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
+ priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
priv->systime_flags = STMMAC_HWTS_ACTIVE;
@@ -756,31 +759,30 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
- memcpy(&priv->tstamp_config, &config, sizeof(config));
+ priv->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
/**
* stmmac_hwtstamp_get - read hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
* Description:
 * This function obtains the current hardware timestamping settings
* as requested.
*/
-static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config *config = &priv->tstamp_config;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ *config = priv->tstamp_config;
+
+ return 0;
}
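
With ndo_hwtstamp_get()/ndo_hwtstamp_set(), the copy_from_user()/copy_to_user() marshalling of struct hwtstamp_config is performed once by the net core (dev_get_hwtstamp()/dev_set_hwtstamp(), if memory serves), so the driver only ever touches the in-kernel struct kernel_hwtstamp_config. A hedged sketch of the core-side calling convention, not the core's actual code:

static int example_set_hwtstamp(struct net_device *dev,
				struct kernel_hwtstamp_config *cfg,
				struct netlink_ext_ack *extack)
{
	/* The core fills *cfg from userspace, calls the ndo, then copies
	 * the possibly-adjusted config back on success. */
	if (!dev->netdev_ops->ndo_hwtstamp_set)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_hwtstamp_set(dev, cfg, extack);
}
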
/**
@@ -803,6 +805,11 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
+ if (!priv->plat->clk_ptp_rate) {
+		netdev_err(priv->dev, "Invalid PTP clock rate\n");
+ return -EINVAL;
+ }
+
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
@@ -946,7 +953,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
stmmac_set_eee_pls(priv, priv->hw, false);
if (stmmac_fpe_supported(priv))
- stmmac_fpe_link_state_handle(priv, false);
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
@@ -1064,7 +1071,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_set_eee_pls(priv, priv->hw, true);
if (stmmac_fpe_supported(priv))
- stmmac_fpe_link_state_handle(priv, true);
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
stmmac_hwtstamp_correct_latency(priv, priv);
@@ -1258,20 +1265,22 @@ static int stmmac_init_phy(struct net_device *dev)
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
- int mode = priv->plat->phy_interface;
+ struct phylink_config *config;
struct fwnode_handle *fwnode;
struct phylink_pcs *pcs;
struct phylink *phylink;
- priv->phylink_config.dev = &priv->dev->dev;
- priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.mac_managed_pm = true;
+ config = &priv->phylink_config;
+
+ config->dev = &priv->dev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_managed_pm = true;
/* Stmmac always requires an RX clock for hardware initialization */
- priv->phylink_config.mac_requires_rxc = true;
+ config->mac_requires_rxc = true;
if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
- priv->phylink_config.eee_rx_clk_stop_enable = true;
+ config->eee_rx_clk_stop_enable = true;
/* Set the default transmit clock stop bit based on the platform glue */
priv->tx_lpi_clk_stop = priv->plat->flags &
@@ -1279,13 +1288,22 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
- priv->phylink_config.default_an_inband =
- mdio_bus_data->default_an_inband;
+ config->default_an_inband = mdio_bus_data->default_an_inband;
+
+ /* Get the PHY interface modes (at the PHY end of the link) that
+ * are supported by the platform.
+ */
+ if (priv->plat->get_interfaces)
+ priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
+ config->supported_interfaces);
- /* Set the platform/firmware specified interface mode. Note, phylink
- * deals with the PHY interface mode, not the MAC interface mode.
+	/* If the supported interfaces have not already been provided, fall
+	 * back to the platform/firmware specified interface mode,
+	 * phy_interface, as a last resort.
*/
- __set_bit(mode, priv->phylink_config.supported_interfaces);
+ if (phy_interface_empty(config->supported_interfaces))
+ __set_bit(priv->plat->phy_interface,
+ config->supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
@@ -1294,29 +1312,27 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
pcs = priv->hw->phylink_pcs;
if (pcs)
- phy_interface_or(priv->phylink_config.supported_interfaces,
- priv->phylink_config.supported_interfaces,
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
pcs->supported_interfaces);
if (priv->dma_cap.eee) {
/* Assume all supported interfaces also support LPI */
- memcpy(priv->phylink_config.lpi_interfaces,
- priv->phylink_config.supported_interfaces,
- sizeof(priv->phylink_config.lpi_interfaces));
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
/* All full duplex speeds above 100Mbps are supported */
- priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
- MAC_100FD;
- priv->phylink_config.lpi_timer_default = eee_timer * 1000;
- priv->phylink_config.eee_enabled_default = true;
+ config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
+ config->lpi_timer_default = eee_timer * 1000;
+ config->eee_enabled_default = true;
}
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
- phylink = phylink_create(&priv->phylink_config, fwnode,
- mode, &stmmac_phylink_mac_ops);
+ phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
+ &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
@@ -4152,7 +4168,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
if (stmmac_fpe_supported(priv))
- timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
pm_runtime_put(priv->device);
@@ -4488,8 +4504,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
@@ -4522,6 +4536,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4765,8 +4780,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
* passed to the DMA engine.
@@ -4813,7 +4826,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
-
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -6219,12 +6232,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCSMIIREG:
ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
break;
- case SIOCSHWTSTAMP:
- ret = stmmac_hwtstamp_set(dev, rq);
- break;
- case SIOCGHWTSTAMP:
- ret = stmmac_hwtstamp_get(dev, rq);
- break;
default:
break;
}
@@ -7163,6 +7170,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_bpf = stmmac_bpf,
.ndo_xdp_xmit = stmmac_xdp_xmit,
.ndo_xsk_wakeup = stmmac_xsk_wakeup,
+ .ndo_hwtstamp_get = stmmac_hwtstamp_get,
+ .ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -7644,7 +7653,7 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
- if (priv->plat->has_gmac4) {
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
priv->hw->hw_vlan_en = true;
}
@@ -7727,9 +7736,6 @@ int stmmac_dvr_probe(struct device *device,
goto error_mdio_register;
}
- if (priv->plat->speed_mode_2500)
- priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
-
ret = stmmac_pcs_setup(ndev);
if (ret)
goto error_pcs_setup;
@@ -7871,7 +7877,7 @@ int stmmac_suspend(struct device *dev)
rtnl_unlock();
if (stmmac_fpe_supported(priv))
- timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index c73eff6a56b8..b80c1efdb323 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -430,6 +430,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ static int bus_id = -ENODEV;
int phy_mode;
void *ret;
int rc;
@@ -465,8 +466,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
of_property_read_u32(np, "max-speed", &plat->max_speed);
plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = 0;
+ if (plat->bus_id < 0) {
+ if (bus_id < 0)
+ bus_id = of_alias_get_highest_id("ethernet");
+		/* No ethernet alias found; init to -1 so the first bus_id is 0 */
+ if (bus_id < 0)
+ bus_id = -1;
+ plat->bus_id = ++bus_id;
+ }
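
Restating the allocation policy as a self-contained sketch (function name hypothetical): ports with an "ethernet" alias keep their stable DT-assigned ids, and unaliased ports receive increasing ids above the highest alias.

static int example_pick_bus_id(struct device_node *np)
{
	static int next_id = -ENODEV;	/* shared across probes, like bus_id */
	int id = of_alias_get_id(np, "ethernet");

	if (id >= 0)
		return id;		/* aliased port: stable id */

	if (next_id < 0)
		next_id = of_alias_get_highest_id("ethernet");
	if (next_id < 0)
		next_id = -1;		/* no aliases at all: first id is 0 */

	return ++next_id;		/* unaliased ports follow the aliases */
}
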
/* Default to phy auto-detection */
plat->phy_addr = -1;
@@ -709,6 +716,17 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name)
+{
+ for (int i = 0; i < plat_dat->num_clks; i++)
+ if (strcmp(plat_dat->clks[i].id, name) == 0)
+ return plat_dat->clks[i].clk;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_find_clk);
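
A hedged usage sketch for the new helper; the clock name "ptp_ref" is illustrative, and a NULL return simply means the platform did not provide that clock:

static int example_enable_ptp_clk(struct plat_stmmacenet_data *plat)
{
	struct clk *ptp_clk = stmmac_pltfr_find_clk(plat, "ptp_ref");

	if (!ptp_clk)
		return -ENOENT;

	return clk_prepare_enable(ptp_clk);
}
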
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 72dc1a32e46d..6e6561e29d6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -14,6 +14,9 @@
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name);
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 429b2d357813..3767ba495e78 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -317,7 +317,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
/* Calculate the clock domain crossing (CDC) error if necessary */
priv->plat->cdc_error_adj = 0;
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ if (priv->plat->has_gmac4)
priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
/* Update the ptp clock parameters based on feature discovery, when
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
new file mode 100644
index 000000000000..0b6f6228ae35
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN (802.1Q) handling
+ */
+
+#include "stmmac.h"
+#include "stmmac_vlan.h"
+
+static void vlan_write_single(struct net_device *dev, u16 vid)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 val;
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~VLAN_TAG_VID;
+ val |= VLAN_TAG_ETV | vid;
+
+ writel(val, ioaddr + VLAN_TAG);
+}
+
+static int vlan_write_filter(struct net_device *dev,
+ struct mac_device_info *hw,
+ u8 index, u32 data)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int ret;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+ return -EINVAL;
+
+ writel(data, ioaddr + VLAN_TAG_DATA);
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~(VLAN_TAG_CTRL_OFS_MASK |
+ VLAN_TAG_CTRL_CT |
+ VLAN_TAG_CTRL_OB);
+ val |= (index << VLAN_TAG_CTRL_OFS_SHIFT) | VLAN_TAG_CTRL_OB;
+
+ writel(val, ioaddr + VLAN_TAG);
+
+ ret = readl_poll_timeout(ioaddr + VLAN_TAG, val,
+ !(val & VLAN_TAG_CTRL_OB),
+ 1000, 500000);
+ if (ret) {
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int index = -1;
+ u32 val = 0;
+ int i, ret;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ /* For single VLAN filter, VID 0 means VLAN promiscuous */
+ if (vid == 0) {
+ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
+ return -EPERM;
+ }
+
+ if (hw->vlan_filter[0] & VLAN_TAG_VID) {
+ netdev_err(dev, "Only single VLAN ID supported\n");
+ return -EPERM;
+ }
+
+ hw->vlan_filter[0] = vid;
+ vlan_write_single(dev, vid);
+
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ val |= VLAN_TAG_DATA_ETV | VLAN_TAG_DATA_VEN | vid;
+
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] == val)
+ return 0;
+ else if (!(hw->vlan_filter[i] & VLAN_TAG_DATA_VEN))
+ index = i;
+ }
+
+ if (index == -1) {
+		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %u)\n",
+ hw->num_vlan);
+ return -EPERM;
+ }
+
+ ret = vlan_write_filter(dev, hw, index, val);
+
+ if (!ret)
+ hw->vlan_filter[index] = val;
+
+ return ret;
+}
+
+static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int i, ret = 0;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+ vlan_write_single(dev, 0);
+ }
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid) {
+ ret = vlan_write_filter(dev, hw, i, 0);
+
+ if (!ret)
+ hw->vlan_filter[i] = 0;
+ else
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ vlan_write_single(dev, hw->vlan_filter[0]);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i];
+ vlan_write_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + VLAN_HASH_TABLE);
+ if (hash & VLAN_VLHT) {
+ value = readl(ioaddr + VLAN_TAG);
+ value |= VLAN_VTHM;
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ if (hash) {
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = VLAN_ETV;
+
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_enable(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + VLAN_INCL);
+ value |= VLAN_VLTI;
+ value |= VLAN_CSVL; /* Only use SVLAN */
+ value &= ~VLAN_VLC;
+ value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
+ writel(value, ioaddr + VLAN_INCL);
+}
+
+static void vlan_rx_hw(struct mac_device_info *hw,
+ struct dma_desc *rx_desc, struct sk_buff *skb)
+{
+ if (hw->desc->get_rx_vlan_valid(rx_desc)) {
+ u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+static void vlan_set_hw_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_TAG_CTRL_EVLS_MASK;
+
+ if (hw->hw_vlan_en)
+ /* Always strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_ALL;
+ else
+ /* Do not strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_NONE;
+
+ /* Enable outer VLAN Tag in Rx DMA descriptor */
+ value |= VLAN_TAG_CTRL_EVLRXS;
+ writel(value, ioaddr + VLAN_TAG);
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_VTHM;
+ value |= VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+const struct stmmac_vlan_ops dwmac_vlan_ops = {
+ .update_vlan_hash = vlan_update_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+const struct stmmac_vlan_ops dwxlgmac2_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+};
+
+const struct stmmac_vlan_ops dwxgmac210_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr)
+{
+ u32 val, num_vlan;
+
+ val = readl(ioaddr + HW_FEATURE3);
+ switch (val & VLAN_HW_FEAT_NRVF) {
+ case 0:
+ num_vlan = 1;
+ break;
+ case 1:
+ num_vlan = 4;
+ break;
+ case 2:
+ num_vlan = 8;
+ break;
+ case 3:
+ num_vlan = 16;
+ break;
+ case 4:
+ num_vlan = 24;
+ break;
+ case 5:
+ num_vlan = 32;
+ break;
+ default:
+ num_vlan = 1;
+ }
+
+ return num_vlan;
+}
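
The NRVF field decodes 0..5 to {1, 4, 8, 16, 24, 32} filters and anything else to 1; purely as an illustration, a table-driven equivalent of the switch:

static const u8 nrvf_to_num_vlan[] = { 1, 4, 8, 16, 24, 32 };

static u32 example_get_num_vlan(void __iomem *ioaddr)
{
	u32 nrvf = readl(ioaddr + HW_FEATURE3) & VLAN_HW_FEAT_NRVF;

	/* Encodings 6 and 7 fall back to one filter, as in the default case. */
	return nrvf < ARRAY_SIZE(nrvf_to_num_vlan) ? nrvf_to_num_vlan[nrvf] : 1;
}
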
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
new file mode 100644
index 000000000000..c24f89a9049b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN (802.1Q) handling
+ */
+
+#ifndef __STMMAC_VLAN_H__
+#define __STMMAC_VLAN_H__
+
+#include <linux/bitfield.h>
+#include "dwxgmac2.h"
+
+#define VLAN_TAG 0x00000050
+#define VLAN_TAG_DATA 0x00000054
+#define VLAN_HASH_TABLE 0x00000058
+#define VLAN_INCL 0x00000060
+
+/* MAC VLAN */
+#define VLAN_EDVLP BIT(26)
+#define VLAN_VTHM BIT(25)
+#define VLAN_DOVLTC BIT(20)
+#define VLAN_ESVL BIT(18)
+#define VLAN_ETV BIT(16)
+#define VLAN_VID GENMASK(15, 0)
+#define VLAN_VLTI BIT(20)
+#define VLAN_CSVL BIT(19)
+#define VLAN_VLC GENMASK(17, 16)
+#define VLAN_VLC_SHIFT 16
+#define VLAN_VLHT GENMASK(15, 0)
+
+/* MAC VLAN Tag */
+#define VLAN_TAG_VID GENMASK(15, 0)
+#define VLAN_TAG_ETV BIT(16)
+
+/* MAC VLAN Tag Control */
+#define VLAN_TAG_CTRL_OB BIT(0)
+#define VLAN_TAG_CTRL_CT BIT(1)
+#define VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
+#define VLAN_TAG_CTRL_OFS_SHIFT 2
+#define VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
+#define VLAN_TAG_CTRL_EVLS_SHIFT 21
+#define VLAN_TAG_CTRL_EVLRXS BIT(24)
+
+#define VLAN_TAG_STRIP_NONE FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x0)
+#define VLAN_TAG_STRIP_PASS FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x1)
+#define VLAN_TAG_STRIP_FAIL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x2)
+#define VLAN_TAG_STRIP_ALL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x3)
+
+/* MAC VLAN Tag Data/Filter */
+#define VLAN_TAG_DATA_VID GENMASK(15, 0)
+#define VLAN_TAG_DATA_VEN BIT(16)
+#define VLAN_TAG_DATA_ETV BIT(17)
+
+/* MAC VLAN HW FEAT */
+#define HW_FEATURE3 0x00000128
+#define VLAN_HW_FEAT_NRVF GENMASK(2, 0)
+
+extern const struct stmmac_vlan_ops dwmac_vlan_ops;
+extern const struct stmmac_vlan_ops dwxgmac210_vlan_ops;
+extern const struct stmmac_vlan_ops dwxlgmac2_vlan_ops;
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr);
+
+#endif /* __STMMAC_VLAN_H__ */
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b777e5a099eb..acfb523214b9 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4021,7 +4021,7 @@ done:
static void cas_link_timer(struct timer_list *t)
{
- struct cas *cp = from_timer(cp, t, link_timer);
+ struct cas *cp = timer_container_of(cp, t, link_timer);
int mask, pending = 0, reset = 0;
unsigned long flags;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 379b6e90121d..ddca8fc7883e 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -2225,7 +2225,7 @@ static int niu_link_status(struct niu *np, int *link_up_p)
static void niu_timer(struct timer_list *t)
{
- struct niu *np = from_timer(np, t, timer);
+ struct niu *np = timer_container_of(np, t, timer);
unsigned long off;
int err, link_up;
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index d2c82102133c..edb2fd3a6551 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -526,7 +526,7 @@ static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
static void bigmac_timer(struct timer_list *t)
{
- struct bigmac *bp = from_timer(bp, t, bigmac_timer);
+ struct bigmac *bp = timer_container_of(bp, t, bigmac_timer);
void __iomem *tregs = bp->tregs;
int restart_timer = 0;
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 06579d7b5220..8e69d917d827 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1481,7 +1481,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
static void gem_link_timer(struct timer_list *t)
{
- struct gem *gp = from_timer(gp, t, link_timer);
+ struct gem *gp = timer_container_of(gp, t, link_timer);
struct net_device *dev = gp->dev;
int restart_aneg = 0;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 9a7586623318..4bc0e114d5ee 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -721,7 +721,7 @@ force_link:
static void happy_meal_timer(struct timer_list *t)
{
- struct happy_meal *hp = from_timer(hp, t, happy_timer);
+ struct happy_meal *hp = timer_container_of(hp, t, happy_timer);
void __iomem *tregs = hp->tcvregs;
int restart_timer = 0;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index ddc6d46a7a86..0212853c9430 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1044,7 +1044,7 @@ static inline void vnet_free_skbs(struct sk_buff *skb)
void sunvnet_clean_timer_expire_common(struct timer_list *t)
{
- struct vnet_port *port = from_timer(port, t, clean_timer);
+ struct vnet_port *port = timer_container_of(port, t, clean_timer);
struct sk_buff *freeskbs;
unsigned pending;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 24e4b246f25f..37101dc50d04 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -360,7 +360,8 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
static void xlgmac_tx_timer(struct timer_list *t)
{
- struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
+ struct xlgmac_channel *channel = timer_container_of(channel, t,
+ tx_timer);
struct xlgmac_pdata *pdata = channel->pdata;
struct napi_struct *napi;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 1e6d2335293d..f20d1ff192ef 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -427,7 +427,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
if (netif_tx_queue_stopped(netif_txq)) {
/* try recover if stopped by us */
- txq_trans_update(netif_txq);
+ txq_trans_update(ndev, netif_txq);
netif_tx_wake_queue(netif_txq);
}
}
@@ -2679,13 +2679,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
goto of_node_put;
ret = of_get_mac_address(port_np, port->slave.mac_addr);
- if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ goto of_node_put;
+ } else if (ret) {
am65_cpsw_am654_get_efuse_macid(port_np,
port->port_id,
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
eth_random_addr(port->slave.mac_addr);
- dev_err(dev, "Use random MAC address\n");
+ dev_info(dev, "Use random MAC address\n");
}
}
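
of_get_mac_address() can legitimately return -EPROBE_DEFER when the address lives in an nvmem cell whose provider has not probed yet; deferring instead of silently taking the fallback preserves the DT-provided address. The pattern, as a standalone sketch:

static int example_get_mac(struct device_node *np, u8 *addr)
{
	int ret = of_get_mac_address(np, addr);

	if (ret == -EPROBE_DEFER)
		return ret;	/* nvmem provider not ready; retry probe later */
	if (ret)
		eth_random_addr(addr);	/* last-resort fallback */

	return 0;
}
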
@@ -2760,7 +2762,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
mutex_init(&ndev_priv->mm_lock);
port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
- port->ndev->dev.of_node = port->slave.port_np;
+ device_set_node(&port->ndev->dev, of_fwnode_handle(port->slave.port_np));
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
@@ -3561,6 +3563,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_nuss_get_ver(common);
+
+ ret = am65_cpsw_nuss_init_host_p(common);
+ if (ret)
+ goto err_pm_clear;
+
+ ret = am65_cpsw_nuss_init_slave_ports(common);
+ if (ret)
+ goto err_pm_clear;
+
node = of_get_child_by_name(dev->of_node, "mdio");
if (!node) {
dev_warn(dev, "MDIO node not found\n");
@@ -3577,16 +3589,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
}
of_node_put(node);
- am65_cpsw_nuss_get_ver(common);
-
- ret = am65_cpsw_nuss_init_host_p(common);
- if (ret)
- goto err_of_clear;
-
- ret = am65_cpsw_nuss_init_slave_ports(common);
- if (ret)
- goto err_of_clear;
-
/* init common data */
ale_params.dev = dev;
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a984b7d84e5e..54c24cd3d3be 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1156,6 +1156,27 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
}
#endif
+/* We need a custom implementation of phy_do_ioctl_running() because in switch
+ * mode, dev->phydev may be different from the PHY of the active_slave. We
+ * need to operate on the locally saved PHY instead.
+ */
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ phy = cpsw->slaves[slave_no].phy;
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
+}
+
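
For contrast, a hedged sketch of what the generic helper this bypasses does (from memory of phy_do_ioctl_running(); it has no notion of cpsw's active_slave):

static int phy_do_ioctl_running_sketch(struct net_device *dev,
				       struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -ENODEV;
	if (!dev->phydev)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
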
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -1174,6 +1195,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_setup_tc = cpsw_ndo_setup_tc,
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -1646,6 +1669,9 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT;
+ /* Hijack PHY timestamping requests in order to block them */
+ if (!cpsw->data.dual_emac)
+ ndev->see_all_hwtstamp_requests = true;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
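
Setting see_all_hwtstamp_requests makes the core forward timestamping requests to the netdev even when they target a PHY timestamper, recording the origin in kernel_hwtstamp_config::source; the cpsw_hwtstamp_set() hunk further down relies on that to refuse PHY timestamping in switch mode. A minimal sketch of the discrimination, mirroring the check added in cpsw_priv.c:

static bool example_is_mac_request(const struct kernel_hwtstamp_config *cfg)
{
	/* Assumes the enum values from <linux/net_tstamp.h>. */
	return cfg->source == HWTSTAMP_SOURCE_NETDEV;
}
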
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 7f77694ecfba..fbe35af615a6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1231,7 +1231,7 @@ int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int rateli
static void cpsw_ale_timer(struct timer_list *t)
{
- struct cpsw_ale *ale = from_timer(ale, t, timer);
+ struct cpsw_ale *ale = timer_container_of(ale, t, timer);
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 5b5b52e4e7a7..8b9e2078c602 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1132,7 +1132,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_eth_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
@@ -1147,6 +1147,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
.ndo_get_port_parent_id = cpsw_get_port_parent_id,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 6fe4edabba44..bc4fdf17a99e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -614,24 +614,29 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
- struct hwtstamp_config cfg;
+
+ /* This will only execute if dev->see_all_hwtstamp_requests is set */
+ if (cfg->source != HWTSTAMP_SOURCE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Switch mode only supports MAC timestamping");
+ return -EOPNOTSUPP;
+ }
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->rx_ts_enabled = 0;
break;
@@ -651,13 +656,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+ priv->tx_ts_enabled = cfg->tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -671,65 +676,40 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
WARN_ON(1);
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- cfg.flags = 0;
- cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = priv->rx_ts_enabled;
+ cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg->rx_filter = priv->rx_ts_enabled;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
return -EOPNOTSUPP;
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
- struct phy_device *phy;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- phy = cpsw->slaves[slave_no].phy;
-
- if (!phy_has_hwtstamp(phy)) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
- }
- }
-
- if (phy)
- return phy_mii_ioctl(phy, req, cmd);
-
- return -EOPNOTSUPP;
-}
-
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
struct cpsw_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index f2fc55d9295d..91add8925e23 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -461,7 +461,6 @@ void soft_reset(const char *module, void __iomem *reg);
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
int cpsw_need_resplit(struct cpsw_common *cpsw);
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
@@ -469,6 +468,11 @@ bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index d88a0180294e..12f25cec6255 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -98,20 +98,11 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
- struct prueth_swdata *swdata;
- struct page *page;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
- swdata = cppi5_hdesc_get_swdata(desc);
- if (swdata->type == PRUETH_SWDATA_PAGE) {
- page = swdata->data.page;
- page_pool_recycle_direct(page->pp, swdata->data.page);
- goto free_desc;
- }
-
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -135,7 +126,6 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
-free_desc:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -612,13 +602,8 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- if (page) {
- swdata->type = PRUETH_SWDATA_PAGE;
- swdata->data.page = page;
- } else {
- swdata->type = PRUETH_SWDATA_XDPF;
- swdata->data.xdpf = xdpf;
- }
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
/* Report BQL before sending the packet */
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1329,10 +1314,28 @@ void icssg_ndo_get_stats64(struct net_device *ndev,
stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
- stats->rx_errors = ndev->stats.rx_errors;
- stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->rx_errors = ndev->stats.rx_errors +
+ emac_get_stat_by_name(emac, "FW_RX_ERROR") +
+ emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") +
+ emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") +
+ emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") +
+ emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN");
+ stats->rx_dropped = ndev->stats.rx_dropped +
+ emac_get_stat_by_name(emac, "FW_DROPPED_PKT") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") +
+ emac_get_stat_by_name(emac, "FW_INF_SAV") +
+ emac_get_stat_by_name(emac, "FW_INF_SA_DL") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER");
stats->tx_errors = ndev->stats.tx_errors;
- stats->tx_dropped = ndev->stats.tx_dropped;
+ stats->tx_dropped = ndev->stats.tx_dropped +
+ emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") +
+ emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF");
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);
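
The hunk above folds named firmware drop counters into rx_errors/rx_dropped/tx_dropped. A standalone sketch of that aggregation pattern, with made-up counter values in place of real hardware reads:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_stat { const char *name; uint64_t val; };

static const struct fw_stat fw_stats[] = {
	{ "FW_DROPPED_PKT", 3 },
	{ "FW_INF_PORT_DISABLED", 1 },
	{ "FW_INF_DROP_NOTAG", 2 },
};

static uint64_t get_stat_by_name(const char *name)
{
	for (size_t i = 0; i < sizeof(fw_stats) / sizeof(fw_stats[0]); i++)
		if (!strcmp(fw_stats[i].name, name))
			return fw_stats[i].val;
	return 0;
}

int main(void)
{
	const char *rx_drop_stats[] = {
		"FW_DROPPED_PKT", "FW_INF_PORT_DISABLED", "FW_INF_DROP_NOTAG",
	};
	uint64_t rx_dropped = 0;	/* the driver starts from ndev->stats.rx_dropped */

	for (size_t i = 0; i < 3; i++)
		rx_dropped += get_stat_by_name(rx_drop_stats[i]);
	printf("rx_dropped = %llu\n", (unsigned long long)rx_dropped);
	return 0;
}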
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index b6be4aa57a61..23c465f1ce7f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -54,7 +54,7 @@
#define ICSSG_MAX_RFLOWS 8 /* per slice */
-#define ICSSG_NUM_PA_STATS 4
+#define ICSSG_NUM_PA_STATS 32
#define ICSSG_NUM_MIIG_STATS 60
/* Number of ICSSG related stats */
#define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 6f0edae38ea2..7159baa0155c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -11,7 +11,6 @@
#define ICSSG_TX_PACKET_OFFSET 0xA0
#define ICSSG_TX_BYTE_OFFSET 0xEC
-#define ICSSG_FW_STATS_BASE 0x0248
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
@@ -29,6 +28,14 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
spin_lock(&prueth->stats_lock);
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+ /* In MII mode TX lines are swapped inside ICSSG, so read Tx stats
+ * from slice1 for port0 and slice0 for port1 to get accurate Tx
+ * stats for a given port
+ */
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII &&
+ icssg_all_miig_stats[i].offset >= ICSSG_TX_PACKET_OFFSET &&
+ icssg_all_miig_stats[i].offset <= ICSSG_TX_BYTE_OFFSET)
+ base = stats_base[slice ^ 1];
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
&val);
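
The slice ^ 1 trick above reads Tx counters for a port from the other slice's block when the MII lines are swapped. A standalone demonstration of which register ends up being read:

/* Sketch of the slice swap: in MII mode, Tx counters for port N live in
 * the other slice's block, so the base comes from slice ^ 1 for offsets
 * inside the Tx window. Addresses are the ones from this file; the
 * helper itself is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define TX_PACKET_OFFSET 0xA0
#define TX_BYTE_OFFSET   0xEC

static const uint32_t stats_base[] = { 0x54c, 0xb18 };

static uint32_t stat_reg(int slice, int mii_mode, uint32_t offset)
{
	uint32_t base = stats_base[slice];

	if (mii_mode && offset >= TX_PACKET_OFFSET && offset <= TX_BYTE_OFFSET)
		base = stats_base[slice ^ 1];	/* read the swapped slice */
	return base + offset;
}

int main(void)
{
	printf("port0 tx_packets reg (MII): 0x%x\n", stat_reg(0, 1, TX_PACKET_OFFSET));
	printf("port0 rx reg (MII):         0x%x\n", stat_reg(0, 1, 0x00));
	return 0;
}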
@@ -46,9 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
if (prueth->pa_stats) {
for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
- reg = ICSSG_FW_STATS_BASE +
- icssg_all_pa_stats[i].offset *
- PRUETH_NUM_MACS + slice * sizeof(u32);
+ reg = icssg_all_pa_stats[i].offset +
+ slice * sizeof(u32);
regmap_read(prueth->pa_stats, reg, &val);
emac->pa_stats[i] += val;
}
@@ -80,7 +86,7 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
if (emac->prueth->pa_stats) {
for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
- return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+ return emac->pa_stats[i];
}
}
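
With the new scheme each PA counter name is its absolute byte offset and the two slices sit one u32 apart, so the register address reduces to offset + slice * sizeof(u32). A minimal check of that arithmetic, using offsets from the switch map below:

#include <stdint.h>
#include <stdio.h>

#define FW_RX_ERROR 0x0100	/* from icssg_switch_map.h below */

static uint32_t pa_stat_reg(uint32_t offset, int slice)
{
	return offset + slice * (uint32_t)sizeof(uint32_t);
}

int main(void)
{
	printf("FW_RX_ERROR slice0: 0x%x\n", pa_stat_reg(FW_RX_ERROR, 0));
	printf("FW_RX_ERROR slice1: 0x%x\n", pa_stat_reg(FW_RX_ERROR, 1));
	return 0;
}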
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h
index e88b919f532c..5ec0b38e0c67 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h
@@ -155,24 +155,10 @@ static const struct icssg_miig_stats icssg_all_miig_stats[] = {
ICSSG_MIIG_STATS(tx_bytes, true),
};
-/**
- * struct pa_stats_regs - ICSSG Firmware maintained PA Stats register
- * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI
- * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues
- * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter
- * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter
- */
-struct pa_stats_regs {
- u32 fw_rx_cnt;
- u32 fw_tx_cnt;
- u32 fw_tx_pre_overflow;
- u32 fw_tx_exp_overflow;
-};
-
-#define ICSSG_PA_STATS(field) \
-{ \
- #field, \
- offsetof(struct pa_stats_regs, field), \
+#define ICSSG_PA_STATS(field) \
+{ \
+ #field, \
+ field, \
}
struct icssg_pa_stats {
@@ -181,10 +167,38 @@ struct icssg_pa_stats {
};
static const struct icssg_pa_stats icssg_all_pa_stats[] = {
- ICSSG_PA_STATS(fw_rx_cnt),
- ICSSG_PA_STATS(fw_tx_cnt),
- ICSSG_PA_STATS(fw_tx_pre_overflow),
- ICSSG_PA_STATS(fw_tx_exp_overflow),
+ ICSSG_PA_STATS(FW_RTU_PKT_DROP),
+ ICSSG_PA_STATS(FW_Q0_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q1_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q2_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q3_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q4_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q5_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q6_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q7_OVERFLOW),
+ ICSSG_PA_STATS(FW_DROPPED_PKT),
+ ICSSG_PA_STATS(FW_RX_ERROR),
+ ICSSG_PA_STATS(FW_RX_DS_INVALID),
+ ICSSG_PA_STATS(FW_TX_DROPPED_PACKET),
+ ICSSG_PA_STATS(FW_TX_TS_DROPPED_PACKET),
+ ICSSG_PA_STATS(FW_INF_PORT_DISABLED),
+ ICSSG_PA_STATS(FW_INF_SAV),
+ ICSSG_PA_STATS(FW_INF_SA_DL),
+ ICSSG_PA_STATS(FW_INF_PORT_BLOCKED),
+ ICSSG_PA_STATS(FW_INF_DROP_TAGGED),
+ ICSSG_PA_STATS(FW_INF_DROP_PRIOTAGGED),
+ ICSSG_PA_STATS(FW_INF_DROP_NOTAG),
+ ICSSG_PA_STATS(FW_INF_DROP_NOTMEMBER),
+ ICSSG_PA_STATS(FW_RX_EOF_SHORT_FRMERR),
+ ICSSG_PA_STATS(FW_RX_B0_DROP_EARLY_EOF),
+ ICSSG_PA_STATS(FW_TX_JUMBO_FRM_CUTOFF),
+ ICSSG_PA_STATS(FW_RX_EXP_FRAG_Q_DROP),
+ ICSSG_PA_STATS(FW_RX_FIFO_OVERRUN),
+ ICSSG_PA_STATS(FW_CUT_THR_PKT),
+ ICSSG_PA_STATS(FW_HOST_RX_PKT_CNT),
+ ICSSG_PA_STATS(FW_HOST_TX_PKT_CNT),
+ ICSSG_PA_STATS(FW_HOST_EGRESS_Q_PRE_OVERFLOW),
+ ICSSG_PA_STATS(FW_HOST_EGRESS_Q_EXP_OVERFLOW),
};
#endif /* __NET_TI_ICSSG_STATS_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
index 424a7e945ea8..490a9cc06fb0 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
@@ -231,4 +231,37 @@
/* Start of 32 bits PA_STAT counters */
#define PA_STAT_32b_START_OFFSET 0x0080
+#define FW_RTU_PKT_DROP 0x0088
+#define FW_Q0_OVERFLOW 0x0090
+#define FW_Q1_OVERFLOW 0x0098
+#define FW_Q2_OVERFLOW 0x00A0
+#define FW_Q3_OVERFLOW 0x00A8
+#define FW_Q4_OVERFLOW 0x00B0
+#define FW_Q5_OVERFLOW 0x00B8
+#define FW_Q6_OVERFLOW 0x00C0
+#define FW_Q7_OVERFLOW 0x00C8
+#define FW_DROPPED_PKT 0x00F8
+#define FW_RX_ERROR 0x0100
+#define FW_RX_DS_INVALID 0x0108
+#define FW_TX_DROPPED_PACKET 0x0110
+#define FW_TX_TS_DROPPED_PACKET 0x0118
+#define FW_INF_PORT_DISABLED 0x0120
+#define FW_INF_SAV 0x0128
+#define FW_INF_SA_DL 0x0130
+#define FW_INF_PORT_BLOCKED 0x0138
+#define FW_INF_DROP_TAGGED 0x0140
+#define FW_INF_DROP_PRIOTAGGED 0x0148
+#define FW_INF_DROP_NOTAG 0x0150
+#define FW_INF_DROP_NOTMEMBER 0x0158
+#define FW_RX_EOF_SHORT_FRMERR 0x0188
+#define FW_RX_B0_DROP_EARLY_EOF 0x0190
+#define FW_TX_JUMBO_FRM_CUTOFF 0x0198
+#define FW_RX_EXP_FRAG_Q_DROP 0x01A0
+#define FW_RX_FIFO_OVERRUN 0x01A8
+#define FW_CUT_THR_PKT 0x01B0
+#define FW_HOST_RX_PKT_CNT 0x0248
+#define FW_HOST_TX_PKT_CNT 0x0250
+#define FW_HOST_EGRESS_Q_PRE_OVERFLOW 0x0258
+#define FW_HOST_EGRESS_Q_EXP_OVERFLOW 0x0260
+
#endif /* __NET_TI_ICSSG_SWITCH_MAP_H */
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index fd2b74508980..55a1a96cd834 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2833,7 +2833,7 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
static void netcp_ethss_timer(struct timer_list *t)
{
- struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
+ struct gbe_priv *gbe_dev = timer_container_of(gbe_dev, t, timer);
struct gbe_intf *gbe_intf;
struct gbe_slave *slave;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index d9240fb91747..a55b0f951181 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1815,7 +1815,7 @@ ThunderLAN driver timer function
static void tlan_timer(struct timer_list *t)
{
- struct tlan_priv *priv = from_timer(priv, t, timer);
+ struct tlan_priv *priv = timer_container_of(priv, t, timer);
struct net_device *dev = priv->dev;
u32 elapsed;
unsigned long flags = 0;
@@ -2746,7 +2746,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
static void tlan_phy_monitor(struct timer_list *t)
{
- struct tlan_priv *priv = from_timer(priv, t, media_timer);
+ struct tlan_priv *priv = timer_container_of(priv, t, media_timer);
struct net_device *dev = priv->dev;
u16 phy;
u16 phy_status;
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 7ec0e3c13d54..7e0b3d694ac0 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1652,7 +1652,7 @@ regs_fail:
static void tsi108_timed_checker(struct timer_list *t)
{
- struct tsi108_prv_data *data = from_timer(data, t, timer);
+ struct tsi108_prv_data *data = timer_container_of(data, t, timer);
struct net_device *dev = data->dev;
tsi108_check_phy(dev);
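
The timer_container_of() conversions in these hunks are mechanical: like from_timer() before it, the helper recovers the enclosing structure from the embedded timer pointer via container_of(). A standalone demo of the underlying pointer arithmetic, with a hypothetical demo_priv structure:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_stub { int pending; };

struct demo_priv {
	int unit;
	struct timer_stub timer;	/* embedded member, as in the drivers */
};

static void demo_callback(struct timer_stub *t)
{
	struct demo_priv *priv = container_of(t, struct demo_priv, timer);

	printf("callback for unit %d\n", priv->unit);
}

int main(void)
{
	struct demo_priv priv = { .unit = 3 };

	demo_callback(&priv.timer);	/* prints "callback for unit 3" */
	return 0;
}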
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index e4d993f31374..a75bca1243e3 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -8,6 +8,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -16,6 +17,7 @@
#include <linux/cache.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/spi/spi.h>
#include <linux/of_net.h>
@@ -45,7 +47,6 @@
struct mse102x_stats {
u64 xfer_err;
- u64 invalid_cmd;
u64 invalid_ctr;
u64 invalid_dft;
u64 invalid_len;
@@ -56,7 +57,6 @@ struct mse102x_stats {
static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
"SPI transfer errors",
- "Invalid command",
"Invalid CTR",
"Invalid DFT",
"Invalid frame length",
@@ -85,6 +85,8 @@ struct mse102x_net_spi {
struct spi_message spi_msg;
struct spi_transfer spi_xfer;
+ bool valid_cmd_received;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
#endif
@@ -98,16 +100,18 @@ static int mse102x_info_show(struct seq_file *s, void *what)
{
struct mse102x_net_spi *mses = s->private;
- seq_printf(s, "TX ring size : %u\n",
+ seq_printf(s, "TX ring size : %u\n",
skb_queue_len(&mses->mse102x.txq));
- seq_printf(s, "IRQ : %d\n",
+ seq_printf(s, "IRQ : %d\n",
mses->spidev->irq);
- seq_printf(s, "SPI effective speed : %lu\n",
+ seq_printf(s, "SPI effective speed : %lu\n",
(unsigned long)mses->spi_xfer.effective_speed_hz);
- seq_printf(s, "SPI mode : %x\n",
+ seq_printf(s, "SPI mode : %x\n",
mses->spidev->mode);
+ seq_printf(s, "Received valid CMD once : %s\n",
+ str_yes_no(mses->valid_cmd_received));
return 0;
}
@@ -194,10 +198,10 @@ static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
} else if (*cmd != cpu_to_be16(DET_CMD)) {
net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
__func__, *cmd);
- mse->stats.invalid_cmd++;
ret = -EIO;
} else {
memcpy(rxb, trx + 2, 2);
+ mses->valid_cmd_received = true;
}
return ret;
@@ -306,7 +310,7 @@ static void mse102x_dump_packet(const char *msg, int len, const char *data)
data, len, true);
}
-static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+static irqreturn_t mse102x_rx_pkt_spi(struct mse102x_net *mse)
{
struct sk_buff *skb;
unsigned int rxalign;
@@ -315,31 +319,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
__be16 rx = 0;
u16 cmd_resp;
u8 *rxpkt;
- int ret;
mse102x_tx_cmd_spi(mse, CMD_CTR);
- ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
- cmd_resp = be16_to_cpu(rx);
-
- if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
+ if (mse102x_rx_cmd_spi(mse, (u8 *)&rx)) {
usleep_range(50, 100);
+ return IRQ_NONE;
+ }
- mse102x_tx_cmd_spi(mse, CMD_CTR);
- ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
- if (ret)
- return;
-
- cmd_resp = be16_to_cpu(rx);
- if ((cmd_resp & CMD_MASK) != CMD_RTS) {
- net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
- __func__, cmd_resp);
- mse->stats.invalid_rts++;
- drop = true;
- goto drop;
- }
-
- net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
- __func__);
+ cmd_resp = be16_to_cpu(rx);
+ if ((cmd_resp & CMD_MASK) != CMD_RTS) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_rts++;
+ drop = true;
+ goto drop;
}
rxlen = cmd_resp & LEN_MASK;
@@ -360,7 +353,7 @@ drop:
rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
if (!skb)
- return;
+ return IRQ_NONE;
/* 2 bytes Start of frame (before ethernet header)
* 2 bytes Data frame tail (after ethernet frame)
@@ -370,7 +363,7 @@ drop:
if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
mse->ndev->stats.rx_errors++;
dev_kfree_skb(skb);
- return;
+ return IRQ_HANDLED;
}
if (netif_msg_pktdata(mse))
@@ -381,6 +374,8 @@ drop:
mse->ndev->stats.rx_packets++;
mse->ndev->stats.rx_bytes += rxlen;
+
+ return IRQ_HANDLED;
}
static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
@@ -512,20 +507,36 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
{
struct mse102x_net *mse = _mse;
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ irqreturn_t ret;
mutex_lock(&mses->lock);
- mse102x_rx_pkt_spi(mse);
+ ret = mse102x_rx_pkt_spi(mse);
mutex_unlock(&mses->lock);
- return IRQ_HANDLED;
+ return ret;
}
static int mse102x_net_open(struct net_device *ndev)
{
+ struct irq_data *irq_data = irq_get_irq_data(ndev->irq);
struct mse102x_net *mse = netdev_priv(ndev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
int ret;
+ if (!irq_data) {
+ netdev_err(ndev, "Invalid IRQ: %d\n", ndev->irq);
+ return -EINVAL;
+ }
+
+ switch (irqd_get_trigger_type(irq_data)) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ break;
+ default:
+ netdev_warn_once(ndev, "Level-triggered IRQ recommended, please update your device tree firmware.\n");

+ break;
+ }
+
ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
ndev->name, mse);
if (ret < 0) {
@@ -543,7 +554,8 @@ static int mse102x_net_open(struct net_device *ndev)
* So poll for possible packet(s) to re-arm the interrupt.
*/
mutex_lock(&mses->lock);
- mse102x_rx_pkt_spi(mse);
+ if (mse102x_rx_pkt_spi(mse) == IRQ_NONE)
+ mse102x_rx_pkt_spi(mse);
mutex_unlock(&mses->lock);
netif_dbg(mse, ifup, ndev, "network device up\n");
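
With IRQF_ONESHOT the line stays masked while the threaded handler runs, so an edge arriving in that window is lost, whereas a level interrupt re-fires on unmask; that is why open() warns on non-level triggers and polls once more only when the first poll returns IRQ_NONE. A hedged kernel-style fragment of the trigger-type check, mirroring the hunk above but not runnable stand-alone:

#include <linux/irq.h>
#include <linux/interrupt.h>

/* Hedged sketch: true only for level-triggered lines, which survive
 * the masked window that IRQF_ONESHOT creates around the thread.
 */
static bool demo_irq_is_level(int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	if (!d)
		return false;
	switch (irqd_get_trigger_type(d)) {
	case IRQ_TYPE_LEVEL_HIGH:
	case IRQ_TYPE_LEVEL_LOW:
		return true;
	default:
		return false;
	}
}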
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 47e3e8434b9e..e5fc942c28cc 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -40,7 +40,7 @@ config NGBE
will be called ngbe.
config TXGBE
- tristate "Wangxun(R) 10GbE PCI Express adapters support"
+ tristate "Wangxun(R) 10/25/40GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
depends on I2C_DESIGNWARE_PLATFORM
@@ -55,7 +55,7 @@ config TXGBE
select PCS_XPCS
select LIBWX
help
- This driver supports Wangxun(R) 10GbE PCI Express family of
+ This driver supports Wangxun(R) 10/25/40GbE PCI Express family of
adapters.
More specific information on configuring the driver is in
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index e9f0f1f2309b..9b78b604a94e 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index 43019ec9329c..c12a4cb951f6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -219,7 +219,7 @@ int wx_nway_reset(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
return -EOPNOTSUPP;
return phylink_ethtool_nway_reset(wx->phylink);
@@ -231,9 +231,6 @@ int wx_get_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
- return -EOPNOTSUPP;
-
return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);
@@ -243,7 +240,7 @@ int wx_set_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
return -EOPNOTSUPP;
return phylink_ethtool_ksettings_set(wx->phylink, cmd);
@@ -255,7 +252,7 @@ void wx_get_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
return;
phylink_ethtool_get_pauseparam(wx->phylink, pause);
@@ -267,7 +264,7 @@ int wx_set_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
return -EOPNOTSUPP;
return phylink_ethtool_set_pauseparam(wx->phylink, pause);
@@ -345,6 +342,7 @@ int wx_set_coalesce(struct net_device *netdev,
max_eitr = WX_SP_MAX_EITR;
break;
case wx_mac_aml:
+ case wx_mac_aml40:
max_eitr = WX_AML_MAX_EITR;
break;
default:
@@ -375,6 +373,7 @@ int wx_set_coalesce(struct net_device *netdev,
switch (wx->mac.type) {
case wx_mac_sp:
case wx_mac_aml:
+ case wx_mac_aml40:
tx_itr_param = WX_12K_ITR;
break;
default:
@@ -413,15 +412,10 @@ static unsigned int wx_max_channels(struct wx *wx)
max_combined = 1;
} else {
/* support up to max allowed queues with RSS */
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
max_combined = 63;
- break;
- default:
+ else
max_combined = 8;
- break;
- }
}
return max_combined;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index aed45abafb1b..0f4be72116b8 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -10,6 +10,7 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_sriov.h"
#include "wx_hw.h"
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
@@ -112,15 +113,10 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMS(1), mask);
- break;
- default:
- break;
}
}
@@ -132,15 +128,10 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMC(1), mask);
- break;
- default:
- break;
}
}
EXPORT_SYMBOL(wx_intr_enable);
@@ -434,14 +425,20 @@ static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);
/* polling reply from FW */
- err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 1000, 50000,
- true, wx, buffer, send_cmd);
+ err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000,
+ timeout * 1000, true, wx, buffer, send_cmd);
if (err) {
wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n",
send_cmd, wx->swfw_index);
goto rel_out;
}
+ if (hdr->cmd_or_resp.ret_status == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd);
+ err = -EINVAL;
+ goto rel_out;
+ }
+
/* expect no reply from FW then return */
if (!return_data)
goto rel_out;
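
The hunk above polls the firmware reply every 2 ms up to an apparently caller-supplied budget (milliseconds, converted to microseconds), instead of the old fixed 50 ms, and treats status 0x80 as "unknown command". A standalone sketch of the same poll-with-timeout shape that read_poll_timeout() provides, with a simulated firmware reply:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool fw_replied(int *tries)
{
	return ++(*tries) >= 5;	/* pretend FW answers on the 5th poll */
}

static int poll_timeout(long sleep_us, long timeout_us)
{
	int tries = 0;

	for (long waited = 0; waited <= timeout_us; waited += sleep_us) {
		if (fw_replied(&tries))
			return 0;
		usleep(sleep_us);
	}
	return -1;	/* -ETIMEDOUT in the kernel version */
}

int main(void)
{
	/* 2000 us period, 1000 ms budget, as in the hunk above */
	printf("poll result: %d\n", poll_timeout(2000, 1000 * 1000));
	return 0;
}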
@@ -698,6 +695,7 @@ void wx_init_eeprom_params(struct wx *wx)
switch (wx->mac.type) {
case wx_mac_sp:
case wx_mac_aml:
+ case wx_mac_aml40:
if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
wx_err(wx, "NVM Read Error\n");
return;
@@ -767,14 +765,8 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
/* setup VMDq pool mapping */
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
- break;
- default:
- break;
- }
/* HW expects these in little endian so we reverse the byte
* order from network order (big endian) to little endian
@@ -912,14 +904,9 @@ void wx_init_rx_addrs(struct wx *wx)
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
/* clear VMDq pool/queue selection for RAR 0 */
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
- break;
- default:
- break;
}
}
@@ -964,11 +951,28 @@ static void wx_sync_mac_table(struct wx *wx)
}
}
+static void wx_full_sync_mac_table(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ wx_set_rar(wx, i,
+ wx->mac_table[i].addr,
+ wx->mac_table[i].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+ } else {
+ wx_clear_rar(wx, i);
+ }
+ wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
+ }
+}
+
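
wx_sync_mac_table() writes back only entries flagged MODIFIED; the new wx_full_sync_mac_table() unconditionally reprograms every RAR slot, which is what the VF-restore path needs after a reset. A standalone sketch of the full-sync strategy, with stand-in flag values and a fake hardware write:

#include <stdint.h>
#include <stdio.h>

#define STATE_MODIFIED 0x1
#define STATE_IN_USE   0x2

struct mac_entry { uint32_t state; };

static void hw_write(int i, int set)
{
	printf("rar[%d] <- %s\n", i, set ? "set" : "clear");
}

static void full_sync(struct mac_entry *tbl, int n)
{
	for (int i = 0; i < n; i++) {
		hw_write(i, tbl[i].state & STATE_IN_USE);	/* every slot */
		tbl[i].state &= ~STATE_MODIFIED;
	}
}

int main(void)
{
	struct mac_entry tbl[3] = {
		{ STATE_IN_USE }, { 0 }, { STATE_IN_USE | STATE_MODIFIED },
	};

	full_sync(tbl, 3);	/* writes all three slots, modified or not */
	return 0;
}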
/* this function destroys the first RAR entry */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
- wx->mac_table[0].pools = 1ULL;
+ wx->mac_table[0].pools = BIT(VMDQ_P(0));
wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
wx_set_rar(wx, 0, wx->mac_table[0].addr,
wx->mac_table[0].pools,
@@ -993,7 +997,7 @@ void wx_flush_sw_mac_table(struct wx *wx)
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);
-static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -1024,7 +1028,7 @@ static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
return -ENOMEM;
}
-static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -1206,6 +1210,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
wx_dbg(wx, "Update mc addr list Complete\n");
}
+static void wx_restore_vf_multicasts(struct wx *wx)
+{
+ u32 i, j, vector_bit, vector_reg;
+ struct vf_data_storage *vfinfo;
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i));
+
+ vfinfo = &wx->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ wx->addr_ctrl.mta_in_use++;
+ vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[j]);
+ vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[j]);
+ wr32m(wx, WX_PSR_MC_TBL(vector_reg),
+ BIT(vector_bit), BIT(vector_bit));
+ /* errata 5: maintain a copy of the reg table conf */
+ wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
+ }
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(i), vmolr);
+ }
+
+ /* Restore any VF macvlans */
+ wx_full_sync_mac_table(wx);
+}
+
/**
* wx_write_mc_addr_list - write multicast addresses to MTA
* @netdev: network interface device structure
@@ -1223,6 +1256,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev)
wx_update_mc_addr_list(wx, netdev);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ wx_restore_vf_multicasts(wx);
+
return netdev_mc_count(netdev);
}
@@ -1243,7 +1279,7 @@ int wx_set_mac(struct net_device *netdev, void *p)
if (retval)
return retval;
- wx_del_mac_filter(wx, wx->mac.addr, 0);
+ wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0));
eth_hw_addr_set(netdev, addr->sa_data);
memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
@@ -1345,6 +1381,10 @@ static int wx_hpbthresh(struct wx *wx)
/* Calculate delay value for device */
dv_id = WX_DV(link, tc);
+ /* Loopback switch introduces additional latency */
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ dv_id += WX_B2BT(tc);
+
/* Delay value is calculated in bit times convert to KB */
kb = WX_BT2KB(dv_id);
rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
@@ -1400,12 +1440,107 @@ static void wx_pbthresh_setup(struct wx *wx)
wx->fc.low_water = 0;
}
+static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 pfvfspoof, reg_offset, vf_shift;
+
+ vf_shift = WX_VF_IND_SHIFT(vf);
+ reg_offset = WX_VF_REG_OFFSET(vf);
+
+ pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset));
+ if (enable)
+ pfvfspoof |= BIT(vf_shift);
+ else
+ pfvfspoof &= ~BIT(vf_shift);
+ wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof);
+}
+
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ struct wx *wx = netdev_priv(netdev);
+ u32 regval;
+
+ if (vf >= wx->num_vfs)
+ return -EINVAL;
+
+ wx->vfinfo[vf].spoofchk_enabled = setting;
+
+ regval = (setting << vf_bit);
+ wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval);
+
+ if (wx->vfinfo[vf].vlan_count)
+ wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval);
+
+ return 0;
+}
+
+static void wx_configure_virtualization(struct wx *wx)
+{
+ u16 pool = wx->num_rx_pools;
+ u32 reg_offset, vf_shift;
+ u32 i;
+
+ if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ return;
+
+ wr32m(wx, WX_PSR_VM_CTL,
+ WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN,
+ FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) |
+ WX_PSR_VM_CTL_REPLEN);
+ while (pool--)
+ wr32m(wx, WX_PSR_VM_L2CTL(pool),
+ WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ vf_shift = BIT(VMDQ_P(0));
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(0), vf_shift);
+ wr32(wx, WX_TDM_VF_TE(0), vf_shift);
+ } else {
+ vf_shift = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_offset = WX_VF_REG_OFFSET(VMDQ_P(0));
+
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1);
+ wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1);
+ }
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (!wx->vfinfo[i].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, i, false);
+ /* enable ethertype anti spoofing if hw supports it */
+ wx_set_ethertype_anti_spoofing(wx, true, i);
+ }
+}
+
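
The per-VF enable bits above are spread across 32-bit registers, so a pool index splits into a register offset and a bit position, and GENMASK(31, vf_shift) enables every pool from the first PF pool upward. A standalone check of that bit math; the /32 and %32 split is an assumption consistent with two 32-bit enable registers covering 64 pools:

#include <stdint.h>
#include <stdio.h>

#define VF_REG_OFFSET(vf) ((vf) / 32)	/* assumed layout */
#define VF_IND_SHIFT(vf)  ((vf) % 32)
#define GENMASK32(h, l)   (((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	int num_vfs = 40;
	int pf_pool = num_vfs;	/* PF pools start right after the VFs */

	printf("PF pool %d -> reg %d, bit %d\n", pf_pool,
	       VF_REG_OFFSET(pf_pool), VF_IND_SHIFT(pf_pool));
	printf("enable mask: 0x%08x\n",
	       GENMASK32(31, VF_IND_SHIFT(pf_pool)));	/* 0xFFFFFF00 */
	return 0;
}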
static void wx_configure_port(struct wx *wx)
{
u32 value, i;
- value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ value = (wx->num_vfs == 0) ?
+ WX_CFG_PORT_CTL_NUM_VT_NONE :
+ WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ if (wx->ring_feature[RING_F_RSS].indices == 4)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ } else {
+ value = 0;
+ }
+ }
+
+ value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK |
WX_CFG_PORT_CTL_D_VLAN |
WX_CFG_PORT_CTL_QINQ,
value);
@@ -1466,6 +1601,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable)
}
}
+static void wx_vlan_promisc_enable(struct wx *wx)
+{
+ u32 vlnctrl, i, vind, bits, reg_idx;
+
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ /* we need to keep the VLAN filter on in SRIOV */
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ } else {
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ return;
+ }
+ /* We are already in VLAN promisc, nothing to do */
+ if (test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ set_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+ /* Add PF to all active pools */
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vind = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits |= BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* Set all bits in the VLAN filter table array */
+ for (i = 0; i < wx->mac.vft_size; i++)
+ wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX);
+}
+
+static void wx_scrub_vfta(struct wx *wx)
+{
+ u32 i, vid, bits, vfta, vind, vlvf, reg_idx;
+
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX);
+ /* pull VLAN ID from VLVF */
+ vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN;
+ if (vlvf & WX_PSR_VLAN_SWC_VIEN) {
+ /* if PF is part of this then continue */
+ if (test_bit(vid, wx->active_vlans))
+ continue;
+ }
+ /* remove PF from the pool */
+ vind = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits &= ~BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* extract values from vft_shadow and write back to VFTA */
+ for (i = 0; i < wx->mac.vft_size; i++) {
+ vfta = wx->mac.vft_shadow[i];
+ wr32(wx, WX_PSR_VLAN_TBL(i), vfta);
+ }
+}
+
+static void wx_vlan_promisc_disable(struct wx *wx)
+{
+ u32 vlnctrl;
+
+ /* configure vlan filtering */
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ /* We are not in VLAN promisc, nothing to do */
+ if (!test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+ wx_scrub_vfta(wx);
+}
+
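
The VLAN filter table touched above is an array of 32-bit words with one bit per VLAN ID: promisc mode sets every word to U32_MAX, and disable restores the vft_shadow copy. A minimal demo of the vid-to-(word, bit) split; the 4096-VLAN/128-word size is the usual layout and an assumption here:

#include <stdint.h>
#include <stdio.h>

#define VFTA_WORDS 128	/* 4096 VLAN IDs / 32 bits per word */

int main(void)
{
	uint32_t vfta[VFTA_WORDS] = { 0 };
	uint16_t vid = 100;

	vfta[vid / 32] |= 1u << (vid % 32);	/* enable filtering for vid */
	printf("vid %u -> word %u bit %u\n", vid, vid / 32, vid % 32);
	return 0;
}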
void wx_set_rx_mode(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
@@ -1478,7 +1690,7 @@ void wx_set_rx_mode(struct net_device *netdev)
/* Check for Promiscuous and All Multicast modes */
fctrl = rd32(wx, WX_PSR_CTL);
fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
- vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)));
vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
WX_PSR_VM_L2CTL_MPE |
WX_PSR_VM_L2CTL_ROPE |
@@ -1499,7 +1711,10 @@ void wx_set_rx_mode(struct net_device *netdev)
fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
/* pf don't want packets routing to vf, so clear UPE */
vmolr |= WX_PSR_VM_L2CTL_MPE;
- vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
if (netdev->flags & IFF_ALLMULTI) {
@@ -1522,7 +1737,7 @@ void wx_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = wx_write_uc_addr_list(netdev, 0);
+ count = wx_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
vmolr |= WX_PSR_VM_L2CTL_UPE;
@@ -1540,7 +1755,7 @@ void wx_set_rx_mode(struct net_device *netdev)
wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
wr32(wx, WX_PSR_CTL, fctrl);
- wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
+ wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr);
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
(features & NETIF_F_HW_VLAN_STAG_RX))
@@ -1548,6 +1763,10 @@ void wx_set_rx_mode(struct net_device *netdev)
else
wx_vlan_strip_control(wx, false);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ wx_vlan_promisc_disable(wx);
+ else
+ wx_vlan_promisc_enable(wx);
}
EXPORT_SYMBOL(wx_set_rx_mode);
@@ -1797,6 +2016,13 @@ static void wx_setup_reta(struct wx *wx)
u32 random_key_size = WX_RSS_KEY_SIZE / 4;
u32 i, j;
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
+ if (wx->mac.type == wx_mac_em)
+ rss_i = 1;
+ else
+ rss_i = rss_i < 4 ? 4 : rss_i;
+ }
+
/* Fill out hash function seeds */
for (i = 0; i < random_key_size; i++)
wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
@@ -1814,10 +2040,42 @@ static void wx_setup_reta(struct wx *wx)
wx_store_reta(wx);
}
+#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1)
+#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2)
+static void wx_setup_psrtype(struct wx *wx)
+{
+ int rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 psrtype;
+ int pool;
+
+ psrtype = WX_RDB_PL_CFG_L4HDR |
+ WX_RDB_PL_CFG_L3HDR |
+ WX_RDB_PL_CFG_L2HDR |
+ WX_RDB_PL_CFG_TUN_OUTL2HDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR;
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ for_each_set_bit(pool, &wx->fwd_bitmask, 8)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ } else {
+ if (rss_i > 3)
+ psrtype |= WX_RDB_RSS_PL_4;
+ else if (rss_i > 1)
+ psrtype |= WX_RDB_RSS_PL_2;
+
+ for_each_set_bit(pool, &wx->fwd_bitmask, 32)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ }
+}
+
static void wx_setup_mrqc(struct wx *wx)
{
u32 rss_field = 0;
+ /* VT, and RSS do not coexist at the same time */
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return;
+
/* Disable indicating checksum in descriptor, enables RSS hash */
wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
@@ -1847,16 +2105,11 @@ static void wx_setup_mrqc(struct wx *wx)
**/
void wx_configure_rx(struct wx *wx)
{
- u32 psrtype, i;
int ret;
+ u32 i;
wx_disable_rx(wx);
-
- psrtype = WX_RDB_PL_CFG_L4HDR |
- WX_RDB_PL_CFG_L3HDR |
- WX_RDB_PL_CFG_L2HDR |
- WX_RDB_PL_CFG_TUN_TUNHDR;
- wr32(wx, WX_RDB_PL_CFG(0), psrtype);
+ wx_setup_psrtype(wx);
/* enable hw crc stripping */
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
@@ -1904,6 +2157,7 @@ void wx_configure(struct wx *wx)
{
wx_set_rxpba(wx);
wx_pbthresh_setup(wx);
+ wx_configure_virtualization(wx);
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
@@ -1998,10 +2252,8 @@ int wx_stop_adapter(struct wx *wx)
}
EXPORT_SYMBOL(wx_stop_adapter);
-void wx_reset_misc(struct wx *wx)
+void wx_reset_mac(struct wx *wx)
{
- int i;
-
/* receive packets that size > 2048 */
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
@@ -2013,6 +2265,14 @@ void wx_reset_misc(struct wx *wx)
WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+}
+EXPORT_SYMBOL(wx_reset_mac);
+
+void wx_reset_misc(struct wx *wx)
+{
+ int i;
+
+ wx_reset_mac(wx);
wr32m(wx, WX_MIS_RST_ST,
WX_MIS_RST_ST_RST_INIT, 0x1E00);
@@ -2262,7 +2522,7 @@ static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
{
u32 bitindex, vfta, targetbit;
bool vfta_changed = false;
@@ -2518,7 +2778,8 @@ void wx_update_stats(struct wx *wx)
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
- for (i = 0; i < wx->mac.max_rx_queues; i++)
+ for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
+ i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
}
EXPORT_SYMBOL(wx_update_stats);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index b883342bb576..26a56cba60b9 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -26,9 +26,12 @@ void wx_init_eeprom_params(struct wx *wx);
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
void wx_init_rx_addrs(struct wx *wx);
void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool);
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool);
void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int wx_disable_sec_rx_path(struct wx *wx);
void wx_enable_sec_rx_path(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
@@ -39,9 +42,11 @@ void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
int wx_stop_adapter(struct wx *wx);
+void wx_reset_mac(struct wx *wx);
void wx_reset_misc(struct wx *wx);
int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index e69eaa65e0de..7f2e6cddfeb1 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -5,6 +5,7 @@
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/inet_ecn.h>
+#include <linux/workqueue.h>
#include <linux/iopoll.h>
#include <linux/sctp.h>
#include <linux/pci.h>
@@ -1620,6 +1621,65 @@ void wx_napi_disable_all(struct wx *wx)
}
EXPORT_SYMBOL(wx_napi_disable_all);
+static bool wx_set_vmdq_queues(struct wx *wx)
+{
+ u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit;
+ u16 rss_i = wx->ring_feature[RING_F_RSS].limit;
+ u16 rss_m = WX_RSS_DISABLED_MASK;
+ u16 vmdq_m = 0;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+ /* Add starting offset to total pool count */
+ vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 64, vmdq_i);
+
+ /* 64 pool mode with 2 queues per pool, or
+ * 16/32/64 pool mode with 1 queue per pool
+ */
+ if (vmdq_i > 32 || rss_i < 4) {
+ vmdq_m = WX_VMDQ_2Q_MASK;
+ rss_m = WX_RSS_2Q_MASK;
+ rss_i = min_t(u16, rss_i, 2);
+ /* 32 pool mode with 4 queues per pool */
+ } else {
+ vmdq_m = WX_VMDQ_4Q_MASK;
+ rss_m = WX_RSS_4Q_MASK;
+ rss_i = 4;
+ }
+ } else {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 8, vmdq_i);
+
+ /* when VMDQ on, disable RSS */
+ rss_i = 1;
+ }
+
+ /* remove the starting offset from the pool count */
+ vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset;
+
+ /* save features for later use */
+ wx->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ wx->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+ /* limit RSS based on user input and save for later use */
+ wx->ring_feature[RING_F_RSS].indices = rss_i;
+ wx->ring_feature[RING_F_RSS].mask = rss_m;
+
+ wx->queues_per_pool = rss_i; /* same as num_rx_queues_per_pool */

+ wx->num_rx_pools = vmdq_i;
+ wx->num_rx_queues_per_pool = rss_i;
+
+ wx->num_rx_queues = vmdq_i * rss_i;
+ wx->num_tx_queues = vmdq_i * rss_i;
+
+ return true;
+}
+
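
The pool/queue split above: on the larger parts, more than 32 pools (or fewer than 4 RSS queues) forces 2 queues per pool, otherwise 32 pools get 4 queues each; smaller parts cap VMDq at 8 pools with RSS disabled. A standalone sketch of just the counts (mask values omitted):

#include <stdio.h>

static void split(int vmdq_i, int rss_i, int multi64)
{
	if (multi64) {
		vmdq_i = vmdq_i > 64 ? 64 : vmdq_i;
		if (vmdq_i > 32 || rss_i < 4)
			rss_i = rss_i < 2 ? rss_i : 2;	/* 2 queues/pool */
		else
			rss_i = 4;			/* 32 pools x 4 queues */
	} else {
		vmdq_i = vmdq_i > 8 ? 8 : vmdq_i;
		rss_i = 1;				/* RSS off with VMDq */
	}
	printf("%d pools x %d queues = %d rings\n",
	       vmdq_i, rss_i, vmdq_i * rss_i);
}

int main(void)
{
	split(64, 4, 1);	/* 64 pools x 2 queues */
	split(32, 4, 1);	/* 32 pools x 4 queues */
	split(8, 4, 0);		/* 8 pools x 1 queue  */
	return 0;
}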
/**
* wx_set_rss_queues: Allocate queues for RSS
* @wx: board private structure to initialize
@@ -1634,6 +1694,10 @@ static void wx_set_rss_queues(struct wx *wx)
/* set mask for 16 queue limit of RSS */
f = &wx->ring_feature[RING_F_RSS];
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ f->mask = WX_RSS_64Q_MASK;
+ else
+ f->mask = WX_RSS_8Q_MASK;
f->indices = f->limit;
if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
@@ -1666,6 +1730,9 @@ static void wx_set_num_queues(struct wx *wx)
wx->num_tx_queues = 1;
wx->queues_per_pool = 1;
+ if (wx_set_vmdq_queues(wx))
+ return;
+
wx_set_rss_queues(wx);
}
@@ -1746,6 +1813,10 @@ static int wx_set_interrupt_capability(struct wx *wx)
if (ret == 0 || (ret == -ENOMEM))
return ret;
+ /* Disable VMDq support */
+ dev_warn(&wx->pdev->dev, "Disabling VMDq support\n");

+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+
/* Disable RSS */
dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
wx->ring_feature[RING_F_RSS].limit = 1;
@@ -1772,6 +1843,49 @@ static int wx_set_interrupt_capability(struct wx *wx)
return 0;
}
+static bool wx_cache_ring_vmdq(struct wx *wx)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS];
+ u16 reg_idx;
+ int i;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->rx_ring[i]->reg_idx = reg_idx;
+ }
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & rss->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->tx_ring[i]->reg_idx = reg_idx;
+ }
+ } else {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* If we are greater than indices move to next pool */
+ wx->rx_ring[i]->reg_idx = reg_idx + i;
+
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_tx_queues; i++)
+ /* If we are greater than indices move to next pool */
+ wx->tx_ring[i]->reg_idx = reg_idx + i;
+ }
+
+ return true;
+}
+
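
In the ring-to-register walk above, the low bits of ~vmdq->mask select the queue within a pool, and __ALIGN_MASK() rounds reg_idx up to the next pool boundary once a pool's active RSS queues are exhausted. A standalone trace of that walk, assuming a 4-queue pool stride (low mask == 3):

#include <stdio.h>

#define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))	/* kernel __ALIGN_MASK */

int main(void)
{
	unsigned int qmask = 3;		/* 4 registers per pool (assumed) */
	unsigned int rss_indices = 2;	/* only 2 queues in use per pool */
	unsigned int reg_idx = 0;

	for (int i = 0; i < 6; i++, reg_idx++) {
		if ((reg_idx & qmask) >= rss_indices)
			reg_idx = ALIGN_MASK(reg_idx, qmask);	/* skip dead regs */
		printf("ring %d -> reg %u\n", i, reg_idx);
	}
	/* prints regs 0, 1, 4, 5, 8, 9: two live queues per 4-wide pool */
	return 0;
}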
/**
* wx_cache_ring_rss - Descriptor ring to register mapping for RSS
* @wx: board private structure to initialize
@@ -1783,6 +1897,9 @@ static void wx_cache_ring_rss(struct wx *wx)
{
u16 i;
+ if (wx_cache_ring_vmdq(wx))
+ return;
+
for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->reg_idx = i;
@@ -1843,6 +1960,7 @@ static int wx_alloc_q_vector(struct wx *wx,
switch (wx->mac.type) {
case wx_mac_sp:
case wx_mac_aml:
+ case wx_mac_aml40:
default_itr = WX_12K_ITR;
break;
default:
@@ -2181,7 +2299,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
- msix_vector += 1; /* offset for queue vectors */
+ if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7))
+ msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2210,6 +2329,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
itr_reg = q_vector->itr & WX_SP_MAX_EITR;
break;
case wx_mac_aml:
+ case wx_mac_aml40:
itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR;
break;
default:
@@ -2233,10 +2353,17 @@ void wx_configure_vectors(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
u32 eitrsel = 0;
- u16 v_idx;
+ u16 v_idx, i;
if (pdev->msix_enabled) {
/* Populate MSIX to EITR Select */
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ if (wx->num_vfs >= 32)
+ eitrsel = BIT(wx->num_vfs % 32) - 1;
+ } else {
+ for (i = 0; i < wx->num_vfs; i++)
+ eitrsel |= BIT(i);
+ }
wr32(wx, WX_PX_ITRSEL, eitrsel);
/* use EIAM to auto-mask when MSI-X interrupt is asserted
* this saves a register write for every interrupt
@@ -2876,6 +3003,33 @@ netdev_features_t wx_fix_features(struct net_device *netdev,
}
EXPORT_SYMBOL(wx_fix_features);
+#define WX_MAX_TUNNEL_HDR_LEN 80
+netdev_features_t wx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!skb->encapsulation)
+ return features;
+
+ if (wx->mac.type == wx_mac_em)
+ return features & ~NETIF_F_CSUM_MASK;
+
+ if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ WX_MAX_TUNNEL_HDR_LEN))
+ return features & ~NETIF_F_CSUM_MASK;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER &&
+ skb->inner_protocol != htons(ETH_P_IP) &&
+ skb->inner_protocol != htons(ETH_P_IPV6) &&
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+EXPORT_SYMBOL(wx_features_check);
+
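
wx_features_check() above runs per skb and strips the checksum (and sometimes GSO) offload bits whenever the hardware cannot parse the tunnel, here anything with more than 80 bytes between the transport header and the inner MAC header. A standalone sketch of that masking, with stand-in bits for the NETIF_F_* flags:

#include <stdint.h>
#include <stdio.h>

#define F_CSUM (1u << 0)	/* stand-in for NETIF_F_CSUM_MASK */
#define F_GSO  (1u << 1)	/* stand-in for NETIF_F_GSO_MASK */
#define MAX_TUNNEL_HDR_LEN 80

static uint32_t features_check(int encap, int tunnel_hdr_len, uint32_t features)
{
	if (!encap)
		return features;
	if (tunnel_hdr_len > MAX_TUNNEL_HDR_LEN)
		return features & ~F_CSUM;	/* fall back to sw checksum */
	return features;
}

int main(void)
{
	printf("plain:       0x%x\n", features_check(0, 0, F_CSUM | F_GSO));
	printf("deep tunnel: 0x%x\n", features_check(1, 96, F_CSUM | F_GSO));
	return 0;
}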
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{
@@ -2942,5 +3096,35 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count,
}
EXPORT_SYMBOL(wx_set_ring);
+void wx_service_event_schedule(struct wx *wx)
+{
+ if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state))
+ queue_work(system_power_efficient_wq, &wx->service_task);
+}
+EXPORT_SYMBOL(wx_service_event_schedule);
+
+void wx_service_event_complete(struct wx *wx)
+{
+ if (WARN_ON(!test_bit(WX_STATE_SERVICE_SCHED, wx->state)))
+ return;
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+EXPORT_SYMBOL(wx_service_event_complete);
+
+void wx_service_timer(struct timer_list *t)
+{
+ struct wx *wx = timer_container_of(wx, t, service_timer);
+ unsigned long next_event_offset = HZ * 2;
+
+ /* Reset the timer */
+ mod_timer(&wx->service_timer, next_event_offset + jiffies);
+
+ wx_service_event_schedule(wx);
+}
+EXPORT_SYMBOL(wx_service_timer);
+
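
The WX_STATE_SERVICE_SCHED bit above makes scheduling idempotent: test_and_set_bit() queues the work only when it is not already pending, and the service task clears the bit when done. A standalone sketch of the same handshake, using a C11 atomic flag in place of the wx->state bit:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag service_sched = ATOMIC_FLAG_INIT;

static void service_event_schedule(void)
{
	if (!atomic_flag_test_and_set(&service_sched))
		printf("work queued\n");	/* queue_work() in the driver */
	else
		printf("already pending, skipped\n");
}

static void service_event_complete(void)
{
	atomic_flag_clear(&service_sched);	/* clear_bit() in the driver */
}

int main(void)
{
	service_event_schedule();	/* queues */
	service_event_schedule();	/* skipped */
	service_event_complete();
	service_event_schedule();	/* queues again */
	return 0;
}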
MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index fdeb0c315b75..aed6ea8cf0d6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -33,7 +33,13 @@ void wx_get_stats64(struct net_device *netdev,
int wx_set_features(struct net_device *netdev, netdev_features_t features);
netdev_features_t wx_fix_features(struct net_device *netdev,
netdev_features_t features);
+netdev_features_t wx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
+void wx_service_event_schedule(struct wx *wx);
+void wx_service_event_complete(struct wx *wx);
+void wx_service_timer(struct timer_list *t);
-#endif /* _NGBE_LIB_H_ */
+#endif /* _WX_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
new file mode 100644
index 000000000000..73af5f11c3bd
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include "wx_type.h"
+#include "wx_mbx.h"
+
+/**
+ * wx_obtain_mbx_lock_pf - obtain mailbox lock
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf)
+{
+ int count = 5;
+ u32 mailbox;
+
+ while (count--) {
+ /* Take ownership of the buffer */
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ mailbox = rd32(wx, WX_PXMAILBOX(vf));
+ if (mailbox & WX_PXMAILBOX_PFU)
+ return 0;
+ else if (count)
+ udelay(10);
+ }
+ wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf);
+
+ return -EBUSY;
+}
+
+static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index)
+{
+ u32 mbvficr = rd32(wx, WX_MBVFICR(index));
+
+ if (!(mbvficr & mask))
+ return -EBUSY;
+ wr32(wx, WX_MBVFICR(index), mask);
+
+ return 0;
+}
+
+/**
+ * wx_check_for_ack_pf - checks to see if the VF has acked
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 if the VF has set the status bit or else -EBUSY
+ **/
+int wx_check_for_ack_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFACK_MASK,
+ BIT(vf_bit)),
+ index);
+}
+
+/**
+ * wx_check_for_msg_pf - checks to see if the VF has sent mail
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 if the VF has got req bit or else -EBUSY
+ **/
+int wx_check_for_msg_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFREQ_MASK,
+ BIT(vf_bit)),
+ index);
+}
+
+/**
+ * wx_write_mbx_pf - Places a message in the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EINVAL/-EBUSY on failure
+ **/
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* mbx->size is up to 15 */
+ if (size > mbx->size) {
+ wx_err(wx, "Invalid mailbox message size %d", size);
+ return -EINVAL;
+ }
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ return ret;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ wx_check_for_msg_pf(wx, vf);
+ wx_check_for_ack_pf(wx, vf);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(wx, WX_PXMBMEM(vf), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS);
+
+ return 0;
+}
+
+/**
+ * wx_read_mbx_pf - Read a message from the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret;
+ u16 i;
+
+ /* limit read to mailbox size; mbx->size is at most 15 */

+ if (size > mbx->size)
+ size = mbx->size;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(wx, WX_PXMBMEM(vf), i);
+
+ /* Acknowledge the message and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK);
+
+ return 0;
+}
+
+/**
+ * wx_check_for_rst_pf - checks to see if the VF has reset
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: return 0 on success and -EBUSY on failure
+ **/
+int wx_check_for_rst_pf(struct wx *wx, u16 vf)
+{
+ u32 reg_offset = WX_VF_REG_OFFSET(vf);
+ u32 vf_shift = WX_VF_IND_SHIFT(vf);
+ u32 vflre = 0;
+
+ vflre = rd32(wx, WX_VFLRE(reg_offset));
+ if (!(vflre & BIT(vf_shift)))
+ return -EBUSY;
+ wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift));
+
+ return 0;
+}
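
The PF-side send path in wx_write_mbx_pf() is: take the PFU lock, flush stale REQ/ACK bits, copy the payload words, then raise STS to interrupt the VF and drop the lock. A condensed standalone sketch of that ordering, with plain variables standing in for the mailbox registers:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)  (1u << (n))
#define MBX_STS BIT(0)	/* message ready for the VF */
#define MBX_PFU BIT(3)	/* PF owns the buffer */

static uint32_t mailbox;	/* stands in for WX_PXMAILBOX(vf) */
static uint32_t mbmem[15];	/* stands in for WX_PXMBMEM(vf) */

static int pf_write_mbx(const uint32_t *msg, int size)
{
	mailbox = MBX_PFU;		/* 1. take ownership */
	if (!(mailbox & MBX_PFU))
		return -1;		/* VF raced us; -EBUSY in-kernel */
	for (int i = 0; i < size; i++)	/* 2. copy payload */
		mbmem[i] = msg[i];
	mailbox = MBX_STS;		/* 3. tell the VF, drop the lock */
	return 0;
}

int main(void)
{
	uint32_t msg[2] = { 0x01 /* WX_VF_RESET */, 0 };

	printf("send: %d\n", pf_write_mbx(msg, 2));
	return 0;
}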
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
new file mode 100644
index 000000000000..05aae138dbc3
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+#ifndef _WX_MBX_H_
+#define _WX_MBX_H_
+
+#define WX_VXMAILBOX_SIZE 15
+
+/* PF Registers */
+#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */
+#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */
+#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
+#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+
+#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
+
+#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
+#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */
+
+/* SR-IOV specific macros */
+#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */
+#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0)
+#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16)
+
+#define WX_VT_MSGTYPE_ACK BIT(31)
+#define WX_VT_MSGTYPE_NACK BIT(30)
+#define WX_VT_MSGTYPE_CTS BIT(29)
+#define WX_VT_MSGINFO_SHIFT 16
+#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
+
+enum wx_pfvf_api_rev {
+ wx_mbox_api_null,
+ wx_mbox_api_13 = 4, /* API version 1.3 */
+ wx_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API */
+#define WX_VF_RESET 0x01 /* VF requests reset */
+#define WX_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define WX_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define WX_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define WX_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define WX_VF_SET_MACVLAN 0x06 /* VF requests PF unicast filter */
+#define WX_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define WX_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define WX_VF_GET_RETA 0x0a /* VF request for RETA */
+#define WX_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define WX_VF_UPDATE_XCAST_MODE 0x0c
+#define WX_VF_GET_LINK_STATE 0x10 /* get vf link state */
+#define WX_VF_GET_FW_VERSION 0x11 /* get fw version */
+
+#define WX_VF_BACKUP 0x8001 /* VF requests backup */
+
+#define WX_PF_CONTROL_MSG BIT(8) /* PF control message */
+#define WX_PF_NOFITY_VF_LINK_STATUS 0x1
+#define WX_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31)
+
+#define WX_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define WX_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define WX_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define WX_VF_DEF_QUEUE 4 /* Default queue offset */
+
+#define WX_VF_PERMADDR_MSG_LEN 4
+
+enum wxvf_xcast_modes {
+ WXVF_XCAST_MODE_NONE = 0,
+ WXVF_XCAST_MODE_MULTI,
+ WXVF_XCAST_MODE_ALLMULTI,
+ WXVF_XCAST_MODE_PROMISC,
+};
+
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
+
+#endif /* _WX_MBX_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
index 07c015ba338f..2c39b879f977 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -15,12 +15,14 @@
#define WX_INCVAL_100 0xA00000
#define WX_INCVAL_10 0xC7F380
#define WX_INCVAL_EM 0x2000000
+#define WX_INCVAL_AML 0xA00000
#define WX_INCVAL_SHIFT_10GB 20
#define WX_INCVAL_SHIFT_1GB 18
#define WX_INCVAL_SHIFT_100 15
#define WX_INCVAL_SHIFT_10 12
#define WX_INCVAL_SHIFT_EM 22
+#define WX_INCVAL_SHIFT_AML 21
#define WX_OVERFLOW_PERIOD (HZ * 30)
#define WX_PTP_TX_TIMEOUT (HZ)
@@ -504,15 +506,27 @@ static long wx_ptp_create_clock(struct wx *wx)
wx->ptp_caps.gettimex64 = wx_ptp_gettimex64;
wx->ptp_caps.settime64 = wx_ptp_settime64;
wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work;
- if (wx->mac.type == wx_mac_em) {
- wx->ptp_caps.max_adj = 500000000;
+ switch (wx->mac.type) {
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ wx->ptp_caps.max_adj = 250000000;
wx->ptp_caps.n_per_out = 1;
wx->ptp_setup_sdp = wx_ptp_setup_sdp;
wx->ptp_caps.enable = wx_ptp_feature_enable;
- } else {
+ break;
+ case wx_mac_sp:
wx->ptp_caps.max_adj = 250000000;
wx->ptp_caps.n_per_out = 0;
wx->ptp_setup_sdp = NULL;
+ break;
+ case wx_mac_em:
+ wx->ptp_caps.max_adj = 500000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev);
@@ -647,10 +661,18 @@ static u64 wx_ptp_read(const struct cyclecounter *hw_cc)
static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval)
{
- if (wx->mac.type == wx_mac_em) {
+ switch (wx->mac.type) {
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ *shift = WX_INCVAL_SHIFT_AML;
+ *incval = WX_INCVAL_AML;
+ return;
+ case wx_mac_em:
*shift = WX_INCVAL_SHIFT_EM;
*incval = WX_INCVAL_EM;
return;
+ default:
+ break;
}
switch (wx->speed) {
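
Assuming the usual cyclecounter convention ns = (cycles * incval) >> shift (inferred from context; the patch itself does not restate it), the new AML pair works out to 0xA00000 >> 21 = 5 ns per cycle, i.e. a 200 MHz SYSTIME clock, versus 0x2000000 >> 22 = 8 ns per cycle (125 MHz) for EM. A standalone arithmetic check:

#include <stdio.h>

int main(void)
{
	/* ns per SYSTIME cycle = incval >> shift (assumed convention) */
	printf("AML: %llu ns/cycle\n", 0xA00000ULL >> 21);	/* 5 */
	printf("EM:  %llu ns/cycle\n", 0x2000000ULL >> 22);	/* 8 */
	return 0;
}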
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
new file mode 100644
index 000000000000..e8656d9d733b
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_mbx.h"
+#include "wx_sriov.h"
+
+static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
+{
+ bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
+ struct wx *wx = pci_get_drvdata(pdev);
+ u32 vfn = WX_VF_NUM_GET(event_mask);
+
+ if (enable)
+ eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
+}
+
+static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
+{
+ struct vf_macvlans *mv_list;
+ int num_vf_macvlans, i;
+
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&wx->vf_mvs.mvlist);
+
+ num_vf_macvlans = wx->mac.num_rar_entries -
+ (WX_MAX_PF_MACVLANS + 1 + num_vfs);
+ if (!num_vf_macvlans)
+ return -EINVAL;
+
+ mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (!mv_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list[i].vf = -1;
+ mv_list[i].free = true;
+ list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
+ }
+ wx->mv_list = mv_list;
+
+ return 0;
+}
+
+static void wx_sriov_clear_data(struct wx *wx)
+{
+ /* set num VFs to 0 to prevent access to vfinfo */
+ wx->num_vfs = 0;
+
+ /* free VF control structures */
+ kfree(wx->vfinfo);
+ wx->vfinfo = NULL;
+
+ /* free macvlan list */
+ kfree(wx->mv_list);
+ wx->mv_list = NULL;
+
+ /* set default pool back to 0 */
+ wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
+ wx->ring_feature[RING_F_VMDQ].offset = 0;
+
+ clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+	/* Disable VMDq flag so device will be set in NM (normal) mode */
+ if (wx->ring_feature[RING_F_VMDQ].limit == 1)
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+}
+
+static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
+{
+ int i, ret = 0;
+ u32 value = 0;
+
+ set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+ if (!wx->ring_feature[RING_F_VMDQ].limit)
+ wx->ring_feature[RING_F_VMDQ].limit = 1;
+ wx->ring_feature[RING_F_VMDQ].offset = num_vfs;
+
+ wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!wx->vfinfo)
+ return -ENOMEM;
+
+ ret = wx_alloc_vf_macvlans(wx, num_vfs);
+ if (ret)
+ return ret;
+
+ /* Initialize default switching mode VEB */
+ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);
+
+ for (i = 0; i < num_vfs; i++) {
+ /* enable spoof checking for all VFs */
+ wx->vfinfo[i].spoofchk_enabled = true;
+ wx->vfinfo[i].link_enable = true;
+ /* untrust all VFs */
+ wx->vfinfo[i].trusted = false;
+ /* set the default xcast mode */
+ wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
+ }
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ value = WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (num_vfs < 32)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ }
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK,
+ value);
+
+ return ret;
+}
+
+static void wx_sriov_reinit(struct wx *wx)
+{
+ rtnl_lock();
+ wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
+ rtnl_unlock();
+}
+
+void wx_disable_sriov(struct wx *wx)
+{
+ if (!pci_vfs_assigned(wx->pdev))
+ pci_disable_sriov(wx->pdev);
+ else
+ wx_err(wx, "Unloading driver while VFs are assigned.\n");
+
+	/* clear flags and free allocated data */
+ wx_sriov_clear_data(wx);
+}
+EXPORT_SYMBOL(wx_disable_sriov);
+
+static int wx_pci_sriov_enable(struct pci_dev *dev,
+ int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+ int err = 0, i;
+
+ err = __wx_enable_sriov(wx, num_vfs);
+ if (err)
+ return err;
+
+ wx->num_vfs = num_vfs;
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_vf_configuration(dev, (i | WX_VF_ENABLE));
+
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ wx_sriov_reinit(wx);
+
+ err = pci_enable_sriov(dev, num_vfs);
+ if (err) {
+ wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
+ goto err_out;
+ }
+
+ return num_vfs;
+err_out:
+ wx_sriov_clear_data(wx);
+ return err;
+}
+
+static void wx_pci_sriov_disable(struct pci_dev *dev)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+
+ wx_disable_sriov(wx);
+ wx_sriov_reinit(wx);
+}
+
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ int err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ wx_pci_sriov_disable(pdev);
+ return 0;
+ }
+
+ wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = wx_pci_sriov_enable(pdev, num_vfs);
+ if (err)
+ return err;
+
+ return num_vfs;
+}
+EXPORT_SYMBOL(wx_pci_sriov_configure);
+
+static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
+{
+ u8 hw_addr[ETH_ALEN];
+ int ret = 0;
+
+ ether_addr_copy(hw_addr, mac_addr);
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ ret = wx_add_mac_filter(wx, hw_addr, vf);
+ if (ret >= 0)
+ ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
+ else
+ eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);
+
+ return ret;
+}
+
+static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
+{
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+
+ vmolr |= WX_PSR_VM_L2CTL_BAM;
+ if (aupe)
+ vmolr |= WX_PSR_VM_L2CTL_AUPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
+{
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
+ WX_TDM_VLAN_INS_VLANA_DEFAULT;
+
+ wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
+}
+
+static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
+{
+ if (!vid && !add)
+ return 0;
+
+ return wx_set_vfta(wx, vid, vf, (bool)add);
+}
+
+static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ u32 pfvfspoof;
+
+ pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
+ if (enable)
+ pfvfspoof |= BIT(vf_bit);
+ else
+ pfvfspoof &= ~BIT(vf_bit);
+ wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
+}
+
+static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+	u32 reg = 0, n = vf * q_per_pool / 32;
+	u32 i;
+
+ reg = rd32(wx, WX_RDM_PF_QDE(n));
+ for (i = (vf * q_per_pool - n * 32);
+ i < ((vf + 1) * q_per_pool - n * 32);
+ i++) {
+ if (qde == 1)
+ reg |= qde << i;
+ else
+ reg &= qde << i;
+ }
+
+ wr32(wx, WX_RDM_PF_QDE(n), reg);
+}
+
+static void wx_clear_vmvir(struct wx *wx, u32 vf)
+{
+ wr32(wx, WX_TDM_VLAN_INS(vf), 0);
+}
+
+static void wx_ping_vf(struct wx *wx, int vf)
+{
+ u32 ping = WX_PF_CONTROL_MSG;
+
+ if (wx->vfinfo[vf].clear_to_send)
+ ping |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, &ping, 1, vf);
+}
+
+static void wx_set_vf_rx_tx(struct wx *wx, int vf)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+
+ reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
+ reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));
+
+ if (wx->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | BIT(vf_bit);
+ reg_req_rx = reg_cur_rx | BIT(vf_bit);
+ /* Enable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
+ } else {
+ reg_req_tx = BIT(vf_bit);
+ reg_req_rx = BIT(vf_bit);
+ /* Disable particular VF */
+ if (reg_cur_tx & reg_req_tx)
+ wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
+ if (reg_cur_rx & reg_req_rx)
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
+ }
+}
+
+static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ unsigned int default_tc = 0;
+
+ msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+ msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+ if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
+ msgbuf[WX_VF_TRANS_VLAN] = 1;
+ else
+ msgbuf[WX_VF_TRANS_VLAN] = 0;
+
+ /* notify VF of default queue */
+ msgbuf[WX_VF_DEF_QUEUE] = default_tc;
+
+ return 0;
+}
+
+static void wx_vf_reset_event(struct wx *wx, u16 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u8 num_tcs = netdev_get_num_tc(wx->netdev);
+
+ /* add PF assigned VLAN */
+ wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);
+
+ /* reset offloads to defaults */
+ wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ wx_clear_vmvir(wx, vf);
+ } else {
+ if (vfinfo->pf_qos || !num_tcs)
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ wx->default_up, vf);
+ }
+
+ /* reset multicast table array for vf */
+ wx->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ wx_set_rx_mode(wx->netdev);
+
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ /* reset VF api back to unknown */
+ wx->vfinfo[vf].vf_api = wx_mbox_api_null;
+}
+
+static void wx_vf_reset_msg(struct wx *wx, u16 vf)
+{
+ const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr;
+ struct net_device *dev = wx->netdev;
+ u32 msgbuf[5] = {0, 0, 0, 0, 0};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 reg = 0, index, vf_bit;
+ int pf_max_frame;
+
+ /* reset the filters for the device */
+ wx_vf_reset_event(wx, vf);
+
+ /* set vf mac address */
+ if (!is_zero_ether_addr(vf_mac))
+ wx_set_vf_mac(wx, vf, vf_mac);
+
+ index = WX_VF_REG_OFFSET(vf);
+ vf_bit = WX_VF_IND_SHIFT(vf);
+
+ /* force drop enable for all VF Rx queues */
+ wx_write_qde(wx, vf, 1);
+
+ /* set transmit and receive for vf */
+ wx_set_vf_rx_tx(wx, vf);
+
+ pf_max_frame = dev->mtu + ETH_HLEN;
+
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg = BIT(vf_bit);
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg);
+
+ /* enable VF mailbox for further messages */
+ wx->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = WX_VF_RESET;
+ if (!is_zero_ether_addr(vf_mac)) {
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+ } else {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_err(wx, "VF %d has no MAC address assigned", vf);
+ }
+
+ msgbuf[3] = wx->mac.mc_filter_type;
+ wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
+}
+
+static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ const u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int ret;
+
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+
+ if (wx->vfinfo[vf].pf_set_mac &&
+ memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
+ wx_err(wx,
+ "VF %d attempt to set a MAC but it already had a MAC.",
+ vf);
+ return -EBUSY;
+ }
+
+ ret = wx_set_vf_mac(wx, vf, new_mac);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
+ >> WX_VT_MSGINFO_SHIFT;
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ u32 vector_bit, vector_reg, mta_reg, i;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+
+ /* only so many hash values supported */
+ entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
+ vfinfo->num_vf_mc_hashes = entries;
+
+ for (i = 0; i < entries; i++)
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]);
+ vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]);
+ mta_reg = wx->mac.mta_shadow[vector_reg];
+ mta_reg |= BIT(vector_bit);
+ wx->mac.mta_shadow[vector_reg] = mta_reg;
+ wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
+ }
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
+{
+ u32 index, vf_bit, vfre;
+ u32 max_frs, reg_val;
+
+ /* determine VF receive enable location */
+ index = WX_VF_REG_OFFSET(vf);
+ vf_bit = WX_VF_IND_SHIFT(vf);
+
+ vfre = rd32(wx, WX_RDM_VF_RE(index));
+ vfre |= BIT(vf_bit);
+ wr32(wx, WX_RDM_VF_RE(index), vfre);
+
+ /* pull current max frame size from hardware */
+ max_frs = DIV_ROUND_UP(max_frame, 1024);
+ reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
+ if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
+ wr32(wx, WX_MAC_WDG_TIMEOUT,
+ max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
+}
+
+static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
+{
+ int regindex;
+ u32 vlvf;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ /* Return a negative value if not found */
+ if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
+ regindex = -EINVAL;
+
+ return regindex;
+}
+
+static int wx_set_vf_macvlan(struct wx *wx,
+ u16 vf, int index, unsigned char *mac_addr)
+{
+ struct vf_macvlans *entry;
+ struct list_head *pos;
+ int retval = 0;
+
+ if (index <= 1) {
+ list_for_each(pos, &wx->vf_mvs.mvlist) {
+ entry = list_entry(pos, struct vf_macvlans, mvlist);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ entry->is_macvlan = false;
+ wx_del_mac_filter(wx, entry->vf_macvlan, vf);
+ }
+ }
+ }
+
+ if (!index)
+ return 0;
+
+ entry = NULL;
+ list_for_each(pos, &wx->vf_mvs.mvlist) {
+ entry = list_entry(pos, struct vf_macvlans, mvlist);
+ if (entry->free)
+ break;
+ }
+
+ if (!entry || !entry->free)
+ return -ENOSPC;
+
+ retval = wx_add_mac_filter(wx, mac_addr, vf);
+ if (retval >= 0) {
+ entry->free = false;
+ entry->is_macvlan = true;
+ entry->vf = vf;
+ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+ }
+
+ return retval;
+}
+
+static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
+ int ret;
+
+ if (add)
+ wx->vfinfo[vf].vlan_count++;
+ else if (wx->vfinfo[vf].vlan_count)
+ wx->vfinfo[vf].vlan_count--;
+
+ /* in case of promiscuous mode any VLAN filter set for a VF must
+ * also have the PF pool added to it.
+ */
+ if (add && wx->netdev->flags & IFF_PROMISC)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+
+ ret = wx_set_vf_vlan(wx, add, vid, vf);
+ if (!ret && wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vlan_anti_spoofing(wx, true, vf);
+
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && wx->netdev->flags & IFF_PROMISC) {
+ u32 bits = 0, vlvf;
+ int reg_ndx;
+
+ reg_ndx = wx_find_vlvf_entry(wx, vid);
+ if (reg_ndx < 0)
+ return -ENOSPC;
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ if (VMDQ_P(0) < 32) {
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ bits &= ~BIT(VMDQ_P(0));
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ } else {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ bits &= ~BIT(VMDQ_P(0) % 32);
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ }
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid && !bits)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+ }
+
+ return 0;
+}
+
+static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
+ WX_VT_MSGINFO_SHIFT;
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int err;
+
+ if (wx->vfinfo[vf].pf_set_mac && index > 0) {
+ wx_err(wx, "VF %d request MACVLAN filter but is denied\n", vf);
+ return -EINVAL;
+ }
+
+	/* A non-zero index indicates the VF is setting a filter */
+ if (index) {
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+ /* If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives.
+ */
+ if (wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, vf, false);
+ }
+
+ err = wx_set_vf_macvlan(wx, vf, index, new_mac);
+ if (err == -ENOSPC)
+ wx_err(wx,
+ "VF %d request MACVLAN filter but there is no space\n",
+ vf);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int api = msgbuf[1];
+
+ switch (api) {
+ case wx_mbox_api_13:
+ wx->vfinfo[vf].vf_api = api;
+ return 0;
+ default:
+ wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
+ return -EINVAL;
+ }
+}
+
+static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ msgbuf[1] = wx->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
+static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ unsigned long fw_version = 0ULL;
+ int ret = 0;
+
+ ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
+ if (ret)
+ return -EOPNOTSUPP;
+ msgbuf[1] = fw_version;
+
+ return 0;
+}
+
+static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int xcast_mode = msgbuf[1];
+ u32 vmolr, disable, enable;
+
+ if (wx->vfinfo[vf].xcast_mode == xcast_mode)
+ return 0;
+
+ switch (xcast_mode) {
+ case WXVF_XCAST_MODE_NONE:
+ disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ enable = 0;
+ break;
+ case WXVF_XCAST_MODE_MULTI:
+ disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
+ break;
+ case WXVF_XCAST_MODE_ALLMULTI:
+ disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE;
+ break;
+ case WXVF_XCAST_MODE_PROMISC:
+ disable = 0;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+
+ wx->vfinfo[vf].xcast_mode = xcast_mode;
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
+static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
+{
+ u16 mbx_size = WX_VXMAILBOX_SIZE;
+ u32 msgbuf[WX_VXMAILBOX_SIZE];
+ int retval;
+
+ retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
+ if (retval) {
+ wx_err(wx, "Error receiving message from VF\n");
+ return;
+ }
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
+ return;
+
+ if (msgbuf[0] == WX_VF_RESET) {
+ wx_vf_reset_msg(wx, vf);
+ return;
+ }
+
+ /* until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+ if (!wx->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_write_mbx_pf(wx, msgbuf, 1, vf);
+ return;
+ }
+
+ switch ((msgbuf[0] & U16_MAX)) {
+ case WX_VF_SET_MAC_ADDR:
+ retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_MULTICAST:
+ wx_set_vf_multicasts(wx, msgbuf, vf);
+ retval = 0;
+ break;
+ case WX_VF_SET_VLAN:
+ retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_LPE:
+ wx_set_vf_lpe(wx, msgbuf[1], vf);
+ retval = 0;
+ break;
+ case WX_VF_SET_MACVLAN:
+ retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_API_NEGOTIATE:
+ retval = wx_negotiate_vf_api(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_QUEUES:
+ retval = wx_get_vf_queues(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_LINK_STATE:
+ retval = wx_get_vf_link_state(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_FW_VERSION:
+ retval = wx_get_fw_version(wx, msgbuf, vf);
+ break;
+ case WX_VF_UPDATE_XCAST_MODE:
+ retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
+ break;
+ case WX_VF_BACKUP:
+ break;
+ default:
+ wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+
+ wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
+}
+
+static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
+{
+ u32 msg = WX_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!wx->vfinfo[vf].clear_to_send)
+ wx_write_mbx_pf(wx, &msg, 1, vf);
+}
+
+void wx_msg_task(struct wx *wx)
+{
+ u16 vf;
+
+ for (vf = 0; vf < wx->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!wx_check_for_rst_pf(wx, vf))
+ wx_vf_reset_event(wx, vf);
+
+ /* process any messages pending */
+ if (!wx_check_for_msg_pf(wx, vf))
+ wx_rcv_msg_from_vf(wx, vf);
+
+ /* process any acks */
+ if (!wx_check_for_ack_pf(wx, vf))
+ wx_rcv_ack_from_vf(wx, vf);
+ }
+}
+EXPORT_SYMBOL(wx_msg_task);
+
+void wx_disable_vf_rx_tx(struct wx *wx)
+{
+ wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
+ wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
+ wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
+ }
+}
+EXPORT_SYMBOL(wx_disable_vf_rx_tx);
+
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
+{
+ u32 msgbuf[2] = {0, 0};
+ u16 i;
+
+ if (!wx->num_vfs)
+ return;
+ msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
+ if (link_up)
+ msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up;
+ if (wx->notify_down)
+ msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (wx->vfinfo[i].clear_to_send)
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, msgbuf, 2, i);
+ }
+}
+EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
+
+static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
+{
+ wx->vfinfo[vf].link_state = state;
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (netif_running(wx->netdev))
+ wx->vfinfo[vf].link_enable = true;
+ else
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ wx->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ }
+ /* restart the VF */
+ wx->vfinfo[vf].clear_to_send = false;
+ wx_ping_vf(wx, vf);
+
+ wx_set_vf_rx_tx(wx, vf);
+}
+
+void wx_set_all_vfs(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
+}
+EXPORT_SYMBOL(wx_set_all_vfs);
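
The VF-side driver is not part of this patch, but for clarity, here is a hypothetical sketch of the sender that wx_set_vf_multicasts() above expects: the entry count travels in the MSGINFO bits of msgbuf[0], and the multicast hash values follow as packed u16s from msgbuf[1] on. The function name is made up; only the macros are real.

/* Hypothetical VF-side composer for WX_VF_SET_MULTICAST (illustrative). */
static void wxvf_fill_mc_msg(u32 *msgbuf, const u16 *hashes, u16 cnt)
{
	u16 *hash_list = (u16 *)&msgbuf[1];
	u16 i;

	cnt = min_t(u16, cnt, WX_MAX_VF_MC_ENTRIES);
	msgbuf[0] = WX_VF_SET_MULTICAST | ((u32)cnt << WX_VT_MSGINFO_SHIFT);
	for (i = 0; i < cnt; i++)
		hash_list[i] = hashes[i];
}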
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
new file mode 100644
index 000000000000..8a3a47bb5815
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_SRIOV_H_
+#define _WX_SRIOV_H_
+
+#define WX_VF_ENABLE_CHECK(_m) FIELD_GET(BIT(31), (_m))
+#define WX_VF_NUM_GET(_m) FIELD_GET(GENMASK(5, 0), (_m))
+#define WX_VF_ENABLE BIT(31)
+
+void wx_disable_sriov(struct wx *wx);
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void wx_msg_task(struct wx *wx);
+void wx_disable_vf_rx_tx(struct wx *wx);
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
+void wx_set_all_vfs(struct wx *wx);
+
+#endif /* _WX_SRIOV_H_ */
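
wx_vf_configuration() in wx_sriov.c receives its event_mask in the encoding these macros define: the VF number in bits 5:0 and the enable flag in bit 31 (see the `(i | WX_VF_ENABLE)` call site). A quick illustrative decode:

/* Illustrative only: encode/decode the event_mask convention. */
static void wx_event_mask_demo(void)
{
	u32 event_mask = 5 | WX_VF_ENABLE;		/* "enable VF 5" */
	bool enable = WX_VF_ENABLE_CHECK(event_mask);	/* true */
	u32 vfn = WX_VF_NUM_GET(event_mask);		/* 5 */
}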
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 4c545b2aa997..7730c9fc3e02 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -20,8 +20,13 @@
/* MSI-X capability fields masks */
#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
#define WX_PCI_LINK_STATUS 0xB2
+#define WX_MAX_PF_MACVLANS 15
+#define WX_MAX_VF_MC_ENTRIES 30
/**************** Global Registers ****************************/
+#define WX_VF_REG_OFFSET(_v) FIELD_GET(GENMASK(15, 5), (_v))
+#define WX_VF_IND_SHIFT(_v) FIELD_GET(GENMASK(4, 0), (_v))
+
/* chip control Registers */
#define WX_MIS_PWR 0x10000
#define WX_MIS_RST 0x1000C
@@ -76,6 +81,9 @@
#define WX_MAC_LXONOFFRXC 0x11E0C
/*********************** Receive DMA registers **************************/
+#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4))
+#define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4))
+#define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4))
#define WX_RDM_DRP_PKT 0x12500
#define WX_RDM_PKT_CNT 0x12504
#define WX_RDM_BYTE_CNT_LSB 0x12508
@@ -84,12 +92,17 @@
/************************* Port Registers ************************************/
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
+#define WX_CFG_PORT_CTL_PFRSTD BIT(14)
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of VTs */
+#define WX_CFG_PORT_CTL_NUM_VT_NONE 0
+#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
+#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
+#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
/* GPIO Registers */
#define WX_GPIO_DR 0x14800
@@ -112,6 +125,11 @@
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL 0x18000
+#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4))
+#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4))
+#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4))
+#define WX_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4))
+
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
@@ -165,6 +183,7 @@
/******************************* PSR Registers *******************************/
/* psr control */
#define WX_PSR_CTL 0x15000
+#define WX_PSR_VM_CTL 0x151B0
/* Header split receive */
#define WX_PSR_CTL_SW_EN BIT(18)
#define WX_PSR_CTL_RSC_ACK BIT(17)
@@ -201,12 +220,17 @@
#define WX_PSR_1588_CTL_VALID BIT(0)
/* mcast/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
+#define WX_PSR_MC_TBL_REG(_i) FIELD_GET(GENMASK(11, 5), (_i))
+#define WX_PSR_MC_TBL_BIT(_i) FIELD_GET(GENMASK(4, 0), (_i))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */
+#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7)
/* VM L2 control */
#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */
#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */
+#define WX_PSR_VM_L2CTL_VPE BIT(7) /* vlan promiscuous mode */
#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */
#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */
#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */
@@ -245,10 +269,12 @@
#define WX_PSR_VLAN_SWC 0x16220
#define WX_PSR_VLAN_SWC_VM_L 0x16224
#define WX_PSR_VLAN_SWC_VM_H 0x16228
+#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4))
#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */
/* VLAN pool filtering masks */
#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */
#define WX_PSR_VLAN_SWC_ENTRIES 64
+#define WX_PSR_VLAN_SWC_VLANID_MASK GENMASK(11, 0)
/********************************* RSEC **************************************/
/* general rsec */
@@ -259,6 +285,13 @@
#define WX_RSC_ST 0x17004
#define WX_RSC_ST_RSEC_RDY BIT(0)
+/*********************** Transmit DMA registers **************************/
+/* transmit global control */
+#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4))
+#define WX_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4))
+/* Per VF Port VLAN insertion rules */
+#define WX_TDM_VLAN_INS_VLANA_DEFAULT BIT(30) /* Always use default VLAN*/
+
/****************************** TDB ******************************************/
#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
@@ -328,6 +361,9 @@
#define WX_MAC_WDG_TIMEOUT 0x1100C
#define WX_MAC_RX_FLOW_CTRL 0x11090
#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
+
+#define WX_MAC_WDG_TIMEOUT_WTO_MASK GENMASK(3, 0)
+#define WX_MAC_WDG_TIMEOUT_WTO_DELTA 2
/* MDIO Registers */
#define WX_MSCA 0x11200
#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
@@ -417,6 +453,15 @@ enum WX_MSCA_CMD_value {
/* Number of 80 microseconds we wait for PCI Express master disable */
#define WX_PCI_MASTER_DISABLE_TIMEOUT 80000
+#define WX_RSS_64Q_MASK 0x3F
+#define WX_RSS_8Q_MASK 0x7
+#define WX_RSS_4Q_MASK 0x3
+#define WX_RSS_2Q_MASK 0x1
+#define WX_RSS_DISABLED_MASK 0x0
+
+#define WX_VMDQ_4Q_MASK 0x7C
+#define WX_VMDQ_2Q_MASK 0x7E
+
/****************** Manageablility Host Interface defines ********************/
#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
@@ -484,7 +529,7 @@ enum WX_MSCA_CMD_value {
#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
-#define VMDQ_P(p) p
+#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset)
/* Supported Rx Buffer Sizes */
#define WX_RXBUFFER_256 256 /* Used for skb receive header */
@@ -778,6 +823,10 @@ struct wx_bus_info {
u16 device;
};
+struct wx_mbx_info {
+ u16 size;
+};
+
struct wx_thermal_sensor_data {
s16 temp;
s16 alarm_thresh;
@@ -789,13 +838,14 @@ enum wx_mac_type {
wx_mac_sp,
wx_mac_em,
wx_mac_aml,
+ wx_mac_aml40,
};
-enum sp_media_type {
- sp_media_unknown = 0,
- sp_media_fiber,
- sp_media_copper,
- sp_media_backplane
+enum wx_media_type {
+ wx_media_unknown = 0,
+ wx_media_fiber,
+ wx_media_copper,
+ wx_media_backplane
};
enum em_mac_type {
@@ -1051,6 +1101,7 @@ struct wx_ring_feature {
enum wx_ring_f_enum {
RING_F_NONE = 0,
+ RING_F_VMDQ,
RING_F_RSS,
RING_F_FDIR,
RING_F_ARRAY_SIZE /* must be last in enum set */
@@ -1103,11 +1154,43 @@ enum wx_state {
WX_STATE_SWFW_BUSY,
WX_STATE_PTP_RUNNING,
WX_STATE_PTP_TX_IN_PROGRESS,
+ WX_STATE_SERVICE_SCHED,
WX_STATE_NBITS /* must be last */
};
+struct vf_data_storage {
+ struct pci_dev *vfdev;
+ unsigned char vf_mac_addr[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool link_enable;
+ bool trusted;
+ int xcast_mode;
+ unsigned int vf_api;
+ bool clear_to_send;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ bool pf_set_mac;
+
+ u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 vlan_count;
+ int link_state;
+};
+
+struct vf_macvlans {
+ struct list_head mvlist;
+ int vf;
+ bool free;
+ bool is_macvlan;
+ u8 vf_macvlan[ETH_ALEN];
+};
+
enum wx_pf_flags {
+ WX_FLAG_MULTI_64_FUNC,
WX_FLAG_SWFW_RING,
+ WX_FLAG_VMDQ_ENABLED,
+ WX_FLAG_VLAN_PROMISC,
+ WX_FLAG_SRIOV_ENABLED,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
@@ -1115,6 +1198,8 @@ enum wx_pf_flags {
WX_FLAG_RX_HWTSTAMP_ENABLED,
WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
WX_FLAG_PTP_PPS_ENABLED,
+ WX_FLAG_NEED_LINK_CONFIG,
+ WX_FLAG_NEED_SFP_RESET,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1128,9 +1213,10 @@ struct wx {
struct pci_dev *pdev;
struct net_device *netdev;
struct wx_bus_info bus;
+ struct wx_mbx_info mbx;
struct wx_mac_info mac;
enum em_mac_type mac_type;
- enum sp_media_type media_type;
+ enum wx_media_type media_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
struct wx_fc_info fc;
@@ -1151,6 +1237,9 @@ struct wx {
u8 swfw_index;
/* PHY stuff */
+ bool notify_down;
+ int adv_speed;
+ int adv_duplex;
unsigned int link;
int speed;
int duplex;
@@ -1182,6 +1271,8 @@ struct wx {
struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
struct wx_ring *rx_ring[64];
struct wx_q_vector *q_vector[64];
+ int num_rx_pools;
+ int num_rx_queues_per_pool;
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
@@ -1203,6 +1294,7 @@ struct wx {
u32 wol;
u16 bd_number;
+ bool default_up;
struct wx_hw_stats stats;
u64 tx_busy;
@@ -1211,10 +1303,16 @@ struct wx {
u64 hw_csum_rx_good;
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
+ struct vf_macvlans vf_mvs;
+ struct vf_macvlans *mv_list;
+ unsigned long fwd_bitmask;
u32 atr_sample_rate;
void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
void (*configure_fdir)(struct wx *wx);
+ int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
int (*ptp_setup_sdp)(struct wx *wx);
@@ -1239,6 +1337,9 @@ struct wx {
struct ptp_clock_info ptp_caps;
struct kernel_hwtstamp_config tstamp_config;
struct sk_buff *ptp_tx_skb;
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
};
#define WX_INTR_ALL (~0ULL)
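
Several of the per-VF controls added here (WX_RDM_VF_RE, WX_TDM_VF_TE, WX_TDM_VLAN_AS) keep one bit per VF across consecutive 32-bit registers, which is why WX_VF_REG_OFFSET(v) reduces to v / 32 and WX_VF_IND_SHIFT(v) to v % 32. Likewise the redefined VMDQ_P(0) now points at the PF's own pool, which sits after the VF pools (offset == num_vfs). A worked example, illustrative only:

/* Illustrative: addressing VF 37 in a per-VF bitmap register. */
static void wx_vf_bitmap_demo(void)
{
	u32 vf = 37;
	u32 index = WX_VF_REG_OFFSET(vf);	/* 37 / 32 == 1 */
	u32 vf_bit = WX_VF_IND_SHIFT(vf);	/* 37 % 32 == 5 */

	/* enabling Tx for VF 37 would set BIT(vf_bit) in WX_TDM_VF_TE(index) */
}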
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 91b3055a5a9f..b5022c49dc5e 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -15,6 +15,8 @@
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -129,6 +131,10 @@ static int ngbe_sw_init(struct wx *wx)
wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->setup_tc = ngbe_setup_tc;
+ set_bit(0, &wx->fwd_bitmask);
+
return 0;
}
@@ -200,12 +206,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
+static irqreturn_t __ngbe_msix_misc(struct wx *wx, u32 eicr)
{
- struct wx *wx = data;
- u32 eicr;
-
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (eicr & NGBE_PX_MISC_IC_VF_MBOX)
+ wx_msg_task(wx);
if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC))
wx_ptp_check_pps_event(wx);
@@ -217,6 +221,35 @@ static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t ngbe_msix_misc(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
+ return __ngbe_msix_misc(wx, eicr);
+}
+
+static irqreturn_t ngbe_misc_and_queue(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (!eicr) {
+ /* queue */
+ q_vector = wx->q_vector[0];
+ napi_schedule_irqoff(&q_vector->napi);
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, true);
+ return IRQ_HANDLED;
+ }
+
+ return __ngbe_msix_misc(wx, eicr);
+}
+
/**
* ngbe_request_msix_irqs - Initialize MSI-X interrupts
* @wx: board private structure
@@ -249,8 +282,16 @@ static int ngbe_request_msix_irqs(struct wx *wx)
}
}
- err = request_irq(wx->msix_entry->vector,
- ngbe_msix_other, 0, netdev->name, wx);
+	/* Due to the hardware design, when num_vfs < 7 the PF can use vector 0
+	 * for misc and vector 1 for the queue. But when num_vfs == 7, vector[1]
+	 * is assigned to VF6, so misc and queue must share interrupt vector[0].
+	 */
+ if (wx->num_vfs == 7)
+ err = request_irq(wx->msix_entry->vector,
+ ngbe_misc_and_queue, 0, netdev->name, wx);
+ else
+ err = request_irq(wx->msix_entry->vector,
+ ngbe_msix_misc, 0, netdev->name, wx);
if (err) {
wx_err(wx, "request_irq for msix_other failed: %d\n", err);
@@ -302,6 +343,22 @@ static void ngbe_disable_device(struct wx *wx)
struct net_device *netdev = wx->netdev;
u32 i;
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ wx->notify_down = true;
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+ wx->notify_down = false;
+
+ /* Disable all VFTE/VFRE TX/RX */
+ wx_disable_vf_rx_tx(wx);
+ }
+
/* disable all enabled rx queues */
for (i = 0; i < wx->num_rx_queues; i++)
/* this call also flushes the previous write */
@@ -324,12 +381,19 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
+static void ngbe_reset(struct wx *wx)
+{
+ wx_flush_sw_mac_table(wx);
+ wx_mac_set_default_filter(wx, wx->mac.addr);
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
+}
+
void ngbe_down(struct wx *wx)
{
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
- if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
- wx_ptp_reset(wx);
+ ngbe_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
@@ -352,6 +416,11 @@ void ngbe_up(struct wx *wx)
ngbe_sfp_modules_txrx_powerctl(wx, true);
phylink_start(wx->phylink);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD);
+ if (wx->num_vfs)
+ wx_ping_all_vfs_with_link_status(wx, false);
}
/**
@@ -518,6 +587,7 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_fix_features = wx_fix_features,
+ .ndo_features_check = wx_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
@@ -596,6 +666,10 @@ static int ngbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+	/* The emerald supports up to 8 VFs per PF, but the physical
+	 * function also needs one pool for basic networking.
+ */
+ pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT);
wx->driver_name = ngbe_driver_name;
ngbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &ngbe_netdev_ops;
@@ -744,6 +818,7 @@ static void ngbe_remove(struct pci_dev *pdev)
struct net_device *netdev;
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev,
@@ -803,6 +878,7 @@ static struct pci_driver ngbe_driver = {
.suspend = ngbe_suspend,
.resume = ngbe_resume,
.shutdown = ngbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(ngbe_driver);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index ea1d7e9a91f3..c63bb6e6f405 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -9,6 +9,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
@@ -70,6 +71,8 @@ static void ngbe_mac_link_down(struct phylink_config *config,
wx->speed = SPEED_UNKNOWN;
if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void ngbe_mac_link_up(struct phylink_config *config,
@@ -114,6 +117,8 @@ static void ngbe_mac_link_up(struct phylink_config *config,
wx->last_rx_ptp_check = jiffies;
if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static const struct phylink_mac_ops ngbe_mac_ops = {
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index 992adbb98c7d..bb74263f0498 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -73,12 +73,14 @@
#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11)
#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
+#define NGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define NGBE_PX_MISC_IEN_GPIO BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
NGBE_PX_MISC_IEN_DEV_RST | \
NGBE_PX_MISC_IEN_TIMESYNC | \
NGBE_PX_MISC_IEN_ETH_LK | \
NGBE_PX_MISC_IEN_INT_ERR | \
+ NGBE_PX_MISC_IC_VF_MBOX | \
NGBE_PX_MISC_IEN_GPIO)
/* Extended Interrupt Cause Read */
@@ -134,6 +136,7 @@
#define NGBE_MAX_RXD 8192
#define NGBE_MIN_RXD 128
+#define NGBE_MAX_VFS_DRV_LIMIT 7
extern char ngbe_driver_name[];
void ngbe_down(struct wx *wx);
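
The value 7 follows from the pool arithmetic spelled out in ngbe_main.c above: the emerald exposes 8 pools in total and the PF keeps one for itself, leaving 8 - 1 = 7 for VFs. Illustrative only; the two pool macros below are made-up names, not part of the patch:

#define NGBE_POOLS_TOTAL	8	/* hypothetical, not in the patch */
#define NGBE_POOLS_PF		1	/* hypothetical, not in the patch */
/* NGBE_MAX_VFS_DRV_LIMIT == NGBE_POOLS_TOTAL - NGBE_POOLS_PF == 7 */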
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index f74576fe7062..c757fa95e58e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -11,4 +11,5 @@ txgbe-objs := txgbe_main.o \
txgbe_phy.o \
txgbe_irq.o \
txgbe_fdir.o \
- txgbe_ethtool.o
+ txgbe_ethtool.o \
+ txgbe_aml.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
new file mode 100644
index 000000000000..7dbcf41750c1
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/phylink.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
+#include "txgbe_type.h"
+#include "txgbe_aml.h"
+#include "txgbe_hw.h"
+
+void txgbe_gpio_init_aml(struct wx *wx)
+{
+ u32 status;
+
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3);
+ wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3);
+
+ status = rd32(wx, WX_GPIO_INTSTATUS);
+ for (int i = 0; i < 6; i++) {
+ if (status & BIT(i))
+ wr32(wx, WX_GPIO_EOI, BIT(i));
+ }
+}
+
+irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status;
+
+ wr32(wx, WX_GPIO_INTMASK, 0xFF);
+ status = rd32(wx, WX_GPIO_INTSTATUS);
+ if (status & TXGBE_GPIOBIT_2) {
+ set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+ wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2);
+ wx_service_event_schedule(wx);
+ }
+ if (status & TXGBE_GPIOBIT_3) {
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+ wx_service_event_schedule(wx);
+ wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_3);
+ }
+
+ wr32(wx, WX_GPIO_INTMASK, 0);
+ return IRQ_HANDLED;
+}
+
+int txgbe_test_hostif(struct wx *wx)
+{
+ struct txgbe_hic_ephy_getlink buffer;
+
+ if (wx->mac.type != wx_mac_aml)
+ return 0;
+
+ buffer.hdr.cmd = FW_PHY_GET_LINK_CMD;
+ buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_getlink) -
+ sizeof(struct wx_hic_hdr);
+ buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer)
+{
+ buffer->hdr.cmd = FW_READ_SFP_INFO_CMD;
+ buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) -
+ sizeof(struct wx_hic_hdr);
+ buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ return wx_host_interface_command(wx, (u32 *)buffer,
+ sizeof(struct txgbe_hic_i2c_read),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int duplex)
+{
+ struct txgbe_hic_ephy_setlink buffer;
+
+ buffer.hdr.cmd = FW_PHY_SET_LINK_CMD;
+ buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_setlink) -
+ sizeof(struct wx_hic_hdr);
+ buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ switch (speed) {
+ case SPEED_25000:
+ buffer.speed = TXGBE_LINK_SPEED_25GB_FULL;
+ break;
+ case SPEED_10000:
+ buffer.speed = TXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ }
+
+ buffer.fec_mode = TXGBE_PHY_FEC_AUTO;
+ buffer.autoneg = autoneg;
+ buffer.duplex = duplex;
+
+ return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static void txgbe_get_link_capabilities(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+
+ if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces))
+ wx->adv_speed = SPEED_25000;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces))
+ wx->adv_speed = SPEED_10000;
+ else
+ wx->adv_speed = SPEED_UNKNOWN;
+
+ wx->adv_duplex = wx->adv_speed == SPEED_UNKNOWN ?
+ DUPLEX_HALF : DUPLEX_FULL;
+}
+
+static void txgbe_get_phy_link(struct wx *wx, int *speed)
+{
+ u32 status;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ if (!(status & TXGBE_CFG_PORT_ST_LINK_UP))
+ *speed = SPEED_UNKNOWN;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G)
+ *speed = SPEED_25000;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G)
+ *speed = SPEED_10000;
+ else
+ *speed = SPEED_UNKNOWN;
+}
+
+int txgbe_set_phy_link(struct wx *wx)
+{
+ int speed, err;
+ u32 gpio;
+
+ /* Check RX signal */
+ gpio = rd32(wx, WX_GPIO_EXT);
+ if (gpio & TXGBE_GPIOBIT_3)
+ return -ENODEV;
+
+ txgbe_get_link_capabilities(wx);
+ if (wx->adv_speed == SPEED_UNKNOWN)
+ return -ENODEV;
+
+ txgbe_get_phy_link(wx, &speed);
+ if (speed == wx->adv_speed)
+ return 0;
+
+ err = txgbe_set_phy_link_hostif(wx, wx->adv_speed, 0, wx->adv_duplex);
+ if (err) {
+ wx_err(wx, "Failed to setup link\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct txgbe *txgbe = wx->priv;
+
+ if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE |
+ TXGBE_SFF_25GBASEER_CAPABLE |
+ TXGBE_SFF_25GBASELR_CAPABLE)) {
+ phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
+ phylink_set(modes, 10000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
+ phylink_set(modes, 10000baseLR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+
+ if (phy_interface_empty(interfaces)) {
+ wx_err(wx, "unsupported SFP module\n");
+ return -EINVAL;
+ }
+
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+ phylink_set(modes, FIBRE);
+ txgbe->link_port = PORT_FIBRE;
+
+ if (!linkmode_equal(txgbe->sfp_support, modes)) {
+ linkmode_copy(txgbe->sfp_support, modes);
+ phy_interface_and(txgbe->sfp_interfaces,
+ wx->phylink_config.supported_interfaces,
+ interfaces);
+ linkmode_copy(txgbe->advertising, modes);
+
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+ }
+
+ return 0;
+}
+
+int txgbe_identify_sfp(struct wx *wx)
+{
+ struct txgbe_hic_i2c_read buffer;
+ struct txgbe_sfp_id *id;
+ int err = 0;
+ u32 gpio;
+
+ gpio = rd32(wx, WX_GPIO_EXT);
+ if (gpio & TXGBE_GPIOBIT_2)
+ return -ENODEV;
+
+ err = txgbe_identify_sfp_hostif(wx, &buffer);
+ if (err) {
+ wx_err(wx, "Failed to identify SFP module\n");
+ return err;
+ }
+
+ id = &buffer.id;
+ if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) {
+ wx_err(wx, "Invalid SFP module\n");
+ return -ENODEV;
+ }
+
+ err = txgbe_sfp_to_linkmodes(wx, id);
+ if (err)
+ return err;
+
+ if (gpio & TXGBE_GPIOBIT_3)
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+
+ return 0;
+}
+
+void txgbe_setup_link(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+
+ phy_interface_zero(txgbe->sfp_interfaces);
+ linkmode_zero(txgbe->sfp_support);
+
+ txgbe_identify_sfp(wx);
+}
+
+static void txgbe_get_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct wx *wx = phylink_to_wx(config);
+ int speed;
+
+ txgbe_get_phy_link(wx, &speed);
+ state->link = speed != SPEED_UNKNOWN;
+ state->speed = speed;
+ state->duplex = state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
+}
+
+static void txgbe_reconfig_mac(struct wx *wx)
+{
+ u32 wdg, fc;
+
+ wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ fc = rd32(wx, WX_MAC_RX_FLOW_CTRL);
+
+ wr32(wx, WX_MIS_RST, TXGBE_MIS_RST_MAC_RST(wx->bus.func));
+	/* wait for the MAC reset to complete */
+ usleep_range(1000, 1500);
+
+ wr32m(wx, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_CTL_LINK_STS_MOD,
+ TXGBE_MAC_MISC_CTL_LINK_BOTH);
+ wx_reset_mac(wx);
+
+ wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+ wr32(wx, WX_MAC_RX_FLOW_CTRL, fc);
+}
+
+static void txgbe_mac_link_up_aml(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct wx *wx = phylink_to_wx(config);
+ u32 txcfg;
+
+ wx_fc_enable(wx, tx_pause, rx_pause);
+
+ txgbe_reconfig_mac(wx);
+
+ txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
+ txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
+
+ switch (speed) {
+ case SPEED_25000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ break;
+ case SPEED_10000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_10G;
+ break;
+ default:
+ break;
+ }
+
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
+ wr32(wx, TXGBE_AML_MAC_TX_CFG, txcfg | TXGBE_AML_MAC_TX_CFG_TE);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
+}
+
+static void txgbe_mac_link_down_aml(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct wx *wx = phylink_to_wx(config);
+
+ wr32m(wx, TXGBE_AML_MAC_TX_CFG, TXGBE_AML_MAC_TX_CFG_TE, 0);
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+}
+
+static void txgbe_mac_config_aml(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static const struct phylink_mac_ops txgbe_mac_ops_aml = {
+ .mac_config = txgbe_mac_config_aml,
+ .mac_link_down = txgbe_mac_link_down_aml,
+ .mac_link_up = txgbe_mac_link_up_aml,
+};
+
+int txgbe_phylink_init_aml(struct txgbe *txgbe)
+{
+ struct phylink_link_state state;
+ struct phylink_config *config;
+ struct wx *wx = txgbe->wx;
+ phy_interface_t phy_mode;
+ struct phylink *phylink;
+ int err;
+
+ config = &wx->phylink_config;
+ config->dev = &wx->netdev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_25000FD | MAC_10000FD |
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ config->get_fixed_state = txgbe_get_link_state;
+
+ phy_mode = PHY_INTERFACE_MODE_25GBASER;
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
+
+ phylink = phylink_create(config, NULL, phy_mode, &txgbe_mac_ops_aml);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ state.speed = SPEED_25000;
+ state.duplex = DUPLEX_FULL;
+ err = phylink_set_fixed_link(phylink, &state);
+ if (err) {
+ wx_err(wx, "Failed to set fixed link\n");
+ return err;
+ }
+
+ wx->phylink = phylink;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
new file mode 100644
index 000000000000..25d4971ca0d9
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_AML_H_
+#define _TXGBE_AML_H_
+
+void txgbe_gpio_init_aml(struct wx *wx);
+irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data);
+int txgbe_test_hostif(struct wx *wx);
+int txgbe_set_phy_link(struct wx *wx);
+int txgbe_identify_sfp(struct wx *wx);
+void txgbe_setup_link(struct wx *wx);
+int txgbe_phylink_init_aml(struct txgbe *txgbe);
+
+#endif /* _TXGBE_AML_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index 78999d484f18..a4753402660e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -12,6 +12,31 @@
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
+int txgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct txgbe *txgbe = wx->priv;
+ int err;
+
+ if (wx->mac.type == wx_mac_aml40)
+ return -EOPNOTSUPP;
+
+ err = wx_get_link_ksettings(netdev, cmd);
+ if (err)
+ return err;
+
+ if (wx->mac.type == wx_mac_sp)
+ return 0;
+
+ cmd->base.port = txgbe->link_port;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support);
+ linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);
+
+ return 0;
+}
+
static int txgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -342,12 +367,19 @@ static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
queue = TXGBE_RDB_FDIR_DROP_QUEUE;
} else {
u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
- if (ring >= wx->num_rx_queues)
+ if (!vf && ring >= wx->num_rx_queues)
+ return -EINVAL;
+ else if (vf && (vf > wx->num_vfs ||
+ ring >= wx->num_rx_queues_per_pool))
return -EINVAL;
/* Map the ring onto the absolute queue index */
- queue = wx->rx_ring[ring]->reg_idx;
+ if (!vf)
+ queue = wx->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring;
}
/* Don't allow indexes to exist outside of available space */
@@ -510,7 +542,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.get_drvinfo = wx_get_drvinfo,
.nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = wx_get_link_ksettings,
+ .get_link_ksettings = txgbe_get_link_ksettings,
.set_link_ksettings = wx_set_link_ksettings,
.get_sset_count = wx_get_sset_count,
.get_strings = wx_get_strings,
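
With SR-IOV active, an fdir ring_cookie can name both a VF and a ring inside that VF's pool; vf == 0 still means the PF, and for vf > 0 the absolute queue is ((vf - 1) * num_rx_queues_per_pool) + ring, exactly as the new branch above computes. A worked example, assuming 2 Rx queues per pool:

/* Illustrative: cookie targets VF 3, ring 1, with 2 queues per pool. */
static u32 txgbe_fdir_cookie_demo(void)
{
	u32 num_rx_queues_per_pool = 2;	/* assumed pool size */
	u8 vf = 3;			/* from ethtool_get_flow_spec_ring_vf() */
	u32 ring = 1;			/* from ethtool_get_flow_spec_ring() */

	return ((vf - 1) * num_rx_queues_per_pool) + ring;	/* == 5 */
}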
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
index ace1b3571012..66dbc8ec1bb6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
@@ -4,6 +4,8 @@
#ifndef _TXGBE_ETHTOOL_H_
#define _TXGBE_ETHTOOL_H_
+int txgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
void txgbe_set_ethtool_ops(struct net_device *netdev);
#endif /* _TXGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
index ef50efbaec0f..a84010828551 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
@@ -307,6 +307,7 @@ void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype)
int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
{
u32 fdirm = 0, fdirtcpm = 0, flex = 0;
+ int index, offset;
/* Program the relevant mask registers. If src/dst_port or src/dst_addr
* are zero, then assume a full mask for that field. Also assume that
@@ -352,15 +353,17 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
- flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
- flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ index = VMDQ_P(0) / 4;
+ offset = VMDQ_P(0) % 4;
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
+ flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
- TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
/* Mask Flex Bytes */
- flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK;
+ flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK << (offset * 8);
break;
case 0xFFFF:
break;
@@ -368,7 +371,7 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
wx_err(wx, "Error on flexible byte mask\n");
return -EINVAL;
}
- wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = ntohs(input_mask->formatted.dst_port);
@@ -516,14 +519,16 @@ static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl)
static void txgbe_init_fdir_signature(struct wx *wx)
{
u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
+ int index = VMDQ_P(0) / 4;
+ int offset = VMDQ_P(0) % 4;
u32 flex = 0;
- flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
- flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
+ flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
- TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
- wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);
/* Continue setup of fdirctrl register bits:
* Move the flexible bytes to use the ethertype - shift 6 words
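The VMDQ_P() arithmetic introduced in this file relies on each FLEX_CFG register packing four 8-bit per-pool fields, so pool N lives in register N/4 at byte lane N%4 and every mask gets shifted by offset * 8. A small model of that addressing (field width assumed from the FIELD0 mask above):

#include <stdint.h>
#include <stdio.h>

#define FLEX_CFG_FIELD0 0xFFu	/* low byte lane of a FLEX_CFG register (assumed width) */

int main(void)
{
	for (unsigned int pool = 0; pool < 8; pool++) {
		unsigned int index = pool / 4;	/* which FLEX_CFG register */
		unsigned int offset = pool % 4;	/* which byte lane inside it */
		uint32_t clear = FLEX_CFG_FIELD0 << (offset * 8);

		printf("pool %u -> FLEX_CFG(%u) lane %u, clear mask 0x%08x\n",
		       pool, index, offset, clear);
	}
	return 0;
}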
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 4b9921b7bb11..e551ae0e2069 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -99,9 +99,15 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
}
local_buffer = eeprom_ptrs;
- for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
+ for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) {
+ if (wx->mac.type == wx_mac_aml) {
+ if (i >= TXGBE_EEPROM_I2C_SRART_PTR &&
+ i < TXGBE_EEPROM_I2C_END_PTR)
+ local_buffer[i] = 0xffff;
+ }
if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
+ }
kvfree(eeprom_ptrs);
@@ -182,7 +188,7 @@ int txgbe_reset_hw(struct wx *wx)
if (status != 0)
return status;
- if (wx->media_type != sp_media_copper) {
+ if (wx->media_type != wx_media_copper) {
u32 val;
val = WX_MIS_RST_LAN_RST(wx->bus.func);
@@ -212,7 +218,7 @@ int txgbe_reset_hw(struct wx *wx)
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
- wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
wx_init_rx_addrs(wx);
pci_set_master(wx->pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 8658a51ee810..20b9a28bcb55 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -6,10 +6,13 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
+#include "txgbe_aml.h"
/**
* txgbe_irq_enable - Enable default interrupt generation settings
@@ -18,7 +21,14 @@
**/
void txgbe_irq_enable(struct wx *wx, bool queues)
{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+ u32 misc_ien = TXGBE_PX_MISC_IEN_MASK;
+
+ if (wx->mac.type == wx_mac_aml) {
+ misc_ien |= TXGBE_PX_MISC_GPIO;
+ txgbe_gpio_init_aml(wx);
+ }
+
+ wr32(wx, WX_PX_MISC_IEN, misc_ien);
/* unmask interrupt */
wx_intr_enable(wx, TXGBE_INTR_MISC);
@@ -80,6 +90,14 @@ static int txgbe_request_link_irq(struct txgbe *txgbe)
IRQF_ONESHOT, "txgbe-link-irq", txgbe);
}
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler_aml,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
static const struct irq_chip txgbe_irq_chip = {
.name = "txgbe-misc-irq",
};
@@ -109,8 +127,15 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
struct wx *wx = txgbe->wx;
u32 eicr;
- if (wx->pdev->msix_enabled)
+ if (wx->pdev->msix_enabled) {
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ txgbe->eicr = eicr;
+ if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
+ wx_msg_task(txgbe->wx);
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ }
return IRQ_WAKE_THREAD;
+ }
eicr = wx_misc_isb(wx, WX_ISB_VEC0);
if (!eicr) {
@@ -129,6 +154,8 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
q_vector = wx->q_vector[0];
napi_schedule_irqoff(&q_vector->napi);
+ txgbe->eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
return IRQ_WAKE_THREAD;
}
@@ -140,13 +167,22 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
unsigned int sub_irq;
u32 eicr;
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ eicr = txgbe->eicr;
if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
TXGBE_PX_MISC_ETH_AN)) {
sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
handle_nested_irq(sub_irq);
nhandled++;
}
+ if (eicr & TXGBE_PX_MISC_GPIO) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+ if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) {
+ wx_ptp_check_pps_event(wx);
+ nhandled++;
+ }
wx_intr_enable(wx, TXGBE_INTR_MISC);
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
@@ -166,9 +202,12 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
- if (txgbe->wx->mac.type == wx_mac_aml)
+ if (txgbe->wx->mac.type == wx_mac_aml40)
return;
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ free_irq(txgbe->gpio_irq, txgbe);
+
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
@@ -180,12 +219,12 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
goto skip_sp_irq;
- txgbe->misc.nirqs = 1;
- txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
- &txgbe_misc_irq_domain_ops, txgbe);
+ txgbe->misc.nirqs = TXGBE_IRQ_MAX;
+ txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
if (!txgbe->misc.domain)
return -ENOMEM;
@@ -212,11 +251,20 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto free_msic_irq;
+ if (wx->mac.type == wx_mac_sp)
+ goto skip_sp_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
+ if (err)
+ goto free_link_irq;
+
skip_sp_irq:
wx->misc_irq_domain = true;
return 0;
+free_link_irq:
+ free_irq(txgbe->link_irq, txgbe);
free_msic_irq:
free_irq(txgbe->misc.irq, txgbe);
del_misc_irq:
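The reworked misc interrupt path latches the cause register into txgbe->eicr in hard-IRQ context and lets the threaded handler fan events out to nested sub-interrupts. A toy dispatcher showing the same bit tests (bit positions copied from the TXGBE_PX_MISC_* definitions in the txgbe_type.h hunk below; the handlers are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define MISC_IC_TIMESYNC (1u << 11)	/* PPS event */
#define MISC_ETH_LK	 (1u << 18)	/* link change */
#define MISC_GPIO	 (1u << 26)	/* SFP GPIO */

static int dispatch(uint32_t eicr)
{
	int nhandled = 0;

	if (eicr & MISC_ETH_LK) {	/* handle_nested_irq(link) in the driver */
		puts("-> link sub-irq");
		nhandled++;
	}
	if (eicr & MISC_GPIO) {		/* handle_nested_irq(gpio) */
		puts("-> gpio sub-irq");
		nhandled++;
	}
	if (eicr & MISC_IC_TIMESYNC) {	/* wx_ptp_check_pps_event() */
		puts("-> pps check");
		nhandled++;
	}
	return nhandled;
}

int main(void)
{
	uint32_t eicr = MISC_GPIO | MISC_IC_TIMESYNC; /* as latched by the hard handler */

	printf("handled %d sub-events\n", dispatch(eicr));
	return 0;
}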
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 38206a46693b..f3d2778b8e35 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/phylink.h>
+#include <net/udp_tunnel.h>
#include <net/ip.h>
#include <linux/if_vlan.h>
@@ -15,9 +16,12 @@
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_aml.h"
#include "txgbe_irq.h"
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
@@ -85,9 +89,62 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
+static void txgbe_sfp_detection_subtask(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags))
+ return;
+
+ /* wait for the SFP module to be ready */
+ msleep(200);
+
+ err = txgbe_identify_sfp(wx);
+ if (err)
+ return;
+
+ clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+}
+
+static void txgbe_link_config_subtask(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags))
+ return;
+
+ err = txgbe_set_phy_link(wx);
+ if (err)
+ return;
+
+ clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+}
+
+/**
+ * txgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void txgbe_service_task(struct work_struct *work)
+{
+ struct wx *wx = container_of(work, struct wx, service_task);
+
+ txgbe_sfp_detection_subtask(wx);
+ txgbe_link_config_subtask(wx);
+
+ wx_service_event_complete(wx);
+}
+
+static void txgbe_init_service(struct wx *wx)
+{
+ timer_setup(&wx->service_timer, wx_service_timer, 0);
+ INIT_WORK(&wx->service_task, txgbe_service_task);
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ u32 reg;
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -96,17 +153,26 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- if (wx->mac.type == wx_mac_aml) {
- u32 reg;
-
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
- reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
wr32(wx, WX_MAC_TX_CFG, reg);
txgbe_enable_sec_tx_path(wx);
netif_carrier_on(wx->netdev);
- } else {
+ break;
+ case wx_mac_aml:
+ /* Enable TX laser */
+ wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, 0);
+ txgbe_setup_link(wx);
+ phylink_start(wx->phylink);
+ break;
+ case wx_mac_sp:
phylink_start(wx->phylink);
+ break;
+ default:
+ break;
}
/* clear any pending interrupts, may auto mask */
@@ -117,6 +183,13 @@ static void txgbe_up_complete(struct wx *wx)
/* enable transmits */
netif_tx_start_all_queues(netdev);
+ mod_timer(&wx->service_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD,
+ WX_CFG_PORT_CTL_PFRSTD);
+ /* update setting rx tx for all active vfs */
+ wx_set_all_vfs(wx);
}
static void txgbe_reset(struct wx *wx)
@@ -159,12 +232,24 @@ static void txgbe_disable_device(struct wx *wx)
wx_irq_disable(wx);
wx_napi_disable_all(wx);
+ timer_delete_sync(&wx->service_timer);
+
if (wx->bus.func < 2)
wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
else
wx_err(wx, "%s: invalid bus lan id %d\n",
__func__, wx->bus.func);
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+ /* Mark all the VFs as inactive */
+ for (i = 0; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ /* update setting rx tx for all active vfs */
+ wx_set_all_vfs(wx);
+ }
+
if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable mac transmitter */
@@ -188,10 +273,22 @@ void txgbe_down(struct wx *wx)
{
txgbe_disable_device(wx);
txgbe_reset(wx);
- if (wx->mac.type == wx_mac_aml)
+
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
netif_carrier_off(wx->netdev);
- else
+ break;
+ case wx_mac_aml:
+ phylink_stop(wx->phylink);
+ /* Disable TX laser */
+ wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, TXGBE_GPIOBIT_1);
+ break;
+ case wx_mac_sp:
phylink_stop(wx->phylink);
+ break;
+ default:
+ break;
+ }
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
@@ -221,9 +318,11 @@ static void txgbe_init_type_code(struct wx *wx)
case TXGBE_DEV_ID_AML5110:
case TXGBE_DEV_ID_AML5025:
case TXGBE_DEV_ID_AML5125:
+ wx->mac.type = wx_mac_aml;
+ break;
case TXGBE_DEV_ID_AML5040:
case TXGBE_DEV_ID_AML5140:
- wx->mac.type = wx_mac_aml;
+ wx->mac.type = wx_mac_aml40;
break;
default:
wx->mac.type = wx_mac_unknown;
@@ -232,25 +331,25 @@ static void txgbe_init_type_code(struct wx *wx)
switch (device_type) {
case TXGBE_ID_SFP:
- wx->media_type = sp_media_fiber;
+ wx->media_type = wx_media_fiber;
break;
case TXGBE_ID_XAUI:
case TXGBE_ID_SGMII:
- wx->media_type = sp_media_copper;
+ wx->media_type = wx_media_copper;
break;
case TXGBE_ID_KR_KX_KX4:
case TXGBE_ID_MAC_XAUI:
case TXGBE_ID_MAC_SGMII:
- wx->media_type = sp_media_backplane;
+ wx->media_type = wx_media_backplane;
break;
case TXGBE_ID_SFI_XAUI:
if (wx->bus.func == 0)
- wx->media_type = sp_media_fiber;
+ wx->media_type = wx_media_fiber;
else
- wx->media_type = sp_media_copper;
+ wx->media_type = wx_media_copper;
break;
default:
- wx->media_type = sp_media_unknown;
+ wx->media_type = wx_media_unknown;
break;
}
}
@@ -264,13 +363,13 @@ static int txgbe_sw_init(struct wx *wx)
u16 msix_count = 0;
int err;
- wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
- wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
- wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
- wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
- wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE;
- wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
- wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
+ wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
+ wx->mac.max_tx_queues = TXGBE_MAX_TXQ;
+ wx->mac.max_rx_queues = TXGBE_MAX_RXQ;
+ wx->mac.mcft_size = TXGBE_MC_TBL_SIZE;
+ wx->mac.vft_size = TXGBE_VFT_TBL_SIZE;
+ wx->mac.rx_pb_size = TXGBE_RX_PB_SIZE;
+ wx->mac.tx_pb_size = TXGBE_TDB_PB_SZ;
/* PCI config space info */
err = wx_sw_init(wx);
@@ -299,6 +398,7 @@ static int txgbe_sw_init(struct wx *wx)
wx->configure_fdir = txgbe_configure_fdir;
set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+ set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
@@ -307,17 +407,21 @@ static int txgbe_sw_init(struct wx *wx)
/* set default ring sizes */
wx->tx_ring_count = TXGBE_DEFAULT_TXD;
wx->rx_ring_count = TXGBE_DEFAULT_RXD;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
/* set default work limits */
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->setup_tc = txgbe_setup_tc;
wx->do_reset = txgbe_do_reset;
+ set_bit(0, &wx->fwd_bitmask);
switch (wx->mac.type) {
case wx_mac_sp:
break;
case wx_mac_aml:
+ case wx_mac_aml40:
set_bit(WX_FLAG_SWFW_RING, wx->flags);
wx->swfw_index = 0;
break;
@@ -516,6 +620,39 @@ void txgbe_do_reset(struct net_device *netdev)
txgbe_reset(wx);
}
+static int txgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct udp_tunnel_info ti;
+
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ switch (ti.type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ wr32(wx, TXGBE_CFG_VXLAN, ntohs(ti.port));
+ break;
+ case UDP_TUNNEL_TYPE_VXLAN_GPE:
+ wr32(wx, TXGBE_CFG_VXLAN_GPE, ntohs(ti.port));
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ wr32(wx, TXGBE_CFG_GENEVE, ntohs(ti.port));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct udp_tunnel_nic_info txgbe_udp_tunnels = {
+ .sync_table = txgbe_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -524,6 +661,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_fix_features = wx_fix_features,
+ .ndo_features_check = wx_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
@@ -604,9 +742,14 @@ static int txgbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ /* The Sapphire supports up to 63 VFs per PF, but the physical
+ * function also needs one pool for basic networking.
+ */
+ pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT);
wx->driver_name = txgbe_driver_name;
txgbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &txgbe_netdev_ops;
+ netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels;
/* setup the private structure */
err = txgbe_sw_init(wx);
@@ -652,6 +795,7 @@ static int txgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_HIGHDMA;
netdev->hw_features |= NETIF_F_GRO;
netdev->features |= NETIF_F_GRO;
+ netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -673,9 +817,11 @@ static int txgbe_probe(struct pci_dev *pdev,
eth_hw_addr_set(netdev, wx->mac.perm_addr);
wx_mac_set_default_filter(wx, wx->mac.perm_addr);
+ txgbe_init_service(wx);
+
err = wx_init_interrupt_scheme(wx);
if (err)
- goto err_free_mac_table;
+ goto err_cancel_service;
/* Save off EEPROM version number and Option Rom version which
* together make a unique identify for the eeprom
@@ -718,6 +864,13 @@ static int txgbe_probe(struct pci_dev *pdev,
if (etrack_id < 0x20010)
dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");
+ err = txgbe_test_hostif(wx);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Mismatched Firmware version\n");
+ err = -EIO;
+ goto err_release_hw;
+ }
+
txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
if (!txgbe) {
err = -ENOMEM;
@@ -768,6 +921,9 @@ err_free_misc_irq:
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
+err_cancel_service:
+ timer_delete_sync(&wx->service_timer);
+ cancel_work_sync(&wx->service_task);
err_free_mac_table:
kfree(wx->rss_key);
kfree(wx->mac_table);
@@ -794,7 +950,10 @@ static void txgbe_remove(struct pci_dev *pdev)
struct txgbe *txgbe = wx->priv;
struct net_device *netdev;
+ cancel_work_sync(&wx->service_task);
+
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
@@ -817,11 +976,12 @@ static struct pci_driver txgbe_driver = {
.probe = txgbe_probe,
.remove = txgbe_remove,
.shutdown = txgbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(txgbe_driver);
MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
-MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
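txgbe_udp_tunnel_sync above maps each tunnel type to its own port register and writes the host-order port. A hedged sketch of that mapping (register offsets taken from the TXGBE_CFG_* additions in txgbe_type.h; the wr32 is mimicked with a printf):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

enum tun_type { T_VXLAN, T_VXLAN_GPE, T_GENEVE };

static uint32_t port_reg(enum tun_type t)
{
	switch (t) {
	case T_VXLAN:	  return 0x14410;	/* TXGBE_CFG_VXLAN */
	case T_VXLAN_GPE: return 0x14414;	/* TXGBE_CFG_VXLAN_GPE */
	case T_GENEVE:	  return 0x14418;	/* TXGBE_CFG_GENEVE */
	}
	return 0;
}

int main(void)
{
	uint16_t port = htons(4789);	/* tunnel core hands the port over in network order */

	/* the driver does wr32(wx, reg, ntohs(ti.port)) */
	printf("wr32(0x%05x, %u)\n", port_reg(T_VXLAN), ntohs(port));
	return 0;
}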
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 85f022ceef4f..03f1b9bc604d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -16,8 +16,11 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_sriov.h"
+#include "../libwx/wx_mbx.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
+#include "txgbe_aml.h"
#include "txgbe_phy.h"
#include "txgbe_hw.h"
@@ -163,7 +166,7 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi
struct wx *wx = phylink_to_wx(config);
struct txgbe *txgbe = wx->priv;
- if (wx->media_type != sp_media_copper)
+ if (wx->media_type != wx_media_copper)
return txgbe->pcs;
return NULL;
@@ -184,6 +187,8 @@ static void txgbe_mac_link_down(struct phylink_config *config,
wx->speed = SPEED_UNKNOWN;
if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void txgbe_mac_link_up(struct phylink_config *config,
@@ -225,6 +230,8 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wx->last_rx_ptp_check = jiffies;
if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
@@ -272,7 +279,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
- if (wx->media_type == sp_media_copper) {
+ if (wx->media_type == wx_media_copper) {
phy_mode = PHY_INTERFACE_MODE_XAUI;
__set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
} else {
@@ -312,7 +319,10 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data)
status = rd32(wx, TXGBE_CFG_PORT_ST);
up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
- phylink_pcs_change(txgbe->pcs, up);
+ if (txgbe->pcs)
+ phylink_pcs_change(txgbe->pcs, up);
+ else
+ phylink_mac_change(wx->phylink, up);
return IRQ_HANDLED;
}
@@ -567,11 +577,18 @@ int txgbe_init_phy(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int ret;
- if (wx->mac.type == wx_mac_aml)
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
return 0;
-
- if (txgbe->wx->media_type == sp_media_copper)
- return txgbe_ext_phy_init(txgbe);
+ case wx_mac_aml:
+ return txgbe_phylink_init_aml(txgbe);
+ case wx_mac_sp:
+ if (wx->media_type == wx_media_copper)
+ return txgbe_ext_phy_init(txgbe);
+ break;
+ default:
+ break;
+ }
ret = txgbe_swnodes_register(txgbe);
if (ret) {
@@ -634,13 +651,21 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
- if (txgbe->wx->mac.type == wx_mac_aml)
+ switch (txgbe->wx->mac.type) {
+ case wx_mac_aml40:
return;
-
- if (txgbe->wx->media_type == sp_media_copper) {
- phylink_disconnect_phy(txgbe->wx->phylink);
+ case wx_mac_aml:
phylink_destroy(txgbe->wx->phylink);
return;
+ case wx_mac_sp:
+ if (txgbe->wx->media_type == wx_media_copper) {
+ phylink_disconnect_phy(txgbe->wx->phylink);
+ phylink_destroy(txgbe->wx->phylink);
+ return;
+ }
+ break;
+ default:
+ break;
}
platform_device_unregister(txgbe->sfp_dev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 3938985355ed..a32b19d71ea2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -8,4 +8,4 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
-#endif /* _TXGBE_NODE_H_ */
+#endif /* _TXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 9c1c26234cad..42ec815159e8 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -6,6 +6,8 @@
#include <linux/property.h>
#include <linux/irq.h>
+#include <linux/phy.h>
+#include "../libwx/wx_type.h"
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
@@ -50,6 +52,8 @@
/**************** SP Registers ****************************/
/* chip control Registers */
+#define TXGBE_MIS_RST 0x1000C
+#define TXGBE_MIS_RST_MAC_RST(_i) BIT(20 - (_i) * 3)
#define TXGBE_MIS_PRB_CTL 0x10010
#define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i))
/* FMGR Registers */
@@ -62,6 +66,11 @@
#define TXGBE_TS_CTL 0x10300
#define TXGBE_TS_CTL_EVAL_MD BIT(31)
+/* MAC Misc Registers */
+#define TXGBE_MAC_MISC_CTL 0x11F00
+#define TXGBE_MAC_MISC_CTL_LINK_STS_MOD BIT(0)
+#define TXGBE_MAC_MISC_CTL_LINK_PCS FIELD_PREP(BIT(0), 0)
+#define TXGBE_MAC_MISC_CTL_LINK_BOTH FIELD_PREP(BIT(0), 1)
/* GPIO register bit */
#define TXGBE_GPIOBIT_0 BIT(0) /* I:tx fault */
#define TXGBE_GPIOBIT_1 BIT(1) /* O:tx disabled */
@@ -73,19 +82,27 @@
/* Extended Interrupt Enable Set */
#define TXGBE_PX_MISC_ETH_LKDN BIT(8)
#define TXGBE_PX_MISC_DEV_RST BIT(10)
+#define TXGBE_PX_MISC_IC_TIMESYNC BIT(11)
#define TXGBE_PX_MISC_ETH_EVENT BIT(17)
#define TXGBE_PX_MISC_ETH_LK BIT(18)
#define TXGBE_PX_MISC_ETH_AN BIT(19)
#define TXGBE_PX_MISC_INT_ERR BIT(20)
+#define TXGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define TXGBE_PX_MISC_GPIO BIT(26)
#define TXGBE_PX_MISC_IEN_MASK \
(TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
- TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR)
+ TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \
+ TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_IC_TIMESYNC)
/* Port cfg registers */
#define TXGBE_CFG_PORT_ST 0x14404
#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0)
+#define TXGBE_CFG_PORT_ST_LINK_AML_25G BIT(3)
+#define TXGBE_CFG_PORT_ST_LINK_AML_10G BIT(4)
+#define TXGBE_CFG_VXLAN 0x14410
+#define TXGBE_CFG_VXLAN_GPE 0x14414
+#define TXGBE_CFG_GENEVE 0x14418
/* I2C registers */
#define TXGBE_I2C_BASE 0x14900
@@ -146,8 +163,11 @@
/*************************** Amber Lite Registers ****************************/
#define TXGBE_PX_PF_BME 0x4B8
#define TXGBE_AML_MAC_TX_CFG 0x11000
+#define TXGBE_AML_MAC_TX_CFG_TE BIT(0)
#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27)
-#define TXGBE_AML_MAC_TX_CFG_SPEED_25G BIT(28)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_40G FIELD_PREP(GENMASK(30, 27), 0)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_25G FIELD_PREP(GENMASK(30, 27), 2)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_10G FIELD_PREP(GENMASK(30, 27), 8)
#define TXGBE_RDM_RSC_CTL 0x1200C
#define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7)
@@ -158,6 +178,8 @@
#define TXGBE_EEPROM_VERSION_L 0x1D
#define TXGBE_EEPROM_VERSION_H 0x1E
#define TXGBE_ISCSI_BOOT_CONFIG 0x07
+#define TXGBE_EEPROM_I2C_SRART_PTR 0x580
+#define TXGBE_EEPROM_I2C_END_PTR 0x800
#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
@@ -166,13 +188,15 @@
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
-#define TXGBE_SP_MAX_TX_QUEUES 128
-#define TXGBE_SP_MAX_RX_QUEUES 128
-#define TXGBE_SP_RAR_ENTRIES 128
-#define TXGBE_SP_MC_TBL_SIZE 128
-#define TXGBE_SP_VFT_TBL_SIZE 128
-#define TXGBE_SP_RX_PB_SIZE 512
-#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_MAX_TXQ 128
+#define TXGBE_MAX_RXQ 128
+#define TXGBE_RAR_ENTRIES 128
+#define TXGBE_MC_TBL_SIZE 128
+#define TXGBE_VFT_TBL_SIZE 128
+#define TXGBE_RX_PB_SIZE 512
+#define TXGBE_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+
+#define TXGBE_MAX_VFS_DRV_LIMIT 63
#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20
@@ -263,7 +287,7 @@ struct txgbe_fdir_filter {
struct hlist_node fdir_node;
union txgbe_atr_input filter;
u16 sw_idx;
- u16 action;
+ u64 action;
};
/* TX/RX descriptor defines */
@@ -290,6 +314,72 @@ void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
void txgbe_do_reset(struct net_device *netdev);
+#define TXGBE_LINK_SPEED_10GB_FULL 4
+#define TXGBE_LINK_SPEED_25GB_FULL 0x10
+
+#define TXGBE_SFF_IDENTIFIER_SFP 0x3
+#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT 0x4
+#define TXGBE_SFF_FCPI4_LIMITING 0x3
+#define TXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define TXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define TXGBE_SFF_25GBASESR_CAPABLE 0x2
+#define TXGBE_SFF_25GBASELR_CAPABLE 0x3
+#define TXGBE_SFF_25GBASEER_CAPABLE 0x4
+#define TXGBE_SFF_25GBASECR_91FEC 0xB
+#define TXGBE_SFF_25GBASECR_74FEC 0xC
+#define TXGBE_SFF_25GBASECR_NOFEC 0xD
+
+#define TXGBE_PHY_FEC_RS BIT(0)
+#define TXGBE_PHY_FEC_BASER BIT(1)
+#define TXGBE_PHY_FEC_OFF BIT(2)
+#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | \
+ TXGBE_PHY_FEC_BASER |\
+ TXGBE_PHY_FEC_RS)
+
+#define FW_PHY_GET_LINK_CMD 0xC0
+#define FW_PHY_SET_LINK_CMD 0xC1
+#define FW_READ_SFP_INFO_CMD 0xC5
+
+struct txgbe_sfp_id {
+ u8 identifier; /* A0H 0x00 */
+ u8 com_1g_code; /* A0H 0x06 */
+ u8 com_10g_code; /* A0H 0x03 */
+ u8 com_25g_code; /* A0H 0x24 */
+ u8 cable_spec; /* A0H 0x3C */
+ u8 cable_tech; /* A0H 0x08 */
+ u8 vendor_oui0; /* A0H 0x25 */
+ u8 vendor_oui1; /* A0H 0x26 */
+ u8 vendor_oui2; /* A0H 0x27 */
+ u8 reserved[3];
+};
+
+struct txgbe_hic_i2c_read {
+ struct wx_hic_hdr hdr;
+ struct txgbe_sfp_id id;
+};
+
+struct txgbe_hic_ephy_setlink {
+ struct wx_hic_hdr hdr;
+ u8 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 fec_mode;
+ u8 resv[4];
+};
+
+struct txgbe_hic_ephy_getlink {
+ struct wx_hic_hdr hdr;
+ u8 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 flow_ctl;
+ u8 power;
+ u8 fec_mode;
+ u8 resv[6];
+};
+
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
.name = _NAME, \
@@ -327,6 +417,7 @@ struct txgbe_nodes {
enum txgbe_misc_irqs {
TXGBE_IRQ_LINK = 0,
+ TXGBE_IRQ_GPIO,
TXGBE_IRQ_MAX
};
@@ -348,12 +439,19 @@ struct txgbe {
struct clk *clk;
struct gpio_chip *gpio;
unsigned int link_irq;
+ unsigned int gpio_irq;
+ u32 eicr;
/* flow director */
struct hlist_head fdir_filter_list;
union txgbe_atr_input fdir_mask;
int fdir_filter_count;
spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
+
+ DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ u8 link_port;
};
#endif /* _TXGBE_TYPE_H_ */
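The new AML speed macros encode a 4-bit selector at bits 30:27 via FIELD_PREP, which is why SPEED_25G (selector 2) still equals the old BIT(28) value. A userspace re-derivation (GENMASK/FIELD_PREP reimplemented with compiler builtins for illustration only):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	 ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define SPEED_MASK GENMASK(30, 27)

int main(void)
{
	printf("40G -> 0x%08x\n", FIELD_PREP(SPEED_MASK, 0));
	printf("25G -> 0x%08x\n", FIELD_PREP(SPEED_MASK, 2));	/* 0x10000000 == old BIT(28) */
	printf("10G -> 0x%08x\n", FIELD_PREP(SPEED_MASK, 8));
	return 0;
}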
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 054abf283ab3..6011d7eae0c7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -880,7 +880,7 @@ static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
dev_consume_skb_any(skbuf_dma->skb);
netif_txq_completed_wake(txq, 1, len,
CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
- 2 * MAX_SKB_FRAGS);
+ 2);
}
/**
@@ -914,7 +914,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
dma_dev = lp->tx_chan->device;
sg_len = skb_shinfo(skb)->nr_frags + 1;
- if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
+ if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
netif_stop_queue(ndev);
if (net_ratelimit())
netdev_warn(ndev, "TX ring unexpectedly full\n");
@@ -964,7 +964,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
txq = skb_get_tx_queue(lp->ndev, skb);
netdev_tx_sent_queue(txq, skb->len);
netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
- MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
+ 1, 2);
dmaengine_submit(dma_tx_desc);
dma_async_issue_pending(lp->tx_chan);
@@ -2980,7 +2980,7 @@ static int axienet_probe(struct platform_device *pdev)
}
}
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
- dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
ret = -EINVAL;
goto cleanup_clk;
}
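With the dmaengine path consuming exactly one ring slot per skb, the axienet change stops the queue when fewer than two slots remain and wakes it once two are free, instead of budgeting in units of MAX_SKB_FRAGS. A model of the circular-buffer math (CIRC_SPACE as defined in include/linux/circ_buf.h):

#include <stdio.h>

#define TX_BD_NUM_MAX 128	/* power of two, as the circ_buf macros require */
#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
	int head = 126, tail = 0;	/* nearly full ring */
	int space = CIRC_SPACE(head, tail, TX_BD_NUM_MAX);

	/* one slot per skb: stop below 2 free, wake again at 2 */
	printf("space=%d -> %s\n", space,
	       space <= 1 ? "stop queue" : "keep transmitting");
	return 0;
}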
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index a2ab1c150822..e1e7f65553e7 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -394,16 +394,20 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
-static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int ixp4xx_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
struct port *port = netdev_priv(netdev);
int ret;
int ch;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
+ if (!cpu_is_ixp46x())
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
if (ret)
@@ -412,10 +416,10 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
ch = PORT2CHANNEL(port);
regs = port->timesync_regs;
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->hwts_rx_en = 0;
break;
@@ -431,39 +435,45 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
+ port->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
/* Clear out any old time stamps. */
__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
&regs->channel[ch].ch_event);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int ixp4xx_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
{
- struct hwtstamp_config cfg;
struct port *port = netdev_priv(netdev);
- cfg.flags = 0;
- cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ if (!cpu_is_ixp46x())
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ cfg->flags = 0;
+ cfg->tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (port->hwts_rx_en) {
case 0:
- cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ cfg->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case PTP_SLAVE_MODE:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case PTP_MASTER_MODE:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
@@ -985,21 +995,6 @@ static void eth_set_mcast_list(struct net_device *dev)
}
-static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cpu_is_ixp46x()) {
- if (cmd == SIOCSHWTSTAMP)
- return hwtstamp_set(dev, req);
- if (cmd == SIOCGHWTSTAMP)
- return hwtstamp_get(dev, req);
- }
-
- return phy_mii_ioctl(dev->phydev, req, cmd);
-}
-
/* ethtool support */
static void ixp4xx_get_drvinfo(struct net_device *dev,
@@ -1433,9 +1428,11 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_change_mtu = ixp4xx_eth_change_mtu,
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
- .ndo_eth_ioctl = eth_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_hwtstamp_get = ixp4xx_hwtstamp_get,
+ .ndo_hwtstamp_set = ixp4xx_hwtstamp_set,
};
static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 54b7f24f3810..2098c3068d34 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1044,7 +1044,7 @@ static irqreturn_t fza_interrupt(int irq, void *dev_id)
static void fza_reset_timer(struct timer_list *t)
{
- struct fza_private *fp = from_timer(fp, t, reset_timer);
+ struct fza_private *fp = timer_container_of(fp, t, reset_timer);
if (!fp->timer_state) {
pr_err("%s: RESET timed out!\n", fp->name);
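The from_timer() to timer_container_of() conversions in this and the following hunks are mechanical renames of the same container_of pattern. A standalone illustration of the pointer arithmetic (userspace stand-ins for the kernel types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int expires; };	/* stand-in */

struct fza_private_like {
	int timer_state;
	struct timer_list reset_timer;
};

static void reset_timeout(struct timer_list *t)
{
	/* timer_container_of(fp, t, reset_timer) boils down to this */
	struct fza_private_like *fp =
		container_of(t, struct fza_private_like, reset_timer);

	printf("timer_state=%d\n", fp->timer_state);
}

int main(void)
{
	struct fza_private_like fp = { .timer_state = 1 };

	reset_timeout(&fp.reset_timer);
	return 0;
}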
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 66e38ce9cd1d..ffc15a432689 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1946,22 +1946,14 @@ static __net_init int geneve_init_net(struct net *net)
return 0;
}
-static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit geneve_exit_rtnl_net(struct net *net,
+ struct list_head *dev_to_kill)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
- geneve_dellink(geneve->dev, head);
-}
-
-static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
-{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list)
- geneve_destroy_tunnels(net, dev_to_kill);
+ geneve_dellink(geneve->dev, dev_to_kill);
}
static void __net_exit geneve_exit_net(struct net *net)
@@ -1973,7 +1965,7 @@ static void __net_exit geneve_exit_net(struct net *net)
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit_batch_rtnl = geneve_exit_batch_rtnl,
+ .exit_rtnl = geneve_exit_rtnl_net,
.exit = geneve_exit_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ef793607890d..d4dec741c7f4 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -2475,23 +2475,19 @@ static int __net_init gtp_net_init(struct net *net)
return 0;
}
-static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
+static void __net_exit gtp_net_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list) {
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp, *gtp_next;
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp, *gtp_next;
- list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
- gtp_dellink(gtp->dev, dev_to_kill);
- }
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
- .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
+ .exit_rtnl = gtp_net_exit_rtnl,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};
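Both the geneve and gtp conversions replace an exit_batch_rtnl callback that iterated the net list itself with a per-net exit_rtnl hook, moving the loop into the pernet core. A schematic of the equivalence (plain C stand-ins, not the real pernet API):

#include <stdio.h>

struct net { int id; };

/* old shape: the driver walked the whole batch itself */
static void exit_batch_rtnl(struct net *nets, int n)
{
	for (int i = 0; i < n; i++)
		printf("kill tunnels in net %d\n", nets[i].id);
}

/* new shape: the driver sees one net, the core owns the loop */
static void exit_rtnl(struct net *net)
{
	printf("kill tunnels in net %d\n", net->id);
}

int main(void)
{
	struct net nets[] = { { 1 }, { 2 } };

	exit_batch_rtnl(nets, 2);
	for (int i = 0; i < 2; i++)	/* what the pernet core now does */
		exit_rtnl(&nets[i]);
	return 0;
}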
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index b33d84ed5bbf..c5e5423e1863 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -133,7 +133,7 @@ static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
static void sp_xmit_on_air(struct timer_list *t)
{
- struct sixpack *sp = from_timer(sp, t, tx_t);
+ struct sixpack *sp = timer_container_of(sp, t, tx_t);
int actual, when = sp->slottime;
static unsigned char random;
@@ -491,7 +491,7 @@ static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state)
static void resync_tnc(struct timer_list *t)
{
- struct sixpack *sp = from_timer(sp, t, resync_t);
+ struct sixpack *sp = timer_container_of(sp, t, resync_t);
static char resync_cmd = 0xe8;
/* clear any data that might have been received */
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 9e366f275406..5fda7a0fcce0 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1074,7 +1074,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
return 0;
case HDLCDRVCTL_DRIVERNAME:
- strscpy_pad(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername));
+ strscpy_pad(hi.data.drivername, "baycom_epp");
break;
case HDLCDRVCTL_GETMODE:
@@ -1091,8 +1091,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
return baycom_setmode(bc, hi.data.modename);
case HDLCDRVCTL_MODELIST:
- strscpy_pad(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x",
- sizeof(hi.data.modename));
+ strscpy_pad(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x");
break;
case HDLCDRVCTL_MODEMPARMASK:
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index f88721dec681..ae5048efde68 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1127,7 +1127,7 @@ static inline int is_grouped(struct scc_channel *scc)
static void t_dwait(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_t);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_t);
if (scc->stat.tx_state == TXS_WAIT) /* maxkeyup or idle timeout */
{
@@ -1169,7 +1169,7 @@ static void t_dwait(struct timer_list *t)
static void t_txdelay(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_t);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_t);
scc_start_maxkeyup(scc);
@@ -1190,7 +1190,7 @@ static void t_txdelay(struct timer_list *t)
static void t_tail(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_t);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_t);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
@@ -1217,7 +1217,7 @@ static void t_tail(struct timer_list *t)
static void t_busy(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_wdog);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_wdog);
timer_delete(&scc->tx_t);
netif_stop_queue(scc->dev); /* don't pile on the wabbit! */
@@ -1236,7 +1236,7 @@ static void t_busy(struct timer_list *t)
static void t_maxkeyup(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_wdog);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_wdog);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
@@ -1270,7 +1270,7 @@ static void t_maxkeyup(struct timer_list *t)
static void t_idle(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_t);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_t);
timer_delete(&scc->tx_wdog);
@@ -1403,7 +1403,7 @@ static unsigned long scc_get_param(struct scc_channel *scc, unsigned int cmd)
static void scc_stop_calibrate(struct timer_list *t)
{
- struct scc_channel *scc = from_timer(scc, t, tx_wdog);
+ struct scc_channel *scc = timer_container_of(scc, t, tx_wdog);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 6342c319c0e4..7b7e7a47a75e 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1154,7 +1154,7 @@ static inline void rr_raz_rx(struct rr_private *rrpriv,
static void rr_timer(struct timer_list *t)
{
- struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
+ struct rr_private *rrpriv = timer_container_of(rrpriv, t, timer);
struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
struct rr_regs __iomem *regs = rrpriv->regs;
unsigned long flags;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 70f7cb383228..cb6f5482d203 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
u8 cp_partial; /* partial copy into send buffer */
u8 rmsg_size; /* RNDIS header and PPI size */
- u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
u8 page_buf_cnt;
u16 q_idx;
@@ -893,6 +892,18 @@ struct nvsp_message {
sizeof(struct nvsp_message))
#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+/* Maximum # of contiguous data ranges that can make up a transmitted packet.
+ * Typically it's the max SKB fragments plus 2 for the rndis packet and the
+ * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
+ * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
+ * in a GPA direct packet sent to netvsp over VMBus.
+ */
+#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
+#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
+#else
+#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
+#endif
+
/* Estimated requestor size:
* out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
*/
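The MAX_DATA_RANGES bound defined above is effectively min(MAX_SKB_FRAGS + 2, MAX_PAGE_BUFFER_COUNT). A quick check with assumed values (MAX_SKB_FRAGS is 17 on common configs and MAX_PAGE_BUFFER_COUNT is 32 in the VMBus headers; both are assumptions here):

#include <stdio.h>

#define MAX_SKB_FRAGS 17		/* assumed default */
#define MAX_PAGE_BUFFER_COUNT 32	/* assumed VMBus limit */

#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
#else
#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
#endif

int main(void)
{
	/* 17 frags + rndis header + linear data = 19, under the 32-entry cap */
	printf("MAX_DATA_RANGES = %d\n", MAX_DATA_RANGES);
	return 0;
}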
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d6f5b9ea3109..720104661d7f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -953,8 +953,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ pend_size;
int i;
u32 padding = 0;
- u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
u32 remain;
/* Add padding */
@@ -1055,6 +1054,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
return 0;
}
+/* Build an "array" of mpb entries describing the data to be transferred
+ * over VMBus. After the desc header fields, each "array" entry is variable
+ * size, and each entry starts after the end of the previous entry. The
+ * "offset" and "len" fields for each entry imply the size of the entry.
+ *
+ * The pfns are in units of HV_HYP_PAGE_SIZE, because all communication with
+ * Hyper-V uses that granularity, even if the guest's system page size is larger.
+ * Each entry in the input "pb" array must describe a contiguous range of
+ * guest physical memory so that the pfns are sequential if the range crosses
+ * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE.
+ */
+static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb,
+ u32 page_buffer_count,
+ struct vmbus_packet_mpb_array *desc,
+ u32 *desc_size)
+{
+ struct hv_mpb_array *mpb_entry = &desc->range;
+ int i, j;
+
+ for (i = 0; i < page_buffer_count; i++) {
+ u32 offset = pb[i].offset;
+ u32 len = pb[i].len;
+
+ mpb_entry->offset = offset;
+ mpb_entry->len = len;
+
+ for (j = 0; j < HVPFN_UP(offset + len); j++)
+ mpb_entry->pfn_array[j] = pb[i].pfn + j;
+
+ mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j];
+ }
+
+ desc->rangecount = page_buffer_count;
+ *desc_size = (char *)mpb_entry - (char *)desc;
+}
+
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -1097,8 +1132,11 @@ static inline int netvsc_send_pkt(
packet->dma_range = NULL;
if (packet->page_buf_cnt) {
+ struct vmbus_channel_packet_page_buffer desc;
+ u32 desc_size;
+
if (packet->cp_partial)
- pb += packet->rmsg_pgcnt;
+ pb++;
ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
if (ret) {
@@ -1106,11 +1144,12 @@ static inline int netvsc_send_pkt(
goto exit;
}
- ret = vmbus_sendpacket_pagebuffer(out_channel,
- pb, packet->page_buf_cnt,
- &nvmsg, sizeof(nvmsg),
- req_id);
-
+ netvsc_build_mpb_array(pb, packet->page_buf_cnt,
+ (struct vmbus_packet_mpb_array *)&desc,
+ &desc_size);
+ ret = vmbus_sendpacket_mpb_desc(out_channel,
+ (struct vmbus_packet_mpb_array *)&desc,
+ desc_size, &nvmsg, sizeof(nvmsg), req_id);
if (ret)
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
@@ -1259,7 +1298,7 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = section_index;
if (packet->cp_partial) {
- packet->page_buf_cnt -= packet->rmsg_pgcnt;
+ packet->page_buf_cnt--;
packet->total_data_buflen = msd_len + packet->rmsg_size;
} else {
packet->page_buf_cnt = 0;
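netvsc_build_mpb_array serializes variable-size entries: each range contributes an offset, a length, and HVPFN_UP(offset + len) pfns, and the next entry begins immediately after. A userspace model that walks two sample ranges and accumulates the payload size the way desc_size is derived (HV_HYP_PAGE_SIZE assumed to be 4096):

#include <stdint.h>
#include <stdio.h>

#define HV_HYP_PAGE_SIZE 4096u
#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE - 1) / HV_HYP_PAGE_SIZE)

struct range { uint32_t offset, len; uint64_t pfn; };

int main(void)
{
	struct range pb[] = {
		{ .offset = 100,  .len = 200, .pfn = 10 },	/* fits one hyp page */
		{ .offset = 4000, .len = 600, .pfn = 20 },	/* crosses a boundary */
	};
	size_t size = 0;

	for (unsigned int i = 0; i < 2; i++) {
		uint32_t npfns = HVPFN_UP(pb[i].offset + pb[i].len);

		/* offset and len fields, then one u64 pfn per covered page */
		size += 2 * sizeof(uint32_t) + npfns * sizeof(uint64_t);
		printf("range %u: %u pfn(s) starting at %llu\n",
		       i, npfns, (unsigned long long)pb[i].pfn);
	}
	printf("desc payload after the header: %zu bytes\n", size);
	return 0;
}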
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index e01c5997a551..1dd3755d9e6d 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = dev_xdp_propagate(vf_netdev, &xdp);
+ ret = netif_xdp_propagate(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c51b318b8a72..c41a025c66f0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -326,43 +326,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
return txq;
}
-static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
- struct hv_page_buffer *pb)
-{
- int j = 0;
-
- hvpfn += offset >> HV_HYP_PAGE_SHIFT;
- offset = offset & ~HV_HYP_PAGE_MASK;
-
- while (len > 0) {
- unsigned long bytes;
-
- bytes = HV_HYP_PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
- pb[j].pfn = hvpfn;
- pb[j].offset = offset;
- pb[j].len = bytes;
-
- offset += bytes;
- len -= bytes;
-
- if (offset == HV_HYP_PAGE_SIZE && len) {
- hvpfn++;
- offset = 0;
- j++;
- }
- }
-
- return j + 1;
-}
-
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 slots_used = 0;
- char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
int i;
@@ -371,28 +338,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
* 2. skb linear data
* 3. skb fragment data
*/
- slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
- offset_in_hvpage(hdr),
- len,
- &pb[slots_used]);
+ pb[0].offset = offset_in_hvpage(hdr);
+ pb[0].len = len;
+ pb[0].pfn = virt_to_hvpfn(hdr);
packet->rmsg_size = len;
- packet->rmsg_pgcnt = slots_used;
- slots_used += fill_pg_buf(virt_to_hvpfn(data),
- offset_in_hvpage(data),
- skb_headlen(skb),
- &pb[slots_used]);
+ pb[1].offset = offset_in_hvpage(skb->data);
+ pb[1].len = skb_headlen(skb);
+ pb[1].pfn = virt_to_hvpfn(skb->data);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ struct hv_page_buffer *cur_pb = &pb[i + 2];
+ u64 pfn = page_to_hvpfn(skb_frag_page(frag));
+ u32 offset = skb_frag_off(frag);
- slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
- skb_frag_off(frag),
- skb_frag_size(frag),
- &pb[slots_used]);
+ cur_pb->offset = offset_in_hvpage(offset);
+ cur_pb->len = skb_frag_size(frag);
+ cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
}
- return slots_used;
+ return frags + 2;
}
static int count_skb_frag_slots(struct sk_buff *skb)
@@ -483,7 +449,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
struct net_device *vf_netdev;
u32 rndis_msg_size;
u32 hash;
- struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
+ struct hv_page_buffer pb[MAX_DATA_RANGES];
/* If VF is present and up then redirect packets to it.
* Skip the VF if it is marked down or has no carrier.
@@ -1405,7 +1371,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
- struct sockaddr *addr = p;
+ struct sockaddr_storage *addr = p;
int err;
err = eth_prepare_mac_addr_change(ndev, p);
@@ -1421,12 +1387,12 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
return err;
}
- err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
+ err = rndis_filter_set_device_mac(nvdev, addr->__data);
if (!err) {
eth_commit_mac_addr_change(ndev, p);
} else if (vf_netdev) {
/* rollback change on VF */
- memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
+ memcpy(addr->__data, ndev->dev_addr, ETH_ALEN);
dev_set_mac_address(vf_netdev, addr, NULL);
}
@@ -2496,8 +2462,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
- netvsc_vf_setxdp(vf_netdev, NULL);
-
reinit_completion(&net_device_ctx->vf_add);
netdev_rx_handler_unregister(vf_netdev);
netdev_upper_dev_unlink(vf_netdev, ndev);
@@ -2665,7 +2629,9 @@ static int netvsc_probe(struct hv_device *dev,
continue;
netvsc_prepare_bonding(vf_netdev);
+ netdev_lock_ops(vf_netdev);
netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+ netdev_unlock_ops(vf_netdev);
__netvsc_vf_setup(net, vf_netdev);
break;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 82747dfacd70..9e73959e61ee 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
struct hv_netvsc_packet *packet;
- struct hv_page_buffer page_buf[2];
- struct hv_page_buffer *pb = page_buf;
+ struct hv_page_buffer pb;
int ret;
/* Setup the packet to send it */
@@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->total_data_buflen = req->request_msg.msg_len;
packet->page_buf_cnt = 1;
- pb[0].pfn = virt_to_phys(&req->request_msg) >>
- HV_HYP_PAGE_SHIFT;
- pb[0].len = req->request_msg.msg_len;
- pb[0].offset = offset_in_hvpage(&req->request_msg);
-
- /* Add one page_buf when request_msg crossing page boundary */
- if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
- packet->page_buf_cnt++;
- pb[0].len = HV_HYP_PAGE_SIZE -
- pb[0].offset;
- pb[1].pfn = virt_to_phys((void *)&req->request_msg
- + pb[0].len) >> HV_HYP_PAGE_SHIFT;
- pb[1].offset = 0;
- pb[1].len = req->request_msg.msg_len -
- pb[0].len;
- }
+ pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
+ pb.len = req->request_msg.msg_len;
+ pb.offset = offset_in_hvpage(&req->request_msg);
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
+ ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
rcu_read_unlock_bh();
return ret;
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index e902d731776d..65dba4729155 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -493,7 +493,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index f632aab56f4c..315e617a8eeb 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -374,7 +374,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index c1428483ca34..f5d66779c2fb 100644
--- a/drivers/net/ipa/data/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -367,7 +367,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 2c7e8cb429b9..f5ed5d745aeb 100644
--- a/drivers/net/ipa/data/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -340,7 +340,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 57dc78c526b0..730d8c43a45c 100644
--- a/drivers/net/ipa/data/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -418,7 +418,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index 41f212209993..5e1d9049c62b 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -360,7 +360,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 4eb9c909d5b3..da472a2a2e29 100644
--- a/drivers/net/ipa/data/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -416,7 +416,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v5.0.c b/drivers/net/ipa/data/ipa_data-v5.0.c
index 050580c99b65..bc5722e4b053 100644
--- a/drivers/net/ipa/data/ipa_data-v5.0.c
+++ b/drivers/net/ipa/data/ipa_data-v5.0.c
@@ -442,7 +442,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v5.5.c b/drivers/net/ipa/data/ipa_data-v5.5.c
index 0e6663e22533..741ae21d9d78 100644
--- a/drivers/net/ipa/data/ipa_data-v5.5.c
+++ b/drivers/net/ipa/data/ipa_data-v5.5.c
@@ -448,7 +448,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x0000b000,
};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index d88cbbbf18b7..2fd03f0799b2 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -180,7 +180,6 @@ struct ipa_resource_data {
* @local: array of IPA-local memory region descriptors
* @imem_addr: physical address of IPA region within IMEM
* @imem_size: size in bytes of IPA IMEM region
- * @smem_id: item identifier for IPA region within SMEM memory
* @smem_size: size in bytes of the IPA SMEM region
*/
struct ipa_mem_data {
@@ -188,7 +187,6 @@ struct ipa_mem_data {
const struct ipa_mem *local;
u32 imem_addr;
u32 imem_size;
- u32 smem_id;
u32 smem_size;
};
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index dee985eb08cb..835a3c9c1fd4 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -26,6 +26,8 @@
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
+#define SMEM_IPA_FILTER_TABLE 497
+
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
u32 i;
@@ -509,7 +511,6 @@ static void ipa_imem_exit(struct ipa *ipa)
/**
* ipa_smem_init() - Initialize SMEM memory used by the IPA
* @ipa: IPA pointer
- * @item: Item ID of SMEM memory
* @size: Size (bytes) of SMEM memory region
*
* SMEM is a managed block of shared DRAM, from which numbered "items"
@@ -523,7 +524,7 @@ static void ipa_imem_exit(struct ipa *ipa)
*
* Note: @size and the item address are not guaranteed to be page-aligned.
*/
-static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
+static int ipa_smem_init(struct ipa *ipa, size_t size)
{
struct device *dev = ipa->dev;
struct iommu_domain *domain;
@@ -545,25 +546,25 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/
- ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, SMEM_IPA_FILTER_TABLE, size);
if (ret && ret != -EEXIST) {
- dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
- ret, size, item);
+ dev_err(dev, "error %d allocating size %zu SMEM item\n",
+ ret, size);
return ret;
}
/* Now get the address of the SMEM memory region */
- virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
+ virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, SMEM_IPA_FILTER_TABLE, &actual);
if (IS_ERR(virt)) {
ret = PTR_ERR(virt);
- dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
+ dev_err(dev, "error %d getting SMEM item\n", ret);
return ret;
}
/* In case the region was already allocated, verify the size */
if (ret && actual != size) {
- dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
- item, actual, size);
+ dev_err(dev, "SMEM item has size %zu, expected %zu\n",
+ actual, size);
return -EINVAL;
}
@@ -659,7 +660,7 @@ int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
if (ret)
goto err_unmap;
- ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
+ ret = ipa_smem_init(ipa, mem_data->smem_size);
if (ret)
goto err_imem_exit;
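
Editor's note: the ipa_smem_init() comment above describes an allocate-then-map handshake against the shared SMEM pool; below is a minimal, hypothetical sketch of that pattern with the now-hardcoded item ID, using only the qcom_smem calls visible in the hunk (example_smem_setup() is illustrative, not driver code):

	#include <linux/err.h>
	#include <linux/soc/qcom/smem.h>

	/* Allocate the fixed SMEM item (tolerating -EEXIST), then map it
	 * and, if it pre-existed, verify its size. Assumes the
	 * QCOM_SMEM_HOST_MODEM and SMEM_IPA_FILTER_TABLE definitions from
	 * ipa_mem.c are in scope.
	 */
	static int example_smem_setup(size_t size)
	{
		size_t actual;
		void *virt;
		int ret;

		ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM,
				      SMEM_IPA_FILTER_TABLE, size);
		if (ret && ret != -EEXIST)
			return ret;		/* real allocation failure */

		virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM,
				     SMEM_IPA_FILTER_TABLE, &actual);
		if (IS_ERR(virt))
			return PTR_ERR(virt);

		if (ret && actual != size)	/* ret == -EEXIST here */
			return -EINVAL;

		return 0;
	}
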
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index ca62188a317a..e3e65772c599 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -219,7 +219,7 @@ void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
- u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
+ u32 hash = jhash_1word(get_unaligned((u32 *)(addr + 2)),
ipvlan_jhash_secret);
return hash & IPVLAN_MAC_FILTER_MASK;
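
Editor's note: this hunk, and the macvlan one further below, replace the architecture-specific __get_unaligned_cpu32() with the generic get_unaligned(). A hedged sketch of the pattern (example_mac_hash() is hypothetical; the header name follows recent kernels, older trees use <asm/unaligned.h>):

	#include <linux/unaligned.h>
	#include <linux/jhash.h>

	/* Hash the low 32 bits of a MAC address; addr + 2 is not
	 * guaranteed to be 4-byte aligned, hence the unaligned accessor.
	 */
	static u32 example_mac_hash(const unsigned char *addr, u32 seed)
	{
		u32 low = get_unaligned((u32 *)(addr + 2));	/* bytes 2..5 */

		return jhash_1word(low, seed);
	}
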
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3d315e30ee47..7edbe76b5455 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -247,15 +247,39 @@ static sci_t make_sci(const u8 *addr, __be16 port)
return sci;
}
-static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
+static sci_t macsec_active_sci(struct macsec_secy *secy)
{
- sci_t sci;
+ struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc);
+
+ /* Case single RX SC */
+ if (rx_sc && !rcu_dereference_bh(rx_sc->next))
+ return (rx_sc->active) ? rx_sc->sci : 0;
+ /* Case no RX SC or multiple */
+ else
+ return 0;
+}
+
+static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present,
+ struct macsec_rxh_data *rxd)
+{
+ struct macsec_dev *macsec;
+ sci_t sci = 0;
- if (sci_present)
+ /* SC = 1 */
+ if (sci_present) {
memcpy(&sci, hdr->secure_channel_id,
sizeof(hdr->secure_channel_id));
- else
+ /* SC = 0; ES = 0 */
+ } else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) &&
+ (list_is_singular(&rxd->secys))) {
+ /* Only one SECY should exist in this scenario */
+ macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev,
+ secys);
+ if (macsec)
+ return macsec_active_sci(&macsec->secy);
+ } else {
sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
+ }
return sci;
}
@@ -1109,7 +1133,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
unsigned int len;
- sci_t sci;
+ sci_t sci = 0;
u32 hdr_pn;
bool cbit;
struct pcpu_rx_sc_stats *rxsc_stats;
@@ -1156,11 +1180,14 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
- sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
+ sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd);
+ if (!sci)
+ goto drop_nosc;
+
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
@@ -1283,6 +1310,7 @@ drop:
macsec_rxsa_put(rx_sa);
drop_nosa:
macsec_rxsc_put(rx_sc);
+drop_nosc:
rcu_read_unlock();
drop_direct:
kfree_skb(skb);
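
Editor's note: a simplified decision sketch of the new SCI derivation above (not the driver function; the EX_* constants are local stand-ins for the values of MACSEC_TCI_SC/MACSEC_TCI_ES):

	#define EX_TCI_SC	0x20	/* assumed MACSEC_TCI_SC */
	#define EX_TCI_ES	0x40	/* assumed MACSEC_TCI_ES */

	enum ex_sci_source {
		EX_SCI_FROM_HDR,	/* explicit 8-byte SCI in the SecTAG */
		EX_SCI_ACTIVE_SC,	/* implicit: single SecY, single RX SC */
		EX_SCI_FROM_MAC,	/* make_sci(h_source, MACSEC_PORT_ES) */
		EX_SCI_NONE,		/* unresolvable -> frame is dropped */
	};

	static enum ex_sci_source example_sci_source(unsigned char tci_an,
						     bool single_secy)
	{
		if (tci_an & EX_TCI_SC)
			return EX_SCI_FROM_HDR;
		/* SC == 0, ES == 0: SCI is implicit, and only resolvable
		 * when exactly one SecY exists (the macsec_active_sci()
		 * case); its result may still be 0, which also drops.
		 */
		if (!(tci_an & EX_TCI_ES) && single_secy)
			return EX_SCI_ACTIVE_SC;
		return EX_SCI_FROM_MAC;
	}
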
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d0dfa6bca6cc..4df991e494bd 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -254,7 +254,7 @@ static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
static unsigned int mc_hash(const struct macvlan_dev *vlan,
const unsigned char *addr)
{
- u32 val = __get_unaligned_cpu32(addr + 2);
+ u32 val = get_unaligned((u32 *)(addr + 2));
val ^= macvlan_hash_mix(vlan);
return hash_32(val, MACVLAN_MC_FILTER_BITS);
@@ -754,13 +754,13 @@ static int macvlan_sync_address(struct net_device *dev,
static int macvlan_set_mac_address(struct net_device *dev, void *p)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- struct sockaddr *addr = p;
+ struct sockaddr_storage *addr = p;
- if (!is_valid_ether_addr(addr->sa_data))
+ if (!is_valid_ether_addr(addr->__data))
return -EADDRNOTAVAIL;
/* If the addresses are the same, this is a no-op */
- if (ether_addr_equal(dev->dev_addr, addr->sa_data))
+ if (ether_addr_equal(dev->dev_addr, addr->__data))
return 0;
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
@@ -768,10 +768,10 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
return dev_set_mac_address(vlan->lowerdev, addr, NULL);
}
- if (macvlan_addr_busy(vlan->port, addr->sa_data))
+ if (macvlan_addr_busy(vlan->port, addr->__data))
return -EADDRINUSE;
- return macvlan_sync_address(dev, addr->sa_data);
+ return macvlan_sync_address(dev, addr->__data);
}
static void macvlan_change_rx_flags(struct net_device *dev, int change)
@@ -1295,11 +1295,11 @@ static void macvlan_port_destroy(struct net_device *dev)
*/
if (macvlan_passthru(port) &&
!ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
- struct sockaddr sa;
+ struct sockaddr_storage ss;
- sa.sa_family = port->dev->type;
- memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
- dev_set_mac_address(port->dev, &sa, NULL);
+ ss.ss_family = port->dev->type;
+ memcpy(&ss.__data, port->perm_addr, port->dev->addr_len);
+ dev_set_mac_address(port->dev, &ss, NULL);
}
kfree(port);
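
Editor's note: this conversion follows a tree-wide change of dev_set_mac_address() to take struct sockaddr_storage. A hedged sketch of the new calling pattern, assuming that signature (example_restore_mac() is hypothetical):

	#include <linux/netdevice.h>
	#include <linux/socket.h>
	#include <linux/string.h>

	/* Restore a permanent MAC via the storage-based API; the larger
	 * type fits hardware addresses longer than the 14 bytes of the
	 * legacy sockaddr.sa_data.
	 */
	static int example_restore_mac(struct net_device *dev,
				       const u8 *perm_addr)
	{
		struct sockaddr_storage ss;

		ss.ss_family = dev->type;
		memcpy(&ss.__data, perm_addr, dev->addr_len);

		return dev_set_mac_address(dev, &ss, NULL);
	}
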
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
index e8d4b01c3f34..775a386d0aca 100644
--- a/drivers/net/mctp/mctp-usb.c
+++ b/drivers/net/mctp/mctp-usb.c
@@ -257,6 +257,8 @@ static int mctp_usb_open(struct net_device *dev)
WRITE_ONCE(mctp_usb->stopped, false);
+ netif_start_queue(dev);
+
return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
}
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index 4a7a303be2f7..7db40aaa079d 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -3,49 +3,30 @@
# MDIO Layer Configuration
#
-menuconfig MDIO_DEVICE
- tristate "MDIO bus device drivers"
- help
- MDIO devices and driver infrastructure code.
-
-if MDIO_DEVICE
-
config MDIO_BUS
- tristate
- default m if PHYLIB=m
- default MDIO_DEVICE
+ tristate "MDIO bus consumer layer"
help
- This internal symbol is used for link time dependencies and it
- reflects whether the mdio_bus/mdio_device code is built as a
- loadable module or built-in.
+ MDIO bus consumer layer
+
+if PHYLIB
config FWNODE_MDIO
- def_tristate PHYLIB
- depends on (ACPI || OF) || COMPILE_TEST
+ def_tristate (ACPI || OF) || COMPILE_TEST
select FIXED_PHY
help
FWNODE MDIO bus (Ethernet PHY) accessors
config OF_MDIO
- def_tristate PHYLIB
- depends on OF
- depends on PHYLIB
+ def_tristate OF
select FIXED_PHY
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
config ACPI_MDIO
- def_tristate PHYLIB
- depends on ACPI
- depends on PHYLIB
+ def_tristate ACPI
help
ACPI MDIO bus (Ethernet PHY) accessors
-if MDIO_BUS
-
-config MDIO_DEVRES
- tristate
-
config MDIO_SUN4I
tristate "Allwinner sun4i MDIO interface support"
depends on ARCH_SUNXI || COMPILE_TEST
@@ -65,7 +46,6 @@ config MDIO_ASPEED
tristate "ASPEED MDIO bus controller"
depends on ARCH_ASPEED || COMPILE_TEST
depends on OF_MDIO && HAS_IOMEM
- depends on MDIO_DEVRES
help
This module provides a driver for the independent MDIO bus
controllers found in the ASPEED AST2600 SoC. This is a driver for the
@@ -135,7 +115,6 @@ config MDIO_I2C
config MDIO_MVUSB
tristate "Marvell USB to MDIO Adapter"
depends on USB
- select MDIO_DEVRES
help
A USB to MDIO converter present on development boards for
Marvell's Link Street family of Ethernet switches.
@@ -143,7 +122,6 @@ config MDIO_MVUSB
config MDIO_MSCC_MIIM
tristate "Microsemi MIIM interface support"
depends on HAS_IOMEM && REGMAP_MMIO
- select MDIO_DEVRES
help
This driver supports the MIIM (MDIO) interface found in the network
switches of the Microsemi SoCs; it is recommended to switch on
@@ -161,7 +139,6 @@ config MDIO_OCTEON
depends on (64BIT && OF_MDIO) || COMPILE_TEST
depends on HAS_IOMEM
select MDIO_CAVIUM
- select MDIO_DEVRES
help
This module provides a driver for the Octeon and ThunderX MDIO
buses. It is required by the Octeon and ThunderX ethernet device
@@ -171,7 +148,6 @@ config MDIO_IPQ4019
tristate "Qualcomm IPQ4019 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
depends on COMMON_CLK
- depends on MDIO_DEVRES
help
This driver supports the MDIO interface found in Qualcomm
IPQ40xx, IPQ60xx, IPQ807x and IPQ50xx series SoCs.
@@ -180,11 +156,17 @@ config MDIO_IPQ8064
tristate "Qualcomm IPQ8064 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
depends on MFD_SYSCON
- depends on MDIO_DEVRES
help
This driver supports the MDIO interface found in the network
interface units of the IPQ8064 SoC
+config MDIO_REALTEK_RTL9300
+ tristate "Realtek RTL9300 MDIO interface support"
+ depends on MACH_REALTEK_RTL || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the Realtek
+ RTL9300 family of Ethernet switches with integrated SoC.
+
config MDIO_REGMAP
tristate
help
@@ -201,7 +183,6 @@ config MDIO_THUNDER
depends on 64BIT
depends on PCI
select MDIO_CAVIUM
- select MDIO_DEVRES
help
This driver supports the MDIO interfaces found on Cavium
ThunderX SoCs when the MDIO bus device appears as a PCI
@@ -299,4 +280,3 @@ config MDIO_BUS_MUX_MMIOREG
endif
-endif
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index 1015f0db4531..c23778e73890 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MDIO_REALTEK_RTL9300) += mdio-realtek-rtl9300.o
obj-$(CONFIG_MDIO_REGMAP) += mdio-regmap.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 074d96328f41..b6e30bdf5325 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -334,9 +334,9 @@ static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
NULL, unimac_mdio_resume);
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,asp-v3.0-mdio", },
{ .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
- { .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,bcm6846-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
diff --git a/drivers/net/mdio/mdio-realtek-rtl9300.c b/drivers/net/mdio/mdio-realtek-rtl9300.c
new file mode 100644
index 000000000000..33694c3ff9a7
--- /dev/null
+++ b/drivers/net/mdio/mdio-realtek-rtl9300.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MDIO controller for RTL9300 switches with integrated SoC.
+ *
+ * The MDIO communication is abstracted by the switch. At the software level
+ * communication uses the switch port to address the PHY. We work out the
+ * mapping based on the MDIO bus described in device tree and phandles on the
+ * ethernet-ports property.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/find.h>
+#include <linux/mdio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define SMI_GLB_CTRL 0xca00
+#define GLB_CTRL_INTF_SEL(intf) BIT(16 + (intf))
+#define SMI_PORT0_15_POLLING_SEL 0xca08
+#define SMI_ACCESS_PHY_CTRL_0 0xcb70
+#define SMI_ACCESS_PHY_CTRL_1 0xcb74
+#define PHY_CTRL_REG_ADDR GENMASK(24, 20)
+#define PHY_CTRL_PARK_PAGE GENMASK(19, 15)
+#define PHY_CTRL_MAIN_PAGE GENMASK(14, 3)
+#define PHY_CTRL_WRITE BIT(2)
+#define PHY_CTRL_READ 0
+#define PHY_CTRL_TYPE_C45 BIT(1)
+#define PHY_CTRL_TYPE_C22 0
+#define PHY_CTRL_CMD BIT(0)
+#define PHY_CTRL_FAIL BIT(25)
+#define SMI_ACCESS_PHY_CTRL_2 0xcb78
+#define PHY_CTRL_INDATA GENMASK(31, 16)
+#define PHY_CTRL_DATA GENMASK(15, 0)
+#define SMI_ACCESS_PHY_CTRL_3 0xcb7c
+#define PHY_CTRL_MMD_DEVAD GENMASK(20, 16)
+#define PHY_CTRL_MMD_REG GENMASK(15, 0)
+#define SMI_PORT0_5_ADDR_CTRL 0xcb80
+
+#define MAX_PORTS 28
+#define MAX_SMI_BUSSES 4
+#define MAX_SMI_ADDR 0x1f
+
+struct rtl9300_mdio_priv {
+ struct regmap *regmap;
+ struct mutex lock; /* protect HW access */
+ DECLARE_BITMAP(valid_ports, MAX_PORTS);
+ u8 smi_bus[MAX_PORTS];
+ u8 smi_addr[MAX_PORTS];
+ bool smi_bus_is_c45[MAX_SMI_BUSSES];
+ struct mii_bus *bus[MAX_SMI_BUSSES];
+};
+
+struct rtl9300_mdio_chan {
+ struct rtl9300_mdio_priv *priv;
+ u8 mdio_bus;
+};
+
+static int rtl9300_mdio_phy_to_port(struct mii_bus *bus, int phy_id)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ int i;
+
+ priv = chan->priv;
+
+ for_each_set_bit(i, priv->valid_ports, MAX_PORTS)
+ if (priv->smi_bus[i] == chan->mdio_bus &&
+ priv->smi_addr[i] == phy_id)
+ return i;
+
+ return -ENOENT;
+}
+
+static int rtl9300_mdio_wait_ready(struct rtl9300_mdio_priv *priv)
+{
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+
+ lockdep_assert_held(&priv->lock);
+
+ return regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 1000);
+}
+
+static int rtl9300_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, FIELD_PREP(PHY_CTRL_INDATA, port));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_REG_ADDR, regnum) |
+ FIELD_PREP(PHY_CTRL_PARK_PAGE, 0x1f) |
+ FIELD_PREP(PHY_CTRL_MAIN_PAGE, 0xfff) |
+ PHY_CTRL_READ | PHY_CTRL_TYPE_C22 | PHY_CTRL_CMD;
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1, val);
+ if (err)
+ goto out_err;
+
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_read(regmap, SMI_ACCESS_PHY_CTRL_2, &val);
+ if (err)
+ goto out_err;
+
+ mutex_unlock(&priv->lock);
+ return FIELD_GET(PHY_CTRL_DATA, val);
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_0, BIT(port));
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, FIELD_PREP(PHY_CTRL_INDATA, value));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_REG_ADDR, regnum) |
+ FIELD_PREP(PHY_CTRL_PARK_PAGE, 0x1f) |
+ FIELD_PREP(PHY_CTRL_MAIN_PAGE, 0xfff) |
+ PHY_CTRL_WRITE | PHY_CTRL_TYPE_C22 | PHY_CTRL_CMD;
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 100);
+ if (err)
+ goto out_err;
+
+ if (val & PHY_CTRL_FAIL) {
+ err = -ENXIO;
+ goto out_err;
+ }
+
+ mutex_unlock(&priv->lock);
+ return 0;
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr, int regnum)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_INDATA, port);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, val);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_MMD_DEVAD, dev_addr) |
+ FIELD_PREP(PHY_CTRL_MMD_REG, regnum);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_3, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1,
+ PHY_CTRL_READ | PHY_CTRL_TYPE_C45 | PHY_CTRL_CMD);
+ if (err)
+ goto out_err;
+
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_read(regmap, SMI_ACCESS_PHY_CTRL_2, &val);
+ if (err)
+ goto out_err;
+
+ mutex_unlock(&priv->lock);
+ return FIELD_GET(PHY_CTRL_DATA, val);
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_0, BIT(port));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_INDATA, value);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, val);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_MMD_DEVAD, dev_addr) |
+ FIELD_PREP(PHY_CTRL_MMD_REG, regnum);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_3, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1,
+ PHY_CTRL_TYPE_C45 | PHY_CTRL_WRITE | PHY_CTRL_CMD);
+ if (err)
+ goto out_err;
+
+ err = regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 100);
+ if (err)
+ goto out_err;
+
+ if (val & PHY_CTRL_FAIL) {
+ err = -ENXIO;
+ goto out_err;
+ }
+
+ mutex_unlock(&priv->lock);
+ return 0;
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdiobus_init(struct rtl9300_mdio_priv *priv)
+{
+ u32 glb_ctrl_mask = 0, glb_ctrl_val = 0;
+ struct regmap *regmap = priv->regmap;
+ u32 port_addr[5] = { 0 };
+ u32 poll_sel[2] = { 0 };
+ int i, err;
+
+ /* Associate the port with the SMI interface and PHY */
+ for_each_set_bit(i, priv->valid_ports, MAX_PORTS) {
+ int pos;
+
+ pos = (i % 6) * 5;
+ port_addr[i / 6] |= (priv->smi_addr[i] & 0x1f) << pos;
+
+ pos = (i % 16) * 2;
+ poll_sel[i / 16] |= (priv->smi_bus[i] & 0x3) << pos;
+ }
+
+ /* Put the interfaces into C45 mode if required */
+ glb_ctrl_mask = GENMASK(19, 16);
+ for (i = 0; i < MAX_SMI_BUSSES; i++)
+ if (priv->smi_bus_is_c45[i])
+ glb_ctrl_val |= GLB_CTRL_INTF_SEL(i);
+
+ err = regmap_bulk_write(regmap, SMI_PORT0_5_ADDR_CTRL,
+ port_addr, 5);
+ if (err)
+ return err;
+
+ err = regmap_bulk_write(regmap, SMI_PORT0_15_POLLING_SEL,
+ poll_sel, 2);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(regmap, SMI_GLB_CTRL,
+ glb_ctrl_mask, glb_ctrl_val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int rtl9300_mdiobus_probe_one(struct device *dev, struct rtl9300_mdio_priv *priv,
+ struct fwnode_handle *node)
+{
+ struct rtl9300_mdio_chan *chan;
+ struct fwnode_handle *child;
+ struct mii_bus *bus;
+ u32 mdio_bus;
+ int err;
+
+ err = fwnode_property_read_u32(node, "reg", &mdio_bus);
+ if (err)
+ return err;
+
+ /* The MDIO accesses from the kernel work with the PHY polling unit in
+ * the switch. We need to tell the PPU to operate either in GPHY (i.e.
+ * clause 22) or 10GPHY mode (i.e. clause 45).
+ *
+ * We select 10GPHY mode if there is at least one PHY that declares
+ * compatible = "ethernet-phy-ieee802.3-c45". This does mean we can't
+ * support both c45 and c22 on the same MDIO bus.
+ */
+ fwnode_for_each_child_node(node, child)
+ if (fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45"))
+ priv->smi_bus_is_c45[mdio_bus] = true;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*chan));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Realtek Switch MDIO Bus";
+ if (priv->smi_bus_is_c45[mdio_bus]) {
+ bus->read_c45 = rtl9300_mdio_read_c45;
+ bus->write_c45 = rtl9300_mdio_write_c45;
+ } else {
+ bus->read = rtl9300_mdio_read_c22;
+ bus->write = rtl9300_mdio_write_c22;
+ }
+ bus->parent = dev;
+ chan = bus->priv;
+ chan->mdio_bus = mdio_bus;
+ chan->priv = priv;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", dev_name(dev), mdio_bus);
+
+ err = devm_of_mdiobus_register(dev, bus, to_of_node(node));
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
+
+ return 0;
+}
+
+/* The mdio-controller is part of a switch block so we parse the sibling
+ * ethernet-ports node and build a mapping of the switch port to MDIO bus/addr
+ * based on the phy-handle.
+ */
+static int rtl9300_mdiobus_map_ports(struct device *dev)
+{
+ struct rtl9300_mdio_priv *priv = dev_get_drvdata(dev);
+ struct device *parent = dev->parent;
+ struct fwnode_handle *port;
+ int err;
+
+ struct fwnode_handle *ports __free(fwnode_handle) =
+ device_get_named_child_node(parent, "ethernet-ports");
+ if (!ports)
+ return dev_err_probe(dev, -EINVAL, "%pfwP missing ethernet-ports\n",
+ dev_fwnode(parent));
+
+ fwnode_for_each_child_node(ports, port) {
+ struct device_node *mdio_dn;
+ u32 addr;
+ u32 bus;
+ u32 pn;
+
+ struct device_node *phy_dn __free(device_node) =
+ of_parse_phandle(to_of_node(port), "phy-handle", 0);
+ /* skip ports without phys */
+ if (!phy_dn)
+ continue;
+
+ mdio_dn = phy_dn->parent;
+ /* only map ports that are connected to this mdio-controller */
+ if (mdio_dn->parent != dev->of_node)
+ continue;
+
+ err = fwnode_property_read_u32(port, "reg", &pn);
+ if (err)
+ return err;
+
+ if (pn >= MAX_PORTS)
+ return dev_err_probe(dev, -EINVAL, "illegal port number %d\n", pn);
+
+ if (test_bit(pn, priv->valid_ports))
+ return dev_err_probe(dev, -EINVAL, "duplicated port number %d\n", pn);
+
+ err = of_property_read_u32(mdio_dn, "reg", &bus);
+ if (err)
+ return err;
+
+ if (bus >= MAX_SMI_BUSSES)
+ return dev_err_probe(dev, -EINVAL, "illegal smi bus number %d\n", bus);
+
+ err = of_property_read_u32(phy_dn, "reg", &addr);
+ if (err)
+ return err;
+
+ __set_bit(pn, priv->valid_ports);
+ priv->smi_bus[pn] = bus;
+ priv->smi_addr[pn] = addr;
+ }
+
+ return 0;
+}
+
+static int rtl9300_mdiobus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtl9300_mdio_priv *priv;
+ struct fwnode_handle *child;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = devm_mutex_init(dev, &priv->lock);
+ if (err)
+ return err;
+
+ priv->regmap = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ platform_set_drvdata(pdev, priv);
+
+ err = rtl9300_mdiobus_map_ports(dev);
+ if (err)
+ return err;
+
+ device_for_each_child_node(dev, child) {
+ err = rtl9300_mdiobus_probe_one(dev, priv, child);
+ if (err)
+ return err;
+ }
+
+ err = rtl9300_mdiobus_init(priv);
+ if (err)
+ return dev_err_probe(dev, err, "failed to initialise MDIO bus controller\n");
+
+ return 0;
+}
+
+static const struct of_device_id rtl9300_mdio_ids[] = {
+ { .compatible = "realtek,rtl9301-mdio" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtl9300_mdio_ids);
+
+static struct platform_driver rtl9300_mdio_driver = {
+ .probe = rtl9300_mdiobus_probe,
+ .driver = {
+ .name = "mdio-rtl9300",
+ .of_match_table = rtl9300_mdio_ids,
+ },
+};
+
+module_platform_driver(rtl9300_mdio_driver);
+
+MODULE_DESCRIPTION("RTL9300 MDIO driver");
+MODULE_LICENSE("GPL");
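
Editor's note: a worked example of the field packing performed in rtl9300_mdiobus_init() above, with illustrative values (example_pack() is not driver code):

	/* Each SMI_PORT*_ADDR_CTRL word carries six 5-bit PHY addresses,
	 * and each polling-select word carries sixteen 2-bit SMI bus
	 * numbers. For switch port 7 with smi_addr 0x11 on smi_bus 2:
	 */
	static void example_pack(u32 port_addr[5], u32 poll_sel[2])
	{
		int port = 7;

		/* 7 / 6 = word 1, (7 % 6) * 5 = bit 5 -> bits 9:5 */
		port_addr[port / 6] |= (0x11 & 0x1f) << ((port % 6) * 5);

		/* 7 / 16 = word 0, (7 % 16) * 2 = bit 14 -> bits 15:14 */
		poll_sel[port / 16] |= (2 & 0x3) << ((port % 16) * 2);
	}
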
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 1e1aa72b1eff..a3047f7258a7 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -40,16 +40,16 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
return err;
}
- err = pci_request_regions(pdev, KBUILD_MODNAME);
+ err = pcim_request_all_regions(pdev, KBUILD_MODNAME);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
goto err_disable_device;
}
nexus->bar0 = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (!nexus->bar0) {
err = -ENOMEM;
- goto err_release_regions;
+ goto err_disable_device;
}
i = 0;
@@ -107,9 +107,6 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
}
return 0;
-err_release_regions:
- pci_release_regions(pdev);
-
err_disable_device:
pci_set_drvdata(pdev, NULL);
return err;
@@ -129,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
mdiobus_unregister(bus->mii_bus);
oct_mdio_writeq(0, bus->register_base + SMI_EN);
}
- pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
}
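
Editor's note: the thunder hunks above switch to the managed (devres) PCI region API, which is why both the probe error path and ->remove() lose their pci_release_regions() calls. A minimal sketch of the pattern, under the assumption that the whole probe uses managed helpers:

	/* Once pcim_request_all_regions() succeeds, the regions are
	 * released automatically on driver detach; nothing to unwind by
	 * hand on later errors.
	 */
	static int example_probe(struct pci_dev *pdev)
	{
		void __iomem *bar0;
		int err;

		err = pcim_enable_device(pdev);
		if (err)
			return err;

		err = pcim_request_all_regions(pdev, KBUILD_MODNAME);
		if (err)
			return err;

		bar0 = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
		if (!bar0)
			return -ENOMEM;

		return 0;
	}
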
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 2f4fc664d2e1..98f667b121f7 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -458,7 +458,7 @@ int of_phy_register_fixed_link(struct device_node *np)
return -ENODEV;
register_phy:
- return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np));
+ return PTR_ERR_OR_ZERO(fixed_phy_register(&status, np));
}
EXPORT_SYMBOL(of_phy_register_fixed_link);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4289ccd3e41b..176935a8645f 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -1252,7 +1252,6 @@ static int sysdata_append_release(struct netconsole_target *nt, int offset)
*/
static int prepare_extradata(struct netconsole_target *nt)
{
- u32 fields = SYSDATA_CPU_NR | SYSDATA_TASKNAME;
int extradata_len;
/* userdata was appended when configfs write helper was called
@@ -1260,7 +1259,7 @@ static int prepare_extradata(struct netconsole_target *nt)
*/
extradata_len = nt->userdata_length;
- if (!(nt->sysdata_fields & fields))
+ if (!nt->sysdata_fields)
goto out;
if (nt->sysdata_fields & SYSDATA_CPU_NR)
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index d88bdb9a1717..47cdee5577d4 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -85,11 +85,11 @@ static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec)
return -ENOSPC;
}
-static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int nsim_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -129,17 +129,16 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
return 0;
}
-static int nsim_ipsec_add_sa(struct xfrm_state *xs,
+static int nsim_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
struct nsim_ipsec *ipsec;
- struct net_device *dev;
struct netdevsim *ns;
struct nsim_sa sa;
u16 sa_idx;
int ret;
- dev = xs->xso.real_dev;
ns = netdev_priv(dev);
ipsec = &ns->ipsec;
@@ -174,7 +173,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs,
sa.crypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
+ ret = nsim_ipsec_parse_proto_keys(dev, xs, sa.key, &sa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for SA table");
return ret;
@@ -200,9 +199,9 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs,
return 0;
}
-static void nsim_ipsec_del_sa(struct xfrm_state *xs)
+static void nsim_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
+ struct netdevsim *ns = netdev_priv(dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
u16 sa_idx;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0e0321a7ddd7..fa5fbd97ad69 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -29,6 +29,7 @@
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
#include <net/udp_tunnel.h>
+#include <net/busy_poll.h>
#include "netdevsim.h"
@@ -357,6 +358,7 @@ static int nsim_rcv(struct nsim_rq *rq, int budget)
break;
skb = skb_dequeue(&rq->skb_queue);
+ skb_mark_napi_id(skb, &rq->napi);
netif_receive_skb(skb);
}
@@ -369,7 +371,8 @@ static int nsim_poll(struct napi_struct *napi, int budget)
int done;
done = nsim_rcv(rq, budget);
- napi_complete(napi);
+ if (done < budget)
+ napi_complete_done(napi, done);
return done;
}
@@ -879,11 +882,13 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_SG |
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
+ NETIF_F_LRO |
NETIF_F_TSO;
dev->hw_features |= NETIF_F_HW_TC |
NETIF_F_SG |
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
+ NETIF_F_LRO |
NETIF_F_TSO;
dev->max_mtu = ETH_MAX_MTU;
dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ef4204638392..fbeae05817e9 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -229,7 +229,7 @@ err:
static void ntb_netdev_tx_timer(struct timer_list *t)
{
- struct ntb_netdev *dev = from_timer(dev, t, tx_timer);
+ struct ntb_netdev *dev = timer_container_of(dev, t, tx_timer);
struct net_device *ndev = dev->ndev;
if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
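
Editor's note: from_timer() was renamed tree-wide to timer_container_of(); a hedged sketch of the idiom (example_priv/example_timer_cb are hypothetical):

	#include <linux/timer.h>

	struct example_priv {
		struct timer_list tx_timer;
	};

	/* Recover the embedding structure from the timer_list passed to
	 * the callback -- the same container_of() semantics as the old
	 * from_timer().
	 */
	static void example_timer_cb(struct timer_list *t)
	{
		struct example_priv *priv =
			timer_container_of(priv, t, tx_timer);

		(void)priv;	/* ... re-arm timer, wake queues, etc. ... */
	}
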
diff --git a/drivers/net/ovpn/Makefile b/drivers/net/ovpn/Makefile
new file mode 100644
index 000000000000..229be66167e1
--- /dev/null
+++ b/drivers/net/ovpn/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# ovpn -- OpenVPN data channel offload in kernel space
+#
+# Copyright (C) 2020-2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+obj-$(CONFIG_OVPN) := ovpn.o
+ovpn-y += bind.o
+ovpn-y += crypto.o
+ovpn-y += crypto_aead.o
+ovpn-y += main.o
+ovpn-y += io.o
+ovpn-y += netlink.o
+ovpn-y += netlink-gen.o
+ovpn-y += peer.o
+ovpn-y += pktid.o
+ovpn-y += socket.o
+ovpn-y += stats.o
+ovpn-y += tcp.o
+ovpn-y += udp.o
diff --git a/drivers/net/ovpn/bind.c b/drivers/net/ovpn/bind.c
new file mode 100644
index 000000000000..24d2788a277e
--- /dev/null
+++ b/drivers/net/ovpn/bind.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2012-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+
+#include "ovpnpriv.h"
+#include "bind.h"
+#include "peer.h"
+
+/**
+ * ovpn_bind_from_sockaddr - allocate a binding for the given sockaddr
+ * @ss: the sockaddr to encapsulate in the binding
+ *
+ * Return: the new bind on success, or an ERR_PTR on failure
+ */
+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss)
+{
+ struct ovpn_bind *bind;
+ size_t sa_len;
+
+ if (ss->ss_family == AF_INET)
+ sa_len = sizeof(struct sockaddr_in);
+ else if (ss->ss_family == AF_INET6)
+ sa_len = sizeof(struct sockaddr_in6);
+ else
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ bind = kzalloc(sizeof(*bind), GFP_ATOMIC);
+ if (unlikely(!bind))
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&bind->remote, ss, sa_len);
+
+ return bind;
+}
+
+/**
+ * ovpn_bind_reset - assign new binding to peer
+ * @peer: the peer whose binding has to be replaced
+ * @new: the new bind to assign
+ */
+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *new)
+{
+ lockdep_assert_held(&peer->lock);
+
+ kfree_rcu(rcu_replace_pointer(peer->bind, new,
+ lockdep_is_held(&peer->lock)), rcu);
+}
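
Editor's note: ovpn_bind_reset() above folds pointer replacement and deferred free into one expression; here is an equivalent, expanded sketch of the idiom, assuming peer->lock is the spinlock the lockdep assertion refers to (example_bind_replace() is hypothetical):

	/* Publish the new binding and free the old one only after an RCU
	 * grace period, so concurrent readers never touch freed memory.
	 */
	static void example_bind_replace(struct ovpn_peer *peer,
					 struct ovpn_bind *new)
	{
		struct ovpn_bind *old;

		spin_lock_bh(&peer->lock);
		old = rcu_replace_pointer(peer->bind, new,
					  lockdep_is_held(&peer->lock));
		spin_unlock_bh(&peer->lock);

		if (old)
			kfree_rcu(old, rcu);
	}
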
diff --git a/drivers/net/ovpn/bind.h b/drivers/net/ovpn/bind.h
new file mode 100644
index 000000000000..4e0b8398bfd9
--- /dev/null
+++ b/drivers/net/ovpn/bind.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2012-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNBIND_H_
+#define _NET_OVPN_OVPNBIND_H_
+
+#include <net/ip.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+struct ovpn_peer;
+
+/**
+ * union ovpn_sockaddr - basic transport layer address
+ * @in4: IPv4 address
+ * @in6: IPv6 address
+ */
+union ovpn_sockaddr {
+ struct sockaddr_in in4;
+ struct sockaddr_in6 in6;
+};
+
+/**
+ * struct ovpn_bind - remote peer binding
+ * @remote: the remote peer sockaddress
+ * @local: local endpoint used to talk to the peer
+ * @local.ipv4: local IPv4 used to talk to the peer
+ * @local.ipv6: local IPv6 used to talk to the peer
+ * @rcu: used to schedule RCU cleanup job
+ */
+struct ovpn_bind {
+ union ovpn_sockaddr remote; /* remote sockaddr */
+
+ union {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } local;
+
+ struct rcu_head rcu;
+};
+
+/**
+ * ovpn_bind_skb_src_match - match packet source with binding
+ * @bind: the binding to match
+ * @skb: the packet to match
+ *
+ * Return: true if the packet source matches the remote peer sockaddr
+ * in the binding
+ */
+static inline bool ovpn_bind_skb_src_match(const struct ovpn_bind *bind,
+ const struct sk_buff *skb)
+{
+ const union ovpn_sockaddr *remote;
+
+ if (unlikely(!bind))
+ return false;
+
+ remote = &bind->remote;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (unlikely(remote->in4.sin_family != AF_INET))
+ return false;
+
+ if (unlikely(remote->in4.sin_addr.s_addr != ip_hdr(skb)->saddr))
+ return false;
+
+ if (unlikely(remote->in4.sin_port != udp_hdr(skb)->source))
+ return false;
+ break;
+ case htons(ETH_P_IPV6):
+ if (unlikely(remote->in6.sin6_family != AF_INET6))
+ return false;
+
+ if (unlikely(!ipv6_addr_equal(&remote->in6.sin6_addr,
+ &ipv6_hdr(skb)->saddr)))
+ return false;
+
+ if (unlikely(remote->in6.sin6_port != udp_hdr(skb)->source))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss);
+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *bind);
+
+#endif /* _NET_OVPN_OVPNBIND_H_ */
diff --git a/drivers/net/ovpn/crypto.c b/drivers/net/ovpn/crypto.c
new file mode 100644
index 000000000000..90580e32052f
--- /dev/null
+++ b/drivers/net/ovpn/crypto.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/ovpn.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "pktid.h"
+#include "crypto_aead.h"
+#include "crypto.h"
+
+static void ovpn_ks_destroy_rcu(struct rcu_head *head)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = container_of(head, struct ovpn_crypto_key_slot, rcu);
+ ovpn_aead_crypto_key_slot_destroy(ks);
+}
+
+void ovpn_crypto_key_slot_release(struct kref *kref)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = container_of(kref, struct ovpn_crypto_key_slot, refcount);
+ call_rcu(&ks->rcu, ovpn_ks_destroy_rcu);
+}
+
+/* can only be invoked when all peer references have been dropped (i.e. RCU
+ * release routine)
+ */
+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = rcu_access_pointer(cs->slots[0]);
+ if (ks) {
+ RCU_INIT_POINTER(cs->slots[0], NULL);
+ ovpn_crypto_key_slot_put(ks);
+ }
+
+ ks = rcu_access_pointer(cs->slots[1]);
+ if (ks) {
+ RCU_INIT_POINTER(cs->slots[1], NULL);
+ ovpn_crypto_key_slot_put(ks);
+ }
+}
+
+/* removes the key matching the specified id from the crypto context */
+bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+
+ spin_lock_bh(&cs->lock);
+ if (rcu_access_pointer(cs->slots[0])->key_id == key_id) {
+ ks = rcu_replace_pointer(cs->slots[0], NULL,
+ lockdep_is_held(&cs->lock));
+ } else if (rcu_access_pointer(cs->slots[1])->key_id == key_id) {
+ ks = rcu_replace_pointer(cs->slots[1], NULL,
+ lockdep_is_held(&cs->lock));
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (ks)
+ ovpn_crypto_key_slot_put(ks);
+
+ /* let the caller know if a key was actually killed */
+ return ks;
+}
+
+/* Reset the ovpn_crypto_state object in a way that is atomic
+ * to RCU readers.
+ */
+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs,
+ const struct ovpn_peer_key_reset *pkr)
+{
+ struct ovpn_crypto_key_slot *old = NULL, *new;
+ u8 idx;
+
+ if (pkr->slot != OVPN_KEY_SLOT_PRIMARY &&
+ pkr->slot != OVPN_KEY_SLOT_SECONDARY)
+ return -EINVAL;
+
+ new = ovpn_aead_crypto_key_slot_new(&pkr->key);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ switch (pkr->slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ old = rcu_replace_pointer(cs->slots[idx], new,
+ lockdep_is_held(&cs->lock));
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ old = rcu_replace_pointer(cs->slots[!idx], new,
+ lockdep_is_held(&cs->lock));
+ break;
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (old)
+ ovpn_crypto_key_slot_put(old);
+
+ return 0;
+}
+
+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+ u8 idx;
+
+ if (slot != OVPN_KEY_SLOT_PRIMARY &&
+ slot != OVPN_KEY_SLOT_SECONDARY) {
+ pr_warn("Invalid slot to release: %u\n", slot);
+ return;
+ }
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ switch (slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ ks = rcu_replace_pointer(cs->slots[idx], NULL,
+ lockdep_is_held(&cs->lock));
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ ks = rcu_replace_pointer(cs->slots[!idx], NULL,
+ lockdep_is_held(&cs->lock));
+ break;
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (!ks) {
+ pr_debug("Key slot already released: %u\n", slot);
+ return;
+ }
+
+ pr_debug("deleting key slot %u, key_id=%u\n", slot, ks->key_id);
+ ovpn_crypto_key_slot_put(ks);
+}
+
+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs)
+{
+ const struct ovpn_crypto_key_slot *old_primary, *old_secondary;
+ u8 idx;
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ old_primary = rcu_dereference_protected(cs->slots[idx],
+ lockdep_is_held(&cs->lock));
+ old_secondary = rcu_dereference_protected(cs->slots[!idx],
+ lockdep_is_held(&cs->lock));
+ /* perform real swap by switching the index of the primary key */
+ WRITE_ONCE(cs->primary_idx, !cs->primary_idx);
+
+ pr_debug("key swapped: (old primary) %d <-> (new primary) %d\n",
+ old_primary ? old_primary->key_id : -1,
+ old_secondary ? old_secondary->key_id : -1);
+
+ spin_unlock_bh(&cs->lock);
+}
+
+/**
+ * ovpn_crypto_config_get - populate keyconf object with non-sensitive key data
+ * @cs: the crypto state to extract the key data from
+ * @slot: the specific slot to inspect
+ * @keyconf: the output object to populate
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot,
+ struct ovpn_key_config *keyconf)
+{
+ struct ovpn_crypto_key_slot *ks;
+ int idx;
+
+ switch (slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ idx = cs->primary_idx;
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ idx = !cs->primary_idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ ks = rcu_dereference(cs->slots[idx]);
+ if (!ks) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ keyconf->cipher_alg = ovpn_aead_crypto_alg(ks);
+ keyconf->key_id = ks->key_id;
+ rcu_read_unlock();
+
+ return 0;
+}
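
Editor's note: the swap in ovpn_crypto_key_slots_swap() works because readers select the primary slot through a single index read; a hedged sketch of the reader side (essentially what ovpn_crypto_key_slot_primary() in crypto.h does):

	/* One READ_ONCE of primary_idx picks the slot; flipping the index
	 * in the writer atomically retargets subsequent readers without
	 * moving either key slot.
	 */
	static struct ovpn_crypto_key_slot *
	example_primary(struct ovpn_crypto_state *cs)
	{
		struct ovpn_crypto_key_slot *ks;

		rcu_read_lock();
		ks = rcu_dereference(cs->slots[READ_ONCE(cs->primary_idx)]);
		if (ks && !ovpn_crypto_key_slot_hold(ks))
			ks = NULL;	/* slot being torn down */
		rcu_read_unlock();

		return ks;
	}
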
diff --git a/drivers/net/ovpn/crypto.h b/drivers/net/ovpn/crypto.h
new file mode 100644
index 000000000000..0e284fec3a75
--- /dev/null
+++ b/drivers/net/ovpn/crypto.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNCRYPTO_H_
+#define _NET_OVPN_OVPNCRYPTO_H_
+
+#include "pktid.h"
+#include "proto.h"
+
+/* info needed for both encrypt and decrypt directions */
+struct ovpn_key_direction {
+ const u8 *cipher_key;
+ size_t cipher_key_size;
+ const u8 *nonce_tail; /* only needed for GCM modes */
+ size_t nonce_tail_size; /* only needed for GCM modes */
+};
+
+/* all info for a particular symmetric key (primary or secondary) */
+struct ovpn_key_config {
+ enum ovpn_cipher_alg cipher_alg;
+ u8 key_id;
+ struct ovpn_key_direction encrypt;
+ struct ovpn_key_direction decrypt;
+};
+
+/* used to pass settings from netlink to the crypto engine */
+struct ovpn_peer_key_reset {
+ enum ovpn_key_slot slot;
+ struct ovpn_key_config key;
+};
+
+struct ovpn_crypto_key_slot {
+ u8 key_id;
+
+ struct crypto_aead *encrypt;
+ struct crypto_aead *decrypt;
+ u8 nonce_tail_xmit[OVPN_NONCE_TAIL_SIZE];
+ u8 nonce_tail_recv[OVPN_NONCE_TAIL_SIZE];
+
+ struct ovpn_pktid_recv pid_recv ____cacheline_aligned_in_smp;
+ struct ovpn_pktid_xmit pid_xmit ____cacheline_aligned_in_smp;
+ struct kref refcount;
+ struct rcu_head rcu;
+};
+
+struct ovpn_crypto_state {
+ struct ovpn_crypto_key_slot __rcu *slots[2];
+ u8 primary_idx;
+
+ /* protects primary and secondary slots */
+ spinlock_t lock;
+};
+
+static inline bool ovpn_crypto_key_slot_hold(struct ovpn_crypto_key_slot *ks)
+{
+ return kref_get_unless_zero(&ks->refcount);
+}
+
+static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs)
+{
+ RCU_INIT_POINTER(cs->slots[0], NULL);
+ RCU_INIT_POINTER(cs->slots[1], NULL);
+ cs->primary_idx = 0;
+ spin_lock_init(&cs->lock);
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id)
+{
+ struct ovpn_crypto_key_slot *ks;
+ u8 idx;
+
+ if (unlikely(!cs))
+ return NULL;
+
+ rcu_read_lock();
+ idx = READ_ONCE(cs->primary_idx);
+ ks = rcu_dereference(cs->slots[idx]);
+ if (ks && ks->key_id == key_id) {
+ if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ goto out;
+ }
+
+ ks = rcu_dereference(cs->slots[!idx]);
+ if (ks && ks->key_id == key_id) {
+ if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ goto out;
+ }
+
+ /* when both key slots are occupied but no matching key ID is found, ks
+ * has to be reset to NULL to avoid carrying a stale pointer
+ */
+ ks = NULL;
+out:
+ rcu_read_unlock();
+
+ return ks;
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_slot_primary(const struct ovpn_crypto_state *cs)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ rcu_read_lock();
+ ks = rcu_dereference(cs->slots[cs->primary_idx]);
+ if (unlikely(ks && !ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ rcu_read_unlock();
+
+ return ks;
+}
+
+void ovpn_crypto_key_slot_release(struct kref *kref);
+
+static inline void ovpn_crypto_key_slot_put(struct ovpn_crypto_key_slot *ks)
+{
+ kref_put(&ks->refcount, ovpn_crypto_key_slot_release);
+}
+
+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs,
+ const struct ovpn_peer_key_reset *pkr);
+
+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot);
+
+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs);
+
+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs);
+
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot,
+ struct ovpn_key_config *keyconf);
+
+bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id);
+
+#endif /* _NET_OVPN_OVPNCRYPTO_H_ */
diff --git a/drivers/net/ovpn/crypto_aead.c b/drivers/net/ovpn/crypto_aead.c
new file mode 100644
index 000000000000..2cca759feffa
--- /dev/null
+++ b/drivers/net/ovpn/crypto_aead.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <crypto/aead.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "pktid.h"
+#include "crypto_aead.h"
+#include "crypto.h"
+#include "peer.h"
+#include "proto.h"
+#include "skb.h"
+
+#define OVPN_AUTH_TAG_SIZE 16
+#define OVPN_AAD_SIZE (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE)
+
+#define ALG_NAME_AES "gcm(aes)"
+#define ALG_NAME_CHACHAPOLY "rfc7539(chacha20,poly1305)"
+
+static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
+{
+ return OVPN_OPCODE_SIZE + /* OP header size */
+ sizeof(u32) + /* Packet ID */
+ crypto_aead_authsize(ks->encrypt); /* Auth Tag */
+}
+
+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb)
+{
+ const unsigned int tag_size = crypto_aead_authsize(ks->encrypt);
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ int nfrags, ret;
+ u32 pktid, op;
+ u8 *iv;
+
+ ovpn_skb_cb(skb)->peer = peer;
+ ovpn_skb_cb(skb)->ks = ks;
+
+ /* Sample AEAD header format:
+ * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a...
+ * [ OP32 ] [seq # ] [ auth tag ] [ payload ... ]
+ * [4-byte
+ * IV head]
+ */
+
+ /* check that there's enough headroom in the skb for packet
+ * encapsulation
+ */
+ if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM)))
+ return -ENOBUFS;
+
+ /* get number of skb frags and ensure that packet data is writable */
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (unlikely(nfrags < 0))
+ return nfrags;
+
+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
+ return -ENOSPC;
+
+ /* sg may be required by async crypto */
+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
+ (nfrags + 2), GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->sg))
+ return -ENOMEM;
+
+ sg = ovpn_skb_cb(skb)->sg;
+
+ /* sg table:
+ * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE),
+ * 1, 2, 3, ..., n: payload,
+ * n+1: auth_tag (len=tag_size)
+ */
+ sg_init_table(sg, nfrags + 2);
+
+ /* build scatterlist to encrypt packet payload */
+ ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ netdev_err(peer->ovpn->dev,
+ "encrypt: cannot map skb to sg: %d\n", ret);
+ return ret;
+ }
+
+ /* append auth_tag onto scatterlist */
+ __skb_push(skb, tag_size);
+ sg_set_buf(sg + ret + 1, skb->data, tag_size);
+
+ /* obtain packet ID, which is used both as a first
+ * 4 bytes of nonce and last 4 bytes of associated data.
+ */
+ ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
+ if (unlikely(ret < 0))
+ return ret;
+
+ /* iv may be required by async crypto */
+ ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->iv))
+ return -ENOMEM;
+
+ iv = ovpn_skb_cb(skb)->iv;
+
+ /* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes
+ * nonce
+ */
+ ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv);
+
+ /* make space for packet id and push it to the front */
+ __skb_push(skb, OVPN_NONCE_WIRE_SIZE);
+ memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE);
+
+ /* add packet op as head of additional data */
+ op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id);
+ __skb_push(skb, OVPN_OPCODE_SIZE);
+ BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE);
+ *((__force __be32 *)skb->data) = htonl(op);
+
+ /* AEAD Additional data */
+ sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
+
+ req = aead_request_alloc(ks->encrypt, GFP_ATOMIC);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ovpn_skb_cb(skb)->req = req;
+
+ /* setup async crypto operation */
+ aead_request_set_tfm(req, ks->encrypt);
+ aead_request_set_callback(req, 0, ovpn_encrypt_post, skb);
+ aead_request_set_crypt(req, sg, sg,
+ skb->len - ovpn_aead_encap_overhead(ks), iv);
+ aead_request_set_ad(req, OVPN_AAD_SIZE);
+
+ /* encrypt it */
+ return crypto_aead_encrypt(req);
+}
+
+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb)
+{
+ const unsigned int tag_size = crypto_aead_authsize(ks->decrypt);
+ int ret, payload_len, nfrags;
+ unsigned int payload_offset;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ u8 *iv;
+
+ payload_offset = OVPN_AAD_SIZE + tag_size;
+ payload_len = skb->len - payload_offset;
+
+ ovpn_skb_cb(skb)->payload_offset = payload_offset;
+ ovpn_skb_cb(skb)->peer = peer;
+ ovpn_skb_cb(skb)->ks = ks;
+
+ /* sanity check on packet size, payload size must be >= 0 */
+ if (unlikely(payload_len < 0))
+ return -EINVAL;
+
+ /* Prepare the skb data buffer to be accessed up until the auth tag.
+ * This is required because this area is directly mapped into the sg
+ * list.
+ */
+ if (unlikely(!pskb_may_pull(skb, payload_offset)))
+ return -ENODATA;
+
+ /* get number of skb frags and ensure that packet data is writable */
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (unlikely(nfrags < 0))
+ return nfrags;
+
+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
+ return -ENOSPC;
+
+ /* sg may be required by async crypto */
+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
+ (nfrags + 2), GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->sg))
+ return -ENOMEM;
+
+ sg = ovpn_skb_cb(skb)->sg;
+
+ /* sg table:
+ * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
+ * 1, 2, 3, ..., n: payload,
+ * n+1: auth_tag (len=tag_size)
+ */
+ sg_init_table(sg, nfrags + 2);
+
+ /* packet op is head of additional data */
+ sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
+
+ /* build scatterlist to decrypt packet payload */
+ ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len);
+ if (unlikely(ret < 0)) {
+ netdev_err(peer->ovpn->dev,
+ "decrypt: cannot map skb to sg: %d\n", ret);
+ return ret;
+ }
+
+ /* append auth_tag onto scatterlist */
+ sg_set_buf(sg + ret + 1, skb->data + OVPN_AAD_SIZE, tag_size);
+
+ /* iv may be required by async crypto */
+ ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->iv))
+ return -ENOMEM;
+
+ iv = ovpn_skb_cb(skb)->iv;
+
+ /* copy nonce into IV buffer */
+ memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
+ memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
+ OVPN_NONCE_TAIL_SIZE);
+
+ req = aead_request_alloc(ks->decrypt, GFP_ATOMIC);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ovpn_skb_cb(skb)->req = req;
+
+ /* setup async crypto operation */
+ aead_request_set_tfm(req, ks->decrypt);
+ aead_request_set_callback(req, 0, ovpn_decrypt_post, skb);
+ aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv);
+
+ aead_request_set_ad(req, OVPN_AAD_SIZE);
+
+ /* decrypt it */
+ return crypto_aead_decrypt(req);
+}
+
+/* Initialize a struct crypto_aead object */
+static struct crypto_aead *ovpn_aead_init(const char *title,
+ const char *alg_name,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct crypto_aead *aead;
+ int ret;
+
+ aead = crypto_alloc_aead(alg_name, 0, 0);
+ if (IS_ERR(aead)) {
+ ret = PTR_ERR(aead);
+ pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret);
+ aead = NULL;
+ goto error;
+ }
+
+ ret = crypto_aead_setkey(aead, key, keylen);
+ if (ret) {
+ pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title,
+ keylen, ret);
+ goto error;
+ }
+
+ ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE);
+ if (ret) {
+ pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title,
+ ret);
+ goto error;
+ }
+
+ /* basic AEAD assumption */
+ if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
+ pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ pr_debug("********* Cipher %s (%s)\n", alg_name, title);
+ pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead));
+ pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead));
+ pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead));
+ pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead));
+ pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead));
+
+ return aead;
+
+error:
+ crypto_free_aead(aead);
+ return ERR_PTR(ret);
+}
+
+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks)
+{
+ if (!ks)
+ return;
+
+ crypto_free_aead(ks->encrypt);
+ crypto_free_aead(ks->decrypt);
+ kfree(ks);
+}
+
+struct ovpn_crypto_key_slot *
+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+ const char *alg_name;
+ int ret;
+
+ /* validate crypto alg */
+ switch (kc->cipher_alg) {
+ case OVPN_CIPHER_ALG_AES_GCM:
+ alg_name = ALG_NAME_AES;
+ break;
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ alg_name = ALG_NAME_CHACHAPOLY;
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE ||
+ kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ /* build the key slot */
+ ks = kmalloc(sizeof(*ks), GFP_KERNEL);
+ if (!ks)
+ return ERR_PTR(-ENOMEM);
+
+ ks->encrypt = NULL;
+ ks->decrypt = NULL;
+ kref_init(&ks->refcount);
+ ks->key_id = kc->key_id;
+
+ ks->encrypt = ovpn_aead_init("encrypt", alg_name,
+ kc->encrypt.cipher_key,
+ kc->encrypt.cipher_key_size);
+ if (IS_ERR(ks->encrypt)) {
+ ret = PTR_ERR(ks->encrypt);
+ ks->encrypt = NULL;
+ goto destroy_ks;
+ }
+
+ ks->decrypt = ovpn_aead_init("decrypt", alg_name,
+ kc->decrypt.cipher_key,
+ kc->decrypt.cipher_key_size);
+ if (IS_ERR(ks->decrypt)) {
+ ret = PTR_ERR(ks->decrypt);
+ ks->decrypt = NULL;
+ goto destroy_ks;
+ }
+
+ memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail,
+ OVPN_NONCE_TAIL_SIZE);
+ memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail,
+ OVPN_NONCE_TAIL_SIZE);
+
+ /* init packet ID generation/validation */
+ ovpn_pktid_xmit_init(&ks->pid_xmit);
+ ovpn_pktid_recv_init(&ks->pid_recv);
+
+ return ks;
+
+destroy_ks:
+ ovpn_aead_crypto_key_slot_destroy(ks);
+ return ERR_PTR(ret);
+}
+
+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks)
+{
+ const char *alg_name;
+
+ if (!ks->encrypt)
+ return OVPN_CIPHER_ALG_NONE;
+
+ alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt));
+
+ if (!strcmp(alg_name, ALG_NAME_AES))
+ return OVPN_CIPHER_ALG_AES_GCM;
+ else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY))
+ return OVPN_CIPHER_ALG_CHACHA20_POLY1305;
+ else
+ return OVPN_CIPHER_ALG_NONE;
+}
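
Editor's note: the encrypt path above concatenates a 4-byte packet ID with the 8-byte per-key nonce tail to form the 12-byte AEAD nonce, of which only the packet ID travels on the wire (sizes per the OVPN_NONCE_* constants). A minimal sketch of that construction (example_build_nonce() is illustrative, standing in for ovpn_pktid_aead_write()):

	#include <linux/string.h>
	#include <asm/byteorder.h>

	/* Build the 12-byte nonce: wire part (packet ID) + secret tail. */
	static void example_build_nonce(u32 pktid, const u8 tail[8],
					u8 nonce[12])
	{
		__be32 be = cpu_to_be32(pktid);

		memcpy(nonce, &be, sizeof(be));		/* bytes 0..3: on the wire */
		memcpy(nonce + sizeof(be), tail, 8);	/* bytes 4..11: never sent */
	}
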
diff --git a/drivers/net/ovpn/crypto_aead.h b/drivers/net/ovpn/crypto_aead.h
new file mode 100644
index 000000000000..65a2ff307898
--- /dev/null
+++ b/drivers/net/ovpn/crypto_aead.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNAEAD_H_
+#define _NET_OVPN_OVPNAEAD_H_
+
+#include "crypto.h"
+
+#include <asm/types.h>
+#include <linux/skbuff.h>
+
+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb);
+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb);
+
+struct ovpn_crypto_key_slot *
+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc);
+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks);
+
+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks);
+
+#endif /* _NET_OVPN_OVPNAEAD_H_ */
diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
new file mode 100644
index 000000000000..ebf1e849506b
--- /dev/null
+++ b/drivers/net/ovpn/io.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <crypto/aead.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/gro_cells.h>
+#include <net/gso.h>
+#include <net/ip.h>
+
+#include "ovpnpriv.h"
+#include "peer.h"
+#include "io.h"
+#include "bind.h"
+#include "crypto.h"
+#include "crypto_aead.h"
+#include "netlink.h"
+#include "proto.h"
+#include "tcp.h"
+#include "udp.h"
+#include "skb.h"
+#include "socket.h"
+
+const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = {
+ 0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb,
+ 0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48
+};
+
+/**
+ * ovpn_is_keepalive - check if skb contains a keepalive message
+ * @skb: packet to check
+ *
+ * Assumes that the first byte of skb->data is defined.
+ *
+ * Return: true if skb contains a keepalive or false otherwise
+ */
+static bool ovpn_is_keepalive(struct sk_buff *skb)
+{
+ if (*skb->data != ovpn_keepalive_message[0])
+ return false;
+
+ if (skb->len != OVPN_KEEPALIVE_SIZE)
+ return false;
+
+ if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
+ return false;
+
+ return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
+}
+
+/* Called after decrypt to write the IP packet to the device.
+ * This method is expected to manage/free the skb.
+ */
+static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ unsigned int pkt_len;
+ int ret;
+
+ /* we can't guarantee the packet wasn't corrupted before entering the
+ * VPN, therefore we give other layers a chance to check that
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* skb hash for transport packet no longer valid after decapsulation */
+ skb_clear_hash(skb);
+
+ /* post-decrypt scrub -- prepare to inject encapsulated packet onto the
+ * interface, based on __skb_tunnel_rx() in dst.h
+ */
+ skb->dev = peer->ovpn->dev;
+ skb_set_queue_mapping(skb, 0);
+ skb_scrub_packet(skb, true);
+
+ /* network header reset in ovpn_decrypt_post() */
+ skb_reset_transport_header(skb);
+ skb_reset_inner_headers(skb);
+
+ /* cause packet to be "received" by the interface */
+ pkt_len = skb->len;
+ ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
+ if (likely(ret == NET_RX_SUCCESS)) {
+ /* update RX stats with the size of decrypted packet */
+ ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
+ dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
+ }
+}
+
+void ovpn_decrypt_post(void *data, int ret)
+{
+ struct ovpn_crypto_key_slot *ks;
+ unsigned int payload_offset = 0;
+ struct sk_buff *skb = data;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ __be16 proto;
+ __be32 *pid;
+
+ /* crypto is happening asynchronously. This function will be called
+ * again later by the crypto callback with a proper return code
+ */
+ if (unlikely(ret == -EINPROGRESS))
+ return;
+
+ payload_offset = ovpn_skb_cb(skb)->payload_offset;
+ ks = ovpn_skb_cb(skb)->ks;
+ peer = ovpn_skb_cb(skb)->peer;
+
+ /* crypto is done, cleanup skb CB and its members */
+ kfree(ovpn_skb_cb(skb)->iv);
+ kfree(ovpn_skb_cb(skb)->sg);
+ aead_request_free(ovpn_skb_cb(skb)->req);
+
+ if (unlikely(ret < 0))
+ goto drop;
+
+ /* PID sits after the op */
+ pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
+ ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
+ if (unlikely(ret < 0)) {
+ net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
+ netdev_name(peer->ovpn->dev), peer->id,
+ ret);
+ goto drop;
+ }
+
+ /* keep track of last received authenticated packet for keepalive */
+ WRITE_ONCE(peer->last_recv, ktime_get_real_seconds());
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (sock && sock->sk->sk_protocol == IPPROTO_UDP)
+ /* check if this peer changed local or remote endpoint */
+ ovpn_peer_endpoints_update(peer, skb);
+ rcu_read_unlock();
+
+ /* point to encapsulated IP packet */
+ __skb_pull(skb, payload_offset);
+
+ /* check if this is a valid data packet that has to be delivered to the
+ * ovpn interface
+ */
+ skb_reset_network_header(skb);
+ proto = ovpn_ip_check_protocol(skb);
+ if (unlikely(!proto)) {
+ /* check if null packet */
+ if (unlikely(!pskb_may_pull(skb, 1))) {
+ net_info_ratelimited("%s: NULL packet received from peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id);
+ goto drop;
+ }
+
+ if (ovpn_is_keepalive(skb)) {
+ net_dbg_ratelimited("%s: ping received from peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id);
+ /* we drop the packet, but this is not a failure */
+ consume_skb(skb);
+ goto drop_nocount;
+ }
+
+ net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto drop;
+ }
+ skb->protocol = proto;
+
+ /* perform Reverse Path Filtering (RPF) */
+ if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
+ if (skb->protocol == htons(ETH_P_IPV6))
+ net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &ipv6_hdr(skb)->saddr);
+ else
+ net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &ip_hdr(skb)->saddr);
+ goto drop;
+ }
+
+ ovpn_netdev_write(peer, skb);
+ /* skb is passed to upper layer - don't free it */
+ skb = NULL;
+drop:
+ if (unlikely(skb))
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+drop_nocount:
+ if (likely(peer))
+ ovpn_peer_put(peer);
+ if (likely(ks))
+ ovpn_crypto_key_slot_put(ks);
+}
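/* Editor's sketch: the DATA_V2 layout ovpn_decrypt_post() walks above. The
 * opcode and wire-nonce sizes are assumptions taken from the companion
 * proto.h (not part of this hunk) and OpenVPN's AEAD wire format:
 *
 *   +-------------+-----------+----------+----------------------+
 *   | opcode word | packet ID | AEAD tag | decrypted IP packet  |
 *   | 4 bytes     | 4 bytes   | 16 bytes | ...                  |
 *   +-------------+-----------+----------+----------------------+
 *   ^             ^                      ^
 *   skb->data     skb->data +            skb->data +
 *                 OVPN_OPCODE_SIZE       payload_offset
 *
 * which is why the packet ID is read at skb->data + OVPN_OPCODE_SIZE and
 * the plaintext is exposed with __skb_pull(skb, payload_offset).
 */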
+
+/* RX path entry point: decrypt packet and forward it to the device */
+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct ovpn_crypto_key_slot *ks;
+ u8 key_id;
+
+ ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len);
+
+ /* get the key slot matching the key ID in the received packet */
+ key_id = ovpn_key_id_from_skb(skb);
+ ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
+ if (unlikely(!ks)) {
+ net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
+ netdev_name(peer->ovpn->dev), peer->id,
+ key_id);
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
+ ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
+}
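/* Editor's sketch of where the key ID lives: ovpn_key_id_from_skb() is not
 * part of this hunk, but the 24-bit peer-ID and 3-bit key-ID ranges
 * enforced by the netlink policies later in this series suggest the
 * following first-word layout (an assumption, not confirmed here):
 *
 *   31      27 26    24 23                        0
 *   +---------+--------+---------------------------+
 *   | opcode  | key ID |          peer ID          |
 *   +---------+--------+---------------------------+
 */
static inline u8 key_id_from_op_sketch(__be32 op) /* illustrative only */
{
	return (ntohl(op) >> 24) & 0x07;
}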
+
+void ovpn_encrypt_post(void *data, int ret)
+{
+ struct ovpn_crypto_key_slot *ks;
+ struct sk_buff *skb = data;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ unsigned int orig_len;
+
+ /* encryption is happening asynchronously. This function will be
+ * called later by the crypto callback with a proper return value
+ */
+ if (unlikely(ret == -EINPROGRESS))
+ return;
+
+ ks = ovpn_skb_cb(skb)->ks;
+ peer = ovpn_skb_cb(skb)->peer;
+
+ /* crypto is done, cleanup skb CB and its members */
+ kfree(ovpn_skb_cb(skb)->iv);
+ kfree(ovpn_skb_cb(skb)->sg);
+ aead_request_free(ovpn_skb_cb(skb)->req);
+
+ if (unlikely(ret == -ERANGE)) {
+ /* we ran out of IVs and we must kill the key as it can't be
+ * used anymore
+ */
+ netdev_warn(peer->ovpn->dev,
+ "killing key %u for peer %u\n", ks->key_id,
+ peer->id);
+ if (ovpn_crypto_kill_key(&peer->crypto, ks->key_id))
+ /* let userspace know so that a new key can be negotiated */
+ ovpn_nl_key_swap_notify(peer, ks->key_id);
+
+ goto err;
+ }
+
+ if (unlikely(ret < 0))
+ goto err;
+
+ skb_mark_not_on_list(skb);
+ orig_len = skb->len;
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (unlikely(!sock))
+ goto err_unlock;
+
+ switch (sock->sk->sk_protocol) {
+ case IPPROTO_UDP:
+ ovpn_udp_send_skb(peer, sock->sk, skb);
+ break;
+ case IPPROTO_TCP:
+ ovpn_tcp_send_skb(peer, sock->sk, skb);
+ break;
+ default:
+ /* no transport configured yet */
+ goto err_unlock;
+ }
+
+ ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
+ /* keep track of last sent packet for keepalive */
+ WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
+ /* skb passed down the stack - don't free it */
+ skb = NULL;
+err_unlock:
+ rcu_read_unlock();
+err:
+ if (unlikely(skb))
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ if (likely(peer))
+ ovpn_peer_put(peer);
+ if (likely(ks))
+ ovpn_crypto_key_slot_put(ks);
+ kfree_skb(skb);
+}
+
+static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ /* get primary key to be used for encrypting data */
+ ks = ovpn_crypto_key_slot_primary(&peer->crypto);
+ if (unlikely(!ks))
+ return false;
+
+ /* take a reference to the peer because the crypto code may run async.
+ * ovpn_encrypt_post() will release it upon completion
+ */
+ if (unlikely(!ovpn_peer_hold(peer))) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ ovpn_crypto_key_slot_put(ks);
+ return false;
+ }
+
+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
+ ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb));
+ return true;
+}
+
+/* send skb to connected peer, if any */
+static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer)
+{
+ struct sk_buff *curr, *next;
+
+ /* this might be a GSO-segmented skb list: process each skb
+ * independently
+ */
+ skb_list_walk_safe(skb, curr, next) {
+ if (unlikely(!ovpn_encrypt_one(peer, curr))) {
+ dev_dstats_tx_dropped(ovpn->dev);
+ kfree_skb(curr);
+ }
+ }
+
+ ovpn_peer_put(peer);
+}
+
+/* Send user data to the network
+ */
+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ struct sk_buff *segments, *curr, *next;
+ struct sk_buff_head skb_list;
+ struct ovpn_peer *peer;
+ __be16 proto;
+ int ret;
+
+ /* reset netfilter state */
+ nf_reset_ct(skb);
+
+ /* verify IP header size in network packet */
+ proto = ovpn_ip_check_protocol(skb);
+ if (unlikely(!proto || skb->protocol != proto))
+ goto drop;
+
+ if (skb_is_gso(skb)) {
+ segments = skb_gso_segment(skb, 0);
+ if (IS_ERR(segments)) {
+ ret = PTR_ERR(segments);
+ net_err_ratelimited("%s: cannot segment payload packet: %d\n",
+ netdev_name(dev), ret);
+ goto drop;
+ }
+
+ consume_skb(skb);
+ skb = segments;
+ }
+
+ /* from this moment on, "skb" might be a list */
+
+ __skb_queue_head_init(&skb_list);
+ skb_list_walk_safe(skb, curr, next) {
+ skb_mark_not_on_list(curr);
+
+ curr = skb_share_check(curr, GFP_ATOMIC);
+ if (unlikely(!curr)) {
+ net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
+ netdev_name(dev));
+ dev_dstats_tx_dropped(ovpn->dev);
+ continue;
+ }
+
+ __skb_queue_tail(&skb_list, curr);
+ }
+ skb_list.prev->next = NULL;
+
+ /* retrieve peer serving the destination IP of this packet */
+ peer = ovpn_peer_get_by_dst(ovpn, skb);
+ if (unlikely(!peer)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ net_dbg_ratelimited("%s: no peer to send data to dst=%pI4\n",
+ netdev_name(ovpn->dev),
+ &ip_hdr(skb)->daddr);
+ break;
+ case htons(ETH_P_IPV6):
+ net_dbg_ratelimited("%s: no peer to send data to dst=%pI6c\n",
+ netdev_name(ovpn->dev),
+ &ipv6_hdr(skb)->daddr);
+ break;
+ }
+ goto drop;
+ }
+ /* dst was needed for peer selection - it can now be dropped */
+ skb_dst_drop(skb);
+
+ ovpn_peer_stats_increment_tx(&peer->vpn_stats, skb->len);
+ ovpn_send(ovpn, skb_list.next, peer);
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_dstats_tx_dropped(ovpn->dev);
+ skb_tx_error(skb);
+ kfree_skb_list(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer
+ * @peer: peer to send the message to
+ * @data: message content
+ * @len: message length
+ *
+ * Assumes that the caller holds a reference to the peer; that reference
+ * is consumed by ovpn_send()
+ */
+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
+ const unsigned int len)
+{
+ struct ovpn_priv *ovpn;
+ struct sk_buff *skb;
+
+ ovpn = peer->ovpn;
+ if (unlikely(!ovpn)) {
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ skb = alloc_skb(256 + len, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ skb_reserve(skb, 128);
+ skb->priority = TC_PRIO_BESTEFFORT;
+ __skb_put_data(skb, data, len);
+
+ ovpn_send(ovpn, skb, peer);
+}
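/* Illustrative caller of ovpn_xmit_special() (hedged: the real keepalive
 * machinery lives in peer.c, outside this hunk). The function hands the
 * peer reference to ovpn_send(), which drops it, so a caller that wants
 * to keep its own reference must take an extra one first:
 */
static void send_keepalive_sketch(struct ovpn_peer *peer)
{
	if (ovpn_peer_hold(peer))
		ovpn_xmit_special(peer, ovpn_keepalive_message,
				  OVPN_KEEPALIVE_SIZE);
}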
diff --git a/drivers/net/ovpn/io.h b/drivers/net/ovpn/io.h
new file mode 100644
index 000000000000..db9e10f9077c
--- /dev/null
+++ b/drivers/net/ovpn/io.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPN_H_
+#define _NET_OVPN_OVPN_H_
+
+/* DATA_V2 header size with AEAD encryption */
+#define OVPN_HEAD_ROOM (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE + \
+ 16 /* AEAD TAG length */ + \
+ max(sizeof(struct udphdr), sizeof(struct tcphdr)) +\
+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)))
+
+/* max padding required by encryption */
+#define OVPN_MAX_PADDING 16
+
+#define OVPN_KEEPALIVE_SIZE 16
+extern const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE];
+
+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb);
+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
+ const unsigned int len);
+
+void ovpn_encrypt_post(void *data, int ret);
+void ovpn_decrypt_post(void *data, int ret);
+
+#endif /* _NET_OVPN_OVPN_H_ */
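/* Editor's worked example for OVPN_HEAD_ROOM, assuming OVPN_OPCODE_SIZE
 * is 4 (one opcode byte plus a 3-byte peer ID) and OVPN_NONCE_WIRE_SIZE
 * is 4 (only the packet ID travels on the wire) -- both defined in the
 * companion proto.h, not in this hunk:
 *
 *     4   opcode word
 *   + 4   wire nonce (packet ID)
 *   + 16  AEAD tag
 *   + 20  max(udphdr = 8, tcphdr = 20)
 *   + 40  max(iphdr = 20, ipv6hdr = 40)
 *   = 84  bytes of headroom
 *
 * so the default dev->mtu set in ovpn_setup() comes out as
 * ETH_DATA_LEN - 84 = 1416 under these assumptions.
 */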
diff --git a/drivers/net/ovpn/main.c b/drivers/net/ovpn/main.c
new file mode 100644
index 000000000000..1bb1afe766a4
--- /dev/null
+++ b/drivers/net/ovpn/main.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#include <linux/ethtool.h>
+#include <linux/genetlink.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/gro_cells.h>
+#include <net/ip.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/if_arp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "netlink.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "tcp.h"
+#include "udp.h"
+
+static void ovpn_priv_free(struct net_device *net)
+{
+ struct ovpn_priv *ovpn = netdev_priv(net);
+
+ kfree(ovpn->peers);
+}
+
+static int ovpn_mp_alloc(struct ovpn_priv *ovpn)
+{
+ struct in_device *dev_v4;
+ int i;
+
+ if (ovpn->mode != OVPN_MODE_MP)
+ return 0;
+
+ dev_v4 = __in_dev_get_rtnl(ovpn->dev);
+ if (dev_v4) {
+ /* disable redirects as Linux gets confused by ovpn
+ * handling same-LAN routing.
+ * This happens because a multipeer interface is used as
+ * relay point between hosts in the same subnet, while
+ * in a classic LAN this would not be needed because the
+ * two hosts would be able to talk directly.
+ */
+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
+ IPV4_DEVCONF_ALL(dev_net(ovpn->dev), SEND_REDIRECTS) = false;
+ }
+
+ /* the peer container is fairly large, therefore we allocate it only in
+ * MP mode
+ */
+ ovpn->peers = kzalloc(sizeof(*ovpn->peers), GFP_KERNEL);
+ if (!ovpn->peers)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ovpn->peers->by_id); i++) {
+ INIT_HLIST_HEAD(&ovpn->peers->by_id[i]);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr4[i], i);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr6[i], i);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_transp_addr[i], i);
+ }
+
+ return 0;
+}
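/* Why INIT_HLIST_NULLS_HEAD() is seeded with the bucket index above: an
 * RCU lookup that races with a rehash can end up walking the wrong chain,
 * and the "nulls" terminator tells it which bucket it actually finished
 * on. Minimal sketch of the classic lookup pattern (the member name
 * hash_entry and the match on peer->id are assumptions, not this
 * driver's API):
 */
static struct ovpn_peer *
peer_lookup_sketch(struct hlist_nulls_head *heads, unsigned int bkt, u32 id)
{
	struct hlist_nulls_node *pos;
	struct ovpn_peer *peer;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(peer, pos, &heads[bkt], hash_entry) {
		if (peer->id == id && ovpn_peer_hold(peer))
			goto out;
	}
	/* chain ended on another bucket's nulls value: we raced a rehash */
	if (get_nulls_value(pos) != bkt)
		goto begin;
	peer = NULL;
out:
	rcu_read_unlock();
	return peer;
}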
+
+static int ovpn_net_init(struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ int err = gro_cells_init(&ovpn->gro_cells, dev);
+
+ if (err < 0)
+ return err;
+
+ err = ovpn_mp_alloc(ovpn);
+ if (err < 0) {
+ gro_cells_destroy(&ovpn->gro_cells);
+ return err;
+ }
+
+ return 0;
+}
+
+static void ovpn_net_uninit(struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ gro_cells_destroy(&ovpn->gro_cells);
+}
+
+static const struct net_device_ops ovpn_netdev_ops = {
+ .ndo_init = ovpn_net_init,
+ .ndo_uninit = ovpn_net_uninit,
+ .ndo_start_xmit = ovpn_net_xmit,
+};
+
+static const struct device_type ovpn_type = {
+ .name = OVPN_FAMILY_NAME,
+};
+
+static const struct nla_policy ovpn_policy[IFLA_OVPN_MAX + 1] = {
+ [IFLA_OVPN_MODE] = NLA_POLICY_RANGE(NLA_U8, OVPN_MODE_P2P,
+ OVPN_MODE_MP),
+};
+
+/**
+ * ovpn_dev_is_valid - check if the netdevice is of type 'ovpn'
+ * @dev: the interface to check
+ *
+ * Return: whether the netdevice is of type 'ovpn'
+ */
+bool ovpn_dev_is_valid(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ovpn_netdev_ops;
+}
+
+static void ovpn_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "ovpn", sizeof(info->driver));
+ strscpy(info->bus_info, "ovpn", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops ovpn_ethtool_ops = {
+ .get_drvinfo = ovpn_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static void ovpn_setup(struct net_device *dev)
+{
+ netdev_features_t feat = NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA;
+
+ dev->needs_free_netdev = true;
+
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+
+ dev->ethtool_ops = &ovpn_ethtool_ops;
+ dev->netdev_ops = &ovpn_netdev_ops;
+
+ dev->priv_destructor = ovpn_priv_free;
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->mtu = ETH_DATA_LEN - OVPN_HEAD_ROOM;
+ dev->min_mtu = IPV4_MIN_MTU;
+ dev->max_mtu = IP_MAX_MTU - OVPN_HEAD_ROOM;
+
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ /* when routing packets to a LAN behind a client, we rely on the
+ * route entry that originally brought the packet into ovpn, so
+ * don't release it
+ */
+ netif_keep_dst(dev);
+
+ dev->lltx = true;
+ dev->features |= feat;
+ dev->hw_features |= feat;
+ dev->hw_enc_features |= feat;
+
+ dev->needed_headroom = ALIGN(OVPN_HEAD_ROOM, 4);
+ dev->needed_tailroom = OVPN_MAX_PADDING;
+
+ SET_NETDEV_DEVTYPE(dev, &ovpn_type);
+}
+
+static int ovpn_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ enum ovpn_mode mode = OVPN_MODE_P2P;
+
+ if (data && data[IFLA_OVPN_MODE]) {
+ mode = nla_get_u8(data[IFLA_OVPN_MODE]);
+ netdev_dbg(dev, "setting device mode: %u\n", mode);
+ }
+
+ ovpn->dev = dev;
+ ovpn->mode = mode;
+ spin_lock_init(&ovpn->lock);
+ INIT_DELAYED_WORK(&ovpn->keepalive_work, ovpn_peer_keepalive_work);
+
+ /* Set carrier explicitly after registration, this way state is
+ * clearly defined.
+ *
+ * In case of MP interfaces we keep the carrier always on.
+ *
+ * Carrier for P2P interfaces is initially off and it is then
+ * switched on and off when the remote peer is added or deleted.
+ */
+ if (ovpn->mode == OVPN_MODE_MP)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ return register_netdevice(dev);
+}
+
+static void ovpn_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&ovpn->keepalive_work);
+ ovpn_peers_free(ovpn, NULL, OVPN_DEL_PEER_REASON_TEARDOWN);
+ unregister_netdevice_queue(dev, head);
+}
+
+static int ovpn_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ if (nla_put_u8(skb, IFLA_OVPN_MODE, ovpn->mode))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static struct rtnl_link_ops ovpn_link_ops = {
+ .kind = "ovpn",
+ .netns_refund = false,
+ .priv_size = sizeof(struct ovpn_priv),
+ .setup = ovpn_setup,
+ .policy = ovpn_policy,
+ .maxtype = IFLA_OVPN_MAX,
+ .newlink = ovpn_newlink,
+ .dellink = ovpn_dellink,
+ .fill_info = ovpn_fill_info,
+};
+
+static int __init ovpn_init(void)
+{
+ int err = rtnl_link_register(&ovpn_link_ops);
+
+ if (err) {
+ pr_err("ovpn: can't register rtnl link ops: %d\n", err);
+ return err;
+ }
+
+ err = ovpn_nl_register();
+ if (err) {
+ pr_err("ovpn: can't register netlink family: %d\n", err);
+ goto unreg_rtnl;
+ }
+
+ ovpn_tcp_init();
+
+ return 0;
+
+unreg_rtnl:
+ rtnl_link_unregister(&ovpn_link_ops);
+ return err;
+}
+
+static __exit void ovpn_cleanup(void)
+{
+ ovpn_nl_unregister();
+ rtnl_link_unregister(&ovpn_link_ops);
+
+ rcu_barrier();
+}
+
+module_init(ovpn_init);
+module_exit(ovpn_cleanup);
+
+MODULE_DESCRIPTION("OpenVPN data channel offload (ovpn)");
+MODULE_AUTHOR("Antonio Quartulli <antonio@openvpn.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ovpn/main.h b/drivers/net/ovpn/main.h
new file mode 100644
index 000000000000..017cd0100765
--- /dev/null
+++ b/drivers/net/ovpn/main.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_MAIN_H_
+#define _NET_OVPN_MAIN_H_
+
+bool ovpn_dev_is_valid(const struct net_device *dev);
+
+#endif /* _NET_OVPN_MAIN_H_ */
diff --git a/drivers/net/ovpn/netlink-gen.c b/drivers/net/ovpn/netlink-gen.c
new file mode 100644
index 000000000000..58e1a4342378
--- /dev/null
+++ b/drivers/net/ovpn/netlink-gen.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink-gen.h"
+
+#include <uapi/linux/ovpn.h>
+
+/* Integer value ranges */
+static const struct netlink_range_validation ovpn_a_peer_id_range = {
+ .max = 16777215ULL,
+};
+
+static const struct netlink_range_validation ovpn_a_keyconf_peer_id_range = {
+ .max = 16777215ULL,
+};
+
+/* Common nested types */
+const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1),
+ [OVPN_A_KEYCONF_KEY_ID] = NLA_POLICY_MAX(NLA_U32, 7),
+ [OVPN_A_KEYCONF_CIPHER_ALG] = NLA_POLICY_MAX(NLA_U32, 2),
+ [OVPN_A_KEYCONF_ENCRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
+ [OVPN_A_KEYCONF_DECRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
+};
+
+const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1] = {
+ [OVPN_A_KEYDIR_CIPHER_KEY] = NLA_POLICY_MAX_LEN(256),
+ [OVPN_A_KEYDIR_NONCE_TAIL] = NLA_POLICY_EXACT_LEN(OVPN_NONCE_TAIL_SIZE),
+};
+
+const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, },
+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_SOCKET] = { .type = NLA_U32, },
+ [OVPN_A_PEER_SOCKET_NETNSID] = { .type = NLA_S32, },
+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, },
+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, },
+ [OVPN_A_PEER_DEL_REASON] = NLA_POLICY_MAX(NLA_U32, 4),
+ [OVPN_A_PEER_VPN_RX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_TX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_RX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_TX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_RX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_TX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_RX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_TX_PACKETS] = { .type = NLA_UINT, },
+};
+
+/* OVPN_CMD_PEER_NEW - do */
+static const struct nla_policy ovpn_peer_new_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_SET - do */
+static const struct nla_policy ovpn_peer_set_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_GET - do */
+static const struct nla_policy ovpn_peer_get_do_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_GET - dump */
+static const struct nla_policy ovpn_peer_get_dump_nl_policy[OVPN_A_IFINDEX + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+};
+
+/* OVPN_CMD_PEER_DEL - do */
+static const struct nla_policy ovpn_peer_del_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_KEY_NEW - do */
+static const struct nla_policy ovpn_key_new_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_GET - do */
+static const struct nla_policy ovpn_key_get_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_SWAP - do */
+static const struct nla_policy ovpn_key_swap_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_DEL - do */
+static const struct nla_policy ovpn_key_del_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* Ops table for ovpn */
+static const struct genl_split_ops ovpn_nl_ops[] = {
+ {
+ .cmd = OVPN_CMD_PEER_NEW,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_new_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_new_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_SET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_set_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_set_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_GET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_get_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_get_do_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_GET,
+ .dumpit = ovpn_nl_peer_get_dumpit,
+ .policy = ovpn_peer_get_dump_nl_policy,
+ .maxattr = OVPN_A_IFINDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_DEL,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_del_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_del_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_NEW,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_new_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_new_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_GET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_get_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_get_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_SWAP,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_swap_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_swap_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_DEL,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_del_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_del_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+};
+
+static const struct genl_multicast_group ovpn_nl_mcgrps[] = {
+ [OVPN_NLGRP_PEERS] = { "peers", },
+};
+
+struct genl_family ovpn_nl_family __ro_after_init = {
+ .name = OVPN_FAMILY_NAME,
+ .version = OVPN_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = ovpn_nl_ops,
+ .n_split_ops = ARRAY_SIZE(ovpn_nl_ops),
+ .mcgrps = ovpn_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ovpn_nl_mcgrps),
+};
diff --git a/drivers/net/ovpn/netlink-gen.h b/drivers/net/ovpn/netlink-gen.h
new file mode 100644
index 000000000000..66a4e4a0a055
--- /dev/null
+++ b/drivers/net/ovpn/netlink-gen.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_OVPN_GEN_H
+#define _LINUX_OVPN_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/ovpn.h>
+
+/* Common nested types */
+extern const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1];
+extern const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1];
+extern const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1];
+
+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+void
+ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+
+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info);
+
+enum {
+ OVPN_NLGRP_PEERS,
+};
+
+extern struct genl_family ovpn_nl_family;
+
+#endif /* _LINUX_OVPN_GEN_H */
diff --git a/drivers/net/ovpn/netlink.c b/drivers/net/ovpn/netlink.c
new file mode 100644
index 000000000000..a4ec53def46e
--- /dev/null
+++ b/drivers/net/ovpn/netlink.c
@@ -0,0 +1,1258 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/ovpn.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "netlink.h"
+#include "netlink-gen.h"
+#include "bind.h"
+#include "crypto.h"
+#include "peer.h"
+#include "socket.h"
+
+MODULE_ALIAS_GENL_FAMILY(OVPN_FAMILY_NAME);
+
+/**
+ * ovpn_get_dev_from_attrs - retrieve the ovpn private data from the netdevice
+ * a netlink message is targeting
+ * @net: network namespace where to look for the interface
+ * @info: generic netlink info from the user request
+ * @tracker: tracker object to be used for the netdev reference acquisition
+ *
+ * Return: the ovpn private data, if found, or an error otherwise
+ */
+static struct ovpn_priv *
+ovpn_get_dev_from_attrs(struct net *net, const struct genl_info *info,
+ netdevice_tracker *tracker)
+{
+ struct ovpn_priv *ovpn;
+ struct net_device *dev;
+ int ifindex;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_IFINDEX))
+ return ERR_PTR(-EINVAL);
+
+ ifindex = nla_get_u32(info->attrs[OVPN_A_IFINDEX]);
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (!dev) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "ifindex does not match any interface");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!ovpn_dev_is_valid(dev)) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "specified interface is not ovpn");
+ NL_SET_BAD_ATTR(info->extack, info->attrs[OVPN_A_IFINDEX]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ovpn = netdev_priv(dev);
+ netdev_hold(dev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return ovpn;
+}
+
+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1];
+ struct ovpn_priv *ovpn = ovpn_get_dev_from_attrs(genl_info_net(info),
+ info, tracker);
+
+ if (IS_ERR(ovpn))
+ return PTR_ERR(ovpn);
+
+ info->user_ptr[0] = ovpn;
+
+ return 0;
+}
+
+void ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+
+ if (ovpn)
+ netdev_put(ovpn->dev, tracker);
+}
+
+static bool ovpn_nl_attr_sockaddr_remote(struct nlattr **attrs,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ struct in6_addr *in6;
+ __be16 port = 0;
+ __be32 *in;
+
+ ss->ss_family = AF_UNSPEC;
+
+ if (attrs[OVPN_A_PEER_REMOTE_PORT])
+ port = nla_get_be16(attrs[OVPN_A_PEER_REMOTE_PORT]);
+
+ if (attrs[OVPN_A_PEER_REMOTE_IPV4]) {
+ ss->ss_family = AF_INET;
+ in = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV4]);
+ } else if (attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ ss->ss_family = AF_INET6;
+ in6 = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV6]);
+ } else {
+ return false;
+ }
+
+ switch (ss->ss_family) {
+ case AF_INET6:
+ /* If this is a regular IPv6 address, just break and move on;
+ * otherwise switch to AF_INET and extract the IPv4 accordingly
+ */
+ if (!ipv6_addr_v4mapped(in6)) {
+ sin6 = (struct sockaddr_in6 *)ss;
+ sin6->sin6_port = port;
+ memcpy(&sin6->sin6_addr, in6, sizeof(*in6));
+ break;
+ }
+
+ /* v4-mapped-v6 address */
+ ss->ss_family = AF_INET;
+ in = &in6->s6_addr32[3];
+ fallthrough;
+ case AF_INET:
+ sin = (struct sockaddr_in *)ss;
+ sin->sin_port = port;
+ sin->sin_addr.s_addr = *in;
+ break;
+ }
+
+ return true;
+}
+
+static u8 *ovpn_nl_attr_local_ip(struct nlattr **attrs)
+{
+ u8 *addr6;
+
+ if (!attrs[OVPN_A_PEER_LOCAL_IPV4] && !attrs[OVPN_A_PEER_LOCAL_IPV6])
+ return NULL;
+
+ if (attrs[OVPN_A_PEER_LOCAL_IPV4])
+ return nla_data(attrs[OVPN_A_PEER_LOCAL_IPV4]);
+
+ addr6 = nla_data(attrs[OVPN_A_PEER_LOCAL_IPV6]);
+ /* if this is an IPv4-mapped IPv6 address, extract the actual
+ * v4 address from the last 4 bytes
+ */
+ if (ipv6_addr_v4mapped((struct in6_addr *)addr6))
+ return addr6 + 12;
+
+ return addr6;
+}
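/* Editor's note on the "+ 12" above: per RFC 4291, an IPv4-mapped IPv6
 * address is laid out as ::ffff:a.b.c.d, i.e. 10 zero bytes, 2 bytes of
 * 0xff, then the IPv4 address in bytes 12..15 -- the same 4 bytes that
 * ovpn_nl_attr_sockaddr_remote() reaches via in6->s6_addr32[3].
 *
 * For example, ::ffff:192.0.2.1 is stored as:
 *   00 00 00 00 00 00 00 00 00 00 ff ff c0 00 02 01
 *                                       ^^^^^^^^^^^ 192.0.2.1
 */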
+
+static sa_family_t ovpn_nl_family_get(struct nlattr *addr4,
+ struct nlattr *addr6)
+{
+ if (addr4)
+ return AF_INET;
+
+ if (addr6) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)nla_data(addr6)))
+ return AF_INET;
+ return AF_INET6;
+ }
+
+ return AF_UNSPEC;
+}
+
+static int ovpn_nl_peer_precheck(struct ovpn_priv *ovpn,
+ struct genl_info *info,
+ struct nlattr **attrs)
+{
+ sa_family_t local_fam, remote_fam;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ if (attrs[OVPN_A_PEER_REMOTE_IPV4] && attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify both remote IPv4 or IPv6 address");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV6] && attrs[OVPN_A_PEER_REMOTE_PORT]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify remote port without IP address");
+ return -EINVAL;
+ }
+
+ if ((attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6]) &&
+ !attrs[OVPN_A_PEER_REMOTE_PORT]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify remote IP address without port");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ attrs[OVPN_A_PEER_LOCAL_IPV4]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify local IPv4 address without remote");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV6] &&
+ attrs[OVPN_A_PEER_LOCAL_IPV6]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify local IPV6 address without remote");
+ return -EINVAL;
+ }
+
+ /* check that local and remote address families are the same even
+ * after parsing v4mapped IPv6 addresses.
+ * (if addresses are not provided, family will be AF_UNSPEC and
+ * the check is skipped)
+ */
+ local_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_LOCAL_IPV4],
+ attrs[OVPN_A_PEER_LOCAL_IPV6]);
+ remote_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_REMOTE_IPV4],
+ attrs[OVPN_A_PEER_REMOTE_IPV6]);
+ if (local_fam != AF_UNSPEC && remote_fam != AF_UNSPEC &&
+ local_fam != remote_fam) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "mismatching local and remote address families");
+ return -EINVAL;
+ }
+
+ if (remote_fam != AF_INET6 && attrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify scope id without remote IPv6 address");
+ return -EINVAL;
+ }
+
+ /* VPN IPs are needed only in MP mode for selecting the right peer */
+ if (ovpn->mode == OVPN_MODE_P2P && (attrs[OVPN_A_PEER_VPN_IPV4] ||
+ attrs[OVPN_A_PEER_VPN_IPV6])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected VPN IP in P2P mode");
+ return -EINVAL;
+ }
+
+ if ((attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ !attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) ||
+ (!attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "keepalive interval and timeout are required together");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_peer_modify - modify the peer attributes according to the incoming msg
+ * @peer: the peer to modify
+ * @info: generic netlink info from the user request
+ * @attrs: the attributes from the user request
+ *
+ * Return: a negative error code in case of failure, 0 on success or 1 on
+ * success and the VPN IPs have been modified (requires rehashing in MP
+ * mode)
+ */
+static int ovpn_nl_peer_modify(struct ovpn_peer *peer, struct genl_info *info,
+ struct nlattr **attrs)
+{
+ struct sockaddr_storage ss = {};
+ void *local_ip = NULL;
+ u32 interv, timeout;
+ bool rehash = false;
+ int ret;
+
+ spin_lock_bh(&peer->lock);
+
+ if (ovpn_nl_attr_sockaddr_remote(attrs, &ss)) {
+ /* we carry the local IP in a generic container.
+ * ovpn_peer_reset_sockaddr() will properly interpret it
+ * based on ss.ss_family
+ */
+ local_ip = ovpn_nl_attr_local_ip(attrs);
+
+ /* set peer sockaddr */
+ ret = ovpn_peer_reset_sockaddr(peer, &ss, local_ip);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot set peer sockaddr: %d",
+ ret);
+ goto err_unlock;
+ }
+ dst_cache_reset(&peer->dst_cache);
+ }
+
+ if (attrs[OVPN_A_PEER_VPN_IPV4]) {
+ rehash = true;
+ peer->vpn_addrs.ipv4.s_addr =
+ nla_get_in_addr(attrs[OVPN_A_PEER_VPN_IPV4]);
+ }
+
+ if (attrs[OVPN_A_PEER_VPN_IPV6]) {
+ rehash = true;
+ peer->vpn_addrs.ipv6 =
+ nla_get_in6_addr(attrs[OVPN_A_PEER_VPN_IPV6]);
+ }
+
+ /* when setting the keepalive, both parameters have to be configured */
+ if (attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) {
+ interv = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]);
+ timeout = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]);
+ ovpn_peer_keepalive_set(peer, interv, timeout);
+ }
+
+ netdev_dbg(peer->ovpn->dev,
+ "modify peer id=%u endpoint=%pIScp VPN-IPv4=%pI4 VPN-IPv6=%pI6c\n",
+ peer->id, &ss,
+ &peer->vpn_addrs.ipv4.s_addr, &peer->vpn_addrs.ipv6);
+
+ spin_unlock_bh(&peer->lock);
+
+ return rehash ? 1 : 0;
+err_unlock:
+ spin_unlock_bh(&peer->lock);
+ return ret;
+}
+
+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_socket *ovpn_sock;
+ struct socket *sock = NULL;
+ struct ovpn_peer *peer;
+ u32 sockfd, peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+ if (ret < 0)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_SOCKET))
+ return -EINVAL;
+
+ /* in MP mode VPN IPs are required for selecting the right peer */
+ if (ovpn->mode == OVPN_MODE_MP && !attrs[OVPN_A_PEER_VPN_IPV4] &&
+ !attrs[OVPN_A_PEER_VPN_IPV6]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "VPN IP must be provided in MP mode");
+ return -EINVAL;
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_new(ovpn, peer_id);
+ if (IS_ERR(peer)) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot create new peer object for peer %u: %ld",
+ peer_id, PTR_ERR(peer));
+ return PTR_ERR(peer);
+ }
+
+ /* lookup the fd in the kernel table and extract the socket object */
+ sockfd = nla_get_u32(attrs[OVPN_A_PEER_SOCKET]);
+ /* sockfd_lookup() increases sock's refcounter */
+ sock = sockfd_lookup(sockfd, &ret);
+ if (!sock) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot lookup peer socket (fd=%u): %d",
+ sockfd, ret);
+ ret = -ENOTSOCK;
+ goto peer_release;
+ }
+
+ /* Only when UDP is used as the transport protocol can the remote
+ * endpoint be configured, so that ovpn knows where to send packets.
+ */
+ if (sock->sk->sk_protocol == IPPROTO_UDP &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "missing remote IP address for UDP socket");
+ sockfd_put(sock);
+ ret = -EINVAL;
+ goto peer_release;
+ }
+
+ /* In case of TCP, the socket is connected to the peer and ovpn
+ * will just send bytes over it, without the need to specify a
+ * destination.
+ */
+ if (sock->sk->sk_protocol == IPPROTO_TCP &&
+ (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected remote IP address with TCP socket");
+ sockfd_put(sock);
+ ret = -EINVAL;
+ goto peer_release;
+ }
+
+ ovpn_sock = ovpn_socket_new(sock, peer);
+ /* at this point we unconditionally drop the reference to the socket:
+ * - in case of error, the socket has to be dropped
+ * - in case of success, the socket is configured and we let
+ *   userspace own the reference, so that the latter can
+ * trigger the final close()
+ */
+ sockfd_put(sock);
+ if (IS_ERR(ovpn_sock)) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot encapsulate socket: %ld",
+ PTR_ERR(ovpn_sock));
+ ret = -ENOTSOCK;
+ goto peer_release;
+ }
+
+ rcu_assign_pointer(peer->sock, ovpn_sock);
+
+ ret = ovpn_nl_peer_modify(peer, info, attrs);
+ if (ret < 0)
+ goto sock_release;
+
+ ret = ovpn_peer_add(ovpn, peer);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot add new peer (id=%u) to hashtable: %d",
+ peer->id, ret);
+ goto sock_release;
+ }
+
+ return 0;
+
+sock_release:
+ ovpn_socket_release(peer);
+peer_release:
+ /* release right away because peer was not yet hashed, thus it is not
+ * used in any context
+ */
+ ovpn_peer_release(peer);
+
+ return ret;
+}
+
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+ if (ret < 0)
+ return ret;
+
+ if (attrs[OVPN_A_PEER_SOCKET]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "socket cannot be modified");
+ return -EINVAL;
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ /* when using a TCP socket the remote IP is not expected */
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (sock && sock->sk->sk_protocol == IPPROTO_TCP &&
+ (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected remote IP address with TCP socket");
+ ovpn_peer_put(peer);
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ spin_lock_bh(&ovpn->lock);
+ ret = ovpn_nl_peer_modify(peer, info, attrs);
+ if (ret < 0) {
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+ return ret;
+ }
+
+ /* ret == 1 means that VPN IPv4/6 has been modified and rehashing
+ * is required
+ */
+ if (ret > 0)
+ ovpn_peer_hash_vpn_ip(peer);
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+static int ovpn_nl_send_peer(struct sk_buff *skb, const struct genl_info *info,
+ const struct ovpn_peer *peer, u32 portid, u32 seq,
+ int flags)
+{
+ const struct ovpn_bind *bind;
+ struct ovpn_socket *sock;
+ int ret = -EMSGSIZE;
+ struct nlattr *attr;
+ __be16 local_port;
+ void *hdr;
+ int id;
+
+ hdr = genlmsg_put(skb, portid, seq, &ovpn_nl_family, flags,
+ OVPN_CMD_PEER_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ attr = nla_nest_start(skb, OVPN_A_PEER);
+ if (!attr)
+ goto err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (!net_eq(genl_info_net(info), sock_net(sock->sk))) {
+ id = peernet2id_alloc(genl_info_net(info),
+ sock_net(sock->sk),
+ GFP_ATOMIC);
+ if (nla_put_s32(skb, OVPN_A_PEER_SOCKET_NETNSID, id))
+ goto err_unlock;
+ }
+ local_port = inet_sk(sock->sk)->inet_sport;
+ rcu_read_unlock();
+
+ if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id))
+ goto err;
+
+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY))
+ if (nla_put_in_addr(skb, OVPN_A_PEER_VPN_IPV4,
+ peer->vpn_addrs.ipv4.s_addr))
+ goto err;
+
+ if (!ipv6_addr_equal(&peer->vpn_addrs.ipv6, &in6addr_any))
+ if (nla_put_in6_addr(skb, OVPN_A_PEER_VPN_IPV6,
+ &peer->vpn_addrs.ipv6))
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_INTERVAL,
+ peer->keepalive_interval) ||
+ nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_TIMEOUT,
+ peer->keepalive_timeout))
+ goto err;
+
+ rcu_read_lock();
+ bind = rcu_dereference(peer->bind);
+ if (bind) {
+ if (bind->remote.in4.sin_family == AF_INET) {
+ if (nla_put_in_addr(skb, OVPN_A_PEER_REMOTE_IPV4,
+ bind->remote.in4.sin_addr.s_addr) ||
+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT,
+ bind->remote.in4.sin_port) ||
+ nla_put_in_addr(skb, OVPN_A_PEER_LOCAL_IPV4,
+ bind->local.ipv4.s_addr))
+ goto err_unlock;
+ } else if (bind->remote.in4.sin_family == AF_INET6) {
+ if (nla_put_in6_addr(skb, OVPN_A_PEER_REMOTE_IPV6,
+ &bind->remote.in6.sin6_addr) ||
+ nla_put_u32(skb, OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID,
+ bind->remote.in6.sin6_scope_id) ||
+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT,
+ bind->remote.in6.sin6_port) ||
+ nla_put_in6_addr(skb, OVPN_A_PEER_LOCAL_IPV6,
+ &bind->local.ipv6))
+ goto err_unlock;
+ }
+ }
+ rcu_read_unlock();
+
+ if (nla_put_net16(skb, OVPN_A_PEER_LOCAL_PORT, local_port) ||
+ /* VPN RX stats */
+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_BYTES,
+ atomic64_read(&peer->vpn_stats.rx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_PACKETS,
+ atomic64_read(&peer->vpn_stats.rx.packets)) ||
+ /* VPN TX stats */
+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_BYTES,
+ atomic64_read(&peer->vpn_stats.tx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_PACKETS,
+ atomic64_read(&peer->vpn_stats.tx.packets)) ||
+ /* link RX stats */
+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_BYTES,
+ atomic64_read(&peer->link_stats.rx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_PACKETS,
+ atomic64_read(&peer->link_stats.rx.packets)) ||
+ /* link TX stats */
+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_BYTES,
+ atomic64_read(&peer->link_stats.tx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_PACKETS,
+ atomic64_read(&peer->link_stats.tx.packets)))
+ goto err;
+
+ nla_nest_end(skb, attr);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+err_unlock:
+ rcu_read_unlock();
+err:
+ genlmsg_cancel(skb, hdr);
+ return ret;
+}
+
+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer *peer;
+ struct sk_buff *msg;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ovpn_nl_send_peer(msg, info, peer, info->snd_portid,
+ info->snd_seq, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ goto err;
+ }
+
+ ret = genlmsg_reply(msg, info);
+err:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ int bkt, last_idx = cb->args[1], dumped = 0;
+ netdevice_tracker tracker;
+ struct ovpn_priv *ovpn;
+ struct ovpn_peer *peer;
+
+ ovpn = ovpn_get_dev_from_attrs(sock_net(cb->skb->sk), info, &tracker);
+ if (IS_ERR(ovpn))
+ return PTR_ERR(ovpn);
+
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ /* if we already dumped a peer it means we are done */
+ if (last_idx)
+ goto out;
+
+ rcu_read_lock();
+ peer = rcu_dereference(ovpn->peer);
+ if (peer) {
+ if (ovpn_nl_send_peer(skb, info, peer,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) == 0)
+ dumped++;
+ }
+ rcu_read_unlock();
+ } else {
+ rcu_read_lock();
+ hash_for_each_rcu(ovpn->peers->by_id, bkt, peer,
+ hash_entry_id) {
+ /* skip already dumped peers that were dumped by
+ * previous invocations
+ */
+ if (last_idx > 0) {
+ last_idx--;
+ continue;
+ }
+
+ if (ovpn_nl_send_peer(skb, info, peer,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) < 0)
+ break;
+
+ /* count peers being dumped during this invocation */
+ dumped++;
+ }
+ rcu_read_unlock();
+ }
+
+out:
+ netdev_put(ovpn->dev, &tracker);
+
+ /* sum up peers dumped in this message, so that at the next invocation
+ * we can continue from where we left off
+ */
+ cb->args[1] += dumped;
+ return skb->len;
+}
+
+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ netdev_dbg(ovpn->dev, "del peer %u\n", peer->id);
+ ret = ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_USERSPACE);
+ ovpn_peer_put(peer);
+
+ return ret;
+}
+
+static int ovpn_nl_get_key_dir(struct genl_info *info, struct nlattr *key,
+ enum ovpn_cipher_alg cipher,
+ struct ovpn_key_direction *dir)
+{
+ struct nlattr *attrs[OVPN_A_KEYDIR_MAX + 1];
+ int ret;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYDIR_MAX, key,
+ ovpn_keydir_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ switch (cipher) {
+ case OVPN_CIPHER_ALG_AES_GCM:
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ if (NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+ OVPN_A_KEYDIR_CIPHER_KEY) ||
+ NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+ OVPN_A_KEYDIR_NONCE_TAIL))
+ return -EINVAL;
+
+ dir->cipher_key = nla_data(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+ dir->cipher_key_size = nla_len(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+
+ /* These algorithms require a 96-bit nonce. It is constructed
+ * by combining the 4-byte packet ID with the 8-byte nonce
+ * tail provided by userspace
+ */
+ dir->nonce_tail = nla_data(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+ dir->nonce_tail_size = nla_len(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "unsupported cipher");
+ return -EINVAL;
+ }
+
+ return 0;
+}
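/* Minimal sketch of the nonce assembly described in the comment above
 * (the helper name and the packet-ID size macro are illustrative; only
 * OVPN_NONCE_TAIL_SIZE appears in this series):
 */
#define OVPN_PKTID_WIRE_SIZE	4	/* assumption: 32-bit packet ID */

static void build_nonce_sketch(u8 *nonce, __be32 pktid,
			       const u8 *nonce_tail)
{
	/* 96-bit nonce = 4-byte packet ID || 8-byte per-direction tail */
	memcpy(nonce, &pktid, OVPN_PKTID_WIRE_SIZE);
	memcpy(nonce + OVPN_PKTID_WIRE_SIZE, nonce_tail,
	       OVPN_NONCE_TAIL_SIZE);
}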
+
+/**
+ * ovpn_nl_key_new_doit - configure a new key for the specified peer
+ * @skb: incoming netlink message
+ * @info: genetlink metadata
+ *
+ * This function allows the user to install a new key in the peer crypto
+ * state.
+ * Each peer has two 'slots', namely 'primary' and 'secondary', where
+ * keys can be installed. The key in the 'primary' slot is used for
+ * encryption, while both keys can be used for decryption by matching the
+ * key ID carried in the incoming packet.
+ *
+ * The user is responsible for rotating keys when necessary. The user
+ * may fetch peer traffic statistics via netlink in order to better
+ * identify the right time to rotate keys.
+ * The renegotiation follows these steps:
+ * 1. a new key is computed by the user and is installed in the 'secondary'
+ * slot
+ * 2. at user discretion (usually after a predetermined time) 'primary' and
+ * 'secondary' contents are swapped and the new key starts being used for
+ * encryption, while the old key is kept around for decryption of late
+ * packets.
+ *
+ * Return: 0 on success or a negative error code otherwise.
+ */
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer_key_reset pkr;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_KEY_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_CIPHER_ALG) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_ENCRYPT_DIR) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_DECRYPT_DIR))
+ return -EINVAL;
+
+ pkr.slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+ pkr.key.key_id = nla_get_u32(attrs[OVPN_A_KEYCONF_KEY_ID]);
+ pkr.key.cipher_alg = nla_get_u32(attrs[OVPN_A_KEYCONF_CIPHER_ALG]);
+
+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_ENCRYPT_DIR],
+ pkr.key.cipher_alg, &pkr.key.encrypt);
+ if (ret < 0)
+ return ret;
+
+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_DECRYPT_DIR],
+ pkr.key.cipher_alg, &pkr.key.decrypt);
+ if (ret < 0)
+ return ret;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to set key for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ret = ovpn_crypto_state_reset(&peer->crypto, &pkr);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot install new key for peer %u",
+ peer_id);
+ goto out;
+ }
+
+ netdev_dbg(ovpn->dev, "new key installed (id=%u) for peer %u\n",
+ pkr.key.key_id, peer_id);
+out:
+ ovpn_peer_put(peer);
+ return ret;
+}
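/* Rotation recipe implied by the kernel-doc above, as seen from userspace
 * (attribute and command names are from this file; the slot encoding
 * 0 = primary, 1 = secondary is an assumption based on
 * NLA_POLICY_MAX(NLA_U32, 1)):
 *
 *   1) OVPN_CMD_KEY_NEW  with OVPN_A_KEYCONF_SLOT = secondary,
 *      OVPN_A_KEYCONF_KEY_ID = new id, plus ENCRYPT/DECRYPT_DIR material
 *   2) ...renegotiation policy decides it is time to rotate...
 *   3) OVPN_CMD_KEY_SWAP with OVPN_A_KEYCONF_PEER_ID
 *      -> the new key becomes the encrypting (primary) key, while the old
 *         one keeps decrypting late packets from the secondary slot
 */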
+
+static int ovpn_nl_send_key(struct sk_buff *skb, const struct genl_info *info,
+ u32 peer_id, enum ovpn_key_slot slot,
+ const struct ovpn_key_config *keyconf)
+{
+ struct nlattr *attr;
+ void *hdr;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, &ovpn_nl_family,
+ 0, OVPN_CMD_KEY_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ attr = nla_nest_start(skb, OVPN_A_KEYCONF);
+ if (!attr)
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_KEYCONF_PEER_ID, peer_id))
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_KEYCONF_SLOT, slot) ||
+ nla_put_u32(skb, OVPN_A_KEYCONF_KEY_ID, keyconf->key_id) ||
+ nla_put_u32(skb, OVPN_A_KEYCONF_CIPHER_ALG, keyconf->cipher_alg))
+ goto err;
+
+ nla_nest_end(skb, attr);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+err:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_key_config keyconf = { 0 };
+ enum ovpn_key_slot slot;
+ struct ovpn_peer *peer;
+ struct sk_buff *msg;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+ ret = ovpn_crypto_config_get(&peer->crypto, slot, &keyconf);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot extract key from slot %u for peer %u",
+ slot, peer_id);
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ovpn_nl_send_key(msg, info, peer->id, slot, &keyconf);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ goto err;
+ }
+
+ ret = genlmsg_reply(msg, info);
+err:
+ ovpn_peer_put(peer);
+ return ret;
+}
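
For context, the request half of this exchange lives in userspace. Below is a minimal sketch of a key query using libnl-genl; it assumes the uapi constants from <linux/ovpn.h> generated for this family, ovpn_query_key() is a hypothetical helper, and error handling is omitted for brevity.

	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/ovpn.h>

	static int ovpn_query_key(int ifindex, uint32_t peer_id)
	{
		struct nl_sock *sk = nl_socket_alloc();
		struct nl_msg *msg = nlmsg_alloc();
		struct nlattr *keyconf;
		int family, ret;

		genl_connect(sk);
		family = genl_ctrl_resolve(sk, OVPN_FAMILY_NAME);

		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
			    OVPN_CMD_KEY_GET, OVPN_FAMILY_VERSION);
		nla_put_u32(msg, OVPN_A_IFINDEX, ifindex);

		/* the handler above requires PEER_ID and SLOT under KEYCONF */
		keyconf = nla_nest_start(msg, OVPN_A_KEYCONF);
		nla_put_u32(msg, OVPN_A_KEYCONF_PEER_ID, peer_id);
		nla_put_u32(msg, OVPN_A_KEYCONF_SLOT, OVPN_KEY_SLOT_PRIMARY);
		nla_nest_end(msg, keyconf);

		nl_send_auto(sk, msg);
		ret = nl_recvmsgs_default(sk); /* reply: slot, key ID, cipher */

		nlmsg_free(msg);
		nl_socket_free(sk);
		return ret;
	}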
+
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to swap keys for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ovpn_crypto_key_slots_swap(&peer->crypto);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ enum ovpn_key_slot slot;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to delete key for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ovpn_crypto_key_slot_delete(&peer->crypto, slot);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_peer_del_notify - notify userspace about peer being deleted
+ * @peer: the peer being deleted
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer)
+{
+ struct ovpn_socket *sock;
+ struct sk_buff *msg;
+ struct nlattr *attr;
+ int ret = -EMSGSIZE;
+ void *hdr;
+
+ netdev_info(peer->ovpn->dev, "deleting peer with id %u, reason %d\n",
+ peer->id, peer->delete_reason);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_PEER_DEL_NTF);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_free_msg;
+ }
+
+ if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
+ goto err_cancel_msg;
+
+ attr = nla_nest_start(msg, OVPN_A_PEER);
+ if (!attr)
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_PEER_DEL_REASON, peer->delete_reason))
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_PEER_ID, peer->id))
+ goto err_cancel_msg;
+
+ nla_nest_end(msg, attr);
+
+ genlmsg_end(msg, hdr);
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0,
+ OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return 0;
+
+err_unlock:
+ rcu_read_unlock();
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+/**
+ * ovpn_nl_key_swap_notify - notify userspace peer's key must be renewed
+ * @peer: the peer whose key needs to be renewed
+ * @key_id: the ID of the key that needs to be renewed
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id)
+{
+ struct ovpn_socket *sock;
+ struct nlattr *k_attr;
+ struct sk_buff *msg;
+ int ret = -EMSGSIZE;
+ void *hdr;
+
+ netdev_info(peer->ovpn->dev, "peer with id %u must rekey - primary key unusable.\n",
+ peer->id);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_KEY_SWAP_NTF);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_free_msg;
+ }
+
+ if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
+ goto err_cancel_msg;
+
+ k_attr = nla_nest_start(msg, OVPN_A_KEYCONF);
+ if (!k_attr)
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_KEYCONF_PEER_ID, peer->id))
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_KEYCONF_KEY_ID, key_id))
+ goto err_cancel_msg;
+
+ nla_nest_end(msg, k_attr);
+ genlmsg_end(msg, hdr);
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0,
+ OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return 0;
+err_unlock:
+ rcu_read_unlock();
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+/**
+ * ovpn_nl_register - perform any needed registration in the NL subsystem
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int __init ovpn_nl_register(void)
+{
+ int ret = genl_register_family(&ovpn_nl_family);
+
+ if (ret) {
+ pr_err("ovpn: genl_register_family failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_unregister - undo any module wide netlink registration
+ */
+void ovpn_nl_unregister(void)
+{
+ genl_unregister_family(&ovpn_nl_family);
+}
diff --git a/drivers/net/ovpn/netlink.h b/drivers/net/ovpn/netlink.h
new file mode 100644
index 000000000000..8615dfc3c472
--- /dev/null
+++ b/drivers/net/ovpn/netlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_NETLINK_H_
+#define _NET_OVPN_NETLINK_H_
+
+int ovpn_nl_register(void);
+void ovpn_nl_unregister(void);
+
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer);
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id);
+
+#endif /* _NET_OVPN_NETLINK_H_ */
diff --git a/drivers/net/ovpn/ovpnpriv.h b/drivers/net/ovpn/ovpnpriv.h
new file mode 100644
index 000000000000..5898f6adada7
--- /dev/null
+++ b/drivers/net/ovpn/ovpnpriv.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNSTRUCT_H_
+#define _NET_OVPN_OVPNSTRUCT_H_
+
+#include <linux/workqueue.h>
+#include <net/gro_cells.h>
+#include <uapi/linux/if_link.h>
+#include <uapi/linux/ovpn.h>
+
+/**
+ * struct ovpn_peer_collection - container of peers for MultiPeer mode
+ * @by_id: table of peers indexed by ID
+ * @by_vpn_addr4: table of peers indexed by VPN IPv4 address (items can be
+ * rehashed on the fly due to peer IP change)
+ * @by_vpn_addr6: table of peers indexed by VPN IPv6 address (items can be
+ * rehashed on the fly due to peer IP change)
+ * @by_transp_addr: table of peers indexed by transport address (items can be
+ * rehashed on the fly due to peer IP change)
+ */
+struct ovpn_peer_collection {
+ DECLARE_HASHTABLE(by_id, 12);
+ struct hlist_nulls_head by_vpn_addr4[1 << 12];
+ struct hlist_nulls_head by_vpn_addr6[1 << 12];
+ struct hlist_nulls_head by_transp_addr[1 << 12];
+};
+
+/**
+ * struct ovpn_priv - per ovpn interface state
+ * @dev: the actual netdev representing the tunnel
+ * @mode: device operation mode (e.g. p2p, mp, ..)
+ * @lock: protect this object
+ * @peers: data structures holding multi-peer references
+ * @peer: in P2P mode, this is the only remote peer
+ * @gro_cells: pointer to the Generic Receive Offload cell
+ * @keepalive_work: struct used to schedule keepalive periodic job
+ */
+struct ovpn_priv {
+ struct net_device *dev;
+ enum ovpn_mode mode;
+ spinlock_t lock; /* protect writing to the ovpn_priv object */
+ struct ovpn_peer_collection *peers;
+ struct ovpn_peer __rcu *peer;
+ struct gro_cells gro_cells;
+ struct delayed_work keepalive_work;
+};
+
+#endif /* _NET_OVPN_OVPNSTRUCT_H_ */
diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
new file mode 100644
index 000000000000..4bfcab0c8652
--- /dev/null
+++ b/drivers/net/ovpn/peer.c
@@ -0,0 +1,1364 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/hashtable.h>
+#include <net/ip6_route.h>
+
+#include "ovpnpriv.h"
+#include "bind.h"
+#include "pktid.h"
+#include "crypto.h"
+#include "io.h"
+#include "main.h"
+#include "netlink.h"
+#include "peer.h"
+#include "socket.h"
+
+static void unlock_ovpn(struct ovpn_priv *ovpn,
+ struct llist_head *release_list)
+ __releases(&ovpn->lock)
+{
+ struct ovpn_peer *peer;
+
+ spin_unlock_bh(&ovpn->lock);
+
+ llist_for_each_entry(peer, release_list->first, release_entry) {
+ ovpn_socket_release(peer);
+ ovpn_peer_put(peer);
+ }
+}
+
+/**
+ * ovpn_peer_keepalive_set - configure keepalive values for peer
+ * @peer: the peer to configure
+ * @interval: outgoing keepalive interval
+ * @timeout: incoming keepalive timeout
+ */
+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout)
+{
+ time64_t now = ktime_get_real_seconds();
+
+ netdev_dbg(peer->ovpn->dev,
+ "scheduling keepalive for peer %u: interval=%u timeout=%u\n",
+ peer->id, interval, timeout);
+
+ peer->keepalive_interval = interval;
+ WRITE_ONCE(peer->last_sent, now);
+ peer->keepalive_xmit_exp = now + interval;
+
+ peer->keepalive_timeout = timeout;
+ WRITE_ONCE(peer->last_recv, now);
+ peer->keepalive_recv_exp = now + timeout;
+
+ /* now that interval and timeout have been changed, kick
+ * off the worker so that the next delay can be recomputed
+ */
+ mod_delayed_work(system_wq, &peer->ovpn->keepalive_work, 0);
+}
+
+/**
+ * ovpn_peer_keepalive_send - periodic worker sending keepalive packets
+ * @work: pointer to the work member of the related peer object
+ *
+ * NOTE: the reference to peer is not dropped because it gets inherited
+ * by ovpn_xmit_special()
+ */
+static void ovpn_peer_keepalive_send(struct work_struct *work)
+{
+ struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
+ keepalive_work);
+
+ local_bh_disable();
+ ovpn_xmit_special(peer, ovpn_keepalive_message,
+ sizeof(ovpn_keepalive_message));
+ local_bh_enable();
+}
+
+/**
+ * ovpn_peer_new - allocate and initialize a new peer object
+ * @ovpn: the openvpn instance inside which the peer should be created
+ * @id: the ID assigned to this peer
+ *
+ * Return: a pointer to the new peer on success or an error code otherwise
+ */
+struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id)
+{
+ struct ovpn_peer *peer;
+ int ret;
+
+ /* alloc and init peer object */
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return ERR_PTR(-ENOMEM);
+
+ peer->id = id;
+ peer->ovpn = ovpn;
+
+ peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY);
+ peer->vpn_addrs.ipv6 = in6addr_any;
+
+ RCU_INIT_POINTER(peer->bind, NULL);
+ ovpn_crypto_state_init(&peer->crypto);
+ spin_lock_init(&peer->lock);
+ kref_init(&peer->refcount);
+ ovpn_peer_stats_init(&peer->vpn_stats);
+ ovpn_peer_stats_init(&peer->link_stats);
+ INIT_WORK(&peer->keepalive_work, ovpn_peer_keepalive_send);
+
+ ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(ovpn->dev,
+ "cannot initialize dst cache for peer %u\n",
+ peer->id);
+ kfree(peer);
+ return ERR_PTR(ret);
+ }
+
+ netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL);
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_reset_sockaddr - recreate binding for peer
+ * @peer: peer to recreate the binding for
+ * @ss: sockaddr to use as remote endpoint for the binding
+ * @local_ip: local IP for the binding
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss,
+ const void *local_ip)
+{
+ struct ovpn_bind *bind;
+ size_t ip_len;
+
+ lockdep_assert_held(&peer->lock);
+
+ /* create new ovpn_bind object */
+ bind = ovpn_bind_from_sockaddr(ss);
+ if (IS_ERR(bind))
+ return PTR_ERR(bind);
+
+ if (local_ip) {
+ if (ss->ss_family == AF_INET) {
+ ip_len = sizeof(struct in_addr);
+ } else if (ss->ss_family == AF_INET6) {
+ ip_len = sizeof(struct in6_addr);
+ } else {
+ net_dbg_ratelimited("%s: invalid family %u for remote endpoint for peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ ss->ss_family, peer->id);
+ kfree(bind);
+ return -EINVAL;
+ }
+
+ memcpy(&bind->local, local_ip, ip_len);
+ }
+
+ /* set binding */
+ ovpn_bind_reset(peer, bind);
+
+ return 0;
+}
+
+/* variable name __tbl2 needs to be different from __tbl1
+ * in the macro below to avoid confusing clang
+ */
+#define ovpn_get_hash_slot(_tbl, _key, _key_len) ({ \
+ typeof(_tbl) *__tbl2 = &(_tbl); \
+ jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl2); \
+})
+
+#define ovpn_get_hash_head(_tbl, _key, _key_len) ({ \
+ typeof(_tbl) *__tbl1 = &(_tbl); \
+ &(*__tbl1)[ovpn_get_hash_slot(*__tbl1, _key, _key_len)];\
+})
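
Concretely, for one of the 4096-entry tables declared in ovpnpriv.h the two macros reduce to a jhash folded modulo the array length (HASH_SIZE() yields the number of entries, 1 << 12 here). An illustrative expansion, where example_addr4_head() exists only for this sketch:

	static struct hlist_nulls_head *
	example_addr4_head(struct ovpn_priv *ovpn, __be32 addr)
	{
		/* HASH_SIZE(ovpn->peers->by_vpn_addr4) == 4096 */
		u32 slot = jhash(&addr, sizeof(addr), 0) % 4096;

		return &ovpn->peers->by_vpn_addr4[slot];
	}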
+
+/**
+ * ovpn_peer_endpoints_update - update remote or local endpoint for peer
+ * @peer: peer to update the remote endpoint for
+ * @skb: incoming packet to retrieve the source/destination address from
+ */
+void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct hlist_nulls_head *nhead;
+ struct sockaddr_storage ss;
+ struct sockaddr_in6 *sa6;
+ bool reset_cache = false;
+ struct sockaddr_in *sa;
+ struct ovpn_bind *bind;
+ const void *local_ip = NULL;
+ size_t salen = 0;
+
+ spin_lock_bh(&peer->lock);
+ bind = rcu_dereference_protected(peer->bind,
+ lockdep_is_held(&peer->lock));
+ if (unlikely(!bind))
+ goto unlock;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ /* float check */
+ if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
+ /* unconditionally save local endpoint in case
+ * of float, as it may have changed as well
+ */
+ local_ip = &ip_hdr(skb)->daddr;
+ sa = (struct sockaddr_in *)&ss;
+ sa->sin_family = AF_INET;
+ sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sa->sin_port = udp_hdr(skb)->source;
+ salen = sizeof(*sa);
+ reset_cache = true;
+ break;
+ }
+
+ /* if no float happened, let's double check if the local endpoint
+ * has changed
+ */
+ if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) {
+ net_dbg_ratelimited("%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &bind->local.ipv4.s_addr,
+ &ip_hdr(skb)->daddr);
+ bind->local.ipv4.s_addr = ip_hdr(skb)->daddr;
+ reset_cache = true;
+ }
+ break;
+ case htons(ETH_P_IPV6):
+ /* float check */
+ if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
+ /* unconditionally save local endpoint in case
+ * of float, as it may have changed as well
+ */
+ local_ip = &ipv6_hdr(skb)->daddr;
+ sa6 = (struct sockaddr_in6 *)&ss;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = ipv6_hdr(skb)->saddr;
+ sa6->sin6_port = udp_hdr(skb)->source;
+ sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr,
+ skb->skb_iif);
+ salen = sizeof(*sa6);
+ reset_cache = true;
+ break;
+ }
+
+ /* if no float happened, let's double check if the local endpoint
+ * has changed
+ */
+ if (unlikely(!ipv6_addr_equal(&bind->local.ipv6,
+ &ipv6_hdr(skb)->daddr))) {
+ net_dbg_ratelimited("%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &bind->local.ipv6,
+ &ipv6_hdr(skb)->daddr);
+ bind->local.ipv6 = ipv6_hdr(skb)->daddr;
+ reset_cache = true;
+ }
+ break;
+ default:
+ goto unlock;
+ }
+
+ if (unlikely(reset_cache))
+ dst_cache_reset(&peer->dst_cache);
+
+ /* if the peer did not float, we can bail out now */
+ if (likely(!salen))
+ goto unlock;
+
+ if (unlikely(ovpn_peer_reset_sockaddr(peer, &ss, local_ip) < 0))
+ goto unlock;
+
+ net_dbg_ratelimited("%s: peer %d floated to %pIScp",
+ netdev_name(peer->ovpn->dev), peer->id, &ss);
+
+ spin_unlock_bh(&peer->lock);
+
+ /* rehashing is required only in MP mode as P2P has one peer
+ * only and thus there is no hashtable
+ */
+ if (peer->ovpn->mode == OVPN_MODE_MP) {
+ spin_lock_bh(&peer->ovpn->lock);
+ spin_lock_bh(&peer->lock);
+ bind = rcu_dereference_protected(peer->bind,
+ lockdep_is_held(&peer->lock));
+ if (unlikely(!bind)) {
+ spin_unlock_bh(&peer->lock);
+ spin_unlock_bh(&peer->ovpn->lock);
+ return;
+ }
+
+ /* This function may be invoked concurrently, therefore another
+ * float may have happened in parallel: perform rehashing
+ * using the peer->bind->remote directly as key
+ */
+
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ salen = sizeof(*sa);
+ break;
+ case AF_INET6:
+ salen = sizeof(*sa6);
+ break;
+ }
+
+ /* remove old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
+ /* re-add with new transport address */
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr,
+ &bind->remote, salen);
+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
+ spin_unlock_bh(&peer->lock);
+ spin_unlock_bh(&peer->ovpn->lock);
+ }
+ return;
+unlock:
+ spin_unlock_bh(&peer->lock);
+}
+
+/**
+ * ovpn_peer_release_rcu - RCU callback performing last peer release steps
+ * @head: RCU member of the ovpn_peer
+ */
+static void ovpn_peer_release_rcu(struct rcu_head *head)
+{
+ struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu);
+
+ /* this call will immediately free the dst_cache, therefore we
+ * perform it in the RCU callback, when all contexts are done
+ */
+ dst_cache_destroy(&peer->dst_cache);
+ kfree(peer);
+}
+
+/**
+ * ovpn_peer_release - release peer private members
+ * @peer: the peer to release
+ */
+void ovpn_peer_release(struct ovpn_peer *peer)
+{
+ ovpn_crypto_state_release(&peer->crypto);
+ spin_lock_bh(&peer->lock);
+ ovpn_bind_reset(peer, NULL);
+ spin_unlock_bh(&peer->lock);
+ call_rcu(&peer->rcu, ovpn_peer_release_rcu);
+ netdev_put(peer->ovpn->dev, &peer->dev_tracker);
+}
+
+/**
+ * ovpn_peer_release_kref - callback for kref_put
+ * @kref: the kref object belonging to the peer
+ */
+void ovpn_peer_release_kref(struct kref *kref)
+{
+ struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount);
+
+ ovpn_peer_release(peer);
+}
+
+/**
+ * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address
+ * @skb: the packet to extract data from
+ * @ss: the sockaddr to fill
+ *
+ * Return: sockaddr length on success or -1 otherwise
+ */
+static int ovpn_peer_skb_to_sockaddr(struct sk_buff *skb,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ sa4 = (struct sockaddr_in *)ss;
+ sa4->sin_family = AF_INET;
+ sa4->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sa4->sin_port = udp_hdr(skb)->source;
+ return sizeof(*sa4);
+ case htons(ETH_P_IPV6):
+ sa6 = (struct sockaddr_in6 *)ss;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = ipv6_hdr(skb)->saddr;
+ sa6->sin6_port = udp_hdr(skb)->source;
+ return sizeof(*sa6);
+ }
+
+ return -1;
+}
+
+/**
+ * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb
+ * @skb: the outgoing packet
+ *
+ * Return: the IPv4 of the nexthop
+ */
+static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb)
+{
+ const struct rtable *rt = skb_rtable(skb);
+
+ if (rt && rt->rt_uses_gateway)
+ return rt->rt_gw4;
+
+ return ip_hdr(skb)->daddr;
+}
+
+/**
+ * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb
+ * @skb: the outgoing packet
+ *
+ * Return: the IPv6 of the nexthop
+ */
+static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb)
+{
+ const struct rt6_info *rt = skb_rt6_info(skb);
+
+ if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
+ return ipv6_hdr(skb)->daddr;
+
+ return rt->rt6i_gateway;
+}
+
+/**
+ * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address
+ * @ovpn: the openvpn instance to search
+ * @addr: VPN IPv4 to use as search key
+ *
+ * Refcounter is not increased for the returned peer.
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_priv *ovpn,
+ __be32 addr)
+{
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ struct ovpn_peer *tmp;
+ unsigned int slot;
+
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr4, &addr,
+ sizeof(addr));
+ nhead = &ovpn->peers->by_vpn_addr4[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4)
+ if (addr == tmp->vpn_addrs.ipv4.s_addr)
+ return tmp;
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (get_nulls_value(ntmp) != slot)
+ goto begin;
+
+ return NULL;
+}
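
The closing nulls check is what makes this lockless lookup safe against concurrent rehashing: every hlist_nulls chain is terminated by a marker encoding the slot it belongs to, so ending a traversal on a marker with a different value means the entry being followed was moved to another chain mid-walk. The generic shape of the pattern, with compute_slot() and match() standing in for the table-specific pieces:

	begin:
		slot = compute_slot(key);	/* hypothetical helper */
		hlist_nulls_for_each_entry_rcu(obj, node, &tbl[slot], member)
			if (match(obj, key))	/* hypothetical helper */
				return obj;

		/* ended on a foreign nulls marker: entry moved, retry */
		if (get_nulls_value(node) != slot)
			goto begin;

		return NULL;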
+
+/**
+ * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address
+ * @ovpn: the openvpn instance to search
+ * @addr: VPN IPv6 to use as search key
+ *
+ * Refcounter is not increased for the returned peer.
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_priv *ovpn,
+ struct in6_addr *addr)
+{
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ struct ovpn_peer *tmp;
+ unsigned int slot;
+
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr6, addr,
+ sizeof(*addr));
+ nhead = &ovpn->peers->by_vpn_addr6[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6)
+ if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6))
+ return tmp;
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (get_nulls_value(ntmp) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+/**
+ * ovpn_peer_transp_match - check if sockaddr and peer binding match
+ * @peer: the peer to get the binding from
+ * @ss: the sockaddr to match
+ *
+ * Return: true if sockaddr and binding match or false otherwise
+ */
+static bool ovpn_peer_transp_match(const struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss)
+{
+ struct ovpn_bind *bind = rcu_dereference(peer->bind);
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+
+ if (unlikely(!bind))
+ return false;
+
+ if (ss->ss_family != bind->remote.in4.sin_family)
+ return false;
+
+ switch (ss->ss_family) {
+ case AF_INET:
+ sa4 = (struct sockaddr_in *)ss;
+ if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr)
+ return false;
+ if (sa4->sin_port != bind->remote.in4.sin_port)
+ return false;
+ break;
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)ss;
+ if (!ipv6_addr_equal(&sa6->sin6_addr,
+ &bind->remote.in6.sin6_addr))
+ return false;
+ if (sa6->sin6_port != bind->remote.in6.sin6_port)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address in a P2P
+ * instance
+ * @ovpn: the openvpn instance to search
+ * @ss: the transport socket address
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *
+ovpn_peer_get_by_transp_addr_p2p(struct ovpn_priv *ovpn,
+ struct sockaddr_storage *ss)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(ovpn->peer);
+ if (likely(tmp && ovpn_peer_transp_match(tmp, ss) &&
+ ovpn_peer_hold(tmp)))
+ peer = tmp;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_transp_addr - retrieve peer by transport address
+ * @ovpn: the openvpn instance to search
+ * @skb: the skb to retrieve the source transport address from
+ *
+ * Return: a pointer to the peer if found or NULL otherwise
+ */
+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
+ struct sk_buff *skb)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+ struct sockaddr_storage ss = { 0 };
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ unsigned int slot;
+ ssize_t sa_len;
+
+ sa_len = ovpn_peer_skb_to_sockaddr(skb, &ss);
+ if (unlikely(sa_len < 0))
+ return NULL;
+
+ if (ovpn->mode == OVPN_MODE_P2P)
+ return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss);
+
+ rcu_read_lock();
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_transp_addr, &ss, sa_len);
+ nhead = &ovpn->peers->by_transp_addr[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead,
+ hash_entry_transp_addr) {
+ if (!ovpn_peer_transp_match(tmp, &ss))
+ continue;
+
+ if (!ovpn_peer_hold(tmp))
+ continue;
+
+ peer = tmp;
+ break;
+ }
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (!peer && get_nulls_value(ntmp) != slot)
+ goto begin;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P instance
+ * @ovpn: the openvpn instance to search
+ * @peer_id: the ID of the peer to find
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_priv *ovpn,
+ u32 peer_id)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(ovpn->peer);
+ if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp)))
+ peer = tmp;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_id - retrieve peer by ID
+ * @ovpn: the openvpn instance to search
+ * @peer_id: the unique peer identifier to match
+ *
+ * Return: a pointer to the peer if found or NULL otherwise
+ */
+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+ struct hlist_head *head;
+
+ if (ovpn->mode == OVPN_MODE_P2P)
+ return ovpn_peer_get_by_id_p2p(ovpn, peer_id);
+
+ head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
+ sizeof(peer_id));
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, head, hash_entry_id) {
+ if (tmp->id != peer_id)
+ continue;
+
+ if (!ovpn_peer_hold(tmp))
+ continue;
+
+ peer = tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return peer;
+}
+
+static void ovpn_peer_remove(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ switch (peer->ovpn->mode) {
+ case OVPN_MODE_MP:
+ /* prevent double remove */
+ if (hlist_unhashed(&peer->hash_entry_id))
+ return;
+
+ hlist_del_init_rcu(&peer->hash_entry_id);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
+ break;
+ case OVPN_MODE_P2P:
+ /* prevent double remove */
+ if (peer != rcu_access_pointer(peer->ovpn->peer))
+ return;
+
+ RCU_INIT_POINTER(peer->ovpn->peer, NULL);
+ /* in P2P mode the carrier is switched off when the peer is
+ * deleted so that third party protocols can react accordingly
+ */
+ netif_carrier_off(peer->ovpn->dev);
+ break;
+ }
+
+ peer->delete_reason = reason;
+ ovpn_nl_peer_del_notify(peer);
+
+ /* append to provided list for later socket release and ref drop */
+ llist_add(&peer->release_entry, release_list);
+}
+
+/**
+ * ovpn_peer_get_by_dst - Lookup peer to send skb to
+ * @ovpn: the private data representing the current VPN session
+ * @skb: the skb to extract the destination address from
+ *
+ * This function takes a tunnel packet and looks up the peer to send it to
+ * after encapsulation. The skb is expected to be the in-tunnel packet, without
+ * any OpenVPN related header.
+ *
+ * The caller must ensure the IP header is accessible in the skb data.
+ *
+ * Return: the peer if found or NULL otherwise.
+ */
+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
+ struct sk_buff *skb)
+{
+ struct ovpn_peer *peer = NULL;
+ struct in6_addr addr6;
+ __be32 addr4;
+
+ /* in P2P mode, no matter the destination, packets are always sent to
+ * the single peer listening on the other side
+ */
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ rcu_read_lock();
+ peer = rcu_dereference(ovpn->peer);
+ if (unlikely(peer && !ovpn_peer_hold(peer)))
+ peer = NULL;
+ rcu_read_unlock();
+ return peer;
+ }
+
+ rcu_read_lock();
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ addr4 = ovpn_nexthop_from_skb4(skb);
+ peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4);
+ break;
+ case htons(ETH_P_IPV6):
+ addr6 = ovpn_nexthop_from_skb6(skb);
+ peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6);
+ break;
+ }
+
+ if (unlikely(peer && !ovpn_peer_hold(peer)))
+ peer = NULL;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_nexthop_from_rt4 - look up the IPv4 nexthop for the given destination
+ * @ovpn: the private data representing the current VPN session
+ * @dest: the destination to be looked up
+ *
+ * Looks up in the IPv4 system routing table the IP of the nexthop to be used
+ * to reach the destination passed as argument. If no nexthop can be found, the
+ * destination itself is returned as it probably has to be used as nexthop.
+ *
+ * Return: the IP of the next hop if found or dest itself otherwise
+ */
+static __be32 ovpn_nexthop_from_rt4(struct ovpn_priv *ovpn, __be32 dest)
+{
+ struct rtable *rt;
+ struct flowi4 fl = {
+ .daddr = dest
+ };
+
+ rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL);
+ if (IS_ERR(rt)) {
+ net_dbg_ratelimited("%s: no route to host %pI4\n",
+ netdev_name(ovpn->dev), &dest);
+ /* if we end up here this packet is probably going to be
+ * thrown away later
+ */
+ return dest;
+ }
+
+ if (!rt->rt_uses_gateway)
+ goto out;
+
+ dest = rt->rt_gw4;
+out:
+ ip_rt_put(rt);
+ return dest;
+}
+
+/**
+ * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination
+ * @ovpn: the private data representing the current VPN session
+ * @dest: the destination to be looked up
+ *
+ * Looks up in the IPv6 system routing table the IP of the nexthop to be used
+ * to reach the destination passed as argument. If no nexthop can be found, the
+ * destination itself is returned as it probably has to be used as nexthop.
+ *
+ * Return: the IP of the next hop if found or dest itself otherwise
+ */
+static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_priv *ovpn,
+ struct in6_addr dest)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *entry;
+ struct rt6_info *rt;
+ struct flowi6 fl = {
+ .daddr = dest,
+ };
+
+ entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl,
+ NULL);
+ if (IS_ERR(entry)) {
+ net_dbg_ratelimited("%s: no route to host %pI6c\n",
+ netdev_name(ovpn->dev), &dest);
+ /* if we end up here this packet is probably going to be
+ * thrown away later
+ */
+ return dest;
+ }
+
+ rt = dst_rt6_info(entry);
+
+ if (!(rt->rt6i_flags & RTF_GATEWAY))
+ goto out;
+
+ dest = rt->rt6i_gateway;
+out:
+ dst_release((struct dst_entry *)rt);
+#endif
+ return dest;
+}
+
+/**
+ * ovpn_peer_check_by_src - check that skb source is routed via peer
+ * @ovpn: the openvpn instance to search
+ * @skb: the packet to extract source address from
+ * @peer: the peer to check against the source address
+ *
+ * Return: true if the peer is matching or false otherwise
+ */
+bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer)
+{
+ bool match = false;
+ struct in6_addr addr6;
+ __be32 addr4;
+
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ /* in P2P mode, no matter the destination, packets are always
+ * sent to the single peer listening on the other side
+ */
+ return peer == rcu_access_pointer(ovpn->peer);
+ }
+
+ /* This function performs a reverse path check, therefore we now
+ * look up the nexthop we would use if we wanted to route a packet
+ * to the source IP. If the nexthop matches the sender we know the
+ * latter is valid and we allow the packet to come in
+ */
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr);
+ rcu_read_lock();
+ match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4));
+ rcu_read_unlock();
+ break;
+ case htons(ETH_P_IPV6):
+ addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr);
+ rcu_read_lock();
+ match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6));
+ rcu_read_unlock();
+ break;
+ }
+
+ return match;
+}
+
+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer)
+{
+ struct hlist_nulls_head *nhead;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ /* rehashing makes sense only in multipeer mode */
+ if (peer->ovpn->mode != OVPN_MODE_MP)
+ return;
+
+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
+ /* remove potential old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
+
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr4,
+ &peer->vpn_addrs.ipv4,
+ sizeof(peer->vpn_addrs.ipv4));
+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead);
+ }
+
+ if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) {
+ /* remove potential old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
+
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr6,
+ &peer->vpn_addrs.ipv6,
+ sizeof(peer->vpn_addrs.ipv6));
+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead);
+ }
+}
+
+/**
+ * ovpn_peer_add_mp - add peer to related tables in a MP instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_mp(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ struct sockaddr_storage sa = { 0 };
+ struct hlist_nulls_head *nhead;
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+ struct ovpn_bind *bind;
+ struct ovpn_peer *tmp;
+ size_t salen;
+ int ret = 0;
+
+ spin_lock_bh(&ovpn->lock);
+ /* do not add duplicates */
+ tmp = ovpn_peer_get_by_id(ovpn, peer->id);
+ if (tmp) {
+ ovpn_peer_put(tmp);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ bind = rcu_dereference_protected(peer->bind, true);
+ /* peers connected via TCP have bind == NULL */
+ if (bind) {
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ sa4 = (struct sockaddr_in *)&sa;
+
+ sa4->sin_family = AF_INET;
+ sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr;
+ sa4->sin_port = bind->remote.in4.sin_port;
+ salen = sizeof(*sa4);
+ break;
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)&sa;
+
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = bind->remote.in6.sin6_addr;
+ sa6->sin6_port = bind->remote.in6.sin6_port;
+ salen = sizeof(*sa6);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa,
+ salen);
+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
+ }
+
+ hlist_add_head_rcu(&peer->hash_entry_id,
+ ovpn_get_hash_head(ovpn->peers->by_id, &peer->id,
+ sizeof(peer->id)));
+
+ ovpn_peer_hash_vpn_ip(peer);
+out:
+ spin_unlock_bh(&ovpn->lock);
+ return ret;
+}
+
+/**
+ * ovpn_peer_add_p2p - add peer to related tables in a P2P instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_p2p(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *tmp;
+
+ spin_lock_bh(&ovpn->lock);
+ /* in p2p mode it is possible to have a single peer only, therefore the
+ * old one is released and substituted by the new one
+ */
+ tmp = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (tmp)
+ ovpn_peer_remove(tmp, OVPN_DEL_PEER_REASON_TEARDOWN,
+ &release_list);
+
+ rcu_assign_pointer(ovpn->peer, peer);
+ /* in P2P mode the carrier is switched on when the peer is added */
+ netif_carrier_on(ovpn->dev);
+ unlock_ovpn(ovpn, &release_list);
+
+ return 0;
+}
+
+/**
+ * ovpn_peer_add - add peer to the related tables
+ * @ovpn: the openvpn instance the peer belongs to
+ * @peer: the peer object to add
+ *
+ * Assume refcounter was increased by caller
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ switch (ovpn->mode) {
+ case OVPN_MODE_MP:
+ return ovpn_peer_add_mp(ovpn, peer);
+ case OVPN_MODE_P2P:
+ return ovpn_peer_add_p2p(ovpn, peer);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ovpn_peer_del_mp - delete peer from related tables in a MP instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ * @release_list: list where delete peer should be appended
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_mp(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *tmp;
+ int ret = -ENOENT;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id);
+ if (tmp == peer) {
+ ovpn_peer_remove(peer, reason, release_list);
+ ret = 0;
+ }
+
+ if (tmp)
+ ovpn_peer_put(tmp);
+
+ return ret;
+}
+
+/**
+ * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ * @release_list: list where delete peer should be appended
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_p2p(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *tmp;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ tmp = rcu_dereference_protected(peer->ovpn->peer,
+ lockdep_is_held(&peer->ovpn->lock));
+ if (tmp != peer)
+ return -ENOENT;
+
+ ovpn_peer_remove(peer, reason, release_list);
+
+ return 0;
+}
+
+/**
+ * ovpn_peer_del - delete peer from related tables
+ * @peer: the peer object to delete
+ * @reason: reason for deleting peer (will be sent to userspace)
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason)
+{
+ LLIST_HEAD(release_list);
+ int ret = -EOPNOTSUPP;
+
+ spin_lock_bh(&peer->ovpn->lock);
+ switch (peer->ovpn->mode) {
+ case OVPN_MODE_MP:
+ ret = ovpn_peer_del_mp(peer, reason, &release_list);
+ break;
+ case OVPN_MODE_P2P:
+ ret = ovpn_peer_del_p2p(peer, reason, &release_list);
+ break;
+ default:
+ break;
+ }
+ unlock_ovpn(peer->ovpn, &release_list);
+
+ return ret;
+}
+
+/**
+ * ovpn_peer_release_p2p - release peer upon P2P device teardown
+ * @ovpn: the instance being torn down
+ * @sk: if not NULL, release peer only if it's using this specific socket
+ * @reason: the reason for releasing the peer
+ */
+static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ struct ovpn_socket *ovpn_sock;
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *peer;
+
+ spin_lock_bh(&ovpn->lock);
+ peer = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (!peer) {
+ spin_unlock_bh(&ovpn->lock);
+ return;
+ }
+
+ if (sk) {
+ ovpn_sock = rcu_access_pointer(peer->sock);
+ if (!ovpn_sock || ovpn_sock->sk != sk) {
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+ return;
+ }
+ }
+
+ ovpn_peer_remove(peer, reason, &release_list);
+ unlock_ovpn(ovpn, &release_list);
+}
+
+static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ struct ovpn_socket *ovpn_sock;
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *peer;
+ struct hlist_node *tmp;
+ int bkt;
+
+ spin_lock_bh(&ovpn->lock);
+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
+ bool remove = true;
+
+ /* if a socket was passed as argument, skip all peers except
+ * those using it
+ */
+ if (sk) {
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference(peer->sock);
+ remove = ovpn_sock && ovpn_sock->sk == sk;
+ rcu_read_unlock();
+ }
+
+ if (remove)
+ ovpn_peer_remove(peer, reason, &release_list);
+ }
+ unlock_ovpn(ovpn, &release_list);
+}
+
+/**
+ * ovpn_peers_free - free all peers in the instance
+ * @ovpn: the instance whose peers should be released
+ * @sk: if not NULL, only peers using this socket are removed and the socket
+ * is released immediately
+ * @reason: the reason for releasing all peers
+ */
+void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ switch (ovpn->mode) {
+ case OVPN_MODE_P2P:
+ ovpn_peer_release_p2p(ovpn, sk, reason);
+ break;
+ case OVPN_MODE_MP:
+ ovpn_peers_release_mp(ovpn, sk, reason);
+ break;
+ }
+}
+
+static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ time64_t last_recv, last_sent, next_run1, next_run2;
+ unsigned long timeout, interval;
+ bool expired;
+
+ spin_lock_bh(&peer->lock);
+ /* we expect both timers to be configured at the same time,
+ * therefore bail out if either is not set
+ */
+ if (!peer->keepalive_timeout || !peer->keepalive_interval) {
+ spin_unlock_bh(&peer->lock);
+ return 0;
+ }
+
+ /* check for peer timeout */
+ expired = false;
+ timeout = peer->keepalive_timeout;
+ last_recv = READ_ONCE(peer->last_recv);
+ if (now < last_recv + timeout) {
+ peer->keepalive_recv_exp = last_recv + timeout;
+ next_run1 = peer->keepalive_recv_exp;
+ } else if (peer->keepalive_recv_exp > now) {
+ next_run1 = peer->keepalive_recv_exp;
+ } else {
+ expired = true;
+ }
+
+ if (expired) {
+ /* peer is dead -> kill it and move on */
+ spin_unlock_bh(&peer->lock);
+ netdev_dbg(peer->ovpn->dev, "peer %u expired\n",
+ peer->id);
+ ovpn_peer_remove(peer, OVPN_DEL_PEER_REASON_EXPIRED,
+ release_list);
+ return 0;
+ }
+
+ /* check for peer keepalive */
+ expired = false;
+ interval = peer->keepalive_interval;
+ last_sent = READ_ONCE(peer->last_sent);
+ if (now < last_sent + interval) {
+ peer->keepalive_xmit_exp = last_sent + interval;
+ next_run2 = peer->keepalive_xmit_exp;
+ } else if (peer->keepalive_xmit_exp > now) {
+ next_run2 = peer->keepalive_xmit_exp;
+ } else {
+ expired = true;
+ next_run2 = now + interval;
+ }
+ spin_unlock_bh(&peer->lock);
+
+ if (expired) {
+ /* a keepalive packet is required */
+ netdev_dbg(peer->ovpn->dev,
+ "sending keepalive to peer %u\n",
+ peer->id);
+ if (schedule_work(&peer->keepalive_work))
+ ovpn_peer_hold(peer);
+ }
+
+ if (next_run1 < next_run2)
+ return next_run1;
+
+ return next_run2;
+}
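
A worked example of the scheduling math above, with made-up timestamps in seconds:

	/* now = 1050, last_recv = 1000, timeout  = 120 -> recv_exp = 1120
	 * now = 1050, last_sent = 1045, interval =  10 -> xmit_exp = 1055
	 *
	 * Neither timer fired, so the function returns min(1120, 1055) =
	 * 1055 and the worker is rearmed 5 seconds from now. With
	 * last_sent = 1030 instead, xmit_exp (1040) lies in the past: a
	 * keepalive is scheduled and next_run2 becomes now + interval.
	 */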
+
+static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_priv *ovpn,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ time64_t tmp_next_run, next_run = 0;
+ struct hlist_node *tmp;
+ struct ovpn_peer *peer;
+ int bkt;
+
+ lockdep_assert_held(&ovpn->lock);
+
+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
+ tmp_next_run = ovpn_peer_keepalive_work_single(peer, now,
+ release_list);
+ if (!tmp_next_run)
+ continue;
+
+ /* the next worker run will be scheduled based on the shortest
+ * required interval across all peers
+ */
+ if (!next_run || tmp_next_run < next_run)
+ next_run = tmp_next_run;
+ }
+
+ return next_run;
+}
+
+static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_priv *ovpn,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *peer;
+ time64_t next_run = 0;
+
+ lockdep_assert_held(&ovpn->lock);
+
+ peer = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (peer)
+ next_run = ovpn_peer_keepalive_work_single(peer, now,
+ release_list);
+
+ return next_run;
+}
+
+/**
+ * ovpn_peer_keepalive_work - run keepalive logic on each known peer
+ * @work: pointer to the work member of the related ovpn object
+ *
+ * Each peer has two timers (if configured):
+ * 1. peer timeout: when no data is received for a certain interval,
+ * the peer is considered dead and it gets killed.
+ * 2. peer keepalive: when no data is sent to a certain peer for a
+ * certain interval, a special 'keepalive' packet is explicitly sent.
+ *
+ * This function iterates across the whole peer collection while
+ * checking the timers described above.
+ */
+void ovpn_peer_keepalive_work(struct work_struct *work)
+{
+ struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv,
+ keepalive_work.work);
+ time64_t next_run = 0, now = ktime_get_real_seconds();
+ LLIST_HEAD(release_list);
+
+ spin_lock_bh(&ovpn->lock);
+ switch (ovpn->mode) {
+ case OVPN_MODE_MP:
+ next_run = ovpn_peer_keepalive_work_mp(ovpn, now,
+ &release_list);
+ break;
+ case OVPN_MODE_P2P:
+ next_run = ovpn_peer_keepalive_work_p2p(ovpn, now,
+ &release_list);
+ break;
+ }
+
+ /* prevent rearming if the interface is being destroyed */
+ if (next_run > 0) {
+ netdev_dbg(ovpn->dev,
+ "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n",
+ now, next_run, next_run - now);
+ schedule_delayed_work(&ovpn->keepalive_work,
+ (next_run - now) * HZ);
+ }
+ unlock_ovpn(ovpn, &release_list);
+}
diff --git a/drivers/net/ovpn/peer.h b/drivers/net/ovpn/peer.h
new file mode 100644
index 000000000000..a1423f2b09e0
--- /dev/null
+++ b/drivers/net/ovpn/peer.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPEER_H_
+#define _NET_OVPN_OVPNPEER_H_
+
+#include <net/dst_cache.h>
+#include <net/strparser.h>
+
+#include "crypto.h"
+#include "socket.h"
+#include "stats.h"
+
+/**
+ * struct ovpn_peer - the main remote peer object
+ * @ovpn: main openvpn instance this peer belongs to
+ * @dev_tracker: reference tracker for associated dev
+ * @id: unique identifier
+ * @vpn_addrs: IP addresses assigned over the tunnel
+ * @vpn_addrs.ipv4: IPv4 assigned to peer on the tunnel
+ * @vpn_addrs.ipv6: IPv6 assigned to peer on the tunnel
+ * @hash_entry_id: entry in the peer ID hashtable
+ * @hash_entry_addr4: entry in the peer IPv4 hashtable
+ * @hash_entry_addr6: entry in the peer IPv6 hashtable
+ * @hash_entry_transp_addr: entry in the peer transport address hashtable
+ * @sock: the socket being used to talk to this peer
+ * @tcp: keeps track of TCP specific state
+ * @tcp.strp: stream parser context (TCP only)
+ * @tcp.user_queue: received packets that have to go to userspace (TCP only)
+ * @tcp.out_queue: packets on hold while socket is taken by user (TCP only)
+ * @tcp.tx_in_progress: true if TX is already ongoing (TCP only)
+ * @tcp.out_msg.skb: packet scheduled for sending (TCP only)
+ * @tcp.out_msg.offset: offset where next send should start (TCP only)
+ * @tcp.out_msg.len: remaining data to send within packet (TCP only)
+ * @tcp.sk_cb.sk_data_ready: pointer to original cb (TCP only)
+ * @tcp.sk_cb.sk_write_space: pointer to original cb (TCP only)
+ * @tcp.sk_cb.prot: pointer to original prot object (TCP only)
+ * @tcp.sk_cb.ops: pointer to the original prot_ops object (TCP only)
+ * @tcp.defer_del_work: work used to defer peer deletion (TCP only)
+ * @crypto: the crypto configuration (ciphers, keys, etc..)
+ * @dst_cache: cache for dst_entry used to send to peer
+ * @bind: remote peer binding
+ * @keepalive_interval: seconds after which a new keepalive should be sent
+ * @keepalive_xmit_exp: future timestamp when next keepalive should be sent
+ * @last_sent: timestamp of the last successfully sent packet
+ * @keepalive_timeout: seconds after which an inactive peer is considered dead
+ * @keepalive_recv_exp: future timestamp when the peer should expire
+ * @last_recv: timestamp of the last authenticated received packet
+ * @vpn_stats: per-peer in-VPN TX/RX stats
+ * @link_stats: per-peer link/transport TX/RX stats
+ * @delete_reason: why peer was deleted (e.g. timeout, transport error, ..)
+ * @lock: protects binding to peer (bind) and keepalive* fields
+ * @refcount: reference counter
+ * @rcu: used to free peer in an RCU safe way
+ * @release_entry: entry for the socket release list
+ * @keepalive_work: used to schedule keepalive sending
+ */
+struct ovpn_peer {
+ struct ovpn_priv *ovpn;
+ netdevice_tracker dev_tracker;
+ u32 id;
+ struct {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } vpn_addrs;
+ struct hlist_node hash_entry_id;
+ struct hlist_nulls_node hash_entry_addr4;
+ struct hlist_nulls_node hash_entry_addr6;
+ struct hlist_nulls_node hash_entry_transp_addr;
+ struct ovpn_socket __rcu *sock;
+
+ struct {
+ struct strparser strp;
+ struct sk_buff_head user_queue;
+ struct sk_buff_head out_queue;
+ bool tx_in_progress;
+
+ struct {
+ struct sk_buff *skb;
+ int offset;
+ int len;
+ } out_msg;
+
+ struct {
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_write_space)(struct sock *sk);
+ struct proto *prot;
+ const struct proto_ops *ops;
+ } sk_cb;
+
+ struct work_struct defer_del_work;
+ } tcp;
+ struct ovpn_crypto_state crypto;
+ struct dst_cache dst_cache;
+ struct ovpn_bind __rcu *bind;
+ unsigned long keepalive_interval;
+ unsigned long keepalive_xmit_exp;
+ time64_t last_sent;
+ unsigned long keepalive_timeout;
+ unsigned long keepalive_recv_exp;
+ time64_t last_recv;
+ struct ovpn_peer_stats vpn_stats;
+ struct ovpn_peer_stats link_stats;
+ enum ovpn_del_peer_reason delete_reason;
+ spinlock_t lock; /* protects bind and keepalive* */
+ struct kref refcount;
+ struct rcu_head rcu;
+ struct llist_node release_entry;
+ struct work_struct keepalive_work;
+};
+
+/**
+ * ovpn_peer_hold - increase reference counter
+ * @peer: the peer whose counter should be increased
+ *
+ * Return: true if the counter was increased or false if it was zero already
+ */
+static inline bool ovpn_peer_hold(struct ovpn_peer *peer)
+{
+ return kref_get_unless_zero(&peer->refcount);
+}
+
+void ovpn_peer_release(struct ovpn_peer *peer);
+void ovpn_peer_release_kref(struct kref *kref);
+
+/**
+ * ovpn_peer_put - decrease reference counter
+ * @peer: the peer whose counter should be decreased
+ */
+static inline void ovpn_peer_put(struct ovpn_peer *peer)
+{
+ kref_put(&peer->refcount, ovpn_peer_release_kref);
+}
+
+struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id);
+int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer);
+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason);
+void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason);
+
+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
+ struct sk_buff *skb);
+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id);
+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
+ struct sk_buff *skb);
+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer);
+bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer);
+
+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout);
+void ovpn_peer_keepalive_work(struct work_struct *work);
+
+void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb);
+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss,
+ const void *local_ip);
+
+#endif /* _NET_OVPN_OVPNPEER_H_ */
diff --git a/drivers/net/ovpn/pktid.c b/drivers/net/ovpn/pktid.c
new file mode 100644
index 000000000000..2f29049897e3
--- /dev/null
+++ b/drivers/net/ovpn/pktid.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "pktid.h"
+
+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid)
+{
+ atomic_set(&pid->seq_num, 1);
+}
+
+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr)
+{
+ memset(pr, 0, sizeof(*pr));
+ spin_lock_init(&pr->lock);
+}
+
+/* Packet replay detection.
+ * Allows ID backtrack of up to REPLAY_WINDOW_SIZE - 1.
+ */
+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time)
+{
+ const unsigned long now = jiffies;
+ int ret;
+
+ /* ID must not be zero */
+ if (unlikely(pkt_id == 0))
+ return -EINVAL;
+
+ spin_lock_bh(&pr->lock);
+
+ /* expire backtracks at or below pr->id after PKTID_RECV_EXPIRE time */
+ if (unlikely(time_after_eq(now, pr->expire)))
+ pr->id_floor = pr->id;
+
+ /* time changed? */
+ if (unlikely(pkt_time != pr->time)) {
+ if (pkt_time > pr->time) {
+ /* time moved forward, accept */
+ pr->base = 0;
+ pr->extent = 0;
+ pr->id = 0;
+ pr->time = pkt_time;
+ pr->id_floor = 0;
+ } else {
+ /* time moved backward, reject */
+ ret = -ETIME;
+ goto out;
+ }
+ }
+
+ if (likely(pkt_id == pr->id + 1)) {
+ /* well-formed ID sequence (incremented by 1) */
+ pr->base = REPLAY_INDEX(pr->base, -1);
+ pr->history[pr->base / 8] |= (1 << (pr->base % 8));
+ if (pr->extent < REPLAY_WINDOW_SIZE)
+ ++pr->extent;
+ pr->id = pkt_id;
+ } else if (pkt_id > pr->id) {
+ /* ID jumped forward by more than one */
+ const unsigned int delta = pkt_id - pr->id;
+
+ if (delta < REPLAY_WINDOW_SIZE) {
+ unsigned int i;
+
+ pr->base = REPLAY_INDEX(pr->base, -delta);
+ pr->history[pr->base / 8] |= (1 << (pr->base % 8));
+ pr->extent += delta;
+ if (pr->extent > REPLAY_WINDOW_SIZE)
+ pr->extent = REPLAY_WINDOW_SIZE;
+ for (i = 1; i < delta; ++i) {
+ unsigned int newb = REPLAY_INDEX(pr->base, i);
+
+ pr->history[newb / 8] &= ~BIT(newb % 8);
+ }
+ } else {
+ pr->base = 0;
+ pr->extent = REPLAY_WINDOW_SIZE;
+ memset(pr->history, 0, sizeof(pr->history));
+ pr->history[0] = 1;
+ }
+ pr->id = pkt_id;
+ } else {
+ /* ID backtrack */
+ const unsigned int delta = pr->id - pkt_id;
+
+ if (delta > pr->max_backtrack)
+ pr->max_backtrack = delta;
+ if (delta < pr->extent) {
+ if (pkt_id > pr->id_floor) {
+ const unsigned int ri = REPLAY_INDEX(pr->base,
+ delta);
+ u8 *p = &pr->history[ri / 8];
+ const u8 mask = (1 << (ri % 8));
+
+ if (*p & mask) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *p |= mask;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ pr->expire = now + PKTID_RECV_EXPIRE;
+ ret = 0;
+out:
+ spin_unlock_bh(&pr->lock);
+ return ret;
+}
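
To make the three branches concrete, here is a short trace against a fresh state (id = 0, empty history), assuming a constant pkt_time:

	/* pkt_id 1: in-order (id + 1) -> base shifts by 1, bit set, id = 1
	 * pkt_id 2: in-order          -> id = 2
	 * pkt_id 5: jump, delta = 3   -> base shifts by 3, the bits for
	 *                                the skipped IDs 3 and 4 are
	 *                                cleared, bit for 5 set, id = 5
	 * pkt_id 4: backtrack, delta = 1
	 *                             -> bit at REPLAY_INDEX(base, 1) is
	 *                                clear, so it is set and accepted
	 * pkt_id 4 again              -> bit already set: -EINVAL (replay)
	 */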
diff --git a/drivers/net/ovpn/pktid.h b/drivers/net/ovpn/pktid.h
new file mode 100644
index 000000000000..0262d026d15e
--- /dev/null
+++ b/drivers/net/ovpn/pktid.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPKTID_H_
+#define _NET_OVPN_OVPNPKTID_H_
+
+#include "proto.h"
+
+/* If no packets received for this length of time, set a backtrack floor
+ * at highest received packet ID thus far.
+ */
+#define PKTID_RECV_EXPIRE (30 * HZ)
+
+/* Packet-ID state for transmitter */
+struct ovpn_pktid_xmit {
+ atomic_t seq_num;
+};
+
+/* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */
+#define REPLAY_WINDOW_ORDER 8
+
+#define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER)
+#define REPLAY_WINDOW_SIZE (REPLAY_WINDOW_BYTES * 8)
+#define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1))
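
With the default order of 8 the history spans 256 bytes, i.e. 2048 bits, matching the "backtrack of up to REPLAY_WINDOW_SIZE - 1" noted in pktid.c. Because the window size is a power of two, the mask in REPLAY_INDEX() also wraps the negative offsets used when shifting the window base:

	/* REPLAY_INDEX(0, -1) == (0 - 1) & 2047 == 2047: stepping the
	 * base backwards wraps cleanly around the circular bitmap.
	 */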
+
+/* Packet-ID state for receiver.
+ * Other than lock member, can be zeroed to initialize.
+ */
+struct ovpn_pktid_recv {
+ /* "sliding window" bitmask of recent packet IDs received */
+ u8 history[REPLAY_WINDOW_BYTES];
+ /* bit position of deque base in history */
+ unsigned int base;
+ /* extent (in bits) of deque in history */
+ unsigned int extent;
+ /* expiration of history in jiffies */
+ unsigned long expire;
+ /* highest sequence number received */
+ u32 id;
+ /* highest time stamp received */
+ u32 time;
+ /* we will only accept backtrack IDs > id_floor */
+ u32 id_floor;
+ unsigned int max_backtrack;
+ /* protects the entire packet ID state */
+ spinlock_t lock;
+};
+
+/* Get the next packet ID for xmit */
+static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid)
+{
+ const u32 seq_num = atomic_fetch_add_unless(&pid->seq_num, 1, 0);
+ /* when the 32bit space is over, we return an error because the packet
+ * ID is used to create the cipher IV and we do not want to reuse the
+ * same value more than once
+ */
+ if (unlikely(!seq_num))
+ return -ERANGE;
+
+ *pktid = seq_num;
+
+ return 0;
+}
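
atomic_fetch_add_unless() returns the previous value and refuses the increment once the counter has wrapped to 0, i.e. after ID 0xffffffff has been handed out; from then on every call fails until a fresh key (whose counter restarts at 1) is installed. A sketch of the expected caller pattern, where ks->pid_xmit is a hypothetical per-key transmit state:

	u32 pktid;

	if (unlikely(ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid) < 0))
		return -ERANGE;	/* ID space exhausted: peer must rekey */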
+
+/* Write 12-byte AEAD IV to dest */
+static inline void ovpn_pktid_aead_write(const u32 pktid,
+ const u8 nt[],
+ unsigned char *dest)
+{
+ *(__force __be32 *)(dest) = htonl(pktid);
+ BUILD_BUG_ON(4 + OVPN_NONCE_TAIL_SIZE != OVPN_NONCE_SIZE);
+ memcpy(dest + 4, nt, OVPN_NONCE_TAIL_SIZE);
+}
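
Matching the layout pictured in proto.h, for pktid 5 and the nonce tail 52 1c 3b 01 43 08 c0 41 the function fills dest as follows:

	/* dest: 00 00 00 05 | 52 1c 3b 01 43 08 c0 41
	 *       [ pkt ID   ]  [ nonce_tail (never on the wire) ]
	 *
	 * only the first OVPN_NONCE_WIRE_SIZE (4) bytes are transmitted;
	 * the tail is key material already known to both sides.
	 */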
+
+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid);
+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr);
+
+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time);
+
+#endif /* _NET_OVPN_OVPNPKTID_H_ */
diff --git a/drivers/net/ovpn/proto.h b/drivers/net/ovpn/proto.h
new file mode 100644
index 000000000000..b7d285b4d9c1
--- /dev/null
+++ b/drivers/net/ovpn/proto.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_PROTO_H_
+#define _NET_OVPN_PROTO_H_
+
+#include "main.h"
+
+#include <linux/bitfield.h>
+#include <linux/skbuff.h>
+
+/* When the OpenVPN protocol is run in AEAD mode, use
+ * the OpenVPN packet ID as the AEAD nonce:
+ *
+ * 00000005 521c3b01 4308c041
+ * [seq # ] [ nonce_tail ]
+ * [ 12-byte full IV ] -> OVPN_NONCE_SIZE
+ * [4-bytes -> OVPN_NONCE_WIRE_SIZE
+ * on wire]
+ */
+
+/* nonce size (96bits) as required by AEAD ciphers */
+#define OVPN_NONCE_SIZE 12
+/* last 8 bytes of AEAD nonce: provided by userspace and usually derived
+ * from key material generated during TLS handshake
+ */
+#define OVPN_NONCE_TAIL_SIZE 8
+
+/* OpenVPN nonce size reduced by 8-byte nonce tail -- this is the
+ * size of the AEAD Associated Data (AD) sent over the wire
+ * and is normally the head of the IV
+ */
+#define OVPN_NONCE_WIRE_SIZE (OVPN_NONCE_SIZE - OVPN_NONCE_TAIL_SIZE)
+
+#define OVPN_OPCODE_SIZE 4 /* DATA_V2 opcode size */
+#define OVPN_OPCODE_KEYID_MASK 0x07000000
+#define OVPN_OPCODE_PKTTYPE_MASK 0xF8000000
+#define OVPN_OPCODE_PEERID_MASK 0x00FFFFFF
+
+/* packet opcodes of interest to us */
+#define OVPN_DATA_V1 6 /* data channel v1 packet */
+#define OVPN_DATA_V2 9 /* data channel v2 packet */
+
+#define OVPN_PEER_ID_UNDEF 0x00FFFFFF
+
+/**
+ * ovpn_opcode_from_skb - extract OP code from skb at specified offset
+ * @skb: the packet to extract the OP code from
+ * @offset: the offset in the data buffer where the OP code is located
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the OP code
+ */
+static inline u8 ovpn_opcode_from_skb(const struct sk_buff *skb, u16 offset)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset));
+
+ return FIELD_GET(OVPN_OPCODE_PKTTYPE_MASK, opcode);
+}
+
+/**
+ * ovpn_peer_id_from_skb - extract peer ID from skb at specified offset
+ * @skb: the packet to extract the peer ID from
+ * @offset: the offset in the data buffer where the peer ID is located
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the peer ID
+ */
+static inline u32 ovpn_peer_id_from_skb(const struct sk_buff *skb, u16 offset)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset));
+
+ return FIELD_GET(OVPN_OPCODE_PEERID_MASK, opcode);
+}
+
+/**
+ * ovpn_key_id_from_skb - extract key ID from the skb head
+ * @skb: the packet to extract the key ID from
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the key ID
+ */
+static inline u8 ovpn_key_id_from_skb(const struct sk_buff *skb)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)skb->data);
+
+ return FIELD_GET(OVPN_OPCODE_KEYID_MASK, opcode);
+}
+
+/**
+ * ovpn_opcode_compose - combine OP code, key ID and peer ID to wire format
+ * @opcode: the OP code
+ * @key_id: the key ID
+ * @peer_id: the peer ID
+ *
+ * Return: a 4-byte integer obtained by combining all input values according
+ * to the OpenVPN wire format. This integer can then be written to the packet
+ * header.
+ */
+static inline u32 ovpn_opcode_compose(u8 opcode, u8 key_id, u32 peer_id)
+{
+ return FIELD_PREP(OVPN_OPCODE_PKTTYPE_MASK, opcode) |
+ FIELD_PREP(OVPN_OPCODE_KEYID_MASK, key_id) |
+ FIELD_PREP(OVPN_OPCODE_PEERID_MASK, peer_id);
+}
+
+#endif /* _NET_OVPN_PROTO_H_ */
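
FIELD_PREP() and FIELD_GET() in the helpers above are plain mask-and-shift
operations: the DATA_V2 header word packs a 5-bit packet type, a 3-bit key ID
and a 24-bit peer ID into 32 bits. A standalone round-trip sketch, with
field_prep()/field_get() as userspace stand-ins for the kernel helpers
(__builtin_ctz is a GCC/Clang builtin used to derive the shift from the mask):

#include <stdint.h>
#include <stdio.h>

#define OVPN_OPCODE_KEYID_MASK   0x07000000u
#define OVPN_OPCODE_PKTTYPE_MASK 0xF8000000u
#define OVPN_OPCODE_PEERID_MASK  0x00FFFFFFu
#define OVPN_DATA_V2 9

/* userspace stand-ins for the kernel's FIELD_PREP()/FIELD_GET() */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t field_get(uint32_t mask, uint32_t word)
{
	return (word & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t hdr = field_prep(OVPN_OPCODE_PKTTYPE_MASK, OVPN_DATA_V2) |
		       field_prep(OVPN_OPCODE_KEYID_MASK, 2) |
		       field_prep(OVPN_OPCODE_PEERID_MASK, 0x1234);

	printf("header word: 0x%08x\n", hdr); /* 0x4a001234 */
	printf("opcode:  %u\n", field_get(OVPN_OPCODE_PKTTYPE_MASK, hdr));
	printf("key id:  %u\n", field_get(OVPN_OPCODE_KEYID_MASK, hdr));
	printf("peer id: 0x%x\n", field_get(OVPN_OPCODE_PEERID_MASK, hdr));
	return 0;
}
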
diff --git a/drivers/net/ovpn/skb.h b/drivers/net/ovpn/skb.h
new file mode 100644
index 000000000000..64430880f1da
--- /dev/null
+++ b/drivers/net/ovpn/skb.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_SKB_H_
+#define _NET_OVPN_SKB_H_
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct ovpn_cb {
+ struct ovpn_peer *peer;
+ struct ovpn_crypto_key_slot *ks;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ u8 *iv;
+ unsigned int payload_offset;
+ bool nosignal;
+};
+
+static inline struct ovpn_cb *ovpn_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ovpn_cb) > sizeof(skb->cb));
+ return (struct ovpn_cb *)skb->cb;
+}
+
+/* Return IP protocol version from skb header.
+ * Return 0 if protocol is not IPv4/IPv6 or cannot be read.
+ */
+static inline __be16 ovpn_ip_check_protocol(struct sk_buff *skb)
+{
+ __be16 proto = 0;
+
+ /* skb could be non-linear,
+ * make sure IP header is in non-fragmented part
+ */
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+
+ if (ip_hdr(skb)->version == 4) {
+ proto = htons(ETH_P_IP);
+ } else if (ip_hdr(skb)->version == 6) {
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+ proto = htons(ETH_P_IPV6);
+ }
+
+ return proto;
+}
+
+#endif /* _NET_OVPN_SKB_H_ */
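
ovpn_ip_check_protocol() works because the first four bits of both the IPv4
and the IPv6 header carry the version number, so one nibble is enough to pick
the EtherType. A self-contained model of that dispatch over a flat buffer
instead of an skb (ip_version_to_proto() is a hypothetical name):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

/* Return the EtherType for the IP version found in buf, or 0. */
static uint16_t ip_version_to_proto(const uint8_t *buf, size_t len)
{
	if (len < 1)
		return 0;

	switch (buf[0] >> 4) { /* the version lives in the top nibble */
	case 4:
		return len >= 20 ? ETH_P_IP : 0;   /* minimal IPv4 header */
	case 6:
		return len >= 40 ? ETH_P_IPV6 : 0; /* fixed IPv6 header */
	default:
		return 0;
	}
}

int main(void)
{
	uint8_t v4[20] = { 0x45 }; /* version 4, IHL 5 */
	uint8_t v6[40] = { 0x60 }; /* version 6 */

	printf("0x%04x 0x%04x\n",
	       ip_version_to_proto(v4, sizeof(v4)),
	       ip_version_to_proto(v6, sizeof(v6)));
	return 0;
}
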
diff --git a/drivers/net/ovpn/socket.c b/drivers/net/ovpn/socket.c
new file mode 100644
index 000000000000..9750871ab65c
--- /dev/null
+++ b/drivers/net/ovpn/socket.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "peer.h"
+#include "socket.h"
+#include "tcp.h"
+#include "udp.h"
+
+static void ovpn_socket_release_kref(struct kref *kref)
+{
+ struct ovpn_socket *sock = container_of(kref, struct ovpn_socket,
+ refcount);
+
+ if (sock->sk->sk_protocol == IPPROTO_UDP)
+ ovpn_udp_socket_detach(sock);
+ else if (sock->sk->sk_protocol == IPPROTO_TCP)
+ ovpn_tcp_socket_detach(sock);
+}
+
+/**
+ * ovpn_socket_put - decrease reference counter
+ * @peer: peer whose socket reference counter should be decreased
+ * @sock: the RCU protected peer socket
+ *
+ * This function is only used internally. Users wishing to release
+ * references to the ovpn_socket should use ovpn_socket_release()
+ *
+ * Return: true if the socket was released, false otherwise
+ */
+static bool ovpn_socket_put(struct ovpn_peer *peer, struct ovpn_socket *sock)
+{
+ return kref_put(&sock->refcount, ovpn_socket_release_kref);
+}
+
+/**
+ * ovpn_socket_release - release resources owned by socket user
+ * @peer: peer whose socket should be released
+ *
+ * This function should be invoked when the peer is being removed
+ * and wants to drop its link to the socket.
+ *
+ * In case of UDP, the detach routine will drop a reference to the
+ * ovpn netdev, pointed by the ovpn_socket.
+ *
+ * In case of TCP, releasing the socket drops the reference to
+ * the peer it is linked to, thus allowing the peer to disappear
+ * as well.
+ *
+ * This function is expected to be invoked exactly once per peer.
+ *
+ * NOTE: this function may sleep
+ */
+void ovpn_socket_release(struct ovpn_peer *peer)
+{
+ struct ovpn_socket *sock;
+ bool released;
+
+ might_sleep();
+
+ sock = rcu_replace_pointer(peer->sock, NULL, true);
+ /* release may be invoked after socket was detached */
+ if (!sock)
+ return;
+
+ /* Drop the reference while holding the sock lock to prevent a
+ * concurrent ovpn_socket_new() call from racing with a partially
+ * detached socket.
+ *
+ * Holding the lock ensures that a socket with refcnt 0 is fully
+ * detached before it can be picked up by a concurrent reader.
+ */
+ lock_sock(sock->sk);
+ released = ovpn_socket_put(peer, sock);
+ release_sock(sock->sk);
+
+ /* align all readers with sk_user_data being NULL */
+ synchronize_rcu();
+
+ /* following cleanup should happen with lock released */
+ if (released) {
+ if (sock->sk->sk_protocol == IPPROTO_UDP) {
+ netdev_put(sock->ovpn->dev, &sock->dev_tracker);
+ } else if (sock->sk->sk_protocol == IPPROTO_TCP) {
+ /* wait for TCP jobs to terminate */
+ ovpn_tcp_socket_wait_finish(sock);
+ ovpn_peer_put(sock->peer);
+ }
+ /* drop reference acquired in ovpn_socket_new() */
+ sock_put(sock->sk);
+ /* we can call plain kfree() because we already waited one RCU
+ * period due to synchronize_rcu()
+ */
+ kfree(sock);
+ }
+}
+
+static bool ovpn_socket_hold(struct ovpn_socket *sock)
+{
+ return kref_get_unless_zero(&sock->refcount);
+}
+
+static int ovpn_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct socket *sock,
+ struct ovpn_peer *peer)
+{
+ if (sock->sk->sk_protocol == IPPROTO_UDP)
+ return ovpn_udp_socket_attach(ovpn_sock, sock, peer->ovpn);
+ else if (sock->sk->sk_protocol == IPPROTO_TCP)
+ return ovpn_tcp_socket_attach(ovpn_sock, peer);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ovpn_socket_new - create a new socket and initialize it
+ * @sock: the kernel socket to embed
+ * @peer: the peer reachable via this socket
+ *
+ * Return: an openvpn socket on success or a negative error code otherwise
+ */
+struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
+{
+ struct ovpn_socket *ovpn_sock;
+ struct sock *sk = sock->sk;
+ int ret;
+
+ lock_sock(sk);
+
+ /* a TCP socket can only be owned by a single peer, therefore there
+ * can't be any other user
+ */
+ if (sk->sk_protocol == IPPROTO_TCP && sk->sk_user_data) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ goto sock_release;
+ }
+
+ /* a UDP socket can be shared across multiple peers, but we must make
+ * sure it is not owned by something else
+ */
+ if (sk->sk_protocol == IPPROTO_UDP) {
+ u8 type = READ_ONCE(udp_sk(sk)->encap_type);
+
+ /* socket owned by other encapsulation module */
+ if (type && type != UDP_ENCAP_OVPNINUDP) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ goto sock_release;
+ }
+
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference_sk_user_data(sk);
+ if (ovpn_sock) {
+ /* socket owned by another ovpn instance, we can't use it */
+ if (ovpn_sock->ovpn != peer->ovpn) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ rcu_read_unlock();
+ goto sock_release;
+ }
+
+ /* this socket is already owned by this instance,
+ * therefore we can increase the refcounter and
+ * use it as expected
+ */
+ if (WARN_ON(!ovpn_socket_hold(ovpn_sock))) {
+ /* this should never happen because setting
+ * the refcnt to 0 and detaching the socket
+ * is expected to be atomic
+ */
+ ovpn_sock = ERR_PTR(-EAGAIN);
+ rcu_read_unlock();
+ goto sock_release;
+ }
+
+ rcu_read_unlock();
+ goto sock_release;
+ }
+ rcu_read_unlock();
+ }
+
+ /* socket is not owned: attach to this ovpn instance */
+
+ ovpn_sock = kzalloc(sizeof(*ovpn_sock), GFP_KERNEL);
+ if (!ovpn_sock) {
+ ovpn_sock = ERR_PTR(-ENOMEM);
+ goto sock_release;
+ }
+
+ ovpn_sock->sk = sk;
+ kref_init(&ovpn_sock->refcount);
+
+ /* the newly created ovpn_socket is holding reference to sk,
+ * therefore we increase its refcounter.
+ *
+ * This ovpn_socket instance is referenced by all peers
+ * using the same socket.
+ *
+ * ovpn_socket_release() will take care of dropping the reference.
+ */
+ sock_hold(sk);
+
+ ret = ovpn_socket_attach(ovpn_sock, sock, peer);
+ if (ret < 0) {
+ sock_put(sk);
+ kfree(ovpn_sock);
+ ovpn_sock = ERR_PTR(ret);
+ goto sock_release;
+ }
+
+ /* TCP sockets are per-peer, therefore they are linked to their unique
+ * peer
+ */
+ if (sk->sk_protocol == IPPROTO_TCP) {
+ INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work);
+ ovpn_sock->peer = peer;
+ ovpn_peer_hold(peer);
+ } else if (sk->sk_protocol == IPPROTO_UDP) {
+ /* in UDP we only link the ovpn instance since the socket is
+ * shared among multiple peers
+ */
+ ovpn_sock->ovpn = peer->ovpn;
+ netdev_hold(peer->ovpn->dev, &ovpn_sock->dev_tracker,
+ GFP_KERNEL);
+ }
+
+ rcu_assign_sk_user_data(sk, ovpn_sock);
+sock_release:
+ release_sock(sk);
+ return ovpn_sock;
+}
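
The lookup path in ovpn_socket_new() leans on the semantics of
kref_get_unless_zero(): an RCU reader may use the object only if it can move
the refcount from a non-zero value to a higher one, which is what makes
"refcount reached zero" equivalent to "socket fully detached" in the comments
above. A minimal C11-atomics model of that rule (obj_hold()/obj_put() are
illustrative names, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Succeed only while the count is non-zero, as kref_get_unless_zero() does. */
static bool obj_hold(struct obj *o)
{
	int c = atomic_load(&o->refcount);

	while (c != 0) {
		/* on failure the CAS reloads c, so we simply retry */
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;
	}
	return false;
}

/* Return true when the caller dropped the last reference, as kref_put(). */
static bool obj_put(struct obj *o)
{
	return atomic_fetch_sub(&o->refcount, 1) == 1;
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	printf("hold: %d\n", obj_hold(&o)); /* 1: refcount 1 -> 2 */
	printf("last: %d\n", obj_put(&o));  /* 0: refcount 2 -> 1 */
	printf("last: %d\n", obj_put(&o));  /* 1: dropped to zero */
	printf("hold: %d\n", obj_hold(&o)); /* 0: object is dead */
	return 0;
}
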
diff --git a/drivers/net/ovpn/socket.h b/drivers/net/ovpn/socket.h
new file mode 100644
index 000000000000..4afcec71040d
--- /dev/null
+++ b/drivers/net/ovpn/socket.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_SOCK_H_
+#define _NET_OVPN_SOCK_H_
+
+#include <linux/net.h>
+#include <linux/kref.h>
+#include <net/sock.h>
+
+struct ovpn_priv;
+struct ovpn_peer;
+
+/**
+ * struct ovpn_socket - a kernel socket referenced in the ovpn code
+ * @ovpn: ovpn instance owning this socket (UDP only)
+ * @dev_tracker: reference tracker for associated dev (UDP only)
+ * @peer: unique peer transmitting over this socket (TCP only)
+ * @sk: the low level sock object
+ * @refcount: amount of contexts currently referencing this object
+ * @work: member used to schedule release routine (it may block)
+ * @tcp_tx_work: work for deferring outgoing packet processing (TCP only)
+ */
+struct ovpn_socket {
+ union {
+ struct {
+ struct ovpn_priv *ovpn;
+ netdevice_tracker dev_tracker;
+ };
+ struct ovpn_peer *peer;
+ };
+
+ struct sock *sk;
+ struct kref refcount;
+ struct work_struct work;
+ struct work_struct tcp_tx_work;
+};
+
+struct ovpn_socket *ovpn_socket_new(struct socket *sock,
+ struct ovpn_peer *peer);
+void ovpn_socket_release(struct ovpn_peer *peer);
+
+#endif /* _NET_OVPN_SOCK_H_ */
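
The anonymous union in struct ovpn_socket is discriminated by
sk->sk_protocol: a UDP socket only ever touches the ovpn/dev_tracker pair,
a TCP socket only the peer pointer, so the two can share storage. The same
idiom in miniature (all type and field names here are illustrative):

#include <stdio.h>

enum proto { PROTO_UDP, PROTO_TCP };

struct sock_ctx {
	enum proto proto; /* discriminator, like sk->sk_protocol */
	union {
		struct {
			void *ovpn;      /* UDP: shared ovpn instance */
			int dev_tracker;
		};
		void *peer;              /* TCP: unique peer */
	};
};

int main(void)
{
	struct sock_ctx udp = { .proto = PROTO_UDP, .ovpn = (void *)0x1 };
	struct sock_ctx tcp = { .proto = PROTO_TCP, .peer = (void *)0x2 };

	/* always check the discriminator before touching union members */
	if (udp.proto == PROTO_UDP)
		printf("udp: ovpn=%p\n", udp.ovpn);
	if (tcp.proto == PROTO_TCP)
		printf("tcp: peer=%p\n", tcp.peer);
	return 0;
}
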
diff --git a/drivers/net/ovpn/stats.c b/drivers/net/ovpn/stats.c
new file mode 100644
index 000000000000..d637143473bb
--- /dev/null
+++ b/drivers/net/ovpn/stats.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/atomic.h>
+
+#include "stats.h"
+
+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps)
+{
+ atomic64_set(&ps->rx.bytes, 0);
+ atomic64_set(&ps->rx.packets, 0);
+
+ atomic64_set(&ps->tx.bytes, 0);
+ atomic64_set(&ps->tx.packets, 0);
+}
diff --git a/drivers/net/ovpn/stats.h b/drivers/net/ovpn/stats.h
new file mode 100644
index 000000000000..53433d8b6c33
--- /dev/null
+++ b/drivers/net/ovpn/stats.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ * Lev Stipakov <lev@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNSTATS_H_
+#define _NET_OVPN_OVPNSTATS_H_
+
+/* one stat */
+struct ovpn_peer_stat {
+ atomic64_t bytes;
+ atomic64_t packets;
+};
+
+/* rx and tx stats combined */
+struct ovpn_peer_stats {
+ struct ovpn_peer_stat rx;
+ struct ovpn_peer_stat tx;
+};
+
+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps);
+
+static inline void ovpn_peer_stats_increment(struct ovpn_peer_stat *stat,
+ const unsigned int n)
+{
+ atomic64_add(n, &stat->bytes);
+ atomic64_inc(&stat->packets);
+}
+
+static inline void ovpn_peer_stats_increment_rx(struct ovpn_peer_stats *stats,
+ const unsigned int n)
+{
+ ovpn_peer_stats_increment(&stats->rx, n);
+}
+
+static inline void ovpn_peer_stats_increment_tx(struct ovpn_peer_stats *stats,
+ const unsigned int n)
+{
+ ovpn_peer_stats_increment(&stats->tx, n);
+}
+
+#endif /* _NET_OVPN_OVPNSTATS_H_ */
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
new file mode 100644
index 000000000000..289f62c5d2c7
--- /dev/null
+++ b/drivers/net/ovpn/tcp.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/skbuff.h>
+#include <net/hotdata.h>
+#include <net/inet_common.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/transp_v6.h>
+#include <net/route.h>
+#include <trace/events/sock.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "skb.h"
+#include "tcp.h"
+
+#define OVPN_TCP_DEPTH_NESTING 2
+#if OVPN_TCP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
+#error "OVPN TCP requires its own lockdep subclass"
+#endif
+
+static struct proto ovpn_tcp_prot __ro_after_init;
+static struct proto_ops ovpn_tcp_ops __ro_after_init;
+static struct proto ovpn_tcp6_prot __ro_after_init;
+static struct proto_ops ovpn_tcp6_ops __ro_after_init;
+
+static int ovpn_tcp_parse(struct strparser *strp, struct sk_buff *skb)
+{
+ struct strp_msg *rxm = strp_msg(skb);
+ __be16 blen;
+ u16 len;
+ int err;
+
+ /* when packets are written to the TCP stream, they are prepended with
+ * two bytes indicating the actual packet size.
+ * Parse accordingly and return the actual size (including the size
+ * header)
+ */
+
+ if (skb->len < rxm->offset + 2)
+ return 0;
+
+ err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen));
+ if (err < 0)
+ return err;
+
+ len = be16_to_cpu(blen);
+ if (len < 2)
+ return -EINVAL;
+
+ return len + 2;
+}
+
+/* queue skb for sending to userspace via recvmsg on the socket */
+static void ovpn_tcp_to_userspace(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ skb_set_owner_r(skb, sk);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb_queue_tail(&peer->tcp.user_queue, skb);
+ peer->tcp.sk_cb.sk_data_ready(sk);
+}
+
+static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb)
+{
+ struct ovpn_peer *peer = container_of(strp, struct ovpn_peer, tcp.strp);
+ struct strp_msg *msg = strp_msg(skb);
+ size_t pkt_len = msg->full_len - 2;
+ size_t off = msg->offset + 2;
+ u8 opcode;
+
+ /* ensure skb->data points to the beginning of the openvpn packet */
+ if (!pskb_pull(skb, off)) {
+ net_warn_ratelimited("%s: packet too small for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* strparser does not trim the skb for us, therefore we do it now */
+ if (pskb_trim(skb, pkt_len) != 0) {
+ net_warn_ratelimited("%s: trimming skb failed for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* we need the first 4 bytes of data to be accessible
+ * to extract the opcode and the key ID later on
+ */
+ if (!pskb_may_pull(skb, OVPN_OPCODE_SIZE)) {
+ net_warn_ratelimited("%s: packet too small to fetch opcode for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* DATA_V2 packets are handled in kernel, the rest goes to user space */
+ opcode = ovpn_opcode_from_skb(skb, 0);
+ if (unlikely(opcode != OVPN_DATA_V2)) {
+ if (opcode == OVPN_DATA_V1) {
+ net_warn_ratelimited("%s: DATA_V1 detected on the TCP stream\n",
+ netdev_name(peer->ovpn->dev));
+ goto err;
+ }
+
+ /* The packet size header must be there when sending the packet
+ * to userspace, therefore we put it back
+ */
+ skb_push(skb, 2);
+ ovpn_tcp_to_userspace(peer, strp->sk, skb);
+ return;
+ }
+
+ /* hold reference to peer as required by ovpn_recv().
+ *
+ * NOTE: in this context we should already be holding a reference to
+ * this peer, therefore ovpn_peer_hold() is not expected to fail
+ */
+ if (WARN_ON(!ovpn_peer_hold(peer)))
+ goto err_nopeer;
+
+ ovpn_recv(peer, skb);
+ return;
+err:
+ /* take reference for deferred peer deletion. should never fail */
+ if (WARN_ON(!ovpn_peer_hold(peer)))
+ goto err_nopeer;
+ schedule_work(&peer->tcp.defer_del_work);
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+err_nopeer:
+ kfree_skb(skb);
+}
+
+static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len)
+{
+ int err = 0, off, copied = 0, ret;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) {
+ rcu_read_unlock();
+ return -EBADF;
+ }
+ peer = sock->peer;
+ rcu_read_unlock();
+
+ skb = __skb_recv_datagram(sk, &peer->tcp.user_queue, flags, &off, &err);
+ if (!skb) {
+ if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) {
+ ret = 0;
+ goto out;
+ }
+ ret = err;
+ goto out;
+ }
+
+ copied = len;
+ if (copied > skb->len)
+ copied = skb->len;
+ else if (copied < skb->len)
+ msg->msg_flags |= MSG_TRUNC;
+
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ ret = err;
+ goto out;
+ }
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+ kfree_skb(skb);
+ ret = copied;
+out:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+ struct ovpn_peer *peer = ovpn_sock->peer;
+ struct sock *sk = ovpn_sock->sk;
+
+ strp_stop(&peer->tcp.strp);
+ skb_queue_purge(&peer->tcp.user_queue);
+
+ /* restore CBs that were saved in ovpn_sock_set_tcp_cb() */
+ sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready;
+ sk->sk_write_space = peer->tcp.sk_cb.sk_write_space;
+ sk->sk_prot = peer->tcp.sk_cb.prot;
+ sk->sk_socket->ops = peer->tcp.sk_cb.ops;
+
+ rcu_assign_sk_user_data(sk, NULL);
+}
+
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock)
+{
+ struct ovpn_peer *peer = sock->peer;
+
+ /* NOTE: we don't wait for peer->tcp.defer_del_work to finish:
+ * either the worker is not running or this function
+ * was invoked by that worker.
+ */
+
+ cancel_work_sync(&sock->tcp_tx_work);
+ strp_done(&peer->tcp.strp);
+
+ skb_queue_purge(&peer->tcp.out_queue);
+ kfree_skb(peer->tcp.out_msg.skb);
+ peer->tcp.out_msg.skb = NULL;
+}
+
+static void ovpn_tcp_send_sock(struct ovpn_peer *peer, struct sock *sk)
+{
+ struct sk_buff *skb = peer->tcp.out_msg.skb;
+ int ret, flags;
+
+ if (!skb)
+ return;
+
+ if (peer->tcp.tx_in_progress)
+ return;
+
+ peer->tcp.tx_in_progress = true;
+
+ do {
+ flags = ovpn_skb_cb(skb)->nosignal ? MSG_NOSIGNAL : 0;
+ ret = skb_send_sock_locked_with_flags(sk, skb,
+ peer->tcp.out_msg.offset,
+ peer->tcp.out_msg.len,
+ flags);
+ if (unlikely(ret < 0)) {
+ if (ret == -EAGAIN)
+ goto out;
+
+ net_warn_ratelimited("%s: TCP error to peer %u: %d\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, ret);
+
+ /* in case of a TCP error we can't recover the VPN
+ * stream, therefore we abort the connection
+ */
+ ovpn_peer_hold(peer);
+ schedule_work(&peer->tcp.defer_del_work);
+
+ /* we bail out immediately and keep tx_in_progress set
+ * to true. This way we prevent more TX attempts
+ * which would lead to more invocations of
+ * schedule_work()
+ */
+ return;
+ }
+
+ peer->tcp.out_msg.len -= ret;
+ peer->tcp.out_msg.offset += ret;
+ } while (peer->tcp.out_msg.len > 0);
+
+ if (!peer->tcp.out_msg.len) {
+ preempt_disable();
+ dev_dstats_tx_add(peer->ovpn->dev, skb->len);
+ preempt_enable();
+ }
+
+ kfree_skb(peer->tcp.out_msg.skb);
+ peer->tcp.out_msg.skb = NULL;
+ peer->tcp.out_msg.len = 0;
+ peer->tcp.out_msg.offset = 0;
+
+out:
+ peer->tcp.tx_in_progress = false;
+}
+
+void ovpn_tcp_tx_work(struct work_struct *work)
+{
+ struct ovpn_socket *sock;
+
+ sock = container_of(work, struct ovpn_socket, tcp_tx_work);
+
+ lock_sock(sock->sk);
+ if (sock->peer)
+ ovpn_tcp_send_sock(sock->peer, sock->sk);
+ release_sock(sock->sk);
+}
+
+static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ if (peer->tcp.out_msg.skb)
+ ovpn_tcp_send_sock(peer, sk);
+
+ if (peer->tcp.out_msg.skb) {
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ return;
+ }
+
+ peer->tcp.out_msg.skb = skb;
+ peer->tcp.out_msg.len = skb->len;
+ peer->tcp.out_msg.offset = 0;
+ ovpn_tcp_send_sock(peer, sk);
+}
+
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ u16 len = skb->len;
+
+ *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len);
+
+ spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
+ if (sock_owned_by_user(sk)) {
+ if (skb_queue_len(&peer->tcp.out_queue) >=
+ READ_ONCE(net_hotdata.max_backlog)) {
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ goto unlock;
+ }
+ __skb_queue_tail(&peer->tcp.out_queue, skb);
+ } else {
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+ }
+unlock:
+ spin_unlock(&sk->sk_lock.slock);
+}
+
+static void ovpn_tcp_release(struct sock *sk)
+{
+ struct sk_buff_head queue;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock) {
+ rcu_read_unlock();
+ return;
+ }
+
+ peer = sock->peer;
+
+ /* during initialization this function is called before
+ * assigning sock->peer
+ */
+ if (unlikely(!peer || !ovpn_peer_hold(peer))) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&peer->tcp.out_queue, &queue);
+
+ while ((skb = __skb_dequeue(&queue)))
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+
+ peer->tcp.sk_cb.prot->release_cb(sk);
+ ovpn_peer_put(peer);
+}
+
+static int ovpn_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct ovpn_socket *sock;
+ int ret, linear = PAGE_SIZE;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ lock_sock(sk);
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) {
+ rcu_read_unlock();
+ release_sock(sk);
+ return -EIO;
+ }
+ rcu_read_unlock();
+ peer = sock->peer;
+
+ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL)) {
+ ret = -EOPNOTSUPP;
+ goto peer_free;
+ }
+
+ if (peer->tcp.out_msg.skb) {
+ ret = -EAGAIN;
+ goto peer_free;
+ }
+
+ if (size < linear)
+ linear = size;
+
+ skb = sock_alloc_send_pskb(sk, linear, size - linear,
+ msg->msg_flags & MSG_DONTWAIT, &ret, 0);
+ if (!skb) {
+ net_err_ratelimited("%s: skb alloc failed: %d\n",
+ netdev_name(peer->ovpn->dev), ret);
+ goto peer_free;
+ }
+
+ skb_put(skb, linear);
+ skb->len = size;
+ skb->data_len = size - linear;
+
+ ret = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+ if (ret) {
+ kfree_skb(skb);
+ net_err_ratelimited("%s: skb copy from iter failed: %d\n",
+ netdev_name(peer->ovpn->dev), ret);
+ goto peer_free;
+ }
+
+ ovpn_skb_cb(skb)->nosignal = msg->msg_flags & MSG_NOSIGNAL;
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+ ret = size;
+peer_free:
+ release_sock(sk);
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+static int ovpn_tcp_disconnect(struct sock *sk, int flags)
+{
+ return -EBUSY;
+}
+
+static void ovpn_tcp_data_ready(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+
+ trace_sk_data_ready(sk);
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (likely(sock && sock->peer))
+ strp_data_ready(&sock->peer->tcp.strp);
+ rcu_read_unlock();
+}
+
+static void ovpn_tcp_write_space(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (likely(sock && sock->peer)) {
+ schedule_work(&sock->tcp_tx_work);
+ sock->peer->tcp.sk_cb.sk_write_space(sk);
+ }
+ rcu_read_unlock();
+}
+
+static void ovpn_tcp_build_protos(struct proto *new_prot,
+ struct proto_ops *new_ops,
+ const struct proto *orig_prot,
+ const struct proto_ops *orig_ops);
+
+static void ovpn_tcp_peer_del_work(struct work_struct *work)
+{
+ struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
+ tcp.defer_del_work);
+
+ ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
+ ovpn_peer_put(peer);
+}
+
+/* Set TCP encapsulation callbacks */
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_peer *peer)
+{
+ struct strp_callbacks cb = {
+ .rcv_msg = ovpn_tcp_rcv,
+ .parse_msg = ovpn_tcp_parse,
+ };
+ int ret;
+
+ /* make sure no pre-existing encapsulation handler exists */
+ if (ovpn_sock->sk->sk_user_data)
+ return -EBUSY;
+
+ /* only a fully connected socket is expected. Connection should be
+ * handled in userspace
+ */
+ if (ovpn_sock->sk->sk_state != TCP_ESTABLISHED) {
+ net_err_ratelimited("%s: provided TCP socket is not in ESTABLISHED state: %d\n",
+ netdev_name(peer->ovpn->dev),
+ ovpn_sock->sk->sk_state);
+ return -EINVAL;
+ }
+
+ ret = strp_init(&peer->tcp.strp, ovpn_sock->sk, &cb);
+ if (ret < 0) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return ret;
+ }
+
+ INIT_WORK(&peer->tcp.defer_del_work, ovpn_tcp_peer_del_work);
+
+ __sk_dst_reset(ovpn_sock->sk);
+ skb_queue_head_init(&peer->tcp.user_queue);
+ skb_queue_head_init(&peer->tcp.out_queue);
+
+ /* save current CBs so that they can be restored upon socket release */
+ peer->tcp.sk_cb.sk_data_ready = ovpn_sock->sk->sk_data_ready;
+ peer->tcp.sk_cb.sk_write_space = ovpn_sock->sk->sk_write_space;
+ peer->tcp.sk_cb.prot = ovpn_sock->sk->sk_prot;
+ peer->tcp.sk_cb.ops = ovpn_sock->sk->sk_socket->ops;
+
+ /* assign our static CBs and prot/ops */
+ ovpn_sock->sk->sk_data_ready = ovpn_tcp_data_ready;
+ ovpn_sock->sk->sk_write_space = ovpn_tcp_write_space;
+
+ if (ovpn_sock->sk->sk_family == AF_INET) {
+ ovpn_sock->sk->sk_prot = &ovpn_tcp_prot;
+ ovpn_sock->sk->sk_socket->ops = &ovpn_tcp_ops;
+ } else {
+ ovpn_sock->sk->sk_prot = &ovpn_tcp6_prot;
+ ovpn_sock->sk->sk_socket->ops = &ovpn_tcp6_ops;
+ }
+
+ /* avoid using task_frag */
+ ovpn_sock->sk->sk_allocation = GFP_ATOMIC;
+ ovpn_sock->sk->sk_use_task_frag = false;
+
+ /* enqueue the RX worker */
+ strp_check_rcv(&peer->tcp.strp);
+
+ return 0;
+}
+
+static void ovpn_tcp_close(struct sock *sk, long timeout)
+{
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock || !sock->peer || !ovpn_peer_hold(sock->peer)) {
+ rcu_read_unlock();
+ return;
+ }
+ peer = sock->peer;
+ rcu_read_unlock();
+
+ ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+ peer->tcp.sk_cb.prot->close(sk, timeout);
+ ovpn_peer_put(peer);
+}
+
+static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ __poll_t mask = datagram_poll(file, sock, wait);
+ struct ovpn_socket *ovpn_sock;
+
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
+ if (ovpn_sock && ovpn_sock->peer &&
+ !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ rcu_read_unlock();
+
+ return mask;
+}
+
+static void ovpn_tcp_build_protos(struct proto *new_prot,
+ struct proto_ops *new_ops,
+ const struct proto *orig_prot,
+ const struct proto_ops *orig_ops)
+{
+ memcpy(new_prot, orig_prot, sizeof(*new_prot));
+ memcpy(new_ops, orig_ops, sizeof(*new_ops));
+ new_prot->recvmsg = ovpn_tcp_recvmsg;
+ new_prot->sendmsg = ovpn_tcp_sendmsg;
+ new_prot->disconnect = ovpn_tcp_disconnect;
+ new_prot->close = ovpn_tcp_close;
+ new_prot->release_cb = ovpn_tcp_release;
+ new_ops->poll = ovpn_tcp_poll;
+}
+
+/* Initialize TCP static objects */
+void __init ovpn_tcp_init(void)
+{
+ ovpn_tcp_build_protos(&ovpn_tcp_prot, &ovpn_tcp_ops, &tcp_prot,
+ &inet_stream_ops);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ ovpn_tcp_build_protos(&ovpn_tcp6_prot, &ovpn_tcp6_ops, &tcpv6_prot,
+ &inet6_stream_ops);
+#endif
+}
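
ovpn_tcp_parse() and ovpn_tcp_send_skb() implement OpenVPN's TCP framing:
every packet is prefixed with its 16-bit big-endian length, and the parser
returns the total record size (payload plus the 2-byte header) once enough
bytes are buffered, 0 when it needs more data, or an error for implausibly
short lengths. A standalone sketch of that state machine over a flat buffer
(tcp_frame_parse() is a hypothetical helper, not the strparser API):

#include <arpa/inet.h> /* htons()/ntohs() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return the total record length if a full record starts at buf, else 0. */
static size_t tcp_frame_parse(const uint8_t *buf, size_t avail)
{
	uint16_t blen;

	if (avail < 2)
		return 0; /* need more bytes for the length header */
	memcpy(&blen, buf, 2);
	blen = ntohs(blen);
	if (avail < (size_t)blen + 2)
		return 0; /* payload not complete yet */
	return (size_t)blen + 2;
}

int main(void)
{
	uint8_t stream[64];
	uint16_t len = htons(5);
	size_t rec;

	memcpy(stream, &len, 2);        /* length prefix */
	memcpy(stream + 2, "hello", 5); /* payload */

	rec = tcp_frame_parse(stream, 4); /* only part of the record */
	printf("partial: %zu\n", rec);    /* 0 */
	rec = tcp_frame_parse(stream, 7); /* complete record */
	printf("record:  %zu bytes (%zu payload)\n", rec, rec - 2);
	return 0;
}
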
diff --git a/drivers/net/ovpn/tcp.h b/drivers/net/ovpn/tcp.h
new file mode 100644
index 000000000000..a3aa3570ae5e
--- /dev/null
+++ b/drivers/net/ovpn/tcp.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_TCP_H_
+#define _NET_OVPN_TCP_H_
+
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "peer.h"
+#include "skb.h"
+#include "socket.h"
+
+void __init ovpn_tcp_init(void);
+
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_peer *peer);
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock);
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock);
+
+/* Prepare skb and enqueue it for sending to peer.
+ *
+ * Preparation consist in prepending the skb payload with its size.
+ * Required by the OpenVPN protocol in order to extract packets from
+ * the TCP stream on the receiver side.
+ */
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb);
+void ovpn_tcp_tx_work(struct work_struct *work);
+
+#endif /* _NET_OVPN_TCP_H_ */
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
new file mode 100644
index 000000000000..bff00946eae2
--- /dev/null
+++ b/drivers/net/ovpn/udp.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/udp.h>
+#include <net/addrconf.h>
+#include <net/dst_cache.h>
+#include <net/route.h>
+#include <net/ipv6_stubs.h>
+#include <net/transp_v6.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "bind.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "socket.h"
+#include "udp.h"
+
+/* Retrieve the corresponding ovpn object from a UDP socket
+ * rcu_read_lock must be held on entry
+ */
+static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
+{
+ struct ovpn_socket *ovpn_sock;
+
+ if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP))
+ return NULL;
+
+ ovpn_sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!ovpn_sock))
+ return NULL;
+
+ /* make sure that sk matches our stored transport socket */
+ if (unlikely(!ovpn_sock->sk || sk != ovpn_sock->sk))
+ return NULL;
+
+ return ovpn_sock;
+}
+
+/**
+ * ovpn_udp_encap_recv - Start processing a received UDP packet.
+ * @sk: socket over which the packet was received
+ * @skb: the received packet
+ *
+ * If the first byte of the payload is:
+ * - DATA_V2 the packet is accepted for further processing,
+ * - DATA_V1 the packet is dropped as not supported,
+ * - anything else the packet is forwarded to the UDP stack for
+ * delivery to user space.
+ *
+ * Return:
+ * 0 if skb was consumed or dropped
+ * >0 if skb should be passed up to userspace as UDP (packet not consumed)
+ * <0 if skb should be resubmitted as proto -N (packet not consumed)
+ */
+static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct ovpn_socket *ovpn_sock;
+ struct ovpn_priv *ovpn;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ u8 opcode;
+
+ ovpn_sock = ovpn_socket_from_udp_sock(sk);
+ if (unlikely(!ovpn_sock)) {
+ net_err_ratelimited("ovpn: %s invoked on non ovpn socket\n",
+ __func__);
+ goto drop_noovpn;
+ }
+
+ ovpn = ovpn_sock->ovpn;
+ if (unlikely(!ovpn)) {
+ net_err_ratelimited("ovpn: cannot obtain ovpn object from UDP socket\n");
+ goto drop_noovpn;
+ }
+
+ /* Make sure the first 4 bytes of the skb data buffer after the UDP
+ * header are accessible.
+ * They are required to fetch the OP code, the key ID and the peer ID.
+ */
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) +
+ OVPN_OPCODE_SIZE))) {
+ net_dbg_ratelimited("%s: packet too small from UDP socket\n",
+ netdev_name(ovpn->dev));
+ goto drop;
+ }
+
+ opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr));
+ if (unlikely(opcode != OVPN_DATA_V2)) {
+ /* DATA_V1 is not supported */
+ if (opcode == OVPN_DATA_V1)
+ goto drop;
+
+ /* unknown or control packet: let it bubble up to userspace */
+ return 1;
+ }
+
+ peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr));
+ /* some OpenVPN server implementations send data packets with the
+ * peer-id set to UNDEF. In this case we skip the peer lookup by
+ * peer-id and try the transport address instead
+ */
+ if (peer_id == OVPN_PEER_ID_UNDEF)
+ peer = ovpn_peer_get_by_transp_addr(ovpn, skb);
+ else
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+
+ if (unlikely(!peer))
+ goto drop;
+
+ /* pop off outer UDP header */
+ __skb_pull(skb, sizeof(struct udphdr));
+ ovpn_recv(peer, skb);
+ return 0;
+
+drop:
+ dev_dstats_rx_dropped(ovpn->dev);
+drop_noovpn:
+ kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * ovpn_udp4_output - send IPv4 packet over udp socket
+ * @peer: the destination peer
+ * @bind: the binding related to the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
+ struct dst_cache *cache, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct rtable *rt;
+ struct flowi4 fl = {
+ .saddr = bind->local.ipv4.s_addr,
+ .daddr = bind->remote.in4.sin_addr.s_addr,
+ .fl4_sport = inet_sk(sk)->inet_sport,
+ .fl4_dport = bind->remote.in4.sin_port,
+ .flowi4_proto = sk->sk_protocol,
+ .flowi4_mark = sk->sk_mark,
+ };
+ int ret;
+
+ local_bh_disable();
+ rt = dst_cache_get_ip4(cache, &fl.saddr);
+ if (rt)
+ goto transmit;
+
+ if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr,
+ RT_SCOPE_HOST))) {
+ /* we may end up here when the cached address is not usable
+ * anymore. In this case we reset address/cache and perform a
+ * new look up
+ */
+ fl.saddr = 0;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv4.s_addr = 0;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+ }
+
+ rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+ if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) {
+ fl.saddr = 0;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv4.s_addr = 0;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+
+ rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+ }
+
+ if (IS_ERR(rt)) {
+ ret = PTR_ERR(rt);
+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
+ netdev_name(peer->ovpn->dev),
+ &bind->remote.in4,
+ ret);
+ goto err;
+ }
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+
+transmit:
+ udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0,
+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
+ fl.fl4_dport, false, sk->sk_no_check_tx);
+ ret = 0;
+err:
+ local_bh_enable();
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * ovpn_udp6_output - send IPv6 packet over udp socket
+ * @peer: the destination peer
+ * @bind: the binding related to the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp6_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
+ struct dst_cache *cache, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst;
+ int ret;
+
+ struct flowi6 fl = {
+ .saddr = bind->local.ipv6,
+ .daddr = bind->remote.in6.sin6_addr,
+ .fl6_sport = inet_sk(sk)->inet_sport,
+ .fl6_dport = bind->remote.in6.sin6_port,
+ .flowi6_proto = sk->sk_protocol,
+ .flowi6_mark = sk->sk_mark,
+ .flowi6_oif = bind->remote.in6.sin6_scope_id,
+ };
+
+ local_bh_disable();
+ dst = dst_cache_get_ip6(cache, &fl.saddr);
+ if (dst)
+ goto transmit;
+
+ if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) {
+ /* we may end up here when the cached address is not usable
+ * anymore. In this case we reset address/cache and perform a
+ * new look up
+ */
+ fl.saddr = in6addr_any;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv6 = in6addr_any;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+ }
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
+ netdev_name(peer->ovpn->dev),
+ &bind->remote.in6, ret);
+ goto err;
+ }
+ dst_cache_set_ip6(cache, dst, &fl.saddr);
+
+transmit:
+ /* user IPv6 packets may be larger than the transport interface
+ * MTU (after encapsulation), however, since they are locally
+ * generated we should ensure they get fragmented.
+ * Setting the ignore_df flag to 1 will instruct ip6_fragment() to
+ * fragment packets if needed.
+ *
+ * NOTE: this is not needed for IPv4 because we pass df=0 to
+ * udp_tunnel_xmit_skb()
+ */
+ skb->ignore_df = 1;
+ udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0,
+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
+ fl.fl6_dport, udp_get_no_check6_tx(sk));
+ ret = 0;
+err:
+ local_bh_enable();
+ return ret;
+}
+#endif
+
+/**
+ * ovpn_udp_output - transmit skb using udp-tunnel
+ * @peer: the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * rcu_read_lock should be held on entry.
+ * On return, the skb is consumed.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp_output(struct ovpn_peer *peer, struct dst_cache *cache,
+ struct sock *sk, struct sk_buff *skb)
+{
+ struct ovpn_bind *bind;
+ int ret;
+
+ /* set sk to null if skb is already orphaned */
+ if (!skb->destructor)
+ skb->sk = NULL;
+
+ rcu_read_lock();
+ bind = rcu_dereference(peer->bind);
+ if (unlikely(!bind)) {
+ net_warn_ratelimited("%s: no bind for remote peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ ret = ovpn_udp4_output(peer, bind, cache, sk, skb);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ ret = ovpn_udp6_output(peer, bind, cache, sk, skb);
+ break;
+#endif
+ default:
+ ret = -EAFNOSUPPORT;
+ break;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * ovpn_udp_send_skb - prepare skb and send it over via UDP
+ * @peer: the destination peer
+ * @sk: peer socket
+ * @skb: the packet to send
+ */
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ skb->dev = peer->ovpn->dev;
+ /* no checksum performed at this layer */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* crypto layer -> transport (UDP) */
+ ret = ovpn_udp_output(peer, &peer->dst_cache, sk, skb);
+ if (unlikely(ret < 0))
+ kfree_skb(skb);
+}
+
+static void ovpn_udp_encap_destroy(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+ struct ovpn_priv *ovpn;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock || !sock->ovpn) {
+ rcu_read_unlock();
+ return;
+ }
+ ovpn = sock->ovpn;
+ rcu_read_unlock();
+
+ ovpn_peers_free(ovpn, sk, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+}
+
+/**
+ * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
+ * @ovpn_sock: socket to configure
+ * @sock: the socket container to be passed to setup_udp_tunnel_sock()
+ * @ovpn: the ovpn instance to link
+ *
+ * After invoking this function, the sock will be controlled by ovpn so that
+ * any incoming packet may be processed by ovpn first.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
+ struct ovpn_priv *ovpn)
+{
+ struct udp_tunnel_sock_cfg cfg = {
+ .encap_type = UDP_ENCAP_OVPNINUDP,
+ .encap_rcv = ovpn_udp_encap_recv,
+ .encap_destroy = ovpn_udp_encap_destroy,
+ };
+ struct ovpn_socket *old_data;
+ int ret;
+
+ /* make sure no pre-existing encapsulation handler exists */
+ rcu_read_lock();
+ old_data = rcu_dereference_sk_user_data(ovpn_sock->sk);
+ if (!old_data) {
+ /* socket is currently unused - we can take it */
+ rcu_read_unlock();
+ setup_udp_tunnel_sock(sock_net(ovpn_sock->sk), sock, &cfg);
+ return 0;
+ }
+
+ /* socket is in use. We need to understand if it's owned by this ovpn
+ * instance or by something else.
+ * In the former case, we can increase the refcounter and happily
+ * use it, because the same UDP socket is expected to be shared among
+ * different peers.
+ *
+ * Unlike TCP, a single UDP socket can be used to talk to many remote
+ * hosts and therefore ovpn instantiates only one for all its peers
+ */
+ if ((READ_ONCE(udp_sk(ovpn_sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
+ old_data->ovpn == ovpn) {
+ netdev_dbg(ovpn->dev,
+ "provided socket already owned by this interface\n");
+ ret = -EALREADY;
+ } else {
+ netdev_dbg(ovpn->dev,
+ "provided socket already taken by other user\n");
+ ret = -EBUSY;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/**
+ * ovpn_udp_socket_detach - clean udp-tunnel status for this socket
+ * @ovpn_sock: the socket to clean
+ */
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+ struct sock *sk = ovpn_sock->sk;
+
+ /* Re-enable multicast loopback */
+ inet_set_bit(MC_LOOP, sk);
+ /* Disable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+ inet_dec_convert_csum(sk);
+
+ WRITE_ONCE(udp_sk(sk)->encap_type, 0);
+ WRITE_ONCE(udp_sk(sk)->encap_rcv, NULL);
+ WRITE_ONCE(udp_sk(sk)->encap_destroy, NULL);
+
+ rcu_assign_sk_user_data(sk, NULL);
+}
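
The return-value contract of ovpn_udp_encap_recv() is what demultiplexes the
shared socket: DATA_V2 packets are decapsulated in kernel, DATA_V1 is
dropped, and anything else is handed back to the UDP stack so userspace
OpenVPN can service the control channel on the same socket. A pure-function
sketch of that classification on the first payload byte (classify() is an
illustrative name and ignores all sk_buff handling):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define OVPN_DATA_V1 6
#define OVPN_DATA_V2 9

enum verdict { VERDICT_KERNEL, VERDICT_USERSPACE, VERDICT_DROP };

static enum verdict classify(const uint8_t *payload, size_t len)
{
	uint8_t opcode;

	if (len < 4)
		return VERDICT_DROP; /* too short for the DATA_V2 header */

	opcode = payload[0] >> 3; /* top 5 bits of the first byte */
	if (opcode == OVPN_DATA_V2)
		return VERDICT_KERNEL;    /* decapsulate in kernel */
	if (opcode == OVPN_DATA_V1)
		return VERDICT_DROP;      /* unsupported by the offload */
	return VERDICT_USERSPACE;         /* control channel -> UDP stack */
}

int main(void)
{
	uint8_t data_v2[4] = { OVPN_DATA_V2 << 3, 0x00, 0x00, 0x01 };
	uint8_t ctrl[4]    = { 0x20, 0x00, 0x00, 0x00 }; /* a control opcode */

	printf("%d %d\n", classify(data_v2, 4), classify(ctrl, 4));
	return 0;
}
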
diff --git a/drivers/net/ovpn/udp.h b/drivers/net/ovpn/udp.h
new file mode 100644
index 000000000000..fe26fbe25c5a
--- /dev/null
+++ b/drivers/net/ovpn/udp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_UDP_H_
+#define _NET_OVPN_UDP_H_
+
+#include <net/sock.h>
+
+struct ovpn_peer;
+struct ovpn_priv;
+struct socket;
+
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
+ struct ovpn_priv *ovpn);
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock);
+
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb);
+
+#endif /* _NET_OVPN_UDP_H_ */
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index f873a92d2445..28e6bc4a1f14 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -245,30 +245,21 @@ static int __net_init pfcp_net_init(struct net *net)
return 0;
}
-static void __net_exit pfcp_net_exit(struct net *net)
+static void __net_exit pfcp_net_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
struct pfcp_dev *pfcp, *pfcp_next;
- struct net_device *dev;
- LIST_HEAD(list);
-
- rtnl_lock();
- for_each_netdev(net, dev)
- if (dev->rtnl_link_ops == &pfcp_link_ops)
- pfcp_dellink(dev, &list);
list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
- pfcp_dellink(pfcp->dev, &list);
-
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ pfcp_dellink(pfcp->dev, dev_to_kill);
}
static struct pernet_operations pfcp_net_ops = {
- .init = pfcp_net_init,
- .exit = pfcp_net_exit,
- .id = &pfcp_net_id,
- .size = sizeof(struct pfcp_net),
+ .init = pfcp_net_init,
+ .exit_rtnl = pfcp_net_exit_rtnl,
+ .id = &pfcp_net_id,
+ .size = sizeof(struct pfcp_net),
};
static int __init pfcp_init(void)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d29f9f7fd2e1..53dad2482026 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,6 @@
config PHYLINK
tristate
- depends on NETDEVICES
select PHYLIB
select SWPHY
help
@@ -15,9 +14,7 @@ config PHYLINK
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
- depends on NETDEVICES
- select MDIO_DEVICE
- select MDIO_DEVRES
+ select MDIO_BUS
help
Ethernet controllers are usually attached to PHY
devices. This option provides infrastructure for
@@ -79,6 +76,18 @@ config SFP
comment "MII PHY device drivers"
+config AS21XXX_PHY
+ tristate "Aeonsemi AS21xxx PHYs"
+ help
+ Currently supports the Aeonsemi AS21xxx PHY.
+
+ These are 10G C45 PHYs that all require a generic firmware.
+
+ Supported PHYs: AS21011JB1, AS21011PB1, AS21010JB1, AS21010PB1,
+ AS21511JB1, AS21511PB1, AS21510JB1, AS21510PB1, AS21210JB1 and
+ AS21210PB1, which all register with the PHY ID 0x7500 0x9410
+ before the firmware is loaded.
+
config AIR_EN8811H_PHY
tristate "Airoha EN8811H 2.5 Gigabit PHY"
help
@@ -266,6 +275,18 @@ config MAXLINEAR_GPHY
Support for the Maxlinear GPY115, GPY211, GPY212, GPY215,
GPY241, GPY245 PHYs.
+config MAXLINEAR_86110_PHY
+ tristate "MaxLinear MXL86110 PHY support"
+ help
+ Support for the MaxLinear MXL86110 Gigabit Ethernet
+ Physical Layer transceiver.
+ The MXL86110 is commonly used in networking equipment such as
+ routers, switches, and embedded systems, providing the
+ physical interface for 10/100/1000 Mbps Ethernet connections
+ over copper media.
+ If you are using a board with the MXL86110 PHY connected to your
+ Ethernet MAC, you should enable this option.
+
source "drivers/net/phy/mediatek/Kconfig"
config MICREL_PHY
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 23ce205ae91d..7827609e9032 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -3,30 +3,22 @@
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
linkmode.o phy_link_topology.o \
- phy_package.o phy_caps.o
+ phy_package.o phy_caps.o mdio_bus_provider.o
mdio-bus-y += mdio_bus.o mdio_device.o
-ifdef CONFIG_MDIO_DEVICE
-obj-y += mdio-boardinfo.o
-endif
-
-# PHYLIB implies MDIO_DEVICE, in that case, we have a bunch of circular
-# dependencies that does not make it possible to split mdio-bus objects into a
-# dedicated loadable module, so we bundle them all together into libphy.ko
ifdef CONFIG_PHYLIB
-libphy-y += $(mdio-bus-y)
-# the stubs are built-in whenever PHYLIB is built-in or module
-obj-y += stubs.o
-else
-obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o
+# built-in whenever PHYLIB is built-in or module
+obj-y += stubs.o mdio-boardinfo.o
endif
-obj-$(CONFIG_MDIO_DEVRES) += mdio_devres.o
+
libphy-$(CONFIG_SWPHY) += swphy.o
libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
libphy-$(CONFIG_OPEN_ALLIANCE_HELPERS) += open_alliance_helpers.o
+obj-$(CONFIG_MDIO_BUS) += mdio-bus.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
+obj-$(CONFIG_PHYLIB) += mdio_devres.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
@@ -40,6 +32,7 @@ obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AMCC_QT2025_PHY) += qt2025.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
+obj-$(CONFIG_AS21XXX_PHY) += as21xxx.o
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
else
@@ -75,6 +68,7 @@ obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_88Q2XXX_PHY) += marvell-88q2xxx.o
obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o
obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o
+obj-$(CONFIG_MAXLINEAR_86110_PHY) += mxl-86110.o
obj-y += mediatek/
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
index e9fd24cb7270..57fbd8df9438 100644
--- a/drivers/net/phy/air_en8811h.c
+++ b/drivers/net/phy/air_en8811h.c
@@ -11,6 +11,7 @@
* Copyright (C) 2023 Airoha Technology Corp.
*/
+#include <linux/clk-provider.h>
#include <linux/phy.h>
#include <linux/firmware.h>
#include <linux/property.h>
@@ -115,6 +116,11 @@
#define EN8811H_GPIO_OUTPUT 0xcf8b8
#define EN8811H_GPIO_OUTPUT_345 (BIT(3) | BIT(4) | BIT(5))
+#define EN8811H_HWTRAP1 0xcf914
+#define EN8811H_HWTRAP1_CKO BIT(12)
+#define EN8811H_CLK_CGM 0xcf958
+#define EN8811H_CLK_CGM_CKO BIT(26)
+
#define EN8811H_FW_CTRL_1 0x0f0018
#define EN8811H_FW_CTRL_1_START 0x0
#define EN8811H_FW_CTRL_1_FINISH 0x1
@@ -142,10 +148,15 @@ struct led {
unsigned long state;
};
+#define clk_hw_to_en8811h_priv(_hw) \
+ container_of(_hw, struct en8811h_priv, hw)
+
struct en8811h_priv {
- u32 firmware_version;
- bool mcu_needs_restart;
- struct led led[EN8811H_LED_COUNT];
+ u32 firmware_version;
+ bool mcu_needs_restart;
+ struct led led[EN8811H_LED_COUNT];
+ struct clk_hw hw;
+ struct phy_device *phydev;
};
enum {
@@ -806,6 +817,86 @@ static int en8811h_led_hw_is_supported(struct phy_device *phydev, u8 index,
return 0;
};
+static unsigned long en8811h_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+ u32 pbus_value;
+ int ret;
+
+ ret = air_buckpbus_reg_read(phydev, EN8811H_HWTRAP1, &pbus_value);
+ if (ret < 0)
+ return ret;
+
+ return (pbus_value & EN8811H_HWTRAP1_CKO) ? 50000000 : 25000000;
+}
+
+static int en8811h_clk_enable(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+
+ return air_buckpbus_reg_modify(phydev, EN8811H_CLK_CGM,
+ EN8811H_CLK_CGM_CKO,
+ EN8811H_CLK_CGM_CKO);
+}
+
+static void en8811h_clk_disable(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+
+ air_buckpbus_reg_modify(phydev, EN8811H_CLK_CGM,
+ EN8811H_CLK_CGM_CKO, 0);
+}
+
+static int en8811h_clk_is_enabled(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+ u32 pbus_value;
+ int ret;
+
+ ret = air_buckpbus_reg_read(phydev, EN8811H_CLK_CGM, &pbus_value);
+ if (ret < 0)
+ return ret;
+
+ return (pbus_value & EN8811H_CLK_CGM_CKO);
+}
+
+static const struct clk_ops en8811h_clk_ops = {
+ .recalc_rate = en8811h_clk_recalc_rate,
+ .enable = en8811h_clk_enable,
+ .disable = en8811h_clk_disable,
+ .is_enabled = en8811h_clk_is_enabled,
+};
+
+static int en8811h_clk_provider_setup(struct device *dev, struct clk_hw *hw)
+{
+ struct clk_init_data init;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-cko",
+ fwnode_get_name(dev_fwnode(dev)));
+ if (!init.name)
+ return -ENOMEM;
+
+ init.ops = &en8811h_clk_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
static int en8811h_probe(struct phy_device *phydev)
{
struct en8811h_priv *priv;
@@ -838,6 +929,12 @@ static int en8811h_probe(struct phy_device *phydev)
return ret;
}
+ priv->phydev = phydev;
+ /* Co-Clock Output */
+ ret = en8811h_clk_provider_setup(&phydev->mdio.dev, &priv->hw);
+ if (ret)
+ return ret;
+
/* Configure led gpio pins as output */
ret = air_buckpbus_reg_modify(phydev, EN8811H_GPIO_OUTPUT,
EN8811H_GPIO_OUTPUT_345,
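
The clk_hw callbacks added here recover their driver data through
clk_hw_to_en8811h_priv(), a container_of() cast: the clk_hw is embedded in
en8811h_priv, so subtracting the member offset from the clk_hw pointer yields
the containing structure. A userspace illustration of the idiom (the kernel's
container_of() adds extra type checking on top of this):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clk_hw { int dummy; };

struct priv {
	int firmware_version;
	struct clk_hw hw; /* embedded, as in en8811h_priv */
};

static int recalc_rate(struct clk_hw *hw)
{
	/* recover the enclosing priv from the embedded member */
	struct priv *p = container_of(hw, struct priv, hw);

	return p->firmware_version;
}

int main(void)
{
	struct priv p = { .firmware_version = 42 };

	printf("%d\n", recalc_rate(&p.hw)); /* 42 */
	return 0;
}
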
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 08b1c9cc902b..77a48635d7bf 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -516,8 +516,7 @@ static int aqr105_read_status(struct phy_device *phydev)
if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
return 0;
- /**
- * The status register is not immediately correct on line side link up.
+ /* The status register is not immediately correct on line side link up.
* Poll periodically until it reflects the correct ON state.
* Only return fail for read error, timeout defaults to OFF state.
*/
@@ -634,8 +633,7 @@ static int aqr107_read_status(struct phy_device *phydev)
if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
return 0;
- /**
- * The status register is not immediately correct on line side link up.
+ /* The status register is not immediately correct on line side link up.
* Poll periodically until it reflects the correct ON state.
* Only return fail for read error, timeout defaults to OFF state.
*/
diff --git a/drivers/net/phy/as21xxx.c b/drivers/net/phy/as21xxx.c
new file mode 100644
index 000000000000..92697f43087d
--- /dev/null
+++ b/drivers/net/phy/as21xxx.c
@@ -0,0 +1,1087 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Aeonsemi AS21xxx PHY Driver
+ *
+ * Author: Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#define VEND1_GLB_REG_CPU_RESET_ADDR_LO_BASEADDR 0x3
+#define VEND1_GLB_REG_CPU_RESET_ADDR_HI_BASEADDR 0x4
+
+#define VEND1_GLB_REG_CPU_CTRL 0xe
+#define VEND1_GLB_CPU_CTRL_MASK GENMASK(4, 0)
+#define VEND1_GLB_CPU_CTRL_LED_POLARITY_MASK GENMASK(12, 8)
+#define VEND1_GLB_CPU_CTRL_LED_POLARITY(_n) FIELD_PREP(VEND1_GLB_CPU_CTRL_LED_POLARITY_MASK, \
+ BIT(_n))
+
+#define VEND1_FW_START_ADDR 0x100
+
+#define VEND1_GLB_REG_MDIO_INDIRECT_ADDRCMD 0x101
+#define VEND1_GLB_REG_MDIO_INDIRECT_LOAD 0x102
+
+#define VEND1_GLB_REG_MDIO_INDIRECT_STATUS 0x103
+
+#define VEND1_PTP_CLK 0x142
+#define VEND1_PTP_CLK_EN BIT(6)
+
+/* 5 LED at step of 0x20
+ * FE: Fast-Ethernet (10/100)
+ * GE: Gigabit-Ethernet (1000)
+ * NG: New-Generation (2500/5000/10000)
+ */
+#define VEND1_LED_REG(_n) (0x1800 + ((_n) * 0x10))
+#define VEND1_LED_REG_A_EVENT GENMASK(15, 11)
+#define VEND1_LED_CONF 0x1881
+#define VEND1_LED_CONFG_BLINK GENMASK(7, 0)
+
+#define VEND1_SPEED_STATUS 0x4002
+#define VEND1_SPEED_MASK GENMASK(7, 0)
+#define VEND1_SPEED_10000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x3)
+#define VEND1_SPEED_5000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x5)
+#define VEND1_SPEED_2500 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x9)
+#define VEND1_SPEED_1000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x10)
+#define VEND1_SPEED_100 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x20)
+#define VEND1_SPEED_10 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x0)
+
+#define VEND1_IPC_CMD 0x5801
+#define AEON_IPC_CMD_PARITY BIT(15)
+#define AEON_IPC_CMD_SIZE GENMASK(10, 6)
+#define AEON_IPC_CMD_OPCODE GENMASK(5, 0)
+
+#define IPC_CMD_NOOP 0x0 /* Do nothing */
+#define IPC_CMD_INFO 0x1 /* Get Firmware Version */
+#define IPC_CMD_SYS_CPU 0x2 /* SYS_CPU */
+#define IPC_CMD_BULK_DATA 0xa /* Pass bulk data in ipc registers. */
+#define IPC_CMD_BULK_WRITE 0xc /* Write bulk data to memory */
+#define IPC_CMD_CFG_PARAM 0x1a /* Write config parameters to memory */
+#define IPC_CMD_NG_TESTMODE 0x1b /* Set NG test mode and tone */
+#define IPC_CMD_TEMP_MON 0x15 /* Temperature monitoring function */
+#define IPC_CMD_SET_LED 0x23 /* Set led */
+
+#define VEND1_IPC_STS 0x5802
+#define AEON_IPC_STS_PARITY BIT(15)
+#define AEON_IPC_STS_SIZE GENMASK(14, 10)
+#define AEON_IPC_STS_OPCODE GENMASK(9, 4)
+#define AEON_IPC_STS_STATUS GENMASK(3, 0)
+#define AEON_IPC_STS_STATUS_RCVD FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x1)
+#define AEON_IPC_STS_STATUS_PROCESS FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x2)
+#define AEON_IPC_STS_STATUS_SUCCESS FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x4)
+#define AEON_IPC_STS_STATUS_ERROR FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x8)
+#define AEON_IPC_STS_STATUS_BUSY FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0xe)
+#define AEON_IPC_STS_STATUS_READY FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0xf)
+
+#define VEND1_IPC_DATA0 0x5808
+#define VEND1_IPC_DATA1 0x5809
+#define VEND1_IPC_DATA2 0x580a
+#define VEND1_IPC_DATA3 0x580b
+#define VEND1_IPC_DATA4 0x580c
+#define VEND1_IPC_DATA5 0x580d
+#define VEND1_IPC_DATA6 0x580e
+#define VEND1_IPC_DATA7 0x580f
+#define VEND1_IPC_DATA(_n) (VEND1_IPC_DATA0 + (_n))
+
+/* Sub command of CMD_INFO */
+#define IPC_INFO_VERSION 0x1
+
+/* Sub command of CMD_SYS_CPU */
+#define IPC_SYS_CPU_REBOOT 0x3
+#define IPC_SYS_CPU_IMAGE_OFST 0x4
+#define IPC_SYS_CPU_IMAGE_CHECK 0x5
+#define IPC_SYS_CPU_PHY_ENABLE 0x6
+
+/* Sub command of CMD_CFG_PARAM */
+#define IPC_CFG_PARAM_DIRECT 0x4
+
+/* CFG DIRECT sub command */
+#define IPC_CFG_PARAM_DIRECT_NG_PHYCTRL 0x1
+#define IPC_CFG_PARAM_DIRECT_CU_AN 0x2
+#define IPC_CFG_PARAM_DIRECT_SDS_PCS 0x3
+#define IPC_CFG_PARAM_DIRECT_AUTO_EEE 0x4
+#define IPC_CFG_PARAM_DIRECT_SDS_PMA 0x5
+#define IPC_CFG_PARAM_DIRECT_DPC_RA 0x6
+#define IPC_CFG_PARAM_DIRECT_DPC_PKT_CHK 0x7
+#define IPC_CFG_PARAM_DIRECT_DPC_SDS_WAIT_ETH 0x8
+#define IPC_CFG_PARAM_DIRECT_WDT 0x9
+#define IPC_CFG_PARAM_DIRECT_SDS_RESTART_AN 0x10
+#define IPC_CFG_PARAM_DIRECT_TEMP_MON 0x11
+#define IPC_CFG_PARAM_DIRECT_WOL 0x12
+
+/* Sub command of CMD_TEMP_MON */
+#define IPC_CMD_TEMP_MON_GET 0x4
+
+#define AS21XXX_MDIO_AN_C22 0xffe0
+
+#define PHY_ID_AS21XXX 0x75009410
+/* AS21xxx ID Legend
+ * AS21x1xxB1
+ * ^ ^^
+ * | |J: Supports SyncE/PTP
+ * | |P: No SyncE/PTP support
+ * | 1: Supports 2nd Serdes
+ * | 2: No 2nd Serdes support
+ * 0: 10G, 5G, 2.5G
+ * 5: 5G, 2.5G
+ * 2: 2.5G
+ */
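+/* Worked example of the legend above, derived only from the IDs below:
+ * AS21011/AS21010 are the 10G/5G/2.5G parts, AS21511/AS21510 the
+ * 5G/2.5G parts and AS21210 the 2.5G-only part, while the J/P letter
+ * distinguishes SyncE/PTP support.
+ */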
+#define PHY_ID_AS21011JB1 0x75009402
+#define PHY_ID_AS21011PB1 0x75009412
+#define PHY_ID_AS21010JB1 0x75009422
+#define PHY_ID_AS21010PB1 0x75009432
+#define PHY_ID_AS21511JB1 0x75009442
+#define PHY_ID_AS21511PB1 0x75009452
+#define PHY_ID_AS21510JB1 0x75009462
+#define PHY_ID_AS21510PB1 0x75009472
+#define PHY_ID_AS21210JB1 0x75009482
+#define PHY_ID_AS21210PB1 0x75009492
+#define PHY_VENDOR_AEONSEMI 0x75009400
+
+#define AEON_MAX_LEDS 5
+#define AEON_IPC_DELAY 10000
+#define AEON_IPC_TIMEOUT (AEON_IPC_DELAY * 100)
+#define AEON_IPC_DATA_NUM_REGISTERS 8
+#define AEON_IPC_DATA_MAX (AEON_IPC_DATA_NUM_REGISTERS * sizeof(u16))
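+/* i.e. IPC commands are polled every 10ms (AEON_IPC_DELAY is in us)
+ * and time out after AEON_IPC_DELAY * 100 = 1 second.
+ */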
+
+#define AEON_BOOT_ADDR 0x1000
+#define AEON_CPU_BOOT_ADDR 0x2000
+#define AEON_CPU_CTRL_FW_LOAD (BIT(4) | BIT(2) | BIT(1) | BIT(0))
+#define AEON_CPU_CTRL_FW_START BIT(0)
+
+enum as21xxx_led_event {
+ VEND1_LED_REG_A_EVENT_ON_10 = 0x0,
+ VEND1_LED_REG_A_EVENT_ON_100,
+ VEND1_LED_REG_A_EVENT_ON_1000,
+ VEND1_LED_REG_A_EVENT_ON_2500,
+ VEND1_LED_REG_A_EVENT_ON_5000,
+ VEND1_LED_REG_A_EVENT_ON_10000,
+ VEND1_LED_REG_A_EVENT_ON_FE_GE,
+ VEND1_LED_REG_A_EVENT_ON_NG,
+ VEND1_LED_REG_A_EVENT_ON_FULL_DUPLEX,
+ VEND1_LED_REG_A_EVENT_ON_COLLISION,
+ VEND1_LED_REG_A_EVENT_BLINK_TX,
+ VEND1_LED_REG_A_EVENT_BLINK_RX,
+ VEND1_LED_REG_A_EVENT_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_LINK,
+ VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_RX,
+ VEND1_LED_REG_A_EVENT_ON_FE_GE_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_NG_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_NG_BLINK_FE_GE,
+ VEND1_LED_REG_A_EVENT_ON_FD_BLINK_COLLISION,
+ VEND1_LED_REG_A_EVENT_ON,
+ VEND1_LED_REG_A_EVENT_OFF,
+};
+
+struct as21xxx_led_pattern_info {
+ unsigned int pattern;
+ u16 val;
+};
+
+struct as21xxx_priv {
+ bool parity_status;
+ /* Protect concurrent IPC access */
+ struct mutex ipc_lock;
+};
+
+static struct as21xxx_led_pattern_info as21xxx_led_supported_pattern[] = {
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10),
+ .val = VEND1_LED_REG_A_EVENT_ON_10
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_100),
+ .val = VEND1_LED_REG_A_EVENT_ON_100
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_1000),
+ .val = VEND1_LED_REG_A_EVENT_ON_1000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500),
+ .val = VEND1_LED_REG_A_EVENT_ON_2500
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_5000),
+ .val = VEND1_LED_REG_A_EVENT_ON_5000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_10000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000),
+ .val = VEND1_LED_REG_A_EVENT_ON_FE_GE
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_NG
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .val = VEND1_LED_REG_A_EVENT_ON_FULL_DUPLEX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_TX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_TX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_RX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_RX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_FE_GE_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_NG_BLINK_ACT
+ }
+};
+
+static int aeon_firmware_boot(struct phy_device *phydev, const u8 *data,
+ size_t size)
+{
+ int i, ret;
+ u16 val;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLB_REG_CPU_CTRL,
+ VEND1_GLB_CPU_CTRL_MASK, AEON_CPU_CTRL_FW_LOAD);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_FW_START_ADDR,
+ AEON_BOOT_ADDR);
+ if (ret)
+ return ret;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_ADDRCMD,
+ 0x3ffc, 0xc000);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_STATUS);
+ if (val > 1) {
+ phydev_err(phydev, "wrong origin mdio_indirect_status: %x\n", val);
+ return -EINVAL;
+ }
+
+ /* Firmware is always aligned to u16 */
+ for (i = 0; i < size; i += 2) {
+ val = data[i + 1] << 8 | data[i];
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_LOAD, val);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_RESET_ADDR_LO_BASEADDR,
+ lower_16_bits(AEON_CPU_BOOT_ADDR));
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_RESET_ADDR_HI_BASEADDR,
+ upper_16_bits(AEON_CPU_BOOT_ADDR));
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLB_REG_CPU_CTRL,
+ VEND1_GLB_CPU_CTRL_MASK, AEON_CPU_CTRL_FW_START);
+}
+
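+/* Note the two-stage boot sequence implemented above: the CPU is held
+ * with the AEON_CPU_CTRL_FW_LOAD bit pattern while the image is
+ * streamed through the indirect MDIO load register, then released
+ * with only AEON_CPU_CTRL_FW_START set.
+ */
+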
+static int aeon_firmware_load(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw;
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(dev->of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ret;
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret) {
+ phydev_err(phydev, "failed to find FW file %s (%d)\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = aeon_firmware_boot(phydev, fw->data, fw->size);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static bool aeon_ipc_ready(u16 val, bool parity_status)
+{
+ u16 status;
+
+ if (FIELD_GET(AEON_IPC_STS_PARITY, val) != parity_status)
+ return false;
+
+ status = val & AEON_IPC_STS_STATUS;
+
+ return status != AEON_IPC_STS_STATUS_RCVD &&
+ status != AEON_IPC_STS_STATUS_PROCESS &&
+ status != AEON_IPC_STS_STATUS_BUSY;
+}
+
+static int aeon_ipc_wait_cmd(struct phy_device *phydev, bool parity_status)
+{
+ u16 val;
+
+ /* Exit condition logic:
+ * - Wait for parity bit equal
+ * - Wait for status success, error OR ready
+ */
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, VEND1_IPC_STS, val,
+ aeon_ipc_ready(val, parity_status),
+ AEON_IPC_DELAY, AEON_IPC_TIMEOUT, false);
+}
+
+static int aeon_ipc_send_cmd(struct phy_device *phydev,
+ struct as21xxx_priv *priv,
+ u16 cmd, u16 *ret_sts)
+{
+ bool curr_parity;
+ int ret;
+
+ /* The IPC syncs by using a single parity bit.
+ * Each CMD alternately has this bit set or cleared
+ * so that correct flow and packet order can be tracked.
+ */
+ curr_parity = priv->parity_status;
+ if (priv->parity_status)
+ cmd |= AEON_IPC_CMD_PARITY;
+
+ /* Always update parity for next packet */
+ priv->parity_status = !priv->parity_status;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_CMD, cmd);
+ if (ret)
+ return ret;
+
+ /* Wait for packet to be processed */
+ usleep_range(AEON_IPC_DELAY, AEON_IPC_DELAY + 5000);
+
+ /* With no ret_sts, skip waiting for packet completion
+ * (ipc parity bit sync)
+ */
+ if (!ret_sts)
+ return 0;
+
+ ret = aeon_ipc_wait_cmd(phydev, curr_parity);
+ if (ret)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_STS);
+ if (ret < 0)
+ return ret;
+
+ *ret_sts = ret;
+ if ((*ret_sts & AEON_IPC_STS_STATUS) != AEON_IPC_STS_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
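+/* Illustrative trace of the parity handshake (hypothetical values,
+ * not captured from hardware):
+ *   cmd #1: parity_status=false -> AEON_IPC_CMD_PARITY clear
+ *   cmd #2: parity_status=true  -> AEON_IPC_CMD_PARITY set
+ *   cmd #3: parity_status=false -> AEON_IPC_CMD_PARITY clear
+ * aeon_ipc_wait_cmd() then polls VEND1_IPC_STS until the status parity
+ * bit matches the parity the command was sent with.
+ */
+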
+/* If ret_data is NULL, return 0 or a negative error.
+ * If ret_data is not NULL, return the number of bytes received from
+ * IPC or a negative error.
+ */
+static int aeon_ipc_send_msg(struct phy_device *phydev,
+ u16 opcode, u16 *data, unsigned int data_len,
+ u16 *ret_data)
+{
+ struct as21xxx_priv *priv = phydev->priv;
+ unsigned int ret_size;
+ u16 cmd, ret_sts;
+ int ret;
+ int i;
+
+ /* IPC has a max of 8 registers to transfer data,
+ * make sure we never exceed this.
+ */
+ if (data_len > AEON_IPC_DATA_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < data_len / sizeof(u16); i++)
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_DATA(i),
+ data[i]);
+
+ cmd = FIELD_PREP(AEON_IPC_CMD_SIZE, data_len) |
+ FIELD_PREP(AEON_IPC_CMD_OPCODE, opcode);
+
+ mutex_lock(&priv->ipc_lock);
+
+ ret = aeon_ipc_send_cmd(phydev, priv, cmd, &ret_sts);
+ if (ret) {
+ phydev_err(phydev, "failed to send ipc msg for %x: %d\n",
+ opcode, ret);
+ goto out;
+ }
+
+ if (!ret_data)
+ goto out;
+
+ if ((ret_sts & AEON_IPC_STS_STATUS) == AEON_IPC_STS_STATUS_ERROR) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Prevent the IPC from smashing the kernel stack.
+ * We can't trust IPC to return a good value and we always
+ * preallocate space for 16 bytes.
+ */
+ ret_size = FIELD_GET(AEON_IPC_STS_SIZE, ret_sts);
+ if (ret_size > AEON_IPC_DATA_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read data from IPC data register for ret_size value from IPC */
+ for (i = 0; i < DIV_ROUND_UP(ret_size, sizeof(u16)); i++) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_DATA(i));
+ if (ret < 0)
+ goto out;
+
+ ret_data[i] = ret;
+ }
+
+ ret = ret_size;
+
+out:
+ mutex_unlock(&priv->ipc_lock);
+
+ return ret;
+}
+
+static int aeon_ipc_noop(struct phy_device *phydev,
+ struct as21xxx_priv *priv, u16 *ret_sts)
+{
+ u16 cmd;
+
+ cmd = FIELD_PREP(AEON_IPC_CMD_SIZE, 0) |
+ FIELD_PREP(AEON_IPC_CMD_OPCODE, IPC_CMD_NOOP);
+
+ return aeon_ipc_send_cmd(phydev, priv, cmd, ret_sts);
+}
+
+/* Logic to sync parity bit with IPC.
+ * We send 2 NOP cmds with the same parity and wait for IPC
+ * to handle the packet only for the second one. This way
+ * we make sure we are in sync for every following cmd.
+ */
+static int aeon_ipc_sync_parity(struct phy_device *phydev,
+ struct as21xxx_priv *priv)
+{
+ u16 ret_sts;
+ int ret;
+
+ mutex_lock(&priv->ipc_lock);
+
+ /* Send NOP with no parity */
+ aeon_ipc_noop(phydev, priv, NULL);
+
+ /* Reset packet parity */
+ priv->parity_status = false;
+
+ /* Send second NOP with no parity */
+ ret = aeon_ipc_noop(phydev, priv, &ret_sts);
+
+ mutex_unlock(&priv->ipc_lock);
+
+ /* We expect the second NOOP to return -EINVAL */
+ if (ret != -EINVAL)
+ return ret;
+
+ if ((ret_sts & AEON_IPC_STS_STATUS) != AEON_IPC_STS_STATUS_READY) {
+ phydev_err(phydev, "Invalid IPC status on sync parity: %x\n",
+ ret_sts);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aeon_ipc_get_fw_version(struct phy_device *phydev)
+{
+ u16 ret_data[AEON_IPC_DATA_NUM_REGISTERS], data[1];
+ char fw_version[AEON_IPC_DATA_MAX + 1];
+ int ret;
+
+ data[0] = IPC_INFO_VERSION;
+
+ ret = aeon_ipc_send_msg(phydev, IPC_CMD_INFO, data,
+ sizeof(data), ret_data);
+ if (ret < 0)
+ return ret;
+
+ /* Make sure FW version is NULL terminated */
+ memcpy(fw_version, ret_data, ret);
+ fw_version[ret] = '\0';
+
+ phydev_info(phydev, "Firmware Version: %s\n", fw_version);
+
+ return 0;
+}
+
+static int aeon_dpc_ra_enable(struct phy_device *phydev)
+{
+ u16 data[2];
+
+ data[0] = IPC_CFG_PARAM_DIRECT;
+ data[1] = IPC_CFG_PARAM_DIRECT_DPC_RA;
+
+ return aeon_ipc_send_msg(phydev, IPC_CMD_CFG_PARAM, data,
+ sizeof(data), NULL);
+}
+
+static int as21xxx_probe(struct phy_device *phydev)
+{
+ struct as21xxx_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev,
+ sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ ret = devm_mutex_init(&phydev->mdio.dev,
+ &priv->ipc_lock);
+ if (ret)
+ return ret;
+
+ ret = aeon_ipc_sync_parity(phydev, priv);
+ if (ret)
+ return ret;
+
+ ret = aeon_ipc_get_fw_version(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable PTP clk if not already enabled */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK,
+ VEND1_PTP_CLK_EN);
+ if (ret)
+ return ret;
+
+ return aeon_dpc_ra_enable(phydev);
+}
+
+static int as21xxx_read_link(struct phy_device *phydev, int *bmcr)
+{
+ int status;
+
+ /* The normal C22 BMCR reports inconsistent data; use
+ * the C22 registers mapped into C45 for more consistent link info.
+ */
+ *bmcr = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_BMCR);
+ if (*bmcr < 0)
+ return *bmcr;
+
+ /* Autoneg is being started, therefore disregard current
+ * link status and report link as down.
+ */
+ if (*bmcr & BMCR_ANRESTART) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ status = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (status < 0)
+ return status;
+
+ phydev->link = !!(status & MDIO_STAT1_LSTATUS);
+
+ return 0;
+}
+
+static int as21xxx_read_c22_lpa(struct phy_device *phydev)
+{
+ int lpagb;
+
+ /* MII_STAT1000 is only filled in the C22 registers mapped
+ * into C45; use that to fill lpagb values and check.
+ */
+ lpagb = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_STAT1000);
+ if (lpagb < 0)
+ return lpagb;
+
+ if (lpagb & LPA_1000MSFAIL) {
+ int adv = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_CTRL1000);
+
+ if (adv < 0)
+ return adv;
+
+ if (adv & CTL1000_ENABLE_MASTER)
+ phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n");
+ else
+ phydev_err(phydev, "Master/Slave resolution failed\n");
+ return -ENOLINK;
+ }
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpagb);
+
+ return 0;
+}
+
+static int as21xxx_read_status(struct phy_device *phydev)
+{
+ int bmcr, old_link = phydev->link;
+ int ret;
+
+ ret = as21xxx_read_link(phydev, &bmcr);
+ if (ret)
+ return ret;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret)
+ return ret;
+
+ ret = as21xxx_read_c22_lpa(phydev);
+ if (ret)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+ } else {
+ int speed;
+
+ linkmode_zero(phydev->lp_advertising);
+
+ speed = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SPEED_STATUS);
+ if (speed < 0)
+ return speed;
+
+ switch (speed & VEND1_SPEED_MASK) {
+ case VEND1_SPEED_10000:
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_5000:
+ phydev->speed = SPEED_5000;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ break;
+ case VEND1_SPEED_100:
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_10:
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int as21xxx_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 val = VEND1_LED_REG_A_EVENT_OFF;
+
+ if (index >= AEON_MAX_LEDS)
+ return -EINVAL;
+
+ if (value)
+ val = VEND1_LED_REG_A_EVENT_ON;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_LED_REG(index),
+ VEND1_LED_REG_A_EVENT,
+ FIELD_PREP(VEND1_LED_REG_A_EVENT, val));
+}
+
+static int as21xxx_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int i;
+
+ if (index >= AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (rules == as21xxx_led_supported_pattern[i].pattern)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int as21xxx_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int i, val;
+
+ if (index >= AEON_MAX_LEDS)
+ return -EINVAL;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_LED_REG(index));
+ if (val < 0)
+ return val;
+
+ val = FIELD_GET(VEND1_LED_REG_A_EVENT, val);
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (val == as21xxx_led_supported_pattern[i].val) {
+ *rules = as21xxx_led_supported_pattern[i].pattern;
+ return 0;
+ }
+
+ /* Should be impossible */
+ return -EINVAL;
+}
+
+static int as21xxx_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+ int i;
+
+ if (index >= AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (rules == as21xxx_led_supported_pattern[i].pattern) {
+ val = as21xxx_led_supported_pattern[i].val;
+ break;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_LED_REG(index),
+ VEND1_LED_REG_A_EVENT,
+ FIELD_PREP(VEND1_LED_REG_A_EVENT, val));
+}
+
+static int as21xxx_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ bool led_active_low = false;
+ u16 mask, val = 0;
+ u32 mode;
+
+ if (index >= AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ led_active_low = true;
+ break;
+ case PHY_LED_ACTIVE_HIGH: /* default mode */
+ led_active_low = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ mask = VEND1_GLB_CPU_CTRL_LED_POLARITY(index);
+ if (led_active_low)
+ val = VEND1_GLB_CPU_CTRL_LED_POLARITY(index);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_CTRL,
+ mask, val);
+}
+
+static int as21xxx_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
+{
+ struct as21xxx_priv *priv;
+ u16 ret_sts;
+ u32 phy_id;
+ int ret;
+
+ /* Skip PHYs that are not AS21xxx or that already have firmware loaded */
+ if (phydev->c45_ids.device_ids[MDIO_MMD_PCS] != PHY_ID_AS21XXX)
+ return genphy_match_phy_device(phydev, phydrv);
+
+ /* Read the PHY ID to handle firmware that was just loaded */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID1);
+ if (ret < 0)
+ return ret;
+ phy_id = ret << 16;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID2);
+ if (ret < 0)
+ return ret;
+ phy_id |= ret;
+
+ /* If the PHY ID is not the generic AS21xxx one, assume
+ * the firmware has just been loaded
+ */
+ if (phy_id != PHY_ID_AS21XXX)
+ return phy_id == phydrv->phy_id;
+
+ /* Allocate temp priv and load the firmware */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->ipc_lock);
+
+ ret = aeon_firmware_load(phydev);
+ if (ret)
+ goto out;
+
+ /* Sync parity... */
+ ret = aeon_ipc_sync_parity(phydev, priv);
+ if (ret)
+ goto out;
+
+ /* ...and send a third NOOP cmd to wait for the firmware to finish loading */
+ ret = aeon_ipc_noop(phydev, priv, &ret_sts);
+ if (ret)
+ goto out;
+
+out:
+ mutex_destroy(&priv->ipc_lock);
+ kfree(priv);
+
+ /* Return can either be 0 or a negative error code.
+ * Returning 0 here means THIS is NOT a suitable PHY.
+ *
+ * For the specific case of the generic Aeonsemi PHY ID that
+ * needs the firmware to be loaded first to have a correct PHY ID,
+ * this is OK as a matching PHY ID will be found right after.
+ * This relies on the driver probe order where the first PHY driver
+ * probed is the generic one.
+ */
+ return ret;
+}
+
+static struct phy_driver as21xxx_drivers[] = {
+ {
+ /* The PHY is exposed in C45 as 0x7500 0x9410
+ * before firmware is loaded.
+ * This driver entry must be attempted first to load
+ * the firmware and thus update the ID registers.
+ */
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21XXX),
+ .name = "Aeonsemi AS21xxx",
+ .match_phy_device = as21xxx_match_phy_device,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21011JB1),
+ .name = "Aeonsemi AS21011JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21011PB1),
+ .name = "Aeonsemi AS21011PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21010PB1),
+ .name = "Aeonsemi AS21010PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21010JB1),
+ .name = "Aeonsemi AS21010JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21210PB1),
+ .name = "Aeonsemi AS21210PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21510JB1),
+ .name = "Aeonsemi AS21510JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21510PB1),
+ .name = "Aeonsemi AS21510PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21511JB1),
+ .name = "Aeonsemi AS21511JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21210JB1),
+ .name = "Aeonsemi AS21210JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21511PB1),
+ .name = "Aeonsemi AS21511PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+};
+module_phy_driver(as21xxx_drivers);
+
+static struct mdio_device_id __maybe_unused as21xxx_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(PHY_VENDOR_AEONSEMI) },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, as21xxx_tbl);
+
+MODULE_DESCRIPTION("Aeonsemi AS21xxx PHY driver");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index e81404bf8994..299f9a8f30f4 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -185,14 +185,10 @@ static irqreturn_t bcm87xx_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static int bcm8706_match_phy_device(struct phy_device *phydev)
+static int bcm87xx_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
-}
-
-static int bcm8727_match_phy_device(struct phy_device *phydev)
-{
- return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
+ return phydev->c45_ids.device_ids[4] == phydrv->phy_id;
}
static struct phy_driver bcm87xx_driver[] = {
@@ -206,7 +202,7 @@ static struct phy_driver bcm87xx_driver[] = {
.read_status = bcm87xx_read_status,
.config_intr = bcm87xx_config_intr,
.handle_interrupt = bcm87xx_handle_interrupt,
- .match_phy_device = bcm8706_match_phy_device,
+ .match_phy_device = bcm87xx_match_phy_device,
}, {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
@@ -217,7 +213,7 @@ static struct phy_driver bcm87xx_driver[] = {
.read_status = bcm87xx_read_status,
.config_intr = bcm87xx_config_intr,
.handle_interrupt = bcm87xx_handle_interrupt,
- .match_phy_device = bcm8727_match_phy_device,
+ .match_phy_device = bcm87xx_match_phy_device,
} };
module_phy_driver(bcm87xx_driver);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 85e231451093..daab555721df 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -478,13 +478,6 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -513,9 +506,6 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags)
- return -EOPNOTSUPP;
if (rq->perout.index >= N_PER_OUT)
return -EINVAL;
return periodic_output(clock, rq, on, rq->perout.index);
@@ -1002,6 +992,9 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
clock->caps.n_per_out = N_PER_OUT;
clock->caps.n_pins = DP83640_N_PINS;
clock->caps.pps = 0;
+ clock->caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
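+ /* Advertising the supported EXTTS flags lets the PTP core reject
+ * unsupported flag combinations before ->enable() is called, so the
+ * per-request flag checks removed above should no longer be needed.
+ */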
clock->caps.adjfine = ptp_dp83640_adjfine;
clock->caps.adjtime = ptp_dp83640_adjtime;
clock->caps.gettime64 = ptp_dp83640_gettime;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index e32013eb0186..01255dada600 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -33,6 +33,7 @@
#define MII_DP83822_MLEDCR 0x25
#define MII_DP83822_LDCTRL 0x403
#define MII_DP83822_LEDCFG1 0x460
+#define MII_DP83822_IOCTRL 0x461
#define MII_DP83822_IOCTRL1 0x462
#define MII_DP83822_IOCTRL2 0x463
#define MII_DP83822_GENCFG 0x465
@@ -118,6 +119,9 @@
#define DP83822_LEDCFG1_LED1_CTRL GENMASK(11, 8)
#define DP83822_LEDCFG1_LED3_CTRL GENMASK(7, 4)
+/* IOCTRL bits */
+#define DP83822_IOCTRL_MAC_IMPEDANCE_CTRL GENMASK(4, 1)
+
/* IOCTRL1 bits */
#define DP83822_IOCTRL1_GPIO3_CTRL GENMASK(10, 8)
#define DP83822_IOCTRL1_GPIO3_CTRL_LED3 BIT(0)
@@ -202,6 +206,7 @@ struct dp83822_private {
u32 gpio2_clk_out;
bool led_pin_enable[DP83822_MAX_LED_PINS];
int tx_amplitude_100base_tx_index;
+ int mac_termination_index;
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -533,6 +538,12 @@ static int dp83822_config_init(struct phy_device *phydev)
FIELD_PREP(DP83822_100BASE_TX_LINE_DRIVER_SWING,
dp83822->tx_amplitude_100base_tx_index));
+ if (dp83822->mac_termination_index >= 0)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL,
+ DP83822_IOCTRL_MAC_IMPEDANCE_CTRL,
+ FIELD_PREP(DP83822_IOCTRL_MAC_IMPEDANCE_CTRL,
+ dp83822->mac_termination_index));
+
err = dp83822_config_init_leds(phydev);
if (err)
return err;
@@ -736,6 +747,10 @@ static const u32 tx_amplitude_100base_tx_gain[] = {
93, 95, 97, 98, 100, 102, 103, 105,
};
+static const u32 mac_termination[] = {
+ 99, 91, 84, 78, 73, 69, 65, 61, 58, 55, 53, 50, 48, 46, 44, 43,
+};
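+/* The array index, not the ohm value, is what gets programmed into
+ * DP83822_IOCTRL_MAC_IMPEDANCE_CTRL; e.g. mac-termination-ohms = <50>
+ * would resolve to index 11 (an illustrative value, not a recommended
+ * board setting).
+ */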
+
static int dp83822_of_init_leds(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -852,6 +867,23 @@ static int dp83822_of_init(struct phy_device *phydev)
}
}
+ ret = phy_get_mac_termination(phydev, dev, &val);
+ if (!ret) {
+ for (i = 0; i < ARRAY_SIZE(mac_termination); i++) {
+ if (mac_termination[i] == val) {
+ dp83822->mac_termination_index = i;
+ break;
+ }
+ }
+
+ if (dp83822->mac_termination_index < 0) {
+ phydev_err(phydev,
+ "Invalid value for mac-termination-ohms property (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
return dp83822_of_init_leds(phydev);
}
@@ -931,6 +963,7 @@ static int dp8382x_probe(struct phy_device *phydev)
return -ENOMEM;
dp83822->tx_amplitude_100base_tx_index = -1;
+ dp83822->mac_termination_index = -1;
phydev->priv = dp83822;
return 0;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 063266cafe9c..deeefb962566 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -92,11 +92,6 @@
#define DP83867_STRAP_STS1_RESERVED BIT(11)
/* STRAP_STS2 bits */
-#define DP83867_STRAP_STS2_CLK_SKEW_TX_MASK GENMASK(6, 4)
-#define DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT 4
-#define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
-#define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
-#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
#define DP83867_STRAP_STS2_STRAP_FLD BIT(10)
/* PHY CTRL bits */
@@ -111,10 +106,8 @@
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
-#define DP83867_RGMII_TX_CLK_DELAY_INV (DP83867_RGMII_TX_CLK_DELAY_MAX + 1)
#define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
-#define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1)
/* IO_MUX_CFG bits */
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
@@ -506,48 +499,6 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev)
return 0;
}
-static int dp83867_verify_rgmii_cfg(struct phy_device *phydev)
-{
- struct dp83867_private *dp83867 = phydev->priv;
-
- /* Existing behavior was to use default pin strapping delay in rgmii
- * mode, but rgmii should have meant no delay. Warn existing users.
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
- const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR,
- DP83867_STRAP_STS2);
- const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >>
- DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT;
- const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >>
- DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT;
-
- if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE ||
- rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE)
- phydev_warn(phydev,
- "PHY has delays via pin strapping, but phy-mode = 'rgmii'\n"
- "Should be 'rgmii-id' to use internal delays txskew:%x rxskew:%x\n",
- txskew, rxskew);
- }
-
- /* RX delay *must* be specified if internal delay of RX is used. */
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) &&
- dp83867->rx_id_delay == DP83867_RGMII_RX_CLK_DELAY_INV) {
- phydev_err(phydev, "ti,rx-internal-delay must be specified\n");
- return -EINVAL;
- }
-
- /* TX delay *must* be specified if internal delay of TX is used. */
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) &&
- dp83867->tx_id_delay == DP83867_RGMII_TX_CLK_DELAY_INV) {
- phydev_err(phydev, "ti,tx-internal-delay must be specified\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
#if IS_ENABLED(CONFIG_OF_MDIO)
static int dp83867_of_init_io_impedance(struct phy_device *phydev)
{
@@ -631,7 +582,7 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node,
"ti,sgmii-ref-clock-output-enable");
- dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV;
+ dp83867->rx_id_delay = DP83867_RGMIIDCTL_2_00_NS;
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
if (!ret && dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) {
@@ -641,7 +592,7 @@ static int dp83867_of_init(struct phy_device *phydev)
return -EINVAL;
}
- dp83867->tx_id_delay = DP83867_RGMII_TX_CLK_DELAY_INV;
+ dp83867->tx_id_delay = DP83867_RGMIIDCTL_2_00_NS;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
if (!ret && dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) {
@@ -761,7 +712,6 @@ static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;
int ret, val, bs;
- u16 delay;
/* Force speed optimization for the PHY even if it strapped */
ret = phy_modify(phydev, DP83867_CFG2, DP83867_DOWNSHIFT_EN,
@@ -769,10 +719,6 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = dp83867_verify_rgmii_cfg(phydev);
- if (ret)
- return ret;
-
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
if (dp83867->rxctrl_strap_quirk)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
@@ -836,13 +782,7 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* If rgmii mode with no internal delay is selected, we do NOT use
- * aligned mode as one might expect. Instead we use the PHY's default
- * based on pin strapping. And the "mode 0" default is to *use*
- * internal delay with a value of 7 (2.00 ns).
- *
- * Set up RGMII delays
- */
+ /* Set up RGMII delays */
val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
val &= ~(DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
@@ -857,15 +797,9 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
- delay = 0;
- if (dp83867->rx_id_delay != DP83867_RGMII_RX_CLK_DELAY_INV)
- delay |= dp83867->rx_id_delay;
- if (dp83867->tx_id_delay != DP83867_RGMII_TX_CLK_DELAY_INV)
- delay |= dp83867->tx_id_delay <<
- DP83867_RGMII_TX_CLK_DELAY_SHIFT;
-
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
- delay);
+ dp83867->rx_id_delay |
+ (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
}
/* If specified, set io impedance */
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index ee7831a9849b..033656d574b8 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -131,7 +131,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status,
+ const struct fixed_phy_status *status,
struct gpio_desc *gpiod)
{
int ret;
@@ -160,10 +160,9 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
return 0;
}
-int fixed_phy_add(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status)
+int fixed_phy_add(int phy_addr, const struct fixed_phy_status *status)
{
- return fixed_phy_add_gpiod(irq, phy_addr, status, NULL);
+ return fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, NULL);
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
@@ -223,12 +222,11 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
}
#endif
-static struct phy_device *__fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np,
- struct gpio_desc *gpiod)
+struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct gpio_desc *gpiod;
struct phy_device *phy;
int phy_addr;
int ret;
@@ -237,18 +235,16 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
return ERR_PTR(-EPROBE_DEFER);
/* Check if we have a GPIO associated with this fixed phy */
- if (!gpiod) {
- gpiod = fixed_phy_get_gpiod(np);
- if (IS_ERR(gpiod))
- return ERR_CAST(gpiod);
- }
+ gpiod = fixed_phy_get_gpiod(np);
+ if (IS_ERR(gpiod))
+ return ERR_CAST(gpiod);
/* Get the next available PHY address, up to PHY_MAX_ADDR */
phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
- ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod);
+ ret = fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, gpiod);
if (ret < 0) {
ida_free(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
@@ -306,24 +302,8 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
return phy;
}
-
-struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np)
-{
- return __fixed_phy_register(irq, status, np, NULL);
-}
EXPORT_SYMBOL_GPL(fixed_phy_register);
-struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
-{
- return __fixed_phy_register(irq, status, NULL, gpiod);
-}
-EXPORT_SYMBOL_GPL(fixed_phy_register_with_gpiod);
-
void fixed_phy_unregister(struct phy_device *phy)
{
phy_device_remove(phy);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index bbcc7d2b54cd..c0c4f19cfb6a 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -520,12 +520,14 @@ static int ip101a_g_match_phy_device(struct phy_device *phydev, bool ip101a)
return ip101a == !ret;
}
-static int ip101a_match_phy_device(struct phy_device *phydev)
+static int ip101a_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ip101a_g_match_phy_device(phydev, true);
}
-static int ip101g_match_phy_device(struct phy_device *phydev)
+static int ip101g_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ip101a_g_match_phy_device(phydev, false);
}
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 23e1f0521f54..f3d83b04c953 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -119,7 +119,6 @@
#define MV88Q2XXX_LED_INDEX_GPIO 1
struct mv88q2xxx_priv {
- bool enable_temp;
bool enable_led0;
};
@@ -482,49 +481,6 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
return phydev->drv->soft_reset(phydev);
}
-static int mv88q2xxx_config_init(struct phy_device *phydev)
-{
- struct mv88q2xxx_priv *priv = phydev->priv;
- int ret;
-
- /* The 88Q2XXX PHYs do have the extended ability register available, but
- * register MDIO_PMA_EXTABLE where they should signalize it does not
- * work according to specification. Therefore, we force it here.
- */
- phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
-
- /* Configure interrupt with default settings, output is driven low for
- * active interrupt and high for inactive.
- */
- if (phy_interrupt_is_valid(phydev)) {
- ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
- if (ret < 0)
- return ret;
- }
-
- /* Enable LED function and disable TX disable feature on LED/TX_ENABLE */
- if (priv->enable_led0) {
- ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_RESET_CTRL,
- MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE);
- if (ret < 0)
- return ret;
- }
-
- /* Enable temperature sense */
- if (priv->enable_temp) {
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
static int mv88q2xxx_get_sqi(struct phy_device *phydev)
{
int ret;
@@ -667,6 +623,12 @@ static int mv88q2xxx_resume(struct phy_device *phydev)
}
#if IS_ENABLED(CONFIG_HWMON)
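+/* The sensor must be re-enabled after any hard reset of the PHY, which
+ * restores it to its disabled default; hence the second call in
+ * mv88q2xxx_config_init() below.
+ */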
+static int mv88q2xxx_enable_temp_sense(struct phy_device *phydev)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+}
+
static const struct hwmon_channel_info * const mv88q2xxx_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_ALARM),
NULL
@@ -762,11 +724,13 @@ static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
{
- struct mv88q2xxx_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
struct device *hwmon;
+ int ret;
- priv->enable_temp = true;
+ ret = mv88q2xxx_enable_temp_sense(phydev);
+ if (ret < 0)
+ return ret;
hwmon = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&mv88q2xxx_hwmon_chip_info,
@@ -776,6 +740,11 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
}
#else
+static int mv88q2xxx_enable_temp_sense(struct phy_device *phydev)
+{
+ return 0;
+}
+
static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
{
return 0;
@@ -828,6 +797,7 @@ static int mv88q2xxx_leds_probe(struct phy_device *phydev)
static int mv88q2xxx_probe(struct phy_device *phydev)
{
struct mv88q2xxx_priv *priv;
+ int ret;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -835,22 +805,53 @@ static int mv88q2xxx_probe(struct phy_device *phydev)
phydev->priv = priv;
- return 0;
+ ret = mv88q2xxx_leds_probe(phydev);
+ if (ret)
+ return ret;
+
+ return mv88q2xxx_hwmon_probe(phydev);
}
-static int mv88q222x_probe(struct phy_device *phydev)
+static int mv88q2xxx_config_init(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv = phydev->priv;
int ret;
- ret = mv88q2xxx_probe(phydev);
- if (ret)
- return ret;
+ /* The 88Q2XXX PHYs do have the extended ability register available,
+ * but the MDIO_PMA_EXTABLE register, where they should signal it,
+ * does not work according to the specification. Therefore, we force
+ * it here.
+ */
+ phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
- ret = mv88q2xxx_leds_probe(phydev);
- if (ret)
+ /* Configure interrupt with default settings, output is driven low for
+ * active interrupt and high for inactive.
+ */
+ if (phy_interrupt_is_valid(phydev)) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable LED function and disable TX disable feature on LED/TX_ENABLE */
+ if (priv->enable_led0) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RESET_CTRL,
+ MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable temperature sense again. There might have been a hard reset
+ * of the PHY, in which case the register content is restored to
+ * defaults and we need to enable it again.
+ */
+ ret = mv88q2xxx_enable_temp_sense(phydev);
+ if (ret < 0)
return ret;
- return mv88q2xxx_hwmon_probe(phydev);
+ return 0;
}
static int mv88q2110_config_init(struct phy_device *phydev)
@@ -1118,7 +1119,7 @@ static struct phy_driver mv88q2xxx_driver[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2220",
.flags = PHY_POLL_CABLE_TEST,
- .probe = mv88q222x_probe,
+ .probe = mv88q2xxx_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.aneg_done = genphy_c45_aneg_done,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 5354c8895163..13e81dff42c1 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -1264,7 +1264,8 @@ static int mv3310_get_number_of_ports(struct phy_device *phydev)
return ret + 1;
}
-static int mv3310_match_phy_device(struct phy_device *phydev)
+static int mv3310_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
@@ -1273,7 +1274,8 @@ static int mv3310_match_phy_device(struct phy_device *phydev)
return mv3310_get_number_of_ports(phydev) == 1;
}
-static int mv3340_match_phy_device(struct phy_device *phydev)
+static int mv3340_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
@@ -1297,12 +1299,14 @@ static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
return !!(val & MDIO_PCS_SPEED_5G) == has_5g;
}
-static int mv2110_match_phy_device(struct phy_device *phydev)
+static int mv2110_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return mv211x_match_phy_device(phydev, true);
}
-static int mv2111_match_phy_device(struct phy_device *phydev)
+static int mv2111_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return mv211x_match_phy_device(phydev, false);
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index ede596c1a69d..fda2e27c1810 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -8,17 +8,14 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/micrel_phy.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -27,7 +24,6 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/reset.h>
-#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -37,8 +33,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/mdio.h>
-#include "mdio-boardinfo.h"
-
static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
{
/* Deassert the optional reset signal */
@@ -137,45 +131,6 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr)
EXPORT_SYMBOL(mdiobus_is_registered_device);
/**
- * mdiobus_alloc_size - allocate a mii_bus structure
- * @size: extra amount of memory to allocate for private storage.
- * If non-zero, then bus->priv is points to that memory.
- *
- * Description: called by a bus driver to allocate an mii_bus
- * structure to fill in.
- */
-struct mii_bus *mdiobus_alloc_size(size_t size)
-{
- struct mii_bus *bus;
- size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
- size_t alloc_size;
- int i;
-
- /* If we alloc extra space, it should be aligned */
- if (size)
- alloc_size = aligned_size + size;
- else
- alloc_size = sizeof(*bus);
-
- bus = kzalloc(alloc_size, GFP_KERNEL);
- if (!bus)
- return NULL;
-
- bus->state = MDIOBUS_ALLOCATED;
- if (size)
- bus->priv = (void *)bus + aligned_size;
-
- /* Initialise the interrupts to polling and 64-bit seqcounts */
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- bus->irq[i] = PHY_POLL;
- u64_stats_init(&bus->stats[i].syncp);
- }
-
- return bus;
-}
-EXPORT_SYMBOL(mdiobus_alloc_size);
-
-/**
* mdiobus_release - mii_bus device release callback
* @d: the target struct device that contains the mii_bus
*
@@ -403,11 +358,12 @@ static const struct attribute_group *mdio_bus_groups[] = {
NULL,
};
-static struct class mdio_bus_class = {
+const struct class mdio_bus_class = {
.name = "mdio_bus",
.dev_release = mdiobus_release,
.dev_groups = mdio_bus_groups,
};
+EXPORT_SYMBOL_GPL(mdio_bus_class);
/**
* mdio_find_bus - Given the name of a mdiobus, find the mii_bus.
@@ -451,422 +407,8 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
-
-/* Walk the list of subnodes of a mdio bus and look for a node that
- * matches the mdio device's address with its 'reg' property. If
- * found, set the of_node pointer for the mdio device. This allows
- * auto-probed phy devices to be supplied with information passed in
- * via DT.
- * If a PHY package is found, PHY is searched also there.
- */
-static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
- struct device_node *np)
-{
- struct device_node *child;
-
- for_each_available_child_of_node(np, child) {
- int addr;
-
- if (of_node_name_eq(child, "ethernet-phy-package")) {
- /* Validate PHY package reg presence */
- if (!of_property_present(child, "reg")) {
- of_node_put(child);
- return -EINVAL;
- }
-
- if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
- /* The refcount for the PHY package will be
- * incremented later when PHY join the Package.
- */
- of_node_put(child);
- return 0;
- }
-
- continue;
- }
-
- addr = of_mdio_parse_addr(dev, child);
- if (addr < 0)
- continue;
-
- if (addr == mdiodev->addr) {
- device_set_node(dev, of_fwnode_handle(child));
- /* The refcount on "child" is passed to the mdio
- * device. Do _not_ use of_node_put(child) here.
- */
- return 0;
- }
- }
-
- return -ENODEV;
-}
-
-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
- struct mdio_device *mdiodev)
-{
- struct device *dev = &mdiodev->dev;
-
- if (dev->of_node || !bus->dev.of_node)
- return;
-
- of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
-}
-#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
-static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
- struct mdio_device *mdiodev)
-{
-}
#endif
-/**
- * mdiobus_create_device - create a full MDIO device given
- * a mdio_board_info structure
- * @bus: MDIO bus to create the devices on
- * @bi: mdio_board_info structure describing the devices
- *
- * Returns 0 on success or < 0 on error.
- */
-static int mdiobus_create_device(struct mii_bus *bus,
- struct mdio_board_info *bi)
-{
- struct mdio_device *mdiodev;
- int ret = 0;
-
- mdiodev = mdio_device_create(bus, bi->mdio_addr);
- if (IS_ERR(mdiodev))
- return -ENODEV;
-
- strscpy(mdiodev->modalias, bi->modalias,
- sizeof(mdiodev->modalias));
- mdiodev->bus_match = mdio_device_bus_match;
- mdiodev->dev.platform_data = (void *)bi->platform_data;
-
- ret = mdio_device_register(mdiodev);
- if (ret)
- mdio_device_free(mdiodev);
-
- return ret;
-}
-
-static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
-{
- struct phy_device *phydev = ERR_PTR(-ENODEV);
- struct fwnode_handle *fwnode;
- char node_name[16];
- int err;
-
- phydev = get_phy_device(bus, addr, c45);
- if (IS_ERR(phydev))
- return phydev;
-
- /* For DT, see if the auto-probed phy has a corresponding child
- * in the bus node, and set the of_node pointer in this case.
- */
- of_mdiobus_link_mdiodev(bus, &phydev->mdio);
-
- /* Search for a swnode for the phy in the swnode hierarchy of the bus.
- * If there is no swnode for the phy provided, just ignore it.
- */
- if (dev_fwnode(&bus->dev) && !dev_fwnode(&phydev->mdio.dev)) {
- snprintf(node_name, sizeof(node_name), "ethernet-phy@%d",
- addr);
- fwnode = fwnode_get_named_child_node(dev_fwnode(&bus->dev),
- node_name);
- if (fwnode)
- device_set_node(&phydev->mdio.dev, fwnode);
- }
-
- err = phy_device_register(phydev);
- if (err) {
- phy_device_free(phydev);
- return ERR_PTR(-ENODEV);
- }
-
- return phydev;
-}
-
-/**
- * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices.
- * @bus: mii_bus to scan
- * @addr: address on bus to scan
- *
- * This function scans one address on the MDIO bus, looking for
- * devices which can be identified using a vendor/product ID in
- * registers 2 and 3. Not all MDIO devices have such registers, but
- * PHY devices typically do. Hence this function assumes anything
- * found is a PHY, or can be treated as a PHY. Other MDIO devices,
- * such as switches, will probably not be found during the scan.
- */
-struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr)
-{
- return mdiobus_scan(bus, addr, false);
-}
-EXPORT_SYMBOL(mdiobus_scan_c22);
-
-/**
- * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices.
- * @bus: mii_bus to scan
- * @addr: address on bus to scan
- *
- * This function scans one address on the MDIO bus, looking for
- * devices which can be identified using a vendor/product ID in
- * registers 2 and 3. Not all MDIO devices have such registers, but
- * PHY devices typically do. Hence this function assumes anything
- * found is a PHY, or can be treated as a PHY. Other MDIO devices,
- * such as switches, will probably not be found during the scan.
- */
-static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr)
-{
- return mdiobus_scan(bus, addr, true);
-}
-
-static int mdiobus_scan_bus_c22(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & BIT(i)) == 0) {
- struct phy_device *phydev;
-
- phydev = mdiobus_scan_c22(bus, i);
- if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
- return PTR_ERR(phydev);
- }
- }
- return 0;
-}
-
-static int mdiobus_scan_bus_c45(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & BIT(i)) == 0) {
- struct phy_device *phydev;
-
- /* Don't scan C45 if we already have a C22 device */
- if (bus->mdio_map[i])
- continue;
-
- phydev = mdiobus_scan_c45(bus, i);
- if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
- return PTR_ERR(phydev);
- }
- }
- return 0;
-}
-
-/* There are some C22 PHYs which do bad things when where is a C45
- * transaction on the bus, like accepting a read themselves, and
- * stomping over the true devices reply, to performing a write to
- * themselves which was intended for another device. Now that C22
- * devices have been found, see if any of them are bad for C45, and if we
- * should skip the C45 scan.
- */
-static bool mdiobus_prevent_c45_scan(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- struct phy_device *phydev;
- u32 oui;
-
- phydev = mdiobus_get_phy(bus, i);
- if (!phydev)
- continue;
- oui = phydev->phy_id >> 10;
-
- if (oui == MICREL_OUI)
- return true;
- }
- return false;
-}
-
-/**
- * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
- * @bus: target mii_bus
- * @owner: module containing bus accessor functions
- *
- * Description: Called by a bus driver to bring up all the PHYs
- * on a given bus, and attach them to the bus. Drivers should use
- * mdiobus_register() rather than __mdiobus_register() unless they
- * need to pass a specific owner module. MDIO devices which are not
- * PHYs will not be brought up by this function. They are expected
- * to be explicitly listed in DT and instantiated by of_mdiobus_register().
- *
- * Returns 0 on success or < 0 on error.
- */
-int __mdiobus_register(struct mii_bus *bus, struct module *owner)
-{
- struct mdio_device *mdiodev;
- struct gpio_desc *gpiod;
- bool prevent_c45_scan;
- int i, err;
-
- if (!bus || !bus->name)
- return -EINVAL;
-
- /* An access method always needs both read and write operations */
- if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45)
- return -EINVAL;
-
- /* At least one method is mandatory */
- if (!bus->read && !bus->read_c45)
- return -EINVAL;
-
- if (bus->parent && bus->parent->of_node)
- bus->parent->of_node->fwnode.flags |=
- FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
-
- WARN(bus->state != MDIOBUS_ALLOCATED &&
- bus->state != MDIOBUS_UNREGISTERED,
- "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id);
-
- bus->owner = owner;
- bus->dev.parent = bus->parent;
- bus->dev.class = &mdio_bus_class;
- bus->dev.groups = NULL;
- dev_set_name(&bus->dev, "%s", bus->id);
-
- /* If the bus state is allocated, we're registering a fresh bus
- * that may have a fwnode associated with it. Grab a reference
- * to the fwnode. This will be dropped when the bus is released.
- * If the bus was set to unregistered, it means that the bus was
- * previously registered, and we've already grabbed a reference.
- */
- if (bus->state == MDIOBUS_ALLOCATED)
- fwnode_handle_get(dev_fwnode(&bus->dev));
-
- /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
- * the device in mdiobus_free()
- *
- * State will be updated later in this function in case of success
- */
- bus->state = MDIOBUS_UNREGISTERED;
-
- err = device_register(&bus->dev);
- if (err) {
- pr_err("mii_bus %s failed to register\n", bus->id);
- return -EINVAL;
- }
-
- mutex_init(&bus->mdio_lock);
- mutex_init(&bus->shared_lock);
-
- /* assert bus level PHY GPIO reset */
- gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod)) {
- err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
- "mii_bus %s couldn't get reset GPIO\n",
- bus->id);
- device_del(&bus->dev);
- return err;
- } else if (gpiod) {
- bus->reset_gpiod = gpiod;
- fsleep(bus->reset_delay_us);
- gpiod_set_value_cansleep(gpiod, 0);
- if (bus->reset_post_delay_us > 0)
- fsleep(bus->reset_post_delay_us);
- }
-
- if (bus->reset) {
- err = bus->reset(bus);
- if (err)
- goto error_reset_gpiod;
- }
-
- if (bus->read) {
- err = mdiobus_scan_bus_c22(bus);
- if (err)
- goto error;
- }
-
- prevent_c45_scan = mdiobus_prevent_c45_scan(bus);
-
- if (!prevent_c45_scan && bus->read_c45) {
- err = mdiobus_scan_bus_c45(bus);
- if (err)
- goto error;
- }
-
- mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
-
- bus->state = MDIOBUS_REGISTERED;
- dev_dbg(&bus->dev, "probed\n");
- return 0;
-
-error:
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- mdiodev = bus->mdio_map[i];
- if (!mdiodev)
- continue;
-
- mdiodev->device_remove(mdiodev);
- mdiodev->device_free(mdiodev);
- }
-error_reset_gpiod:
- /* Put PHYs in RESET to save power */
- if (bus->reset_gpiod)
- gpiod_set_value_cansleep(bus->reset_gpiod, 1);
-
- device_del(&bus->dev);
- return err;
-}
-EXPORT_SYMBOL(__mdiobus_register);
-
-void mdiobus_unregister(struct mii_bus *bus)
-{
- struct mdio_device *mdiodev;
- int i;
-
- if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
- return;
- bus->state = MDIOBUS_UNREGISTERED;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- mdiodev = bus->mdio_map[i];
- if (!mdiodev)
- continue;
-
- if (mdiodev->reset_gpio)
- gpiod_put(mdiodev->reset_gpio);
-
- mdiodev->device_remove(mdiodev);
- mdiodev->device_free(mdiodev);
- }
-
- /* Put PHYs in RESET to save power */
- if (bus->reset_gpiod)
- gpiod_set_value_cansleep(bus->reset_gpiod, 1);
-
- device_del(&bus->dev);
-}
-EXPORT_SYMBOL(mdiobus_unregister);
-
-/**
- * mdiobus_free - free a struct mii_bus
- * @bus: mii_bus to free
- *
- * This function releases the reference to the underlying device
- * object in the mii_bus. If this is the last reference, the mii_bus
- * will be freed.
- */
-void mdiobus_free(struct mii_bus *bus)
-{
- /* For compatibility with error handling in drivers. */
- if (bus->state == MDIOBUS_ALLOCATED) {
- kfree(bus);
- return;
- }
-
- WARN(bus->state != MDIOBUS_UNREGISTERED,
- "%s: not in UNREGISTERED state\n", bus->id);
- bus->state = MDIOBUS_RELEASED;
-
- put_device(&bus->dev);
-}
-EXPORT_SYMBOL(mdiobus_free);
-
static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
{
preempt_disable();
@@ -903,6 +445,9 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read)
retval = bus->read(bus, addr, regnum);
else
@@ -932,6 +477,9 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write)
err = bus->write(bus, addr, regnum, val);
else
@@ -993,6 +541,9 @@ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read_c45)
retval = bus->read_c45(bus, addr, devad, regnum);
else
@@ -1024,6 +575,9 @@ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write_c45)
err = bus->write_c45(bus, addr, devad, regnum, val);
else
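The four hunks above add the same fail-fast guard to each accessor entry point: any address at or beyond PHY_MAX_ADDR (32) now returns -ENXIO before a bus transaction is attempted. A minimal caller-side illustration (the wrapper below is hypothetical; only mdiobus_read() and MII_BMSR are real):

static int example_out_of_range_read(struct mii_bus *bus)
{
	/* mdiobus_read() takes the mdio_lock and calls __mdiobus_read();
	 * with the new guard this fails with -ENXIO and bus->read() is
	 * never invoked.
	 */
	return mdiobus_read(bus, PHY_MAX_ADDR, MII_BMSR);
}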
@@ -1446,7 +1000,7 @@ const struct bus_type mdio_bus_type = {
};
EXPORT_SYMBOL(mdio_bus_type);
-int __init mdio_bus_init(void)
+static int __init mdio_bus_init(void)
{
int ret;
@@ -1460,16 +1014,14 @@ int __init mdio_bus_init(void)
return ret;
}
-#if IS_ENABLED(CONFIG_PHYLIB)
-void mdio_bus_exit(void)
+static void __exit mdio_bus_exit(void)
{
class_unregister(&mdio_bus_class);
bus_unregister(&mdio_bus_type);
}
-EXPORT_SYMBOL_GPL(mdio_bus_exit);
-#else
-module_init(mdio_bus_init);
-/* no module_exit, intentional */
+
+subsys_initcall(mdio_bus_init);
+module_exit(mdio_bus_exit);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MDIO bus/device layer");
-#endif
diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c
new file mode 100644
index 000000000000..65850e36284d
--- /dev/null
+++ b/drivers/net/phy/mdio_bus_provider.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* MDIO Bus provider interface
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/micrel_phy.h>
+#include <linux/mii.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+
+#include "mdio-boardinfo.h"
+
+/**
+ * mdiobus_alloc_size - allocate a mii_bus structure
+ * @size: extra amount of memory to allocate for private storage.
+ * If non-zero, then bus->priv points to that memory.
+ *
+ * Description: called by a bus driver to allocate an mii_bus
+ * structure to fill in.
+ */
+struct mii_bus *mdiobus_alloc_size(size_t size)
+{
+ struct mii_bus *bus;
+ size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
+ size_t alloc_size;
+ int i;
+
+ /* If we alloc extra space, it should be aligned */
+ if (size)
+ alloc_size = aligned_size + size;
+ else
+ alloc_size = sizeof(*bus);
+
+ bus = kzalloc(alloc_size, GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ bus->state = MDIOBUS_ALLOCATED;
+ if (size)
+ bus->priv = (void *)bus + aligned_size;
+
+ /* Initialise the interrupts to polling and 64-bit seqcounts */
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ bus->irq[i] = PHY_POLL;
+ u64_stats_init(&bus->stats[i].syncp);
+ }
+
+ return bus;
+}
+EXPORT_SYMBOL(mdiobus_alloc_size);
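As a hedged usage sketch of the co-allocation (the struct and function names below are hypothetical, not part of this patch), a provider can reach its private state through bus->priv:

struct my_mdio_priv {
	void __iomem *regs;	/* hypothetical MMIO base, mapped later */
};

static struct mii_bus *my_alloc_bus(void)
{
	struct my_mdio_priv *priv;
	struct mii_bus *bus;

	/* The extra space lives in the same allocation, NETDEV_ALIGN
	 * aligned, and bus->priv points at it.
	 */
	bus = mdiobus_alloc_size(sizeof(*priv));
	if (!bus)
		return NULL;

	priv = bus->priv;
	priv->regs = NULL;
	return bus;
}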
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+/* Walk the list of subnodes of an mdio bus and look for a node that
+ * matches the mdio device's address with its 'reg' property. If
+ * found, set the of_node pointer for the mdio device. This allows
+ * auto-probed phy devices to be supplied with information passed in
+ * via DT.
+ * If a PHY package node is found, the PHY is also searched for within it.
+ */
+static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
+ struct device_node *np)
+{
+ struct device_node *child;
+
+ for_each_available_child_of_node(np, child) {
+ int addr;
+
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Validate PHY package reg presence */
+ if (!of_property_present(child, "reg")) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
+ /* The refcount for the PHY package will be
+ * incremented later when the PHY joins the package.
+ */
+ of_node_put(child);
+ return 0;
+ }
+
+ continue;
+ }
+
+ addr = of_mdio_parse_addr(dev, child);
+ if (addr < 0)
+ continue;
+
+ if (addr == mdiodev->addr) {
+ device_set_node(dev, of_fwnode_handle(child));
+ /* The refcount on "child" is passed to the mdio
+ * device. Do _not_ use of_node_put(child) here.
+ */
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+ struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+
+ if (dev->of_node || !bus->dev.of_node)
+ return;
+
+ of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
+}
+#endif
+
+/**
+ * mdiobus_create_device - create a full MDIO device given
+ * a mdio_board_info structure
+ * @bus: MDIO bus to create the devices on
+ * @bi: mdio_board_info structure describing the devices
+ *
+ * Returns 0 on success or < 0 on error.
+ */
+static int mdiobus_create_device(struct mii_bus *bus,
+ struct mdio_board_info *bi)
+{
+ struct mdio_device *mdiodev;
+ int ret = 0;
+
+ mdiodev = mdio_device_create(bus, bi->mdio_addr);
+ if (IS_ERR(mdiodev))
+ return -ENODEV;
+
+ strscpy(mdiodev->modalias, bi->modalias,
+ sizeof(mdiodev->modalias));
+ mdiodev->bus_match = mdio_device_bus_match;
+ mdiodev->dev.platform_data = (void *)bi->platform_data;
+
+ ret = mdio_device_register(mdiodev);
+ if (ret)
+ mdio_device_free(mdiodev);
+
+ return ret;
+}
+
+static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
+{
+ struct phy_device *phydev = ERR_PTR(-ENODEV);
+ struct fwnode_handle *fwnode;
+ char node_name[16];
+ int err;
+
+ phydev = get_phy_device(bus, addr, c45);
+ if (IS_ERR(phydev))
+ return phydev;
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+ /* For DT, see if the auto-probed phy has a corresponding child
+ * in the bus node, and set the of_node pointer in this case.
+ */
+ of_mdiobus_link_mdiodev(bus, &phydev->mdio);
+#endif
+
+ /* Search for a swnode for the phy in the swnode hierarchy of the bus.
+ * If there is no swnode for the phy provided, just ignore it.
+ */
+ if (dev_fwnode(&bus->dev) && !dev_fwnode(&phydev->mdio.dev)) {
+ snprintf(node_name, sizeof(node_name), "ethernet-phy@%d",
+ addr);
+ fwnode = fwnode_get_named_child_node(dev_fwnode(&bus->dev),
+ node_name);
+ if (fwnode)
+ device_set_node(&phydev->mdio.dev, fwnode);
+ }
+
+ err = phy_device_register(phydev);
+ if (err) {
+ phy_device_free(phydev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return phydev;
+}
+
+/**
+ * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, false);
+}
+EXPORT_SYMBOL(mdiobus_scan_c22);
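A hedged sketch of probing a single, known address with the exported helper; the wrapper and the address are assumptions for illustration:

static int example_probe_one(struct mii_bus *bus)
{
	struct phy_device *phydev;

	phydev = mdiobus_scan_c22(bus, 4);	/* example address */
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);	/* -ENODEV if nothing answered */

	phydev_info(phydev, "found PHY, id 0x%08x\n", phydev->phy_id);
	return 0;
}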
+
+/**
+ * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, true);
+}
+
+static int mdiobus_scan_bus_c22(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan_c22(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+static int mdiobus_scan_bus_c45(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ /* Don't scan C45 if we already have a C22 device */
+ if (bus->mdio_map[i])
+ continue;
+
+ phydev = mdiobus_scan_c45(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+/* There are some C22 PHYs which do bad things when there is a C45
+ * transaction on the bus, like accepting a read themselves, and
+ * stomping over the true device's reply, or performing a write to
+ * themselves which was intended for another device. Now that C22
+ * devices have been found, see if any of them are bad for C45, and if we
+ * should skip the C45 scan.
+ */
+static bool mdiobus_prevent_c45_scan(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+ u32 oui;
+
+ phydev = mdiobus_get_phy(bus, i);
+ if (!phydev)
+ continue;
+ oui = phydev->phy_id >> 10;
+
+ if (oui == MICREL_OUI)
+ return true;
+ }
+ return false;
+}
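The quirk check relies on the Clause 22 ID layout: registers 2 and 3 pack the vendor OUI into the top 22 bits of phy_id, so shifting right by 10 recovers it. A standalone illustration (the helper name is hypothetical):

static inline u32 example_phy_id_to_oui(u32 phy_id)
{
	/* the top 22 bits of the 32-bit phy_id hold the vendor OUI */
	return phy_id >> 10;
}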
+
+/**
+ * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
+ * @bus: target mii_bus
+ * @owner: module containing bus accessor functions
+ *
+ * Description: Called by a bus driver to bring up all the PHYs
+ * on a given bus, and attach them to the bus. Drivers should use
+ * mdiobus_register() rather than __mdiobus_register() unless they
+ * need to pass a specific owner module. MDIO devices which are not
+ * PHYs will not be brought up by this function. They are expected
+ * to be explicitly listed in DT and instantiated by of_mdiobus_register().
+ *
+ * Returns 0 on success or < 0 on error.
+ */
+int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+{
+ struct mdio_device *mdiodev;
+ struct gpio_desc *gpiod;
+ bool prevent_c45_scan;
+ int i, err;
+
+ if (!bus || !bus->name)
+ return -EINVAL;
+
+ /* An access method always needs both read and write operations */
+ if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45)
+ return -EINVAL;
+
+ /* At least one method is mandatory */
+ if (!bus->read && !bus->read_c45)
+ return -EINVAL;
+
+ if (bus->parent && bus->parent->of_node)
+ bus->parent->of_node->fwnode.flags |=
+ FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
+ WARN(bus->state != MDIOBUS_ALLOCATED &&
+ bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id);
+
+ bus->owner = owner;
+ bus->dev.parent = bus->parent;
+ bus->dev.class = &mdio_bus_class;
+ bus->dev.groups = NULL;
+ dev_set_name(&bus->dev, "%s", bus->id);
+
+ /* If the bus state is allocated, we're registering a fresh bus
+ * that may have a fwnode associated with it. Grab a reference
+ * to the fwnode. This will be dropped when the bus is released.
+ * If the bus was set to unregistered, it means that the bus was
+ * previously registered, and we've already grabbed a reference.
+ */
+ if (bus->state == MDIOBUS_ALLOCATED)
+ fwnode_handle_get(dev_fwnode(&bus->dev));
+
+ /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
+ * the device in mdiobus_free()
+ *
+ * State will be updated later in this function in case of success
+ */
+ bus->state = MDIOBUS_UNREGISTERED;
+
+ err = device_register(&bus->dev);
+ if (err) {
+ pr_err("mii_bus %s failed to register\n", bus->id);
+ return -EINVAL;
+ }
+
+ mutex_init(&bus->mdio_lock);
+ mutex_init(&bus->shared_lock);
+
+ /* assert bus level PHY GPIO reset */
+ gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod)) {
+ err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
+ "mii_bus %s couldn't get reset GPIO\n",
+ bus->id);
+ device_del(&bus->dev);
+ return err;
+ } else if (gpiod) {
+ bus->reset_gpiod = gpiod;
+ fsleep(bus->reset_delay_us);
+ gpiod_set_value_cansleep(gpiod, 0);
+ if (bus->reset_post_delay_us > 0)
+ fsleep(bus->reset_post_delay_us);
+ }
+
+ if (bus->reset) {
+ err = bus->reset(bus);
+ if (err)
+ goto error_reset_gpiod;
+ }
+
+ if (bus->read) {
+ err = mdiobus_scan_bus_c22(bus);
+ if (err)
+ goto error;
+ }
+
+ prevent_c45_scan = mdiobus_prevent_c45_scan(bus);
+
+ if (!prevent_c45_scan && bus->read_c45) {
+ err = mdiobus_scan_bus_c45(bus);
+ if (err)
+ goto error;
+ }
+
+ mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
+
+ bus->state = MDIOBUS_REGISTERED;
+ dev_dbg(&bus->dev, "probed\n");
+ return 0;
+
+error:
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ mdiodev = bus->mdio_map[i];
+ if (!mdiodev)
+ continue;
+
+ mdiodev->device_remove(mdiodev);
+ mdiodev->device_free(mdiodev);
+ }
+error_reset_gpiod:
+ /* Put PHYs in RESET to save power */
+ if (bus->reset_gpiod)
+ gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+
+ device_del(&bus->dev);
+ return err;
+}
+EXPORT_SYMBOL(__mdiobus_register);
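A hedged sketch of a provider honouring the checks above: C22 read/write must be supplied as a pair, and at least one access method is mandatory. All my_* names are hypothetical placeholders:

static int my_mdio_read(struct mii_bus *bus, int addr, int regnum);
static int my_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val);

static int my_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "my-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "my-mdio-%s", dev_name(&pdev->dev));
	bus->parent = &pdev->dev;
	bus->read = my_mdio_read;	/* both or neither */
	bus->write = my_mdio_write;

	/* mdiobus_register() wraps __mdiobus_register(bus, THIS_MODULE) */
	return mdiobus_register(bus);
}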
+
+void mdiobus_unregister(struct mii_bus *bus)
+{
+ struct mdio_device *mdiodev;
+ int i;
+
+ if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
+ return;
+ bus->state = MDIOBUS_UNREGISTERED;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ mdiodev = bus->mdio_map[i];
+ if (!mdiodev)
+ continue;
+
+ if (mdiodev->reset_gpio)
+ gpiod_put(mdiodev->reset_gpio);
+
+ mdiodev->device_remove(mdiodev);
+ mdiodev->device_free(mdiodev);
+ }
+
+ /* Put PHYs in RESET to save power */
+ if (bus->reset_gpiod)
+ gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+
+ device_del(&bus->dev);
+}
+EXPORT_SYMBOL(mdiobus_unregister);
+
+/**
+ * mdiobus_free - free a struct mii_bus
+ * @bus: mii_bus to free
+ *
+ * This function releases the reference to the underlying device
+ * object in the mii_bus. If this is the last reference, the mii_bus
+ * will be freed.
+ */
+void mdiobus_free(struct mii_bus *bus)
+{
+ /* For compatibility with error handling in drivers. */
+ if (bus->state == MDIOBUS_ALLOCATED) {
+ kfree(bus);
+ return;
+ }
+
+ WARN(bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in UNREGISTERED state\n", bus->id);
+ bus->state = MDIOBUS_RELEASED;
+
+ put_device(&bus->dev);
+}
+EXPORT_SYMBOL(mdiobus_free);
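Teardown mirrors registration and the state machine above: unregister first, then drop the reference. A hedged sketch, assuming the bus was allocated with mdiobus_alloc_size() rather than a devm_* variant:

static void my_mdio_remove(struct mii_bus *bus)
{
	mdiobus_unregister(bus);	/* state -> MDIOBUS_UNREGISTERED */
	mdiobus_free(bus);		/* state -> MDIOBUS_RELEASED, ref dropped */
}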
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index e747ee63c665..cce3f405d1a4 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -45,6 +45,7 @@ int mdio_device_bus_match(struct device *dev, const struct device_driver *drv)
return strcmp(mdiodev->modalias, drv->name) == 0;
}
+EXPORT_SYMBOL_GPL(mdio_device_bus_match);
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
{
diff --git a/drivers/net/phy/mediatek/Kconfig b/drivers/net/phy/mediatek/Kconfig
index 2a8ac5aed0f8..9f30a91be8dd 100644
--- a/drivers/net/phy/mediatek/Kconfig
+++ b/drivers/net/phy/mediatek/Kconfig
@@ -1,6 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
-config MTK_NET_PHYLIB
- tristate
+config MEDIATEK_2P5GE_PHY
+ tristate "MediaTek 2.5Gb Ethernet PHYs"
+ depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST
+ select MTK_NET_PHYLIB
+ help
+ Supports MediaTek SoC built-in 2.5Gb Ethernet PHYs.
+
+	  This will load the necessary firmware and add an appropriate time
+	  delay. The procedure is accelerated through the internal pbus
+	  instead of the MDIO bus. Certain link-up issues are also fixed here.
config MEDIATEK_GE_PHY
tristate "MediaTek Gigabit Ethernet PHYs"
@@ -15,8 +23,9 @@ config MEDIATEK_GE_PHY
config MEDIATEK_GE_SOC_PHY
tristate "MediaTek SoC Ethernet PHYs"
- depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST
- depends on NVMEM_MTK_EFUSE
+ depends on ARM64 || COMPILE_TEST
+ depends on ARCH_AIROHA || (ARCH_MEDIATEK && NVMEM_MTK_EFUSE) || \
+ COMPILE_TEST
select MTK_NET_PHYLIB
help
Supports MediaTek SoC built-in Gigabit Ethernet PHYs.
@@ -25,3 +34,6 @@ config MEDIATEK_GE_SOC_PHY
the MT7981 and MT7988 SoCs. These PHYs need calibration data
present in the SoCs efuse and will dynamically calibrate VCM
(common-mode voltage) during startup.
+
+config MTK_NET_PHYLIB
+ tristate
diff --git a/drivers/net/phy/mediatek/Makefile b/drivers/net/phy/mediatek/Makefile
index 814879d0abe5..ac57ecc799fc 100644
--- a/drivers/net/phy/mediatek/Makefile
+++ b/drivers/net/phy/mediatek/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTK_NET_PHYLIB) += mtk-phy-lib.o
+obj-$(CONFIG_MEDIATEK_2P5GE_PHY) += mtk-2p5ge.o
obj-$(CONFIG_MEDIATEK_GE_PHY) += mtk-ge.o
obj-$(CONFIG_MEDIATEK_GE_SOC_PHY) += mtk-ge-soc.o
+obj-$(CONFIG_MTK_NET_PHYLIB) += mtk-phy-lib.o
diff --git a/drivers/net/phy/mediatek/mtk-2p5ge.c b/drivers/net/phy/mediatek/mtk-2p5ge.c
new file mode 100644
index 000000000000..e147eab523ef
--- /dev/null
+++ b/drivers/net/phy/mediatek/mtk-2p5ge.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/phy.h>
+
+#include "mtk.h"
+
+#define MTK_2P5GPHY_ID_MT7988 0x00339c11
+
+#define MT7988_2P5GE_PMB_FW "mediatek/mt7988/i2p5ge-phy-pmb.bin"
+#define MT7988_2P5GE_PMB_FW_SIZE 0x20000
+#define MT7988_2P5GE_PMB_FW_BASE 0x0f100000
+#define MT7988_2P5GE_PMB_FW_LEN 0x20000
+#define MTK_2P5GPHY_MCU_CSR_BASE 0x0f0f0000
+#define MTK_2P5GPHY_MCU_CSR_LEN 0x20
+#define MD32_EN_CFG 0x18
+#define MD32_EN BIT(0)
+
+#define BASE100T_STATUS_EXTEND 0x10
+#define BASE1000T_STATUS_EXTEND 0x11
+#define EXTEND_CTRL_AND_STATUS 0x16
+
+#define PHY_AUX_CTRL_STATUS 0x1d
+#define PHY_AUX_DPX_MASK GENMASK(5, 5)
+#define PHY_AUX_SPEED_MASK GENMASK(4, 2)
+
+/* Registers on MDIO_MMD_VEND1 */
+#define MTK_PHY_LPI_PCS_DSP_CTRL 0x121
+#define MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK GENMASK(12, 8)
+
+#define MTK_PHY_HOST_CMD1 0x800e
+#define MTK_PHY_HOST_CMD2 0x800f
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */
+#define AUTO_NP_10XEN BIT(6)
+
+enum {
+ PHY_AUX_SPD_10 = 0,
+ PHY_AUX_SPD_100,
+ PHY_AUX_SPD_1000,
+ PHY_AUX_SPD_2500,
+};
+
+static int mt798x_2p5ge_phy_load_fw(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ void __iomem *mcu_csr_base, *pmb_addr;
+ const struct firmware *fw;
+ int ret, i;
+ u32 reg;
+
+ pmb_addr = ioremap(MT7988_2P5GE_PMB_FW_BASE, MT7988_2P5GE_PMB_FW_LEN);
+ if (!pmb_addr)
+ return -ENOMEM;
+ mcu_csr_base = ioremap(MTK_2P5GPHY_MCU_CSR_BASE,
+ MTK_2P5GPHY_MCU_CSR_LEN);
+ if (!mcu_csr_base) {
+ ret = -ENOMEM;
+ goto free_pmb;
+ }
+
+ ret = request_firmware_direct(&fw, MT7988_2P5GE_PMB_FW, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware: %s, ret: %d\n",
+ MT7988_2P5GE_PMB_FW, ret);
+ goto free;
+ }
+
+ if (fw->size != MT7988_2P5GE_PMB_FW_SIZE) {
+ dev_err(dev, "Firmware size 0x%zx != 0x%x\n",
+ fw->size, MT7988_2P5GE_PMB_FW_SIZE);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ reg = readw(mcu_csr_base + MD32_EN_CFG);
+ if (reg & MD32_EN) {
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+ usleep_range(10000, 11000);
+ }
+ phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
+
+ /* Write magic number to safely stall MCU */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_HOST_CMD1, 0x1100);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_HOST_CMD2, 0x00df);
+
+ for (i = 0; i < MT7988_2P5GE_PMB_FW_SIZE - 1; i += 4)
+ writel(*((uint32_t *)(fw->data + i)), pmb_addr + i);
+
+ writew(reg & ~MD32_EN, mcu_csr_base + MD32_EN_CFG);
+ writew(reg | MD32_EN, mcu_csr_base + MD32_EN_CFG);
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+ /* We need a delay here to stabilize initialization of the MCU */
+ usleep_range(7000, 8000);
+
+ dev_info(dev, "Firmware date code: %x/%x/%x, version: %x.%x\n",
+ be16_to_cpu(*((__be16 *)(fw->data +
+ MT7988_2P5GE_PMB_FW_SIZE - 8))),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 6),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 5),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 2),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 1));
+
+release_fw:
+ release_firmware(fw);
+free:
+ iounmap(mcu_csr_base);
+free_pmb:
+ iounmap(pmb_addr);
+
+ return ret;
+}
+
+static int mt798x_2p5ge_phy_config_init(struct phy_device *phydev)
+{
+ /* Check if PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
+ return -ENODEV;
+
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LPI_PCS_DSP_CTRL,
+ MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK, 0);
+
+ /* Enable 16-bit next page exchange bit if 1000-BT isn't advertising */
+ mtk_tr_modify(phydev, 0x0, 0xf, 0x3c, AUTO_NP_10XEN,
+ FIELD_PREP(AUTO_NP_10XEN, 0x1));
+
+ /* Enable HW auto downshift */
+ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED_1,
+ MTK_PHY_AUX_CTRL_AND_STATUS,
+ 0, MTK_PHY_ENABLE_DOWNSHIFT);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 adv;
+ int ret;
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ /* Clause 45 doesn't define 1000BaseT support. Use Clause 22 instead in
+ * our design.
+ */
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ ret = phy_modify_changed(phydev, MII_CTRL1000, ADVERTISE_1000FULL, adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int mt798x_2p5ge_phy_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* This PHY can't handle collisions, and neither can the (XFI) MAC it's
+ * connected to. Although it can do the HDX handshake, it doesn't support
+ * the CSMA/CD that HDX requires.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ /* When MDIO_STAT1_LSTATUS is raised by genphy_c45_read_link(), this
+ * PHY actually hasn't finished AN yet. So use CL22's link update
+ * function instead.
+ */
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ /* We'll read the link speed through vendor-specific registers below,
+ * so skip phy_resolve_aneg_linkmode() (AN on) and genphy_c45_read_pma()
+ * (AN off).
+ */
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Clause 45 doesn't define 1000BaseT support. Read the link
+ * partner's 1G advertisement via Clause 22.
+ */
+ ret = phy_read(phydev, MII_STAT1000);
+ if (ret < 0)
+ return ret;
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, ret);
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ if (phydev->link) {
+ ret = phy_read(phydev, PHY_AUX_CTRL_STATUS);
+ if (ret < 0)
+ return ret;
+
+ switch (FIELD_GET(PHY_AUX_SPEED_MASK, ret)) {
+ case PHY_AUX_SPD_10:
+ phydev->speed = SPEED_10;
+ break;
+ case PHY_AUX_SPD_100:
+ phydev->speed = SPEED_100;
+ break;
+ case PHY_AUX_SPD_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case PHY_AUX_SPD_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+
+ phydev->duplex = DUPLEX_FULL;
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ }
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ return RATE_MATCH_PAUSE;
+}
+
+static int mt798x_2p5ge_phy_probe(struct phy_device *phydev)
+{
+ struct pinctrl *pinctrl;
+ int ret;
+
+ switch (phydev->drv->phy_id) {
+ case MTK_2P5GPHY_ID_MT7988:
+ /* This built-in 2.5GbE hardware only sets MDIO_DEVS_PMAPMD.
+ * Set the rest in this driver, since the PCS/AN/VEND1/VEND2
+ * MDIO-manageable devices actually exist.
+ */
+ phydev->c45_ids.mmds_present |= MDIO_DEVS_PCS |
+ MDIO_DEVS_AN |
+ MDIO_DEVS_VEND1 |
+ MDIO_DEVS_VEND2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mt798x_2p5ge_phy_load_fw(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Setup LED */
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_POLARITY | MTK_PHY_LED_ON_LINK10 |
+ MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK1000 |
+ MTK_PHY_LED_ON_LINK2500);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_ON_CTRL,
+ MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX);
+
+ /* Switch pinctrl after setting polarity to avoid bogus blinking */
+ pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "i2p5gbe-led");
+ if (IS_ERR(pinctrl))
+ dev_err(&phydev->mdio.dev, "Failed to set LED pins!\n");
+
+ return 0;
+}
+
+static struct phy_driver mtk_2p5gephy_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(MTK_2P5GPHY_ID_MT7988),
+ .name = "MediaTek MT7988 2.5GbE PHY",
+ .probe = mt798x_2p5ge_phy_probe,
+ .config_init = mt798x_2p5ge_phy_config_init,
+ .config_aneg = mt798x_2p5ge_phy_config_aneg,
+ .get_features = mt798x_2p5ge_phy_get_features,
+ .read_status = mt798x_2p5ge_phy_read_status,
+ .get_rate_matching = mt798x_2p5ge_phy_get_rate_matching,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = mtk_phy_read_page,
+ .write_page = mtk_phy_write_page,
+ },
+};
+
+module_phy_driver(mtk_2p5gephy_driver);
+
+static struct mdio_device_id __maybe_unused mtk_2p5ge_phy_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(0x00339c00) },
+ { }
+};
+
+MODULE_DESCRIPTION("MediaTek 2.5Gb Ethernet PHY driver");
+MODULE_AUTHOR("SkyLake Huang <SkyLake.Huang@mediatek.com>");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(mdio, mtk_2p5ge_phy_tbl);
+MODULE_FIRMWARE(MT7988_2P5GE_PMB_FW);
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index 175cf5239bba..cd09fbf92ef2 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -7,12 +7,17 @@
#include <linux/pinctrl/consumer.h>
#include <linux/phy.h>
#include <linux/regmap.h>
+#include <linux/of.h>
#include "../phylib.h"
#include "mtk.h"
+#define MTK_PHY_MAX_LEDS 2
+
#define MTK_GPHY_ID_MT7981 0x03a29461
#define MTK_GPHY_ID_MT7988 0x03a29481
+#define MTK_GPHY_ID_AN7581 0x03a294c1
+#define MTK_GPHY_ID_AN7583 0xc0ff0420
#define MTK_EXT_PAGE_ACCESS 0x1f
#define MTK_PHY_PAGE_STANDARD 0x0000
@@ -1319,6 +1324,7 @@ static int mt7988_phy_probe_shared(struct phy_device *phydev)
{
struct device_node *np = dev_of_node(&phydev->mdio.bus->dev);
struct mtk_socphy_shared *shared = phy_package_get_priv(phydev);
+ struct device_node *pio_np;
struct regmap *regmap;
u32 reg;
int ret;
@@ -1336,7 +1342,13 @@ static int mt7988_phy_probe_shared(struct phy_device *phydev)
* The 4 bits in TPBANK0 are kept as package shared data and are used to
* set LED polarity for each of the LED0.
*/
- regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,pio");
+ pio_np = of_parse_phandle(np, "mediatek,pio", 0);
+ if (!pio_np)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(pio_np);
+ of_node_put(pio_np);
+
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -1406,6 +1418,58 @@ static int mt7981_phy_probe(struct phy_device *phydev)
return mt798x_phy_calibration(phydev);
}
+static int an7581_phy_probe(struct phy_device *phydev)
+{
+ struct mtk_socphy_priv *priv;
+ struct pinctrl *pinctrl;
+
+ /* Toggle pinctrl to enable PHY LED */
+ pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "gbe-led");
+ if (IS_ERR(pinctrl))
+ dev_err(&phydev->mdio.bus->dev,
+ "Failed to setup PHY LED pinctrl\n");
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int an7581_phy_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ u16 val = 0;
+ u32 mode;
+
+ if (index >= MTK_PHY_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ val = MTK_PHY_LED_ON_POLARITY;
+ break;
+ case PHY_LED_ACTIVE_HIGH:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_POLARITY, val);
+}
+
+static int an7583_phy_config_init(struct phy_device *phydev)
+{
+ /* BMCR_PDOWN is enabled by default */
+ return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
+}
+
static struct phy_driver mtk_socphy_driver[] = {
{
PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981),
@@ -1441,6 +1505,29 @@ static struct phy_driver mtk_socphy_driver[] = {
.led_hw_control_set = mt798x_phy_led_hw_control_set,
.led_hw_control_get = mt798x_phy_led_hw_control_get,
},
+ {
+ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581),
+ .name = "Airoha AN7581 PHY",
+ .probe = an7581_phy_probe,
+ .led_blink_set = mt798x_phy_led_blink_set,
+ .led_brightness_set = mt798x_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
+ .led_hw_control_set = mt798x_phy_led_hw_control_set,
+ .led_hw_control_get = mt798x_phy_led_hw_control_get,
+ .led_polarity_set = an7581_phy_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7583),
+ .name = "Airoha AN7583 PHY",
+ .config_init = an7583_phy_config_init,
+ .probe = an7581_phy_probe,
+ .led_blink_set = mt798x_phy_led_blink_set,
+ .led_brightness_set = mt798x_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
+ .led_hw_control_set = mt798x_phy_led_hw_control_set,
+ .led_hw_control_get = mt798x_phy_led_hw_control_get,
+ .led_polarity_set = an7581_phy_led_polarity_set,
+ },
};
module_phy_driver(mtk_socphy_driver);
@@ -1448,6 +1535,8 @@ module_phy_driver(mtk_socphy_driver);
static const struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988) },
+ { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581) },
+ { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7583) },
{ }
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 24882d30f685..64aa03aed770 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -768,7 +768,8 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
return !ret;
}
-static int ksz8051_match_phy_device(struct phy_device *phydev)
+static int ksz8051_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ksz8051_ksz8795_match_phy_device(phydev, true);
}
@@ -888,7 +889,8 @@ static int ksz8061_config_init(struct phy_device *phydev)
return kszphy_config_init(phydev);
}
-static int ksz8795_match_phy_device(struct phy_device *phydev)
+static int ksz8795_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ksz8051_ksz8795_match_phy_device(phydev, false);
}
@@ -2027,12 +2029,6 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
- /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
- * in this switch shall be regarded as broken.
- */
- if (phydev->dev_flags & MICREL_NO_EEE)
- phy_disable_eee(phydev);
-
return kszphy_config_init(phydev);
}
@@ -3236,10 +3232,6 @@ static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
int pulse_width;
int pin, event;
- /* Reject requests with unsupported flags */
- if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
mutex_lock(&shared->shared_lock);
event = rq->perout.index;
pin = ptp_find_pin(shared->ptp_clock, PTP_PF_PEROUT, event);
@@ -3406,11 +3398,6 @@ static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
struct phy_device *phydev = shared->phydev;
int pin;
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_EXTTS_EDGES |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(shared->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
if (pin == -1 || pin != LAN8814_PTP_EXTTS_NUM)
@@ -3917,6 +3904,10 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM;
shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM;
shared->ptp_clock_info.pps = 0;
+ shared->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ shared->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
shared->ptp_clock_info.pin_config = shared->pin_config;
shared->ptp_clock_info.n_per_out = LAN8814_PTP_PEROUT_NUM;
shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
@@ -5068,9 +5059,6 @@ static int lan8841_ptp_perout(struct ptp_clock_info *ptp,
int pin;
int ret;
- if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(ptp_priv->ptp_clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN8841_PTP_GPIO_NUM)
return -EINVAL;
@@ -5314,6 +5302,7 @@ static struct ptp_clock_info lan8841_ptp_clock_info = {
.n_per_out = LAN8841_PTP_GPIO_NUM,
.n_ext_ts = LAN8841_PTP_GPIO_NUM,
.n_pins = LAN8841_PTP_GPIO_NUM,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE,
};
#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER 3
@@ -5705,7 +5694,6 @@ static struct phy_driver ksphy_driver[] = {
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = ksz9477_resume,
- .get_features = ksz9477_get_features,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 93de88c1c8fd..13570f628aa5 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -474,6 +474,8 @@ static struct phy_driver microchip_phy_driver[] = {
/* This mask (0xfffffff2) is to differentiate from
* LAN8742 (phy_id 0x0007c130 and 0x0007c131)
* and allows future phy_id revisions.
+ * These PHYs are integrated in LAN7800 and LAN7850 USB/Ethernet
+ * controllers.
*/
.phy_id_mask = 0xfffffff2,
.name = "Microchip LAN88xx",
diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
index 3e6bf10cdeed..e6514ce04c29 100644
--- a/drivers/net/phy/microchip_rds_ptp.c
+++ b/drivers/net/phy/microchip_rds_ptp.c
@@ -224,10 +224,6 @@ static int mchp_rds_ptp_perout(struct ptp_clock_info *ptpci,
struct phy_device *phydev = clock->phydev;
int ret, event_pin, pulsewidth;
- /* Reject requests with unsupported flags */
- if (perout->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
event_pin = ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
perout->index);
if (event_pin != clock->event_pin)
@@ -1259,6 +1255,7 @@ struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
clock->caps.pps = 0;
clock->caps.n_pins = MCHP_RDS_PTP_N_PIN;
clock->caps.n_per_out = MCHP_RDS_PTP_N_PEROUT;
+ clock->caps.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
clock->caps.pin_config = clock->pin_config;
clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index ed8fb14a7f21..6b800081eed5 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -946,7 +946,9 @@ static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
/* UDP checksum offset in IPv4 packet
* according to: https://tools.ietf.org/html/rfc768
*/
- val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
+ val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26);
+ if (enable)
+ val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
val);
@@ -1166,18 +1168,24 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
container_of(mii_ts, struct vsc8531_private, mii_ts);
if (!vsc8531->ptp->configured)
- return;
+ goto out;
- if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
- kfree_skb(skb);
- return;
- }
+ if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
+ goto out;
+
+ if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ if (ptp_msg_is_sync(skb, type))
+ goto out;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
mutex_lock(&vsc8531->ts_lock);
__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
mutex_unlock(&vsc8531->ts_lock);
+ return;
+
+out:
+ kfree_skb(skb);
}
static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
diff --git a/drivers/net/phy/mxl-86110.c b/drivers/net/phy/mxl-86110.c
new file mode 100644
index 000000000000..ff2a3a22bd5b
--- /dev/null
+++ b/drivers/net/phy/mxl-86110.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PHY driver for Maxlinear MXL86110
+ *
+ * Copyright 2023 MaxLinear Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+/* PHY ID */
+#define PHY_ID_MXL86110 0xc1335580
+
+/* required to access extended registers */
+#define MXL86110_EXTD_REG_ADDR_OFFSET 0x1E
+#define MXL86110_EXTD_REG_ADDR_DATA 0x1F
+#define PHY_IRQ_ENABLE_REG 0x12
+#define PHY_IRQ_ENABLE_REG_WOL BIT(6)
+
+/* SyncE Configuration Register - COM_EXT SYNCE_CFG */
+#define MXL86110_EXT_SYNCE_CFG_REG 0xA012
+#define MXL86110_EXT_SYNCE_CFG_CLK_FRE_SEL BIT(4)
+#define MXL86110_EXT_SYNCE_CFG_EN_SYNC_E_DURING_LNKDN BIT(5)
+#define MXL86110_EXT_SYNCE_CFG_EN_SYNC_E BIT(6)
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK GENMASK(3, 1)
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_125M_PLL 0
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_25M 4
+
+/* MAC Address registers */
+#define MXL86110_EXT_MAC_ADDR_CFG1 0xA007
+#define MXL86110_EXT_MAC_ADDR_CFG2 0xA008
+#define MXL86110_EXT_MAC_ADDR_CFG3 0xA009
+
+#define MXL86110_EXT_WOL_CFG_REG 0xA00A
+#define MXL86110_WOL_CFG_WOL_MASK BIT(3)
+
+/* RGMII register */
+#define MXL86110_EXT_RGMII_CFG1_REG 0xA003
+/* delay can be adjusted in steps of about 150ps */
+#define MXL86110_EXT_RGMII_CFG1_RX_NO_DELAY (0x0 << 10)
+/* Closest value to 2000 ps */
+#define MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS (0xD << 10)
+#define MXL86110_EXT_RGMII_CFG1_RX_DELAY_MASK GENMASK(13, 10)
+
+#define MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS (0xD << 0)
+#define MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_MASK GENMASK(3, 0)
+
+#define MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS (0xD << 4)
+#define MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_MASK GENMASK(7, 4)
+
+#define MXL86110_EXT_RGMII_CFG1_FULL_MASK \
+ ((MXL86110_EXT_RGMII_CFG1_RX_DELAY_MASK) | \
+ (MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_MASK) | \
+ (MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_MASK))
+
+/* EXT Sleep Control register */
+#define MXL86110_UTP_EXT_SLEEP_CTRL_REG 0x27
+#define MXL86110_UTP_EXT_SLEEP_CTRL_EN_SLEEP_SW_OFF 0
+#define MXL86110_UTP_EXT_SLEEP_CTRL_EN_SLEEP_SW_MASK BIT(15)
+
+/* RGMII In-Band Status and MDIO Configuration Register */
+#define MXL86110_EXT_RGMII_MDIO_CFG 0xA005
+#define MXL86110_RGMII_MDIO_CFG_EPA0_MASK GENMASK(6, 6)
+#define MXL86110_EXT_RGMII_MDIO_CFG_EBA_MASK GENMASK(5, 5)
+#define MXL86110_EXT_RGMII_MDIO_CFG_BA_MASK GENMASK(4, 0)
+
+#define MXL86110_MAX_LEDS 3
+/* LED registers and defines */
+#define MXL86110_LED0_CFG_REG 0xA00C
+#define MXL86110_LED1_CFG_REG 0xA00D
+#define MXL86110_LED2_CFG_REG 0xA00E
+
+#define MXL86110_LEDX_CFG_BLINK BIT(13)
+#define MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON BIT(12)
+#define MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON BIT(11)
+#define MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON BIT(10)
+#define MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON BIT(9)
+#define MXL86110_LEDX_CFG_LINK_UP_TX_ON BIT(8)
+#define MXL86110_LEDX_CFG_LINK_UP_RX_ON BIT(7)
+#define MXL86110_LEDX_CFG_LINK_UP_1GB_ON BIT(6)
+#define MXL86110_LEDX_CFG_LINK_UP_100MB_ON BIT(5)
+#define MXL86110_LEDX_CFG_LINK_UP_10MB_ON BIT(4)
+#define MXL86110_LEDX_CFG_LINK_UP_COLLISION BIT(3)
+#define MXL86110_LEDX_CFG_LINK_UP_1GB_BLINK BIT(2)
+#define MXL86110_LEDX_CFG_LINK_UP_100MB_BLINK BIT(1)
+#define MXL86110_LEDX_CFG_LINK_UP_10MB_BLINK BIT(0)
+
+#define MXL86110_LED_BLINK_CFG_REG 0xA00F
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_2HZ 0
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_4HZ BIT(0)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_8HZ BIT(1)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_16HZ (BIT(1) | BIT(0))
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_2HZ 0
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_4HZ BIT(2)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_8HZ BIT(3)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_16HZ (BIT(3) | BIT(2))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_50_ON 0
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_67_ON (BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_75_ON (BIT(5))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_83_ON (BIT(5) | BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_50_OFF (BIT(6))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_33_ON (BIT(6) | BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_25_ON (BIT(6) | BIT(5))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_17_ON (BIT(6) | BIT(5) | BIT(4))
+
+/* Chip Configuration Register - COM_EXT_CHIP_CFG */
+#define MXL86110_EXT_CHIP_CFG_REG 0xA001
+#define MXL86110_EXT_CHIP_CFG_RXDLY_ENABLE BIT(8)
+#define MXL86110_EXT_CHIP_CFG_SW_RST_N_MODE BIT(15)
+
+/**
+ * __mxl86110_write_extended_reg() - write to a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Unlocked version of mxl86110_write_extended_reg
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative error code
+ */
+static int __mxl86110_write_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 val)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_write(phydev, MXL86110_EXTD_REG_ADDR_DATA, val);
+}
+
+/**
+ * __mxl86110_read_extended_reg - Read a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: extended register number to read (address written to reg 30)
+ *
+ * Unlocked version of mxl86110_read_extended_reg
+ *
+ * Reads the content of a PHY extended register using the MaxLinear
+ * 2-step access mechanism: write the register address to reg 30 (0x1E),
+ * then read the value from reg 31 (0x1F).
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 16-bit register value on success, or negative errno code on failure.
+ */
+static int __mxl86110_read_extended_reg(struct phy_device *phydev, u16 regnum)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+ return __phy_read(phydev, MXL86110_EXTD_REG_ADDR_DATA);
+}
+
+/**
+ * __mxl86110_modify_extended_reg() - modify bits of a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Note: register value = (old register value & ~mask) | set.
+ * This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative error code
+ */
+static int __mxl86110_modify_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_modify(phydev, MXL86110_EXTD_REG_ADDR_DATA, mask, set);
+}
+
+/**
+ * mxl86110_write_extended_reg() - Write to a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * This function writes to an extended register of the PHY using the
+ * MaxLinear two-step access method (reg 0x1E/0x1F). It handles acquiring
+ * and releasing the MDIO bus lock internally.
+ *
+ * Return: 0 or negative error code
+ */
+static int mxl86110_write_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 val)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_write_extended_reg(phydev, regnum, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * mxl86110_read_extended_reg() - Read a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: extended register number to read
+ *
+ * This function reads from an extended register of the PHY using the
+ * MaxLinear two-step access method (reg 0x1E/0x1F). It handles acquiring
+ * and releasing the MDIO bus lock internally.
+ *
+ * Return: 16-bit register value on success, or negative errno code on failure
+ */
+static int mxl86110_read_extended_reg(struct phy_device *phydev, u16 regnum)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_read_extended_reg(phydev, regnum);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
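A hedged usage sketch of the locked accessor; only the register and mask defines from this file are assumed, the wrapper itself is illustrative:

static int example_wol_enabled(struct phy_device *phydev)
{
	int val;

	/* One locked 0x1E/0x1F sequence: address write, then data read */
	val = mxl86110_read_extended_reg(phydev, MXL86110_EXT_WOL_CFG_REG);
	if (val < 0)
		return val;

	return !!(val & MXL86110_WOL_CFG_WOL_MASK);
}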
+
+/**
+ * mxl86110_get_wol() - report if wake-on-lan is enabled
+ * @phydev: pointer to the phy_device
+ * @wol: a pointer to a &struct ethtool_wolinfo
+ */
+static void mxl86110_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int val;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+ val = mxl86110_read_extended_reg(phydev, MXL86110_EXT_WOL_CFG_REG);
+ if (val >= 0 && (val & MXL86110_WOL_CFG_WOL_MASK))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+/**
+ * mxl86110_set_wol() - enable/disable wake-on-lan
+ * @phydev: pointer to the phy_device
+ * @wol: a pointer to a &struct ethtool_wolinfo
+ *
+ * Configures the WOL Magic Packet MAC
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *netdev;
+ const unsigned char *mac;
+ int ret = 0;
+
+ phy_lock_mdio_bus(phydev);
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ netdev = phydev->attached_dev;
+ if (!netdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Configure the MAC address of the WOL magic packet */
+ mac = netdev->dev_addr;
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG1,
+ ((mac[0] << 8) | mac[1]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG2,
+ ((mac[2] << 8) | mac[3]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG3,
+ ((mac[4] << 8) | mac[5]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_WOL_CFG_REG,
+ MXL86110_WOL_CFG_WOL_MASK,
+ MXL86110_WOL_CFG_WOL_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Enables Wake-on-LAN interrupt in the PHY. */
+ ret = __phy_modify(phydev, PHY_IRQ_ENABLE_REG, 0,
+ PHY_IRQ_ENABLE_REG_WOL);
+ if (ret < 0)
+ goto out;
+
+ phydev_dbg(phydev,
+ "%s, MAC Addr: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ __func__,
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ } else {
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_WOL_CFG_REG,
+ MXL86110_WOL_CFG_WOL_MASK,
+ 0);
+ if (ret < 0)
+ goto out;
+
+ /* Disables Wake-on-LAN interrupt in the PHY. */
+ ret = __phy_modify(phydev, PHY_IRQ_ENABLE_REG,
+ PHY_IRQ_ENABLE_REG_WOL, 0);
+ }
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
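For context, this mirrors what the ethtool core hands down for "wol g". A minimal hedged sketch (the wrapper is hypothetical, and a netdev must already be attached or -ENODEV is returned):

static int example_enable_magic_wol(struct phy_device *phydev)
{
	struct ethtool_wolinfo wol = { .wolopts = WAKE_MAGIC };

	return mxl86110_set_wol(phydev, &wol);
}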
+
+static const unsigned long supported_trgs = (BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_HALF_DUPLEX) |
+ BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX));
+
+static int mxl86110_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_trgs)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int mxl86110_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ val = mxl86110_read_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + index);
+ if (val < 0)
+ return val;
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON)
+ *rules |= BIT(TRIGGER_NETDEV_TX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON)
+ *rules |= BIT(TRIGGER_NETDEV_RX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON)
+ *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON)
+ *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_10MB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_10);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_100MB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_100);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_1GB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ return 0;
+}
+
+static int mxl86110_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10))
+ val |= MXL86110_LEDX_CFG_LINK_UP_10MB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_100))
+ val |= MXL86110_LEDX_CFG_LINK_UP_100MB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ val |= MXL86110_LEDX_CFG_LINK_UP_1GB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_TX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_TX) ||
+ rules & BIT(TRIGGER_NETDEV_RX))
+ val |= MXL86110_LEDX_CFG_BLINK;
+
+ return mxl86110_write_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + index, val);
+}
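The rules bitmap arrives from the LED netdev trigger as TRIGGER_NETDEV_* bits; a hedged sketch of offloading "1G link plus TX/RX activity" to LED 0 (the wrapper itself is hypothetical):

static int example_led0_gig_activity(struct phy_device *phydev)
{
	unsigned long rules = BIT(TRIGGER_NETDEV_LINK_1000) |
			      BIT(TRIGGER_NETDEV_TX) |
			      BIT(TRIGGER_NETDEV_RX);
	int err;

	err = mxl86110_led_hw_is_supported(phydev, 0, rules);
	if (err)
		return err;

	/* TX or RX set -> the BLINK bit is also set by the helper */
	return mxl86110_led_hw_control_set(phydev, 0, rules);
}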
+
+/**
+ * mxl86110_synce_clk_cfg() - applies syncE/clk output configuration
+ * @phydev: pointer to the phy_device
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_synce_clk_cfg(struct phy_device *phydev)
+{
+ u16 mask = 0, val = 0;
+
+ /* Configures the clock output to its default setting as per the
+ * datasheet. This results in a 25MHz clock output being selected in
+ * the COM_EXT_SYNCE_CFG register for SyncE configuration.
+ */
+ val = MXL86110_EXT_SYNCE_CFG_EN_SYNC_E |
+ FIELD_PREP(MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK,
+ MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_25M);
+ mask = MXL86110_EXT_SYNCE_CFG_EN_SYNC_E |
+ MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK |
+ MXL86110_EXT_SYNCE_CFG_CLK_FRE_SEL;
+
+ /* Write clock output configuration */
+ return __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_SYNCE_CFG_REG,
+ mask, val);
+}
+
+/**
+ * mxl86110_broadcast_cfg - Configure MDIO broadcast setting for PHY
+ * @phydev: Pointer to the PHY device structure
+ *
+ * This function configures the MDIO broadcast behavior of the MxL86110 PHY.
+ * Currently, broadcast mode is explicitly disabled by clearing the EPA0 bit
+ * in the RGMII_MDIO_CFG extended register.
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 on success or a negative errno code on failure.
+ */
+static int mxl86110_broadcast_cfg(struct phy_device *phydev)
+{
+ return __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_RGMII_MDIO_CFG,
+ MXL86110_RGMII_MDIO_CFG_EPA0_MASK,
+ 0);
+}
+
+/**
+ * mxl86110_enable_led_activity_blink() - Enable LED activity blinking
+ * @phydev: Pointer to the PHY device structure
+ *
+ * Configure all PHY LEDs to blink on traffic activity regardless of whether
+ * they are ON or OFF. By default, an LED blinks only when it is also in the
+ * ON state; this function sets the LABx fields to enable blinking even when
+ * the LEDs are OFF, so each LED can serve as a pure activity indicator,
+ * independently of its use as a link status indicator.
+ *
+ * Note: Any further LED customization can be performed via the
+ * /sys/class/leds interface; the led_hw_is_supported, led_hw_control_get
+ * and led_hw_control_set callbacks implement this mechanism.
+ *
+ * This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 on success or a negative errno code on failure.
+ */
+static int mxl86110_enable_led_activity_blink(struct phy_device *phydev)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < MXL86110_MAX_LEDS; i++) {
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + i,
+ 0,
+ MXL86110_LEDX_CFG_BLINK);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * mxl86110_config_init() - initialize the PHY
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_config_init(struct phy_device *phydev)
+{
+ u16 val = 0;
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* configure syncE / clk output */
+ ret = mxl86110_synce_clk_cfg(phydev);
+ if (ret < 0)
+ goto out;
+
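+	/* Select the PHY's internal RGMII delays (1950 ps, i.e. ~2 ns)
+	 * per direction according to the interface mode.
+	 */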
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val = MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_RGMII_CFG1_REG,
+ MXL86110_EXT_RGMII_CFG1_FULL_MASK,
+ val);
+ if (ret < 0)
+ goto out;
+
+ /* Configure RXDLY (RGMII Rx Clock Delay) to disable
+ * the default additional delay value on RX_CLK
+ * (2 ns for 125 MHz, 8 ns for 25 MHz/2.5 MHz)
+ * and use just the digital one selected before
+ */
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_CHIP_CFG_REG,
+ MXL86110_EXT_CHIP_CFG_RXDLY_ENABLE,
+ 0);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_enable_led_activity_blink(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_broadcast_cfg(phydev);
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+static struct phy_driver mxl_phy_drvs[] = {
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_MXL86110),
+ .name = "MXL86110 Gigabit Ethernet",
+ .config_init = mxl86110_config_init,
+ .get_wol = mxl86110_get_wol,
+ .set_wol = mxl86110_set_wol,
+ .led_hw_is_supported = mxl86110_led_hw_is_supported,
+ .led_hw_control_get = mxl86110_led_hw_control_get,
+ .led_hw_control_set = mxl86110_led_hw_control_set,
+ },
+};
+
+module_phy_driver(mxl_phy_drvs);
+
+static const struct mdio_device_id __maybe_unused mxl_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_MXL86110) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, mxl_tbl);
+
+MODULE_DESCRIPTION("MaxLinear MXL86110 PHY driver");
+MODULE_AUTHOR("Stefano Radaelli");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 250a018d5546..4c6d905f0a9f 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -19,7 +19,6 @@
#include "nxp-c45-tja11xx.h"
-#define PHY_ID_MASK GENMASK(31, 4)
/* Same id: TJA1103, TJA1104 */
#define PHY_ID_TJA_1103 0x001BB010
/* Same id: TJA1120, TJA1121 */
@@ -763,9 +762,6 @@ static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
struct phy_device *phydev = priv->phydev;
int pin;
- if (perout->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
if (pin < 0)
return pin;
@@ -861,12 +857,6 @@ static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
int pin;
- if (extts->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Sampling on both edges is not supported */
if ((extts->flags & PTP_RISING_EDGE) &&
(extts->flags & PTP_FALLING_EDGE) &&
@@ -962,6 +952,10 @@ static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
.n_pins = ARRAY_SIZE(nxp_c45_ptp_pins),
.n_ext_ts = 1,
.n_per_out = 1,
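+		/* The PTP core validates extts/perout request flags against
+		 * these masks, replacing the explicit checks removed from the
+		 * enable callbacks.
+		 */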
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
+ .supported_perout_flags = PTP_PEROUT_PHASE,
};
priv->ptp_clock = ptp_clock_register(&priv->caps,
@@ -1971,28 +1965,24 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev)
return macsec_ability;
}
-static int tja1103_match_phy_device(struct phy_device *phydev)
+static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
- !nxp_c45_macsec_ability(phydev);
-}
+ if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
+ phydrv->phy_id_mask))
+ return 0;
-static int tja1104_match_phy_device(struct phy_device *phydev)
-{
- return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
- nxp_c45_macsec_ability(phydev);
+ return !nxp_c45_macsec_ability(phydev);
}
-static int tja1120_match_phy_device(struct phy_device *phydev)
+static int tja11xx_macsec_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
- !nxp_c45_macsec_ability(phydev);
-}
+ if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
+ phydrv->phy_id_mask))
+ return 0;
-static int tja1121_match_phy_device(struct phy_device *phydev)
-{
- return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
- nxp_c45_macsec_ability(phydev);
+ return nxp_c45_macsec_ability(phydev);
}
static const struct nxp_c45_regmap tja1120_regmap = {
@@ -2065,6 +2055,7 @@ static const struct nxp_c45_phy_data tja1120_phy_data = {
static struct phy_driver nxp_c45_driver[] = {
{
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
.name = "NXP C45 TJA1103",
.get_features = nxp_c45_get_features,
.driver_data = &tja1103_phy_data,
@@ -2086,9 +2077,10 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
- .match_phy_device = tja1103_match_phy_device,
+ .match_phy_device = tja11xx_no_macsec_match_phy_device,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
.name = "NXP C45 TJA1104",
.get_features = nxp_c45_get_features,
.driver_data = &tja1103_phy_data,
@@ -2110,9 +2102,10 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
- .match_phy_device = tja1104_match_phy_device,
+ .match_phy_device = tja11xx_macsec_match_phy_device,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
.name = "NXP C45 TJA1120",
.get_features = nxp_c45_get_features,
.driver_data = &tja1120_phy_data,
@@ -2135,9 +2128,10 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
- .match_phy_device = tja1120_match_phy_device,
+ .match_phy_device = tja11xx_no_macsec_match_phy_device,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
.name = "NXP C45 TJA1121",
.get_features = nxp_c45_get_features,
.driver_data = &tja1120_phy_data,
@@ -2160,7 +2154,7 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
- .match_phy_device = tja1121_match_phy_device,
+ .match_phy_device = tja11xx_macsec_match_phy_device,
},
};
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 07e94a2478ac..3c38a8ddae2f 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -651,12 +651,14 @@ static int tja1102_match_phy_device(struct phy_device *phydev, bool port0)
return !ret;
}
-static int tja1102_p0_match_phy_device(struct phy_device *phydev)
+static int tja1102_p0_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return tja1102_match_phy_device(phydev, true);
}
-static int tja1102_p1_match_phy_device(struct phy_device *phydev)
+static int tja1102_p1_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return tja1102_match_phy_device(phydev, false);
}
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
index 703321689726..38417e288611 100644
--- a/drivers/net/phy/phy_caps.c
+++ b/drivers/net/phy/phy_caps.c
@@ -188,6 +188,9 @@ phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only)
* When @exact is not set, we return either an exact match, or matching capabilities
* at lower speed, or the lowest matching speed, or NULL.
*
+ * Non-exact matches will try to return an exact speed and duplex match, but
+ * may return matching capabilities with the same speed but a different duplex.
+ *
* Returns: a matched link_capabilities according to the above process, NULL
* otherwise.
*/
@@ -195,7 +198,7 @@ const struct link_capabilities *
phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
bool exact)
{
- const struct link_capabilities *lcap, *last = NULL;
+ const struct link_capabilities *lcap, *match = NULL, *last = NULL;
for_each_link_caps_desc_speed(lcap) {
if (linkmode_intersects(lcap->linkmodes, supported)) {
@@ -204,16 +207,19 @@ phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
if (lcap->speed == speed && lcap->duplex == duplex) {
return lcap;
} else if (!exact) {
- if (lcap->speed <= speed)
- return lcap;
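+				/* Remember the first capability at or below
+				 * the requested speed, but keep scanning this
+				 * speed for an exact duplex match; stop once
+				 * speeds fall below the request.
+				 */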
+ if (!match && lcap->speed <= speed)
+ match = lcap;
+
+ if (lcap->speed < speed)
+ break;
}
}
}
- if (!exact)
- return last;
+ if (!match && !exact)
+ match = last;
- return NULL;
+ return match;
}
EXPORT_SYMBOL_GPL(phy_caps_lookup);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index cc1bfd22fb81..73f9cb2e2844 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -543,20 +543,26 @@ static int phy_scan_fixups(struct phy_device *phydev)
return 0;
}
-static int phy_bus_match(struct device *dev, const struct device_driver *drv)
+/**
+ * genphy_match_phy_device - match a PHY device with a PHY driver
+ * @phydev: target phy_device struct
+ * @phydrv: target phy_driver struct
+ *
+ * Description: Checks whether the given PHY device matches the specified
+ * PHY driver. For Clause 45 PHYs, iterates over the available device
+ * identifiers and compares them against the driver's expected PHY ID,
+ * applying the provided mask. For Clause 22 PHYs, a direct ID comparison
+ * is performed.
+ *
+ * Return: 1 if the PHY device matches the driver, 0 otherwise.
+ */
+int genphy_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- struct phy_device *phydev = to_phy_device(dev);
- const struct phy_driver *phydrv = to_phy_driver(drv);
- const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
- int i;
-
- if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY))
- return 0;
-
- if (phydrv->match_phy_device)
- return phydrv->match_phy_device(phydev);
-
if (phydev->is_c45) {
+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+ int i;
+
for (i = 1; i < num_ids; i++) {
if (phydev->c45_ids.device_ids[i] == 0xffffffff)
continue;
@@ -565,11 +571,27 @@ static int phy_bus_match(struct device *dev, const struct device_driver *drv)
phydrv->phy_id, phydrv->phy_id_mask))
return 1;
}
+
return 0;
- } else {
- return phy_id_compare(phydev->phy_id, phydrv->phy_id,
- phydrv->phy_id_mask);
}
+
+ return phy_id_compare(phydev->phy_id, phydrv->phy_id,
+ phydrv->phy_id_mask);
+}
+EXPORT_SYMBOL_GPL(genphy_match_phy_device);
+
+static int phy_bus_match(struct device *dev, const struct device_driver *drv)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+ const struct phy_driver *phydrv = to_phy_driver(drv);
+
+ if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY))
+ return 0;
+
+ if (phydrv->match_phy_device)
+ return phydrv->match_phy_device(phydev, phydrv);
+
+ return genphy_match_phy_device(phydev, phydrv);
}
static ssize_t
@@ -1727,8 +1749,10 @@ void phy_detach(struct phy_device *phydev)
struct module *ndev_owner = NULL;
struct mii_bus *bus;
- if (phydev->devlink)
+ if (phydev->devlink) {
device_link_del(phydev->devlink);
+ phydev->devlink = NULL;
+ }
if (phydev->sysfs_links) {
if (dev)
@@ -2975,6 +2999,21 @@ int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL_GPL(phy_get_tx_amplitude_gain);
+/**
+ * phy_get_mac_termination - stores MAC termination in @val
+ * @phydev: phy_device struct
+ * @dev: pointer to the device the property is read from
+ * @val: MAC termination resistance in ohms
+ *
+ * Return: 0 on success, < 0 on failure
+ */
+int phy_get_mac_termination(struct phy_device *phydev, struct device *dev,
+ u32 *val)
+{
+ return phy_get_u32_property(dev, "mac-termination-ohms", val);
+}
+EXPORT_SYMBOL_GPL(phy_get_mac_termination);
+
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3236,18 +3275,6 @@ struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
EXPORT_SYMBOL(fwnode_phy_find_device);
/**
- * device_phy_find_device - For the given device, get the phy_device
- * @dev: Pointer to the given device
- *
- * Refer return conditions of fwnode_phy_find_device().
- */
-struct phy_device *device_phy_find_device(struct device *dev)
-{
- return fwnode_phy_find_device(dev_fwnode(dev));
-}
-EXPORT_SYMBOL_GPL(device_phy_find_device);
-
-/**
* fwnode_get_phy_node - Get the phy_node using the named reference.
* @fwnode: Pointer to fwnode from which phy_node has to be obtained.
*
@@ -3262,12 +3289,12 @@ struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode)
/* Only phy-handle is used for ACPI */
phy_node = fwnode_find_reference(fwnode, "phy-handle", 0);
- if (is_acpi_node(fwnode) || !IS_ERR(phy_node))
+ if (!IS_ERR(phy_node) || is_acpi_node(fwnode))
return phy_node;
phy_node = fwnode_find_reference(fwnode, "phy", 0);
- if (IS_ERR(phy_node))
- phy_node = fwnode_find_reference(fwnode, "phy-device", 0);
- return phy_node;
+ if (!IS_ERR(phy_node))
+ return phy_node;
+ return fwnode_find_reference(fwnode, "phy-device", 0);
}
EXPORT_SYMBOL_GPL(fwnode_get_phy_node);
@@ -3554,19 +3581,15 @@ static int __init phy_init(void)
phylib_register_stubs();
rtnl_unlock();
- rc = mdio_bus_init();
- if (rc)
- goto err_ethtool_phy_ops;
-
rc = phy_caps_init();
if (rc)
- goto err_mdio_bus;
+ goto err_ethtool_phy_ops;
features_init();
rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
if (rc)
- goto err_mdio_bus;
+ goto err_ethtool_phy_ops;
rc = phy_driver_register(&genphy_driver, THIS_MODULE);
if (rc)
@@ -3576,8 +3599,6 @@ static int __init phy_init(void)
err_c45:
phy_driver_unregister(&genphy_c45_driver);
-err_mdio_bus:
- mdio_bus_exit();
err_ethtool_phy_ops:
rtnl_lock();
phylib_unregister_stubs();
@@ -3591,7 +3612,6 @@ static void __exit phy_exit(void)
{
phy_driver_unregister(&genphy_c45_driver);
phy_driver_unregister(&genphy_driver);
- mdio_bus_exit();
rtnl_lock();
phylib_unregister_stubs();
ethtool_set_ethtool_phy_ops(NULL);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 1bdd5d8bb5b0..0faa3d97e06b 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -24,13 +24,6 @@
#include "sfp.h"
#include "swphy.h"
-#define SUPPORTED_INTERFACES \
- (SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE | \
- SUPPORTED_BNC | SUPPORTED_AUI | SUPPORTED_Backplane)
-#define ADVERTISED_INTERFACES \
- (ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_FIBRE | \
- ADVERTISED_BNC | ADVERTISED_AUI | ADVERTISED_Backplane)
-
enum {
PHYLINK_DISABLE_STOPPED,
PHYLINK_DISABLE_LINK,
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index 893c82479671..c3dcb6257430 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -17,9 +18,15 @@
#include "realtek.h"
-#define RTL821x_PHYSR 0x11
-#define RTL821x_PHYSR_DUPLEX BIT(13)
-#define RTL821x_PHYSR_SPEED GENMASK(15, 14)
+#define RTL8201F_IER 0x13
+
+#define RTL8201F_ISR 0x1e
+#define RTL8201F_ISR_ANERR BIT(15)
+#define RTL8201F_ISR_DUPLEX BIT(13)
+#define RTL8201F_ISR_LINK BIT(11)
+#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
+ RTL8201F_ISR_DUPLEX | \
+ RTL8201F_ISR_LINK)
#define RTL821x_INER 0x12
#define RTL8211B_INER_INIT 0x6400
@@ -29,15 +36,48 @@
#define RTL821x_INSR 0x13
#define RTL821x_EXT_PAGE_SELECT 0x1e
+
#define RTL821x_PAGE_SELECT 0x1f
+#define RTL821x_SET_EXT_PAGE 0x07
+
+/* RTL8211E extension page 44/0x2c */
+#define RTL8211E_LEDCR_EXT_PAGE 0x2c
+#define RTL8211E_LEDCR1 0x1a
+#define RTL8211E_LEDCR1_ACT_TXRX BIT(4)
+#define RTL8211E_LEDCR1_MASK BIT(4)
+#define RTL8211E_LEDCR1_SHIFT 1
+
+#define RTL8211E_LEDCR2 0x1c
+#define RTL8211E_LEDCR2_LINK_1000 BIT(2)
+#define RTL8211E_LEDCR2_LINK_100 BIT(1)
+#define RTL8211E_LEDCR2_LINK_10 BIT(0)
+#define RTL8211E_LEDCR2_MASK GENMASK(2, 0)
+#define RTL8211E_LEDCR2_SHIFT 4
+
+/* RTL8211E extension page 164/0xa4 */
+#define RTL8211E_RGMII_EXT_PAGE 0xa4
+#define RTL8211E_RGMII_DELAY 0x1c
+#define RTL8211E_CTRL_DELAY BIT(13)
+#define RTL8211E_TX_DELAY BIT(12)
+#define RTL8211E_RX_DELAY BIT(11)
+#define RTL8211E_DELAY_MASK GENMASK(13, 11)
+/* RTL8211F PHY configuration */
+#define RTL8211F_PHYCR_PAGE 0xa43
#define RTL8211F_PHYCR1 0x18
+#define RTL8211F_ALDPS_PLL_OFF BIT(1)
+#define RTL8211F_ALDPS_ENABLE BIT(2)
+#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+
#define RTL8211F_PHYCR2 0x19
#define RTL8211F_CLKOUT_EN BIT(0)
#define RTL8211F_PHYCR2_PHY_EEE_ENABLE BIT(5)
+#define RTL8211F_INSR_PAGE 0xa43
#define RTL8211F_INSR 0x1d
+/* RTL8211F LED configuration */
+#define RTL8211F_LEDCR_PAGE 0xd04
#define RTL8211F_LEDCR 0x10
#define RTL8211F_LEDCR_MODE BIT(15)
#define RTL8211F_LEDCR_ACT_TXRX BIT(4)
@@ -47,25 +87,32 @@
#define RTL8211F_LEDCR_MASK GENMASK(4, 0)
#define RTL8211F_LEDCR_SHIFT 5
+/* RTL8211F RGMII configuration */
+#define RTL8211F_RGMII_PAGE 0xd08
+
+#define RTL8211F_TXCR 0x11
#define RTL8211F_TX_DELAY BIT(8)
+
+#define RTL8211F_RXCR 0x15
#define RTL8211F_RX_DELAY BIT(3)
-#define RTL8211F_ALDPS_PLL_OFF BIT(1)
-#define RTL8211F_ALDPS_ENABLE BIT(2)
-#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+/* RTL8211F WOL interrupt configuration */
+#define RTL8211F_INTBCR_PAGE 0xd40
+#define RTL8211F_INTBCR 0x16
+#define RTL8211F_INTBCR_INTB_PMEB BIT(5)
-#define RTL8211E_CTRL_DELAY BIT(13)
-#define RTL8211E_TX_DELAY BIT(12)
-#define RTL8211E_RX_DELAY BIT(11)
+/* RTL8211F WOL settings */
+#define RTL8211F_WOL_SETTINGS_PAGE 0xd8a
+#define RTL8211F_WOL_SETTINGS_EVENTS 16
+#define RTL8211F_WOL_EVENT_MAGIC BIT(12)
+#define RTL8211F_WOL_SETTINGS_STATUS 17
+#define RTL8211F_WOL_STATUS_RESET (BIT(15) | 0x1fff)
-#define RTL8201F_ISR 0x1e
-#define RTL8201F_ISR_ANERR BIT(15)
-#define RTL8201F_ISR_DUPLEX BIT(13)
-#define RTL8201F_ISR_LINK BIT(11)
-#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
- RTL8201F_ISR_DUPLEX | \
- RTL8201F_ISR_LINK)
-#define RTL8201F_IER 0x13
+/* RTL8211F unique physical and multicast address (WOL) */
+#define RTL8211F_PHYSICAL_ADDR_PAGE 0xd8c
+#define RTL8211F_PHYSICAL_ADDR_WORD0 16
+#define RTL8211F_PHYSICAL_ADDR_WORD1 17
+#define RTL8211F_PHYSICAL_ADDR_WORD2 18
#define RTL822X_VND1_SERDES_OPTION 0x697a
#define RTL822X_VND1_SERDES_OPTION_MODE_MASK GENMASK(5, 0)
@@ -111,8 +158,10 @@
#define RTL_8221B_VB_CG 0x001cc849
#define RTL_8221B_VN_CG 0x001cc84a
#define RTL_8251B 0x001cc862
+#define RTL_8261C 0x001cc890
-#define RTL8211F_LED_COUNT 3
+/* RTL8211E and RTL8211F support up to three LEDs */
+#define RTL8211x_LED_COUNT 3
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -123,6 +172,7 @@ struct rtl821x_priv {
u16 phycr2;
bool has_phycr2;
struct clk *clk;
+ u32 saved_wolopts;
};
static int rtl821x_read_page(struct phy_device *phydev)
@@ -135,6 +185,36 @@ static int rtl821x_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
}
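+
+/* Extension pages (e.g. on the RTL8211E) are reached in two steps: select
+ * page 0x07 (RTL821x_SET_EXT_PAGE), then write the extension page number
+ * to RTL821x_EXT_PAGE_SELECT before accessing the target register.
+ */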
+static int rtl821x_read_ext_page(struct phy_device *phydev, u16 ext_page,
+ u32 regnum)
+{
+ int oldpage, ret = 0;
+
+ oldpage = phy_select_page(phydev, RTL821x_SET_EXT_PAGE);
+ if (oldpage >= 0) {
+ ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, ext_page);
+ if (ret == 0)
+ ret = __phy_read(phydev, regnum);
+ }
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
+static int rtl821x_modify_ext_page(struct phy_device *phydev, u16 ext_page,
+ u32 regnum, u16 mask, u16 set)
+{
+ int oldpage, ret = 0;
+
+ oldpage = phy_select_page(phydev, RTL821x_SET_EXT_PAGE);
+ if (oldpage >= 0) {
+ ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, ext_page);
+ if (ret == 0)
+ ret = __phy_modify(phydev, regnum, mask, set);
+ }
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
static int rtl821x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -151,7 +231,7 @@ static int rtl821x_probe(struct phy_device *phydev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get phy clock\n");
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR1);
+ ret = phy_read_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1);
if (ret < 0)
return ret;
@@ -161,7 +241,7 @@ static int rtl821x_probe(struct phy_device *phydev)
priv->has_phycr2 = !(phy_id == RTL_8211FVD_PHYID);
if (priv->has_phycr2) {
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
+ ret = phy_read_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2);
if (ret < 0)
return ret;
@@ -197,7 +277,7 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
{
int err;
- err = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ err = phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
return (err < 0) ? err : 0;
}
@@ -340,7 +420,7 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
- irq_status = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ irq_status = phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
@@ -354,6 +434,53 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
+static void rtl8211f_get_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
+{
+	int wol_events;
+
+	wol->supported = WAKE_MAGIC;
+
+	/* Treat read failures as "no WOL events" so a negative errno is not
+	 * misread as event flags.
+	 */
+	wol_events = phy_read_paged(dev, RTL8211F_WOL_SETTINGS_PAGE,
+				    RTL8211F_WOL_SETTINGS_EVENTS);
+	if (wol_events >= 0 && (wol_events & RTL8211F_WOL_EVENT_MAGIC))
+		wol->wolopts = WAKE_MAGIC;
+}
+
+static int rtl8211f_set_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
+{
+ const u8 *mac_addr = dev->attached_dev->dev_addr;
+ int oldpage;
+
+ oldpage = phy_save_page(dev);
+ if (oldpage < 0)
+ goto err;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Store the device address for the magic packet */
+ rtl821x_write_page(dev, RTL8211F_PHYSICAL_ADDR_PAGE);
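+		/* Each 16-bit word holds two MAC bytes, low byte first */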
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD0, mac_addr[1] << 8 | (mac_addr[0]));
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD1, mac_addr[3] << 8 | (mac_addr[2]));
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD2, mac_addr[5] << 8 | (mac_addr[4]));
+
+ /* Enable magic packet matching and reset WOL status */
+ rtl821x_write_page(dev, RTL8211F_WOL_SETTINGS_PAGE);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, RTL8211F_WOL_EVENT_MAGIC);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_STATUS, RTL8211F_WOL_STATUS_RESET);
+
+ /* Enable the WOL interrupt */
+ rtl821x_write_page(dev, RTL8211F_INTBCR_PAGE);
+ __phy_set_bits(dev, RTL8211F_INTBCR, RTL8211F_INTBCR_INTB_PMEB);
+ } else {
+ /* Disable the WOL interrupt */
+ rtl821x_write_page(dev, RTL8211F_INTBCR_PAGE);
+ __phy_clear_bits(dev, RTL8211F_INTBCR, RTL8211F_INTBCR_INTB_PMEB);
+
+ /* Disable magic packet matching and reset WOL status */
+ rtl821x_write_page(dev, RTL8211F_WOL_SETTINGS_PAGE);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, 0);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_STATUS, RTL8211F_WOL_STATUS_RESET);
+ }
+
+err:
+ return phy_restore_page(dev, oldpage, 0);
+}
+
static int rtl8211_config_aneg(struct phy_device *phydev)
{
int ret;
@@ -390,7 +517,7 @@ static int rtl8211f_config_init(struct phy_device *phydev)
u16 val_txdly, val_rxdly;
int ret;
- ret = phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1,
+ ret = phy_modify_paged_changed(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1,
RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF,
priv->phycr1);
if (ret < 0) {
@@ -424,7 +551,8 @@ static int rtl8211f_config_init(struct phy_device *phydev)
return 0;
}
- ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY,
+ ret = phy_modify_paged_changed(phydev, RTL8211F_RGMII_PAGE,
+ RTL8211F_TXCR, RTL8211F_TX_DELAY,
val_txdly);
if (ret < 0) {
dev_err(dev, "Failed to update the TX delay register\n");
@@ -439,7 +567,8 @@ static int rtl8211f_config_init(struct phy_device *phydev)
str_enabled_disabled(val_txdly));
}
- ret = phy_modify_paged_changed(phydev, 0xd08, 0x15, RTL8211F_RX_DELAY,
+ ret = phy_modify_paged_changed(phydev, RTL8211F_RGMII_PAGE,
+ RTL8211F_RXCR, RTL8211F_RX_DELAY,
val_rxdly);
if (ret < 0) {
dev_err(dev, "Failed to update the RX delay register\n");
@@ -455,14 +584,15 @@ static int rtl8211f_config_init(struct phy_device *phydev)
}
/* Disable PHY-mode EEE so LPI is passed to the MAC */
- ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2,
RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
if (ret)
return ret;
if (priv->has_phycr2) {
- ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
- RTL8211F_CLKOUT_EN, priv->phycr2);
+ ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE,
+ RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN,
+ priv->phycr2);
if (ret < 0) {
dev_err(dev, "clkout configuration failed: %pe\n",
ERR_PTR(ret));
@@ -509,7 +639,7 @@ static int rtl821x_resume(struct phy_device *phydev)
return 0;
}
-static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index,
+static int rtl8211x_led_hw_is_supported(struct phy_device *phydev, u8 index,
unsigned long rules)
{
const unsigned long mask = BIT(TRIGGER_NETDEV_LINK_10) |
@@ -528,9 +658,11 @@ static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index,
* rates and Active indication always at all three 10+100+1000
* link rates.
* This code currently uses mode B only.
+ *
+	 * The RTL8211E LEDs have a single mode that works like RTL8211F mode B.
*/
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
/* Filter out any other unsupported triggers. */
@@ -549,7 +681,7 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
{
int val;
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
val = phy_read_paged(phydev, 0xd04, RTL8211F_LEDCR);
@@ -560,17 +692,17 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
val &= RTL8211F_LEDCR_MASK;
if (val & RTL8211F_LEDCR_LINK_10)
- set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
if (val & RTL8211F_LEDCR_LINK_100)
- set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
if (val & RTL8211F_LEDCR_LINK_1000)
- set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
if (val & RTL8211F_LEDCR_ACT_TXRX) {
- set_bit(TRIGGER_NETDEV_RX, rules);
- set_bit(TRIGGER_NETDEV_TX, rules);
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+ __set_bit(TRIGGER_NETDEV_TX, rules);
}
return 0;
@@ -582,7 +714,7 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
const u16 mask = RTL8211F_LEDCR_MASK << (RTL8211F_LEDCR_SHIFT * index);
u16 reg = 0;
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
@@ -605,9 +737,86 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
return phy_modify_paged(phydev, 0xd04, RTL8211F_LEDCR, mask, reg);
}
+static int rtl8211e_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int ret;
+ u16 cr1, cr2;
+
+ if (index >= RTL8211x_LED_COUNT)
+ return -EINVAL;
+
+ ret = rtl821x_read_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR1);
+ if (ret < 0)
+ return ret;
+
+	cr1 = ret >> (RTL8211E_LEDCR1_SHIFT * index);
+ if (cr1 & RTL8211E_LEDCR1_ACT_TXRX) {
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+ __set_bit(TRIGGER_NETDEV_TX, rules);
+ }
+
+ ret = rtl821x_read_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR2);
+ if (ret < 0)
+ return ret;
+
+	cr2 = ret >> (RTL8211E_LEDCR2_SHIFT * index);
+ if (cr2 & RTL8211E_LEDCR2_LINK_10)
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
+
+ if (cr2 & RTL8211E_LEDCR2_LINK_100)
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
+
+ if (cr2 & RTL8211E_LEDCR2_LINK_1000)
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+	return 0;
+}
+
+static int rtl8211e_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ const u16 cr1mask =
+ RTL8211E_LEDCR1_MASK << (RTL8211E_LEDCR1_SHIFT * index);
+ const u16 cr2mask =
+ RTL8211E_LEDCR2_MASK << (RTL8211E_LEDCR2_SHIFT * index);
+ u16 cr1 = 0, cr2 = 0;
+ int ret;
+
+ if (index >= RTL8211x_LED_COUNT)
+ return -EINVAL;
+
+ if (test_bit(TRIGGER_NETDEV_RX, &rules) ||
+ test_bit(TRIGGER_NETDEV_TX, &rules)) {
+ cr1 |= RTL8211E_LEDCR1_ACT_TXRX;
+ }
+
+ cr1 <<= RTL8211E_LEDCR1_SHIFT * index;
+ ret = rtl821x_modify_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR1, cr1mask, cr1);
+ if (ret < 0)
+ return ret;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ cr2 |= RTL8211E_LEDCR2_LINK_10;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ cr2 |= RTL8211E_LEDCR2_LINK_100;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ cr2 |= RTL8211E_LEDCR2_LINK_1000;
+
+	cr2 <<= RTL8211E_LEDCR2_SHIFT * index;
+
+	return rtl821x_modify_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+				       RTL8211E_LEDCR2, cr2mask, cr2);
+}
+
static int rtl8211e_config_init(struct phy_device *phydev)
{
- int ret = 0, oldpage;
u16 val;
/* enable TX/RX delay for rgmii-* modes, and disable them for rgmii. */
@@ -637,20 +846,9 @@ static int rtl8211e_config_init(struct phy_device *phydev)
* 12 = RX Delay, 11 = TX Delay
* 10:0 = Test && debug settings reserved by realtek
*/
- oldpage = phy_select_page(phydev, 0x7);
- if (oldpage < 0)
- goto err_restore_page;
-
- ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4);
- if (ret)
- goto err_restore_page;
-
- ret = __phy_modify(phydev, 0x1c, RTL8211E_CTRL_DELAY
- | RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
- val);
-
-err_restore_page:
- return phy_restore_page(phydev, oldpage, ret);
+ return rtl821x_modify_ext_page(phydev, RTL8211E_RGMII_EXT_PAGE,
+ RTL8211E_RGMII_DELAY,
+ RTL8211E_DELAY_MASK, val);
}
static int rtl8211b_suspend(struct phy_device *phydev)
@@ -1117,13 +1315,15 @@ static bool rtlgen_supports_mmd(struct phy_device *phydev)
return val > 0;
}
-static int rtlgen_match_phy_device(struct phy_device *phydev)
+static int rtlgen_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_GENERIC_PHYID &&
!rtlgen_supports_2_5gbps(phydev);
}
-static int rtl8226_match_phy_device(struct phy_device *phydev)
+static int rtl8226_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_GENERIC_PHYID &&
rtlgen_supports_2_5gbps(phydev) &&
@@ -1139,32 +1339,38 @@ static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id,
return !is_c45 && (id == phydev->phy_id);
}
-static int rtl8221b_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev);
}
-static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
}
-static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
}
-static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
}
-static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
}
-static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
+static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if (phydev->is_c45)
return false;
@@ -1173,6 +1379,7 @@ static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
case RTL_GENERIC_PHYID:
case RTL_8221B:
case RTL_8251B:
+ case RTL_8261C:
case 0x001cc841:
break;
default:
@@ -1182,7 +1389,8 @@ static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
return rtlgen_supports_2_5gbps(phydev) && !rtlgen_supports_mmd(phydev);
}
-static int rtl8251b_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8251b_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8251B, true);
}
@@ -1392,6 +1600,9 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ .led_hw_is_supported = rtl8211x_led_hw_is_supported,
+ .led_hw_control_get = rtl8211e_led_hw_control_get,
+ .led_hw_control_set = rtl8211e_led_hw_control_set,
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
@@ -1400,12 +1611,14 @@ static struct phy_driver realtek_drvs[] = {
.read_status = rtlgen_read_status,
.config_intr = &rtl8211f_config_intr,
.handle_interrupt = rtl8211f_handle_interrupt,
+ .set_wol = rtl8211f_set_wol,
+ .get_wol = rtl8211f_get_wol,
.suspend = rtl821x_suspend,
.resume = rtl821x_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.flags = PHY_ALWAYS_CALL_SUSPEND,
- .led_hw_is_supported = rtl8211f_led_hw_is_supported,
+ .led_hw_is_supported = rtl8211x_led_hw_is_supported,
.led_hw_control_get = rtl8211f_led_hw_control_get,
.led_hw_control_set = rtl8211f_led_hw_control_set,
}, {
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 752d4bf7bb99..46c5ff7d7b56 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -67,7 +67,8 @@ static int teranetics_read_status(struct phy_device *phydev)
return 0;
}
-static int teranetics_match_phy_device(struct phy_device *phydev)
+static int teranetics_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 53463767cc43..def84e87e05b 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1131,6 +1131,8 @@ static const struct file_operations ppp_device_fops = {
.llseek = noop_llseek,
};
+static void ppp_nl_dellink(struct net_device *dev, struct list_head *head);
+
static __net_init int ppp_init_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
@@ -1146,28 +1148,20 @@ static __net_init int ppp_init_net(struct net *net)
return 0;
}
-static __net_exit void ppp_exit_net(struct net *net)
+static __net_exit void ppp_exit_rtnl_net(struct net *net,
+ struct list_head *dev_to_kill)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
- struct net_device *dev;
- struct net_device *aux;
struct ppp *ppp;
- LIST_HEAD(list);
int id;
- rtnl_lock();
- for_each_netdev_safe(net, dev, aux) {
- if (dev->netdev_ops == &ppp_netdev_ops)
- unregister_netdevice_queue(dev, &list);
- }
-
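+	/* ->exit_rtnl runs with RTNL already held; queue every unit on
+	 * dev_to_kill so the core unregisters them in one batch.
+	 */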
idr_for_each_entry(&pn->units_idr, ppp, id)
- /* Skip devices already unregistered by previous loop */
- if (!net_eq(dev_net(ppp->dev), net))
- unregister_netdevice_queue(ppp->dev, &list);
+ ppp_nl_dellink(ppp->dev, dev_to_kill);
+}
- unregister_netdevice_many(&list);
- rtnl_unlock();
+static __net_exit void ppp_exit_net(struct net *net)
+{
+ struct ppp_net *pn = net_generic(net, ppp_net_id);
mutex_destroy(&pn->all_ppp_mutex);
idr_destroy(&pn->units_idr);
@@ -1177,6 +1171,7 @@ static __net_exit void ppp_exit_net(struct net *net)
static struct pernet_operations ppp_net_ops = {
.init = ppp_init_net,
+ .exit_rtnl = ppp_exit_rtnl_net,
.exit = ppp_exit_net,
.id = &ppp_net_id,
.size = sizeof(struct ppp_net),
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 3cfa17cd5073..c889fb374703 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -1378,7 +1378,7 @@ module_exit(slip_exit);
static void sl_outfill(struct timer_list *t)
{
- struct slip *sl = from_timer(sl, t, outfill_timer);
+ struct slip *sl = timer_container_of(sl, t, outfill_timer);
spin_lock(&sl->lock);
@@ -1409,7 +1409,7 @@ out:
static void sl_keepalive(struct timer_list *t)
{
- struct slip *sl = from_timer(sl, t, keepalive_timer);
+ struct slip *sl = timer_container_of(sl, t, keepalive_timer);
spin_lock(&sl->lock);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index d4ece538f1b2..bdf0788d8e66 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -923,7 +923,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
unsigned int __user *up = argp;
unsigned short u;
int __user *sp = argp;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
int s;
int ret;
@@ -1000,16 +1000,17 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
return -ENOLINK;
}
ret = 0;
- dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
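+		/* sockaddr_storage avoids overflowing the stack buffer for
+		 * devices whose addr_len exceeds sizeof(struct sockaddr);
+		 * only ifr_hwaddr-sized data is copied back to user space.
+		 */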
+ dev_get_mac_address((struct sockaddr *)&ss, dev_net(tap->dev),
+ tap->dev->name);
if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
- copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
+ copy_to_user(&ifr->ifr_hwaddr, &ss, sizeof(ifr->ifr_hwaddr)))
ret = -EFAULT;
tap_put_tap_dev(tap);
rtnl_unlock();
return ret;
case SIOCSIFHWADDR:
- if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+ if (copy_from_user(&ss, &ifr->ifr_hwaddr, sizeof(ifr->ifr_hwaddr)))
return -EFAULT;
rtnl_lock();
tap = tap_get_tap_dev(q);
@@ -1017,7 +1018,10 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
rtnl_unlock();
return -ENOLINK;
}
- ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
+ if (tap->dev->addr_len > sizeof(ifr->ifr_hwaddr))
+ ret = -EINVAL;
+ else
+ ret = dev_set_mac_address_user(tap->dev, &ss, NULL);
tap_put_tap_dev(tap);
rtnl_unlock();
return ret;
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index d8fc0c79745d..8bc56186b2a3 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -55,7 +55,7 @@ static int __set_port_dev_addr(struct net_device *port_dev,
memcpy(addr.__data, dev_addr, port_dev->addr_len);
addr.ss_family = port_dev->type;
- return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
+ return dev_set_mac_address(port_dev, &addr, NULL);
}
static int team_port_set_orig_dev_addr(struct team_port *port)
@@ -1778,8 +1778,8 @@ static void team_change_rx_flags(struct net_device *dev, int change)
struct team_port *port;
int inc;
- rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list) {
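+	/* dev_set_promiscuity() and dev_set_allmulti() may sleep, so walk
+	 * the port list under the team lock instead of an RCU read side.
+	 */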
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
if (change & IFF_PROMISC) {
inc = dev->flags & IFF_PROMISC ? 1 : -1;
dev_set_promiscuity(port->dev, inc);
@@ -1789,7 +1789,7 @@ static void team_change_rx_flags(struct net_device *dev, int change)
dev_set_allmulti(port->dev, inc);
}
}
- rcu_read_unlock();
+ mutex_unlock(&team->lock);
}
static void team_set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7babd1e9a378..f8c5e2fd04df 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -377,7 +377,7 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
static void tun_flow_cleanup(struct timer_list *t)
{
- struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
+ struct tun_struct *tun = timer_container_of(tun, t, flow_gc_timer);
unsigned long delay = tun->ageing_time;
unsigned long next_timer = jiffies + delay;
unsigned long count = 0;
@@ -3193,7 +3193,13 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case SIOCSIFHWADDR:
/* Set hw address */
- ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
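+		/* ifr_hwaddr holds at most sizeof(struct sockaddr) address
+		 * bytes; reject devices with longer hardware addresses.
+		 */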
+ if (tun->dev->addr_len > sizeof(ifr.ifr_hwaddr)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = dev_set_mac_address_user(tun->dev,
+ (struct sockaddr_storage *)&ifr.ifr_hwaddr,
+ NULL);
break;
case TUNGETSNDBUF:
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3c360d4f0635..370b32fc2588 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -101,9 +101,7 @@ config USB_RTL8152
select MII
select PHYLIB
select CRC32
- select CRYPTO
- select CRYPTO_HASH
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA256
help
This option adds support for Realtek RTL8152 based USB 2.0
10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index ff5be2cbf17b..9201ee10a13f 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -30,11 +30,14 @@ static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, data, size);
- if (unlikely(ret < 0))
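+	/* A short read leaves the caller's buffer partially filled, so treat
+	 * it as an error: preserve negative errno codes and map short reads
+	 * to -ENODATA.
+	 */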
+ if (unlikely(ret < size)) {
netdev_warn(dev->net,
"Failed to read(0x%x) reg index 0x%04x: %d\n",
cmd, index, ret);
+ ret = ret < 0 ? ret : -ENODATA;
+ }
+
return ret;
}
@@ -46,11 +49,14 @@ static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely(ret < size)) {
netdev_warn(dev->net,
"Failed to read(0x%x) reg index 0x%04x: %d\n",
cmd, index, ret);
+ ret = ret < 0 ? ret : -ENODATA;
+ }
+
return ret;
}
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 74162190bccc..8531b804021a 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -224,7 +224,6 @@ int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm);
u16 asix_read_medium_status(struct usbnet *dev, int in_pm);
int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm);
-void asix_adjust_link(struct net_device *netdev);
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 72ffc89b477a..7fd763917ae2 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -414,28 +414,6 @@ int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
return ret;
}
-/* set MAC link settings according to information from phylib */
-void asix_adjust_link(struct net_device *netdev)
-{
- struct phy_device *phydev = netdev->phydev;
- struct usbnet *dev = netdev_priv(netdev);
- u16 mode = 0;
-
- if (phydev->link) {
- mode = AX88772_MEDIUM_DEFAULT;
-
- if (phydev->duplex == DUPLEX_HALF)
- mode &= ~AX_MEDIUM_FD;
-
- if (phydev->speed != SPEED_100)
- mode &= ~AX_MEDIUM_PS;
- }
-
- asix_write_medium_mode(dev, mode, 0);
- phy_print_status(phydev);
- usbnet_link_change(dev, phydev->link, 0);
-}
-
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
{
int ret;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index da24941a6e44..9b0318fb50b5 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -752,7 +752,6 @@ static void ax88772_mac_link_down(struct phylink_config *config,
struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
asix_write_medium_mode(dev, 0, 0);
- usbnet_link_change(dev, false, false);
}
static void ax88772_mac_link_up(struct phylink_config *config,
@@ -783,7 +782,6 @@ static void ax88772_mac_link_up(struct phylink_config *config,
m |= AX_MEDIUM_RFC;
asix_write_medium_mode(dev, m, 0);
- usbnet_link_change(dev, true, false);
}
static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
@@ -1350,10 +1348,9 @@ static const struct driver_info ax88772_info = {
.description = "ASIX AX88772 USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
@@ -1362,11 +1359,9 @@ static const struct driver_info ax88772b_info = {
.description = "ASIX AX88772B USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
@@ -1376,11 +1371,9 @@ static const struct driver_info lxausb_t1l_info = {
.description = "Linux Automation GmbH USB 10Base-T1L",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
@@ -1412,10 +1405,8 @@ static const struct driver_info hg20f9_info = {
.description = "HG20F9 USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index fc5e441aa7c3..6759388692f8 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -602,7 +602,7 @@ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
static void catc_stats_timer(struct timer_list *t)
{
- struct catc *catc = from_timer(catc, t, timer);
+ struct catc *catc = timer_container_of(catc, t, timer);
int i;
for (i = 0; i < 8; i++)
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index f69d9b902da0..a206ffa76f1b 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -178,6 +178,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(netdev);
unsigned char buff[2];
+ int ret;
netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
__func__, phy_id, loc);
@@ -185,8 +186,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (phy_id != 0)
return -ENODEV;
- control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
- CONTROL_TIMEOUT_MS);
+ ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
+ CONTROL_TIMEOUT_MS);
+ if (ret < 0)
+ return ret;
return (buff[0] | buff[1] << 8);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index e4f1663b6204..f53e255116ea 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1554,10 +1554,12 @@ static void lan78xx_set_multicast(struct net_device *netdev)
schedule_work(&pdata->set_multicast);
}
+static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
+ bool tx_pause, bool rx_pause);
+
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
u16 lcladv, u16 rmtadv)
{
- u32 flow = 0, fct_flow = 0;
u8 cap;
if (dev->fc_autoneg)
@@ -1565,27 +1567,13 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
else
cap = dev->fc_request_control;
- if (cap & FLOW_CTRL_TX)
- flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
-
- if (cap & FLOW_CTRL_RX)
- flow |= FLOW_CR_RX_FCEN_;
-
- if (dev->udev->speed == USB_SPEED_SUPER)
- fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
- else if (dev->udev->speed == USB_SPEED_HIGH)
- fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
-
netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
- lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
-
- /* threshold value should be set before enabling flow */
- lan78xx_write_reg(dev, FLOW, flow);
-
- return 0;
+ return lan78xx_configure_flowcontrol(dev,
+ cap & FLOW_CTRL_TX,
+ cap & FLOW_CTRL_RX);
}
static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
@@ -1636,15 +1624,30 @@ exit_unlock:
return ret;
}
+/**
+ * lan78xx_phy_int_ack - Acknowledge PHY interrupt
+ * @dev: pointer to the LAN78xx device structure
+ *
+ * This function acknowledges the PHY interrupt by setting the
+ * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
+{
+ return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+}
+
+static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed);
+
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct phy_device *phydev = dev->net->phydev;
struct ethtool_link_ksettings ecmd;
int ladv, radv, ret, link;
- u32 buf;
/* clear LAN78xx interrupt status */
- ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+ ret = lan78xx_phy_int_ack(dev);
if (unlikely(ret < 0))
return ret;
@@ -1667,36 +1670,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
phy_ethtool_ksettings_get(phydev, &ecmd);
- if (dev->udev->speed == USB_SPEED_SUPER) {
- if (ecmd.base.speed == 1000) {
- /* disable U2 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- /* enable U1 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf |= USB_CFG1_DEV_U1_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- } else {
- /* enable U1 & U2 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf |= USB_CFG1_DEV_U2_INIT_EN_;
- buf |= USB_CFG1_DEV_U1_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- }
- }
+ ret = lan78xx_configure_usb(dev, ecmd.base.speed);
+ if (ret < 0)
+ return ret;
ladv = phy_read(phydev, MII_ADVERTISE);
if (ladv < 0)
@@ -2456,14 +2432,11 @@ static struct irq_chip lan78xx_irqchip = {
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
- struct device_node *of_node;
struct irq_domain *irqdomain;
unsigned int irqmap = 0;
u32 buf;
int ret = 0;
- of_node = dev->udev->dev.parent->of_node;
-
mutex_init(&dev->domain_data.irq_lock);
ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
@@ -2475,8 +2448,10 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqchip = &lan78xx_irqchip;
dev->domain_data.irq_handler = handle_simple_irq;
- irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
- &chip_domain_ops, &dev->domain_data);
+ irqdomain = irq_domain_create_simple(of_fwnode_handle(dev->udev->dev.parent->of_node),
+ MAX_INT_EP, 0,
+ &chip_domain_ops,
+ &dev->domain_data);
if (irqdomain) {
/* create mapping for PHY interrupt */
irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
@@ -2508,48 +2483,323 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqdomain = NULL;
}
-static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
+/**
+ * lan78xx_configure_usb - Configure USB link power settings
+ * @dev: pointer to the LAN78xx device structure
+ * @speed: negotiated Ethernet link speed (in Mbps)
+ *
+ * This function configures U1/U2 link power management for SuperSpeed
+ * USB devices based on the current Ethernet link speed. It uses the
+ * USB_CFG1 register to enable or disable U1 and U2 low-power states.
+ *
+ * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
+ * LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
{
- u32 buf;
+ u32 mask, val;
int ret;
+
+ /* Only configure USB settings for SuperSpeed devices */
+ if (dev->udev->speed != USB_SPEED_SUPER)
+ return 0;
+
+ /* LAN7850 does not support USB 3.x */
+ if (dev->chipid == ID_REV_CHIP_ID_7850_) {
+ netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
+ return 0;
+ }
+
+ switch (speed) {
+ case SPEED_1000:
+ /* Disable U2, enable U1 */
+ ret = lan78xx_update_reg(dev, USB_CFG1,
+ USB_CFG1_DEV_U2_INIT_EN_, 0);
+ if (ret < 0)
+ return ret;
+
+ return lan78xx_update_reg(dev, USB_CFG1,
+ USB_CFG1_DEV_U1_INIT_EN_,
+ USB_CFG1_DEV_U1_INIT_EN_);
+
+ case SPEED_100:
+ case SPEED_10:
+ /* Enable both U1 and U2 */
+ mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
+ val = mask;
+ return lan78xx_update_reg(dev, USB_CFG1, mask, val);
+
+ default:
+ netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
+ return -EINVAL;
+ }
+}
+
+/**
+ * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
+ * @dev: pointer to the LAN78xx device structure
+ * @tx_pause: enable transmission of pause frames
+ * @rx_pause: enable reception of pause frames
+ *
+ * This function configures the LAN78xx flow control settings by writing
+ * to the FLOW and FCT_FLOW registers. The pause time is set to the
+ * maximum allowed value (65535 quanta). FIFO thresholds are selected
+ * based on USB speed.
+ *
+ * The Pause Time field is measured in units of 512-bit times (quanta):
+ * - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
+ * - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
+ * - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
+ *
+ * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
+ * - RXUSED is the number of bytes used in the RX FIFO
+ * - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
+ * - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
+ * - Both thresholds are encoded in units of 512 bytes (rounded up)
+ *
+ * Thresholds differ by USB speed because available USB bandwidth
+ * affects how fast packets can be drained from the RX FIFO:
+ * - USB 3.x (SuperSpeed):
+ * FLOW_ON = 9216 bytes → 18 units
+ * FLOW_OFF = 4096 bytes → 8 units
+ * - USB 2.0 (High-Speed):
+ * FLOW_ON = 8704 bytes → 17 units
+ * FLOW_OFF = 1024 bytes → 2 units
+ *
+ * Note: The FCT_FLOW register must be configured before enabling TX pause
+ * (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
+ bool tx_pause, bool rx_pause)
+{
+ /* Use maximum pause time: 65535 quanta (512-bit times) */
+ const u32 pause_time_quanta = 65535;
+ u32 fct_flow = 0;
+ u32 flow = 0;
+ int ret;
+
+ /* Prepare MAC flow control bits */
+ if (tx_pause)
+ flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
+
+ if (rx_pause)
+ flow |= FLOW_CR_RX_FCEN_;
+
+ /* Select RX FIFO thresholds based on USB speed
+ *
+ * FCT_FLOW layout:
+ * bits [6:0] FLOW_ON threshold (RXUSED ≥ ON → assert pause)
+ * bits [14:8] FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
+ * thresholds are expressed in units of 512 bytes
+ */
+ switch (dev->udev->speed) {
+ case USB_SPEED_SUPER:
+ fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
+ break;
+ case USB_SPEED_HIGH:
+ fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
+ break;
+ default:
+ netdev_warn(dev->net, "Unsupported USB speed: %d\n",
+ dev->udev->speed);
+ return -EINVAL;
+ }
+
+ /* Step 1: Write FIFO thresholds before enabling pause frames */
+ ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+ if (ret < 0)
+ return ret;
+
+ /* Step 2: Enable MAC pause functionality */
+ return lan78xx_write_reg(dev, FLOW, flow);
+}
+
+/**
+ * lan78xx_register_fixed_phy() - Register a fallback fixed PHY
+ * @dev: LAN78xx device
+ *
+ * Registers a fixed PHY with 1 Gbps full duplex. This is used in special cases
+ * like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface to a
+ * switch without a visible PHY.
+ *
+ * Return: pointer to the registered fixed PHY, or ERR_PTR() on error.
+ */
+static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev)
+{
struct fixed_phy_status fphy_status = {
.link = 1,
.speed = SPEED_1000,
.duplex = DUPLEX_FULL,
};
+
+ netdev_info(dev->net,
+ "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n");
+
+ return fixed_phy_register(&fphy_status, NULL);
+}
+
+/**
+ * lan78xx_get_phy() - Probe or register PHY device and set interface mode
+ * @dev: LAN78xx device structure
+ *
+ * This function attempts to find a PHY on the MDIO bus. If no PHY is found
+ * and the chip is LAN7801, it registers a fixed PHY as a fallback. It also
+ * sets dev->interface based on chip ID and detected PHY type.
+ *
+ * Return: a valid PHY device pointer, or ERR_PTR() on failure.
+ */
+static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
+{
struct phy_device *phydev;
+ /* Attempt to locate a PHY on the MDIO bus */
phydev = phy_find_first(dev->mdiobus);
- if (!phydev) {
- netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- netdev_err(dev->net, "No PHY/fixed_PHY found\n");
- return NULL;
+
+ switch (dev->chipid) {
+ case ID_REV_CHIP_ID_7801_:
+ if (phydev) {
+ /* External RGMII PHY detected */
+ dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ phydev->is_internal = false;
+
+ if (!phydev->drv)
+ netdev_warn(dev->net,
+ "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
+
+ return phydev;
}
- netdev_dbg(dev->net, "Registered FIXED PHY\n");
+
dev->interface = PHY_INTERFACE_MODE_RGMII;
+ /* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
+ return lan78xx_register_fixed_phy(dev);
+
+ case ID_REV_CHIP_ID_7800_:
+ case ID_REV_CHIP_ID_7850_:
+ if (!phydev)
+ return ERR_PTR(-ENODEV);
+
+ /* These use internal GMII-connected PHY */
+ dev->interface = PHY_INTERFACE_MODE_GMII;
+ phydev->is_internal = true;
+ return phydev;
+
+ default:
+ netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
+ return ERR_PTR(-ENODEV);
+ }
+}
+
+/**
+ * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
+ * @dev: LAN78xx device
+ *
+ * Configure MAC-side registers according to dev->interface, which should be
+ * set by lan78xx_get_phy().
+ *
+ * - For PHY_INTERFACE_MODE_RGMII:
+ * Enable MAC-side TXC delay. This mode seems to be used in a special setup
+ * without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
+ * connected to the KSZ9897 switch, and the link timing is expected to be
+ * hardwired (e.g. via strapping or board layout). No devicetree support is
+ * assumed here.
+ *
+ * - For PHY_INTERFACE_MODE_RGMII_ID:
+ * Disable MAC-side delay and rely on the PHY driver to provide delay.
+ *
+ * - For GMII, no MAC-specific config is needed.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
+{
+ int ret;
+
+ switch (dev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ /* Enable MAC-side TX clock delay */
ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
MAC_RGMII_ID_TXC_DELAY_EN_);
+ if (ret < 0)
+ return ret;
+
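+ /* 0x3D00: TX-path DLL bypass/tuning value retained from the
+ * previous implementation
+ */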
ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
- ret = lan78xx_read_reg(dev, HW_CFG, &buf);
- buf |= HW_CFG_CLK125_EN_;
- buf |= HW_CFG_REFCLK25_EN_;
- ret = lan78xx_write_reg(dev, HW_CFG, buf);
- } else {
- if (!phydev->drv) {
- netdev_err(dev->net, "no PHY driver found\n");
- return NULL;
- }
- dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
- /* The PHY driver is responsible to configure proper RGMII
- * interface delays. Disable RGMII delays on MAC side.
- */
- lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_update_reg(dev, HW_CFG,
+ HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
+ HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* Disable MAC-side TXC delay, PHY provides it */
+ ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
+ if (ret < 0)
+ return ret;
+
+ break;
- phydev->is_internal = false;
+ case PHY_INTERFACE_MODE_GMII:
+ /* No MAC-specific configuration required */
+ break;
+
+ default:
+ netdev_warn(dev->net, "Unsupported interface mode: %d\n",
+ dev->interface);
+ break;
}
- return phydev;
+
+ return 0;
+}
+
+/**
+ * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
+ * @dev: LAN78xx device
+ * @phydev: PHY device (must be valid)
+ *
+ * Reads "microchip,led-modes" property from the PHY's DT node and enables
+ * the corresponding number of LEDs by writing to HW_CFG.
+ *
+ * This helper preserves the original logic, enabling up to 4 LEDs.
+ * If the property is not present, this function does nothing.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
+ struct phy_device *phydev)
+{
+ struct device_node *np = phydev->mdio.dev.of_node;
+ u32 reg;
+ int len, ret;
+
+ if (!np)
+ return 0;
+
+ len = of_property_count_elems_of_size(np, "microchip,led-modes",
+ sizeof(u32));
+ if (len < 0)
+ return 0;
+
+ ret = lan78xx_read_reg(dev, HW_CFG, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
+ HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
+
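+ /* (len > n) evaluates to 0 or 1, so the multiply keeps or drops
+ * each LED enable bit
+ */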
+ reg |= (len > 0) * HW_CFG_LED0_EN_ |
+ (len > 1) * HW_CFG_LED1_EN_ |
+ (len > 2) * HW_CFG_LED2_EN_ |
+ (len > 3) * HW_CFG_LED3_EN_;
+
+ return lan78xx_write_reg(dev, HW_CFG, reg);
}
static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2559,30 +2809,13 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
u32 mii_adv;
struct phy_device *phydev;
- switch (dev->chipid) {
- case ID_REV_CHIP_ID_7801_:
- phydev = lan7801_phy_init(dev);
- if (!phydev) {
- netdev_err(dev->net, "lan7801: PHY Init Failed");
- return -EIO;
- }
- break;
-
- case ID_REV_CHIP_ID_7800_:
- case ID_REV_CHIP_ID_7850_:
- phydev = phy_find_first(dev->mdiobus);
- if (!phydev) {
- netdev_err(dev->net, "no PHY found\n");
- return -EIO;
- }
- phydev->is_internal = true;
- dev->interface = PHY_INTERFACE_MODE_GMII;
- break;
+ phydev = lan78xx_get_phy(dev);
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
- default:
- netdev_err(dev->net, "Unknown CHIP ID found\n");
- return -EIO;
- }
+ ret = lan78xx_mac_prepare_for_phy(dev);
+ if (ret < 0)
+ goto free_phy;
/* if phyirq is not set, use polling mode in phylib */
if (dev->domain_data.phyirq > 0)
@@ -2624,33 +2857,23 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
phy_support_eee(phydev);
- if (phydev->mdio.dev.of_node) {
- u32 reg;
- int len;
-
- len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
- "microchip,led-modes",
- sizeof(u32));
- if (len >= 0) {
- /* Ensure the appropriate LEDs are enabled */
- lan78xx_read_reg(dev, HW_CFG, &reg);
- reg &= ~(HW_CFG_LED0_EN_ |
- HW_CFG_LED1_EN_ |
- HW_CFG_LED2_EN_ |
- HW_CFG_LED3_EN_);
- reg |= (len > 0) * HW_CFG_LED0_EN_ |
- (len > 1) * HW_CFG_LED1_EN_ |
- (len > 2) * HW_CFG_LED2_EN_ |
- (len > 3) * HW_CFG_LED3_EN_;
- lan78xx_write_reg(dev, HW_CFG, reg);
- }
- }
+ ret = lan78xx_configure_leds_from_dt(dev, phydev);
+ if (ret)
+ goto free_phy;
genphy_config_aneg(phydev);
dev->fc_autoneg = phydev->autoneg;
return 0;
+
+free_phy:
+ if (phy_is_pseudo_fixed_link(phydev)) {
+ fixed_phy_unregister(phydev);
+ phy_device_free(phydev);
+ }
+
+ return ret;
}
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
@@ -4419,7 +4642,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
static void lan78xx_stat_monitor(struct timer_list *t)
{
- struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
+ struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);
lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2cab046749a9..44cba7acfe7d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,7 +26,7 @@
#include <linux/atomic.h>
#include <linux/acpi.h>
#include <linux/firmware.h>
-#include <crypto/hash.h>
+#include <crypto/sha2.h>
#include <linux/usb/r8152.h>
#include <net/gso.h>
@@ -1665,14 +1665,14 @@ static int
rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
u32 advertising);
-static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
+static int __rtl8152_set_mac_address(struct net_device *netdev,
+ struct sockaddr_storage *addr,
bool in_resume)
{
struct r8152 *tp = netdev_priv(netdev);
- struct sockaddr *addr = p;
int ret = -EADDRNOTAVAIL;
- if (!is_valid_ether_addr(addr->sa_data))
+ if (!is_valid_ether_addr(addr->__data))
goto out1;
if (!in_resume) {
@@ -1683,10 +1683,10 @@ static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
mutex_lock(&tp->control);
- eth_hw_addr_set(netdev, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->__data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
- pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
+ pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->__data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
mutex_unlock(&tp->control);
@@ -1706,7 +1706,8 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
* host system provided MAC address.
* Examples of this are Dell TB15 and Dell WD15 docks
*/
-static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+static int vendor_mac_passthru_addr_read(struct r8152 *tp,
+ struct sockaddr_storage *ss)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -1774,47 +1775,48 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
ret = -EINVAL;
goto amacout;
}
- memcpy(sa->sa_data, buf, 6);
+ memcpy(ss->__data, buf, 6);
tp->netdev->addr_assign_type = NET_ADDR_STOLEN;
netif_info(tp, probe, tp->netdev,
- "Using pass-thru MAC addr %pM\n", sa->sa_data);
+ "Using pass-thru MAC addr %pM\n", ss->__data);
amacout:
kfree(obj);
return ret;
}
-static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
+static int determine_ethernet_addr(struct r8152 *tp,
+ struct sockaddr_storage *ss)
{
struct net_device *dev = tp->netdev;
int ret;
- sa->sa_family = dev->type;
+ ss->ss_family = dev->type;
- ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data);
+ ret = eth_platform_get_mac_address(&tp->udev->dev, ss->__data);
if (ret < 0) {
if (tp->version == RTL_VER_01) {
- ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
+ ret = pla_ocp_read(tp, PLA_IDR, 8, ss->__data);
} else {
/* if device doesn't support MAC pass through this will
* be expected to be non-zero
*/
- ret = vendor_mac_passthru_addr_read(tp, sa);
+ ret = vendor_mac_passthru_addr_read(tp, ss);
if (ret < 0)
ret = pla_ocp_read(tp, PLA_BACKUP, 8,
- sa->sa_data);
+ ss->__data);
}
}
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
- } else if (!is_valid_ether_addr(sa->sa_data)) {
+ } else if (!is_valid_ether_addr(ss->__data)) {
netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
- sa->sa_data);
+ ss->__data);
eth_hw_addr_random(dev);
- ether_addr_copy(sa->sa_data, dev->dev_addr);
+ ether_addr_copy(ss->__data, dev->dev_addr);
netif_info(tp, probe, dev, "Random ether addr %pM\n",
- sa->sa_data);
+ ss->__data);
return 0;
}
@@ -1824,17 +1826,17 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
{
struct net_device *dev = tp->netdev;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
int ret;
- ret = determine_ethernet_addr(tp, &sa);
+ ret = determine_ethernet_addr(tp, &ss);
if (ret < 0)
return ret;
if (tp->version == RTL_VER_01)
- eth_hw_addr_set(dev, sa.sa_data);
+ eth_hw_addr_set(dev, ss.__data);
else
- ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
+ ret = __rtl8152_set_mac_address(dev, &ss, in_resume);
return ret;
}
@@ -4628,48 +4630,16 @@ out:
static long rtl8152_fw_verify_checksum(struct r8152 *tp,
struct fw_header *fw_hdr, size_t size)
{
- unsigned char checksum[sizeof(fw_hdr->checksum)];
- struct crypto_shash *alg;
- struct shash_desc *sdesc;
- size_t len;
- long rc;
-
- alg = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(alg)) {
- rc = PTR_ERR(alg);
- goto out;
- }
-
- if (crypto_shash_digestsize(alg) != sizeof(fw_hdr->checksum)) {
- rc = -EFAULT;
- dev_err(&tp->intf->dev, "digestsize incorrect (%u)\n",
- crypto_shash_digestsize(alg));
- goto free_shash;
- }
+ u8 checksum[sizeof(fw_hdr->checksum)];
- len = sizeof(*sdesc) + crypto_shash_descsize(alg);
- sdesc = kmalloc(len, GFP_KERNEL);
- if (!sdesc) {
- rc = -ENOMEM;
- goto free_shash;
- }
- sdesc->tfm = alg;
-
- len = size - sizeof(fw_hdr->checksum);
- rc = crypto_shash_digest(sdesc, fw_hdr->version, len, checksum);
- kfree(sdesc);
- if (rc)
- goto free_shash;
+ BUILD_BUG_ON(sizeof(checksum) != SHA256_DIGEST_SIZE);
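+ /* digest everything after the checksum field, starting at ->version */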
+ sha256(fw_hdr->version, size - sizeof(checksum), checksum);
- if (memcmp(fw_hdr->checksum, checksum, sizeof(fw_hdr->checksum))) {
+ if (memcmp(fw_hdr->checksum, checksum, sizeof(checksum))) {
dev_err(&tp->intf->dev, "checksum fail\n");
- rc = -EFAULT;
+ return -EFAULT;
}
-
-free_shash:
- crypto_free_shash(alg);
-out:
- return rc;
+ return 0;
}
static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
@@ -8453,7 +8423,7 @@ static int rtl8152_post_reset(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
goto exit;
@@ -8461,8 +8431,8 @@ static int rtl8152_post_reset(struct usb_interface *intf)
rtl_set_accessible(tp);
/* reset the MAC address in case of policy change */
- if (determine_ethernet_addr(tp, &sa) >= 0)
- dev_set_mac_address (tp->netdev, &sa, NULL);
+ if (determine_ethernet_addr(tp, &ss) >= 0)
+ dev_set_mac_address(tp->netdev, &ss, NULL);
netdev = tp->netdev;
if (!netif_running(netdev))
@@ -10084,6 +10054,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
+ { USB_DEVICE(VENDOR_ID_TPLINK, 0x0602) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
{ USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index dec6e82eb0e0..c30ca415d1d3 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -573,7 +573,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work)
*/
static void sierra_sync_timer(struct timer_list *t)
{
- struct sierra_net_data *priv = from_timer(priv, t, sync_timer);
+ struct sierra_net_data *priv = timer_container_of(priv, t, sync_timer);
struct usbnet *dev = priv->usbnet;
dev_dbg(&dev->udev->dev, "%s", __func__);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c39dfa17813a..c04e715a4c2a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1551,7 +1551,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
static void usbnet_bh (struct timer_list *t)
{
- struct usbnet *dev = from_timer(dev, t, delay);
+ struct usbnet *dev = timer_container_of(dev, t, delay);
struct sk_buff *skb;
struct skb_data *entry;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 7bb53961c0ea..a3046142cb8e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -307,12 +307,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
- if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
+ if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb)))
+ return NETDEV_TX_BUSY; /* signal qdisc layer */
- return NET_RX_SUCCESS;
+ return NET_RX_SUCCESS; /* same as NETDEV_TX_OK */
}
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
@@ -346,11 +344,11 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
- int ret = NETDEV_TX_OK;
+ struct netdev_queue *txq;
struct net_device *rcv;
int length = skb->len;
bool use_napi = false;
- int rxq;
+ int ret, rxq;
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
@@ -373,17 +371,45 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
- if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+
+ ret = veth_forward_skb(rcv, skb, rq, use_napi);
+ switch (ret) {
+ case NET_RX_SUCCESS: /* same as NETDEV_TX_OK */
if (!use_napi)
dev_sw_netstats_tx_add(dev, 1, length);
else
__veth_xdp_flush(rq);
- } else {
+ break;
+ case NETDEV_TX_BUSY:
+ /* If a qdisc is attached to our virtual device, returning
+ * NETDEV_TX_BUSY is allowed.
+ */
+ txq = netdev_get_tx_queue(dev, rxq);
+
+ if (qdisc_txq_has_no_queue(txq)) {
+ dev_kfree_skb_any(skb);
+ goto drop;
+ }
+ /* Restore Eth hdr pulled by dev_forward_skb/eth_type_trans */
+ __skb_push(skb, ETH_HLEN);
+ /* Earlier successful forwards started the NAPI consumer via
+ * __veth_xdp_flush(). After stopping the TXQ, re-check the
+ * ring: if the consumer has already drained it and stopped,
+ * wake the queue ourselves. Paired with the empty check in
+ * veth_poll().
+ */
+ netif_tx_stop_queue(txq);
+ smp_mb__after_atomic();
+ if (unlikely(__ptr_ring_empty(&rq->xdp_ring)))
+ netif_tx_wake_queue(txq);
+ break;
+ case NET_RX_DROP: /* same as NET_XMIT_DROP */
drop:
atomic64_inc(&priv->dropped);
ret = NET_XMIT_DROP;
+ break;
+ default:
+ net_crit_ratelimited("%s(%s): Invalid return code (%d)\n",
+ __func__, dev->name, ret);
}
-
rcu_read_unlock();
return ret;
@@ -874,9 +900,17 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
struct veth_xdp_tx_bq *bq,
struct veth_stats *stats)
{
+ struct veth_priv *priv = netdev_priv(rq->dev);
+ int queue_idx = rq->xdp_rxq.queue_index;
+ struct netdev_queue *peer_txq;
+ struct net_device *peer_dev;
int i, done = 0, n_xdpf = 0;
void *xdpf[VETH_XDP_BATCH];
+ /* NAPI runs inside an RCU (bh) read-side section */
+ peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
+ peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
+
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
@@ -925,6 +959,9 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
+ if (peer_txq && unlikely(netif_tx_queue_stopped(peer_txq)))
+ netif_tx_wake_queue(peer_txq);
+
return done;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3df6aabc7e33..0572f6a9bdb6 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -27,6 +27,10 @@
#include <linux/module.h>
#include <net/ip6_checksum.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
+
#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"
@@ -1568,6 +1572,30 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
return (hlen + (hdr.tcp->doff << 2));
}
+static void
+vmxnet3_lro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
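+ /* uh is only set when UDP immediately follows a plain IP header */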
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
@@ -1881,6 +1909,8 @@ sop_done:
if (segCnt != 0 && mss != 0) {
skb_shinfo(skb)->gso_type = rcd->v4 ?
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ if (encap_lro)
+ vmxnet3_lro_tunnel(skb, skb->protocol);
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_segs = segCnt;
} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
@@ -3607,8 +3637,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
- WRITE_ONCE(netdev->mtu, new_mtu);
-
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
@@ -3622,6 +3650,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
/* we need to re-create the rx queue based on the new mtu */
vmxnet3_rq_destroy_all(adapter);
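+ /* old RX rings are gone; publish the new MTU before ring
+ * sizes are recomputed
+ */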
+ WRITE_ONCE(netdev->mtu, new_mtu);
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
@@ -3638,6 +3667,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
"Closing it\n", err);
goto out;
}
+ } else {
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
out:
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7168b33adadb..9a4beea6ee0c 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -343,15 +343,13 @@ unlock:
static bool qdisc_tx_is_default(const struct net_device *dev)
{
struct netdev_queue *txq;
- struct Qdisc *qdisc;
if (dev->num_tx_queues > 1)
return false;
txq = netdev_get_tx_queue(dev, 0);
- qdisc = rcu_access_pointer(txq->qdisc);
- return !qdisc->enqueue;
+ return qdisc_txq_has_no_queue(txq);
}
/* Local traffic destined to local address. Reinsert the packet to rx
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 9ccc3f09f71b..97792de896b7 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -15,6 +15,7 @@
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
+#include <linux/rhashtable.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/gro.h>
@@ -63,8 +64,12 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan);
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
-/* salt for hash table */
-static u32 vxlan_salt __read_mostly;
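+/* FDB entries are keyed by (MAC, source VNI); see struct vxlan_fdb_key */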
+static const struct rhashtable_params vxlan_fdb_rht_params = {
+ .head_offset = offsetof(struct vxlan_fdb, rhnode),
+ .key_offset = offsetof(struct vxlan_fdb, key),
+ .key_len = sizeof(struct vxlan_fdb_key),
+ .automatic_shrinking = true,
+};
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
@@ -186,7 +191,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
} else if (nh) {
ndm->ndm_family = nh_family;
}
- send_eth = !is_zero_ether_addr(fdb->eth_addr);
+ send_eth = !is_zero_ether_addr(fdb->key.eth_addr);
} else
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
@@ -201,7 +206,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
peernet2id(dev_net(vxlan->dev), vxlan->net)))
goto nla_put_failure;
- if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
+ if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.eth_addr))
goto nla_put_failure;
if (nh) {
if (nla_put_u32(skb, NDA_NH_ID, nh_id))
@@ -223,9 +228,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
goto nla_put_failure;
}
- if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
+ if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->key.vni &&
nla_put_u32(skb, NDA_SRC_VNI,
- be32_to_cpu(fdb->vni)))
+ be32_to_cpu(fdb->key.vni)))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
@@ -293,8 +298,8 @@ static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
fdb_info->remote_port = rd->remote_port;
fdb_info->remote_vni = rd->remote_vni;
fdb_info->remote_ifindex = rd->remote_ifindex;
- memcpy(fdb_info->eth_addr, fdb->eth_addr, ETH_ALEN);
- fdb_info->vni = fdb->vni;
+ memcpy(fdb_info->eth_addr, fdb->key.eth_addr, ETH_ALEN);
+ fdb_info->vni = fdb->key.vni;
fdb_info->offloaded = rd->offloaded;
fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
}
@@ -366,67 +371,42 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
};
struct vxlan_rdst remote = { };
- memcpy(f.eth_addr, eth_addr, ETH_ALEN);
+ memcpy(f.key.eth_addr, eth_addr, ETH_ALEN);
vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
-/* Hash Ethernet address */
-static u32 eth_hash(const unsigned char *addr)
-{
- u64 value = get_unaligned((u64 *)addr);
-
- /* only want 6 bytes */
-#ifdef __BIG_ENDIAN
- value >>= 16;
-#else
- value <<= 16;
-#endif
- return hash_64(value, FDB_HASH_BITS);
-}
-
-u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
+/* Look up Ethernet address in forwarding table */
+static struct vxlan_fdb *vxlan_find_mac_rcu(struct vxlan_dev *vxlan,
+ const u8 *mac, __be32 vni)
{
- /* use 1 byte of OUI and 3 bytes of NIC */
- u32 key = get_unaligned((u32 *)(addr + 2));
-
- return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
-}
+ struct vxlan_fdb_key key;
-u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
-{
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
- return eth_vni_hash(mac, vni);
+ memset(&key, 0, sizeof(key));
+ memcpy(key.eth_addr, mac, sizeof(key.eth_addr));
+ if (!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA))
+ key.vni = vxlan->default_dst.remote_vni;
else
- return eth_hash(mac);
-}
+ key.vni = vni;
-/* Hash chain to use given mac address */
-static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
- const u8 *mac, __be32 vni)
-{
- return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)];
+ return rhashtable_lookup(&vxlan->fdb_hash_tbl, &key,
+ vxlan_fdb_rht_params);
}
-/* Look up Ethernet address in forwarding table */
-static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
- const u8 *mac, __be32 vni)
+static struct vxlan_fdb *vxlan_find_mac_tx(struct vxlan_dev *vxlan,
+ const u8 *mac, __be32 vni)
{
- struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
struct vxlan_fdb *f;
- hlist_for_each_entry_rcu(f, head, hlist) {
- if (ether_addr_equal(mac, f->eth_addr)) {
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
- if (vni == f->vni)
- return f;
- } else {
- return f;
- }
- }
+ f = vxlan_find_mac_rcu(vxlan, mac, vni);
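+ /* TX fast path: also refresh the 'used' stamp so ageing treats
+ * the entry as active
+ */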
+ if (f) {
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(f->used) != now)
+ WRITE_ONCE(f->used, now);
}
- return NULL;
+ return f;
}
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
@@ -434,13 +414,11 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
{
struct vxlan_fdb *f;
- f = __vxlan_find_mac(vxlan, mac, vni);
- if (f) {
- unsigned long now = jiffies;
+ lockdep_assert_held_once(&vxlan->hash_lock);
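+ /* hash_lock keeps the entry from being freed under us; the RCU
+ * read lock only satisfies the rhashtable lookup API
+ */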
- if (READ_ONCE(f->used) != now)
- WRITE_ONCE(f->used, now);
- }
+ rcu_read_lock();
+ f = vxlan_find_mac_rcu(vxlan, mac, vni);
+ rcu_read_unlock();
return f;
}
@@ -480,7 +458,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
rcu_read_lock();
- f = __vxlan_find_mac(vxlan, eth_addr, vni);
+ f = vxlan_find_mac_rcu(vxlan, eth_addr, vni);
if (!f) {
rc = -ENOENT;
goto out;
@@ -517,32 +495,28 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- unsigned int h;
int rc = 0;
if (!netif_is_vxlan(dev))
return -EINVAL;
vxlan = netdev_priv(dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
- if (f->vni == vni) {
- list_for_each_entry(rdst, &f->remotes, list) {
- rc = vxlan_fdb_notify_one(nb, vxlan,
- f, rdst,
- extack);
- if (rc)
- goto unlock;
- }
+ spin_lock_bh(&vxlan->hash_lock);
+ hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) {
+ if (f->key.vni == vni) {
+ list_for_each_entry(rdst, &f->remotes, list) {
+ rc = vxlan_fdb_notify_one(nb, vxlan, f, rdst,
+ extack);
+ if (rc)
+ goto unlock;
}
}
- spin_unlock_bh(&vxlan->hash_lock[h]);
}
+ spin_unlock_bh(&vxlan->hash_lock);
return 0;
unlock:
- spin_unlock_bh(&vxlan->hash_lock[h]);
+ spin_unlock_bh(&vxlan->hash_lock);
return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_replay);
@@ -552,20 +526,19 @@ void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- unsigned int h;
if (!netif_is_vxlan(dev))
return;
vxlan = netdev_priv(dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
- if (f->vni == vni)
- list_for_each_entry(rdst, &f->remotes, list)
- rdst->offloaded = false;
- spin_unlock_bh(&vxlan->hash_lock[h]);
+ spin_lock_bh(&vxlan->hash_lock);
+ hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) {
+ if (f->key.vni == vni) {
+ list_for_each_entry(rdst, &f->remotes, list)
+ rdst->offloaded = false;
+ }
}
+ spin_unlock_bh(&vxlan->hash_lock);
}
EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
@@ -610,10 +583,10 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
if (rd == NULL)
return -ENOMEM;
- if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
- kfree(rd);
- return -ENOMEM;
- }
+ /* The driver can work correctly without a dst cache, so do not treat
+ * dst cache initialization errors as fatal.
+ */
+ dst_cache_init(&rd->dst_cache, GFP_ATOMIC | __GFP_NOWARN);
rd->remote_ip = *ip;
rd->remote_port = port;
@@ -803,27 +776,20 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return NULL;
+ memset(&f->key, 0, sizeof(f->key));
f->state = state;
f->flags = ndm_flags;
f->updated = f->used = jiffies;
- f->vni = src_vni;
+ f->key.vni = src_vni;
f->nh = NULL;
RCU_INIT_POINTER(f->vdev, vxlan);
INIT_LIST_HEAD(&f->nh_list);
INIT_LIST_HEAD(&f->remotes);
- memcpy(f->eth_addr, mac, ETH_ALEN);
+ memcpy(f->key.eth_addr, mac, ETH_ALEN);
return f;
}
-static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
- __be32 src_vni, struct vxlan_fdb *f)
-{
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
-}
-
static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
u32 nhid, struct netlink_ext_ack *extack)
{
@@ -913,10 +879,27 @@ int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (rc < 0)
goto errout;
+ rc = rhashtable_lookup_insert_fast(&vxlan->fdb_hash_tbl, &f->rhnode,
+ vxlan_fdb_rht_params);
+ if (rc)
+ goto destroy_remote;
+
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->fdb_node, &vxlan->fdb_list);
+
*fdb = f;
return 0;
+destroy_remote:
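+ /* unwind the nexthop or remote entry created earlier in this
+ * function
+ */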
+ if (rcu_access_pointer(f->nh)) {
+ list_del_rcu(&f->nh_list);
+ nexthop_put(rtnl_dereference(f->nh));
+ } else {
+ list_del(&rd->list);
+ dst_cache_destroy(&rd->dst_cache);
+ kfree(rd);
+ }
errout:
kfree(f);
return rc;
@@ -953,7 +936,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
{
struct vxlan_rdst *rd;
- netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr);
+ netdev_dbg(vxlan->dev, "delete %pM\n", f->key.eth_addr);
--vxlan->addrcnt;
if (do_notify) {
@@ -966,7 +949,9 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
swdev_notify, NULL);
}
- hlist_del_rcu(&f->hlist);
+ hlist_del_init_rcu(&f->fdb_node);
+ rhashtable_remove_fast(&vxlan->fdb_hash_tbl, &f->rhnode,
+ vxlan_fdb_rht_params);
list_del_rcu(&f->nh_list);
call_rcu(&f->rcu, vxlan_fdb_free);
}
@@ -1024,8 +1009,8 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
if ((flags & NLM_F_REPLACE)) {
/* Only change unicasts */
- if (!(is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
+ if (!(is_multicast_ether_addr(f->key.eth_addr) ||
+ is_zero_ether_addr(f->key.eth_addr))) {
if (nhid) {
rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack);
if (rc < 0)
@@ -1041,8 +1026,8 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
}
}
if ((flags & NLM_F_APPEND) &&
- (is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
+ (is_multicast_ether_addr(f->key.eth_addr) ||
+ is_zero_ether_addr(f->key.eth_addr))) {
rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
if (rc < 0)
@@ -1101,7 +1086,6 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
if (rc < 0)
return rc;
- vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack);
if (rc)
@@ -1125,7 +1109,7 @@ int vxlan_fdb_update(struct vxlan_dev *vxlan,
{
struct vxlan_fdb *f;
- f = __vxlan_find_mac(vxlan, mac, src_vni);
+ f = vxlan_find_mac(vxlan, mac, src_vni);
if (f) {
if (flags & NLM_F_EXCL) {
netdev_dbg(vxlan->dev,
@@ -1253,7 +1237,6 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
__be16 port;
__be32 src_vni, vni;
u32 ifindex, nhid;
- u32 hash_index;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -1273,13 +1256,12 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
return -EAFNOSUPPORT;
- hash_index = fdb_head_index(vxlan, addr, src_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
nhid, true, extack);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
if (!err)
*notified = true;
@@ -1296,7 +1278,7 @@ int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
int err = -ENOENT;
- f = __vxlan_find_mac(vxlan, addr, src_vni);
+ f = vxlan_find_mac(vxlan, addr, src_vni);
if (!f)
return err;
@@ -1330,7 +1312,6 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
union vxlan_addr ip;
__be32 src_vni, vni;
u32 ifindex, nhid;
- u32 hash_index;
__be16 port;
int err;
@@ -1339,11 +1320,10 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
if (err)
return err;
- hash_index = fdb_head_index(vxlan, addr, src_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
if (!err)
*notified = true;
@@ -1358,52 +1338,46 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
{
struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
struct vxlan_dev *vxlan = netdev_priv(dev);
- unsigned int h;
+ struct vxlan_fdb *f;
int err = 0;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct vxlan_fdb *f;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
- struct vxlan_rdst *rd;
-
- if (rcu_access_pointer(f->nh)) {
- if (*idx < ctx->fdb_idx)
- goto skip_nh;
- err = vxlan_fdb_info(skb, vxlan, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI, NULL);
- if (err < 0) {
- rcu_read_unlock();
- goto out;
- }
-skip_nh:
- *idx += 1;
- continue;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ struct vxlan_rdst *rd;
+
+ if (rcu_access_pointer(f->nh)) {
+ if (*idx < ctx->fdb_idx)
+ goto skip_nh;
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI, NULL);
+ if (err < 0) {
+ rcu_read_unlock();
+ goto out;
}
+skip_nh:
+ *idx += 1;
+ continue;
+ }
- list_for_each_entry_rcu(rd, &f->remotes, list) {
- if (*idx < ctx->fdb_idx)
- goto skip;
-
- err = vxlan_fdb_info(skb, vxlan, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI, rd);
- if (err < 0) {
- rcu_read_unlock();
- goto out;
- }
-skip:
- *idx += 1;
+ list_for_each_entry_rcu(rd, &f->remotes, list) {
+ if (*idx < ctx->fdb_idx)
+ goto skip;
+
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI, rd);
+ if (err < 0) {
+ rcu_read_unlock();
+ goto out;
}
+skip:
+ *idx += 1;
}
- rcu_read_unlock();
}
+ rcu_read_unlock();
out:
return err;
}
@@ -1427,7 +1401,7 @@ static int vxlan_fdb_get(struct sk_buff *skb,
rcu_read_lock();
- f = __vxlan_find_mac(vxlan, addr, vni);
+ f = vxlan_find_mac_rcu(vxlan, addr, vni);
if (!f) {
NL_SET_ERR_MSG(extack, "Fdb entry not found");
err = -ENOENT;
@@ -1463,7 +1437,7 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
ifindex = src_ifindex;
#endif
- f = __vxlan_find_mac(vxlan, src_mac, vni);
+ f = vxlan_find_mac_rcu(vxlan, src_mac, vni);
if (likely(f)) {
struct vxlan_rdst *rdst = first_remote_rcu(f);
unsigned long now = jiffies;
@@ -1491,10 +1465,8 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
rdst->remote_ip = *src_ip;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
- u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
-
/* learned new entry */
- spin_lock(&vxlan->hash_lock[hash_index]);
+ spin_lock(&vxlan->hash_lock);
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
@@ -1505,7 +1477,7 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
vni,
vxlan->default_dst.remote_vni,
ifindex, NTF_SELF, 0, true, NULL);
- spin_unlock(&vxlan->hash_lock[hash_index]);
+ spin_unlock(&vxlan->hash_lock);
}
return SKB_NOT_DROPPED_YET;
@@ -1916,12 +1888,15 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
}
- f = vxlan_find_mac(vxlan, n->ha, vni);
+ rcu_read_lock();
+ f = vxlan_find_mac_tx(vxlan, n->ha, vni);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
+ rcu_read_unlock();
goto out;
}
+ rcu_read_unlock();
reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
n->ha, sha);
@@ -2080,7 +2055,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
}
- f = vxlan_find_mac(vxlan, n->ha, vni);
+ f = vxlan_find_mac_tx(vxlan, n->ha, vni);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
@@ -2648,14 +2623,10 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
memset(&nh_rdst, 0, sizeof(struct vxlan_rdst));
hash = skb_get_hash(skb);
- rcu_read_lock();
nh = rcu_dereference(f->nh);
- if (!nh) {
- rcu_read_unlock();
+ if (!nh)
goto drop;
- }
do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst);
- rcu_read_unlock();
if (likely(do_xmit))
vxlan_xmit_one(skb, dev, vni, &nh_rdst, did_rsc);
@@ -2782,7 +2753,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
}
eth = eth_hdr(skb);
- f = vxlan_find_mac(vxlan, eth->h_dest, vni);
+ rcu_read_lock();
+ f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni);
did_rsc = false;
if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
@@ -2790,11 +2762,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
if (did_rsc)
- f = vxlan_find_mac(vxlan, eth->h_dest, vni);
+ f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni);
}
if (f == NULL) {
- f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
+ f = vxlan_find_mac_tx(vxlan, all_zeros_mac, vni);
if (f == NULL) {
if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
@@ -2804,7 +2776,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
- return NETDEV_TX_OK;
+ goto out;
}
}
@@ -2829,46 +2801,46 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
}
+out:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(struct timer_list *t)
{
- struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
+ struct vxlan_dev *vxlan = timer_container_of(vxlan, t, age_timer);
unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
- unsigned int h;
+ struct vxlan_fdb *f;
if (!netif_running(vxlan->dev))
return;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct hlist_node *p, *n;
-
- spin_lock(&vxlan->hash_lock[h]);
- hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
- struct vxlan_fdb *f
- = container_of(p, struct vxlan_fdb, hlist);
- unsigned long timeout;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ unsigned long timeout;
- if (f->state & (NUD_PERMANENT | NUD_NOARP))
- continue;
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
+ continue;
- if (f->flags & NTF_EXT_LEARNED)
- continue;
+ if (f->flags & NTF_EXT_LEARNED)
+ continue;
- timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ;
- if (time_before_eq(timeout, jiffies)) {
- netdev_dbg(vxlan->dev,
- "garbage collect %pM\n",
- f->eth_addr);
+ timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ;
+ if (time_before_eq(timeout, jiffies)) {
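+ /* Re-check under hash_lock: the entry may have been
+ * removed since the lockless RCU walk.
+ */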
+ spin_lock(&vxlan->hash_lock);
+ if (!hlist_unhashed(&f->fdb_node)) {
+ netdev_dbg(vxlan->dev, "garbage collect %pM\n",
+ f->key.eth_addr);
f->state = NUD_STALE;
vxlan_fdb_destroy(vxlan, f, true, true);
- } else if (time_before(timeout, next_timer))
- next_timer = timeout;
+ }
+ spin_unlock(&vxlan->hash_lock);
+ } else if (time_before(timeout, next_timer)) {
+ next_timer = timeout;
}
- spin_unlock(&vxlan->hash_lock[h]);
}
+ rcu_read_unlock();
mod_timer(&vxlan->age_timer, next_timer);
}
@@ -2903,10 +2875,14 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
+ err = rhashtable_init(&vxlan->fdb_hash_tbl, &vxlan_fdb_rht_params);
+ if (err)
+ return err;
+
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
err = vxlan_vnigroup_init(vxlan);
if (err)
- return err;
+ goto err_rhashtable_destroy;
}
err = gro_cells_init(&vxlan->gro_cells, dev);
@@ -2925,21 +2901,11 @@ err_gro_cells_destroy:
err_vnigroup_uninit:
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_uninit(vxlan);
+err_rhashtable_destroy:
+ rhashtable_destroy(&vxlan->fdb_hash_tbl);
return err;
}
-static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
-{
- struct vxlan_fdb *f;
- u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
-
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
- f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
- if (f)
- vxlan_fdb_destroy(vxlan, f, true, true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
-}
-
static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2951,7 +2917,7 @@ static void vxlan_uninit(struct net_device *dev)
gro_cells_destroy(&vxlan->gro_cells);
- vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
+ rhashtable_destroy(&vxlan->fdb_hash_tbl);
}
/* Start ageing timer and join group when device is brought up */
@@ -2992,7 +2958,8 @@ struct vxlan_fdb_flush_desc {
static bool vxlan_fdb_is_default_entry(const struct vxlan_fdb *f,
const struct vxlan_dev *vxlan)
{
- return is_zero_ether_addr(f->eth_addr) && f->vni == vxlan->cfg.vni;
+ return is_zero_ether_addr(f->key.eth_addr) &&
+ f->key.vni == vxlan->cfg.vni;
}
static bool vxlan_fdb_nhid_matches(const struct vxlan_fdb *f, u32 nhid)
@@ -3015,7 +2982,7 @@ static bool vxlan_fdb_flush_matches(const struct vxlan_fdb *f,
if (desc->ignore_default_entry && vxlan_fdb_is_default_entry(f, vxlan))
return false;
- if (desc->src_vni && f->vni != desc->src_vni)
+ if (desc->src_vni && f->key.vni != desc->src_vni)
return false;
if (desc->nhid && !vxlan_fdb_nhid_matches(f, desc->nhid))
@@ -3071,33 +3038,32 @@ static void vxlan_flush(struct vxlan_dev *vxlan,
const struct vxlan_fdb_flush_desc *desc)
{
bool match_remotes = vxlan_fdb_flush_should_match_remotes(desc);
- unsigned int h;
-
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct hlist_node *p, *n;
+ struct vxlan_fdb *f;
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
- struct vxlan_fdb *f
- = container_of(p, struct vxlan_fdb, hlist);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ if (!vxlan_fdb_flush_matches(f, vxlan, desc))
+ continue;
- if (!vxlan_fdb_flush_matches(f, vxlan, desc))
- continue;
+ spin_lock_bh(&vxlan->hash_lock);
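+ /* the RCU walk is lockless; the entry may already be gone */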
+ if (hlist_unhashed(&f->fdb_node))
+ goto unlock;
- if (match_remotes) {
- bool destroy_fdb = false;
+ if (match_remotes) {
+ bool destroy_fdb = false;
- vxlan_fdb_flush_match_remotes(f, vxlan, desc,
- &destroy_fdb);
+ vxlan_fdb_flush_match_remotes(f, vxlan, desc,
+ &destroy_fdb);
- if (!destroy_fdb)
- continue;
- }
-
- vxlan_fdb_destroy(vxlan, f, true, true);
+ if (!destroy_fdb)
+ goto unlock;
}
- spin_unlock_bh(&vxlan->hash_lock[h]);
+
+ vxlan_fdb_destroy(vxlan, f, true, true);
+unlock:
+ spin_unlock_bh(&vxlan->hash_lock);
}
+ rcu_read_unlock();
}
static const struct nla_policy vxlan_del_bulk_policy[NDA_MAX + 1] = {
@@ -3185,7 +3151,7 @@ static int vxlan_stop(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb_flush_desc desc = {
- /* Default entry is deleted at vxlan_uninit. */
+ /* Default entry is deleted at vxlan_dellink. */
.ignore_default_entry = true,
.state = 0,
.state_mask = NUD_PERMANENT | NUD_NOARP,
@@ -3348,7 +3314,6 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
static void vxlan_setup(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- unsigned int h;
eth_hw_addr_random(dev);
ether_setup(dev);
@@ -3375,15 +3340,13 @@ static void vxlan_setup(struct net_device *dev)
dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
INIT_LIST_HEAD(&vxlan->next);
+ spin_lock_init(&vxlan->hash_lock);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
vxlan->dev = dev;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_init(&vxlan->hash_lock[h]);
- INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
- }
+ INIT_HLIST_HEAD(&vxlan->fdb_list);
}
static void vxlan_ether_setup(struct net_device *dev)
@@ -3960,8 +3923,6 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct net_device *remote_dev = NULL;
- struct vxlan_fdb *f = NULL;
- bool unregister = false;
struct vxlan_rdst *dst;
int err;
@@ -3972,72 +3933,54 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
dev->ethtool_ops = &vxlan_ethtool_ops;
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&dst->remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &dst->remote_ip,
- NUD_REACHABLE | NUD_PERMANENT,
- vxlan->cfg.dst_port,
- dst->remote_vni,
- dst->remote_vni,
- dst->remote_ifindex,
- NTF_SELF, 0, &f, extack);
- if (err)
- return err;
- }
-
err = register_netdevice(dev);
if (err)
- goto errout;
- unregister = true;
+ return err;
if (dst->remote_ifindex) {
remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
if (!remote_dev) {
err = -ENODEV;
- goto errout;
+ goto unregister;
}
err = netdev_upper_dev_link(remote_dev, dev, extack);
if (err)
- goto errout;
+ goto unregister;
+
+ dst->remote_dev = remote_dev;
}
err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0)
goto unlink;
- if (f) {
- vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
-
- /* notify default fdb entry */
- err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
- RTM_NEWNEIGH, true, extack);
- if (err) {
- vxlan_fdb_destroy(vxlan, f, false, false);
- if (remote_dev)
- netdev_upper_dev_unlink(remote_dev, dev);
- goto unregister;
- }
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&dst->remote_ip)) {
+ spin_lock_bh(&vxlan->hash_lock);
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
+ &dst->remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_EXCL | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
+ NTF_SELF, 0, true, extack);
+ spin_unlock_bh(&vxlan->hash_lock);
+ if (err)
+ goto unlink;
}
list_add(&vxlan->next, &vn->vxlan_list);
- if (remote_dev)
- dst->remote_dev = remote_dev;
+
return 0;
+
unlink:
if (remote_dev)
netdev_upper_dev_unlink(remote_dev, dev);
-errout:
- /* unregister_netdevice() destroys the default FDB entry with deletion
- * notification. But the addition notification was not sent yet, so
- * destroy the entry by hand here.
- */
- if (f)
- __vxlan_fdb_free(f);
unregister:
- if (unregister)
- unregister_netdevice(dev);
+ unregister_netdevice(dev);
return err;
}
@@ -4454,9 +4397,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
/* handle default dst entry */
if (rem_ip_changed) {
- u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
-
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
&conf.remote_ip,
@@ -4467,7 +4408,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
conf.remote_ifindex,
NTF_SELF, 0, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
netdev_adjacent_change_abort(dst->remote_dev,
lowerdev, dev);
return err;
@@ -4481,7 +4422,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
dst->remote_vni,
dst->remote_ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
/* If vni filtering device, also update fdb entries of
* all vnis that were using default remote ip
@@ -4518,10 +4459,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_fdb_flush_desc desc = {
- /* Default entry is deleted at vxlan_uninit. */
- .ignore_default_entry = true,
- };
+ struct vxlan_fdb_flush_desc desc = {};
vxlan_flush(vxlan, &desc);
@@ -4784,13 +4722,10 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- u32 hash_index;
-
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
- f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
goto out;
@@ -4804,7 +4739,7 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
rdst->offloaded = fdb_info->offloaded;
out:
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
}
static int
@@ -4813,13 +4748,11 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct netlink_ext_ack *extack;
- u32 hash_index;
int err;
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
extack = switchdev_notifier_info_to_extack(&fdb_info->info);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
NLM_F_CREATE | NLM_F_REPLACE,
@@ -4829,7 +4762,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
0, false, extack);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -4840,13 +4773,11 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
- u32 hash_index;
int err = 0;
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
- f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
err = -ENOENT;
else if (f->flags & NTF_EXT_LEARNED)
@@ -4858,7 +4789,7 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
fdb_info->remote_ifindex,
false);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -4907,18 +4838,15 @@ static void vxlan_fdb_nh_flush(struct nexthop *nh)
{
struct vxlan_fdb *fdb;
struct vxlan_dev *vxlan;
- u32 hash_index;
rcu_read_lock();
list_for_each_entry_rcu(fdb, &nh->fdb_list, nh_list) {
vxlan = rcu_dereference(fdb->vdev);
WARN_ON(!vxlan);
- hash_index = fdb_head_index(vxlan, fdb->eth_addr,
- vxlan->default_dst.remote_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
- if (!hlist_unhashed(&fdb->hlist))
+ spin_lock_bh(&vxlan->hash_lock);
+ if (!hlist_unhashed(&fdb->fdb_node))
vxlan_fdb_destroy(vxlan, fdb, false, false);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
}
rcu_read_unlock();
}
@@ -4966,19 +4894,15 @@ static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
vxlan_dellink(vxlan->dev, dev_to_kill);
}
-static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
+static void __net_exit vxlan_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
- struct net *net;
-
- ASSERT_RTNL();
- list_for_each_entry(net, net_list, exit_list) {
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ ASSERT_RTNL_NET(net);
- vxlan_destroy_tunnels(vn, dev_to_kill);
- }
+ __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ vxlan_destroy_tunnels(vn, dev_to_kill);
}
static void __net_exit vxlan_exit_net(struct net *net)
@@ -4992,7 +4916,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit_batch_rtnl = vxlan_exit_batch_rtnl,
+ .exit_rtnl = vxlan_exit_rtnl,
.exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
@@ -5002,8 +4926,6 @@ static int __init vxlan_init_module(void)
{
int rc;
- get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
-
rc = register_pernet_subsys(&vxlan_net_ops);
if (rc)
goto out1;
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index 76a351a997d5..d328aed9feef 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -24,18 +24,23 @@ struct vxlan_net {
struct notifier_block nexthop_notifier_block;
};
+struct vxlan_fdb_key {
+ u8 eth_addr[ETH_ALEN];
+ __be32 vni;
+};
+
/* Forwarding table entry */
struct vxlan_fdb {
- struct hlist_node hlist; /* linked list of entries */
+ struct rhash_head rhnode;
struct rcu_head rcu;
unsigned long updated; /* jiffies */
unsigned long used;
struct list_head remotes;
- u8 eth_addr[ETH_ALEN];
+ struct vxlan_fdb_key key;
u16 state; /* see ndm_state */
- __be32 vni;
u16 flags; /* see ndm_flags and below */
struct list_head nh_list;
+ struct hlist_node fdb_node;
struct nexthop __rcu *nh;
struct vxlan_dev __rcu *vdev;
};
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 06d19e90eadb..186d0660669a 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -411,13 +411,12 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb
struct tunnel_msg *tmsg;
struct net_device *dev;
- if (cb->nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct tunnel_msg))) {
+ tmsg = nlmsg_payload(cb->nlh, sizeof(*tmsg));
+ if (!tmsg) {
NL_SET_ERR_MSG(cb->extack, "Invalid msg length");
return -EINVAL;
}
- tmsg = nlmsg_data(cb->nlh);
-
if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header");
return -EINVAL;
@@ -483,11 +482,9 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
struct netlink_ext_ack *extack)
{
struct vxlan_rdst *dst = &vxlan->default_dst;
- u32 hash_index;
int err = 0;
- hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
if (remote_ip && !vxlan_addr_any(remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
remote_ip,
@@ -499,7 +496,7 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
dst->remote_ifindex,
NTF_SELF, 0, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
}
@@ -512,7 +509,7 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
dst->remote_ifindex,
true);
}
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -628,10 +625,7 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
*/
if (!vxlan_addr_any(&vninode->remote_ip) ||
!vxlan_addr_any(&dst->remote_ip)) {
- u32 hash_index = fdb_head_index(vxlan, all_zeros_mac,
- vninode->vni);
-
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
__vxlan_fdb_delete(vxlan, all_zeros_mac,
(vxlan_addr_any(&vninode->remote_ip) ?
dst->remote_ip : vninode->remote_ip),
@@ -639,7 +633,7 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
vninode->vni, vninode->vni,
dst->remote_ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
}
if (vxlan->dev->flags & IFF_UP) {
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 7e653432c139..bfc978b15bc2 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -244,7 +244,7 @@ rx_error:
static void cisco_timer(struct timer_list *t)
{
- struct cisco_state *st = from_timer(st, t, timer);
+ struct cisco_state *st = timer_container_of(st, t, timer);
struct net_device *dev = st->dev;
spin_lock(&st->lock);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 34014f427060..08a0ba5ca471 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -581,7 +581,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
static void fr_timer(struct timer_list *t)
{
- struct frad_state *st = from_timer(st, t, timer);
+ struct frad_state *st = timer_container_of(st, t, timer);
struct net_device *dev = st->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
int i, cnt = 0, reliable;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 19921b02846d..7496a2e9a282 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -561,7 +561,7 @@ out:
static void ppp_timer(struct timer_list *t)
{
- struct proto *proto = from_timer(proto, t, timer);
+ struct proto *proto = timer_container_of(proto, t, timer);
struct ppp *ppp = get_ppp(proto->dev);
unsigned long flags;
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 4b8528206cc8..09f7fcd7da78 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -249,6 +249,52 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return 0;
}
+static void remove_node(struct allowedips_node *node, struct mutex *lock)
+{
+ struct allowedips_node *child, **parent_bit, *parent;
+ bool free_parent;
+
+ list_del_init(&node->peer_list);
+ RCU_INIT_POINTER(node->peer, NULL);
+ if (node->bit[0] && node->bit[1])
+ return;
+ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+ lockdep_is_held(lock));
+ if (child)
+ child->parent_bit_packed = node->parent_bit_packed;
+ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+ *parent_bit = child;
+ parent = (void *)parent_bit -
+ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+ free_parent = !rcu_access_pointer(node->bit[0]) && !rcu_access_pointer(node->bit[1]) &&
+ (node->parent_bit_packed & 3) <= 1 && !rcu_access_pointer(parent->peer);
+ if (free_parent)
+ child = rcu_dereference_protected(parent->bit[!(node->parent_bit_packed & 1)],
+ lockdep_is_held(lock));
+ call_rcu(&node->rcu, node_free_rcu);
+ if (!free_parent)
+ return;
+ if (child)
+ child->parent_bit_packed = parent->parent_bit_packed;
+ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+ call_rcu(&parent->rcu, node_free_rcu);
+}
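
The splice logic in remove_node() above, restated as a sketch: a node with both children survives as a plain internal node once its peer pointer is cleared; otherwise it is cut out by pointing its parent's slot at its lone child, and the parent is reclaimed too when it has just become a peerless internal node with a single child:

	parent              parent
	   \                   \
	   node      ==>      child	(node, and possibly parent, freed
	     \				 via call_rcu once readers drain)
	    child
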
+
+static int remove(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ struct allowedips_node *node;
+
+ if (unlikely(cidr > bits))
+ return -EINVAL;
+ if (!rcu_access_pointer(*trie) || !node_placement(*trie, key, cidr, bits, &node, lock) ||
+ peer != rcu_access_pointer(node->peer))
+ return 0;
+
+ remove_node(node, lock);
+ return 0;
+}
+
void wg_allowedips_init(struct allowedips *table)
{
table->root4 = table->root6 = NULL;
@@ -300,44 +346,38 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
return add(&table->root6, 128, key, cidr, peer, lock);
}
+int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls */
+ u8 key[4] __aligned(__alignof(u32));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 32);
+ return remove(&table->root4, 32, key, cidr, peer, lock);
+}
+
+int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls64 */
+ u8 key[16] __aligned(__alignof(u64));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 128);
+ return remove(&table->root6, 128, key, cidr, peer, lock);
+}
+
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock)
{
- struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
- bool free_parent;
+ struct allowedips_node *node, *tmp;
if (list_empty(&peer->allowedips_list))
return;
++table->seq;
- list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
- list_del_init(&node->peer_list);
- RCU_INIT_POINTER(node->peer, NULL);
- if (node->bit[0] && node->bit[1])
- continue;
- child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
- lockdep_is_held(lock));
- if (child)
- child->parent_bit_packed = node->parent_bit_packed;
- parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
- *parent_bit = child;
- parent = (void *)parent_bit -
- offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
- free_parent = !rcu_access_pointer(node->bit[0]) &&
- !rcu_access_pointer(node->bit[1]) &&
- (node->parent_bit_packed & 3) <= 1 &&
- !rcu_access_pointer(parent->peer);
- if (free_parent)
- child = rcu_dereference_protected(
- parent->bit[!(node->parent_bit_packed & 1)],
- lockdep_is_held(lock));
- call_rcu(&node->rcu, node_free_rcu);
- if (!free_parent)
- continue;
- if (child)
- child->parent_bit_packed = parent->parent_bit_packed;
- *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
- call_rcu(&parent->rcu, node_free_rcu);
- }
+ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list)
+ remove_node(node, lock);
}
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
index 2346c797eb4d..931958cb6e10 100644
--- a/drivers/net/wireguard/allowedips.h
+++ b/drivers/net/wireguard/allowedips.h
@@ -38,6 +38,10 @@ int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
u8 cidr, struct wg_peer *peer, struct mutex *lock);
int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock);
/* The ip input pointer should be __aligned(__alignof(u64)) */
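
A hedged usage sketch of the removal API declared above: callers serialize on the same device_update_lock that insertions take, and removing an (ip, cidr) pair is a no-op unless a node for exactly that prefix exists and belongs to the given peer. The example_* wrapper below is hypothetical:

static int example_drop_allowedip(struct wg_device *wg, struct wg_peer *peer,
				  const struct in_addr *ip, u8 cidr)
{
	int ret;

	mutex_lock(&wg->device_update_lock);
	ret = wg_allowedips_remove_v4(&wg->peer_allowedips, ip, cidr,
				      peer, &wg->device_update_lock);
	mutex_unlock(&wg->device_update_lock);
	return ret;	/* 0 on success or no-op, -EINVAL if cidr > 32 */
}
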
diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c
index f89581b5e8cb..94d0a7206084 100644
--- a/drivers/net/wireguard/cookie.c
+++ b/drivers/net/wireguard/cookie.c
@@ -26,8 +26,8 @@ void wg_cookie_checker_init(struct cookie_checker *checker,
}
enum { COOKIE_KEY_LABEL_LEN = 8 };
-static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----";
-static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--";
+static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] __nonstring = "mac1----";
+static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] __nonstring = "cookie--";
static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN],
const u8 pubkey[NOISE_PUBLIC_KEY_LEN],
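
The __nonstring annotations in this series mark byte arrays that intentionally lack a NUL terminator, which silences GCC 15's -Wunterminated-string-initialization when the initializer exactly fills the array. Minimal illustration:

/* four bytes of label data, deliberately not a C string */
static const u8 label_sketch[4] __nonstring = "abcd";
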
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 3ffeeba5dccf..4a529f1f9bea 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -366,6 +366,7 @@ static int wg_newlink(struct net_device *dev,
if (ret < 0)
goto err_free_handshake_queue;
+ dev_set_threaded(dev, true);
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index f7055180ba4a..67f962eb8b46 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -24,7 +24,7 @@ static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
[WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
[WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
- [WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
+ [WGDEVICE_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, __WGDEVICE_F_ALL),
[WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
[WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
[WGDEVICE_A_PEERS] = { .type = NLA_NESTED }
@@ -33,7 +33,7 @@ static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
[WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN),
- [WGPEER_A_FLAGS] = { .type = NLA_U32 },
+ [WGPEER_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, __WGPEER_F_ALL),
[WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
[WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)),
@@ -46,7 +46,8 @@ static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
[WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
[WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)),
- [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
+ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 },
+ [WGALLOWEDIP_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, __WGALLOWEDIP_F_ALL),
};
static struct wg_device *lookup_interface(struct nlattr **attrs,
@@ -329,6 +330,7 @@ static int set_port(struct wg_device *wg, u16 port)
static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
{
int ret = -EINVAL;
+ u32 flags = 0;
u16 family;
u8 cidr;
@@ -337,19 +339,30 @@ static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
return ret;
family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);
+ if (attrs[WGALLOWEDIP_A_FLAGS])
+ flags = nla_get_u32(attrs[WGALLOWEDIP_A_FLAGS]);
if (family == AF_INET && cidr <= 32 &&
- nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
- ret = wg_allowedips_insert_v4(
- &peer->device->peer_allowedips,
- nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
- &peer->device->device_update_lock);
- else if (family == AF_INET6 && cidr <= 128 &&
- nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
- ret = wg_allowedips_insert_v6(
- &peer->device->peer_allowedips,
- nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
- &peer->device->device_update_lock);
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) {
+ if (flags & WGALLOWEDIP_F_REMOVE_ME)
+ ret = wg_allowedips_remove_v4(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ else
+ ret = wg_allowedips_insert_v4(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ } else if (family == AF_INET6 && cidr <= 128 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) {
+ if (flags & WGALLOWEDIP_F_REMOVE_ME)
+ ret = wg_allowedips_remove_v6(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ else
+ ret = wg_allowedips_insert_v6(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ }
return ret;
}
@@ -373,9 +386,6 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_FLAGS])
flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);
- ret = -EOPNOTSUPP;
- if (flags & ~__WGPEER_F_ALL)
- goto out;
ret = -EPFNOSUPPORT;
if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
@@ -506,9 +516,6 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[WGDEVICE_A_FLAGS])
flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);
- ret = -EOPNOTSUPP;
- if (flags & ~__WGDEVICE_F_ALL)
- goto out;
if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
struct net *net;
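
Switching the flag attributes to NLA_POLICY_MASK moves unknown-bit rejection into the netlink validation core, which is why the open-coded flags & ~__WGDEVICE_F_ALL and ~__WGPEER_F_ALL tests above could be deleted. A sketch of the check the policy now performs before the handlers run:

static int validate_mask_sketch(u32 value, u32 allowed_mask,
				struct netlink_ext_ack *extack)
{
	if (value & ~allowed_mask) {
		NL_SET_ERR_MSG(extack, "reserved flag bits set");
		return -EINVAL;
	}
	return 0;
}
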
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index 202a33af5a72..7eb9a23a3d4d 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -25,8 +25,8 @@
* <- e, ee, se, psk, {}
*/
-static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
-static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com";
+static const u8 handshake_name[37] __nonstring = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
+static const u8 identifier_name[34] __nonstring = "WireGuard v1 zx2c4 Jason@zx2c4.com";
static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
static atomic64_t keypair_counter = ATOMIC64_INIT(0);
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 25de7058701a..41837efa70cb 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -460,6 +460,10 @@ static __init struct wg_peer *init_peer(void)
wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
cidr, mem, &mutex)
+#define remove(version, mem, ipa, ipb, ipc, ipd, cidr) \
+ wg_allowedips_remove_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
+ cidr, mem, &mutex)
+
#define maybe_fail() do { \
++i; \
if (!_s) { \
@@ -585,6 +589,50 @@ bool __init wg_allowedips_selftest(void)
test_negative(4, a, 192, 0, 0, 0);
test_negative(4, a, 255, 0, 0, 0);
+ insert(4, a, 1, 0, 0, 0, 32);
+ insert(4, a, 192, 0, 0, 0, 24);
+ insert(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ insert(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test(4, a, 1, 0, 0, 0);
+ test(4, a, 192, 0, 0, 1);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ test(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+ /* Must be an exact match to remove */
+ remove(4, a, 192, 0, 0, 0, 32);
+ test(4, a, 192, 0, 0, 1);
+ /* NULL peer should have no effect and return 0 */
+ test_boolean(!remove(4, NULL, 192, 0, 0, 0, 24));
+ test(4, a, 192, 0, 0, 1);
+ /* different peer should have no effect and return 0 */
+ test_boolean(!remove(4, b, 192, 0, 0, 0, 24));
+ test(4, a, 192, 0, 0, 1);
+ /* invalid CIDR should have no effect and return -EINVAL */
+ test_boolean(remove(4, b, 192, 0, 0, 0, 33) == -EINVAL);
+ test(4, a, 192, 0, 0, 1);
+ remove(4, a, 192, 0, 0, 0, 24);
+ test_negative(4, a, 192, 0, 0, 1);
+ remove(4, a, 1, 0, 0, 0, 32);
+ test_negative(4, a, 1, 0, 0, 0);
+ /* Must be an exact match to remove */
+ remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 96);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* NULL peer should have no effect and return 0 */
+ test_boolean(!remove(6, NULL, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128));
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* different peer should have no effect and return 0 */
+ test_boolean(!remove(6, b, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128));
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* invalid CIDR should have no effect and return -EINVAL */
+ test_boolean(remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 129) == -EINVAL);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ test_negative(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* Must match the peer to remove */
+ remove(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+ remove(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test_negative(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+
wg_allowedips_free(&t, &mutex);
wg_allowedips_init(&t);
insert(4, a, 192, 168, 0, 0, 16);
diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
index a9e0890c2f77..4016a3065602 100644
--- a/drivers/net/wireguard/timers.c
+++ b/drivers/net/wireguard/timers.c
@@ -40,8 +40,8 @@ static inline void mod_peer_timer(struct wg_peer *peer,
static void wg_expired_retransmit_handshake(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer,
- timer_retransmit_handshake);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_retransmit_handshake);
if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
@@ -78,7 +78,8 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
static void wg_expired_send_keepalive(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_send_keepalive);
wg_packet_send_keepalive(peer);
if (peer->timer_need_another_keepalive) {
@@ -90,7 +91,8 @@ static void wg_expired_send_keepalive(struct timer_list *timer)
static void wg_expired_new_handshake(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_new_handshake);
pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
peer->device->dev->name, peer->internal_id,
@@ -104,7 +106,8 @@ static void wg_expired_new_handshake(struct timer_list *timer)
static void wg_expired_zero_key_material(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_zero_key_material);
rcu_read_lock_bh();
if (!READ_ONCE(peer->is_dead)) {
@@ -134,8 +137,8 @@ static void wg_queued_expired_zero_key_material(struct work_struct *work)
static void wg_expired_send_persistent_keepalive(struct timer_list *timer)
{
- struct wg_peer *peer = from_timer(peer, timer,
- timer_persistent_keepalive);
+ struct wg_peer *peer = timer_container_of(peer, timer,
+ timer_persistent_keepalive);
if (likely(peer->persistent_keepalive_interval))
wg_packet_send_keepalive(peer);
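
The from_timer() to timer_container_of() conversions throughout this series are mechanical renames; both helpers reduce to container_of() on the embedded timer_list member. Rough shape of the helper, for reference:

#define timer_container_of_sketch(var, callback_timer, timer_fieldname)	\
	container_of(callback_timer, typeof(*var), timer_fieldname)
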
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 96dc2778022a..343c9de2749c 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -902,7 +902,7 @@ static void ar5523_tx_work(struct work_struct *work)
static void ar5523_tx_wd_timer(struct timer_list *t)
{
- struct ar5523 *ar = from_timer(ar, t, tx_wd_timer);
+ struct ar5523 *ar = timer_container_of(ar, t, tx_wd_timer);
ar5523_dbg(ar, "TX watchdog timer triggered\n");
ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index db9f9ebcb62d..eb8b35b6224d 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -497,7 +497,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
ar_ahb->mem, ar_ahb->mem_len,
ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
return 0;
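
Context for the %pK to %p conversions repeated through the ath10k hunks: plain %p already prints a hashed pointer value by default, so %pK buys nothing in debug output. Illustration, as a hypothetical wrapper:

static void dbg_ptr_sketch(struct ath10k *ar, void *buf)
{
	/* %p emits a hashed address unless no_hash_pointers is set */
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "buf %p\n", buf);
}
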
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 9a4f8e815412..48efdc71d54d 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -349,7 +349,7 @@ static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 l
int ret;
size_t buf_len;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -395,7 +395,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -461,7 +461,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
- "bmi fast download address 0x%x buffer 0x%pK length %d\n",
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index afae4a8027f8..a89a7491a76c 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -80,7 +80,7 @@ static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
- struct ath10k_hw_ce_regs_addr_map *addr_map)
+ const struct ath10k_hw_ce_regs_addr_map *addr_map)
{
return ((offset << addr_map->lsb) & addr_map->mask);
}
@@ -203,7 +203,7 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -217,7 +217,7 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -231,7 +231,7 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -313,7 +313,7 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
@@ -325,7 +325,7 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
@@ -337,7 +337,7 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
@@ -349,7 +349,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
@@ -360,7 +360,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+ const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -372,7 +372,7 @@ static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+ const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -384,7 +384,7 @@ static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -396,7 +396,7 @@ static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+ const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
u32 misc_ie_addr = ath10k_ce_read32(ar,
ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
@@ -410,7 +410,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int mask)
{
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
@@ -1230,7 +1230,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
/*
@@ -1388,7 +1388,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot init ce src ring id %d entries %d base_addr %pK\n",
+ "boot init ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
@@ -1426,7 +1426,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %pK\n",
+ "boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 6d336e39d673..fe3a8f4a1cc1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1200,7 +1200,7 @@ static int ath10k_download_fw(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot uploading firmware image %pK len %d\n",
+ "boot uploading firmware image %p len %d\n",
data, data_len);
/* Check if device supports to download firmware via
@@ -1826,7 +1826,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
if (!ar->running_fw->fw_file.otp_data ||
!ar->running_fw->fw_file.otp_len) {
- ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
ar->running_fw->fw_file.otp_data,
ar->running_fw->fw_file.otp_len);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index a6e21ce90bad..2da08dfebd3e 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -34,7 +34,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
return skb;
}
@@ -54,7 +54,7 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct ath10k *ar = ep->htc->ar;
struct ath10k_htc_hdr *hdr;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
/* A corner case where the copy completion reaches the host but still
@@ -515,7 +515,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
/* zero length packet with trailer data, just drop these */
goto out;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 83eab7479f06..fb0d5d4cae3a 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -257,7 +257,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
- struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
+ struct ath10k_htt *htt = timer_container_of(htt, t,
+ rx_ring.refill_retry_timer);
ath10k_htt_rx_msdu_buff_replenish(htt);
}
@@ -1373,7 +1374,7 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
}
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 8fafe096adff..84b35a22fc23 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -212,40 +212,40 @@ const struct ath10k_hw_regs wcn3990_regs = {
.pcie_intr_fw_mask = 0x00100000,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(17, 17),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
.msb = 0x00000012,
.lsb = 0x00000012,
.mask = GENMASK(18, 18),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
+static const struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
.addr = 0x00000018,
.src_ring = &wcn3990_src_ring,
.dst_ring = &wcn3990_dst_ring,
.dmax = &wcn3990_dmax,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
.mask = GENMASK(0, 0),
};
-static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
+static const struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
.copy_complete = &wcn3990_host_ie_cc,
};
-static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
+static const struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
@@ -255,7 +255,7 @@ static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.addr = 0x00000030,
};
-static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
+static const struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.axi_err = 0x00000100,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
@@ -266,19 +266,19 @@ static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.addr = 0x00000038,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
.msb = 0x00000000,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -286,18 +286,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.wm_high = &wcn3990_src_wm_high,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -305,7 +305,7 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.wm_high = &wcn3990_dst_wm_high,
};
-static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
+static const struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
.shift = 19,
.mask = 0x00080000,
.enable = 0x00000000,
@@ -344,25 +344,25 @@ const struct ath10k_hw_values wcn3990_values = {
.ce_desc_meta_data_lsb = 4,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(16, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
.msb = 0x00000011,
.lsb = 0x00000011,
.mask = GENMASK(17, 17),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
+static const struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.addr = 0x00000010,
.hw_mask = 0x0007ffff,
.sw_mask = 0x0007ffff,
@@ -375,31 +375,31 @@ static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.dmax = &qcax_dmax,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
.msb = 0x00000003,
.lsb = 0x00000003,
.mask = GENMASK(3, 3),
};
-static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
+static const struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
.msb = 0x00000000,
.mask = GENMASK(0, 0),
.status_reset = 0x00000000,
.status = &qcax_cmd_halt_status,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(0, 0),
};
-static struct ath10k_hw_ce_host_ie qcax_host_ie = {
+static const struct ath10k_hw_ce_host_ie qcax_host_ie = {
.copy_complete_reset = 0x00000000,
.copy_complete = &qcax_host_ie_cc,
};
-static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
+static const struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
@@ -409,7 +409,7 @@ static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.addr = 0x00000030,
};
-static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
+static const struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.axi_err = 0x00000400,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
@@ -420,19 +420,19 @@ static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.addr = 0x00000038,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
.msb = 0x0000001f,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -440,18 +440,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.wm_high = &qcax_src_wm_high,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 442091c6dfd2..7ffa1fbe2874 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -289,19 +289,22 @@ struct ath10k_hw_ce_ctrl1 {
u32 sw_wr_mask;
u32 reset_mask;
u32 reset;
- struct ath10k_hw_ce_regs_addr_map *src_ring;
- struct ath10k_hw_ce_regs_addr_map *dst_ring;
- struct ath10k_hw_ce_regs_addr_map *dmax; };
+ const struct ath10k_hw_ce_regs_addr_map *src_ring;
+ const struct ath10k_hw_ce_regs_addr_map *dst_ring;
+ const struct ath10k_hw_ce_regs_addr_map *dmax;
+};
struct ath10k_hw_ce_cmd_halt {
u32 status_reset;
u32 msb;
u32 mask;
- struct ath10k_hw_ce_regs_addr_map *status; };
+ const struct ath10k_hw_ce_regs_addr_map *status;
+};
struct ath10k_hw_ce_host_ie {
u32 copy_complete_reset;
- struct ath10k_hw_ce_regs_addr_map *copy_complete; };
+ const struct ath10k_hw_ce_regs_addr_map *copy_complete;
+};
struct ath10k_hw_ce_host_wm_regs {
u32 dstr_lmask;
@@ -328,8 +331,9 @@ struct ath10k_hw_ce_dst_src_wm_regs {
u32 addr;
u32 low_rst;
u32 high_rst;
- struct ath10k_hw_ce_regs_addr_map *wm_low;
- struct ath10k_hw_ce_regs_addr_map *wm_high; };
+ const struct ath10k_hw_ce_regs_addr_map *wm_low;
+ const struct ath10k_hw_ce_regs_addr_map *wm_high;
+};
struct ath10k_hw_ce_ctrl1_upd {
u32 shift;
@@ -355,14 +359,14 @@ struct ath10k_hw_ce_regs {
u32 ce_rri_low;
u32 ce_rri_high;
u32 host_ie_addr;
- struct ath10k_hw_ce_host_wm_regs *wm_regs;
- struct ath10k_hw_ce_misc_regs *misc_regs;
- struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
- struct ath10k_hw_ce_cmd_halt *cmd_halt;
- struct ath10k_hw_ce_host_ie *host_ie;
- struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
- struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
- struct ath10k_hw_ce_ctrl1_upd *upd;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs;
+ const struct ath10k_hw_ce_misc_regs *misc_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
+ const struct ath10k_hw_ce_cmd_halt *cmd_halt;
+ const struct ath10k_hw_ce_host_ie *host_ie;
+ const struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+ const struct ath10k_hw_ce_ctrl1_upd *upd;
};
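
With every level of the copy-engine register description now const-qualified, both the leaf address maps and the structures pointing at them can be placed in .rodata. The pattern, in miniature with hypothetical names:

static const struct ath10k_hw_ce_regs_addr_map example_map_sketch = {
	.mask = GENMASK(0, 0),
};

static const struct ath10k_hw_ce_host_ie example_ie_sketch = {
	.copy_complete = &example_map_sketch,	/* const pointee, const pointer */
};
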
struct ath10k_hw_values {
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index c61b95a928da..07fe05384cdf 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "mac.h"
@@ -875,7 +876,7 @@ static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
*/
for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
peer->addr, peer, i);
ar->peer_map[i] = NULL;
}
@@ -1022,6 +1023,26 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
return ar->last_wmi_vdev_start_status;
}
+static inline int ath10k_vdev_delete_sync(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map))
+ return 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH10K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = NULL;
@@ -4063,7 +4084,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %p len %d\n",
skb, skb->len);
skb_queue_tail(&ar->offchan_tx_queue, skb);
@@ -4126,7 +4147,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p len %d\n",
skb, skb->len);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -4181,7 +4202,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
+ ath10k_warn(ar, "timed out waiting for offchannel skb %p, len: %d\n",
skb, skb->len);
if (!peer && tmp_peer_created) {
@@ -5900,7 +5921,6 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_peer *peer;
- unsigned long time_left;
int ret;
int i;
@@ -5940,13 +5960,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
- if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
- time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
- ATH10K_VDEV_DELETE_TIMEOUT_HZ);
- if (time_left == 0) {
- ath10k_warn(ar, "Timeout in receiving vdev delete response\n");
- goto out;
- }
+ ret = ath10k_vdev_delete_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "Error in receiving vdev delete response: %d\n", ret);
+ goto out;
}
/* Some firmware revisions don't notify host about self-peer removal
@@ -7604,7 +7621,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_STA,
- "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+ "mac vdev %d peer delete %pM sta %p (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
if (sta->tdls) {
@@ -7631,7 +7648,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
continue;
if (peer->sta == sta) {
- ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
+ ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
sta->addr, peer, i, arvif->vdev_id);
peer->sta = NULL;
@@ -8811,7 +8828,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx add freq %u width %d ptr %pK\n",
+ "mac chanctx add freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -8835,7 +8852,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx remove freq %u width %d ptr %pK\n",
+ "mac chanctx remove freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -8900,7 +8917,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx change freq %u width %d ptr %pK changed %x\n",
+ "mac chanctx change freq %u width %d ptr %p changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -8959,7 +8976,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx assign ptr %pK vdev_id %i\n",
+ "mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
if (WARN_ON(arvif->is_started)) {
@@ -9039,7 +9056,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx unassign ptr %pK vdev_id %i\n",
+ "mac chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index fb2c60ee433c..1e6d43285138 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -591,7 +591,7 @@ skip:
static void ath10k_pci_ps_timer(struct timer_list *t)
{
- struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
+ struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t, ps_timer);
struct ath10k *ar = ar_pci->ar;
unsigned long flags;
@@ -844,7 +844,8 @@ void ath10k_pci_rx_post(struct ath10k *ar)
void ath10k_pci_rx_replenish_retry(struct timer_list *t)
{
- struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
+ struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t,
+ rx_post_retry);
struct ath10k *ar = ar_pci->ar;
ath10k_pci_rx_post(ar);
@@ -3411,7 +3412,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
goto err_region;
}
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
return 0;
err_region:
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 7ce74b4ef201..c06d50db40b8 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1447,7 +1447,8 @@ release:
static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
{
- struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
+ struct ath10k_sdio *ar_sdio = timer_container_of(ar_sdio, t,
+ sleep_timer);
ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
@@ -1844,7 +1845,7 @@ static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) {
ath10k_warn(ar,
- "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
+ "unable to read hi_acs_flags for htt tx complete: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 866bad2db334..d51f2e5a79a4 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -644,7 +644,8 @@ static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
{
- struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
+ struct ath10k_snoc *ar_snoc = timer_container_of(ar_snoc, t,
+ rx_post_retry);
struct ath10k *ar = ar_snoc->ar;
ath10k_snoc_rx_post(ar);
@@ -937,7 +938,9 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
dev_set_threaded(ar->napi_dev, true);
ath10k_core_napi_enable(ar);
- ath10k_snoc_irq_enable(ar);
+ /* IRQs are left enabled when we restart due to a firmware crash */
+ if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 7a9b9bbcdbfc..3fcefc55b74f 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -35,7 +35,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
+ "testmode event wmi cmd_id %d skb %p skb->len %d\n",
cmd_id, skb, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@@ -397,7 +397,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index da3bc35e41aa..493bfb410aff 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -35,7 +35,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
- ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 3b51b7f52130..1732a4f98418 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -131,7 +131,7 @@ static void ath10k_usb_recv_complete(struct urb *urb)
int status = 0;
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
- "usb recv pipe %d stat %d len %d urb 0x%pK\n",
+ "usb recv pipe %d stat %d len %d urb 0x%p\n",
pipe->logical_pipe_num, urb->status, urb->actual_length,
urb);
@@ -230,7 +230,7 @@ static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
ath10k_usb_recv_complete, urb_context);
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
- "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n",
+ "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%p\n",
recv_pipe->logical_pipe_num,
recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 5e061f7525a6..df6a24f8f8d5 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2029,7 +2029,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
memcpy(cmd->buf, msdu->data, msdu->len);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2637,7 +2637,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->boottime_ns = ktime_get_boottime_ns();
ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 2e935d381b6b..659ef134ef16 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -24,7 +24,7 @@ config ATH11K_PCI
select MHI_BUS
select QRTR
select QRTR_MHI
- select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
help
This module adds support for the PCIe bus
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 2f862f8f10ca..fde1ce43c499 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -413,7 +413,7 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab)
return ret;
}
-static void ath11k_ahb_power_down(struct ath11k_base *ab)
+static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -1280,7 +1280,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
struct ath11k_base *ab = platform_get_drvdata(pdev);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab);
+ ath11k_ahb_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index e66e86bdec20..746038006eb4 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -393,11 +393,10 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
goto err;
}
+ /* Make sure descriptor is read after the head pointer. */
+ dma_rmb();
+
*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
- if (*nbytes == 0) {
- ret = -EIO;
- goto err;
- }
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -430,8 +429,8 @@ static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
- if (unlikely(max_nbytes < nbytes)) {
- ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
+ ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
@@ -908,7 +907,7 @@ EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
- struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
+ struct ath11k_base *ab = timer_container_of(ab, t, rx_replenish_retry);
ath11k_ce_rx_post_buf(ab);
}
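
The dma_rmb() added in the ce.c hunk above pairs with the device's publication of new completions: the CPU must observe the updated head pointer before loading the descriptor fields it guards, and a zero length is now rejected by the caller alongside oversized frames instead of returning -EIO here. In sketch form:

static inline u32 ce_read_published_len_sketch(void *desc)
{
	dma_rmb();	/* head-pointer load above, descriptor loads below */
	return ath11k_hal_ce_dst_status_get_length(desc);
}
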
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 3d39ff85ba94..22a101136135 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -907,12 +907,51 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
};
-static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
-{
- WARN_ON(!ab->hw_params.single_pdev_only);
-
- return &ab->pdevs[0];
-}
+static const struct dmi_system_id ath11k_pm_quirk_table[] = {
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
+ },
+ },
+ {}
+};
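
A hypothetical sketch of how the quirk table above is typically consumed (the actual lookup site is outside this hunk): dmi_first_match() returns the first matching entry, and its ->driver_data carries the PM policy to force on those Lenovo platforms. The default-policy name below is assumed, not taken from this hunk:

static long example_pm_policy_from_dmi(void)
{
	const struct dmi_system_id *id = dmi_first_match(ath11k_pm_quirk_table);

	/* ATH11K_PM_DEFAULT is a placeholder for the driver's fallback */
	return id ? (long)id->driver_data : ATH11K_PM_DEFAULT;
}
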
void ath11k_fw_stats_pdevs_free(struct list_head *head)
{
@@ -951,6 +990,7 @@ void ath11k_fw_stats_init(struct ath11k *ar)
INIT_LIST_HEAD(&ar->fw_stats.bcn);
init_completion(&ar->fw_stats_complete);
+ init_completion(&ar->fw_stats_done);
}
void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
@@ -972,23 +1012,33 @@ bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab)
return ab->hw_params.coldboot_cal_mm;
}
-int ath11k_core_suspend(struct ath11k_base *ab)
+/* Check if we need to continue with the suspend/resume operation.
+ * Return:
+ * a negative value: an error happened, do not continue.
+ * 0: no error, but do not continue.
+ * a positive value: no error, do continue.
+ */
+static int ath11k_core_continue_suspend_resume(struct ath11k_base *ab)
{
- int ret;
- struct ath11k_pdev *pdev;
struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
/* so far single_pdev_only chips have supports_suspend as true
- * and only the first pdev is valid.
+ * so it is valid to use the first, and the only, pdev.
*/
- pdev = ath11k_core_get_single_pdev(ab);
- ar = pdev->ar;
+ ar = ab->pdevs[0].ar;
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
+ return 1;
+}
+
+static int ath11k_core_suspend_wow(struct ath11k_base *ab)
+{
+ int ret;
+
ret = ath11k_dp_rx_pktlog_stop(ab, true);
if (ret) {
ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
@@ -996,7 +1046,10 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_mac_wait_tx_complete(ar);
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar);
if (ret) {
ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
@@ -1029,24 +1082,146 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return 0;
}
+
+static int ath11k_core_suspend_default(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, true);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, false);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath11k_ce_stop_shadow_timers(ab);
+ ath11k_dp_stop_shadow_timers(ab);
+
+ /* PM framework skips suspend_late/resume_early callbacks
+ * if other devices report errors in their suspend callbacks.
+ * However ath11k_core_resume() would still be called, because
+ * we return success here and the kernel therefore puts us on the
+ * dpm_suspended_list. Since we won't go through a power down/up
+ * cycle, there is no chance to call complete(&ab->restart_completed)
+ * in ath11k_core_restart(), making ath11k_core_resume() time out.
+ * So call it here to avoid that issue. This is also safe when no
+ * error happens and suspend_late/resume_early do get called,
+ * because it will be reinitialized in ath11k_core_resume_early().
+ */
+ complete(&ab->restart_completed);
+
+ return 0;
+}
+
+int ath11k_core_suspend(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return ath11k_core_suspend_wow(ab);
+
+ return ath11k_core_suspend_default(ab);
+}
EXPORT_SYMBOL(ath11k_core_suspend);
-int ath11k_core_resume(struct ath11k_base *ab)
+int ath11k_core_suspend_late(struct ath11k_base *ab)
{
int ret;
- struct ath11k_pdev *pdev;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return 0;
+
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
+ ath11k_hif_power_down(ab, true);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_suspend_late);
+
+int ath11k_core_resume_early(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return 0;
+
+ reinit_completion(&ab->restart_completed);
+ ret = ath11k_hif_power_up(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_core_resume_early);
+
+static int ath11k_core_resume_default(struct ath11k_base *ab)
+{
struct ath11k *ar;
+ long time_left;
+ int ret;
- if (!ab->hw_params.supports_suspend)
- return -EOPNOTSUPP;
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH11K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
+ }
- /* so far signle_pdev_only chips have supports_suspend as true
- * and only the first pdev is valid.
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
*/
- pdev = ath11k_core_get_single_pdev(ab);
- ar = pdev->ar;
- if (!ar || ar->state != ATH11K_STATE_OFF)
- return 0;
+ ar = ab->pdevs[0].ar;
+ if (ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ ret = ath11k_reg_set_cc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to set country code during resume: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_dp_rx_pktlog_start(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath11k_core_resume_wow(struct ath11k_base *ab)
+{
+ int ret;
ret = ath11k_hif_resume(ab);
if (ret) {
@@ -1072,6 +1247,20 @@ int ath11k_core_resume(struct ath11k_base *ab)
return 0;
}
+
+int ath11k_core_resume(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return ath11k_core_resume_wow(ab);
+
+ return ath11k_core_resume_default(ab);
+}
EXPORT_SYMBOL(ath11k_core_resume);
static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
@@ -1946,6 +2135,20 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
+ switch (ath11k_crypto_mode) {
+ case ATH11K_CRYPT_MODE_SW:
+ set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ case ATH11K_CRYPT_MODE_HW:
+ clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ default:
+ ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+ return -EINVAL;
+ }
+
ret = ath11k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath11k_err(ab, "failed to start firmware: %d\n", ret);
@@ -1964,20 +2167,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
goto err_firmware_stop;
}
- switch (ath11k_crypto_mode) {
- case ATH11K_CRYPT_MODE_SW:
- set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
- set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
- break;
- case ATH11K_CRYPT_MODE_HW:
- clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
- clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
- break;
- default:
- ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
- return -EINVAL;
- }
-
if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
@@ -2050,6 +2239,7 @@ err_hal_srng_deinit:
void ath11k_core_halt(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
+ struct list_head *pos, *n;
lockdep_assert_held(&ar->conf_mutex);
@@ -2065,7 +2255,12 @@ void ath11k_core_halt(struct ath11k *ar)
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
- INIT_LIST_HEAD(&ar->arvifs);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_safe(pos, n, &ar->arvifs)
+ list_del_init(pos);
+ spin_unlock_bh(&ar->data_lock);
+
idr_init(&ar->txmgmt_idr);
}
@@ -2205,6 +2400,8 @@ static void ath11k_core_restart(struct work_struct *work)
if (!ab->is_reset)
ath11k_core_post_reconfigure_recovery(ab);
+
+ complete(&ab->restart_completed);
}
static void ath11k_core_reset(struct work_struct *work)
@@ -2275,7 +2472,7 @@ static void ath11k_core_reset(struct work_struct *work)
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
@@ -2325,10 +2522,62 @@ int ath11k_core_pre_init(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_core_pre_init);
+static int ath11k_core_pm_notify(struct notifier_block *nb,
+ unsigned long action, void *nouse)
+{
+ struct ath11k_base *ab = container_of(nb, struct ath11k_base,
+ pm_nb);
+
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ ab->actual_pm_policy = ab->pm_policy;
+ break;
+ case PM_HIBERNATION_PREPARE:
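+ /* WoW state cannot survive hibernation's power-off, so use the default policy */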
+ ab->actual_pm_policy = ATH11K_PM_DEFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int ath11k_core_pm_notifier_register(struct ath11k_base *ab)
+{
+ ab->pm_nb.notifier_call = ath11k_core_pm_notify;
+ return register_pm_notifier(&ab->pm_nb);
+}
+
+void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = unregister_pm_notifier(&ab->pm_nb);
+ if (ret)
+ /* just warn here, there is nothing more we can do on failure */
+ ath11k_warn(ab, "failed to unregister PM notifier %d\n", ret);
+}
+EXPORT_SYMBOL(ath11k_core_pm_notifier_unregister);
+
int ath11k_core_init(struct ath11k_base *ab)
{
+ const struct dmi_system_id *dmi_id;
int ret;
+ dmi_id = dmi_first_match(ath11k_pm_quirk_table);
+ if (dmi_id)
+ ab->pm_policy = (kernel_ulong_t)dmi_id->driver_data;
+ else
+ ab->pm_policy = ATH11K_PM_DEFAULT;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pm policy %u\n", ab->pm_policy);
+
+ ret = ath11k_core_pm_notifier_register(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to register PM notifier: %d\n", ret);
+ return ret;
+ }
+
ret = ath11k_core_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create soc core: %d\n", ret);
@@ -2348,9 +2597,10 @@ void ath11k_core_deinit(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
+ ath11k_core_pm_notifier_unregister(ab);
}
EXPORT_SYMBOL(ath11k_core_deinit);
@@ -2401,6 +2651,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
+ init_completion(&ab->restart_completed);
ab->dev = dev;
ab->hif.bus = bus;
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 1a3d0de4afde..6b2f207975e3 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -16,6 +16,7 @@
#include <linux/rhashtable.h>
#include <linux/average.h>
#include <linux/firmware.h>
+#include <linux/suspend.h>
#include "qmi.h"
#include "htc.h"
@@ -599,6 +600,8 @@ struct ath11k_fw_stats {
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
+ u32 num_vdev_recvd;
+ u32 num_bcn_recvd;
};
struct ath11k_dbg_htt_stats {
@@ -783,7 +786,7 @@ struct ath11k {
u8 alpha2[REG_ALPHA2_LEN + 1];
struct ath11k_fw_stats fw_stats;
struct completion fw_stats_complete;
- bool fw_stats_done;
+ struct completion fw_stats_done;
/* protected by conf_mutex */
bool ps_state_enable;
@@ -892,6 +895,11 @@ struct ath11k_msi_config {
u16 hw_rev;
};
+enum ath11k_pm_policy {
+ ATH11K_PM_DEFAULT,
+ ATH11K_PM_WOW,
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath11k_base {
enum ath11k_hw_rev hw_rev;
@@ -1050,6 +1058,8 @@ struct ath11k_base {
DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT);
} fw;
+ struct completion restart_completed;
+
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 data_pos;
@@ -1058,6 +1068,10 @@ struct ath11k_base {
} testmode;
#endif
+ enum ath11k_pm_policy pm_policy;
+ enum ath11k_pm_policy actual_pm_policy;
+ struct notifier_block pm_nb;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1249,8 +1263,10 @@ void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
+int ath11k_core_resume_early(struct ath11k_base *ab);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
+int ath11k_core_suspend_late(struct ath11k_base *ab);
void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
@@ -1322,4 +1338,6 @@ static inline const char *ath11k_bus_str(enum ath11k_bus bus)
return "unknown";
}
+void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab);
+
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index bf192529e3fe..5d46f8e4c231 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -93,57 +93,14 @@ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
spin_unlock_bh(&dbr_data->lock);
}
-static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
-{
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
- spin_unlock_bh(&ar->data_lock);
-}
-
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
{
struct ath11k_base *ab = ar->ab;
- struct ath11k_pdev *pdev;
- bool is_end;
- static unsigned int num_vdev, num_bcn;
- size_t total_vdevs_started = 0;
- int i;
-
- /* WMI_REQUEST_PDEV_STAT request has been already processed */
-
- if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
- ar->fw_stats_done = true;
- return;
- }
-
- if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats->vdevs)) {
- ath11k_warn(ab, "empty vdev stats");
- return;
- }
- /* FW sends all the active VDEV stats irrespective of PDEV,
- * hence limit until the count of all VDEVs started
- */
- for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar)
- total_vdevs_started += ar->num_started_vdevs;
- }
-
- is_end = ((++num_vdev) == total_vdevs_started);
-
- list_splice_tail_init(&stats->vdevs,
- &ar->fw_stats.vdevs);
-
- if (is_end) {
- ar->fw_stats_done = true;
- num_vdev = 0;
- }
- return;
- }
+ bool is_end = true;
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and
+ * WMI_REQUEST_VDEV_STAT requests have been already processed.
+ */
if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
if (list_empty(&stats->bcn)) {
ath11k_warn(ab, "empty bcn stats");
@@ -152,97 +109,18 @@ void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
- is_end = ((++num_bcn) == ar->num_started_vdevs);
+ if (ar->num_started_vdevs)
+ is_end = ((++ar->fw_stats.num_bcn_recvd) ==
+ ar->num_started_vdevs);
list_splice_tail_init(&stats->bcn,
&ar->fw_stats.bcn);
- if (is_end) {
- ar->fw_stats_done = true;
- num_bcn = 0;
- }
+ if (is_end)
+ complete(&ar->fw_stats_done);
}
}
-static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long timeout, time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + secs_to_jiffies(3);
-
- ath11k_debugfs_fw_stats_reset(ar);
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
-
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
-
- if (!time_left)
- return -ETIMEDOUT;
-
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
-
- spin_lock_bh(&ar->data_lock);
- if (ar->fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
- }
- spin_unlock_bh(&ar->data_lock);
- }
- return 0;
-}
-
-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id)
-{
- struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- req_param.pdev_id = pdev_id;
- req_param.vdev_id = vdev_id;
- req_param.stats_id = stats_id;
-
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
- if (ret)
- ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
-
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
- pdev_id, vdev_id, stats_id);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
-
- return ret;
-}
-
static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
@@ -268,7 +146,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_PDEV_STAT;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_free;
@@ -339,7 +217,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_VDEV_STAT;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
goto err_free;
@@ -415,7 +293,7 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
continue;
req_param.vdev_id = arvif->vdev_id;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
goto err_free;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index a39e458637b0..ed7fec177588 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_DEBUGFS_H_
@@ -273,8 +273,6 @@ void ath11k_debugfs_unregister(struct ath11k *ar);
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id);
static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
{
@@ -381,12 +379,6 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return 0;
}
-static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
- u32 pdev_id, u32 vdev_id, u32 stats_id)
-{
- return 0;
-}
-
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 3a544e5fefca..bf3928ada995 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1117,8 +1117,9 @@ fail_link_desc_cleanup:
static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
- struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
- t, timer);
+ struct ath11k_hp_update_timer *update_timer = timer_container_of(update_timer,
+ t,
+ timer);
struct ath11k_base *ab = update_timer->ab;
struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 218ab41c0f3c..9230a965f6f0 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -308,7 +308,7 @@ static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
- struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
+ struct ath11k_base *ab = timer_container_of(ab, t, mon_reap_timer);
int i;
for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
@@ -2637,7 +2637,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
struct ath11k *ar;
struct hal_reo_dest_ring *desc;
enum hal_reo_dest_ring_push_reason push_reason;
- u32 cookie;
+ u32 cookie, info0, rx_msdu_info0, rx_mpdu_info0;
int i;
for (i = 0; i < MAX_RADIOS; i++)
@@ -2650,11 +2650,14 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
try_again:
ath11k_hal_srng_access_begin(ab, srng);
+ /* Make sure descriptor is read after the head pointer. */
+ dma_rmb();
+
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
- desc->buf_addr_info.info1);
+ READ_ONCE(desc->buf_addr_info.info1));
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@@ -2683,8 +2686,9 @@ try_again:
num_buffs_reaped[mac_id]++;
+ info0 = READ_ONCE(desc->info0);
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
- desc->info0);
+ info0);
if (unlikely(push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
@@ -2692,18 +2696,21 @@ try_again:
continue;
}
- rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ rx_msdu_info0 = READ_ONCE(desc->rx_msdu_info.info0);
+ rx_mpdu_info0 = READ_ONCE(desc->rx_mpdu_info.info0);
+
+ rxcb->is_first_msdu = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_last_msdu = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_continuation = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
- desc->rx_mpdu_info.meta_data);
+ READ_ONCE(desc->rx_mpdu_info.meta_data));
rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
- desc->rx_mpdu_info.info0);
+ rx_mpdu_info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
- desc->info0);
+ info0);
rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list[mac_id], msdu);
@@ -3167,7 +3174,8 @@ move_next:
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
- struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
+ struct dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
+ frag_timer);
spin_lock_bh(&rx_tid->ab->base_lock);
if (rx_tid->last_frag_no &&
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 61f4b6dd5380..8cb1505a5a0c 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -599,7 +599,7 @@ u32 ath11k_hal_ce_dst_status_get_length(void *buf)
struct hal_ce_srng_dst_status_desc *desc = buf;
u32 len;
- len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+ len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, READ_ONCE(desc->flags));
desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
return len;
@@ -829,7 +829,7 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
} else {
- srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 770c39ff99b4..cd9c4b838246 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HIF_H_
@@ -18,7 +18,7 @@ struct ath11k_hif_ops {
int (*start)(struct ath11k_base *ab);
void (*stop)(struct ath11k_base *ab);
int (*power_up)(struct ath11k_base *ab);
- void (*power_down)(struct ath11k_base *ab);
+ void (*power_down)(struct ath11k_base *ab, bool is_suspend);
int (*suspend)(struct ath11k_base *ab);
int (*resume)(struct ath11k_base *ab);
int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id,
@@ -68,12 +68,18 @@ static inline void ath11k_hif_irq_disable(struct ath11k_base *ab)
static inline int ath11k_hif_power_up(struct ath11k_base *ab)
{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
return ab->hif.ops->power_up(ab);
}
-static inline void ath11k_hif_power_down(struct ath11k_base *ab)
+static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend)
{
- ab->hif.ops->power_down(ab);
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
}
static inline int ath11k_hif_suspend(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 97816916abac..13301ca317a5 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1531,8 +1531,14 @@ static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif)
{
- if (arvif->vif->mbssid_tx_vif)
- return ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+ struct ieee80211_bss_conf *link_conf, *tx_bss_conf;
+
+ lockdep_assert_wiphy(arvif->ar->hw->wiphy);
+
+ link_conf = &arvif->vif->bss_conf;
+ tx_bss_conf = wiphy_dereference(arvif->ar->hw->wiphy, link_conf->tx_bss_conf);
+ if (tx_bss_conf)
+ return ath11k_vif_to_arvif(tx_bss_conf->vif);
return NULL;
}
@@ -8991,6 +8997,81 @@ static void ath11k_mac_put_chain_rssi(struct station_info *sinfo,
}
}
+static void ath11k_mac_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+ ar->fw_stats.num_vdev_recvd = 0;
+ ar->fw_stats.num_bcn_recvd = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_mac_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+ reinit_completion(&ar->fw_stats_done);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marking for the back-to-back
+ * received 'update stats' events, we keep a 3 second timeout in case
+ * fw_stats_done is not marked yet.
+ */
+ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
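
ath11k_mac_fw_stats_request() replaces the removed debugfs helper's
busy-wait loop (which polled ar->fw_stats_done under data_lock) with a
second completion. A minimal sketch of the flag-to-completion conversion,
with a hypothetical 'done' completion and sender standing in:

    reinit_completion(&done);             /* before each request */
    ret = send_request();                 /* hypothetical sender */
    if (ret)
        return ret;
    if (!wait_for_completion_timeout(&done, 3 * HZ))
        return -ETIMEDOUT;   /* event handler never ran complete(&done) */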
+
+static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return -ENETDOWN;
+
+ req_param.pdev_id = pdev_id;
+ req_param.vdev_id = vdev_id;
+ req_param.stats_id = stats_id;
+
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ if (ret)
+ ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+ return ret;
+}
+
static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -9025,11 +9106,12 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false);
+ mutex_lock(&ar->conf_mutex);
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
- !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
+ !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
}
@@ -9037,9 +9119,10 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
if (!signal &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
- !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_VDEV_STAT)))
+ !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
signal = arsta->rssi_beacon;
+ mutex_unlock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"sta statistics db2dbm %u rssi comb %d rssi beacon %d\n",
@@ -9374,38 +9457,6 @@ exit:
return ret;
}
-static int ath11k_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- spin_unlock_bh(&ar->data_lock);
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
- 1 * HZ);
-
- if (!time_left)
- return -ETIMEDOUT;
-
- return 0;
-}
-
static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id,
@@ -9413,7 +9464,6 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param = {0};
struct ath11k_fw_stats_pdev *pdev;
int ret;
@@ -9425,9 +9475,6 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
*/
mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH11K_STATE_ON)
- goto err_fallback;
-
/* Firmware doesn't provide Tx power during CAC hence no need to fetch
* the stats.
*/
@@ -9436,10 +9483,8 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
return -EAGAIN;
}
- req_param.pdev_id = ar->pdev->pdev_id;
- req_param.stats_id = WMI_REQUEST_PDEV_STAT;
-
- ret = ath11k_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_PDEV_STAT);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_fallback;
@@ -9966,12 +10011,17 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
struct ath11k_base *ab = ar->ab;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
- int n_limits;
+ int n_limits, n_combos;
bool p2p;
p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
- combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (ab->hw_params.support_dual_stations)
+ n_combos = 2;
+ else
+ n_combos = 1;
+
+ combinations = kcalloc(n_combos, sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
@@ -9986,7 +10036,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
+ limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+ limits[1].max = 16;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
@@ -9996,25 +10048,24 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
combinations[0].n_limits = n_limits;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
if (ab->hw_params.support_dual_stations) {
limits[0].max = 2;
- limits[1].max = 1;
-
- combinations[0].max_interfaces = ab->hw_params.num_vdevs;
- combinations[0].num_different_channels = 2;
- } else {
- limits[0].max = 1;
- limits[1].max = 16;
- combinations[0].max_interfaces = 16;
- combinations[0].num_different_channels = 1;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_80P80) |
- BIT(NL80211_CHAN_WIDTH_160);
+ combinations[1].limits = limits;
+ combinations[1].n_limits = n_limits;
+ combinations[1].beacon_int_infra_match = true;
+ combinations[1].beacon_int_min_gcd = 100;
+ combinations[1].max_interfaces = ab->hw_params.num_vdevs;
+ combinations[1].num_different_channels = 2;
}
if (p2p) {
@@ -10025,7 +10076,7 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
}
ar->hw->wiphy->iface_combinations = combinations;
- ar->hw->wiphy->n_iface_combinations = 1;
+ ar->hw->wiphy->n_iface_combinations = n_combos;
return 0;
}
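
With support_dual_stations set, both combinations share the same limits
array (limits[0].max becomes 2 STA, limits[1].max stays 16 AP/mesh); only
the channel count and radar capability differ. In sketch form:

    combinations[0]: num_different_channels = 1, max_interfaces = 16,
                     radar_detect_widths set (DFS usable)
    combinations[1]: num_different_channels = 2,
                     max_interfaces = num_vdevs, no radar widths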
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index f5800fbecff8..5e61eea1bb03 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
@@ -179,4 +179,6 @@ int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *ctx);
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index fc77eac83e95..acd76e9392d3 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -454,9 +454,17 @@ int ath11k_mhi_start(struct ath11k_pci *ab_pci)
return 0;
}
-void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend)
{
- mhi_power_down(ab_pci->mhi_ctrl, true);
+ /* During suspend we need to use the mhi_power_down_keep_dev()
+ * workaround, otherwise ath11k_core_resume() will time out
+ * during resume.
+ */
+ if (is_suspend)
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ else
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
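
The is_suspend flag threads down from the core's late-suspend hook; the
call chain wired up by this patch:

    ath11k_core_suspend_late(ab)
      -> ath11k_hif_power_down(ab, true)
        -> ab->hif.ops->power_down(ab, true)   /* ath11k_pci_power_down() */
          -> ath11k_mhi_stop(ab_pci, true)     /* mhi_power_down_keep_dev() */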
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index 651470091bd5..5c5c2b03c81f 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_MHI_H
#define _ATH11K_MHI_H
@@ -18,7 +18,7 @@
#define MHICTRL_RESET_MASK 0x2
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
-void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 412f4a134e4a..78444f8ea153 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -821,7 +821,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return 0;
}
-static void ath11k_pci_power_down(struct ath11k_base *ab)
+static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -832,7 +832,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_msi_disable(ab_pci);
- ath11k_mhi_stop(ab_pci);
+ ath11k_mhi_stop(ab_pci, is_suspend);
clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
@@ -929,7 +929,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
- u32 soc_hw_version_major, soc_hw_version_minor, addr;
+ u32 soc_hw_version_major, soc_hw_version_minor;
int ret;
u32 sub_version;
@@ -955,8 +955,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
* from DT. If memory is reserved from DT for FW, ath11k driver need not
* allocate memory.
*/
- ret = of_property_read_u32(ab->dev->of_node, "memory-region", &addr);
- if (!ret)
+ if (of_property_present(ab->dev->of_node, "memory-region"))
set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
ret = ath11k_pci_claim(ab_pci, pdev);
@@ -1161,9 +1160,10 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
+ ath11k_core_pm_notifier_unregister(ab);
goto qmi_fail;
}
@@ -1192,7 +1192,7 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev)
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
}
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
@@ -1229,9 +1229,39 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
- ath11k_pci_pm_suspend,
- ath11k_pci_pm_resume);
+static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_suspend_late(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ /* Similar to ath11k_pci_pm_suspend(), we return success here
+ * even if an error happens, to allow system suspend/hibernation to proceed.
+ */
+ return 0;
+}
+
+static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_resume_early(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend,
+ ath11k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late,
+ ath11k_pci_pm_resume_early)
+};
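
SET_LATE_SYSTEM_SLEEP_PM_OPS() slots the new callbacks into the late/early
phases, so a full system sleep runs the hooks in this order (sketch):

    suspend:       ath11k_pci_pm_suspend()      -> ath11k_core_suspend()
    suspend_late:  ath11k_pci_pm_suspend_late() -> ath11k_core_suspend_late()
    ... system asleep ...
    resume_early:  ath11k_pci_pm_resume_early() -> ath11k_core_resume_early()
    resume:        ath11k_pci_pm_resume()       -> ath11k_core_resume()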
static struct pci_driver ath11k_pci_driver = {
.name = "ath11k_pci",
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 4f8b08ed1bbc..2782f4723e41 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -1993,6 +1993,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->prev_size == chunk->size)
continue;
+ if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "size/type mismatch (current %d %u) (prev %d %u), try later with small size\n",
+ chunk->size, chunk->type,
+ chunk->prev_size, chunk->prev_type);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
+
/* cannot reuse the existing chunk */
dma_free_coherent(ab->dev, chunk->prev_size,
chunk->vaddr, chunk->paddr);
@@ -2887,7 +2896,7 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
}
/* reset the firmware */
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 9be1cd742339..a9751ea2a0b7 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -107,7 +107,7 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
u32 pdev_id;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
- "event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+ "event wmi cmd_id %d ftm event msg %p datalen %d\n",
cmd_id, ftm_msg, length);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index d7f852bebf4a..56af2e9634f4 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -8158,6 +8158,11 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k_fw_stats stats = {};
+ size_t total_vdevs_started = 0;
+ struct ath11k_pdev *pdev;
+ bool is_end = true;
+ int i;
+
struct ath11k *ar;
int ret;
@@ -8184,25 +8189,57 @@ static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *sk
spin_lock_bh(&ar->data_lock);
- /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and
+ * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via
* debugfs fw stats. Therefore, processing it separately.
*/
if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
- ar->fw_stats_done = true;
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ complete(&ar->fw_stats_done);
goto complete;
}
- /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
- * are currently requested only via debugfs fw stats. Hence, processing these
- * in debugfs context
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all VDEVs started
+ */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += ar->num_started_vdevs;
+ }
+
+ if (total_vdevs_started)
+ is_end = ((++ar->fw_stats.num_vdev_recvd) ==
+ total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+
+ goto complete;
+ }
+
+ /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats.
+ * Hence, processing it in debugfs context
*/
ath11k_debugfs_fw_stats_process(ar, &stats);
complete:
complete(&ar->fw_stats_complete);
- rcu_read_unlock();
spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
* at this point, no need to free the individual list.
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index 52a1bb19e3da..1ea1af1b8f6c 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -7,7 +7,7 @@ config ATH12K
select MHI_BUS
select QRTR
select QRTR_MHI
- select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
help
Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
802.11be) family of chipsets, for example WCN7850 and
@@ -15,6 +15,14 @@ config ATH12K
If you choose to build a module, it'll be called ath12k.
+config ATH12K_AHB
+ bool "QTI ath12k AHB support"
+ depends on ATH12K && REMOTEPROC
+ select QCOM_MDT_LOADER
+ select QCOM_SCM
+ help
+ Enable support for ath12k AHB bus chipsets, for example IPQ5332.
+
config ATH12K_DEBUG
bool "ath12k debugging"
depends on ATH12K
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 60644cb42c76..d95ee525a6cd 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,6 +23,7 @@ ath12k-y += core.o \
fw.o \
p2p.o
+ath12k-$(CONFIG_ATH12K_AHB) += ahb.o
ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c
new file mode 100644
index 000000000000..8d1a86e420a4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ahb.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem_state.h>
+#include "ahb.h"
+#include "debug.h"
+#include "hif.h"
+
+static const struct of_device_id ath12k_ahb_of_match[] = {
+ { .compatible = "qcom,ipq5332-wifi",
+ .data = (void *)ATH12K_HW_IPQ5332_HW10,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match);
+
+#define ATH12K_IRQ_CE0_OFFSET 4
+#define ATH12K_MAX_UPDS 1
+#define ATH12K_UPD_IRQ_WRD_LEN 18
+static const char ath12k_userpd_irq[][9] = {"spawn",
+ "ready",
+ "stop-ack"};
+
+static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring4",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring4,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
+static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
+{
+ if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ return ioread32(ab->mem_ce + offset);
+ return ioread32(ab->mem + offset);
+}
+
+static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
+ u32 value)
+{
+ if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ iowrite32(value, ab->mem_ce + offset);
+ else
+ iowrite32(value, ab->mem + offset);
+}
+
+static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ cancel_work_sync(&ce_pipe->intr_wq);
+ }
+}
+
+static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath12k_ahb_ext_grp_disable(irq_grp);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath12k_ahb_read32(ab, offset);
+ ath12k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath12k_ahb_read32(ab, offset);
+ ath12k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
+static void ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr;
+
+ ce_attr = &ab->hw_params->host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath12k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath12k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
+ ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr;
+
+ ce_attr = &ab->hw_params->host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath12k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath12k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
+ ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+static int ath12k_ahb_start(struct ath12k_base *ab)
+{
+ ath12k_ahb_ce_irqs_enable(ab);
+ ath12k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab)
+{
+ struct ath12k_ext_irq_grp *irq_grp;
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath12k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
+{
+ __ath12k_ahb_ext_irq_disable(ab);
+ ath12k_ahb_sync_ext_irqs(ab);
+}
+
+static void ath12k_ahb_stop(struct ath12k_base *ab)
+{
+ if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath12k_ahb_ce_irqs_disable(ab);
+ ath12k_ahb_sync_ce_irqs(ab);
+ ath12k_ahb_cancel_workqueue(ab);
+ timer_delete_sync(&ab->rx_replenish_retry);
+ ath12k_ce_cleanup_pipes(ab);
+}
+
+static int ath12k_ahb_power_up(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ char fw_name[ATH12K_USERPD_FW_NAME_LEN];
+ char fw2_name[ATH12K_USERPD_FW_NAME_LEN];
+ struct device *dev = ab->dev;
+ const struct firmware *fw, *fw2;
+ struct reserved_mem *rmem = NULL;
+ unsigned long time_left;
+ phys_addr_t mem_phys;
+ void *mem_region;
+ size_t mem_size;
+ u32 pasid;
+ int ret;
+
+ rmem = ath12k_core_get_reserved_mem(ab, 0);
+ if (!rmem)
+ return -ENODEV;
+
+ mem_phys = rmem->base;
+ mem_size = rmem->size;
+ mem_region = devm_memremap(dev, mem_phys, mem_size, MEMREMAP_WC);
+ if (IS_ERR(mem_region)) {
+ ath12k_err(ab, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ return PTR_ERR(mem_region);
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "%s/%s/%s%d%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, ATH12K_AHB_FW_PREFIX, ab_ahb->userpd_id,
+ ATH12K_AHB_FW_SUFFIX);
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret < 0) {
+ ath12k_err(ab, "request_firmware failed\n");
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw_name,
+ fw->size);
+
+ if (!fw->size) {
+ ath12k_err(ab, "Invalid firmware size\n");
+ ret = -EINVAL;
+ goto err_fw;
+ }
+
+ pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
+ ATH12K_AHB_UPD_SWID;
+
+ /* Load FW image to a reserved memory location */
+ ret = qcom_mdt_load(dev, fw, fw_name, pasid, mem_region, mem_phys, mem_size,
+ &mem_phys);
+ if (ret) {
+ ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+ goto err_fw;
+ }
+
+ snprintf(fw2_name, sizeof(fw2_name), "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, ATH12K_AHB_FW2);
+
+ ret = request_firmware(&fw2, fw2_name, dev);
+ if (ret < 0) {
+ ath12k_err(ab, "request_firmware failed\n");
+ goto err_fw;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw2_name,
+ fw2->size);
+
+ if (!fw2->size) {
+ ath12k_err(ab, "Invalid firmware size\n");
+ ret = -EINVAL;
+ goto err_fw2;
+ }
+
+ ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, pasid, mem_region, mem_phys,
+ mem_size, &mem_phys);
+ if (ret) {
+ ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+ goto err_fw2;
+ }
+
+ /* Authenticate the FW image via SCM using the PAS (peripheral authentication service) ID */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+ if (ret) {
+ ath12k_err(ab, "failed to boot the remote processor %d\n", ret);
+ goto err_fw2;
+ }
+
+ /* Instruct Q6 to spawn userPD thread */
+ ret = qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit),
+ BIT(ab_ahb->spawn_bit));
+ if (ret) {
+ ath12k_err(ab, "Failed to update spawn state %d\n", ret);
+ goto err_fw2;
+ }
+
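+ /* Spawn and ready are acknowledged via the userPD SMP2P interrupts
+ * handled in ath12k_userpd_irq_handler().
+ */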
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_spawned,
+ ATH12K_USERPD_SPAWN_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD spawn wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto err_fw2;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_ready,
+ ATH12K_USERPD_READY_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD ready wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto err_fw2;
+ }
+
+ qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit), 0);
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "UserPD%d is now UP\n", ab_ahb->userpd_id);
+
+err_fw2:
+ release_firmware(fw2);
+err_fw:
+ release_firmware(fw);
+ return ret;
+}
+
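+/* Power-down handshake: assert the "stop" SMEM bit, wait for the
+ * userPD stop-ack interrupt, clear the bit, then release the PAS
+ * authentication via SCM so the firmware region can be reused.
+ */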
+static void ath12k_ahb_power_down(struct ath12k_base *ab, bool is_suspend)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ unsigned long time_left;
+ u32 pasid;
+ int ret;
+
+ qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit),
+ BIT(ab_ahb->stop_bit));
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_stopped,
+ ATH12K_USERPD_STOP_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD stop wait timed out\n");
+ return;
+ }
+
+ qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit), 0);
+
+ pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
+ ATH12K_AHB_UPD_SWID;
+ /* Shut down the userPD via SCM PAS and release the firmware region */
+ ret = qcom_scm_pas_shutdown(pasid);
+ if (ret)
+ ath12k_err(ab, "scm pas shutdown failed for userPD%d: %d\n",
+ ab_ahb->userpd_id, ret);
+}
+
+static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab)
+{
+ struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ab->hw_params->target_ce_count;
+ cfg->tgt_ce = ab->hw_params->target_ce_config;
+ cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
+ cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
+ ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+}
+
+static void ath12k_ahb_ce_workqueue(struct work_struct *work)
+{
+ struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
+
+ ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
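+ /* Re-enable the CE interrupt that was masked in the hard IRQ handler */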
+ ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ce_pipe *ce_pipe = arg;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ queue_work(system_bh_wq, &ce_pipe->intr_wq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath12k_ext_irq_grp,
+ napi);
+ struct ath12k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath12k_ahb_ext_grp_enable(irq_grp);
+ }
+
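+ /* ath12k_dp_service_srng() may report more work than the budget;
+ * clamp the value before returning it to NAPI.
+ */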
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ext_irq_grp *irq_grp = arg;
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ ath12k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
+{
+ const struct ath12k_hw_ring_mask *ring_mask;
+ struct ath12k_ext_irq_grp *irq_grp;
+ const struct hal_ops *hal_ops;
+ int i, j, irq, irq_idx, ret;
+ u32 num_irq;
+
+ ring_mask = ab->hw_params->ring_mask;
+ hal_ops = ab->hw_params->hal_ops;
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev)
+ return -ENOMEM;
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
+ ath12k_ahb_ext_grp_napi_poll);
+
+ for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) {
+ /* For TX ring, ensure that the ring mask and the
+ * tcl_to_wbm_rbm_map point to the same ring number.
+ */
+ if (ring_mask->tx[i] &
+ BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ring_mask->rx[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ring_mask->rx_err[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ring_mask->rx_wbm_rel[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ring_mask->reo_status[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (ring_mask->rx_mon_dest[i] & BIT(j))
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_destination_mac1;
+ }
+
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ if (irq < 0)
+ return irq;
+
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(ab->dev, irq,
+ ath12k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret)
+ ath12k_warn(ab, "failed to request irq %d: %d\n", irq, ret);
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_config_irq(struct ath12k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
+
+ INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(ab->dev, irq, ath12k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ return ath12k_ahb_config_ext_irq(ab);
+}
+
+static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ u32 pipedir;
+ int i;
+
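+ /* Scan the service-to-CE map; a service may have at most one UL
+ * (host->target) and one DL (target->host) pipe.
+ */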
+ for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params->svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ pipedir = __le32_to_cpu(entry->pipedir);
+ if (pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT) {
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ }
+
+ if (pipedir == PIPEDIR_OUT || pipedir == PIPEDIR_INOUT) {
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = {
+ .start = ath12k_ahb_start,
+ .stop = ath12k_ahb_stop,
+ .read32 = ath12k_ahb_read32,
+ .write32 = ath12k_ahb_write32,
+ .irq_enable = ath12k_ahb_ext_irq_enable,
+ .irq_disable = ath12k_ahb_ext_irq_disable,
+ .map_service_to_pipe = ath12k_ahb_map_service_to_pipe,
+ .power_up = ath12k_ahb_power_up,
+ .power_down = ath12k_ahb_power_down,
+};
+
+static irqreturn_t ath12k_userpd_irq_handler(int irq, void *data)
+{
+ struct ath12k_base *ab = data;
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_SPAWN_IRQ]) {
+ complete(&ab_ahb->userpd_spawned);
+ } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_READY_IRQ]) {
+ complete(&ab_ahb->userpd_ready);
+ } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_STOP_ACK_IRQ]) {
+ complete(&ab_ahb->userpd_stopped);
+ } else {
+ ath12k_err(ab, "Invalid userpd interrupt\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
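+/* The root PD raises three per-userPD interrupts (spawn, ready,
+ * stop-ack); each simply completes the matching completion so the
+ * power up/down paths can wait on them.
+ */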
+static int ath12k_ahb_config_rproc_irq(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ int i, ret;
+ char *upd_irq_name;
+
+ for (i = 0; i < ATH12K_USERPD_MAX_IRQ; i++) {
+ ab_ahb->userpd_irq_num[i] = platform_get_irq_byname(ab->pdev,
+ ath12k_userpd_irq[i]);
+ if (ab_ahb->userpd_irq_num[i] < 0)
+ return ab_ahb->userpd_irq_num[i];
+
+ upd_irq_name = devm_kzalloc(&ab->pdev->dev, ATH12K_UPD_IRQ_WRD_LEN,
+ GFP_KERNEL);
+ if (!upd_irq_name)
+ return -ENOMEM;
+
+ scnprintf(upd_irq_name, ATH12K_UPD_IRQ_WRD_LEN, "UserPD%u-%s",
+ ab_ahb->userpd_id, ath12k_userpd_irq[i]);
+ ret = devm_request_threaded_irq(&ab->pdev->dev, ab_ahb->userpd_irq_num[i],
+ NULL, ath12k_userpd_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ upd_irq_name, ab);
+ if (ret)
+ return dev_err_probe(&ab->pdev->dev, ret,
+ "Request %s irq failed: %d\n",
+ ath12k_userpd_irq[i], ret);
+ }
+
+ ab_ahb->spawn_state = devm_qcom_smem_state_get(&ab->pdev->dev, "spawn",
+ &ab_ahb->spawn_bit);
+ if (IS_ERR(ab_ahb->spawn_state))
+ return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->spawn_state),
+ "Failed to acquire spawn state\n");
+
+ ab_ahb->stop_state = devm_qcom_smem_state_get(&ab->pdev->dev, "stop",
+ &ab_ahb->stop_bit);
+ if (IS_ERR(ab_ahb->stop_state))
+ return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->stop_state),
+ "Failed to acquire stop state\n");
+
+ init_completion(&ab_ahb->userpd_spawned);
+ init_completion(&ab_ahb->userpd_ready);
+ init_completion(&ab_ahb->userpd_stopped);
+ return 0;
+}
+
+static int ath12k_ahb_root_pd_state_notifier(struct notifier_block *nb,
+ const unsigned long event, void *data)
+{
+ struct ath12k_ahb *ab_ahb = container_of(nb, struct ath12k_ahb, root_pd_nb);
+ struct ath12k_base *ab = ab_ahb->ab;
+
+ if (event == ATH12K_RPROC_AFTER_POWERUP) {
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Root PD is UP\n");
+ complete(&ab_ahb->rootpd_ready);
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_register_rproc_notifier(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ ab_ahb->root_pd_nb.notifier_call = ath12k_ahb_root_pd_state_notifier;
+ init_completion(&ab_ahb->rootpd_ready);
+
+ ab_ahb->root_pd_notifier = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name,
+ &ab_ahb->root_pd_nb);
+ if (IS_ERR(ab_ahb->root_pd_notifier))
+ return PTR_ERR(ab_ahb->root_pd_notifier);
+
+ return 0;
+}
+
+static void ath12k_ahb_unregister_rproc_notifier(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (!ab_ahb->root_pd_notifier) {
+ ath12k_err(ab, "Rproc notifier not registered\n");
+ return;
+ }
+
+ qcom_unregister_ssr_notifier(ab_ahb->root_pd_notifier,
+ &ab_ahb->root_pd_nb);
+ ab_ahb->root_pd_notifier = NULL;
+}
+
+static int ath12k_ahb_get_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ struct device *dev = ab->dev;
+ struct device_node *np;
+ struct rproc *prproc;
+
+ np = of_parse_phandle(dev->of_node, "qcom,rproc", 0);
+ if (!np) {
+ ath12k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(np->phandle);
+ of_node_put(np);
+ if (!prproc)
+ return dev_err_probe(&ab->pdev->dev, -EPROBE_DEFER,
+ "failed to get rproc\n");
+
+ ab_ahb->tgt_rproc = prproc;
+
+ return 0;
+}
+
+static int ath12k_ahb_boot_root_pd(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ unsigned long time_left;
+ int ret;
+
+ ret = rproc_boot(ab_ahb->tgt_rproc);
+ if (ret < 0) {
+ ath12k_err(ab, "RootPD boot failed\n");
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->rootpd_ready,
+ ATH12K_ROOTPD_READY_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "RootPD ready wait timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
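+/* The WLAN userPD depends on the Q6 root PD: resolve the rproc from
+ * the "qcom,rproc" DT phandle, register for SSR power-up notifications
+ * and boot the root PD first if it is not already running.
+ */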
+static int ath12k_ahb_configure_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ int ret;
+
+ ret = ath12k_ahb_get_rproc(ab);
+ if (ret < 0)
+ return ret;
+
+ ret = ath12k_ahb_register_rproc_notifier(ab);
+ if (ret < 0) {
+ ret = dev_err_probe(&ab->pdev->dev, ret,
+ "failed to register rproc notifier\n");
+ goto err_put_rproc;
+ }
+
+ if (ab_ahb->tgt_rproc->state != RPROC_RUNNING) {
+ ret = ath12k_ahb_boot_root_pd(ab);
+ if (ret < 0) {
+ ath12k_err(ab, "failed to boot the remote processor Q6\n");
+ goto err_unreg_notifier;
+ }
+ }
+
+ return ath12k_ahb_config_rproc_irq(ab);
+
+err_unreg_notifier:
+ ath12k_ahb_unregister_rproc_notifier(ab);
+
+err_put_rproc:
+ rproc_put(ab_ahb->tgt_rproc);
+ return ret;
+}
+
+static void ath12k_ahb_deconfigure_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ ath12k_ahb_unregister_rproc_notifier(ab);
+ rproc_put(ab_ahb->tgt_rproc);
+}
+
+static int ath12k_ahb_resource_init(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ struct platform_device *pdev = ab->pdev;
+ struct resource *mem_res;
+ int ret;
+
+ ab->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
+ if (IS_ERR(ab->mem)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(ab->mem), "ioremap error\n");
+ goto out;
+ }
+
+ ab->mem_len = resource_size(mem_res);
+
+ if (ab->hw_params->ce_remap) {
+ const struct ce_remap *ce_remap = ab->hw_params->ce_remap;
+ /* The CE register space has been moved out of WCSS and is not
+ * contiguous with it, so remap the CE registers into a separate
+ * window for access.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ ret = -ENOMEM;
+ goto err_mem_unmap;
+ }
+ ab->ce_remap = true;
+ ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE;
+ }
+
+ ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
+ if (IS_ERR(ab_ahb->xo_clk)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(ab_ahb->xo_clk),
+ "failed to get xo clock\n");
+ goto err_mem_ce_unmap;
+ }
+
+ ret = clk_prepare_enable(ab_ahb->xo_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable gcc_xo_clk: %d\n", ret);
+ goto err_clock_deinit;
+ }
+
+ return 0;
+
+err_clock_deinit:
+ devm_clk_put(ab->dev, ab_ahb->xo_clk);
+
+err_mem_ce_unmap:
+ ab_ahb->xo_clk = NULL;
+ if (ab->hw_params->ce_remap)
+ iounmap(ab->mem_ce);
+
+err_mem_unmap:
+ ab->mem_ce = NULL;
+ devm_iounmap(ab->dev, ab->mem);
+
+out:
+ ab->mem = NULL;
+ return ret;
+}
+
+static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (ab->mem)
+ devm_iounmap(ab->dev, ab->mem);
+
+ if (ab->mem_ce)
+ iounmap(ab->mem_ce);
+
+ ab->mem = NULL;
+ ab->mem_ce = NULL;
+
+ clk_disable_unprepare(ab_ahb->xo_clk);
+ devm_clk_put(ab->dev, ab_ahb->xo_clk);
+ ab_ahb->xo_clk = NULL;
+}
+
+static int ath12k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath12k_base *ab;
+ const struct ath12k_hif_ops *hif_ops;
+ struct ath12k_ahb *ab_ahb;
+ enum ath12k_hw_rev hw_rev;
+ u32 addr, userpd_id;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set 32-bit coherent dma\n");
+ return ret;
+ }
+
+ ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
+ ATH12K_BUS_AHB);
+ if (!ab)
+ return -ENOMEM;
+
+ hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
+ switch (hw_rev) {
+ case ATH12K_HW_IPQ5332_HW10:
+ hif_ops = &ath12k_ahb_hif_ops_ipq5332;
+ userpd_id = ATH12K_IPQ5332_USERPD_ID;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err_core_free;
+ }
+
+ ab->hif.ops = hif_ops;
+ ab->pdev = pdev;
+ ab->hw_rev = hw_rev;
+ platform_set_drvdata(pdev, ab);
+ ab_ahb = ath12k_ab_to_ahb(ab);
+ ab_ahb->ab = ab;
+ ab_ahb->userpd_id = userpd_id;
+
+ /* Set the fixed-memory-region flag for platforms that reserve FW
+ * memory in the DT. When memory is reserved there, the driver does
+ * not need to allocate it itself.
+ */
+ if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
+ set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
+
+ ret = ath12k_core_pre_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath12k_ahb_resource_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath12k_hal_srng_init(ab);
+ if (ret)
+ goto err_resource_deinit;
+
+ ret = ath12k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath12k_ahb_init_qmi_ce_config(ab);
+
+ ret = ath12k_ahb_configure_rproc(ab);
+ if (ret)
+ goto err_ce_free;
+
+ ret = ath12k_ahb_config_irq(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_rproc_deconfigure;
+ }
+
+ ret = ath12k_core_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init core: %d\n", ret);
+ goto err_rproc_deconfigure;
+ }
+
+ return 0;
+
+err_rproc_deconfigure:
+ ath12k_ahb_deconfigure_rproc(ab);
+
+err_ce_free:
+ ath12k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath12k_hal_srng_deinit(ab);
+
+err_resource_deinit:
+ ath12k_ahb_resource_deinit(ab);
+
+err_core_free:
+ ath12k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void ath12k_ahb_remove_prepare(struct ath12k_base *ab)
+{
+ unsigned long left;
+
+ if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) {
+ left = wait_for_completion_timeout(&ab->driver_recovery,
+ ATH12K_AHB_RECOVERY_TIMEOUT);
+ if (!left)
+ ath12k_warn(ab, "failed to receive recovery response completion\n");
+ }
+
+ set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+ cancel_work_sync(&ab->qmi.event_work);
+}
+
+static void ath12k_ahb_free_resources(struct ath12k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+
+ ath12k_hal_srng_deinit(ab);
+ ath12k_ce_free_pipes(ab);
+ ath12k_ahb_resource_deinit(ab);
+ ath12k_ahb_deconfigure_rproc(ab);
+ ath12k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static void ath12k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath12k_base *ab = platform_get_drvdata(pdev);
+
+ if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath12k_ahb_power_down(ab, false);
+ goto qmi_fail;
+ }
+
+ ath12k_ahb_remove_prepare(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
+qmi_fail:
+ ath12k_core_deinit(ab);
+ ath12k_ahb_free_resources(ab);
+}
+
+static struct platform_driver ath12k_ahb_driver = {
+ .driver = {
+ .name = "ath12k_ahb",
+ .of_match_table = ath12k_ahb_of_match,
+ },
+ .probe = ath12k_ahb_probe,
+ .remove = ath12k_ahb_remove,
+};
+
+int ath12k_ahb_init(void)
+{
+ return platform_driver_register(&ath12k_ahb_driver);
+}
+
+void ath12k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath12k_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath12k/ahb.h b/drivers/net/wireless/ath/ath12k/ahb.h
new file mode 100644
index 000000000000..d56244b20a6a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ahb.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_AHB_H
+#define ATH12K_AHB_H
+
+#include <linux/clk.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include "core.h"
+
+#define ATH12K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
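+/* SMP2P SMEM message layout: the low 16 bits carry the message ID,
+ * the high 16 bits a sequence number.
+ */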
+#define ATH12K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH12K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH12K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+#define ATH12K_PCI_CE_WAKE_IRQ 2
+#define ATH12K_PCI_IRQ_CE0_OFFSET 3
+#define ATH12K_ROOTPD_READY_TIMEOUT (5 * HZ)
+#define ATH12K_RPROC_AFTER_POWERUP QCOM_SSR_AFTER_POWERUP
+#define ATH12K_AHB_FW_PREFIX "q6_fw"
+#define ATH12K_AHB_FW_SUFFIX ".mdt"
+#define ATH12K_AHB_FW2 "iu_fw.mdt"
+#define ATH12K_AHB_UPD_SWID 0x12
+#define ATH12K_USERPD_SPAWN_TIMEOUT (5 * HZ)
+#define ATH12K_USERPD_READY_TIMEOUT (10 * HZ)
+#define ATH12K_USERPD_STOP_TIMEOUT (5 * HZ)
+#define ATH12K_USERPD_ID_MASK GENMASK(9, 8)
+#define ATH12K_USERPD_FW_NAME_LEN 35
+
+enum ath12k_ahb_smp2p_msg_id {
+ ATH12K_AHB_POWER_SAVE_ENTER = 1,
+ ATH12K_AHB_POWER_SAVE_EXIT,
+};
+
+enum ath12k_ahb_userpd_irq {
+ ATH12K_USERPD_SPAWN_IRQ,
+ ATH12K_USERPD_READY_IRQ,
+ ATH12K_USERPD_STOP_ACK_IRQ,
+ ATH12K_USERPD_MAX_IRQ,
+};
+
+struct ath12k_base;
+
+struct ath12k_ahb {
+ struct ath12k_base *ab;
+ struct rproc *tgt_rproc;
+ struct clk *xo_clk;
+ struct completion rootpd_ready;
+ struct notifier_block root_pd_nb;
+ void *root_pd_notifier;
+ struct qcom_smem_state *spawn_state;
+ struct qcom_smem_state *stop_state;
+ struct completion userpd_spawned;
+ struct completion userpd_ready;
+ struct completion userpd_stopped;
+ u32 userpd_id;
+ u32 spawn_bit;
+ u32 stop_bit;
+ int userpd_irq_num[ATH12K_USERPD_MAX_IRQ];
+};
+
+static inline struct ath12k_ahb *ath12k_ab_to_ahb(struct ath12k_base *ab)
+{
+ return (struct ath12k_ahb *)ab->drv_priv;
+}
+
+#ifdef CONFIG_ATH12K_AHB
+int ath12k_ahb_init(void);
+void ath12k_ahb_exit(void);
+#else
+static inline int ath12k_ahb_init(void)
+{
+ return 0;
+}
+
+static inline void ath12k_ahb_exit(void) {}
+#endif
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index be0d669d31fc..3f3439262cf4 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
@@ -219,6 +219,96 @@ const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
};
+const struct ce_attr ath12k_host_ce_config_ipq5332[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+ /* CE5: target -> host PKTLOG */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+ },
+ /* CE6: Target autonomous HIF_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE7: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE8: Target HIF memcpy (Generic HIF memcypy) */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE9: WMI logging/CFR/Spectral/Radar */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+ /* CE10: Unused */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE11: Unused */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
@@ -343,11 +433,10 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
goto err;
}
+ /* Make sure descriptor is read after the head pointer. */
+ dma_rmb();
+
*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
- if (*nbytes == 0) {
- ret = -EIO;
- goto err;
- }
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -380,8 +469,8 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
- if (unlikely(max_nbytes < nbytes)) {
- ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
+ ath12k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
@@ -779,7 +868,7 @@ void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
void ath12k_ce_rx_replenish_retry(struct timer_list *t)
{
- struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry);
+ struct ath12k_base *ab = timer_container_of(ab, t, rx_replenish_retry);
ath12k_ce_rx_post_buf(ab);
}
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
index 1a14b9fb86b8..57f75899ee03 100644
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CE_H
@@ -39,8 +39,8 @@
#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
/* CE address/mask */
-#define CE_HOST_IE_ADDRESS 0x00A1803C
-#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_ADDRESS 0x75804C
+#define CE_HOST_IE_2_ADDRESS 0x758050
#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
#define CE_HOST_IE_3_SHIFT 0xC
@@ -76,6 +76,17 @@ struct ce_pipe_config {
__le32 reserved;
};
+struct ce_ie_addr {
+ u32 ie1_reg_addr;
+ u32 ie2_reg_addr;
+ u32 ie3_reg_addr;
+};
+
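+/* Window for CE registers that live outside the contiguous WCSS
+ * register space (see hw_params->ce_remap).
+ */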
+struct ce_remap {
+ u32 base;
+ u32 size;
+};
+
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
@@ -164,6 +175,7 @@ struct ath12k_ce {
extern const struct ce_attr ath12k_host_ce_config_qcn9274[];
extern const struct ce_attr ath12k_host_ce_config_wcn7850[];
+extern const struct ce_attr ath12k_host_ce_config_ipq5332[];
void ath12k_ce_cleanup_pipes(struct ath12k_base *ab);
void ath12k_ce_rx_replenish_retry(struct timer_list *t);
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 0b2dec081c6e..89ae80934b30 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -10,15 +10,18 @@
#include <linux/firmware.h>
#include <linux/of.h>
#include <linux/of_graph.h>
+#include "ahb.h"
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
-#include "hif.h"
-#include "fw.h"
#include "debugfs.h"
+#include "fw.h"
+#include "hif.h"
+#include "pci.h"
#include "wow.h"
+static int ahb_err, pci_err;
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
@@ -612,9 +615,74 @@ u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
return TARGET_NUM_TIDS(SINGLE);
}
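+/* Resolve the indexed "memory-region" DT phandle to its reserved_mem
+ * entry; returns NULL if the property or region is absent.
+ */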
+struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
+ int index)
+{
+ struct device *dev = ab->dev;
+ struct reserved_mem *rmem;
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", index);
+ if (!node) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to parse memory-region for index %d\n", index);
+ return NULL;
+ }
+
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "unable to get memory-region for index %d\n", index);
+ return NULL;
+ }
+
+ return rmem;
+}
+
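+/* Track per-device attach state so ag->num_started is incremented and
+ * decremented exactly once per device, even across recovery.
+ */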
+static inline
+void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (ab->hw_group_ref) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
+ ag->id);
+ return;
+ }
+
+ ab->hw_group_ref = true;
+ ag->num_started++;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
+ ag->id, ag->num_started);
+}
+
+static inline
+void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (!ab->hw_group_ref) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
+ ag->id);
+ return;
+ }
+
+ ab->hw_group_ref = false;
+ ag->num_started--;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
+ ag->id, ag->num_started);
+}
+
static void ath12k_core_stop(struct ath12k_base *ab)
{
- ath12k_core_stopped(ab);
+ ath12k_core_to_group_ref_put(ab);
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
@@ -629,7 +697,7 @@ static void ath12k_core_stop(struct ath12k_base *ab)
/* De-Init of components as needed */
}
-static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
+static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
struct ath12k_base *ab = data;
const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
@@ -651,6 +719,28 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
return;
}
+ spin_lock_bh(&ab->base_lock);
+
+ switch (smbios->country_code_flag) {
+ case ATH12K_SMBIOS_CC_ISO:
+ ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
+ ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
+ ab->new_alpha2[0], ab->new_alpha2[1]);
+ break;
+ case ATH12K_SMBIOS_CC_WW:
+ ab->new_alpha2[0] = '0';
+ ab->new_alpha2[1] = '0';
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
+ break;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
+ smbios->country_code_flag);
+ break;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
if (!smbios->bdf_enabled) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
return;
@@ -690,7 +780,7 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
int ath12k_core_check_smbios(struct ath12k_base *ab)
{
ab->qmi.target.bdf_ext[0] = '\0';
- dmi_walk(ath12k_core_check_bdfext, ab);
+ dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
if (ab->qmi.target.bdf_ext[0] == '\0')
return -ENODATA;
@@ -721,6 +811,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
goto err_qmi_deinit;
}
+ ath12k_debugfs_pdev_create(ab);
+
return 0;
err_qmi_deinit:
@@ -851,9 +943,8 @@ static int ath12k_core_start(struct ath12k_base *ab)
ath12k_acpi_set_dsm_func(ab);
- if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
- /* Indicate the core start in the appropriate group */
- ath12k_core_started(ab);
+ /* Indicate the core start in the appropriate group */
+ ath12k_core_to_group_ref_get(ab);
return 0;
@@ -891,6 +982,9 @@ static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
ab = ag->ab[i];
if (!ab)
continue;
+
+ clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
ath12k_core_device_cleanup(ab);
}
@@ -1026,6 +1120,8 @@ core_pdev_create:
mutex_lock(&ab->core_lock);
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
ret = ath12k_core_pdev_create(ab);
if (ret) {
ath12k_err(ab, "failed to create pdev core %d\n", ret);
@@ -1084,6 +1180,61 @@ bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
return (ag->num_started == ag->num_devices);
}
+static void ath12k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath12k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath12k_fw_stats_init(struct ath12k *ar)
+{
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+ init_completion(&ar->fw_stats_complete);
+ init_completion(&ar->fw_stats_done);
+}
+
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
+{
+ ath12k_fw_stats_pdevs_free(&stats->pdevs);
+ ath12k_fw_stats_vdevs_free(&stats->vdevs);
+ ath12k_fw_stats_bcn_free(&stats->bcn);
+}
+
+void ath12k_fw_stats_reset(struct ath12k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ath12k_fw_stats_free(&ar->fw_stats);
+ ar->fw_stats.num_vdev_recvd = 0;
+ ar->fw_stats.num_bcn_recvd = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
{
struct ath12k_hw_group *ag = ab->ag;
@@ -1246,6 +1397,7 @@ static void ath12k_rfkill_work(struct work_struct *work)
void ath12k_core_halt(struct ath12k *ar)
{
+ struct list_head *pos, *n;
struct ath12k_base *ab = ar->ab;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -1258,10 +1410,16 @@ void ath12k_core_halt(struct ath12k *ar)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->rfkill_work);
+ cancel_work_sync(&ab->update_11d_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
- INIT_LIST_HEAD(&ar->arvifs);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_safe(pos, n, &ar->arvifs)
+ list_del_init(pos);
+ spin_unlock_bh(&ar->data_lock);
+
idr_init(&ar->txmgmt_idr);
}
@@ -1285,14 +1443,28 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
ah->state == ATH12K_HW_STATE_TM)
continue;
+ wiphy_lock(ah->hw->wiphy);
+
+ /* If queue 0 is stopped, it is safe to assume that all
+ * other queues were already stopped by the driver via
+ * ieee80211_stop_queues() below, so there is no need to
+ * stop them again; continue to the next hardware.
+ */
+ if (ieee80211_queue_stopped(ah->hw, 0)) {
+ wiphy_unlock(ah->hw->wiphy);
+ continue;
+ }
+
ieee80211_stop_queues(ah->hw);
for (j = 0; j < ah->num_radio; j++) {
ar = &ah->radio[j];
ath12k_mac_drain_tx(ar);
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
@@ -1306,13 +1478,47 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
wake_up(&ar->txmgmt_empty_waitq);
+
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ ar->monitor_started = false;
}
+
+ wiphy_unlock(ah->hw->wiphy);
}
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
}
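+/* Propagate the newly learned alpha2 (e.g. from SMBIOS) to every
+ * radio via the WMI set-current-country command.
+ */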
+static void ath12k_update_11d(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ struct wmi_set_current_country_arg arg = {};
+ int ret, i;
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&arg.alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
+ arg.alpha2[0], arg.alpha2[1]);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ memcpy(&ar->alpha2, &arg.alpha2, 2);
+ ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "pdev id %d failed set current country code: %d\n",
+ i, ret);
+ }
+}
+
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k_hw_group *ag = ab->ag;
@@ -1386,19 +1592,30 @@ static void ath12k_core_restart(struct work_struct *work)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
}
+ mutex_lock(&ag->mutex);
+
+ if (!ath12k_core_hw_group_start_ready(ag)) {
+ mutex_unlock(&ag->mutex);
+ goto exit_restart;
+ }
+
for (i = 0; i < ag->num_hw; i++) {
- ah = ath12k_ag_to_ah(ab->ag, i);
+ ah = ath12k_ag_to_ah(ag, i);
ieee80211_restart_hw(ah->hw);
}
+
+ mutex_unlock(&ag->mutex);
}
+exit_restart:
complete(&ab->restart_completed);
}
static void ath12k_core_reset(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
- int reset_count, fail_cont_count;
+ struct ath12k_hw_group *ag = ab->ag;
+ int reset_count, fail_cont_count, i;
long time_left;
if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
@@ -1457,9 +1674,34 @@ static void ath12k_core_reset(struct work_struct *work)
ath12k_hif_ce_irq_disable(ab);
ath12k_hif_power_down(ab, false);
- ath12k_hif_power_up(ab);
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+ /* prepare for power up */
+ ab->qmi.num_radios = U8_MAX;
+
+ mutex_lock(&ag->mutex);
+ ath12k_core_to_group_ref_put(ab);
+
+ if (ag->num_started > 0) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "waiting for %d partner device(s) to reset\n",
+ ag->num_started);
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ /* Prepare MLO global memory region for power up */
+ ath12k_qmi_reset_mlo_mem(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_hif_power_up(ab);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+ }
+
+ mutex_unlock(&ag->mutex);
}
int ath12k_core_pre_init(struct ath12k_base *ab)
@@ -1596,9 +1838,9 @@ static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
of_node_put(next_rx_endpoint);
device_count++;
- if (device_count > ATH12K_MAX_SOCS) {
+ if (device_count > ATH12K_MAX_DEVICES) {
ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
- device_count, ATH12K_MAX_SOCS);
+ device_count, ATH12K_MAX_DEVICES);
of_node_put(next_wsi_dev);
return -EINVAL;
}
@@ -1774,7 +2016,7 @@ static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
}
}
-static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
struct ath12k_base *ab;
int i;
@@ -1843,20 +2085,18 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
lockdep_assert_held(&ag->mutex);
- /* If more than one devices are grouped, then inter MLO
- * functionality can work still independent of whether internally
- * each device supports single_chip_mlo or not.
- * Only when there is one device, then disable for WCN chipsets
- * till the required driver implementation is in place.
- */
if (ag->num_devices == 1) {
ab = ag->ab[0];
-
- /* WCN chipsets does not advertise in firmware features
- * hence skip checking
- */
- if (ab->hw_params->def_num_link)
+ /* QCN9274 firmware uses firmware IE for MLO advertisement */
+ if (ab->fw.fw_features_valid) {
+ ag->mlo_capable =
+ ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
return;
+ }
+
+ /* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
+ ag->mlo_capable = ab->single_chip_mlo_support;
+ return;
}
ag->mlo_capable = true;
@@ -1869,7 +2109,7 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
/* even if 1 device's firmware feature indicates MLO
* unsupported, make MLO unsupported for the whole group
*/
- if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
+ if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
ag->mlo_capable = false;
return;
}
@@ -1891,7 +2131,8 @@ int ath12k_core_init(struct ath12k_base *ab)
if (!ag) {
mutex_unlock(&ath12k_hw_group_mutex);
ath12k_warn(ab, "unable to get hw group\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_unregister_notifier;
}
mutex_unlock(&ath12k_hw_group_mutex);
@@ -1906,7 +2147,7 @@ int ath12k_core_init(struct ath12k_base *ab)
if (ret) {
mutex_unlock(&ag->mutex);
ath12k_warn(ab, "unable to create hw group\n");
- goto err;
+ goto err_destroy_hw_group;
}
}
@@ -1914,18 +2155,20 @@ int ath12k_core_init(struct ath12k_base *ab)
return 0;
-err:
+err_destroy_hw_group:
ath12k_core_hw_group_destroy(ab->ag);
ath12k_core_hw_group_unassign(ab);
+err_unregister_notifier:
+ ath12k_core_panic_notifier_unregister(ab);
+
return ret;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
- ath12k_core_panic_notifier_unregister(ab);
- ath12k_core_hw_group_cleanup(ab->ag);
ath12k_core_hw_group_destroy(ab->ag);
ath12k_core_hw_group_unassign(ab);
+ ath12k_core_panic_notifier_unregister(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1966,6 +2209,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->reset_work, ath12k_core_reset);
INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
+ INIT_WORK(&ab->update_11d_work, ath12k_update_11d);
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
@@ -1975,6 +2219,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
+ ab->single_chip_mlo_support = false;
/* Device index used to identify the devices in a group.
*
@@ -1995,5 +2240,31 @@ err_sc_free:
return NULL;
}
-MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
+static int ath12k_init(void)
+{
+ ahb_err = ath12k_ahb_init();
+ if (ahb_err)
+ pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
+
+ pci_err = ath12k_pci_init();
+ if (pci_err)
+ pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
+
+ /* If both failed, return one of the failures (arbitrary) */
+ return ahb_err && pci_err ? ahb_err : 0;
+}
+
+static void ath12k_exit(void)
+{
+ if (!pci_err)
+ ath12k_pci_exit();
+
+ if (!ahb_err)
+ ath12k_ahb_exit();
+}
+
+module_init(ath12k_init);
+module_exit(ath12k_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 3fac4f00d383..7bcd9c70309f 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -14,8 +14,10 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/of_reserved_mem.h>
#include <linux/panic_notifier.h>
#include <linux/average.h>
+#include <linux/of.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -62,8 +64,8 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
-#define ATH12K_MAX_SOCS 3
-#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
+#define ATH12K_MAX_DEVICES 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)
#define ATH12K_INVALID_GROUP_ID 0xFF
#define ATH12K_INVALID_DEVICE_ID 0xFF
@@ -147,7 +149,8 @@ struct ath12k_skb_rxcb {
enum ath12k_hw_rev {
ATH12K_HW_QCN9274_HW10,
ATH12K_HW_QCN9274_HW20,
- ATH12K_HW_WCN7850_HW20
+ ATH12K_HW_WCN7850_HW20,
+ ATH12K_HW_IPQ5332_HW10,
};
enum ath12k_firmware_mode {
@@ -160,6 +163,7 @@ enum ath12k_firmware_mode {
#define ATH12K_IRQ_NUM_MAX 57
#define ATH12K_EXT_IRQ_NUM_MAX 16
+#define ATH12K_MAX_TCL_RING_NUM 3
struct ath12k_ext_irq_grp {
struct ath12k_base *ab;
@@ -172,9 +176,34 @@ struct ath12k_ext_irq_grp {
struct net_device *napi_ndev;
};
+enum ath12k_smbios_cc_type {
+ /* disable country code setting from SMBIOS */
+ ATH12K_SMBIOS_CC_DISABLE = 0,
+
+ /* set country code by ANSI country name, based on ISO3166-1 alpha2 */
+ ATH12K_SMBIOS_CC_ISO = 1,
+
+ /* worldwide regdomain */
+ ATH12K_SMBIOS_CC_WW = 2,
+};
+
struct ath12k_smbios_bdf {
struct dmi_header hdr;
- u32 padding;
+ u8 features_disabled;
+
+ /* enum ath12k_smbios_cc_type */
+ u8 country_code_flag;
+
+ /* To set a specific country, first set country_code_flag to
+ * ATH12K_SMBIOS_CC_ISO, then encode the ISO 3166-1 alpha2 in
+ * cc_code: United States is 0x5553 ("US", 'U'=0x55, 'S'=0x53),
+ * Indonesia is 0x4944 ("ID", 'I'=0x49, 'D'=0x44). If
+ * country_code_flag is ATH12K_SMBIOS_CC_WW, the worldwide
+ * regulatory setting is used.
+ */
+ u16 cc_code;
+
u8 bdf_enabled;
u8 bdf_ext[];
} __packed;
@@ -219,6 +248,12 @@ enum ath12k_scan_state {
ATH12K_SCAN_ABORTING,
};
+enum ath12k_11d_state {
+ ATH12K_11D_IDLE,
+ ATH12K_11D_PREPARING,
+ ATH12K_11D_RUNNING,
+};
+
enum ath12k_hw_group_flags {
ATH12K_GROUP_FLAG_REGISTERED,
ATH12K_GROUP_FLAG_UNREGISTER,
@@ -238,6 +273,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_EXT_IRQ_ENABLED,
ATH12K_FLAG_QMI_FW_READY_COMPLETE,
ATH12K_FLAG_FTM_SEGMENTED,
+ ATH12K_FLAG_FIXED_MEM_REGION,
};
struct ath12k_tx_conf {
@@ -295,14 +331,20 @@ struct ath12k_link_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
- struct ieee80211_chanctx_conf chanctx;
u8 vdev_stats_id;
u32 punct_bitmap;
u8 link_id;
struct ath12k_vif *ahvif;
struct ath12k_rekey_data rekey_data;
+ struct ath12k_link_stats link_stats;
+ spinlock_t link_stats_lock; /* Protects updates to link_stats */
u8 current_cntdown_counter;
+
+ /* only used in station mode */
+ bool is_sta_assoc_link;
+
+ struct ath12k_reg_tpc_power_info reg_tpc_info;
};
struct ath12k_vif {
@@ -364,6 +406,8 @@ struct ath12k_vif_iter {
#define HAL_RX_MAX_NSS 8
#define HAL_RX_MAX_NUM_LEGACY_RATES 12
+#define ATH12K_SCAN_TIMEOUT_HZ (20 * HZ)
+
struct ath12k_rx_peer_rate_stats {
u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
@@ -519,6 +563,12 @@ struct ath12k_link_sta {
u8 link_idx;
};
+struct ath12k_reoq_buf {
+ void *vaddr;
+ dma_addr_t paddr_aligned;
+ u32 size;
+};
+
struct ath12k_sta {
struct ath12k_vif *ahvif;
enum hal_pn_type pn_type;
@@ -531,13 +581,31 @@ struct ath12k_sta {
u8 num_peer;
enum ieee80211_sta_state state;
+
+ struct ath12k_reoq_buf reoq_bufs[IEEE80211_NUM_TIDS + 1];
};
-#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5925
-#define ATH12K_MAX_6G_FREQ 7115
+#define ATH12K_HALF_20MHZ_BW 10
+#define ATH12K_2GHZ_MIN_CENTER 2412
+#define ATH12K_2GHZ_MAX_CENTER 2484
+#define ATH12K_5GHZ_MIN_CENTER 4900
+#define ATH12K_5GHZ_MAX_CENTER 5920
+#define ATH12K_6GHZ_MIN_CENTER 5935
+#define ATH12K_6GHZ_MAX_CENTER 7115
+#define ATH12K_MIN_2GHZ_FREQ (ATH12K_2GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW - 1)
+#define ATH12K_MAX_2GHZ_FREQ (ATH12K_2GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW + 1)
+#define ATH12K_MIN_5GHZ_FREQ (ATH12K_5GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_5GHZ_FREQ (ATH12K_5GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MIN_6GHZ_FREQ (ATH12K_6GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_6GHZ_FREQ (ATH12K_6GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
#define ATH12K_NUM_CHANS 101
-#define ATH12K_MAX_5G_CHAN 173
+#define ATH12K_MAX_5GHZ_CHAN 173
+
+static inline bool ath12k_is_2ghz_channel_freq(u32 freq)
+{
+ return freq >= ATH12K_MIN_2GHZ_FREQ &&
+ freq <= ATH12K_MAX_2GHZ_FREQ;
+}
enum ath12k_hw_state {
ATH12K_HW_STATE_OFF,
@@ -564,7 +632,8 @@ struct ath12k_fw_stats {
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
- bool fw_stats_done;
+ u32 num_vdev_recvd;
+ u32 num_bcn_recvd;
};
struct ath12k_dbg_htt_stats {
@@ -728,7 +797,6 @@ struct ath12k {
#endif
bool dfs_block_radar_events;
- bool monitor_conf_enabled;
bool monitor_vdev_created;
bool monitor_started;
int monitor_vdev_id;
@@ -737,12 +805,23 @@ struct ath12k {
bool nlo_enabled;
+ /* Protected by wiphy::mtx lock. */
+ u32 vdev_id_11d_scan;
+ struct completion completed_11d_scan;
+ enum ath12k_11d_state state_11d;
+ u8 alpha2[REG_ALPHA2_LEN];
+ bool regdom_set_by_user;
+
struct completion fw_stats_complete;
+ struct completion fw_stats_done;
struct completion mlo_setup_done;
u32 mlo_setup_status;
u8 ftm_msgref;
struct ath12k_fw_stats fw_stats;
+ unsigned long last_tx_power_update;
+
+ s8 max_allowed_tx_power;
};
struct ath12k_hw {
@@ -834,7 +913,7 @@ struct ath12k_board_data {
size_t len;
};
-struct ath12k_soc_dp_tx_err_stats {
+struct ath12k_device_dp_tx_err_stats {
/* TCL Ring Descriptor unavailable */
u32 desc_na[DP_TCL_NUM_RING_MAX];
/* Other failures during dp_tx due to mem allocation failure
@@ -843,13 +922,25 @@ struct ath12k_soc_dp_tx_err_stats {
atomic_t misc_fail;
};
-struct ath12k_soc_dp_stats {
+struct ath12k_device_dp_stats {
u32 err_ring_pkts;
u32 invalid_rbm;
u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
u32 hal_reo_error[DP_REO_DST_RING_MAX];
- struct ath12k_soc_dp_tx_err_stats tx_err;
+ struct ath12k_device_dp_tx_err_stats tx_err;
+ u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES];
+ u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES];
+ u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON];
+ u32 fw_tx_status[MAX_FW_TX_STATUS];
+ u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX];
+ u32 tx_enqueued[DP_TCL_NUM_RING_MAX];
+ u32 tx_completed[DP_TCL_NUM_RING_MAX];
+};
+
+struct ath12k_reg_freq {
+ u32 start_freq;
+ u32 end_freq;
};
struct ath12k_mlo_memory {
@@ -873,7 +964,7 @@ struct ath12k_hw_group {
u8 num_probed;
u8 num_started;
unsigned long flags;
- struct ath12k_base *ab[ATH12K_MAX_SOCS];
+ struct ath12k_base *ab[ATH12K_MAX_DEVICES];
/* protects access to this struct */
struct mutex mutex;
@@ -887,7 +978,7 @@ struct ath12k_hw_group {
struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
u8 num_hw;
bool mlo_capable;
- struct device_node *wsi_node[ATH12K_MAX_SOCS];
+ struct device_node *wsi_node[ATH12K_MAX_DEVICES];
struct ath12k_mlo_memory mlo_mem;
struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
bool hw_link_id_init_done;
@@ -923,6 +1014,10 @@ struct ath12k_base {
void __iomem *mem;
unsigned long mem_len;
+ void __iomem *mem_ce;
+ u32 ce_remap_base_addr;
+ bool ce_remap;
+
struct {
enum ath12k_bus bus;
const struct ath12k_hif_ops *ops;
@@ -991,9 +1086,11 @@ struct ath12k_base {
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct ath12k_reg_info *reg_info[MAX_RADIOS];
+
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
- struct ath12k_soc_dp_stats soc_stats;
+ struct ath12k_device_dp_stats device_stats;
#ifdef CONFIG_ATH12K_DEBUGFS
struct dentry *debugfs_soc;
#endif
@@ -1011,6 +1108,8 @@ struct ath12k_base {
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
+ struct work_struct update_11d_work;
+ u8 new_alpha2[2];
struct {
/* protected by data_lock */
u32 fw_crash_counter;
@@ -1020,8 +1119,6 @@ struct ath12k_base {
struct ath12k_dbring_cap *db_caps;
u32 num_db_cap;
- struct timer_list mon_reap_timer;
-
struct completion htc_suspend;
u64 fw_soc_drop_count;
@@ -1051,6 +1148,7 @@ struct ath12k_base {
size_t m3_len;
DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
+ bool fw_features_valid;
} fw;
const struct hal_rx_ops *hal_rx_ops;
@@ -1087,6 +1185,14 @@ struct ath12k_base {
struct ath12k_wsi_info wsi_info;
enum ath12k_firmware_mode fw_mode;
struct ath12k_ftm_event_obj ftm_event_obj;
+ bool hw_group_ref;
+
+ /* Denote whether MLO is possible within the device */
+ bool single_chip_mlo_support;
+
+ struct ath12k_reg_freq reg_freq_2ghz;
+ struct ath12k_reg_freq reg_freq_5ghz;
+ struct ath12k_reg_freq reg_freq_6ghz;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@@ -1185,6 +1291,7 @@ struct ath12k_fw_stats_pdev {
};
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
+void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
void ath12k_core_deinit(struct ath12k_base *ath12k);
@@ -1215,6 +1322,12 @@ u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
+void ath12k_fw_stats_init(struct ath12k *ar);
+void ath12k_fw_stats_bcn_free(struct list_head *head);
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats);
+void ath12k_fw_stats_reset(struct ath12k *ar);
+struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
+ int index);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
@@ -1275,8 +1388,16 @@ static inline void ath12k_core_create_firmware_path(struct ath12k_base *ab,
const char *filename,
void *buf, size_t buf_len)
{
- snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
- ab->hw_params->fw.dir, filename);
+ const char *fw_name = NULL;
+
+ of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name);
+
+ if (fw_name && strncmp(filename, "board", 5))
+ snprintf(buf, buf_len, "%s/%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, fw_name, filename);
+ else
+ snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, filename);
}
static inline const char *ath12k_bus_str(enum ath12k_bus bus)
@@ -1284,6 +1405,8 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus)
switch (bus) {
case ATH12K_BUS_PCI:
return "pci";
+ case ATH12K_BUS_AHB:
+ return "ahb";
}
return "unknown";
@@ -1333,20 +1456,6 @@ static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
return ab->ag;
}
-static inline void ath12k_core_started(struct ath12k_base *ab)
-{
- lockdep_assert_held(&ab->ag->mutex);
-
- ab->ag->num_started++;
-}
-
-static inline void ath12k_core_stopped(struct ath12k_base *ab)
-{
- lockdep_assert_held(&ab->ag->mutex);
-
- ab->ag->num_started--;
-}
-
static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
u8 device_id)
{
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index 57002215ddf1..23da93afaa5c 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -33,6 +33,76 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
+static ssize_t ath12k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t
+ath12k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k_base *ab = file->private_data;
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar = NULL;
+ char buf[32] = {0};
+ int i, ret;
+ ssize_t rc;
+
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar)
+ break;
+ }
+
+ if (!ar)
+ return -ENETDOWN;
+
+ if (!strcmp(buf, "assert")) {
+ ath12k_info(ab, "simulating firmware assert crash\n");
+ ret = ath12k_wmi_force_fw_hang_cmd(ar,
+ ATH12K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH12K_WMI_FW_HANG_DELAY);
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret) {
+ ath12k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath12k_read_simulate_fw_crash,
+ .write = ath12k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static ssize_t ath12k_write_tpc_stats_type(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -88,8 +158,8 @@ static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_arg *tpc_stats,
u32 chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
u8 band;
- band = ((chan_freq > ATH12K_MIN_6G_FREQ) ? NL80211_BAND_6GHZ :
- ((chan_freq > ATH12K_MIN_5G_FREQ) ? NL80211_BAND_5GHZ :
+ band = ((chan_freq > ATH12K_MIN_6GHZ_FREQ) ? NL80211_BAND_6GHZ :
+ ((chan_freq > ATH12K_MIN_5GHZ_FREQ) ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ));
if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
@@ -833,199 +903,352 @@ static const struct file_operations fops_extd_rx_stats = {
.open = simple_open,
};
-void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+static int ath12k_open_link_stats(struct inode *inode, struct file *file)
{
- bool dput_needed;
- char soc_name[64] = { 0 };
- struct dentry *debugfs_ath12k;
+ struct ath12k_vif *ahvif = inode->i_private;
+ size_t len = 0, buf_len = (PAGE_SIZE * 2);
+ struct ath12k_link_stats linkstat;
+ struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ struct wiphy *wiphy;
+ int link_id, i;
+ char *buf;
- debugfs_ath12k = debugfs_lookup("ath12k", NULL);
- if (debugfs_ath12k) {
- /* a dentry from lookup() needs dput() after we don't use it */
- dput_needed = true;
- } else {
- debugfs_ath12k = debugfs_create_dir("ath12k", NULL);
- if (IS_ERR_OR_NULL(debugfs_ath12k))
- return;
- dput_needed = false;
+ if (!ahvif)
+ return -EINVAL;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wiphy = ahvif->ah->hw->wiphy;
+ wiphy_lock(wiphy);
+
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = rcu_dereference_protected(ahvif->link[link_id],
+ lockdep_is_held(&wiphy->mtx));
+
+ spin_lock_bh(&arvif->link_stats_lock);
+ linkstat = arvif->link_stats;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Unicast Frames Enqueued = %d\n",
+ link_id, linkstat.tx_enqueued);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Broadcast Frames Enqueued = %d\n",
+ link_id, linkstat.tx_bcast_mcast);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frames Completed = %d\n",
+ link_id, linkstat.tx_completed);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frames Dropped = %d\n",
+ link_id, linkstat.tx_dropped);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frame descriptor Encap Type = ",
+ link_id);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " raw:%d",
+ linkstat.tx_encap_type[0]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " native_wifi:%d",
+ linkstat.tx_encap_type[1]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " ethernet:%d",
+ linkstat.tx_encap_type[2]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\nlink[%d] Tx Frame descriptor Encrypt Type = ",
+ link_id);
+
+ for (i = 0; i < HAL_ENCRYPT_TYPE_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ " %d:%d", i,
+ linkstat.tx_encrypt_type[i]);
+ }
+ len += scnprintf(buf + len, buf_len - len,
+ "\nlink[%d] Tx Frame descriptor Type = buffer:%d extension:%d\n",
+ link_id, linkstat.tx_desc_type[0],
+ linkstat.tx_desc_type[1]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "------------------------------------------------------\n");
}
- scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus),
- dev_name(ab->dev));
+ wiphy_unlock(wiphy);
- ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k);
+ file->private_data = buf;
- if (dput_needed)
- dput(debugfs_ath12k);
+ return 0;
}
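
The open/read/release trio above renders the whole report into one heap buffer at open() time, serves slices of it from read() via simple_read_from_buffer(), and frees it in release(). A userspace sketch of that snapshot-at-open pattern, with hypothetical stats fields:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct file_ctx { char *buf; size_t len; };

/* open(): render the stats once into a private buffer */
static int stats_open(struct file_ctx *f, unsigned int tx, unsigned int rx)
{
        size_t cap = 256, len = 0;
        char *buf = calloc(1, cap);

        if (!buf)
                return -1;
        len += snprintf(buf + len, cap - len, "tx = %u\n", tx);
        len += snprintf(buf + len, cap - len, "rx = %u\n", rx);
        f->buf = buf;
        f->len = len;
        return 0;
}

/* read(): copy out of the snapshot at *pos, like simple_read_from_buffer() */
static size_t stats_read(struct file_ctx *f, char *out, size_t count,
                         size_t *pos)
{
        size_t n = *pos < f->len ? f->len - *pos : 0;

        if (n > count)
                n = count;
        memcpy(out, f->buf + *pos, n);
        *pos += n;
        return n;
}

int main(void)
{
        struct file_ctx f;
        char out[64];
        size_t pos = 0, n;

        stats_open(&f, 10, 20);
        while ((n = stats_read(&f, out, 7, &pos)) > 0)
                fwrite(out, 1, n, stdout);
        free(f.buf);            /* release() */
        return 0;
}
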
-void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+static int ath12k_release_link_stats(struct inode *inode, struct file *file)
{
- debugfs_remove_recursive(ab->debugfs_soc);
- ab->debugfs_soc = NULL;
- /* We are not removing ath12k directory on purpose, even if it
- * would be empty. This simplifies the directory handling and it's
- * a minor cosmetic issue to leave an empty ath12k directory to
- * debugfs.
- */
+ kfree(file->private_data);
+ return 0;
}
-static void ath12k_fw_stats_pdevs_free(struct list_head *head)
+static ssize_t ath12k_read_link_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct ath12k_fw_stats_pdev *i, *tmp;
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static void ath12k_fw_stats_bcn_free(struct list_head *head)
+static const struct file_operations ath12k_fops_link_stats = {
+ .open = ath12k_open_link_stats,
+ .release = ath12k_release_link_stats,
+ .read = ath12k_read_link_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
- struct ath12k_fw_stats_bcn *i, *tmp;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
+ debugfs_create_file("link_stats", 0400, vif->debugfs_dir, ahvif,
+ &ath12k_fops_link_stats);
}
-static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct ath12k_fw_stats_vdev *i, *tmp;
+ struct ath12k_base *ab = file->private_data;
+ struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
+ int len = 0, i, j, ret;
+ struct ath12k *ar;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR] = "Overflow",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR] = "MPDU len",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR] = "FCS",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR] = "Decrypt",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR] = "TKIP MIC",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR] = "Unencrypt",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR] = "MSDU len",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR] = "MSDU limit",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR] = "WiFi parse",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR] = "AMSDU parse",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR] = "SA timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR] = "DA timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR] = "Flow timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR] = "Flush req",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR] = "AMSDU frag",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR] = "Multicast echo",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR] = "AMSDU mismatch",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR] = "Unauth WDS",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR] = "AMSDU or WDS"};
+
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO] = "Desc addr zero",
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID] = "Desc inval",
+ [HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA] = "AMPDU in non BA",
+ [HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE] = "Non BA dup",
+ [HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE] = "BA dup",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP] = "Frame 2k jump",
+ [HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP] = "BAR 2k jump",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR] = "Frame OOR",
+ [HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR] = "BAR OOR",
+ [HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION] = "No BA session",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN] = "Frame SN equal SSN",
+ [HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED] = "PN check fail",
+ [HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET] = "2k err",
+ [HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET] = "PN err",
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED] = "Desc blocked"};
+
+ static const char *wbm_rel_src[HAL_WBM_REL_SRC_MODULE_MAX] = {
+ [HAL_WBM_REL_SRC_MODULE_TQM] = "TQM",
+ [HAL_WBM_REL_SRC_MODULE_RXDMA] = "Rxdma",
+ [HAL_WBM_REL_SRC_MODULE_REO] = "Reo",
+ [HAL_WBM_REL_SRC_MODULE_FW] = "FW",
+ [HAL_WBM_REL_SRC_MODULE_SW] = "SW"};
+
+ char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
+ if (!buf)
+ return -ENOMEM;
-void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
-{
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats.fw_stats_done = false;
- ath12k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
- ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
- ath12k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- spin_unlock_bh(&ar->data_lock);
-}
+ len += scnprintf(buf + len, size - len, "DEVICE RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ device_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ device_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
-static int ath12k_debugfs_fw_stats_request(struct ath12k *ar,
- struct ath12k_fw_stats_req_params *param)
-{
- struct ath12k_base *ab = ar->ab;
- unsigned long timeout, time_left;
- int ret;
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], device_stats->rxdma_error[i]);
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + msecs_to_jiffies(3 * 1000);
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], device_stats->reo_error[i]);
- ath12k_debugfs_fw_stats_reset(ar);
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
- reinit_completion(&ar->fw_stats_complete);
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len,
+ "ring%d: %u\n", i,
+ device_stats->hal_reo_error[i]);
- ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
- param->vdev_id, param->pdev_id);
+ len += scnprintf(buf + len, size - len, "\nDEVICE TX STATS:\n");
+ len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
- if (ret) {
- ath12k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+ i, device_stats->tx_err.desc_na[i]);
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
- 1 * HZ);
- /* If the wait timed out, return -ETIMEDOUT */
- if (!time_left)
- return -ETIMEDOUT;
+ len += scnprintf(buf + len, size - len,
+ "\nMisc Transmit Failures: %d\n",
+ atomic_read(&device_stats->tx_err.misc_fail));
- /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
- * when stats data buffer limit is reached. fw_stats_complete
- * is completed once host receives first event from firmware, but
- * still end might not be marked in the TLV.
- * Below loop is to confirm that firmware completed sending all the event
- * and fw_stats_done is marked true when end is marked in the TLV
- */
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
+ len += scnprintf(buf + len, size - len, "\ntx_wbm_rel_source:");
- spin_lock_bh(&ar->data_lock);
- if (ar->fw_stats.fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
- }
- spin_unlock_bh(&ar->data_lock);
- }
- return 0;
-}
+ for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tx_wbm_rel_source[i]);
-void
-ath12k_debugfs_fw_stats_process(struct ath12k *ar,
- struct ath12k_fw_stats *stats)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_pdev *pdev;
- bool is_end;
- static unsigned int num_vdev, num_bcn;
- size_t total_vdevs_started = 0;
- int i;
-
- if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats->vdevs)) {
- ath12k_warn(ab, "empty vdev stats");
- return;
- }
- /* FW sends all the active VDEV stats irrespective of PDEV,
- * hence limit until the count of all VDEVs started
- */
- rcu_read_lock();
- for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar)
- total_vdevs_started += pdev->ar->num_started_vdevs;
- }
- rcu_read_unlock();
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\ntqm_rel_reason:");
+
+ for (i = 0; i < MAX_TQM_RELEASE_REASON; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tqm_rel_reason[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\nfw_tx_status:");
+
+ for (i = 0; i < MAX_FW_TX_STATUS; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->fw_tx_status[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
- is_end = ((++num_vdev) == total_vdevs_started);
+ len += scnprintf(buf + len, size - len, "\ntx_enqueued:");
- list_splice_tail_init(&stats->vdevs,
- &ar->fw_stats.vdevs);
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u", i,
+ device_stats->tx_enqueued[i]);
- if (is_end) {
- ar->fw_stats.fw_stats_done = true;
- num_vdev = 0;
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\ntx_completed:");
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tx_completed[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, DP_SW2HW_MACID(i));
+ if (ar) {
+ len += scnprintf(buf + len, size - len,
+ "\nradio%d tx_pending: %u\n", i,
+ atomic_read(&ar->dp.num_tx_pending));
}
- return;
}
- if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
- if (list_empty(&stats->bcn)) {
- ath12k_warn(ab, "empty beacon stats");
- return;
+
+ len += scnprintf(buf + len, size - len, "\nREO Rx Received:\n");
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ len += scnprintf(buf + len, size - len, "Ring%d:", i + 1);
+
+ for (j = 0; j < ATH12K_MAX_DEVICES; j++) {
+ len += scnprintf(buf + len, size - len,
+ "\t%d:%u", j,
+ device_stats->reo_rx[i][j]);
}
- /* Mark end until we reached the count of all started VDEVs
- * within the PDEV
- */
- is_end = ((++num_bcn) == ar->num_started_vdevs);
- list_splice_tail_init(&stats->bcn,
- &ar->fw_stats.bcn);
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+
+ len += scnprintf(buf + len, size - len, "\nRx WBM REL SRC Errors:\n");
+
+ for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++) {
+ len += scnprintf(buf + len, size - len, "%s:", wbm_rel_src[i]);
- if (is_end) {
- ar->fw_stats.fw_stats_done = true;
- num_bcn = 0;
+ for (j = 0; j < ATH12K_MAX_DEVICES; j++) {
+ len += scnprintf(buf + len,
+ size - len,
+ "\t%d:%u", j,
+ device_stats->rx_wbm_rel_source[i][j]);
}
+
+ len += scnprintf(buf + len, size - len, "\n");
}
- if (stats->stats_id == WMI_REQUEST_PDEV_STAT) {
- list_splice_tail_init(&stats->pdevs, &ar->fw_stats.pdevs);
- ar->fw_stats.fw_stats_done = true;
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ return ret;
+}
+
+static const struct file_operations fops_device_dp_stats = {
+ .read = ath12k_debugfs_dump_device_dp_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
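
The dump above grows its report with repeated len += scnprintf(buf + len, size - len, ...), which cannot overrun because every call is bounded by the remaining space and scnprintf() returns only what was actually written. A sketch of that accumulation using a small scnprintf-like wrapper (my_scnprintf is an illustrative stand-in):

#include <stdarg.h>
#include <stdio.h>

/* scnprintf-alike: returns bytes actually written, never more than size - 1 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list ap;
        int n;

        if (!size)
                return 0;
        va_start(ap, fmt);
        n = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (n < 0)
                return 0;
        return (size_t)n >= size ? (int)size - 1 : n;
}

int main(void)
{
        char buf[64];
        size_t size = sizeof(buf), len = 0;
        unsigned int ring[3] = { 5, 7, 9 };

        len += my_scnprintf(buf + len, size - len, "TCL ring stats:\n");
        for (int i = 0; i < 3; i++)
                len += my_scnprintf(buf + len, size - len,
                                    "ring%d: %u\n", i, ring[i]);
        fwrite(buf, 1, len, stdout);
        return 0;
}
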
+void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
+{
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("device_dp_stats", 0400, ab->debugfs_soc, ab,
+ &fops_device_dp_stats);
+}
+
+void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+ bool dput_needed;
+ char soc_name[64] = { 0 };
+ struct dentry *debugfs_ath12k;
+
+ debugfs_ath12k = debugfs_lookup("ath12k", NULL);
+ if (debugfs_ath12k) {
+ /* a dentry from lookup() needs dput() once we no longer use it */
+ dput_needed = true;
+ } else {
+ debugfs_ath12k = debugfs_create_dir("ath12k", NULL);
+ if (IS_ERR_OR_NULL(debugfs_ath12k))
+ return;
+ dput_needed = false;
}
+
+ scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k);
+
+ if (dput_needed)
+ dput(debugfs_ath12k);
+}
+
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+ /* We are not removing the ath12k directory on purpose, even if it
+ * would be empty. This simplifies the directory handling, and it's
+ * only a minor cosmetic issue to leave an empty ath12k directory in
+ * debugfs.
+ */
}
static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
@@ -1052,7 +1275,7 @@ static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
param.vdev_id = 0;
param.stats_id = WMI_REQUEST_VDEV_STAT;
- ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ ret = ath12k_mac_get_fw_stats(ar, &param);
if (ret) {
ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
return ret;
@@ -1117,7 +1340,7 @@ static int ath12k_open_bcn_stats(struct inode *inode, struct file *file)
continue;
param.vdev_id = arvif->vdev_id;
- ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ ret = ath12k_mac_get_fw_stats(ar, &param);
if (ret) {
ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
return ret;
@@ -1184,7 +1407,7 @@ static int ath12k_open_pdev_stats(struct inode *inode, struct file *file)
param.vdev_id = 0;
param.stats_id = WMI_REQUEST_PDEV_STAT;
- ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ ret = ath12k_mac_get_fw_stats(ar, &param);
if (ret) {
ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
return ret;
@@ -1239,11 +1462,7 @@ void ath12k_debugfs_fw_stats_register(struct ath12k *ar)
debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
&fops_pdev_stats);
- INIT_LIST_HEAD(&ar->fw_stats.vdevs);
- INIT_LIST_HEAD(&ar->fw_stats.bcn);
- INIT_LIST_HEAD(&ar->fw_stats.pdevs);
-
- init_completion(&ar->fw_stats_complete);
+ ath12k_fw_stats_init(ar);
}
void ath12k_debugfs_register(struct ath12k *ar)
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index d7041297d5d8..21641a8a0346 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -12,9 +12,9 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
void ath12k_debugfs_unregister(struct ath12k *ar);
-void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
- struct ath12k_fw_stats *stats);
-void ath12k_debugfs_fw_stats_reset(struct ath12k *ar);
+void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void ath12k_debugfs_pdev_create(struct ath12k_base *ab);
static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
@@ -124,23 +124,23 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar)
{
}
-static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
- struct ath12k_fw_stats *stats)
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
+ return false;
}
-static inline void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
{
+ return 0;
}
-static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+static inline void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
- return false;
}
-static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+static inline void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
{
- return 0;
}
#endif /* CONFIG_ATH12K_DEBUGFS */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index 1c0d5fa39a8d..aeaf970339d4 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -5377,6 +5377,9 @@ static ssize_t ath12k_write_htt_stats_type(struct file *file,
const int size = 32;
int num_args;
+ if (count > size)
+ return -EINVAL;
+
char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 50c36e6ea102..6317c6d4c043 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -168,6 +168,8 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->reo_status[0];
break;
case HAL_RXDMA_MONITOR_STATUS:
+ grp_mask = &ab->hw_params->ring_mask->rx_mon_status[0];
+ break;
case HAL_RXDMA_MONITOR_DST:
grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
break;
@@ -274,12 +276,17 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
break;
case HAL_RXDMA_BUF:
case HAL_RXDMA_MONITOR_BUF:
- case HAL_RXDMA_MONITOR_STATUS:
params.low_threshold = num_entries >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 0;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
case HAL_TX_MONITOR_DST:
params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
@@ -354,7 +361,10 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
/* only valid if idx_lookup_override is not set in tcl_data_cmd */
- bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+ bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+ else
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
HAL_TX_BANK_CONFIG_ADDRX_EN) |
@@ -919,6 +929,25 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
goto done;
}
+ if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) {
+ ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id];
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ 0);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
@@ -982,11 +1011,6 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
- if (!ab->mon_reap_timer.function)
- return;
-
- timer_delete_sync(&ab->mon_reap_timer);
-
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
@@ -1024,27 +1048,6 @@ void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
ab->hal_rx_ops->rx_desc_get_desc_size();
}
-static void ath12k_dp_service_mon_ring(struct timer_list *t)
-{
- struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
- int i;
-
- for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
- ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
- ATH12K_DP_RX_MONITOR_MODE);
-
- mod_timer(&ab->mon_reap_timer, jiffies +
- msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
-}
-
-static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
-{
- if (ab->hw_params->rxdma1_enable)
- return;
-
- timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
-}
-
int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -1055,8 +1058,6 @@ int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
if (ret)
goto out;
- ath12k_dp_mon_reap_timer_init(ab);
-
/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -1107,11 +1108,8 @@ static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
- /* TODO: Verify the search type and flags since ast hash
- * is not part of peer mapv3
- */
arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
- arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:
@@ -1206,11 +1204,19 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
if (!skb)
continue;
+ skb_cb = ATH12K_SKB_CB(skb);
+ if (skb_cb->paddr_ext_desc) {
+ dma_unmap_single(ab->dev,
+ skb_cb->paddr_ext_desc,
+ tx_desc_info->skb_ext_desc->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(tx_desc_info->skb_ext_desc);
+ }
+
/* if we are unregistering, hw would've been destroyed and
* ar is no longer valid.
*/
if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
- skb_cb = ATH12K_SKB_CB(skb);
ar = skb_cb->ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
@@ -1261,22 +1267,24 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
if (!ab->hw_params->reoq_lut_support)
return;
- if (dp->reoq_lut.vaddr) {
+ if (dp->reoq_lut.vaddr_unaligned) {
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_REO_REG +
HAL_REO1_QDESC_LUT_BASE0(ab), 0);
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
+ dma_free_coherent(ab->dev, dp->reoq_lut.size,
+ dp->reoq_lut.vaddr_unaligned,
+ dp->reoq_lut.paddr_unaligned);
+ dp->reoq_lut.vaddr_unaligned = NULL;
}
- if (dp->ml_reoq_lut.vaddr) {
+ if (dp->ml_reoq_lut.vaddr_unaligned) {
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_REO_REG +
HAL_REO1_QDESC_LUT_BASE1(ab), 0);
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
- dp->ml_reoq_lut.vaddr = NULL;
+ dma_free_coherent(ab->dev, dp->ml_reoq_lut.size,
+ dp->ml_reoq_lut.vaddr_unaligned,
+ dp->ml_reoq_lut.paddr_unaligned);
+ dp->ml_reoq_lut.vaddr_unaligned = NULL;
}
}
@@ -1608,39 +1616,67 @@ free:
return ret;
}
+static int ath12k_dp_alloc_reoq_lut(struct ath12k_base *ab,
+ struct ath12k_reo_q_addr_lut *lut)
+{
+ lut->size = DP_REOQ_LUT_SIZE + HAL_REO_QLUT_ADDR_ALIGN - 1;
+ lut->vaddr_unaligned = dma_alloc_coherent(ab->dev, lut->size,
+ &lut->paddr_unaligned,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lut->vaddr_unaligned)
+ return -ENOMEM;
+
+ lut->vaddr = PTR_ALIGN(lut->vaddr_unaligned, HAL_REO_QLUT_ADDR_ALIGN);
+ lut->paddr = lut->paddr_unaligned +
+ ((unsigned long)lut->vaddr - (unsigned long)lut->vaddr_unaligned);
+ return 0;
+}
+
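
The helper above over-allocates by HAL_REO_QLUT_ADDR_ALIGN - 1 bytes, rounds the virtual address up to the boundary, and offsets the DMA address by the same amount, so the CPU and device views stay in sync. A userspace sketch of the arithmetic, with malloc() standing in for dma_alloc_coherent() and assumed size/alignment values:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QLUT_ALIGN 256          /* stand-in for HAL_REO_QLUT_ADDR_ALIGN */
#define QLUT_SIZE  4096         /* stand-in for DP_REOQ_LUT_SIZE */

int main(void)
{
        /* over-allocate so an aligned region of QLUT_SIZE always fits */
        size_t size = QLUT_SIZE + QLUT_ALIGN - 1;
        unsigned char *unaligned = malloc(size);

        if (!unaligned)
                return 1;

        /* round the pointer up to the next QLUT_ALIGN boundary */
        uintptr_t addr = (uintptr_t)unaligned;
        unsigned char *aligned = (unsigned char *)
                ((addr + QLUT_ALIGN - 1) & ~(uintptr_t)(QLUT_ALIGN - 1));

        /* the device-visible address gets the same forward offset */
        ptrdiff_t offset = aligned - unaligned;

        printf("offset %td, aligned %% %d = %lu\n", offset, QLUT_ALIGN,
               (unsigned long)((uintptr_t)aligned % QLUT_ALIGN));
        free(unaligned);
        return 0;
}
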
static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ u32 val;
+ int ret;
if (!ab->hw_params->reoq_lut_support)
return 0;
- dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
- DP_REOQ_LUT_SIZE,
- &dp->reoq_lut.paddr,
- GFP_KERNEL | __GFP_ZERO);
- if (!dp->reoq_lut.vaddr) {
+ ret = ath12k_dp_alloc_reoq_lut(ab, &dp->reoq_lut);
+ if (ret) {
ath12k_warn(ab, "failed to allocate memory for reoq table");
- return -ENOMEM;
+ return ret;
}
- dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
- DP_REOQ_LUT_SIZE,
- &dp->ml_reoq_lut.paddr,
- GFP_KERNEL | __GFP_ZERO);
- if (!dp->ml_reoq_lut.vaddr) {
+ ret = ath12k_dp_alloc_reoq_lut(ab, &dp->ml_reoq_lut);
+ if (ret) {
ath12k_warn(ab, "failed to allocate memory for ML reoq table");
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
- return -ENOMEM;
+ dma_free_coherent(ab->dev, dp->reoq_lut.size,
+ dp->reoq_lut.vaddr_unaligned,
+ dp->reoq_lut.paddr_unaligned);
+ dp->reoq_lut.vaddr_unaligned = NULL;
+ return ret;
}
+ /* The register holds bits [39:8] of the LUT base address, so the
+ * LUT must be allocated with the low 8 bits zero. Also, the current
+ * design supports a paddr of up to 4 GB max, hence it fits in a
+ * 32-bit register.
+ */
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
- dp->reoq_lut.paddr);
+ dp->reoq_lut.paddr >> 8);
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
dp->ml_reoq_lut.paddr >> 8);
+ val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab));
+
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab),
+ val | HAL_REO_QDESC_ADDR_READ_LUT_ENABLE);
+
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_MAX_PEERID(ab),
+ HAL_REO_QDESC_MAX_PEERID);
+
return 0;
}
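
Because the register carries bits [39:8] of the base address, the setup above programs paddr >> 8; the LUT alignment guarantees the low bits are zero, so the encoding is lossless. A small round-trip sketch (the address value is hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical LUT base; 256-byte alignment means bits [7:0]
         * are zero and nothing is lost in the shift
         */
        uint64_t paddr = 0x12345600ULL;

        assert((paddr & 0xff) == 0);

        uint32_t reg = (uint32_t)(paddr >> 8);  /* value written to the register */
        uint64_t decoded = (uint64_t)reg << 8;  /* address the hardware derives */

        printf("reg=0x%x decoded=0x%llx\n", reg, (unsigned long long)decoded);
        assert(decoded == paddr);
        return 0;
}
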
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 75435a931548..a353333f83b6 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -7,6 +7,7 @@
#ifndef ATH12K_DP_H
#define ATH12K_DP_H
+#include "hal_desc.h"
#include "hal_rx.h"
#include "hw.h"
@@ -69,6 +70,16 @@ struct ath12k_pdev_mon_stats {
u32 dest_mpdu_drop;
u32 dup_mon_linkdesc_cnt;
u32 dup_mon_buf_cnt;
+ u32 dest_mon_stuck;
+ u32 dest_mon_not_reaped;
+};
+
+enum dp_mon_status_buf_state {
+ DP_MON_STATUS_MATCH,
+ DP_MON_STATUS_NO_DMA,
+ DP_MON_STATUS_LAG,
+ DP_MON_STATUS_LEAD,
+ DP_MON_STATUS_REPLINISH,
};
struct dp_link_desc_bank {
@@ -106,6 +117,8 @@ struct dp_mon_mpdu {
struct list_head list;
struct sk_buff *head;
struct sk_buff *tail;
+ u32 err_bitmap;
+ u8 decap_format;
};
#define DP_MON_MAX_STATUS_BUF 32
@@ -118,8 +131,11 @@ struct ath12k_mon_data {
u32 mon_last_buf_cookie;
u64 mon_last_linkdesc_paddr;
u16 chan_noise_floor;
+ u32 err_bitmap;
+ u8 decap_format;
struct ath12k_pdev_mon_stats rx_mon_stats;
+ enum dp_mon_status_buf_state buf_state;
/* lock for monitor data */
spinlock_t mon_lock;
struct sk_buff_head rx_status_q;
@@ -188,6 +204,14 @@ struct ath12k_pdev_dp {
#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
+#define RX_MON_STATUS_BASE_BUF_SIZE 2048
+#define RX_MON_STATUS_BUF_ALIGN 128
+#define RX_MON_STATUS_BUF_RESERVATION 128
+#define RX_MON_STATUS_BUF_SIZE (RX_MON_STATUS_BASE_BUF_SIZE - \
+ (RX_MON_STATUS_BUF_RESERVATION + \
+ RX_MON_STATUS_BUF_ALIGN + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
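
The macro budgets the 2048-byte allocation: the reservation, the alignment slack, and the skb_shared_info tail all come out of the usable size. A sketch of that computation; the cacheline size and the skb_shared_info size below are assumptions, not values from this driver:

#include <stdio.h>

#define CACHE_BYTES 64                          /* assumed SMP_CACHE_BYTES */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int base = 2048;               /* RX_MON_STATUS_BASE_BUF_SIZE */
        unsigned int reservation = 128;         /* RX_MON_STATUS_BUF_RESERVATION */
        unsigned int align = 128;               /* RX_MON_STATUS_BUF_ALIGN */
        /* assumed sizeof(struct skb_shared_info), cacheline aligned */
        unsigned int shinfo = ALIGN_UP(320u, CACHE_BYTES);

        printf("usable status buffer: %u bytes\n",
               base - (reservation + align + shinfo));
        return 0;
}
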
#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(19, 18)
@@ -263,6 +287,9 @@ struct ath12k_pdev_dp {
/* Invalid TX Bank ID value */
#define DP_INVALID_BANK_ID -1
+#define MAX_TQM_RELEASE_REASON 15
+#define MAX_FW_TX_STATUS 7
+
struct ath12k_dp_tx_bank_profile {
u8 is_configured;
u32 num_users;
@@ -293,11 +320,18 @@ struct ath12k_rx_desc_info {
struct ath12k_tx_desc_info {
struct list_head list;
struct sk_buff *skb;
+ struct sk_buff *skb_ext_desc;
u32 desc_id; /* Cookie */
u8 mac_id;
u8 pool_id;
};
+struct ath12k_tx_desc_params {
+ struct sk_buff *skb;
+ struct sk_buff *skb_ext_desc;
+ u8 mac_id;
+};
+
struct ath12k_spt_info {
dma_addr_t paddr;
u64 *vaddr;
@@ -309,12 +343,26 @@ struct ath12k_reo_queue_ref {
} __packed;
struct ath12k_reo_q_addr_lut {
- dma_addr_t paddr;
+ u32 *vaddr_unaligned;
u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+struct ath12k_link_stats {
+ u32 tx_enqueued;
+ u32 tx_completed;
+ u32 tx_bcast_mcast;
+ u32 tx_dropped;
+ u32 tx_encap_type[HAL_TCL_ENCAP_TYPE_MAX];
+ u32 tx_encrypt_type[HAL_ENCRYPT_TYPE_MAX];
+ u32 tx_desc_type[HAL_TCL_DESC_TYPE_MAX];
};
struct ath12k_dp {
struct ath12k_base *ab;
+ u32 mon_dest_ring_stuck_cnt;
u8 num_bank_profiles;
/* protects the access and update of bank_profiles */
spinlock_t tx_bank_lock;
@@ -367,6 +415,7 @@ struct ath12k_dp {
struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
+ struct dp_rxdma_mon_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
struct ath12k_reo_q_addr_lut reoq_lut;
struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
@@ -1307,6 +1356,8 @@ struct htt_t2h_version_conf_msg {
#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL GENMASK(31, 16)
#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index d22800e89485..28cadc4167f7 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1416,6 +1416,40 @@ ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_
}
}
+static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
+{
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+}
+
+static void
+ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
+ const struct hal_rx_msdu_end *msdu_end)
+{
+ ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2),
+ &pmon->err_bitmap);
+ pmon->decap_format = le32_get_bits(msdu_end->info1,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
static enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
struct ath12k_mon_data *pmon,
@@ -1647,7 +1681,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
}
- break;
+ return HAL_RX_MON_STATUS_MPDU_START;
}
case HAL_RX_MSDU_START:
/* TODO: add msdu start parsing logic */
@@ -1655,6 +1689,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
case HAL_MON_BUF_ADDR:
return HAL_RX_MON_STATUS_BUF_ADDR;
case HAL_RX_MSDU_END:
+ ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data);
return HAL_RX_MON_STATUS_MSDU_END;
case HAL_RX_MPDU_END:
return HAL_RX_MON_STATUS_MPDU_END;
@@ -1688,45 +1723,295 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
+static void
+ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ u32 center_freq = ppdu_info->freq;
+
+ rx_status->freq = center_freq;
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(ppdu_info->bw);
+ rx_status->nss = ppdu_info->nss;
+ rx_status->rate_idx = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_6GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_6GHZ;
+ } else if (center_freq >= ATH12K_MIN_2GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_2GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (center_freq >= ATH12K_MIN_5GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_5GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ rx_status->band = NUM_NL80211_BANDS;
+ }
+}
+
+static struct sk_buff
+*ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int *buf_id)
+{
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
+
+ if (!skb)
+ goto fail_alloc_skb;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ RX_MON_STATUS_BUF_ALIGN)) {
+ skb_pull(skb, PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (*buf_id < 0)
+ goto fail_dma_unmap;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+ return skb;
+
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ return NULL;
+}
+
+static enum dp_mon_status_buf_state
+ath12k_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng,
+ struct dp_rxdma_mon_ring *rx_ring)
+{
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_tlv_64_hdr *tlv;
+ struct sk_buff *skb;
+ void *status_desc;
+ dma_addr_t paddr;
+ u32 cookie;
+ int buf_id;
+ u8 rbm;
+
+ status_desc = ath12k_hal_srng_src_next_peek(ab, srng);
+ if (!status_desc)
+ return DP_MON_STATUS_NO_DMA;
+
+ ath12k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb)
+ return DP_MON_STATUS_NO_DMA;
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_64_hdr *)skb->data;
+ if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE)
+ return DP_MON_STATUS_NO_DMA;
+
+ return DP_MON_STATUS_REPLINISH;
+}
+
+static u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id)
+{
+ u32 ret = 0;
+
+ if ((*ppdu_id < msdu_ppdu_id) &&
+ ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ /* Hold the mon dest ring and reap the mon status ring. */
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ } else if ((*ppdu_id > msdu_ppdu_id) &&
+ ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ /* PPDU ID has exceeded the maximum value and will
+ * restart from 0.
+ */
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ }
+ return ret;
+}
+
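
The comparison above must tolerate the PPDU counter wrapping back to 0: a small forward step advances, a huge backwards jump is treated as a restart, and anything else is held as stale. A standalone sketch with an assumed wraparound threshold:

#include <stdio.h>

#define NOT_WRAP_AROUND 20000   /* stand-in for DP_NOT_PPDU_ID_WRAP_AROUND */

/* Advance *ppdu_id to msdu_ppdu_id when the new id is genuinely newer,
 * treating a huge backwards jump as a counter wraparound.
 */
static unsigned int comp_ppduid(unsigned int msdu_ppdu_id, unsigned int *ppdu_id)
{
        if (*ppdu_id < msdu_ppdu_id &&
            msdu_ppdu_id - *ppdu_id < NOT_WRAP_AROUND) {
                *ppdu_id = msdu_ppdu_id;        /* normal forward step */
                return msdu_ppdu_id;
        }
        if (*ppdu_id > msdu_ppdu_id &&
            *ppdu_id - msdu_ppdu_id > NOT_WRAP_AROUND) {
                *ppdu_id = msdu_ppdu_id;        /* counter restarted from 0 */
                return msdu_ppdu_id;
        }
        return 0;                               /* stale or duplicate: hold */
}

int main(void)
{
        unsigned int cur = 65530;

        printf("%u\n", comp_ppduid(65531, &cur));       /* forward: 65531 */
        printf("%u\n", comp_ppduid(3, &cur));           /* wrapped: 3     */
        printf("%u\n", comp_ppduid(2, &cur));           /* stale:   0     */
        return 0;
}
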
+static
+void ath12k_dp_mon_next_link_desc_get(struct hal_rx_msdu_link *msdu_link,
+ dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm,
+ struct ath12k_buffer_addr **pp_buf_addr_info)
+{
+ struct ath12k_buffer_addr *buf_addr_info;
+
+ buf_addr_info = &msdu_link->buf_addr_info;
+
+ ath12k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
+
+ *pp_buf_addr_info = buf_addr_info;
+}
+
+static void
+ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 rate_mcs, nss, sgi;
+ bool is_cck;
+
+ pkt_type = ppdu_info->preamble_type;
+ rate_mcs = ppdu_info->rate;
+ nss = ppdu_info->nss;
+ sgi = ppdu_info->gi;
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ if (rx_status->band < NUM_NL80211_BANDS) {
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ }
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11BE:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in EHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_EHT;
+ rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ break;
+ default:
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "monitor receives invalid preamble type %d",
+ pkt_type);
+ break;
+ }
+}
+
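
For HT, mac80211 encodes both MCS and stream count in one index: eight MCS values per spatial stream, hence rate_idx = mcs + 8 * (nss - 1) in the code above. A one-liner sketch of the mapping:

#include <stdio.h>

/* mac80211 HT rate index: MCS 0..7 per stream, streams stacked in
 * blocks of eight (MCS 8..15 = two streams, and so on)
 */
static unsigned int ht_rate_idx(unsigned int mcs, unsigned int nss)
{
        return mcs + 8 * (nss - 1);
}

int main(void)
{
        printf("%u\n", ht_rate_idx(7, 1));      /* 7  -> HT MCS7  */
        printf("%u\n", ht_rate_idx(7, 2));      /* 15 -> HT MCS15 */
        return 0;
}
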
static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
struct sk_buff *head_msdu,
struct sk_buff *tail_msdu)
{
- u32 rx_pkt_offset, l2_hdr_offset;
+ u32 rx_pkt_offset, l2_hdr_offset, total_offset;
rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset =
ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data);
- skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset);
+
+ if (ar->ab->hw_params->rxdma1_enable)
+ total_offset = ATH12K_MON_RX_PKT_OFFSET;
+ else
+ total_offset = rx_pkt_offset + l2_hdr_offset;
+
+ skb_pull(head_msdu, total_offset);
}
static struct sk_buff *
ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
- struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
- struct ieee80211_rx_status *rxs, bool *fcs_err)
+ struct dp_mon_mpdu *mon_mpdu,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rxs)
{
struct ath12k_base *ab = ar->ab;
struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
- struct hal_rx_desc *rx_desc, *tail_rx_desc;
- u8 *hdr_desc, *dest, decap_format;
+ struct sk_buff *head_msdu, *tail_msdu;
+ struct hal_rx_desc *rx_desc;
+ u8 *hdr_desc, *dest, decap_format = mon_mpdu->decap_format;
struct ieee80211_hdr_3addr *wh;
- u32 err_bitmap, frag_list_sum_len = 0;
+ struct ieee80211_channel *channel;
+ u32 frag_list_sum_len = 0;
+ u8 channel_num = ppdu_info->chan_num;
mpdu_buf = NULL;
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
- if (!head_msdu)
+ if (!head_msdu || !tail_msdu)
goto err_merge_fail;
- rx_desc = (struct hal_rx_desc *)head_msdu->data;
- tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data;
+ ath12k_dp_mon_fill_rx_stats_info(ar, ppdu_info, rxs);
+
+ if (unlikely(rxs->band == NUM_NL80211_BANDS ||
+ !ath12k_ar_to_hw(ar)->wiphy->bands[rxs->band])) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
+ rxs->band, channel_num, ppdu_info->freq, ar->pdev_idx);
- err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc);
- if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
- *fcs_err = true;
+ spin_lock_bh(&ar->data_lock);
+ channel = ar->rx_channel;
+ if (channel) {
+ rxs->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
- decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc);
+ if (rxs->band < NUM_NL80211_BANDS)
+ rxs->freq = ieee80211_channel_to_frequency(channel_num,
+ rxs->band);
- ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs);
+ ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
@@ -1736,7 +2021,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
head_frag_list = NULL;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
if (!head_frag_list)
head_frag_list = msdu;
@@ -1748,7 +2033,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
prev_buf->next = NULL;
- skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ skb_trim(prev_buf, prev_buf->len);
if (head_frag_list) {
skb_shinfo(head_msdu)->frag_list = head_frag_list;
head_msdu->data_len = frag_list_sum_len;
@@ -1771,7 +2056,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
msdu = head_msdu;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
@@ -1954,7 +2239,8 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ u8 decap)
{
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
@@ -1966,10 +2252,12 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
struct ieee80211_sta *pubsta = NULL;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u8 decap = DP_RX_DECAP_TYPE_RAW;
+ struct ath12k_dp_rx_info rx_info;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
+ status->link_valid = 0;
+
if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
!(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
@@ -1977,10 +2265,9 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
status->flag |= RX_FLAG_RADIOTAP_HE;
}
- if (!(status->flag & RX_FLAG_ONLY_MONITOR))
- decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ rx_info.addr2_present = false;
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, &rx_info);
if (peer && peer->sta) {
pubsta = peer->sta;
if (pubsta->valid_links) {
@@ -2035,25 +2322,23 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
- struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
+ struct dp_mon_mpdu *mon_mpdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct napi_struct *napi)
{
struct ath12k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
struct ieee80211_rx_status *rxs = &dp->rx_status;
- bool fcs_err = false;
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
- mon_skb = ath12k_dp_mon_rx_merg_msdus(ar,
- head_msdu, tail_msdu,
- rxs, &fcs_err);
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mon_mpdu, ppduinfo, rxs);
if (!mon_skb)
goto mon_deliver_fail;
header = mon_skb;
rxs->flag = 0;
- if (fcs_err)
+ if (mon_mpdu->err_bitmap & HAL_RX_MPDU_ERR_FCS)
rxs->flag = RX_FLAG_FAILED_FCS_CRC;
do {
@@ -2070,8 +2355,12 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
+
+ if (!(rxs->flag & RX_FLAG_ONLY_MONITOR))
+ decap = mon_mpdu->decap_format;
+
ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
- ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+ ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs, decap);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
@@ -2079,7 +2368,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
return 0;
mon_deliver_fail:
- mon_skb = head_msdu;
+ mon_skb = mon_mpdu->head;
while (mon_skb) {
skb_next = mon_skb->next;
dev_kfree_skb_any(mon_skb);
@@ -2088,6 +2377,133 @@ mon_deliver_fail:
return -EINVAL;
}
+static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if ((pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ return -ENOMEM;
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+
+ return 0;
+}
+
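
ath12k_dp_pkt_set_pktlen() above forces the skb to exactly len bytes: trim when too long, expand the tailroom and skb_put() when too short. A userspace sketch with realloc() standing in for pskb_expand_head():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt { unsigned char *data; size_t len, cap; };

/* Force p->len to exactly len, growing the buffer when needed. */
static int pkt_set_len(struct pkt *p, size_t len)
{
        if (p->len > len) {
                p->len = len;                   /* skb_trim() */
                return 0;
        }
        if (p->cap < len) {                     /* not enough tailroom */
                unsigned char *n = realloc(p->data, len);

                if (!n)
                        return -1;              /* -ENOMEM */
                p->data = n;
                p->cap = len;
        }
        memset(p->data + p->len, 0, len - p->len);
        p->len = len;                           /* skb_put() */
        return 0;
}

int main(void)
{
        struct pkt p = { .data = malloc(8), .len = 8, .cap = 8 };

        pkt_set_len(&p, 4);
        printf("%zu\n", p.len);                 /* 4  */
        pkt_set_len(&p, 16);
        printf("%zu\n", p.len);                 /* 16 */
        free(p.data);
        return 0;
}
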
+/* Hardware fills the buffer in 128-byte aligned blocks, so it must
+ * be reaped with the same 128-byte alignment.
+ */
+#define RXDMA_DATA_DMA_BLOCK_SIZE 128
+
+static void
+ath12k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ *is_frag = true;
+ *frag_len = (RX_MON_STATUS_BASE_BUF_SIZE -
+ sizeof(struct hal_rx_desc)) &
+ ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1);
+ *total_len += *frag_len;
+ } else {
+ if (*is_frag)
+ *frag_len = info->msdu_len - *total_len;
+ else
+ *frag_len = info->msdu_len;
+
+ *msdu_cnt -= 1;
+ }
+}
+
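
When the continuation flag is set above, a full buffer minus the RX descriptor is consumed, rounded down to the 128-byte DMA block size; the last buffer then carries whatever remains of msdu_len. A sketch of that bookkeeping; the descriptor size is an assumed value:

#include <stdio.h>

#define BASE_BUF_SIZE 2048      /* RX_MON_STATUS_BASE_BUF_SIZE */
#define RX_DESC_SIZE  144       /* assumed sizeof(struct hal_rx_desc) */
#define DMA_BLOCK     128       /* RXDMA_DATA_DMA_BLOCK_SIZE */

/* Per-buffer fragment length for an MSDU spread over several buffers. */
static unsigned int frag_len(int continuation, int *is_frag,
                             unsigned int msdu_len, unsigned int *total)
{
        unsigned int len;

        if (continuation) {
                *is_frag = 1;
                /* whole buffer minus descriptor, rounded down to a block */
                len = (BASE_BUF_SIZE - RX_DESC_SIZE) & ~(DMA_BLOCK - 1);
                *total += len;
        } else {
                len = *is_frag ? msdu_len - *total : msdu_len;
        }
        return len;
}

int main(void)
{
        unsigned int total = 0;
        int is_frag = 0;

        /* a 3000-byte MSDU: one continuation buffer, then the tail */
        printf("%u\n", frag_len(1, &is_frag, 3000, &total));    /* 1792 */
        printf("%u\n", frag_len(0, &is_frag, 3000, &total));    /* 1208 */
        return 0;
}
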
+static int
+ath12k_dp_mon_parse_status_buf(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ const struct dp_mon_packet_info *packet_info)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
+ struct sk_buff *msdu;
+ int buf_id;
+ u32 offset;
+
+ buf_id = u32_get_bits(packet_info->cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath12k_warn(ab, "mon dest desc with inval buf_id %d\n", buf_id);
+ return 0;
+ }
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ offset = packet_info->dma_length + ATH12K_MON_RX_DOT11_OFFSET;
+ if (ath12k_dp_pkt_set_pktlen(msdu, offset)) {
+ dev_kfree_skb_any(msdu);
+ goto dest_replenish;
+ }
+
+ if (!pmon->mon_mpdu->head)
+ pmon->mon_mpdu->head = msdu;
+ else
+ pmon->mon_mpdu->tail->next = msdu;
+
+ pmon->mon_mpdu->tail = msdu;
+
+dest_replenish:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ enum hal_rx_mon_status hal_status,
+ const void *tlv_data)
+{
+ switch (hal_status) {
+ case HAL_RX_MON_STATUS_MPDU_START:
+ if (WARN_ON_ONCE(pmon->mon_mpdu))
+ break;
+
+ pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC);
+ if (!pmon->mon_mpdu)
+ return -ENOMEM;
+ break;
+ case HAL_RX_MON_STATUS_BUF_ADDR:
+ return ath12k_dp_mon_parse_status_buf(ar, pmon, tlv_data);
+ case HAL_RX_MON_STATUS_MPDU_END:
+ /* If no MSDU then free empty MPDU */
+ if (pmon->mon_mpdu->tail) {
+ pmon->mon_mpdu->tail->next = NULL;
+ list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+ } else {
+ kfree(pmon->mon_mpdu);
+ }
+ pmon->mon_mpdu = NULL;
+ break;
+ case HAL_RX_MON_STATUS_MSDU_END:
+ pmon->mon_mpdu->decap_format = pmon->decap_format;
+ pmon->mon_mpdu->err_bitmap = pmon->err_bitmap;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static enum hal_rx_mon_status
ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
struct sk_buff *skb)
@@ -2114,14 +2530,20 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
+
+ if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable &&
+ ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value))
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+
ptr += sizeof(*tlv) + tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
- if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ if ((ptr - skb->data) > skb->len)
break;
} while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
(hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+ (hal_status == HAL_RX_MON_STATUS_MPDU_START) ||
(hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
(hal_status == HAL_RX_MON_STATUS_MSDU_END));
@@ -2141,23 +2563,21 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct dp_mon_mpdu *tmp;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
- struct sk_buff *head_msdu, *tail_msdu;
- enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+ enum hal_rx_mon_status hal_status;
- ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE)
+ return hal_status;
list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
- head_msdu = mon_mpdu->head;
- tail_msdu = mon_mpdu->tail;
- if (head_msdu && tail_msdu) {
- ath12k_dp_mon_rx_deliver(ar, head_msdu,
- tail_msdu, ppdu_info, napi);
- }
+ if (mon_mpdu->head && mon_mpdu->tail)
+ ath12k_dp_mon_rx_deliver(ar, mon_mpdu, ppdu_info, napi);
kfree(mon_mpdu);
}
+
return hal_status;
}
@@ -2236,6 +2656,94 @@ fail_alloc_skb:
return -ENOMEM;
}
+int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int req_entries)
+{
+ enum hal_rx_buf_return_buf_manager mgr =
+ ab->hw_params->hal_params->rx_buf_rbm;
+ int num_free, num_remain, buf_id;
+ struct ath12k_buffer_addr *desc;
+ struct hal_srng *srng;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ u32 cookie;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ RX_MON_STATUS_BUF_ALIGN)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+ cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_buf_unassign;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ num_remain--;
+
+ ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_buf_unassign:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
static struct dp_mon_tx_ppdu_info *
ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
unsigned int ppdu_id,
@@ -2838,16 +3346,13 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar,
struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct dp_mon_mpdu *tmp, *mon_mpdu;
- struct sk_buff *head_msdu, *tail_msdu;
list_for_each_entry_safe(mon_mpdu, tmp,
&tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
- head_msdu = mon_mpdu->head;
- tail_msdu = mon_mpdu->tail;
- if (head_msdu)
- ath12k_dp_mon_rx_deliver(ar, head_msdu, tail_msdu,
+ if (mon_mpdu->head)
+ ath12k_dp_mon_rx_deliver(ar, mon_mpdu,
&tx_ppdu_info->rx_status, napi);
kfree(mon_mpdu);
@@ -2949,11 +3454,10 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
u32 num_msdu;
- if (!rx_stats)
- return;
-
arsta->rssi_comb = ppdu_info->rssi_comb;
ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+ if (!rx_stats)
+ return;
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@@ -3126,14 +3630,12 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
ahsta = ath12k_sta_to_ahsta(peer->sta);
arsta = &ahsta->deflink;
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
rx_stats = arsta->rx_stats;
-
if (!rx_stats)
return;
- arsta->rssi_comb = ppdu_info->rssi_comb;
- ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
-
num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
user_stats->udp_msdu_count + user_stats->other_msdu_count;
@@ -3346,7 +3848,7 @@ move_next:
ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
while ((skb = __skb_dequeue(&skb_list))) {
- hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ hal_status = ath12k_dp_mon_rx_parse_mon_status(ar, pmon, skb, napi);
if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
ppdu_info->ppdu_continuation = true;
dev_kfree_skb_any(skb);
@@ -3388,6 +3890,487 @@ free_skb:
return num_buffs_reaped;
}
+static int ath12k_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ const struct ath12k_hw_hal_params *hal_params;
+ int buf_id, srng_id, num_buffs_reaped = 0;
+ enum dp_mon_status_buf_state reap_status;
+ struct dp_rxdma_mon_ring *rx_ring;
+ struct ath12k_mon_data *pmon;
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_tlv_64_hdr *tlv;
+ void *rx_mon_status_desc;
+ struct hal_srng *srng;
+ struct ath12k_dp *dp;
+ struct sk_buff *skb;
+ struct ath12k *ar;
+ dma_addr_t paddr;
+ u32 cookie;
+ u8 rbm;
+
+ ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
+ dp = &ab->dp;
+ pmon = &ar->dp.mon_data;
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+ rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc) {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ break;
+ }
+ ath12k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
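+ /* A zero buffer address means this entry has no buffer
+ * attached; fall through and replenish it below.
+ */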
+ if (paddr) {
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb) {
+ ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_64_hdr *)skb->data;
+ if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ pmon->buf_state = DP_MON_STATUS_NO_DMA;
+ ath12k_warn(ab,
+ "mon status DONE not set %llx, buf_id %d\n",
+ le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG),
+ buf_id);
+ /* The RxDMA status done bit might not be set even
+ * though the tail pointer was moved by HW.
+ *
+ * If the done status is missing, then as per the
+ * MAC team's suggestion:
+ * 1. When the HP + 1 entry is peeked and its DMA
+ *    is not done but the HP + 2 entry's DMA done is
+ *    set, skip the HP + 1 entry and start processing
+ *    it in the next interrupt.
+ * 2. If the HP + 2 entry's DMA done is not set either,
+ *    keep polling for the HP + 1 entry's DMA done bit
+ *    and recheck the same buffer the next time
+ *    dp_rx_mon_status_srng_process runs.
+ */
+ reap_status = ath12k_dp_rx_mon_buf_done(ab, srng,
+ rx_ring);
+ if (reap_status == DP_MON_STATUS_NO_DMA)
+ continue;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) {
+ dev_kfree_skb_any(skb);
+ goto move_next;
+ }
+ __skb_queue_tail(skb_list, skb);
+ } else {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ }
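+ /* Refill the just-reaped entry with a new status buffer and
+ * advance the ring pointer; if allocation fails, the entry's
+ * address info is cleared and reaping stops.
+ */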
+move_next:
+ skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id);
+
+ if (!skb) {
+ ath12k_warn(ab, "failed to alloc buffer for status ring\n");
+ hal_params = ab->hw_params->hal_params;
+ ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+ hal_params->rx_buf_rbm);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH12K_SKB_RXCB(skb);
+
+ cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+ u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie,
+ ab->hw_params->hal_params->rx_buf_rbm);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
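+/* Pop the MSDUs referenced by one monitor destination ring entry: walk its
+ * MSDU link descriptors, chain the matching MSDUs into a single head/tail
+ * skb list, and return each consumed link descriptor to the WBM idle list.
+ */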
+static u32
+ath12k_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu,
+ struct list_head *used_list,
+ u32 *npackets, u32 *ppdu_id)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ bool is_frag, is_first_msdu, drop_mpdu = false;
+ struct hal_reo_entrance_ring *ent_desc =
+ (struct hal_reo_entrance_ring *)ring_entry;
+ u32 rx_bufs_used = 0, i = 0, desc_bank = 0;
+ struct hal_rx_desc *rx_desc, *tail_rx_desc;
+ struct hal_rx_msdu_link *msdu_link_desc;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_buffer_addr buf_info;
+ struct hal_rx_msdu_list msdu_list;
+ struct ath12k_skb_rxcb *rxcb;
+ u16 num_msdus = 0;
+ dma_addr_t paddr;
+ u8 rbm;
+
+ ath12k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie,
+ &p_last_buf_addr_info, &rbm,
+ &msdu_cnt);
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ if (le32_get_bits(ent_desc->info1,
+ HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err = le32_get_bits(ent_desc->info1,
+ HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ spin_unlock_bh(&pmon->mon_lock);
+ return rx_bufs_used;
+ }
+
+ desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK);
+ msdu_link_desc =
+ ar->ab->dp.link_desc_banks[desc_bank].vaddr +
+ (paddr - ar->ab->dp.link_desc_banks[desc_bank].paddr);
+
+ ath12k_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list,
+ &num_msdus);
+ desc_info = ath12k_dp_get_rx_desc(ar->ab,
+ msdu_list.sw_cookie[num_msdus - 1]);
+ tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data;
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d last_cookie %d is same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+
+ desc_info =
+ ath12k_dp_get_rx_desc(ar->ab, msdu_list.sw_cookie[i]);
+ msdu = desc_info->skb;
+
+ if (!msdu) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "msdu_pop: invalid msdu (%d/%d)\n",
+ i + 1, num_msdus);
+ goto next_msdu;
+ }
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ if (rxcb->paddr != msdu_list.paddr[i]) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d paddr %lx != %lx\n",
+ i, (unsigned long)rxcb->paddr,
+ (unsigned long)msdu_list.paddr[i]);
+ drop_mpdu = true;
+ continue;
+ }
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc);
+ if (is_first_msdu) {
+ if (!ath12k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+ msdu_ppdu_id =
+ ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
+
+ if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id)) {
+ spin_unlock_bh(&pmon->mon_lock);
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath12k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
+ dev_kfree_skb_any(msdu);
+ goto next_msdu;
+ }
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, used_list);
+ }
+
+ ath12k_hal_rx_buf_addr_info_set(&buf_info, paddr, sw_cookie, rbm);
+
+ ath12k_dp_mon_next_link_desc_get(msdu_link_desc, &paddr,
+ &sw_cookie, &rbm,
+ &p_buf_addr_info);
+
+ ath12k_dp_rx_link_desc_return(ar->ab, &buf_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+/* The destination ring processing is considered stuck if the destination
+ * ring does not move while the status ring moves 16 PPDUs. As a workaround,
+ * the destination ring processing skips the current PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
+static void ath12k_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id,
+ u32 quota, struct napi_struct *napi)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_pdev_mon_stats *rx_mon_stats;
+ u32 ppdu_id, rx_bufs_used = 0, ring_id;
+ u32 mpdu_rx_bufs_used, npackets = 0;
+ struct ath12k_dp *dp = &ar->ab->dp;
+ struct ath12k_base *ab = ar->ab;
+ void *ring_entry, *mon_dst_srng;
+ struct dp_mon_mpdu *tmp_mpdu;
+ LIST_HEAD(rx_desc_used_list);
+ struct hal_srng *srng;
+
+ ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+ srng = &ab->hal.srng_list[ring_id];
+
+ mon_dst_srng = &ab->hal.srng_list[ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath12k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+ &head_msdu, &tail_msdu,
+ &rx_desc_used_list,
+ &npackets, &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ spin_lock_bh(&pmon->mon_lock);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ spin_unlock_bh(&pmon->mon_lock);
+ continue;
+ }
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ spin_lock_bh(&pmon->mon_lock);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ spin_unlock_bh(&pmon->mon_lock);
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ break;
+ }
+
+ if (head_msdu && tail_msdu) {
+ tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC);
+ if (!tmp_mpdu)
+ break;
+
+ tmp_mpdu->head = head_msdu;
+ tmp_mpdu->tail = tail_msdu;
+ tmp_mpdu->err_bitmap = pmon->err_bitmap;
+ tmp_mpdu->decap_format = pmon->decap_format;
+ ath12k_dp_mon_rx_deliver(ar, tmp_mpdu,
+ &pmon->mon_ppdu_info, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ kfree(tmp_mpdu);
+ }
+
+ ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath12k_hal_srng_access_end(ar->ab, mon_dst_srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ ath12k_dp_rx_bufs_replenish(ar->ab,
+ &dp->rx_refill_buf_ring,
+ &rx_desc_used_list,
+ rx_bufs_used);
+ }
+}
+
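+/* Reap the monitor status ring and parse each status buffer; whenever a
+ * complete PPDU is seen while monitoring is active, process the monitor
+ * destination ring for that PPDU.
+ */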
+static int
+__ath12k_dp_mon_process_ring(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi, int *budget)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff_head skb_list;
+ int num_buffs_reaped;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath12k_dp_rx_reap_mon_status_ring(ar->ab, mac_id,
+ budget, &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+
+ if (ar->monitor_started &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ ath12k_dp_rx_mon_dest_process(ar, mac_id, *budget, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+exit:
+ return num_buffs_reaped;
+}
+
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode)
@@ -3398,6 +4381,10 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
if (ab->hw_params->rxdma1_enable) {
if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
+ } else {
+ if (ar->monitor_started)
+ num_buffs_reaped =
+ __ath12k_dp_mon_process_ring(ar, mac_id, napi, &budget);
}
return num_buffs_reaped;
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
index e4368eb42aca..e25595cbdcf3 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.h
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_MON_H
@@ -9,6 +9,9 @@
#include "core.h"
+#define ATH12K_MON_RX_DOT11_OFFSET 5
+#define ATH12K_MON_RX_PKT_OFFSET 8
+
enum dp_monitor_mode {
ATH12K_DP_TX_MONITOR_MODE,
ATH12K_DP_RX_MONITOR_MODE
@@ -82,6 +85,9 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
+int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int req_entries);
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode);
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 75bf4211ad42..57648febc4a4 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -194,6 +194,22 @@ static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
+u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
+}
+
+bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);
+
+ return tlv_tag == HAL_RX_MPDU_START;
+}
+
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -228,12 +244,6 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
-static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
-}
-
static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -420,9 +430,17 @@ static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ int i;
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
+ if (ab->hw_params->rxdma1_enable)
+ return 0;
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
+ ath12k_dp_rxdma_mon_buf_ring_free(ab,
+ &dp->rx_mon_status_refill_ring[i]);
+
return 0;
}
@@ -436,7 +454,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
ath12k_hal_srng_get_entrysize(ab, ringtype);
rx_ring->bufs_max = num_entries;
- ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
+
+ if (ringtype == HAL_RXDMA_MONITOR_STATUS)
+ ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
+ num_entries);
+ else
+ ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
return 0;
}
@@ -457,7 +480,8 @@ static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
- int ret;
+ struct dp_rxdma_mon_ring *mon_ring;
+ int ret, i;
ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
if (ret) {
@@ -470,9 +494,19 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
&dp->rxdma_mon_buf_ring,
HAL_RXDMA_MONITOR_BUF);
- if (ret) {
+ if (ret)
ath12k_warn(ab,
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
- return ret;
- }
- }
+ return ret;
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ mon_ring = &dp->rx_mon_status_refill_ring[i];
+ ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
return ret;
}
}
@@ -556,9 +590,9 @@ void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
- dma_unmap_single(ab->dev, cmd->data.paddr,
- cmd->data.size, DMA_BIDIRECTIONAL);
- kfree(cmd->data.vaddr);
+ dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
+ cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.qbuf.vaddr);
kfree(cmd);
}
@@ -566,9 +600,9 @@ void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
- dma_unmap_single(ab->dev, cmd_cache->data.paddr,
- cmd_cache->data.size, DMA_BIDIRECTIONAL);
- kfree(cmd_cache->data.vaddr);
+ dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
+ cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.qbuf.vaddr);
kfree(cmd_cache);
}
spin_unlock_bh(&dp->reo_cmd_lock);
@@ -583,10 +617,10 @@ static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
@@ -641,13 +675,13 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
unsigned long tot_desc_sz, desc_sz;
int ret;
- tot_desc_sz = rx_tid->size;
+ tot_desc_sz = rx_tid->qbuf.size;
desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
while (tot_desc_sz > desc_sz) {
tot_desc_sz -= desc_sz;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE, &cmd,
NULL);
@@ -658,8 +692,8 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
}
memset(&cmd, 0, sizeof(cmd));
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE,
@@ -667,10 +701,10 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
if (ret) {
ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
}
@@ -729,10 +763,10 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
@@ -762,6 +796,7 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
qref->info1 = u32_encode_bits(upper_32_bits(paddr),
BUFFER_ADDR_INFO1_ADDR) |
u32_encode_bits(tid, DP_REO_QREF_NUM);
+ ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}
static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
@@ -801,8 +836,8 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
return;
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
@@ -810,10 +845,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
if (ret) {
ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
+ rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
if (peer->mlo)
@@ -824,15 +859,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->active = false;
}
-/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
- * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
- * that.
- */
-static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
- struct hal_reo_dest_ring *ring,
- enum hal_wbm_rel_bm_act action)
+int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action)
{
- struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
struct hal_wbm_release_ring *desc;
struct ath12k_dp *dp = &ab->dp;
struct hal_srng *srng;
@@ -850,7 +880,7 @@ static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
goto exit;
}
- ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
+ ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);
exit:
ath12k_hal_srng_access_end(ab, srng);
@@ -863,14 +893,17 @@ exit:
static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
bool rel_link_desc)
{
+ struct ath12k_buffer_addr *buf_addr_info;
struct ath12k_base *ab = rx_tid->ab;
lockdep_assert_held(&ab->base_lock);
if (rx_tid->dst_ring_desc) {
- if (rel_link_desc)
- ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
+ if (rel_link_desc) {
+ buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
+ ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
kfree(rx_tid->dst_ring_desc);
rx_tid->dst_ring_desc = NULL;
}
@@ -909,8 +942,8 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
struct ath12k_hal_reo_cmd cmd = {0};
int ret;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
cmd.ba_window_size = ba_win_sz;
@@ -934,18 +967,67 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
return 0;
}
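+/* Assign the cached REO queue buffer for this peer/TID, allocating and
+ * DMA-mapping it on first use. The buffer is kept in ahsta->reoq_bufs[]
+ * so a TID that is torn down and set up again reuses the same queue
+ * descriptor instead of reallocating it.
+ */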
+static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
+ struct ath12k_sta *ahsta,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u16 ssn, enum hal_pn_type pn_type)
+{
+ u32 ba_win_sz = rx_tid->ba_win_sz;
+ struct ath12k_reoq_buf *buf;
+ void *vaddr, *vaddr_aligned;
+ dma_addr_t paddr_aligned;
+ u8 tid = rx_tid->tid;
+ u32 hw_desc_sz;
+ int ret;
+
+ buf = &ahsta->reoq_bufs[tid];
+ if (!buf->vaddr) {
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+ if (!vaddr)
+ return -ENOMEM;
+
+ vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
+
+ paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(ab->dev, paddr_aligned);
+ if (ret) {
+ kfree(vaddr);
+ return ret;
+ }
+
+ buf->vaddr = vaddr;
+ buf->paddr_aligned = paddr_aligned;
+ buf->size = hw_desc_sz;
+ }
+
+ rx_tid->qbuf = *buf;
+ rx_tid->active = true;
+
+ return 0;
+}
+
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
- struct hal_rx_reo_queue *addr_aligned;
struct ath12k_peer *peer;
+ struct ath12k_sta *ahsta;
struct ath12k_dp_rx_tid *rx_tid;
- u32 hw_desc_sz;
- void *vaddr;
- dma_addr_t paddr;
+ dma_addr_t paddr_aligned;
int ret;
spin_lock_bh(&ab->base_lock);
@@ -957,7 +1039,8 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return -ENOENT;
}
- if (!peer->primary_link) {
+ if (ab->hw_params->dp_primary_link_only &&
+ !peer->primary_link) {
spin_unlock_bh(&ab->base_lock);
return 0;
}
@@ -977,9 +1060,9 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
}
rx_tid = &peer->rx_tid[tid];
+ paddr_aligned = rx_tid->qbuf.paddr_aligned;
/* Update the tid queue if it is already setup */
if (rx_tid->active) {
- paddr = rx_tid->paddr;
ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
ba_win_sz, ssn, true);
spin_unlock_bh(&ab->base_lock);
@@ -991,8 +1074,8 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
if (!ab->hw_params->reoq_lut_support) {
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
peer_mac,
- paddr, tid, 1,
- ba_win_sz);
+ paddr_aligned, tid,
+ 1, ba_win_sz);
if (ret) {
ath12k_warn(ab, "failed to setup peer rx reorder queuefor tid %d: %d\n",
tid, ret);
@@ -1007,61 +1090,34 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
rx_tid->ba_win_sz = ba_win_sz;
- /* TODO: Optimize the memory allocation for qos tid based on
- * the actual BA window size in REO tid update path.
- */
- if (tid == HAL_DESC_REO_NON_QOS_TID)
- hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
- else
- hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
-
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
- if (!vaddr) {
- spin_unlock_bh(&ab->base_lock);
- return -ENOMEM;
- }
-
- addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
- ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
- ssn, pn_type);
-
- paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
- DMA_BIDIRECTIONAL);
-
- ret = dma_mapping_error(ab->dev, paddr);
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
if (ret) {
spin_unlock_bh(&ab->base_lock);
- goto err_mem_free;
+ ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
+ return ret;
}
- rx_tid->vaddr = vaddr;
- rx_tid->paddr = paddr;
- rx_tid->size = hw_desc_sz;
- rx_tid->active = true;
-
if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
if (peer->mlo)
- ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
+ ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
+ paddr_aligned);
else
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
+ paddr_aligned);
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
- paddr, tid, 1, ba_win_sz);
+ paddr_aligned, tid, 1,
+ ba_win_sz);
}
return ret;
-
-err_mem_free:
- kfree(vaddr);
-
- return ret;
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
@@ -1196,8 +1252,8 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
continue;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL);
@@ -1784,8 +1840,12 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
+ ast_hash = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
+ hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
- peer_id);
+ hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
@@ -1823,6 +1883,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
+ bool is_continuation;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
@@ -1871,7 +1932,8 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
rem_len = msdu_len - buf_first_len;
while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
rxcb = ATH12K_SKB_RXCB(skb);
- if (rxcb->is_continuation)
+ is_continuation = rxcb->is_continuation;
+ if (is_continuation)
buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
else
buf_len = rem_len;
@@ -1889,7 +1951,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
dev_kfree_skb_any(skb);
rem_len -= buf_len;
- if (!rxcb->is_continuation)
+ if (!is_continuation)
break;
}
@@ -1914,21 +1976,14 @@ static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_
return NULL;
}
-static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ath12k_base *ab = ar->ab;
- bool ip_csum_fail, l4_csum_fail;
-
- ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
- l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
-
- msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
- CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
-static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
- enum hal_encrypt_type enctype)
+int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
@@ -2122,10 +2177,13 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_base *ab = ar->ab;
size_t hdr_len, crypto_len;
- struct ieee80211_hdr *hdr;
- u16 qos_ctl;
- __le16 fc;
- u8 *crypto_hdr;
+ struct ieee80211_hdr hdr;
+ __le16 qos_ctl;
+ u8 *crypto_hdr, mesh_ctrl;
+
+ ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
+ hdr_len = ieee80211_hdrlen(hdr.frame_control);
+ mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
@@ -2133,27 +2191,21 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
}
- fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
- hdr_len = ieee80211_hdrlen(fc);
skb_push(msdu, hdr_len);
- hdr = (struct ieee80211_hdr *)msdu->data;
- hdr->frame_control = fc;
-
- /* Get wifi header from rx_desc */
- ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
+ memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
if (rxcb->is_mcbc)
status->flag &= ~RX_FLAG_PN_VALIDATED;
/* Add QOS header */
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos_ctl = rxcb->tid;
- if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
- qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;
- /* TODO: Add other QoS ctl fields when required */
- memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
- &qos_ctl, IEEE80211_QOS_CTL_LEN);
+ qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
+ if (mesh_ctrl)
+ qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);
+
+ memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
}
}
@@ -2229,10 +2281,10 @@ static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
}
struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_peer *peer = NULL;
lockdep_assert_held(&ab->base_lock);
@@ -2243,40 +2295,41 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
if (peer)
return peer;
- if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
- return NULL;
+ if (rx_info->addr2_present)
+ peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
- peer = ath12k_peer_find_by_addr(ab,
- ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
- rx_desc));
return peer;
}
static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+ struct ath12k_dp_rx_info *rx_info)
{
- bool fill_crypto_hdr;
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
struct ieee80211_hdr *hdr;
struct ath12k_peer *peer;
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
u32 err_bitmap;
/* PN for multicast packets will be checked in mac80211 */
rxcb = ATH12K_SKB_RXCB(msdu);
- fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
- rxcb->is_mcbc = fill_crypto_hdr;
+ rxcb->is_mcbc = rx_info->is_mcbc;
if (rxcb->is_mcbc)
- rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
+ rxcb->peer_id = rx_info->peer_id;
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info);
if (peer) {
+ /* Reset the mcbc bit for peers that only send unicast
+ * frames (ucast_ra_only): on an AP such packets are in
+ * fact unicast even when flagged as mcbc.
+ */
+ rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only;
+
if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
@@ -2305,7 +2358,7 @@ static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
if (is_decrypted) {
rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
- if (fill_crypto_hdr)
+ if (rx_info->is_mcbc)
rx_status->flag |= RX_FLAG_MIC_STRIPPED |
RX_FLAG_ICV_STRIPPED;
else
@@ -2313,37 +2366,28 @@ static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
RX_FLAG_PN_VALIDATED;
}
- ath12k_dp_rx_h_csum_offload(ar, msdu);
+ ath12k_dp_rx_h_csum_offload(msdu, rx_info);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
enctype, rx_status, is_decrypted);
- if (!is_decrypted || fill_crypto_hdr)
+ if (!is_decrypted || rx_info->is_mcbc)
return;
- if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
- DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
}
-static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_base *ab = ar->ab;
struct ieee80211_supported_band *sband;
- enum rx_msdu_start_pkt_type pkt_type;
- u8 bw;
- u8 rate_mcs, nss;
- u8 sgi;
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
+ enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
+ u8 bw = rx_info->bw, sgi = rx_info->sgi;
+ u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
bool is_cck;
- pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
- bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
- rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
- nss = ath12k_dp_rx_h_nss(ab, rx_desc);
- sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
-
switch (pkt_type) {
case RX_MSDU_START_PKT_TYPE_11A:
case RX_MSDU_START_PKT_TYPE_11B:
@@ -2412,10 +2456,35 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
}
}
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
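+/* Cache the commonly used fields of the hardware rx descriptor in rx_info
+ * so the later rx handlers can consume them without re-reading the
+ * descriptor.
+ */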
+void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
+ struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_base *ab = ar->ab;
+ rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc);
+ rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc);
+ rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc);
+ rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+ rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
+ rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
+ rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
+ rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
+ rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc);
+ rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc);
+ rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
+ rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+
+ if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
+ ether_addr_copy(rx_info->addr2,
+ ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc));
+ rx_info->addr2_present = true;
+ }
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(*rx_desc));
+}
+
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
+{
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
u8 channel_num;
u32 center_freq, meta_data;
struct ieee80211_channel *channel;
@@ -2429,12 +2498,12 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
- meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+ meta_data = rx_info->phy_meta_data;
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= ATH12K_MIN_6G_FREQ &&
- center_freq <= ATH12K_MAX_6G_FREQ) {
+ if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_6GHZ_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
@@ -2450,20 +2519,18 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
ieee80211_frequency_to_channel(channel->center_freq);
}
spin_unlock_bh(&ar->data_lock);
- ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
- rx_desc, sizeof(*rx_desc));
}
if (rx_status->band != NL80211_BAND_6GHZ)
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
- ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+ ath12k_dp_rx_h_rate(ar, rx_info);
}
static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
static const struct ieee80211_radiotap_he known = {
@@ -2476,6 +2543,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
struct ieee80211_sta *pubsta;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status *status = rx_info->rx_status;
u8 decap = DP_RX_DECAP_TYPE_RAW;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol = rxcb->is_eapol;
@@ -2488,10 +2556,10 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
}
if (!(status->flag & RX_FLAG_ONLY_MONITOR))
- decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
+ decap = rx_info->decap_type;
spin_lock_bh(&ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ab, msdu);
+ peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info);
pubsta = peer ? peer->sta : NULL;
@@ -2566,7 +2634,7 @@ static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN)))
return true;
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
WARN_ON_ONCE(1);
return false;
}
@@ -2574,7 +2642,7 @@ static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list,
- struct ieee80211_rx_status *rx_status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
@@ -2634,10 +2702,11 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
goto free_out;
}
- ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
- ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
+ ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info);
- rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+ rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
return 0;
@@ -2657,12 +2726,16 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
struct ath12k *ar;
struct ath12k_hw_link *hw_links = ag->hw_links;
struct ath12k_base *partner_ab;
+ struct ath12k_dp_rx_info rx_info;
u8 hw_link_id, pdev_id;
int ret;
if (skb_queue_empty(msdu_list))
return;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = &rx_status;
+
rcu_read_lock();
while ((msdu = __skb_dequeue(msdu_list))) {
@@ -2683,7 +2756,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
continue;
}
- ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+ ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"Unable to process msdu %d", ret);
@@ -2691,7 +2764,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
continue;
}
- ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}
rcu_read_unlock();
@@ -2724,9 +2797,9 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
struct ath12k_hw_group *ag = ab->ag;
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_link *hw_links = ag->hw_links;
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
@@ -2743,7 +2816,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
__skb_queue_head_init(&msdu_list);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
@@ -2804,13 +2877,14 @@ try_again:
DMA_FROM_DEVICE);
num_buffs_reaped[device_id]++;
+ ab->device_stats.reo_rx[ring_id][ab->device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[ring_id]++;
+ ab->device_stats.hal_reo_error[ring_id]++;
continue;
}
@@ -2860,7 +2934,7 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -2881,7 +2955,8 @@ exit:
static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
{
- struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
+ struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
+ frag_timer);
spin_lock_bh(&rx_tid->ab->base_lock);
if (rx_tid->last_frag_no &&
@@ -2984,6 +3059,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
struct ieee80211_key_conf *key_conf;
struct ieee80211_hdr *hdr;
+ struct ath12k_dp_rx_info rx_info;
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
@@ -2994,6 +3070,9 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
return 0;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = rxs;
+
hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
@@ -3020,6 +3099,8 @@ mic_fail:
(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
+
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
skb_pull(msdu, hal_rx_desc_sz);
@@ -3027,7 +3108,7 @@ mic_fail:
if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
return -EINVAL;
- ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath12k_dp_rx_h_ppdu(ar, &rx_info);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
@@ -3242,8 +3323,15 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
- reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
- queue_addr_hi = upper_32_bits(rx_tid->paddr);
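+ /* With REO queue LUT support the hardware resolves the queue
+ * address from the peer metadata, so carry the metadata in
+ * queue_addr_lo instead of a raw queue DMA address.
+ */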
+ if (ab->hw_params->reoq_lut_support) {
+ reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
+ queue_addr_hi = 0;
+ } else {
+ reo_ent_ring->queue_addr_lo =
+ cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned));
+ queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ }
+
reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
le32_encode_bits(dst_ind,
@@ -3439,7 +3527,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
goto out_unlock;
}
} else {
- ath12k_dp_rx_link_desc_return(ab, ring_desc,
+ ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
@@ -3552,7 +3640,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
dev_kfree_skb_any(msdu);
- ath12k_dp_rx_link_desc_return(ar->ab, desc,
+ ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
exit:
@@ -3564,9 +3652,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
struct ath12k_hw_group *ag = ab->ag;
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
@@ -3588,7 +3676,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
tot_n_bufs_reaped = 0;
quota = budget;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
reo_except = &ab->dp.reo_except_ring;
@@ -3602,7 +3690,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
drop = false;
- ab->soc_stats.err_ring_pkts++;
+ ab->device_stats.err_ring_pkts++;
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
@@ -3629,9 +3717,10 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
- ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab,
+ &reo_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
@@ -3649,7 +3738,8 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
drop = true;
/* Return the link desc back to wbm idle list */
- ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab,
+ &reo_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
@@ -3676,7 +3766,7 @@ exit:
spin_unlock_bh(&srng->lock);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -3716,7 +3806,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
}
static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status,
+ struct ath12k_dp_rx_info *rx_info,
struct sk_buff_head *msdu_list)
{
struct ath12k_base *ab = ar->ab;
@@ -3772,11 +3862,11 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
return -EINVAL;
- ath12k_dp_rx_h_ppdu(ar, desc, status);
+ ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
+ ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);
- ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
-
- rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
+ rxcb->tid = rx_info->tid;
/* Note that the caller retains access to the msdu and completes rx with
* mac80211, so there is no need to clean up amsdu_list.
@@ -3786,17 +3876,17 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
}
static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status,
+ struct ath12k_dp_rx_info *rx_info,
struct sk_buff_head *msdu_list)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
bool drop = false;
- ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+ ar->ab->device_stats.reo_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
- if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
drop = true;
break;
case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
@@ -3817,7 +3907,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
}
static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
u16 msdu_len;
@@ -3831,24 +3921,33 @@ static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "invalid msdu len in tkip mic err %u\n", msdu_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
+ sizeof(*desc));
+ return true;
+ }
+
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
return true;
- ath12k_dp_rx_h_ppdu(ar, desc, status);
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
- status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
- RX_FLAG_DECRYPTED);
+ rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
ath12k_dp_rx_h_undecap(ar, msdu, desc,
- HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
return false;
}
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
@@ -3856,14 +3955,15 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
bool drop = false;
u32 err_bitmap;
- ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+ ar->ab->device_stats.rxdma_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
- drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+ drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
break;
}
fallthrough;
@@ -3885,14 +3985,18 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_rx_status rxs = {0};
+ struct ath12k_dp_rx_info rx_info;
bool drop = true;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = &rxs;
+
switch (rxcb->err_rel_src) {
case HAL_WBM_REL_SRC_MODULE_REO:
- drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
break;
case HAL_WBM_REL_SRC_MODULE_RXDMA:
- drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
break;
default:
/* msdu will get freed */
@@ -3904,13 +4008,13 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
return;
}
- ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
@@ -3921,9 +4025,10 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
struct ath12k_hw_link *hw_links = ag->hw_links;
struct ath12k_base *partner_ab;
u8 hw_link_id, device_id;
@@ -3933,7 +4038,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
__skb_queue_head_init(&msdu_list);
__skb_queue_head_init(&scatter_msdu_list);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
@@ -4057,7 +4162,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!total_num_buffs_reaped)
goto done;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -4097,6 +4202,12 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
dev_kfree_skb_any(msdu);
continue;
}
+
+ if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
+ device_id = ar->ab->device_id;
+ device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
+ }
+
ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
@@ -4186,6 +4297,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ struct dp_srng *srng;
int i;
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
@@ -4193,6 +4305,10 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+ if (!ab->hw_params->rxdma1_enable) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ath12k_dp_srng_cleanup(ab, srng);
+ }
}
for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
@@ -4341,6 +4457,19 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
ret);
return ret;
}
+ } else {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id =
+ dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to configure mon_status_refill_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
@@ -4355,6 +4484,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ struct dp_srng *srng;
int i, ret;
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
@@ -4402,6 +4532,23 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
+ } else {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ret = ath12k_dp_srng_setup(ab, srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, i,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup mon status ring %d\n",
+ i);
+ return ret;
+ }
+ }
}
ret = ath12k_dp_rxdma_buf_setup(ab);
@@ -4472,15 +4619,15 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
return ret;
}
- /* if rxdma1_enable is false, no need to setup
- * rxdma_mon_desc_ring.
- */
- if (!ar->ab->hw_params->rxdma1_enable)
- return 0;
-
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
spin_lock_init(&pmon->mon_lock);
+ if (!ar->ab->hw_params->rxdma1_enable)
+ return 0;
+
+ INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
+ pmon->mon_mpdu = NULL;
+
return 0;
}
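
The ATH12K_MAX_SOCS to ATH12K_MAX_DEVICES rename above also fixes the reap
bookkeeping to be per device in the hw group: one used-descriptor list and
one counter per device, replenished separately at the end. Condensed, the
pattern is (a sketch of the loop structure only, names as in the hunks
above):

    struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
    int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
    u8 device_id;

    for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
        INIT_LIST_HEAD(&rx_desc_used_list[device_id]);

    /* reap: append each descriptor to rx_desc_used_list[device_id]
     * and bump num_buffs_reaped[device_id] for its owning device
     */

    for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
        if (!num_buffs_reaped[device_id])
            continue;
        /* return descriptors and replenish this device's ring */
    }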
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 88e42365a9d8..e971a314bd2d 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -14,11 +14,9 @@
struct ath12k_dp_rx_tid {
u8 tid;
- u32 *vaddr;
- dma_addr_t paddr;
- u32 size;
u32 ba_win_sz;
bool active;
+ struct ath12k_reoq_buf qbuf;
/* Info related to rx fragments */
u32 cur_sn;
@@ -65,6 +63,24 @@ struct ath12k_dp_rx_rfc1042_hdr {
__be16 snap_type;
} __packed;
+struct ath12k_dp_rx_info {
+ struct ieee80211_rx_status *rx_status;
+ u32 phy_meta_data;
+ u16 peer_id;
+ u8 decap_type;
+ u8 pkt_type;
+ u8 sgi;
+ u8 rate_mcs;
+ u8 bw;
+ u8 nss;
+ u8 addr2[ETH_ALEN];
+ u8 tid;
+ bool ip_csum_fail;
+ bool l4_csum_fail;
+ bool is_mcbc;
+ bool addr2_present;
+};
+
static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
{
u32 ret = 0;
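
The new ath12k_dp_rx_info wraps the mac80211 status together with the
fields that were previously re-parsed from the hal_rx_desc at every stage.
A minimal sketch of the intended calling pattern, assuming a caller that
owns both objects on the stack (the two helpers are the ones declared later
in this header):

    struct ieee80211_rx_status rxs = {};
    struct ath12k_dp_rx_info rx_info = {
        .addr2_present = false,
        .rx_status = &rxs,
    };

    /* parse the descriptor once, caching peer_id, rates, csum flags */
    ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
    /* then fill the mac80211 status from the cached fields */
    ath12k_dp_rx_h_ppdu(ar, &rx_info);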
@@ -131,13 +147,13 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc);
struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info);
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc);
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc);
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status);
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info);
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
@@ -145,4 +161,17 @@ int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
const void *ptr, void *data),
void *data);
+void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
+ struct ath12k_dp_rx_info *rx_info);
+
+int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype);
+u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc);
+bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc);
+int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action);
#endif /* ATH12K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index ced232bf4aed..b6816b6c2c04 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -7,6 +7,7 @@
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
+#include "debugfs.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"
@@ -83,6 +84,7 @@ static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
u8 pool_id)
{
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ tx_desc->skb_ext_desc = NULL;
list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
@@ -219,7 +221,8 @@ out:
}
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb, bool gsn_valid, int mcbc_gsn)
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+ bool is_mcast)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
@@ -229,7 +232,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct hal_tcl_data_cmd *hal_tcl_desc;
struct hal_tx_msdu_ext_desc *msg;
- struct sk_buff *skb_ext_desc;
+ struct sk_buff *skb_ext_desc = NULL;
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ath12k_vif *ahvif = arvif->ahvif;
@@ -347,7 +350,7 @@ tcl_ring_sel:
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
- atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ atomic_inc(&ab->device_stats.tx_err.misc_fail);
goto fail_remove_tx_buf;
}
@@ -370,7 +373,7 @@ tcl_ring_sel:
map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
- atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ atomic_inc(&ab->device_stats.tx_err.misc_fail);
ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
goto fail_remove_tx_buf;
@@ -415,23 +418,21 @@ map:
if (ret < 0) {
ath12k_dbg(ab, ATH12K_DBG_DP_TX,
"Failed to add HTT meta data, dropping packet\n");
- kfree_skb(skb_ext_desc);
- goto fail_unmap_dma;
+ goto fail_free_ext_skb;
}
}
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr);
- if (ret) {
- kfree_skb(skb_ext_desc);
- goto fail_unmap_dma;
- }
+ if (ret)
+ goto fail_free_ext_skb;
ti.data_len = skb_ext_desc->len;
ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
skb_cb->paddr_ext_desc = ti.paddr;
+ tx_desc->skb_ext_desc = skb_ext_desc;
}
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
@@ -447,7 +448,7 @@ map:
* desc because the desc is directly enqueued onto hw queue.
*/
ath12k_hal_srng_access_end(ab, tcl_ring);
- ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+ ab->device_stats.tx_err.desc_na[ti.ring_id]++;
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
@@ -462,9 +463,22 @@ map:
ring_selector++;
}
- goto fail_unmap_dma;
+ goto fail_unmap_dma_ext;
}
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_encap_type[ti.encap_type]++;
+ arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
+ arvif->link_stats.tx_desc_type[ti.type]++;
+
+ if (is_mcast)
+ arvif->link_stats.tx_bcast_mcast++;
+ else
+ arvif->link_stats.tx_enqueued++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
+ ab->device_stats.tx_enqueued[ti.ring_id]++;
+
ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
ath12k_hal_srng_access_end(ab, tcl_ring);
@@ -478,16 +492,24 @@ map:
return 0;
-fail_unmap_dma:
- dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
-
+fail_unmap_dma_ext:
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc),
+ skb_ext_desc->len,
DMA_TO_DEVICE);
+fail_free_ext_skb:
+ kfree_skb(skb_ext_desc);
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
fail_remove_tx_buf:
ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_dropped++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
if (tcl_ring_retry)
goto tcl_ring_sel;
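
The relabeled error path unwinds in strict reverse order of acquisition, so
a failure after the extension skb is mapped no longer leaks it, while jumps
from before its allocation bypass the ext teardown entirely. In outline (a
sketch mirroring the labels above; each goto targets the deepest label
whose resource is already held):

    fail_unmap_dma_ext:    /* ext skb allocated and DMA-mapped */
        dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
                         skb_ext_desc->len, DMA_TO_DEVICE);
    fail_free_ext_skb:     /* ext skb allocated only */
        kfree_skb(skb_ext_desc);
    fail_unmap_dma:        /* main msdu DMA-mapped */
        dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
    fail_remove_tx_buf:    /* tx descriptor claimed from the pool */
        ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);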
@@ -495,20 +517,23 @@ fail_remove_tx_buf:
}
static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
- struct sk_buff *msdu, u8 mac_id,
- struct dp_tx_ring *tx_ring)
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params)
{
struct ath12k *ar;
+ struct sk_buff *msdu = desc_params->skb;
struct ath12k_skb_cb *skb_cb;
- u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params->mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
ar = ab->pdevs[pdev_id].ar;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
ieee80211_free_txskb(ar->ah->hw, msdu);
@@ -518,26 +543,46 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
- struct sk_buff *msdu,
+ struct ath12k_tx_desc_params *desc_params,
struct dp_tx_ring *tx_ring,
struct ath12k_dp_htt_wbm_tx_status *ts)
{
struct ieee80211_tx_info *info;
+ struct ath12k_link_vif *arvif;
struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
struct ath12k *ar;
+ struct sk_buff *msdu = desc_params->skb;
skb_cb = ATH12K_SKB_CB(msdu);
info = IEEE80211_SKB_CB(msdu);
ar = skb_cb->ar;
+ ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
+
+ vif = skb_cb->vif;
+ if (vif) {
+ ahvif = ath12k_vif_to_ahvif(vif);
+ rcu_read_lock();
+ arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+ if (arvif) {
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_completed++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+ }
+ rcu_read_unlock();
+ }
memset(&info->status, 0, sizeof(info->status));
@@ -560,10 +605,9 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
}
static void
-ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
- void *desc, u8 mac_id,
- struct sk_buff *msdu,
- struct dp_tx_ring *tx_ring)
+ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params)
{
struct htt_tx_wbm_completion *status_desc;
struct ath12k_dp_htt_wbm_tx_status ts = {0};
@@ -573,19 +617,21 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
wbm_status = le32_get_bits(status_desc->info0,
HTT_TX_WBM_COMP_INFO0_STATUS);
+ ab->device_stats.fw_tx_status[wbm_status]++;
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
- ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
+ ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
- ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
+ ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
/* This event is to be handled only when the driver decides to
@@ -593,7 +639,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
*/
break;
default:
- ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
break;
}
}
@@ -717,13 +763,18 @@ static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status
}
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts)
+ struct ath12k_tx_desc_params *desc_params,
+ struct hal_tx_status *ts,
+ int ring)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_hw *ah = ar->ah;
struct ieee80211_tx_info *info;
+ struct ath12k_link_vif *arvif;
struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
+ struct sk_buff *msdu = desc_params->skb;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@@ -731,11 +782,14 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
}
skb_cb = ATH12K_SKB_CB(msdu);
+ ab->device_stats.tx_completed[ring]++;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
rcu_read_lock();
@@ -749,6 +803,17 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
goto exit;
}
+ vif = skb_cb->vif;
+ if (vif) {
+ ahvif = ath12k_vif_to_ahvif(vif);
+ arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+ if (arvif) {
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_completed++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+ }
+ }
+
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
@@ -842,12 +907,14 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct ath12k_tx_desc_info *tx_desc = NULL;
- struct sk_buff *msdu;
struct hal_tx_status ts = { 0 };
+ struct ath12k_tx_desc_params desc_params;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
struct hal_wbm_release_ring *desc;
- u8 mac_id, pdev_id;
+ u8 pdev_id;
u64 desc_va;
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason rel_status;
spin_lock_bh(&status_ring->lock);
@@ -900,28 +967,37 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
continue;
}
- msdu = tx_desc->skb;
- mac_id = tx_desc->mac_id;
+ desc_params.mac_id = tx_desc->mac_id;
+ desc_params.skb = tx_desc->skb;
+ desc_params.skb_ext_desc = tx_desc->skb_ext_desc;
+
+ /* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
+ buf_rel_source = le32_get_bits(tx_status->info0,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
+ ab->device_stats.tx_wbm_rel_source[buf_rel_source]++;
+
+ rel_status = le32_get_bits(tx_status->info0,
+ HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
+ ab->device_stats.tqm_rel_reason[rel_status]++;
/* Release descriptor as soon as extracting necessary info
* to reduce contention
*/
ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
- ath12k_dp_tx_process_htt_tx_complete(ab,
- (void *)tx_status,
- mac_id, msdu,
- tx_ring);
+ ath12k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status,
+ tx_ring, &desc_params);
continue;
}
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params.mac_id);
ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
- ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
+ ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts,
+ tx_ring->tcl_data_ring_id);
}
}
@@ -1431,6 +1507,11 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+
+ if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
}
if (ab->hw_params->rxdma1_enable) {
@@ -1448,6 +1529,44 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
return ret;
}
}
+ return 0;
+ }
+
+ if (!reset) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon rx buf %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ if (!reset) {
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ }
+
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ i,
+ HAL_RXDMA_MONITOR_STATUS,
+ RX_MON_STATUS_BUF_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon status buf %d\n",
+ ret);
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index a5904097dc34..10acdcf1fa8f 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -17,7 +17,8 @@ struct ath12k_dp_htt_wbm_tx_status {
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb, bool gsn_valid, int mcbc_gsn);
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+ bool is_mcast);
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
diff --git a/drivers/net/wireless/ath/ath12k/fw.c b/drivers/net/wireless/ath/ath12k/fw.c
index 5be4b2d4a19d..5ac497f80cad 100644
--- a/drivers/net/wireless/ath/ath12k/fw.c
+++ b/drivers/net/wireless/ath/ath12k/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -99,6 +99,8 @@ static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab,
__set_bit(i, ab->fw.fw_features);
}
+ ab->fw.fw_features_valid = true;
+
ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "features", "",
ab->fw.fw_features,
sizeof(ab->fw.fw_features));
@@ -169,3 +171,8 @@ void ath12k_fw_unmap(struct ath12k_base *ab)
release_firmware(ab->fw.fw);
memset(&ab->fw, 0, sizeof(ab->fw));
}
+
+bool ath12k_fw_feature_supported(struct ath12k_base *ab, enum ath12k_fw_features feat)
+{
+ return ab->fw.fw_features_valid && test_bit(feat, ab->fw.fw_features);
+}
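
ath12k_fw_feature_supported() folds the new fw_features_valid flag into
every lookup, so a query made before the firmware files are parsed reads as
unsupported instead of consulting a stale bitmap. A hypothetical call site
(the feature flag named here is illustrative, not part of this patch):

    if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO))
        /* take the firmware-assisted path */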
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
index 273c003eff3b..7afaefed5086 100644
--- a/drivers/net/wireless/ath/ath12k/fw.h
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_FW_H
@@ -32,5 +32,6 @@ enum ath12k_fw_features {
void ath12k_fw_map(struct ath12k_base *ab);
void ath12k_fw_unmap(struct ath12k_base *ab);
+bool ath12k_fw_feature_supported(struct ath12k_base *ab, enum ath12k_fw_features feat);
#endif /* ATH12K_FW_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index cd59ff8e6c7b..a301898e5849 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -154,7 +154,14 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
- [HAL_RXDMA_MONITOR_STATUS] = { 0, },
+ [HAL_RXDMA_MONITOR_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
[HAL_RXDMA_MONITOR_DESC] = { 0, },
[HAL_RXDMA_DIR_BUF] = {
.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
@@ -449,8 +456,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
- RX_MPDU_START_INFO6_MCAST_BCAST;
+ return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
+ RX_MSDU_END_INFO5_DA_IS_MCBC;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -511,11 +518,6 @@ static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
}
-static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl);
-}
-
static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
@@ -552,9 +554,9 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
s = &hal->srng_config[HAL_TCL_DATA];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
- s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
s = &hal->srng_config[HAL_TCL_CMD];
@@ -566,29 +568,29 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
s = &hal->srng_config[HAL_CE_SRC];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s = &hal->srng_config[HAL_CE_DST];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_CE_DST_STATUS];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
HAL_CE_DST_STATUS_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_WBM_IDLE_LINK];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
@@ -736,7 +738,6 @@ const struct hal_rx_ops hal_rx_qcn9274_ops = {
.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
- .rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
@@ -908,8 +909,8 @@ static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc
static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
- RX_MPDU_START_INFO6_MCAST_BCAST;
+ return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) &
+ RX_MSDU_END_INFO5_DA_IS_MCBC;
}
static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -975,11 +976,6 @@ ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
}
-static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
-}
-
static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
@@ -1080,8 +1076,6 @@ const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
- .rx_desc_get_mpdu_frame_ctl =
- ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
@@ -1330,11 +1324,6 @@ static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
}
-static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl);
-}
-
static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
@@ -1371,9 +1360,9 @@ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
s = &hal->srng_config[HAL_TCL_DATA];
s->max_rings = 5;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
- s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
s = &hal->srng_config[HAL_TCL_CMD];
@@ -1386,31 +1375,31 @@ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
s = &hal->srng_config[HAL_CE_SRC];
s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s = &hal->srng_config[HAL_CE_DST];
s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_CE_DST_STATUS];
s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
HAL_CE_DST_STATUS_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_WBM_IDLE_LINK];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
@@ -1555,7 +1544,6 @@ const struct hal_rx_ops hal_rx_wcn7850_ops = {
.rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
- .rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
@@ -1756,7 +1744,7 @@ static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
u32_encode_bits((srng->entry_size * srng->num_entries),
HAL_TCL1_RING_BASE_MSB_RING_SIZE);
- ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
@@ -1962,7 +1950,7 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
{
u32 len;
- len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+ len = le32_get_bits(READ_ONCE(desc->flags), HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
return len;
@@ -2054,6 +2042,24 @@ int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
+void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_hp;
+
+ return desc;
+}
+
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng)
{
@@ -2087,6 +2093,17 @@ void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
return desc;
}
+void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
+ srng->u.src_ring.cached_tp)
+ return NULL;
+
+ return srng->ring_base_vaddr + srng->u.src_ring.hp;
+}
+
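
Both peek helpers require srng->lock and leave the ring state untouched:
src_peek returns the entry at the current head, src_next_peek the one after
it, and both return NULL when the cached tail says the ring is full. That
lets a caller validate or prefill a descriptor before committing the head
pointer, roughly as follows (an assumed caller, using only functions from
this file):

    spin_lock_bh(&srng->lock);
    ath12k_hal_srng_access_begin(ab, srng);

    desc = ath12k_hal_srng_src_peek(ab, srng);
    if (desc) {
        /* inspect or prefill desc, then commit by advancing hp */
        desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
    }

    ath12k_hal_srng_access_end(ab, srng);
    spin_unlock_bh(&srng->lock);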
void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
struct hal_srng *srng)
{
@@ -2132,7 +2149,7 @@ void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
else
- srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
}
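
The hardware updates *hp_addr behind the CPU's back, so the cached head
must be re-read with a single fresh load each time; READ_ONCE prevents the
compiler from reusing an earlier value or tearing the access. It gives the
same guarantee as the volatile cast used for the source-ring tail pointer
just above, in the preferred kernel idiom:

    /* one fresh, untorn load of the DMA-updated head pointer */
    srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);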
/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 94e2e8735958..c1750b5dc03c 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_H
@@ -11,6 +11,7 @@
#include "rx_desc.h"
struct ath12k_base;
+#define HAL_CE_REMAP_REG_BASE (ab->ce_remap_base_addr)
#define HAL_LINK_DESC_SIZE (32 << 2)
#define HAL_LINK_DESC_ALIGN 128
@@ -21,6 +22,7 @@ struct ath12k_base;
#define HAL_MAX_AVAIL_BLK_RES 3
#define HAL_RING_BASE_ALIGN 8
+#define HAL_REO_QLUT_ADDR_ALIGN 256
#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
/* TODO: Check with hw team on the supported scatter buf size */
@@ -39,15 +41,21 @@ struct ath12k_base;
#define HAL_OFFSET_FROM_HP_TO_TP 4
#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
+#define HAL_REO_QDESC_MAX_PEERID 8191
/* WCSS Relative address */
+#define HAL_SEQ_WCSS_CMEM_OFFSET 0x00100000
#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
-#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x01b80000
-#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x01b81000
-#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x01b82000
-#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x01b83000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \
+ ((ab)->hw_params->regs->hal_umac_ce0_src_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \
+ ((ab)->hw_params->regs->hal_umac_ce0_dest_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \
+ ((ab)->hw_params->regs->hal_umac_ce1_src_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \
+ ((ab)->hw_params->regs->hal_umac_ce1_dest_reg_base)
#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
@@ -57,8 +65,10 @@ struct ath12k_base;
/* SW2TCL(x) R0 ring configuration address */
#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020
#define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240
-#define HAL_TCL1_RING_BASE_LSB 0x00000900
-#define HAL_TCL1_RING_BASE_MSB 0x00000904
+#define HAL_TCL1_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_base_lsb)
+#define HAL_TCL1_RING_BASE_MSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_base_msb)
#define HAL_TCL1_RING_ID(ab) ((ab)->hw_params->regs->hal_tcl1_ring_id)
#define HAL_TCL1_RING_MISC(ab) \
((ab)->hw_params->regs->hal_tcl1_ring_misc)
@@ -76,30 +86,31 @@ struct ath12k_base;
((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb)
#define HAL_TCL1_RING_MSI1_DATA(ab) \
((ab)->hw_params->regs->hal_tcl1_ring_msi1_data)
-#define HAL_TCL2_RING_BASE_LSB 0x00000978
+#define HAL_TCL2_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl2_ring_base_lsb)
#define HAL_TCL_RING_BASE_LSB(ab) \
((ab)->hw_params->regs->hal_tcl_ring_base_lsb)
-#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \
- (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \
- (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \
- (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_BASE_MSB_OFFSET \
- (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_ID_OFFSET(ab) \
- (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
- (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
- (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MISC_OFFSET(ab) \
- (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_MSI1_DATA(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_ID_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_ID(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_TP_ADDR_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_TP_ADDR_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_MISC_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_MISC(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
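
Since each offset is now the difference of two macro invocations on the
same argument, the OFFSET macros snapshot ab into a local through a GNU
statement expression so that an argument with side effects is evaluated
exactly once. The pattern in isolation (a generic sketch, not driver code):

    #define REG_OFF(ab) ({ typeof(ab) _ab = (ab); \
        (REG_HI(_ab) - REG_LO(_ab)); })    /* (ab) expands once */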
/* SW2TCL(x) R2 ring pointers (head/tail) address */
#define HAL_TCL1_RING_HP 0x00002000
@@ -132,6 +143,8 @@ struct ath12k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_QDESC_ADDR(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_addr)
+#define HAL_REO1_QDESC_MAX_PEERID(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_max_peerid)
#define HAL_REO1_SW_COOKIE_CFG0(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg0)
#define HAL_REO1_SW_COOKIE_CFG1(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg1)
#define HAL_REO1_QDESC_LUT_BASE0(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base0)
@@ -319,6 +332,8 @@ struct ath12k_base;
#define HAL_REO1_SW_COOKIE_CFG_ALIGN BIT(18)
#define HAL_REO1_SW_COOKIE_CFG_ENABLE BIT(19)
#define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE BIT(20)
+#define HAL_REO_QDESC_ADDR_READ_LUT_ENABLE BIT(7)
+#define HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY BIT(6)
/* CE ring bit field mask and shift */
#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
@@ -365,6 +380,9 @@ struct ath12k_base;
* ath12k_hal_rx_desc_get_err().
*/
+#define HAL_IPQ5332_CE_WFSS_REG_BASE 0x740000
+#define HAL_IPQ5332_CE_SIZE 0x100000
+
enum hal_srng_ring_id {
HAL_SRNG_RING_ID_REO2SW0 = 0,
HAL_SRNG_RING_ID_REO2SW1,
@@ -480,6 +498,7 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0 = HAL_SRNG_RING_ID_PMAC1_ID_START,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
@@ -566,7 +585,8 @@ enum hal_reo_cmd_type {
* or cache was blocked
* @HAL_REO_CMD_FAILED: Command execution failed, could be due to
* invalid queue desc
- * @HAL_REO_CMD_RESOURCE_BLOCKED:
+ * @HAL_REO_CMD_RESOURCE_BLOCKED: Command could not be executed because
+ * one or more descriptors were blocked
* @HAL_REO_CMD_DRAIN:
*/
enum hal_reo_cmd_status {
@@ -1068,7 +1088,6 @@ struct hal_rx_ops {
bool (*rx_desc_is_da_mcbc)(struct hal_rx_desc *desc);
void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr);
- u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc);
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype);
@@ -1126,6 +1145,7 @@ void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
struct hal_srng_params *params);
void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng);
+void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng);
void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng);
int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr);
@@ -1133,6 +1153,8 @@ void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
struct hal_srng *srng);
void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
struct hal_srng *srng);
+void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab,
+ struct hal_srng *srng);
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng);
int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
@@ -1154,4 +1176,5 @@ int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
void ath12k_hal_srng_shadow_config(struct ath12k_base *ab);
void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
struct hal_srng *srng);
+void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 3e8983b85de8..0173f731bfef 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -707,7 +707,7 @@ enum hal_rx_msdu_desc_reo_dest_ind {
#define RX_MSDU_DESC_INFO0_DECAP_FORMAT GENMASK(30, 29)
#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
- (u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
+ (le32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
struct rx_msdu_desc {
__le32 info0;
@@ -1008,6 +1008,10 @@ enum hal_reo_entr_rxdma_ecode {
HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
};
@@ -1284,11 +1288,13 @@ enum hal_tcl_encap_type {
HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
HAL_TCL_ENCAP_TYPE_ETHERNET,
HAL_TCL_ENCAP_TYPE_802_3 = 3,
+ HAL_TCL_ENCAP_TYPE_MAX
};
enum hal_tcl_desc_type {
HAL_TCL_DESC_TYPE_BUFFER,
HAL_TCL_DESC_TYPE_EXT_DESC,
+ HAL_TCL_DESC_TYPE_MAX,
};
enum hal_wbm_htt_tx_comp_status {
@@ -1298,6 +1304,7 @@ enum hal_wbm_htt_tx_comp_status {
HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH,
HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
};
@@ -1806,6 +1813,7 @@ enum hal_wbm_rel_src_module {
HAL_WBM_REL_SRC_MODULE_REO,
HAL_WBM_REL_SRC_MODULE_FW,
HAL_WBM_REL_SRC_MODULE_SW,
+ HAL_WBM_REL_SRC_MODULE_MAX,
};
enum hal_wbm_rel_desc_type {
@@ -1998,6 +2006,7 @@ struct hal_wbm_release_ring_cc_rx {
#define HAL_WBM_RELEASE_INFO3_CONTINUATION BIT(2)
#define HAL_WBM_RELEASE_INFO5_LOOPING_COUNT GENMASK(31, 28)
+#define HAL_ENCRYPT_TYPE_MAX 12
struct hal_wbm_release_ring {
struct ath12k_buffer_addr buf_addr_info;
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index ac17d6223fa7..48aa48c48606 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -326,7 +326,7 @@ int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
err_code = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_ERROR_CODE);
- ab->soc_stats.reo_error[err_code]++;
+ ab->device_stats.reo_error[err_code]++;
if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
@@ -381,7 +381,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
val = le32_get_bits(wbm_desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_RET_BUF_MGR);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
return -EINVAL;
}
@@ -393,7 +393,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
val = le32_get_bits(wbm_cc_desc->info0,
HAL_WBM_RELEASE_RX_CC_INFO0_RBM);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
return -EINVAL;
}
@@ -446,17 +446,97 @@ void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
*cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
}
+void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt)
+{
+ struct hal_reo_entrance_ring *reo_ent_ring =
+ (struct hal_reo_entrance_ring *)rx_desc;
+ struct ath12k_buffer_addr *buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details =
+ (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
+
+ *msdu_cnt = le32_get_bits(rx_mpdu_desc_info_details->info0,
+ RX_MPDU_DESC_INFO0_MSDU_COUNT);
+
+ buf_addr_info = (struct ath12k_buffer_addr *)&reo_ent_ring->buf_addr_info;
+
+ *paddr = (((u64)le32_get_bits(buf_addr_info->info1,
+ BUFFER_ADDR_INFO1_ADDR)) << 32) |
+ le32_get_bits(buf_addr_info->info0,
+ BUFFER_ADDR_INFO0_ADDR);
+
+ *sw_cookie = le32_get_bits(buf_addr_info->info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+ *rbm = le32_get_bits(buf_addr_info->info1,
+ BUFFER_ADDR_INFO1_RET_BUF_MGR);
+
+ *pp_buf_addr = (void *)buf_addr_info;
+}
+
+void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
+ struct hal_rx_msdu_link *link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus)
+{
+ struct hal_rx_msdu_details *msdu_details = NULL;
+ struct rx_msdu_desc *msdu_desc_info = NULL;
+ u32 last = 0, first = 0;
+ u8 tmp = 0;
+ int i;
+
+ last = u32_encode_bits(last, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ first = u32_encode_bits(first, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ msdu_details = &link_desc->msdu_link[0];
+
+ for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
+ if (!i && le32_get_bits(msdu_details[i].buf_addr_info.info0,
+ BUFFER_ADDR_INFO0_ADDR) == 0)
+ break;
+ if (le32_get_bits(msdu_details[i].buf_addr_info.info0,
+ BUFFER_ADDR_INFO0_ADDR) == 0) {
+ msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
+ msdu_desc_info->info0 |= cpu_to_le32(last);
+ break;
+ }
+ msdu_desc_info = &msdu_details[i].rx_msdu_info;
+
+ if (!i)
+ msdu_desc_info->info0 |= cpu_to_le32(first);
+ else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
+ msdu_desc_info->info0 |= cpu_to_le32(last);
+ msdu_list->msdu_info[i].msdu_flags = le32_to_cpu(msdu_desc_info->info0);
+ msdu_list->msdu_info[i].msdu_len =
+ HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
+ msdu_list->sw_cookie[i] =
+ le32_get_bits(msdu_details[i].buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+ tmp = le32_get_bits(msdu_details[i].buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_RET_BUF_MGR);
+ msdu_list->paddr[i] =
+ ((u64)(le32_get_bits(msdu_details[i].buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_ADDR)) << 32) |
+ le32_get_bits(msdu_details[i].buf_addr_info.info0,
+ BUFFER_ADDR_INFO0_ADDR);
+ msdu_list->rbm[i] = tmp;
+ }
+ *num_msdus = i;
+}
+
void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
- struct hal_wbm_release_ring *dst_desc,
- struct hal_wbm_release_ring *src_desc,
+ struct hal_wbm_release_ring *desc,
+ struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action)
{
- dst_desc->buf_addr_info = src_desc->buf_addr_info;
- dst_desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
- HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
- le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
- le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
- HAL_WBM_RELEASE_INFO0_DESC_TYPE);
+ desc->buf_addr_info = *buf_addr_info;
+ desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
+ le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
+ le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_RELEASE_INFO0_DESC_TYPE);
}
void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
@@ -851,3 +931,20 @@ void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
ring_hash_map);
}
+
+void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab)
+{
+ u32 val;
+
+ lockdep_assert_held(&ab->base_lock);
+ val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_ADDR(ab));
+
+ val |= u32_encode_bits(1, HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY);
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_ADDR(ab), val);
+
+ val &= ~HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY;
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_ADDR(ab), val);
+}
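
The clear is a write-1-then-0 pulse on CLEAR_QDESC_ARRAY rather than a
persistent mode bit, which is why the register is written twice, and the
lockdep assertion ties the sequence to base_lock. An assumed caller
context:

    spin_lock_bh(&ab->base_lock);
    /* ... tear down the peer and its REO queues ... */
    ath12k_hal_reo_shared_qaddr_cache_clear(ab);
    spin_unlock_bh(&ab->base_lock);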
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index 6bdcd0867d86..a3ab588aae19 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -108,11 +108,12 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_DONE,
HAL_RX_MON_STATUS_BUF_DONE,
HAL_RX_MON_STATUS_BUF_ADDR,
+ HAL_RX_MON_STATUS_MPDU_START,
HAL_RX_MON_STATUS_MPDU_END,
HAL_RX_MON_STATUS_MSDU_END,
};
-#define HAL_RX_MAX_MPDU 256
+#define HAL_RX_MAX_MPDU 1024
#define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 5)
struct hal_rx_user_status {
@@ -506,6 +507,18 @@ struct hal_rx_mpdu_start {
__le32 rsvd2[16];
} __packed;
+struct hal_rx_msdu_end {
+ __le32 info0;
+ __le32 rsvd0[9];
+ __le16 info00;
+ __le16 info01;
+ __le32 rsvd00[8];
+ __le32 info1;
+ __le32 rsvd1[10];
+ __le32 info2;
+ __le32 rsvd2;
+} __packed;
+
#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];
@@ -525,6 +538,7 @@ struct hal_rx_msdu_desc_info {
#define HAL_RX_NUM_MSDU_DESC 6
struct hal_rx_msdu_list {
struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u64 paddr[HAL_RX_NUM_MSDU_DESC];
u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
u8 rbm[HAL_RX_NUM_MSDU_DESC];
};
@@ -1128,8 +1142,8 @@ void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_ms
u32 *msdu_cookies,
enum hal_rx_buf_return_buf_manager *rbm);
void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
- struct hal_wbm_release_ring *dst_desc,
- struct hal_wbm_release_ring *src_desc,
+ struct hal_wbm_release_ring *desc,
+ struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action);
void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
dma_addr_t paddr, u32 cookie, u8 manager);
@@ -1144,5 +1158,12 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
struct ath12k_buffer_addr *buff_addr,
dma_addr_t *paddr, u32 *cookie);
+void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt);
+void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
+ struct hal_rx_msdu_link *link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index a106ebed7870..8254dc10b53b 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -118,6 +118,10 @@ static const struct ath12k_hw_ops wcn7850_ops = {
#define ATH12K_TX_MON_RING_MASK_0 0x1
#define ATH12K_TX_MON_RING_MASK_1 0x2
+#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4
+
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = {
/* CE0: host->target HTC control and raw streams */
@@ -535,6 +539,217 @@ static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_wcn7850
},
};
+static const struct ce_pipe_config ath12k_target_ce_config_wlan_ipq5332[] = {
+ /* host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* target->host WMI + HTC control */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* Target -> host PKTLOG */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* Reserved for target autonomous HIF_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE7 Reserved for CV Prefetch */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE8 Reserved for target generic HIF memcpy */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE9 WMI logging/CFR/Spectral/Radar */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* Unused TBD */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_NONE),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* Unused TBD */
+ {
+ .pipenum = __cpu_to_le32(11),
+ .pipedir = __cpu_to_le32(PIPEDIR_NONE),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_ipq5332[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(5),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(9),
+ },
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
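For reference, a minimal standalone sketch of how a zero-terminated service map like the one above resolves an HTC service to its CE pipes. The struct layout, service ID value, and names here are simplified stand-ins (the __cpu_to_le32 byte-swapping is dropped for brevity), not the driver's actual lookup code.

#include <stdint.h>
#include <stdio.h>

#define PIPEDIR_OUT 1
#define PIPEDIR_IN  2

struct service_to_pipe {
	uint32_t service_id;
	uint32_t pipedir;
	uint32_t pipenum;
};

/* trimmed, hypothetical copy of the WMI control rows above */
static const struct service_to_pipe svc_map[] = {
	{ 3 /* WMI_CONTROL */, PIPEDIR_OUT, 3 },
	{ 3 /* WMI_CONTROL */, PIPEDIR_IN,  2 },
	{ 0, 0, 0 },			/* must be last */
};

static int svc_to_pipe(uint32_t svc, uint32_t dir, uint32_t *pipe)
{
	const struct service_to_pipe *e;

	for (e = svc_map; e->service_id; e++) {	/* stop at zero sentinel */
		if (e->service_id == svc && e->pipedir == dir) {
			*pipe = e->pipenum;
			return 0;
		}
	}
	return -1;				/* service not mapped */
}

int main(void)
{
	uint32_t pipe;

	if (!svc_to_pipe(3, PIPEDIR_IN, &pipe))
		printf("WMI control events arrive on CE%u\n", (unsigned)pipe);
	return 0;
}

The zero terminator is why the map can be walked without a length; the separate .svc_to_ce_map_len in hw_params excludes it.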
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
.tx = {
ATH12K_TX_RING_MASK_0,
@@ -577,6 +792,46 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
},
};
+static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_ipq5332 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
+ ATH12K_TX_RING_MASK_2,
+ ATH12K_TX_RING_MASK_3,
+ },
+ .rx_mon_dest = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ ATH12K_RX_MON_RING_MASK_0,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH12K_HOST2RXDMA_RING_MASK_0,
+ },
+ .tx_mon_dest = {
+ ATH12K_TX_MON_RING_MASK_0,
+ ATH12K_TX_MON_RING_MASK_1,
+ },
+};
+
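A small standalone sketch of how per-group ring masks like the one above are consumed: external IRQ group i services ring j iff bit j is set in mask[i]. The placement (four empty slots, then one-hot masks at groups 4..7) mirrors the .rx array above, but the concrete mask values are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define GRP_NUM_MAX 11

/* hypothetical miniature of the .rx mask: groups 4..7 each own one
 * REO destination ring (one-hot ring bitmaps per IRQ group)
 */
static const uint8_t rx_mask[GRP_NUM_MAX] = {
	0, 0, 0, 0, 0x1, 0x2, 0x4, 0x8,
};

int main(void)
{
	for (int grp = 0; grp < GRP_NUM_MAX; grp++)
		for (int ring = 0; ring < 8; ring++)
			if (rx_mask[grp] & (1 << ring))
				printf("IRQ group %d services rx ring %d\n",
				       grp, ring);
	return 0;
}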
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.tx = {
ATH12K_TX_RING_MASK_0,
@@ -585,6 +840,12 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
},
.rx_mon_dest = {
},
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH12K_RX_MON_STATUS_RING_MASK_0,
+ ATH12K_RX_MON_STATUS_RING_MASK_1,
+ ATH12K_RX_MON_STATUS_RING_MASK_2,
+ },
.rx = {
0, 0, 0,
ATH12K_RX_RING_MASK_0,
@@ -619,6 +880,9 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = {
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
+ .hal_tcl1_ring_base_lsb = 0x00000900,
+ .hal_tcl1_ring_base_msb = 0x00000904,
+ .hal_tcl2_ring_base_lsb = 0x00000978,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
@@ -681,6 +945,14 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = {
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000a84,
+
+ /* CE base address */
+ .hal_umac_ce0_src_reg_base = 0x01b80000,
+ .hal_umac_ce0_dest_reg_base = 0x01b81000,
+ .hal_umac_ce1_src_reg_base = 0x01b82000,
+ .hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e38338,
};
static const struct ath12k_hw_regs qcn9274_v2_regs = {
@@ -695,6 +967,9 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
+ .hal_tcl1_ring_base_lsb = 0x00000900,
+ .hal_tcl1_ring_base_msb = 0x00000904,
+ .hal_tcl2_ring_base_lsb = 0x00000978,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
@@ -734,6 +1009,8 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
.hal_reo1_sw_cookie_cfg1 = 0x00000070,
.hal_reo1_qdesc_lut_base0 = 0x00000074,
.hal_reo1_qdesc_lut_base1 = 0x00000078,
+ .hal_reo1_qdesc_addr = 0x0000007c,
+ .hal_reo1_qdesc_max_peerid = 0x00000088,
.hal_reo1_ring_base_lsb = 0x00000500,
.hal_reo1_ring_base_msb = 0x00000504,
.hal_reo1_ring_id = 0x00000508,
@@ -761,6 +1038,102 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000aa0,
+
+ /* CE base address */
+ .hal_umac_ce0_src_reg_base = 0x01b80000,
+ .hal_umac_ce0_dest_reg_base = 0x01b81000,
+ .hal_umac_ce1_src_reg_base = 0x01b82000,
+ .hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e38338,
+};
+
+static const struct ath12k_hw_regs ipq5332_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_id = 0x00000918,
+ .hal_tcl1_ring_misc = 0x00000920,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000092c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000930,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000940,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000944,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000958,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000095c,
+ .hal_tcl1_ring_base_lsb = 0x00000910,
+ .hal_tcl1_ring_base_msb = 0x00000914,
+ .hal_tcl1_ring_msi1_data = 0x00000960,
+ .hal_tcl2_ring_base_lsb = 0x00000988,
+ .hal_tcl_ring_base_lsb = 0x00000b68,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000d48,
+
+ /* REO DEST ring address */
+ .hal_reo2_ring_base = 0x00000578,
+ .hal_reo1_misc_ctrl_addr = 0x00000b9c,
+ .hal_reo1_sw_cookie_cfg0 = 0x0000006c,
+ .hal_reo1_sw_cookie_cfg1 = 0x00000070,
+ .hal_reo1_qdesc_lut_base0 = 0x00000074,
+ .hal_reo1_qdesc_lut_base1 = 0x00000078,
+ .hal_reo1_ring_base_lsb = 0x00000500,
+ .hal_reo1_ring_base_msb = 0x00000504,
+ .hal_reo1_ring_id = 0x00000508,
+ .hal_reo1_ring_misc = 0x00000510,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000514,
+ .hal_reo1_ring_hp_addr_msb = 0x00000518,
+ .hal_reo1_ring_producer_int_setup = 0x00000524,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000548,
+ .hal_reo1_ring_msi1_base_msb = 0x0000054c,
+ .hal_reo1_ring_msi1_data = 0x00000550,
+ .hal_reo1_aging_thres_ix0 = 0x00000b28,
+ .hal_reo1_aging_thres_ix1 = 0x00000b2c,
+ .hal_reo1_aging_thres_ix2 = 0x00000b30,
+ .hal_reo1_aging_thres_ix3 = 0x00000b34,
+
+ /* REO Exception ring address */
+ .hal_reo2_sw0_ring_base = 0x000008c0,
+
+ /* REO Reinject ring address */
+ .hal_sw2reo_ring_base = 0x00000320,
+ .hal_sw2reo1_ring_base = 0x00000398,
+
+ /* REO cmd ring address */
+ .hal_reo_cmd_ring_base = 0x000002a8,
+
+ /* REO status ring address */
+ .hal_reo_status_ring_base = 0x00000aa0,
+
+ /* WBM idle link ring address */
+ .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
+ .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
+ .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .hal_wbm_r0_idle_list_size_addr = 0x00000244,
+ .hal_wbm_scattered_ring_base_lsb = 0x00000250,
+ .hal_wbm_scattered_ring_base_msb = 0x00000254,
+ .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+ /* SW2WBM release ring address */
+ .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
+
+ /* WBM2SW release ring address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000e08,
+ .hal_wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PPE release ring address */
+ .hal_ppe_rel_ring_base = 0x0000046c,
+
+ /* CE address */
+ .hal_umac_ce0_src_reg_base = 0x00740000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .hal_umac_ce0_dest_reg_base = 0x00741000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .hal_umac_ce1_src_reg_base = 0x00742000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .hal_umac_ce1_dest_reg_base = 0x00743000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
};
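Unlike the PCI chips, whose CE register bases above are absolute, the IPQ5332 entries are stored relative to HAL_IPQ5332_CE_WFSS_REG_BASE so they can index a single remapped window (see the ce_remap entry added further down). A standalone sketch of the arithmetic; the base value is assumed here for illustration, the real constant lives in hal.h.

#include <stdint.h>
#include <stdio.h>

/* assumed for illustration; the real constant is defined in hal.h */
#define HAL_IPQ5332_CE_WFSS_REG_BASE 0x00740000u

/* absolute CE register bases in the SoC address map, as in the table above */
static const uint32_t ce_abs[] = { 0x00740000u, 0x00741000u,
				   0x00742000u, 0x00743000u };

int main(void)
{
	/* the driver stores these as offsets into one remapped window
	 * starting at HAL_IPQ5332_CE_WFSS_REG_BASE, so a register access
	 * becomes window_vaddr + (absolute_addr - base)
	 */
	for (unsigned i = 0; i < 4; i++)
		printf("CE reg block %u -> window offset 0x%x\n",
		       i, ce_abs[i] - HAL_IPQ5332_CE_WFSS_REG_BASE);
	return 0;
}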
static const struct ath12k_hw_regs wcn7850_regs = {
@@ -775,6 +1148,9 @@ static const struct ath12k_hw_regs wcn7850_regs = {
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
+ .hal_tcl1_ring_base_lsb = 0x00000900,
+ .hal_tcl1_ring_base_msb = 0x00000904,
+ .hal_tcl2_ring_base_lsb = 0x00000978,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
@@ -837,6 +1213,14 @@ static const struct ath12k_hw_regs wcn7850_regs = {
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000a84,
+
+ /* CE base address */
+ .hal_umac_ce0_src_reg_base = 0x01b80000,
+ .hal_umac_ce0_dest_reg_base = 0x01b81000,
+ .hal_umac_ce1_src_reg_base = 0x01b82000,
+ .hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e40304,
};
static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
@@ -856,6 +1240,26 @@ static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
};
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_ipq5332 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+static const struct ce_ie_addr ath12k_ce_ie_addr_ipq5332 = {
+ .ie1_reg_addr = CE_HOST_IE_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .ie2_reg_addr = CE_HOST_IE_2_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .ie3_reg_addr = CE_HOST_IE_3_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+};
+
+static const struct ce_remap ath12k_ce_remap_ipq5332 = {
+ .base = HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .size = HAL_IPQ5332_CE_SIZE,
+};
+
static const struct ath12k_hw_params ath12k_hw_params[] = {
{
.name = "qcn9274 hw1.0",
@@ -864,6 +1268,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.dir = "QCN9274/hw1.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
},
.max_radios = 1,
.single_pdev_only = false,
@@ -899,7 +1304,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.download_calib = true,
.supports_suspend = false,
.tcl_ring_retry = true,
- .reoq_lut_support = false,
+ .reoq_lut_support = true,
.supports_shadow_regs = false,
.num_tcl_banks = 48,
@@ -932,6 +1337,14 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.iova_mask = 0,
.supports_aspm = false,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = false,
+
+ .dp_primary_link_only = true,
},
{
.name = "wcn7850 hw2.0",
@@ -941,6 +1354,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.dir = "WCN7850/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 256 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
},
.max_radios = 1,
@@ -972,7 +1386,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
- .supports_monitor = false,
+ .supports_monitor = true,
.idle_ps = true,
.download_calib = false,
@@ -1012,6 +1426,14 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
.supports_aspm = true,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = true,
+
+ .dp_primary_link_only = false,
},
{
.name = "qcn9274 hw2.0",
@@ -1020,6 +1442,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.dir = "QCN9274/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
},
.max_radios = 2,
.single_pdev_only = false,
@@ -1049,7 +1472,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_AP_VLAN),
- .supports_monitor = false,
+ .supports_monitor = true,
.idle_ps = false,
.download_calib = true,
@@ -1088,6 +1511,92 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.iova_mask = 0,
.supports_aspm = false,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = false,
+
+ .dp_primary_link_only = true,
+ },
+ {
+ .name = "ipq5332 hw1.0",
+ .hw_rev = ATH12K_HW_IPQ5332_HW10,
+ .fw = {
+ .dir = "IPQ5332/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_remoteproc,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ5332,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .regs = &ipq5332_regs,
+ .ring_mask = &ath12k_hw_ring_mask_ipq5332,
+
+ .host_ce_config = ath12k_host_ce_config_ipq5332,
+ .ce_count = 12,
+ .target_ce_config = ath12k_target_ce_config_wlan_ipq5332,
+ .target_ce_count = 12,
+ .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_ipq5332,
+ .svc_to_ce_map_len = 18,
+
+ .hal_params = &ath12k_hw_hal_params_ipq5332,
+
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = false,
+
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+ .wmi_init = &ath12k_wmi_init_qcn9274,
+
+ .hal_ops = &hal_qcn9274_ops,
+
+ .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
+
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
+
+ .rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = false,
+ .iova_mask = 0,
+ .supports_aspm = false,
+
+ .ce_ie_addr = &ath12k_ce_ie_addr_ipq5332,
+ .ce_remap = &ath12k_ce_remap_ipq5332,
+ .bdf_addr_offset = 0xC00000,
+
+ .dp_primary_link_only = true,
},
};
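A hedged standalone sketch of how a table like ath12k_hw_params[] above is typically matched against the probed hardware revision; the names and values below are simplified stand-ins, not the driver's actual lookup helper.

#include <stddef.h>
#include <stdio.h>

enum hw_rev { QCN9274_HW10, QCN9274_HW20, WCN7850_HW20, IPQ5332_HW10 };

struct hw_params {
	const char *name;
	enum hw_rev hw_rev;
	int ce_count;
};

/* trimmed, hypothetical excerpt of the table above */
static const struct hw_params hw_params[] = {
	{ "qcn9274 hw1.0", QCN9274_HW10, 16 },
	{ "wcn7850 hw2.0", WCN7850_HW20,  9 },
	{ "ipq5332 hw1.0", IPQ5332_HW10, 12 },
};

static const struct hw_params *hw_params_lookup(enum hw_rev rev)
{
	for (size_t i = 0; i < sizeof(hw_params) / sizeof(hw_params[0]); i++)
		if (hw_params[i].hw_rev == rev)
			return &hw_params[i];
	return NULL;	/* unsupported hardware revision */
}

int main(void)
{
	const struct hw_params *hw = hw_params_lookup(IPQ5332_HW10);

	if (hw)
		printf("%s: %d copy engines\n", hw->name, hw->ce_count);
	return 0;
}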
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 8d52182e28ae..0a75bc5abfa2 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HW_H
@@ -97,6 +97,7 @@
#define ATH12K_REGDB_FILE_NAME "regdb.bin"
#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128
+#define ATH12K_IPQ5332_USERPD_ID 1
enum ath12k_hw_rate_cck {
ATH12K_HW_RATE_CCK_LP_11M = 0,
@@ -121,6 +122,7 @@ enum ath12k_hw_rate_ofdm {
enum ath12k_bus {
ATH12K_BUS_PCI,
+ ATH12K_BUS_AHB,
};
#define ATH12K_EXT_IRQ_GRP_NUM_MAX 11
@@ -133,6 +135,7 @@ enum hal_encrypt_type;
struct ath12k_hw_ring_mask {
u8 tx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_mon_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX];
@@ -146,6 +149,11 @@ struct ath12k_hw_hal_params {
u32 wbm2sw_cc_enable;
};
+enum ath12k_m3_fw_loaders {
+ ath12k_m3_fw_loader_driver,
+ ath12k_m3_fw_loader_remoteproc,
+};
+
struct ath12k_hw_params {
const char *name;
u16 hw_rev;
@@ -154,6 +162,7 @@ struct ath12k_hw_params {
const char *dir;
size_t board_size;
size_t cal_offset;
+ enum ath12k_m3_fw_loaders m3_loader;
} fw;
u8 max_radios;
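A minimal sketch of what dispatching on the new fw.m3_loader field could look like; only the enum shape comes from the hunk above, the helper names and bodies here are hypothetical.

#include <stdio.h>

enum m3_fw_loaders { M3_LOADER_DRIVER, M3_LOADER_REMOTEPROC };

/* hypothetical hooks standing in for the real load paths */
static int m3_load_by_driver(void) { return puts("driver loads m3.bin"); }
static int m3_load_by_rproc(void)  { return puts("remoteproc owns m3"); }

static int m3_load(enum m3_fw_loaders loader)
{
	switch (loader) {
	case M3_LOADER_DRIVER:
		return m3_load_by_driver();	/* qcn9274/wcn7850 above */
	case M3_LOADER_REMOTEPROC:
		return m3_load_by_rproc();	/* AHB-based ipq5332 */
	}
	return -1;
}

int main(void) { return m3_load(M3_LOADER_REMOTEPROC) < 0; }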
@@ -190,6 +199,7 @@ struct ath12k_hw_params {
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
bool supports_aspm:1;
+ bool current_cc_support:1;
u32 num_tcl_banks;
u32 max_tx_ring;
@@ -220,6 +230,13 @@ struct ath12k_hw_params {
bool supports_dynamic_smps_6ghz;
u32 iova_mask;
+
+ const struct ce_ie_addr *ce_ie_addr;
+ const struct ce_remap *ce_remap;
+ u32 bdf_addr_offset;
+
+ /* setup REO queue, frag etc. only for the primary link peer */
+ bool dp_primary_link_only:1;
};
struct ath12k_hw_ops {
@@ -293,9 +310,15 @@ struct ath12k_hw_regs {
u32 hal_tcl1_ring_msi1_base_msb;
u32 hal_tcl1_ring_msi1_data;
u32 hal_tcl_ring_base_lsb;
+ u32 hal_tcl1_ring_base_lsb;
+ u32 hal_tcl1_ring_base_msb;
+ u32 hal_tcl2_ring_base_lsb;
u32 hal_tcl_status_ring_base_lsb;
+ u32 hal_reo1_qdesc_addr;
+ u32 hal_reo1_qdesc_max_peerid;
+
u32 hal_wbm_idle_ring_base_lsb;
u32 hal_wbm_idle_ring_misc_addr;
u32 hal_wbm_r0_idle_list_cntl_addr;
@@ -316,6 +339,11 @@ struct ath12k_hw_regs {
u32 pcie_qserdes_sysclk_en_sel;
u32 pcie_pcs_osc_dtct_config_base;
+ u32 hal_umac_ce0_src_reg_base;
+ u32 hal_umac_ce0_dest_reg_base;
+ u32 hal_umac_ce1_src_reg_base;
+ u32 hal_umac_ce1_dest_reg_base;
+
u32 hal_ppe_rel_ring_base;
u32 hal_reo2_ring_base;
@@ -347,6 +375,8 @@ struct ath12k_hw_regs {
u32 hal_reo_cmd_ring_base;
u32 hal_reo_status_ring_base;
+
+ u32 gcc_gcc_pcie_hot_rst;
};
static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index dfa05f0ee6c9..59ec422992d3 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -229,7 +229,8 @@ ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = {
.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO,
.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
@@ -580,14 +581,22 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
return 0;
}
-static struct ath12k_link_vif *ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif)
+static struct ath12k_link_vif *
+ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif,
+ struct ieee80211_bss_conf *link_conf)
{
+ struct ieee80211_bss_conf *tx_bss_conf;
+ struct ath12k *ar = arvif->ar;
struct ath12k_vif *tx_ahvif;
- if (arvif->ahvif->vif->mbssid_tx_vif) {
- tx_ahvif = ath12k_vif_to_ahvif(arvif->ahvif->vif->mbssid_tx_vif);
- if (tx_ahvif)
- return &tx_ahvif->deflink;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ tx_bss_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ link_conf->tx_bss_conf);
+ if (tx_bss_conf) {
+ tx_ahvif = ath12k_vif_to_ahvif(tx_bss_conf->vif);
+ return wiphy_dereference(tx_ahvif->ah->hw->wiphy,
+ tx_ahvif->link[tx_bss_conf->link_id]);
}
return NULL;
@@ -874,12 +883,12 @@ static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BA
{
switch (band1) {
case NL80211_BAND_2GHZ:
- if (band2 & WMI_HOST_WLAN_2G_CAP)
+ if (band2 & WMI_HOST_WLAN_2GHZ_CAP)
return true;
break;
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
- if (band2 & WMI_HOST_WLAN_5G_CAP)
+ if (band2 & WMI_HOST_WLAN_5GHZ_CAP)
return true;
break;
default:
@@ -980,7 +989,7 @@ static int ath12k_mac_txpower_recalc(struct ath12k *ar)
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
txpower / 2);
- if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP) &&
ar->txpower_limit_2g != txpower) {
param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
ret = ath12k_wmi_pdev_set_param(ar, param,
@@ -990,7 +999,7 @@ static int ath12k_mac_txpower_recalc(struct ath12k *ar)
ar->txpower_limit_2g = txpower;
}
- if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP) &&
ar->txpower_limit_5g != txpower) {
param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
ret = ath12k_wmi_pdev_set_param(ar, param,
@@ -1245,61 +1254,6 @@ static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
-{
- struct ath12k_pdev *pdev = ar->pdev;
- struct ath12k_wmi_vdev_create_arg arg = {};
- int bit, ret;
- u8 tmp_addr[6];
-
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
-
- if (ar->monitor_vdev_created)
- return 0;
-
- if (ar->ab->free_vdev_map == 0) {
- ath12k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
- return -ENOMEM;
- }
-
- bit = __ffs64(ar->ab->free_vdev_map);
-
- ar->monitor_vdev_id = bit;
-
- arg.if_id = ar->monitor_vdev_id;
- arg.type = WMI_VDEV_TYPE_MONITOR;
- arg.subtype = WMI_VDEV_SUBTYPE_NONE;
- arg.pdev_id = pdev->pdev_id;
- arg.if_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
-
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
- arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
- arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
- }
-
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
- arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
- arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
- }
-
- ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg);
- if (ret) {
- ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
- ar->monitor_vdev_id, ret);
- ar->monitor_vdev_id = -1;
- return ret;
- }
-
- ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
- ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
- ar->num_created_vdevs++;
- ar->monitor_vdev_created = true;
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n",
- ar->monitor_vdev_id);
-
- return 0;
-}
-
static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
{
int ret;
@@ -1336,19 +1290,9 @@ static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
return ret;
}
-static void
-ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
- struct ieee80211_chanctx_conf *conf,
- void *data)
-{
- struct cfg80211_chan_def **def = data;
-
- *def = &conf->def;
-}
-
static int ath12k_mac_monitor_start(struct ath12k *ar)
{
- struct cfg80211_chan_def *chandef = NULL;
+ struct ath12k_mac_get_any_chanctx_conf_arg arg;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -1356,25 +1300,33 @@ static int ath12k_mac_monitor_start(struct ath12k *ar)
if (ar->monitor_started)
return 0;
+ arg.ar = ar;
+ arg.chanctx_conf = NULL;
ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
- ath12k_mac_get_any_chandef_iter,
- &chandef);
- if (!chandef)
+ ath12k_mac_get_any_chanctx_conf_iter,
+ &arg);
+ if (!arg.chanctx_conf)
return 0;
- ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id,
+ &arg.chanctx_conf->def);
if (ret) {
ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
- ath12k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "fail to set monitor filter: %d\n", ret);
return ret;
}
ar->monitor_started = true;
ar->num_started_vdevs++;
- ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);
- return ret;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
}
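The hunk above replaces the bare chandef pointer with an arg struct threaded through the iterator, so the callback can carry both the radio and the matched context. A standalone sketch of that iterator-with-cookie pattern; all names here are hypothetical.

#include <stdio.h>

struct chanctx { int chan; };

struct get_any_chanctx_arg {
	struct chanctx *found;	/* output, first match wins */
};

static void get_any_chanctx_iter(struct chanctx *ctx, void *data)
{
	struct get_any_chanctx_arg *arg = data;

	if (!arg->found)	/* keep the first context seen */
		arg->found = ctx;
}

/* stand-in for ieee80211_iter_chan_contexts_atomic() */
static void for_each_chanctx(void (*iter)(struct chanctx *, void *),
			     void *data)
{
	static struct chanctx ctxs[] = { { 36 }, { 149 } };

	for (unsigned i = 0; i < sizeof(ctxs) / sizeof(ctxs[0]); i++)
		iter(&ctxs[i], data);
}

int main(void)
{
	struct get_any_chanctx_arg arg = { .found = NULL };

	for_each_chanctx(get_any_chanctx_iter, &arg);
	if (arg.found)
		printf("monitor starts on channel %d\n", arg.found->chan);
	return 0;
}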
static int ath12k_mac_monitor_stop(struct ath12k *ar)
@@ -1440,58 +1392,9 @@ err:
return ret;
}
-static int ath12k_mac_config(struct ath12k *ar, u32 changed)
-{
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
- struct ieee80211_conf *conf = &hw->conf;
- int ret = 0;
-
- lockdep_assert_wiphy(hw->wiphy);
-
- if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
- if (ar->monitor_conf_enabled) {
- if (ar->monitor_vdev_created)
- return ret;
- ret = ath12k_mac_monitor_vdev_create(ar);
- if (ret)
- return ret;
- ret = ath12k_mac_monitor_start(ar);
- if (ret)
- goto err_mon_del;
- } else {
- if (!ar->monitor_vdev_created)
- return ret;
- ret = ath12k_mac_monitor_stop(ar);
- if (ret)
- return ret;
- ath12k_mac_monitor_vdev_delete(ar);
- }
- }
-
- return ret;
-
-err_mon_del:
- ath12k_mac_monitor_vdev_delete(ar);
- return ret;
-}
-
static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- int ret;
-
- lockdep_assert_wiphy(hw->wiphy);
-
- ar = ath12k_ah_to_ar(ah, 0);
-
- ret = ath12k_mac_config(ar, changed);
- if (ret)
- ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n",
- ar->pdev_idx, ret);
-
- return ret;
+ return 0;
}
static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_link_vif *arvif,
@@ -1715,7 +1618,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
return -ENOLINK;
}
- tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
if (tx_arvif != arvif && arvif->is_up)
return 0;
@@ -1785,6 +1688,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
{
struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *tx_arvif;
struct ath12k *ar = arvif->ar;
int ret;
@@ -1817,7 +1721,15 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
- tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab,
+ "unable to access bss link conf for link %u required to retrieve transmitting link conf\n",
+ arvif->link_id);
+ return;
+ }
+
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = info->bssid_index;
@@ -3027,6 +2939,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
const struct ieee80211_sta_eht_cap *eht_cap;
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *link_conf;
u32 *rx_mcs, *tx_mcs;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -3038,6 +2951,12 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
return;
}
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access link_conf in peer assoc eht set\n");
+ return;
+ }
+
eht_cap = &link_sta->eht_cap;
he_cap = &link_sta->he_cap;
if (!he_cap->has_he || !eht_cap->has_eht)
@@ -3109,6 +3028,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
}
arg->punct_bitmap = ~arvif->punct_bitmap;
+ arg->eht_disable_mcs15 = link_conf->eht_disable_mcs15;
}
static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
@@ -3139,6 +3059,7 @@ static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
ml->ml_peer_id = ahsta->ml_peer_id;
ml->ieee_link_id = arsta->link_id;
ml->num_partner_links = 0;
+ ml->eml_cap = sta->eml_cap;
links = ahsta->links_map;
rcu_read_lock();
@@ -3377,6 +3298,11 @@ static void ath12k_bss_assoc(struct ath12k *ar,
if (ret)
ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
arvif->vdev_id, ret);
+
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE)
+ ath12k_mac_11d_scan_stop_all(ar->ab);
}
static void ath12k_bss_disassoc(struct ath12k *ar,
@@ -3450,7 +3376,10 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
}
sband = hw->wiphy->bands[def->chan->band];
- basic_rate_idx = ffs(bss_conf->basic_rates) - 1;
+ if (bss_conf->basic_rates)
+ basic_rate_idx = __ffs(bss_conf->basic_rates);
+ else
+ basic_rate_idx = 0;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
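The change above guards the basic-rates index because kernel __ffs() is undefined for 0, while the old ffs(x) - 1 silently produced -1 when no basic rate was set. A userspace demo of the same fix using the equivalent GCC builtins:

#include <stdio.h>

/* userspace stand-ins: kernel ffs() == __builtin_ffs(), and kernel
 * __ffs() == __builtin_ctz(), which is likewise undefined for 0
 */
static int rate_idx(unsigned int basic_rates)
{
	/* mirrors the fix above: never feed 0 to the ctz-style helper */
	return basic_rates ? __builtin_ctz(basic_rates) : 0;
}

int main(void)
{
	printf("%d\n", rate_idx(0x150));	/* lowest set bit: 4 */
	printf("%d\n", rate_idx(0));		/* guarded fallback: 0 */
	/* the old code computed ffs(0) - 1 == -1, an invalid index */
	printf("%d\n", __builtin_ffs(0) - 1);
	return 0;
}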
@@ -3495,6 +3424,9 @@ static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif,
arvif->ahvif = ahvif;
arvif->link_id = _link_id;
+ /* Protects the datapath stats update on a per-link basis */
+ spin_lock_init(&arvif->link_stats_lock);
+
INIT_LIST_HEAD(&arvif->list);
INIT_DELAYED_WORK(&arvif->connection_loss_work,
ath12k_mac_vif_sta_connection_loss_work);
@@ -3534,6 +3466,11 @@ static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
arvif->vdev_id, arvif->link_id);
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE)
+ ath12k_mac_11d_scan_stop(ar);
+
if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
if (ret)
@@ -3556,23 +3493,18 @@ static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
if (arvif)
return arvif;
- if (!vif->valid_links) {
- /* Use deflink for Non-ML VIFs and mark the link id as 0
- */
- link_id = 0;
+ /* If this is the first link arvif being created for an ML VIF,
+ * use the preallocated deflink memory, except for scan arvifs
+ */
+ if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
arvif = &ahvif->deflink;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ arvif->is_sta_assoc_link = true;
} else {
- /* If this is the first link arvif being created for an ML VIF
- * use the preallocated deflink memory except for scan arvifs
- */
- if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
- arvif = &ahvif->deflink;
- } else {
- arvif = (struct ath12k_link_vif *)
- kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
- if (!arvif)
- return NULL;
- }
+ arvif = kzalloc(sizeof(*arvif), GFP_KERNEL);
+ if (!arvif)
+ return NULL;
}
ath12k_mac_init_arvif(ahvif, arvif, link_id);
@@ -3702,6 +3634,8 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
unsigned long links = ahvif->links_map;
struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
struct ath12k *ar;
u8 link_id;
@@ -3714,6 +3648,35 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ /* only in station mode can we get here, so it's safe
+ * to use ap_addr
+ */
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+ if (!sta) {
+ rcu_read_unlock();
+ WARN_ONCE(1, "failed to find sta with addr %pM\n",
+ vif->cfg.ap_addr);
+ return;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arvif = wiphy_dereference(hw->wiphy,
+ ahvif->link[ahsta->assoc_link_id]);
+ rcu_read_unlock();
+
+ ar = arvif->ar;
+ /* an assoc link's bss info is guaranteed to
+ * exist at this point
+ */
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ ath12k_bss_assoc(ar, arvif, info);
+
+ /* exclude assoc link as it is done above */
+ links &= ~BIT(ahsta->assoc_link_id);
+ }
+
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
if (!arvif || !arvif->ar)
@@ -3789,6 +3752,18 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
psmode, arvif->vdev_id, ret);
}
+static bool ath12k_mac_supports_station_tpc(struct ath12k *ar,
+ struct ath12k_vif *ahvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath12k_wmi_supports_6ghz_cc_ext(ar) &&
+ test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ chandef->chan &&
+ chandef->chan->band == NL80211_BAND_6GHZ;
+}
+
static void ath12k_mac_bss_info_changed(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *info,
@@ -3983,12 +3958,16 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
band = def.chan->band;
mcast_rate = info->mcast_rate[band];
- if (mcast_rate > 0)
+ if (mcast_rate > 0) {
rateidx = mcast_rate - 1;
- else
- rateidx = ffs(info->basic_rates) - 1;
+ } else {
+ if (info->basic_rates)
+ rateidx = __ffs(info->basic_rates);
+ else
+ rateidx = 0;
+ }
- if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP)
rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
bitrate = ath12k_legacy_rates[rateidx].bitrate;
@@ -4162,9 +4141,9 @@ ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
* split the hw request and perform multiple scans
*/
- if (center_freq < ATH12K_MIN_5G_FREQ)
+ if (center_freq < ATH12K_MIN_5GHZ_FREQ)
band = NL80211_BAND_2GHZ;
- else if (center_freq < ATH12K_MIN_6G_FREQ)
+ else if (center_freq < ATH12K_MIN_6GHZ_FREQ)
band = NL80211_BAND_5GHZ;
else
band = NL80211_BAND_6GHZ;
@@ -4194,7 +4173,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
fallthrough;
case ATH12K_SCAN_STARTING:
cancel_delayed_work(&ar->scan.timeout);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
wiphy_work_queue(ar->ah->hw->wiphy, &ar->scan.vdev_clean_wk);
break;
}
@@ -4376,6 +4355,133 @@ static int ath12k_start_scan(struct ath12k *ar,
return 0;
}
+int ath12k_mac_get_fw_stats(struct ath12k *ar,
+ struct ath12k_fw_stats_req_params *param)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ unsigned long time_left;
+ int ret;
+
+ guard(mutex)(&ah->hw_mutex);
+
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ ath12k_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+ reinit_completion(&ar->fw_stats_done);
+
+ ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
+ param->vdev_id, param->pdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ param->pdev_id, param->vdev_id, param->stats_id);
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+ if (!time_left) {
+ ath12k_warn(ab, "time out while waiting for get fw stats\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
+ * when the stats data buffer limit is reached. fw_stats_complete
+ * is completed once the host receives the first event from
+ * firmware, but more events may still follow. The wait below
+ * lets firmware finish sending all of them.
+ */
+ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
+ if (!time_left) {
+ ath12k_warn(ab, "time out while waiting for fw stats done\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id,
+ int *dbm)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_fw_stats_req_params params = {};
+ struct ath12k_fw_stats_pdev *pdev;
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_base *ab;
+ struct ath12k *ar;
+ int ret;
+
+ /* The final Tx power is the minimum of the target power, CTL
+ * power, regulatory power and PSD EIRP power. The host only knows
+ * the regulatory power from the regulatory rules obtained;
+ * firmware knows all of these powers and applies the minimum.
+ * Hence, request the FW pdev stats, in which firmware reports the
+ * minimum of all vdevs' channel Tx power.
+ */
+ lockdep_assert_wiphy(hw->wiphy);
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || !arvif->ar)
+ return -EINVAL;
+
+ ar = arvif->ar;
+ ab = ar->ab;
+ if (ah->state != ATH12K_HW_STATE_ON)
+ goto err_fallback;
+
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags))
+ return -EAGAIN;
+
+ /* Limit the requests to Firmware for fetching the tx power */
+ if (ar->chan_tx_pwr != ATH12K_PDEV_TX_POWER_INVALID &&
+ time_before(jiffies,
+ msecs_to_jiffies(ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS) +
+ ar->last_tx_power_update))
+ goto send_tx_power;
+
+ params.pdev_id = ar->pdev->pdev_id;
+ params.vdev_id = arvif->vdev_id;
+ params.stats_id = WMI_REQUEST_PDEV_STAT;
+ ret = ath12k_mac_get_fw_stats(ar, &params);
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_fallback;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+ struct ath12k_fw_stats_pdev, list);
+ if (!pdev) {
+ spin_unlock_bh(&ar->data_lock);
+ goto err_fallback;
+ }
+
+ /* tx power reported by firmware is in units of 0.5 dBm */
+ ar->chan_tx_pwr = pdev->chan_tx_power / 2;
+ spin_unlock_bh(&ar->data_lock);
+ ar->last_tx_power_update = jiffies;
+
+send_tx_power:
+ *dbm = ar->chan_tx_pwr;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower fetched from firmware %d dBm\n",
+ *dbm);
+ return 0;
+
+err_fallback:
+ /* We didn't get txpower from FW. Hence, relying on vif->bss_conf.txpower */
+ *dbm = vif->bss_conf.txpower;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+ *dbm);
+ return 0;
+}
+
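A standalone sketch of the caching scheme above: the fetched value is served until the refresh window expires, and firmware's 0.5 dBm units are halved. The refresh interval, the invalid marker, and the fake fetch helper below are assumptions for illustration, standing in for the jiffies-based throttle in the real code.

#include <stdio.h>
#include <time.h>

#define TX_POWER_INVALID (-1)
#define REFRESH_SECS 5		/* assumed; the driver uses a msec constant */

static int chan_tx_pwr = TX_POWER_INVALID;
static time_t last_update;

static int fw_fetch_half_dbm(void) { return 46; }	/* fake fw stat */

static int get_txpower(void)
{
	time_t now = time(NULL);

	/* serve the cached value while it is still fresh */
	if (chan_tx_pwr != TX_POWER_INVALID &&
	    now - last_update < REFRESH_SECS)
		return chan_tx_pwr;

	/* firmware reports units of 0.5 dBm, hence the divide by 2 */
	chan_tx_pwr = fw_fetch_half_dbm() / 2;
	last_update = now;
	return chan_tx_pwr;
}

int main(void)
{
	printf("tx power %d dBm\n", get_txpower());	/* 46 / 2 == 23 */
	printf("tx power %d dBm (cached)\n", get_txpower());
	return 0;
}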
static u8
ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
{
@@ -4429,7 +4535,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
return -EINVAL;
/* check if any of the links of ML VIF is already started on
- * radio(ar) correpsondig to given scan frequency and use it,
+ * radio(ar) corresponding to given scan frequency and use it,
* if not use scan link (link 15) for scan purpose.
*/
link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
@@ -4538,10 +4644,16 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
ret = ath12k_start_scan(ar, arg);
if (ret) {
- ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ if (ret == -EBUSY)
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "scan engine is busy 11d state %d\n", ar->state_11d);
+ else
+ ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH12K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
+ goto exit;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started");
@@ -4565,6 +4677,11 @@ exit:
kfree(arg);
}
+ if (ar->state_11d == ATH12K_11D_PREPARING &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE)
+ ath12k_mac_11d_scan_start(ar, arvif->vdev_id);
+
return ret;
}
@@ -4605,7 +4722,6 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
.macaddr = macaddr,
};
struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -4624,8 +4740,8 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
arg.key_cipher = WMI_CIPHER_AES_CCM;
- /* TODO: Re-check if flag is valid */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -4633,12 +4749,10 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
arg.key_txmic_len = 8;
arg.key_rxmic_len = 8;
break;
- case WLAN_CIPHER_SUITE_CCMP_256:
- arg.key_cipher = WMI_CIPHER_AES_CCM;
- break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = WMI_CIPHER_AES_GCM;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
default:
ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
@@ -4658,7 +4772,7 @@ install:
if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
return -ETIMEDOUT;
- if (ether_addr_equal(macaddr, vif->addr))
+ if (ether_addr_equal(macaddr, arvif->bssid))
ahvif->key_cipher = key->cipher;
return ar->install_key_status ? -EINVAL : 0;
@@ -5524,10 +5638,13 @@ static int ath12k_mac_station_add(struct ath12k *ar,
ar->max_num_stations);
goto exit;
}
- arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
- if (!arsta->rx_stats) {
- ret = -ENOMEM;
- goto dec_num_station;
+
+ if (ath12k_debugfs_is_extd_rx_stats_enabled(ar) && !arsta->rx_stats) {
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
}
peer_param.vdev_id = arvif->vdev_id;
@@ -5669,12 +5786,15 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state new_state)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_bss_conf *link_conf;
struct ath12k *ar = arvif->ar;
+ struct ath12k_reg_info *reg_info;
+ struct ath12k_base *ab = ar->ab;
int ret = 0;
lockdep_assert_wiphy(hw->wiphy);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
arsta->link_id, arsta->addr, old_state, new_state);
/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station
@@ -5684,7 +5804,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST)) {
ret = ath12k_mac_station_remove(ar, arvif, arsta);
if (ret) {
- ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ ath12k_warn(ab, "Failed to remove station: %pM for VDEV: %d\n",
arsta->addr, arvif->vdev_id);
goto exit;
}
@@ -5695,7 +5815,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
ret = ath12k_mac_station_add(ar, arvif, arsta);
if (ret)
- ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ ath12k_warn(ab, "Failed to add station: %pM for VDEV: %d\n",
arsta->addr, arvif->vdev_id);
/* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for
@@ -5708,7 +5828,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath12k_mac_station_assoc(ar, arvif, arsta, false);
if (ret)
- ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
+ ath12k_warn(ab, "Failed to associate station: %pM\n",
arsta->addr);
/* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
@@ -5717,9 +5837,21 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
ret = ath12k_mac_station_authorize(ar, arvif, arsta);
- if (ret)
- ath12k_warn(ar->ab, "Failed to authorize station: %pM\n",
+ if (ret) {
+ ath12k_warn(ab, "Failed to authorize station: %pM\n",
arsta->addr);
+ goto exit;
+ }
+
+ if (ath12k_wmi_supports_6ghz_cc_ext(ar) &&
+ arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ reg_info = ab->reg_info[ar->pdev_idx];
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "connection done, update reg rules\n");
+ ath12k_hw_to_ah(hw)->regd_updated = false;
+ ath12k_reg_handle_chan_list(ab, reg_info, arvif->ahvif->vdev_type,
+ link_conf->power_type);
+ }
/* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: station may be in removal,
* deauthorize it.
@@ -5738,7 +5870,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath12k_mac_station_disassoc(ar, arvif, arsta);
if (ret)
- ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ ath12k_warn(ab, "Failed to disassociate station: %pM\n",
arsta->addr);
}
@@ -5746,6 +5878,327 @@ exit:
return ret;
}
+static bool ath12k_mac_is_freq_on_mac(struct ath12k_hw_mode_freq_range_arg *freq_range,
+ u32 freq, u8 mac_id)
+{
+ return (freq >= freq_range[mac_id].low_2ghz_freq &&
+ freq <= freq_range[mac_id].high_2ghz_freq) ||
+ (freq >= freq_range[mac_id].low_5ghz_freq &&
+ freq <= freq_range[mac_id].high_5ghz_freq);
+}
+
+static bool
+ath12k_mac_2_freq_same_mac_in_freq_range(struct ath12k_base *ab,
+ struct ath12k_hw_mode_freq_range_arg *freq_range,
+ u32 freq_link1, u32 freq_link2)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_RADIOS; i++) {
+ if (ath12k_mac_is_freq_on_mac(freq_range, freq_link1, i) &&
+ ath12k_mac_is_freq_on_mac(freq_range, freq_link2, i))
+ return true;
+ }
+
+ return false;
+}
+
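A worked standalone example of the range test above: two link frequencies land on the same MAC only if a single per-MAC range contains both of them. The DBS split below is an assumed example, not firmware-reported data.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_RADIOS 2

struct freq_range {
	uint32_t low_2ghz, high_2ghz, low_5ghz, high_5ghz;
};

/* assumed example DBS split: MAC0 owns 2 GHz, MAC1 owns 5/6 GHz */
static const struct freq_range dbs[MAX_RADIOS] = {
	{ 2412, 2484,    0,    0 },
	{    0,    0, 5180, 7115 },
};

static bool freq_on_mac(const struct freq_range *r, uint32_t freq, int mac)
{
	return (freq >= r[mac].low_2ghz && freq <= r[mac].high_2ghz) ||
	       (freq >= r[mac].low_5ghz && freq <= r[mac].high_5ghz);
}

static bool same_mac(uint32_t f1, uint32_t f2)
{
	for (int mac = 0; mac < MAX_RADIOS; mac++)
		if (freq_on_mac(dbs, f1, mac) && freq_on_mac(dbs, f2, mac))
			return true;
	return false;
}

int main(void)
{
	/* 2437 + 5180: different MACs, links can stay active together */
	printf("2437/5180 same mac: %d\n", same_mac(2437, 5180));
	/* 5180 + 5745: both on MAC1, one link must be deactivated */
	printf("5180/5745 same mac: %d\n", same_mac(5180, 5745));
	return 0;
}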
+static bool ath12k_mac_is_hw_dbs_capable(struct ath12k_base *ab)
+{
+ return test_bit(WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+ ab->wmi_ab.svc_map) &&
+ ab->wmi_ab.hw_mode_info.support_dbs;
+}
+
+static bool ath12k_mac_2_freq_same_mac_in_dbs(struct ath12k_base *ab,
+ u32 freq_link1, u32 freq_link2)
+{
+ struct ath12k_hw_mode_freq_range_arg *freq_range;
+
+ if (!ath12k_mac_is_hw_dbs_capable(ab))
+ return true;
+
+ freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[ATH12K_HW_MODE_DBS];
+ return ath12k_mac_2_freq_same_mac_in_freq_range(ab, freq_range,
+ freq_link1, freq_link2);
+}
+
+static bool ath12k_mac_is_hw_sbs_capable(struct ath12k_base *ab)
+{
+ return test_bit(WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+ ab->wmi_ab.svc_map) &&
+ ab->wmi_ab.hw_mode_info.support_sbs;
+}
+
+static bool ath12k_mac_2_freq_same_mac_in_sbs(struct ath12k_base *ab,
+ u32 freq_link1, u32 freq_link2)
+{
+ struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *sbs_uppr_share;
+ struct ath12k_hw_mode_freq_range_arg *sbs_low_share;
+ struct ath12k_hw_mode_freq_range_arg *sbs_range;
+
+ if (!ath12k_mac_is_hw_sbs_capable(ab))
+ return true;
+
+ if (ab->wmi_ab.sbs_lower_band_end_freq) {
+ sbs_uppr_share = info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
+ sbs_low_share = info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
+
+ return ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_low_share,
+ freq_link1, freq_link2) ||
+ ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_uppr_share,
+ freq_link1, freq_link2);
+ }
+
+ sbs_range = info->freq_range_caps[ATH12K_HW_MODE_SBS];
+ return ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_range,
+ freq_link1, freq_link2);
+}
+
+static bool ath12k_mac_freqs_on_same_mac(struct ath12k_base *ab,
+ u32 freq_link1, u32 freq_link2)
+{
+ return ath12k_mac_2_freq_same_mac_in_dbs(ab, freq_link1, freq_link2) &&
+ ath12k_mac_2_freq_same_mac_in_sbs(ab, freq_link1, freq_link2);
+}
+
+static int ath12k_mac_mlo_sta_set_link_active(struct ath12k_base *ab,
+ enum wmi_mlo_link_force_reason reason,
+ enum wmi_mlo_link_force_mode mode,
+ u8 *mlo_vdev_id_lst,
+ u8 num_mlo_vdev,
+ u8 *mlo_inactive_vdev_lst,
+ u8 num_mlo_inactive_vdev)
+{
+ struct wmi_mlo_link_set_active_arg param = {0};
+ u32 entry_idx, entry_offset, vdev_idx;
+ u8 vdev_id;
+
+ param.reason = reason;
+ param.force_mode = mode;
+
+ for (vdev_idx = 0; vdev_idx < num_mlo_vdev; vdev_idx++) {
+ vdev_id = mlo_vdev_id_lst[vdev_idx];
+ entry_idx = vdev_id / 32;
+ entry_offset = vdev_id % 32;
+ if (entry_idx >= WMI_MLO_LINK_NUM_SZ) {
+ ath12k_warn(ab, "Invalid entry_idx %d num_mlo_vdev %d vdev %d",
+ entry_idx, num_mlo_vdev, vdev_id);
+ return -EINVAL;
+ }
+ param.vdev_bitmap[entry_idx] |= 1 << entry_offset;
+ /* update entry number if entry index changed */
+ if (param.num_vdev_bitmap < entry_idx + 1)
+ param.num_vdev_bitmap = entry_idx + 1;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "num_vdev_bitmap %d vdev_bitmap[0] = 0x%x, vdev_bitmap[1] = 0x%x",
+ param.num_vdev_bitmap, param.vdev_bitmap[0], param.vdev_bitmap[1]);
+
+ if (mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
+ for (vdev_idx = 0; vdev_idx < num_mlo_inactive_vdev; vdev_idx++) {
+ vdev_id = mlo_inactive_vdev_lst[vdev_idx];
+ entry_idx = vdev_id / 32;
+ entry_offset = vdev_id % 32;
+ if (entry_idx >= WMI_MLO_LINK_NUM_SZ) {
+ ath12k_warn(ab, "Invalid entry_idx %d num_mlo_vdev %d vdev %d",
+ entry_idx, num_mlo_inactive_vdev, vdev_id);
+ return -EINVAL;
+ }
+ param.inactive_vdev_bitmap[entry_idx] |= 1 << entry_offset;
+ /* update entry number if entry index changed */
+ if (param.num_inactive_vdev_bitmap < entry_idx + 1)
+ param.num_inactive_vdev_bitmap = entry_idx + 1;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "num_vdev_bitmap %d inactive_vdev_bitmap[0] = 0x%x, inactive_vdev_bitmap[1] = 0x%x",
+ param.num_inactive_vdev_bitmap,
+ param.inactive_vdev_bitmap[0],
+ param.inactive_vdev_bitmap[1]);
+ }
+
+ if (mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM ||
+ mode == WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM) {
+ param.num_link_entry = 1;
+ param.link_num[0].num_of_link = num_mlo_vdev - 1;
+ }
+
+ return ath12k_wmi_send_mlo_link_set_active_cmd(ab, &param);
+}
+
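A worked example of the vdev bitmap packing above (entry = vdev_id / 32, bit = vdev_id % 32), with hypothetical vdev IDs:

#include <stdint.h>
#include <stdio.h>

#define WMI_MLO_LINK_NUM_SZ 2

int main(void)
{
	uint32_t bitmap[WMI_MLO_LINK_NUM_SZ] = { 0 };
	uint32_t num_bitmap = 0;
	uint8_t vdevs[] = { 3, 34 };		/* hypothetical vdev ids */

	for (unsigned i = 0; i < sizeof(vdevs); i++) {
		uint32_t idx = vdevs[i] / 32;	/* which 32-bit word */
		uint32_t off = vdevs[i] % 32;	/* bit inside that word */

		bitmap[idx] |= 1u << off;
		if (num_bitmap < idx + 1)	/* track highest used word */
			num_bitmap = idx + 1;
	}

	/* vdev 3 -> word 0 bit 3, vdev 34 -> word 1 bit 2 */
	printf("words %u: 0x%08x 0x%08x\n",
	       num_bitmap, bitmap[0], bitmap[1]);
	return 0;
}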
+static int ath12k_mac_mlo_sta_update_link_active(struct ath12k_base *ab,
+ struct ieee80211_hw *hw,
+ struct ath12k_vif *ahvif)
+{
+ u8 mlo_vdev_id_lst[IEEE80211_MLD_MAX_NUM_LINKS] = {0};
+ u32 mlo_freq_list[IEEE80211_MLD_MAX_NUM_LINKS] = {0};
+ unsigned long links = ahvif->links_map;
+ enum wmi_mlo_link_force_reason reason;
+ struct ieee80211_chanctx_conf *conf;
+ enum wmi_mlo_link_force_mode mode;
+ struct ieee80211_bss_conf *info;
+ struct ath12k_link_vif *arvif;
+ u8 num_mlo_vdev = 0;
+ u8 link_id;
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ /* make sure vdev is created on this device */
+ if (!arvif || !arvif->is_created || arvif->ar->ab != ab)
+ continue;
+
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ conf = wiphy_dereference(hw->wiphy, info->chanctx_conf);
+ mlo_freq_list[num_mlo_vdev] = conf->def.chan->center_freq;
+
+ mlo_vdev_id_lst[num_mlo_vdev] = arvif->vdev_id;
+ num_mlo_vdev++;
+ }
+
+ /* It is not allowed to activate more links than a single device
+ * supports. Something has gone wrong if we reach here.
+ */
+ if (num_mlo_vdev > ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ /* if 2 links are established and both link channels fall on the
+ * same hardware MAC, send command to firmware to deactivate one
+ * of them.
+ */
+ if (num_mlo_vdev == 2 &&
+ ath12k_mac_freqs_on_same_mac(ab, mlo_freq_list[0],
+ mlo_freq_list[1])) {
+ mode = WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM;
+ reason = WMI_MLO_LINK_FORCE_REASON_NEW_CONNECT;
+ return ath12k_mac_mlo_sta_set_link_active(ab, reason, mode,
+ mlo_vdev_id_lst, num_mlo_vdev,
+ NULL, 0);
+ }
+
+ return 0;
+}
+
+static bool ath12k_mac_are_sbs_chan(struct ath12k_base *ab, u32 freq_1, u32 freq_2)
+{
+ if (!ath12k_mac_is_hw_sbs_capable(ab))
+ return false;
+
+ if (ath12k_is_2ghz_channel_freq(freq_1) ||
+ ath12k_is_2ghz_channel_freq(freq_2))
+ return false;
+
+ return !ath12k_mac_2_freq_same_mac_in_sbs(ab, freq_1, freq_2);
+}
+
+static bool ath12k_mac_are_dbs_chan(struct ath12k_base *ab, u32 freq_1, u32 freq_2)
+{
+ if (!ath12k_mac_is_hw_dbs_capable(ab))
+ return false;
+
+ return !ath12k_mac_2_freq_same_mac_in_dbs(ab, freq_1, freq_2);
+}
+
+static int ath12k_mac_select_links(struct ath12k_base *ab,
+ struct ieee80211_vif *vif,
+ struct ieee80211_hw *hw,
+ u16 *selected_links)
+{
+ unsigned long useful_links = ieee80211_vif_usable_links(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ u8 num_useful_links = hweight_long(useful_links);
+ struct ieee80211_chanctx_conf *chanctx;
+ struct ath12k_link_vif *assoc_arvif;
+ u32 assoc_link_freq, partner_freq;
+ u16 sbs_links = 0, dbs_links = 0;
+ struct ieee80211_bss_conf *info;
+ struct ieee80211_channel *chan;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ u8 link_id;
+
+ /* activate all useful links if less than max supported */
+ if (num_useful_links <= ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE) {
+ *selected_links = useful_links;
+ return 0;
+ }
+
+ /* only in station mode can we get here, so it's safe
+ * to use ap_addr
+ */
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+ if (!sta) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to find sta with addr %pM\n", vif->cfg.ap_addr);
+ return -EINVAL;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+ assoc_arvif = wiphy_dereference(hw->wiphy, ahvif->link[ahsta->assoc_link_id]);
+ info = ath12k_mac_get_link_bss_conf(assoc_arvif);
+ chanctx = rcu_dereference(info->chanctx_conf);
+ assoc_link_freq = chanctx->def.chan->center_freq;
+ rcu_read_unlock();
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "assoc link %u freq %u\n",
+ assoc_arvif->link_id, assoc_link_freq);
+
+ /* assoc link is already activated and has to be kept active,
+ * only need to select a partner link from others.
+ */
+ useful_links &= ~BIT(assoc_arvif->link_id);
+ for_each_set_bit(link_id, &useful_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ info = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+ if (!info) {
+ ath12k_warn(ab, "failed to get link info for link: %u\n",
+ link_id);
+ return -ENOLINK;
+ }
+
+ chan = info->chanreq.oper.chan;
+ if (!chan) {
+ ath12k_warn(ab, "failed to get chan for link: %u\n", link_id);
+ return -EINVAL;
+ }
+
+ partner_freq = chan->center_freq;
+ if (ath12k_mac_are_sbs_chan(ab, assoc_link_freq, partner_freq)) {
+ sbs_links |= BIT(link_id);
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "new SBS link %u freq %u\n",
+ link_id, partner_freq);
+ continue;
+ }
+
+ if (ath12k_mac_are_dbs_chan(ab, assoc_link_freq, partner_freq)) {
+ dbs_links |= BIT(link_id);
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "new DBS link %u freq %u\n",
+ link_id, partner_freq);
+ continue;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "non DBS/SBS link %u freq %u\n",
+ link_id, partner_freq);
+ }
+
+ /* choose the first candidate no matter how many are in the list */
+ if (sbs_links)
+ link_id = __ffs(sbs_links);
+ else if (dbs_links)
+ link_id = __ffs(dbs_links);
+ else
+ link_id = ffs(useful_links) - 1;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "select partner link %u\n", link_id);
+
+ *selected_links = BIT(assoc_arvif->link_id) | BIT(link_id);
+
+ return 0;
+}
+
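A compact sketch of the final preference order above: an SBS-capable partner wins over a DBS one, otherwise the first remaining usable link is taken. Kernel __ffs()/ffs() are modeled with GCC builtins, and the bitmaps below are made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t useful = 0x0e;	/* links 1,2,3 usable; link 0 is assoc */
	uint16_t sbs = 0x08, dbs = 0x06;	/* hypothetical classes */
	unsigned assoc_link = 0, partner;

	/* same preference order as the code above: SBS > DBS > any */
	if (sbs)
		partner = __builtin_ctz(sbs);		/* kernel __ffs */
	else if (dbs)
		partner = __builtin_ctz(dbs);
	else
		partner = __builtin_ffs(useful) - 1;	/* kernel ffs - 1 */

	printf("selected links 0x%x\n",
	       (1u << assoc_link) | (1u << partner));	/* prints 0x9 */
	return 0;
}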
static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -5755,10 +6208,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_base *prev_ab = NULL, *ab;
struct ath12k_link_vif *arvif;
struct ath12k_link_sta *arsta;
unsigned long valid_links;
- u8 link_id = 0;
+ u16 selected_links = 0;
+ u8 link_id = 0, i;
+ struct ath12k *ar;
int ret;
lockdep_assert_wiphy(hw->wiphy);
@@ -5802,6 +6258,17 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
* link sta
*/
if (sta->mlo) {
+ /* For station mode, arvif->is_sta_assoc_link has been set when
+ * vdev starts. Make sure the arvif/arsta pair have same setting
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !arsta->arvif->is_sta_assoc_link) {
+ ath12k_hw_warn(ah, "failed to verify assoc link setting with link id %u\n",
+ link_id);
+ ret = -EINVAL;
+ goto exit;
+ }
+
arsta->is_assoc_link = true;
ahsta->assoc_link_id = link_id;
}
@@ -5817,8 +6284,24 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
* about to move to the associated state.
*/
if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
- old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC)
- ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif));
+ old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) {
+ /* TODO: for now only do link selection for single device
+ * MLO case. Other cases would be handled in the future.
+ */
+ ab = ah->radio[0].ab;
+ if (ab->ag->num_devices == 1) {
+ ret = ath12k_mac_select_links(ab, vif, hw, &selected_links);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to get selected links: %d\n", ret);
+ goto exit;
+ }
+ } else {
+ selected_links = ieee80211_vif_usable_links(vif);
+ }
+
+ ieee80211_set_active_links(vif, selected_links);
+ }
/* Handle all the other state transitions in generic way */
valid_links = ahsta->links_map;
@@ -5842,6 +6325,24 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
}
}
+ if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
+ old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) {
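+		/* for_each_ar() iterates pdevs, and multiple pdevs may share
+		 * one ath12k_base; prev_ab ensures the update is issued only
+		 * once per device.
+		 */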
+ for_each_ar(ah, ar, i) {
+ ab = ar->ab;
+ if (prev_ab == ab)
+ continue;
+
+ ret = ath12k_mac_mlo_sta_update_link_active(ab, hw, ahvif);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to update link active state on connect %d\n",
+ ret);
+ goto exit;
+ }
+
+ prev_ab = ab;
+ }
+ }
/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST:
* Remove the station from driver (handle ML sta here since that
* needs special handling. Normal sta will be handled in generic
@@ -6475,7 +6976,7 @@ static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
- if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ if (cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
if (ht_cap_info)
@@ -6484,7 +6985,7 @@ static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
rate_cap_rx_chainmask);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
(ar->ab->hw_params->single_pdev_only ||
!ar->supports_6ghz)) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
@@ -6661,6 +7162,8 @@ static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap,
switch (iftype) {
case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[2] &=
+ ~IEEE80211_HE_MAC_CAP2_BCAST_TWT;
he_cap_elem->phy_cap_info[3] &=
~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
he_cap_elem->phy_cap_info[9] |=
@@ -6893,7 +7396,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
enum nl80211_band band;
int count;
- if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ if (cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
band = NL80211_BAND_2GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
ar->mac.iftype[band],
@@ -6903,7 +7406,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
count);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
band = NL80211_BAND_5GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
ar->mac.iftype[band],
@@ -6913,7 +7416,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
count);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
ar->supports_6ghz) {
band = NL80211_BAND_6GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
@@ -7041,14 +7544,17 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
{
struct ath12k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct ieee80211_tx_info *info;
+ enum hal_encrypt_type enctype;
+ unsigned int mic_len;
dma_addr_t paddr;
int buf_id;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ATH12K_SKB_CB(skb)->ar = ar;
+ skb_cb->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
@@ -7057,12 +7563,15 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
return -ENOSPC;
info = IEEE80211_SKB_CB(skb);
- if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ if ((skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ enctype = ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
+ mic_len = ath12k_dp_rx_crypto_mic_len(ar, enctype);
+ skb_put(skb, mic_len);
}
}
@@ -7073,7 +7582,7 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
goto err_free_idr;
}
- ATH12K_SKB_CB(skb)->paddr = paddr;
+ skb_cb->paddr = paddr;
ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
if (ret) {
@@ -7084,7 +7593,7 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
return 0;
err_unmap_buf:
- dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+ dma_unmap_single(ab->dev, skb_cb->paddr,
skb->len, DMA_TO_DEVICE);
err_free_idr:
spin_lock_bh(&ar->txmgmt_idr_lock);
@@ -7337,12 +7846,18 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ath12k_peer *peer;
unsigned long links_map;
bool is_mcast = false;
+ bool is_dvlan = false;
struct ethhdr *eth;
bool is_prb_rsp;
u16 mcbc_gsn;
u8 link_id;
int ret;
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
@@ -7397,9 +7912,16 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
- if (!vif->valid_links || !is_mcast ||
+	/* Check whether this is a DVLAN frame */
+ if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control))
+ is_dvlan = true;
+
+ if (!vif->valid_links || !is_mcast || is_dvlan ||
test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
- ret = ath12k_dp_tx(ar, arvif, skb, false, 0);
+ ret = ath12k_dp_tx(ar, arvif, skb, false, 0, is_mcast);
if (unlikely(ret)) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->ah->hw, skb);
@@ -7429,11 +7951,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
info_flags);
skb_cb = ATH12K_SKB_CB(msdu_copied);
- info = IEEE80211_SKB_CB(msdu_copied);
skb_cb->link_id = link_id;
/* For open mode, skip peer find logic */
- if (unlikely(ahvif->key_cipher == WMI_CIPHER_NONE))
+ if (unlikely(!ahvif->key_cipher))
goto skip_peer_find;
spin_lock_bh(&tmp_ar->ab->base_lock);
@@ -7452,7 +7973,6 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
if (key) {
skb_cb->cipher = key->cipher;
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
- info->control.hw_key = key;
hdr = (struct ieee80211_hdr *)msdu_copied->data;
if (!ieee80211_has_protected(hdr->frame_control))
@@ -7463,7 +7983,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skip_peer_find:
ret = ath12k_dp_tx(tmp_ar, tmp_arvif,
- msdu_copied, true, mcbc_gsn);
+ msdu_copied, true, mcbc_gsn, is_mcast);
if (unlikely(ret)) {
if (ret == -ENOMEM) {
/* Drops are expected during heavy multicast
@@ -7549,7 +8069,7 @@ static int ath12k_mac_start(struct ath12k *ar)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ab, "failed to enable PMF QOS: (%d\n", ret);
+ ath12k_err(ab, "failed to enable PMF QOS: %d\n", ret);
goto err;
}
@@ -7594,12 +8114,13 @@ static int ath12k_mac_start(struct ath12k *ar)
/* TODO: Do we need to enable ANI? */
- ath12k_reg_update_chan_list(ar);
+ ath12k_reg_update_chan_list(ar, false);
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
ar->num_peers = 0;
ar->allocated_vdev_map = 0;
+ ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
/* Configure monitor status ring with default rx_filter to get rx status
* such as rssi, rx_duration.
@@ -7781,6 +8302,9 @@ static void ath12k_mac_stop(struct ath12k *ar)
wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->rfkill_work);
+ cancel_work_sync(&ar->ab->update_11d_work);
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
@@ -7854,7 +8378,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
return -ENOLINK;
}
- tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (!tx_arvif)
return 0;
@@ -7903,15 +8427,15 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
return ret;
}
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
}
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
}
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
ar->supports_6ghz) {
arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
@@ -7940,7 +8464,7 @@ ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
u32 *hecap_phy_ptr = NULL;
u32 hemode;
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP)
cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
else
cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
@@ -8071,44 +8595,121 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
ath12k_mac_update_vif_offload(&ahvif->deflink);
}
-int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
+static bool ath12k_mac_vif_ap_active_any(struct ath12k_base *ab)
{
- struct ath12k_hw *ah = ar->ah;
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ah->hw;
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
- struct ath12k_wmi_peer_create_arg peer_param = {0};
- struct ieee80211_bss_conf *link_conf;
- u32 param_id, param_value;
- u16 nss;
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ struct ath12k_link_vif *arvif;
int i;
- int ret, vdev_id;
- u8 link_id;
- lockdep_assert_wiphy(hw->wiphy);
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up &&
+ arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+ return true;
+ }
+ }
+ return false;
+}
- /* If no link is active and scan vdev is requested
- * use a default link conf for scan address purpose.
- */
- if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links)
- link_id = ffs(vif->valid_links) - 1;
- else
- link_id = arvif->link_id;
+void ath12k_mac_11d_scan_start(struct ath12k *ar, u32 vdev_id)
+{
+ struct wmi_11d_scan_start_arg arg;
+ int ret;
- link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
- if (!link_conf) {
- ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return -ENOLINK;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
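+	/* Skip the 11d scan when the regdomain was set by the user, an 11d
+	 * scan is already running on another vdev, the firmware lacks 11d
+	 * offload, or any AP vdev is active.
+	 */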
+ if (ar->regdom_set_by_user)
+ goto fin;
+
+ if (ar->vdev_id_11d_scan != ATH12K_11D_INVALID_VDEV_ID)
+ goto fin;
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ goto fin;
+
+ if (ath12k_mac_vif_ap_active_any(ar->ab))
+ goto fin;
+
+ arg.vdev_id = vdev_id;
+ arg.start_interval_msec = 0;
+ arg.scan_period_msec = ATH12K_SCAN_11D_INTERVAL;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac start 11d scan for vdev %d\n", vdev_id);
+
+ ret = ath12k_wmi_send_11d_scan_start_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = vdev_id;
+ if (ar->state_11d == ATH12K_11D_PREPARING)
+ ar->state_11d = ATH12K_11D_RUNNING;
}
- memcpy(arvif->bssid, link_conf->addr, ETH_ALEN);
+fin:
+ if (ar->state_11d == ATH12K_11D_PREPARING) {
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+}
- arvif->ar = ar;
- vdev_id = __ffs64(ab->free_vdev_map);
- arvif->vdev_id = vdev_id;
+void ath12k_mac_11d_scan_stop(struct ath12k *ar)
+{
+ int ret;
+ u32 vdev_id;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ return;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac stop 11d for vdev %d\n",
+ ar->vdev_id_11d_scan);
+
+ if (ar->state_11d == ATH12K_11D_PREPARING) {
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ if (ar->vdev_id_11d_scan != ATH12K_11D_INVALID_VDEV_ID) {
+ vdev_id = ar->vdev_id_11d_scan;
+
+ ret = ath12k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab,
+				    "failed to stop 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = ATH12K_11D_INVALID_VDEV_ID;
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+ }
+}
+
+void ath12k_mac_11d_scan_stop_all(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mac stop soc 11d scan\n");
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ ath12k_mac_11d_scan_stop(ar);
+ }
+}
+
+static void ath12k_mac_determine_vdev_type(struct ieee80211_vif *vif,
+ struct ath12k_vif *ahvif)
+{
ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
switch (vif->type) {
@@ -8132,7 +8733,6 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
break;
case NL80211_IFTYPE_MONITOR:
ahvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
- ar->monitor_vdev_id = vdev_id;
break;
case NL80211_IFTYPE_P2P_DEVICE:
ahvif->vdev_type = WMI_VDEV_TYPE_STA;
@@ -8142,6 +8742,53 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
WARN_ON(1);
break;
}
+}
+
+int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
+{
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ah->hw;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
+ struct ieee80211_bss_conf *link_conf = NULL;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret, vdev_id;
+ u8 link_id;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+	/* With NO_VIRTUAL_MONITOR it is necessary to restrict each radio to
+	 * a single monitor interface.
+	 */
+ if (vif->type == NL80211_IFTYPE_MONITOR && ar->monitor_vdev_created)
+ return -EINVAL;
+
+ link_id = arvif->link_id;
+
+ if (link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+ }
+
+ if (link_conf)
+ memcpy(arvif->bssid, link_conf->addr, ETH_ALEN);
+ else
+ memcpy(arvif->bssid, vif->addr, ETH_ALEN);
+
+ arvif->ar = ar;
+ vdev_id = __ffs64(ab->free_vdev_map);
+ arvif->vdev_id = vdev_id;
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ ar->monitor_vdev_id = vdev_id;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n",
arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype,
@@ -8205,6 +8852,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
arvif->vdev_id, ret);
goto err_peer_del;
}
+ ath12k_mac_11d_scan_stop_all(ar->ab);
break;
case WMI_VDEV_TYPE_STA:
param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
@@ -8243,12 +8891,26 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
arvif->vdev_id, ret);
goto err_peer_del;
}
+
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH12K_11D_PREPARING;
+ }
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ ar->monitor_vdev_created = true;
break;
default:
break;
}
- arvif->txpower = link_conf->txpower;
+ if (link_conf)
+ arvif->txpower = link_conf->txpower;
+ else
+ arvif->txpower = NL80211_TX_POWER_AUTOMATIC;
+
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
@@ -8263,8 +8925,6 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
}
ath12k_dp_vdev_tx_attach(ar, arvif);
- if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
- ath12k_mac_monitor_vdev_create(ar);
return ret;
@@ -8289,6 +8949,11 @@ err_peer_del:
}
err_vdev_del:
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ }
+
ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->num_created_vdevs--;
arvif->is_created = false;
@@ -8483,7 +9148,10 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_reg_info *reg_info;
struct ath12k_link_vif *arvif;
+ struct ath12k_base *ab;
+ struct ath12k *ar;
int i;
lockdep_assert_wiphy(hw->wiphy);
@@ -8502,6 +9170,22 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ ath12k_mac_determine_vdev_type(vif, ahvif);
+
+ for_each_ar(ah, ar, i) {
+ if (!ath12k_wmi_supports_6ghz_cc_ext(ar))
+ continue;
+
+ ab = ar->ab;
+ reg_info = ab->reg_info[ar->pdev_idx];
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "interface added to change reg rules\n");
+ ah->regd_updated = false;
+ ath12k_reg_handle_chan_list(ab, reg_info, ahvif->vdev_type,
+ IEEE80211_REG_UNSET_AP);
+ break;
+ }
+
/* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
* will not know if this interface is an ML vif at this point.
*/
@@ -8566,8 +9250,6 @@ static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arv
if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ar->monitor_vdev_id = -1;
ar->monitor_vdev_created = false;
- } else if (ar->monitor_vdev_created && !ar->monitor_started) {
- ret = ath12k_mac_monitor_vdev_delete(ar);
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
@@ -8798,6 +9480,7 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
*/
ar->rx_channel = ctx->def.chan;
spin_unlock_bh(&ar->data_lock);
+ ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
return 0;
}
@@ -8826,6 +9509,7 @@ static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
*/
ar->rx_channel = NULL;
spin_unlock_bh(&ar->data_lock);
+ ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
}
static enum wmi_phy_mode
@@ -8917,6 +9601,9 @@ ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
* link vdevs which are advertised as partners below
*/
ml_arg->link_add = true;
+
+ ml_arg->assoc_link = arvif->is_sta_assoc_link;
+
partner_info = ml_arg->partner_info;
links = ahvif->links_map;
@@ -9055,6 +9742,15 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
return ret;
}
+ /* TODO: For now we only set TPC power here. However when
+ * channel changes, say CSA, it should be updated again.
+ */
+ if (ath12k_mac_supports_station_tpc(ar, ahvif, chandef)) {
+ ath12k_mac_fill_reg_tpc_info(ar, arvif, ctx);
+ ath12k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
+ &arvif->reg_tpc_info);
+ }
+
ar->num_started_vdevs++;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
ahvif->vif->addr, arvif->vdev_id);
@@ -9257,8 +9953,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
ahvif->link[link_id]);
- if (vif->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
monitor_vif = true;
+ continue;
+ }
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
@@ -9308,7 +10006,7 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
- tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = link_conf->bssid_index;
@@ -9408,16 +10106,26 @@ static int ath12k_start_vdev_delay(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_chanctx_conf *chanctx;
+ struct ieee80211_bss_conf *link_conf;
int ret;
if (WARN_ON(arvif->is_started))
return -EBUSY;
- ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx);
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ab, "failed to get link conf for vdev %u\n", arvif->vdev_id);
+ return -EINVAL;
+ }
+
+ chanctx = wiphy_dereference(ath12k_ar_to_hw(arvif->ar)->wiphy,
+ link_conf->chanctx_conf);
+ ret = ath12k_mac_vdev_start(arvif, chanctx);
if (ret) {
ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
- arvif->chanctx.def.chan->center_freq, ret);
+ chanctx->def.chan->center_freq, ret);
return ret;
}
@@ -9435,6 +10143,391 @@ static int ath12k_start_vdev_delay(struct ath12k *ar,
return 0;
}
+static u8 ath12k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
+{
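+	/* With PSD (power spectral density) limits one power level applies
+	 * per 20 MHz subchannel, so the count doubles with each bandwidth
+	 * step; with regular EIRP limits one level applies per nested
+	 * bandwidth (20/40/80/160/320 MHz).
+	 */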
+ if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 4;
+ case NL80211_CHAN_WIDTH_160:
+ return 8;
+ case NL80211_CHAN_WIDTH_320:
+ return 16;
+ default:
+ return 1;
+ }
+ } else {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ case NL80211_CHAN_WIDTH_320:
+ return 5;
+ default:
+ return 1;
+ }
+ }
+}
+
+static u16 ath12k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def)
+{
+ u16 diff_seq;
+
+	/* Get the center frequency of the lowest channel number in the
+	 * chandef. For example, with the lowest channel being channel 1
+	 * (center frequency 5955):
+	 * bandwidth=40 MHz, center frequency is 5965, diff is 5965 - 5955 = 10.
+	 * bandwidth=80 MHz, center frequency is 5985, diff is 5985 - 5955 = 30.
+	 * bandwidth=160 MHz, center frequency is 6025, diff is 6025 - 5955 = 70.
+	 * bandwidth=320 MHz, center frequency is 6105, diff is 6105 - 5955 = 150.
+	 */
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_320:
+ diff_seq = 150;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ diff_seq = 70;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ diff_seq = 30;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ diff_seq = 10;
+ break;
+ default:
+ diff_seq = 0;
+ }
+
+ return chan_def->center_freq1 - diff_seq;
+}
+
+static u16 ath12k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
+ u16 start_seq, u8 seq)
+{
+ u16 seg_seq;
+
+	/* Get the center frequency of the segment with the given bandwidth.
+	 * start_seq is the center frequency of the lowest channel number,
+	 * and seq 0/1/2/3 selects 20 MHz/40 MHz/80 MHz/160 MHz.
+	 * For example, with the lowest channel being channel 1
+	 * (center frequency 5955):
+	 * bandwidth=20 MHz, center frequency is 5955, diff is 5955 - 5955 = 0.
+	 * bandwidth=40 MHz, center frequency is 5965, diff is 5965 - 5955 = 10.
+	 * bandwidth=80 MHz, center frequency is 5985, diff is 5985 - 5955 = 30.
+	 * bandwidth=160 MHz, center frequency is 6025, diff is 6025 - 5955 = 70.
+	 */
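+	/* The example offsets 0/10/30/70 generalize to 10 * (2^seq - 1) */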
+ seg_seq = 10 * (BIT(seq) - 1);
+ return seg_seq + start_seq;
+}
+
+static void ath12k_mac_get_psd_channel(struct ath12k *ar,
+ u16 step_freq,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ s8 *tx_power)
+{
+	/* Get the center frequency of each 20 MHz subchannel.
+	 * For example, if the channel is 160 MHz wide with center frequency
+	 * 6025, it contains 8 subchannels: 1/5/9/13/17/21/25/29. Channel
+	 * number 1's center frequency, 5955, is the start_freq parameter.
+	 * Parameter i (0..7) steps through the 8 subchannels, so channels
+	 * 1/5/9/13/17/21/25/29 map to i=0/1/2/3/4/5/6/7 and to center
+	 * frequencies 5955/5975/5995/6015/6035/6055/6075/6095; the gap
+	 * between them is 20 MHz, which parameter step_freq carries.
+	 * With the center frequency of each subchannel it is easy to look
+	 * up its struct ieee80211_channel and read max_reg_power.
+	 */
+ *center_freq = *start_freq + i * step_freq;
+ *temp_chan = ieee80211_get_channel(ar->ah->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+static void ath12k_mac_get_eirp_power(struct ath12k *ar,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ struct cfg80211_chan_def *def,
+ s8 *tx_power)
+{
+	/* Get the center frequency of the 20 MHz/40 MHz/80 MHz/160 MHz
+	 * segment, then add 10 to arrive at the center frequency of a
+	 * channel number. For example, when the configured channel number
+	 * is 1:
+	 * bandwidth=40 MHz, center frequency is 5965; plus 10 it is 5975,
+	 * which is channel number 5.
+	 * bandwidth=80 MHz, center frequency is 5985; plus 10 it is 5995,
+	 * which is channel number 9.
+	 * bandwidth=160 MHz, center frequency is 6025; plus 10 it is 6035,
+	 * which is channel number 17.
+	 * With the center frequency of each channel it is easy to look up
+	 * its struct ieee80211_channel and read max_reg_power.
+	 */
+ *center_freq = ath12k_mac_get_seg_freq(def, *start_freq, i);
+
+	/* For 20 MHz the segment center frequency already matches the channel's */
+ if (i != 0)
+ *center_freq += 10;
+
+ *temp_chan = ieee80211_get_channel(ar->ah->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_bss_conf *bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ struct ieee80211_channel *chan, *temp_chan;
+ u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
+ bool is_psd_power = false, is_tpe_present = false;
+ s8 max_tx_power[ATH12K_NUM_PWR_LEVELS],
+ psd_power, tx_power, eirp_power;
+ u16 start_freq, center_freq;
+
+ chan = ctx->def.chan;
+ start_freq = ath12k_mac_get_6ghz_start_frequency(&ctx->def);
+ pwr_reduction = bss_conf->pwr_reduction;
+
+ if (arvif->reg_tpc_info.num_pwr_levels) {
+ is_tpe_present = true;
+ num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
+ } else {
+ num_pwr_levels = ath12k_mac_get_num_pwr_levels(&ctx->def);
+ }
+
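+	/* Four combinations are handled below: the AP's TPE IE may or may
+	 * not be present, and both the local regulatory limit and the AP's
+	 * limit may be PSD or EIRP based. Each level ends up as the minimum
+	 * of the limits that apply.
+	 */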
+ for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
+		/* STA received TPE IE */
+		if (is_tpe_present) {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath12k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ psd_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ /* Connecting AP is not psd power */
+ } else {
+ ath12k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ psd_power = temp_chan->psd;
+					/* Convert PSD power to EIRP power
+					 * based on channel width:
+					 * EIRP = PSD + 10 * log10(BW/MHz).
+					 * 13 ~= 10 * log10(20) and each level
+					 * doubles the bandwidth, adding 3 dB.
+					 */
+ tx_power =
+ min_t(s8, tx_power,
+ psd_power + 13 + pwr_lvl_idx * 3);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ /* local power is not PSD power */
+ } else {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath12k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ reg_tpc_info->tpe[pwr_lvl_idx];
+ /* Connecting AP is not psd power */
+ } else {
+ ath12k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ }
+		/* STA did not receive a TPE IE */
+ } else {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ is_psd_power = true;
+ ath12k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] = psd_power;
+ } else {
+ ath12k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] = tx_power;
+ }
+ }
+
+ if (is_psd_power) {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ eirp_power = eirp_power - pwr_reduction;
+
+			/* If the firmware-updated max TX power is non-zero,
+			 * take the min of the firmware-updated AP TX power
+			 * and the max power derived from the parameters above.
+			 */
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "eirp power : %d firmware report power : %d\n",
+ eirp_power, ar->max_allowed_tx_power);
+			/* Firmware reports a lower max_allowed_tx_power in the
+			 * vdev start response. On 6 GHz the firmware is not
+			 * aware of the EIRP power unless the driver sets it via
+			 * the WMI TPC command. So a radio that does not support
+			 * idle power save can send the maximum calculated EIRP
+			 * power directly to firmware through the TPC command,
+			 * without the min comparison against the vdev start
+			 * response's max_allowed_tx_power.
+			 */
+ if (ar->max_allowed_tx_power && ab->hw_params->idle_ps)
+ eirp_power = min_t(s8,
+ eirp_power,
+ ar->max_allowed_tx_power);
+ } else {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ max_tx_power[pwr_lvl_idx] =
+ max_tx_power[pwr_lvl_idx] - pwr_reduction;
+			/* If the firmware-updated max TX power is non-zero,
+			 * take the min of the firmware-updated AP TX power
+			 * and the max power derived from the parameters above.
+			 */
+ if (ar->max_allowed_tx_power && ab->hw_params->idle_ps)
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ max_tx_power[pwr_lvl_idx],
+ ar->max_allowed_tx_power);
+ }
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
+ max_tx_power[pwr_lvl_idx];
+ }
+
+ reg_tpc_info->num_pwr_levels = num_pwr_levels;
+ reg_tpc_info->is_psd_power = is_psd_power;
+ reg_tpc_info->eirp_power = eirp_power;
+ reg_tpc_info->ap_power_type =
+ ath12k_reg_ap_pwr_convert(bss_conf->power_type);
+}
+
+static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
+ struct ath12k_link_vif *arvif)
+{
+ struct ieee80211_bss_conf *bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ struct ath12k_reg_tpc_power_info *tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_parsed_tpe_eirp *local_non_psd, *reg_non_psd;
+ struct ieee80211_parsed_tpe_psd *local_psd, *reg_psd;
+ struct ieee80211_parsed_tpe *tpe = &bss_conf->tpe;
+ enum wmi_reg_6g_client_type client_type;
+ struct ath12k_reg_info *reg_info;
+ struct ath12k_base *ab = ar->ab;
+ bool psd_valid, non_psd_valid;
+ int i;
+
+ reg_info = ab->reg_info[ar->pdev_idx];
+ client_type = reg_info->client_type;
+
+ local_psd = &tpe->psd_local[client_type];
+ reg_psd = &tpe->psd_reg_client[client_type];
+ local_non_psd = &tpe->max_local[client_type];
+ reg_non_psd = &tpe->max_reg_client[client_type];
+
+ psd_valid = local_psd->valid | reg_psd->valid;
+ non_psd_valid = local_non_psd->valid | reg_non_psd->valid;
+
+	if (!psd_valid && !non_psd_valid) {
+		ath12k_warn(ab,
+			    "no transmit power envelope matches client power type %d\n",
+			    client_type);
+		return;
+	}
+
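+	/* Parsed TPE power values are in 0.5 dB units, hence the halving
+	 * below when storing them.
+	 */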
+ if (psd_valid) {
+ tpc_info->is_psd_power = true;
+
+ tpc_info->num_pwr_levels = max(local_psd->count,
+ reg_psd->count);
+ if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
+ tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+
+ for (i = 0; i < tpc_info->num_pwr_levels; i++) {
+ tpc_info->tpe[i] = min(local_psd->power[i],
+ reg_psd->power[i]) / 2;
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "TPE PSD power[%d] : %d\n",
+ i, tpc_info->tpe[i]);
+ }
+ } else {
+ tpc_info->is_psd_power = false;
+ tpc_info->eirp_power = 0;
+
+ tpc_info->num_pwr_levels = max(local_non_psd->count,
+ reg_non_psd->count);
+ if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
+ tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+
+ for (i = 0; i < tpc_info->num_pwr_levels; i++) {
+ tpc_info->tpe[i] = min(local_non_psd->power[i],
+ reg_non_psd->power[i]) / 2;
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "non PSD power[%d] : %d\n",
+ i, tpc_info->tpe[i]);
+ }
+ }
+}
+
static int
ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -9462,8 +10555,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
if (!ar) {
- ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started",
- vif->addr, link_id);
+ ath12k_hw_warn(ah, "failed to assign chanctx for vif %pM link id %u link vif is already started",
+ vif->addr, link_id);
return -EINVAL;
}
@@ -9473,6 +10566,11 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
+ if (ath12k_wmi_supports_6ghz_cc_ext(ar) &&
+ ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath12k_mac_parse_tx_pwr_env(ar, arvif);
+
arvif->punct_bitmap = ctx->def.punctured;
/* for some targets bss peer must be created before vdev_start */
@@ -9480,7 +10578,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
ahvif->vdev_type != WMI_VDEV_TYPE_AP &&
ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
!ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
- memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
}
@@ -9492,8 +10589,10 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_start(ar);
- if (ret)
+ if (ret) {
+ ath12k_mac_monitor_vdev_delete(ar);
goto out;
+ }
arvif->is_started = true;
goto out;
@@ -9507,9 +10606,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
- ath12k_mac_monitor_start(ar);
-
arvif->is_started = true;
/* TODO: Setup ps and cts/rts protection */
@@ -9572,9 +10668,18 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
arvif->is_started = false;
- if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
- ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
- ath12k_mac_monitor_stop(ar);
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ ar->state_11d != ATH12K_11D_PREPARING) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH12K_11D_PREPARING;
+ }
+
+ if (ar->scan.arvif == arvif && ar->scan.state == ATH12K_SCAN_RUNNING) {
+ ath12k_scan_abort(ar);
+ ar->scan.arvif = NULL;
+ }
}
static int
@@ -10136,6 +11241,14 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
+ if (ar->ab->hw_params->current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ struct wmi_set_current_country_arg arg = {};
+
+ memcpy(&arg.alpha2, ar->alpha2, 2);
+ ath12k_wmi_send_set_current_country_cmd(ar, &arg);
+ }
+
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
@@ -10267,46 +11380,13 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
-static int ath12k_mac_get_fw_stats(struct ath12k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
- unsigned long time_left;
- int ret;
-
- guard(mutex)(&ah->hw_mutex);
-
- if (ah->state != ATH12K_HW_STATE_ON)
- return -ENETDOWN;
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath12k_wmi_send_stats_request_cmd(ar, stats_id, vdev_id, pdev_id);
-
- if (ret) {
- ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
- return ret;
- }
-
- ath12k_dbg(ab, ATH12K_DBG_WMI,
- "get fw stat pdev id %d vdev id %d stats id 0x%x\n",
- pdev_id, vdev_id, stats_id);
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
-
- if (!time_left)
- ath12k_warn(ab, "time out while waiting for get fw stats\n");
-
- return ret;
-}
-
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_fw_stats_req_params params = {};
struct ath12k_link_sta *arsta;
struct ath12k *ar;
s8 signal;
@@ -10348,10 +11428,13 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
/* TODO: Use real NF instead of default one. */
signal = arsta->rssi_comb;
+ params.pdev_id = ar->pdev->pdev_id;
+ params.vdev_id = 0;
+ params.stats_id = WMI_REQUEST_VDEV_STAT;
+
if (!signal &&
ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
- !(ath12k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_VDEV_STAT)))
+ !(ath12k_mac_get_fw_stats(ar, &params)))
signal = arsta->rssi_beacon;
if (signal) {
@@ -10411,7 +11494,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
return -EINVAL;
/* check if any of the links of ML VIF is already started on
- * radio(ar) correpsondig to given scan frequency and use it,
+ * radio(ar) corresponding to given scan frequency and use it,
* if not use deflink(link 0) for scan purpose.
*/
@@ -10595,6 +11678,7 @@ static const struct ieee80211_ops ath12k_ops = {
.assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx,
.unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx,
.switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx,
+ .get_txpower = ath12k_mac_op_get_txpower,
.set_rts_threshold = ath12k_mac_op_set_rts_threshold,
.set_frag_threshold = ath12k_mac_op_set_frag_threshold,
.set_bitrate_mask = ath12k_mac_op_set_bitrate_mask,
@@ -10610,12 +11694,37 @@ static const struct ieee80211_ops ath12k_ops = {
.resume = ath12k_wow_op_resume,
.set_wakeup = ath12k_wow_op_set_wakeup,
#endif
+#ifdef CONFIG_ATH12K_DEBUGFS
+ .vif_add_debugfs = ath12k_debugfs_op_vif_add,
+#endif
CFG80211_TESTMODE_CMD(ath12k_tm_cmd)
#ifdef CONFIG_ATH12K_DEBUGFS
.link_sta_add_debugfs = ath12k_debugfs_link_sta_op_add,
#endif
};
+void ath12k_mac_update_freq_range(struct ath12k *ar,
+ u32 freq_low, u32 freq_high)
+{
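+	/* Widen the pdev's served range to the union of all reported band
+	 * ranges; a zero bound means the band is not supported.
+	 */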
+ if (!(freq_low && freq_high))
+ return;
+
+ if (ar->freq_range.start_freq || ar->freq_range.end_freq) {
+ ar->freq_range.start_freq = min(ar->freq_range.start_freq,
+ MHZ_TO_KHZ(freq_low));
+ ar->freq_range.end_freq = max(ar->freq_range.end_freq,
+ MHZ_TO_KHZ(freq_high));
+ } else {
+ ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
+ ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac pdev %u freq limit updated. New range %u->%u MHz\n",
+ ar->pdev->pdev_id, KHZ_TO_MHZ(ar->freq_range.start_freq),
+ KHZ_TO_MHZ(ar->freq_range.end_freq));
+}
+
static void ath12k_mac_update_ch_list(struct ath12k *ar,
struct ieee80211_supported_band *band,
u32 freq_low, u32 freq_high)
@@ -10630,9 +11739,6 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar,
band->channels[i].center_freq > freq_high)
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
-
- ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
- ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
}
static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
@@ -10640,10 +11746,10 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
struct ath12k_pdev *pdev = ar->pdev;
struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
- if (band == WMI_HOST_WLAN_2G_CAP)
+ if (band == WMI_HOST_WLAN_2GHZ_CAP)
return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
- if (band == WMI_HOST_WLAN_5G_CAP)
+ if (band == WMI_HOST_WLAN_5GHZ_CAP)
return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band);
@@ -10657,18 +11763,19 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
{
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ struct ath12k_base *ab = ar->ab;
+ u32 phy_id, freq_low, freq_high;
struct ath12k_hw *ah = ar->ah;
void *channels;
- u32 phy_id;
BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) +
ARRAY_SIZE(ath12k_5ghz_channels) +
ARRAY_SIZE(ath12k_6ghz_channels)) !=
ATH12K_NUM_CHANS);
- reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+ reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
- if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
channels = kmemdup(ath12k_2ghz_channels,
sizeof(ath12k_2ghz_channels),
GFP_KERNEL);
@@ -10683,17 +11790,25 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->bitrates = ath12k_g_rates;
bands[NL80211_BAND_2GHZ] = band;
- if (ar->ab->hw_params->single_pdev_only) {
- phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ if (ab->hw_params->single_pdev_only) {
+ phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2GHZ_CAP);
+ reg_cap = &ab->hal_reg_cap[phy_id];
}
+
+ freq_low = max(reg_cap->low_2ghz_chan,
+ ab->reg_freq_2ghz.start_freq);
+ freq_high = min(reg_cap->high_2ghz_chan,
+ ab->reg_freq_2ghz.end_freq);
+
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_2ghz_chan,
reg_cap->high_2ghz_chan);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
}
- if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
- if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
+ if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6GHZ_FREQ) {
channels = kmemdup(ath12k_6ghz_channels,
sizeof(ath12k_6ghz_channels), GFP_KERNEL);
if (!channels) {
@@ -10709,13 +11824,21 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
bands[NL80211_BAND_6GHZ] = band;
+
+ freq_low = max(reg_cap->low_5ghz_chan,
+ ab->reg_freq_6ghz.start_freq);
+ freq_high = min(reg_cap->high_5ghz_chan,
+ ab->reg_freq_6ghz.end_freq);
+
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
ah->use_6ghz_regd = true;
}
- if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
+ if (reg_cap->low_5ghz_chan < ATH12K_MIN_6GHZ_FREQ) {
channels = kmemdup(ath12k_5ghz_channels,
sizeof(ath12k_5ghz_channels),
GFP_KERNEL);
@@ -10733,14 +11856,21 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->bitrates = ath12k_a_rates;
bands[NL80211_BAND_5GHZ] = band;
- if (ar->ab->hw_params->single_pdev_only) {
- phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ if (ab->hw_params->single_pdev_only) {
+ phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5GHZ_CAP);
+ reg_cap = &ab->hal_reg_cap[phy_id];
}
+ freq_low = max(reg_cap->low_5ghz_chan,
+ ab->reg_freq_5ghz.start_freq);
+ freq_high = min(reg_cap->high_5ghz_chan,
+ ab->reg_freq_5ghz.end_freq);
+
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
}
}
@@ -10840,13 +11970,18 @@ ath12k_mac_setup_radio_iface_comb(struct ath12k *ar,
comb[0].limits = limits;
comb[0].n_limits = n_limits;
comb[0].max_interfaces = max_interfaces;
- comb[0].num_different_channels = 1;
comb[0].beacon_int_infra_match = true;
comb[0].beacon_int_min_gcd = 100;
- comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
+
+ if (ar->ab->hw_params->single_pdev_only) {
+ comb[0].num_different_channels = 2;
+ } else {
+ comb[0].num_different_channels = 1;
+ comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+ }
return 0;
}
@@ -11071,6 +12206,7 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
for_each_ar(ah, ar, i) {
cancel_work_sync(&ar->regd_update_work);
ath12k_debugfs_unregister(ar);
+ ath12k_fw_stats_reset(ar);
}
ieee80211_unregister_hw(hw);
@@ -11219,6 +12355,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, QUEUE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
+ ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR);
if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
@@ -11347,11 +12484,22 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
goto err_unregister_hw;
}
+ if (ar->ab->hw_params->current_cc_support && ab->new_alpha2[0]) {
+ struct wmi_set_current_country_arg current_cc = {};
+
+ memcpy(&current_cc.alpha2, ab->new_alpha2, 2);
+ memcpy(&ar->alpha2, ab->new_alpha2, 2);
+ ret = ath12k_wmi_send_set_current_country_cmd(ar, &current_cc);
+ if (ret)
+ ath12k_warn(ar->ab,
+					    "failed to set cc code for mac register: %d\n",
+ ret);
+ }
+
+ ath12k_fw_stats_init(ar);
ath12k_debugfs_register(ar);
}
- init_completion(&ar->fw_stats_complete);
-
return 0;
err_unregister_hw:
@@ -11396,6 +12544,7 @@ static void ath12k_mac_setup(struct ath12k *ar)
ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
ar->scan.arvif = NULL;
+ ar->vdev_id_11d_scan = ATH12K_11D_INVALID_VDEV_ID;
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->arvifs);
@@ -11411,6 +12560,7 @@ static void ath12k_mac_setup(struct ath12k *ar)
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->mlo_setup_done);
+ init_completion(&ar->completed_11d_scan);
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work);
@@ -11418,6 +12568,10 @@ static void ath12k_mac_setup(struct ath12k *ar)
wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ ar->monitor_started = false;
}
static int __ath12k_mac_mlo_setup(struct ath12k *ar)
@@ -11572,7 +12726,6 @@ void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag)
int ath12k_mac_register(struct ath12k_hw_group *ag)
{
- struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
int ret;
@@ -11585,8 +12738,6 @@ int ath12k_mac_register(struct ath12k_hw_group *ag)
goto err;
}
- set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
-
return 0;
err:
@@ -11603,12 +12754,9 @@ err:
void ath12k_mac_unregister(struct ath12k_hw_group *ag)
{
- struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
- clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
-
for (i = ag->num_hw - 1; i >= 0; i--) {
ah = ath12k_ag_to_ah(ag, i);
if (!ah)
@@ -11722,6 +12870,7 @@ int ath12k_mac_allocate(struct ath12k_hw_group *ag)
if (!ab)
continue;
+ ath12k_debugfs_pdev_create(ab);
ath12k_mac_set_device_defaults(ab);
total_radio += ab->num_radios;
}
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index ae35b73312bf..cc81b1f5680f 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -33,6 +33,9 @@ struct ath12k_generic_iter {
#define ATH12K_KEEPALIVE_MAX_IDLE 3895
#define ATH12K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+#define ATH12K_PDEV_TX_POWER_INVALID ((u32)-1)
+#define ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS 5000 /* msecs */
+
/* FIXME: should these be in ieee80211.h? */
#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
@@ -51,6 +54,8 @@ struct ath12k_generic_iter {
#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
+#define ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE 2
+
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
@@ -64,8 +69,55 @@ struct ath12k_mac_get_any_chanctx_conf_arg {
struct ieee80211_chanctx_conf *chanctx_conf;
};
+/**
+ * struct ath12k_chan_power_info - TPE containing power info per channel chunk
+ * @chan_cfreq: channel center freq (MHz)
+ * e.g.
+ * channel 37/20 MHz, it is 6135
+ * channel 37/40 MHz, it is 6125
+ * channel 37/80 MHz, it is 6145
+ * channel 37/160 MHz, it is 6185
+ * @tx_power: transmit power (dBm)
+ */
+struct ath12k_chan_power_info {
+ u16 chan_cfreq;
+ s8 tx_power;
+};
+
+/* ath12k handles bandwidths up to 320 MHz, hence at most 16 subchannels */
+#define ATH12K_NUM_PWR_LEVELS 16
+
+/**
+ * struct ath12k_reg_tpc_power_info - regulatory TPC power info
+ * @is_psd_power: is PSD power or not
+ * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD
+ * @ap_power_type: type of power (SP/LPI/VLP)
+ * @num_pwr_levels: number of power levels
+ * @reg_max: Array of maximum TX power (dBm) per PSD value
+ * @ap_constraint_power: AP constraint power (dBm)
+ * @tpe: TPE values processed from TPE IE
+ * @chan_power_info: power info to send to firmware
+ */
+struct ath12k_reg_tpc_power_info {
+ bool is_psd_power;
+ u8 eirp_power;
+ enum wmi_reg_6g_ap_type ap_power_type;
+ u8 num_pwr_levels;
+ u8 reg_max[ATH12K_NUM_PWR_LEVELS];
+ u8 ap_constraint_power;
+ s8 tpe[ATH12K_NUM_PWR_LEVELS];
+ struct ath12k_chan_power_info chan_power_info[ATH12K_NUM_PWR_LEVELS];
+};
+
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
+#define ATH12K_SCAN_11D_INTERVAL 600000
+#define ATH12K_11D_INVALID_VDEV_ID 0xFFFF
+
+void ath12k_mac_11d_scan_start(struct ath12k *ar, u32 vdev_id);
+void ath12k_mac_11d_scan_stop(struct ath12k *ar);
+void ath12k_mac_11d_scan_stop_all(struct ath12k_base *ab);
+
void ath12k_mac_destroy(struct ath12k_hw_group *ag);
void ath12k_mac_unregister(struct ath12k_hw_group *ag);
int ath12k_mac_register(struct ath12k_hw_group *ag);
@@ -115,4 +167,10 @@ struct ieee80211_bss_conf *ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *
struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 link_id);
+int ath12k_mac_get_fw_stats(struct ath12k *ar, struct ath12k_fw_stats_req_params *param);
+void ath12k_mac_update_freq_range(struct ath12k *ar,
+ u32 freq_low, u32 freq_high);
+void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index 2f6d14382ed7..08f44baf182a 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -285,8 +285,11 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
break;
}
- if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
+ if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
+ set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
queue_work(ab->workqueue_aux, &ab->reset_work);
+ }
break;
default:
break;
@@ -379,7 +382,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
mhi_ctrl->iova_start = 0;
- mhi_ctrl->iova_stop = 0xffffffff;
+ mhi_ctrl->iova_stop = ab_pci->dma_mask;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index b474696ac6d8..1f3cfd9b89fd 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -17,7 +17,7 @@
#include "debug.h"
#define ATH12K_PCI_BAR_NUM 0
-#define ATH12K_PCI_DMA_MASK 32
+#define ATH12K_PCI_DMA_MASK 36
#define ATH12K_PCI_IRQ_CE0_OFFSET 3
@@ -292,10 +292,10 @@ static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);
- val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
val |= GCC_GCC_PCIE_HOT_RST_VAL;
- ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
- val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST(ab), val);
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
@@ -600,7 +600,8 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
ab->hw_params->ring_mask->rx_wbm_rel[i] ||
ab->hw_params->ring_mask->reo_status[i] ||
ab->hw_params->ring_mask->host2rxdma[i] ||
- ab->hw_params->ring_mask->rx_mon_dest[i]) {
+ ab->hw_params->ring_mask->rx_mon_dest[i] ||
+ ab->hw_params->ring_mask->rx_mon_status[i]) {
num_irq = 1;
}
@@ -718,7 +719,7 @@ static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
- if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) {
+ if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MULTI_QRTR_ID)) {
ab_pci->qmi_instance =
u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
u32_encode_bits(bus->number, BUS_NUMBER_MASK);
@@ -877,13 +878,9 @@ static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
goto disable_device;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
- if (ret) {
- ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
- ATH12K_PCI_DMA_MASK, ret);
- goto release_region;
- }
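+	/* Streaming DMA may use the full ATH12K_PCI_DMA_MASK range while
+	 * coherent allocations stay below 4 GB.
+	 */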
+ ab_pci->dma_mask = DMA_BIT_MASK(ATH12K_PCI_DMA_MASK);
+ dma_set_mask(&pdev->dev, ab_pci->dma_mask);
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
pci_set_master(pdev);
@@ -1472,7 +1469,7 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
ath12k_pci_msi_enable(ab_pci);
- if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
+ if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MULTI_QRTR_ID))
ath12k_pci_update_qrtr_node_id(ab);
ret = ath12k_mhi_start(ab_pci);
@@ -1491,6 +1488,9 @@ void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
+ return;
+
/* restore aspm in case firmware bootup fails */
ath12k_pci_aspm_restore(ab_pci);
@@ -1710,12 +1710,12 @@ err_hal_srng_deinit:
err_mhi_unregister:
ath12k_mhi_unregister(ab_pci);
-err_pci_msi_free:
- ath12k_pci_msi_free(ab_pci);
-
err_irq_affinity_cleanup:
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
+err_pci_msi_free:
+ ath12k_pci_msi_free(ab_pci);
+
err_pci_free_region:
ath12k_pci_free_region(ab_pci);
@@ -1734,8 +1734,6 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab, false);
- ath12k_qmi_deinit_service(ab);
- ath12k_core_hw_group_unassign(ab);
goto qmi_fail;
}
@@ -1743,9 +1741,10 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
- ath12k_core_deinit(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
qmi_fail:
+ ath12k_core_deinit(ab);
ath12k_fw_unmap(ab);
ath12k_mhi_unregister(ab_pci);
@@ -1758,13 +1757,34 @@ qmi_fail:
ath12k_core_free(ab);
}
+static void ath12k_pci_hw_group_power_down(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_pci_power_down(ab, false);
+ }
+
+ mutex_unlock(&ag->mutex);
+}
+
static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
struct ath12k_base *ab = pci_get_drvdata(pdev);
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath12k_pci_power_down(ab, false);
+ ath12k_pci_hw_group_power_down(ab->ag);
}
static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
@@ -1831,7 +1851,7 @@ static struct pci_driver ath12k_pci_driver = {
.driver.pm = &ath12k_pci_pm_ops,
};
-static int ath12k_pci_init(void)
+int ath12k_pci_init(void)
{
int ret;
@@ -1844,14 +1864,8 @@ static int ath12k_pci_init(void)
return 0;
}
-module_init(ath12k_pci_init);
-static void ath12k_pci_exit(void)
+void ath12k_pci_exit(void)
{
pci_unregister_driver(&ath12k_pci_driver);
}
-
-module_exit(ath12k_pci_exit);
-
-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index 31584a7ad80e..d1ec8aad7f6c 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
@@ -28,7 +28,9 @@
#define PCIE_PCIE_PARF_LTSSM 0x1e081b0
#define PARM_LTSSM_VALUE 0x111
-#define GCC_GCC_PCIE_HOT_RST 0x1e38338
+#define GCC_GCC_PCIE_HOT_RST(ab) \
+ ((ab)->hw_params->regs->gcc_gcc_pcie_hot_rst)
+
#define GCC_GCC_PCIE_HOT_RST_VAL 0x10
#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228
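With the offset behind hw_params, each chip family can carry its own hot-reset register address. A hypothetical excerpt of the per-chip table (field placement assumed; only the value previously hard-coded here is known):

	struct ath12k_hw_regs {
		/* ... */
		u32 gcc_gcc_pcie_hot_rst;
	};

	static const struct ath12k_hw_regs example_regs = {
		/* ... */
		.gcc_gcc_pcie_hot_rst = 0x1e38338,	/* old hard-coded offset */
	};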
@@ -116,6 +118,7 @@ struct ath12k_pci {
unsigned long irq_flags;
const struct ath12k_pci_ops *pci_ops;
u32 qmi_instance;
+ u64 dma_mask;
};
static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
@@ -145,4 +148,6 @@ void ath12k_pci_stop(struct ath12k_base *ab);
int ath12k_pci_start(struct ath12k_base *ab);
int ath12k_pci_power_up(struct ath12k_base *ab);
void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend);
+int ath12k_pci_init(void);
+void ath12k_pci_exit(void);
#endif /* ATH12K_PCI_H */
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
index 792cca8a3fb1..ec7236bbccc0 100644
--- a/drivers/net/wireless/ath/ath12k/peer.c
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -383,6 +383,9 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
arvif->ast_idx = peer->hw_peer_id;
}
+ if (vif->type == NL80211_IFTYPE_AP)
+ peer->ucast_ra_only = true;
+
if (sta) {
ahsta = ath12k_sta_to_ahsta(sta);
arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index 5870ee11a8c7..f3a5e054d2b5 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PEER_H
@@ -62,6 +62,7 @@ struct ath12k_peer {
/* for reference to ath12k_link_sta */
u8 link_id;
+ bool ucast_ra_only;
};
struct ath12k_ml_peer {
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 348dbc81bad8..99e1fb2910d0 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -11,6 +11,8 @@
#include "debug.h"
#include <linux/of.h>
#include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04
@@ -2168,10 +2170,12 @@ int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
req.bdf_support_valid = 1;
req.bdf_support = 1;
- req.m3_support_valid = 1;
- req.m3_support = 1;
- req.m3_cache_support_valid = 1;
- req.m3_cache_support = 1;
+ if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_driver) {
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+ }
req.cal_done_valid = 1;
req.cal_done = ab->qmi.cal_done;
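The m3_loader checks gate M3 handling on a new hw_params field; its declaration is assumed to be along these lines (hw.h):

	/* Assumed declaration backing hw_params->fw.m3_loader: either the
	 * driver downloads the M3 firmware itself, or an external remoteproc
	 * loads it and the host skips the related QMI fields.
	 */
	enum ath12k_m3_fw_loader {
		ath12k_m3_fw_loader_driver,
		ath12k_m3_fw_loader_remoteproc,
	};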
@@ -2264,6 +2268,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
+ if (resp.single_chip_mlo_support_valid && resp.single_chip_mlo_support)
+ ab->single_chip_mlo_support = true;
+
if (!resp.num_phy_valid) {
ret = -ENODATA;
goto out;
@@ -2272,7 +2279,8 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
ab->qmi.num_radios = resp.num_phy;
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ "phy capability resp valid %d single_chip_mlo_support %d valid %d num_phy %d valid %d board_id %d\n",
+ resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support,
resp.num_phy_valid, resp.num_phy,
resp.board_id_valid, resp.board_id);
@@ -2376,7 +2384,8 @@ int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
* failure to firmware and firmware then request multiple blocks of
* small chunk size memory.
*/
- if (ab->qmi.target_mem_delayed) {
+ if (!test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
+ ab->qmi.target_mem_delayed) {
delayed = true;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
@@ -2434,12 +2443,35 @@ out:
return ret;
}
+void ath12k_qmi_reset_mlo_mem(struct ath12k_hw_group *ag)
+{
+ struct target_mem_chunk *mlo_chunk;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (!ag->mlo_mem.init_done || ag->num_started)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ag->mlo_mem.chunk); i++) {
+ mlo_chunk = &ag->mlo_mem.chunk[i];
+
+ if (mlo_chunk->v.addr)
+ /* TODO: Mode 0 recovery is the default mode hence resetting the
+ * whole memory region for now. Once Mode 1 support is added, this
+ * needs to be handled properly
+ */
+ memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
+ }
+}
+
static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
struct target_mem_chunk *chunk,
int idx)
{
struct ath12k_hw_group *ag = ab->ag;
struct target_mem_chunk *mlo_chunk;
+ bool fixed_mem;
lockdep_assert_held(&ag->mutex);
@@ -2451,8 +2483,13 @@ static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
return;
}
+ fixed_mem = test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
mlo_chunk = &ag->mlo_mem.chunk[idx];
- if (mlo_chunk->v.addr) {
+
+ if (fixed_mem && mlo_chunk->v.ioaddr) {
+ iounmap(mlo_chunk->v.ioaddr);
+ mlo_chunk->v.ioaddr = NULL;
+ } else if (mlo_chunk->v.addr) {
dma_free_coherent(ab->dev,
mlo_chunk->size,
mlo_chunk->v.addr,
@@ -2462,7 +2499,10 @@ static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
mlo_chunk->paddr = 0;
mlo_chunk->size = 0;
- chunk->v.addr = NULL;
+ if (fixed_mem)
+ chunk->v.ioaddr = NULL;
+ else
+ chunk->v.addr = NULL;
chunk->paddr = 0;
chunk->size = 0;
}
@@ -2473,19 +2513,24 @@ static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
int i, mlo_idx;
for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
- if (!ab->qmi.target_mem[i].v.addr)
- continue;
-
if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
ath12k_qmi_free_mlo_mem_chunk(ab,
&ab->qmi.target_mem[i],
mlo_idx++);
} else {
- dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].prev_size,
- ab->qmi.target_mem[i].v.addr,
- ab->qmi.target_mem[i].paddr);
- ab->qmi.target_mem[i].v.addr = NULL;
+ if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
+ ab->qmi.target_mem[i].v.ioaddr) {
+ iounmap(ab->qmi.target_mem[i].v.ioaddr);
+ ab->qmi.target_mem[i].v.ioaddr = NULL;
+ } else {
+ if (!ab->qmi.target_mem[i].v.addr)
+ continue;
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].prev_size,
+ ab->qmi.target_mem[i].v.addr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].v.addr = NULL;
+ }
}
}
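The v.addr / v.ioaddr split works because the chunk descriptor keeps both views in a union; assumed layout (qmi.h), with prev_size as used by the dma_free_coherent() call above:

	struct target_mem_chunk {
		u32 size;
		u32 type;
		u32 prev_size;
		dma_addr_t paddr;
		union {
			void *addr;		/* dma_alloc_coherent() */
			void __iomem *ioaddr;	/* ioremap() of reserved mem */
		} v;
	};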
@@ -2638,6 +2683,130 @@ err:
return ret;
}
+static int ath12k_qmi_assign_target_mem_chunk(struct ath12k_base *ab)
+{
+ struct reserved_mem *rmem;
+ size_t avail_rmem_size;
+ int i, idx, ret;
+
+ for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+ switch (ab->qmi.target_mem[i].type) {
+ case HOST_DDR_REGION_TYPE:
+ rmem = ath12k_core_get_reserved_mem(ab, 0);
+ if (!rmem) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ avail_rmem_size = rmem->size;
+ if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "failed to assign mem type %u req size %u avail size %zu\n",
+ ab->qmi.target_mem[i].type,
+ ab->qmi.target_mem[i].size,
+ avail_rmem_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ab->qmi.target_mem[idx].paddr = rmem->base;
+ ab->qmi.target_mem[idx].v.ioaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].v.ioaddr) {
+ ret = -EIO;
+ goto out;
+ }
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ rmem = ath12k_core_get_reserved_mem(ab, 0);
+ if (!rmem) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ avail_rmem_size = rmem->size - ab->hw_params->bdf_addr_offset;
+ if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "failed to assign mem type %u req size %u avail size %zu\n",
+ ab->qmi.target_mem[i].type,
+ ab->qmi.target_mem[i].size,
+ avail_rmem_size);
+ ret = -EINVAL;
+ goto out;
+ }
+ ab->qmi.target_mem[idx].paddr =
+ rmem->base + ab->hw_params->bdf_addr_offset;
+ ab->qmi.target_mem[idx].v.ioaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].v.ioaddr) {
+ ret = -EIO;
+ goto out;
+ }
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case CALDB_MEM_REGION_TYPE:
+ /* Cold boot calibration is not enabled in Ath12k. Hence,
+ * assign paddr = 0.
+ * Once cold boot calibration is enabled add support to
+ * assign reserved memory from DT.
+ */
+ ab->qmi.target_mem[idx].paddr = 0;
+ ab->qmi.target_mem[idx].v.ioaddr = NULL;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case M3_DUMP_REGION_TYPE:
+ rmem = ath12k_core_get_reserved_mem(ab, 1);
+ if (!rmem) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ avail_rmem_size = rmem->size;
+ if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "failed to assign mem type %u req size %u avail size %zu\n",
+ ab->qmi.target_mem[i].type,
+ ab->qmi.target_mem[i].size,
+ avail_rmem_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ab->qmi.target_mem[idx].paddr = rmem->base;
+ ab->qmi.target_mem[idx].v.ioaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].v.ioaddr) {
+ ret = -EIO;
+ goto out;
+ }
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ default:
+ ath12k_warn(ab, "qmi ignore invalid mem req type %u\n",
+ ab->qmi.target_mem[i].type);
+ break;
+ }
+ }
+ ab->qmi.mem_seg_count = idx;
+
+ return 0;
+out:
+ ath12k_qmi_free_target_mem_chunk(ab);
+ return ret;
+}
+
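ath12k_core_get_reserved_mem() is not shown in this diff; it presumably resolves the idx-th "memory-region" phandle of the wifi device-tree node. A sketch under that assumption:

	#include <linux/of.h>
	#include <linux/of_reserved_mem.h>

	/* Sketch of the assumed helper: index -> reserved_mem region. */
	static struct reserved_mem *example_get_reserved_mem(struct device *dev,
							     int index)
	{
		struct device_node *node;
		struct reserved_mem *rmem;

		node = of_parse_phandle(dev->of_node, "memory-region", index);
		if (!node)
			return NULL;

		rmem = of_reserved_mem_lookup(node);
		of_node_put(node);

		return rmem;
	}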
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
@@ -2939,6 +3108,9 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_remoteproc)
+ return;
+
if (!m3_mem->vaddr)
return;
@@ -3019,15 +3191,16 @@ int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
struct qmi_txn txn;
int ret = 0;
- ret = ath12k_qmi_m3_load(ab);
- if (ret) {
- ath12k_err(ab, "failed to load m3 firmware: %d", ret);
- return ret;
+ if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_driver) {
+ ret = ath12k_qmi_m3_load(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to load m3 firmware: %d", ret);
+ return ret;
+ }
+ req.addr = m3_mem->paddr;
+ req.size = m3_mem->size;
}
- req.addr = m3_mem->paddr;
- req.size = m3_mem->size;
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -3477,11 +3650,20 @@ static void ath12k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
- ret = ath12k_qmi_alloc_target_mem_chunk(ab);
- if (ret) {
- ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
- ret);
- return;
+ if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags)) {
+ ret = ath12k_qmi_assign_target_mem_chunk(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to assign qmi target memory: %d\n",
+ ret);
+ return;
+ }
+ } else {
+ ret = ath12k_qmi_alloc_target_mem_chunk(ab);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
+ ret);
+ return;
+ }
}
ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 45d7c3fcafdd..96e6c3daecfe 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_QMI_H
@@ -21,6 +21,7 @@
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850 0x1
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274 0x07
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ5332 0x2
#define ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH12K_QMI_RESP_LEN_MAX 8192
#define ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
@@ -41,6 +42,7 @@
#define ATH12K_BOARD_ID_DEFAULT 0xFF
struct ath12k_base;
+struct ath12k_hw_group;
enum ath12k_qmi_file_type {
ATH12K_QMI_FILE_TYPE_BDF_GOLDEN = 0,
@@ -621,5 +623,6 @@ void ath12k_qmi_deinit_service(struct ath12k_base *ab);
int ath12k_qmi_init_service(struct ath12k_base *ab);
void ath12k_qmi_free_resource(struct ath12k_base *ab);
void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
+void ath12k_qmi_reset_mlo_mem(struct ath12k_hw_group *ag);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index 439d61f284d8..2598b39d5d7e 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
#include "debug.h"
+#include "mac.h"
/* World regdom to be used in case default regd from fw is unavailable */
#define ATH12K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
@@ -48,6 +49,7 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
+ struct wmi_set_current_country_arg current_arg = {};
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
int ret, i;
@@ -55,6 +57,24 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "driver initiated regd update\n");
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return;
+
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_reg_update_chan_list(ar, true);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to update chan list for pdev %u, ret %d\n",
+ i, ret);
+ break;
+ }
+ }
+ return;
+ }
+
/* Currently supporting only General User Hints. Cell base user
* hints to be handled later.
* Hints from other sources like Core, Beacons are not expected for
@@ -77,27 +97,38 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
return;
}
- /* Set the country code to the firmware and wait for
- * the WMI_REG_CHAN_LIST_CC EVENT for updating the
- * reg info
- */
- arg.flags = ALPHA_IS_SET;
- memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
- arg.cc_info.alpha2[2] = 0;
-
/* Allow fresh updates to wiphy regd */
ah->regd_updated = false;
/* Send the reg change request to all the radios */
for_each_ar(ah, ar, i) {
- ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
- if (ret)
- ath12k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ if (ar->ab->hw_params->current_cc_support) {
+ memcpy(&current_arg.alpha2, request->alpha2, 2);
+ memcpy(&ar->alpha2, &current_arg.alpha2, 2);
+ ret = ath12k_wmi_send_set_current_country_cmd(ar, &current_arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set current country code: %d\n", ret);
+ } else {
+ arg.flags = ALPHA_IS_SET;
+ memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
+ arg.cc_info.alpha2[2] = 0;
+
+ ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set INIT country code: %d\n", ret);
+ }
+
+ wiphy_lock(wiphy);
+ ath12k_mac_11d_scan_stop(ar);
+ wiphy_unlock(wiphy);
+
+ ar->regdom_set_by_user = true;
}
}
-int ath12k_reg_update_chan_list(struct ath12k *ar)
+int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait)
{
struct ieee80211_supported_band **bands;
struct ath12k_wmi_scan_chan_list_arg *arg;
@@ -106,7 +137,35 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
struct ath12k_wmi_channel_arg *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret;
+ int i, ret, left;
+
+ if (wait && ar->state_11d == ATH12K_11D_RUNNING) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH12K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH12K_11D_IDLE;
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if (wait &&
+ (ar->scan.state == ATH12K_SCAN_STARTING ||
+ ar->scan.state == ATH12K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH12K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
+
+ if (ar->ah->state == ATH12K_HW_STATE_RESTARTING)
+ return 0;
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
@@ -206,15 +265,57 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ u32 phy_id, freq_low, freq_high, supported_bands;
struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
struct ieee80211_hw *hw = ah->hw;
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
- int i;
ab = ar->ab;
+ supported_bands = ar->pdev->cap.supported_bands;
+ reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
+
+ /* Possible that due to reg change, current limits for supported
+ * frequency changed. Update it. As a first step, reset the
+ * previous values and then compute and set the new values.
+ */
+ ar->freq_range.start_freq = 0;
+ ar->freq_range.end_freq = 0;
+
+ if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
+ if (ab->hw_params->single_pdev_only) {
+ phy_id = ar->pdev->cap.band[WMI_HOST_WLAN_2GHZ_CAP].phy_id;
+ reg_cap = &ab->hal_reg_cap[phy_id];
+ }
+
+ freq_low = max(reg_cap->low_2ghz_chan, ab->reg_freq_2ghz.start_freq);
+ freq_high = min(reg_cap->high_2ghz_chan, ab->reg_freq_2ghz.end_freq);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) {
+ if (ab->hw_params->single_pdev_only) {
+ phy_id = ar->pdev->cap.band[WMI_HOST_WLAN_5GHZ_CAP].phy_id;
+ reg_cap = &ab->hal_reg_cap[phy_id];
+ }
+
+ freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_5ghz.start_freq);
+ freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_5ghz.end_freq);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) {
+ freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_6ghz.start_freq);
+ freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_6ghz.end_freq);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+ }
+
/* If one of the radios within ah has already updated the regd for
* the wiphy, then avoid setting regd again
*/
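The max()/min() pairs above intersect what the PHY reports with what the current regdomain allows; a worked example with hypothetical numbers:

	/* hal_reg_cap: PHY spans 4900-7100 MHz; regdomain rules span
	 * 5150-5895 MHz:
	 *   freq_low  = max(4900, 5150) = 5150 MHz
	 *   freq_high = min(7100, 5895) = 5895 MHz
	 * so the radio advertises only the overlap.
	 */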
@@ -275,11 +376,7 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
goto err;
}
- rtnl_lock();
- wiphy_lock(hw->wiphy);
- ret = regulatory_set_wiphy_regd_sync(hw->wiphy, regd_copy);
- wiphy_unlock(hw->wiphy);
- rtnl_unlock();
+ ret = regulatory_set_wiphy_regd(hw->wiphy, regd_copy);
kfree(regd_copy);
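Switching from regulatory_set_wiphy_regd_sync() drops the RTNL/wiphy-lock dance and pairs with the new NL80211_REGDOM_SET_BY_DRIVER branch in ath12k_reg_notifier(); the resulting flow, as understood from this series:

	/*
	 * ath12k_regd_update()
	 *   -> regulatory_set_wiphy_regd()          (asynchronous, no RTNL)
	 *      -> cfg80211 applies the regd, then calls the notifier with
	 *         initiator == NL80211_REGDOM_SET_BY_DRIVER
	 *         -> ath12k_reg_update_chan_list(ar, true) per radio,
	 *            which is why the per-radio loop is removed below.
	 */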
@@ -290,15 +387,7 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
goto skip;
ah->regd_updated = true;
- /* Apply the new regd to all the radios, this is expected to be received only once
- * since we check for ah->regd_updated and allow here only once.
- */
- for_each_ar(ah, ar, i) {
- ab = ar->ab;
- ret = ath12k_reg_update_chan_list(ar);
- if (ret)
- goto err;
- }
+
skip:
return 0;
err:
@@ -365,129 +454,6 @@ static u32 ath12k_map_fw_phy_flags(u32 phy_flags)
return flags;
}
-static bool
-ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
- struct ieee80211_reg_rule *rule2)
-{
- u32 start_freq1, end_freq1;
- u32 start_freq2, end_freq2;
-
- start_freq1 = rule1->freq_range.start_freq_khz;
- start_freq2 = rule2->freq_range.start_freq_khz;
-
- end_freq1 = rule1->freq_range.end_freq_khz;
- end_freq2 = rule2->freq_range.end_freq_khz;
-
- if ((start_freq1 >= start_freq2 &&
- start_freq1 < end_freq2) ||
- (start_freq2 > start_freq1 &&
- start_freq2 < end_freq1))
- return true;
-
- /* TODO: Should we restrict intersection feasibility
- * based on min bandwidth of the intersected region also,
- * say the intersected rule should have a min bandwidth
- * of 20MHz?
- */
-
- return false;
-}
-
-static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
- struct ieee80211_reg_rule *rule2,
- struct ieee80211_reg_rule *new_rule)
-{
- u32 start_freq1, end_freq1;
- u32 start_freq2, end_freq2;
- u32 freq_diff, max_bw;
-
- start_freq1 = rule1->freq_range.start_freq_khz;
- start_freq2 = rule2->freq_range.start_freq_khz;
-
- end_freq1 = rule1->freq_range.end_freq_khz;
- end_freq2 = rule2->freq_range.end_freq_khz;
-
- new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
- start_freq2);
- new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
-
- freq_diff = new_rule->freq_range.end_freq_khz -
- new_rule->freq_range.start_freq_khz;
- max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
- rule2->freq_range.max_bandwidth_khz);
- new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
-
- new_rule->power_rule.max_antenna_gain =
- min_t(u32, rule1->power_rule.max_antenna_gain,
- rule2->power_rule.max_antenna_gain);
-
- new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
- rule2->power_rule.max_eirp);
-
- /* Use the flags of both the rules */
- new_rule->flags = rule1->flags | rule2->flags;
-
- /* To be safe, lts use the max cac timeout of both rules */
- new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
- rule2->dfs_cac_ms);
-}
-
-static struct ieee80211_regdomain *
-ath12k_regd_intersect(struct ieee80211_regdomain *default_regd,
- struct ieee80211_regdomain *curr_regd)
-{
- u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
- struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
- struct ieee80211_regdomain *new_regd = NULL;
- u8 i, j, k;
-
- num_old_regd_rules = default_regd->n_reg_rules;
- num_curr_regd_rules = curr_regd->n_reg_rules;
- num_new_regd_rules = 0;
-
- /* Find the number of intersecting rules to allocate new regd memory */
- for (i = 0; i < num_old_regd_rules; i++) {
- old_rule = default_regd->reg_rules + i;
- for (j = 0; j < num_curr_regd_rules; j++) {
- curr_rule = curr_regd->reg_rules + j;
-
- if (ath12k_reg_can_intersect(old_rule, curr_rule))
- num_new_regd_rules++;
- }
- }
-
- if (!num_new_regd_rules)
- return NULL;
-
- new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
- sizeof(struct ieee80211_reg_rule)),
- GFP_ATOMIC);
-
- if (!new_regd)
- return NULL;
-
- /* We set the new country and dfs region directly and only trim
- * the freq, power, antenna gain by intersecting with the
- * default regdomain. Also MAX of the dfs cac timeout is selected.
- */
- new_regd->n_reg_rules = num_new_regd_rules;
- memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
- new_regd->dfs_region = curr_regd->dfs_region;
- new_rule = new_regd->reg_rules;
-
- for (i = 0, k = 0; i < num_old_regd_rules; i++) {
- old_rule = default_regd->reg_rules + i;
- for (j = 0; j < num_curr_regd_rules; j++) {
- curr_rule = curr_regd->reg_rules + j;
-
- if (ath12k_reg_can_intersect(old_rule, curr_rule))
- ath12k_reg_intersect_rules(old_rule, curr_rule,
- (new_rule + k++));
- }
- }
- return new_regd;
-}
-
static const char *
ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
{
@@ -524,13 +490,14 @@ ath12k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
static void
ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
- u32 reg_flags)
+ s8 psd, u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->psd = psd;
reg_rule->flags = reg_flags;
}
@@ -552,7 +519,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -574,7 +541,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
@@ -599,7 +566,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -611,26 +578,77 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
*rule_idx = i;
}
+static void ath12k_reg_update_freq_range(struct ath12k_reg_freq *reg_freq,
+ struct ath12k_reg_rule *reg_rule)
+{
+ if (reg_freq->start_freq > reg_rule->start_freq)
+ reg_freq->start_freq = reg_rule->start_freq;
+
+ if (reg_freq->end_freq < reg_rule->end_freq)
+ reg_freq->end_freq = reg_rule->end_freq;
+}
+
+enum wmi_reg_6g_ap_type
+ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
+{
+ switch (power_type) {
+ case IEEE80211_REG_LPI_AP:
+ return WMI_REG_INDOOR_AP;
+ case IEEE80211_REG_SP_AP:
+ return WMI_REG_STD_POWER_AP;
+ case IEEE80211_REG_VLP_AP:
+ return WMI_REG_VLP_AP;
+ default:
+ return WMI_REG_MAX_AP_TYPE;
+ }
+}
+
struct ieee80211_regdomain *
ath12k_reg_build_regd(struct ath12k_base *ab,
- struct ath12k_reg_info *reg_info, bool intersect)
+ struct ath12k_reg_info *reg_info,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
{
- struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
- struct ath12k_reg_rule *reg_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ struct ath12k_reg_rule *reg_rule, *reg_rule_6ghz;
+ u32 flags, reg_6ghz_number, max_bw_6ghz;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
- u32 flags;
char alpha2[3];
num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
- /* FIXME: Currently taking reg rules for 6G only from Indoor AP mode list.
- * This can be updated to choose the combination dynamically based on AP
- * type and client type, after complete 6G regulatory support is added.
- */
- if (reg_info->is_ext_reg_event)
- num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP];
+ if (reg_info->is_ext_reg_event) {
+ if (vdev_type == WMI_VDEV_TYPE_STA) {
+ enum wmi_reg_6g_ap_type ap_type;
+
+ ap_type = ath12k_reg_ap_pwr_convert(power_type);
+ if (ap_type == WMI_REG_MAX_AP_TYPE)
+ ap_type = WMI_REG_INDOOR_AP;
+
+ reg_6ghz_number = reg_info->num_6g_reg_rules_cl
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ if (reg_6ghz_number == 0) {
+ ap_type = WMI_REG_INDOOR_AP;
+ reg_6ghz_number = reg_info->num_6g_reg_rules_cl
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ }
+
+ reg_rule_6ghz = reg_info->reg_rules_6g_client_ptr
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ max_bw_6ghz = reg_info->max_bw_6g_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ } else {
+ reg_6ghz_number = reg_info->num_6g_reg_rules_ap
+ [WMI_REG_INDOOR_AP];
+ reg_rule_6ghz =
+ reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP];
+ max_bw_6ghz = reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP];
+ }
+
+ num_rules += reg_6ghz_number;
+ }
if (!num_rules)
goto ret;
@@ -639,21 +657,31 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI)
num_rules += 2;
- tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ new_regd = kzalloc(sizeof(*new_regd) +
(num_rules * sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
- if (!tmp_regd)
+ if (!new_regd)
goto ret;
- memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(new_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
- tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
+ new_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
- alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region),
+ alpha2, ath12k_reg_get_regdom_str(new_regd->dfs_region),
reg_info->dfs_region, num_rules);
+
+ /* Reset start and end frequency for each band
+ */
+ ab->reg_freq_5ghz.start_freq = INT_MAX;
+ ab->reg_freq_5ghz.end_freq = 0;
+ ab->reg_freq_2ghz.start_freq = INT_MAX;
+ ab->reg_freq_2ghz.end_freq = 0;
+ ab->reg_freq_6ghz.start_freq = INT_MAX;
+ ab->reg_freq_6ghz.end_freq = 0;
+
/* Update reg_rules[] below. Firmware is expected to
* send these rules in order(2G rules first and then 5G)
*/
@@ -664,6 +692,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_2g);
flags = 0;
+ ath12k_reg_update_freq_range(&ab->reg_freq_2ghz, reg_rule);
} else if (reg_info->num_5g_reg_rules &&
(j < reg_info->num_5g_reg_rules)) {
reg_rule = reg_info->reg_rules_5g_ptr + j++;
@@ -677,13 +706,15 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
- } else if (reg_info->is_ext_reg_event &&
- reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] &&
- (k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) {
- reg_rule = reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP] + k++;
- max_bw = min_t(u16, reg_rule->max_bw,
- reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]);
+ ath12k_reg_update_freq_range(&ab->reg_freq_5ghz, reg_rule);
+ } else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
+ (k < reg_6ghz_number)) {
+ reg_rule = reg_rule_6ghz + k++;
+ max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
flags = NL80211_RRF_AUTO_BW;
+ if (reg_rule->psd_flag)
+ flags |= NL80211_RRF_PSD;
+ ath12k_reg_update_freq_range(&ab->reg_freq_6ghz, reg_rule);
} else {
break;
}
@@ -691,11 +722,11 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
flags |= ath12k_map_fw_reg_flags(reg_rule->flags);
flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap);
- ath12k_reg_update_rule(tmp_regd->reg_rules + i,
+ ath12k_reg_update_rule(new_regd->reg_rules + i,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
@@ -706,7 +737,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
reg_info->dfs_region == ATH12K_DFS_REG_ETSI &&
(reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){
- ath12k_reg_update_weather_radar_band(ab, tmp_regd,
+ ath12k_reg_update_weather_radar_band(ab, new_regd,
reg_rule, &i,
flags, max_bw);
continue;
@@ -716,36 +747,19 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
- tmp_regd->reg_rules[i].dfs_cac_ms,
+ new_regd->reg_rules[i].dfs_cac_ms,
flags, reg_rule->psd_flag, reg_rule->psd_eirp);
} else {
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
- tmp_regd->reg_rules[i].dfs_cac_ms,
+ new_regd->reg_rules[i].dfs_cac_ms,
flags);
}
}
- tmp_regd->n_reg_rules = i;
-
- if (intersect) {
- default_regd = ab->default_regd[reg_info->phy_id];
-
- /* Get a new regd by intersecting the received regd with
- * our default regd.
- */
- new_regd = ath12k_regd_intersect(default_regd, tmp_regd);
- kfree(tmp_regd);
- if (!new_regd) {
- ath12k_warn(ab, "Unable to create intersected regdomain\n");
- goto ret;
- }
- } else {
- new_regd = tmp_regd;
- }
-
+ new_regd->n_reg_rules = i;
ret:
return new_regd;
}
@@ -767,9 +781,109 @@ void ath12k_regd_update_work(struct work_struct *work)
}
}
+void ath12k_reg_reset_reg_info(struct ath12k_reg_info *reg_info)
+{
+ u8 i, j;
+
+ if (!reg_info)
+ return;
+
+ kfree(reg_info->reg_rules_2g_ptr);
+ kfree(reg_info->reg_rules_5g_ptr);
+
+ if (reg_info->is_ext_reg_event) {
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ kfree(reg_info->reg_rules_6g_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+ kfree(reg_info->reg_rules_6g_client_ptr[i][j]);
+ }
+ }
+}
+
+enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info)
+{
+ int pdev_idx = reg_info->phy_id;
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* In case of failure to set the requested country,
+ * firmware retains the current regd. We print a failure info
+ * and return from here.
+ */
+ ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ return ATH12K_REG_STATUS_DROP;
+ }
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback.
+ */
+ if (ab->hw_params->single_pdev_only &&
+ pdev_idx < ab->hw_params->num_rxdma_per_pdev)
+ return ATH12K_REG_STATUS_DROP;
+ else
+ return ATH12K_REG_STATUS_FALLBACK;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp(ab->default_regd[pdev_idx]->alpha2,
+ reg_info->alpha2, 2))
+ return ATH12K_REG_STATUS_DROP;
+
+ return ATH12K_REG_STATUS_VALID;
+}
+
+int ath12k_reg_handle_chan_list(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *regd = NULL;
+ int pdev_idx = reg_info->phy_id;
+ struct ath12k *ar;
+
+ regd = ath12k_reg_build_regd(ab, reg_info, vdev_type, power_type);
+ if (!regd)
+ return -EINVAL;
+
+ spin_lock_bh(&ab->base_lock);
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ /* Once mac is registered, ar is valid and all CC events from
+ * firmware is considered to be received due to user requests
+ * currently.
+ * Free previously built regd before assigning the newly
+ * generated regd to ar. NULL pointer handling will be
+ * taken care by kfree itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+ /* Multiple events for the same *ar is not expected. But we
+ * can still clear any previously stored default_regd if we
+ * are receiving this event for the same radio by mistake.
+ * NULL pointer handling will be taken care by kfree itself.
+ */
+ kfree(ab->default_regd[pdev_idx]);
+ /* This regd would be applied during mac registration */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+}
+
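A hypothetical caller shape for the new pair in the WMI reg-chan-list event path (labels illustrative, not from this diff):

	enum ath12k_reg_status status;
	int ret;

	status = ath12k_reg_validate_reg_info(ab, reg_info);
	if (status == ATH12K_REG_STATUS_DROP)
		goto mem_free;		/* ignore event, free reg_info */
	if (status == ATH12K_REG_STATUS_FALLBACK)
		goto fallback;		/* fall back to the default regd */

	ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
					  IEEE80211_REG_UNSET_AP);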
void ath12k_reg_init(struct ieee80211_hw *hw)
{
hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
hw->wiphy->reg_notifier = ath12k_reg_notifier;
}
@@ -777,8 +891,18 @@ void ath12k_reg_free(struct ath12k_base *ab)
{
int i;
+ mutex_lock(&ab->core_lock);
+ for (i = 0; i < MAX_RADIOS; i++) {
+ ath12k_reg_reset_reg_info(ab->reg_info[i]);
+ kfree(ab->reg_info[i]);
+ ab->reg_info[i] = NULL;
+ }
+
for (i = 0; i < ab->hw_params->max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
+ ab->default_regd[i] = NULL;
+ ab->new_regd[i] = NULL;
}
+ mutex_unlock(&ab->core_lock);
}
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
index 75f80df2aa0c..8af8e9ba462e 100644
--- a/drivers/net/wireless/ath/ath12k/reg.h
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -92,13 +92,29 @@ enum ath12k_reg_phy_bitmap {
ATH12K_REG_PHY_BITMAP_NO11BE = BIT(6),
};
+enum ath12k_reg_status {
+ ATH12K_REG_STATUS_VALID,
+ ATH12K_REG_STATUS_DROP,
+ ATH12K_REG_STATUS_FALLBACK,
+};
+
void ath12k_reg_init(struct ieee80211_hw *hw);
void ath12k_reg_free(struct ath12k_base *ab);
void ath12k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab,
struct ath12k_reg_info *reg_info,
- bool intersect);
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
int ath12k_regd_update(struct ath12k *ar, bool init);
-int ath12k_reg_update_chan_list(struct ath12k *ar);
+int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait);
+void ath12k_reg_reset_reg_info(struct ath12k_reg_info *reg_info);
+int ath12k_reg_handle_chan_list(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
+enum wmi_reg_6g_ap_type
+ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
+enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/testmode.c b/drivers/net/wireless/ath/ath12k/testmode.c
index 18d56a976dc7..fb6af7ccf71f 100644
--- a/drivers/net/wireless/ath/ath12k/testmode.c
+++ b/drivers/net/wireless/ath/ath12k/testmode.c
@@ -97,7 +97,7 @@ void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
u8 const *buf_pos;
ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+ "testmode event wmi cmd_id %d ftm event msg %p datalen %d\n",
cmd_id, ftm_msg, length);
ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(le32_to_cpu(ftm_msg->seg_hdr.pdev_id));
@@ -227,7 +227,7 @@ static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[])
buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
- "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
cmd_id, buf, buf_len);
ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
bufpos = buf;
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 6d1ea5f3a791..465f877fc0fb 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -29,6 +29,7 @@ struct ath12k_wmi_svc_ready_parse {
struct wmi_tlv_fw_stats_parse {
const struct wmi_stats_event *ev;
+ struct ath12k_fw_stats *stats;
};
struct ath12k_wmi_dma_ring_caps_parse {
@@ -90,6 +91,11 @@ struct ath12k_wmi_svc_rdy_ext2_parse {
bool dma_ring_cap_done;
bool spectral_bin_scaling_done;
bool mac_phy_caps_ext_done;
+ bool hal_reg_caps_ext2_done;
+ bool scan_radio_caps_ext2_done;
+ bool twt_caps_done;
+ bool htt_msdu_idx_to_qtype_map_done;
+ bool dbs_or_sbs_cap_ext_done;
};
struct ath12k_wmi_rdy_parse {
@@ -177,6 +183,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
[WMI_TAG_P2P_NOA_EVENT] = {
.min_len = sizeof(struct wmi_p2p_noa_event) },
+ [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
+ .min_len = sizeof(struct wmi_11d_new_cc_event) },
};
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -520,10 +528,10 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
* band to band for a single radio, need to see how this should be
* handled.
*/
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
- } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+ } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
@@ -546,7 +554,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
pdev_cap->rx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
@@ -566,7 +574,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
}
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
cap_band->max_bw_supported =
@@ -1037,14 +1045,32 @@ int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
struct wmi_vdev_start_req_arg *arg)
{
+ u32 center_freq1 = arg->band_center_freq1;
+
memset(chan, 0, sizeof(*chan));
chan->mhz = cpu_to_le32(arg->freq);
- chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
- if (arg->mode == MODE_11AC_VHT80_80)
+ chan->band_center_freq1 = cpu_to_le32(center_freq1);
+ if (arg->mode == MODE_11BE_EHT320) {
+ if (arg->freq > center_freq1)
+ chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
+ else
+ chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);
+
+ chan->band_center_freq2 = cpu_to_le32(center_freq1);
+
+ } else if (arg->mode == MODE_11BE_EHT160) {
+ if (arg->freq > center_freq1)
+ chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
+ else
+ chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
+
+ chan->band_center_freq2 = cpu_to_le32(center_freq1);
+ } else if (arg->mode == MODE_11BE_EHT80_80) {
chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
- else
+ } else {
chan->band_center_freq2 = 0;
+ }
chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
if (arg->passive)
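For EHT160/EHT320, firmware expects band_center_freq1 to point at the center of the 80/160 MHz half containing the primary channel, with the full-bandwidth center moved to band_center_freq2. A worked example with hypothetical 6 GHz numbers:

	/* EHT320: block spans 5945-6265 MHz, so arg->band_center_freq1 =
	 * 6105. For a primary channel at 6135 MHz (above 6105, i.e. in the
	 * upper 160 MHz half 6105-6265):
	 *   chan->band_center_freq1 = 6105 + 80 = 6185  (upper-half center)
	 *   chan->band_center_freq2 = 6105              (320 MHz center)
	 * A primary below 6105 would yield 6105 - 80 = 6025 instead.
	 */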
@@ -2166,9 +2192,10 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
- u32 peer_legacy_rates_align;
- u32 peer_ht_rates_align;
+ u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
+ u32 peer_ht_rates_align, eml_trans_timeout;
int i, ret, len;
+ u16 eml_cap;
__le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
@@ -2340,6 +2367,24 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
+
+ eml_cap = arg->ml.eml_cap;
+ if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
+ /* Padding delay */
+ eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
+ ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
+ /* Transition delay */
+ eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
+ ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
+ /* Transition timeout */
+ eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
+ ml_params->emlsr_trans_timeout_us =
+ cpu_to_le32(eml_trans_timeout);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u",
+ arg->peer_mac, eml_pad_delay, eml_trans_delay,
+ eml_trans_timeout);
+ }
+
ptr += sizeof(*ml_params);
skip_ml_params:
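The three ieee80211_emlsr_*/eml_* helpers used above are mac80211 accessors that decode EML capability subfields into microseconds; a worked example, assuming the 802.11be padding-delay encoding (0 = 0 us, then 32 us doubling per step):

	/* For an eml_cap whose EMLSR padding-delay subfield is 2:
	 *   ieee80211_emlsr_pad_delay_in_us(eml_cap) == 64
	 * and the value is forwarded to firmware verbatim via
	 * ml_params->emlsr_padding_delay_us.
	 */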
@@ -2351,7 +2396,7 @@ skip_ml_params:
for (i = 0; i < arg->peer_eht_mcs_count; i++) {
eht_mcs = ptr;
- eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
+ eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
sizeof(*eht_mcs));
eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
@@ -2359,6 +2404,10 @@ skip_ml_params:
ptr += sizeof(*eht_mcs);
}
+ /* Update MCS15 capability */
+ if (arg->eht_disable_mcs15)
+ cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);
+
tlv = ptr;
len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
/* fill ML Partner links */
@@ -2399,7 +2448,7 @@ skip_ml_params:
send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
- "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
+ "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
cmd->peer_listen_intval, cmd->peer_ht_caps,
@@ -2412,7 +2461,7 @@ send:
cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
- cmd->peer_eht_cap_phy[2]);
+ cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
if (ret) {
@@ -2591,7 +2640,10 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
cmd->scan_id = cpu_to_le32(arg->scan_id);
cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
- cmd->scan_priority = cpu_to_le32(arg->scan_priority);
+ if (ar->state_11d == ATH12K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
@@ -3313,6 +3365,110 @@ out:
return ret;
}
+int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
+ struct wmi_set_current_country_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_set_current_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_current_country_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "set current country pdev id %d alpha2 %c%c\n",
+ ar->pdev->pdev_id,
+ arg->alpha2[0],
+ arg->alpha2[1]);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
+ struct wmi_11d_scan_start_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_11d_scan_start_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
+ cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "send 11d scan start vdev id %d period %d ms interval %d ms\n",
+ arg->vdev_id, arg->scan_period_msec,
+ arg->start_interval_msec);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_11d_scan_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "send 11d scan stop vdev id %d\n",
+ cmd->vdev_id);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
@@ -3646,15 +3802,15 @@ ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
arg[i].pdev_id = pdev->pdev_id;
switch (pdev->cap.supported_bands) {
- case WMI_HOST_WLAN_2G_5G_CAP:
+ case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
break;
- case WMI_HOST_WLAN_2G_CAP:
+ case WMI_HOST_WLAN_2GHZ_CAP:
arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
break;
- case WMI_HOST_WLAN_5G_CAP:
+ case WMI_HOST_WLAN_5GHZ_CAP:
arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
break;
@@ -3665,7 +3821,8 @@ ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
}
static void
-ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
+ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_params *wmi_cfg,
struct ath12k_wmi_resource_config_arg *tg_cfg)
{
wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
@@ -3732,6 +3889,9 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+ if (ab->hw_params->reoq_lut_support)
+ wmi_cfg->host_service_flags |=
+ cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
@@ -3772,7 +3932,7 @@ static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
ptr = skb->data + sizeof(*cmd);
cfg = ptr;
- ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
+ ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);
cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
sizeof(*cfg));
@@ -4240,6 +4400,7 @@ static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
u16 len, const void *ptr, void *data)
{
+ struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
enum wmi_host_hw_mode_config_type mode, pref;
@@ -4272,8 +4433,11 @@ static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
}
}
- ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
- soc->wmi_ab.preferred_hw_mode);
+ svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;
+
+ ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
+ svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);
+
if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
return -EINVAL;
@@ -4503,6 +4667,65 @@ free_dir_buff:
return ret;
}
+static void
+ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
+ const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
+ struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
+{
+ mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
+ mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
+ mac_phy_info->hw_freq_range.low_2ghz_freq =
+ __le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
+ mac_phy_info->hw_freq_range.high_2ghz_freq =
+ __le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
+ mac_phy_info->hw_freq_range.low_5ghz_freq =
+ __le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
+ mac_phy_info->hw_freq_range.high_5ghz_freq =
+ __le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
+}
+
+static void
+ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
+{
+ struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
+ const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
+ const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
+ struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
+ u32 hw_mode_id, phy_bit_map;
+ u8 hw_idx;
+
+ mac_phy_info = &svc_ext_info->mac_phy_info[0];
+ mac_phy_cap = svc_rdy_ext->mac_phy_caps;
+
+ for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
+ hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
+ hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
+ phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);
+
+ while (phy_bit_map) {
+ ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
+ mac_phy_info->hw_mode_config_type =
+ le32_get_bits(hw_mode_cap->hw_mode_config_type,
+ WMI_HW_MODE_CAP_CFG_TYPE);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
+ hw_idx, hw_mode_id,
+ mac_phy_info->hw_mode_config_type,
+ mac_phy_info->supported_bands, mac_phy_info->phy_id,
+ mac_phy_info->hw_freq_range.low_2ghz_freq,
+ mac_phy_info->hw_freq_range.high_2ghz_freq,
+ mac_phy_info->hw_freq_range.low_5ghz_freq,
+ mac_phy_info->hw_freq_range.high_5ghz_freq);
+
+ mac_phy_cap++;
+ mac_phy_info++;
+
+ phy_bit_map >>= 1;
+ }
+ }
+}
+
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
@@ -4551,6 +4774,8 @@ static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
return ret;
}
+ ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);
+
svc_rdy_ext->mac_phy_done = true;
} else if (!svc_rdy_ext->ext_hal_reg_done) {
ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
@@ -4601,6 +4826,7 @@ static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
return 0;
err:
+ kfree(svc_rdy_ext.mac_phy_caps);
ath12k_wmi_free_dbring_caps(ab);
return ret;
}
@@ -4699,7 +4925,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
bands = pdev->cap.supported_bands;
}
- if (bands & WMI_HOST_WLAN_2G_CAP) {
+ if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
caps->eht_cap_mac_info_2ghz,
caps->eht_cap_phy_info_2ghz,
@@ -4708,7 +4934,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
caps->eht_cap_info_internal);
}
- if (bands & WMI_HOST_WLAN_5G_CAP) {
+ if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
caps->eht_cap_mac_info_5ghz,
caps->eht_cap_phy_info_5ghz,
@@ -4766,10 +4992,449 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
}
+static void
+ath12k_wmi_update_freq_info(struct ath12k_base *ab,
+ struct ath12k_svc_ext_mac_phy_info *mac_cap,
+ enum ath12k_hw_mode mode,
+ u32 phy_id)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *mac_range;
+
+ mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];
+
+ if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
+ mac_range->low_2ghz_freq = max_t(u32,
+ mac_cap->hw_freq_range.low_2ghz_freq,
+ ATH12K_MIN_2GHZ_FREQ);
+ mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
+ min_t(u32,
+ mac_cap->hw_freq_range.high_2ghz_freq,
+ ATH12K_MAX_2GHZ_FREQ) :
+ ATH12K_MAX_2GHZ_FREQ;
+ }
+
+ if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
+ mac_range->low_5ghz_freq = max_t(u32,
+ mac_cap->hw_freq_range.low_5ghz_freq,
+ ATH12K_MIN_5GHZ_FREQ);
+ mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
+ min_t(u32,
+ mac_cap->hw_freq_range.high_5ghz_freq,
+ ATH12K_MAX_6GHZ_FREQ) :
+ ATH12K_MAX_6GHZ_FREQ;
+ }
+}
+
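A quick stand-alone illustration of the clamping rule above: the firmware-reported low edge is raised to the band minimum, and a zero high edge (firmware did not report one) falls back to the band maximum. The FAKE_* values below are placeholders, not the driver's ATH12K_* constants:

#include <stdint.h>
#include <stdio.h>

#define FAKE_MIN_5GHZ 5180	/* placeholder for ATH12K_MIN_5GHZ_FREQ */
#define FAKE_MAX_6GHZ 7115	/* placeholder for ATH12K_MAX_6GHZ_FREQ */

static uint32_t clamp_low(uint32_t lo)
{
	return lo > FAKE_MIN_5GHZ ? lo : FAKE_MIN_5GHZ;	/* the max_t() */
}

static uint32_t clamp_high(uint32_t hi)
{
	if (!hi)			/* unreported: use band maximum */
		return FAKE_MAX_6GHZ;
	return hi < FAKE_MAX_6GHZ ? hi : FAKE_MAX_6GHZ;	/* the min_t() */
}

int main(void)
{
	printf("low 5170 -> %u\n", clamp_low(5170));	/* prints 5180 */
	printf("high 0   -> %u\n", clamp_high(0));	/* prints 7115 */
	return 0;
}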
+static bool
+ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab,
+ enum ath12k_hw_mode hwmode)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *mac_range;
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id];
+ /* modify the SBS/DBS range only when the ranges for both PHYs are filled */
+ if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq)
+ return false;
+ }
+
+ return true;
+}
+
+static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *mac_range;
+ u8 phy_id;
+
+ mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS];
+ /* Reset 5 GHz range for shared mac for DBS */
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ if (mac_range[phy_id].low_2ghz_freq &&
+ mac_range[phy_id].low_5ghz_freq) {
+ mac_range[phy_id].low_5ghz_freq = 0;
+ mac_range[phy_id].high_5ghz_freq = 0;
+ }
+ }
+}
+
+static u32
+ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
+{
+ u32 highest_freq = 0;
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ if (range[phy_id].high_5ghz_freq > highest_freq)
+ highest_freq = range[phy_id].high_5ghz_freq;
+ }
+
+ return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ;
+}
+
+static u32
+ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
+{
+ u32 lowest_freq = 0;
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ if ((!lowest_freq && range[phy_id].low_5ghz_freq) ||
+ range[phy_id].low_5ghz_freq < lowest_freq)
+ lowest_freq = range[phy_id].low_5ghz_freq;
+ }
+
+ return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ;
+}
+
+static void
+ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab,
+ u16 sbs_range_sep,
+ struct ath12k_hw_mode_freq_range_arg *ref_freq)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range;
+ u8 phy_id;
+
+ upper_sbs_freq_range =
+ hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ upper_sbs_freq_range[phy_id].low_2ghz_freq =
+ ref_freq[phy_id].low_2ghz_freq;
+ upper_sbs_freq_range[phy_id].high_2ghz_freq =
+ ref_freq[phy_id].high_2ghz_freq;
+
+ /* update for shared mac */
+ if (upper_sbs_freq_range[phy_id].low_2ghz_freq) {
+ upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
+ upper_sbs_freq_range[phy_id].high_5ghz_freq =
+ ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
+ } else {
+ upper_sbs_freq_range[phy_id].low_5ghz_freq =
+ ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
+ upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
+ }
+ }
+}
+
+static void
+ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab,
+ u16 sbs_range_sep,
+ struct ath12k_hw_mode_freq_range_arg *ref_freq)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range;
+ u8 phy_id;
+
+ lower_sbs_freq_range =
+ hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ lower_sbs_freq_range[phy_id].low_2ghz_freq =
+ ref_freq[phy_id].low_2ghz_freq;
+ lower_sbs_freq_range[phy_id].high_2ghz_freq =
+ ref_freq[phy_id].high_2ghz_freq;
+
+ /* update for shared mac */
+ if (lower_sbs_freq_range[phy_id].low_2ghz_freq) {
+ lower_sbs_freq_range[phy_id].low_5ghz_freq =
+ ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
+ lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
+ } else {
+ lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
+ lower_sbs_freq_range[phy_id].high_5ghz_freq =
+ ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
+ }
+ }
+}
+
+static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)
+{
+ static const char * const mode_str[] = {
+ [ATH12K_HW_MODE_SMM] = "SMM",
+ [ATH12K_HW_MODE_DBS] = "DBS",
+ [ATH12K_HW_MODE_SBS] = "SBS",
+ [ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE",
+ [ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE",
+ };
+
+ if (hw_mode >= ARRAY_SIZE(mode_str))
+ return "Unknown";
+
+ return mode_str[hw_mode];
+}
+
+static void
+ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab,
+ struct ath12k_hw_mode_freq_range_arg *freq_range,
+ enum ath12k_hw_mode hw_mode)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_RADIOS; i++)
+ if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq)
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
+ ath12k_wmi_hw_mode_to_str(hw_mode),
+ hw_mode, i,
+ freq_range[i].low_2ghz_freq,
+ freq_range[i].high_2ghz_freq,
+ freq_range[i].low_5ghz_freq,
+ freq_range[i].high_5ghz_freq);
+}
+
+static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab)
+{
+ struct ath12k_hw_mode_freq_range_arg *freq_range;
+ u8 i;
+
+ for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
+ freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
+ ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
+ }
+}
+
+static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
+ struct ath12k_hw_mode_freq_range_arg *non_shared_range;
+ u8 shared_phy_id;
+
+ sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];
+
+ /* if the SBS mac range has both 2.4 and 5 GHz ranges (i.e. a shared
+ * phy_id), keep the range as it is in SBS
+ */
+ if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
+ return 0;
+
+ if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
+ ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4Ghz");
+ ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
+ return -EINVAL;
+ }
+
+ non_shared_range = sbs_mac_range;
+ /* if SBS mac range has only 5 GHz then it's the non-shared phy, so
+ * modify the range as per the shared mac.
+ */
+ shared_phy_id = phy_id ? 0 : 1;
+ shared_mac_range =
+ &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];
+
+ if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
+ /* If the shared mac lower 5 GHz frequency is greater than
+ * non-shared mac lower 5 GHz frequency then the shared mac has
+ * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
+ * freq should be less than the shared mac's low 5 GHz freq.
+ */
+ if (non_shared_range->high_5ghz_freq >=
+ shared_mac_range->low_5ghz_freq)
+ non_shared_range->high_5ghz_freq =
+ max_t(u32, shared_mac_range->low_5ghz_freq - 10,
+ non_shared_range->low_5ghz_freq);
+ } else if (shared_mac_range->high_5ghz_freq <
+ non_shared_range->high_5ghz_freq) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
+ /* If the shared mac high 5 GHz frequency is less than
+ * non-shared mac high 5 GHz frequency then the shared mac has
+ * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
+ * freq should be greater than the shared mac's high 5 GHz freq.
+ */
+ if (shared_mac_range->high_5ghz_freq >=
+ non_shared_range->low_5ghz_freq)
+ non_shared_range->low_5ghz_freq =
+ min_t(u32, shared_mac_range->high_5ghz_freq + 10,
+ non_shared_range->high_5ghz_freq);
+ } else {
+ ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
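The 10 MHz adjustments above keep the non-shared MAC's 5 GHz range clear of the shared MAC's sub-band. A stand-alone sketch of the "high 5 GHz shared" branch, with made-up frequencies:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* shared MAC owns 2.4 GHz plus the high 5 GHz sub-band */
	uint32_t shared_lo5 = 5500;
	/* non-shared MAC initially reports the full 5 GHz span */
	uint32_t ns_lo5 = 5180, ns_hi5 = 7115;

	if (ns_hi5 >= shared_lo5) {
		/* trim to a 10 MHz guard below the shared range, but
		 * never below the non-shared low edge (the max_t())
		 */
		uint32_t trimmed = shared_lo5 - 10;

		ns_hi5 = trimmed > ns_lo5 ? trimmed : ns_lo5;
	}

	printf("non-shared 5 GHz range: [%u - %u]\n", ns_lo5, ns_hi5);
	/* prints: non-shared 5 GHz range: [5180 - 5490] */
	return 0;
}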
+static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *mac_range;
+ u16 sbs_range_sep;
+ u8 phy_id;
+ int ret;
+
+ mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];
+
+ /* If sbs_lower_band_end_freq has a value, then the frequency range
+ * will be split using that value.
+ */
+ sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq;
+ if (sbs_range_sep) {
+ ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep,
+ mac_range);
+ ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep,
+ mac_range);
+ /* Hardware specifies the range boundary with sbs_range_sep
+ * (i.e. the boundary between 5 GHz high and 5 GHz low);
+ * reset the original range to make sure it will not get used.
+ */
+ memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
+ return;
+ }
+
+ /* If sbs_lower_band_end_freq is not set, the firmware will send one
+ * shared mac range and one non-shared mac range, so update those frequencies.
+ */
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ ret = ath12k_wmi_modify_sbs_freq(ab, phy_id);
+ if (ret) {
+ memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
+ break;
+ }
+ }
+}
+
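A worked example of the cutoff-based split performed by the two fill helpers, with an illustrative sbs_lower_band_end_freq and MAC0 assumed to be the 2.4 GHz sharing MAC (all numbers invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sep = 5650;			/* sbs_lower_band_end_freq */
	uint32_t lo5 = 5180, hi5 = 7115;	/* full 5/6 GHz span */

	/* lower share: the sharing MAC gets the low 5 GHz sub-band */
	printf("lower share: mac0 5 GHz [%u - %u], mac1 5 GHz [%u - %u]\n",
	       lo5, sep, sep + 10, hi5);

	/* upper share: the sharing MAC gets the high 5 GHz sub-band */
	printf("upper share: mac0 5 GHz [%u - %u], mac1 5 GHz [%u - %u]\n",
	       sep + 10, hi5, lo5, sep);
	return 0;
}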
+static void
+ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
+ enum wmi_host_hw_mode_config_type hw_config_type,
+ u32 phy_id,
+ struct ath12k_svc_ext_mac_phy_info *mac_cap)
+{
+ if (phy_id >= MAX_RADIOS) {
+ ath12k_err(ab, "mac more than two not supported: %d", phy_id);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
+ hw_config_type, phy_id, mac_cap->supported_bands,
+ ab->wmi_ab.sbs_lower_band_end_freq,
+ mac_cap->hw_freq_range.low_2ghz_freq,
+ mac_cap->hw_freq_range.high_2ghz_freq,
+ mac_cap->hw_freq_range.low_5ghz_freq,
+ mac_cap->hw_freq_range.high_5ghz_freq);
+
+ switch (hw_config_type) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ if (phy_id) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
+ break;
+ }
+ ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
+ break;
+
+ case WMI_HOST_HW_MODE_DBS:
+ if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
+ ath12k_wmi_update_freq_info(ab, mac_cap,
+ ATH12K_HW_MODE_DBS, phy_id);
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
+ if (ab->wmi_ab.sbs_lower_band_end_freq ||
+ mac_cap->hw_freq_range.low_5ghz_freq ||
+ mac_cap->hw_freq_range.low_2ghz_freq)
+ ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
+ phy_id);
+
+ if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
+ ath12k_wmi_update_dbs_freq_info(ab);
+ if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
+ ath12k_wmi_update_sbs_freq_info(ab);
+ break;
+ case WMI_HOST_HW_MODE_SBS:
+ case WMI_HOST_HW_MODE_SBS_PASSIVE:
+ ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
+ if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
+ ath12k_wmi_update_sbs_freq_info(ab);
+
+ break;
+ default:
+ break;
+ }
+}
+
+static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
+{
+ if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) ||
+ (ab->wmi_ab.sbs_lower_band_end_freq &&
+ ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
+ ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE)))
+ return true;
+
+ return false;
+}
+
+static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
+{
+ struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
+ struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
+ enum wmi_host_hw_mode_config_type hw_config_type;
+ struct ath12k_svc_ext_mac_phy_info *tmp;
+ bool dbs_mode = false, sbs_mode = false;
+ u32 i, j = 0;
+
+ if (!svc_ext_info->num_hw_modes) {
+ ath12k_err(ab, "invalid number of hw modes");
+ return -EINVAL;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d",
+ svc_ext_info->num_hw_modes);
+
+ memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps));
+
+ for (i = 0; i < svc_ext_info->num_hw_modes; i++) {
+ if (j >= ATH12K_MAX_MAC_PHY_CAP)
+ return -EINVAL;
+
+ /* Update for MAC0 */
+ tmp = &svc_ext_info->mac_phy_info[j++];
+ hw_config_type = tmp->hw_mode_config_type;
+ ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp);
+
+ /* SBS and DBS have dual MAC. Up to 2 MACs are considered. */
+ if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
+ hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
+ hw_config_type == WMI_HOST_HW_MODE_SBS ||
+ hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) {
+ if (j >= ATH12K_MAX_MAC_PHY_CAP)
+ return -EINVAL;
+ /* Update for MAC1 */
+ tmp = &svc_ext_info->mac_phy_info[j++];
+ ath12k_wmi_update_mac_freq_info(ab, hw_config_type,
+ tmp->phy_id, tmp);
+
+ if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
+ hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)
+ dbs_mode = true;
+
+ if (ath12k_wmi_sbs_range_present(ab) &&
+ (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
+ hw_config_type == WMI_HOST_HW_MODE_SBS ||
+ hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS))
+ sbs_mode = true;
+ }
+ }
+
+ info->support_dbs = dbs_mode;
+ info->support_sbs = sbs_mode;
+
+ ath12k_wmi_dump_freq_range(ab);
+
+ return 0;
+}
+
static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
+ const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps;
struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
int ret;
@@ -4811,7 +5476,32 @@ static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
}
parse->mac_phy_caps_ext_done = true;
+ } else if (!parse->hal_reg_caps_ext2_done) {
+ parse->hal_reg_caps_ext2_done = true;
+ } else if (!parse->scan_radio_caps_ext2_done) {
+ parse->scan_radio_caps_ext2_done = true;
+ } else if (!parse->twt_caps_done) {
+ parse->twt_caps_done = true;
+ } else if (!parse->htt_msdu_idx_to_qtype_map_done) {
+ parse->htt_msdu_idx_to_qtype_map_done = true;
+ } else if (!parse->dbs_or_sbs_cap_ext_done) {
+ dbs_or_sbs_caps = ptr;
+ ab->wmi_ab.sbs_lower_band_end_freq =
+ __le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n",
+ ab->wmi_ab.sbs_lower_band_end_freq);
+
+ ret = ath12k_wmi_update_hw_mode_list(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to update hw mode list: %d\n",
+ ret);
+ return ret;
+ }
+
+ parse->dbs_or_sbs_cap_ext_done = true;
}
+
break;
default:
break;
@@ -4922,7 +5612,7 @@ static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_param
for (count = 0; count < num_reg_rules; count++) {
start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
- if (start_freq >= ATH12K_MIN_6G_FREQ)
+ if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
num_invalid_5ghz_rules++;
}
@@ -4992,9 +5682,9 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
- if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
+ if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
- i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
+ i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
kfree(tb);
return -EINVAL;
}
@@ -5015,9 +5705,9 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
- if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
- num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
- num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
+ if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) {
ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
i);
kfree(tb);
@@ -5933,30 +6623,62 @@ static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
wake_up(&ab->wmi_ab.tx_credits_wq);
}
-static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
- struct sk_buff *skb)
+static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
{
- dev_kfree_skb(skb);
-}
+ const struct wmi_11d_new_cc_event *ev;
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ const void **tb;
+ int ret, i;
-static bool ath12k_reg_is_world_alpha(char *alpha)
-{
- if (alpha[0] == '0' && alpha[1] == '0')
- return true;
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
- if (alpha[0] == 'n' && alpha[1] == 'a')
- return true;
+ ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ ath12k_warn(ab, "failed to fetch 11d new cc ev");
+ return -EPROTO;
+ }
- return false;
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
+ ab->new_alpha2[0],
+ ab->new_alpha2[1]);
+
+ kfree(tb);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ ar->state_11d = ATH12K_11D_IDLE;
+ ar->ah->regd_updated = false;
+ complete(&ar->completed_11d_scan);
+ }
+
+ queue_work(ab->workqueue, &ab->update_11d_work);
+
+ return 0;
+}
+
+static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
}
static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
- struct ath12k_reg_info *reg_info = NULL;
- struct ieee80211_regdomain *regd = NULL;
- bool intersect = false;
- int ret = 0, pdev_idx, i, j;
- struct ath12k *ar;
+ struct ath12k_reg_info *reg_info;
+ u8 pdev_idx;
+ int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
if (!reg_info) {
@@ -5965,86 +6687,52 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
}
ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
-
if (ret) {
ath12k_warn(ab, "failed to extract regulatory info from received event\n");
- goto fallback;
+ goto mem_free;
}
- if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
- /* In case of failure to set the requested ctry,
- * fw retains the current regd. We print a failure info
- * and return from here.
+ ret = ath12k_reg_validate_reg_info(ab, reg_info);
+ if (ret == ATH12K_REG_STATUS_FALLBACK) {
+ ath12k_warn(ab, "failed to validate reg info %d\n", ret);
+ /* firmware has successfully switched to the new regd but the host
+ * cannot continue, so free reg_info and fall back to the old regd
*/
- ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ goto mem_free;
+ } else if (ret == ATH12K_REG_STATUS_DROP) {
+ /* reg info is valid but we will neither store it
+ * nor create a new regd for it
+ */
+ ret = ATH12K_REG_STATUS_VALID;
goto mem_free;
}
+ /* free old reg_info if it exists */
pdev_idx = reg_info->phy_id;
-
- if (pdev_idx >= ab->num_radios) {
- /* Process the event for phy0 only if single_pdev_only
- * is true. If pdev_idx is valid but not 0, discard the
- * event. Otherwise, it goes to fallback.
- */
- if (ab->hw_params->single_pdev_only &&
- pdev_idx < ab->hw_params->num_rxdma_per_pdev)
- goto mem_free;
- else
- goto fallback;
+ if (ab->reg_info[pdev_idx]) {
+ ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
+ kfree(ab->reg_info[pdev_idx]);
}
-
- /* Avoid multiple overwrites to default regd, during core
- * stop-start after mac registration.
+ /* reg_info is valid; store it for later use
+ * even if the regd build below fails
*/
- if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
- !memcmp(ab->default_regd[pdev_idx]->alpha2,
- reg_info->alpha2, 2))
- goto mem_free;
+ ab->reg_info[pdev_idx] = reg_info;
- /* Intersect new rules with default regd if a new country setting was
- * requested, i.e a default regd was already set during initialization
- * and the regd coming from this event has a valid country info.
- */
- if (ab->default_regd[pdev_idx] &&
- !ath12k_reg_is_world_alpha((char *)
- ab->default_regd[pdev_idx]->alpha2) &&
- !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
- intersect = true;
-
- regd = ath12k_reg_build_regd(ab, reg_info, intersect);
- if (!regd) {
- ath12k_warn(ab, "failed to build regd from reg_info\n");
+ ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
+ IEEE80211_REG_UNSET_AP);
+ if (ret) {
+ ath12k_warn(ab, "failed to handle chan list %d\n", ret);
goto fallback;
}
- spin_lock(&ab->base_lock);
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
- /* Once mac is registered, ar is valid and all CC events from
- * fw is considered to be received due to user requests
- * currently.
- * Free previously built regd before assigning the newly
- * generated regd to ar. NULL pointer handling will be
- * taken care by kfree itself.
- */
- ar = ab->pdevs[pdev_idx].ar;
- kfree(ab->new_regd[pdev_idx]);
- ab->new_regd[pdev_idx] = regd;
- queue_work(ab->workqueue, &ar->regd_update_work);
- } else {
- /* Multiple events for the same *ar is not expected. But we
- * can still clear any previously stored default_regd if we
- * are receiving this event for the same radio by mistake.
- * NULL pointer handling will be taken care by kfree itself.
- */
- kfree(ab->default_regd[pdev_idx]);
- /* This regd would be applied during mac registration */
- ab->default_regd[pdev_idx] = regd;
- }
- ab->dfs_region = reg_info->dfs_region;
- spin_unlock(&ab->base_lock);
+ goto out;
+
+mem_free:
+ ath12k_reg_reset_reg_info(reg_info);
+ kfree(reg_info);
- goto mem_free;
+ if (ret == ATH12K_REG_STATUS_VALID)
+ return ret;
fallback:
/* Fallback to older reg (by sending previous country setting
@@ -6056,20 +6744,8 @@ fallback:
*/
/* TODO: This is rare, but still should also be handled */
WARN_ON(1);
-mem_free:
- if (reg_info) {
- kfree(reg_info->reg_rules_2g_ptr);
- kfree(reg_info->reg_rules_5g_ptr);
- if (reg_info->is_ext_reg_event) {
- for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
- kfree(reg_info->reg_rules_6g_ap_ptr[i]);
-
- for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
- for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
- kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
- }
- kfree(reg_info);
- }
+
+out:
return ret;
}
@@ -6225,13 +6901,14 @@ static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff
ar->last_wmi_vdev_start_status = 0;
status = le32_to_cpu(vdev_start_resp.status);
-
if (WARN_ON_ONCE(status)) {
ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
status, ath12k_wmi_vdev_resp_print(status));
ar->last_wmi_vdev_start_status = status;
}
+ ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);
+
complete(&ar->vdev_setup_done);
rcu_read_unlock();
@@ -6317,13 +6994,13 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
- rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
+ rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
status->band = NL80211_BAND_6GHZ;
status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
- } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
@@ -7216,7 +7893,7 @@ void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
else
buf[len] = 0;
- ath12k_debugfs_fw_stats_reset(ar);
+ ath12k_fw_stats_reset(ar);
}
static void
@@ -7335,7 +8012,7 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
u16 len)
{
const struct wmi_stats_event *ev = parse->ev;
- struct ath12k_fw_stats stats = {0};
+ struct ath12k_fw_stats *stats = parse->stats;
struct ath12k *ar;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -7344,18 +8021,18 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
int i, ret = 0;
const void *data = ptr;
- INIT_LIST_HEAD(&stats.vdevs);
- INIT_LIST_HEAD(&stats.bcn);
- INIT_LIST_HEAD(&stats.pdevs);
-
if (!ev) {
ath12k_warn(ab, "failed to fetch update stats ev");
return -EPROTO;
}
+ if (!stats)
+ return -EINVAL;
+
rcu_read_lock();
- ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+ stats->pdev_id = le32_to_cpu(ev->pdev_id);
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
if (!ar) {
ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
le32_to_cpu(ev->pdev_id));
@@ -7398,8 +8075,8 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
if (!dst)
continue;
ath12k_wmi_pull_vdev_stats(src, dst);
- stats.stats_id = WMI_REQUEST_VDEV_STAT;
- list_add_tail(&dst->list, &stats.vdevs);
+ stats->stats_id = WMI_REQUEST_VDEV_STAT;
+ list_add_tail(&dst->list, &stats->vdevs);
}
for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
const struct ath12k_wmi_bcn_stats_params *src;
@@ -7417,8 +8094,8 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
if (!dst)
continue;
ath12k_wmi_pull_bcn_stats(src, dst);
- stats.stats_id = WMI_REQUEST_BCN_STAT;
- list_add_tail(&dst->list, &stats.bcn);
+ stats->stats_id = WMI_REQUEST_BCN_STAT;
+ list_add_tail(&dst->list, &stats->bcn);
}
for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
const struct ath12k_wmi_pdev_stats_params *src;
@@ -7430,7 +8107,7 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
goto exit;
}
- stats.stats_id = WMI_REQUEST_PDEV_STAT;
+ stats->stats_id = WMI_REQUEST_PDEV_STAT;
data += sizeof(*src);
len -= sizeof(*src);
@@ -7442,11 +8119,9 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
- list_add_tail(&dst->list, &stats.pdevs);
+ list_add_tail(&dst->list, &stats->pdevs);
}
- complete(&ar->fw_stats_complete);
- ath12k_debugfs_fw_stats_process(ar, &stats);
exit:
rcu_read_unlock();
return ret;
@@ -7472,16 +8147,128 @@ static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
return ret;
}
+static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
+ struct ath12k_fw_stats *stats)
+{
+ struct wmi_tlv_fw_stats_parse parse = {};
+
+ stats->stats_id = 0;
+ parse.stats = stats;
+
+ return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_tlv_fw_stats_parse,
+ &parse);
+}
+
+static void ath12k_wmi_fw_stats_process(struct ath12k *ar,
+ struct ath12k_fw_stats *stats)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev;
+ bool is_end = true;
+ size_t total_vdevs_started = 0;
+ int i;
+
+ if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats->vdevs)) {
+ ath12k_warn(ab, "empty vdev stats");
+ return;
+ }
+ /* FW sends stats for all active VDEVs irrespective of PDEV,
+ * hence wait until stats for all started VDEVs are received
+ */
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+ rcu_read_unlock();
+
+ if (total_vdevs_started)
+ is_end = ((++ar->fw_stats.num_vdev_recvd) ==
+ total_vdevs_started);
+
+ list_splice_tail_init(&stats->vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+
+ return;
+ }
+
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
+ ath12k_warn(ab, "empty beacon stats");
+ return;
+ }
+ /* Mark the end once stats for all started VDEVs within
+ * the PDEV have been received
+ */
+ if (ar->num_started_vdevs)
+ is_end = ((++ar->fw_stats.num_bcn_recvd) ==
+ ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+ }
+}
+
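The completion accounting above releases the fw_stats_done waiter once the delivery counter matches the number of started vdevs across all pdevs (the counter in the driver assumes one counted delivery per started vdev). A stand-alone sketch of that counter with invented totals:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int total_vdevs_started = 3;
	unsigned int num_vdev_recvd = 0;
	bool is_end = true;

	for (int ev = 1; ev <= 3; ev++) {
		if (total_vdevs_started)
			is_end = (++num_vdev_recvd == total_vdevs_started);
		printf("delivery %d: is_end=%d\n", ev, is_end);
		if (is_end)
			printf("-> complete fw_stats_done\n");
	}
	return 0;
}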
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
+ struct ath12k_fw_stats stats = {};
+ struct ath12k *ar;
int ret;
- struct wmi_tlv_fw_stats_parse parse = {};
- ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
- ath12k_wmi_tlv_fw_stats_parse,
- &parse);
- if (ret)
- ath12k_warn(ab, "failed to parse fw stats %d\n", ret);
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ /* Handle WMI_REQUEST_PDEV_STAT status update */
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
+ /* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
+ ath12k_wmi_fw_stats_process(ar, &stats);
+
+complete:
+ complete(&ar->fw_stats_complete);
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+ /* Since the stats' pdev, vdev and beacon lists are spliced and
+ * reinitialised at this point, there is no need to free the individual lists.
+ */
+ return;
+
+free:
+ ath12k_fw_stats_free(&stats);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -8640,6 +9427,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
ath12k_wmi_process_tpc_stats(ab, skb);
break;
+ case WMI_11D_NEW_COUNTRY_EVENTID:
+ ath12k_reg_11d_new_cc_event(ab, skb);
+ break;
/* add Unsupported events (rare) here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
@@ -9643,3 +10433,290 @@ int ath12k_wmi_mlo_teardown(struct ath12k *ar)
return 0;
}
+
+bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
+{
+ return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
+}
+
+int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
+ u32 vdev_id,
+ struct ath12k_reg_tpc_power_info *param)
+{
+ struct wmi_vdev_set_tpc_power_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_ch_power_params *ch;
+ int i, ret, len, array_len;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ u8 *ptr;
+
+ array_len = sizeof(*ch) * param->num_pwr_levels;
+ len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->psd_power = cpu_to_le32(param->is_psd_power);
+ cmd->eirp_power = cpu_to_le32(param->eirp_power);
+ cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
+ vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
+
+ ptr += sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);
+
+ ptr += TLV_HDR_SIZE;
+ ch = (struct wmi_vdev_ch_power_params *)ptr;
+
+ for (i = 0; i < param->num_pwr_levels; i++, ch++) {
+ ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
+ sizeof(*ch));
+ ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
+ ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
+ param->chan_power_info[i].chan_cfreq,
+ param->chan_power_info[i].tx_power);
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
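A hypothetical caller sketch, not compilable on its own: ath12k_reg_tpc_power_info is defined in the driver's reg header (not part of this patch), the field names below are inferred from the usage in the function above, and IEEE80211_REG_LPI_AP merely stands in for whatever 6 GHz AP power type the caller actually holds.

/* All values are illustrative: program two 20 MHz PSD power levels */
struct ath12k_reg_tpc_power_info param = {
	.is_psd_power = true,
	.eirp_power = 24,			/* dBm */
	.ap_power_type = IEEE80211_REG_LPI_AP,	/* assumed enum value */
	.num_pwr_levels = 2,
};
int ret;

param.chan_power_info[0].chan_cfreq = 5955;	/* MHz */
param.chan_power_info[0].tx_power = 18;		/* dBm */
param.chan_power_info[1].chan_cfreq = 5975;
param.chan_power_info[1].tx_power = 18;

ret = ath12k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id, &param);
if (ret)
	ath12k_warn(ar->ab, "tpc power programming failed: %d\n", ret);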
+static int
+ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
+ struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
+ struct wmi_mlo_link_set_active_arg *arg)
+{
+ struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
+ u8 i;
+
+ if (arg->num_disallow_mode_comb >
+ ARRAY_SIZE(arg->disallow_bmap)) {
+ ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
+ arg->num_disallow_mode_comb);
+ return -EINVAL;
+ }
+
+ dislw_bmap_arg = &arg->disallow_bmap[0];
+ for (i = 0; i < arg->num_disallow_mode_comb; i++) {
+ dislw_bmap->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
+ dislw_bmap->disallowed_mode_bitmap =
+ cpu_to_le32(dislw_bmap_arg->disallowed_mode);
+ dislw_bmap->ieee_link_id_comb =
+ le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
+ WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
+ le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
+ WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
+ le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
+ WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
+ le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
+ WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
+ i, dislw_bmap_arg->disallowed_mode,
+ dislw_bmap_arg->ieee_link_id_comb);
+ dislw_bmap++;
+ dislw_bmap_arg++;
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
+ struct wmi_mlo_link_set_active_arg *arg)
+{
+ struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
+ struct wmi_mlo_set_active_link_number_params *link_num_param;
+ u32 num_link_num_param = 0, num_vdev_bitmap = 0;
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_mlo_link_set_active_cmd *cmd;
+ u32 num_inactive_vdev_bitmap = 0;
+ u32 num_disallow_mode_comb = 0;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ __le32 *vdev_bitmap;
+ void *buf_ptr;
+ int i, ret;
+ u32 len;
+
+ if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
+ ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
+ return -EINVAL;
+ }
+
+ switch (arg->force_mode) {
+ case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
+ case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
+ num_link_num_param = arg->num_link_entry;
+ fallthrough;
+ case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
+ case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
+ case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
+ num_vdev_bitmap = arg->num_vdev_bitmap;
+ break;
+ case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
+ num_vdev_bitmap = arg->num_vdev_bitmap;
+ num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
+ break;
+ default:
+ ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
+ return -EINVAL;
+ }
+
+ num_disallow_mode_comb = arg->num_disallow_mode_comb;
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
+ TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
+ TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
+ TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
+ if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
+ len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
+ sizeof(*cmd));
+ cmd->force_mode = cpu_to_le32(arg->force_mode);
+ cmd->reason = cpu_to_le32(arg->reason);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
+ arg->force_mode, arg->reason, num_link_num_param,
+ num_vdev_bitmap, num_inactive_vdev_bitmap,
+ num_disallow_mode_comb);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*link_num_param) * num_link_num_param);
+ buf_ptr += TLV_HDR_SIZE;
+
+ if (num_link_num_param) {
+ cmd->ctrl_flags =
+ le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
+ CRTL_F_DYNC_FORCE_LINK_NUM);
+
+ link_num_param = buf_ptr;
+ for (i = 0; i < num_link_num_param; i++) {
+ link_num_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
+ link_num_param->num_of_link =
+ cpu_to_le32(arg->link_num[i].num_of_link);
+ link_num_param->vdev_type =
+ cpu_to_le32(arg->link_num[i].vdev_type);
+ link_num_param->vdev_subtype =
+ cpu_to_le32(arg->link_num[i].vdev_subtype);
+ link_num_param->home_freq =
+ cpu_to_le32(arg->link_num[i].home_freq);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
+ i, arg->link_num[i].num_of_link,
+ arg->link_num[i].vdev_type,
+ arg->link_num[i].vdev_subtype,
+ arg->link_num[i].home_freq,
+ __le32_to_cpu(cmd->ctrl_flags));
+ link_num_param++;
+ }
+
+ buf_ptr += sizeof(*link_num_param) * num_link_num_param;
+ }
+
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
+ sizeof(*vdev_bitmap) * num_vdev_bitmap);
+ buf_ptr += TLV_HDR_SIZE;
+
+ if (num_vdev_bitmap) {
+ vdev_bitmap = buf_ptr;
+ for (i = 0; i < num_vdev_bitmap; i++) {
+ vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
+ i, arg->vdev_bitmap[i]);
+ }
+
+ buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
+ }
+
+ if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
+ sizeof(*vdev_bitmap) *
+ num_inactive_vdev_bitmap);
+ buf_ptr += TLV_HDR_SIZE;
+
+ if (num_inactive_vdev_bitmap) {
+ vdev_bitmap = buf_ptr;
+ for (i = 0; i < num_inactive_vdev_bitmap; i++) {
+ vdev_bitmap[i] =
+ cpu_to_le32(arg->inactive_vdev_bitmap[i]);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "entry %d inactive_vdev_id_bitmap 0x%x",
+ i, arg->inactive_vdev_bitmap[i]);
+ }
+
+ buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
+ }
+ } else {
+ /* add empty vdev bitmap2 tlv */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+ buf_ptr += TLV_HDR_SIZE;
+ }
+
+ /* add empty ieee_link_id_bitmap tlv */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+ buf_ptr += TLV_HDR_SIZE;
+
+ /* add empty ieee_link_id_bitmap2 tlv */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+ buf_ptr += TLV_HDR_SIZE;
+
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*disallowed_mode_bmap) *
+ arg->num_disallow_mode_comb);
+ buf_ptr += TLV_HDR_SIZE;
+
+ ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
+ if (ret)
+ goto free_skb;
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
+ goto free_skb;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");
+
+ return ret;
+
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
+}
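Stand-alone sketch of the ieee_link_id_comb packing done in ath12k_wmi_fill_disallowed_bmap(): four 8-bit link IDs packed LSB-first into one 32-bit word, with plain shifts standing in for le32_encode_bits() and the WMI_DISALW_* field masks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t id[4] = { 1, 2, 5, 15 };	/* invented link IDs */
	uint32_t comb;

	comb = (uint32_t)id[0] |		/* COMB_1: bits 7:0   */
	       (uint32_t)id[1] << 8 |		/* COMB_2: bits 15:8  */
	       (uint32_t)id[2] << 16 |		/* COMB_3: bits 23:16 */
	       (uint32_t)id[3] << 24;		/* COMB_4: bits 31:24 */

	printf("ieee_link_id_comb 0x%08x\n", comb);	/* 0x0f050201 */
	return 0;
}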
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 1ba33e30ddd2..c640ffa180c8 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -26,6 +26,7 @@ struct ath12k_base;
struct ath12k;
struct ath12k_link_vif;
struct ath12k_fw_stats;
+struct ath12k_reg_tpc_power_info;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -216,9 +217,9 @@ enum wmi_host_hw_mode_priority {
};
enum WMI_HOST_WLAN_BAND {
- WMI_HOST_WLAN_2G_CAP = 1,
- WMI_HOST_WLAN_5G_CAP = 2,
- WMI_HOST_WLAN_2G_5G_CAP = 3,
+ WMI_HOST_WLAN_2GHZ_CAP = 1,
+ WMI_HOST_WLAN_5GHZ_CAP = 2,
+ WMI_HOST_WLAN_2GHZ_5GHZ_CAP = 3,
};
enum wmi_cmd_group {
@@ -386,6 +387,22 @@ enum wmi_tlv_cmd_id {
WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_VDEV_SET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_TX_POWER_CMDID,
+ WMI_VDEV_LIMIT_OFFCHAN_CMDID,
+ WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
+ WMI_VDEV_CHAINMASK_CONFIG_CMDID,
+ WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
+ WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
+ WMI_VDEV_DELETE_ALL_PEER_CMDID,
+ WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
+ WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
+ WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
+ WMI_VDEV_SET_PCL_CMDID,
+ WMI_VDEV_GET_BIG_DATA_CMDID,
+ WMI_VDEV_GET_BIG_DATA_P2_CMDID,
+ WMI_VDEV_SET_TPC_POWER_CMDID,
WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
WMI_PEER_DELETE_CMDID,
WMI_PEER_FLUSH_TIDS_CMDID,
@@ -1955,6 +1972,9 @@ enum wmi_tlv_tag {
WMI_TAG_TPC_STATS_REG_PWR_ALLOWED,
WMI_TAG_TPC_STATS_RATES_ARRAY,
WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
+ WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
+ WMI_TAG_VDEV_CH_POWER_INFO,
+ WMI_TAG_MLO_LINK_SET_ACTIVE_CMD = 0x3BE,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
@@ -2201,6 +2221,8 @@ enum wmi_tlv_service {
WMI_MAX_EXT_SERVICE = 256,
+ WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
+
WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
WMI_TLV_SERVICE_11BE = 289,
@@ -2461,6 +2483,7 @@ struct wmi_init_cmd {
} __packed;
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+#define WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT 12
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
@@ -2595,6 +2618,8 @@ struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params {
__le32 num_chainmask_tables;
} __packed;
+#define WMI_HW_MODE_CAP_CFG_TYPE GENMASK(27, 0)
+
struct ath12k_wmi_hw_mode_cap_params {
__le32 tlv_header;
__le32 hw_mode_id;
@@ -2644,6 +2669,12 @@ struct ath12k_wmi_mac_phy_caps_params {
__le32 he_cap_info_2g_ext;
__le32 he_cap_info_5g_ext;
__le32 he_cap_info_internal;
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan_freq;
+ __le32 high_2ghz_chan_freq;
+ __le32 low_5ghz_chan_freq;
+ __le32 high_5ghz_chan_freq;
+ __le32 nss_ratio;
} __packed;
struct ath12k_wmi_hal_reg_caps_ext_params {
@@ -2690,8 +2721,8 @@ enum wmi_channel_width {
* 2 - index for 160 MHz, first 3 bytes valid
* 3 - index for 320 MHz, first 3 bytes valid
*/
-#define WMI_MAX_EHT_SUPP_MCS_2G_SIZE 2
-#define WMI_MAX_EHT_SUPP_MCS_5G_SIZE 4
+#define WMI_MAX_EHT_SUPP_MCS_2GHZ_SIZE 2
+#define WMI_MAX_EHT_SUPP_MCS_5GHZ_SIZE 4
#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_80 0
#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_160 1
@@ -2717,6 +2748,11 @@ struct wmi_service_ready_ext2_event {
__le32 default_num_msduq_supported_per_tid;
} __packed;
+struct ath12k_wmi_dbs_or_sbs_cap_params {
+ __le32 hw_mode_id;
+ __le32 sbs_lower_band_end_freq;
+} __packed;
+
struct ath12k_wmi_caps_ext_params {
__le32 hw_mode_id;
__le32 pdev_and_hw_link_ids;
@@ -2730,8 +2766,8 @@ struct ath12k_wmi_caps_ext_params {
struct ath12k_wmi_ppe_threshold_params eht_ppet_2ghz;
struct ath12k_wmi_ppe_threshold_params eht_ppet_5ghz;
__le32 eht_cap_info_internal;
- __le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE];
- __le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE];
+ __le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2GHZ_SIZE];
+ __le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5GHZ_SIZE];
__le32 eml_capability;
__le32 mld_capability;
} __packed;
@@ -3754,6 +3790,7 @@ struct peer_assoc_mlo_params {
u32 ieee_link_id;
u8 num_partner_links;
struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+ u16 eml_cap;
};
struct wmi_rate_set_arg {
@@ -3832,6 +3869,7 @@ struct ath12k_wmi_peer_assoc_arg {
u32 punct_bitmap;
bool is_assoc;
struct peer_assoc_mlo_params ml;
+ bool eht_disable_mcs15;
};
#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
@@ -4026,6 +4064,28 @@ struct wmi_init_country_cmd {
} cc_info;
} __packed;
+struct wmi_11d_scan_start_arg {
+ u32 vdev_id;
+ u32 scan_period_msec;
+ u32 start_interval_msec;
+};
+
+struct wmi_11d_scan_start_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 scan_period_msec;
+ __le32 start_interval_msec;
+} __packed;
+
+struct wmi_11d_scan_stop_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_11d_new_cc_event {
+ __le32 new_alpha2;
+} __packed;
+
struct wmi_delba_send_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -4108,7 +4168,17 @@ struct ath12k_wmi_eht_rate_set_params {
#define MAX_REG_RULES 10
#define REG_ALPHA2_LEN 2
-#define MAX_6G_REG_RULES 5
+#define MAX_6GHZ_REG_RULES 5
+
+struct wmi_set_current_country_arg {
+ u8 alpha2[REG_ALPHA2_LEN];
+};
+
+struct wmi_set_current_country_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 new_alpha2;
+} __packed;
enum wmi_start_event_param {
WMI_VDEV_START_RESP_EVENT = 0,
@@ -4129,6 +4199,7 @@ struct wmi_vdev_start_resp_event {
};
__le32 cfgd_tx_streams;
__le32 cfgd_rx_streams;
+ __le32 max_allowed_tx_power;
} __packed;
/* VDEV start response status codes */
@@ -4474,6 +4545,7 @@ struct ath12k_wmi_target_cap_arg {
};
enum wmi_vdev_type {
+ WMI_VDEV_TYPE_UNSPEC = 0,
WMI_VDEV_TYPE_AP = 1,
WMI_VDEV_TYPE_STA = 2,
WMI_VDEV_TYPE_IBSS = 3,
@@ -4991,6 +5063,53 @@ struct ath12k_wmi_pdev {
u32 rx_decap_mode;
};
+struct ath12k_hw_mode_freq_range_arg {
+ u32 low_2ghz_freq;
+ u32 high_2ghz_freq;
+ u32 low_5ghz_freq;
+ u32 high_5ghz_freq;
+};
+
+struct ath12k_svc_ext_mac_phy_info {
+ enum wmi_host_hw_mode_config_type hw_mode_config_type;
+ u32 phy_id;
+ u32 supported_bands;
+ struct ath12k_hw_mode_freq_range_arg hw_freq_range;
+};
+
+#define ATH12K_MAX_MAC_PHY_CAP 8
+
+struct ath12k_svc_ext_info {
+ u32 num_hw_modes;
+ struct ath12k_svc_ext_mac_phy_info mac_phy_info[ATH12K_MAX_MAC_PHY_CAP];
+};
+
+/**
+ * enum ath12k_hw_mode - enum for host mode
+ * @ATH12K_HW_MODE_SMM: Single mac mode
+ * @ATH12K_HW_MODE_DBS: DBS mode
+ * @ATH12K_HW_MODE_SBS: SBS mode with either high share or low share
+ * @ATH12K_HW_MODE_SBS_UPPER_SHARE: Higher 5 GHz shared with 2.4 GHz
+ * @ATH12K_HW_MODE_SBS_LOWER_SHARE: Lower 5 GHz shared with 2.4 GHz
+ * @ATH12K_HW_MODE_MAX: Max, used to indicate invalid mode
+ */
+enum ath12k_hw_mode {
+ ATH12K_HW_MODE_SMM,
+ ATH12K_HW_MODE_DBS,
+ ATH12K_HW_MODE_SBS,
+ ATH12K_HW_MODE_SBS_UPPER_SHARE,
+ ATH12K_HW_MODE_SBS_LOWER_SHARE,
+ ATH12K_HW_MODE_MAX,
+};
+
+struct ath12k_hw_mode_info {
+ bool support_dbs:1;
+ bool support_sbs:1;
+
+ struct ath12k_hw_mode_freq_range_arg freq_range_caps[ATH12K_HW_MODE_MAX]
+ [MAX_RADIOS];
+};
+
struct ath12k_wmi_base {
struct ath12k_base *ab;
struct ath12k_wmi_pdev wmi[MAX_RADIOS];
@@ -5008,6 +5127,10 @@ struct ath12k_wmi_base {
enum wmi_host_hw_mode_config_type preferred_hw_mode;
struct ath12k_wmi_target_cap_arg *targ_cap;
+
+ struct ath12k_svc_ext_info svc_ext_info;
+ u32 sbs_lower_band_end_freq;
+ struct ath12k_hw_mode_info hw_mode_info;
};
struct wmi_pdev_set_bios_interface_cmd {
@@ -5904,6 +6027,153 @@ struct wmi_tpc_stats_arg {
struct wmi_tpc_ctl_pwr_table_arg ctl_array;
};
+struct wmi_vdev_ch_power_params {
+ __le32 tlv_header;
+
+ /* Channel center frequency (MHz) */
+ __le32 chan_cfreq;
+
+ /* Unit: dBm, either PSD/EIRP power for this frequency or
+ * incremental for non-PSD BW
+ */
+ __le32 tx_power;
+} __packed;
+
+struct wmi_vdev_set_tpc_power_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+
+ /* Value: 0 or 1, is PSD power or not */
+ __le32 psd_power;
+
+ /* Maximum EIRP power (dBm units), valid only if power is PSD */
+ __le32 eirp_power;
+
+ /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
+ __le32 power_type_6ghz;
+
+ /* This fixed_param TLV is followed by num_pwr_levels entries of
+ * wmi_vdev_ch_power_info.
+ * For PSD power, each entry is the PSD/EIRP power of one 20 MHz chunk.
+ * For non-PSD power, the entries carry the power values for 20 MHz,
+ * 40 MHz, and so on up to the BSS bandwidth.
+ * Software derives num_pwr_levels from the number of elements present
+ * in the variable-length array.
+ */
+} __packed;
+
+#define CRTL_F_DYNC_FORCE_LINK_NUM GENMASK(3, 2)
+
+struct wmi_mlo_link_set_active_cmd {
+ __le32 tlv_header;
+ __le32 force_mode;
+ __le32 reason;
+ __le32 use_ieee_link_id_bitmap;
+ struct ath12k_wmi_mac_addr_params ap_mld_mac_addr;
+ __le32 ctrl_flags;
+} __packed;
+
+struct wmi_mlo_set_active_link_number_params {
+ __le32 tlv_header;
+ __le32 num_of_link;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ __le32 home_freq;
+} __packed;
+
+#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1 GENMASK(7, 0)
+#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2 GENMASK(15, 8)
+#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3 GENMASK(23, 16)
+#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4 GENMASK(31, 24)
+
+struct wmi_disallowed_mlo_mode_bitmap_params {
+ __le32 tlv_header;
+ __le32 disallowed_mode_bitmap;
+ __le32 ieee_link_id_comb;
+} __packed;
+
+enum wmi_mlo_link_force_mode {
+ WMI_MLO_LINK_FORCE_MODE_ACTIVE = 1,
+ WMI_MLO_LINK_FORCE_MODE_INACTIVE = 2,
+ WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM = 3,
+ WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM = 4,
+ WMI_MLO_LINK_FORCE_MODE_NO_FORCE = 5,
+ WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE = 6,
+ WMI_MLO_LINK_FORCE_MODE_NON_FORCE_UPDATE = 7,
+};
+
+enum wmi_mlo_link_force_reason {
+ WMI_MLO_LINK_FORCE_REASON_NEW_CONNECT = 1,
+ WMI_MLO_LINK_FORCE_REASON_NEW_DISCONNECT = 2,
+ WMI_MLO_LINK_FORCE_REASON_LINK_REMOVAL = 3,
+ WMI_MLO_LINK_FORCE_REASON_TDLS = 4,
+ WMI_MLO_LINK_FORCE_REASON_REVERT_FAILURE = 5,
+ WMI_MLO_LINK_FORCE_REASON_LINK_DELETE = 6,
+ WMI_MLO_LINK_FORCE_REASON_SINGLE_LINK_EMLSR_OP = 7,
+};
+
+struct wmi_mlo_link_num_arg {
+ u32 num_of_link;
+ u32 vdev_type;
+ u32 vdev_subtype;
+ u32 home_freq;
+};
+
+struct wmi_mlo_control_flags_arg {
+ bool overwrite_force_active_bitmap;
+ bool overwrite_force_inactive_bitmap;
+ bool dync_force_link_num;
+ bool post_re_evaluate;
+ u8 post_re_evaluate_loops;
+ bool dont_reschedule_workqueue;
+};
+
+struct wmi_ml_link_force_cmd_arg {
+ u8 ap_mld_mac_addr[ETH_ALEN];
+ u16 ieee_link_id_bitmap;
+ u16 ieee_link_id_bitmap2;
+ u8 link_num;
+};
+
+struct wmi_ml_disallow_mode_bmap_arg {
+ u32 disallowed_mode;
+ union {
+ u32 ieee_link_id_comb;
+ u8 ieee_link_id[4];
+ };
+};
+
+/* maximum size of link number param array
+ * for MLO link set active command
+ */
+#define WMI_MLO_LINK_NUM_SZ 2
+
+/* maximum size of vdev bitmap array for
+ * MLO link set active command
+ */
+#define WMI_MLO_VDEV_BITMAP_SZ 2
+
+/* Max number of disallowed bitmap combinations
+ * sent to firmware
+ */
+#define WMI_ML_MAX_DISALLOW_BMAP_COMB 4
+
+struct wmi_mlo_link_set_active_arg {
+ enum wmi_mlo_link_force_mode force_mode;
+ enum wmi_mlo_link_force_reason reason;
+ u32 num_link_entry;
+ u32 num_vdev_bitmap;
+ u32 num_inactive_vdev_bitmap;
+ struct wmi_mlo_link_num_arg link_num[WMI_MLO_LINK_NUM_SZ];
+ u32 vdev_bitmap[WMI_MLO_VDEV_BITMAP_SZ];
+ u32 inactive_vdev_bitmap[WMI_MLO_VDEV_BITMAP_SZ];
+ struct wmi_mlo_control_flags_arg ctrl_flags;
+ bool use_ieee_link_id;
+ struct wmi_ml_link_force_cmd_arg force_cmd;
+ u32 num_disallow_mode_comb;
+ struct wmi_ml_disallow_mode_bmap_arg disallow_bmap[WMI_ML_MAX_DISALLOW_BMAP_COMB];
+};
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -5990,11 +6260,17 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
u32 vdev_id, u32 bcn_ctrl_op);
int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
struct ath12k_wmi_init_country_arg *arg);
+int
+ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
+ struct wmi_set_current_country_arg *arg);
int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
int vdev_id, const u8 *addr,
dma_addr_t paddr, u8 tid,
u8 ba_window_size_valid,
u32 ba_window_size);
+int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
+ struct wmi_11d_scan_start_arg *arg);
+int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id);
int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
struct ath12k_wmi_rx_reorder_queue_remove_arg *arg);
@@ -6092,5 +6368,10 @@ int ath12k_wmi_mlo_teardown(struct ath12k *ar);
void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
struct ath12k_fw_stats *fw_stats, u32 stats_id,
char *buf);
-
+bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar);
+int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
+ u32 vdev_id,
+ struct ath12k_reg_tpc_power_info *param);
+int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
+ struct wmi_mlo_link_set_active_arg *param);
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index af98e871199d..5a9e93fd1ef4 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
* We need to do some backwards compatibility to make this work.
*/
if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
- WARN_ON(1);
+ ath6kl_err("mismatched byte count %d vs. expected %zd\n",
+ le32_to_cpu(targ_info->byte_count),
+ sizeof(*targ_info));
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 867089a3c096..d62b96459751 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -503,7 +503,7 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
void disconnect_timer_handler(struct timer_list *t)
{
- struct ath6kl_vif *vif = from_timer(vif, t, disconnect_timer);
+ struct ath6kl_vif *vif = timer_container_of(vif, t, disconnect_timer);
ath6kl_init_profile_info(vif);
ath6kl_disconnect(vif);
diff --git a/drivers/net/wireless/ath/ath6kl/recovery.c b/drivers/net/wireless/ath/ath6kl/recovery.c
index fd2dceb8b63d..43186c193df1 100644
--- a/drivers/net/wireless/ath/ath6kl/recovery.c
+++ b/drivers/net/wireless/ath/ath6kl/recovery.c
@@ -62,7 +62,7 @@ void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie)
static void ath6kl_recovery_hb_timer(struct timer_list *t)
{
- struct ath6kl *ar = from_timer(ar, t, fw_recovery.hb_timer);
+ struct ath6kl *ar = timer_container_of(ar, t, fw_recovery.hb_timer);
int err;
if (test_bit(RECOVERY_CLEANUP, &ar->flag) ||
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 3a6f0b647e17..c3b06b515c4f 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1623,7 +1623,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
static void aggr_timeout(struct timer_list *t)
{
u8 i, j;
- struct aggr_info_conn *aggr_conn = from_timer(aggr_conn, t, timer);
+ struct aggr_info_conn *aggr_conn = timer_container_of(aggr_conn, t,
+ timer);
struct rxtid *rxtid;
struct rxtid_stats *stats;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 3787b9fb0075..84317afe4651 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1078,7 +1078,7 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
void ath6kl_wmi_sscan_timer(struct timer_list *t)
{
- struct ath6kl_vif *vif = from_timer(vif, t, sched_scan_timer);
+ struct ath6kl_vif *vif = timer_container_of(vif, t, sched_scan_timer);
cfg80211_sched_scan_results(vif->ar->wiphy, 0);
}
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index d4805e02b927..49b7ab26c477 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -74,7 +74,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
void __iomem *mem;
struct ath_softc *sc;
struct ieee80211_hw *hw;
- struct resource *res;
const struct platform_device_id *id = platform_get_device_id(pdev);
int irq;
int ret = 0;
@@ -86,16 +85,10 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource found\n");
- return -ENXIO;
- }
-
- mem = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (mem == NULL) {
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem)) {
dev_err(&pdev->dev, "ioremap failed\n");
- return -ENOMEM;
+ return PTR_ERR(mem);
}
irq = platform_get_irq(pdev, 0);
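For reference, devm_platform_ioremap_resource() used above folds the platform_get_resource()/devm_ioremap() pair into a single call that returns an ERR_PTR() on failure; it also requests the memory region first, which the replaced open-coded version did not. A sketch of the resulting probe shape (hypothetical driver):

    static int my_probe(struct platform_device *pdev)
    {
            void __iomem *mem;

            /* look up IORESOURCE_MEM index 0, request and map it */
            mem = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(mem))
                    return PTR_ERR(mem);

            /* mapping is released automatically on driver detach */
            return 0;
    }

The helper already logs an error on failure, so the dev_err() kept in the ath9k hunk is arguably redundant but harmless.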
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 799be0be24f4..121e51ce1bc0 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1040,7 +1040,7 @@ static void ath_scan_channel_start(struct ath_softc *sc)
static void ath_chanctx_timer(struct timer_list *t)
{
- struct ath_softc *sc = from_timer(sc, t, sched.timer);
+ struct ath_softc *sc = timer_container_of(sc, t, sched.timer);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
ath_dbg(common, CHAN_CTX,
@@ -1051,7 +1051,7 @@ static void ath_chanctx_timer(struct timer_list *t)
static void ath_offchannel_timer(struct timer_list *t)
{
- struct ath_softc *sc = from_timer(sc, t, offchannel.timer);
+ struct ath_softc *sc = timer_container_of(sc, t, offchannel.timer);
struct ath_chanctx *ctx;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 5a26f1d05f04..2dbc7efdd637 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -193,7 +193,7 @@ static void ath_mci_ftp_adjust(struct ath_softc *sc)
*/
static void ath_btcoex_period_timer(struct timer_list *t)
{
- struct ath_softc *sc = from_timer(sc, t, btcoex.period_timer);
+ struct ath_softc *sc = timer_container_of(sc, t, btcoex.period_timer);
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
enum ath_stomp_type stomp_type;
@@ -254,7 +254,8 @@ skip_hw_wakeup:
*/
static void ath_btcoex_no_stomp_timer(struct timer_list *t)
{
- struct ath_softc *sc = from_timer(sc, t, btcoex.no_stomp_timer);
+ struct ath_softc *sc = timer_container_of(sc, t,
+ btcoex.no_stomp_timer);
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 547634f82183..81fa7cbad892 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -290,6 +290,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
struct ath_common *common = ath9k_hw_common(priv->ah);
int slot;
+ if (!priv->cur_beacon_conf.enable_beacon)
+ return;
+
if (swba->beacon_pending != 0) {
priv->beacon.bmisscnt++;
if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index ce9c04e418b8..ee5945cfc10e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -760,7 +760,8 @@ static void ath9k_htc_tx_cleanup_queue(struct ath9k_htc_priv *priv,
void ath9k_htc_tx_cleanup_timer(struct timer_list *t)
{
- struct ath9k_htc_priv *priv = from_timer(priv, t, tx.cleanup_timer);
+ struct ath9k_htc_priv *priv = timer_container_of(priv, t,
+ tx.cleanup_timer);
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_tx_event *event, *tmp;
struct sk_buff *skb;
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 7f890997bb53..5d7e3ddb6dbc 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -332,7 +332,7 @@ fail_paprd:
*/
void ath_ani_calibrate(struct timer_list *t)
{
- struct ath_common *common = from_timer(common, t, ani.timer);
+ struct ath_common *common = timer_container_of(common, t, ani.timer);
struct ath_softc *sc = common->priv;
struct ath_hw *ah = sc->sc_ah;
bool longcal = false;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 92fc5e3d756e..c56f4f3b8990 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -98,7 +98,7 @@ static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
void ath_ps_full_sleep(struct timer_list *t)
{
- struct ath_softc *sc = from_timer(sc, t, sleep_timer);
+ struct ath_softc *sc = timer_container_of(sc, t, sleep_timer);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
unsigned long flags;
bool reset;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 0226c31a6cae..b7717f9e1e9b 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -366,8 +366,7 @@ static void carl9170_tx_shift_bm(struct ar9170 *ar,
if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
return;
- if (!bitmap_empty(tid_info->bitmap, off))
- off = find_first_bit(tid_info->bitmap, off);
+ off = min(off, find_first_bit(tid_info->bitmap, off));
tid_info->bsn += off;
tid_info->bsn &= 0x0fff;
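The carl9170 simplification above relies on find_first_bit() returning its size argument when no bit below it is set, so a single min() subsumes the bitmap_empty() guard:

    /* equivalent to the removed form:
     *      if (!bitmap_empty(tid_info->bitmap, off))
     *              off = find_first_bit(tid_info->bitmap, off);
     * because find_first_bit(bitmap, off) == off when no bit is set
     */
    off = min(off, find_first_bit(tid_info->bitmap, off));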
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index a3e03580cd9f..564ca6a61985 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -438,14 +438,21 @@ static void carl9170_usb_rx_complete(struct urb *urb)
if (atomic_read(&ar->rx_anch_urbs) == 0) {
/*
- * The system is too slow to cope with
- * the enormous workload. We have simply
- * run out of active rx urbs and this
- * unfortunately leads to an unpredictable
- * device.
+ * At this point, either the system is too slow to
+ * cope with the enormous workload (so we have simply
+ * run out of active rx urbs and this unfortunately
+ * leads to an unpredictable device), or the device
+ * is not fully functional after an unsuccessful
+ * firmware loading attempt (so it doesn't pass
+ * ieee80211_register_hw() and there is no internal
+ * workqueue at all).
*/
- ieee80211_queue_work(ar->hw, &ar->ping_work);
+ if (ar->registered)
+ ieee80211_queue_work(ar->hw, &ar->ping_work);
+ else
+ pr_warn_once("device %s is not registered\n",
+ dev_name(&ar->udev->dev));
}
} else {
/*
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index cc2a033e87f5..0f4df5585fd9 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -373,7 +373,7 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
static void wcn36xx_dxe_tx_timer(struct timer_list *t)
{
- struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
+ struct wcn36xx *wcn = timer_container_of(wcn, t, tx_ack_timer);
struct ieee80211_tx_info *info;
unsigned long flags;
struct sk_buff *skb;
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c
index e5142c052985..d7a2a483cbc4 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.c
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.c
@@ -56,7 +56,7 @@ static int wcn36xx_tm_cmd_ptt(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg = buf;
wcn36xx_dbg(WCN36XX_DBG_TESTMODE,
- "testmode cmd wmi msg_id 0x%04X msg_len %d buf %pK buf_len %d\n",
+ "testmode cmd wmi msg_id 0x%04X msg_len %d buf %p buf_len %d\n",
msg->msg_id, msg->msg_body_length,
buf, buf_len);
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 67172385a5d6..89d4394cedcf 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -179,9 +179,11 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
- wil6210_mask_irq_tx_edma(wil);
+ if (wil->use_enhanced_dma_hw)
+ wil6210_mask_irq_tx_edma(wil);
wil6210_mask_irq_rx(wil);
- wil6210_mask_irq_rx_edma(wil);
+ if (wil->use_enhanced_dma_hw)
+ wil6210_mask_irq_rx_edma(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@@ -190,10 +192,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "unmask_irq\n");
- wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
- WIL_ICR_ICC_VALUE);
- wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
- WIL_ICR_ICC_VALUE);
+ if (wil->use_enhanced_dma_hw) {
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ }
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_MISC_VALUE);
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
@@ -845,10 +849,12 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
- wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ if (wil->use_enhanced_dma_hw) {
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ }
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
wmb(); /* make sure write completed */
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 59884e8e3765..55c267ab2893 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -227,7 +227,7 @@ static void wil_ndev_destructor(struct net_device *ndev)
static void wil_connect_timer_fn(struct timer_list *t)
{
- struct wil6210_vif *vif = from_timer(vif, t, connect_timer);
+ struct wil6210_vif *vif = timer_container_of(vif, t, connect_timer);
struct wil6210_priv *wil = vif_to_wil(vif);
bool q;
@@ -243,7 +243,7 @@ static void wil_connect_timer_fn(struct timer_list *t)
static void wil_scan_timer_fn(struct timer_list *t)
{
- struct wil6210_vif *vif = from_timer(vif, t, scan_timer);
+ struct wil6210_vif *vif = timer_container_of(vif, t, scan_timer);
struct wil6210_priv *wil = vif_to_wil(vif);
clear_bit(wil_status_fwready, wil->status);
@@ -253,7 +253,8 @@ static void wil_scan_timer_fn(struct timer_list *t)
static void wil_p2p_discovery_timer_fn(struct timer_list *t)
{
- struct wil6210_vif *vif = from_timer(vif, t, p2p.discovery_timer);
+ struct wil6210_vif *vif = timer_container_of(vif, t,
+ p2p.discovery_timer);
struct wil6210_priv *wil = vif_to_wil(vif);
wil_dbg_misc(wil, "p2p_discovery_timer_fn\n");
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 689f68d89a44..fff8b7f8abc5 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -7,6 +7,7 @@
#ifndef WIL6210_TXRX_H
#define WIL6210_TXRX_H
+#include <net/sock.h>
#include "wil6210.h"
#include "txrx_edma.h"
@@ -616,8 +617,7 @@ static inline bool wil_need_txstat(struct sk_buff *skb)
{
const u8 *da = wil_skb_get_da(skb);
- return is_unicast_ether_addr(da) && skb->sk &&
- (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+ return is_unicast_ether_addr(da) && sk_requests_wifi_status(skb->sk);
}
static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
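The wil6210 hunk above depends on the sk_requests_wifi_status() helper that this cycle adds to <net/sock.h> (hence the new include). Its approximate shape, stated here as an assumption rather than a verbatim copy:

    /* hedged sketch of the net/sock.h helper */
    static inline bool sk_requests_wifi_status(struct sock *sk)
    {
            return sk && sk_fullsock(sk) &&
                   sock_flag(sk, SOCK_WIFI_STATUS);
    }

The test moves from the per-skb SKBTX_WIFI_STATUS tx flag to the socket-level SOCK_WIFI_STATUS flag; in practice callers get an equivalent answer, since the skb flag is derived from that socket option in the first place.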
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index e0de34a3e43a..69ef8cf203d2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -272,7 +272,8 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
*/
static void brcmf_btcoex_timerfunc(struct timer_list *t)
{
- struct brcmf_btcoex_info *bt_local = from_timer(bt_local, t, timer);
+ struct brcmf_btcoex_info *bt_local = timer_container_of(bt_local, t,
+ timer);
brcmf_dbg(TRACE, "enter\n");
bt_local->timer_on = false;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 4b70845e1a26..b94c3619526c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -74,7 +74,6 @@
#define VNDR_IE_HDR_SIZE 12
#define VNDR_IE_PARSE_LIMIT 5
-#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */
#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
@@ -1945,17 +1944,22 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_cfg80211_security *sec;
- s32 val = 0;
- s32 err = 0;
+ s32 val;
+ s32 err;
- if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
- else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
- val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
- else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3)
+ } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
+ if (drvr->bus_if->fwvid == BRCMF_FWVENDOR_CYW &&
+ sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_SAE)
+ val = WPA3_AUTH_SAE_PSK;
+ else
+ val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+ } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3) {
val = WPA3_AUTH_SAE_PSK;
- else
+ } else {
val = WPA_AUTH_DISABLED;
+ }
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", val);
if (err) {
@@ -2163,28 +2167,25 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_SAE:
val = WPA3_AUTH_SAE_PSK;
- if (sme->crypto.sae_pwd) {
- brcmf_dbg(INFO, "using SAE offload\n");
- profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
- }
break;
case WLAN_AKM_SUITE_FT_OVER_SAE:
val = WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT;
profile->is_ft = true;
- if (sme->crypto.sae_pwd) {
- brcmf_dbg(INFO, "using SAE offload\n");
- profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
- }
break;
default:
bphy_err(drvr, "invalid akm suite (%d)\n",
sme->crypto.akm_suites[0]);
return -EINVAL;
}
+ if (sme->crypto.sae_pwd)
+ profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
}
if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X)
brcmf_dbg(INFO, "using 1X offload\n");
+ if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE)
+ brcmf_dbg(INFO, "using SAE offload\n");
if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
goto skip_mfp_config;
@@ -2221,7 +2222,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "mfp", mfp);
skip_mfp_config:
- brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
+ brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
if (err) {
bphy_err(drvr, "could not set wpa_auth (%d)\n", err);
@@ -3556,7 +3557,7 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
static void brcmf_escan_timeout(struct timer_list *t)
{
struct brcmf_cfg80211_info *cfg =
- from_timer(cfg, t, escan_timeout);
+ timer_container_of(cfg, t, escan_timeout);
struct brcmf_pub *drvr = cfg->pub;
if (cfg->int_escan_map || cfg->scan_request) {
@@ -5509,7 +5510,7 @@ brcmf_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
}
-static int
+int
brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
@@ -5616,6 +5617,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
exit:
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_cfg80211_mgmt_tx);
static int brcmf_cfg80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
struct net_device *ndev,
@@ -6009,6 +6011,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
vif->wdev.wiphy = cfg->wiphy;
vif->wdev.iftype = type;
+ init_completion(&vif->mgmt_tx);
brcmf_init_prof(&vif->profile);
@@ -6760,6 +6763,8 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
brcmf_fweh_register(cfg->pub, BRCMF_E_PSK_SUP,
brcmf_notify_connect_status);
brcmf_fweh_register(cfg->pub, BRCMF_E_RSSI, brcmf_notify_rssi);
+
+ brcmf_fwvid_register_event_handlers(cfg->pub);
}
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
@@ -7346,6 +7351,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_P2P_CLIENT] = {
@@ -7653,6 +7659,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SAE_OFFLOAD_AP);
}
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE_EXT))
+ wiphy->features |= NL80211_FEATURE_SAE;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 2abae8894614..b83485ec7b87 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -13,6 +13,8 @@
#include "fwil_types.h"
#include "p2p.h"
+#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
+
#define BRCMF_SCAN_IE_LEN_MAX 2048
#define WL_NUM_SCAN_MAX 10
@@ -142,6 +144,21 @@ enum brcmf_profile_fwauth {
};
/**
+ * enum brcmf_mgmt_tx_status - mgmt frame tx status
+ *
+ * @BRCMF_MGMT_TX_ACK: mgmt frame acked
+ * @BRCMF_MGMT_TX_NOACK: mgmt frame not acked
+ * @BRCMF_MGMT_TX_OFF_CHAN_COMPLETED: off-channel complete
+ * @BRCMF_MGMT_TX_SEND_FRAME: mgmt frame tx is in progress
+ */
+enum brcmf_mgmt_tx_status {
+ BRCMF_MGMT_TX_ACK,
+ BRCMF_MGMT_TX_NOACK,
+ BRCMF_MGMT_TX_OFF_CHAN_COMPLETED,
+ BRCMF_MGMT_TX_SEND_FRAME
+};
+
+/**
* struct brcmf_cfg80211_profile - profile information.
*
* @bssid: bssid of joined/joining ibss.
@@ -211,6 +228,9 @@ struct vif_saved_ie {
* @profile: profile information.
* @sme_state: SME state using enum brcmf_vif_status bits.
* @list: linked list.
+ * @mgmt_tx: completion for management frame transmit.
+ * @mgmt_tx_status: status of last management frame sent to firmware.
+ * @mgmt_tx_id: packet ID of the management frame last sent to firmware.
* @mgmt_rx_reg: registered rx mgmt frame types.
* @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
* @cqm_rssi_low: Lower RSSI limit for CQM monitoring
@@ -224,6 +244,9 @@ struct brcmf_cfg80211_vif {
unsigned long sme_state;
struct vif_saved_ie saved_ie;
struct list_head list;
+ struct completion mgmt_tx;
+ unsigned long mgmt_tx_status;
+ u32 mgmt_tx_id;
u16 mgmt_rx_reg;
bool mbss;
int is_11d;
@@ -468,5 +491,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
+int brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie);
#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index f26e4679e4ff..75f101622db1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -491,6 +491,7 @@ void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
trace_brcmf_dbg(level, func, &vaf);
va_end(args);
}
+BRCMF_EXPORT_SYMBOL_GPL(__brcmf_dbg);
#endif
static void brcmf_mp_attach(void)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 3d63010ae079..04f41c09deca 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1363,6 +1363,8 @@ int brcmf_attach(struct device *dev)
brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
brcmf_psm_watchdog_notify);
+ brcmf_fwvid_get_cfg80211_ops(drvr);
+
ret = brcmf_bus_started(drvr, drvr->ops);
if (ret != 0) {
bphy_err(drvr, "dongle is not responding: err=%d\n", ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
index 9a4837881486..c9537fb597ce 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
@@ -8,11 +8,21 @@
#include <bus.h>
#include <fwvid.h>
#include <fwil.h>
+#include <fweh.h>
#include "vops.h"
+#include "fwil_types.h"
+/* event definitions */
+#define BRCMF_CYW_E_EXT_AUTH_REQ 187
+#define BRCMF_CYW_E_EXT_AUTH_FRAME_RX 188
+#define BRCMF_CYW_E_MGMT_FRAME_TXS 189
+#define BRCMF_CYW_E_MGMT_FRAME_TXS_OC 190
#define BRCMF_CYW_E_LAST 197
+#define MGMT_AUTH_FRAME_DWELL_TIME 4000
+#define MGMT_AUTH_FRAME_WAIT_TIME (MGMT_AUTH_FRAME_DWELL_TIME + 100)
+
static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
struct cfg80211_crypto_settings *crypto)
{
@@ -39,6 +49,19 @@ static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
return err;
}
+static const struct brcmf_fweh_event_map brcmf_cyw_event_map = {
+ .items = {
+ { BRCMF_E_EXT_AUTH_REQ, BRCMF_CYW_E_EXT_AUTH_REQ },
+ { BRCMF_E_EXT_AUTH_FRAME_RX, BRCMF_CYW_E_EXT_AUTH_FRAME_RX },
+ { BRCMF_E_MGMT_FRAME_TXSTATUS, BRCMF_CYW_E_MGMT_FRAME_TXS },
+ {
+ BRCMF_E_MGMT_FRAME_OFFCHAN_DONE,
+ BRCMF_CYW_E_MGMT_FRAME_TXS_OC
+ },
+ },
+ .n_items = 4
+};
+
static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr)
{
struct brcmf_fweh_info *fweh;
@@ -49,11 +72,296 @@ static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr)
return -ENOMEM;
fweh->num_event_codes = BRCMF_CYW_E_LAST;
+ fweh->event_map = &brcmf_cyw_event_map;
drvr->fweh = fweh;
return 0;
}
+static int brcmf_cyw_activate_events(struct brcmf_if *ifp)
+{
+ struct brcmf_fweh_info *fweh = ifp->drvr->fweh;
+ struct brcmf_eventmsgs_ext *eventmask_msg;
+ u32 msglen;
+ int err;
+
+ msglen = sizeof(*eventmask_msg) + fweh->event_mask_len;
+ eventmask_msg = kzalloc(msglen, GFP_KERNEL);
+ if (!eventmask_msg)
+ return -ENOMEM;
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = CYW_EVENTMSGS_SET_MASK;
+ eventmask_msg->len = fweh->event_mask_len;
+ memcpy(eventmask_msg->mask, fweh->event_mask, fweh->event_mask_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs_ext", eventmask_msg,
+ msglen);
+ kfree(eventmask_msg);
+ return err;
+}
+
+static
+int brcmf_cyw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct ieee80211_channel *chan = params->chan;
+ struct brcmf_pub *drvr = cfg->pub;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ const struct ieee80211_mgmt *mgmt;
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ bool ack = false;
+ s32 chan_nr;
+ u32 freq;
+ struct brcmf_mf_params_le *mf_params;
+ u32 mf_params_len;
+ s32 ready;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ mgmt = (const struct ieee80211_mgmt *)buf;
+
+ if (!ieee80211_is_auth(mgmt->frame_control))
+ return brcmf_cfg80211_mgmt_tx(wiphy, wdev, params, cookie);
+
+ *cookie = 0;
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+ reinit_completion(&vif->mgmt_tx);
+ clear_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status);
+ clear_bit(BRCMF_MGMT_TX_NOACK, &vif->mgmt_tx_status);
+ clear_bit(BRCMF_MGMT_TX_OFF_CHAN_COMPLETED,
+ &vif->mgmt_tx_status);
+ mf_params_len = offsetof(struct brcmf_mf_params_le, data) +
+ (len - DOT11_MGMT_HDR_LEN);
+ mf_params = kzalloc(mf_params_len, GFP_KERNEL);
+ if (!mf_params)
+ return -ENOMEM;
+
+ mf_params->dwell_time = cpu_to_le32(MGMT_AUTH_FRAME_DWELL_TIME);
+ mf_params->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
+ mf_params->frame_control = mgmt->frame_control;
+
+ if (chan)
+ freq = chan->center_freq;
+ else
+ brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
+ &freq);
+ chan_nr = ieee80211_frequency_to_channel(freq);
+ mf_params->channel = cpu_to_le16(chan_nr);
+ memcpy(&mf_params->da[0], &mgmt->da[0], ETH_ALEN);
+ memcpy(&mf_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
+ mf_params->packet_id = cpu_to_le32(*cookie);
+ memcpy(mf_params->data, &buf[DOT11_MGMT_HDR_LEN],
+ le16_to_cpu(mf_params->len));
+
+ brcmf_dbg(TRACE, "Auth frame, cookie=%d, fc=%04x, len=%d, channel=%d\n",
+ le32_to_cpu(mf_params->packet_id),
+ le16_to_cpu(mf_params->frame_control),
+ le16_to_cpu(mf_params->len), chan_nr);
+
+ vif->mgmt_tx_id = le32_to_cpu(mf_params->packet_id);
+ set_bit(BRCMF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status);
+
+ err = brcmf_fil_bsscfg_data_set(vif->ifp, "mgmt_frame",
+ mf_params, mf_params_len);
+ if (err) {
+ bphy_err(drvr, "Failed to send Auth frame: err=%d\n",
+ err);
+ goto tx_status;
+ }
+
+ ready = wait_for_completion_timeout(&vif->mgmt_tx,
+ msecs_to_jiffies(MGMT_AUTH_FRAME_WAIT_TIME));
+ if (test_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status)) {
+ brcmf_dbg(TRACE, "TX Auth frame operation is success\n");
+ ack = true;
+ } else {
+ bphy_err(drvr, "TX Auth frame operation is %s: status=%ld)\n",
+ ready ? "failed" : "timedout", vif->mgmt_tx_status);
+ }
+
+tx_status:
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
+ GFP_KERNEL);
+ kfree(mf_params);
+ return err;
+}
+
+static int
+brcmf_cyw_external_auth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *params)
+{
+ struct brcmf_if *ifp;
+ struct brcmf_pub *drvr;
+ struct brcmf_auth_req_status_le auth_status;
+ int ret = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ ifp = netdev_priv(dev);
+ drvr = ifp->drvr;
+ if (params->status == WLAN_STATUS_SUCCESS) {
+ auth_status.flags = cpu_to_le16(BRCMF_EXTAUTH_SUCCESS);
+ } else {
+ bphy_err(drvr, "External authentication failed: status=%d\n",
+ params->status);
+ auth_status.flags = cpu_to_le16(BRCMF_EXTAUTH_FAIL);
+ }
+
+ memcpy(auth_status.peer_mac, params->bssid, ETH_ALEN);
+ params->ssid.ssid_len = min_t(u8, params->ssid.ssid_len,
+ IEEE80211_MAX_SSID_LEN);
+ auth_status.ssid_len = cpu_to_le32(params->ssid.ssid_len);
+ memcpy(auth_status.ssid, params->ssid.ssid, params->ssid.ssid_len);
+
+ ret = brcmf_fil_iovar_data_set(ifp, "auth_status", &auth_status,
+ sizeof(auth_status));
+ if (ret < 0)
+ bphy_err(drvr, "auth_status iovar failed: ret=%d\n", ret);
+
+ return ret;
+}
+
+static void brcmf_cyw_get_cfg80211_ops(struct brcmf_pub *drvr)
+{
+ drvr->ops->mgmt_tx = brcmf_cyw_mgmt_tx;
+ drvr->ops->external_auth = brcmf_cyw_external_auth;
+}
+
+static s32
+brcmf_cyw_notify_ext_auth_req(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct cfg80211_external_auth_params params;
+ struct brcmf_auth_req_status_le *auth_req =
+ (struct brcmf_auth_req_status_le *)data;
+ s32 err = 0;
+
+ brcmf_dbg(INFO, "Enter: event %s (%d) received\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code);
+
+ if (e->datalen < sizeof(*auth_req)) {
+ bphy_err(drvr, "Event %s (%d) data too small. Ignore\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code);
+ return -EINVAL;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.action = NL80211_EXTERNAL_AUTH_START;
+ params.key_mgmt_suite = WLAN_AKM_SUITE_SAE;
+ params.status = WLAN_STATUS_SUCCESS;
+ params.ssid.ssid_len = min_t(u32, 32, le32_to_cpu(auth_req->ssid_len));
+ memcpy(params.ssid.ssid, auth_req->ssid, params.ssid.ssid_len);
+ memcpy(params.bssid, auth_req->peer_mac, ETH_ALEN);
+
+ err = cfg80211_external_auth_request(ifp->ndev, &params, GFP_KERNEL);
+ if (err)
+ bphy_err(drvr, "Ext Auth request to supplicant failed (%d)\n",
+ err);
+
+ return err;
+}
+
+static s32
+brcmf_notify_auth_frame_rx(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_cfg80211_info *cfg = drvr->config;
+ struct wireless_dev *wdev;
+ u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data);
+ struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+ u8 *frame = (u8 *)(rxframe + 1);
+ struct brcmu_chan ch;
+ struct ieee80211_mgmt *mgmt_frame;
+ s32 freq;
+
+ brcmf_dbg(INFO, "Enter: event %s (%d) received\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code);
+
+ if (e->datalen < sizeof(*rxframe)) {
+ bphy_err(drvr, "Event %s (%d) data too small. Ignore\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code);
+ return -EINVAL;
+ }
+
+ wdev = &ifp->vif->wdev;
+ WARN_ON(!wdev);
+
+ ch.chspec = be16_to_cpu(rxframe->chanspec);
+ cfg->d11inf.decchspec(&ch);
+
+ mgmt_frame = kzalloc(mgmt_frame_len, GFP_KERNEL);
+ if (!mgmt_frame)
+ return -ENOMEM;
+
+ mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_AUTH);
+ memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN);
+ memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
+ brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid,
+ ETH_ALEN);
+ frame += offsetof(struct ieee80211_mgmt, u);
+ memcpy(&mgmt_frame->u, frame,
+ mgmt_frame_len - offsetof(struct ieee80211_mgmt, u));
+
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num,
+ ch.band == BRCMU_CHAN_BAND_2G ?
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ);
+
+ cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
+ NL80211_RXMGMT_FLAG_EXTERNAL_AUTH);
+ kfree(mgmt_frame);
+ return 0;
+}
+
+static s32
+brcmf_notify_mgmt_tx_status(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ u32 *packet_id = (u32 *)data;
+
+ brcmf_dbg(INFO, "Enter: event %s (%d), status=%d\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code,
+ e->status);
+
+ if (!test_bit(BRCMF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status) ||
+ (*packet_id != vif->mgmt_tx_id))
+ return 0;
+
+ if (e->event_code == BRCMF_E_MGMT_FRAME_TXSTATUS) {
+ if (e->status == BRCMF_E_STATUS_SUCCESS)
+ set_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status);
+ else
+ set_bit(BRCMF_MGMT_TX_NOACK, &vif->mgmt_tx_status);
+ } else {
+ set_bit(BRCMF_MGMT_TX_OFF_CHAN_COMPLETED, &vif->mgmt_tx_status);
+ }
+
+ complete(&vif->mgmt_tx);
+ return 0;
+}
+
+static void brcmf_cyw_register_event_handlers(struct brcmf_pub *drvr)
+{
+ brcmf_fweh_register(drvr, BRCMF_E_EXT_AUTH_REQ,
+ brcmf_cyw_notify_ext_auth_req);
+ brcmf_fweh_register(drvr, BRCMF_E_EXT_AUTH_FRAME_RX,
+ brcmf_notify_auth_frame_rx);
+ brcmf_fweh_register(drvr, BRCMF_E_MGMT_FRAME_TXSTATUS,
+ brcmf_notify_mgmt_tx_status);
+ brcmf_fweh_register(drvr, BRCMF_E_MGMT_FRAME_OFFCHAN_DONE,
+ brcmf_notify_mgmt_tx_status);
+}
+
const struct brcmf_fwvid_ops brcmf_cyw_ops = {
.set_sae_password = brcmf_cyw_set_sae_pwd,
.alloc_fweh_info = brcmf_cyw_alloc_fweh_info,
+ .activate_events = brcmf_cyw_activate_events,
+ .get_cfg80211_ops = brcmf_cyw_get_cfg80211_ops,
+ .register_event_handlers = brcmf_cyw_register_event_handlers,
};
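The brcmf_cyw_mgmt_tx()/brcmf_notify_mgmt_tx_status() pair above is a standard issue-then-wait-for-firmware-event shape built on a completion plus status bits. A stripped-down sketch of just the synchronization, reusing the patch's field and bit names but otherwise simplified (note wait_for_completion_timeout() takes jiffies, hence the msecs_to_jiffies() conversion):

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    /* submit side */
    static int send_and_wait(struct brcmf_cfg80211_vif *vif)
    {
            unsigned long left;

            reinit_completion(&vif->mgmt_tx);
            vif->mgmt_tx_status = 0;

            /* ... hand the frame to firmware here ... */

            left = wait_for_completion_timeout(&vif->mgmt_tx,
                            msecs_to_jiffies(MGMT_AUTH_FRAME_WAIT_TIME));
            if (!left)
                    return -ETIMEDOUT;
            return test_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status) ?
                   0 : -EIO;
    }

    /* firmware-event side: record the outcome, then wake the waiter */
    static void on_txstatus(struct brcmf_cfg80211_vif *vif, bool acked)
    {
            set_bit(acked ? BRCMF_MGMT_TX_ACK : BRCMF_MGMT_TX_NOACK,
                    &vif->mgmt_tx_status);
            complete(&vif->mgmt_tx);
    }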
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h
new file mode 100644
index 000000000000..08c69142495a
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ */
+
+#ifndef CYW_FWIL_TYPES_H_
+#define CYW_FWIL_TYPES_H_
+
+#include <fwil_types.h>
+
+enum brcmf_event_msgs_ext_command {
+ CYW_EVENTMSGS_NONE = 0,
+ CYW_EVENTMSGS_SET_BIT = 1,
+ CYW_EVENTMSGS_RESET_BIT = 2,
+ CYW_EVENTMSGS_SET_MASK = 3,
+};
+
+#define EVENTMSGS_VER 1
+#define EVENTMSGS_EXT_STRUCT_SIZE offsetof(struct brcmf_eventmsgs_ext, mask[0])
+
+/**
+ * struct brcmf_eventmsgs_ext - structure used with "eventmsgs_ext" iovar.
+ *
+ * @ver: version.
+ * @command: requested operation (see &enum brcmf_event_msgs_ext_command).
+ * @len: length of the @mask array.
+ * @maxgetsize: indicates maximum mask size that may be returned by firmware
+ * upon iovar GET.
+ * @mask: array where each bit represents firmware event.
+ */
+struct brcmf_eventmsgs_ext {
+ u8 ver;
+ u8 command;
+ u8 len;
+ u8 maxgetsize;
+ u8 mask[] __counted_by(len);
+};
+
+#define BRCMF_EXTAUTH_START 1
+#define BRCMF_EXTAUTH_ABORT 2
+#define BRCMF_EXTAUTH_FAIL 3
+#define BRCMF_EXTAUTH_SUCCESS 4
+
+/**
+ * struct brcmf_auth_req_status_le - external auth request and status update
+ *
+ * @flags: flags for external auth status
+ * @peer_mac: peer MAC address
+ * @ssid_len: length of ssid
+ * @ssid: ssid characters
+ * @pmkid: PMKSA identifier
+ */
+struct brcmf_auth_req_status_le {
+ __le16 flags;
+ u8 peer_mac[ETH_ALEN];
+ __le32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 pmkid[WLAN_PMKID_LEN];
+};
+
+/**
+ * struct brcmf_mf_params_le - management frame parameters for mgmt_frame iovar
+ *
+ * @version: version of the iovar
+ * @dwell_time: dwell duration in ms
+ * @len: length of frame data
+ * @frame_control: frame control
+ * @channel: channel
+ * @da: peer MAC address
+ * @bssid: BSS network identifier
+ * @packet_id: packet identifier
+ * @data: frame data
+ */
+struct brcmf_mf_params_le {
+ __le32 version;
+ __le32 dwell_time;
+ __le16 len;
+ __le16 frame_control;
+ __le16 channel;
+ u8 da[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le32 packet_id;
+ u8 data[] __counted_by(len);
+};
+
+#endif /* CYW_FWIL_TYPES_H_ */
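struct brcmf_mf_params_le ends in a __counted_by(len) flexible array, which lets FORTIFY-enabled builds bounds-check accesses to data[] against the len member. A hedged sketch of the allocation idiom such a layout expects, using struct_size() from <linux/overflow.h> (helper name hypothetical):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    static struct brcmf_mf_params_le *
    mf_params_alloc(const u8 *body, u16 body_len)
    {
            struct brcmf_mf_params_le *mf;

            /* sizeof(*mf) + body_len * sizeof(mf->data[0]), overflow-checked */
            mf = kzalloc(struct_size(mf, data, body_len), GFP_KERNEL);
            if (!mf)
                    return NULL;

            /* set the counter before touching data[] so __counted_by
             * instrumentation sees a valid bound
             */
            mf->len = cpu_to_le16(body_len);
            memcpy(mf->data, body, body_len);
            return mf;
    }

brcmf_cyw_mgmt_tx() above open-codes the equivalent offsetof() arithmetic.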
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 0d9ae197fa1e..488364ef8ff2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -42,8 +42,9 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MONITOR_FLAG, "rtap" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
{ BRCMF_FEAT_DOT11H, "802.11h" },
- { BRCMF_FEAT_SAE, "sae" },
+ { BRCMF_FEAT_SAE, "sae " },
{ BRCMF_FEAT_FWAUTH, "idauth" },
+ { BRCMF_FEAT_SAE_EXT, "sae_ext" },
};
#ifdef DEBUG
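The trailing space in "sae " is deliberate, not a typo: firmware capability matching is substring-based, so a bare "sae" pattern would also match the new "sae_ext" capability keyword. Roughly (the exact matcher lives in brcmf_feat_firmware_capabilities(); illustrative only):

    strnstr(caps, "sae", sizeof(caps));   /* also hits "sae_ext" */
    strnstr(caps, "sae ", sizeof(caps));  /* standalone "sae" token only */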
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 7f4f0b3e4a7b..31f8695ca417 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -31,6 +31,7 @@
* FWAUTH: Firmware authenticator
* DUMP_OBSS: Firmware has capable to dump obss info to support ACS
* SCAN_V2: Version 2 scan params
+ * SAE_EXT: SAE authentication handled by user-space supplicant
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -57,7 +58,8 @@
BRCMF_FEAT_DEF(DUMP_OBSS) \
BRCMF_FEAT_DEF(SCAN_V2) \
BRCMF_FEAT_DEF(PMKID_V2) \
- BRCMF_FEAT_DEF(PMKID_V3)
+ BRCMF_FEAT_DEF(PMKID_V3) \
+ BRCMF_FEAT_DEF(SAE_EXT)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index f0b6a7607f16..c2d98ee6652f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -75,6 +75,7 @@ const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
return "nodebug";
}
#endif
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fweh_event_name);
/**
* brcmf_fweh_queue_event() - create and queue event.
@@ -405,6 +406,7 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
brcmf_fweh_event_name(code));
return 0;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fweh_register);
/**
* brcmf_fweh_unregister() - remove handler for given code.
@@ -448,11 +450,14 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
brcmf_dbg(EVENT, "enable event IF\n");
setbit(fweh->event_mask, BRCMF_E_IF);
+ /* allow per-vendor method to activate firmware events */
+ if (!brcmf_fwvid_activate_events(ifp))
+ return 0;
+
err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
fweh->event_mask_len);
if (err)
bphy_err(fweh->drvr, "Set event_msgs error (%d)\n", err);
-
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index eed439b84010..e327dd58d29c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -94,7 +94,11 @@ struct brcmf_cfg80211_info;
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
- BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+ BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+ BRCMF_ABSTRACT_ENUM_DEF(EXT_AUTH_REQ, 0) \
+ BRCMF_ABSTRACT_ENUM_DEF(EXT_AUTH_FRAME_RX, 1) \
+ BRCMF_ABSTRACT_ENUM_DEF(MGMT_FRAME_TXSTATUS, 2) \
+ BRCMF_ABSTRACT_ENUM_DEF(MGMT_FRAME_OFFCHAN_DONE, 3)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
@@ -337,7 +341,7 @@ struct brcmf_fweh_info {
struct list_head event_q;
uint event_mask_len;
u8 *event_mask;
- struct brcmf_fweh_event_map *event_map;
+ const struct brcmf_fweh_event_map *event_map;
uint num_event_codes;
brcmf_fweh_handler_t evt_handler[] __counted_by(num_event_codes);
};
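The four BRCMF_ABSTRACT_ENUM_DEF codes are driver-internal numbers with no slot in the common firmware event space; a vendor module supplies a brcmf_fweh_event_map (now const-qualified here) to translate firmware numbers to them, as the CYW table earlier in this patch does. A sketch of the lookup this implies, assuming each map item carries a code/fwcode pair as that table suggests:

    static int
    fweh_fwcode_to_code(const struct brcmf_fweh_event_map *map, u32 fwcode,
                        enum brcmf_fweh_event_code *code)
    {
            u32 i;

            for (i = 0; i < map->n_items; i++) {
                    if (map->items[i].fwcode == fwcode) {
                            *code = map->items[i].code;
                            return 0;
                    }
            }
            return -ENOENT;
    }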
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
index e6ac9fc341bc..f3e011d090f2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
@@ -15,6 +15,9 @@ struct brcmf_fwvid_ops {
void (*feat_attach)(struct brcmf_if *ifp);
int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto);
int (*alloc_fweh_info)(struct brcmf_pub *drvr);
+ int (*activate_events)(struct brcmf_if *ifp);
+ void (*get_cfg80211_ops)(struct brcmf_pub *drvr);
+ void (*register_event_handlers)(struct brcmf_pub *drvr);
};
/* exported functions */
@@ -56,4 +59,30 @@ static inline int brcmf_fwvid_alloc_fweh_info(struct brcmf_pub *drvr)
return drvr->vops->alloc_fweh_info(drvr);
}
+static inline int brcmf_fwvid_activate_events(struct brcmf_if *ifp)
+{
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
+
+ if (!vops || !vops->activate_events)
+ return -EOPNOTSUPP;
+
+ return vops->activate_events(ifp);
+}
+
+static inline void brcmf_fwvid_get_cfg80211_ops(struct brcmf_pub *drvr)
+{
+ if (!drvr->vops || !drvr->vops->get_cfg80211_ops)
+ return;
+
+ drvr->vops->get_cfg80211_ops(drvr);
+}
+
+static inline void brcmf_fwvid_register_event_handlers(struct brcmf_pub *drvr)
+{
+ if (!drvr->vops || !drvr->vops->register_event_handlers)
+ return;
+
+ drvr->vops->register_event_handlers(drvr);
+}
+
#endif /* FWVID_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 9f1854b3d1a5..8f97562811d7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2328,7 +2328,8 @@ brcmf_pcie_fwcon_timer(struct brcmf_pciedev_info *devinfo, bool active)
static void
brcmf_pcie_fwcon(struct timer_list *t)
{
- struct brcmf_pciedev_info *devinfo = from_timer(devinfo, t, timer);
+ struct brcmf_pciedev_info *devinfo = timer_container_of(devinfo, t,
+ timer);
if (!devinfo->console_active)
return;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 93727b9a5f0d..cf26ab15ee0c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4121,7 +4121,7 @@ brcmf_sdio_watchdog_thread(void *data)
static void
brcmf_sdio_watchdog(struct timer_list *t)
{
- struct brcmf_sdio *bus = from_timer(bus, t, timer);
+ struct brcmf_sdio *bus = timer_container_of(bus, t, timer);
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index d06c724f63d9..b056336d5da6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -741,15 +741,19 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
void *buffer, int buflen)
{
int ret;
- char *tmpbuf;
+ char *tmpbuf = NULL;
u16 size;
- if ((!devinfo) || (devinfo->ctl_urb == NULL))
- return -EINVAL;
+ if (!devinfo || !devinfo->ctl_urb) {
+ ret = -EINVAL;
+ goto err;
+ }
tmpbuf = kmalloc(buflen, GFP_ATOMIC);
- if (!tmpbuf)
- return -ENOMEM;
+ if (!tmpbuf) {
+ ret = -ENOMEM;
+ goto err;
+ }
size = buflen;
devinfo->ctl_urb->transfer_buffer_length = size;
@@ -770,18 +774,23 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
if (ret < 0) {
brcmf_err("usb_submit_urb failed %d\n", ret);
- goto finalize;
+ goto err;
}
if (!brcmf_usb_ioctl_resp_wait(devinfo)) {
usb_kill_urb(devinfo->ctl_urb);
ret = -ETIMEDOUT;
+ goto err;
} else {
memcpy(buffer, tmpbuf, buflen);
}
-finalize:
kfree(tmpbuf);
+ return 0;
+
+err:
+ kfree(tmpbuf);
+ brcmf_err("dl cmd %u failed: err=%d\n", cmd, ret);
return ret;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
index 50d817485cf9..0cb64fc56783 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
@@ -198,7 +198,7 @@
#define CST4313_SPROM_OTP_SEL_SHIFT 0
/* 4313 Chip specific ChipControl register bits */
- /* 12 mA drive strengh for later 4313 */
+ /* 12 mA drive strength for later 4313 */
#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
/* Manufacturer Ids */
@@ -453,7 +453,7 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
/* get chipcommon chipstatus */
sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
- /* get chipcommon capabilites */
+ /* get chipcommon capabilities */
sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
/* get pmu rev and caps */
@@ -657,7 +657,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
}
/*
- * clock control policy function throught chipcommon
+ * clock control policy function through chipcommon
*
* set dynamic clk control mode (forceslow, forcefast, dynamic)
* returns true if we are forcing fast clock
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h
index 90b6e3982d2c..e791dd07ca78 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h
@@ -76,7 +76,7 @@
* conventions for the use the flash space:
*/
-/* Minumum amount of flash we support */
+/* Minimum amount of flash we support */
#define FLASH_MIN 0x00020000 /* Minimum flash size */
#define CC_SROM_OTP 0x800 /* SROM/OTP address space */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index a767cbb79185..e1d707a7c964 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -479,7 +479,7 @@ void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
/*
* Preps the given packet for AMPDU based on the session data. If the
- * frame cannot be accomodated in the current session, -ENOSPC is
+ * frame cannot be accommodated in the current session, -ENOSPC is
* returned.
*/
int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
@@ -529,7 +529,7 @@ int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
}
/*
- * Now that we're sure this frame can be accomodated, update the
+ * Now that we're sure this frame can be accommodated, update the
* session information.
*/
session->ampdu_len += len;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index d1b9a18d0374..3878c4124e25 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -445,7 +445,7 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
/*
* OFDM 40 MHz SISO has the same power as the corresponding
- * MCS0-7 rate unless overriden by the locale specific code.
+ * MCS0-7 rate unless overridden by the locale specific code.
* We set this value to 0 as a flag (presumably 0 dBm isn't
* a possibility) and then copy the MCS0-7 value to the 40 MHz
* value if it wasn't explicitly set.
@@ -479,7 +479,7 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
/*
* 20 MHz has the same power as the corresponding OFDM rate
- * unless overriden by the locale specific code.
+ * unless overridden by the locale specific code.
*/
txpwr->mcs_20_siso[i] = txpwr->ofdm[i];
txpwr->mcs_40_siso[i] = 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
index 80c35027787a..c739bf7463b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
@@ -1348,7 +1348,7 @@ static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
ret = brcms_c_ampdu_add_frame(session, p);
if (ret == -ENOSPC) {
/*
- * AMPDU cannot accomodate this frame. Close out the in-
+ * AMPDU cannot accommodate this frame. Close out the in-
* progress AMPDU session and start a new one.
*/
ampdu_finalize(di);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 5b1d35601bbd..1c3d29dca424 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -48,9 +48,9 @@
FIF_BCN_PRBRESP_PROMISC | \
FIF_PSPOLL)
-#define CHAN2GHZ(channel, freqency, chflags) { \
+#define CHAN2GHZ(channel, frequency, chflags) { \
.band = NL80211_BAND_2GHZ, \
- .center_freq = (freqency), \
+ .center_freq = (frequency), \
.hw_value = (channel), \
.flags = chflags, \
.max_antenna_gain = 0, \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 2738d4d6c60a..c1a9c1e442ee 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -921,7 +921,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
* The "fallback limit" is the number of tx attempts a given
* MPDU is sent at the "primary" rate. Tx attempts beyond that
* limit are sent at the "secondary" rate.
- * A 'short frame' does not exceed RTS treshold.
+ * A 'short frame' does not exceed RTS threshold.
*/
u16 sfbl, /* Short Frame Rate Fallback Limit */
lfbl, /* Long Frame Rate Fallback Limit */
@@ -6259,7 +6259,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
}
/*
- * Currently only support same setting for primay and
+ * Currently only support same setting for primary and
* fallback rates. Unify flags for each rate into a
* single value for the frame
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
index 9f76b880814e..b7ca0d9891c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
@@ -603,7 +603,7 @@ enum brcms_bss_type {
* cur_etheraddr: h/w address
* flags: BSSCFG flags; see below
*
- * current_bss: BSS parms in ASSOCIATED state
+ * current_bss: BSS parameters in ASSOCIATED state
*
*
* ID: 'unique' ID of this bsscfg, assigned at bsscfg allocation
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c
index 71b80381f3ad..c9a29e626daa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c
@@ -31,7 +31,7 @@
#define EXT_ILP_HZ 32768
/*
- * Duration for ILP clock frequency measurment in milliseconds
+ * Duration for ILP clock frequency measurement in milliseconds
*
* remark: 1000 must be an integer multiple of this duration
*/
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_crypto.c b/drivers/net/wireless/intel/ipw2x00/libipw_crypto.c
index dfcc12aa8620..243d0c5928a2 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_crypto.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_crypto.c
@@ -110,8 +110,8 @@ static void libipw_crypt_quiescing(struct libipw_crypt_info *info)
static void libipw_crypt_deinit_handler(struct timer_list *t)
{
- struct libipw_crypt_info *info = from_timer(info, t,
- crypt_deinit_timer);
+ struct libipw_crypt_info *info = timer_container_of(info, t,
+ crypt_deinit_timer);
unsigned long flags;
libipw_crypt_deinit_entries(info, 0);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index df1b8ec86651..1826c37c090c 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -168,7 +168,8 @@ il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
static void
il3945_bg_rate_scale_flush(struct timer_list *t)
{
- struct il3945_rs_sta *rs_sta = from_timer(rs_sta, t, rate_scale_flush);
+ struct il3945_rs_sta *rs_sta = timer_container_of(rs_sta, t,
+ rate_scale_flush);
struct il_priv *il __maybe_unused = rs_sta->il;
int unflushed = 0;
unsigned long flags;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index dc8c408902e6..8e58e97a148f 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4051,7 +4051,7 @@ il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
static void
il4965_bg_stats_periodic(struct timer_list *t)
{
- struct il_priv *il = from_timer(il, t, stats_periodic);
+ struct il_priv *il = timer_container_of(il, t, stats_periodic);
if (test_bit(S_EXIT_PENDING, &il->status))
return;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 09fb4b758704..9a86688aea67 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -4804,7 +4804,7 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
void
il_bg_watchdog(struct timer_list *t)
{
- struct il_priv *il = from_timer(il, t, watchdog);
+ struct il_priv *il = timer_container_of(il, t, watchdog);
int cnt;
unsigned long timeout;
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 9546ceeaf5e3..3f476e333726 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -8,12 +8,23 @@ iwlwifi-objs += iwl-nvm-utils.o
iwlwifi-objs += iwl-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-v2.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
-iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
+
+CFLAGS_pcie/drv.o += -Wno-override-init
+
+# combined MAC/RF configurations
+iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o
+iwlwifi-$(CONFIG_IWLDVM) += cfg/5000.o cfg/6000.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o
+# MAC configurations
+iwlwifi-$(CONFIG_IWLMVM) += cfg/9000.o cfg/22000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o
iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o
+# RF configurations
+iwlwifi-$(CONFIG_IWLMVM) += cfg/rf-jf.o cfg/rf-hr.o cfg/rf-gf.o
+iwlwifi-$(CONFIG_IWLMLD) += cfg/rf-fm.o cfg/rf-wh.o cfg/rf-pe.o
+
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index f172ffd2a841..c029e595e7c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -29,7 +29,7 @@
#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl1000_base_params = {
+static const struct iwl_family_base_params iwl1000_base = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
@@ -42,12 +42,6 @@ static const struct iwl_base_params iwl1000_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_ht_params iwl1000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(NL80211_BAND_2GHZ),
-};
-
static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -60,54 +54,67 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
}
};
+const struct iwl_mac_cfg iwl1000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_1000,
+ .base = &iwl1000_base,
+};
+
#define IWL_DEVICE_1000 \
.fw_name_pre = IWL1000_FW_PRE, \
.ucode_api_max = IWL1000_UCODE_API_MAX, \
.ucode_api_min = IWL1000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_1000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_1000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
- .trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK
-const struct iwl_cfg iwl1000_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
+const struct iwl_rf_cfg iwl1000_bgn_cfg = {
IWL_DEVICE_1000,
- .ht_params = &iwl1000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
-const struct iwl_cfg iwl1000_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
+const char iwl1000_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 1000 BGN";
+
+const struct iwl_rf_cfg iwl1000_bg_cfg = {
IWL_DEVICE_1000,
};
+const char iwl1000_bg_name[] = "Intel(R) Centrino(R) Wireless-N 1000 BG";
+
#define IWL_DEVICE_100 \
.fw_name_pre = IWL100_FW_PRE, \
.ucode_api_max = IWL100_UCODE_API_MAX, \
.ucode_api_min = IWL100_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_100, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_1000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
- .trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
-const struct iwl_cfg iwl100_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
+const struct iwl_rf_cfg iwl100_bgn_cfg = {
IWL_DEVICE_100,
- .ht_params = &iwl1000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
-const struct iwl_cfg iwl100_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 100 BG",
+const char iwl100_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 100 BGN";
+
+const struct iwl_rf_cfg iwl100_bg_cfg = {
IWL_DEVICE_100,
};
+const char iwl100_bg_name[] = "Intel(R) Centrino(R) Wireless-N 100 BG";
+
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index 6f3f26da0ad5..47554a5f406e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -40,7 +40,7 @@
#define IWL135_FW_PRE "iwlwifi-135"
#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl2000_base_params = {
+static const struct iwl_family_base_params iwl2000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -54,7 +54,7 @@ static const struct iwl_base_params iwl2000_base_params = {
};
-static const struct iwl_base_params iwl2030_base_params = {
+static const struct iwl_family_base_params iwl2030_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -67,12 +67,6 @@ static const struct iwl_base_params iwl2030_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_ht_params iwl2000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(NL80211_BAND_2GHZ),
-};
-
static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -86,97 +80,119 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.enhanced_txpower = true,
};
+const struct iwl_mac_cfg iwl2000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_2000,
+ .base = &iwl2000_base,
+};
+
#define IWL_DEVICE_2000 \
.fw_name_pre = IWL2000_FW_PRE, \
.ucode_api_max = IWL2000_UCODE_API_MAX, \
.ucode_api_min = IWL2000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_2000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl2000_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
+const struct iwl_rf_cfg iwl2000_2bgn_cfg = {
IWL_DEVICE_2000,
- .ht_params = &iwl2000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
-const struct iwl_cfg iwl2000_2bgn_d_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
- IWL_DEVICE_2000,
- .ht_params = &iwl2000_ht_params,
+const char iwl2000_2bgn_name[] = "Intel(R) Centrino(R) Wireless-N 2200 BGN";
+const char iwl2000_2bgn_d_name[] = "Intel(R) Centrino(R) Wireless-N 2200D BGN";
+
+const struct iwl_mac_cfg iwl2030_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_2030,
+ .base = &iwl2030_base,
};
#define IWL_DEVICE_2030 \
.fw_name_pre = IWL2030_FW_PRE, \
.ucode_api_max = IWL2030_UCODE_API_MAX, \
.ucode_api_min = IWL2030_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_2030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl2030_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
+const struct iwl_rf_cfg iwl2030_2bgn_cfg = {
IWL_DEVICE_2030,
- .ht_params = &iwl2000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
+};
+
+const char iwl2030_2bgn_name[] = "Intel(R) Centrino(R) Wireless-N 2230 BGN";
+
+const struct iwl_mac_cfg iwl105_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_105,
+ .base = &iwl2000_base,
};
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
.ucode_api_min = IWL105_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_105, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
-const struct iwl_cfg iwl105_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
+const struct iwl_rf_cfg iwl105_bgn_cfg = {
IWL_DEVICE_105,
- .ht_params = &iwl2000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
-const struct iwl_cfg iwl105_bgn_d_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
- IWL_DEVICE_105,
- .ht_params = &iwl2000_ht_params,
+const char iwl105_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 105 BGN";
+const char iwl105_bgn_d_name[] = "Intel(R) Centrino(R) Wireless-N 105D BGN";
+
+const struct iwl_mac_cfg iwl135_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_135,
+ .base = &iwl2030_base,
};
#define IWL_DEVICE_135 \
.fw_name_pre = IWL135_FW_PRE, \
.ucode_api_max = IWL135_UCODE_API_MAX, \
.ucode_api_min = IWL135_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_135, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
-const struct iwl_cfg iwl135_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
+const struct iwl_rf_cfg iwl135_bgn_cfg = {
IWL_DEVICE_135,
- .ht_params = &iwl2000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
+const char iwl135_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 135 BGN";
+
MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
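
The new iwl_mac_cfg objects also make cross-family sharing explicit: iwl105_mac_cfg reuses iwl2000_base and iwl135_mac_cfg reuses iwl2030_base, so each set of family parameters exists exactly once. A compilable sketch of that sharing, again with placeholder types and values:

#include <stdio.h>

struct family_base_params {
	int num_of_queues;
	int max_tfd_queue_size;
};

struct mac_cfg {
	int device_family;
	const struct family_base_params *base;
};

static const struct family_base_params demo_2000_base = {
	.num_of_queues = 20,	/* stand-in for IWLAGN_NUM_QUEUES */
	.max_tfd_queue_size = 256,
};

/* Two device families, one shared parameter block. */
static const struct mac_cfg demo_2000_mac = { .device_family = 2000, .base = &demo_2000_base };
static const struct mac_cfg demo_105_mac  = { .device_family = 105,  .base = &demo_2000_base };

int main(void)
{
	printf("105 shares the 2000 base params: %d\n",
	       demo_2000_mac.base == demo_105_mac.base);
	return 0;
}
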
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 130b9a8aa7eb..52e0beebf9ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -15,14 +15,7 @@
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 77
-/* NVM versions */
-#define IWL_22000_NVM_VERSION 0x0a1d
-
/* Memory offsets and lengths */
-#define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_22000_DCCM2_OFFSET 0x880000
-#define IWL_22000_DCCM2_LEN 0x8000
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000
@@ -44,11 +37,12 @@
IWL_QU_C_HR_B_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_QU_C_JF_B_MODULE_FIRMWARE(api) \
+ IWL_QU_C_JF_B_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_CC_A_MODULE_FIRMWARE(api) \
IWL_CC_A_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl_22000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_22000_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
@@ -57,155 +51,78 @@ static const struct iwl_base_params iwl_22000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
-};
-
-const struct iwl_ht_params iwl_22000_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) |
- BIT(NL80211_BAND_6GHZ),
-};
-
-#define IWL_DEVICE_22000_COMMON \
- .ucode_api_min = IWL_22000_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_22000_DCCM_OFFSET, \
- .dccm_len = IWL_22000_DCCM_LEN, \
- .dccm2_offset = IWL_22000_DCCM2_OFFSET, \
- .dccm2_len = IWL_22000_DCCM2_LEN, \
- .smem_offset = IWL_22000_SMEM_OFFSET, \
- .smem_len = IWL_22000_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x380, \
- .ht_params = &iwl_22000_ht_params, \
- .nvm_ver = IWL_22000_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0x400000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_22500 \
- IWL_DEVICE_22000_COMMON, \
- .ucode_api_max = IWL_22000_UCODE_API_MAX, \
- .trans.device_family = IWL_DEVICE_FAMILY_22000, \
- .trans.base_params = &iwl_22000_base_params, \
- .gp2_reg_addr = 0xa02c68, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = MON_BUFF_WRPTR_VER2, \
- .mask = 0xffffffff, \
- }, \
- .cycle_cnt = { \
- .addr = MON_BUFF_CYCLE_CNT_VER2, \
- .mask = 0xffffffff, \
- }, \
- }
-
-const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
+ .smem_offset = IWL_22000_SMEM_OFFSET,
+ .smem_len = IWL_22000_SMEM_LEN,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x380,
+ .min_umac_error_event_table = 0x400000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .gp2_reg_addr = 0xa02c68,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = MON_BUFF_WRPTR_VER2,
+ .mask = 0xffffffff,
+ },
+ .cycle_cnt = {
+ .addr = MON_BUFF_CYCLE_CNT_VER2,
+ .mask = 0xffffffff,
+ },
+ },
+ .ucode_api_min = IWL_22000_UCODE_API_MIN,
+ .ucode_api_max = IWL_22000_UCODE_API_MAX,
+};
+
+const struct iwl_mac_cfg iwl_qu_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
- .base_params = &iwl_22000_base_params,
+ .base = &iwl_22000_base,
.integrated = true,
.xtal_latency = 500,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
-const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = {
+const struct iwl_mac_cfg iwl_qu_medium_latency_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
- .base_params = &iwl_22000_base_params,
+ .base = &iwl_22000_base,
.integrated = true,
.xtal_latency = 1820,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US,
};
-const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
+const struct iwl_mac_cfg iwl_qu_long_latency_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
- .base_params = &iwl_22000_base_params,
+ .base = &iwl_22000_base,
.integrated = true,
.xtal_latency = 12000,
.low_latency_xtal = true,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-/*
- * If the device doesn't support HE, no need to have that many buffers.
- * 22000 devices can split multiple frames into a single RB, so fewer are
- * needed; AX210 cannot (but use smaller RBs by default) - these sizes
- * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with
- * additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_NON_HE 512
-#define IWL_NUM_RBDS_22000_HE 2048
-
-/*
- * All JF radio modules are part of the 9000 series, but the MAC part
- * looks more like 22000. That's why this device is here, but called
- * 9560 nevertheless.
- */
-const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg = {
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg = {
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg = {
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
+const struct iwl_mac_cfg iwl_ax200_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_22000,
- .base_params = &iwl_22000_base_params,
+ .base = &iwl_22000_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.bisr_workaround = 1,
};
-const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
-const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
-const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
-const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
-
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
const char iwl_ax200_killer_1650x_name[] =
@@ -215,213 +132,10 @@ const char iwl_ax201_killer_1650s_name[] =
const char iwl_ax201_killer_1650i_name[] =
"Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)";
-const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .tx_with_siso_diversity = true,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_qu_b0_hr_b0 = {
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .tx_with_siso_diversity = true,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_qu_c0_hr_b0 = {
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_quz_a0_hr1_b0 = {
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .tx_with_siso_diversity = true,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax200_cfg_cc = {
- .fw_name_pre = IWL_CC_A_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = {
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QU_C_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
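
Beyond the restructuring, the one functional addition in this file is the new IWL_QU_C_JF_B_MODULE_FIRMWARE macro and its matching MODULE_FIRMWARE() entry, so the Qu-C0/JF-B0 firmware file is now advertised. The firmware name is pure preprocessor string pasting; a userspace re-creation (the prefix string and API value are assumed for the demo, and __stringify is re-implemented here since the kernel's lives in include/linux/stringify.h):

#include <stdio.h>

/* Userspace re-creation of the kernel's two-level __stringify(). */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL_QU_C_JF_B_FW_PRE	"iwlwifi-Qu-c0-jf-b0"	/* assumed prefix */
#define IWL_22000_UCODE_API_MAX	77			/* assumed value */
#define IWL_QU_C_JF_B_MODULE_FIRMWARE(api) \
	IWL_QU_C_JF_B_FW_PRE "-" __stringify(api) ".ucode"

int main(void)
{
	/* The macro expands at compile time to one concatenated literal,
	 * here "iwlwifi-Qu-c0-jf-b0-77.ucode". */
	puts(IWL_QU_C_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
	return 0;
}
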
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index de7ede59a994..aaae3b1f5433 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -30,7 +30,7 @@
#define IWL5150_FW_PRE "iwlwifi-5150"
#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl5000_base_params = {
+static const struct iwl_family_base_params iwl5000_base = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -41,11 +41,6 @@ static const struct iwl_base_params iwl5000_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_ht_params iwl5000_ht_params = {
- .ht_greenfield_support = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -58,93 +53,107 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
},
};
+const struct iwl_mac_cfg iwl5000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_5000,
+ .base = &iwl5000_base,
+};
+
#define IWL_DEVICE_5000 \
.fw_name_pre = IWL5000_FW_PRE, \
.ucode_api_max = IWL5000_UCODE_API_MAX, \
.ucode_api_min = IWL5000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_5000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_5000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
- .trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK
-const struct iwl_cfg iwl5300_agn_cfg = {
- .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
+const struct iwl_rf_cfg iwl5300_agn_cfg = {
IWL_DEVICE_5000,
/* at least EEPROM 0x11A has wrong info */
.valid_tx_ant = ANT_ABC, /* .cfg overwrite */
.valid_rx_ant = ANT_ABC, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl5100_bgn_cfg = {
- .name = "Intel(R) WiFi Link 5100 BGN",
- IWL_DEVICE_5000,
- .valid_tx_ant = ANT_B, /* .cfg overwrite */
- .valid_rx_ant = ANT_AB, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
-};
+const char iwl5300_agn_name[] = "Intel(R) Ultimate N WiFi Link 5300 AGN";
-const struct iwl_cfg iwl5100_abg_cfg = {
- .name = "Intel(R) WiFi Link 5100 ABG",
+const struct iwl_rf_cfg iwl5100_n_cfg = {
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
.valid_rx_ant = ANT_AB, /* .cfg overwrite */
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl5100_agn_cfg = {
- .name = "Intel(R) WiFi Link 5100 AGN",
+const char iwl5100_bgn_name[] = "Intel(R) WiFi Link 5100 BGN";
+
+const struct iwl_rf_cfg iwl5100_abg_cfg = {
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
.valid_rx_ant = ANT_AB, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
};
-const struct iwl_cfg iwl5350_agn_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
+const char iwl5100_abg_name[] = "Intel(R) WiFi Link 5100 ABG";
+const char iwl5100_agn_name[] = "Intel(R) WiFi Link 5100 AGN";
+
+const struct iwl_rf_cfg iwl5350_agn_cfg = {
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
- .trans.device_family = IWL_DEVICE_FAMILY_5000,
.max_inst_size = IWLAGN_RTC_INST_SIZE,
.max_data_size = IWLAGN_RTC_DATA_SIZE,
.nvm_ver = EEPROM_5050_EEPROM_VERSION,
.nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .trans.base_params = &iwl5000_base_params,
.eeprom_params = &iwl5000_eeprom_params,
- .ht_params = &iwl5000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
};
+const char iwl5350_agn_name[] = "Intel(R) WiMAX/WiFi Link 5350 AGN";
+
+const struct iwl_mac_cfg iwl5150_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_5150,
+ .base = &iwl5000_base,
+};
+
#define IWL_DEVICE_5150 \
.fw_name_pre = IWL5150_FW_PRE, \
.ucode_api_max = IWL5150_UCODE_API_MAX, \
.ucode_api_min = IWL5150_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_5150, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_5050_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
- .trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-const struct iwl_cfg iwl5150_agn_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
+const struct iwl_rf_cfg iwl5150_agn_cfg = {
IWL_DEVICE_5150,
- .ht_params = &iwl5000_ht_params,
-
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl5150_abg_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
+const char iwl5150_agn_name[] = "Intel(R) WiMAX/WiFi Link 5150 AGN";
+
+const struct iwl_rf_cfg iwl5150_abg_cfg = {
IWL_DEVICE_5150,
};
+const char iwl5150_abg_name[] = "Intel(R) WiMAX/WiFi Link 5150 ABG";
+
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
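
The 5000-series hunks collapse iwl5100_bgn_cfg and iwl5100_agn_cfg into a single iwl5100_n_cfg; the BGN/AGN distinction survives only in the exported name strings. A hypothetical sketch of how one cfg can then serve several marketing names at probe time (the table shape and subdevice IDs are invented for illustration, not the driver's real PCI match table):

#include <stdio.h>

struct rf_cfg { const char *fw_name_pre; };

static const struct rf_cfg demo_5100_n_cfg = { .fw_name_pre = "iwlwifi-5000" };

static const char demo_5100_bgn_name[] = "Intel(R) WiFi Link 5100 BGN";
static const char demo_5100_agn_name[] = "Intel(R) WiFi Link 5100 AGN";

/* Hypothetical match entry: one cfg, many names. */
struct demo_dev_info {
	unsigned short subdevice;
	const struct rf_cfg *cfg;
	const char *name;
};

static const struct demo_dev_info demo_dev_info_table[] = {
	{ 0x1201, &demo_5100_n_cfg, demo_5100_agn_name },	/* invented IDs */
	{ 0x1206, &demo_5100_n_cfg, demo_5100_bgn_name },
};

int main(void)
{
	for (unsigned int i = 0;
	     i < sizeof(demo_dev_info_table) / sizeof(demo_dev_info_table[0]); i++)
		printf("0x%04x: %s\n", demo_dev_info_table[i].subdevice,
		       demo_dev_info_table[i].name);
	return 0;
}
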
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index f013cf420569..ab13394896e0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -49,7 +49,7 @@
#define IWL6030_FW_PRE "iwlwifi-6000g2b"
#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl6000_base_params = {
+static const struct iwl_family_base_params iwl6000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -62,7 +62,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_base_params iwl6050_base_params = {
+static const struct iwl_family_base_params iwl6050_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -75,7 +75,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_base_params iwl6000_g2_base_params = {
+static const struct iwl_family_base_params iwl6000_g2_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -88,12 +88,6 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_ht_params iwl6000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -107,141 +101,126 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.enhanced_txpower = true,
};
+const struct iwl_mac_cfg iwl6005_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6005,
+ .base = &iwl6000_g2_base,
+};
+
#define IWL_DEVICE_6005 \
.fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6005, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6005_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
- .trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl6005_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6005_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
- IWL_DEVICE_6005,
-};
-
-const struct iwl_cfg iwl6005_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
+const struct iwl_rf_cfg iwl6005_n_cfg = {
IWL_DEVICE_6005,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6005_2agn_sff_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
+const char iwl6005_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6205 AGN";
+const char iwl6005_2agn_sff_name[] = "Intel(R) Centrino(R) Advanced-N 6205S AGN";
+const char iwl6005_2agn_d_name[] = "Intel(R) Centrino(R) Advanced-N 6205D AGN";
+const char iwl6005_2agn_mow1_name[] = "Intel(R) Centrino(R) Advanced-N 6206 AGN";
+const char iwl6005_2agn_mow2_name[] = "Intel(R) Centrino(R) Advanced-N 6207 AGN";
-const struct iwl_cfg iwl6005_2agn_d_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
+const struct iwl_rf_cfg iwl6005_non_n_cfg = {
IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
};
-const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
+const char iwl6005_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6205 ABG";
+const char iwl6005_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6205 BG";
-const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
+const struct iwl_mac_cfg iwl6030_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6030,
+ .base = &iwl6000_g2_base,
};
#define IWL_DEVICE_6030 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
- .trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl6030_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
+const struct iwl_rf_cfg iwl6030_n_cfg = {
IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6030_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
- IWL_DEVICE_6030,
-};
+const char iwl6030_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6230 AGN";
+const char iwl6030_2bgn_name[] = "Intel(R) Centrino(R) Advanced-N 6230 BGN";
+const char iwl1030_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 1030 BGN";
+const char iwl1030_bg_name[] = "Intel(R) Centrino(R) Wireless-N 1030 BG";
-const struct iwl_cfg iwl6030_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
+const struct iwl_rf_cfg iwl6030_non_n_cfg = {
IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
};
-const struct iwl_cfg iwl6030_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
- IWL_DEVICE_6030,
-};
+const char iwl6030_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6230 ABG";
+const char iwl6030_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6230 BG";
#define IWL_DEVICE_6035 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6035_UCODE_API_MAX, \
.ucode_api_min = IWL6035_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
- .trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl6035_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
+const struct iwl_rf_cfg iwl6035_2agn_cfg = {
IWL_DEVICE_6035,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6035_2agn_sff_cfg = {
- .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
- IWL_DEVICE_6035,
- .ht_params = &iwl6000_ht_params,
-};
+const char iwl6035_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6235 AGN";
+const char iwl6035_2agn_sff_name[] = "Intel(R) Centrino(R) Ultimate-N 6235 AGN";
-const struct iwl_cfg iwl1030_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
+const struct iwl_rf_cfg iwl130_bgn_cfg = {
IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
+ .rx_with_siso_diversity = true,
};
-const struct iwl_cfg iwl1030_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
- IWL_DEVICE_6030,
-};
+const char iwl130_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 130 BGN";
-const struct iwl_cfg iwl130_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
+const struct iwl_rf_cfg iwl130_bg_cfg = {
IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
.rx_with_siso_diversity = true,
};
-const struct iwl_cfg iwl130_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
- IWL_DEVICE_6030,
- .rx_with_siso_diversity = true,
+const char iwl130_bg_name[] = "Intel(R) Centrino(R) Wireless-N 130 BG";
+
+const struct iwl_mac_cfg iwl6000i_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6000i,
+ .base = &iwl6000_base,
};
/*
@@ -251,101 +230,127 @@ const struct iwl_cfg iwl130_bg_cfg = {
.fw_name_pre = IWL6000_FW_PRE, \
.ucode_api_max = IWL6000_UCODE_API_MAX, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6000i, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
.nvm_ver = EEPROM_6000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
- .trans.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK
-const struct iwl_cfg iwl6000i_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
+const struct iwl_rf_cfg iwl6000i_2agn_cfg = {
IWL_DEVICE_6000i,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6000i_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
+const char iwl6000i_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6200 AGN";
+
+const struct iwl_rf_cfg iwl6000i_non_n_cfg = {
IWL_DEVICE_6000i,
};
-const struct iwl_cfg iwl6000i_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
- IWL_DEVICE_6000i,
+const char iwl6000i_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6200 ABG";
+const char iwl6000i_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6200 BG";
+
+const struct iwl_mac_cfg iwl6050_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6050,
+ .base = &iwl6050_base,
};
#define IWL_DEVICE_6050 \
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6050, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
.nvm_ver = EEPROM_6050_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
- .trans.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-const struct iwl_cfg iwl6050_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
+const struct iwl_rf_cfg iwl6050_2agn_cfg = {
IWL_DEVICE_6050,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6050_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
+const char iwl6050_2agn_name[] = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN";
+
+const struct iwl_rf_cfg iwl6050_2abg_cfg = {
IWL_DEVICE_6050,
};
+const char iwl6050_2abg_name[] = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG";
+
+const struct iwl_mac_cfg iwl6150_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6150,
+ .base = &iwl6050_base,
+};
+
#define IWL_DEVICE_6150 \
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6150, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6150_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
- .trans.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-const struct iwl_cfg iwl6150_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
+const struct iwl_rf_cfg iwl6150_bgn_cfg = {
IWL_DEVICE_6150,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6150_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+const char iwl6150_bgn_name[] = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN";
+
+const struct iwl_rf_cfg iwl6150_bg_cfg = {
IWL_DEVICE_6150,
};
-const struct iwl_cfg iwl6000_3agn_cfg = {
- .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
+const char iwl6150_bg_name[] = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG";
+
+const struct iwl_mac_cfg iwl6000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6000,
+ .base = &iwl6000_base,
+};
+
+const struct iwl_rf_cfg iwl6000_3agn_cfg = {
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
- .trans.device_family = IWL_DEVICE_FAMILY_6000,
.max_inst_size = IWL60_RTC_INST_SIZE,
.max_data_size = IWL60_RTC_DATA_SIZE,
.nvm_ver = EEPROM_6000_EEPROM_VERSION,
.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
- .trans.base_params = &iwl6000_base_params,
.eeprom_params = &iwl6000_eeprom_params,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.led_mode = IWL_LED_BLINK,
};
+const char iwl6000_3agn_name[] = "Intel(R) Centrino(R) Ultimate-N 6300 AGN";
+
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
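
With ht_params now embedded by value, the initializer that used to live once in iwl6000_ht_params is repeated verbatim in each 6000-series cfg. If that repetition ever became a maintenance concern, it could be factored with a designated-initializer macro; a hypothetical sketch of that alternative (this is not what the patch does, just an illustration of the trade-off):

#include <stdbool.h>

struct ht_params {
	bool ht_greenfield_support;
	bool use_rts_for_aggregation;
	unsigned int ht40_bands;
};

struct rf_cfg { struct ht_params ht_params; };

/* Hypothetical: one macro carries the shared initializer. */
#define DEMO_6000_HT_PARAMS				\
	.ht_params = {					\
		.ht_greenfield_support = true,		\
		.use_rts_for_aggregation = true,	\
		.ht40_bands = (1u << 0) | (1u << 1),	\
	}

static const struct rf_cfg demo_agn_cfg = { DEMO_6000_HT_PARAMS };
static const struct rf_cfg demo_bgn_cfg = { DEMO_6000_HT_PARAMS };

int main(void)
{
	/* Returns 0 when both embedded copies carry the shared values. */
	return !(demo_agn_cfg.ht_params.ht_greenfield_support &&
		 demo_bgn_cfg.ht_params.use_rts_for_aggregation);
}
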
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 4e2afdedf4c6..f987ad3192c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020, 2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020, 2023, 2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -49,7 +49,7 @@
#define IWL7265D_FW_PRE "iwlwifi-7265D"
#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl7000_base_params = {
+static const struct iwl_family_base_params iwl7000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_16K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
@@ -60,6 +60,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
.apmg_wake_up_wa = true,
+ .nvm_hw_section_num = 0,
};
static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
@@ -84,16 +85,13 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.support_tx_backoff = true,
};
-static const struct iwl_ht_params iwl7000_ht_params = {
- .stbc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+const struct iwl_mac_cfg iwl7000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_7000,
+ .base = &iwl7000_base,
};
#define IWL_DEVICE_7000_COMMON \
- .trans.device_family = IWL_DEVICE_FAMILY_7000, \
- .trans.base_params = &iwl7000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 0, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL7000_DCCM_OFFSET
@@ -117,77 +115,52 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.ucode_api_max = IWL7265D_UCODE_API_MAX, \
.ucode_api_min = IWL7265D_UCODE_API_MIN
-const struct iwl_cfg iwl7260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 7260",
+const char iwl7260_2ac_name[] = "Intel(R) Dual Band Wireless AC 7260";
+const char iwl7260_2n_name[] = "Intel(R) Dual Band Wireless N 7260";
+const char iwl7260_n_name[] = "Intel(R) Wireless N 7260";
+const char iwl3160_2ac_name[] = "Intel(R) Dual Band Wireless AC 3160";
+const char iwl3160_2n_name[] = "Intel(R) Dual Band Wireless N 3160";
+const char iwl3160_n_name[] = "Intel(R) Wireless N 3160";
+const char iwl3165_2ac_name[] = "Intel(R) Dual Band Wireless-AC 3165";
+const char iwl3168_2ac_name[] = "Intel(R) Dual Band Wireless-AC 3168";
+const char iwl7265_2ac_name[] = "Intel(R) Dual Band Wireless-AC 7265";
+const char iwl7265_2n_name[] = "Intel(R) Dual Band Wireless-N 7265";
+const char iwl7265_n_name[] = "Intel(R) Wireless-N 7265";
+
+const struct iwl_rf_cfg iwl7260_cfg = {
.fw_name_pre = IWL7260_FW_PRE,
IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL7260_NVM_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
};
-const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
- .name = "Intel(R) Dual Band Wireless AC 7260",
+const struct iwl_rf_cfg iwl7260_high_temp_cfg = {
.fw_name_pre = IWL7260_FW_PRE,
IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL7260_NVM_VERSION,
- .high_temp = true,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
.thermal_params = &iwl7000_high_temp_tt_params,
};
-const struct iwl_cfg iwl7260_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 7260",
- .fw_name_pre = IWL7260_FW_PRE,
- IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL7260_NVM_VERSION,
- .host_interrupt_operation_mode = true,
- .lp_xtal_workaround = true,
- .dccm_len = IWL7260_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7260_n_cfg = {
- .name = "Intel(R) Wireless N 7260",
- .fw_name_pre = IWL7260_FW_PRE,
- IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL7260_NVM_VERSION,
- .host_interrupt_operation_mode = true,
- .lp_xtal_workaround = true,
- .dccm_len = IWL7260_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl3160_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 3160",
+const struct iwl_rf_cfg iwl3160_cfg = {
.fw_name_pre = IWL3160_FW_PRE,
IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL3160_NVM_VERSION,
- .host_interrupt_operation_mode = true,
- .dccm_len = IWL3160_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl3160_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 3160",
- .fw_name_pre = IWL3160_FW_PRE,
- IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL3160_NVM_VERSION,
- .host_interrupt_operation_mode = true,
- .dccm_len = IWL3160_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl3160_n_cfg = {
- .name = "Intel(R) Wireless N 3160",
- .fw_name_pre = IWL3160_FW_PRE,
- IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL3160_NVM_VERSION,
.host_interrupt_operation_mode = true,
.dccm_len = IWL3160_DCCM_LEN,
@@ -204,88 +177,52 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
{0},
};
-static const struct iwl_ht_params iwl7265_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-const struct iwl_cfg iwl3165_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 3165",
+const struct iwl_rf_cfg iwl3165_2ac_cfg = {
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7005D,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL3165_NVM_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
-const struct iwl_cfg iwl3168_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 3168",
+const struct iwl_rf_cfg iwl3168_2ac_cfg = {
.fw_name_pre = IWL3168_FW_PRE,
IWL_DEVICE_3008,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL3168_NVM_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
.nvm_type = IWL_NVM_SDP,
};
-const struct iwl_cfg iwl7265_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 7265",
- .fw_name_pre = IWL7265_FW_PRE,
- IWL_DEVICE_7005,
- .ht_params = &iwl7265_ht_params,
- .nvm_ver = IWL7265_NVM_VERSION,
- .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
- .dccm_len = IWL7265_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7265_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 7265",
- .fw_name_pre = IWL7265_FW_PRE,
- IWL_DEVICE_7005,
- .ht_params = &iwl7265_ht_params,
- .nvm_ver = IWL7265_NVM_VERSION,
- .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
- .dccm_len = IWL7265_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7265_n_cfg = {
- .name = "Intel(R) Wireless N 7265",
+const struct iwl_rf_cfg iwl7265_cfg = {
.fw_name_pre = IWL7265_FW_PRE,
IWL_DEVICE_7005,
- .ht_params = &iwl7265_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL7265_NVM_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
-const struct iwl_cfg iwl7265d_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 7265",
- .fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7005D,
- .ht_params = &iwl7265_ht_params,
- .nvm_ver = IWL7265D_NVM_VERSION,
- .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
- .dccm_len = IWL7265_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7265d_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 7265",
+const struct iwl_rf_cfg iwl7265d_cfg = {
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7005D,
- .ht_params = &iwl7265_ht_params,
- .nvm_ver = IWL7265D_NVM_VERSION,
- .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
- .dccm_len = IWL7265_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7265d_n_cfg = {
- .name = "Intel(R) Wireless N 7265",
- .fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7005D,
- .ht_params = &iwl7265_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL7265D_NVM_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
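
In the 7000-series hunks, fields such as nvm_hw_section_num migrate out of the per-cfg IWL_DEVICE_7000_COMMON macro and into iwl7000_base, so code that previously read them from the cfg would now follow mac_cfg->base. A sketch of that access pattern with placeholder types (the accessor function is invented for illustration):

#include <stdio.h>

struct family_base_params { int nvm_hw_section_num; };
struct mac_cfg { const struct family_base_params *base; };

static const struct family_base_params demo_7000_base = {
	.nvm_hw_section_num = 0,	/* as set in iwl7000_base by this patch */
};

static const struct mac_cfg demo_7000_mac = { .base = &demo_7000_base };

/* Hypothetical accessor: per-family data now hangs off the MAC config. */
static int demo_nvm_hw_section(const struct mac_cfg *mac)
{
	return mac->base->nvm_hw_section_num;
}

int main(void)
{
	printf("nvm hw section: %d\n", demo_nvm_hw_section(&demo_7000_mac));
	return 0;
}
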
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index d09cf8d7dc01..b56574006ee0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2014, 2018-2020, 2023 Intel Corporation
+ * Copyright (C) 2014, 2018-2020, 2023, 2025 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -35,9 +35,7 @@
#define IWL8265_MODULE_FIRMWARE(api) \
IWL8265_FW_PRE "-" __stringify(api) ".ucode"
-#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
-
-static const struct iwl_base_params iwl8000_base_params = {
+static const struct iwl_family_base_params iwl8000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
@@ -47,12 +45,12 @@ static const struct iwl_base_params iwl8000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl8000_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ .nvm_hw_section_num = 10,
+ .features = NETIF_F_RXCSUM,
+ .smem_offset = IWL8260_SMEM_OFFSET,
+ .smem_len = IWL8260_SMEM_LEN,
+ .apmg_not_supported = true,
+ .min_umac_error_event_table = 0x800000,
};
static const struct iwl_tt_params iwl8000_tt_params = {
@@ -76,30 +74,20 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.support_tx_backoff = true,
};
+const struct iwl_mac_cfg iwl8000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_8000,
+ .base = &iwl8000_base,
+};
+
#define IWL_DEVICE_8000_COMMON \
- .trans.device_family = IWL_DEVICE_FAMILY_8000, \
- .trans.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .features = NETIF_F_RXCSUM, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
.dccm2_offset = IWL8260_DCCM2_OFFSET, \
.dccm2_len = IWL8260_DCCM2_LEN, \
- .smem_offset = IWL8260_SMEM_OFFSET, \
- .smem_len = IWL8260_SMEM_LEN, \
- .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
.thermal_params = &iwl8000_tt_params, \
- .apmg_not_supported = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000
-
-#define IWL_DEVICE_8000 \
- IWL_DEVICE_8000_COMMON, \
- .ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_min = IWL8000_UCODE_API_MIN \
+ .nvm_type = IWL_NVM_EXT
#define IWL_DEVICE_8260 \
IWL_DEVICE_8000_COMMON, \
@@ -111,47 +99,39 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.ucode_api_max = IWL8265_UCODE_API_MAX, \
.ucode_api_min = IWL8265_UCODE_API_MIN \
-const struct iwl_cfg iwl8260_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 8260",
- .fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8260,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
-};
+const char iwl8260_2n_name[] = "Intel(R) Dual Band Wireless-N 8260";
+const char iwl8260_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8260";
+const char iwl8265_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8265";
+const char iwl8275_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8275";
+const char iwl4165_2ac_name[] = "Intel(R) Dual Band Wireless-AC 4165";
-const struct iwl_cfg iwl8260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 8260",
+const char iwl_killer_1435i_name[] =
+ "Killer(R) Wireless-AC 1435i Wireless Network Adapter (8265D2W)";
+const char iwl_killer_1434_kix_name[] =
+ "Killer(R) Wireless-AC 1435-KIX Wireless Network Adapter (8265NGW)";
+
+const struct iwl_rf_cfg iwl8260_cfg = {
.fw_name_pre = IWL8000_FW_PRE,
IWL_DEVICE_8260,
- .ht_params = &iwl8000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL8000_NVM_VERSION,
};
-const struct iwl_cfg iwl8265_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 8265",
+const struct iwl_rf_cfg iwl8265_cfg = {
.fw_name_pre = IWL8265_FW_PRE,
IWL_DEVICE_8265,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
- .vht_mu_mimo_supported = true,
-};
-
-const struct iwl_cfg iwl8275_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 8275",
- .fw_name_pre = IWL8265_FW_PRE,
- IWL_DEVICE_8265,
- .ht_params = &iwl8000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL8000_NVM_VERSION,
.vht_mu_mimo_supported = true,
};
-const struct iwl_cfg iwl4165_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 4165",
- .fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8000,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
-};
-
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
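
A small detail worth noting in the exported names throughout this patch: they are declared as arrays (const char iwl8260_2ac_name[]) rather than pointers. For an exported symbol, the array form is a single object holding just the string bytes, while the pointer form adds a separate pointer object on top of the anonymous string. A short sketch of the difference:

#include <stdio.h>

const char demo_name_array[] = "Intel(R) Dual Band Wireless-AC 8260";
const char *demo_name_ptr    = "Intel(R) Dual Band Wireless-AC 8260";

int main(void)
{
	/* sizeof the array is the string length plus the NUL terminator;
	 * sizeof the pointer is just the pointer size, and the string
	 * itself lives in a second, anonymous object. */
	printf("array object: %zu bytes, pointer object: %zu bytes\n",
	       sizeof(demo_name_array), sizeof(demo_name_ptr));
	return 0;
}
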
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 0130d9a9b78b..ac1fa291cf2f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2018-2021, 2023, 2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -15,14 +15,7 @@
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
-/* NVM versions */
-#define IWL9000_NVM_VERSION 0x0a1d
-
/* Memory offsets and lengths */
-#define IWL9000_DCCM_OFFSET 0x800000
-#define IWL9000_DCCM_LEN 0x18000
-#define IWL9000_DCCM2_OFFSET 0x880000
-#define IWL9000_DCCM2_LEN 0x8000
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
@@ -33,7 +26,7 @@
#define IWL9260_MODULE_FIRMWARE(api) \
IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl9000_base_params = {
+static const struct iwl_family_base_params iwl9000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
@@ -43,150 +36,69 @@ static const struct iwl_base_params iwl9000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl9000_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-static const struct iwl_tt_params iwl9000_tt_params = {
- .ct_kill_entry = 115,
- .ct_kill_exit = 93,
- .ct_kill_duration = 5,
- .dynamic_smps_entry = 111,
- .dynamic_smps_exit = 107,
- .tx_protection_entry = 112,
- .tx_protection_exit = 105,
- .tx_backoff = {
- {.temperature = 110, .backoff = 200},
- {.temperature = 111, .backoff = 600},
- {.temperature = 112, .backoff = 1200},
- {.temperature = 113, .backoff = 2000},
- {.temperature = 114, .backoff = 4000},
+ .smem_offset = IWL9000_SMEM_OFFSET,
+ .smem_len = IWL9000_SMEM_LEN,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x380,
+ .min_umac_error_event_table = 0x800000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 92 * 1024,
+ .nvm_hw_section_num = 10,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = MON_BUFF_WRPTR_VER2,
+ .mask = 0xffffffff,
+ },
+ .cycle_cnt = {
+ .addr = MON_BUFF_CYCLE_CNT_VER2,
+ .mask = 0xffffffff,
+ },
},
- .support_ct_kill = true,
- .support_dynamic_smps = true,
- .support_tx_protection = true,
- .support_tx_backoff = true,
+ .ucode_api_max = IWL9000_UCODE_API_MAX,
+ .ucode_api_min = IWL9000_UCODE_API_MIN,
};
-#define IWL_DEVICE_9000 \
- .ucode_api_max = IWL9000_UCODE_API_MAX, \
- .ucode_api_min = IWL9000_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL9000_DCCM_OFFSET, \
- .dccm_len = IWL9000_DCCM_LEN, \
- .dccm2_offset = IWL9000_DCCM2_OFFSET, \
- .dccm2_len = IWL9000_DCCM2_LEN, \
- .smem_offset = IWL9000_SMEM_OFFSET, \
- .smem_len = IWL9000_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .thermal_params = &iwl9000_tt_params, \
- .apmg_not_supported = true, \
- .num_rbds = 512, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x380, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 92 * 1024, \
- .ht_params = &iwl9000_ht_params, \
- .nvm_ver = IWL9000_NVM_VERSION, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = MON_BUFF_WRPTR_VER2, \
- .mask = 0xffffffff, \
- }, \
- .cycle_cnt = { \
- .addr = MON_BUFF_CYCLE_CNT_VER2, \
- .mask = 0xffffffff, \
- }, \
- }
-
-const struct iwl_cfg_trans_params iwl9000_trans_cfg = {
+const struct iwl_mac_cfg iwl9000_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_9000,
- .base_params = &iwl9000_base_params,
+ .base = &iwl9000_base,
.mq_rx_supported = true,
- .rf_id = true,
};
-const struct iwl_cfg_trans_params iwl9560_trans_cfg = {
+const struct iwl_mac_cfg iwl9560_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_9000,
- .base_params = &iwl9000_base_params,
+ .base = &iwl9000_base,
.mq_rx_supported = true,
- .rf_id = true,
.integrated = true,
.xtal_latency = 650,
};
-const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg = {
+const struct iwl_mac_cfg iwl9560_long_latency_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_9000,
- .base_params = &iwl9000_base_params,
+ .base = &iwl9000_base,
.mq_rx_supported = true,
- .rf_id = true,
.integrated = true,
.xtal_latency = 2820,
};
-const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = {
+const struct iwl_mac_cfg iwl9560_shared_clk_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_9000,
- .base_params = &iwl9000_base_params,
+ .base = &iwl9000_base,
.mq_rx_supported = true,
- .rf_id = true,
.integrated = true,
.xtal_latency = 670,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
-const char iwl9162_name[] = "Intel(R) Wireless-AC 9162";
-const char iwl9260_name[] = "Intel(R) Wireless-AC 9260";
-const char iwl9260_1_name[] = "Intel(R) Wireless-AC 9260-1";
-const char iwl9270_name[] = "Intel(R) Wireless-AC 9270";
-const char iwl9461_name[] = "Intel(R) Wireless-AC 9461";
-const char iwl9462_name[] = "Intel(R) Wireless-AC 9462";
-const char iwl9560_name[] = "Intel(R) Wireless-AC 9560";
-const char iwl9162_160_name[] = "Intel(R) Wireless-AC 9162 160MHz";
-const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
-const char iwl9270_160_name[] = "Intel(R) Wireless-AC 9270 160MHz";
-const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz";
-const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz";
-const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
-
-const char iwl9260_killer_1550_name[] =
- "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
-const char iwl9560_killer_1550i_name[] =
- "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
-const char iwl9560_killer_1550i_160_name[] =
- "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
-const char iwl9560_killer_1550s_name[] =
- "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
-const char iwl9560_killer_1550s_160_name[] =
- "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";
-
-const struct iwl_cfg iwl9260_2ac_cfg = {
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_soc = {
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
-};
-
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
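The refactor above folds the per-device IWL_DEVICE_9000 macro into one per-family iwl_family_base_params, so the debug-monitor register descriptors (addr/mask pairs) now live in a single shared structure. A minimal sketch of how such a descriptor would be consumed — iwl_read_prph() is the driver's existing PRPH accessor, while the helper name and the struct tag (modeled on iwl_fw_mon_reg) are assumptions:

	/* sketch: read one monitor register described by an addr/mask pair */
	static u32 iwl_mon_reg_read(struct iwl_trans *trans,
				    const struct iwl_fw_mon_reg *reg)
	{
		if (!reg->addr)
			return 0;	/* descriptor not populated */
		return iwl_read_prph(trans, reg->addr) & reg->mask;
	}

	/* e.g. the firmware-monitor write pointer for this family:
	 * wptr = iwl_mon_reg_read(trans, &iwl9000_base.mon_smem_regs.write_ptr);
	 */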
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index e87b57b9e2c0..3bf9fdbe01c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -15,14 +15,7 @@
/* Lowest firmware API version supported */
#define IWL_AX210_UCODE_API_MIN 77
-/* NVM versions */
-#define IWL_AX210_NVM_VERSION 0x0a1d
-
/* Memory offsets and lengths */
-#define IWL_AX210_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_AX210_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_AX210_DCCM2_OFFSET 0x880000
-#define IWL_AX210_DCCM2_LEN 0x8000
#define IWL_AX210_SMEM_OFFSET 0x400000
#define IWL_AX210_SMEM_LEN 0xD0000
@@ -47,8 +40,7 @@
#define IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_MA_B_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl_ax210_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_ax210_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -57,74 +49,60 @@ static const struct iwl_base_params iwl_ax210_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .smem_offset = IWL_AX210_SMEM_OFFSET,
+ .smem_len = IWL_AX210_SMEM_LEN,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x380,
+ .min_umac_error_event_table = 0x400000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .min_txq_size = 128,
+ .gp2_reg_addr = 0xd02c68,
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,
+ },
+ .cycle_cnt = {
+ .addr = DBGC_DBGBUF_WRAP_AROUND,
+ .mask = 0xffffffff,
+ },
+ .cur_frag = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,
+ },
+ },
+ .ucode_api_min = IWL_AX210_UCODE_API_MIN,
+ .ucode_api_max = IWL_AX210_UCODE_API_MAX,
};
-#define IWL_DEVICE_AX210_COMMON \
- .ucode_api_min = IWL_AX210_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_AX210_DCCM_OFFSET, \
- .dccm_len = IWL_AX210_DCCM_LEN, \
- .dccm2_offset = IWL_AX210_DCCM2_OFFSET, \
- .dccm2_len = IWL_AX210_DCCM2_LEN, \
- .smem_offset = IWL_AX210_SMEM_OFFSET, \
- .smem_len = IWL_AX210_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x380, \
- .ht_params = &iwl_22000_ht_params, \
- .nvm_ver = IWL_AX210_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0x400000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_AX210 \
- IWL_DEVICE_AX210_COMMON, \
- .ucode_api_max = IWL_AX210_UCODE_API_MAX, \
- .trans.umac_prph_offset = 0x300000, \
- .trans.device_family = IWL_DEVICE_FAMILY_AX210, \
- .trans.base_params = &iwl_ax210_base_params, \
- .min_txq_size = 128, \
- .gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = DBGC_DBGBUF_WRAP_AROUND, \
- .mask = 0xffffffff, \
- }, \
- .cur_frag = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
- }, \
- }
+const struct iwl_mac_cfg iwl_ty_mac_cfg = {
+ .mq_rx_supported = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base = &iwl_ax210_base,
+ .umac_prph_offset = 0x300000,
+ /* TODO: the following values need to be checked */
+ .xtal_latency = 500,
+};
-const struct iwl_cfg_trans_params iwl_so_trans_cfg = {
+const struct iwl_mac_cfg iwl_so_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
- .base_params = &iwl_ax210_base_params,
+ .base = &iwl_ax210_base,
.umac_prph_offset = 0x300000,
.integrated = true,
/* TODO: the following values need to be checked */
@@ -132,12 +110,11 @@ const struct iwl_cfg_trans_params iwl_so_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
-const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
+const struct iwl_mac_cfg iwl_so_long_latency_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
- .base_params = &iwl_ax210_base_params,
+ .base = &iwl_ax210_base,
.umac_prph_offset = 0x300000,
.integrated = true,
.low_latency_xtal = true,
@@ -145,12 +122,11 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
+const struct iwl_mac_cfg iwl_so_long_latency_imr_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
- .base_params = &iwl_ax210_base_params,
+ .base = &iwl_ax210_base,
.umac_prph_offset = 0x300000,
.integrated = true,
.low_latency_xtal = true,
@@ -159,107 +135,15 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
.imr_enabled = true,
};
-/*
- * If the device doesn't support HE, no need to have that many buffers.
- * AX210 devices can split multiple frames into a single RB, so fewer are
- * needed; AX210 cannot (but use smaller RBs by default) - these sizes
- * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with
- * additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_NON_HE 512
-#define IWL_NUM_RBDS_AX210_HE 4096
-
-const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
+const struct iwl_mac_cfg iwl_ma_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_AX210,
- .base_params = &iwl_ax210_base_params,
+ .base = &iwl_ax210_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000
};
-const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
-const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
-const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
-
-const char iwl_ax210_killer_1675w_name[] =
- "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)";
-const char iwl_ax210_killer_1675x_name[] =
- "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)";
-const char iwl_ax211_killer_1675s_name[] =
- "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)";
-const char iwl_ax211_killer_1675i_name[] =
- "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)";
-const char iwl_ax411_killer_1690s_name[] =
- "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)";
-const char iwl_ax411_killer_1690i_name[] =
- "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)";
-
-const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_SO_A_JF_B_FW_PRE,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
- .name = iwl_ax211_name,
- .fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
-const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
- .name = iwl_ax211_name,
- .fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
- .trans.xtal_latency = 12000,
- .trans.low_latency_xtal = true,
-};
-
-const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX210 160MHz",
- .fw_name_pre = IWL_TY_A_GF_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
-const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
- .name = iwl_ax411_name,
- .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
-const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
- .name = iwl_ax411_name,
- .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
- .trans.xtal_latency = 12000,
- .trans.low_latency_xtal = true,
-};
-
-const struct iwl_cfg iwl_cfg_ma = {
- .fw_name_mac = "ma",
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
-const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
- .fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
IWL_FW_AND_PNVM(IWL_SO_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
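The RBD-count comment removed above (and its twins removed from bz.c and sc.c below) encodes a consistent sizing rule. Spelled out, with the roughly 2x processing-time headroom inferred from the "additional overhead" wording rather than stated in the patch:

	/* HE:  8 MSDUs * 256 A-MSDUs = 2048 frames/A-MPDU; ~2x headroom -> 4096 */
	#define IWL_NUM_RBDS_AX210_HE	4096
	/* EHT: 8 MSDUs * 512 A-MSDUs = 4096 frames/A-MPDU; ~2x headroom -> 8192 */
	#define IWL_NUM_RBDS_EHT	(512 * 16)
	/* non-HE devices cannot aggregate nearly as much, so far fewer suffice */
	#define IWL_NUM_RBDS_NON_HE	512

The new rf-*.c files below reference IWL_NUM_RBDS_EHT, IWL_NUM_RBDS_HE and IWL_NUM_RBDS_NON_HE directly in their .num_rbds fields.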
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index f055255a7c93..05e45fff8b36 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,19 +10,12 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 98
+#define IWL_BZ_UCODE_API_MAX 99
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 93
-/* NVM versions */
-#define IWL_BZ_NVM_VERSION 0x0a1d
-
/* Memory offsets and lengths */
-#define IWL_BZ_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_BZ_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_BZ_DCCM2_OFFSET 0x880000
-#define IWL_BZ_DCCM2_LEN 0x8000
#define IWL_BZ_SMEM_OFFSET 0x400000
#define IWL_BZ_SMEM_LEN 0xD0000
@@ -38,13 +31,7 @@
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#if !IS_ENABLED(CONFIG_IWLMVM)
-const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
-const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
-#endif
-
-static const struct iwl_base_params iwl_bz_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_bz_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -53,91 +40,55 @@ static const struct iwl_base_params iwl_bz_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .smem_offset = IWL_BZ_SMEM_OFFSET,
+ .smem_len = IWL_BZ_SMEM_LEN,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x30,
+ .min_umac_error_event_table = 0xD0000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .min_txq_size = 128,
+ .gp2_reg_addr = 0xd02c68,
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,
+ },
+ .cycle_cnt = {
+ .addr = DBGC_DBGBUF_WRAP_AROUND,
+ .mask = 0xffffffff,
+ },
+ .cur_frag = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,
+ },
+ },
+ .mon_dbgi_regs = {
+ .write_ptr = {
+ .addr = DBGI_SRAM_FIFO_POINTERS,
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,
+ },
+ },
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .ucode_api_max = IWL_BZ_UCODE_API_MAX,
+ .ucode_api_min = IWL_BZ_UCODE_API_MIN,
};
-const struct iwl_ht_params iwl_bz_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) |
- BIT(NL80211_BAND_6GHZ),
-};
-
-#define IWL_DEVICE_BZ_COMMON \
- .ucode_api_max = IWL_BZ_UCODE_API_MAX, \
- .ucode_api_min = IWL_BZ_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_BZ_DCCM_OFFSET, \
- .dccm_len = IWL_BZ_DCCM_LEN, \
- .dccm2_offset = IWL_BZ_DCCM2_OFFSET, \
- .dccm2_len = IWL_BZ_DCCM2_LEN, \
- .smem_offset = IWL_BZ_SMEM_OFFSET, \
- .smem_len = IWL_BZ_SMEM_LEN, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x30, \
- .nvm_ver = IWL_BZ_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0xD0000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }, \
- .trans.umac_prph_offset = 0x300000, \
- .trans.device_family = IWL_DEVICE_FAMILY_BZ, \
- .trans.base_params = &iwl_bz_base_params, \
- .min_txq_size = 128, \
- .gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = DBGC_DBGBUF_WRAP_AROUND, \
- .mask = 0xffffffff, \
- }, \
- .cur_frag = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
- }, \
- }, \
- .mon_dbgi_regs = { \
- .write_ptr = { \
- .addr = DBGI_SRAM_FIFO_POINTERS, \
- .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_BZ \
- IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_bz_ht_params
-
-/*
- * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
- * A-MPDU, with additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_BZ_EHT (512 * 16)
-
-const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
+const struct iwl_mac_cfg iwl_bz_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_BZ,
- .base_params = &iwl_bz_base_params,
+ .base = &iwl_bz_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000,
@@ -146,38 +97,16 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
+const struct iwl_mac_cfg iwl_gl_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_BZ,
- .base_params = &iwl_bz_base_params,
+ .base = &iwl_bz_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.umac_prph_offset = 0x300000,
.xtal_latency = 12000,
.low_latency_xtal = true,
};
-const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
-const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
-const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
-const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
-
-const struct iwl_cfg iwl_cfg_bz = {
- .fw_name_mac = "bz",
- .uhb_supported = true,
- IWL_DEVICE_BZ,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_BZ_EHT,
-};
-
-const struct iwl_cfg iwl_cfg_gl = {
- .fw_name_mac = "gl",
- .uhb_supported = true,
- IWL_DEVICE_BZ,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_BZ_EHT,
-};
-
MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_BZ_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_BZ_UCODE_API_MAX);
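With this split, a shipping device is described by a MAC-side iwl_mac_cfg (such as iwl_bz_mac_cfg above) paired with an RF-side iwl_rf_cfg and a marketing name from the new cfg/rf-*.c files below. The pairing table itself is not part of these hunks, so the entry layout here is schematic only:

	/* hypothetical pairing entry: BZ MAC + FM RF -> "BE201" */
	struct iwl_dev_pairing {
		const struct iwl_mac_cfg *mac_cfg;
		const struct iwl_rf_cfg *rf_cfg;
		const char *name;
	};

	static const struct iwl_dev_pairing be201_example = {
		.mac_cfg = &iwl_bz_mac_cfg,
		.rf_cfg  = &iwl_rf_fm,
		.name    = iwl_be201_name,
	};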
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index 282b9b846c3a..45e55cef42ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -9,35 +9,21 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_DR_UCODE_API_MAX 98
+#define IWL_DR_UCODE_API_MAX 99
/* Lowest firmware API version supported */
-#define IWL_DR_UCODE_API_MIN 96
-
-/* NVM versions */
-#define IWL_DR_NVM_VERSION 0x0a1d
+#define IWL_DR_UCODE_API_MIN 97
/* Memory offsets and lengths */
-#define IWL_DR_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_DR_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_DR_DCCM2_OFFSET 0x880000
-#define IWL_DR_DCCM2_LEN 0x8000
#define IWL_DR_SMEM_OFFSET 0x400000
#define IWL_DR_SMEM_LEN 0xD0000
#define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0"
-#define IWL_BR_A_PET_A_FW_PRE "iwlwifi-br-a0-petc-a0"
-#define IWL_BR_A_PE_A_FW_PRE "iwlwifi-br-a0-pe-a0"
#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(api) \
- IWL_BR_A_PET_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(api) \
- IWL_BR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl_dr_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_dr_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -46,87 +32,55 @@ static const struct iwl_base_params iwl_dr_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .smem_offset = IWL_DR_SMEM_OFFSET,
+ .smem_len = IWL_DR_SMEM_LEN,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x30,
+ .min_umac_error_event_table = 0xD0000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .min_txq_size = 128,
+ .gp2_reg_addr = 0xd02c68,
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,
+ },
+ .cycle_cnt = {
+ .addr = DBGC_DBGBUF_WRAP_AROUND,
+ .mask = 0xffffffff,
+ },
+ .cur_frag = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,
+ },
+ },
+ .mon_dbgi_regs = {
+ .write_ptr = {
+ .addr = DBGI_SRAM_FIFO_POINTERS,
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,
+ },
+ },
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .ucode_api_max = IWL_DR_UCODE_API_MAX,
+ .ucode_api_min = IWL_DR_UCODE_API_MIN,
};
-#define IWL_DEVICE_DR_COMMON \
- .ucode_api_max = IWL_DR_UCODE_API_MAX, \
- .ucode_api_min = IWL_DR_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_DR_DCCM_OFFSET, \
- .dccm_len = IWL_DR_DCCM_LEN, \
- .dccm2_offset = IWL_DR_DCCM2_OFFSET, \
- .dccm2_len = IWL_DR_DCCM2_LEN, \
- .smem_offset = IWL_DR_SMEM_OFFSET, \
- .smem_len = IWL_DR_SMEM_LEN, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x30, \
- .nvm_ver = IWL_DR_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0xD0000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }, \
- .trans.umac_prph_offset = 0x300000, \
- .trans.device_family = IWL_DEVICE_FAMILY_DR, \
- .trans.base_params = &iwl_dr_base_params, \
- .min_txq_size = 128, \
- .gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = DBGC_DBGBUF_WRAP_AROUND, \
- .mask = 0xffffffff, \
- }, \
- .cur_frag = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
- }, \
- }, \
- .mon_dbgi_regs = { \
- .write_ptr = { \
- .addr = DBGI_SRAM_FIFO_POINTERS, \
- .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_DR \
- IWL_DEVICE_DR_COMMON, \
- .uhb_supported = true, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .num_rbds = IWL_NUM_RBDS_DR_EHT, \
- .ht_params = &iwl_bz_ht_params
-
-/*
- * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
- * A-MPDU, with additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_DR_EHT (512 * 16)
-
-const struct iwl_cfg_trans_params iwl_dr_trans_cfg = {
+const struct iwl_mac_cfg iwl_dr_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_DR,
- .base_params = &iwl_dr_base_params,
+ .base = &iwl_dr_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000,
@@ -135,31 +89,5 @@ const struct iwl_cfg_trans_params iwl_dr_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const char iwl_dr_name[] = "Intel(R) TBD Dr device";
-
-const struct iwl_cfg iwl_cfg_dr = {
- .fw_name_mac = "dr",
- IWL_DEVICE_DR,
-};
-
-const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
- .device_family = IWL_DEVICE_FAMILY_DR,
- .base_params = &iwl_dr_base_params,
- .mq_rx_supported = true,
- .rf_id = true,
- .gen2 = true,
- .umac_prph_offset = 0x300000,
- .xtal_latency = 12000,
- .low_latency_xtal = true,
-};
-
-const char iwl_br_name[] = "Intel(R) TBD Br device";
-
-const struct iwl_cfg iwl_cfg_br = {
- .fw_name_mac = "br",
- IWL_DEVICE_DR,
-};
-
MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c
new file mode 100644
index 000000000000..456a666c8dfd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* NVM versions */
+#define IWL_FM_NVM_VERSION 0x0a1d
+
+#define IWL_DEVICE_FM \
+ .ht_params = { \
+ .stbc = true, \
+ .ldpc = true, \
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | \
+ BIT(NL80211_BAND_5GHZ), \
+ }, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .non_shared_ant = ANT_B, \
+ .vht_mu_mimo_supported = true, \
+ .uhb_supported = true, \
+ .num_rbds = IWL_NUM_RBDS_EHT, \
+ .nvm_ver = IWL_FM_NVM_VERSION, \
+ .nvm_type = IWL_NVM_EXT
+
+const struct iwl_rf_cfg iwl_rf_fm = {
+ IWL_DEVICE_FM,
+};
+
+const struct iwl_rf_cfg iwl_rf_fm_160mhz = {
+ IWL_DEVICE_FM,
+ .bw_limit = 160,
+};
+
+const char iwl_killer_be1750s_name[] =
+ "Killer(R) Wi-Fi 7 BE1750s 320MHz Wireless Network Adapter (BE201D2W)";
+const char iwl_killer_be1750i_name[] =
+ "Killer(R) Wi-Fi 7 BE1750i 320MHz Wireless Network Adapter (BE201NGW)";
+const char iwl_killer_be1750w_name[] =
+ "Killer(TM) Wi-Fi 7 BE1750w 320MHz Wireless Network Adapter (BE200D2W)";
+const char iwl_killer_be1750x_name[] =
+ "Killer(TM) Wi-Fi 7 BE1750x 320MHz Wireless Network Adapter (BE200NGW)";
+const char iwl_killer_be1790s_name[] =
+ "Killer(R) Wi-Fi 7 BE1790s 320MHz Wireless Network Adapter (BE401D2W)";
+const char iwl_killer_be1790i_name[] =
+ "Killer(R) Wi-Fi 7 BE1790i 320MHz Wireless Network Adapter (BE401NGW)";
+
+const char iwl_be201_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
+const char iwl_be200_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
+const char iwl_be202_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
+const char iwl_be401_name[] = "Intel(R) Wi-Fi 7 BE401 320MHz";
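iwl_rf_fm_160mhz differs from iwl_rf_fm only by .bw_limit = 160, i.e. the same FM RF restricted to 160 MHz parts such as BE202. A consumer would plausibly clamp against the field as below; the zero-means-unlimited convention and the helper name are assumptions, not taken from this patch:

	/* sketch: clamp the advertised bandwidth to the RF config's limit */
	static int iwl_max_bw_mhz(const struct iwl_rf_cfg *cfg, int mac_max_mhz)
	{
		if (cfg->bw_limit && cfg->bw_limit < mac_max_mhz)
			return cfg->bw_limit;	/* e.g. 160 for iwl_rf_fm_160mhz */
		return mac_max_mhz;		/* 0 read as "no RF-side limit" */
	}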
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
new file mode 100644
index 000000000000..f55c286e83be
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* NVM versions */
+#define IWL_GF_NVM_VERSION 0x0a1d
+
+const struct iwl_rf_cfg iwl_rf_gf = {
+ .uhb_supported = true,
+ .led_mode = IWL_LED_RF_STATE,
+ .non_shared_ant = ANT_B,
+ .vht_mu_mimo_supported = true,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ },
+ .nvm_ver = IWL_GF_NVM_VERSION,
+ .nvm_type = IWL_NVM_EXT,
+ .num_rbds = IWL_NUM_RBDS_HE,
+};
+
+const char iwl_ax210_killer_1675w_name[] =
+ "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)";
+const char iwl_ax210_killer_1675x_name[] =
+ "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)";
+const char iwl_ax211_killer_1675s_name[] =
+ "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211D2W)";
+const char iwl_ax211_killer_1675i_name[] =
+ "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)";
+const char iwl_ax411_killer_1690s_name[] =
+ "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)";
+const char iwl_ax411_killer_1690i_name[] =
+ "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)";
+
+const char iwl_ax210_name[] = "Intel(R) Wi-Fi 6E AX210 160MHz";
+const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
+const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
new file mode 100644
index 000000000000..db02664e3917
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* NVM versions */
+#define IWL_HR_NVM_VERSION 0x0a1d
+
+#define IWL_DEVICE_HR \
+ .led_mode = IWL_LED_RF_STATE, \
+ .non_shared_ant = ANT_B, \
+ .vht_mu_mimo_supported = true, \
+ .ht_params = { \
+ .stbc = true, \
+ .ldpc = true, \
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | \
+ BIT(NL80211_BAND_5GHZ), \
+ }, \
+ .num_rbds = IWL_NUM_RBDS_HE, \
+ .nvm_ver = IWL_HR_NVM_VERSION, \
+ .nvm_type = IWL_NVM_EXT
+
+const struct iwl_rf_cfg iwl_rf_hr1 = {
+ IWL_DEVICE_HR,
+ .tx_with_siso_diversity = true,
+};
+
+const struct iwl_rf_cfg iwl_rf_hr = {
+ IWL_DEVICE_HR,
+};
+
+const struct iwl_rf_cfg iwl_rf_hr_80mhz = {
+ IWL_DEVICE_HR,
+ .bw_limit = 80,
+};
+
+const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
+const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
+const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c
new file mode 100644
index 000000000000..467eaeae6deb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2021, 2023, 2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* NVM versions */
+#define IWL_JF_NVM_VERSION 0x0a1d
+
+/* Memory offsets and lengths */
+#define IWL9000_DCCM_OFFSET 0x800000
+#define IWL9000_DCCM_LEN 0x18000
+#define IWL9000_DCCM2_OFFSET 0x880000
+#define IWL9000_DCCM2_LEN 0x8000
+
+static const struct iwl_tt_params iwl_jf_tt_params = {
+ .ct_kill_entry = 115,
+ .ct_kill_exit = 93,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 111,
+ .dynamic_smps_exit = 107,
+ .tx_protection_entry = 112,
+ .tx_protection_exit = 105,
+ .tx_backoff = {
+ {.temperature = 110, .backoff = 200},
+ {.temperature = 111, .backoff = 600},
+ {.temperature = 112, .backoff = 1200},
+ {.temperature = 113, .backoff = 2000},
+ {.temperature = 114, .backoff = 4000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
+/* these values are ignored if not with Pu/Th MAC firmware, due to offload */
+#define IWL_DEVICE_JF_PU \
+ .dccm_offset = IWL9000_DCCM_OFFSET, \
+ .dccm_len = IWL9000_DCCM_LEN, \
+ .dccm2_offset = IWL9000_DCCM2_OFFSET, \
+ .dccm2_len = IWL9000_DCCM2_LEN, \
+ .thermal_params = &iwl_jf_tt_params
+
+#define IWL_DEVICE_JF \
+ IWL_DEVICE_JF_PU, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .non_shared_ant = ANT_B, \
+ .num_rbds = IWL_NUM_RBDS_NON_HE, \
+ .vht_mu_mimo_supported = true, \
+ .ht_params = { \
+ .stbc = true, \
+ .ldpc = true, \
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | \
+ BIT(NL80211_BAND_5GHZ), \
+ }, \
+ .nvm_ver = IWL_JF_NVM_VERSION, \
+ .nvm_type = IWL_NVM_EXT
+
+const struct iwl_rf_cfg iwl_rf_jf = {
+ IWL_DEVICE_JF,
+};
+
+const struct iwl_rf_cfg iwl_rf_jf_80mhz = {
+ IWL_DEVICE_JF,
+ .bw_limit = 80,
+};
+
+const char iwl9260_name[] = "Intel(R) Wireless-AC 9260";
+const char iwl9461_name[] = "Intel(R) Wireless-AC 9461";
+const char iwl9462_name[] = "Intel(R) Wireless-AC 9462";
+const char iwl9560_name[] = "Intel(R) Wireless-AC 9560";
+const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
+const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz";
+const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz";
+const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
+
+const char iwl9260_killer_1550_name[] =
+ "Killer(R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
+const char iwl9560_killer_1550i_name[] =
+ "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
+const char iwl9560_killer_1550s_name[] =
+ "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";
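The iwl_jf_tt_params table above pairs temperature thresholds with Tx backoff values. The intended lookup is a threshold walk along the following lines; TT_TX_BACKOFF_SIZE and the exact out-of-range behaviour are assumptions here, not taken from this patch:

	/* sketch: backoff for the highest threshold not above the temperature */
	static u32 iwl_tt_tx_backoff(const struct iwl_tt_params *p, s32 temp)
	{
		u32 backoff = 0;
		int i;

		for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
			if (temp < p->tx_backoff[i].temperature)
				break;
			backoff = p->tx_backoff[i].backoff;
		}
		return backoff;	/* e.g. 200 at 110C, 4000 at 114C and above */
	}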
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c
new file mode 100644
index 000000000000..483f21659eff
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* currently iwl_rf_pe is just a define for the WH one */
+
+const char iwl_killer_bn1850w2_name[] =
+ "Killer(R) Wi-Fi 8 BN1850w2 320MHz Wireless Network Adapter (BN201.D2W)";
+const char iwl_killer_bn1850i_name[] =
+ "Killer(R) Wi-Fi 8 BN1850i 320MHz Wireless Network Adapter (BN201.NGW)";
+
+const char iwl_bn201_name[] = "Intel(R) Wi-Fi 8 BN201";
+const char iwl_be221_name[] = "Intel(R) Wi-Fi 7 BE221";
+const char iwl_be223_name[] = "Intel(R) Wi-Fi 7 BE223";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c
new file mode 100644
index 000000000000..97735175cb0e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* currently iwl_rf_wh/iwl_rf_wh_160mhz are just defines for the FM ones */
+
+const char iwl_killer_be1775s_name[] =
+ "Killer(R) Wi-Fi 7 BE1775s 320MHz Wireless Network Adapter (BE211D2W)";
+const char iwl_killer_be1775i_name[] =
+ "Killer(R) Wi-Fi 7 BE1775i 320MHz Wireless Network Adapter (BE211NGW)";
+
+const char iwl_be211_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
+const char iwl_be213_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 59af36960f9c..b2e4d4035296 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,19 +10,15 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 98
+#define IWL_SC_UCODE_API_MAX 99
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 93
+#define IWL_SC_UCODE_API_MIN 97
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
/* Memory offsets and lengths */
-#define IWL_SC_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_SC_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_SC_DCCM2_OFFSET 0x880000
-#define IWL_SC_DCCM2_LEN 0x8000
#define IWL_SC_SMEM_OFFSET 0x400000
#define IWL_SC_SMEM_LEN 0xD0000
@@ -43,8 +39,7 @@
#define IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl_sc_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_sc_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -53,87 +48,55 @@ static const struct iwl_base_params iwl_sc_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .smem_offset = IWL_SC_SMEM_OFFSET,
+ .smem_len = IWL_SC_SMEM_LEN,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x30,
+ .min_umac_error_event_table = 0xD0000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .min_txq_size = 128,
+ .gp2_reg_addr = 0xd02c68,
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,
+ },
+ .cycle_cnt = {
+ .addr = DBGC_DBGBUF_WRAP_AROUND,
+ .mask = 0xffffffff,
+ },
+ .cur_frag = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,
+ },
+ },
+ .mon_dbgi_regs = {
+ .write_ptr = {
+ .addr = DBGI_SRAM_FIFO_POINTERS,
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,
+ },
+ },
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .ucode_api_max = IWL_SC_UCODE_API_MAX,
+ .ucode_api_min = IWL_SC_UCODE_API_MIN,
};
-#define IWL_DEVICE_BZ_COMMON \
- .ucode_api_max = IWL_SC_UCODE_API_MAX, \
- .ucode_api_min = IWL_SC_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_SC_DCCM_OFFSET, \
- .dccm_len = IWL_SC_DCCM_LEN, \
- .dccm2_offset = IWL_SC_DCCM2_OFFSET, \
- .dccm2_len = IWL_SC_DCCM2_LEN, \
- .smem_offset = IWL_SC_SMEM_OFFSET, \
- .smem_len = IWL_SC_SMEM_LEN, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x30, \
- .nvm_ver = IWL_SC_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0xD0000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }, \
- .trans.umac_prph_offset = 0x300000, \
- .trans.device_family = IWL_DEVICE_FAMILY_SC, \
- .trans.base_params = &iwl_sc_base_params, \
- .min_txq_size = 128, \
- .gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = DBGC_DBGBUF_WRAP_AROUND, \
- .mask = 0xffffffff, \
- }, \
- .cur_frag = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
- }, \
- }, \
- .mon_dbgi_regs = { \
- .write_ptr = { \
- .addr = DBGI_SRAM_FIFO_POINTERS, \
- .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_SC \
- IWL_DEVICE_BZ_COMMON, \
- .uhb_supported = true, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .num_rbds = IWL_NUM_RBDS_SC_EHT, \
- .ht_params = &iwl_bz_ht_params
-
-/*
- * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
- * A-MPDU, with additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_SC_EHT (512 * 16)
-
-const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
+const struct iwl_mac_cfg iwl_sc_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_SC,
- .base_params = &iwl_sc_base_params,
+ .base = &iwl_sc_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000,
@@ -142,21 +105,6 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const struct iwl_cfg iwl_cfg_sc = {
- .fw_name_mac = "sc",
- IWL_DEVICE_SC,
-};
-
-const struct iwl_cfg iwl_cfg_sc2 = {
- .fw_name_mac = "sc2",
- IWL_DEVICE_SC,
-};
-
-const struct iwl_cfg iwl_cfg_sc2f = {
- .fw_name_mac = "sc2f",
- IWL_DEVICE_SC,
-};
-
IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
MODULE_FIRMWARE(IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index a13add556a7b..1ebc7effcc2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2021, 2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2021, 2024-2025 Intel Corporation
*/
#ifndef __iwl_agn_h__
#define __iwl_agn_h__
@@ -399,7 +399,7 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
* later with iwl_free_nvm_data().
*/
struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const u8 *eeprom, size_t eeprom_size);
int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 2ed4b6e798ab..ec94c43ba28c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2025 Intel Corporation
*****************************************************************************/
#include <linux/slab.h>
@@ -2097,7 +2097,8 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
char buf[40];
const size_t bufsz = sizeof(buf);
- if (priv->cfg->ht_params)
+ /* HT devices also have at least one HT40 band */
+ if (priv->cfg->ht_params.ht40_bands)
pos += scnprintf(buf + pos, bufsz - pos,
"use %s for aggregation\n",
(priv->hw_params.use_rts_for_aggregation) ?
@@ -2117,7 +2118,8 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
int buf_size;
int rts;
- if (!priv->cfg->ht_params)
+ /* HT devices also have at least one HT40 band */
+ if (!priv->cfg->ht_params.ht40_bands)
return -EINVAL;
memset(buf, 0, sizeof(buf));
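The two debugfs hunks above track the iwl_rf_cfg change seen in 8000.c/9000.c: ht_params is now embedded rather than pointed to, so the old NULL test becomes a field test. Per the new comments, every HT device has at least one HT40 band, which makes ht40_bands a safe proxy. Side by side:

	/* old: a NULL pointer meant "no HT support" */
	static bool iwl_has_ht_old(const struct iwl_cfg *cfg)
	{
		return cfg->ht_params != NULL;
	}

	/* new: the embedded struct always exists; test a mandatory HT field */
	static bool iwl_has_ht_new(const struct iwl_rf_cfg *cfg)
	{
		return cfg->ht_params.ht40_bands != 0;
	}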
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 4ac8b862ad41..25b24820466d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -2,6 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014, 2020, 2023 Intel Corporation. All rights reserved.
+ * Copyright (C) 2025 Intel Corporation
*****************************************************************************/
/*
* Please use this file (dev.h) for driver implementation definitions.
@@ -627,7 +628,7 @@ struct iwl_priv {
struct iwl_trans *trans;
struct device *dev; /* for debug prints only */
- const struct iwl_cfg *cfg;
+ const struct iwl_rf_cfg *cfg;
const struct iwl_fw *fw;
const struct iwl_dvm_cfg *lib;
unsigned long status;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index 48a8349680fc..3447ae0b160a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2025 Intel Corporation
*****************************************************************************/
#include <linux/units.h>
@@ -481,7 +481,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
- switch (priv->trans->trans_cfg->device_family) {
+ switch (priv->trans->mac_cfg->device_family) {
case IWL_DEVICE_FAMILY_6005:
case IWL_DEVICE_FAMILY_6030:
case IWL_DEVICE_FAMILY_6000:
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index cdc05f7e75a6..2423125e5284 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2019, 2021, 2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021, 2024-2025 Intel Corporation
*/
#include <linux/types.h>
#include <linux/slab.h>
@@ -151,7 +151,7 @@ static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
{
if (WARN_ON(offset + sizeof(u16) > eeprom_size))
return 0;
- return le16_to_cpup((__le16 *)(eeprom + offset));
+ return le16_to_cpup((const __le16 *)(eeprom + offset));
}
static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
@@ -204,8 +204,8 @@ static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
return (address & ADDRESS_MSK) + (offset << 1);
}
-static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
- u32 offset)
+static const void *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
+ u32 offset)
{
u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
@@ -218,10 +218,9 @@ static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
struct iwl_nvm_data *data)
{
- struct iwl_eeprom_calib_hdr *hdr;
+ const struct iwl_eeprom_calib_hdr *hdr;
- hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_CALIB_ALL);
+ hdr = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_CALIB_ALL);
if (!hdr)
return -ENODATA;
data->calib_version = hdr->version;
@@ -295,7 +294,7 @@ struct iwl_eeprom_enhanced_txpwr {
} __packed;
static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
- struct iwl_eeprom_enhanced_txpwr *txp)
+ const struct iwl_eeprom_enhanced_txpwr *txp)
{
s8 result = 0; /* (.5 dBm) */
@@ -329,7 +328,7 @@ static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
static void
iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
- struct iwl_eeprom_enhanced_txpwr *txp,
+ const struct iwl_eeprom_enhanced_txpwr *txp,
int n_channels, s8 max_txpower_avg)
{
int ch_idx;
@@ -360,20 +359,18 @@ static void iwl_eeprom_enhanced_txpower(struct device *dev,
const u8 *eeprom, size_t eeprom_size,
int n_channels)
{
- struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+ const struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
int idx, entries;
- __le16 *txp_len;
+ const __le16 *txp_len;
s8 max_txp_avg_halfdbm;
BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
/* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_TXP_SZ_OFFS);
+ txp_len = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_SZ_OFFS);
entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
- txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_TXP_OFFS);
+ txp_array = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_OFFS);
for (idx = 0; idx < entries; idx++) {
txp = &txp_array[idx];
@@ -416,7 +413,7 @@ static void iwl_eeprom_enhanced_txpower(struct device *dev,
}
}
-static void iwl_init_band_reference(const struct iwl_cfg *cfg,
+static void iwl_init_band_reference(const struct iwl_rf_cfg *cfg,
const u8 *eeprom, size_t eeprom_size,
int eeprom_band, int *eeprom_ch_count,
const struct iwl_eeprom_channel **ch_info,
@@ -426,7 +423,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg,
offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
- *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
+ *ch_info = iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
switch (eeprom_band) {
case 1: /* 2.4GHz band */
@@ -510,7 +507,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev,
#define CHECK_AND_PRINT_I(x) \
((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
-static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+static int iwl_init_channel_map(struct device *dev, const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
{
@@ -749,7 +746,7 @@ static int iwl_nvm_is_otp(struct iwl_trans *trans)
u32 otpgp;
/* OTP only valid for CP/PP and after */
- switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ switch (trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) {
case CSR_HW_REV_TYPE_NONE:
IWL_ERR(trans, "Unknown hardware type\n");
return -EIO;
@@ -784,7 +781,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
* CSR auto clock gate disable bit -
* this is only applicable for HW with OTP shadow RAM
*/
- if (trans->trans_cfg->base_params->shadow_ram_support)
+ if (trans->mac_cfg->base->shadow_ram_support)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -905,7 +902,7 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
}
/* more in the link list, continue */
usedblocks++;
- } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items);
+ } while (usedblocks <= trans->mac_cfg->base->max_ll_items);
/* OTP has no valid blocks */
IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
@@ -938,7 +935,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
if (nvm_is_otp < 0)
return nvm_is_otp;
- sz = trans->trans_cfg->base_params->eeprom_size;
+ sz = trans->mac_cfg->base->eeprom_size;
IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
e = kmalloc(sz, GFP_KERNEL);
@@ -973,7 +970,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
- if (!trans->trans_cfg->base_params->shadow_ram_support) {
+ if (!trans->mac_cfg->base->shadow_ram_support) {
ret = iwl_find_otp_image(trans, &validblockaddr);
if (ret)
goto err_unlock;
@@ -1027,7 +1024,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
return ret;
}
-static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
{
@@ -1062,7 +1059,7 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
/* EEPROM data functions */
struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const u8 *eeprom, size_t eeprom_size)
{
struct iwl_nvm_data *data;
@@ -1098,14 +1095,14 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
EEPROM_RAW_TEMPERATURE);
if (!tmp)
goto err_free;
- data->raw_temperature = *(__le16 *)tmp;
+ data->raw_temperature = *(const __le16 *)tmp;
tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
EEPROM_KELVIN_TEMPERATURE);
if (!tmp)
goto err_free;
- data->kelvin_temperature = *(__le16 *)tmp;
- data->kelvin_voltage = *((__le16 *)tmp + 1);
+ data->kelvin_temperature = *(const __le16 *)tmp;
+ data->kelvin_voltage = *((const __le16 *)tmp + 1);
radio_cfg =
iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG);
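The eeprom.c changes above are pure const-correctness: returning const void * from iwl_eeprom_query_addr() lets every caller bind the result to its own pointer-to-const type implicitly, which is exactly why the (void *) and (__le16 *) casts disappear. In isolation, under the same convention:

	/* sketch: const void * converts implicitly to any pointer-to-const */
	static const void *query(const u8 *base, size_t offset)
	{
		return base + offset;
	}

	static u16 read_le16(const u8 *eeprom, size_t offset)
	{
		/* no cast needed; writing through p would not compile */
		const __le16 *p = query(eeprom, offset);

		return le16_to_cpup(p);
	}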
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index 5ca85d90a8d6..cec2ebdfd651 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2025 Intel Corporation
*****************************************************************************/
@@ -116,9 +116,9 @@ static int iwl_led_cmd(struct iwl_priv *priv,
}
led_cmd.on = iwl_blink_compensation(priv, on,
- priv->trans->trans_cfg->base_params->led_compensation);
+ priv->trans->mac_cfg->base->led_compensation);
led_cmd.off = iwl_blink_compensation(priv, off,
- priv->trans->trans_cfg->base_params->led_compensation);
+ priv->trans->mac_cfg->base->led_compensation);
ret = iwl_send_led_cmd(priv, &led_cmd);
if (!ret) {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 56d19a034c24..0771a46bd552 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(C) 2018 - 2019, 2022 - 2024 Intel Corporation
+ * Copyright(C) 2018 - 2019, 2022 - 2025 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -96,7 +96,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
- if (priv->trans->max_skb_frags)
+ if (priv->trans->info.max_skb_frags)
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
@@ -188,7 +188,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&priv->nvm_data->bands[NL80211_BAND_5GHZ];
- hw->wiphy->hw_version = priv->trans->hw_id;
+ hw->wiphy->hw_version = priv->trans->info.hw_id;
iwl_leds_init(priv);
@@ -549,7 +549,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwlagn_prepare_restart(priv);
- memset((void *)&ctx->active, 0, sizeof(ctx->active));
+ memset((void *)(uintptr_t)&ctx->active, 0, sizeof(ctx->active));
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
@@ -1091,7 +1091,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto done;
}
- scd_queues = BIT(priv->trans->trans_cfg->base_params->num_of_queues) - 1;
+ scd_queues = BIT(priv->trans->mac_cfg->base->num_of_queues) - 1;
scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index a27a72cc017a..66211426aa3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved.
- * Copyright(c) 2024 Intel Corporation. All rights reserved.
+ * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
@@ -381,7 +381,8 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
*/
static void iwl_bg_statistics_periodic(struct timer_list *t)
{
- struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
+ struct iwl_priv *priv = timer_container_of(priv, t,
+ statistics_periodic);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -537,7 +538,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
*/
static void iwl_bg_ucode_trace(struct timer_list *t)
{
- struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
+ struct iwl_priv *priv = timer_container_of(priv, t, ucode_trace);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
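Both timer callbacks switch from from_timer() to timer_container_of(); the semantics are unchanged (a container_of() keyed on the embedded struct timer_list), only the name differs. A minimal sketch of the idiom, with a hypothetical structure:

        struct demo {
                struct timer_list timer;
                unsigned long ticks;
        };

        static void demo_timer_fn(struct timer_list *t)
        {
                /* recover the enclosing object from its embedded timer */
                struct demo *d = timer_container_of(d, t, timer);

                d->ticks++;
        }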
@@ -824,11 +825,11 @@ int iwl_alive_start(struct iwl_priv *priv)
iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant);
if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
- struct iwl_rxon_cmd *active_rxon =
- (struct iwl_rxon_cmd *)&ctx->active;
+ struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active;
+
/* apply any changes in staging */
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ active->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
struct iwl_rxon_context *tmp;
/* Initialize our rx_config data */
@@ -1137,9 +1138,10 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
static void iwl_set_hw_params(struct iwl_priv *priv)
{
- if (priv->cfg->ht_params)
+ /* there are no devices with HT but without HT40 on all bands */
+ if (priv->cfg->ht_params.ht40_bands)
priv->hw_params.use_rts_for_aggregation =
- priv->cfg->ht_params->use_rts_for_aggregation;
+ priv->cfg->ht_params.use_rts_for_aggregation;
/* Device-specific setup */
priv->lib->set_hw_params(priv);
@@ -1173,8 +1175,9 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
struct iwl_nvm_data *data = priv->nvm_data;
+ /* all HT devices also have HT40 on at least one band */
if (data->sku_cap_11n_enable &&
- !priv->cfg->ht_params) {
+ !priv->cfg->ht_params.ht40_bands) {
IWL_ERR(priv, "Invalid 11n configuration\n");
return -EINVAL;
}
@@ -1224,7 +1227,7 @@ static int iwl_nvm_check_version(struct iwl_nvm_data *data,
}
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
- const struct iwl_cfg *cfg,
+ const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw,
struct dentry *dbgfs_dir)
{
@@ -1233,7 +1236,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
struct iwl_op_mode *op_mode;
u16 num_mac;
u32 ucode_flags;
- struct iwl_trans_config trans_cfg = {};
static const u8 no_reclaim_cmds[] = {
REPLY_RX_PHY_CMD,
REPLY_RX_MPDU_CMD,
@@ -1248,7 +1250,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
************************/
hw = iwl_alloc_all();
if (!hw) {
- pr_err("%s: Cannot allocate network device\n", trans->name);
+ pr_err("%s: Cannot allocate network device\n",
+ trans->info.name);
err = -ENOMEM;
goto out;
}
@@ -1261,7 +1264,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
priv->cfg = cfg;
priv->fw = fw;
- switch (priv->trans->trans_cfg->device_family) {
+ switch (priv->trans->mac_cfg->device_family) {
case IWL_DEVICE_FAMILY_1000:
case IWL_DEVICE_FAMILY_100:
priv->lib = &iwl_dvm_1000_cfg;
@@ -1309,52 +1312,51 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* Populate the state variables that the transport layer needs
* to know about.
*/
- trans_cfg.op_mode = op_mode;
- trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
- trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+ BUILD_BUG_ON(sizeof(no_reclaim_cmds) >
+ sizeof(trans->conf.no_reclaim_cmds));
+ memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
+ sizeof(no_reclaim_cmds));
+ trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
case IWL_AMSDU_4K:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ trans->conf.rx_buf_size = IWL_AMSDU_4K;
break;
case IWL_AMSDU_8K:
- trans_cfg.rx_buf_size = IWL_AMSDU_8K;
+ trans->conf.rx_buf_size = IWL_AMSDU_8K;
break;
case IWL_AMSDU_12K:
default:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ trans->conf.rx_buf_size = IWL_AMSDU_4K;
pr_err("Unsupported amsdu_size: %d\n",
iwlwifi_mod_params.amsdu_size);
}
- trans_cfg.command_groups = iwl_dvm_groups;
- trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
+ trans->conf.command_groups = iwl_dvm_groups;
+ trans->conf.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
- trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
- trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
- driver_data[2]);
+ trans->conf.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
+ trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
- priv->trans->trans_cfg->base_params->num_of_queues);
+ priv->trans->mac_cfg->base->num_of_queues);
ucode_flags = fw->ucode_capa.flags;
if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
- trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+ trans->conf.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
} else {
priv->sta_key_max_num = STA_KEY_MAX_NUM;
- trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ trans->conf.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
- /* Configure transport layer */
- iwl_trans_configure(priv->trans, &trans_cfg);
+ trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
- trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
- trans->command_groups = trans_cfg.command_groups;
- trans->command_groups_size = trans_cfg.command_groups_size;
+ iwl_trans_op_mode_enter(priv->trans, op_mode);
/* At this point both hw and priv are allocated. */
@@ -1378,18 +1380,18 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* 2. Read REV register
***********************/
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->trans->name, priv->trans->hw_rev);
+ priv->trans->info.name, priv->trans->info.hw_rev);
err = iwl_trans_start_hw(priv->trans);
if (err)
- goto out_free_hw;
+ goto out_leave_trans;
/* Read the EEPROM */
err = iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
&priv->eeprom_blob_size);
if (err) {
IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_free_hw;
+ goto out_leave_trans;
}
/* Reset chip to save power until we load uCode during "up". */
@@ -1437,10 +1439,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* packaging bug or due to the eeprom check above
*/
priv->sta_key_max_num = STA_KEY_MAX_NUM;
- trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-
- /* Configure transport layer again*/
- iwl_trans_configure(priv->trans, &trans_cfg);
+ trans->conf.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
/*******************
@@ -1508,6 +1507,8 @@ out_free_eeprom_blob:
kfree(priv->eeprom_blob);
out_free_eeprom:
kfree(priv->nvm_data);
+out_leave_trans:
+ iwl_trans_op_mode_leave(priv->trans);
out_free_hw:
ieee80211_free_hw(priv->hw);
out:
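Since the transport is now entered explicitly via iwl_trans_op_mode_enter() rather than configured with iwl_trans_configure(), any failure after that call must leave the transport again; hence the new out_leave_trans label between the EEPROM frees and the hw free. Condensed from the hunks above:

        out_free_eeprom_blob:
                kfree(priv->eeprom_blob);
        out_free_eeprom:
                kfree(priv->nvm_data);
        out_leave_trans:
                /* undoes iwl_trans_op_mode_enter() */
                iwl_trans_op_mode_leave(priv->trans);
        out_free_hw:
                ieee80211_free_hw(priv->hw);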
@@ -1992,7 +1993,7 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
/* SKU Control */
iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH,
- CSR_HW_REV_STEP_DASH(priv->trans->hw_rev));
+ CSR_HW_REV_STEP_DASH(priv->trans->info.hw_rev));
/* write radio config values to register */
if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
index 6d16a7105656..6b42d6e5f30f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2025 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -64,32 +64,32 @@ struct iwl_power_vec_entry {
/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
/* DTIM 0 - 2 */
static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
- {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
- {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
- {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
+ {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF), 0}, 1},
+ {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF), 0}, 2}
};
/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
/* DTIM 3 - 10 */
static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
- {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
- {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
- {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
+ {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4), 0}, 0},
+ {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10), 0}, 1},
+ {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10), 0}, 2}
};
/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
/* DTIM 11 - */
static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
- {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
- {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
- {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
+ {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF), 0}, 0}
};
/* advance power management */
@@ -196,7 +196,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
- if (priv->trans->trans_cfg->base_params->shadow_reg_enable)
+ if (priv->trans->mac_cfg->base->shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
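Each inner initializer in the three tables gains a trailing 0, making the last member of the sleep command explicit instead of implicitly zeroed. With positional initializers this is purely cosmetic at runtime, but it documents the full layout and quiets missing-field-initializer warnings. A small illustration with a hypothetical struct:

        struct demo_cmd { int flags, vec[3], tail; };

        /* 'tail' is implicitly zero here ... */
        static const struct demo_cmd a = { 1, { 2, 3, 4 } };

        /* ... spelling it out keeps the layout visible at every use site */
        static const struct demo_cmd b = { 1, { 2, 3, 4 }, 0 };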
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 7f67e602940c..5f8b60824043 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -3,7 +3,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018, 2020-2021 Intel Corporation
+ * Copyright(c) 2018, 2020-2021, 2025 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
@@ -50,7 +50,7 @@ static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
* See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
+ struct iwl_rxon_cmd *rxon = (void *)(uintptr_t)&ctx->active;
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
return;
@@ -643,8 +643,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
fraglen = len - hdrlen;
if (fraglen) {
- int offset = (void *)hdr + hdrlen -
- rxb_addr(rxb) + rxb_offset(rxb);
+ int offset = (u8 *)hdr + hdrlen -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
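The offset computation drops arithmetic on void * (a GNU C extension) in favor of explicit u8 * casts, which are portable and make the byte-granularity intent clear. The same computation as a standalone sketch, helper name hypothetical:

        static int demo_frag_offset(const void *hdr, size_t hdrlen,
                                    const void *buf, int buf_off)
        {
                /* pointer arithmetic on u8 * is well-defined in ISO C */
                return (const u8 *)hdr + hdrlen - (const u8 *)buf + buf_off;
        }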
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index f80cce37e2c0..2d3c1627f283 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2025 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*****************************************************************************/
@@ -341,7 +341,7 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret;
- struct iwl_rxon_cmd *active = (void *)&ctx->active;
+ struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active;
if (ctx->ctxid == IWL_RXON_CTX_BSS) {
ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
@@ -441,7 +441,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret;
- struct iwl_rxon_cmd *active = (void *)&ctx->active;
+ struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active;
/* RXON timing must be before associated RXON */
if (ctx->ctxid == IWL_RXON_CTX_BSS) {
@@ -1023,7 +1023,7 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
/* cast away the const for active_rxon in this function */
- struct iwl_rxon_cmd *active = (void *)&ctx->active;
+ struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active;
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
int ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 98f0949b3683..96831ce8da6f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -137,8 +137,8 @@ enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
*/
static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = from_timer(priv, t,
- thermal_throttle.ct_kill_exit_tm);
+ struct iwl_priv *priv = timer_container_of(priv, t,
+ thermal_throttle.ct_kill_exit_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -187,8 +187,8 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = from_timer(priv, t,
- thermal_throttle.ct_kill_waiting_tm);
+ struct iwl_priv *priv = timer_container_of(priv, t,
+ thermal_throttle.ct_kill_waiting_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 111ed1873006..24fefa0e8148 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -3,7 +3,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/kernel.h>
@@ -463,7 +463,7 @@ static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
int q;
for (q = IWLAGN_FIRST_AMPDU_QUEUE;
- q < priv->trans->trans_cfg->base_params->num_of_queues; q++) {
+ q < priv->trans->mac_cfg->base->num_of_queues; q++) {
if (!test_and_set_bit(q, priv->agg_q_alloc)) {
priv->queue_to_mac80211[q] = mq;
return q;
@@ -1277,7 +1277,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
- if (scd_flow >= priv->trans->trans_cfg->base_params->num_of_queues) {
+ if (scd_flow >= priv->trans->mac_cfg->base->num_of_queues) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index bb13ca5d666c..ac90191a3973 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -3,6 +3,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2025 Intel Corporation
*****************************************************************************/
#include <linux/kernel.h>
@@ -222,7 +223,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
int ret;
int i;
- iwl_trans_fw_alive(priv->trans, 0);
+ iwl_trans_fw_alive(priv->trans);
if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
priv->nvm_data->sku_cap_ipan_enable) {
@@ -293,15 +294,10 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
{
struct iwl_notification_wait alive_wait;
struct iwl_alive_data alive_data;
- const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type;
static const u16 alive_cmd[] = { REPLY_ALIVE };
- fw = iwl_get_ucode_image(priv->fw, ucode_type);
- if (WARN_ON(!fw))
- return -EINVAL;
-
old_type = priv->cur_ucode;
priv->cur_ucode = ucode_type;
priv->ucode_loaded = false;
@@ -310,7 +306,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
alive_cmd, ARRAY_SIZE(alive_cmd),
iwl_alive_fn, &alive_data);
- ret = iwl_trans_start_fw(priv->trans, fw, false);
+ ret = iwl_trans_start_fw(priv->trans, priv->fw, ucode_type, false);
if (ret) {
priv->cur_ucode = old_type;
iwl_remove_notification(&priv->notif_wait, &alive_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index efa7b673ebc7..bee7d92293b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2024 Intel Corporation
+ * Copyright (C) 2019-2025 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -847,12 +847,12 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */
+ /* try to read ppag table rev 1 to 4 (all have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev >= 1 && tbl_rev <= 3) {
+ if (tbl_rev >= 1 && tbl_rev <= 4) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
IWL_DEBUG_RADIO(fwrt,
"Reading PPAG table (tbl_rev=%d)\n",
@@ -882,7 +882,7 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto out_free;
read_table:
- fwrt->ppag_ver = tbl_rev;
+ fwrt->ppag_bios_rev = tbl_rev;
flags = &wifi_pkg->package.elements[1];
if (flags->type != ACPI_TYPE_INTEGER) {
@@ -891,7 +891,7 @@ read_table:
}
fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value,
- fwrt->ppag_ver);
+ fwrt->ppag_bios_rev);
/*
* read, verify gain values and save them into the PPAG table.
@@ -912,6 +912,7 @@ read_table:
}
}
+ fwrt->ppag_bios_source = BIOS_SOURCE_ACPI;
ret = 0;
out_free:
@@ -919,40 +920,39 @@ out_free:
return ret;
}
-void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters)
+int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt)
{
+ struct iwl_phy_specific_cfg *filters = &fwrt->phy_filters;
struct iwl_phy_specific_cfg tmp = {};
- union acpi_object *wifi_pkg, *data;
+ union acpi_object *wifi_pkg, *data __free(kfree);
int tbl_rev, i;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WPFC_METHOD);
if (IS_ERR(data))
- return;
+ return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WPFC_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg))
- goto out_free;
+ return PTR_ERR(wifi_pkg);
if (tbl_rev != 0)
- goto out_free;
+ return -EINVAL;
BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
ACPI_WPFC_WIFI_DATA_SIZE - 1);
for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
- goto out_free;
+ return -EINVAL;
tmp.filter_cfg_chains[i] =
cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
}
IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
*filters = tmp;
-out_free:
- kfree(data);
+ return 0;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters);
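The rework leans on the scope-based cleanup support from <linux/cleanup.h>: tagging data with __free(kfree) makes kfree(data) run automatically on every return path, which is what lets the out_free label and the trailing kfree() disappear. A minimal sketch of the idiom, names hypothetical:

        #include <linux/cleanup.h>
        #include <linux/slab.h>

        static int demo_parse(const u8 *src, size_t len)
        {
                /* freed automatically on every return below */
                u8 *data __free(kfree) = kmemdup(src, len, GFP_KERNEL);

                if (!data)
                        return -ENOMEM;

                if (len < 4)            /* stand-in validation */
                        return -EINVAL; /* data freed here too */

                return 0;               /* and here */
        }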
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index e50b93472dd2..68d8fb5f6357 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2023, 2025 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -180,8 +180,7 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
-void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters);
+int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt);
void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
@@ -244,8 +243,10 @@ static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
return -ENOENT;
}
-/* macro since the second argument doesn't always exist */
-#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0)
+static inline int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 42360a8f23aa..3ce477c248ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -112,6 +112,16 @@ struct iwl_alive_ntf_v6 {
struct iwl_imr_alive_info imr;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
+struct iwl_alive_ntf {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+ struct iwl_sku_id sku_id;
+ struct iwl_imr_alive_info imr;
+ __le64 platform_id;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_8 */
+
/**
* enum iwl_extended_cfg_flags - commands driver may send before
* finishing init flow
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index d43adb6f9220..1c86a858aaab 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022, 2024 Intel Corporation
+ * Copyright (C) 2018-2022, 2024-2025 Intel Corporation
*/
#ifndef __iwl_fw_api_commands_h__
#define __iwl_fw_api_commands_h__
@@ -145,8 +145,8 @@ enum iwl_legacy_cmds {
REMOVE_STA = 0x19,
/**
- * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
- * &struct iwl_tx_cmd_gen3,
+ * @TX_CMD: uses &struct iwl_tx_cmd_v6 or &struct iwl_tx_cmd_v9 or
+ * &struct iwl_tx_cmd,
* response in &struct iwl_tx_resp or
* &struct iwl_tx_resp_v3
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index c139b965980d..9c88bb280609 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -98,7 +98,7 @@ enum iwl_data_path_subcmd_ids {
/**
* @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
- * uses &struct iwl_esr_mode_notif
+ * uses &struct iwl_esr_mode_notif or &struct iwl_esr_mode_notif_v1
*/
ESR_MODE_NOTIF = 0xF3,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 4fab6c66994e..3173fa96cb48 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -527,8 +527,8 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
* @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
- * @IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE: perform reset handshake and
- * split dump to before/after with region marking
+ * @IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET: split this dump into regions
+ * before and after the reset handshake
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@@ -537,7 +537,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
- IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE = BIT(17),
+ IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET = BIT(17),
};
/**
@@ -560,7 +560,7 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
* @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET: dump this region before reset
- * handshake (if requested by %IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)
+ * handshake (if requested by %IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET)
*/
enum iwl_fw_ini_dump_policy {
IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index e1952fc6d149..33541f92c7c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -6,6 +6,11 @@
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
+#include <linux/ieee80211.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/bits.h>
+#include "rs.h"
/**
* enum iwl_location_subcmd_ids - location group command IDs
@@ -1609,7 +1614,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
/**
- * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v7 - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
@@ -1645,7 +1650,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
* @tx_pn: the last PN used for this responder Tx in case PMF is configured in
* LE byte order.
*/
-struct iwl_tof_range_rsp_ap_entry_ntfy {
+struct iwl_tof_range_rsp_ap_entry_ntfy_v7 {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
@@ -1672,6 +1677,65 @@ struct iwl_tof_range_rsp_ap_entry_ntfy {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6,
LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_7 */
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @reserved1: reserved, for backwards compatibility
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Calib val that was used for this AP measurement
+ * @specific_calib: val that was used for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
+ * @reserved: for alignment
+ * @rx_pn: the last PN used for this responder Rx in case PMF is configured in
+ * LE byte order.
+ * @tx_pn: the last PN used for this responder Tx in case PMF is configured in
+ * LE byte order.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 reserved1[2];
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+ u8 rttConfidence;
+ u8 reserved[3];
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_8 */
/**
* enum iwl_tof_response_status - tof response status
@@ -1739,6 +1803,24 @@ struct iwl_tof_range_rsp_ntfy_v7 {
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
/**
+ * struct iwl_tof_range_rsp_ntfy_v9 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v9 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v7 ap[IWL_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
+ * LOCATION_RANGE_RSP_NTFY_API_S_VER_9
+ */
+
+/**
* struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
@@ -1752,8 +1834,7 @@ struct iwl_tof_range_rsp_ntfy {
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
- LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_10 */
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index b511e3aa6bb2..b9f559dac39f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -311,8 +311,41 @@ enum iwl_mac_config_filter_flags {
}; /* MAC_FILTER_FLAGS_MASK_E_VER_1 */
/**
+ * struct iwl_mac_wifi_gen_support_v2 - parameters of iwl_mac_config_cmd
+ * with support up to eht as in version 2 of the command
+ *
+ * @he_support: does this MAC support HE
+ * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling
+ * @eht_support: does this MAC support EHT. Requires he_support
+ */
+struct iwl_mac_wifi_gen_support_v2 {
+ __le16 he_support;
+ __le16 he_ap_support;
+ __le32 eht_support;
+} __packed;
+
+/**
+ * struct iwl_mac_wifi_gen_support - parameters of iwl_mac_config_cmd
+ * with support up to uhr as in version 3 of the command
+ * ( MAC_CONTEXT_CONFIG_CMD = 0x8 )
+ *
+ * @he_support: does this MAC support HE
+ * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling
+ * @eht_support: does this MAC support EHT. Requires he_support
+ * @uhr_support: does this MAC support UHR. Requires eht_support
+ * @reserved: reserved for alignment and to match version 2's size
+ */
+struct iwl_mac_wifi_gen_support {
+ u8 he_support;
+ u8 he_ap_support;
+ u8 eht_support;
+ u8 uhr_support;
+ __le32 reserved;
+} __packed;
+
+/**
* struct iwl_mac_config_cmd - command structure to configure MAC contexts in
- * MLD API
+ * MLD API for versions 2 and 3
* ( MAC_CONTEXT_CONFIG_CMD = 0x8 )
*
* @id_and_color: ID and color of the MAC
@@ -321,9 +354,8 @@ enum iwl_mac_config_filter_flags {
* @local_mld_addr: mld address
* @reserved_for_local_mld_addr: reserved
* @filter_flags: combination of &enum iwl_mac_config_filter_flags
- * @he_support: does this MAC support HE
- * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling
- * @eht_support: does this MAC support EHT. Requires he_support
+ * @wifi_gen_v2: he/eht parameters as in cmd version 2
+ * @wifi_gen: he/eht/uhr parameters as in cmd version 3
* @nic_not_ack_enabled: mark that the NIC doesn't support receiving
* ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG).
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
@@ -332,7 +364,6 @@ enum iwl_mac_config_filter_flags {
* @p2p_dev: mac data for p2p device
*/
struct iwl_mac_config_cmd {
- /* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
/* MAC_CONTEXT_TYPE_API_E */
@@ -340,16 +371,17 @@ struct iwl_mac_config_cmd {
u8 local_mld_addr[6];
__le16 reserved_for_local_mld_addr;
__le32 filter_flags;
- __le16 he_support;
- __le16 he_ap_support;
- __le32 eht_support;
+ union {
+ struct iwl_mac_wifi_gen_support_v2 wifi_gen_v2;
+ struct iwl_mac_wifi_gen_support wifi_gen;
+ };
__le32 nic_not_ack_enabled;
/* MAC_CONTEXT_CONFIG_SPECIFIC_DATA_API_U_VER_2 */
union {
struct iwl_mac_client_data client;
struct iwl_mac_p2p_dev_data p2p_dev;
};
-} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2 */
+} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2_VER_3 */
/**
* enum iwl_link_ctx_modify_flags - indicate to the fw what fields are being
@@ -457,6 +489,24 @@ enum iwl_link_modify_bandwidth {
};
/**
+ * struct iwl_npca_params - NPCA parameters (non-primary channel access)
+ *
+ * @switch_delay: after switch, delay TX according to destination AP
+ * @switch_back_delay: switch back to control channel before OBSS frame end
+ * @min_dur_threshold: minimum PPDU time to switch to the non-primary
+ * NPCA channel
+ * @flags: NPCA flags - bit 0: puncturing allowed, bit 1: new TX allowed
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_npca_params {
+ u8 switch_delay;
+ u8 switch_back_delay;
+ __le16 min_dur_threshold;
+ __le16 flags;
+ __le16 reserved;
+} __packed; /* NPCA_PARAM_API_S_VER_1 */
+
+/**
* struct iwl_link_config_cmd - command structure to configure the LINK context
* in MLD API
* ( LINK_CONFIG_CMD =0x9 )
@@ -513,6 +563,8 @@ enum iwl_link_modify_bandwidth {
* IEEE802.11REVme-D5.0
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
+ * @npca_params: NPCA parameters
+ * @prio_edca_params: priority EDCA parameters for enhanced QoS
* @reserved3: reserved for future use
*/
struct iwl_link_config_cmd {
@@ -560,8 +612,10 @@ struct iwl_link_config_cmd {
u8 ul_mu_data_disable;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
- __le32 reserved3[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5, _VER_6 */
+ struct iwl_npca_params npca_params; /* since _VER_7 */
+ struct iwl_ac_qos prio_edca_params; /* since _VER_7 */
+ __le32 reserved3[4];
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5, _VER_6, _VER_7 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
@@ -589,6 +643,62 @@ enum iwl_fw_sta_type {
}; /* STATION_TYPE_E_VER_1 */
/**
+ * struct iwl_sta_cfg_cmd_v1 - cmd structure to add a peer sta to the uCode's
+ * station table
+ * ( STA_CONFIG_CMD = 0xA )
+ *
+ * @sta_id: index of station in uCode's station table
+ * @link_id: the id of the link that is used to communicate with this sta
+ * @peer_mld_address: the peers mld address
+ * @reserved_for_peer_mld_address: reserved
+ * @peer_link_address: the address of the link that is used to communicate
+ * with this sta
+ * @reserved_for_peer_link_address: reserved
+ * @station_type: type of this station. See &enum iwl_fw_sta_type
+ * @assoc_id: for GO only
+ * @beamform_flags: beam forming controls
+ * @mfp: indicates whether the STA uses management frame protection or not.
+ * @mimo: indicates whether the sta uses mimo or not
+ * @mimo_protection: indicates whether the sta uses mimo protection or not
+ * @ack_enabled: indicates that the AP supports receiving ACK-
+ * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @trig_rnd_alloc: indicates that trigger based random allocation
+ * is enabled according to UORA element existence
+ * @tx_ampdu_spacing: minimum A-MPDU spacing:
+ * 4 - 2us density, 5 - 4us density, 6 - 8us density, 7 - 16us density
+ * @tx_ampdu_max_size: maximum A-MPDU length: 0 - 8K, 1 - 16K, 2 - 32K,
+ * 3 - 64K, 4 - 128K, 5 - 256K, 6 - 512K, 7 - 1024K.
+ * @sp_length: the size of the SP in actual number of frames
+ * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ * enabled ACs.
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY
+ * capa
+ * @htc_flags: which features are supported in HTC
+ */
+struct iwl_sta_cfg_cmd_v1 {
+ __le32 sta_id;
+ __le32 link_id;
+ u8 peer_mld_address[ETH_ALEN];
+ __le16 reserved_for_peer_mld_address;
+ u8 peer_link_address[ETH_ALEN];
+ __le16 reserved_for_peer_link_address;
+ __le32 station_type;
+ __le32 assoc_id;
+ __le32 beamform_flags;
+ __le32 mfp;
+ __le32 mimo;
+ __le32 mimo_protection;
+ __le32 ack_enabled;
+ __le32 trig_rnd_alloc;
+ __le32 tx_ampdu_spacing;
+ __le32 tx_ampdu_max_size;
+ __le32 sp_length;
+ __le32 uapsd_acs;
+ struct iwl_he_pkt_ext_v2 pkt_ext;
+ __le32 htc_flags;
+} __packed; /* STA_CMD_API_S_VER_1 */
+
+/**
* struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's
* station table
* ( STA_CONFIG_CMD = 0xA )
@@ -620,6 +730,14 @@ enum iwl_fw_sta_type {
* @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY
* capa
* @htc_flags: which features are supported in HTC
+ * @use_ldpc_x2_cw: Indicates whether to use LDPC with double CW
+ * @use_icf: Indicates whether to use ICF instead of RTS
+ * @dps_pad_time: DPS (Dynamic Power Save) padding delay resolution to ensure
+ * proper timing alignment
+ * @dps_trans_delay: DPS minimal time it takes the peer to return to low power
+ * @mic_prep_pad_delay: MIC prep time padding
+ * @mic_compute_pad_delay: MIC compute time padding
+ * @reserved: Reserved for alignment
*/
struct iwl_sta_cfg_cmd {
__le32 sta_id;
@@ -642,7 +760,14 @@ struct iwl_sta_cfg_cmd {
__le32 uapsd_acs;
struct iwl_he_pkt_ext_v2 pkt_ext;
__le32 htc_flags;
-} __packed; /* STA_CMD_API_S_VER_1 */
+ u8 use_ldpc_x2_cw;
+ u8 use_icf;
+ u8 dps_pad_time;
+ u8 dps_trans_delay;
+ u8 mic_prep_pad_delay;
+ u8 mic_compute_pad_delay;
+ u8 reserved[2];
+} __packed; /* STA_CMD_API_S_VER_2 */
/**
* struct iwl_aux_sta_cmd - command for AUX STA configuration
@@ -686,9 +811,9 @@ struct iwl_mvm_sta_disable_tx_cmd {
/**
* enum iwl_mvm_fw_esr_recommendation - FW recommendation code
- * @ESR_RECOMMEND_LEAVE: recommendation to leave esr
- * @ESR_FORCE_LEAVE: force exiting esr
- * @ESR_RECOMMEND_ENTER: recommendation to enter esr
+ * @ESR_RECOMMEND_LEAVE: recommendation to leave EMLSR
+ * @ESR_FORCE_LEAVE: force exiting EMLSR
+ * @ESR_RECOMMEND_ENTER: recommendation to enter EMLSR
*/
enum iwl_mvm_fw_esr_recommendation {
ESR_RECOMMEND_LEAVE,
@@ -697,15 +822,46 @@ enum iwl_mvm_fw_esr_recommendation {
}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */
/**
- * struct iwl_esr_mode_notif - FWs recommendation/force for esr mode
+ * struct iwl_esr_mode_notif_v1 - FW recommendation/force for EMLSR mode
*
- * @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation
+ * @action: the action to apply on EMLSR state.
+ * See &iwl_mvm_fw_esr_recommendation
*/
-struct iwl_esr_mode_notif {
+struct iwl_esr_mode_notif_v1 {
__le32 action;
} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
/**
+ * enum iwl_esr_leave_reason - reasons for leaving EMLSR mode
+ *
+ * @ESR_LEAVE_REASON_OMI_MU_UL_DISALLOWED: OMI MU UL disallowed
+ * @ESR_LEAVE_REASON_NO_TRIG_FOR_ESR_STA: No trigger for EMLSR station
+ * @ESR_LEAVE_REASON_NO_ESR_STA_IN_MU_DL: No EMLSR station in MU DL
+ * @ESR_LEAVE_REASON_BAD_ACTIV_FRAME_TH: Bad activation frame threshold
+ * @ESR_LEAVE_REASON_RTS_IN_DUAL_LISTEN: RTS in dual listen
+ */
+enum iwl_esr_leave_reason {
+ ESR_LEAVE_REASON_OMI_MU_UL_DISALLOWED = BIT(0),
+ ESR_LEAVE_REASON_NO_TRIG_FOR_ESR_STA = BIT(1),
+ ESR_LEAVE_REASON_NO_ESR_STA_IN_MU_DL = BIT(2),
+ ESR_LEAVE_REASON_BAD_ACTIV_FRAME_TH = BIT(3),
+ ESR_LEAVE_REASON_RTS_IN_DUAL_LISTEN = BIT(4),
+};
+
+/**
+ * struct iwl_esr_mode_notif - FW recommendation/force for EMLSR mode
+ *
+ * @action: the action to apply on EMLSR state.
+ * See &iwl_mvm_fw_esr_recommendation
+ * @leave_reason_mask: mask for various reasons to leave EMLSR mode.
+ * See &iwl_esr_leave_reason
+ */
+struct iwl_esr_mode_notif {
+ __le32 action;
+ __le32 leave_reason_mask;
+} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_2 */
+
+/**
* struct iwl_missed_beacons_notif - sent when by the firmware upon beacon loss
* ( MISSED_BEACONS_NOTIF = 0xF6 )
* @link_id: fw link ID
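Version 2 of the notification appends a reason bitmap; a handler supporting both firmwares would dispatch on the notification version (assumed here to come from the firmware capability lookup, helper name hypothetical):

        static void demo_handle_esr_notif(const void *data, unsigned int notif_ver)
        {
                if (notif_ver >= 2) {
                        const struct iwl_esr_mode_notif *notif = data;

                        pr_debug("esr action %u, leave reasons %#x\n",
                                 le32_to_cpu(notif->action),
                                 le32_to_cpu(notif->leave_reason_mask));
                } else {
                        const struct iwl_esr_mode_notif_v1 *notif = data;

                        pr_debug("esr action %u\n", le32_to_cpu(notif->action));
                }
        }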
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 26301c0b06a1..2a174c00b712 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@@ -287,9 +287,9 @@ struct iwl_ac_qos {
__le16 cw_min;
__le16 cw_max;
u8 aifsn;
- u8 fifos_mask;
+ u8 fifos_mask; /* not in use since _VER_3 */
__le16 edca_txop;
-} __packed; /* AC_QOS_API_S_VER_2 */
+} __packed; /* AC_QOS_API_S_VER_2, _VER_3 */
/**
* struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 4d8a12799c4d..4594a7c94bd6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -146,6 +146,7 @@ struct iwl_phy_context_cmd_v1 {
* @sbb_ctrl_channel_loc: location of the control channel
* @puncture_mask: bitmap of punctured subchannels
* @dsp_cfg_flags: set to 0
+ * @secondary_ctrl_chnl_loc: location of secondary control channel
* @reserved: reserved to align to 64 bit
*/
struct iwl_phy_context_cmd {
@@ -164,11 +165,13 @@ struct iwl_phy_context_cmd {
};
};
__le32 dsp_cfg_flags;
- __le32 reserved;
+ u8 secondary_ctrl_chnl_loc;
+ u8 reserved[3];
} __packed; /* PHY_CONTEXT_CMD_API_VER_3,
* PHY_CONTEXT_CMD_API_VER_4,
* PHY_CONTEXT_CMD_API_VER_5,
- * PHY_CONTEXT_CMD_API_VER_6
+ * PHY_CONTEXT_CMD_API_VER_6,
+ * PHY_CONTEXT_CMD_API_S_VER_7
*/
#endif /* __iwl_fw_api_phy_ctxt_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 37ec26596ee7..23140205ccb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_power_h__
#define __iwl_fw_api_power_h__
+#include "nvm-reg.h"
+
/* Power Management Commands, Responses, Notifications */
/**
@@ -54,7 +56,7 @@ struct iwl_ltr_config_cmd_v1 {
* @flags: See &enum iwl_ltr_config_flags
* @static_long: static LTR Long register value.
* @static_short: static LTR Short register value.
- * @ltr_cfg_values: LTR parameters table values (in usec) in folowing order:
+ * @ltr_cfg_values: LTR parameters table values (in usec) in following order:
* TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES
* is set.
* @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if
@@ -89,6 +91,7 @@ struct iwl_ltr_config_cmd {
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
* @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
* detection enablement
+ * @POWER_FLAGS_ENABLE_SMPS_MSK: SMPS is allowed for this vif
*/
enum iwl_power_flags {
POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
@@ -99,6 +102,7 @@ enum iwl_power_flags {
POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
+ POWER_FLAGS_ENABLE_SMPS_MSK = BIT(14),
};
#define IWL_POWER_VEC_SIZE 5
@@ -216,7 +220,6 @@ struct iwl_mac_power_cmd {
/* CONTEXT_DESC_API_T_VER_1 */
__le32 id_and_color;
- /* CLIENT_PM_POWER_TABLE_S_VER_1 */
__le16 flags;
__le16 keep_alive_seconds;
__le32 rx_data_timeout;
@@ -237,7 +240,7 @@ struct iwl_mac_power_cmd {
u8 heavy_rx_thld_percentage;
u8 limited_ps_threshold;
u8 reserved;
-} __packed;
+} __packed; /* CLIENT_PM_POWER_TABLE_S_VER_1, VER_2 */
/*
* struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when
@@ -628,28 +631,37 @@ enum iwl_ppag_flags {
/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
- * @v1: version 1
- * @v2: version 2
- * version 3, 4, 5 and 6 are the same structure as v2,
+ * @v1: command version 1 structure.
+ * @v2: command versions 2 to 6; each uses this same structure
* but has a different format of the flags bitmap
+ * @v3: command version 7 structure.
* @v1.flags: values from &enum iwl_ppag_flags
* @v1.gain: table of antenna gain values per chain and sub-band
* @v1.reserved: reserved
* @v2.flags: values from &enum iwl_ppag_flags
* @v2.gain: table of antenna gain values per chain and sub-band
- * @v2.reserved: reserved
+ * @v3.ppag_config_info: see &struct bios_value_u32
+ * @v3.gain: table of antenna gain values per chain and sub-band
+ * @v3.reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
__le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
s8 reserved[2];
- } v1;
+ } __packed v1; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_1 */
struct {
__le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } v2;
+ } __packed v2; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_2, VER3, VER4,
+ * VER5, VER6
+ */
+ struct {
+ struct bios_value_u32 ppag_config_info;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ s8 reserved[2];
+ } __packed v3; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
} __packed;
#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
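All three layouts share one union, so the sender picks the member (and the matching flags encoding) from the firmware's command version. A sketch, where cmd_ver, flags and the gain table are assumed to come from the usual version lookup and BIOS parsing:

        union iwl_ppag_table_cmd cmd = {};

        if (cmd_ver >= 7)               /* v3: flags live in ppag_config_info */
                memcpy(cmd.v3.gain, gain, sizeof(cmd.v3.gain));
        else if (cmd_ver >= 2)
                cmd.v2.flags = cpu_to_le32(flags);
        else
                cmd.v1.flags = cpu_to_le32(flags);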
@@ -657,6 +669,15 @@ union iwl_ppag_table_cmd {
IWL_PPAG_ETSI_LPI_UHB_MASK | \
IWL_PPAG_USA_LPI_UHB_MASK)
+#define IWL_PPAG_CMD_V6_MASK (IWL_PPAG_CMD_V5_MASK | \
+ IWL_PPAG_ETSI_VLP_UHB_MASK | \
+ IWL_PPAG_ETSI_SP_UHB_MASK | \
+ IWL_PPAG_USA_VLP_UHB_MASK | \
+ IWL_PPAG_USA_SP_UHB_MASK | \
+ IWL_PPAG_CANADA_LPI_UHB_MASK | \
+ IWL_PPAG_CANADA_VLP_UHB_MASK | \
+ IWL_PPAG_CANADA_SP_UHB_MASK)
+
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
@@ -690,7 +711,7 @@ struct iwl_sar_offset_mapping_cmd {
* Roaming Energy Delta Threshold, otherwise use normal Energy Delta
* Threshold. Typical energy threshold is -72dBm.
* @bf_temp_threshold: This threshold determines the type of temperature
- * filtering (Slow or Fast) that is selected (Units are in Celsuis):
+ * filtering (Slow or Fast) that is selected (Units are in Celsius):
* If the current temperature is above this threshold - Fast filter
* will be used, If the current temperature is below this threshold -
* Slow filter will be used.
@@ -698,12 +719,12 @@ struct iwl_sar_offset_mapping_cmd {
* calculated for this and the last passed beacon is greater than this
* threshold. Zero value means that the temperature change is ignored for
* beacon filtering; beacons will not be forced to be sent to driver
- * regardless of whether its temerature has been changed.
+ * regardless of whether its temperature has been changed.
* @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
* calculated for this and the last passed beacon is greater than this
* threshold. Zero value means that the temperature change is ignored for
* beacon filtering; beacons will not be forced to be sent to driver
- * regardless of whether its temerature has been changed.
+ * regardless of whether its temperature has been changed.
* @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
* @bf_debug_flag: beacon filtering debug configuration
 * @bf_escape_timer: Send beacons to driver if no beacons were passed
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index c2f806cbab59..3222cbcbe1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
#define __iwl_fw_api_rs_h__
-
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bits.h>
#include "mac.h"
/**
@@ -213,7 +215,8 @@ enum iwl_tlc_update_flags {
* @sta_id: station id
* @reserved: reserved
* @flags: bitmap of notifications reported
- * @rate: current initial rate
+ * @rate: current initial rate, format depends on the notification
+ * version
* @amsdu_size: Max AMSDU size, in bytes
* @amsdu_enabled: bitmap for per-TID AMSDU enablement
*/
@@ -224,7 +227,7 @@ struct iwl_tlc_update_notif {
__le32 rate;
__le32 amsdu_size;
__le32 amsdu_enabled;
-} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2, _VER_3, _VER_4 */
/**
* enum iwl_tlc_debug_types - debug options
@@ -427,6 +430,7 @@ enum {
/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
#define RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define RATE_VHT_MCS_NSS_MSK 0x30
/*
* Legacy OFDM rate format for bits 7:0
@@ -541,7 +545,7 @@ enum {
#define RATE_MCS_CTS_REQUIRED_POS (31)
#define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS)
-/* rate_n_flags bit field version 2
+/* rate_n_flags bit field version 2 and 3
*
* The 32-bit value has different layouts in the low 8 bits depending on the
* format. There are three formats, HT, VHT and legacy (11abg, with subformats
@@ -553,23 +557,25 @@ enum {
* (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT)
* (3) Very High-throughput (VHT) (4) High-efficiency (HE)
* (5) Extremely High-throughput (EHT)
+ * (6) Ultra High Reliability (UHR) (v3 rate format only)
*/
#define RATE_MCS_MOD_TYPE_POS 8
#define RATE_MCS_MOD_TYPE_MSK (0x7 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_CCK_MSK (0 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_LEGACY_OFDM_MSK (1 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_HT_MSK (2 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_VHT_MSK (3 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_HE_MSK (4 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_EHT_MSK (5 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_CCK (0 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_LEGACY_OFDM (1 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_HT (2 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_VHT (3 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_HE (4 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_EHT (5 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_UHR (6 << RATE_MCS_MOD_TYPE_POS)
/*
* Legacy CCK rate format for bits 0:3:
*
- * (0) 0xa - 1 Mbps
- * (1) 0x14 - 2 Mbps
- * (2) 0x37 - 5.5 Mbps
- * (3) 0x6e - 11 nbps
+ * (0) 1 Mbps
+ * (1) 2 Mbps
+ * (2) 5.5 Mbps
+ * (3) 11 Mbps
*
 * Legacy OFDM rate format for bits 3:0:
*
@@ -586,15 +592,19 @@ enum {
#define RATE_LEGACY_RATE_MSK 0x7
/*
- * HT, VHT, HE, EHT rate format for bits 3:0
- * 3-0: MCS
- *
+ * HT, VHT, HE, EHT, UHR rate format
+ * Version 2: (not applicable for UHR)
+ * 3-0: MCS
+ * 4: NSS==2 indicator
+ * Version 3:
+ * 4-0: MCS
+ * 5: NSS==2 indicator
*/
#define RATE_HT_MCS_CODE_MSK 0x7
-#define RATE_MCS_NSS_POS 4
-#define RATE_MCS_NSS_MSK (1 << RATE_MCS_NSS_POS)
-#define RATE_MCS_CODE_MSK 0xf
-#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 1) | \
+#define RATE_MCS_NSS_MSK_V2 0x10
+#define RATE_MCS_NSS_MSK 0x20
+#define RATE_MCS_CODE_MSK 0x1f
+#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 2) | \
((r) & RATE_HT_MCS_CODE_MSK))
/* Bits 7-5: reserved */
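In the v3 layout the MCS code widens to bits 4:0 and the NSS==2 indicator moves from bit 4 (0x10) to bit 5 (0x20), which is why RATE_MCS_CODE_MSK grows to 0x1f and RATE_HT_MCS_INDEX now shifts the NSS bit down by 2 (0x20 >> 2 == 8, i.e. eight MCS entries per stream). A decoding sketch under the new masks, helper name hypothetical:

        /* HT rate index (0-15) from a v3-format rate_n_flags word */
        static bool demo_ht_index(u32 rate_n_flags, u32 *index)
        {
                if ((rate_n_flags & RATE_MCS_MOD_TYPE_MSK) != RATE_MCS_MOD_TYPE_HT)
                        return false;

                *index = RATE_HT_MCS_INDEX(rate_n_flags);
                return true;
        }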
@@ -810,11 +820,38 @@ struct iwl_lq_cmd {
}; /* LINK_QUALITY_CMD_API_S_VER_1 */
u8 iwl_fw_rate_idx_to_plcp(int idx);
-u32 iwl_new_rate_from_v1(u32 rate_v1);
const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
const char *iwl_rs_pretty_ant(u8 ant);
const char *iwl_rs_pretty_bw(int bw);
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
bool iwl_he_is_sgi(u32 rate_n_flags);
+static inline u32 iwl_v3_rate_from_v2_v3(__le32 rate, bool fw_v3)
+{
+ u32 val;
+
+ if (fw_v3)
+ return le32_to_cpu(rate);
+
+ val = le32_to_cpu(rate) & ~RATE_MCS_NSS_MSK_V2;
+ val |= u32_encode_bits(le32_get_bits(rate, RATE_MCS_NSS_MSK_V2),
+ RATE_MCS_NSS_MSK);
+
+ return val;
+}
+
+static inline __le32 iwl_v3_rate_to_v2_v3(u32 rate, bool fw_v3)
+{
+ __le32 val;
+
+ if (fw_v3)
+ return cpu_to_le32(rate);
+
+ val = cpu_to_le32(rate & ~RATE_MCS_NSS_MSK);
+ val |= le32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK),
+ RATE_MCS_NSS_MSK_V2);
+
+ return val;
+}
+
#endif /* __iwl_fw_api_rs_h__ */
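The two helpers let the rest of the driver work exclusively in the v3 layout: rates from older firmware are widened on the way in and narrowed again on the way out, with only the NSS bit moving. A usage sketch, where fw_v3 is assumed to come from a command- or notification-version check:

        /* round-trip a firmware rate through the internal v3 layout */
        static __le32 demo_roundtrip(__le32 fw_rate, bool fw_v3)
        {
                u32 rate = iwl_v3_rate_from_v2_v3(fw_rate, fw_v3);

                /* ... all internal logic uses the v3 masks on 'rate' ... */

                return iwl_v3_rate_to_v2_v3(rate, fw_v3);
        }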
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 691c879cb90d..7cf6d6ac7430 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -193,10 +193,11 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
-#define RX_MPDU_BAND_POS 6
-#define RX_MPDU_BAND_MASK 0xC0
-#define BAND_IN_RX_STATUS(_val) \
- (((_val) & RX_MPDU_BAND_MASK) >> RX_MPDU_BAND_POS)
+enum iwl_rx_mpdu_mac_phy_band {
+ IWL_RX_MPDU_MAC_PHY_BAND_MAC_MASK = 0x0f,
+ IWL_RX_MPDU_MAC_PHY_BAND_PHY_MASK = 0x30,
+ IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK = 0xc0,
+};
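With the open-coded BAND_IN_RX_STATUS() gone, callers would pull the sub-fields out of the (renamed) byte with the generic bitfield helpers; a sketch, assuming a descriptor pointer named desc:

	u8 band = u8_get_bits(desc->mac_phy_band, IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK);
	u8 phy = u8_get_bits(desc->mac_phy_band, IWL_RX_MPDU_MAC_PHY_BAND_PHY_MASK);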
enum iwl_rx_l3_proto_values {
IWL_RX_L3_TYPE_NONE,
@@ -639,7 +640,9 @@ struct iwl_rx_mpdu_desc_v3 {
*/
__le32 reserved[1];
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
- RX_MPDU_RES_START_API_S_VER_5 */
+ * RX_MPDU_RES_START_API_S_VER_5,
+ * RX_MPDU_RES_START_API_S_VER_6
+ */
/**
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
@@ -668,9 +671,10 @@ struct iwl_rx_mpdu_desc {
*/
__le16 phy_info;
/**
- * @mac_phy_idx: MAC/PHY index
+ * @mac_phy_band: MAC ID, PHY ID, band;
+ * see &enum iwl_rx_mpdu_mac_phy_band
*/
- u8 mac_phy_idx;
+ u8 mac_phy_band;
/* DW4 */
union {
struct {
@@ -722,8 +726,10 @@ struct iwl_rx_mpdu_desc {
struct iwl_rx_mpdu_desc_v3 v3;
};
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
- RX_MPDU_RES_START_API_S_VER_4,
- RX_MPDU_RES_START_API_S_VER_5 */
+ * RX_MPDU_RES_START_API_S_VER_4,
+ * RX_MPDU_RES_START_API_S_VER_5,
+ * RX_MPDU_RES_START_API_S_VER_6
+ */
#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
@@ -819,7 +825,7 @@ struct iwl_rx_no_data {
* 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel
* @on_air_rise_time: GP2 during on air rise
* @fr_time: frame time
- * @rate: rate/mcs of frame
+ * @rate: rate/mcs of frame, format depends on the notification version
* @phy_info: &enum iwl_rx_phy_eht_data0 and &enum iwl_rx_phy_info_type
* @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type.
* for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT
@@ -835,9 +841,7 @@ struct iwl_rx_no_data_ver_3 {
__le32 rate;
__le32 phy_info[2];
__le32 rx_vec[4];
-} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
- RX_NO_DATA_NTFY_API_S_VER_2
- RX_NO_DATA_NTFY_API_S_VER_3 */
+} __packed; /* RX_NO_DATA_NTFY_API_S_VER_3, _VER_4 */
struct iwl_frame_release {
u8 baid;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 0a9f14fb04be..00713a991879 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 - 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021, 2023-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -584,6 +584,9 @@ struct iwl_stats_ntfy_per_phy {
__le32 last_tx_ch_width_indx;
} __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */
+/* unknown channel load (due to not being active on channel) */
+#define IWL_STATS_UNKNOWN_CHANNEL_LOAD 0xffffffff
+
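A consumer must treat the all-ones value as "no measurement" rather than as a huge load; a hedged sketch (the channel_load field name and the consumer function are assumptions for illustration):

	u32 load = le32_to_cpu(per_phy->channel_load);
	if (load != IWL_STATS_UNKNOWN_CHANNEL_LOAD)
		update_channel_load_average(load); /* hypothetical consumer */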
/**
* struct iwl_stats_ntfy_per_sta
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index cfa6532a3cdd..58d5a6ef633e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -50,7 +50,7 @@ struct iwl_tdls_channel_switch_timing {
*/
struct iwl_tdls_channel_switch_frame {
__le32 switch_time_offset;
- struct iwl_tx_cmd tx_cmd;
+ struct iwl_tx_cmd_v6 tx_cmd;
u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
@@ -131,7 +131,7 @@ struct iwl_tdls_config_cmd {
struct iwl_tdls_sta_info sta_info[IWL_TDLS_STA_COUNT];
__le32 pti_req_data_offset;
- struct iwl_tx_cmd pti_req_tx_cmd;
+ struct iwl_tx_cmd_v6 pti_req_tx_cmd;
u8 pti_req_template[];
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 0a39e4b6eb62..557832563f89 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -151,7 +151,7 @@ enum iwl_tx_cmd_sec_ctrl {
#define IWL_LOW_RETRY_LIMIT 7
/**
- * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values
+ * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd_v6 offload_assist values
* @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
* from mac header end. For normal case it is 4 words for SNAP.
* note: tx_cmd, mac header and pad are not counted in the offset.
@@ -181,7 +181,7 @@ enum iwl_tx_offload_assist_flags_pos {
/* TODO: complete documentation for try_cnt and btkill_cnt */
/**
- * struct iwl_tx_cmd - TX command struct to FW
+ * struct iwl_tx_cmd_v6 - TX command struct to FW
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
@@ -221,7 +221,7 @@ enum iwl_tx_offload_assist_flags_pos {
* After the struct fields the MAC header is placed, plus any padding,
 * and then the actual payload.
*/
-struct iwl_tx_cmd {
+struct iwl_tx_cmd_v6 {
__le16 len;
__le16 offload_assist;
__le32 tx_flags;
@@ -258,7 +258,7 @@ struct iwl_dram_sec_info {
} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
/**
- * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
+ * struct iwl_tx_cmd_v9 - TX command struct to FW for 22000 devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
@@ -268,7 +268,7 @@ struct iwl_dram_sec_info {
* cleared. Combination of RATE_MCS_*
* @hdr: 802.11 header
*/
-struct iwl_tx_cmd_gen2 {
+struct iwl_tx_cmd_v9 {
__le16 len;
__le16 offload_assist;
__le32 flags;
@@ -279,18 +279,18 @@ struct iwl_tx_cmd_gen2 {
TX_CMD_API_S_VER_9 */
/**
- * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
+ * struct iwl_tx_cmd - TX command struct to FW for AX210+ devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @flags: combination of &enum iwl_tx_cmd_flags
* @offload_assist: TX offload configuration
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
- * cleared. Combination of RATE_MCS_*
+ * cleared. Combination of RATE_MCS_*; format depends on version
* @reserved: reserved
* @hdr: 802.11 header
*/
-struct iwl_tx_cmd_gen3 {
+struct iwl_tx_cmd {
__le16 len;
__le16 flags;
__le32 offload_assist;
@@ -298,7 +298,9 @@ struct iwl_tx_cmd_gen3 {
__le32 rate_n_flags;
u8 reserved[8];
struct ieee80211_hdr hdr[];
-} __packed; /* TX_CMD_API_S_VER_8, TX_CMD_API_S_VER_10 */
+} __packed; /* TX_CMD_API_S_VER_10,
+ * TX_CMD_API_S_VER_11
+ */
/*
* TX response related data
@@ -549,7 +551,7 @@ struct iwl_tx_resp_v3 {
* @failure_rts: num of failures due to unsuccessful RTS
* @failure_frame: num failures due to no ACK (unused for agg)
* @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
- * Tx of all the batch. RATE_MCS_*
+ * Tx of all the batch. RATE_MCS_*; format depends on command version
* @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
* for agg: RTS + CTS + aggregation tx time + block-ack time.
* in usec.
@@ -600,8 +602,10 @@ struct iwl_tx_resp {
__le16 reserved2;
struct agg_tx_status status;
} __packed; /* TX_RSP_API_S_VER_6,
- TX_RSP_API_S_VER_7,
- TX_RSP_API_S_VER_8 */
+ * TX_RSP_API_S_VER_7,
+ * TX_RSP_API_S_VER_8,
+ * TX_RSP_API_S_VER_9
+ */
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -701,7 +705,8 @@ enum iwl_mvm_ba_resp_flags {
* @rts_retry_cnt: RTS retry count
* @reserved: reserved (for alignment)
* @wireless_time: Wireless-media time
- * @tx_rate: the rate the aggregation was sent at
+ * @tx_rate: the rate the aggregation was sent at. Format depends on command
+ * version.
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
* @tfd: array of TFD queue status updates. See &iwl_compressed_ba_tfd
@@ -730,7 +735,8 @@ struct iwl_compressed_ba_notif {
DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_tfd, tfd);
};
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4,
- COMPRESSED_BA_RES_API_S_VER_5 */
+ COMPRESSED_BA_RES_API_S_VER_6,
+ COMPRESSED_BA_RES_API_S_VER_7 */
/**
* struct iwl_mac_beacon_cmd_v6 - beacon template command
@@ -742,7 +748,7 @@ struct iwl_compressed_ba_notif {
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd_v6 {
- struct iwl_tx_cmd tx;
+ struct iwl_tx_cmd_v6 tx;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
@@ -761,7 +767,7 @@ struct iwl_mac_beacon_cmd_v6 {
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd_v7 {
- struct iwl_tx_cmd tx;
+ struct iwl_tx_cmd_v6 tx;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 03f639fbf9b6..ea739ebe7cb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -187,7 +187,7 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
/* Pull RXF2 */
iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
RXF_DIFF_FROM_PREV +
- fwrt->trans->trans_cfg->umac_prph_offset, 1);
+ fwrt->trans->mac_cfg->umac_prph_offset, 1);
/* Pull LMAC2 RXF1 */
if (fwrt->smem_cfg.num_lmacs > 1)
iwl_fwrt_dump_rxf(fwrt, dump_data,
@@ -654,10 +654,10 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
{
u32 range_len;
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210);
handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr);
- } else if (fwrt->trans->trans_cfg->device_family >=
+ } else if (fwrt->trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_22000) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
@@ -665,7 +665,7 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
- if (fwrt->trans->trans_cfg->mq_rx_supported) {
+ if (fwrt->trans->mac_cfg->mq_rx_supported) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
}
@@ -809,13 +809,14 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
- u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
+ u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->mac_cfg->base->smem_len;
u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
0 : fwrt->trans->cfg->dccm2_len;
int i;
/* SRAM - include stack CCM if driver knows the values for it */
- if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
+ if (!fwrt->trans->cfg->dccm_offset ||
+ !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
@@ -838,7 +839,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
iwl_fw_prph_handler(fwrt, &prph_len,
iwl_fw_get_prph_len);
- if (fwrt->trans->trans_cfg->device_family ==
+ if (fwrt->trans->mac_cfg->device_family ==
IWL_DEVICE_FAMILY_7000 &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
@@ -876,7 +877,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
file_len += sizeof(*dump_data) +
- fwrt->trans->cfg->d3_debug_data_length * 2;
+ fwrt->trans->mac_cfg->base->d3_debug_data_length * 2;
}
/* If we only want a monitor dump, reset the file length */
@@ -904,13 +905,14 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
dump_info->hw_type =
- cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
+ cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev));
dump_info->hw_step =
- cpu_to_le32(fwrt->trans->hw_rev_step);
+ cpu_to_le32(fwrt->trans->info.hw_rev_step);
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
- strscpy_pad(dump_info->dev_human_readable, fwrt->trans->name,
- sizeof(dump_info->dev_human_readable));
+ strscpy_pad(dump_info->dev_human_readable,
+ fwrt->trans->info.name,
+ sizeof(dump_info->dev_human_readable));
strscpy_pad(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable));
dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
@@ -996,7 +998,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
- fwrt->trans->cfg->smem_offset,
+ fwrt->trans->mac_cfg->base->smem_offset,
IWL_FW_ERROR_DUMP_MEM_SMEM);
iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
@@ -1005,8 +1007,8 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
- u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
- size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
+ u32 addr = fwrt->trans->mac_cfg->base->d3_debug_data_base_addr;
+ size_t data_size = fwrt->trans->mac_cfg->base->d3_debug_data_length;
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
dump_data->len = cpu_to_le32(data_size * 2);
@@ -1109,7 +1111,7 @@ static int iwl_dump_ini_prph_phy_iter_common(struct iwl_fw_runtime *fwrt,
range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = size;
- if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
indirect_wr_addr = WMAL_INDRCT_CMD1;
indirect_wr_addr += le32_to_cpu(offset);
@@ -1266,7 +1268,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
/* all paged index start from 1 to skip CSS section */
idx++;
- if (!fwrt->trans->trans_cfg->gen2)
+ if (!fwrt->trans->mac_cfg->gen2)
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
range = range_ptr;
@@ -1790,7 +1792,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id,
data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id,
&addrs->write_ptr);
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
u32 wrt_ptr = le32_to_cpu(data->write_ptr);
data->write_ptr = cpu_to_le32(wrt_ptr >> 2);
@@ -1817,7 +1819,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
- &fwrt->trans->cfg->mon_dram_regs);
+ &fwrt->trans->mac_cfg->base->mon_dram_regs);
}
static void *
@@ -1830,7 +1832,7 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
- &fwrt->trans->cfg->mon_smem_regs);
+ &fwrt->trans->mac_cfg->base->mon_smem_regs);
}
static void *
@@ -1844,7 +1846,7 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
/* no offset calculation later */
IWL_FW_INI_ALLOCATION_ID_DBGC1,
mon_dump,
- &fwrt->trans->cfg->mon_dbgi_regs);
+ &fwrt->trans->mac_cfg->base->mon_dbgi_regs);
}
static void *
@@ -1909,7 +1911,7 @@ iwl_dump_ini_mem_block_ranges(struct iwl_fw_runtime *fwrt,
static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
- if (fwrt->trans->trans_cfg->gen2) {
+ if (fwrt->trans->mac_cfg->gen2) {
if (fwrt->trans->init_dram.paging_cnt)
return fwrt->trans->init_dram.paging_cnt - 1;
else
@@ -2021,7 +2023,7 @@ iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
/* start from 1 to skip CSS section */
for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) {
size += range_header_len;
- if (fwrt->trans->trans_cfg->gen2)
+ if (fwrt->trans->mac_cfg->gen2)
size += fwrt->trans->init_dram.paging[i].size;
else
size += fwrt->fw_paging_db[i].fw_paging_size;
@@ -2375,7 +2377,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_dump_cfg_name *cfg_name;
u32 size = sizeof(*tlv) + sizeof(*dump);
u32 num_of_cfg_names = 0;
- u32 hw_type;
+ u32 hw_type, is_cdb, is_jacket;
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
size += sizeof(*cfg_name);
@@ -2403,33 +2405,24 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
- dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step);
+ dump->hw_step = cpu_to_le32(fwrt->trans->info.hw_rev_step);
- /*
- * Several HWs all have type == 0x42, so we'll override this value
- * according to the detected HW
- */
- hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
- if (hw_type == IWL_AX210_HW_TYPE) {
- u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
- u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
- u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
- u32 masked_bits = is_jacket | (is_cdb << 1);
+ hw_type = CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev);
+
+ is_cdb = CSR_HW_RFID_IS_CDB(fwrt->trans->info.hw_rf_id);
+ is_jacket = !!(iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR) &
+ WFPM_OTP_CFG1_IS_JACKET_BIT);
+
+ /* Use bits 12 and 13 to indicate jacket/CDB, respectively */
+ hw_type |= (is_jacket | (is_cdb << 1)) << IWL_JACKET_CDB_SHIFT;
- /*
- * The HW type depends on certain bits in this case, so add
- * these bits to the HW type. We won't have collisions since we
- * add these bits after the highest possible bit in the mask.
- */
- hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT;
- }
dump->hw_type = cpu_to_le32(hw_type);
dump->rf_id_flavor =
- cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id));
- dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id));
- dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id));
- dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id));
+ cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->info.hw_rf_id));
+ dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->info.hw_rf_id));
+ dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->info.hw_rf_id));
+ dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id));
dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major);
dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor);
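The new encoding is easy to verify by hand (illustrative numbers): for a CDB, non-jacket device whose type reads 0x42, is_cdb == 1 and is_jacket == 0, so the code above computes

	hw_type |= (0 | (1 << 1)) << IWL_JACKET_CDB_SHIFT; /* 0x42 -> 0x2042 */

and both flag bits land above bit 11, clear of any raw type value — the same no-collision argument the deleted AX210-only comment made, now applied unconditionally.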
@@ -2624,6 +2617,12 @@ enum iwl_dump_ini_region_selector {
IWL_INI_DUMP_LATE_REGIONS,
};
+static bool iwl_dump_due_to_error(enum iwl_fw_ini_time_point tp_id)
+{
+ return tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
+ tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR;
+}
+
static u32
iwl_dump_ini_dump_regions(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
@@ -2689,8 +2688,7 @@ iwl_dump_ini_dump_regions(struct iwl_fw_runtime *fwrt,
* debug data which also need to be collected.
*/
if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
- if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
- tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
+ if (iwl_dump_due_to_error(tp_id))
imr_reg_data->reg_tlv =
fwrt->trans->dbg.active_regions[i];
else
@@ -2726,8 +2724,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
ARRAY_SIZE(fwrt->trans->dbg.active_regions));
- if (trigger->time_point &
- cpu_to_le32(IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)) {
+ if (trigger->apply_policy &
+ cpu_to_le32(IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET)) {
size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_EARLY_REGIONS);
@@ -2736,6 +2734,10 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_LATE_REGIONS);
} else {
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT) &&
+ iwl_dump_due_to_error(tp_id))
+ iwl_trans_pcie_fw_reset_handshake(fwrt->trans);
size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_ALL_REGIONS);
@@ -3282,13 +3284,13 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
{
- const struct iwl_cfg *cfg = fwrt->trans->cfg;
+ const struct iwl_mac_cfg *mac_cfg = fwrt->trans->mac_cfg;
if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
return;
if (!fwrt->dump.d3_debug_data) {
- fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
+ fwrt->dump.d3_debug_data = kmalloc(mac_cfg->base->d3_debug_data_length,
GFP_KERNEL);
if (!fwrt->dump.d3_debug_data) {
IWL_ERR(fwrt,
@@ -3298,15 +3300,15 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
}
/* if the buffer holds previous debug data it is overwritten */
- iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
+ iwl_trans_read_mem_bytes(fwrt->trans, mac_cfg->base->d3_debug_data_base_addr,
fwrt->dump.d3_debug_data,
- cfg->d3_debug_data_length);
+ mac_cfg->base->d3_debug_data_length);
if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx,
- cfg->d3_debug_data_base_addr,
+ mac_cfg->base->d3_debug_data_base_addr,
fwrt->dump.d3_debug_data,
- cfg->d3_debug_data_length);
+ mac_cfg->base->d3_debug_data_length);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
@@ -3341,7 +3343,7 @@ static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
struct iwl_fw_dbg_params *params)
{
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
return;
}
@@ -3365,7 +3367,7 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
if (!params)
return -EIO;
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
@@ -3467,7 +3469,7 @@ void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt)
GENMASK(31, IWL_FW_DBG_DOMAIN_POS + 1));
/* supported starting from 9000 devices */
- if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
return;
if (fwrt->trans->dbg.yoyo_bin_loaded || (preset && preset != 1))
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 35e30e5db462..8034c9ecba69 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -201,7 +201,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
{
return fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
- fwrt->trans->cfg->d3_debug_data_length && fwrt->ops &&
+ fwrt->trans->mac_cfg->base->d3_debug_data_length && fwrt->ops &&
fwrt->ops->d3_debug_enable &&
fwrt->ops->d3_debug_enable(fwrt->ops_ctx) &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
@@ -210,7 +210,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
{
return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
- !fwrt->trans->trans_cfg->gen2 &&
+ !fwrt->trans->mac_cfg->gen2 &&
fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index f0c813d675f4..c70f2a20f7d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -311,7 +311,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct iwl_fw_runtime *fwrt,
pos += scnprintf(pos, endpos - pos, "FW: %s\n",
fwrt->fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n",
- fwrt->trans->name);
+ fwrt->trans->info.name);
pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
fwrt->dev->bus->name);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index c7b261c8ec96..3ec42a4ea801 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -417,10 +417,10 @@ static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
struct iwl_trans *trans = fwrt->trans;
u32 error, data1;
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
error = UMAG_SB_CPU_2_STATUS;
data1 = UMAG_SB_CPU_1_STATUS;
- } else if (fwrt->trans->trans_cfg->device_family >=
+ } else if (fwrt->trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
error = SB_CPU_2_STATUS;
data1 = SB_CPU_1_STATUS;
@@ -439,7 +439,7 @@ static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n",
iwl_read_umac_prph(trans, data1));
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
}
@@ -508,7 +508,7 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
iwl_fwrt_dump_rcm_error_log(fwrt, 1);
iwl_fwrt_dump_iml_error_log(fwrt);
iwl_fwrt_dump_fseq_regs(fwrt);
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
pc_data = fwrt->trans->dbg.pc_data;
if (!iwl_trans_grab_nic_access(fwrt->trans))
@@ -522,7 +522,7 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
iwl_trans_release_nic_access(fwrt->trans);
}
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH);
IWL_ERR(fwrt, "Function Scratch status:\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 3af275133da0..cf41021d59ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2014, 2018-2025 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -372,10 +372,7 @@ struct iwl_fw_ini_dump_cfg_name {
u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed;
-/* AX210's HW type */
-#define IWL_AX210_HW_TYPE 0x42
-/* How many bits to roll when adding to the HW type of AX210 HW */
-#define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12
+#define IWL_JACKET_CDB_SHIFT 12
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9860903ecd3f..5a1ec880ed72 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -102,6 +102,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SEC_TABLE_ADDR = 66,
IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67,
IWL_UCODE_TLV_CURRENT_PC = 68,
+ IWL_UCODE_TLV_FSEQ_BIN_VERSION = 72,
IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
IWL_UCODE_TLV_FW_NUM_LINKS = IWL_UCODE_TLV_CONST_BASE + 1,
@@ -402,6 +403,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA: supports (de)activating 5G9
* for CA from BIOS.
* @IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT: supports %TAS_UHB_ALLOWED_CANADA
+ * @IWL_UCODE_TLV_CAPA_EXT_FSEQ_IMAGE_SUPPORT: external FSEQ image support
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -504,6 +506,14 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122,
IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA = (__force iwl_ucode_tlv_capa_t)123,
IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT = (__force iwl_ucode_tlv_capa_t)124,
+ IWL_UCODE_TLV_CAPA_EXT_FSEQ_IMAGE_SUPPORT = (__force iwl_ucode_tlv_capa_t)125,
+
+ /* set 4 */
+ /**
+ * @IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT: FW reset handshake is needed
+ * during assert handling even if the dump isn't split
+ */
+ IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT = (__force iwl_ucode_tlv_capa_t)(4 * 32 + 0),
NUM_IWL_UCODE_TLV_CAPA
/*
* This construction make both sparse (which cannot increment the previous
@@ -994,6 +1004,10 @@ struct iwl_fw_dump_exclude {
__le32 addr, size;
};
+struct iwl_fw_fseq_bin_version {
+ __le32 major, minor;
+}; /* FW_TLV_FSEQ_BIN_VERSION_S */
+
static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
size_t fixed_size, size_t var_size)
{
@@ -1011,4 +1025,18 @@ static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
#define iwl_tlv_array_len_with_size(_tlv_ptr, _struct_ptr, _size) \
_iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), _size)
+
+/* external FSEQ file */
+#define IWL_FSEQ_FILE "intel/fseq-%04x-%04x"
+#define IWL_FSEQ_MAGIC "INTEL-CNV-FSEQ\n\0"
+
+struct iwl_fseq_file {
+ char magic[16];
+ char version[16];
+ __le32 bt_len;
+ __le32 wifi_len;
+ u8 reserved[8];
+ u8 data[];
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
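Given this layout, a loader would sanity-check the header before trusting the two length fields; a minimal sketch under stated assumptions (iwl_fseq_valid is a hypothetical helper, not part of this patch):

	static bool iwl_fseq_valid(const struct iwl_fseq_file *f, size_t len)
	{
		if (len < sizeof(*f) ||
		    memcmp(f->magic, IWL_FSEQ_MAGIC, sizeof(f->magic)))
			return false;
		return sizeof(*f) + le32_to_cpu(f->bt_len) +
		       le32_to_cpu(f->wifi_len) <= len;
	}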
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index de87e0e3e072..d1d8058ad29f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021, 2024 Intel Corporation
+ * Copyright (C) 2019-2021, 2024-2025 Intel Corporation
*/
#include "iwl-drv.h"
#include "runtime.h"
@@ -72,7 +72,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
* values in VER_1, this is backwards-compatible with VER_2,
* as long as we don't set any other bits.
*/
- if (!fwrt->trans->trans_cfg->integrated)
+ if (!fwrt->trans->mac_cfg->integrated)
cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
@@ -84,17 +84,17 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
SOC_FLAGS_LTR_APPLY_DELAY_1820);
- if (fwrt->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
- !WARN_ON(!fwrt->trans->trans_cfg->integrated))
- cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
+ if (fwrt->trans->mac_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
+ !WARN_ON(!fwrt->trans->mac_cfg->integrated))
+ cmd.flags |= le32_encode_bits(fwrt->trans->mac_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
- fwrt->trans->trans_cfg->low_latency_xtal)
+ fwrt->trans->mac_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
- cmd.latency = cpu_to_le32(fwrt->trans->trans_cfg->xtal_latency);
+ cmd.latency = cpu_to_le32(fwrt->trans->mac_cfg->xtal_latency);
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
if (ret)
@@ -116,14 +116,14 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
* The default queue is configured via context info, so if we
* have a single queue, there's nothing to do here.
*/
- if (fwrt->trans->num_rx_queues == 1)
+ if (fwrt->trans->info.num_rxqs == 1)
return 0;
- if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_22000)
return 0;
/* skip the default queue */
- num_queues = fwrt->trans->num_rx_queues - 1;
+ num_queues = fwrt->trans->info.num_rxqs - 1;
size = struct_size(cmd, data, num_queues);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index a7b7cae874a2..826409f6f710 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -267,7 +267,7 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
const struct fw_img *fw = &fwrt->fw->img[type];
int ret;
- if (fwrt->trans->trans_cfg->gen2)
+ if (fwrt->trans->mac_cfg->gen2)
return 0;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 1195e708caa9..4f3c2f7f4f5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2020-2024 Intel Corporation
+ * Copyright(c) 2020-2025 Intel Corporation
*/
#include "iwl-drv.h"
@@ -96,8 +96,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
mac_type, rf_id);
- if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
- rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+ if (mac_type == CSR_HW_REV_TYPE(trans->info.hw_rev) &&
+ rf_id == CSR_HW_RFID_TYPE(trans->info.hw_rf_id))
hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
@@ -152,8 +152,8 @@ done:
if (!hw_match) {
IWL_DEBUG_FW(trans,
"HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
- CSR_HW_REV_TYPE(trans->hw_rev),
- CSR_HW_RFID_TYPE(trans->hw_rf_id));
+ CSR_HW_REV_TYPE(trans->info.hw_rev),
+ CSR_HW_RFID_TYPE(trans->info.hw_rf_id));
return -ENOENT;
}
@@ -167,7 +167,8 @@ done:
static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
size_t len,
- struct iwl_pnvm_image *pnvm_data)
+ struct iwl_pnvm_image *pnvm_data,
+ __le32 sku_id[3])
{
const struct iwl_ucode_tlv *tlv;
@@ -190,23 +191,23 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- const struct iwl_sku_id *sku_id =
+ const struct iwl_sku_id *tlv_sku_id =
(const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
tlv_len);
IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
- le32_to_cpu(sku_id->data[0]),
- le32_to_cpu(sku_id->data[1]),
- le32_to_cpu(sku_id->data[2]));
+ le32_to_cpu(tlv_sku_id->data[0]),
+ le32_to_cpu(tlv_sku_id->data[1]),
+ le32_to_cpu(tlv_sku_id->data[2]));
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
trans->reduced_cap_sku = false;
- rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
- if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) &&
+ rf_type = CSR_HW_RFID_TYPE(trans->info.hw_rf_id);
+ if ((sku_id[0] & cpu_to_le32(IWL_PNVM_REDUCED_CAP_BIT)) &&
rf_type == IWL_CFG_RF_TYPE_FM)
trans->reduced_cap_sku = true;
@@ -214,9 +215,9 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
"Reduced SKU device %d\n",
trans->reduced_cap_sku);
- if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
- trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
- trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ if (sku_id[0] == tlv_sku_id->data[0] &&
+ sku_id[1] == tlv_sku_id->data[1] &&
+ sku_id[2] == tlv_sku_id->data[2]) {
int ret;
ret = iwl_pnvm_handle_section(trans, data, len,
@@ -263,13 +264,14 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
return 0;
}
-static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
+static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len,
+ __le32 sku_id[3])
{
struct pnvm_sku_package *package;
u8 *image = NULL;
/* Get PNVM from BIOS for non-Intel SKU */
- if (trans_p->sku_id[2]) {
+ if (sku_id[2]) {
package = iwl_uefi_get_pnvm(trans_p, len);
if (!IS_ERR_OR_NULL(package)) {
if (*len >= sizeof(*package)) {
@@ -294,8 +296,10 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
return image;
}
-static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
+static void
+iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa,
+ __le32 sku_id[3])
{
struct iwl_pnvm_image *pnvm_data = NULL;
u8 *data = NULL;
@@ -309,7 +313,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
if (trans->pnvm_loaded)
goto set;
- data = iwl_get_pnvm_image(trans, &length);
+ data = iwl_get_pnvm_image(trans, &length, sku_id);
if (!data) {
trans->fail_to_parse_pnvm_image = true;
return;
@@ -319,7 +323,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
if (!pnvm_data)
goto free;
- ret = iwl_pnvm_parse(trans, data, length, pnvm_data);
+ ret = iwl_pnvm_parse(trans, data, length, pnvm_data, sku_id);
if (ret) {
trans->fail_to_parse_pnvm_image = true;
goto free;
@@ -339,7 +343,8 @@ free:
static void
iwl_pnvm_load_reduce_power_to_trans(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
+ const struct iwl_ucode_capabilities *capa,
+ __le32 sku_id[3])
{
struct iwl_pnvm_image *pnvm_data = NULL;
u8 *data = NULL;
@@ -362,7 +367,8 @@ iwl_pnvm_load_reduce_power_to_trans(struct iwl_trans *trans,
if (!pnvm_data)
goto free;
- ret = iwl_uefi_reduce_power_parse(trans, data, length, pnvm_data);
+ ret = iwl_uefi_reduce_power_parse(trans, data, length, pnvm_data,
+ sku_id);
if (ret) {
trans->failed_to_load_reduce_power_image = true;
goto free;
@@ -386,18 +392,19 @@ free:
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait,
- const struct iwl_ucode_capabilities *capa)
+ const struct iwl_ucode_capabilities *capa,
+ __le32 sku_id[3])
{
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
/* if the SKU_ID is empty, there's nothing to do */
- if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
+ if (!sku_id[0] && !sku_id[1] && !sku_id[2])
return 0;
- iwl_pnvm_load_pnvm_to_trans(trans, capa);
- iwl_pnvm_load_reduce_power_to_trans(trans, capa);
+ iwl_pnvm_load_pnvm_to_trans(trans, capa, sku_id);
+ iwl_pnvm_load_reduce_power_to_trans(trans, capa, sku_id);
iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
index 1bac3466154c..9540926e8a0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2023, 2025 Intel Corporation
*/
#ifndef __IWL_PNVM_H__
#define __IWL_PNVM_H__
@@ -14,7 +14,8 @@
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait,
- const struct iwl_ucode_capabilities *capa);
+ const struct iwl_ucode_capabilities *capa,
+ __le32 sku_id[3]);
static inline
void iwl_pnvm_get_fs_name(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 6adcfa6e214a..74b90bd92c48 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023, 2025 Intel Corporation
*/
#include <linux/dmi.h>
#include "iwl-drv.h"
@@ -34,6 +34,7 @@ IWL_BIOS_TABLE_LOADER(wrds_table);
IWL_BIOS_TABLE_LOADER(ewrd_table);
IWL_BIOS_TABLE_LOADER(wgds_table);
IWL_BIOS_TABLE_LOADER(ppag_table);
+IWL_BIOS_TABLE_LOADER(phy_filters);
IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
@@ -180,9 +181,9 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
(IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
- fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ fwrt->trans->info.hw_rev != CSR_HW_REV_TYPE_3160) ||
(IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ ((fwrt->trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) ==
CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
@@ -313,7 +314,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
bool send_ppag_always;
/* many firmware images for JF lie about this */
- if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
return -EOPNOTSUPP;
@@ -338,31 +339,35 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
return -EINVAL;
}
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
-
IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
- if (fwrt->ppag_ver >= 1) {
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ if (fwrt->ppag_bios_rev >= 1) {
/* in this case FW supports revision 0 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d, send truncated table\n",
- fwrt->ppag_ver);
+ fwrt->ppag_bios_rev);
}
} else if (cmd_ver >= 2 && cmd_ver <= 6) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v2.gain[0];
*cmd_size = sizeof(cmd->v2);
- if (fwrt->ppag_ver == 0) {
+ cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags);
+ if (fwrt->ppag_bios_rev == 0) {
/* in this case FW supports revisions 1,2 or 3 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is 0, send padded table\n");
}
+ } else if (cmd_ver == 7) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v3.gain[0];
+ *cmd_size = sizeof(cmd->v3);
+ cmd->v3.ppag_config_info.table_source = fwrt->ppag_bios_source;
+ cmd->v3.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
+ cmd->v3.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
return -EINVAL;
@@ -371,9 +376,11 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
/* ppag mode */
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits were read from bios: %d\n",
- le32_to_cpu(cmd->v1.flags));
+ fwrt->ppag_flags);
- if (cmd_ver == 5)
+ if (cmd_ver == 6)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V6_MASK);
+ else if (cmd_ver == 5)
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
else if (cmd_ver < 5)
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
@@ -381,16 +388,20 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
if ((cmd_ver == 1 &&
!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_ver >= 2)) {
+ (cmd_ver == 2 && fwrt->ppag_bios_rev >= 2)) {
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
} else {
IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
}
+ /* The 'flags' field is the same in v1 and v2 so we can just
+ * use v1 to access it.
+ */
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits going to be sent: %d\n",
- le32_to_cpu(cmd->v1.flags));
+ (cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) :
+ le32_to_cpu(cmd->v3.ppag_config_info.value));
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
@@ -480,7 +491,7 @@ __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
u32 val;
__le32 config_bitmap = 0;
- switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)) {
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
case IWL_CFG_RF_TYPE_JF1:
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index 53693314d505..9bed3d573b1e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
*/
#ifndef __fw_regulatory_h__
@@ -224,10 +224,14 @@ int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
- const u8 ppag_ver)
+ const u8 ppag_bios_rev)
{
- return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
- IWL_PPAG_REV3_MASK);
+	/* For revision 4 and above, the driver just passes the value through */
+ if (ppag_bios_rev >= 4)
+ return ppag_modes;
+
+ return ppag_modes & (ppag_bios_rev < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
+ IWL_PPAG_REV3_MASK);
}
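A quick check of the revision gating (illustrative): a rev-2 table is still masked down to the ETSI/China bits, while a rev-4 table passes through untouched:

	iwl_bios_get_ppag_flags(0x7, 2); /* 0x7 & IWL_PPAG_ETSI_CHINA_MASK */
	iwl_bios_get_ppag_flags(0x7, 4); /* 0x7, returned as-is */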
bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc);
@@ -236,22 +240,25 @@ bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc);
#define IWL_DSBR_PERMANENT_URM_MASK BIT(9)
int iwl_bios_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
+int iwl_bios_get_phy_filters(struct iwl_fw_runtime *fwrt);
static inline void iwl_bios_setup_step(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
u32 dsbr;
- if (!trans->trans_cfg->integrated)
+ if (!trans->mac_cfg->integrated)
return;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
return;
if (iwl_bios_get_dsbr(fwrt, &dsbr))
dsbr = 0;
- trans->dsbr_urm_fw_dependent = !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK);
- trans->dsbr_urm_permanent = !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK);
+ trans->conf.dsbr_urm_fw_dependent =
+ !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK);
+ trans->conf.dsbr_urm_permanent =
+ !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK);
}
#endif /* __fw_regulatory_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
index 8f99e501629e..746f2acffb8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2021-2022 Intel Corporation
+ * Copyright (C) 2021-2022, 2025 Intel Corporation
*/
#include <net/mac80211.h>
@@ -91,104 +91,6 @@ const char *iwl_rs_pretty_bw(int bw)
}
IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
-static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
- int idx;
- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
- for (idx = offset; idx < last; idx++)
- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
- return idx - offset;
- return IWL_RATE_INVALID;
-}
-
-u32 iwl_new_rate_from_v1(u32 rate_v1)
-{
- u32 rate_v2 = 0;
- u32 dup = 0;
-
- if (rate_v1 == 0)
- return rate_v1;
- /* convert rate */
- if (rate_v1 & RATE_MCS_HT_MSK_V1) {
- u32 nss = 0;
-
- rate_v2 |= RATE_MCS_HT_MSK;
- rate_v2 |=
- rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
- nss = (rate_v1 & RATE_HT_MCS_MIMO2_MSK) >>
- RATE_HT_MCS_NSS_POS_V1;
- rate_v2 |= nss << RATE_MCS_NSS_POS;
- } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
- rate_v1 & RATE_MCS_HE_MSK_V1) {
- rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
-
- rate_v2 |= rate_v1 & RATE_MCS_NSS_MSK;
-
- if (rate_v1 & RATE_MCS_HE_MSK_V1) {
- u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
- u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
- u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
- RATE_MCS_HE_106T_POS_V1;
- u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
- RATE_MCS_HE_GI_LTF_POS;
-
- if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
- he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
- he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
- /* the new rate have an additional bit to
- * represent the value 4 rather then using SGI
- * bit for this purpose - as it was done in the old
- * rate */
- he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
- RATE_MCS_SGI_POS_V1;
-
- rate_v2 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
- rate_v2 |= he_type << RATE_MCS_HE_TYPE_POS;
- rate_v2 |= he_106t << RATE_MCS_HE_106T_POS;
- rate_v2 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
- rate_v2 |= RATE_MCS_HE_MSK;
- } else {
- rate_v2 |= RATE_MCS_VHT_MSK;
- }
- /* if legacy format */
- } else {
- u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
-
- if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
- legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
- IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
-
- rate_v2 |= legacy_rate;
- if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
- rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
- }
-
- /* convert flags */
- if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
- rate_v2 |= RATE_MCS_LDPC_MSK;
- rate_v2 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
- (rate_v1 & RATE_MCS_ANT_AB_MSK) |
- (rate_v1 & RATE_MCS_STBC_MSK) |
- (rate_v1 & RATE_MCS_BF_MSK);
-
- dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
- if (dup) {
- rate_v2 |= RATE_MCS_DUP_MSK;
- rate_v2 |= dup << RATE_MCS_CHAN_WIDTH_POS;
- }
-
- if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
- (rate_v1 & RATE_MCS_SGI_MSK_V1))
- rate_v2 |= RATE_MCS_SGI_MSK;
-
- return rate_v2;
-}
-IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
-
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type;
@@ -197,37 +99,40 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK) >>
RATE_MCS_CHAN_WIDTH_POS;
u32 format = rate & RATE_MCS_MOD_TYPE_MSK;
+ int index = 0;
bool sgi;
- if (format == RATE_MCS_CCK_MSK ||
- format == RATE_MCS_LEGACY_OFDM_MSK) {
- int legacy_rate = rate & RATE_LEGACY_RATE_MSK;
- int index = format == RATE_MCS_CCK_MSK ?
- legacy_rate :
- legacy_rate + IWL_FIRST_OFDM_RATE;
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_LEGACY_OFDM:
+ index = IWL_FIRST_OFDM_RATE;
+ fallthrough;
+ case RATE_MCS_MOD_TYPE_CCK:
+ index += rate & RATE_LEGACY_RATE_MSK;
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
iwl_rs_pretty_ant(ant),
iwl_rate_mcs(index)->mbps);
- }
-
- if (format == RATE_MCS_VHT_MSK)
+ case RATE_MCS_MOD_TYPE_VHT:
type = "VHT";
- else if (format == RATE_MCS_HT_MSK)
+ break;
+ case RATE_MCS_MOD_TYPE_HT:
type = "HT";
- else if (format == RATE_MCS_HE_MSK)
+ break;
+ case RATE_MCS_MOD_TYPE_HE:
type = "HE";
- else if (format == RATE_MCS_EHT_MSK)
+ break;
+ case RATE_MCS_MOD_TYPE_EHT:
type = "EHT";
- else
+ break;
+ default:
type = "Unknown"; /* shouldn't happen */
+ }
- mcs = format == RATE_MCS_HT_MSK ?
+ mcs = format == RATE_MCS_MOD_TYPE_HT ?
RATE_HT_MCS_INDEX(rate) :
rate & RATE_MCS_CODE_MSK;
- nss = ((rate & RATE_MCS_NSS_MSK)
- >> RATE_MCS_NSS_POS) + 1;
- sgi = format == RATE_MCS_HE_MSK ?
+ nss = u32_get_bits(rate, RATE_MCS_NSS_MSK);
+ sgi = format == RATE_MCS_MOD_TYPE_HE ?
iwl_he_is_sgi(rate) :
rate & RATE_MCS_SGI_MSK;
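The calling convention is unchanged by the switch conversion; a usage sketch with a v3-format EHT rate word (illustrative values):

	char buf[128];
	rs_pretty_print_rate(buf, sizeof(buf),
			     RATE_MCS_MOD_TYPE_EHT | 5 | RATE_MCS_NSS_MSK);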
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index a9e6bba2419e..0444a736c2b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -110,6 +110,9 @@ struct iwl_txf_iter_data {
* Only read the UEFI variables if locked.
* @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables
* @geo_profiles: geographic profiles as read from WGDS BIOS table
+ * @phy_filters: specific phy filters as read from WPFC BIOS table
+ * @ppag_bios_rev: PPAG BIOS revision
+ * @ppag_bios_source: see &enum bios_source
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -178,12 +181,14 @@ struct iwl_fw_runtime {
bool geo_enabled;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
- u8 ppag_ver;
+ u8 ppag_bios_rev;
+ u8 ppag_bios_source;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
struct iwl_mcc_allowed_ap_type_cmd uats_table;
bool uats_valid;
u8 uefi_tables_lock_status;
+ struct iwl_phy_specific_cfg phy_filters;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index 3f1272014daf..90fd69b4860c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -102,7 +102,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
}
pkt = cmd.resp_pkt;
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 386aadbce2a2..48126ec6b94b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -219,7 +219,8 @@ done:
int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len,
- struct iwl_pnvm_image *pnvm_data)
+ struct iwl_pnvm_image *pnvm_data,
+ __le32 sku_id[3])
{
const struct iwl_ucode_tlv *tlv;
@@ -241,23 +242,23 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- const struct iwl_sku_id *sku_id =
+ const struct iwl_sku_id *tlv_sku_id =
(const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
tlv_len);
IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
- le32_to_cpu(sku_id->data[0]),
- le32_to_cpu(sku_id->data[1]),
- le32_to_cpu(sku_id->data[2]));
+ le32_to_cpu(tlv_sku_id->data[0]),
+ le32_to_cpu(tlv_sku_id->data[1]),
+ le32_to_cpu(tlv_sku_id->data[2]));
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
- if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
- trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
- trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ if (sku_id[0] == tlv_sku_id->data[0] &&
+ sku_id[1] == tlv_sku_id->data[1] &&
+ sku_id[2] == tlv_sku_id->data[2]) {
int ret = iwl_uefi_reduce_power_section(trans,
data, len,
pnvm_data);
@@ -310,11 +311,12 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat
if (common_step_data->revision != 1)
return -EINVAL;
- trans->mbx_addr_0_step = (u32)common_step_data->revision |
+ trans->conf.mbx_addr_0_step =
+ (u32)common_step_data->revision |
(u32)common_step_data->cnvi_eq_channel << 8 |
(u32)common_step_data->cnvr_eq_channel << 16 |
(u32)common_step_data->radio1 << 24;
- trans->mbx_addr_1_step = (u32)common_step_data->radio2;
+ trans->conf.mbx_addr_1_step = (u32)common_step_data->radio2;
return 0;
}
@@ -323,7 +325,7 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
struct uefi_cnv_common_step_data *data;
int ret;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
data = iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_BT_GUID,
@@ -408,8 +410,8 @@ static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data,
return 0;
}
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt)
+void iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_uats_data *data;
int ret;
@@ -417,17 +419,12 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME,
"UATS", sizeof(*data), NULL);
if (IS_ERR(data))
- return -EINVAL;
+ return;
ret = iwl_uefi_uats_parse(data, fwrt);
- if (ret < 0) {
+ if (ret < 0)
IWL_DEBUG_FW(trans, "Cannot read UATS table. rev is invalid\n");
- kfree(data);
- return ret;
- }
-
kfree(data);
- return 0;
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table);
@@ -557,13 +554,14 @@ int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto out;
}
- fwrt->ppag_ver = data->revision;
+ fwrt->ppag_bios_rev = data->revision;
fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes,
- fwrt->ppag_ver);
+ fwrt->ppag_bios_rev);
BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains));
memcpy(&fwrt->ppag_chains, &data->ppag_chains,
sizeof(data->ppag_chains));
+ fwrt->ppag_bios_source = BIOS_SOURCE_UEFI;
out:
kfree(data);
return ret;
@@ -750,6 +748,10 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
}
*value = data->functions[func];
+
+ IWL_DEBUG_RADIO(fwrt,
+ "UEFI: DSM func=%d: value=%d\n", func, *value);
+
ret = 0;
out:
kfree(data);
@@ -810,3 +812,31 @@ out:
kfree(data);
return ret;
}
+
+int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_wpfc_data *data __free(kfree);
+ struct iwl_phy_specific_cfg *filters = &fwrt->phy_filters;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WPFC_NAME,
+ "WPFC", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != 0) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WPFC revision:%d\n",
+ data->revision);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
+ ARRAY_SIZE(data->chains));
+
+ for (int i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
+ filters->filter_cfg_chains[i] = cpu_to_le32(data->chains[i]);
+ IWL_DEBUG_RADIO(fwrt, "WPFC: chain %d: %u\n", i, data->chains[i]);
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Loaded WPFC config from UEFI\n");
+ return 0;
+}
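The helper above leans on the kernel's scoped-cleanup annotation: data is
declared __free(kfree), so it is released on every return path without an
explicit kfree(). A minimal sketch of the same pattern, with a made-up
parse_blob() helper standing in for the UEFI code:

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int parse_blob(const u8 *src, size_t len)
	{
		u8 *buf __free(kfree) = kmemdup(src, len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		if (buf[0] != 0)	/* e.g. an unsupported revision */
			return -EINVAL;	/* buf is kfree()d automatically */

		return 0;		/* ... and on this path too */
	}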
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index eb3c05417da3..5a4c557e47c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -24,6 +24,7 @@
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
#define IWL_UEFI_DSBR_NAME L"UefiCnvCommonDSBR"
+#define IWL_UEFI_WPFC_NAME L"WPFC"
#define IWL_SGOM_MAP_SIZE 339
@@ -33,7 +34,7 @@
#define IWL_UEFI_EWRD_REVISION 2
#define IWL_UEFI_WGDS_REVISION 3
#define IWL_UEFI_MIN_PPAG_REV 1
-#define IWL_UEFI_MAX_PPAG_REV 3
+#define IWL_UEFI_MAX_PPAG_REV 4
#define IWL_UEFI_MIN_WTAS_REVISION 1
#define IWL_UEFI_MAX_WTAS_REVISION 2
#define IWL_UEFI_SPLC_REVISION 0
@@ -230,6 +231,18 @@ struct uefi_cnv_wlan_dsbr_data {
u32 config;
} __packed;
+/**
+ * struct uefi_cnv_wpfc_data - BIOS Wi-Fi PHY filter configuration
+ * @revision: the revision of the table
+ * @chains: configuration of each of the chains (a-d)
+ *
+ * PHY-specific filter configuration read from the WPFC UEFI variable
+ */
+struct uefi_cnv_wpfc_data {
+ u8 revision;
+ u32 chains[4];
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -240,7 +253,8 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len,
- struct iwl_pnvm_image *pnvm_data);
+ struct iwl_pnvm_image *pnvm_data,
+ __le32 sku_id[3]);
void iwl_uefi_get_step_table(struct iwl_trans *trans);
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data);
@@ -258,10 +272,11 @@ int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt);
+void iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
+int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -271,7 +286,8 @@ static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
static inline int
iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len,
- struct iwl_pnvm_image *pnvm_data)
+ struct iwl_pnvm_image *pnvm_data,
+ __le32 sku_id[3])
{
return -EOPNOTSUPP;
}
@@ -352,11 +368,9 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwr
{
}
-static inline
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt)
+static inline void
+iwl_uefi_get_uats_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
- return 0;
}
static inline
@@ -370,5 +384,10 @@ int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
{
return -ENOENT;
}
+
+static inline int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index acafee538b8a..91f22ce36d74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -115,7 +115,29 @@ static inline u8 num_of_ant(u8 mask)
}
/**
- * struct iwl_base_params - params not likely to change within a device family
+ * struct iwl_fw_mon_reg - FW monitor register info
+ * @addr: register address
+ * @mask: register mask
+ */
+struct iwl_fw_mon_reg {
+ u32 addr;
+ u32 mask;
+};
+
+/**
+ * struct iwl_fw_mon_regs - FW monitor registers
+ * @write_ptr: write pointer register
+ * @cycle_cnt: cycle count register
+ * @cur_frag: current fragment in use
+ */
+struct iwl_fw_mon_regs {
+ struct iwl_fw_mon_reg write_ptr;
+ struct iwl_fw_mon_reg cycle_cnt;
+ struct iwl_fw_mon_reg cur_frag;
+};
+
+/**
+ * struct iwl_family_base_params - base parameters for an entire family
* @max_ll_items: max number of OTP blocks
* @shadow_ram_support: shadow support for OTP memory
* @led_compensation: compensate on the led on/off time per HW according
@@ -124,12 +146,34 @@ static inline u8 num_of_ant(u8 mask)
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register support
+ * @apmg_not_supported: there's no APMG
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
* @max_tfd_queue_size: max number of entries in tfd queue.
+ * @eeprom_size: EEPROM size
+ * @num_of_queues: number of HW TX queues supported
+ * @pcie_l1_allowed: PCIe L1 state is allowed
+ * @pll_cfg: PLL configuration needed
+ * @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_passlist
+ * @smem_offset: offset from which the SMEM begins
+ * @smem_len: the length of SMEM
+ * @mac_addr_from_csr: read HW address from CSR registers at this offset
+ * @d3_debug_data_base_addr: base address where D3 debug data is stored
+ * @d3_debug_data_length: length of the D3 debug data
+ * @min_ba_txq_size: minimum number of slots required in a TX queue used
+ * for aggregation
+ * @min_txq_size: minimum number of slots required in a TX queue
+ * @gp2_reg_addr: GP2 (timer) register address
+ * @min_umac_error_event_table: minimum SMEM location of UMAC error table
+ * @mon_dbgi_regs: monitor DBGI registers
+ * @mon_dram_regs: monitor DRAM registers
+ * @mon_smem_regs: monitor SMEM registers
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
*/
-struct iwl_base_params {
+struct iwl_family_base_params {
unsigned int wd_timeout;
u16 eeprom_size;
@@ -140,6 +184,7 @@ struct iwl_base_params {
shadow_reg_enable:1,
pcie_l1_allowed:1,
apmg_wake_up_wa:1,
+ apmg_not_supported:1,
scd_chain_ext_wa:1;
u16 num_of_queues; /* def: HW dependent */
@@ -147,6 +192,22 @@ struct iwl_base_params {
u8 max_ll_items;
u8 led_compensation;
+ u8 ucode_api_max;
+ u8 ucode_api_min;
+ u32 mac_addr_from_csr:10;
+ u8 nvm_hw_section_num;
+ netdev_features_t features;
+ u32 smem_offset;
+ u32 smem_len;
+ u32 min_umac_error_event_table;
+ u32 d3_debug_data_base_addr;
+ u32 d3_debug_data_length;
+ u32 min_txq_size;
+ u32 gp2_reg_addr;
+ u32 min_ba_txq_size;
+ const struct iwl_fw_mon_regs mon_dram_regs;
+ const struct iwl_fw_mon_regs mon_smem_regs;
+ const struct iwl_fw_mon_regs mon_dbgi_regs;
};
/*
@@ -238,7 +299,7 @@ struct iwl_pwr_tx_backoff {
u32 backoff;
};
-enum iwl_cfg_trans_ltr_delay {
+enum iwl_mac_cfg_ltr_delay {
IWL_CFG_TRANS_LTR_DELAY_NONE = 0,
IWL_CFG_TRANS_LTR_DELAY_200US = 1,
IWL_CFG_TRANS_LTR_DELAY_2500US = 2,
@@ -246,35 +307,33 @@ enum iwl_cfg_trans_ltr_delay {
};
/**
- * struct iwl_cfg_trans_params - information needed to start the trans
+ * struct iwl_mac_cfg - information about the MAC-specific device part
*
* These values are specific to the device ID and do not change when
* multiple configs are used for a single device ID. The values are
* used, among other things, to boot the NIC so that the HW REV or
* RFID can be read before deciding the remaining parameters to use.
*
- * @base_params: pointer to basic parameters
+ * @base: pointer to basic parameters
* @device_family: the device family
* @umac_prph_offset: offset to add to UMAC periphery address
* @xtal_latency: power up latency to get the xtal stabilized
* @extra_phy_cfg_flags: extra configuration flags to pass to the PHY
- * @rf_id: need to read rf_id to determine the firmware image
* @gen2: 22000 and on transport operation
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
- * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
+ * @ltr_delay: LTR delay parameter, &enum iwl_mac_cfg_ltr_delay.
* @imr_enabled: use the IMR if supported.
*/
-struct iwl_cfg_trans_params {
- const struct iwl_base_params *base_params;
+struct iwl_mac_cfg {
+ const struct iwl_family_base_params *base;
enum iwl_device_family device_family;
u32 umac_prph_offset;
u32 xtal_latency;
u32 extra_phy_cfg_flags;
- u32 rf_id:1,
- gen2:1,
+ u32 gen2:1,
mq_rx_supported:1,
integrated:1,
low_latency_xtal:1,
@@ -283,36 +342,21 @@ struct iwl_cfg_trans_params {
imr_enabled:1;
};
-/**
- * struct iwl_fw_mon_reg - FW monitor register info
- * @addr: register address
- * @mask: register mask
- */
-struct iwl_fw_mon_reg {
- u32 addr;
- u32 mask;
-};
-
-/**
- * struct iwl_fw_mon_regs - FW monitor registers
- * @write_ptr: write pointer register
- * @cycle_cnt: cycle count register
- * @cur_frag: current fragment in use
+/*
+ * These sizes were picked according to 8 MSDUs inside 64/256/512 A-MSDUs
+ * in an A-MPDU, with additional overhead to account for processing time.
+ * They will be doubled for MACs starting from So/Ty that don't support
+ * putting multiple frames into a single buffer.
*/
-struct iwl_fw_mon_regs {
- struct iwl_fw_mon_reg write_ptr;
- struct iwl_fw_mon_reg cycle_cnt;
- struct iwl_fw_mon_reg cur_frag;
-};
+#define IWL_NUM_RBDS_NON_HE (64 * 8)
+#define IWL_NUM_RBDS_HE (256 * 8)
+#define IWL_NUM_RBDS_EHT (512 * 8)
/**
- * struct iwl_cfg
- * @trans: the trans-specific configuration part
- * @name: Official name of the device
+ * struct iwl_rf_cfg
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as <fw_name_pre>-<api>.ucode.
- * @fw_name_mac: MAC name for this config, the remaining pieces of the
- * name will be generated dynamically
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
@@ -323,34 +367,25 @@ struct iwl_fw_mon_regs {
* @non_shared_ant: the antenna that is for WiFi only
* @nvm_ver: NVM version
* @nvm_calib_ver: NVM calibration version
+ * @bw_limit: bandwidth limit for this device, if non-zero
* @ht_params: point to ht parameters
+ * @eeprom_params: EEPROM parameters (old devices)
+ * @thermal_params: Thermal throttling parameters
+ * @lp_xtal_workaround: low-power crystal workaround needed
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @tx_with_siso_diversity: 1x1 device with tx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
- * @high_temp: Is this NIC is designated to be in high temperature.
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
- * @nvm_hw_section_num: the ID of the HW NVM section
- * @mac_addr_from_csr: read HW address from CSR registers at this offset
- * @features: hw features, any combination of feature_passlist
* @pwr_tx_backoffs: translation table between power limits and backoffs
- * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
* @dccm_offset: offset from which DCCM begins
* @dccm_len: length of DCCM (including runtime stack CCM)
* @dccm2_offset: offset from which the second DCCM begins
* @dccm2_len: length of the second DCCM
- * @smem_offset: offset from which the SMEM begins
- * @smem_len: the length of SMEM
* @vht_mu_mimo_supported: VHT MU-MIMO support
- * @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
- * @d3_debug_data_base_addr: base address where D3 debug data is stored
- * @d3_debug_data_length: length of the D3 debug data
- * @min_txq_size: minimum number of slots required in a TX queue
* @uhb_supported: ultra high band channels supported
- * @min_ba_txq_size: minimum number of slots required in a TX queue which
- * based on hardware support (HE - 256, EHT - 1K).
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
*
@@ -358,60 +393,38 @@ struct iwl_fw_mon_regs {
* API differences in uCode shouldn't be handled here but through TLVs
* and/or the uCode API version instead.
*/
-struct iwl_cfg {
- struct iwl_cfg_trans_params trans;
+struct iwl_rf_cfg {
/* params specific to an individual device within a device family */
- const char *name;
const char *fw_name_pre;
- const char *fw_name_mac;
/* params likely to change within a device family */
- const struct iwl_ht_params *ht_params;
+ const struct iwl_ht_params ht_params;
const struct iwl_eeprom_params *eeprom_params;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
- const char *default_nvm_file_C_step;
const struct iwl_tt_params *thermal_params;
enum iwl_led_mode led_mode;
enum iwl_nvm_type nvm_type;
u32 max_data_size;
u32 max_inst_size;
- netdev_features_t features;
u32 dccm_offset;
u32 dccm_len;
u32 dccm2_offset;
u32 dccm2_len;
- u32 smem_offset;
- u32 smem_len;
u16 nvm_ver;
u16 nvm_calib_ver;
+ u16 bw_limit;
u32 rx_with_siso_diversity:1,
tx_with_siso_diversity:1,
internal_wimax_coex:1,
host_interrupt_operation_mode:1,
- high_temp:1,
- mac_addr_from_csr:10,
lp_xtal_workaround:1,
- apmg_not_supported:1,
vht_mu_mimo_supported:1,
- cdb:1,
- dbgc_supported:1,
uhb_supported:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
- u8 nvm_hw_section_num;
- u8 max_tx_agg_size;
u8 ucode_api_max;
u8 ucode_api_min;
u16 num_rbds;
- u32 min_umac_error_event_table;
- u32 d3_debug_data_base_addr;
- u32 d3_debug_data_length;
- u32 min_txq_size;
- u32 gp2_reg_addr;
- u32 min_ba_txq_size;
- const struct iwl_fw_mon_regs mon_dram_regs;
- const struct iwl_fw_mon_regs mon_smem_regs;
- const struct iwl_fw_mon_regs mon_dbgi_regs;
};
#define IWL_CFG_ANY (~0)
@@ -419,8 +432,10 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_PU 0x31
#define IWL_CFG_MAC_TYPE_TH 0x32
#define IWL_CFG_MAC_TYPE_QU 0x33
+#define IWL_CFG_MAC_TYPE_CC 0x34
#define IWL_CFG_MAC_TYPE_QUZ 0x35
#define IWL_CFG_MAC_TYPE_SO 0x37
+#define IWL_CFG_MAC_TYPE_TY 0x42
#define IWL_CFG_MAC_TYPE_SOF 0x43
#define IWL_CFG_MAC_TYPE_MA 0x44
#define IWL_CFG_MAC_TYPE_BZ 0x46
@@ -432,8 +447,6 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_BR 0x4C
#define IWL_CFG_MAC_TYPE_DR 0x4D
-#define IWL_CFG_RF_TYPE_TH 0x105
-#define IWL_CFG_RF_TYPE_TH1 0x108
#define IWL_CFG_RF_TYPE_JF2 0x105
#define IWL_CFG_RF_TYPE_JF1 0x108
#define IWL_CFG_RF_TYPE_HR2 0x10A
@@ -451,12 +464,6 @@ struct iwl_cfg {
#define IWL_CFG_RF_ID_HR 0x7
#define IWL_CFG_RF_ID_HR1 0x4
-#define IWL_CFG_NO_160 0x1
-#define IWL_CFG_160 0x0
-
-#define IWL_CFG_NO_320 0x1
-#define IWL_CFG_320 0x0
-
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -467,55 +474,133 @@ struct iwl_cfg {
#define IWL_CFG_IS_JACKET 0x1
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
+#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
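/* Example decode with the macros above, for a hypothetical subdevice ID
 * of 0x0264: IWL_SUBDEVICE_RF_ID() = 0x6, IWL_SUBDEVICE_BW_LIM() = 1,
 * IWL_SUBDEVICE_CORES() = 0.
 */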
struct iwl_dev_info {
+ const struct iwl_rf_cfg *cfg;
+ const char *name;
u16 device;
u16 subdevice;
- u16 mac_type;
- u16 rf_type;
- u8 mac_step;
- u8 rf_step;
- u8 rf_id;
- u8 no_160;
- u8 cores;
- u8 cdb;
- u8 jacket;
- const struct iwl_cfg *cfg;
- const char *name;
+ u32 subdevice_m_l:4,
+ subdevice_m_h:4,
+ match_rf_type:1,
+ rf_type:9,
+ match_bw_limit:1,
+ bw_limit:1,
+ match_rf_step:1,
+ rf_step:4,
+ match_rf_id:1,
+ rf_id:4,
+ match_cdb:1,
+ cdb:1;
};
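/* Matching sketch (an assumption from the match_* flag names above): a
 * field only participates in the device lookup when its match_* bit is
 * set, e.g. "if (di->match_rf_type && di->rf_type != rf_type) continue;".
 */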
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
extern const struct iwl_dev_info iwl_dev_info_table[];
extern const unsigned int iwl_dev_info_table_size;
const struct iwl_dev_info *
-iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
- u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 rf_type, u8 cdb,
+ u8 rf_id, u8 bw_limit, u8 rf_step);
extern const struct pci_device_id iwl_hw_card_ids[];
#endif
/*
* This list declares the config structures for all devices.
*/
-extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_dr_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_br_trans_cfg;
+extern const struct iwl_mac_cfg iwl1000_mac_cfg;
+extern const struct iwl_mac_cfg iwl5000_mac_cfg;
+extern const struct iwl_mac_cfg iwl2000_mac_cfg;
+extern const struct iwl_mac_cfg iwl2030_mac_cfg;
+extern const struct iwl_mac_cfg iwl105_mac_cfg;
+extern const struct iwl_mac_cfg iwl135_mac_cfg;
+extern const struct iwl_mac_cfg iwl5150_mac_cfg;
+extern const struct iwl_mac_cfg iwl6005_mac_cfg;
+extern const struct iwl_mac_cfg iwl6030_mac_cfg;
+extern const struct iwl_mac_cfg iwl6000i_mac_cfg;
+extern const struct iwl_mac_cfg iwl6050_mac_cfg;
+extern const struct iwl_mac_cfg iwl6150_mac_cfg;
+extern const struct iwl_mac_cfg iwl6000_mac_cfg;
+extern const struct iwl_mac_cfg iwl7000_mac_cfg;
+extern const struct iwl_mac_cfg iwl8000_mac_cfg;
+extern const struct iwl_mac_cfg iwl9000_mac_cfg;
+extern const struct iwl_mac_cfg iwl9560_mac_cfg;
+extern const struct iwl_mac_cfg iwl9560_long_latency_mac_cfg;
+extern const struct iwl_mac_cfg iwl9560_shared_clk_mac_cfg;
+extern const struct iwl_mac_cfg iwl_qu_mac_cfg;
+extern const struct iwl_mac_cfg iwl_qu_medium_latency_mac_cfg;
+extern const struct iwl_mac_cfg iwl_qu_long_latency_mac_cfg;
+extern const struct iwl_mac_cfg iwl_ax200_mac_cfg;
+extern const struct iwl_mac_cfg iwl_ty_mac_cfg;
+extern const struct iwl_mac_cfg iwl_so_mac_cfg;
+extern const struct iwl_mac_cfg iwl_so_long_latency_mac_cfg;
+extern const struct iwl_mac_cfg iwl_so_long_latency_imr_mac_cfg;
+extern const struct iwl_mac_cfg iwl_ma_mac_cfg;
+extern const struct iwl_mac_cfg iwl_bz_mac_cfg;
+extern const struct iwl_mac_cfg iwl_gl_mac_cfg;
+extern const struct iwl_mac_cfg iwl_sc_mac_cfg;
+extern const struct iwl_mac_cfg iwl_dr_mac_cfg;
+
+extern const char iwl1000_bgn_name[];
+extern const char iwl1000_bg_name[];
+extern const char iwl100_bgn_name[];
+extern const char iwl100_bg_name[];
+extern const char iwl2000_2bgn_name[];
+extern const char iwl2000_2bgn_d_name[];
+extern const char iwl2030_2bgn_name[];
+extern const char iwl105_bgn_name[];
+extern const char iwl105_bgn_d_name[];
+extern const char iwl135_bgn_name[];
+extern const char iwl5300_agn_name[];
+extern const char iwl5100_bgn_name[];
+extern const char iwl5100_abg_name[];
+extern const char iwl5100_agn_name[];
+extern const char iwl5350_agn_name[];
+extern const char iwl5150_agn_name[];
+extern const char iwl5150_abg_name[];
+extern const char iwl6005_2agn_name[];
+extern const char iwl6005_2abg_name[];
+extern const char iwl6005_2bg_name[];
+extern const char iwl6005_2agn_sff_name[];
+extern const char iwl6005_2agn_d_name[];
+extern const char iwl6005_2agn_mow1_name[];
+extern const char iwl6005_2agn_mow2_name[];
+extern const char iwl6030_2agn_name[];
+extern const char iwl6030_2abg_name[];
+extern const char iwl6030_2bgn_name[];
+extern const char iwl6030_2bg_name[];
+extern const char iwl6035_2agn_name[];
+extern const char iwl6035_2agn_sff_name[];
+extern const char iwl1030_bgn_name[];
+extern const char iwl1030_bg_name[];
+extern const char iwl130_bgn_name[];
+extern const char iwl130_bg_name[];
+extern const char iwl6000i_2agn_name[];
+extern const char iwl6000i_2abg_name[];
+extern const char iwl6000i_2bg_name[];
+extern const char iwl6050_2agn_name[];
+extern const char iwl6050_2abg_name[];
+extern const char iwl6150_bgn_name[];
+extern const char iwl6150_bg_name[];
+extern const char iwl6000_3agn_name[];
+extern const char iwl7260_2ac_name[];
+extern const char iwl7260_2n_name[];
+extern const char iwl7260_n_name[];
+extern const char iwl3160_2ac_name[];
+extern const char iwl3160_2n_name[];
+extern const char iwl3160_n_name[];
+extern const char iwl3165_2ac_name[];
+extern const char iwl3168_2ac_name[];
+extern const char iwl7265_2ac_name[];
+extern const char iwl7265_2n_name[];
+extern const char iwl7265_n_name[];
+extern const char iwl8260_2n_name[];
+extern const char iwl8260_2ac_name[];
+extern const char iwl8265_2ac_name[];
+extern const char iwl8275_2ac_name[];
+extern const char iwl4165_2ac_name[];
+extern const char iwl_killer_1435i_name[];
+extern const char iwl_killer_1434_kix_name[];
extern const char iwl9162_name[];
extern const char iwl9260_name[];
extern const char iwl9260_1_name[];
@@ -548,129 +633,84 @@ extern const char iwl_ax211_killer_1675s_name[];
extern const char iwl_ax211_killer_1675i_name[];
extern const char iwl_ax411_killer_1690s_name[];
extern const char iwl_ax411_killer_1690i_name[];
+extern const char iwl_ax210_name[];
extern const char iwl_ax211_name[];
-extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
-extern const char iwl_fm_name[];
-extern const char iwl_wh_name[];
-extern const char iwl_gl_name[];
-extern const char iwl_mtp_name[];
-extern const char iwl_dr_name[];
-extern const char iwl_br_name[];
+extern const char iwl_killer_be1750s_name[];
+extern const char iwl_killer_be1750i_name[];
+extern const char iwl_killer_be1750w_name[];
+extern const char iwl_killer_be1750x_name[];
+extern const char iwl_killer_be1790s_name[];
+extern const char iwl_killer_be1790i_name[];
+extern const char iwl_be201_name[];
+extern const char iwl_be200_name[];
+extern const char iwl_be202_name[];
+extern const char iwl_be401_name[];
+extern const char iwl_be213_name[];
+extern const char iwl_killer_be1775s_name[];
+extern const char iwl_killer_be1775i_name[];
+extern const char iwl_be211_name[];
+extern const char iwl_killer_bn1850w2_name[];
+extern const char iwl_killer_bn1850i_name[];
+extern const char iwl_bn201_name[];
+extern const char iwl_be221_name[];
+extern const char iwl_be223_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
-extern const struct iwl_cfg iwl5300_agn_cfg;
-extern const struct iwl_cfg iwl5100_agn_cfg;
-extern const struct iwl_cfg iwl5350_agn_cfg;
-extern const struct iwl_cfg iwl5100_bgn_cfg;
-extern const struct iwl_cfg iwl5100_abg_cfg;
-extern const struct iwl_cfg iwl5150_agn_cfg;
-extern const struct iwl_cfg iwl5150_abg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_cfg;
-extern const struct iwl_cfg iwl6005_2abg_cfg;
-extern const struct iwl_cfg iwl6005_2bg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
-extern const struct iwl_cfg iwl6005_2agn_d_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
-extern const struct iwl_cfg iwl1030_bgn_cfg;
-extern const struct iwl_cfg iwl1030_bg_cfg;
-extern const struct iwl_cfg iwl6030_2agn_cfg;
-extern const struct iwl_cfg iwl6030_2abg_cfg;
-extern const struct iwl_cfg iwl6030_2bgn_cfg;
-extern const struct iwl_cfg iwl6030_2bg_cfg;
-extern const struct iwl_cfg iwl6000i_2agn_cfg;
-extern const struct iwl_cfg iwl6000i_2abg_cfg;
-extern const struct iwl_cfg iwl6000i_2bg_cfg;
-extern const struct iwl_cfg iwl6000_3agn_cfg;
-extern const struct iwl_cfg iwl6050_2agn_cfg;
-extern const struct iwl_cfg iwl6050_2abg_cfg;
-extern const struct iwl_cfg iwl6150_bgn_cfg;
-extern const struct iwl_cfg iwl6150_bg_cfg;
-extern const struct iwl_cfg iwl1000_bgn_cfg;
-extern const struct iwl_cfg iwl1000_bg_cfg;
-extern const struct iwl_cfg iwl100_bgn_cfg;
-extern const struct iwl_cfg iwl100_bg_cfg;
-extern const struct iwl_cfg iwl130_bgn_cfg;
-extern const struct iwl_cfg iwl130_bg_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
-extern const struct iwl_cfg iwl2030_2bgn_cfg;
-extern const struct iwl_cfg iwl6035_2agn_cfg;
-extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
-extern const struct iwl_cfg iwl105_bgn_cfg;
-extern const struct iwl_cfg iwl105_bgn_d_cfg;
-extern const struct iwl_cfg iwl135_bgn_cfg;
+extern const struct iwl_rf_cfg iwl5300_agn_cfg;
+extern const struct iwl_rf_cfg iwl5350_agn_cfg;
+extern const struct iwl_rf_cfg iwl5100_n_cfg;
+extern const struct iwl_rf_cfg iwl5100_abg_cfg;
+extern const struct iwl_rf_cfg iwl5150_agn_cfg;
+extern const struct iwl_rf_cfg iwl5150_abg_cfg;
+extern const struct iwl_rf_cfg iwl6005_non_n_cfg;
+extern const struct iwl_rf_cfg iwl6005_n_cfg;
+extern const struct iwl_rf_cfg iwl6030_n_cfg;
+extern const struct iwl_rf_cfg iwl6030_non_n_cfg;
+extern const struct iwl_rf_cfg iwl6000i_2agn_cfg;
+extern const struct iwl_rf_cfg iwl6000i_non_n_cfg;
+extern const struct iwl_rf_cfg iwl6000_3agn_cfg;
+extern const struct iwl_rf_cfg iwl6050_2agn_cfg;
+extern const struct iwl_rf_cfg iwl6050_2abg_cfg;
+extern const struct iwl_rf_cfg iwl6150_bgn_cfg;
+extern const struct iwl_rf_cfg iwl6150_bg_cfg;
+extern const struct iwl_rf_cfg iwl1000_bgn_cfg;
+extern const struct iwl_rf_cfg iwl1000_bg_cfg;
+extern const struct iwl_rf_cfg iwl100_bgn_cfg;
+extern const struct iwl_rf_cfg iwl100_bg_cfg;
+extern const struct iwl_rf_cfg iwl130_bgn_cfg;
+extern const struct iwl_rf_cfg iwl130_bg_cfg;
+extern const struct iwl_rf_cfg iwl2000_2bgn_cfg;
+extern const struct iwl_rf_cfg iwl2030_2bgn_cfg;
+extern const struct iwl_rf_cfg iwl6035_2agn_cfg;
+extern const struct iwl_rf_cfg iwl105_bgn_cfg;
+extern const struct iwl_rf_cfg iwl135_bgn_cfg;
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
-extern const struct iwl_ht_params iwl_22000_ht_params;
-extern const struct iwl_cfg iwl7260_2ac_cfg;
-extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
-extern const struct iwl_cfg iwl7260_2n_cfg;
-extern const struct iwl_cfg iwl7260_n_cfg;
-extern const struct iwl_cfg iwl3160_2ac_cfg;
-extern const struct iwl_cfg iwl3160_2n_cfg;
-extern const struct iwl_cfg iwl3160_n_cfg;
-extern const struct iwl_cfg iwl3165_2ac_cfg;
-extern const struct iwl_cfg iwl3168_2ac_cfg;
-extern const struct iwl_cfg iwl7265_2ac_cfg;
-extern const struct iwl_cfg iwl7265_2n_cfg;
-extern const struct iwl_cfg iwl7265_n_cfg;
-extern const struct iwl_cfg iwl7265d_2ac_cfg;
-extern const struct iwl_cfg iwl7265d_2n_cfg;
-extern const struct iwl_cfg iwl7265d_n_cfg;
-extern const struct iwl_cfg iwl8260_2n_cfg;
-extern const struct iwl_cfg iwl8260_2ac_cfg;
-extern const struct iwl_cfg iwl8265_2ac_cfg;
-extern const struct iwl_cfg iwl8275_2ac_cfg;
-extern const struct iwl_cfg iwl4165_2ac_cfg;
-extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg;
-extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg;
-extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg;
-extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
-extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
-extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
-extern const struct iwl_cfg iwl_qu_b0_hr_b0;
-extern const struct iwl_cfg iwl_qu_c0_hr_b0;
-extern const struct iwl_cfg iwl_ax200_cfg_cc;
-extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
-extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
-extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
-extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
-extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
-extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
-extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
-extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
-extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
-extern const struct iwl_cfg killer1650x_2ax_cfg;
-extern const struct iwl_cfg killer1650w_2ax_cfg;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0;
-extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
-extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
-extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
-extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
-extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
-
-extern const struct iwl_cfg iwl_cfg_ma;
-
-extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
-extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
+extern const struct iwl_rf_cfg iwl7260_cfg;
+extern const struct iwl_rf_cfg iwl7260_high_temp_cfg;
+extern const struct iwl_rf_cfg iwl3160_cfg;
+extern const struct iwl_rf_cfg iwl3165_2ac_cfg;
+extern const struct iwl_rf_cfg iwl3168_2ac_cfg;
+extern const struct iwl_rf_cfg iwl7265_cfg;
+extern const struct iwl_rf_cfg iwl7265d_cfg;
+extern const struct iwl_rf_cfg iwl8260_cfg;
+extern const struct iwl_rf_cfg iwl8265_cfg;
+extern const struct iwl_rf_cfg iwl_rf_jf;
+extern const struct iwl_rf_cfg iwl_rf_jf_80mhz;
+extern const struct iwl_rf_cfg iwl_rf_hr1;
+extern const struct iwl_rf_cfg iwl_rf_hr;
+extern const struct iwl_rf_cfg iwl_rf_hr_80mhz;
+
+extern const struct iwl_rf_cfg iwl_rf_gf;
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
-extern const struct iwl_ht_params iwl_bz_ht_params;
-
-extern const struct iwl_ht_params iwl_bz_ht_params;
-
-extern const struct iwl_cfg iwl_cfg_bz;
-extern const struct iwl_cfg iwl_cfg_gl;
-
-extern const struct iwl_cfg iwl_cfg_sc;
-extern const struct iwl_cfg iwl_cfg_sc2;
-extern const struct iwl_cfg iwl_cfg_sc2f;
-extern const struct iwl_cfg iwl_cfg_dr;
-extern const struct iwl_cfg iwl_cfg_br;
+extern const struct iwl_rf_cfg iwl_rf_fm;
+extern const struct iwl_rf_cfg iwl_rf_fm_160mhz;
+#define iwl_rf_wh iwl_rf_fm
+#define iwl_rf_wh_160mhz iwl_rf_fm_160mhz
+#define iwl_rf_pe iwl_rf_fm
#endif /* CONFIG_IWLMLD */
#endif /* __IWL_CONFIG_H__ */
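The IWL_NUM_RBDS_* defines earlier in this header encode the arithmetic
from their comment: 8 MSDUs per A-MSDU, times 64/256/512 A-MSDUs in an
A-MPDU. A hypothetical selection helper, only to make the intent concrete
(pick_num_rbds() is not part of the driver):

	static unsigned int pick_num_rbds(bool eht_capable, bool he_capable)
	{
		if (eht_capable)
			return IWL_NUM_RBDS_EHT;	/* 512 * 8 */
		if (he_capable)
			return IWL_NUM_RBDS_HE;		/* 256 * 8 */
		return IWL_NUM_RBDS_NON_HE;		/*  64 * 8 */
	}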
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h
index 20563a32a21a..8c5c0ea46181 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h
@@ -2,8 +2,8 @@
/*
* Copyright (C) 2018, 2020-2025 Intel Corporation
*/
-#ifndef __iwl_context_info_file_gen3_h__
-#define __iwl_context_info_file_gen3_h__
+#ifndef __iwl_context_info_file_v2_h__
+#define __iwl_context_info_file_v2_h__
#include "iwl-context-info.h"
@@ -58,6 +58,7 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
* @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: Indicate fw to set SCU_FORCE_ACTIVE
* upon reset.
+ * @IWL_PRPH_SCRATCH_TOP_RESET: request TOP reset
*/
enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
@@ -74,15 +75,18 @@ enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20,
IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
+ IWL_PRPH_SCRATCH_TOP_RESET = BIT(30),
};
/**
* enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
+ * @IWL_PRPH_SCRATCH_EXT_EXT_FSEQ: external FSEQ image provided
* @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting
* @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode
* @IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID: use external 32 KHz clock
*/
enum iwl_prph_scratch_ext_flags {
+ IWL_PRPH_SCRATCH_EXT_EXT_FSEQ = BIT(0),
IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID = BIT(8),
@@ -202,6 +206,19 @@ struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_step_cfg step_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+#define IWL_NUM_DRAM_FSEQ_ENTRIES 8
+
+/**
+ * struct iwl_context_info_dram_fseq - images DRAM map (with fseq)
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @common: UMAC/LMAC/virtual images
+ * @fseq_img: FSEQ image DRAM map
+ */
+struct iwl_context_info_dram_fseq {
+ struct iwl_context_info_dram_nonfseq common;
+ __le64 fseq_img[IWL_NUM_DRAM_FSEQ_ENTRIES];
+} __packed; /* PERIPH_SCRATCH_DRAM_MAP_S */
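/* For scale: with IWL_NUM_DRAM_FSEQ_ENTRIES == 8 and up to 32 KB per
 * DRAM chunk, the mapped FSEQ image tops out at 8 * 32 KB = 256 KB.
 */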
+
/**
* struct iwl_prph_scratch - peripheral scratch mapping
* @ctrl_cfg: control and configuration of prph scratch
@@ -215,7 +232,7 @@ struct iwl_prph_scratch {
__le32 fseq_override;
__le32 step_analog_params;
__le32 reserved[8];
- struct iwl_context_info_dram dram;
+ struct iwl_context_info_dram_fseq dram;
} __packed; /* PERIPH_SCRATCH_S */
/**
@@ -233,7 +250,7 @@ struct iwl_prph_info {
} __packed; /* PERIPH_INFO_S */
/**
- * struct iwl_context_info_gen3 - device INIT configuration
+ * struct iwl_context_info_v2 - device INIT configuration
* @version: version of the context information
* @size: size of context information in DWs
* @config: context in which the peripheral would execute - a subset of
@@ -276,7 +293,7 @@ struct iwl_prph_info {
* @prph_scratch_size: the size of the peripheral scratch structure in DWs
* @reserved: reserved
*/
-struct iwl_context_info_gen3 {
+struct iwl_context_info_v2 {
__le16 version;
__le16 size;
__le32 config;
@@ -306,22 +323,22 @@ struct iwl_context_info_gen3 {
__le32 reserved;
} __packed; /* IPC_CONTEXT_INFO_S */
-int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
- const struct fw_img *fw);
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
+int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img);
+void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive);
-int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa);
-void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
+int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_payloads,
+ const struct iwl_ucode_capabilities *capa);
+void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
int
-iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa);
+iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
void
-iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
-int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans,
- u32 mbx_addr_0_step, u32 mbx_addr_1_step);
-#endif /* __iwl_context_info_file_gen3_h__ */
+iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
+#endif /* __iwl_context_info_file_v2_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index dfd44fabf237..7ae0fbdef208 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2022, 2024 Intel Corporation
+ * Copyright (C) 2018-2020, 2022, 2024-2025 Intel Corporation
*/
#ifndef __iwl_context_info_file_h__
#define __iwl_context_info_file_h__
@@ -78,13 +78,13 @@ struct iwl_context_info_control {
} __packed;
/**
- * struct iwl_context_info_dram - images DRAM map
+ * struct iwl_context_info_dram_nonfseq - images DRAM map
* each entry in the map represents a DRAM chunk of up to 32 KB
* @umac_img: UMAC image DRAM map
* @lmac_img: LMAC image DRAM map
* @virtual_img: paged image DRAM map
*/
-struct iwl_context_info_dram {
+struct iwl_context_info_dram_nonfseq {
__le64 umac_img[IWL_MAX_DRAM_ENTRY];
__le64 lmac_img[IWL_MAX_DRAM_ENTRY];
__le64 virtual_img[IWL_MAX_DRAM_ENTRY];
@@ -177,16 +177,16 @@ struct iwl_context_info {
struct iwl_context_info_early_dbg_cfg edbg_cfg;
struct iwl_context_info_pnvm_cfg pnvm_cfg;
__le32 reserved2[16];
- struct iwl_context_info_dram dram;
+ struct iwl_context_info_dram_nonfseq dram;
__le32 reserved3[16];
-} __packed;
+} __packed; /* BOOT_LOADER_CONTEXT_INFO_S */
-int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *img);
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
- struct iwl_context_info_dram *ctxt_dram);
+ struct iwl_context_info_dram_nonfseq *ctxt_dram);
void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
size_t size,
dma_addr_t *phys);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 3ff493e920d2..0fd452cb94ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -107,6 +107,13 @@
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
+#define CSR_IPC_STATE (CSR_BASE + 0x110)
+#define CSR_IPC_STATE_RESET 0x00000030
+#define CSR_IPC_STATE_RESET_NONE 0
+#define CSR_IPC_STATE_RESET_SW_READY 1
+#define CSR_IPC_STATE_RESET_TOP_READY 2
+#define CSR_IPC_STATE_RESET_TOP_FOLLOWER 3
+
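/* Sketch (assumption): CSR_IPC_STATE_RESET is a two-bit field at bits
 * 4-5, so the handshake state can be read with the bitfield helpers:
 *
 *	state = u32_get_bits(iwl_read32(trans, CSR_IPC_STATE),
 *			     CSR_IPC_STATE_RESET);
 */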
#define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114)
#define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3
#define CSR_IPC_SLEEP_CONTROL_RESUME 0
@@ -194,17 +201,19 @@
#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
+#define CSR_INT_BIT_RESET_DONE (1 << 2) /* reset handshake with firmware is done */
#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
-#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
- CSR_INT_BIT_HW_ERR | \
- CSR_INT_BIT_FH_TX | \
- CSR_INT_BIT_SW_ERR | \
- CSR_INT_BIT_RF_KILL | \
- CSR_INT_BIT_SW_RX | \
- CSR_INT_BIT_WAKEUP | \
- CSR_INT_BIT_ALIVE | \
+#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
+ CSR_INT_BIT_HW_ERR | \
+ CSR_INT_BIT_FH_TX | \
+ CSR_INT_BIT_SW_ERR | \
+ CSR_INT_BIT_RF_KILL | \
+ CSR_INT_BIT_SW_RX | \
+ CSR_INT_BIT_WAKEUP | \
+ CSR_INT_BIT_RESET_DONE | \
+ CSR_INT_BIT_ALIVE | \
CSR_INT_BIT_RX_PERIODIC)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
@@ -641,7 +650,7 @@ enum msix_hw_int_causes {
* HW address related registers *
*****************************************************************************/
-#define CSR_ADDR_BASE(trans) ((trans)->cfg->mac_addr_from_csr)
+#define CSR_ADDR_BASE(trans) ((trans)->mac_cfg->base->mac_addr_from_csr)
#define CSR_MAC_ADDR0_OTP(trans) (CSR_ADDR_BASE(trans) + 0x00)
#define CSR_MAC_ADDR1_OTP(trans) (CSR_ADDR_BASE(trans) + 0x04)
#define CSR_MAC_ADDR0_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x08)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index ce787326aa69..5240dacf1360 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -503,7 +503,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
int res;
if (!iwlwifi_mod_params.enable_ini ||
- trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
+ trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
return;
res = firmware_request_nowarn(&fw, yoyo_bin, dev);
@@ -603,11 +603,11 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
return 0;
num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
- if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
- } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
+ } else if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) {
return -EIO;
}
@@ -949,7 +949,7 @@ static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
struct iwl_dbg_tlv_timer_node *timer_node =
- from_timer(timer_node, t, timer);
+ timer_container_of(timer_node, t, timer);
struct iwl_fwrt_dump_data dump_data = {
.trig = (void *)timer_node->tlv->data,
};
@@ -1241,7 +1241,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
fwrt->trans->dbg.restart_required = false;
- if (fwrt->trans->trans_cfg->device_family ==
+ if (fwrt->trans->mac_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
fwrt->trans->dbg.restart_required = true;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index 76166e1b10e5..99789c7cef3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(C) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018, 2023 Intel Corporation
+ * Copyright(c) 2018, 2023, 2025 Intel Corporation
*****************************************************************************/
#ifndef __IWLWIFI_DEVICE_TRACE
@@ -54,11 +54,11 @@ static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
struct ieee80211_hdr *hdr = NULL;
size_t hdr_offset;
- if (cmd->cmd != trans->rx_mpdu_cmd)
+ if (cmd->cmd != trans->conf.rx_mpdu_cmd)
return len;
hdr_offset = sizeof(struct iwl_cmd_header) +
- trans->rx_mpdu_cmd_hdr_size;
+ trans->conf.rx_mpdu_cmd_hdr_size;
if (out_hdr_offset)
*out_hdr_offset = hdr_offset;
@@ -67,7 +67,8 @@ static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
if (!ieee80211_is_data(hdr->frame_control))
return len;
/* maybe try to identify EAPOL frames? */
- return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
+ return sizeof(__le32) + sizeof(*cmd) +
+ trans->conf.rx_mpdu_cmd_hdr_size +
ieee80211_hdrlen(hdr->frame_control);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index d36837501e08..9504a0cb8b13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -176,25 +176,86 @@ static inline char iwl_drv_get_step(int step)
static bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans)
{
- return CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM;
+ return CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM;
}
const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
{
char mac_step, rf_step;
- const char *rf, *cdb;
+ const char *mac, *rf, *cdb;
if (trans->cfg->fw_name_pre)
return trans->cfg->fw_name_pre;
- if (WARN_ON(!trans->cfg->fw_name_mac))
- return "unconfigured";
+ mac_step = iwl_drv_get_step(trans->info.hw_rev_step);
- mac_step = iwl_drv_get_step(trans->hw_rev_step);
+ switch (CSR_HW_REV_TYPE(trans->info.hw_rev)) {
+ case IWL_CFG_MAC_TYPE_PU:
+ mac = "9000-pu";
+ mac_step = 'b';
+ break;
+ case IWL_CFG_MAC_TYPE_TH:
+ mac = "9260-th";
+ mac_step = 'b';
+ break;
+ case IWL_CFG_MAC_TYPE_QU:
+ mac = "Qu";
+ break;
+ case IWL_CFG_MAC_TYPE_CC:
+ /* special case - no RF since it's fixed (discrete) */
+ scnprintf(buf, FW_NAME_PRE_BUFSIZE, "iwlwifi-cc-a0");
+ return buf;
+ case IWL_CFG_MAC_TYPE_QUZ:
+ mac = "QuZ";
+ /* all QuZ use A0 firmware */
+ mac_step = 'a';
+ break;
+ case IWL_CFG_MAC_TYPE_SO:
+ case IWL_CFG_MAC_TYPE_SOF:
+ mac = "so";
+ mac_step = 'a';
+ break;
+ case IWL_CFG_MAC_TYPE_TY:
+ mac = "ty";
+ mac_step = 'a';
+ break;
+ case IWL_CFG_MAC_TYPE_MA:
+ mac = "ma";
+ break;
+ case IWL_CFG_MAC_TYPE_BZ:
+ case IWL_CFG_MAC_TYPE_BZ_W:
+ mac = "bz";
+ break;
+ case IWL_CFG_MAC_TYPE_GL:
+ mac = "gl";
+ break;
+ case IWL_CFG_MAC_TYPE_SC:
+ mac = "sc";
+ break;
+ case IWL_CFG_MAC_TYPE_SC2:
+ mac = "sc2";
+ break;
+ case IWL_CFG_MAC_TYPE_SC2F:
+ mac = "sc2f";
+ break;
+ case IWL_CFG_MAC_TYPE_BR:
+ mac = "br";
+ break;
+ case IWL_CFG_MAC_TYPE_DR:
+ mac = "dr";
+ break;
+ default:
+ return "unknown-mac";
+ }
- rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id));
+ rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->info.hw_rf_id));
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_JF1:
+ case IWL_CFG_RF_TYPE_JF2:
+ rf = "jf";
+ rf_step = 'b';
+ break;
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
rf = "hr";
@@ -202,29 +263,26 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
break;
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
+ rf_step = 'a';
break;
case IWL_CFG_RF_TYPE_FM:
rf = "fm";
break;
case IWL_CFG_RF_TYPE_WH:
- if (SILICON_Z_STEP ==
- CSR_HW_RFID_STEP(trans->hw_rf_id)) {
- rf = "whtc";
- rf_step = 'a';
- } else {
- rf = "wh";
- }
+ rf = "wh";
+ break;
+ case IWL_CFG_RF_TYPE_PE:
+ rf = "pe";
break;
default:
return "unknown-rf";
}
- cdb = CSR_HW_RFID_IS_CDB(trans->hw_rf_id) ? "4" : "";
+ cdb = CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id) ? "4" : "";
scnprintf(buf, FW_NAME_PRE_BUFSIZE,
"iwlwifi-%s-%c0-%s%s-%c0",
- trans->cfg->fw_name_mac, mac_step,
- rf, cdb, rf_step);
+ mac, mac_step, rf, cdb, rf_step);
return buf;
}
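/* Worked example of the scnprintf() format above: mac = "so",
 * mac_step = 'a', rf = "gf", cdb = "" and rf_step = 'a' produce
 * "iwlwifi-so-a0-gf-a0", which the caller suffixes with "-<api>.ucode".
 */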
@@ -233,39 +291,64 @@ IWL_EXPORT_SYMBOL(iwl_drv_get_fwname_pre);
static void iwl_req_fw_callback(const struct firmware *ucode_raw,
void *context);
+static void iwl_get_ucode_api_versions(struct iwl_trans *trans,
+ unsigned int *api_min,
+ unsigned int *api_max)
+{
+ const struct iwl_family_base_params *base = trans->mac_cfg->base;
+ const struct iwl_rf_cfg *cfg = trans->cfg;
+
+ if (!base->ucode_api_max) {
+ *api_min = cfg->ucode_api_min;
+ *api_max = cfg->ucode_api_max;
+ return;
+ }
+
+ if (!cfg->ucode_api_max) {
+ *api_min = base->ucode_api_min;
+ *api_max = base->ucode_api_max;
+ return;
+ }
+
+ *api_min = max(cfg->ucode_api_min, base->ucode_api_min);
+ *api_max = min(cfg->ucode_api_max, base->ucode_api_max);
+}
+
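/* Worked case for iwl_get_ucode_api_versions() (made-up numbers): a
 * family base range of 77..99 intersected with an RF config range of
 * 72..89 yields 77..89; when either side leaves its maximum unset, the
 * other side's range is used verbatim.
 */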
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
- const struct iwl_cfg *cfg = drv->trans->cfg;
char _fw_name_pre[FW_NAME_PRE_BUFSIZE];
+ unsigned int ucode_api_max, ucode_api_min;
const char *fw_name_pre;
- if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- (drv->trans->hw_rev_step != SILICON_B_STEP &&
- drv->trans->hw_rev_step != SILICON_C_STEP)) {
+ iwl_get_ucode_api_versions(drv->trans, &ucode_api_min, &ucode_api_max);
+
+ if (drv->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+ (drv->trans->info.hw_rev_step != SILICON_B_STEP &&
+ drv->trans->info.hw_rev_step != SILICON_C_STEP)) {
IWL_ERR(drv,
"Only HW steps B and C are currently supported (0x%0x)\n",
- drv->trans->hw_rev);
+ drv->trans->info.hw_rev);
return -EINVAL;
}
fw_name_pre = iwl_drv_get_fwname_pre(drv->trans, _fw_name_pre);
if (first)
- drv->fw_index = cfg->ucode_api_max;
+ drv->fw_index = ucode_api_max;
else
drv->fw_index--;
- if (drv->fw_index < cfg->ucode_api_min) {
+ if (drv->fw_index < ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
- if (cfg->ucode_api_min == cfg->ucode_api_max) {
+ if (ucode_api_min == ucode_api_max) {
IWL_ERR(drv, "%s-%d is required\n", fw_name_pre,
- cfg->ucode_api_max);
+ ucode_api_max);
} else {
IWL_ERR(drv, "minimum version required: %s-%d\n",
- fw_name_pre, cfg->ucode_api_min);
+ fw_name_pre, ucode_api_min);
IWL_ERR(drv, "maximum version supported: %s-%d\n",
- fw_name_pre, cfg->ucode_api_max);
+ fw_name_pre, ucode_api_max);
}
IWL_ERR(drv,
@@ -1235,7 +1318,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
- if (drv->trans->trans_cfg->device_family <
+ if (drv->trans->mac_cfg->device_family <
IWL_DEVICE_FAMILY_22000)
break;
drv->trans->dbg.umac_error_event_table =
@@ -1251,7 +1334,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
- if (drv->trans->trans_cfg->device_family <
+ if (drv->trans->mac_cfg->device_family <
IWL_DEVICE_FAMILY_22000)
break;
drv->trans->dbg.lmac_error_event_table[0] =
@@ -1373,7 +1456,7 @@ static int iwl_alloc_ucode(struct iwl_drv *drv,
static int validate_sec_sizes(struct iwl_drv *drv,
struct iwl_firmware_pieces *pieces,
- const struct iwl_cfg *cfg)
+ const struct iwl_rf_cfg *cfg)
{
IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n",
get_sec_size(pieces, IWL_UCODE_REGULAR,
@@ -1496,14 +1579,15 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces *pieces;
- const unsigned int api_max = drv->trans->cfg->ucode_api_max;
- const unsigned int api_min = drv->trans->cfg->ucode_api_min;
+ unsigned int api_min, api_max;
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
int i;
bool usniffer_images = false;
bool failure = true;
+ iwl_get_ucode_api_versions(drv->trans, &api_min, &api_max);
+
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
@@ -1697,14 +1781,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
else
fw->init_evtlog_size =
- drv->trans->trans_cfg->base_params->max_event_log_size;
+ drv->trans->mac_cfg->base->max_event_log_size;
fw->init_errlog_ptr = pieces->init_errlog_ptr;
fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
if (pieces->inst_evtlog_size)
fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
else
fw->inst_evtlog_size =
- drv->trans->trans_cfg->base_params->max_event_log_size;
+ drv->trans->mac_cfg->base->max_event_log_size;
fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 854957bdf79d..595300a14639 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2020-2021, 2023, 2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#ifndef __iwl_drv_h__
@@ -53,7 +53,7 @@
struct iwl_drv;
struct iwl_trans;
-struct iwl_cfg;
+struct iwl_rf_cfg;
/**
* iwl_drv_start - start the drv
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 5c8f1868db64..0f6de08b7473 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023-2025 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -71,7 +71,7 @@
static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
unsigned int chnl)
{
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
WARN_ON_ONCE(chnl >= 64);
return TFH_TFDQ_CBB_TABLE + 8 * chnl;
}
@@ -378,14 +378,14 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
*/
#define RFH_GEN_STATUS 0xA09808
-#define RFH_GEN_STATUS_GEN3 0xA07824
+#define RFH_GEN_STATUS_AX210 0xA07824
#define RBD_FETCH_IDLE BIT(29)
#define SRAM_DMA_IDLE BIT(30)
#define RXF_DMA_IDLE BIT(31)
/* DMA configuration */
#define RFH_RXF_DMA_CFG 0xA09820
-#define RFH_RXF_DMA_CFG_GEN3 0xA07880
+#define RFH_RXF_DMA_CFG_AX210 0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS 16
@@ -588,13 +588,12 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
-#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
-#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
+#define TFD_QUEUE_BC_SIZE_AX210 1024
+#define TFD_QUEUE_BC_SIZE_BZ (1024 * 4)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
@@ -717,30 +716,19 @@ struct iwl_tfh_tfd {
/* Fixed (non-configurable) rx data from phy */
/**
- * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * struct iwl_bc_tbl_entry - scheduler byte count table entry
* base physical address provided by SCD_DRAM_BASE_ADDR
* @tfd_offset:
* For devices up to 22000:
* 0-12 - tx command byte count
* 12-16 - station index
- * For 22000:
+ * For 22000 and on:
* 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
-struct iwlagn_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __packed;
-
-/**
- * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
- * For AX210 and on:
- * @tfd_offset: 0-12 - tx command byte count
- * 12-13 - number of 64 byte chunks
- * 14-16 - reserved
- */
-struct iwl_gen3_bc_tbl_entry {
+struct iwl_bc_tbl_entry {
__le16 tfd_offset;
} __packed;
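The rename above folds the two generation-specific byte-count table types into a single per-entry struct. A minimal sketch of how one entry could be packed under the documented layout — the helper name and masks are illustrative, not from the patch:

/* Illustrative only: pack one byte-count entry per the layout above
 * (bits 0-11 tx command byte count, bits 12-13 number of 64-byte chunks).
 */
static void bc_tbl_set_entry(struct iwl_bc_tbl_entry *tbl, int idx,
			     u16 byte_cnt, u8 num_chunks)
{
	u16 val = (byte_cnt & 0xfff) | ((num_chunks & 0x3) << 12);

	tbl[idx].tfd_offset = cpu_to_le16(val);
}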
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 0653ca8b974a..80591809164e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
#include <linux/delay.h>
@@ -211,13 +211,13 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
void iwl_force_nmi(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV, 1);
- else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
- else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_NMI_BIT);
else
@@ -260,7 +260,7 @@ struct reg {
static int iwl_dump_rfh(struct iwl_trans *trans, char **buf)
{
int i, q;
- int num_q = trans->num_rx_queues;
+ int num_q = trans->info.num_rxqs;
static const u32 rfh_tbl[] = {
RFH_RXF_DMA_CFG,
RFH_GEN_CFG,
@@ -368,7 +368,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
FH_TSSR_TX_ERROR_REG
};
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
return iwl_dump_rfh(trans, buf);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -423,7 +423,7 @@ static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
static void iwl_dump_host_monitor(struct iwl_trans *trans)
{
- switch (trans->trans_cfg->device_family) {
+ switch (trans->mac_cfg->device_family) {
case IWL_DEVICE_FAMILY_22000:
case IWL_DEVICE_FAMILY_AX210:
IWL_ERR(trans, "CSR_RESET = 0x%x\n",
@@ -445,11 +445,11 @@ static void iwl_dump_host_monitor(struct iwl_trans *trans)
int iwl_finish_nic_init(struct iwl_trans *trans)
{
- const struct iwl_cfg_trans_params *cfg_trans = trans->trans_cfg;
+ const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
u32 poll_ready;
int err;
- if (cfg_trans->bisr_workaround) {
+ if (mac_cfg->bisr_workaround) {
/* ensure the TOP FSM isn't still in previous reset */
mdelay(2);
}
@@ -458,7 +458,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
@@ -469,7 +469,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
}
- if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000)
+ if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
/*
@@ -484,7 +484,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
iwl_dump_host_monitor(trans);
}
- if (cfg_trans->bisr_workaround) {
+ if (mac_cfg->bisr_workaround) {
/* ensure BISR shift has finished */
udelay(200);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 37b3bd62897e..f4833c5fe86e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2021, 2025 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
@@ -64,38 +64,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf);
*/
static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return ofs + trans->trans_cfg->umac_prph_offset;
+ return ofs + trans->mac_cfg->umac_prph_offset;
}
static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
{
return iwl_read_prph_no_grab(trans, ofs +
- trans->trans_cfg->umac_prph_offset);
+ trans->mac_cfg->umac_prph_offset);
}
static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset);
+ return iwl_read_prph(trans, ofs + trans->mac_cfg->umac_prph_offset);
}
static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph_no_grab(trans, ofs + trans->trans_cfg->umac_prph_offset,
+ iwl_write_prph_no_grab(trans, ofs + trans->mac_cfg->umac_prph_offset,
val);
}
static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph(trans, ofs + trans->trans_cfg->umac_prph_offset, val);
+ iwl_write_prph(trans, ofs + trans->mac_cfg->umac_prph_offset, val);
}
static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
return iwl_poll_prph_bit(trans, addr +
- trans->trans_cfg->umac_prph_offset,
+ trans->mac_cfg->umac_prph_offset,
bits, mask, timeout);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index c381511e9ec6..0592f0f59d1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -143,6 +143,9 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
* @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
* when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY: active scanning allowed and
+ * AP allowed only in 20 MHz. Valid only
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
* @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
* when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
* @NVM_CHANNEL_RADAR: radar detection required
@@ -159,20 +162,21 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
*/
enum iwl_nvm_channel_flags {
- NVM_CHANNEL_VALID = BIT(0),
- NVM_CHANNEL_IBSS = BIT(1),
- NVM_CHANNEL_ACTIVE = BIT(3),
- NVM_CHANNEL_RADAR = BIT(4),
- NVM_CHANNEL_INDOOR_ONLY = BIT(5),
- NVM_CHANNEL_GO_CONCURRENT = BIT(6),
- NVM_CHANNEL_UNIFORM = BIT(7),
- NVM_CHANNEL_20MHZ = BIT(8),
- NVM_CHANNEL_40MHZ = BIT(9),
- NVM_CHANNEL_80MHZ = BIT(10),
- NVM_CHANNEL_160MHZ = BIT(11),
- NVM_CHANNEL_DC_HIGH = BIT(12),
- NVM_CHANNEL_VLP = BIT(13),
- NVM_CHANNEL_AFC = BIT(14),
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_INDOOR_ONLY = BIT(5),
+ NVM_CHANNEL_GO_CONCURRENT = BIT(6),
+ NVM_CHANNEL_UNIFORM = BIT(7),
+ NVM_CHANNEL_20MHZ = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+ NVM_CHANNEL_80MHZ = BIT(10),
+ NVM_CHANNEL_160MHZ = BIT(11),
+ NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
};
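The enum above is a bitmask parsed straight out of NVM; the new BIT(2) flag slots into the previously unused gap between IBSS and ACTIVE. A self-contained decode sketch (the flag values mirror the enum, the program itself is illustrative):

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	/* a channel marked VALID | ALLOW_20MHZ_ACTIVITY | 40MHZ */
	unsigned int ch_flags = BIT(0) | BIT(2) | BIT(9);

	printf("valid=%d allow_20mhz=%d ht40=%d\n",
	       !!(ch_flags & BIT(0)), !!(ch_flags & BIT(2)),
	       !!(ch_flags & BIT(9)));
	return 0;
}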
/**
@@ -332,7 +336,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
}
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
- u32 nvm_flags, const struct iwl_cfg *cfg)
+ u32 nvm_flags, const struct iwl_rf_cfg *cfg)
{
u32 flags = IEEE80211_CHAN_NO_HT40;
@@ -399,7 +403,7 @@ static int iwl_init_channel_map(struct iwl_trans *trans,
const void * const nvm_ch_flags,
u32 sbands_flags, bool v4)
{
- const struct iwl_cfg *cfg = trans->cfg;
+ const struct iwl_rf_cfg *cfg = trans->cfg;
struct device *dev = trans->dev;
int ch_idx;
int n_channels = 0;
@@ -500,7 +504,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
struct ieee80211_sta_vht_cap *vht_cap,
u8 tx_chains, u8 rx_chains)
{
- const struct iwl_cfg *cfg = trans->cfg;
+ const struct iwl_rf_cfg *cfg = trans->cfg;
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
@@ -513,7 +517,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
IEEE80211_VHT_MAX_AMPDU_1024K <<
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
- if (!trans->cfg->ht_params->stbc)
+ if (!trans->cfg->ht_params.stbc)
vht_cap->cap &= ~IEEE80211_VHT_CAP_RXSTBC_MASK;
if (data->vht160_supported)
@@ -523,7 +527,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
if (cfg->vht_mu_mimo_supported)
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
- if (cfg->ht_params->ldpc)
+ if (cfg->ht_params.ldpc)
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
if (data->sku_cap_mimo_disabled) {
@@ -531,21 +535,21 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
num_tx_ants = 1;
}
- if (trans->cfg->ht_params->stbc && num_tx_ants > 1)
+ if (trans->cfg->ht_params.stbc && num_tx_ants > 1)
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
else
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
case IWL_AMSDU_2K:
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
else
@@ -916,8 +920,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
{
bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO));
- bool slow_pcie = (!trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB);
+ bool slow_pcie = (!trans->mac_cfg->integrated &&
+ trans->info.pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB);
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -944,7 +948,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
break;
case NL80211_BAND_6GHZ:
- if (!trans->reduced_cap_sku) {
+ if (!trans->reduced_cap_sku &&
+ (!trans->cfg->bw_limit || trans->cfg->bw_limit >= 320)) {
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |=
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |=
@@ -1031,11 +1036,11 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap)
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
case IWL_CFG_RF_TYPE_FM:
case IWL_CFG_RF_TYPE_WH:
@@ -1047,7 +1052,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
break;
}
- if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iftype_data->eht_cap.has_eht) {
iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
@@ -1076,13 +1081,13 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BCAST_TWT;
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
!is_ap) {
iftype_data->vendor_elems.data = iwl_vendor_caps;
iftype_data->vendor_elems.len = ARRAY_SIZE(iwl_vendor_caps);
}
- if (!trans->cfg->ht_params->stbc) {
+ if (!trans->cfg->ht_params.stbc) {
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
~IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &=
@@ -1094,19 +1099,23 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
}
- if (trans->no_160)
+ if (trans->cfg->bw_limit && trans->cfg->bw_limit < 160)
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- if (trans->reduced_cap_sku) {
+ if ((trans->cfg->bw_limit && trans->cfg->bw_limit < 320) ||
+ trans->reduced_cap_sku) {
memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
+
+ if (trans->reduced_cap_sku) {
iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
- iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
}
}
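The hunk above replaces the old boolean no_160 with a numeric bw_limit in MHz, where 0 means no limit. A sketch of the resulting predicate (hypothetical helper, not part of the patch):

/* Hypothetical helper modelling the checks above. */
static inline int bw_allowed(unsigned int bw_limit, unsigned int bw_mhz)
{
	return !bw_limit || bw_limit >= bw_mhz;
}

/* e.g. bw_allowed(160, 320) == 0 -> zero out the 320 MHz EHT MCS set,
 *      bw_allowed(0, 320)   == 1 -> no limit configured, keep it.
 */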
@@ -1242,7 +1251,7 @@ static void iwl_init_sbands(struct iwl_trans *trans,
n_used, n_channels);
}
-static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+static int iwl_get_sku(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
if (cfg->nvm_type != IWL_NVM_EXT)
@@ -1251,7 +1260,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000));
}
-static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
+static int iwl_get_nvm_version(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw)
{
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + NVM_VERSION);
@@ -1260,7 +1269,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
NVM_VERSION_EXT_NVM));
}
-static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+static int iwl_get_radio_cfg(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
if (cfg->nvm_type != IWL_NVM_EXT)
@@ -1270,7 +1279,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
}
-static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
+static int iwl_get_n_hw_addrs(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw)
{
int n_hw_addr;
@@ -1282,7 +1291,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
return n_hw_addr & N_HW_ADDR_MASK;
}
-static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
+static void iwl_set_radio_cfg(const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data,
u32 radio_cfg)
{
@@ -1341,7 +1350,7 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
}
static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
- const struct iwl_cfg *cfg,
+ const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *mac_override,
const __be16 *nvm_hw)
@@ -1390,11 +1399,12 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
}
static int iwl_set_hw_address(struct iwl_trans *trans,
- const struct iwl_cfg *cfg,
+ const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data, const __be16 *nvm_hw,
const __le16 *mac_override)
{
- if (cfg->mac_addr_from_csr) {
+ const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
+ if (mac_cfg->base->mac_addr_from_csr) {
iwl_set_hw_address_from_csr(trans, data);
} else if (cfg->nvm_type != IWL_NVM_EXT) {
const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
@@ -1424,7 +1434,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
}
static bool
-iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const __be16 *nvm_hw)
{
/*
@@ -1436,7 +1446,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* in 5GHz otherwise the FW will throw a sysassert when we try
* to use them.
*/
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
/*
* Unlike the other sections in the NVM, the hw
* section uses big-endian.
@@ -1456,7 +1466,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
struct iwl_nvm_data *
-iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
const struct iwl_fw *fw, u8 tx_ant, u8 rx_ant)
{
@@ -1520,7 +1530,7 @@ iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data);
struct iwl_nvm_data *
-iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
@@ -1620,7 +1630,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
- const struct iwl_cfg *cfg)
+ const struct iwl_rf_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1655,6 +1665,10 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
flags |= NL80211_RRF_NO_OUTDOOR;
+ if (nvm_flags & NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY &&
+ flags & NL80211_RRF_NO_IR)
+ flags |= NL80211_RRF_ALLOW_20MHZ_ACTIVITY;
+
/* Set the GO concurrent flag only in case that NO_IR is set.
* Otherwise it is meaningless
*/
@@ -1731,10 +1745,12 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
}
struct ieee80211_regdomain *
-iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_nvm_mcc_info(struct iwl_trans *trans,
int num_of_ch, __le32 *channels, u16 fw_mcc,
u16 geo_info, u32 cap, u8 resp_ver)
{
+ const struct iwl_rf_cfg *cfg = trans->cfg;
+ struct device *dev = trans->dev;
int ch_idx;
u16 ch_flags;
u32 reg_rule_flags, prev_reg_rule_flags = 0;
@@ -1989,8 +2005,8 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
le32_to_cpu(dword_buff[3]));
/* nvm file validation, dword_buff[2] holds the file version */
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
- trans->hw_rev_step == SILICON_C_STEP &&
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ trans->info.hw_rev_step == SILICON_C_STEP &&
le32_to_cpu(dword_buff[2]) < 0xE4A) {
ret = -EFAULT;
goto out;
@@ -2057,7 +2073,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
break;
}
- iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);
+ iwl_nvm_fixups(trans->info.hw_id, section_id, temp, section_size);
kfree(nvm_sections[section_id].data);
nvm_sections[section_id].data = temp;
@@ -2160,7 +2176,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
+ if (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
nvm->sku_cap_11be_enable = true;
/* Initialize PHY sku data */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 0c6c3fb8c6dd..9ce9fa4e78fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2015, 2018-2025 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
@@ -30,7 +30,7 @@ enum iwl_nvm_sbands_flags {
* later with iwl_free_nvm_data().
*/
struct iwl_nvm_data *
-iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
@@ -46,9 +46,17 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* accordingly. An ERR_PTR is returned on error.
* If not given to the regulatory core, the user is responsible for freeing
* the regdomain returned here with kfree.
+ *
+ * @trans: the transport
+ * @num_of_ch: the number of channels
+ * @channels: channel map
+ * @fw_mcc: firmware country code
+ * @geo_info: geo info value
+ * @cap: capability
+ * @resp_ver: FW response version
*/
struct ieee80211_regdomain *
-iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_nvm_mcc_info(struct iwl_trans *trans,
int num_of_ch, __le32 *channels, u16 fw_mcc,
u16 geo_info, u32 cap, u8 resp_ver);
@@ -87,7 +95,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
* iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data
*/
struct iwl_nvm_data *
-iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
const struct iwl_fw *fw, u8 set_tx_ant, u8 set_rx_ant);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
index b3c25acd3691..ec312c90ff85 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023, 2025 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#include <linux/types.h>
@@ -42,7 +42,7 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
enum nl80211_band band,
u8 tx_chains, u8 rx_chains)
{
- const struct iwl_cfg *cfg = trans->cfg;
+ const struct iwl_rf_cfg *cfg = trans->cfg;
int max_bit_rate = 0;
tx_chains = hweight8(tx_chains);
@@ -53,7 +53,8 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
if (!(data->sku_cap_11n_enable) ||
(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
- !cfg->ht_params) {
+ /* no device supports HT without also supporting HT40 */
+ !cfg->ht_params.ht40_bands) {
ht_info->ht_supported = false;
return;
}
@@ -64,17 +65,17 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
- if (cfg->ht_params->stbc) {
+ if (cfg->ht_params.stbc) {
ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
if (tx_chains > 1)
ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
}
- if (cfg->ht_params->ldpc)
+ if (cfg->ht_params.ldpc)
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
- if (trans->trans_cfg->mq_rx_supported ||
+ if (trans->mac_cfg->mq_rx_supported ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
@@ -90,13 +91,13 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
if (rx_chains >= 3)
ht_info->mcs.rx_mask[2] = 0xFF;
- if (cfg->ht_params->ht_greenfield_support)
+ if (cfg->ht_params.ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (cfg->ht_params->ht40_bands & BIT(band)) {
+ if (cfg->ht_params.ht40_bands & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
max_bit_rate = MAX_BIT_RATE_40_MHZ;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 34eca1a568ea..5dc299296d6d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2024-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -17,7 +17,7 @@ struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
-struct iwl_cfg;
+struct iwl_rf_cfg;
/**
* DOC: Operational mode - what is it ?
@@ -52,12 +52,20 @@ struct iwl_cfg;
* any debug collection must happen synchronously as
* the device will be shut down
* @IWL_ERR_TYPE_CMD_QUEUE_FULL: command queue was full
+ * @IWL_ERR_TYPE_TOP_RESET_BY_BT: TOP reset initiated by BT
+ * @IWL_ERR_TYPE_TOP_FATAL_ERROR: TOP fatal error
+ * @IWL_ERR_TYPE_TOP_RESET_FAILED: TOP reset failed
+ * @IWL_ERR_TYPE_DEBUGFS: error/reset indication from debugfs
*/
enum iwl_fw_error_type {
IWL_ERR_TYPE_IRQ,
IWL_ERR_TYPE_NMI_FORCED,
IWL_ERR_TYPE_RESET_HS_TIMEOUT,
IWL_ERR_TYPE_CMD_QUEUE_FULL,
+ IWL_ERR_TYPE_TOP_RESET_BY_BT,
+ IWL_ERR_TYPE_TOP_FATAL_ERROR,
+ IWL_ERR_TYPE_TOP_RESET_FAILED,
+ IWL_ERR_TYPE_DEBUGFS,
};
/**
@@ -142,7 +150,7 @@ struct iwl_fw_error_dump_mode {
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
- const struct iwl_cfg *cfg,
+ const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw,
struct dentry *dbgfs_dir);
void (*stop)(struct iwl_op_mode *op_mode);
@@ -242,6 +250,9 @@ static inline void iwl_op_mode_dump_error(struct iwl_op_mode *op_mode,
{
might_sleep();
+ if (WARN_ON(mode->type == IWL_ERR_TYPE_TOP_RESET_BY_BT))
+ return;
+
if (op_mode->ops->dump_error)
op_mode->ops->dump_error(op_mode, mode);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index e7b2e08645ef..8a40801cf0dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -15,7 +15,7 @@
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/internal.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
struct iwl_trans_dev_restart_data {
struct list_head list;
@@ -82,14 +82,14 @@ void iwl_trans_free_restart_list(void)
struct iwl_trans_reprobe {
struct device *dev;
- struct work_struct work;
+ struct delayed_work work;
};
static void iwl_trans_reprobe_wk(struct work_struct *wk)
{
struct iwl_trans_reprobe *reprobe;
- reprobe = container_of(wk, typeof(*reprobe), work);
+ reprobe = container_of(wk, typeof(*reprobe), work.work);
if (device_reprobe(reprobe->dev))
dev_err(reprobe->dev, "reprobe failed!\n");
@@ -98,6 +98,31 @@ static void iwl_trans_reprobe_wk(struct work_struct *wk)
module_put(THIS_MODULE);
}
+static void iwl_trans_schedule_reprobe(struct iwl_trans *trans,
+ unsigned int delay_ms)
+{
+ struct iwl_trans_reprobe *reprobe;
+
+ /*
+ * take a module reference so this doesn't run while the module
+ * is unloading, and so no work is scheduled with code that's
+ * being removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(trans, "Module is being unloaded - abort\n");
+ return;
+ }
+
+ reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
+ if (!reprobe) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ reprobe->dev = get_device(trans->dev);
+ INIT_DELAYED_WORK(&reprobe->work, iwl_trans_reprobe_wk);
+ schedule_delayed_work(&reprobe->work, msecs_to_jiffies(delay_ms));
+}
+
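Note the container_of() path changed along with the type: a delayed_work embeds a work_struct and the handler receives a pointer to that inner member, hence the two-level work.work lookup above. A generic pattern sketch (kernel-style, illustrative):

struct foo {
	struct delayed_work work;	/* handler sees &foo.work.work */
};

static void foo_wk(struct work_struct *wk)
{
	/* two member names: the delayed_work, then its inner work_struct */
	struct foo *f = container_of(wk, struct foo, work.work);

	(void)f;
}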
#define IWL_TRANS_RESET_OK_TIME 7 /* seconds */
static enum iwl_reset_mode
@@ -106,18 +131,46 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
struct iwl_trans_dev_restart_data *data;
enum iwl_reset_mode at_least = 0;
unsigned int index;
- static const enum iwl_reset_mode escalation_list[] = {
+ static const enum iwl_reset_mode escalation_list_old[] = {
+ IWL_RESET_MODE_SW_RESET,
+ IWL_RESET_MODE_REPROBE,
+ IWL_RESET_MODE_REPROBE,
+ IWL_RESET_MODE_FUNC_RESET,
+ IWL_RESET_MODE_PROD_RESET,
+ };
+ static const enum iwl_reset_mode escalation_list_sc[] = {
IWL_RESET_MODE_SW_RESET,
IWL_RESET_MODE_REPROBE,
IWL_RESET_MODE_REPROBE,
IWL_RESET_MODE_FUNC_RESET,
- /* FIXME: add TOP reset */
+ IWL_RESET_MODE_TOP_RESET,
IWL_RESET_MODE_PROD_RESET,
- /* FIXME: add TOP reset */
+ IWL_RESET_MODE_TOP_RESET,
IWL_RESET_MODE_PROD_RESET,
- /* FIXME: add TOP reset */
+ IWL_RESET_MODE_TOP_RESET,
IWL_RESET_MODE_PROD_RESET,
};
+ const enum iwl_reset_mode *escalation_list;
+ size_t escalation_list_size;
+
+ /* used by TOP fatal error/TOP reset */
+ if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_FAILED)
+ return IWL_RESET_MODE_PROD_RESET;
+
+ if (trans->request_top_reset) {
+ trans->request_top_reset = 0;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC)
+ return IWL_RESET_MODE_TOP_RESET;
+ return IWL_RESET_MODE_PROD_RESET;
+ }
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
+ escalation_list = escalation_list_sc;
+ escalation_list_size = ARRAY_SIZE(escalation_list_sc);
+ } else {
+ escalation_list = escalation_list_old;
+ escalation_list_size = ARRAY_SIZE(escalation_list_old);
+ }
if (trans->restart.during_reset)
at_least = IWL_RESET_MODE_REPROBE;
@@ -132,8 +185,8 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
data->restart_count = 0;
index = data->restart_count;
- if (index >= ARRAY_SIZE(escalation_list)) {
- index = ARRAY_SIZE(escalation_list) - 1;
+ if (index >= escalation_list_size) {
+ index = escalation_list_size - 1;
if (!data->backoff) {
data->backoff = true;
return IWL_RESET_MODE_BACKOFF;
@@ -144,15 +197,21 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
return max(at_least, escalation_list[index]);
}
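The selection logic clamps restart_count to the last (most drastic) list entry and grants one BACKOFF pass the first time the end is reached. A compact model of the visible logic (the names and the -1 sentinel are illustrative; the later reset of the backoff flag is elided between hunks and not modelled):

static int pick_mode(const int *list, unsigned int n, unsigned int count,
		     int *backoff)
{
	if (count >= n) {
		count = n - 1;		/* stay on the most drastic mode */
		if (!*backoff) {
			*backoff = 1;
			return -1;	/* stands in for IWL_RESET_MODE_BACKOFF */
		}
	}
	return list[count];
}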
+#define IWL_TRANS_TOP_FOLLOWER_WAIT 180 /* ms */
+
#define IWL_TRANS_RESET_DELAY (HZ * 60)
static void iwl_trans_restart_wk(struct work_struct *wk)
{
struct iwl_trans *trans = container_of(wk, typeof(*trans),
restart.wk.work);
- struct iwl_trans_reprobe *reprobe;
enum iwl_reset_mode mode;
+ if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_BY_BT) {
+ iwl_trans_schedule_reprobe(trans, IWL_TRANS_TOP_FOLLOWER_WAIT);
+ return;
+ }
+
if (!trans->op_mode)
return;
@@ -187,31 +246,19 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
iwl_trans_inc_restart_count(trans->dev);
switch (mode) {
+ case IWL_RESET_MODE_TOP_RESET:
+ trans->do_top_reset = 1;
+ IWL_ERR(trans, "Device error - TOP reset\n");
+ fallthrough;
case IWL_RESET_MODE_SW_RESET:
- IWL_ERR(trans, "Device error - SW reset\n");
+ if (mode == IWL_RESET_MODE_SW_RESET)
+ IWL_ERR(trans, "Device error - SW reset\n");
iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
break;
case IWL_RESET_MODE_REPROBE:
IWL_ERR(trans, "Device error - reprobe!\n");
- /*
- * get a module reference to avoid doing this while unloading
- * anyway and to avoid scheduling a work with code that's
- * being removed.
- */
- if (!try_module_get(THIS_MODULE)) {
- IWL_ERR(trans, "Module is being unloaded - abort\n");
- return;
- }
-
- reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
- if (!reprobe) {
- module_put(THIS_MODULE);
- return;
- }
- reprobe->dev = get_device(trans->dev);
- INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk);
- schedule_work(&reprobe->work);
+ iwl_trans_schedule_reprobe(trans, 0);
break;
default:
iwl_trans_pcie_reset(trans, mode);
@@ -221,7 +268,7 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_cfg_trans_params *cfg_trans)
+ const struct iwl_mac_cfg *mac_cfg)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -232,7 +279,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
if (!trans)
return NULL;
- trans->trans_cfg = cfg_trans;
+ trans->mac_cfg = mac_cfg;
#ifdef CONFIG_LOCKDEP
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
@@ -240,7 +287,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
#endif
trans->dev = dev;
- trans->num_rx_queues = 1;
INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);
@@ -251,14 +297,18 @@ int iwl_trans_init(struct iwl_trans *trans)
{
int txcmd_size, txcmd_align;
- if (!trans->trans_cfg->gen2) {
- txcmd_size = sizeof(struct iwl_tx_cmd);
+ /* check if name/num_rx_queues were set as a proxy for info being set */
+ if (WARN_ON(!trans->info.name || !trans->info.num_rxqs))
+ return -EINVAL;
+
+ if (!trans->mac_cfg->gen2) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v6);
txcmd_align = sizeof(void *);
- } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
- txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ } else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v9);
txcmd_align = 64;
} else {
- txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_size = sizeof(struct iwl_tx_cmd);
txcmd_align = 128;
}
@@ -266,7 +316,7 @@ int iwl_trans_init(struct iwl_trans *trans)
txcmd_size += 36; /* biggest possible 802.11 header */
/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
- if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
+ if (WARN_ON(trans->mac_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
@@ -278,9 +328,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (!trans->dev_cmd_pool)
return -ENOMEM;
- /* Initialize the wait queue for commands */
- init_waitqueue_head(&trans->wait_command_queue);
-
return 0;
}
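The WARN_ON above is what makes the gen2 page-boundary guarantee work: every pool object starts on a txcmd_align boundary, so an object shorter than txcmd_align fits entirely inside one aligned block, and 64- or 128-byte blocks never straddle a page. Illustrative compile-time check, assuming 4 KiB pages:

/* Both alignments divide the page size, so an aligned block of that
 * size can never cross a page boundary.
 */
_Static_assert(4096 % 64 == 0 && 4096 % 128 == 0,
	       "txcmd alignments divide the page size");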
@@ -298,17 +345,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
return -ERFKILL;
- /*
- * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
- * bit is set early in the D3 flow, before we send all the commands
- * that configure the firmware for D3 operation (power, patterns, ...)
- * and we don't want to flag all those with CMD_SEND_IN_D3.
- * So use the system_pm_mode instead. The only command sent after
- * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
- * CMD_SEND_IN_D3.
- */
- if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
- !(cmd->flags & CMD_SEND_IN_D3)))
+ if (unlikely(test_bit(STATUS_SUSPENDED, &trans->status)))
return -EHOSTDOWN;
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
@@ -321,7 +358,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
- if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
+ if (trans->conf.wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
if (cmd->id != REPLY_ERROR)
cmd->id = DEF_ID(cmd->id);
}
@@ -365,11 +402,12 @@ const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
grp = iwl_cmd_groupid(id);
cmd = iwl_cmd_opcode(id);
- if (!trans->command_groups || grp >= trans->command_groups_size ||
- !trans->command_groups[grp].arr)
+ if (!trans->conf.command_groups ||
+ grp >= trans->conf.command_groups_size ||
+ !trans->conf.command_groups[grp].arr)
return "UNKNOWN";
- arr = &trans->command_groups[grp];
+ arr = &trans->conf.command_groups[grp];
ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
if (!ret)
return "UNKNOWN";
@@ -377,37 +415,29 @@ const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);
-int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
+void iwl_trans_op_mode_enter(struct iwl_trans *trans,
+ struct iwl_op_mode *op_mode)
{
- int i, j;
- const struct iwl_hcmd_arr *arr;
+ trans->op_mode = op_mode;
- for (i = 0; i < trans->command_groups_size; i++) {
- arr = &trans->command_groups[i];
- if (!arr->arr)
- continue;
- for (j = 0; j < arr->size - 1; j++)
- if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
- return -1;
- }
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
+ if (WARN_ON(trans->conf.n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+ trans->conf.n_no_reclaim_cmds =
+ ARRAY_SIZE(trans->conf.no_reclaim_cmds);
-void iwl_trans_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
-{
- trans->op_mode = trans_cfg->op_mode;
+ WARN_ON_ONCE(!trans->conf.rx_mpdu_cmd);
- iwl_trans_pcie_configure(trans, trans_cfg);
- WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
+ iwl_trans_pcie_op_mode_enter(trans);
}
-IWL_EXPORT_SYMBOL(iwl_trans_configure);
+IWL_EXPORT_SYMBOL(iwl_trans_op_mode_enter);
int iwl_trans_start_hw(struct iwl_trans *trans)
{
might_sleep();
+ clear_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status);
+ /* opmode may not resume if it detects errors */
+ clear_bit(STATUS_SUSPENDED, &trans->status);
+
return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);
@@ -421,6 +451,7 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans)
cancel_delayed_work_sync(&trans->restart.wk);
trans->op_mode = NULL;
+ memset(&trans->conf, 0, sizeof(trans->conf));
trans->state = IWL_TRANS_NO_FW;
}
@@ -497,18 +528,31 @@ IWL_EXPORT_SYMBOL(iwl_trans_dump_data);
int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
+ int err;
+
might_sleep();
- return iwl_trans_pcie_d3_suspend(trans, test, reset);
+ err = iwl_trans_pcie_d3_suspend(trans, test, reset);
+
+ if (!err)
+ set_bit(STATUS_SUSPENDED, &trans->status);
+
+ return err;
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
bool test, bool reset)
{
+ int err;
+
might_sleep();
- return iwl_trans_pcie_d3_resume(trans, status, test, reset);
+ err = iwl_trans_pcie_d3_resume(trans, status, test, reset);
+
+ clear_bit(STATUS_SUSPENDED, &trans->status);
+
+ return err;
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
@@ -558,34 +602,39 @@ iwl_trans_release_nic_access(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
-void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_fw_alive(struct iwl_trans *trans)
{
might_sleep();
trans->state = IWL_TRANS_FW_ALIVE;
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_trans_pcie_gen2_fw_alive(trans);
else
- iwl_trans_pcie_fw_alive(trans, scd_addr);
+ iwl_trans_pcie_fw_alive(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);
-int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
- bool run_in_rfkill)
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
+ enum iwl_ucode_type ucode_type, bool run_in_rfkill)
{
+ const struct fw_img *img;
int ret;
might_sleep();
- WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+ img = iwl_get_ucode_image(fw, ucode_type);
+ if (!img)
+ return -EINVAL;
clear_bit(STATUS_FW_ERROR, &trans->status);
- if (trans->trans_cfg->gen2)
- ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
+ if (trans->mac_cfg->gen2)
+ ret = iwl_trans_pcie_gen2_start_fw(trans, fw, img,
+ run_in_rfkill);
else
- ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);
+ ret = iwl_trans_pcie_start_fw(trans, fw, img,
+ run_in_rfkill);
if (ret == 0)
trans->state = IWL_TRANS_FW_STARTED;
@@ -626,7 +675,7 @@ void iwl_trans_stop_device(struct iwl_trans *trans)
iwl_op_mode_dump_error(trans->op_mode, &mode);
}
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_trans_pcie_gen2_stop_device(trans);
else
iwl_trans_pcie_stop_device(trans);
@@ -645,7 +694,7 @@ int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
"bad state = %d\n", trans->state))
return -EIO;
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);
return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
@@ -778,14 +827,14 @@ int iwl_trans_load_pnvm(struct iwl_trans *trans,
const struct iwl_pnvm_image *pnvm_data,
const struct iwl_ucode_capabilities *capa)
{
- return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
+ return iwl_trans_pcie_ctx_info_v2_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
void iwl_trans_set_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
- iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
+ iwl_trans_pcie_ctx_info_v2_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
@@ -793,7 +842,7 @@ int iwl_trans_load_reduce_power(struct iwl_trans *trans,
const struct iwl_pnvm_image *payloads,
const struct iwl_ucode_capabilities *capa)
{
- return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
+ return iwl_trans_pcie_ctx_info_v2_load_reduce_power(trans, payloads,
capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
@@ -801,6 +850,6 @@ IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
void iwl_trans_set_reduce_power(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
- iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
+ iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index ce4954b0d524..012b1e44bce3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -109,16 +109,12 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* the response. The caller needs to call iwl_free_resp when done.
* @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
* @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
- * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
- * SUSPEND and RESUME commands. We are in D3 mode when we set
- * trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
CMD_BLOCK_TXQS = BIT(3),
- CMD_SEND_IN_D3 = BIT(4),
};
#define CMD_MODE_BITS 5
@@ -304,6 +300,10 @@ enum iwl_d3_status {
* via iwl_trans_finish_sw_reset()
* @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
* the firmware state yet
+ * @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't
+ * attempt another reset yet
+ * @STATUS_SUSPENDED: device is suspended, don't send commands that
+ * aren't marked accordingly
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -317,6 +317,8 @@ enum iwl_trans_status {
STATUS_SUPPRESS_CMD_ERROR_ONCE,
STATUS_IN_SW_RESET,
STATUS_RESET_PENDING,
+ STATUS_TRANS_RESET_IN_PROGRESS,
+ STATUS_SUSPENDED,
};
static inline int
@@ -328,6 +330,7 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
+ return get_order(8 * 1024);
case IWL_AMSDU_12K:
return get_order(16 * 1024);
default:
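The added return fixes a fall-through: IWL_AMSDU_8K previously dropped into the 12K case and allocated a 16 KiB-order buffer. get_order() rounds up to the next power-of-two page count; a self-contained userspace model, assuming 4 KiB pages (illustrative):

#include <stdio.h>

static int get_order_4k(unsigned long size)
{
	int order = 0;
	unsigned long pages = (size + 4095) / 4096;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	printf("%d %d %d\n", get_order_4k(4 * 1024),
	       get_order_4k(8 * 1024), get_order_4k(16 * 1024));
	return 0;	/* prints: 0 1 2 */
}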
@@ -387,7 +390,8 @@ struct iwl_dump_sanitize_ops {
/**
* struct iwl_trans_config - transport configuration
*
- * @op_mode: pointer to the upper layer.
+ * These values should be set before iwl_trans_op_mode_enter().
+ *
* @cmd_queue: the index of the command queue.
* Must be set before start_fw.
* @cmd_fifo: the fifo for host commands
@@ -398,8 +402,6 @@ struct iwl_dump_sanitize_ops {
* @n_no_reclaim_cmds: # of commands in list
* @rx_buf_size: RX buffer size needed for A-MSDUs
* if unset 4k will be the RX buffer size
- * @bc_table_dword: set to true if the BC table expects the byte count to be
- * in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @command_groups: array of command groups, each member is an array of the
* commands in the group; for debugging only
@@ -410,17 +412,24 @@ struct iwl_dump_sanitize_ops {
* @queue_alloc_cmd_ver: queue allocation command version, set to 0
* for using the older SCD_QUEUE_CFG, set to the version of
* SCD_QUEUE_CONFIG_CMD otherwise.
+ * @wide_cmd_header: true when ucode supports wide command header format
+ * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
+ * starting the firmware, used for tracing
+ * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
+ * start of the 802.11 header in the @rx_mpdu_cmd
+ * @dsbr_urm_fw_dependent: switch to URM based on fw settings
+ * @dsbr_urm_permanent: switch to URM permanently
+ * @mbx_addr_0_step: step address data 0
+ * @mbx_addr_1_step: step address data 1
+ * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used
*/
struct iwl_trans_config {
- struct iwl_op_mode *op_mode;
-
u8 cmd_queue;
u8 cmd_fifo;
- const u8 *no_reclaim_cmds;
- unsigned int n_no_reclaim_cmds;
+ u8 n_no_reclaim_cmds;
+ u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
enum iwl_amsdu_size rx_buf_size;
- bool bc_table_dword;
bool scd_set_active;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
@@ -428,6 +437,16 @@ struct iwl_trans_config {
u8 cb_data_offs;
bool fw_reset_handshake;
u8 queue_alloc_cmd_ver;
+
+ bool wide_cmd_header;
+ u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+
+ u8 dsbr_urm_fw_dependent:1,
+ dsbr_urm_permanent:1,
+ ext_32khz_clock_valid:1;
+
+ u32 mbx_addr_0_step;
+ u32 mbx_addr_1_step;
};
struct iwl_trans_dump_data {
@@ -513,23 +532,6 @@ enum iwl_trans_state {
*/
/**
- * enum iwl_plat_pm_mode - platform power management mode
- *
- * This enumeration describes the device's platform power management
- * behavior when in system-wide suspend (i.e WoWLAN).
- *
- * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
- * device. In system-wide suspend mode, it means that the all
- * connections will be closed automatically by mac80211 before
- * the platform is suspended.
- * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
- */
-enum iwl_plat_pm_mode {
- IWL_PLAT_PM_MODE_DISABLED,
- IWL_PLAT_PM_MODE_D3,
-};
-
-/**
* enum iwl_ini_cfg_state
* @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
* @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
@@ -819,106 +821,90 @@ struct iwl_txq {
};
/**
+ * struct iwl_trans_info - transport info for outside use
+ * @name: the device name
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
+ * @hw_rev: the revision data of the HW
+ * @hw_rev_step: The mac step of the HW
+ * @hw_rf_id: the device RF ID
+ * @hw_cnv_id: the device CNV ID
+ * @hw_crf_id: the device CRF ID
+ * @hw_wfpm_id: the device wfpm ID
+ * @hw_id: the ID of the device / sub-device
+ * Bits 0:15 represent the sub-device ID
+ * Bits 16:31 represent the device ID.
+ * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
+ * only valid for discrete (not integrated) NICs
+ * @num_rxqs: number of RX queues allocated by the transport
+ */
+struct iwl_trans_info {
+ const char *name;
+ u32 max_skb_frags;
+ u32 hw_rev;
+ u32 hw_rev_step;
+ u32 hw_rf_id;
+ u32 hw_crf_id;
+ u32 hw_cnv_id;
+ u32 hw_wfpm_id;
+ u32 hw_id;
+ u8 pcie_link_speed;
+ u8 num_rxqs;
+};
+
+/**
* struct iwl_trans - transport common data
*
* @csme_own: true if we couldn't get ownership on the device
* @op_mode: pointer to the op_mode
- * @trans_cfg: the trans-specific configuration part
+ * @mac_cfg: the trans-specific configuration part
* @cfg: pointer to the configuration
* @drv: pointer to iwl_drv
+ * @conf: configuration set by the opmode before enter
* @state: current device state
* @status: a bit-mask of transport status flags
* @dev: pointer to struct device * that represents the device
- * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
- * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
- * @hw_rf_id: a u32 with the device RF ID
- * @hw_cnv_id: a u32 with the device CNV ID
- * @hw_crf_id: a u32 with the device CRF ID
- * @hw_wfpm_id: a u32 with the device wfpm ID
- * @hw_id: a u32 with the ID of the device / sub-device.
- * Set during transport allocation.
- * @hw_id_str: a string with info about HW ID. Set during transport allocation.
- * @sku_id: the SKU identifier (for PNVM matching)
+ * @info: device information for use by other layers
* @pnvm_loaded: indicates PNVM was loaded
- * @hw_rev: the revision data of the HW
- * @hw_rev_step: The mac step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
* @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
- * @command_groups: pointer to command group name list array
- * @command_groups_size: array size of @command_groups
- * @wide_cmd_header: true when ucode supports wide command header format
- * @wait_command_queue: wait queue for sync commands
- * @num_rx_queues: number of RX queues allocated by the transport;
- * the transport must set this before calling iwl_drv_start()
- * @iml_len: the length of the image loader
- * @iml: a pointer to the image loader itself
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
* @dev_cmd_pool_name: name for the TX command allocation pool
* @dbgfs_dir: iwlwifi debugfs base dir for this device
* @sync_cmd_lockdep_map: lockdep map for checking sync commands
- * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
- * starting the firmware, used for tracing
- * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
- * start of the 802.11 header in the @rx_mpdu_cmd
* @dbg: additional debug data, see &struct iwl_trans_debug
* @init_dram: FW initialization DMA data
- * @system_pm_mode: the system-wide power management mode in use.
- * This mode is set dynamically, depending on the WoWLAN values
- * configured from the userspace at runtime.
- * @name: the device name
- * @mbx_addr_0_step: step address data 0
- * @mbx_addr_1_step: step address data 1
- * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
- * only valid for discrete (not integrated) NICs
- * @invalid_tx_cmd: invalid TX command buffer
* @reduced_cap_sku: reduced capability supported SKU
- * @no_160: device not supporting 160 MHz
* @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
* @restart: restart worker data
* @restart.wk: restart worker
* @restart.mode: reset/restart error mode information
* @restart.during_reset: error occurred during previous software reset
- * @me_recheck_wk: worker to recheck WiAMT/CSME presence
- * @me_present: WiAMT/CSME is detected as present (1), not present (0)
- * or unknown (-1, so can still use it as a boolean safely)
* @trans_specific: data for the specific transport this is allocated for/with
- * @dsbr_urm_fw_dependent: switch to URM based on fw settings
- * @dsbr_urm_permanent: switch to URM permanently
- * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used
+ * @request_top_reset: TOP reset was requested, used by the reset
+ * worker that should be scheduled (with appropriate reason)
+ * @do_top_reset: indication to the (PCIe) transport/context-info
+ * to do the TOP reset
*/
struct iwl_trans {
bool csme_own;
struct iwl_op_mode *op_mode;
- const struct iwl_cfg_trans_params *trans_cfg;
- const struct iwl_cfg *cfg;
+ const struct iwl_mac_cfg *mac_cfg;
+ const struct iwl_rf_cfg *cfg;
struct iwl_drv *drv;
+ struct iwl_trans_config conf;
enum iwl_trans_state state;
unsigned long status;
struct device *dev;
- u32 max_skb_frags;
- u32 hw_rev;
- u32 hw_rev_step;
- u32 hw_rf_id;
- u32 hw_crf_id;
- u32 hw_cnv_id;
- u32 hw_wfpm_id;
- u32 hw_id;
- char hw_id_str[52];
- u32 sku_id[3];
- bool reduced_cap_sku;
- u8 no_160:1, step_urm:1;
-
- u8 dsbr_urm_fw_dependent:1,
- dsbr_urm_permanent:1;
- bool ext_32khz_clock_valid;
-
- u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+ const struct iwl_trans_info info;
+ bool reduced_cap_sku;
+ bool step_urm;
bool pm_support;
bool ltr_enabled;
@@ -927,16 +913,6 @@ struct iwl_trans {
u8 reduce_power_loaded:1;
u8 failed_to_load_reduce_power_image:1;
- const struct iwl_hcmd_arr *command_groups;
- int command_groups_size;
- bool wide_cmd_header;
-
- wait_queue_head_t wait_command_queue;
- u8 num_rx_queues;
-
- size_t iml_len;
- u8 *iml;
-
/* The following fields are internal only */
struct kmem_cache *dev_cmd_pool;
char dev_cmd_pool_name[50];
@@ -950,24 +926,14 @@ struct iwl_trans {
struct iwl_trans_debug dbg;
struct iwl_self_init_dram init_dram;
- enum iwl_plat_pm_mode system_pm_mode;
-
- const char *name;
- u32 mbx_addr_0_step;
- u32 mbx_addr_1_step;
-
- u8 pcie_link_speed;
-
- struct iwl_dma_ptr invalid_tx_cmd;
-
struct {
struct delayed_work wk;
struct iwl_fw_error_dump_mode mode;
bool during_reset;
} restart;
- struct delayed_work me_recheck_wk;
- s8 me_present;
+ u8 request_top_reset:1,
+ do_top_reset:1;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -975,19 +941,18 @@ struct iwl_trans {
};
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
-int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
-void iwl_trans_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg);
+void iwl_trans_op_mode_enter(struct iwl_trans *trans,
+ struct iwl_op_mode *op_mode);
int iwl_trans_start_hw(struct iwl_trans *trans);
void iwl_trans_op_mode_leave(struct iwl_trans *trans);
-void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_trans_fw_alive(struct iwl_trans *trans);
-int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
- bool run_in_rfkill);
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
+ enum iwl_ucode_type ucode_type, bool run_in_rfkill);
void iwl_trans_stop_device(struct iwl_trans *trans);
@@ -1150,6 +1115,9 @@ static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
{
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return;
+ /* cleared on device init, but not on any unbind/reprobe */
+ if (test_and_set_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status))
+ return;
trans->restart.mode.type = type;
trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER;
@@ -1187,6 +1155,9 @@ static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans,
set_bit(STATUS_IN_SW_RESET, &trans->status);
+ if (WARN_ON(type == IWL_ERR_TYPE_TOP_RESET_BY_BT))
+ return;
+
if (!trans->op_mode->ops->sw_reset ||
!trans->op_mode->ops->sw_reset(trans->op_mode, type))
clear_bit(STATUS_IN_SW_RESET, &trans->status);
@@ -1234,7 +1205,7 @@ static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_cfg_trans_params *cfg_trans);
+ const struct iwl_mac_cfg *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);
@@ -1245,6 +1216,19 @@ static inline bool iwl_trans_is_hw_error_value(u32 val)
void iwl_trans_free_restart_list(void);
+static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans)
+{
+ u16 result = trans->cfg->num_rbds;
+
+ /*
+ * Since the AX210 family (So/Ty), the device cannot put multiple
+ * frames into the same buffer, so double the value for them.
+ */
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 2 * result;
+ return result;
+}
+
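A usage sketch for the new helper (hypothetical caller): ring sizing now goes through this accessor instead of reading cfg->num_rbds directly, so the AX210+ doubling is applied in exactly one place.

/* e.g. with cfg->num_rbds == 512, an AX210-or-later NIC gets 1024
 * descriptors because each received frame consumes a whole buffer.
 */
u16 num_rbds = iwl_trans_get_num_rbds(trans);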
/*****************************************************
* PCIe handling
*****************************************************/
@@ -1256,6 +1240,8 @@ enum iwl_reset_mode {
/* upper level modes: */
IWL_RESET_MODE_SW_RESET,
IWL_RESET_MODE_REPROBE,
+ /* TOP reset doesn't require PCIe remove */
+ IWL_RESET_MODE_TOP_RESET,
/* PCIE level modes: */
IWL_RESET_MODE_REMOVE_ONLY,
IWL_RESET_MODE_RESCAN,
@@ -1272,4 +1258,19 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
+/* Internal helper */
+static inline void iwl_trans_set_info(struct iwl_trans *trans,
+ struct iwl_trans_info *info)
+{
+ struct iwl_trans_info *write;
+
+ write = (void *)(uintptr_t)&trans->info;
+ *write = *info;
+}
+
+static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
+{
+ return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
+}
+
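For illustration only: a standalone sketch of the two helpers above, a write-once store through a const-qualified info field and a bits-31:16 device-id extraction. The struct layout is hypothetical, GENMASK/u32_get_bits are open-coded, and note that writing through the const cast is the same once-at-init trick the driver relies on.

#include <stdint.h>
#include <stdio.h>

struct trans_info { uint32_t hw_id; };
struct trans { const struct trans_info info; };

static void trans_set_info(struct trans *t, const struct trans_info *info)
{
	/* strip the const exactly once, at init time, as the helper above
	 * does; the field stays read-only for the rest of the lifetime */
	struct trans_info *write = (struct trans_info *)(uintptr_t)&t->info;

	*write = *info;
}

static uint16_t trans_get_device_id(const struct trans *t)
{
	return (uint16_t)((t->info.hw_id >> 16) & 0xffff);	/* bits 31:16 */
}

int main(void)
{
	struct trans t = { .info = { 0 } };
	const struct trans_info info = { .hw_id = 0x27218086u };

	trans_set_info(&t, &info);
	printf("device id: 0x%04x\n", trans_get_device_id(&t));	/* 0x2721 */
	return 0;
}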
#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
index b14ec98e28b6..c5b49851e4b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
@@ -4,7 +4,6 @@
*/
#include <net/gso.h>
#include <linux/ieee80211.h>
-#include <net/gso.h>
#include <net/ip.h>
#include "iwl-drv.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
index 687a9450ac98..6b349270481d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/agg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
@@ -124,10 +124,12 @@ void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
rcu_read_lock();
baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
- if (IWL_FW_CHECK(mld, !baid_data,
- "Got valid BAID %d but not allocated, invalid BAR release!\n",
- baid))
+ if (!baid_data) {
+ IWL_DEBUG_HT(mld,
+ "Got valid BAID %d but not allocated\n",
+ baid);
goto out_unlock;
+ }
if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
sta_id > mld->fw->ucode_capa.num_stations ||
@@ -315,7 +317,7 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);
static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
{
struct iwl_mld_baid_data *data =
- from_timer(data, t, session_timer);
+ timer_container_of(data, t, session_timer);
struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
struct iwl_mld_baid_data *ba_data;
struct ieee80211_link_sta *link_sta;
@@ -444,7 +446,7 @@ static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
struct iwl_mld_baid_data *data,
u16 ssn)
{
- for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+ for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
struct iwl_mld_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
struct iwl_mld_reorder_buf_entry *entries =
@@ -468,7 +470,7 @@ static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
&delba_data, sizeof(delba_data));
- for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+ for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
struct iwl_mld_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
struct iwl_mld_reorder_buf_entry *entries =
@@ -530,7 +532,7 @@ int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
* before starting the BA session in the firmware
*/
baid_data = kzalloc(sizeof(*baid_data) +
- mld->trans->num_rx_queues * reorder_buf_size,
+ mld->trans->info.num_rxqs * reorder_buf_size,
GFP_KERNEL);
if (!baid_data)
return -ENOMEM;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.c b/drivers/net/wireless/intel/iwlwifi/mld/ap.c
index 571eabd0b511..26511b49d89a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/ap.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.c
@@ -11,6 +11,7 @@
#include "tx.h"
#include "power.h"
#include "key.h"
+#include "phy.h"
#include "iwl-utils.h"
#include "fw/api/sta.h"
@@ -269,6 +270,7 @@ int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *ctx;
int ret;
if (vif->type == NL80211_IFTYPE_AP)
@@ -314,6 +316,13 @@ int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
return iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
FW_CTXT_ACTION_MODIFY);
+ /* When the channel context was added, the link is not yet active, so
+ * min_def is always used. Update the PHY again here in case def should
+ * actually be used.
+ */
+ ctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+ iwl_mld_update_phy_chandef(mld, ctx);
+
return 0;
rm_bcast:
iwl_mld_remove_bcast_sta(mld, vif, link);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.c b/drivers/net/wireless/intel/iwlwifi/mld/coex.c
index 5f262bd43f21..32c727b3b391 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.c
@@ -24,17 +24,13 @@ int iwl_mld_send_bt_init_conf(struct iwl_mld *mld)
void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt)
{
- const struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+ const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
const struct iwl_bt_coex_profile_notif zero_notif = {};
/* zeroed structure means that BT is OFF */
bool bt_is_active = memcmp(notif, &zero_notif, sizeof(*notif));
- if (bt_is_active == mld->bt_is_active)
- return;
-
+ mld->last_bt_notif = *notif;
IWL_DEBUG_INFO(mld, "BT was turned %s\n", bt_is_active ? "ON" : "OFF");
- mld->bt_is_active = bt_is_active;
-
iwl_mld_emlsr_check_bt(mld);
}
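For illustration only: the hunk above keeps the "all-zero notification means BT is OFF" test while storing the whole notification. A hedged sketch of that pattern, with a deliberately simplified struct layout:

#include <stdbool.h>
#include <string.h>

/* illustrative layout only; the real notif carries per-band loss tables */
struct bt_coex_profile_notif {
	unsigned char wifi_loss_low_rssi[2][2];
	unsigned char wifi_loss_mid_high_rssi[2][2];
};

static bool bt_is_active(const struct bt_coex_profile_notif *notif)
{
	static const struct bt_coex_profile_notif zero;

	/* an all-zero payload is how the FW signals that BT is OFF */
	return memcmp(notif, &zero, sizeof(zero)) != 0;
}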
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index ee99298eebf5..c776543cbba5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -774,7 +774,7 @@ iwl_mld_update_ptk_rx_seq(struct iwl_mld *mld,
return;
for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
- for (int i = 1; i < mld->trans->num_rx_queues; i++)
+ for (int i = 1; i < mld->trans->info.num_rxqs; i++)
memcpy(mld_ptk_pn->q[i].pn[tid],
wowlan_status->ptk.aes_seq[tid].ccmp.pn,
IEEE80211_CCMP_PN_LEN);
@@ -1347,7 +1347,8 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
if (ret) {
IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret);
} else {
- mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+		/* Async notifications might send hcmds, which are not allowed in suspend */
+ iwl_mld_cancel_async_notifications(mld);
mld->fw_status.in_d3 = true;
}
@@ -1372,7 +1373,6 @@ int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan resume flow\n");
- mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
mld->fw_status.in_d3 = false;
iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
@@ -1437,7 +1437,7 @@ iwl_mld_suspend_set_ucast_pn(struct iwl_mld *mld, struct ieee80211_sta *sta,
ieee80211_get_key_rx_seq(key, tid, &seq);
/* and use the internal data for all queues */
- for (int que = 1; que < mld->trans->num_rx_queues; que++) {
+ for (int que = 1; que < mld->trans->info.num_rxqs; que++) {
u8 *cur_pn = mld_ptk_pn->q[que].pn[tid];
if (memcmp(max_pn, cur_pn, IEEE80211_CCMP_PN_LEN) < 0)
@@ -1757,7 +1757,7 @@ iwl_mld_send_proto_offload(struct iwl_mld *mld,
addrconf_addr_solict_mult(&wowlan_data->target_ipv6_addrs[i],
&solicited_addr);
- for (j = 0; j < c; j++)
+ for (j = 0; j < n_nsc && j < c; j++)
if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
&solicited_addr) == 0)
break;
@@ -1903,7 +1903,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
IWL_DEBUG_WOWLAN(mld, "Starting the wowlan resume flow\n");
- mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
if (!mld->fw_status.in_d3) {
IWL_DEBUG_WOWLAN(mld,
"Device_powered_off() was called during wowlan\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index 93f9f78e4276..352da8aa7898 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -546,6 +546,9 @@ iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
#endif
MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200);
+ debugfs_create_bool("rx_ts_ptp", 0600, debugfs_dir,
+ &mld->monitor.ptp_time);
+
/* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits which removes the target.)
*/
@@ -997,8 +1000,8 @@ void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
mld_link_dir = debugfs_create_dir("iwlmld", dir);
}
-static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
- size_t count, void *data)
+static ssize_t _iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data, bool v3)
{
struct ieee80211_link_sta *link_sta = data;
struct iwl_mld_link_sta *mld_link_sta;
@@ -1020,6 +1023,10 @@ static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
return -EIO;
+	/* input is in FW format (v2 or v3); convert to v3, then to the FW's rate version */
+ rate = iwl_v3_rate_from_v2_v3(cpu_to_le32(rate), v3);
+ rate = le32_to_cpu(iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3));
+
ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id,
partial ? IWL_TLC_DEBUG_PARTIAL_FIXED_RATE :
IWL_TLC_DEBUG_FIXED_RATE,
@@ -1033,6 +1040,18 @@ static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
return ret ? : count;
}
+static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ return _iwl_dbgfs_fixed_rate_write(mld, buf, count, data, false);
+}
+
+static ssize_t iwl_dbgfs_fixed_rate_v3_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ return _iwl_dbgfs_fixed_rate_write(mld, buf, count, data, true);
+}
+
static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf,
size_t count, void *data)
{
@@ -1072,6 +1091,7 @@ static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf,
LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(tlc_dhc, 64);
LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate, 64);
+LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate_v3, 64);
void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1079,5 +1099,6 @@ void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
struct dentry *dir)
{
LINK_STA_DEBUGFS_ADD_FILE(fixed_rate, dir, 0200);
+ LINK_STA_DEBUGFS_ADD_FILE(fixed_rate_v3, dir, 0200);
LINK_STA_DEBUGFS_ADD_FILE(tlc_dhc, dir, 0200);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
index 4b083d447ee2..9d2c087360e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
@@ -8,10 +8,10 @@
#include "fw/api/alive.h"
#include "fw/api/scan.h"
#include "fw/api/rx.h"
+#include "phy.h"
#include "fw/dbg.h"
#include "fw/pnvm.h"
#include "hcmd.h"
-#include "iwl-nvm-parse.h"
#include "power.h"
#include "mcc.h"
#include "led.h"
@@ -49,7 +49,7 @@ static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
/* Do not direct RSS traffic to Q 0 which is our fallback queue */
for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
cmd.indirection_table[i] =
- 1 + (i % (mld->trans->num_rx_queues - 1));
+ 1 + (i % (mld->trans->info.num_rxqs - 1));
netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
@@ -99,17 +99,23 @@ static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
}
}
+struct iwl_mld_alive_data {
+ __le32 sku_id[3];
+ bool valid;
+};
+
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ unsigned int expected_sz;
struct iwl_mld *mld =
container_of(notif_wait, struct iwl_mld, notif_wait);
struct iwl_trans *trans = mld->trans;
u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
UCODE_ALIVE_NTFY, 0);
- struct iwl_alive_ntf_v6 *palive;
- bool *alive_valid = data;
+ struct iwl_mld_alive_data *alive_data = data;
+ struct iwl_alive_ntf *palive;
struct iwl_umac_alive *umac;
struct iwl_lmac_alive *lmac1;
struct iwl_lmac_alive *lmac2 = NULL;
@@ -117,7 +123,19 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
u32 umac_error_table;
u16 status;
- if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
+ switch (version) {
+ case 6:
+ case 7:
+ expected_sz = sizeof(struct iwl_alive_ntf_v6);
+ break;
+ case 8:
+ expected_sz = sizeof(struct iwl_alive_ntf);
+ break;
+ default:
+ return false;
+ }
+
+ if (pkt_len != expected_sz)
return false;
palive = (void *)pkt->data;
@@ -129,12 +147,15 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
lmac2 = &palive->lmac_data[1];
status = le16_to_cpu(palive->status);
- trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
- trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
- trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+ BUILD_BUG_ON(sizeof(alive_data->sku_id) !=
+ sizeof(palive->sku_id.data));
+ memcpy(alive_data->sku_id, palive->sku_id.data,
+ sizeof(palive->sku_id.data));
IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
- trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);
+ le32_to_cpu(alive_data->sku_id[0]),
+ le32_to_cpu(alive_data->sku_id[1]),
+ le32_to_cpu(alive_data->sku_id[2]));
lmac_error_event_table =
le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
@@ -147,13 +168,13 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
~FW_ADDR_CACHE_CONTROL;
- if (umac_error_table >= trans->cfg->min_umac_error_event_table)
+ if (umac_error_table >= trans->mac_cfg->base->min_umac_error_event_table)
iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
else
IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
umac_error_table);
- *alive_valid = status == IWL_ALIVE_STATUS_OK;
+ alive_data->valid = status == IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mld,
"Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
@@ -171,6 +192,10 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
le16_to_cpu(palive->flags));
+ if (version >= 8)
+ IWL_DEBUG_FW(mld, "platform_id 0x%llx\n",
+ le64_to_cpu(palive->platform_id));
+
iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);
return true;
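For illustration only: the version-gated size check added to iwl_alive_fn above, reduced to a standalone helper. The expected sizes are stand-ins for the real notification struct sizes.

#include <stdbool.h>
#include <stddef.h>

static bool alive_len_ok(unsigned int version, size_t pkt_len)
{
	size_t expected;

	switch (version) {
	case 6:
	case 7:
		expected = 88;	/* stand-in for sizeof(struct iwl_alive_ntf_v6) */
		break;
	case 8:
		expected = 96;	/* stand-in for sizeof(struct iwl_alive_ntf) */
		break;
	default:
		return false;	/* unknown notification version */
	}

	return pkt_len == expected;
}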
@@ -208,24 +233,22 @@ static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
pc_data->pc_address);
}
-static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
+static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld,
+ struct iwl_mld_alive_data *alive_data)
{
- const struct fw_img *fw =
- iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
struct iwl_notification_wait alive_wait;
- bool alive_valid = false;
int ret;
lockdep_assert_wiphy(mld->wiphy);
iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
alive_cmd, ARRAY_SIZE(alive_cmd),
- iwl_alive_fn, &alive_valid);
+ iwl_alive_fn, alive_data);
iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
- ret = iwl_trans_start_fw(mld->trans, fw, true);
+ ret = iwl_trans_start_fw(mld->trans, mld->fw, IWL_UCODE_REGULAR, true);
if (ret) {
iwl_remove_notification(&mld->notif_wait, &alive_wait);
return ret;
@@ -239,28 +262,26 @@ static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
iwl_fw_dbg_error_collect(&mld->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
iwl_mld_print_alive_notif_timeout(mld);
- goto alive_failure;
+ return ret;
}
- if (!alive_valid) {
+ if (!alive_data->valid) {
IWL_ERR(mld, "Loaded firmware is not valid!\n");
- ret = -EIO;
- goto alive_failure;
+ return -EIO;
}
- iwl_trans_fw_alive(mld->trans, 0);
+ iwl_trans_fw_alive(mld->trans);
return 0;
-
-alive_failure:
- iwl_trans_stop_device(mld->trans);
- return ret;
}
static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
struct iwl_notification_wait init_wait;
- struct iwl_init_extended_cfg_cmd init_cfg = {};
+ struct iwl_init_extended_cfg_cmd init_cfg = {
+ .init_flags = cpu_to_le32(BIT(IWL_INIT_PHY)),
+ };
+ struct iwl_mld_alive_data alive_data = {};
static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
};
@@ -268,19 +289,15 @@ static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
lockdep_assert_wiphy(mld->wiphy);
- ret = iwl_mld_load_fw_wait_alive(mld);
+ ret = iwl_mld_load_fw_wait_alive(mld, &alive_data);
if (ret)
return ret;
- mld->trans->step_urm =
- !!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) &
- CNVI_PMU_STEP_FLOW_FORCE_URM);
-
ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
- &mld->fw->ucode_capa);
+ &mld->fw->ucode_capa, alive_data.sku_id);
if (ret) {
IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
- goto init_failure;
+ return ret;
}
iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
@@ -298,31 +315,24 @@ static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
if (ret) {
IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
iwl_remove_notification(&mld->notif_wait, &init_wait);
- goto init_failure;
+ return ret;
+ }
+
+ ret = iwl_mld_send_phy_cfg_cmd(mld);
+ if (ret) {
+ IWL_ERR(mld, "Failed to send PHY config command: %d\n", ret);
+ iwl_remove_notification(&mld->notif_wait, &init_wait);
+ return ret;
}
ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
MLD_INIT_COMPLETE_TIMEOUT);
if (ret) {
IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
- goto init_failure;
- }
-
- if (!mld->nvm_data) {
- mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
- if (IS_ERR(mld->nvm_data)) {
- ret = PTR_ERR(mld->nvm_data);
- mld->nvm_data = NULL;
- IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
- goto init_failure;
- }
+ return ret;
}
return 0;
-
-init_failure:
- iwl_trans_stop_device(mld->trans);
- return ret;
}
int iwl_mld_load_fw(struct iwl_mld *mld)
@@ -333,16 +343,12 @@ int iwl_mld_load_fw(struct iwl_mld *mld)
ret = iwl_trans_start_hw(mld->trans);
if (ret)
- goto err;
+ return ret;
ret = iwl_mld_run_fw_init_sequence(mld);
if (ret)
goto err;
- ret = iwl_mld_init_mcc(mld);
- if (ret)
- goto err;
-
mld->fw_status.running = true;
return 0;
@@ -361,9 +367,10 @@ void iwl_mld_stop_fw(struct iwl_mld *mld)
iwl_trans_stop_device(mld->trans);
- wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
-
- iwl_mld_purge_async_handlers_list(mld);
+	/* HW is stopped, so no more RX is coming. Cancel all notifications
+	 * in case they were sent just before stopping the HW.
+	 */
+ iwl_mld_cancel_async_notifications(mld);
mld->fw_status.running = false;
}
@@ -526,7 +533,7 @@ int iwl_mld_start_fw(struct iwl_mld *mld)
ret = iwl_mld_load_fw(mld);
if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
- goto error;
+ return ret;
}
IWL_DEBUG_INFO(mld, "uCode started.\n");
@@ -535,6 +542,10 @@ int iwl_mld_start_fw(struct iwl_mld *mld)
if (ret)
goto error;
+ ret = iwl_mld_init_mcc(mld);
+ if (ret)
+ goto error;
+
return 0;
error:
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
index e49e2260ac05..235b55e0fe59 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -22,9 +22,17 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
struct iwl_mld *mld = mld_vif->mld;
struct iwl_mld_link *link;
+ mld_vif->emlsr.blocked_reasons &= ~IWL_MLD_EMLSR_BLOCKED_ROC;
+
+ if (mld_vif->aux_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &mld_vif->aux_sta);
+
/* EMLSR is turned back on during recovery */
vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+ if (mld_vif->roc_activity != ROC_NUM_ACTIVITIES)
+ ieee80211_remain_on_channel_expired(mld->hw);
+
mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
for_each_mld_vif_valid_link(mld_vif, link) {
@@ -103,6 +111,24 @@ static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
IEEE80211_HE_MAC_CAP2_ACK_EN);
}
+static void iwl_mld_set_he_support(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd,
+ int cmd_ver)
+{
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_ap_support = 1;
+ } else {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_support = 1;
+ }
+}
+
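For illustration only: the helper above exists because command v2 and later versions store the HE flag in differently sized fields. A hedged sketch of that versioned-field selection, with a hypothetical layout:

#include <stdint.h>

/* illustrative layout; the real structs are wifi_gen_v2 / wifi_gen */
struct mac_cmd {
	union {
		struct { uint16_t he_support; } v2;	/* __le16 in cmd v2 */
		struct { uint8_t he_support; } v3;	/* plain byte later */
	} wifi_gen;
};

static void set_he_support(struct mac_cmd *cmd, int cmd_ver)
{
	if (cmd_ver == 2)
		cmd->wifi_gen.v2.he_support = 1;	/* cpu_to_le16(1) in the driver */
	else
		cmd->wifi_gen.v3.he_support = 1;
}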
/* fill the common part for all interface types */
static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
struct ieee80211_vif *vif,
@@ -112,6 +138,9 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
struct ieee80211_bss_conf *link_conf;
unsigned int link_id;
+ int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(MAC_CONF_GROUP,
+ MAC_CONFIG_CMD), 0);
lockdep_assert_wiphy(mld->wiphy);
@@ -138,12 +167,11 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
* and enable both when we have MLO.
*/
if (ieee80211_vif_is_mld(vif)) {
- if (vif->type == NL80211_IFTYPE_AP)
- cmd->he_ap_support = cpu_to_le16(1);
+ iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
else
- cmd->he_support = cpu_to_le16(1);
-
- cmd->eht_support = cpu_to_le32(1);
+ cmd->wifi_gen.eht_support = 1;
return;
}
@@ -151,10 +179,7 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
if (!link_conf->he_support)
continue;
- if (vif->type == NL80211_IFTYPE_AP)
- cmd->he_ap_support = cpu_to_le16(1);
- else
- cmd->he_support = cpu_to_le16(1);
+ iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
/* EHT, if supported, was already set above */
break;
@@ -226,11 +251,6 @@ static void iwl_mld_fill_mac_cmd_sta(struct iwl_mld *mld,
if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
cmd->filter_flags |=
cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
-
- if (vif->p2p)
- cmd->client.ctwin =
- cpu_to_le32(vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
- IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
}
static void iwl_mld_fill_mac_cmd_ap(struct iwl_mld *mld,
@@ -393,6 +413,7 @@ iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk,
iwl_mld_emlsr_tmp_non_bss_done_wk);
}
+ iwl_mld_init_internal_sta(&mld_vif->aux_sta);
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
index ec14d0736cee..49e2ce65557d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -10,6 +10,7 @@
#include "link.h"
#include "session-protect.h"
#include "d3.h"
+#include "fw/api/time-event.h"
enum iwl_mld_cca_40mhz_wa_status {
CCA_40_MHZ_WA_NONE,
@@ -59,6 +60,7 @@ enum iwl_mld_emlsr_blocked {
* loaded enough to justify EMLSR.
* @IWL_MLD_EMLSR_EXIT_RFI: Exit EMLSR due to RFI
* @IWL_MLD_EMLSR_EXIT_FW_REQUEST: Exit EMLSR because the FW requested it
+ * @IWL_MLD_EMLSR_EXIT_INVALID: internal exit reason due to invalid data
*/
enum iwl_mld_emlsr_exit {
IWL_MLD_EMLSR_EXIT_BLOCK = 0x1,
@@ -72,6 +74,7 @@ enum iwl_mld_emlsr_exit {
IWL_MLD_EMLSR_EXIT_CHAN_LOAD = 0x100,
IWL_MLD_EMLSR_EXIT_RFI = 0x200,
IWL_MLD_EMLSR_EXIT_FW_REQUEST = 0x400,
+ IWL_MLD_EMLSR_EXIT_INVALID = 0x800,
};
/**
@@ -123,8 +126,6 @@ struct iwl_mld_emlsr {
* Only valid for STA. (FIXME: needs to be per link)
* @num_associated_stas: number of associated STAs. Relevant only for AP mode.
* @ap_ibss_active: whether the AP/IBSS was started
- * @roc_activity: the id of the roc_activity running. Relevant for p2p device
- * only. Set to %ROC_NUM_ACTIVITIES when not in use.
* @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
* environment is too loaded, we work around this by reconnecting to the
* same AP with 20 MHz. This manages the status of the workaround.
@@ -140,6 +141,9 @@ struct iwl_mld_emlsr {
* @use_ps_poll: use ps_poll frames
* @disable_bf: disable beacon filter
* @dbgfs_slink: debugfs symlink for this interface
+ * @roc_activity: the id of the roc_activity running. Relevant for STA and
+ * p2p device only. Set to %ROC_NUM_ACTIVITIES when not in use.
+ * @aux_sta: station used for remain on channel. Used in P2P device.
*/
struct iwl_mld_vif {
/* Add here fields that need clean up on restart */
@@ -151,7 +155,6 @@ struct iwl_mld_vif {
struct ieee80211_key_conf __rcu *bigtks[2];
u8 num_associated_stas;
bool ap_ibss_active;
- u32 roc_activity;
enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool beacon_inject_active;
@@ -174,6 +177,8 @@ struct iwl_mld_vif {
bool disable_bf;
struct dentry *dbgfs_slink;
#endif
+ enum iwl_roc_activity roc_activity;
+ struct iwl_mld_int_sta aux_sta;
};
static inline struct iwl_mld_vif *
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index 82a4979a3af3..d0f56189ad3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -580,7 +580,7 @@ iwl_mld_get_omi_bw_reduction_pointers(struct iwl_mld *mld,
*link_sta = NULL;
- if (mld->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_SC)
+ if (mld->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
return NULL;
vif = iwl_mld_get_bss_vif(mld);
@@ -782,10 +782,11 @@ iwl_mld_init_link(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
iwl_mld_init_internal_sta(&mld_link->bcast_sta);
iwl_mld_init_internal_sta(&mld_link->mcast_sta);
- iwl_mld_init_internal_sta(&mld_link->aux_sta);
+ iwl_mld_init_internal_sta(&mld_link->mon_sta);
- wiphy_delayed_work_init(&mld_link->rx_omi.finished_work,
- iwl_mld_omi_bw_finished_work);
+ if (!mld->fw_status.in_hw_restart)
+ wiphy_delayed_work_init(&mld_link->rx_omi.finished_work,
+ iwl_mld_omi_bw_finished_work);
return iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h
index 42b7bdcbd741..39f04aae5579 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h
@@ -39,7 +39,7 @@ struct iwl_probe_resp_data {
* @vif: the vif this link belongs to
* @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
* @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
- * @aux_sta: station used for remain on channel. Used in P2P device.
+ * @mon_sta: station used for TX injection in monitor interface.
* @link_id: over the air link ID
* @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs,
* but higher layers work differently, so we store the keys here for
@@ -72,7 +72,7 @@ struct iwl_mld_link {
struct ieee80211_vif *vif;
struct iwl_mld_int_sta bcast_sta;
struct iwl_mld_int_sta mcast_sta;
- struct iwl_mld_int_sta aux_sta;
+ struct iwl_mld_int_sta mon_sta;
u8 link_id;
struct {
@@ -89,7 +89,7 @@ struct iwl_mld_link {
struct iwl_probe_resp_data __rcu *probe_resp_data;
};
-/* Cleanup function for struct iwl_mld_phy, will be called in restart */
+/* Cleanup function for struct iwl_mld_link, will be called in restart */
static inline void
iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link)
{
@@ -105,8 +105,8 @@ iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link)
iwl_mld_free_internal_sta(mld, &link->bcast_sta);
if (link->mcast_sta.sta_id != IWL_INVALID_STA)
iwl_mld_free_internal_sta(mld, &link->mcast_sta);
- if (link->aux_sta.sta_id != IWL_INVALID_STA)
- iwl_mld_free_internal_sta(mld, &link->aux_sta);
+ if (link->mon_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &link->mon_sta);
}
/* Convert a percentage from [0,100] to [0,255] */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
index a4a612afb3b3..f7faa87b8ba6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
@@ -21,7 +21,7 @@ static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
{
struct iwl_mld_low_latency *ll = &mld->low_latency;
bool global_low_latency = false;
- u8 num_rx_q = mld->trans->num_rx_queues;
+ u8 num_rx_q = mld->trans->info.num_rxqs;
for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
u32 total_vo_vi_pkts = 0;
@@ -131,12 +131,12 @@ int iwl_mld_low_latency_init(struct iwl_mld *mld)
struct iwl_mld_low_latency *ll = &mld->low_latency;
unsigned long ts = jiffies;
- ll->pkts_counters = kcalloc(mld->trans->num_rx_queues,
+ ll->pkts_counters = kcalloc(mld->trans->info.num_rxqs,
sizeof(*ll->pkts_counters), GFP_KERNEL);
if (!ll->pkts_counters)
return -ENOMEM;
- for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++)
spin_lock_init(&ll->pkts_counters[q].lock);
wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);
@@ -167,7 +167,7 @@ void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
memset(ll->window_start, 0, sizeof(ll->window_start));
memset(ll->result, 0, sizeof(ll->result));
- for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++)
memset(ll->pkts_counters[q].vo_vi, 0,
sizeof(ll->pkts_counters[q].vo_vi));
}
@@ -276,7 +276,7 @@ void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
return;
if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
- queue >= mld->trans->num_rx_queues))
+ queue >= mld->trans->info.num_rxqs))
return;
if (mld->low_latency.stopped)
@@ -324,7 +324,7 @@ void iwl_mld_low_latency_restart(struct iwl_mld *mld)
ll->window_start[mac] = 0;
low_latency |= ll->result[mac];
- for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
spin_lock_bh(&ll->pkts_counters[q].lock);
ll->pkts_counters[q].vo_vi[mac] = 0;
spin_unlock_bh(&ll->pkts_counters[q].lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index 68d97d3b8f02..4ba050397632 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -243,7 +243,6 @@ static void iwl_mac_hw_set_flags(struct iwl_mld *mld)
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
- ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
@@ -305,7 +304,7 @@ static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
wiphy->max_remain_on_channel_duration = 10000;
- wiphy->hw_version = mld->trans->hw_id;
+ wiphy->hw_version = mld->trans->info.hw_id;
wiphy->hw_timestamp_max_peers = 1;
@@ -351,9 +350,9 @@ static void iwl_mac_hw_set_misc(struct iwl_mld *mld)
hw->queues = IEEE80211_NUM_ACS;
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
- hw->netdev_features |= mld->cfg->features;
+ hw->netdev_features |= mld->trans->mac_cfg->base->features;
- hw->max_tx_fragments = mld->trans->max_skb_frags;
+ hw->max_tx_fragments = mld->trans->info.max_skb_frags;
hw->max_listen_interval = IWL_MLD_CONN_LISTEN_INTERVAL;
hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
@@ -376,6 +375,24 @@ static void iwl_mac_hw_set_misc(struct iwl_mld *mld)
static int iwl_mld_hw_verify_preconditions(struct iwl_mld *mld)
{
+ int ratecheck;
+
+ /* check for rates version 3 */
+ ratecheck =
+ (iwl_fw_lookup_cmd_ver(mld->fw, TX_CMD, 0) >= 11) +
+ (iwl_fw_lookup_notif_ver(mld->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) >= 6) +
+ (iwl_fw_lookup_notif_ver(mld->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP, TX_CMD, 0) >= 9);
+
+ if (ratecheck != 0 && ratecheck != 5) {
+ IWL_ERR(mld, "Firmware has inconsistent rates\n");
+ return -EINVAL;
+ }
+
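For illustration only: the ratecheck above is an all-or-nothing consistency test, where each of the five version lookups contributes 0 or 1 and only totals of 0 (all old) or 5 (all new) are acceptable. A standalone sketch:

#include <stdbool.h>

static bool rates_v3_consistent(int tx_ver, int tlc_notif_ver, int mpdu_ver,
				int no_data_ver, int long_tx_ver)
{
	/* each probe contributes 0 or 1; mixed results mean a broken FW */
	int ratecheck = (tx_ver >= 11) + (tlc_notif_ver >= 4) +
			(mpdu_ver >= 6) + (no_data_ver >= 4) +
			(long_tx_ver >= 9);

	return ratecheck == 0 || ratecheck == 5;
}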
/* 11ax is expected to be enabled for all supported devices */
if (WARN_ON(!mld->nvm_data->sku_cap_11ax_enable))
return -EINVAL;
@@ -541,15 +558,6 @@ void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
(IS_ENABLED(CONFIG_PM_SLEEP) && iwl_mld_no_wowlan_suspend(mld)))
iwl_mld_stop_fw(mld);
- /* HW is stopped, no more coming RX. OTOH, the worker can't run as the
- * wiphy lock is held. Cancel it in case it was scheduled just before
- * we stopped the HW.
- */
- wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
-
- /* Empty out the list, as the worker won't do that */
- iwl_mld_purge_async_handlers_list(mld);
-
/* Clear in_hw_restart flag when stopping the hw, as mac80211 won't
* execute the restart.
*/
@@ -897,9 +905,8 @@ void iwl_mld_change_chanctx(struct ieee80211_hw *hw,
return;
}
update:
- phy->chandef = *chandef;
- iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY);
+ iwl_mld_update_phy_chandef(mld, ctx);
}
static u8
@@ -1031,12 +1038,19 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);
if (vif->type == NL80211_IFTYPE_MONITOR) {
- /* TODO: task=sniffer add sniffer station */
+ ret = iwl_mld_add_mon_sta(mld, vif, link);
+ if (ret)
+ goto deactivate_link;
+
mld->monitor.p80 =
iwl_mld_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
}
return 0;
+
+deactivate_link:
+ if (mld_link->active)
+ iwl_mld_deactivate_link(mld, link);
err:
RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
return ret;
@@ -1062,7 +1076,8 @@ void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mld_deactivate_link(mld, link);
- /* TODO: task=sniffer remove sniffer station */
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ iwl_mld_remove_mon_sta(mld, vif, link);
if (n_active > 1) {
/* Indicate to mac80211 that EML is disabled */
@@ -1258,9 +1273,14 @@ iwl_mld_mac80211_link_info_changed(struct ieee80211_hw *hw,
}
static void
-iwl_mld_smps_wa(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable)
+iwl_mld_smps_workaround(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ bool workaround_required =
+ iwl_fw_lookup_cmd_ver(mld->fw, MAC_PM_POWER_TABLE, 0) < 2;
+
+ if (!workaround_required)
+ return;
/* Send the device-level power commands since the
* firmware checks the POWER_TABLE_CMD's POWER_SAVE_EN bit to
@@ -1307,7 +1327,7 @@ void iwl_mld_mac80211_vif_cfg_changed(struct ieee80211_hw *hw,
}
if (changes & BSS_CHANGED_PS) {
- iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ iwl_mld_smps_workaround(mld, vif, vif->cfg.ps);
iwl_mld_update_mac_power(mld, vif, false);
}
@@ -1320,13 +1340,22 @@ iwl_mld_mac80211_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
if (WARN_ON(!hw_req->req.n_channels ||
hw_req->req.n_channels >
mld->fw->ucode_capa.n_scan_channels))
return -EINVAL;
- return iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies);
+ ret = iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies);
+ if (!ret) {
+		/* We will be busy scanning, so the counters may not reflect
+		 * reality. Stop checking them until the scan ends.
+		 */
+ iwl_mld_start_ignoring_tpt_updates(mld);
+ }
+
+ return ret;
}
static void
@@ -1342,8 +1371,11 @@ iwl_mld_mac80211_cancel_hw_scan(struct ieee80211_hw *hw,
* cancel scan before ieee80211_scan_work() could run.
* To handle that, simply return if the scan is not running.
*/
- if (mld->scan.status & IWL_MLD_SCAN_REGULAR)
+ if (mld->scan.status & IWL_MLD_SCAN_REGULAR) {
iwl_mld_scan_stop(mld, IWL_MLD_SCAN_REGULAR, true);
+		/* Scan is over, we can check the tpt counters again */
+ iwl_mld_stop_ignoring_tpt_updates(mld);
+ }
}
static int
@@ -1720,7 +1752,7 @@ static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
FW_CTXT_ACTION_MODIFY);
if (ret)
return ret;
- iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ iwl_mld_smps_workaround(mld, vif, vif->cfg.ps);
}
/* MFP is set by default before the station is authorized.
@@ -1763,7 +1795,7 @@ static int iwl_mld_move_sta_state_down(struct iwl_mld *mld,
&mld_vif->emlsr.check_tpt_wk);
iwl_mld_reset_cca_40mhz_workaround(mld, vif);
- iwl_mld_smps_wa(mld, vif, true);
+ iwl_mld_smps_workaround(mld, vif, true);
}
/* once we move into assoc state, need to update the FW to
@@ -2004,7 +2036,7 @@ static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld,
struct ieee80211_key_conf *key,
struct iwl_mld_ptk_pn **ptk_pn)
{
- u8 num_rx_queues = mld->trans->num_rx_queues;
+ u8 num_rx_queues = mld->trans->info.num_rxqs;
int keyidx = key->keyidx;
struct ieee80211_key_seq seq;
@@ -2460,15 +2492,17 @@ iwl_mld_change_vif_links(struct ieee80211_hw *hw,
added |= BIT(0);
for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- if (removed & BIT(i))
+ if (removed & BIT(i) && !WARN_ON(!old[i]))
iwl_mld_remove_link(mld, old[i]);
}
for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
if (added & BIT(i)) {
link_conf = link_conf_dereference_protected(vif, i);
- if (WARN_ON(!link_conf))
- return -EINVAL;
+ if (!link_conf) {
+ err = -EINVAL;
+ goto remove_added_links;
+ }
err = iwl_mld_add_link(mld, link_conf);
if (err)
@@ -2503,7 +2537,11 @@ remove_added_links:
iwl_mld_remove_link(mld, link_conf);
}
- return err;
+ if (WARN_ON(!iwl_mld_error_before_recovery(mld)))
+ return err;
+
+ /* reconfig will fix us anyway */
+ return 0;
}
static int iwl_mld_change_sta_links(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
index daca14e208bd..19cb562e7a73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <net/cfg80211.h>
@@ -158,7 +158,7 @@ iwl_mld_get_regdomain(struct iwl_mld *mld,
}
IWL_DEBUG_LAR(mld, "MCC update response version: %d\n", resp_ver);
- regd = iwl_parse_nvm_mcc_info(mld->trans->dev, mld->cfg,
+ regd = iwl_parse_nvm_mcc_info(mld->trans,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index 73d2166a4c25..1774bb84dd3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -26,6 +26,8 @@
#include "hcmd.h"
#include "fw/api/location.h"
+#include "iwl-nvm-parse.h"
+
#define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
@@ -60,7 +62,7 @@ static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld)
VISIBLE_IF_IWLWIFI_KUNIT
void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
- const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw,
struct ieee80211_hw *hw, struct dentry *dbgfs_dir)
{
mld->dev = trans->dev;
@@ -192,6 +194,7 @@ static const struct iwl_hcmd_names iwl_mld_system_names[] = {
HCMD_NAME(SOC_CONFIGURATION_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
HCMD_NAME(FW_ERROR_RECOVERY_CMD),
+ HCMD_NAME(RFI_CONFIG_CMD),
HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
HCMD_NAME(SYSTEM_STATISTICS_CMD),
HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
@@ -287,7 +290,9 @@ static const struct iwl_hcmd_names iwl_mld_statistics_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = {
- HCMD_NAME(STORED_BEACON_NTF),
+ HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION),
+ HCMD_NAME(WOWLAN_INFO_NOTIFICATION),
+ HCMD_NAME(D3_END_NOTIFICATION),
};
/* Please keep this array *SORTED* by hex value.
@@ -322,33 +327,40 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size);
static void
iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
{
- const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
static const u8 no_reclaim_cmds[] = {TX_CMD};
- struct iwl_trans_config trans_cfg = {
- .op_mode = op_mode,
- /* Rx is not supported yet, but add it to avoid warnings */
- .rx_buf_size = iwl_amsdu_size_to_rxb_size(),
- .command_groups = iwl_mld_groups,
- .command_groups_size = ARRAY_SIZE(iwl_mld_groups),
- .fw_reset_handshake = true,
- .queue_alloc_cmd_ver =
- iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(DATA_PATH_GROUP,
- SCD_QUEUE_CONFIG_CMD),
- 0),
- .no_reclaim_cmds = no_reclaim_cmds,
- .n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
- .cb_data_offs = offsetof(struct ieee80211_tx_info,
- driver_data[2]),
- };
struct iwl_trans *trans = mld->trans;
+ u32 eckv_value;
- trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->iml = mld->fw->iml;
- trans->iml_len = mld->fw->iml_len;
- trans->wide_cmd_header = true;
+ iwl_bios_setup_step(trans, &mld->fwrt);
+ iwl_uefi_get_step_table(trans);
- iwl_trans_configure(trans, &trans_cfg);
+ if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
+ IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
+ else
+ trans->conf.ext_32khz_clock_valid = !!eckv_value;
+
+ trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size();
+ trans->conf.command_groups = iwl_mld_groups;
+ trans->conf.command_groups_size = ARRAY_SIZE(iwl_mld_groups);
+ trans->conf.fw_reset_handshake = true;
+ trans->conf.queue_alloc_cmd_ver =
+ iwl_fw_lookup_cmd_ver(mld->fw, WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ 0);
+ trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
+ BUILD_BUG_ON(sizeof(no_reclaim_cmds) >
+ sizeof(trans->conf.no_reclaim_cmds));
+ memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
+ sizeof(no_reclaim_cmds));
+ trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+ trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+ trans->conf.wide_cmd_header = true;
+
+ iwl_trans_op_mode_enter(trans, op_mode);
}
/*
@@ -359,13 +371,12 @@ iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
#define NUM_FW_LOAD_RETRIES 3
static struct iwl_op_mode *
-iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
struct ieee80211_hw *hw;
struct iwl_op_mode *op_mode;
struct iwl_mld *mld;
- u32 eckv_value;
int ret;
/* Allocate and initialize a new hardware device */
@@ -383,16 +394,13 @@ iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir);
+	/* we'll verify later that it matches across commands */
+ mld->fw_rates_ver_3 = iwl_fw_lookup_cmd_ver(mld->fw, TX_CMD, 0) >= 11;
+
iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir);
iwl_mld_get_bios_tables(mld);
iwl_uefi_get_sgom_table(trans, &mld->fwrt);
- iwl_uefi_get_step_table(trans);
- if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
- IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
- else
- trans->ext_32khz_clock_valid = !!eckv_value;
- iwl_bios_setup_step(trans, &mld->fwrt);
mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt);
iwl_mld_hw_set_regulatory(mld);
@@ -411,10 +419,17 @@ iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
break;
}
+ if (!ret) {
+ mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
+ if (IS_ERR(mld->nvm_data)) {
+			ret = PTR_ERR(mld->nvm_data);
+			IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
+ }
+ }
+
if (ret) {
wiphy_unlock(mld->wiphy);
rtnl_unlock();
- iwl_fw_flush_dumps(&mld->fwrt);
goto err;
}
@@ -475,8 +490,9 @@ iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode)
iwl_mld_ptp_remove(mld);
iwl_mld_leds_exit(mld);
- wiphy_lock(mld->wiphy);
iwl_mld_thermal_exit(mld);
+
+ wiphy_lock(mld->wiphy);
iwl_mld_low_latency_stop(mld);
iwl_mld_deinit_time_sync(mld);
wiphy_unlock(mld->wiphy);
@@ -638,7 +654,8 @@ iwl_mld_nic_error(struct iwl_op_mode *op_mode,
* It might not actually be true that we'll restart, but the
* setting doesn't matter if we're going to be unbound either.
*/
- if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+ if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT &&
+ mld->fw_status.running)
mld->fw_status.in_hw_restart = true;
}
@@ -664,6 +681,13 @@ static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode,
{
struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+	/* SW reset can happen for a TOP error w/o a NIC error, so also
+	 * abort the scan and set in_hw_restart here; when we had a NIC
+	 * error, both were already done.
+	 */
+ iwl_mld_report_scan_aborted(mld);
+ mld->fw_status.in_hw_restart = true;
+
	/* Do restart only if the following conditions are met:
* - we consider the FW as running
* - The trigger that brought us here is defined as one that requires
@@ -692,7 +716,6 @@ static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
wiphy_lock(mld->wiphy);
- mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_mld_stop_fw(mld);
mld->fw_status.in_d3 = false;
wiphy_unlock(mld->wiphy);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
index a4a16da6ebf3..1a2c44f44eff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
@@ -116,6 +116,7 @@
* @monitor.ampdu_toggle: the state of the previous packet to track A-MPDU
* @monitor.cur_aid: current association id tracked by the sniffer
* @monitor.cur_bssid: current bssid tracked by the sniffer
+ * @monitor.ptp_time: set the Rx mactime using the device's PTP clock time
 * @monitor.p80: primary channel position relative to the whole bandwidth, in
* steps of 80 MHz
* @fw_id_to_link_sta: maps a fw id of a sta to the corresponding
@@ -126,7 +127,6 @@
* cleanup using iwl_mld_free_internal_sta
* @netdetect: indicates the FW is in suspend mode with netdetect configured
* @p2p_device_vif: points to the p2p device vif if exists
- * @bt_is_active: indicates that BT is active
* @dev: pointer to device struct. For printing purposes
* @trans: pointer to the transport layer
* @cfg: pointer to the device configuration
@@ -154,6 +154,8 @@
* @radio_kill: bitmap of radio kill status
* @radio_kill.hw: radio is killed by hw switch
 * @radio_kill.ct: radio is killed because the device is too hot
+ * @power_budget_mw: maximum cTDP power budget as defined for this system and
+ * device
* @addresses: device MAC addresses.
* @scan: instance of the scan object
* @wowlan: WoWLAN support data.
@@ -174,6 +176,7 @@
* @mcast_filter_cmd: pointer to the multicast filter command.
* @mgmt_tx_ant: stores the last TX antenna index; used for setting
* TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure).
+ * @fw_rates_ver_3: FW rates are in version 3
* @low_latency: low-latency manager.
* @tzone: thermal zone device's data
* @cooling_dev: cooling device's related data
@@ -184,6 +187,7 @@
* @ptp_data: data of the PTP clock
* @time_sync: time sync data.
* @ftm_initiator: FTM initiator data
+ * @last_bt_notif: last received BT Coex notif
*/
struct iwl_mld {
/* Add here fields that need clean up on restart */
@@ -201,19 +205,20 @@ struct iwl_mld {
#ifdef CONFIG_IWLWIFI_DEBUGFS
__le16 cur_aid;
u8 cur_bssid[ETH_ALEN];
+ bool ptp_time;
#endif
} monitor;
#ifdef CONFIG_PM_SLEEP
bool netdetect;
#endif /* CONFIG_PM_SLEEP */
struct ieee80211_vif *p2p_device_vif;
- bool bt_is_active;
+ struct iwl_bt_coex_profile_notif last_bt_notif;
);
struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
/* And here fields that survive a fw restart */
struct device *dev;
struct iwl_trans *trans;
- const struct iwl_cfg *cfg;
+ const struct iwl_rf_cfg *cfg;
const struct iwl_fw *fw;
struct ieee80211_hw *hw;
struct wiphy *wiphy;
@@ -241,6 +246,8 @@ struct iwl_mld {
ct:1;
} radio_kill;
+ u32 power_budget_mw;
+
struct mac_address addresses[IWL_MLD_MAX_ADDRESSES];
struct iwl_mld_scan scan;
#ifdef CONFIG_PM_SLEEP
@@ -266,6 +273,8 @@ struct iwl_mld {
u8 mgmt_tx_ant;
+ bool fw_rates_ver_3;
+
struct iwl_mld_low_latency low_latency;
bool ibss_manager;
@@ -286,7 +295,7 @@ struct iwl_mld {
memset((void *)&(_ptr)->zeroed_on_hw_restart, 0, \
sizeof((_ptr)->zeroed_on_hw_restart))
-/* Cleanup function for struct iwl_mld_vif, will be called in restart */
+/* Cleanup function for struct iwl_mld, will be called in restart */
static inline void
iwl_cleanup_mld(struct iwl_mld *mld)
{
@@ -410,7 +419,7 @@ iwl_mld_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
bool is_lb = band == NL80211_BAND_2GHZ;
- if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
return is_lb ? rate + IWL_FIRST_OFDM_RATE : rate;
/* CCK is not allowed in 5 GHz */
@@ -482,7 +491,7 @@ iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
struct ieee80211_rx_status *rx_status, int queue);
void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
- const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw,
struct ieee80211_hw *hw, struct dentry *dbgfs_dir);
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
index a870e169e265..dba5379ed009 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -52,7 +52,8 @@ static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
HOW(BT_COEX) \
HOW(CHAN_LOAD) \
HOW(RFI) \
- HOW(FW_REQUEST)
+ HOW(FW_REQUEST) \
+ HOW(INVALID)
static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
@@ -325,23 +326,44 @@ static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- struct iwl_esr_mode_notif *notif = (void *)data;
+ const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ enum iwl_mvm_fw_esr_recommendation action;
+ const struct iwl_esr_mode_notif *notif = NULL;
+
+ if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP,
+ ESR_MODE_NOTIF, 0) > 1) {
+ notif = (void *)data;
+ action = le32_to_cpu(notif->action);
+ } else {
+ const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data;
+
+ action = le32_to_cpu(notif_v1->action);
+ }
if (!iwl_mld_vif_has_emlsr_cap(vif))
return;
- switch (le32_to_cpu(notif->action)) {
+ switch (action) {
case ESR_RECOMMEND_LEAVE:
+ if (notif)
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW recommend leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
+
iwl_mld_exit_emlsr(mld_vif->mld, vif,
IWL_MLD_EMLSR_EXIT_FW_REQUEST,
iwl_mld_get_primary_link(vif));
break;
- case ESR_RECOMMEND_ENTER:
case ESR_FORCE_LEAVE:
+ if (notif)
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW force leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
+ fallthrough;
+ case ESR_RECOMMEND_ENTER:
default:
IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
- le32_to_cpu(notif->action));
+ action);
}
}
@@ -523,7 +545,7 @@ void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
}
/* Sum up RX and TX MPDUs from the different queues/links */
- for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
struct iwl_mld_per_q_mpdu_counter *queue_counter =
&mld_sta->mpdu_counters[q];
@@ -635,6 +657,42 @@ s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
#undef RSSI_THRESHOLD
}
+#define IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH -69
+#define IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH -63
+#define IWL_MLD_BT_COEX_WIFI_LOSS_THRESH 7
+
+VISIBLE_IF_IWLWIFI_KUNIT
+bool
+iwl_mld_bt_allows_emlsr(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
+ bool check_entry)
+{
+ int bt_penalty, rssi_thresh;
+ s32 link_rssi;
+
+ if (WARN_ON_ONCE(!link->bss))
+ return false;
+
+ link_rssi = MBM_TO_DBM(link->bss->signal);
+ rssi_thresh = check_entry ?
+ IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH :
+ IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH;
+	/* No valid RSSI - treat it as below the threshold */
+ if (!link_rssi)
+ link_rssi = rssi_thresh - 1;
+
+ if (link_rssi > rssi_thresh)
+ bt_penalty = max(mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][0],
+ mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][1]);
+ else
+ bt_penalty = max(mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][0],
+ mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][1]);
+
+ IWL_DEBUG_EHT(mld, "BT penalty for link-id %0X is %d\n",
+ link->link_id, bt_penalty);
+ return bt_penalty < IWL_MLD_BT_COEX_WIFI_LOSS_THRESH;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_bt_allows_emlsr);
+
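For illustration only: a standalone sketch of the BT-penalty decision in iwl_mld_bt_allows_emlsr above, using the same thresholds. The real code takes the max over two per-antenna loss values from the notification; the sketch reduces those arrays to two scalars.

#include <stdbool.h>

#define DISABLE_EMLSR_RSSI_THRESH	(-69)
#define ENABLE_EMLSR_RSSI_THRESH	(-63)
#define WIFI_LOSS_THRESH		7

static bool bt_allows_emlsr(int link_rssi, int loss_mid_high_rssi,
			    int loss_low_rssi, bool check_entry)
{
	/* entry uses the stricter (higher) threshold than staying in */
	int thresh = check_entry ? ENABLE_EMLSR_RSSI_THRESH :
				   DISABLE_EMLSR_RSSI_THRESH;
	int penalty;

	if (!link_rssi)			/* no valid RSSI: assume low RSSI */
		link_rssi = thresh - 1;

	penalty = link_rssi > thresh ? loss_mid_high_rssi : loss_low_rssi;

	return penalty < WIFI_LOSS_THRESH;
}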
static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
struct ieee80211_vif *vif,
@@ -643,13 +701,14 @@ iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
{
struct wiphy *wiphy = mld->wiphy;
struct ieee80211_bss_conf *conf;
- enum iwl_mld_emlsr_exit ret = 0;
+ u32 ret = 0;
conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
if (WARN_ON_ONCE(!conf))
- return false;
+ return IWL_MLD_EMLSR_EXIT_INVALID;
- if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
+ if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
+ !iwl_mld_bt_allows_emlsr(mld, conf, true))
ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;
if (link->signal <
@@ -731,7 +790,7 @@ iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
return 10;
}
-VISIBLE_IF_IWLWIFI_KUNIT bool
+static bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
struct ieee80211_vif *vif,
const struct iwl_mld_link_sel_data *a,
@@ -772,8 +831,8 @@ iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
if (a->chandef->width <= b->chandef->width)
return true;
- bw_a = nl80211_chan_width_to_mhz(a->chandef->width);
- bw_b = nl80211_chan_width_to_mhz(b->chandef->width);
+ bw_a = cfg80211_chandef_get_width(a->chandef);
+ bw_b = cfg80211_chandef_get_width(b->chandef);
ratio = bw_a / bw_b;
switch (ratio) {
@@ -788,10 +847,9 @@ iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
return false;
}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_channel_load_allows_emlsr);
-static bool
-iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
+VISIBLE_IF_IWLWIFI_KUNIT u32
+iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
struct iwl_mld_link_sel_data *a,
struct iwl_mld_link_sel_data *b)
{
@@ -800,12 +858,36 @@ iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
u32 reason_mask = 0;
/* Per-link considerations */
- if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) ||
- iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false))
- return false;
-
- if (a->chandef->chan->band == b->chandef->chan->band)
- reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
+ reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true);
+ if (reason_mask)
+ return reason_mask;
+
+ reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false);
+ if (reason_mask)
+ return reason_mask;
+
+ if (a->chandef->chan->band == b->chandef->chan->band) {
+ const struct cfg80211_chan_def *c_low = a->chandef;
+ const struct cfg80211_chan_def *c_high = b->chandef;
+ u32 c_low_upper_edge, c_high_lower_edge;
+
+ if (c_low->chan->center_freq > c_high->chan->center_freq)
+ swap(c_low, c_high);
+
+ c_low_upper_edge = c_low->chan->center_freq +
+ cfg80211_chandef_get_width(c_low) / 2;
+ c_high_lower_edge = c_high->chan->center_freq -
+ cfg80211_chandef_get_width(c_high) / 2;
+
+ if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
+ c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) {
+ /* This case is fine - HW/FW can deal with it, there's
+ * enough separation between the two channels.
+ */
+ } else {
+ reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
+ }
+ }
if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;
@@ -818,11 +900,11 @@ iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
nl80211_chan_width_to_mhz(a->chandef->width),
nl80211_chan_width_to_mhz(b->chandef->width));
iwl_mld_print_emlsr_exit(mld, reason_mask);
- return false;
}
- return true;
+ return reason_mask;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state);
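For illustration only: the same-band exception above allows 5 GHz same-band EMLSR when the two channels straddle the 5330..5490 MHz gap. A standalone sketch of that edge computation (frequencies and widths in MHz):

#include <stdbool.h>

static bool same_band_5ghz_separation_ok(int freq_a, int width_a,
					 int freq_b, int width_b)
{
	int low_upper_edge, high_lower_edge;

	/* order the two channels so that a is the lower one */
	if (freq_a > freq_b) {
		int tf = freq_a, tw = width_a;

		freq_a = freq_b;
		width_a = width_b;
		freq_b = tf;
		width_b = tw;
	}

	low_upper_edge = freq_a + width_a / 2;	/* top of the lower channel */
	high_lower_edge = freq_b - width_b / 2;	/* bottom of the upper one */

	/* enough separation only if the channels straddle 5330..5490 MHz */
	return low_upper_edge <= 5330 && high_lower_edge >= 5490;
}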
/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256
@@ -850,7 +932,7 @@ unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
*primary_id = a->link_id;
- if (!iwl_mld_valid_emlsr_pair(vif, a, b))
+ if (iwl_mld_emlsr_pair_state(vif, a, b))
return 0;
primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
@@ -892,8 +974,11 @@ static void _iwl_mld_select_links(struct iwl_mld *mld,
n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
&best_idx);
- if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ if (!n_data) {
+ IWL_DEBUG_EHT(mld,
+ "Couldn't find a valid grade for any link!\n");
return;
+ }
/* Default to selecting the single best link */
best_link = &data[best_idx];
@@ -961,27 +1046,41 @@ static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ const struct iwl_bt_coex_profile_notif zero_notif = {};
struct iwl_mld *mld = mld_vif->mld;
struct ieee80211_bss_conf *link;
unsigned int link_id;
+ const struct iwl_bt_coex_profile_notif *notif = &mld->last_bt_notif;
- if (!mld->bt_is_active) {
- iwl_mld_retry_emlsr(mld, vif);
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
return;
- }
- /* BT is turned ON but we are not in EMLSR, nothing to do */
- if (!iwl_mld_emlsr_active(vif))
+ /* zeroed structure means that BT is OFF */
+ if (!memcmp(notif, &zero_notif, sizeof(*notif))) {
+ iwl_mld_retry_emlsr(mld, vif);
return;
-
- /* In EMLSR and BT is turned ON */
+ }
for_each_vif_active_link(vif, link, link_id) {
+ bool emlsr_active, emlsr_allowed;
+
if (WARN_ON(!link->chanreq.oper.chan))
continue;
- if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
- iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
+ if (link->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
+ continue;
+
+ emlsr_active = iwl_mld_emlsr_active(vif);
+ emlsr_allowed = iwl_mld_bt_allows_emlsr(mld, link,
+ !emlsr_active);
+ if (emlsr_allowed && !emlsr_active) {
+ iwl_mld_retry_emlsr(mld, vif);
+ return;
+ }
+
+ if (!emlsr_allowed && emlsr_active) {
+ iwl_mld_exit_emlsr(mld, vif,
+ IWL_MLD_EMLSR_EXIT_BT_COEX,
iwl_mld_get_primary_link(vif));
return;
}
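/*
 * The rewritten iterator above replaces the old boolean "BT on/off"
 * check with per-link decisions: an all-zero
 * iwl_bt_coex_profile_notif still means BT is off (so EMLSR is simply
 * retried), otherwise each active 2.4 GHz link is evaluated by
 * iwl_mld_bt_allows_emlsr(), using entry criteria when EMLSR is
 * currently inactive and exit criteria when it is active.
 */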
@@ -1074,3 +1173,69 @@ void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
iwl_mld_int_mlo_scan(mld, vif);
}
+
+static void iwl_mld_ignore_tpt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+ struct iwl_mld_sta *mld_sta;
+ bool *start = (void *)data;
+
+ /* check_tpt_wk is only used when TPT block isn't set */
+ if (mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT ||
+ !IWL_MLD_AUTO_EML_ENABLE || !mld_vif->ap_sta)
+ return;
+
+ mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+	/* We only count MPDUs for the AP sta in an MLO connection */
+ if (!mld_sta->mpdu_counters)
+ return;
+
+ if (*start) {
+ wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk);
+ IWL_DEBUG_EHT(mld, "TPT check disabled\n");
+ return;
+ }
+
+ /* Clear the counters so we start from the beginning */
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
+ struct iwl_mld_per_q_mpdu_counter *queue_counter =
+ &mld_sta->mpdu_counters[q];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+
+ spin_unlock_bh(&queue_counter->lock);
+ }
+
+ /* Schedule the check in 5 seconds */
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk,
+ round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+ IWL_DEBUG_EHT(mld, "TPT check enabled\n");
+}
+
+void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld)
+{
+ bool start = true;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_ignore_tpt_iter,
+ &start);
+}
+
+void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld)
+{
+ bool start = false;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_ignore_tpt_iter,
+ &start);
+}
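/*
 * A hedged usage sketch (not part of this patch) of the two helpers
 * above, bracketing an operation that would skew throughput
 * measurements, e.g. an MLO scan:
 *
 *	iwl_mld_start_ignoring_tpt_updates(mld);
 *	...scan or other disruptive work...
 *	iwl_mld_stop_ignoring_tpt_updates(mld);
 *
 * start_ignoring cancels check_tpt_wk, while stop_ignoring zeroes the
 * per-queue MPDU counters and re-arms the work with an
 * IWL_MLD_TPT_COUNT_WINDOW delay, so the first window measured after
 * the scan never mixes in pre-scan traffic.
 */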
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
index 4fb1fdbe3df9..9afa3d6ea649 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
@@ -37,7 +37,7 @@ static inline bool iwl_mld_vif_has_emlsr_cap(struct ieee80211_vif *vif)
return ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION &&
ieee80211_vif_is_mld(vif) &&
vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP &&
- !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->hw_rf_id);
+ !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->info.hw_rf_id);
}
static inline int
@@ -158,10 +158,16 @@ struct iwl_mld_link_sel_data {
};
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
-bool iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
- struct ieee80211_vif *vif,
- const struct iwl_mld_link_sel_data *a,
- const struct iwl_mld_link_sel_data *b);
+u32 iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *a,
+ struct iwl_mld_link_sel_data *b);
+
+bool iwl_mld_bt_allows_emlsr(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ bool entry_criteria);
#endif
+void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld);
+void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld);
+
#endif /* __iwl_mld_mlo_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
index fc18cba8aaa8..c0e62d46aba6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -183,47 +183,6 @@ static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
}
static void
-iwl_mld_handle_stored_beacon_notif(struct iwl_mld *mld,
- struct iwl_rx_packet *pkt)
-{
- unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
- struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
- struct ieee80211_rx_status rx_status = {};
- struct sk_buff *skb;
- u32 size = le32_to_cpu(sb->common.byte_count);
-
- if (size == 0)
- return;
-
- if (pkt_len < struct_size(sb, data, size))
- return;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb) {
- IWL_ERR(mld, "alloc_skb failed\n");
- return;
- }
-
- /* update rx_status according to the notification's metadata */
- rx_status.mactime = le64_to_cpu(sb->common.tsf);
- /* TSF as indicated by the firmware is at INA time */
- rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
- rx_status.device_timestamp = le32_to_cpu(sb->common.system_time);
- rx_status.band =
- iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band));
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel),
- rx_status.band);
-
- /* copy the data */
- skb_put_data(skb, sb->data, size);
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
-
- /* pass it as regular rx to mac80211 */
- ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
-}
-
-static void
iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt)
{
@@ -345,12 +304,15 @@ CMD_VERSIONS(session_prot_notif,
CMD_VERSIONS(missed_beacon_notif,
CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
CMD_VERSIONS(tx_resp_notif,
- CMD_VER_ENTRY(8, iwl_tx_resp))
+ CMD_VER_ENTRY(8, iwl_tx_resp)
+ CMD_VER_ENTRY(9, iwl_tx_resp))
CMD_VERSIONS(compressed_ba_notif,
CMD_VER_ENTRY(5, iwl_compressed_ba_notif)
- CMD_VER_ENTRY(6, iwl_compressed_ba_notif))
+ CMD_VER_ENTRY(6, iwl_compressed_ba_notif)
+ CMD_VER_ENTRY(7, iwl_compressed_ba_notif))
CMD_VERSIONS(tlc_notif,
- CMD_VER_ENTRY(3, iwl_tlc_update_notif))
+ CMD_VER_ENTRY(3, iwl_tlc_update_notif)
+ CMD_VER_ENTRY(4, iwl_tlc_update_notif))
CMD_VERSIONS(mu_mimo_grp_notif,
CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
CMD_VERSIONS(channel_switch_start_notif,
@@ -361,8 +323,6 @@ CMD_VERSIONS(ct_kill_notif,
CMD_VER_ENTRY(2, ct_kill_notif))
CMD_VERSIONS(temp_notif,
CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
-CMD_VERSIONS(stored_beacon_notif,
- CMD_VER_ENTRY(4, iwl_stored_beacon_notif))
CMD_VERSIONS(roc_notif,
CMD_VER_ENTRY(1, iwl_roc_notif))
CMD_VERSIONS(probe_resp_data_notif,
@@ -378,7 +338,8 @@ CMD_VERSIONS(bt_coex_notif,
CMD_VERSIONS(beacon_notification,
CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
CMD_VERSIONS(emlsr_mode_notif,
- CMD_VER_ENTRY(1, iwl_esr_mode_notif))
+ CMD_VER_ENTRY(1, iwl_esr_mode_notif_v1)
+ CMD_VER_ENTRY(2, iwl_esr_mode_notif))
CMD_VERSIONS(emlsr_trans_fail_notif,
CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
CMD_VERSIONS(uapsd_misbehaving_ap_notif,
@@ -473,8 +434,6 @@ const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif)
RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
mu_mimo_grp_notif, RX_HANDLER_SYNC)
- RX_HANDLER_NO_OBJECT(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
- stored_beacon_notif, RX_HANDLER_SYNC)
RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
probe_resp_data_notif)
RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
@@ -646,7 +605,7 @@ void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (unlikely(queue >= mld->trans->num_rx_queues))
+ if (unlikely(queue >= mld->trans->info.num_rxqs))
return;
if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
@@ -707,10 +666,14 @@ void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk)
}
}
-void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld)
+void iwl_mld_cancel_async_notifications(struct iwl_mld *mld)
{
struct iwl_async_handler_entry *entry, *tmp;
+ lockdep_assert_wiphy(mld->wiphy);
+
+ wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
+
spin_lock_bh(&mld->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.h b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
index 2eaa1d4e138e..adcdd9dec192 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
@@ -15,7 +15,7 @@ void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk);
-void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld);
+void iwl_mld_cancel_async_notifications(struct iwl_mld *mld);
enum iwl_mld_object_type {
IWL_MLD_OBJECT_TYPE_NONE,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.c b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
index 2fbc8090088b..d5a32ee56b92 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/phy.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
@@ -50,6 +50,9 @@ static void iwl_mld_chanctx_usage_iter(void *_data, u8 *mac,
if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx)
continue;
+ if (vif->type == NL80211_IFTYPE_AP && link_conf->ftm_responder)
+ data->use_def = true;
+
if (iwl_mld_chanctx_fils_enabled(vif, data->ctx))
data->use_def = true;
}
@@ -153,3 +156,43 @@ int iwl_mld_phy_fw_action(struct iwl_mld *mld,
return ret;
}
+
+static u32 iwl_mld_get_phy_config(struct iwl_mld *mld)
+{
+ u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
+ FW_PHY_CFG_RX_CHAIN);
+ u32 valid_rx_ant = iwl_mld_get_valid_rx_ant(mld);
+ u32 valid_tx_ant = iwl_mld_get_valid_tx_ant(mld);
+
+ phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
+ valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
+
+ return mld->fw->phy_config & phy_config;
+}
+
+int iwl_mld_send_phy_cfg_cmd(struct iwl_mld *mld)
+{
+ const struct iwl_tlv_calib_ctrl *default_calib =
+ &mld->fw->default_calib[IWL_UCODE_REGULAR];
+ struct iwl_phy_cfg_cmd_v3 cmd = {
+ .phy_cfg = cpu_to_le32(iwl_mld_get_phy_config(mld)),
+ .calib_control.event_trigger = default_calib->event_trigger,
+ .calib_control.flow_trigger = default_calib->flow_trigger,
+ .phy_specific_cfg = mld->fwrt.phy_filters,
+ };
+
+ IWL_INFO(mld, "Sending Phy CFG command: 0x%x\n", cmd.phy_cfg);
+
+ return iwl_mld_send_cmd_pdu(mld, PHY_CONFIGURATION_CMD, &cmd);
+}
+
+void iwl_mld_update_phy_chandef(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ struct cfg80211_chan_def *chandef =
+ iwl_mld_get_chandef_from_chanctx(mld, ctx);
+
+ phy->chandef = *chandef;
+ iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY);
+}
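/*
 * The masking in iwl_mld_get_phy_config() keeps every bit of the
 * firmware-provided phy_config except the TX/RX chain fields, which
 * are rewritten from the currently valid antenna masks; a sketch of
 * the same idea with the field names abbreviated:
 *
 *	mask = ~(TX_CHAIN | RX_CHAIN);		// keep all other bits
 *	mask |= valid_tx << TX_CHAIN_POS;	// then allow these chains
 *	mask |= valid_rx << RX_CHAIN_POS;
 *	return fw_phy_config & mask;
 *
 * Since the final AND intersects the firmware value with the mask, the
 * chain fields can only be narrowed by this helper, never widened.
 */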
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.h b/drivers/net/wireless/intel/iwlwifi/mld/phy.h
index 2212a89321b7..0deaf179f07c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.h
@@ -52,4 +52,9 @@ iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld,
struct ieee80211_chanctx_conf *ctx);
u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef);
+int iwl_mld_send_phy_cfg_cmd(struct iwl_mld *mld);
+
+void iwl_mld_update_phy_chandef(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx);
+
#endif /* __iwl_mld_phy_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.c b/drivers/net/wireless/intel/iwlwifi/mld/power.c
index 2f16c174b57e..8cc276041360 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/power.c
@@ -253,6 +253,9 @@ static void iwl_mld_power_build_cmd(struct iwl_mld *mld,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+ if (iwl_fw_lookup_cmd_ver(mld->fw, MAC_PM_POWER_TABLE, 0) >= 2)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ENABLE_SMPS_MSK);
+
/* firmware supports LPRX for beacons at rate 1 Mbps or 6 Mbps only */
if (link_conf->beacon_rate &&
(link_conf->beacon_rate->bitrate == 10 ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
index d5c3f853d96c..5ee38fc168c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
@@ -21,7 +21,7 @@
static int iwl_mld_get_systime(struct iwl_mld *mld, u32 *gp2)
{
- *gp2 = iwl_read_prph(mld->trans, mld->trans->cfg->gp2_reg_addr);
+ *gp2 = iwl_read_prph(mld->trans, mld->trans->mac_cfg->base->gp2_reg_addr);
if (*gp2 == 0x5a5a5a5a)
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
index a75af8c1e8ab..326c300470ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -63,9 +63,9 @@ void iwl_mld_get_bios_tables(struct iwl_mld *mld)
/* we don't fail if the table is not available */
}
- ret = iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
- if (ret)
- IWL_DEBUG_RADIO(mld, "failed to read UATS table (%d)\n", ret);
+ iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
+
+ iwl_bios_get_phy_filters(&mld->fwrt);
}
static int iwl_mld_geo_sar_init(struct iwl_mld *mld)
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
index b87faca23ceb..e85f45bce79a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/roc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
@@ -31,13 +31,54 @@ iwl_mld_vif_iter_emlsr_block_roc(void *data, u8 *mac, struct ieee80211_vif *vif)
*result = ret;
}
+struct iwl_mld_roc_iter_data {
+ enum iwl_roc_activity activity;
+ struct ieee80211_vif *vif;
+ bool found;
+};
+
+static void iwl_mld_find_roc_vif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_roc_iter_data *roc_data = data;
+
+ if (mld_vif->roc_activity != roc_data->activity)
+ return;
+
+ /* The FW supports one ROC of each type simultaneously */
+ if (WARN_ON(roc_data->found)) {
+ roc_data->vif = NULL;
+ return;
+ }
+
+ roc_data->found = true;
+ roc_data->vif = vif;
+}
+
+static struct ieee80211_vif *
+iwl_mld_find_roc_vif(struct iwl_mld *mld, enum iwl_roc_activity activity)
+{
+ struct iwl_mld_roc_iter_data roc_data = {
+ .activity = activity,
+ .found = false,
+ };
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_find_roc_vif_iter,
+ &roc_data);
+
+ return roc_data.vif;
+}
+
int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel *channel, int duration,
enum ieee80211_roc_type type)
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- struct iwl_mld_int_sta *aux_sta;
+ struct iwl_mld_int_sta *aux_sta = &mld_vif->aux_sta;
struct iwl_roc_req cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
};
@@ -49,38 +90,40 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
lockdep_assert_wiphy(mld->wiphy);
- ieee80211_iterate_active_interfaces_mtx(mld->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mld_vif_iter_emlsr_block_roc,
- &ret);
- if (ret)
- return ret;
-
- /* TODO: task=Hotspot 2.0 */
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ vif->type != NL80211_IFTYPE_STATION) {
IWL_ERR(mld, "NOT SUPPORTED: ROC on vif->type %d\n",
vif->type);
return -EOPNOTSUPP;
}
- switch (type) {
- case IEEE80211_ROC_TYPE_NORMAL:
- activity = ROC_ACTIVITY_P2P_DISC;
- break;
- case IEEE80211_ROC_TYPE_MGMT_TX:
- activity = ROC_ACTIVITY_P2P_NEG;
- break;
- default:
- WARN_ONCE(1, "Got an invalid P2P ROC type\n");
- return -EINVAL;
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ activity = ROC_ACTIVITY_P2P_DISC;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ activity = ROC_ACTIVITY_P2P_NEG;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+ return -EINVAL;
+ }
+ } else {
+ activity = ROC_ACTIVITY_HOTSPOT;
}
- if (WARN_ON(mld_vif->roc_activity != ROC_NUM_ACTIVITIES))
+ /* The FW supports one ROC of each type simultaneously */
+ if (WARN_ON(iwl_mld_find_roc_vif(mld, activity)))
return -EBUSY;
- /* No MLO on P2P device */
- aux_sta = &mld_vif->deflink.aux_sta;
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_block_roc,
+ &ret);
+ if (ret)
+ return ret;
ret = iwl_mld_add_aux_sta(mld, aux_sta);
if (ret)
@@ -91,9 +134,6 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
cmd.channel_info.channel = cpu_to_le32(channel->hw_value);
cmd.channel_info.band = iwl_mld_nl80211_band_to_fw(channel->band);
cmd.channel_info.width = IWL_PHY_CHANNEL_MODE20;
- /* TODO: task=Hotspot 2.0, revisit those parameters when we add an ROC
- * on the BSS vif
- */
cmd.max_delay = cpu_to_le32(AUX_ROC_MAX_DELAY);
cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
@@ -105,6 +145,7 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
return ret;
}
+
mld_vif->roc_activity = activity;
return 0;
@@ -136,9 +177,9 @@ static void iwl_mld_destroy_roc(struct iwl_mld *mld,
* we can flush the Tx on the queues
*/
- iwl_mld_flush_link_sta_txqs(mld, mld_vif->deflink.aux_sta.sta_id);
+ iwl_mld_flush_link_sta_txqs(mld, mld_vif->aux_sta.sta_id);
- iwl_mld_remove_aux_sta(mld, vif, &vif->bss_conf);
+ iwl_mld_remove_aux_sta(mld, vif);
}
int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
@@ -156,8 +197,8 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
lockdep_assert_wiphy(mld->wiphy);
- /* TODO: task=Hotspot 2.0 */
- if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ vif->type != NL80211_IFTYPE_STATION))
return -EOPNOTSUPP;
	/* No ROC activity running; it's probably already done */
@@ -192,10 +233,10 @@ void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
{
const struct iwl_roc_notif *notif = (void *)pkt->data;
u32 activity = le32_to_cpu(notif->activity);
- /* TODO: task=Hotspot 2.0 - roc can run on BSS */
- struct ieee80211_vif *vif = mld->p2p_device_vif;
struct iwl_mld_vif *mld_vif;
+ struct ieee80211_vif *vif;
+ vif = iwl_mld_find_roc_vif(mld, activity);
if (WARN_ON(!vif))
return;
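/*
 * With ROC now allowed on both P2P_DEVICE and STATION interfaces, the
 * notification handler can no longer assume mld->p2p_device_vif;
 * iwl_mld_find_roc_vif() instead walks the active interfaces and
 * matches on mld_vif->roc_activity, e.g.:
 *
 *	vif = iwl_mld_find_roc_vif(mld, ROC_ACTIVITY_HOTSPOT);
 *
 * The WARN_ON in the iterator enforces the "one ROC of each type"
 * firmware limitation by returning NULL when a duplicate is found.
 */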
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
index c4f189bcece2..ce0093d5c638 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -33,17 +33,17 @@ struct iwl_mld_rx_phy_data {
u32 gp2_on_air_rise;
u16 phy_info;
u8 energy_a, energy_b;
- u8 channel;
};
static void
-iwl_mld_fill_phy_data(struct iwl_rx_mpdu_desc *desc,
+iwl_mld_fill_phy_data(struct iwl_mld *mld,
+ struct iwl_rx_mpdu_desc *desc,
struct iwl_mld_rx_phy_data *phy_data)
{
phy_data->phy_info = le16_to_cpu(desc->phy_info);
- phy_data->rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ phy_data->rate_n_flags = iwl_v3_rate_from_v2_v3(desc->v3.rate_n_flags,
+ mld->fw_rates_ver_3);
phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
- phy_data->channel = desc->v3.channel;
phy_data->energy_a = desc->v3.energy_a;
phy_data->energy_b = desc->v3.energy_b;
phy_data->data0 = desc->v3.phy_data0;
@@ -1162,8 +1162,6 @@ static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld,
static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
struct iwl_mld_rx_phy_data *phy_data,
- struct iwl_rx_mpdu_desc *mpdu_desc,
- struct ieee80211_hdr *hdr,
int queue)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
@@ -1172,47 +1170,15 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK;
- if (WARN_ON_ONCE(phy_data->with_data && (!mpdu_desc || !hdr)))
- return;
-
- /* Keep packets with CRC errors (and with overrun) for monitor mode
- * (otherwise the firmware discards them) but mark them as bad.
- */
- if (phy_data->with_data &&
- (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
- !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK)))) {
- IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(mpdu_desc->status));
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- }
-
phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
- if (phy_data->with_data &&
- likely(!(phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
- rx_status->mactime =
- le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise);
-
- /* TSF as indicated by the firmware is at INA time */
- rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
- } else {
+ if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
phy_data->info_type =
le32_get_bits(phy_data->data1,
IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
- }
-
- /* management stuff on default queue */
- if (!queue && phy_data->with_data &&
- unlikely(ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))) {
- rx_status->boottime_ns = ktime_get_boottime_ns();
-
- if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
- mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_FOUND;
- }
/* set the preamble flag if appropriate */
- if (format == RATE_MCS_CCK_MSK &&
+ if (format == RATE_MCS_MOD_TYPE_CCK &&
phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
@@ -1237,7 +1203,7 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
}
/* must be before L-SIG data */
- if (format == RATE_MCS_HE_MSK)
+ if (format == RATE_MCS_MOD_TYPE_HE)
iwl_mld_rx_he(mld, skb, phy_data, queue);
iwl_mld_decode_lsig(skb, phy_data);
@@ -1245,40 +1211,53 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
rx_status->device_timestamp = phy_data->gp2_on_air_rise;
/* using TLV format and must be after all fixed len fields */
- if (format == RATE_MCS_EHT_MSK)
+ if (format == RATE_MCS_MOD_TYPE_EHT)
iwl_mld_rx_eht(mld, skb, phy_data, queue);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (unlikely(mld->monitor.on))
+ if (unlikely(mld->monitor.on)) {
iwl_mld_add_rtap_sniffer_config(mld, skb);
+
+ if (mld->monitor.ptp_time) {
+ u64 adj_time =
+ iwl_mld_ptp_get_adj_time(mld,
+ phy_data->gp2_on_air_rise *
+ NSEC_PER_USEC);
+
+ rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC);
+ rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64;
+ rx_status->flag &= ~RX_FLAG_MACTIME;
+ }
+ }
#endif
- if (format != RATE_MCS_CCK_MSK && is_sgi)
+ if (format != RATE_MCS_MOD_TYPE_CCK && is_sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
switch (format) {
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
break;
- case RATE_MCS_VHT_MSK:
- case RATE_MCS_HE_MSK:
- case RATE_MCS_EHT_MSK:
- if (format == RATE_MCS_VHT_MSK) {
+ case RATE_MCS_MOD_TYPE_VHT:
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
+ if (format == RATE_MCS_MOD_TYPE_VHT) {
rx_status->encoding = RX_ENC_VHT;
- } else if (format == RATE_MCS_HE_MSK) {
+ } else if (format == RATE_MCS_MOD_TYPE_HE) {
rx_status->encoding = RX_ENC_HE;
rx_status->he_dcm =
!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
- } else if (format == RATE_MCS_EHT_MSK) {
+ } else if (format == RATE_MCS_MOD_TYPE_EHT) {
rx_status->encoding = RX_ENC_EHT;
}
- rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+ rx_status->nss = u32_get_bits(rate_n_flags,
+ RATE_MCS_NSS_MSK) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
break;
@@ -1735,15 +1714,11 @@ static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld,
}
static void
-iwl_mld_fill_rx_status_band_freq(struct iwl_mld_rx_phy_data *phy_data,
- struct iwl_rx_mpdu_desc *mpdu_desc,
- struct ieee80211_rx_status *rx_status)
+iwl_mld_fill_rx_status_band_freq(struct ieee80211_rx_status *rx_status,
+ u8 band, u8 channel)
{
- enum nl80211_band band;
-
- band = BAND_IN_RX_STATUS(mpdu_desc->mac_phy_idx);
rx_status->band = iwl_mld_phy_band_to_nl80211(band);
- rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
}
@@ -1758,7 +1733,7 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
struct sk_buff *skb;
size_t mpdu_desc_size = sizeof(*mpdu_desc);
bool drop = false;
- u8 crypto_len = 0;
+ u8 crypto_len = 0, band;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
u32 mpdu_len;
enum iwl_mld_reorder_result reorder_res;
@@ -1788,7 +1763,7 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
hdr = (void *)(pkt->data + mpdu_desc_size);
- iwl_mld_fill_phy_data(mpdu_desc, &phy_data);
+ iwl_mld_fill_phy_data(mld, mpdu_desc, &phy_data);
if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
/* If the device inserted padding it means that (it thought)
@@ -1802,7 +1777,11 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
/* this is needed early */
- iwl_mld_fill_rx_status_band_freq(&phy_data, mpdu_desc, rx_status);
+ band = u8_get_bits(mpdu_desc->mac_phy_band,
+ IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK);
+ iwl_mld_fill_rx_status_band_freq(rx_status, band,
+ mpdu_desc->v3.channel);
+
rcu_read_lock();
@@ -1814,7 +1793,36 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU))
iwl_mld_rx_update_ampdu_ref(mld, &phy_data, rx_status);
- iwl_mld_rx_fill_status(mld, skb, &phy_data, mpdu_desc, hdr, queue);
+ /* Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
+ if (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+ !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
+ IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(mpdu_desc->status));
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+
+ if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ rx_status->mactime =
+ le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise);
+
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ }
+
+	/* management frames on the default queue */
+ if (!queue && unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))) {
+ rx_status->boottime_ns = ktime_get_boottime_ns();
+
+ if (mld->scan.pass_all_sched_res ==
+ SCHED_SCAN_PASS_ALL_STATE_ENABLED)
+ mld->scan.pass_all_sched_res =
+ SCHED_SCAN_PASS_ALL_STATE_FOUND;
+ }
+
+ iwl_mld_rx_fill_status(mld, skb, &phy_data, queue);
if (iwl_mld_rx_crypto(mld, sta, hdr, rx_status, mpdu_desc, queue,
le32_to_cpu(pkt->len_n_flags), &crypto_len))
@@ -1857,7 +1865,7 @@ void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
enum iwl_mld_internal_rxq_notif_type type,
const void *notif_payload, u32 notif_payload_size)
{
- u8 num_rx_queues = mld->trans->num_rx_queues;
+ u8 num_rx_queues = mld->trans->info.num_rxqs;
struct {
struct iwl_rxq_sync_cmd sync_cmd;
struct iwl_mld_internal_rxq_notif notif;
@@ -1956,6 +1964,7 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct sk_buff *skb;
u32 format, rssi;
+ u8 channel;
if (unlikely(mld->fw_status.in_hw_restart))
return;
@@ -1968,14 +1977,16 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
desc = (void *)pkt->data;
rssi = le32_to_cpu(desc->rssi);
+ channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
+
phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
- phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
phy_data.data0 = desc->phy_info[0];
phy_data.data1 = desc->phy_info[1];
phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
- phy_data.rate_n_flags = le32_to_cpu(desc->rate);
+ phy_data.rate_n_flags = iwl_v3_rate_from_v2_v3(desc->rate,
+ mld->fw_rates_ver_3);
phy_data.with_data = false;
BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec));
@@ -2018,13 +2029,13 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
break;
}
- rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(phy_data.channel,
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
- iwl_mld_rx_fill_status(mld, skb, &phy_data, NULL, NULL, queue);
+ iwl_mld_rx_fill_status(mld, skb, &phy_data, queue);
/* No more radiotap info should be added after this point.
* Mark it as mac header for upper layers to know where
@@ -2037,17 +2048,17 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
* may be up to 8 spatial streams.
*/
switch (format) {
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
break;
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
rx_status->nss =
le32_get_bits(desc->rx_vec[2],
RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
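/*
 * After this refactor iwl_mld_rx_fill_status() only touches state that
 * exists in both RX paths, so the monitor no-data path above can share
 * it; everything needing the MPDU descriptor or the 802.11 header
 * (CRC/overrun flags, TSF mactime, beacon boottime) moved into
 * iwl_mld_rx_mpdu() itself. A sketch of the resulting split:
 *
 *	iwl_mld_rx_mpdu():			// has desc + hdr
 *		set CRC flags, mactime, boottime;
 *		iwl_mld_rx_fill_status(mld, skb, &phy_data, queue);
 *	iwl_mld_rx_monitor_no_data():		// phy data only
 *		iwl_mld_rx_fill_status(mld, skb, &phy_data, queue);
 */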
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
index 7ec04318ec2f..3fce7cd2d512 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -1920,6 +1920,9 @@ void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
IWL_DEBUG_SCAN(mld, "Scan link is no longer valid\n");
ieee80211_scan_completed(mld->hw, &info);
+
+	/* Scan is over, we can check the TPT counters again */
+ iwl_mld_stop_ignoring_tpt_updates(mld);
} else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mld->hw);
mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
index 332a7aecec2d..8fb51209b4a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -401,9 +401,11 @@ static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
const struct iwl_sta_cfg_cmd *cmd)
{
- int ret = iwl_mld_send_cmd_pdu(mld,
- WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
- cmd);
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
+ int cmd_len = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) > 1 ?
+ sizeof(*cmd) :
+ sizeof(struct iwl_sta_cfg_cmd_v1);
+ int ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, cmd_len);
if (ret)
IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
return ret;
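/*
 * A sketch of the versioned-length pattern used above: the command
 * struct grew in v2, so older firmware is sent only the v1 prefix.
 * This assumes (not shown in this diff) that struct iwl_sta_cfg_cmd
 * extends struct iwl_sta_cfg_cmd_v1 without reordering fields:
 *
 *	len = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) > 1 ?
 *		sizeof(struct iwl_sta_cfg_cmd) :
 *		sizeof(struct iwl_sta_cfg_cmd_v1);
 *	ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, len);
 */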
@@ -660,7 +662,7 @@ iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
if (mld->fw_status.in_hw_restart)
return 0;
- dup_data = kcalloc(mld->trans->num_rx_queues, sizeof(*dup_data),
+ dup_data = kcalloc(mld->trans->info.num_rxqs, sizeof(*dup_data),
GFP_KERNEL);
if (!dup_data)
return -ENOMEM;
@@ -673,7 +675,7 @@ iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
* This thus allows receiving a packet with seqno 0 and the
* retry bit set as the very first packet on a new TID.
*/
- for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++)
memset(dup_data[q].last_seq, 0xff,
sizeof(dup_data[q].last_seq));
mld_sta->dup_data = dup_data;
@@ -695,13 +697,13 @@ static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld,
sta->tdls || !ieee80211_vif_is_mld(vif))
return;
- mld_sta->mpdu_counters = kcalloc(mld->trans->num_rx_queues,
+ mld_sta->mpdu_counters = kcalloc(mld->trans->info.num_rxqs,
sizeof(*mld_sta->mpdu_counters),
GFP_KERNEL);
if (!mld_sta->mpdu_counters)
return;
- for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++)
spin_lock_init(&mld_sta->mpdu_counters[q].lock);
}
@@ -947,7 +949,7 @@ static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld,
int queue, size;
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
- mld->trans->cfg->min_txq_size);
+ mld->trans->mac_cfg->base->min_txq_size);
queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size,
IWL_WATCHDOG_DISABLED);
@@ -1087,6 +1089,24 @@ int iwl_mld_add_aux_sta(struct iwl_mld *mld,
0, NULL, IWL_MAX_TID_COUNT);
}
+int iwl_mld_add_mon_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_MONITOR))
+ return -EINVAL;
+
+ return iwl_mld_add_internal_sta(mld, &mld_link->mon_sta,
+ STATION_TYPE_BCAST_MGMT,
+ mld_link->fw_id, NULL,
+ IWL_MAX_TID_COUNT);
+}
+
static void iwl_mld_remove_internal_sta(struct iwl_mld *mld,
struct iwl_mld_int_sta *internal_sta,
bool flush, u8 tid)
@@ -1140,6 +1160,19 @@ void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
}
void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ vif->type != NL80211_IFTYPE_STATION))
+ return;
+
+ iwl_mld_remove_internal_sta(mld, &mld_vif->aux_sta, false,
+ IWL_MAX_TID_COUNT);
+}
+
+void iwl_mld_remove_mon_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link)
{
@@ -1148,11 +1181,10 @@ void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
if (WARN_ON(!mld_link))
return;
- /* TODO: Hotspot 2.0 */
- if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ if (WARN_ON(vif->type != NL80211_IFTYPE_MONITOR))
return;
- iwl_mld_remove_internal_sta(mld, &mld_link->aux_sta, false,
+ iwl_mld_remove_internal_sta(mld, &mld_link->mon_sta, false,
IWL_MAX_TID_COUNT);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.h b/drivers/net/wireless/intel/iwlwifi/mld/sta.h
index ddcffd7b9fde..1897b121aae2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.h
@@ -247,6 +247,10 @@ int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
int iwl_mld_add_aux_sta(struct iwl_mld *mld,
struct iwl_mld_int_sta *internal_sta);
+int iwl_mld_add_mon_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
@@ -256,6 +260,9 @@ void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
struct ieee80211_bss_conf *link);
void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+
+void iwl_mld_remove_mon_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
index 0715bbc31031..f633cb1cf510 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/stats.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
@@ -191,11 +191,12 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
break;
}
- if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) {
+ if (format == RATE_MCS_MOD_TYPE_CCK ||
+ format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) {
int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
/* add the offset needed to get to the legacy ofdm indices */
- if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
rate += IWL_FIRST_OFDM_RATE;
switch (rate) {
@@ -240,7 +241,7 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
- if (format == RATE_MCS_HT_MSK)
+ if (format == RATE_MCS_MOD_TYPE_HT)
rinfo->mcs = RATE_HT_MCS_INDEX(rate_n_flags);
else
rinfo->mcs = u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
@@ -249,10 +250,10 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
switch (format) {
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -293,10 +294,10 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
rinfo->he_dcm = 1;
break;
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
rinfo->flags |= RATE_INFO_FLAGS_MCS;
break;
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
break;
}
@@ -467,12 +468,18 @@ static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw,
old_load = phy->avg_channel_load_not_by_us;
new_load = le32_to_cpu(per_phy[phy->fw_id].channel_load_not_by_us);
- if (IWL_FW_CHECK(phy->mld, new_load > 100, "Invalid channel load %u\n",
- new_load))
+
+ if (IWL_FW_CHECK(phy->mld,
+ new_load != IWL_STATS_UNKNOWN_CHANNEL_LOAD &&
+ new_load > 100,
+ "Invalid channel load %u\n", new_load))
return;
- /* give a weight of 0.5 for the old value */
- phy->avg_channel_load_not_by_us = (new_load >> 1) + (old_load >> 1);
+ if (new_load != IWL_STATS_UNKNOWN_CHANNEL_LOAD) {
+ /* update giving a weight of 0.5 for the old value */
+ phy->avg_channel_load_not_by_us = (new_load >> 1) +
+ (old_load >> 1);
+ }
iwl_mld_emlsr_check_chan_load(hw, phy, old_load);
}
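/*
 * A worked example of the running average above, which weights the old
 * and new samples equally using shifts: with old_load = 40 and
 * new_load = 60,
 *
 *	avg = (60 >> 1) + (40 >> 1) = 30 + 20 = 50
 *
 * Samples reported as IWL_STATS_UNKNOWN_CHANNEL_LOAD are now skipped
 * rather than tripping the >100 firmware check, so a reporting gap no
 * longer corrupts the average.
 */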
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
index 36317feb923b..3e2ae6020613 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o
+iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o emlsr_with_bt.o
ccflags-y += -I$(src)/../
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmld-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
index 1fd664be1a7c..29b0248cec3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for channel helper functions
*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <kunit/test.h>
#include <kunit/static_stub.h>
@@ -474,14 +474,14 @@ static struct iwl_rx_mpdu_desc *setup_mpdu_desc(void)
KUNIT_ALLOC_AND_ASSERT(test, mpdu_desc);
mpdu_desc->reorder_data |=
- cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_BAID_MASK,
- param->rx_pkt.baid));
+ le32_encode_bits(param->rx_pkt.baid,
+ IWL_RX_MPDU_REORDER_BAID_MASK);
mpdu_desc->reorder_data |=
- cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_SN_MASK,
- param->rx_pkt.sn));
+ le32_encode_bits(param->rx_pkt.sn,
+ IWL_RX_MPDU_REORDER_SN_MASK);
mpdu_desc->reorder_data |=
- cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_NSSN_MASK,
- param->rx_pkt.nssn));
+ le32_encode_bits(param->rx_pkt.nssn,
+ IWL_RX_MPDU_REORDER_NSSN_MASK);
if (param->rx_pkt.old_sn)
mpdu_desc->reorder_data |=
cpu_to_le32(IWL_RX_MPDU_REORDER_BA_OLD_SN);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c
new file mode 100644
index 000000000000..91556ee5c142
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for EMLSR operation with BT coexistence
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "mlo.h"
+
+static const struct emlsr_with_bt_test_case {
+ const char *desc;
+ struct {
+ struct iwl_bt_coex_profile_notif notif;
+ s32 signal;
+ bool check_entry;
+ } input;
+ bool emlsr_allowed;
+} emlsr_with_bt_cases[] = {
+ {
+ .desc = "BT penalty(exit) with low rssi 4.5: emlsr allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {4, 5},
+ .notif.wifi_loss_mid_high_rssi[1] = {7, 9},
+ .signal = -69,
+ .check_entry = false,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(exit) from high rssi 5: emlsr allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {7, 9},
+ .notif.wifi_loss_mid_high_rssi[1] = {5, 5},
+ .signal = -68,
+ .check_entry = false,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(exit) with low rssi 8: emlsr not allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {7, 9},
+ .notif.wifi_loss_mid_high_rssi[1] = {4, 5},
+ .signal = -69,
+ .check_entry = false,
+ },
+ .emlsr_allowed = false,
+ },
+ {
+ .desc = "BT penalty(exit) from high rssi 9: emlsr not allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {4, 5},
+ .notif.wifi_loss_mid_high_rssi[1] = {9, 9},
+ .signal = -68,
+ .check_entry = false,
+ },
+ .emlsr_allowed = false,
+ },
+ {
+ .desc = "BT penalty(entry) with low rssi 4.5: emlsr allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {4, 5},
+ .notif.wifi_loss_mid_high_rssi[1] = {7, 9},
+ .signal = -63,
+ .check_entry = true,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(entry) from high rssi 5: emlsr allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {7, 9},
+ .notif.wifi_loss_mid_high_rssi[1] = {5, 5},
+ .signal = -62,
+ .check_entry = false,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(entry) with low rssi 8: emlsr not allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {7, 9},
+ .notif.wifi_loss_mid_high_rssi[1] = {4, 5},
+ .signal = -63,
+ .check_entry = false,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(entry) from high rssi 9: emlsr not allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {4, 5},
+ .notif.wifi_loss_mid_high_rssi[1] = {9, 9},
+ .signal = -62,
+ .check_entry = true,
+ },
+ .emlsr_allowed = false,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(emlsr_with_bt, emlsr_with_bt_cases, desc);
+
+static void test_emlsr_with_bt(struct kunit *test)
+{
+ struct iwl_mld *mld = test->priv;
+ const struct emlsr_with_bt_test_case *test_param =
+ (const void *)(test->param_value);
+ struct ieee80211_vif *vif =
+ iwlmld_kunit_add_vif(true, NL80211_IFTYPE_STATION);
+ struct ieee80211_bss_conf *link = iwlmld_kunit_add_link(vif, 1);
+ bool actual_value = false;
+
+ KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+
+	/* Apply the test case input parameters */
+ link->bss->signal = DBM_TO_MBM(test_param->input.signal);
+ memcpy(&mld->last_bt_notif, &test_param->input.notif,
+ sizeof(struct iwl_bt_coex_profile_notif));
+
+ actual_value = iwl_mld_bt_allows_emlsr(mld, link,
+ test_param->input.check_entry);
+ /* Assert that the returned value matches the expected emlsr_allowed */
+ KUNIT_EXPECT_EQ(test, actual_value, test_param->emlsr_allowed);
+}
+
+static struct kunit_case emlsr_with_bt_test_cases[] = {
+ KUNIT_CASE_PARAM(test_emlsr_with_bt, emlsr_with_bt_gen_params),
+ {},
+};
+
+static struct kunit_suite emlsr_with_bt = {
+ .name = "iwlmld-emlsr-with-bt-tests",
+ .test_cases = emlsr_with_bt_test_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(emlsr_with_bt);
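/*
 * A sketch of the table-driven KUnit pattern the new test file uses:
 * KUNIT_ARRAY_PARAM_DESC() generates emlsr_with_bt_gen_params() from
 * the case array and its .desc member, and KUNIT_CASE_PARAM() then
 * runs the test once per table row:
 *
 *	KUNIT_ARRAY_PARAM_DESC(name, cases, desc);
 *	static struct kunit_case tests[] = {
 *		KUNIT_CASE_PARAM(test_fn, name_gen_params),
 *		{},
 *	};
 *
 * Each invocation receives its row through test->param_value.
 */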
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
index 4e189bf8b3fb..0e3b9417dd63 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for channel helper functions
*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <kunit/test.h>
@@ -30,10 +30,10 @@ static void test_hcmd_names_sorted(struct kunit *test)
static void test_hcmd_names_for_rx(struct kunit *test)
{
static struct iwl_trans t = {
- .command_groups = iwl_mld_groups,
+ .conf.command_groups = iwl_mld_groups,
};
- t.command_groups_size = global_iwl_mld_goups_size;
+ t.conf.command_groups_size = global_iwl_mld_goups_size;
for (unsigned int i = 0; i < iwl_mld_rx_handlers_num; i++) {
const struct iwl_rx_handler *rxh;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
index 295dcfd3f85d..94a037bec1fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
@@ -32,7 +32,7 @@ static const struct link_grading_test_case {
.desc = "channel util of 128 (50%)",
.input.link = {
.link_id = 0,
- .chandef = &chandef_2ghz,
+ .chandef = &chandef_2ghz_20mhz,
.active = false,
.has_chan_util_elem = true,
.chan_util = 128,
@@ -43,7 +43,7 @@ static const struct link_grading_test_case {
.desc = "channel util of 180 (70%)",
.input.link = {
.link_id = 0,
- .chandef = &chandef_2ghz,
+ .chandef = &chandef_2ghz_20mhz,
.active = false,
.has_chan_util_elem = true,
.chan_util = 180,
@@ -54,7 +54,7 @@ static const struct link_grading_test_case {
.desc = "channel util of 180 (70%), channel load by us of 10%",
.input.link = {
.link_id = 0,
- .chandef = &chandef_2ghz,
+ .chandef = &chandef_2ghz_20mhz,
.has_chan_util_elem = true,
.chan_util = 180,
.active = true,
@@ -66,7 +66,7 @@ static const struct link_grading_test_case {
.desc = "no channel util element",
.input.link = {
.link_id = 0,
- .chandef = &chandef_2ghz,
+ .chandef = &chandef_2ghz_20mhz,
.active = true,
},
.expected_grade = 120,
@@ -132,7 +132,7 @@ static void test_link_grading(struct kunit *test)
bool active = test_param->input.link.active;
u16 valid_links;
struct iwl_mld_kunit_link assoc_link = {
- .band = test_param->input.link.chandef->chan->band,
+ .chandef = test_param->input.link.chandef,
};
/* If the link is not active, use a different link as the assoc link */
@@ -172,108 +172,150 @@ static struct kunit_suite link_selection = {
kunit_test_suite(link_selection);
-static const struct channel_load_case {
+static const struct link_pair_case {
const char *desc;
+ const struct cfg80211_chan_def *chandef_a, *chandef_b;
bool low_latency_vif;
u32 chan_load_not_by_us;
- enum nl80211_chan_width bw_a;
- enum nl80211_chan_width bw_b;
bool primary_link_active;
- bool expected_result;
-} channel_load_cases[] = {
+ u32 expected_result;
+} link_pair_cases[] = {
{
.desc = "Unequal bandwidth, primary link inactive, EMLSR not allowed",
.low_latency_vif = false,
.primary_link_active = false,
- .bw_a = NL80211_CHAN_WIDTH_40,
- .bw_b = NL80211_CHAN_WIDTH_20,
- .expected_result = false,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_6ghz_20mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
},
{
.desc = "Equal bandwidths, sufficient channel load, EMLSR allowed",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 11,
- .bw_a = NL80211_CHAN_WIDTH_40,
- .bw_b = NL80211_CHAN_WIDTH_40,
- .expected_result = true,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_6ghz_40mhz,
+ .expected_result = 0,
},
{
.desc = "Equal bandwidths, insufficient channel load, EMLSR not allowed",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 6,
- .bw_a = NL80211_CHAN_WIDTH_80,
- .bw_b = NL80211_CHAN_WIDTH_80,
- .expected_result = false,
+ .chandef_a = &chandef_5ghz_80mhz,
+ .chandef_b = &chandef_6ghz_80mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
},
{
.desc = "Low latency VIF, sufficient channel load, EMLSR allowed",
.low_latency_vif = true,
.primary_link_active = true,
.chan_load_not_by_us = 6,
- .bw_a = NL80211_CHAN_WIDTH_160,
- .bw_b = NL80211_CHAN_WIDTH_160,
- .expected_result = true,
+ .chandef_a = &chandef_5ghz_160mhz,
+ .chandef_b = &chandef_6ghz_160mhz,
+ .expected_result = 0,
},
{
.desc = "Different bandwidths (2x ratio), primary link load permits EMLSR",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 30,
- .bw_a = NL80211_CHAN_WIDTH_40,
- .bw_b = NL80211_CHAN_WIDTH_20,
- .expected_result = true,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_6ghz_20mhz,
+ .expected_result = 0,
},
{
.desc = "Different bandwidths (4x ratio), primary link load permits EMLSR",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 45,
- .bw_a = NL80211_CHAN_WIDTH_80,
- .bw_b = NL80211_CHAN_WIDTH_20,
- .expected_result = true,
+ .chandef_a = &chandef_5ghz_80mhz,
+ .chandef_b = &chandef_6ghz_20mhz,
+ .expected_result = 0,
},
{
.desc = "Different bandwidths (16x ratio), primary link load insufficient",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 45,
- .bw_a = NL80211_CHAN_WIDTH_320,
- .bw_b = NL80211_CHAN_WIDTH_20,
- .expected_result = false,
+ .chandef_a = &chandef_6ghz_320mhz,
+ .chandef_b = &chandef_5ghz_20mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
+ },
+ {
+ .desc = "Same band not allowed (2.4 GHz)",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .chandef_a = &chandef_2ghz_20mhz,
+ .chandef_b = &chandef_2ghz_11_20mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND,
+ },
+ {
+ .desc = "Same band not allowed (5 GHz)",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_5ghz_40mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND,
+ },
+ {
+ .desc = "Same band allowed (5 GHz separated)",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_5ghz_120_40mhz,
+ .expected_result = 0,
+ },
+ {
+ .desc = "Same band not allowed (6 GHz)",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .chandef_a = &chandef_6ghz_160mhz,
+ .chandef_b = &chandef_6ghz_221_160mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND,
},
};
-KUNIT_ARRAY_PARAM_DESC(channel_load, channel_load_cases, desc);
+KUNIT_ARRAY_PARAM_DESC(link_pair, link_pair_cases, desc);
-static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test)
+static void test_iwl_mld_link_pair_allows_emlsr(struct kunit *test)
{
- const struct channel_load_case *params = test->param_value;
+ const struct link_pair_case *params = test->param_value;
struct iwl_mld *mld = test->priv;
struct ieee80211_vif *vif;
- struct cfg80211_chan_def chandef_a, chandef_b;
- struct iwl_mld_link_sel_data a = {.chandef = &chandef_a,
- .link_id = 4};
- struct iwl_mld_link_sel_data b = {.chandef = &chandef_b,
- .link_id = 5};
+ struct ieee80211_bss_conf *link;
+ /* link A is the primary and link B is the secondary */
+ struct iwl_mld_link_sel_data a = {
+ .chandef = params->chandef_a,
+ .link_id = 4,
+ };
+ struct iwl_mld_link_sel_data b = {
+ .chandef = params->chandef_b,
+ .link_id = 5,
+ };
struct iwl_mld_kunit_link assoc_link = {
+ .chandef = params->primary_link_active ? a.chandef : b.chandef,
.id = params->primary_link_active ? a.link_id : b.link_id,
- .bandwidth = params->primary_link_active ? params->bw_a : params->bw_b,
};
- bool result;
+ u32 result;
vif = iwlmld_kunit_setup_mlo_assoc(BIT(a.link_id) | BIT(b.link_id),
&assoc_link);
- chandef_a.width = params->bw_a;
- chandef_b.width = params->bw_b;
-
if (params->low_latency_vif)
iwl_mld_vif_from_mac80211(vif)->low_latency_causes = 1;
wiphy_lock(mld->wiphy);
+ link = wiphy_dereference(mld->wiphy, vif->link_conf[a.link_id]);
+ KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+ link = wiphy_dereference(mld->wiphy, vif->link_conf[b.link_id]);
+ KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+
/* Simulate channel load */
if (params->primary_link_active) {
struct iwl_mld_phy *phy =
@@ -282,22 +324,22 @@ static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test)
phy->avg_channel_load_not_by_us = params->chan_load_not_by_us;
}
- result = iwl_mld_channel_load_allows_emlsr(mld, vif, &a, &b);
+ result = iwl_mld_emlsr_pair_state(vif, &a, &b);
wiphy_unlock(mld->wiphy);
KUNIT_EXPECT_EQ(test, result, params->expected_result);
}
-static struct kunit_case channel_load_criteria_test_cases[] = {
- KUNIT_CASE_PARAM(test_iwl_mld_channel_load_allows_emlsr, channel_load_gen_params),
+static struct kunit_case link_pair_criteria_test_cases[] = {
+ KUNIT_CASE_PARAM(test_iwl_mld_link_pair_allows_emlsr, link_pair_gen_params),
{}
};
-static struct kunit_suite channel_load_criteria_tests = {
- .name = "iwlmld_channel_load_allows_emlsr",
- .test_cases = channel_load_criteria_test_cases,
+static struct kunit_suite link_pair_criteria_tests = {
+ .name = "iwlmld_link_pair_allows_emlsr",
+ .test_cases = link_pair_criteria_test_cases,
.init = iwlmld_kunit_test_init,
};
-kunit_test_suite(channel_load_criteria_tests);
+kunit_test_suite(link_pair_criteria_tests);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
index 4a4eaa134bd3..69a0d67858bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
@@ -63,11 +63,11 @@ static void test_missed_beacon(struct kunit *test)
struct iwl_rx_packet *pkt;
struct iwl_mld_kunit_link link1 = {
.id = 0,
- .band = NL80211_BAND_6GHZ,
+ .chandef = &chandef_6ghz_160mhz,
};
struct iwl_mld_kunit_link link2 = {
.id = 1,
- .band = NL80211_BAND_5GHZ,
+ .chandef = &chandef_5ghz_80mhz,
};
kunit_activate_static_stub(test, ieee80211_connection_loss,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
index 9712ee696509..26cf27be762d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
@@ -24,7 +24,7 @@ int iwlmld_kunit_test_init(struct kunit *test)
{
struct iwl_mld *mld;
struct iwl_trans *trans;
- const struct iwl_cfg *cfg;
+ const struct iwl_rf_cfg *cfg;
struct iwl_fw *fw;
struct ieee80211_hw *hw;
@@ -146,7 +146,7 @@ iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id)
}
struct ieee80211_chanctx_conf *
-iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def)
+iwlmld_kunit_add_chanctx(const struct cfg80211_chan_def *def)
{
struct kunit *test = kunit_get_current_test();
struct iwl_mld *mld = test->priv;
@@ -346,8 +346,7 @@ iwlmld_kunit_setup_assoc(bool mlo, struct iwl_mld_kunit_link *assoc_link)
else
link = &vif->bss_conf;
- chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->band,
- assoc_link->bandwidth);
+ chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->chandef);
wiphy_lock(mld->wiphy);
iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
@@ -428,7 +427,7 @@ struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1,
link = wiphy_dereference(mld->wiphy, vif->link_conf[link2->id]);
KUNIT_EXPECT_NOT_NULL(test, link);
- chan_ctx = iwlmld_kunit_add_chanctx(link2->band, link2->bandwidth);
+ chan_ctx = iwlmld_kunit_add_chanctx(link2->chandef);
iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
wiphy_unlock(mld->wiphy);
@@ -472,3 +471,33 @@ struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
return iwl_mld_phy_from_mac80211(chanctx);
}
+
+static const struct chandef_case {
+ const char *desc;
+ const struct cfg80211_chan_def *chandef;
+} chandef_cases[] = {
+#define CHANDEF(c, ...) { .desc = "chandef " #c " valid", .chandef = &c, },
+ CHANDEF_LIST
+#undef CHANDEF
+};
+
+KUNIT_ARRAY_PARAM_DESC(chandef, chandef_cases, desc);
+
+static void test_iwl_mld_chandef_valid(struct kunit *test)
+{
+ const struct chandef_case *params = test->param_value;
+
+ KUNIT_EXPECT_EQ(test, true, cfg80211_chandef_valid(params->chandef));
+}
+
+static struct kunit_case chandef_test_cases[] = {
+ KUNIT_CASE_PARAM(test_iwl_mld_chandef_valid, chandef_gen_params),
+ {}
+};
+
+static struct kunit_suite chandef_tests = {
+ .name = "iwlmld_valid_test_chandefs",
+ .test_cases = chandef_test_cases,
+};
+
+kunit_test_suite(chandef_tests);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
index d3723653cf1b..edf8eef4e81a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
@@ -14,9 +14,8 @@ struct iwl_mld;
int iwlmld_kunit_test_init(struct kunit *test);
struct iwl_mld_kunit_link {
+ const struct cfg80211_chan_def *chandef;
u8 id;
- enum nl80211_band band;
- enum nl80211_chan_width bandwidth;
};
enum nl80211_iftype;
@@ -42,50 +41,57 @@ static struct ieee80211_channel _name = { \
.hw_value = (_freq), \
}
-#define CHANDEF(_name, _channel, _freq1, _width) \
-__maybe_unused static struct cfg80211_chan_def _name = { \
- .chan = &(_channel), \
- .center_freq1 = (_freq1), \
- .width = (_width), \
-}
-
CHANNEL(chan_2ghz, NL80211_BAND_2GHZ, 2412);
+CHANNEL(chan_2ghz_11, NL80211_BAND_2GHZ, 2462);
CHANNEL(chan_5ghz, NL80211_BAND_5GHZ, 5200);
+CHANNEL(chan_5ghz_120, NL80211_BAND_5GHZ, 5600);
CHANNEL(chan_6ghz, NL80211_BAND_6GHZ, 6115);
+CHANNEL(chan_6ghz_221, NL80211_BAND_6GHZ, 7055);
/* Feel free to add more */
+#undef CHANNEL
+
+#define CHANDEF_LIST \
+ CHANDEF(chandef_2ghz_20mhz, chan_2ghz, 2412, \
+ NL80211_CHAN_WIDTH_20) \
+ CHANDEF(chandef_2ghz_40mhz, chan_2ghz, 2422, \
+ NL80211_CHAN_WIDTH_40) \
+ CHANDEF(chandef_2ghz_11_20mhz, chan_2ghz_11, 2462, \
+ NL80211_CHAN_WIDTH_20) \
+ CHANDEF(chandef_5ghz_20mhz, chan_5ghz, 5200, \
+ NL80211_CHAN_WIDTH_20) \
+ CHANDEF(chandef_5ghz_40mhz, chan_5ghz, 5210, \
+ NL80211_CHAN_WIDTH_40) \
+ CHANDEF(chandef_5ghz_80mhz, chan_5ghz, 5210, \
+ NL80211_CHAN_WIDTH_80) \
+ CHANDEF(chandef_5ghz_160mhz, chan_5ghz, 5250, \
+ NL80211_CHAN_WIDTH_160) \
+ CHANDEF(chandef_5ghz_120_40mhz, chan_5ghz_120, 5610, \
+ NL80211_CHAN_WIDTH_40) \
+ CHANDEF(chandef_6ghz_20mhz, chan_6ghz, 6115, \
+ NL80211_CHAN_WIDTH_20) \
+ CHANDEF(chandef_6ghz_40mhz, chan_6ghz, 6125, \
+ NL80211_CHAN_WIDTH_40) \
+ CHANDEF(chandef_6ghz_80mhz, chan_6ghz, 6145, \
+ NL80211_CHAN_WIDTH_80) \
+ CHANDEF(chandef_6ghz_160mhz, chan_6ghz, 6185, \
+ NL80211_CHAN_WIDTH_160) \
+ CHANDEF(chandef_6ghz_320mhz, chan_6ghz, 6105, \
+ NL80211_CHAN_WIDTH_320) \
+ CHANDEF(chandef_6ghz_221_160mhz, chan_6ghz_221, 6985, \
+ NL80211_CHAN_WIDTH_160) \
+ /* Feel free to add more */
-CHANDEF(chandef_2ghz, chan_2ghz, 2412, NL80211_CHAN_WIDTH_20);
-CHANDEF(chandef_5ghz, chan_5ghz, 5200, NL80211_CHAN_WIDTH_40);
-CHANDEF(chandef_6ghz, chan_6ghz, 6115, NL80211_CHAN_WIDTH_160);
-/* Feel free to add more */
-
-//struct cfg80211_chan_def;
+#define CHANDEF(_name, _channel, _freq1, _width) \
+__maybe_unused static const struct cfg80211_chan_def _name = { \
+ .chan = &(_channel), \
+ .center_freq1 = (_freq1), \
+ .width = (_width), \
+};
+CHANDEF_LIST
+#undef CHANDEF
struct ieee80211_chanctx_conf *
-iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def);
-
-static inline struct ieee80211_chanctx_conf *
-iwlmld_kunit_add_chanctx(enum nl80211_band band, enum nl80211_chan_width width)
-{
- struct cfg80211_chan_def chandef;
-
- switch (band) {
- case NL80211_BAND_2GHZ:
- chandef = chandef_2ghz;
- break;
- case NL80211_BAND_5GHZ:
- chandef = chandef_5ghz;
- break;
- default:
- case NL80211_BAND_6GHZ:
- chandef = chandef_6ghz;
- break;
- }
-
- chandef.width = width;
-
- return iwlmld_kunit_add_chanctx_from_def(&chandef);
-}
+iwlmld_kunit_add_chanctx(const struct cfg80211_chan_def *def);
void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
index 1909953a9be9..f8a8c35066be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
@@ -13,6 +13,9 @@
#include "mld.h"
#include "hcmd.h"
+#define IWL_MLD_NUM_CTDP_STEPS 20
+#define IWL_MLD_MIN_CTDP_BUDGET_MW 150
+
#define IWL_MLD_CT_KILL_DURATION (5 * HZ)
void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
@@ -116,8 +119,8 @@ free_resp:
static int compare_temps(const void *a, const void *b)
{
- return ((s16)le16_to_cpu(*(__le16 *)a) -
- (s16)le16_to_cpu(*(__le16 *)b));
+ return ((s16)le16_to_cpu(*(const __le16 *)a) -
+ (s16)le16_to_cpu(*(const __le16 *)b));
}
struct iwl_trip_walk_data {
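
Reviewer note: compare_temps() is a standard qsort() comparator, now taking const-qualified pointers; the same shape on a host system, assuming a little-endian machine so that le16_to_cpu() reduces to a plain load:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Little-endian host assumed: the le16 temperatures can be loaded
 * directly, so the driver's le16_to_cpu() becomes a no-op here.
 */
static int compare_temps(const void *a, const void *b)
{
	return (int16_t)*(const uint16_t *)a -
	       (int16_t)*(const uint16_t *)b;
}

int main(void)
{
	uint16_t temps[] = { 105, 90, 110, 95 };

	qsort(temps, sizeof(temps) / sizeof(temps[0]), sizeof(temps[0]),
	      compare_temps);
	for (size_t i = 0; i < sizeof(temps) / sizeof(temps[0]); i++)
		printf("%u\n", temps[i]);
	return 0;
}
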
@@ -272,43 +275,27 @@ static void iwl_mld_thermal_zone_register(struct iwl_mld *mld)
}
}
-/* budget in mWatt */
-static const u32 iwl_mld_cdev_budgets[] = {
- 2400, /* cooling state 0 */
- 2000, /* cooling state 1 */
- 1800, /* cooling state 2 */
- 1600, /* cooling state 3 */
- 1400, /* cooling state 4 */
- 1200, /* cooling state 5 */
- 1000, /* cooling state 6 */
- 900, /* cooling state 7 */
- 800, /* cooling state 8 */
- 700, /* cooling state 9 */
- 650, /* cooling state 10 */
- 600, /* cooling state 11 */
- 550, /* cooling state 12 */
- 500, /* cooling state 13 */
- 450, /* cooling state 14 */
- 400, /* cooling state 15 */
- 350, /* cooling state 16 */
- 300, /* cooling state 17 */
- 250, /* cooling state 18 */
- 200, /* cooling state 19 */
- 150, /* cooling state 20 */
-};
-
int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
enum iwl_ctdp_cmd_operation op)
{
struct iwl_ctdp_cmd cmd = {
.operation = cpu_to_le32(op),
- .budget = cpu_to_le32(iwl_mld_cdev_budgets[state]),
.window_size = 0,
};
+ u32 budget;
int ret;
lockdep_assert_wiphy(mld->wiphy);
+ /* Do a linear scale from IWL_MLD_MIN_CTDP_BUDGET_MW to the configured
+ * maximum in the predefined number of steps.
+ */
+ budget = ((mld->power_budget_mw - IWL_MLD_MIN_CTDP_BUDGET_MW) *
+ (IWL_MLD_NUM_CTDP_STEPS - 1 - state)) /
+ (IWL_MLD_NUM_CTDP_STEPS - 1) +
+ IWL_MLD_MIN_CTDP_BUDGET_MW;
+ cmd.budget = cpu_to_le32(budget);
+
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD),
&cmd);
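
Reviewer note: to make the mapping concrete, with IWL_MLD_NUM_CTDP_STEPS = 20 and IWL_MLD_MIN_CTDP_BUDGET_MW = 150, cooling state 0 requests the full power_budget_mw and state 19 requests the 150 mW floor, with the states in between spaced linearly. A standalone sketch of the same arithmetic (the 3450 mW maximum is only an example value):

#include <stdio.h>

#define NUM_CTDP_STEPS		20
#define MIN_CTDP_BUDGET_MW	150

static unsigned int ctdp_budget_mw(unsigned int max_mw, unsigned int state)
{
	return (max_mw - MIN_CTDP_BUDGET_MW) *
	       (NUM_CTDP_STEPS - 1 - state) / (NUM_CTDP_STEPS - 1) +
	       MIN_CTDP_BUDGET_MW;
}

int main(void)
{
	/* state 0 -> 3450 mW, state 19 -> 150 mW, linear in between */
	for (unsigned int state = 0; state < NUM_CTDP_STEPS; state++)
		printf("state %2u -> %4u mW\n",
		       state, ctdp_budget_mw(3450, state));
	return 0;
}
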
@@ -326,7 +313,7 @@ int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
static int iwl_mld_tcool_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = ARRAY_SIZE(iwl_mld_cdev_budgets) - 1;
+ *state = IWL_MLD_NUM_CTDP_STEPS - 1;
return 0;
}
@@ -354,7 +341,7 @@ static int iwl_mld_tcool_set_cur_state(struct thermal_cooling_device *cdev,
goto unlock;
}
- if (new_state >= ARRAY_SIZE(iwl_mld_cdev_budgets)) {
+ if (new_state >= IWL_MLD_NUM_CTDP_STEPS) {
ret = -EINVAL;
goto unlock;
}
@@ -417,10 +404,50 @@ static void iwl_mld_cooling_device_unregister(struct iwl_mld *mld)
}
#endif /* CONFIG_THERMAL */
+static u32 iwl_mld_ctdp_get_max_budget(struct iwl_mld *mld)
+{
+ u64 bios_power_budget = 0;
+ u32 default_power_budget;
+
+ switch (CSR_HW_RFID_TYPE(mld->trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_GF:
+ /* dual-radio devices have a higher budget */
+ if (CSR_HW_RFID_IS_CDB(mld->trans->info.hw_rf_id))
+ default_power_budget = 5200;
+ else
+ default_power_budget = 2880;
+ break;
+ case IWL_CFG_RF_TYPE_FM:
+ default_power_budget = 3450;
+ break;
+ case IWL_CFG_RF_TYPE_WH:
+ case IWL_CFG_RF_TYPE_PE:
+ default:
+ default_power_budget = 5550;
+ break;
+ }
+
+ iwl_bios_get_pwr_limit(&mld->fwrt, &bios_power_budget);
+
+ /* 32-bit in UEFI, 16-bit in ACPI; use BIOS value if it is in range */
+ if (bios_power_budget &&
+ bios_power_budget != 0xffff && bios_power_budget != 0xffffffff &&
+ bios_power_budget >= IWL_MLD_MIN_CTDP_BUDGET_MW &&
+ bios_power_budget <= default_power_budget)
+ return (u32)bios_power_budget;
+
+ return default_power_budget;
+}
+
void iwl_mld_thermal_initialize(struct iwl_mld *mld)
{
+ lockdep_assert_not_held(&mld->wiphy->mtx);
+
wiphy_delayed_work_init(&mld->ct_kill_exit_wk, iwl_mld_exit_ctkill);
+ mld->power_budget_mw = iwl_mld_ctdp_get_max_budget(mld);
+ IWL_DEBUG_TEMP(mld, "cTDP power budget: %d mW\n", mld->power_budget_mw);
+
#ifdef CONFIG_THERMAL
iwl_mld_cooling_device_register(mld);
iwl_mld_thermal_zone_register(mld);
@@ -429,7 +456,9 @@ void iwl_mld_thermal_initialize(struct iwl_mld *mld)
void iwl_mld_thermal_exit(struct iwl_mld *mld)
{
+ wiphy_lock(mld->wiphy);
wiphy_delayed_work_cancel(mld->wiphy, &mld->ct_kill_exit_wk);
+ wiphy_unlock(mld->wiphy);
#ifdef CONFIG_THERMAL
iwl_mld_cooling_device_unregister(mld);
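
Reviewer note: iwl_mld_ctdp_get_max_budget() only trusts the BIOS limit when it is plausible — zero and the all-ones sentinels (16-bit for the ACPI source, 32-bit for UEFI) are treated as "not programmed". A condensed host-side restatement of that check, with the constants assumed from the patch:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MIN_CTDP_BUDGET_MW 150

/* Zero and all-ones mean "not programmed"; otherwise the BIOS value
 * must fall between the floor and the per-RF default to be used.
 */
static bool bios_budget_usable(uint64_t bios_mw, uint32_t default_mw)
{
	return bios_mw && bios_mw != 0xffff && bios_mw != 0xffffffff &&
	       bios_mw >= MIN_CTDP_BUDGET_MW && bios_mw <= default_mw;
}

int main(void)
{
	printf("%d %d %d\n",
	       bios_budget_usable(0xffff, 3450),	/* 0: sentinel */
	       bios_budget_usable(100, 3450),		/* 0: below floor */
	       bios_budget_usable(3000, 3450));		/* 1: accepted */
	return 0;
}
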
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
index f054cc921d9d..a9ca92c0455e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
@@ -44,7 +44,7 @@ iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
u16 flags = 0;
/* STBC flags */
- if (mld->cfg->ht_params->stbc &&
+ if (mld->cfg->ht_params.stbc &&
(hweight8(iwl_mld_get_valid_tx_ant(mld)) > 1)) {
if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
@@ -56,7 +56,7 @@ iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
}
/* LDPC */
- if (mld->cfg->ht_params->ldpc &&
+ if (mld->cfg->ht_params.ldpc &&
((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
(has_vht && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
@@ -658,7 +658,9 @@ void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
if (WARN_ON(!mld_link_sta))
return;
- mld_link_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+ mld_link_sta->last_rate_n_flags =
+ iwl_v3_rate_from_v2_v3(notif->rate,
+ mld->fw_rates_ver_3);
rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
mld_link_sta->last_rate_n_flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.c b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
index 543abe72e465..3b4b575aadaa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
@@ -76,14 +76,14 @@ static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
*/
unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
IWL_WATCHDOG_DISABLED :
- mld->trans->trans_cfg->base_params->wd_timeout;
+ mld->trans->mac_cfg->base->wd_timeout;
int queue, size;
lockdep_assert_wiphy(mld->wiphy);
if (tid == IWL_MGMT_TID)
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
- mld->trans->cfg->min_txq_size);
+ mld->trans->mac_cfg->base->min_txq_size);
else
size = iwl_mld_get_queue_size(mld, txq);
@@ -391,9 +391,9 @@ static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
/* Set CCK or OFDM flag */
if (rate_idx <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
+ rate_flags |= RATE_MCS_MOD_TYPE_CCK;
else
- rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
+ rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
/* Legacy rates are indexed:
* 0 - 3 for CCK and 0 - 7 for OFDM
@@ -425,47 +425,40 @@ static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
struct ieee80211_tx_rate *rate = &info->control.rates[0];
u32 result;
- /* we only care about legacy/HT/VHT so far, so we can
- * build in v1 and use iwl_new_rate_from_v1()
- * FIXME: in newer devices we only support the new rates, build
- * the rate_n_flags in the new format here instead of using v1 and
- * converting it.
- */
-
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
u8 mcs = ieee80211_rate_get_vht_mcs(rate);
u8 nss = ieee80211_rate_get_vht_nss(rate);
- result = RATE_MCS_VHT_MSK_V1;
- result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
+ result = RATE_MCS_MOD_TYPE_VHT;
+ result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- result |= RATE_MCS_SGI_MSK_V1;
+ result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_40;
else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_80;
else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
- result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
-
- result = iwl_new_rate_from_v1(result);
+ result |= RATE_MCS_CHAN_WIDTH_160;
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- result = RATE_MCS_HT_MSK_V1;
- result |= u32_encode_bits(rate->idx,
- RATE_HT_MCS_RATE_CODE_MSK_V1 |
- RATE_HT_MCS_NSS_MSK_V1);
+ /* only MCS 0-15 are supported */
+ u8 mcs = rate->idx & 7;
+ u8 nss = rate->idx > 7;
+
+ result = RATE_MCS_MOD_TYPE_HT;
+ result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
+ result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- result |= RATE_MCS_SGI_MSK_V1;
+ result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_40;
if (info->flags & IEEE80211_TX_CTL_LDPC)
- result |= RATE_MCS_LDPC_MSK_V1;
+ result |= RATE_MCS_LDPC_MSK;
if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
result |= RATE_MCS_STBC_MSK;
-
- result = iwl_new_rate_from_v1(result);
} else {
result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
}
@@ -479,19 +472,23 @@ static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
return result;
}
-static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta, __le16 fc)
+static __le32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
{
+ u32 rate;
+
if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
- return iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
+ rate = iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
+ else
+ rate = iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
+ iwl_mld_get_tx_ant(mld, info, sta, fc);
- return iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
- iwl_mld_get_tx_ant(mld, info, sta, fc);
+ return iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3);
}
static void
-iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd,
+iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd *tx_cmd,
struct sk_buff *skb, bool amsdu)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -537,11 +534,11 @@ iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
NULL;
- struct iwl_tx_cmd_gen3 *tx_cmd;
+ struct iwl_tx_cmd *tx_cmd;
bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
(*ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_A_MSDU_PRESENT);
- u32 rate_n_flags = 0;
+ __le32 rate_n_flags = 0;
u16 flags = 0;
dev_tx_cmd->hdr.cmd = TX_CMD;
@@ -576,7 +573,7 @@ iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
tx_cmd->flags = cpu_to_le16(flags);
- tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ tx_cmd->rate_n_flags = rate_n_flags;
}
/* Caller of this need to check that info->control.vif is not NULL */
@@ -641,16 +638,36 @@ iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
case NL80211_IFTYPE_P2P_DEVICE:
mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
- if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) {
- IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
+ if (mld_vif->roc_activity != ROC_ACTIVITY_P2P_DISC &&
+ mld_vif->roc_activity != ROC_ACTIVITY_P2P_NEG) {
+ IWL_DEBUG_DROP(mld,
+ "Drop tx outside ROC with activity %d\n",
+ mld_vif->roc_activity);
return IWL_MLD_INVALID_DROP_TX;
}
WARN_ON(!ieee80211_is_mgmt(fc));
- return mld_vif->deflink.aux_sta.queue_id;
+ return mld_vif->aux_sta.queue_id;
+ case NL80211_IFTYPE_MONITOR:
+ mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
+ return mld_vif->deflink.mon_sta.queue_id;
+ case NL80211_IFTYPE_STATION:
+ mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
+
+ if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
+ IWL_DEBUG_DROP(mld, "Drop tx not off-channel\n");
+ return IWL_MLD_INVALID_DROP_TX;
+ }
+
+ if (mld_vif->roc_activity != ROC_ACTIVITY_HOTSPOT) {
+ IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
+ return IWL_MLD_INVALID_DROP_TX;
+ }
+
+ WARN_ON(!ieee80211_is_mgmt(fc));
+ return mld_vif->aux_sta.queue_id;
default:
- /* TODO: consider monitor (task=monitor) */
WARN_ONCE(1, "Unsupported vif type\n");
break;
}
@@ -831,7 +848,7 @@ static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
* 1 more for the potential data in the header
*/
if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
- mld->trans->max_skb_frags)
+ mld->trans->info.max_skb_frags)
num_subframes = 1;
if (num_subframes > 1)
@@ -977,11 +994,14 @@ void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
rcu_read_unlock();
}
-static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
+static void iwl_mld_hwrate_to_tx_rate(struct iwl_mld *mld,
+ __le32 rate_n_flags_fw,
struct ieee80211_tx_info *info)
{
enum nl80211_band band = info->band;
struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
+ u32 rate_n_flags = iwl_v3_rate_from_v2_v3(rate_n_flags_fw,
+ mld->fw_rates_ver_3);
u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
@@ -1006,18 +1026,19 @@ static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
}
switch (format) {
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
tx_rate->flags |= IEEE80211_TX_RC_MCS;
tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
break;
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
ieee80211_rate_set_vht(tx_rate,
rate_n_flags & RATE_MCS_CODE_MSK,
- FIELD_GET(RATE_MCS_NSS_MSK,
- rate_n_flags) + 1);
+ u32_get_bits(rate_n_flags,
+ RATE_MCS_NSS_MSK) + 1);
tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
/* mac80211 cannot do this without ieee80211_tx_status_ext()
* but it only matters for radiotap
*/
@@ -1111,8 +1132,7 @@ void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
}
- iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate),
- info);
+ iwl_mld_hwrate_to_tx_rate(mld, tx_resp->initial_rate, info);
if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
ieee80211_tx_status_skb(mld->hw, skb);
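
Reviewer note: the HT injection branch above splits mac80211's flat 0-15 HT rate index into the firmware's MCS and NSS fields with "idx & 7" and "idx > 7". A quick table of that mapping (pure arithmetic, no driver headers needed):

#include <stdio.h>

int main(void)
{
	/* HT rate index 0..15 -> (MCS 0..7, spatial-stream index 0..1) */
	for (unsigned int idx = 0; idx < 16; idx++)
		printf("HT idx %2u -> mcs %u, nss index %u\n",
		       idx, idx & 7, (unsigned int)(idx > 7));
	return 0;
}
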
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 21641d41a958..13cdc077d8d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -181,7 +181,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
u32 value;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
@@ -569,7 +569,7 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
rcu_read_unlock();
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 3e8b7168af01..507c03198c92 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -300,7 +300,7 @@ static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw,
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
- mvm->trans->num_rx_queues);
+ mvm->trans->info.num_rxqs);
aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
((u64)pn[4] << 8) |
((u64)pn[3] << 16) |
@@ -421,7 +421,7 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
- mvm->trans->num_rx_queues);
+ mvm->trans->info.num_rxqs);
rsc[i] = cpu_to_le64((u64)pn[5] |
((u64)pn[4] << 8) |
((u64)pn[3] << 16) |
@@ -1266,7 +1266,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
struct iwl_host_cmd d3_cfg_cmd = {
.id = D3_CONFIG_CMD,
- .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
+ .flags = CMD_WANT_SKB,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
@@ -1370,11 +1370,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* recording before entering D3. In later devices the FW stops the
* recording automatically.
*/
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
-
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
@@ -1686,7 +1684,7 @@ static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm,
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
int i;
- for (i = 1; i < mvm->trans->num_rx_queues; i++)
+ for (i = 1; i < mvm->trans->info.num_rxqs; i++)
memcpy(ptk_pn->q[i].pn[tid],
status->ptk.aes.seq[tid].ccmp.pn,
IEEE80211_CCMP_PN_LEN);
@@ -2825,7 +2823,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
status->qos_seq_ctr[i] + 0x10;
}
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
i = mvm->offload_tid;
iwl_trans_set_q_ptrs(mvm->trans,
mvm_ap_sta->tid_data[i].txq_id,
@@ -3407,9 +3405,9 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
int ret;
enum iwl_d3_status d3_status;
struct iwl_host_cmd cmd = {
- .id = D0I3_END_CMD,
- .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
- };
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB,
+ };
bool reset = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -3427,7 +3425,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
* AX210 and above don't need the command since they have
* the doorbell interrupt.
*/
- if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_22000 &&
+ if (mvm->trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_22000 &&
fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) {
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0)
@@ -3564,9 +3562,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
- /* after the successful handshake, we're out of D3 */
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-
/* when reset is required we can't send these following commands */
if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
goto query_wakeup_reasons;
@@ -3639,9 +3634,6 @@ out:
*/
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
- /* regardless of what happened, we're now out of D3 */
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-
return 1;
}
@@ -3679,8 +3671,7 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
WARN_ON(iwl_mvm_power_update_device(mvm));
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
- ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3,
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, 0,
sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data);
if (ret)
IWL_ERR(mvm,
@@ -3735,7 +3726,6 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
out:
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
mvm->fast_resume = false;
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 671d3f8d79c1..86a87ea89916 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1277,7 +1277,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
return -EIO;
/* supporting only MQ RX */
- if (!mvm->trans->trans_cfg->mq_rx_supported)
+ if (!mvm->trans->mac_cfg->mq_rx_supported)
return -EOPNOTSUPP;
rxb._page = alloc_pages(GFP_ATOMIC, 0);
@@ -1468,7 +1468,7 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
return -EOPNOTSUPP;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 2b5a62604fc4..819e3228462a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -29,8 +29,8 @@
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
struct iwl_mvm_alive_data {
+ __le32 sku_id[3];
bool valid;
- u32 scd_base_addr;
};
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
@@ -57,13 +57,13 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
};
- if (mvm->trans->num_rx_queues == 1)
+ if (mvm->trans->info.num_rxqs == 1)
return 0;
/* Do not direct RSS traffic to Q 0 which is our fallback queue */
for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
cmd.indirection_table[i] =
- 1 + (i % (mvm->trans->num_rx_queues - 1));
+ 1 + (i % (mvm->trans->info.num_rxqs - 1));
netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
@@ -114,7 +114,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
u32 i;
- if (version == 6) {
+ if (version >= 6) {
struct iwl_alive_ntf_v6 *palive;
if (pkt_len < sizeof(*palive))
@@ -157,6 +157,17 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
}
}
}
+
+ if (version >= 8) {
+ const struct iwl_alive_ntf *palive_v8 =
+ (void *)pkt->data;
+
+ if (pkt_len < sizeof(*palive_v8))
+ return false;
+
+ IWL_DEBUG_FW(mvm, "platform id: 0x%llx\n",
+ palive_v8->platform_id);
+ }
}
if (version >= 5) {
@@ -171,14 +182,15 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
lmac2 = &palive->lmac_data[1];
status = le16_to_cpu(palive->status);
- mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
- mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
- mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+ BUILD_BUG_ON(sizeof(palive->sku_id.data) !=
+ sizeof(alive_data->sku_id));
+ memcpy(alive_data->sku_id, palive->sku_id.data,
+ sizeof(palive->sku_id.data));
IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
- mvm->trans->sku_id[0],
- mvm->trans->sku_id[1],
- mvm->trans->sku_id[2]);
+ le32_to_cpu(alive_data->sku_id[0]),
+ le32_to_cpu(alive_data->sku_id[1]),
+ le32_to_cpu(alive_data->sku_id[2]));
} else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
struct iwl_alive_ntf_v4 *palive;
@@ -221,7 +233,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
if (umac_error_table) {
if (umac_error_table >=
- mvm->trans->cfg->min_umac_error_event_table) {
+ mvm->trans->mac_cfg->base->min_umac_error_event_table) {
iwl_fw_umac_set_alive_err_table(mvm->trans,
umac_error_table);
} else {
@@ -233,7 +245,6 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
}
}
- alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
alive_data->valid = status == IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
@@ -282,7 +293,7 @@ static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
struct iwl_trans *trans = mvm->trans;
- enum iwl_device_family device_family = trans->trans_cfg->device_family;
+ enum iwl_device_family device_family = trans->mac_cfg->device_family;
if (device_family < IWL_DEVICE_FAMILY_8000)
return;
@@ -304,7 +315,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
{
struct iwl_notification_wait alive_wait;
struct iwl_mvm_alive_data alive_data = {};
- const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
@@ -317,11 +327,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
- fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
- else
- fw = iwl_get_ucode_image(mvm->fw, ucode_type);
- if (WARN_ON(!fw))
- return -EINVAL;
+ ucode_type = IWL_UCODE_REGULAR_USNIFFER;
iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@@ -334,7 +340,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
* For the unified firmware case, the ucode_type is not
* INIT, but we still need to run it.
*/
- ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
+ ret = iwl_trans_start_fw(mvm->trans, mvm->fw, ucode_type,
+ run_in_rfkill);
if (ret) {
iwl_fw_set_current_image(&mvm->fwrt, old_type);
iwl_remove_notification(&mvm->notif_wait, &alive_wait);
@@ -348,7 +355,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
- if (mvm->trans->trans_cfg->device_family ==
+ if (mvm->trans->mac_cfg->device_family ==
IWL_DEVICE_FAMILY_AX210) {
/* print these registers regardless of alive fail/success */
IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
@@ -365,14 +372,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
struct iwl_trans *trans = mvm->trans;
/* SecBoot info */
- if (trans->trans_cfg->device_family >=
+ if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_22000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_umac_prph(trans,
UMAG_SB_CPU_2_STATUS));
- } else if (trans->trans_cfg->device_family >=
+ } else if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
@@ -383,7 +390,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_mvm_print_pd_notification(mvm);
/* LMAC/UMAC PC info */
- if (trans->trans_cfg->device_family >=
+ if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_22000) {
pc_data = trans->dbg.pc_data;
for (count = 0; count < trans->dbg.num_pc;
@@ -391,7 +398,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
IWL_ERR(mvm, "%s: 0x%x\n",
pc_data->pc_name,
pc_data->pc_address);
- } else if (trans->trans_cfg->device_family >=
+ } else if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_9000) {
IWL_ERR(mvm, "UMAC PC: 0x%x\n",
iwl_read_umac_prph(trans,
@@ -422,10 +429,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
/* if reached this point, Alive notification was received */
iwl_mei_alive_notif(true);
- iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+ iwl_trans_fw_alive(mvm->trans);
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
- &mvm->fw->ucode_capa);
+ &mvm->fw->ucode_capa, alive_data.sku_id);
if (ret) {
IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
iwl_fw_set_current_image(&mvm->fwrt, old_type);
@@ -473,7 +480,7 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
struct iwl_phy_specific_cfg *phy_filters)
{
#ifdef CONFIG_ACPI
- *phy_filters = mvm->phy_filters;
+ *phy_filters = mvm->fwrt.phy_filters;
#endif /* CONFIG_ACPI */
}
@@ -490,7 +497,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
return;
}
@@ -504,11 +511,10 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
return;
}
- ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
- if (ret < 0) {
- IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret);
+ iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
+
+ if (!mvm->fwrt.uats_valid)
return;
- }
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0)
@@ -578,7 +584,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
/* set flags extra PHY configuration flags from the device's cfg */
phy_cfg_cmd.phy_cfg |=
- cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);
+ cpu_to_le32(mvm->trans->mac_cfg->extra_phy_cfg_flags);
phy_cfg_cmd.calib_control.event_trigger =
mvm->fw->default_calib[ucode_type].event_trigger;
@@ -617,7 +623,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
mvm->rfkill_safe_init_done = false;
- if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
/* if needed, we'll reset this on our way out later */
mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
@@ -651,11 +657,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
- CNVI_PMU_STEP_FLOW) &
- CNVI_PMU_STEP_FLOW_FORCE_URM);
-
/* Send init config command to mark that we are sending NVM access
* commands
*/
@@ -756,7 +757,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
goto remove_notif;
}
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
ret = iwl_mvm_send_bt_init_conf(mvm);
if (ret)
goto remove_notif;
@@ -1270,7 +1271,7 @@ void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
}
}
- iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
+ iwl_acpi_get_phy_filters(&mvm->fwrt);
if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
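
Reviewer note: the RSS setup in this file fills the indirection table round-robin over queues 1 through num_rxqs - 1 so that queue 0 stays free as the fallback queue. The resulting pattern, sketched with an assumed queue count and table length:

#include <stdio.h>

int main(void)
{
	unsigned int num_rxqs = 4;	/* assumed queue count */

	/* Queue 0 is the fallback and never receives RSS traffic,
	 * so entries cycle 1, 2, 3, 1, 2, 3, ...
	 */
	for (unsigned int i = 0; i < 12; i++)
		printf("indirection[%2u] = queue %u\n",
		       i, 1 + (i % (num_rxqs - 1)));
	return 0;
}
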
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index 1ea7c44250d4..c3cc1ea3ccc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2025 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#include <linux/leds.h>
@@ -102,7 +102,7 @@ void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
* if we control through the register, we're doing it
* even when the firmware isn't up, so no need to sync
*/
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
return;
iwl_mvm_led_set(mvm, mvm->led.brightness > 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index bec18d197f31..9098a36530cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -938,12 +938,19 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
- u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
+ u16 flags, cck_flag;
- if (rate_idx <= IWL_FIRST_CCK_RATE)
- flags |= is_new_rate ? IWL_MAC_BEACON_CCK
- : IWL_MAC_BEACON_CCK_V1;
+ if (is_new_rate) {
+ flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
+ cck_flag = IWL_MAC_BEACON_CCK;
+ } else {
+ cck_flag = IWL_MAC_BEACON_CCK_V1;
+ flags = iwl_fw_rate_idx_to_plcp(rate_idx);
+ }
+
+ if (rate_idx <= IWL_LAST_CCK_RATE)
+ flags |= cck_flag;
return flags;
}
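
Reviewer note: this hunk also fixes the CCK test. In iwlwifi's legacy rate table the CCK rates occupy the low indices (1, 2, 5.5 and 11 Mbit/s, assumed here to be indices 0-3), so comparing against IWL_FIRST_CCK_RATE flagged only the first of them. A tiny demonstration of old versus new behavior under that assumption:

#include <stdio.h>

/* Assumed legacy table layout: CCK occupies indices 0..3. */
enum { IWL_FIRST_CCK_RATE = 0, IWL_LAST_CCK_RATE = 3 };

int main(void)
{
	for (int idx = 0; idx <= 5; idx++)
		printf("idx %d: old says %s, new says %s\n", idx,
		       idx <= IWL_FIRST_CCK_RATE ? "CCK" : "OFDM",
		       idx <= IWL_LAST_CCK_RATE ? "CCK" : "OFDM");
	return 0;
}
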
@@ -969,7 +976,7 @@ u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm,
static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon,
- struct iwl_tx_cmd *tx)
+ struct iwl_tx_cmd_v6 *tx)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_tx_info *info;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1e916a0ce082..956b491ae5a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -146,7 +146,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
MCC_UPDATE_CMD, 0);
IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver);
- regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
+ regd = iwl_parse_nvm_mcc_info(mvm->trans,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
@@ -311,8 +311,8 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
/* This has been tested on those devices only */
- if (mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
- mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000)
+ if (mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
+ mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_22000)
return -EOPNOTSUPP;
if (!mvm->nvm_data)
@@ -391,7 +391,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
* for older devices. We also don't see this issue on any newer
* devices.
*/
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000)
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
@@ -402,7 +402,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* We want to use the mac80211's reorder buffer for 9000 */
if (iwl_mvm_has_new_rx_api(mvm) &&
- mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_9000)
+ mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_9000)
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -417,10 +417,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return -EINVAL;
}
- if (mvm->trans->num_rx_queues > 1)
+ if (mvm->trans->info.num_rxqs > 1)
ieee80211_hw_set(hw, USES_RSS);
- if (mvm->trans->max_skb_frags)
+ if (mvm->trans->info.max_skb_frags)
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
hw->queues = IEEE80211_NUM_ACS;
@@ -441,7 +441,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- hw->max_tx_fragments = mvm->trans->max_skb_frags;
+ hw->max_tx_fragments = mvm->trans->info.max_skb_frags;
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
@@ -544,7 +544,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_DFS_CONCURRENT);
@@ -610,7 +610,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->bands[NL80211_BAND_6GHZ] =
&mvm->nvm_data->bands[NL80211_BAND_6GHZ];
- hw->wiphy->hw_version = mvm->trans->hw_id;
+ hw->wiphy->hw_version = mvm->trans->info.hw_id;
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -731,8 +731,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
- ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
-
#ifdef CONFIG_PM_SLEEP
if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
device_can_wakeup(mvm->trans->dev)) {
@@ -771,7 +769,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
- hw->netdev_features |= mvm->cfg->features;
+ hw->netdev_features |= mvm->trans->mac_cfg->base->features;
if (!iwl_mvm_is_csum_supported(mvm))
hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK;
@@ -1379,7 +1377,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
iwl_mvm_rm_aux_sta(mvm);
if (suspend &&
- mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
iwl_mvm_fast_suspend(mvm);
/* From this point on, we won't touch the device */
iwl_mvm_mei_device_state(mvm, false);
@@ -1988,15 +1986,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 * interface is handled as part of the stop_ap flow.
*/
if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC) {
-#ifdef CONFIG_NL80211_TESTMODE
- if (vif == mvm->noa_vif) {
- mvm->noa_vif = NULL;
- mvm->noa_duration = 0;
- }
-#endif
+ vif->type == NL80211_IFTYPE_ADHOC)
goto out;
- }
iwl_mvm_power_update_mac(mvm);
@@ -3059,7 +3050,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
* context. For the newer, the beacon is a resource that belongs to a
* MAC, so need to send beacon template after adding the mac.
*/
- if (mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_22000) {
+ if (mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_22000) {
/* Add the mac context */
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
@@ -4105,7 +4096,8 @@ void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (!iwl_mvm_has_rlc_offload(mvm))
+ if (!iwl_mvm_has_rlc_offload(mvm) ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, MAC_PM_POWER_TABLE, 0) >= 2)
return;
mvmvif->ps_disabled = !vif->cfg.ps;
@@ -4395,7 +4387,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- if (!mvm->trans->trans_cfg->gen2) {
+ if (!mvm->trans->mac_cfg->gen2) {
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
} else if (vif->type == NL80211_IFTYPE_STATION) {
@@ -4512,7 +4504,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
ptk_pn = kzalloc(struct_size(ptk_pn, q,
- mvm->trans->num_rx_queues),
+ mvm->trans->info.num_rxqs),
GFP_KERNEL);
if (!ptk_pn) {
ret = -ENOMEM;
@@ -4521,7 +4513,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
ieee80211_get_key_rx_seq(key, tid, &seq);
- for (q = 0; q < mvm->trans->num_rx_queues; q++)
+ for (q = 0; q < mvm->trans->info.num_rxqs; q++)
memcpy(ptk_pn->q[q].pn[tid],
seq.ccmp.pn,
IEEE80211_CCMP_PN_LEN);
@@ -5525,70 +5517,6 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
&mvm_sta->vif->bss_conf);
}
-#ifdef CONFIG_NL80211_TESTMODE
-static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
- [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
- [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
- [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
-};
-
-static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- void *data, int len)
-{
- struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
- int err;
- u32 noa_duration;
-
- err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len,
- iwl_mvm_tm_policy, NULL);
- if (err)
- return err;
-
- if (!tb[IWL_MVM_TM_ATTR_CMD])
- return -EINVAL;
-
- switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
- case IWL_MVM_TM_CMD_SET_NOA:
- if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
- !vif->bss_conf.enable_beacon ||
- !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
- return -EINVAL;
-
- noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
- if (noa_duration >= vif->bss_conf.beacon_int)
- return -EINVAL;
-
- mvm->noa_duration = noa_duration;
- mvm->noa_vif = vif;
-
- return iwl_mvm_update_quotas(mvm, true, NULL);
- case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
- /* must be associated client vif - ignore authorized */
- if (!vif || vif->type != NL80211_IFTYPE_STATION ||
- !vif->cfg.assoc || !vif->bss_conf.dtim_period ||
- !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
- return -EINVAL;
-
- if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif);
- return iwl_mvm_disable_beacon_filter(mvm, vif);
- }
-
- return -EOPNOTSUPP;
-}
-
-int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- void *data, int len)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-
- guard(mvm)(mvm);
- return __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
-}
-#endif
-
void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
{
@@ -6140,12 +6068,12 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
break;
}
- if (format == RATE_MCS_CCK_MSK ||
- format == RATE_MCS_LEGACY_OFDM_MSK) {
+ if (format == RATE_MCS_MOD_TYPE_CCK ||
+ format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) {
int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
/* add the offset needed to get to the legacy ofdm indices */
- if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
rate += IWL_FIRST_OFDM_RATE;
switch (rate) {
@@ -6190,7 +6118,7 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
rinfo->nss = u32_get_bits(rate_n_flags,
RATE_MCS_NSS_MSK) + 1;
- rinfo->mcs = format == RATE_MCS_HT_MSK ?
+ rinfo->mcs = format == RATE_MCS_MOD_TYPE_HT ?
RATE_HT_MCS_INDEX(rate_n_flags) :
u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
@@ -6198,11 +6126,11 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
switch (format) {
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
/* TODO: GI/LTF/RU. How does the firmware encode them? */
rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -6243,10 +6171,10 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
rinfo->he_dcm = 1;
break;
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
rinfo->flags |= RATE_INFO_FLAGS_MCS;
break;
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
break;
}
@@ -6426,37 +6354,36 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
bool sync,
const void *data, u32 size)
{
- struct {
- struct iwl_rxq_sync_cmd cmd;
- struct iwl_mvm_internal_rxq_notif notif;
- } __packed cmd = {
- .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1),
- .cmd.count =
- cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) +
- size),
- .notif.type = type,
- .notif.sync = sync,
- };
+ DEFINE_RAW_FLEX(struct iwl_rxq_sync_cmd, cmd, payload,
+ sizeof(struct iwl_mvm_internal_rxq_notif));
+ struct iwl_mvm_internal_rxq_notif *notif =
+ (struct iwl_mvm_internal_rxq_notif *)cmd->payload;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
- .data[0] = &cmd,
- .len[0] = sizeof(cmd),
+ .data[0] = cmd,
+ .len[0] = __struct_size(cmd),
.data[1] = data,
.len[1] = size,
.flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
};
int ret;
+ cmd->rxq_mask = cpu_to_le32(BIT(mvm->trans->info.num_rxqs) - 1);
+ cmd->count = cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) +
+ size);
+ notif->type = type;
+ notif->sync = sync;
+
/* size must be a multiple of DWORD */
- if (WARN_ON(cmd.cmd.count & cpu_to_le32(3)))
+ if (WARN_ON(cmd->count & cpu_to_le32(3)))
return;
if (!iwl_mvm_has_new_rx_api(mvm))
return;
if (sync) {
- cmd.notif.cookie = mvm->queue_sync_cookie;
- mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1;
+ notif->cookie = mvm->queue_sync_cookie;
+ mvm->queue_sync_state = (1 << mvm->trans->info.num_rxqs) - 1;
}
ret = iwl_mvm_send_cmd(mvm, &hcmd);
@@ -6649,8 +6576,6 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.sync_rx_queues = iwl_mvm_sync_rx_queues,
- CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
-
#ifdef CONFIG_PM_SLEEP
/* look at d3.c */
.suspend = iwl_mvm_suspend,
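
Reviewer note: the queue-sync rework in this file replaces a packed wrapper struct with a command allocated through DEFINE_RAW_FLEX, i.e. a struct ending in a flexible array member sized together with its header. The underlying C pattern, sketched with userspace allocation standing in for the kernel macro:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sync_cmd {
	unsigned int rxq_mask;
	unsigned int count;
	unsigned char payload[];	/* flexible array member */
};

int main(void)
{
	size_t payload_len = 8;
	/* Header and payload are one allocation, sized together. */
	struct sync_cmd *cmd = calloc(1, sizeof(*cmd) + payload_len);

	if (!cmd)
		return 1;
	cmd->rxq_mask = (1u << 4) - 1;	/* e.g. four RX queues */
	cmd->count = (unsigned int)payload_len;
	memset(cmd->payload, 0, payload_len);
	printf("header %zu + payload %zu = %zu bytes\n",
	       sizeof(*cmd), payload_len, sizeof(*cmd) + payload_len);
	free(cmd);
	return 0;
}
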
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index bb7851042177..3c255ae916c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -1,17 +1,25 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2024 Intel Corporation
+ * Copyright (C) 2022 - 2025 Intel Corporation
*/
#include "mvm.h"
static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_config_cmd *cmd)
+ struct iwl_mac_config_cmd *cmd,
+ int cmd_ver)
{
- if (vif->type == NL80211_IFTYPE_AP)
- cmd->he_ap_support = cpu_to_le16(1);
- else
- cmd->he_support = cpu_to_le16(1);
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_ap_support = 1;
+ } else {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_support = 1;
+ }
}
static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
@@ -22,6 +30,12 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_bss_conf *link_conf;
unsigned int link_id;
+ int cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP,
+ MAC_CONFIG_CMD), 0);
+
+ if (WARN_ON(cmd_ver < 1 || cmd_ver > 3))
+ return;
cmd->id_and_color = cpu_to_le32(mvmvif->id);
cmd->action = cpu_to_le32(action);
@@ -30,8 +44,8 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN);
- cmd->he_support = 0;
- cmd->eht_support = 0;
+ cmd->wifi_gen_v2.he_support = 0;
+ cmd->wifi_gen_v2.eht_support = 0;
/* should be set by specific context type handler */
cmd->filter_flags = 0;
@@ -51,8 +65,11 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
* and enable both when we have MLO.
*/
if (ieee80211_vif_is_mld(vif)) {
- iwl_mvm_mld_set_he_support(mvm, vif, cmd);
- cmd->eht_support = cpu_to_le32(1);
+ iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver);
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
+ else
+ cmd->wifi_gen.eht_support = 1;
return;
}
@@ -63,16 +80,19 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
continue;
if (link_conf->he_support)
- iwl_mvm_mld_set_he_support(mvm, vif, cmd);
+ iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver);
- /* it's not reasonable to have EHT without HE and FW API doesn't
+ /* It's not reasonable to have EHT without HE and FW API doesn't
* support it. Ignore EHT in this case.
*/
if (!link_conf->he_support && link_conf->eht_support)
continue;
if (link_conf->eht_support) {
- cmd->eht_support = cpu_to_le32(1);
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
+ else
+ cmd->wifi_gen.eht_support = 1;
break;
}
}
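
Reviewer note: MAC_CONFIG_CMD version 3 moves the HE/EHT bits into a differently-typed union member, so every writer must branch on the looked-up command version. A stripped-down illustration of that version-gated union access (field layout invented for the sketch):

#include <stdio.h>
#include <stdint.h>

struct mac_cfg_cmd {
	union {
		struct {
			uint16_t he_support;	/* __le16 in the v2 FW API */
		} v2;
		struct {
			uint8_t he_support;	/* plain byte from v3 on */
		} v3;
	} wifi_gen;
};

static void set_he_support(struct mac_cfg_cmd *cmd, int cmd_ver)
{
	if (cmd_ver == 2)
		cmd->wifi_gen.v2.he_support = 1;	/* cpu_to_le16(1) in-kernel */
	else
		cmd->wifi_gen.v3.he_support = 1;
}

int main(void)
{
	struct mac_cfg_cmd cmd = { 0 };

	set_he_support(&cmd, 3);
	printf("v3 he_support = %u\n", cmd.wifi_gen.v3.he_support);
	return 0;
}
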
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 78d7153a0cfc..bf24f8cb673e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -150,19 +150,6 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_vif_dbgfs_rm_link(mvm, vif);
- /* For AP/GO interface, the tear down of the resources allocated to the
- * interface is be handled as part of the stop_ap flow.
- */
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC) {
-#ifdef CONFIG_NL80211_TESTMODE
- if (vif == mvm->noa_vif) {
- mvm->noa_vif = NULL;
- mvm->noa_duration = 0;
- }
-#endif
- }
-
iwl_mvm_power_update_mac(mvm);
/* Before the interface removal, mac80211 would cancel the ROC, and the
@@ -1403,8 +1390,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.sync_rx_queues = iwl_mvm_sync_rx_queues,
- CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
-
#ifdef CONFIG_PM_SLEEP
/* look at d3.c */
.suspend = iwl_mvm_suspend,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 9dd670041137..e1010521c3ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2024 Intel Corporation
+ * Copyright (C) 2022-2025 Intel Corporation
*/
#include "mvm.h"
#include "time-sync.h"
@@ -48,9 +48,13 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm,
struct iwl_sta_cfg_cmd *cmd)
{
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
+ int cmd_len = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) > 1 ?
+ sizeof(*cmd) :
+ sizeof(struct iwl_sta_cfg_cmd_v1);
int ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
- 0, sizeof(*cmd), cmd);
+ 0, cmd_len, cmd);
if (ret)
IWL_ERR(mvm, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
return ret;
@@ -144,7 +148,7 @@ int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
{
int ret, txq;
unsigned int wdg_timeout = _wdg_timeout ? *_wdg_timeout :
- mvm->trans->trans_cfg->base_params->wd_timeout;
+ mvm->trans->mac_cfg->base->wd_timeout;
if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
return -ENOSPC;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index f6391c7a3e29..a4f412e750d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -126,7 +126,7 @@ struct iwl_mvm_time_event_data {
/* Power management */
/**
- * enum iwl_power_scheme
+ * enum iwl_power_scheme - iwl power schemes
* @IWL_POWER_SCHEME_CAM: Continuously Active Mode
* @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
* @IWL_POWER_SCHEME_LP: Low Power
@@ -664,6 +664,8 @@ enum iwl_mvm_sched_scan_pass_all_states {
* @min_backoff: The minimal tx backoff due to power restrictions
* @params: Parameters to configure the thermal throttling algorithm.
* @throttle: Is thermal throttling is active?
+ * @power_budget_mw: maximum cTDP power budget as defined for this system and
+ * device
*/
struct iwl_mvm_tt_mgmt {
struct delayed_work ct_kill_exit;
@@ -672,6 +674,8 @@ struct iwl_mvm_tt_mgmt {
u32 min_backoff;
struct iwl_tt_params params;
bool throttle;
+
+ u32 power_budget_mw;
};
#ifdef CONFIG_THERMAL
@@ -999,7 +1003,7 @@ struct iwl_mvm {
struct iwl_trans *trans;
const struct iwl_fw *fw;
- const struct iwl_cfg *cfg;
+ const struct iwl_rf_cfg *cfg;
struct iwl_phy_db *phy_db;
struct ieee80211_hw *hw;
@@ -1033,6 +1037,8 @@ struct iwl_mvm {
u8 cca_40mhz_workaround;
+ u8 fw_rates_ver;
+
u32 ampdu_ref;
bool ampdu_toggle;
@@ -1246,11 +1252,6 @@ struct iwl_mvm {
struct iwl_time_quota_cmd last_quota_cmd;
-#ifdef CONFIG_NL80211_TESTMODE
- u32 noa_duration;
- struct ieee80211_vif *noa_vif;
-#endif
-
/* Tx queues */
u16 aux_queue;
u16 snif_queue;
@@ -1348,10 +1349,6 @@ struct iwl_mvm {
__le16 cur_aid;
u8 cur_bssid[ETH_ALEN];
-#ifdef CONFIG_ACPI
- struct iwl_phy_specific_cfg phy_filters;
-#endif
-
/* report rx timestamp in ptp clock time */
bool rx_ts_ptp;
@@ -1562,7 +1559,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
*/
- if (mvm->cfg->nvm_type == IWL_NVM_EXT)
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT)
return nvm_lar && tlv_lar;
else
return tlv_lar;
@@ -1626,13 +1623,13 @@ static inline bool iwl_mvm_has_new_station_api(const struct iwl_fw *fw)
static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
{
/* TODO - replace with TLV once defined */
- return mvm->trans->trans_cfg->gen2;
+ return mvm->trans->mac_cfg->gen2;
}
static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
{
/* TODO - better define this */
- return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+ return mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
}
static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
@@ -1657,7 +1654,7 @@ static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm)
* but then there's a little bit of code in scan that won't make
* any sense...
*/
- return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+ return mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
}
static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
@@ -1732,13 +1729,13 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans)
{
- if (CSR_HW_RFID_IS_CDB(trans->hw_rf_id))
+ if (CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id))
return false;
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
case IWL_CFG_RF_TYPE_FM:
/* Step A doesn't support eSR */
- return CSR_HW_RFID_STEP(trans->hw_rf_id);
+ return CSR_HW_RFID_STEP(trans->info.hw_rf_id);
case IWL_CFG_RF_TYPE_WH:
case IWL_CFG_RF_TYPE_PE:
return true;
@@ -1757,8 +1754,8 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
/* Check if HW supports eSR or STR */
if (iwl_mvm_is_esr_supported(trans) ||
- (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
+ (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id)))
return IWL_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1771,7 +1768,7 @@ extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
return iwl_mvm_ac_to_bz_tx_fifo[ac];
if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_ac_to_gen2_tx_fifo[ac];
@@ -1810,9 +1807,6 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
enum nl80211_band band,
struct ieee80211_tx_rate *r);
-void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
- enum nl80211_band band,
- struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx);
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1843,9 +1837,9 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
+ struct iwl_tx_cmd_v6 *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
@@ -1876,7 +1870,7 @@ int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm,
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd)
+ struct iwl_tx_cmd_v6 *tx_cmd)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -2138,6 +2132,10 @@ bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
const struct iwl_mvm_link_sel_data *b);
s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
+
+
+extern const struct iwl_hcmd_arr iwl_mvm_groups[];
+extern const unsigned int iwl_mvm_groups_size;
#endif
/* AP and IBSS */
@@ -2459,7 +2457,7 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
*/
static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
{
- return ((BIT(mvm->trans->trans_cfg->base_params->num_of_queues) - 1) &
+ return ((BIT(mvm->trans->mac_cfg->base->num_of_queues) - 1) &
~BIT(IWL_MVM_DQA_CMD_QUEUE));
}
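
For reference, the mask arithmetic here is unchanged by the config rename: BIT(num_of_queues) - 1 sets one bit per TX queue, and the command-queue bit is then cleared, since the command queue must never be flushed. A minimal standalone sketch, using made-up stand-ins for the config values rather than the driver's headers:

#include <stdint.h>
#include <stdio.h>

#define NUM_OF_QUEUES	31u	/* stand-in for mac_cfg->base->num_of_queues */
#define CMD_QUEUE	0u	/* stand-in for IWL_MVM_DQA_CMD_QUEUE */

static uint32_t flushable_queues(void)
{
	/* bits 0..NUM_OF_QUEUES-1 set, command queue cleared */
	return ((1u << NUM_OF_QUEUES) - 1) & ~(1u << CMD_QUEUE);
}

int main(void)
{
	printf("flushable mask: 0x%08x\n", flushable_queues()); /* 0x7ffffffe */
	return 0;
}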
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 80ec59c58ae4..953218f1e025 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -120,7 +120,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
} else {
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
- ret, mvm->trans->name);
+ ret, mvm->trans->info.name);
ret = -ENODATA;
}
goto exit;
@@ -191,7 +191,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
while (ret == length) {
/* Check no memory assumptions fail and cause an overflow */
if ((size_read + offset + length) >
- mvm->trans->trans_cfg->base_params->eeprom_size) {
+ mvm->trans->mac_cfg->base->eeprom_size) {
IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
return -ENOBUFS;
}
@@ -206,7 +206,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
offset += ret;
}
- iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);
+ iwl_nvm_fixups(mvm->trans->info.hw_id, section, data, offset);
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM section %d read completed\n", section);
@@ -226,7 +226,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
/* Checking for required sections */
if (mvm->trans->cfg->nvm_type == IWL_NVM) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
+ !mvm->nvm_sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
return NULL;
}
@@ -244,7 +244,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
return NULL;
}
/* MAC_OVERRIDE or at least HW section must exist */
- if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
+ if (!mvm->nvm_sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data &&
!mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
IWL_ERR(mvm,
"Can't parse mac_address, empty sections\n");
@@ -260,7 +260,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
}
}
- hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
+ hw = (const __be16 *)sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
mac_override =
@@ -308,16 +308,15 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
int ret, section;
u32 size_read = 0;
u8 *nvm_buffer, *temp;
- const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;
- if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+ if (WARN_ON_ONCE(mvm->trans->mac_cfg->base->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
return -EINVAL;
/* load NVM values from nic */
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
- nvm_buffer = kmalloc(mvm->trans->trans_cfg->base_params->eeprom_size,
+ nvm_buffer = kmalloc(mvm->trans->mac_cfg->base->eeprom_size,
GFP_KERNEL);
if (!nvm_buffer)
return -ENOMEM;
@@ -338,7 +337,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
break;
}
- iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);
+ iwl_nvm_fixups(mvm->trans->info.hw_id, section, temp, ret);
mvm->nvm_sections[section].data = temp;
mvm->nvm_sections[section].length = ret;
@@ -367,7 +366,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_reg_blob.size = ret;
break;
default:
- if (section == mvm->cfg->nvm_hw_section_num) {
+ if (section == mvm->trans->mac_cfg->base->nvm_hw_section_num) {
mvm->nvm_hw_blob.data = temp;
mvm->nvm_hw_blob.size = ret;
break;
@@ -384,21 +383,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* read External NVM file from the mod param */
ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
mvm->nvm_sections);
- if (ret) {
- mvm->nvm_file_name = nvm_file_C;
-
- if ((ret == -EFAULT || ret == -ENOENT) &&
- mvm->nvm_file_name) {
- /* in case nvm file was failed try again */
- ret = iwl_read_external_nvm(mvm->trans,
- mvm->nvm_file_name,
- mvm->nvm_sections);
- if (ret)
- return ret;
- } else {
- return ret;
- }
- }
+ if (ret)
+ return ret;
}
/* parse the relevant nvm sections */
@@ -554,7 +540,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
struct ieee80211_regdomain *regd;
char mcc[3];
- if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT) {
tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
nvm_lar = mvm->nvm_data->lar_enabled;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 76603ef02704..a2dc5c3b0596 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -92,11 +92,11 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
radio_cfg_step, radio_cfg_dash);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return;
/* SKU control */
- reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);
+ reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->info.hw_rev);
/* radio configuration */
reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
@@ -114,7 +114,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* unrelated errors. Need to further investigate this, but for now
* we'll separate cases.
*/
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
@@ -135,7 +135,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- if (!mvm->trans->cfg->apmg_not_supported)
+ if (!mvm->trans->mac_cfg->base->apmg_not_supported)
iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
@@ -777,7 +777,8 @@ static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = {
HCMD_NAME(PROFILE_NOTIF),
};
-static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
@@ -793,6 +794,11 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names),
[STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names),
};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_groups);
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_mvm_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_groups_size);
+#endif
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
@@ -1272,13 +1278,12 @@ static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
}
static struct iwl_op_mode *
-iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
struct ieee80211_hw *hw;
struct iwl_op_mode *op_mode;
struct iwl_mvm *mvm;
- struct iwl_trans_config trans_cfg = {};
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
@@ -1286,6 +1291,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
+ int ratecheck;
int err;
/*
@@ -1306,18 +1312,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return ERR_PTR(-ENOMEM);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
max_agg = 512;
else
max_agg = IEEE80211_MAX_AMPDU_BUF_HE;
hw->max_rx_aggregation_subframes = max_agg;
- if (cfg->max_tx_agg_size)
- hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
- else
- hw->max_tx_aggregation_subframes = max_agg;
-
op_mode = hw->priv;
mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -1337,19 +1338,58 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->init_status = 0;
+ /* start with v1 rates */
+ mvm->fw_rates_ver = 1;
+
+ /* check for rates version 2 */
+ ratecheck =
+ (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) >= 8) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) >= 3) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) >= 6);
+ if (ratecheck != 0 && ratecheck != 4) {
+ IWL_ERR(mvm, "Firmware has inconsistent rates\n");
+ err = -EINVAL;
+ goto out_free;
+ }
+ if (ratecheck == 4)
+ mvm->fw_rates_ver = 2;
+
+ /* check for rates version 3 */
+ ratecheck =
+ (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) >= 11) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) >= 6) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) >= 9);
+ if (ratecheck != 0 && ratecheck != 5) {
+ IWL_ERR(mvm, "Firmware has inconsistent rates\n");
+ err = -EINVAL;
+ goto out_free;
+ }
+ if (ratecheck == 5)
+ mvm->fw_rates_ver = 3;
+
+ trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
- trans->rx_mpdu_cmd_hdr_size =
- (trans->trans_cfg->device_family >=
+ trans->conf.rx_mpdu_cmd_hdr_size =
+ (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
} else {
op_mode->ops = &iwl_mvm_ops;
- trans->rx_mpdu_cmd_hdr_size =
+ trans->conf.rx_mpdu_cmd_hdr_size =
sizeof(struct iwl_rx_mpdu_res_start);
- if (WARN_ON(trans->num_rx_queues > 1)) {
+ if (WARN_ON(trans->info.num_rxqs > 1)) {
err = -EINVAL;
goto out_free;
}
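
A word on the ratecheck pattern introduced above: each version comparison contributes 0 or 1, so the sum is 0 when every firmware API still uses the old rate format and equals the number of checks when every API uses the new one; anything in between means the image mixes formats and is rejected. A standalone sketch of the v2 check, with the version thresholds taken from this hunk:

#include <stdio.h>

static int rates_version(int tx_ver, int tlc_ver, int rx_ver, int long_tx_ver)
{
	int ratecheck = (tx_ver >= 8) + (tlc_ver >= 3) +
			(rx_ver >= 4) + (long_tx_ver >= 6);

	if (ratecheck != 0 && ratecheck != 4)
		return -1;	/* inconsistent, mirrors the -EINVAL above */

	return ratecheck == 4 ? 2 : 1;
}

int main(void)
{
	printf("all new: v%d\n", rates_version(8, 3, 4, 6));
	printf("all old: v%d\n", rates_version(7, 2, 3, 5));
	printf("mixed:   v%d\n", rates_version(8, 2, 3, 5));
	return 0;
}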
@@ -1437,53 +1477,50 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* Populate the state variables that the transport layer needs
* to know about.
*/
- trans_cfg.op_mode = op_mode;
- trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
- trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+ BUILD_BUG_ON(sizeof(no_reclaim_cmds) >
+ sizeof(trans->conf.no_reclaim_cmds));
+ memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
+ sizeof(no_reclaim_cmds));
+ trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size();
+ trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size();
- trans->wide_cmd_header = true;
- trans_cfg.bc_table_dword =
- mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
+ trans->conf.wide_cmd_header = true;
- trans_cfg.command_groups = iwl_mvm_groups;
- trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+ trans->conf.command_groups = iwl_mvm_groups;
+ trans->conf.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
- trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
- trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
- trans_cfg.scd_set_active = true;
+ trans->conf.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+ trans->conf.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
+ trans->conf.scd_set_active = true;
- trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
- driver_data[2]);
+ trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
snprintf(mvm->hw->wiphy->fw_version,
sizeof(mvm->hw->wiphy->fw_version),
"%.31s", fw->fw_version);
- trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
+ trans->conf.fw_reset_handshake =
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
- trans_cfg.queue_alloc_cmd_ver =
+ trans->conf.queue_alloc_cmd_ver =
iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
SCD_QUEUE_CONFIG_CMD),
0);
mvm->sta_remove_requires_queue_remove =
- trans_cfg.queue_alloc_cmd_ver > 0;
+ trans->conf.queue_alloc_cmd_ver > 0;
mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw);
/* Configure transport layer */
- iwl_trans_configure(mvm->trans, &trans_cfg);
+ iwl_trans_op_mode_enter(mvm->trans, op_mode);
- trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
- trans->iml = mvm->fw->iml;
- trans->iml_len = mvm->fw->iml_len;
-
/* set up notification wait support */
iwl_notification_wait_init(&mvm->notif_wait);
@@ -2100,7 +2137,7 @@ static bool iwl_mvm_sw_reset(struct iwl_op_mode *op_mode,
mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
return true;
- } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
+ } else if (mvm->trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
return true;
}
@@ -2125,7 +2162,6 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
mutex_lock(&mvm->mutex);
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_mvm_stop_device(mvm);
mvm->fast_resume = false;
mutex_unlock(&mvm->mutex);
@@ -2165,7 +2201,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (unlikely(queue >= mvm->trans->num_rx_queues))
+ if (unlikely(queue >= mvm->trans->info.num_rxqs))
return;
if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index a386b315e52f..0057fddf88f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -376,6 +376,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
if (!vif->cfg.ps || !mvmvif->pm_enabled)
return;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, MAC_PM_POWER_TABLE, 0) >= 2)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ENABLE_SMPS_MSK);
+
if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
(!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index aad2614af9ad..798a7e4bea83 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018, 2021-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2021-2022, 2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -86,45 +86,6 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
}
}
-static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
- struct iwl_time_quota_cmd *cmd)
-{
-#ifdef CONFIG_NL80211_TESTMODE
- struct iwl_mvm_vif *mvmvif;
- int i, phy_id = -1, beacon_int = 0;
-
- if (!mvm->noa_duration || !mvm->noa_vif)
- return;
-
- mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
- if (!mvmvif->ap_ibss_active)
- return;
-
- phy_id = mvmvif->deflink.phy_ctxt->id;
- beacon_int = mvm->noa_vif->bss_conf.beacon_int;
-
- for (i = 0; i < MAX_BINDINGS; i++) {
- struct iwl_time_quota_data *data =
- iwl_mvm_quota_cmd_get_quota(mvm, cmd,
- i);
- u32 id_n_c = le32_to_cpu(data->id_and_color);
- u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
- u32 quota = le32_to_cpu(data->quota);
-
- if (id != phy_id)
- continue;
-
- quota *= (beacon_int - mvm->noa_duration);
- quota /= beacon_int;
-
- IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n",
- le32_to_cpu(data->quota), quota);
-
- data->quota = cpu_to_le32(quota);
- }
-#endif
-}
-
int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
bool force_update,
struct ieee80211_vif *disabled_vif)
@@ -260,8 +221,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
}
}
- iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
-
/* check that we have non-zero quota for all valid bindings */
for (i = 0; i < MAX_BINDINGS; i++) {
qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index de5ac000272e..89ac4c6b3e54 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -72,7 +72,7 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
u16 flags = 0;
/* get STBC flags */
- if (mvm->cfg->ht_params->stbc &&
+ if (mvm->cfg->ht_params.stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
@@ -83,7 +83,7 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
}
- if (mvm->cfg->ht_params->ldpc &&
+ if (mvm->cfg->ht_params.ldpc &&
((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
@@ -454,22 +454,11 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
char pretty_rate[100];
- if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_UPDATE_NOTIF, 0) < 3) {
- rs_pretty_print_rate_v1(pretty_rate,
- sizeof(pretty_rate),
- le32_to_cpu(notif->rate));
- IWL_DEBUG_RATE(mvm,
- "Got rate in old format. Rate: %s. Converting.\n",
- pretty_rate);
- lq_sta->last_rate_n_flags =
- iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
- } else {
- lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
- }
+ lq_sta->last_rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(notif->rate, mvm->fw_rates_ver);
rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
lq_sta->last_rate_n_flags);
- IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
+ IWL_DEBUG_RATE(mvm, "rate: %s\n", pretty_rate);
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 068c58e9c1eb..5802ed80a9ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,6 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014, 2018 - 2023 Intel Corporation. All rights reserved.
+ * Copyright(c) 2025 Intel Corporation
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*****************************************************************************/
@@ -895,7 +896,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
WARN_ON_ONCE(1);
}
} else if (ucode_rate & RATE_MCS_VHT_MSK_V1) {
- nss = FIELD_GET(RATE_MCS_NSS_MSK, ucode_rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, ucode_rate) + 1;
if (nss == 1) {
rate->type = LQ_VHT_SISO;
@@ -909,7 +910,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
WARN_ON_ONCE(1);
}
} else if (ucode_rate & RATE_MCS_HE_MSK_V1) {
- nss = FIELD_GET(RATE_MCS_NSS_MSK, ucode_rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, ucode_rate) + 1;
if (nss == 1) {
rate->type = LQ_HE_SISO;
@@ -2696,8 +2697,10 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
lq_sta = mvm_sta;
spin_lock_bh(&lq_sta->pers.lock);
- iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
- info->band, &info->control.rates[0]);
+ iwl_mvm_hwrate_to_tx_rate(iwl_mvm_v3_rate_from_fw(
+ cpu_to_le32(lq_sta->last_rate_n_flags),
+ 1),
+ info->band, &info->control.rates[0]);
info->control.rates[0].count = 1;
/* Report the optimal rate based on rssi and STA caps if we haven't
@@ -2707,8 +2710,12 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
last_ucode_rate = ucode_rate_from_rs_rate(mvm,
optimal_rate);
- iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
- &txrc->reported_rate);
+ last_ucode_rate =
+ iwl_mvm_v3_rate_from_fw(cpu_to_le32(last_ucode_rate),
+ 1);
+ iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+ &txrc->reported_rate);
+ txrc->reported_rate.count = 1;
}
spin_unlock_bh(&lq_sta->pers.lock);
}
@@ -2813,11 +2820,11 @@ static void rs_ht_init(struct iwl_mvm *mvm,
lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
- if (mvm->cfg->ht_params->ldpc &&
+ if (mvm->cfg->ht_params.ldpc &&
(ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
lq_sta->ldpc = true;
- if (mvm->cfg->ht_params->stbc &&
+ if (mvm->cfg->ht_params.stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
lq_sta->stbc_capable = true;
@@ -2832,11 +2839,11 @@ static void rs_vht_init(struct iwl_mvm *mvm,
{
rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
- if (mvm->cfg->ht_params->ldpc &&
+ if (mvm->cfg->ht_params.ldpc &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
lq_sta->ldpc = true;
- if (mvm->cfg->ht_params->stbc &&
+ if (mvm->cfg->ht_params.stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
lq_sta->stbc_capable = true;
@@ -2887,10 +2894,10 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
if (rate & RATE_MCS_HT_MSK_V1) {
mvm->drv_rx_stats.ht_frames++;
- nss = ((rate & RATE_HT_MCS_NSS_MSK_V1) >> RATE_HT_MCS_NSS_POS_V1) + 1;
+ nss = FIELD_GET(RATE_HT_MCS_MIMO2_MSK, rate) + 1;
} else if (rate & RATE_MCS_VHT_MSK_V1) {
mvm->drv_rx_stats.vht_frames++;
- nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1;
} else {
mvm->drv_rx_stats.legacy_frames++;
}
@@ -3304,7 +3311,7 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
if (num_of_ant(ant) == 1)
lq_cmd->single_stream_ant_msk = ant;
- if (!mvm->trans->trans_cfg->gen2)
+ if (!mvm->trans->mac_cfg->gen2)
lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
else
lq_cmd->agg_frame_cnt_limit =
@@ -3673,16 +3680,15 @@ int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate)
if (rate & RATE_MCS_VHT_MSK_V1) {
type = "VHT";
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
- nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1;
} else if (rate & RATE_MCS_HT_MSK_V1) {
type = "HT";
mcs = rate & RATE_HT_MCS_INDEX_MSK_V1;
- nss = ((rate & RATE_HT_MCS_NSS_MSK_V1)
- >> RATE_HT_MCS_NSS_POS_V1) + 1;
+ nss = FIELD_GET(RATE_HT_MCS_MIMO2_MSK, rate) + 1;
} else if (rate & RATE_MCS_HE_MSK_V1) {
type = "HE";
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
- nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}
@@ -4172,3 +4178,167 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
else
return rs_drv_tx_protection(mvm, mvmsta, enable);
}
+
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
+u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver)
+{
+ u32 rate_v3 = 0, rate_v1;
+ u32 dup = 0;
+
+ if (rate_ver > 1)
+ return iwl_v3_rate_from_v2_v3(rate, rate_ver >= 3);
+
+ rate_v1 = le32_to_cpu(rate);
+ if (rate_v1 == 0)
+ return rate_v1;
+ /* convert rate */
+ if (rate_v1 & RATE_MCS_HT_MSK_V1) {
+ u32 nss;
+
+ rate_v3 |= RATE_MCS_MOD_TYPE_HT;
+ rate_v3 |=
+ rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
+ nss = u32_get_bits(rate_v1, RATE_HT_MCS_MIMO2_MSK);
+ rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+ } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
+ rate_v1 & RATE_MCS_HE_MSK_V1) {
+ u32 nss = u32_get_bits(rate_v1, RATE_VHT_MCS_NSS_MSK);
+
+ rate_v3 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
+
+ rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
+ if (rate_v1 & RATE_MCS_HE_MSK_V1) {
+ u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
+ u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
+ RATE_MCS_HE_106T_POS_V1;
+ u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
+ RATE_MCS_HE_GI_LTF_POS;
+
+ if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
+ he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
+ he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
+ /* the new rate format has an additional bit to
+ * represent the value 4 rather than using the SGI
+ * bit for this purpose, as was done in the old
+ * format
+ */
+ he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
+ RATE_MCS_SGI_POS_V1;
+
+ rate_v3 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
+ rate_v3 |= he_type << RATE_MCS_HE_TYPE_POS;
+ rate_v3 |= he_106t << RATE_MCS_HE_106T_POS;
+ rate_v3 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
+ rate_v3 |= RATE_MCS_MOD_TYPE_HE;
+ } else {
+ rate_v3 |= RATE_MCS_MOD_TYPE_VHT;
+ }
+ /* if legacy format */
+ } else {
+ u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
+
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
+ rate_v3 |= legacy_rate;
+ if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
+ rate_v3 |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
+ }
+
+ /* convert flags */
+ if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
+ rate_v3 |= RATE_MCS_LDPC_MSK;
+ rate_v3 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
+ (rate_v1 & RATE_MCS_ANT_AB_MSK) |
+ (rate_v1 & RATE_MCS_STBC_MSK) |
+ (rate_v1 & RATE_MCS_BF_MSK);
+
+ dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
+ if (dup) {
+ rate_v3 |= RATE_MCS_DUP_MSK;
+ rate_v3 |= dup << RATE_MCS_CHAN_WIDTH_POS;
+ }
+
+ if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
+ (rate_v1 & RATE_MCS_SGI_MSK_V1))
+ rate_v3 |= RATE_MCS_SGI_MSK;
+
+ return rate_v3;
+}
+
+__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver)
+{
+ u32 result = 0;
+ int rate_idx;
+
+ if (rate_ver > 1)
+ return iwl_v3_rate_to_v2_v3(rate, rate_ver > 2);
+
+ switch (rate & RATE_MCS_MOD_TYPE_MSK) {
+ case RATE_MCS_MOD_TYPE_CCK:
+ result = RATE_MCS_CCK_MSK_V1;
+ fallthrough;
+ case RATE_MCS_MOD_TYPE_LEGACY_OFDM:
+ rate_idx = u32_get_bits(rate, RATE_LEGACY_RATE_MSK);
+ if (!(result & RATE_MCS_CCK_MSK_V1))
+ rate_idx += IWL_FIRST_OFDM_RATE;
+ result |= u32_encode_bits(iwl_fw_rate_idx_to_plcp(rate_idx),
+ RATE_LEGACY_RATE_MSK_V1);
+ break;
+ case RATE_MCS_MOD_TYPE_HT:
+ result = RATE_MCS_HT_MSK_V1;
+ result |= u32_encode_bits(u32_get_bits(rate,
+ RATE_HT_MCS_CODE_MSK),
+ RATE_HT_MCS_RATE_CODE_MSK_V1);
+ result |= u32_encode_bits(u32_get_bits(rate,
+ RATE_MCS_NSS_MSK),
+ RATE_HT_MCS_MIMO2_MSK);
+ break;
+ case RATE_MCS_MOD_TYPE_VHT:
+ result = RATE_MCS_VHT_MSK_V1;
+ result |= u32_encode_bits(u32_get_bits(rate,
+ RATE_VHT_MCS_NSS_MSK),
+ RATE_MCS_CODE_MSK);
+ result |= u32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK),
+ RATE_VHT_MCS_NSS_MSK);
+ break;
+ case RATE_MCS_MOD_TYPE_HE: /* not generated */
+ default:
+ WARN_ONCE(1, "bad modulation type %d\n",
+ u32_get_bits(rate, RATE_MCS_MOD_TYPE_MSK));
+ return 0;
+ }
+
+ if (rate & RATE_MCS_LDPC_MSK)
+ result |= RATE_MCS_LDPC_MSK_V1;
+ WARN_ON_ONCE(u32_get_bits(rate, RATE_MCS_CHAN_WIDTH_MSK) >
+ RATE_MCS_CHAN_WIDTH_160_VAL);
+ result |= (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) |
+ (rate & RATE_MCS_ANT_AB_MSK) |
+ (rate & RATE_MCS_STBC_MSK) |
+ (rate & RATE_MCS_BF_MSK);
+
+ /* not handling DUP since we don't use it */
+ WARN_ON_ONCE(rate & RATE_MCS_DUP_MSK);
+
+ if (rate & RATE_MCS_SGI_MSK)
+ result |= RATE_MCS_SGI_MSK_V1;
+
+ return cpu_to_le32(result);
+}
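
The helper added at the top of this block, iwl_legacy_rate_to_fw_idx(), recovers a rate index by scanning either the CCK or the OFDM part of the rate table for a matching PLCP value. A minimal standalone sketch of that lookup pattern; the PLCP table below is a made-up subset, the real mapping lives in iwl_fw_rate_idx_to_plcp():

#include <stdio.h>

static const int plcp[] = { 10, 20, 55, 110, 13, 15, 5, 7 };	/* hypothetical */
#define FIRST_OFDM	4
#define COUNT		8
#define INVALID		-1

static int legacy_rate_to_idx(int rate_plcp, int ofdm)
{
	int offset = ofdm ? FIRST_OFDM : 0;
	int last = ofdm ? COUNT : FIRST_OFDM;

	for (int idx = offset; idx < last; idx++)
		if (plcp[idx] == rate_plcp)
			return idx - offset;	/* index within its block */
	return INVALID;
}

int main(void)
{
	printf("OFDM plcp 13 -> idx %d\n", legacy_rate_to_idx(13, 1));
	printf("CCK  plcp 55 -> idx %d\n", legacy_rate_to_idx(55, 0));
	return 0;
}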
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index ea81cb236d5c..69259ebb966b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright (C) 2003 - 2014, 2018 - 2024 Intel Corporation
+ * Copyright (C) 2003 - 2014, 2018 - 2025 Intel Corporation
*****************************************************************************/
#ifndef __rs_h__
@@ -424,6 +424,9 @@ void iwl_mvm_rate_control_unregister(void);
struct iwl_mvm_sta;
+u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver);
+__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver);
+
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 2dbef7b46355..8eb0aa448c85 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -490,8 +490,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (!(rate_n_flags & RATE_MCS_CCK_MSK_V1) &&
rate_n_flags & RATE_MCS_SGI_MSK_V1)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK_V1)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
@@ -1001,7 +999,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
sec_link = mvmvif->link[sec_link]->fw_link_id;
/* Sum up RX and TX MPDUs from the different queues/links */
- for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
+ for (int q = 0; q < mvm->trans->info.num_rxqs; q++) {
spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
/* The link IDs that don't exist will contain 0 */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 14ea89f931bb..077aadbf95db 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -171,7 +171,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
shdr->type != htons(ETH_P_PAE) &&
shdr->type != htons(ETH_P_TDLS))))
skb->ip_summed = CHECKSUM_NONE;
- else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ else if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
/* mac80211 assumes full CSUM including SNAP header */
skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
}
@@ -409,7 +409,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
- if (mvm->trans->trans_cfg->gen2 &&
+ if (mvm->trans->mac_cfg->gen2 &&
!(status & RX_MPDU_RES_STATUS_MIC_OK))
stats->flag |= RX_FLAG_MMIC_ERROR;
@@ -426,7 +426,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (pkt_flags & FH_RSCSR_RADA_EN) {
stats->flag |= RX_FLAG_ICV_STRIPPED;
- if (mvm->trans->trans_cfg->gen2)
+ if (mvm->trans->mac_cfg->gen2)
stats->flag |= RX_FLAG_MMIC_STRIPPED;
}
@@ -462,7 +462,7 @@ static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
{
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);
@@ -744,7 +744,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
- if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000)
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000)
return false;
/*
@@ -1944,7 +1944,7 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
}
/* must be before L-SIG data */
- if (format == RATE_MCS_HE_MSK)
+ if (format == RATE_MCS_MOD_TYPE_HE)
iwl_mvm_rx_he(mvm, skb, phy_data, queue);
iwl_mvm_decode_lsig(skb, phy_data);
@@ -1966,45 +1966,45 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
phy_data->energy_a, phy_data->energy_b);
/* using TLV format and must be after all fixed len fields */
- if (format == RATE_MCS_EHT_MSK)
+ if (format == RATE_MCS_MOD_TYPE_EHT)
iwl_mvm_rx_eht(mvm, skb, phy_data, queue);
if (unlikely(mvm->monitor_on))
iwl_mvm_add_rtap_sniffer_config(mvm, skb);
- is_sgi = format == RATE_MCS_HE_MSK ?
+ is_sgi = format == RATE_MCS_MOD_TYPE_HE ?
iwl_he_is_sgi(rate_n_flags) :
rate_n_flags & RATE_MCS_SGI_MSK;
- if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
+ if (!(format == RATE_MCS_MOD_TYPE_CCK) && is_sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
switch (format) {
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rx_status->encoding = RX_ENC_VHT;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
rx_status->encoding = RX_ENC_HE;
rx_status->he_dcm =
!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
break;
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
rx_status->encoding = RX_ENC_EHT;
break;
}
switch (format) {
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
break;
- case RATE_MCS_VHT_MSK:
- case RATE_MCS_HE_MSK:
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
rx_status->nss =
u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
@@ -2048,7 +2048,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
desc_size = sizeof(*desc);
else
desc_size = IWL_RX_DESC_SIZE_V1;
@@ -2058,8 +2058,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
}
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ phy_data.rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(desc->v3.rate_n_flags,
+ mvm->fw_rates_ver);
phy_data.channel = desc->v3.channel;
phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
phy_data.energy_a = desc->v3.energy_a;
@@ -2072,7 +2074,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.eht_d4 = desc->phy_eht_data4;
phy_data.d5 = desc->v3.phy_data5;
} else {
- phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ phy_data.rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(desc->v1.rate_n_flags,
+ mvm->fw_rates_ver);
phy_data.channel = desc->v1.channel;
phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
phy_data.energy_a = desc->v1.energy_a;
@@ -2084,13 +2088,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.d3 = desc->v1.phy_data3;
}
- if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- REPLY_RX_MPDU_CMD, 0) < 4) {
- phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
- IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n",
- phy_data.rate_n_flags);
- }
-
format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
len = le16_to_cpu(desc->mpdu_len);
@@ -2138,14 +2135,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
/* set the preamble flag if appropriate */
- if (format == RATE_MCS_CCK_MSK &&
+ if (format == RATE_MCS_MOD_TYPE_CCK &&
phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
u64 tsf_on_air_rise;
- if (mvm->trans->trans_cfg->device_family >=
+ if (mvm->trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210)
tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
else
@@ -2157,7 +2154,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
- u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
+ u8 band = u8_get_bits(desc->mac_phy_band,
+ IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK);
rx_status->band = iwl_mvm_nl80211_band_from_phy(band);
} else {
@@ -2303,7 +2301,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- if (mvm->trans->trans_cfg->device_family ==
+ if (mvm->trans->mac_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
iwl_mvm_flip_address(hdr->addr3);
@@ -2349,7 +2347,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) &&
likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) &&
likely(!iwl_mvm_mei_filter_scan(mvm, skb))) {
- if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
(desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
!(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;
@@ -2383,7 +2381,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.d1 = desc->phy_info[1];
phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
- phy_data.rate_n_flags = le32_to_cpu(desc->rate);
phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
@@ -2391,14 +2388,8 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.rx_vec[0] = desc->rx_vec[0];
phy_data.rx_vec[1] = desc->rx_vec[1];
- if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
- RX_NO_DATA_NOTIF, 0) < 2) {
- IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n",
- phy_data.rate_n_flags);
- phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
- IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n",
- phy_data.rate_n_flags);
- }
+ phy_data.rate_n_flags = iwl_mvm_v3_rate_from_fw(desc->rate,
+ mvm->fw_rates_ver);
format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
@@ -2411,7 +2402,7 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.rx_vec[2] = desc->rx_vec[2];
phy_data.rx_vec[3] = desc->rx_vec[3];
} else {
- if (format == RATE_MCS_EHT_MSK)
+ if (format == RATE_MCS_MOD_TYPE_EHT)
/* no support for EHT before version 3 API */
return;
}
@@ -2472,17 +2463,17 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
* may be up to 8 spatial streams.
*/
switch (format) {
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
break;
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
rx_status->nss =
le32_get_bits(desc->rx_vec[2],
RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 78fd7faaed97..5f6797598998 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -253,7 +253,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
struct iwl_mvm_baid_data *data =
- from_timer(data, t, session_timer);
+ timer_container_of(data, t, session_timer);
struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
struct iwl_mvm_baid_data *ba_data;
struct ieee80211_sta *sta;
@@ -791,10 +791,10 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
lockdep_assert_held(&mvm->mutex);
- if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
+ if (WARN(maxq >= mvm->trans->mac_cfg->base->num_of_queues,
"max queue %d >= num_of_queues (%d)", maxq,
- mvm->trans->trans_cfg->base_params->num_of_queues))
- maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
+ mvm->trans->mac_cfg->base->num_of_queues))
+ maxq = mvm->trans->mac_cfg->base->num_of_queues - 1;
/* This should not be hit with new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -852,7 +852,7 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
- mvm->trans->cfg->min_txq_size);
+ mvm->trans->mac_cfg->base->min_txq_size);
} else {
size = iwl_mvm_get_queue_size(sta);
}
@@ -1765,7 +1765,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm_sta->deflink.sta_id = sta_id;
rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);
- if (!mvm->trans->trans_cfg->gen2)
+ if (!mvm->trans->mac_cfg->gen2)
mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
LINK_QUAL_AGG_FRAME_LIMIT_DEF;
else
@@ -1798,7 +1798,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (iwl_mvm_has_new_rx_api(mvm)) {
int q;
- dup_data = kcalloc(mvm->trans->num_rx_queues,
+ dup_data = kcalloc(mvm->trans->info.num_rxqs,
sizeof(*dup_data), GFP_KERNEL);
if (!dup_data)
return -ENOMEM;
@@ -1811,7 +1811,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* This thus allows receiving a packet with seqno 0 and the
* retry bit set as the very first packet on a new TID.
*/
- for (q = 0; q < mvm->trans->num_rx_queues; q++)
+ for (q = 0; q < mvm->trans->info.num_rxqs; q++)
memset(dup_data[q].last_seq, 0xff,
sizeof(dup_data[q].last_seq));
mvm_sta->dup_data = dup_data;
@@ -1839,11 +1839,11 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
!sta->tdls && ieee80211_vif_is_mld(vif)) {
mvm_sta->mpdu_counters =
- kcalloc(mvm->trans->num_rx_queues,
+ kcalloc(mvm->trans->info.num_rxqs,
sizeof(*mvm_sta->mpdu_counters),
GFP_KERNEL);
if (mvm_sta->mpdu_counters)
- for (int q = 0; q < mvm->trans->num_rx_queues; q++)
+ for (int q = 0; q < mvm->trans->info.num_rxqs; q++)
spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
}
@@ -2189,7 +2189,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
u8 sta_id, u8 fifo)
{
unsigned int wdg_timeout =
- mvm->trans->trans_cfg->base_params->wd_timeout;
+ mvm->trans->mac_cfg->base->wd_timeout;
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
.sta_id = sta_id,
@@ -2206,7 +2206,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
unsigned int wdg_timeout =
- mvm->trans->trans_cfg->base_params->wd_timeout;
+ mvm->trans->mac_cfg->base->wd_timeout;
WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
@@ -2717,7 +2717,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
- for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ for (i = 0; i < mvm->trans->info.num_rxqs; i++) {
int j;
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
@@ -2750,7 +2750,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
{
int i;
- for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ for (i = 0; i < mvm->trans->info.num_rxqs; i++) {
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
struct iwl_mvm_reorder_buf_entry *entries =
@@ -2925,7 +2925,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* before starting the BA session in the firmware
*/
baid_data = kzalloc(sizeof(*baid_data) +
- mvm->trans->num_rx_queues *
+ mvm->trans->info.num_rxqs *
reorder_buf_size,
GFP_KERNEL);
if (!baid_data)
@@ -3177,7 +3177,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* to align the wrap around of ssn so we compare relevant values.
*/
normalized_ssn = tid_data->ssn;
- if (mvm->trans->trans_cfg->gen2)
+ if (mvm->trans->mac_cfg->gen2)
normalized_ssn &= 0xff;
if (normalized_ssn == tid_data->next_reclaimed) {
@@ -4305,7 +4305,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
* In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
* to align the wrap around of ssn so we compare relevant values.
*/
- if (mvm->trans->trans_cfg->gen2)
+ if (mvm->trans->mac_cfg->gen2)
sn &= 0xff;
return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
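
The gen2 masking above deserves a worked example: the hardware's next_reclaimed index is only 8 bits wide, so the full 12-bit SSN is masked to 8 bits before the two are compared, aligning their wrap-around points. A standalone sketch with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int ssn = 0x512;		/* 12-bit sequence number */
	unsigned int next_reclaimed = 0x12;	/* 8-bit HW index */
	unsigned int normalized = ssn & 0xff;	/* gen2 only */

	printf("match: %s\n",
	       normalized == next_reclaimed ? "yes" : "no");	/* yes */
	return 0;
}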
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 19c905b641e2..6b183f5e9bbc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -225,8 +225,6 @@ struct iwl_mvm_vif;
* @IWL_AGG_ON: aggregation session is up
* @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
* HW queue to be empty from packets for this RA /TID.
- * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
- * HW queue to be empty from packets for this RA /TID.
*/
enum iwl_mvm_agg_state {
IWL_AGG_OFF = 0,
@@ -234,7 +232,6 @@ enum iwl_mvm_agg_state {
IWL_AGG_STARTING,
IWL_AGG_ON,
IWL_EMPTYING_HW_QUEUE_ADDBA,
- IWL_EMPTYING_HW_QUEUE_DELBA,
};
/**
@@ -262,7 +259,7 @@ struct iwl_mvm_tid_data {
u16 seq_number;
u16 next_reclaimed;
/* The rest is Tx AGG related */
- u32 rate_n_flags;
+ __le32 rate_n_flags;
u8 lq_color;
bool amsdu_in_ampdu_allowed;
enum iwl_mvm_agg_state state;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
index 6bd56a28cffd..895d53f223e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
@@ -1,3 +1,3 @@
-iwlmvm-tests-y += module.o links.o scan.o
+iwlmvm-tests-y += module.o links.o scan.o hcmd.o
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c
new file mode 100644
index 000000000000..1fee0320c756
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for host command group name ordering
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/test.h>
+
+#include <iwl-trans.h>
+#include "../mvm.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static void test_hcmd_names_sorted(struct kunit *test)
+{
+ for (int i = 0; i < iwl_mvm_groups_size; i++) {
+ const struct iwl_hcmd_arr *arr = &iwl_mvm_groups[i];
+
+ if (!arr->arr)
+ continue;
+
+ for (int j = 0; j < arr->size - 1; j++)
+ KUNIT_EXPECT_LE(test, arr->arr[j].cmd_id,
+ arr->arr[j + 1].cmd_id);
+ }
+}
+
+static struct kunit_case hcmd_names_cases[] = {
+ KUNIT_CASE(test_hcmd_names_sorted),
+ {},
+};
+
+static struct kunit_suite hcmd_names = {
+ .name = "iwlmvm-hcmd-names",
+ .test_cases = hcmd_names_cases,
+};
+
+kunit_test_suite(hcmd_names);
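
The test only asserts ordering; ordering matters because command names are resolved with a binary search, which silently fails on unsorted input. A hedged standalone sketch of that kind of lookup, with a simplified entry type standing in for the driver's struct iwl_hcmd_names:

#include <stdio.h>
#include <stdlib.h>

struct name_entry {
	unsigned char cmd_id;
	const char *name;
};

static int cmp_entry(const void *a, const void *b)
{
	return ((const struct name_entry *)a)->cmd_id -
	       ((const struct name_entry *)b)->cmd_id;
}

int main(void)
{
	static const struct name_entry names[] = {	/* must stay sorted */
		{ 0x01, "ALIVE" },
		{ 0x08, "INIT_COMPLETE" },
		{ 0x1c, "TX_CMD" },
	};
	struct name_entry key = { .cmd_id = 0x1c };
	struct name_entry *found = bsearch(&key, names, 3, sizeof(key),
					   cmp_entry);

	printf("0x1c -> %s\n", found ? found->name : "UNKNOWN");
	return 0;
}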
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 1a30bb1ff8ca..478408f802d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -771,15 +771,17 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
{
- struct iwl_roc_req_v5 roc_cmd = {
+ struct iwl_roc_req roc_cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.activity = cpu_to_le32(activity),
};
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(roc_cmd);
int ret;
lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0,
- sizeof(roc_cmd), &roc_cmd);
+ cmd_len, &roc_cmd);
if (ret)
IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret);
}
@@ -1102,11 +1104,13 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
{
int res;
u32 duration_tu, delay;
- struct iwl_roc_req_v5 roc_req = {
+ struct iwl_roc_req roc_req = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.activity = cpu_to_le32(activity),
.sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
};
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(roc_req);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
@@ -1136,7 +1140,7 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- 0, sizeof(roc_req), &roc_req);
+ 0, cmd_len, &roc_req);
if (!res)
mvmvif->roc_activity = activity;
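
Both hunks above use the same versioned-length idiom: the newest struct layout is always filled in, but only the v5-sized prefix is sent to firmware older than version 6. This works only because the v5 layout is a strict prefix of the newer one. A sketch of the idiom with placeholder struct bodies, not the real wire format:

struct roc_req_v5 { unsigned int action, activity; };
struct roc_req { struct roc_req_v5 v5; unsigned int extra; };

static unsigned long roc_cmd_len(unsigned int fw_ver)
{
	/* older firmware only understands the v5 prefix */
	return fw_ver < 6 ? sizeof(struct roc_req_v5)
			  : sizeof(struct roc_req);
}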
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index c851290e75a2..53bab21ebae2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -8,6 +8,9 @@
#include "mvm.h"
+#define IWL_MVM_NUM_CTDP_STEPS 20
+#define IWL_MVM_MIN_CTDP_BUDGET_MW 150
+
#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
@@ -479,43 +482,28 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
.support_tx_backoff = true,
};
-/* budget in mWatt */
-static const u32 iwl_mvm_cdev_budgets[] = {
- 2400, /* cooling state 0 */
- 2000, /* cooling state 1 */
- 1800, /* cooling state 2 */
- 1600, /* cooling state 3 */
- 1400, /* cooling state 4 */
- 1200, /* cooling state 5 */
- 1000, /* cooling state 6 */
- 900, /* cooling state 7 */
- 800, /* cooling state 8 */
- 700, /* cooling state 9 */
- 650, /* cooling state 10 */
- 600, /* cooling state 11 */
- 550, /* cooling state 12 */
- 500, /* cooling state 13 */
- 450, /* cooling state 14 */
- 400, /* cooling state 15 */
- 350, /* cooling state 16 */
- 300, /* cooling state 17 */
- 250, /* cooling state 18 */
- 200, /* cooling state 19 */
- 150, /* cooling state 20 */
-};
-
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
{
struct iwl_ctdp_cmd cmd = {
.operation = cpu_to_le32(op),
- .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]),
.window_size = 0,
};
+ u32 budget;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
+ /* Scale linearly from IWL_MVM_MIN_CTDP_BUDGET_MW to the configured
+ * maximum over the predefined number of steps.
+ */
+ budget = ((mvm->thermal_throttle.power_budget_mw -
+ IWL_MVM_MIN_CTDP_BUDGET_MW) *
+ (IWL_MVM_NUM_CTDP_STEPS - 1 - state)) /
+ (IWL_MVM_NUM_CTDP_STEPS - 1) +
+ IWL_MVM_MIN_CTDP_BUDGET_MW;
+ cmd.budget = cpu_to_le32(budget);
+
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
CTDP_CONFIG_CMD),
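
For reference, the formula above replaces the deleted 21-entry budget table with IWL_MVM_NUM_CTDP_STEPS interpolated values between the 150 mW floor and the per-device maximum. A standalone sketch that prints the full ladder for an assumed 2000 mW maximum (the real maximum comes from iwl_mvm_ctdp_get_max_budget() below):

#include <stdio.h>

#define NUM_CTDP_STEPS		20
#define MIN_CTDP_BUDGET_MW	150

static unsigned int ctdp_budget(unsigned int max_mw, unsigned int state)
{
	return (max_mw - MIN_CTDP_BUDGET_MW) *
	       (NUM_CTDP_STEPS - 1 - state) / (NUM_CTDP_STEPS - 1) +
	       MIN_CTDP_BUDGET_MW;
}

int main(void)
{
	/* state 0 -> 2000 mW, state 10 -> ~1026 mW, state 19 -> 150 mW */
	for (unsigned int s = 0; s < NUM_CTDP_STEPS; s++)
		printf("state %2u -> %u mW\n", s, ctdp_budget(2000, s));
	return 0;
}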
@@ -552,8 +540,8 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
#ifdef CONFIG_THERMAL
static int compare_temps(const void *a, const void *b)
{
- return ((s16)le16_to_cpu(*(__le16 *)a) -
- (s16)le16_to_cpu(*(__le16 *)b));
+ return ((s16)le16_to_cpu(*(const __le16 *)a) -
+ (s16)le16_to_cpu(*(const __le16 *)b));
}
struct iwl_trip_walk_data {
@@ -707,7 +695,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1;
+ *state = IWL_MVM_NUM_CTDP_STEPS - 1;
return 0;
}
@@ -733,7 +721,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
return -EIO;
- if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets))
+ if (new_state >= IWL_MVM_NUM_CTDP_STEPS)
return -EINVAL;
return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
@@ -794,6 +782,47 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
}
#endif /* CONFIG_THERMAL */
+static u32 iwl_mvm_ctdp_get_max_budget(struct iwl_mvm *mvm)
+{
+ u64 bios_power_budget = 0;
+ u32 default_power_budget;
+
+ switch (CSR_HW_RFID_TYPE(mvm->trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_JF2:
+ case IWL_CFG_RF_TYPE_JF1:
+ default_power_budget = 2000;
+ break;
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_HR1:
+ default_power_budget = 2400;
+ break;
+ case IWL_CFG_RF_TYPE_GF:
+ /* dual-radio devices have a higher budget */
+ if (CSR_HW_RFID_IS_CDB(mvm->trans->info.hw_rf_id))
+ default_power_budget = 5200;
+ else
+ default_power_budget = 2880;
+ break;
+ case IWL_CFG_RF_TYPE_FM:
+ default_power_budget = 3450;
+ break;
+ default:
+ default_power_budget = 5550;
+ break;
+ }
+
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &bios_power_budget);
+
+ /* 32bit in UEFI, 16bit in ACPI; use BIOS value if it is in range */
+ if (bios_power_budget &&
+ bios_power_budget != 0xffff && bios_power_budget != 0xffffffff &&
+ bios_power_budget >= IWL_MVM_MIN_CTDP_BUDGET_MW &&
+ bios_power_budget <= default_power_budget)
+ return (u32)bios_power_budget;
+
+ return default_power_budget;
+}
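
Since the ladder is now computed rather than tabulated, a small standalone harness (not driver code; plain C using the two constants introduced above) can print the budgets that iwl_mvm_ctdp_command() would send for each cooling state:

	#include <stdio.h>

	#define IWL_MVM_NUM_CTDP_STEPS		20
	#define IWL_MVM_MIN_CTDP_BUDGET_MW	150

	int main(void)
	{
		unsigned int max_mw = 2400;	/* e.g. the HR default above */
		unsigned int state;

		for (state = 0; state < IWL_MVM_NUM_CTDP_STEPS; state++) {
			unsigned int budget =
				(max_mw - IWL_MVM_MIN_CTDP_BUDGET_MW) *
				(IWL_MVM_NUM_CTDP_STEPS - 1 - state) /
				(IWL_MVM_NUM_CTDP_STEPS - 1) +
				IWL_MVM_MIN_CTDP_BUDGET_MW;
			printf("state %2u -> %4u mW\n", state, budget);
		}
		return 0;
	}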
+
void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
@@ -805,6 +834,8 @@ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
else
tt->params = iwl_mvm_default_tt_params;
+ tt->power_budget_mw = iwl_mvm_ctdp_get_max_budget(mvm);
+ IWL_DEBUG_TEMP(mvm, "cTDP power budget: %d mW\n", tt->power_budget_mw);
tt->throttle = false;
tt->dynamic_smps = false;
tt->min_backoff = min_backoff;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index f67afb66ef2b..ac2cf1b8ce23 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -148,7 +148,7 @@ out:
* Sets most of the Tx cmd's fields
*/
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
+ struct iwl_tx_cmd_v6 *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -283,14 +283,10 @@ static u32 iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm,
(rate_idx <= IWL_LAST_CCK_RATE);
/* Set CCK or OFDM flag */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
- if (!is_cck)
- rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
- else
- rate_flags |= RATE_MCS_CCK_MSK;
- } else if (is_cck) {
- rate_flags |= RATE_MCS_CCK_MSK_V1;
- }
+ if (!is_cck)
+ rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
+ else
+ rate_flags |= RATE_MCS_MOD_TYPE_CCK;
return (u32)rate_plcp | rate_flags;
}
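
With the explicit TX_CMD version check gone, this helper now always builds the rate in the v3 layout (RATE_MCS_MOD_TYPE_*); translating back to whatever layout the running firmware expects presumably happens once at the boundary, in iwl_mvm_v3_rate_to_fw() as seen below in iwl_mvm_get_tx_rate_n_flags().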
@@ -303,45 +299,35 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_rate *rate = &info->control.rates[0];
u32 result;
- /*
- * we only care about legacy/HT/VHT so far, so we can
- * build in v1 and use iwl_new_rate_from_v1()
- */
-
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
u8 mcs = ieee80211_rate_get_vht_mcs(rate);
u8 nss = ieee80211_rate_get_vht_nss(rate);
- result = RATE_MCS_VHT_MSK_V1;
+ result = RATE_MCS_MOD_TYPE_VHT;
result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- result |= RATE_MCS_SGI_MSK_V1;
+ result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_40;
else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_80;
else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
- result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
- result = iwl_new_rate_from_v1(result);
+ result |= RATE_MCS_CHAN_WIDTH_160;
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- result = RATE_MCS_HT_MSK_V1;
- result |= u32_encode_bits(rate->idx,
- RATE_HT_MCS_RATE_CODE_MSK_V1 |
- RATE_HT_MCS_NSS_MSK_V1);
+ result = RATE_MCS_MOD_TYPE_HT;
+ result |= u32_encode_bits(rate->idx & 0x7,
+ RATE_HT_MCS_CODE_MSK);
+ result |= u32_encode_bits(rate->idx >> 3,
+ RATE_MCS_NSS_MSK);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- result |= RATE_MCS_SGI_MSK_V1;
+ result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_40;
if (info->flags & IEEE80211_TX_CTL_LDPC)
- result |= RATE_MCS_LDPC_MSK_V1;
+ result |= RATE_MCS_LDPC_MSK;
if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
result |= RATE_MCS_STBC_MSK;
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
- result = iwl_new_rate_from_v1(result);
} else {
int rate_idx = info->control.rates[0].idx;
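
The new HT encoding splits the mac80211 MCS index into a 3-bit rate code and a stream count: for example, rate->idx = 12 (two spatial streams, MCS 4) gives a code of 12 & 0x7 = 4 via RATE_HT_MCS_CODE_MSK and an NSS field of 12 >> 3 = 1 via RATE_MCS_NSS_MSK, replacing the old combined RATE_HT_MCS_RATE_CODE_MSK_V1 | RATE_HT_MCS_NSS_MSK_V1 encoding.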
@@ -391,21 +377,25 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
return iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
}
-static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta, __le16 fc)
+static __le32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
{
+ u32 rate;
+
if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
- return iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc);
+ rate = iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc);
+ else
+ rate = iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
+ iwl_mvm_get_tx_ant(mvm, info, sta, fc);
- return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
- iwl_mvm_get_tx_ant(mvm, info, sta, fc);
+ return iwl_mvm_v3_rate_to_fw(rate, mvm->fw_rates_ver);
}
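
Changing the return type to __le32 means callers can no longer forget the conversion: the rate is assembled in host order in the common v3 layout and converted exactly once at the firmware boundary. A minimal sketch of that pattern (build_v3_rate() is a hypothetical placeholder for the two helpers above):

	u32 v3 = build_v3_rate(mvm, info, sta, fc);	/* host order, v3 layout */
	__le32 fw_rate = iwl_mvm_v3_rate_to_fw(v3, mvm->fw_rates_ver);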
/*
* Sets the fields in the Tx cmd that are rate related
*/
-void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
@@ -443,8 +433,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
}
/* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags =
- cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
+ tx_cmd->rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc);
}
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@@ -469,7 +458,7 @@ static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
*/
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
+ struct iwl_tx_cmd_v6 *tx_cmd,
struct sk_buff *skb_frag,
int hdrlen)
{
@@ -543,7 +532,7 @@ static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
	 * (since we don't necessarily know the link), but FW rate
* selection was fixed.
*/
- return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
+ return mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
}
static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
@@ -567,7 +556,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_device_tx_cmd *dev_cmd;
- struct iwl_tx_cmd *tx_cmd;
+ struct iwl_tx_cmd_v6 *tx_cmd;
dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -577,7 +566,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
- u32 rate_n_flags = 0;
+ __le32 rate_n_flags = 0;
u16 flags = 0;
struct iwl_mvm_sta *mvmsta = sta ?
iwl_mvm_sta_from_mac80211(sta) : NULL;
@@ -609,9 +598,9 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_HIGH_PRI;
}
- if (mvm->trans->trans_cfg->device_family >=
+ if (mvm->trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd *cmd = (void *)dev_cmd->payload;
u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
info, amsdu);
@@ -624,9 +613,9 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le16(flags);
- cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ cmd->rate_n_flags = rate_n_flags;
} else {
- struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v9 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = iwl_mvm_tx_csum(mvm, skb,
info, amsdu);
@@ -639,12 +628,12 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le32(flags);
- cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ cmd->rate_n_flags = rate_n_flags;
}
goto out;
}
- tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
if (info->control.hw_key)
iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
@@ -1023,7 +1012,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* 1 more for the potential data in the header
*/
if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
- mvm->trans->max_skb_frags)
+ mvm->trans->info.max_skb_frags)
num_subframes = 1;
if (num_subframes > 1)
@@ -1185,7 +1174,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
seq_number &= IEEE80211_SCTL_SEQ;
if (!iwl_mvm_has_new_tx_api(mvm)) {
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
@@ -1372,8 +1361,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
- if ((tid_data->state == IWL_AGG_ON ||
- tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+ if (tid_data->state == IWL_AGG_ON &&
iwl_mvm_tid_queued(mvm, tid_data) == 0) {
/*
* Now that this aggregation or DQA queue is empty tell
@@ -1388,7 +1376,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
* to align the wrap around of ssn so we compare relevant values.
*/
normalized_ssn = tid_data->ssn;
- if (mvm->trans->trans_cfg->gen2)
+ if (mvm->trans->mac_cfg->gen2)
normalized_ssn &= 0xff;
if (normalized_ssn != tid_data->next_reclaimed)
@@ -1402,15 +1390,6 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
tid_data->state = IWL_AGG_STARTING;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
-
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- IWL_DEBUG_TX_QUEUES(mvm,
- "Can continue DELBA flow ssn = next_recl = %d\n",
- tid_data->next_reclaimed);
- tid_data->state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
default:
break;
}
@@ -1477,7 +1456,7 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
struct ieee80211_tx_rate *r)
{
u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
- u32 rate = format == RATE_MCS_HT_MSK ?
+ u32 rate = format == RATE_MCS_MOD_TYPE_HT ?
RATE_HT_MCS_INDEX(rate_n_flags) :
rate_n_flags & RATE_MCS_CODE_MSK;
@@ -1487,68 +1466,51 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
if (rate_n_flags & RATE_MCS_SGI_MSK)
r->flags |= IEEE80211_TX_RC_SHORT_GI;
- if (format == RATE_MCS_HT_MSK) {
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_HT:
r->flags |= IEEE80211_TX_RC_MCS;
r->idx = rate;
- } else if (format == RATE_MCS_VHT_MSK) {
+ break;
+ case RATE_MCS_MOD_TYPE_VHT:
ieee80211_rate_set_vht(r, rate,
FIELD_GET(RATE_MCS_NSS_MSK,
rate_n_flags) + 1);
r->flags |= IEEE80211_TX_RC_VHT_MCS;
- } else if (format == RATE_MCS_HE_MSK) {
+ break;
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
/* mac80211 cannot do this without ieee80211_tx_status_ext()
* but it only matters for radiotap */
r->idx = 0;
- } else {
+ break;
+ default:
r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
band);
}
}
-void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
- enum nl80211_band band,
- struct ieee80211_tx_rate *r)
-{
- if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
-
- r->flags |=
- iwl_mvm_get_hwrate_chan_width(rate_n_flags &
- RATE_MCS_CHAN_WIDTH_MSK_V1);
-
- if (rate_n_flags & RATE_MCS_SGI_MSK_V1)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
- r->flags |= IEEE80211_TX_RC_MCS;
- r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
- ieee80211_rate_set_vht(
- r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
- FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1);
- r->flags |= IEEE80211_TX_RC_VHT_MCS;
- } else {
- r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- band);
- }
-}
-
/*
* translate ucode response to mac80211 tx status control values
*/
-static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
- u32 rate_n_flags,
+static void iwl_mvm_hwrate_to_tx_status(struct iwl_mvm *mvm,
+ __le32 rate_n_flags,
struct ieee80211_tx_info *info)
{
struct ieee80211_tx_rate *r = &info->status.rates[0];
+ u32 rate;
- if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
- TX_CMD, 0) <= 6)
- rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ /*
+ * Technically this conversion is incorrect for BA status, however:
+	 * - we only use the BA notif data for older firmware versions
+	 *   that have host rate scaling and don't use newer rate formats
+	 * - the firmware API for the BA notif and for the TX CMD changed
+	 *   together
+ */
+ rate = iwl_mvm_v3_rate_from_fw(rate_n_flags, mvm->fw_rates_ver);
info->status.antenna =
- ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
- iwl_mvm_hwrate_to_tx_rate(rate_n_flags,
- info->band, r);
+ ((rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
+ iwl_mvm_hwrate_to_tx_rate(rate, info->band, r);
}
static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
@@ -1613,7 +1575,7 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
tx_resp->frame_count);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return val & 0xFFFF;
return val & 0xFFF;
}
@@ -1700,9 +1662,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
info->status.rates[0].count = tx_resp->failure_frame + 1;
- iwl_mvm_hwrate_to_tx_status(mvm->fw,
- le32_to_cpu(tx_resp->initial_rate),
- info);
+ iwl_mvm_hwrate_to_tx_status(mvm, tx_resp->initial_rate, info);
/* Don't assign the converted initial_rate, because driver
* TLC uses this and doesn't support the new FW rate
@@ -1944,7 +1904,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
if (!WARN_ON_ONCE(!mvmsta)) {
mvmsta->tid_data[tid].rate_n_flags =
- le32_to_cpu(tx_resp->initial_rate);
+ tx_resp->initial_rate;
mvmsta->tid_data[tid].tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
mvmsta->tid_data[tid].lq_color =
@@ -1969,7 +1929,7 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
int txq, int index,
- struct ieee80211_tx_info *tx_info, u32 rate,
+ struct ieee80211_tx_info *tx_info, __le32 rate,
bool is_flush)
{
struct sk_buff_head reclaimed_skbs;
@@ -2053,7 +2013,9 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
tx_info->status.status_driver_data[0] =
RS_DRV_DATA_PACK(tid_data->lq_color,
tx_info->status.status_driver_data[0]);
- tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
+ /* the value is only consumed for old FW that has v1 rates anyway */
+ tx_info->status.status_driver_data[1] =
+ (void *)(uintptr_t)le32_to_cpu(rate);
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -2072,7 +2034,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
info->flags |= IEEE80211_TX_STAT_AMPDU;
memcpy(&info->status, &tx_info->status,
sizeof(tx_info->status));
- iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info);
+ iwl_mvm_hwrate_to_tx_status(mvm, rate, info);
}
}
@@ -2095,7 +2057,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
goto out;
tx_info->band = chanctx_conf->def.chan->band;
- iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info);
+ iwl_mvm_hwrate_to_tx_status(mvm, rate, tx_info);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
@@ -2184,7 +2146,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
&ba_info,
- le32_to_cpu(ba_res->tx_rate), false);
+ ba_res->tx_rate, false);
}
if (mvmsta) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index dd890dcd1505..62da0132f383 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -142,7 +142,7 @@ int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
bool is_LB = band == NL80211_BAND_2GHZ;
- if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
return is_LB ? rate + IWL_FIRST_OFDM_RATE :
rate;
@@ -169,15 +169,9 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
- if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
- /* In the new rate legacy rates are indexed:
- * 0 - 3 for CCK and 0 - 7 for OFDM.
- */
- return (rate_idx >= IWL_FIRST_OFDM_RATE ?
- rate_idx - IWL_FIRST_OFDM_RATE :
- rate_idx);
-
- return iwl_fw_rate_idx_to_plcp(rate_idx);
+ return (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE :
+ rate_idx);
}
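
Worked example for the simplified mapping, assuming the usual iwlwifi rate table where IWL_FIRST_OFDM_RATE is 4 (CCK occupies indices 0-3): a CCK index such as 2 is returned unchanged, while the first OFDM entry (index 4, 6 Mbps) maps to 0, matching the "0 - 3 for CCK and 0 - 7 for OFDM" indexing the removed comment described.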
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
@@ -748,7 +742,7 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
unsigned int default_timeout =
- mvm->trans->trans_cfg->base_params->wd_timeout;
+ mvm->trans->mac_cfg->base->wd_timeout;
/*
* We can't know when the station is asleep or awake, so we
@@ -1187,9 +1181,9 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
{
u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
- mvm->trans->cfg->gp2_reg_addr)
- reg_addr = mvm->trans->cfg->gp2_reg_addr;
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
+ mvm->trans->mac_cfg->base->gp2_reg_addr)
+ reg_addr = mvm->trans->mac_cfg->base->gp2_reg_addr;
return iwl_read_prph(mvm->trans, reg_addr);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
index 8aa7c455bdee..976fd1f58da4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
@@ -5,7 +5,7 @@
#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-fh.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
#include "internal.h"
#include "iwl-prph.h"
@@ -97,11 +97,12 @@ out:
*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}
-int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
- const struct fw_img *fw)
+int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_context_info_v2 *ctxt_info_v2;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
struct iwl_prph_info *prph_info;
@@ -109,9 +110,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
u32 control_flags_ext = 0;
int ret;
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->cfg->min_txq_size);
+ trans->mac_cfg->base->min_txq_size);
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_DEF:
return -EINVAL;
case IWL_AMSDU_2K:
@@ -131,13 +132,13 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
break;
}
- if (trans->dsbr_urm_fw_dependent)
+ if (trans->conf.dsbr_urm_fw_dependent)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
- if (trans->dsbr_urm_permanent)
+ if (trans->conf.dsbr_urm_permanent)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
- if (trans->ext_32khz_clock_valid)
+ if (trans->conf.ext_32khz_clock_valid)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;
/* Allocate prph scratch */
@@ -151,16 +152,16 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->version.version = 0;
prph_sc_ctrl->version.mac_id =
- cpu_to_le16((u16)trans->hw_rev);
+ cpu_to_le16((u16)trans->info.hw_rev);
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
- if (trans->trans_cfg->imr_enabled)
+ if (trans->mac_cfg->imr_enabled)
control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
- if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iwl_is_force_scu_active_approved()) {
control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
IWL_DEBUG_FW(trans,
@@ -168,6 +169,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
}
+ if (trans->do_top_reset) {
+ WARN_ON(trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC);
+ control_flags |= IWL_PRPH_SCRATCH_TOP_RESET;
+ }
+
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
@@ -178,15 +184,16 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext);
/* initialize the Step equalizer data */
- prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
- prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);
+ prph_sc_ctrl->step_cfg.mbx_addr_0 =
+ cpu_to_le32(trans->conf.mbx_addr_0_step);
+ prph_sc_ctrl->step_cfg.mbx_addr_1 =
+ cpu_to_le32(trans->conf.mbx_addr_1_step);
/* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+ ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common);
if (ret)
goto err_free_prph_scratch;
-
/* Allocate prph information
* currently we don't assign to the prph info anything, but it would get
* assigned later
@@ -206,42 +213,58 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
}
/* Allocate context info */
- ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
- sizeof(*ctxt_info_gen3),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
- if (!ctxt_info_gen3) {
+ ctxt_info_v2 = dma_alloc_coherent(trans->dev,
+ sizeof(*ctxt_info_v2),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info_v2) {
ret = -ENOMEM;
goto err_free_prph_info;
}
- ctxt_info_gen3->prph_info_base_addr =
+ ctxt_info_v2->prph_info_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr);
- ctxt_info_gen3->prph_scratch_base_addr =
+ ctxt_info_v2->prph_scratch_base_addr =
cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
- ctxt_info_gen3->prph_scratch_size =
- cpu_to_le32(sizeof(*prph_scratch));
- ctxt_info_gen3->cr_head_idx_arr_base_addr =
+
+ /*
+ * This code assumes the FSEQ is last and we can make that
+ * optional; old devices _should_ be fine with a bigger size,
+ * but in simulation we check the size more precisely.
+ */
+ BUILD_BUG_ON(offsetofend(typeof(*prph_scratch), dram.common) +
+ sizeof(prph_scratch->dram.fseq_img) !=
+ sizeof(*prph_scratch));
+ if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ)
+ ctxt_info_v2->prph_scratch_size =
+ cpu_to_le32(sizeof(*prph_scratch));
+ else
+ ctxt_info_v2->prph_scratch_size =
+ cpu_to_le32(offsetofend(typeof(*prph_scratch),
+ dram.common));
+
+ ctxt_info_v2->cr_head_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
- ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+ ctxt_info_v2->tr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
- ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+ ctxt_info_v2->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
- ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
- ctxt_info_gen3->mcr_base_addr =
+ ctxt_info_v2->mtr_base_addr =
+ cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
+ ctxt_info_v2->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
- ctxt_info_gen3->mtr_size =
+ ctxt_info_v2->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
- ctxt_info_gen3->mcr_size =
- cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));
+ ctxt_info_v2->mcr_size =
+ cpu_to_le16(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)));
- trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+ trans_pcie->ctxt_info_v2 = ctxt_info_v2;
trans_pcie->prph_info = prph_info;
trans_pcie->prph_scratch = prph_scratch;
/* Allocate IML */
- trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
+ trans_pcie->iml_len = fw->iml_len;
+ trans_pcie->iml = dma_alloc_coherent(trans->dev, fw->iml_len,
&trans_pcie->iml_dma_addr,
GFP_KERNEL);
if (!trans_pcie->iml) {
@@ -249,27 +272,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
goto err_free_ctxt_info;
}
- memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
-
- iwl_enable_fw_load_int_ctx_info(trans);
-
- /* kick FW self load */
- iwl_write64(trans, CSR_CTXT_INFO_ADDR,
- trans_pcie->ctxt_info_dma_addr);
- iwl_write64(trans, CSR_IML_DATA_ADDR,
- trans_pcie->iml_dma_addr);
- iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
-
- iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
- CSR_AUTO_FUNC_BOOT_ENA);
+ memcpy(trans_pcie->iml, fw->iml, fw->iml_len);
return 0;
err_free_ctxt_info:
- dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
- trans_pcie->ctxt_info_gen3,
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2),
+ trans_pcie->ctxt_info_v2,
trans_pcie->ctxt_info_dma_addr);
- trans_pcie->ctxt_info_gen3 = NULL;
+ trans_pcie->ctxt_info_v2 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
trans_pcie->prph_info_dma_addr);
@@ -283,14 +294,31 @@ err_free_prph_scratch:
}
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
+void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_enable_fw_load_int_ctx_info(trans, trans->do_top_reset);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
+ iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
+ iwl_write32(trans, CSR_IML_SIZE_ADDR, trans_pcie->iml_len);
+
+ iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
+ CSR_AUTO_FUNC_BOOT_ENA);
+}
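
Splitting the old init function into _alloc() and _kick() lets callers do work between preparing the DMA structures and starting the firmware self load (for example arming the TOP reset flag checked in the alloc path above). A hedged sketch of the resulting call order, using the signatures from this patch:

	ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img);
	if (ret)
		return ret;
	/* ...optionally load PNVM / reduce-power tables, set flags... */
	iwl_pcie_ctxt_info_v2_kick(trans);	/* writes the CSR addresses and
						 * starts the FW self load */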
+
+void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (trans_pcie->iml) {
- dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
+ dma_free_coherent(trans->dev, trans_pcie->iml_len,
+ trans_pcie->iml,
trans_pcie->iml_dma_addr);
trans_pcie->iml_dma_addr = 0;
+ trans_pcie->iml_len = 0;
trans_pcie->iml = NULL;
}
@@ -299,15 +327,15 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
if (alive)
return;
- if (!trans_pcie->ctxt_info_gen3)
+ if (!trans_pcie->ctxt_info_v2)
return;
- /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
- dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
- trans_pcie->ctxt_info_gen3,
+ /* ctxt_info_v2 and prph_scratch are still needed for PNVM load */
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2),
+ trans_pcie->ctxt_info_v2,
trans_pcie->ctxt_info_dma_addr);
trans_pcie->ctxt_info_dma_addr = 0;
- trans_pcie->ctxt_info_gen3 = NULL;
+ trans_pcie->ctxt_info_v2 = NULL;
dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
trans_pcie->prph_scratch,
@@ -322,9 +350,9 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
trans_pcie->prph_info = NULL;
}
-static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_data,
- struct iwl_dram_data *dram)
+static int iwl_pcie_load_payloads_contig(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ struct iwl_dram_data *dram)
{
u32 len, len0, len1;
@@ -411,9 +439,9 @@ static int iwl_pcie_load_payloads_segments
}
-int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa)
+int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_payloads,
+ const struct iwl_ucode_capabilities *capa)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
@@ -428,7 +456,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
return -EBUSY;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return 0;
if (!pnvm_payloads->n_chunks) {
@@ -445,10 +473,8 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
trans->pnvm_loaded = true;
} else {
/* save only in one DRAM section */
- ret = iwl_pcie_load_payloads_continuously
- (trans,
- pnvm_payloads,
- &dram_regions->drams[0]);
+ ret = iwl_pcie_load_payloads_contig(trans, pnvm_payloads,
+ &dram_regions->drams[0]);
if (!ret) {
dram_regions->n_regions = 1;
trans->pnvm_loaded = true;
@@ -483,7 +509,7 @@ static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
cpu_to_le32(iwl_dram_regions_size(dram_regions));
}
-static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
+static void iwl_pcie_set_contig_pnvm(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
@@ -495,21 +521,21 @@ static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}
-void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
+void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
{
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
iwl_pcie_set_pnvm_segments(trans);
else
- iwl_pcie_set_continuous_pnvm(trans);
+ iwl_pcie_set_contig_pnvm(trans);
}
-int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa)
+int iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
@@ -521,7 +547,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
if (trans->reduce_power_loaded)
return 0;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return 0;
if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
@@ -541,10 +567,8 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
trans->reduce_power_loaded = true;
} else {
/* save only in one DRAM section */
- ret = iwl_pcie_load_payloads_continuously
- (trans,
- payloads,
- &dram_regions->drams[0]);
+ ret = iwl_pcie_load_payloads_contig(trans, payloads,
+ &dram_regions->drams[0]);
if (!ret) {
dram_regions->n_regions = 1;
trans->reduce_power_loaded = true;
@@ -567,7 +591,7 @@ static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
cpu_to_le32(iwl_dram_regions_size(dram_regions));
}
-static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
+static void iwl_pcie_set_contig_reduce_power(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
@@ -580,15 +604,15 @@ static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
}
void
-iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
+iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
{
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
iwl_pcie_set_reduce_power_segments(trans);
else
- iwl_pcie_set_continuous_reduce_power(trans);
+ iwl_pcie_set_contig_reduce_power(trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 344e4d5a1c6e..4f2be0c1bd97 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -83,7 +83,7 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
- struct iwl_context_info_dram *ctxt_dram)
+ struct iwl_context_info_dram_nonfseq *ctxt_dram)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
@@ -161,12 +161,12 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
}
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
- const struct fw_img *fw)
+ const struct fw_img *img)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
- u32 control_flags = 0, rb_size;
+ u32 control_flags = 0, rb_size, cb_size;
dma_addr_t phys;
int ret;
@@ -180,11 +180,11 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
- cpu_to_le16((u16)trans->hw_rev);
+ cpu_to_le16((u16)trans->info.hw_rev);
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_2K:
rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
break;
@@ -202,11 +202,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
- WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
+ cb_size = RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans));
+ if (WARN_ON(cb_size > 12))
+ cb_size = 12;
+
control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
- control_flags |=
- u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
- IWL_CTXT_INFO_RB_CB_SIZE);
+ control_flags |= u32_encode_bits(cb_size, IWL_CTXT_INFO_RB_CB_SIZE);
control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
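
Note the behavioural change here: the old code only warned when RX_QUEUE_CB_SIZE() exceeded 12 and then let u32_encode_bits() silently truncate the oversized value, whereas the new code clamps it explicitly. Assuming RX_QUEUE_CB_SIZE() yields a log2-style size code (as its use in a bit field suggests), the clamp caps the receive ring at 2^12 = 4096 entries instead of programming a wrapped value.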
@@ -218,12 +219,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
/* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
+ ret = iwl_pcie_init_fw_sec(trans, img, &ctxt_info->dram);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
@@ -232,7 +233,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
trans_pcie->ctxt_info = ctxt_info;
- iwl_enable_fw_load_int_ctx_info(trans);
+ iwl_enable_fw_load_int_ctx_info(trans, false);
/* Configure debug, if exists */
if (iwl_pcie_dbg_on(trans))
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 00056e76ea3d..0a9e0dbb58fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -17,15 +17,13 @@
#include "iwl-prph.h"
#include "internal.h"
-#define TRANS_CFG_MARKER BIT(0)
#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \
struct _struct)
extern int _invalid_type;
-#define _TRANS_CFG_MARKER(cfg) \
- (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \
- TRANS_CFG_MARKER, \
- __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
-#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
+#define _TRANS_CFG_CHECK(cfg) \
+ (__builtin_choose_expr(_IS_A(cfg, iwl_mac_cfg), \
+ 0, _invalid_type))
+#define _ASSIGN_CFG(cfg) (_TRANS_CFG_CHECK(cfg) + (kernel_ulong_t)&(cfg))
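
With the marker bit gone, the macro is now a pure compile-time type check. A self-contained sketch of the mechanism (GCC/clang builtins; names shortened, not the real iwlwifi headers):

	struct iwl_mac_cfg { int dummy; };
	extern int _invalid_type;	/* deliberately never defined */

	#define _IS_A(cfg, _struct) \
		__builtin_types_compatible_p(typeof(cfg), struct _struct)
	#define _CHECK(cfg) \
		(__builtin_choose_expr(_IS_A(cfg, iwl_mac_cfg), 0, _invalid_type))

	static const struct iwl_mac_cfg example_cfg;

	unsigned long assign_ok(void)
	{
		/* type matches: the constant-0 branch is chosen, compiles */
		return _CHECK(example_cfg) + (unsigned long)&example_cfg;
	}

	/* _CHECK(42) would select the _invalid_type branch and fail at
	 * link time, catching a wrongly-typed entry in the PCI ID table. */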
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
@@ -35,515 +33,518 @@ extern int _invalid_type;
/* Hardware specific file defines the PCI IDs table for that hardware module */
VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
#if IS_ENABLED(CONFIG_IWLDVM)
- {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5000_mac_cfg)}, /* Half Mini Card */
/* 5300 Series WiFi */
- {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5000_mac_cfg)}, /* Half Mini Card */
/* 5350 Series WiFi/WiMax */
- {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5000_mac_cfg)}, /* Mini Card */
/* 5150 Series WiFi/WiMax */
- {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
-
- {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_mac_cfg)}, /* Half Mini Card */
+
+ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_mac_cfg)}, /* Half Mini Card */
/* 6x00 Series */
- {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
- {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_mac_cfg)},
/* 6x05 Series */
- {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
- {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_mac_cfg)},/* low 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_mac_cfg)},/* high 5GHz active */
/* 6x30 Series */
- {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_mac_cfg)},
/* 6x50 WiFi/WiMax Series */
- {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_mac_cfg)},
/* 6150 WiFi/WiMax Series */
- {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_mac_cfg)},
/* 1000 Series WiFi */
- {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_mac_cfg)},
/* 100 Series WiFi */
- {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
- {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl1000_mac_cfg)},
/* 130 Series WiFi */
- {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5005, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5007, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5015, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5017, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5025, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5027, iwl1000_mac_cfg)},
/* 2x00 Series */
- {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_mac_cfg)},
/* 2x30 Series */
- {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_mac_cfg)},
/* 6x35 Series */
- {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6030_mac_cfg)},
/* 105 Series */
- {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_mac_cfg)},
/* 135 Series */
- {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_mac_cfg)},
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
/* 7260 Series */
- {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
- {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7000_mac_cfg)},
/* 3160 Series */
- {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl7000_mac_cfg)},
/* 3165 Series */
- {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4012, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4212, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4410, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4510, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4310, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4210, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8110, iwl7000_mac_cfg)},
/* 3168 Series */
- {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl7000_mac_cfg)},
/* 7265 Series */
- {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7000_mac_cfg)},
/* 8000 Series */
- {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
- {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1431, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1432, iwl8000_mac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
- {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)},
- {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)},
- {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)},
- {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_mac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_mac_cfg)},
/* Qu devices */
- {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
- {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+ {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_mac_cfg)},
+ {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_mac_cfg)},

- {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},

- {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_mac_cfg)},

- {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)},
+ {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_mac_cfg)},

-/* So devices */
- {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
- {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
- {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
- {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
- {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
+/* Ty/So devices */
+ {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_ty_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_mac_cfg)},
+ {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_mac_cfg)},
+ {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_mac_cfg)},
/* Ma devices */
- {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
- {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
+ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_mac_cfg)},
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
/* Bz devices */
- {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1775, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1776, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_mac_cfg)},
/* Sc devices */
- {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
- {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
- {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
- {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
-
-/* Dr devices */
- {IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
+ {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_mac_cfg)},
+ {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_mac_cfg)},
+ {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_mac_cfg)},
+ {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_mac_cfg)},
#endif /* CONFIG_IWLMLD */
{0}
@@ -551,689 +552,518 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
-#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
- _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
- { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
- .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \
- .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
- .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY }
-
-#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
- _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, _cfg, _name)
+#define _IWL_DEV_INFO(_cfg, _name, ...) { \
+ .cfg = &_cfg, \
+ .name = _name, \
+ .device = IWL_CFG_ANY, \
+ .subdevice = IWL_CFG_ANY, \
+ .subdevice_m_h = 15, \
+ __VA_ARGS__ \
+}
+#define IWL_DEV_INFO(_cfg, _name, ...) \
+ _IWL_DEV_INFO(_cfg, _name, __VA_ARGS__)
+
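(The _IWL_DEV_INFO() initializer above fills the device/subdevice matcher
fields with wildcards first and splices the caller's designated
initializers in last via __VA_ARGS__; C's designated-initializer rules
(C99 6.7.8) let a later initializer override an earlier one for the same
member, so each table entry only names the fields it actually matches on.
A minimal standalone sketch of that idiom, not part of the patch, with a
hypothetical DEV_INFO()/struct dev_info and -1 standing in for
IWL_CFG_ANY:

#include <stdio.h>

struct dev_info { int device; int subdevice; };

/* defaults first, caller overrides last: the later designated
 * initializer wins (GCC/Clang warn under -Woverride-init) */
#define DEV_INFO(...) { .device = -1, .subdevice = -1, __VA_ARGS__ }

int main(void)
{
	struct dev_info a = DEV_INFO();                 /* both wildcards */
	struct dev_info b = DEV_INFO(.device = 0x4232); /* subdevice stays -1 */

	printf("%d %d / %d %d\n", a.device, a.subdevice, b.device, b.subdevice);
	return 0;
})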
+#define DEVICE(n) .device = (n)
+#define SUBDEV(n) .subdevice = (n)
+#define _LOWEST_BIT(n) (__builtin_ffs(n) - 1)
+#define _BIT_ABOVE_MASK(n) ((n) + (1 << _LOWEST_BIT(n)))
+#define _HIGHEST_BIT(n) (__builtin_ffs(_BIT_ABOVE_MASK(n)) - 2)
+#define _IS_POW2(n) (((n) & ((n) - 1)) == 0)
+#define _IS_CONTIG(n) _IS_POW2(_BIT_ABOVE_MASK(n))
+#define _CHECK_MASK(m) BUILD_BUG_ON_ZERO(!_IS_CONTIG(m))
+#define SUBDEV_MASKED(v, m) .subdevice = (v) + _CHECK_MASK(m), \
+ .subdevice_m_l = _LOWEST_BIT(m), \
+ .subdevice_m_h = _HIGHEST_BIT(m)
+#define RF_TYPE(n) .match_rf_type = 1, \
+ .rf_type = IWL_CFG_RF_TYPE_##n
+#define RF_STEP(n) .match_rf_step = 1, \
+ .rf_step = SILICON_##n##_STEP
+#define RF_ID(n) .match_rf_id = 1, \
+ .rf_id = IWL_CFG_RF_ID_##n
+#define NO_CDB .match_cdb = 1, .cdb = 0
+#define CDB .match_cdb = 1, .cdb = 1
+#define BW_NOT_LIMITED .match_bw_limit = 1, .bw_limit = 0
+#define BW_LIMITED .match_bw_limit = 1, .bw_limit = 1
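(SUBDEV_MASKED() only accepts contiguous masks: _CHECK_MASK() trips
BUILD_BUG_ON_ZERO() unless adding the mask's lowest set bit yields a
power of two, and the mask is then stored as a low/high bit range in
subdevice_m_l/subdevice_m_h; the ".subdevice_m_h = 15" default in
_IWL_DEV_INFO() correspondingly reads as "match the whole 16-bit
subdevice ID". A standalone user-space sketch of the same arithmetic,
not part of the patch; it assumes only GCC/Clang's __builtin_ffs(), and
lowest_bit()/highest_bit()/is_contig() are illustrative counterparts of
_LOWEST_BIT()/_HIGHEST_BIT()/_IS_CONTIG() above:

#include <assert.h>

static int lowest_bit(unsigned int m)  { return __builtin_ffs(m) - 1; }

static int highest_bit(unsigned int m)
{
	/* for a contiguous mask, adding its lowest set bit produces a
	 * single power of two one bit above the mask's top bit */
	unsigned int above = m + (1u << lowest_bit(m));

	return __builtin_ffs(above) - 2;
}

static int is_contig(unsigned int m)
{
	unsigned int above = m + (1u << lowest_bit(m));

	return (above & (above - 1)) == 0;	/* power of two? */
}

int main(void)
{
	assert(lowest_bit(0xF) == 0 && highest_bit(0xF) == 3);
	/* 0xF000 as used by SUBDEV_MASKED(0xC000, 0xF000) below */
	assert(lowest_bit(0xF000) == 12 && highest_bit(0xF000) == 15);
	assert(is_contig(0xF) && !is_contig(0x5));	/* 0x5 has a hole */
	return 0;
})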
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
+#if IS_ENABLED(CONFIG_IWLDVM)
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+ DEVICE(0x4232), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+ DEVICE(0x4232), SUBDEV_MASKED(0x4, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_bgn_name,
+ DEVICE(0x4232), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl5100_abg_cfg, iwl5100_abg_name,
+ DEVICE(0x4232), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+ DEVICE(0x4237), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+ DEVICE(0x4237), SUBDEV_MASKED(0x4, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_bgn_name,
+ DEVICE(0x4237), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl5100_abg_cfg, iwl5100_abg_name,
+ DEVICE(0x4237), SUBDEV_MASKED(0x6, 0xF)),
+
+/* 5300 Series WiFi */
+ IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+ DEVICE(0x4235), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+ DEVICE(0x4235), SUBDEV_MASKED(0x4, 0xF)),
+ IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+ DEVICE(0x4236), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+ DEVICE(0x4236), SUBDEV_MASKED(0x4, 0xF)),
+
+/* 5350 Series WiFi/WiMax */
+ IWL_DEV_INFO(iwl5350_agn_cfg, iwl5350_agn_name,
+ DEVICE(0x423A)),
+ IWL_DEV_INFO(iwl5350_agn_cfg, iwl5350_agn_name,
+ DEVICE(0x423B)),
+
+/* 5150 Series Wifi/WiMax */
+ IWL_DEV_INFO(iwl5150_agn_cfg, iwl5150_agn_name,
+ DEVICE(0x423C), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5150_abg_cfg, iwl5150_abg_name,
+ DEVICE(0x423C), SUBDEV_MASKED(0x6, 0xF)),
+
+ IWL_DEV_INFO(iwl5150_agn_cfg, iwl5150_agn_name,
+ DEVICE(0x423D), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5150_abg_cfg, iwl5150_abg_name,
+ DEVICE(0x423D), SUBDEV_MASKED(0x6, 0xF)),
+
+/* 6x00 Series */
+ IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+ DEVICE(0x422B), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+ DEVICE(0x422B), SUBDEV_MASKED(0x8, 0xF)),
+ IWL_DEV_INFO(iwl6000i_2agn_cfg, iwl6000i_2agn_name,
+ DEVICE(0x422C), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2abg_name,
+ DEVICE(0x422C), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2bg_name,
+ DEVICE(0x422C), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+ DEVICE(0x4238), SUBDEV(0x1111)),
+ IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+ DEVICE(0x4238), SUBDEV(0x1118)),
+ IWL_DEV_INFO(iwl6000i_2agn_cfg, iwl6000i_2agn_name,
+ DEVICE(0x4239), SUBDEV(0x1311)),
+ IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2abg_name,
+ DEVICE(0x4239), SUBDEV(0x1316)),
+
+/* 6x05 Series */
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2abg_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2bg_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0x8, 0xF)),
+
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+ DEVICE(0x0085), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+ DEVICE(0x0085), SUBDEV_MASKED(0x8, 0xF)),
+ IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2abg_name,
+ DEVICE(0x0085), SUBDEV_MASKED(0x6, 0xF)),
+
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_sff_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0xC000, 0xF000)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_d_name,
+ DEVICE(0x0082), SUBDEV(0x4820)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow1_name,
+ DEVICE(0x0082), SUBDEV(0x1304)),/* low 5GHz active */
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow2_name,
+ DEVICE(0x0082), SUBDEV(0x1305)),/* high 5GHz active */
+
+/* 6x30 Series */
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl1030_bgn_name,
+ DEVICE(0x008A), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl1030_bg_name,
+ DEVICE(0x008A), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl1030_bgn_name,
+ DEVICE(0x008B), SUBDEV(0x5315)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl1030_bg_name,
+ DEVICE(0x008B), SUBDEV(0x5317)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2agn_name,
+ DEVICE(0x0090), SUBDEV(0x5211)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2bgn_name,
+ DEVICE(0x0090), SUBDEV(0x5215)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2abg_name,
+ DEVICE(0x0090), SUBDEV(0x5216)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2agn_name,
+ DEVICE(0x0091), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2bgn_name,
+ DEVICE(0x0091), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2abg_name,
+ DEVICE(0x0091), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2bg_name,
+ DEVICE(0x0091), SUBDEV(0x5207)),
+
+/* 6x50 WiFi/WiMax Series */
+ IWL_DEV_INFO(iwl6050_2agn_cfg, iwl6050_2agn_name,
+ DEVICE(0x0087), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6050_2abg_cfg, iwl6050_2abg_name,
+ DEVICE(0x0087), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl6050_2agn_cfg, iwl6050_2agn_name,
+ DEVICE(0x0089), SUBDEV(0x1311)),
+ IWL_DEV_INFO(iwl6050_2abg_cfg, iwl6050_2abg_name,
+ DEVICE(0x0089), SUBDEV(0x1316)),
+
+/* 6150 WiFi/WiMax Series */
+ IWL_DEV_INFO(iwl6150_bgn_cfg, iwl6150_bgn_name,
+ DEVICE(0x0885), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl6150_bg_cfg, iwl6150_bg_name,
+ DEVICE(0x0885), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl6150_bgn_cfg, iwl6150_bgn_name,
+ DEVICE(0x0886), SUBDEV(0x1315)),
+ IWL_DEV_INFO(iwl6150_bg_cfg, iwl6150_bg_name,
+ DEVICE(0x0886), SUBDEV(0x1317)),
+
+/* 1000 Series WiFi */
+ IWL_DEV_INFO(iwl1000_bgn_cfg, iwl1000_bgn_name,
+ DEVICE(0x0083), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
+ DEVICE(0x0083), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
+ DEVICE(0x0084), SUBDEV(0x1216)),
+ IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
+ DEVICE(0x0084), SUBDEV(0x1316)),
+
+/* 100 Series WiFi */
+ IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name,
+ DEVICE(0x08AE), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl100_bg_cfg, iwl100_bg_name,
+ DEVICE(0x08AE), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name,
+ DEVICE(0x08AF), SUBDEV(0x1015)),
+ IWL_DEV_INFO(iwl100_bg_cfg, iwl100_bg_name,
+ DEVICE(0x08AF), SUBDEV(0x1017)),
+
+/* 130 Series WiFi */
+ IWL_DEV_INFO(iwl130_bgn_cfg, iwl130_bgn_name,
+ DEVICE(0x0896), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl130_bg_cfg, iwl130_bg_name,
+ DEVICE(0x0896), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl130_bgn_cfg, iwl130_bgn_name,
+ DEVICE(0x0897), SUBDEV(0x5015)),
+ IWL_DEV_INFO(iwl130_bg_cfg, iwl130_bg_name,
+ DEVICE(0x0897), SUBDEV(0x5017)),
+
+/* 2x00 Series */
+ IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+ DEVICE(0x0890), SUBDEV(0x4022)),
+ IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+ DEVICE(0x0891), SUBDEV(0x4222)),
+ IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+ DEVICE(0x0890), SUBDEV(0x4422)),
+ IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_d_name,
+ DEVICE(0x0890), SUBDEV(0x4822)),
+
+/* 2x30 Series */
+ IWL_DEV_INFO(iwl2030_2bgn_cfg, iwl2030_2bgn_name,
+ DEVICE(0x0887)),
+ IWL_DEV_INFO(iwl2030_2bgn_cfg, iwl2030_2bgn_name,
+ DEVICE(0x0888), SUBDEV(0x4262)),
+
+/* 6x35 Series */
+ IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_name,
+ DEVICE(0x088E), SUBDEV_MASKED(0x0, 0xF)),
+ IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_sff_name,
+ DEVICE(0x088E), SUBDEV_MASKED(0xA, 0xF)),
+ IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_name,
+ DEVICE(0x088F), SUBDEV_MASKED(0x0, 0xF)),
+ IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_sff_name,
+ DEVICE(0x088F), SUBDEV_MASKED(0xA, 0xF)),
+
+/* 105 Series */
+ IWL_DEV_INFO(iwl105_bgn_cfg, iwl105_bgn_name,
+ DEVICE(0x0894)),
+ IWL_DEV_INFO(iwl105_bgn_cfg, iwl105_bgn_name,
+ DEVICE(0x0895), SUBDEV(0x0222)),
+
+/* 135 Series */
+ IWL_DEV_INFO(iwl135_bgn_cfg, iwl135_bgn_name,
+ DEVICE(0x0892)),
+ IWL_DEV_INFO(iwl135_bgn_cfg, iwl135_bgn_name,
+ DEVICE(0x0893), SUBDEV(0x0262)),
+#endif /* CONFIG_IWLDVM */
+
#if IS_ENABLED(CONFIG_IWLMVM)
-/* 9000 */
- IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
- IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
- IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
- IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
- IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
- IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
- IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
- IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
- IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
-
- IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
- IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma, iwl_ax411_killer_1690i_name),
-
-/* AX200 */
- IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
- IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name),
- IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
-
- /* Qu with Hr */
- IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
- IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
- IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-
- IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-
- IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
-
- /* So with HR */
- IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x2020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name),
- IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name),
- IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL),
-
- /* So with JF */
- IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
- IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
- IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
- IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
-
- /* SO with GF2 */
- IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-
- /* MA with GF2 */
- IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma, iwl_ax211_killer_1675i_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9462_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9560_name),
-
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9270_160_name),
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9270_name),
-
- _IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9162_160_name),
- _IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9162_name),
-
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9260_160_name),
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9260_name),
-
-/* Qu with Jf */
- /* Qu B step */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
- /* Qu C step */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
- /* QuZ */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
-/* Qu with Hr */
- /* Qu B step */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_b0_hr1_b0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_b0_hr_b0, iwl_ax203_name),
-
- /* Qu C step */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_c0_hr1_b0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_c0_hr_b0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_c0_hr_b0, iwl_ax201_name),
-
- /* QuZ */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_quz_a0_hr1_b0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_quz_a0_hr_b0, iwl_ax201_name),
-
-/* Ma */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax201_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_ma, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax231_name),
-
-/* So with Hr */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
-
-/* So-F with Hr */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
-
-/* So-F with Gf */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
- iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-
-/* SoF with JF2 */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
-
-/* SoF with JF */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
-
-/* So with GF */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
- iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-
-/* So with JF2 */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
-
-/* So with JF */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+/* 7260 Series */
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+		     DEVICE(0x08B1)), /* unlisted subdevices fall back here */
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4060)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x406A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4160)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0x4062)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0x4162)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4460)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x446A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0x4462)),
+ IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B1), SUBDEV(0x4A70)),
+ IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B1), SUBDEV(0x4A6E)),
+ IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B1), SUBDEV(0x4A6C)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4560)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4020)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x402A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4420)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC060)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC06A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC160)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0xC062)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0xC162)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC760)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC460)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0xC462)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC560)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC360)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC020)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC02A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC420)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0x4270)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0x4272)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0x4260)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0x426A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B2), SUBDEV(0x4262)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0x4370)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0x4360)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0x4220)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0xC270)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0xC272)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0xC260)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B2), SUBDEV(0xC26A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B2), SUBDEV(0xC262)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0xC370)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0xC220)),
+
+/* 3160 Series */
+ IWL_DEV_INFO(iwl3160_cfg, iwl3160_2ac_name,
+ DEVICE(0x08B3)),
+ IWL_DEV_INFO(iwl3160_cfg, iwl3160_n_name,
+ DEVICE(0x08B3), SUBDEV_MASKED(0x62, 0xFF)),
+ IWL_DEV_INFO(iwl3160_cfg, iwl3160_2n_name,
+ DEVICE(0x08B3), SUBDEV_MASKED(0x60, 0xFF)),
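+	/*
+	 * A note on the masked match, as implemented in
+	 * iwl_pci_find_dev_info() below: SUBDEV_MASKED(val, mask) matches
+	 * when (subsystem_device & mask) == val, so SUBDEV_MASKED(0x62, 0xFF)
+	 * above catches every subdevice whose low byte is 0x62 (0x0062,
+	 * 0x4062, 0xC062, ...), while a plain SUBDEV() compares the full
+	 * 16-bit subsystem ID.
+	 */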
+
+ IWL_DEV_INFO(iwl3160_cfg, iwl3160_2ac_name,
+ DEVICE(0x08B4)),
+
+/* 3165 Series */
+ IWL_DEV_INFO(iwl3165_2ac_cfg, iwl3165_2ac_name,
+ DEVICE(0x3165)),
+ IWL_DEV_INFO(iwl3165_2ac_cfg, iwl3165_2ac_name,
+ DEVICE(0x3166)),
+
+/* 3168 Series */
+ IWL_DEV_INFO(iwl3168_2ac_cfg, iwl3168_2ac_name,
+ DEVICE(0x24FB)),
+
+/* 7265 Series */
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2ac_name,
+ DEVICE(0x095A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5000)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x500A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+ DEVICE(0x095A), SUBDEV(0x5002)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+ DEVICE(0x095A), SUBDEV(0x5102)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5020)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x502A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5090)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5190)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5100)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5400)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5420)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5490)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5C10)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5590)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x9000)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x900A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x9400)),
+
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2ac_name,
+ DEVICE(0x095B)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095B), SUBDEV(0x520A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+ DEVICE(0x095B), SUBDEV(0x5302)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095B), SUBDEV(0x5200)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+ DEVICE(0x095B), SUBDEV(0x5202)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095B), SUBDEV(0x9200)),
+
+/* 8000 Series */
+ IWL_DEV_INFO(iwl8260_cfg, iwl8260_2ac_name,
+ DEVICE(0x24F3)),
+ IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name,
+ DEVICE(0x24F3), SUBDEV(0x0004)),
+ IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name,
+ DEVICE(0x24F3), SUBDEV(0x0044)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8265_2ac_name,
+ DEVICE(0x24FD)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
+ DEVICE(0x24FD), SUBDEV(0x3E02)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
+ DEVICE(0x24FD), SUBDEV(0x3E01)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
+ DEVICE(0x24FD), SUBDEV(0x1012)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
+ DEVICE(0x24FD), SUBDEV(0x0012)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl_killer_1435i_name,
+ DEVICE(0x24FD), SUBDEV(0x1431)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl_killer_1434_kix_name,
+ DEVICE(0x24FD), SUBDEV(0x1432)),
+
+/* JF1 RF */
+ IWL_DEV_INFO(iwl_rf_jf, iwl9461_160_name,
+ RF_TYPE(JF1)),
+ IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9461_name,
+ RF_TYPE(JF1), BW_LIMITED),
+ IWL_DEV_INFO(iwl_rf_jf, iwl9462_160_name,
+ RF_TYPE(JF1), RF_ID(JF1_DIV)),
+ IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9462_name,
+ RF_TYPE(JF1), RF_ID(JF1_DIV), BW_LIMITED),
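+	/*
+	 * BW_LIMITED entries pair a bandwidth-limited config (e.g.
+	 * iwl_rf_jf_80mhz) with the corresponding reduced-capability
+	 * name, while the unrestricted config takes the "_160" name.
+	 */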
+/* JF2 RF */
+ IWL_DEV_INFO(iwl_rf_jf, iwl9260_160_name,
+ RF_TYPE(JF2)),
+ IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9260_name,
+ RF_TYPE(JF2), BW_LIMITED),
+ IWL_DEV_INFO(iwl_rf_jf, iwl9560_160_name,
+ RF_TYPE(JF2), RF_ID(JF)),
+ IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9560_name,
+ RF_TYPE(JF2), RF_ID(JF), BW_LIMITED),
+
+/* HR RF */
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_name, RF_TYPE(HR2)),
+ IWL_DEV_INFO(iwl_rf_hr_80mhz, iwl_ax101_name, RF_TYPE(HR1)),
+ IWL_DEV_INFO(iwl_rf_hr_80mhz, iwl_ax203_name, RF_TYPE(HR2), BW_LIMITED),
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_name, DEVICE(0x2723)),
+
+/* GF RF */
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_name, RF_TYPE(GF)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_name, RF_TYPE(GF), CDB),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_name, DEVICE(0x2725)),
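+	/* the discrete AX200/AX210 NICs above match on PCI device ID alone */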
+
+/* Killer CRFs */
+ IWL_DEV_INFO(iwl_rf_jf, iwl9260_killer_1550_name, SUBDEV(0x1550)),
+ IWL_DEV_INFO(iwl_rf_jf, iwl9560_killer_1550s_name, SUBDEV(0x1551)),
+ IWL_DEV_INFO(iwl_rf_jf, iwl9560_killer_1550i_name, SUBDEV(0x1552)),
+
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_killer_1650s_name, SUBDEV(0x1651)),
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_killer_1650i_name, SUBDEV(0x1652)),
+
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_killer_1675s_name, SUBDEV(0x1671)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_killer_1675i_name, SUBDEV(0x1672)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_killer_1675w_name, SUBDEV(0x1673)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_killer_1675x_name, SUBDEV(0x1674)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_killer_1690s_name, SUBDEV(0x1691)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_killer_1690i_name, SUBDEV(0x1692)),
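+	/*
+	 * iwl_pci_find_dev_info() walks the table backwards, so these
+	 * subsystem-ID-only Killer entries take precedence over the
+	 * generic RF entries above.
+	 */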
+
+/* Killer discrete */
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_killer_1650w_name,
+ DEVICE(0x2723), SUBDEV(0x1653)),
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_killer_1650x_name,
+ DEVICE(0x2723), SUBDEV(0x1654)),
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
-/* Bz */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_ax201_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_ax211_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_fm_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_wh_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_ax201_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_ax211_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_fm_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_wh_name),
-
-/* Ga (Gl) */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_gl_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_mtp_name),
-
-/* Sc */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_fm_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_wh_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_fm_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_wh_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_fm_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_wh_name),
-
-/* Dr */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_dr, iwl_dr_name),
-
-/* Br */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_br, iwl_br_name),
+/* FM RF */
+ IWL_DEV_INFO(iwl_rf_fm, iwl_be201_name, RF_TYPE(FM)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_be401_name, RF_TYPE(FM), CDB),
+	/* the discrete NICs got RF step B0; it only matters for the name */
+ IWL_DEV_INFO(iwl_rf_fm, iwl_be200_name, RF_TYPE(FM),
+ DEVICE(0x272B), RF_STEP(B)),
+ IWL_DEV_INFO(iwl_rf_fm_160mhz, iwl_be202_name,
+ RF_TYPE(FM), BW_LIMITED),
+
+/* Killer CRFs */
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750s_name, SUBDEV(0x1771)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750i_name, SUBDEV(0x1772)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1790s_name, SUBDEV(0x1791)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1790i_name, SUBDEV(0x1792)),
+
+/* Killer discrete */
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750w_name,
+ DEVICE(0x272B), SUBDEV(0x1773)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750x_name,
+ DEVICE(0x272B), SUBDEV(0x1774)),
+
+/* WH RF */
+ IWL_DEV_INFO(iwl_rf_wh, iwl_be211_name, RF_TYPE(WH)),
+ IWL_DEV_INFO(iwl_rf_wh_160mhz, iwl_be213_name, RF_TYPE(WH), BW_LIMITED),
+
+/* PE RF */
+ IWL_DEV_INFO(iwl_rf_pe, iwl_bn201_name, RF_TYPE(PE)),
+ IWL_DEV_INFO(iwl_rf_pe, iwl_be223_name, RF_TYPE(PE), SUBDEV(0x0524)),
+ IWL_DEV_INFO(iwl_rf_pe, iwl_be221_name, RF_TYPE(PE), SUBDEV(0x0324)),
+
+/* Killer */
+ IWL_DEV_INFO(iwl_rf_wh, iwl_killer_be1775s_name, SUBDEV(0x1776)),
+ IWL_DEV_INFO(iwl_rf_wh, iwl_killer_be1775i_name, SUBDEV(0x1775)),
+
+ IWL_DEV_INFO(iwl_rf_pe, iwl_killer_bn1850w2_name, SUBDEV(0x1851)),
+ IWL_DEV_INFO(iwl_rf_pe, iwl_killer_bn1850i_name, SUBDEV(0x1852)),
#endif /* CONFIG_IWLMLD */
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
@@ -1246,13 +1076,15 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size);
/*
* Read rf id and cdb info from prph register and store it
*/
-static void get_crf_id(struct iwl_trans *iwl_trans)
+static void get_crf_id(struct iwl_trans *iwl_trans,
+ struct iwl_trans_info *info)
{
u32 sd_reg_ver_addr;
+ u32 hw_wfpm_id;
u32 val = 0;
u8 step;
- if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
sd_reg_ver_addr = SD_REG_VER_GEN2;
else
sd_reg_ver_addr = SD_REG_VER;
@@ -1263,83 +1095,83 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);
/* Read crf info */
- iwl_trans->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
+ info->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
/* Read cnv info */
- iwl_trans->hw_cnv_id =
- iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ info->hw_cnv_id = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
/* For BZ-W, take B step also when A step is indicated */
- if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
+ if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
step = SILICON_B_STEP;
/* In BZ, the MAC step must be read from the CNVI aux register */
- if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
- step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+ if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
+ step = CNVI_AUX_MISC_CHIP_MAC_STEP(info->hw_cnv_id);
/* For BZ-U, take B step also when A step is indicated */
- if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
+ if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(info->hw_cnv_id) ==
CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
step == SILICON_A_STEP)
step = SILICON_B_STEP;
}
- if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
- CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
- iwl_trans->hw_rev_step = step;
- iwl_trans->hw_rev |= step;
+ if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
+ CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
+ info->hw_rev_step = step;
+ info->hw_rev |= step;
}
	/* Read cdb info (also contains the jacket info if needed in the future) */
- iwl_trans->hw_wfpm_id =
- iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
+ hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
- iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id,
- iwl_trans->hw_wfpm_id);
+ info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id);
}
/*
* In case that there is no OTP on the NIC, map the rf id and cdb info
* from the prph registers.
*/
-static int map_crf_id(struct iwl_trans *iwl_trans)
+static int map_crf_id(struct iwl_trans *iwl_trans,
+ struct iwl_trans_info *info)
{
int ret = 0;
- u32 val = iwl_trans->hw_crf_id;
+ u32 val = info->hw_crf_id;
u32 step_id = REG_CRF_ID_STEP(val);
u32 slave_id = REG_CRF_ID_SLAVE(val);
- u32 jacket_id_cnv = REG_CRF_ID_SLAVE(iwl_trans->hw_cnv_id);
- u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(iwl_trans->hw_wfpm_id);
- u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(iwl_trans->hw_wfpm_id);
+ u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id);
+ u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans,
+ WFPM_OTP_CFG1_ADDR);
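+	/* the wfpm id is no longer cached on the transport, so re-read it */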
+ u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id);
+ u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id);
/* Map between crf id to rf id */
switch (REG_CRF_ID_TYPE(val)) {
case REG_CRF_ID_TYPE_JF_1:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
break;
case REG_CRF_ID_TYPE_JF_2:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
break;
case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
break;
case REG_CRF_ID_TYPE_HR_NONE_CDB:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
break;
case REG_CRF_ID_TYPE_HR_CDB:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
break;
case REG_CRF_ID_TYPE_GF:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
break;
case REG_CRF_ID_TYPE_FM:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
break;
case REG_CRF_ID_TYPE_WHP:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
break;
case REG_CRF_ID_TYPE_PE:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
break;
default:
ret = -EIO;
@@ -1351,28 +1183,28 @@ static int map_crf_id(struct iwl_trans *iwl_trans)
}
/* Set Step-id */
- iwl_trans->hw_rf_id |= (step_id << 8);
+ info->hw_rf_id |= (step_id << 8);
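+	/*
+	 * hw_rf_id layout as assembled here and below: RF type in bits
+	 * 12 and up, step ID in bits 8-11, CDB in BIT(28), jacket in
+	 * BIT(29).
+	 */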
/* Set CDB capabilities */
if (cdb_id_wfpm || slave_id) {
- iwl_trans->hw_rf_id += BIT(28);
+ info->hw_rf_id += BIT(28);
IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
}
/* Set Jacket capabilities */
if (jacket_id_wfpm || jacket_id_cnv) {
- iwl_trans->hw_rf_id += BIT(29);
+ info->hw_rf_id += BIT(29);
IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
}
IWL_INFO(iwl_trans,
"Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
- REG_CRF_ID_TYPE(val), step_id, slave_id, iwl_trans->hw_rf_id);
+ REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id);
IWL_INFO(iwl_trans,
"Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n",
- cdb_id_wfpm, jacket_id_wfpm, iwl_trans->hw_wfpm_id);
+ cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id);
IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n",
- jacket_id_cnv, iwl_trans->hw_cnv_id);
+ jacket_id_cnv, info->hw_cnv_id);
out:
return ret;
@@ -1382,9 +1214,8 @@ out:
#define PCI_CFG_RETRY_TIMEOUT 0x041
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
-iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
- u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 rf_type, u8 cdb,
+ u8 rf_id, u8 bw_limit, u8 rf_step)
{
int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
@@ -1394,49 +1225,32 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
for (i = num_devices - 1; i >= 0; i--) {
const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+ u16 subdevice_mask;
if (dev_info->device != (u16)IWL_CFG_ANY &&
dev_info->device != device)
continue;
- if (dev_info->subdevice != (u16)IWL_CFG_ANY &&
- dev_info->subdevice != subsystem_device)
- continue;
+ subdevice_mask = GENMASK(dev_info->subdevice_m_h,
+ dev_info->subdevice_m_l);
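+		/*
+		 * subdevice_m_l/subdevice_m_h give the low/high bit of the
+		 * contiguous mask stored by SUBDEV_MASKED(); a 0xFF mask is
+		 * thus m_l = 0, m_h = 7, and an exact SUBDEV() match needs
+		 * the full 16-bit mask.
+		 */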
- if (dev_info->mac_type != (u16)IWL_CFG_ANY &&
- dev_info->mac_type != mac_type)
- continue;
-
- if (dev_info->mac_step != (u8)IWL_CFG_ANY &&
- dev_info->mac_step != mac_step)
- continue;
-
- if (dev_info->rf_type != (u16)IWL_CFG_ANY &&
- dev_info->rf_type != rf_type)
- continue;
-
- if (dev_info->cdb != (u8)IWL_CFG_ANY &&
- dev_info->cdb != cdb)
+ if (dev_info->subdevice != (u16)IWL_CFG_ANY &&
+ dev_info->subdevice != (subsystem_device & subdevice_mask))
continue;
- if (dev_info->jacket != (u8)IWL_CFG_ANY &&
- dev_info->jacket != jacket)
+ if (dev_info->match_rf_type && dev_info->rf_type != rf_type)
continue;
- if (dev_info->rf_id != (u8)IWL_CFG_ANY &&
- dev_info->rf_id != rf_id)
+ if (dev_info->match_cdb && dev_info->cdb != cdb)
continue;
- if (dev_info->no_160 != (u8)IWL_CFG_ANY &&
- dev_info->no_160 != no_160)
+ if (dev_info->match_rf_id && dev_info->rf_id != rf_id)
continue;
- if (dev_info->cores != (u8)IWL_CFG_ANY &&
- dev_info->cores != cores)
+ if (dev_info->match_bw_limit && dev_info->bw_limit != bw_limit)
continue;
- if (dev_info->rf_step != (u8)IWL_CFG_ANY &&
- dev_info->rf_step != rf_step)
+ if (dev_info->match_rf_step && dev_info->rf_step != rf_step)
continue;
return dev_info;
@@ -1448,30 +1262,32 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
static void iwl_pcie_recheck_me_status(struct work_struct *wk)
{
- struct iwl_trans *trans = container_of(wk, typeof(*trans),
- me_recheck_wk.work);
+ struct iwl_trans_pcie *trans_pcie = container_of(wk,
+ typeof(*trans_pcie),
+ me_recheck_wk.work);
u32 val;
- val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
- trans->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP);
+ val = iwl_read32(trans_pcie->trans, CSR_HW_IF_CONFIG_REG);
+ trans_pcie->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP);
}
static void iwl_pcie_check_me_status(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
- trans->me_present = -1;
+ trans_pcie->me_present = -1;
- INIT_DELAYED_WORK(&trans->me_recheck_wk,
+ INIT_DELAYED_WORK(&trans_pcie->me_recheck_wk,
iwl_pcie_recheck_me_status);
/* we don't have a good way of determining this until BZ */
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
return;
val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1);
if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) {
- trans->me_present =
+ trans_pcie->me_present =
!!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT);
return;
}
@@ -1479,38 +1295,28 @@ static void iwl_pcie_check_me_status(struct iwl_trans *trans)
val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
if (val & (CSR_HW_IF_CONFIG_REG_ME_OWN |
CSR_HW_IF_CONFIG_REG_IAMT_UP)) {
- trans->me_present = 1;
+ trans_pcie->me_present = 1;
return;
}
/* recheck again later, ME might still be initializing */
- schedule_delayed_work(&trans->me_recheck_wk, HZ);
+ schedule_delayed_work(&trans_pcie->me_recheck_wk, HZ);
}
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct iwl_cfg_trans_params *trans;
- const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+ const struct iwl_mac_cfg *trans;
const struct iwl_dev_info *dev_info;
+ struct iwl_trans_info info = {
+ .hw_id = (pdev->device << 16) + pdev->subsystem_device,
+ };
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int ret;
- const struct iwl_cfg *cfg;
-
- trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
- /*
- * This is needed for backwards compatibility with the old
- * tables, so we don't need to change all the config structs
- * at the same time. The cfg is used to compare with the old
- * full cfg structs.
- */
- cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
-
- /* make sure trans is the first element in iwl_cfg */
- BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
+ trans = (void *)ent->driver_data;
- iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans);
+ iwl_trans = iwl_trans_pcie_alloc(pdev, trans, &info);
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
@@ -1519,6 +1325,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans_pcie_check_product_reset_status(pdev);
iwl_trans_pcie_check_product_reset_mode(pdev);
+ /* set the things we know so far for the grab NIC access */
+ iwl_trans_set_info(iwl_trans, &info);
+
/*
* Let's try to grab NIC access early here. Sometimes, NICs may
* fail to initialize, and if that happens it's better if we see
@@ -1532,7 +1341,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
- get_crf_id(iwl_trans);
+ get_crf_id(iwl_trans, &info);
/* all good */
iwl_trans_release_nic_access(iwl_trans);
} else {
@@ -1541,38 +1350,32 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
+ info.hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
/*
* The RF_ID is set to zero in blank OTP so read version to
* extract the RF_ID.
* This is relevant only for family 9000 and up.
*/
- if (iwl_trans->trans_cfg->rf_id &&
- iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
- !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && map_crf_id(iwl_trans)) {
+ if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
+ !CSR_HW_RFID_TYPE(info.hw_rf_id) && map_crf_id(iwl_trans, &info)) {
ret = -EINVAL;
goto out_free_trans;
}
IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
pdev->device, pdev->subsystem_device,
- iwl_trans->hw_rev, iwl_trans->hw_rf_id);
+ info.hw_rev, info.hw_rf_id);
dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
- CSR_HW_REV_TYPE(iwl_trans->hw_rev),
- iwl_trans->hw_rev_step,
- CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id),
- CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id),
- CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id),
+ CSR_HW_RFID_TYPE(info.hw_rf_id),
+ CSR_HW_RFID_IS_CDB(info.hw_rf_id),
IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
- IWL_SUBDEVICE_NO_160(pdev->subsystem_device),
- IWL_SUBDEVICE_CORES(pdev->subsystem_device),
- CSR_HW_RFID_STEP(iwl_trans->hw_rf_id));
+ IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device),
+ CSR_HW_RFID_STEP(info.hw_rf_id));
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
- iwl_trans->name = dev_info->name;
- iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
+ info.name = dev_info->name;
}
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -1583,82 +1386,41 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* all the parameters that the transport uses must, until that is
* changed, be identical to the ones in the 7265D configuration.
*/
- if (cfg == &iwl7265_2ac_cfg)
- cfg_7265d = &iwl7265d_2ac_cfg;
- else if (cfg == &iwl7265_2n_cfg)
- cfg_7265d = &iwl7265d_2n_cfg;
- else if (cfg == &iwl7265_n_cfg)
- cfg_7265d = &iwl7265d_n_cfg;
- if (cfg_7265d &&
- (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
- iwl_trans->cfg = cfg_7265d;
-
- /*
- * This is a hack to switch from Qu B0 to Qu C0. We need to
- * do this for all cfgs that use Qu B0, except for those using
- * Jf, which have already been moved to the new table. The
- * rest must be removed once we convert Qu with Hr as well.
- */
- if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
- if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
- else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
- iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
- else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
- iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
- }
-
- /* same thing for QuZ... */
- if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
- else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
- iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr;
- else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
- iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr;
- }
-
+ if (iwl_trans->cfg == &iwl7265_cfg &&
+ (info.hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+ iwl_trans->cfg = &iwl7265d_cfg;
#endif
- /*
- * If we didn't set the cfg yet, the PCI ID table entry should have
- * been a full config - if yes, use it, otherwise fail.
- */
if (!iwl_trans->cfg) {
- if (ent->driver_data & TRANS_CFG_MARKER) {
- pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
- pdev->device, pdev->subsystem_device,
- iwl_trans->hw_rev, iwl_trans->hw_rf_id);
- ret = -EINVAL;
- goto out_free_trans;
- }
- iwl_trans->cfg = cfg;
+ pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
+ pdev->device, pdev->subsystem_device,
+ info.hw_rev, info.hw_rf_id);
+ ret = -EINVAL;
+ goto out_free_trans;
}
- /* if we don't have a name yet, copy name from the old cfg */
- if (!iwl_trans->name)
- iwl_trans->name = iwl_trans->cfg->name;
-
- IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name);
+ IWL_INFO(iwl_trans, "Detected %s\n", info.name);
- if (iwl_trans->trans_cfg->mq_rx_supported) {
+ if (iwl_trans->mac_cfg->mq_rx_supported) {
if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
ret = -EINVAL;
goto out_free_trans;
}
- trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds;
+ trans_pcie->num_rx_bufs = iwl_trans_get_num_rbds(iwl_trans);
} else {
trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
}
- if (!iwl_trans->trans_cfg->integrated) {
+ if (!iwl_trans->mac_cfg->integrated) {
u16 link_status;
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
- iwl_trans->pcie_link_speed =
+ info.pcie_link_speed =
u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS);
}
+ iwl_trans_set_info(iwl_trans, &info);
+
ret = iwl_trans_init(iwl_trans);
if (ret)
goto out_free_trans;
@@ -1690,11 +1452,12 @@ out_free_trans:
static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans)
return;
- cancel_delayed_work_sync(&trans->me_recheck_wk);
+ cancel_delayed_work_sync(&trans_pcie->me_recheck_wk);
iwl_drv_stop(trans->drv);
@@ -1742,7 +1505,7 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* the device is alive.
* For older devices, just try silently to grab the NIC.
*/
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) &
CSR_FUNC_SCRATCH_POWER_OFF_MASK))
device_was_powered_off = true;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 114a9195ad7f..3b7c12fc4f9e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -194,7 +194,7 @@ struct iwl_rb_allocator {
static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
__le16 *rb_stts = rxq->rb_stts;
return le16_to_cpu(READ_ONCE(*rb_stts));
@@ -269,6 +269,7 @@ enum iwl_pcie_fw_reset_state {
FW_RESET_REQUESTED,
FW_RESET_OK,
FW_RESET_ERROR,
+ FW_RESET_TOP_REQUESTED,
};
/**
@@ -288,22 +289,14 @@ enum iwl_pcie_imr_status {
/**
* struct iwl_pcie_txqs - TX queues data
*
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
- * @page_offs: offset from skb->cb to mac header page pointer
- * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
* @queue_used: bit mask of used queues
* @queue_stopped: bit mask of stopped queues
* @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
- * @queue_alloc_cmd_ver: queue allocation command version
* @bc_pool: bytecount DMA allocations pool
* @bc_tbl_size: bytecount table size
* @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
* (and similar usage)
- * @cmd: command queue data
- * @cmd.fifo: FIFO number
- * @cmd.q_id: queue ID
- * @cmd.wdg_timeout: watchdog timeout
* @tfd: TFD data
* @tfd.max_tbs: max number of buffers per TFD
* @tfd.size: TFD size
@@ -315,26 +308,15 @@ struct iwl_pcie_txqs {
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
struct dma_pool *bc_pool;
size_t bc_tbl_size;
- bool bc_table_dword;
- u8 page_offs;
- u8 dev_cmd_offs;
struct iwl_tso_hdr_page __percpu *tso_hdr_page;
struct {
- u8 fifo;
- u8 q_id;
- unsigned int wdg_timeout;
- } cmd;
-
- struct {
u8 max_tbs;
u16 size;
u8 addr_size;
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
-
- u8 queue_alloc_cmd_ver;
};
/**
@@ -344,7 +326,7 @@ struct iwl_pcie_txqs {
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @ctxt_info: context information for FW self init
- * @ctxt_info_gen3: context information for gen3 devices
+ * @ctxt_info_v2: context information for v2 devices
* @prph_info: prph info for self init
* @prph_scratch: prph scratch for self init
* @ctxt_info_dma_addr: dma addr of context information
@@ -352,6 +334,7 @@ struct iwl_pcie_txqs {
* @prph_scratch_dma_addr: dma addr of prph scratch
* @ctxt_info_dma_addr: dma addr of context information
* @iml: image loader image virtual address
+ * @iml_len: image loader image size
* @iml_dma_addr: image loader image DMA address
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
@@ -363,9 +346,6 @@ struct iwl_pcie_txqs {
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
- * @cmd_queue - command queue number
- * @rx_buf_size: Rx buffer size
- * @scd_set_active: should the transport configure the SCD for HCMD queue
* @rx_page_order: page order for receive buffer size
* @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
@@ -406,19 +386,20 @@ struct iwl_pcie_txqs {
* @pcie_dbg_dumped_once: indicates PCIe regs were dumped already
* @opmode_down: indicates opmode went away
* @num_rx_bufs: number of RX buffers to allocate/use
- * @no_reclaim_cmds: special commands not using reclaim flow
- * (firmware workaround)
- * @n_no_reclaim_cmds: number of special commands not using reclaim flow
* @affinity_mask: IRQ affinity mask for each RX queue
* @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
* enable/disable
- * @fw_reset_handshake: indicates FW reset handshake is needed
* @fw_reset_state: state of FW reset handshake
* @fw_reset_waitq: waitqueue for FW reset handshake
* @is_down: indicates the NIC is down
* @isr_stats: interrupt statistics
* @napi_dev: (fake) netdev for NAPI registration
* @txqs: transport tx queues data.
+ * @me_present: WiAMT/CSME is detected as present (1), not present (0)
+ * or unknown (-1, so can still use it as a boolean safely)
+ * @me_recheck_wk: worker to recheck WiAMT/CSME presence
+ * @invalid_tx_cmd: invalid TX command buffer
+ * @wait_command_queue: wait queue for sync commands
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -427,11 +408,12 @@ struct iwl_trans_pcie {
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
- struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_context_info_v2 *ctxt_info_v2;
};
struct iwl_prph_info *prph_info;
struct iwl_prph_scratch *prph_scratch;
void *iml;
+ size_t iml_len;
dma_addr_t ctxt_info_dma_addr;
dma_addr_t prph_info_dma_addr;
dma_addr_t prph_scratch_dma_addr;
@@ -470,12 +452,8 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t sx_waitq;
- u8 n_no_reclaim_cmds;
- u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u16 num_rx_bufs;
- enum iwl_amsdu_size rx_buf_size;
- bool scd_set_active;
bool pcie_dbg_dumped_once;
u32 rx_page_order;
u32 rx_buf_bytes;
@@ -510,7 +488,6 @@ struct iwl_trans_pcie {
void *base_rb_stts;
dma_addr_t base_rb_stts_dma;
- bool fw_reset_handshake;
enum iwl_pcie_fw_reset_state fw_reset_state;
wait_queue_head_t fw_reset_waitq;
enum iwl_pcie_imr_status imr_status;
@@ -518,6 +495,13 @@ struct iwl_trans_pcie {
char rf_name[32];
struct iwl_pcie_txqs txqs;
+
+ s8 me_present;
+ struct delayed_work me_recheck_wk;
+
+ struct iwl_dma_ptr invalid_tx_cmd;
+
+ wait_queue_head_t wait_command_queue;
};
static inline struct iwl_trans_pcie *
@@ -552,8 +536,8 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
*/
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct iwl_cfg_trans_params *cfg_trans);
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info);
void iwl_trans_pcie_free(struct iwl_trans *trans);
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
struct device *dev);
@@ -623,7 +607,7 @@ struct iwl_tso_page_info {
IWL_TSO_PAGE_DATA_SIZE))
int iwl_pcie_tx_init(struct iwl_trans *trans);
-void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
+void iwl_pcie_tx_start(struct iwl_trans *trans);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
@@ -679,7 +663,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
idx = iwl_txq_get_cmd_index(txq, idx);
return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
@@ -718,7 +702,7 @@ static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
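+	/* the mask works because max_tfd_queue_size is a power of two */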
return ++index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
}
/**
@@ -729,7 +713,7 @@ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
return --index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
}
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
@@ -752,10 +736,12 @@ int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
struct iwl_tfh_tfd *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
tfd->num_tbs = 0;
- iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
- trans->invalid_tx_cmd.size);
+ iwl_txq_gen2_set_tb(trans, tfd, trans_pcie->invalid_tx_cmd.dma,
+ trans_pcie->invalid_tx_cmd.size);
}
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
@@ -782,7 +768,7 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
struct iwl_tfd *tfd;
struct iwl_tfd_tb *tb;
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
struct iwl_tfh_tfd *tfh_tfd = _tfd;
struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
@@ -940,11 +926,13 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
+static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans,
+ bool top_reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");
+ IWL_DEBUG_ISR(trans, "Enabling %s interrupt only\n",
+ top_reset ? "RESET" : "ALIVE");
if (!trans_pcie->msix_enabled) {
/*
@@ -954,11 +942,20 @@ static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
* RX interrupt which will allow us to receive the ALIVE
* notification (which is Rx) and continue the flow.
*/
- trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
+ if (top_reset)
+ trans_pcie->inta_mask = CSR_INT_BIT_RESET_DONE;
+ else
+ trans_pcie->inta_mask = CSR_INT_BIT_ALIVE |
+ CSR_INT_BIT_FH_RX;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
} else {
- iwl_enable_hw_int_msk_msix(trans,
- MSIX_HW_INT_CAUSES_REG_ALIVE);
+ u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE
+ : MSIX_HW_INT_CAUSES_REG_ALIVE;
+
+ iwl_enable_hw_int_msk_msix(trans, val);
+
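+		/* RESET_DONE is a HW cause, so the FH (Rx) causes aren't needed */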
+ if (top_reset)
+ return;
/*
* Leave all the FH causes enabled to get the ALIVE
* notification.
@@ -1005,7 +1002,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
MSIX_HW_INT_CAUSES_REG_RF_KILL);
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
/*
* On 9000-series devices this bit isn't enabled by default, so
* when we power down the device we need set the bit to allow it
@@ -1076,8 +1073,7 @@ static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common trans ops for all generations transports */
-void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg);
+void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
@@ -1107,11 +1103,14 @@ int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
void __releases(nic_access_nobh)
iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
/* transport gen 1 exported functions */
-void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill);
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill);
void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
/* common functions that are used by gen2 transport */
@@ -1129,15 +1128,12 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
-/* common functions that are used by gen3 transport */
-void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
-
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill);
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
-int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4a4f8de4efe2..f0405eddc367 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -12,7 +12,8 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
+#include "fw/dbg.h"
/******************************************************************************
*
@@ -143,12 +144,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
/* TODO: remove this once fw does it */
- iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
- return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+ iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_AX210, 0);
+ return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_AX210,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
- } else if (trans->trans_cfg->mq_rx_supported) {
+ } else if (trans->mac_cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -175,7 +176,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
* 1. shadow registers aren't enabled
* 2. there is a chance that the NIC is asleep
*/
- if (!trans->trans_cfg->base_params->shadow_reg_enable &&
+ if (!trans->mac_cfg->base->shadow_reg_enable &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
@@ -190,9 +191,9 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (!trans->trans_cfg->mq_rx_supported)
+ if (!trans->mac_cfg->mq_rx_supported)
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
HBUS_TARG_WRPTR_RX_Q(rxq->id));
else
@@ -205,7 +206,7 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
if (!rxq->need_update)
@@ -221,7 +222,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
@@ -348,7 +349,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
iwl_pcie_rxmq_restock(trans, rxq);
else
iwl_pcie_rxsq_restock(trans, rxq);
@@ -362,8 +363,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
u32 *offset, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
+ unsigned int rbsize = trans_pcie->rx_buf_bytes;
struct page *page;
gfp_t gfp_mask = priority;
@@ -657,19 +658,19 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return sizeof(struct iwl_rx_transfer_desc);
- return trans->trans_cfg->mq_rx_supported ?
+ return trans->mac_cfg->mq_rx_supported ?
sizeof(__le64) : sizeof(__le32);
}
static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
return sizeof(struct iwl_rx_completion_desc_bz);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return sizeof(struct iwl_rx_completion_desc);
return sizeof(__le32);
@@ -701,7 +702,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
{
- bool use_rx_td = (trans->trans_cfg->device_family >=
+ bool use_rx_td = (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210);
if (use_rx_td)
@@ -720,8 +721,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
int free_size;
spin_lock_init(&rxq->lock);
- if (trans->trans_cfg->mq_rx_supported)
- rxq->queue_size = trans->cfg->num_rbds;
+ if (trans->mac_cfg->mq_rx_supported)
+ rxq->queue_size = iwl_trans_get_num_rbds(trans);
else
rxq->queue_size = RX_QUEUE_SIZE;
@@ -736,7 +737,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
if (!rxq->bd)
goto err;
- if (trans->trans_cfg->mq_rx_supported) {
+ if (trans->mac_cfg->mq_rx_supported) {
rxq->used_bd = dma_alloc_coherent(dev,
iwl_pcie_used_bd_size(trans) *
rxq->queue_size,
@@ -753,7 +754,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
return 0;
err:
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
iwl_pcie_free_rxq_dma(trans, rxq);
@@ -772,7 +773,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
if (WARN_ON(trans_pcie->rxq))
return -EINVAL;
- trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+ trans_pcie->rxq = kcalloc(trans->info.num_rxqs, sizeof(struct iwl_rxq),
GFP_KERNEL);
trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
sizeof(trans_pcie->rx_pool[0]),
@@ -795,7 +796,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
*/
trans_pcie->base_rb_stts =
dma_alloc_coherent(trans->dev,
- rb_stts_size * trans->num_rx_queues,
+ rb_stts_size * trans->info.num_rxqs,
&trans_pcie->base_rb_stts_dma,
GFP_KERNEL);
if (!trans_pcie->base_rb_stts) {
@@ -803,7 +804,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
goto err;
}
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
rxq->id = i;
@@ -816,7 +817,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
err:
if (trans_pcie->base_rb_stts) {
dma_free_coherent(trans->dev,
- rb_stts_size * trans->num_rx_queues,
+ rb_stts_size * trans->info.num_rxqs,
trans_pcie->base_rb_stts,
trans_pcie->base_rb_stts_dma);
trans_pcie->base_rb_stts = NULL;
@@ -834,11 +835,10 @@ err:
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_4K:
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
break;
@@ -906,7 +906,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
u32 rb_size, enabled = 0;
int i;
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_2K:
rb_size = RFH_RXF_DMA_RB_SIZE_2K;
break;
@@ -932,7 +932,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
/* disable free and used rx queue operation */
iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
/* Tell device where to find RBD free table in DRAM */
iwl_write_prph64_no_grab(trans,
RFH_Q_FRBDCB_BA_LSB(i),
@@ -976,7 +976,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
RFH_GEN_CFG_SERVICE_DMA_SNOOP |
RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
- trans->trans_cfg->integrated ?
+ trans->mac_cfg->integrated ?
RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
RFH_GEN_CFG_RB_CHUNK_SIZE_128));
/* Enable the relevant rx queues */
@@ -1072,7 +1072,7 @@ void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
if (unlikely(!trans_pcie->rxq))
return;
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
if (rxq && rxq->napi.poll)
@@ -1109,7 +1109,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
for (i = 0; i < RX_QUEUE_SIZE; i++)
def_rxq->queue[i] = NULL;
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
spin_lock_bh(&rxq->lock);
@@ -1122,7 +1122,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->write = 0;
rxq->write_actual = 0;
memset(rxq->rb_stts, 0,
- (trans->trans_cfg->device_family >=
+ (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) ?
sizeof(__le16) : sizeof(struct iwl_rb_status));
@@ -1144,9 +1144,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
}
/* move the pool to the default queue and allocator ownerships */
- queue_size = trans->trans_cfg->mq_rx_supported ?
+ queue_size = trans->mac_cfg->mq_rx_supported ?
trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
- allocator_pool_size = trans->num_rx_queues *
+ allocator_pool_size = trans->info.num_rxqs *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
@@ -1175,7 +1175,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (ret)
return ret;
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
iwl_pcie_rx_mq_hw_init(trans);
else
iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
@@ -1223,14 +1223,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
if (trans_pcie->base_rb_stts) {
dma_free_coherent(trans->dev,
- rb_stts_size * trans->num_rx_queues,
+ rb_stts_size * trans->info.num_rxqs,
trans_pcie->base_rb_stts,
trans_pcie->base_rb_stts_dma);
trans_pcie->base_rb_stts = NULL;
trans_pcie->base_rb_stts_dma = 0;
}
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
iwl_pcie_free_rxq_dma(trans, rxq);
@@ -1301,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@@ -1368,8 +1368,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
if (reclaim && !pkt->hdr.group_id) {
int i;
- for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
- if (trans_pcie->no_reclaim_cmds[i] ==
+ for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) {
+ if (trans->conf.no_reclaim_cmds[i] ==
pkt->hdr.cmd) {
reclaim = false;
break;
@@ -1408,7 +1408,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
break;
}
@@ -1454,18 +1454,18 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
- if (!trans->trans_cfg->mq_rx_supported) {
+ if (!trans->mac_cfg->mq_rx_supported) {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
return rxb;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
vid = le16_to_cpu(cd[i].rbid);
*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
- } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_completion_desc *cd = rxq->used_bd;
vid = le16_to_cpu(cd[i].rbid);
@@ -1648,7 +1648,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
- if (WARN_ON(entry->entry >= trans->num_rx_queues))
+ if (WARN_ON(entry->entry >= trans->info.num_rxqs))
return IRQ_NONE;
if (!trans_pcie->rxq) {
@@ -1683,18 +1683,18 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
/* W/A for WiFi/WiMAX coex when WiMAX owns the RF */
if (trans->cfg->internal_wimax_coex &&
- !trans->cfg->apmg_not_supported &&
+ !trans->mac_cfg->base->apmg_not_supported &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
iwl_op_mode_wimax_active(trans->op_mode);
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
return;
}
- for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
if (!trans_pcie->txqs.txq[i])
continue;
timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer);
@@ -1705,7 +1705,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
@@ -1820,7 +1820,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
&trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
} else {
clear_bit(STATUS_RFKILL_HW, &trans->status);
if (trans_pcie->opmode_down)
@@ -1828,6 +1828,54 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
}
}
+static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 state;
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
+ u32 val = iwl_read32(trans, CSR_IPC_STATE);
+
+ state = u32_get_bits(val, CSR_IPC_STATE_RESET);
+ IWL_DEBUG_ISR(trans, "IPC state = 0x%x/%d\n", val, state);
+ } else {
+ state = CSR_IPC_STATE_RESET_SW_READY;
+ }
+
+ switch (state) {
+ case CSR_IPC_STATE_RESET_SW_READY:
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "Reset flow completed\n");
+ trans_pcie->fw_reset_state = FW_RESET_OK;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ break;
+ }
+ fallthrough;
+ case CSR_IPC_STATE_RESET_TOP_READY:
+ /* FIXME: handle this case when requesting TOP reset */
+ fallthrough;
+ case CSR_IPC_STATE_RESET_NONE:
+ IWL_FW_CHECK_FAILED(trans,
+ "Invalid reset interrupt (state=%d)!\n",
+ state);
+ break;
+ case CSR_IPC_STATE_RESET_TOP_FOLLOWER:
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ /* if we were in reset, wake that up */
+ IWL_INFO(trans,
+ "TOP reset from BT while doing reset\n");
+ trans_pcie->fw_reset_state = FW_RESET_OK;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ } else {
+ IWL_INFO(trans, "TOP reset from BT\n");
+ trans->state = IWL_TRANS_NO_FW;
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_RESET_BY_BT);
+ }
+ break;
+ }
+}
+
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
struct iwl_trans *trans = dev_id;
@@ -1936,7 +1984,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
if (inta & CSR_INT_BIT_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
/*
* We can restock, since firmware configured
* the RFH
@@ -1947,6 +1995,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
handled |= CSR_INT_BIT_ALIVE;
}
+ if (inta & CSR_INT_BIT_RESET_DONE) {
+ iwl_trans_pcie_handle_reset_interrupt(trans);
+ handled |= CSR_INT_BIT_RESET_DONE;
+ }
+
/* Safely ignore these bits for debug checks below */
inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
@@ -1968,7 +2021,12 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
IWL_ERR(trans, "Microcode SW error detected. "
" Restarting 0x%X.\n", inta);
isr_stats->sw++;
- iwl_pcie_irq_handle_error(trans);
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ trans_pcie->fw_reset_state = FW_RESET_ERROR;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ } else {
+ iwl_pcie_irq_handle_error(trans);
+ }
handled |= CSR_INT_BIT_SW_ERR;
}
@@ -2074,7 +2132,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_enable_rfkill_int(trans);
/* Re-enable the ALIVE / Rx interrupt if it occurred */
else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
- iwl_enable_fw_load_int_ctx_info(trans);
+ iwl_enable_fw_load_int_ctx_info(trans, false);
spin_unlock_bh(&trans_pcie->irq_lock);
}
@@ -2289,7 +2347,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
else
sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
@@ -2297,9 +2355,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
inta_hw);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_trans_pcie_reset(trans,
- IWL_RESET_MODE_PROD_RESET);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ trans->request_top_reset = 1;
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_TOP_FATAL_ERROR);
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_FATAL_ERROR);
+ }
}
/* Error detected by uCode */
@@ -2338,7 +2400,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
/* We can restock, since firmware configured the RFH */
iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
}
@@ -2388,11 +2450,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
iwl_pcie_irq_handle_error(trans);
}
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
- IWL_DEBUG_ISR(trans, "Reset flow completed\n");
- trans_pcie->fw_reset_state = FW_RESET_OK;
- wake_up(&trans_pcie->fw_reset_waitq);
- }
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)
+ iwl_trans_pcie_handle_reset_interrupt(trans);
if (!polling)
iwl_pcie_clear_irq(trans, entry->entry);
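The rx.c changes route both the legacy CSR_INT_BIT_RESET_DONE cause and the MSI-X MSIX_HW_INT_CAUSES_REG_RESET_DONE cause through the new iwl_trans_pcie_handle_reset_interrupt(), which decodes CSR_IPC_STATE on SC-and-later devices. A compact userspace model of that dispatch, with the register read and wait-queue wakeups reduced to plain state updates:

#include <stdio.h>

enum ipc_reset_state {
	RESET_NONE,
	RESET_SW_READY,
	RESET_TOP_READY,
	RESET_TOP_FOLLOWER,
};

enum fw_reset_state { FW_RESET_IDLE, FW_RESET_REQUESTED, FW_RESET_OK };

/* Models iwl_trans_pcie_handle_reset_interrupt(): SW_READY acks a
 * driver-requested reset; a BT-driven TOP reset either completes the
 * pending reset or (here) stands in for scheduling a full reset. */
static void handle_reset_irq(enum ipc_reset_state hw, enum fw_reset_state *sw)
{
	switch (hw) {
	case RESET_SW_READY:
		if (*sw == FW_RESET_REQUESTED) {
			*sw = FW_RESET_OK;	/* wake the waiter */
			break;
		}
		/* fall through - ready without a pending request is bogus */
	case RESET_TOP_READY:
	case RESET_NONE:
		printf("invalid reset interrupt (state=%d)!\n", hw);
		break;
	case RESET_TOP_FOLLOWER:
		if (*sw == FW_RESET_REQUESTED)
			*sw = FW_RESET_OK;	/* reset raced with BT */
		else
			printf("TOP reset from BT: schedule reset\n");
		break;
	}
}

int main(void)
{
	enum fw_reset_state sw = FW_RESET_REQUESTED;

	handle_reset_irq(RESET_SW_READY, &sw);
	return sw == FW_RESET_OK ? 0 : 1;
}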
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 472f26f83ba8..c8f4f3a1d2eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -6,7 +6,7 @@
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
#include "internal.h"
#include "fw/dbg.h"
@@ -81,13 +81,13 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
- iwl_trans_sw_reset(trans, false);
+ iwl_trans_pcie_sw_reset(trans, false);
/*
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
else
@@ -102,10 +102,10 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
trans_pcie->fw_reset_state = FW_RESET_REQUESTED;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
- else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
+ else if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
else
@@ -117,13 +117,23 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
trans_pcie->fw_reset_state != FW_RESET_REQUESTED,
FW_RESET_TIMEOUT);
if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) {
- u32 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ bool reset_done;
+ u32 inta_hw;
+
+ if (trans_pcie->msix_enabled) {
+ inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ reset_done =
+ inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE;
+ } else {
+ inta_hw = iwl_read32(trans, CSR_INT);
+ reset_done = inta_hw & CSR_INT_BIT_RESET_DONE;
+ }
IWL_ERR(trans,
- "timeout waiting for FW reset ACK (inta_hw=0x%x)\n",
- inta_hw);
+ "timeout waiting for FW reset ACK (inta_hw=0x%x, reset_done %d)\n",
+ inta_hw, reset_done);
- if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) {
+ if (!reset_done) {
struct iwl_fw_error_dump_mode mode = {
.type = IWL_ERR_TYPE_RESET_HS_TIMEOUT,
.context = IWL_ERR_CONTEXT_FROM_OPMODE,
@@ -147,7 +157,7 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
return;
if (trans->state >= IWL_TRANS_FW_STARTED &&
- trans_pcie->fw_reset_handshake) {
+ trans->conf.fw_reset_handshake) {
/*
* Reset handshake can dump firmware on timeout, but that
* should assume that the firmware is already dead.
@@ -181,8 +191,8 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
}
iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_gen3_free(trans, false);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_pcie_ctxt_info_v2_free(trans, false);
else
iwl_pcie_ctxt_info_free(trans);
@@ -190,7 +200,7 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
iwl_pcie_gen2_apm_stop(trans, false);
/* re-take ownership to prevent other users from stealing the device */
- iwl_trans_sw_reset(trans, true);
+ iwl_trans_pcie_sw_reset(trans, true);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@@ -243,7 +253,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->cfg->min_txq_size);
+ trans->mac_cfg->base->min_txq_size);
int ret;
/* TODO: most of the logic can be removed in A0 - but not in Z0 */
@@ -260,7 +270,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size))
+ if (iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@@ -281,7 +291,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
if (buf[0])
return;
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
pos = scnprintf(buf, buflen, "JF");
break;
@@ -305,7 +315,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP):
if (SILICON_Z_STEP ==
- CSR_HW_RFID_STEP(trans->hw_rf_id))
+ CSR_HW_RFID_STEP(trans->info.hw_rf_id))
pos = scnprintf(buf, buflen, "WHTC");
else
pos = scnprintf(buf, buflen, "WH");
@@ -314,7 +324,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
return;
}
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
@@ -337,7 +347,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
}
pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
- trans->hw_rf_id);
+ trans->info.hw_rf_id);
IWL_INFO(trans, "Detected RF %s\n", buf);
@@ -364,8 +374,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed, since the FW will still use it
*/
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_gen3_free(trans, true);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_pcie_ctxt_info_v2_free(trans, true);
else
iwl_pcie_ctxt_info_free(trans);
@@ -379,6 +389,11 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
iwl_pcie_get_rf_name(trans);
mutex_unlock(&trans_pcie->mutex);
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans->step_urm = !!(iwl_read_umac_prph(trans,
+ CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
}
static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
@@ -398,21 +413,21 @@ static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
* initialize the LTR to ~250 usec (see ltr_val above).
* The firmware initializes this again later (to a smaller value).
*/
- if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
- !trans->trans_cfg->integrated) {
+ if ((trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->mac_cfg->integrated) {
iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
return true;
}
- if (trans->trans_cfg->integrated &&
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ if (trans->mac_cfg->integrated &&
+ trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
return true;
}
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
/* First clear the interrupt, just in case */
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
MSIX_HW_INT_CAUSES_REG_IML);
@@ -469,16 +484,22 @@ static void iwl_pcie_spin_for_iml(struct iwl_trans *trans)
}
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill)
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill, keep_ram_busy;
+ bool top_reset_done = false;
int ret;
+ mutex_lock(&trans_pcie->mutex);
+again:
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- return -EIO;
+ ret = -EIO;
+ goto out;
}
iwl_enable_rfkill_int(trans);
@@ -495,8 +516,6 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
/* Make sure it finished running */
iwl_pcie_synchronize_irqs(trans);
- mutex_lock(&trans_pcie->mutex);
-
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill) {
@@ -526,22 +545,37 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
- else
- ret = iwl_pcie_ctxt_info_init(trans, fw);
- if (ret)
- goto out;
+ if (WARN_ON(trans->do_top_reset &&
+ trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC))
+ return -EINVAL;
+
+ /* we need to wait later - set state */
+ if (trans->do_top_reset)
+ trans_pcie->fw_reset_state = FW_RESET_TOP_REQUESTED;
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (!top_reset_done) {
+ ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img);
+ if (ret)
+ goto out;
+ }
+
+ iwl_pcie_ctxt_info_v2_kick(trans);
+ } else {
+ ret = iwl_pcie_ctxt_info_init(trans, img);
+ if (ret)
+ goto out;
+ }
keep_ram_busy = !iwl_pcie_set_ltr(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n",
iwl_read32(trans, CSR_FUNC_SCRATCH));
iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_ROM_START);
- } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
} else {
iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
@@ -550,6 +584,38 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
if (keep_ram_busy)
iwl_pcie_spin_for_iml(trans);
+ if (trans->do_top_reset) {
+ trans->do_top_reset = 0;
+
+#define FW_TOP_RESET_TIMEOUT (HZ / 4)
+ ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
+ trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED,
+ FW_TOP_RESET_TIMEOUT);
+
+ if (trans_pcie->fw_reset_state != FW_RESET_OK) {
+ if (trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED)
+ IWL_ERR(trans,
+ "TOP reset interrupted by error (state %d)!\n",
+ trans_pcie->fw_reset_state);
+ else
+ IWL_ERR(trans, "TOP reset timed out!\n");
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_TOP_RESET_FAILED);
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_RESET_FAILED);
+ ret = -EIO;
+ goto out;
+ }
+
+ msleep(10);
+ IWL_INFO(trans, "TOP reset successful, reinit now\n");
+ /* now load the firmware again properly */
+ trans_pcie->prph_scratch->ctrl_cfg.control.control_flags &=
+ ~cpu_to_le32(IWL_PRPH_SCRATCH_TOP_RESET);
+ top_reset_done = true;
+ goto again;
+ }
+
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill)
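The gen2 start_fw rework above implements TOP reset as a two-pass boot: request the reset, kick the firmware, wait up to FW_TOP_RESET_TIMEOUT (HZ / 4) for the interrupt handler to flip fw_reset_state, then clear IWL_PRPH_SCRATCH_TOP_RESET and jump back to "again" to load the firmware properly. A standalone sketch of the control flow, with the hardware kicks and the real wait_event_timeout() replaced by a stub:

#include <stdio.h>

enum { FW_RESET_IDLE, FW_RESET_TOP_REQUESTED, FW_RESET_OK };

/* Stand-in for wait_event_timeout() on fw_reset_waitq: pretend the
 * ISR acknowledged the TOP reset within FW_TOP_RESET_TIMEOUT. */
static int wait_for_top_ack(int *state)
{
	*state = FW_RESET_OK;
	return 1;			/* nonzero means no timeout */
}

static int start_fw_sketch(int do_top_reset)
{
	int state = FW_RESET_IDLE;
	int top_reset_done = 0;

again:
	if (do_top_reset && !top_reset_done)
		state = FW_RESET_TOP_REQUESTED;

	/* ... allocate context info (first pass only), kick the boot ... */

	if (do_top_reset && !top_reset_done) {
		if (!wait_for_top_ack(&state) || state != FW_RESET_OK)
			return -1;	/* driver schedules a full reset */
		/* clear the TOP-reset scratch flag, then reload for real */
		top_reset_done = 1;
		printf("TOP reset successful, reinit now\n");
		goto again;
	}
	return 0;
}

int main(void)
{
	return start_fw_sketch(1);
}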
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 102a6123bba0..cc4d289b110d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -28,7 +28,7 @@
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
@@ -131,7 +131,7 @@ out:
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_SW_RESET);
usleep_range(10000, 20000);
@@ -237,7 +237,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
- if (trans->cfg->apmg_not_supported)
+ if (trans->mac_cfg->base->apmg_not_supported)
return;
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -293,7 +293,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
@@ -317,7 +317,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
- if (trans->trans_cfg->base_params->pll_cfg)
+ if (trans->mac_cfg->base->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
ret = iwl_finish_nic_init(trans);
@@ -353,7 +353,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* bits do not disable clocks. This preserves any hardware
* bits already set by default in "CLK_CTRL_REG" after reset.
*/
- if (!trans->cfg->apmg_not_supported) {
+ if (!trans->mac_cfg->base->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_EN_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(20);
@@ -469,7 +469,7 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
/* stop device's busmaster DMA activity */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);
@@ -501,10 +501,10 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_pcie_apm_init(trans);
/* inform ME that we are leaving */
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->trans_cfg->device_family >=
+ else if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -565,7 +565,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
return -ENOMEM;
}
- if (trans->trans_cfg->base_params->shadow_reg_enable) {
+ if (trans->mac_cfg->base->shadow_reg_enable) {
/* enable shadow regs in HW */
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
@@ -634,7 +634,7 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
IWL_DEBUG_INFO(trans,
"Couldn't prepare the card but SAP is connected\n");
trans->csme_own = true;
- if (trans->trans_cfg->device_family !=
+ if (trans->mac_cfg->device_family !=
IWL_DEVICE_FAMILY_9000)
IWL_ERR(trans,
"SAP not supported for this NIC family\n");
@@ -819,7 +819,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
iwl_enable_interrupts(trans);
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
if (cpu == 1)
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
0xFFFF);
@@ -977,7 +977,7 @@ monitor:
if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
fw_mon->physical >> dest->base_shift);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(fw_mon->physical + fw_mon->size -
256) >> dest->end_shift);
@@ -1153,7 +1153,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
*/
iwl_pcie_map_list(trans, causes_list_common,
ARRAY_SIZE(causes_list_common), val);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_pcie_map_list(trans, causes_list_bz,
ARRAY_SIZE(causes_list_bz), val);
else
@@ -1175,7 +1175,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
* the other (N - 2) interrupt vectors.
*/
val = BIT(MSIX_FH_INT_CAUSES_Q(0));
- for (idx = 1; idx < trans->num_rx_queues; idx++) {
+ for (idx = 1; idx < trans->info.num_rxqs; idx++) {
iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
MSIX_FH_INT_CAUSES_Q(idx - offset));
val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
@@ -1196,7 +1196,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
- if (trans->trans_cfg->mq_rx_supported &&
+ if (trans->mac_cfg->mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_umac_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
@@ -1271,7 +1271,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
iwl_pcie_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
- if (!trans->cfg->apmg_not_supported) {
+ if (!trans->mac_cfg->base->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
@@ -1279,7 +1279,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
}
/* Make sure (redundant) we've released our request to stay awake */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
@@ -1337,7 +1337,9 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
}
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill)
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
@@ -1408,10 +1410,10 @@ int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
- ret = iwl_pcie_load_given_ucode_8000(trans, fw);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ ret = iwl_pcie_load_given_ucode_8000(trans, img);
else
- ret = iwl_pcie_load_given_ucode(trans, fw);
+ ret = iwl_pcie_load_given_ucode(trans, img);
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
@@ -1423,10 +1425,10 @@ out:
return ret;
}
-void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
iwl_pcie_reset_ict(trans);
- iwl_pcie_tx_start(trans, scd_addr);
+ iwl_pcie_tx_start(trans);
}
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
@@ -1485,7 +1487,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
- !WARN_ON(trans->trans_cfg->gen2))
+ !WARN_ON(trans->mac_cfg->gen2))
_iwl_trans_pcie_stop_device(trans, from_irq);
}
@@ -1505,7 +1507,7 @@ static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -1534,11 +1536,11 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
UREG_DOORBELL_TO_ISR6_RESUME);
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
CSR_IPC_SLEEP_CONTROL_RESUME);
@@ -1593,7 +1595,7 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
goto out;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
@@ -1653,17 +1655,18 @@ out:
static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans,
- const struct iwl_cfg_trans_params *cfg_trans)
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int max_irqs, num_irqs, i, ret;
u16 pci_cmd;
u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
- if (!cfg_trans->mq_rx_supported)
+ if (!mac_cfg->mq_rx_supported)
goto enable_msi;
- if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
+ if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
@@ -1693,27 +1696,28 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
* More than two interrupts: we will use fewer RSS queues.
*/
if (num_irqs <= max_irqs - 2) {
- trans_pcie->trans->num_rx_queues = num_irqs + 1;
+ info->num_rxqs = num_irqs + 1;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
IWL_SHARED_IRQ_FIRST_RSS;
} else if (num_irqs == max_irqs - 1) {
- trans_pcie->trans->num_rx_queues = num_irqs;
+ info->num_rxqs = num_irqs;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
} else {
- trans_pcie->trans->num_rx_queues = num_irqs - 1;
+ info->num_rxqs = num_irqs - 1;
}
IWL_DEBUG_INFO(trans,
"MSI-X enabled with rx queues %d, vec mask 0x%x\n",
- trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);
+ info->num_rxqs, trans_pcie->shared_vec_mask);
- WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
+ WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES);
trans_pcie->alloc_vecs = num_irqs;
trans_pcie->msix_enabled = true;
return;
enable_msi:
+ info->num_rxqs = 1;
ret = pci_enable_msi(pdev);
if (ret) {
dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
@@ -1726,14 +1730,15 @@ enable_msi:
}
}
-static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
+static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans,
+ struct iwl_trans_info *info)
{
#if defined(CONFIG_SMP)
int iter_rx_q, i, ret, cpu, offset;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
- iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
+ iter_rx_q = info->num_rxqs - 1 + i;
offset = 1 + i;
for (; i < iter_rx_q ; i++) {
/*
@@ -1753,7 +1758,8 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
- struct iwl_trans_pcie *trans_pcie)
+ struct iwl_trans_pcie *trans_pcie,
+ struct iwl_trans_info *info)
{
int i;
@@ -1782,7 +1788,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
return ret;
}
}
- iwl_pcie_irq_set_affinity(trans_pcie->trans);
+ iwl_pcie_irq_set_affinity(trans_pcie->trans, info);
return 0;
}
@@ -1791,7 +1797,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
u32 hpm, wprot;
- switch (trans->trans_cfg->device_family) {
+ switch (trans->mac_cfg->device_family) {
case IWL_DEVICE_FAMILY_9000:
wprot = PREG_PRPH_WPROT_9000;
break;
@@ -1860,8 +1866,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
if (err)
return err;
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
- trans->trans_cfg->integrated) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ trans->mac_cfg->integrated) {
err = iwl_pcie_gen2_force_power_gating(trans);
if (err)
return err;
@@ -1936,7 +1942,7 @@ u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0x00FFFFFF;
else
return 0x000FFFFF;
@@ -1960,45 +1966,17 @@ void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
+void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
- trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
- trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
- trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
- trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
- trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
-
- if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
- trans_pcie->n_no_reclaim_cmds = 0;
- else
- trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
- if (trans_pcie->n_no_reclaim_cmds)
- memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
- trans_pcie->n_no_reclaim_cmds * sizeof(u8));
-
- trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order =
- iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+ iwl_trans_get_rb_size_order(trans->conf.rx_buf_size);
trans_pcie->rx_buf_bytes =
- iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
- trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
-
- trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
- trans_pcie->scd_set_active = trans_cfg->scd_set_active;
-
- trans->command_groups = trans_cfg->command_groups;
- trans->command_groups_size = trans_cfg->command_groups_size;
-
-
- trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
+ iwl_trans_get_rb_size(trans->conf.rx_buf_size);
}
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
@@ -2026,11 +2004,14 @@ void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions
static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
{
- iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd);
}
static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_header_wide bad_cmd = {
.cmd = INVALID_WR_PTR_CMD,
.group_id = DEBUG_GROUP,
@@ -2040,11 +2021,11 @@ static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
};
int ret;
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd,
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd,
sizeof(bad_cmd));
if (ret)
return ret;
- memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
+ memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
return 0;
}
@@ -2055,7 +2036,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_txq_gen2_tx_free(trans);
else
iwl_pcie_tx_free(trans);
@@ -2348,6 +2329,7 @@ out:
void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_trans_pcie_removal *removal;
char _msg = 0, *msg = &_msg;
@@ -2358,9 +2340,9 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return;
- if (trans->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
+ if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
mode = IWL_RESET_MODE_FUNC_RESET;
- if (trans->me_present < 0)
+ if (trans_pcie->me_present < 0)
msg = " instead of product reset as ME may be present";
else
msg = " instead of product reset as ME is present";
@@ -2395,7 +2377,7 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
removal->pdev = to_pci_dev(trans->dev);
removal->mode = mode;
- removal->integrated = trans->trans_cfg->integrated;
+ removal->integrated = trans->mac_cfg->integrated;
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
pci_dev_get(removal->pdev);
schedule_work(&removal->work);
@@ -2423,7 +2405,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
if (trans_pcie->cmd_hold_nic_awake)
goto out;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
@@ -2431,7 +2413,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
/*
@@ -2518,7 +2500,7 @@ iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
if (trans_pcie->cmd_hold_nic_awake)
goto out;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
@@ -2617,7 +2599,7 @@ int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+ if (queue >= trans->info.num_rxqs || !trans_pcie->rxq)
return -EINVAL;
data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
@@ -2698,10 +2680,10 @@ int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
/* waiting for all the tx frames complete might take a while */
for (cnt = 0;
- cnt < trans->trans_cfg->base_params->num_of_queues;
+ cnt < trans->mac_cfg->base->num_of_queues;
cnt++) {
- if (cnt == trans_pcie->txqs.cmd.q_id)
+ if (cnt == trans->conf.cmd_queue)
continue;
if (!test_bit(cnt, trans_pcie->txqs.queue_used))
continue;
@@ -2842,7 +2824,7 @@ static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state;
- if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
@@ -2860,7 +2842,7 @@ static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
*pos = ++state->pos;
- if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
return NULL;
return state;
@@ -2892,7 +2874,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
else
seq_puts(seq, "(unallocated)");
- if (state->pos == trans_pcie->txqs.cmd.q_id)
+ if (state->pos == trans->conf.cmd_queue)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
@@ -2930,7 +2912,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
int pos = 0, i, ret;
size_t bufsz;
- bufsz = sizeof(char) * 121 * trans->num_rx_queues;
+ bufsz = sizeof(char) * 121 * trans->info.num_rxqs;
if (!trans_pcie->rxq)
return -EAGAIN;
@@ -2939,9 +2921,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
if (!buf)
return -ENOMEM;
- for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
+ for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+ spin_lock_bh(&rxq->lock);
+
pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
i);
pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
@@ -2962,6 +2946,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: Not Allocated\n");
}
+ spin_unlock_bh(&rxq->lock);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -3266,8 +3251,9 @@ static ssize_t iwl_dbgfs_reset_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
static const char * const modes[] = {
- [IWL_RESET_MODE_SW_RESET] = "n/a",
- [IWL_RESET_MODE_REPROBE] = "n/a",
+ [IWL_RESET_MODE_SW_RESET] = "sw",
+ [IWL_RESET_MODE_REPROBE] = "reprobe",
+ [IWL_RESET_MODE_TOP_RESET] = "top",
[IWL_RESET_MODE_REMOVE_ONLY] = "remove",
[IWL_RESET_MODE_RESCAN] = "rescan",
[IWL_RESET_MODE_FUNC_RESET] = "function",
@@ -3286,8 +3272,18 @@ static ssize_t iwl_dbgfs_reset_write(struct file *file,
if (mode < 0)
return mode;
- if (mode < IWL_RESET_MODE_REMOVE_ONLY)
- return -EINVAL;
+ if (mode < IWL_RESET_MODE_REMOVE_ONLY) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return -EINVAL;
+ if (mode == IWL_RESET_MODE_TOP_RESET) {
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
+ return -EINVAL;
+ trans->request_top_reset = 1;
+ }
+ iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS);
+ return count;
+ }
iwl_trans_pcie_reset(trans, mode);
@@ -3428,7 +3424,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
(*data)->len = cpu_to_le32(fh_regs_len);
val = (void *)(*data)->data;
- if (!trans->trans_cfg->gen2)
+ if (!trans->mac_cfg->gen2)
for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
i += sizeof(u32))
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
@@ -3475,7 +3471,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
{
u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
write_ptr = DBGC_CUR_DBGBUF_STATUS;
@@ -3495,7 +3491,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
cpu_to_le32(iwl_read_prph(trans, base));
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
fw_mon_data->fw_mon_base_high_ptr =
cpu_to_le32(iwl_read_prph(trans, base_high));
write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
@@ -3515,8 +3511,8 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
if (trans->dbg.dest_tlv ||
(fw_mon->size &&
- (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
- trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
+ (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
+ trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
@@ -3539,14 +3535,14 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
IWL_LDBG_M2S_BUF_BA_MSK) <<
trans->dbg.dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
- base += trans->cfg->smem_offset;
+ base += trans->mac_cfg->base->smem_offset;
} else {
base = iwl_read_prph(trans, base) <<
trans->dbg.dest_tlv->base_shift;
}
- iwl_trans_read_mem(trans, base, fw_mon_data->data,
- monitor_len / sizeof(u32));
+ iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data,
+ monitor_len / sizeof(u32));
} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
monitor_len =
iwl_trans_pci_dump_marbh_monitor(trans,
@@ -3580,7 +3576,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
trans->dbg.dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
- base += trans->cfg->smem_offset;
+ base += trans->mac_cfg->base->smem_offset;
monitor_len =
(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
@@ -3596,7 +3592,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
trans->dbg.dest_tlv->end_shift;
/* Make "end" point to the actual end */
- if (trans->trans_cfg->device_family >=
+ if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_8000 ||
trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg.dest_tlv->end_shift);
@@ -3617,13 +3613,13 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->trans_cfg->mq_rx_supported &&
+ !trans->mac_cfg->mq_rx_supported &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
if (!dump_mask)
@@ -3648,7 +3644,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
/* FH registers */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
len += sizeof(*data) +
(iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
@@ -3662,15 +3658,18 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
/* Dumping RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
+ spin_lock_bh(&rxq->lock);
num_rbs = iwl_get_closed_rb_stts(trans, rxq);
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
+ spin_unlock_bh(&rxq->lock);
+
len += num_rbs * (sizeof(*data) +
sizeof(struct iwl_fw_error_dump_rb) +
(PAGE_SIZE << trans_pcie->rx_page_order));
}
/* Paged memory for gen2 HW */
- if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
+ if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -3695,7 +3694,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
u8 tfdidx;
u32 caplen, cmdlen;
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
tfdidx = idx;
else
tfdidx = ptr;
@@ -3735,7 +3734,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->trans_cfg->gen2 &&
+ if (trans->mac_cfg->gen2 &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
@@ -3775,7 +3774,7 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
if (trans_pcie->msix_enabled) {
inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
else
sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
@@ -3787,12 +3786,14 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct iwl_cfg_trans_params *cfg_trans)
+struct iwl_trans *
+iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info)
{
struct iwl_trans_pcie *trans_pcie, **priv;
struct iwl_trans *trans;
+ unsigned int bc_tbl_n_entries;
int ret, addr_size;
u32 bar0;
@@ -3809,13 +3810,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
return ERR_PTR(ret);
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
- cfg_trans);
+ mac_cfg);
if (!trans)
return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans->trans_cfg->gen2) {
+ /* Initialize the wait queue for commands */
+ init_waitqueue_head(&trans_pcie->wait_command_queue);
+
+ if (trans->mac_cfg->gen2) {
trans_pcie->txqs.tfd.addr_size = 64;
trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
@@ -3824,10 +3828,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
}
- trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
- /* Set a short watchdog for the command queue */
- trans_pcie->txqs.cmd.wdg_timeout = IWL_DEF_WD_TIMEOUT;
+ trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11);
+
+ info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->txqs.tso_hdr_page) {
@@ -3835,20 +3841,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_free_trans;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- trans_pcie->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans_pcie->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ;
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;
else
- trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE;
+
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;
/*
* For gen2 devices, we use a single allocation for each byte-count
* table, but they're pretty small (1k) so use a DMA pool that we
* allocate here.
*/
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
trans_pcie->txqs.bc_pool =
dmam_pool_create("iwlwifi:bc", trans->dev,
trans_pcie->txqs.bc_tbl_size,
@@ -3861,7 +3868,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Some things must not change even if the config does */
WARN_ON(trans_pcie->txqs.tfd.addr_size !=
- (trans->trans_cfg->gen2 ? 64 : 36));
+ (trans->mac_cfg->gen2 ? 64 : 36));
/* Initialize NAPI here - it should be before registering to mac80211
* in the opmode but after the HW struct is allocated.
@@ -3895,7 +3902,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->debug_rfkill = -1;
- if (!cfg_trans->base_params->pcie_l1_allowed) {
+ if (!mac_cfg->base->pcie_l1_allowed) {
/*
* W/A - seems to solve weird behavior. We need to remove this
* if we don't want to stay in L1 all the time. This wastes a
@@ -3939,8 +3946,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);
- trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
- if (trans->hw_rev == 0xffffffff) {
+ info->hw_rev = iwl_read32(trans, CSR_HW_REV);
+ if (info->hw_rev == 0xffffffff) {
dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
ret = -EIO;
goto out_no_pci;
@@ -3952,17 +3959,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* "dash" value). To keep hw_rev backwards compatible - we'll store it
* in the old format.
*/
- if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
- trans->hw_rev_step = trans->hw_rev & 0xF;
+ if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ info->hw_rev_step = info->hw_rev & 0xF;
else
- trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;
+ info->hw_rev_step = (info->hw_rev & 0xC) >> 2;
- IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
+ IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev);
- iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
- trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
- snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
- "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+ iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info);
init_waitqueue_head(&trans_pcie->sx_waitq);
@@ -3971,7 +3975,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
if (trans_pcie->msix_enabled) {
- ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
+ ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info);
if (ret)
goto out_no_pci;
} else {
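The hunks above move hw_rev and hw_rev_step from struct iwl_trans into the new struct iwl_trans_info, but the step extraction itself is unchanged: family 8000 and later keep the low nibble of CSR_HW_REV, older devices use bits [3:2]. A minimal user-space sketch of that bit manipulation (the family constant below is an illustrative stand-in for the driver's enum, not the real value):

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-in for the driver's device family enum. */
enum { FAMILY_8000 = 8000 };

/* Extract the silicon step from a CSR_HW_REV value the way the hunk
 * above does: low nibble on family >= 8000, bits [3:2] on older HW.
 */
static uint32_t hw_rev_step(int family, uint32_t hw_rev)
{
	if (family >= FAMILY_8000)
		return hw_rev & 0xF;
	return (hw_rev & 0xC) >> 2;
}

int main(void)
{
	assert(hw_rev_step(8000, 0x0351) == 1);
	assert(hw_rev_step(7000, 0x0008) == 2);
	return 0;
}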
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 71227fd3dac0..df0545f09da9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -18,13 +18,12 @@
static struct page *get_workaround_page(struct iwl_trans *trans,
struct sk_buff *skb)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_page_info *info;
struct page **page_ptr;
struct page *ret;
dma_addr_t phys;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
ret = alloc_page(GFP_ATOMIC);
if (!ret)
@@ -164,7 +163,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v9 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -491,21 +490,21 @@ struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
bool amsdu;
/* There must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v9) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
+ offsetofend(struct iwl_tx_cmd_v9, dram_info) >
IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
+ offsetofend(struct iwl_tx_cmd, dram_info) >
IWL_FIRST_TB_SIZE);
memset(tfd, 0, sizeof(*tfd));
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = sizeof(struct iwl_tx_cmd_gen2);
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = sizeof(struct iwl_tx_cmd_v9);
else
- len = sizeof(struct iwl_tx_cmd_gen3);
+ len = sizeof(struct iwl_tx_cmd);
amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
(*ieee80211_get_qos_ctl(hdr) &
@@ -536,17 +535,17 @@ int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
* If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
- if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
+ if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size)
max = q->n_window;
else
- max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
+ max = trans->mac_cfg->base->max_tfd_queue_size - 1;
/*
* max_tfd_queue_size is a power of 2, so the following is equivalent to
* modulo by max_tfd_queue_size and is well defined.
*/
used = (q->write_ptr - q->read_ptr) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
@@ -561,8 +560,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
@@ -582,24 +581,16 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
- /* Starting from AX210, the HW expects bytes */
- WARN_ON(trans_pcie->txqs.bc_table_dword);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
} else {
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-
- /* Before AX210, the HW expects DW */
- WARN_ON(!trans_pcie->txqs.bc_table_dword);
len = DIV_ROUND_UP(len, 4);
WARN_ON(len > 0xFFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
+
+ scd_bc_tbl[idx].tfd_offset = bc_ent;
}
static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
@@ -756,7 +747,8 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -785,16 +777,16 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -1;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd *tx_cmd =
(void *)dev_cmd->payload;
- cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ cmd_len = le16_to_cpu(tx_cmd->len);
} else {
- struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ struct iwl_tx_cmd_v9 *tx_cmd_v9 =
(void *)dev_cmd->payload;
- cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ cmd_len = le16_to_cpu(tx_cmd_v9->len);
}
/* Set up entry for this TFD in Tx byte-count array */
@@ -832,7 +824,7 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ if (txq_id != trans->conf.cmd_queue) {
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
struct sk_buff *skb = txq->entries[idx].skb;
@@ -906,7 +898,7 @@ static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
iwl_txq_gen2_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->txqs.cmd.q_id)
+ if (txq_id == trans->conf.cmd_queue)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -1007,7 +999,7 @@ static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txqs.txq[qid] = txq;
- wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+ wr_ptr &= (trans->mac_cfg->base->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
@@ -1043,8 +1035,8 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
/* but must be power of 2 values for calculating read/write pointers */
size = rounddown_pow_of_two(size);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
- trans->hw_rev_step == SILICON_A_STEP) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->info.hw_rev_step == SILICON_A_STEP) {
size = 4096;
txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
} else {
@@ -1064,7 +1056,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
if (IS_ERR(txq))
return PTR_ERR(txq);
- if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) {
+ if (trans->conf.queue_alloc_cmd_ver == 0) {
memset(&cmd.old, 0, sizeof(cmd.old));
cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
@@ -1081,7 +1073,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
hcmd.id = SCD_QUEUE_CFG;
hcmd.len[0] = sizeof(cmd.old);
hcmd.data[0] = &cmd.old;
- } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) {
+ } else if (trans->conf.queue_alloc_cmd_ver == 3) {
memset(&cmd.new, 0, sizeof(cmd.new));
cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
@@ -1176,7 +1168,7 @@ int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
}
ret = iwl_txq_init(trans, queue, queue_size,
- (txq_id == trans_pcie->txqs.cmd.q_id));
+ (txq_id == trans->conf.cmd_queue));
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
@@ -1206,7 +1198,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1323,7 +1315,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1371,7 +1363,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
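With the byte-count tables unified into a single struct iwl_bc_tbl_entry array, iwl_pcie_gen2_update_byte_tbl above differs between generations only in the encoding: AX210 and later take the length in bytes with the fetch-chunk count in the top two bits (<< 14), while older gen2 HW takes dwords with the count shifted by 12. A user-space sketch of just that encoding (the driver's cpu_to_le16() conversion is omitted here):

#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Encode one byte-count table entry as in the hunk above; the caller
 * derives num_fetch_chunks as DIV_ROUND_UP(filled_tfd_size, 64) - 1.
 */
static uint16_t bc_entry(int ax210_or_later, uint16_t len,
			 uint16_t num_fetch_chunks)
{
	if (ax210_or_later)	/* HW expects bytes, 14-bit length */
		return len | (num_fetch_chunks << 14);
	/* older HW expects dwords, 12-bit length */
	return DIV_ROUND_UP(len, 4) | (num_fetch_chunks << 12);
}

int main(void)
{
	assert(bc_entry(1, 100, 2) == (100 | (2 << 14)));
	assert(bc_entry(0, 100, 2) == (25 | (2 << 12)));
	return 0;
}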
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 9fc4e98693eb..bb467e2b1779 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -78,7 +78,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
@@ -90,8 +89,8 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 2. NIC is woken up for CMD regardless of shadow outside this function
* 3. there is a chance that the NIC is asleep
*/
- if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans_pcie->txqs.cmd.q_id &&
+ if (!trans->mac_cfg->base->shadow_reg_enable &&
+ txq_id != trans->conf.cmd_queue &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
@@ -125,7 +124,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txqs.txq[i];
if (!test_bit(i, trans_pcie->txqs.queue_used))
@@ -193,7 +192,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
+ if (!trans->mac_cfg->base->apmg_wake_up_wa)
return;
spin_lock(&trans_pcie->reg_lock);
@@ -226,11 +225,10 @@ static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page **page_ptr;
struct page *next;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
next = *page_ptr;
*page_ptr = NULL;
@@ -280,10 +278,12 @@ iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
struct iwl_tfd *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
tfd->num_tbs = 0;
- iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
- trans->invalid_tx_cmd.size);
+ iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma,
+ trans_pcie->invalid_tx_cmd.size);
}
static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
@@ -355,7 +355,7 @@ static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
iwl_txq_get_tfd(trans, txq, read_ptr));
else
@@ -394,7 +394,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ if (txq_id != trans->conf.cmd_queue) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
struct iwl_cmd_meta *cmd_meta =
&txq->entries[txq->read_ptr].meta;
@@ -408,7 +408,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr &&
- txq_id == trans_pcie->txqs.cmd.q_id)
+ txq_id == trans->conf.cmd_queue)
iwl_pcie_clear_cmd_in_flight(trans);
}
@@ -446,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->txqs.cmd.q_id)
+ if (txq_id == trans->conf.cmd_queue)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -456,7 +456,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
if (txq->tfds) {
dma_free_coherent(dev,
trans_pcie->txqs.tfd.size *
- trans->trans_cfg->base_params->max_tfd_queue_size,
+ trans->mac_cfg->base->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -475,10 +475,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
memset(txq, 0, sizeof(*txq));
}
-void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
+void iwl_pcie_tx_start(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int nq = trans->trans_cfg->base_params->num_of_queues;
+ int nq = trans->mac_cfg->base->num_of_queues;
int chan;
u32 reg_val;
int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
@@ -493,13 +493,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
- WARN_ON(scd_base_addr != 0 &&
- scd_base_addr != trans_pcie->scd_base_addr);
-
/* reset context data, TX status and translation data */
- iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_MEM_LOWER_BOUND,
- NULL, clear_dwords);
+ iwl_trans_pcie_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->txqs.scd_bc_tbls.dma >> 10);
@@ -507,12 +504,12 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- if (trans->trans_cfg->base_params->scd_chain_ext_wa)
+ if (trans->mac_cfg->base->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
- trans_pcie->txqs.cmd.fifo,
- trans_pcie->txqs.cmd.wdg_timeout);
+ iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue,
+ trans->conf.cmd_fifo,
+ IWL_DEF_WD_TIMEOUT);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -529,7 +526,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
@@ -543,13 +540,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
* we should never get here in gen2 trans mode; return early to avoid
* having invalid accesses
*/
- if (WARN_ON_ONCE(trans->trans_cfg->gen2))
+ if (WARN_ON_ONCE(trans->mac_cfg->gen2))
return;
- for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++) {
struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
txq->dma_addr);
@@ -571,7 +568,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
* while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
* contain garbage.
*/
- iwl_pcie_tx_start(trans, 0);
+ iwl_pcie_tx_start(trans);
}
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
@@ -633,7 +630,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
return 0;
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++)
iwl_pcie_txq_unmap(trans, txq_id);
@@ -656,7 +653,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
/* Tx queues */
if (trans_pcie->txq_memory) {
for (txq_id = 0;
- txq_id < trans->trans_cfg->base_params->num_of_queues;
+ txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
trans_pcie->txqs.txq[txq_id] = NULL;
@@ -678,7 +675,7 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
bool active;
u8 fifo;
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
txq->read_ptr, txq->write_ptr);
/* TODO: access new SCD registers and dump them */
@@ -695,15 +692,15 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ (trans->mac_cfg->base->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ (trans->mac_cfg->base->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
static void iwl_txq_stuck_timer(struct timer_list *t)
{
- struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
+ struct iwl_txq *txq = timer_container_of(txq, t, stuck_timer);
struct iwl_trans *trans = txq->trans;
spin_lock(&txq->lock);
@@ -723,8 +720,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t num_entries = trans->trans_cfg->gen2 ?
- slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t num_entries = trans->mac_cfg->gen2 ?
+ slots_num : trans->mac_cfg->base->max_tfd_queue_size;
size_t tfd_sz;
size_t tb0_buf_sz;
int i;
@@ -779,7 +776,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
for (i = 0; i < num_entries; i++) {
void *tfd = iwl_txq_get_tfd(trans, txq, i);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_txq_set_tfd_invalid_gen2(trans, tfd);
else
iwl_txq_set_tfd_invalid_gen1(trans, tfd);
@@ -799,6 +796,8 @@ error:
return -ENOMEM;
}
+#define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+
/*
* iwl_pcie_tx_alloc - allocate TX context
* Allocate all Tx DMA structures and initialize them
@@ -808,12 +807,12 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
+ u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues;
- if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
+ if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
return -EINVAL;
- bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbls_size *= BC_TABLE_SIZE;
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
@@ -837,7 +836,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
trans_pcie->txq_memory =
- kcalloc(trans->trans_cfg->base_params->num_of_queues,
+ kcalloc(trans->mac_cfg->base->num_of_queues,
sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq_memory) {
IWL_ERR(trans, "Not enough memory for txq\n");
@@ -846,16 +845,16 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans->conf.cmd_queue);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->cfg->min_txq_size);
+ trans->mac_cfg->base->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_ba_txq_size);
+ trans->mac_cfg->base->min_ba_txq_size);
trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
slots_num, cmd_queue);
@@ -905,7 +904,7 @@ int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
u32 tfd_queue_max_size =
- trans->trans_cfg->base_params->max_tfd_queue_size;
+ trans->mac_cfg->base->max_tfd_queue_size;
int ret;
txq->need_update = false;
@@ -963,16 +962,16 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
spin_unlock_bh(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans->conf.cmd_queue);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->cfg->min_txq_size);
+ trans->mac_cfg->base->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_ba_txq_size);
+ trans->mac_cfg->base->min_ba_txq_size);
ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
@@ -991,7 +990,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
- if (trans->trans_cfg->base_params->num_of_queues > 20)
+ if (trans->mac_cfg->base->num_of_queues > 20)
iwl_set_bits_prph(trans, SCD_GP_CTRL,
SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1012,7 +1011,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
+ if (!trans->mac_cfg->base->apmg_wake_up_wa)
return 0;
/*
@@ -1090,12 +1089,12 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
idx = iwl_txq_get_cmd_index(txq, idx);
r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
+ if (idx >= trans->mac_cfg->base->max_tfd_queue_size ||
(!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
- trans->trans_cfg->base_params->max_tfd_queue_size,
+ trans->mac_cfg->base->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
@@ -1164,15 +1163,15 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;
/* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans_pcie->txqs.cmd.q_id &&
- trans_pcie->scd_set_active)
+ if (txq_id == trans->conf.cmd_queue &&
+ trans->conf.scd_set_active)
iwl_scd_enable_set_active(trans, 0);
/* Stop this Tx queue before configuring it */
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans_pcie->txqs.cmd.q_id)
+ if (txq_id != trans->conf.cmd_queue)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
@@ -1206,7 +1205,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
* this sad hardware issue.
* This bug has been fixed on devices 9000 and up.
*/
- scd_bug = !trans->trans_cfg->mq_rx_supported &&
+ scd_bug = !trans->mac_cfg->mq_rx_supported &&
!((ssn - txq->write_ptr) & 0x3f) &&
(ssn != txq->write_ptr);
if (scd_bug)
@@ -1242,8 +1241,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans_pcie->txqs.cmd.q_id &&
- trans_pcie->scd_set_active)
+ if (txq_id == trans->conf.cmd_queue &&
+ trans->conf.scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
IWL_DEBUG_TX_QUEUES(trans,
@@ -1293,8 +1292,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
- iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
- ARRAY_SIZE(zero_val));
+ iwl_trans_pcie_write_mem(trans, stts_addr,
+ (const void *)zero_val,
+ ARRAY_SIZE(zero_val));
}
iwl_pcie_txq_unmap(trans, txq_id);
@@ -1310,10 +1310,10 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txqs.txq[i];
- if (i == trans_pcie->txqs.cmd.q_id)
+ if (i == trans->conf.cmd_queue)
continue;
/* we skip the command queue (obviously) so it's OK to nest */
@@ -1346,7 +1346,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1361,7 +1361,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
unsigned long flags;
- if (WARN(!trans->wide_cmd_header &&
+ if (WARN(!trans->conf.wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
"unsupported wide command %#x\n", cmd->id))
return -EINVAL;
@@ -1475,7 +1475,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1483,7 +1483,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
@@ -1534,7 +1534,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1633,14 +1633,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
+ if (WARN(txq_id != trans->conf.cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
+ txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1654,7 +1654,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
group_id = cmd->hdr.group_id;
cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_txq_gen2_tfd_unmap(trans, meta,
iwl_txq_get_tfd(trans, txq, index));
else
@@ -1683,7 +1683,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
iwl_get_cmd_string(trans, cmd_id));
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
}
meta->flags = 0;
@@ -1753,7 +1753,7 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
dma_addr_t phys;
void *ret;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
if (WARN_ON(*page_ptr))
return NULL;
@@ -1912,7 +1912,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 tb1_len)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -2067,14 +2067,14 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
int num_tbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ struct iwl_bc_tbl_entry *scd_bc_tbl;
int write_ptr = txq->write_ptr;
int txq_id = txq->id;
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
@@ -2092,7 +2092,8 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
- if (trans_pcie->txqs.bc_table_dword)
+
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
@@ -2100,10 +2101,10 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
bc_ent = cpu_to_le16(len | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent;
if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
bc_ent;
}
@@ -2112,7 +2113,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
- struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
@@ -2153,7 +2154,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -2184,7 +2186,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
+ offsetof(struct iwl_tx_cmd_v6, scratch);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -2199,7 +2201,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* (This calculation modifies the TX command, so do it before the
* setup of the first TB)
*/
- len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+ len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_FIRST_TB_SIZE;
/* do not align A-MSDU to dword as the subframe header aligns it */
amsdu = ieee80211_is_data_qos(fc) &&
@@ -2222,9 +2224,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
IWL_FIRST_TB_SIZE, true);
/* there must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd, scratch) >
+ offsetofend(struct iwl_tx_cmd_v6, scratch) >
IWL_FIRST_TB_SIZE);
/* map the data for TB1 */
@@ -2312,24 +2314,24 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+ struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
int txq_id = txq->id;
u8 sta_id = 0;
__le16 bc_ent;
struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans_pcie->txqs.cmd.q_id)
+ if (txq_id != trans->conf.cmd_queue)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
bc_ent;
}
@@ -2343,7 +2345,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
int txq_read_ptr, txq_write_ptr;
/* This function is not meant to release cmd queue */
- if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
+ if (WARN_ON(txq_id == trans->conf.cmd_queue))
return;
if (WARN_ON(!txq))
@@ -2385,7 +2387,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
IWL_ERR(trans,
"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free,
- trans->trans_cfg->base_params->max_tfd_queue_size,
+ trans->mac_cfg->base->max_tfd_queue_size,
txq_write_ptr, txq_read_ptr);
iwl_op_mode_time_point(trans->op_mode,
@@ -2414,7 +2416,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
txq->entries[read_ptr].skb = NULL;
- if (!trans->trans_cfg->gen2)
+ if (!trans->mac_cfg->gen2)
iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
txq_read_ptr);
@@ -2456,7 +2458,8 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
/*
* Note that we can very well be overflowing again.
@@ -2548,11 +2551,11 @@ next_queue:
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
+ struct iwl_host_cmd *cmd,
+ const char *cmd_str)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
int cmd_idx;
int ret;
@@ -2565,7 +2568,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
else
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
@@ -2578,7 +2581,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
return ret;
}
- ret = wait_event_timeout(trans->wait_command_queue,
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
!test_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status),
HOST_COMPLETE_TIMEOUT);
@@ -2594,7 +2597,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
cmd_str);
ret = -ETIMEDOUT;
- iwl_trans_sync_nmi(trans);
+ iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
@@ -2645,6 +2648,8 @@ cancel:
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+
/* Make sure the NIC is still alive in the bus */
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
@@ -2656,20 +2661,16 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
return -ERFKILL;
}
- if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
- !(cmd->flags & CMD_SEND_IN_D3))) {
- IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
- return -EHOSTDOWN;
- }
-
if (cmd->flags & CMD_ASYNC) {
int ret;
+ IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str);
+
/* An asynchronous command can not expect an SKB to be set. */
if (WARN_ON(cmd->flags & CMD_WANT_SKB))
return -EINVAL;
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
else
ret = iwl_pcie_enqueue_hcmd(trans, cmd);
@@ -2683,6 +2684,5 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
return 0;
}
- return iwl_trans_pcie_send_hcmd_sync(trans, cmd);
+ return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str);
}
-IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);
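The masks with max_tfd_queue_size - 1 in the tx.c hunks above (and in iwl_txq_space() earlier) rely on the comment's observation that the queue size is a power of two: (write - read) & (size - 1) is then a well-defined modulo, correct even after the unsigned difference wraps. A small self-contained demonstration:

#include <assert.h>
#include <stdint.h>

/* Occupancy of a ring whose size is a power of two, as computed with
 * max_tfd_queue_size - 1 above: the mask turns the subtraction into a
 * modulo, so the result stays correct when write_ptr has wrapped
 * around below read_ptr.
 */
static uint32_t ring_used(uint32_t write_ptr, uint32_t read_ptr,
			  uint32_t size)
{
	return (write_ptr - read_ptr) & (size - 1);
}

int main(void)
{
	assert(ring_used(10, 4, 256) == 6);
	/* wrapped: write restarted at 2 while read is still at 250 */
	assert(ring_used(2, 250, 256) == 8);
	return 0;
}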
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
index d0bda23c628a..784433bb246a 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for the iwlwifi device info table
*
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
*/
#include <kunit/test.h>
#include <linux/pci.h>
@@ -13,10 +13,50 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
{
- printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
- pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
- di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
- di->cores);
+ u16 subdevice_mask = GENMASK(di->subdevice_m_h, di->subdevice_m_l);
+ char buf[100] = {};
+ int pos = 0;
+
+ if (di->match_rf_type)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_type=%03x", di->rf_type);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_type=*");
+
+ if (di->match_bw_limit)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " bw_limit=%d", di->bw_limit);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " bw_limit=*");
+
+ if (di->match_rf_step)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_step=%c",
+ di->rf_step == SILICON_Z_STEP ? 'Z' :
+ 'A' + di->rf_step);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_step=*");
+
+ if (di->match_rf_id)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_id=0x%x", di->rf_id);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_id=*");
+
+ if (di->match_cdb)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " cdb=%d", di->cdb);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " cdb=*");
+
+ printk(KERN_DEBUG "%sdev=%04x subdev=%04x/%04x%s\n",
+ pfx, di->device, di->subdevice, subdevice_mask, buf);
}
static void devinfo_table_order(struct kunit *test)
@@ -28,11 +68,14 @@ static void devinfo_table_order(struct kunit *test)
const struct iwl_dev_info *ret;
ret = iwl_pci_find_dev_info(di->device, di->subdevice,
- di->mac_type, di->mac_step,
di->rf_type, di->cdb,
- di->jacket, di->rf_id,
- di->no_160, di->cores, di->rf_step);
- if (ret != di) {
+ di->rf_id, di->bw_limit,
+ di->rf_step);
+ if (!ret) {
+ iwl_pci_print_dev_info("No entry found for: ", di);
+ KUNIT_FAIL(test,
+ "No entry found for entry at index %d\n", idx);
+ } else if (ret != di) {
iwl_pci_print_dev_info("searched: ", di);
iwl_pci_print_dev_info("found: ", ret);
KUNIT_FAIL(test,
@@ -42,6 +85,92 @@ static void devinfo_table_order(struct kunit *test)
}
}
+static void devinfo_names(struct kunit *test)
+{
+ int idx;
+
+ for (idx = 0; idx < iwl_dev_info_table_size; idx++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[idx];
+
+ KUNIT_ASSERT_TRUE(test, di->name);
+ }
+}
+
+static void devinfo_no_cfg_dups(struct kunit *test)
+{
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ const struct iwl_rf_cfg *cfg_i = iwl_dev_info_table[i].cfg;
+
+ for (int j = 0; j < i; j++) {
+ const struct iwl_rf_cfg *cfg_j = iwl_dev_info_table[j].cfg;
+
+ if (cfg_i == cfg_j)
+ continue;
+
+ KUNIT_EXPECT_NE_MSG(test, memcmp(cfg_i, cfg_j,
+ sizeof(*cfg_i)), 0,
+ "identical configs: %ps and %ps\n",
+ cfg_i, cfg_j);
+ }
+ }
+}
+
+static void devinfo_no_name_dups(struct kunit *test)
+{
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ for (int j = 0; j < i; j++) {
+ if (iwl_dev_info_table[i].name == iwl_dev_info_table[j].name)
+ continue;
+
+ KUNIT_EXPECT_NE_MSG(test,
+ strcmp(iwl_dev_info_table[i].name,
+ iwl_dev_info_table[j].name),
+ 0,
+ "name dup: %ps/%ps",
+ iwl_dev_info_table[i].name,
+ iwl_dev_info_table[j].name);
+ }
+ }
+}
+
+static void devinfo_check_subdev_match(struct kunit *test)
+{
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[i];
+ u16 subdevice_mask = GENMASK(di->subdevice_m_h,
+ di->subdevice_m_l);
+
+ /* if the BW limit bit is matched then it must have a limit */
+ if (di->match_bw_limit == 1 && di->bw_limit == 1)
+ KUNIT_EXPECT_NE(test, di->cfg->bw_limit, 0);
+
+ /* if subdevice is ANY we can have RF ID/BW limit */
+ if (di->subdevice == (u16)IWL_CFG_ANY)
+ continue;
+
+ /* same if the subdevice mask doesn't overlap them */
+ if (IWL_SUBDEVICE_RF_ID(subdevice_mask) == 0 &&
+ IWL_SUBDEVICE_BW_LIM(subdevice_mask) == 0)
+ continue;
+
+ /* but otherwise they shouldn't be used */
+ KUNIT_EXPECT_EQ(test, (int)di->match_rf_id, 0);
+ KUNIT_EXPECT_EQ(test, (int)di->match_bw_limit, 0);
+ }
+}
+
+static void devinfo_check_killer_subdev(struct kunit *test)
+{
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[i];
+
+ if (!strstr(di->name, "Killer"))
+ continue;
+
+ KUNIT_EXPECT_NE(test, di->subdevice, (u16)IWL_CFG_ANY);
+ }
+}
+
static void devinfo_pci_ids(struct kunit *test)
{
struct pci_dev *dev;
@@ -64,9 +193,36 @@ static void devinfo_pci_ids(struct kunit *test)
}
}
+static void devinfo_no_mac_cfg_dups(struct kunit *test)
+{
+ for (int i = 0; iwl_hw_card_ids[i].vendor; i++) {
+ const struct iwl_mac_cfg *cfg_i =
+ (void *)iwl_hw_card_ids[i].driver_data;
+
+ for (int j = 0; j < i; j++) {
+ const struct iwl_mac_cfg *cfg_j =
+ (void *)iwl_hw_card_ids[j].driver_data;
+
+ if (cfg_i == cfg_j)
+ continue;
+
+ KUNIT_EXPECT_NE_MSG(test, memcmp(cfg_j, cfg_i,
+ sizeof(*cfg_i)), 0,
+ "identical configs: %ps and %ps\n",
+ cfg_i, cfg_j);
+ }
+ }
+}
+
static struct kunit_case devinfo_test_cases[] = {
KUNIT_CASE(devinfo_table_order),
+ KUNIT_CASE(devinfo_names),
+ KUNIT_CASE(devinfo_no_cfg_dups),
+ KUNIT_CASE(devinfo_no_name_dups),
+ KUNIT_CASE(devinfo_check_subdev_match),
+ KUNIT_CASE(devinfo_check_killer_subdev),
KUNIT_CASE(devinfo_pci_ids),
+ KUNIT_CASE(devinfo_no_mac_cfg_dups),
{}
};
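The new tests derive the subdevice match mask with GENMASK(di->subdevice_m_h, di->subdevice_m_l), i.e. all bits from the low index through the high index set. A user-space equivalent for 16-bit values, for reference:

#include <assert.h>
#include <stdint.h>

/* User-space equivalent of the kernel's GENMASK(h, l) for 16-bit
 * values: set bits l..h inclusive.
 */
static uint16_t genmask16(unsigned int h, unsigned int l)
{
	return (uint16_t)(((1u << (h - l + 1)) - 1) << l);
}

int main(void)
{
	assert(genmask16(15, 0) == 0xffff);
	assert(genmask16(7, 4) == 0x00f0);
	return 0;
}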
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index 772084a9bd8d..3baf8ab01e22 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -231,6 +231,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
mutex_lock(&priv->eeprom_mutex);
priv->eeprom = buf;
+ priv->eeprom_slice_size = len;
eeprom_hdr = skb_put(skb, eeprom_hdr_size + len);
if (priv->fw_var < 0x509) {
@@ -253,6 +254,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
ret = -EBUSY;
}
priv->eeprom = NULL;
+ priv->eeprom_slice_size = 0;
mutex_unlock(&priv->eeprom_mutex);
return ret;
}
diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
index 522656de4159..aeb5e40cc5ef 100644
--- a/drivers/net/wireless/intersil/p54/p54.h
+++ b/drivers/net/wireless/intersil/p54/p54.h
@@ -258,6 +258,7 @@ struct p54_common {
/* eeprom handling */
void *eeprom;
+ size_t eeprom_slice_size;
struct completion eeprom_comp;
struct mutex eeprom_mutex;
};
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 8414aa208655..2deb1bb54f24 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -496,14 +496,19 @@ static void p54_rx_eeprom_readback(struct p54_common *priv,
return;
if (priv->fw_var >= 0x509) {
- memcpy(priv->eeprom, eeprom->v2.data,
- le16_to_cpu(eeprom->v2.len));
+ if (le16_to_cpu(eeprom->v2.len) != priv->eeprom_slice_size)
+ return;
+
+ memcpy(priv->eeprom, eeprom->v2.data, priv->eeprom_slice_size);
} else {
- memcpy(priv->eeprom, eeprom->v1.data,
- le16_to_cpu(eeprom->v1.len));
+ if (le16_to_cpu(eeprom->v1.len) != priv->eeprom_slice_size)
+ return;
+
+ memcpy(priv->eeprom, eeprom->v1.data, priv->eeprom_slice_size);
}
priv->eeprom = NULL;
+ priv->eeprom_slice_size = 0;
tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
dev_kfree_skb_any(tmp);
complete(&priv->eeprom_comp);
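The p54 change above pairs the request and completion paths: p54_download_eeprom records how large a slice it asked for, and p54_rx_eeprom_readback now refuses the firmware's reply unless the reported length matches exactly, so a malformed response can no longer overrun the destination buffer. A minimal sketch of the pattern, with illustrative names rather than the driver's:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Illustrative request/response pairing as in the p54 fix: remember
 * how much we asked the firmware for, and only copy a reply of
 * exactly that size.
 */
struct eeprom_xfer {
	void *buf;		/* destination supplied with the request */
	size_t slice_size;	/* length we asked the firmware for */
};

static int eeprom_readback(struct eeprom_xfer *x, const void *data,
			   size_t len)
{
	if (len != x->slice_size)	/* reject short or oversized replies */
		return -1;
	memcpy(x->buf, data, x->slice_size);
	x->buf = NULL;
	x->slice_size = 0;
	return 0;
}

int main(void)
{
	char dst[4] = "";
	struct eeprom_xfer x = { dst, sizeof(dst) };

	assert(eeprom_readback(&x, "abcd", 5) == -1);	/* wrong length */
	assert(eeprom_readback(&x, "abcd", 4) == 0);
	return 0;
}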
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index ea3cc2eaec36..b3c4040257a6 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -165,7 +165,7 @@ static void if_usb_setup_firmware(struct lbs_private *priv)
static void if_usb_fw_timeo(struct timer_list *t)
{
- struct if_usb_card *cardp = from_timer(cardp, t, fw_timeout);
+ struct if_usb_card *cardp = timer_container_of(cardp, t, fw_timeout);
if (cardp->fwdnldover) {
lbs_deb_usb("Download complete, no event. Assuming success\n");
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 26d13e9b3c95..d44e02c6fe38 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -693,7 +693,7 @@ EXPORT_SYMBOL_GPL(lbs_resume);
*/
static void lbs_cmd_timeout_handler(struct timer_list *t)
{
- struct lbs_private *priv = from_timer(priv, t, command_timer);
+ struct lbs_private *priv = timer_container_of(priv, t, command_timer);
unsigned long flags;
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -727,7 +727,8 @@ out:
*/
static void lbs_tx_lockup_handler(struct timer_list *t)
{
- struct lbs_private *priv = from_timer(priv, t, tx_lockup_timer);
+ struct lbs_private *priv = timer_container_of(priv, t,
+ tx_lockup_timer);
unsigned long flags;
spin_lock_irqsave(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 7c413dc81f9a..5662a244f82a 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -113,7 +113,7 @@ static void if_usb_setup_firmware(struct lbtf_private *priv)
static void if_usb_fw_timeo(struct timer_list *t)
{
- struct if_usb_card *cardp = from_timer(cardp, t, fw_timeout);
+ struct if_usb_card *cardp = timer_container_of(cardp, t, fw_timeout);
lbtf_deb_enter(LBTF_DEB_USB);
if (!cardp->fwdnldover) {
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index a57a11be57d8..50c0f6179e2d 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -126,7 +126,7 @@ static void lbtf_cmd_work(struct work_struct *work)
*/
static void command_timer_fn(struct timer_list *t)
{
- struct lbtf_private *priv = from_timer(priv, t, command_timer);
+ struct lbtf_private *priv = timer_container_of(priv, t, command_timer);
unsigned long flags;
lbtf_deb_enter(LBTF_DEB_CMD);
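The from_timer() -> timer_container_of() conversions in these drivers are mechanical renames: both resolve an embedded struct timer_list back to its enclosing private structure via container_of. A user-space sketch of the underlying pointer arithmetic (the struct below is an illustrative stand-in, not libertas' real layout):

#include <assert.h>
#include <stddef.h>

/* Hand-rolled container_of: recover the enclosing struct from a
 * pointer to one of its members, which is all timer_container_of()
 * does with the timer_list embedded in these private structs.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int dummy; };

struct lbs_private_like {
	int state;
	struct timer_list command_timer;
};

int main(void)
{
	struct lbs_private_like priv = { .state = 42 };
	struct timer_list *t = &priv.command_timer;

	assert(container_of(t, struct lbs_private_like,
			    command_timer) == &priv);
	return 0;
}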
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 738bafc3749b..66f0f5377ac1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -403,14 +403,12 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
bss_desc->bcn_ht_oper->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
- chan_list->chan_scan_param[0].radio_type |=
- CHAN_BW_40MHZ << 2;
+ IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
radio_type,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
- }
+
*buffer += struct_size(chan_list, chan_scan_param, 1);
ret_len += struct_size(chan_list, chan_scan_param, 1);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 8aff1df09b40..354c5ce66045 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -300,7 +300,7 @@ static void
mwifiex_flush_data(struct timer_list *t)
{
struct reorder_tmr_cnxt *ctx =
- from_timer(ctx, t, timer);
+ timer_container_of(ctx, t, timer);
int start_win, seq_num;
ctx->timer_is_set = false;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index a099fdaafa45..60c12328c2f3 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1126,7 +1126,7 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
+ if (mwifiex_sta_init_cmd(priv, false))
return -1;
return 0;
@@ -1167,7 +1167,7 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
+ if (mwifiex_sta_init_cmd(priv, false))
return -1;
return 0;
@@ -1204,7 +1204,7 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
+ if (mwifiex_sta_init_cmd(priv, false))
return -1;
return 0;
@@ -2906,16 +2906,12 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
struct mwifiex_private *priv)
{
int rx_mcs_supp;
- struct ieee80211_mcs_info mcs_set;
- u8 *mcs = (u8 *)&mcs_set;
struct mwifiex_adapter *adapter = priv->adapter;
ht_info->ht_supported = true;
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
/* Fill HT capability information */
if (ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap))
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -2961,17 +2957,15 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
rx_mcs_supp = GET_RXMCSSUPP(adapter->user_dev_mcs_support);
+
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
/* Set MCS for 1x1/2x2 */
- memset(mcs, 0xff, rx_mcs_supp);
- /* Clear all the other values */
- memset(&mcs[rx_mcs_supp], 0,
- sizeof(struct ieee80211_mcs_info) - rx_mcs_supp);
+ memset(ht_info->mcs.rx_mask, 0xff, rx_mcs_supp);
+
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap))
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
- SETHT_MCS32(mcs_set.rx_mask);
-
- memcpy((u8 *) &ht_info->mcs, mcs, sizeof(struct ieee80211_mcs_info));
+ SETHT_MCS32(ht_info->mcs.rx_mask);
ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}
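
The mwifiex_setup_ht_caps() hunk above replaces a stack-local struct ieee80211_mcs_info plus memcpy() with direct writes into ht_info->mcs. A minimal sketch of the resulting in-place pattern, assuming rx_mcs_supp never exceeds IEEE80211_HT_MCS_MASK_LEN (the helper name is hypothetical):

#include <linux/string.h>
#include <net/cfg80211.h>

static void demo_fill_ht_mcs(struct ieee80211_sta_ht_cap *ht_info,
			     u8 rx_mcs_supp)
{
	/* zero the whole MCS info, then mark the supported RX streams */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	memset(ht_info->mcs.rx_mask, 0xff, rx_mcs_supp);
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}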
@@ -3013,7 +3007,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EFAULT);
}
- priv->wdev.wiphy = wiphy;
priv->wdev.iftype = NL80211_IFTYPE_STATION;
if (type == NL80211_IFTYPE_UNSPECIFIED)
@@ -3022,8 +3015,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_mode = type;
priv->bss_type = MWIFIEX_BSS_TYPE_STA;
- priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
break;
@@ -3043,14 +3034,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EFAULT);
}
- priv->wdev.wiphy = wiphy;
priv->wdev.iftype = NL80211_IFTYPE_AP;
priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
- priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
- priv->bss_started = 0;
priv->bss_mode = type;
break;
@@ -3070,7 +3057,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EFAULT);
}
- priv->wdev.wiphy = wiphy;
/* At start-up, wpa_supplicant tries to change the interface
* to NL80211_IFTYPE_STATION if it is not managed mode.
*/
@@ -3083,10 +3069,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
*/
priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
- priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
- priv->bss_started = 0;
if (mwifiex_cfg80211_init_p2p_client(priv)) {
memset(&priv->wdev, 0, sizeof(priv->wdev));
@@ -3100,6 +3083,11 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EINVAL);
}
+ priv->wdev.wiphy = wiphy;
+ priv->bss_priority = 0;
+ priv->bss_started = 0;
+ priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
+
dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
name_assign_type, ether_setup,
IEEE80211_NUM_ACS, 1);
@@ -3122,7 +3110,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
if (ret)
goto err_set_bss_mode;
- ret = mwifiex_sta_init_cmd(priv, false, false);
+ ret = mwifiex_sta_init_cmd(priv, false);
if (ret)
goto err_sta_init;
}
@@ -4703,7 +4691,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
void *wdev_priv;
struct wiphy *wiphy;
struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
- u8 *country_code;
+ const u8 *country_code;
u32 thr, retry;
struct cfg80211_ops *ops;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index d7fd79214bcf..686bf12b6c26 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -153,7 +153,7 @@ struct region_code_mapping {
u8 region[IEEE80211_COUNTRY_STRING_LEN] __nonstring;
};
-static struct region_code_mapping region_code_mapping_t[] = {
+static const struct region_code_mapping region_code_mapping_t[] = {
{ 0x10, "US " }, /* US FCC */
{ 0x20, "CA " }, /* IC Canada */
{ 0x30, "FR " }, /* France */
@@ -165,7 +165,7 @@ static struct region_code_mapping region_code_mapping_t[] = {
};
/* This function converts integer code to region string */
-u8 *mwifiex_11d_code_2_region(u8 code)
+const u8 *mwifiex_11d_code_2_region(u8 code)
{
u8 i;
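
The cfp.c hunks are pure const-correctness: the region table becomes read-only and the lookup returns a pointer to const. A sketch of the pattern with a hypothetical two-entry table (not the driver's full mapping):

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_region_map {
	u8 code;
	const char *region;
};

static const struct demo_region_map demo_map[] = {
	{ 0x10, "US " },
	{ 0x20, "CA " },
};

/* returning a pointer to const keeps callers from modifying the
 * shared table through the lookup result */
static const char *demo_code_to_region(u8 code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_map); i++)
		if (demo_map[i].code == code)
			return demo_map[i].region;
	return NULL;
}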
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 5573e2ded72f..0f466c31337f 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -159,11 +159,9 @@ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
* sending. Afterwards, it logs the command ID and action for debugging
* and sets up the command timeout timer.
*/
-static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
+static int mwifiex_dnld_cmd_to_fw(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
-
- struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct host_cmd_ds_command *host_cmd;
uint16_t cmd_code;
@@ -367,8 +365,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
(test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
- mwifiex_hs_activated_event(mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY), true);
+ mwifiex_hs_activated_event(adapter, true);
}
return ret;
@@ -473,8 +470,7 @@ void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
int mwifiex_process_event(struct mwifiex_adapter *adapter)
{
int ret, i;
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ struct mwifiex_private *priv;
struct sk_buff *skb = adapter->event_skb;
u32 eventcause;
struct mwifiex_rxinfo *rx_info;
@@ -744,7 +740,6 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
*/
int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
{
- struct mwifiex_private *priv;
struct cmd_ctrl_node *cmd_node;
int ret = 0;
struct host_cmd_ds_command *host_cmd;
@@ -768,7 +763,6 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node, list);
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
- priv = cmd_node->priv;
if (adapter->ps_state != PS_STATE_AWAKE) {
mwifiex_dbg(adapter, ERROR,
@@ -783,18 +777,17 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
spin_unlock_bh(&adapter->cmd_pending_q_lock);
spin_unlock_bh(&adapter->mwifiex_cmd_lock);
- ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node);
- priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ ret = mwifiex_dnld_cmd_to_fw(adapter, cmd_node);
+
/* Any command sent to the firmware when host is in sleep
* mode should de-configure host sleep. We should skip the
* host sleep configuration command itself though
*/
- if (priv && (host_cmd->command !=
- cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
+ if (host_cmd->command != cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH)) {
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
&adapter->work_flags);
- mwifiex_hs_activated_event(priv, false);
+ mwifiex_hs_activated_event(adapter, false);
}
}
@@ -810,8 +803,7 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
{
struct host_cmd_ds_command *resp;
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ struct mwifiex_private *priv;
int ret = 0;
uint16_t orig_cmdresp_no;
uint16_t cmdresp_no;
@@ -900,18 +892,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
ret = mwifiex_process_sta_cmdresp(priv, cmdresp_no, resp);
}
- /* Check init command response */
- if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
- if (ret) {
- mwifiex_dbg(adapter, ERROR,
- "%s: cmd %#x failed during\t"
- "initialization\n", __func__, cmdresp_no);
- mwifiex_init_fw_complete(adapter);
- return -1;
- } else if (adapter->last_init_cmd == cmdresp_no)
- adapter->hw_status = MWIFIEX_HW_STATUS_INIT_DONE;
- }
-
if (adapter->curr_cmd) {
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = ret;
@@ -954,7 +934,8 @@ void mwifiex_process_assoc_resp(struct mwifiex_adapter *adapter)
void
mwifiex_cmd_timeout_func(struct timer_list *t)
{
- struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
+ struct mwifiex_adapter *adapter = timer_container_of(adapter, t,
+ cmd_timer);
struct cmd_ctrl_node *cmd_node;
set_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
@@ -1030,10 +1011,6 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
mwifiex_cancel_pending_ioctl(adapter);
}
}
- if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
- mwifiex_init_fw_complete(adapter);
- return;
- }
if (adapter->if_ops.device_dump)
adapter->if_ops.device_dump(adapter);
@@ -1160,27 +1137,27 @@ mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
* This event is generated by the driver, with a blank event body.
*/
void
-mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
+mwifiex_hs_activated_event(struct mwifiex_adapter *adapter, u8 activated)
{
if (activated) {
if (test_bit(MWIFIEX_IS_HS_CONFIGURED,
- &priv->adapter->work_flags)) {
- priv->adapter->hs_activated = true;
- mwifiex_update_rxreor_flags(priv->adapter,
+ &adapter->work_flags)) {
+ adapter->hs_activated = true;
+ mwifiex_update_rxreor_flags(adapter,
RXREOR_FORCE_NO_DROP);
- mwifiex_dbg(priv->adapter, EVENT,
+ mwifiex_dbg(adapter, EVENT,
"event: hs_activated\n");
- priv->adapter->hs_activate_wait_q_woken = true;
+ adapter->hs_activate_wait_q_woken = true;
wake_up_interruptible(
- &priv->adapter->hs_activate_wait_q);
+ &adapter->hs_activate_wait_q);
} else {
- mwifiex_dbg(priv->adapter, EVENT,
+ mwifiex_dbg(adapter, EVENT,
"event: HS not configured\n");
}
} else {
- mwifiex_dbg(priv->adapter, EVENT,
+ mwifiex_dbg(adapter, EVENT,
"event: hs_deactivated\n");
- priv->adapter->hs_activated = false;
+ adapter->hs_activated = false;
}
}
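
mwifiex_hs_activated_event() now takes the adapter directly instead of bouncing through a priv pointer. The wake-up it performs is the standard wait-queue handshake; a self-contained sketch with hypothetical names:

#include <linux/wait.h>

struct demo_adapter {
	wait_queue_head_t hs_wait_q;	/* init_waitqueue_head() at setup */
	bool hs_woken;
};

/* event side: set the condition, then wake the waiter */
static void demo_hs_signal(struct demo_adapter *ad)
{
	ad->hs_woken = true;
	wake_up_interruptible(&ad->hs_wait_q);
}

/* waiter side: sleeps until hs_woken is observed true */
static int demo_hs_wait(struct demo_adapter *ad)
{
	return wait_event_interruptible(ad->hs_wait_q, ad->hs_woken);
}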
@@ -1204,7 +1181,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
adapter->iface_type != MWIFIEX_USB) {
- mwifiex_hs_activated_event(priv, true);
+ mwifiex_hs_activated_event(adapter, true);
return 0;
} else {
mwifiex_dbg(adapter, CMD,
@@ -1217,11 +1194,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
if (conditions != HS_CFG_CANCEL) {
set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->iface_type == MWIFIEX_USB)
- mwifiex_hs_activated_event(priv, true);
+ mwifiex_hs_activated_event(adapter, true);
} else {
clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->hs_activated)
- mwifiex_hs_activated_event(priv, false);
+ mwifiex_hs_activated_event(adapter, false);
}
return 0;
@@ -1250,9 +1227,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
adapter->hs_activated = false;
clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
- mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- false);
+ mwifiex_hs_activated_event(adapter, false);
}
EXPORT_SYMBOL_GPL(mwifiex_process_hs_config);
@@ -1302,9 +1277,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
}
adapter->pm_wakeup_card_req = true;
if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags))
- mwifiex_hs_activated_event(mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY),
- true);
+ mwifiex_hs_activated_event(adapter, true);
adapter->ps_state = PS_STATE_SLEEP;
cmd->command = cpu_to_le16(command);
cmd->seq_num = cpu_to_le16(seq_num);
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index ce0d42e72e94..4820010a86f6 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -41,7 +41,8 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
static void wakeup_timer_fn(struct timer_list *t)
{
- struct mwifiex_adapter *adapter = from_timer(adapter, t, wakeup_timer);
+ struct mwifiex_adapter *adapter = timer_container_of(adapter, t,
+ wakeup_timer);
mwifiex_dbg(adapter, ERROR, "Firmware wakeup failed\n");
adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
@@ -480,14 +481,12 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
* - Initialize the private structure
* - Add BSS priority tables to the adapter structure
* - For each interface, send the init commands to firmware
- * - Send the first command in command pending queue, if available
*/
int mwifiex_init_fw(struct mwifiex_adapter *adapter)
{
int ret;
struct mwifiex_private *priv;
u8 i, first_sta = true;
- int is_cmd_pend_q_empty;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
@@ -509,11 +508,10 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
}
if (adapter->mfg_mode) {
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
- ret = -EINPROGRESS;
} else {
for (i = 0; i < adapter->priv_num; i++) {
ret = mwifiex_sta_init_cmd(adapter->priv[i],
- first_sta, true);
+ first_sta);
if (ret == -1)
return -1;
@@ -522,17 +520,15 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
}
spin_lock_bh(&adapter->cmd_pending_q_lock);
- is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
+ WARN_ON(!list_empty(&adapter->cmd_pending_q));
spin_unlock_bh(&adapter->cmd_pending_q_lock);
- if (!is_cmd_pend_q_empty) {
- /* Send the first command in queue and return */
- if (mwifiex_main_process(adapter) != -1)
- ret = -EINPROGRESS;
- } else {
- adapter->hw_status = MWIFIEX_HW_STATUS_READY;
- }
- return ret;
+ adapter->hw_status = MWIFIEX_HW_STATUS_READY;
+
+ if (adapter->if_ops.init_fw_port)
+ adapter->if_ops.init_fw_port(adapter);
+
+ return 0;
}
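
With initialization now fully synchronous, mwifiex_init_fw() can assert that nothing is left in the pending queue rather than draining it. A sketch of the lock-protected invariant check, hypothetical names:

#include <linux/bug.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static void demo_assert_queue_drained(spinlock_t *lock, struct list_head *q)
{
	/* inspect the list under its lock; WARN (don't crash) on leftovers */
	spin_lock_bh(lock);
	WARN_ON(!list_empty(q));
	spin_unlock_bh(lock);
}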
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 0e1f53940401..7b50a88a18e5 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -6,6 +6,7 @@
*/
#include <linux/suspend.h>
+#include <net/sock.h>
#include "main.h"
#include "wmm.h"
@@ -354,13 +355,6 @@ process_start:
if (adapter->cmd_resp_received) {
adapter->cmd_resp_received = false;
mwifiex_process_cmdresp(adapter);
-
- /* call mwifiex back when init_fw is done */
- if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) {
- adapter->hw_status = MWIFIEX_HW_STATUS_READY;
- mwifiex_init_fw_complete(adapter);
- maybe_quirk_fw_disable_ds(adapter);
- }
}
/* Check if we need to confirm Sleep Request
@@ -415,10 +409,7 @@ process_start:
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
&adapter->work_flags);
- mwifiex_hs_activated_event
- (mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY),
- false);
+ mwifiex_hs_activated_event(adapter, false);
}
}
@@ -438,10 +429,7 @@ process_start:
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
&adapter->work_flags);
- mwifiex_hs_activated_event
- (mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY),
- false);
+ mwifiex_hs_activated_event(adapter, false);
}
}
@@ -460,10 +448,7 @@ process_start:
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
&adapter->work_flags);
- mwifiex_hs_activated_event
- (mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY),
- false);
+ mwifiex_hs_activated_event(adapter, false);
}
}
@@ -587,21 +572,11 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto err_dnld_fw;
}
- adapter->init_wait_q_woken = false;
ret = mwifiex_init_fw(adapter);
- if (ret == -1) {
+ if (ret == -1)
goto err_init_fw;
- } else if (!ret) {
- adapter->hw_status = MWIFIEX_HW_STATUS_READY;
- goto done;
- }
- /* Wait for mwifiex_init to complete */
- if (!adapter->mfg_mode) {
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
- if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
- goto err_init_fw;
- }
+
+ maybe_quirk_fw_disable_ds(adapter);
if (!adapter->wiphy) {
if (mwifiex_register_cfg80211(adapter)) {
@@ -938,8 +913,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
multicast = is_multicast_ether_addr(skb->data);
- if (unlikely(!multicast && skb->sk &&
- skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS &&
+ if (unlikely(!multicast && sk_requests_wifi_status(skb->sk) &&
priv->adapter->fw_api_ver == MWIFIEX_FW_V15))
skb = mwifiex_clone_skb_for_tx_status(priv,
skb,
@@ -1558,7 +1532,6 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
- init_waitqueue_head(&adapter->init_wait_q);
clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
@@ -1726,7 +1699,6 @@ mwifiex_add_card(void *card, struct completion *fw_done,
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
- init_waitqueue_head(&adapter->init_wait_q);
clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
init_waitqueue_head(&adapter->hs_activate_wait_q);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 63f1c900e096..9ac36bef980e 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -239,7 +239,6 @@ struct mwifiex_dbg {
enum MWIFIEX_HARDWARE_STATUS {
MWIFIEX_HW_STATUS_READY,
MWIFIEX_HW_STATUS_INITIALIZING,
- MWIFIEX_HW_STATUS_INIT_DONE,
MWIFIEX_HW_STATUS_RESET,
MWIFIEX_HW_STATUS_NOT_READY
};
@@ -865,8 +864,6 @@ struct mwifiex_adapter {
unsigned long work_flags;
u32 fw_release_number;
u8 intf_hdr_len;
- u16 init_wait_q_woken;
- wait_queue_head_t init_wait_q;
void *card;
struct mwifiex_if_ops if_ops;
atomic_t bypass_tx_pending;
@@ -919,7 +916,6 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *curr_cmd;
/* spin lock for command */
spinlock_t mwifiex_cmd_lock;
- u16 last_init_cmd;
struct timer_list cmd_timer;
struct list_head cmd_free_q;
/* spin lock for cmd_free_q */
@@ -1060,8 +1056,6 @@ void mwifiex_free_priv(struct mwifiex_private *priv);
int mwifiex_init_fw(struct mwifiex_adapter *adapter);
-int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
-
void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter);
int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
@@ -1125,7 +1119,7 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
struct mwifiex_ds_pm_cfg *pm_cfg);
void mwifiex_process_hs_config(struct mwifiex_adapter *adapter);
-void mwifiex_hs_activated_event(struct mwifiex_private *priv,
+void mwifiex_hs_activated_event(struct mwifiex_adapter *adapter,
u8 activated);
int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg);
@@ -1156,7 +1150,7 @@ void mwifiex_process_sta_txpd(struct mwifiex_private *priv,
struct sk_buff *skb);
void mwifiex_process_uap_txpd(struct mwifiex_private *priv,
struct sk_buff *skb);
-int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta, bool init);
+int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
struct mwifiex_scan_cmd_config *scan_cfg);
void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
@@ -1565,7 +1559,7 @@ int mwifiex_add_wowlan_magic_pkt_filter(struct mwifiex_adapter *adapter);
int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *data);
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
-u8 *mwifiex_11d_code_2_region(u8 code);
+const u8 *mwifiex_11d_code_2_region(u8 code);
void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_chan_def chandef);
@@ -1598,7 +1592,6 @@ mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
struct mwifiex_sta_node *
mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
-u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
u8 action_code, u8 dialog_token,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index dd2a42e732f2..a760de191fce 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2971,7 +2971,7 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
goto err_iomap2;
}
- pr_notice("PCI memory map Virt0: %pK PCI memory map Virt2: %pK\n",
+ pr_notice("PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
card->pci_mmap, card->pci_mmap1);
ret = mwifiex_pcie_alloc_buffers(adapter);
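
On the %pK-to-%p switch above: plain %p has printed hashed pointer values by default for some time, so %pK buys nothing in ordinary log output. A hedged sketch (hypothetical helper):

#include <linux/printk.h>

static void demo_log_map(void *base)
{
	pr_notice("mapped at %p\n", base);	/* %p is hashed by default */
}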
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index c4689f5a1acc..c93281f5a47c 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -157,7 +157,7 @@ mwifiex_cmd_802_11_get_log(struct host_cmd_ds_command *cmd)
*/
static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- u16 cmd_action, u16 *pbitmap_rates)
+ u16 cmd_action, const u16 *pbitmap_rates)
{
struct host_cmd_ds_tx_rate_cfg *rate_cfg = &cmd->params.tx_rate_cfg;
struct mwifiex_rate_scope *rate_scope;
@@ -174,34 +174,19 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
rate_scope->type = cpu_to_le16(TLV_TYPE_RATE_SCOPE);
rate_scope->length = cpu_to_le16
(sizeof(*rate_scope) - sizeof(struct mwifiex_ie_types_header));
- if (pbitmap_rates != NULL) {
- rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]);
- rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]);
- for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
- rate_scope->ht_mcs_rate_bitmap[i] =
- cpu_to_le16(pbitmap_rates[2 + i]);
- if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
- for (i = 0;
- i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
- i++)
- rate_scope->vht_mcs_rate_bitmap[i] =
- cpu_to_le16(pbitmap_rates[10 + i]);
- }
- } else {
- rate_scope->hr_dsss_rate_bitmap =
- cpu_to_le16(priv->bitmap_rates[0]);
- rate_scope->ofdm_rate_bitmap =
- cpu_to_le16(priv->bitmap_rates[1]);
- for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
- rate_scope->ht_mcs_rate_bitmap[i] =
- cpu_to_le16(priv->bitmap_rates[2 + i]);
- if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
- for (i = 0;
- i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
- i++)
- rate_scope->vht_mcs_rate_bitmap[i] =
- cpu_to_le16(priv->bitmap_rates[10 + i]);
- }
+ if (!pbitmap_rates)
+ pbitmap_rates = priv->bitmap_rates;
+
+ rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]);
+ rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]);
+
+ for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
+ rate_scope->ht_mcs_rate_bitmap[i] = cpu_to_le16(pbitmap_rates[2 + i]);
+
+ if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ for (i = 0; i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap); i++)
+ rate_scope->vht_mcs_rate_bitmap[i] =
+ cpu_to_le16(pbitmap_rates[10 + i]);
}
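
The mwifiex_cmd_tx_rate_cfg() rewrite collapses two nearly identical branches by pointing the parameter at the default table when the caller passes NULL, leaving a single encoding path. A minimal sketch with hypothetical names:

#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_encode_rates(__le16 *out, const u16 *rates,
			      const u16 *defaults, size_t n)
{
	size_t i;

	if (!rates)
		rates = defaults;	/* fall back, then one code path */

	for (i = 0; i < n; i++)
		out[i] = cpu_to_le16(rates[i]);
}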
rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
@@ -2258,7 +2243,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
* - Set 11d control
* - Set MAC control (this must be the last command to initialize firmware)
*/
-int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
+int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret;
@@ -2433,11 +2418,5 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG,
HostCmd_ACT_GEN_SET, 0, &tx_cfg, true);
- if (init) {
- /* set last_init_cmd before sending the command */
- priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
- ret = -EINPROGRESS;
- }
-
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 18e8c04d14c4..77a9a6de636d 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -1415,7 +1415,8 @@ void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
void mwifiex_check_auto_tdls(struct timer_list *t)
{
- struct mwifiex_private *priv = from_timer(priv, t, auto_tdls_timer);
+ struct mwifiex_private *priv = timer_container_of(priv, t,
+ auto_tdls_timer);
struct mwifiex_auto_tdls_peer *tdls_peer;
u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index bd91678d26b4..f44e22f24511 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -24,8 +24,7 @@
int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ struct mwifiex_private *priv;
struct rxpd *local_rx_pd;
struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
int ret;
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 2f565397cf36..947ecb0a7b40 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1125,7 +1125,7 @@ static void mwifiex_usb_tx_aggr_tmo(struct timer_list *t)
struct urb_context *urb_cnxt = NULL;
struct sk_buff *skb_send = NULL;
struct tx_aggr_tmr_cnxt *timer_context =
- from_timer(timer_context, t, hold_timer);
+ timer_container_of(timer_context, t, hold_timer);
struct mwifiex_adapter *adapter = timer_context->adapter;
struct usb_tx_data_port *port = timer_context->port;
int err = 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 1f1f6280a0f2..4c5b1de0e936 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -116,24 +116,6 @@ static struct mwifiex_debug_data items[] = {
static int num_of_items = ARRAY_SIZE(items);
/*
- * Firmware initialization complete callback handler.
- *
- * This function wakes up the function waiting on the init
- * wait queue for the firmware initialization to complete.
- */
-int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
-{
-
- if (adapter->hw_status == MWIFIEX_HW_STATUS_READY)
- if (adapter->if_ops.init_fw_port)
- adapter->if_ops.init_fw_port(adapter);
-
- adapter->init_wait_q_woken = true;
- wake_up_interruptible(&adapter->init_wait_q);
- return 0;
-}
-
-/*
* This function sends init/shutdown command
* to firmware.
*/
@@ -663,7 +645,7 @@ u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv)
return false;
}
-u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
+static u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
{
struct mwifiex_sta_node *sta_ptr;
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index bcb61dab7dc8..1b1222c73728 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -1428,13 +1428,13 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
}
if (!ptr->is_11n_enabled ||
- ptr->ba_status ||
- priv->wps.session_enable) {
+ ptr->ba_status ||
+ priv->wps.session_enable) {
if (ptr->is_11n_enabled &&
- ptr->ba_status &&
- ptr->amsdu_in_ampdu &&
- mwifiex_is_amsdu_allowed(priv, tid) &&
- mwifiex_is_11n_aggragation_possible(priv, ptr,
+ ptr->ba_status &&
+ ptr->amsdu_in_ampdu &&
+ mwifiex_is_amsdu_allowed(priv, tid) &&
+ mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
index e7b839e74290..cc2d888e3f17 100644
--- a/drivers/net/wireless/mediatek/mt76/channel.c
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -302,11 +302,13 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt76_dev *dev = phy->dev;
- struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt76_vif_data *mvif;
if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel)
return;
+ mvif = mlink->mvif;
+
rcu_assign_pointer(mvif->offchannel_link, NULL);
dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink);
kfree(mlink);
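
The channel.c fix above cures a dereference-before-check: mlink->mvif was read before IS_ERR_OR_NULL(mlink) had validated the pointer. The safe ordering, sketched with hypothetical types:

#include <linux/err.h>

struct demo_vif;

struct demo_link {
	struct demo_vif *vif;
	bool offchannel;
};

static struct demo_vif *demo_link_vif(struct demo_link *link)
{
	if (IS_ERR_OR_NULL(link))
		return NULL;

	return link->vif;	/* dereference only after the check */
}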
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 844af16ee551..35b4ec91979e 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -1011,6 +1011,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
int i;
mt76_worker_disable(&dev->tx_worker);
+ napi_disable(&dev->tx_napi);
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index b88d7e10742e..45c8db939d55 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -449,8 +449,10 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
- wiphy->available_antennas_tx = phy->antenna_mask;
- wiphy->available_antennas_rx = phy->antenna_mask;
+ if (!wiphy->available_antennas_tx)
+ wiphy->available_antennas_tx = phy->antenna_mask;
+ if (!wiphy->available_antennas_rx)
+ wiphy->available_antennas_rx = phy->antenna_mask;
wiphy->sar_capa = &mt76_sar_capa;
phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
@@ -1703,7 +1705,7 @@ s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
int n_chains = hweight16(phy->chainmask);
txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
- txpower -= mt76_tx_power_nss_delta(n_chains);
+ txpower -= mt76_tx_power_path_delta(n_chains);
return txpower;
}
@@ -1719,7 +1721,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -EINVAL;
n_chains = hweight16(phy->chainmask);
- delta = mt76_tx_power_nss_delta(n_chains);
+ delta = mt76_tx_power_path_delta(n_chains);
*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index d7cd467b812f..5f8d81cda6cd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -162,6 +162,16 @@ enum mt76_dfs_state {
MT_DFS_STATE_ACTIVE,
};
+#define MT76_RNR_SCAN_MAX_BSSIDS 16
+struct mt76_scan_rnr_param {
+ u8 bssid[MT76_RNR_SCAN_MAX_BSSIDS][ETH_ALEN];
+ u8 channel[MT76_RNR_SCAN_MAX_BSSIDS];
+ u8 random_mac[ETH_ALEN];
+ u8 seq_num;
+ u8 bssid_num;
+ u32 sreq_flag;
+};
+
struct mt76_queue_buf {
dma_addr_t addr;
u16 len:15,
@@ -941,6 +951,8 @@ struct mt76_dev {
char alpha2[3];
enum nl80211_dfs_regions region;
+ struct mt76_scan_rnr_param rnr;
+
u32 debugfs_reg;
u8 csa_complete;
@@ -1386,12 +1398,12 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
return pktid >= MT_PACKET_ID_FIRST;
}
-static inline u8 mt76_tx_power_nss_delta(u8 nss)
+static inline u8 mt76_tx_power_path_delta(u8 path)
{
- static const u8 nss_delta[4] = { 0, 6, 9, 12 };
- u8 idx = nss - 1;
+ static const u8 path_delta[5] = { 0, 6, 9, 12, 14 };
+ u8 idx = path - 1;
- return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
+ return (idx < ARRAY_SIZE(path_delta)) ? path_delta[idx] : 0;
}
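
Worked example for the renamed helper: the table is indexed by TX-path count minus one and holds deltas in 0.5 dB units, so 4 paths back off 12 * 0.5 = 6 dB and the new fifth entry gives 5-path radios 7 dB. A trivial self-check (hypothetical function, assumes mt76.h is in scope):

#include <linux/bug.h>

static inline void demo_path_delta_example(void)
{
	u8 d4 = mt76_tx_power_path_delta(4);	/* 12 -> 6 dB */
	u8 d5 = mt76_tx_power_path_delta(5);	/* 14 -> 7 dB */

	WARN_ON(d4 != 12 || d5 != 14);
}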
static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 66ba3be27343..aae80005a3c1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -273,7 +273,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains;
- int delta_idx, delta = mt76_tx_power_nss_delta(n_chains);
+ int delta_idx, delta = mt76_tx_power_path_delta(n_chains);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
struct mt76_power_limits limits;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index c54005df08ca..8a37fb37f77d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1043,7 +1043,7 @@ void mt7615_roc_work(struct work_struct *work)
void mt7615_roc_timer(struct timer_list *timer)
{
- struct mt7615_phy *phy = from_timer(phy, timer, roc_timer);
+ struct mt7615_phy *phy = timer_container_of(phy, timer, roc_timer);
ieee80211_queue_work(phy->mt76->hw, &phy->roc_work);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index b8fcd4eb3fbb..4064e193d4de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -2067,7 +2067,7 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
};
tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power);
- tx_power -= mt76_tx_power_nss_delta(n_chains);
+ tx_power -= mt76_tx_power_path_delta(n_chains);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits, tx_power);
mphy->txpower_cur = tx_power;
@@ -2084,8 +2084,8 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
int delta = 0;
if (i < n_chains - 1)
- delta = mt76_tx_power_nss_delta(n_chains) -
- mt76_tx_power_nss_delta(i + 1);
+ delta = mt76_tx_power_path_delta(n_chains) -
+ mt76_tx_power_path_delta(i + 1);
sku[MT_SKU_1SS_DELTA + i] = delta;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 455979476d11..192dcc374a64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -232,9 +232,14 @@ static inline bool is_mt7992(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7992;
}
+static inline bool is_mt7990(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7993;
+}
+
static inline bool is_mt799x(struct mt76_dev *dev)
{
- return is_mt7996(dev) || is_mt7992(dev);
+ return is_mt7996(dev) || is_mt7992(dev) || is_mt7990(dev);
}
static inline bool is_mt7622(struct mt76_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 487ad716f872..1013cad57a7f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -273,6 +273,7 @@ enum tx_frag_idx {
#define MT_TXD6_TX_RATE GENMASK(21, 16)
#define MT_TXD6_TIMESTAMP_OFS_EN BIT(15)
#define MT_TXD6_TIMESTAMP_OFS_IDX GENMASK(14, 10)
+#define MT_TXD6_TID_ADDBA GENMASK(10, 8)
#define MT_TXD6_MSDU_CNT GENMASK(9, 4)
#define MT_TXD6_MSDU_CNT_V2 GENMASK(15, 10)
#define MT_TXD6_DIS_MAT BIT(3)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index bafcf5a279e2..cb13d0a76878 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -67,8 +67,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000) ||
(is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) ||
- (is_mt7996(dev) && addr == 0x900000) ||
- (is_mt7992(dev) && addr == 0x900000))
+ (is_mt799x(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@@ -391,7 +390,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
if (vif->type == NL80211_IFTYPE_STATION &&
- link_conf && !is_zero_ether_addr(link_conf->bssid)) {
+ !is_zero_ether_addr(link_conf->bssid)) {
memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN);
basic->aid = cpu_to_le16(vif->cfg.aid);
} else {
@@ -1667,6 +1666,44 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
+void mt76_connac_mcu_build_rnr_scan_param(struct mt76_dev *mdev,
+ struct cfg80211_scan_request *sreq)
+{
+ struct ieee80211_channel **scan_list = sreq->channels;
+ int i, bssid_index = 0;
+
+ /* clear 6G active Scan BSSID table */
+ memset(&mdev->rnr, 0, sizeof(mdev->rnr));
+
+ for (i = 0; i < sreq->n_6ghz_params; i++) {
+ u8 ch_idx = sreq->scan_6ghz_params[i].channel_idx;
+ int k = 0;
+
+ /* Remove the duplicated BSSID */
+ for (k = 0; k < bssid_index; k++) {
+ if (!memcmp(&mdev->rnr.bssid[k],
+ sreq->scan_6ghz_params[i].bssid,
+ ETH_ALEN))
+ break;
+ }
+
+ if (k == bssid_index &&
+ bssid_index < MT76_RNR_SCAN_MAX_BSSIDS) {
+ memcpy(&mdev->rnr.bssid[bssid_index++],
+ sreq->scan_6ghz_params[i].bssid, ETH_ALEN);
+ mdev->rnr.channel[k] = scan_list[ch_idx]->hw_value;
+ }
+ }
+
+ mdev->rnr.bssid_num = bssid_index;
+
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ memcpy(mdev->rnr.random_mac, sreq->mac_addr, ETH_ALEN);
+ mdev->rnr.sreq_flag = sreq->flags;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_build_rnr_scan_param);
+
#define MT76_CONNAC_SCAN_CHANNEL_TIME 60
int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 43237e518373..27daf419560a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -869,6 +869,7 @@ enum {
#define NETWORK_WDS BIT(21)
#define SCAN_FUNC_RANDOM_MAC BIT(0)
+#define SCAN_FUNC_RNR_SCAN BIT(3)
#define SCAN_FUNC_SPLIT_SCAN BIT(5)
#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
@@ -1065,6 +1066,7 @@ enum {
MCU_UNI_EVENT_WED_RRO = 0x57,
MCU_UNI_EVENT_PER_STA_INFO = 0x6d,
MCU_UNI_EVENT_ALL_STA_INFO = 0x6e,
+ MCU_UNI_EVENT_SDO = 0x83,
};
#define MCU_UNI_CMD_EVENT BIT(1)
@@ -1182,6 +1184,11 @@ enum {
#define MCU_UNI_CMD(_t) (__MCU_CMD_FIELD_UNI | \
FIELD_PREP(__MCU_CMD_FIELD_ID, \
MCU_UNI_CMD_##_t))
+
+#define MCU_UNI_QUERY(_t) (__MCU_CMD_FIELD_UNI | __MCU_CMD_FIELD_QUERY | \
+ FIELD_PREP(__MCU_CMD_FIELD_ID, \
+ MCU_UNI_CMD_##_t))
+
#define MCU_CE_CMD(_t) (__MCU_CMD_FIELD_CE | \
FIELD_PREP(__MCU_CMD_FIELD_ID, \
MCU_CE_CMD_##_t))
@@ -1287,16 +1294,20 @@ enum {
MCU_UNI_CMD_EFUSE_CTRL = 0x2d,
MCU_UNI_CMD_RA = 0x2f,
MCU_UNI_CMD_MURU = 0x31,
+ MCU_UNI_CMD_TESTMODE_RX_STAT = 0x32,
MCU_UNI_CMD_BF = 0x33,
MCU_UNI_CMD_CHANNEL_SWITCH = 0x34,
MCU_UNI_CMD_THERMAL = 0x35,
MCU_UNI_CMD_VOW = 0x37,
MCU_UNI_CMD_FIXED_RATE_TABLE = 0x40,
+ MCU_UNI_CMD_TESTMODE_CTRL = 0x46,
MCU_UNI_CMD_RRO = 0x57,
MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58,
MCU_UNI_CMD_PER_STA_INFO = 0x6d,
MCU_UNI_CMD_ALL_STA_INFO = 0x6e,
MCU_UNI_CMD_ASSERT_DUMP = 0x6f,
+ MCU_UNI_CMD_RADIO_STATUS = 0x80,
+ MCU_UNI_CMD_SDO = 0x88,
};
enum {
@@ -1369,6 +1380,7 @@ enum {
UNI_BSS_INFO_OFFLOAD = 25,
UNI_BSS_INFO_MLD = 26,
UNI_BSS_INFO_PM_DISABLE = 27,
+ UNI_BSS_INFO_EHT = 30,
};
enum {
@@ -1969,6 +1981,8 @@ int mt76_connac_mcu_start_patch(struct mt76_dev *dev);
int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get);
int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option);
+void mt76_connac_mcu_build_rnr_scan_param(struct mt76_dev *mdev,
+ struct cfg80211_scan_request *sreq);
int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index a82c75ba26e6..a683d53c7ceb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -174,7 +174,6 @@ static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
- struct mt76_txwi_cache __maybe_unused *t;
int i, ret, fifo_size;
struct mt76_queue *q;
void *status_fifo;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 84ef80ab4afb..96cecc576a98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -17,6 +17,8 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
+ { USB_DEVICE(0x0471, 0x2126) }, /* LiteOn WN4516R module, nonstandard USB connector */
+ { USB_DEVICE(0x0471, 0x7600) }, /* LiteOn WN4519R module, nonstandard USB connector */
{ USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
{ USB_DEVICE(0x0846, 0x9014) }, /* Netgear WNDA3100v3 */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 33a14365ec9b..3b5562811511 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -191,6 +191,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct mt76_usb *usb = &dev->mt76.usb;
+ bool vht;
int err;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
@@ -217,7 +218,17 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
/* check hw sg support in order to enable AMSDU */
hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
- err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+ switch (dev->mt76.rev) {
+ case 0x76320044:
+ /* these ASIC revisions do not support VHT */
+ vht = false;
+ break;
+ default:
+ vht = true;
+ break;
+ }
+
+ err = mt76_register_device(&dev->mt76, vht, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (err)
goto fail;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 192e8eff970b..b287b7d9394e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -211,13 +211,28 @@ static const struct file_operations mt7915_sys_recovery_ops = {
static int
mt7915_radar_trigger(void *data, u64 val)
{
- struct mt7915_dev *dev = data;
+#define RADAR_MAIN_CHAIN 1
+#define RADAR_BACKGROUND 2
+ struct mt7915_phy *phy = data;
+ struct mt7915_dev *dev = phy->dev;
+ int rdd_idx;
+
+ if (!val || val > RADAR_BACKGROUND)
+ return -EINVAL;
- if (val > MT_RX_SEL2)
+ if (val == RADAR_BACKGROUND && !dev->rdd2_phy) {
+ dev_err(dev->mt76.dev, "Background radar is not enabled\n");
return -EINVAL;
+ }
+
+ rdd_idx = mt7915_get_rdd_idx(phy, val == RADAR_BACKGROUND);
+ if (rdd_idx < 0) {
+ dev_err(dev->mt76.dev, "No RDD found\n");
+ return -EINVAL;
+ }
return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE,
- val, 0, 0);
+ rdd_idx, 0, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
@@ -445,6 +460,11 @@ mt7915_rdd_monitor(struct seq_file *s, void *data)
mutex_lock(&dev->mt76.mutex);
+ if (!mt7915_eeprom_has_background_radar(dev)) {
+ seq_puts(s, "no background radar capability\n");
+ goto out;
+ }
+
if (!cfg80211_chandef_valid(chandef)) {
ret = -EINVAL;
goto out;
@@ -1242,7 +1262,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
if (!dev->dbdc_support || phy->mt76->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
- debugfs_create_file("radar_trigger", 0200, dir, dev,
+ debugfs_create_file("radar_trigger", 0200, dir, phy,
&fops_radar_trigger);
debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
mt7915_rdd_monitor);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 928e0b07a9bf..c0f3402d30bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -147,7 +147,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
/* read eeprom data from efuse */
block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size);
for (i = 0; i < block_num; i++) {
- ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size);
+ ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size, NULL);
if (ret < 0)
return ret;
}
@@ -361,6 +361,37 @@ s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta;
}
+bool
+mt7915_eeprom_has_background_radar(struct mt7915_dev *dev)
+{
+ u8 val, buf[MT7915_EEPROM_BLOCK_SIZE];
+ u8 band_sel, tx_path, rx_path;
+ int offs = MT_EE_WIFI_CONF + 1;
+
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ return true;
+ case 0x7906:
+ /* read efuse to check background radar capability */
+ if (mt7915_mcu_get_eeprom(dev, offs, buf))
+ break;
+
+ val = buf[offs % MT7915_EEPROM_BLOCK_SIZE];
+ band_sel = u8_get_bits(val, MT_EE_WIFI_CONF0_BAND_SEL);
+ tx_path = u8_get_bits(val, MT_EE_WIFI_CONF0_TX_PATH);
+ rx_path = u8_get_bits(val, MT_EE_WIFI_CONF0_RX_PATH);
+
+ return (band_sel == MT_EE_V2_BAND_SEL_5GHZ &&
+ tx_path == rx_path && rx_path == 2);
+ case 0x7981:
+ case 0x7986:
+ default:
+ break;
+ }
+
+ return false;
+}
+
const u8 mt7915_sku_group_len[] = {
[SKU_CCK] = 4,
[SKU_OFDM] = 8,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 509fb43d8a68..31aec0f40232 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -55,6 +55,7 @@ enum mt7915_eeprom_field {
#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
+#define MT_EE_WIFI_CONF0_RX_PATH GENMASK(5, 3)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index bee4beabc4eb..3e30ca5155d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -285,7 +285,7 @@ static void __mt7915_init_txpower(struct mt7915_phy *phy,
{
struct mt7915_dev *dev = phy->dev;
int i, n_chains = hweight16(phy->mt76->chainmask);
- int nss_delta = mt76_tx_power_nss_delta(n_chains);
+ int path_delta = mt76_tx_power_path_delta(n_chains);
int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band);
struct mt76_power_limits limits;
@@ -305,7 +305,7 @@ static void __mt7915_init_txpower(struct mt7915_phy *phy,
target_power = mt76_get_rate_power_limits(phy->mt76, chan,
&limits,
target_power);
- target_power += nss_delta;
+ target_power += path_delta;
target_power = DIV_ROUND_UP(target_power, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
@@ -392,9 +392,10 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
if (!is_mt7915(&dev->mt76))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
- if (!mdev->dev->of_node ||
- !of_property_read_bool(mdev->dev->of_node,
- "mediatek,disable-radar-background"))
+ if (mt7915_eeprom_has_background_radar(phy->dev) &&
+ (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background")))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_RADAR_BACKGROUND);
@@ -924,8 +925,7 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US;
if (!is_mt7915(&dev->mt76))
- c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
- IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
elem->phy_cap_info[2] |= c;
c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 2ba6eb3038ce..9400e4af2a04 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -2035,16 +2035,15 @@ void mt7915_mac_work(struct work_struct *work)
static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
+ int rdd_idx = mt7915_get_rdd_idx(phy, false);
- if (phy->rdd_state & BIT(0))
- mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
- MT_RX_SEL0, 0);
- if (phy->rdd_state & BIT(1))
- mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
- MT_RX_SEL0, 0);
+ if (rdd_idx < 0)
+ return;
+
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, rdd_idx, 0, 0);
}
-static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
+static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int rdd_idx)
{
int err, region;
@@ -2061,52 +2060,38 @@ static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
break;
}
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
- MT_RX_SEL0, region);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, rdd_idx, 0, region);
if (err < 0)
return err;
if (is_mt7915(&dev->mt76)) {
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, rdd_idx,
0, dev->dbdc_support ? 2 : 0);
if (err < 0)
return err;
}
- return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, rdd_idx, 0, 1);
}
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
- int err;
+ int err, rdd_idx;
+
+ rdd_idx = mt7915_get_rdd_idx(phy, false);
+ if (rdd_idx < 0)
+ return -EINVAL;
/* start CAC */
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, rdd_idx, 0, 0);
if (err < 0)
return err;
- err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx);
+ err = mt7915_dfs_start_rdd(dev, rdd_idx);
if (err < 0)
return err;
- phy->rdd_state |= BIT(phy->mt76->band_idx);
-
- if (!is_mt7915(&dev->mt76))
- return 0;
-
- if (chandef->width == NL80211_CHAN_WIDTH_160 ||
- chandef->width == NL80211_CHAN_WIDTH_80P80) {
- err = mt7915_dfs_start_rdd(dev, 1);
- if (err < 0)
- return err;
-
- phy->rdd_state |= BIT(1);
- }
-
return 0;
}
@@ -2148,12 +2133,12 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
enum mt76_dfs_state dfs_state, prev_state;
- int err;
+ int err, rdd_idx = mt7915_get_rdd_idx(phy, false);
prev_state = phy->mt76->dfs_state;
dfs_state = mt76_phy_dfs_state(phy->mt76);
- if (prev_state == dfs_state)
+ if (prev_state == dfs_state || rdd_idx < 0)
return 0;
if (prev_state == MT_DFS_STATE_UNKNOWN)
@@ -2177,8 +2162,7 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
if (dfs_state == MT_DFS_STATE_CAC)
return 0;
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, rdd_idx, 0, 0);
if (err < 0) {
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
return err;
@@ -2188,15 +2172,13 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
return 0;
stop:
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, rdd_idx, 0, 0);
if (err < 0)
return err;
if (is_mt7915(&dev->mt76)) {
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
- phy->mt76->band_idx, 0,
- dev->dbdc_support ? 2 : 0);
+ rdd_idx, 0, dev->dbdc_support ? 2 : 0);
if (err < 0)
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 3643c72bb68d..427542777abc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -303,17 +303,35 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7915_mcu_rdd_report *r;
+ u32 sku;
r = (struct mt7915_mcu_rdd_report *)skb->data;
- if (r->band_idx > MT_RX_SEL2)
+ switch (r->rdd_idx) {
+ case MT_RDD_IDX_BAND0:
+ break;
+ case MT_RDD_IDX_BAND1:
+ sku = mt7915_check_adie(dev, true);
+ /* the main phy is bound to band 1 for this sku */
+ if (is_mt7986(&dev->mt76) &&
+ (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE))
+ break;
+ mphy = dev->mt76.phys[MT_BAND1];
+ break;
+ case MT_RDD_IDX_BACKGROUND:
+ if (!dev->rdd2_phy)
+ return;
+ mphy = dev->rdd2_phy->mt76;
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Unknown RDD idx %d\n", r->rdd_idx);
return;
+ }
- if ((r->band_idx && !dev->phy.mt76->band_idx) &&
- dev->mt76.phys[MT_BAND1])
- mphy = dev->mt76.phys[MT_BAND1];
+ if (!mphy)
+ return;
- if (r->band_idx == MT_RX_SEL2)
+ if (r->rdd_idx == MT_RDD_IDX_BACKGROUND)
cfg80211_background_radar_event(mphy->hw->wiphy,
&dev->rdd2_chandef,
GFP_ATOMIC);
@@ -2697,11 +2715,14 @@ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
struct cfg80211_chan_def *chandef)
{
struct mt7915_dev *dev = phy->dev;
- int err, region;
+ int err, region, rdd_idx;
+
+ rdd_idx = mt7915_get_rdd_idx(phy, true);
+ if (rdd_idx < 0)
+ return -EINVAL;
if (!chandef) { /* disable offchain */
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2,
- 0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, rdd_idx, 0, 0);
if (err)
return err;
@@ -2727,8 +2748,7 @@ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
break;
}
- return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2,
- 0, region);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, rdd_idx, 0, region);
}
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
@@ -2859,7 +2879,7 @@ int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
&req, sizeof(req), true);
}
-int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf)
{
struct mt7915_mcu_eeprom_info req = {
.addr = cpu_to_le32(round_down(offset,
@@ -2867,8 +2887,8 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
};
struct mt7915_mcu_eeprom_info *res;
struct sk_buff *skb;
+ u8 *buf = read_buf;
int ret;
- u8 *buf;
ret = mt76_mcu_send_and_get_msg(&dev->mt76,
MCU_EXT_QUERY(EFUSE_ACCESS),
@@ -2877,8 +2897,10 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
return ret;
res = (struct mt7915_mcu_eeprom_info *)skb->data;
- buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
+ if (!buf)
+ buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE);
+
dev_kfree_skb(skb);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 092ed504a8f2..086ad89ecd91 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -57,7 +57,7 @@ struct mt7915_mcu_bcc_notify {
struct mt7915_mcu_rdd_report {
struct mt76_connac2_mcu_rxd_hdr rxd;
- u8 band_idx;
+ u8 rdd_idx;
u8 long_detected;
u8 constant_prf_detected;
u8 staggered_prf_detected;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 876f0692850a..9c4d5cea0c42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -651,6 +651,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.base = devm_ioremap(dev->mt76.dev,
pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
+ if (!wed->wlan.base)
+ return -ENOMEM;
+
wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) +
MT_INT_WED_SOURCE_CSR;
@@ -678,6 +681,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.bus_type = MTK_WED_BUS_AXI;
wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start,
resource_size(res));
+ if (!wed->wlan.base)
+ return -ENOMEM;
+
wed->wlan.phy_base = res->start;
wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR;
wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR;
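
For context on the two NULL checks added above: devm_ioremap() returns NULL on failure rather than an ERR_PTR, so the caller has to test for NULL and pick its own errno. A minimal sketch of the convention:

	void __iomem *base;

	base = devm_ioremap(dev, start, len);
	if (!base)		/* NULL on failure, not an ERR_PTR */
		return -ENOMEM;
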
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 533939f2b7ed..2e94347c46d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -215,8 +215,6 @@ struct mt7915_phy {
s16 coverage_class;
u8 slottime;
- u8 rdd_state;
-
u32 trb_ts;
u32 rx_ampdu_ts;
@@ -331,10 +329,10 @@ enum {
__MT_WFDMA_MAX,
};
-enum {
- MT_RX_SEL0,
- MT_RX_SEL1,
- MT_RX_SEL2, /* monitor chain */
+enum rdd_idx {
+ MT_RDD_IDX_BAND0, /* RDD idx for band idx 0 (single-band) */
+ MT_RDD_IDX_BAND1, /* RDD idx for band idx 1 */
+ MT_RDD_IDX_BACKGROUND, /* RDD idx for background chain */
};
enum mt7915_rdd_cmd {
@@ -354,6 +352,18 @@ enum mt7915_rdd_cmd {
RDD_IRQ_OFF,
};
+static inline int
+mt7915_get_rdd_idx(struct mt7915_phy *phy, bool is_background)
+{
+ if (!phy->mt76->cap.has_5ghz)
+ return -1;
+
+ if (is_background)
+ return MT_RDD_IDX_BACKGROUND;
+
+ return phy->mt76->band_idx;
+}
+
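
A usage sketch for the helper above, mirroring how the RDD call sites resolve the index first (the negative return covers phys without 5 GHz support, where radar detection does not apply):

	int rdd_idx = mt7915_get_rdd_idx(phy, false);

	if (rdd_idx < 0)	/* no 5 GHz, hence no DFS */
		return -EINVAL;

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, rdd_idx,
				       0, region);
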
static inline struct mt7915_phy *
mt7915_hw_phy(struct ieee80211_hw *hw)
{
@@ -425,6 +435,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
+bool mt7915_eeprom_has_background_radar(struct mt7915_dev *dev);
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
@@ -473,7 +484,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
struct ieee80211_sta *sta,
void *data, u32 field);
int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
-int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf);
int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
bool hdr_trans);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 826c48a2ee69..1fffa43379b2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -83,6 +83,11 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+
+ if (is_mt7922(phy->mt76->dev)) {
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[1] |=
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/Makefile b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile
index d321e4ed732f..ade5e647c941 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_MT7925E) += mt7925e.o
obj-$(CONFIG_MT7925U) += mt7925u.o
mt7925-common-y := mac.o mcu.o main.o init.o debugfs.o
+mt7925-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt7925e-y := pci.o pci_mac.o pci_mcu.o
mt7925u-y := usb.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 63cb08f4d87c..2a83ff59a968 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -89,7 +89,7 @@ void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2)
}
/* Check the last one */
- if (rule->flag && BIT(0))
+ if (rule->flag & BIT(0))
break;
pos += sizeof(*rule);
@@ -322,6 +322,12 @@ static void mt7925_init_work(struct work_struct *work)
return;
}
+ ret = mt7925_mcu_set_thermal_protect(dev);
+ if (ret) {
+ dev_err(dev->mt76.dev, "failed to enable thermal protection\n");
+ return;
+ }
+
/* we support chip reset now */
dev->hw_init_done = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 66f327781947..94b0099dcd41 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -334,6 +334,9 @@ int __mt7925_start(struct mt792x_phy *phy)
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT792x_WATCHDOG_TIME);
+ if (phy->chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN)
+ wiphy_rfkill_start_polling(mphy->hw->wiphy);
+
return 0;
}
EXPORT_SYMBOL_GPL(__mt7925_start);
@@ -1865,6 +1868,10 @@ mt7925_change_chanctx(struct ieee80211_hw *hw,
link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
mt7925_mcu_set_chctx(mvif->phy->mt76, &mconf->mt76,
link_conf, ctx);
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
+ mt7925_mcu_set_eht_pp(mvif->phy->mt76, &mconf->mt76,
+ link_conf, ctx);
}
}
@@ -1954,8 +1961,10 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
struct mt792x_phy *phy = mt792x_hw_phy(hw);
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_bss_conf *mconf;
+ struct ieee80211_bss_conf *link_conf;
mconf = mt792x_vif_to_link(mvif, info->link_id);
+ link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
mt792x_mutex_acquire(dev);
@@ -1997,6 +2006,10 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS;
}
+ if (changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
+ mt7925_mcu_set_eht_pp(mvif->phy->mt76, &mconf->mt76,
+ link_conf, NULL);
+
mt792x_mutex_release(dev);
}
@@ -2195,6 +2208,18 @@ static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static void mt7925_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ int ret;
+
+ mt792x_mutex_acquire(phy->dev);
+ ret = mt7925_mcu_wf_rf_pin_ctrl(phy);
+ mt792x_mutex_release(phy->dev);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, ret == 0);
+}
+
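
On the polarity above: wiphy_rfkill_set_hw_state() takes a "blocked" flag, and mt7925_mcu_wf_rf_pin_ctrl() returns the firmware-reported pin state or a negative errno. A sketch of the mapping, assuming a nonzero status means the radio pin is asserted:

	/* status 0 -> radio pin low -> report hw rfkill as blocked;
	 * a negative errno also reads as "not blocked", leaving the
	 * state to be corrected by the next successful poll */
	wiphy_rfkill_set_hw_state(hw->wiphy, ret == 0);
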
const struct ieee80211_ops mt7925_ops = {
.tx = mt792x_tx,
.start = mt7925_start,
@@ -2234,6 +2259,8 @@ const struct ieee80211_ops mt7925_ops = {
.sta_statistics = mt792x_sta_statistics,
.sched_scan_start = mt7925_start_sched_scan,
.sched_scan_stop = mt7925_stop_sched_scan,
+ CFG80211_TESTMODE_CMD(mt7925_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(mt7925_testmode_dump)
#ifdef CONFIG_PM
.suspend = mt7925_suspend,
.resume = mt7925_resume,
@@ -2255,6 +2282,7 @@ const struct ieee80211_ops mt7925_ops = {
.link_info_changed = mt7925_link_info_changed,
.change_vif_links = mt7925_change_vif_links,
.change_sta_links = mt7925_change_sta_links,
+ .rfkill_poll = mt7925_rfkill_poll,
};
EXPORT_SYMBOL_GPL(mt7925_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index e61da76b2097..b8542be0d945 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -783,7 +783,7 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
int ret;
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(WSYS_CONFIG),
- &req, sizeof(req), false, NULL);
+ &req, sizeof(req), true, NULL);
return ret;
}
@@ -974,6 +974,23 @@ int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable)
}
EXPORT_SYMBOL_GPL(mt7925_mcu_set_deep_sleep);
+int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev)
+{
+ char cmd[64];
+ int ret = 0;
+
+ snprintf(cmd, sizeof(cmd), "ThermalProtGband %d %d %d %d %d %d %d %d %d %d",
+ 0, 100, 90, 80, 30, 1, 1, 115, 105, 5);
+ ret = mt7925_mcu_chip_config(dev, cmd);
+
+ snprintf(cmd, sizeof(cmd), "ThermalProtAband %d %d %d %d %d %d %d %d %d %d",
+ 1, 100, 90, 80, 30, 1, 1, 115, 105, 5);
+ ret |= mt7925_mcu_chip_config(dev, cmd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt7925_mcu_set_thermal_protect);
+
int mt7925_run_firmware(struct mt792x_dev *dev)
{
int err;
@@ -1424,7 +1441,7 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
};
return mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(EFUSE_CTRL),
- &req, sizeof(req), false, NULL);
+ &req, sizeof(req), true, NULL);
}
EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom);
@@ -1924,14 +1941,14 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta);
}
-
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
}
if (!info->enable) {
mt7925_mcu_sta_remove_tlv(skb);
mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF,
sizeof(struct tlv));
+ } else {
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
}
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@@ -2046,8 +2063,6 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
},
};
- mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true);
-
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
true);
}
@@ -2298,6 +2313,40 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int
return skb;
}
+static
+void mt7925_mcu_bss_eht_tlv(struct sk_buff *skb, struct mt76_phy *phy,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def :
+ &link_conf->chanreq.oper;
+
+ struct bss_eht_tlv *req;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_EHT, sizeof(*req));
+ req = (struct bss_eht_tlv *)tlv;
+ req->is_eth_dscb_present = chandef->punctured ? 1 : 0;
+ req->eht_dis_sub_chan_bitmap = cpu_to_le16(chandef->punctured);
+}
+
+int mt7925_mcu_set_eht_pp(struct mt76_phy *phy, struct mt76_vif_link *mvif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct sk_buff *skb;
+
+ skb = __mt7925_mcu_alloc_bss_req(phy->dev, mvif,
+ MT7925_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7925_mcu_bss_eht_tlv(skb, phy, link_conf, ctx);
+
+ return mt76_mcu_skb_send_msg(phy->dev, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
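
A worked example for the TLV above, assuming cfg80211's convention that chandef->punctured is a bitmap of disabled 20 MHz subchannels (bit 0 = lowest): on an 80 MHz channel with the second 20 MHz subchannel punctured, the driver would emit:

	/* chandef->punctured == BIT(1): second 20 MHz subchannel disabled */
	req->is_eth_dscb_present = 1;
	req->eht_dis_sub_chan_bitmap = cpu_to_le16(BIT(1));	/* 0x0002 */
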
int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
@@ -2764,7 +2813,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable)
conf->band = 0; /* unused */
err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS),
- false);
+ true);
return err;
}
@@ -2779,7 +2828,6 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_dev *mdev = phy->dev;
struct mt76_connac_mcu_scan_channel *chan;
struct sk_buff *skb;
-
struct scan_hdr_tlv *hdr;
struct scan_req_tlv *req;
struct scan_ssid_tlv *ssid;
@@ -2790,9 +2838,12 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct tlv *tlv;
int max_len;
+ if (test_bit(MT76_HW_SCANNING, &phy->state))
+ return -EBUSY;
+
max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) +
- sizeof(*bssid) + sizeof(*chan_info) +
- sizeof(*misc) + sizeof(*ie);
+ sizeof(*bssid) * MT7925_RNR_SCAN_MAX_BSSIDS +
+ sizeof(*chan_info) + sizeof(*misc) + sizeof(*ie);
skb = mt76_mcu_msg_alloc(mdev, NULL, max_len);
if (!skb)
@@ -2815,6 +2866,8 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
for (i = 0; i < sreq->n_ssids; i++) {
if (!sreq->ssids[i].ssid_len)
continue;
+ if (i > MT7925_RNR_SCAN_MAX_BSSIDS)
+ break;
ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid,
@@ -2824,10 +2877,31 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
ssid->ssid_type = n_ssids ? BIT(2) : BIT(0);
ssid->ssids_num = n_ssids;
- tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, sizeof(*bssid));
- bssid = (struct scan_bssid_tlv *)tlv;
+ if (sreq->n_6ghz_params) {
+ u8 j;
+
+ mt76_connac_mcu_build_rnr_scan_param(mdev, sreq);
+
+ for (j = 0; j < mdev->rnr.bssid_num; j++) {
+ if (j > MT7925_RNR_SCAN_MAX_BSSIDS)
+ break;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID,
+ sizeof(*bssid));
+ bssid = (struct scan_bssid_tlv *)tlv;
- memcpy(bssid->bssid, sreq->bssid, ETH_ALEN);
+ ether_addr_copy(bssid->bssid, mdev->rnr.bssid[j]);
+ bssid->match_ch = mdev->rnr.channel[j];
+ bssid->match_ssid_ind = MT7925_RNR_SCAN_MAX_BSSIDS;
+ bssid->match_short_ssid_ind = MT7925_RNR_SCAN_MAX_BSSIDS;
+ }
+ req->scan_func |= SCAN_FUNC_RNR_SCAN;
+ } else {
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, sizeof(*bssid));
+ bssid = (struct scan_bssid_tlv *)tlv;
+
+ ether_addr_copy(bssid->bssid, sreq->bssid);
+ }
tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_CHANNEL, sizeof(*chan_info));
chan_info = (struct scan_chan_info_tlv *)tlv;
@@ -2869,7 +2943,7 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
- false);
+ true);
if (err < 0)
clear_bit(MT76_HW_SCANNING, &phy->state);
@@ -2975,7 +3049,7 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
}
return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
- false);
+ true);
}
EXPORT_SYMBOL_GPL(mt7925_mcu_sched_scan_req);
@@ -3011,7 +3085,7 @@ mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
clear_bit(MT76_HW_SCHED_SCANNING, &phy->state);
return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
- false);
+ true);
}
int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
@@ -3050,7 +3124,7 @@ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
}
return mt76_mcu_send_msg(phy->dev, MCU_UNI_CMD(SCAN_REQ),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7925_mcu_cancel_hw_scan);
@@ -3155,7 +3229,7 @@ int mt7925_mcu_set_channel_domain(struct mt76_phy *phy)
memcpy(__skb_push(skb, sizeof(req)), &req, sizeof(req));
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SET_DOMAIN_INFO),
- false);
+ true);
}
EXPORT_SYMBOL_GPL(mt7925_mcu_set_channel_domain);
@@ -3305,9 +3379,18 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
- if (cmd == MCU_UNI_CMD(HIF_CTRL))
+ if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
+ cmd == MCU_UNI_CMD(CHIP_CONFIG))
uni_txd->option &= ~MCU_CMD_ACK;
+ if (mcu_cmd == MCU_UNI_CMD_TESTMODE_CTRL ||
+ mcu_cmd == MCU_UNI_CMD_TESTMODE_RX_STAT) {
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ uni_txd->option = 0x2;
+ else
+ uni_txd->option = 0x6;
+ }
+
goto exit;
}
@@ -3599,6 +3682,43 @@ int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy)
return 0;
}
+int mt7925_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy)
+{
+#define UNI_CMD_RADIO_STATUS_GET 0
+ struct mt792x_dev *dev = phy->dev;
+ struct sk_buff *skb;
+ int ret;
+ struct {
+ __le16 tag;
+ __le16 len;
+ u8 rsv[4];
+ } __packed req = {
+ .tag = UNI_CMD_RADIO_STATUS_GET,
+ .len = cpu_to_le16(sizeof(req)),
+ };
+ struct mt7925_radio_status_event {
+ __le16 tag;
+ __le16 len;
+
+ u8 data;
+ u8 rsv[3];
+ } __packed *status;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_UNI_CMD(RADIO_STATUS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ skb_pull(skb, sizeof(struct tlv));
+ status = (struct mt7925_radio_status_event *)skb->data;
+ ret = status->data;
+
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 8ac43feb26d6..ee6fb16e83c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -104,13 +104,6 @@ enum {
MT7925_TM_WIFISPECTRUM,
};
-struct mt7925_rftest_cmd {
- u8 action;
- u8 rsv[3];
- __le32 param0;
- __le32 param1;
-} __packed;
-
struct mt7925_rftest_evt {
__le32 param0;
__le32 param1;
@@ -196,6 +189,7 @@ enum {
UNI_SNIFFER_CONFIG,
};
+#define MT7925_RNR_SCAN_MAX_BSSIDS 10
struct scan_hdr_tlv {
/* fixed field */
u8 seq_num;
@@ -223,7 +217,7 @@ struct scan_req_tlv {
__le16 timeout_value;
__le16 probe_delay_time;
__le32 func_mask_ext;
-};
+} __packed;
struct scan_ssid_tlv {
__le16 tag;
@@ -235,9 +229,10 @@ struct scan_ssid_tlv {
* BIT(2) + ssid_type_ext BIT(0) specified SSID only
*/
u8 ssids_num;
- u8 pad[2];
- struct mt76_connac_mcu_scan_ssid ssids[4];
-};
+ u8 is_short_ssid;
+ u8 pad;
+ struct mt76_connac_mcu_scan_ssid ssids[MT7925_RNR_SCAN_MAX_BSSIDS];
+} __packed;
struct scan_bssid_tlv {
__le16 tag;
@@ -247,8 +242,9 @@ struct scan_bssid_tlv {
u8 match_ch;
u8 match_ssid_ind;
u8 rcpi;
- u8 pad[3];
-};
+ u8 match_short_ssid_ind;
+ u8 pad[2];
+} __packed;
struct scan_chan_info_tlv {
__le16 tag;
@@ -264,7 +260,7 @@ struct scan_chan_info_tlv {
u8 channels_num; /* valid when channel_type is 4 */
u8 pad[2];
struct mt76_connac_mcu_scan_channel channels[64];
-};
+} __packed;
struct scan_ie_tlv {
__le16 tag;
@@ -372,6 +368,19 @@ struct bss_mld_tlv {
u8 __rsv[3];
} __packed;
+struct bss_eht_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 is_eht_op_present;
+ u8 is_eth_dscb_present;
+ u8 eht_ctrl;
+ u8 eht_ccfs0;
+ u8 eht_ccfs1;
+ u8 pad1;
+ __le16 eht_dis_sub_chan_bitmap;
+ u8 pad2[4];
+} __packed;
+
struct sta_rec_ba_uni {
__le16 tag;
__le16 len;
@@ -589,6 +598,47 @@ struct roc_acquire_tlv {
u8 rsv[3];
} __packed;
+enum ENUM_CMD_TEST_CTRL_ACT {
+ CMD_TEST_CTRL_ACT_SWITCH_MODE = 0,
+ CMD_TEST_CTRL_ACT_SET_AT = 1,
+ CMD_TEST_CTRL_ACT_GET_AT = 2,
+ CMD_TEST_CTRL_ACT_SET_AT_ENG = 3,
+ CMD_TEST_CTRL_ACT_GET_AT_ENG = 4,
+ CMD_TEST_CTRL_ACT_NUM
+};
+
+enum ENUM_CMD_TEST_CTRL_ACT_SWITCH_MODE_OP {
+ CMD_TEST_CTRL_ACT_SWITCH_MODE_NORMAL = 0,
+ CMD_TEST_CTRL_ACT_SWITCH_MODE_RF_TEST = 1,
+ CMD_TEST_CTRL_ACT_SWITCH_MODE_ICAP = 2,
+ CMD_TEST_CTRL_ACT_SWITCH_MODE_NUM
+};
+
+union testmode_data {
+ __le32 op_mode;
+ __le32 channel_freq;
+ u8 rf_at_info[84];
+};
+
+union testmode_evt {
+ __le32 op_mode;
+ __le32 channel_freq;
+ u8 rf_at_info[1024];
+};
+
+struct uni_cmd_testmode_ctrl {
+ u16 tag;
+ u16 length;
+ u8 action;
+ u8 reserved[3];
+ union testmode_data data;
+} __packed;
+
+struct mt7925_rftest_cmd {
+ u8 padding[4];
+ struct uni_cmd_testmode_ctrl ctrl;
+} __packed;
+
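
A sketch of how the request wrapper above is typically populated for a mode switch, based on the enums earlier in this header (tag/length handling is left to the testmode path that forwards the struct):

	struct mt7925_rftest_cmd cmd = {};

	cmd.ctrl.action = CMD_TEST_CTRL_ACT_SWITCH_MODE;
	cmd.ctrl.data.op_mode =
		cpu_to_le32(CMD_TEST_CTRL_ACT_SWITCH_MODE_RF_TEST);
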
static inline enum connac3_mcu_cipher_type
mt7925_mcu_get_cipher(int cipher)
{
@@ -637,11 +687,15 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
int mt7925_mcu_set_timing(struct mt792x_phy *phy,
struct ieee80211_bss_conf *link_conf);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
+int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
+int mt7925_mcu_set_eht_pp(struct mt76_phy *phy, struct mt76_vif_link *mvif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx);
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
struct ieee80211_bss_conf *link_conf);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 4e50f2597ccd..1b165d0d8bd3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -365,5 +365,11 @@ int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
int link_id);
+int mt7925_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy);
+
+int mt7925_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+int mt7925_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ struct netlink_callback *cb, void *data, int len);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index c7b5dc1dbb34..89dc30f7c6b7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -31,6 +31,10 @@ static void mt7925e_unregister_device(struct mt792x_dev *dev)
{
int i;
struct mt76_connac_pm *pm = &dev->pm;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ if (dev->phy.chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN)
+ wiphy_rfkill_stop_polling(hw->wiphy);
cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
@@ -490,9 +494,6 @@ static int mt7925_pci_suspend(struct device *device)
/* disable interrupt */
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
- mt76_wr(dev, MT_WFDMA0_HOST_INT_DIS,
- dev->irq_map->tx.all_complete_mask |
- MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
index 985794a40c1a..547489092c29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
@@ -28,7 +28,7 @@
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
-#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x228)
+#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x204)
#define MT_WFDMA0_HOST_INT_DIS MT_WFDMA0(0x22c)
#define HOST_RX_DONE_INT_ENA4 BIT(12)
#define HOST_RX_DONE_INT_ENA5 BIT(13)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c
new file mode 100644
index 000000000000..a3c97164ba21
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7925.h"
+#include "mcu.h"
+
+#define MT7925_EVT_RSP_LEN 512
+
+enum mt7925_testmode_attr {
+ MT7925_TM_ATTR_UNSPEC,
+ MT7925_TM_ATTR_SET,
+ MT7925_TM_ATTR_QUERY,
+ MT7925_TM_ATTR_RSP,
+
+ /* keep last */
+ NUM_MT7925_TM_ATTRS,
+ MT7925_TM_ATTR_MAX = NUM_MT7925_TM_ATTRS - 1,
+};
+
+struct mt7925_tm_cmd {
+ u8 padding[4];
+ struct uni_cmd_testmode_ctrl c;
+} __packed;
+
+struct mt7925_tm_evt {
+ u32 param0;
+ u32 param1;
+} __packed;
+
+static const struct nla_policy mt7925_tm_policy[NUM_MT7925_TM_ATTRS] = {
+ [MT7925_TM_ATTR_SET] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7925_tm_cmd)),
+ [MT7925_TM_ATTR_QUERY] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7925_tm_cmd)),
+};
+
+static int
+mt7925_tm_set(struct mt792x_dev *dev, struct mt7925_tm_cmd *req)
+{
+ struct mt7925_rftest_cmd cmd;
+ struct mt7925_rftest_cmd *pcmd = &cmd;
+ bool testmode = false, normal = false;
+ struct mt76_connac_pm *pm = &dev->pm;
+ struct mt76_phy *phy = &dev->mphy;
+ int ret = -ENOTCONN;
+
+ memset(pcmd, 0, sizeof(*pcmd));
+ memcpy(pcmd, req, sizeof(struct mt7925_tm_cmd));
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (pcmd->ctrl.action == CMD_TEST_CTRL_ACT_SWITCH_MODE) {
+ if (pcmd->ctrl.data.op_mode == CMD_TEST_CTRL_ACT_SWITCH_MODE_NORMAL)
+ normal = true;
+ else
+ testmode = true;
+ }
+
+ if (testmode) {
+ /* Make sure testmode runs in full power mode */
+ pm->enable = false;
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
+ __mt792x_mcu_drv_pmctrl(dev);
+
+ phy->test.state = MT76_TM_STATE_ON;
+ }
+
+ if (!mt76_testmode_enabled(phy))
+ goto out;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(TESTMODE_CTRL), &cmd,
+ sizeof(cmd), false);
+
+ if (ret)
+ goto out;
+
+ if (normal) {
+ /* Switch back to the normal world */
+ phy->test.state = MT76_TM_STATE_OFF;
+ pm->enable = true;
+ }
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7925_tm_query(struct mt792x_dev *dev, struct mt7925_tm_cmd *req,
+ char *evt_resp)
+{
+ struct mt7925_rftest_cmd cmd;
+ char *pcmd = (char *)&cmd;
+ struct sk_buff *skb = NULL;
+ int ret = 1;
+
+ memset(&cmd, 0, sizeof(cmd));
+ memcpy(pcmd + 4, (char *)&req->c, sizeof(struct uni_cmd_testmode_ctrl));
+
+ if (*((uint16_t *)req->padding) == MCU_UNI_CMD_TESTMODE_CTRL)
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_QUERY(TESTMODE_CTRL),
+ &cmd, sizeof(cmd), true, &skb);
+ else if (*((uint16_t *)req->padding) == MCU_UNI_CMD_TESTMODE_RX_STAT)
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_QUERY(TESTMODE_RX_STAT),
+ &cmd, sizeof(cmd), true, &skb);
+
+ if (ret)
+ goto out;
+
+ memcpy((char *)evt_resp, (char *)skb->data + 8, MT7925_EVT_RSP_LEN);
+
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+int mt7925_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct nlattr *tb[NUM_MT76_TM_ATTRS];
+ struct mt76_phy *mphy = hw->priv;
+ struct mt792x_phy *phy = mphy->priv;
+ int err;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state) ||
+ !(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ return -ENOTCONN;
+
+ err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+ mt76_tm_policy, NULL);
+ if (err)
+ return err;
+
+ if (tb[MT76_TM_ATTR_DRV_DATA]) {
+ struct nlattr *drv_tb[NUM_MT7925_TM_ATTRS], *data;
+ int ret;
+
+ data = tb[MT76_TM_ATTR_DRV_DATA];
+ ret = nla_parse_nested_deprecated(drv_tb,
+ MT7925_TM_ATTR_MAX,
+ data, mt7925_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ data = drv_tb[MT7925_TM_ATTR_SET];
+ if (data)
+ return mt7925_tm_set(phy->dev, nla_data(data));
+ }
+
+ return -EINVAL;
+}
+
+int mt7925_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ struct netlink_callback *cb, void *data, int len)
+{
+ struct nlattr *tb[NUM_MT76_TM_ATTRS];
+ struct mt76_phy *mphy = hw->priv;
+ struct mt792x_phy *phy = mphy->priv;
+ int err;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state) ||
+ !(hw->conf.flags & IEEE80211_CONF_MONITOR) ||
+ !mt76_testmode_enabled(mphy))
+ return -ENOTCONN;
+
+ if (cb->args[2]++ > 0)
+ return -ENOENT;
+
+ err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+ mt76_tm_policy, NULL);
+ if (err)
+ return err;
+
+ if (tb[MT76_TM_ATTR_DRV_DATA]) {
+ struct nlattr *drv_tb[NUM_MT7925_TM_ATTRS], *data;
+ int ret;
+
+ data = tb[MT76_TM_ATTR_DRV_DATA];
+ ret = nla_parse_nested_deprecated(drv_tb,
+ MT7925_TM_ATTR_MAX,
+ data, mt7925_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ data = drv_tb[MT7925_TM_ATTR_QUERY];
+ if (data) {
+ char evt_resp[MT7925_EVT_RSP_LEN];
+
+ err = mt7925_tm_query(phy->dev, nla_data(data),
+ evt_resp);
+ if (err)
+ return err;
+
+ return nla_put(msg, MT7925_TM_ATTR_RSP,
+ sizeof(evt_resp), evt_resp);
+ }
+ }
+
+ return -EINVAL;
+}
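
For reference, the attribute nesting the two entry points above expect, reconstructed from the parse calls (a sketch, not a normative layout):

/*
 * MT76_TM_ATTR_DRV_DATA (nested)
 *   MT7925_TM_ATTR_SET or MT7925_TM_ATTR_QUERY
 *     struct mt7925_tm_cmd (exact length, enforced by mt7925_tm_policy)
 *
 * On query, the first two padding bytes of struct mt7925_tm_cmd select
 * the MCU command (MCU_UNI_CMD_TESTMODE_CTRL or _TESTMODE_RX_STAT), and
 * the 512-byte response comes back in MT7925_TM_ATTR_RSP.
 */
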
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 38dd58f6e493..a50c1723ca29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -283,7 +283,7 @@ EXPORT_SYMBOL_GPL(mt792x_tx_worker);
void mt792x_roc_timer(struct timer_list *timer)
{
- struct mt792x_phy *phy = from_timer(phy, timer, roc_timer);
+ struct mt792x_phy *phy = timer_container_of(phy, timer, roc_timer);
ieee80211_queue_work(phy->mt76->hw, &phy->roc_work);
}
@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(mt792x_roc_timer);
void mt792x_csa_timer(struct timer_list *timer)
{
- struct mt792x_vif *mvif = from_timer(mvif, timer, csa_timer);
+ struct mt792x_vif *mvif = timer_container_of(mvif, timer, csa_timer);
ieee80211_queue_work(mvif->phy->mt76->hw, &mvif->csa_work);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c b/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c
index ccab0d7b9be4..303d6e80a666 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c
@@ -48,8 +48,8 @@ const struct mt7996_mem_region*
mt7996_coredump_get_mem_layout(struct mt7996_dev *dev, u32 *num)
{
switch (mt76_chip(&dev->mt76)) {
- case 0x7990:
- case 0x7991:
+ case MT7996_DEVICE_ID:
+ case MT7996_DEVICE_ID_2:
*num = ARRAY_SIZE(mt7996_mem_regions);
return &mt7996_mem_regions[0];
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 4a28db17a287..0ab827f52fd7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -222,18 +222,27 @@ static const struct file_operations mt7996_sys_recovery_ops = {
static int
mt7996_radar_trigger(void *data, u64 val)
{
+#define RADAR_MAIN_CHAIN 1
+#define RADAR_BACKGROUND 2
struct mt7996_dev *dev = data;
+ struct mt7996_phy *phy = mt7996_band_phy(dev, NL80211_BAND_5GHZ);
+ int rdd_idx;
- if (val > MT_RX_SEL2)
+ if (!phy || !val || val > RADAR_BACKGROUND)
return -EINVAL;
- if (val == MT_RX_SEL2 && !dev->rdd2_phy) {
+ if (val == RADAR_BACKGROUND && !dev->rdd2_phy) {
dev_err(dev->mt76.dev, "Background radar is not enabled\n");
return -EINVAL;
}
- return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE,
- val, 0, 0);
+ rdd_idx = mt7996_get_rdd_idx(phy, val == RADAR_BACKGROUND);
+ if (rdd_idx < 0) {
+ dev_err(dev->mt76.dev, "No RDD found\n");
+ return -EINVAL;
+ }
+
+ return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, rdd_idx, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 69a7d9b2e38b..c8bef0b2a144 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -55,20 +55,32 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
/* rx queue */
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
+ /* for mt7990, RX ring 1 is for SDO instead */
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
-
- /* mt7996: band0 and band1, mt7992: band0 */
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
- RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);
+ if (mt7996_has_wa(dev))
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN,
+ MT7996_RXQ_MCU_WA_MAIN);
- if (is_mt7996(&dev->mt76)) {
+ switch (mt76_chip(&dev->mt76)) {
+ case MT7992_DEVICE_ID:
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
+ break;
+ case MT7990_DEVICE_ID:
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
+ MT_INT_RX_TXFREE_BAND0_MT7990, MT7990_RXQ_TXFREE0);
+ if (dev->hif2)
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND1, WFDMA0,
+ MT_INT_RX_TXFREE_BAND1_MT7990, MT7990_RXQ_TXFREE1);
+ break;
+ case MT7996_DEVICE_ID:
+ default:
/* mt7996 band2 */
- RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
- } else {
- /* mt7992 band1 */
- RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
- RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
+ break;
}
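
Summarizing the per-chip split the switch above encodes (a reading aid, not new behaviour):

	/*
	 * mt7996: BAND2 data ring + BAND2_WA tx-free ring (WA firmware)
	 * mt7992: BAND1 data ring + BAND1_WA tx-free ring (WA firmware)
	 * mt7990: BAND1 data ring; tx-free comes from the MAC via
	 *         TXFREE_BAND0 (and TXFREE_BAND1 with a second HIF)
	 */
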
if (dev->has_rro) {
@@ -104,9 +116,11 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
}
/* mcu tx queue */
- MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
- MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
+ if (mt7996_has_wa(dev))
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA,
+ MT7996_TXQ_MCU_WA);
}
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
@@ -121,43 +135,62 @@ static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
u16 base = 0;
- u8 queue;
+ u8 queue, val;
#define PREFETCH(_depth) (__mt7996_dma_prefetch_base(&base, (_depth)))
/* prefetch SRAM wrapping boundary for tx/rx ring. */
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2));
+ /* Tx Command Rings */
+ val = is_mt7996(&dev->mt76) ? 2 : 4;
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(val));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(val));
+ if (mt7996_has_wa(dev))
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(val));
+
+ /* Tx Data Rings */
mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2));
-
- queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2));
-
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
+ if (!is_mt7996(&dev->mt76) || dev->hif2)
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
+ if (is_mt7996(&dev->mt76))
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
+
+ /* Rx Event Rings */
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(val));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(val));
+
+ /* Rx TxFreeDone From WA Rings */
+ if (mt7996_has_wa(dev)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(val));
+ queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(val));
+ }
+ /* Rx TxFreeDone From MAC Rings */
+ val = is_mt7996(&dev->mt76) ? 4 : 8;
+ if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro))
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
+ if (is_mt7990(&dev->mt76) && dev->hif2)
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
+ else if (is_mt7996(&dev->mt76) && dev->has_rro)
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));
+
+ /* Rx Data Rings */
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
+ /* Rx RRO Rings */
if (dev->has_rro) {
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
- PREFETCH(0x10));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
- PREFETCH(0x10));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
- PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
+ queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
+
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, PREFETCH(val));
+ if (is_mt7996(&dev->mt76)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
+ PREFETCH(val));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
+ PREFETCH(val));
+ }
}
#undef PREFETCH
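
The PREFETCH() bookkeeping above relies on __mt7996_dma_prefetch_base() advancing a running SRAM base. A sketch of the accumulator, assuming the same packing as the mt7915 counterpart (running base in the high 16 bits, depth in units of 16 descriptors in the low bits); the real helper sits just above this hunk:

static u32 __prefetch_base_sketch(u16 *base, u8 depth)
{
	u32 ret = *base << 16 | depth;

	*base = *base + (depth << 4);	/* each depth unit is 16 descriptors */
	return ret;
}
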
@@ -269,6 +302,9 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
mtk_wed_device_start(wed, wed_irq_mask);
}
+ if (!mt7996_has_wa(dev))
+ irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) |
+ MT_INT_RX(MT_RXQ_BAND1_WA));
irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
mt7996_irq_enable(dev, irq_mask);
@@ -474,12 +510,14 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* command to WA */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
- MT_MCUQ_ID(MT_MCUQ_WA),
- MT7996_TX_MCU_RING_SIZE,
- MT_MCUQ_RING_BASE(MT_MCUQ_WA));
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7996_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
+ if (ret)
+ return ret;
+ }
/* firmware download */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
@@ -493,16 +531,16 @@ int mt7996_dma_init(struct mt7996_dev *dev)
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
MT_RXQ_ID(MT_RXQ_MCU),
MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
+ MT7996_RX_MCU_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
- /* event from WA */
+ /* event from WA, or SDO event for mt7990 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT_RXQ_ID(MT_RXQ_MCU_WA),
MT7996_RX_MCU_RING_SIZE_WA,
- MT_RX_BUF_SIZE,
+ MT7996_RX_MCU_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
return ret;
@@ -527,13 +565,41 @@ int mt7996_dma_init(struct mt7996_dev *dev)
dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
}
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
- MT_RXQ_ID(MT_RXQ_MAIN_WA),
- MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ if (ret)
+ return ret;
+ } else {
+ if (mtk_wed_device_active(wed)) {
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+ }
+
+ if (!mt7996_has_wa(dev) && dev->hif2) {
+ if (mtk_wed_device_active(wed)) {
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed;
+ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND1),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1));
+ if (ret)
+ return ret;
+ }
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx data queue for mt7996 band2 */
@@ -573,14 +639,16 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for mt7992 band1 */
- rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
- MT_RXQ_ID(MT_RXQ_BAND1_WA),
- MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
- rx_base);
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
+ MT_RXQ_ID(MT_RXQ_BAND1_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ rx_base);
+ if (ret)
+ return ret;
+ }
}
if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index 53dfac02f8af..87c6192b6384 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -13,10 +13,12 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev)
u16 val = get_unaligned_le16(eeprom);
switch (val) {
- case 0x7990:
+ case MT7996_DEVICE_ID:
return is_mt7996(&dev->mt76) ? 0 : -EINVAL;
- case 0x7992:
+ case MT7992_DEVICE_ID:
return is_mt7992(&dev->mt76) ? 0 : -EINVAL;
+ case MT7990_DEVICE_ID:
+ return is_mt7990(&dev->mt76) ? 0 : -EINVAL;
default:
return -EINVAL;
}
@@ -25,7 +27,7 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev)
static char *mt7996_eeprom_name(struct mt7996_dev *dev)
{
switch (mt76_chip(&dev->mt76)) {
- case 0x7992:
+ case MT7992_DEVICE_ID:
switch (dev->var.type) {
case MT7992_VAR_TYPE_23:
if (dev->var.fem == MT7996_FEM_INT)
@@ -39,7 +41,11 @@ static char *mt7996_eeprom_name(struct mt7996_dev *dev)
return MT7992_EEPROM_DEFAULT_MIX;
return MT7992_EEPROM_DEFAULT;
}
- case 0x7990:
+ case MT7990_DEVICE_ID:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7990_EEPROM_DEFAULT_INT;
+ return MT7990_EEPROM_DEFAULT;
+ case MT7996_DEVICE_ID:
default:
switch (dev->var.type) {
case MT7996_VAR_TYPE_233:
@@ -304,6 +310,7 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
phy->has_aux_rx = true;
mphy->antenna_mask = BIT(nss) - 1;
+ phy->orig_antenna_mask = mphy->antenna_mask;
mphy->chainmask = (BIT(path) - 1) << dev->chainshift[band_idx];
phy->orig_chainmask = mphy->chainmask;
dev->chainmask |= mphy->chainmask;
@@ -370,3 +377,30 @@ s8 mt7996_eeprom_get_power_delta(struct mt7996_dev *dev, int band)
return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta;
}
+
+bool mt7996_eeprom_has_background_radar(struct mt7996_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case MT7996_DEVICE_ID:
+ if (dev->var.type == MT7996_VAR_TYPE_233)
+ return false;
+ break;
+ case MT7992_DEVICE_ID:
+ if (dev->var.type == MT7992_VAR_TYPE_23)
+ return false;
+ break;
+ case MT7990_DEVICE_ID: {
+ u8 path, rx_path, nss, *eeprom = dev->mt76.eeprom.data;
+
+ mt7996_eeprom_parse_stream(eeprom, MT_BAND1, &path, &rx_path, &nss);
+ /* Disable background radar capability in 3T3R */
+ if (path == 3 || rx_path == 3)
+ return false;
+ break;
+ }
+ default:
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 6b660424aedc..a9599c286328 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -217,6 +217,9 @@ static int mt7996_thermal_init(struct mt7996_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7996_%s.%d",
wiphy_name(wiphy), phy->mt76->band_idx);
+ if (!name)
+ return -ENOMEM;
+
snprintf(cname, sizeof(cname), "cooling_device%d", phy->mt76->band_idx);
cdev = thermal_cooling_device_register(name, phy, &mt7996_thermal_ops);
@@ -314,8 +317,8 @@ static void __mt7996_init_txpower(struct mt7996_phy *phy,
struct ieee80211_supported_band *sband)
{
struct mt7996_dev *dev = phy->dev;
- int i, nss = hweight16(phy->mt76->chainmask);
- int nss_delta = mt76_tx_power_nss_delta(nss);
+ int i, n_chains = hweight16(phy->mt76->chainmask);
+ int path_delta = mt76_tx_power_path_delta(n_chains);
int pwr_delta = mt7996_eeprom_get_power_delta(dev, sband->band);
struct mt76_power_limits limits;
@@ -327,7 +330,7 @@ static void __mt7996_init_txpower(struct mt7996_phy *phy,
target_power = mt76_get_rate_power_limits(phy->mt76, chan,
&limits,
target_power);
- target_power += nss_delta;
+ target_power += path_delta;
target_power = DIV_ROUND_UP(target_power, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
@@ -440,6 +443,9 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
hw->queues = 4;
hw->max_rx_aggregation_subframes = max_subframes;
hw->max_tx_aggregation_subframes = max_subframes;
+ if (is_mt7990(mdev) && dev->has_eht)
+ hw->max_tx_aggregation_subframes = 512;
+
hw->netdev_features = NETIF_F_RXCSUM;
if (mtk_wed_device_active(wed))
hw->netdev_features |= NETIF_F_HW_TC;
@@ -472,7 +478,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
- if (mt7996_has_background_radar(dev) &&
+ if (mt7996_eeprom_has_background_radar(dev) &&
(!mdev->dev->of_node ||
!of_property_read_bool(mdev->dev->of_node,
"mediatek,disable-radar-background")))
@@ -929,13 +935,13 @@ static int mt7996_variant_type_init(struct mt7996_dev *dev)
u8 var_type;
switch (mt76_chip(&dev->mt76)) {
- case 0x7990:
+ case MT7996_DEVICE_ID:
if (val & MT_PAD_GPIO_2ADIE_TBTC)
var_type = MT7996_VAR_TYPE_233;
else
var_type = MT7996_VAR_TYPE_444;
break;
- case 0x7992:
+ case MT7992_DEVICE_ID:
if (val & MT_PAD_GPIO_ADIE_SINGLE)
var_type = MT7992_VAR_TYPE_23;
else if (u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB_7992))
@@ -943,6 +949,9 @@ static int mt7996_variant_type_init(struct mt7996_dev *dev)
else
return -EINVAL;
break;
+ case MT7990_DEVICE_ID:
+ var_type = MT7990_VAR_TYPE_23;
+ break;
default:
return -EINVAL;
}
@@ -1061,10 +1070,10 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
- if (is_mt7996(phy->mt76->dev))
- *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3);
- else
+ if (is_mt7992(phy->mt76->dev))
*cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 4);
+ else
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
@@ -1107,18 +1116,17 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
- IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
elem->phy_cap_info[2] |= c;
c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
- if (is_mt7996(phy->mt76->dev))
- c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 * non_2g);
- else
+ if (is_mt7992(phy->mt76->dev))
c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 |
(IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 * non_2g);
+ else
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 * non_2g);
elem->phy_cap_info[4] |= c;
@@ -1318,6 +1326,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ eht_cap_elem->mac_cap_info[1] |=
+ IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK;
+
eht_cap_elem->phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index d89c06f47997..0dbd4662bc84 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -647,6 +647,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
}
+ /* IEEE 802.11 fragmentation can only be applied to unicast frames.
+ * Hence, drop fragments with multicast/broadcast RA.
+ * This check fixes vulnerabilities, like CVE-2020-26145.
+ */
+ if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
+ FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
+ return -EINVAL;
+
hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
if (hdr_trans && ieee80211_has_morefrags(fc)) {
if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
@@ -789,10 +797,13 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
if (ieee80211_is_action(fc) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK &&
- mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ if (is_mt7990(&dev->mt76))
+ txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
tid = MT_TX_ADDBA;
- else if (ieee80211_is_mgmt(hdr->frame_control))
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
tid = MT_TX_NORMAL;
+ }
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
FIELD_PREP(MT_TXD1_HDR_INFO,
@@ -987,12 +998,32 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
}
}
+static bool
+mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return true;
+
+ /* for SDO to bypass specific data frames */
+ if (!mt7996_has_wa(dev)) {
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return true;
+
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ !ieee80211_is_data_present(hdr->frame_control))
+ return true;
+ }
+
+ return false;
+}
+
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
@@ -1017,8 +1048,11 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return id;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
- pid, qid, 0);
+ memset(txwi_ptr, 0, MT_TXD_SIZE);
+ /* Non-QoS data is sent with an 802.11 header, so the host must fill in the txd */
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
+ pid, qid, 0);
txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
@@ -1035,13 +1069,15 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
txp->fw.nbuf = nbuf;
- txp->fw.flags =
- cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);
+ txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
if (!key)
txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
- if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
+ if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
if (vif) {
@@ -1179,6 +1215,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
void *end = data + len;
bool wake = false;
u16 total, count = 0;
+ u8 ver;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1192,7 +1229,8 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
}
- if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5))
+ ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
+ if (WARN_ON_ONCE(ver < 5))
return;
total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
@@ -1214,11 +1252,16 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
if (!sta)
- continue;
+ goto next;
msta_link = container_of(wcid, struct mt7996_sta_link,
wcid);
mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
+next:
+ /* ver 7 has a new DW with pair = 1, skip it */
+ if (ver == 7 && ((void *)(cur_info + 1) < end) &&
+ (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
+ cur_info++;
continue;
} else if (info & MT_TXFREE_INFO_HEADER) {
u32 tx_retries = 0, tx_failed = 0;
@@ -2411,16 +2454,15 @@ void mt7996_mac_work(struct work_struct *work)
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
+ int rdd_idx = mt7996_get_rdd_idx(phy, false);
+
+ if (rdd_idx < 0)
+ return;
- if (phy->rdd_state & BIT(0))
- mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
- MT_RX_SEL0, 0);
- if (phy->rdd_state & BIT(1))
- mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
- MT_RX_SEL0, 0);
+ mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}
-static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
+static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
int err, region;
@@ -2437,44 +2479,30 @@ static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
break;
}
- err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
- MT_RX_SEL0, region);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
if (err < 0)
return err;
- return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7996_dev *dev = phy->dev;
- u8 band_idx = phy->mt76->band_idx;
- int err;
+ int err, rdd_idx;
- /* start CAC */
- err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
- MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ rdd_idx = mt7996_get_rdd_idx(phy, false);
+ if (rdd_idx < 0)
+ return -EINVAL;
- err = mt7996_dfs_start_rdd(dev, band_idx);
+ /* start CAC */
+ err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
if (err < 0)
return err;
- phy->rdd_state |= BIT(band_idx);
-
- if (chandef->width == NL80211_CHAN_WIDTH_160 ||
- chandef->width == NL80211_CHAN_WIDTH_80P80) {
- err = mt7996_dfs_start_rdd(dev, 1);
- if (err < 0)
- return err;
-
- phy->rdd_state |= BIT(1);
- }
+ err = mt7996_dfs_start_rdd(dev, rdd_idx);
- return 0;
+ return err;
}
static int
@@ -2515,12 +2543,12 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
enum mt76_dfs_state dfs_state, prev_state;
- int err;
+ int err, rdd_idx = mt7996_get_rdd_idx(phy, false);
prev_state = phy->mt76->dfs_state;
dfs_state = mt76_phy_dfs_state(phy->mt76);
- if (prev_state == dfs_state)
+ if (prev_state == dfs_state || rdd_idx < 0)
return 0;
if (prev_state == MT_DFS_STATE_UNKNOWN)
@@ -2544,8 +2572,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
if (dfs_state == MT_DFS_STATE_CAC)
return 0;
- err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
if (err < 0) {
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
return err;
@@ -2555,8 +2582,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
return 0;
stop:
- err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
if (err < 0)
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 91c64e3a0860..78ae9f5cb176 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -68,11 +68,13 @@ static int mt7996_start(struct ieee80211_hw *hw)
static void mt7996_stop_phy(struct mt7996_phy *phy)
{
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev;
if (!phy || !test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
+ dev = phy->dev;
+
cancel_delayed_work_sync(&phy->mt76->mac_work);
mutex_lock(&dev->mt76.mutex);
@@ -414,11 +416,13 @@ static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy)
static void mt7996_set_monitor(struct mt7996_phy *phy, bool enabled)
{
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev;
if (!phy)
return;
+ dev = phy->dev;
+
if (enabled == !(phy->rxfilter & MT_WF_RFCR_DROP_OTHER_UC))
return;
@@ -684,7 +688,7 @@ mt7996_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
n_chains = hweight16(phy->mt76->chainmask);
- delta = mt76_tx_power_nss_delta(n_chains);
+ delta = mt76_tx_power_path_delta(n_chains);
*dbm = DIV_ROUND_UP(phy->mt76->txpower_cur + delta, 2);
return 0;
@@ -987,7 +991,7 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
unsigned int link_id;
- int err;
+ int err = 0;
for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf;
@@ -998,16 +1002,22 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
continue;
link_conf = link_conf_dereference_protected(vif, link_id);
- if (!link_conf)
+ if (!link_conf) {
+ err = -EINVAL;
goto error_unlink;
+ }
link = mt7996_vif_link(dev, vif, link_id);
- if (!link)
+ if (!link) {
+ err = -EINVAL;
goto error_unlink;
+ }
link_sta = link_sta_dereference_protected(sta, link_id);
- if (!link_sta)
+ if (!link_sta) {
+ err = -EINVAL;
goto error_unlink;
+ }
err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
link_id);
@@ -1244,7 +1254,7 @@ unlock:
static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- int i, ret;
+ int i, ret = 0;
mutex_lock(&dev->mt76.mutex);
@@ -1518,7 +1528,8 @@ mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
u8 shift = dev->chainshift[band_idx];
phy->mt76->chainmask = tx_ant & phy->orig_chainmask;
- phy->mt76->antenna_mask = phy->mt76->chainmask >> shift;
+ phy->mt76->antenna_mask = (phy->mt76->chainmask >> shift) &
+ phy->orig_antenna_mask;
mt76_set_stream_caps(phy->mt76, true);
mt7996_set_stream_vht_txbf_caps(phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index ddd555942c73..f0adc0b4b8b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -13,7 +13,7 @@
#define fw_name(_dev, name, ...) ({ \
char *_fw; \
switch (mt76_chip(&(_dev)->mt76)) { \
- case 0x7992: \
+ case MT7992_DEVICE_ID: \
switch ((_dev)->var.type) { \
case MT7992_VAR_TYPE_23: \
_fw = MT7992_##name##_23; \
@@ -22,7 +22,10 @@
_fw = MT7992_##name; \
} \
break; \
- case 0x7990: \
+ case MT7990_DEVICE_ID: \
+ _fw = MT7990_##name; \
+ break; \
+ case MT7996_DEVICE_ID: \
default: \
switch ((_dev)->var.type) { \
case MT7996_VAR_TYPE_233: \
@@ -265,7 +268,7 @@ mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
txd = (__le32 *)skb_push(skb, txd_len);
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state) && mt7996_has_wa(dev))
qid = MT_MCUQ_WA;
else
qid = MT_MCUQ_WM;
@@ -335,8 +338,12 @@ exit:
int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
{
struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
__le32 args[3];
- } req = {
+ } __packed req = {
.args = {
cpu_to_le32(a1),
cpu_to_le32(a2),
@@ -344,7 +351,16 @@ int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
},
};
- return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false);
+ if (mt7996_has_wa(dev))
+ return mt76_mcu_send_msg(&dev->mt76, cmd, &req.args,
+ sizeof(req.args), false);
+
+ req.tag = cpu_to_le16(cmd == MCU_WA_PARAM_CMD(QUERY) ? UNI_CMD_SDO_QUERY :
+ UNI_CMD_SDO_SET);
+ req.len = cpu_to_le16(sizeof(req) - 4);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_UNI_CMD(SDO), &req,
+ sizeof(req), false);
}
static void
@@ -364,21 +380,27 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
r = (struct mt7996_mcu_rdd_report *)skb->data;
- if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
- return;
-
- if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy)
- return;
-
- if (r->band_idx == MT_RX_SEL2)
+ switch (r->rdd_idx) {
+ case MT_RDD_IDX_BAND2:
+ mphy = dev->mt76.phys[MT_BAND2];
+ break;
+ case MT_RDD_IDX_BAND1:
+ mphy = dev->mt76.phys[MT_BAND1];
+ break;
+ case MT_RDD_IDX_BACKGROUND:
+ if (!dev->rdd2_phy)
+ return;
mphy = dev->rdd2_phy->mt76;
- else
- mphy = dev->mt76.phys[r->band_idx];
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Unknown RDD idx %d\n", r->rdd_idx);
+ return;
+ }
if (!mphy)
return;
- if (r->band_idx == MT_RX_SEL2)
+ if (r->rdd_idx == MT_RDD_IDX_BACKGROUND)
cfg80211_background_radar_event(mphy->hw->wiphy,
&dev->rdd2_chandef,
GFP_ATOMIC);
@@ -2242,9 +2264,6 @@ mt7996_mcu_sta_mld_setup_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
if (!link)
continue;
- if (!msta_link)
- continue;
-
mld_setup_link->wcid = cpu_to_le16(msta_link->wcid.idx);
mld_setup_link->bss_idx = link->mt76.idx;
mld_setup_link++;
@@ -3011,6 +3030,9 @@ static int mt7996_load_ram(struct mt7996_dev *dev)
if (ret)
return ret;
+ if (!mt7996_has_wa(dev))
+ return 0;
+
ret = __mt7996_load_ram(dev, "DSP", fw_name(dev, FIRMWARE_DSP),
MT7996_RAM_TYPE_DSP);
if (ret)
@@ -3021,10 +3043,9 @@ static int mt7996_load_ram(struct mt7996_dev *dev)
}
static int
-mt7996_firmware_state(struct mt7996_dev *dev, bool wa)
+mt7996_firmware_state(struct mt7996_dev *dev, u8 fw_state)
{
- u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE,
- wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD);
+ u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE, fw_state);
if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
state, 1000)) {
@@ -3056,13 +3077,14 @@ mt7996_mcu_restart(struct mt76_dev *dev)
static int mt7996_load_firmware(struct mt7996_dev *dev)
{
+ u8 fw_state;
int ret;
/* make sure fw is in download state */
- if (mt7996_firmware_state(dev, false)) {
+ if (mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD)) {
/* restart firmware once */
mt7996_mcu_restart(&dev->mt76);
- ret = mt7996_firmware_state(dev, false);
+ ret = mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD);
if (ret) {
dev_err(dev->mt76.dev,
"Firmware is not ready for download\n");
@@ -3078,7 +3100,8 @@ static int mt7996_load_firmware(struct mt7996_dev *dev)
if (ret)
return ret;
- ret = mt7996_firmware_state(dev, true);
+ fw_state = mt7996_has_wa(dev) ? FW_STATE_RDY : FW_STATE_NORMAL_TRX;
+ ret = mt7996_firmware_state(dev, fw_state);
if (ret)
return ret;
@@ -3216,13 +3239,15 @@ int mt7996_mcu_init_firmware(struct mt7996_dev *dev)
if (ret)
return ret;
- ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0);
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0);
+ if (ret)
+ return ret;
- ret = mt7996_mcu_set_mwds(dev, 1);
- if (ret)
- return ret;
+ ret = mt7996_mcu_set_mwds(dev, 1);
+ if (ret)
+ return ret;
+ }
ret = mt7996_mcu_init_rx_airtime(dev);
if (ret)
@@ -3248,7 +3273,7 @@ int mt7996_mcu_init(struct mt7996_dev *dev)
void mt7996_mcu_exit(struct mt7996_dev *dev)
{
mt7996_mcu_restart(&dev->mt76);
- if (mt7996_firmware_state(dev, false)) {
+ if (mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
goto out;
}
@@ -3535,11 +3560,10 @@ int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
struct cfg80211_chan_def *chandef)
{
struct mt7996_dev *dev = phy->dev;
- int err, region;
+ int err, region, rdd_idx = mt7996_get_rdd_idx(phy, true);
if (!chandef) { /* disable offchain */
- err = mt7996_mcu_rdd_cmd(dev, RDD_STOP, MT_RX_SEL2,
- 0, 0);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
if (err)
return err;
@@ -3565,8 +3589,7 @@ int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
break;
}
- return mt7996_mcu_rdd_cmd(dev, RDD_START, MT_RX_SEL2,
- 0, region);
+ return mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
}
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
@@ -4433,8 +4456,7 @@ int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable)
&req, sizeof(req), true);
}
-int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
- u8 rx_sel, u8 val)
+int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 rdd_idx, u8 val)
{
struct {
u8 _rsv[4];
@@ -4451,8 +4473,7 @@ int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
.tag = cpu_to_le16(UNI_RDD_CTRL_PARM),
.len = cpu_to_le16(sizeof(req) - 4),
.ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
+ .rdd_idx = rdd_idx,
.val = val,
};
@@ -4742,7 +4763,26 @@ int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode)
mode > mt76_connac_lmac_mapping(IEEE80211_AC_VO))
return -EINVAL;
+ if (!mt7996_has_wa(dev)) {
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 cp_mode;
+ u8 rsv[3];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_CMD_SDO_CP_MODE),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .cp_mode = mode,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_UNI_CMD(SDO),
+ &req, sizeof(req), false);
+ }
+
cp_mode = cpu_to_le32(mode);
+
return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(CP_SUPPORT),
&cp_mode, sizeof(cp_mode), true);
}
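
When no WA co-processor exists (mt7990), mt7996_mcu_wa_cmd() above wraps the same three arguments in a uni-cmd TLV and sends it as MCU_WA_UNI_CMD(SDO). The wire layout it assumes, restated as a standalone sketch:

struct sdo_req_sketch {
	u8 _rsv[4];	/* reserved header, excluded from len */
	__le16 tag;	/* UNI_CMD_SDO_SET or UNI_CMD_SDO_QUERY */
	__le16 len;	/* sizeof(req) - 4, i.e. TLV only */
	__le32 args[3];	/* a1..a3, little endian */
} __packed;
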
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 2ab6a53bee86..130ea95626d5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -69,7 +69,7 @@ struct mt7996_mcu_rdd_report {
__le16 tag;
__le16 len;
- u8 band_idx;
+ u8 rdd_idx;
u8 long_detected;
u8 constant_prf_detected;
u8 staggered_prf_detected;
@@ -832,13 +832,13 @@ enum {
sizeof(struct sta_rec_eht_mld) + \
sizeof(struct tlv))
-#define MT7996_MAX_BEACON_SIZE 1338
#define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
sizeof(struct bss_bcn_content_tlv) + \
4 + MT_TXD_SIZE + \
sizeof(struct bss_bcn_cntdwn_tlv) + \
sizeof(struct bss_bcn_mbss_tlv))
-#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
+#define MT7996_MAX_BSS_OFFLOAD_SIZE 2048
+#define MT7996_MAX_BEACON_SIZE (MT7996_MAX_BSS_OFFLOAD_SIZE - \
MT7996_BEACON_UPDATE_SIZE)
enum {
@@ -938,6 +938,12 @@ enum {
};
enum {
+ UNI_CMD_SDO_SET = 1,
+ UNI_CMD_SDO_QUERY,
+ UNI_CMD_SDO_CP_MODE = 6,
+};
+
+enum {
MT7996_SEC_MODE_PLAIN,
MT7996_SEC_MODE_AES,
MT7996_SEC_MODE_SCRAMBLE,
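
The beacon-size macros in the mcu.h hunk above invert their dependency: the BSS offload buffer is now the fixed quantity (2048 bytes) and the per-beacon budget is derived from it, rather than the other way around. A build-time restatement of that invariant — sketch only, not part of the patch:

#include <linux/build_bug.h>

static_assert(MT7996_MAX_BEACON_SIZE ==
	      MT7996_MAX_BSS_OFFLOAD_SIZE - MT7996_BEACON_UPDATE_SIZE);
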
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 13b188e281bd..30b40f4a91be 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -54,6 +54,17 @@ static const u32 mt7996_offs[] = {
[MIB_BSCR7] = 0x9e8,
[MIB_BSCR17] = 0xa10,
[MIB_TRDR1] = 0xa28,
+ [HIF_REMAP_L1] = 0x24,
+ [HIF_REMAP_BASE_L1] = 0x130000,
+ [HIF_REMAP_L2] = 0x1b4,
+ [HIF_REMAP_BASE_L2] = 0x1000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_END] = 0x7c3fffff,
+ [WTBLON_WDUCR] = 0x370,
+ [WTBL_UPDATE] = 0x380,
+ [WTBL_ITCR] = 0x3b0,
+ [WTBL_ITCR0] = 0x3b8,
+ [WTBL_ITCR1] = 0x3bc,
};
static const u32 mt7992_offs[] = {
@@ -80,6 +91,54 @@ static const u32 mt7992_offs[] = {
[MIB_BSCR7] = 0xae4,
[MIB_BSCR17] = 0xb0c,
[MIB_TRDR1] = 0xb24,
+ [HIF_REMAP_L1] = 0x8,
+ [HIF_REMAP_BASE_L1] = 0x40000,
+ [HIF_REMAP_L2] = 0x1b4,
+ [HIF_REMAP_BASE_L2] = 0x1000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_END] = 0x7c3fffff,
+ [WTBLON_WDUCR] = 0x370,
+ [WTBL_UPDATE] = 0x380,
+ [WTBL_ITCR] = 0x3b0,
+ [WTBL_ITCR0] = 0x3b8,
+ [WTBL_ITCR1] = 0x3bc,
+};
+
+static const u32 mt7990_offs[] = {
+ [MIB_RVSR0] = 0x800,
+ [MIB_RVSR1] = 0x804,
+ [MIB_BTSCR5] = 0x868,
+ [MIB_BTSCR6] = 0x878,
+ [MIB_RSCR1] = 0x890,
+ [MIB_RSCR27] = 0xa38,
+ [MIB_RSCR28] = 0xa3c,
+ [MIB_RSCR29] = 0xa40,
+ [MIB_RSCR30] = 0xa44,
+ [MIB_RSCR31] = 0xa48,
+ [MIB_RSCR33] = 0xa50,
+ [MIB_RSCR35] = 0xa58,
+ [MIB_RSCR36] = 0xa5c,
+ [MIB_BSCR0] = 0xbb8,
+ [MIB_BSCR1] = 0xbbc,
+ [MIB_BSCR2] = 0xbc0,
+ [MIB_BSCR3] = 0xbc4,
+ [MIB_BSCR4] = 0xbc8,
+ [MIB_BSCR5] = 0xbcc,
+ [MIB_BSCR6] = 0xbd0,
+ [MIB_BSCR7] = 0xbd4,
+ [MIB_BSCR17] = 0xbfc,
+ [MIB_TRDR1] = 0xc14,
+ [HIF_REMAP_L1] = 0x8,
+ [HIF_REMAP_BASE_L1] = 0x40000,
+ [HIF_REMAP_L2] = 0x1b8,
+ [HIF_REMAP_BASE_L2] = 0x110000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_END] = 0x7cffffff,
+ [WTBLON_WDUCR] = 0x400,
+ [WTBL_UPDATE] = 0x410,
+ [WTBL_ITCR] = 0x440,
+ [WTBL_ITCR0] = 0x448,
+ [WTBL_ITCR1] = 0x44c,
};
static const struct __map mt7996_reg_map[] = {
@@ -135,14 +194,83 @@ static const struct __map mt7996_reg_map[] = {
{ 0x0, 0x0, 0x0 }, /* imply end of search */
};
+static const struct __map mt7990_reg_map[] = {
+ {0x54000000, 0x02000, 0x1000}, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ {0x55000000, 0x03000, 0x1000}, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ {0x56000000, 0x04000, 0x1000}, /* WFDMA_2 (Reserved) */
+ {0x57000000, 0x05000, 0x1000}, /* WFDMA_3 (MCU wrap CR) */
+ {0x58000000, 0x06000, 0x1000}, /* WFDMA_4 (PCIE1 MCU DMA0 (MEM_DMA)) */
+ {0x59000000, 0x07000, 0x1000}, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ {0x820c0000, 0x08000, 0x4000}, /* WF_UMAC_TOP (PLE) */
+ {0x820c8000, 0x0c000, 0x2000}, /* WF_UMAC_TOP (PSE) */
+ {0x820cc000, 0x0e000, 0x2000}, /* WF_UMAC_TOP (PP) */
+ {0x820e0000, 0x20000, 0x0400}, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ {0x820e1000, 0x20400, 0x0200}, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ {0x820e2000, 0x20800, 0x0400}, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ {0x820e3000, 0x20c00, 0x0400}, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ {0x820e4000, 0x21000, 0x0400}, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ {0x820e5000, 0x21400, 0x0800}, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ {0x820ce000, 0x21c00, 0x0200}, /* WF_LMAC_TOP (WF_SEC) */
+ {0x820e7000, 0x21e00, 0x0200}, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ {0x820cf000, 0x22000, 0x1000}, /* WF_LMAC_TOP (WF_PF) */
+ {0x820e9000, 0x23400, 0x0200}, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ {0x820ea000, 0x24000, 0x0200}, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ {0x820eb000, 0x24200, 0x0400}, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ {0x820ec000, 0x24600, 0x0200}, /* WF_LMAC_TOP BN0 (WF_INT) */
+ {0x820ed000, 0x24800, 0x0800}, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ {0x820ca000, 0x26000, 0x2000}, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ {0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
+ {0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
+ {0x820f0000, 0xa0000, 0x0400}, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ {0x820f1000, 0xa0600, 0x0200}, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ {0x820f2000, 0xa0800, 0x0400}, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ {0x820f3000, 0xa0c00, 0x0400}, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ {0x820f4000, 0xa1000, 0x0400}, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ {0x820f5000, 0xa1400, 0x0800}, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ {0x820f7000, 0xa1e00, 0x0200}, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ {0x820f9000, 0xa3400, 0x0200}, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ {0x820fa000, 0xa4000, 0x0200}, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ {0x820fb000, 0xa4200, 0x0400}, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ {0x820fc000, 0xa4600, 0x0200}, /* WF_LMAC_TOP BN1 (WF_INT) */
+ {0x820fd000, 0xa4800, 0x0800}, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ {0x820cc000, 0xa5000, 0x2000}, /* WF_LMAC_TOP BN1 (WF_MUCOP) */
+ {0x820c4000, 0xa8000, 0x4000}, /* WF_LMAC_TOP (WF_UWTBL) */
+ {0x81030000, 0xae000, 0x100}, /* WFSYS_AON part 1 */
+ {0x81031000, 0xae100, 0x100}, /* WFSYS_AON part 2 */
+ {0x81032000, 0xae200, 0x100}, /* WFSYS_AON part 3 */
+ {0x81033000, 0xae300, 0x100}, /* WFSYS_AON part 4 */
+ {0x81034000, 0xae400, 0x100}, /* WFSYS_AON part 5 */
+ {0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */
+ {0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */
+ {0x81040000, 0x120000, 0x1000}, /* WF_MCU_CFG_ON */
+ {0x81050000, 0x121000, 0x1000}, /* WF_MCU_EINT */
+ {0x81060000, 0x122000, 0x1000}, /* WF_MCU_GPT */
+ {0x81070000, 0x123000, 0x1000}, /* WF_MCU_WDT */
+ {0x80010000, 0x124000, 0x1000}, /* WF_AXIDMA */
+ {0x7c020000, 0xd0000, 0x10000}, /* CONN_INFRA, wfdma for CODA flow use */
+ {0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top for CODA flow use */
+ {0x20020000, 0xd0000, 0x10000}, /* CONN_INFRA, wfdma */
+ {0x20060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
+ {0x7c000000, 0xf0000, 0x10000}, /* CONN_INFRA */
+ {0x70020000, 0x1f0000, 0x9000}, /* PCIE remapping (AP2CONN) */
+ {0x0, 0x0, 0x0}, /* imply end of search */
+};
+
static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
{
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ u32 l1_mask, val;
- dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
- MT_HIF_REMAP_L1_MASK,
- FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
+ if (is_mt7996(&dev->mt76)) {
+ l1_mask = MT_HIF_REMAP_L1_MASK_7996;
+ val = FIELD_PREP(MT_HIF_REMAP_L1_MASK_7996, base);
+ } else {
+ l1_mask = MT_HIF_REMAP_L1_MASK;
+ val = FIELD_PREP(MT_HIF_REMAP_L1_MASK, base);
+ }
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1, l1_mask, val);
/* use read to push write */
dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
@@ -151,18 +279,41 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
{
- u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
- u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+ u32 offset, base, l2_mask, val;
- dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
- MT_HIF_REMAP_L2_MASK,
- FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
+ if (is_mt7990(&dev->mt76)) {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_7990, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE_7990, addr);
+ l2_mask = MT_HIF_REMAP_L2_MASK_7990;
+ val = FIELD_PREP(MT_HIF_REMAP_L2_MASK_7990, base);
+ } else {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+ l2_mask = MT_HIF_REMAP_L2_MASK;
+ val = FIELD_PREP(MT_HIF_REMAP_L2_MASK, base);
+ }
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2, l2_mask, val);
/* use read to push write */
dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
return MT_HIF_REMAP_BASE_L2 + offset;
}
+static u32 mt7996_reg_map_cbtop(struct mt7996_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_CBTOP_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_CBTOP_BASE, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_CBTOP,
+ MT_HIF_REMAP_CBTOP_MASK,
+ FIELD_PREP(MT_HIF_REMAP_CBTOP_MASK, base));
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_CBTOP);
+
+ return MT_HIF_REMAP_BASE_CBTOP + offset;
+}
+
static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
{
int i;
@@ -193,17 +344,20 @@ static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr)
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
return mt7996_reg_map_l1(dev, addr);
- if (dev_is_pci(dev->mt76.dev) &&
- ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
- addr >= MT_CBTOP2_PHY_START))
- return mt7996_reg_map_l1(dev, addr);
-
/* CONN_INFRA: convert to physical addr and use layer 1 remap */
if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) {
addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE;
return mt7996_reg_map_l1(dev, addr);
}
+ if (dev_is_pci(dev->mt76.dev) &&
+ ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+ addr >= MT_CBTOP2_PHY_START)) {
+ if (is_mt7990(&dev->mt76))
+ return mt7996_reg_map_cbtop(dev, addr);
+ return mt7996_reg_map_l1(dev, addr);
+ }
+
return mt7996_reg_map_l2(dev, addr);
}
@@ -323,6 +477,9 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.base = devm_ioremap(dev->mt76.dev,
pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
+ if (!wed->wlan.base)
+ return -ENOMEM;
+
wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
if (hif2) {
@@ -350,7 +507,7 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
MT7996_RXQ_BAND0 * MT_RING_SIZE;
- wed->wlan.id = 0x7991;
+ wed->wlan.id = MT7996_DEVICE_ID_2;
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
} else {
wed->wlan.hw_rro = dev->has_rro; /* default on */
@@ -443,18 +600,24 @@ static int mt7996_mmio_init(struct mt76_dev *mdev,
spin_lock_init(&dev->reg_lock);
switch (device_id) {
- case 0x7990:
+ case MT7996_DEVICE_ID:
dev->reg.base = mt7996_reg_base;
dev->reg.offs_rev = mt7996_offs;
dev->reg.map = mt7996_reg_map;
dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map);
break;
- case 0x7992:
+ case MT7992_DEVICE_ID:
dev->reg.base = mt7996_reg_base;
dev->reg.offs_rev = mt7992_offs;
dev->reg.map = mt7996_reg_map;
dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map);
break;
+ case MT7990_DEVICE_ID:
+ dev->reg.base = mt7996_reg_base;
+ dev->reg.offs_rev = mt7990_offs;
+ dev->reg.map = mt7990_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7990_reg_map);
+ break;
default:
return -EINVAL;
}
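
Taken together, the mmio.c hunks above leave the remap dispatch looking like this (summary sketch, derived from __mt7996_reg_remap_addr() as patched):

/*
 *   WFSYS0/1 physical ranges   -> mt7996_reg_map_l1()
 *   CONN_INFRA MCU range       -> rebased to MT_INFRA_BASE, then L1
 *   CBTOP1/CBTOP2 (PCI only)   -> mt7996_reg_map_cbtop() on mt7990,
 *                                 mt7996_reg_map_l1() otherwise
 *   everything else            -> mt7996_reg_map_l2()
 */
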
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 43e646ed6094..1ad6bc046f7c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -14,7 +14,7 @@
#define MT7996_MAX_RADIOS 3
#define MT7996_MAX_INTERFACES 19 /* per-band */
#define MT7996_MAX_WMM_SETS 4
-#define MT7996_WTBL_BMC_SIZE (is_mt7992(&dev->mt76) ? 32 : 64)
+#define MT7996_WTBL_BMC_SIZE (is_mt7996(&dev->mt76) ? 64 : 32)
#define MT7996_WTBL_RESERVED (mt7996_wtbl_size(dev) - 1)
#define MT7996_WTBL_STA (MT7996_WTBL_RESERVED - \
mt7996_max_interface_num(dev))
@@ -29,6 +29,16 @@
#define MT7996_RX_RING_SIZE 1536
#define MT7996_RX_MCU_RING_SIZE 512
#define MT7996_RX_MCU_RING_SIZE_WA 1024
+/* scatter-gather of MCU events is not supported in connac3 */
+#define MT7996_RX_MCU_BUF_SIZE (2048 + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define MT7996_DEVICE_ID 0x7990
+#define MT7996_DEVICE_ID_2 0x7991
+#define MT7992_DEVICE_ID 0x7992
+#define MT7992_DEVICE_ID_2 0x799a
+#define MT7990_DEVICE_ID 0x7993
+#define MT7990_DEVICE_ID_2 0x799b
#define MT7996_FIRMWARE_WA "mediatek/mt7996/mt7996_wa.bin"
#define MT7996_FIRMWARE_WM "mediatek/mt7996/mt7996_wm.bin"
@@ -50,6 +60,11 @@
#define MT7992_FIRMWARE_DSP_23 "mediatek/mt7996/mt7992_dsp_23.bin"
#define MT7992_ROM_PATCH_23 "mediatek/mt7996/mt7992_rom_patch_23.bin"
+#define MT7990_FIRMWARE_WA ""
+#define MT7990_FIRMWARE_WM "mediatek/mt7996/mt7990_wm.bin"
+#define MT7990_FIRMWARE_DSP ""
+#define MT7990_ROM_PATCH "mediatek/mt7996/mt7990_rom_patch.bin"
+
#define MT7996_EEPROM_DEFAULT "mediatek/mt7996/mt7996_eeprom.bin"
#define MT7996_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7996_eeprom_2i5i6i.bin"
#define MT7996_EEPROM_DEFAULT_233 "mediatek/mt7996/mt7996_eeprom_233.bin"
@@ -61,6 +76,9 @@
#define MT7992_EEPROM_DEFAULT_23 "mediatek/mt7996/mt7992_eeprom_23.bin"
#define MT7992_EEPROM_DEFAULT_23_INT "mediatek/mt7996/mt7992_eeprom_23_2i5i.bin"
+#define MT7990_EEPROM_DEFAULT "mediatek/mt7996/mt7990_eeprom.bin"
+#define MT7990_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7990_eeprom_2i5i.bin"
+
#define MT7996_EEPROM_SIZE 7680
#define MT7996_EEPROM_BLOCK_SIZE 16
#define MT7996_TOKEN_SIZE 16384
@@ -131,6 +149,10 @@ enum mt7992_var_type {
MT7992_VAR_TYPE_23,
};
+enum mt7990_var_type {
+ MT7990_VAR_TYPE_23,
+};
+
enum mt7996_fem_type {
MT7996_FEM_EXT,
MT7996_FEM_INT,
@@ -165,6 +187,8 @@ enum mt7996_rxq_id {
MT7996_RXQ_TXFREE1 = 9,
MT7996_RXQ_TXFREE2 = 7,
MT7996_RXQ_RRO_IND = 0,
+ MT7990_RXQ_TXFREE0 = 6,
+ MT7990_RXQ_TXFREE1 = 7,
};
struct mt7996_twt_flow {
@@ -281,8 +305,6 @@ struct mt7996_phy {
s16 coverage_class;
u8 slottime;
- u8 rdd_state;
-
u16 beacon_rate;
u32 rx_ampdu_ts;
@@ -293,6 +315,7 @@ struct mt7996_phy {
struct mt76_channel_state state_ts;
u16 orig_chainmask;
+ u16 orig_antenna_mask;
bool has_aux_rx;
bool counter_reset;
@@ -405,10 +428,10 @@ enum {
__MT_WFDMA_MAX,
};
-enum {
- MT_RX_SEL0,
- MT_RX_SEL1,
- MT_RX_SEL2, /* monitor chain */
+enum rdd_idx {
+ MT_RDD_IDX_BAND2, /* RDD idx for band idx 2 */
+ MT_RDD_IDX_BAND1, /* RDD idx for band idx 1 */
+ MT_RDD_IDX_BACKGROUND, /* RDD idx for background chain */
};
enum mt7996_rdd_cmd {
@@ -427,6 +450,21 @@ enum mt7996_rdd_cmd {
RDD_IRQ_OFF,
};
+static inline int
+mt7996_get_rdd_idx(struct mt7996_phy *phy, bool is_background)
+{
+ if (!phy->mt76->cap.has_5ghz)
+ return -1;
+
+ if (is_background)
+ return MT_RDD_IDX_BACKGROUND;
+
+ if (phy->mt76->band_idx == MT_BAND2)
+ return MT_RDD_IDX_BAND2;
+
+ return MT_RDD_IDX_BAND1;
+}
+
static inline struct mt7996_dev *
mt7996_hw_dev(struct ieee80211_hw *hw)
{
@@ -461,31 +499,12 @@ mt7996_phy3(struct mt7996_dev *dev)
static inline bool
mt7996_band_valid(struct mt7996_dev *dev, u8 band)
{
- if (is_mt7992(&dev->mt76))
+ if (!is_mt7996(&dev->mt76))
return band <= MT_BAND1;
return band <= MT_BAND2;
}
-static inline bool
-mt7996_has_background_radar(struct mt7996_dev *dev)
-{
- switch (mt76_chip(&dev->mt76)) {
- case 0x7990:
- if (dev->var.type == MT7996_VAR_TYPE_233)
- return false;
- break;
- case 0x7992:
- if (dev->var.type == MT7992_VAR_TYPE_23)
- return false;
- break;
- default:
- return false;
- }
-
- return true;
-}
-
static inline struct mt7996_phy *
mt7996_band_phy(struct mt7996_dev *dev, enum nl80211_band band)
{
@@ -549,6 +568,7 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy);
int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
struct ieee80211_channel *chan);
s8 mt7996_eeprom_get_power_delta(struct mt7996_dev *dev, int band);
+bool mt7996_eeprom_has_background_radar(struct mt7996_dev *dev);
int mt7996_dma_init(struct mt7996_dev *dev);
void mt7996_dma_reset(struct mt7996_dev *dev, bool force);
void mt7996_dma_prefetch(struct mt7996_dev *dev);
@@ -637,8 +657,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy);
int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state);
int mt7996_mcu_set_thermal_protect(struct mt7996_phy *phy, bool enable);
int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy);
-int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
- u8 rx_sel, u8 val);
+int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 rdd_idx, u8 val);
int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
struct cfg80211_chan_def *chandef);
int mt7996_mcu_set_fixed_rate_table(struct mt7996_phy *phy, u8 table_idx,
@@ -704,6 +723,11 @@ static inline u16 mt7996_rx_chainmask(struct mt7996_phy *phy)
return tx_chainmask | (BIT(fls(tx_chainmask)) * phy->has_aux_rx);
}
+static inline bool mt7996_has_wa(struct mt7996_dev *dev)
+{
+ return !is_mt7990(&dev->mt76);
+}
+
void mt7996_mac_init(struct mt7996_dev *dev);
u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw);
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask);
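
The new mt7996_has_wa() helper above gates every WA-specific path in this series. One consequence, mirroring the mt7996_load_firmware() hunk earlier, is that the ready-state to poll for differs by chip — a sketch:

/* sketch: mt7990 has no WA co-processor, so a different terminal
 * firmware state is expected after download
 */
static u8 mt7996_expected_fw_state_sketch(struct mt7996_dev *dev)
{
	return mt7996_has_wa(dev) ? FW_STATE_RDY : FW_STATE_NORMAL_TRX;
}
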
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
index 04056181368a..19e99bc1c6c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
@@ -16,14 +16,16 @@ static DEFINE_SPINLOCK(hif_lock);
static u32 hif_idx;
static const struct pci_device_id mt7996_pci_device_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7990) },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7992) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID) },
{ },
};
static const struct pci_device_id mt7996_hif_device_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7991) },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x799a) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID_2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID_2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID_2) },
{ },
};
@@ -63,8 +65,9 @@ static struct mt7996_hif *mt7996_pci_init_hif2(struct pci_dev *pdev)
{
hif_idx++;
- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7991, NULL) &&
- !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x799a, NULL))
+ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID_2, NULL) &&
+ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID_2, NULL) &&
+ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID_2, NULL))
return NULL;
writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
@@ -121,7 +124,9 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
mt76_pci_disable_aspm(pdev);
- if (id->device == 0x7991 || id->device == 0x799a)
+ if (id->device == MT7996_DEVICE_ID_2 ||
+ id->device == MT7992_DEVICE_ID_2 ||
+ id->device == MT7990_DEVICE_ID_2)
return mt7996_pci_hif2_probe(pdev);
dev = mt7996_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
@@ -256,3 +261,5 @@ MODULE_FIRMWARE(MT7992_FIRMWARE_WA);
MODULE_FIRMWARE(MT7992_FIRMWARE_WM);
MODULE_FIRMWARE(MT7992_FIRMWARE_DSP);
MODULE_FIRMWARE(MT7992_ROM_PATCH);
+MODULE_FIRMWARE(MT7990_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7990_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index 1876a968c92d..e942c0058731 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -64,6 +64,17 @@ enum offs_rev {
MIB_BSCR7,
MIB_BSCR17,
MIB_TRDR1,
+ HIF_REMAP_L1,
+ HIF_REMAP_BASE_L1,
+ HIF_REMAP_L2,
+ HIF_REMAP_BASE_L2,
+ CBTOP1_PHY_END,
+ INFRA_MCU_END,
+ WTBLON_WDUCR,
+ WTBL_UPDATE,
+ WTBL_ITCR,
+ WTBL_ITCR0,
+ WTBL_ITCR1,
__MT_OFFS_MAX,
};
@@ -291,19 +302,19 @@ enum offs_rev {
/* WTBLON TOP */
#define MT_WTBLON_TOP_BASE 0x820d4000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x370)
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_WDUCR))
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(4, 0)
-#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x380)
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE))
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(11, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(14)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_ITCR MT_WTBLON_TOP(0x3b0)
+#define MT_WTBL_ITCR MT_WTBLON_TOP(__OFFS(WTBL_ITCR))
#define MT_WTBL_ITCR_WR BIT(16)
#define MT_WTBL_ITCR_EXEC BIT(31)
-#define MT_WTBL_ITDR0 MT_WTBLON_TOP(0x3b8)
-#define MT_WTBL_ITDR1 MT_WTBLON_TOP(0x3bc)
+#define MT_WTBL_ITDR0 MT_WTBLON_TOP(__OFFS(WTBL_ITCR0))
+#define MT_WTBL_ITDR1 MT_WTBLON_TOP(__OFFS(WTBL_ITCR1))
#define MT_WTBL_SPE_IDX_SEL BIT(6)
/* WTBL */
@@ -483,7 +494,7 @@ enum offs_rev {
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
MT_MCUQ_ID(q) * 0x4)
-#define MT_RXQ_BAND1_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
MT_RXQ_ID(q) * 0x4)
#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
MT_TXQ_ID(q) * 0x4)
@@ -504,6 +515,8 @@ enum offs_rev {
#define MT_INT_RX_DONE_WA_TRI BIT(3)
#define MT_INT_RX_TXFREE_MAIN BIT(17)
#define MT_INT_RX_TXFREE_TRI BIT(15)
+#define MT_INT_RX_TXFREE_BAND0_MT7990 BIT(14)
+#define MT_INT_RX_TXFREE_BAND1_MT7990 BIT(15)
#define MT_INT_RX_DONE_BAND2_EXT BIT(23)
#define MT_INT_RX_TXFREE_EXT BIT(26)
#define MT_INT_MCU_CMD BIT(29)
@@ -576,27 +589,39 @@ enum offs_rev {
#define MT_MCU_CMD_WDT_MASK GENMASK(31, 30)
/* l1/l2 remap */
-#define MT_HIF_REMAP_L1 0x155024
-#define MT_HIF_REMAP_L1_MASK GENMASK(31, 16)
+#define CONN_BUS_CR_VON_BASE 0x155000
+#define MT_HIF_REMAP_L1 (CONN_BUS_CR_VON_BASE + __OFFS(HIF_REMAP_L1))
+#define MT_HIF_REMAP_L1_MASK_7996 GENMASK(31, 16)
+#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
-#define MT_HIF_REMAP_BASE_L1 0x130000
+#define MT_HIF_REMAP_BASE_L1 __OFFS(HIF_REMAP_BASE_L1)
-#define MT_HIF_REMAP_L2 0x1b4
+#define MT_HIF_REMAP_L2 __OFFS(HIF_REMAP_L2)
#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
-#define MT_HIF_REMAP_BASE_L2 0x1000
+#define MT_HIF_REMAP_L2_MASK_7990 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_OFFSET_7990 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_BASE_7990 GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L2 __OFFS(HIF_REMAP_BASE_L2)
+
+/* for mt7990 only */
+#define MT_HIF_REMAP_CBTOP 0x1f6554
+#define MT_HIF_REMAP_CBTOP_MASK GENMASK(15, 0)
+#define MT_HIF_REMAP_CBTOP_OFFSET GENMASK(15, 0)
+#define MT_HIF_REMAP_CBTOP_BASE GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_CBTOP 0x1c0000
#define MT_INFRA_BASE 0x18000000
#define MT_WFSYS0_PHY_START 0x18400000
#define MT_WFSYS1_PHY_START 0x18800000
#define MT_WFSYS1_PHY_END 0x18bfffff
#define MT_CBTOP1_PHY_START 0x70000000
-#define MT_CBTOP1_PHY_END 0x77ffffff
+#define MT_CBTOP1_PHY_END __OFFS(CBTOP1_PHY_END)
#define MT_CBTOP2_PHY_START 0xf0000000
#define MT_INFRA_MCU_START 0x7c000000
-#define MT_INFRA_MCU_END 0x7c3fffff
+#define MT_INFRA_MCU_END __OFFS(INFRA_MCU_END)
/* FW MODE SYNC */
#define MT_FW_ASSERT_CNT 0x02208274
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index cb46a39ef757..a229c6cab332 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -953,8 +953,8 @@ static void wilc_handle_listen_state_expired(struct work_struct *work)
static void listen_timer_cb(struct timer_list *t)
{
- struct host_if_drv *hif_drv = from_timer(hif_drv, t,
- remain_on_ch_timer);
+ struct host_if_drv *hif_drv = timer_container_of(hif_drv, t,
+ remain_on_ch_timer);
struct wilc_vif *vif = hif_drv->remain_on_ch_timer_vif;
int result;
struct host_if_msg *msg;
@@ -1075,7 +1075,8 @@ static void handle_scan_complete(struct work_struct *work)
static void timer_scan_cb(struct timer_list *t)
{
- struct host_if_drv *hif_drv = from_timer(hif_drv, t, scan_timer);
+ struct host_if_drv *hif_drv = timer_container_of(hif_drv, t,
+ scan_timer);
struct wilc_vif *vif = hif_drv->scan_timer_vif;
struct host_if_msg *msg;
int result;
@@ -1091,8 +1092,8 @@ static void timer_scan_cb(struct timer_list *t)
static void timer_connect_cb(struct timer_list *t)
{
- struct host_if_drv *hif_drv = from_timer(hif_drv, t,
- connect_timer);
+ struct host_if_drv *hif_drv = timer_container_of(hif_drv, t,
+ connect_timer);
struct wilc_vif *vif = hif_drv->connect_timer_vif;
struct host_if_msg *msg;
int result;
@@ -1497,7 +1498,7 @@ int wilc_hif_set_cfg(struct wilc_vif *vif, struct cfg_param_attr *param)
static void get_periodic_rssi(struct timer_list *t)
{
- struct wilc_vif *vif = from_timer(vif, t, periodic_rssi);
+ struct wilc_vif *vif = timer_container_of(vif, t, periodic_rssi);
if (!vif->hif_drv) {
netdev_err(vif->ndev, "%s: hif driver is NULL", __func__);
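
The from_timer() to timer_container_of() conversions here and in the drivers below are mechanical: both recover the containing object from the struct timer_list pointer passed to the callback, with identical semantics. A self-contained sketch (struct foo is hypothetical):

#include <linux/timer.h>

struct foo {
	struct timer_list my_timer;	/* hypothetical container */
};

static void foo_timer_cb(struct timer_list *t)
{
	struct foo *foo = timer_container_of(foo, t, my_timer);

	/* ... use foo; same result the old from_timer() produced ... */
}
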
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 10d2e2124ff8..d8b0b79dea1a 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -503,8 +503,10 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
(void *)buffer, buffer_len, complete_fn, context);
r = usb_submit_urb(urb, GFP_ATOMIC);
- if (r)
+ if (r) {
+ usb_free_urb(urb);
dev_err(&udev->dev, "Async write submit failed (%d)\n", r);
+ }
return r;
}
@@ -546,7 +548,7 @@ error:
static void slif_data_plane_sap_timer_callb(struct timer_list *t)
{
- struct plfxlc_usb *usb = from_timer(usb, t, tx.tx_retry_timer);
+ struct plfxlc_usb *usb = timer_container_of(usb, t, tx.tx_retry_timer);
plfxlc_send_packet_from_data_queue(usb);
timer_setup(&usb->tx.tx_retry_timer,
@@ -556,7 +558,7 @@ static void slif_data_plane_sap_timer_callb(struct timer_list *t)
static void sta_queue_cleanup_timer_callb(struct timer_list *t)
{
- struct plfxlc_usb *usb = from_timer(usb, t, sta_queue_cleanup);
+ struct plfxlc_usb *usb = timer_container_of(usb, t, sta_queue_cleanup);
struct plfxlc_usb_tx *tx = &usb->tx;
int sidx;
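
The plfxlc fix above closes a leak rooted in URB ownership: when usb_submit_urb() fails, the completion handler never runs, so the submitter still owns the URB and must free it. The pattern in isolation, assuming the URB was allocated with usb_alloc_urb():

static int submit_sketch(struct urb *urb)
{
	int r = usb_submit_urb(urb, GFP_ATOMIC);

	if (r)
		usb_free_urb(urb);	/* completion never runs on failure */
	/* on success, the completion handler owns and frees the URB */
	return r;
}
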
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index e5f553a1ea24..b7ea606bda08 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -9393,7 +9393,7 @@ static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx,
p0, p1, pf, idx0, idx1, ibit);
if (bidx != 5 && pf <= p0 && pf < p1) {
- idxf[iorq] = idxf[iorq];
+ /* no need to adjust idxf[] */;
} else if (p0 < p1) {
pf = p0;
idxf[iorq] = idx0 & 0x3F;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 6189edc1d8d7..e26feb8de658 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2220,7 +2220,8 @@ label_lps_done:
void rtl_watch_dog_timer_callback(struct timer_list *t)
{
- struct rtl_priv *rtlpriv = from_timer(rtlpriv, t, works.watchdog_timer);
+ struct rtl_priv *rtlpriv = timer_container_of(rtlpriv, t,
+ works.watchdog_timer);
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.watchdog_wq, 0);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 7537f04b1930..819cf519e66e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -58,17 +58,6 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
}
EXPORT_SYMBOL(rtl_rfreg_delay);
-void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
-{
- if (addr >= 0xf9 && addr <= 0xfe) {
- rtl_addr_delay(addr);
- } else {
- rtl_set_bbreg(hw, addr, MASKDWORD, data);
- udelay(1);
- }
-}
-EXPORT_SYMBOL(rtl_bb_delay);
-
static void rtl_fw_do_work(const struct firmware *firmware, void *context,
bool is_wow)
{
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h
index 42c2d9e13bb8..45225d89ac5e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.h
+++ b/drivers/net/wireless/realtek/rtlwifi/core.h
@@ -58,7 +58,6 @@ void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
void rtl_addr_delay(u32 addr);
void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
u32 mask, u32 data);
-void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
bool rtl_btc_status_false(void);
void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 0eafc4d125f9..898f597f70a9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -155,6 +155,16 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
((u8)init_aspm) == (PCI_EXP_LNKCTL_ASPM_L0S |
PCI_EXP_LNKCTL_ASPM_L1 | PCI_EXP_LNKCTL_CCC))
ppsc->support_aspm = false;
+
+ /* RTL8723BE cards found on some ASUSTek laptops, such as the F441U
+ * and X555UQ with subsystem ID 11ad:1723, are known to output large
+ * amounts of PCIe AER errors during and after boot up, causing
+ * heavy lags, poor network throughput, and occasional lock-ups.
+ */
+ if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8723BE &&
+ (rtlpci->pdev->subsystem_vendor == 0x11ad &&
+ rtlpci->pdev->subsystem_device == 0x1723))
+ ppsc->support_aspm = false;
}
static bool _rtl_pci_platform_switch_device_pci_aspm(
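
The rtlwifi quirk above hardcodes a single subsystem ID check. Purely as an illustration of the matching involved — a table like this is not in the patch, and the 0xb723 device ID for RTL8723BE is an assumption here:

static const struct pci_device_id rtl_aspm_denylist_sketch[] = {
	/* 10ec:b723, subsystem 11ad:1723 (ASUS F441U/X555UQ) */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REALTEK, 0xb723, 0x11ad, 0x1723) },
	{ }
};
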
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 5a34894a533b..f749d19ec5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1684,7 +1684,8 @@ static void rtl88e_dm_fast_ant_training(struct ieee80211_hw *hw)
void rtl88e_dm_fast_antenna_training_callback(struct timer_list *t)
{
struct rtl_priv *rtlpriv =
- from_timer(rtlpriv, t, works.fast_antenna_training_timer);
+ timer_container_of(rtlpriv, t,
+ works.fast_antenna_training_timer);
struct ieee80211_hw *hw = rtlpriv->hw;
rtl88e_dm_fast_ant_training(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 27f6c35ba0f9..d122f1eb345e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -237,8 +237,8 @@ static void _rtl88ee_set_fw_ps_rf_off_low_power(struct ieee80211_hw *hw)
void rtl88ee_fw_clk_off_timer_callback(struct timer_list *t)
{
- struct rtl_priv *rtlpriv = from_timer(rtlpriv, t,
- works.fw_clockoff_timer);
+ struct rtl_priv *rtlpriv = timer_container_of(rtlpriv, t,
+ works.fw_clockoff_timer);
struct ieee80211_hw *hw = rtlpriv->hw;
_rtl88ee_set_fw_ps_rf_off_low_power(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index e07402e73ba3..68f890050afb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -2055,11 +2055,6 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
}
-void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
-{
- return;
-}
-
static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
u32 cmdtableidx, u32 cmdtablesz, enum swchnlcmd_id cmdid,
u32 para1, u32 para2, u32 msdelay)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
index bbe9ef77225e..a9bfe54f2802 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
@@ -83,7 +83,6 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw);
bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw);
void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw);
-void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
index 289ec71ce3e5..8c2167cc1f13 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
@@ -2445,11 +2445,6 @@ void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
}
-void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
-{
- /* Nothing to do. */
-}
-
u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
index 090a6203db7e..a107a5a76beb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
@@ -24,7 +24,6 @@ void rtl92du_phy_set_poweron(struct ieee80211_hw *hw);
bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw);
void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw);
-void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw);
void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 73ef602bfb01..1e7f0cd1c86e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -2926,10 +2926,6 @@ void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw)
rtlphy->lck_inprogress = false;
}
-void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
-{
-}
-
void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
{
_rtl92ee_phy_set_rfpath_switch(hw, bmain, false);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
index 1a5dbc628379..ec4c26b81c48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
@@ -119,7 +119,6 @@ void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw,
void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw);
u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw);
void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 9eddbada8af1..13a05066e8a6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -4586,10 +4586,6 @@ void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw)
{
}
-void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
-{
-}
-
void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
{
_rtl8821ae_phy_set_rfpath_switch(hw, bmain);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
index 35b7d0f70125..90bf5462a3f8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
@@ -214,7 +214,6 @@ void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw,
bool b_recovery);
void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw,
bool b_recovery);
-void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index f5718e570011..d35ed56d6db9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1077,15 +1077,3 @@ void rtl_usb_disconnect(struct usb_interface *intf)
ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_usb_disconnect);
-
-int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message)
-{
- return 0;
-}
-EXPORT_SYMBOL(rtl_usb_suspend);
-
-int rtl_usb_resume(struct usb_interface *pusb_intf)
-{
- return 0;
-}
-EXPORT_SYMBOL(rtl_usb_resume);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index b66d6f9ae564..b873bbc9c4c2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -138,7 +138,5 @@ int rtl_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id,
const struct rtl_hal_cfg *rtl92cu_hal_cfg);
void rtl_usb_disconnect(struct usb_interface *intf);
-int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
-int rtl_usb_resume(struct usb_interface *pusb_intf);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index c929db1e53ca..64904278ddad 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -309,7 +309,7 @@ static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type)
{
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
- u8 para[2] = {0};
+ u8 para[6] = {};
u8 times;
u16 tbtt_interval = coex_stat->wl_beacon_interval;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 6b563ac489a7..4fc78b882080 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1466,7 +1466,7 @@ void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size)
{
- u8 bckp[2];
+ u8 bckp[3];
u8 val;
u16 rsvd_pg_head;
u32 bcn_valid_addr;
@@ -1478,6 +1478,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
if (!size)
return -EINVAL;
+ bckp[2] = rtw_read8(rtwdev, REG_BCN_CTRL);
+
if (rtw_chip_wcpu_11n(rtwdev)) {
rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
} else {
@@ -1491,6 +1493,9 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
val |= BIT_ENSWBCN >> 8;
rtw_write8(rtwdev, REG_CR + 1, val);
+ rtw_write8(rtwdev, REG_BCN_CTRL,
+ (bckp[2] & ~BIT_EN_BCN_FUNCTION) | BIT_DIS_TSF_UDT);
+
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
bckp[1] = val;
@@ -1521,6 +1526,7 @@ restore:
rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
rsvd_pg_head | BIT_BCN_VALID_V1);
+ rtw_write8(rtwdev, REG_BCN_CTRL, bckp[2]);
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
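
The new bckp[2] slot above implements a save/modify/restore bracket around the reserved-page download: beacon function is disabled and TSF updates are blocked for the duration, then the original control value is put back. The bracket in isolation, as a sketch:

u8 bcn_ctrl = rtw_read8(rtwdev, REG_BCN_CTRL);	/* save */

rtw_write8(rtwdev, REG_BCN_CTRL,
	   (bcn_ctrl & ~BIT_EN_BCN_FUNCTION) | BIT_DIS_TSF_UDT);
/* ... download reserved page ... */
rtw_write8(rtwdev, REG_BCN_CTRL, bcn_ctrl);	/* restore */
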
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 96aeda26014e..d4bee9c3ecfe 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -19,6 +19,8 @@ struct rtw_hci_ops {
void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
void (*interface_cfg)(struct rtw_dev *rtwdev);
void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable);
+ void (*write_firmware_page)(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -79,6 +81,12 @@ static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable);
}
+static inline void rtw_hci_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size)
+{
+ rtwdev->hci.ops->write_firmware_page(rtwdev, page, data, size);
+}
+
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 0491f501c138..f66d1b302dc5 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -856,8 +856,8 @@ fwdl_ready:
}
}
-static void
-write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
+void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size)
{
u32 val32;
u32 block_nr;
@@ -887,6 +887,7 @@ write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
}
}
+EXPORT_SYMBOL(rtw_write_firmware_page);
static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
@@ -904,11 +905,13 @@ download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);
for (page = 0; page < total_page; page++) {
- write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
+ rtw_hci_write_firmware_page(rtwdev, page, data,
+ DLFW_PAGE_SIZE_LEGACY);
data += DLFW_PAGE_SIZE_LEGACY;
}
if (last_page_size)
- write_firmware_page(rtwdev, page, data, last_page_size);
+ rtw_hci_write_firmware_page(rtwdev, page, data,
+ last_page_size);
if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
rtw_err(rtwdev, "failed to check download firmware report\n");
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index 6905e2747372..e92b1483728d 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -34,6 +34,8 @@ int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
const struct rtw_pwr_seq_cmd * const *cmd_seq);
int rtw_mac_power_on(struct rtw_dev *rtwdev);
void rtw_mac_power_off(struct rtw_dev *rtwdev);
+void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 026fbf4ad9cc..77f9fbe1870c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -396,6 +396,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
+ rtw_set_ampdu_factor(rtwdev, vif, conf);
+
rtw_fw_beacon_filter_config(rtwdev, true, vif);
} else {
rtw_leave_lps(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 959f56a3cc1a..c4de5d114eda 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -2242,7 +2242,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
- ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ if (rtwdev->chip->amsdu_in_ampdu)
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
@@ -2447,6 +2448,38 @@ void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable)
}
}
+void rtw_set_ampdu_factor(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ const struct rtw_chip_ops *ops = rtwdev->chip->ops;
+ struct ieee80211_sta *sta;
+ u8 factor = 0xff;
+
+ if (!ops->set_ampdu_factor)
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ rtw_warn(rtwdev, "%s: failed to find station %pM\n",
+ __func__, bss_conf->bssid);
+ return;
+ }
+
+ if (sta->deflink.vht_cap.vht_supported)
+ factor = u32_get_bits(sta->deflink.vht_cap.cap,
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+ else if (sta->deflink.ht_cap.ht_supported)
+ factor = sta->deflink.ht_cap.ampdu_factor;
+
+ rcu_read_unlock();
+
+ if (factor != 0xff)
+ ops->set_ampdu_factor(rtwdev, factor);
+}
+
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless core module");
MODULE_LICENSE("Dual BSD/GPL");
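
rtw_set_ampdu_factor() above prefers the VHT exponent over the HT factor; both fields encode the peer's maximum A-MPDU length the same way. A sketch of the decoding, not part of the patch:

/* max A-MPDU length in octets: 2^(13 + factor) - 1,
 * with factor 0..3 for HT and 0..7 for VHT
 */
static u32 ampdu_max_len_sketch(u8 factor)
{
	return BIT(13 + factor) - 1;
}
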
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 02343e059fd9..b0f1fabe9554 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -878,6 +878,7 @@ struct rtw_chip_ops {
u32 antenna_rx);
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
void (*efuse_grant)(struct rtw_dev *rtwdev, bool enable);
+ void (*set_ampdu_factor)(struct rtw_dev *rtwdev, u8 factor);
void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
void (*phy_calibration)(struct rtw_dev *rtwdev);
void (*dpk_track)(struct rtw_dev *rtwdev);
@@ -1229,6 +1230,7 @@ struct rtw_chip_info {
u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
const struct rtw_fwcd_segs *fwcd_segs;
+ bool amsdu_in_ampdu;
u8 usb_tx_agg_desc_num;
bool hw_feature_report;
u8 c2h_ra_report_size;
@@ -2272,4 +2274,6 @@ void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
void rtw_core_port_switch(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
bool rtw_core_check_sta_active(struct rtw_dev *rtwdev);
void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable);
+void rtw_set_ampdu_factor(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index bb4c4ccb31d4..7f2b6dc21f56 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -12,6 +12,7 @@
#include "fw.h"
#include "ps.h"
#include "debug.h"
+#include "mac.h"
static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
@@ -1602,6 +1603,7 @@ static const struct rtw_hci_ops rtw_pci_ops = {
.link_ps = rtw_pci_link_ps,
.interface_cfg = rtw_pci_interface_cfg,
.dynamic_rx_agg = NULL,
+ .write_firmware_page = rtw_write_firmware_page,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
index 1d232adbdd7e..9e6700c43a63 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -519,15 +519,6 @@ static const struct rtw_rqpn rqpn_table_8703b[] = {
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
};
-/* Default power index table for RTL8703B, used if EFUSE does not
- * contain valid data. Replaces EFUSE data from offset 0x10 (start of
- * txpwr_idx_table).
- */
-static const u8 rtw8703b_txpwr_idx_table[] = {
- 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D,
- 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02
-};
-
static void try_mac_from_devicetree(struct rtw_dev *rtwdev)
{
struct device_node *node = rtwdev->dev->of_node;
@@ -544,15 +535,9 @@ static void try_mac_from_devicetree(struct rtw_dev *rtwdev)
}
}
-#define DBG_EFUSE_FIX(rtwdev, name) \
- rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \
- # name "=0x%x\n", rtwdev->efuse.name)
-
static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
- u8 *pwr = (u8 *)efuse->txpwr_idx_table;
- bool valid = false;
int ret;
ret = rtw8723x_read_efuse(rtwdev, log_map);
@@ -562,51 +547,6 @@ static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
if (!is_valid_ether_addr(efuse->addr))
try_mac_from_devicetree(rtwdev);
- /* If TX power index table in EFUSE is invalid, fall back to
- * built-in table.
- */
- for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++)
- if (pwr[i] != 0xff) {
- valid = true;
- break;
- }
- if (!valid) {
- for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++)
- pwr[i] = rtw8703b_txpwr_idx_table[i];
- rtw_dbg(rtwdev, RTW_DBG_EFUSE,
- "Replaced invalid EFUSE TX power index table.");
- rtw8723x_debug_txpwr_limit(rtwdev,
- efuse->txpwr_idx_table, 2);
- }
-
- /* Override invalid antenna settings. */
- if (efuse->bt_setting == 0xff) {
- /* shared antenna */
- efuse->bt_setting |= BIT(0);
- /* RF path A */
- efuse->bt_setting &= ~BIT(6);
- DBG_EFUSE_FIX(rtwdev, bt_setting);
- }
-
- /* Override invalid board options: The coex code incorrectly
- * assumes that if bits 6 & 7 are set the board doesn't
- * support coex. Regd is also derived from rf_board_option and
- * should be 0 if there's no valid data.
- */
- if (efuse->rf_board_option == 0xff) {
- efuse->regd = 0;
- efuse->rf_board_option &= GENMASK(5, 0);
- DBG_EFUSE_FIX(rtwdev, rf_board_option);
- }
-
- /* Override invalid crystal cap setting, default comes from
- * vendor driver. Chip specific.
- */
- if (efuse->crystal_cap == 0xff) {
- efuse->crystal_cap = 0x20;
- DBG_EFUSE_FIX(rtwdev, crystal_cap);
- }
-
return 0;
}
@@ -1904,6 +1844,7 @@ static const struct rtw_chip_ops rtw8703b_ops = {
.set_antenna = NULL,
.cfg_ldo25 = rtw8723x_cfg_ldo25,
.efuse_grant = rtw8723x_efuse_grant,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8723x_false_alarm_statistics,
.phy_calibration = rtw8703b_phy_calibration,
.dpk_track = NULL,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
index 8d38d36be8c0..1f98d35a8dd1 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
@@ -19,7 +19,7 @@ static const struct sdio_device_id rtw_8723cs_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8723cs_id_table);
static struct sdio_driver rtw_8723cs_driver = {
- .name = "rtw8723cs",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8723cs_id_table,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 87715bd54860..31876e708f9e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -1404,6 +1404,7 @@ static const struct rtw_chip_ops rtw8723d_ops = {
.set_antenna = NULL,
.cfg_ldo25 = rtw8723x_cfg_ldo25,
.efuse_grant = rtw8723x_efuse_grant,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8723x_false_alarm_statistics,
.phy_calibration = rtw8723d_phy_calibration,
.cck_pd_set = rtw8723d_phy_cck_pd_set,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
index abbaafa32851..87c8bc9d18a9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723de.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
@@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8723de_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8723de_id_table);
static struct pci_driver rtw_8723de_driver = {
- .name = "rtw_8723de",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8723de_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723ds.c b/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
index e5b6960ba0a0..206b77e5b98e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
@@ -25,7 +25,7 @@ static const struct sdio_device_id rtw_8723ds_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8723ds_id_table);
static struct sdio_driver rtw_8723ds_driver = {
- .name = "rtw_8723ds",
+ .name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
.id_table = rtw_8723ds_id_table,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723du.c b/drivers/net/wireless/realtek/rtw88/rtw8723du.c
index 322a805da76b..b661a26e0e22 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723du.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723du.c
@@ -24,7 +24,7 @@ static int rtw8723du_probe(struct usb_interface *intf,
}
static struct usb_driver rtw_8723du_driver = {
- .name = "rtw_8723du",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8723du_id_table,
.probe = rtw8723du_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.c b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
index 69f73cb5b4cd..4c77963fdd37 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723x.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
@@ -69,6 +69,9 @@ static void __rtw8723x_lck(struct rtw_dev *rtwdev)
#define DBG_EFUSE_2BYTE(rtwdev, map, name) \
rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x%02x\n", \
(map)->name[0], (map)->name[1])
+#define DBG_EFUSE_FIX(rtwdev, name) \
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \
+ # name "=0x%x\n", rtwdev->efuse.name)
static void rtw8723xe_efuse_debug(struct rtw_dev *rtwdev,
struct rtw8723x_efuse *map)
@@ -238,10 +241,21 @@ static void rtw8723xs_efuse_parsing(struct rtw_efuse *efuse,
ether_addr_copy(efuse->addr, map->s.mac_addr);
}
+/* Default power index table for RTL8703B/RTL8723D, used if EFUSE does
+ * not contain valid data. Replaces EFUSE data from offset 0x10 (start
+ * of txpwr_idx_table).
+ */
+static const u8 rtw8723x_txpwr_idx_table[] = {
+ 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D,
+ 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02
+};
+
static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 *pwr = (u8 *)efuse->txpwr_idx_table;
struct rtw8723x_efuse *map;
+ bool valid = false;
int i;
map = (struct rtw8723x_efuse *)log_map;
@@ -279,6 +293,51 @@ static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
return -EOPNOTSUPP;
}
+ /* If TX power index table in EFUSE is invalid, fall back to
+ * built-in table.
+ */
+ for (i = 0; i < ARRAY_SIZE(rtw8723x_txpwr_idx_table); i++)
+ if (pwr[i] != 0xff) {
+ valid = true;
+ break;
+ }
+ if (!valid) {
+ for (i = 0; i < ARRAY_SIZE(rtw8723x_txpwr_idx_table); i++)
+ pwr[i] = rtw8723x_txpwr_idx_table[i];
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+			"Replaced invalid EFUSE TX power index table.\n");
+ rtw8723x_debug_txpwr_limit(rtwdev,
+ efuse->txpwr_idx_table, 2);
+ }
+
+ /* Override invalid antenna settings. */
+ if (efuse->bt_setting == 0xff) {
+ /* shared antenna */
+ efuse->bt_setting |= BIT(0);
+ /* RF path A */
+ efuse->bt_setting &= ~BIT(6);
+ DBG_EFUSE_FIX(rtwdev, bt_setting);
+ }
+
+ /* Override invalid board options: The coex code incorrectly
+ * assumes that if bits 6 & 7 are set the board doesn't
+ * support coex. Regd is also derived from rf_board_option and
+ * should be 0 if there's no valid data.
+ */
+ if (efuse->rf_board_option == 0xff) {
+ efuse->regd = 0;
+ efuse->rf_board_option &= GENMASK(5, 0);
+ DBG_EFUSE_FIX(rtwdev, rf_board_option);
+ }
+
+ /* Override invalid crystal cap setting, default comes from
+ * vendor driver. Chip specific.
+ */
+ if (efuse->crystal_cap == 0xff) {
+ efuse->crystal_cap = 0x20;
+ DBG_EFUSE_FIX(rtwdev, crystal_cap);
+ }
+
return 0;
}
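
A note on the validity scan above: the TX power index region is treated as
unprogrammed only when every byte reads back 0xff, the erased EFUSE state. A
minimal standalone sketch of that check (the helper name is illustrative, not
from the driver):

    #include <stdbool.h>
    #include <stddef.h>

    /* Returns true if at least one byte was programmed (any value != 0xff). */
    static bool efuse_region_valid(const unsigned char *p, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    if (p[i] != 0xff)
                            return true;
            return false;
    }
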
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.c b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
index f9ba2aa2928a..c2ef41767ff9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
@@ -925,6 +925,7 @@ static const struct rtw_chip_ops rtw8812a_ops = {
.set_tx_power_index = rtw88xxa_set_tx_power_index,
.cfg_ldo25 = rtw8812a_cfg_ldo25,
.efuse_grant = rtw88xxa_efuse_grant,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw88xxa_false_alarm_statistics,
.phy_calibration = rtw8812a_phy_calibration,
.cck_pd_set = rtw88xxa_phy_cck_pd_set,
@@ -1075,6 +1076,7 @@ const struct rtw_chip_info rtw8812a_hw_spec = {
.rfe_defs = rtw8812a_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8812a_rfe_defs),
.rx_ldpc = false,
+ .amsdu_in_ampdu = true,
.hw_feature_report = false,
.c2h_ra_report_size = 4,
.old_datarate_fb_limit = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812au.c b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
index e18995f4cc78..dfea89670372 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
@@ -82,7 +82,7 @@ static const struct usb_device_id rtw_8812au_id_table[] = {
MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table);
static struct usb_driver rtw_8812au_driver = {
- .name = "rtw_8812au",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8812au_id_table,
.probe = rtw_usb_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.c b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
index cfd35d40d46e..44dd3090484b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8814a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
@@ -1332,6 +1332,16 @@ static void rtw8814a_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
{
}
+/* Without this the RTL8814A sends too many frames and some 11n APs
+ * can't handle it, resulting in low TX speed. Other chips seem fine.
+ */
+static void rtw8814a_set_ampdu_factor(struct rtw_dev *rtwdev, u8 factor)
+{
+ factor = min_t(u8, factor, IEEE80211_VHT_MAX_AMPDU_256K);
+
+ rtw_write32(rtwdev, REG_AMPDU_MAX_LENGTH, (8192 << factor) - 1);
+}
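
The mapping above follows the 802.11 A-MPDU length-exponent convention: the
byte limit is (8192 << factor) - 1, and IEEE80211_VHT_MAX_AMPDU_256K (value 5
in mainline headers) caps it at 256 KiB - 1. A quick self-contained check of
that arithmetic, with the register write left out:

    #include <assert.h>

    static unsigned int ampdu_max_length(unsigned char factor)
    {
            if (factor > 5)         /* IEEE80211_VHT_MAX_AMPDU_256K */
                    factor = 5;
            return (8192u << factor) - 1;
    }

    int main(void)
    {
            assert(ampdu_max_length(0) == 8191);   /* 8 KiB - 1 */
            assert(ampdu_max_length(5) == 262143); /* 256 KiB - 1 */
            assert(ampdu_max_length(7) == 262143); /* clamped */
            return 0;
    }
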
+
static void rtw8814a_false_alarm_statistics(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
@@ -2051,6 +2061,7 @@ static const struct rtw_chip_ops rtw8814a_ops = {
.set_antenna = NULL,
.cfg_ldo25 = rtw8814a_cfg_ldo25,
.efuse_grant = rtw8814a_efuse_grant,
+ .set_ampdu_factor = rtw8814a_set_ampdu_factor,
.false_alarm_statistics = rtw8814a_false_alarm_statistics,
.phy_calibration = rtw8814a_phy_calibration,
.cck_pd_set = rtw8814a_phy_cck_pd_set,
@@ -2189,6 +2200,7 @@ const struct rtw_chip_info rtw8814a_hw_spec = {
.rx_ldpc = true,
.max_power_index = 0x3f,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .amsdu_in_ampdu = false, /* RX speed is better without AMSDU */
.usb_tx_agg_desc_num = 3,
.hw_feature_report = false,
.c2h_ra_report_size = 6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
index 54d2e20a7764..c7436f1c8d40 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
@@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8814ae_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8814ae_id_table);
static struct pci_driver rtw_8814ae_driver = {
- .name = "rtw_8814ae",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8814ae_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814au.c b/drivers/net/wireless/realtek/rtw88/rtw8814au.c
index afe045fb84de..1a0886ec17dd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8814au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814au.c
@@ -42,7 +42,7 @@ static const struct usb_device_id rtw_8814au_id_table[] = {
MODULE_DEVICE_TABLE(usb, rtw_8814au_id_table);
static struct usb_driver rtw_8814au_driver = {
- .name = "rtw_8814au",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8814au_id_table,
.probe = rtw_usb_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.c b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
index f68239b07319..413aec694c33 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
@@ -871,6 +871,7 @@ static const struct rtw_chip_ops rtw8821a_ops = {
.set_tx_power_index = rtw88xxa_set_tx_power_index,
.cfg_ldo25 = rtw8821a_cfg_ldo25,
.efuse_grant = rtw88xxa_efuse_grant,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw88xxa_false_alarm_statistics,
.phy_calibration = rtw8821a_phy_calibration,
.cck_pd_set = rtw88xxa_phy_cck_pd_set,
@@ -1175,6 +1176,7 @@ const struct rtw_chip_info rtw8821a_hw_spec = {
.rfe_defs = rtw8821a_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8821a_rfe_defs),
.rx_ldpc = false,
+ .amsdu_in_ampdu = true,
.hw_feature_report = false,
.c2h_ra_report_size = 4,
.old_datarate_fb_limit = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821au.c b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
index a01744b64e8d..28c281b32978 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
@@ -66,7 +66,7 @@ static const struct usb_device_id rtw_8821au_id_table[] = {
MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table);
static struct usb_driver rtw_8821au_driver = {
- .name = "rtw_8821au",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8821au_id_table,
.probe = rtw_usb_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 0ade7f11cbd2..413130a30ca9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1668,6 +1668,7 @@ static const struct rtw_chip_ops rtw8821c_ops = {
.set_antenna = NULL,
.set_tx_power_index = rtw8821c_set_tx_power_index,
.cfg_ldo25 = rtw8821c_cfg_ldo25,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8821c_false_alarm_statistics,
.phy_calibration = rtw8821c_phy_calibration,
.cck_pd_set = rtw8821c_phy_cck_pd_set,
@@ -1990,6 +1991,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
+ .amsdu_in_ampdu = true,
.usb_tx_agg_desc_num = 3,
.hw_feature_report = true,
.c2h_ra_report_size = 7,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
index f3d971feda04..40637c079d99 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
@@ -21,7 +21,7 @@ static const struct pci_device_id rtw_8821ce_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8821ce_id_table);
static struct pci_driver rtw_8821ce_driver = {
- .name = "rtw_8821ce",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8821ce_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cs.c b/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
index a359413369a4..6d94162213c6 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
@@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8821cs_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8821cs_id_table);
static struct sdio_driver rtw_8821cs_driver = {
- .name = "rtw_8821cs",
+ .name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
.id_table = rtw_8821cs_id_table,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index a019f4085e73..7a0fffc359e2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -48,7 +48,7 @@ static int rtw_8821cu_probe(struct usb_interface *intf,
}
static struct usb_driver rtw_8821cu_driver = {
- .name = "rtw_8821cu",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8821cu_id_table,
.probe = rtw_8821cu_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index b4934da88e33..ab199eaea3c7 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2158,6 +2158,7 @@ static const struct rtw_chip_ops rtw8822b_ops = {
.set_tx_power_index = rtw8822b_set_tx_power_index,
.set_antenna = rtw8822b_set_antenna,
.cfg_ldo25 = rtw8822b_cfg_ldo25,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8822b_false_alarm_statistics,
.phy_calibration = rtw8822b_phy_calibration,
.pwr_track = rtw8822b_pwr_track,
@@ -2531,6 +2532,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
+ .amsdu_in_ampdu = true,
.usb_tx_agg_desc_num = 3,
.hw_feature_report = true,
.c2h_ra_report_size = 7,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
index 4994950776cd..0bb9f70e7920 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822be.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
@@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8822be_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8822be_id_table);
static struct pci_driver rtw_8822be_driver = {
- .name = "rtw_8822be",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8822be_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bs.c b/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
index 31d8645f83bd..744781dcb419 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
@@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8822bs_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8822bs_id_table);
static struct sdio_driver rtw_8822bs_driver = {
- .name = "rtw_8822bs",
+ .name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
.id_table = rtw_8822bs_id_table,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
index 572d1f31832e..44e28e583964 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
@@ -77,6 +77,8 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30N */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3322, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* D-Link DWA-T185 rev. A1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x03d1, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* BUFFALO WI-U2-866DM */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
@@ -88,7 +90,7 @@ static int rtw8822bu_probe(struct usb_interface *intf,
}
static struct usb_driver rtw_8822bu_driver = {
- .name = "rtw_8822bu",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8822bu_id_table,
.probe = rtw8822bu_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 5e53e0db177e..017d959de3ce 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -3951,7 +3951,8 @@ static void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
rtw_write32(rtwdev, REG_NCTL0, 0x00001148);
rtw_write32(rtwdev, REG_NCTL0, 0x00001149);
- check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55);
+ if (!check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55))
+		rtw_warn(rtwdev, "DPK stuck, performance may be suboptimal\n");
rtw_write8(rtwdev, 0x1b10, 0x0);
rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x0000000c);
@@ -4968,6 +4969,7 @@ static const struct rtw_chip_ops rtw8822c_ops = {
.set_tx_power_index = rtw8822c_set_tx_power_index,
.set_antenna = rtw8822c_set_antenna,
.cfg_ldo25 = rtw8822c_cfg_ldo25,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8822c_false_alarm_statistics,
.dpk_track = rtw8822c_dpk_track,
.phy_calibration = rtw8822c_phy_calibration,
@@ -5349,6 +5351,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
+ .amsdu_in_ampdu = true,
.usb_tx_agg_desc_num = 3,
.hw_feature_report = true,
.c2h_ra_report_size = 7,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
index e26c6bc82936..9def732480af 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
@@ -21,7 +21,7 @@ static const struct pci_device_id rtw_8822ce_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8822ce_id_table);
static struct pci_driver rtw_8822ce_driver = {
- .name = "rtw_8822ce",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8822ce_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cs.c b/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
index 975e81c824f2..322281e07eb8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
@@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8822cs_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8822cs_id_table);
static struct sdio_driver rtw_8822cs_driver = {
- .name = "rtw_8822cs",
+ .name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
.id_table = rtw_8822cs_id_table,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c
index 157d5102a4b1..324fd5c8bfd4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c
@@ -32,7 +32,7 @@ static int rtw8822cu_probe(struct usb_interface *intf,
}
static struct usb_driver rtw_8822cu_driver = {
- .name = "rtw_8822cu",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8822cu_id_table,
.probe = rtw8822cu_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 6209a49312f1..e733ed846123 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -10,6 +10,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include "main.h"
+#include "mac.h"
#include "debug.h"
#include "fw.h"
#include "ps.h"
@@ -677,12 +678,22 @@ static void rtw_sdio_enable_rx_aggregation(struct rtw_dev *rtwdev)
{
u8 size, timeout;
- if (rtw_chip_wcpu_11n(rtwdev)) {
+ switch (rtwdev->chip->id) {
+ case RTW_CHIP_TYPE_8703B:
+ case RTW_CHIP_TYPE_8821A:
+ case RTW_CHIP_TYPE_8812A:
size = 0x6;
timeout = 0x6;
- } else {
+ break;
+ case RTW_CHIP_TYPE_8723D:
+ size = 0xa;
+ timeout = 0x3;
+ rtw_write8_set(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));
+ break;
+ default:
size = 0xff;
timeout = 0x1;
+ break;
}
/* Make the firmware honor the size limit configured below */
@@ -718,10 +729,7 @@ static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb,
case RTW_TX_QUEUE_H2C:
return TX_DESC_QSEL_H2C;
case RTW_TX_QUEUE_MGMT:
- if (rtw_chip_wcpu_11n(rtwdev))
- return TX_DESC_QSEL_HIGH;
- else
- return TX_DESC_QSEL_MGMT;
+ return TX_DESC_QSEL_MGMT;
case RTW_TX_QUEUE_HI0:
return TX_DESC_QSEL_HIGH;
default:
@@ -1157,6 +1165,7 @@ static const struct rtw_hci_ops rtw_sdio_ops = {
.link_ps = rtw_sdio_link_ps,
.interface_cfg = rtw_sdio_interface_cfg,
.dynamic_rx_agg = NULL,
+ .write_firmware_page = rtw_write_firmware_page,
.read8 = rtw_sdio_read8,
.read16 = rtw_sdio_read16,
@@ -1227,10 +1236,7 @@ static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev,
return;
}
- if (queue <= RTW_TX_QUEUE_VO)
- rtw_sdio_indicate_tx_status(rtwdev, skb);
- else
- dev_kfree_skb_any(skb);
+ rtw_sdio_indicate_tx_status(rtwdev, skb);
}
static void rtw_sdio_tx_handler(struct work_struct *work)
@@ -1298,7 +1304,6 @@ static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
int i;
- flush_workqueue(rtwsdio->txwq);
destroy_workqueue(rtwsdio->txwq);
kfree(rtwsdio->tx_handler_data);
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 6ed470dd6f22..2ab440cb2d67 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -178,7 +178,8 @@ static void rtw_tx_report_enable(struct rtw_dev *rtwdev,
void rtw_tx_report_purge_timer(struct timer_list *t)
{
- struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
+ struct rtw_dev *rtwdev = timer_container_of(rtwdev, t,
+ tx_report.purge_timer);
struct rtw_tx_report *tx_report = &rtwdev->tx_report;
unsigned long flags;
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index c8092fa0d9f1..3b5126ffc81a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -139,7 +139,7 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
- addr, 0, data, len, 30000);
+ addr, 0, data, len, 500);
if (ret < 0 && ret != -ENODEV && count++ < 4)
rtw_err(rtwdev, "write register 0x%x failed with %d\n",
addr, ret);
@@ -165,6 +165,60 @@ static void rtw_usb_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
rtw_usb_write(rtwdev, addr, val, 4);
}
+static void rtw_usb_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct usb_device *udev = rtwusb->udev;
+ u32 addr = FW_START_ADDR_LEGACY;
+ u8 *data_dup, *buf;
+ u32 n, block_size;
+ int ret;
+
+ switch (rtwdev->chip->id) {
+ case RTW_CHIP_TYPE_8723D:
+ block_size = 254;
+ break;
+ default:
+ block_size = 196;
+ break;
+ }
+
+ data_dup = kmemdup(data, size, GFP_KERNEL);
+ if (!data_dup)
+ return;
+
+ buf = data_dup;
+
+ rtw_write32_mask(rtwdev, REG_MCUFW_CTRL, BIT_ROM_PGE, page);
+
+ while (size > 0) {
+ if (size >= block_size)
+ n = block_size;
+ else if (size >= 8)
+ n = 8;
+ else
+ n = 1;
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
+ addr, 0, buf, n, 500);
+ if (ret != n) {
+ if (ret != -ENODEV)
+ rtw_err(rtwdev,
+ "write 0x%x len %d failed: %d\n",
+ addr, n, ret);
+ break;
+ }
+
+ addr += n;
+ buf += n;
+ size -= n;
+ }
+
+ kfree(data_dup);
+}
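
The write loop above chunks the firmware image into full control transfers
first, then 8-byte writes, then single bytes, so an arbitrarily sized image is
always covered. A standalone sketch of just the chunking policy; with the
8723D's 254-byte block, a hypothetical 300-byte image goes out as one 254-byte
write, five 8-byte writes and six single bytes:

    #include <stdio.h>

    static void show_chunks(unsigned int size, unsigned int block_size)
    {
            while (size > 0) {
                    unsigned int n;

                    if (size >= block_size)
                            n = block_size;
                    else if (size >= 8)
                            n = 8;
                    else
                            n = 1;

                    printf("write %u bytes\n", n);
                    size -= n;
            }
    }

    int main(void)
    {
            show_chunks(300, 254); /* 254, then 5 x 8, then 6 x 1 */
            return 0;
    }
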
+
static int dma_mapping_to_ep(enum rtw_dma_mapping dma_mapping)
{
switch (dma_mapping) {
@@ -866,6 +920,7 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
case RTW_CHIP_TYPE_8822C:
case RTW_CHIP_TYPE_8822B:
case RTW_CHIP_TYPE_8821C:
+ case RTW_CHIP_TYPE_8814A:
rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
break;
case RTW_CHIP_TYPE_8821A:
@@ -891,6 +946,7 @@ static const struct rtw_hci_ops rtw_usb_ops = {
.link_ps = rtw_usb_link_ps,
.interface_cfg = rtw_usb_interface_cfg,
.dynamic_rx_agg = rtw_usb_dynamic_rx_agg,
+ .write_firmware_page = rtw_usb_write_firmware_page,
.write8 = rtw_usb_write8,
.write16 = rtw_usb_write16,
@@ -948,7 +1004,6 @@ static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
skb_queue_purge(&rtwusb->rx_queue);
- flush_workqueue(rtwusb->rxwq);
destroy_workqueue(rtwusb->rxwq);
skb_queue_purge(&rtwusb->rx_free_queue);
@@ -977,7 +1032,6 @@ static void rtw_usb_deinit_tx(struct rtw_dev *rtwdev)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
- flush_workqueue(rtwusb->txwq);
destroy_workqueue(rtwusb->txwq);
rtw_usb_tx_queue_purge(rtwusb);
}
@@ -1094,7 +1148,8 @@ static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
static bool rtw_usb3_chip_old(u8 chip_id)
{
- return chip_id == RTW_CHIP_TYPE_8812A;
+ return chip_id == RTW_CHIP_TYPE_8812A ||
+ chip_id == RTW_CHIP_TYPE_8814A;
}
static bool rtw_usb3_chip_new(u8 chip_id)
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index f5dedb12c129..581d6d4154d3 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -12,6 +12,121 @@ static const guid_t rtw89_guid = GUID_INIT(0xD2A8C3E8, 0x4B69, 0x4F00,
0x82, 0xBD, 0xFE, 0x86,
0x07, 0x80, 0x3A, 0xA7);
+static u32 rtw89_acpi_traversal_object(struct rtw89_dev *rtwdev,
+ const union acpi_object *obj, u8 *pos)
+{
+ const union acpi_object *elm;
+ unsigned int i;
+ u32 sub_len;
+ u32 len = 0;
+ u8 *tmp;
+
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ if (pos)
+ pos[len] = obj->integer.value;
+
+ len++;
+ break;
+ case ACPI_TYPE_BUFFER:
+ if (unlikely(obj->buffer.length == 0)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+				    "%s: zero-length buffer\n", __func__);
+ goto err;
+ }
+
+ if (pos)
+ memcpy(pos, obj->buffer.pointer, obj->buffer.length);
+
+ len += obj->buffer.length;
+ break;
+ case ACPI_TYPE_PACKAGE:
+ if (unlikely(obj->package.count == 0)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+				    "%s: empty package\n", __func__);
+ goto err;
+ }
+
+ for (i = 0; i < obj->package.count; i++) {
+ elm = &obj->package.elements[i];
+ tmp = pos ? pos + len : NULL;
+
+ sub_len = rtw89_acpi_traversal_object(rtwdev, elm, tmp);
+ if (unlikely(sub_len == 0))
+ goto err;
+
+ len += sub_len;
+ }
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: unhandled type: %d\n",
+ __func__, obj->type);
+ goto err;
+ }
+
+ return len;
+
+err:
+ return 0;
+}
+
+static u32 rtw89_acpi_calculate_object_length(struct rtw89_dev *rtwdev,
+ const union acpi_object *obj)
+{
+ return rtw89_acpi_traversal_object(rtwdev, obj, NULL);
+}
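
rtw89_acpi_traversal_object() is used in two passes: first with pos == NULL to
measure the flattened length (integers contribute one byte each, buffers their
full length, packages recurse over their elements), then again with a real
buffer to fill. A toy illustration of the same measure-then-fill pattern, with
invented types standing in for union acpi_object:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_obj {
            int is_buffer;
            const unsigned char *data; /* when is_buffer */
            size_t len;                /* buffer length */
            unsigned char value;       /* when integer */
    };

    static size_t flatten(const struct toy_obj *o, unsigned char *pos)
    {
            if (o->is_buffer) {
                    if (pos)
                            memcpy(pos, o->data, o->len);
                    return o->len;
            }
            if (pos)
                    pos[0] = o->value; /* integers take one byte */
            return 1;
    }

    int main(void)
    {
            const unsigned char payload[4] = { 1, 2, 3, 4 };
            const struct toy_obj pkg[2] = {
                    { .is_buffer = 0, .value = 0x2a },
                    { .is_buffer = 1, .data = payload, .len = 4 },
            };
            unsigned char buf[8];
            size_t len = 0, i;

            for (i = 0; i < 2; i++)          /* pass 1: measure */
                    len += flatten(&pkg[i], NULL);
            for (i = 0, len = 0; i < 2; i++) /* pass 2: fill */
                    len += flatten(&pkg[i], buf + len);

            printf("flattened %zu bytes\n", len); /* 5 */
            return 0;
    }
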
+
+static struct rtw89_acpi_data *
+rtw89_acpi_evaluate_method(struct rtw89_dev *rtwdev, const char *method)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct rtw89_acpi_data *data = NULL;
+ acpi_handle root, handle;
+ union acpi_object *obj;
+ acpi_status status;
+ u32 len;
+
+ root = ACPI_HANDLE(rtwdev->dev);
+ if (!root) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi (%s): failed to get root\n", method);
+ return NULL;
+ }
+
+ status = acpi_get_handle(root, (acpi_string)method, &handle);
+ if (ACPI_FAILURE(status)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi (%s): failed to get handle\n", method);
+ return NULL;
+ }
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi (%s): failed to evaluate object\n", method);
+ return NULL;
+ }
+
+ obj = buf.pointer;
+ len = rtw89_acpi_calculate_object_length(rtwdev, obj);
+ if (unlikely(len == 0)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+			    "acpi (%s): failed to traverse obj len\n", method);
+ goto out;
+ }
+
+ data = kzalloc(struct_size(data, buf, len), GFP_KERNEL);
+ if (!data)
+ goto out;
+
+ data->len = len;
+ rtw89_acpi_traversal_object(rtwdev, obj, data->buf);
+
+out:
+ ACPI_FREE(obj);
+ return data;
+}
+
static
int rtw89_acpi_dsm_get_value(struct rtw89_dev *rtwdev, union acpi_object *obj,
u8 *value)
@@ -121,6 +236,49 @@ int rtw89_acpi_dsm_get_policy_6ghz_sp(struct rtw89_dev *rtwdev,
return 0;
}
+static bool chk_acpi_policy_tas_sig(const struct rtw89_acpi_policy_tas *p)
+{
+ return p->signature[0] == 0x52 &&
+ p->signature[1] == 0x54 &&
+ p->signature[2] == 0x4B &&
+ p->signature[3] == 0x05;
+}
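
The magic bytes checked above spell "RTK" followed by 0x05; an equivalent,
more compact formulation (purely illustrative) would be:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            const unsigned char sig[4] = { 0x52, 0x54, 0x4b, 0x05 };

            assert(memcmp(sig, "RTK\x05", 4) == 0);
            return 0;
    }
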
+
+static int rtw89_acpi_dsm_get_policy_tas(struct rtw89_dev *rtwdev,
+ union acpi_object *obj,
+ struct rtw89_acpi_policy_tas **policy)
+{
+ const struct rtw89_acpi_policy_tas *ptr;
+ u32 buf_len;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ return -EINVAL;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len < sizeof(*ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ return -EINVAL;
+ }
+
+ ptr = (typeof(ptr))obj->buffer.pointer;
+ if (!chk_acpi_policy_tas_sig(ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__);
+ return -EINVAL;
+ }
+
+ *policy = kmemdup(ptr, sizeof(*ptr), GFP_KERNEL);
+ if (!*policy)
+ return -ENOMEM;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_tas: ", *policy,
+ sizeof(*ptr));
+ return 0;
+}
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res)
@@ -142,6 +300,8 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
else if (func == RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP)
ret = rtw89_acpi_dsm_get_policy_6ghz_sp(rtwdev, obj,
&res->u.policy_6ghz_sp);
+ else if (func == RTW89_ACPI_DSM_FUNC_TAS_EN)
+ ret = rtw89_acpi_dsm_get_policy_tas(rtwdev, obj, &res->u.policy_tas);
else
ret = rtw89_acpi_dsm_get_value(rtwdev, obj, &res->u.value);
@@ -152,46 +312,875 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
struct rtw89_acpi_rtag_result *res)
{
- struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_handle root, handle;
- union acpi_object *obj;
- acpi_status status;
+ const struct rtw89_acpi_data *data;
u32 buf_len;
int ret = 0;
- root = ACPI_HANDLE(rtwdev->dev);
- if (!root)
- return -EOPNOTSUPP;
-
- status = acpi_get_handle(root, (acpi_string)"RTAG", &handle);
- if (ACPI_FAILURE(status))
+ data = rtw89_acpi_evaluate_method(rtwdev, "RTAG");
+ if (!data)
return -EIO;
- status = acpi_evaluate_object(handle, NULL, NULL, &buf);
- if (ACPI_FAILURE(status))
- return -EIO;
+ buf_len = data->len;
+ if (buf_len != sizeof(*res)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ ret = -EINVAL;
+ goto out;
+ }
- obj = buf.pointer;
- if (obj->type != ACPI_TYPE_BUFFER) {
+ *res = *(struct rtw89_acpi_rtag_result *)data->buf;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res));
+
+out:
+ kfree(data);
+ return ret;
+}
+
+enum rtw89_acpi_sar_subband rtw89_acpi_sar_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq)
+{
+ switch (center_freq) {
+ default:
rtw89_debug(rtwdev, RTW89_DBG_ACPI,
- "acpi: expect buffer but type: %d\n", obj->type);
- ret = -EINVAL;
+ "center freq %u to ACPI SAR subband is unhandled\n",
+ center_freq);
+ fallthrough;
+ case 2412 ... 2484:
+ return RTW89_ACPI_SAR_2GHZ_SUBBAND;
+ case 5180 ... 5240:
+ return RTW89_ACPI_SAR_5GHZ_SUBBAND_1;
+ case 5250 ... 5320:
+ return RTW89_ACPI_SAR_5GHZ_SUBBAND_2;
+ case 5500 ... 5720:
+ return RTW89_ACPI_SAR_5GHZ_SUBBAND_2E;
+ case 5745 ... 5885:
+ return RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4;
+ case 5955 ... 6155:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L;
+ case 6175 ... 6415:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H;
+ case 6435 ... 6515:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_6;
+ case 6535 ... 6695:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L;
+ case 6715 ... 6855:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H;
+
+ /* freq 6875 (ch 185, 20MHz) spans RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H
+ * and RTW89_ACPI_SAR_6GHZ_SUBBAND_8, so directly describe it with
+ * struct rtw89_6ghz_span.
+ */
+
+ case 6895 ... 7115:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_8;
+ }
+}
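
The case ranges above use the GNU extension the kernel relies on; any center
frequency outside the listed spans hits the default arm, which logs and then
falls through to the 2 GHz subband. A reduced standalone sketch with three
representative spans (toy return codes, not the driver's enum):

    #include <assert.h>

    /* 0 = 2 GHz, 1 = 5 GHz subband 2E, 2 = 6 GHz subband 8 */
    static int subband_of(unsigned int mhz)
    {
            switch (mhz) {
            case 2412 ... 2484: return 0;
            case 5500 ... 5720: return 1;
            case 6895 ... 7115: return 2;
            default:            return -1;
            }
    }

    int main(void)
    {
            assert(subband_of(2437) == 0);
            assert(subband_of(5500) == 1);
            assert(subband_of(6955) == 2);
            assert(subband_of(42) == -1);
            return 0;
    }
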
+
+enum rtw89_band rtw89_acpi_sar_subband_to_band(struct rtw89_dev *rtwdev,
+ enum rtw89_acpi_sar_subband subband)
+{
+ switch (subband) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "ACPI SAR subband %u to band is unhandled\n", subband);
+ fallthrough;
+ case RTW89_ACPI_SAR_2GHZ_SUBBAND:
+ return RTW89_BAND_2G;
+ case RTW89_ACPI_SAR_5GHZ_SUBBAND_1:
+ return RTW89_BAND_5G;
+ case RTW89_ACPI_SAR_5GHZ_SUBBAND_2:
+ return RTW89_BAND_5G;
+ case RTW89_ACPI_SAR_5GHZ_SUBBAND_2E:
+ return RTW89_BAND_5G;
+ case RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4:
+ return RTW89_BAND_5G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_6:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_8:
+ return RTW89_BAND_6G;
+ }
+}
+
+static u8 rtw89_acpi_sar_rfpath_to_hp_antidx(enum rtw89_rf_path rfpath)
+{
+ switch (rfpath) {
+ default:
+ case RF_PATH_B:
+ return 0;
+ case RF_PATH_A:
+ return 1;
+ }
+}
+
+static u8 rtw89_acpi_sar_rfpath_to_rt_antidx(enum rtw89_rf_path rfpath)
+{
+ switch (rfpath) {
+ default:
+ case RF_PATH_A:
+ return 0;
+ case RF_PATH_B:
+ return 1;
+ }
+}
+
+static s16 rtw89_acpi_sar_normalize_hp_val(u8 v)
+{
+ static const u8 bias = 10;
+ static const u8 fct = 1;
+ u16 res;
+
+ BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR);
+
+ res = (bias << TXPWR_FACTOR_OF_RTW89_ACPI_SAR) +
+ (v << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct));
+
+ return min_t(s32, res, MAX_VAL_OF_RTW89_ACPI_SAR);
+}
+
+static s16 rtw89_acpi_sar_normalize_rt_val(u8 v)
+{
+ static const u8 fct = 3;
+ u16 res;
+
+ BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR);
+
+ res = v << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct);
+
+ return min_t(s32, res, MAX_VAL_OF_RTW89_ACPI_SAR);
+}
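
Both normalizers convert vendor-specific fixed-point encodings into the
driver's common unit of 2^-TXPWR_FACTOR_OF_RTW89_ACPI_SAR dBm: the HP encoding
is a 10 dBm bias plus half-dBm steps (fct = 1), the RT encoding is plain
eighth-dBm steps (fct = 3). Assuming the factor is 3 (eighth-dBm units; an
assumption here, not read from the header), both raw values below decode to
20 dBm:

    #include <assert.h>

    #define FACTOR 3 /* assumed TXPWR_FACTOR_OF_RTW89_ACPI_SAR */

    int main(void)
    {
            unsigned char v_hp = 20, v_rt = 160;
            int hp = (10 << FACTOR) + (v_hp << (FACTOR - 1)); /* 10 + 20/2 dBm */
            int rt = v_rt << (FACTOR - 3);                    /* 160/8 dBm */

            assert(hp == 160); /* 20 dBm in 1/8 dBm units */
            assert(rt == 160);
            return 0;
    }
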
+
+static
+void rtw89_acpi_sar_load_std_legacy(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_sar_std_legacy *ptr = content;
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = rec->rfpath_to_antidx(path);
+
+ if (subband < RTW89_ACPI_SAR_SUBBAND_NR_LEGACY)
+ ent->v[subband][path] =
+ rec->normalize(ptr->v[antidx][subband]);
+ else
+ ent->v[subband][path] = MAX_VAL_OF_RTW89_ACPI_SAR;
+ }
+ }
+}
+
+static
+void rtw89_acpi_sar_load_std_has_6ghz(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_sar_std_has_6ghz *ptr = content;
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+
+ BUILD_BUG_ON(RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ != NUM_OF_RTW89_ACPI_SAR_SUBBAND);
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = rec->rfpath_to_antidx(path);
+
+ ent->v[subband][path] = rec->normalize(ptr->v[antidx][subband]);
+ }
+ }
+}
+
+static
+void rtw89_acpi_sar_load_sml_legacy(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_sar_sml_legacy *ptr = content;
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = rec->rfpath_to_antidx(path);
+
+ if (subband < RTW89_ACPI_SAR_SUBBAND_NR_LEGACY)
+ ent->v[subband][path] =
+ rec->normalize(ptr->v[antidx][subband]);
+ else
+ ent->v[subband][path] = MAX_VAL_OF_RTW89_ACPI_SAR;
+ }
+ }
+}
+
+static
+void rtw89_acpi_sar_load_sml_has_6ghz(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_sar_sml_has_6ghz *ptr = content;
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+
+ BUILD_BUG_ON(RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ != NUM_OF_RTW89_ACPI_SAR_SUBBAND);
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = rec->rfpath_to_antidx(path);
+
+ ent->v[subband][path] = rec->normalize(ptr->v[antidx][subband]);
+ }
+ }
+}
+
+static s16 rtw89_acpi_geo_sar_normalize_delta(s8 delta)
+{
+ static const u8 fct = 1;
+
+ BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR);
+
+ return delta << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct);
+}
+
+static enum rtw89_acpi_geo_sar_regd_hp
+rtw89_acpi_geo_sar_regd_convert_hp_idx(enum rtw89_regulation_type regd)
+{
+ switch (regd) {
+ case RTW89_FCC:
+ case RTW89_IC:
+ case RTW89_NCC:
+ case RTW89_CHILE:
+ case RTW89_MEXICO:
+ return RTW89_ACPI_GEO_SAR_REGD_HP_FCC;
+ case RTW89_ETSI:
+ case RTW89_MKK:
+ case RTW89_ACMA:
+ return RTW89_ACPI_GEO_SAR_REGD_HP_ETSI;
+ default:
+ case RTW89_WW:
+ case RTW89_NA:
+ case RTW89_KCC:
+ return RTW89_ACPI_GEO_SAR_REGD_HP_WW;
+ }
+}
+
+static enum rtw89_acpi_geo_sar_regd_rt
+rtw89_acpi_geo_sar_regd_convert_rt_idx(enum rtw89_regulation_type regd)
+{
+ switch (regd) {
+ case RTW89_FCC:
+ case RTW89_NCC:
+ case RTW89_CHILE:
+ case RTW89_MEXICO:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_FCC;
+ case RTW89_ETSI:
+ case RTW89_ACMA:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_ETSI;
+ case RTW89_MKK:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_MKK;
+ case RTW89_IC:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_IC;
+ case RTW89_KCC:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_KCC;
+ default:
+ case RTW89_WW:
+ case RTW89_NA:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_WW;
+ }
+}
+
+static
+void rtw89_acpi_geo_sar_load_by_hp(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_geo_sar_hp_val *ptr,
+ enum rtw89_rf_path path, s16 *val)
+{
+ u8 antidx = rtw89_acpi_sar_rfpath_to_hp_antidx(path);
+ s16 delta = rtw89_acpi_geo_sar_normalize_delta(ptr->delta[antidx]);
+ s16 max = rtw89_acpi_sar_normalize_hp_val(ptr->max);
+
+ *val = clamp_t(s32, (*val) + delta, MIN_VAL_OF_RTW89_ACPI_SAR, max);
+}
+
+static
+void rtw89_acpi_geo_sar_load_by_rt(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_geo_sar_rt_val *ptr,
+ s16 *val)
+{
+ s16 delta = rtw89_acpi_geo_sar_normalize_delta(ptr->delta);
+ s16 max = rtw89_acpi_sar_normalize_rt_val(ptr->max);
+
+ *val = clamp_t(s32, (*val) + delta, MIN_VAL_OF_RTW89_ACPI_SAR, max);
+}
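
Each geo handler applies a signed per-region delta and then clamps the result
between a global floor and that region's ceiling. A worked example in
eighth-dBm units, with illustrative numbers:

    #include <assert.h>

    static int clamp_i(int v, int lo, int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            int val = 160;   /* 20 dBm base SAR */
            int delta = -16; /* -2 dB geo offset */
            int max = 144;   /* 18 dBm regional ceiling */

            val = clamp_i(val + delta, 0, max); /* 0 stands in for the MIN_VAL floor */
            assert(val == 144);
            return 0;
    }
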
+
+static
+void rtw89_acpi_geo_sar_load_hp_legacy(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_geo_sar_hp_legacy *ptr = content;
+ const struct rtw89_acpi_geo_sar_hp_legacy_entry *ptr_ent;
+ const struct rtw89_acpi_geo_sar_hp_val *ptr_ent_val;
+ enum rtw89_acpi_geo_sar_regd_hp geo_idx =
+ rtw89_acpi_geo_sar_regd_convert_hp_idx(regd);
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+ enum rtw89_band band;
+
+ ptr_ent = &ptr->entries[geo_idx];
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband);
+ switch (band) {
+ case RTW89_BAND_2G:
+ ptr_ent_val = &ptr_ent->val_2ghz;
+ break;
+ case RTW89_BAND_5G:
+ ptr_ent_val = &ptr_ent->val_5ghz;
+ break;
+ default:
+ case RTW89_BAND_6G:
+ ptr_ent_val = NULL;
+ break;
+ }
+
+ if (!ptr_ent_val)
+ continue;
+
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++)
+ rtw89_acpi_geo_sar_load_by_hp(rtwdev, ptr_ent_val, path,
+ &ent->v[subband][path]);
+ }
+}
+
+static
+void rtw89_acpi_geo_sar_load_hp_has_6ghz(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_geo_sar_hp_has_6ghz *ptr = content;
+ const struct rtw89_acpi_geo_sar_hp_has_6ghz_entry *ptr_ent;
+ const struct rtw89_acpi_geo_sar_hp_val *ptr_ent_val;
+ enum rtw89_acpi_geo_sar_regd_hp geo_idx =
+ rtw89_acpi_geo_sar_regd_convert_hp_idx(regd);
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+ enum rtw89_band band;
+
+ ptr_ent = &ptr->entries[geo_idx];
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband);
+ switch (band) {
+ case RTW89_BAND_2G:
+ ptr_ent_val = &ptr_ent->val_2ghz;
+ break;
+ case RTW89_BAND_5G:
+ ptr_ent_val = &ptr_ent->val_5ghz;
+ break;
+ case RTW89_BAND_6G:
+ ptr_ent_val = &ptr_ent->val_6ghz;
+ break;
+ default:
+ ptr_ent_val = NULL;
+ break;
+ }
+
+ if (!ptr_ent_val)
+ continue;
+
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++)
+ rtw89_acpi_geo_sar_load_by_hp(rtwdev, ptr_ent_val, path,
+ &ent->v[subband][path]);
+ }
+}
+
+static
+void rtw89_acpi_geo_sar_load_rt_legacy(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_geo_sar_rt_legacy *ptr = content;
+ const struct rtw89_acpi_geo_sar_rt_legacy_entry *ptr_ent;
+ const struct rtw89_acpi_geo_sar_rt_val *ptr_ent_val;
+ enum rtw89_acpi_geo_sar_regd_rt geo_idx =
+ rtw89_acpi_geo_sar_regd_convert_rt_idx(regd);
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+ enum rtw89_band band;
+
+ ptr_ent = &ptr->entries[geo_idx];
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband);
+ switch (band) {
+ case RTW89_BAND_2G:
+ ptr_ent_val = &ptr_ent->val_2ghz;
+ break;
+ case RTW89_BAND_5G:
+ ptr_ent_val = &ptr_ent->val_5ghz;
+ break;
+ default:
+ case RTW89_BAND_6G:
+ ptr_ent_val = NULL;
+ break;
+ }
+
+ if (!ptr_ent_val)
+ continue;
+
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++)
+ rtw89_acpi_geo_sar_load_by_rt(rtwdev, ptr_ent_val,
+ &ent->v[subband][path]);
+ }
+}
+
+static
+void rtw89_acpi_geo_sar_load_rt_has_6ghz(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_geo_sar_rt_has_6ghz *ptr = content;
+ const struct rtw89_acpi_geo_sar_rt_has_6ghz_entry *ptr_ent;
+ const struct rtw89_acpi_geo_sar_rt_val *ptr_ent_val;
+ enum rtw89_acpi_geo_sar_regd_rt geo_idx =
+ rtw89_acpi_geo_sar_regd_convert_rt_idx(regd);
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+ enum rtw89_band band;
+
+ ptr_ent = &ptr->entries[geo_idx];
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband);
+ switch (band) {
+ case RTW89_BAND_2G:
+ ptr_ent_val = &ptr_ent->val_2ghz;
+ break;
+ case RTW89_BAND_5G:
+ ptr_ent_val = &ptr_ent->val_5ghz;
+ break;
+ case RTW89_BAND_6G:
+ ptr_ent_val = &ptr_ent->val_6ghz;
+ break;
+ default:
+ ptr_ent_val = NULL;
+ break;
+ }
+
+ if (!ptr_ent_val)
+ continue;
+
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++)
+ rtw89_acpi_geo_sar_load_by_rt(rtwdev, ptr_ent_val,
+ &ent->v[subband][path]);
+ }
+}
+
+#define RTW89_ACPI_GEO_SAR_DECL_HANDLER(type) \
+static const struct rtw89_acpi_geo_sar_handler \
+rtw89_acpi_geo_sar_handler_ ## type = { \
+ .data_size = RTW89_ACPI_GEO_SAR_SIZE_OF(type), \
+ .load = rtw89_acpi_geo_sar_load_ ## type, \
+}
+
+RTW89_ACPI_GEO_SAR_DECL_HANDLER(hp_legacy);
+RTW89_ACPI_GEO_SAR_DECL_HANDLER(hp_has_6ghz);
+RTW89_ACPI_GEO_SAR_DECL_HANDLER(rt_legacy);
+RTW89_ACPI_GEO_SAR_DECL_HANDLER(rt_has_6ghz);
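
For reference, RTW89_ACPI_GEO_SAR_DECL_HANDLER(hp_legacy) expands to roughly:

    static const struct rtw89_acpi_geo_sar_handler
    rtw89_acpi_geo_sar_handler_hp_legacy = {
            .data_size = RTW89_ACPI_GEO_SAR_SIZE_OF(hp_legacy),
            .load = rtw89_acpi_geo_sar_load_hp_legacy,
    };

so each handler ties a payload size to its loader, and the recognition table
below can reference one by pointer.
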
+
+static const struct rtw89_acpi_sar_recognition rtw89_acpi_sar_recs[] = {
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_HP,
+ .rev = RTW89_ACPI_SAR_REV_LEGACY,
+ .size = RTW89_ACPI_SAR_SIZE_OF(std_legacy),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_hp_legacy,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_hp_antidx,
+ .normalize = rtw89_acpi_sar_normalize_hp_val,
+ .load = rtw89_acpi_sar_load_std_legacy,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_HP,
+ .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ,
+ .size = RTW89_ACPI_SAR_SIZE_OF(std_has_6ghz),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_hp_has_6ghz,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_hp_antidx,
+ .normalize = rtw89_acpi_sar_normalize_hp_val,
+ .load = rtw89_acpi_sar_load_std_has_6ghz,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_RT,
+ .rev = RTW89_ACPI_SAR_REV_LEGACY,
+ .size = RTW89_ACPI_SAR_SIZE_OF(std_legacy),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_rt_legacy,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx,
+ .normalize = rtw89_acpi_sar_normalize_rt_val,
+ .load = rtw89_acpi_sar_load_std_legacy,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_RT,
+ .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ,
+ .size = RTW89_ACPI_SAR_SIZE_OF(std_has_6ghz),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_rt_has_6ghz,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx,
+ .normalize = rtw89_acpi_sar_normalize_rt_val,
+ .load = rtw89_acpi_sar_load_std_has_6ghz,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_RT,
+ .rev = RTW89_ACPI_SAR_REV_LEGACY,
+ .size = RTW89_ACPI_SAR_SIZE_OF(sml_legacy),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_rt_legacy,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx,
+ .normalize = rtw89_acpi_sar_normalize_rt_val,
+ .load = rtw89_acpi_sar_load_sml_legacy,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_RT,
+ .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ,
+ .size = RTW89_ACPI_SAR_SIZE_OF(sml_has_6ghz),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_rt_has_6ghz,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx,
+ .normalize = rtw89_acpi_sar_normalize_rt_val,
+ .load = rtw89_acpi_sar_load_sml_has_6ghz,
+ },
+};
+
+struct rtw89_acpi_sar_rec_parm {
+ u32 pld_len;
+ u8 tbl_cnt;
+ u16 cid;
+ u8 rev;
+};
+
+static const struct rtw89_acpi_sar_recognition *
+rtw89_acpi_sar_recognize(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_rec_parm *parm)
+{
+ const u32 tbl_len = parm->pld_len / parm->tbl_cnt;
+ const struct rtw89_acpi_sar_recognition *rec;
+ struct rtw89_acpi_sar_identifier id = {};
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "%s: cid %u, rev %u, tbl len %u, tbl cnt %u\n",
+ __func__, parm->cid, parm->rev, tbl_len, parm->tbl_cnt);
+
+ if (unlikely(parm->pld_len % parm->tbl_cnt)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid pld len %u\n",
+ parm->pld_len);
+ return NULL;
+ }
+
+ if (unlikely(tbl_len > RTW89_ACPI_SAR_SIZE_MAX)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid tbl len %u\n",
+ tbl_len);
+ return NULL;
+ }
+
+ if (unlikely(parm->tbl_cnt > MAX_NUM_OF_RTW89_ACPI_SAR_TBL)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid tbl cnt %u\n",
+ parm->tbl_cnt);
+ return NULL;
+ }
+
+ switch (parm->cid) {
+ case RTW89_ACPI_SAR_CID_HP:
+ case RTW89_ACPI_SAR_CID_RT:
+ id.cid = parm->cid;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid cid 0x%x\n",
+ parm->cid);
+ return NULL;
+ }
+
+ switch (parm->rev) {
+ case RTW89_ACPI_SAR_REV_LEGACY:
+ case RTW89_ACPI_SAR_REV_HAS_6GHZ:
+ id.rev = parm->rev;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid rev %u\n",
+ parm->rev);
+ return NULL;
+ }
+
+ id.size = tbl_len;
+ for (unsigned int i = 0; i < ARRAY_SIZE(rtw89_acpi_sar_recs); i++) {
+ rec = &rtw89_acpi_sar_recs[i];
+ if (memcmp(&rec->id, &id, sizeof(rec->id)) == 0)
+ return rec;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "failed to recognize\n");
+ return NULL;
+}
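
Recognition keys on (cid, rev, per-table size), where the per-table size is
derived as pld_len / tbl_cnt. For example, a dynamic blob with cid 0x5452
("RT"), rev 2 and three tables of the std_has_6ghz layout (4 antennas x 11
subbands = 44 bytes, taking the eleven subbands enumerated earlier) must carry
a 132-byte payload:

    #include <assert.h>

    int main(void)
    {
            unsigned int ant = 4, subbands = 11, tbl_cnt = 3;
            unsigned int pld_len = ant * subbands * tbl_cnt; /* 132 */

            assert(pld_len % tbl_cnt == 0);
            assert(pld_len / tbl_cnt == 44); /* matches sizeof(std_has_6ghz) */
            return 0;
    }
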
+
+static const struct rtw89_acpi_sar_recognition *
+rtw89_acpi_evaluate_static_sar(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg)
+{
+ const struct rtw89_acpi_sar_recognition *rec = NULL;
+ const struct rtw89_acpi_static_sar_hdr *hdr;
+ struct rtw89_sar_entry_from_acpi tmp = {};
+ struct rtw89_acpi_sar_rec_parm parm = {};
+ struct rtw89_sar_table_from_acpi *tbl;
+ const struct rtw89_acpi_data *data;
+ u32 len;
+
+ data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_STATIC_SAR);
+ if (!data)
+ return NULL;
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load static sar\n");
+
+ len = data->len;
+ if (len <= sizeof(*hdr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len);
goto out;
}
- buf_len = obj->buffer.length;
- if (buf_len != sizeof(*res)) {
- rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
- __func__, buf_len);
+ hdr = (typeof(hdr))data->buf;
+
+ parm.cid = le16_to_cpu(hdr->cid);
+ parm.rev = hdr->rev;
+ parm.tbl_cnt = 1;
+ parm.pld_len = len - sizeof(*hdr);
+
+ rec = rtw89_acpi_sar_recognize(rtwdev, &parm);
+ if (!rec)
+ goto out;
+
+ rec->load(rtwdev, rec, hdr->content, &tmp);
+
+ tbl = &cfg->tables[0];
+ for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++)
+ tbl->entries[regd] = tmp;
+
+ cfg->valid_num = 1;
+
+out:
+ kfree(data);
+ return rec;
+}
+
+static const struct rtw89_acpi_sar_recognition *
+rtw89_acpi_evaluate_dynamic_sar(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg)
+{
+ const struct rtw89_acpi_sar_recognition *rec = NULL;
+ const struct rtw89_acpi_dynamic_sar_hdr *hdr;
+ struct rtw89_acpi_sar_rec_parm parm = {};
+ struct rtw89_sar_table_from_acpi *tbl;
+ const struct rtw89_acpi_data *data;
+ u32 len;
+
+ data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_DYNAMIC_SAR);
+ if (!data)
+ return NULL;
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load dynamic sar\n");
+
+ len = data->len;
+ if (len <= sizeof(*hdr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len);
+ goto out;
+ }
+
+ hdr = (typeof(hdr))data->buf;
+
+ parm.cid = le16_to_cpu(hdr->cid);
+ parm.rev = hdr->rev;
+ parm.tbl_cnt = hdr->cnt;
+ parm.pld_len = len - sizeof(*hdr);
+
+ rec = rtw89_acpi_sar_recognize(rtwdev, &parm);
+ if (!rec)
+ goto out;
+
+ for (unsigned int i = 0; i < hdr->cnt; i++) {
+ const u8 *content = hdr->content + rec->id.size * i;
+ struct rtw89_sar_entry_from_acpi tmp = {};
+
+ rec->load(rtwdev, rec, content, &tmp);
+
+ tbl = &cfg->tables[i];
+ for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++)
+ tbl->entries[regd] = tmp;
+ }
+
+ cfg->valid_num = hdr->cnt;
+
+out:
+ kfree(data);
+ return rec;
+}
+
+int rtw89_acpi_evaluate_dynamic_sar_indicator(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg,
+ bool *poll_changed)
+{
+ struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator;
+ struct rtw89_sar_indicator_from_acpi tmp = *ind;
+ const struct rtw89_acpi_data *data;
+ const u8 *tbl_base1_by_ant;
+ enum rtw89_rf_path path;
+ int ret = 0;
+ u32 len;
+
+ data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_DYNAMIC_SAR_INDICATOR);
+ if (!data)
+ return -EFAULT;
+
+ if (!poll_changed)
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load dynamic sar indicator\n");
+
+ len = data->len;
+ if (len != ind->fields) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len);
ret = -EINVAL;
goto out;
}
- *res = *(struct rtw89_acpi_rtag_result *)obj->buffer.pointer;
+ tbl_base1_by_ant = data->buf;
- rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res));
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = ind->rfpath_to_antidx(path);
+ u8 sel;
+
+ if (antidx >= ind->fields)
+ antidx = 0;
+
+ /* convert the table index from 1-based to 0-based */
+ sel = tbl_base1_by_ant[antidx] - 1;
+ if (sel >= cfg->valid_num)
+ sel = 0;
+
+ tmp.tblsel[path] = sel;
+ }
+
+ if (memcmp(ind, &tmp, sizeof(*ind)) == 0) {
+ if (poll_changed)
+ *poll_changed = false;
+ } else {
+ if (poll_changed)
+ *poll_changed = true;
+
+ *ind = tmp;
+ }
out:
- ACPI_FREE(obj);
+ kfree(data);
return ret;
}
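
The indicator decode converts ACPI's 1-based per-antenna table index to a
0-based one, and anything out of range (including a raw 0, which wraps to 255
under the u8 subtraction) falls back to table 0. A hedged sketch of just that
step, with an illustrative helper name:

    static unsigned char decode_tblsel(unsigned char raw,
                                       unsigned char valid_num)
    {
            unsigned char sel = raw - 1; /* 1-based -> 0-based; 0 wraps to 255 */

            return sel >= valid_num ? 0 : sel;
    }
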
+
+static
+void rtw89_acpi_evaluate_geo_sar(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_geo_sar_handler *hdl,
+ struct rtw89_sar_cfg_acpi *cfg)
+{
+ const struct rtw89_acpi_data *data;
+ u32 len;
+
+ data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_GEO_SAR);
+ if (!data)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load geo sar\n");
+
+ len = data->len;
+ if (len != hdl->data_size) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u (expected %u)\n",
+ len, hdl->data_size);
+ goto out;
+ }
+
+ for (unsigned int i = 0; i < cfg->valid_num; i++)
+ for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++)
+ hdl->load(rtwdev, data->buf, regd, &cfg->tables[i].entries[regd]);
+
+out:
+ kfree(data);
+}
+
+int rtw89_acpi_evaluate_sar(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg)
+{
+ struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator;
+ const struct rtw89_acpi_sar_recognition *rec;
+ bool fetch_indicator = false;
+ int ret;
+
+ rec = rtw89_acpi_evaluate_static_sar(rtwdev, cfg);
+ if (rec)
+ goto recognized;
+
+ rec = rtw89_acpi_evaluate_dynamic_sar(rtwdev, cfg);
+ if (!rec)
+ return -ENOENT;
+
+ fetch_indicator = true;
+
+recognized:
+ rtw89_acpi_evaluate_geo_sar(rtwdev, rec->geo, cfg);
+
+ switch (rec->id.cid) {
+ case RTW89_ACPI_SAR_CID_HP:
+ cfg->downgrade_2tx = 3 << TXPWR_FACTOR_OF_RTW89_ACPI_SAR;
+ ind->fields = RTW89_ACPI_SAR_ANT_NR_STD;
+ break;
+ case RTW89_ACPI_SAR_CID_RT:
+ cfg->downgrade_2tx = 0;
+ ind->fields = 1;
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ if (fetch_indicator) {
+ ind->rfpath_to_antidx = rec->rfpath_to_antidx;
+ ret = rtw89_acpi_evaluate_dynamic_sar_indicator(rtwdev, cfg, NULL);
+ if (ret)
+ fetch_indicator = false;
+ }
+
+ if (!fetch_indicator)
+ memset(ind->tblsel, 0, sizeof(ind->tblsel));
+
+ ind->enable_sync = fetch_indicator;
+ return 0;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index b43ab106e44d..8c918ee02d2e 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -7,6 +7,11 @@
#include "core.h"
+struct rtw89_acpi_data {
+ u32 len;
+ u8 buf[] __counted_by(len);
+};
+
enum rtw89_acpi_dsm_func {
RTW89_ACPI_DSM_FUNC_IDN_BAND_SUP = 2,
RTW89_ACPI_DSM_FUNC_6G_DIS = 3,
@@ -26,6 +31,13 @@ enum rtw89_acpi_policy_mode {
RTW89_ACPI_POLICY_ALLOW = 1,
};
+enum rtw89_acpi_conf_tas {
+ RTW89_ACPI_CONF_TAS_US = BIT(0),
+ RTW89_ACPI_CONF_TAS_CA = BIT(1),
+ RTW89_ACPI_CONF_TAS_KR = BIT(2),
+ RTW89_ACPI_CONF_TAS_OTHERS = BIT(7),
+};
+
struct rtw89_acpi_country_code {
/* below are allowed:
* * ISO alpha2 country code
@@ -54,12 +66,21 @@ struct rtw89_acpi_policy_6ghz_sp {
u8 rsvd;
} __packed;
+struct rtw89_acpi_policy_tas {
+ u8 signature[4];
+ u8 revision;
+ u8 enable;
+ u8 enabled_countries;
+ u8 rsvd[3];
+} __packed;
+
struct rtw89_acpi_dsm_result {
union {
u8 value;
/* caller needs to free it after using */
struct rtw89_acpi_policy_6ghz *policy_6ghz;
struct rtw89_acpi_policy_6ghz_sp *policy_6ghz_sp;
+ struct rtw89_acpi_policy_tas *policy_tas;
} u;
};
@@ -70,10 +91,179 @@ struct rtw89_acpi_rtag_result {
u8 ant_gain_table[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
} __packed;
+enum rtw89_acpi_sar_cid {
+ RTW89_ACPI_SAR_CID_HP = 0x5048,
+ RTW89_ACPI_SAR_CID_RT = 0x5452,
+};
+
+enum rtw89_acpi_sar_rev {
+ RTW89_ACPI_SAR_REV_LEGACY = 1,
+ RTW89_ACPI_SAR_REV_HAS_6GHZ = 2,
+};
+
+#define RTW89_ACPI_SAR_ANT_NR_STD 4
+#define RTW89_ACPI_SAR_ANT_NR_SML 2
+
+#define RTW89_ACPI_METHOD_STATIC_SAR "WRDS"
+#define RTW89_ACPI_METHOD_DYNAMIC_SAR "RWRD"
+#define RTW89_ACPI_METHOD_DYNAMIC_SAR_INDICATOR "RWSI"
+#define RTW89_ACPI_METHOD_GEO_SAR "RWGS"
+
+struct rtw89_acpi_sar_std_legacy {
+ u8 v[RTW89_ACPI_SAR_ANT_NR_STD][RTW89_ACPI_SAR_SUBBAND_NR_LEGACY];
+} __packed;
+
+struct rtw89_acpi_sar_std_has_6ghz {
+ u8 v[RTW89_ACPI_SAR_ANT_NR_STD][RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ];
+} __packed;
+
+struct rtw89_acpi_sar_sml_legacy {
+ u8 v[RTW89_ACPI_SAR_ANT_NR_SML][RTW89_ACPI_SAR_SUBBAND_NR_LEGACY];
+} __packed;
+
+struct rtw89_acpi_sar_sml_has_6ghz {
+ u8 v[RTW89_ACPI_SAR_ANT_NR_SML][RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ];
+} __packed;
+
+struct rtw89_acpi_static_sar_hdr {
+ __le16 cid;
+ u8 rev;
+ u8 content[];
+} __packed;
+
+struct rtw89_acpi_dynamic_sar_hdr {
+ __le16 cid;
+ u8 rev;
+ u8 cnt;
+ u8 content[];
+} __packed;
+
+struct rtw89_acpi_sar_identifier {
+ enum rtw89_acpi_sar_cid cid;
+ enum rtw89_acpi_sar_rev rev;
+ u8 size;
+};
+
+/* for rtw89_acpi_sar_identifier::size */
+#define RTW89_ACPI_SAR_SIZE_MAX U8_MAX
+#define RTW89_ACPI_SAR_SIZE_OF(type) \
+ (BUILD_BUG_ON_ZERO(sizeof(struct rtw89_acpi_sar_ ## type) > \
+ RTW89_ACPI_SAR_SIZE_MAX) + \
+ sizeof(struct rtw89_acpi_sar_ ## type))
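
The BUILD_BUG_ON_ZERO() term evaluates to 0 when the struct fits the u8 size
field and breaks the build otherwise, so the whole macro is just a checked
sizeof. A standalone demonstration of the same pattern (the negative-bitfield
definition mirrors the kernel's, reproduced here from memory):

    #define MY_BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
    #define CHECKED_SIZE(T, MAX) \
            (MY_BUILD_BUG_ON_ZERO(sizeof(T) > (MAX)) + sizeof(T))

    struct demo { unsigned char v[44]; };

    static const unsigned long demo_size = CHECKED_SIZE(struct demo, 255);
    /* compiles as-is; change 255 to 16 and the negative bit-field width
     * becomes a compile-time error */
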
+
+struct rtw89_acpi_sar_recognition {
+ struct rtw89_acpi_sar_identifier id;
+ const struct rtw89_acpi_geo_sar_handler *geo;
+
+ u8 (*rfpath_to_antidx)(enum rtw89_rf_path rfpath);
+ s16 (*normalize)(u8 v);
+ void (*load)(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent);
+};
+
+struct rtw89_acpi_geo_sar_hp_val {
+ u8 max;
+ s8 delta[RTW89_ACPI_SAR_ANT_NR_STD];
+} __packed;
+
+struct rtw89_acpi_geo_sar_hp_legacy_entry {
+ struct rtw89_acpi_geo_sar_hp_val val_2ghz;
+ struct rtw89_acpi_geo_sar_hp_val val_5ghz;
+} __packed;
+
+struct rtw89_acpi_geo_sar_hp_has_6ghz_entry {
+ struct rtw89_acpi_geo_sar_hp_val val_2ghz;
+ struct rtw89_acpi_geo_sar_hp_val val_5ghz;
+ struct rtw89_acpi_geo_sar_hp_val val_6ghz;
+} __packed;
+
+enum rtw89_acpi_geo_sar_regd_hp {
+ RTW89_ACPI_GEO_SAR_REGD_HP_FCC = 0,
+ RTW89_ACPI_GEO_SAR_REGD_HP_ETSI = 1,
+ RTW89_ACPI_GEO_SAR_REGD_HP_WW = 2,
+
+ RTW89_ACPI_GEO_SAR_REGD_NR_HP,
+};
+
+struct rtw89_acpi_geo_sar_hp_legacy {
+ struct rtw89_acpi_geo_sar_hp_legacy_entry
+ entries[RTW89_ACPI_GEO_SAR_REGD_NR_HP];
+} __packed;
+
+struct rtw89_acpi_geo_sar_hp_has_6ghz {
+ struct rtw89_acpi_geo_sar_hp_has_6ghz_entry
+ entries[RTW89_ACPI_GEO_SAR_REGD_NR_HP];
+} __packed;
+
+struct rtw89_acpi_geo_sar_rt_val {
+ u8 max;
+ s8 delta;
+} __packed;
+
+struct rtw89_acpi_geo_sar_rt_legacy_entry {
+ struct rtw89_acpi_geo_sar_rt_val val_2ghz;
+ struct rtw89_acpi_geo_sar_rt_val val_5ghz;
+} __packed;
+
+struct rtw89_acpi_geo_sar_rt_has_6ghz_entry {
+ struct rtw89_acpi_geo_sar_rt_val val_2ghz;
+ struct rtw89_acpi_geo_sar_rt_val val_5ghz;
+ struct rtw89_acpi_geo_sar_rt_val val_6ghz;
+} __packed;
+
+enum rtw89_acpi_geo_sar_regd_rt {
+ RTW89_ACPI_GEO_SAR_REGD_RT_FCC = 0,
+ RTW89_ACPI_GEO_SAR_REGD_RT_ETSI = 1,
+ RTW89_ACPI_GEO_SAR_REGD_RT_MKK = 2,
+ RTW89_ACPI_GEO_SAR_REGD_RT_IC = 3,
+ RTW89_ACPI_GEO_SAR_REGD_RT_KCC = 4,
+ RTW89_ACPI_GEO_SAR_REGD_RT_WW = 5,
+
+ RTW89_ACPI_GEO_SAR_REGD_NR_RT,
+};
+
+struct rtw89_acpi_geo_sar_rt_legacy {
+ struct rtw89_acpi_geo_sar_rt_legacy_entry
+ entries[RTW89_ACPI_GEO_SAR_REGD_NR_RT];
+} __packed;
+
+struct rtw89_acpi_geo_sar_rt_has_6ghz {
+ struct rtw89_acpi_geo_sar_rt_has_6ghz_entry
+ entries[RTW89_ACPI_GEO_SAR_REGD_NR_RT];
+} __packed;
+
+struct rtw89_acpi_geo_sar_handler {
+ u8 data_size;
+
+ void (*load)(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent);
+};
+
+/* for rtw89_acpi_geo_sar_handler::data_size */
+#define RTW89_ACPI_GEO_SAR_SIZE_MAX U8_MAX
+#define RTW89_ACPI_GEO_SAR_SIZE_OF(type) \
+ (BUILD_BUG_ON_ZERO(sizeof(struct rtw89_acpi_geo_sar_ ## type) > \
+ RTW89_ACPI_GEO_SAR_SIZE_MAX) + \
+ sizeof(struct rtw89_acpi_geo_sar_ ## type))
+
+enum rtw89_acpi_sar_subband rtw89_acpi_sar_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq);
+enum rtw89_band rtw89_acpi_sar_subband_to_band(struct rtw89_dev *rtwdev,
+ enum rtw89_acpi_sar_subband subband);
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res);
int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
struct rtw89_acpi_rtag_result *res);
+int rtw89_acpi_evaluate_sar(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg);
+int rtw89_acpi_evaluate_dynamic_sar_indicator(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg,
+ bool *changed);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index eca3d767ff60..385a238fe5cc 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -6,6 +6,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "ps.h"
static struct sk_buff *
rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev,
@@ -469,11 +470,17 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
bool ext_key = false;
int ret;
+ if (ieee80211_vif_is_mld(vif) && !chip->hw_mlo_bmc_crypto &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
+ rtw89_leave_ips_by_hwflags(rtwdev);
hw_key_type = RTW89_SEC_KEY_TYPE_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
+ rtw89_leave_ips_by_hwflags(rtwdev);
hw_key_type = RTW89_SEC_KEY_TYPE_WEP104;
break;
case WLAN_CIPHER_SUITE_TKIP:
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index f60e93870b09..806f42429a29 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -189,9 +189,10 @@ void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
}
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_chanctx_idx idx,
+ struct rtw89_vif_link *rtwvif_link,
const struct cfg80211_chan_def *chandef)
{
+ enum rtw89_chanctx_idx idx = rtwvif_link->chanctx_idx;
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_chanctx_idx cur;
@@ -205,6 +206,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
}
hal->roc_chandef = *chandef;
+ hal->roc_link_index = rtw89_vif_link_inst_get_index(rtwvif_link);
} else {
cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx,
RTW89_CHANCTX_IDLE);
@@ -339,11 +341,10 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx != RTW89_CHANCTX_IDLE) {
- /* ROC is ongoing (given ROC runs on RTW89_ROC_BY_LINK_INDEX).
- * If @link_index is the same as RTW89_ROC_BY_LINK_INDEX, get
- * the ongoing ROC chanctx.
+ /* ROC is ongoing (given ROC runs on @hal->roc_link_index).
+ * If @link_index is the same, get the ongoing ROC chanctx.
*/
- if (link_index == RTW89_ROC_BY_LINK_INDEX)
+ if (link_index == hal->roc_link_index)
chanctx_idx = roc_idx;
}
@@ -358,12 +359,41 @@ dflt:
}
EXPORT_SYMBOL(__rtw89_mgnt_chan_get);
+static enum rtw89_mlo_dbcc_mode
+rtw89_entity_sel_mlo_dbcc_mode(struct rtw89_dev *rtwdev, u8 active_hws)
+{
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_BE)
+ return MLO_DBCC_NOT_SUPPORT;
+
+ switch (active_hws) {
+ case BIT(0):
+ return MLO_2_PLUS_0_1RF;
+ case BIT(1):
+ return MLO_0_PLUS_2_1RF;
+ case BIT(0) | BIT(1):
+ default:
+ return MLO_1_PLUS_1_1RF;
+ }
+}
+
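Reading the selector together with rtw89_entity_recalc_mgnt_roles() below: active_hws is a bitmap with BIT(i) set for every HW band i that carries at least one active link. BIT(0) alone maps to MLO_2_PLUS_0_1RF, BIT(1) alone to MLO_0_PLUS_2_1RF, and both bits (or, via the default arm, an empty bitmap) to MLO_1_PLUS_1_1RF; non-BE chips always report MLO_DBCC_NOT_SUPPORT.
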
+static
+void rtw89_entity_recalc_mlo_dbcc_mode(struct rtw89_dev *rtwdev, u8 active_hws)
+{
+ enum rtw89_mlo_dbcc_mode mode;
+
+ mode = rtw89_entity_sel_mlo_dbcc_mode(rtwdev, active_hws);
+ rtwdev->mlo_dbcc_mode = mode;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "recalc mlo dbcc mode to %d\n", mode);
+}
+
static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
struct rtw89_vif_link *link;
struct rtw89_vif *role;
+ u8 active_hws = 0;
u8 pos = 0;
int i, j;
@@ -412,10 +442,13 @@ fill:
continue;
mgnt->chanctx_tbl[pos][i] = link->chanctx_idx;
+ active_hws |= BIT(i);
}
mgnt->active_roles[pos++] = role;
}
+
+ rtw89_entity_recalc_mlo_dbcc_mode(rtwdev, active_hws);
}
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
@@ -557,7 +590,9 @@ static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
u64 sync_tsf = READ_ONCE(rtwvif_link->sync_bcn_tsf);
u32 remainder;
- if (tsf < sync_tsf) {
+ if (role->is_go) {
+ sync_tsf = 0;
+ } else if (tsf < sync_tsf) {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC get tbtt ofst: tsf might not update yet\n");
sync_tsf = 0;
@@ -691,19 +726,13 @@ static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_vif *target = mcc_role->rtwvif_link->rtwvif;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
- struct rtw89_dev *rtwdev = rtwsta->rtwdev;
- struct rtw89_sta_link *rtwsta_link;
+ u8 macid;
if (rtwvif != target)
return;
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
- if (unlikely(!rtwsta_link)) {
- rtw89_err(rtwdev, "mcc sta macid: find no link on HW-0\n");
- return;
- }
-
- rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwsta_link->mac_id);
+ macid = rtw89_sta_get_main_macid(rtwsta);
+ rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, macid);
}
static void rtw89_mcc_fill_role_macid_bitmap(struct rtw89_dev *rtwdev,
@@ -747,9 +776,11 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
int ret;
int i;
- if (!mcc_role->is_go && !mcc_role->is_gc)
+ if (!mcc_role->is_gc)
return;
+ rtw89_p2p_noa_once_recalc(rtwvif_link);
+
rcu_read_lock();
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
@@ -785,6 +816,9 @@ fill:
}
tsf_lmt = (tsf & GENMASK_ULL(63, 32)) | start_time;
+ if (tsf_lmt < tsf)
+ tsf_lmt += roundup_u64(tsf - tsf_lmt, interval);
+
max_toa_us = rtw89_mcc_get_tbtt_ofst(rtwdev, mcc_role, tsf_lmt);
max_dur_us = interval - duration;
max_tob_us = max_dur_us - max_toa_us;
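
The roundup_u64() fix above replaces a stale tsf_lmt with the next beacon-aligned instant at or after the current TSF in a single step. A userspace stand-in with the same contract as the kernel helper (round x up to a multiple of y), using made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* same contract as the kernel's roundup_u64() */
    static uint64_t roundup_u64(uint64_t x, uint64_t y)
    {
        return (x + y - 1) / y * y;
    }

    int main(void)
    {
        uint64_t tsf = 1000, tsf_lmt = 300, interval = 512;

        if (tsf_lmt < tsf)   /* advance by whole beacon intervals only */
            tsf_lmt += roundup_u64(tsf - tsf_lmt, interval);

        /* 300 + roundup(700, 512) = 300 + 1024 = 1324 >= tsf */
        printf("%llu\n", (unsigned long long)tsf_lmt);
        return 0;
    }
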
@@ -929,6 +963,15 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
return 0;
}
+static bool rtw89_mcc_can_courtesy(const struct rtw89_mcc_role *provider,
+ const struct rtw89_mcc_role *receiver)
+{
+ if (provider->is_go || receiver->is_gc)
+ return false;
+
+ return true;
+}
+
static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev,
const struct rtw89_mcc_pattern *new)
{
@@ -937,37 +980,44 @@ static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_courtesy_cfg *crtz;
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC assign pattern: ref {%d | %d}, aux {%d | %d}\n",
new->tob_ref, new->toa_ref, new->tob_aux, new->toa_aux);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC pattern plan: %d\n", new->plan);
+
*pattern = *new;
memset(&pattern->courtesy, 0, sizeof(pattern->courtesy));
- if (pattern->tob_aux <= 0 || pattern->toa_aux <= 0) {
- pattern->courtesy.macid_tgt = aux->rtwvif_link->mac_id;
- pattern->courtesy.macid_src = ref->rtwvif_link->mac_id;
- pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
- pattern->courtesy.enable = true;
- } else if (pattern->tob_ref <= 0 || pattern->toa_ref <= 0) {
- pattern->courtesy.macid_tgt = ref->rtwvif_link->mac_id;
- pattern->courtesy.macid_src = aux->rtwvif_link->mac_id;
- pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
- pattern->courtesy.enable = true;
+ if (RTW89_MCC_REQ_COURTESY(pattern, aux) && rtw89_mcc_can_courtesy(ref, aux)) {
+ crtz = &pattern->courtesy.ref;
+ ref->crtz = crtz;
+
+ crtz->macid_tgt = aux->rtwvif_link->mac_id;
+ crtz->slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC courtesy ref: tgt %d, slot %d\n",
+ crtz->macid_tgt, crtz->slot_num);
+ } else {
+ ref->crtz = NULL;
}
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC pattern flags: plan %d, courtesy_en %d\n",
- pattern->plan, pattern->courtesy.enable);
+ if (RTW89_MCC_REQ_COURTESY(pattern, ref) && rtw89_mcc_can_courtesy(aux, ref)) {
+ crtz = &pattern->courtesy.aux;
+ aux->crtz = crtz;
- if (!pattern->courtesy.enable)
- return;
+ crtz->macid_tgt = ref->rtwvif_link->mac_id;
+ crtz->slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC pattern courtesy: tgt %d, src %d, slot %d\n",
- pattern->courtesy.macid_tgt, pattern->courtesy.macid_src,
- pattern->courtesy.slot_num);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC courtesy aux: tgt %d, slot %d\n",
+ crtz->macid_tgt, crtz->slot_num);
+ } else {
+ aux->crtz = NULL;
+ }
}
/* The follow-up roughly shows the relationship between the parameters
@@ -992,6 +1042,7 @@ static void __rtw89_mcc_calc_pattern_loose(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
+ u16 mcc_intvl = config->mcc_interval;
u16 bcn_ofst = config->beacon_offset;
u16 bt_dur_in_mid = 0;
u16 max_bcn_ofst;
@@ -1025,7 +1076,7 @@ calc:
res = bcn_ofst - bt_dur_in_mid;
upper = min_t(s16, ref->duration, res);
- lower = 0;
+ lower = max_t(s16, 0, ref->duration - (mcc_intvl - bcn_ofst));
if (ref->limit.enable) {
upper = min_t(s16, upper, ref->limit.max_toa);
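
The new lower bound is algebra on the slot layout: requiring toa_ref >= ref->duration - (mcc_intvl - bcn_ofst) is equivalent to tob_ref = ref->duration - toa_ref <= mcc_intvl - bcn_ofst, so the reference slot's pre-TBTT portion can no longer exceed the part of the interval lying outside the beacon offset. For example, with mcc_intvl = 100, bcn_ofst = 80 and ref->duration = 40 (all in TU), lower = max(0, 40 - 20) = 20, pinning tob_ref to at most 20 TU where the old `lower = 0` allowed toa_ref = 0 and tob_ref = 40.
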
@@ -1136,6 +1187,107 @@ static int __rtw89_mcc_calc_pattern_strict(struct rtw89_dev *rtwdev,
return 0;
}
+static void __rtw89_mcc_fill_ptrn_anchor_ref(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_pattern *ptrn,
+ bool small_bcn_ofst)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ u16 bcn_ofst = config->beacon_offset;
+ u16 ref_tob;
+ u16 ref_toa;
+
+ if (ref->limit.enable) {
+ ref_tob = ref->limit.max_tob;
+ ref_toa = ref->limit.max_toa;
+ } else {
+ ref_tob = ref->duration / 2;
+ ref_toa = ref->duration / 2;
+ }
+
+ if (small_bcn_ofst) {
+ ptrn->toa_ref = ref_toa;
+ ptrn->tob_ref = ref->duration - ptrn->toa_ref;
+ } else {
+ ptrn->tob_ref = ref_tob;
+ ptrn->toa_ref = ref->duration - ptrn->tob_ref;
+ }
+
+ ptrn->tob_aux = bcn_ofst - ptrn->toa_ref;
+ ptrn->toa_aux = aux->duration - ptrn->tob_aux;
+}
+
+static void __rtw89_mcc_fill_ptrn_anchor_aux(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_pattern *ptrn,
+ bool small_bcn_ofst)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ u16 bcn_ofst = config->beacon_offset;
+ u16 aux_tob;
+ u16 aux_toa;
+
+ if (aux->limit.enable) {
+ aux_tob = aux->limit.max_tob;
+ aux_toa = aux->limit.max_toa;
+ } else {
+ aux_tob = aux->duration / 2;
+ aux_toa = aux->duration / 2;
+ }
+
+ if (small_bcn_ofst) {
+ ptrn->tob_aux = aux_tob;
+ ptrn->toa_aux = aux->duration - ptrn->tob_aux;
+ } else {
+ ptrn->toa_aux = aux_toa;
+ ptrn->tob_aux = aux->duration - ptrn->toa_aux;
+ }
+
+ ptrn->toa_ref = bcn_ofst - ptrn->tob_aux;
+ ptrn->tob_ref = ref->duration - ptrn->toa_ref;
+}
+
+static int __rtw89_mcc_calc_pattern_anchor(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_pattern *ptrn,
+ bool hdl_bt)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ u16 mcc_intvl = config->mcc_interval;
+ u16 bcn_ofst = config->beacon_offset;
+ bool small_bcn_ofst;
+
+ if (bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME)
+ small_bcn_ofst = true;
+ else if (mcc_intvl - bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME)
+ small_bcn_ofst = false;
+ else
+ return -EPERM;
+
+ *ptrn = (typeof(*ptrn)){
+ .plan = hdl_bt ? RTW89_MCC_PLAN_TAIL_BT : RTW89_MCC_PLAN_NO_BT,
+ };
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC calc ptrn_ac: plan %d, bcn_ofst %d\n",
+ ptrn->plan, bcn_ofst);
+
+ if (ref->is_go || ref->is_gc)
+ __rtw89_mcc_fill_ptrn_anchor_ref(rtwdev, ptrn, small_bcn_ofst);
+ else if (aux->is_go || aux->is_gc)
+ __rtw89_mcc_fill_ptrn_anchor_aux(rtwdev, ptrn, small_bcn_ofst);
+ else
+ __rtw89_mcc_fill_ptrn_anchor_ref(rtwdev, ptrn, small_bcn_ofst);
+
+ return 0;
+}
+
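__rtw89_mcc_calc_pattern_anchor() only fires when one of the two beacon windows is tight. Assuming, purely for illustration, RTW89_MCC_MIN_RX_BCN_TIME = 40 TU (its value is not shown in this hunk): with mcc_intvl = 100, bcn_ofst = 30 gives small_bcn_ofst = true; bcn_ofst = 70 gives 100 - 70 = 30 < 40, so small_bcn_ofst = false; bcn_ofst = 50 hits neither branch and the -EPERM return lets rtw89_mcc_calc_pattern() fall through to the strict/loose calculators. The anchor is then planted on whichever role is GO/GC, defaulting to ref.
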
static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1189,6 +1341,10 @@ static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt)
goto done;
}
+ ret = __rtw89_mcc_calc_pattern_anchor(rtwdev, &ptrn, hdl_bt);
+ if (!ret)
+ goto done;
+
__rtw89_mcc_calc_pattern_loose(rtwdev, &ptrn, hdl_bt);
done:
@@ -1439,88 +1595,41 @@ static bool rtw89_mcc_duration_decision_on_bt(struct rtw89_dev *rtwdev)
return false;
}
-static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
- struct rtw89_mcc_role *tgt,
- struct rtw89_mcc_role *src,
- bool ref_is_src)
-{
- struct rtw89_mcc_info *mcc = &rtwdev->mcc;
- struct rtw89_mcc_config *config = &mcc->config;
- u16 beacon_offset_us = ieee80211_tu_to_usec(config->beacon_offset);
- u32 bcn_intvl_src_us = ieee80211_tu_to_usec(src->beacon_interval);
- u32 cur_tbtt_ofst_src;
- u32 tsf_ofst_tgt;
- u32 remainder;
- u64 tbtt_tgt;
- u64 tsf_src;
- int ret;
-
- ret = rtw89_mac_port_get_tsf(rtwdev, src->rtwvif_link, &tsf_src);
- if (ret) {
- rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
- return;
- }
-
- cur_tbtt_ofst_src = rtw89_mcc_get_tbtt_ofst(rtwdev, src, tsf_src);
-
- if (ref_is_src)
- tbtt_tgt = tsf_src - cur_tbtt_ofst_src + beacon_offset_us;
- else
- tbtt_tgt = tsf_src - cur_tbtt_ofst_src +
- (bcn_intvl_src_us - beacon_offset_us);
-
- div_u64_rem(tbtt_tgt, bcn_intvl_src_us, &remainder);
- tsf_ofst_tgt = bcn_intvl_src_us - remainder;
-
- config->sync.macid_tgt = tgt->rtwvif_link->mac_id;
- config->sync.band_tgt = tgt->rtwvif_link->mac_idx;
- config->sync.port_tgt = tgt->rtwvif_link->port;
- config->sync.macid_src = src->rtwvif_link->mac_id;
- config->sync.band_src = src->rtwvif_link->mac_idx;
- config->sync.port_src = src->rtwvif_link->port;
- config->sync.offset = tsf_ofst_tgt / 1024;
- config->sync.enable = true;
-
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC sync tbtt: tgt %d, src %d, offset %d\n",
- config->sync.macid_tgt, config->sync.macid_src,
- config->sync.offset);
-
- rtw89_mac_port_tsf_sync(rtwdev, tgt->rtwvif_link, src->rtwvif_link,
- config->sync.offset);
-}
-
static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
- u32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref);
- struct rtw89_vif_link *rtwvif_link = ref->rtwvif_link;
+ s32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref);
u64 tsf, start_tsf;
u32 cur_tbtt_ofst;
u64 min_time;
+ u64 tsf_aux;
int ret;
- ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
- if (ret) {
- rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_req_tsf(rtwdev, &tsf, &tsf_aux);
+ else
+ ret = __mcc_fw_req_tsf(rtwdev, &tsf, &tsf_aux);
+
+ if (ret)
return ret;
- }
min_time = tsf;
- if (ref->is_go)
+ if (ref->is_go || aux->is_go)
min_time += ieee80211_tu_to_usec(RTW89_MCC_SHORT_TRIGGER_TIME);
else
min_time += ieee80211_tu_to_usec(RTW89_MCC_LONG_TRIGGER_TIME);
cur_tbtt_ofst = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf);
start_tsf = tsf - cur_tbtt_ofst + bcn_intvl_ref_us - tob_ref_us;
- while (start_tsf < min_time)
- start_tsf += bcn_intvl_ref_us;
+ if (start_tsf < min_time)
+ start_tsf += roundup_u64(min_time - start_tsf, bcn_intvl_ref_us);
config->start_tsf = start_tsf;
+ config->start_tsf_in_aux_domain = tsf_aux + start_tsf - tsf;
return 0;
}
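
Two things changed in rtw89_mcc_fill_start_tsf(). First, the old `while (start_tsf < min_time)` loop collapses into one roundup_u64() step: e.g. start_tsf = 5000 us, min_time = 250000 us and bcn_intvl_ref_us = 102400 us gives roundup_u64(245000, 102400) = 307200, i.e. start_tsf = 312200 us after three whole intervals, exactly what the loop produced. Second, since both TSFs now arrive from firmware in one request, start_tsf_in_aux_domain = tsf_aux + (start_tsf - tsf) records the same instant in the aux link's TSF domain, which rtw89_mcc_handle_beacon_noa() below uses when the GO sits on aux.
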
@@ -1537,13 +1646,11 @@ static int rtw89_mcc_fill_config(struct rtw89_dev *rtwdev)
switch (mcc->mode) {
case RTW89_MCC_MODE_GO_STA:
- config->beacon_offset = RTW89_MCC_DFLT_BCN_OFST_TIME;
+ config->beacon_offset = rtw89_mcc_get_bcn_ofst(rtwdev);
if (ref->is_go) {
- rtw89_mcc_sync_tbtt(rtwdev, ref, aux, false);
config->mcc_interval = ref->beacon_interval;
rtw89_mcc_set_duration_go_sta(rtwdev, ref, aux);
} else {
- rtw89_mcc_sync_tbtt(rtwdev, aux, ref, true);
config->mcc_interval = aux->beacon_interval;
rtw89_mcc_set_duration_go_sta(rtwdev, aux, ref);
}
@@ -1573,10 +1680,8 @@ bottom:
static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role)
{
+ const struct rtw89_mcc_courtesy_cfg *crtz = role->crtz;
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
- struct rtw89_mcc_config *config = &mcc->config;
- struct rtw89_mcc_pattern *pattern = &config->pattern;
- struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
struct rtw89_mcc_policy *policy = &role->policy;
struct rtw89_fw_mcc_add_req req = {};
const struct rtw89_chan *chan;
@@ -1599,9 +1704,9 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
req.duration = role->duration;
req.btc_in_2g = false;
- if (courtesy->enable && courtesy->macid_src == req.macid) {
- req.courtesy_target = courtesy->macid_tgt;
- req.courtesy_num = courtesy->slot_num;
+ if (crtz) {
+ req.courtesy_target = crtz->macid_tgt;
+ req.courtesy_num = crtz->slot_num;
req.courtesy_en = true;
}
@@ -1781,26 +1886,23 @@ static void __mrc_fw_add_courtesy(struct rtw89_dev *rtwdev,
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
- struct rtw89_mcc_config *config = &mcc->config;
- struct rtw89_mcc_pattern *pattern = &config->pattern;
- struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
struct rtw89_fw_mrc_add_slot_arg *slot_arg_src;
- u8 slot_idx_tgt;
- if (!courtesy->enable)
- return;
-
- if (courtesy->macid_src == ref->rtwvif_link->mac_id) {
+ if (ref->crtz) {
slot_arg_src = &arg->slots[ref->slot_idx];
- slot_idx_tgt = aux->slot_idx;
- } else {
- slot_arg_src = &arg->slots[aux->slot_idx];
- slot_idx_tgt = ref->slot_idx;
+
+ slot_arg_src->courtesy_target = aux->slot_idx;
+ slot_arg_src->courtesy_period = ref->crtz->slot_num;
+ slot_arg_src->courtesy_en = true;
}
- slot_arg_src->courtesy_target = slot_idx_tgt;
- slot_arg_src->courtesy_period = courtesy->slot_num;
- slot_arg_src->courtesy_en = true;
+ if (aux->crtz) {
+ slot_arg_src = &arg->slots[aux->slot_idx];
+
+ slot_arg_src->courtesy_target = ref->slot_idx;
+ slot_arg_src->courtesy_period = aux->crtz->slot_num;
+ slot_arg_src->courtesy_en = true;
+ }
}
static int __mrc_fw_start(struct rtw89_dev *rtwdev, bool replace)
@@ -2000,30 +2102,24 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
- struct rtw89_mcc_pattern *pattern = &config->pattern;
- struct rtw89_mcc_sync *sync = &config->sync;
struct ieee80211_p2p_noa_desc noa_desc = {};
- u64 start_time = config->start_tsf;
u32 interval = config->mcc_interval;
struct rtw89_vif_link *rtwvif_go;
+ u64 start_time;
u32 duration;
if (mcc->mode != RTW89_MCC_MODE_GO_STA)
return;
if (ref->is_go) {
+ start_time = config->start_tsf;
rtwvif_go = ref->rtwvif_link;
start_time += ieee80211_tu_to_usec(ref->duration);
duration = config->mcc_interval - ref->duration;
} else if (aux->is_go) {
+ start_time = config->start_tsf_in_aux_domain;
rtwvif_go = aux->rtwvif_link;
- start_time += ieee80211_tu_to_usec(pattern->tob_ref) +
- ieee80211_tu_to_usec(config->beacon_offset) +
- ieee80211_tu_to_usec(pattern->toa_aux);
duration = config->mcc_interval - aux->duration;
-
- /* convert time domain from sta(ref) to GO(aux) */
- start_time += ieee80211_tu_to_usec(sync->offset);
} else {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC find no GO: skip updating beacon NoA\n");
@@ -2127,6 +2223,12 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
}
struct rtw89_mcc_stop_sel {
+ struct {
+ const struct rtw89_vif_link *target;
+ } hint;
+
+ /* selection content */
+ bool filled;
u8 mac_id;
u8 slot_idx;
};
@@ -2136,6 +2238,7 @@ static void rtw89_mcc_stop_sel_fill(struct rtw89_mcc_stop_sel *sel,
{
sel->mac_id = mcc_role->rtwvif_link->mac_id;
sel->slot_idx = mcc_role->slot_idx;
+ sel->filled = true;
}
static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev,
@@ -2145,23 +2248,41 @@ static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev,
{
struct rtw89_mcc_stop_sel *sel = data;
+ if (mcc_role->rtwvif_link == sel->hint.target) {
+ rtw89_mcc_stop_sel_fill(sel, mcc_role);
+ return 1; /* break iteration */
+ }
+
+ if (sel->filled)
+ return 0;
+
if (!mcc_role->rtwvif_link->chanctx_assigned)
return 0;
rtw89_mcc_stop_sel_fill(sel, mcc_role);
- return 1; /* break iteration */
+ return 0;
}
-static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
+static void rtw89_mcc_stop(struct rtw89_dev *rtwdev,
+ const struct rtw89_chanctx_pause_parm *pause)
{
+ struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
- struct rtw89_mcc_stop_sel sel;
+ struct rtw89_mcc_stop_sel sel = {
+ .hint.target = pause ? pause->trigger : NULL,
+ };
int ret;
+ if (!pause) {
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->chanctx_work);
+ bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
+ }
+
/* by default, stop at ref */
- rtw89_mcc_stop_sel_fill(&sel, ref);
rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_stop_sel_iterator, &sel);
+ if (!sel.filled)
+ rtw89_mcc_stop_sel_fill(&sel, ref);
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop at <macid %d>\n", sel.mac_id);
@@ -2193,6 +2314,7 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_config *config = &mcc->config;
struct rtw89_mcc_config old_cfg = *config;
+ bool courtesy_changed;
bool sync_changed;
int ret;
@@ -2205,8 +2327,15 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ if (memcmp(&old_cfg.pattern.courtesy, &config->pattern.courtesy,
+ sizeof(old_cfg.pattern.courtesy)) == 0)
+ courtesy_changed = false;
+ else
+ courtesy_changed = true;
+
if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT ||
- config->pattern.plan != RTW89_MCC_PLAN_NO_BT) {
+ config->pattern.plan != RTW89_MCC_PLAN_NO_BT ||
+ courtesy_changed) {
if (rtw89_concurrent_via_mrc(rtwdev))
ret = __mrc_fw_start(rtwdev, true);
else
@@ -2238,7 +2367,7 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_config *config = &mcc->config;
struct rtw89_mcc_pattern *pattern = &config->pattern;
- s16 tolerance;
+ u16 tolerance;
u16 bcn_ofst;
u16 diff;
@@ -2246,18 +2375,25 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
return;
bcn_ofst = rtw89_mcc_get_bcn_ofst(rtwdev);
+ if (bcn_ofst == config->beacon_offset)
+ return;
+
if (bcn_ofst > config->beacon_offset) {
diff = bcn_ofst - config->beacon_offset;
if (pattern->tob_aux < 0)
tolerance = -pattern->tob_aux;
- else
+ else if (pattern->toa_aux > 0)
tolerance = pattern->toa_aux;
+ else
+ return; /* no chance to improve */
} else {
diff = config->beacon_offset - bcn_ofst;
if (pattern->toa_aux < 0)
tolerance = -pattern->toa_aux;
- else
+ else if (pattern->tob_aux > 0)
tolerance = pattern->tob_aux;
+ else
+ return; /* no chance to improve */
}
if (diff <= tolerance)
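
The reworked tracking logic treats the aux slot's slack explicitly. Example: with pattern {tob_aux = -5, toa_aux = 12} and the measured beacon offset drifting 4 TU above config->beacon_offset, tolerance = 5 (from the negative tob_aux) and diff = 4 <= tolerance, so no re-plan; a 7 TU drift would exceed it and trigger an update. The new `else return` arms cover the case where the relevant side has no slack at all (neither a negative tob_aux nor a positive toa_aux): re-planning cannot improve anything, so bail out early. And since tolerance is now only ever assigned non-negative values, its type changes from s16 to u16.
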
@@ -2504,7 +2640,7 @@ void rtw89_chanctx_track(struct rtw89_dev *rtwdev)
}
void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
- enum rtw89_chanctx_pause_reasons rsn)
+ const struct rtw89_chanctx_pause_parm *pause_parm)
{
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_entity_mode mode;
@@ -2514,12 +2650,12 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
if (hal->entity_pause)
return;
- rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", rsn);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", pause_parm->rsn);
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
case RTW89_ENTITY_MODE_MCC:
- rtw89_mcc_stop(rtwdev);
+ rtw89_mcc_stop(rtwdev, pause_parm);
break;
default:
break;
@@ -2744,7 +2880,7 @@ out:
cur = rtw89_get_entity_mode(rtwdev);
switch (cur) {
case RTW89_ENTITY_MODE_MCC:
- rtw89_mcc_stop(rtwdev);
+ rtw89_mcc_stop(rtwdev, NULL);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index e6391f6f2aa7..2a25563593af 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -31,6 +31,14 @@
#define RTW89_MCC_DFLT_TX_NULL_EARLY 3
#define RTW89_MCC_DFLT_COURTESY_SLOT 3
+#define RTW89_MCC_REQ_COURTESY_TIME 5
+#define RTW89_MCC_REQ_COURTESY(pattern, role) \
+({ \
+ const struct rtw89_mcc_pattern *p = pattern; \
+ p->tob_ ## role <= RTW89_MCC_REQ_COURTESY_TIME || \
+ p->toa_ ## role <= RTW89_MCC_REQ_COURTESY_TIME; \
+})
+
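RTW89_MCC_REQ_COURTESY() is a GNU statement expression whose value is the final boolean; the `## role` token pasting turns the second argument into a field-name suffix. For role = aux it expands (after pasting) to:

    ({
        const struct rtw89_mcc_pattern *p = pattern;
        p->tob_aux <= RTW89_MCC_REQ_COURTESY_TIME ||
        p->toa_aux <= RTW89_MCC_REQ_COURTESY_TIME;
    })

i.e. "does the aux role get 5 TU or less on either side of its TBTT", which rtw89_mcc_assign_pattern() in chan.c above uses to decide whether a courtesy zone is needed.
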
#define NUM_OF_RTW89_MCC_ROLES 2
enum rtw89_chanctx_pause_reasons {
@@ -38,6 +46,11 @@ enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_ROC,
};
+struct rtw89_chanctx_pause_parm {
+ const struct rtw89_vif_link *trigger;
+ enum rtw89_chanctx_pause_reasons rsn;
+};
+
struct rtw89_chanctx_cb_parm {
int (*cb)(struct rtw89_dev *rtwdev, void *data);
void *data;
@@ -95,7 +108,7 @@ void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef);
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_chanctx_idx idx,
+ struct rtw89_vif_link *rtwvif_link,
const struct cfg80211_chan_def *chandef);
void rtw89_entity_init(struct rtw89_dev *rtwdev);
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
@@ -105,7 +118,7 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_changes change);
void rtw89_chanctx_track(struct rtw89_dev *rtwdev);
void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
- enum rtw89_chanctx_pause_reasons rsn);
+ const struct rtw89_chanctx_pause_parm *parm);
void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
const struct rtw89_chanctx_cb_parm *cb_parm);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index cc9b014457ac..49447668cbf3 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -203,6 +203,23 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
},
};
+static const u8 rtw89_ext_capa_sta[] = {
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const struct wiphy_iftype_ext_capab rtw89_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = rtw89_ext_capa_sta,
+ .extended_capabilities_mask = rtw89_ext_capa_sta,
+ .extended_capabilities_len = sizeof(rtw89_ext_capa_sta),
+ /* relevant only if EHT is supported */
+ .eml_capabilities = 0,
+ .mld_capa_and_ops = 0,
+ },
+};
+
#define RTW89_6GHZ_SPAN_HEAD 6145
#define RTW89_6GHZ_SPAN_IDX(center_freq) \
((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2)
@@ -211,6 +228,8 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
[RTW89_6GHZ_SPAN_IDX(center_freq)] = { \
.sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
.sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
+ .acpi_sar_subband_low = RTW89_ACPI_SAR_6GHZ_ ## subband_l, \
+ .acpi_sar_subband_high = RTW89_ACPI_SAR_6GHZ_ ## subband_h, \
.ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \
.ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \
}
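
RTW89_6GHZ_SPAN_IDX() buckets a 6 GHz center frequency into 10 MHz-wide table slots starting at 6145 MHz: ((freq - 6145) / 5) / 2, so 6145 -> 0, 6150 -> 0, 6155 -> 1, 6185 -> 4. The two new .acpi_sar_subband_* members simply ride along in every slot the initializer macro fills, mirroring the existing SAR and antenna-gain subband pairs.
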
@@ -639,9 +658,17 @@ out:
static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
+ if (desc_info->mlo && !desc_info->sw_mld) {
+ if (rtwsta_link)
+ return rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
+ else
+ return rtw89_vif_get_main_macid(rtwvif_link->rtwvif);
+ }
+
if (!rtwsta_link)
return rtwvif_link->mac_id;
@@ -671,7 +698,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct sk_buff *skb = tx_req->skb;
u8 qsel, ch_dma;
- qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
+ qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
desc_info->qsel = qsel;
@@ -1104,39 +1131,23 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
+static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ struct sk_buff *skb, int *qsel, bool sw_mld)
{
- struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
- struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
- struct rtw89_core_tx_request tx_req = {0};
- struct rtw89_sta_link *rtwsta_link = NULL;
- struct rtw89_vif_link *rtwvif_link;
+ struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_core_tx_request tx_req = {};
int ret;
- /* By default, driver writes tx via the link on HW-0. And then,
- * according to links' status, HW can change tx to another link.
- */
-
- if (rtwsta) {
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
- if (unlikely(!rtwsta_link)) {
- rtw89_err(rtwdev, "tx: find no sta link on HW-0\n");
- return -ENOLINK;
- }
- }
-
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
- if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "tx: find no vif link on HW-0\n");
- return -ENOLINK;
- }
-
tx_req.skb = skb;
tx_req.vif = vif;
tx_req.sta = sta;
tx_req.rtwvif_link = rtwvif_link;
tx_req.rtwsta_link = rtwsta_link;
+ tx_req.desc_info.sw_mld = sw_mld;
rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
@@ -1155,6 +1166,33 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
return 0;
}
+int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
+{
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ struct rtw89_sta_link *rtwsta_link = NULL;
+ struct rtw89_vif_link *rtwvif_link;
+
+ if (rtwsta) {
+ rtwsta_link = rtw89_get_designated_link(rtwsta);
+ if (unlikely(!rtwsta_link)) {
+ rtw89_err(rtwdev, "tx: find no sta designated link\n");
+ return -ENOLINK;
+ }
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+ } else {
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
+ if (unlikely(!rtwvif_link)) {
+ rtw89_err(rtwdev, "tx: find no vif designated link\n");
+ return -ENOLINK;
+ }
+ }
+
+ return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false);
+}
+
static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) |
@@ -1382,7 +1420,9 @@ static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq);
+ u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq) |
+ FIELD_PREP(BE_TXD_BODY3_MLO_FLAG, desc_info->mlo) |
+ FIELD_PREP(BE_TXD_BODY3_IS_MLD_SW_EN, desc_info->sw_mld);
return cpu_to_le32(dword);
}
@@ -1635,10 +1675,7 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
u8 evm_pos = 0;
int i;
- /* FIXME: For single link, taking link on HW-0 here is okay. But, when
- * enabling multiple active links, we should determine the right link.
- */
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
+ rtwsta_link = rtw89_sta_get_link_inst(rtwsta, phy_ppdu->phy_idx);
if (unlikely(!rtwsta_link))
return;
@@ -2058,10 +2095,21 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
break;
if (aid == vif->cfg.aid) {
- enum nl80211_he_ru_alloc rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1);
+ enum nl80211_he_ru_alloc rua;
rtwvif->stats.rx_tf_acc++;
rtwdev->stats.rx_tf_acc++;
+
+			/* The following is only required for HE trigger frames, but
+			 * we cannot use the UL HE-SIG-A2 reserved subfield to identify
+			 * them, since some 11ax APs fill it with all 0s, which would
+			 * be misinterpreted as an EHT trigger frame.
+ */
+ if (bss_conf->eht_support)
+ break;
+
+ rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1);
+
if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ &&
rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106)
rtwvif_link->pwr_diff_en = true;
@@ -2152,8 +2200,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
struct sk_buff *skb = iter_data->skb;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtw89_rx_phy_ppdu *phy_ppdu = iter_data->phy_ppdu;
+ bool is_mld = ieee80211_vif_is_mld(vif);
struct ieee80211_bss_conf *bss_conf;
struct rtw89_vif_link *rtwvif_link;
const u8 *bssid = iter_data->bssid;
@@ -2165,10 +2215,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
rcu_read_lock();
- /* FIXME: For single link, taking link on HW-0 here is okay. But, when
- * enabling multiple active links, we should determine the right link.
- */
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_vif_get_link_inst(rtwvif, desc_info->bb_sel);
if (unlikely(!rtwvif_link))
goto out;
@@ -2184,6 +2231,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
if (!ether_addr_equal(bss_conf->bssid, bssid))
goto out;
+ if (is_mld) {
+ rx_status->link_valid = true;
+ rx_status->link_id = rtwvif_link->link_id;
+ }
+
if (ieee80211_is_beacon(hdr->frame_control)) {
if (vif->type == NL80211_IFTYPE_STATION &&
!test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
@@ -2482,7 +2534,8 @@ static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev,
.len = skb->len,
.to_self = desc_info->addr1_match,
.rate = desc_info->data_rate,
- .mac_id = desc_info->mac_id};
+ .mac_id = desc_info->mac_id,
+ .phy_idx = desc_info->bb_sel};
int ret;
if (desc_info->mac_info_valid) {
@@ -2593,6 +2646,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK);
desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD);
desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK);
+ desc_info->bb_sel = le32_get_bits(rxd_s->dword0, BE_RXD_BB_SEL);
if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT)
desc_info->mac_info_valid = true;
@@ -2665,10 +2719,7 @@ void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_sta_link *rtwsta_link;
u8 mac_id = iter_data->mac_id;
- /* FIXME: For single link, taking link on HW-0 here is okay. But, when
- * enabling multiple active links, we should determine the right link.
- */
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
+ rtwsta_link = rtw89_sta_get_link_inst(rtwsta, desc_info->bb_sel);
if (unlikely(!rtwsta_link))
return;
@@ -3117,9 +3168,9 @@ static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev,
if (!rtwsta)
return false;
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
+ rtwsta_link = rtw89_get_designated_link(rtwsta);
if (unlikely(!rtwsta_link)) {
- rtw89_err(rtwdev, "agg wait: find no link on HW-0\n");
+ rtw89_err(rtwdev, "agg wait: find no designated link\n");
return false;
}
@@ -3284,8 +3335,10 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
+ struct rtw89_sta_link *rtwsta_link;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
+ struct rtw89_sta *rtwsta;
struct sk_buff *skb;
int ret, qsel;
@@ -3298,6 +3351,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
ret = -EINVAL;
goto out;
}
+ rtwsta = sta_to_rtwsta(sta);
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos);
if (!skb) {
@@ -3309,7 +3363,13 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
if (ps)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
- ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
+ rtwsta_link = rtwsta->links[rtwvif_link->link_id];
+ if (unlikely(!rtwsta_link)) {
+ ret = -ENOLINK;
+ goto out;
+ }
+
+ ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true);
if (ret) {
rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
dev_kfree_skb_any(skb);
@@ -3329,6 +3389,9 @@ out:
void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_chanctx_pause_parm pause_parm = {
+ .rsn = RTW89_CHANCTX_PAUSE_REASON_ROC,
+ };
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw89_roc *roc = &rtwvif->roc;
struct rtw89_vif_link *rtwvif_link;
@@ -3342,14 +3405,16 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_leave_ips_by_hwflags(rtwdev);
rtw89_leave_lps(rtwdev);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "roc start: find no link on HW-%u\n",
- RTW89_ROC_BY_LINK_INDEX);
+ rtw89_err(rtwdev, "roc start: find no designated link\n");
return;
}
- rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_ROC);
+ roc->link_id = rtwvif_link->link_id;
+
+ pause_parm.trigger = rtwvif_link;
+ rtw89_chanctx_pause(rtwdev, &pause_parm);
ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, true);
if (ret)
@@ -3369,7 +3434,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
}
cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT);
- rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, &roc_chan);
+ rtw89_config_roc_chandef(rtwdev, rtwvif_link, &roc_chan);
rtw89_set_channel(rtwdev);
reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
@@ -3398,10 +3463,10 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_leave_ips_by_hwflags(rtwdev);
rtw89_leave_lps(rtwdev);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX);
+ rtwvif_link = rtwvif->links[roc->link_id];
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "roc end: find no link on HW-%u\n",
- RTW89_ROC_BY_LINK_INDEX);
+ rtw89_err(rtwdev, "roc end: find no link (link id %u)\n",
+ roc->link_id);
return;
}
@@ -3409,7 +3474,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
roc->state = RTW89_ROC_IDLE;
- rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, NULL);
+ rtw89_config_roc_chandef(rtwdev, rtwvif_link, NULL);
rtw89_chanctx_proceed(rtwdev, NULL);
ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false);
if (ret)
@@ -3577,6 +3642,98 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
ewma_tp_init(&stats->rx_ewma_tp);
}
+#define RTW89_MLSR_GOTO_2GHZ_THRESHOLD -53
+#define RTW89_MLSR_EXIT_2GHZ_THRESHOLD -38
+static void rtw89_core_mlsr_link_decision(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ unsigned int sel_link_id = IEEE80211_MLD_MAX_NUM_LINKS;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_vif_link *rtwvif_link;
+ const struct rtw89_chan *chan;
+ unsigned long usable_links;
+ unsigned int link_id;
+ u8 decided_bands;
+ u8 rssi;
+
+ rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
+ if (unlikely(!rssi))
+ return;
+
+ if (RTW89_RSSI_RAW_TO_DBM(rssi) >= RTW89_MLSR_EXIT_2GHZ_THRESHOLD)
+ decided_bands = BIT(RTW89_BAND_5G) | BIT(RTW89_BAND_6G);
+ else if (RTW89_RSSI_RAW_TO_DBM(rssi) <= RTW89_MLSR_GOTO_2GHZ_THRESHOLD)
+ decided_bands = BIT(RTW89_BAND_2G);
+ else
+ return;
+
+ usable_links = ieee80211_vif_usable_links(vif);
+
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
+ if (unlikely(!rtwvif_link))
+ goto select;
+
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ if (decided_bands & BIT(chan->band_type))
+ return;
+
+ usable_links &= ~BIT(rtwvif_link->link_id);
+
+select:
+ rcu_read_lock();
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_channel *channel;
+ enum rtw89_band band;
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (unlikely(!link_conf))
+ continue;
+
+ channel = link_conf->chanreq.oper.chan;
+ if (unlikely(!channel))
+ continue;
+
+ band = rtw89_nl80211_to_hw_band(channel->band);
+ if (decided_bands & BIT(band)) {
+ sel_link_id = link_id;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (sel_link_id == IEEE80211_MLD_MAX_NUM_LINKS)
+ return;
+
+ rtw89_core_mlsr_switch(rtwdev, rtwvif, sel_link_id);
+}
+
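The two thresholds form a classic hysteresis band: below -53 dBm the MLSR decision prefers the 2 GHz link for coverage, above -38 dBm it prefers 5/6 GHz for throughput, and anything in between leaves the active link untouched so the STA cannot ping-pong on a noisy RSSI. A minimal sketch of just the decision band:

    #include <stdio.h>

    #define GOTO_2GHZ_THRESHOLD (-53)  /* weak beacon RSSI: take 2 GHz */
    #define EXIT_2GHZ_THRESHOLD (-38)  /* strong beacon RSSI: take 5/6 GHz */

    /* +1 = want 5/6 GHz, -1 = want 2 GHz, 0 = stay (inside the band) */
    static int decide(int rssi_dbm)
    {
        if (rssi_dbm >= EXIT_2GHZ_THRESHOLD)
            return 1;
        if (rssi_dbm <= GOTO_2GHZ_THRESHOLD)
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", decide(-60), decide(-45), decide(-30)); /* -1 0 1 */
        return 0;
    }
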
+static void rtw89_core_mlo_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+
+ if (hal->disabled_dm_bitmap & BIT(RTW89_DM_MLO))
+ return;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ vif = rtwvif_to_vif(rtwvif);
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ continue;
+
+ switch (rtwvif->mlo_mode) {
+ case RTW89_MLO_MODE_MLSR:
+ rtw89_core_mlsr_link_decision(rtwdev, rtwvif);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
@@ -3615,9 +3772,10 @@ static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
rtw89_phy_antdiv_track(rtwdev);
rtw89_phy_ul_tb_ctrl_track(rtwdev);
rtw89_phy_edcca_track(rtwdev);
- rtw89_tas_track(rtwdev);
+ rtw89_sar_track(rtwdev);
rtw89_chanctx_track(rtwdev);
rtw89_core_rfkill_poll(rtwdev, false);
+ rtw89_core_mlo_track(rtwdev);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
@@ -3846,6 +4004,9 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
+ if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ rtw89_p2p_noa_once_deinit(rtwvif_link);
+
return 0;
}
@@ -4361,17 +4522,18 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
#define RTW89_SBAND_IFTYPES_NR 2
-static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
- enum nl80211_band band,
- struct ieee80211_supported_band *sband)
+static int rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ struct ieee80211_supported_band *sband)
{
struct ieee80211_sband_iftype_data *iftype_data;
enum nl80211_iftype iftype;
int idx = 0;
- iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
+ iftype_data = devm_kcalloc(rtwdev->dev, RTW89_SBAND_IFTYPES_NR,
+ sizeof(*iftype_data), GFP_KERNEL);
if (!iftype_data)
- return;
+ return -ENOMEM;
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
switch (iftype) {
@@ -4396,77 +4558,75 @@ static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
}
_ieee80211_set_sband_iftype_data(sband, iftype_data, idx);
+ return 0;
+}
+
+static struct ieee80211_supported_band *
+rtw89_core_sband_dup(struct rtw89_dev *rtwdev,
+ const struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_supported_band *dup;
+
+ dup = devm_kmemdup(rtwdev->dev, sband, sizeof(*sband), GFP_KERNEL);
+ if (!dup)
+ return NULL;
+
+ dup->channels = devm_kmemdup(rtwdev->dev, sband->channels,
+ sizeof(*sband->channels) * sband->n_channels,
+ GFP_KERNEL);
+ if (!dup->channels)
+ return NULL;
+
+ dup->bitrates = devm_kmemdup(rtwdev->dev, sband->bitrates,
+ sizeof(*sband->bitrates) * sband->n_bitrates,
+ GFP_KERNEL);
+ if (!dup->bitrates)
+ return NULL;
+
+ return dup;
}
static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
- struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL;
- struct ieee80211_supported_band *sband_6ghz = NULL;
- u32 size = sizeof(struct ieee80211_supported_band);
+ struct ieee80211_supported_band *sband;
u8 support_bands = rtwdev->chip->support_bands;
+ int ret;
if (support_bands & BIT(NL80211_BAND_2GHZ)) {
- sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
- if (!sband_2ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
- hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_2ghz);
+ if (!sband)
+ return -ENOMEM;
+ rtw89_init_ht_cap(rtwdev, &sband->ht_cap);
+ ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband);
+ if (ret)
+ return ret;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
}
if (support_bands & BIT(NL80211_BAND_5GHZ)) {
- sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
- if (!sband_5ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
- rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
- hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_5ghz);
+ if (!sband)
+ return -ENOMEM;
+ rtw89_init_ht_cap(rtwdev, &sband->ht_cap);
+ rtw89_init_vht_cap(rtwdev, &sband->vht_cap);
+ ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband);
+ if (ret)
+ return ret;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
}
if (support_bands & BIT(NL80211_BAND_6GHZ)) {
- sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
- if (!sband_6ghz)
- goto err;
- rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
- hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
+ sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_6ghz);
+ if (!sband)
+ return -ENOMEM;
+ ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband);
+ if (ret)
+ return ret;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = sband;
}
return 0;
-
-err:
- hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
- hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
- hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
- if (sband_2ghz)
- kfree((__force void *)sband_2ghz->iftype_data);
- if (sband_5ghz)
- kfree((__force void *)sband_5ghz->iftype_data);
- if (sband_6ghz)
- kfree((__force void *)sband_6ghz->iftype_data);
- kfree(sband_2ghz);
- kfree(sband_5ghz);
- kfree(sband_6ghz);
- return -ENOMEM;
-}
-
-static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev)
-{
- struct ieee80211_hw *hw = rtwdev->hw;
-
- if (hw->wiphy->bands[NL80211_BAND_2GHZ])
- kfree((__force void *)hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
- if (hw->wiphy->bands[NL80211_BAND_5GHZ])
- kfree((__force void *)hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
- if (hw->wiphy->bands[NL80211_BAND_6GHZ])
- kfree((__force void *)hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
- kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
- kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
- kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]);
- hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
- hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
- hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
}
static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
@@ -4774,6 +4934,7 @@ struct rtw89_vif_link *rtw89_vif_set_link(struct rtw89_vif *rtwvif,
set_bit(index, rtwvif->links_inst_map);
rtwvif->links[link_id] = rtwvif_link;
+ list_add_tail(&rtwvif_link->dlink_schd, &rtwvif->dlink_pool);
return rtwvif_link;
err:
@@ -4794,6 +4955,7 @@ void rtw89_vif_unset_link(struct rtw89_vif *rtwvif, unsigned int link_id)
index = rtw89_vif_link_inst_get_index(link);
clear_bit(index, rtwvif->links_inst_map);
*container = NULL;
+ list_del(&link->dlink_schd);
}
struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
@@ -4824,6 +4986,7 @@ struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
set_bit(index, rtwsta->links_inst_map);
rtwsta->links[link_id] = rtwsta_link;
+ list_add_tail(&rtwsta_link->dlink_schd, &rtwsta->dlink_pool);
return rtwsta_link;
err:
@@ -4844,6 +5007,7 @@ void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id)
index = rtw89_sta_link_inst_get_index(link);
clear_bit(index, rtwsta->links_inst_map);
*container = NULL;
+ list_del(&link->dlink_schd);
}
int rtw89_core_init(struct rtw89_dev *rtwdev)
@@ -4860,6 +5024,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
continue;
INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
}
+ INIT_LIST_HEAD(&rtwdev->scan_info.chan_list);
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -4880,6 +5045,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtwdev->total_sta_assoc = 0;
rtw89_init_wait(&rtwdev->mcc.wait);
+ rtw89_init_wait(&rtwdev->mlo.wait);
rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
rtw89_init_wait(&rtwdev->wow.wait);
rtw89_init_wait(&rtwdev->mac.ps_wait);
@@ -4901,7 +5067,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
rtwdev->dbcc_en = true;
rtwdev->mac.qta_mode = RTW89_QTA_DBCC;
- rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
+ rtwdev->mlo_dbcc_mode = MLO_1_PLUS_1_1RF;
}
rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0;
@@ -4919,7 +5085,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_ser_init(rtwdev);
rtw89_entity_init(rtwdev);
- rtw89_tas_init(rtwdev);
+ rtw89_sar_init(rtwdev);
rtw89_phy_ant_gain_init(rtwdev);
return 0;
@@ -4945,9 +5111,6 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
rtwdev->scanning = true;
- rtw89_leave_lps(rtwdev);
- if (hw_scan)
- rtw89_leave_ips_by_hwflags(rtwdev);
ether_addr_copy(rtwvif_link->mac_addr, mac_addr);
rtw89_btc_ntfy_scan_start(rtwdev, rtwvif_link->phy_idx, chan->band_type);
@@ -5062,6 +5225,76 @@ out:
rtw89_load_txpwr_table(rtwdev, rtwdev->rfe_parms->byr_tbl);
}
+int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ unsigned int link_id)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ u16 usable_links = ieee80211_vif_usable_links(vif);
+ u16 active_links = vif->active_links;
+ struct rtw89_vif_link *target, *cur;
+ int ret;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ if (unlikely(!ieee80211_vif_is_mld(vif)))
+ return -EOPNOTSUPP;
+
+ if (unlikely(!(usable_links & BIT(link_id)))) {
+ rtw89_warn(rtwdev, "%s: link id %u is not usable\n", __func__,
+ link_id);
+ return -ENOLINK;
+ }
+
+ if (active_links == BIT(link_id))
+ return 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "%s: switch to link id %u MLSR\n",
+ __func__, link_id);
+
+ rtw89_leave_lps(rtwdev);
+
+ ieee80211_stop_queues(rtwdev->hw);
+ flush_work(&rtwdev->txq_work);
+
+ cur = rtw89_get_designated_link(rtwvif);
+
+ ret = ieee80211_set_active_links(vif, active_links | BIT(link_id));
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to activate link id %u\n",
+ __func__, link_id);
+ goto wake_queue;
+ }
+
+ target = rtwvif->links[link_id];
+ if (unlikely(!target)) {
+ rtw89_err(rtwdev, "%s: failed to confirm link id %u\n",
+ __func__, link_id);
+
+ ieee80211_set_active_links(vif, active_links);
+ ret = -EFAULT;
+ goto wake_queue;
+ }
+
+ if (likely(cur))
+ rtw89_fw_h2c_mlo_link_cfg(rtwdev, cur, false);
+
+ rtw89_fw_h2c_mlo_link_cfg(rtwdev, target, true);
+
+ ret = ieee80211_set_active_links(vif, BIT(link_id));
+ if (ret)
+ rtw89_err(rtwdev, "%s: failed to inactivate links 0x%x\n",
+ __func__, active_links);
+
+ rtw89_chip_rfk_channel(rtwdev, target);
+
+ rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR;
+
+wake_queue:
+ ieee80211_wake_queues(rtwdev->hw);
+
+ return ret;
+}
+
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
@@ -5305,8 +5538,11 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (chip->chip_gen == RTW89_CHIP_BE)
hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
- if (rtwdev->support_mlo)
+ if (rtwdev->support_mlo) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ hw->wiphy->iftype_ext_capab = rtw89_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(rtw89_iftypes_ext_capa);
+ }
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
@@ -5337,7 +5573,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ret = rtw89_regd_setup(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to set up regd\n");
- goto err_free_supported_band;
+ return ret;
}
hw->wiphy->sar_capa = &rtw89_sar_capa;
@@ -5345,7 +5581,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ret = ieee80211_register_hw(hw);
if (ret) {
rtw89_err(rtwdev, "failed to register hw\n");
- goto err_free_supported_band;
+ return ret;
}
ret = rtw89_regd_init_hint(rtwdev);
@@ -5360,8 +5596,6 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
err_unregister_hw:
ieee80211_unregister_hw(hw);
-err_free_supported_band:
- rtw89_core_clr_supported_band(rtwdev);
return ret;
}
@@ -5372,7 +5606,6 @@ static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev)
rtw89_rfkill_polling_deinit(rtwdev);
ieee80211_unregister_hw(hw);
- rtw89_core_clr_supported_band(rtwdev);
}
int rtw89_core_register(struct rtw89_dev *rtwdev)
@@ -5440,13 +5673,13 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
if (!hw)
goto err;
- /* TODO: When driver MLO arch. is done, determine whether to support MLO
- * according to the following conditions.
- * 1. run with chanctx_ops
- * 2. chip->support_link_num != 0
- * 3. FW feature supports AP_LINK_PS
+	/* Currently, our AP_LINK_PS handling only works for a non-MLD softap
+	 * or an MLD single-link softap. If RTW89_MLD_NON_STA_LINK_NUM grows,
+	 * please rework the entire AP_LINK_PS handling before supporting MLO.
*/
- support_mlo = false;
+ support_mlo = !no_chanctx && chip->support_link_num &&
+ RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &early_fw) &&
+ RTW89_MLD_NON_STA_LINK_NUM == 1;
hw->wiphy->iface_combinations = rtw89_iface_combs;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 4be05d6cad18..1c8f3b9b7c4c 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -798,6 +798,7 @@ struct rtw89_rx_phy_ppdu {
u8 rssi[RF_PATH_MAX];
u8 mac_id;
u8 chan_idx;
+ u8 phy_idx;
u8 ie;
u16 rate;
u8 rpl_avg;
@@ -1174,6 +1175,7 @@ struct rtw89_tx_desc_info {
bool ldpc;
bool upd_wlan_hdr;
bool mlo;
+ bool sw_mld;
};
struct rtw89_core_tx_request {
@@ -3379,6 +3381,7 @@ struct rtw89_sec_cam_entry {
struct rtw89_sta_link {
struct rtw89_sta *rtwsta;
+ struct list_head dlink_schd;
unsigned int link_id;
u8 mac_id;
@@ -3445,14 +3448,13 @@ enum rtw89_roc_state {
RTW89_ROC_MGMT,
};
-#define RTW89_ROC_BY_LINK_INDEX 0
-
struct rtw89_roc {
struct ieee80211_channel chan;
struct wiphy_delayed_work roc_work;
enum ieee80211_roc_type type;
enum rtw89_roc_state state;
int duration;
+ unsigned int link_id;
};
#define RTW89_P2P_MAX_NOA_NUM 2
@@ -3483,8 +3485,17 @@ struct rtw89_p2p_noa_setter {
u8 noa_index;
};
+struct rtw89_ps_noa_once_handler {
+ bool in_duration;
+ u64 tsf_begin;
+ u64 tsf_end;
+ struct wiphy_delayed_work set_work;
+ struct wiphy_delayed_work clr_work;
+};
+
struct rtw89_vif_link {
struct rtw89_vif *rtwvif;
+ struct list_head dlink_schd;
unsigned int link_id;
bool chanctx_assigned; /* only valid when running with chanctx_ops */
@@ -3507,6 +3518,7 @@ struct rtw89_vif_link {
u8 hit_rule;
u8 last_noa_nr;
u64 sync_bcn_tsf;
+ bool rand_tsf_done;
bool trigger;
bool lsig_txop;
u8 tgt_ind;
@@ -3527,6 +3539,7 @@ struct rtw89_vif_link {
struct rtw89_phy_rate_pattern rate_pattern;
struct list_head general_pkt_list;
struct rtw89_p2p_noa_setter p2p_noa;
+ struct rtw89_ps_noa_once_handler noa_once;
};
enum rtw89_lv1_rcvy_step {
@@ -3986,7 +3999,11 @@ struct rtw89_rfe_parms {
struct rtw89_txpwr_rule_2ghz rule_2ghz;
struct rtw89_txpwr_rule_5ghz rule_5ghz;
struct rtw89_txpwr_rule_6ghz rule_6ghz;
+ struct rtw89_txpwr_rule_2ghz rule_da_2ghz;
+ struct rtw89_txpwr_rule_5ghz rule_da_5ghz;
+ struct rtw89_txpwr_rule_6ghz rule_da_6ghz;
struct rtw89_tx_shape tx_shape;
+ bool has_da;
};
struct rtw89_rfe_parms_conf {
@@ -4081,9 +4098,15 @@ struct rtw89_rfe_data {
struct rtw89_txpwr_lmt_2ghz_data lmt_2ghz;
struct rtw89_txpwr_lmt_5ghz_data lmt_5ghz;
struct rtw89_txpwr_lmt_6ghz_data lmt_6ghz;
+ struct rtw89_txpwr_lmt_2ghz_data da_lmt_2ghz;
+ struct rtw89_txpwr_lmt_5ghz_data da_lmt_5ghz;
+ struct rtw89_txpwr_lmt_6ghz_data da_lmt_6ghz;
struct rtw89_txpwr_lmt_ru_2ghz_data lmt_ru_2ghz;
struct rtw89_txpwr_lmt_ru_5ghz_data lmt_ru_5ghz;
struct rtw89_txpwr_lmt_ru_6ghz_data lmt_ru_6ghz;
+ struct rtw89_txpwr_lmt_ru_2ghz_data da_lmt_ru_2ghz;
+ struct rtw89_txpwr_lmt_ru_5ghz_data da_lmt_ru_5ghz;
+ struct rtw89_txpwr_lmt_ru_6ghz_data da_lmt_ru_6ghz;
struct rtw89_tx_shape_lmt_data tx_shape_lmt;
struct rtw89_tx_shape_lmt_ru_data tx_shape_lmt_ru;
struct rtw89_rfe_parms rfe_parms;
@@ -4284,12 +4307,14 @@ struct rtw89_chip_info {
bool support_rnr;
bool support_ant_gain;
bool support_tas;
+ bool support_sar_by_ant;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool rx_freq_frome_ie;
bool hw_sec_hdr;
bool hw_mgmt_tx_encrypt;
bool hw_tkip_crypto;
+ bool hw_mlo_bmc_crypto;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -4494,6 +4519,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_LPS_CH_INFO,
RTW89_FW_FEATURE_NO_PHYCAP_P1,
RTW89_FW_FEATURE_NO_POWER_DIFFERENCE,
+ RTW89_FW_FEATURE_BEACON_LOSS_COUNT_V1,
};
struct rtw89_fw_suit {
@@ -4606,6 +4632,7 @@ struct rtw89_cam_info {
enum rtw89_sar_sources {
RTW89_SAR_SOURCE_NONE,
RTW89_SAR_SOURCE_COMMON,
+ RTW89_SAR_SOURCE_ACPI,
RTW89_SAR_SOURCE_NR,
};
@@ -4630,8 +4657,62 @@ struct rtw89_sar_cfg_common {
s32 cfg[RTW89_SAR_SUBBAND_NR];
};
+enum rtw89_acpi_sar_subband {
+ RTW89_ACPI_SAR_2GHZ_SUBBAND,
+ RTW89_ACPI_SAR_5GHZ_SUBBAND_1, /* U-NII-1 */
+ RTW89_ACPI_SAR_5GHZ_SUBBAND_2, /* U-NII-2 */
+ RTW89_ACPI_SAR_5GHZ_SUBBAND_2E, /* U-NII-2-Extended */
+ RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_6, /* U-NII-6 */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_8, /* U-NII-8 */
+
+ NUM_OF_RTW89_ACPI_SAR_SUBBAND,
+ RTW89_ACPI_SAR_SUBBAND_NR_LEGACY = RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4 + 1,
+ RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ = RTW89_ACPI_SAR_6GHZ_SUBBAND_8 + 1,
+};
+
+#define TXPWR_FACTOR_OF_RTW89_ACPI_SAR 3 /* unit: 0.125 dBm */
+#define MAX_VAL_OF_RTW89_ACPI_SAR S16_MAX
+#define MIN_VAL_OF_RTW89_ACPI_SAR S16_MIN
+#define MAX_NUM_OF_RTW89_ACPI_SAR_TBL 6
+#define NUM_OF_RTW89_ACPI_SAR_RF_PATH (RF_PATH_B + 1)
+
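A small sketch of the unit conversion implied by TXPWR_FACTOR_OF_RTW89_ACPI_SAR above: stored s16 values are in 2^-3 dBm (0.125 dBm) steps. The raw sample value below is made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define TXPWR_FACTOR_OF_ACPI_SAR 3 /* unit: 0.125 dBm, as in the header */

int main(void)
{
	int16_t raw = 96; /* hypothetical ACPI SAR table entry */

	/* 96 * 0.125 dBm = 12.000 dBm */
	printf("raw %d -> %.3f dBm\n", raw,
	       raw / (double)(1 << TXPWR_FACTOR_OF_ACPI_SAR));
	return 0;
}
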
+struct rtw89_sar_entry_from_acpi {
+ s16 v[NUM_OF_RTW89_ACPI_SAR_SUBBAND][NUM_OF_RTW89_ACPI_SAR_RF_PATH];
+};
+
+struct rtw89_sar_table_from_acpi {
+ /* If this table is active, all fields must be filled, either from
+ * the configuration in BIOS or with default values, for SAR to work well.
+ */
+ struct rtw89_sar_entry_from_acpi entries[RTW89_REGD_NUM];
+};
+
+struct rtw89_sar_indicator_from_acpi {
+ bool enable_sync;
+ unsigned int fields;
+ u8 (*rfpath_to_antidx)(enum rtw89_rf_path rfpath);
+
+ /* Select among @tables of the container, rtw89_sar_cfg_acpi, by path.
+ * Not designed with pointers, since the addresses would become invalid
+ * after syncing contents to a local container instance.
+ */
+ u8 tblsel[NUM_OF_RTW89_ACPI_SAR_RF_PATH];
+};
+
+struct rtw89_sar_cfg_acpi {
+ u8 downgrade_2tx;
+ unsigned int valid_num;
+ struct rtw89_sar_table_from_acpi tables[MAX_NUM_OF_RTW89_ACPI_SAR_TBL];
+ struct rtw89_sar_indicator_from_acpi indicator;
+};
+
struct rtw89_sar_info {
- /* used to decide how to acces SAR cfg union */
+ /* used to decide how to access SAR cfg union */
enum rtw89_sar_sources src;
/* reserved for different kinds of SAR cfg struct.
@@ -4639,6 +4720,7 @@ struct rtw89_sar_info {
*/
union {
struct rtw89_sar_cfg_common cfg_common;
+ struct rtw89_sar_cfg_acpi cfg_acpi;
};
};
@@ -4674,11 +4756,14 @@ struct rtw89_ant_gain_info {
struct rtw89_6ghz_span {
enum rtw89_sar_subband sar_subband_low;
enum rtw89_sar_subband sar_subband_high;
+ enum rtw89_acpi_sar_subband acpi_sar_subband_low;
+ enum rtw89_acpi_sar_subband acpi_sar_subband_high;
enum rtw89_ant_gain_subband ant_gain_subband_low;
enum rtw89_ant_gain_subband ant_gain_subband_high;
};
#define RTW89_SAR_SPAN_VALID(span) ((span)->sar_subband_high)
+#define RTW89_ACPI_SAR_SPAN_VALID(span) ((span)->acpi_sar_subband_high)
#define RTW89_ANT_GAIN_SPAN_VALID(span) ((span)->ant_gain_subband_high)
enum rtw89_tas_state {
@@ -4692,6 +4777,7 @@ enum rtw89_tas_state {
struct rtw89_tas_info {
u16 tx_ratio_history[RTW89_TAS_TX_RATIO_WINDOW];
u64 txpwr_history[RTW89_TAS_TXPWR_WINDOW];
+ u8 enabled_countries;
u8 txpwr_head_idx;
u8 txpwr_tail_idx;
u8 tx_ratio_idx;
@@ -4765,6 +4851,7 @@ enum rtw89_dm_type {
RTW89_DM_DYNAMIC_EDCCA,
RTW89_DM_THERMAL_PROTECT,
RTW89_DM_TAS,
+ RTW89_DM_MLO,
};
#define RTW89_THERMAL_PROT_LV_MAX 5
@@ -4786,6 +4873,7 @@ struct rtw89_hal {
bool no_mcs_12_13;
atomic_t roc_chanctx_idx;
+ u8 roc_link_index;
DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES);
DECLARE_BITMAP(entity_map, NUM_OF_RTW89_CHANCTX);
@@ -5207,6 +5295,8 @@ struct rtw89_regulatory_info {
const struct rtw89_regd *regd;
enum rtw89_reg_6ghz_power reg_6ghz_power;
struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
+ bool txpwr_uk_follow_etsi;
+
DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
@@ -5360,9 +5450,10 @@ struct rtw89_early_h2c {
struct rtw89_hw_scan_info {
struct rtw89_vif_link *scanning_vif;
struct list_head pkt_list[NUM_NL80211_BANDS];
+ struct list_head chan_list;
struct rtw89_chan op_chan;
+ bool connected;
bool abort;
- u32 last_chan_idx;
};
enum rtw89_phy_bb_gain_band {
@@ -5574,6 +5665,8 @@ struct rtw89_mcc_role {
struct rtw89_mcc_policy policy;
struct rtw89_mcc_limit limit;
+ const struct rtw89_mcc_courtesy_cfg *crtz;
+
/* only valid when running with FW MRC mechanism */
u8 slot_idx;
@@ -5591,13 +5684,16 @@ struct rtw89_mcc_bt_role {
u16 duration; /* TU */
};
-struct rtw89_mcc_courtesy {
- bool enable;
+struct rtw89_mcc_courtesy_cfg {
u8 slot_num;
- u8 macid_src;
u8 macid_tgt;
};
+struct rtw89_mcc_courtesy {
+ struct rtw89_mcc_courtesy_cfg ref;
+ struct rtw89_mcc_courtesy_cfg aux;
+};
+
enum rtw89_mcc_plan {
RTW89_MCC_PLAN_TAIL_BT,
RTW89_MCC_PLAN_MID_BT,
@@ -5631,6 +5727,7 @@ struct rtw89_mcc_config {
struct rtw89_mcc_pattern pattern;
struct rtw89_mcc_sync sync;
u64 start_tsf;
+ u64 start_tsf_in_aux_domain;
u16 mcc_interval; /* TU */
u16 beacon_offset; /* TU */
};
@@ -5651,6 +5748,16 @@ struct rtw89_mcc_info {
struct rtw89_mcc_config config;
};
+enum rtw89_mlo_mode {
+ RTW89_MLO_MODE_MLSR = 0,
+
+ NUM_OF_RTW89_MLO_MODE,
+};
+
+struct rtw89_mlo_info {
+ struct rtw89_wait_info wait;
+};
+
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
@@ -5666,6 +5773,7 @@ struct rtw89_dev {
const struct rtw89_rfe_parms *rfe_parms;
struct rtw89_hal hal;
struct rtw89_mcc_info mcc;
+ struct rtw89_mlo_info mlo;
struct rtw89_mac_info mac;
struct rtw89_fw_info fw;
struct rtw89_hci_info hci;
@@ -5802,6 +5910,9 @@ struct rtw89_vif {
struct rtw89_roc roc;
bool offchan;
+ enum rtw89_mlo_mode mlo_mode;
+
+ struct list_head dlink_pool;
u8 links_inst_valid_num;
DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
struct rtw89_vif_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -5841,6 +5952,7 @@ struct rtw89_sta {
DECLARE_BITMAP(pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+ struct list_head dlink_pool;
u8 links_inst_valid_num;
DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
struct rtw89_sta_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -5936,6 +6048,12 @@ rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
return rcu_dereference(rtwdev->assoc_link_on_macid[macid]);
}
+#define rtw89_get_designated_link(links_holder) \
+({ \
+ typeof(links_holder) p = links_holder; \
+ list_first_entry_or_null(&p->dlink_pool, typeof(*p->links_inst), dlink_schd); \
+})
+
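A self-contained userspace model of what rtw89_get_designated_link() evaluates to: the first entry of the holder's dlink pool, or NULL when the pool is empty. The list helpers below are pared-down stand-ins for the kernel's list API, purely for illustration.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define first_entry_or_null(head, type, member) \
	((head)->next != (head) ? container_of((head)->next, type, member) : NULL)

struct link { int id; struct list_head dlink_schd; };
struct vif { struct list_head dlink_pool; };

int main(void)
{
	struct vif v;
	struct link l0 = { .id = 0 }, l1 = { .id = 1 };
	struct link *designated;

	list_init(&v.dlink_pool);
	list_add_tail(&l1.dlink_schd, &v.dlink_pool); /* scheduled first */
	list_add_tail(&l0.dlink_schd, &v.dlink_pool);

	designated = first_entry_or_null(&v.dlink_pool, struct link, dlink_schd);
	printf("designated link id = %d\n", designated ? designated->id : -1); /* 1 */
	return 0;
}
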
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
@@ -6825,9 +6943,14 @@ static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev,
static inline u8 rtw89_regd_get(struct rtw89_dev *rtwdev, u8 band)
{
- const struct rtw89_regd *regd = rtwdev->regulatory.regd;
+ const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
+ u8 txpwr_regd = regd->txpwr_regd[band];
+
+ if (regulatory->txpwr_uk_follow_etsi && txpwr_regd == RTW89_UK)
+ return RTW89_ETSI;
- return regd->txpwr_regd[band];
+ return txpwr_regd;
}
static inline void rtw89_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
@@ -7185,6 +7308,7 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
int rtw89_regd_setup(struct rtw89_dev *rtwdev);
int rtw89_regd_init_hint(struct rtw89_dev *rtwdev);
+const char *rtw89_regd_get_string(enum rtw89_regulation_type regd);
void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond);
@@ -7206,5 +7330,7 @@ void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct ieee80211_bss_conf *bss_conf);
void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event);
+int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ unsigned int link_id);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index f2c5753fd386..d6016fa107fb 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -85,6 +85,7 @@ struct rtw89_debugfs {
struct rtw89_debugfs_priv phy_info;
struct rtw89_debugfs_priv stations;
struct rtw89_debugfs_priv disable_dm;
+ struct rtw89_debugfs_priv mlo_mode;
};
struct rtw89_debugfs_iter_data {
@@ -854,45 +855,21 @@ static ssize_t __print_txpwr_map(struct rtw89_dev *rtwdev, char *buf, size_t buf
return p - buf;
}
-#define case_REGD(_regd) \
- case RTW89_ ## _regd: \
- p += scnprintf(p, end - p, #_regd "\n"); \
- break
-
static int __print_regd(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
const struct rtw89_chan *chan)
{
+ const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
char *p = buf, *end = buf + bufsz;
u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
- switch (regd) {
- default:
- p += scnprintf(p, end - p, "UNKNOWN: %d\n", regd);
- break;
- case_REGD(WW);
- case_REGD(ETSI);
- case_REGD(FCC);
- case_REGD(MKK);
- case_REGD(NA);
- case_REGD(IC);
- case_REGD(KCC);
- case_REGD(NCC);
- case_REGD(CHILE);
- case_REGD(ACMA);
- case_REGD(MEXICO);
- case_REGD(UKRAINE);
- case_REGD(CN);
- case_REGD(QATAR);
- case_REGD(UK);
- case_REGD(THAILAND);
- }
+ p += scnprintf(p, end - p, "%s\n", rtw89_regd_get_string(regd));
+ p += scnprintf(p, end - p, "\t(txpwr UK follow ETSI: %s)\n",
+ str_yes_no(regulatory->txpwr_uk_follow_etsi));
return p - buf;
}
-#undef case_REGD
-
struct dbgfs_txpwr_table {
const struct txpwr_map *byr;
const struct txpwr_map *lmt;
@@ -949,6 +926,7 @@ ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev,
char *buf, size_t bufsz)
{
enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+ struct rtw89_sar_parm sar_parm = {};
const struct dbgfs_txpwr_table *tbl;
const struct rtw89_chan *chan;
char *p = buf, *end = buf + bufsz;
@@ -958,11 +936,12 @@ ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev,
rtw89_leave_ps_mode(rtwdev);
chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ sar_parm.center_freq = chan->freq;
p += rtw89_debug_priv_txpwr_table_get_regd(rtwdev, p, end - p, chan);
p += scnprintf(p, end - p, "[SAR]\n");
- p += rtw89_print_sar(rtwdev, p, end - p, chan->freq);
+ p += rtw89_print_sar(rtwdev, p, end - p, &sar_parm);
p += scnprintf(p, end - p, "[TAS]\n");
p += rtw89_print_tas(rtwdev, p, end - p);
@@ -3993,14 +3972,16 @@ static int rtw89_dump_pkt_offload(char *buf, size_t bufsz, struct list_head *pkt
static int rtw89_vif_link_ids_get(struct rtw89_dev *rtwdev,
char *buf, size_t bufsz, u8 *mac,
- struct rtw89_vif_link *rtwvif_link)
+ struct rtw89_vif_link *rtwvif_link,
+ bool designated)
{
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam;
char *p = buf, *end = buf + bufsz;
p += scnprintf(p, end - p, " [%u] %pM\n", rtwvif_link->mac_id,
rtwvif_link->mac_addr);
- p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwvif_link->link_id);
+ p += scnprintf(p, end - p, "\tlink_id=%u%s\n", rtwvif_link->link_id,
+ designated ? " (*)" : "");
p += scnprintf(p, end - p, "\tbssid_cam_idx=%u\n",
bssid_cam->bssid_cam_idx);
p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwvif_link->addr_cam);
@@ -4017,15 +3998,19 @@ void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
(struct rtw89_debugfs_iter_data *)data;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_vif_link *designated_link;
struct rtw89_vif_link *rtwvif_link;
size_t bufsz = iter_data->bufsz;
char *buf = iter_data->buf;
char *p = buf, *end = buf + bufsz;
unsigned int link_id;
+ designated_link = rtw89_get_designated_link(rtwvif);
+
p += scnprintf(p, end - p, "VIF %pM\n", rtwvif->mac_addr);
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link);
+ p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link,
+ rtwvif_link == designated_link);
rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
@@ -4055,7 +4040,8 @@ static int rtw89_dump_ba_cam(struct rtw89_dev *rtwdev,
static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev,
char *buf, size_t bufsz,
- struct rtw89_sta_link *rtwsta_link)
+ struct rtw89_sta_link *rtwsta_link,
+ bool designated)
{
struct ieee80211_link_sta *link_sta;
char *p = buf, *end = buf + bufsz;
@@ -4069,7 +4055,8 @@ static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev,
rcu_read_unlock();
- p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwsta_link->link_id);
+ p += scnprintf(p, end - p, "\tlink_id=%u%s\n", rtwsta_link->link_id,
+ designated ? " (*)" : "");
p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwsta_link->addr_cam);
p += rtw89_dump_ba_cam(rtwdev, p, end - p, rtwsta_link);
@@ -4082,16 +4069,20 @@ static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
(struct rtw89_debugfs_iter_data *)data;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_sta_link *designated_link;
struct rtw89_sta_link *rtwsta_link;
size_t bufsz = iter_data->bufsz;
char *buf = iter_data->buf;
char *p = buf, *end = buf + bufsz;
unsigned int link_id;
+ designated_link = rtw89_get_designated_link(rtwsta);
+
p += scnprintf(p, end - p, "STA %pM %s\n", sta->addr,
sta->tdls ? "(TDLS)" : "");
rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
- p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link);
+ p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link,
+ rtwsta_link == designated_link);
rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
@@ -4146,6 +4137,35 @@ static ssize_t rtw89_debug_priv_stations_get(struct rtw89_dev *rtwdev,
return p - buf;
}
+static void rtw89_debug_disable_dm_cfg_bmap(struct rtw89_dev *rtwdev, u32 new)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 old = hal->disabled_dm_bitmap;
+
+ if (new == old)
+ return;
+
+ hal->disabled_dm_bitmap = new;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "Disable DM: 0x%x -> 0x%x\n", old, new);
+}
+
+static void rtw89_debug_disable_dm_set_flag(struct rtw89_dev *rtwdev, u8 flag)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 cur = hal->disabled_dm_bitmap;
+
+ rtw89_debug_disable_dm_cfg_bmap(rtwdev, cur | BIT(flag));
+}
+
+static void rtw89_debug_disable_dm_clr_flag(struct rtw89_dev *rtwdev, u8 flag)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 cur = hal->disabled_dm_bitmap;
+
+ rtw89_debug_disable_dm_cfg_bmap(rtwdev, cur & ~BIT(flag));
+}
+
#define DM_INFO(type) {RTW89_DM_ ## type, #type}
static const struct rtw89_disabled_dm_info {
@@ -4155,6 +4175,7 @@ static const struct rtw89_disabled_dm_info {
DM_INFO(DYNAMIC_EDCCA),
DM_INFO(THERMAL_PROTECT),
DM_INFO(TAS),
+ DM_INFO(MLO),
};
static ssize_t
@@ -4188,7 +4209,6 @@ rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev,
struct rtw89_debugfs_priv *debugfs_priv,
const char *buf, size_t count)
{
- struct rtw89_hal *hal = &rtwdev->hal;
u32 conf;
int ret;
@@ -4196,7 +4216,83 @@ rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev,
if (ret)
return -EINVAL;
- hal->disabled_dm_bitmap = conf;
+ rtw89_debug_disable_dm_cfg_bmap(rtwdev, conf);
+
+ return count;
+}
+
+static void rtw89_debug_mlo_mode_set_mlsr(struct rtw89_dev *rtwdev,
+ unsigned int link_id)
+{
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ vif = rtwvif_to_vif(rtwvif);
+ if (!ieee80211_vif_is_mld(vif))
+ continue;
+
+ rtw89_core_mlsr_switch(rtwdev, rtwvif, link_id);
+ }
+}
+
+static ssize_t
+rtw89_debug_priv_mlo_mode_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
+{
+ bool mlo_dm_dis = rtwdev->hal.disabled_dm_bitmap & BIT(RTW89_DM_MLO);
+ char *p = buf, *end = buf + bufsz;
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+ int count = 0;
+
+ p += scnprintf(p, end - p, "MLD(s) status: (MLO DM: %s)\n",
+ str_disable_enable(mlo_dm_dis));
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ vif = rtwvif_to_vif(rtwvif);
+ if (!ieee80211_vif_is_mld(vif))
+ continue;
+
+ p += scnprintf(p, end - p,
+ "\t#%u: MLO mode %x, valid 0x%x, active 0x%x\n",
+ count++, rtwvif->mlo_mode, vif->valid_links,
+ vif->active_links);
+ }
+
+ if (count == 0)
+ p += scnprintf(p, end - p, "\t(None)\n");
+
+ return p - buf;
+}
+
+static ssize_t
+rtw89_debug_priv_mlo_mode_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
+{
+ u8 num, mlo_mode;
+ u32 argv;
+
+ num = sscanf(buf, "%hhx %u", &mlo_mode, &argv);
+ if (num != 2)
+ return -EINVAL;
+
+ rtw89_debug_disable_dm_set_flag(rtwdev, RTW89_DM_MLO);
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "Set MLO mode to %x\n", mlo_mode);
+
+ switch (mlo_mode) {
+ case RTW89_MLO_MODE_MLSR:
+ rtw89_debug_mlo_mode_set_mlsr(rtwdev, argv);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "Unsupported MLO mode\n");
+ rtw89_debug_disable_dm_clr_flag(rtwdev, RTW89_DM_MLO);
+
+ return -EOPNOTSUPP;
+ }
return count;
}
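A hedged usage sketch for the new mlo_mode debugfs write: the handler expects "<mode-hex> <arg>", e.g. "0 1" to force MLSR onto link 1 (mode 0 being RTW89_MLO_MODE_MLSR). The parse below mirrors the handler's sscanf call.

#include <stdio.h>

int main(void)
{
	const char *buf = "0 1"; /* as written to the debugfs file */
	unsigned char mlo_mode;
	unsigned int argv;

	if (sscanf(buf, "%hhx %u", &mlo_mode, &argv) != 2)
		return 1;
	printf("mode=%#x link_id=%u\n", (unsigned int)mlo_mode, argv);
	return 0;
}
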
@@ -4257,7 +4353,8 @@ static const struct rtw89_debugfs rtw89_debugfs_templ = {
.fw_log_manual = rtw89_debug_priv_set(fw_log_manual, WLOCK),
.phy_info = rtw89_debug_priv_get(phy_info),
.stations = rtw89_debug_priv_get(stations, RLOCK),
- .disable_dm = rtw89_debug_priv_set_and_get(disable_dm),
+ .disable_dm = rtw89_debug_priv_set_and_get(disable_dm, RWLOCK),
+ .mlo_mode = rtw89_debug_priv_set_and_get(mlo_mode, RWLOCK),
};
#define rtw89_debugfs_add(name, mode, fopname, parent) \
@@ -4302,6 +4399,7 @@ void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_top
rtw89_debugfs_add_r(phy_info);
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
+ rtw89_debugfs_add_rw(mlo_mode);
}
void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 8643b17866f8..00b65b2995cf 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -15,6 +15,8 @@
#include "util.h"
#include "wow.h"
+static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev);
+
struct rtw89_eapol_2_of_2 {
u8 gtkbody[14];
u8 key_des_ver;
@@ -554,7 +556,7 @@ const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
if (sizeof(*mfw_hdr) > firmware->size)
return NULL;
- mfw_hdr = (const struct rtw89_mfw_hdr *)firmware->data;
+ mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0];
if (mfw_hdr->sig != RTW89_MFW_SIG)
return NULL;
@@ -850,6 +852,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -1018,7 +1021,7 @@ int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
}
n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
- regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
+ regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL);
if (!regs)
goto out;
@@ -1298,6 +1301,18 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
},
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL,
+ },
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL,
+ },
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL,
+ },
[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
@@ -1310,6 +1325,18 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
},
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL,
+ },
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL,
+ },
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL,
+ },
[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
@@ -2483,7 +2510,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
if (enable)
comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
- BIT(RTW89_FW_LOG_COMP_SCAN);
+ BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
@@ -3065,8 +3092,8 @@ static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
ntx_path = RF_A;
map_b = 0;
} else {
- ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
- map_b = hal->antenna_tx == RF_AB ? 1 : 0;
+ ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB;
+ map_b = ntx_path == RF_AB ? 1 : 0;
}
SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
@@ -4004,8 +4031,9 @@ out:
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link, bool dis_conn)
{
- struct sk_buff *skb;
u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ bool is_mld = ieee80211_vif_is_mld(vif);
u8 self_role = rtwvif_link->self_role;
enum rtw89_fw_sta_type sta_type;
u8 net_type = rtwvif_link->net_type;
@@ -4013,6 +4041,9 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
struct rtw89_h2c_join *h2c;
u32 len = sizeof(*h2c);
bool format_v1 = false;
+ struct sk_buff *skb;
+ u8 main_mac_id;
+ bool init_ps;
int ret;
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
@@ -4054,8 +4085,28 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
+ init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif);
+
+ if (rtwsta_link)
+ main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
+ else
+ main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif);
+
+ h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) |
+ le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) |
+ le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) |
+ le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR,
+ RTW89_H2C_JOININFO_W1_MLO_MODE) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) |
+ le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) |
+ le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US,
+ RTW89_H2C_JOININFO_W1_EMLSR_PADDING) |
+ le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US,
+ RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT);
- h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
h2c_v1->w2 = 0;
done:
@@ -4339,6 +4390,7 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
struct rtw89_h2c_bcnfltr *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
+ u8 max_cnt, cnt;
int ret;
if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
@@ -4367,12 +4419,20 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
+ if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw))
+ max_cnt = BIT(7) - 1;
+ else
+ max_cnt = BIT(4) - 1;
+
+ cnt = min(RTW89_BCN_LOSS_CNT, max_cnt);
+
h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
RTW89_H2C_BCNFLTR_W0_MODE) |
- le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
+ le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) |
+ le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) |
le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
le32_encode_bits(thold + MAX_RSSI,
RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
@@ -5298,12 +5358,12 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
}
static
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
- struct list_head *chan_list)
+int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list)
{
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_h2c_chinfo_elem *elem;
- struct rtw89_mac_chinfo *ch_info;
+ struct rtw89_mac_chinfo_ax *ch_info;
struct rtw89_h2c_chinfo *h2c;
struct sk_buff *skb;
unsigned int cond;
@@ -5477,7 +5537,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return 0;
}
-#define RTW89_SCAN_DELAY_TSF_UNIT 104800
+#define RTW89_SCAN_DELAY_TSF_UNIT 1000000
int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
struct rtw89_vif_link *rtwvif_link,
@@ -5741,7 +5801,7 @@ flex_member:
RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
le32_encode_bits(0,
RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
- le32_encode_bits(2,
+ le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 1 : 2,
RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
@@ -6525,7 +6585,7 @@ void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
rtw89_fw_prog_cnt_dump(rtwdev);
}
-static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
+static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev)
{
struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
struct rtw89_pktofld_info *info, *tmp;
@@ -6544,6 +6604,23 @@ static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
}
}
+static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+
+ mac->free_chan_list(rtwdev);
+ rtw89_hw_scan_release_pkt_list(rtwdev);
+
+ rtwvif->scan_req = NULL;
+ rtwvif->scan_ies = NULL;
+ scan_info->scanning_vif = NULL;
+ scan_info->abort = false;
+ scan_info->connected = false;
+}
+
static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
struct cfg80211_scan_request *req,
struct rtw89_pktofld_info *info,
@@ -6612,7 +6689,8 @@ out:
}
static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+ struct rtw89_vif_link *rtwvif_link,
+ const u8 *mac_addr)
{
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct cfg80211_scan_request *req = rtwvif->scan_req;
@@ -6621,7 +6699,7 @@ static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
int ret;
for (i = 0; i < num; i++) {
- skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
+ skb = ieee80211_probereq_get(rtwdev->hw, mac_addr,
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
@@ -6638,10 +6716,10 @@ static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
return 0;
}
-static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
- struct ieee80211_scan_ies *ies,
- struct cfg80211_scan_request *req,
- struct rtw89_mac_chinfo *ch_info)
+static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev,
+ struct ieee80211_scan_ies *ies,
+ struct cfg80211_scan_request *req,
+ struct rtw89_mac_chinfo_ax *ch_info)
{
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
@@ -6713,7 +6791,7 @@ out:
static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
int chan_type, int ssid_num,
- struct rtw89_mac_chinfo *ch_info)
+ struct rtw89_mac_chinfo_ax *ch_info)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_pktofld_info *info;
@@ -6761,9 +6839,9 @@ static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
- int ssid_num,
- struct rtw89_mac_chinfo *ch_info)
+static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo_ax *ch_info)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
@@ -6794,7 +6872,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
}
}
- ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info);
+ ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info);
if (ret)
rtw89_warn(rtwdev, "RNR fails: %d\n", ret);
@@ -6950,7 +7028,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
- struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct rtw89_mac_chinfo_ax *ch_info, *tmp;
struct ieee80211_channel *channel;
struct list_head chan_list;
int list_len;
@@ -6984,7 +7062,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
list_add_tail(&ch_info->list, &chan_list);
}
- ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+ ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list);
out:
list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
@@ -6995,24 +7073,24 @@ out:
return ret;
}
-int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected)
+int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct cfg80211_scan_request *req = rtwvif->scan_req;
- struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct rtw89_mac_chinfo_ax *ch_info, *tmp;
struct ieee80211_channel *channel;
struct list_head chan_list;
bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
- int list_len, off_chan_time = 0;
enum rtw89_chan_type type;
- int ret = 0;
+ int off_chan_time = 0;
+ int ret;
u32 idx;
INIT_LIST_HEAD(&chan_list);
- for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
- idx++, list_len++) {
+
+ for (idx = 0; idx < req->n_channels; idx++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
if (!ch_info) {
@@ -7039,9 +7117,9 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
type = RTW89_CHAN_DFS;
else
type = RTW89_CHAN_ACTIVE;
- rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
+ rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info);
- if (connected &&
+ if (scan_info->connected &&
off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
@@ -7053,16 +7131,16 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
type = RTW89_CHAN_OPERATE;
tmp->period = req->duration_mandatory ?
req->duration : RTW89_CHANNEL_TIME;
- rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
+ rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp);
list_add_tail(&tmp->list, &chan_list);
off_chan_time = 0;
- list_len++;
}
list_add_tail(&ch_info->list, &chan_list);
off_chan_time += ch_info->period;
}
- rtwdev->scan_info.last_chan_idx = idx;
- ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+
+ list_splice_tail(&chan_list, &scan_info->chan_list);
+ return 0;
out:
list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
@@ -7073,6 +7151,46 @@ out:
return ret;
}
+void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_mac_chinfo_ax *ch_info, *tmp;
+
+ list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+}
+
+int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_mac_chinfo_ax *ch_info, *tmp;
+ unsigned int list_len = 0;
+ struct list_head list;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+
+ list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ list_move_tail(&ch_info->list, &list);
+
+ list_len++;
+ if (list_len == RTW89_SCAN_LIST_LIMIT_AX)
+ break;
+ }
+
+ ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list);
+
+ list_for_each_entry_safe(ch_info, tmp, &list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
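The refactor above splits channel handling into prep (build the full list once) and add (consume up to RTW89_SCAN_LIST_LIMIT_AX entries per firmware call, via list_move_tail in the driver). A simplified index-based model of that consumption loop, assuming a limit of 3 for brevity:

#include <stdio.h>

#define SCAN_LIST_LIMIT 3 /* stand-in for RTW89_SCAN_LIST_LIMIT_AX */

/* One offload call: consume up to SCAN_LIST_LIMIT prepared channels. */
static void add_chan_list(const int *chans, unsigned int n, unsigned int *cursor)
{
	unsigned int len = 0;

	printf("offload:");
	while (*cursor < n && len++ < SCAN_LIST_LIMIT)
		printf(" %d", chans[(*cursor)++]);
	printf("\n");
}

int main(void)
{
	const int chans[] = { 1, 6, 11, 36, 40 }; /* prepared once up front */
	unsigned int n = sizeof(chans) / sizeof(chans[0]);
	unsigned int cursor = 0;

	while (cursor < n) /* -> "offload: 1 6 11", then "offload: 36 40" */
		add_chan_list(chans, n, &cursor);
	return 0;
}
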
int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
@@ -7126,25 +7244,24 @@ out:
return ret;
}
-int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected)
+int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo_be *ch_info, *tmp;
struct ieee80211_channel *channel;
struct list_head chan_list;
enum rtw89_chan_type type;
- int list_len, ret;
bool random_seq;
+ int ret;
u32 idx;
random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
INIT_LIST_HEAD(&chan_list);
- for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
- idx++, list_len++) {
+ for (idx = 0; idx < req->n_channels; idx++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
if (!ch_info) {
@@ -7174,9 +7291,8 @@ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
list_add_tail(&ch_info->list, &chan_list);
}
- rtwdev->scan_info.last_chan_idx = idx;
- ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
- rtwvif_link);
+ list_splice_tail(&chan_list, &scan_info->chan_list);
+ return 0;
out:
list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
@@ -7187,51 +7303,182 @@ out:
return ret;
}
+void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+
+ list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+}
+
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ unsigned int list_len = 0;
+ struct list_head list;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+
+ list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ list_move_tail(&ch_info->list, &list);
+
+ list_len++;
+ if (list_len == RTW89_SCAN_LIST_LIMIT_BE)
+ break;
+ }
+
+ ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list,
+ rtwvif_link);
+
+ list_for_each_entry_safe(ch_info, tmp, &list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected)
+ struct rtw89_vif_link *rtwvif_link,
+ const u8 *mac_addr)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
- ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link);
+ ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr);
if (ret) {
rtw89_err(rtwdev, "Update probe request failed\n");
goto out;
}
- ret = mac->add_chan_list(rtwdev, rtwvif_link, connected);
+ ret = mac->prep_chan_list(rtwdev, rtwvif_link);
out:
return ret;
}
-void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- struct ieee80211_scan_request *scan_req)
+static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ u16 tu)
+{
+ struct ieee80211_p2p_noa_desc noa_desc = {};
+ u64 tsf;
+ int ret;
+
+ ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
+ if (ret) {
+ rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__);
+ return;
+ }
+
+ noa_desc.start_time = cpu_to_le32(tsf);
+ noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu));
+ noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu));
+ noa_desc.count = 1;
+
+ rtw89_p2p_noa_renew(rtwvif_link);
+ rtw89_p2p_noa_append(rtwvif_link, &noa_desc);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
+}
+
+static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev,
+ const struct cfg80211_scan_request *req)
+{
+ const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
+ const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_mac_chinfo_ax *chinfo_ax;
+ struct rtw89_mac_chinfo_be *chinfo_be;
+ struct rtw89_vif_link *rtwvif_link;
+ struct list_head *pos, *tmp;
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+ u16 tu = 0;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ list_for_each_safe(pos, tmp, &scan_info->chan_list) {
+ switch (chip->chip_gen) {
+ case RTW89_CHIP_AX:
+ chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list);
+ tu += chinfo_ax->period;
+ break;
+ case RTW89_CHIP_BE:
+ chinfo_be = list_entry(pos, typeof(*chinfo_be), list);
+ tu += chinfo_be->period;
+ break;
+ default:
+ rtw89_warn(rtwdev, "%s: invalid chip gen %d\n",
+ __func__, chip->chip_gen);
+ return;
+ }
+ }
+
+ if (unlikely(tu == 0)) {
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "%s: cannot estimate needed TU\n", __func__);
+ return;
+ }
+
+ list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) {
+ unsigned int link_id;
+
+ vif = rtwvif_to_vif(rtwvif);
+ if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
+ continue;
+
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
+ rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, tu);
+ }
+}
+
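In the NoA update above, the descriptor duration is the summed dwell of the prepared scan channels in TU; ieee80211_tu_to_usec() is a fixed 1 TU = 1024 us conversion. A quick model with made-up dwell periods:

#include <stdio.h>

static unsigned int tu_to_usec(unsigned int tu)
{
	return tu * 1024; /* 802.11 time unit: 1 TU = 1024 us */
}

int main(void)
{
	const unsigned int periods[] = { 105, 105, 45, 45 }; /* hypothetical */
	unsigned int tu = 0;

	for (unsigned int i = 0; i < 4; i++)
		tu += periods[i];
	printf("%u TU -> %u us NoA duration\n", tu, tu_to_usec(tu));
	return 0;
}
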
+int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_scan_request *scan_req)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
struct cfg80211_scan_request *req = &scan_req->req;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_chanctx_pause_parm pause_parm = {
+ .rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
+ .trigger = rtwvif_link,
+ };
u32 rx_fltr = rtwdev->hal.rx_fltr;
u8 mac_addr[ETH_ALEN];
u32 reg;
+ int ret;
/* clone op and keep it during scan */
rtwdev->scan_info.op_chan = *chan;
+ rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
rtwdev->scan_info.scanning_vif = rtwvif_link;
- rtwdev->scan_info.last_chan_idx = 0;
rtwdev->scan_info.abort = false;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
- ieee80211_stop_queues(rtwdev->hw);
- rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
req->mac_addr_mask);
else
ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
+
+ ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr);
+ if (ret) {
+ rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
+ return ret;
+ }
+
+ ieee80211_stop_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
+
rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);
rx_fltr &= ~B_AX_A_BCN_CHK_EN;
@@ -7241,7 +7488,12 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);
- rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
+ rtw89_chanctx_pause(rtwdev, &pause_parm);
+
+ if (mode == RTW89_ENTITY_MODE_MCC)
+ rtw89_hw_scan_update_beacon_noa(rtwdev, req);
+
+ return 0;
}
struct rtw89_hw_scan_complete_cb_data {
@@ -7252,20 +7504,16 @@ struct rtw89_hw_scan_complete_cb_data {
static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_hw_scan_complete_cb_data *cb_data = data;
struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
struct cfg80211_scan_info info = {
.aborted = cb_data->aborted,
};
- struct rtw89_vif *rtwvif;
u32 reg;
if (!rtwvif_link)
return -EINVAL;
- rtwvif = rtwvif_link->rtwvif;
-
reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
@@ -7275,12 +7523,7 @@ static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
- rtw89_release_pkt_list(rtwdev);
- rtwvif->scan_req = NULL;
- rtwvif->scan_ies = NULL;
- scan_info->last_chan_idx = 0;
- scan_info->scanning_vif = NULL;
- scan_info->abort = false;
+ rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
return 0;
}
@@ -7355,11 +7598,11 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
if (!rtwvif_link)
return -EINVAL;
- connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
+ connected = rtwdev->scan_info.connected;
opt.enable = enable;
opt.target_ch_mode = connected;
if (enable) {
- ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected);
+ ret = mac->add_chan_list(rtwdev, rtwvif_link);
if (ret)
goto out;
}
@@ -8719,6 +8962,47 @@ int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
return 0;
}
+int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ bool enable)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
+ struct rtw89_h2c_mlo_link_cfg *h2c;
+ u8 mac_id = rtwvif_link->mac_id;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data;
+
+ h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) |
+ le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MLO,
+ H2C_FUNC_MLO_LINK_CFG, 0, 0,
+ len);
+
+ cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n",
+ str_enable_disable(enable), rtwvif_link->link_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
@@ -9106,6 +9390,26 @@ void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
}
}
+static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev,
+ const struct rtw89_rfe_parms *parms)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->support_bands & BIT(NL80211_BAND_2GHZ) &&
+ !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru))
+ return false;
+
+ if (chip->support_bands & BIT(NL80211_BAND_5GHZ) &&
+ !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru))
+ return false;
+
+ if (chip->support_bands & BIT(NL80211_BAND_6GHZ) &&
+ !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru))
+ return false;
+
+ return true;
+}
+
const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
const struct rtw89_rfe_parms *init)
@@ -9142,6 +9446,21 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
}
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz);
+ parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v;
+ }
+
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz);
+ parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v;
+ }
+
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz);
+ parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v;
+ }
+
if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
@@ -9157,6 +9476,21 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
}
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz);
+ parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v;
+ }
+
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz);
+ parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v;
+ }
+
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz);
+ parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v;
+ }
+
if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
@@ -9167,5 +9501,7 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
}
+ parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms);
+
return parms;
}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 55255b48bdb7..0fcc824e41be 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -199,6 +199,7 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_TWT,
RTW89_FW_LOG_COMP_RF,
RTW89_FW_LOG_COMP_MCC = 20,
+ RTW89_FW_LOG_COMP_MLO = 26,
RTW89_FW_LOG_COMP_SCAN = 28,
};
@@ -333,9 +334,9 @@ struct rtw89_fw_macid_pause_sleep_grp {
#define RTW89_SCAN_LIST_LIMIT_AX RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE)
#define RTW89_SCAN_LIST_LIMIT_BE RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE_BE)
-#define RTW89_BCN_LOSS_CNT 10
+#define RTW89_BCN_LOSS_CNT 60
-struct rtw89_mac_chinfo {
+struct rtw89_mac_chinfo_ax {
u8 period;
u8 dwell_time;
u8 central_ch;
@@ -1636,6 +1637,8 @@ struct rtw89_h2c_join_v1 {
#define RTW89_H2C_JOININFO_W1_IS_MLD BIT(3)
#define RTW89_H2C_JOININFO_W1_MAIN_MACID GENMASK(11, 4)
#define RTW89_H2C_JOININFO_W1_MLO_MODE BIT(12)
+#define RTW89_H2C_JOININFO_MLO_MODE_MLMR 0
+#define RTW89_H2C_JOININFO_MLO_MODE_MLSR 1
#define RTW89_H2C_JOININFO_W1_EMLSR_CAB BIT(13)
#define RTW89_H2C_JOININFO_W1_NSTR_EN BIT(14)
#define RTW89_H2C_JOININFO_W1_INIT_PWR_STATE BIT(15)
@@ -2863,6 +2866,13 @@ struct rtw89_h2c_fwips {
#define RTW89_H2C_FW_IPS_W0_MACID GENMASK(7, 0)
#define RTW89_H2C_FW_IPS_W0_ENABLE BIT(8)
+struct rtw89_h2c_mlo_link_cfg {
+ __le32 w0;
+};
+
+#define RTW89_H2C_MLO_LINK_CFG_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_MLO_LINK_CFG_W0_OPTION GENMASK(19, 16)
+
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
@@ -3562,6 +3572,7 @@ struct rtw89_c2h_done_ack {
#define RTW89_C2H_DONE_ACK_W2_CLASS GENMASK(7, 2)
#define RTW89_C2H_DONE_ACK_W2_FUNC GENMASK(15, 8)
#define RTW89_C2H_DONE_ACK_W2_H2C_RETURN GENMASK(23, 16)
+#define RTW89_C2H_SCAN_DONE_ACK_RETURN GENMASK(5, 0)
#define RTW89_C2H_DONE_ACK_W2_H2C_SEQ GENMASK(31, 24)
#define RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h) \
@@ -3620,6 +3631,19 @@ struct rtw89_c2h_ra_rpt {
#define RTW89_C2H_RA_RPT_W3_MD_SEL_B2 BIT(15)
#define RTW89_C2H_RA_RPT_W3_BW_B2 BIT(16)
+struct rtw89_c2h_fw_scan_rpt {
+ struct rtw89_c2h_hdr hdr;
+ u8 phy_idx;
+ u8 band;
+ u8 center_ch;
+ u8 ofdm_pd_idx; /* in unit of 2 dBm */
+#define PD_LOWER_BOUND_BASE 102
+ s8 cck_pd_idx;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+} __packed;
+
/* For WiFi 6 chips:
* VHT, HE, HT-old: [6:4]: NSS, [3:0]: MCS
* HT-new: [6:5]: NA, [4:0]: MCS
@@ -3717,6 +3741,25 @@ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE)
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+struct rtw89_c2h_mlo_link_cfg_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+} __packed;
+
+#define RTW89_C2H_MLO_LINK_CFG_RPT_W2_MACID GENMASK(15, 0)
+#define RTW89_C2H_MLO_LINK_CFG_RPT_W2_STATUS GENMASK(19, 16)
+
+enum rtw89_c2h_mlo_link_status {
+ RTW89_C2H_MLO_LINK_CFG_IDLE = 0,
+ RTW89_C2H_MLO_LINK_CFG_DONE = 1,
+ RTW89_C2H_MLO_LINK_CFG_ISSUE_NULL_FAIL = 2,
+ RTW89_C2H_MLO_LINK_CFG_TX_NULL_FAIL = 3,
+ RTW89_C2H_MLO_LINK_CFG_ROLE_NOT_EXIST = 4,
+ RTW89_C2H_MLO_LINK_CFG_NULL_1_TIMEOUT = 5,
+ RTW89_C2H_MLO_LINK_CFG_NULL_0_TIMEOUT = 6,
+ RTW89_C2H_MLO_LINK_CFG_RUNNING = 0xff,
+};
+
struct rtw89_mac_mrc_tsf_rpt {
unsigned int num;
u64 tsfs[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
@@ -3813,7 +3856,8 @@ struct rtw89_h2c_bcnfltr {
#define RTW89_H2C_BCNFLTR_W0_MON_BCN BIT(1)
#define RTW89_H2C_BCNFLTR_W0_MON_EN BIT(2)
#define RTW89_H2C_BCNFLTR_W0_MODE GENMASK(4, 3)
-#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT GENMASK(11, 8)
+#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3 GENMASK(7, 5)
+#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4 GENMASK(11, 8)
#define RTW89_H2C_BCNFLTR_W0_RSSI_HYST GENMASK(15, 12)
#define RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD GENMASK(23, 16)
#define RTW89_H2C_BCNFLTR_W0_MAC_ID GENMASK(31, 24)
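With BEACON_LOSS_COUNT_V1 firmware, the 7-bit loss count is split across word 0: low nibble in L4 (bits 11:8) and high three bits in H3 (bits 7:5), matching the cnt >> 4 / cnt & 0xf encoding in rtw89_fw_h2c_set_bcn_fltr_cfg(). A round-trip check of that packing; the FIELD_PREP/FIELD_GET macros below approximate the kernel's:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)    (((1u << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)  (((r) & (m)) >> __builtin_ctz(m))

#define W0_BCN_LOSS_CNT_H3 GENMASK(7, 5)
#define W0_BCN_LOSS_CNT_L4 GENMASK(11, 8)

int main(void)
{
	uint8_t cnt = 60; /* new RTW89_BCN_LOSS_CNT, fits in 7 bits */
	uint32_t w0;
	uint8_t decoded;

	w0 = FIELD_PREP(W0_BCN_LOSS_CNT_H3, cnt >> 4) |
	     FIELD_PREP(W0_BCN_LOSS_CNT_L4, cnt & 0xf);
	decoded = (FIELD_GET(W0_BCN_LOSS_CNT_H3, w0) << 4) |
		  FIELD_GET(W0_BCN_LOSS_CNT_L4, w0);

	printf("cnt=%u w0=0x%x decoded=%u\n", cnt, (unsigned int)w0, decoded);
	return 0; /* prints decoded == 60 */
}
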
@@ -3891,6 +3935,12 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TXPWR_TRK = 18,
RTW89_FW_ELEMENT_ID_RFKLOG_FMT = 19,
RTW89_FW_ELEMENT_ID_REGD = 20,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ = 21,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ = 22,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ = 23,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ = 24,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ = 25,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ = 26,
RTW89_FW_ELEMENT_ID_NUM,
};
@@ -4229,6 +4279,26 @@ enum rtw89_mcc_h2c_func {
#define RTW89_MCC_WAIT_COND(group, func) \
((group) * NUM_OF_RTW89_MCC_H2C_FUNC + (func))
+/* CLASS 20 - MLO */
+#define H2C_CL_MLO 0x14
+enum rtw89_mlo_h2c_func {
+ H2C_FUNC_MLO_TBL_CFG = 0x0,
+ H2C_FUNC_MLO_STA_CFG = 0x1,
+ H2C_FUNC_MLO_TTLM = 0x2,
+ H2C_FUNC_MLO_DM_CFG = 0x3,
+ H2C_FUNC_MLO_EMLSR_STA_CFG = 0x4,
+ H2C_FUNC_MLO_MCMLO_RELINK_DROP = 0x5,
+ H2C_FUNC_MLO_MCMLO_SN_SYNC = 0x6,
+ H2C_FUNC_MLO_RELINK = 0x7,
+ H2C_FUNC_MLO_LINK_CFG = 0x8,
+ H2C_FUNC_MLO_DM_DBG = 0x9,
+
+ NUM_OF_RTW89_MLO_H2C_FUNC,
+};
+
+#define RTW89_MLO_WAIT_COND(macid, func) \
+ ((macid) * NUM_OF_RTW89_MLO_H2C_FUNC + (func))
+
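RTW89_MLO_WAIT_COND() above flattens (macid, func) into a single completion-wait condition, macid * NUM_OF_RTW89_MLO_H2C_FUNC + func, so concurrent MLO H2Cs on different MACIDs never collide. A tiny check:

#include <stdio.h>

#define NUM_OF_MLO_H2C_FUNC 10 /* mirrors the enum count above */
#define MLO_WAIT_COND(macid, func) ((macid) * NUM_OF_MLO_H2C_FUNC + (func))

int main(void)
{
	/* H2C_FUNC_MLO_LINK_CFG == 0x8 */
	printf("macid 2: %u, macid 3: %u\n",
	       MLO_WAIT_COND(2, 0x8), MLO_WAIT_COND(3, 0x8)); /* 28, 38 */
	return 0;
}
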
/* CLASS 24 - MRC */
#define H2C_CL_MRC 0x18
enum rtw89_mrc_h2c_func {
@@ -4715,9 +4785,9 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *c2h_info);
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
-void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- struct ieee80211_scan_request *scan_req);
+int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_scan_request *scan_req);
void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
bool aborted);
@@ -4726,12 +4796,18 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
+void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev);
int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected);
+ struct rtw89_vif_link *rtwvif_link);
int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
+void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev);
int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected);
+ struct rtw89_vif_link *rtwvif_link);
int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
@@ -4800,6 +4876,8 @@ int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_upd_duration_arg *arg);
int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en);
+int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ bool enable);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index b4841f948ec1..9f0e30e75009 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -4631,11 +4631,17 @@ static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev,
if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif_link == rtwvif_src)
return;
+ if (rtwvif_link->rand_tsf_done)
+ goto out;
+
/* adjust offset randomly to avoid beacon conflict */
offset = offset - offset / 4 + get_random_u32() % (offset / 2);
rtw89_mac_port_tsf_sync(rtwdev, rtwvif_link, rtwvif_src,
(*n_offset) * offset);
+ rtwvif_link->rand_tsf_done = true;
+
+out:
(*n_offset)++;
}
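
For reference, the randomized adjustment above draws the per-port sync offset uniformly from [0.75 * offset, 1.25 * offset): offset - offset/4 sets the lower bound and the modulo term adds up to offset/2 on top. A minimal standalone sketch of the same arithmetic, with get_random_u32() replaced by a hypothetical LCG purely for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for get_random_u32(); any 32-bit PRNG works for this demo */
static uint32_t prng(void)
{
	static uint32_t s = 12345;

	s = s * 1664525u + 1013904223u;
	return s;
}

int main(void)
{
	uint32_t offset = 102400; /* nominal TBTT spacing between ports */
	uint32_t randomized = offset - offset / 4 + prng() % (offset / 2);

	/* always lands in [0.75 * offset, 1.25 * offset) */
	printf("%" PRIu32 " -> %" PRIu32 "\n", offset, randomized);
	return 0;
}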
@@ -4866,6 +4872,8 @@ void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link);
+
+ rtwvif_link->rand_tsf_done = false;
}
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
@@ -4899,11 +4907,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif;
struct rtw89_chan new;
- u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
u16 actual_period, expect_period;
u8 reason, status, tx_fail, band;
u8 mac_idx, sw_def, fw_def;
u8 ver = U8_MAX;
+ u32 report_tsf;
u16 chan;
int ret;
@@ -4962,7 +4970,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
if (rtwvif_link && rtwvif->scan_req &&
- last_chan < rtwvif->scan_req->n_channels) {
+ !list_empty(&rtwdev->scan_info.chan_list)) {
ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true);
if (ret) {
rtw89_hw_scan_abort(rtwdev, rtwvif_link);
@@ -5017,7 +5025,8 @@ rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
switch (type) {
case RTW89_BCN_FLTR_BEACON_LOSS:
- if (!rtwdev->scanning && !rtwvif->offchan)
+ if (!rtwdev->scanning && !rtwvif->offchan &&
+ !rtwvif_link->noa_once.in_duration)
ieee80211_connection_loss(vif);
else
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
@@ -5110,12 +5119,14 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
return;
case H2C_FUNC_ADD_SCANOFLD_CH:
cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+ h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN;
break;
case H2C_FUNC_SCANOFLD:
cond = RTW89_SCANOFLD_WAIT_COND_START;
break;
case H2C_FUNC_SCANOFLD_BE:
cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
+ h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN;
break;
}
@@ -5399,6 +5410,27 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
}
static void
+rtw89_mac_c2h_mlo_link_cfg_stat(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ const struct rtw89_c2h_mlo_link_cfg_rpt *c2h_rpt;
+ struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+ u16 mac_id;
+ u8 status;
+
+ c2h_rpt = (const struct rtw89_c2h_mlo_link_cfg_rpt *)c2h->data;
+
+ mac_id = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MLO_LINK_CFG_RPT_W2_MACID);
+ status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MLO_LINK_CFG_RPT_W2_STATUS);
+
+ data.err = status == RTW89_C2H_MLO_LINK_CFG_ROLE_NOT_EXIST ||
+ status == RTW89_C2H_MLO_LINK_CFG_RUNNING;
+ cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);
+ rtw89_complete_cond(wait, cond, &data);
+}
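
The handler above is the completion half of a request/response pair keyed by RTW89_MLO_WAIT_COND(). A hedged sketch of the issuing side, modeled on the driver's existing rtw89_wait_for_cond() pattern; the actual body of rtw89_fw_h2c_mlo_link_cfg() is not shown in this hunk, so the details here are assumptions:

/* sketch only: pair an MLO link-cfg H2C with the C2H completion above */
static int mlo_link_cfg_and_wait(struct rtw89_dev *rtwdev, u16 macid)
{
	struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
	unsigned int cond = RTW89_MLO_WAIT_COND(macid, H2C_FUNC_MLO_LINK_CFG);

	/* ... build and send the H2C_FUNC_MLO_LINK_CFG command ... */

	/* blocks until rtw89_mac_c2h_mlo_link_cfg_stat() completes the
	 * matching cond for this macid, or the wait times out
	 */
	return rtw89_wait_for_cond(wait, cond);
}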
+
+static void
rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
@@ -5553,6 +5585,18 @@ void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev,
};
static
+void (* const rtw89_mac_c2h_mlo_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_MLO_GET_TBL] = NULL,
+ [RTW89_MAC_C2H_FUNC_MLO_EMLSR_TRANS_DONE] = NULL,
+ [RTW89_MAC_C2H_FUNC_MLO_EMLSR_STA_CFG_DONE] = NULL,
+ [RTW89_MAC_C2H_FUNC_MCMLO_RELINK_RPT] = NULL,
+ [RTW89_MAC_C2H_FUNC_MCMLO_SN_SYNC_RPT] = NULL,
+ [RTW89_MAC_C2H_FUNC_MLO_LINK_CFG_STAT] = rtw89_mac_c2h_mlo_link_cfg_stat,
+ [RTW89_MAC_C2H_FUNC_MLO_DM_DBG_DUMP] = NULL,
+};
+
+static
void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_MAC_C2H_FUNC_MRC_TSF_RPT] = rtw89_mac_c2h_mrc_tsf_rpt,
@@ -5621,6 +5665,8 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
}
case RTW89_MAC_C2H_CLASS_MCC:
return true;
+ case RTW89_MAC_C2H_CLASS_MLO:
+ return true;
case RTW89_MAC_C2H_CLASS_MRC:
return true;
case RTW89_MAC_C2H_CLASS_WOW:
@@ -5654,6 +5700,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC)
handler = rtw89_mac_c2h_mcc_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_MLO:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MLO)
+ handler = rtw89_mac_c2h_mlo_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_MRC:
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC)
handler = rtw89_mac_c2h_mrc_handler[func];
@@ -6889,6 +6939,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.is_txq_empty = mac_is_txq_empty_ax,
+ .prep_chan_list = rtw89_hw_scan_prep_chan_list_ax,
+ .free_chan_list = rtw89_hw_scan_free_chan_list_ax,
.add_chan_list = rtw89_hw_scan_add_chan_list_ax,
.add_chan_list_pno = rtw89_pno_scan_add_chan_list_ax,
.scan_offload = rtw89_fw_h2c_scan_offload_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index fd7935d24501..8013c852d5be 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -370,6 +370,7 @@ enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_TXD_FIFO_0_V1,
RTW89_MAC_MEM_TXD_FIFO_1_V1,
RTW89_MAC_MEM_WD_PAGE,
+ RTW89_MAC_MEM_MLD_TBL,
/* keep last */
RTW89_MAC_MEM_NUM,
@@ -427,6 +428,18 @@ enum rtw89_mac_c2h_mcc_func {
NUM_OF_RTW89_MAC_C2H_FUNC_MCC,
};
+enum rtw89_mac_c2h_mlo_func {
+ RTW89_MAC_C2H_FUNC_MLO_GET_TBL = 0x0,
+ RTW89_MAC_C2H_FUNC_MLO_EMLSR_TRANS_DONE = 0x1,
+ RTW89_MAC_C2H_FUNC_MLO_EMLSR_STA_CFG_DONE = 0x2,
+ RTW89_MAC_C2H_FUNC_MCMLO_RELINK_RPT = 0x3,
+ RTW89_MAC_C2H_FUNC_MCMLO_SN_SYNC_RPT = 0x4,
+ RTW89_MAC_C2H_FUNC_MLO_LINK_CFG_STAT = 0x5,
+ RTW89_MAC_C2H_FUNC_MLO_DM_DBG_DUMP = 0x6,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_MLO,
+};
+
enum rtw89_mac_c2h_mrc_func {
RTW89_MAC_C2H_FUNC_MRC_TSF_RPT = 0,
RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT = 1,
@@ -453,6 +466,7 @@ enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_WOW = 0x3,
RTW89_MAC_C2H_CLASS_MCC = 0x4,
RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
+ RTW89_MAC_C2H_CLASS_MLO = 0xc,
RTW89_MAC_C2H_CLASS_MRC = 0xe,
RTW89_MAC_C2H_CLASS_AP = 0x18,
RTW89_MAC_C2H_CLASS_MAX,
@@ -1031,8 +1045,11 @@ struct rtw89_mac_gen_def {
bool (*is_txq_empty)(struct rtw89_dev *rtwdev);
+ int (*prep_chan_list)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
+ void (*free_chan_list)(struct rtw89_dev *rtwdev);
int (*add_chan_list)(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected);
+ struct rtw89_vif_link *rtwvif_link);
int (*add_chan_list_pno)(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int (*scan_offload)(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 4fded07d0bee..a47971003bd4 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -114,11 +114,14 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
wiphy_work_init(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
INIT_LIST_HEAD(&rtwvif_link->general_pkt_list);
+ rtw89_p2p_noa_once_init(rtwvif_link);
+
rtwvif_link->hit_rule = 0;
rtwvif_link->bcn_hit_cond = 0;
rtwvif_link->chanctx_assigned = false;
rtwvif_link->chanctx_idx = RTW89_CHANCTX_0;
rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+ rtwvif_link->rand_tsf_done = false;
rcu_read_lock();
@@ -142,6 +145,8 @@ static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev,
wiphy_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->update_beacon_work);
+ rtw89_p2p_noa_once_deinit(rtwvif_link);
+
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, NULL, BTC_ROLE_STOP);
@@ -186,6 +191,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) {
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
INIT_LIST_HEAD(&rtwvif->mgnt_entry);
+ INIT_LIST_HEAD(&rtwvif->dlink_pool);
}
ether_addr_copy(rtwvif->mac_addr, vif->addr);
@@ -479,6 +485,8 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
int i;
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR;
+
/* for station mode, assign the mac_id from itself */
macid = rtw89_vif_get_main_macid(rtwvif);
} else {
@@ -494,6 +502,8 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
rtw89_core_txq_init(rtwdev, sta->txq[i]);
+ INIT_LIST_HEAD(&rtwsta->dlink_pool);
+
skb_queue_head_init(&rtwsta->roc_queue);
bitmap_zero(rtwsta->pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
@@ -1018,7 +1028,7 @@ static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw,
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_sta_link *rtwsta_link;
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
+ rtwsta_link = rtw89_get_designated_link(rtwsta);
if (unlikely(!rtwsta_link))
return;
@@ -1153,12 +1163,14 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
lockdep_assert_wiphy(hw->wiphy);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "sw scan start: find no link on HW-0\n");
+ rtw89_err(rtwdev, "sw scan start: find no designated link\n");
return;
}
+ rtw89_leave_lps(rtwdev);
+
rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, false);
}
@@ -1171,9 +1183,9 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
lockdep_assert_wiphy(hw->wiphy);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "sw scan complete: find no link on HW-0\n");
+ rtw89_err(rtwdev, "sw scan complete: find no designated link\n");
return;
}
@@ -1205,13 +1217,19 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (rtwdev->scanning || rtwvif->offchan)
return -EBUSY;
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "hw scan: find no link on HW-0\n");
+ rtw89_err(rtwdev, "hw scan: find no designated link\n");
return -ENOLINK;
}
- rtw89_hw_scan_start(rtwdev, rtwvif_link, req);
+ rtw89_leave_lps(rtwdev);
+ rtw89_leave_ips_by_hwflags(rtwdev);
+
+ ret = rtw89_hw_scan_start(rtwdev, rtwvif_link, req);
+ if (ret)
+ return ret;
+
ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true);
if (ret) {
rtw89_hw_scan_abort(rtwdev, rtwvif_link);
@@ -1236,9 +1254,9 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
if (!rtwdev->scanning)
return;
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
+ rtw89_err(rtwdev, "cancel hw scan: find no designated link\n");
return;
}
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 99b82dc85ea3..8c9d326dc907 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -29,6 +29,7 @@ static const u32 rtw89_mac_mem_base_addrs_be[RTW89_MAC_MEM_NUM] = {
[RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR_BE,
[RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR_BE,
[RTW89_MAC_MEM_WD_PAGE] = WD_PAGE_BASE_ADDR_BE,
+ [RTW89_MAC_MEM_MLD_TBL] = MLD_TBL_BASE_ADDR_BE,
};
static const struct rtw89_port_reg rtw89_port_base_be = {
@@ -2636,6 +2637,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.is_txq_empty = mac_is_txq_empty_be,
+ .prep_chan_list = rtw89_hw_scan_prep_chan_list_be,
+ .free_chan_list = rtw89_hw_scan_free_chan_list_be,
.add_chan_list = rtw89_hw_scan_add_chan_list_be,
.add_chan_list_pno = rtw89_pno_scan_add_chan_list_be,
.scan_offload = rtw89_fw_h2c_scan_offload_be,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index c2fe5a898dc7..064f6a940107 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -228,7 +228,7 @@ int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
- int rx_tag_retry = 100;
+ int rx_tag_retry = 1000;
int ret;
do {
@@ -3105,17 +3105,26 @@ static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev)
return false;
}
-static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
+static int rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev, bool force)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
+ u8 val;
- if (!rtwpci->enable_dac)
- return;
+ if (!rtwpci->enable_dac && !force)
+ return 0;
if (!rtw89_pci_chip_is_manual_dac(rtwdev))
- return;
+ return 0;
- rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS);
+ /* Configure DAC only via PCI config API, not DBI interfaces */
+ ret = pci_read_config_byte(pdev, RTW89_PCIE_L1_CTRL, &val);
+ if (ret)
+ return ret;
+
+ val |= RTW89_PCIE_BIT_EN_64BITS;
+ return pci_write_config_byte(pdev, RTW89_PCIE_L1_CTRL, val);
}
static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
@@ -3133,13 +3142,16 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
}
if (!rtw89_pci_is_dac_compatible_bridge(rtwdev))
- goto no_dac;
+ goto try_dac_done;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
if (!ret) {
- rtwpci->enable_dac = true;
- rtw89_pci_cfg_dac(rtwdev);
- } else {
+ ret = rtw89_pci_cfg_dac(rtwdev, true);
+ if (!ret) {
+ rtwpci->enable_dac = true;
+ goto try_dac_done;
+ }
+
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
rtw89_err(rtwdev,
@@ -3147,7 +3159,7 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
goto err_release_regions;
}
}
-no_dac:
+try_dac_done:
resource_len = pci_resource_len(pdev, bar_id);
rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
@@ -4302,7 +4314,7 @@ static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
{
if (resume)
- rtw89_pci_cfg_dac(rtwdev);
+ rtw89_pci_cfg_dac(rtwdev, false);
rtw89_pci_disable_eq(rtwdev);
rtw89_pci_filter_out(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index f4eee642e5ce..76a2e26d4a10 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2034,19 +2034,10 @@ static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
ant_gain->offset[path][subband_h]);
}
-static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq)
+static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u32 center_freq)
{
- struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
- const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 regd = rtw89_regd_get(rtwdev, band);
s8 offset_patha, offset_pathb;
- if (!chip->support_ant_gain)
- return 0;
-
- if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
- return 0;
-
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
@@ -2056,18 +2047,31 @@ static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 cente
return max(offset_patha, offset_pathb);
}
-s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan)
+static bool rtw89_can_apply_ant_gain(struct rtw89_dev *rtwdev, u8 band)
{
+ const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
- s8 offset_patha, offset_pathb;
+ u8 regd = rtw89_regd_get(rtwdev, band);
if (!chip->support_ant_gain)
- return 0;
+ return false;
if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
+ return false;
+
+ if (!rfe_parms->has_da)
+ return false;
+
+ return true;
+}
+
+s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ s8 offset_patha, offset_pathb;
+
+ if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type))
return 0;
if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
@@ -2083,14 +2087,10 @@ EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
const struct rtw89_chan *chan)
{
- struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
- const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
char *p = buf, *end = buf + bufsz;
s8 offset_patha, offset_pathb;
- if (!(chip->support_ant_gain && (ant_gain->regd_enabled & BIT(regd))) ||
- ant_gain->block_country) {
+ if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type)) {
p += scnprintf(p, end - p, "no DAG is applied\n");
goto out;
}
@@ -2255,20 +2255,31 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
+ const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
+ const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
+ s8 lmt = 0, da_lmt = S8_MAX, sar, offset = 0;
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt = 0, sar, offset;
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = freq,
+ .ntx = ntx,
+ };
s8 cstr;
switch (band) {
case RTW89_BAND_2G:
+ if (has_ant_gain)
+ da_lmt = (*rule_da_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
+
lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
if (lmt)
break;
@@ -2276,6 +2287,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
break;
case RTW89_BAND_5G:
+ if (has_ant_gain)
+ da_lmt = (*rule_da_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
+
lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
if (lmt)
break;
@@ -2283,6 +2297,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
break;
case RTW89_BAND_6G:
+ if (has_ant_gain)
+ da_lmt = (*rule_da_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
+
lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
if (lmt)
break;
@@ -2296,9 +2313,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
- lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset);
- sar = rtw89_query_sar(rtwdev, freq);
+ da_lmt = da_lmt ?: S8_MAX;
+ if (da_lmt != S8_MAX)
+ offset = rtw89_phy_ant_gain_offset(rtwdev, freq);
+
+ lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt + offset, da_lmt));
+ sar = rtw89_query_sar(rtwdev, &sar_parm);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
return min3(lmt, sar, cstr);
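
Two details above are easy to miss: a DAG table value of 0 means "no entry", which the ?: idiom maps to S8_MAX so min() leaves the regulatory limit untouched, and the antenna-gain offset is folded in only when a DAG entry exists; SAR and the TPE constraint are applied only after the DAG cap and the RF-to-MAC conversion. A standalone numeric sketch with illustrative values (units simplified to a single scale):

#include <stdio.h>

#define S8_MAX 127
#define min(a, b) ((a) < (b) ? (a) : (b))
#define min3(a, b, c) min(min(a, b), c)

int main(void)
{
	int lmt = 56, da_lmt = 52;	/* regulatory limit, DAG limit */
	int sar = 48, cstr = 50;	/* SAR bound, TPE constraint */

	/* 0 in the DAG table means "no entry": no extra cap */
	da_lmt = da_lmt ? da_lmt : S8_MAX;

	/* tightest of capped limit, SAR and TPE -> 48 */
	printf("final limit = %d\n", min3(min(lmt, da_lmt), sar, cstr));
	return 0;
}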
@@ -2515,20 +2535,31 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch)
{
const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
+ const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
+ const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
+ s8 lmt_ru = 0, da_lmt_ru = S8_MAX, sar, offset = 0;
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt_ru = 0, sar, offset;
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = freq,
+ .ntx = ntx,
+ };
s8 cstr;
switch (band) {
case RTW89_BAND_2G:
+ if (has_ant_gain)
+ da_lmt_ru = (*rule_da_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
+
lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
if (lmt_ru)
break;
@@ -2536,6 +2567,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
break;
case RTW89_BAND_5G:
+ if (has_ant_gain)
+ da_lmt_ru = (*rule_da_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
+
lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
if (lmt_ru)
break;
@@ -2543,6 +2577,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
break;
case RTW89_BAND_6G:
+ if (has_ant_gain)
+ da_lmt_ru = (*rule_da_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
+
lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
if (lmt_ru)
break;
@@ -2556,9 +2593,12 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
- lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset);
- sar = rtw89_query_sar(rtwdev, freq);
+ da_lmt_ru = da_lmt_ru ?: S8_MAX;
+ if (da_lmt_ru != S8_MAX)
+ offset = rtw89_phy_ant_gain_offset(rtwdev, freq);
+
+ lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt_ru + offset, da_lmt_ru));
+ sar = rtw89_query_sar(rtwdev, &sar_parm);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
return min3(lmt_ru, sar, cstr);
@@ -3015,6 +3055,35 @@ void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};
+static void
+rtw89_phy_c2h_lowrt_rty(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
+static void
+rtw89_phy_c2h_fw_scan_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ const struct rtw89_c2h_fw_scan_rpt *c2h_rpt =
+ (const struct rtw89_c2h_fw_scan_rpt *)c2h->data;
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "%s: band: %u, op_chan: %u, PD_low_bd(ofdm, cck): (-%d, %d), phy_idx: %u\n",
+ __func__, c2h_rpt->band, c2h_rpt->center_ch,
+ PD_LOWER_BOUND_BASE - (c2h_rpt->ofdm_pd_idx << 1),
+ c2h_rpt->cck_pd_idx, c2h_rpt->phy_idx);
+}
+
+static
+void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_PHY_C2H_DM_FUNC_FW_TEST] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_FW_TRIG_TX_RPT] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_SIGB] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY] = rtw89_phy_c2h_lowrt_rty,
+ [RTW89_PHY_C2H_DM_FUNC_MCC_DIG] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
+};
+
static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
enum rtw89_phy_c2h_rfk_log_func func,
void *content, u16 len)
@@ -3550,9 +3619,9 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler = rtw89_phy_c2h_rfk_report_handler[func];
break;
case RTW89_PHY_C2H_CLASS_DM:
- if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
- return;
- fallthrough;
+ if (func < ARRAY_SIZE(rtw89_phy_c2h_dm_handler))
+ handler = rtw89_phy_c2h_dm_handler[func];
+ break;
default:
rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
return;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 518a100375fb..5b451f1cfaac 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -164,6 +164,7 @@ enum rtw89_phy_c2h_dm_func {
RTW89_PHY_C2H_DM_FUNC_SIGB,
RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY,
RTW89_PHY_C2H_DM_FUNC_MCC_DIG,
+ RTW89_PHY_C2H_DM_FUNC_FW_SCAN = 0xc,
RTW89_PHY_C2H_DM_FUNC_NUM,
};
@@ -935,6 +936,20 @@ static inline s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
}
+static inline s16 rtw89_phy_txpwr_mac_to_rf(struct rtw89_dev *rtwdev, s8 txpwr_mac)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_mac << (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
+}
+
+static inline s16 rtw89_phy_txpwr_mac_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_mac)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_mac << (chip->txpwr_factor_bb - chip->txpwr_factor_mac);
+}
+
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index 37d8f254ae32..d321cf1fc485 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -362,7 +362,7 @@ static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev,
rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0);
rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0);
addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx);
- rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ON, 0);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ALL, 0);
addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx);
rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0);
addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index ac46a7baa00d..8e4fe73e7d77 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -382,3 +382,150 @@ u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data)
tail = ie->noa_desc + setter->noa_count;
return tail - *data;
}
+
+static void rtw89_ps_noa_once_set_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_ps_noa_once_handler *noa_once =
+ container_of(work, struct rtw89_ps_noa_once_handler, set_work.work);
+
+ lockdep_assert_wiphy(wiphy);
+
+ noa_once->in_duration = true;
+}
+
+static void rtw89_ps_noa_once_clr_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_ps_noa_once_handler *noa_once =
+ container_of(work, struct rtw89_ps_noa_once_handler, clr_work.work);
+ struct rtw89_vif_link *rtwvif_link =
+ container_of(noa_once, struct rtw89_vif_link, noa_once);
+ struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev;
+
+ lockdep_assert_wiphy(wiphy);
+
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ noa_once->in_duration = false;
+}
+
+void rtw89_p2p_noa_once_init(struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_ps_noa_once_handler *noa_once = &rtwvif_link->noa_once;
+
+ noa_once->in_duration = false;
+ noa_once->tsf_begin = 0;
+ noa_once->tsf_end = 0;
+
+ wiphy_delayed_work_init(&noa_once->set_work, rtw89_ps_noa_once_set_work);
+ wiphy_delayed_work_init(&noa_once->clr_work, rtw89_ps_noa_once_clr_work);
+}
+
+static void rtw89_p2p_noa_once_cancel(struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_ps_noa_once_handler *noa_once = &rtwvif_link->noa_once;
+ struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev;
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ wiphy_delayed_work_cancel(wiphy, &noa_once->set_work);
+ wiphy_delayed_work_cancel(wiphy, &noa_once->clr_work);
+}
+
+void rtw89_p2p_noa_once_deinit(struct rtw89_vif_link *rtwvif_link)
+{
+ rtw89_p2p_noa_once_cancel(rtwvif_link);
+ rtw89_p2p_noa_once_init(rtwvif_link);
+}
+
+void rtw89_p2p_noa_once_recalc(struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_ps_noa_once_handler *noa_once = &rtwvif_link->noa_once;
+ struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev;
+ const struct ieee80211_p2p_noa_desc *noa_desc;
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+ struct ieee80211_bss_conf *bss_conf;
+ u64 tsf_begin = U64_MAX, tsf_end;
+ u64 set_delay_us = 0;
+ u64 clr_delay_us = 0;
+ u32 start_time;
+ u32 interval;
+ u32 duration;
+ u64 tsf;
+ int ret;
+ int i;
+
+ lockdep_assert_wiphy(wiphy);
+
+ ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
+ if (ret) {
+ rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__);
+ return;
+ }
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+
+ for (i = 0; i < ARRAY_SIZE(bss_conf->p2p_noa_attr.desc); i++) {
+ bool first = tsf_begin == U64_MAX;
+ u64 tmp;
+
+ noa_desc = &bss_conf->p2p_noa_attr.desc[i];
+ if (noa_desc->count == 0 || noa_desc->count == 255)
+ continue;
+
+ start_time = le32_to_cpu(noa_desc->start_time);
+ interval = le32_to_cpu(noa_desc->interval);
+ duration = le32_to_cpu(noa_desc->duration);
+
+ if (unlikely(duration == 0 ||
+ (noa_desc->count > 1 && interval == 0)))
+ continue;
+
+ tmp = start_time + interval * (noa_desc->count - 1) + duration;
+ tmp = (tsf & GENMASK_ULL(63, 32)) + tmp;
+ if (unlikely(tmp <= tsf))
+ continue;
+ tsf_end = first ? tmp : max(tsf_end, tmp);
+
+ tmp = (tsf & GENMASK_ULL(63, 32)) | start_time;
+ tsf_begin = first ? tmp : min(tsf_begin, tmp);
+ }
+
+ rcu_read_unlock();
+
+ if (tsf_begin == U64_MAX)
+ return;
+
+ rtw89_p2p_noa_once_cancel(rtwvif_link);
+
+ if (noa_once->tsf_end > tsf) {
+ tsf_begin = min(tsf_begin, noa_once->tsf_begin);
+ tsf_end = max(tsf_end, noa_once->tsf_end);
+ }
+
+ clr_delay_us = min_t(u64, tsf_end - tsf, UINT_MAX);
+
+ if (tsf_begin <= tsf) {
+ noa_once->in_duration = true;
+ goto out;
+ }
+
+ set_delay_us = tsf_begin - tsf;
+ if (unlikely(set_delay_us > UINT_MAX)) {
+ rtw89_warn(rtwdev, "%s: unhandled begin\n", __func__);
+ set_delay_us = 0;
+ clr_delay_us = 0;
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ noa_once->in_duration = false;
+ }
+
+out:
+ if (set_delay_us)
+ wiphy_delayed_work_queue(wiphy, &noa_once->set_work,
+ usecs_to_jiffies(set_delay_us));
+ if (clr_delay_us)
+ wiphy_delayed_work_queue(wiphy, &noa_once->clr_work,
+ usecs_to_jiffies(clr_delay_us));
+
+ noa_once->tsf_begin = tsf_begin;
+ noa_once->tsf_end = tsf_end;
+}
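
The recalc above splices the 32-bit NoA clock onto the upper half of the current 64-bit TSF: the begin time replaces the low word with OR, while the end time is added so an overflow of the low word carries into the high word. A standalone sketch of that arithmetic, mirroring the kernel logic with no driver state involved:

#include <stdint.h>

/* absolute begin: NoA start time replaces the TSF's low 32 bits */
static uint64_t noa_abs_begin(uint64_t tsf, uint32_t start_time)
{
	return (tsf & 0xffffffff00000000ULL) | start_time;
}

/* absolute end of the last absence: start + (count - 1) intervals +
 * duration, added so a low-word overflow carries into the high word
 */
static uint64_t noa_abs_end(uint64_t tsf, uint32_t start_time,
			    uint32_t interval, uint32_t duration,
			    uint8_t count)
{
	uint64_t rel = (uint64_t)start_time +
		       (uint64_t)interval * (count - 1) + duration;

	return (tsf & 0xffffffff00000000ULL) + rel;
}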
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index 2b88f254a32d..b2c43d44820d 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -22,6 +22,9 @@ void rtw89_p2p_noa_renew(struct rtw89_vif_link *rtwvif_link);
void rtw89_p2p_noa_append(struct rtw89_vif_link *rtwvif_link,
const struct ieee80211_p2p_noa_desc *desc);
u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data);
+void rtw89_p2p_noa_once_init(struct rtw89_vif_link *rtwvif_link);
+void rtw89_p2p_noa_once_deinit(struct rtw89_vif_link *rtwvif_link);
+void rtw89_p2p_noa_once_recalc(struct rtw89_vif_link *rtwvif_link);
static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index c776954ad360..f05c81ae5869 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -7601,7 +7601,15 @@
#define B_BE_PWR_FORCE_RU_ON BIT(18)
#define B_BE_PWR_FORCE_RU_ENON BIT(28)
#define R_BE_PWR_FORCE_MACID 0x11A48
-#define B_BE_PWR_FORCE_MACID_ON BIT(9)
+#define B_BE_PWR_FORCE_MACID_DBM_ON BIT(9)
+#define B_BE_PWR_FORCE_MACID_DBM_VAL GENMASK(17, 10)
+#define B_BE_PWR_FORCE_MACID_EN_VAL BIT(18)
+#define B_BE_PWR_FORCE_MACID_EN_ON BIT(19)
+#define B_BE_PWR_FORCE_MACID_ALL \
+ (B_BE_PWR_FORCE_MACID_DBM_ON | \
+ B_BE_PWR_FORCE_MACID_DBM_VAL | \
+ B_BE_PWR_FORCE_MACID_EN_VAL | \
+ B_BE_PWR_FORCE_MACID_EN_ON)
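
The four force-MACID fields are contiguous, bit 9 through bit 19, so the composed mask equals GENMASK(19, 9) and a single rtw89_write32_mask() can clear every field at once, as the phy_be.c hunk above now does. A compile-time sketch of that equivalence, with the BIT/GENMASK helpers redefined so a userspace static_assert can check it:

#include <assert.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define DBM_ON	BIT(9)
#define DBM_VAL	GENMASK(17, 10)
#define EN_VAL	BIT(18)
#define EN_ON	BIT(19)

static_assert((DBM_ON | DBM_VAL | EN_VAL | EN_ON) == GENMASK(19, 9),
	      "force-MACID fields span bits 19:9 with no gaps");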
#define R_BE_PWR_REG_CTRL 0x11A50
#define B_BE_PWR_BT_EN BIT(23)
@@ -8737,8 +8745,10 @@
#define B_DPD_GDIS BIT(13)
#define B_IQK_RFC_ON BIT(1)
#define R_TXPWRB 0x56CC
+#define R_P1_TXPWRB 0x76CC
#define B_TXPWRB_ON BIT(28)
#define B_TXPWRB_VAL GENMASK(27, 19)
+#define B_TXPWRB_MAX GENMASK(8, 0)
#define R_DPD_OFT_EN 0x5800
#define B_DPD_OFT_EN BIT(28)
#define B_DPD_TSSI_CW GENMASK(26, 18)
@@ -9360,6 +9370,9 @@
#define R_TSSI_PWR_P0 0xE610
#define R_TSSI_PWR_P1 0xE710
#define B_TSSI_CONT_EN BIT(3)
+#define R_P0_TXPWRB_BE 0xE61C
+#define R_P1_TXPWRB_BE 0xE71C
+#define B_TXPWRB_MAX_BE GENMASK(20, 12)
#define R_TSSI_MAP_OFST_P0 0xE620
#define R_TSSI_MAP_OFST_P1 0xE720
#define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 655323a79608..3ad14cab1f58 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -588,6 +588,38 @@ bottom:
kfree(sband);
}
+#define RTW89_DEF_REGD_STR(regd) \
+ [RTW89_ ## regd] = #regd
+
+static const char * const rtw89_regd_string[] = {
+ RTW89_DEF_REGD_STR(WW),
+ RTW89_DEF_REGD_STR(ETSI),
+ RTW89_DEF_REGD_STR(FCC),
+ RTW89_DEF_REGD_STR(MKK),
+ RTW89_DEF_REGD_STR(NA),
+ RTW89_DEF_REGD_STR(IC),
+ RTW89_DEF_REGD_STR(KCC),
+ RTW89_DEF_REGD_STR(ACMA),
+ RTW89_DEF_REGD_STR(NCC),
+ RTW89_DEF_REGD_STR(MEXICO),
+ RTW89_DEF_REGD_STR(CHILE),
+ RTW89_DEF_REGD_STR(UKRAINE),
+ RTW89_DEF_REGD_STR(CN),
+ RTW89_DEF_REGD_STR(QATAR),
+ RTW89_DEF_REGD_STR(UK),
+ RTW89_DEF_REGD_STR(THAILAND),
+};
+
+static_assert(ARRAY_SIZE(rtw89_regd_string) == RTW89_REGD_NUM);
+
+const char *rtw89_regd_get_string(enum rtw89_regulation_type regd)
+{
+ if (regd < 0 || regd >= RTW89_REGD_NUM)
+ return "(unknown)";
+
+ return rtw89_regd_string[regd];
+}
+
int rtw89_regd_setup(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
@@ -604,6 +636,7 @@ int rtw89_regd_setup(struct rtw89_dev *rtwdev)
}
regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+ regulatory->txpwr_uk_follow_etsi = true;
if (!wiphy)
return -EINVAL;
@@ -726,11 +759,22 @@ static void rtw89_regd_apply_policy_tas(struct rtw89_dev *rtwdev)
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_regd *regd = regulatory->regd;
struct rtw89_tas_info *tas = &rtwdev->tas;
+ u8 tas_country;
if (!tas->enable)
return;
- tas->block_regd = !test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap);
+ if (memcmp("US", regd->alpha2, 2) == 0)
+ tas_country = RTW89_ACPI_CONF_TAS_US;
+ else if (memcmp("CA", regd->alpha2, 2) == 0)
+ tas_country = RTW89_ACPI_CONF_TAS_CA;
+ else if (memcmp("KR", regd->alpha2, 2) == 0)
+ tas_country = RTW89_ACPI_CONF_TAS_KR;
+ else
+ tas_country = RTW89_ACPI_CONF_TAS_OTHERS;
+
+ tas->block_regd = !(tas->enabled_countries & tas_country &&
+ test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap));
}
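
The bitwise test tas->enabled_countries & tas_country implies the RTW89_ACPI_CONF_TAS_* constants are bit flags; their definitions live in the ACPI header, outside this hunk, so the values below are assumptions for illustration only:

/* assumed flag layout, purely illustrative */
#define RTW89_ACPI_CONF_TAS_US		BIT(0)
#define RTW89_ACPI_CONF_TAS_CA		BIT(1)
#define RTW89_ACPI_CONF_TAS_KR		BIT(2)
#define RTW89_ACPI_CONF_TAS_OTHERS	BIT(3)

/* TAS stays blocked unless the ACPI policy enables the matched country
 * group AND the regd's function bitmap permits TAS
 */
bool allowed = (tas->enabled_countries & tas_country) &&
	       test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap);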
static void rtw89_regd_apply_policy_ant_gain(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 0d482cd57f6e..fafa200a9c8d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2499,12 +2499,14 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_unii4 = true,
.support_ant_gain = false,
.support_tas = false,
+ .support_sar_by_ant = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.hw_tkip_crypto = false,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 1,
.tx_nss = 1,
.rx_nss = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 286334e26c84..cd5987fc52d7 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2217,12 +2217,14 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_unii4 = false,
.support_ant_gain = false,
.support_tas = false,
+ .support_sar_by_ant = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.hw_tkip_crypto = false,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index eceb4fb9880d..dacdb384de2c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -853,12 +853,14 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = false,
+ .support_sar_by_ant = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.hw_tkip_crypto = false,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index 99c9505b3cbd..0cf03f18cbb1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -8,6 +8,7 @@
#include "phy.h"
#include "reg.h"
#include "rtw8852b_common.h"
+#include "sar.h"
#include "util.h"
static const struct rtw89_reg3_def rtw8852bx_pmac_ht20_mcs7_tbl[] = {
@@ -1234,6 +1235,7 @@ static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
u32_encode_bits(ref, B_DPD_REF);
}
+/* @pwr_ofst (unit: 1/8 dBm): power of path A minus power of path B */
static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx, s16 pwr_ofst)
{
@@ -1336,6 +1338,27 @@ static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev,
tx_shape_ofdm);
}
+static s16 rtw8852bx_get_txpwr_sar_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = chan->freq,
+ .force_path = true,
+ };
+ s16 sar_bb_a, sar_bb_b;
+ s8 sar_mac;
+
+ sar_parm.path = RF_PATH_A;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_bb_a = rtw89_phy_txpwr_mac_to_bb(rtwdev, sar_mac);
+
+ sar_parm.path = RF_PATH_B;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_bb_b = rtw89_phy_txpwr_mac_to_bb(rtwdev, sar_mac);
+
+ return sar_bb_a - sar_bb_b;
+}
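
The conversion helpers added to phy.h treat power values as fixed point: a MAC value is dBm scaled by 2^txpwr_factor_mac (see rtw89_phy_txpwr_dbm_to_mac() earlier in this patch), and mac_to_bb()/mac_to_rf() rescale by shifting with the factor difference. A standalone numeric sketch, assuming illustrative factors of 2 (MAC, 1/4 dB steps) and 3 (BB, 1/8 dB steps):

#include <stdio.h>

int main(void)
{
	int factor_mac = 2, factor_bb = 3; /* hypothetical chip factors */
	int sar_mac = 48;		   /* 48 * 1/4 dB = 12 dBm */

	/* same physical power re-expressed in 1/8 dB BB steps */
	int sar_bb = sar_mac << (factor_bb - factor_mac); /* = 96 */

	printf("%d mac (1/4 dB) == %d bb (1/8 dB) == %.2f dBm\n",
	       sar_mac, sar_bb, sar_bb / 8.0);
	return 0;
}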
+
static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -1343,6 +1366,7 @@ static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev,
s16 pwr_ofst;
pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ pwr_ofst += rtw8852bx_get_txpwr_sar_diff(rtwdev, chan);
rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index bbf37442c492..289dce688d72 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -786,12 +786,14 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = false,
+ .support_sar_by_ant = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.hw_tkip_crypto = true,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 08bcdf246382..2a6143a8d256 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -15,7 +15,7 @@
#include "sar.h"
#include "util.h"
-#define RTW8852C_FW_FORMAT_MAX 1
+#define RTW8852C_FW_FORMAT_MAX 2
#define RTW8852C_FW_BASENAME "rtw89/rtw8852c_fw"
#define RTW8852C_MODULE_FIRMWARE \
RTW8852C_FW_BASENAME "-" __stringify(RTW8852C_FW_FORMAT_MAX) ".bin"
@@ -2079,6 +2079,31 @@ static void rtw8852c_set_txpwr_diff(struct rtw89_dev *rtwdev,
rtw8852c_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
}
+static void rtw8852c_set_txpwr_sar_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = chan->freq,
+ .force_path = true,
+ };
+ s16 sar_rf;
+ s8 sar_mac;
+
+ if (phy_idx != RTW89_PHY_0)
+ return;
+
+ sar_parm.path = RF_PATH_A;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB, B_TXPWRB_MAX, sar_rf);
+
+ sar_parm.path = RF_PATH_B;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPWRB, B_TXPWRB_MAX, sar_rf);
+}
+
static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -2089,6 +2114,7 @@ static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
rtw8852c_set_txpwr_diff(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_sar_diff(rtwdev, chan, phy_idx);
}
static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
@@ -3014,12 +3040,14 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = true,
+ .support_sar_by_ant = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
.hw_tkip_crypto = true,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 8082592db84a..1d0f6e7df497 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -12,6 +12,7 @@
#include "reg.h"
#include "rtw8922a.h"
#include "rtw8922a_rfk.h"
+#include "sar.h"
#include "util.h"
#define RTW8922A_FW_FORMAT_MAX 3
@@ -2070,7 +2071,8 @@ static void __rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev,
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
rtw89_phy_rfk_dack_and_wait(rtwdev, phy_idx, chan, 58);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, phy_idx, chan, false, 32);
+ if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, phy_idx, chan, false, 128);
}
static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
@@ -2233,6 +2235,31 @@ static void rtw8922a_set_tx_shape(struct rtw89_dev *rtwdev,
rtw8922a_bb_tx_triangular(rtwdev, true, phy_idx);
}
+static void rtw8922a_set_txpwr_sar_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = chan->freq,
+ .force_path = true,
+ };
+ s16 sar_rf;
+ s8 sar_mac;
+
+ if (phy_idx != RTW89_PHY_0)
+ return;
+
+ sar_parm.path = RF_PATH_A;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPWRB_BE, B_TXPWRB_MAX_BE, sar_rf);
+
+ sar_parm.path = RF_PATH_B;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPWRB_BE, B_TXPWRB_MAX_BE, sar_rf);
+}
+
static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -2244,6 +2271,7 @@ static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
rtw8922a_set_txpwr_diff(rtwdev, chan, phy_idx);
rtw8922a_set_txpwr_ref(rtwdev, phy_idx);
+ rtw8922a_set_txpwr_sar_diff(rtwdev, chan, phy_idx);
}
static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
@@ -2823,12 +2851,14 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = false,
+ .support_sar_by_ant = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
.hw_tkip_crypto = true,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
index c4c93f836a2f..1659ea64ade1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -77,11 +77,6 @@ void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
RR_CFGCH_BAND0 | RR_CFGCH_CH);
rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);
- if (band == RTW89_BAND_2G)
- rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0);
- else
- rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1);
-
switch (band) {
case RTW89_BAND_2G:
default:
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 0b5af9528702..517b66022f18 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -57,10 +57,12 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
}
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev,
- u32 center_freq, s32 *cfg)
+ const struct rtw89_sar_parm *sar_parm,
+ s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
enum rtw89_sar_subband subband_l, subband_h;
+ u32 center_freq = sar_parm->center_freq;
const struct rtw89_6ghz_span *span;
span = rtw89_get_6ghz_span(rtwdev, center_freq);
@@ -90,6 +92,93 @@ static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev,
return 0;
}
+static const struct rtw89_sar_entry_from_acpi *
+rtw89_sar_cfg_acpi_get_ent(const struct rtw89_sar_cfg_acpi *rtwsar,
+ enum rtw89_rf_path path,
+ enum rtw89_regulation_type regd)
+{
+ const struct rtw89_sar_indicator_from_acpi *ind = &rtwsar->indicator;
+ const struct rtw89_sar_table_from_acpi *tbl;
+ u8 sel;
+
+ sel = ind->tblsel[path];
+ tbl = &rtwsar->tables[sel];
+
+ return &tbl->entries[regd];
+}
+
+static
+s32 rtw89_sar_cfg_acpi_get_min(const struct rtw89_sar_entry_from_acpi *ent,
+ enum rtw89_rf_path path,
+ enum rtw89_acpi_sar_subband subband_low,
+ enum rtw89_acpi_sar_subband subband_high)
+{
+ return min(ent->v[subband_low][path], ent->v[subband_high][path]);
+}
+
+static int rtw89_query_sar_config_acpi(struct rtw89_dev *rtwdev,
+ const struct rtw89_sar_parm *sar_parm,
+ s32 *cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_sar_cfg_acpi *rtwsar = &rtwdev->sar.cfg_acpi;
+ const struct rtw89_sar_entry_from_acpi *ent_a, *ent_b;
+ enum rtw89_acpi_sar_subband subband_l, subband_h;
+ u32 center_freq = sar_parm->center_freq;
+ const struct rtw89_6ghz_span *span;
+ enum rtw89_regulation_type regd;
+ enum rtw89_band band;
+ s32 cfg_a, cfg_b;
+
+ span = rtw89_get_6ghz_span(rtwdev, center_freq);
+
+ if (span && RTW89_ACPI_SAR_SPAN_VALID(span)) {
+ subband_l = span->acpi_sar_subband_low;
+ subband_h = span->acpi_sar_subband_high;
+ } else {
+ subband_l = rtw89_acpi_sar_get_subband(rtwdev, center_freq);
+ subband_h = subband_l;
+ }
+
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband_l);
+ regd = rtw89_regd_get(rtwdev, band);
+
+ ent_a = rtw89_sar_cfg_acpi_get_ent(rtwsar, RF_PATH_A, regd);
+ ent_b = rtw89_sar_cfg_acpi_get_ent(rtwsar, RF_PATH_B, regd);
+
+ cfg_a = rtw89_sar_cfg_acpi_get_min(ent_a, RF_PATH_A, subband_l, subband_h);
+ cfg_b = rtw89_sar_cfg_acpi_get_min(ent_b, RF_PATH_B, subband_l, subband_h);
+
+ if (chip->support_sar_by_ant) {
+ /* When support_sar_by_ant is declared, relax the general
+ * SAR query to return the maximum between paths. The chip
+ * code is then expected to have handled the per-path SAR
+ * settings itself. (To get the SAR for a given path, it can
+ * query again with force_path set.)
+ */
+ if (sar_parm->force_path) {
+ switch (sar_parm->path) {
+ default:
+ case RF_PATH_A:
+ *cfg = cfg_a;
+ break;
+ case RF_PATH_B:
+ *cfg = cfg_b;
+ break;
+ }
+ } else {
+ *cfg = max(cfg_a, cfg_b);
+ }
+ } else {
+ *cfg = min(cfg_a, cfg_b);
+ }
+
+ if (sar_parm->ntx == RTW89_2TX)
+ *cfg -= rtwsar->downgrade_2tx;
+
+ return 0;
+}
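
A usage sketch of the parameterized query, matching how the 8852b/8852c/8922a txpwr paths above call it: on support_sar_by_ant chips the general query returns the looser (max) bound across paths, and force_path retrieves a single path's own bound. Driver context is elided, so treat this as a sketch only:

/* sketch: general vs. per-path SAR query via the new rtw89_sar_parm */
struct rtw89_sar_parm parm = {
	.center_freq = chan->freq,
};
s8 sar_any, sar_a;

sar_any = rtw89_query_sar(rtwdev, &parm);  /* cross-path bound */

parm.force_path = true;
parm.path = RF_PATH_A;
sar_a = rtw89_query_sar(rtwdev, &parm);    /* path A only */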
+
static const
struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = {
[RTW89_SAR_SOURCE_COMMON] = {
@@ -97,6 +186,11 @@ struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = {
.txpwr_factor_sar = 2,
.query_sar_config = rtw89_query_sar_config_common,
},
+ [RTW89_SAR_SOURCE_ACPI] = {
+ .descr_sar_source = "RTW89_SAR_SOURCE_ACPI",
+ .txpwr_factor_sar = TXPWR_FACTOR_OF_RTW89_ACPI_SAR,
+ .query_sar_config = rtw89_query_sar_config_acpi,
+ },
};
#define rtw89_sar_set_src(_dev, _src, _cfg_name, _cfg_data) \
@@ -175,7 +269,7 @@ static const char *rtw89_tas_state_str(enum rtw89_tas_state state)
}
}
-s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
+s8 rtw89_query_sar(struct rtw89_dev *rtwdev, const struct rtw89_sar_parm *sar_parm)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
@@ -191,7 +285,7 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
if (src == RTW89_SAR_SOURCE_NONE)
return RTW89_SAR_TXPWR_MAC_MAX;
- ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg);
+ ret = sar_hdl->query_sar_config(rtwdev, sar_parm, &cfg);
if (ret)
return RTW89_SAR_TXPWR_MAC_MAX;
@@ -215,9 +309,10 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg);
}
+EXPORT_SYMBOL(rtw89_query_sar);
int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
- u32 center_freq)
+ const struct rtw89_sar_parm *sar_parm)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
@@ -238,7 +333,7 @@ int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
p += scnprintf(p, end - p, "source: %d (%s)\n", src,
sar_hdl->descr_sar_source);
- ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg);
+ ret = sar_hdl->query_sar_config(rtwdev, sar_parm, &cfg);
if (ret) {
p += scnprintf(p, end - p, "config: return code: %d\n", ret);
p += scnprintf(p, end - p,
@@ -252,6 +347,8 @@ int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
p += scnprintf(p, end - p, "config: %d (unit: 1/%lu dBm)\n", cfg,
BIT(fct));
+ p += scnprintf(p, end - p, "support different configs by antenna: %s\n",
+ str_yes_no(rtwdev->chip->support_sar_by_ant));
out:
return p - buf;
}
@@ -286,16 +383,7 @@ out:
static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev,
const struct rtw89_sar_cfg_common *sar)
{
- enum rtw89_sar_sources src;
-
- lockdep_assert_wiphy(rtwdev->hw->wiphy);
-
- src = rtwdev->sar.src;
- if (src != RTW89_SAR_SOURCE_NONE && src != RTW89_SAR_SOURCE_COMMON) {
- rtw89_warn(rtwdev, "SAR source: %d is in use", src);
- return -EBUSY;
- }
-
+ /* let common SAR have the highest priority; always apply it */
rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar);
rtw89_core_set_chip_txpwr(rtwdev);
rtw89_tas_reset(rtwdev, false);
@@ -363,18 +451,92 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
return rtw89_apply_sar_common(rtwdev, &sar_common);
}
+static void rtw89_apply_sar_acpi(struct rtw89_dev *rtwdev,
+ const struct rtw89_sar_cfg_acpi *sar)
+{
+ const struct rtw89_sar_table_from_acpi *tbl;
+ const struct rtw89_sar_entry_from_acpi *ent;
+ enum rtw89_sar_sources src;
+ unsigned int i, j, k;
+
+ src = rtwdev->sar.src;
+ if (src != RTW89_SAR_SOURCE_NONE) {
+ rtw89_warn(rtwdev, "SAR source: %d is in use", src);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "SAR-ACPI downgrade 2TX: %u (unit: 1/%lu dBm)\n",
+ sar->downgrade_2tx, BIT(TXPWR_FACTOR_OF_RTW89_ACPI_SAR));
+
+ for (i = 0; i < sar->valid_num; i++) {
+ tbl = &sar->tables[i];
+
+ for (j = 0; j < RTW89_REGD_NUM; j++) {
+ ent = &tbl->entries[j];
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "SAR-ACPI-[%u] REGD-%s (unit: 1/%lu dBm)\n",
+ i, rtw89_regd_get_string(j),
+ BIT(TXPWR_FACTOR_OF_RTW89_ACPI_SAR));
+
+ for (k = 0; k < NUM_OF_RTW89_ACPI_SAR_SUBBAND; k++)
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "On subband %u, { %d, %d }\n", k,
+ ent->v[k][RF_PATH_A], ent->v[k][RF_PATH_B]);
+ }
+ }
+
+ rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_ACPI, cfg_acpi, sar);
+
+ /* SAR via ACPI is only configured during early initialization,
+ * so it does not seem necessary to reset txpwr-related state here.
+ */
+}
+
+static void rtw89_set_sar_from_acpi(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_sar_cfg_acpi *cfg;
+ int ret;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ ret = rtw89_acpi_evaluate_sar(rtwdev, cfg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "evaluating ACPI SAR returns %d\n", ret);
+ goto out;
+ }
+
+ if (unlikely(!cfg->valid_num)) {
+ rtw89_debug(rtwdev, RTW89_DBG_SAR, "no valid SAR table from ACPI\n");
+ goto out;
+ }
+
+ rtw89_apply_sar_acpi(rtwdev, cfg);
+
+out:
+ kfree(cfg);
+}
+
static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
+ struct rtw89_sar_parm sar_parm = {};
int ret;
if (src == RTW89_SAR_SOURCE_NONE)
return false;
- ret = sar_hdl->query_sar_config(rtwdev, chan->freq, cfg);
+ sar_parm.center_freq = chan->freq;
+ ret = sar_hdl->query_sar_config(rtwdev, &sar_parm, cfg);
if (ret)
return false;
@@ -383,18 +545,27 @@ static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg)
return true;
}
-static void rtw89_tas_state_update(struct rtw89_dev *rtwdev,
- enum rtw89_tas_state state)
+static bool __rtw89_tas_state_update(struct rtw89_dev *rtwdev,
+ enum rtw89_tas_state state)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
if (tas->state == state)
- return;
+ return false;
rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: switch state: %s -> %s\n",
rtw89_tas_state_str(tas->state), rtw89_tas_state_str(state));
tas->state = state;
+ return true;
+}
+
+static void rtw89_tas_state_update(struct rtw89_dev *rtwdev,
+ enum rtw89_tas_state state)
+{
+ if (!__rtw89_tas_state_update(rtwdev, state))
+ return;
+
rtw89_core_set_chip_txpwr(rtwdev);
}
@@ -489,7 +660,7 @@ static void rtw89_tas_history_update(struct rtw89_dev *rtwdev)
rtw89_linear_to_db_quarter(div_u64(txpwr, PERCENT)));
}
-static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev)
+static bool rtw89_tas_rolling_average(struct rtw89_dev *rtwdev)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
s32 dpr_on_threshold, dpr_off_threshold;
@@ -515,18 +686,18 @@ static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev)
else if (txpwr_avg < dpr_off_threshold)
state = RTW89_TAS_STATE_DPR_OFF;
else
- return;
+ return false;
- rtw89_tas_state_update(rtwdev, state);
+ return __rtw89_tas_state_update(rtwdev, state);
}
-void rtw89_tas_init(struct rtw89_dev *rtwdev)
+static void rtw89_tas_init(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tas_info *tas = &rtwdev->tas;
+ const struct rtw89_acpi_policy_tas *ptr;
struct rtw89_acpi_dsm_result res = {};
int ret;
- u8 val;
if (!chip->support_tas)
return;
@@ -538,8 +709,9 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev)
return;
}
- val = res.u.value;
- switch (val) {
+ ptr = res.u.policy_tas;
+
+ switch (ptr->enable) {
case 0:
tas->enable = false;
break;
@@ -552,8 +724,13 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev)
if (!tas->enable) {
rtw89_debug(rtwdev, RTW89_DBG_SAR, "TAS not enable\n");
- return;
+ goto out;
}
+
+ tas->enabled_countries = ptr->enabled_countries;
+
+out:
+ kfree(ptr);
}
void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force)
@@ -598,29 +775,28 @@ void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force)
"tas: band: %u, freq: %u\n", chan->band_type, chan->freq);
}
-void rtw89_tas_track(struct rtw89_dev *rtwdev)
+static bool rtw89_tas_track(struct rtw89_dev *rtwdev)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
struct rtw89_hal *hal = &rtwdev->hal;
s32 cfg;
if (hal->disabled_dm_bitmap & BIT(RTW89_DM_TAS))
- return;
+ return false;
if (!rtw89_tas_is_active(rtwdev))
- return;
+ return false;
- if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd) {
- rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
- return;
- }
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd)
+ return __rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
if (tas->pause)
- return;
+ return false;
rtw89_tas_window_update(rtwdev);
rtw89_tas_history_update(rtwdev);
- rtw89_tas_rolling_average(rtwdev);
+
+ return rtw89_tas_rolling_average(rtwdev);
}
void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start)
@@ -667,3 +843,51 @@ void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
}
}
EXPORT_SYMBOL(rtw89_tas_chanctx_cb);
+
+void rtw89_sar_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_set_sar_from_acpi(rtwdev);
+ rtw89_tas_init(rtwdev);
+}
+
+static bool rtw89_sar_track_acpi(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_sar_cfg_acpi *cfg = &rtwdev->sar.cfg_acpi;
+ struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator;
+ const enum rtw89_sar_sources src = rtwdev->sar.src;
+ bool changed;
+ int ret;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ if (src != RTW89_SAR_SOURCE_ACPI)
+ return false;
+
+ if (!ind->enable_sync)
+ return false;
+
+ ret = rtw89_acpi_evaluate_dynamic_sar_indicator(rtwdev, cfg, &changed);
+ if (likely(!ret))
+ return changed;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "%s: failed to track indicator: %d; reset and disable\n",
+ __func__, ret);
+
+ memset(ind->tblsel, 0, sizeof(ind->tblsel));
+ ind->enable_sync = false;
+ return true;
+}
+
+void rtw89_sar_track(struct rtw89_dev *rtwdev)
+{
+ unsigned int changes = 0;
+
+ changes += rtw89_sar_track_acpi(rtwdev);
+ changes += rtw89_tas_track(rtwdev);
+
+ if (!changes)
+ return;
+
+ rtw89_core_set_chip_txpwr(rtwdev);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
index 0df1661db9a8..4b7f3d44f57b 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.h
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -10,25 +10,34 @@
#define RTW89_SAR_TXPWR_MAC_MAX 63
#define RTW89_SAR_TXPWR_MAC_MIN -64
+struct rtw89_sar_parm {
+ u32 center_freq;
+ enum rtw89_ntx ntx;
+
+ bool force_path;
+ enum rtw89_rf_path path;
+};
+
struct rtw89_sar_handler {
const char *descr_sar_source;
u8 txpwr_factor_sar;
- int (*query_sar_config)(struct rtw89_dev *rtwdev, u32 center_freq, s32 *cfg);
+ int (*query_sar_config)(struct rtw89_dev *rtwdev,
+ const struct rtw89_sar_parm *sar_parm, s32 *cfg);
};
extern const struct cfg80211_sar_capa rtw89_sar_capa;
-s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq);
+s8 rtw89_query_sar(struct rtw89_dev *rtwdev, const struct rtw89_sar_parm *sar_parm);
int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
- u32 center_freq);
+ const struct rtw89_sar_parm *sar_parm);
int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz);
int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
-void rtw89_tas_init(struct rtw89_dev *rtwdev);
void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force);
-void rtw89_tas_track(struct rtw89_dev *rtwdev);
void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start);
void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_state state);
+void rtw89_sar_init(struct rtw89_dev *rtwdev);
+void rtw89_sar_track(struct rtw89_dev *rtwdev);
#endif
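
The query_sar_config() callback now takes the full rtw89_sar_parm instead of a bare center frequency, so handlers can also honor the ntx and RF-path hints. A minimal sketch of a handler built on the new signature; the handler name, frequency bounds, and returned limit are illustrative, not from the patch:

/* Hypothetical handler: only the rtw89_sar_parm layout and the
 * query_sar_config() signature come from this patch. */
static int example_query_sar_config(struct rtw89_dev *rtwdev,
				    const struct rtw89_sar_parm *sar_parm,
				    s32 *cfg)
{
	/* made-up frequency sanity check */
	if (sar_parm->center_freq < 2412 || sar_parm->center_freq > 7125)
		return -EINVAL;

	/* force_path pins the lookup to one RF path when set */
	if (sar_parm->force_path && sar_parm->path != RTW89_RF_PATH_A)
		return -EOPNOTSUPP;

	*cfg = 40; /* made-up limit, scaled by txpwr_factor_sar */
	return 0;
}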
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 0740e303680c..811c91481441 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -309,6 +309,9 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif_link->port);
rtwvif_link->net_type = RTW89_NET_TYPE_NO_LINK;
rtwvif_link->trigger = false;
+ rtwvif_link->rand_tsf_done = false;
+
+ rtw89_p2p_noa_once_deinit(rtwvif_link);
}
}
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 70fe7cebc9d5..94f27a9ee9f7 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -712,6 +712,25 @@ static inline u8 rtw89_core_get_qsel(struct rtw89_dev *rtwdev, u8 tid)
}
}
+static inline u8
+rtw89_core_get_qsel_mgmt(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
+
+ if (desc_info->hiq) {
+ if (rtwvif_link->mac_idx == RTW89_MAC_1)
+ return RTW89_TX_QSEL_B1_HI;
+ else
+ return RTW89_TX_QSEL_B0_HI;
+ }
+
+ if (rtwvif_link->mac_idx == RTW89_MAC_1)
+ return RTW89_TX_QSEL_B1_MGMT;
+ else
+ return RTW89_TX_QSEL_B0_MGMT;
+}
+
static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
{
switch (qsel) {
@@ -719,12 +738,24 @@ static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
fallthrough;
case RTW89_TX_QSEL_BE_0:
+ case RTW89_TX_QSEL_BE_1:
+ case RTW89_TX_QSEL_BE_2:
+ case RTW89_TX_QSEL_BE_3:
return RTW89_TXCH_ACH0;
case RTW89_TX_QSEL_BK_0:
+ case RTW89_TX_QSEL_BK_1:
+ case RTW89_TX_QSEL_BK_2:
+ case RTW89_TX_QSEL_BK_3:
return RTW89_TXCH_ACH1;
case RTW89_TX_QSEL_VI_0:
+ case RTW89_TX_QSEL_VI_1:
+ case RTW89_TX_QSEL_VI_2:
+ case RTW89_TX_QSEL_VI_3:
return RTW89_TXCH_ACH2;
case RTW89_TX_QSEL_VO_0:
+ case RTW89_TX_QSEL_VO_1:
+ case RTW89_TX_QSEL_VO_2:
+ case RTW89_TX_QSEL_VO_3:
return RTW89_TXCH_ACH3;
case RTW89_TX_QSEL_B0_MGMT:
return RTW89_TXCH_CH8;
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 17eee58503cb..34a0ab49bd7a 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -1086,8 +1086,7 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
rtw89_wow_init_pno(rtwdev, wowlan->nd_config);
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- /* use the link on HW-0 to do wow flow */
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (!rtwvif_link)
continue;
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 53827657abb2..7d26314a3e76 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -490,7 +490,7 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
static void bl_cmd_timeout(struct timer_list *t)
{
- struct rsi_hw *adapter = from_timer(adapter, t, bl_cmd_timer);
+ struct rsi_hw *adapter = timer_container_of(adapter, t, bl_cmd_timer);
adapter->blcmd_timer_expired = true;
timer_delete(&adapter->bl_cmd_timer);
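
This and the following hunks are mechanical conversions from from_timer() to its renamed successor timer_container_of(), which recovers the enclosing structure from the embedded timer_list. A minimal standalone sketch of the pattern, with made-up struct and callback names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_adapter {
	struct timer_list bl_cmd_timer;
	bool expired;
};

static void example_timeout(struct timer_list *t)
{
	/* timer_container_of() is container_of() keyed on the
	 * timer_list member, exactly like the old from_timer(). */
	struct example_adapter *adapter =
		timer_container_of(adapter, t, bl_cmd_timer);

	adapter->expired = true;
}

static void example_arm(struct example_adapter *adapter)
{
	timer_setup(&adapter->bl_cmd_timer, example_timeout, 0);
	mod_timer(&adapter->bl_cmd_timer, jiffies + HZ);
}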
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 9db08200f4fa..0e115b428f96 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1746,7 +1746,7 @@ static void rsi_resume_conn_channel(struct rsi_common *common)
void rsi_roc_timeout(struct timer_list *t)
{
- struct rsi_common *common = from_timer(common, t, roc_timer);
+ struct rsi_common *common = timer_container_of(common, t, roc_timer);
rsi_dbg(INFO_ZONE, "Remain on channel expired\n");
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 4fd76183c368..a933e2c7dc2c 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -133,7 +133,7 @@ static void cw1200_queue_gc(struct timer_list *t)
{
LIST_HEAD(list);
struct cw1200_queue *queue =
- from_timer(queue, t, gc);
+ timer_container_of(queue, t, gc);
spin_lock_bh(&queue->lock);
__cw1200_queue_gc(queue, &list, true);
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 444272caf124..5dd7f6a38900 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -2112,7 +2112,8 @@ void cw1200_multicast_stop_work(struct work_struct *work)
void cw1200_mcast_timeout(struct timer_list *t)
{
- struct cw1200_common *priv = from_timer(priv, t, mcast_timeout);
+ struct cw1200_common *priv = timer_container_of(priv, t,
+ mcast_timeout);
wiphy_warn(priv->hw->wiphy,
"Multicast delivery timeout.\n");
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ea9bc4717a85..f93c95edd991 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -189,7 +189,8 @@ out:
static void wl1271_rx_streaming_timer(struct timer_list *t)
{
- struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
+ struct wl12xx_vif *wlvif = timer_container_of(wlvif, t,
+ rx_streaming_timer);
struct wl1271 *wl = wlvif->wl;
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index cf3e976471c6..f6add19d1da1 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -1229,6 +1229,11 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
/* MLD not supported here */
u32 bcn_int = data->link_data[0].beacon_int;
u64 delta = abs(tsf - now);
+ struct ieee80211_bss_conf *conf;
+
+ conf = link_conf_dereference_protected(vif, data->link_data[0].link_id);
+ if (conf && !conf->enable_beacon)
+ return;
/* adjust after beaconing with new timestamp at old TBTT */
if (tsf > now) {
@@ -2273,7 +2278,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
{
struct mac80211_hwsim_link_data *link_data = arg;
u32 link_id = link_data->link_id;
- struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_bss_conf *link_conf, *tx_bss_conf;
struct mac80211_hwsim_data *data =
container_of(link_data, struct mac80211_hwsim_data,
link_data[link_id]);
@@ -2292,10 +2297,11 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
vif->type != NL80211_IFTYPE_OCB)
return;
- if (vif->mbssid_tx_vif && vif->mbssid_tx_vif != vif)
+ tx_bss_conf = rcu_access_pointer(link_conf->tx_bss_conf);
+ if (tx_bss_conf && tx_bss_conf != link_conf)
return;
- if (vif->bss_conf.ema_ap) {
+ if (link_conf->ema_ap) {
struct ieee80211_ema_beacons *ema;
u8 i = 0;
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 8755c5e6a65b..c814fbd756a1 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -550,8 +550,8 @@ static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
struct mhi_mbim_context *mbim = ctxt;
- link->session = if_id;
link->mbim = mbim;
+ link->session = mhi_mbim_get_link_mux_id(link->mbim->mdev->mhi_cntrl) + if_id;
link->ndev = ndev;
u64_stats_init(&link->rx_syncp);
u64_stats_init(&link->tx_syncp);
@@ -607,7 +607,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
{
struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
struct mhi_mbim_context *mbim;
- int err, link_id;
+ int err;
mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
if (!mbim)
@@ -628,11 +628,8 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
/* Number of transfer descriptors determines size of the queue */
mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- /* Get the corresponding mux_id from mhi */
- link_id = mhi_mbim_get_link_mux_id(cntrl);
-
/* Register wwan link ops with MHI controller representing WWAN instance */
- return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, link_id);
+ return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
}
static void mhi_mbim_remove(struct mhi_device *mhi_dev)
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index 91fa082e9cab..fc0a7cb181df 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -302,7 +302,7 @@ static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id
ccmni->ctlb = ctlb;
ccmni->dev = dev;
atomic_set(&ccmni->usage, 0);
- ctlb->ccmni_inst[if_id] = ccmni;
+ WRITE_ONCE(ctlb->ccmni_inst[if_id], ccmni);
ret = register_netdevice(dev);
if (ret)
@@ -324,6 +324,7 @@ static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct l
if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
return;
+ WRITE_ONCE(ctlb->ccmni_inst[if_id], NULL);
unregister_netdevice(dev);
}
@@ -419,7 +420,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu
skb_cb = T7XX_SKB_CB(skb);
netif_id = skb_cb->netif_idx;
- ccmni = ccmni_ctlb->ccmni_inst[netif_id];
+ ccmni = READ_ONCE(ccmni_ctlb->ccmni_inst[netif_id]);
if (!ccmni) {
dev_kfree_skb(skb);
return;
@@ -441,7 +442,7 @@ static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_bu
static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
- struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
struct netdev_queue *net_queue;
if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
@@ -453,7 +454,7 @@ static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno
static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
- struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
+ struct t7xx_ccmni *ccmni = READ_ONCE(ctlb->ccmni_inst[0]);
struct netdev_queue *net_queue;
if (atomic_read(&ccmni->usage) > 0) {
@@ -471,7 +472,7 @@ static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
if (ctlb->md_sta != MD_STATE_READY)
return;
- if (!ctlb->ccmni_inst[0]) {
+ if (!READ_ONCE(ctlb->ccmni_inst[0])) {
dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
return;
}
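
The t7xx hunks pair every lockless store to ccmni_inst[] with WRITE_ONCE() and every lockless load with READ_ONCE(), so the compiler can neither tear nor re-load the pointer while newlink/dellink race with the RX and queue-notify paths. A minimal sketch of that publish/consume pattern, with hypothetical names:

#include <linux/compiler.h>

struct example_ccmni;

struct example_ctlb {
	struct example_ccmni *inst[8];
};

/* Publisher side (cf. newlink/dellink): single untorn store. */
static void example_publish(struct example_ctlb *ctlb, int id,
			    struct example_ccmni *ccmni)
{
	WRITE_ONCE(ctlb->inst[id], ccmni);
}

/* Consumer side (cf. the RX path): READ_ONCE() forbids the compiler
 * from tearing or re-reading the pointer; callers must still handle
 * NULL because dellink may have just cleared the slot. */
static struct example_ccmni *example_lookup(struct example_ctlb *ctlb, int id)
{
	return READ_ONCE(ctlb->inst[id]);
}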
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 5836995d6774..c759ebc56457 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -198,7 +198,8 @@ static void tx_add_credit(struct xenvif_queue *queue)
void xenvif_tx_credit_callback(struct timer_list *t)
{
- struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
+ struct xenvif_queue *queue = timer_container_of(queue, t,
+ credit_timeout);
tx_add_credit(queue);
xenvif_napi_schedule_or_enable_events(queue);
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5091e1fa4a0d..9bac50963477 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -245,7 +245,8 @@ static bool xennet_can_sg(struct net_device *dev)
static void rx_refill_timeout(struct timer_list *t)
{
- struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
+ struct netfront_queue *queue = timer_container_of(queue, t,
+ rx_refill_timer);
napi_schedule(&queue->napi);
}
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index 43ce0c9b2355..a9b03dcc4100 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -119,7 +119,8 @@ static void fw_dnld_over(struct nfcmrvl_private *priv, u32 error)
static void fw_dnld_timeout(struct timer_list *t)
{
- struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer);
+ struct nfcmrvl_private *priv = timer_container_of(priv, t,
+ fw_dnld.timer);
nfc_err(priv->dev, "FW loading timeout");
priv->fw_dnld.state = STATE_RESET;
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 34c40d10e260..14661249c690 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1233,7 +1233,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
static void pn533_listen_mode_timer(struct timer_list *t)
{
- struct pn533 *dev = from_timer(dev, t, listen_timer);
+ struct pn533 *dev = timer_container_of(dev, t, listen_timer);
dev->cancel_listen = 1;
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 580c9193e4a7..a081bce61c29 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -133,7 +133,7 @@ static const struct pn533_phy_ops uart_phy_ops = {
static void pn532_cmd_timeout(struct timer_list *t)
{
- struct pn532_uart_phy *dev = from_timer(dev, t, cmd_timeout);
+ struct pn532_uart_phy *dev = timer_container_of(dev, t, cmd_timeout);
pn532_uart_send_frame(dev->priv, dev->cur_out_buf);
}
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index aec356880adf..af0fa8bd970b 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -2,7 +2,7 @@
/*
* NCI based driver for Samsung S3FWRN5 NFC chip
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
*/
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index c20fdbac51c5..781cdbcac104 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -2,7 +2,7 @@
/*
* NCI based driver for Samsung S3FWRN5 NFC chip
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
*/
diff --git a/drivers/nfc/s3fwrn5/firmware.h b/drivers/nfc/s3fwrn5/firmware.h
index 3a82ce5837fb..19f479aa6920 100644
--- a/drivers/nfc/s3fwrn5/firmware.h
+++ b/drivers/nfc/s3fwrn5/firmware.h
@@ -2,7 +2,7 @@
/*
* NCI based driver for Samsung S3FWRN5 NFC chip
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
*/
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 536c566e3f59..110d086cfe5b 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -2,7 +2,7 @@
/*
* I2C Link Layer for Samsung S3FWRN5 NCI based Driver
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
*/
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index ca6828f55ba0..5a9de11bbece 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -2,7 +2,7 @@
/*
* NCI based driver for Samsung S3FWRN5 NFC chip
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
*/
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
index c2d906591e9e..bc4bce2bbc4d 100644
--- a/drivers/nfc/s3fwrn5/nci.h
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -2,7 +2,7 @@
/*
* NCI based driver for Samsung S3FWRN5 NFC chip
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
*/
diff --git a/drivers/nfc/s3fwrn5/phy_common.c b/drivers/nfc/s3fwrn5/phy_common.c
index 81318478d5fd..deb2c039f0fd 100644
--- a/drivers/nfc/s3fwrn5/phy_common.c
+++ b/drivers/nfc/s3fwrn5/phy_common.c
@@ -2,9 +2,9 @@
/*
* Link Layer for Samsung S3FWRN5 NCI based Driver
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
- * Copyright (C) 2020 Samsung Electrnoics
+ * Copyright (C) 2020 Samsung Electronics
* Bongsu Jeon <bongsu.jeon@samsung.com>
*/
diff --git a/drivers/nfc/s3fwrn5/phy_common.h b/drivers/nfc/s3fwrn5/phy_common.h
index 99749c9294d1..9cef25436bf9 100644
--- a/drivers/nfc/s3fwrn5/phy_common.h
+++ b/drivers/nfc/s3fwrn5/phy_common.h
@@ -2,9 +2,9 @@
*
* Link Layer for Samsung S3FWRN5 NCI based Driver
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
- * Copyright (C) 2020 Samsung Electrnoics
+ * Copyright (C) 2020 Samsung Electronics
* Bongsu Jeon <bongsu.jeon@samsung.com>
*/
diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h
index bb8f936d13a2..2b492236090b 100644
--- a/drivers/nfc/s3fwrn5/s3fwrn5.h
+++ b/drivers/nfc/s3fwrn5/s3fwrn5.h
@@ -2,7 +2,7 @@
/*
* NCI based driver for Samsung S3FWRN5 NFC chip
*
- * Copyright (C) 2015 Samsung Electrnoics
+ * Copyright (C) 2015 Samsung Electronics
* Robert Baldyga <r.baldyga@samsung.com>
*/
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 8feac119a4bc..be4808859cfa 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -237,14 +237,14 @@ EXPORT_SYMBOL(ndlc_recv);
static void ndlc_t1_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
+ struct llt_ndlc *ndlc = timer_container_of(ndlc, t, t1_timer);
schedule_work(&ndlc->sm_work);
}
static void ndlc_t2_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
+ struct llt_ndlc *ndlc = timer_container_of(ndlc, t, t2_timer);
schedule_work(&ndlc->sm_work);
}
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 8cfe5405bae6..607ec768eb7b 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -696,7 +696,8 @@ static void st_nci_se_wt_timeout(struct timer_list *t)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
+ struct st_nci_info *info = timer_container_of(info, t,
+ se_info.bwi_timer);
info->se_info.bwi_active = false;
@@ -714,8 +715,8 @@ static void st_nci_se_wt_timeout(struct timer_list *t)
static void st_nci_se_activation_timeout(struct timer_list *t)
{
- struct st_nci_info *info = from_timer(info, t,
- se_info.se_active_timer);
+ struct st_nci_info *info = timer_container_of(info, t,
+ se_info.se_active_timer);
info->se_info.se_active = false;
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 9a50f3c03bd4..7154bc1d644a 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -280,15 +280,16 @@ static void st21nfca_se_wt_work(struct work_struct *work)
static void st21nfca_se_wt_timeout(struct timer_list *t)
{
- struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer);
+ struct st21nfca_hci_info *info = timer_container_of(info, t,
+ se_info.bwi_timer);
schedule_work(&info->se_info.timeout_work);
}
static void st21nfca_se_activation_timeout(struct timer_list *t)
{
- struct st21nfca_hci_info *info = from_timer(info, t,
- se_info.se_active_timer);
+ struct st21nfca_hci_info *info = timer_container_of(info, t,
+ se_info.se_active_timer);
info->se_info.se_active = false;
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index 6b89d596ba9a..9ef8ef2d4363 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -2,7 +2,7 @@
/*
* Virtual NCI device simulation driver
*
- * Copyright (C) 2020 Samsung Electrnoics
+ * Copyright (C) 2020 Samsung Electronics
* Bongsu Jeon <bongsu.jeon@samsung.com>
*/
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 6295e55ef85e..368f6d894bba 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -106,10 +106,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
if (!ntb->msi)
return -EINVAL;
- msi_lock_descs(&ntb->pdev->dev);
- desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
- addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
- msi_unlock_descs(&ntb->pdev->dev);
+ scoped_guard (msi_descs_lock, &ntb->pdev->dev) {
+ desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
+ addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+ }
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -289,7 +289,7 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
if (!ntb->msi)
return -EINVAL;
- msi_lock_descs(dev);
+ guard(msi_descs_lock)(dev);
msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
if (irq_has_action(entry->irq))
continue;
@@ -307,17 +307,11 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
if (ret) {
devm_free_irq(&ntb->dev, entry->irq, dev_id);
- goto unlock;
+ return ret;
}
-
- ret = entry->irq;
- goto unlock;
+ return entry->irq;
}
- ret = -ENODEV;
-
-unlock:
- msi_unlock_descs(dev);
- return ret;
+ return -ENODEV;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
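
The ntb/msi.c hunks replace hand-rolled msi_lock_descs()/msi_unlock_descs() pairs and their unlock: labels with the cleanup.h guards, which release the lock automatically on every return path. A minimal sketch of the two guard forms using a plain mutex; the msi_descs_lock guard class used above behaves the same way:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_value;

static int example_get(void)
{
	/* Held from here until the enclosing scope ends, including
	 * every early return; no unlock label needed. */
	guard(mutex)(&example_lock);

	if (example_value < 0)
		return -EINVAL;
	return example_value;
}

static void example_set(int v)
{
	/* scoped_guard() confines the critical section to the block. */
	scoped_guard(mutex, &example_lock)
		example_value = v;
}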
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 2c092ec8c0a9..91e273b89fea 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -242,7 +242,7 @@ struct nvme_dhchap_key *nvme_auth_transform_key(
{
const char *hmac_name;
struct crypto_shash *key_tfm;
- struct shash_desc *shash;
+ SHASH_DESC_ON_STACK(shash, key_tfm);
struct nvme_dhchap_key *transformed_key;
int ret, key_len;
@@ -267,19 +267,11 @@ struct nvme_dhchap_key *nvme_auth_transform_key(
if (IS_ERR(key_tfm))
return ERR_CAST(key_tfm);
- shash = kmalloc(sizeof(struct shash_desc) +
- crypto_shash_descsize(key_tfm),
- GFP_KERNEL);
- if (!shash) {
- ret = -ENOMEM;
- goto out_free_key;
- }
-
key_len = crypto_shash_digestsize(key_tfm);
transformed_key = nvme_auth_alloc_key(key_len, key->hash);
if (!transformed_key) {
ret = -ENOMEM;
- goto out_free_shash;
+ goto out_free_key;
}
shash->tfm = key_tfm;
@@ -299,15 +291,12 @@ struct nvme_dhchap_key *nvme_auth_transform_key(
if (ret < 0)
goto out_free_transformed_key;
- kfree(shash);
crypto_free_shash(key_tfm);
return transformed_key;
out_free_transformed_key:
nvme_auth_free_key(transformed_key);
-out_free_shash:
- kfree(shash);
out_free_key:
crypto_free_shash(key_tfm);
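
Replacing the kmalloc'ed descriptor with SHASH_DESC_ON_STACK() reserves the shash_desc plus the maximum transform state on the stack, removing one allocation failure path and the matching kfree()s. A minimal sketch of the idiom with a hypothetical one-shot digest helper:

#include <crypto/hash.h>

/* Hypothetical helper: digest 'len' bytes of 'data' with algorithm 'alg';
 * 'out' must hold at least crypto_shash_digestsize() bytes. */
static int example_digest(const char *alg, const void *data,
			  unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* Stack allocation sized for the largest shash state. */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}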
@@ -482,7 +471,7 @@ EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
* @c1: Value of challenge C1
* @c2: Value of challenge C2
* @hash_len: Hash length of the hash algorithm
- * @ret_psk: Pointer too the resulting generated PSK
+ * @ret_psk: Pointer to the resulting generated PSK
* @ret_len: length of @ret_psk
*
* Generate a PSK for TLS as specified in NVMe base specification, section
@@ -770,8 +759,8 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
goto out_free_prk;
/*
- * 2 addtional bytes for the length field from HDKF-Expand-Label,
- * 2 addtional bytes for the HMAC ID, and one byte for the space
+ * 2 additional bytes for the length field from HKDF-Expand-Label,
+ * 2 additional bytes for the HMAC ID, and one byte for the space
* separator.
*/
info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 4d64b6935bb9..31974c7dd20c 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -84,9 +84,9 @@ config NVME_TCP
tristate "NVM Express over Fabrics TCP host driver"
depends on INET
depends on BLOCK
+ select CRC32
+ select NET_CRC32C
select NVME_FABRICS
- select CRYPTO
- select CRYPTO_CRC32C
help
This provides support for the NVMe over Fabrics protocol using
the TCP transport. This allows you to use remote block devices
@@ -106,7 +106,7 @@ config NVME_TCP_TLS
help
Enables TLS encryption for NVMe TCP using the netlink handshake API.
- The TLS handshake daemon is availble at
+ The TLS handshake daemon is available at
https://github.com/oracle/ktls-utils.
If unsure, say N.
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 6115fef74c1e..f6ddbe553289 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -31,6 +31,7 @@ struct nvme_dhchap_queue_context {
u32 s1;
u32 s2;
bool bi_directional;
+ bool authenticated;
u16 transaction;
u8 status;
u8 dhgroup_id;
@@ -682,6 +683,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
nvme_auth_reset_dhchap(chap);
+ chap->authenticated = false;
if (chap->shash_tfm)
crypto_free_shash(chap->shash_tfm);
if (chap->dh_tfm)
@@ -930,12 +932,14 @@ static void nvme_queue_auth_work(struct work_struct *work)
}
if (!ret) {
chap->error = 0;
+ chap->authenticated = true;
if (ctrl->opts->concat &&
(ret = nvme_auth_secure_concat(ctrl, chap))) {
dev_warn(ctrl->device,
"%s: qid %d failed to enable secure concatenation\n",
__func__, chap->qid);
chap->error = ret;
+ chap->authenticated = false;
}
return;
}
@@ -1023,13 +1027,16 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
return;
for (q = 1; q < ctrl->queue_count; q++) {
- ret = nvme_auth_negotiate(ctrl, q);
- if (ret) {
- dev_warn(ctrl->device,
- "qid %d: error %d setting up authentication\n",
- q, ret);
- break;
- }
+ struct nvme_dhchap_queue_context *chap =
+ &ctrl->dhchap_ctxs[q];
+ /*
+ * Skip re-authentication if the queue had
+ * not been authenticated initially.
+ */
+ if (!chap->authenticated)
+ continue;
+ cancel_work_sync(&chap->auth_work);
+ queue_work(nvme_auth_wq, &chap->auth_work);
}
/*
@@ -1037,7 +1044,13 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
* the controller terminates the connection.
*/
for (q = 1; q < ctrl->queue_count; q++) {
- ret = nvme_auth_wait(ctrl, q);
+ struct nvme_dhchap_queue_context *chap =
+ &ctrl->dhchap_ctxs[q];
+ if (!chap->authenticated)
+ continue;
+ flush_work(&chap->auth_work);
+ ret = chap->error;
+ nvme_auth_reset_dhchap(chap);
if (ret)
dev_warn(ctrl->device,
"qid %d: authentication failed\n", q);
@@ -1076,6 +1089,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
chap = &ctrl->dhchap_ctxs[i];
chap->qid = i;
chap->ctrl = ctrl;
+ chap->authenticated = false;
INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
}
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 2b9e6cfaf2a8..1a0058be5821 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -145,7 +145,7 @@ static const char * const nvme_statuses[] = {
[NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
[NVME_SC_INVALID_PI] = "Invalid Protection Information",
[NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
- [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported",
+ [NVME_SC_CMD_SIZE_LIM_EXCEEDED] = "Command Size Limits Exceeded",
[NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
[NVME_SC_ZONE_FULL] = "Zone Is Full",
[NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ac53629fce68..92697f98c601 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -38,6 +38,8 @@ struct nvme_ns_info {
u32 nsid;
__le32 anagrpid;
u8 pi_offset;
+ u16 endgid;
+ u64 runs;
bool is_shared;
bool is_readonly;
bool is_ready;
@@ -150,6 +152,8 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
struct nvme_command *cmd);
+static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page,
+ u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi);
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
@@ -286,7 +290,6 @@ static blk_status_t nvme_error_status(u16 status)
case NVME_SC_NS_NOT_READY:
return BLK_STS_TARGET;
case NVME_SC_BAD_ATTRIBUTES:
- case NVME_SC_ONCS_NOT_SUPPORTED:
case NVME_SC_INVALID_OPCODE:
case NVME_SC_INVALID_FIELD:
case NVME_SC_INVALID_NS:
@@ -664,10 +667,11 @@ static void nvme_free_ns_head(struct kref *ref)
struct nvme_ns_head *head =
container_of(ref, struct nvme_ns_head, ref);
- nvme_mpath_remove_disk(head);
+ nvme_mpath_put_disk(head);
ida_free(&head->subsys->ns_ida, head->instance);
cleanup_srcu_struct(&head->srcu);
nvme_put_subsystem(head->subsys);
+ kfree(head->plids);
kfree(head);
}
@@ -991,6 +995,18 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+ if (op == nvme_cmd_write && ns->head->nr_plids) {
+ u16 write_stream = req->bio->bi_write_stream;
+
+ if (WARN_ON_ONCE(write_stream > ns->head->nr_plids))
+ return BLK_STS_INVAL;
+
+ if (write_stream) {
+ dsmgmt |= ns->head->plids[write_stream - 1] << 16;
+ control |= NVME_RW_DTYPE_DPLCMT;
+ }
+ }
+
if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
return BLK_STS_INVAL;
@@ -1010,7 +1026,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (ns->head->ms) {
/*
- * If formated with metadata, the block layer always provides a
+ * If formatted with metadata, the block layer always provides a
* metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
* we enable the PRACT bit for protection information or set the
* namespace capacity to zero to prevent any I/O.
@@ -1157,7 +1173,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
if (buffer && bufflen) {
- ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
+ ret = blk_rq_map_kern(req, buffer, bufflen, GFP_KERNEL);
if (ret)
goto out;
}
@@ -1609,6 +1625,7 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
info->is_ready = true;
+ info->endgid = le16_to_cpu(id->endgid);
if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
dev_info(ctrl->device,
"Ignoring bogus Namespace Identifiers\n");
@@ -1649,6 +1666,7 @@ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
info->is_ready = id->nstat & NVME_NSTAT_NRDY;
info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL;
info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT;
+ info->endgid = le16_to_cpu(id->endgid);
}
kfree(id);
return ret;
@@ -1674,7 +1692,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
- u32 *result)
+ void *result)
{
return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
buflen, result);
@@ -1683,7 +1701,7 @@ EXPORT_SYMBOL_GPL(nvme_set_features);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
- u32 *result)
+ void *result)
{
return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
buflen, result);
@@ -2059,7 +2077,21 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
- atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+ atomic_bs = (1 + ns->ctrl->awupf) * bs;
+
+ /*
+ * Set subsystem atomic bs.
+ */
+ if (ns->ctrl->subsys->atomic_bs) {
+ if (atomic_bs != ns->ctrl->subsys->atomic_bs) {
+ dev_err_ratelimited(ns->ctrl->device,
+ "%s: Inconsistent Atomic Write Size, Namespace will not be added: Subsystem=%d bytes, Controller/Namespace=%d bytes\n",
+ ns->disk ? ns->disk->disk_name : "?",
+ ns->ctrl->subsys->atomic_bs,
+ atomic_bs);
+ }
+ } else
+ ns->ctrl->subsys->atomic_bs = atomic_bs;
nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs);
}
@@ -2153,6 +2185,148 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
return ret;
}
+static int nvme_query_fdp_granularity(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info, u8 fdp_idx)
+{
+ struct nvme_fdp_config_log hdr, *h;
+ struct nvme_fdp_config_desc *desc;
+ size_t size = sizeof(hdr);
+ void *log, *end;
+ int i, n, ret;
+
+ ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0,
+ NVME_CSI_NVM, &hdr, size, 0, info->endgid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "FDP configs log header status:0x%x endgid:%d\n", ret,
+ info->endgid);
+ return ret;
+ }
+
+ size = le32_to_cpu(hdr.sze);
+ if (size > PAGE_SIZE * MAX_ORDER_NR_PAGES) {
+ dev_warn(ctrl->device, "FDP config size too large:%zu\n",
+ size);
+ return 0;
+ }
+
+ h = kvmalloc(size, GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0,
+ NVME_CSI_NVM, h, size, 0, info->endgid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "FDP configs log status:0x%x endgid:%d\n", ret,
+ info->endgid);
+ goto out;
+ }
+
+ n = le16_to_cpu(h->numfdpc) + 1;
+ if (fdp_idx > n) {
+ dev_warn(ctrl->device, "FDP index:%d out of range:%d\n",
+ fdp_idx, n);
+ /* Proceed without registering FDP streams */
+ ret = 0;
+ goto out;
+ }
+
+ log = h + 1;
+ desc = log;
+ end = log + size - sizeof(*h);
+ for (i = 0; i < fdp_idx; i++) {
+ log += le16_to_cpu(desc->dsze);
+ desc = log;
+ if (log >= end) {
+ dev_warn(ctrl->device,
+ "FDP invalid config descriptor list\n");
+ ret = 0;
+ goto out;
+ }
+ }
+
+ if (le32_to_cpu(desc->nrg) > 1) {
+ dev_warn(ctrl->device, "FDP NRG > 1 not supported\n");
+ ret = 0;
+ goto out;
+ }
+
+ info->runs = le64_to_cpu(desc->runs);
+out:
+ kvfree(h);
+ return ret;
+}
+
+static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ struct nvme_ns_head *head = ns->head;
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ struct nvme_fdp_ruh_status *ruhs;
+ struct nvme_fdp_config fdp;
+ struct nvme_command c = {};
+ size_t size;
+ int i, ret;
+
+ /*
+ * The FDP configuration is static for the lifetime of the namespace,
+ * so return immediately if we've already registered this namespace's
+ * streams.
+ */
+ if (head->nr_plids)
+ return 0;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0,
+ &fdp);
+ if (ret) {
+ dev_warn(ctrl->device, "FDP get feature status:0x%x\n", ret);
+ return ret;
+ }
+
+ if (!(fdp.flags & FDPCFG_FDPE))
+ return 0;
+
+ ret = nvme_query_fdp_granularity(ctrl, info, fdp.fdpcidx);
+ if (!info->runs)
+ return ret;
+
+ size = struct_size(ruhs, ruhsd, S8_MAX - 1);
+ ruhs = kzalloc(size, GFP_KERNEL);
+ if (!ruhs)
+ return -ENOMEM;
+
+ c.imr.opcode = nvme_cmd_io_mgmt_recv;
+ c.imr.nsid = cpu_to_le32(head->ns_id);
+ c.imr.mo = NVME_IO_MGMT_RECV_MO_RUHS;
+ c.imr.numd = cpu_to_le32(nvme_bytes_to_numd(size));
+ ret = nvme_submit_sync_cmd(ns->queue, &c, ruhs, size);
+ if (ret) {
+ dev_warn(ctrl->device, "FDP io-mgmt status:0x%x\n", ret);
+ goto free;
+ }
+
+ head->nr_plids = le16_to_cpu(ruhs->nruhsd);
+ if (!head->nr_plids)
+ goto free;
+
+ head->plids = kcalloc(head->nr_plids, sizeof(*head->plids),
+ GFP_KERNEL);
+ if (!head->plids) {
+ dev_warn(ctrl->device,
+ "failed to allocate %u FDP placement IDs\n",
+ head->nr_plids);
+ head->nr_plids = 0;
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ for (i = 0; i < head->nr_plids; i++)
+ head->plids[i] = le16_to_cpu(ruhs->ruhsd[i].pid);
+free:
+ kfree(ruhs);
+ return ret;
+}
+
static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
@@ -2190,6 +2364,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
goto out;
}
+ if (ns->ctrl->ctratt & NVME_CTRL_ATTR_FDPS) {
+ ret = nvme_query_fdp_info(ns, info);
+ if (ret < 0)
+ goto out;
+ }
+
lim = queue_limits_start_update(ns->disk->queue);
memflags = blk_mq_freeze_queue(ns->disk->queue);
@@ -2201,6 +2381,17 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
nvme_set_chunk_sectors(ns, id, &lim);
if (!nvme_update_disk_info(ns, id, &lim))
capacity = 0;
+
+ /*
+ * Validate the max atomic write size fits within the subsystem's
+ * atomic write capabilities.
+ */
+ if (lim.atomic_write_hw_max > ns->ctrl->subsys->atomic_bs) {
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
+ ret = -ENXIO;
+ goto out;
+ }
+
nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
ns->head->ids.csi == NVME_CSI_ZNS)
@@ -2223,6 +2414,12 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (!nvme_init_integrity(ns->head, &lim, info))
capacity = 0;
+ lim.max_write_streams = ns->head->nr_plids;
+ if (lim.max_write_streams)
+ lim.write_stream_granularity = min(info->runs, U32_MAX);
+ else
+ lim.write_stream_granularity = 0;
+
ret = queue_limits_commit_update(ns->disk->queue, &lim);
if (ret) {
blk_mq_unfreeze_queue(ns->disk->queue, memflags);
@@ -2326,6 +2523,8 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
ns->head->disk->flags |= GENHD_FL_HIDDEN;
else
nvme_init_integrity(ns->head, &lim, info);
+ lim.max_write_streams = ns_lim->max_write_streams;
+ lim.write_stream_granularity = ns_lim->write_stream_granularity;
ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
@@ -3031,7 +3230,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
kfree(subsys);
return -EINVAL;
}
- subsys->awupf = le16_to_cpu(id->awupf);
nvme_mpath_default_iopolicy(subsys);
subsys->dev.class = &nvme_subsys_class;
@@ -3084,8 +3282,8 @@ out_unlock:
return ret;
}
-int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
- void *log, size_t size, u64 offset)
+static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page,
+ u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi)
{
struct nvme_command c = { };
u32 dwlen = nvme_bytes_to_numd(size);
@@ -3099,10 +3297,18 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
c.get_log_page.csi = csi;
+ c.get_log_page.lsi = cpu_to_le16(lsi);
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
+ void *log, size_t size, u64 offset)
+{
+ return nvme_get_log_lsi(ctrl, nsid, log_page, lsp, csi, log, size,
+ offset, 0);
+}
+
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
struct nvme_effects_log **log)
{
@@ -3441,7 +3647,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
dev_pm_qos_expose_latency_tolerance(ctrl->device);
else if (!ctrl->apst_enabled && prev_apst_enabled)
dev_pm_qos_hide_latency_tolerance(ctrl->device);
-
+ ctrl->awupf = le16_to_cpu(id->awupf);
out_free:
kfree(id);
return ret;
@@ -3560,7 +3766,7 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
*/
if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
continue;
- if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
+ if (nvme_tryget_ns_head(h))
return h;
}
@@ -3804,7 +4010,8 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
}
} else {
ret = -EINVAL;
- if (!info->is_shared || !head->shared) {
+ if ((!info->is_shared || !head->shared) &&
+ !list_empty(&head->list)) {
dev_err(ctrl->device,
"Duplicate unshared namespace %d\n",
info->nsid);
@@ -4008,7 +4215,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
if (list_empty(&ns->head->list)) {
- list_del_init(&ns->head->entry);
+ if (!nvme_mpath_queue_if_no_path(ns->head))
+ list_del_init(&ns->head->entry);
last_path = true;
}
mutex_unlock(&ns->ctrl->subsys->lock);
@@ -4029,7 +4237,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
synchronize_srcu(&ns->ctrl->srcu);
if (last_path)
- nvme_mpath_shutdown_disk(ns->head);
+ nvme_mpath_remove_disk(ns->head);
nvme_put_ns(ns);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 93e9041b9657..2e58a7ce1090 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -582,7 +582,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
* Do not retry when:
*
* - the DNR bit is set and the specification states no further connect
- * attempts with the same set of paramenters should be attempted.
+ * attempts with the same set of parameters should be attempted.
*
* - when the authentication attempt fails, because the key was invalid.
* This error code is set on the host side.
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 9cf5b020adba..1b58ee7d0dce 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -80,7 +80,7 @@ enum {
* @transport: Holds the fabric transport "technology name" (for a lack of
* better description) that will be used by an NVMe controller
* being added.
- * @subsysnqn: Hold the fully qualified NQN subystem name (format defined
+ * @subsysnqn: Hold the fully qualified NQN subsystem name (format defined
* in the NVMe specification, "NVMe Qualified Names").
* @traddr: The transport-specific TRADDR field for a port on the
* subsystem which is adding a controller.
@@ -156,7 +156,7 @@ struct nvmf_ctrl_options {
* @create_ctrl(): function pointer that points to a non-NVMe
* implementation-specific fabric technology
* that would go into starting up that fabric
- * for the purpose of conneciton to an NVMe controller
+ * for the purpose of connection to an NVMe controller
* using that fabric technology.
*
* Notes:
@@ -165,7 +165,7 @@ struct nvmf_ctrl_options {
* 2. create_ctrl() must be defined (even if it does nothing)
* 3. struct nvmf_transport_ops must be statically allocated in the
* modules .bss section so that a pure module_get on @module
- * prevents the memory from beeing freed.
+ * prevents the memory from being freed.
*/
struct nvmf_transport_ops {
struct list_head entry;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 2257c3c96dd2..014b387f1e8b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1410,9 +1410,8 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
}
static void
-nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+nvme_fc_xmt_ls_rsp_free(struct nvmefc_ls_rcv_op *lsop)
{
- struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
struct nvme_fc_rport *rport = lsop->rport;
struct nvme_fc_lport *lport = rport->lport;
unsigned long flags;
@@ -1434,6 +1433,14 @@ nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
}
static void
+nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+{
+ struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
+
+ nvme_fc_xmt_ls_rsp_free(lsop);
+}
+
+static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
struct nvme_fc_rport *rport = lsop->rport;
@@ -1450,7 +1457,7 @@ nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
dev_warn(lport->dev,
"LLDD rejected LS RSP xmt: LS %d status %d\n",
w0->ls_cmd, ret);
- nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
+ nvme_fc_xmt_ls_rsp_free(lsop);
return;
}
}
@@ -1948,7 +1955,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
}
/*
- * For the linux implementation, if we have an unsuccesful
+ * For the linux implementation, if we have an unsuccessful
 * status, the blk-mq layer can typically be called with the
* non-zero status and the content of the cqe isn't important.
*/
@@ -2472,7 +2479,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
* writing the registers for shutdown and polling (call
* nvme_disable_ctrl()). Given a bunch of i/o was potentially
* just aborted and we will wait on those contexts, and given
- * there was no indication of how live the controlelr is on the
+ * there was no indication of how live the controller is on the
* link, don't send more io to create more contexts for the
* shutdown. Let the controller fail via keepalive failure if
 * it's still present.
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index ca86d3bf7ea4..6b3ac8ae3f34 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -429,21 +429,14 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
- * For iopoll, complete it directly. Note that using the uring_cmd
- * helper for this is safe only because we check blk_rq_is_poll().
- * As that returns false if we're NOT on a polled queue, then it's
- * safe to use the polled completion helper.
- *
- * Otherwise, move the completion to task work.
+ * IOPOLL could potentially complete this request directly, but
+ * if multiple rings are polling on the same queue, then it's possible
+ * for one ring to find completions for another ring. Punting the
+ * completion via task_work will always direct it to the right
+ * location, rather than potentially completing requests for ringA
+ * under iopoll invocations from ringB.
*/
- if (blk_rq_is_poll(req)) {
- if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
- } else {
- io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
- }
-
+ io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
return RQ_END_IO_FREE;
}
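
With polled completions also punted to task_work, the final completion always runs in the submitting task's context via the uring-cmd callback. A minimal sketch of that pattern; the callback body and names are illustrative:

#include <linux/io_uring/cmd.h>

/* Illustrative task-work callback: because it runs in the submitter's
 * task context, it can never complete another ring's request. */
static void example_uring_task_cb(struct io_uring_cmd *ioucmd,
				  unsigned int issue_flags)
{
	io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
}

static void example_complete(struct io_uring_cmd *ioucmd)
{
	/* Punt instead of completing inline, even on polled queues. */
	io_uring_cmd_do_in_task_lazy(ioucmd, example_uring_task_cb);
}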
@@ -493,13 +486,15 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
d.timeout_ms = READ_ONCE(cmd->timeout_ms);
if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
- /* fixedbufs is only for non-vectored io */
- if (vec)
- return -EINVAL;
+ int ddir = nvme_is_write(&c) ? WRITE : READ;
- ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
- nvme_is_write(&c) ? WRITE : READ, &iter, ioucmd,
- issue_flags);
+ if (vec)
+ ret = io_uring_cmd_import_fixed_vec(ioucmd,
+ u64_to_user_ptr(d.addr), d.data_len,
+ ddir, &iter, issue_flags);
+ else
+ ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
+ ddir, &iter, ioucmd, issue_flags);
if (ret < 0)
return ret;
@@ -521,7 +516,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (d.data_len) {
ret = nvme_map_user_request(req, d.addr, d.data_len,
nvme_to_user_ptr(d.metadata), d.metadata_len,
- map_iter, vec);
+ map_iter, vec ? NVME_IOCTL_VEC : 0);
if (ret)
goto out_free_req;
}
@@ -727,7 +722,7 @@ int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
/*
* Handle ioctls that apply to the controller instead of the namespace
- * seperately and drop the ns SRCU reference early. This avoids a
+ * separately and drop the ns SRCU reference early. This avoids a
* deadlock when deleting namespaces using the passthrough interface.
*/
if (is_ctrl_ioctl(cmd))
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 250f3da67cc9..e040e467f9fa 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -10,10 +10,61 @@
#include "nvme.h"
bool multipath = true;
-module_param(multipath, bool, 0444);
+static bool multipath_always_on;
+
+static int multipath_param_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+ bool *arg = kp->arg;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ if (multipath_always_on && !*arg) {
+ pr_err("Can't disable multipath when multipath_always_on is configured.\n");
+ *arg = true;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct kernel_param_ops multipath_param_ops = {
+ .set = multipath_param_set,
+ .get = param_get_bool,
+};
+
+module_param_cb(multipath, &multipath_param_ops, &multipath, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+static int multipath_always_on_set(const char *val,
+ const struct kernel_param *kp)
+{
+ int ret;
+ bool *arg = kp->arg;
+
+ ret = param_set_bool(val, kp);
+ if (ret < 0)
+ return ret;
+
+ if (*arg)
+ multipath = true;
+
+ return 0;
+}
+
+static const struct kernel_param_ops multipath_always_on_ops = {
+ .set = multipath_always_on_set,
+ .get = param_get_bool,
+};
+
+module_param_cb(multipath_always_on, &multipath_always_on_ops,
+ &multipath_always_on, 0444);
+MODULE_PARM_DESC(multipath_always_on,
+ "create multipath node always except for private namespace with non-unique nsid; note that this also implicitly enables native multipath support");
+
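
Both booleans are registered through module_param_cb() with custom kernel_param_ops, so the setters can validate input and keep the two flags consistent. A minimal standalone sketch of a validated bool parameter; the names and policy are illustrative:

#include <linux/moduleparam.h>
#include <linux/printk.h>

static bool example_feature = true;

static int example_param_set(const char *val, const struct kernel_param *kp)
{
	bool *arg = kp->arg;
	int ret;

	/* Reuse the stock bool parser, then layer policy on top. */
	ret = param_set_bool(val, kp);
	if (ret)
		return ret;

	if (!*arg) {
		pr_err("example_feature cannot be disabled\n");
		*arg = true;
		return -EINVAL;
	}
	return 0;
}

static const struct kernel_param_ops example_param_ops = {
	.set	= example_param_set,
	.get	= param_get_bool,
};

module_param_cb(example_feature, &example_param_ops, &example_feature, 0444);
MODULE_PARM_DESC(example_feature, "illustrative validated bool parameter");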
static const char *nvme_iopolicy_names[] = {
[NVME_IOPOLICY_NUMA] = "numa",
[NVME_IOPOLICY_RR] = "round-robin",
@@ -442,7 +493,17 @@ static bool nvme_available_path(struct nvme_ns_head *head)
break;
}
}
- return false;
+
+ /*
+ * If "head->delayed_removal_secs" is configured (i.e., non-zero), do
+ * not immediately fail I/O. Instead, requeue the I/O for the configured
+ * duration, anticipating that if there's a transient link failure then
+ * it may recover within this time window. This parameter is exported to
+ * userspace via sysfs, and its default value is zero. It is internally
+ * mapped to NVME_NSHEAD_QUEUE_IF_NO_PATH. When delayed_removal_secs is
+ * non-zero, this flag is set to true. When zero, the flag is cleared.
+ */
+ return nvme_mpath_queue_if_no_path(head);
}
static void nvme_ns_head_submit_bio(struct bio *bio)
@@ -617,6 +678,40 @@ static void nvme_requeue_work(struct work_struct *work)
}
}
+static void nvme_remove_head(struct nvme_ns_head *head)
+{
+ if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ /*
+ * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+ * to allow multipath to fail all I/O.
+ */
+ kblockd_schedule_work(&head->requeue_work);
+
+ nvme_cdev_del(&head->cdev, &head->cdev_device);
+ synchronize_srcu(&head->srcu);
+ del_gendisk(head->disk);
+ nvme_put_ns_head(head);
+ }
+}
+
+static void nvme_remove_head_work(struct work_struct *work)
+{
+ struct nvme_ns_head *head = container_of(to_delayed_work(work),
+ struct nvme_ns_head, remove_work);
+ bool remove = false;
+
+ mutex_lock(&head->subsys->lock);
+ if (list_empty(&head->list)) {
+ list_del_init(&head->entry);
+ remove = true;
+ }
+ mutex_unlock(&head->subsys->lock);
+ if (remove)
+ nvme_remove_head(head);
+
+ module_put(THIS_MODULE);
+}
+
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
struct queue_limits lim;
@@ -626,19 +721,31 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
spin_lock_init(&head->requeue_lock);
INIT_WORK(&head->requeue_work, nvme_requeue_work);
INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);
+ INIT_DELAYED_WORK(&head->remove_work, nvme_remove_head_work);
+ head->delayed_removal_secs = 0;
/*
- * Add a multipath node if the subsystems supports multiple controllers.
- * We also do this for private namespaces as the namespace sharing flag
- * could change after a rescan.
+ * If "multipath_always_on" is enabled, a multipath node is added
+ * regardless of whether the disk is single/multi ported, and whether
+ * the namespace is shared or private. If "multipath_always_on" is not
+ * enabled, a multipath node is added only if the subsystem supports
+ * multiple controllers and the "multipath" option is configured. In
+ * either case, for private namespaces, we ensure that the NSID is
+ * unique.
*/
- if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
- !nvme_is_unique_nsid(ctrl, head) || !multipath)
+ if (!multipath_always_on) {
+ if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+ !multipath)
+ return 0;
+ }
+
+ if (!nvme_is_unique_nsid(ctrl, head))
return 0;
blk_set_stacking_limits(&lim);
lim.dma_alignment = 3;
- lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
+ lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT |
+ BLK_FEAT_POLL | BLK_FEAT_ATOMIC_WRITES;
if (head->ids.csi == NVME_CSI_ZNS)
lim.features |= BLK_FEAT_ZONED;
@@ -653,12 +760,13 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
* controller's scan_work context. If a path error occurs here, the IO
* will wait until a path becomes available or all paths are torn down,
* but that action also occurs within scan_work, so it would deadlock.
- * Defer the partion scan to a different context that does not block
+ * Defer the partition scan to a different context that does not block
* scan_work.
*/
set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
+ nvme_tryget_ns_head(head);
return 0;
}
@@ -890,7 +998,7 @@ void nvme_mpath_update(struct nvme_ctrl *ctrl)
static void nvme_anatt_timeout(struct timer_list *t)
{
- struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
+ struct nvme_ctrl *ctrl = timer_container_of(ctrl, t, anatt_timer);
dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
nvme_reset_ctrl(ctrl);
@@ -1015,6 +1123,49 @@ static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr
}
DEVICE_ATTR_RO(numa_nodes);
+static ssize_t delayed_removal_secs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct nvme_ns_head *head = disk->private_data;
+ int ret;
+
+ mutex_lock(&head->subsys->lock);
+ ret = sysfs_emit(buf, "%u\n", head->delayed_removal_secs);
+ mutex_unlock(&head->subsys->lock);
+ return ret;
+}
+
+static ssize_t delayed_removal_secs_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct nvme_ns_head *head = disk->private_data;
+ unsigned int sec;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &sec);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&head->subsys->lock);
+ head->delayed_removal_secs = sec;
+ if (sec)
+ set_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags);
+ else
+ clear_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags);
+ mutex_unlock(&head->subsys->lock);
+ /*
+ * Ensure that the update to NVME_NSHEAD_QUEUE_IF_NO_PATH is
+ * observed by all SRCU readers before we return.
+ */
+ synchronize_srcu(&head->srcu);
+
+ return count;
+}
+
+DEVICE_ATTR_RW(delayed_removal_secs);
+
static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
struct nvme_ana_group_desc *desc, void *data)
{
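
The synchronize_srcu() in the store path above pairs with flag reads done under the head's SRCU read lock; once it returns, no reader can still act on the stale value. A minimal sketch of the reader side, illustrative rather than the driver's exact code:

static bool queue_if_no_path_seen(struct nvme_ns_head *head)
{
	int idx;
	bool queue;

	idx = srcu_read_lock(&head->srcu);
	queue = test_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags);
	srcu_read_unlock(&head->srcu, idx);
	return queue;
}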
@@ -1136,23 +1287,43 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
#endif
}
-void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
- if (!head->disk)
- return;
- if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
- nvme_cdev_del(&head->cdev, &head->cdev_device);
+ bool remove = false;
+
+ mutex_lock(&head->subsys->lock);
+ /*
+ * We are called when all paths have been removed, and at that point
+ * head->list is expected to be empty. However, nvme_remove_ns() and
+ * nvme_init_ns_head() can run concurrently, and so if
+ * head->delayed_removal_secs is configured, it is possible that by the
+ * time we reach this point, head->list may no longer be empty. Therefore,
+ * we recheck head->list here. If it is no longer empty then we skip
+ * enqueuing the delayed head removal work.
+ */
+ if (!list_empty(&head->list))
+ goto out;
+
+ if (head->delayed_removal_secs) {
/*
- * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
- * to allow multipath to fail all I/O.
+ * Ensure that the module cannot be unloaded while the head
+ * removal work is pending.
*/
- synchronize_srcu(&head->srcu);
- kblockd_schedule_work(&head->requeue_work);
- del_gendisk(head->disk);
+ if (!try_module_get(THIS_MODULE))
+ goto out;
+ queue_delayed_work(nvme_wq, &head->remove_work,
+ head->delayed_removal_secs * HZ);
+ } else {
+ list_del_init(&head->entry);
+ remove = true;
}
+out:
+ mutex_unlock(&head->subsys->lock);
+ if (remove)
+ nvme_remove_head(head);
}
-void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+void nvme_mpath_put_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
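
The try_module_get() taken before queuing implies the handler must drop that reference exactly once. nvme_remove_head_work() is defined in an earlier hunk not shown here; a sketch of the invariant it has to maintain, under that assumption:

static void nvme_remove_head_work_sketch(struct work_struct *work)
{
	struct nvme_ns_head *head = container_of(to_delayed_work(work),
					struct nvme_ns_head, remove_work);
	bool remove = false;

	mutex_lock(&head->subsys->lock);
	if (list_empty(&head->list)) {		/* no path reappeared */
		list_del_init(&head->entry);
		remove = true;
	}
	mutex_unlock(&head->subsys->lock);
	if (remove)
		nvme_remove_head(head);
	module_put(THIS_MODULE);	/* balances try_module_get() above */
}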
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 51e078642127..a468cdc5b5cb 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -410,6 +410,7 @@ struct nvme_ctrl {
enum nvme_ctrl_type cntrltype;
enum nvme_dctype dctype;
+ u16 awupf; /* 0's based value. */
};
static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
@@ -442,11 +443,11 @@ struct nvme_subsystem {
u8 cmic;
enum nvme_subsys_type subtype;
u16 vendor_id;
- u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
enum nvme_iopolicy iopolicy;
#endif
+ u32 atomic_bs;
};
/*
@@ -496,6 +497,9 @@ struct nvme_ns_head {
struct device cdev_device;
struct gendisk *disk;
+
+ u16 nr_plids;
+ u16 *plids;
#ifdef CONFIG_NVME_MULTIPATH
struct bio_list requeue_list;
spinlock_t requeue_lock;
@@ -503,7 +507,10 @@ struct nvme_ns_head {
struct work_struct partition_scan_work;
struct mutex lock;
unsigned long flags;
-#define NVME_NSHEAD_DISK_LIVE 0
+ struct delayed_work remove_work;
+ unsigned int delayed_removal_secs;
+#define NVME_NSHEAD_DISK_LIVE 0
+#define NVME_NSHEAD_QUEUE_IF_NO_PATH 1
struct nvme_ns __rcu *current_path[];
#endif
};
@@ -516,7 +523,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
- NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeores supported */
+ NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */
};
struct nvme_ns {
@@ -896,10 +903,10 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int qid, nvme_submit_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
- u32 *result);
+ void *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
- u32 *result);
+ void *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
@@ -960,7 +967,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_sysfs_link(struct nvme_ns_head *ns);
void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
-void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+void nvme_mpath_put_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
@@ -969,7 +976,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
+void nvme_mpath_remove_disk(struct nvme_ns_head *head);
void nvme_mpath_start_request(struct request *rq);
void nvme_mpath_end_request(struct request *rq);
@@ -986,12 +993,19 @@ extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute dev_attr_queue_depth;
extern struct device_attribute dev_attr_numa_nodes;
+extern struct device_attribute dev_attr_delayed_removal_secs;
extern struct device_attribute subsys_attr_iopolicy;
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
return disk->fops == &nvme_ns_head_ops;
}
+static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head)
+{
+ return test_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags);
+}
#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
@@ -1012,7 +1026,7 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
-static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+static inline void nvme_mpath_put_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_add_sysfs_link(struct nvme_ns *ns)
@@ -1031,7 +1045,7 @@ static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
-static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
+static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
@@ -1079,6 +1093,10 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
return false;
}
+static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head)
+{
+ return false;
+}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2e30e9be7408..8ff12e415cb5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -18,6 +18,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/nodemask.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
@@ -34,16 +35,31 @@
#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
-#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+/* Optimisation for I/Os between 4k and 128k */
+#define NVME_SMALL_POOL_SIZE 256
/*
* These can be higher, but we need to ensure that any command doesn't
* require an sg allocation that needs more than a page of data.
*/
#define NVME_MAX_KB_SZ 8192
-#define NVME_MAX_SEGS 128
-#define NVME_MAX_META_SEGS 15
-#define NVME_MAX_NR_ALLOCATIONS 5
+#define NVME_MAX_NR_DESCRIPTORS 5
+
+/*
+ * For data SGLs we support a single descriptor's worth of SGL entries, but for
+ * now we also limit it to avoid an allocation larger than PAGE_SIZE for the
+ * scatterlist.
+ */
+#define NVME_MAX_SEGS \
+ min(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc), \
+ (PAGE_SIZE / sizeof(struct scatterlist)))
+
+/*
+ * For metadata SGLs, only the small descriptor is supported; the first entry
+ * is the segment descriptor (for the data pointer it instead sits in the SQE).
+ */
+#define NVME_MAX_META_SEGS \
+ ((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1)
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);
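
Plugging in common x86-64 values (4 KiB controller page, 16-byte struct nvme_sgl_desc, 4 KiB PAGE_SIZE, 32-byte struct scatterlist; assumptions, not guarantees) shows the new expressions reproduce the old hard-coded limits:

#include <assert.h>

int main(void)
{
	unsigned ctrl_page = 4096, page = 4096;
	unsigned sgl_desc = 16, sg_ent = 32, small_pool = 256;
	unsigned max_segs = ctrl_page / sgl_desc < page / sg_ent ?
			    ctrl_page / sgl_desc : page / sg_ent;
	unsigned max_meta_segs = small_pool / sgl_desc - 1;

	assert(max_segs == 128);	/* the old NVME_MAX_SEGS */
	assert(max_meta_segs == 15);	/* the old NVME_MAX_META_SEGS */
	return 0;
}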
@@ -112,6 +128,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static void nvme_delete_io_queues(struct nvme_dev *dev);
static void nvme_update_attrs(struct nvme_dev *dev);
+struct nvme_descriptor_pools {
+ struct dma_pool *large;
+ struct dma_pool *small;
+};
+
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
@@ -121,8 +142,6 @@ struct nvme_dev {
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
struct device *dev;
- struct dma_pool *prp_page_pool;
- struct dma_pool *prp_small_pool;
unsigned online_queues;
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES];
@@ -162,6 +181,7 @@ struct nvme_dev {
unsigned int nr_allocated_queues;
unsigned int nr_write_queues;
unsigned int nr_poll_queues;
+ struct nvme_descriptor_pools descriptor_pools[];
};
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
@@ -191,6 +211,7 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
*/
struct nvme_queue {
struct nvme_dev *dev;
+ struct nvme_descriptor_pools descriptor_pools;
spinlock_t sq_lock;
void *sq_cmds;
/* only used for poll queues: */
@@ -219,30 +240,30 @@ struct nvme_queue {
struct completion delete_done;
};
-union nvme_descriptor {
- struct nvme_sgl_desc *sg_list;
- __le64 *prp_list;
+/* bits for iod->flags */
+enum nvme_iod_flags {
+ /* this command has been aborted by the timeout handler */
+ IOD_ABORTED = 1U << 0,
+
+ /* uses the small descriptor pool */
+ IOD_SMALL_DESCRIPTOR = 1U << 1,
};
/*
* The nvme_iod describes the data in an I/O.
- *
- * The sg pointer contains the list of PRP/SGL chunk allocations in addition
- * to the actual struct scatterlist.
*/
struct nvme_iod {
struct nvme_request req;
struct nvme_command cmd;
- bool aborted;
- s8 nr_allocations; /* PRP list pool allocations. 0 means small
- pool in use */
+ u8 flags;
+ u8 nr_descriptors;
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t first_dma;
dma_addr_t meta_dma;
struct sg_table sgt;
struct sg_table meta_sgt;
- union nvme_descriptor meta_list;
- union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
+ struct nvme_sgl_desc *meta_descriptor;
+ void *descriptors[NVME_MAX_NR_DESCRIPTORS];
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -390,37 +411,85 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
*/
-static int nvme_pci_npages_prp(void)
+static __always_inline int nvme_pci_npages_prp(void)
{
unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
}
-static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
+static struct nvme_descriptor_pools *
+nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node)
{
- struct nvme_dev *dev = to_nvme_dev(data);
- struct nvme_queue *nvmeq = &dev->queues[0];
+ struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node];
+ size_t small_align = NVME_SMALL_POOL_SIZE;
- WARN_ON(hctx_idx != 0);
- WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
+ if (pools->small)
+ return pools; /* already initialized */
- hctx->driver_data = nvmeq;
- return 0;
+ pools->large = dma_pool_create_node("nvme descriptor page", dev->dev,
+ NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0, numa_node);
+ if (!pools->large)
+ return ERR_PTR(-ENOMEM);
+
+ if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
+ small_align = 512;
+
+ pools->small = dma_pool_create_node("nvme descriptor small", dev->dev,
+ NVME_SMALL_POOL_SIZE, small_align, 0, numa_node);
+ if (!pools->small) {
+ dma_pool_destroy(pools->large);
+ pools->large = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return pools;
}
-static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
+static void nvme_release_descriptor_pools(struct nvme_dev *dev)
+{
+ unsigned i;
+
+ for (i = 0; i < nr_node_ids; i++) {
+ struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i];
+
+ dma_pool_destroy(pools->large);
+ dma_pool_destroy(pools->small);
+ }
+}
+
+static int nvme_init_hctx_common(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned qid)
{
struct nvme_dev *dev = to_nvme_dev(data);
- struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+ struct nvme_descriptor_pools *pools;
+ struct blk_mq_tags *tags;
+
+ tags = qid ? dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0];
+ WARN_ON(tags != hctx->tags);
+ pools = nvme_setup_descriptor_pools(dev, hctx->numa_node);
+ if (IS_ERR(pools))
+ return PTR_ERR(pools);
- WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
+ nvmeq->descriptor_pools = *pools;
hctx->driver_data = nvmeq;
return 0;
}
+static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ WARN_ON(hctx_idx != 0);
+ return nvme_init_hctx_common(hctx, data, 0);
+}
+
+static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ return nvme_init_hctx_common(hctx, data, hctx_idx + 1);
+}
+
static int nvme_pci_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
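
The pools are now created lazily, once per NUMA node, the first time a hardware context on that node is initialized; later hctxs on the same node only copy the pointers. A stripped-down sketch of the pattern (error handling elided, and relying as the driver does on init_hctx callbacks not racing):

pools = &dev->descriptor_pools[hctx->numa_node];
if (!pools->small)	/* first hctx on this node */
	/* dma_pool_create_node() is dma_pool_create() plus a NUMA node id */
	pools->small = dma_pool_create_node("nvme descriptor small", dev->dev,
			NVME_SMALL_POOL_SIZE, NVME_SMALL_POOL_SIZE, 0,
			hctx->numa_node);
nvmeq->descriptor_pools = *pools;	/* cheap per-queue copy */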
@@ -537,23 +606,39 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
return true;
}
-static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
+static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
+ struct nvme_iod *iod)
+{
+ if (iod->flags & IOD_SMALL_DESCRIPTOR)
+ return nvmeq->descriptor_pools.small;
+ return nvmeq->descriptor_pools.large;
+}
+
+static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req)
{
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_addr_t dma_addr = iod->first_dma;
int i;
- for (i = 0; i < iod->nr_allocations; i++) {
- __le64 *prp_list = iod->list[i].prp_list;
+ if (iod->nr_descriptors == 1) {
+ dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0],
+ dma_addr);
+ return;
+ }
+
+ for (i = 0; i < iod->nr_descriptors; i++) {
+ __le64 *prp_list = iod->descriptors[i];
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
- dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+ dma_pool_free(nvmeq->descriptor_pools.large, prp_list,
+ dma_addr);
dma_addr = next_dma_addr;
}
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -566,15 +651,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
WARN_ON_ONCE(!iod->sgt.nents);
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
-
- if (iod->nr_allocations == 0)
- dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
- iod->first_dma);
- else if (iod->nr_allocations == 1)
- dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
- iod->first_dma);
- else
- nvme_free_prps(dev, req);
+ nvme_free_descriptors(nvmeq, req);
mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
@@ -592,11 +669,10 @@ static void nvme_print_sgl(struct scatterlist *sgl, int nents)
}
}
-static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
+static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq,
struct request *req, struct nvme_rw_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct dma_pool *pool;
int length = blk_rq_payload_bytes(req);
struct scatterlist *sg = iod->sgt.sgl;
int dma_len = sg_dma_len(sg);
@@ -604,7 +680,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
__le64 *prp_list;
dma_addr_t prp_dma;
- int nprps, i;
+ int i;
length -= (NVME_CTRL_PAGE_SIZE - offset);
if (length <= 0) {
@@ -626,30 +702,26 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
goto done;
}
- nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
- if (nprps <= (256 / 8)) {
- pool = dev->prp_small_pool;
- iod->nr_allocations = 0;
- } else {
- pool = dev->prp_page_pool;
- iod->nr_allocations = 1;
- }
+ if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <=
+ NVME_SMALL_POOL_SIZE / sizeof(__le64))
+ iod->flags |= IOD_SMALL_DESCRIPTOR;
- prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
- if (!prp_list) {
- iod->nr_allocations = -1;
+ prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
+ &prp_dma);
+ if (!prp_list)
return BLK_STS_RESOURCE;
- }
- iod->list[0].prp_list = prp_list;
+ iod->descriptors[iod->nr_descriptors++] = prp_list;
iod->first_dma = prp_dma;
i = 0;
for (;;) {
if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
- prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+
+ prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large,
+ GFP_ATOMIC, &prp_dma);
if (!prp_list)
goto free_prps;
- iod->list[iod->nr_allocations++].prp_list = prp_list;
+ iod->descriptors[iod->nr_descriptors++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
i = 1;
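
For transfers needing more than one page of PRP entries, the loop above chains lists: the last slot of each full list is rewritten to hold the DMA address of the next list. Schematically, for a 4 KiB controller page (512 8-byte entries per list):

/*
 *   cmnd->dptr.prp2 -> list0[0..510] = data-page addresses
 *                      list0[511]    = dma(list1)    <- link, not data
 *                      list1[0..510] = data-page addresses ...
 *
 * nvme_free_descriptors() follows the same links (le64_to_cpu() of the
 * last entry) when returning each page to the large pool.
 */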
@@ -673,7 +745,7 @@ done:
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
return BLK_STS_OK;
free_prps:
- nvme_free_prps(dev, req);
+ nvme_free_descriptors(nvmeq, req);
return BLK_STS_RESOURCE;
bad_sgl:
WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
@@ -698,11 +770,10 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
}
-static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
+static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq,
struct request *req, struct nvme_rw_command *cmd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
struct scatterlist *sg = iod->sgt.sgl;
unsigned int entries = iod->sgt.nents;
@@ -717,21 +788,14 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
return BLK_STS_OK;
}
- if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
- pool = dev->prp_small_pool;
- iod->nr_allocations = 0;
- } else {
- pool = dev->prp_page_pool;
- iod->nr_allocations = 1;
- }
+ if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list))
+ iod->flags |= IOD_SMALL_DESCRIPTOR;
- sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
- if (!sg_list) {
- iod->nr_allocations = -1;
+ sg_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
+ &sgl_dma);
+ if (!sg_list)
return BLK_STS_RESOURCE;
- }
-
- iod->list[0].sg_list = sg_list;
+ iod->descriptors[iod->nr_descriptors++] = sg_list;
iod->first_dma = sgl_dma;
nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
@@ -785,12 +849,12 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret = BLK_STS_RESOURCE;
int rc;
if (blk_rq_nr_phys_segments(req) == 1) {
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
@@ -825,9 +889,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
}
if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+ ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw);
else
- ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
+ ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw);
if (ret != BLK_STS_OK)
goto out_unmap_sg;
return BLK_STS_OK;
@@ -842,6 +906,7 @@ out_free_sg:
static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev,
struct request *req)
{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_rw_command *cmnd = &iod->cmd.rw;
struct nvme_sgl_desc *sg_list;
@@ -865,12 +930,13 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev,
if (rc)
goto out_free_sg;
- sg_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &sgl_dma);
+ sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC,
+ &sgl_dma);
if (!sg_list)
goto out_unmap_sg;
entries = iod->meta_sgt.nents;
- iod->meta_list.sg_list = sg_list;
+ iod->meta_descriptor = sg_list;
iod->meta_dma = sgl_dma;
cmnd->flags = NVME_CMD_SGL_METASEG;
@@ -912,7 +978,10 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev,
static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req)
{
- if (nvme_pci_metadata_use_sgls(dev, req))
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
+ nvme_pci_metadata_use_sgls(dev, req))
return nvme_pci_setup_meta_sgls(dev, req);
return nvme_pci_setup_meta_mptr(dev, req);
}
@@ -922,8 +991,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
- iod->aborted = false;
- iod->nr_allocations = -1;
+ iod->flags = 0;
+ iod->nr_descriptors = 0;
iod->sgt.nents = 0;
iod->meta_sgt.nents = 0;
@@ -947,7 +1016,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
return BLK_STS_OK;
out_unmap_data:
if (blk_rq_nr_phys_segments(req))
- nvme_unmap_data(dev, req);
+ nvme_unmap_data(dev, req->mq_hctx->driver_data, req);
out_free_cmd:
nvme_cleanup_cmd(req);
return ret;
@@ -1037,6 +1106,7 @@ static void nvme_queue_rqs(struct rq_list *rqlist)
}
static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev,
+ struct nvme_queue *nvmeq,
struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1048,8 +1118,8 @@ static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev,
return;
}
- dma_pool_free(dev->prp_small_pool, iod->meta_list.sg_list,
- iod->meta_dma);
+ dma_pool_free(nvmeq->descriptor_pools.small, iod->meta_descriptor,
+ iod->meta_dma);
dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
}
@@ -1060,10 +1130,10 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
struct nvme_dev *dev = nvmeq->dev;
if (blk_integrity_rq(req))
- nvme_unmap_metadata(dev, req);
+ nvme_unmap_metadata(dev, nvmeq, req);
if (blk_rq_nr_phys_segments(req))
- nvme_unmap_data(dev, req);
+ nvme_unmap_data(dev, nvmeq, req);
}
static void nvme_pci_complete_rq(struct request *req)
@@ -1202,7 +1272,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ spin_lock(&nvmeq->cq_poll_lock);
nvme_poll_cq(nvmeq, NULL);
+ spin_unlock(&nvmeq->cq_poll_lock);
enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}
@@ -1488,7 +1560,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* returned to the driver, or if this is the admin queue.
*/
opcode = nvme_req(req)->cmd->common.opcode;
- if (!nvmeq->qid || iod->aborted) {
+ if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) {
dev_warn(dev->ctrl.device,
"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
req->tag, nvme_cid(req), opcode,
@@ -1501,7 +1573,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
- iod->aborted = true;
+ iod->flags |= IOD_ABORTED;
cmd.abort.opcode = nvme_admin_abort_cmd;
cmd.abort.cid = nvme_cid(req);
@@ -2840,35 +2912,6 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
return 0;
}
-static int nvme_setup_prp_pools(struct nvme_dev *dev)
-{
- size_t small_align = 256;
-
- dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
- NVME_CTRL_PAGE_SIZE,
- NVME_CTRL_PAGE_SIZE, 0);
- if (!dev->prp_page_pool)
- return -ENOMEM;
-
- if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
- small_align = 512;
-
- /* Optimisation for I/Os between 4k and 128k */
- dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
- 256, small_align, 0);
- if (!dev->prp_small_pool) {
- dma_pool_destroy(dev->prp_page_pool);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void nvme_release_prp_pools(struct nvme_dev *dev)
-{
- dma_pool_destroy(dev->prp_page_pool);
- dma_pool_destroy(dev->prp_small_pool);
-}
-
static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{
size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1);
@@ -2972,7 +3015,7 @@ static void nvme_reset_work(struct work_struct *work)
goto out;
/*
- * Freeze and update the number of I/O queues as thos might have
+ * Freeze and update the number of I/O queues as those might have
* changed. If there are no I/O queues left after this reset, keep the
* controller around but remove all namespaces.
*/
@@ -3143,7 +3186,7 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
/*
* Exclude some Kingston NV1 and A2000 devices from
* NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
- * lot fo energy with s2idle sleep on some TUXEDO platforms.
+ * lot of energy with s2idle sleep on some TUXEDO platforms.
*/
if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
@@ -3183,7 +3226,8 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
struct nvme_dev *dev;
int ret = -ENOMEM;
- dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
+ dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids),
+ GFP_KERNEL, node);
if (!dev)
return ERR_PTR(-ENOMEM);
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
@@ -3258,13 +3302,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto out_uninit_ctrl;
- result = nvme_setup_prp_pools(dev);
- if (result)
- goto out_dev_unmap;
-
result = nvme_pci_alloc_iod_mempool(dev);
if (result)
- goto out_release_prp_pools;
+ goto out_dev_unmap;
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
@@ -3340,8 +3380,6 @@ out_disable:
out_release_iod_mempool:
mempool_destroy(dev->iod_mempool);
mempool_destroy(dev->iod_meta_mempool);
-out_release_prp_pools:
- nvme_release_prp_pools(dev);
out_dev_unmap:
nvme_dev_unmap(dev);
out_uninit_ctrl:
@@ -3406,7 +3444,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_free_queues(dev, 0);
mempool_destroy(dev->iod_mempool);
mempool_destroy(dev->iod_meta_mempool);
- nvme_release_prp_pools(dev);
+ nvme_release_descriptor_pools(dev);
nvme_dev_unmap(dev);
nvme_uninit_ctrl(&dev->ctrl);
}
@@ -3737,6 +3775,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
@@ -3805,9 +3845,7 @@ static int __init nvme_init(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
- BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE);
- BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE);
- BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS);
+ BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_DESCRIPTORS);
return pci_register_driver(&nvme_driver);
}
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index cf2d2c5039dd..ca6a74607b13 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -82,8 +82,6 @@ static int nvme_status_to_pr_err(int status)
return PR_STS_SUCCESS;
case NVME_SC_RESERVATION_CONFLICT:
return PR_STS_RESERVATION_CONFLICT;
- case NVME_SC_ONCS_NOT_SUPPORTED:
- return -EOPNOTSUPP;
case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_INVALID_OPCODE:
case NVME_SC_INVALID_FIELD:
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index b5a0295b5bf4..9bd3646568d0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -221,7 +221,7 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
/*
* Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
- * lifetime. It's safe, since any chage in the underlying RDMA device
+ * lifetime. It's safe, since any change in the underlying RDMA device
* will issue error recovery and queue re-creation.
*/
for (i = 0; i < ib_queue_size; i++) {
@@ -800,7 +800,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
/*
* Bind the async event SQE DMA mapping to the admin queue lifetime.
- * It's safe, since any chage in the underlying RDMA device will issue
+ * It's safe, since any change in the underlying RDMA device will issue
* error recovery and queue re-creation.
*/
error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 6d31226f7a4f..29430949ce2f 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -260,6 +260,7 @@ static struct attribute *nvme_ns_attrs[] = {
&dev_attr_ana_state.attr,
&dev_attr_queue_depth.attr,
&dev_attr_numa_nodes.attr,
+ &dev_attr_delayed_removal_secs.attr,
#endif
&dev_attr_io_passthru_err_log_enabled.attr,
NULL,
@@ -296,6 +297,12 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
if (nvme_disk_is_ns_head(dev_to_disk(dev)))
return 0;
}
+ if (a == &dev_attr_delayed_removal_secs.attr) {
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (!nvme_disk_is_ns_head(disk))
+ return 0;
+ }
#endif
return a->mode;
}
@@ -306,13 +313,41 @@ static const struct attribute_group nvme_ns_attr_group = {
};
#ifdef CONFIG_NVME_MULTIPATH
+/*
+ * NOTE: The dummy attribute does not appear in sysfs. It exists solely so that
+ * we can control the visibility of the multipath sysfs node: without at least
+ * one attribute defined in nvme_ns_mpath_attrs[], the sysfs core never invokes
+ * multipath_sysfs_group_visible(), and the node's visibility could not be
+ * controlled at all.
+ */
+static struct attribute dummy_attr = {
+ .name = "dummy",
+};
+
static struct attribute *nvme_ns_mpath_attrs[] = {
+ &dummy_attr,
NULL,
};
+static bool multipath_sysfs_group_visible(struct kobject *kobj)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+
+ return nvme_disk_is_ns_head(dev_to_disk(dev));
+}
+
+static bool multipath_sysfs_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return false;
+}
+
+DEFINE_SYSFS_GROUP_VISIBLE(multipath_sysfs)
+
const struct attribute_group nvme_ns_mpath_attr_group = {
.name = "multipath",
.attrs = nvme_ns_mpath_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(multipath_sysfs),
};
#endif
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index aba365f97cf6..d924008c3949 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/crc32.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
@@ -16,7 +17,6 @@
#include <net/tls_prot.h>
#include <net/handshake.h>
#include <linux/blk-mq.h>
-#include <crypto/hash.h>
#include <net/busy_poll.h>
#include <trace/events/sock.h>
@@ -168,8 +168,8 @@ struct nvme_tcp_queue {
bool hdr_digest;
bool data_digest;
bool tls_enabled;
- struct ahash_request *rcv_hash;
- struct ahash_request *snd_hash;
+ u32 rcv_crc;
+ u32 snd_crc;
__le32 exp_ddgst;
__le32 recv_ddgst;
struct completion tls_complete;
@@ -403,7 +403,7 @@ static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
}
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
- bool sync, bool last)
+ bool last)
{
struct nvme_tcp_queue *queue = req->queue;
bool empty;
@@ -417,7 +417,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
* are on the same cpu, so we don't introduce contention.
*/
if (queue->io_cpu == raw_smp_processor_id() &&
- sync && empty && mutex_trylock(&queue->send_mutex)) {
+ empty && mutex_trylock(&queue->send_mutex)) {
nvme_tcp_send_all(queue);
mutex_unlock(&queue->send_mutex);
}
@@ -452,36 +452,43 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
return NULL;
}
- list_del(&req->entry);
+ list_del_init(&req->entry);
+ init_llist_node(&req->lentry);
return req;
}
-static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
- __le32 *dgst)
+#define NVME_TCP_CRC_SEED (~0)
+
+static inline void nvme_tcp_ddgst_update(u32 *crcp,
+ struct page *page, size_t off, size_t len)
{
- ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
- crypto_ahash_final(hash);
+ page += off / PAGE_SIZE;
+ off %= PAGE_SIZE;
+ while (len) {
+ const void *vaddr = kmap_local_page(page);
+ size_t n = min(len, (size_t)PAGE_SIZE - off);
+
+ *crcp = crc32c(*crcp, vaddr + off, n);
+ kunmap_local(vaddr);
+ page++;
+ off = 0;
+ len -= n;
+ }
}
-static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
- struct page *page, off_t off, size_t len)
+static inline __le32 nvme_tcp_ddgst_final(u32 crc)
{
- struct scatterlist sg;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, len, off);
- ahash_request_set_crypt(hash, &sg, NULL, len);
- crypto_ahash_update(hash);
+ return cpu_to_le32(~crc);
}
-static inline void nvme_tcp_hdgst(struct ahash_request *hash,
- void *pdu, size_t len)
+static inline __le32 nvme_tcp_hdgst(const void *pdu, size_t len)
{
- struct scatterlist sg;
+ return cpu_to_le32(~crc32c(NVME_TCP_CRC_SEED, pdu, len));
+}
- sg_init_one(&sg, pdu, len);
- ahash_request_set_crypt(hash, &sg, pdu + len, len);
- crypto_ahash_digest(hash);
+static inline void nvme_tcp_set_hdgst(void *pdu, size_t len)
+{
+ *(__le32 *)(pdu + len) = nvme_tcp_hdgst(pdu, len);
}
static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
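
The convention these helpers adopt matches what the removed crc32c ahash produced: seed with all-ones, accumulate with crc32c(), and bit-invert on finalization. A minimal illustration, assuming the kernel's crc32c() from <linux/crc32.h>:

u32 crc = NVME_TCP_CRC_SEED;		/* ~0 */
crc = crc32c(crc, hdr, hdr_len);	/* incremental updates ... */
crc = crc32c(crc, payload, payload_len);
__le32 ddgst = cpu_to_le32(~crc);	/* final inversion, wire format */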
@@ -499,8 +506,7 @@ static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
}
recv_digest = *(__le32 *)(pdu + hdr->hlen);
- nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
- exp_digest = *(__le32 *)(pdu + hdr->hlen);
+ exp_digest = nvme_tcp_hdgst(pdu, pdu_len);
if (recv_digest != exp_digest) {
dev_err(queue->ctrl->ctrl.device,
"header digest error: recv %#x expected %#x\n",
@@ -526,7 +532,7 @@ static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
nvme_tcp_queue_id(queue));
return -EPROTO;
}
- crypto_ahash_init(queue->rcv_hash);
+ queue->rcv_crc = NVME_TCP_CRC_SEED;
return 0;
}
@@ -560,6 +566,8 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
req->queue = queue;
nvme_req(rq)->ctrl = &ctrl->ctrl;
nvme_req(rq)->cmd = &pdu->cmd;
+ init_llist_node(&req->lentry);
+ INIT_LIST_HEAD(&req->entry);
return 0;
}
@@ -764,13 +772,23 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return -EPROTO;
}
+ if (llist_on_list(&req->lentry) ||
+ !list_empty(&req->entry)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d unexpected r2t while processing request\n",
+ rq->tag);
+ return -EPROTO;
+ }
+
req->pdu_len = 0;
req->h2cdata_left = r2t_length;
req->h2cdata_offset = r2t_offset;
req->ttag = pdu->ttag;
nvme_tcp_setup_h2c_data_pdu(req);
- nvme_tcp_queue_request(req, false, true);
+
+ llist_add(&req->lentry, &queue->req_list);
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
return 0;
}
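
The duplicate-R2T check above works because the nodes are reset to a detached state whenever a request leaves a list, so list membership doubles as an "already queued" test. A sketch of the lifecycle, under that assumption:

init_llist_node(&req->lentry);	/* at request init and after dequeue */
INIT_LIST_HEAD(&req->entry);
...
if (llist_on_list(&req->lentry) || !list_empty(&req->entry))
	return -EPROTO;		/* R2T for an already-queued request */
llist_add(&req->lentry, &queue->req_list);
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);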
@@ -926,8 +944,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
iov_iter_count(&req->iter));
if (queue->data_digest)
- ret = skb_copy_and_hash_datagram_iter(skb, *offset,
- &req->iter, recv_len, queue->rcv_hash);
+ ret = skb_copy_and_crc32c_datagram_iter(skb, *offset,
+ &req->iter, recv_len, &queue->rcv_crc);
else
ret = skb_copy_datagram_iter(skb, *offset,
&req->iter, recv_len);
@@ -945,7 +963,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
if (!queue->data_remaining) {
if (queue->data_digest) {
- nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
+ queue->exp_ddgst = nvme_tcp_ddgst_final(queue->rcv_crc);
queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
} else {
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
@@ -1147,7 +1165,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
return ret;
if (queue->data_digest)
- nvme_tcp_ddgst_update(queue->snd_hash, page,
+ nvme_tcp_ddgst_update(&queue->snd_crc, page,
offset, ret);
/*
@@ -1161,8 +1179,8 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
/* fully successful last send in current PDU */
if (last && ret == len) {
if (queue->data_digest) {
- nvme_tcp_ddgst_final(queue->snd_hash,
- &req->ddgst);
+ req->ddgst =
+ nvme_tcp_ddgst_final(queue->snd_crc);
req->state = NVME_TCP_SEND_DDGST;
req->offset = 0;
} else {
@@ -1194,7 +1212,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
msg.msg_flags |= MSG_EOR;
if (queue->hdr_digest && !req->offset)
- nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ nvme_tcp_set_hdgst(pdu, sizeof(*pdu));
bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
@@ -1207,7 +1225,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
if (inline_data) {
req->state = NVME_TCP_SEND_DATA;
if (queue->data_digest)
- crypto_ahash_init(queue->snd_hash);
+ queue->snd_crc = NVME_TCP_CRC_SEED;
} else {
nvme_tcp_done_send_req(queue);
}
@@ -1229,7 +1247,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
int ret;
if (queue->hdr_digest && !req->offset)
- nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ nvme_tcp_set_hdgst(pdu, sizeof(*pdu));
if (!req->h2cdata_left)
msg.msg_flags |= MSG_SPLICE_PAGES;
@@ -1244,7 +1262,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
if (!len) {
req->state = NVME_TCP_SEND_DATA;
if (queue->data_digest)
- crypto_ahash_init(queue->snd_hash);
+ queue->snd_crc = NVME_TCP_CRC_SEED;
return 1;
}
req->offset += ret;
@@ -1348,7 +1366,7 @@ static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
queue->nr_cqe = 0;
consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
release_sock(sk);
- return consumed;
+ return consumed == -EAGAIN ? 0 : consumed;
}
static void nvme_tcp_io_work(struct work_struct *w)
@@ -1376,6 +1394,11 @@ static void nvme_tcp_io_work(struct work_struct *w)
else if (unlikely(result < 0))
return;
+ /* did we get some space after spending time in recv? */
+ if (nvme_tcp_queue_has_pending(queue) &&
+ sk_stream_is_writeable(queue->sock->sk))
+ pending = true;
+
if (!pending || !queue->rd_enabled)
return;
@@ -1384,41 +1407,6 @@ static void nvme_tcp_io_work(struct work_struct *w)
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
-static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
-
- ahash_request_free(queue->rcv_hash);
- ahash_request_free(queue->snd_hash);
- crypto_free_ahash(tfm);
-}
-
-static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
-{
- struct crypto_ahash *tfm;
-
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!queue->snd_hash)
- goto free_tfm;
- ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
-
- queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!queue->rcv_hash)
- goto free_snd_hash;
- ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
-
- return 0;
-free_snd_hash:
- ahash_request_free(queue->snd_hash);
-free_tfm:
- crypto_free_ahash(tfm);
- return -ENOMEM;
-}
-
static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
{
struct nvme_tcp_request *async = &ctrl->async_req;
@@ -1451,9 +1439,6 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
return;
- if (queue->hdr_digest || queue->data_digest)
- nvme_tcp_free_crypto(queue);
-
page_frag_cache_drain(&queue->pf_cache);
noreclaim_flag = memalloc_noreclaim_save();
@@ -1867,21 +1852,13 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->hdr_digest = nctrl->opts->hdr_digest;
queue->data_digest = nctrl->opts->data_digest;
- if (queue->hdr_digest || queue->data_digest) {
- ret = nvme_tcp_alloc_crypto(queue);
- if (ret) {
- dev_err(nctrl->device,
- "failed to allocate queue %d crypto\n", qid);
- goto err_sock;
- }
- }
rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
nvme_tcp_hdgst_len(queue);
queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
if (!queue->pdu) {
ret = -ENOMEM;
- goto err_crypto;
+ goto err_sock;
}
dev_dbg(nctrl->device, "connecting queue %d\n",
@@ -1914,9 +1891,6 @@ err_init_connect:
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
err_rcv_pdu:
kfree(queue->pdu);
-err_crypto:
- if (queue->hdr_digest || queue->data_digest)
- nvme_tcp_free_crypto(queue);
err_sock:
/* ->sock will be released by fput() */
fput(queue->sock->file);
@@ -2385,14 +2359,14 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
if (ret)
return ret;
- if (ctrl->opts && ctrl->opts->concat && !ctrl->tls_pskid) {
+ if (ctrl->opts->concat && !ctrl->tls_pskid) {
/* See comments for nvme_tcp_key_revoke_needed() */
dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n");
nvme_stop_keep_alive(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
ret = nvme_tcp_configure_admin_queue(ctrl, false);
if (ret)
- return ret;
+ goto destroy_admin;
}
if (ctrl->icdoff) {
@@ -2636,8 +2610,10 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
ctrl->async_req.offset = 0;
ctrl->async_req.curr_bio = NULL;
ctrl->async_req.data_len = 0;
+ init_llist_node(&ctrl->async_req.lentry);
+ INIT_LIST_HEAD(&ctrl->async_req.entry);
- nvme_tcp_queue_request(&ctrl->async_req, true, true);
+ nvme_tcp_queue_request(&ctrl->async_req, true);
}
static void nvme_tcp_complete_timed_out(struct request *rq)
@@ -2789,7 +2765,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
nvme_start_request(rq);
- nvme_tcp_queue_request(req, true, bd->last);
+ nvme_tcp_queue_request(req, bd->last);
return BLK_STS_OK;
}
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 4c253b433bf7..4904097dfd49 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -3,7 +3,7 @@
config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
- depends on CONFIGFS_FS
+ select CONFIGFS_FS
select NVME_KEYRING if NVME_TARGET_TCP_TLS
select KEYS if NVME_TARGET_TCP_TLS
select SGL_ALLOC
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index acc138bbf8f2..3e378153a781 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -63,14 +63,9 @@ static void nvmet_execute_create_sq(struct nvmet_req *req)
if (status != NVME_SC_SUCCESS)
goto complete;
- /*
- * Note: The NVMe specification allows multiple SQs to use the same CQ.
- * However, the target code does not really support that. So for now,
- * prevent this and fail the command if sqid and cqid are different.
- */
- if (!cqid || cqid != sqid) {
- pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid);
- status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR;
+ status = nvmet_check_io_cqid(ctrl, cqid, false);
+ if (status != NVME_SC_SUCCESS) {
+ pr_err("SQ %u: Invalid CQID %u\n", sqid, cqid);
goto complete;
}
@@ -79,7 +74,7 @@ static void nvmet_execute_create_sq(struct nvmet_req *req)
goto complete;
}
- status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1);
+ status = ctrl->ops->create_sq(ctrl, sqid, cqid, sq_flags, qsize, prp1);
complete:
nvmet_req_complete(req, status);
@@ -96,14 +91,15 @@ static void nvmet_execute_delete_cq(struct nvmet_req *req)
goto complete;
}
- if (!cqid) {
- status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ status = nvmet_check_io_cqid(ctrl, cqid, false);
+ if (status != NVME_SC_SUCCESS)
goto complete;
- }
- status = nvmet_check_cqid(ctrl, cqid);
- if (status != NVME_SC_SUCCESS)
+ if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) {
+ /* Some SQs are still using this CQ */
+ status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
goto complete;
+ }
status = ctrl->ops->delete_cq(ctrl, cqid);
@@ -127,12 +123,7 @@ static void nvmet_execute_create_cq(struct nvmet_req *req)
goto complete;
}
- if (!cqid) {
- status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
- goto complete;
- }
-
- status = nvmet_check_cqid(ctrl, cqid);
+ status = nvmet_check_io_cqid(ctrl, cqid, true);
if (status != NVME_SC_SUCCESS)
goto complete;
@@ -1174,7 +1165,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
* A "minimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
* do a useful abort, so don't bother even with waiting for the command
- * to be exectuted and return immediately telling the command to abort
+ * to be executed and return immediately telling the command to abort
* wasn't found.
*/
static void nvmet_execute_abort(struct nvmet_req *req)
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 9429b8218408..b340380f3892 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -280,9 +280,12 @@ void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
bool nvmet_check_auth_status(struct nvmet_req *req)
{
- if (req->sq->ctrl->host_key &&
- !req->sq->authenticated)
- return false;
+ if (req->sq->ctrl->host_key) {
+ if (req->sq->qid > 0)
+ return true;
+ if (!req->sq->authenticated)
+ return false;
+ }
return true;
}
@@ -290,7 +293,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
unsigned int shash_len)
{
struct crypto_shash *shash_tfm;
- struct shash_desc *shash;
+ SHASH_DESC_ON_STACK(shash, shash_tfm);
struct nvmet_ctrl *ctrl = req->sq->ctrl;
const char *hash_name;
u8 *challenge = req->sq->dhchap_c1;
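
SHASH_DESC_ON_STACK() declares a correctly sized struct shash_desc on the stack, which is why the kzalloc()/kfree() pair and its error label disappear below. The usual idiom, shown here in isolation:

SHASH_DESC_ON_STACK(desc, tfm);	/* tfm must be known at the declaration */

desc->tfm = tfm;
ret = crypto_shash_init(desc);
if (!ret)
	ret = crypto_shash_update(desc, data, len);
if (!ret)
	ret = crypto_shash_final(desc, out);
/* nothing to free: the descriptor lives on the stack */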
@@ -342,19 +345,13 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
req->sq->dhchap_c1,
challenge, shash_len);
if (ret)
- goto out_free_challenge;
+ goto out;
}
pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
req->sq->dhchap_tid);
- shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
- GFP_KERNEL);
- if (!shash) {
- ret = -ENOMEM;
- goto out_free_challenge;
- }
shash->tfm = shash_tfm;
ret = crypto_shash_init(shash);
if (ret)
@@ -389,8 +386,6 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
goto out;
ret = crypto_shash_final(shash, response);
out:
- kfree(shash);
-out_free_challenge:
if (challenge != req->sq->dhchap_c1)
kfree(challenge);
out_free_response:
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 245475c43127..175c5b6d4dd5 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -62,14 +62,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
case -EOPNOTSUPP:
req->error_loc = offsetof(struct nvme_common_command, opcode);
- switch (req->cmd->common.opcode) {
- case nvme_cmd_dsm:
- case nvme_cmd_write_zeroes:
- return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
- default:
- return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
- }
- break;
+ return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
case -ENODATA:
req->error_loc = offsetof(struct nvme_rw_command, nsid);
return NVME_SC_ACCESS_DENIED;
@@ -651,7 +644,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
* Now that we removed the namespaces from the lookup list, we
* can kill the per_cpu ref and wait for any remaining references
* to be dropped, as well as a RCU grace period for anyone only
- * using the namepace under rcu_read_lock(). Note that we can't
+ * using the namespace under rcu_read_lock(). Note that we can't
* use call_rcu here as we need to ensure the namespaces have
* been fully destroyed before unloading the module.
*/
@@ -813,11 +806,43 @@ void nvmet_req_complete(struct nvmet_req *req, u16 status)
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
+void nvmet_cq_init(struct nvmet_cq *cq)
+{
+ refcount_set(&cq->ref, 1);
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_init);
+
+bool nvmet_cq_get(struct nvmet_cq *cq)
+{
+ return refcount_inc_not_zero(&cq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_get);
+
+void nvmet_cq_put(struct nvmet_cq *cq)
+{
+ if (refcount_dec_and_test(&cq->ref))
+ nvmet_cq_destroy(cq);
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_put);
+
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
u16 qid, u16 size)
{
cq->qid = qid;
cq->size = size;
+
+ ctrl->cqs[qid] = cq;
+}
+
+void nvmet_cq_destroy(struct nvmet_cq *cq)
+{
+ struct nvmet_ctrl *ctrl = cq->ctrl;
+
+ if (ctrl) {
+ ctrl->cqs[cq->qid] = NULL;
+ nvmet_ctrl_put(cq->ctrl);
+ cq->ctrl = NULL;
+ }
}
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
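
Lifecycle sketch tying the new helpers together (illustrative ordering only): a CQ starts with one reference from nvmet_cq_init(); every SQ bound to it takes another, so nvmet_cq_in_use() is simply "refcount > 1" and the final put triggers nvmet_cq_destroy().

nvmet_cq_init(cq);	/* ref = 1 */
nvmet_sq_init(sq, cq);	/* ref = 2: the SQ pins its CQ */
...
nvmet_sq_destroy(sq);	/* nvmet_cq_put(): ref = 1 */
nvmet_cq_put(cq);	/* ref = 0 -> nvmet_cq_destroy() */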
@@ -837,37 +862,47 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
complete(&sq->confirm_done);
}
-u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid)
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create)
{
- if (!ctrl->sqs)
+ if (!ctrl->cqs)
return NVME_SC_INTERNAL | NVME_STATUS_DNR;
if (cqid > ctrl->subsys->max_qid)
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
- /*
- * Note: For PCI controllers, the NVMe specifications allows multiple
- * SQs to share a single CQ. However, we do not support this yet, so
- * check that there is no SQ defined for a CQ. If one exist, then the
- * CQ ID is invalid for creation as well as when the CQ is being
- * deleted (as that would mean that the SQ was not deleted before the
- * CQ).
- */
- if (ctrl->sqs[cqid])
+ if ((create && ctrl->cqs[cqid]) || (!create && !ctrl->cqs[cqid]))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
return NVME_SC_SUCCESS;
}
+u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create)
+{
+ if (!cqid)
+ return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
+ return nvmet_check_cqid(ctrl, cqid, create);
+}
+
+bool nvmet_cq_in_use(struct nvmet_cq *cq)
+{
+ return refcount_read(&cq->ref) > 1;
+}
+EXPORT_SYMBOL_GPL(nvmet_cq_in_use);
+
u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
u16 qid, u16 size)
{
u16 status;
- status = nvmet_check_cqid(ctrl, qid);
+ status = nvmet_check_cqid(ctrl, qid, true);
if (status != NVME_SC_SUCCESS)
return status;
+ if (!kref_get_unless_zero(&ctrl->ref))
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ cq->ctrl = ctrl;
+
+ nvmet_cq_init(cq);
nvmet_cq_setup(ctrl, cq, qid, size);
return NVME_SC_SUCCESS;
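
The single predicate in nvmet_check_cqid() now encodes both directions of the check; spelled out as a restatement, not new behavior:

/*
 *   create == true  : creating the CQ, so it must NOT already exist
 *                     -> fail when ctrl->cqs[cqid] != NULL
 *   create == false : using or deleting the CQ, so it MUST exist
 *                     -> fail when ctrl->cqs[cqid] == NULL
 */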
@@ -891,7 +926,7 @@ u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid,
}
u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
- u16 sqid, u16 size)
+ struct nvmet_cq *cq, u16 sqid, u16 size)
{
u16 status;
int ret;
@@ -903,7 +938,7 @@ u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
if (status != NVME_SC_SUCCESS)
return status;
- ret = nvmet_sq_init(sq);
+ ret = nvmet_sq_init(sq, cq);
if (ret) {
status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
goto ctrl_put;
@@ -935,6 +970,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
nvmet_auth_sq_free(sq);
+ nvmet_cq_put(sq->cq);
/*
* we must reference the ctrl again after waiting for inflight IO
@@ -967,18 +1003,23 @@ static void nvmet_sq_free(struct percpu_ref *ref)
complete(&sq->free_done);
}
-int nvmet_sq_init(struct nvmet_sq *sq)
+int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq)
{
int ret;
+ if (!nvmet_cq_get(cq))
+ return -EINVAL;
+
ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
if (ret) {
pr_err("percpu_ref init failed!\n");
+ nvmet_cq_put(cq);
return ret;
}
init_completion(&sq->free_done);
init_completion(&sq->confirm_done);
nvmet_auth_sq_init(sq);
+ sq->cq = cq;
return 0;
}
@@ -1108,13 +1149,13 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return ret;
}
-bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
- struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
+ const struct nvmet_fabrics_ops *ops)
{
u8 flags = req->cmd->common.flags;
u16 status;
- req->cq = cq;
+ req->cq = sq->cq;
req->sq = sq;
req->ops = ops;
req->sg = NULL;
@@ -1612,12 +1653,17 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
if (!ctrl->sqs)
goto out_free_changed_ns_list;
+ ctrl->cqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_cq *),
+ GFP_KERNEL);
+ if (!ctrl->cqs)
+ goto out_free_sqs;
+
ret = ida_alloc_range(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
- goto out_free_sqs;
+ goto out_free_cqs;
}
ctrl->cntlid = ret;
@@ -1676,6 +1722,8 @@ init_pr_fail:
mutex_unlock(&subsys->lock);
nvmet_stop_keep_alive_timer(ctrl);
ida_free(&cntlid_ida, ctrl->cntlid);
+out_free_cqs:
+ kfree(ctrl->cqs);
out_free_sqs:
kfree(ctrl->sqs);
out_free_changed_ns_list:
@@ -1712,6 +1760,7 @@ static void nvmet_ctrl_free(struct kref *ref)
nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
+ kfree(ctrl->cqs);
kfree(ctrl->changed_ns_list);
kfree(ctrl);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index df7207640506..c06f3e04296c 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -119,7 +119,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
- strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
+ strscpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}
/*
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f012bdf89850..7b8d8b397802 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -208,6 +208,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
}
+ kref_get(&ctrl->ref);
+ old = cmpxchg(&req->cq->ctrl, NULL, ctrl);
+ if (old) {
+ pr_warn("queue already connected!\n");
+ req->error_loc = offsetof(struct nvmf_connect_command, opcode);
+ return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
+ }
+
/* note: convert queue size from 0's-based value to 1's-based value */
nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
@@ -239,8 +247,8 @@ static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
bool needs_auth = nvmet_has_auth(ctrl, sq);
key_serial_t keyid = nvmet_queue_tls_keyid(sq);
- /* Do not authenticate I/O queues for secure concatenation */
- if (ctrl->concat && sq->qid)
+ /* Do not authenticate I/O queues */
+ if (sq->qid)
needs_auth = false;
if (keyid)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 7b50130f10f6..25598a46bf0d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -816,7 +816,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
- ret = nvmet_sq_init(&queue->nvme_sq);
+ nvmet_cq_init(&queue->nvme_cq);
+ ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
if (ret)
goto out_fail_iodlist;
@@ -826,6 +827,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
return queue;
out_fail_iodlist:
+ nvmet_cq_put(&queue->nvme_cq);
nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
destroy_workqueue(queue->work_q);
out_free_queue:
@@ -934,6 +936,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
flush_workqueue(queue->work_q);
nvmet_sq_destroy(&queue->nvme_sq);
+ nvmet_cq_put(&queue->nvme_cq);
nvmet_fc_tgt_q_put(queue);
}
@@ -1254,6 +1257,7 @@ nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
{
lockdep_assert_held(&nvmet_fc_tgtlock);
+ nvmet_fc_tgtport_get(tgtport);
pe->tgtport = tgtport;
tgtport->pe = pe;
@@ -1273,8 +1277,10 @@ nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
unsigned long flags;
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- if (pe->tgtport)
+ if (pe->tgtport) {
+ nvmet_fc_tgtport_put(pe->tgtport);
pe->tgtport->pe = NULL;
+ }
list_del(&pe->pe_list);
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
@@ -1292,8 +1298,10 @@ nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
pe = tgtport->pe;
- if (pe)
+ if (pe) {
+ nvmet_fc_tgtport_put(pe->tgtport);
pe->tgtport = NULL;
+ }
tgtport->pe = NULL;
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
@@ -1316,6 +1324,9 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
if (tgtport->fc_target_port.node_name == pe->node_name &&
tgtport->fc_target_port.port_name == pe->port_name) {
+ if (!nvmet_fc_tgtport_get(tgtport))
+ continue;
+
WARN_ON(pe->tgtport);
tgtport->pe = pe;
pe->tgtport = tgtport;
@@ -1328,7 +1339,7 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
/**
* nvmet_fc_register_targetport - transport entry point called by an
* LLDD to register the existence of a local
- * NVME subystem FC port.
+ * NVME subsystem FC port.
* @pinfo: pointer to information about the port to be registered
* @template: LLDD entrypoints and operational parameters for the port
* @dev: physical hardware device node port corresponds to. Will be
@@ -1580,6 +1591,39 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
+static void
+nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ struct nvmet_fc_ls_iod *iod;
+ int i;
+
+ iod = tgtport->iod;
+ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++)
+ cancel_work(&iod->work);
+
+ /*
+ * After this point the connection is lost and thus any pending
+ * request can't be processed by the normal completion path. This
+ * is likely a request from nvmet_fc_send_ls_req_async.
+ */
+ while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list,
+ struct nvmet_fc_ls_req_op, lsreq_list))) {
+ list_del(&lsop->lsreq_list);
+
+ if (!lsop->req_queued)
+ continue;
+
+ lsreq = &lsop->ls_req;
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+ nvmet_fc_tgtport_put(tgtport);
+ kfree(lsop);
+ }
+}
+
/**
* nvmet_fc_unregister_targetport - transport entry point called by an
* LLDD to deregister/remove a previously
@@ -1608,13 +1652,7 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
flush_workqueue(nvmet_wq);
- /*
- * should terminate LS's as well. However, LS's will be generated
- * at the tail end of association termination, so they likely don't
- * exist yet. And even if they did, it's worthwhile to just let
- * them finish and targetport ref counting will clean things up.
- */
-
+ nvmet_fc_free_pending_reqs(tgtport);
nvmet_fc_tgtport_put(tgtport);
return 0;
@@ -2531,10 +2569,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->data_sg = NULL;
fod->data_sg_cnt = 0;
- ret = nvmet_req_init(&fod->req,
- &fod->queue->nvme_cq,
- &fod->queue->nvme_sq,
- &nvmet_fc_tgt_fcp_ops);
+ ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq,
+ &nvmet_fc_tgt_fcp_ops);
if (!ret) {
/* bad SQE content or invalid ctrl state */
/* nvmet layer has already called op done to send rsp. */
@@ -2860,12 +2896,17 @@ nvmet_fc_add_port(struct nvmet_port *port)
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
+ if (!nvmet_fc_tgtport_get(tgtport))
+ continue;
+
/* a FC port can only be 1 nvmet port id */
if (!tgtport->pe) {
nvmet_fc_portentry_bind(tgtport, pe, port);
ret = 0;
} else
ret = -EALREADY;
+
+ nvmet_fc_tgtport_put(tgtport);
break;
}
}
@@ -2881,11 +2922,21 @@ static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
struct nvmet_fc_port_entry *pe = port->priv;
+ struct nvmet_fc_tgtport *tgtport = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
+ tgtport = pe->tgtport;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_portentry_unbind(pe);
- /* terminate any outstanding associations */
- __nvmet_fc_free_assocs(pe->tgtport);
+ if (tgtport) {
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(tgtport);
+ nvmet_fc_tgtport_put(tgtport);
+ }
kfree(pe);
}
@@ -2894,10 +2945,21 @@ static void
nvmet_fc_discovery_chg(struct nvmet_port *port)
{
struct nvmet_fc_port_entry *pe = port->priv;
- struct nvmet_fc_tgtport *tgtport = pe->tgtport;
+ struct nvmet_fc_tgtport *tgtport = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
+ tgtport = pe->tgtport;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ if (!tgtport)
+ return;
if (tgtport && tgtport->ops->discovery_event)
tgtport->ops->discovery_event(&tgtport->fc_target_port);
+
+ nvmet_fc_tgtport_put(tgtport);
}
static ssize_t
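
Most of the fc.c changes above enforce one rule: never dereference pe->tgtport outside nvmet_fc_tgtlock without first taking a reference under the lock. A generic sketch of that rule; struct tgt and struct entry are illustrative stand-ins, not nvmet types:

#include <linux/refcount.h>
#include <linux/spinlock.h>

struct tgt {
	refcount_t ref;
};

struct entry {
	struct tgt *tgt;	/* protected by the caller's lock */
};

static struct tgt *entry_get_tgt(struct entry *pe, spinlock_t *lock)
{
	struct tgt *t = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* refcount_inc_not_zero() fails if the target is being torn down */
	if (pe->tgt && refcount_inc_not_zero(&pe->tgt->ref))
		t = pe->tgt;
	spin_unlock_irqrestore(lock, flags);

	return t;	/* caller uses t outside the lock, then puts it */
}
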
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 641201e62c1b..257b497d515a 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -207,7 +207,6 @@ static LIST_HEAD(fcloop_nports);
struct fcloop_lport {
struct nvme_fc_local_port *localport;
struct list_head lport_list;
- struct completion unreg_done;
refcount_t ref;
};
@@ -215,6 +214,9 @@ struct fcloop_lport_priv {
struct fcloop_lport *lport;
};
+/* The port is already being removed; avoid a double free */
+#define PORT_DELETED 0
+
struct fcloop_rport {
struct nvme_fc_remote_port *remoteport;
struct nvmet_fc_target_port *targetport;
@@ -223,6 +225,7 @@ struct fcloop_rport {
spinlock_t lock;
struct list_head ls_list;
struct work_struct ls_work;
+ unsigned long flags;
};
struct fcloop_tport {
@@ -233,6 +236,7 @@ struct fcloop_tport {
spinlock_t lock;
struct list_head ls_list;
struct work_struct ls_work;
+ unsigned long flags;
};
struct fcloop_nport {
@@ -288,6 +292,9 @@ struct fcloop_ini_fcpreq {
spinlock_t inilock;
};
+/* SLAB cache for fcloop_lsreq structures */
+static struct kmem_cache *lsreq_cache;
+
static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
@@ -338,6 +345,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work)
* callee may free memory containing tls_req.
* do not reference lsreq after this.
*/
+ kmem_cache_free(lsreq_cache, tls_req);
spin_lock(&rport->lock);
}
@@ -349,10 +357,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
struct nvmefc_ls_req *lsreq)
{
- struct fcloop_lsreq *tls_req = lsreq->private;
struct fcloop_rport *rport = remoteport->private;
+ struct fcloop_lsreq *tls_req;
int ret = 0;
+ tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
+ if (!tls_req)
+ return -ENOMEM;
tls_req->lsreq = lsreq;
INIT_LIST_HEAD(&tls_req->ls_list);
@@ -389,14 +400,17 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
lsrsp->done(lsrsp);
- if (remoteport) {
- rport = remoteport->private;
- spin_lock(&rport->lock);
- list_add_tail(&tls_req->ls_list, &rport->ls_list);
- spin_unlock(&rport->lock);
- queue_work(nvmet_wq, &rport->ls_work);
+ if (!remoteport) {
+ kmem_cache_free(lsreq_cache, tls_req);
+ return 0;
}
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
+
return 0;
}
@@ -422,6 +436,7 @@ fcloop_tport_lsrqst_work(struct work_struct *work)
* callee may free memory containing tls_req.
* do not reference lsreq after this.
*/
+ kmem_cache_free(lsreq_cache, tls_req);
spin_lock(&tport->lock);
}
@@ -432,8 +447,8 @@ static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
struct nvmefc_ls_req *lsreq)
{
- struct fcloop_lsreq *tls_req = lsreq->private;
struct fcloop_tport *tport = targetport->private;
+ struct fcloop_lsreq *tls_req;
int ret = 0;
/*
@@ -441,6 +456,10 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
* hosthandle ignored as fcloop currently is
* 1:1 tgtport vs remoteport
*/
+
+ tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
+ if (!tls_req)
+ return -ENOMEM;
tls_req->lsreq = lsreq;
INIT_LIST_HEAD(&tls_req->ls_list);
@@ -457,6 +476,9 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
lsreq->rqstaddr, lsreq->rqstlen);
+ if (ret)
+ kmem_cache_free(lsreq_cache, tls_req);
+
return ret;
}
@@ -471,18 +493,30 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
struct nvmet_fc_target_port *targetport = rport->targetport;
struct fcloop_tport *tport;
+ if (!targetport) {
+ /*
+ * The target port is gone. The target doesn't expect any
+ * response anymore and the ->done call is not valid
+ * because the resources have been freed by
+ * nvmet_fc_free_pending_reqs.
+ *
+ * We end up here from delete association exchange:
+ * nvmet_fc_xmt_disconnect_assoc sends an async request.
+ */
+ kmem_cache_free(lsreq_cache, tls_req);
+ return 0;
+ }
+
memcpy(lsreq->rspaddr, lsrsp->rspbuf,
((lsreq->rsplen < lsrsp->rsplen) ?
lsreq->rsplen : lsrsp->rsplen));
lsrsp->done(lsrsp);
- if (targetport) {
- tport = targetport->private;
- spin_lock(&tport->lock);
- list_add_tail(&tls_req->ls_list, &tport->ls_list);
- spin_unlock(&tport->lock);
- queue_work(nvmet_wq, &tport->ls_work);
- }
+ tport = targetport->private;
+ spin_lock(&tport->lock);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ queue_work(nvmet_wq, &tport->ls_work);
return 0;
}
@@ -566,7 +600,8 @@ fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
}
/* release original io reference on tgt struct */
- fcloop_tfcp_req_put(tfcp_req);
+ if (tfcp_req)
+ fcloop_tfcp_req_put(tfcp_req);
}
static bool drop_fabric_opcode;
@@ -618,12 +653,13 @@ fcloop_fcp_recv_work(struct work_struct *work)
{
struct fcloop_fcpreq *tfcp_req =
container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
- struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ struct nvmefc_fcp_req *fcpreq;
unsigned long flags;
int ret = 0;
bool aborted = false;
spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ fcpreq = tfcp_req->fcpreq;
switch (tfcp_req->inistate) {
case INI_IO_START:
tfcp_req->inistate = INI_IO_ACTIVE;
@@ -638,16 +674,19 @@ fcloop_fcp_recv_work(struct work_struct *work)
}
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
- if (unlikely(aborted))
- ret = -ECANCELED;
- else {
- if (likely(!check_for_drop(tfcp_req)))
- ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
- &tfcp_req->tgt_fcp_req,
- fcpreq->cmdaddr, fcpreq->cmdlen);
- else
- pr_info("%s: dropped command ********\n", __func__);
+ if (unlikely(aborted)) {
+ /* the abort handler will call fcloop_call_host_done */
+ return;
+ }
+
+ if (unlikely(check_for_drop(tfcp_req))) {
+ pr_info("%s: dropped command ********\n", __func__);
+ return;
}
+
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
if (ret)
fcloop_call_host_done(fcpreq, tfcp_req, ret);
}
@@ -662,15 +701,17 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
unsigned long flags;
spin_lock_irqsave(&tfcp_req->reqlock, flags);
- fcpreq = tfcp_req->fcpreq;
switch (tfcp_req->inistate) {
case INI_IO_ABORTED:
+ fcpreq = tfcp_req->fcpreq;
+ tfcp_req->fcpreq = NULL;
break;
case INI_IO_COMPLETED:
completed = true;
break;
default:
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ fcloop_tfcp_req_put(tfcp_req);
WARN_ON(1);
return;
}
@@ -686,10 +727,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
&tfcp_req->tgt_fcp_req);
- spin_lock_irqsave(&tfcp_req->reqlock, flags);
- tfcp_req->fcpreq = NULL;
- spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
-
fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
/* call_host_done releases reference for abort downcall */
}
@@ -958,13 +995,16 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
spin_lock(&inireq->inilock);
tfcp_req = inireq->tfcp_req;
- if (tfcp_req)
- fcloop_tfcp_req_get(tfcp_req);
+ if (tfcp_req) {
+ if (!fcloop_tfcp_req_get(tfcp_req))
+ tfcp_req = NULL;
+ }
spin_unlock(&inireq->inilock);
- if (!tfcp_req)
+ if (!tfcp_req) {
/* abort has already been called */
- return;
+ goto out_host_done;
+ }
/* break initiator/target relationship for io */
spin_lock_irqsave(&tfcp_req->reqlock, flags);
@@ -979,7 +1019,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
default:
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
WARN_ON(1);
- return;
+ goto out_host_done;
}
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
@@ -993,6 +1033,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
*/
fcloop_tfcp_req_put(tfcp_req);
}
+
+ return;
+
+out_host_done:
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
}
static void
@@ -1019,9 +1064,18 @@ fcloop_lport_get(struct fcloop_lport *lport)
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
+ unsigned long flags;
+
if (!refcount_dec_and_test(&nport->ref))
return;
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&nport->nport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (nport->lport)
+ fcloop_lport_put(nport->lport);
+
kfree(nport);
}
@@ -1037,9 +1091,6 @@ fcloop_localport_delete(struct nvme_fc_local_port *localport)
struct fcloop_lport_priv *lport_priv = localport->private;
struct fcloop_lport *lport = lport_priv->lport;
- /* release any threads waiting for the unreg to complete */
- complete(&lport->unreg_done);
-
fcloop_lport_put(lport);
}
@@ -1047,18 +1098,38 @@ static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct fcloop_rport *rport = remoteport->private;
+ bool put_port = false;
+ unsigned long flags;
flush_work(&rport->ls_work);
- fcloop_nport_put(rport->nport);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ if (!test_and_set_bit(PORT_DELETED, &rport->flags))
+ put_port = true;
+ rport->nport->rport = NULL;
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (put_port)
+ fcloop_nport_put(rport->nport);
}
static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
struct fcloop_tport *tport = targetport->private;
+ bool put_port = false;
+ unsigned long flags;
flush_work(&tport->ls_work);
- fcloop_nport_put(tport->nport);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ if (!test_and_set_bit(PORT_DELETED, &tport->flags))
+ put_port = true;
+ tport->nport->tport = NULL;
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ if (put_port)
+ fcloop_nport_put(tport->nport);
}
#define FCLOOP_HW_QUEUES 4
@@ -1082,7 +1153,6 @@ static struct nvme_fc_port_template fctemplate = {
/* sizes of additional private data for data structures */
.local_priv_sz = sizeof(struct fcloop_lport_priv),
.remote_priv_sz = sizeof(struct fcloop_rport),
- .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
};
@@ -1105,7 +1175,6 @@ static struct nvmet_fc_target_template tgttemplate = {
.target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
- .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
};
static ssize_t
@@ -1170,51 +1239,92 @@ out_free_lport:
}
static int
-__wait_localport_unreg(struct fcloop_lport *lport)
+__localport_unreg(struct fcloop_lport *lport)
{
- int ret;
+ return nvme_fc_unregister_localport(lport->localport);
+}
- init_completion(&lport->unreg_done);
+static struct fcloop_nport *
+__fcloop_nport_lookup(u64 node_name, u64 port_name)
+{
+ struct fcloop_nport *nport;
- ret = nvme_fc_unregister_localport(lport->localport);
+ list_for_each_entry(nport, &fcloop_nports, nport_list) {
+ if (nport->node_name != node_name ||
+ nport->port_name != port_name)
+ continue;
- if (!ret)
- wait_for_completion(&lport->unreg_done);
+ if (fcloop_nport_get(nport))
+ return nport;
- return ret;
+ break;
+ }
+
+ return NULL;
}
+static struct fcloop_nport *
+fcloop_nport_lookup(u64 node_name, u64 port_name)
+{
+ struct fcloop_nport *nport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ nport = __fcloop_nport_lookup(node_name, port_name);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ return nport;
+}
+
+static struct fcloop_lport *
+__fcloop_lport_lookup(u64 node_name, u64 port_name)
+{
+ struct fcloop_lport *lport;
+
+ list_for_each_entry(lport, &fcloop_lports, lport_list) {
+ if (lport->localport->node_name != node_name ||
+ lport->localport->port_name != port_name)
+ continue;
+
+ if (fcloop_lport_get(lport))
+ return lport;
+
+ break;
+ }
+
+ return NULL;
+}
+
+static struct fcloop_lport *
+fcloop_lport_lookup(u64 node_name, u64 port_name)
+{
+ struct fcloop_lport *lport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ lport = __fcloop_lport_lookup(node_name, port_name);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ return lport;
+}
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct fcloop_lport *tlport, *lport = NULL;
+ struct fcloop_lport *lport;
u64 nodename, portname;
- unsigned long flags;
int ret;
ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
if (ret)
return ret;
- spin_lock_irqsave(&fcloop_lock, flags);
-
- list_for_each_entry(tlport, &fcloop_lports, lport_list) {
- if (tlport->localport->node_name == nodename &&
- tlport->localport->port_name == portname) {
- if (!fcloop_lport_get(tlport))
- break;
- lport = tlport;
- break;
- }
- }
- spin_unlock_irqrestore(&fcloop_lock, flags);
-
+ lport = fcloop_lport_lookup(nodename, portname);
if (!lport)
return -ENOENT;
- ret = __wait_localport_unreg(lport);
+ ret = __localport_unreg(lport);
fcloop_lport_put(lport);
return ret ? ret : count;
@@ -1223,8 +1333,8 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
- struct fcloop_nport *newnport, *nport = NULL;
- struct fcloop_lport *tmplport, *lport = NULL;
+ struct fcloop_nport *newnport, *nport;
+ struct fcloop_lport *lport;
struct fcloop_ctrl_options *opts;
unsigned long flags;
u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
@@ -1239,10 +1349,8 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
goto out_free_opts;
/* everything there ? */
- if ((opts->mask & opts_mask) != opts_mask) {
- ret = -EINVAL;
+ if ((opts->mask & opts_mask) != opts_mask)
goto out_free_opts;
- }
newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
if (!newnport)
@@ -1258,60 +1366,61 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
refcount_set(&newnport->ref, 1);
spin_lock_irqsave(&fcloop_lock, flags);
-
- list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
- if (tmplport->localport->node_name == opts->wwnn &&
- tmplport->localport->port_name == opts->wwpn)
- goto out_invalid_opts;
-
- if (tmplport->localport->node_name == opts->lpwwnn &&
- tmplport->localport->port_name == opts->lpwwpn)
- lport = tmplport;
+ lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn);
+ if (lport) {
+ /* invalid configuration */
+ fcloop_lport_put(lport);
+ goto out_free_newnport;
}
if (remoteport) {
- if (!lport)
- goto out_invalid_opts;
- newnport->lport = lport;
- }
-
- list_for_each_entry(nport, &fcloop_nports, nport_list) {
- if (nport->node_name == opts->wwnn &&
- nport->port_name == opts->wwpn) {
- if ((remoteport && nport->rport) ||
- (!remoteport && nport->tport)) {
- nport = NULL;
- goto out_invalid_opts;
- }
-
- fcloop_nport_get(nport);
-
- spin_unlock_irqrestore(&fcloop_lock, flags);
-
- if (remoteport)
- nport->lport = lport;
- if (opts->mask & NVMF_OPT_ROLES)
- nport->port_role = opts->roles;
- if (opts->mask & NVMF_OPT_FCADDR)
- nport->port_id = opts->fcaddr;
+ lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn);
+ if (!lport) {
+ /* invalid configuration */
goto out_free_newnport;
}
}
- list_add_tail(&newnport->nport_list, &fcloop_nports);
+ nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn);
+ if (nport) {
+ if ((remoteport && nport->rport) ||
+ (!remoteport && nport->tport)) {
+ /* invalid configuration */
+ goto out_put_nport;
+ }
+
+ /* found existing nport, discard the new nport */
+ kfree(newnport);
+ } else {
+ list_add_tail(&newnport->nport_list, &fcloop_nports);
+ nport = newnport;
+ }
+ if (opts->mask & NVMF_OPT_ROLES)
+ nport->port_role = opts->roles;
+ if (opts->mask & NVMF_OPT_FCADDR)
+ nport->port_id = opts->fcaddr;
+ if (lport) {
+ if (!nport->lport)
+ nport->lport = lport;
+ else
+ fcloop_lport_put(lport);
+ }
spin_unlock_irqrestore(&fcloop_lock, flags);
kfree(opts);
- return newnport;
+ return nport;
-out_invalid_opts:
- spin_unlock_irqrestore(&fcloop_lock, flags);
+out_put_nport:
+ if (lport)
+ fcloop_lport_put(lport);
+ fcloop_nport_put(nport);
out_free_newnport:
+ spin_unlock_irqrestore(&fcloop_lock, flags);
kfree(newnport);
out_free_opts:
kfree(opts);
- return nport;
+ return NULL;
}
static ssize_t
@@ -1352,6 +1461,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->nport = nport;
rport->lport = nport->lport;
nport->rport = rport;
+ rport->flags = 0;
spin_lock_init(&rport->lock);
INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
INIT_LIST_HEAD(&rport->ls_list);
@@ -1365,21 +1475,18 @@ __unlink_remote_port(struct fcloop_nport *nport)
{
struct fcloop_rport *rport = nport->rport;
+ lockdep_assert_held(&fcloop_lock);
+
if (rport && nport->tport)
nport->tport->remoteport = NULL;
nport->rport = NULL;
- list_del(&nport->nport_list);
-
return rport;
}
static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
- if (!rport)
- return -EALREADY;
-
return nvme_fc_unregister_remoteport(rport->remoteport);
}
@@ -1387,8 +1494,8 @@ static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct fcloop_nport *nport = NULL, *tmpport;
- static struct fcloop_rport *rport;
+ struct fcloop_nport *nport;
+ struct fcloop_rport *rport;
u64 nodename, portname;
unsigned long flags;
int ret;
@@ -1397,24 +1504,24 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- spin_lock_irqsave(&fcloop_lock, flags);
-
- list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
- if (tmpport->node_name == nodename &&
- tmpport->port_name == portname && tmpport->rport) {
- nport = tmpport;
- rport = __unlink_remote_port(nport);
- break;
- }
- }
+ nport = fcloop_nport_lookup(nodename, portname);
+ if (!nport)
+ return -ENOENT;
+ spin_lock_irqsave(&fcloop_lock, flags);
+ rport = __unlink_remote_port(nport);
spin_unlock_irqrestore(&fcloop_lock, flags);
- if (!nport)
- return -ENOENT;
+ if (!rport) {
+ ret = -ENOENT;
+ goto out_nport_put;
+ }
ret = __remoteport_unreg(nport, rport);
+out_nport_put:
+ fcloop_nport_put(nport);
+
return ret ? ret : count;
}
@@ -1452,6 +1559,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
tport->nport = nport;
tport->lport = nport->lport;
nport->tport = tport;
+ tport->flags = 0;
spin_lock_init(&tport->lock);
INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
INIT_LIST_HEAD(&tport->ls_list);
@@ -1465,6 +1573,8 @@ __unlink_target_port(struct fcloop_nport *nport)
{
struct fcloop_tport *tport = nport->tport;
+ lockdep_assert_held(&fcloop_lock);
+
if (tport && nport->rport)
nport->rport->targetport = NULL;
nport->tport = NULL;
@@ -1475,9 +1585,6 @@ __unlink_target_port(struct fcloop_nport *nport)
static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
- if (!tport)
- return -EALREADY;
-
return nvmet_fc_unregister_targetport(tport->targetport);
}
@@ -1485,8 +1592,8 @@ static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct fcloop_nport *nport = NULL, *tmpport;
- struct fcloop_tport *tport = NULL;
+ struct fcloop_nport *nport;
+ struct fcloop_tport *tport;
u64 nodename, portname;
unsigned long flags;
int ret;
@@ -1495,24 +1602,24 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- spin_lock_irqsave(&fcloop_lock, flags);
-
- list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
- if (tmpport->node_name == nodename &&
- tmpport->port_name == portname && tmpport->tport) {
- nport = tmpport;
- tport = __unlink_target_port(nport);
- break;
- }
- }
+ nport = fcloop_nport_lookup(nodename, portname);
+ if (!nport)
+ return -ENOENT;
+ spin_lock_irqsave(&fcloop_lock, flags);
+ tport = __unlink_target_port(nport);
spin_unlock_irqrestore(&fcloop_lock, flags);
- if (!nport)
- return -ENOENT;
+ if (!tport) {
+ ret = -ENOENT;
+ goto out_nport_put;
+ }
ret = __targetport_unreg(nport, tport);
+out_nport_put:
+ fcloop_nport_put(nport);
+
return ret ? ret : count;
}
@@ -1578,15 +1685,20 @@ static const struct class fcloop_class = {
};
static struct device *fcloop_device;
-
static int __init fcloop_init(void)
{
int ret;
+ lsreq_cache = kmem_cache_create("lsreq_cache",
+ sizeof(struct fcloop_lsreq), 0,
+ 0, NULL);
+ if (!lsreq_cache)
+ return -ENOMEM;
+
ret = class_register(&fcloop_class);
if (ret) {
pr_err("couldn't register class fcloop\n");
- return ret;
+ goto out_destroy_cache;
}
fcloop_device = device_create_with_groups(
@@ -1604,13 +1716,15 @@ static int __init fcloop_init(void)
out_destroy_class:
class_unregister(&fcloop_class);
+out_destroy_cache:
+ kmem_cache_destroy(lsreq_cache);
return ret;
}
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport = NULL;
- struct fcloop_nport *nport = NULL;
+ struct fcloop_lport *lport;
+ struct fcloop_nport *nport;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
@@ -1621,7 +1735,7 @@ static void __exit fcloop_exit(void)
for (;;) {
nport = list_first_entry_or_null(&fcloop_nports,
typeof(*nport), nport_list);
- if (!nport)
+ if (!nport || !fcloop_nport_get(nport))
break;
tport = __unlink_target_port(nport);
@@ -1629,13 +1743,21 @@ static void __exit fcloop_exit(void)
spin_unlock_irqrestore(&fcloop_lock, flags);
- ret = __targetport_unreg(nport, tport);
- if (ret)
- pr_warn("%s: Failed deleting target port\n", __func__);
+ if (tport) {
+ ret = __targetport_unreg(nport, tport);
+ if (ret)
+ pr_warn("%s: Failed deleting target port\n",
+ __func__);
+ }
- ret = __remoteport_unreg(nport, rport);
- if (ret)
- pr_warn("%s: Failed deleting remote port\n", __func__);
+ if (rport) {
+ ret = __remoteport_unreg(nport, rport);
+ if (ret)
+ pr_warn("%s: Failed deleting remote port\n",
+ __func__);
+ }
+
+ fcloop_nport_put(nport);
spin_lock_irqsave(&fcloop_lock, flags);
}
@@ -1648,7 +1770,7 @@ static void __exit fcloop_exit(void)
spin_unlock_irqrestore(&fcloop_lock, flags);
- ret = __wait_localport_unreg(lport);
+ ret = __localport_unreg(lport);
if (ret)
pr_warn("%s: Failed deleting local port\n", __func__);
@@ -1663,6 +1785,7 @@ static void __exit fcloop_exit(void)
device_destroy(&fcloop_class, MKDEV(0, 0));
class_unregister(&fcloop_class);
+ kmem_cache_destroy(lsreq_cache);
}
module_init(fcloop_init);
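
fcloop now allocates its LS requests from a dedicated slab cache instead of embedding them in the transport's private area (lsrqst_priv_sz is gone from both templates above). The lifecycle, reduced to a sketch with made-up names (lsreq_obj, obj_cache):

#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>

struct lsreq_obj {
	struct list_head list;
	void *payload;
};

static struct kmem_cache *obj_cache;

static int __init obj_init(void)
{
	/* one cache for all objects of this fixed size */
	obj_cache = kmem_cache_create("obj_cache",
				      sizeof(struct lsreq_obj), 0, 0, NULL);
	return obj_cache ? 0 : -ENOMEM;
}

static struct lsreq_obj *obj_alloc(void)
{
	/* may fail: callers must handle NULL, unlike embedded storage */
	return kmem_cache_alloc(obj_cache, GFP_KERNEL);
}

static void obj_free(struct lsreq_obj *obj)
{
	kmem_cache_free(obj_cache, obj);
}

static void __exit obj_exit(void)
{
	/* every object must be freed before the cache is destroyed */
	kmem_cache_destroy(obj_cache);
}

module_init(obj_init);
module_exit(obj_exit);
MODULE_LICENSE("GPL");
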
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 83be0657e6df..eba42df2f821 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -133,7 +133,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
* Right now there exists M : 1 mapping between block layer error
* to the NVMe status code (see nvme_error_status()). For consistency,
* when we reverse map we use most appropriate NVMe Status code from
- * the group of the NVMe staus codes used in the nvme_error_status().
+ * the group of the NVMe status codes used in the nvme_error_status().
*/
switch (blk_sts) {
case BLK_STS_NOSPC:
@@ -145,15 +145,8 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
req->error_loc = offsetof(struct nvme_rw_command, slba);
break;
case BLK_STS_NOTSUPP:
+ status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
- switch (req->cmd->common.opcode) {
- case nvme_cmd_dsm:
- case nvme_cmd_write_zeroes:
- status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
- break;
- default:
- status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
- }
break;
case BLK_STS_MEDIUM:
status = NVME_SC_ACCESS_DENIED;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a5c41144667c..f85a8441bcc6 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -33,10 +33,12 @@ struct nvme_loop_ctrl {
struct list_head list;
struct blk_mq_tag_set tag_set;
- struct nvme_loop_iod async_event_iod;
struct nvme_ctrl ctrl;
struct nvmet_port *port;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct nvme_loop_iod async_event_iod;
};
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@@ -148,8 +150,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
nvme_start_request(req);
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
iod->req.port = queue->ctrl->port;
- if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
- &queue->nvme_sq, &nvme_loop_ops))
+ if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops))
return BLK_STS_OK;
if (blk_rq_nr_phys_segments(req)) {
@@ -181,8 +182,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
- if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
- &nvme_loop_ops)) {
+ if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) {
dev_err(ctrl->ctrl.device, "failed async event work\n");
return;
}
@@ -273,6 +273,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
nvme_unquiesce_admin_queue(&ctrl->ctrl);
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+ nvmet_cq_put(&ctrl->queues[0].nvme_cq);
nvme_remove_admin_tag_set(&ctrl->ctrl);
}
@@ -302,6 +303,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ nvmet_cq_put(&ctrl->queues[i].nvme_cq);
}
ctrl->ctrl.queue_count = 1;
/*
@@ -327,9 +329,13 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
for (i = 1; i <= nr_io_queues; i++) {
ctrl->queues[i].ctrl = ctrl;
- ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
- if (ret)
+ nvmet_cq_init(&ctrl->queues[i].nvme_cq);
+ ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq,
+ &ctrl->queues[i].nvme_cq);
+ if (ret) {
+ nvmet_cq_put(&ctrl->queues[i].nvme_cq);
goto out_destroy_queues;
+ }
ctrl->ctrl.queue_count++;
}
@@ -360,9 +366,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
int error;
ctrl->queues[0].ctrl = ctrl;
- error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
- if (error)
+ nvmet_cq_init(&ctrl->queues[0].nvme_cq);
+ error = nvmet_sq_init(&ctrl->queues[0].nvme_sq,
+ &ctrl->queues[0].nvme_cq);
+ if (error) {
+ nvmet_cq_put(&ctrl->queues[0].nvme_cq);
return error;
+ }
ctrl->ctrl.queue_count = 1;
error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
@@ -401,6 +411,7 @@ out_cleanup_tagset:
nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+ nvmet_cq_put(&ctrl->queues[0].nvme_cq);
return error;
}
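
The same CQ/SQ setup pattern appears in loop.c above and again in the rdma.c and tcp.c hunks below: initialize the CQ reference first, and drop it if SQ initialization fails. Factored into a sketch (no helper like this exists in the tree; it only names the pattern):

#include "nvmet.h"	/* nvmet_cq_init(), nvmet_sq_init(), nvmet_cq_put() */

static int setup_queue_pair(struct nvmet_sq *sq, struct nvmet_cq *cq)
{
	int ret;

	nvmet_cq_init(cq);		/* CQ refcount starts at 1 */
	ret = nvmet_sq_init(sq, cq);	/* SQ binds to (and pins) the CQ */
	if (ret)
		nvmet_cq_put(cq);	/* undo the init reference */
	return ret;
}

/* Teardown mirrors it: nvmet_sq_destroy(sq); nvmet_cq_put(cq); */
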
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index b6db8b74dc4a..df69a9dee71c 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -141,13 +141,16 @@ static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
}
struct nvmet_cq {
+ struct nvmet_ctrl *ctrl;
u16 qid;
u16 size;
+ refcount_t ref;
};
struct nvmet_sq {
struct nvmet_ctrl *ctrl;
struct percpu_ref ref;
+ struct nvmet_cq *cq;
u16 qid;
u16 size;
u32 sqhd;
@@ -247,6 +250,7 @@ struct nvmet_pr_log_mgr {
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
struct nvmet_sq **sqs;
+ struct nvmet_cq **cqs;
void *drvdata;
@@ -424,7 +428,7 @@ struct nvmet_fabrics_ops {
u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
/* Operations mandatory for PCI target controllers */
- u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags,
+ u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 cqid, u16 flags,
u16 qsize, u64 prp1);
u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid);
u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags,
@@ -557,8 +561,8 @@ u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);
-bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
- struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
+ const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
size_t nvmet_req_transfer_len(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
@@ -571,18 +575,24 @@ void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
-u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid);
+u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create);
+u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create);
+void nvmet_cq_init(struct nvmet_cq *cq);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
+void nvmet_cq_destroy(struct nvmet_cq *cq);
+bool nvmet_cq_get(struct nvmet_cq *cq);
+void nvmet_cq_put(struct nvmet_cq *cq);
+bool nvmet_cq_in_use(struct nvmet_cq *cq);
u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
u16 size);
-u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
- u16 size);
+u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
+ struct nvmet_cq *cq, u16 qid, u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
-int nvmet_sq_init(struct nvmet_sq *sq);
+int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
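
nvmet.h above adds a refcount_t to struct nvmet_cq plus get/put/in_use helpers. A plausible implementation, inferred from the header alone (the actual core.c bodies are not part of this hunk and may differ):

#include <linux/refcount.h>

void nvmet_cq_init(struct nvmet_cq *cq)
{
	refcount_set(&cq->ref, 1);	/* creator's reference */
}

bool nvmet_cq_get(struct nvmet_cq *cq)
{
	/* fails once the last reference is gone */
	return refcount_inc_not_zero(&cq->ref);
}

void nvmet_cq_put(struct nvmet_cq *cq)
{
	if (refcount_dec_and_test(&cq->ref))
		nvmet_cq_destroy(cq);
}

bool nvmet_cq_in_use(struct nvmet_cq *cq)
{
	/* more than the creator's reference means SQs still point here */
	return refcount_read(&cq->ref) > 1;
}
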
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 26e2907ce8bb..b7515c53829b 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -99,7 +99,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
/*
* The passthru NVMe driver may have a limit on the number of segments
- * which depends on the host's memory fragementation. To solve this,
+ * which depends on the host's memory fragmentation. To solve this,
* ensure mdts is limited to the pages equal to the number of segments.
*/
max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 7fab7f3d79b7..a4295a5b8d28 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -62,8 +62,7 @@ static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
#define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1)
enum nvmet_pci_epf_queue_flags {
- NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */
- NVMET_PCI_EPF_Q_LIVE, /* The queue is live */
+ NVMET_PCI_EPF_Q_LIVE = 0, /* The queue is live */
NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */
};
@@ -596,9 +595,6 @@ static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
struct nvmet_pci_epf_irq_vector *iv = cq->iv;
bool ret;
- if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
- return false;
-
/* IRQ coalescing for the admin queue is not allowed. */
if (!cq->qid)
return true;
@@ -625,7 +621,8 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
struct pci_epf *epf = nvme_epf->epf;
int ret = 0;
- if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) ||
+ !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
return;
mutex_lock(&ctrl->irq_lock);
@@ -636,14 +633,16 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
switch (nvme_epf->irq_type) {
case PCI_IRQ_MSIX:
case PCI_IRQ_MSI:
+ /*
+ * If we fail to raise an MSI or MSI-X interrupt, it is likely
+ * because the host is using legacy INTX IRQs (e.g. BIOS,
+ * grub), but we can fall back to the INTX type only if the
+ * endpoint controller supports this type.
+ */
ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
nvme_epf->irq_type, cq->vector + 1);
- if (!ret)
+ if (!ret || !nvme_epf->epc_features->intx_capable)
break;
- /*
- * If we got an error, it is likely because the host is using
- * legacy IRQs (e.g. BIOS, grub).
- */
fallthrough;
case PCI_IRQ_INTX:
ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
@@ -656,7 +655,9 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
}
if (ret)
- dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret);
+ dev_err_ratelimited(ctrl->dev,
+ "CQ[%u]: Failed to raise IRQ (err=%d)\n",
+ cq->qid, ret);
unlock:
mutex_unlock(&ctrl->irq_lock);
@@ -1319,8 +1320,14 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
- dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
- cqid, qsize, cq->qes, cq->vector);
+ if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ dev_dbg(ctrl->dev,
+ "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
+ cqid, qsize, cq->qes, cq->vector);
+ else
+ dev_dbg(ctrl->dev,
+ "CQ[%u]: %u entries of %zu B, IRQ disabled\n",
+ cqid, qsize, cq->qes);
return NVME_SC_SUCCESS;
@@ -1344,17 +1351,20 @@ static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
cancel_delayed_work_sync(&cq->work);
nvmet_pci_epf_drain_queue(cq);
- nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+ if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
+ nvmet_cq_put(&cq->nvme_cq);
return NVME_SC_SUCCESS;
}
static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
- u16 sqid, u16 flags, u16 qsize, u64 pci_addr)
+ u16 sqid, u16 cqid, u16 flags, u16 qsize, u64 pci_addr)
{
struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
+ struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
u16 status;
if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
@@ -1377,7 +1387,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
sq->qes = ctrl->io_sqes;
sq->pci_size = sq->qes * sq->depth;
- status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
+ status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid,
+ sq->depth);
if (status != NVME_SC_SUCCESS)
return status;
@@ -1533,7 +1544,6 @@ static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
if (sq) {
queue = &ctrl->sq[qid];
- set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags);
} else {
queue = &ctrl->cq[qid];
INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
@@ -1594,8 +1604,7 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
goto complete;
}
- if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq,
- &nvmet_pci_epf_fabrics_ops))
+ if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
goto complete;
iod->data_len = nvmet_req_transfer_len(req);
@@ -1872,8 +1881,8 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
qsize = aqa & 0x00000fff;
pci_addr = asq & GENMASK_ULL(63, 12);
- status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG,
- qsize, pci_addr);
+ status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0,
+ NVME_QUEUE_PHYS_CONTIG, qsize, pci_addr);
if (status != NVME_SC_SUCCESS) {
dev_err(ctrl->dev, "Failed to create admin submission queue\n");
nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 2a4536ef6184..67f61c67c167 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -976,8 +976,7 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->send_sge.addr, cmd->send_sge.length,
DMA_TO_DEVICE);
- if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
- &queue->nvme_sq, &nvmet_rdma_ops))
+ if (!nvmet_req_init(&cmd->req, &queue->nvme_sq, &nvmet_rdma_ops))
return;
status = nvmet_rdma_map_sgl(cmd);
@@ -1353,6 +1352,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
pr_debug("freeing queue %d\n", queue->idx);
nvmet_sq_destroy(&queue->nvme_sq);
+ nvmet_cq_put(&queue->nvme_cq);
nvmet_rdma_destroy_queue_ib(queue);
if (!queue->nsrq) {
@@ -1436,7 +1436,8 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
goto out_reject;
}
- ret = nvmet_sq_init(&queue->nvme_sq);
+ nvmet_cq_init(&queue->nvme_cq);
+ ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
if (ret) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_free_queue;
@@ -1517,6 +1518,7 @@ out_ida_remove:
out_destroy_sq:
nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
+ nvmet_cq_put(&queue->nvme_cq);
kfree(queue);
out_reject:
nvmet_rdma_cm_reject(cm_id, ret);
@@ -1999,7 +2001,7 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
struct nvmet_rdma_port *port = nport->priv;
struct rdma_cm_id *cm_id = port->cm_id;
- if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
+ if (inet_addr_is_any(&cm_id->route.addr.src_addr)) {
struct nvmet_rdma_rsp *rsp =
container_of(req, struct nvmet_rdma_rsp, req);
struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 12a5cb8641ca..688033b88d38 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/crc32c.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
@@ -17,7 +18,6 @@
#include <net/handshake.h>
#include <linux/inet.h>
#include <linux/llist.h>
-#include <crypto/hash.h>
#include <trace/events/sock.h>
#include "nvmet.h"
@@ -172,8 +172,6 @@ struct nvmet_tcp_queue {
/* digest state */
bool hdr_digest;
bool data_digest;
- struct ahash_request *snd_hash;
- struct ahash_request *rcv_hash;
/* TLS state */
key_serial_t tls_pskid;
@@ -294,14 +292,9 @@ static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
-static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
- void *pdu, size_t len)
+static inline void nvmet_tcp_hdgst(void *pdu, size_t len)
{
- struct scatterlist sg;
-
- sg_init_one(&sg, pdu, len);
- ahash_request_set_crypt(hash, &sg, pdu + len, len);
- crypto_ahash_digest(hash);
+ put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len);
}
static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
@@ -318,7 +311,7 @@ static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
}
recv_digest = *(__le32 *)(pdu + hdr->hlen);
- nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
+ nvmet_tcp_hdgst(pdu, len);
exp_digest = *(__le32 *)(pdu + hdr->hlen);
if (recv_digest != exp_digest) {
pr_err("queue %d: header digest error: recv %#x expected %#x\n",
@@ -441,12 +434,24 @@ err:
return NVME_SC_INTERNAL;
}
-static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
- struct nvmet_tcp_cmd *cmd)
+static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd)
{
- ahash_request_set_crypt(hash, cmd->req.sg,
- (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
- crypto_ahash_digest(hash);
+ size_t total_len = cmd->req.transfer_len;
+ struct scatterlist *sg = cmd->req.sg;
+ u32 crc = ~0;
+
+ while (total_len) {
+ size_t len = min_t(size_t, total_len, sg->length);
+
+ /*
+ * Note that the scatterlist does not contain any highmem pages,
+ * as it was allocated by sgl_alloc() with GFP_KERNEL.
+ */
+ crc = crc32c(crc, sg_virt(sg), len);
+ total_len -= len;
+ sg = sg_next(sg);
+ }
+ cmd->exp_ddgst = cpu_to_le32(~crc);
}
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
@@ -473,19 +478,18 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
if (queue->data_digest) {
pdu->hdr.flags |= NVME_TCP_F_DDGST;
- nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
+ nvmet_tcp_calc_ddgst(cmd);
}
if (cmd->queue->hdr_digest) {
pdu->hdr.flags |= NVME_TCP_F_HDGST;
- nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ nvmet_tcp_hdgst(pdu, sizeof(*pdu));
}
}
static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
- struct nvmet_tcp_queue *queue = cmd->queue;
u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
cmd->offset = 0;
@@ -503,14 +507,13 @@ static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
if (cmd->queue->hdr_digest) {
pdu->hdr.flags |= NVME_TCP_F_HDGST;
- nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ nvmet_tcp_hdgst(pdu, sizeof(*pdu));
}
}
static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
- struct nvmet_tcp_queue *queue = cmd->queue;
u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
cmd->offset = 0;
@@ -523,7 +526,7 @@ static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
if (cmd->queue->hdr_digest) {
pdu->hdr.flags |= NVME_TCP_F_HDGST;
- nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ nvmet_tcp_hdgst(pdu, sizeof(*pdu));
}
}
@@ -857,42 +860,6 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
}
-static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
-
- ahash_request_free(queue->rcv_hash);
- ahash_request_free(queue->snd_hash);
- crypto_free_ahash(tfm);
-}
-
-static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
-{
- struct crypto_ahash *tfm;
-
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!queue->snd_hash)
- goto free_tfm;
- ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
-
- queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!queue->rcv_hash)
- goto free_snd_hash;
- ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
-
- return 0;
-free_snd_hash:
- ahash_request_free(queue->snd_hash);
-free_tfm:
- crypto_free_ahash(tfm);
- return -ENOMEM;
-}
-
-
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
@@ -921,11 +888,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
- if (queue->hdr_digest || queue->data_digest) {
- ret = nvmet_tcp_alloc_crypto(queue);
- if (ret)
- return ret;
- }
memset(icresp, 0, sizeof(*icresp));
icresp->hdr.type = nvme_tcp_icresp;
@@ -1077,8 +1039,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
req = &queue->cmd->req;
memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
- if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
- &queue->nvme_sq, &nvmet_tcp_ops))) {
+ if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) {
pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n",
req->cmd, req->cmd->common.command_id,
req->cmd->common.opcode,
@@ -1247,7 +1208,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
struct nvmet_tcp_queue *queue = cmd->queue;
- nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
+ nvmet_tcp_calc_ddgst(cmd);
queue->offset = 0;
queue->left = NVME_TCP_DIGEST_LENGTH;
queue->rcv_state = NVMET_TCP_RECV_DDGST;
@@ -1615,13 +1576,12 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_sq_put_tls_key(&queue->nvme_sq);
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
+ nvmet_cq_put(&queue->nvme_cq);
cancel_work_sync(&queue->io_work);
nvmet_tcp_free_cmd_data_in_buffers(queue);
/* ->sock will be released by fput() */
fput(queue->sock->file);
nvmet_tcp_free_cmds(queue);
- if (queue->hdr_digest || queue->data_digest)
- nvmet_tcp_free_crypto(queue);
ida_free(&nvmet_tcp_queue_ida, queue->idx);
page_frag_cache_drain(&queue->pf_cache);
kfree(queue);
@@ -1950,7 +1910,8 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_ida_remove;
- ret = nvmet_sq_init(&queue->nvme_sq);
+ nvmet_cq_init(&queue->nvme_cq);
+ ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
if (ret)
goto out_free_connect;
@@ -1993,6 +1954,7 @@ out_destroy_sq:
mutex_unlock(&nvmet_tcp_queue_mutex);
nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
+ nvmet_cq_put(&queue->nvme_cq);
nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
ida_free(&nvmet_tcp_queue_ida, queue->idx);
@@ -2194,7 +2156,7 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
{
struct nvmet_tcp_port *port = nport->priv;
- if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
+ if (inet_addr_is_any(&port->addr)) {
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
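
The tcp.c conversion above drops the crypto_ahash machinery in favour of direct crc32c() calls. The NVMe/TCP digest convention it implements: seed with ~0, invert the final value, store little-endian. Because CRC32C chains, feeding the payload segment by segment (as nvmet_tcp_calc_ddgst() does over the scatterlist) yields the same digest as one pass over the concatenated data. A sketch:

#include <linux/crc32c.h>
#include <linux/kernel.h>
#include <linux/types.h>

static __le32 tcp_digest(const void *buf, size_t len)
{
	/* one-shot form, as used for PDU header digests */
	return cpu_to_le32(~crc32c(~0, buf, len));
}

static __le32 tcp_digest_chained(const void *a, size_t alen,
				 const void *b, size_t blen)
{
	u32 crc = ~0;

	crc = crc32c(crc, a, alen);	/* intermediate CRC, not inverted */
	crc = crc32c(crc, b, blen);
	return cpu_to_le32(~crc);	/* invert only once, at the end */
}
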
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 8671b7c974b9..d370b2ad11e7 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -40,6 +40,19 @@ config NVMEM_APPLE_EFUSES
This driver can also be built as a module. If so, the module will
be called nvmem-apple-efuses.
+config NVMEM_APPLE_SPMI
+ tristate "Apple SPMI NVMEM"
+ depends on ARCH_APPLE || COMPILE_TEST
+ depends on SPMI
+ select REGMAP_SPMI
+ help
+ Say Y here to build a driver to expose NVMEM cells for a set of power
+ and RTC-related settings on an SPMI-attached PMIC present on Apple
+ devices, such as Apple Silicon Macs.
+
+ This driver can also be built as a module. If so, the module
+ will be called apple-nvmem-spmi.
+
config NVMEM_BCM_OCOTP
tristate "Broadcom On-Chip OTP Controller support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -154,6 +167,18 @@ config NVMEM_LPC18XX_OTP
To compile this driver as a module, choose M here: the module
will be called nvmem_lpc18xx_otp.
+config NVMEM_MAX77759
+ tristate "Maxim Integrated MAX77759 NVMEM Support"
+ depends on MFD_MAX77759
+ default MFD_MAX77759
+ help
+ Say Y here to include support for the user-accessible storage found
+ in Maxim Integrated MAX77759 PMICs. This IC provides space for 30
+ bytes of storage.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-max77759.
+
config NVMEM_MESON_EFUSE
tristate "Amlogic Meson GX eFuse Support"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
@@ -260,6 +285,7 @@ config NVMEM_RCAR_EFUSE
config NVMEM_RMEM
tristate "Reserved Memory Based Driver Support"
depends on HAS_IOMEM
+ select CRC32
help
This driver maps reserved memory into an nvmem device. It might be
useful to expose information left by firmware in memory.
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 5b77bbb6488b..2021d59688db 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -12,6 +12,8 @@ obj-y += layouts/
# Devices
obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
nvmem-apple-efuses-y := apple-efuses.o
+obj-$(CONFIG_NVMEM_APPLE_SPMI) += apple_nvmem_spmi.o
+apple_nvmem_spmi-y := apple-spmi-nvmem.o
obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
nvmem-bcm-ocotp-y := bcm-ocotp.o
obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
@@ -34,6 +36,8 @@ obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
nvmem_lpc18xx_otp-y := lpc18xx_otp.o
+obj-$(CONFIG_NVMEM_MAX77759) += nvmem-max77759.o
+nvmem-max77759-y := max77759-nvmem.o
obj-$(CONFIG_NVMEM_MESON_EFUSE) += nvmem_meson_efuse.o
nvmem_meson_efuse-y := meson-efuse.o
obj-$(CONFIG_NVMEM_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
diff --git a/drivers/nvmem/apple-spmi-nvmem.c b/drivers/nvmem/apple-spmi-nvmem.c
new file mode 100644
index 000000000000..88614005d5ce
--- /dev/null
+++ b/drivers/nvmem/apple-spmi-nvmem.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SPMI NVMEM driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/spmi.h>
+#include <linux/regmap.h>
+
+static const struct regmap_config apple_spmi_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0xffff,
+};
+
+static int apple_spmi_nvmem_probe(struct spmi_device *sdev)
+{
+ struct regmap *regmap;
+ struct nvmem_config nvmem_cfg = {
+ .dev = &sdev->dev,
+ .name = "spmi_nvmem",
+ .id = NVMEM_DEVID_AUTO,
+ .word_size = 1,
+ .stride = 1,
+ .size = 0xffff,
+ .reg_read = (void *)regmap_bulk_read,
+ .reg_write = (void *)regmap_bulk_write,
+ };
+
+ regmap = devm_regmap_init_spmi_ext(sdev, &apple_spmi_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ nvmem_cfg.priv = regmap;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(&sdev->dev, &nvmem_cfg));
+}
+
+static const struct of_device_id apple_spmi_nvmem_id_table[] = {
+ { .compatible = "apple,spmi-nvmem" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, apple_spmi_nvmem_id_table);
+
+static struct spmi_driver apple_spmi_nvmem_driver = {
+ .probe = apple_spmi_nvmem_probe,
+ .driver = {
+ .name = "apple-spmi-nvmem",
+ .of_match_table = apple_spmi_nvmem_id_table,
+ },
+};
+
+module_spmi_driver(apple_spmi_nvmem_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("Apple SPMI NVMEM driver");
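
For context on how the cells such a provider exposes get consumed: a hedged consumer-side example using the generic NVMEM consumer API. The cell name "boot-flag" is invented for illustration; real cell names come from the devicetree binding.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int read_boot_flag(struct device *dev, u8 *out)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *val;

	cell = nvmem_cell_get(dev, "boot-flag");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	val = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(val))
		return PTR_ERR(val);

	*out = val[0];
	kfree(val);	/* nvmem_cell_read() returns a kmalloc'd buffer */
	return 0;
}
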
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index e206efc29a00..fd2a9698d1c9 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -47,9 +47,6 @@ struct nvmem_cell {
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);
-static DEFINE_MUTEX(nvmem_cell_mutex);
-static LIST_HEAD(nvmem_cell_tables);
-
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);
@@ -719,41 +716,6 @@ int nvmem_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
-static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
-{
- const struct nvmem_cell_info *info;
- struct nvmem_cell_table *table;
- struct nvmem_cell_entry *cell;
- int rval = 0, i;
-
- mutex_lock(&nvmem_cell_mutex);
- list_for_each_entry(table, &nvmem_cell_tables, node) {
- if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
- for (i = 0; i < table->ncells; i++) {
- info = &table->cells[i];
-
- cell = kzalloc(sizeof(*cell), GFP_KERNEL);
- if (!cell) {
- rval = -ENOMEM;
- goto out;
- }
-
- rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
- if (rval) {
- kfree(cell);
- goto out;
- }
-
- nvmem_cell_entry_add(cell);
- }
- }
- }
-
-out:
- mutex_unlock(&nvmem_cell_mutex);
- return rval;
-}
-
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
@@ -1040,10 +1002,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
goto err_remove_cells;
}
- rval = nvmem_add_cells_from_table(nvmem);
- if (rval)
- goto err_remove_cells;
-
if (config->add_legacy_fixed_of_cells) {
rval = nvmem_add_cells_from_legacy_of(nvmem);
if (rval)
@@ -2152,32 +2110,6 @@ int nvmem_device_write(struct nvmem_device *nvmem,
EXPORT_SYMBOL_GPL(nvmem_device_write);
/**
- * nvmem_add_cell_table() - register a table of cell info entries
- *
- * @table: table of cell info entries
- */
-void nvmem_add_cell_table(struct nvmem_cell_table *table)
-{
- mutex_lock(&nvmem_cell_mutex);
- list_add_tail(&table->node, &nvmem_cell_tables);
- mutex_unlock(&nvmem_cell_mutex);
-}
-EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
-
-/**
- * nvmem_del_cell_table() - remove a previously registered cell info table
- *
- * @table: table of cell info entries
- */
-void nvmem_del_cell_table(struct nvmem_cell_table *table)
-{
- mutex_lock(&nvmem_cell_mutex);
- list_del(&table->node);
- mutex_unlock(&nvmem_cell_mutex);
-}
-EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
-
-/**
* nvmem_add_cell_lookups() - register a list of cell lookup entries
*
* @entries: array of cell lookup entries
diff --git a/drivers/nvmem/max77759-nvmem.c b/drivers/nvmem/max77759-nvmem.c
new file mode 100644
index 000000000000..c9961ad0e232
--- /dev/null
+++ b/drivers/nvmem/max77759-nvmem.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright 2020 Google Inc
+// Copyright 2025 Linaro Ltd.
+//
+// NVMEM driver for Maxim MAX77759
+
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/device/driver.h>
+#include <linux/err.h>
+#include <linux/mfd/max77759.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+#define MAX77759_NVMEM_OPCODE_HEADER_LEN 3
+/*
+ * NVMEM commands have a three-byte header (which becomes part of the
+ * command), so we need to subtract that.
+ */
+#define MAX77759_NVMEM_SIZE (MAX77759_MAXQ_OPCODE_MAXLENGTH \
+ - MAX77759_NVMEM_OPCODE_HEADER_LEN)
+
+struct max77759_nvmem {
+ struct device *dev;
+ struct max77759 *max77759;
+};
+
+static int max77759_nvmem_reg_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct max77759_nvmem *nvmem = priv;
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length,
+ MAX77759_NVMEM_OPCODE_HEADER_LEN);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length,
+ MAX77759_MAXQ_OPCODE_MAXLENGTH);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_USER_SPACE_READ;
+ cmd->cmd[1] = offset;
+ cmd->cmd[2] = bytes;
+ rsp->length = bytes + MAX77759_NVMEM_OPCODE_HEADER_LEN;
+
+ ret = max77759_maxq_command(nvmem->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ if (memcmp(cmd->cmd, rsp->rsp, MAX77759_NVMEM_OPCODE_HEADER_LEN)) {
+ dev_warn(nvmem->dev, "protocol error (read)\n");
+ return -EIO;
+ }
+
+ memcpy(val, &rsp->rsp[MAX77759_NVMEM_OPCODE_HEADER_LEN], bytes);
+
+ return 0;
+}
+
+static int max77759_nvmem_reg_write(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct max77759_nvmem *nvmem = priv;
+ DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length,
+ MAX77759_MAXQ_OPCODE_MAXLENGTH);
+ DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length,
+ MAX77759_MAXQ_OPCODE_MAXLENGTH);
+ int ret;
+
+ cmd->cmd[0] = MAX77759_MAXQ_OPCODE_USER_SPACE_WRITE;
+ cmd->cmd[1] = offset;
+ cmd->cmd[2] = bytes;
+ memcpy(&cmd->cmd[MAX77759_NVMEM_OPCODE_HEADER_LEN], val, bytes);
+ cmd->length = bytes + MAX77759_NVMEM_OPCODE_HEADER_LEN;
+ rsp->length = cmd->length;
+
+ ret = max77759_maxq_command(nvmem->max77759, cmd, rsp);
+ if (ret < 0)
+ return ret;
+
+ if (memcmp(cmd->cmd, rsp->rsp, cmd->length)) {
+ dev_warn(nvmem->dev, "protocol error (write)\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int max77759_nvmem_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = {
+ .dev = &pdev->dev,
+ .name = dev_name(&pdev->dev),
+ .id = NVMEM_DEVID_NONE,
+ .type = NVMEM_TYPE_EEPROM,
+ .ignore_wp = true,
+ .size = MAX77759_NVMEM_SIZE,
+ .word_size = sizeof(u8),
+ .stride = sizeof(u8),
+ .reg_read = max77759_nvmem_reg_read,
+ .reg_write = max77759_nvmem_reg_write,
+ };
+ struct max77759_nvmem *nvmem;
+
+ nvmem = devm_kzalloc(&pdev->dev, sizeof(*nvmem), GFP_KERNEL);
+ if (!nvmem)
+ return -ENOMEM;
+
+ nvmem->dev = &pdev->dev;
+ nvmem->max77759 = dev_get_drvdata(pdev->dev.parent);
+
+ config.priv = nvmem;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(config.dev, &config));
+}
+
+static const struct of_device_id max77759_nvmem_of_id[] = {
+ { .compatible = "maxim,max77759-nvmem", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77759_nvmem_of_id);
+
+static const struct platform_device_id max77759_nvmem_platform_id[] = {
+ { "max77759-nvmem", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, max77759_nvmem_platform_id);
+
+static struct platform_driver max77759_nvmem_driver = {
+ .driver = {
+ .name = "max77759-nvmem",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = max77759_nvmem_of_id,
+ },
+ .probe = max77759_nvmem_probe,
+ .id_table = max77759_nvmem_platform_id,
+};
+
+module_platform_driver(max77759_nvmem_driver);
+
+MODULE_AUTHOR("André Draszik <andre.draszik@linaro.org>");
+MODULE_DESCRIPTION("NVMEM driver for Maxim MAX77759");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 8682adaacd69..7da717d6c7fa 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -213,6 +213,7 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
econfig.word_size = 1;
econfig.size = ZYNQMP_NVMEM_SIZE;
econfig.dev = dev;
+ econfig.priv = dev;
econfig.add_legacy_fixed_of_cells = true;
econfig.reg_read = zynqmp_nvmem_read;
econfig.reg_write = zynqmp_nvmem_write;
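The added .priv line matters because the context pointer handed to the
reg_read()/reg_write() callbacks is exactly nvmem_config.priv; without it the
callbacks would receive NULL. A hedged sketch of the receiving side (the body
is illustrative, not the real zynqmp accessor):

static int demo_nvmem_read(void *context, unsigned int offset,
			   void *val, size_t bytes)
{
	struct device *dev = context;	/* == econfig.priv set in probe */

	dev_dbg(dev, "reading %zu bytes at offset %u\n", bytes, offset);
	/* firmware call to fetch the eFUSE data would go here */
	return 0;
}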
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 5053e5d532cc..c80426510ec2 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -35,44 +35,35 @@ EXPORT_SYMBOL(of_match_device);
static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
- struct device_node *node, *of_node = dev->of_node;
- int count, i;
+ struct device_node *of_node = dev->of_node;
+ struct of_phandle_iterator it;
+ int rc, i = 0;
if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
return;
- count = of_property_count_elems_of_size(of_node, "memory-region",
- sizeof(u32));
/*
* If dev->of_node doesn't exist or doesn't contain memory-region, try
* the OF node having DMA configuration.
*/
- if (count <= 0) {
+ if (!of_property_present(of_node, "memory-region"))
of_node = np;
- count = of_property_count_elems_of_size(
- of_node, "memory-region", sizeof(u32));
- }
- for (i = 0; i < count; i++) {
- node = of_parse_phandle(of_node, "memory-region", i);
+ of_for_each_phandle(&it, rc, of_node, "memory-region", NULL, 0) {
/*
* There might be multiple memory regions, but only one
* restricted-dma-pool region is allowed.
*/
- if (of_device_is_compatible(node, "restricted-dma-pool") &&
- of_device_is_available(node)) {
- of_node_put(node);
+ if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
+ of_device_is_available(it.node)) {
+ if (of_reserved_mem_device_init_by_idx(dev, of_node, i))
+ dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
+ of_node_put(it.node);
break;
}
- of_node_put(node);
+ i++;
}
- /*
- * Attempt to initialize a restricted-dma-pool region if one was found.
- * Note that count can hold a negative error code.
- */
- if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
- dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
/**
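A hedged sketch of the of_for_each_phandle() idiom adopted above: the iterator
takes a reference on it.node at each step and drops it when advancing, so only
an early break leaves a reference for the caller to put (demo_scan is
illustrative):

static void demo_scan(struct device_node *np)
{
	struct of_phandle_iterator it;
	int rc;

	of_for_each_phandle(&it, rc, np, "memory-region", NULL, 0) {
		if (of_device_is_compatible(it.node, "restricted-dma-pool")) {
			of_node_put(it.node);	/* break skips the iterator's put */
			break;
		}
	}
}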
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index aedd0e2dcd89..0edd639898a6 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -25,6 +25,7 @@
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
+#include <linux/kexec_handover.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -875,6 +876,36 @@ void __init early_init_dt_check_for_usable_mem_range(void)
memblock_add(rgn[i].base, rgn[i].size);
}
+/**
+ * early_init_dt_check_kho - Decode info required for kexec handover from DT
+ */
+static void __init early_init_dt_check_kho(void)
+{
+ unsigned long node = chosen_node_offset;
+ u64 fdt_start, fdt_size, scratch_start, scratch_size;
+ const __be32 *p;
+ int l;
+
+ if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER) || (long)node < 0)
+ return;
+
+ p = of_get_flat_dt_prop(node, "linux,kho-fdt", &l);
+ if (l != (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32))
+ return;
+
+ fdt_start = dt_mem_next_cell(dt_root_addr_cells, &p);
+ fdt_size = dt_mem_next_cell(dt_root_addr_cells, &p);
+
+ p = of_get_flat_dt_prop(node, "linux,kho-scratch", &l);
+ if (l != (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32))
+ return;
+
+ scratch_start = dt_mem_next_cell(dt_root_addr_cells, &p);
+ scratch_size = dt_mem_next_cell(dt_root_addr_cells, &p);
+
+ kho_populate(fdt_start, fdt_size, scratch_start, scratch_size);
+}
+
#ifdef CONFIG_SERIAL_EARLYCON
int __init early_init_dt_scan_chosen_stdout(void)
@@ -1169,6 +1200,9 @@ void __init early_init_dt_scan_nodes(void)
/* Handle linux,usable-memory-range property */
early_init_dt_check_for_usable_mem_range();
+
+ /* Handle kexec handover */
+ early_init_dt_check_kho();
}
bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys)
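Each of linux,kho-fdt and linux,kho-scratch is an (address, size) pair encoded
with the root #address-cells/#size-cells, which is what the length check above
enforces. A hedged sketch of the decode for the common 2/2-cell layout; note
the code above consumes dt_root_addr_cells for both halves, i.e. it implicitly
assumes the two cell counts match:

/* with #address-cells = <2> and #size-cells = <2>: 4 cells, 16 bytes */
static u64 demo_decode(const __be32 *prop, u64 *size)
{
	const __be32 *p = prop;
	u64 addr = dt_mem_next_cell(2, &p);	/* consumes cells 0-1 */

	*size = dt_mem_next_cell(2, &p);	/* consumes cells 2-3 */
	return addr;
}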
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index 5b924597a4de..1ee2d31816ae 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -264,6 +264,43 @@ static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
}
#endif /* CONFIG_IMA_KEXEC */
+static int kho_add_chosen(const struct kimage *image, void *fdt, int chosen_node)
+{
+ int ret = 0;
+#ifdef CONFIG_KEXEC_HANDOVER
+ phys_addr_t fdt_mem = 0;
+ phys_addr_t fdt_len = 0;
+ phys_addr_t scratch_mem = 0;
+ phys_addr_t scratch_len = 0;
+
+ ret = fdt_delprop(fdt, chosen_node, "linux,kho-fdt");
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ return ret;
+ ret = fdt_delprop(fdt, chosen_node, "linux,kho-scratch");
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ return ret;
+
+ if (!image->kho.fdt || !image->kho.scratch)
+ return 0;
+
+ fdt_mem = image->kho.fdt;
+ fdt_len = PAGE_SIZE;
+ scratch_mem = image->kho.scratch->mem;
+ scratch_len = image->kho.scratch->bufsz;
+
+ pr_debug("Adding kho metadata to DT");
+
+ ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, "linux,kho-fdt",
+ fdt_mem, fdt_len);
+ if (ret)
+ return ret;
+ ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, "linux,kho-scratch",
+ scratch_mem, scratch_len);
+
+#endif /* CONFIG_KEXEC_HANDOVER */
+ return ret;
+}
+
/*
* of_kexec_alloc_and_setup_fdt - Alloc and setup a new Flattened Device Tree
*
@@ -414,6 +451,11 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
#endif
}
+ /* Add kho metadata if this is a KHO image */
+ ret = kho_add_chosen(image, fdt, chosen_node);
+ if (ret)
+ goto out;
+
/* add bootargs */
if (cmdline) {
ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
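kho_add_chosen() is the producer half of the handover: it publishes the same
two properties that early_init_dt_check_kho() in drivers/of/fdt.c parses on
the next kernel. The delete-then-append shape makes it idempotent; a hedged
sketch of that idiom in isolation (demo_replace_addrrange is illustrative):

static int demo_replace_addrrange(void *fdt, int node, const char *name,
				  u64 addr, u64 size)
{
	int ret = fdt_delprop(fdt, node, name);	/* drop any stale copy */

	if (ret && ret != -FDT_ERR_NOTFOUND)
		return ret;

	return fdt_appendprop_addrrange(fdt, 0, node, name, addr, size);
}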
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index ee2e31522d7e..77016c0cc296 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "OF: reserved mem: " fmt
#include <linux/err.h>
+#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -740,3 +741,82 @@ struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
+
+/**
+ * of_reserved_mem_region_to_resource() - Get a reserved memory region as a resource
+ * @np: node containing 'memory-region' property
+ * @idx: index of 'memory-region' property to lookup
+ * @res: Pointer to a struct resource to fill in with the reserved region
+ *
+ * This function allows drivers to lookup a node's 'memory-region' property
+ * entries by index and return a struct resource for the entry.
+ *
+ * Returns 0 on success with @res filled in. Returns -ENODEV if 'memory-region'
+ * is missing or unavailable, -EINVAL for any other error.
+ */
+int of_reserved_mem_region_to_resource(const struct device_node *np,
+ unsigned int idx, struct resource *res)
+{
+ struct reserved_mem *rmem;
+
+ if (!np)
+ return -EINVAL;
+
+ struct device_node __free(device_node) *target = of_parse_phandle(np, "memory-region", idx);
+ if (!target || !of_device_is_available(target))
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(target);
+ if (!rmem)
+ return -EINVAL;
+
+ resource_set_range(res, rmem->base, rmem->size);
+ res->name = rmem->name;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource);
+
+/**
+ * of_reserved_mem_region_to_resource_byname() - Get a reserved memory region as a resource
+ * @np: node containing 'memory-region' property
+ * @name: name of 'memory-region' property entry to lookup
+ * @res: Pointer to a struct resource to fill in with the reserved region
+ *
+ * This function allows drivers to lookup a node's 'memory-region' property
+ * entries by name and return a struct resource for the entry.
+ *
+ * Returns 0 on success with @res filled in, or a negative error code on
+ * failure.
+ */
+int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
+ const char *name,
+ struct resource *res)
+{
+ int idx;
+
+ if (!name)
+ return -EINVAL;
+
+ idx = of_property_match_string(np, "memory-region-names", name);
+ if (idx < 0)
+ return idx;
+
+ return of_reserved_mem_region_to_resource(np, idx, res);
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource_byname);
+
+/**
+ * of_reserved_mem_region_count() - Return the number of 'memory-region' entries
+ * @np: node containing 'memory-region' property
+ *
+ * This function allows drivers to retrieve the number of entries for a node's
+ * 'memory-region' property.
+ *
+ * Returns the number of entries on success, or a negative error code on a
+ * malformed property.
+ */
+int of_reserved_mem_region_count(const struct device_node *np)
+{
+ return of_count_phandle_with_args(np, "memory-region", NULL);
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_region_count);
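A hedged usage sketch for the three helpers just added; the region name and
the demo_probe wrapper are illustrative:

static int demo_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource res;
	int ret;

	if (of_reserved_mem_region_count(np) <= 0)
		return -ENODEV;

	/* index resolved through memory-region-names */
	ret = of_reserved_mem_region_to_resource_byname(np, "sram", &res);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "reserved region %pR\n", &res);
	return 0;
}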
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 64d301893af7..eeb370e0f507 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -2029,15 +2029,16 @@ static int __init unittest_data_add(void)
rc = of_resolve_phandles(unittest_data_node);
if (rc) {
pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc);
- of_overlay_mutex_unlock();
- return -EINVAL;
+ rc = -EINVAL;
+ goto unlock;
}
/* attach the sub-tree to live tree */
if (!of_root) {
pr_warn("%s: no live tree to attach sub-tree\n", __func__);
kfree(unittest_data);
- return -ENODEV;
+ rc = -ENODEV;
+ goto unlock;
}
EXPECT_BEGIN(KERN_INFO,
@@ -2056,9 +2057,10 @@ static int __init unittest_data_add(void)
EXPECT_END(KERN_INFO,
"Duplicate name in testcase-data, renamed to \"duplicate-name#1\"");
+unlock:
of_overlay_mutex_unlock();
- return 0;
+ return rc;
}
#ifdef CONFIG_OF_OVERLAY
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 73e9a3b2f29b..edbd60501cf0 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -40,17 +40,14 @@ static DEFINE_XARRAY_ALLOC1(opp_configs);
static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
struct opp_device *opp_dev;
- bool found = false;
- mutex_lock(&opp_table->lock);
+ guard(mutex)(&opp_table->lock);
+
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
- if (opp_dev->dev == dev) {
- found = true;
- break;
- }
+ if (opp_dev->dev == dev)
+ return true;
- mutex_unlock(&opp_table->lock);
- return found;
+ return false;
}
static struct opp_table *_find_opp_table_unlocked(struct device *dev)
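This file is converted wholesale to the scope-based lock guards from
<linux/cleanup.h>: guard() holds the lock until the enclosing function scope
ends, scoped_guard() until its own block ends, so every return path unlocks
automatically. A minimal sketch of the semantics (demo_count is illustrative):

static int demo_count(struct opp_table *table)
{
	struct dev_pm_opp *opp;
	int n = 0;

	guard(mutex)(&table->lock);	/* released on any return below */

	list_for_each_entry(opp, &table->opp_list, node)
		n++;

	return n;			/* unlock happens here, implicitly */
}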
@@ -58,10 +55,8 @@ static struct opp_table *_find_opp_table_unlocked(struct device *dev)
struct opp_table *opp_table;
list_for_each_entry(opp_table, &opp_tables, node) {
- if (_find_opp_dev(dev, opp_table)) {
- _get_opp_table_kref(opp_table);
- return opp_table;
- }
+ if (_find_opp_dev(dev, opp_table))
+ return dev_pm_opp_get_opp_table_ref(opp_table);
}
return ERR_PTR(-ENODEV);
@@ -80,18 +75,13 @@ static struct opp_table *_find_opp_table_unlocked(struct device *dev)
*/
struct opp_table *_find_opp_table(struct device *dev)
{
- struct opp_table *opp_table;
-
if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
- mutex_lock(&opp_table_lock);
- opp_table = _find_opp_table_unlocked(dev);
- mutex_unlock(&opp_table_lock);
-
- return opp_table;
+ guard(mutex)(&opp_table_lock);
+ return _find_opp_table_unlocked(dev);
}
/*
@@ -319,18 +309,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
*/
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
- struct opp_table *opp_table;
- unsigned long clock_latency_ns;
+ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
return 0;
- clock_latency_ns = opp_table->clock_latency_ns_max;
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return clock_latency_ns;
+ return opp_table->clock_latency_ns_max;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
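The companion pattern is __free(put_opp_table): the named cleanup hook runs
when the variable leaves scope, replacing the manual
dev_pm_opp_put_opp_table() on every exit path. The hook tolerates NULL and
ERR_PTR values, which is why the early IS_ERR() return above is safe. A
minimal sketch (demo_latency is illustrative):

static unsigned long demo_latency(struct device *dev)
{
	struct opp_table *t __free(put_opp_table) = _find_opp_table(dev);

	if (IS_ERR(t))
		return 0;	/* hook sees ERR_PTR and does nothing */

	return t->clock_latency_ns_max;	/* reference dropped on return */
}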
@@ -342,7 +327,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
*/
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
- struct opp_table *opp_table;
+ struct opp_table *opp_table __free(put_opp_table);
struct dev_pm_opp *opp;
struct regulator *reg;
unsigned long latency_ns = 0;
@@ -358,33 +343,31 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
/* Regulator may not be required for the device */
if (!opp_table->regulators)
- goto put_opp_table;
+ return 0;
count = opp_table->regulator_count;
uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
if (!uV)
- goto put_opp_table;
-
- mutex_lock(&opp_table->lock);
+ return 0;
- for (i = 0; i < count; i++) {
- uV[i].min = ~0;
- uV[i].max = 0;
+ scoped_guard(mutex, &opp_table->lock) {
+ for (i = 0; i < count; i++) {
+ uV[i].min = ~0;
+ uV[i].max = 0;
- list_for_each_entry(opp, &opp_table->opp_list, node) {
- if (!opp->available)
- continue;
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
+ if (!opp->available)
+ continue;
- if (opp->supplies[i].u_volt_min < uV[i].min)
- uV[i].min = opp->supplies[i].u_volt_min;
- if (opp->supplies[i].u_volt_max > uV[i].max)
- uV[i].max = opp->supplies[i].u_volt_max;
+ if (opp->supplies[i].u_volt_min < uV[i].min)
+ uV[i].min = opp->supplies[i].u_volt_min;
+ if (opp->supplies[i].u_volt_max > uV[i].max)
+ uV[i].max = opp->supplies[i].u_volt_max;
+ }
}
}
- mutex_unlock(&opp_table->lock);
-
/*
* The caller needs to ensure that opp_table (and hence the regulator)
* isn't freed, while we are executing this routine.
@@ -397,8 +380,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
}
kfree(uV);
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
return latency_ns;
}
@@ -428,7 +409,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
*/
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
- struct opp_table *opp_table;
+ struct opp_table *opp_table __free(put_opp_table);
unsigned long freq = 0;
opp_table = _find_opp_table(dev);
@@ -438,8 +419,6 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
if (opp_table->suspend_opp && opp_table->suspend_opp->available)
freq = dev_pm_opp_get_freq(opp_table->suspend_opp);
- dev_pm_opp_put_opp_table(opp_table);
-
return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
@@ -449,15 +428,13 @@ int _get_opp_count(struct opp_table *opp_table)
struct dev_pm_opp *opp;
int count = 0;
- mutex_lock(&opp_table->lock);
+ guard(mutex)(&opp_table->lock);
list_for_each_entry(opp, &opp_table->opp_list, node) {
if (opp->available)
count++;
}
- mutex_unlock(&opp_table->lock);
-
return count;
}
@@ -470,21 +447,16 @@ int _get_opp_count(struct opp_table *opp_table)
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
- struct opp_table *opp_table;
- int count;
+ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
- count = PTR_ERR(opp_table);
- dev_dbg(dev, "%s: OPP table not found (%d)\n",
- __func__, count);
- return count;
+ dev_dbg(dev, "%s: OPP table not found (%ld)\n",
+ __func__, PTR_ERR(opp_table));
+ return PTR_ERR(opp_table);
}
- count = _get_opp_count(opp_table);
- dev_pm_opp_put_opp_table(opp_table);
-
- return count;
+ return _get_opp_count(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -551,7 +523,7 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
if (assert && !assert(opp_table, index))
return ERR_PTR(-EINVAL);
- mutex_lock(&opp_table->lock);
+ guard(mutex)(&opp_table->lock);
list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available == available) {
@@ -566,8 +538,6 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
dev_pm_opp_get(opp);
}
- mutex_unlock(&opp_table->lock);
-
return opp;
}
@@ -578,8 +548,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
unsigned long opp_key, unsigned long key),
bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
- struct opp_table *opp_table;
- struct dev_pm_opp *opp;
+ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
@@ -588,12 +557,8 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
return ERR_CAST(opp_table);
}
- opp = _opp_table_find_key(opp_table, key, index, available, read,
- compare, assert);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return opp;
+ return _opp_table_find_key(opp_table, key, index, available, read,
+ compare, assert);
}
static struct dev_pm_opp *_find_key_exact(struct device *dev,
@@ -1187,10 +1152,9 @@ static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
* make special checks to validate current_opp.
*/
if (IS_ERR(opp)) {
- mutex_lock(&opp_table->lock);
- opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node);
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
+ guard(mutex)(&opp_table->lock);
+ opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list,
+ struct dev_pm_opp, node));
}
opp_table->current_opp = opp;
@@ -1329,8 +1293,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
dev_pm_opp_put(old_opp);
/* Make sure current_opp doesn't get freed */
- dev_pm_opp_get(opp);
- opp_table->current_opp = opp;
+ opp_table->current_opp = dev_pm_opp_get(opp);
return ret;
}
@@ -1348,11 +1311,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
*/
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
- struct opp_table *opp_table;
+ struct opp_table *opp_table __free(put_opp_table);
+ struct dev_pm_opp *opp __free(put_opp) = NULL;
unsigned long freq = 0, temp_freq;
- struct dev_pm_opp *opp = NULL;
bool forced = false;
- int ret;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
@@ -1369,9 +1331,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
* equivalent to a clk_set_rate()
*/
if (!_get_opp_count(opp_table)) {
- ret = opp_table->config_clks(dev, opp_table, NULL,
- &target_freq, false);
- goto put_opp_table;
+ return opp_table->config_clks(dev, opp_table, NULL,
+ &target_freq, false);
}
freq = clk_round_rate(opp_table->clk, target_freq);
@@ -1386,10 +1347,9 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
temp_freq = freq;
opp = _find_freq_ceil(opp_table, &temp_freq);
if (IS_ERR(opp)) {
- ret = PTR_ERR(opp);
- dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
- __func__, freq, ret);
- goto put_opp_table;
+ dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n",
+ __func__, freq, PTR_ERR(opp));
+ return PTR_ERR(opp);
}
/*
@@ -1402,14 +1362,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
forced = opp_table->current_rate_single_clk != freq;
}
- ret = _set_opp(dev, opp_table, opp, &freq, forced);
-
- if (freq)
- dev_pm_opp_put(opp);
-
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
- return ret;
+ return _set_opp(dev, opp_table, opp, &freq, forced);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
@@ -1425,8 +1378,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
*/
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
- struct opp_table *opp_table;
- int ret;
+ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
@@ -1434,10 +1386,7 @@ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
return PTR_ERR(opp_table);
}
- ret = _set_opp(dev, opp_table, opp, NULL, false);
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
+ return _set_opp(dev, opp_table, opp, NULL, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);
@@ -1462,9 +1411,8 @@ struct opp_device *_add_opp_dev(const struct device *dev,
/* Initialize opp-dev */
opp_dev->dev = dev;
- mutex_lock(&opp_table->lock);
- list_add(&opp_dev->node, &opp_table->dev_list);
- mutex_unlock(&opp_table->lock);
+ scoped_guard(mutex, &opp_table->lock)
+ list_add(&opp_dev->node, &opp_table->dev_list);
/* Create debugfs entries for the opp_table */
opp_debug_register(opp_dev, opp_table);
@@ -1688,14 +1636,10 @@ static void _opp_table_kref_release(struct kref *kref)
kfree(opp_table);
}
-void _get_opp_table_kref(struct opp_table *opp_table)
+struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
{
kref_get(&opp_table->kref);
-}
-
-void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
-{
- _get_opp_table_kref(opp_table);
+ return opp_table;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref);
@@ -1729,9 +1673,10 @@ static void _opp_kref_release(struct kref *kref)
kfree(opp);
}
-void dev_pm_opp_get(struct dev_pm_opp *opp)
+struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp)
{
kref_get(&opp->kref);
+ return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get);
@@ -1750,27 +1695,25 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put);
*/
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
+ struct opp_table *opp_table __free(put_opp_table);
struct dev_pm_opp *opp = NULL, *iter;
- struct opp_table *opp_table;
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
return;
if (!assert_single_clk(opp_table, 0))
- goto put_table;
-
- mutex_lock(&opp_table->lock);
+ return;
- list_for_each_entry(iter, &opp_table->opp_list, node) {
- if (iter->rates[0] == freq) {
- opp = iter;
- break;
+ scoped_guard(mutex, &opp_table->lock) {
+ list_for_each_entry(iter, &opp_table->opp_list, node) {
+ if (iter->rates[0] == freq) {
+ opp = iter;
+ break;
+ }
}
}
- mutex_unlock(&opp_table->lock);
-
if (opp) {
dev_pm_opp_put(opp);
@@ -1780,32 +1723,26 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
__func__, freq);
}
-
-put_table:
- /* Drop the reference taken by _find_opp_table() */
- dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
bool dynamic)
{
- struct dev_pm_opp *opp = NULL, *temp;
+ struct dev_pm_opp *opp;
+
+ guard(mutex)(&opp_table->lock);
- mutex_lock(&opp_table->lock);
- list_for_each_entry(temp, &opp_table->opp_list, node) {
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
/*
* Refcount must be dropped only once for each OPP by OPP core,
* do that with help of "removed" flag.
*/
- if (!temp->removed && dynamic == temp->dynamic) {
- opp = temp;
- break;
- }
+ if (!opp->removed && dynamic == opp->dynamic)
+ return opp;
}
- mutex_unlock(&opp_table->lock);
- return opp;
+ return NULL;
}
/*
@@ -1829,20 +1766,14 @@ static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
bool _opp_remove_all_static(struct opp_table *opp_table)
{
- mutex_lock(&opp_table->lock);
-
- if (!opp_table->parsed_static_opps) {
- mutex_unlock(&opp_table->lock);
- return false;
- }
+ scoped_guard(mutex, &opp_table->lock) {
+ if (!opp_table->parsed_static_opps)
+ return false;
- if (--opp_table->parsed_static_opps) {
- mutex_unlock(&opp_table->lock);
- return true;
+ if (--opp_table->parsed_static_opps)
+ return true;
}
- mutex_unlock(&opp_table->lock);
-
_opp_remove_all(opp_table, false);
return true;
}
@@ -1855,16 +1786,13 @@ bool _opp_remove_all_static(struct opp_table *opp_table)
*/
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
- struct opp_table *opp_table;
+ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
return;
_opp_remove_all(opp_table, true);
-
- /* Drop the reference taken by _find_opp_table() */
- dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
@@ -2049,17 +1977,15 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
struct list_head *head;
int ret;
- mutex_lock(&opp_table->lock);
- head = &opp_table->opp_list;
+ scoped_guard(mutex, &opp_table->lock) {
+ head = &opp_table->opp_list;
- ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
- if (ret) {
- mutex_unlock(&opp_table->lock);
- return ret;
- }
+ ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
+ if (ret)
+ return ret;
- list_add(&new_opp->node, head);
- mutex_unlock(&opp_table->lock);
+ list_add(&new_opp->node, head);
+ }
new_opp->opp_table = opp_table;
kref_init(&new_opp->kref);
@@ -2161,8 +2087,8 @@ static int _opp_set_supported_hw(struct opp_table *opp_table,
if (opp_table->supported_hw)
return 0;
- opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
- GFP_KERNEL);
+ opp_table->supported_hw = kmemdup_array(versions, count,
+ sizeof(*versions), GFP_KERNEL);
if (!opp_table->supported_hw)
return -ENOMEM;
@@ -2706,18 +2632,16 @@ struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
return ERR_PTR(-EBUSY);
for (i = 0; i < src_table->required_opp_count; i++) {
- if (src_table->required_opp_tables[i] == dst_table) {
- mutex_lock(&src_table->lock);
+ if (src_table->required_opp_tables[i] != dst_table)
+ continue;
+ scoped_guard(mutex, &src_table->lock) {
list_for_each_entry(opp, &src_table->opp_list, node) {
if (opp == src_opp) {
- dest_opp = opp->required_opps[i];
- dev_pm_opp_get(dest_opp);
+ dest_opp = dev_pm_opp_get(opp->required_opps[i]);
break;
}
}
-
- mutex_unlock(&src_table->lock);
break;
}
}
@@ -2749,7 +2673,6 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
unsigned int pstate)
{
struct dev_pm_opp *opp;
- int dest_pstate = -EINVAL;
int i;
/*
@@ -2783,22 +2706,17 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
return -EINVAL;
}
- mutex_lock(&src_table->lock);
+ guard(mutex)(&src_table->lock);
list_for_each_entry(opp, &src_table->opp_list, node) {
- if (opp->level == pstate) {
- dest_pstate = opp->required_opps[i]->level;
- goto unlock;
- }
+ if (opp->level == pstate)
+ return opp->required_opps[i]->level;
}
pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
dst_table);
-unlock:
- mutex_unlock(&src_table->lock);
-
- return dest_pstate;
+ return -EINVAL;
}
/**
@@ -2853,46 +2771,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic);
static int _opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
- int r = 0;
+ struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
+ struct opp_table *opp_table __free(put_opp_table);
/* Find the opp_table */
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
- r = PTR_ERR(opp_table);
- dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
- return r;
+ dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__,
+ PTR_ERR(opp_table));
+ return PTR_ERR(opp_table);
}
- if (!assert_single_clk(opp_table, 0)) {
- r = -EINVAL;
- goto put_table;
- }
+ if (!assert_single_clk(opp_table, 0))
+ return -EINVAL;
- mutex_lock(&opp_table->lock);
+ scoped_guard(mutex, &opp_table->lock) {
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
+ if (tmp_opp->rates[0] == freq) {
+ opp = dev_pm_opp_get(tmp_opp);
- /* Do we have the frequency? */
- list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rates[0] == freq) {
- opp = tmp_opp;
- break;
- }
- }
+ /* Is update really needed? */
+ if (opp->available == availability_req)
+ return 0;
- if (IS_ERR(opp)) {
- r = PTR_ERR(opp);
- goto unlock;
+ opp->available = availability_req;
+ break;
+ }
+ }
}
- /* Is update really needed? */
- if (opp->available == availability_req)
- goto unlock;
-
- opp->available = availability_req;
-
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
/* Notify the change of the OPP availability */
if (availability_req)
@@ -2902,14 +2812,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
blocking_notifier_call_chain(&opp_table->head,
OPP_EVENT_DISABLE, opp);
- dev_pm_opp_put(opp);
- goto put_table;
-
-unlock:
- mutex_unlock(&opp_table->lock);
-put_table:
- dev_pm_opp_put_opp_table(opp_table);
- return r;
+ return 0;
}
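The rework above follows a common shape: take a reference on the object while
the list lock is held, then act on it (notifiers, error returns) after the
lock is dropped, with __free(put_opp) releasing it on every path. A hedged
sketch of just the lookup half (demo_lookup is illustrative):

static struct dev_pm_opp *demo_lookup(struct opp_table *table,
				      unsigned long freq)
{
	struct dev_pm_opp *opp;

	scoped_guard(mutex, &table->lock) {
		list_for_each_entry(opp, &table->opp_list, node) {
			if (opp->rates[0] == freq)
				return dev_pm_opp_get(opp);	/* ref taken under lock */
		}
	}

	return NULL;
}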
/**
@@ -2929,9 +2832,9 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
unsigned long u_volt_max)
{
- struct opp_table *opp_table;
- struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
- int r = 0;
+ struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
+ struct opp_table *opp_table __free(put_opp_table);
+ int r;
/* Find the opp_table */
opp_table = _find_opp_table(dev);
@@ -2941,49 +2844,36 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
return r;
}
- if (!assert_single_clk(opp_table, 0)) {
- r = -EINVAL;
- goto put_table;
- }
+ if (!assert_single_clk(opp_table, 0))
+ return -EINVAL;
- mutex_lock(&opp_table->lock);
+ scoped_guard(mutex, &opp_table->lock) {
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
+ if (tmp_opp->rates[0] == freq) {
+ opp = dev_pm_opp_get(tmp_opp);
- /* Do we have the frequency? */
- list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
- if (tmp_opp->rates[0] == freq) {
- opp = tmp_opp;
- break;
- }
- }
+ /* Is update really needed? */
+ if (opp->supplies->u_volt == u_volt)
+ return 0;
- if (IS_ERR(opp)) {
- r = PTR_ERR(opp);
- goto adjust_unlock;
- }
-
- /* Is update really needed? */
- if (opp->supplies->u_volt == u_volt)
- goto adjust_unlock;
+ opp->supplies->u_volt = u_volt;
+ opp->supplies->u_volt_min = u_volt_min;
+ opp->supplies->u_volt_max = u_volt_max;
- opp->supplies->u_volt = u_volt;
- opp->supplies->u_volt_min = u_volt_min;
- opp->supplies->u_volt_max = u_volt_max;
+ break;
+ }
+ }
+ }
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
/* Notify the voltage change of the OPP */
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
opp);
- dev_pm_opp_put(opp);
- goto put_table;
-
-adjust_unlock:
- mutex_unlock(&opp_table->lock);
-put_table:
- dev_pm_opp_put_opp_table(opp_table);
- return r;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
@@ -2997,9 +2887,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
*/
int dev_pm_opp_sync_regulators(struct device *dev)
{
- struct opp_table *opp_table;
+ struct opp_table *opp_table __free(put_opp_table);
struct regulator *reg;
- int i, ret = 0;
+ int ret, i;
/* Device may not have OPP table */
opp_table = _find_opp_table(dev);
@@ -3008,23 +2898,20 @@ int dev_pm_opp_sync_regulators(struct device *dev)
/* Regulator may not be required for the device */
if (unlikely(!opp_table->regulators))
- goto put_table;
+ return 0;
/* Nothing to sync if voltage wasn't changed */
if (!opp_table->enabled)
- goto put_table;
+ return 0;
for (i = 0; i < opp_table->regulator_count; i++) {
reg = opp_table->regulators[i];
ret = regulator_sync_voltage(reg);
if (ret)
- break;
+ return ret;
}
-put_table:
- /* Drop reference taken by _find_opp_table() */
- dev_pm_opp_put_opp_table(opp_table);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
@@ -3076,18 +2963,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
*/
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
- struct opp_table *opp_table;
- int ret;
+ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
- ret = blocking_notifier_chain_register(&opp_table->head, nb);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
+ return blocking_notifier_chain_register(&opp_table->head, nb);
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);
@@ -3101,18 +2983,13 @@ EXPORT_SYMBOL(dev_pm_opp_register_notifier);
int dev_pm_opp_unregister_notifier(struct device *dev,
struct notifier_block *nb)
{
- struct opp_table *opp_table;
- int ret;
+ struct opp_table *opp_table __free(put_opp_table);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
- ret = blocking_notifier_chain_unregister(&opp_table->head, nb);
-
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
+ return blocking_notifier_chain_unregister(&opp_table->head, nb);
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
@@ -3125,7 +3002,7 @@ EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
*/
void dev_pm_opp_remove_table(struct device *dev)
{
- struct opp_table *opp_table;
+ struct opp_table *opp_table __free(put_opp_table);
/* Check for existing table for 'dev' */
opp_table = _find_opp_table(dev);
@@ -3146,8 +3023,5 @@ void dev_pm_opp_remove_table(struct device *dev)
**/
if (_opp_remove_all_static(opp_table))
dev_pm_opp_put_opp_table(opp_table);
-
- /* Drop reference taken by _find_opp_table() */
- dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 12c429b407ca..97989d4fe336 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -43,7 +43,6 @@
int dev_pm_opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **opp_table)
{
- struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = NULL;
int i, max_opps, ret = 0;
unsigned long rate;
@@ -57,6 +56,8 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
return -ENOMEM;
for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+ struct dev_pm_opp *opp __free(put_opp);
+
/* find next rate */
opp = dev_pm_opp_find_freq_ceil(dev, &rate);
if (IS_ERR(opp)) {
@@ -69,8 +70,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
/* Is Boost/turbo opp ? */
if (dev_pm_opp_is_turbo(opp))
freq_table[i].flags = CPUFREQ_BOOST_FREQ;
-
- dev_pm_opp_put(opp);
}
freq_table[i].driver_data = i;
@@ -155,10 +154,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
const struct cpumask *cpumask)
{
+ struct opp_table *opp_table __free(put_opp_table);
struct opp_device *opp_dev;
- struct opp_table *opp_table;
struct device *dev;
- int cpu, ret = 0;
+ int cpu;
opp_table = _find_opp_table(cpu_dev);
if (IS_ERR(opp_table))
@@ -186,9 +185,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
}
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
@@ -204,33 +201,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
*/
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
+ struct opp_table *opp_table __free(put_opp_table);
struct opp_device *opp_dev;
- struct opp_table *opp_table;
- int ret = 0;
opp_table = _find_opp_table(cpu_dev);
if (IS_ERR(opp_table))
return PTR_ERR(opp_table);
- if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
- ret = -EINVAL;
- goto put_opp_table;
- }
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN)
+ return -EINVAL;
cpumask_clear(cpumask);
if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
- mutex_lock(&opp_table->lock);
+ guard(mutex)(&opp_table->lock);
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
cpumask_set_cpu(opp_dev->dev->id, cpumask);
- mutex_unlock(&opp_table->lock);
} else {
cpumask_set_cpu(cpu_dev->id, cpumask);
}
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
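Worth noting in the cpufreq-table hunk above: declaring the __free(put_opp)
variable inside the loop body gives it per-iteration scope, so each OPP
returned by dev_pm_opp_find_freq_ceil() is put at the end of its own
iteration. A minimal sketch of the shape (demo_iterate is illustrative):

static int demo_iterate(struct device *dev, int max_opps)
{
	unsigned long rate = 0;
	int i;

	for (i = 0; i < max_opps; i++, rate++) {
		struct dev_pm_opp *opp __free(put_opp) =
			dev_pm_opp_find_freq_ceil(dev, &rate);

		if (IS_ERR(opp))
			return PTR_ERR(opp);
		/* use opp; it is put when this iteration ends */
	}

	return 0;
}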
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index a24f76f5fd01..505d79821584 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
struct opp_table *_managed_opp(struct device *dev, int index)
{
struct opp_table *opp_table, *managed_table = NULL;
- struct device_node *np;
+ struct device_node *np __free(device_node);
np = _opp_of_get_opp_desc_node(dev->of_node, index);
if (!np)
@@ -60,17 +60,13 @@ struct opp_table *_managed_opp(struct device *dev, int index)
* But the OPPs will be considered as shared only if the
* OPP table contains a "opp-shared" property.
*/
- if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
- _get_opp_table_kref(opp_table);
- managed_table = opp_table;
- }
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+ managed_table = dev_pm_opp_get_opp_table_ref(opp_table);
break;
}
}
- of_node_put(np);
-
return managed_table;
}
@@ -80,18 +76,13 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
{
struct dev_pm_opp *opp;
- mutex_lock(&opp_table->lock);
+ guard(mutex)(&opp_table->lock);
list_for_each_entry(opp, &opp_table->opp_list, node) {
- if (opp->np == opp_np) {
- dev_pm_opp_get(opp);
- mutex_unlock(&opp_table->lock);
- return opp;
- }
+ if (opp->np == opp_np)
+ return dev_pm_opp_get(opp);
}
- mutex_unlock(&opp_table->lock);
-
return NULL;
}
@@ -104,27 +95,20 @@ static struct device_node *of_parse_required_opp(struct device_node *np,
/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
{
+ struct device_node *opp_table_np __free(device_node);
struct opp_table *opp_table;
- struct device_node *opp_table_np;
opp_table_np = of_get_parent(opp_np);
if (!opp_table_np)
- goto err;
+ return ERR_PTR(-ENODEV);
- /* It is safe to put the node now as all we need now is its address */
- of_node_put(opp_table_np);
+ guard(mutex)(&opp_table_lock);
- mutex_lock(&opp_table_lock);
list_for_each_entry(opp_table, &opp_tables, node) {
- if (opp_table_np == opp_table->np) {
- _get_opp_table_kref(opp_table);
- mutex_unlock(&opp_table_lock);
- return opp_table;
- }
+ if (opp_table_np == opp_table->np)
+ return dev_pm_opp_get_opp_table_ref(opp_table);
}
- mutex_unlock(&opp_table_lock);
-err:
return ERR_PTR(-ENODEV);
}
@@ -149,9 +133,8 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table)
opp_table->required_opp_count = 0;
opp_table->required_opp_tables = NULL;
- mutex_lock(&opp_table_lock);
+ guard(mutex)(&opp_table_lock);
list_del(&opp_table->lazy);
- mutex_unlock(&opp_table_lock);
}
/*
@@ -163,7 +146,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
struct device_node *opp_np)
{
struct opp_table **required_opp_tables;
- struct device_node *required_np, *np;
+ struct device_node *np __free(device_node);
bool lazy = false;
int count, i, size;
@@ -171,30 +154,32 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
np = of_get_next_available_child(opp_np, NULL);
if (!np) {
dev_warn(dev, "Empty OPP table\n");
-
return;
}
count = of_count_phandle_with_args(np, "required-opps", NULL);
if (count <= 0)
- goto put_np;
+ return;
size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs);
required_opp_tables = kcalloc(count, size, GFP_KERNEL);
if (!required_opp_tables)
- goto put_np;
+ return;
opp_table->required_opp_tables = required_opp_tables;
opp_table->required_devs = (void *)(required_opp_tables + count);
opp_table->required_opp_count = count;
for (i = 0; i < count; i++) {
+ struct device_node *required_np __free(device_node);
+
required_np = of_parse_required_opp(np, i);
- if (!required_np)
- goto free_required_tables;
+ if (!required_np) {
+ _opp_table_free_required_tables(opp_table);
+ return;
+ }
required_opp_tables[i] = _find_table_of_opp_np(required_np);
- of_node_put(required_np);
if (IS_ERR(required_opp_tables[i]))
lazy = true;
@@ -206,23 +191,15 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
* The OPP table is not held while allocating the table, take it
* now to avoid corruption to the lazy_opp_tables list.
*/
- mutex_lock(&opp_table_lock);
+ guard(mutex)(&opp_table_lock);
list_add(&opp_table->lazy, &lazy_opp_tables);
- mutex_unlock(&opp_table_lock);
}
-
- goto put_np;
-
-free_required_tables:
- _opp_table_free_required_tables(opp_table);
-put_np:
- of_node_put(np);
}
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
int index)
{
- struct device_node *np, *opp_np;
+ struct device_node *np __free(device_node), *opp_np;
u32 val;
/*
@@ -243,8 +220,6 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
/* Get OPP table node */
opp_np = _opp_of_get_opp_desc_node(np, index);
- of_node_put(np);
-
if (!opp_np)
return;
@@ -298,15 +273,13 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
static int _link_required_opps(struct dev_pm_opp *opp,
struct opp_table *required_table, int index)
{
- struct device_node *np;
+ struct device_node *np __free(device_node);
np = of_parse_required_opp(opp->np, index);
if (unlikely(!np))
return -ENODEV;
opp->required_opps[index] = _find_opp_of_np(required_table, np);
- of_node_put(np);
-
if (!opp->required_opps[index]) {
pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
__func__, opp->np, index);
@@ -370,19 +343,22 @@ static int lazy_link_required_opps(struct opp_table *opp_table,
static void lazy_link_required_opp_table(struct opp_table *new_table)
{
struct opp_table *opp_table, *temp, **required_opp_tables;
- struct device_node *required_np, *opp_np, *required_table_np;
struct dev_pm_opp *opp;
int i, ret;
- mutex_lock(&opp_table_lock);
+ guard(mutex)(&opp_table_lock);
list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
+ struct device_node *opp_np __free(device_node);
bool lazy = false;
/* opp_np can't be invalid here */
opp_np = of_get_next_available_child(opp_table->np, NULL);
for (i = 0; i < opp_table->required_opp_count; i++) {
+ struct device_node *required_np __free(device_node) = NULL;
+ struct device_node *required_table_np __free(device_node) = NULL;
+
required_opp_tables = opp_table->required_opp_tables;
/* Required opp-table is already parsed */
@@ -393,9 +369,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
required_np = of_parse_required_opp(opp_np, i);
required_table_np = of_get_parent(required_np);
- of_node_put(required_table_np);
- of_node_put(required_np);
-
/*
* Newly added table isn't the required opp-table for
* opp_table.
@@ -405,8 +378,7 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
continue;
}
- required_opp_tables[i] = new_table;
- _get_opp_table_kref(new_table);
+ required_opp_tables[i] = dev_pm_opp_get_opp_table_ref(new_table);
/* Link OPPs now */
ret = lazy_link_required_opps(opp_table, new_table, i);
@@ -417,8 +389,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
}
}
- of_node_put(opp_np);
-
/* All required opp-tables found, remove from lazy list */
if (!lazy) {
list_del_init(&opp_table->lazy);
@@ -427,22 +397,22 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
_required_opps_available(opp, opp_table->required_opp_count);
}
}
-
- mutex_unlock(&opp_table_lock);
}
static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
{
- struct device_node *np, *opp_np;
+ struct device_node *opp_np __free(device_node) = NULL;
+ struct device_node *np __free(device_node) = NULL;
struct property *prop;
if (!opp_table) {
+ struct device_node *np __free(device_node);
+
np = of_node_get(dev->of_node);
if (!np)
return -ENODEV;
opp_np = _opp_of_get_opp_desc_node(np, 0);
- of_node_put(np);
} else {
opp_np = of_node_get(opp_table->np);
}
@@ -453,15 +423,12 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
/* Checking only first OPP is sufficient */
np = of_get_next_available_child(opp_np, NULL);
- of_node_put(opp_np);
if (!np) {
dev_err(dev, "OPP table empty\n");
return -EINVAL;
}
prop = of_find_property(np, "opp-peak-kBps", NULL);
- of_node_put(np);
-
if (!prop || !prop->length)
return 0;
@@ -471,7 +438,7 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
int dev_pm_opp_of_find_icc_paths(struct device *dev,
struct opp_table *opp_table)
{
- struct device_node *np;
+ struct device_node *np __free(device_node) = of_node_get(dev->of_node);
int ret, i, count, num_paths;
struct icc_path **paths;
@@ -481,15 +448,13 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev,
else if (ret <= 0)
return ret;
- ret = 0;
-
- np = of_node_get(dev->of_node);
if (!np)
return 0;
+ ret = 0;
+
count = of_count_phandle_with_args(np, "interconnects",
"#interconnect-cells");
- of_node_put(np);
if (count < 0)
return 0;
@@ -992,15 +957,14 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
struct dev_pm_opp *opp;
/* OPP table is already initialized for the device */
- mutex_lock(&opp_table->lock);
- if (opp_table->parsed_static_opps) {
- opp_table->parsed_static_opps++;
- mutex_unlock(&opp_table->lock);
- return 0;
- }
+ scoped_guard(mutex, &opp_table->lock) {
+ if (opp_table->parsed_static_opps) {
+ opp_table->parsed_static_opps++;
+ return 0;
+ }
- opp_table->parsed_static_opps = 1;
- mutex_unlock(&opp_table->lock);
+ opp_table->parsed_static_opps = 1;
+ }
/* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) {
@@ -1040,15 +1004,14 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
const __be32 *val;
int nr, ret = 0;
- mutex_lock(&opp_table->lock);
- if (opp_table->parsed_static_opps) {
- opp_table->parsed_static_opps++;
- mutex_unlock(&opp_table->lock);
- return 0;
- }
+ scoped_guard(mutex, &opp_table->lock) {
+ if (opp_table->parsed_static_opps) {
+ opp_table->parsed_static_opps++;
+ return 0;
+ }
- opp_table->parsed_static_opps = 1;
- mutex_unlock(&opp_table->lock);
+ opp_table->parsed_static_opps = 1;
+ }
prop = of_find_property(dev->of_node, "operating-points", NULL);
if (!prop) {
@@ -1306,8 +1269,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
struct cpumask *cpumask)
{
- struct device_node *np, *tmp_np, *cpu_np;
- int cpu, ret = 0;
+ struct device_node *np __free(device_node);
+ int cpu;
/* Get OPP descriptor node */
np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
@@ -1320,9 +1283,12 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* OPPs are shared ? */
if (!of_property_read_bool(np, "opp-shared"))
- goto put_cpu_node;
+ return 0;
for_each_possible_cpu(cpu) {
+ struct device_node *cpu_np __free(device_node) = NULL;
+ struct device_node *tmp_np __free(device_node) = NULL;
+
if (cpu == cpu_dev->id)
continue;
@@ -1330,29 +1296,22 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
if (!cpu_np) {
dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
__func__, cpu);
- ret = -ENOENT;
- goto put_cpu_node;
+ return -ENOENT;
}
/* Get OPP descriptor node */
tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
- of_node_put(cpu_np);
if (!tmp_np) {
pr_err("%pOF: Couldn't find opp node\n", cpu_np);
- ret = -ENOENT;
- goto put_cpu_node;
+ return -ENOENT;
}
/* CPUs are sharing opp node */
if (np == tmp_np)
cpumask_set_cpu(cpu, cpumask);
-
- of_node_put(tmp_np);
}
-put_cpu_node:
- of_node_put(np);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
@@ -1369,9 +1328,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
*/
int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- struct dev_pm_opp *opp;
- struct device_node *required_np;
- struct opp_table *opp_table;
+ struct device_node *required_np __free(device_node);
+ struct opp_table *opp_table __free(put_opp_table) = NULL;
+ struct dev_pm_opp *opp __free(put_opp) = NULL;
int pstate = -EINVAL;
required_np = of_parse_required_opp(np, index);
@@ -1382,13 +1341,13 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
if (IS_ERR(opp_table)) {
pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
__func__, np, PTR_ERR(opp_table));
- goto put_required_np;
+ return PTR_ERR(opp_table);
}
/* The OPP tables must belong to a genpd */
if (unlikely(!opp_table->is_genpd)) {
pr_err("%s: Performance state is only valid for genpds.\n", __func__);
- goto put_required_np;
+ return -EINVAL;
}
opp = _find_opp_of_np(opp_table, required_np);
@@ -1399,15 +1358,8 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
} else {
pstate = opp->level;
}
- dev_pm_opp_put(opp);
-
}
- dev_pm_opp_put_opp_table(opp_table);
-
-put_required_np:
- of_node_put(required_np);
-
return pstate;
}
EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
@@ -1424,7 +1376,7 @@ EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
*/
bool dev_pm_opp_of_has_required_opp(struct device *dev)
{
- struct device_node *opp_np, *np;
+ struct device_node *np __free(device_node) = NULL, *opp_np __free(device_node);
int count;
opp_np = _opp_of_get_opp_desc_node(dev->of_node, 0);
@@ -1432,14 +1384,12 @@ bool dev_pm_opp_of_has_required_opp(struct device *dev)
return false;
np = of_get_next_available_child(opp_np, NULL);
- of_node_put(opp_np);
if (!np) {
dev_warn(dev, "Empty OPP table\n");
return false;
}
count = of_count_phandle_with_args(np, "required-opps", NULL);
- of_node_put(np);
return count > 0;
}
@@ -1475,7 +1425,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
static int __maybe_unused
_get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
{
- struct dev_pm_opp *opp;
+ struct dev_pm_opp *opp __free(put_opp);
unsigned long opp_freq, opp_power;
/* Find the right frequency and related OPP */
@@ -1485,7 +1435,6 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
return -EINVAL;
opp_power = dev_pm_opp_get_power(opp);
- dev_pm_opp_put(opp);
if (!opp_power)
return -EINVAL;
@@ -1516,8 +1465,8 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
unsigned long *kHz)
{
- struct dev_pm_opp *opp;
- struct device_node *np;
+ struct dev_pm_opp *opp __free(put_opp) = NULL;
+ struct device_node *np __free(device_node);
unsigned long mV, Hz;
u32 cap;
u64 tmp;
@@ -1528,7 +1477,6 @@ int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
return -EINVAL;
ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
- of_node_put(np);
if (ret)
return -EINVAL;
@@ -1538,7 +1486,6 @@ int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
return -EINVAL;
mV = dev_pm_opp_get_voltage(opp) / 1000;
- dev_pm_opp_put(opp);
if (!mV)
return -EINVAL;
@@ -1555,20 +1502,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power);
static bool _of_has_opp_microwatt_property(struct device *dev)
{
- unsigned long power, freq = 0;
- struct dev_pm_opp *opp;
+ struct dev_pm_opp *opp __free(put_opp);
+ unsigned long freq = 0;
/* Check if at least one OPP has needed property */
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
if (IS_ERR(opp))
return false;
- power = dev_pm_opp_get_power(opp);
- dev_pm_opp_put(opp);
- if (!power)
- return false;
-
- return true;
+ return !!dev_pm_opp_get_power(opp);
}
/**
@@ -1584,8 +1526,8 @@ static bool _of_has_opp_microwatt_property(struct device *dev)
*/
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
{
+ struct device_node *np __free(device_node) = NULL;
struct em_data_callback em_cb;
- struct device_node *np;
int ret, nr_opp;
u32 cap;
@@ -1620,7 +1562,6 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
* user about the inconsistent configuration.
*/
ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
- of_node_put(np);
if (ret || !cap) {
dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
ret = -EINVAL;
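One subtlety in the opp/of.c conversion: when a __free(device_node) variable
is assigned later or only conditionally, it must be initialized to NULL at
declaration so the cleanup hook never runs on an uninitialized pointer
(of_node_put(NULL) is a no-op). A minimal sketch (demo is illustrative):

static void demo(struct device_node *parent, bool deep)
{
	struct device_node *np __free(device_node) = NULL;

	if (deep)
		np = of_get_next_available_child(parent, NULL);

	/* np, if set, is put automatically on return */
}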
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 5c7c81190e41..9eba63e01a9e 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -251,7 +251,6 @@ struct opp_table {
/* Routines internal to opp core */
bool _opp_remove_all_static(struct opp_table *opp_table);
-void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 474515d27e9c..4035010249cd 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -40,7 +40,7 @@ static void parport_ieee1284_wakeup (struct parport *port)
static void timeout_waiting_on_port (struct timer_list *t)
{
- struct parport *port = from_timer(port, t, timer);
+ struct parport *port = timer_container_of(port, t, timer);
parport_ieee1284_wakeup (port);
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index da28295b4aac..9c0e4aaf4e8c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,6 +21,7 @@ config GENERIC_PCI_IOMAP
menuconfig PCI
bool "PCI support"
depends on HAVE_PCI
+ depends on MMU
help
This option enables support for the PCI local bus, including
support for PCI-X and the foundations for PCI Express support.
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index b6851101ac36..69048869ef1c 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -369,7 +369,9 @@ void pci_bus_add_device(struct pci_dev *dev)
pdev->name);
}
- dev->match_driver = !dn || of_device_is_available(dn);
+ if (!dn || of_device_is_available(dn))
+ pci_dev_allow_binding(dev);
+
retval = device_attach(&dev->dev);
if (retval < 0 && retval != -EPROBE_DEFER)
pci_warn(dev, "device attach failed (%d)\n", retval);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 9800b7681054..886f6f43a895 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -3,6 +3,10 @@
menu "PCI controller drivers"
depends on PCI
+config PCI_HOST_COMMON
+ tristate
+ select PCI_ECAM
+
config PCI_AARDVARK
tristate "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
@@ -40,6 +44,7 @@ config PCIE_APPLE
depends on OF
depends on PCI_MSI
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on Apple
system-on-chips, like the Apple M1. This is required for the USB
@@ -119,10 +124,6 @@ config PCI_FTPCI100
depends on OF
default ARCH_GEMINI
-config PCI_HOST_COMMON
- tristate
- select PCI_ECAM
-
config PCI_HOST_GENERIC
tristate "Generic PCI host controller"
depends on OF
@@ -227,6 +228,7 @@ config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -303,6 +305,7 @@ config PCI_XGENE_MSI
bool "X-Gene v1 PCIe MSI feature"
depends on PCI_XGENE
depends on PCI_MSI
+ select IRQ_MSI_LIB
default y
help
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 8a0044bb3989..666e16b6367f 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -4,16 +4,16 @@ menu "Cadence-based PCIe controllers"
depends on PCI
config PCIE_CADENCE
- bool
+ tristate
config PCIE_CADENCE_HOST
- bool
+ tristate
depends on OF
select IRQ_DOMAIN
select PCIE_CADENCE
config PCIE_CADENCE_EP
- bool
+ tristate
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE
@@ -43,13 +43,14 @@ config PCIE_CADENCE_PLAT_EP
different vendors SoCs.
config PCI_J721E
- bool
+ tristate
+ select PCIE_CADENCE_HOST if PCI_J721E_HOST != n
+ select PCIE_CADENCE_EP if PCI_J721E_EP != n
config PCI_J721E_HOST
- bool "TI J721E PCIe controller (host mode)"
+ tristate "TI J721E PCIe controller (host mode)"
depends on ARCH_K3 || COMPILE_TEST
depends on OF
- select PCIE_CADENCE_HOST
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
@@ -57,11 +58,10 @@ config PCI_J721E_HOST
core.
config PCI_J721E_EP
- bool "TI J721E PCIe controller (endpoint mode)"
+ tristate "TI J721E PCIe controller (endpoint mode)"
depends on ARCH_K3 || COMPILE_TEST
depends on OF
depends on PCI_ENDPOINT
- select PCIE_CADENCE_EP
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index ef1cfdae33bb..6c93f39d0288 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -27,6 +28,7 @@
#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
#define ENABLE_REG_SYS_2 0x108
+#define ENABLE_CLR_REG_SYS_2 0x308
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
@@ -116,6 +118,15 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
+static void j721e_pcie_disable_link_irq(struct j721e_pcie *pcie)
+{
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_CLR_REG_SYS_2);
+ reg |= pcie->linkdown_irq_regfield;
+ j721e_pcie_intd_writel(pcie, ENABLE_CLR_REG_SYS_2, reg);
+}
+
static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
{
u32 reg;
@@ -153,11 +164,7 @@ static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
u32 reg;
reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
- reg &= LINK_STATUS;
- if (reg == LINK_UP_DL_COMPLETED)
- return true;
-
- return false;
+ return (reg & LINK_STATUS) == LINK_UP_DL_COMPLETED;
}
static const struct cdns_pcie_ops j721e_pcie_ops = {
@@ -464,7 +471,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_HOST))
return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
@@ -483,7 +490,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->cdns_pcie = cdns_pcie;
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_EP))
return -ENODEV;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
@@ -633,9 +640,22 @@ static void j721e_pcie_remove(struct platform_device *pdev)
struct j721e_pcie *pcie = platform_get_drvdata(pdev);
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ rc = container_of(cdns_pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+ } else {
+ ep = container_of(cdns_pcie, struct cdns_pcie_ep, pcie);
+ cdns_pcie_ep_disable(ep);
+ }
+
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
+ j721e_pcie_disable_link_irq(pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
}
@@ -730,4 +750,8 @@ static struct platform_driver j721e_pcie_driver = {
.pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
-builtin_platform_driver(j721e_pcie_driver);
+module_platform_driver(j721e_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for TI's J721E and related SoCs");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 599ec4b1223e..8ab6cf70c18e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -6,12 +6,14 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
@@ -220,10 +222,11 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
@@ -262,7 +265,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
*/
mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
- return mme;
+ return 1 << mme;
}
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
@@ -281,12 +284,11 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
- u16 interrupts, enum pci_barno bir,
- u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -298,7 +300,7 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
/* Set MSI-X BAR and offset */
@@ -308,7 +310,7 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
/* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
return 0;
@@ -337,10 +339,10 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
if (is_asserted) {
ep->irq_pending |= BIT(intx);
- msg_code = MSG_CODE_ASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = MSG_CODE_DEASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
}
spin_lock_irqsave(&ep->lock, flags);
@@ -644,6 +646,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+ struct device *dev = ep->pcie.dev;
+ struct pci_epc *epc = to_pci_epc(dev);
+
+ pci_epc_deinit_notify(epc);
+ pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
+ SZ_128K);
+ pci_epc_mem_exit(epc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
@@ -751,3 +764,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 8af95e9da7ce..59a4631de79f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -72,6 +73,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return rc->cfg_base + (where & 0xfff);
}
+EXPORT_SYMBOL_GPL(cdns_pci_map_bus);
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
@@ -150,6 +152,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
return ret;
}
+static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val & ~CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+}
+
static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
{
u32 val;
@@ -175,6 +185,26 @@ static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
return ret;
}
+static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
+
+ cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, 0xffff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0xff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0xff);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, 0xffffffff);
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, 0xffff);
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ value = ~(CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+}
+
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -391,6 +421,32 @@ static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
return resource_size(entry2->res) - resource_size(entry1->res);
}
+static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ enum cdns_pcie_rp_bar bar;
+ u32 value;
+
+ /* Reset inbound configuration for all BARs which were being used */
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (rc->avail_ib_bar[bar])
+ continue;
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), 0);
+
+ if (bar == RP_NO_BAR)
+ continue;
+
+ value = ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+ }
+}
+
static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -428,6 +484,29 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
return 0;
}
+static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource_entry *entry;
+ int r;
+
+ cdns_pcie_host_unmap_dma_ranges(rc);
+
+ /*
+ * Reset outbound region 0 which was reserved for configuration space
+ * accesses.
+ */
+ cdns_pcie_reset_outbound_region(pcie, 0);
+
+ /* Reset rest of the outbound regions */
+ r = 1;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ cdns_pcie_reset_outbound_region(pcie, r);
+ r++;
+ }
+}
+
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -485,6 +564,12 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
return cdns_pcie_host_map_dma_ranges(rc);
}
+static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc)
+{
+ cdns_pcie_host_deinit_address_translation(rc);
+ cdns_pcie_host_deinit_root_port(rc);
+}
+
int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -495,6 +580,15 @@ int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
return cdns_pcie_host_init_address_translation(rc);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_init);
+
+static void cdns_pcie_host_link_disable(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+
+ cdns_pcie_stop_link(pcie);
+ cdns_pcie_host_disable_ptm_response(pcie);
+}
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
@@ -519,6 +613,20 @@ int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
return 0;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_link_setup);
+
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+
+ cdns_pcie_host_deinit(rc);
+ cdns_pcie_host_link_disable(rc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_disable);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
@@ -570,14 +678,10 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
if (!bridge->ops)
bridge->ops = &cdns_pcie_host_ops;
- ret = pci_host_probe(bridge);
- if (ret < 0)
- goto err_init;
-
- return 0;
-
- err_init:
- pm_runtime_put_sync(dev);
-
- return ret;
+ return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 204e045aed8c..70a19573440e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -4,6 +4,7 @@
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include "pcie-cadence.h"
@@ -23,6 +24,7 @@ void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
@@ -100,6 +102,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
u8 busnr, u8 fn,
@@ -134,6 +137,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
@@ -146,6 +150,7 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
@@ -156,6 +161,7 @@ void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
phy_exit(pcie->phy[i]);
}
}
+EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
@@ -184,6 +190,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
@@ -243,6 +250,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
static int cdns_pcie_suspend_noirq(struct device *dev)
{
@@ -271,3 +279,7 @@ const struct dev_pm_ops cdns_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
cdns_pcie_resume_noirq)
};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 39ee9945c903..a149845d341a 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -250,17 +250,6 @@ struct cdns_pcie_rp_ib_bar {
struct cdns_pcie;
-enum cdns_pcie_msg_code {
- MSG_CODE_ASSERT_INTA = 0x20,
- MSG_CODE_ASSERT_INTB = 0x21,
- MSG_CODE_ASSERT_INTC = 0x22,
- MSG_CODE_ASSERT_INTD = 0x23,
- MSG_CODE_DEASSERT_INTA = 0x24,
- MSG_CODE_DEASSERT_INTB = 0x25,
- MSG_CODE_DEASSERT_INTC = 0x26,
- MSG_CODE_DEASSERT_INTD = 0x27,
-};
-
enum cdns_pcie_msg_routing {
/* Route to Root Complex */
MSG_ROUTING_TO_RC,
@@ -519,10 +508,11 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
return true;
}
-#ifdef CONFIG_PCIE_CADENCE_HOST
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
@@ -541,6 +531,10 @@ static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return 0;
}
+static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+}
+
static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -548,13 +542,18 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d
}
#endif
-#ifdef CONFIG_PCIE_CADENCE_EP
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
return 0;
}
+
+static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+}
#endif
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
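
The #ifdef to IS_ENABLED() switch in this header matters because the options just became tristate: building PCIE_CADENCE_HOST as a module defines CONFIG_PCIE_CADENCE_HOST_MODULE rather than CONFIG_PCIE_CADENCE_HOST, so a plain #ifdef would select the empty stubs. A sketch with a placeholder option name:

#ifdef CONFIG_FOO			/* true only for =y, misses =m */
int foo_setup(void);
#endif

#if IS_ENABLED(CONFIG_FOO)		/* true for both =y and =m */
int foo_setup(void);
#else
static inline int foo_setup(void)
{
	return 0;
}
#endif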
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 33d6bf460ffe..f97f5266d196 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -118,12 +118,12 @@ static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
-static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+static bool dra7xx_pcie_link_up(struct dw_pcie *pci)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
- return !!(reg & LINK_UP);
+ return reg & LINK_UP;
}
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
@@ -359,8 +359,8 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
pp);
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
+ dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &intx_domain_ops, pp);
of_node_put(pcie_intc_node);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index ace736b025b1..1f0e98d07109 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -209,12 +209,12 @@ static struct pci_ops exynos_pci_ops = {
.write = exynos_pcie_wr_own_conf,
};
-static int exynos_pcie_link_up(struct dw_pcie *pci)
+static bool exynos_pcie_link_up(struct dw_pcie *pci)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- return (val & PCIE_ELBI_XMLH_LINKUP);
+ return val & PCIE_ELBI_XMLH_LINKUP;
}
static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 5f267dd261b5..5a38cfaf989b 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -45,9 +45,14 @@
#define IMX95_PCIE_PHY_GEN_CTRL 0x0
#define IMX95_PCIE_REF_USE_PAD BIT(17)
+#define IMX95_PCIE_PHY_MPLLA_CTRL 0x10
+#define IMX95_PCIE_PHY_MPLL_STATE BIT(30)
+
#define IMX95_PCIE_SS_RW_REG_0 0xf0
#define IMX95_PCIE_REF_CLKEN BIT(23)
#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+#define IMX95_PCIE_SS_RW_REG_1 0xf4
+#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31)
#define IMX95_PE0_GEN_CTRL_1 0x1050
#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
@@ -71,6 +76,9 @@
#define IMX95_SID_MASK GENMASK(5, 0)
#define IMX95_MAX_LUT 32
+#define IMX95_PCIE_RST_CTRL 0x3010
+#define IMX95_PCIE_COLD_RST BIT(0)
+
#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
enum imx_pcie_variants {
@@ -91,7 +99,7 @@ enum imx_pcie_variants {
};
#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
-#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1)
+#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND BIT(1)
#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
@@ -105,6 +113,7 @@ enum imx_pcie_variants {
*/
#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
+#define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11)
#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
@@ -126,9 +135,15 @@ struct imx_pcie_drvdata {
int (*init_phy)(struct imx_pcie *pcie);
int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ int (*wait_pll_lock)(struct imx_pcie *pcie);
const struct dw_pcie_host_ops *ops;
};
+struct imx_lut_data {
+ u32 data1;
+ u32 data2;
+};
+
struct imx_pcie {
struct dw_pcie *pci;
struct gpio_desc *reset_gpiod;
@@ -148,6 +163,8 @@ struct imx_pcie {
struct regulator *vph;
void __iomem *phy_base;
+ /* LUT data for pcie */
+ struct imx_lut_data luts[IMX95_MAX_LUT];
/* power domain for pcie */
struct device *pd_pcie;
/* power domain for pcie phy */
@@ -224,6 +241,19 @@ static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
+ /*
+ * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
+ * Through Beacon or PERST# De-assertion
+ *
+ * When the auxiliary power is not available, the controller
+ * cannot exit from L23 Ready with beacon or PERST# de-assertion
+ * when main power is not removed.
+ *
+ * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
+ IMX95_PCIE_SYS_AUX_PWR_DET);
+
regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_PHY_CR_PARA_SEL,
@@ -460,6 +490,23 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
+static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
+{
+ u32 val;
+ struct device *dev = imx_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_MPLLA_CTRL, val,
+ val & IMX95_PCIE_PHY_MPLL_STATE,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT)) {
+ dev_err(dev, "PCIe PLL lock timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
unsigned long phy_rate = 0;
@@ -773,6 +820,43 @@ static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
return 0;
}
+static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ u32 val;
+
+ if (assert) {
+ /*
+ * From i.MX95 PCIe PHY perspective, the COLD reset toggle
+ * should be complete after power-up by the following sequence.
+ * > 10us(at power-up)
+ * > 10ns(warm reset)
+ * |<------------>|
+ * ______________
+ * phy_reset ____/ \________________
+ * ____________
+ * ref_clk_en_______________________/
+ * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ /*
+ * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
+ * hardware by doing a read. Otherwise, there is no guarantee
+ * that the write has reached the hardware before udelay().
+ */
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(15);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(10);
+ }
+
+ return 0;
+}
+
static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
{
reset_control_assert(imx_pcie->pciephy_reset);
@@ -860,6 +944,12 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
u32 tmp;
int ret;
+ if (!(imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) {
+ imx_pcie_ltssm_enable(dev);
+ return 0;
+ }
+
/*
* Force Gen1 operation when starting the link. In case the link is
* started in Gen2 mode, there is a possibility the devices on the
@@ -875,11 +965,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
/* Start LTSSM. */
imx_pcie_ltssm_enable(dev);
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_reset_phy;
-
if (pci->max_link_speed > 1) {
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
+
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
@@ -896,34 +986,15 @@ static int imx_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx_pcie->drvdata->flags &
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
-
- /*
- * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
- * from i.MX6 family when no link speed transition
- * occurs and we go Gen1 -> yep, Gen1. The difference
- * is that, in such case, it will not be cleared by HW
- * which will cause the following code to report false
- * failure.
- */
- ret = imx_pcie_wait_for_speed_change(imx_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
- }
-
- /* Make sure link training is finished as well! */
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
+ }
} else {
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
- dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
@@ -1182,6 +1253,12 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp)
goto err_phy_off;
}
+ if (imx_pcie->drvdata->wait_pll_lock) {
+ ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
+ if (ret < 0)
+ goto err_phy_off;
+ }
+
imx_setup_phy_mpll(imx_pcie);
return 0;
@@ -1214,6 +1291,32 @@ static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
regulator_disable(imx_pcie->vpcie);
}
+static void imx_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+ u32 val;
+
+ if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) {
+ /*
+ * ERR051586: Compliance with 8GT/s Receiver Impedance ECN
+ *
+ * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * is 1 which makes receiver non-compliant with the ZRX-DC
+ * parameter for 2.5 GT/s when operating at 8 GT/s or higher.
+ * It causes unnecessary timeout in L1.
+ *
+ * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * to 0.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+}
+
/*
* In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2
* register is reserved, so the generic DWC implementation of sending the
@@ -1239,6 +1342,7 @@ static const struct dw_pcie_host_ops imx_pcie_host_ops = {
static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
.init = imx_pcie_host_init,
.deinit = imx_pcie_host_exit,
+ .post_init = imx_pcie_host_post_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -1350,6 +1454,7 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
+ imx_pcie_host_post_init(pp);
ret = dw_pcie_ep_init_registers(ep);
if (ret) {
@@ -1386,6 +1491,42 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
}
}
+static void imx_pcie_lut_save(struct imx_pcie *imx_pcie)
+{
+ u32 data1, data2;
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL,
+ IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (data1 & IMX95_PE0_LUT_VLD) {
+ imx_pcie->luts[i].data1 = data1;
+ imx_pcie->luts[i].data2 = data2;
+ } else {
+ imx_pcie->luts[i].data1 = 0;
+ imx_pcie->luts[i].data2 = 0;
+ }
+ }
+}
+
+static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie)
+{
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0)
+ continue;
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
+ imx_pcie->luts[i].data1);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
+ imx_pcie->luts[i].data2);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
+ }
+}
+
static int imx_pcie_suspend_noirq(struct device *dev)
{
struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
@@ -1394,6 +1535,8 @@ static int imx_pcie_suspend_noirq(struct device *dev)
return 0;
imx_pcie_msi_save_restore(imx_pcie, true);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_save(imx_pcie);
if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
/*
* The minimum for a workaround would be to set PERST# and to
@@ -1438,6 +1581,8 @@ static int imx_pcie_resume_noirq(struct device *dev)
if (ret)
return ret;
}
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_restore(imx_pcie);
imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
@@ -1649,7 +1794,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_BROKEN_SUSPEND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
@@ -1665,7 +1810,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
.ltssm_off = IOMUXC_GPR12,
@@ -1680,7 +1825,7 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX6QP] = {
.variant = IMX6QP,
.flags = IMX_PCIE_FLAG_IMX_PHY |
- IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
@@ -1747,12 +1892,15 @@ static const struct imx_pcie_drvdata drvdata[] = {
.variant = IMX95,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .core_reset = imx95_pcie_core_reset,
.init_phy = imx95_pcie_init_phy,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
@@ -1799,12 +1947,15 @@ static const struct imx_pcie_drvdata drvdata[] = {
[IMX95_EP] = {
.variant = IMX95_EP,
.flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
IMX_PCIE_FLAG_SUPPORT_64BIT,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
.mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
.init_phy = imx95_pcie_init_phy,
+ .core_reset = imx95_pcie_core_reset,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
.epc_features = &imx95_pcie_epc_features,
.mode = DW_PCIE_EP_TYPE,
},
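
One detail worth noting in imx95_pcie_core_reset() above: regmap writes to MMIO can be posted, so the code reads the register back before udelay() to guarantee the write has reached the hardware before the pulse is timed. The pattern distilled (map, REG_CTRL and CTRL_RST are placeholders):

static void toggle_cold_reset(struct regmap *map)
{
	u32 val;

	regmap_set_bits(map, REG_CTRL, CTRL_RST);
	/* read back to flush the posted write before timing the pulse */
	regmap_read_bypassed(map, REG_CTRL, &val);
	udelay(15);
	regmap_clear_bits(map, REG_CTRL, CTRL_RST);
	regmap_read_bypassed(map, REG_CTRL, &val);
	udelay(10);
}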
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 76a37368ae4f..2b2632e513b5 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -492,13 +492,12 @@ static struct pci_ops ks_pcie_ops = {
* @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
* controller driver information.
*/
-static int ks_pcie_link_up(struct dw_pcie *pci)
+static bool ks_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
- val &= PORT_LOGIC_LTSSM_STATE_MASK;
- return (val == PORT_LOGIC_LTSSM_STATE_L0);
+ return (val & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L0;
}
static void ks_pcie_stop_link(struct dw_pcie *pci)
@@ -761,7 +760,7 @@ static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
ks_pcie);
}
- intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX,
&ks_pcie_intx_irq_domain_ops, NULL);
if (!intx_irq_domain) {
dev_err(dev, "Failed to add irq domain for INTX irqs\n");
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index db9482a113e9..787469d1b396 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -335,7 +335,7 @@ static struct pci_ops meson_pci_ops = {
.write = pci_generic_config_write,
};
-static int meson_pcie_link_up(struct dw_pcie *pci)
+static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
struct device *dev = pci->dev;
@@ -363,7 +363,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
- return 1;
+ return true;
cnt++;
@@ -371,7 +371,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
- return 0;
+ return false;
}
static int meson_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
index 4eb2a4e8189d..9f7251a16d32 100644
--- a/drivers/pci/controller/dwc/pcie-amd-mdb.c
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -290,8 +290,8 @@ static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
return -ENODEV;
}
- pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops, pcie);
+ pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, pcie);
if (!pcie->mdb_domain) {
err = -ENOMEM;
dev_err(dev, "Failed to add MDB domain\n");
@@ -300,8 +300,8 @@ static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
- pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &amd_intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &amd_intx_domain_ops, pcie);
if (!pcie->intx_domain) {
err = -ENOMEM;
dev_err(dev, "Failed to add INTx domain\n");
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b5c599ccaacf..c2650fd0d458 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -139,7 +139,7 @@ static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
return ret;
}
-static int armada8k_pcie_link_up(struct dw_pcie *pci)
+static bool armada8k_pcie_link_up(struct dw_pcie *pci)
{
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
@@ -147,10 +147,10 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
- return 1;
+ return true;
dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
- return 0;
+ return false;
}
static int armada8k_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
index 9e6f4d00f262..c67601096c48 100644
--- a/drivers/pci/controller/dwc/pcie-designware-debugfs.c
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -642,16 +642,262 @@ static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
&dwc_pcie_ltssm_status_ops);
}
+static int dw_pcie_ptm_check_capability(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);
+
+ return pci->ptm_vsec_offset;
+}
+
+static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_REQ_AUTO_UPDATE_ENABLED;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
+ val |= PTM_REQ_START_UPDATE;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
+ *mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else
+ /*
+ * PTM_REQ_START_UPDATE is a self-clearing register bit. So if
+ * PTM_REQ_AUTO_UPDATE_ENABLED is not set, then it implies that
+ * manual update is used.
+ */
+ *mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (valid) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ *valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);
+
+ return 0;
+}
+
+static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static bool dw_pcie_ptm_context_update_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return (pci->mode == DW_PCIE_EP_TYPE) ? true : false;
+}
+
+static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return (pci->mode == DW_PCIE_RC_TYPE) ? true : false;
+}
+
+static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
+{
+ /* PTM local clock is always visible */
+ return true;
+}
+
+static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return (pci->mode == DW_PCIE_EP_TYPE) ? true : false;
+}
+
+static bool dw_pcie_ptm_t1_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return (pci->mode == DW_PCIE_EP_TYPE) ? true : false;
+}
+
+static bool dw_pcie_ptm_t2_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return (pci->mode == DW_PCIE_RC_TYPE) ? true : false;
+}
+
+static bool dw_pcie_ptm_t3_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return (pci->mode == DW_PCIE_RC_TYPE) ? true : false;
+}
+
+static bool dw_pcie_ptm_t4_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return (pci->mode == DW_PCIE_EP_TYPE) ? true : false;
+}
+
+const struct pcie_ptm_ops dw_pcie_ptm_ops = {
+ .check_capability = dw_pcie_ptm_check_capability,
+ .context_update_write = dw_pcie_ptm_context_update_write,
+ .context_update_read = dw_pcie_ptm_context_update_read,
+ .context_valid_write = dw_pcie_ptm_context_valid_write,
+ .context_valid_read = dw_pcie_ptm_context_valid_read,
+ .local_clock_read = dw_pcie_ptm_local_clock_read,
+ .master_clock_read = dw_pcie_ptm_master_clock_read,
+ .t1_read = dw_pcie_ptm_t1_read,
+ .t2_read = dw_pcie_ptm_t2_read,
+ .t3_read = dw_pcie_ptm_t3_read,
+ .t4_read = dw_pcie_ptm_t4_read,
+ .context_update_visible = dw_pcie_ptm_context_update_visible,
+ .context_valid_visible = dw_pcie_ptm_context_valid_visible,
+ .local_clock_visible = dw_pcie_ptm_local_clock_visible,
+ .master_clock_visible = dw_pcie_ptm_master_clock_visible,
+ .t1_visible = dw_pcie_ptm_t1_visible,
+ .t2_visible = dw_pcie_ptm_t2_visible,
+ .t3_visible = dw_pcie_ptm_t3_visible,
+ .t4_visible = dw_pcie_ptm_t4_visible,
+};
+
void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
{
if (!pci->debugfs)
return;
+ pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
dwc_pcie_rasdes_debugfs_deinit(pci);
debugfs_remove_recursive(pci->debugfs->debug_dir);
}
-void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
{
char dirname[DWC_DEBUGFS_BUF_MAX];
struct device *dev = pci->dev;
@@ -674,4 +920,8 @@ void dwc_pcie_debugfs_init(struct dw_pcie *pci)
err);
dwc_pcie_ltssm_debugfs_init(pci, dir);
+
+ pci->mode = mode;
+ pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
+ &dw_pcie_ptm_ops);
}
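
All of the PTM clock accessors above share one idiom: a 64-bit timestamp is exposed as two 32-bit registers, so the code reads MSB, then LSB, and retries if the MSB changed in between (the low word wrapped mid-read). The loop distilled, with read32() standing in for dw_pcie_readl_dbi() at the relevant offsets:

static u64 read_split_counter(u32 (*read32)(u32 off), u32 msb_off, u32 lsb_off)
{
	u32 msb, lsb;

	do {
		msb = read32(msb_off);
		lsb = read32(lsb_off);
	} while (msb != read32(msb_off));

	return ((u64)msb << 32) | lsb;
}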
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1a0bf9341542..0ae54a94809b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -256,11 +256,11 @@ static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
return offset;
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- bar_index = reg & PCI_REBAR_CTRL_BAR_IDX;
+ bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
if (bar_index == bar)
return offset;
}
@@ -532,15 +532,16 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);
- return val;
+ return 1 << val;
}
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u8 interrupts)
+ u8 nr_irqs)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dw_pcie_ep_func *ep_func;
+ u8 mmc = order_base_2(nr_irqs);
u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
@@ -550,7 +551,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
reg = ep_func->msi_cap + PCI_MSI_FLAGS;
val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
- val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts);
+ val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -575,11 +576,11 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -595,7 +596,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
dw_pcie_writew_dbi(pci, reg, val);
reg = ep_func->msix_cap + PCI_MSIX_TABLE;
@@ -603,7 +604,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
reg = ep_func->msix_cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -671,7 +672,7 @@ static const struct pci_epc_ops epc_ops = {
* @ep: DWC EP device
* @func_no: Function number of the endpoint
*
- * Return: 0 if success, errono otherwise.
+ * Return: 0 if success, errno otherwise.
*/
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -690,7 +691,7 @@ EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
* @func_no: Function number of the endpoint
* @interrupt_num: Interrupt number to be raised
*
- * Return: 0 if success, errono otherwise.
+ * Return: 0 if success, errno otherwise.
*/
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
@@ -875,8 +876,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
if (offset) {
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
/*
* PCIe r6.0, sec 7.8.6.2 require us to support at least one
@@ -897,7 +897,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
* is why RESBAR_CAP_REG is written here.
*/
val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- bar = val & PCI_REBAR_CTRL_BAR_IDX;
+ bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
if (ep->epf_bar[bar])
pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
else
@@ -1013,7 +1013,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
dw_pcie_ep_init_non_sticky_registers(pci);
- dwc_pcie_debugfs_init(pci);
+ dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);
return 0;
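
Several hunks above replace open-coded "(reg & MASK) >> SHIFT" with FIELD_GET(); the macro derives the shift from a GENMASK()-defined mask at compile time, so the shift constant cannot drift out of sync with the mask. A sketch with placeholder DEMO_* names:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_NBAR_MASK	GENMASK(7, 5)

static u32 demo_get_nbars(u32 reg)
{
	/* equivalent to (reg & DEMO_NBAR_MASK) >> 5, minus the magic shift */
	return FIELD_GET(DEMO_NBAR_MASK, reg);
}

static u32 demo_set_nbars(u32 reg, u32 nbars)
{
	reg &= ~DEMO_NBAR_MASK;
	return reg | FIELD_PREP(DEMO_NBAR_MASK, nbars);
}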
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index ecc33f6789e3..906277f9ffaf 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -227,7 +227,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(pci->dev->of_node);
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
@@ -523,6 +523,13 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
+ if (pci->num_lanes < 1)
+ pci->num_lanes = dw_pcie_link_get_max_link_width(pci);
+
+ ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
+ if (ret)
+ goto err_free_msi;
+
/*
* Allocate the resource for MSG TLP before programming the iATU
* outbound window in dw_pcie_setup_rc(). Since the allocation depends
@@ -567,7 +574,7 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pp->ops->post_init)
pp->ops->post_init(pp);
- dwc_pcie_debugfs_init(pci);
+ dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);
return 0;
@@ -828,6 +835,77 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
return 0;
}
+static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u8 lane_eq_offset, lane_reg_size, cap_id;
+ u8 *presets;
+ u32 cap;
+ int i;
+
+ if (speed == PCIE_SPEED_8_0GT) {
+ presets = (u8 *)pp->presets.eq_presets_8gts;
+ lane_eq_offset = PCI_SECPCI_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_SECPCI;
+ /* For data rate of 8 GT/s, each lane equalization control is 16 bits wide */
+ lane_reg_size = 0x2;
+ } else if (speed == PCIE_SPEED_16_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
+ lane_eq_offset = PCI_PL_16GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_16GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_32_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
+ lane_eq_offset = PCI_PL_32GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_32GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_64_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
+ lane_eq_offset = PCI_PL_64GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_64GT;
+ lane_reg_size = 0x1;
+ } else {
+ return;
+ }
+
+ if (presets[0] == PCI_EQ_RESV)
+ return;
+
+ cap = dw_pcie_find_ext_capability(pci, cap_id);
+ if (!cap)
+ return;
+
+ /*
+ * Write preset values to the registers byte-by-byte for the given
+ * number of lanes and register size.
+ */
+ for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
+ dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
+}
+
+static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
+
+ /*
+ * Lane equalization settings need to be applied for all data rates the
+ * controller supports and for all supported lanes.
+ */
+
+ if (speed >= PCIE_SPEED_8_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
+
+ if (speed >= PCIE_SPEED_16_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
+
+ if (speed >= PCIE_SPEED_32_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
+
+ if (speed >= PCIE_SPEED_64_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
+}
+
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -881,6 +959,7 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ dw_pcie_config_presets(pp);
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 97d76d3dc066..4d794964fa0f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -54,6 +54,14 @@ static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
[DW_PCIE_PWR_RST] = "pwr",
};
+static const struct dwc_pcie_vsec_id dwc_pcie_ptm_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* EP */
+ .vsec_id = 0x03, .vsec_rev = 0x1 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* RC */
+ .vsec_id = 0x04, .vsec_rev = 0x1 },
+ { }
+};
+
static int dw_pcie_get_clocks(struct dw_pcie *pci)
{
int i, ret;
@@ -330,6 +338,12 @@ u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_ptm_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ptm_capability);
+
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -711,7 +725,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
-int dw_pcie_link_up(struct dw_pcie *pci)
+bool dw_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
@@ -781,6 +795,14 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
}
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci)
+{
+ u8 cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+}
+
static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
{
u32 lnkcap, lwsc, plc;
@@ -797,22 +819,19 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
/* Set link width speed control register */
lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
switch (num_lanes) {
case 1:
plc |= PORT_LINK_MODE_1_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
break;
case 2:
plc |= PORT_LINK_MODE_2_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES;
break;
case 4:
plc |= PORT_LINK_MODE_4_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES;
break;
case 8:
plc |= PORT_LINK_MODE_8_LANES;
- lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES;
break;
default:
dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 56aafdbcdaca..ce9e18554e42 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -25,6 +25,8 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include "../../pci.h"
+
/* DWC PCIe IP-core versions (native support since v4.70a) */
#define DW_PCIE_VER_365A 0x3336352a
#define DW_PCIE_VER_460A 0x3436302a
@@ -260,6 +262,21 @@
#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+/* PTM register definitions */
+#define PTM_RES_REQ_CTRL 0x8
+#define PTM_RES_CCONTEXT_VALID BIT(0)
+#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0)
+#define PTM_REQ_START_UPDATE BIT(1)
+
+#define PTM_LOCAL_LSB 0x10
+#define PTM_LOCAL_MSB 0x14
+#define PTM_T1_T2_LSB 0x18
+#define PTM_T1_T2_MSB 0x1c
+#define PTM_T3_T4_LSB 0x28
+#define PTM_T3_T4_MSB 0x2c
+#define PTM_MASTER_LSB 0x38
+#define PTM_MASTER_MSB 0x3c
+
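These vendor-specific PTM registers expose each timestamp as a 32-bit LSB/MSB pair. A minimal consumer sketch, assuming the ptm_vsec_offset field added to struct dw_pcie further down and a hypothetical helper name (a real implementation would also have to guard against MSB rollover between the two reads):

/* Hypothetical helper, not part of this patch. */
static u64 dw_pcie_ptm_read64(struct dw_pcie *pci, u32 lsb)
{
        u32 lo, hi;

        hi = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + lsb + 4);
        lo = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + lsb);

        return ((u64)hi << 32) | lo;    /* e.g. lsb = PTM_LOCAL_LSB */
}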
/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
@@ -412,6 +429,7 @@ struct dw_pcie_rp {
int msg_atu_index;
struct resource *msg_res;
bool use_linkup_irq;
+ struct pci_eq_presets presets;
};
struct dw_pcie_ep_ops {
@@ -462,7 +480,7 @@ struct dw_pcie_ops {
size_t size, u32 val);
void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- int (*link_up)(struct dw_pcie *pcie);
+ bool (*link_up)(struct dw_pcie *pcie);
enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
@@ -503,6 +521,9 @@ struct dw_pcie {
struct gpio_desc *pe_rst;
bool suspended;
struct debugfs_info *debugfs;
+ enum dw_pcie_device_mode mode;
+ u16 ptm_vsec_offset;
+ struct pci_ptm_debugfs *ptm_debugfs;
/*
* If iATU input addresses are offset from CPU physical addresses,
@@ -530,6 +551,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci);
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
@@ -537,9 +559,10 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-int dw_pcie_link_up(struct dw_pcie *pci);
+bool dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci);
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
@@ -871,10 +894,11 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
#endif
#ifdef CONFIG_PCIE_DW_DEBUGFS
-void dwc_pcie_debugfs_init(struct dw_pcie *pci);
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode);
void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
#else
-static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci,
+ enum dw_pcie_device_mode mode)
{
}
static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index c624b7ebd118..93171a392879 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -8,6 +8,7 @@
* Author: Simon Xue <xxm@rock-chips.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/irqchip/chained_irq.h>
@@ -21,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/*
@@ -33,26 +35,36 @@
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
-#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
-#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
-#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
-#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
-#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
-#define PCIE_CLIENT_INTR_MASK_MISC 0x24
-#define PCIE_SMLH_LINKUP BIT(16)
-#define PCIE_RDLH_LINKUP BIT(17)
-#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
-#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
-#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
-#define PCIE_L0S_ENTRY 0x11
-#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+/* General Control Register */
+#define PCIE_CLIENT_GENERAL_CON 0x0
+#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
+#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
+
+/* Interrupt Status Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+
+/* Interrupt Status Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
+
+/* Interrupt Mask Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
-#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+
+/* Interrupt Mask Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
+
+/* Hot Reset Control Register */
#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+
+/* LTSSM Status Register */
#define PCIE_CLIENT_LTSSM_STATUS 0x300
-#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
-#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+#define PCIE_LINKUP 0x3
+#define PCIE_LINKUP_MASK GENMASK(17, 16)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
struct dw_pcie pci;
@@ -144,8 +156,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -163,25 +175,36 @@ static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
}
static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
}
-static int rockchip_pcie_link_up(struct dw_pcie *pci)
+static bool rockchip_pcie_link_up(struct dw_pcie *pci)
{
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
u32 val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
- (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
- return 1;
+ return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP;
+}
- return 0;
+static void rockchip_pcie_enable_l0s(struct dw_pcie *pci)
+{
+ u32 cap, lnkcap;
+
+ /* Enable L0S capability for all SoCs */
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ if (cap) {
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap |= PCI_EXP_LNKCAP_ASPM_L0S;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
}
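LNKCAP is normally read-only, which is why the write above is bracketed by the DBI read-only write-enable pair. The pattern generalizes to any RO config register behind the DBI; a sketch of a hypothetical helper:

/* Hypothetical helper, not in this patch: set bits in a RO DBI register. */
static void dw_pcie_rmw_ro_dbi(struct dw_pcie *pci, u32 reg, u32 set)
{
        u32 val;

        dw_pcie_dbi_ro_wr_en(pci);              /* unlock RO DBI registers */
        val = dw_pcie_readl_dbi(pci, reg);
        dw_pcie_writel_dbi(pci, reg, val | set);
        dw_pcie_dbi_ro_wr_dis(pci);             /* relock */
}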
static int rockchip_pcie_start_link(struct dw_pcie *pci)
@@ -202,7 +225,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
* Give the device more time than the bare 100us minimum, since we do
* not know how long it actually needs to come out of reset.
*/
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
return 0;
@@ -233,6 +256,8 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
+ rockchip_pcie_enable_l0s(pci);
+
return 0;
}
@@ -263,16 +288,14 @@ static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
dev_err(dev, "failed to hide ATS capability\n");
}
-static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep)
-{
- rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
-}
-
static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
+ rockchip_pcie_enable_l0s(pci);
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
};
@@ -342,7 +365,6 @@ rockchip_pcie_get_features(struct dw_pcie_ep *ep)
static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
.init = rockchip_pcie_ep_init,
- .pre_init = rockchip_pcie_ep_pre_init,
.raise_irq = rockchip_pcie_raise_irq,
.get_features = rockchip_pcie_get_features,
};
@@ -410,8 +432,8 @@ static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
{
- phy_exit(rockchip->phy);
phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
}
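The swap restores leaf-first teardown. Bring-up performs phy_init() and then phy_power_on(), so the release path must mirror it in reverse:

        /* bring-up order ... */
        phy_init(phy);
        phy_power_on(phy);
        /* ... so teardown reverses it: */
        phy_power_off(phy);
        phy_exit(phy);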
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -426,7 +448,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
struct dw_pcie *pci = &rockchip->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- u32 reg, val;
+ u32 reg;
reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
@@ -435,8 +457,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
- val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ if (rockchip_pcie_link_up(pci)) {
dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
/* Rescan the bus to enumerate endpoint devices */
pci_lock_rescan_remove();
@@ -453,7 +474,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
struct rockchip_pcie *rockchip = arg;
struct dw_pcie *pci = &rockchip->pci;
struct device *dev = pci->dev;
- u32 reg, val;
+ u32 reg;
reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
@@ -467,8 +488,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
}
if (reg & PCIE_RDLH_LINK_UP_CHGED) {
- val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ if (rockchip_pcie_link_up(pci)) {
dev_dbg(dev, "link up\n");
dw_pcie_ep_linkup(&pci->ep);
}
@@ -505,7 +525,7 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
pp = &rockchip->pci.pp;
pp->ops = &rockchip_pcie_host_ops;
@@ -551,7 +571,7 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
rockchip->pci.ep.page_size = SZ_64K;
@@ -601,6 +621,10 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->pci.ops = &dw_pcie_ops;
rockchip->data = data;
+ /* The default N_FTS value (210) is broken; override it to 255 */
+ rockchip->pci.n_fts[0] = 255; /* Gen1 */
+ rockchip->pci.n_fts[1] = 255; /* Gen2+ */
+
ret = rockchip_pcie_resource_get(pdev, rockchip);
if (ret)
return ret;
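The core consumes these overrides in dw_pcie_setup(): the Gen1 value is programmed into the Ack Frequency Register and the Gen2+ value into LINK_WIDTH_SPEED_CONTROL. Roughly (simplified from pcie-designware.c, shown here for context only):

        u32 val;

        if (pci->n_fts[0]) {
                val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
                val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
                val |= PORT_AFR_N_FTS(pci->n_fts[0]);
                dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
        }

        if (pci->n_fts[1]) {
                val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
                val &= ~PORT_LOGIC_N_FTS_MASK;
                val |= pci->n_fts[1];
                dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
        }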
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 8904b5b85ee5..3c17897e56fc 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -15,6 +15,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 1f2f4c28a949..a52071589377 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -151,7 +151,7 @@ static struct pci_ops histb_pci_ops = {
.write = histb_pcie_wr_own_conf,
};
-static int histb_pcie_link_up(struct dw_pcie *pci)
+static bool histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
@@ -160,11 +160,8 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
status &= PCIE_LTSSM_STATE_MASK;
- if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
- (status == PCIE_LTSSM_STATE_ACTIVE))
- return 1;
-
- return 0;
+ return ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE));
}
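The same int-to-bool conversion recurs throughout this series: link_up callbacks are pure predicates, so the if/return-1/return-0 shape collapses into returning the condition itself. The core's default DBI-based check has the same form; as a sketch:

static bool example_link_up(struct dw_pcie *pci)
{
        u32 val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);

        /* implicit conversion to bool is well-defined for any set bit */
        return (val & PCIE_PORT_DEBUG1_LINK_UP) &&
               !(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING);
}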
static int histb_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 278205db60a2..67dd3337b447 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -101,7 +101,7 @@ static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
}
-static int keembay_pcie_link_up(struct dw_pcie *pci)
+static bool keembay_pcie_link_up(struct dw_pcie *pci)
{
struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
u32 val;
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d0e6a3811b00..91559c8b1866 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -586,16 +586,13 @@ static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}
-static int kirin_pcie_link_up(struct dw_pcie *pci)
+static bool kirin_pcie_link_up(struct dw_pcie *pci)
{
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
u32 val;
regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
- if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
- return 1;
-
- return 0;
+ return (val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE;
}
static int kirin_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 46b1c6d19974..bf7c6ac0f3e3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -60,6 +60,7 @@
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
#define PARF_INT_ALL_5_MASK 0x2dcc
+#define PARF_INT_ALL_3_MASK 0x2e18
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN BIT(1)
@@ -132,6 +133,9 @@
/* PARF_INT_ALL_5_MASK fields */
#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+/* PARF_INT_ALL_3_MASK fields */
+#define PARF_INT_ALL_3_PTM_UPDATING BIT(4)
+
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
#define ELBI_CS2_ENABLE 0xa4
@@ -261,7 +265,7 @@ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
}
}
-static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
u32 reg;
@@ -497,6 +501,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
}
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_3_MASK);
+ val &= ~PARF_INT_ALL_3_PTM_UPDATING;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_3_MASK);
+
ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index dc98ae63362d..c789e3f85655 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -289,7 +289,7 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
/* Ensure that PERST has been asserted for at least 100 ms */
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(pcie->reset, 0);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
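PCIE_T_PVPERL_MS replaces the magic 100 with the named constant from drivers/pci/pci.h, the CEM spec's Tpvperl minimum (power stable to PERST# inactive):

/* drivers/pci/pci.h */
#define PCIE_T_PVPERL_MS        100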
@@ -1221,12 +1221,12 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
@@ -1840,6 +1840,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index fc872dd35029..18055807a4f5 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -87,7 +87,7 @@ struct rcar_gen4_pcie {
#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
/* Common */
-static int rcar_gen4_pcie_link_up(struct dw_pcie *dw)
+static bool rcar_gen4_pcie_link_up(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
u32 val, mask;
@@ -403,6 +403,7 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
.msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
.bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_1M,
};
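BAR_FIXED tells endpoint function drivers that BAR 4 exists and must be allocated at exactly 256 bytes on this controller. A hypothetical EPF-side check, as a sketch (epc, func_no and vfunc_no are assumed to be in scope):

        const struct pci_epc_features *feat;
        u64 bar_size;

        feat = pci_epc_get_features(epc, func_no, vfunc_no);
        if (feat && feat->bar[BAR_4].type == BAR_FIXED)
                bar_size = feat->bar[BAR_4].fixed_size; /* 256 here */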
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index ff986ced56b2..01794a9d3ad2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -110,15 +110,12 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc
MSI_CTRL_INT, &app_reg->int_mask);
}
-static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+static bool spear13xx_pcie_link_up(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
- if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
- return 1;
-
- return 0;
+ return readl(&app_reg->app_status_1) & XMLH_LINK_UP;
}
static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 5103995cd6c7..4f26086f25da 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -713,7 +713,16 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
static void init_debugfs(struct tegra_pcie_dw *pcie)
{
- debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ struct device *dev = pcie->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+
+ debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
@@ -1027,12 +1036,12 @@ retry_link:
return 0;
}
-static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+static bool tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
@@ -1634,7 +1643,6 @@ static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
struct device *dev = pcie->dev;
- char *name;
int ret;
pm_runtime_enable(dev);
@@ -1664,13 +1672,6 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_host_init;
}
- name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
- if (!name) {
- ret = -ENOMEM;
- goto fail_host_init;
- }
-
- pcie->debugfs = debugfs_create_dir(name, NULL);
init_debugfs(pcie);
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 5757ca3803c9..297e7a3d9b36 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -135,7 +135,7 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
return 0;
}
-static int uniphier_pcie_link_up(struct dw_pcie *pci)
+static bool uniphier_pcie_link_up(struct dw_pcie *pci)
{
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
u32 val, mask;
@@ -279,7 +279,7 @@ static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
goto out_put_node;
}
- pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
if (!pcie->intx_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 318c278e65c8..cdeac6177143 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -121,13 +121,13 @@ static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
return readl_relaxed(pcie->mpu_base + reg);
}
-static int visconti_pcie_link_up(struct dw_pcie *pci)
+static bool visconti_pcie_link_up(struct dw_pcie *pci)
{
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
void __iomem *addr = pcie->ulreg_base;
u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
- return !!(val & PCIE_UL_S_L0);
+ return val & PCIE_UL_S_L0;
}
static int visconti_pcie_start_link(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index 5af22bee913b..4919b27eaf44 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -53,18 +53,13 @@ static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
+static bool ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
{
struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
u32 state;
state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
- state = state & PF_DBG_LTSSM_MASK;
-
- if (state == PF_DBG_LTSSM_L0)
- return 1;
-
- return 0;
+ return (state & PF_DBG_LTSSM_MASK) == PF_DBG_LTSSM_L0;
}
static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
@@ -174,8 +169,7 @@ static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
static void ls_g4_pcie_reset(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
struct mobiveil_pcie *mv_pci = &pcie->pci;
u16 ctrl;
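to_delayed_work() is the stock <linux/workqueue.h> wrapper for exactly this container_of() pattern:

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
        return container_of(work, struct delayed_work, work);
}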
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 0e088e74155d..a600f46ee3c3 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -435,12 +435,12 @@ static const struct irq_domain_ops msi_domain_ops = {
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_init(&msi->lock);
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, pcie);
+ msi->dev_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -461,12 +461,11 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
struct mobiveil_root_port *rp = &pcie->rp;
/* setup INTx */
- rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ rp->intx_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index e63abb887ee3..662f17f9bf65 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -160,7 +160,7 @@ struct mobiveil_root_port {
};
struct mobiveil_pab_ops {
- int (*link_up)(struct mobiveil_pcie *pcie);
+ bool (*link_up)(struct mobiveil_pcie *pcie);
};
struct mobiveil_pcie {
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index a29796cce420..7bac64533b14 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -1456,9 +1456,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- pcie->msi_inner_domain =
- irq_domain_add_linear(NULL, MSI_IRQ_NUM,
- &advk_msi_domain_ops, pcie);
+ pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM,
+ &advk_msi_domain_ops, pcie);
if (!pcie->msi_inner_domain)
return -ENOMEM;
@@ -1508,9 +1507,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->irq_mask = advk_pcie_irq_mask;
irq_chip->irq_unmask = advk_pcie_irq_unmask;
- pcie->irq_domain =
- irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &advk_pcie_irq_domain_ops, pcie);
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
ret = -ENOMEM;
@@ -1549,9 +1547,7 @@ static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
- pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
- &advk_pcie_rp_irq_domain_ops,
- pcie);
+ pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie);
if (!pcie->rp_irq_domain) {
dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
return -ENOMEM;
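These hunks are all the same mechanical migration: irq_domain_add_linear() took a struct device_node, while the preferred irq_domain_create_linear() takes a struct fwnode_handle, so OF callers wrap the node with of_fwnode_handle(). A NULL node still yields an anonymous domain:

        /* before */
        d = irq_domain_add_linear(np, n, ops, priv);
        /* after */
        d = irq_domain_create_linear(of_fwnode_handle(np), n, ops, priv);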
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index ffdeed25e961..28e43831c0f1 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -345,8 +345,8 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return irq ?: -EINVAL;
}
- p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &faraday_pci_irqdomain_ops, p);
+ p->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &faraday_pci_irqdomain_ops, p);
of_node_put(intc);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index f441bfd6f96a..b0992325dd65 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Generic PCI host driver common code
+ * Common library for PCI host controller drivers
*
* Copyright (C) 2014 ARM Limited
*
@@ -15,6 +15,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static void gen_pci_unmap_cfg(void *ptr)
{
pci_ecam_free((struct pci_config_window *)ptr);
@@ -49,23 +51,17 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
return cfg;
}
-int pci_host_common_probe(struct platform_device *pdev)
+int pci_host_common_init(struct platform_device *pdev,
+ const struct pci_ecam_ops *ops)
{
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
- const struct pci_ecam_ops *ops;
-
- ops = of_device_get_match_data(&pdev->dev);
- if (!ops)
- return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
- platform_set_drvdata(pdev, bridge);
-
of_pci_check_probe_only();
/* Parse and map our Configuration Space windows */
@@ -73,6 +69,8 @@ int pci_host_common_probe(struct platform_device *pdev)
if (IS_ERR(cfg))
return PTR_ERR(cfg);
+ platform_set_drvdata(pdev, bridge);
+
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
bridge->enable_device = ops->enable_device;
@@ -81,6 +79,18 @@ int pci_host_common_probe(struct platform_device *pdev)
return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(pci_host_common_init);
+
+int pci_host_common_probe(struct platform_device *pdev)
+{
+ const struct pci_ecam_ops *ops;
+
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -ENODEV;
+
+ return pci_host_common_init(pdev, ops);
+}
EXPORT_SYMBOL_GPL(pci_host_common_probe);
void pci_host_common_remove(struct platform_device *pdev)
@@ -94,5 +104,5 @@ void pci_host_common_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
-MODULE_DESCRIPTION("Generic PCI host common driver");
+MODULE_DESCRIPTION("Common library for PCI host controller drivers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h
new file mode 100644
index 000000000000..65bd9e032353
--- /dev/null
+++ b/drivers/pci/controller/pci-host-common.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common library for PCI host controller drivers
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _PCI_HOST_COMMON_H
+#define _PCI_HOST_COMMON_H
+
+struct pci_ecam_ops;
+
+int pci_host_common_probe(struct platform_device *pdev);
+int pci_host_common_init(struct platform_device *pdev,
+ const struct pci_ecam_ops *ops);
+void pci_host_common_remove(struct platform_device *pdev);
+
+#endif
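Splitting pci_host_common_probe() this way lets drivers that know their pci_ecam_ops at compile time bypass the OF match-data lookup. A hypothetical caller, as a sketch (my_ecam_ops is assumed):

static int my_host_probe(struct platform_device *pdev)
{
        return pci_host_common_init(pdev, &my_ecam_ops);
}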
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 4051b9b61dac..c1bc0d34348f 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -14,6 +14,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index ac27bda5ba26..ef5d655a0052 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -50,6 +50,7 @@
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
+#include <linux/of_irq.h>
#include <asm/mshyperv.h>
/*
@@ -309,8 +310,6 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
-
- struct pci_message message[];
};
/*
@@ -817,9 +816,17 @@ static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
int ret;
fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ if (is_of_node(fwspec.fwnode)) {
+ /* SPI lines for OF translations start at offset 32 */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ }
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
@@ -887,10 +894,44 @@ static const struct irq_domain_ops hv_pci_domain_ops = {
.activate = hv_pci_vec_irq_domain_activate,
};
+#ifdef CONFIG_OF
+
+static struct irq_domain *hv_pci_of_irq_domain_parent(void)
+{
+ struct device_node *parent;
+ struct irq_domain *domain;
+
+ parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
+ if (!parent)
+ return NULL;
+ domain = irq_find_host(parent);
+ of_node_put(parent);
+
+ return domain;
+}
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
+{
+ acpi_gsi_domain_disp_fn gsi_domain_disp_fn;
+
+ gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
+ if (!gsi_domain_disp_fn)
+ return NULL;
+ return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
+ DOMAIN_BUS_ANY);
+}
+
+#endif
+
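These parent-lookup helpers pair with the fwspec change above: the DT GIC binding encodes an SPI as three cells (0 for GIC_SPI, the hwirq minus the 32-interrupt SPI base, and the trigger type), while the ACPI path keeps the two-cell absolute-GSI form. Restated schematically:

        /* DT:   param = { GIC_SPI, hwirq - 32, IRQ_TYPE_EDGE_RISING } */
        /* ACPI: param = { hwirq, IRQ_TYPE_EDGE_RISING } */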
static int hv_pci_irqchip_init(void)
{
static struct hv_pci_chip_data *chip_data;
struct fwnode_handle *fn = NULL;
+ struct irq_domain *irq_domain_parent = NULL;
int ret = -ENOMEM;
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
@@ -907,9 +948,24 @@ static int hv_pci_irqchip_init(void)
* way to ensure that all the corresponding devices are also gone and
* no interrupts will be generated.
*/
- hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
- fn, &hv_pci_domain_ops,
- chip_data);
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ irq_domain_parent = hv_pci_acpi_irq_domain_parent();
+#endif
+#ifdef CONFIG_OF
+ if (!irq_domain_parent)
+ irq_domain_parent = hv_pci_of_irq_domain_parent();
+#endif
+ if (!irq_domain_parent) {
+ WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
+ ret = -EINVAL;
+ goto free_chip;
+ }
+
+ hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
+ HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
if (!hv_msi_gic_irq_domain) {
pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
@@ -1438,7 +1494,7 @@ static int hv_read_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_read_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- read_blk = (struct pci_read_block *)&pkt.pkt.message;
+ read_blk = (struct pci_read_block *)pkt.buf;
read_blk->message_type.type = PCI_READ_BLOCK;
read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
read_blk->block_id = block_id;
@@ -1518,7 +1574,7 @@ static int hv_write_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_write_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- write_blk = (struct pci_write_block *)&pkt.pkt.message;
+ write_blk = (struct pci_write_block *)pkt.buf;
write_blk->message_type.type = PCI_WRITE_BLOCK;
write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
write_blk->block_id = block_id;
@@ -1599,7 +1655,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
return;
}
memset(&ctxt, 0, sizeof(ctxt));
- int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
@@ -2482,7 +2538,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
comp_pkt.hpdev = hpdev;
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
- res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req = (struct pci_child_message *)pkt.buffer;
res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
@@ -2860,7 +2916,7 @@ static void hv_eject_device_work(struct work_struct *work)
pci_destroy_slot(hpdev->pci_slot);
memset(&ctxt, 0, sizeof(ctxt));
- ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
@@ -3118,7 +3174,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev,
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- version_req = (struct pci_version_request *)&pkt->message;
+ version_req = (struct pci_version_request *)(pkt + 1);
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
for (i = 0; i < num_version; i++) {
@@ -3340,7 +3396,7 @@ enter_d0_retry:
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
@@ -3498,20 +3554,20 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
- (struct pci_resources_assigned *)&pkt->message;
+ (struct pci_resources_assigned *)(pkt + 1);
res_assigned->message_type.type =
PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
} else {
res_assigned2 =
- (struct pci_resources_assigned2 *)&pkt->message;
+ (struct pci_resources_assigned2 *)(pkt + 1);
res_assigned2->message_type.type =
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
put_pcichild(hpdev);
- ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ ret = vmbus_sendpacket(hdev->channel, pkt + 1,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3809,6 +3865,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
+ struct pci_message *msg;
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev, *tmp;
unsigned long flags;
@@ -3854,10 +3911,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+ msg = (struct pci_message *)pkt.buffer;
+ msg->type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
+ ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
(unsigned long)&pkt.teardown_packet,
&trans_id, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3975,24 +4032,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
- int ret = 0;
if (!pdev->msi_enabled && !pdev->msix_enabled)
return 0;
- msi_lock_descs(&pdev->dev);
+ guard(msi_descs_lock)(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data)) {
- ret = -EINVAL;
- break;
- }
-
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
hv_compose_msi_msg(irq_data, &entry->msg);
}
- msi_unlock_descs(&pdev->dev);
-
- return ret;
+ return 0;
}
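guard(msi_descs_lock) is the scope-based form of msi_lock_descs()/msi_unlock_descs() built on <linux/cleanup.h>: the lock is released automatically on every exit from the scope, which is what makes the early return above safe. The general shape, as a sketch:

        guard(msi_descs_lock)(&pdev->dev);      /* unlocked at end of scope */
        msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
                if (some_error)                 /* hypothetical condition */
                        return -EINVAL;         /* still unlocks */
        }
        return 0;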
/*
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index b0e3bce10aa4..a4a2bac4f4b2 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1078,9 +1078,9 @@ static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
return -ENODEV;
}
- port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &mvebu_pcie_intx_irq_domain_ops,
- port);
+ port->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX,
+ &mvebu_pcie_intx_irq_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->intx_irq_domain) {
dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
@@ -1179,37 +1179,29 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
unsigned int *tgt,
unsigned int *attr)
{
- const int na = 3, ns = 2;
- const __be32 *range;
- int rlen, nranges, rangesz, pna, i;
+ struct of_range range;
+ struct of_range_parser parser;
*tgt = -1;
*attr = -1;
- range = of_get_property(np, "ranges", &rlen);
- if (!range)
+ if (of_pci_range_parser_init(&parser, np))
return -EINVAL;
- pna = of_n_addr_cells(np);
- rangesz = pna + na + ns;
- nranges = rlen / sizeof(__be32) / rangesz;
-
- for (i = 0; i < nranges; i++, range += rangesz) {
- u32 flags = of_read_number(range, 1);
- u32 slot = of_read_number(range + 1, 1);
- u64 cpuaddr = of_read_number(range + na, pna);
+ for_each_of_range(&parser, &range) {
unsigned long rtype;
+ u32 slot = upper_32_bits(range.bus_addr);
- if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
+ if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_IO)
rtype = IORESOURCE_IO;
- else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ else if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_MEM32)
rtype = IORESOURCE_MEM;
else
continue;
if (slot == PCI_SLOT(devfn) && type == rtype) {
- *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
- *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ *tgt = DT_CPUADDR_TO_TARGET(range.cpu_addr);
+ *attr = DT_CPUADDR_TO_ATTR(range.cpu_addr);
return 0;
}
}
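The rewrite trades hand-rolled "ranges" walking for the generic OF helpers, which hide the address-cell arithmetic; and because this binding carries the slot number in the high cell of the PCI bus address, upper_32_bits(range.bus_addr) replaces the manual of_read_number() math. The idiom in isolation:

        struct of_range_parser parser;
        struct of_range range;

        if (of_pci_range_parser_init(&parser, np))
                return -EINVAL;

        for_each_of_range(&parser, &range) {
                /* use range.flags, range.bus_addr, range.cpu_addr, range.size */
        }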
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index d2f88997283a..467ddc701adc 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -22,6 +22,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1547,7 +1548,7 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
unsigned int index = i * 32 + offset;
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/*
* That's weird: who triggered this?
@@ -1565,30 +1566,6 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void tegra_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void tegra_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void tegra_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip tegra_msi_top_chip = {
- .name = "Tegra PCIe MSI",
- .irq_ack = tegra_msi_top_irq_ack,
- .irq_mask = tegra_msi_top_irq_mask,
- .irq_unmask = tegra_msi_top_irq_unmask,
-};
-
static void tegra_msi_irq_ack(struct irq_data *d)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
@@ -1690,42 +1667,40 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
.free = tegra_msi_domain_free,
};
-static struct msi_domain_info tegra_msi_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
- .chip = &tegra_msi_top_chip,
+static const struct msi_parent_ops tegra_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT |
+ MSI_FLAG_NO_AFFINITY),
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int tegra_allocate_domains(struct tegra_msi *msi)
{
struct tegra_pcie *pcie = msi_to_pcie(msi);
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &tegra_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &tegra_msi_domain_ops,
+ .size = INT_PCI_MSI_NR,
+ .host_data = msi,
+ };
- msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops);
if (!msi->domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
-
return 0;
}
static void tegra_free_domains(struct tegra_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
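With msi-lib the driver keeps a single parent domain instead of the old nexus-plus-PCI/MSI pair, so teardown removes one domain and the hand-rolled top-level chip disappears. MSI_FLAG_PCI_MSI_MASK_PARENT makes the MSI core do what the removed callbacks did by hand:

        /* previously open-coded per driver, now generic: */
        static void top_irq_mask(struct irq_data *d)
        {
                pci_msi_mask_irq(d);
                irq_chip_mask_parent(d);
        }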
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index 08161065a89c..b5b4a958e6a2 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -11,6 +11,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
static void set_val(u32 v, int where, int size, u32 *val)
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index f1bd5de67997..5fa037fb61dc 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
+#include "pci-host-common.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 7bce327897c9..b05ec8b0bb93 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>
@@ -32,7 +33,6 @@ struct xgene_msi_group {
struct xgene_msi {
struct device_node *node;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
@@ -44,20 +44,6 @@ struct xgene_msi {
/* Global data */
static struct xgene_msi xgene_msi_ctrl;
-static struct irq_chip xgene_msi_top_irq_chip = {
- .name = "X-Gene1 MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info xgene_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &xgene_msi_top_irq_chip,
-};
-
/*
* X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
* n is group number (0..F), x is index of registers in each group (0..7)
@@ -235,34 +221,35 @@ static void xgene_irq_domain_free(struct irq_domain *domain,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static const struct irq_domain_ops msi_domain_ops = {
+static const struct irq_domain_ops xgene_msi_domain_ops = {
.alloc = xgene_irq_domain_alloc,
.free = xgene_irq_domain_free,
};
+static const struct msi_parent_ops xgene_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS),
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int xgene_allocate_domains(struct xgene_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
- &msi_domain_ops, msi);
- if (!msi->inner_domain)
- return -ENOMEM;
-
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
- &xgene_msi_domain_info,
- msi->inner_domain);
-
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
- return 0;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(msi->node),
+ .ops = &xgene_msi_domain_ops,
+ .size = NR_MSI_VEC,
+ .host_data = msi,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &xgene_msi_parent_ops);
+ return msi->inner_domain ? 0 : -ENOMEM;
}
static void xgene_free_domains(struct xgene_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
if (msi->inner_domain)
irq_domain_remove(msi->inner_domain);
}
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index e1cee3c0575f..a43f21eb8fbb 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -164,9 +164,9 @@ static const struct irq_domain_ops msi_domain_ops = {
static int altera_allocate_domains(struct altera_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(msi->pdev->dev.of_node);
- msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
&msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 70409e71a18f..0fc77176a52e 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -855,7 +855,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 18e11b9a7f46..77fe73976654 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -18,10 +18,12 @@
* Author: Marc Zyngier <maz@kernel.org>
*/
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -29,6 +31,9 @@
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
+#include "pci-host-common.h"
+
+/* T8103 (original M1) and related SoCs */
#define CORE_RC_PHYIF_CTL 0x00024
#define CORE_RC_PHYIF_CTL_RUN BIT(0)
#define CORE_RC_PHYIF_STAT 0x00028
@@ -39,14 +44,18 @@
#define CORE_RC_STAT_READY BIT(0)
#define CORE_FABRIC_STAT 0x04000
#define CORE_FABRIC_STAT_MASK 0x001F001F
-#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
-#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
-#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
-#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
-#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
-#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
-#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
-#define CORE_LANE_CTL_CFGACC BIT(15)
+
+#define CORE_PHY_DEFAULT_BASE(port) (0x84000 + 0x4000 * (port))
+
+#define PHY_LANE_CFG 0x00000
+#define PHY_LANE_CFG_REFCLK0REQ BIT(0)
+#define PHY_LANE_CFG_REFCLK1REQ BIT(1)
+#define PHY_LANE_CFG_REFCLK0ACK BIT(2)
+#define PHY_LANE_CFG_REFCLK1ACK BIT(3)
+#define PHY_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
+#define PHY_LANE_CFG_REFCLKCGEN (BIT(30) | BIT(31))
+#define PHY_LANE_CTL 0x00004
+#define PHY_LANE_CTL_CFGACC BIT(15)
#define PORT_LTSSMCTL 0x00080
#define PORT_LTSSMCTL_START BIT(0)
@@ -100,7 +109,7 @@
#define PORT_REFCLK_CGDIS BIT(8)
#define PORT_PERST 0x00814
#define PORT_PERST_OFF BIT(0)
-#define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
+#define PORT_RID2SID 0x00828
#define PORT_RID2SID_VALID BIT(31)
#define PORT_RID2SID_SID_SHIFT 16
#define PORT_RID2SID_BUS_SHIFT 8
@@ -118,7 +127,15 @@
#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
#define PORT_PREFMEM_ENABLE 0x00994
-#define MAX_RID2SID 64
+/* T602x (M2-pro and co) */
+#define PORT_T602X_MSIADDR 0x016c
+#define PORT_T602X_MSIADDR_HI 0x0170
+#define PORT_T602X_PERST 0x082c
+#define PORT_T602X_RID2SID 0x3000
+#define PORT_T602X_MSIMAP 0x3800
+
+#define PORT_MSIMAP_ENABLE BIT(31)
+#define PORT_MSIMAP_TARGET GENMASK(7, 0)
/*
* The doorbell address is set to 0xfffff000, which by convention
@@ -129,11 +146,45 @@
*/
#define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
+struct hw_info {
+ u32 phy_lane_ctl;
+ u32 port_msiaddr;
+ u32 port_msiaddr_hi;
+ u32 port_refclk;
+ u32 port_perst;
+ u32 port_rid2sid;
+ u32 port_msimap;
+ u32 max_rid2sid;
+};
+
+static const struct hw_info t8103_hw = {
+ .phy_lane_ctl = PHY_LANE_CTL,
+ .port_msiaddr = PORT_MSIADDR,
+ .port_msiaddr_hi = 0,
+ .port_refclk = PORT_REFCLK,
+ .port_perst = PORT_PERST,
+ .port_rid2sid = PORT_RID2SID,
+ .port_msimap = 0,
+ .max_rid2sid = 64,
+};
+
+static const struct hw_info t602x_hw = {
+ .phy_lane_ctl = 0,
+ .port_msiaddr = PORT_T602X_MSIADDR,
+ .port_msiaddr_hi = PORT_T602X_MSIADDR_HI,
+ .port_refclk = 0,
+ .port_perst = PORT_T602X_PERST,
+ .port_rid2sid = PORT_T602X_RID2SID,
+ .port_msimap = PORT_T602X_MSIMAP,
+ /* 16 on t602x, guess for autodetect on future HW */
+ .max_rid2sid = 512,
+};
+
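Zero-valued offsets in struct hw_info double as feature flags: a field left at 0 means the register does not exist on that SoC, and call sites test before touching it, for example:

        if (pcie->hw->port_refclk)      /* no REFCLK register on T602x */
                rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);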
struct apple_pcie {
struct mutex lock;
struct device *dev;
void __iomem *base;
- struct irq_domain *domain;
+ const struct hw_info *hw;
unsigned long *bitmap;
struct list_head ports;
struct completion event;
@@ -142,12 +193,14 @@ struct apple_pcie {
};
struct apple_pcie_port {
+ raw_spinlock_t lock;
struct apple_pcie *pcie;
struct device_node *np;
void __iomem *base;
+ void __iomem *phy;
struct irq_domain *domain;
struct list_head entry;
- DECLARE_BITMAP(sid_map, MAX_RID2SID);
+ unsigned long *sid_map;
int sid_map_sz;
int idx;
};
@@ -162,27 +215,6 @@ static void rmw_clear(u32 clr, void __iomem *addr)
writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}
-static void apple_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void apple_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip apple_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_mask = apple_msi_top_irq_mask,
- .irq_unmask = apple_msi_top_irq_unmask,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
- .irq_set_type = irq_chip_set_type_parent,
-};
-
static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
msg->address_hi = upper_32_bits(DOORBELL_ADDR);
@@ -226,8 +258,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &apple_msi_bottom_chip,
- domain->host_data);
+ &apple_msi_bottom_chip, pcie);
}
return 0;
@@ -251,24 +282,20 @@ static const struct irq_domain_ops apple_msi_domain_ops = {
.free = apple_msi_domain_free,
};
-static struct msi_domain_info apple_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .chip = &apple_msi_top_chip,
-};
-
static void apple_port_irq_mask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static void apple_port_irq_unmask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static bool hwirq_is_intx(unsigned int hwirq)
@@ -372,7 +399,9 @@ static void apple_port_irq_handler(struct irq_desc *desc)
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
struct fwnode_handle *fwnode = &port->np->fwnode;
+ struct apple_pcie *pcie = port->pcie;
unsigned int irq;
+ u32 val = 0;
/* FIXME: consider moving each interrupt under each port */
irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
@@ -387,20 +416,31 @@ static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
return -ENOMEM;
/* Disable all interrupts */
- writel_relaxed(~0, port->base + PORT_INTMSKSET);
+ writel_relaxed(~0, port->base + PORT_INTMSK);
writel_relaxed(~0, port->base + PORT_INTSTAT);
+ writel_relaxed(~0, port->base + PORT_LINKCMDSTS);
irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
/* Configure MSI base address */
BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
- writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
+ writel_relaxed(lower_32_bits(DOORBELL_ADDR),
+ port->base + pcie->hw->port_msiaddr);
+ if (pcie->hw->port_msiaddr_hi)
+ writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi);
/* Enable MSIs, shared between all ports */
- writel_relaxed(0, port->base + PORT_MSIBASE);
- writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
- PORT_MSICFG_EN, port->base + PORT_MSICFG);
+ if (pcie->hw->port_msimap) {
+ for (int i = 0; i < pcie->nvecs; i++)
+ writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) |
+ PORT_MSIMAP_ENABLE,
+ port->base + pcie->hw->port_msimap + 4 * i);
+ } else {
+ writel_relaxed(0, port->base + PORT_MSIBASE);
+ val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT;
+ }
+ writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG);
return 0;
}
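The MSI plumbing above now branches on the hardware description; a comment-only walkthrough of the two schemes, using a hypothetical pcie->nvecs of 32:

/*
 * Legacy (t8103-style) hardware, no port_msimap: a single log2
 * vector-count field is programmed, so val below ends up nonzero.
 *
 *	PORT_MSIBASE <- 0
 *	PORT_MSICFG  <- (ilog2(32) << PORT_MSICFG_L2MSINUM_SHIFT) | PORT_MSICFG_EN
 *
 * Hardware with a per-vector map (port_msimap set): one map register
 * per vector, and val stays 0 so MSICFG only gets the enable bit.
 *
 *	MSIMAP[i]    <- FIELD_PREP(PORT_MSIMAP_TARGET, i) | PORT_MSIMAP_ENABLE, i = 0..31
 *	PORT_MSICFG  <- PORT_MSICFG_EN
 */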
@@ -467,43 +507,47 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
u32 stat;
int res;
- res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
- stat & CORE_RC_PHYIF_STAT_REFCLK,
- 100, 50000);
- if (res < 0)
- return res;
+ if (pcie->hw->phy_lane_ctl)
+ rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
- rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
- rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG);
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK0ACK,
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK0ACK,
100, 50000);
if (res < 0)
return res;
- rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK1ACK,
+ rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG);
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK1ACK,
100, 50000);
if (res < 0)
return res;
- rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+ if (pcie->hw->phy_lane_ctl)
+ rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
- rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
- rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);
+ rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG);
+
+ if (pcie->hw->port_refclk)
+ rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);
return 0;
}
+static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx)
+{
+ return port->base + port->pcie->hw->port_rid2sid + 4 * idx;
+}
+
static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
int idx, u32 val)
{
- writel_relaxed(val, port->base + PORT_RID2SID(idx));
+ writel_relaxed(val, port_rid2sid_addr(port, idx));
/* Read back to ensure completion of the write */
- return readl_relaxed(port->base + PORT_RID2SID(idx));
+ return readl_relaxed(port_rid2sid_addr(port, idx));
}
static int apple_pcie_setup_port(struct apple_pcie *pcie,
@@ -512,6 +556,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
struct platform_device *platform = to_platform_device(pcie->dev);
struct apple_pcie_port *port;
struct gpio_desc *reset;
+ struct resource *res;
+ char name[16];
u32 stat, idx;
int ret, i;
@@ -524,6 +570,10 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
if (!port)
return -ENOMEM;
+ port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL);
+ if (!port->sid_map)
+ return -ENOMEM;
+
ret = of_property_read_u32_index(np, "reg", 0, &idx);
if (ret)
return ret;
@@ -533,14 +583,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
port->pcie = pcie;
port->np = np;
- port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
+ raw_spin_lock_init(&port->lock);
+
+ snprintf(name, sizeof(name), "port%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (!res)
+ res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2);
+
+ port->base = devm_ioremap_resource(&platform->dev, res);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
+ snprintf(name, sizeof(name), "phy%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (res)
+ port->phy = devm_ioremap_resource(&platform->dev, res);
+ else
+ port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx);
+
rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
/* Assert PERST# before setting up the clock */
- gpiod_set_value(reset, 1);
+ gpiod_set_value_cansleep(reset, 1);
ret = apple_pcie_setup_refclk(pcie, port);
if (ret < 0)
@@ -550,8 +614,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
usleep_range(100, 200);
/* Deassert PERST# */
- rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
- gpiod_set_value(reset, 0);
+ rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst);
+ gpiod_set_value_cansleep(reset, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
@@ -563,7 +627,11 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
}
- rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+ if (pcie->hw->port_refclk)
+ rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk);
+ else
+ rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG);
+
rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
ret = apple_pcie_port_setup_irq(port);
@@ -571,7 +639,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
/* Reset all RID/SID mappings, and check for RAZ/WI registers */
- for (i = 0; i < MAX_RID2SID; i++) {
+ for (i = 0; i < pcie->hw->max_rid2sid; i++) {
if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
break;
apple_pcie_rid2sid_write(port, i, 0);
@@ -584,6 +652,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
list_add_tail(&port->entry, &pcie->ports);
init_completion(&pcie->event);
+ /* In the success path, we keep a reference to np around */
+ of_node_get(np);
+
ret = apple_pcie_port_register_irqs(port);
WARN_ON(ret);
@@ -595,11 +666,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return 0;
}
+static const struct msi_parent_ops apple_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT),
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int apple_msi_init(struct apple_pcie *pcie)
{
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &apple_msi_domain_ops,
+ .size = pcie->nvecs,
+ .host_data = pcie,
+ };
struct of_phandle_args args = {};
- struct irq_domain *parent;
int ret;
ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
@@ -619,28 +707,16 @@ static int apple_msi_init(struct apple_pcie *pcie)
if (!pcie->bitmap)
return -ENOMEM;
- parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
- if (!parent) {
+ info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
+ if (!info.parent) {
dev_err(pcie->dev, "failed to find parent domain\n");
return -ENXIO;
}
- parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
- &apple_msi_domain_ops, pcie);
- if (!parent) {
+ if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
- parent);
- if (!pcie->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
- return -ENOMEM;
- }
-
return 0;
}
@@ -716,7 +792,7 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
u32 val;
- val = readl_relaxed(port->base + PORT_RID2SID(idx));
+ val = readl_relaxed(port_rid2sid_addr(port, idx));
if ((val & 0xffff) == rid) {
apple_pcie_rid2sid_write(port, idx, 0);
bitmap_release_region(port->sid_map, idx, 0);
@@ -730,34 +806,14 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci
static int apple_pcie_init(struct pci_config_window *cfg)
{
+ struct apple_pcie *pcie = cfg->priv;
struct device *dev = cfg->parent;
- struct platform_device *platform = to_platform_device(dev);
- struct apple_pcie *pcie;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
-
- pcie->dev = dev;
-
- mutex_init(&pcie->lock);
-
- pcie->base = devm_platform_ioremap_resource(platform, 1);
- if (IS_ERR(pcie->base))
- return PTR_ERR(pcie->base);
-
- cfg->priv = pcie;
- INIT_LIST_HEAD(&pcie->ports);
-
- ret = apple_msi_init(pcie);
- if (ret)
- return ret;
-
- for_each_child_of_node_scoped(dev->of_node, of_port) {
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
ret = apple_pcie_setup_port(pcie, of_port);
if (ret) {
- dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
+ dev_err(dev, "Port %pOF setup fail: %d\n", of_port, ret);
return ret;
}
}
@@ -776,14 +832,44 @@ static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
}
};
+static int apple_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = dev;
+ pcie->hw = of_device_get_match_data(dev);
+ if (!pcie->hw)
+ return -ENODEV;
+ pcie->base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ mutex_init(&pcie->lock);
+ INIT_LIST_HEAD(&pcie->ports);
+ dev_set_drvdata(dev, pcie);
+
+ ret = apple_msi_init(pcie);
+ if (ret)
+ return ret;
+
+ return pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops);
+}
+
static const struct of_device_id apple_pcie_of_match[] = {
- { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
+ { .compatible = "apple,t6020-pcie", .data = &t602x_hw },
+ { .compatible = "apple,pcie", .data = &t8103_hw },
{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
static struct platform_driver apple_pcie_driver = {
- .probe = pci_host_common_probe,
+ .probe = apple_pcie_probe,
.driver = {
.name = "pcie-apple",
.of_match_table = apple_pcie_of_match,
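The hunks above dereference a per-SoC register-layout description through pcie->hw, whose definition (together with the t8103_hw and t602x_hw match data) lies outside the hunks shown. A sketch of what that structure must contain, reconstructed from the fields the code reads; the "zero offset means register absent on this SoC" convention follows from the if (pcie->hw->...) tests, but this should not be taken as the driver's literal definition:

struct hw_info {
	u32 phy_lane_ctl;	/* 0: no CFGACC gating sequence */
	u32 port_msiaddr;
	u32 port_msiaddr_hi;	/* 0: doorbell address is 32-bit only */
	u32 port_msimap;	/* 0: legacy L2MSINUM scheme instead */
	u32 port_refclk;	/* 0: REFCLK gating lives in PHY_LANE_CFG */
	u32 port_perst;
	u32 port_rid2sid;
	u32 max_rid2sid;
};

static const struct hw_info t8103_hw = { /* placeholder offsets */ };
static const struct hw_info t602x_hw = { /* placeholder offsets */ };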
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index e19628e13898..92887b394eb4 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -581,10 +581,10 @@ static const struct irq_domain_ops msi_domain_ops = {
static int brcm_allocate_domains(struct brcm_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
+ struct fwnode_handle *fwnode = of_fwnode_handle(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
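This and the remaining controller hunks below (iproc, mediatek, rockchip, xilinx, plda) apply the same mechanical conversion: the irq_domain_add_*() helpers took a struct device_node, while the irq_domain_create_*() replacements take the more generic struct fwnode_handle. A minimal sketch of the pattern; of_fwnode_handle() maps a NULL node to a NULL fwnode, which is why call sites passing NULL stay NULL:

static struct irq_domain *example_create(struct device_node *np,
					 unsigned int size,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	/* old: irq_domain_add_linear(np, size, ops, host_data); */
	return irq_domain_create_linear(of_fwnode_handle(np), size, ops,
					host_data);
}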
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 649fcb449f34..d2cb4c4f821a 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -446,12 +446,12 @@ static void iproc_msi_disable(struct iproc_msi *msi)
static int iproc_msi_alloc_domains(struct device_node *node,
struct iproc_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
- &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->nr_msi_vecs,
+ &msi_domain_ops, msi);
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node),
&iproc_msi_domain_info,
msi->inner_domain);
if (!msi->msi_domain) {
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 9d52504acae4..b55f5973414c 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -745,8 +745,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
return -ENODEV;
}
- pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
ret = -ENODEV;
@@ -756,8 +756,9 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
/* Setup MSI */
mutex_init(&pcie->lock);
- pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, pcie);
+ pcie->msi_bottom_domain = irq_domain_create_linear(of_fwnode_handle(node),
+ PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, pcie);
if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 811a8b4acd50..e1934aa06c8d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -485,7 +485,7 @@ static struct msi_domain_info mtk_msi_domain_info = {
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(port->pcie->dev->of_node);
mutex_init(&port->lock);
@@ -569,8 +569,8 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
return -ENODEV;
}
- port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->irq_domain) {
dev_err(dev, "failed to get INTx IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index c5e0d025bc43..a8a966844cf3 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -256,15 +256,15 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 flags;
flags = rcar_pci_read_reg(pcie, MSICAP(fn));
- flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
+ flags |= mmc << MSICAP0_MMESCAP_OFFSET;
rcar_pci_write_reg(pcie, flags, MSICAP(fn));
return 0;
@@ -280,7 +280,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & MSICAP0_MSIE))
return -EINVAL;
- return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+ return 1 << ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
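As above for R-Car, the endpoint controllers below now receive the raw vector count and do the log2 encoding themselves, and the get_msi() side returns a vector count rather than the raw register field. A small worked sketch (helper names illustrative):

static u8 example_encode_mmc(u8 nr_irqs)
{
	/*
	 * e.g. nr_irqs = 16 -> MMC field = 4, since the MSI capability
	 * stores log2 of the vector count
	 */
	return order_base_2(nr_irqs);
}

static int example_decode_mme(u32 flags, u32 mask, u32 shift)
{
	/* invert the encoding when reading back: field 4 -> 16 vectors */
	return 1 << ((flags & mask) >> shift);
}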
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 85ea36df2f59..55416b8311dd 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -308,10 +308,11 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
}
static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 multi_msg_cap)
+ u8 nr_irqs)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
+ u8 mmc = order_base_2(nr_irqs);
u32 flags;
flags = rockchip_pcie_read(rockchip,
@@ -319,7 +320,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
flags |=
- (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ (mmc << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
(PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
rockchip_pcie_write(rockchip, flags,
@@ -340,8 +341,8 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
return -EINVAL;
- return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
- ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ return 1 << ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
@@ -694,6 +695,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = false,
+ .intx_capable = true,
.align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 6a46be17aa91..b9e7a8710cf0 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -693,8 +693,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 14954f43e5e9..5864a20323f2 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -319,11 +319,12 @@ static const char * const rockchip_pci_pm_rsts[] = {
"aclk",
};
+/* NOTE: Do not reorder the deassert sequence of the following reset pins */
static const char * const rockchip_pci_core_rsts[] = {
- "mgmt-sticky",
- "core",
- "mgmt",
"pipe",
+ "mgmt",
+ "core",
+ "mgmt-sticky",
};
struct rockchip_pcie {
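A hedged sketch of why the array order now matters: if the driver hands this array to the bulk reset API, deassertion happens front to back, so the string order is the hardware sequence. Whether rockchip consumes the array exactly this way is an assumption here; the illustration relies only on the documented iteration order of reset_control_bulk_deassert():

static int example_deassert_core_rsts(struct reset_control_bulk_data *rsts,
				      int num)
{
	/* walks rsts[0..num-1] in order: pipe, mgmt, core, mgmt-sticky */
	return reset_control_bulk_deassert(num, rsts);
}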
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 13ca493d22bd..d38f27e20761 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -395,17 +395,15 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
return -EINVAL;
}
- port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops,
- port);
+ port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->cpm_domain)
goto out;
irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain)
goto out;
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index dd117f07fc95..dc9690a535e1 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -470,10 +470,10 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
struct device *dev = port->dev;
struct xilinx_msi *msi = &port->msi;
int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node);
- msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
- &dev_msi_domain_ops, port);
+ msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS,
+ &dev_msi_domain_ops, port);
if (!msi->dev_domain)
goto out;
@@ -585,15 +585,15 @@ static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
return -EINVAL;
}
- port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops, port);
+ port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->pldma_domain)
return -ENOMEM;
irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 8d6e2a89b067..c8b05477b719 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -495,11 +495,10 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct device *dev = pcie->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
- msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
- &dev_msi_domain_ops, pcie);
+ msi->dev_domain = irq_domain_create_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
@@ -582,10 +581,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
return -EINVAL;
}
- pcie->intx_irq_domain = irq_domain_add_linear(intc_node,
- PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(intc_node);
if (!pcie->intx_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 0b534f73a942..e36aa874bae9 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -461,9 +461,8 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
return -ENODEV;
}
- pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->leg_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(pcie_intc_node);
if (!pcie->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
index 3fdfffdf0270..24bbf93b8051 100644
--- a/drivers/pci/controller/plda/pcie-microchip-host.c
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -23,6 +23,7 @@
#include <linux/wordpart.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#include "pcie-plda.h"
#define MC_MAX_NUM_INBOUND_WINDOWS 8
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 4153214ca410..3abedf723215 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -150,13 +150,12 @@ static struct msi_domain_info plda_msi_domain_info = {
static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
{
struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct plda_msi *msi = &port->msi;
mutex_init(&port->msi.lock);
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
- &msi_domain_ops, port);
+ msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port);
if (!msi->dev_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -393,10 +392,9 @@ static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
return -EINVAL;
}
- port->event_domain = irq_domain_add_linear(pcie_intc_node,
- port->num_events,
- &plda_event_domain_ops,
- port);
+ port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ port->num_events, &plda_event_domain_ops,
+ port);
if (!port->event_domain) {
dev_err(dev, "failed to get event domain\n");
of_node_put(pcie_intc_node);
@@ -405,8 +403,8 @@ static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "failed to get an INTx IRQ domain\n");
of_node_put(pcie_intc_node);
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 73047316889e..9f4190501395 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -6,30 +6,13 @@
/*
* On the state of PCI's devres implementation:
*
- * The older devres API for PCI has two significant problems:
+ * The older PCI devres API has one significant problem:
*
- * 1. It is very strongly tied to the statically allocated mapping table in
- * struct pcim_iomap_devres below. This is mostly solved in the sense of the
- * pcim_ functions in this file providing things like ranged mapping by
- * bypassing this table, whereas the functions that were present in the old
- * API still enter the mapping addresses into the table for users of the old
- * API.
- *
- * 2. The region-request-functions in pci.c do become managed IF the device has
- * been enabled with pcim_enable_device() instead of pci_enable_device().
- * This resulted in the API becoming inconsistent: Some functions have an
- * obviously managed counter-part (e.g., pci_iomap() <-> pcim_iomap()),
- * whereas some don't and are never managed, while others don't and are
- * _sometimes_ managed (e.g. pci_request_region()).
- *
- * Consequently, in the new API, region requests performed by the pcim_
- * functions are automatically cleaned up through the devres callback
- * pcim_addr_resource_release().
- *
- * Users of pcim_enable_device() + pci_*region*() are redirected in
- * pci.c to the managed functions here in this file. This isn't exactly
- * perfect, but the only alternative way would be to port ALL drivers
- * using said combination to pcim_ functions.
+ * It is very strongly tied to the statically allocated mapping table in struct
+ * pcim_iomap_devres below. This is mostly solved in the sense of the pcim_
+ * functions in this file providing things like ranged mapping by bypassing
+ * this table, whereas the functions that were present in the old API still
+ * enter the mapping addresses into the table for users of the old API.
*
* TODO:
* Remove the legacy table entirely once all calls to pcim_iomap_table() in
@@ -87,104 +70,6 @@ static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
res->bar = -1;
}
-/*
- * The following functions, __pcim_*_region*, exist as counterparts to the
- * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e.,
- * sometimes managed, sometimes not.
- *
- * To separate the APIs cleanly, we define our own, simplified versions here.
- */
-
-/**
- * __pcim_request_region_range - Request a ranged region
- * @pdev: PCI device the region belongs to
- * @bar: BAR the range is within
- * @offset: offset from the BAR's start address
- * @maxlen: length in bytes, beginning at @offset
- * @name: name of the driver requesting the resource
- * @req_flags: flags for the request, e.g., for kernel-exclusive requests
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request a range within a device's PCI BAR. Sanity check the input.
- */
-static int __pcim_request_region_range(struct pci_dev *pdev, int bar,
- unsigned long offset,
- unsigned long maxlen,
- const char *name, int req_flags)
-{
- resource_size_t start = pci_resource_start(pdev, bar);
- resource_size_t len = pci_resource_len(pdev, bar);
- unsigned long dev_flags = pci_resource_flags(pdev, bar);
-
- if (start == 0 || len == 0) /* Unused BAR. */
- return 0;
- if (len <= offset)
- return -EINVAL;
-
- start += offset;
- len -= offset;
-
- if (len > maxlen && maxlen != 0)
- len = maxlen;
-
- if (dev_flags & IORESOURCE_IO) {
- if (!request_region(start, len, name))
- return -EBUSY;
- } else if (dev_flags & IORESOURCE_MEM) {
- if (!__request_mem_region(start, len, name, req_flags))
- return -EBUSY;
- } else {
- /* That's not a device we can request anything on. */
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __pcim_release_region_range(struct pci_dev *pdev, int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(pdev, bar);
- resource_size_t len = pci_resource_len(pdev, bar);
- unsigned long flags = pci_resource_flags(pdev, bar);
-
- if (len <= offset || start == 0)
- return;
-
- if (len == 0 || maxlen == 0) /* This an unused BAR. Do nothing. */
- return;
-
- start += offset;
- len -= offset;
-
- if (len > maxlen)
- len = maxlen;
-
- if (flags & IORESOURCE_IO)
- release_region(start, len);
- else if (flags & IORESOURCE_MEM)
- release_mem_region(start, len);
-}
-
-static int __pcim_request_region(struct pci_dev *pdev, int bar,
- const char *name, int flags)
-{
- unsigned long offset = 0;
- unsigned long len = pci_resource_len(pdev, bar);
-
- return __pcim_request_region_range(pdev, bar, offset, len, name, flags);
-}
-
-static void __pcim_release_region(struct pci_dev *pdev, int bar)
-{
- unsigned long offset = 0;
- unsigned long len = pci_resource_len(pdev, bar);
-
- __pcim_release_region_range(pdev, bar, offset, len);
-}
-
static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -192,11 +77,11 @@ static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
switch (res->type) {
case PCIM_ADDR_DEVRES_TYPE_REGION:
- __pcim_release_region(pdev, res->bar);
+ pci_release_region(pdev, res->bar);
break;
case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
pci_iounmap(pdev, res->baseaddr);
- __pcim_release_region(pdev, res->bar);
+ pci_release_region(pdev, res->bar);
break;
case PCIM_ADDR_DEVRES_TYPE_MAPPING:
pci_iounmap(pdev, res->baseaddr);
@@ -735,7 +620,7 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
res->bar = bar;
- ret = __pcim_request_region(pdev, bar, name, 0);
+ ret = pci_request_region(pdev, bar, name);
if (ret != 0)
goto err_region;
@@ -749,7 +634,7 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
return res->baseaddr;
err_iomap:
- __pcim_release_region(pdev, bar);
+ pci_release_region(pdev, bar);
err_region:
pcim_addr_devres_free(res);
@@ -823,8 +708,20 @@ err:
}
EXPORT_SYMBOL(pcim_iomap_regions);
-static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
- int request_flags)
+/**
+ * pcim_request_region - Request a PCI BAR
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name of the driver requesting the resource
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
{
int ret;
struct pcim_addr_devres *res;
@@ -838,7 +735,7 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
res->bar = bar;
- ret = __pcim_request_region(pdev, bar, name, request_flags);
+ ret = pci_request_region(pdev, bar, name);
if (ret != 0) {
pcim_addr_devres_free(res);
return ret;
@@ -847,45 +744,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
devres_add(&pdev->dev, res);
return 0;
}
-
-/**
- * pcim_request_region - Request a PCI BAR
- * @pdev: PCI device to request region for
- * @bar: Index of BAR to request
- * @name: Name of the driver requesting the resource
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request region specified by @bar.
- *
- * The region will automatically be released on driver detach. If desired,
- * release manually only with pcim_release_region().
- */
-int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
-{
- return _pcim_request_region(pdev, bar, name, 0);
-}
EXPORT_SYMBOL(pcim_request_region);
/**
- * pcim_request_region_exclusive - Request a PCI BAR exclusively
- * @pdev: PCI device to request region for
- * @bar: Index of BAR to request
- * @name: Name of the driver requesting the resource
- *
- * Returns: 0 on success, a negative error code on failure.
- *
- * Request region specified by @bar exclusively.
- *
- * The region will automatically be released on driver detach. If desired,
- * release manually only with pcim_release_region().
- */
-int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
-{
- return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
-}
-
-/**
* pcim_release_region - Release a PCI BAR
* @pdev: PCI device to operate on
* @bar: Index of BAR to release
@@ -893,7 +754,7 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *nam
* Release a region manually that was previously requested by
* pcim_request_region().
*/
-void pcim_release_region(struct pci_dev *pdev, int bar)
+static void pcim_release_region(struct pci_dev *pdev, int bar)
{
struct pcim_addr_devres res_searched;
@@ -956,30 +817,6 @@ err:
EXPORT_SYMBOL(pcim_request_all_regions);
/**
- * pcim_iounmap_regions - Unmap and release PCI BARs (DEPRECATED)
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to unmap and release
- *
- * Unmap and release regions specified by @mask.
- *
- * This function is DEPRECATED. Do not use it in new code.
- * Use pcim_iounmap_region() instead.
- */
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
-{
- int i;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (!mask_contains_bar(mask, i))
- continue;
-
- pcim_iounmap_region(pdev, i);
- pcim_remove_bar_from_legacy_table(pdev, i);
- }
-}
-EXPORT_SYMBOL(pcim_iounmap_regions);
-
-/**
* pcim_iomap_range - Create a ranged __iomap mapping within a PCI BAR
* @pdev: PCI device to map IO resources for
* @bar: Index of the BAR
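With the hybrid __pcim_* layer gone, the managed entry points above delegate straight to pci_request_region()/pci_release_region(). A minimal probe() sketch using the surviving API (driver name and BAR number hypothetical); both the request and the mapping are released automatically on driver detach:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* requests BAR 0 and maps it in one managed step */
	regs = pcim_iomap_region(pdev, 0, "example");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* use regs; no explicit unmap or release needed */
	return 0;
}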
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 260b7de2dbd5..2c5e6446e00e 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -84,6 +84,8 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
goto err_exit_iomap;
}
+ cfg->priv = dev_get_drvdata(dev);
+
if (ops->init) {
err = ops->init(cfg);
if (err)
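Seeding cfg->priv from drvdata is what lets a host driver allocate its state in probe() and find it again in the ECAM ->init() callback, as the apple driver above now does. A sketch of the handoff with illustrative names:

static int example_ecam_init(struct pci_config_window *cfg)
{
	struct example_pcie *pcie = cfg->priv;	/* seeded from drvdata */

	return example_setup_ports(pcie);	/* hypothetical helper */
}

static int example_probe(struct platform_device *pdev)
{
	struct example_pcie *pcie;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, pcie);
	return pci_host_common_init(pdev, &example_ecam_ops);
}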
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 874cb097b093..e4da3fdb0007 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -408,11 +408,9 @@ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
*/
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
- size_t align;
enum pci_barno barno;
struct epf_ntb_ctrl *ctrl;
u32 spad_size, ctrl_size;
- u64 size;
struct pci_epf *epf = ntb->epf;
struct device *dev = &epf->dev;
u32 spad_count;
@@ -422,31 +420,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar[barno].fixed_size;
- align = epc_features->align;
-
- if ((!IS_ALIGNED(size, align)))
- return -EINVAL;
-
spad_count = ntb->spad_count;
- ctrl_size = sizeof(struct epf_ntb_ctrl);
+ ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32));
spad_size = 2 * spad_count * sizeof(u32);
- if (!align) {
- ctrl_size = roundup_pow_of_two(ctrl_size);
- spad_size = roundup_pow_of_two(spad_size);
- } else {
- ctrl_size = ALIGN(ctrl_size, align);
- spad_size = ALIGN(spad_size, align);
- }
-
- if (!size)
- size = ctrl_size + spad_size;
- else if (size < ctrl_size + spad_size)
- return -EINVAL;
-
- base = pci_epf_alloc_space(epf, size, barno, epc_features, 0);
+ base = pci_epf_alloc_space(epf, ctrl_size + spad_size,
+ barno, epc_features, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index beabea00af91..ca7f19cc973a 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -293,8 +293,6 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
if (interrupt < 0)
return 0;
- interrupt = 1 << interrupt;
-
return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);
@@ -304,28 +302,25 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
* @epc: the EPC device on which MSI has to be configured
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @interrupts: number of MSI interrupts required by the EPF
+ * @nr_irqs: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 nr_irqs)
{
int ret;
- u8 encode_int;
if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (interrupts < 1 || interrupts > 32)
+ if (nr_irqs < 1 || nr_irqs > 32)
return -EINVAL;
if (!epc->ops->set_msi)
return 0;
- encode_int = order_base_2(interrupts);
-
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, nr_irqs);
mutex_unlock(&epc->lock);
return ret;
@@ -357,7 +352,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
if (interrupt < 0)
return 0;
- return interrupt + 1;
+ return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);
@@ -366,29 +361,28 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
* @epc: the EPC device on which MSI-X has to be configured
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @interrupts: number of MSI-X interrupts required by the EPF
+ * @nr_irqs: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 nr_irqs,
+ enum pci_barno bir, u32 offset)
{
int ret;
if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (interrupts < 1 || interrupts > 2048)
+ if (nr_irqs < 1 || nr_irqs > 2048)
return -EINVAL;
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
- offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, nr_irqs, bir, offset);
mutex_unlock(&epc->lock);
return ret;
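From the function-driver side the calls keep their shape, but the unit is now a plain vector count end to end. A sketch (function numbers and the requested count are arbitrary):

static int example_epf_init_msi(struct pci_epc *epc)
{
	int nr;

	/* ask for 16 MSI vectors; no log2 encoding by the caller */
	if (pci_epc_set_msi(epc, 0, 0, 16))
		return -EINVAL;

	/* now returns the granted vector count directly, e.g. 16 */
	nr = pci_epc_get_msi(epc, 0, 0);
	return nr ? 0 : -EINVAL;
}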
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 394395c7f8de..577a9e490115 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -236,12 +236,13 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
}
dev = epc->dev.parent;
- dma_free_coherent(dev, epf_bar[bar].size, addr,
+ dma_free_coherent(dev, epf_bar[bar].aligned_size, addr,
epf_bar[bar].phys_addr);
epf_bar[bar].phys_addr = 0;
epf_bar[bar].addr = NULL;
epf_bar[bar].size = 0;
+ epf_bar[bar].aligned_size = 0;
epf_bar[bar].barno = 0;
epf_bar[bar].flags = 0;
}
@@ -264,7 +265,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
enum pci_epc_interface_type type)
{
u64 bar_fixed_size = epc_features->bar[bar].fixed_size;
- size_t align = epc_features->align;
+ size_t aligned_size, align = epc_features->align;
struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
struct pci_epc *epc;
@@ -285,12 +286,18 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
return NULL;
}
size = bar_fixed_size;
+ } else {
+ /* BAR size must be power of two */
+ size = roundup_pow_of_two(size);
}
- if (align)
- size = ALIGN(size, align);
- else
- size = roundup_pow_of_two(size);
+ /*
+ * Allocate enough memory to accommodate the iATU alignment
+ * requirement. In most cases, this will be the same as .size but
+ * it might be different if, for example, the fixed size of a BAR
+ * is smaller than align.
+ */
+ aligned_size = align ? ALIGN(size, align) : size;
if (type == PRIMARY_INTERFACE) {
epc = epf->epc;
@@ -301,7 +308,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
}
dev = epc->dev.parent;
- space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ space = dma_alloc_coherent(dev, aligned_size, &phys_addr, GFP_KERNEL);
if (!space) {
dev_err(dev, "failed to allocate mem space\n");
return NULL;
@@ -310,6 +317,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
epf_bar[bar].phys_addr = phys_addr;
epf_bar[bar].addr = space;
epf_bar[bar].size = size;
+ epf_bar[bar].aligned_size = aligned_size;
epf_bar[bar].barno = bar;
if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
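A worked example of the new split between the advertised BAR size and the backing allocation, with hypothetical numbers (a 4K fixed-size BAR on a controller whose iATU requires 64K alignment):

static size_t example_aligned_size(size_t size, size_t align)
{
	size = roundup_pow_of_two(size);	/* BAR sizes are powers of two */
	/*
	 * size = 4096, align = SZ_64K -> aligned_size = 65536, while
	 * epf_bar.size still advertises 4096; the coherent allocation
	 * and the free must both use the padded value.
	 */
	return align ? ALIGN(size, align) : size;
}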
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 20529d1a3c44..760a5dec0431 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1883,7 +1883,7 @@ void cpqhp_pushbutton_thread(struct timer_list *t)
{
u8 hp_slot;
struct pci_func *func;
- struct slot *p_slot = from_timer(p_slot, t, task_event);
+ struct slot *p_slot = timer_container_of(p_slot, t, task_event);
struct controller *ctrl = (struct controller *) p_slot->ctrl;
pushbutton_pending = NULL;
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index d30f1316c98e..fadcf98a8a66 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -20,13 +20,9 @@
#include <linux/types.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
-#include <linux/pagemap.h>
#include <linux/init.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <linux/uaccess.h>
#include "../pci.h"
#include "cpci_hotplug.h"
@@ -492,6 +488,75 @@ void pci_hp_destroy(struct hotplug_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_hp_destroy);
+static DECLARE_WAIT_QUEUE_HEAD(pci_hp_link_change_wq);
+
+/**
+ * pci_hp_ignore_link_change - begin code section causing spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Mark the beginning of a code section causing spurious link changes on the
+ * Secondary Bus of @pdev, e.g. as a side effect of a Secondary Bus Reset,
+ * D3cold transition, firmware update or FPGA reconfiguration.
+ *
+ * Hotplug drivers can thus check whether such a code section is executing
+ * concurrently, await it with pci_hp_spurious_link_change() and ignore the
+ * resulting link change events.
+ *
+ * Must be paired with pci_hp_unignore_link_change(). May be called both
+ * from the PCI core and from Endpoint drivers. May be called for bridges
+ * which are not hotplug-capable, in which case it has no effect because
+ * no hotplug driver is bound to the bridge.
+ */
+void pci_hp_ignore_link_change(struct pci_dev *pdev)
+{
+ set_bit(PCI_LINK_CHANGING, &pdev->priv_flags);
+ smp_mb__after_atomic(); /* pairs with implied barrier of wait_event() */
+}
+
+/**
+ * pci_hp_unignore_link_change - end code section causing spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Mark the end of a code section causing spurious link changes on the
+ * Secondary Bus of @pdev. Must be paired with pci_hp_ignore_link_change().
+ */
+void pci_hp_unignore_link_change(struct pci_dev *pdev)
+{
+ set_bit(PCI_LINK_CHANGED, &pdev->priv_flags);
+ mb(); /* ensure pci_hp_spurious_link_change() sees either bit set */
+ clear_bit(PCI_LINK_CHANGING, &pdev->priv_flags);
+ wake_up_all(&pci_hp_link_change_wq);
+}
+
+/**
+ * pci_hp_spurious_link_change - check for spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Check whether a code section that causes spurious link changes on the
+ * Secondary Bus of @pdev is executing concurrently. If so, await the end
+ * of that code section.
+ *
+ * May be called by hotplug drivers to check whether a link change is spurious
+ * and can be ignored.
+ *
+ * Because a genuine link change may have occurred in-between a spurious link
+ * change and the invocation of this function, hotplug drivers should perform
+ * sanity checks such as retrieving the current link state and bringing down
+ * the slot if the link is down.
+ *
+ * Return: %true if such a code section has been executing concurrently,
+ * otherwise %false. Also return %true if no such code section has been
+ * executing concurrently, but one has executed at least once since the
+ * last invocation of this function.
+ */
+bool pci_hp_spurious_link_change(struct pci_dev *pdev)
+{
+ wait_event(pci_hp_link_change_wq,
+ !test_bit(PCI_LINK_CHANGING, &pdev->priv_flags));
+
+ return test_and_clear_bit(PCI_LINK_CHANGED, &pdev->priv_flags);
+}
+
static int __init pci_hotplug_init(void)
{
int result;
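A hedged sketch of the intended use of the new API from an Endpoint driver, per the kernel-doc above; the driver and its reload helper are hypothetical. The hotplug bridge upstream of the device is told to expect and ignore the link flap:

static int example_reconfigure(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);
	int ret;

	if (bridge)
		pci_hp_ignore_link_change(bridge);

	ret = example_trigger_fpga_reload(pdev);	/* link goes down/up */

	if (bridge)
		pci_hp_unignore_link_change(bridge);

	return ret;
}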
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 273dd8c66f4e..debc79b0adfb 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -187,6 +187,7 @@ int pciehp_card_present(struct controller *ctrl);
int pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
int pciehp_check_link_active(struct controller *ctrl);
+bool pciehp_device_replaced(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 997841c69893..f59baa912970 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -284,35 +284,6 @@ static int pciehp_suspend(struct pcie_device *dev)
return 0;
}
-static bool pciehp_device_replaced(struct controller *ctrl)
-{
- struct pci_dev *pdev __free(pci_dev_put) = NULL;
- u32 reg;
-
- if (pci_dev_is_disconnected(ctrl->pcie->port))
- return false;
-
- pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
- if (!pdev)
- return true;
-
- if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
- reg != (pdev->vendor | (pdev->device << 16)) ||
- pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
- reg != (pdev->revision | (pdev->class << 8)))
- return true;
-
- if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
- (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
- reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
- return true;
-
- if (pci_get_dsn(pdev) != ctrl->dsn)
- return true;
-
- return false;
-}
-
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index d603a7aa7483..bcc938d4420f 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -131,7 +131,7 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
INDICATOR_NOOP);
/* Don't carry LBMS indications across */
- pcie_reset_lbms_count(ctrl->pcie->port);
+ pcie_reset_lbms(ctrl->pcie->port);
}
static int pciehp_enable_slot(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8a09fb6083e2..91d2d92717d9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -563,20 +563,50 @@ void pciehp_power_off_slot(struct controller *ctrl)
PCI_EXP_SLTCTL_PWR_OFF);
}
-static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
- struct pci_dev *pdev, int irq)
+bool pciehp_device_replaced(struct controller *ctrl)
+{
+ struct pci_dev *pdev __free(pci_dev_put) = NULL;
+ u32 reg;
+
+ if (pci_dev_is_disconnected(ctrl->pcie->port))
+ return false;
+
+ pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
+ if (!pdev)
+ return true;
+
+ if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
+ reg != (pdev->vendor | (pdev->device << 16)) ||
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
+ reg != (pdev->revision | (pdev->class << 8)))
+ return true;
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
+ reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
+ return true;
+
+ if (pci_get_dsn(pdev) != ctrl->dsn)
+ return true;
+
+ return false;
+}
+
+static void pciehp_ignore_link_change(struct controller *ctrl,
+ struct pci_dev *pdev, int irq,
+ u16 ignored_events)
{
/*
* Ignore link changes which occurred while waiting for DPC recovery.
* Could be several if DPC triggered multiple times consecutively.
+ * Also ignore link changes caused by Secondary Bus Reset, etc.
*/
synchronize_hardirq(irq);
- atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
+ atomic_and(~ignored_events, &ctrl->pending_events);
if (pciehp_poll_mode)
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_DLLSC);
- ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
- slot_name(ctrl));
+ ignored_events);
+ ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored\n", slot_name(ctrl));
/*
* If the link is unexpectedly down after successful recovery,
@@ -584,8 +614,8 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
* Synthesize it to ensure that it is acted on.
*/
down_read_nested(&ctrl->reset_lock, ctrl->depth);
- if (!pciehp_check_link_active(ctrl))
- pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
+ if (!pciehp_check_link_active(ctrl) || pciehp_device_replaced(ctrl))
+ pciehp_request(ctrl, ignored_events);
up_read(&ctrl->reset_lock);
}
@@ -732,12 +762,19 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
/*
* Ignore Link Down/Up events caused by Downstream Port Containment
- * if recovery from the error succeeded.
+ * if recovery succeeded, or caused by Secondary Bus Reset,
+ * suspend to D3cold, firmware update, FPGA reconfiguration, etc.
*/
- if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
+ if ((events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) &&
+ (pci_dpc_recovered(pdev) || pci_hp_spurious_link_change(pdev)) &&
ctrl->state == ON_STATE) {
- events &= ~PCI_EXP_SLTSTA_DLLSC;
- pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
+ u16 ignored_events = PCI_EXP_SLTSTA_DLLSC;
+
+ if (!ctrl->inband_presence_disabled)
+ ignored_events |= PCI_EXP_SLTSTA_PDC;
+
+ events &= ~ignored_events;
+ pciehp_ignore_link_change(ctrl, pdev, irq, ignored_events);
}
/*
@@ -902,7 +939,6 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 stat_mask = 0, ctrl_mask = 0;
int rc;
if (probe)
@@ -910,23 +946,11 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
down_write_nested(&ctrl->reset_lock, ctrl->depth);
- if (!ATTN_BUTTN(ctrl)) {
- ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
- stat_mask |= PCI_EXP_SLTSTA_PDC;
- }
- ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
- stat_mask |= PCI_EXP_SLTSTA_DLLSC;
-
- pcie_write_cmd(ctrl, 0, ctrl_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
+ pci_hp_ignore_link_change(pdev);
rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
- pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
+ pci_hp_unignore_link_change(pdev);
up_write(&ctrl->reset_lock);
return rc;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index e9e9aaa91770..d9996516f49e 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -65,9 +65,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
rc = zpci_deconfigure_device(zdev);
out:
- mutex_unlock(&zdev->state_lock);
if (pdev)
pci_dev_put(pdev);
+ mutex_unlock(&zdev->state_lock);
return rc;
}
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 387b85585263..183bf43510a1 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -211,7 +211,7 @@ static inline int shpc_indirect_read(struct controller *ctrl, int index,
*/
static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = from_timer(ctrl, t, poll_timer);
+ struct controller *ctrl = timer_container_of(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
shpc_isr(0, ctrl);
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
index fe706ed946df..ea86c282a386 100644
--- a/drivers/pci/iomap.c
+++ b/drivers/pci/iomap.c
@@ -25,10 +25,6 @@
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_range(struct pci_dev *dev,
int bar,
@@ -76,10 +72,6 @@ EXPORT_SYMBOL(pci_iomap_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
int bar,
@@ -127,10 +119,6 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device(). If you need automatic cleanup, use pcim_iomap().
* */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
@@ -152,10 +140,6 @@ EXPORT_SYMBOL(pci_iomap);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
- *
- * NOTE:
- * This function is never managed, even if you initialized with
- * pcim_enable_device().
* */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index 17ec6332cb1d..818d55fbad0d 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msi_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msix_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -401,7 +399,7 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
* Return: true if MSI has not been globally disabled through ACPI FADT,
* PCI bridge quirks, or the "pci=nomsi" kernel command-line option.
*/
-int pci_msi_enabled(void)
+bool pci_msi_enabled(void)
{
return pci_msi_enable;
}
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index d7ba8795d60f..c05152733993 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -271,6 +271,7 @@ static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_doma
/**
* pci_setup_msi_device_domain - Setup a device MSI interrupt domain
* @pdev: The PCI device to create the domain on
+ * @hwsize: The maximum number of MSI vectors
*
* Return:
* True when:
@@ -287,7 +288,7 @@ static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_doma
* - The device is removed
* - MSI is disabled and a MSI-X domain is created
*/
-bool pci_setup_msi_device_domain(struct pci_dev *pdev)
+bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize)
{
if (WARN_ON_ONCE(pdev->msix_enabled))
return false;
@@ -297,7 +298,7 @@ bool pci_setup_msi_device_domain(struct pci_dev *pdev)
if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX))
msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN);
- return pci_create_device_domain(pdev, &pci_msi_template, 1);
+ return pci_create_device_domain(pdev, &pci_msi_template, hwsize);
}
/**
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 8b8848788618..6ede55a7c5e6 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -15,7 +15,7 @@
#include "../pci.h"
#include "msi.h"
-int pci_msi_enable = 1;
+bool pci_msi_enable = true;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -335,43 +335,13 @@ static int msi_verify_entries(struct pci_dev *dev)
return !entry ? 0 : -EIO;
}
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ. A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
- struct irq_affinity *affd)
+static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *masks = NULL;
+ int ret = msi_setup_msi_desc(dev, nvec, masks);
struct msi_desc *entry, desc;
- int ret;
-
- /* Reject multi-MSI early on irq domain enabled architectures */
- if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
- return 1;
-
- /*
- * Disable MSI during setup in the hardware, but mark it enabled
- * so that setup code can evaluate it.
- */
- pci_msi_set_enable(dev, 0);
- dev->msi_enabled = 1;
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
- msi_lock_descs(&dev->dev);
- ret = msi_setup_msi_desc(dev, nvec, masks);
if (ret)
- goto fail;
+ return ret;
/* All MSIs are unmasked by default; mask them all */
entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
@@ -393,24 +363,51 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto err;
/* Set MSI enabled bits */
+ dev->msi_enabled = 1;
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
pcibios_free_irq(dev);
dev->irq = entry->irq;
- goto unlock;
-
+ return 0;
err:
pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
-fail:
- dev->msi_enabled = 0;
-unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
return ret;
}
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ struct irq_affinity *affd)
+{
+ /* Reject multi-MSI early on irq domain enabled architectures */
+ if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+ return 1;
+
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msi_capability_init(dev, nvec, masks);
+}
+
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -442,16 +439,16 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (nvec < minvec)
return -ENOSPC;
- if (nvec > maxvec)
- nvec = maxvec;
-
rc = pci_setup_msi_context(dev);
if (rc)
return rc;
- if (!pci_setup_msi_device_domain(dev))
+ if (!pci_setup_msi_device_domain(dev, nvec))
return -ENODEV;
+ if (nvec > maxvec)
+ nvec = maxvec;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
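
msi_capability_init() now allocates the affinity masks with __free(kfree), so the buffer is released on every return without an explicit kfree() or a free/unlock label, while the descriptor lock is scope-guarded around the new __msi_capability_init() helper. A self-contained sketch of the allocation pattern, assuming only <linux/cleanup.h> and <linux/slab.h>:

    static int demo_alloc(size_t n)
    {
            u8 *buf __free(kfree) = kzalloc(n, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            /* use buf; kfree(buf) runs automatically at every return */
            return 0;
    }

Note also that the "nvec > maxvec" clamp moved below pci_setup_msi_device_domain(dev, nvec): the per-device MSI domain is now sized by the vector count the hardware supports rather than by the caller's maxvec.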
@@ -666,38 +663,39 @@ static void msix_mask_all(void __iomem *base, int tsize)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity *affd)
-{
- struct irq_affinity_desc *masks = NULL;
- int ret;
+DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T));
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+static int __msix_setup_interrupts(struct pci_dev *__dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity_desc *masks)
+{
+ struct pci_dev *dev __free(free_msi_irqs) = __dev;
- msi_lock_descs(&dev->dev);
- ret = msix_setup_msi_descs(dev, entries, nvec, masks);
+ int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
if (ret)
- goto out_free;
+ return ret;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto out_free;
+ return ret;
/* Check if all MSI entries honor device restrictions */
ret = msi_verify_entries(dev);
if (ret)
- goto out_free;
+ return ret;
msix_update_entries(dev, entries);
- goto out_unlock;
+ retain_and_null_ptr(dev);
+ return 0;
+}
-out_free:
- pci_free_msi_irqs(dev);
-out_unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
- return ret;
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity *affd)
+{
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msix_setup_interrupts(dev, entries, nvec, masks);
}
/**
@@ -873,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);
- msi_lock_descs(&dev->dev);
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
- if (write_msg)
- __pci_write_msi_msg(entry, &entry->msg);
- pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ scoped_guard (msi_descs_lock, &dev->dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ }
}
- msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
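
__msix_setup_interrupts() above uses the other half of the cleanup.h toolkit: DEFINE_FREE() arms a "free the MSI IRQs" action on the pci_dev pointer, and retain_and_null_ptr() disarms it on the success path, so every early return tears down the partially configured vectors. A sketch under the same assumptions (do_setup() is a hypothetical stand-in):

    DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T))

    static int demo(struct pci_dev *__dev)
    {
            struct pci_dev *dev __free(free_msi_irqs) = __dev;

            if (do_setup(dev))              /* hypothetical helper */
                    return -EIO;            /* cleanup fires: IRQs freed */

            retain_and_null_ptr(dev);       /* success: disarm the cleanup */
            return 0;
    }

The scoped_guard() in __pci_restore_msix_state() is the block-scoped variant of the same guard, releasing the descriptor lock at the closing brace.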
@@ -918,6 +916,53 @@ void pci_free_msi_irqs(struct pci_dev *dev)
}
}
+#ifdef CONFIG_PCIE_TPH
+/**
+ * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
+ * @pdev: The PCIe device to update
+ * @index: The MSI-X index to update
+ * @tag: The tag to write
+ *
+ * Returns: 0 on success, error code on failure
+ */
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ struct msi_desc *msi_desc;
+ struct irq_desc *irq_desc;
+ unsigned int virq;
+
+ if (!pdev->msix_enabled)
+ return -ENXIO;
+
+ guard(msi_descs_lock)(&pdev->dev);
+ virq = msi_get_virq(&pdev->dev, index);
+ if (!virq)
+ return -ENXIO;
+ /*
+ * This is a horrible hack, but short of implementing a PCI
+ * specific interrupt chip callback and a huge pile of
+ * infrastructure, this is the minor nuisance. It provides the
+ * protection against concurrent operations on this entry and keeps
+ * the control word cache in sync.
+ */
+ irq_desc = irq_to_desc(virq);
+ if (!irq_desc)
+ return -ENXIO;
+
+ guard(raw_spinlock_irq)(&irq_desc->lock);
+ msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
+ if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
+ return -ENXIO;
+
+ msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
+ msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
+ pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
+ /* Flush the write */
+ readl(pci_msix_desc_addr(msi_desc));
+ return 0;
+}
+#endif
+
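
pci_msix_write_tph_tag() stacks two guards: msi_descs_lock serializes against descriptor-level changes, and the per-IRQ raw spinlock protects the cached control word while the ST field is rewritten; the trailing readl() flushes the posted write to the MSI-X table. A hypothetical caller (index and tag names are illustrative):

    int err = pci_msix_write_tph_tag(pdev, msix_idx, steering_tag);

    if (err)
            pci_warn(pdev, "TPH steering tag update failed: %d\n", err);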
/* Misc. infrastructure */
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
@@ -928,5 +973,5 @@ EXPORT_SYMBOL(msi_desc_to_pci_dev);
void pci_no_msi(void)
{
- pci_msi_enable = 0;
+ pci_msi_enable = false;
}
diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h
index ee53cf079f4e..0b420b319f50 100644
--- a/drivers/pci/msi/msi.h
+++ b/drivers/pci/msi/msi.h
@@ -87,7 +87,7 @@ static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc);
/* Subsystem variables */
-extern int pci_msi_enable;
+extern bool pci_msi_enable;
/* MSI internal functions invoked from the public APIs */
void pci_msi_shutdown(struct pci_dev *dev);
@@ -107,7 +107,7 @@ enum support_mode {
};
bool pci_msi_domain_supports(struct pci_dev *dev, unsigned int feature_mask, enum support_mode mode);
-bool pci_setup_msi_device_domain(struct pci_dev *pdev);
+bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize);
bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize);
/* Legacy (!IRQDOMAIN) fallbacks */
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index ab7a8252bf41..3579265f1198 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -966,3 +966,47 @@ u32 of_pci_get_slot_power_limit(struct device_node *node,
return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
+
+/**
+ * of_pci_get_equalization_presets - Parses the "eq-presets-Ngts" property.
+ *
+ * @dev: Device containing the properties.
+ * @presets: Pointer to store the parsed data.
+ * @num_lanes: Maximum number of lanes supported.
+ *
+ * If the property is present, read and store the data in the @presets structure.
+ * Else, assign a default value of PCI_EQ_RESV.
+ *
+ * Return: 0 if the property is not available or was parsed successfully,
+ * errno otherwise.
+ */
+int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes)
+{
+ char name[20];
+ int ret;
+
+ presets->eq_presets_8gts[0] = PCI_EQ_RESV;
+ ret = of_property_read_u16_array(dev->of_node, "eq-presets-8gts",
+ presets->eq_presets_8gts, num_lanes);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Error reading eq-presets-8gts: %d\n", ret);
+ return ret;
+ }
+
+ for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++) {
+ presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV;
+ snprintf(name, sizeof(name), "eq-presets-%dgts", 8 << (i + 1));
+ ret = of_property_read_u8_array(dev->of_node, name,
+ presets->eq_presets_Ngts[i],
+ num_lanes);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Error reading %s: %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_equalization_presets);
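
A sketch of how a controller driver might consume the new helper; "num_lanes" and the surrounding driver context are assumptions, not part of this diff:

    struct pci_eq_presets presets;
    int ret;

    ret = of_pci_get_equalization_presets(dev, &presets, num_lanes);
    if (ret)
            return ret;
    /*
     * presets.eq_presets_8gts[] holds the per-lane 8 GT/s presets and
     * presets.eq_presets_Ngts[][] the 16/32/64 GT/s ones; the lead
     * entry is preset to PCI_EQ_RESV so an absent DT property can be
     * detected.
     */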
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 19214ec81fbb..8d955c25aed3 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -1004,40 +1004,12 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
return type;
}
-/**
- * pci_p2pdma_map_segment - map an sg segment determining the mapping type
- * @state: State structure that should be declared outside of the for_each_sg()
- * loop and initialized to zero.
- * @dev: DMA device that's doing the mapping operation
- * @sg: scatterlist segment to map
- *
- * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
- * the sg segment is the same for the page_link and the dma_address.
- *
- * Attempt to map a single segment in an SGL with the PCI bus address.
- * The segment must point to a PCI P2PDMA page and thus must be
- * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check.
- *
- * Returns the type of mapping used and maps the page if the type is
- * PCI_P2PDMA_MAP_BUS_ADDR.
- */
-enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg)
+void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
+ struct device *dev, struct page *page)
{
- if (state->pgmap != page_pgmap(sg_page(sg))) {
- state->pgmap = page_pgmap(sg_page(sg));
- state->map = pci_p2pdma_map_type(state->pgmap, dev);
- state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
- }
-
- if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
- sg->dma_address = sg_phys(sg) + state->bus_off;
- sg_dma_len(sg) = sg->length;
- sg_dma_mark_bus_address(sg);
- }
-
- return state->map;
+ state->pgmap = page_pgmap(page);
+ state->map = pci_p2pdma_map_type(state->pgmap, dev);
+ state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
}
/**
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index af370628e583..b78e0e417324 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1676,24 +1676,19 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
- if (!root_ops) {
- kfree(ri);
- return NULL;
- }
+ if (!root_ops)
+ goto free_ri;
ri->cfg = pci_acpi_setup_ecam_mapping(root);
- if (!ri->cfg) {
- kfree(ri);
- kfree(root_ops);
- return NULL;
- }
+ if (!ri->cfg)
+ goto free_root_ops;
root_ops->release_info = pci_acpi_generic_release_info;
root_ops->prepare_resources = pci_acpi_root_prepare_resources;
root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
if (!bus)
- return NULL;
+ goto free_cfg;
/* If we must preserve the resource configuration, claim now */
host = pci_find_host_bridge(bus);
@@ -1710,6 +1705,14 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
pcie_bus_configure_settings(child);
return bus;
+
+free_cfg:
+ pci_ecam_free(ri->cfg);
+free_root_ops:
+ kfree(root_ops);
+free_ri:
+ kfree(ri);
+ return NULL;
}
void pcibios_add_bus(struct pci_bus *bus)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c8bd71a739f7..67db34fd10ee 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -555,12 +555,6 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
pci_enable_wake(pci_dev, PCI_D0, false);
}
-static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
-{
- pci_power_up(pci_dev);
- pci_update_current_state(pci_dev, PCI_D0);
-}
-
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_pm_power_up_and_verify_state(pci_dev);
@@ -1507,7 +1501,7 @@ static int pci_bus_match(struct device *dev, const struct device_driver *drv)
struct pci_driver *pci_drv;
const struct pci_device_id *found_id;
- if (!pci_dev->match_driver)
+ if (pci_dev_binding_disallowed(pci_dev))
return 0;
pci_drv = (struct pci_driver *)to_pci_driver(drv);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c6cda56ca52c..268c69daa4d5 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1475,6 +1475,9 @@ static ssize_t reset_method_store(struct device *dev,
return count;
}
+ pm_runtime_get_sync(dev);
+ struct device *pmdev __free(pm_runtime_put) = dev;
+
if (sysfs_streq(buf, "default")) {
pci_init_reset_methods(pdev);
return count;
@@ -1805,6 +1808,7 @@ const struct attribute_group *pci_dev_attr_groups[] = {
&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
&aer_stats_attr_group,
+ &aer_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
&aspm_ctrl_attr_group,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e77d5b53c0ce..9e42090fb108 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3192,6 +3192,12 @@ void pci_d3cold_disable(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
+{
+ pci_power_up(pci_dev);
+ pci_update_current_state(pci_dev, PCI_D0);
+}
+
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
@@ -3202,9 +3208,6 @@ void pci_pm_init(struct pci_dev *dev)
u16 status;
u16 pmc;
- pm_runtime_forbid(&dev->dev);
- pm_runtime_set_active(&dev->dev);
- pm_runtime_enable(&dev->dev);
device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
@@ -3214,14 +3217,14 @@ void pci_pm_init(struct pci_dev *dev)
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (!pm)
- return;
+ goto poweron;
/* Check device's ability to generate PME# */
pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
pci_err(dev, "unsupported PM cap regs version (%u)\n",
pmc & PCI_PM_CAP_VER_MASK);
- return;
+ goto poweron;
}
dev->pm_cap = pm;
@@ -3266,6 +3269,11 @@ void pci_pm_init(struct pci_dev *dev)
pci_read_config_word(dev, PCI_STATUS, &status);
if (status & PCI_STATUS_IMM_READY)
dev->imm_ready = 1;
+poweron:
+ pci_pm_power_up_and_verify_state(dev);
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_set_active(&dev->dev);
+ pm_runtime_enable(&dev->dev);
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
@@ -3937,16 +3945,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
if (!pci_bar_index_is_valid(bar))
return;
- /*
- * This is done for backwards compatibility, because the old PCI devres
- * API had a mode in which the function became managed if it had been
- * enabled with pcim_enable_device() instead of pci_enable_device().
- */
- if (pci_is_managed(pdev)) {
- pcim_release_region(pdev, bar);
- return;
- }
-
if (pci_resource_len(pdev, bar) == 0)
return;
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
@@ -3984,13 +3982,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
if (!pci_bar_index_is_valid(bar))
return -EINVAL;
- if (pci_is_managed(pdev)) {
- if (exclusive == IORESOURCE_EXCLUSIVE)
- return pcim_request_region_exclusive(pdev, bar, name);
-
- return pcim_request_region(pdev, bar, name);
- }
-
if (pci_resource_len(pdev, bar) == 0)
return 0;
@@ -4027,11 +4018,6 @@ err_out:
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
@@ -4084,11 +4070,6 @@ err_out:
* @name: Name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *name)
@@ -4104,11 +4085,6 @@ EXPORT_SYMBOL(pci_request_selected_regions);
* @name: name of the driver requesting the resources
*
* Returns: 0 on success, negative error code on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
const char *name)
@@ -4144,11 +4120,6 @@ EXPORT_SYMBOL(pci_release_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions(struct pci_dev *pdev, const char *name)
{
@@ -4173,11 +4144,6 @@ EXPORT_SYMBOL(pci_request_regions);
*
* Returns 0 on success, or %EBUSY on error. A warning message is also
* printed on failure.
- *
- * NOTE:
- * This is a "hybrid" function: It's normally unmanaged, but becomes managed
- * when pcim_enable_device() has been called in advance. This hybrid feature is
- * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
@@ -4257,7 +4223,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
#ifndef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
if (!(res->flags & IORESOURCE_IO))
@@ -4290,7 +4256,7 @@ EXPORT_SYMBOL(pci_remap_iospace);
*/
void pci_unmap_iospace(struct resource *res)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
vunmap_range(vaddr, vaddr + resource_size(res));
@@ -4718,6 +4684,11 @@ static int pcie_wait_for_link_status(struct pci_dev *pdev,
* @pdev: Device whose link to retrain.
* @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
*
+ * Trigger retraining of the PCIe Link and wait for the completion of the
+ * retraining. As link retraining is known to assert LBMS and may change
+ * the Link Speed, LBMS is cleared after the retraining and the Link Speed
+ * of the subordinate bus is updated.
+ *
* Retrain completion status is retrieved from the Link Status Register
* according to @use_lt. It is not verified whether the use of the DLLLA
* bit is valid.
@@ -4757,7 +4728,19 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
* to track link speed or width changes made by hardware itself
* in attempt to correct unreliable link operation.
*/
- pcie_reset_lbms_count(pdev);
+ pcie_reset_lbms(pdev);
+
+ /*
+ * Ensure the Link Speed updates after retraining in case the Link
+ * Speed was changed because of the retraining. While the bwctrl's
+ * IRQ handler normally picks up the new Link Speed, clearing LBMS
+ * races with the IRQ handler reading the Link Status register and
+ * can result in the handler returning early without updating the
+ * Link Speed.
+ */
+ if (pdev->subordinate)
+ pcie_update_link_speed(pdev->subordinate);
+
return rc;
}
@@ -4954,7 +4937,7 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
- pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
+ pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
return -ENOTTY;
}
@@ -5538,7 +5521,8 @@ static void pci_slot_unlock(struct pci_slot *slot)
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
}
@@ -6802,11 +6786,6 @@ int __weak pci_ext_cfg_avail(void)
return 1;
}
-void __weak pci_fixup_cardbus(struct pci_bus *bus)
-{
-}
-EXPORT_SYMBOL(pci_fixup_cardbus);
-
static int __init pci_setup(char *str)
{
while (str) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index b81e99cd4b62..12215ee72afb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -9,6 +9,8 @@ struct pcie_tlp_log;
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
+#define MAX_NR_LANES 16
+
#define PCI_FIND_CAP_TTL 48
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
@@ -148,6 +150,7 @@ void pci_dev_adjust_pme(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev);
void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_msi_init(struct pci_dev *dev);
@@ -227,6 +230,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
/* Functions for PCI Hotplug drivers to use */
int pci_hp_add_bridge(struct pci_dev *dev);
+bool pci_hp_spurious_link_change(struct pci_dev *pdev);
#if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY)
void pci_create_legacy_files(struct pci_bus *bus);
@@ -557,6 +561,10 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
#define PCI_DPC_RECOVERED 1
#define PCI_DPC_RECOVERING 2
#define PCI_DEV_REMOVED 3
+#define PCI_LINK_CHANGED 4
+#define PCI_LINK_CHANGING 5
+#define PCI_LINK_LBMS_SEEN 6
+#define PCI_DEV_ALLOW_BINDING 7
static inline void pci_dev_assign_added(struct pci_dev *dev)
{
@@ -580,6 +588,16 @@ static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev)
return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags);
}
+static inline void pci_dev_allow_binding(struct pci_dev *dev)
+{
+ set_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags);
+}
+
+static inline bool pci_dev_binding_disallowed(struct pci_dev *dev)
+{
+ return !test_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags);
+}
+
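
These helpers invert the old match_driver flag: a freshly scanned pci_dev refuses driver binding (pci_dev_binding_disallowed() returns true) until pci_dev_allow_binding() sets PCI_DEV_ALLOW_BINDING, presumably once enumeration has finished. The pci-driver.c hunk above switches pci_bus_match() to this default-deny test, so a device that is visible but not yet fully enumerated cannot bind a driver.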
#ifdef CONFIG_PCIEAER
#include <linux/aer.h>
@@ -587,12 +605,15 @@ static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev)
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int ratelimit_print[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
+ const char *level; /* printk level */
unsigned int id:16;
unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
- unsigned int __pad1:5;
+ unsigned int root_ratelimit_print:1; /* 0=skip, 1=print */
+ unsigned int __pad1:4;
unsigned int multi_error_valid:1;
unsigned int first_error:5;
@@ -604,15 +625,16 @@ struct aer_err_info {
struct pcie_tlp_log tlp; /* TLP Header */
};
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
-void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+int aer_get_device_error_info(struct aer_err_info *info, int i);
+void aer_print_error(struct aer_err_info *info, int i);
int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
unsigned int tlp_len, bool flit,
struct pcie_tlp_log *log);
unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
void pcie_print_tlp_log(const struct pci_dev *dev,
- const struct pcie_tlp_log *log, const char *pfx);
+ const struct pcie_tlp_log *log, const char *level,
+ const char *pfx);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCIEPORTBUS
@@ -824,14 +846,9 @@ static inline void pcie_ecrc_get_policy(char *str) { }
#endif
#ifdef CONFIG_PCIEPORTBUS
-void pcie_reset_lbms_count(struct pci_dev *port);
-int pcie_lbms_count(struct pci_dev *port, unsigned long *val);
+void pcie_reset_lbms(struct pci_dev *port);
#else
-static inline void pcie_reset_lbms_count(struct pci_dev *port) {}
-static inline int pcie_lbms_count(struct pci_dev *port, unsigned long *val)
-{
- return -EOPNOTSUPP;
-}
+static inline void pcie_reset_lbms(struct pci_dev *port) {}
#endif
struct pci_dev_reset_methods {
@@ -876,6 +893,21 @@ static inline u64 pci_rebar_size_to_bytes(int size)
struct device_node;
+#define PCI_EQ_RESV 0xff
+
+enum equalization_preset_type {
+ EQ_PRESET_TYPE_8GTS,
+ EQ_PRESET_TYPE_16GTS,
+ EQ_PRESET_TYPE_32GTS,
+ EQ_PRESET_TYPE_64GTS,
+ EQ_PRESET_TYPE_MAX
+};
+
+struct pci_eq_presets {
+ u16 eq_presets_8gts[MAX_NR_LANES];
+ u8 eq_presets_Ngts[EQ_PRESET_TYPE_MAX - 1][MAX_NR_LANES];
+};
+
#ifdef CONFIG_OF
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
@@ -890,7 +922,9 @@ void pci_release_bus_of_node(struct pci_bus *bus);
int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge);
bool of_pci_supply_present(struct device_node *np);
-
+int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes);
#else
static inline int
of_get_pci_domain_nr(struct device_node *node)
@@ -935,6 +969,17 @@ static inline bool of_pci_supply_present(struct device_node *np)
{
return false;
}
+
+static inline int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes)
+{
+ presets->eq_presets_8gts[0] = PCI_EQ_RESV;
+ for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++)
+ presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV;
+
+ return 0;
+}
#endif /* CONFIG_OF */
struct of_changeset;
@@ -961,6 +1006,7 @@ void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
void pci_aer_exit(struct pci_dev *dev);
extern const struct attribute_group aer_stats_attr_group;
+extern const struct attribute_group aer_attr_group;
void pci_aer_clear_fatal_status(struct pci_dev *dev);
int pci_aer_clear_status(struct pci_dev *dev);
int pci_aer_raw_clear_status(struct pci_dev *dev);
@@ -1059,10 +1105,14 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
-int pcim_intx(struct pci_dev *dev, int enable);
-int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
- const char *name);
-void pcim_release_region(struct pci_dev *pdev, int bar);
+#ifdef CONFIG_PCI_MSI
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
+#else
+static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ return -ENODEV;
+}
+#endif
/*
* Config Address for PCI Configuration Mechanism #1
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index a1cf8c7ef628..70ac66188367 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -28,6 +28,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <acpi/apei.h>
#include <acpi/ghes.h>
@@ -54,8 +55,8 @@ struct aer_rpc {
DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
-/* AER stats for the device */
-struct aer_stats {
+/* AER info for the device */
+struct aer_info {
/*
* Fields for all AER capable devices. They indicate the errors
@@ -88,6 +89,10 @@ struct aer_stats {
u64 rootport_total_cor_errs;
u64 rootport_total_fatal_errs;
u64 rootport_total_nonfatal_errs;
+
+ /* Ratelimits for errors */
+ struct ratelimit_state correctable_ratelimit;
+ struct ratelimit_state nonfatal_ratelimit;
};
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
@@ -377,7 +382,12 @@ void pci_aer_init(struct pci_dev *dev)
if (!dev->aer_cap)
return;
- dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+ dev->aer_info = kzalloc(sizeof(*dev->aer_info), GFP_KERNEL);
+
+ ratelimit_state_init(&dev->aer_info->correctable_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+ ratelimit_state_init(&dev->aer_info->nonfatal_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
/*
* We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
@@ -398,8 +408,8 @@ void pci_aer_init(struct pci_dev *dev)
void pci_aer_exit(struct pci_dev *dev)
{
- kfree(dev->aer_stats);
- dev->aer_stats = NULL;
+ kfree(dev->aer_info);
+ dev->aer_info = NULL;
}
#define AER_AGENT_RECEIVER 0
@@ -537,10 +547,10 @@ static const char *aer_agent_string[] = {
{ \
unsigned int i; \
struct pci_dev *pdev = to_pci_dev(dev); \
- u64 *stats = pdev->aer_stats->stats_array; \
+ u64 *stats = pdev->aer_info->stats_array; \
size_t len = 0; \
\
- for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_info->stats_array); i++) { \
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
@@ -551,7 +561,7 @@ static const char *aer_agent_string[] = {
i, stats[i]); \
} \
len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string, \
- pdev->aer_stats->total_field); \
+ pdev->aer_info->total_field); \
return len; \
} \
static DEVICE_ATTR_RO(name)
@@ -572,7 +582,7 @@ aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
- return sysfs_emit(buf, "%llu\n", pdev->aer_stats->field); \
+ return sysfs_emit(buf, "%llu\n", pdev->aer_info->field); \
} \
static DEVICE_ATTR_RO(name)
@@ -599,7 +609,7 @@ static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
- if (!pdev->aer_stats)
+ if (!pdev->aer_info)
return 0;
if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
@@ -617,31 +627,136 @@ const struct attribute_group aer_stats_attr_group = {
.is_visible = aer_stats_attrs_are_visible,
};
+/*
+ * Ratelimit interval
+ * <=0: disabled with ratelimit.interval = 0
+ * >0: enabled with ratelimit.interval in ms
+ */
+#define aer_ratelimit_interval_attr(name, ratelimit) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ \
+ return sysfs_emit(buf, "%d\n", \
+ pdev->aer_info->ratelimit.interval); \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ int interval; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ if (kstrtoint(buf, 0, &interval) < 0) \
+ return -EINVAL; \
+ \
+ if (interval <= 0) \
+ interval = 0; \
+ else \
+ interval = msecs_to_jiffies(interval); \
+ \
+ pdev->aer_info->ratelimit.interval = interval; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR_RW(name);
+
+#define aer_ratelimit_burst_attr(name, ratelimit) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ \
+ return sysfs_emit(buf, "%d\n", \
+ pdev->aer_info->ratelimit.burst); \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ int burst; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ if (kstrtoint(buf, 0, &burst) < 0) \
+ return -EINVAL; \
+ \
+ pdev->aer_info->ratelimit.burst = burst; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR_RW(name);
+
+#define aer_ratelimit_attrs(name) \
+ aer_ratelimit_interval_attr(name##_ratelimit_interval_ms, \
+ name##_ratelimit) \
+ aer_ratelimit_burst_attr(name##_ratelimit_burst, \
+ name##_ratelimit)
+
+aer_ratelimit_attrs(correctable)
+aer_ratelimit_attrs(nonfatal)
+
+static struct attribute *aer_attrs[] = {
+ &dev_attr_correctable_ratelimit_interval_ms.attr,
+ &dev_attr_correctable_ratelimit_burst.attr,
+ &dev_attr_nonfatal_ratelimit_interval_ms.attr,
+ &dev_attr_nonfatal_ratelimit_burst.attr,
+ NULL
+};
+
+static umode_t aer_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (!pdev->aer_info)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group aer_attr_group = {
+ .name = "aer",
+ .attrs = aer_attrs,
+ .is_visible = aer_attrs_are_visible,
+};
+
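
With the group named "aer", the four knobs land per device in sysfs next to the existing stats attributes, e.g. (path illustrative):

    /sys/bus/pci/devices/<bdf>/aer/correctable_ratelimit_interval_ms
    /sys/bus/pci/devices/<bdf>/aer/correctable_ratelimit_burst
    /sys/bus/pci/devices/<bdf>/aer/nonfatal_ratelimit_interval_ms
    /sys/bus/pci/devices/<bdf>/aer/nonfatal_ratelimit_burst

Per the store handlers, writing an interval of zero or less disables ratelimiting for that severity; fatal errors are never ratelimited (see aer_ratelimit() below).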
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
unsigned long status = info->status & ~info->mask;
int i, max = -1;
u64 *counter = NULL;
- struct aer_stats *aer_stats = pdev->aer_stats;
+ struct aer_info *aer_info = pdev->aer_info;
- if (!aer_stats)
+ if (!aer_info)
return;
switch (info->severity) {
case AER_CORRECTABLE:
- aer_stats->dev_total_cor_errs++;
- counter = &aer_stats->dev_cor_errs[0];
+ aer_info->dev_total_cor_errs++;
+ counter = &aer_info->dev_cor_errs[0];
max = AER_MAX_TYPEOF_COR_ERRS;
break;
case AER_NONFATAL:
- aer_stats->dev_total_nonfatal_errs++;
- counter = &aer_stats->dev_nonfatal_errs[0];
+ aer_info->dev_total_nonfatal_errs++;
+ counter = &aer_info->dev_nonfatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
case AER_FATAL:
- aer_stats->dev_total_fatal_errs++;
- counter = &aer_stats->dev_fatal_errs[0];
+ aer_info->dev_total_fatal_errs++;
+ counter = &aer_info->dev_fatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
}
@@ -653,37 +768,46 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_source *e_src)
{
- struct aer_stats *aer_stats = pdev->aer_stats;
+ struct aer_info *aer_info = pdev->aer_info;
- if (!aer_stats)
+ if (!aer_info)
return;
if (e_src->status & PCI_ERR_ROOT_COR_RCV)
- aer_stats->rootport_total_cor_errs++;
+ aer_info->rootport_total_cor_errs++;
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- aer_stats->rootport_total_fatal_errs++;
+ aer_info->rootport_total_fatal_errs++;
else
- aer_stats->rootport_total_nonfatal_errs++;
+ aer_info->rootport_total_nonfatal_errs++;
+ }
+}
+
+static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
+{
+ switch (severity) {
+ case AER_NONFATAL:
+ return __ratelimit(&dev->aer_info->nonfatal_ratelimit);
+ case AER_CORRECTABLE:
+ return __ratelimit(&dev->aer_info->correctable_ratelimit);
+ default:
+ return 1; /* Don't ratelimit fatal errors */
}
}
-static void __aer_print_error(struct pci_dev *dev,
- struct aer_err_info *info)
+static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
const char **strings;
unsigned long status = info->status & ~info->mask;
- const char *level, *errmsg;
+ const char *level = info->level;
+ const char *errmsg;
int i;
- if (info->severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE)
strings = aer_correctable_error_string;
- level = KERN_WARNING;
- } else {
+ else
strings = aer_uncorrectable_error_string;
- level = KERN_ERR;
- }
for_each_set_bit(i, &status, 32) {
errmsg = strings[i];
@@ -693,14 +817,39 @@ static void __aer_print_error(struct pci_dev *dev,
aer_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
}
- pci_dev_aer_stats_incr(dev, info);
}
-void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
+static void aer_print_source(struct pci_dev *dev, struct aer_err_info *info,
+ bool found)
+{
+ u16 source = info->id;
+
+ pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d%s\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), PCI_BUS_NUM(source),
+ PCI_SLOT(source), PCI_FUNC(source),
+ found ? "" : " (no details found");
+}
+
+void aer_print_error(struct aer_err_info *info, int i)
{
- int layer, agent;
- int id = pci_dev_id(dev);
- const char *level;
+ struct pci_dev *dev;
+ int layer, agent, id;
+ const char *level = info->level;
+
+ if (WARN_ON_ONCE(i >= AER_MAX_MULTI_ERR_DEVICES))
+ return;
+
+ dev = info->dev[i];
+ id = pci_dev_id(dev);
+
+ pci_dev_aer_stats_incr(dev, info);
+ trace_aer_event(pci_name(dev), (info->status & ~info->mask),
+ info->severity, info->tlp_header_valid, &info->tlp);
+
+ if (!info->ratelimit_print[i])
+ return;
if (!info->status) {
pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
@@ -711,8 +860,6 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
-
aer_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], aer_agent_string[agent]);
@@ -723,26 +870,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
__aer_print_error(dev, info);
if (info->tlp_header_valid)
- pcie_print_tlp_log(dev, &info->tlp, dev_fmt(" "));
+ pcie_print_tlp_log(dev, &info->tlp, level, dev_fmt(" "));
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
pci_err(dev, " Error of this Agent is reported first\n");
-
- trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
- info->severity, info->tlp_header_valid, &info->tlp);
-}
-
-static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
-{
- u8 bus = info->id >> 8;
- u8 devfn = info->id & 0xff;
-
- pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n",
- info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity],
- pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -765,40 +897,48 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
{
int layer, agent, tlp_header_valid = 0;
u32 status, mask;
- struct aer_err_info info;
+ struct aer_err_info info = {
+ .severity = aer_severity,
+ .first_error = PCI_ERR_CAP_FEP(aer->cap_control),
+ };
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
+ info.level = KERN_WARNING;
} else {
status = aer->uncor_status;
mask = aer->uncor_mask;
+ info.level = KERN_ERR;
tlp_header_valid = status & AER_LOG_TLP_MASKS;
}
- layer = AER_GET_LAYER_ERROR(aer_severity, status);
- agent = AER_GET_AGENT(aer_severity, status);
-
- memset(&info, 0, sizeof(info));
- info.severity = aer_severity;
info.status = status;
info.mask = mask;
- info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
- pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
+ pci_dev_aer_stats_incr(dev, &info);
+ trace_aer_event(pci_name(dev), (status & ~mask),
+ aer_severity, tlp_header_valid, &aer->header_log);
+
+ if (!aer_ratelimit(dev, info.severity))
+ return;
+
+ layer = AER_GET_LAYER_ERROR(aer_severity, status);
+ agent = AER_GET_AGENT(aer_severity, status);
+
+ aer_printk(info.level, dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
+ status, mask);
__aer_print_error(dev, &info);
- pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
- aer_error_layer[layer], aer_agent_string[agent]);
+ aer_printk(info.level, dev, "aer_layer=%s, aer_agent=%s\n",
+ aer_error_layer[layer], aer_agent_string[agent]);
if (aer_severity != AER_CORRECTABLE)
- pci_err(dev, "aer_uncor_severity: 0x%08x\n",
- aer->uncor_severity);
+ aer_printk(info.level, dev, "aer_uncor_severity: 0x%08x\n",
+ aer->uncor_severity);
if (tlp_header_valid)
- pcie_print_tlp_log(dev, &aer->header_log, dev_fmt(" "));
-
- trace_aer_event(dev_name(&dev->dev), (status & ~mask),
- aer_severity, tlp_header_valid, &aer->header_log);
+ pcie_print_tlp_log(dev, &aer->header_log, info.level,
+ dev_fmt(" "));
}
EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL");
@@ -809,12 +949,27 @@ EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL");
*/
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
- if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
- e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
- e_info->error_dev_num++;
- return 0;
+ int i = e_info->error_dev_num;
+
+ if (i >= AER_MAX_MULTI_ERR_DEVICES)
+ return -ENOSPC;
+
+ e_info->dev[i] = pci_dev_get(dev);
+ e_info->error_dev_num++;
+
+ /*
+ * Ratelimit AER log messages. "dev" is either the source
+ * identified by the root's Error Source ID or it has an unmasked
+ * error logged in its own AER Capability. Messages are emitted
+ * when "ratelimit_print[i]" is non-zero. If we will print detail
+ * for a downstream device, make sure we print the Error Source ID
+ * from the root as well.
+ */
+ if (aer_ratelimit(dev, e_info->severity)) {
+ e_info->ratelimit_print[i] = 1;
+ e_info->root_ratelimit_print = 1;
}
- return -ENOSPC;
+ return 0;
}
/**
@@ -908,7 +1063,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* e_info->error_dev_num and e_info->dev[], based on the given information.
*/
static bool find_source_device(struct pci_dev *parent,
- struct aer_err_info *e_info)
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
@@ -926,15 +1081,8 @@ static bool find_source_device(struct pci_dev *parent,
else
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
- if (!e_info->error_dev_num) {
- u8 bus = e_info->id >> 8;
- u8 devfn = e_info->id & 0xff;
-
- pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n",
- pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
+ if (!e_info->error_dev_num)
return false;
- }
return true;
}
@@ -1141,9 +1289,10 @@ static void aer_recover_work_func(struct work_struct *work)
pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
entry.devfn);
if (!pdev) {
- pr_err("no pci_dev for %04x:%02x:%02x.%x\n",
- entry.domain, entry.bus,
- PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ pr_err_ratelimited("%04x:%02x:%02x.%x: no pci_dev found\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn),
+ PCI_FUNC(entry.devfn));
continue;
}
pci_print_aer(pdev, entry.severity, entry.regs);
@@ -1199,19 +1348,26 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
/**
* aer_get_device_error_info - read error status from dev and store it to info
- * @dev: pointer to the device expected to have an error record
* @info: pointer to structure to store the error record
+ * @i: index into info->dev[]
*
* Return: 1 on success, 0 on error.
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
+int aer_get_device_error_info(struct aer_err_info *info, int i)
{
- int type = pci_pcie_type(dev);
- int aer = dev->aer_cap;
+ struct pci_dev *dev;
+ int type, aer;
u32 aercc;
+ if (i >= AER_MAX_MULTI_ERR_DEVICES)
+ return 0;
+
+ dev = info->dev[i];
+ aer = dev->aer_cap;
+ type = pci_pcie_type(dev);
+
/* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
@@ -1263,63 +1419,87 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
/* Report all before handling them, to not lose records by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
- aer_print_error(e_info->dev[i], e_info);
+ if (aer_get_device_error_info(e_info, i))
+ aer_print_error(e_info, i);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info, i))
handle_error_source(e_info->dev[i], e_info);
}
}
/**
- * aer_isr_one_error - consume an error detected by Root Port
- * @rpc: pointer to the Root Port which holds an error
- * @e_src: pointer to an error source
+ * aer_isr_one_error_type - consume a Correctable or Uncorrectable Error
+ * detected by Root Port or RCEC
+ * @root: pointer to Root Port or RCEC that signaled AER interrupt
+ * @info: pointer to AER error info
*/
-static void aer_isr_one_error(struct aer_rpc *rpc,
- struct aer_err_source *e_src)
+static void aer_isr_one_error_type(struct pci_dev *root,
+ struct aer_err_info *info)
{
- struct pci_dev *pdev = rpc->rpd;
- struct aer_err_info e_info;
+ bool found;
- pci_rootport_aer_stats_incr(pdev, e_src);
+ found = find_source_device(root, info);
/*
- * There is a possibility that both correctable error and
- * uncorrectable error being logged. Report correctable error first.
+ * If we're going to log error messages, we've already set
+ * "info->root_ratelimit_print" and "info->ratelimit_print[i]" to
+ * non-zero (which enables printing) because this is either an
+ * ERR_FATAL or we found a device with an error logged in its AER
+ * Capability.
+ *
+ * If we didn't find the Error Source device, at least log the
+ * Requester ID from the ERR_* Message received by the Root Port or
+ * RCEC, ratelimited by the RP or RCEC.
*/
- if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
- e_info.id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
-
- if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
- e_info.multi_error_valid = 1;
- else
- e_info.multi_error_valid = 0;
- aer_print_port_info(pdev, &e_info);
+ if (info->root_ratelimit_print ||
+ (!found && aer_ratelimit(root, info->severity)))
+ aer_print_source(root, info, found);
- if (find_source_device(pdev, &e_info))
- aer_process_err_devices(&e_info);
- }
-
- if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
- e_info.id = ERR_UNCOR_ID(e_src->id);
+ if (found)
+ aer_process_err_devices(info);
+}
- if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- e_info.severity = AER_FATAL;
- else
- e_info.severity = AER_NONFATAL;
+/**
+ * aer_isr_one_error - consume error(s) signaled by an AER interrupt from
+ * Root Port or RCEC
+ * @root: pointer to Root Port or RCEC that signaled AER interrupt
+ * @e_src: pointer to an error source
+ */
+static void aer_isr_one_error(struct pci_dev *root,
+ struct aer_err_source *e_src)
+{
+ u32 status = e_src->status;
- if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
- e_info.multi_error_valid = 1;
- else
- e_info.multi_error_valid = 0;
+ pci_rootport_aer_stats_incr(root, e_src);
- aer_print_port_info(pdev, &e_info);
+ /*
+	 * There is a possibility that both a correctable error and an
+	 * uncorrectable error are logged. Report the correctable error first.
+ */
+ if (status & PCI_ERR_ROOT_COR_RCV) {
+ int multi = status & PCI_ERR_ROOT_MULTI_COR_RCV;
+ struct aer_err_info e_info = {
+ .id = ERR_COR_ID(e_src->id),
+ .severity = AER_CORRECTABLE,
+ .level = KERN_WARNING,
+ .multi_error_valid = multi ? 1 : 0,
+ };
+
+ aer_isr_one_error_type(root, &e_info);
+ }
- if (find_source_device(pdev, &e_info))
- aer_process_err_devices(&e_info);
+ if (status & PCI_ERR_ROOT_UNCOR_RCV) {
+ int fatal = status & PCI_ERR_ROOT_FATAL_RCV;
+ int multi = status & PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ struct aer_err_info e_info = {
+ .id = ERR_UNCOR_ID(e_src->id),
+ .severity = fatal ? AER_FATAL : AER_NONFATAL,
+ .level = KERN_ERR,
+ .multi_error_valid = multi ? 1 : 0,
+ };
+
+ aer_isr_one_error_type(root, &e_info);
}
}
@@ -1340,7 +1520,7 @@ static irqreturn_t aer_isr(int irq, void *context)
return IRQ_NONE;
while (kfifo_get(&rpc->aer_fifo, &e_src))
- aer_isr_one_error(rpc, &e_src);
+ aer_isr_one_error(rpc->rpd, &e_src);
return IRQ_HANDLED;
}
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index d8d2aa85a229..36f939f23d34 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -38,24 +38,14 @@
/**
* struct pcie_bwctrl_data - PCIe bandwidth controller
* @set_speed_mutex: Serializes link speed changes
- * @lbms_count: Count for LBMS (since last reset)
* @cdev: Thermal cooling device associated with the port
*/
struct pcie_bwctrl_data {
struct mutex set_speed_mutex;
- atomic_t lbms_count;
struct thermal_cooling_device *cdev;
};
-/*
- * Prevent port removal during LBMS count accessors and Link Speed changes.
- *
- * These have to be differentiated because pcie_bwctrl_change_speed() calls
- * pcie_retrain_link() which uses LBMS count reset accessor on success
- * (using just one rwsem triggers "possible recursive locking detected"
- * warning).
- */
-static DECLARE_RWSEM(pcie_bwctrl_lbms_rwsem);
+/* Prevent port removal during Link Speed changes. */
static DECLARE_RWSEM(pcie_bwctrl_setspeed_rwsem);
static bool pcie_valid_speed(enum pci_bus_speed speed)
@@ -127,18 +117,7 @@ static int pcie_bwctrl_change_speed(struct pci_dev *port, u16 target_speed, bool
if (ret != PCIBIOS_SUCCESSFUL)
return pcibios_err_to_errno(ret);
- ret = pcie_retrain_link(port, use_lt);
- if (ret < 0)
- return ret;
-
- /*
- * Ensure link speed updates also with platforms that have problems
- * with notifications.
- */
- if (port->subordinate)
- pcie_update_link_speed(port->subordinate);
-
- return 0;
+ return pcie_retrain_link(port, use_lt);
}
/**
@@ -202,15 +181,14 @@ int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
static void pcie_bwnotif_enable(struct pcie_device *srv)
{
- struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
struct pci_dev *port = srv->port;
u16 link_status;
int ret;
- /* Count LBMS seen so far as one */
+ /* Note if LBMS has been seen so far */
ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
if (ret == PCIBIOS_SUCCESSFUL && link_status & PCI_EXP_LNKSTA_LBMS)
- atomic_inc(&data->lbms_count);
+ set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
pcie_capability_set_word(port, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
@@ -233,7 +211,6 @@ static void pcie_bwnotif_disable(struct pci_dev *port)
static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
{
struct pcie_device *srv = context;
- struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
struct pci_dev *port = srv->port;
u16 link_status, events;
int ret;
@@ -247,7 +224,7 @@ static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
return IRQ_NONE;
if (events & PCI_EXP_LNKSTA_LBMS)
- atomic_inc(&data->lbms_count);
+ set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
@@ -262,31 +239,10 @@ static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
return IRQ_HANDLED;
}
-void pcie_reset_lbms_count(struct pci_dev *port)
+void pcie_reset_lbms(struct pci_dev *port)
{
- struct pcie_bwctrl_data *data;
-
- guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem);
- data = port->link_bwctrl;
- if (data)
- atomic_set(&data->lbms_count, 0);
- else
- pcie_capability_write_word(port, PCI_EXP_LNKSTA,
- PCI_EXP_LNKSTA_LBMS);
-}
-
-int pcie_lbms_count(struct pci_dev *port, unsigned long *val)
-{
- struct pcie_bwctrl_data *data;
-
- guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem);
- data = port->link_bwctrl;
- if (!data)
- return -ENOTTY;
-
- *val = atomic_read(&data->lbms_count);
-
- return 0;
+ clear_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
}
static int pcie_bwnotif_probe(struct pcie_device *srv)
@@ -308,18 +264,16 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
return ret;
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = data;
-
- ret = request_irq(srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret) {
- port->link_bwctrl = NULL;
- return ret;
- }
+ port->link_bwctrl = data;
- pcie_bwnotif_enable(srv);
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
}
+
+ pcie_bwnotif_enable(srv);
}
pci_dbg(port, "enabled with IRQ %d\n", srv->irq);
@@ -339,13 +293,11 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- pcie_bwnotif_disable(srv->port);
+ pcie_bwnotif_disable(srv->port);
- free_irq(srv->irq, srv);
+ free_irq(srv->irq, srv);
- srv->port->link_bwctrl = NULL;
- }
+ srv->port->link_bwctrl = NULL;
}
}
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index df42f15c9829..fc18349614d7 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -222,7 +222,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
dpc_tlp_log_len(pdev),
pdev->subordinate->flit_mode,
&tlp_log);
- pcie_print_tlp_log(pdev, &tlp_log, dev_fmt(""));
+ pcie_print_tlp_log(pdev, &tlp_log, KERN_ERR, dev_fmt(""));
if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
goto clear_status;
@@ -252,46 +252,59 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
else
info->severity = AER_NONFATAL;
+ info->level = KERN_ERR;
+
+ info->dev[0] = dev;
+ info->error_dev_num = 1;
+
return 1;
}
void dpc_process_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
- struct aer_err_info info;
+ struct aer_err_info info = {};
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
-
- pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
- status, source);
reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN;
- ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
- pci_warn(pdev, "%s detected\n",
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR) ?
- "unmasked uncorrectable error" :
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE) ?
- "ERR_NONFATAL" :
- (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
- "ERR_FATAL" :
- (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
- "RP PIO error" :
- (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
- "software trigger" :
- "reserved error");
-
- /* show RP PIO error detail information */
- if (pdev->dpc_rp_extensions &&
- reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT &&
- ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO)
- dpc_process_rp_pio_error(pdev);
- else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
- dpc_get_aer_uncorrect_severity(pdev, &info) &&
- aer_get_device_error_info(pdev, &info)) {
- aer_print_error(pdev, &info);
- pci_aer_clear_nonfatal_status(pdev);
- pci_aer_clear_fatal_status(pdev);
+
+ switch (reason) {
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR:
+ pci_warn(pdev, "containment event, status:%#06x: unmasked uncorrectable error detected\n",
+ status);
+ if (dpc_get_aer_uncorrect_severity(pdev, &info) &&
+ aer_get_device_error_info(&info, 0)) {
+ aer_print_error(&info, 0);
+ pci_aer_clear_nonfatal_status(pdev);
+ pci_aer_clear_fatal_status(pdev);
+ }
+ break;
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE:
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE:
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
+ &source);
+ pci_warn(pdev, "containment event, status:%#06x, %s received from %04x:%02x:%02x.%d\n",
+ status,
+ (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
+ "ERR_FATAL" : "ERR_NONFATAL",
+ pci_domain_nr(pdev->bus), PCI_BUS_NUM(source),
+ PCI_SLOT(source), PCI_FUNC(source));
+ break;
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT:
+ ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
+ pci_warn(pdev, "containment event, status:%#06x: %s detected\n",
+ status,
+ (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
+ "RP PIO error" :
+ (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
+ "software trigger" :
+ "reserved error");
+ /* show RP PIO error detail information */
+ if (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO &&
+ pdev->dpc_rp_extensions)
+ dpc_process_rp_pio_error(pdev);
+ break;
}
}
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 31090770fffc..de6381c690f5 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -271,7 +271,6 @@ failed:
pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
- /* TODO: Should kernel panic here? */
pci_info(bridge, "device recovery failed\n");
return status;
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 7cfb6c0d5dcb..ee5f615a9023 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -252,3 +253,302 @@ bool pcie_ptm_enabled(struct pci_dev *dev)
return dev->ptm_enabled;
}
EXPORT_SYMBOL(pcie_ptm_enabled);
+
+static ssize_t context_update_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
+ char buf[7];
+ int ret;
+ u8 mode;
+
+ if (!ptm_debugfs->ops->context_update_write)
+ return -EOPNOTSUPP;
+
+ if (count < 1 || count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, ubuf, count);
+ if (ret)
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+ if (sysfs_streq(buf, "auto"))
+ mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else if (sysfs_streq(buf, "manual"))
+ mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+ else
+ return -EINVAL;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_update_write(ptm_debugfs->pdata, mode);
+ mutex_unlock(&ptm_debugfs->lock);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t context_update_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
+ char buf[8]; /* Extra space for NULL termination at the end */
+ ssize_t pos;
+ u8 mode;
+
+ if (!ptm_debugfs->ops->context_update_read)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ptm_debugfs->ops->context_update_read(ptm_debugfs->pdata, &mode);
+ mutex_unlock(&ptm_debugfs->lock);
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO)
+ pos = scnprintf(buf, sizeof(buf), "auto\n");
+ else
+ pos = scnprintf(buf, sizeof(buf), "manual\n");
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+}
+
+static const struct file_operations context_update_fops = {
+ .open = simple_open,
+ .read = context_update_read,
+ .write = context_update_write,
+};
+
+static int context_valid_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ bool valid;
+ int ret;
+
+ if (!ptm_debugfs->ops->context_valid_read)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_valid_read(ptm_debugfs->pdata, &valid);
+ mutex_unlock(&ptm_debugfs->lock);
+ if (ret)
+ return ret;
+
+ *val = valid;
+
+ return 0;
+}
+
+static int context_valid_set(void *data, u64 val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ int ret;
+
+ if (!ptm_debugfs->ops->context_valid_write)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_valid_write(ptm_debugfs->pdata, !!val);
+ mutex_unlock(&ptm_debugfs->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(context_valid_fops, context_valid_get,
+ context_valid_set, "%llu\n");
+
+static int local_clock_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->local_clock_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->local_clock_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(local_clock_fops, local_clock_get, NULL, "%llu\n");
+
+static int master_clock_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->master_clock_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->master_clock_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(master_clock_fops, master_clock_get, NULL, "%llu\n");
+
+static int t1_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t1_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t1_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t1_fops, t1_get, NULL, "%llu\n");
+
+static int t2_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t2_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t2_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t2_fops, t2_get, NULL, "%llu\n");
+
+static int t3_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t3_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t3_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t3_fops, t3_get, NULL, "%llu\n");
+
+static int t4_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t4_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t4_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t4_fops, t4_get, NULL, "%llu\n");
+
+#define pcie_ptm_create_debugfs_file(pdata, mode, attr) \
+ do { \
+ if (ops->attr##_visible && ops->attr##_visible(pdata)) \
+ debugfs_create_file(#attr, mode, ptm_debugfs->debugfs, \
+ ptm_debugfs, &attr##_fops); \
+ } while (0)
+
+/*
+ * pcie_ptm_create_debugfs() - Create debugfs entries for the PTM context
+ * @dev: PTM capable component device
+ * @pdata: Private data of the PTM capable component device
+ * @ops: PTM callback structure
+ *
+ * Create debugfs entries for exposing the PTM context of the PTM capable
+ * components such as Root Complex and Endpoint controllers.
+ *
+ * Return: Pointer to 'struct pci_ptm_debugfs' on success, NULL otherwise.
+ */
+struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
+ const struct pcie_ptm_ops *ops)
+{
+ struct pci_ptm_debugfs *ptm_debugfs;
+ char *dirname;
+ int ret;
+
+ /* Caller must provide check_capability() callback */
+ if (!ops->check_capability)
+ return NULL;
+
+ /* Check for PTM capability before creating debugfs attributes */
+ ret = ops->check_capability(pdata);
+ if (!ret) {
+ dev_dbg(dev, "PTM capability not present\n");
+ return NULL;
+ }
+
+ ptm_debugfs = kzalloc(sizeof(*ptm_debugfs), GFP_KERNEL);
+ if (!ptm_debugfs)
+ return NULL;
+
+ dirname = devm_kasprintf(dev, GFP_KERNEL, "pcie_ptm_%s", dev_name(dev));
+ if (!dirname)
+ return NULL;
+
+ ptm_debugfs->debugfs = debugfs_create_dir(dirname, NULL);
+ ptm_debugfs->pdata = pdata;
+ ptm_debugfs->ops = ops;
+ mutex_init(&ptm_debugfs->lock);
+
+ pcie_ptm_create_debugfs_file(pdata, 0644, context_update);
+ pcie_ptm_create_debugfs_file(pdata, 0644, context_valid);
+ pcie_ptm_create_debugfs_file(pdata, 0444, local_clock);
+ pcie_ptm_create_debugfs_file(pdata, 0444, master_clock);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t1);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t2);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t3);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t4);
+
+ return ptm_debugfs;
+}
+EXPORT_SYMBOL_GPL(pcie_ptm_create_debugfs);
+
+/*
+ * pcie_ptm_destroy_debugfs() - Destroy debugfs entries for the PTM context
+ * @ptm_debugfs: Pointer to the PTM debugfs struct
+ */
+void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs)
+{
+ if (!ptm_debugfs)
+ return;
+
+ mutex_destroy(&ptm_debugfs->lock);
+ debugfs_remove_recursive(ptm_debugfs->debugfs);
+}
+EXPORT_SYMBOL_GPL(pcie_ptm_destroy_debugfs);
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
index 890d5391d7f5..71f8fc9ea2ed 100644
--- a/drivers/pci/pcie/tlp.c
+++ b/drivers/pci/pcie/tlp.c
@@ -98,12 +98,14 @@ int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
* pcie_print_tlp_log - Print TLP Header / Prefix Log contents
* @dev: PCIe device
* @log: TLP Log structure
+ * @level: Printk log level
* @pfx: String prefix
*
* Prints TLP Header and Prefix Log information held by @log.
*/
void pcie_print_tlp_log(const struct pci_dev *dev,
- const struct pcie_tlp_log *log, const char *pfx)
+ const struct pcie_tlp_log *log, const char *level,
+ const char *pfx)
{
/* EE_PREFIX_STR fits the extended DW space needed for the Flit mode */
char buf[11 * PCIE_STD_MAX_TLP_HEADERLOG + 1];
@@ -130,6 +132,6 @@ void pcie_print_tlp_log(const struct pci_dev *dev,
}
}
- pci_err(dev, "%sTLP Header%s: %s\n", pfx,
+ dev_printk(level, &dev->dev, "%sTLP Header%s: %s\n", pfx,
log->flit ? " (Flit)" : "", buf);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 364fa2a514f8..4b8693ec9e4c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2058,7 +2058,7 @@ int pci_setup_device(struct pci_dev *dev)
if (class == PCI_CLASS_BRIDGE_PCI)
goto bad;
pci_read_irq(dev);
- pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
+ pci_read_bases(dev, PCI_STD_NUM_BARS, PCI_ROM_ADDRESS);
pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device);
@@ -2711,7 +2711,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
pci_set_msi_domain(dev);
/* Notifier could use PCI capabilities */
- dev->match_driver = false;
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
diff --git a/drivers/pci/pwrctrl/Kconfig b/drivers/pci/pwrctrl/Kconfig
index 990cab67d413..6956c1854811 100644
--- a/drivers/pci/pwrctrl/Kconfig
+++ b/drivers/pci/pwrctrl/Kconfig
@@ -1,19 +1,19 @@
# SPDX-License-Identifier: GPL-2.0-only
-config HAVE_PWRCTL
+config HAVE_PWRCTRL
bool
-config PCI_PWRCTL
+config PCI_PWRCTRL
tristate
-config PCI_PWRCTL_PWRSEQ
+config PCI_PWRCTRL_PWRSEQ
tristate
select POWER_SEQUENCING
- select PCI_PWRCTL
+ select PCI_PWRCTRL
-config PCI_PWRCTL_SLOT
+config PCI_PWRCTRL_SLOT
tristate "PCI Power Control driver for PCI slots"
- select PCI_PWRCTL
+ select PCI_PWRCTRL
help
Say Y here to enable the PCI Power Control driver to control the power
state of PCI slots.
@@ -21,3 +21,13 @@ config PCI_PWRCTL_SLOT
This is a generic driver that controls the power state of different
PCI slots. The voltage regulators powering the rails of the PCI slots
are expected to be defined in the devicetree node of the PCI bridge.
+
+# deprecated
+config HAVE_PWRCTL
+ bool
+ select HAVE_PWRCTRL
+
+# deprecated
+config PCI_PWRCTL_PWRSEQ
+ tristate
+ select PCI_PWRCTRL_PWRSEQ
diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile
index ddfb12c5aadf..a4e5808d7850 100644
--- a/drivers/pci/pwrctrl/Makefile
+++ b/drivers/pci/pwrctrl/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctrl-core.o
+obj-$(CONFIG_PCI_PWRCTRL) += pci-pwrctrl-core.o
pci-pwrctrl-core-y := core.o
-obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctrl-pwrseq.o
+obj-$(CONFIG_PCI_PWRCTRL_PWRSEQ) += pci-pwrctrl-pwrseq.o
-obj-$(CONFIG_PCI_PWRCTL_SLOT) += pci-pwrctl-slot.o
-pci-pwrctl-slot-y := slot.o
+obj-$(CONFIG_PCI_PWRCTRL_SLOT) += pci-pwrctrl-slot.o
+pci-pwrctrl-slot-y := slot.o
diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c
index 9cc7e2b7f2b5..6bdbfed584d6 100644
--- a/drivers/pci/pwrctrl/core.c
+++ b/drivers/pci/pwrctrl/core.c
@@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(pci_pwrctrl_device_set_ready);
*/
void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl)
{
+ cancel_work_sync(&pwrctrl->work);
+
/*
* We don't have to delete the link here. Typically, this function
* is only called when the power control device is being detached. If
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 94daca15a096..d7f4ee634263 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -38,14 +38,10 @@
static bool pcie_lbms_seen(struct pci_dev *dev, u16 lnksta)
{
- unsigned long count;
- int ret;
-
- ret = pcie_lbms_count(dev, &count);
- if (ret < 0)
- return lnksta & PCI_EXP_LNKSTA_LBMS;
+ if (test_bit(PCI_LINK_LBMS_SEEN, &dev->priv_flags))
+ return true;
- return count > 0;
+ return lnksta & PCI_EXP_LNKSTA_LBMS;
}
/*
@@ -4995,6 +4991,18 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Loongson PCIe Root Ports don't advertise an ACS capability, but
+ * they do not allow peer-to-peer transactions between Root Ports.
+ * Allow each Root Port to be in a separate IOMMU group by masking
+ * SV/RR/CR/UF bits.
+ */
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
/*
* Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
* multi-function devices, the hardware isolates the functions by
@@ -5128,6 +5136,17 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+ /* Loongson PCIe Root Ports */
+ { PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
/* Zhaoxin multi-function devices */
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index e994c546422c..07c3d021a47e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -776,8 +776,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
struct pci_dev *bridge = bus->self;
- pci_info(bridge, "PCI bridge to %pR\n",
- &bus->busn_res);
+ pci_info(bridge, "PCI bridge to %pR\n", &bus->busn_res);
if (type & IORESOURCE_IO)
pci_setup_bridge_io(bridge);
@@ -2302,8 +2301,8 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
/* Depth last, allocate resources and update the hardware. */
__pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
+ if (WARN_ON_ONCE(add_list && !list_empty(add_list)))
+ free_list(add_list);
tried_times++;
/* Any device complain? */
@@ -2365,7 +2364,8 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
pci_bridge_distribute_available_resources(bridge, &add_list);
__pci_bridge_assign_resources(bridge, &add_list, &fail_head);
- BUG_ON(!list_empty(&add_list));
+ if (WARN_ON_ONCE(!list_empty(&add_list)))
+ free_list(&add_list);
tried_times++;
if (list_empty(&fail_head))
@@ -2441,7 +2441,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
__pci_bus_size_bridges(bridge->subordinate, &added);
__pci_bridge_assign_resources(bridge, &added, &failed);
- BUG_ON(!list_empty(&added));
+ if (WARN_ON_ONCE(!list_empty(&added)))
+ free_list(&added);
if (!list_empty(&failed)) {
ret = -ENOSPC;
@@ -2497,6 +2498,7 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
__pci_bus_size_bridges(dev->subordinate, &add_list);
up_read(&pci_bus_sem);
__pci_bus_assign_resources(bus, &add_list, NULL);
- BUG_ON(!list_empty(&add_list));
+ if (WARN_ON_ONCE(!list_empty(&add_list)))
+ free_list(&add_list);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 07de59ca2ebf..77fce5e1b830 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -204,48 +204,6 @@ static u8 get_rp_completer_type(struct pci_dev *pdev)
return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
}
-/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */
-static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag)
-{
-#ifdef CONFIG_PCI_MSI
- struct msi_desc *msi_desc = NULL;
- void __iomem *vec_ctrl;
- u32 val;
- int err = 0;
-
- msi_lock_descs(&pdev->dev);
-
- /* Find the msi_desc entry with matching msix_idx */
- msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) {
- if (msi_desc->msi_index == msix_idx)
- break;
- }
-
- if (!msi_desc) {
- err = -ENXIO;
- goto err_out;
- }
-
- /* Get the vector control register (offset 0xc) pointed by msix_idx */
- vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE;
- vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL;
-
- val = readl(vec_ctrl);
- val &= ~PCI_MSIX_ENTRY_CTRL_ST;
- val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
- writel(val, vec_ctrl);
-
- /* Read back to flush the update */
- val = readl(vec_ctrl);
-
-err_out:
- msi_unlock_descs(&pdev->dev);
- return err;
-#else
- return -ENODEV;
-#endif
-}
-
/* Write tag to ST table - Return 0 if OK, otherwise -errno */
static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
{
@@ -346,7 +304,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
switch (loc) {
case PCI_TPH_LOC_MSIX:
- err = write_tag_to_msix(pdev, index, tag);
+ err = pci_msix_write_tph_tag(pdev, index, tag);
break;
case PCI_TPH_LOC_CAP:
err = write_tag_to_st_table(pdev, index, tag);
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index 85874b7a9f36..d3baed444646 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -268,7 +268,7 @@ static void bcm63xx_pcmcia_poll(struct timer_list *t)
struct bcm63xx_pcmcia_socket *skt;
unsigned int stat, events;
- skt = from_timer(skt, t, timer);
+ skt = timer_container_of(skt, t, timer);
spin_lock_bh(&skt->lock);
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 45c8252c8edc..5e5cf2c3e2c8 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -72,7 +72,6 @@ int __ref cb_alloc(struct pcmcia_socket *s)
pci_lock_rescan_remove();
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
- pci_fixup_cardbus(bus);
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++)
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 3bdd939dd2f4..2530079d38f4 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -68,7 +68,7 @@ static int electra_cf_ss_init(struct pcmcia_socket *s)
/* the timer is primarily to kick this socket's pccardd */
static void electra_cf_timer(struct timer_list *t)
{
- struct electra_cf_socket *cf = from_timer(cf, t, timer);
+ struct electra_cf_socket *cf = timer_container_of(cf, t, timer);
int present = electra_cf_present(cf);
if (present != cf->present) {
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index f0ccf479f36e..1b1dff56ec7b 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -77,7 +77,7 @@ static int omap_cf_ss_init(struct pcmcia_socket *s)
/* the timer is primarily to kick this socket's pccardd */
static void omap_cf_timer(struct timer_list *t)
{
- struct omap_cf_socket *cf = from_timer(cf, t, timer);
+ struct omap_cf_socket *cf = timer_container_of(cf, t, timer);
unsigned present = omap_cf_present();
if (present != cf->present) {
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index a0a2e7f18356..6868b60fd325 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -236,7 +236,8 @@ static irqreturn_t pd6729_interrupt(int irq, void *dev)
static void pd6729_interrupt_wrapper(struct timer_list *t)
{
- struct pd6729_socket *socket = from_timer(socket, t, poll_timer);
+ struct pd6729_socket *socket = timer_container_of(socket, t,
+ poll_timer);
pd6729_interrupt(0, (void *)socket);
mod_timer(&socket->poll_timer, jiffies + HZ);
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index d361124db993..87aa3f667117 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -460,7 +460,7 @@ static void soc_common_check_status(struct soc_pcmcia_socket *skt)
/* Let's poll for events in addition to IRQs since IRQ only is unreliable... */
static void soc_common_pcmcia_poll_event(struct timer_list *t)
{
- struct soc_pcmcia_socket *skt = from_timer(skt, t, poll_timer);
+ struct soc_pcmcia_socket *skt = timer_container_of(skt, t, poll_timer);
debug(skt, 4, "polling for events\n");
mod_timer(&skt->poll_timer, jiffies + SOC_PCMCIA_POLL_PERIOD);
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 020ea86c24ec..923ed23570a0 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -539,7 +539,8 @@ static irqreturn_t yenta_interrupt(int irq, void *dev_id)
static void yenta_interrupt_wrapper(struct timer_list *t)
{
- struct yenta_socket *socket = from_timer(socket, t, poll_timer);
+ struct yenta_socket *socket = timer_container_of(socket, t,
+ poll_timer);
yenta_interrupt(0, (void *)socket);
socket->poll_timer.expires = jiffies + HZ;
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 4e268de351c4..278c929dc87a 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -202,7 +202,7 @@ config THUNDERX2_PMU
tristate "Cavium ThunderX2 SoC PMU UNCORE"
depends on ARCH_THUNDER2 || COMPILE_TEST
depends on NUMA && ACPI
- default m
+ default m if ARCH_THUNDER2
help
Provides support for ThunderX2 UNCORE events.
The SoC has PMU support in its L3 cache controller (L3C) and
diff --git a/drivers/perf/amlogic/meson_ddr_pmu_core.c b/drivers/perf/amlogic/meson_ddr_pmu_core.c
index 07446d784a1a..c1e755c356a3 100644
--- a/drivers/perf/amlogic/meson_ddr_pmu_core.c
+++ b/drivers/perf/amlogic/meson_ddr_pmu_core.c
@@ -511,7 +511,7 @@ int meson_ddr_pmu_create(struct platform_device *pdev)
fmt_attr_fill(pmu->info.hw_info->fmt_attr);
- pmu->cpu = smp_processor_id();
+ pmu->cpu = raw_smp_processor_id();
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME);
if (!name)
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index df9a28ba69dc..81b6f1a62349 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -474,8 +474,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
- if (perf_event_overflow(event, &data, regs))
- m1_pmu_disable_event(event);
+ perf_event_overflow(event, &data, regs);
}
cpu_pmu->start(cpu_pmu);
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index d4fe30ff225b..031d45d0fe3d 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -727,8 +727,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
(chan == 6 && cmn->dat_vc_num < 2) ||
- (chan == 7 && cmn->snp_vc_num < 2) ||
- (chan == 8 && cmn->req_vc_num < 2))
+ (chan == 7 && cmn->req_vc_num < 2) ||
+ (chan == 8 && cmn->snp_vc_num < 2))
return 0;
}
@@ -882,8 +882,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \
_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \
_CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \
- _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \
- _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
+ _CMN_EVENT_XP(req2_##_name, (_event) | (7 << 5)), \
+ _CMN_EVENT_XP(snp2_##_name, (_event) | (8 << 5))
#define CMN_EVENT_XP_DAT(_name, _event) \
_CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)), \
@@ -2167,13 +2167,6 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
- if (cmn->part == PART_CMN600 && cmn->num_dtcs > 1) {
- /* We do at least know that a DTC's XP must be in that DTC's domain */
- dn = arm_cmn_node(cmn, CMN_TYPE_DTC);
- for (int i = 0; i < cmn->num_dtcs; i++)
- arm_cmn_node_to_xp(cmn, dn + i)->dtc = i;
- }
-
for (dn = cmn->dns; dn->type; dn++) {
if (dn->type == CMN_TYPE_XP)
continue;
@@ -2558,6 +2551,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
cmn->dev = &pdev->dev;
cmn->part = (unsigned long)device_get_match_data(cmn->dev);
+ cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
platform_set_drvdata(pdev, cmn);
if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
@@ -2585,7 +2579,6 @@ static int arm_cmn_probe(struct platform_device *pdev)
if (err)
return err;
- cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
cmn->pmu = (struct pmu) {
.module = THIS_MODULE,
.parent = cmn->dev,
@@ -2651,6 +2644,7 @@ static const struct acpi_device_id arm_cmn_acpi_match[] = {
{ "ARMHC600", PART_CMN600 },
{ "ARMHC650" },
{ "ARMHC700" },
+ { "ARMHC003" },
{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
diff --git a/drivers/perf/arm-ni.c b/drivers/perf/arm-ni.c
index fd7a5e60e963..de7b6cce4d68 100644
--- a/drivers/perf/arm-ni.c
+++ b/drivers/perf/arm-ni.c
@@ -575,6 +575,23 @@ static int arm_ni_init_cd(struct arm_ni *ni, struct arm_ni_node *node, u64 res_s
return err;
}
+static void arm_ni_remove(struct platform_device *pdev)
+{
+ struct arm_ni *ni = platform_get_drvdata(pdev);
+
+ for (int i = 0; i < ni->num_cds; i++) {
+ struct arm_ni_cd *cd = ni->cds + i;
+
+ if (!cd->pmu_base)
+ continue;
+
+ writel_relaxed(0, cd->pmu_base + NI_PMCR);
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR);
+ perf_pmu_unregister(&cd->pmu);
+ cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node);
+ }
+}
+
static void arm_ni_probe_domain(void __iomem *base, struct arm_ni_node *node)
{
u32 reg = readl_relaxed(base + NI_NODE_TYPE);
@@ -643,6 +660,7 @@ static int arm_ni_probe(struct platform_device *pdev)
ni->num_cds = num_cds;
ni->part = part;
ni->id = atomic_fetch_inc(&id);
+ platform_set_drvdata(pdev, ni);
for (int v = 0; v < cfg.num_components; v++) {
reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v));
@@ -656,8 +674,11 @@ static int arm_ni_probe(struct platform_device *pdev)
reg = readl_relaxed(pd.base + NI_CHILD_PTR(c));
arm_ni_probe_domain(base + reg, &cd);
ret = arm_ni_init_cd(ni, &cd, res->start);
- if (ret)
+ if (ret) {
+ ni->cds[cd.id].pmu_base = NULL;
+ arm_ni_remove(pdev);
return ret;
+ }
}
}
}
@@ -665,23 +686,6 @@ static int arm_ni_probe(struct platform_device *pdev)
return 0;
}
-static void arm_ni_remove(struct platform_device *pdev)
-{
- struct arm_ni *ni = platform_get_drvdata(pdev);
-
- for (int i = 0; i < ni->num_cds; i++) {
- struct arm_ni_cd *cd = ni->cds + i;
-
- if (!cd->pmu_base)
- continue;
-
- writel_relaxed(0, cd->pmu_base + NI_PMCR);
- writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR);
- perf_pmu_unregister(&cd->pmu);
- cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &cd->cpuhp_node);
- }
-}
-
#ifdef CONFIG_OF
static const struct of_device_id arm_ni_of_match[] = {
{ .compatible = "arm,ni-700" },
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index e506d59654e7..3db9f4ed17e8 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -887,8 +887,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
* an irq_work which will be taken care of in the handling of
* IPI_IRQ_WORK.
*/
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
+ perf_event_overflow(event, &data, regs);
}
armv8pmu_start(cpu_pmu);
diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c
index b09615bb2bb2..7cb12c8e06c7 100644
--- a/drivers/perf/arm_v6_pmu.c
+++ b/drivers/perf/arm_v6_pmu.c
@@ -276,8 +276,7 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
+ perf_event_overflow(event, &data, regs);
}
/*
diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c
index 17831e1920bd..a1e438101114 100644
--- a/drivers/perf/arm_v7_pmu.c
+++ b/drivers/perf/arm_v7_pmu.c
@@ -930,8 +930,7 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
+ perf_event_overflow(event, &data, regs);
}
/*
diff --git a/drivers/perf/arm_xscale_pmu.c b/drivers/perf/arm_xscale_pmu.c
index 638fea9b1263..c2ac41dd9e19 100644
--- a/drivers/perf/arm_xscale_pmu.c
+++ b/drivers/perf/arm_xscale_pmu.c
@@ -186,8 +186,7 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
+ perf_event_overflow(event, &data, regs);
}
irq_work_run();
@@ -519,8 +518,7 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
+ perf_event_overflow(event, &data, regs);
}
irq_work_run();
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 8d58efe998ec..58c911e1b2d2 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -43,6 +43,14 @@ config PHY_PISTACHIO_USB
help
Enable this to support the USB2.0 PHY on the IMG Pistachio SoC.
+config PHY_SNPS_EUSB2
+ tristate "SNPS eUSB2 PHY Driver"
+ depends on OF && (ARCH_EXYNOS || ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable support for the USB high-speed SNPS eUSB2 PHY on select
+ SoCs. The PHY is usually paired with a Synopsys DWC3 USB controller.
+
config PHY_XGENE
tristate "APM X-Gene 15Gbps PHY support"
depends on HAS_IOMEM && OF && (ARCH_XGENE || COMPILE_TEST)
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index e281442acc75..c670a8dac468 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_PHY_CAN_TRANSCEIVER) += phy-can-transceiver.o
obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
+obj-$(CONFIG_PHY_SNPS_EUSB2) += phy-snps-eusb2.o
obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o
obj-$(CONFIG_PHY_AIROHA_PCIE) += phy-airoha-pcie.o
obj-$(CONFIG_PHY_NXP_PTN3222) += phy-nxp-ptn3222.o
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
index 08a86962d949..c4a56b9d3289 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
@@ -377,13 +377,9 @@ static int phy_meson_axg_mipi_dphy_probe(struct platform_device *pdev)
return ret;
phy = devm_phy_create(dev, NULL, &phy_meson_axg_mipi_dphy_ops);
- if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
-
- return ret;
- }
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy),
+ "failed to create PHY\n");
phy_set_drvdata(phy, priv);
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
index ae898f93f97b..c0ba2852dbb8 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
@@ -200,7 +200,6 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
struct phy_axg_mipi_pcie_analog_priv *priv;
struct device_node *np = dev->of_node, *parent_np;
struct regmap *map;
- int ret;
priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -219,12 +218,9 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
priv->regmap = map;
priv->phy = devm_phy_create(dev, np, &phy_axg_mipi_pcie_analog_ops);
- if (IS_ERR(priv->phy)) {
- ret = PTR_ERR(priv->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
- return ret;
- }
+ if (IS_ERR(priv->phy))
+ return dev_err_probe(dev, PTR_ERR(priv->phy),
+ "failed to create PHY\n");
phy_set_drvdata(priv->phy, priv);
dev_set_drvdata(dev, priv);
diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c
index 60be5cdc600b..14dee73f9cb5 100644
--- a/drivers/phy/amlogic/phy-meson-axg-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c
@@ -131,20 +131,11 @@ static int phy_axg_pcie_probe(struct platform_device *pdev)
struct phy_axg_pcie_priv *priv;
struct device_node *np = dev->of_node;
void __iomem *base;
- int ret;
priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->phy = devm_phy_create(dev, np, &phy_axg_pcie_ops);
- if (IS_ERR(priv->phy)) {
- ret = PTR_ERR(priv->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
- return ret;
- }
-
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -162,6 +153,11 @@ static int phy_axg_pcie_probe(struct platform_device *pdev)
if (IS_ERR(priv->analog))
return PTR_ERR(priv->analog);
+ priv->phy = devm_phy_create(dev, np, &phy_axg_pcie_ops);
+ if (IS_ERR(priv->phy))
+ return dev_err_probe(dev, PTR_ERR(priv->phy),
+ "failed to create PHY\n");
+
phy_set_drvdata(priv->phy, priv);
dev_set_drvdata(dev, priv);
pphy = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
index 0e0b5c00b676..66bf0b7ef8ed 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
@@ -339,13 +339,9 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev)
return ret;
phy = devm_phy_create(dev, NULL, &phy_meson_g12a_usb2_ops);
- if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
-
- return ret;
- }
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy),
+ "failed to create PHY\n");
phy_set_bus_width(phy, 8);
phy_set_drvdata(phy, priv);
diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
index 14ea89927ab1..6b390304f723 100644
--- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
@@ -237,7 +237,6 @@ static int phy_meson_gxl_usb2_probe(struct platform_device *pdev)
struct phy_meson_gxl_usb2_priv *priv;
struct phy *phy;
void __iomem *base;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -266,13 +265,9 @@ static int phy_meson_gxl_usb2_probe(struct platform_device *pdev)
return PTR_ERR(priv->reset);
phy = devm_phy_create(dev, NULL, &phy_meson_gxl_usb2_ops);
- if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
-
- return ret;
- }
+ if (IS_ERR(phy))
+ return dev_err_probe(dev, PTR_ERR(phy),
+ "failed to create PHY\n");
phy_set_drvdata(phy, priv);
diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
index d63147c41b8c..a553231a9f7c 100644
--- a/drivers/phy/amlogic/phy-meson8b-usb2.c
+++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -39,9 +40,7 @@
#define REG_CTRL_TX_BITSTUFF_ENN BIT(18)
#define REG_CTRL_COMMON_ON BIT(19)
#define REG_CTRL_REF_CLK_SEL_MASK GENMASK(21, 20)
- #define REG_CTRL_REF_CLK_SEL_SHIFT 20
#define REG_CTRL_FSEL_MASK GENMASK(24, 22)
- #define REG_CTRL_FSEL_SHIFT 22
#define REG_CTRL_PORT_RESET BIT(25)
#define REG_CTRL_THREAD_ID_MASK GENMASK(31, 26)
@@ -166,33 +165,29 @@ static int phy_meson8b_usb2_power_on(struct phy *phy)
return ret;
}
- regmap_update_bits(priv->regmap, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL,
- REG_CONFIG_CLK_32k_ALTSEL);
+ regmap_set_bits(priv->regmap, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL);
regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK,
- 0x2 << REG_CTRL_REF_CLK_SEL_SHIFT);
+ FIELD_PREP(REG_CTRL_REF_CLK_SEL_MASK, 0x2));
regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_FSEL_MASK,
- 0x5 << REG_CTRL_FSEL_SHIFT);
+ FIELD_PREP(REG_CTRL_FSEL_MASK, 0x5));
/* reset the PHY */
- regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET,
- REG_CTRL_POWER_ON_RESET);
+ regmap_set_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET);
udelay(RESET_COMPLETE_TIME);
- regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0);
+ regmap_clear_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET);
udelay(RESET_COMPLETE_TIME);
- regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT,
- REG_CTRL_SOF_TOGGLE_OUT);
+ regmap_set_bits(priv->regmap, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT);
if (priv->dr_mode == USB_DR_MODE_HOST) {
- regmap_update_bits(priv->regmap, REG_DBG_UART,
- REG_DBG_UART_SET_IDDQ, 0);
+ regmap_clear_bits(priv->regmap, REG_DBG_UART,
+ REG_DBG_UART_SET_IDDQ);
if (priv->match->host_enable_aca) {
- regmap_update_bits(priv->regmap, REG_ADP_BC,
- REG_ADP_BC_ACA_ENABLE,
- REG_ADP_BC_ACA_ENABLE);
+ regmap_set_bits(priv->regmap, REG_ADP_BC,
+ REG_ADP_BC_ACA_ENABLE);
udelay(ACA_ENABLE_COMPLETE_TIME);
@@ -215,17 +210,15 @@ static int phy_meson8b_usb2_power_off(struct phy *phy)
struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
if (priv->dr_mode == USB_DR_MODE_HOST)
- regmap_update_bits(priv->regmap, REG_DBG_UART,
- REG_DBG_UART_SET_IDDQ,
- REG_DBG_UART_SET_IDDQ);
+ regmap_set_bits(priv->regmap, REG_DBG_UART,
+ REG_DBG_UART_SET_IDDQ);
clk_disable_unprepare(priv->clk_usb);
clk_disable_unprepare(priv->clk_usb_general);
reset_control_rearm(priv->reset);
/* power off the PHY by putting it into reset mode */
- regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET,
- REG_CTRL_POWER_ON_RESET);
+ regmap_set_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET);
return 0;
}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
index dc452610934a..8a5ed50f2da0 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
@@ -43,6 +43,8 @@
#define USB_CTRL_SETUP_tca_drv_sel_MASK BIT(24)
#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK BIT(25)
#define USB_CTRL_USB_PM 0x04
+#define USB_CTRL_USB_PM_REF_S2_CLK_SWITCH_EN_MASK BIT(1)
+#define USB_CTRL_USB_PM_UTMI_S2_CLK_SWITCH_EN_MASK BIT(2)
#define USB_CTRL_USB_PM_XHC_S2_CLK_SWITCH_EN_MASK BIT(3)
#define USB_CTRL_USB_PM_XHC_PME_EN_MASK BIT(4)
#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK BIT(22)
@@ -61,6 +63,13 @@
#define USB_CTRL_CTLR_CSHCR_ctl_pme_en_MASK BIT(18)
#define USB_CTRL_P0_U2PHY_CFG1 0x68
#define USB_CTRL_P0_U2PHY_CFG1_COMMONONN_MASK BIT(10)
+#define USB_CTRL_P0_U2PHY_CFG2 0x6c
+#define USB_CTRL_P0_U2PHY_CFG2_TXVREFTUNE0_MASK GENMASK(20, 17)
+#define USB_CTRL_P0_U2PHY_CFG2_TXVREFTUNE0_SHIFT 17
+#define USB_CTRL_P0_U2PHY_CFG2_TXRESTUNE0_MASK GENMASK(24, 23)
+#define USB_CTRL_P0_U2PHY_CFG2_TXRESTUNE0_SHIFT 23
+#define USB_CTRL_P0_U2PHY_CFG2_TXPREEMPAMPTUNE0_MASK GENMASK(26, 25)
+#define USB_CTRL_P0_U2PHY_CFG2_TXPREEMPAMPTUNE0_SHIFT 25
/* Register definitions for the USB_PHY block in 7211b0 */
#define USB_PHY_PLL_CTL 0x00
@@ -369,6 +378,42 @@ static void usb_uninit_common_7216(struct brcm_usb_init_params *params)
}
}
+static void usb_init_common_74110(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_PM));
+ reg &= ~(USB_CTRL_MASK(USB_PM, REF_S2_CLK_SWITCH_EN) |
+ USB_CTRL_MASK(USB_PM, UTMI_S2_CLK_SWITCH_EN));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_PM));
+
+ usb_init_common_7216(params);
+
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, P0_U2PHY_CFG2));
+ reg &= ~(USB_CTRL_P0_U2PHY_CFG2_TXVREFTUNE0_MASK |
+ USB_CTRL_P0_U2PHY_CFG2_TXRESTUNE0_MASK |
+ USB_CTRL_P0_U2PHY_CFG2_TXPREEMPAMPTUNE0_MASK);
+ reg |= (0x6 << USB_CTRL_P0_U2PHY_CFG2_TXVREFTUNE0_SHIFT) |
+ (0x3 << USB_CTRL_P0_U2PHY_CFG2_TXRESTUNE0_SHIFT) |
+ (0x2 << USB_CTRL_P0_U2PHY_CFG2_TXPREEMPAMPTUNE0_SHIFT);
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, P0_U2PHY_CFG2));
+}
+
+static void usb_uninit_common_74110(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
+ u32 reg;
+
+ if (params->wake_enabled) {
+ reg = brcm_usb_readl(USB_CTRL_REG(ctrl, USB_PM));
+ reg |= (USB_CTRL_MASK(USB_PM, REF_S2_CLK_SWITCH_EN) |
+ USB_CTRL_MASK(USB_PM, UTMI_S2_CLK_SWITCH_EN));
+ brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_PM));
+ }
+ usb_uninit_common_7216(params);
+}
+
static void usb_uninit_common_7211b0(struct brcm_usb_init_params *params)
{
void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
@@ -426,6 +471,16 @@ static void usb_set_dual_select(struct brcm_usb_init_params *params)
brcm_usb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
}
+static const struct brcm_usb_init_ops bcm74110_ops = {
+ .init_ipp = usb_init_ipp,
+ .init_common = usb_init_common_74110,
+ .init_xhci = usb_init_xhci,
+ .uninit_common = usb_uninit_common_74110,
+ .uninit_xhci = usb_uninit_xhci,
+ .get_dual_select = usb_get_dual_select,
+ .set_dual_select = usb_set_dual_select,
+};
+
static const struct brcm_usb_init_ops bcm7216_ops = {
.init_ipp = usb_init_ipp,
.init_common = usb_init_common_7216,
@@ -446,6 +501,12 @@ static const struct brcm_usb_init_ops bcm7211b0_ops = {
.set_dual_select = usb_set_dual_select,
};
+void brcm_usb_dvr_init_74110(struct brcm_usb_init_params *params)
+{
+ params->family_name = "74110";
+ params->ops = &bcm74110_ops;
+}
+
void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params)
{
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
index c1a88f5cd4cd..4c7be78d0b14 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.h
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -72,6 +72,7 @@ struct brcm_usb_init_params {
bool wake_enabled;
};
+void brcm_usb_dvr_init_74110(struct brcm_usb_init_params *params);
void brcm_usb_dvr_init_4908(struct brcm_usb_init_params *params);
void brcm_usb_dvr_init_7445(struct brcm_usb_init_params *params);
void brcm_usb_dvr_init_7216(struct brcm_usb_init_params *params);
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 6362ca5b7fb6..0666864c2f77 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -283,6 +283,16 @@ static const struct attribute_group brcm_usb_phy_group = {
.attrs = brcm_usb_phy_attrs,
};
+static const struct match_chip_info chip_info_74110 = {
+ .init_func = &brcm_usb_dvr_init_74110,
+ .required_regs = {
+ BRCM_REGS_CTRL,
+ BRCM_REGS_XHCI_EC,
+ BRCM_REGS_XHCI_GBL,
+ -1,
+ },
+};
+
static const struct match_chip_info chip_info_4908 = {
.init_func = &brcm_usb_dvr_init_4908,
.required_regs = {
@@ -326,6 +336,10 @@ static const struct match_chip_info chip_info_7445 = {
static const struct of_device_id brcm_usb_dt_ids[] = {
{
+ .compatible = "brcm,bcm74110-usb-phy",
+ .data = &chip_info_74110,
+ },
+ {
.compatible = "brcm,bcm4908-usb-phy",
.data = &chip_info_4908,
},
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index 7355d9921b64..68fcc8114d75 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -238,24 +238,21 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
imx8_phy->clkreq_unused = false;
imx8_phy->clk = devm_clk_get(dev, "ref");
- if (IS_ERR(imx8_phy->clk)) {
- dev_err(dev, "failed to get imx pcie phy clock\n");
- return PTR_ERR(imx8_phy->clk);
- }
+ if (IS_ERR(imx8_phy->clk))
+ return dev_err_probe(dev, PTR_ERR(imx8_phy->clk),
+ "failed to get imx pcie phy clock\n");
/* Grab GPR config register range */
imx8_phy->iomuxc_gpr =
syscon_regmap_lookup_by_compatible(imx8_phy->drvdata->gpr);
- if (IS_ERR(imx8_phy->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx8_phy->iomuxc_gpr);
- }
+ if (IS_ERR(imx8_phy->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx8_phy->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
imx8_phy->reset = devm_reset_control_get_exclusive(dev, "pciephy");
- if (IS_ERR(imx8_phy->reset)) {
- dev_err(dev, "Failed to get PCIEPHY reset control\n");
- return PTR_ERR(imx8_phy->reset);
- }
+ if (IS_ERR(imx8_phy->reset))
+ return dev_err_probe(dev, PTR_ERR(imx8_phy->reset),
+ "Failed to get PCIEPHY reset control\n");
if (imx8_phy->drvdata->variant == IMX8MP) {
imx8_phy->perst =
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index a974ef94de9a..b94f242420fc 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -293,6 +293,28 @@ static u32 phy_tx_vref_tune_from_property(u32 percent)
return DIV_ROUND_CLOSEST(percent - 94U, 2);
}
+static u32 imx95_phy_tx_vref_tune_from_property(u32 percent)
+{
+ percent = clamp(percent, 90U, 108U);
+
+ switch (percent) {
+ case 90 ... 91:
+ percent = 0;
+ break;
+ case 92 ... 96:
+ percent -= 91;
+ break;
+ case 97 ... 104:
+ percent -= 92;
+ break;
+ case 105 ... 108:
+ percent -= 93;
+ break;
+ }
+
+ return percent;
+}
+
static u32 phy_tx_rise_tune_from_property(u32 percent)
{
switch (percent) {
@@ -307,6 +329,22 @@ static u32 phy_tx_rise_tune_from_property(u32 percent)
}
}
+static u32 imx95_phy_tx_rise_tune_from_property(u32 percent)
+{
+ percent = clamp(percent, 90U, 120U);
+
+ switch (percent) {
+ case 90 ... 99:
+ return 3;
+ case 101 ... 115:
+ return 1;
+ case 116 ... 120:
+ return 0;
+ default:
+ return 2;
+ }
+}
+
static u32 phy_tx_preemp_amp_tune_from_property(u32 microamp)
{
microamp = min(microamp, 1800U);
@@ -317,12 +355,12 @@ static u32 phy_tx_preemp_amp_tune_from_property(u32 microamp)
static u32 phy_tx_vboost_level_from_property(u32 microvolt)
{
switch (microvolt) {
- case 0 ... 960:
- return 0;
- case 961 ... 1160:
- return 2;
- default:
+ case 1156:
+ return 5;
+ case 844:
return 3;
+ default:
+ return 4;
}
}
@@ -352,6 +390,29 @@ static u32 phy_comp_dis_tune_from_property(u32 percent)
return 7;
}
}
+
+static u32 imx95_phy_comp_dis_tune_from_property(u32 percent)
+{
+ percent = clamp(percent, 94, 104);
+
+ switch (percent) {
+ case 94 ... 95:
+ percent = 0;
+ break;
+ case 96 ... 98:
+ percent -= 95;
+ break;
+ case 99 ... 102:
+ percent -= 96;
+ break;
+ case 103 ... 104:
+ percent -= 97;
+ break;
+ }
+
+ return percent;
+}
+
static u32 phy_pcs_tx_swing_full_from_property(u32 percent)
{
percent = min(percent, 100U);
@@ -362,10 +423,17 @@ static u32 phy_pcs_tx_swing_full_from_property(u32 percent)
static void imx8m_get_phy_tuning_data(struct imx8mq_usb_phy *imx_phy)
{
struct device *dev = imx_phy->phy->dev.parent;
+ bool is_imx95 = false;
+
+ if (device_is_compatible(dev, "fsl,imx95-usb-phy"))
+ is_imx95 = true;
if (device_property_read_u32(dev, "fsl,phy-tx-vref-tune-percent",
&imx_phy->tx_vref_tune))
imx_phy->tx_vref_tune = PHY_TUNE_DEFAULT;
+ else if (is_imx95)
+ imx_phy->tx_vref_tune =
+ imx95_phy_tx_vref_tune_from_property(imx_phy->tx_vref_tune);
else
imx_phy->tx_vref_tune =
phy_tx_vref_tune_from_property(imx_phy->tx_vref_tune);
@@ -373,6 +441,9 @@ static void imx8m_get_phy_tuning_data(struct imx8mq_usb_phy *imx_phy)
if (device_property_read_u32(dev, "fsl,phy-tx-rise-tune-percent",
&imx_phy->tx_rise_tune))
imx_phy->tx_rise_tune = PHY_TUNE_DEFAULT;
+ else if (is_imx95)
+ imx_phy->tx_rise_tune =
+ imx95_phy_tx_rise_tune_from_property(imx_phy->tx_rise_tune);
else
imx_phy->tx_rise_tune =
phy_tx_rise_tune_from_property(imx_phy->tx_rise_tune);
@@ -394,6 +465,9 @@ static void imx8m_get_phy_tuning_data(struct imx8mq_usb_phy *imx_phy)
if (device_property_read_u32(dev, "fsl,phy-comp-dis-tune-percent",
&imx_phy->comp_dis_tune))
imx_phy->comp_dis_tune = PHY_TUNE_DEFAULT;
+ else if (is_imx95)
+ imx_phy->comp_dis_tune =
+ imx95_phy_comp_dis_tune_from_property(imx_phy->comp_dis_tune);
else
imx_phy->comp_dis_tune =
phy_comp_dis_tune_from_property(imx_phy->comp_dis_tune);
diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
index 10fbe8dee116..191c282246d9 100644
--- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
+++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
@@ -456,6 +456,8 @@ static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy,
int i, ret;
u8 val;
+ phy->cur_cfg = cfg;
+
/* HDMI PHY init */
writeb(REG33_FIX_DA, phy->regs + PHY_REG(33));
@@ -508,7 +510,14 @@ static const struct phy_config *fsl_samsung_hdmi_phy_lookup_rate(unsigned long r
if (phy_pll_cfg[i].pixclk <= rate)
break;
- return &phy_pll_cfg[i];
+ /* If there is an exact match, or the end of the array was reached, return the value */
+ if (phy_pll_cfg[i].pixclk == rate || i + 1 > ARRAY_SIZE(phy_pll_cfg) - 1)
+ return &phy_pll_cfg[i];
+
+ /* See if the next entry is closer to nominal than this one */
+ return (abs((long) rate - (long) phy_pll_cfg[i].pixclk) <
+ abs((long) rate - (long) phy_pll_cfg[i+1].pixclk) ?
+ &phy_pll_cfg[i] : &phy_pll_cfg[i+1]);
}
static void fsl_samsung_hdmi_calculate_phy(struct phy_config *cal_phy, unsigned long rate,
@@ -521,18 +530,9 @@ static void fsl_samsung_hdmi_calculate_phy(struct phy_config *cal_phy, unsigned
/* pll_div_regs 3-6 are fixed and pre-defined already */
}
-static u32 fsl_samsung_hdmi_phy_get_closest_rate(unsigned long rate,
- u32 int_div_clk, u32 frac_div_clk)
-{
- /* Calculate the absolute value of the differences and return whichever is closest */
- if (abs((long)rate - (long)int_div_clk) < abs((long)(rate - (long)frac_div_clk)))
- return int_div_clk;
-
- return frac_div_clk;
-}
-
-static long phy_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static
+const struct phy_config *fsl_samsung_hdmi_phy_find_settings(struct fsl_samsung_hdmi_phy *phy,
+ unsigned long rate)
{
const struct phy_config *fract_div_phy;
u32 int_div_clk;
@@ -541,83 +541,66 @@ static long phy_clk_round_rate(struct clk_hw *hw,
/* If the clock is out of range return error instead of searching */
if (rate > 297000000 || rate < 22250000)
- return -EINVAL;
+ return NULL;
/* Search the fractional divider lookup table */
fract_div_phy = fsl_samsung_hdmi_phy_lookup_rate(rate);
+ if (fract_div_phy->pixclk == rate) {
+ dev_dbg(phy->dev, "fractional divider match = %u\n", fract_div_phy->pixclk);
+ return fract_div_phy;
+ }
- /* If the rate is an exact match, return that value */
- if (rate == fract_div_phy->pixclk)
- return fract_div_phy->pixclk;
-
- /* If the exact match isn't found, calculate the integer divider */
+ /* Calculate the integer divider */
int_div_clk = fsl_samsung_hdmi_phy_find_pms(rate, &p, &m, &s);
+ fsl_samsung_hdmi_calculate_phy(&calculated_phy_pll_cfg, int_div_clk, p, m, s);
+ if (int_div_clk == rate) {
+ dev_dbg(phy->dev, "integer divider match = %u\n", calculated_phy_pll_cfg.pixclk);
+ return &calculated_phy_pll_cfg;
+ }
- /* If the int_div_clk rate is an exact match, return that value */
- if (int_div_clk == rate)
- return int_div_clk;
+ /* Calculate the absolute value of the differences and return whichever is closest */
+ if (abs((long)rate - (long)int_div_clk) <
+ abs((long)rate - (long)fract_div_phy->pixclk)) {
+ dev_dbg(phy->dev, "integer divider = %u\n", calculated_phy_pll_cfg.pixclk);
+ return &calculated_phy_pll_cfg;
+ }
- /* If neither rate is an exact match, use the value from the LUT */
- return fract_div_phy->pixclk;
-}
+ dev_dbg(phy->dev, "fractional divider = %u\n", phy->cur_cfg->pixclk);
-static int phy_use_fract_div(struct fsl_samsung_hdmi_phy *phy, const struct phy_config *fract_div_phy)
-{
- phy->cur_cfg = fract_div_phy;
- dev_dbg(phy->dev, "fsl_samsung_hdmi_phy: using fractional divider rate = %u\n",
- phy->cur_cfg->pixclk);
- return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg);
+ return fract_div_phy;
}
-static int phy_use_integer_div(struct fsl_samsung_hdmi_phy *phy,
- const struct phy_config *int_div_clk)
+static long fsl_samsung_hdmi_phy_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
{
- phy->cur_cfg = &calculated_phy_pll_cfg;
- dev_dbg(phy->dev, "fsl_samsung_hdmi_phy: integer divider rate = %u\n",
- phy->cur_cfg->pixclk);
- return fsl_samsung_hdmi_phy_configure(phy, phy->cur_cfg);
+ struct fsl_samsung_hdmi_phy *phy = to_fsl_samsung_hdmi_phy(hw);
+ const struct phy_config *target_settings = fsl_samsung_hdmi_phy_find_settings(phy, rate);
+
+ if (target_settings == NULL)
+ return -EINVAL;
+
+ dev_dbg(phy->dev, "round_rate, closest rate = %u\n", target_settings->pixclk);
+ return target_settings->pixclk;
}
-static int phy_clk_set_rate(struct clk_hw *hw,
+static int fsl_samsung_hdmi_phy_clk_set_rate(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate)
{
struct fsl_samsung_hdmi_phy *phy = to_fsl_samsung_hdmi_phy(hw);
- const struct phy_config *fract_div_phy;
- u32 int_div_clk;
- u16 m;
- u8 p, s;
+ const struct phy_config *target_settings = fsl_samsung_hdmi_phy_find_settings(phy, rate);
- /* Search the fractional divider lookup table */
- fract_div_phy = fsl_samsung_hdmi_phy_lookup_rate(rate);
-
- /* If the rate is an exact match, use that value */
- if (fract_div_phy->pixclk == rate)
- return phy_use_fract_div(phy, fract_div_phy);
+ if (target_settings == NULL)
+ return -EINVAL;
- /*
- * If the rate from the fractional divider is not exact, check the integer divider,
- * and use it if that value is an exact match.
- */
- int_div_clk = fsl_samsung_hdmi_phy_find_pms(rate, &p, &m, &s);
- fsl_samsung_hdmi_calculate_phy(&calculated_phy_pll_cfg, int_div_clk, p, m, s);
- if (int_div_clk == rate)
- return phy_use_integer_div(phy, &calculated_phy_pll_cfg);
+ dev_dbg(phy->dev, "set_rate, closest rate = %u\n", target_settings->pixclk);
- /*
- * Compare the difference between the integer clock and the fractional clock against
- * the desired clock and which whichever is closest.
- */
- if (fsl_samsung_hdmi_phy_get_closest_rate(rate, int_div_clk,
- fract_div_phy->pixclk) == fract_div_phy->pixclk)
- return phy_use_fract_div(phy, fract_div_phy);
- else
- return phy_use_integer_div(phy, &calculated_phy_pll_cfg);
+ return fsl_samsung_hdmi_phy_configure(phy, target_settings);
}
static const struct clk_ops phy_clk_ops = {
.recalc_rate = phy_clk_recalc_rate,
- .round_rate = phy_clk_round_rate,
- .set_rate = phy_clk_set_rate,
+ .round_rate = fsl_samsung_hdmi_phy_clk_round_rate,
+ .set_rate = fsl_samsung_hdmi_phy_clk_set_rate,
};
static int phy_clk_register(struct fsl_samsung_hdmi_phy *phy)
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index bdb87c976243..bccd72dccb77 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -29,7 +29,7 @@ config PHY_MVEBU_A3700_COMPHY
depends on ARCH_MVEBU || COMPILE_TEST
depends on OF
depends on HAVE_ARM_SMCCC
- default y
+ default ARCH_MVEBU
select GENERIC_PHY
help
This driver allows to control the comphy, a hardware block providing
@@ -40,7 +40,7 @@ config PHY_MVEBU_A3700_UTMI
tristate "Marvell A3700 UTMI driver"
depends on ARCH_MVEBU || COMPILE_TEST
depends on OF
- default y
+ default ARCH_MVEBU
select GENERIC_PHY
help
Enable this to support Marvell A3700 UTMI PHY driver.
diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c
index 7c248f5cfca5..c0ddb9273cc3 100644
--- a/drivers/phy/mediatek/phy-mtk-xsphy.c
+++ b/drivers/phy/mediatek/phy-mtk-xsphy.c
@@ -11,10 +11,12 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include "phy-mtk-io.h"
@@ -81,12 +83,22 @@
#define XSP_SR_COEF_DIVISOR 1000
#define XSP_FM_DET_CYCLE_CNT 1024
+/* PHY switch between pcie/usb3/sgmii */
+#define USB_PHY_SWITCH_CTRL 0x0
+#define RG_PHY_SW_TYPE GENMASK(3, 0)
+#define RG_PHY_SW_PCIE 0x0
+#define RG_PHY_SW_USB3 0x1
+#define RG_PHY_SW_SGMII 0x2
+
struct xsphy_instance {
struct phy *phy;
void __iomem *port_base;
struct clk *ref_clk; /* reference clock of analog phy */
u32 index;
u32 type;
+ struct regmap *type_sw;
+ u32 type_sw_reg;
+ u32 type_sw_index;
/* only for HQA test */
int efuse_intr;
int efuse_tx_imp;
@@ -259,6 +271,10 @@ static void phy_parse_property(struct mtk_xsphy *xsphy,
inst->efuse_intr, inst->efuse_tx_imp,
inst->efuse_rx_imp);
break;
+ case PHY_TYPE_PCIE:
+ case PHY_TYPE_SGMII:
+ /* nothing to do */
+ break;
default:
dev_err(xsphy->dev, "incompatible phy type\n");
return;
@@ -305,6 +321,62 @@ static void u3_phy_props_set(struct mtk_xsphy *xsphy,
RG_XTP_LN0_RX_IMPSEL, inst->efuse_rx_imp);
}
+/* type switch for usb3/pcie/sgmii */
+static int phy_type_syscon_get(struct xsphy_instance *instance,
+ struct device_node *dn)
+{
+ struct of_phandle_args args;
+ int ret;
+
+ /* type switch function is optional */
+ if (!of_property_present(dn, "mediatek,syscon-type"))
+ return 0;
+
+ ret = of_parse_phandle_with_fixed_args(dn, "mediatek,syscon-type",
+ 2, 0, &args);
+ if (ret)
+ return ret;
+
+ instance->type_sw_reg = args.args[0];
+	instance->type_sw_index = args.args[1] & 0x3; /* index is limited to 0..3 */
+ instance->type_sw = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ dev_info(&instance->phy->dev, "type_sw - reg %#x, index %d\n",
+ instance->type_sw_reg, instance->type_sw_index);
+
+ return PTR_ERR_OR_ZERO(instance->type_sw);
+}
+
+static int phy_type_set(struct xsphy_instance *instance)
+{
+ int type;
+ u32 offset;
+
+ if (!instance->type_sw)
+ return 0;
+
+ switch (instance->type) {
+ case PHY_TYPE_USB3:
+ type = RG_PHY_SW_USB3;
+ break;
+ case PHY_TYPE_PCIE:
+ type = RG_PHY_SW_PCIE;
+ break;
+ case PHY_TYPE_SGMII:
+ type = RG_PHY_SW_SGMII;
+ break;
+ case PHY_TYPE_USB2:
+ default:
+ return 0;
+ }
+
+ offset = instance->type_sw_index * BITS_PER_BYTE;
+ regmap_update_bits(instance->type_sw, instance->type_sw_reg,
+ RG_PHY_SW_TYPE << offset, type << offset);
+
+ return 0;
+}
+
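The syscon lane assignment above packs one 4-bit type field per PHY instance into a shared register, one byte apart. A worked example of the shift arithmetic, with a hypothetical instance index of 2 switched to SGMII:

	u32 offset = 2 * BITS_PER_BYTE;			/* 16 */
	u32 mask = RG_PHY_SW_TYPE << offset;		/* 0x000f0000 */
	u32 val = RG_PHY_SW_SGMII << offset;		/* 0x00020000 */

	regmap_update_bits(inst->type_sw, inst->type_sw_reg, mask, val);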
static int mtk_phy_init(struct phy *phy)
{
struct xsphy_instance *inst = phy_get_drvdata(phy);
@@ -325,6 +397,10 @@ static int mtk_phy_init(struct phy *phy)
case PHY_TYPE_USB3:
u3_phy_props_set(xsphy, inst);
break;
+ case PHY_TYPE_PCIE:
+ case PHY_TYPE_SGMII:
+ /* nothing to do, only used to set type */
+ break;
default:
dev_err(xsphy->dev, "incompatible phy type\n");
clk_disable_unprepare(inst->ref_clk);
@@ -403,12 +479,15 @@ static struct phy *mtk_phy_xlate(struct device *dev,
inst->type = args->args[0];
if (!(inst->type == PHY_TYPE_USB2 ||
- inst->type == PHY_TYPE_USB3)) {
+ inst->type == PHY_TYPE_USB3 ||
+ inst->type == PHY_TYPE_PCIE ||
+ inst->type == PHY_TYPE_SGMII)) {
dev_err(dev, "unsupported phy type: %d\n", inst->type);
return ERR_PTR(-EINVAL);
}
phy_parse_property(xsphy, inst);
+ phy_type_set(inst);
return inst->phy;
}
@@ -510,6 +589,10 @@ static int mtk_xsphy_probe(struct platform_device *pdev)
dev_err(dev, "failed to get ref_clk(id-%d)\n", port);
return PTR_ERR(inst->ref_clk);
}
+
+ retval = phy_type_syscon_get(inst, child_np);
+ if (retval)
+ return retval;
}
provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index 2bec70615449..f59caff4b3d4 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -93,6 +93,16 @@ static const struct of_device_id can_transceiver_phy_ids[] = {
};
MODULE_DEVICE_TABLE(of, can_transceiver_phy_ids);
+/* Temporary wrapper until the multiplexer subsystem supports optional muxes */
+static inline struct mux_state *
+devm_mux_state_get_optional(struct device *dev, const char *mux_name)
+{
+ if (!of_property_present(dev->of_node, "mux-states"))
+ return NULL;
+
+ return devm_mux_state_get(dev, mux_name);
+}
+
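The probe hunk below shows what the wrapper buys:

/*
 * Before/after semantics (sketch):
 *   old: every lookup error except -EPROBE_DEFER was silently swallowed,
 *        so a genuine mux failure left the transceiver mux-less;
 *   new: a missing "mux-states" property yields NULL (no mux, valid),
 *        while any real error, -EPROBE_DEFER included, fails the probe.
 */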
static int can_transceiver_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
@@ -114,13 +124,11 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
match = of_match_node(can_transceiver_phy_ids, pdev->dev.of_node);
drvdata = match->data;
- mux_state = devm_mux_state_get(dev, NULL);
- if (IS_ERR(mux_state)) {
- if (PTR_ERR(mux_state) == -EPROBE_DEFER)
- return PTR_ERR(mux_state);
- } else {
- can_transceiver_phy->mux_state = mux_state;
- }
+ mux_state = devm_mux_state_get_optional(dev, NULL);
+ if (IS_ERR(mux_state))
+ return PTR_ERR(mux_state);
+
+ can_transceiver_phy->mux_state = mux_state;
phy = devm_phy_create(dev, dev->of_node,
&can_transceiver_phy_ops);
diff --git a/drivers/phy/phy-snps-eusb2.c b/drivers/phy/phy-snps-eusb2.c
new file mode 100644
index 000000000000..b73a1d7e57b3
--- /dev/null
+++ b/drivers/phy/phy-snps-eusb2.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#define EXYNOS_USB_PHY_HS_PHY_CTRL_RST (0x0)
+#define USB_PHY_RST_MASK GENMASK(1, 0)
+#define UTMI_PORT_RST_MASK GENMASK(5, 4)
+
+#define EXYNOS_USB_PHY_HS_PHY_CTRL_COMMON (0x4)
+#define RPTR_MODE BIT(10)
+#define FSEL_20_MHZ_VAL (0x1)
+#define FSEL_24_MHZ_VAL (0x2)
+#define FSEL_26_MHZ_VAL (0x3)
+#define FSEL_48_MHZ_VAL (0x2)
+
+#define EXYNOS_USB_PHY_CFG_PLLCFG0 (0x8)
+#define PHY_CFG_PLL_FB_DIV_19_8_MASK GENMASK(19, 8)
+#define DIV_19_8_19_2_MHZ_VAL (0x170)
+#define DIV_19_8_20_MHZ_VAL (0x160)
+#define DIV_19_8_24_MHZ_VAL (0x120)
+#define DIV_19_8_26_MHZ_VAL (0x107)
+#define DIV_19_8_48_MHZ_VAL (0x120)
+
+#define EXYNOS_USB_PHY_CFG_PLLCFG1 (0xc)
+#define EXYNOS_PHY_CFG_PLL_FB_DIV_11_8_MASK GENMASK(11, 8)
+#define EXYNOS_DIV_11_8_19_2_MHZ_VAL (0x0)
+#define EXYNOS_DIV_11_8_20_MHZ_VAL (0x0)
+#define EXYNOS_DIV_11_8_24_MHZ_VAL (0x0)
+#define EXYNOS_DIV_11_8_26_MHZ_VAL (0x0)
+#define EXYNOS_DIV_11_8_48_MHZ_VAL (0x1)
+
+#define EXYNOS_PHY_CFG_TX (0x14)
+#define EXYNOS_PHY_CFG_TX_FSLS_VREF_TUNE_MASK GENMASK(2, 1)
+
+#define EXYNOS_USB_PHY_UTMI_TESTSE (0x20)
+#define TEST_IDDQ BIT(6)
+
+#define QCOM_USB_PHY_UTMI_CTRL0 (0x3c)
+#define SLEEPM BIT(0)
+#define OPMODE_MASK GENMASK(4, 3)
+#define OPMODE_NONDRIVING BIT(3)
+
+#define QCOM_USB_PHY_UTMI_CTRL5 (0x50)
+#define POR BIT(1)
+
+#define QCOM_USB_PHY_HS_PHY_CTRL_COMMON0 (0x54)
+#define PHY_ENABLE BIT(0)
+#define SIDDQ_SEL BIT(1)
+#define SIDDQ BIT(2)
+#define RETENABLEN BIT(3)
+#define FSEL_MASK GENMASK(6, 4)
+#define FSEL_19_2_MHZ_VAL (0x0)
+#define FSEL_38_4_MHZ_VAL (0x4)
+
+#define QCOM_USB_PHY_CFG_CTRL_1 (0x58)
+#define PHY_CFG_PLL_CPBIAS_CNTRL_MASK GENMASK(7, 1)
+
+#define QCOM_USB_PHY_CFG_CTRL_2 (0x5c)
+#define PHY_CFG_PLL_FB_DIV_7_0_MASK GENMASK(7, 0)
+#define DIV_7_0_19_2_MHZ_VAL (0x90)
+#define DIV_7_0_38_4_MHZ_VAL (0xc8)
+
+#define QCOM_USB_PHY_CFG_CTRL_3 (0x60)
+#define PHY_CFG_PLL_FB_DIV_11_8_MASK GENMASK(3, 0)
+#define DIV_11_8_19_2_MHZ_VAL (0x1)
+#define DIV_11_8_38_4_MHZ_VAL (0x0)
+
+#define PHY_CFG_PLL_REF_DIV GENMASK(7, 4)
+#define PLL_REF_DIV_VAL (0x0)
+
+#define QCOM_USB_PHY_HS_PHY_CTRL2 (0x64)
+#define VBUSVLDEXT0 BIT(0)
+#define USB2_SUSPEND_N BIT(2)
+#define USB2_SUSPEND_N_SEL BIT(3)
+#define VBUS_DET_EXT_SEL BIT(4)
+
+#define QCOM_USB_PHY_CFG_CTRL_4 (0x68)
+#define PHY_CFG_PLL_GMP_CNTRL_MASK GENMASK(1, 0)
+#define PHY_CFG_PLL_INT_CNTRL_MASK GENMASK(7, 2)
+
+#define QCOM_USB_PHY_CFG_CTRL_5 (0x6c)
+#define PHY_CFG_PLL_PROP_CNTRL_MASK GENMASK(4, 0)
+#define PHY_CFG_PLL_VREF_TUNE_MASK GENMASK(7, 6)
+
+#define QCOM_USB_PHY_CFG_CTRL_6 (0x70)
+#define PHY_CFG_PLL_VCO_CNTRL_MASK GENMASK(2, 0)
+
+#define QCOM_USB_PHY_CFG_CTRL_7 (0x74)
+
+#define QCOM_USB_PHY_CFG_CTRL_8 (0x78)
+#define PHY_CFG_TX_FSLS_VREF_TUNE_MASK GENMASK(1, 0)
+#define PHY_CFG_TX_FSLS_VREG_BYPASS BIT(2)
+#define PHY_CFG_TX_HS_VREF_TUNE_MASK GENMASK(5, 3)
+#define PHY_CFG_TX_HS_XV_TUNE_MASK GENMASK(7, 6)
+
+#define QCOM_USB_PHY_CFG_CTRL_9 (0x7c)
+#define PHY_CFG_TX_PREEMP_TUNE_MASK GENMASK(2, 0)
+#define PHY_CFG_TX_RES_TUNE_MASK GENMASK(4, 3)
+#define PHY_CFG_TX_RISE_TUNE_MASK GENMASK(6, 5)
+#define PHY_CFG_RCAL_BYPASS BIT(7)
+
+#define QCOM_USB_PHY_CFG_CTRL_10 (0x80)
+
+#define QCOM_USB_PHY_CFG0 (0x94)
+#define DATAPATH_CTRL_OVERRIDE_EN BIT(0)
+#define CMN_CTRL_OVERRIDE_EN BIT(1)
+
+#define QCOM_UTMI_PHY_CMN_CTRL0 (0x98)
+#define TESTBURNIN BIT(6)
+
+#define QCOM_USB_PHY_FSEL_SEL (0xb8)
+#define FSEL_SEL BIT(0)
+
+#define QCOM_USB_PHY_APB_ACCESS_CMD (0x130)
+#define RW_ACCESS BIT(0)
+#define APB_START_CMD BIT(1)
+#define APB_LOGIC_RESET BIT(2)
+
+#define QCOM_USB_PHY_APB_ACCESS_STATUS (0x134)
+#define ACCESS_DONE BIT(0)
+#define TIMED_OUT BIT(1)
+#define ACCESS_ERROR BIT(2)
+#define ACCESS_IN_PROGRESS BIT(3)
+
+#define QCOM_USB_PHY_APB_ADDRESS (0x138)
+#define APB_REG_ADDR_MASK GENMASK(7, 0)
+
+#define QCOM_USB_PHY_APB_WRDATA_LSB (0x13c)
+#define APB_REG_WRDATA_7_0_MASK GENMASK(3, 0)
+
+#define QCOM_USB_PHY_APB_WRDATA_MSB (0x140)
+#define APB_REG_WRDATA_15_8_MASK GENMASK(7, 4)
+
+#define QCOM_USB_PHY_APB_RDDATA_LSB (0x144)
+#define APB_REG_RDDATA_7_0_MASK GENMASK(3, 0)
+
+#define QCOM_USB_PHY_APB_RDDATA_MSB (0x148)
+#define APB_REG_RDDATA_15_8_MASK GENMASK(7, 4)
+
+static const char * const eusb2_hsphy_vreg_names[] = {
+ "vdd", "vdda12",
+};
+
+#define EUSB2_NUM_VREGS ARRAY_SIZE(eusb2_hsphy_vreg_names)
+
+struct snps_eusb2_phy_drvdata {
+ int (*phy_init)(struct phy *p);
+ const char * const *clk_names;
+ int num_clks;
+};
+
+struct snps_eusb2_hsphy {
+ struct phy *phy;
+ void __iomem *base;
+
+ struct clk *ref_clk;
+ struct clk_bulk_data *clks;
+ struct reset_control *phy_reset;
+
+ struct regulator_bulk_data vregs[EUSB2_NUM_VREGS];
+
+ enum phy_mode mode;
+
+ struct phy *repeater;
+
+ const struct snps_eusb2_phy_drvdata *data;
+};
+
+static int snps_eusb2_hsphy_set_mode(struct phy *p, enum phy_mode mode, int submode)
+{
+ struct snps_eusb2_hsphy *phy = phy_get_drvdata(p);
+
+ phy->mode = mode;
+
+ return phy_set_mode_ext(phy->repeater, mode, submode);
+}
+
+static void snps_eusb2_hsphy_write_mask(void __iomem *base, u32 offset,
+ u32 mask, u32 val)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~mask;
+ reg |= val & mask;
+ writel_relaxed(reg, base + offset);
+
+ /* Ensure above write is completed */
+ readl_relaxed(base + offset);
+}
+
+static void qcom_eusb2_default_parameters(struct snps_eusb2_hsphy *phy)
+{
+ /* default parameters: tx pre-emphasis */
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_9,
+ PHY_CFG_TX_PREEMP_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_PREEMP_TUNE_MASK, 0));
+
+ /* tx rise/fall time */
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_9,
+ PHY_CFG_TX_RISE_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_RISE_TUNE_MASK, 0x2));
+
+ /* source impedance adjustment */
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_9,
+ PHY_CFG_TX_RES_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_RES_TUNE_MASK, 0x1));
+
+	/* dc voltage level adjustment */
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_8,
+ PHY_CFG_TX_HS_VREF_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_HS_VREF_TUNE_MASK, 0x3));
+
+	/* transmitter HS crossover adjustment */
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_8,
+ PHY_CFG_TX_HS_XV_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_HS_XV_TUNE_MASK, 0x0));
+}
+
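FIELD_PREP() shifts each value into position at the mask's lowest set bit, so the write_mask() calls above only ever touch their own field. A worked example:

/*
 * PHY_CFG_TX_RISE_TUNE_MASK is GENMASK(6, 5) == 0x60, so:
 *
 *   FIELD_PREP(PHY_CFG_TX_RISE_TUNE_MASK, 0x2) == 0x2 << 5 == 0x40
 *
 * and the update leaves bits outside 6:5 of CFG_CTRL_9 untouched.
 */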
+struct snps_eusb2_ref_clk {
+ unsigned long freq;
+ u32 fsel_val;
+ u32 div_7_0_val;
+ u32 div_11_8_val;
+};
+
+static const struct snps_eusb2_ref_clk exynos_eusb2_ref_clk[] = {
+ { 19200000, FSEL_19_2_MHZ_VAL, DIV_19_8_19_2_MHZ_VAL, EXYNOS_DIV_11_8_19_2_MHZ_VAL },
+ { 20000000, FSEL_20_MHZ_VAL, DIV_19_8_20_MHZ_VAL, EXYNOS_DIV_11_8_20_MHZ_VAL },
+ { 24000000, FSEL_24_MHZ_VAL, DIV_19_8_24_MHZ_VAL, EXYNOS_DIV_11_8_24_MHZ_VAL },
+ { 26000000, FSEL_26_MHZ_VAL, DIV_19_8_26_MHZ_VAL, EXYNOS_DIV_11_8_26_MHZ_VAL },
+ { 48000000, FSEL_48_MHZ_VAL, DIV_19_8_48_MHZ_VAL, EXYNOS_DIV_11_8_48_MHZ_VAL },
+};
+
+static int exynos_eusb2_ref_clk_init(struct snps_eusb2_hsphy *phy)
+{
+ const struct snps_eusb2_ref_clk *config = NULL;
+ unsigned long ref_clk_freq = clk_get_rate(phy->ref_clk);
+
+ for (int i = 0; i < ARRAY_SIZE(exynos_eusb2_ref_clk); i++) {
+ if (exynos_eusb2_ref_clk[i].freq == ref_clk_freq) {
+ config = &exynos_eusb2_ref_clk[i];
+ break;
+ }
+ }
+
+ if (!config) {
+ dev_err(&phy->phy->dev, "unsupported ref_clk_freq:%lu\n", ref_clk_freq);
+ return -EINVAL;
+ }
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_HS_PHY_CTRL_COMMON,
+ FSEL_MASK,
+ FIELD_PREP(FSEL_MASK, config->fsel_val));
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_CFG_PLLCFG0,
+ PHY_CFG_PLL_FB_DIV_19_8_MASK,
+ FIELD_PREP(PHY_CFG_PLL_FB_DIV_19_8_MASK,
+ config->div_7_0_val));
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_CFG_PLLCFG1,
+ EXYNOS_PHY_CFG_PLL_FB_DIV_11_8_MASK,
+ config->div_11_8_val);
+ return 0;
+}
+
+static const struct snps_eusb2_ref_clk qcom_eusb2_ref_clk[] = {
+ { 19200000, FSEL_19_2_MHZ_VAL, DIV_7_0_19_2_MHZ_VAL, DIV_11_8_19_2_MHZ_VAL },
+ { 38400000, FSEL_38_4_MHZ_VAL, DIV_7_0_38_4_MHZ_VAL, DIV_11_8_38_4_MHZ_VAL },
+};
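Both Qualcomm rows keep the reference-times-feedback product constant, where the 12-bit feedback divider is {div_11_8, div_7_0}: 19.2 MHz * 0x190 (400) == 38.4 MHz * 0x0c8 (200) == 7680 MHz, presumably a fixed PLL/VCO target from which the high-speed data rate is derived. A compile-time sanity check of that arithmetic:

	/* fb = (div_11_8 << 8) | div_7_0; same PLL product for both refs */
	static_assert(19200000ULL * 0x190 == 38400000ULL * 0x0c8);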
+
+static int qcom_eusb2_ref_clk_init(struct snps_eusb2_hsphy *phy)
+{
+ const struct snps_eusb2_ref_clk *config = NULL;
+ unsigned long ref_clk_freq = clk_get_rate(phy->ref_clk);
+
+ for (int i = 0; i < ARRAY_SIZE(qcom_eusb2_ref_clk); i++) {
+ if (qcom_eusb2_ref_clk[i].freq == ref_clk_freq) {
+ config = &qcom_eusb2_ref_clk[i];
+ break;
+ }
+ }
+
+ if (!config) {
+ dev_err(&phy->phy->dev, "unsupported ref_clk_freq:%lu\n", ref_clk_freq);
+ return -EINVAL;
+ }
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL_COMMON0,
+ FSEL_MASK,
+ FIELD_PREP(FSEL_MASK, config->fsel_val));
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_2,
+ PHY_CFG_PLL_FB_DIV_7_0_MASK,
+ config->div_7_0_val);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_3,
+ PHY_CFG_PLL_FB_DIV_11_8_MASK,
+ config->div_11_8_val);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_3,
+ PHY_CFG_PLL_REF_DIV, PLL_REF_DIV_VAL);
+
+ return 0;
+}
+
+static int exynos_snps_eusb2_hsphy_init(struct phy *p)
+{
+ struct snps_eusb2_hsphy *phy = phy_get_drvdata(p);
+ int ret;
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_HS_PHY_CTRL_RST,
+ USB_PHY_RST_MASK | UTMI_PORT_RST_MASK,
+ USB_PHY_RST_MASK | UTMI_PORT_RST_MASK);
+ fsleep(50); /* required after holding phy in reset */
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_HS_PHY_CTRL_COMMON,
+ RPTR_MODE, RPTR_MODE);
+
+ /* update ref_clk related registers */
+ ret = exynos_eusb2_ref_clk_init(phy);
+ if (ret)
+ return ret;
+
+ /* default parameter: tx fsls-vref */
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_PHY_CFG_TX,
+ EXYNOS_PHY_CFG_TX_FSLS_VREF_TUNE_MASK,
+ FIELD_PREP(EXYNOS_PHY_CFG_TX_FSLS_VREF_TUNE_MASK, 0x0));
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_UTMI_TESTSE,
+ TEST_IDDQ, 0);
+ fsleep(10); /* required after releasing test_iddq */
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_HS_PHY_CTRL_RST,
+ USB_PHY_RST_MASK, 0);
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_HS_PHY_CTRL_COMMON,
+ PHY_ENABLE, PHY_ENABLE);
+
+ snps_eusb2_hsphy_write_mask(phy->base, EXYNOS_USB_PHY_HS_PHY_CTRL_RST,
+ UTMI_PORT_RST_MASK, 0);
+
+ return 0;
+}
+
+static const char * const exynos_eusb2_hsphy_clock_names[] = {
+ "ref", "bus", "ctrl",
+};
+
+static const struct snps_eusb2_phy_drvdata exynos2200_snps_eusb2_phy = {
+ .phy_init = exynos_snps_eusb2_hsphy_init,
+ .clk_names = exynos_eusb2_hsphy_clock_names,
+ .num_clks = ARRAY_SIZE(exynos_eusb2_hsphy_clock_names),
+};
+
+static int qcom_snps_eusb2_hsphy_init(struct phy *p)
+{
+ struct snps_eusb2_hsphy *phy = phy_get_drvdata(p);
+ int ret;
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG0,
+ CMN_CTRL_OVERRIDE_EN, CMN_CTRL_OVERRIDE_EN);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_UTMI_CTRL5, POR, POR);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL_COMMON0,
+ PHY_ENABLE | RETENABLEN, PHY_ENABLE | RETENABLEN);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_APB_ACCESS_CMD,
+ APB_LOGIC_RESET, APB_LOGIC_RESET);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_UTMI_PHY_CMN_CTRL0, TESTBURNIN, 0);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_FSEL_SEL,
+ FSEL_SEL, FSEL_SEL);
+
+ /* update ref_clk related registers */
+ ret = qcom_eusb2_ref_clk_init(phy);
+ if (ret)
+ return ret;
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_1,
+ PHY_CFG_PLL_CPBIAS_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_CPBIAS_CNTRL_MASK, 0x1));
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_4,
+ PHY_CFG_PLL_INT_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_INT_CNTRL_MASK, 0x8));
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_4,
+ PHY_CFG_PLL_GMP_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_GMP_CNTRL_MASK, 0x1));
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_5,
+ PHY_CFG_PLL_PROP_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_PROP_CNTRL_MASK, 0x10));
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_6,
+ PHY_CFG_PLL_VCO_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_VCO_CNTRL_MASK, 0x0));
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_5,
+ PHY_CFG_PLL_VREF_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_PLL_VREF_TUNE_MASK, 0x1));
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL2,
+ VBUS_DET_EXT_SEL, VBUS_DET_EXT_SEL);
+
+ /* set default parameters */
+ qcom_eusb2_default_parameters(phy);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_UTMI_CTRL0, SLEEPM, SLEEPM);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL_COMMON0,
+ SIDDQ_SEL, SIDDQ_SEL);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL_COMMON0,
+ SIDDQ, 0);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_UTMI_CTRL5, POR, 0);
+
+ snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL, 0);
+
+ return 0;
+}
+
+static const char * const qcom_eusb2_hsphy_clock_names[] = {
+ "ref",
+};
+
+static const struct snps_eusb2_phy_drvdata sm8550_snps_eusb2_phy = {
+ .phy_init = qcom_snps_eusb2_hsphy_init,
+ .clk_names = qcom_eusb2_hsphy_clock_names,
+ .num_clks = ARRAY_SIZE(qcom_eusb2_hsphy_clock_names),
+};
+
+static int snps_eusb2_hsphy_init(struct phy *p)
+{
+ struct snps_eusb2_hsphy *phy = phy_get_drvdata(p);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(phy->vregs), phy->vregs);
+ if (ret)
+ return ret;
+
+ ret = phy_init(phy->repeater);
+ if (ret) {
+ dev_err(&p->dev, "repeater init failed. %d\n", ret);
+ goto disable_vreg;
+ }
+
+ ret = clk_bulk_prepare_enable(phy->data->num_clks, phy->clks);
+ if (ret) {
+ dev_err(&p->dev, "failed to enable ref clock, %d\n", ret);
+ goto disable_vreg;
+ }
+
+ ret = reset_control_assert(phy->phy_reset);
+ if (ret) {
+ dev_err(&p->dev, "failed to assert phy_reset, %d\n", ret);
+ goto disable_ref_clk;
+ }
+
+ usleep_range(100, 150);
+
+ ret = reset_control_deassert(phy->phy_reset);
+ if (ret) {
+ dev_err(&p->dev, "failed to de-assert phy_reset, %d\n", ret);
+ goto disable_ref_clk;
+ }
+
+ ret = phy->data->phy_init(p);
+ if (ret)
+ goto disable_ref_clk;
+
+ return 0;
+
+disable_ref_clk:
+ clk_bulk_disable_unprepare(phy->data->num_clks, phy->clks);
+
+disable_vreg:
+ regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs);
+
+ return ret;
+}
+
+static int snps_eusb2_hsphy_exit(struct phy *p)
+{
+ struct snps_eusb2_hsphy *phy = phy_get_drvdata(p);
+
+ clk_disable_unprepare(phy->ref_clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs);
+
+ phy_exit(phy->repeater);
+
+ return 0;
+}
+
+static const struct phy_ops snps_eusb2_hsphy_ops = {
+ .init = snps_eusb2_hsphy_init,
+ .exit = snps_eusb2_hsphy_exit,
+ .set_mode = snps_eusb2_hsphy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int snps_eusb2_hsphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct snps_eusb2_hsphy *phy;
+ struct phy_provider *phy_provider;
+ struct phy *generic_phy;
+ int ret, i;
+ int num;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->data = device_get_match_data(dev);
+ if (!phy->data)
+ return -EINVAL;
+
+ phy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->base))
+ return PTR_ERR(phy->base);
+
+ phy->phy_reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(phy->phy_reset))
+ return PTR_ERR(phy->phy_reset);
+
+ phy->clks = devm_kcalloc(dev, phy->data->num_clks, sizeof(*phy->clks),
+ GFP_KERNEL);
+ if (!phy->clks)
+ return -ENOMEM;
+
+ for (int i = 0; i < phy->data->num_clks; ++i)
+ phy->clks[i].id = phy->data->clk_names[i];
+
+ ret = devm_clk_bulk_get(dev, phy->data->num_clks, phy->clks);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get phy clock(s)\n");
+
+ phy->ref_clk = NULL;
+ for (int i = 0; i < phy->data->num_clks; ++i) {
+ if (!strcmp(phy->clks[i].id, "ref")) {
+ phy->ref_clk = phy->clks[i].clk;
+ break;
+ }
+ }
+
+	if (!phy->ref_clk)
+		return dev_err_probe(dev, -ENOENT,
+				     "failed to get ref clk\n");
+
+ num = ARRAY_SIZE(phy->vregs);
+ for (i = 0; i < num; i++)
+ phy->vregs[i].supply = eusb2_hsphy_vreg_names[i];
+
+ ret = devm_regulator_bulk_get(dev, num, phy->vregs);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
+
+ phy->repeater = devm_of_phy_optional_get(dev, np, 0);
+ if (IS_ERR(phy->repeater))
+ return dev_err_probe(dev, PTR_ERR(phy->repeater),
+ "failed to get repeater\n");
+
+ generic_phy = devm_phy_create(dev, NULL, &snps_eusb2_hsphy_ops);
+ if (IS_ERR(generic_phy)) {
+		dev_err(dev, "failed to create phy: %ld\n", PTR_ERR(generic_phy));
+ return PTR_ERR(generic_phy);
+ }
+
+ dev_set_drvdata(dev, phy);
+ phy_set_drvdata(generic_phy, phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ dev_info(dev, "Registered Snps-eUSB2 phy\n");
+
+ return 0;
+}
+
+static const struct of_device_id snps_eusb2_hsphy_of_match_table[] = {
+ {
+ .compatible = "qcom,sm8550-snps-eusb2-phy",
+ .data = &sm8550_snps_eusb2_phy,
+ }, {
+ .compatible = "samsung,exynos2200-eusb2-phy",
+ .data = &exynos2200_snps_eusb2_phy,
+ }, { },
+};
+MODULE_DEVICE_TABLE(of, snps_eusb2_hsphy_of_match_table);
+
+static struct platform_driver snps_eusb2_hsphy_driver = {
+ .probe = snps_eusb2_hsphy_probe,
+ .driver = {
+ .name = "snps-eusb2-hsphy",
+ .of_match_table = snps_eusb2_hsphy_of_match_table,
+ },
+};
+
+module_platform_driver(snps_eusb2_hsphy_driver);
+MODULE_DESCRIPTION("Synopsys eUSB2 HS PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index c1e0a11ddd76..ef14f4e33973 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -125,15 +125,6 @@ config PHY_QCOM_QUSB2
PHY which is usually paired with either the ChipIdea or Synopsys DWC3
USB IPs on MSM SOCs.
-config PHY_QCOM_SNPS_EUSB2
- tristate "Qualcomm SNPS eUSB2 PHY Driver"
- depends on OF && (ARCH_QCOM || COMPILE_TEST)
- select GENERIC_PHY
- help
- Enable support for the USB high-speed SNPS eUSB2 phy on Qualcomm
- chipsets. The PHY is paired with a Synopsys DWC3 USB controller
- on Qualcomm SOCs.
-
config PHY_QCOM_EUSB2_REPEATER
tristate "Qualcomm SNPS eUSB2 Repeater Driver"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 42038bc30974..3851e28a212d 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_PHY_QCOM_QMP_USB) += phy-qcom-qmp-usb.o
obj-$(CONFIG_PHY_QCOM_QMP_USB_LEGACY) += phy-qcom-qmp-usb-legacy.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
-obj-$(CONFIG_PHY_QCOM_SNPS_EUSB2) += phy-qcom-snps-eusb2.o
obj-$(CONFIG_PHY_QCOM_EUSB2_REPEATER) += phy-qcom-eusb2-repeater.o
obj-$(CONFIG_PHY_QCOM_UNIPHY_PCIE_28LP) += phy-qcom-uniphy-pcie-28lp.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index c232b8fe9846..461b9e0af610 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -3021,8 +3021,6 @@ struct qmp_phy_cfg {
bool skip_start_delay;
- bool has_nocsr_reset;
-
/* QMP PHY pipe clock interface rate */
unsigned long pipe_clock_rate;
@@ -3035,6 +3033,7 @@ struct qmp_pcie {
const struct qmp_phy_cfg *cfg;
bool tcsr_4ln_config;
+ bool skip_init;
void __iomem *serdes;
void __iomem *pcs;
@@ -4020,7 +4019,6 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
- .has_nocsr_reset = true,
/* 20MHz PHY AUX Clock */
.aux_clock_rate = 20000000,
@@ -4053,7 +4051,6 @@ static const struct qmp_phy_cfg sm8650_qmp_gen4x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
- .has_nocsr_reset = true,
/* 20MHz PHY AUX Clock */
.aux_clock_rate = 20000000,
@@ -4173,7 +4170,6 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
- .has_nocsr_reset = true,
};
static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = {
@@ -4207,7 +4203,6 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
- .has_nocsr_reset = true,
};
static const struct qmp_phy_cfg x1e80100_qmp_gen4x8_pciephy_cfg = {
@@ -4233,13 +4228,12 @@ static const struct qmp_phy_cfg x1e80100_qmp_gen4x8_pciephy_cfg = {
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
- .vreg_list = sm8550_qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = pciephy_v6_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
- .has_nocsr_reset = true,
};
static const struct qmp_phy_cfg qmp_v6_gen4x4_pciephy_cfg = {
@@ -4337,18 +4331,38 @@ static int qmp_pcie_init(struct phy *phy)
{
struct qmp_pcie *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+ bool phy_initialized = !!(readl(pcs + cfg->regs[QPHY_START_CTRL]));
int ret;
+ qmp->skip_init = qmp->nocsr_reset && phy_initialized;
+ /*
+	 * The init sequences must be present in two cases:
+	 * 1. The PHY doesn't support no_csr reset.
+	 * 2. The PHY supports no_csr reset but wasn't initialized by the
+	 *    bootloader.
+	 * Init can't be skipped in either case.
+ */
+ if (!qmp->skip_init && !cfg->tbls.serdes_num) {
+ dev_err(qmp->dev, "Init sequence not available\n");
+ return -ENODATA;
+ }
+
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
return ret;
}
- ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
- if (ret) {
- dev_err(qmp->dev, "reset assert failed\n");
- goto err_disable_regulators;
+ /*
+	 * Toggle the BCR reset only for a PHY that doesn't support no_csr
+	 * reset or that hasn't been initialized by the bootloader.
+ */
+ if (!qmp->skip_init) {
+ ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
}
ret = reset_control_assert(qmp->nocsr_reset);
@@ -4359,10 +4373,12 @@ static int qmp_pcie_init(struct phy *phy)
usleep_range(200, 300);
- ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
- if (ret) {
- dev_err(qmp->dev, "reset deassert failed\n");
- goto err_assert_reset;
+ if (!qmp->skip_init) {
+ ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_assert_reset;
+ }
}
ret = clk_bulk_prepare_enable(ARRAY_SIZE(qmp_pciephy_clk_l), qmp->clks);
@@ -4372,7 +4388,8 @@ static int qmp_pcie_init(struct phy *phy)
return 0;
err_assert_reset:
- reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (!qmp->skip_init)
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
@@ -4384,7 +4401,10 @@ static int qmp_pcie_exit(struct phy *phy)
struct qmp_pcie *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
- reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (qmp->nocsr_reset)
+ reset_control_assert(qmp->nocsr_reset);
+ else
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
clk_bulk_disable_unprepare(ARRAY_SIZE(qmp_pciephy_clk_l), qmp->clks);
@@ -4403,6 +4423,13 @@ static int qmp_pcie_power_on(struct phy *phy)
unsigned int mask, val;
int ret;
+ /*
+	 * Program the CSR registers only for a PHY that doesn't support
+	 * no_csr reset or that hasn't been initialized by the bootloader.
+ */
+ if (qmp->skip_init)
+ goto skip_tbls_init;
+
qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
cfg->pwrdn_ctrl);
@@ -4414,6 +4441,7 @@ static int qmp_pcie_power_on(struct phy *phy)
qmp_pcie_init_registers(qmp, &cfg->tbls);
qmp_pcie_init_registers(qmp, mode_tbls);
+skip_tbls_init:
ret = clk_bulk_prepare_enable(qmp->num_pipe_clks, qmp->pipe_clks);
if (ret)
return ret;
@@ -4424,6 +4452,9 @@ static int qmp_pcie_power_on(struct phy *phy)
goto err_disable_pipe_clk;
}
+ if (qmp->skip_init)
+ goto skip_serdes_start;
+
/* Pull PHY out of reset state */
qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
@@ -4433,6 +4464,7 @@ static int qmp_pcie_power_on(struct phy *phy)
if (!cfg->skip_start_delay)
usleep_range(1000, 1200);
+skip_serdes_start:
status = pcs + cfg->regs[QPHY_PCS_STATUS];
mask = cfg->phy_status;
ret = readl_poll_timeout(status, val, !(val & mask), 200,
@@ -4457,6 +4489,15 @@ static int qmp_pcie_power_off(struct phy *phy)
clk_bulk_disable_unprepare(qmp->num_pipe_clks, qmp->pipe_clks);
+ /*
+	 * While powering off the PHY, only qmp->nocsr_reset needs to be
+	 * checked. That way, whether the PHY settings were originally
+	 * programmed by the bootloader or by the PHY driver itself, they
+	 * can be reused the next time the PHY is powered on.
+ */
+ if (qmp->nocsr_reset)
+ goto skip_phy_deinit;
+
/* PHY reset */
qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
@@ -4468,6 +4509,7 @@ static int qmp_pcie_power_off(struct phy *phy)
qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
cfg->pwrdn_ctrl);
+skip_phy_deinit:
return 0;
}
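Pulling the hunks above together, a sketch of the resulting no_csr lifecycle (reset_control_assert() is a no-op that returns 0 when the optional reset is absent, i.e. NULL):

/*
 *                 nocsr absent        nocsr present,      nocsr present,
 *                                     fresh PHY           bootloader-initialized
 *   init:         BCR toggle          BCR + nocsr toggle  nocsr toggle only
 *   power_on:     full table program  full table program  tables/serdes start skipped
 *   power_off:    SW_RESET + pwrdn    registers retained  registers retained
 *   exit:         BCR assert          nocsr assert        nocsr assert
 */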
@@ -4557,12 +4599,10 @@ static int qmp_pcie_reset_init(struct qmp_pcie *qmp)
if (ret)
return dev_err_probe(dev, ret, "failed to get resets\n");
- if (cfg->has_nocsr_reset) {
- qmp->nocsr_reset = devm_reset_control_get_exclusive(dev, "phy_nocsr");
- if (IS_ERR(qmp->nocsr_reset))
- return dev_err_probe(dev, PTR_ERR(qmp->nocsr_reset),
- "failed to get no-csr reset\n");
- }
+ qmp->nocsr_reset = devm_reset_control_get_optional_exclusive(dev, "phy_nocsr");
+ if (IS_ERR(qmp->nocsr_reset))
+ return dev_err_probe(dev, PTR_ERR(qmp->nocsr_reset),
+ "failed to get no-csr reset\n");
return 0;
}
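Dropping has_nocsr_reset works because the optional reset API is NULL-tolerant. A sketch of the contract (standard reset-core semantics, not code from this patch):

	struct reset_control *r;

	r = devm_reset_control_get_optional_exclusive(dev, "phy_nocsr");
	if (IS_ERR(r))
		return PTR_ERR(r);	/* real lookup errors only */

	reset_control_assert(r);	/* returns 0 and does nothing if r is NULL */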
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 45b3b792696e..b33e2e2b5014 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -1754,7 +1754,8 @@ static void qmp_ufs_init_registers(struct qmp_ufs *qmp, const struct qmp_phy_cfg
qmp_ufs_init_all(qmp, &cfg->tbls_hs_overlay[i]);
}
- qmp_ufs_init_all(qmp, &cfg->tbls_hs_b);
+ if (qmp->mode == PHY_MODE_UFS_HS_B)
+ qmp_ufs_init_all(qmp, &cfg->tbls_hs_b);
}
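qmp->mode is recorded by the PHY's set_mode op before power-on, which is what makes this gating work. A sketch of the consumer side (function name and gear value are illustrative; on Qualcomm platforms the UFS host passes the gear as submode):

static int ufs_host_enable_phy(struct phy *phy)
{
	int ret;

	/* mode selects HS rate A/B; submode carries the gear */
	ret = phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, UFS_HS_G4);
	if (ret)
		return ret;

	/* tbls_hs_b are programmed only now, and only because rate B was chosen */
	return phy_power_on(phy);
}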
static int qmp_ufs_com_init(struct qmp_ufs *qmp)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 787721570457..ed646a7e705b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -2106,12 +2106,16 @@ static void __iomem *qmp_usb_iomap(struct device *dev, struct device_node *np,
int index, bool exclusive)
{
struct resource res;
+ void __iomem *mem;
if (!exclusive) {
if (of_address_to_resource(np, index, &res))
return IOMEM_ERR_PTR(-EINVAL);
- return devm_ioremap(dev, res.start, resource_size(&res));
+ mem = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!mem)
+ return IOMEM_ERR_PTR(-ENOMEM);
+ return mem;
}
return devm_of_iomap(dev, np, index, NULL);
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 1f5f7df14d5a..49c37c53b38e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -151,21 +151,6 @@ static const struct qusb2_phy_init_tbl ipq6018_init_tbl[] = {
QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9F),
};
-static const struct qusb2_phy_init_tbl ipq5424_init_tbl[] = {
- QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL, 0x14),
- QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0x00),
- QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0x53),
- QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE4, 0xc3),
- QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TUNE, 0x30),
- QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL1, 0x79),
- QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL2, 0x21),
- QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE5, 0x00),
- QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
- QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TEST2, 0x14),
- QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TEST, 0x80),
- QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9f),
-};
-
static const struct qusb2_phy_init_tbl qcs615_init_tbl[] = {
QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0xc8),
QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0xb3),
@@ -359,16 +344,6 @@ static const struct qusb2_phy_cfg ipq6018_phy_cfg = {
.autoresume_en = BIT(0),
};
-static const struct qusb2_phy_cfg ipq5424_phy_cfg = {
- .tbl = ipq5424_init_tbl,
- .tbl_num = ARRAY_SIZE(ipq5424_init_tbl),
- .regs = ipq6018_regs_layout,
-
- .disable_ctrl = POWER_DOWN,
- .mask_core_ready = PLL_LOCKED,
- .autoresume_en = BIT(0),
-};
-
static const struct qusb2_phy_cfg qcs615_phy_cfg = {
.tbl = qcs615_init_tbl,
.tbl_num = ARRAY_SIZE(qcs615_init_tbl),
@@ -955,7 +930,7 @@ static const struct phy_ops qusb2_phy_gen_ops = {
static const struct of_device_id qusb2_phy_of_match_table[] = {
{
.compatible = "qcom,ipq5424-qusb2-phy",
- .data = &ipq5424_phy_cfg,
+ .data = &ipq6018_phy_cfg,
}, {
.compatible = "qcom,ipq6018-qusb2-phy",
.data = &ipq6018_phy_cfg,
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c b/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
deleted file mode 100644
index 1484691a41d5..000000000000
--- a/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
+++ /dev/null
@@ -1,442 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2023, Linaro Limited
- */
-
-#include <linux/bitfield.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/iopoll.h>
-#include <linux/mod_devicetable.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
-
-#define USB_PHY_UTMI_CTRL0 (0x3c)
-#define SLEEPM BIT(0)
-#define OPMODE_MASK GENMASK(4, 3)
-#define OPMODE_NONDRIVING BIT(3)
-
-#define USB_PHY_UTMI_CTRL5 (0x50)
-#define POR BIT(1)
-
-#define USB_PHY_HS_PHY_CTRL_COMMON0 (0x54)
-#define PHY_ENABLE BIT(0)
-#define SIDDQ_SEL BIT(1)
-#define SIDDQ BIT(2)
-#define RETENABLEN BIT(3)
-#define FSEL_MASK GENMASK(6, 4)
-#define FSEL_19_2_MHZ_VAL (0x0)
-#define FSEL_38_4_MHZ_VAL (0x4)
-
-#define USB_PHY_CFG_CTRL_1 (0x58)
-#define PHY_CFG_PLL_CPBIAS_CNTRL_MASK GENMASK(7, 1)
-
-#define USB_PHY_CFG_CTRL_2 (0x5c)
-#define PHY_CFG_PLL_FB_DIV_7_0_MASK GENMASK(7, 0)
-#define DIV_7_0_19_2_MHZ_VAL (0x90)
-#define DIV_7_0_38_4_MHZ_VAL (0xc8)
-
-#define USB_PHY_CFG_CTRL_3 (0x60)
-#define PHY_CFG_PLL_FB_DIV_11_8_MASK GENMASK(3, 0)
-#define DIV_11_8_19_2_MHZ_VAL (0x1)
-#define DIV_11_8_38_4_MHZ_VAL (0x0)
-
-#define PHY_CFG_PLL_REF_DIV GENMASK(7, 4)
-#define PLL_REF_DIV_VAL (0x0)
-
-#define USB_PHY_HS_PHY_CTRL2 (0x64)
-#define VBUSVLDEXT0 BIT(0)
-#define USB2_SUSPEND_N BIT(2)
-#define USB2_SUSPEND_N_SEL BIT(3)
-#define VBUS_DET_EXT_SEL BIT(4)
-
-#define USB_PHY_CFG_CTRL_4 (0x68)
-#define PHY_CFG_PLL_GMP_CNTRL_MASK GENMASK(1, 0)
-#define PHY_CFG_PLL_INT_CNTRL_MASK GENMASK(7, 2)
-
-#define USB_PHY_CFG_CTRL_5 (0x6c)
-#define PHY_CFG_PLL_PROP_CNTRL_MASK GENMASK(4, 0)
-#define PHY_CFG_PLL_VREF_TUNE_MASK GENMASK(7, 6)
-
-#define USB_PHY_CFG_CTRL_6 (0x70)
-#define PHY_CFG_PLL_VCO_CNTRL_MASK GENMASK(2, 0)
-
-#define USB_PHY_CFG_CTRL_7 (0x74)
-
-#define USB_PHY_CFG_CTRL_8 (0x78)
-#define PHY_CFG_TX_FSLS_VREF_TUNE_MASK GENMASK(1, 0)
-#define PHY_CFG_TX_FSLS_VREG_BYPASS BIT(2)
-#define PHY_CFG_TX_HS_VREF_TUNE_MASK GENMASK(5, 3)
-#define PHY_CFG_TX_HS_XV_TUNE_MASK GENMASK(7, 6)
-
-#define USB_PHY_CFG_CTRL_9 (0x7c)
-#define PHY_CFG_TX_PREEMP_TUNE_MASK GENMASK(2, 0)
-#define PHY_CFG_TX_RES_TUNE_MASK GENMASK(4, 3)
-#define PHY_CFG_TX_RISE_TUNE_MASK GENMASK(6, 5)
-#define PHY_CFG_RCAL_BYPASS BIT(7)
-
-#define USB_PHY_CFG_CTRL_10 (0x80)
-
-#define USB_PHY_CFG0 (0x94)
-#define DATAPATH_CTRL_OVERRIDE_EN BIT(0)
-#define CMN_CTRL_OVERRIDE_EN BIT(1)
-
-#define UTMI_PHY_CMN_CTRL0 (0x98)
-#define TESTBURNIN BIT(6)
-
-#define USB_PHY_FSEL_SEL (0xb8)
-#define FSEL_SEL BIT(0)
-
-#define USB_PHY_APB_ACCESS_CMD (0x130)
-#define RW_ACCESS BIT(0)
-#define APB_START_CMD BIT(1)
-#define APB_LOGIC_RESET BIT(2)
-
-#define USB_PHY_APB_ACCESS_STATUS (0x134)
-#define ACCESS_DONE BIT(0)
-#define TIMED_OUT BIT(1)
-#define ACCESS_ERROR BIT(2)
-#define ACCESS_IN_PROGRESS BIT(3)
-
-#define USB_PHY_APB_ADDRESS (0x138)
-#define APB_REG_ADDR_MASK GENMASK(7, 0)
-
-#define USB_PHY_APB_WRDATA_LSB (0x13c)
-#define APB_REG_WRDATA_7_0_MASK GENMASK(3, 0)
-
-#define USB_PHY_APB_WRDATA_MSB (0x140)
-#define APB_REG_WRDATA_15_8_MASK GENMASK(7, 4)
-
-#define USB_PHY_APB_RDDATA_LSB (0x144)
-#define APB_REG_RDDATA_7_0_MASK GENMASK(3, 0)
-
-#define USB_PHY_APB_RDDATA_MSB (0x148)
-#define APB_REG_RDDATA_15_8_MASK GENMASK(7, 4)
-
-static const char * const eusb2_hsphy_vreg_names[] = {
- "vdd", "vdda12",
-};
-
-#define EUSB2_NUM_VREGS ARRAY_SIZE(eusb2_hsphy_vreg_names)
-
-struct qcom_snps_eusb2_hsphy {
- struct phy *phy;
- void __iomem *base;
-
- struct clk *ref_clk;
- struct reset_control *phy_reset;
-
- struct regulator_bulk_data vregs[EUSB2_NUM_VREGS];
-
- enum phy_mode mode;
-
- struct phy *repeater;
-};
-
-static int qcom_snps_eusb2_hsphy_set_mode(struct phy *p, enum phy_mode mode, int submode)
-{
- struct qcom_snps_eusb2_hsphy *phy = phy_get_drvdata(p);
-
- phy->mode = mode;
-
- return phy_set_mode_ext(phy->repeater, mode, submode);
-}
-
-static void qcom_snps_eusb2_hsphy_write_mask(void __iomem *base, u32 offset,
- u32 mask, u32 val)
-{
- u32 reg;
-
- reg = readl_relaxed(base + offset);
- reg &= ~mask;
- reg |= val & mask;
- writel_relaxed(reg, base + offset);
-
- /* Ensure above write is completed */
- readl_relaxed(base + offset);
-}
-
-static void qcom_eusb2_default_parameters(struct qcom_snps_eusb2_hsphy *phy)
-{
- /* default parameters: tx pre-emphasis */
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_9,
- PHY_CFG_TX_PREEMP_TUNE_MASK,
- FIELD_PREP(PHY_CFG_TX_PREEMP_TUNE_MASK, 0));
-
- /* tx rise/fall time */
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_9,
- PHY_CFG_TX_RISE_TUNE_MASK,
- FIELD_PREP(PHY_CFG_TX_RISE_TUNE_MASK, 0x2));
-
- /* source impedance adjustment */
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_9,
- PHY_CFG_TX_RES_TUNE_MASK,
- FIELD_PREP(PHY_CFG_TX_RES_TUNE_MASK, 0x1));
-
- /* dc voltage level adjustement */
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_8,
- PHY_CFG_TX_HS_VREF_TUNE_MASK,
- FIELD_PREP(PHY_CFG_TX_HS_VREF_TUNE_MASK, 0x3));
-
- /* transmitter HS crossover adjustement */
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_8,
- PHY_CFG_TX_HS_XV_TUNE_MASK,
- FIELD_PREP(PHY_CFG_TX_HS_XV_TUNE_MASK, 0x0));
-}
-
-static int qcom_eusb2_ref_clk_init(struct qcom_snps_eusb2_hsphy *phy)
-{
- unsigned long ref_clk_freq = clk_get_rate(phy->ref_clk);
-
- switch (ref_clk_freq) {
- case 19200000:
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
- FSEL_MASK,
- FIELD_PREP(FSEL_MASK, FSEL_19_2_MHZ_VAL));
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_2,
- PHY_CFG_PLL_FB_DIV_7_0_MASK,
- DIV_7_0_19_2_MHZ_VAL);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_3,
- PHY_CFG_PLL_FB_DIV_11_8_MASK,
- DIV_11_8_19_2_MHZ_VAL);
- break;
-
- case 38400000:
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
- FSEL_MASK,
- FIELD_PREP(FSEL_MASK, FSEL_38_4_MHZ_VAL));
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_2,
- PHY_CFG_PLL_FB_DIV_7_0_MASK,
- DIV_7_0_38_4_MHZ_VAL);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_3,
- PHY_CFG_PLL_FB_DIV_11_8_MASK,
- DIV_11_8_38_4_MHZ_VAL);
- break;
-
- default:
- dev_err(&phy->phy->dev, "unsupported ref_clk_freq:%lu\n", ref_clk_freq);
- return -EINVAL;
- }
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_3,
- PHY_CFG_PLL_REF_DIV, PLL_REF_DIV_VAL);
-
- return 0;
-}
-
-static int qcom_snps_eusb2_hsphy_init(struct phy *p)
-{
- struct qcom_snps_eusb2_hsphy *phy = phy_get_drvdata(p);
- int ret;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(phy->vregs), phy->vregs);
- if (ret)
- return ret;
-
- ret = phy_init(phy->repeater);
- if (ret) {
- dev_err(&p->dev, "repeater init failed. %d\n", ret);
- goto disable_vreg;
- }
-
- ret = clk_prepare_enable(phy->ref_clk);
- if (ret) {
- dev_err(&p->dev, "failed to enable ref clock, %d\n", ret);
- goto disable_vreg;
- }
-
- ret = reset_control_assert(phy->phy_reset);
- if (ret) {
- dev_err(&p->dev, "failed to assert phy_reset, %d\n", ret);
- goto disable_ref_clk;
- }
-
- usleep_range(100, 150);
-
- ret = reset_control_deassert(phy->phy_reset);
- if (ret) {
- dev_err(&p->dev, "failed to de-assert phy_reset, %d\n", ret);
- goto disable_ref_clk;
- }
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG0,
- CMN_CTRL_OVERRIDE_EN, CMN_CTRL_OVERRIDE_EN);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_UTMI_CTRL5, POR, POR);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
- PHY_ENABLE | RETENABLEN, PHY_ENABLE | RETENABLEN);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_APB_ACCESS_CMD,
- APB_LOGIC_RESET, APB_LOGIC_RESET);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, UTMI_PHY_CMN_CTRL0, TESTBURNIN, 0);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_FSEL_SEL,
- FSEL_SEL, FSEL_SEL);
-
- /* update ref_clk related registers */
- ret = qcom_eusb2_ref_clk_init(phy);
- if (ret)
- goto disable_ref_clk;
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_1,
- PHY_CFG_PLL_CPBIAS_CNTRL_MASK,
- FIELD_PREP(PHY_CFG_PLL_CPBIAS_CNTRL_MASK, 0x1));
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_4,
- PHY_CFG_PLL_INT_CNTRL_MASK,
- FIELD_PREP(PHY_CFG_PLL_INT_CNTRL_MASK, 0x8));
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_4,
- PHY_CFG_PLL_GMP_CNTRL_MASK,
- FIELD_PREP(PHY_CFG_PLL_GMP_CNTRL_MASK, 0x1));
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_5,
- PHY_CFG_PLL_PROP_CNTRL_MASK,
- FIELD_PREP(PHY_CFG_PLL_PROP_CNTRL_MASK, 0x10));
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_6,
- PHY_CFG_PLL_VCO_CNTRL_MASK,
- FIELD_PREP(PHY_CFG_PLL_VCO_CNTRL_MASK, 0x0));
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_5,
- PHY_CFG_PLL_VREF_TUNE_MASK,
- FIELD_PREP(PHY_CFG_PLL_VREF_TUNE_MASK, 0x1));
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL2,
- VBUS_DET_EXT_SEL, VBUS_DET_EXT_SEL);
-
- /* set default parameters */
- qcom_eusb2_default_parameters(phy);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL2,
- USB2_SUSPEND_N_SEL | USB2_SUSPEND_N,
- USB2_SUSPEND_N_SEL | USB2_SUSPEND_N);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_UTMI_CTRL0, SLEEPM, SLEEPM);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
- SIDDQ_SEL, SIDDQ_SEL);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
- SIDDQ, 0);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_UTMI_CTRL5, POR, 0);
-
- qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL2,
- USB2_SUSPEND_N_SEL, 0);
-
- return 0;
-
-disable_ref_clk:
- clk_disable_unprepare(phy->ref_clk);
-
-disable_vreg:
- regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs);
-
- return ret;
-}
-
-static int qcom_snps_eusb2_hsphy_exit(struct phy *p)
-{
- struct qcom_snps_eusb2_hsphy *phy = phy_get_drvdata(p);
-
- clk_disable_unprepare(phy->ref_clk);
-
- regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs);
-
- phy_exit(phy->repeater);
-
- return 0;
-}
-
-static const struct phy_ops qcom_snps_eusb2_hsphy_ops = {
- .init = qcom_snps_eusb2_hsphy_init,
- .exit = qcom_snps_eusb2_hsphy_exit,
- .set_mode = qcom_snps_eusb2_hsphy_set_mode,
- .owner = THIS_MODULE,
-};
-
-static int qcom_snps_eusb2_hsphy_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct qcom_snps_eusb2_hsphy *phy;
- struct phy_provider *phy_provider;
- struct phy *generic_phy;
- int ret, i;
- int num;
-
- phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
- if (!phy)
- return -ENOMEM;
-
- phy->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(phy->base))
- return PTR_ERR(phy->base);
-
- phy->phy_reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(phy->phy_reset))
- return PTR_ERR(phy->phy_reset);
-
- phy->ref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(phy->ref_clk))
- return dev_err_probe(dev, PTR_ERR(phy->ref_clk),
- "failed to get ref clk\n");
-
- num = ARRAY_SIZE(phy->vregs);
- for (i = 0; i < num; i++)
- phy->vregs[i].supply = eusb2_hsphy_vreg_names[i];
-
- ret = devm_regulator_bulk_get(dev, num, phy->vregs);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to get regulator supplies\n");
-
- phy->repeater = devm_of_phy_get_by_index(dev, np, 0);
- if (IS_ERR(phy->repeater))
- return dev_err_probe(dev, PTR_ERR(phy->repeater),
- "failed to get repeater\n");
-
- generic_phy = devm_phy_create(dev, NULL, &qcom_snps_eusb2_hsphy_ops);
- if (IS_ERR(generic_phy)) {
- dev_err(dev, "failed to create phy %d\n", ret);
- return PTR_ERR(generic_phy);
- }
-
- dev_set_drvdata(dev, phy);
- phy_set_drvdata(generic_phy, phy);
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- dev_info(dev, "Registered Qcom-eUSB2 phy\n");
-
- return 0;
-}
-
-static const struct of_device_id qcom_snps_eusb2_hsphy_of_match_table[] = {
- { .compatible = "qcom,sm8550-snps-eusb2-phy", },
- { },
-};
-MODULE_DEVICE_TABLE(of, qcom_snps_eusb2_hsphy_of_match_table);
-
-static struct platform_driver qcom_snps_eusb2_hsphy_driver = {
- .probe = qcom_snps_eusb2_hsphy_probe,
- .driver = {
- .name = "qcom-snps-eusb2-hsphy",
- .of_match_table = qcom_snps_eusb2_hsphy_of_match_table,
- },
-};
-
-module_platform_driver(qcom_snps_eusb2_hsphy_driver);
-MODULE_DESCRIPTION("Qualcomm SNPS eUSB2 HS PHY driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c b/drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c
index c8b2a3818880..324c0a5d658e 100644
--- a/drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c
+++ b/drivers/phy/qualcomm/phy-qcom-uniphy-pcie-28lp.c
@@ -75,6 +75,40 @@ struct qcom_uniphy_pcie {
#define phy_to_dw_phy(x) container_of((x), struct qca_uni_pcie_phy, phy)
+static const struct qcom_uniphy_pcie_regs ipq5018_regs[] = {
+ {
+ .offset = SSCG_CTRL_REG_4,
+ .val = 0x1cb9,
+ }, {
+ .offset = SSCG_CTRL_REG_5,
+ .val = 0x023a,
+ }, {
+ .offset = SSCG_CTRL_REG_3,
+ .val = 0xd360,
+ }, {
+ .offset = SSCG_CTRL_REG_1,
+ .val = 0x1,
+ }, {
+ .offset = SSCG_CTRL_REG_2,
+ .val = 0xeb,
+ }, {
+ .offset = CDR_CTRL_REG_4,
+ .val = 0x3f9,
+ }, {
+ .offset = CDR_CTRL_REG_5,
+ .val = 0x1c9,
+ }, {
+ .offset = CDR_CTRL_REG_2,
+ .val = 0x419,
+ }, {
+ .offset = CDR_CTRL_REG_1,
+ .val = 0x200,
+ }, {
+ .offset = PCS_INTERNAL_CONTROL_2,
+ .val = 0xf101,
+ },
+};
+
static const struct qcom_uniphy_pcie_regs ipq5332_regs[] = {
{
.offset = PHY_CFG_PLLCFG,
@@ -88,6 +122,14 @@ static const struct qcom_uniphy_pcie_regs ipq5332_regs[] = {
},
};
+static const struct qcom_uniphy_pcie_data ipq5018_data = {
+ .lane_offset = 0x800,
+ .phy_type = PHY_TYPE_PCIE_GEN2,
+ .init_seq = ipq5018_regs,
+ .init_seq_num = ARRAY_SIZE(ipq5018_regs),
+ .pipe_clk_rate = 125 * MEGA,
+};
+
static const struct qcom_uniphy_pcie_data ipq5332_data = {
.lane_offset = 0x800,
.phy_type = PHY_TYPE_PCIE_GEN3,
@@ -212,6 +254,9 @@ static inline int phy_pipe_clk_register(struct qcom_uniphy_pcie *phy, int id)
static const struct of_device_id qcom_uniphy_pcie_id_table[] = {
{
+ .compatible = "qcom,ipq5018-uniphy-pcie-phy",
+ .data = &ipq5018_data,
+ }, {
.compatible = "qcom,ipq5332-uniphy-pcie-phy",
.data = &ipq5332_data,
}, {
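The per-SoC data added above is consumed as a flat offset/value list, with lane_offset presumably stepping the base address per lane. The apply loop lives outside this hunk, but it plausibly reduces to (a sketch, not the driver's actual code):

static void uniphy_apply_init_seq(void __iomem *base,
				  const struct qcom_uniphy_pcie_regs *regs,
				  int num)
{
	int i;

	for (i = 0; i < num; i++)
		writel(regs[i].val, base + regs[i].offset);
}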
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 775f4f973a6c..47beb94cd424 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -9,6 +9,7 @@
* Copyright (C) 2014 Cogent Embedded, Inc.
*/
+#include <linux/cleanup.h>
#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -28,8 +29,10 @@
#define USB2_INT_ENABLE 0x000
#define USB2_AHB_BUS_CTR 0x008
#define USB2_USBCTR 0x00c
+#define USB2_REGEN_CG_CTRL 0x104 /* RZ/V2H(P) only */
#define USB2_SPD_RSM_TIMSET 0x10c
#define USB2_OC_TIMSET 0x110
+#define USB2_UTMI_CTRL 0x118 /* RZ/V2H(P) only */
#define USB2_COMMCTRL 0x600
#define USB2_OBINTSTA 0x604
#define USB2_OBINTEN 0x608
@@ -50,12 +53,18 @@
#define USB2_USBCTR_DIRPD BIT(2)
#define USB2_USBCTR_PLL_RST BIT(1)
+/* REGEN_CG_CTRL */
+#define USB2_REGEN_CG_CTRL_UPHY_WEN BIT(0)
+
/* SPD_RSM_TIMSET */
#define USB2_SPD_RSM_TIMSET_INIT 0x014e029b
/* OC_TIMSET */
#define USB2_OC_TIMSET_INIT 0x000209ab
+/* UTMI_CTRL */
+#define USB2_UTMI_CTRL_INIT 0x8000018f
+
/* COMMCTRL */
#define USB2_COMMCTRL_OTG_PERI BIT(31) /* 1 = Peripheral mode */
@@ -107,7 +116,6 @@ struct rcar_gen3_phy {
struct rcar_gen3_chan *ch;
u32 int_enable_bits;
bool initialized;
- bool otg_initialized;
bool powered;
};
@@ -119,20 +127,21 @@ struct rcar_gen3_chan {
struct regulator *vbus;
struct reset_control *rstc;
struct work_struct work;
- struct mutex lock; /* protects rphys[...].powered */
+ spinlock_t lock; /* protects access to hardware and driver data structure. */
enum usb_dr_mode dr_mode;
- int irq;
u32 obint_enable_bits;
bool extcon_host;
bool is_otg_channel;
bool uses_otg_pins;
bool soc_no_adp_ctrl;
+ bool utmi_ctrl;
};
struct rcar_gen3_phy_drv_data {
const struct phy_ops *phy_usb2_ops;
bool no_adp_ctrl;
bool init_bus;
+ bool utmi_ctrl;
};
/*
@@ -320,16 +329,15 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch)
return false;
}
-static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch)
+static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch)
{
- int i;
-
- for (i = 0; i < NUM_OF_PHYS; i++) {
- if (ch->rphys[i].otg_initialized)
- return false;
+ for (enum rcar_gen3_phy_index i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI;
+ i++) {
+ if (ch->rphys[i].initialized)
+ return true;
}
- return true;
+ return false;
}
static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch)
@@ -351,7 +359,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
bool is_b_device;
enum phy_mode cur_mode, new_mode;
- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
+ guard(spinlock_irqsave)(&ch->lock);
+
+ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
return -EIO;
if (sysfs_streq(buf, "host"))
@@ -389,7 +399,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
+ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -402,6 +412,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
void __iomem *usb2_base = ch->base;
u32 val;
+ if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch))
+ return;
+
	/* Should not use read-modify-write functions on this register */
val = readl(usb2_base + USB2_LINECTRL1);
val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN |
@@ -415,7 +428,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
val = readl(usb2_base + USB2_ADPCTRL);
writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
}
- msleep(20);
+ mdelay(20);
writel(0xffffffff, usb2_base + USB2_OBINTSTA);
writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN);
@@ -427,16 +440,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
{
struct rcar_gen3_chan *ch = _ch;
void __iomem *usb2_base = ch->base;
- u32 status = readl(usb2_base + USB2_OBINTSTA);
+ struct device *dev = ch->dev;
irqreturn_t ret = IRQ_NONE;
+ u32 status;
- if (status & ch->obint_enable_bits) {
- dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
- writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
- rcar_gen3_device_recognition(ch);
- ret = IRQ_HANDLED;
+ pm_runtime_get_noresume(dev);
+
+ if (pm_runtime_suspended(dev))
+ goto rpm_put;
+
+ scoped_guard(spinlock, &ch->lock) {
+ status = readl(usb2_base + USB2_OBINTSTA);
+ if (status & ch->obint_enable_bits) {
+ dev_vdbg(dev, "%s: %08x\n", __func__, status);
+ writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
+ rcar_gen3_device_recognition(ch);
+ ret = IRQ_HANDLED;
+ }
}
+rpm_put:
+ pm_runtime_put_noidle(dev);
return ret;
}
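The rework above leans on the scope-based lock guards from <linux/cleanup.h>: the lock is released automatically when the guarded scope ends, on every return path, which removes the unlock-before-each-return boilerplate; and because rcar_gen3_init_otg() can now run under the spinlock, its msleep(20) becomes mdelay(20). A minimal sketch of the two forms used in this patch:

static int example_guard(struct rcar_gen3_chan *ch)
{
	guard(spinlock_irqsave)(&ch->lock);	/* dropped on every return */

	if (!ch->is_otg_channel)
		return -EIO;			/* unlocked here, too */
	return 0;
}

static void example_scoped(struct rcar_gen3_chan *ch)
{
	scoped_guard(spinlock, &ch->lock) {
		/* lock held only within this block */
	}
	/* lock already released here */
}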
@@ -446,30 +470,29 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
u32 val;
- int ret;
- if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
- INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
- ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
- IRQF_SHARED, dev_name(channel->dev), channel);
- if (ret < 0) {
- dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
- return ret;
- }
- }
+ guard(spinlock_irqsave)(&channel->lock);
/* Initialize USB2 part */
val = readl(usb2_base + USB2_INT_ENABLE);
val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits;
writel(val, usb2_base + USB2_INT_ENABLE);
- writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
- writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
-
- /* Initialize otg part */
- if (channel->is_otg_channel) {
- if (rcar_gen3_needs_init_otg(channel))
- rcar_gen3_init_otg(channel);
- rphy->otg_initialized = true;
+
+ if (!rcar_gen3_is_any_rphy_initialized(channel)) {
+ writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
+ writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
+ }
+
+ /* Initialize otg part (only if we initialize a PHY with IRQs). */
+ if (rphy->int_enable_bits)
+ rcar_gen3_init_otg(channel);
+
+ if (channel->utmi_ctrl) {
+ val = readl(usb2_base + USB2_REGEN_CG_CTRL) | USB2_REGEN_CG_CTRL_UPHY_WEN;
+ writel(val, usb2_base + USB2_REGEN_CG_CTRL);
+
+ writel(USB2_UTMI_CTRL_INIT, usb2_base + USB2_UTMI_CTRL);
+ writel(val & ~USB2_REGEN_CG_CTRL_UPHY_WEN, usb2_base + USB2_REGEN_CG_CTRL);
}
rphy->initialized = true;
@@ -484,10 +507,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
void __iomem *usb2_base = channel->base;
u32 val;
- rphy->initialized = false;
+ guard(spinlock_irqsave)(&channel->lock);
- if (channel->is_otg_channel)
- rphy->otg_initialized = false;
+ rphy->initialized = false;
val = readl(usb2_base + USB2_INT_ENABLE);
val &= ~rphy->int_enable_bits;
@@ -495,9 +517,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
val &= ~USB2_INT_ENABLE_UCOM_INTEN;
writel(val, usb2_base + USB2_INT_ENABLE);
- if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
- free_irq(channel->irq, channel);
-
return 0;
}
@@ -509,16 +528,17 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
u32 val;
int ret = 0;
- mutex_lock(&channel->lock);
- if (!rcar_gen3_are_all_rphys_power_off(channel))
- goto out;
-
if (channel->vbus) {
ret = regulator_enable(channel->vbus);
if (ret)
- goto out;
+ return ret;
}
+ guard(spinlock_irqsave)(&channel->lock);
+
+ if (!rcar_gen3_are_all_rphys_power_off(channel))
+ goto out;
+
val = readl(usb2_base + USB2_USBCTR);
val |= USB2_USBCTR_PLL_RST;
writel(val, usb2_base + USB2_USBCTR);
@@ -528,7 +548,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
out:
/* The powered flag should be set for any other phys anyway */
rphy->powered = true;
- mutex_unlock(&channel->lock);
return 0;
}
@@ -539,18 +558,20 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
struct rcar_gen3_chan *channel = rphy->ch;
int ret = 0;
- mutex_lock(&channel->lock);
- rphy->powered = false;
+ scoped_guard(spinlock_irqsave, &channel->lock) {
+ rphy->powered = false;
- if (!rcar_gen3_are_all_rphys_power_off(channel))
- goto out;
+ if (rcar_gen3_are_all_rphys_power_off(channel)) {
+ u32 val = readl(channel->base + USB2_USBCTR);
+
+ val |= USB2_USBCTR_PLL_RST;
+ writel(val, channel->base + USB2_USBCTR);
+ }
+ }
if (channel->vbus)
ret = regulator_disable(channel->vbus);
-out:
- mutex_unlock(&channel->lock);
-
return ret;
}
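
Both callbacks above rely on the <linux/cleanup.h> lock guards that this patch converts to: guard() holds the lock until the enclosing scope ends, scoped_guard() confines it to an explicit block, and any early return still unlocks. A minimal standalone sketch, assuming only the stock spinlock_irqsave guard definitions from <linux/spinlock.h>:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(chan_lock);
static unsigned int chan_refs;

static void chan_get(void)
{
	guard(spinlock_irqsave)(&chan_lock);
	chan_refs++;
}	/* lock dropped (and irqs restored) when the scope ends */

static bool chan_put(void)
{
	scoped_guard(spinlock_irqsave, &chan_lock) {
		if (--chan_refs)
			return false;	/* unlock still happens before return */
	}
	/* lock already released here; sleeping work would now be safe */
	return true;			/* last reference gone */
}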
@@ -589,6 +610,12 @@ static const struct rcar_gen3_phy_drv_data rz_g3s_phy_usb2_data = {
.init_bus = true,
};
+static const struct rcar_gen3_phy_drv_data rz_v2h_phy_usb2_data = {
+ .phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
+ .no_adp_ctrl = true,
+ .utmi_ctrl = true,
+};
+
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
{
.compatible = "renesas,usb2-phy-r8a77470",
@@ -607,14 +634,18 @@ static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
.data = &rcar_gen3_phy_usb2_data,
},
{
- .compatible = "renesas,rzg2l-usb2-phy",
- .data = &rz_g2l_phy_usb2_data,
- },
- {
.compatible = "renesas,usb2-phy-r9a08g045",
.data = &rz_g3s_phy_usb2_data,
},
{
+ .compatible = "renesas,usb2-phy-r9a09g057",
+ .data = &rz_v2h_phy_usb2_data,
+ },
+ {
+ .compatible = "renesas,rzg2l-usb2-phy",
+ .data = &rz_g2l_phy_usb2_data,
+ },
+ {
.compatible = "renesas,rcar-gen3-usb2-phy",
.data = &rcar_gen3_phy_usb2_data,
},
@@ -703,7 +734,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rcar_gen3_chan *channel;
struct phy_provider *provider;
- int ret = 0, i;
+ int ret = 0, i, irq;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
@@ -719,8 +750,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
return PTR_ERR(channel->base);
channel->obint_enable_bits = USB2_OBINT_BITS;
- /* get irq number here and request_irq for OTG in phy_init */
- channel->irq = platform_get_irq_optional(pdev, 0);
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
channel->is_otg_channel = true;
@@ -763,7 +792,9 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
if (phy_data->no_adp_ctrl)
channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
- mutex_init(&channel->lock);
+ channel->utmi_ctrl = phy_data->utmi_ctrl;
+
+ spin_lock_init(&channel->lock);
for (i = 0; i < NUM_OF_PHYS; i++) {
channel->rphys[i].phy = devm_phy_create(dev, NULL,
phy_data->phy_usb2_ops);
@@ -789,6 +820,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->vbus = NULL;
}
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 && irq != -ENXIO) {
+ ret = irq;
+ goto error;
+ } else if (irq > 0) {
+ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+ ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
+ IRQF_SHARED, dev_name(dev), channel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq (%d)\n", irq);
+ goto error;
+ }
+ }
+
provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
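
Moving the IRQ request into probe leans on two conventions: platform_get_irq_optional() returns -ENXIO when the IRQ is genuinely absent (any other negative value is a real error, e.g. probe deferral), and devm_request_irq() removes the need for the matching free_irq() that the old init/exit paths carried. A sketch of the pattern in isolation (helper name is illustrative):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int foo_request_optional_irq(struct platform_device *pdev,
				    irq_handler_t handler, void *data)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq == -ENXIO)
		return 0;		/* IRQ simply not wired up: not an error */
	if (irq < 0)
		return irq;		/* deferral or other failure: propagate */

	/* devm_* means no free_irq() needed on remove or error paths. */
	return devm_request_irq(&pdev->dev, irq, handler, IRQF_SHARED,
				dev_name(&pdev->dev), data);
}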
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index b5e6a864deeb..b0f23690ec30 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1583,6 +1583,37 @@ static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy)
return ret;
}
+static const struct rockchip_usb2phy_cfg rk3036_phy_cfgs[] = {
+ {
+ .reg = 0x17c,
+ .num_ports = 2,
+ .phy_tuning = rk3128_usb2phy_tuning,
+ .clkout_ctl = { 0x017c, 11, 11, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x017c, 8, 0, 0, 0x1d1 },
+ .bvalid_det_en = { 0x017c, 14, 14, 0, 1 },
+ .bvalid_det_st = { 0x017c, 15, 15, 0, 1 },
+ .bvalid_det_clr = { 0x017c, 15, 15, 0, 1 },
+ .ls_det_en = { 0x017c, 12, 12, 0, 1 },
+ .ls_det_st = { 0x017c, 13, 13, 0, 1 },
+ .ls_det_clr = { 0x017c, 13, 13, 0, 1 },
+ .utmi_bvalid = { 0x014c, 8, 8, 0, 1 },
+ .utmi_id = { 0x014c, 11, 11, 0, 1 },
+ .utmi_ls = { 0x014c, 10, 9, 0, 1 },
+ },
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0194, 8, 0, 0, 0x1d1 },
+ .ls_det_en = { 0x0194, 14, 14, 0, 1 },
+ .ls_det_st = { 0x0194, 15, 15, 0, 1 },
+ .ls_det_clr = { 0x0194, 15, 15, 0, 1 }
+ }
+ },
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_usb2phy_cfg rk3128_phy_cfgs[] = {
{
.reg = 0x17c,
@@ -1892,6 +1923,54 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
{ /* sentinel */ }
};
+static const struct rockchip_usb2phy_cfg rk3562_phy_cfgs[] = {
+ {
+ .reg = 0xff740000,
+ .num_ports = 2,
+ .clkout_ctl = { 0x0108, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x0100, 8, 0, 0, 0x1d1 },
+ .bvalid_det_en = { 0x0110, 2, 2, 0, 1 },
+ .bvalid_det_st = { 0x0114, 2, 2, 0, 1 },
+ .bvalid_det_clr = { 0x0118, 2, 2, 0, 1 },
+ .idfall_det_en = { 0x0110, 5, 5, 0, 1 },
+ .idfall_det_st = { 0x0114, 5, 5, 0, 1 },
+ .idfall_det_clr = { 0x0118, 5, 5, 0, 1 },
+ .idrise_det_en = { 0x0110, 4, 4, 0, 1 },
+ .idrise_det_st = { 0x0114, 4, 4, 0, 1 },
+ .idrise_det_clr = { 0x0118, 4, 4, 0, 1 },
+ .ls_det_en = { 0x0110, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0114, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0118, 0, 0, 0, 1 },
+ .utmi_avalid = { 0x0120, 10, 10, 0, 1 },
+ .utmi_bvalid = { 0x0120, 9, 9, 0, 1 },
+ .utmi_ls = { 0x0120, 5, 4, 0, 1 },
+ },
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0104, 8, 0, 0x1d2, 0x1d1 },
+ .ls_det_en = { 0x0110, 1, 1, 0, 1 },
+ .ls_det_st = { 0x0114, 1, 1, 0, 1 },
+ .ls_det_clr = { 0x0118, 1, 1, 0, 1 },
+ .utmi_ls = { 0x0120, 17, 16, 0, 1 },
+ .utmi_hstdet = { 0x0120, 19, 19, 0, 1 }
+ }
+ },
+ .chg_det = {
+ .cp_det = { 0x0120, 24, 24, 0, 1 },
+ .dcp_det = { 0x0120, 23, 23, 0, 1 },
+ .dp_det = { 0x0120, 25, 25, 0, 1 },
+ .idm_sink_en = { 0x0108, 8, 8, 0, 1 },
+ .idp_sink_en = { 0x0108, 7, 7, 0, 1 },
+ .idp_src_en = { 0x0108, 9, 9, 0, 1 },
+ .rdm_pdwn_en = { 0x0108, 10, 10, 0, 1 },
+ .vdm_src_en = { 0x0108, 12, 12, 0, 1 },
+ .vdp_src_en = { 0x0108, 11, 11, 0, 1 },
+ },
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
{
.reg = 0xfe8a0000,
@@ -2204,12 +2283,14 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
static const struct of_device_id rockchip_usb2phy_dt_match[] = {
{ .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
+ { .compatible = "rockchip,rk3036-usb2phy", .data = &rk3036_phy_cfgs },
{ .compatible = "rockchip,rk3128-usb2phy", .data = &rk3128_phy_cfgs },
{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
{ .compatible = "rockchip,rk3308-usb2phy", .data = &rk3308_phy_cfgs },
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
+ { .compatible = "rockchip,rk3562-usb2phy", .data = &rk3562_phy_cfgs },
{ .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs },
{ .compatible = "rockchip,rk3576-usb2phy", .data = &rk3576_phy_cfgs },
{ .compatible = "rockchip,rk3588-usb2phy", .data = &rk3588_phy_cfgs },
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
index 08c78c1bafc9..28a052e17366 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
@@ -1653,7 +1653,7 @@ static __maybe_unused int samsung_mipi_dcphy_runtime_resume(struct device *dev)
return ret;
}
- clk_prepare_enable(samsung->ref_clk);
+ ret = clk_prepare_enable(samsung->ref_clk);
if (ret) {
dev_err(samsung->dev, "Failed to enable reference clock, %d\n", ret);
clk_disable_unprepare(samsung->pclk);
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
index fe7c05748356..79db57ee90d1 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -320,6 +320,7 @@
#define LN3_TX_SER_RATE_SEL_HBR2_MASK BIT(3)
#define LN3_TX_SER_RATE_SEL_HBR3_MASK BIT(2)
+#define HDMI14_MAX_RATE 340000000
#define HDMI20_MAX_RATE 600000000
enum dp_link_rate {
@@ -328,39 +329,8 @@ enum dp_link_rate {
DP_BW_HBR2,
};
-struct lcpll_config {
- u32 bit_rate;
- u8 lcvco_mode_en;
- u8 pi_en;
- u8 clk_en_100m;
- u8 pms_mdiv;
- u8 pms_mdiv_afc;
- u8 pms_pdiv;
- u8 pms_refdiv;
- u8 pms_sdiv;
- u8 pi_cdiv_rstn;
- u8 pi_cdiv_sel;
- u8 sdm_en;
- u8 sdm_rstn;
- u8 sdc_frac_en;
- u8 sdc_rstn;
- u8 sdm_deno;
- u8 sdm_num_sign;
- u8 sdm_num;
- u8 sdc_n;
- u8 sdc_n2;
- u8 sdc_num;
- u8 sdc_deno;
- u8 sdc_ndiv_rstn;
- u8 ssc_en;
- u8 ssc_fm_dev;
- u8 ssc_fm_freq;
- u8 ssc_clk_div_sel;
- u8 cd_tx_ser_rate_sel;
-};
-
struct ropll_config {
- u32 bit_rate;
+ unsigned long long rate;
u8 pms_mdiv;
u8 pms_mdiv_afc;
u8 pms_pdiv;
@@ -422,19 +392,17 @@ struct rk_hdptx_phy {
struct regmap *regmap;
struct regmap *grf;
- /* PHY const config */
- const struct rk_hdptx_phy_cfg *cfgs;
int phy_id;
-
struct phy *phy;
- struct phy_config *phy_cfg;
+ struct phy_configure_opts_hdmi hdmi_cfg;
struct clk_bulk_data *clks;
int nr_clks;
struct reset_control_bulk_data rsts[RST_MAX];
/* clk provider */
struct clk_hw hw;
- unsigned long rate;
+ unsigned long hw_rate;
+ bool restrict_rate_change;
atomic_t usage_count;
@@ -444,45 +412,47 @@ struct rk_hdptx_phy {
};
static const struct ropll_config ropll_tmds_cfg[] = {
- { 5940000, 124, 124, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ { 594000000ULL, 124, 124, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 3712500, 155, 155, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ { 371250000ULL, 155, 155, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 2970000, 124, 124, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ { 297000000ULL, 124, 124, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 1620000, 135, 135, 1, 1, 3, 1, 1, 0, 1, 1, 1, 1, 4, 0, 3, 5, 5, 0x10,
+ { 162000000ULL, 135, 135, 1, 1, 3, 1, 1, 0, 1, 1, 1, 1, 4, 0, 3, 5, 5, 0x10,
1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 1856250, 155, 155, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ { 185625000ULL, 155, 155, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 1540000, 193, 193, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 193, 1, 32, 2, 1,
+ { 154000000ULL, 193, 193, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 193, 1, 32, 2, 1,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 1485000, 0x7b, 0x7b, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 4, 0, 3, 5, 5,
+ { 148500000ULL, 0x7b, 0x7b, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 4, 0, 3, 5, 5,
0x10, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 1462500, 122, 122, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 244, 1, 16, 2, 1, 1,
+ { 146250000ULL, 122, 122, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 244, 1, 16, 2, 1, 1,
1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 1190000, 149, 149, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 149, 1, 16, 2, 1, 1,
+ { 119000000ULL, 149, 149, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 149, 1, 16, 2, 1, 1,
1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 1065000, 89, 89, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 89, 1, 16, 1, 0, 1,
+ { 106500000ULL, 89, 89, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 89, 1, 16, 1, 0, 1,
1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 1080000, 135, 135, 1, 1, 5, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
+ { 108000000ULL, 135, 135, 1, 1, 5, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
0x14, 0x18, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 855000, 214, 214, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 214, 1, 16, 2, 1,
+ { 85500000ULL, 214, 214, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 214, 1, 16, 2, 1,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 835000, 105, 105, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 42, 1, 16, 1, 0,
+ { 83500000ULL, 105, 105, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 42, 1, 16, 1, 0,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 928125, 155, 155, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ { 92812500ULL, 155, 155, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 742500, 124, 124, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
+ { 74250000ULL, 124, 124, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 62, 1, 16, 5, 0,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 650000, 162, 162, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 54, 0, 16, 4, 1,
+ { 65000000ULL, 162, 162, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 54, 0, 16, 4, 1,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 337500, 0x70, 0x70, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 0x2, 0, 0x01, 5,
+ { 50250000ULL, 84, 84, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 11, 1, 4, 5,
+ 4, 11, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 33750000ULL, 0x70, 0x70, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 0x2, 0, 0x01, 5,
1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 400000, 100, 100, 1, 1, 11, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
+ { 40000000ULL, 100, 100, 1, 1, 11, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
0x14, 0x18, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 270000, 0x5a, 0x5a, 1, 1, 0xf, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
+ { 27000000ULL, 0x5a, 0x5a, 1, 1, 0xf, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
0x14, 0x18, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
- { 251750, 84, 84, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 168, 1, 16, 4, 1, 1,
+ { 25175000ULL, 84, 84, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 168, 1, 16, 4, 1, 1,
1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
};
@@ -928,10 +898,10 @@ static void rk_hdptx_phy_disable(struct rk_hdptx_phy *hdptx)
regmap_write(hdptx->grf, GRF_HDPTX_CON0, val);
}
-static bool rk_hdptx_phy_clk_pll_calc(unsigned int data_rate,
+static bool rk_hdptx_phy_clk_pll_calc(unsigned long long rate,
struct ropll_config *cfg)
{
- const unsigned int fout = data_rate / 2, fref = 24000;
+ const unsigned int fout = div_u64(rate, 200), fref = 24000;
unsigned long k = 0, lc, k_sub, lc_sub;
unsigned int fvco, sdc;
u32 mdiv, sdiv, n = 8;
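
With the rate now carried as a 64-bit Hz value, the divide has to go through div_u64() from <linux/math64.h>: a plain 64-bit '/' would emit a libgcc call (__udivdi3) on 32-bit builds. A sketch of the scaling used above — fout stays in the PLL code's 100 Hz units, so dividing the Hz rate by 200 yields (rate / 2) / 100 (helper name is hypothetical):

#include <linux/math64.h>

static unsigned int tmds_rate_to_fout(unsigned long long rate)
{
	/* 64-bit dividend, 32-bit divisor: safe on 32-bit kernels too */
	return (unsigned int)div_u64(rate, 200);
}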
@@ -1000,33 +970,34 @@ static bool rk_hdptx_phy_clk_pll_calc(unsigned int data_rate,
return true;
}
-static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
- unsigned int rate)
+static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx)
{
const struct ropll_config *cfg = NULL;
struct ropll_config rc = {0};
- int i;
+ int ret, i;
- hdptx->rate = rate * 100;
+ if (!hdptx->hdmi_cfg.tmds_char_rate)
+ return 0;
for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
- if (rate == ropll_tmds_cfg[i].bit_rate) {
+ if (hdptx->hdmi_cfg.tmds_char_rate == ropll_tmds_cfg[i].rate) {
cfg = &ropll_tmds_cfg[i];
break;
}
if (!cfg) {
- if (rk_hdptx_phy_clk_pll_calc(rate, &rc)) {
- cfg = &rc;
- } else {
- dev_err(hdptx->dev, "%s cannot find pll cfg\n", __func__);
+ if (!rk_hdptx_phy_clk_pll_calc(hdptx->hdmi_cfg.tmds_char_rate, &rc)) {
+ dev_err(hdptx->dev, "%s cannot find pll cfg for rate=%llu\n",
+ __func__, hdptx->hdmi_cfg.tmds_char_rate);
return -EINVAL;
}
+
+ cfg = &rc;
}
- dev_dbg(hdptx->dev, "mdiv=%u, sdiv=%u, sdm_en=%u, k_sign=%u, k=%u, lc=%u\n",
- cfg->pms_mdiv, cfg->pms_sdiv + 1, cfg->sdm_en,
- cfg->sdm_num_sign, cfg->sdm_num, cfg->sdm_deno);
+ dev_dbg(hdptx->dev, "%s rate=%llu mdiv=%u sdiv=%u sdm_en=%u k_sign=%u k=%u lc=%u\n",
+ __func__, hdptx->hdmi_cfg.tmds_char_rate, cfg->pms_mdiv, cfg->pms_sdiv + 1,
+ cfg->sdm_en, cfg->sdm_num_sign, cfg->sdm_num, cfg->sdm_deno);
rk_hdptx_pre_power_up(hdptx);
@@ -1059,20 +1030,26 @@ static int rk_hdptx_ropll_tmds_cmn_config(struct rk_hdptx_phy *hdptx,
regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_POSTDIV_SEL_MASK,
FIELD_PREP(PLL_PCG_POSTDIV_SEL_MASK, cfg->pms_sdiv));
+ regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_CLK_SEL_MASK,
+ FIELD_PREP(PLL_PCG_CLK_SEL_MASK, (hdptx->hdmi_cfg.bpc - 8) >> 1));
+
regmap_update_bits(hdptx->regmap, CMN_REG(0086), PLL_PCG_CLK_EN_MASK,
FIELD_PREP(PLL_PCG_CLK_EN_MASK, 0x1));
- return rk_hdptx_post_enable_pll(hdptx);
+ ret = rk_hdptx_post_enable_pll(hdptx);
+ if (!ret)
+ hdptx->hw_rate = hdptx->hdmi_cfg.tmds_char_rate;
+
+ return ret;
}
-static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx,
- unsigned int rate)
+static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx)
{
rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_sb_init_seq);
regmap_write(hdptx->regmap, LNTOP_REG(0200), 0x06);
- if (rate >= 3400000) {
+ if (hdptx->hdmi_cfg.tmds_char_rate > HDMI14_MAX_RATE) {
/* For 1/40 bitrate clk */
rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_tmds_lntop_highbr_seq);
} else {
@@ -1124,8 +1101,7 @@ static void rk_hdptx_dp_reset(struct rk_hdptx_phy *hdptx)
HDPTX_I_BGR_EN << 16 | FIELD_PREP(HDPTX_I_BGR_EN, 0x0));
}
-static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx,
- unsigned int rate)
+static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx)
{
enum phy_mode mode = phy_get_mode(hdptx->phy);
u32 status;
@@ -1144,11 +1120,9 @@ static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx,
if (mode == PHY_MODE_DP) {
rk_hdptx_dp_reset(hdptx);
} else {
- if (rate) {
- ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate);
- if (ret)
- goto dec_usage;
- }
+ ret = rk_hdptx_ropll_tmds_cmn_config(hdptx);
+ if (ret)
+ goto dec_usage;
}
return 0;
@@ -1443,21 +1417,26 @@ static int rk_hdptx_dp_aux_init(struct rk_hdptx_phy *hdptx)
static int rk_hdptx_phy_power_on(struct phy *phy)
{
struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
- int bus_width = phy_get_bus_width(hdptx->phy);
enum phy_mode mode = phy_get_mode(phy);
int ret, lane;
- /*
- * FIXME: Temporary workaround to pass pixel_clk_rate
- * from the HDMI bridge driver until phy_configure_opts_hdmi
- * becomes available in the PHY API.
- */
- unsigned int rate = bus_width & 0xfffffff;
+ if (mode != PHY_MODE_DP) {
+ if (!hdptx->hdmi_cfg.tmds_char_rate) {
+ /*
+ * FIXME: Temporary workaround to setup TMDS char rate
+ * from the RK DW HDMI QP bridge driver.
+ * Will be removed as soon the switch to the HDMI PHY
+ * configuration API has been completed on both ends.
+ */
+ hdptx->hdmi_cfg.tmds_char_rate = phy_get_bus_width(hdptx->phy) & 0xfffffff;
+ hdptx->hdmi_cfg.tmds_char_rate *= 100;
+ }
- dev_dbg(hdptx->dev, "%s bus_width=%x rate=%u\n",
- __func__, bus_width, rate);
+ dev_dbg(hdptx->dev, "%s rate=%llu bpc=%u\n", __func__,
+ hdptx->hdmi_cfg.tmds_char_rate, hdptx->hdmi_cfg.bpc);
+ }
- ret = rk_hdptx_phy_consumer_get(hdptx, rate);
+ ret = rk_hdptx_phy_consumer_get(hdptx);
if (ret)
return ret;
@@ -1488,7 +1467,7 @@ static int rk_hdptx_phy_power_on(struct phy *phy)
regmap_write(hdptx->grf, GRF_HDPTX_CON0,
HDPTX_MODE_SEL << 16 | FIELD_PREP(HDPTX_MODE_SEL, 0x0));
- ret = rk_hdptx_ropll_tmds_mode_config(hdptx, rate);
+ ret = rk_hdptx_ropll_tmds_mode_config(hdptx);
if (ret)
rk_hdptx_phy_consumer_put(hdptx, true);
}
@@ -1503,8 +1482,40 @@ static int rk_hdptx_phy_power_off(struct phy *phy)
return rk_hdptx_phy_consumer_put(hdptx, false);
}
-static int rk_hdptx_phy_verify_config(struct rk_hdptx_phy *hdptx,
- struct phy_configure_opts_dp *dp)
+static int rk_hdptx_phy_verify_hdmi_config(struct rk_hdptx_phy *hdptx,
+ struct phy_configure_opts_hdmi *hdmi)
+{
+ int i;
+
+ if (!hdmi->tmds_char_rate || hdmi->tmds_char_rate > HDMI20_MAX_RATE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
+ if (hdmi->tmds_char_rate == ropll_tmds_cfg[i].rate)
+ break;
+
+ if (i == ARRAY_SIZE(ropll_tmds_cfg) &&
+ !rk_hdptx_phy_clk_pll_calc(hdmi->tmds_char_rate, NULL))
+ return -EINVAL;
+
+ if (!hdmi->bpc)
+ hdmi->bpc = 8;
+
+ switch (hdmi->bpc) {
+ case 8:
+ case 10:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rk_hdptx_phy_verify_dp_config(struct rk_hdptx_phy *hdptx,
+ struct phy_configure_opts_dp *dp)
{
int i;
@@ -1764,12 +1775,23 @@ static int rk_hdptx_phy_configure(struct phy *phy, union phy_configure_opts *opt
enum phy_mode mode = phy_get_mode(phy);
int ret;
- if (mode != PHY_MODE_DP)
- return 0;
+ if (mode != PHY_MODE_DP) {
+ ret = rk_hdptx_phy_verify_hdmi_config(hdptx, &opts->hdmi);
+ if (ret) {
+ dev_err(hdptx->dev, "invalid hdmi params for phy configure\n");
+ } else {
+ hdptx->hdmi_cfg = opts->hdmi;
+ hdptx->restrict_rate_change = true;
+ }
- ret = rk_hdptx_phy_verify_config(hdptx, &opts->dp);
+ dev_dbg(hdptx->dev, "%s rate=%llu bpc=%u\n", __func__,
+ hdptx->hdmi_cfg.tmds_char_rate, hdptx->hdmi_cfg.bpc);
+ return ret;
+ }
+
+ ret = rk_hdptx_phy_verify_dp_config(hdptx, &opts->dp);
if (ret) {
- dev_err(hdptx->dev, "invalid params for phy configure\n");
+ dev_err(hdptx->dev, "invalid dp params for phy configure\n");
return ret;
}
@@ -1801,10 +1823,22 @@ static int rk_hdptx_phy_configure(struct phy *phy, union phy_configure_opts *opt
return 0;
}
+static int rk_hdptx_phy_validate(struct phy *phy, enum phy_mode mode,
+ int submode, union phy_configure_opts *opts)
+{
+ struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy);
+
+ if (mode != PHY_MODE_DP)
+ return rk_hdptx_phy_verify_hdmi_config(hdptx, &opts->hdmi);
+
+ return rk_hdptx_phy_verify_dp_config(hdptx, &opts->dp);
+}
+
static const struct phy_ops rk_hdptx_phy_ops = {
.power_on = rk_hdptx_phy_power_on,
.power_off = rk_hdptx_phy_power_off,
.configure = rk_hdptx_phy_configure,
+ .validate = rk_hdptx_phy_validate,
.owner = THIS_MODULE,
};
@@ -1817,7 +1851,7 @@ static int rk_hdptx_phy_clk_prepare(struct clk_hw *hw)
{
struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
- return rk_hdptx_phy_consumer_get(hdptx, hdptx->rate / 100);
+ return rk_hdptx_phy_consumer_get(hdptx);
}
static void rk_hdptx_phy_clk_unprepare(struct clk_hw *hw)
@@ -1832,27 +1866,37 @@ static unsigned long rk_hdptx_phy_clk_recalc_rate(struct clk_hw *hw,
{
struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
- return hdptx->rate;
+ return hdptx->hw_rate;
}
static long rk_hdptx_phy_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- u32 bit_rate = rate / 100;
- int i;
+ struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
- if (rate > HDMI20_MAX_RATE)
- return rate;
+ /*
+ * FIXME: Temporarily allow altering TMDS char rate via CCF.
+ * To be dropped as soon as the RK DW HDMI QP bridge driver
+ * switches to make use of phy_configure().
+ */
+ if (!hdptx->restrict_rate_change && rate != hdptx->hdmi_cfg.tmds_char_rate) {
+ struct phy_configure_opts_hdmi hdmi = {
+ .tmds_char_rate = rate,
+ };
+ int ret = rk_hdptx_phy_verify_hdmi_config(hdptx, &hdmi);
- for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++)
- if (bit_rate == ropll_tmds_cfg[i].bit_rate)
- break;
+ if (ret)
+ return ret;
- if (i == ARRAY_SIZE(ropll_tmds_cfg) &&
- !rk_hdptx_phy_clk_pll_calc(bit_rate, NULL))
- return -EINVAL;
+ hdptx->hdmi_cfg = hdmi;
+ }
- return rate;
+ /*
+ * The TMDS char rate shall be adjusted via phy_configure() only,
+ * hence ensure rk_hdptx_phy_clk_set_rate() won't be invoked with
+ * a different rate argument.
+ */
+ return hdptx->hdmi_cfg.tmds_char_rate;
}
static int rk_hdptx_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1860,7 +1904,21 @@ static int rk_hdptx_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw);
- return rk_hdptx_ropll_tmds_cmn_config(hdptx, rate / 100);
+ /* Revert any unlikely TMDS char rate change since round_rate() */
+ if (hdptx->hdmi_cfg.tmds_char_rate != rate) {
+ dev_warn(hdptx->dev, "Reverting unexpected rate change from %lu to %llu\n",
+ rate, hdptx->hdmi_cfg.tmds_char_rate);
+ hdptx->hdmi_cfg.tmds_char_rate = rate;
+ }
+
+ /*
+ * The TMDS char rate is normally programmed into HW from the
+ * phy_ops.power_on() or clk_ops.prepare() callbacks, but the former
+ * might fire too late, i.e. after this call, while the latter runs
+ * only once, i.e. when the clock remains prepared across rate
+ * changes.
+ */
+ return rk_hdptx_ropll_tmds_cmn_config(hdptx);
}
static const struct clk_ops hdptx_phy_clk_ops = {
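
The round_rate()/set_rate() pair above exploits the CCF calling contract: clk_set_rate() first asks round_rate() what is achievable and then hands exactly that value to set_rate(). Returning the stored tmds_char_rate from round_rate() therefore pins what set_rate() can ever be asked to program. A minimal sketch of a clock pinned this way (all names hypothetical):

#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/container_of.h>

struct pinned_clk {
	struct clk_hw hw;
	unsigned long cfg_rate;	/* chosen elsewhere, e.g. via phy_configure() */
};

static long pinned_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct pinned_clk *pc = container_of(hw, struct pinned_clk, hw);

	return pc->cfg_rate;	/* ignore the request: pin the rate */
}

static int pinned_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct pinned_clk *pc = container_of(hw, struct pinned_clk, hw);

	/* The CCF passes round_rate()'s result, so this cannot differ. */
	WARN_ON(rate != pc->cfg_rate);
	return 0;		/* a real driver would program HW here */
}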
@@ -1923,6 +1981,7 @@ static int rk_hdptx_phy_runtime_resume(struct device *dev)
static int rk_hdptx_phy_probe(struct platform_device *pdev)
{
+ const struct rk_hdptx_phy_cfg *cfgs;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct rk_hdptx_phy *hdptx;
@@ -1935,20 +1994,21 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
return -ENOMEM;
hdptx->dev = dev;
+ hdptx->hdmi_cfg.bpc = 8;
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return dev_err_probe(dev, PTR_ERR(regs),
"Failed to ioremap resource\n");
- hdptx->cfgs = device_get_match_data(dev);
- if (!hdptx->cfgs)
+ cfgs = device_get_match_data(dev);
+ if (!cfgs)
return dev_err_probe(dev, -EINVAL, "missing match data\n");
/* find the phy-id from the io address */
hdptx->phy_id = -ENODEV;
- for (id = 0; id < hdptx->cfgs->num_phys; id++) {
- if (res->start == hdptx->cfgs->phy_ids[id]) {
+ for (id = 0; id < cfgs->num_phys; id++) {
+ if (res->start == cfgs->phy_ids[id]) {
hdptx->phy_id = id;
break;
}
diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig
index 6566100441d6..b7ab402909a8 100644
--- a/drivers/phy/samsung/Kconfig
+++ b/drivers/phy/samsung/Kconfig
@@ -85,7 +85,7 @@ config PHY_EXYNOS5_USBDRD
depends on USB_DWC3_EXYNOS
select GENERIC_PHY
select MFD_SYSCON
- default y
+ default ARCH_EXYNOS
help
Enable USB DRD PHY support for Exynos 5 SoC series.
This driver provides PHY interface for USB 3.0 DRD controller
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 817fddee0392..917a76d584f0 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -36,14 +36,40 @@
#define EXYNOS5_FSEL_26MHZ 0x6
#define EXYNOS5_FSEL_50MHZ 0x7
+/* USB 3.2 DRD 4nm PHY link controller registers */
+#define EXYNOS2200_DRD_CLKRST 0x0c
+#define EXYNOS2200_CLKRST_LINK_PCLK_SEL BIT(1)
+
+#define EXYNOS2200_DRD_UTMI 0x10
+#define EXYNOS2200_UTMI_FORCE_VBUSVALID BIT(1)
+#define EXYNOS2200_UTMI_FORCE_BVALID BIT(0)
+
+#define EXYNOS2200_DRD_HSP_MISC 0x114
+#define HSP_MISC_SET_REQ_IN2 BIT(4)
+#define HSP_MISC_RES_TUNE GENMASK(1, 0)
+#define RES_TUNE_PHY1_PHY2 0x1
+#define RES_TUNE_PHY1 0x2
+#define RES_TUNE_PHY2 0x3
+
/* Exynos5: USB 3.0 DRD PHY registers */
#define EXYNOS5_DRD_LINKSYSTEM 0x04
#define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27)
-#define LINKSYSTEM_FLADJ_MASK (0x3f << 1)
-#define LINKSYSTEM_FLADJ(_x) ((_x) << 1)
+#define LINKSYSTEM_FORCE_VBUSVALID BIT(8)
+#define LINKSYSTEM_FORCE_BVALID BIT(7)
+#define LINKSYSTEM_FLADJ GENMASK(6, 1)
#define EXYNOS5_DRD_PHYUTMI 0x08
+#define PHYUTMI_UTMI_SUSPEND_COM_N BIT(12)
+#define PHYUTMI_UTMI_L1_SUSPEND_COM_N BIT(11)
+#define PHYUTMI_VBUSVLDEXTSEL BIT(10)
+#define PHYUTMI_VBUSVLDEXT BIT(9)
+#define PHYUTMI_TXBITSTUFFENH BIT(8)
+#define PHYUTMI_TXBITSTUFFEN BIT(7)
#define PHYUTMI_OTGDISABLE BIT(6)
+#define PHYUTMI_IDPULLUP BIT(5)
+#define PHYUTMI_DRVVBUS BIT(4)
+#define PHYUTMI_DPPULLDOWN BIT(3)
+#define PHYUTMI_DMPULLDOWN BIT(2)
#define PHYUTMI_FORCESUSPEND BIT(1)
#define PHYUTMI_FORCESLEEP BIT(0)
@@ -51,30 +77,27 @@
#define EXYNOS5_DRD_PHYCLKRST 0x10
#define PHYCLKRST_EN_UTMISUSPEND BIT(31)
-#define PHYCLKRST_SSC_REFCLKSEL_MASK (0xff << 23)
-#define PHYCLKRST_SSC_REFCLKSEL(_x) ((_x) << 23)
-#define PHYCLKRST_SSC_RANGE_MASK (0x03 << 21)
-#define PHYCLKRST_SSC_RANGE(_x) ((_x) << 21)
+#define PHYCLKRST_SSC_REFCLKSEL GENMASK(30, 23)
+#define PHYCLKRST_SSC_RANGE GENMASK(22, 21)
#define PHYCLKRST_SSC_EN BIT(20)
#define PHYCLKRST_REF_SSP_EN BIT(19)
#define PHYCLKRST_REF_CLKDIV2 BIT(18)
-#define PHYCLKRST_MPLL_MULTIPLIER_MASK (0x7f << 11)
-#define PHYCLKRST_MPLL_MULTIPLIER_100MHZ_REF (0x19 << 11)
-#define PHYCLKRST_MPLL_MULTIPLIER_50M_REF (0x32 << 11)
-#define PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF (0x68 << 11)
-#define PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF (0x7d << 11)
-#define PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF (0x02 << 11)
-#define PHYCLKRST_FSEL_PIPE_MASK (0x7 << 8)
-#define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5)
-#define PHYCLKRST_FSEL(_x) ((_x) << 5)
-#define PHYCLKRST_FSEL_PAD_100MHZ (0x27 << 5)
-#define PHYCLKRST_FSEL_PAD_24MHZ (0x2a << 5)
-#define PHYCLKRST_FSEL_PAD_20MHZ (0x31 << 5)
-#define PHYCLKRST_FSEL_PAD_19_2MHZ (0x38 << 5)
+#define PHYCLKRST_MPLL_MULTIPLIER GENMASK(17, 11)
+#define PHYCLKRST_MPLL_MULTIPLIER_100MHZ_REF 0x19
+#define PHYCLKRST_MPLL_MULTIPLIER_50M_REF 0x32
+#define PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF 0x68
+#define PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF 0x7d
+#define PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF 0x02
+#define PHYCLKRST_FSEL_PIPE GENMASK(10, 8)
+#define PHYCLKRST_FSEL_UTMI GENMASK(7, 5)
+#define PHYCLKRST_FSEL_PAD_100MHZ 0x27
+#define PHYCLKRST_FSEL_PAD_24MHZ 0x2a
+#define PHYCLKRST_FSEL_PAD_20MHZ 0x31
+#define PHYCLKRST_FSEL_PAD_19_2MHZ 0x38
#define PHYCLKRST_RETENABLEN BIT(4)
-#define PHYCLKRST_REFCLKSEL_MASK (0x03 << 2)
-#define PHYCLKRST_REFCLKSEL_PAD_REFCLK (0x2 << 2)
-#define PHYCLKRST_REFCLKSEL_EXT_REFCLK (0x3 << 2)
+#define PHYCLKRST_REFCLKSEL GENMASK(3, 2)
+#define PHYCLKRST_REFCLKSEL_PAD_REFCLK 0x2
+#define PHYCLKRST_REFCLKSEL_EXT_REFCLK 0x3
#define PHYCLKRST_PORTRESET BIT(1)
#define PHYCLKRST_COMMONONN BIT(0)
@@ -83,22 +106,32 @@
#define PHYREG0_SSC_RANGE BIT(20)
#define PHYREG0_CR_WRITE BIT(19)
#define PHYREG0_CR_READ BIT(18)
-#define PHYREG0_CR_DATA_IN(_x) ((_x) << 2)
+#define PHYREG0_CR_DATA_IN GENMASK(17, 2)
#define PHYREG0_CR_CAP_DATA BIT(1)
#define PHYREG0_CR_CAP_ADDR BIT(0)
#define EXYNOS5_DRD_PHYREG1 0x18
-#define PHYREG1_CR_DATA_OUT(_x) ((_x) << 1)
+#define PHYREG0_CR_DATA_OUT GENMASK(16, 1)
#define PHYREG1_CR_ACK BIT(0)
#define EXYNOS5_DRD_PHYPARAM0 0x1c
#define PHYPARAM0_REF_USE_PAD BIT(31)
-#define PHYPARAM0_REF_LOSLEVEL_MASK (0x1f << 26)
-#define PHYPARAM0_REF_LOSLEVEL (0x9 << 26)
+#define PHYPARAM0_REF_LOSLEVEL GENMASK(30, 26)
+#define PHYPARAM0_REF_LOSLEVEL_VAL 0x9
+#define PHYPARAM0_TXVREFTUNE GENMASK(25, 22)
+#define PHYPARAM0_TXRISETUNE GENMASK(21, 20)
+#define PHYPARAM0_TXRESTUNE GENMASK(19, 18)
+#define PHYPARAM0_TXPREEMPPULSETUNE BIT(17)
+#define PHYPARAM0_TXPREEMPAMPTUNE GENMASK(16, 15)
+#define PHYPARAM0_TXHSXVTUNE GENMASK(14, 13)
+#define PHYPARAM0_TXFSLSTUNE GENMASK(12, 9)
+#define PHYPARAM0_SQRXTUNE GENMASK(8, 6)
+#define PHYPARAM0_OTGTUNE GENMASK(5, 3)
+#define PHYPARAM0_COMPDISTUNE GENMASK(2, 0)
#define EXYNOS5_DRD_PHYPARAM1 0x20
-#define PHYPARAM1_PCS_TXDEEMPH_MASK (0x1f << 0)
-#define PHYPARAM1_PCS_TXDEEMPH (0x1c)
+#define PHYPARAM1_PCS_TXDEEMPH GENMASK(4, 0)
+#define PHYPARAM1_PCS_TXDEEMPH_VAL 0x1c
#define EXYNOS5_DRD_PHYTERM 0x24
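
The macro rework in this file replaces hand-rolled shift/mask pairs with GENMASK() field definitions plus FIELD_PREP() at the use site. The two encodings produce identical register images, but FIELD_PREP() additionally rejects out-of-range constants at compile time. A standalone equivalence sketch, modelled on the FLADJ field above (the OLD_/NEW_ macro names are hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define OLD_FLADJ_MASK	(0x3f << 1)	/* open-coded shift/mask style */
#define OLD_FLADJ(x)	((x) << 1)

#define NEW_FLADJ	GENMASK(6, 1)	/* bitfield style */

static u32 pack_fladj(u32 val)
{
	BUILD_BUG_ON(OLD_FLADJ_MASK != NEW_FLADJ);	/* same bits 6:1 */
	return FIELD_PREP(NEW_FLADJ, val);		/* == OLD_FLADJ(val) */
}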
@@ -114,6 +147,12 @@
#define EXYNOS5_DRD_PHYRESUME 0x34
#define EXYNOS5_DRD_LINKPORT 0x44
+#define LINKPORT_HOST_U3_PORT_DISABLE BIT(8)
+#define LINKPORT_HOST_U2_PORT_DISABLE BIT(7)
+#define LINKPORT_HOST_PORT_OVCR_U3 BIT(5)
+#define LINKPORT_HOST_PORT_OVCR_U2 BIT(4)
+#define LINKPORT_HOST_PORT_OVCR_U3_SEL BIT(3)
+#define LINKPORT_HOST_PORT_OVCR_U2_SEL BIT(2)
/* USB 3.0 DRD PHY SS Function Control Reg; accessed by CR_PORT */
#define EXYNOS5_DRD_PHYSS_LOSLEVEL_OVRD_IN (0x15)
@@ -134,13 +173,31 @@
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_62M5 (0x20 << 4)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_96M_100M (0x40 << 4)
+/* Exynos7870: USB DRD PHY registers */
+#define EXYNOS7870_DRD_PHYPCSVAL 0x3c
+#define PHYPCSVAL_PCS_RX_LOS_MASK GENMASK(9, 0)
+
+#define EXYNOS7870_DRD_PHYPARAM2 0x50
+#define PHYPARAM2_TX_VBOOST_LVL GENMASK(6, 4)
+#define PHYPARAM2_LOS_BIAS GENMASK(2, 0)
+
+#define EXYNOS7870_DRD_HSPHYCTRL 0x54
+#define HSPHYCTRL_PHYSWRSTALL BIT(31)
+#define HSPHYCTRL_SIDDQ BIT(6)
+#define HSPHYCTRL_PHYSWRST BIT(0)
+
+#define EXYNOS7870_DRD_HSPHYPLLTUNE 0x70
+#define HSPHYPLLTUNE_PLL_B_TUNE BIT(6)
+#define HSPHYPLLTUNE_PLL_I_TUNE GENMASK(5, 4)
+#define HSPHYPLLTUNE_PLL_P_TUNE GENMASK(3, 0)
+
/* Exynos850: USB DRD PHY registers */
#define EXYNOS850_DRD_LINKCTRL 0x04
#define LINKCTRL_FORCE_RXELECIDLE BIT(18)
#define LINKCTRL_FORCE_PHYSTATUS BIT(17)
#define LINKCTRL_FORCE_PIPE_EN BIT(16)
#define LINKCTRL_FORCE_QACT BIT(8)
-#define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4)
+#define LINKCTRL_BUS_FILTER_BYPASS GENMASK(7, 4)
#define EXYNOS850_DRD_LINKPORT 0x08
#define LINKPORT_HOST_NUM_U3 GENMASK(19, 16)
@@ -389,6 +446,7 @@ struct exynos5_usbdrd_phy_drvdata {
* @clks: clocks for register access
* @core_clks: core clocks for phy (ref, pipe3, utmi+, ITP, etc. as required)
* @drv_data: pointer to SoC level driver data structure
+ * @hs_phy: pointer to non-Samsung IP high-speed phy controller
* @phy_mutex: mutex protecting phy_init/exit & TCPC callbacks
* @phys: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY
* instances each with its 'phy' and 'phy_cfg'.
@@ -406,6 +464,7 @@ struct exynos5_usbdrd_phy {
struct clk_bulk_data *clks;
struct clk_bulk_data *core_clks;
const struct exynos5_usbdrd_phy_drvdata *drv_data;
+ struct phy *hs_phy;
struct mutex phy_mutex;
struct phy_usb_instance {
struct phy *phy;
@@ -497,29 +556,33 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
/* Use EXTREFCLK as ref clock */
- reg &= ~PHYCLKRST_REFCLKSEL_MASK;
- reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
+ reg &= ~PHYCLKRST_REFCLKSEL;
+ reg |= FIELD_PREP(PHYCLKRST_REFCLKSEL, PHYCLKRST_REFCLKSEL_EXT_REFCLK);
/* FSEL settings corresponding to reference clock */
- reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK);
+ reg &= ~(PHYCLKRST_FSEL_PIPE |
+ PHYCLKRST_MPLL_MULTIPLIER |
+ PHYCLKRST_SSC_REFCLKSEL);
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
- reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
- PHYCLKRST_SSC_REFCLKSEL(0x00));
+ reg |= (FIELD_PREP(PHYCLKRST_SSC_REFCLKSEL, 0x00) |
+ FIELD_PREP(PHYCLKRST_MPLL_MULTIPLIER,
+ PHYCLKRST_MPLL_MULTIPLIER_50M_REF));
break;
case EXYNOS5_FSEL_24MHZ:
- reg |= (PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF |
- PHYCLKRST_SSC_REFCLKSEL(0x88));
+ reg |= (FIELD_PREP(PHYCLKRST_SSC_REFCLKSEL, 0x88) |
+ FIELD_PREP(PHYCLKRST_MPLL_MULTIPLIER,
+ PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF));
break;
case EXYNOS5_FSEL_20MHZ:
- reg |= (PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF |
- PHYCLKRST_SSC_REFCLKSEL(0x00));
+ reg |= (FIELD_PREP(PHYCLKRST_SSC_REFCLKSEL, 0x00) |
+ FIELD_PREP(PHYCLKRST_MPLL_MULTIPLIER,
+ PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF));
break;
case EXYNOS5_FSEL_19MHZ2:
- reg |= (PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF |
- PHYCLKRST_SSC_REFCLKSEL(0x88));
+ reg |= (FIELD_PREP(PHYCLKRST_SSC_REFCLKSEL, 0x88) |
+ FIELD_PREP(PHYCLKRST_MPLL_MULTIPLIER,
+ PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF));
break;
default:
dev_dbg(phy_drd->dev, "unsupported ref clk\n");
@@ -542,13 +605,13 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
/* restore any previous reference clock settings */
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
- reg &= ~PHYCLKRST_REFCLKSEL_MASK;
- reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
+ reg &= ~PHYCLKRST_REFCLKSEL;
+ reg |= FIELD_PREP(PHYCLKRST_REFCLKSEL, PHYCLKRST_REFCLKSEL_EXT_REFCLK);
- reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK);
- reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
+ reg &= ~(PHYCLKRST_FSEL_UTMI |
+ PHYCLKRST_MPLL_MULTIPLIER |
+ PHYCLKRST_SSC_REFCLKSEL);
+ reg |= FIELD_PREP(PHYCLKRST_FSEL_UTMI, phy_drd->extrefclk);
return reg;
}
@@ -598,8 +661,8 @@ static void exynos5_usbdrd_pipe3_init(struct exynos5_usbdrd_phy *phy_drd)
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1);
/* Set Tx De-Emphasis level */
- reg &= ~PHYPARAM1_PCS_TXDEEMPH_MASK;
- reg |= PHYPARAM1_PCS_TXDEEMPH;
+ reg &= ~PHYPARAM1_PCS_TXDEEMPH;
+ reg |= FIELD_PREP(PHYPARAM1_PCS_TXDEEMPH, PHYPARAM1_PCS_TXDEEMPH_VAL);
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1);
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST);
@@ -749,14 +812,14 @@ static void exynos5_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0);
/* Set Loss-of-Signal Detector sensitivity */
- reg &= ~PHYPARAM0_REF_LOSLEVEL_MASK;
- reg |= PHYPARAM0_REF_LOSLEVEL;
+ reg &= ~PHYPARAM0_REF_LOSLEVEL;
+ reg |= FIELD_PREP(PHYPARAM0_REF_LOSLEVEL, PHYPARAM0_REF_LOSLEVEL_VAL);
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0);
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1);
/* Set Tx De-Emphasis level */
- reg &= ~PHYPARAM1_PCS_TXDEEMPH_MASK;
- reg |= PHYPARAM1_PCS_TXDEEMPH;
+ reg &= ~PHYPARAM1_PCS_TXDEEMPH;
+ reg |= FIELD_PREP(PHYPARAM1_PCS_TXDEEMPH, PHYPARAM1_PCS_TXDEEMPH_VAL);
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM1);
/* UTMI Power Control */
@@ -787,7 +850,7 @@ static int exynos5_usbdrd_phy_init(struct phy *phy)
* See xHCI 1.0 spec, 5.2.4
*/
reg = LINKSYSTEM_XHCI_VERSION_CONTROL |
- LINKSYSTEM_FLADJ(0x20);
+ FIELD_PREP(LINKSYSTEM_FLADJ, 0x20);
writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_LINKSYSTEM);
reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYPARAM0);
@@ -946,26 +1009,24 @@ static int crport_handshake(struct exynos5_usbdrd_phy *phy_drd,
static int crport_ctrl_write(struct exynos5_usbdrd_phy *phy_drd,
u32 addr, u32 data)
{
+ u32 val;
int ret;
/* Write Address */
- writel(PHYREG0_CR_DATA_IN(addr),
- phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
- ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(addr),
- PHYREG0_CR_CAP_ADDR);
+ val = FIELD_PREP(PHYREG0_CR_DATA_IN, addr);
+ writel(val, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
+ ret = crport_handshake(phy_drd, val, PHYREG0_CR_CAP_ADDR);
if (ret)
return ret;
/* Write Data */
- writel(PHYREG0_CR_DATA_IN(data),
- phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
- ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(data),
- PHYREG0_CR_CAP_DATA);
+ val = FIELD_PREP(PHYREG0_CR_DATA_IN, data);
+ writel(val, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
+ ret = crport_handshake(phy_drd, val, PHYREG0_CR_CAP_DATA);
if (ret)
return ret;
- ret = crport_handshake(phy_drd, PHYREG0_CR_DATA_IN(data),
- PHYREG0_CR_WRITE);
+ ret = crport_handshake(phy_drd, val, PHYREG0_CR_WRITE);
return ret;
}
@@ -1075,6 +1136,315 @@ static const struct phy_ops exynos5_usbdrd_phy_ops = {
.owner = THIS_MODULE,
};
+static void exynos7870_usbdrd_phy_isol(struct phy_usb_instance *inst,
+ bool isolate)
+{
+ unsigned int val;
+
+ if (!inst->reg_pmu)
+ return;
+
+ val = isolate ? 0 : EXYNOS7870_USB2PHY_ENABLE;
+
+ regmap_update_bits(inst->reg_pmu, inst->pmu_offset,
+ EXYNOS7870_USB2PHY_ENABLE, val);
+}
+
+static void exynos7870_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
+{
+ u32 reg;
+
+ reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
+ /* Use PADREFCLK as ref clock */
+ reg &= ~PHYCLKRST_REFCLKSEL;
+ reg |= FIELD_PREP(PHYCLKRST_REFCLKSEL, PHYCLKRST_REFCLKSEL_PAD_REFCLK);
+ /* Select ref clock rate */
+ reg &= ~PHYCLKRST_FSEL_UTMI;
+ reg &= ~PHYCLKRST_FSEL_PIPE;
+ reg |= FIELD_PREP(PHYCLKRST_FSEL_UTMI, phy_drd->extrefclk);
+ /* Enable suspend and reset the port */
+ reg |= PHYCLKRST_EN_UTMISUSPEND;
+ reg |= PHYCLKRST_COMMONONN;
+ reg |= PHYCLKRST_PORTRESET;
+ writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
+ udelay(10);
+
+ /* Clear the port reset bit */
+ reg &= ~PHYCLKRST_PORTRESET;
+ writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST);
+
+ /* Change PHY PLL tune value */
+ reg = readl(phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYPLLTUNE);
+ if (phy_drd->extrefclk == EXYNOS5_FSEL_24MHZ)
+ reg |= HSPHYPLLTUNE_PLL_B_TUNE;
+ else
+ reg &= ~HSPHYPLLTUNE_PLL_B_TUNE;
+ reg &= ~HSPHYPLLTUNE_PLL_P_TUNE;
+ reg |= FIELD_PREP(HSPHYPLLTUNE_PLL_P_TUNE, 14);
+ writel(reg, phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYPLLTUNE);
+
+ /* High-Speed PHY control */
+ reg = readl(phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYCTRL);
+ reg &= ~HSPHYCTRL_SIDDQ;
+ reg &= ~HSPHYCTRL_PHYSWRST;
+ reg &= ~HSPHYCTRL_PHYSWRSTALL;
+ writel(reg, phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYCTRL);
+ udelay(500);
+
+ reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_LINKSYSTEM);
+ /*
+ * Setting the Frame length Adj value[6:1] to default 0x20
+ * See xHCI 1.0 spec, 5.2.4
+ */
+ reg |= LINKSYSTEM_XHCI_VERSION_CONTROL;
+ reg &= ~LINKSYSTEM_FLADJ;
+ reg |= FIELD_PREP(LINKSYSTEM_FLADJ, 0x20);
+ /* Set VBUSVALID signal as the VBUS pad is not used */
+ reg |= LINKSYSTEM_FORCE_BVALID;
+ reg |= LINKSYSTEM_FORCE_VBUSVALID;
+ writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_LINKSYSTEM);
+
+ reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMI);
+ /* Release force_sleep & force_suspend */
+ reg &= ~PHYUTMI_FORCESLEEP;
+ reg &= ~PHYUTMI_FORCESUSPEND;
+ /* DP/DM pull down control */
+ reg &= ~PHYUTMI_DMPULLDOWN;
+ reg &= ~PHYUTMI_DPPULLDOWN;
+ reg &= ~PHYUTMI_DRVVBUS;
+ /* Set DP-pull up as the VBUS pad is not used */
+ reg |= PHYUTMI_VBUSVLDEXTSEL;
+ reg |= PHYUTMI_VBUSVLDEXT;
+ /* Disable OTG block and VBUS valid comparator */
+ reg |= PHYUTMI_OTGDISABLE;
+ writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMI);
+
+ /* Configure OVC IO usage */
+ reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_LINKPORT);
+ reg |= LINKPORT_HOST_PORT_OVCR_U3_SEL | LINKPORT_HOST_PORT_OVCR_U2_SEL;
+ writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_LINKPORT);
+
+ /* High-Speed PHY swrst */
+ reg = readl(phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYCTRL);
+ reg |= HSPHYCTRL_PHYSWRST;
+ writel(reg, phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYCTRL);
+ udelay(20);
+
+ /* Clear the PHY swrst bit */
+ reg = readl(phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYCTRL);
+ reg &= ~HSPHYCTRL_PHYSWRST;
+ writel(reg, phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYCTRL);
+
+ if (phy_drd->drv_data->phy_tunes)
+ exynos5_usbdrd_apply_phy_tunes(phy_drd,
+ PTS_UTMI_POSTINIT);
+}
+
+static int exynos7870_usbdrd_phy_init(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
+ if (ret)
+ return ret;
+
+ /* UTMI or PIPE3 specific init */
+ inst->phy_cfg->phy_init(phy_drd);
+
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
+
+ return 0;
+}
+
+static int exynos7870_usbdrd_phy_exit(struct phy *phy)
+{
+ int ret;
+ u32 reg;
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable the VBUS signal and the ID pull-up resistor.
+ * Enable force-suspend and force-sleep modes.
+ */
+ reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMI);
+ reg &= ~(PHYUTMI_DRVVBUS | PHYUTMI_VBUSVLDEXT | PHYUTMI_VBUSVLDEXTSEL);
+ reg &= ~PHYUTMI_IDPULLUP;
+ reg |= PHYUTMI_FORCESUSPEND | PHYUTMI_FORCESLEEP;
+ writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYUTMI);
+
+ /* Power down PHY analog blocks */
+ reg = readl(phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYCTRL);
+ reg |= HSPHYCTRL_SIDDQ;
+ writel(reg, phy_drd->reg_phy + EXYNOS7870_DRD_HSPHYCTRL);
+
+ /* Clear VBUSVALID signal as the VBUS pad is not used */
+ reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_LINKSYSTEM);
+ reg &= ~(LINKSYSTEM_FORCE_BVALID | LINKSYSTEM_FORCE_VBUSVALID);
+ writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_LINKSYSTEM);
+
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
+
+ return 0;
+}
+
+static const struct phy_ops exynos7870_usbdrd_phy_ops = {
+ .init = exynos7870_usbdrd_phy_init,
+ .exit = exynos7870_usbdrd_phy_exit,
+ .power_on = exynos5_usbdrd_phy_power_on,
+ .power_off = exynos5_usbdrd_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static void exynos2200_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
+{
+ /* Configure the non-Samsung IP PHY responsible for UTMI */
+ phy_init(phy_drd->hs_phy);
+}
+
+static void exynos2200_usbdrd_link_init(struct exynos5_usbdrd_phy *phy_drd)
+{
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+
+ /*
+ * Disable HWACG (hardware auto clock gating control). This forces the
+ * QACTIVE signal in the Q-Channel interface to HIGH level, to make sure
+ * the PHY clock is not gated by the hardware.
+ */
+ reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
+ reg |= LINKCTRL_FORCE_QACT;
+ writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+
+ /* De-assert link reset */
+ reg = readl(regs_base + EXYNOS2200_DRD_CLKRST);
+ reg &= ~CLKRST_LINK_SW_RST;
+ writel(reg, regs_base + EXYNOS2200_DRD_CLKRST);
+
+ /* Set link VBUS Valid */
+ reg = readl(regs_base + EXYNOS2200_DRD_UTMI);
+ reg |= EXYNOS2200_UTMI_FORCE_BVALID | EXYNOS2200_UTMI_FORCE_VBUSVALID;
+ writel(reg, regs_base + EXYNOS2200_DRD_UTMI);
+}
+
+static void
+exynos2200_usbdrd_link_attach_detach_pipe3_phy(struct phy_usb_instance *inst)
+{
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+
+ reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
+ /* force pipe3 signal for link */
+ reg &= ~LINKCTRL_FORCE_PHYSTATUS;
+ reg |= LINKCTRL_FORCE_PIPE_EN | LINKCTRL_FORCE_RXELECIDLE;
+ } else {
+ /* disable forcing pipe interface */
+ reg &= ~LINKCTRL_FORCE_PIPE_EN;
+ }
+ writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+
+ reg = readl(regs_base + EXYNOS2200_DRD_HSP_MISC);
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
+ /* calibrate only eUSB phy */
+ reg |= FIELD_PREP(HSP_MISC_RES_TUNE, RES_TUNE_PHY1);
+ reg |= HSP_MISC_SET_REQ_IN2;
+ } else {
+ /* calibrate for dual phy */
+ reg |= FIELD_PREP(HSP_MISC_RES_TUNE, RES_TUNE_PHY1_PHY2);
+ reg &= ~HSP_MISC_SET_REQ_IN2;
+ }
+ writel(reg, regs_base + EXYNOS2200_DRD_HSP_MISC);
+
+ reg = readl(regs_base + EXYNOS2200_DRD_CLKRST);
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI)
+ reg &= ~EXYNOS2200_CLKRST_LINK_PCLK_SEL;
+ else
+ reg |= EXYNOS2200_CLKRST_LINK_PCLK_SEL;
+
+ writel(reg, regs_base + EXYNOS2200_DRD_CLKRST);
+}
+
+static int exynos2200_usbdrd_phy_init(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ int ret;
+
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
+ /* Power-on PHY ... */
+ ret = regulator_bulk_enable(phy_drd->drv_data->n_regulators,
+ phy_drd->regulators);
+ if (ret) {
+ dev_err(phy_drd->dev,
+ "Failed to enable PHY regulator(s)\n");
+ return ret;
+ }
+ }
+ /*
+ * ... and ungate power via PMU. Without this here, we get an SError
+ * trying to access PMA registers.
+ */
+ exynos5_usbdrd_phy_isol(inst, false);
+
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
+ if (ret)
+ return ret;
+
+ /* Set up the link controller */
+ exynos2200_usbdrd_link_init(phy_drd);
+
+ /* UTMI or PIPE3 link preparation */
+ exynos2200_usbdrd_link_attach_detach_pipe3_phy(inst);
+
+ /* UTMI or PIPE3 specific init */
+ inst->phy_cfg->phy_init(phy_drd);
+
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
+
+ return 0;
+}
+
+static int exynos2200_usbdrd_phy_exit(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks);
+ if (ret)
+ return ret;
+
+ reg = readl(regs_base + EXYNOS2200_DRD_UTMI);
+ reg &= ~(EXYNOS2200_UTMI_FORCE_BVALID | EXYNOS2200_UTMI_FORCE_VBUSVALID);
+ writel(reg, regs_base + EXYNOS2200_DRD_UTMI);
+
+ reg = readl(regs_base + EXYNOS2200_DRD_CLKRST);
+ reg |= CLKRST_LINK_SW_RST;
+ writel(reg, regs_base + EXYNOS2200_DRD_CLKRST);
+
+ clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks);
+
+ exynos5_usbdrd_phy_isol(inst, true);
+ return regulator_bulk_disable(phy_drd->drv_data->n_regulators,
+ phy_drd->regulators);
+}
+
+static const struct phy_ops exynos2200_usbdrd_phy_ops = {
+ .init = exynos2200_usbdrd_phy_init,
+ .exit = exynos2200_usbdrd_phy_exit,
+ .owner = THIS_MODULE,
+};
+
static void
exynos5_usbdrd_usb_v3p1_pipe_override(struct exynos5_usbdrd_phy *phy_drd)
{
@@ -1134,7 +1504,7 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
/* Set VBUS Valid and D+ pull-up control by VBUS pad usage */
reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
- reg |= LINKCTRL_BUS_FILTER_BYPASS(0xf);
+ reg |= FIELD_PREP(LINKCTRL_BUS_FILTER_BYPASS, 0xf);
writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
if (!phy_drd->sw) {
@@ -1384,27 +1754,37 @@ static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd)
return dev_err_probe(phy_drd->dev, ret,
"failed to get phy core clock(s)\n");
- ref_clk = NULL;
- for (int i = 0; i < phy_drd->drv_data->n_core_clks; ++i) {
- if (!strcmp(phy_drd->core_clks[i].id, "ref")) {
- ref_clk = phy_drd->core_clks[i].clk;
- break;
+ if (phy_drd->drv_data->n_core_clks) {
+ ref_clk = NULL;
+ for (int i = 0; i < phy_drd->drv_data->n_core_clks; ++i) {
+ if (!strcmp(phy_drd->core_clks[i].id, "ref")) {
+ ref_clk = phy_drd->core_clks[i].clk;
+ break;
+ }
}
- }
- if (!ref_clk)
- return dev_err_probe(phy_drd->dev, -ENODEV,
- "failed to find phy reference clock\n");
+ if (!ref_clk)
+ return dev_err_probe(phy_drd->dev, -ENODEV,
+ "failed to find phy reference clock\n");
- ref_rate = clk_get_rate(ref_clk);
- ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk);
- if (ret)
- return dev_err_probe(phy_drd->dev, ret,
- "clock rate (%ld) not supported\n",
- ref_rate);
+ ref_rate = clk_get_rate(ref_clk);
+ ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk);
+ if (ret)
+ return dev_err_probe(phy_drd->dev, ret,
+ "clock rate (%ld) not supported\n",
+ ref_rate);
+ }
return 0;
}
+static const struct exynos5_usbdrd_phy_config phy_cfg_exynos2200[] = {
+ {
+ .id = EXYNOS5_DRDPHY_UTMI,
+ .phy_isol = exynos5_usbdrd_phy_isol,
+ .phy_init = exynos2200_usbdrd_utmi_init,
+ },
+};
+
static int exynos5_usbdrd_orien_sw_set(struct typec_switch_dev *sw,
enum typec_orientation orientation)
{
@@ -1501,6 +1881,14 @@ static const struct exynos5_usbdrd_phy_config phy_cfg_exynos5[] = {
},
};
+static const struct exynos5_usbdrd_phy_config phy_cfg_exynos7870[] = {
+ {
+ .id = EXYNOS5_DRDPHY_UTMI,
+ .phy_isol = exynos7870_usbdrd_phy_isol,
+ .phy_init = exynos7870_usbdrd_utmi_init,
+ },
+};
+
static const struct exynos5_usbdrd_phy_config phy_cfg_exynos850[] = {
{
.id = EXYNOS5_DRDPHY_UTMI,
@@ -1509,6 +1897,30 @@ static const struct exynos5_usbdrd_phy_config phy_cfg_exynos850[] = {
},
};
+static
+const struct exynos5_usbdrd_phy_tuning exynos7870_tunes_utmi_postinit[] = {
+ PHY_TUNING_ENTRY_PHY(EXYNOS5_DRD_PHYPARAM0,
+ (PHYPARAM0_TXVREFTUNE | PHYPARAM0_TXRISETUNE |
+ PHYPARAM0_TXRESTUNE | PHYPARAM0_TXPREEMPPULSETUNE |
+ PHYPARAM0_TXPREEMPAMPTUNE | PHYPARAM0_TXHSXVTUNE |
+ PHYPARAM0_TXFSLSTUNE | PHYPARAM0_SQRXTUNE |
+ PHYPARAM0_OTGTUNE | PHYPARAM0_COMPDISTUNE),
+ (FIELD_PREP_CONST(PHYPARAM0_TXVREFTUNE, 14) |
+ FIELD_PREP_CONST(PHYPARAM0_TXRISETUNE, 1) |
+ FIELD_PREP_CONST(PHYPARAM0_TXRESTUNE, 3) |
+ FIELD_PREP_CONST(PHYPARAM0_TXPREEMPAMPTUNE, 0) |
+ FIELD_PREP_CONST(PHYPARAM0_TXHSXVTUNE, 0) |
+ FIELD_PREP_CONST(PHYPARAM0_TXFSLSTUNE, 3) |
+ FIELD_PREP_CONST(PHYPARAM0_SQRXTUNE, 6) |
+ FIELD_PREP_CONST(PHYPARAM0_OTGTUNE, 2) |
+ FIELD_PREP_CONST(PHYPARAM0_COMPDISTUNE, 3))),
+ PHY_TUNING_ENTRY_LAST
+};
+
+static const struct exynos5_usbdrd_phy_tuning *exynos7870_tunes[PTS_MAX] = {
+ [PTS_UTMI_POSTINIT] = exynos7870_tunes_utmi_postinit,
+};
+
static const char * const exynos5_clk_names[] = {
"phy",
};
@@ -1525,6 +1937,19 @@ static const char * const exynos5_regulator_names[] = {
"vbus", "vbus-boost",
};
+static const struct exynos5_usbdrd_phy_drvdata exynos2200_usb32drd_phy = {
+ .phy_cfg = phy_cfg_exynos2200,
+ .phy_ops = &exynos2200_usbdrd_phy_ops,
+ .pmu_offset_usbdrd0_phy = EXYNOS2200_PHY_CTRL_USB20,
+ .clk_names = exynos5_clk_names,
+ .n_clks = ARRAY_SIZE(exynos5_clk_names),
+ /* clocks and regulators are specific to the underlying PHY blocks */
+ .core_clk_names = NULL,
+ .n_core_clks = 0,
+ .regulator_names = NULL,
+ .n_regulators = 0,
+};
+
static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
.phy_ops = &exynos5_usbdrd_phy_ops,
@@ -1575,6 +2000,19 @@ static const struct exynos5_usbdrd_phy_drvdata exynos7_usbdrd_phy = {
.n_regulators = ARRAY_SIZE(exynos5_regulator_names),
};
+static const struct exynos5_usbdrd_phy_drvdata exynos7870_usbdrd_phy = {
+ .phy_cfg = phy_cfg_exynos7870,
+ .phy_tunes = exynos7870_tunes,
+ .phy_ops = &exynos7870_usbdrd_phy_ops,
+ .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
+ .clk_names = exynos5_clk_names,
+ .n_clks = ARRAY_SIZE(exynos5_clk_names),
+ .core_clk_names = exynos5_core_clk_names,
+ .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names),
+ .regulator_names = exynos5_regulator_names,
+ .n_regulators = ARRAY_SIZE(exynos5_regulator_names),
+};
+
static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos850,
.phy_ops = &exynos850_usbdrd_phy_ops,
@@ -1770,6 +2208,9 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
.compatible = "google,gs101-usb31drd-phy",
.data = &gs101_usbd31rd_phy
}, {
+ .compatible = "samsung,exynos2200-usb32drd-phy",
+ .data = &exynos2200_usb32drd_phy,
+ }, {
.compatible = "samsung,exynos5250-usbdrd-phy",
.data = &exynos5250_usbdrd_phy
}, {
@@ -1782,6 +2223,9 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
.compatible = "samsung,exynos7-usbdrd-phy",
.data = &exynos7_usbdrd_phy
}, {
+ .compatible = "samsung,exynos7870-usbdrd-phy",
+ .data = &exynos7870_usbdrd_phy
+ }, {
.compatible = "samsung,exynos850-usbdrd-phy",
.data = &exynos850_usbdrd_phy
},
@@ -1841,6 +2285,17 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
return PTR_ERR(phy_drd->reg_phy);
}
+ /*
+ * The USB32DRD 4nm controller implements a Synopsys eUSB 2.0 PHY
+ * and a Synopsys SS/USBDP COMBOPHY, both managed by external code.
+ */
+ if (of_property_present(dev->of_node, "phy-names")) {
+ phy_drd->hs_phy = devm_of_phy_get(dev, dev->of_node, "hs");
+ if (IS_ERR(phy_drd->hs_phy))
+ return dev_err_probe(dev, PTR_ERR(phy_drd->hs_phy),
+ "failed to get hs_phy\n");
+ }
+
ret = exynos5_usbdrd_phy_clk_handle(phy_drd);
if (ret)
return ret;
diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c
index cb5454fbe2c8..b505d89860b4 100644
--- a/drivers/phy/starfive/phy-jh7110-usb.c
+++ b/drivers/phy/starfive/phy-jh7110-usb.c
@@ -18,6 +18,8 @@
#include <linux/usb/of.h>
#define USB_125M_CLK_RATE 125000000
+#define USB_CLK_MODE_OFF 0x0
+#define USB_CLK_MODE_RX_NORMAL_PWR BIT(1)
#define USB_LS_KEEPALIVE_OFF 0x4
#define USB_LS_KEEPALIVE_ENABLE BIT(4)
@@ -78,6 +80,7 @@ static int jh7110_usb2_phy_init(struct phy *_phy)
{
struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
int ret;
+ unsigned int val;
ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE);
if (ret)
@@ -87,6 +90,10 @@ static int jh7110_usb2_phy_init(struct phy *_phy)
if (ret)
return ret;
+ val = readl(phy->regs + USB_CLK_MODE_OFF);
+ val |= USB_CLK_MODE_RX_NORMAL_PWR;
+ writel(val, phy->regs + USB_CLK_MODE_OFF);
+
return 0;
}
diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig
index f30cfb42b210..342fb736da4b 100644
--- a/drivers/phy/tegra/Kconfig
+++ b/drivers/phy/tegra/Kconfig
@@ -13,7 +13,7 @@ config PHY_TEGRA_XUSB
config PHY_TEGRA194_P2U
tristate "NVIDIA Tegra194 PIPE2UPHY PHY driver"
- depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || COMPILE_TEST
+ depends on ARCH_TEGRA || COMPILE_TEST
select GENERIC_PHY
help
Enable this to support the P2U (PIPE to UPHY) that is part of Tegra 19x
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index fae6242aa730..23a23f2d64e5 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -237,6 +237,8 @@
#define DATA0_VAL_PD BIT(1)
#define USE_XUSB_AO BIT(4)
+#define TEGRA_UTMI_PAD_MAX 4
+
#define TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \
{ \
.name = _name, \
@@ -269,7 +271,7 @@ struct tegra186_xusb_padctl {
/* UTMI bias and tracking */
struct clk *usb2_trk_clk;
- unsigned int bias_pad_enable;
+ DECLARE_BITMAP(utmi_pad_enabled, TEGRA_UTMI_PAD_MAX);
/* padctl context */
struct tegra186_xusb_padctl_context context;
@@ -603,12 +605,8 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
u32 value;
int err;
- mutex_lock(&padctl->lock);
-
- if (priv->bias_pad_enable++ > 0) {
- mutex_unlock(&padctl->lock);
+ if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX))
return;
- }
err = clk_prepare_enable(priv->usb2_trk_clk);
if (err < 0)
@@ -658,8 +656,6 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
} else {
clk_disable_unprepare(priv->usb2_trk_clk);
}
-
- mutex_unlock(&padctl->lock);
}
static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
@@ -667,17 +663,8 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
u32 value;
- mutex_lock(&padctl->lock);
-
- if (WARN_ON(priv->bias_pad_enable == 0)) {
- mutex_unlock(&padctl->lock);
- return;
- }
-
- if (--priv->bias_pad_enable > 0) {
- mutex_unlock(&padctl->lock);
+ if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX))
return;
- }
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value |= USB2_PD_TRK;
@@ -690,13 +677,13 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
clk_disable_unprepare(priv->usb2_trk_clk);
}
- mutex_unlock(&padctl->lock);
}
static void tegra186_utmi_pad_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
struct tegra_xusb_usb2_port *port;
struct device *dev = padctl->dev;
unsigned int index = lane->index;
@@ -705,9 +692,16 @@ static void tegra186_utmi_pad_power_on(struct phy *phy)
if (!phy)
return;
+ mutex_lock(&padctl->lock);
+ if (test_bit(index, priv->utmi_pad_enabled)) {
+ mutex_unlock(&padctl->lock);
+ return;
+ }
+
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(dev, "no port found for USB2 lane %u\n", index);
+ mutex_unlock(&padctl->lock);
return;
}
@@ -724,18 +718,28 @@ static void tegra186_utmi_pad_power_on(struct phy *phy)
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value &= ~USB2_OTG_PD_DR;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+
+ set_bit(index, priv->utmi_pad_enabled);
+ mutex_unlock(&padctl->lock);
}
static void tegra186_utmi_pad_power_down(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
unsigned int index = lane->index;
u32 value;
if (!phy)
return;
+ mutex_lock(&padctl->lock);
+ if (!test_bit(index, priv->utmi_pad_enabled)) {
+ mutex_unlock(&padctl->lock);
+ return;
+ }
+
dev_dbg(padctl->dev, "power down UTMI pad %u\n", index);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -748,7 +752,11 @@ static void tegra186_utmi_pad_power_down(struct phy *phy)
udelay(2);
+ clear_bit(index, priv->utmi_pad_enabled);
+
tegra186_utmi_bias_pad_power_off(padctl);
+
+ mutex_unlock(&padctl->lock);
}
static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
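Note on the design change above: the bias_pad_enable refcount is replaced by a per-pad bitmap, so the shared bias pad's state is now derived from the set of enabled UTMI pads (powered up before the first pad is enabled, powered down only once the map is empty), and padctl->lock moves out to cover the whole per-pad power sequence instead of just the counter. A minimal sketch of the idiom, with hypothetical names (my_ctrl, bias_on/bias_off are illustrative, not this driver's symbols):

	#include <linux/bitmap.h>
	#include <linux/mutex.h>

	#define MY_PAD_MAX	4

	struct my_ctrl {
		struct mutex lock;
		DECLARE_BITMAP(pad_enabled, MY_PAD_MAX);
	};

	static void bias_on(struct my_ctrl *c) { /* power shared bias pad up */ }
	static void bias_off(struct my_ctrl *c) { /* power shared bias pad down */ }

	static void my_pad_power_on(struct my_ctrl *c, unsigned int idx)
	{
		mutex_lock(&c->lock);
		if (!test_bit(idx, c->pad_enabled)) {
			if (bitmap_empty(c->pad_enabled, MY_PAD_MAX))
				bias_on(c);	/* first enabled pad powers the bias */
			/* ... per-pad register writes ... */
			set_bit(idx, c->pad_enabled);
		}
		mutex_unlock(&c->lock);
	}

	static void my_pad_power_off(struct my_ctrl *c, unsigned int idx)
	{
		mutex_lock(&c->lock);
		if (test_bit(idx, c->pad_enabled)) {
			/* ... per-pad register writes ... */
			clear_bit(idx, c->pad_enabled);
			if (bitmap_empty(c->pad_enabled, MY_PAD_MAX))
				bias_off(c);	/* last pad gone: bias goes down */
		}
		mutex_unlock(&c->lock);
	}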
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 79d4814d758d..c89df95aa6ca 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -548,16 +548,16 @@ static int tegra_xusb_port_init(struct tegra_xusb_port *port,
err = dev_set_name(&port->dev, "%s-%u", name, index);
if (err < 0)
- goto unregister;
+ goto put_device;
err = device_add(&port->dev);
if (err < 0)
- goto unregister;
+ goto put_device;
return 0;
-unregister:
- device_unregister(&port->dev);
+put_device:
+ put_device(&port->dev);
return err;
}
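The error-path change above encodes a driver-model rule: until device_add() has succeeded, the only reference to drop is the one taken by device_initialize(), so the correct cleanup is put_device(); device_unregister() would additionally call device_del() on a device that was never added. A hedged sketch of the idiom (my_obj is a hypothetical type, not this driver's):

	#include <linux/device.h>

	struct my_obj {
		struct device dev;
	};

	static int my_obj_register(struct my_obj *obj)
	{
		int err;

		device_initialize(&obj->dev);	/* takes the initial reference */

		err = dev_set_name(&obj->dev, "my-obj");
		if (err < 0)
			goto put_device;	/* not yet added: drop the ref only */

		err = device_add(&obj->dev);
		if (err < 0)
			goto put_device;	/* device_add() failed: same rule */

		return 0;	/* from here on, teardown is device_unregister() */

	put_device:
		put_device(&obj->dev);
		return err;
	}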
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index 05a4a59f7c40..fe6b4925d166 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -222,7 +222,6 @@ struct xpsgtr_phy {
* @siou: siou base address
* @gtr_mutex: mutex for locking
* @phys: PHY lanes
- * @refclk_sscs: spread spectrum settings for the reference clocks
* @clk: reference clocks
* @tx_term_fix: fix for GT issue
* @saved_icm_cfg0: stored value of ICM CFG0 register
@@ -235,7 +234,6 @@ struct xpsgtr_dev {
void __iomem *siou;
struct mutex gtr_mutex; /* mutex for locking */
struct xpsgtr_phy phys[NUM_LANES];
- const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
struct clk *clk[NUM_LANES];
bool tx_term_fix;
unsigned int saved_icm_cfg0;
@@ -398,13 +396,40 @@ got_phy:
return ret;
}
+/* Get the spread spectrum (SSC) settings for the reference clock rate */
+static const struct xpsgtr_ssc *xpsgtr_find_sscs(struct xpsgtr_phy *gtr_phy)
+{
+ unsigned long rate;
+ struct clk *clk;
+ unsigned int i;
+
+ clk = gtr_phy->dev->clk[gtr_phy->refclk];
+ rate = clk_get_rate(clk);
+
+ for (i = 0; i < ARRAY_SIZE(ssc_lookup); i++) {
+ /* Allow an error of 100 ppm */
+ unsigned long error = ssc_lookup[i].refclk_rate / 10000;
+
+ if (abs(rate - ssc_lookup[i].refclk_rate) < error)
+ return &ssc_lookup[i];
+ }
+
+ dev_err(gtr_phy->dev->dev, "Invalid rate %lu for reference clock %u\n",
+ rate, gtr_phy->refclk);
+
+ return NULL;
+}
+
/* Configure PLL and spread-spectrum clock. */
-static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
+static int xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
{
const struct xpsgtr_ssc *ssc;
u32 step_size;
- ssc = gtr_phy->dev->refclk_sscs[gtr_phy->refclk];
+ ssc = xpsgtr_find_sscs(gtr_phy);
+ if (!ssc)
+ return -EINVAL;
+
step_size = ssc->step_size;
xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
@@ -446,6 +471,8 @@ static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
FORCE_STEP_SIZE | FORCE_STEPS);
+
+ return 0;
}
/* Configure the lane protocol. */
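The lookup's tolerance arithmetic above is exact: refclk_rate / 10000 is 100 ppm of the table entry's nominal rate, so a 27 MHz entry (if present in ssc_lookup) matches measured rates within ±2700 Hz (27,000,000 / 10,000). A standalone restatement of the predicate, assuming unsigned rates as in the driver (rate_matches_100ppm is a hypothetical helper name):

	static bool rate_matches_100ppm(unsigned long rate, unsigned long nominal)
	{
		/* 100 ppm of the nominal rate, e.g. 2700 Hz for 27 MHz */
		unsigned long error = nominal / 10000;
		long diff = (long)rate - (long)nominal;

		return (diff < 0 ? -diff : diff) < (long)error;
	}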
@@ -658,7 +685,10 @@ static int xpsgtr_phy_init(struct phy *phy)
* Configure the PLL, the lane protocol, and perform protocol-specific
* initialization.
*/
- xpsgtr_configure_pll(gtr_phy);
+ ret = xpsgtr_configure_pll(gtr_phy);
+ if (ret)
+ goto out;
+
xpsgtr_lane_set_protocol(gtr_phy);
switch (gtr_phy->protocol) {
@@ -823,8 +853,7 @@ static struct phy *xpsgtr_xlate(struct device *dev,
}
refclk = args->args[3];
- if (refclk >= ARRAY_SIZE(gtr_dev->refclk_sscs) ||
- !gtr_dev->refclk_sscs[refclk]) {
+ if (refclk >= ARRAY_SIZE(gtr_dev->clk)) {
dev_err(dev, "Invalid reference clock number %u\n", refclk);
return ERR_PTR(-EINVAL);
}
@@ -928,9 +957,7 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
{
unsigned int refclk;
- for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
- unsigned long rate;
- unsigned int i;
+ for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->clk); ++refclk) {
struct clk *clk;
char name[8];
@@ -946,29 +973,6 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
continue;
gtr_dev->clk[refclk] = clk;
-
- /*
- * Get the spread spectrum (SSC) settings for the reference
- * clock rate.
- */
- rate = clk_get_rate(clk);
-
- for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
- /* Allow an error of 100 ppm */
- unsigned long error = ssc_lookup[i].refclk_rate / 10000;
-
- if (abs(rate - ssc_lookup[i].refclk_rate) < error) {
- gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
- break;
- }
- }
-
- if (i == ARRAY_SIZE(ssc_lookup)) {
- dev_err(gtr_dev->dev,
- "Invalid rate %lu for reference clock %u\n",
- rate, refclk);
- return -EINVAL;
- }
}
return 0;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 464cc9aca157..33db9104df17 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -103,12 +103,10 @@ config PINCTRL_AS3722
config PINCTRL_AT91
bool "AT91 pinctrl driver"
- depends on OF
- depends on ARCH_AT91
+ depends on (OF && ARCH_AT91) || COMPILE_TEST
select PINMUX
select PINCONF
select GPIOLIB
- select OF_GPIO
select GPIOLIB_IRQCHIP
help
Say Y here to enable the at91 pinctrl driver
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index d49b77dcfcff..86f3d5c69e36 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -598,7 +598,7 @@ static int owl_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct owl_pinctrl *pctrl = gpiochip_get_data(chip);
const struct owl_gpio_port *port;
@@ -607,13 +607,15 @@ static void owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
port = owl_gpio_get_port(pctrl, &offset);
if (WARN_ON(port == NULL))
- return;
+ return -ENODEV;
gpio_base = pctrl->base + port->offset;
raw_spin_lock_irqsave(&pctrl->lock, flags);
owl_gpio_update_reg(gpio_base + port->dat, offset, value);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
}
static int owl_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -960,7 +962,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
pctrl->chip.direction_input = owl_gpio_direction_input;
pctrl->chip.direction_output = owl_gpio_direction_output;
pctrl->chip.get = owl_gpio_get;
- pctrl->chip.set = owl_gpio_set;
+ pctrl->chip.set_rv = owl_gpio_set;
pctrl->chip.request = owl_gpio_request;
pctrl->chip.free = owl_gpio_free;
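The .set → .set_rv change above is one instance of a tree-wide GPIO conversion repeated in the bcm2835, iproc, nsp, airoha, and moore diffs below: the new callback returns 0 or a negative errno so register or bus failures propagate to gpiolib instead of being silently dropped. A minimal sketch of the shape a converted driver takes (my_gpio_* names and MY_OUT_REG are hypothetical):

	#include <linux/bits.h>
	#include <linux/gpio/driver.h>
	#include <linux/regmap.h>

	#define MY_OUT_REG	0x0	/* hypothetical output data register */

	struct my_gpio {
		struct regmap *regmap;
		struct gpio_chip chip;
	};

	static int my_gpio_set(struct gpio_chip *chip, unsigned int offset,
			       int value)
	{
		struct my_gpio *priv = gpiochip_get_data(chip);

		/* errors from the regmap write now reach gpiolib callers */
		return regmap_update_bits(priv->regmap, MY_OUT_REG,
					  BIT(offset), value ? BIT(offset) : 0);
	}

	/* registration: priv->chip.set_rv = my_gpio_set; */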
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index eaeec096bc9a..826827800474 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -356,11 +356,14 @@ static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offse
return GPIO_LINE_DIRECTION_IN;
}
-static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int bcm2835_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
+
+ return 0;
}
static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
@@ -394,7 +397,7 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.direction_output = bcm2835_gpio_direction_output,
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
- .set = bcm2835_gpio_set,
+ .set_rv = bcm2835_gpio_set,
.set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
@@ -411,7 +414,7 @@ static const struct gpio_chip bcm2711_gpio_chip = {
.direction_output = bcm2835_gpio_direction_output,
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
- .set = bcm2835_gpio_set,
+ .set_rv = bcm2835_gpio_set,
.set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2711_NUM_GPIOS,
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index c9a3d3aa8c10..1d08b8d4cdd7 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -390,7 +390,7 @@ static int iproc_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
return GPIO_LINE_DIRECTION_IN;
}
-static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int iproc_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
@@ -400,6 +400,8 @@ static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+
+ return 0;
}
static int iproc_gpio_get(struct gpio_chip *gc, unsigned gpio)
@@ -863,7 +865,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
gc->direction_input = iproc_gpio_direction_input;
gc->direction_output = iproc_gpio_direction_output;
gc->get_direction = iproc_gpio_get_direction;
- gc->set = iproc_gpio_set;
+ gc->set_rv = iproc_gpio_set;
gc->get = iproc_gpio_get;
chip->pinmux_is_supported = of_property_read_bool(dev->of_node,
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index a96be8f244e0..b08f8480ddc6 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -310,7 +310,7 @@ static int nsp_gpio_get_direction(struct gpio_chip *gc, unsigned gpio)
return !val;
}
-static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int nsp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
@@ -320,6 +320,8 @@ static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
raw_spin_unlock_irqrestore(&chip->lock, flags);
dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+
+ return 0;
}
static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
@@ -654,7 +656,7 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
gc->get_direction = nsp_gpio_get_direction;
- gc->set = nsp_gpio_set;
+ gc->set_rv = nsp_gpio_set;
gc->get = nsp_gpio_get;
/* optional GPIO interrupt support */
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 4bdbf6bb26e2..9046292d1360 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1530,6 +1530,35 @@ void pinctrl_unregister_mappings(const struct pinctrl_map *map)
}
EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings);
+static void devm_pinctrl_unregister_mappings(void *maps)
+{
+ pinctrl_unregister_mappings(maps);
+}
+
+/**
+ * devm_pinctrl_register_mappings() - Resource managed pinctrl_register_mappings()
+ * @dev: device for which mappings are registered
+ * @maps: the pin control mapping table to register. Note that the pinctrl
+ * core keeps a reference to the passed-in maps, so they must _not_ be
+ * marked with __initdata.
+ * @num_maps: the number of maps in the mapping table
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int devm_pinctrl_register_mappings(struct device *dev,
+ const struct pinctrl_map *maps,
+ unsigned int num_maps)
+{
+ int ret;
+
+ ret = pinctrl_register_mappings(maps, num_maps);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pinctrl_unregister_mappings, (void *)maps);
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_register_mappings);
+
/**
* pinctrl_force_sleep() - turn a given controller device into sleep state
* @pctldev: pin controller device
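A hedged usage sketch for the new helper (board_maps, my_probe, and the device/group names are illustrative): because the wrapper is built on devm_add_action_or_reset(), a failure to register the cleanup action unregisters the mappings immediately, and otherwise they are dropped automatically when the device is unbound.

	#include <linux/pinctrl/machine.h>
	#include <linux/platform_device.h>

	/* must not be __initdata: the pinctrl core keeps a reference to it */
	static const struct pinctrl_map board_maps[] = {
		PIN_MAP_MUX_GROUP_DEFAULT("my-dev", "my-pinctrl",
					  "i2c0_grp", "i2c0"),
	};

	static int my_probe(struct platform_device *pdev)
	{
		int ret;

		ret = devm_pinctrl_register_mappings(&pdev->dev, board_maps,
						     ARRAY_SIZE(board_maps));
		if (ret)
			return ret;

		/* ... rest of probe; no manual unregister call is needed */
		return 0;
	}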
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 4c420b21b804..8d24decd3f07 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -20,7 +20,9 @@ config PINCTRL_IMX_SCMI
config PINCTRL_IMX_SCU
tristate
+ depends on OF
depends on IMX_SCU || COMPILE_TEST
+ default IMX_SCU
select PINCTRL_IMX
config PINCTRL_IMX1_CORE
@@ -159,6 +161,7 @@ config PINCTRL_IMX8MM
tristate "IMX8MM pinctrl driver"
depends on OF
depends on SOC_IMX8M || COMPILE_TEST
+ default SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mm pinctrl driver
@@ -167,6 +170,7 @@ config PINCTRL_IMX8MN
tristate "IMX8MN pinctrl driver"
depends on OF
depends on SOC_IMX8M || COMPILE_TEST
+ default SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mn pinctrl driver
@@ -175,6 +179,7 @@ config PINCTRL_IMX8MP
tristate "IMX8MP pinctrl driver"
depends on OF
depends on SOC_IMX8M || COMPILE_TEST
+ default SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mp pinctrl driver
@@ -183,6 +188,7 @@ config PINCTRL_IMX8MQ
tristate "IMX8MQ pinctrl driver"
depends on OF
depends on SOC_IMX8M || COMPILE_TEST
+ default SOC_IMX8M
select PINCTRL_IMX
help
Say Y here to enable the imx8mq pinctrl driver
@@ -191,6 +197,7 @@ config PINCTRL_IMX8QM
tristate "IMX8QM pinctrl driver"
depends on OF
depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST
+ default ARCH_MXC
select PINCTRL_IMX_SCU
help
Say Y here to enable the imx8qm pinctrl driver
@@ -199,6 +206,7 @@ config PINCTRL_IMX8QXP
tristate "IMX8QXP pinctrl driver"
depends on OF
depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST
+ default ARCH_MXC
select PINCTRL_IMX_SCU
help
Say Y here to enable the imx8qxp pinctrl driver
@@ -207,6 +215,7 @@ config PINCTRL_IMX8DXL
tristate "IMX8DXL pinctrl driver"
depends on OF
depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST
+ default ARCH_MXC
select PINCTRL_IMX_SCU
help
Say Y here to enable the imx8dxl pinctrl driver
@@ -215,6 +224,7 @@ config PINCTRL_IMX8ULP
tristate "IMX8ULP pinctrl driver"
depends on OF
depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC
select PINCTRL_IMX
help
Say Y here to enable the imx8ulp pinctrl driver
@@ -239,6 +249,7 @@ config PINCTRL_IMX93
tristate "IMX93 pinctrl driver"
depends on OF
depends on ARCH_MXC || COMPILE_TEST
+ default SOC_IMX9
select PINCTRL_IMX
help
Say Y here to enable the imx93 pinctrl driver
diff --git a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
index 8f15c4c4dc44..4e8ab919b334 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c
@@ -51,6 +51,7 @@ struct scmi_pinctrl_imx {
#define IMX_SCMI_PIN_SIZE 24
#define IMX95_DAISY_OFF 0x408
+#define IMX94_DAISY_OFF 0x608
static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
@@ -70,6 +71,8 @@ static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev,
if (!daisy_off) {
if (of_machine_is_compatible("fsl,imx95")) {
daisy_off = IMX95_DAISY_OFF;
+ } else if (of_machine_is_compatible("fsl,imx94")) {
+ daisy_off = IMX94_DAISY_OFF;
} else {
dev_err(pctldev->dev, "platform does not support SCMI pinctrl\n");
return -EINVAL;
@@ -289,6 +292,7 @@ scmi_pinctrl_imx_get_pins(struct scmi_pinctrl_imx *pmx, struct pinctrl_desc *des
static const char * const scmi_pinctrl_imx_allowlist[] = {
"fsl,imx95",
+ "fsl,imx94",
NULL
};
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 58f32818a0e6..2d15af6be276 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -181,6 +181,16 @@ config PINCTRL_MT6797
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT6893
+ bool "MediaTek Dimensity MT6893 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+ help
+ Say yes here to support the pin controller and GPIO driver
+ on the MediaTek Dimensity 1200 (MT6893) smartphone SoC.
+
config PINCTRL_MT7622
bool "MediaTek MT7622 pin control"
depends on OF
@@ -263,6 +273,18 @@ config PINCTRL_MT8195
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT8196
+ bool "MediaTek MT8196 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+ help
+ Say yes here to support the pin controller and GPIO driver
+ on the MediaTek MT8196 SoC.
+ On MediaTek platforms, virtual GPIOs are supported and used
+ to map EINTs that have no real GPIO pin.
+
config PINCTRL_MT8365
bool "MediaTek MT8365 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 721ae83476d0..7518980fba59 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_PINCTRL_MT6765) += pinctrl-mt6765.o
obj-$(CONFIG_PINCTRL_MT6779) += pinctrl-mt6779.o
obj-$(CONFIG_PINCTRL_MT6795) += pinctrl-mt6795.o
obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o
+obj-$(CONFIG_PINCTRL_MT6893) += pinctrl-mt6893.o
obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o
obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_PINCTRL_MT8186) += pinctrl-mt8186.o
obj-$(CONFIG_PINCTRL_MT8188) += pinctrl-mt8188.o
obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o
obj-$(CONFIG_PINCTRL_MT8195) += pinctrl-mt8195.o
+obj-$(CONFIG_PINCTRL_MT8196) += pinctrl-mt8196.o
obj-$(CONFIG_PINCTRL_MT8365) += pinctrl-mt8365.o
obj-$(CONFIG_PINCTRL_MT8516) += pinctrl-mt8516.o
obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index b4eb2beab691..d906a5e4101f 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include "mtk-eint.h"
-#include "pinctrl-mtk-common-v2.h"
#define MTK_EINT_EDGE_SENSITIVE 0
#define MTK_EINT_LEVEL_SENSITIVE 1
@@ -505,10 +504,9 @@ int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
}
EXPORT_SYMBOL_GPL(mtk_eint_find_irq);
-int mtk_eint_do_init(struct mtk_eint *eint)
+int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin)
{
- unsigned int size, i, port, inst = 0;
- struct mtk_pinctrl *hw = (struct mtk_pinctrl *)eint->pctl;
+ unsigned int size, i, port, virq, inst = 0;
/* If clients don't assign a specific regs, let's use generic one */
if (!eint->regs)
@@ -519,7 +517,15 @@ int mtk_eint_do_init(struct mtk_eint *eint)
if (!eint->base_pin_num)
return -ENOMEM;
- if (eint->nbase == 1) {
+ if (eint_pin) {
+ eint->pins = eint_pin;
+ for (i = 0; i < eint->hw->ap_num; i++) {
+ inst = eint->pins[i].instance;
+ if (inst >= eint->nbase)
+ continue;
+ eint->base_pin_num[inst]++;
+ }
+ } else {
size = eint->hw->ap_num * sizeof(struct mtk_eint_pin);
eint->pins = devm_kmalloc(eint->dev, size, GFP_KERNEL);
if (!eint->pins)
@@ -533,16 +539,6 @@ int mtk_eint_do_init(struct mtk_eint *eint)
}
}
- if (hw && hw->soc && hw->soc->eint_pin) {
- eint->pins = hw->soc->eint_pin;
- for (i = 0; i < eint->hw->ap_num; i++) {
- inst = eint->pins[i].instance;
- if (inst >= eint->nbase)
- continue;
- eint->base_pin_num[inst]++;
- }
- }
-
eint->pin_list = devm_kmalloc(eint->dev, eint->nbase * sizeof(u16 *), GFP_KERNEL);
if (!eint->pin_list)
goto err_pin_list;
@@ -565,9 +561,8 @@ int mtk_eint_do_init(struct mtk_eint *eint)
goto err_eint;
}
- eint->domain = irq_domain_add_linear(eint->dev->of_node,
- eint->hw->ap_num,
- &irq_domain_simple_ops, NULL);
+ eint->domain = irq_domain_create_linear(of_fwnode_handle(eint->dev->of_node),
+ eint->hw->ap_num, &irq_domain_simple_ops, NULL);
if (!eint->domain)
goto err_eint;
@@ -584,7 +579,7 @@ int mtk_eint_do_init(struct mtk_eint *eint)
if (inst >= eint->nbase)
continue;
eint->pin_list[inst][eint->pins[i].index] = i;
- int virq = irq_create_mapping(eint->domain, i);
+ virq = irq_create_mapping(eint->domain, i);
irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
handle_level_irq);
irq_set_chip_data(virq, eint);
@@ -610,7 +605,7 @@ err_cur_mask:
err_wake_mask:
devm_kfree(eint->dev, eint->pin_list);
err_pin_list:
- if (eint->nbase == 1)
+ if (!eint_pin)
devm_kfree(eint->dev, eint->pins);
err_pins:
devm_kfree(eint->dev, eint->base_pin_num);
diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h
index f7f58cca0d5e..fc31a4c0c77b 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.h
+++ b/drivers/pinctrl/mediatek/mtk-eint.h
@@ -66,7 +66,7 @@ struct mtk_eint_xt {
struct mtk_eint {
struct device *dev;
void __iomem **base;
- u8 nbase;
+ int nbase;
u16 *base_pin_num;
struct irq_domain *domain;
int irq;
@@ -88,7 +88,7 @@ struct mtk_eint {
};
#if IS_ENABLED(CONFIG_EINT_MTK)
-int mtk_eint_do_init(struct mtk_eint *eint);
+int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin);
int mtk_eint_do_suspend(struct mtk_eint *eint);
int mtk_eint_do_resume(struct mtk_eint *eint);
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
@@ -96,7 +96,8 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n);
#else
-static inline int mtk_eint_do_init(struct mtk_eint *eint)
+static inline int mtk_eint_do_init(struct mtk_eint *eint,
+ struct mtk_eint_pin *eint_pin)
{
return -EOPNOTSUPP;
}
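After this signature change a caller passes the optional per-SoC EINT pin table directly, instead of mtk_eint_do_init() reaching back into struct mtk_pinctrl private data, which is what let the pinctrl-mtk-common-v2.h include be dropped from mtk-eint.c. A hedged sketch of a call site (hw is the usual mtk_pinctrl instance in the Paris/common drivers):

	/* NULL table: mtk_eint_do_init() builds a default linear pin map */
	err = mtk_eint_do_init(hw->eint, hw->soc->eint_pin);
	if (err)
		return err;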
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index 5d84a778683d..b97b28ebb37a 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -2247,15 +2247,16 @@ static int airoha_convert_pin_to_reg_offset(struct pinctrl_dev *pctrl_dev,
}
/* gpio callbacks */
-static void airoha_gpio_set(struct gpio_chip *chip, unsigned int gpio,
- int value)
+static int airoha_gpio_set(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct airoha_pinctrl *pinctrl = gpiochip_get_data(chip);
u32 offset = gpio % AIROHA_PIN_BANK_SIZE;
u8 index = gpio / AIROHA_PIN_BANK_SIZE;
- regmap_update_bits(pinctrl->regmap, pinctrl->gpiochip.data[index],
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(pinctrl->regmap,
+ pinctrl->gpiochip.data[index],
+ BIT(offset), value ? BIT(offset) : 0);
}
static int airoha_gpio_get(struct gpio_chip *chip, unsigned int gpio)
@@ -2280,9 +2281,7 @@ static int airoha_gpio_direction_output(struct gpio_chip *chip,
if (err)
return err;
- airoha_gpio_set(chip, gpio, value);
-
- return 0;
+ return airoha_gpio_set(chip, gpio, value);
}
/* irq callbacks */
@@ -2419,7 +2418,7 @@ static int airoha_pinctrl_add_gpiochip(struct airoha_pinctrl *pinctrl,
gc->free = gpiochip_generic_free;
gc->direction_input = pinctrl_gpio_direction_input;
gc->direction_output = airoha_gpio_direction_output;
- gc->set = airoha_gpio_set;
+ gc->set_rv = airoha_gpio_set;
gc->get = airoha_gpio_get;
gc->base = -1;
gc->ngpio = AIROHA_NUM_PINS;
@@ -2715,9 +2714,7 @@ static int airoha_pinconf_set_pin_value(struct pinctrl_dev *pctrl_dev,
if (pin < 0)
return pin;
- airoha_gpio_set(&pinctrl->gpiochip.chip, pin, value);
-
- return 0;
+ return airoha_gpio_set(&pinctrl->gpiochip.chip, pin, value);
}
static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index aad4891223d3..827d0f191031 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -496,24 +496,26 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return !!value;
}
-static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct mtk_pinctrl *hw = gpiochip_get_data(chip);
const struct mtk_pin_desc *desc;
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
- if (!desc->name) {
- dev_err(hw->dev, "Failed to set gpio %d\n", gpio);
- return;
- }
+ if (!desc->name)
+ return -ENOTSUPP;
- mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
}
static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
int value)
{
- mtk_gpio_set(chip, gpio, value);
+ int ret;
+
+ ret = mtk_gpio_set(chip, gpio, value);
+ if (ret)
+ return ret;
return pinctrl_gpio_direction_output(chip, gpio);
}
@@ -567,7 +569,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->direction_input = pinctrl_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
- chip->set = mtk_gpio_set;
+ chip->set_rv = mtk_gpio_set;
chip->to_irq = mtk_gpio_to_irq;
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6893.c b/drivers/pinctrl/mediatek/pinctrl-mt6893.c
new file mode 100644
index 000000000000..468ce0109b07
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6893.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Copyright (C) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt6893.h"
+#include "pinctrl-paris.h"
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
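+/*
+ * The trailing PIN_FIELD_CALC argument differs: 0 gives every pin in the
+ * range its own bitfield (PIN_FIELD_BASE), while 1 makes the whole range
+ * share a single bitfield (PINS_FIELD_BASE), following the convention of
+ * the other MediaTek Paris pinctrl drivers.
+ */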
+static const struct mtk_pin_field_calc mt6893_pin_mode_range[] = {
+ PIN_FIELD(0, 219, 0x0300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_dir_range[] = {
+ PIN_FIELD(0, 219, 0x0000, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_di_range[] = {
+ PIN_FIELD(0, 219, 0x0200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_do_range[] = {
+ PIN_FIELD(0, 219, 0x0100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_smt_range[] = {
+ PINS_FIELD_BASE(0, 9, 2, 0x00f0, 0x10, 7, 1),
+ PINS_FIELD_BASE(10, 15, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(16, 17, 5, 0x00c0, 0x10, 8, 1),
+ PINS_FIELD_BASE(18, 25, 7, 0x00f0, 0x10, 1, 1),
+ PINS_FIELD_BASE(26, 30, 6, 0x00e0, 0x10, 6, 1),
+ PINS_FIELD_BASE(31, 35, 6, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(36, 36, 6, 0x00e0, 0x10, 16, 1),
+ PINS_FIELD_BASE(37, 39, 6, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(40, 41, 6, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 6, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(43, 44, 6, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0090, 0x10, 6, 1),
+ PINS_FIELD_BASE(53, 56, 3, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(57, 60, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(62, 62, 4, 0x0050, 0x10, 1, 1),
+ PINS_FIELD_BASE(63, 73, 3, 0x0090, 0x10, 0, 1),
+ PINS_FIELD_BASE(74, 84, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 86, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(87, 88, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(89, 90, 2, 0x00f0, 0x10, 26, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00f0, 0x10, 0, 1),
+ PINS_FIELD_BASE(92, 95, 2, 0x0100, 0x10, 0, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00f0, 0x10, 30, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00f0, 0x10, 28, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00f0, 0x10, 31, 1),
+ PINS_FIELD_BASE(99, 102, 2, 0x00f0, 0x10, 29, 1),
+ PINS_FIELD_BASE(103, 105, 2, 0x00f0, 0x10, 24, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00f0, 0x10, 25, 1),
+ PIN_FIELD_BASE(107, 108, 2, 0x00f0, 0x10, 5, 1),
+ PINS_FIELD_BASE(109, 113, 2, 0x00f0, 0x10, 8, 1),
+ PINS_FIELD_BASE(114, 116, 2, 0x00f0, 0x10, 16, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00f0, 0x10, 17, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00f0, 0x10, 18, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x00f0, 0x10, 23, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x00f0, 0x10, 22, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00f0, 0x10, 21, 1),
+ PINS_FIELD_BASE(126, 129, 2, 0x00f0, 0x10, 9, 1),
+ PINS_FIELD_BASE(130, 135, 2, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(136, 138, 2, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x00f0, 0x10, 20, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x00f0, 0x10, 19, 1),
+ PINS_FIELD_BASE(143, 148, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 151, 1, 0x0090, 0x10, 0, 1),
+ PINS_FIELD_BASE(152, 155, 5, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(156, 156, 5, 0x00c0, 0x10, 14, 1),
+ PINS_FIELD_BASE(157, 159, 5, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(160, 161, 5, 0x00c0, 0x10, 11, 1),
+ PINS_FIELD_BASE(162, 171, 5, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(172, 173, 5, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(174, 174, 5, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(175, 175, 5, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(176, 177, 5, 0x00c0, 0x10, 1, 1),
+ PINS_FIELD_BASE(178, 182, 5, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(183, 183, 7, 0x00f0, 0x10, 3, 1),
+ PINS_FIELD_BASE(184, 190, 7, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(191, 191, 7, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(195, 195, 7, 0x00f0, 0x10, 12, 1),
+ PINS_FIELD_BASE(196, 199, 7, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(204, 205, 6, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(206, 208, 7, 0x00f0, 0x10, 15, 1),
+ PINS_FIELD_BASE(209, 211, 7, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(212, 213, 7, 0x00f0, 0x10, 8, 1),
+ PINS_FIELD_BASE(214, 219, 7, 0x00f0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 9, 2, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(10, 15, 1, 0x0020, 0x10, 9, 1),
+ PIN_FIELD_BASE(16, 17, 5, 0x0030, 0x10, 21, 1),
+ PIN_FIELD_BASE(18, 25, 7, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(26, 30, 6, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(36, 39, 6, 0x0040, 0x10, 23, 1),
+ PIN_FIELD_BASE(40, 41, 6, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 6, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 44, 6, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x0040, 0x10, 22, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x0040, 0x10, 21, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0020, 0x10, 16, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0020, 0x10, 21, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0020, 0x10, 18, 1),
+ PIN_FIELD_BASE(55, 55, 3, 0x0020, 0x10, 20, 1),
+ PIN_FIELD_BASE(56, 56, 3, 0x0020, 0x10, 19, 1),
+ PIN_FIELD_BASE(57, 60, 3, 0x0020, 0x10, 12, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0020, 0x10, 11, 1),
+ PIN_FIELD_BASE(62, 62, 4, 0x0010, 0x10, 11, 1),
+ PIN_FIELD_BASE(63, 64, 3, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 72, 3, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 84, 4, 0x0010, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 86, 4, 0x0010, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 88, 4, 0x0010, 0x10, 12, 1),
+ PIN_FIELD_BASE(89, 90, 2, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(99, 102, 2, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(107, 108, 2, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0060, 0x10, 25, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0060, 0x10, 24, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0060, 0x10, 26, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0060, 0x10, 31, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(126, 129, 2, 0x0060, 0x10, 27, 1),
+ PIN_FIELD_BASE(130, 132, 2, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(133, 135, 2, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(136, 138, 2, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(143, 145, 1, 0x0020, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 148, 1, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 151, 1, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(152, 152, 5, 0x0030, 0x10, 26, 1),
+ PIN_FIELD_BASE(153, 153, 5, 0x0030, 0x10, 25, 1),
+ PIN_FIELD_BASE(154, 155, 5, 0x0030, 0x10, 23, 1),
+ PIN_FIELD_BASE(156, 158, 5, 0x0030, 0x10, 29, 1),
+ PIN_FIELD_BASE(159, 159, 5, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 161, 5, 0x0030, 0x10, 27, 1),
+ PIN_FIELD_BASE(162, 171, 5, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(172, 173, 5, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(174, 174, 5, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 5, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 177, 5, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(178, 182, 5, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(183, 184, 7, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(185, 185, 7, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(186, 186, 7, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(187, 187, 7, 0x0050, 0x10, 26, 1),
+ PIN_FIELD_BASE(188, 188, 7, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(189, 189, 7, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(190, 191, 7, 0x0050, 0x10, 27, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x0050, 0x10, 29, 1),
+ PIN_FIELD_BASE(195, 195, 7, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(196, 196, 7, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 7, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 7, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 7, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(204, 205, 6, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(206, 208, 7, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(209, 209, 7, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(210, 210, 7, 0x0050, 0x10, 31, 1),
+ PIN_FIELD_BASE(211, 211, 7, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 7, 0x0050, 0x10, 30, 1),
+ PIN_FIELD_BASE(213, 213, 7, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(214, 214, 7, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 7, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 217, 7, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(218, 219, 7, 0x0050, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 9, 2, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(16, 17, 5, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(18, 25, 7, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(26, 30, 6, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(36, 39, 6, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(40, 41, 6, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 6, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 44, 6, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 60, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(62, 62, 4, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(63, 64, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 72, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 84, 4, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 86, 4, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 88, 4, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(89, 90, 2, 0x00b0, 0x10, 19, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x00b0, 0x10, 28, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x00b0, 0x10, 30, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x00b0, 0x10, 29, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x00b0, 0x10, 31, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00b0, 0x10, 26, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00b0, 0x10, 27, 1),
+ PIN_FIELD_BASE(99, 102, 2, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(107, 108, 2, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x00a0, 0x10, 26, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00a0, 0x10, 31, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(126, 129, 2, 0x00a0, 0x10, 27, 1),
+ PIN_FIELD_BASE(130, 132, 2, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(133, 135, 2, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(136, 138, 2, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(143, 145, 1, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 148, 1, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 151, 1, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(156, 159, 5, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(160, 161, 5, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(162, 171, 5, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(172, 173, 5, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(174, 174, 5, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 5, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 177, 5, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(178, 182, 5, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(195, 195, 7, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(196, 196, 7, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 7, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 7, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 7, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(204, 205, 6, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(206, 208, 7, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(209, 209, 7, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(210, 210, 7, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(211, 211, 7, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(212, 212, 7, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(213, 213, 7, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(214, 214, 7, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 7, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 217, 7, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(218, 219, 7, 0x0090, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 9, 2, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(16, 17, 5, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(18, 25, 7, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(26, 30, 6, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(36, 39, 6, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(40, 41, 6, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 6, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 44, 6, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 60, 3, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(62, 62, 4, 0x0020, 0x10, 11, 1),
+ PIN_FIELD_BASE(63, 64, 3, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 72, 3, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 84, 4, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 86, 4, 0x0020, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 88, 4, 0x0020, 0x10, 12, 1),
+ PIN_FIELD_BASE(89, 90, 2, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0090, 0x10, 30, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0090, 0x10, 29, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0090, 0x10, 31, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(99, 102, 2, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(107, 108, 2, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(126, 129, 2, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(130, 132, 2, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(133, 135, 2, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(136, 138, 2, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(143, 145, 1, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(146, 148, 1, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 151, 1, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(156, 159, 5, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(160, 161, 5, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(162, 171, 5, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(172, 173, 5, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(174, 174, 5, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 5, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 177, 5, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(178, 182, 5, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(195, 195, 7, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(196, 196, 7, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 7, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 7, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 7, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(204, 205, 6, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(206, 208, 7, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(209, 209, 7, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(210, 210, 7, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(211, 211, 7, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(212, 212, 7, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(213, 213, 7, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(214, 214, 7, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 7, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 217, 7, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(218, 219, 7, 0x0070, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_drv_range[] = {
+ PINS_FIELD_BASE(0, 9, 2, 0x0000, 0x10, 21, 3),
+ PINS_FIELD_BASE(10, 15, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(16, 17, 5, 0x0000, 0x10, 18, 3),
+ PINS_FIELD_BASE(18, 25, 7, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(26, 30, 6, 0x0000, 0x10, 15, 3),
+ PINS_FIELD_BASE(31, 35, 6, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(36, 36, 6, 0x0010, 0x10, 7, 3),
+ PINS_FIELD_BASE(37, 39, 6, 0x0010, 0x10, 4, 3),
+ PIN_FIELD_BASE(40, 41, 6, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(42, 42, 6, 0x0000, 0x10, 12, 3),
+ PINS_FIELD_BASE(43, 44, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(45, 45, 6, 0x0000, 0x10, 30, 2),
+ PIN_FIELD_BASE(46, 46, 6, 0x0010, 0x10, 2, 2),
+ PIN_FIELD_BASE(47, 47, 6, 0x0010, 0x10, 0, 2),
+ PIN_FIELD_BASE(48, 49, 6, 0x0000, 0x10, 26, 2),
+ PIN_FIELD_BASE(50, 50, 6, 0x0000, 0x10, 24, 2),
+ PIN_FIELD_BASE(51, 52, 3, 0x0000, 0x10, 18, 3),
+ PINS_FIELD_BASE(53, 56, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(57, 60, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(61, 61, 3, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(62, 62, 4, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(63, 73, 3, 0x0000, 0x10, 0, 3),
+ PINS_FIELD_BASE(74, 84, 4, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(85, 86, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(87, 88, 4, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(89, 90, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(91, 91, 2, 0x0000, 0x10, 0, 3),
+ PINS_FIELD_BASE(92, 95, 2, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(96, 96, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(97, 97, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(98, 98, 2, 0x0030, 0x10, 0, 3),
+ PINS_FIELD_BASE(99, 102, 2, 0x0020, 0x10, 24, 3),
+ PINS_FIELD_BASE(103, 105, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(106, 106, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(107, 108, 2, 0x0000, 0x10, 15, 3),
+ PINS_FIELD_BASE(109, 113, 2, 0x0000, 0x10, 24, 3),
+ PINS_FIELD_BASE(114, 117, 2, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(118, 118, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(119, 119, 2, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(120, 120, 2, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(122, 122, 2, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 2, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(124, 124, 2, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(125, 125, 2, 0x0020, 0x10, 0, 3),
+ PINS_FIELD_BASE(126, 129, 2, 0x0000, 0x10, 27, 3),
+ PINS_FIELD_BASE(130, 135, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(136, 138, 2, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(139, 139, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(140, 140, 2, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(141, 141, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(142, 142, 2, 0x0010, 0x10, 24, 3),
+ PINS_FIELD_BASE(143, 148, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(149, 151, 1, 0x0000, 0x10, 0, 3),
+ PINS_FIELD_BASE(152, 155, 5, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(156, 156, 5, 0x0010, 0x10, 6, 3),
+ PINS_FIELD_BASE(157, 159, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(160, 160, 5, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(161, 161, 5, 0x0010, 0x10, 0, 3),
+ PINS_FIELD_BASE(162, 171, 5, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(172, 172, 5, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(173, 173, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(174, 174, 5, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(175, 177, 5, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(178, 182, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(183, 183, 7, 0x0000, 0x10, 9, 3),
+ PINS_FIELD_BASE(184, 190, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(191, 191, 7, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(192, 192, 7, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(193, 193, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(194, 194, 7, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(195, 195, 7, 0x0010, 0x10, 3, 3),
+ PINS_FIELD_BASE(196, 199, 7, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(200, 200, 7, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(201, 201, 7, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 7, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(203, 203, 7, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(204, 205, 6, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(206, 208, 7, 0x0010, 0x10, 12, 3),
+ PINS_FIELD_BASE(209, 212, 7, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(213, 213, 7, 0x0000, 0x10, 24, 3),
+ PINS_FIELD_BASE(214, 219, 7, 0x0000, 0x10, 0, 3),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_pupd_range[] = {
+ PIN_FIELD_BASE(10, 15, 1, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 3, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 3, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(152, 152, 5, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 5, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 155, 5, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(183, 184, 7, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(185, 185, 7, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 7, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 7, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 7, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 7, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 191, 7, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x0080, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_r0_range[] = {
+ PIN_FIELD_BASE(10, 15, 1, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 3, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 3, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(152, 152, 5, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 5, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 155, 5, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 184, 7, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(185, 185, 7, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 7, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 7, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 7, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 7, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 191, 7, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 2, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_r1_range[] = {
+ PIN_FIELD_BASE(10, 15, 1, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(45, 45, 6, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 6, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(47, 47, 6, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(48, 49, 6, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(50, 50, 6, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 52, 3, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(53, 53, 3, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 3, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 3, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 3, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(152, 152, 5, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 5, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 155, 5, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(183, 184, 7, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(185, 185, 7, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 7, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 7, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 7, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 7, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 191, 7, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(192, 192, 7, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 7, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 7, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 3, 1),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(24, 24, 7, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(25, 25, 7, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(89, 89, 2, 0x0050, 0x10, 6, 5),
+ PIN_FIELD_BASE(90, 90, 2, 0x0050, 0x10, 11, 5),
+ PIN_FIELD_BASE(118, 118, 2, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(119, 119, 2, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(120, 120, 2, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(121, 121, 2, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(122, 122, 2, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(123, 123, 2, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 2, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(125, 125, 2, 0x0040, 0x10, 27, 3),
+ PIN_FIELD_BASE(139, 139, 2, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(140, 140, 2, 0x0040, 0x10, 24, 3),
+ PIN_FIELD_BASE(141, 141, 2, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(142, 142, 2, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(160, 160, 5, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(161, 161, 5, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(200, 200, 7, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(201, 201, 7, 0x0030, 0x10, 15, 3),
+ PIN_FIELD_BASE(202, 202, 7, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(203, 203, 7, 0x0030, 0x10, 12, 3),
+ PIN_FIELD_BASE(204, 204, 6, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(205, 205, 6, 0x0020, 0x10, 3, 3),
+};
+
+static const struct mtk_pin_field_calc mt6893_pin_rsel_range[] = {
+ PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 0, 2),
+ PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 2, 2),
+ PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 0, 2),
+ PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 12, 2),
+ PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 10, 2),
+ PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 22, 2),
+ PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 8, 2),
+ PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 20, 2),
+ PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 6, 2),
+ PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 18, 2),
+ PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 4, 2),
+ PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 16, 2),
+ PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 2, 2),
+ PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 14, 2),
+ PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 0, 2),
+ PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 2, 2),
+ PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 6, 2),
+ PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 10, 2),
+ PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 4, 2),
+ PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 8, 2),
+ PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 0, 2),
+ PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 2, 2),
+};
+
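+/*
+ * Per-pin bias scheme, indexed by pin number: it selects whether
+ * mtk_pinconf_bias_set_combo()/_get_combo() drive plain pull-up/pull-down
+ * enable bits, a PUPD direction bit with R1/R0 resistor-select bits, or
+ * PU/PD enables combined with an RSEL resistance-select field.
+ */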
+static const unsigned int mt6893_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE, /* 0 */ MTK_PULL_PU_PD_TYPE, /* 1 */
+ MTK_PULL_PU_PD_TYPE, /* 2 */ MTK_PULL_PU_PD_TYPE, /* 3 */
+ MTK_PULL_PU_PD_TYPE, /* 4 */ MTK_PULL_PU_PD_TYPE, /* 5 */
+ MTK_PULL_PU_PD_TYPE, /* 6 */ MTK_PULL_PU_PD_TYPE, /* 7 */
+ MTK_PULL_PU_PD_TYPE, /* 8 */ MTK_PULL_PU_PD_TYPE, /* 9 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 10 */ MTK_PULL_PUPD_R1R0_TYPE, /* 11 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 12 */ MTK_PULL_PUPD_R1R0_TYPE, /* 13 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 14 */ MTK_PULL_PUPD_R1R0_TYPE, /* 15 */
+ MTK_PULL_PU_PD_TYPE, /* 16 */ MTK_PULL_PU_PD_TYPE, /* 17 */
+ MTK_PULL_PU_PD_TYPE, /* 18 */ MTK_PULL_PU_PD_TYPE, /* 19 */
+ MTK_PULL_PU_PD_TYPE, /* 20 */ MTK_PULL_PU_PD_TYPE, /* 21 */
+ MTK_PULL_PU_PD_TYPE, /* 22 */ MTK_PULL_PU_PD_TYPE, /* 23 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 24 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 25 */
+ MTK_PULL_PU_PD_TYPE, /* 26 */ MTK_PULL_PU_PD_TYPE, /* 27 */
+ MTK_PULL_PU_PD_TYPE, /* 28 */ MTK_PULL_PU_PD_TYPE, /* 29 */
+ MTK_PULL_PU_PD_TYPE, /* 30 */ MTK_PULL_PU_PD_TYPE, /* 31 */
+ MTK_PULL_PU_PD_TYPE, /* 32 */ MTK_PULL_PU_PD_TYPE, /* 33 */
+ MTK_PULL_PU_PD_TYPE, /* 34 */ MTK_PULL_PU_PD_TYPE, /* 35 */
+ MTK_PULL_PU_PD_TYPE, /* 36 */ MTK_PULL_PU_PD_TYPE, /* 37 */
+ MTK_PULL_PU_PD_TYPE, /* 38 */ MTK_PULL_PU_PD_TYPE, /* 39 */
+ MTK_PULL_PU_PD_TYPE, /* 40 */ MTK_PULL_PU_PD_TYPE, /* 41 */
+ MTK_PULL_PU_PD_TYPE, /* 42 */ MTK_PULL_PU_PD_TYPE, /* 43 */
+ MTK_PULL_PU_PD_TYPE, /* 44 */ MTK_PULL_PUPD_R1R0_TYPE, /* 45 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 46 */ MTK_PULL_PUPD_R1R0_TYPE, /* 47 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 48 */ MTK_PULL_PUPD_R1R0_TYPE, /* 49 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 50 */ MTK_PULL_PUPD_R1R0_TYPE, /* 51 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 52 */ MTK_PULL_PUPD_R1R0_TYPE, /* 53 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 54 */ MTK_PULL_PUPD_R1R0_TYPE, /* 55 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 56 */ MTK_PULL_PU_PD_TYPE, /* 57 */
+ MTK_PULL_PU_PD_TYPE, /* 58 */ MTK_PULL_PU_PD_TYPE, /* 59 */
+ MTK_PULL_PU_PD_TYPE, /* 60 */ MTK_PULL_PU_PD_TYPE, /* 61 */
+ MTK_PULL_PU_PD_TYPE, /* 62 */ MTK_PULL_PU_PD_TYPE, /* 63 */
+ MTK_PULL_PU_PD_TYPE, /* 64 */ MTK_PULL_PU_PD_TYPE, /* 65 */
+ MTK_PULL_PU_PD_TYPE, /* 66 */ MTK_PULL_PU_PD_TYPE, /* 67 */
+ MTK_PULL_PU_PD_TYPE, /* 68 */ MTK_PULL_PU_PD_TYPE, /* 69 */
+ MTK_PULL_PU_PD_TYPE, /* 70 */ MTK_PULL_PU_PD_TYPE, /* 71 */
+ MTK_PULL_PU_PD_TYPE, /* 72 */ MTK_PULL_PU_PD_TYPE, /* 73 */
+ MTK_PULL_PU_PD_TYPE, /* 74 */ MTK_PULL_PU_PD_TYPE, /* 75 */
+ MTK_PULL_PU_PD_TYPE, /* 76 */ MTK_PULL_PU_PD_TYPE, /* 77 */
+ MTK_PULL_PU_PD_TYPE, /* 78 */ MTK_PULL_PU_PD_TYPE, /* 79 */
+ MTK_PULL_PU_PD_TYPE, /* 80 */ MTK_PULL_PU_PD_TYPE, /* 81 */
+ MTK_PULL_PU_PD_TYPE, /* 82 */ MTK_PULL_PU_PD_TYPE, /* 83 */
+ MTK_PULL_PU_PD_TYPE, /* 84 */ MTK_PULL_PU_PD_TYPE, /* 85 */
+ MTK_PULL_PU_PD_TYPE, /* 86 */ MTK_PULL_PU_PD_TYPE, /* 87 */
+ MTK_PULL_PU_PD_TYPE, /* 88 */ MTK_PULL_PU_PD_TYPE, /* 89 */
+ MTK_PULL_PU_PD_TYPE, /* 90 */ MTK_PULL_PU_PD_TYPE, /* 91 */
+ MTK_PULL_PU_PD_TYPE, /* 92 */ MTK_PULL_PU_PD_TYPE, /* 93 */
+ MTK_PULL_PU_PD_TYPE, /* 94 */ MTK_PULL_PU_PD_TYPE, /* 95 */
+ MTK_PULL_PU_PD_TYPE, /* 96 */ MTK_PULL_PU_PD_TYPE, /* 97 */
+ MTK_PULL_PU_PD_TYPE, /* 98 */ MTK_PULL_PU_PD_TYPE, /* 99 */
+ MTK_PULL_PU_PD_TYPE, /* 100 */ MTK_PULL_PU_PD_TYPE, /* 101 */
+ MTK_PULL_PU_PD_TYPE, /* 102 */ MTK_PULL_PU_PD_TYPE, /* 103 */
+ MTK_PULL_PU_PD_TYPE, /* 104 */ MTK_PULL_PU_PD_TYPE, /* 105 */
+ MTK_PULL_PU_PD_TYPE, /* 106 */ MTK_PULL_PU_PD_TYPE, /* 107 */
+ MTK_PULL_PU_PD_TYPE, /* 108 */ MTK_PULL_PU_PD_TYPE, /* 109 */
+ MTK_PULL_PU_PD_TYPE, /* 110 */ MTK_PULL_PU_PD_TYPE, /* 111 */
+ MTK_PULL_PU_PD_TYPE, /* 112 */ MTK_PULL_PU_PD_TYPE, /* 113 */
+ MTK_PULL_PU_PD_TYPE, /* 114 */ MTK_PULL_PU_PD_TYPE, /* 115 */
+ MTK_PULL_PU_PD_TYPE, /* 116 */ MTK_PULL_PU_PD_TYPE, /* 117 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 118 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 119 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 120 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 121 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 122 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 123 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 124 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 125 */
+ MTK_PULL_PU_PD_TYPE, /* 126 */ MTK_PULL_PU_PD_TYPE, /* 127 */
+ MTK_PULL_PU_PD_TYPE, /* 128 */ MTK_PULL_PU_PD_TYPE, /* 129 */
+ MTK_PULL_PU_PD_TYPE, /* 130 */ MTK_PULL_PU_PD_TYPE, /* 131 */
+ MTK_PULL_PU_PD_TYPE, /* 132 */ MTK_PULL_PU_PD_TYPE, /* 133 */
+ MTK_PULL_PU_PD_TYPE, /* 134 */ MTK_PULL_PU_PD_TYPE, /* 135 */
+ MTK_PULL_PU_PD_TYPE, /* 136 */ MTK_PULL_PU_PD_TYPE, /* 137 */
+ MTK_PULL_PU_PD_TYPE, /* 138 */ MTK_PULL_PU_PD_TYPE, /* 139 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 140 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 141 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 142 */ MTK_PULL_PU_PD_TYPE, /* 143 */
+ MTK_PULL_PU_PD_TYPE, /* 144 */ MTK_PULL_PU_PD_TYPE, /* 145 */
+ MTK_PULL_PU_PD_TYPE, /* 146 */ MTK_PULL_PU_PD_TYPE, /* 147 */
+ MTK_PULL_PU_PD_TYPE, /* 148 */ MTK_PULL_PU_PD_TYPE, /* 149 */
+ MTK_PULL_PU_PD_TYPE, /* 150 */ MTK_PULL_PU_PD_TYPE, /* 151 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 152 */ MTK_PULL_PUPD_R1R0_TYPE, /* 153 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 154 */ MTK_PULL_PUPD_R1R0_TYPE, /* 155 */
+ MTK_PULL_PU_PD_TYPE, /* 156 */ MTK_PULL_PU_PD_TYPE, /* 157 */
+ MTK_PULL_PU_PD_TYPE, /* 158 */ MTK_PULL_PU_PD_TYPE, /* 159 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 160 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 161 */
+ MTK_PULL_PU_PD_TYPE, /* 162 */ MTK_PULL_PU_PD_TYPE, /* 163 */
+ MTK_PULL_PU_PD_TYPE, /* 164 */ MTK_PULL_PU_PD_TYPE, /* 165 */
+ MTK_PULL_PU_PD_TYPE, /* 166 */ MTK_PULL_PU_PD_TYPE, /* 167 */
+ MTK_PULL_PU_PD_TYPE, /* 168 */ MTK_PULL_PU_PD_TYPE, /* 169 */
+ MTK_PULL_PU_PD_TYPE, /* 170 */ MTK_PULL_PU_PD_TYPE, /* 171 */
+ MTK_PULL_PU_PD_TYPE, /* 172 */ MTK_PULL_PU_PD_TYPE, /* 173 */
+ MTK_PULL_PU_PD_TYPE, /* 174 */ MTK_PULL_PU_PD_TYPE, /* 175 */
+ MTK_PULL_PU_PD_TYPE, /* 176 */ MTK_PULL_PU_PD_TYPE, /* 177 */
+ MTK_PULL_PU_PD_TYPE, /* 178 */ MTK_PULL_PU_PD_TYPE, /* 179 */
+ MTK_PULL_PU_PD_TYPE, /* 180 */ MTK_PULL_PU_PD_TYPE, /* 181 */
+ MTK_PULL_PU_PD_TYPE, /* 182 */ MTK_PULL_PUPD_R1R0_TYPE, /* 183 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 184 */ MTK_PULL_PUPD_R1R0_TYPE, /* 185 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 186 */ MTK_PULL_PUPD_R1R0_TYPE, /* 187 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 188 */ MTK_PULL_PUPD_R1R0_TYPE, /* 189 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 190 */ MTK_PULL_PUPD_R1R0_TYPE, /* 191 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 192 */ MTK_PULL_PUPD_R1R0_TYPE, /* 193 */
+ MTK_PULL_PUPD_R1R0_TYPE, /* 194 */ MTK_PULL_PU_PD_TYPE, /* 195 */
+ MTK_PULL_PU_PD_TYPE, /* 196 */ MTK_PULL_PU_PD_TYPE, /* 197 */
+ MTK_PULL_PU_PD_TYPE, /* 198 */ MTK_PULL_PU_PD_TYPE, /* 199 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 200 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 201 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 202 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 203 */
+ MTK_PULL_PU_PD_RSEL_TYPE, /* 204 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 205 */
+ MTK_PULL_PU_PD_TYPE, /* 206 */ MTK_PULL_PU_PD_TYPE, /* 207 */
+ MTK_PULL_PU_PD_TYPE, /* 208 */ MTK_PULL_PU_PD_TYPE, /* 209 */
+ MTK_PULL_PU_PD_TYPE, /* 210 */ MTK_PULL_PU_PD_TYPE, /* 211 */
+ MTK_PULL_PU_PD_TYPE, /* 212 */ MTK_PULL_PU_PD_TYPE, /* 213 */
+ MTK_PULL_PU_PD_TYPE, /* 214 */ MTK_PULL_PU_PD_TYPE, /* 215 */
+ MTK_PULL_PU_PD_TYPE, /* 216 */ MTK_PULL_PU_PD_TYPE, /* 217 */
+ MTK_PULL_PU_PD_TYPE, /* 218 */ MTK_PULL_PU_PD_TYPE, /* 219 */
+};
+
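+/*
+ * Names of the register banks this controller spans, matched against
+ * reg-names in the devicetree; the base-index argument of PIN_FIELD_BASE()
+ * indexes this list.
+ */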
+static const char * const mt6893_pinctrl_register_base_name[] = {
+ "base", "rm", "bm", "bl", "br", "lm", "lb", "rt", "lt", "tl",
+};
+
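+/*
+ * Map each pinconf register type to its per-pin field table; register types
+ * without an entry are not supported on this SoC. The slew-rate field
+ * reuses the layout of the direction registers.
+ */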
+static const struct mtk_pin_reg_calc mt6893_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6893_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6893_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6893_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6893_pin_do_range),
+ [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt6893_pin_dir_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6893_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6893_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt6893_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt6893_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6893_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6893_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6893_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6893_pin_r1_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt6893_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt6893_pin_rsel_range),
+};
+
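+/*
+ * External interrupt (EINT) controller geometry; the debounce timings are
+ * the same table already used for MT6765.
+ */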
+static const struct mtk_eint_hw mt6893_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = 224,
+ .db_cnt = 32,
+ .db_time = debounce_time_mt6765,
+};
+
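+/*
+ * SoC description consumed by the common Paris probe: each pin is also its
+ * own group (ngrps == npins), every pin offers 8 mux functions, and mux
+ * value 0 selects the GPIO function.
+ */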
+static const struct mtk_pin_soc mt6893_data = {
+ .reg_cal = mt6893_reg_cals,
+ .pins = mtk_pins_mt6893,
+ .npins = ARRAY_SIZE(mtk_pins_mt6893),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt6893),
+ .eint_hw = &mt6893_eint_hw,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt6893_pinctrl_register_base_name,
+ .nbase_names = ARRAY_SIZE(mt6893_pinctrl_register_base_name),
+ .pull_type = mt6893_pull_type,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+};
+
+static const struct of_device_id mt6893_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt6893-pinctrl", .data = &mt6893_data },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mt6893_pinctrl_driver = {
+ .driver = {
+ .name = "mt6893-pinctrl",
+ .of_match_table = mt6893_pinctrl_of_match,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
+ },
+ .probe = mtk_paris_pinctrl_probe,
+};
+
+static int __init mt6893_pinctrl_init(void)
+{
+ return platform_driver_register(&mt6893_pinctrl_driver);
+}
+
+arch_initcall(mt6893_pinctrl_init);
+
+MODULE_DESCRIPTION("MediaTek MT6893 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8196.c b/drivers/pinctrl/mediatek/pinctrl-mt8196.c
new file mode 100644
index 000000000000..82a73929c7a0
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8196.c
@@ -0,0 +1,1860 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 MediaTek Inc.
+ * Author: Guodong Liu <Guodong.Liu@mediatek.com>
+ *         Lei Xue <lei.xue@mediatek.com>
+ *         Cathy Xu <ot_cathy.xu@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt8196.h"
+#include "pinctrl-paris.h"
+
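+/*
+ * PIN_FIELD_BASE()/PINS_FIELD_BASE() describe where the control bits for a
+ * range of pins live: start and end pin, index into the register base list,
+ * offset of the first register, stride between consecutive registers, start
+ * bit and field width. Both assume 32-bit registers; the PINS_ variant maps
+ * the whole pin range onto one shared field.
+ */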
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
+static const struct mtk_pin_field_calc mt8196_pin_mode_range[] = {
+ PIN_FIELD(0, 270, 0x0300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_dir_range[] = {
+ PIN_FIELD(0, 270, 0x0000, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_di_range[] = {
+ PIN_FIELD(0, 270, 0x0200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_do_range[] = {
+ PIN_FIELD(0, 270, 0x0100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 11, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(3, 3, 11, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(4, 4, 11, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(5, 5, 11, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(6, 6, 11, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(7, 7, 11, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(8, 8, 11, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(9, 9, 9, 0x0120, 0x10, 13, 1),
+ PIN_FIELD_BASE(10, 10, 9, 0x0120, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 8, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(12, 12, 9, 0x0120, 0x10, 15, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0120, 0x10, 0, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0120, 0x10, 1, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0120, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(39, 39, 8, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(43, 43, 8, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(44, 44, 8, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(45, 45, 8, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(46, 46, 8, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(47, 47, 8, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(48, 48, 8, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(49, 49, 8, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(50, 50, 8, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(52, 52, 9, 0x0120, 0x10, 7, 1),
+ PIN_FIELD_BASE(53, 53, 9, 0x0120, 0x10, 8, 1),
+ PIN_FIELD_BASE(54, 54, 9, 0x0120, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 9, 0x0120, 0x10, 1, 1),
+ PIN_FIELD_BASE(56, 56, 9, 0x0120, 0x10, 5, 1),
+ PIN_FIELD_BASE(57, 57, 9, 0x0120, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 9, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(59, 59, 9, 0x0120, 0x10, 4, 1),
+ PIN_FIELD_BASE(60, 60, 9, 0x0120, 0x10, 19, 1),
+ PIN_FIELD_BASE(61, 61, 9, 0x0120, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 9, 0x0120, 0x10, 9, 1),
+ PIN_FIELD_BASE(63, 63, 9, 0x0120, 0x10, 14, 1),
+ PIN_FIELD_BASE(64, 64, 9, 0x0120, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x0120, 0x10, 11, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x0120, 0x10, 16, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(69, 69, 9, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(70, 70, 9, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(71, 71, 9, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(72, 72, 9, 0x0120, 0x10, 18, 1),
+ PIN_FIELD_BASE(73, 73, 9, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(74, 74, 9, 0x0120, 0x10, 17, 1),
+ PIN_FIELD_BASE(75, 75, 10, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 10, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 10, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(78, 78, 10, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(79, 79, 10, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(80, 80, 10, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(81, 81, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(82, 82, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(83, 83, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(84, 84, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(85, 85, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(86, 86, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(87, 87, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(88, 88, 11, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(89, 89, 11, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(90, 90, 11, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(91, 91, 12, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(92, 92, 12, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(93, 93, 12, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(94, 94, 12, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 12, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(96, 96, 12, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(97, 97, 12, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(98, 98, 12, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(99, 99, 12, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(100, 100, 12, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(101, 101, 12, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(102, 102, 12, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(103, 103, 12, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(104, 104, 12, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(105, 105, 12, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(106, 106, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(107, 107, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(108, 108, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(109, 109, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(116, 116, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(117, 117, 5, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0120, 0x10, 6, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0120, 0x10, 7, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0120, 0x10, 9, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0120, 0x10, 8, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0120, 0x10, 3, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0120, 0x10, 4, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0120, 0x10, 5, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(160, 160, 3, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(161, 161, 3, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(162, 162, 3, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(163, 163, 3, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(164, 164, 3, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(165, 165, 3, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(166, 166, 3, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(167, 167, 3, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(174, 174, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(175, 175, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(176, 176, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(177, 177, 1, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(178, 178, 1, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(179, 179, 1, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(180, 180, 1, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(181, 181, 1, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(182, 182, 1, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 1, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(184, 184, 1, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(185, 185, 1, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(186, 186, 13, 0x0110, 0x10, 14, 1),
+ PIN_FIELD_BASE(187, 187, 13, 0x0110, 0x10, 14, 1),
+ PIN_FIELD_BASE(188, 188, 13, 0x0110, 0x10, 4, 1),
+ PIN_FIELD_BASE(189, 189, 13, 0x0110, 0x10, 9, 1),
+ PIN_FIELD_BASE(190, 190, 13, 0x0110, 0x10, 5, 1),
+ PIN_FIELD_BASE(191, 191, 13, 0x0110, 0x10, 10, 1),
+ PIN_FIELD_BASE(192, 192, 13, 0x0110, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 13, 0x0110, 0x10, 15, 1),
+ PIN_FIELD_BASE(194, 194, 13, 0x0110, 0x10, 6, 1),
+ PIN_FIELD_BASE(195, 195, 13, 0x0110, 0x10, 11, 1),
+ PIN_FIELD_BASE(196, 196, 13, 0x0110, 0x10, 1, 1),
+ PIN_FIELD_BASE(197, 197, 13, 0x0110, 0x10, 16, 1),
+ PIN_FIELD_BASE(198, 198, 13, 0x0110, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 13, 0x0110, 0x10, 12, 1),
+ PIN_FIELD_BASE(200, 200, 13, 0x0110, 0x10, 19, 1),
+ PIN_FIELD_BASE(201, 201, 13, 0x0110, 0x10, 22, 1),
+ PIN_FIELD_BASE(202, 202, 13, 0x0110, 0x10, 8, 1),
+ PIN_FIELD_BASE(203, 203, 13, 0x0110, 0x10, 13, 1),
+ PIN_FIELD_BASE(204, 204, 13, 0x0110, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 13, 0x0110, 0x10, 3, 1),
+ PIN_FIELD_BASE(206, 206, 13, 0x0110, 0x10, 18, 1),
+ PIN_FIELD_BASE(207, 207, 13, 0x0110, 0x10, 17, 1),
+ PIN_FIELD_BASE(208, 208, 13, 0x0110, 0x10, 17, 1),
+ PIN_FIELD_BASE(209, 209, 13, 0x0110, 0x10, 17, 1),
+ PIN_FIELD_BASE(210, 210, 14, 0x0130, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 14, 0x0130, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 14, 0x0130, 0x10, 2, 1),
+ PIN_FIELD_BASE(213, 213, 14, 0x0130, 0x10, 3, 1),
+ PIN_FIELD_BASE(214, 214, 13, 0x0110, 0x10, 20, 1),
+ PIN_FIELD_BASE(215, 215, 13, 0x0110, 0x10, 21, 1),
+ PIN_FIELD_BASE(216, 216, 14, 0x0130, 0x10, 11, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x0130, 0x10, 11, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x0130, 0x10, 11, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x0130, 0x10, 4, 1),
+ PIN_FIELD_BASE(220, 220, 14, 0x0130, 0x10, 11, 1),
+ PIN_FIELD_BASE(221, 221, 14, 0x0130, 0x10, 12, 1),
+ PIN_FIELD_BASE(222, 222, 14, 0x0130, 0x10, 22, 1),
+ PIN_FIELD_BASE(223, 223, 14, 0x0130, 0x10, 21, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x0130, 0x10, 5, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x0130, 0x10, 6, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x0130, 0x10, 7, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x0130, 0x10, 8, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x0130, 0x10, 9, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x0130, 0x10, 10, 1),
+ PIN_FIELD_BASE(230, 230, 15, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(231, 231, 15, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(232, 232, 15, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(233, 233, 15, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(234, 234, 15, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(235, 235, 15, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(236, 236, 15, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(237, 237, 15, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(238, 238, 15, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(239, 239, 15, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(240, 240, 15, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(241, 241, 15, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(242, 242, 15, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(243, 243, 15, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(244, 244, 15, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(245, 245, 15, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(246, 246, 15, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(247, 247, 15, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(248, 248, 15, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(249, 249, 15, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(250, 250, 15, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(251, 251, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(252, 252, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(253, 253, 3, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(254, 254, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(255, 255, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(256, 256, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(257, 257, 3, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(258, 258, 3, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x0130, 0x10, 13, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x0130, 0x10, 14, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x0130, 0x10, 15, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x0130, 0x10, 16, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x0130, 0x10, 17, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x0130, 0x10, 18, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x0130, 0x10, 19, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x0130, 0x10, 20, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x00e0, 0x10, 7, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 11, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(3, 3, 11, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(4, 4, 11, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(5, 5, 11, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(6, 6, 11, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(7, 7, 11, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(8, 8, 11, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 9, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(10, 10, 9, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 8, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(12, 12, 9, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(20, 20, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(39, 39, 8, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 8, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 8, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 8, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 8, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(47, 47, 8, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 48, 8, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(49, 49, 8, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(50, 50, 8, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(52, 52, 9, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(53, 53, 9, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(54, 54, 9, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 9, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(56, 56, 9, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(57, 57, 9, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 9, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(59, 59, 9, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(60, 60, 9, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(61, 61, 9, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 9, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(63, 63, 9, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(64, 64, 9, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(69, 69, 9, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(70, 70, 9, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(71, 71, 9, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(72, 72, 9, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 9, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(74, 74, 9, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(75, 75, 10, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 10, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 10, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(78, 78, 10, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(79, 79, 10, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(80, 80, 10, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(81, 81, 11, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(82, 82, 11, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(83, 83, 11, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(84, 84, 11, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(85, 85, 11, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(86, 86, 11, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 87, 11, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(88, 88, 11, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(89, 89, 11, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(90, 90, 11, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(91, 91, 12, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(92, 92, 12, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(93, 93, 12, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 12, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 12, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(96, 96, 12, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(97, 97, 12, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 12, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 12, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(100, 100, 12, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(101, 101, 12, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(102, 102, 12, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(103, 103, 12, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 12, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(105, 105, 12, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 5, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(107, 107, 5, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 5, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 5, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 5, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(117, 117, 5, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x0040, 0x10, 19, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 3, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(161, 161, 3, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(162, 162, 3, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(163, 163, 3, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(164, 164, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(165, 165, 3, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(166, 166, 3, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(167, 167, 3, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 1, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(175, 175, 1, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(176, 176, 1, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(177, 177, 1, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(178, 178, 1, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(179, 179, 1, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(180, 180, 1, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(181, 181, 1, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(182, 182, 1, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 1, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(184, 184, 1, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(185, 185, 1, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(186, 186, 13, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(187, 187, 13, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(188, 188, 13, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(189, 189, 13, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(190, 190, 13, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(191, 191, 13, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(192, 192, 13, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 13, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(194, 194, 13, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(195, 195, 13, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(196, 196, 13, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(197, 197, 13, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(198, 198, 13, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(199, 199, 13, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(200, 200, 13, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(201, 201, 13, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(202, 202, 13, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(203, 203, 13, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(204, 204, 13, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 13, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(206, 206, 13, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(207, 207, 13, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(208, 208, 13, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(209, 209, 13, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(210, 210, 14, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 14, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 14, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(213, 213, 14, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(214, 214, 13, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(215, 215, 13, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(216, 216, 14, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(220, 220, 14, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(221, 221, 14, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(222, 222, 14, 0x0060, 0x10, 25, 1),
+ PIN_FIELD_BASE(223, 223, 14, 0x0060, 0x10, 24, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(230, 230, 15, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(231, 231, 15, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(232, 232, 15, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(233, 233, 15, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(234, 234, 15, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(235, 235, 15, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(236, 236, 15, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(237, 237, 15, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(238, 238, 15, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(239, 239, 15, 0x0040, 0x10, 23, 1),
+ PIN_FIELD_BASE(240, 240, 15, 0x0040, 0x10, 22, 1),
+ PIN_FIELD_BASE(241, 241, 15, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(242, 242, 15, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(243, 243, 15, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(244, 244, 15, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(245, 245, 15, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(246, 246, 15, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(247, 247, 15, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(248, 248, 15, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(249, 249, 15, 0x0040, 0x10, 24, 1),
+ PIN_FIELD_BASE(250, 250, 15, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(251, 251, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(252, 252, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(253, 253, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(254, 254, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(255, 255, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(256, 256, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(257, 257, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(258, 258, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x0060, 0x10, 16, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x0060, 0x10, 18, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x0060, 0x10, 19, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x0060, 0x10, 20, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x0060, 0x10, 21, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x0040, 0x10, 21, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x0040, 0x10, 19, 1),
+};
+
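+/*
+ * The PUPD/R0/R1 tables below list only the pins wired with the R1R0 bias
+ * scheme; all other pins are served by the PU/PD (and, where present, RSEL)
+ * registers instead.
+ */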
+static const struct mtk_pin_field_calc mt8196_pin_pupd_range[] = {
+ PIN_FIELD_BASE(60, 60, 9, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x0080, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_r0_range[] = {
+ PIN_FIELD_BASE(60, 60, 9, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x00c0, 0x10, 16, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x00a0, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_r1_range[] = {
+ PIN_FIELD_BASE(60, 60, 9, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 7, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 7, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(127, 127, 7, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(128, 128, 7, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 7, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(130, 130, 7, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(131, 131, 7, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(132, 132, 7, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 7, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 7, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(135, 135, 7, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(136, 136, 7, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(143, 143, 4, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(144, 144, 4, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(145, 145, 4, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(146, 146, 4, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(147, 147, 4, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(148, 148, 4, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 4, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(150, 150, 4, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(151, 151, 4, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(152, 152, 4, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(153, 153, 4, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(154, 154, 4, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(155, 155, 4, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(156, 156, 4, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(217, 217, 14, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(218, 218, 14, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(219, 219, 14, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(224, 224, 14, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(225, 225, 14, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(226, 226, 14, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(227, 227, 14, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(228, 228, 14, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(229, 229, 14, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(259, 259, 14, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(260, 260, 14, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(261, 261, 14, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(262, 262, 14, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(263, 263, 14, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(264, 264, 14, 0x00d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(265, 265, 14, 0x00d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(266, 266, 14, 0x00d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(267, 267, 15, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(268, 268, 15, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(269, 269, 15, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(270, 270, 15, 0x00b0, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 11, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(3, 3, 11, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(4, 4, 11, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(5, 5, 11, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(6, 6, 11, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(7, 7, 11, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(8, 8, 11, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 9, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(10, 10, 9, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 8, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(12, 12, 9, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(20, 20, 3, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(39, 39, 8, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 8, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 8, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 8, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 8, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(47, 47, 8, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 48, 8, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(49, 49, 8, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(50, 50, 8, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(52, 52, 9, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(53, 53, 9, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(54, 54, 9, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 9, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(56, 56, 9, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(57, 57, 9, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 9, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(59, 59, 9, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 9, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 9, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(63, 63, 9, 0x00c0, 0x10, 18, 1),
+ PIN_FIELD_BASE(64, 64, 9, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x00c0, 0x10, 24, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x00c0, 0x10, 21, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x00c0, 0x10, 20, 1),
+ PIN_FIELD_BASE(69, 69, 9, 0x00c0, 0x10, 25, 1),
+ PIN_FIELD_BASE(70, 70, 9, 0x00c0, 0x10, 16, 1),
+ PIN_FIELD_BASE(71, 71, 9, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(72, 72, 9, 0x00c0, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 9, 0x00c0, 0x10, 19, 1),
+ PIN_FIELD_BASE(74, 74, 9, 0x00c0, 0x10, 17, 1),
+ PIN_FIELD_BASE(75, 75, 10, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 10, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 10, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(78, 78, 10, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(79, 79, 10, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(80, 80, 10, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(81, 81, 11, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(82, 82, 11, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(83, 83, 11, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(84, 84, 11, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(85, 85, 11, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(86, 86, 11, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 87, 11, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(88, 88, 11, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(89, 89, 11, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(90, 90, 11, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(91, 91, 12, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(92, 92, 12, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(93, 93, 12, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 12, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 12, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(96, 96, 12, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(97, 97, 12, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 12, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 12, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(100, 100, 12, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(101, 101, 12, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(102, 102, 12, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(103, 103, 12, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 12, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(105, 105, 12, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 5, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(107, 107, 5, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 5, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 5, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 5, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(117, 117, 5, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 3, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(161, 161, 3, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(162, 162, 3, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(163, 163, 3, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(164, 164, 3, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(165, 165, 3, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(166, 166, 3, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(167, 167, 3, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 1, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(175, 175, 1, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(176, 176, 1, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(177, 177, 1, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(178, 178, 1, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(179, 179, 1, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(180, 180, 1, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(181, 181, 1, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(182, 182, 1, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(184, 184, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(185, 185, 1, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(186, 186, 13, 0x00d0, 0x10, 4, 1),
+ PIN_FIELD_BASE(187, 187, 13, 0x00d0, 0x10, 5, 1),
+ PIN_FIELD_BASE(188, 188, 13, 0x00d0, 0x10, 12, 1),
+ PIN_FIELD_BASE(189, 189, 13, 0x00d0, 0x10, 17, 1),
+ PIN_FIELD_BASE(190, 190, 13, 0x00d0, 0x10, 13, 1),
+ PIN_FIELD_BASE(191, 191, 13, 0x00d0, 0x10, 18, 1),
+ PIN_FIELD_BASE(192, 192, 13, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 13, 0x00d0, 0x10, 6, 1),
+ PIN_FIELD_BASE(194, 194, 13, 0x00d0, 0x10, 14, 1),
+ PIN_FIELD_BASE(195, 195, 13, 0x00d0, 0x10, 19, 1),
+ PIN_FIELD_BASE(196, 196, 13, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(197, 197, 13, 0x00d0, 0x10, 7, 1),
+ PIN_FIELD_BASE(198, 198, 13, 0x00d0, 0x10, 15, 1),
+ PIN_FIELD_BASE(199, 199, 13, 0x00d0, 0x10, 20, 1),
+ PIN_FIELD_BASE(200, 200, 13, 0x00d0, 0x10, 22, 1),
+ PIN_FIELD_BASE(201, 201, 13, 0x00d0, 0x10, 25, 1),
+ PIN_FIELD_BASE(202, 202, 13, 0x00d0, 0x10, 16, 1),
+ PIN_FIELD_BASE(203, 203, 13, 0x00d0, 0x10, 21, 1),
+ PIN_FIELD_BASE(204, 204, 13, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 13, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(206, 206, 13, 0x00d0, 0x10, 8, 1),
+ PIN_FIELD_BASE(207, 207, 13, 0x00d0, 0x10, 9, 1),
+ PIN_FIELD_BASE(208, 208, 13, 0x00d0, 0x10, 10, 1),
+ PIN_FIELD_BASE(209, 209, 13, 0x00d0, 0x10, 11, 1),
+ PIN_FIELD_BASE(210, 210, 14, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 14, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 14, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(213, 213, 14, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(214, 214, 13, 0x00d0, 0x10, 23, 1),
+ PIN_FIELD_BASE(215, 215, 13, 0x00d0, 0x10, 24, 1),
+ PIN_FIELD_BASE(216, 216, 14, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(220, 220, 14, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(221, 221, 14, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(222, 222, 14, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(223, 223, 14, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(230, 230, 15, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(231, 231, 15, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(232, 232, 15, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(233, 233, 15, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(234, 234, 15, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(235, 235, 15, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(236, 236, 15, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(237, 237, 15, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(238, 238, 15, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(239, 239, 15, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(240, 240, 15, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(241, 241, 15, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(242, 242, 15, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(243, 243, 15, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(244, 244, 15, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(245, 245, 15, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(246, 246, 15, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(247, 247, 15, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(248, 248, 15, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(249, 249, 15, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(250, 250, 15, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(251, 251, 3, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(252, 252, 3, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(253, 253, 3, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(254, 254, 3, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(255, 255, 3, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(256, 256, 3, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(257, 257, 3, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(258, 258, 3, 0x0090, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(1, 1, 8, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(2, 2, 11, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(3, 3, 11, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(4, 4, 11, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(5, 5, 11, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(6, 6, 11, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(7, 7, 11, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(8, 8, 11, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(9, 9, 9, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(10, 10, 9, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 8, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(12, 12, 9, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(14, 14, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(20, 20, 3, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(26, 26, 2, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(27, 27, 2, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(28, 28, 2, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(32, 32, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(34, 34, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(35, 35, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(36, 36, 1, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(37, 37, 1, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(38, 38, 1, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(39, 39, 8, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 8, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 8, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 8, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 8, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(47, 47, 8, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(48, 48, 8, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(49, 49, 8, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(50, 50, 8, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(51, 51, 8, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(52, 52, 9, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(53, 53, 9, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(54, 54, 9, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 9, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(56, 56, 9, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(57, 57, 9, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(58, 58, 9, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(59, 59, 9, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 9, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 9, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(63, 63, 9, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(64, 64, 9, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 9, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(66, 66, 9, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(67, 67, 9, 0x00a0, 0x10, 21, 1),
+ PIN_FIELD_BASE(68, 68, 9, 0x00a0, 0x10, 20, 1),
+ PIN_FIELD_BASE(69, 69, 9, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(70, 70, 9, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(71, 71, 9, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(72, 72, 9, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(73, 73, 9, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(74, 74, 9, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(75, 75, 10, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 10, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 10, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(78, 78, 10, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(79, 79, 10, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(80, 80, 10, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(81, 81, 11, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(82, 82, 11, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(83, 83, 11, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(84, 84, 11, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(85, 85, 11, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(86, 86, 11, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(87, 87, 11, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(88, 88, 11, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(89, 89, 11, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(90, 90, 11, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(91, 91, 12, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(92, 92, 12, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(93, 93, 12, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 12, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 12, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(96, 96, 12, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(97, 97, 12, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 12, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 12, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(100, 100, 12, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(101, 101, 12, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(102, 102, 12, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(103, 103, 12, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(104, 104, 12, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(105, 105, 12, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(106, 106, 5, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(107, 107, 5, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 5, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 5, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 5, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(111, 111, 5, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(112, 112, 5, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(113, 113, 5, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(114, 114, 5, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(115, 115, 5, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(116, 116, 5, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(117, 117, 5, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(118, 118, 6, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(119, 119, 6, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(120, 120, 6, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(121, 121, 6, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(122, 122, 6, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 6, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(124, 124, 6, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(157, 157, 2, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(158, 158, 2, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(159, 159, 2, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 3, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(161, 161, 3, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(162, 162, 3, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(163, 163, 3, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(164, 164, 3, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(165, 165, 3, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(166, 166, 3, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(167, 167, 3, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(168, 168, 3, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(169, 169, 3, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(170, 170, 3, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(171, 171, 3, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(172, 172, 3, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(173, 173, 3, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 1, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(175, 175, 1, 0x0080, 0x10, 16, 1),
+ PIN_FIELD_BASE(176, 176, 1, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(177, 177, 1, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(178, 178, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(179, 179, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(180, 180, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(181, 181, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(182, 182, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(184, 184, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(185, 185, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(186, 186, 13, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(187, 187, 13, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(188, 188, 13, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(189, 189, 13, 0x00c0, 0x10, 17, 1),
+ PIN_FIELD_BASE(190, 190, 13, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(191, 191, 13, 0x00c0, 0x10, 18, 1),
+ PIN_FIELD_BASE(192, 192, 13, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 13, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(194, 194, 13, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(195, 195, 13, 0x00c0, 0x10, 19, 1),
+ PIN_FIELD_BASE(196, 196, 13, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(197, 197, 13, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(198, 198, 13, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(199, 199, 13, 0x00c0, 0x10, 20, 1),
+ PIN_FIELD_BASE(200, 200, 13, 0x00c0, 0x10, 22, 1),
+ PIN_FIELD_BASE(201, 201, 13, 0x00c0, 0x10, 25, 1),
+ PIN_FIELD_BASE(202, 202, 13, 0x00c0, 0x10, 16, 1),
+ PIN_FIELD_BASE(203, 203, 13, 0x00c0, 0x10, 21, 1),
+ PIN_FIELD_BASE(204, 204, 13, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 13, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(206, 206, 13, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(207, 207, 13, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(208, 208, 13, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(209, 209, 13, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(210, 210, 14, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 14, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(212, 212, 14, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(213, 213, 14, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(214, 214, 13, 0x00c0, 0x10, 23, 1),
+ PIN_FIELD_BASE(215, 215, 13, 0x00c0, 0x10, 24, 1),
+ PIN_FIELD_BASE(216, 216, 14, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(220, 220, 14, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(221, 221, 14, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(222, 222, 14, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(223, 223, 14, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(230, 230, 15, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(231, 231, 15, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(232, 232, 15, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(233, 233, 15, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(234, 234, 15, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(235, 235, 15, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(236, 236, 15, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(237, 237, 15, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(238, 238, 15, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(239, 239, 15, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(240, 240, 15, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(241, 241, 15, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(242, 242, 15, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(243, 243, 15, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(244, 244, 15, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(245, 245, 15, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(246, 246, 15, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(247, 247, 15, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(248, 248, 15, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(249, 249, 15, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(250, 250, 15, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(251, 251, 3, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(252, 252, 3, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(253, 253, 3, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(254, 254, 3, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(255, 255, 3, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(256, 256, 3, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(257, 257, 3, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(258, 258, 3, 0x0080, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(1, 1, 8, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(2, 2, 11, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(3, 3, 11, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(4, 4, 11, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(5, 5, 11, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(6, 6, 11, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(7, 7, 11, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(8, 8, 11, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(9, 9, 9, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(10, 10, 9, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(11, 11, 8, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(12, 12, 9, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(13, 13, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(14, 14, 3, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(15, 15, 6, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(16, 16, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(17, 17, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(18, 18, 6, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(19, 19, 6, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(20, 20, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(21, 21, 2, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(22, 22, 2, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(23, 23, 2, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(24, 24, 2, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(25, 25, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(26, 26, 2, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(27, 27, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(28, 28, 2, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(29, 29, 2, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(30, 30, 2, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(31, 31, 2, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(32, 32, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(33, 33, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(34, 34, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(35, 35, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(36, 36, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(37, 37, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(38, 38, 1, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(39, 39, 8, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(40, 40, 8, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(41, 41, 8, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(42, 42, 8, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(43, 43, 8, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(44, 44, 8, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(45, 45, 8, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(46, 46, 8, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(47, 47, 8, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(48, 48, 8, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(49, 49, 8, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(50, 50, 8, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(51, 51, 8, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(52, 52, 9, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(53, 53, 9, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(54, 54, 9, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(55, 55, 9, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(56, 56, 9, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(57, 57, 9, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(58, 58, 9, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(59, 59, 9, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(60, 60, 9, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(61, 61, 9, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(62, 62, 9, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(63, 63, 9, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(64, 64, 9, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(65, 65, 9, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(66, 66, 9, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(67, 67, 9, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(68, 68, 9, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(69, 69, 9, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(70, 70, 9, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(71, 71, 9, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(72, 72, 9, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(73, 73, 9, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(74, 74, 9, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(75, 75, 10, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(76, 76, 10, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(77, 77, 10, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(78, 78, 10, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(79, 79, 10, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(80, 80, 10, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(81, 81, 11, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(82, 82, 11, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(83, 83, 11, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(84, 84, 11, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(85, 85, 11, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(86, 86, 11, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(87, 87, 11, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(88, 88, 11, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(89, 89, 11, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(90, 90, 11, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(91, 91, 12, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(92, 92, 12, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(93, 93, 12, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(94, 94, 12, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(95, 95, 12, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(96, 96, 12, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(97, 97, 12, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(98, 98, 12, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(99, 99, 12, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(100, 100, 12, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(101, 101, 12, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(102, 102, 12, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(103, 103, 12, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(104, 104, 12, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(105, 105, 12, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(106, 106, 5, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(107, 107, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(108, 108, 5, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(109, 109, 5, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(110, 110, 5, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(111, 111, 5, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(112, 112, 5, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(113, 113, 5, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(114, 114, 5, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(115, 115, 5, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(116, 116, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(117, 117, 5, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(118, 118, 6, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(119, 119, 6, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(120, 120, 6, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(121, 121, 6, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(122, 122, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(123, 123, 6, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(124, 124, 6, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(125, 125, 7, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(126, 126, 7, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(127, 127, 7, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(128, 128, 7, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(129, 129, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(130, 130, 7, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(131, 131, 7, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(132, 132, 7, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(133, 133, 7, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(134, 134, 7, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(135, 135, 7, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(136, 136, 7, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(137, 137, 4, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(138, 138, 4, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(139, 139, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(140, 140, 4, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(141, 141, 4, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(142, 142, 4, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(143, 143, 4, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(144, 144, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(145, 145, 4, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(146, 146, 4, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(147, 147, 4, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(148, 148, 4, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(149, 149, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(150, 150, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(151, 151, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(152, 152, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(153, 153, 4, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(154, 154, 4, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(155, 155, 4, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(156, 156, 4, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(157, 157, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(158, 158, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(159, 159, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(160, 160, 3, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(161, 161, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(162, 162, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(163, 163, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(164, 164, 3, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(165, 165, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(166, 166, 3, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(167, 167, 3, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(168, 168, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(169, 169, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(171, 171, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(172, 172, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(173, 173, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(174, 174, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(175, 175, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(176, 176, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(177, 177, 1, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(178, 178, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(179, 179, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(180, 180, 1, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(181, 181, 1, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(182, 182, 1, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(183, 183, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(184, 184, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(185, 185, 1, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(186, 186, 13, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(187, 187, 13, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(188, 188, 13, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(189, 189, 13, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(190, 190, 13, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(191, 191, 13, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(192, 192, 13, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(193, 193, 13, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(194, 194, 13, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(195, 195, 13, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(196, 196, 13, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(197, 197, 13, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(198, 198, 13, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(199, 199, 13, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(200, 200, 13, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(201, 201, 13, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(202, 202, 13, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(203, 203, 13, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(204, 204, 13, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(205, 205, 13, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(206, 206, 13, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(207, 207, 13, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(208, 208, 13, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(209, 209, 13, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(210, 210, 14, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(211, 211, 14, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(212, 212, 14, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(213, 213, 14, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(214, 214, 13, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(215, 215, 13, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(216, 216, 14, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(217, 217, 14, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(218, 218, 14, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(219, 219, 14, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(220, 220, 14, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(221, 221, 14, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(222, 222, 14, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(223, 223, 14, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(224, 224, 14, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(225, 225, 14, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(226, 226, 14, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(227, 227, 14, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(228, 228, 14, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(229, 229, 14, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(230, 230, 15, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(231, 231, 15, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(232, 232, 15, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(233, 233, 15, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(234, 234, 15, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(235, 235, 15, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(236, 236, 15, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(237, 237, 15, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(238, 238, 15, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(239, 239, 15, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(240, 240, 15, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(241, 241, 15, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(242, 242, 15, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(243, 243, 15, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(244, 244, 15, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(245, 245, 15, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(246, 246, 15, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(247, 247, 15, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(248, 248, 15, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(249, 249, 15, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(250, 250, 15, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(251, 251, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(252, 252, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(253, 253, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(254, 254, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(255, 255, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(256, 256, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(257, 257, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(258, 258, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(259, 259, 14, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(260, 260, 14, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(261, 261, 14, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(262, 262, 14, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(263, 263, 14, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(264, 264, 14, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(265, 265, 14, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(266, 266, 14, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(267, 267, 15, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(268, 268, 15, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(269, 269, 15, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(270, 270, 15, 0x0000, 0x10, 21, 3),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_drv_adv_range[] = {
+ PIN_FIELD_BASE(46, 46, 8, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(47, 47, 8, 0x0030, 0x10, 9, 3),
+ PIN_FIELD_BASE(48, 48, 8, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(49, 49, 8, 0x0030, 0x10, 12, 3),
+ PIN_FIELD_BASE(50, 50, 8, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(51, 51, 8, 0x0030, 0x10, 15, 3),
+ PIN_FIELD_BASE(52, 52, 9, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(53, 53, 9, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(75, 75, 10, 0x0020, 0x10, 0, 5),
+ PIN_FIELD_BASE(76, 76, 10, 0x0020, 0x10, 5, 5),
+ PIN_FIELD_BASE(77, 77, 10, 0x0020, 0x10, 10, 5),
+ PIN_FIELD_BASE(78, 78, 10, 0x0020, 0x10, 15, 5),
+ PIN_FIELD_BASE(99, 99, 12, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(100, 100, 12, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(101, 101, 12, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(102, 102, 12, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(104, 104, 12, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(105, 105, 12, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(123, 123, 6, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 6, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(164, 164, 3, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(165, 165, 3, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(166, 166, 3, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(167, 167, 3, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(168, 168, 3, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(176, 176, 1, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(177, 177, 1, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(188, 188, 13, 0x0040, 0x10, 0, 3),
+ PIN_FIELD_BASE(189, 189, 13, 0x0040, 0x10, 15, 3),
+ PIN_FIELD_BASE(190, 190, 13, 0x0040, 0x10, 3, 3),
+ PIN_FIELD_BASE(191, 191, 13, 0x0040, 0x10, 18, 3),
+ PIN_FIELD_BASE(194, 194, 13, 0x0040, 0x10, 6, 3),
+ PIN_FIELD_BASE(195, 195, 13, 0x0040, 0x10, 21, 3),
+ PIN_FIELD_BASE(198, 198, 13, 0x0040, 0x10, 9, 3),
+ PIN_FIELD_BASE(199, 199, 13, 0x0040, 0x10, 24, 3),
+ PIN_FIELD_BASE(200, 200, 13, 0x0050, 0x10, 0, 3),
+ PIN_FIELD_BASE(201, 201, 13, 0x0050, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 13, 0x0040, 0x10, 12, 3),
+ PIN_FIELD_BASE(203, 203, 13, 0x0040, 0x10, 27, 3),
+ PIN_FIELD_BASE(214, 214, 13, 0x0050, 0x10, 3, 3),
+ PIN_FIELD_BASE(215, 215, 13, 0x0050, 0x10, 6, 3),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_rsel_range[] = {
+ PIN_FIELD_BASE(46, 46, 8, 0x00c0, 0x10, 0, 3),
+ PIN_FIELD_BASE(47, 47, 8, 0x00c0, 0x10, 9, 3),
+ PIN_FIELD_BASE(48, 48, 8, 0x00c0, 0x10, 3, 3),
+ PIN_FIELD_BASE(49, 49, 8, 0x00c0, 0x10, 12, 3),
+ PIN_FIELD_BASE(50, 50, 8, 0x00c0, 0x10, 6, 3),
+ PIN_FIELD_BASE(51, 51, 8, 0x00c0, 0x10, 15, 3),
+ PIN_FIELD_BASE(52, 52, 9, 0x0110, 0x10, 0, 3),
+ PIN_FIELD_BASE(53, 53, 9, 0x0110, 0x10, 3, 3),
+ PIN_FIELD_BASE(99, 99, 12, 0x00b0, 0x10, 0, 3),
+ PIN_FIELD_BASE(100, 100, 12, 0x00b0, 0x10, 9, 3),
+ PIN_FIELD_BASE(101, 101, 12, 0x00b0, 0x10, 3, 3),
+ PIN_FIELD_BASE(102, 102, 12, 0x00b0, 0x10, 12, 3),
+ PIN_FIELD_BASE(104, 104, 12, 0x00b0, 0x10, 6, 3),
+ PIN_FIELD_BASE(105, 105, 12, 0x00b0, 0x10, 15, 3),
+ PIN_FIELD_BASE(123, 123, 6, 0x0100, 0x10, 0, 3),
+ PIN_FIELD_BASE(124, 124, 6, 0x0100, 0x10, 3, 3),
+ PIN_FIELD_BASE(164, 164, 3, 0x00b0, 0x10, 0, 3),
+ PIN_FIELD_BASE(165, 165, 3, 0x00b0, 0x10, 6, 3),
+ PIN_FIELD_BASE(166, 166, 3, 0x00b0, 0x10, 3, 3),
+ PIN_FIELD_BASE(167, 167, 3, 0x00b0, 0x10, 9, 3),
+ PIN_FIELD_BASE(168, 168, 3, 0x00b0, 0x10, 12, 3),
+ PIN_FIELD_BASE(170, 170, 3, 0x00b0, 0x10, 15, 3),
+ PIN_FIELD_BASE(176, 176, 1, 0x00b0, 0x10, 0, 3),
+ PIN_FIELD_BASE(177, 177, 1, 0x00b0, 0x10, 3, 3),
+ PIN_FIELD_BASE(188, 188, 13, 0x00f0, 0x10, 0, 3),
+ PIN_FIELD_BASE(189, 189, 13, 0x00f0, 0x10, 15, 3),
+ PIN_FIELD_BASE(190, 190, 13, 0x00f0, 0x10, 3, 3),
+ PIN_FIELD_BASE(191, 191, 13, 0x00f0, 0x10, 18, 3),
+ PIN_FIELD_BASE(194, 194, 13, 0x00f0, 0x10, 6, 3),
+ PIN_FIELD_BASE(195, 195, 13, 0x00f0, 0x10, 21, 3),
+ PIN_FIELD_BASE(198, 198, 13, 0x00f0, 0x10, 9, 3),
+ PIN_FIELD_BASE(199, 199, 13, 0x00f0, 0x10, 24, 3),
+ PIN_FIELD_BASE(200, 200, 13, 0x0100, 0x10, 0, 3),
+ PIN_FIELD_BASE(201, 201, 13, 0x0100, 0x10, 9, 3),
+ PIN_FIELD_BASE(202, 202, 13, 0x00f0, 0x10, 12, 3),
+ PIN_FIELD_BASE(203, 203, 13, 0x00f0, 0x10, 27, 3),
+ PIN_FIELD_BASE(214, 214, 13, 0x0100, 0x10, 3, 3),
+ PIN_FIELD_BASE(215, 215, 13, 0x0100, 0x10, 6, 3),
+};
+
+static const struct mtk_pin_rsel mt8196_pin_rsel_val_range[] = {
+ PIN_RSEL(46, 53, 0x0, 75000, 75000),
+ PIN_RSEL(46, 53, 0x1, 10000, 5000),
+ PIN_RSEL(46, 53, 0x2, 5000, 75000),
+ PIN_RSEL(46, 53, 0x3, 4000, 5000),
+ PIN_RSEL(46, 53, 0x4, 3000, 75000),
+ PIN_RSEL(46, 53, 0x5, 2000, 5000),
+ PIN_RSEL(46, 53, 0x6, 1500, 75000),
+ PIN_RSEL(46, 53, 0x7, 1000, 5000),
+ PIN_RSEL(99, 102, 0x0, 75000, 75000),
+ PIN_RSEL(99, 102, 0x1, 10000, 5000),
+ PIN_RSEL(99, 102, 0x2, 5000, 75000),
+ PIN_RSEL(99, 102, 0x3, 4000, 5000),
+ PIN_RSEL(99, 102, 0x4, 3000, 75000),
+ PIN_RSEL(99, 102, 0x5, 2000, 5000),
+ PIN_RSEL(99, 102, 0x6, 1500, 75000),
+ PIN_RSEL(99, 102, 0x7, 1000, 5000),
+ PIN_RSEL(104, 105, 0x0, 75000, 75000),
+ PIN_RSEL(104, 105, 0x1, 10000, 5000),
+ PIN_RSEL(104, 105, 0x2, 5000, 75000),
+ PIN_RSEL(104, 105, 0x3, 4000, 5000),
+ PIN_RSEL(104, 105, 0x4, 3000, 75000),
+ PIN_RSEL(104, 105, 0x5, 2000, 5000),
+ PIN_RSEL(104, 105, 0x6, 1500, 75000),
+ PIN_RSEL(104, 105, 0x7, 1000, 5000),
+ PIN_RSEL(123, 124, 0x0, 75000, 75000),
+ PIN_RSEL(123, 124, 0x1, 10000, 5000),
+ PIN_RSEL(123, 124, 0x2, 5000, 75000),
+ PIN_RSEL(123, 124, 0x3, 4000, 5000),
+ PIN_RSEL(123, 124, 0x4, 3000, 75000),
+ PIN_RSEL(123, 124, 0x5, 2000, 5000),
+ PIN_RSEL(123, 124, 0x6, 1500, 75000),
+ PIN_RSEL(123, 124, 0x7, 1000, 5000),
+ PIN_RSEL(164, 168, 0x0, 75000, 75000),
+ PIN_RSEL(164, 168, 0x1, 10000, 5000),
+ PIN_RSEL(164, 168, 0x2, 5000, 75000),
+ PIN_RSEL(164, 168, 0x3, 4000, 5000),
+ PIN_RSEL(164, 168, 0x4, 3000, 75000),
+ PIN_RSEL(164, 168, 0x5, 2000, 5000),
+ PIN_RSEL(164, 168, 0x6, 1500, 75000),
+ PIN_RSEL(164, 168, 0x7, 1000, 5000),
+ PIN_RSEL(170, 170, 0x0, 75000, 75000),
+ PIN_RSEL(170, 170, 0x1, 10000, 5000),
+ PIN_RSEL(170, 170, 0x2, 5000, 75000),
+ PIN_RSEL(170, 170, 0x3, 4000, 5000),
+ PIN_RSEL(170, 170, 0x4, 3000, 75000),
+ PIN_RSEL(170, 170, 0x5, 2000, 5000),
+ PIN_RSEL(170, 170, 0x6, 1500, 75000),
+ PIN_RSEL(170, 170, 0x7, 1000, 5000),
+ PIN_RSEL(176, 177, 0x0, 75000, 75000),
+ PIN_RSEL(176, 177, 0x1, 10000, 5000),
+ PIN_RSEL(176, 177, 0x2, 5000, 75000),
+ PIN_RSEL(176, 177, 0x3, 4000, 5000),
+ PIN_RSEL(176, 177, 0x4, 3000, 75000),
+ PIN_RSEL(176, 177, 0x5, 2000, 5000),
+ PIN_RSEL(176, 177, 0x6, 1500, 75000),
+ PIN_RSEL(176, 177, 0x7, 1000, 5000),
+ PIN_RSEL(188, 191, 0x0, 75000, 75000),
+ PIN_RSEL(188, 191, 0x1, 10000, 5000),
+ PIN_RSEL(188, 191, 0x2, 5000, 75000),
+ PIN_RSEL(188, 191, 0x3, 4000, 5000),
+ PIN_RSEL(188, 191, 0x4, 3000, 75000),
+ PIN_RSEL(188, 191, 0x5, 2000, 5000),
+ PIN_RSEL(188, 191, 0x6, 1500, 75000),
+ PIN_RSEL(188, 191, 0x7, 1000, 5000),
+ PIN_RSEL(194, 195, 0x0, 75000, 75000),
+ PIN_RSEL(194, 195, 0x1, 10000, 5000),
+ PIN_RSEL(194, 195, 0x2, 5000, 75000),
+ PIN_RSEL(194, 195, 0x3, 4000, 5000),
+ PIN_RSEL(194, 195, 0x4, 3000, 75000),
+ PIN_RSEL(194, 195, 0x5, 2000, 5000),
+ PIN_RSEL(194, 195, 0x6, 1500, 75000),
+ PIN_RSEL(194, 195, 0x7, 1000, 5000),
+ PIN_RSEL(198, 203, 0x0, 75000, 75000),
+ PIN_RSEL(198, 203, 0x1, 10000, 5000),
+ PIN_RSEL(198, 203, 0x2, 5000, 75000),
+ PIN_RSEL(198, 203, 0x3, 4000, 5000),
+ PIN_RSEL(198, 203, 0x4, 3000, 75000),
+ PIN_RSEL(198, 203, 0x5, 2000, 5000),
+ PIN_RSEL(198, 203, 0x6, 1500, 75000),
+ PIN_RSEL(198, 203, 0x7, 1000, 5000),
+ PIN_RSEL(214, 215, 0x0, 75000, 75000),
+ PIN_RSEL(214, 215, 0x1, 10000, 5000),
+ PIN_RSEL(214, 215, 0x2, 5000, 75000),
+ PIN_RSEL(214, 215, 0x3, 4000, 5000),
+ PIN_RSEL(214, 215, 0x4, 3000, 75000),
+ PIN_RSEL(214, 215, 0x5, 2000, 5000),
+ PIN_RSEL(214, 215, 0x6, 1500, 75000),
+ PIN_RSEL(214, 215, 0x7, 1000, 5000),
+};
+
+static const unsigned int mt8196_pull_type[] = {
+ MTK_PULL_PU_PD_TYPE,/*0*/ MTK_PULL_PU_PD_TYPE,/*1*/
+ MTK_PULL_PU_PD_TYPE,/*2*/ MTK_PULL_PU_PD_TYPE,/*3*/
+ MTK_PULL_PU_PD_TYPE,/*4*/ MTK_PULL_PU_PD_TYPE,/*5*/
+ MTK_PULL_PU_PD_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE,/*7*/
+ MTK_PULL_PU_PD_TYPE,/*8*/ MTK_PULL_PU_PD_TYPE,/*9*/
+ MTK_PULL_PU_PD_TYPE,/*10*/ MTK_PULL_PU_PD_TYPE,/*11*/
+ MTK_PULL_PU_PD_TYPE,/*12*/ MTK_PULL_PU_PD_TYPE,/*13*/
+ MTK_PULL_PU_PD_TYPE,/*14*/ MTK_PULL_PU_PD_TYPE,/*15*/
+ MTK_PULL_PU_PD_TYPE,/*16*/ MTK_PULL_PU_PD_TYPE,/*17*/
+ MTK_PULL_PU_PD_TYPE,/*18*/ MTK_PULL_PU_PD_TYPE,/*19*/
+ MTK_PULL_PU_PD_TYPE,/*20*/ MTK_PULL_PU_PD_TYPE,/*21*/
+ MTK_PULL_PU_PD_TYPE,/*22*/ MTK_PULL_PU_PD_TYPE,/*23*/
+ MTK_PULL_PU_PD_TYPE,/*24*/ MTK_PULL_PU_PD_TYPE,/*25*/
+ MTK_PULL_PU_PD_TYPE,/*26*/ MTK_PULL_PU_PD_TYPE,/*27*/
+ MTK_PULL_PU_PD_TYPE,/*28*/ MTK_PULL_PU_PD_TYPE,/*29*/
+ MTK_PULL_PU_PD_TYPE,/*30*/ MTK_PULL_PU_PD_TYPE,/*31*/
+ MTK_PULL_PU_PD_TYPE,/*32*/ MTK_PULL_PU_PD_TYPE,/*33*/
+ MTK_PULL_PU_PD_TYPE,/*34*/ MTK_PULL_PU_PD_TYPE,/*35*/
+ MTK_PULL_PU_PD_TYPE,/*36*/ MTK_PULL_PU_PD_TYPE,/*37*/
+ MTK_PULL_PU_PD_TYPE,/*38*/ MTK_PULL_PU_PD_TYPE,/*39*/
+ MTK_PULL_PU_PD_TYPE,/*40*/ MTK_PULL_PU_PD_TYPE,/*41*/
+ MTK_PULL_PU_PD_TYPE,/*42*/ MTK_PULL_PU_PD_TYPE,/*43*/
+ MTK_PULL_PU_PD_TYPE,/*44*/ MTK_PULL_PU_PD_TYPE,/*45*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*46*/ MTK_PULL_PU_PD_RSEL_TYPE,/*47*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*48*/ MTK_PULL_PU_PD_RSEL_TYPE,/*49*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*50*/ MTK_PULL_PU_PD_RSEL_TYPE,/*51*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*52*/ MTK_PULL_PU_PD_RSEL_TYPE,/*53*/
+ MTK_PULL_PU_PD_TYPE,/*54*/ MTK_PULL_PU_PD_TYPE,/*55*/
+ MTK_PULL_PU_PD_TYPE,/*56*/ MTK_PULL_PU_PD_TYPE,/*57*/
+ MTK_PULL_PU_PD_TYPE,/*58*/ MTK_PULL_PU_PD_TYPE,/*59*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*60*/ MTK_PULL_PU_PD_TYPE,/*61*/
+ MTK_PULL_PU_PD_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE,/*63*/
+ MTK_PULL_PU_PD_TYPE,/*64*/ MTK_PULL_PU_PD_TYPE,/*65*/
+ MTK_PULL_PU_PD_TYPE,/*66*/ MTK_PULL_PU_PD_TYPE,/*67*/
+ MTK_PULL_PU_PD_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/
+ MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/
+ MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/
+ MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/
+ MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/
+ MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/
+ MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/
+ MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/
+ MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/
+ MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/
+ MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/
+ MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/
+ MTK_PULL_PU_PD_TYPE,/*92*/ MTK_PULL_PU_PD_TYPE,/*93*/
+ MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/
+ MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/
+ MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_RSEL_TYPE,/*99*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*100*/ MTK_PULL_PU_PD_RSEL_TYPE,/*101*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*102*/ MTK_PULL_PU_PD_TYPE,/*103*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*104*/ MTK_PULL_PU_PD_RSEL_TYPE,/*105*/
+ MTK_PULL_PU_PD_TYPE,/*106*/ MTK_PULL_PU_PD_TYPE,/*107*/
+ MTK_PULL_PU_PD_TYPE,/*108*/ MTK_PULL_PU_PD_TYPE,/*109*/
+ MTK_PULL_PU_PD_TYPE,/*110*/ MTK_PULL_PU_PD_TYPE,/*111*/
+ MTK_PULL_PU_PD_TYPE,/*112*/ MTK_PULL_PU_PD_TYPE,/*113*/
+ MTK_PULL_PU_PD_TYPE,/*114*/ MTK_PULL_PU_PD_TYPE,/*115*/
+ MTK_PULL_PU_PD_TYPE,/*116*/ MTK_PULL_PU_PD_TYPE,/*117*/
+ MTK_PULL_PU_PD_TYPE,/*118*/ MTK_PULL_PU_PD_TYPE,/*119*/
+ MTK_PULL_PU_PD_TYPE,/*120*/ MTK_PULL_PU_PD_TYPE,/*121*/
+ MTK_PULL_PU_PD_TYPE,/*122*/ MTK_PULL_PU_PD_RSEL_TYPE,/*123*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*124*/ MTK_PULL_PUPD_R1R0_TYPE,/*125*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*126*/ MTK_PULL_PUPD_R1R0_TYPE,/*127*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*128*/ MTK_PULL_PUPD_R1R0_TYPE,/*129*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*130*/ MTK_PULL_PUPD_R1R0_TYPE,/*131*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*132*/ MTK_PULL_PUPD_R1R0_TYPE,/*133*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*134*/ MTK_PULL_PUPD_R1R0_TYPE,/*135*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*136*/ MTK_PULL_PUPD_R1R0_TYPE,/*137*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*138*/ MTK_PULL_PUPD_R1R0_TYPE,/*139*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*140*/ MTK_PULL_PUPD_R1R0_TYPE,/*141*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*142*/ MTK_PULL_PUPD_R1R0_TYPE,/*143*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*144*/ MTK_PULL_PUPD_R1R0_TYPE,/*145*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*146*/ MTK_PULL_PUPD_R1R0_TYPE,/*147*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*148*/ MTK_PULL_PUPD_R1R0_TYPE,/*149*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*150*/ MTK_PULL_PUPD_R1R0_TYPE,/*151*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*156*/ MTK_PULL_PU_PD_TYPE,/*157*/
+ MTK_PULL_PU_PD_TYPE,/*158*/ MTK_PULL_PU_PD_TYPE,/*159*/
+ MTK_PULL_PU_PD_TYPE,/*160*/ MTK_PULL_PU_PD_TYPE,/*161*/
+ MTK_PULL_PU_PD_TYPE,/*162*/ MTK_PULL_PU_PD_TYPE,/*163*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*164*/ MTK_PULL_PU_PD_RSEL_TYPE,/*165*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*166*/ MTK_PULL_PU_PD_RSEL_TYPE,/*167*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*168*/ MTK_PULL_PU_PD_TYPE,/*169*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*170*/ MTK_PULL_PU_PD_TYPE,/*171*/
+ MTK_PULL_PU_PD_TYPE,/*172*/ MTK_PULL_PU_PD_TYPE,/*173*/
+ MTK_PULL_PU_PD_TYPE,/*174*/ MTK_PULL_PU_PD_TYPE,/*175*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*176*/ MTK_PULL_PU_PD_RSEL_TYPE,/*177*/
+ MTK_PULL_PU_PD_TYPE,/*178*/ MTK_PULL_PU_PD_TYPE,/*179*/
+ MTK_PULL_PU_PD_TYPE,/*180*/ MTK_PULL_PU_PD_TYPE,/*181*/
+ MTK_PULL_PU_PD_TYPE,/*182*/ MTK_PULL_PU_PD_TYPE,/*183*/
+ MTK_PULL_PU_PD_TYPE,/*184*/ MTK_PULL_PU_PD_TYPE,/*185*/
+ MTK_PULL_PU_PD_TYPE,/*186*/ MTK_PULL_PU_PD_TYPE,/*187*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*188*/ MTK_PULL_PU_PD_RSEL_TYPE,/*189*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*190*/ MTK_PULL_PU_PD_RSEL_TYPE,/*191*/
+ MTK_PULL_PU_PD_TYPE,/*192*/ MTK_PULL_PU_PD_TYPE,/*193*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*194*/ MTK_PULL_PU_PD_RSEL_TYPE,/*195*/
+ MTK_PULL_PU_PD_TYPE,/*196*/ MTK_PULL_PU_PD_TYPE,/*197*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*198*/ MTK_PULL_PU_PD_RSEL_TYPE,/*199*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*200*/ MTK_PULL_PU_PD_RSEL_TYPE,/*201*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*202*/ MTK_PULL_PU_PD_RSEL_TYPE,/*203*/
+ MTK_PULL_PU_PD_TYPE,/*204*/ MTK_PULL_PU_PD_TYPE,/*205*/
+ MTK_PULL_PU_PD_TYPE,/*206*/ MTK_PULL_PU_PD_TYPE,/*207*/
+ MTK_PULL_PU_PD_TYPE,/*208*/ MTK_PULL_PU_PD_TYPE,/*209*/
+ MTK_PULL_PU_PD_TYPE,/*210*/ MTK_PULL_PU_PD_TYPE,/*211*/
+ MTK_PULL_PU_PD_TYPE,/*212*/ MTK_PULL_PU_PD_TYPE,/*213*/
+ MTK_PULL_PU_PD_RSEL_TYPE,/*214*/ MTK_PULL_PU_PD_RSEL_TYPE,/*215*/
+ MTK_PULL_PU_PD_TYPE,/*216*/ MTK_PULL_PUPD_R1R0_TYPE,/*217*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*218*/ MTK_PULL_PUPD_R1R0_TYPE,/*219*/
+ MTK_PULL_PU_PD_TYPE,/*220*/ MTK_PULL_PU_PD_TYPE,/*221*/
+ MTK_PULL_PU_PD_TYPE,/*222*/ MTK_PULL_PU_PD_TYPE,/*223*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*224*/ MTK_PULL_PUPD_R1R0_TYPE,/*225*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*226*/ MTK_PULL_PUPD_R1R0_TYPE,/*227*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*228*/ MTK_PULL_PUPD_R1R0_TYPE,/*229*/
+ MTK_PULL_PU_PD_TYPE,/*230*/ MTK_PULL_PU_PD_TYPE,/*231*/
+ MTK_PULL_PU_PD_TYPE,/*232*/ MTK_PULL_PU_PD_TYPE,/*233*/
+ MTK_PULL_PU_PD_TYPE,/*234*/ MTK_PULL_PU_PD_TYPE,/*235*/
+ MTK_PULL_PU_PD_TYPE,/*236*/ MTK_PULL_PU_PD_TYPE,/*237*/
+ MTK_PULL_PU_PD_TYPE,/*238*/ MTK_PULL_PU_PD_TYPE,/*239*/
+ MTK_PULL_PU_PD_TYPE,/*240*/ MTK_PULL_PU_PD_TYPE,/*241*/
+ MTK_PULL_PU_PD_TYPE,/*242*/ MTK_PULL_PU_PD_TYPE,/*243*/
+ MTK_PULL_PU_PD_TYPE,/*244*/ MTK_PULL_PU_PD_TYPE,/*245*/
+ MTK_PULL_PU_PD_TYPE,/*246*/ MTK_PULL_PU_PD_TYPE,/*247*/
+ MTK_PULL_PU_PD_TYPE,/*248*/ MTK_PULL_PU_PD_TYPE,/*249*/
+ MTK_PULL_PU_PD_TYPE,/*250*/ MTK_PULL_PU_PD_TYPE,/*251*/
+ MTK_PULL_PU_PD_TYPE,/*252*/ MTK_PULL_PU_PD_TYPE,/*253*/
+ MTK_PULL_PU_PD_TYPE,/*254*/ MTK_PULL_PU_PD_TYPE,/*255*/
+ MTK_PULL_PU_PD_TYPE,/*256*/ MTK_PULL_PU_PD_TYPE,/*257*/
+ MTK_PULL_PU_PD_TYPE,/*258*/ MTK_PULL_PUPD_R1R0_TYPE,/*259*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*260*/ MTK_PULL_PUPD_R1R0_TYPE,/*261*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*262*/ MTK_PULL_PUPD_R1R0_TYPE,/*263*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*264*/ MTK_PULL_PUPD_R1R0_TYPE,/*265*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*266*/ MTK_PULL_PUPD_R1R0_TYPE,/*267*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*268*/ MTK_PULL_PUPD_R1R0_TYPE,/*269*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*270*/
+};
+
+static const struct mtk_pin_reg_calc mt8196_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8196_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8196_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8196_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8196_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8196_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8196_pin_ies_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8196_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8196_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8196_pin_r1_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8196_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8196_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8196_pin_drv_range),
+ [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8196_pin_drv_adv_range),
+ [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8196_pin_rsel_range),
+};
+
+static const char * const mt8196_pinctrl_register_base_names[] = {
+ "iocfg0", "iocfg_rt", "iocfg_rm1", "iocfg_rm2",
+ "iocfg_rb", "iocfg_bm1", "iocfg_bm2", "iocfg_bm3",
+ "iocfg_lt", "iocfg_lm1", "iocfg_lm2", "iocfg_lb1",
+ "iocfg_lb2", "iocfg_tm1", "iocfg_tm2", "iocfg_tm3",
+};
+
+static const struct mtk_eint_hw mt8196_eint_hw = {
+ .port_mask = 0xf,
+ .ports = 3,
+ .ap_num = 293,
+ .db_cnt = 32,
+ .db_time = debounce_time_mt6765,
+};
+
+static const struct mtk_pin_soc mt8196_data = {
+ .reg_cal = mt8196_reg_cals,
+ .pins = mtk_pins_mt8196,
+ .npins = ARRAY_SIZE(mtk_pins_mt8196),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8196),
+ .eint_hw = &mt8196_eint_hw,
+ .eint_pin = eint_pins_mt8196,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .base_names = mt8196_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt8196_pinctrl_register_base_names),
+ .pull_type = mt8196_pull_type,
+ .pin_rsel = mt8196_pin_rsel_val_range,
+ .npin_rsel = ARRAY_SIZE(mt8196_pin_rsel_val_range),
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+ .adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+};
+
+static const struct of_device_id mt8196_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8196-pinctrl", .data = &mt8196_data },
+ { /* sentinel */ }
+};
+
+static struct platform_driver mt8196_pinctrl_driver = {
+ .driver = {
+ .name = "mt8196-pinctrl",
+ .of_match_table = mt8196_pinctrl_of_match,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
+ },
+ .probe = mtk_paris_pinctrl_probe,
+};
+
+static int __init mt8196_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8196_pinctrl_driver);
+}
+arch_initcall(mt8196_pinctrl_init);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index d1556b75d9ef..4918d38abfc2 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -381,10 +381,13 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
return -ENOMEM;
count_reg_names = of_property_count_strings(np, "reg-names");
- if (count_reg_names < hw->soc->nbase_names)
+ if (count_reg_names < 0)
+ return -EINVAL;
+
+ hw->eint->nbase = count_reg_names - (int)hw->soc->nbase_names;
+ if (hw->eint->nbase <= 0)
return -EINVAL;
- hw->eint->nbase = count_reg_names - hw->soc->nbase_names;
hw->eint->base = devm_kmalloc_array(&pdev->dev, hw->eint->nbase,
sizeof(*hw->eint->base), GFP_KERNEL | __GFP_ZERO);
if (!hw->eint->base) {
@@ -416,7 +419,7 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
hw->eint->pctl = hw;
hw->eint->gpio_xlate = &mtk_eint_xt;
- ret = mtk_eint_do_init(hw->eint);
+ ret = mtk_eint_do_init(hw->eint, hw->soc->eint_pin);
if (ret)
goto err_free_eint;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 8596f3541265..a4cb6d511fcd 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -86,7 +86,7 @@ static int mtk_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
return 0;
}
-static void mtk_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int mtk_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
unsigned int reg_addr;
unsigned int bit;
@@ -100,7 +100,7 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
else
reg_addr = CLR_ADDR(reg_addr, pctl);
- regmap_write(mtk_get_regmap(pctl, offset), reg_addr, bit);
+ return regmap_write(mtk_get_regmap(pctl, offset), reg_addr, bit);
}
static int mtk_pconf_set_ies_smt(struct mtk_pinctrl *pctl, unsigned pin,
@@ -809,7 +809,12 @@ static const struct pinmux_ops mtk_pmx_ops = {
static int mtk_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- mtk_gpio_set(chip, offset, value);
+ int ret;
+
+ ret = mtk_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
return pinctrl_gpio_direction_output(chip, offset);
}
@@ -893,7 +898,7 @@ static const struct gpio_chip mtk_gpio_chip = {
.direction_input = pinctrl_gpio_direction_input,
.direction_output = mtk_gpio_direction_output,
.get = mtk_gpio_get,
- .set = mtk_gpio_set,
+ .set_rv = mtk_gpio_set,
.to_irq = mtk_gpio_to_irq,
.set_config = mtk_gpio_set_config,
};
@@ -1039,7 +1044,7 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
pctl->eint->pctl = pctl;
pctl->eint->gpio_xlate = &mtk_eint_xt;
- return mtk_eint_do_init(pctl->eint);
+ return mtk_eint_do_init(pctl->eint, NULL);
}
/* This is used as a common probe function */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h
new file mode 100644
index 000000000000..0d3bb16411f8
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h
@@ -0,0 +1,2283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Copyright (C) 2024 Collabora Ltd.
+ *
+ * Author: Andy Teng <andy.teng@mediatek.com>
+ */
+
+#ifndef __PINCTRL_MTK_MT6893_H
+#define __PINCTRL_MTK_MT6893_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt6893[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "SPI6_CLK"),
+ MTK_FUNCTION(2, "I2S5_MCK"),
+ MTK_FUNCTION(3, "PWM_0"),
+ MTK_FUNCTION(4, "MD_INT0"),
+ MTK_FUNCTION(5, "TP_GPIO0_AO")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "SPI6_CSB"),
+ MTK_FUNCTION(2, "I2S5_BCK"),
+ MTK_FUNCTION(3, "PWM_1"),
+ MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(5, "TP_GPIO1_AO")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "SPI6_MI"),
+ MTK_FUNCTION(2, "I2S5_LRCK"),
+ MTK_FUNCTION(3, "PWM_2"),
+ MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(5, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "SPI6_MO"),
+ MTK_FUNCTION(2, "I2S5_DO"),
+ MTK_FUNCTION(3, "PWM_3"),
+ MTK_FUNCTION(4, "CLKM0"),
+ MTK_FUNCTION(5, "TP_GPIO3_AO")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "SPI7_A_CLK"),
+ MTK_FUNCTION(2, "I2S2_MCK"),
+ MTK_FUNCTION(3, "DMIC1_CLK"),
+ MTK_FUNCTION(4, "PCM1_DI"),
+ MTK_FUNCTION(5, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "SPI7_A_CSB"),
+ MTK_FUNCTION(2, "I2S2_BCK"),
+ MTK_FUNCTION(3, "DMIC1_DAT"),
+ MTK_FUNCTION(4, "PCM1_CLK"),
+ MTK_FUNCTION(5, "TP_GPIO5_AO")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "SPI7_A_MI"),
+ MTK_FUNCTION(2, "I2S2_LRCK"),
+ MTK_FUNCTION(3, "DMIC_CLK"),
+ MTK_FUNCTION(4, "PCM1_SYNC"),
+ MTK_FUNCTION(5, "TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "CONN_TCXOENA_REQ")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SPI7_A_MO"),
+ MTK_FUNCTION(2, "I2S2_DI"),
+ MTK_FUNCTION(3, "DMIC_DAT"),
+ MTK_FUNCTION(4, "PCM1_DO0"),
+ MTK_FUNCTION(5, "TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "WIFI_TXD")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "I2S2_DI2"),
+ MTK_FUNCTION(3, "KPCOL2"),
+ MTK_FUNCTION(4, "PCM1_DO1"),
+ MTK_FUNCTION(5, "CLKM1"),
+ MTK_FUNCTION(6, "CONN_BT_TXD")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(3, "KPROW2"),
+ MTK_FUNCTION(4, "PCM1_DO2"),
+ MTK_FUNCTION(5, "CLKM3"),
+ MTK_FUNCTION(6, "CMMCLK4")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "MSDC1_CLK_A"),
+ MTK_FUNCTION(2, "SPI4_B_CLK"),
+ MTK_FUNCTION(3, "I2S8_MCK"),
+ MTK_FUNCTION(4, "DSI1_TE"),
+ MTK_FUNCTION(5, "MD_INT0"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "MSDC1_CMD_A"),
+ MTK_FUNCTION(2, "SPI4_B_CSB"),
+ MTK_FUNCTION(3, "I2S8_BCK"),
+ MTK_FUNCTION(4, "LCM1_RST"),
+ MTK_FUNCTION(5, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "MSDC1_DAT3_A"),
+ MTK_FUNCTION(2, "SPI4_B_MI"),
+ MTK_FUNCTION(3, "I2S8_LRCK"),
+ MTK_FUNCTION(4, "DMIC1_CLK"),
+ MTK_FUNCTION(5, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "MSDC1_DAT0_A"),
+ MTK_FUNCTION(2, "SPI4_B_MO"),
+ MTK_FUNCTION(3, "I2S8_DI"),
+ MTK_FUNCTION(4, "DMIC1_DAT"),
+ MTK_FUNCTION(5, "ANT_SEL10"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "MSDC1_DAT2_A"),
+ MTK_FUNCTION(2, "SPI5_C_CLK"),
+ MTK_FUNCTION(3, "I2S9_MCK"),
+ MTK_FUNCTION(4, "IDDIG"),
+ MTK_FUNCTION(5, "ANT_SEL11"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "MSDC1_DAT1_A"),
+ MTK_FUNCTION(2, "SPI5_C_CSB"),
+ MTK_FUNCTION(3, "I2S9_BCK"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "ANT_SEL12"),
+ MTK_FUNCTION(6, "TP_GPIO5_AO")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "SPI5_C_MI"),
+ MTK_FUNCTION(3, "I2S9_LRCK"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(6, "TP_GPIO6_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI5_C_MO"),
+ MTK_FUNCTION(3, "I2S9_DO"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(6, "TP_GPIO7_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "DP_TX_HPD"),
+ MTK_FUNCTION(2, "SPI4_C_MI"),
+ MTK_FUNCTION(3, "SPI1_B_MI"),
+ MTK_FUNCTION(4, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(5, "ANT_SEL10"),
+ MTK_FUNCTION(6, "MD_INT0")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "SPI4_C_MO"),
+ MTK_FUNCTION(3, "SPI1_B_MO"),
+ MTK_FUNCTION(4, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(5, "ANT_SEL11"),
+ MTK_FUNCTION(6, "MD_INT1_C2K_UIM0_HOT_PLUG")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI4_C_CLK"),
+ MTK_FUNCTION(3, "SPI1_B_CLK"),
+ MTK_FUNCTION(4, "PWM_3"),
+ MTK_FUNCTION(5, "ANT_SEL12"),
+ MTK_FUNCTION(6, "MD_INT2_C2K_UIM1_HOT_PLUG")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "DP_TX_HPD"),
+ MTK_FUNCTION(2, "SPI4_C_CSB"),
+ MTK_FUNCTION(3, "SPI1_B_CSB"),
+ MTK_FUNCTION(4, "I2S7_MCK"),
+ MTK_FUNCTION(5, "I2S9_MCK"),
+ MTK_FUNCTION(6, "IDDIG")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "LCM1_RST"),
+ MTK_FUNCTION(2, "SPI0_C_CLK"),
+ MTK_FUNCTION(3, "SPI7_B_CLK"),
+ MTK_FUNCTION(4, "I2S7_BCK"),
+ MTK_FUNCTION(5, "I2S9_BCK"),
+ MTK_FUNCTION(6, "SCL13")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "DSI1_TE"),
+ MTK_FUNCTION(2, "SPI0_C_CSB"),
+ MTK_FUNCTION(3, "SPI7_B_CSB"),
+ MTK_FUNCTION(4, "I2S7_LRCK"),
+ MTK_FUNCTION(5, "I2S9_LRCK"),
+ MTK_FUNCTION(6, "SDA13")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "SPI0_C_MI"),
+ MTK_FUNCTION(3, "SPI7_B_MI"),
+ MTK_FUNCTION(4, "I2S6_DI"),
+ MTK_FUNCTION(5, "I2S8_DI"),
+ MTK_FUNCTION(6, "SCL_6306")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI0_C_MO"),
+ MTK_FUNCTION(3, "SPI7_B_MO"),
+ MTK_FUNCTION(4, "I2S7_DO"),
+ MTK_FUNCTION(5, "I2S9_DO"),
+ MTK_FUNCTION(6, "SDA_6306")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "PWM_2"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "USB_DRVVBUS")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "PWM_3"),
+ MTK_FUNCTION(2, "CLKM1")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "PWM_0"),
+ MTK_FUNCTION(2, "CLKM2")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "PWM_1"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "DSI1_TE")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "PWM_2"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "LCM1_RST")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "I2S3_MCK"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "I2S5_MCK"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "I2S0_MCK")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "I2S3_BCK"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "I2S5_BCK"),
+ MTK_FUNCTION(4, "PCM0_CLK"),
+ MTK_FUNCTION(5, "I2S0_BCK")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "I2S3_LRCK"),
+ MTK_FUNCTION(2, "I2S1_LRCK"),
+ MTK_FUNCTION(3, "I2S5_LRCK"),
+ MTK_FUNCTION(4, "PCM0_SYNC"),
+ MTK_FUNCTION(5, "I2S0_LRCK")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "I2S0_DI"),
+ MTK_FUNCTION(2, "I2S2_DI"),
+ MTK_FUNCTION(3, "I2S2_DI2"),
+ MTK_FUNCTION(4, "PCM0_DI"),
+ MTK_FUNCTION(5, "I2S0_DI")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "I2S3_DO"),
+ MTK_FUNCTION(2, "I2S1_DO"),
+ MTK_FUNCTION(3, "I2S5_DO"),
+ MTK_FUNCTION(4, "PCM0_DO")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "SPI5_A_CLK"),
+ MTK_FUNCTION(2, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "MD_URXD0"),
+ MTK_FUNCTION(5, "UCTS0"),
+ MTK_FUNCTION(6, "URXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "SPI5_A_CSB"),
+ MTK_FUNCTION(2, "DMIC1_DAT"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "MD_UTXD0"),
+ MTK_FUNCTION(5, "URTS0"),
+ MTK_FUNCTION(6, "UTXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "SPI5_A_MI"),
+ MTK_FUNCTION(2, "DMIC_CLK"),
+ MTK_FUNCTION(3, "DSI1_TE"),
+ MTK_FUNCTION(4, "MD_URXD1"),
+ MTK_FUNCTION(5, "URXD0"),
+ MTK_FUNCTION(6, "UCTS1"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "SPI5_A_MO"),
+ MTK_FUNCTION(2, "DMIC_DAT"),
+ MTK_FUNCTION(3, "LCM1_RST"),
+ MTK_FUNCTION(4, "MD_UTXD1"),
+ MTK_FUNCTION(5, "UTXD0"),
+ MTK_FUNCTION(6, "URTS1"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "DISP_PWM"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "DSI_TE")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "LCM_RST")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(3, "SCL_6306"),
+ MTK_FUNCTION(4, "ADSP_URXD0"),
+ MTK_FUNCTION(5, "PTA_RXD"),
+ MTK_FUNCTION(6, "SSPM_URXD_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(3, "SDA_6306"),
+ MTK_FUNCTION(4, "ADSP_UTXD0"),
+ MTK_FUNCTION(5, "PTA_TXD"),
+ MTK_FUNCTION(6, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "APU_JTAG_TDI"),
+ MTK_FUNCTION(5, "CCU_JTAG_TDI"),
+ MTK_FUNCTION(6, "LVTS_SCK")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "APU_JTAG_TMS"),
+ MTK_FUNCTION(5, "CCU_JTAG_TMS"),
+ MTK_FUNCTION(6, "LVTS_SDI")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(4, "APU_JTAG_TDO"),
+ MTK_FUNCTION(5, "CCU_JTAG_TDO"),
+ MTK_FUNCTION(6, "LVTS_SCF")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "APU_JTAG_TRST"),
+ MTK_FUNCTION(5, "CCU_JTAG_TRST"),
+ MTK_FUNCTION(6, "LVTS_FOUT")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(4, "APU_JTAG_TCK"),
+ MTK_FUNCTION(5, "CCU_JTAG_TCK"),
+ MTK_FUNCTION(6, "LVTS_SDO")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP2,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(6, "LVTS_26M")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "PCM1_CLK"),
+ MTK_FUNCTION(3, "VPU_UDI_TCK"),
+ MTK_FUNCTION(4, "UDI_TCK"),
+ MTK_FUNCTION(5, "IPU_JTAG_TCK"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "JTCK_SEL3")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "PCM1_SYNC"),
+ MTK_FUNCTION(3, "VPU_UDI_TMS"),
+ MTK_FUNCTION(4, "UDI_TMS"),
+ MTK_FUNCTION(5, "IPU_JTAG_TMS"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "JTMS_SEL3")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "PCM1_DI")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "PCM1_DO0"),
+ MTK_FUNCTION(3, "VPU_UDI_TDI"),
+ MTK_FUNCTION(4, "UDI_TDI"),
+ MTK_FUNCTION(5, "IPU_JTAG_TDI"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "JTDI_SEL3")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "PCM1_DO2"),
+ MTK_FUNCTION(3, "VPU_UDI_NTRST"),
+ MTK_FUNCTION(4, "UDI_NTRST"),
+ MTK_FUNCTION(5, "IPU_JTAG_TRST"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "JTRSTN_SEL3")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "PCM1_DO1"),
+ MTK_FUNCTION(3, "VPU_UDI_TDO"),
+ MTK_FUNCTION(4, "UDI_TDO"),
+ MTK_FUNCTION(5, "IPU_JTAG_TDO"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "JTDO_SEL3")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "MIPI2_D_SCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "MIPI2_D_SDATA"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "MIPI_M_SCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "MIPI_M_SDATA"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "MD_UCNT_A_TGL"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "DIGRF_IRQ")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "BPI_BUS0"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "BPI_BUS1"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "BPI_BUS2"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "BPI_BUS3"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "BPI_BUS4")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "BPI_BUS5")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "BPI_BUS6"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS6")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "BPI_BUS7"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS7")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "BPI_BUS8"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS8")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "BPI_BUS9"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS9")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "BPI_BUS10"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS10")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "BPI_BUS11_OLAT0"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS11_OLAT0")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "BPI_BUS12_OLAT1"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS12_OLAT1")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "BPI_BUS13_OLAT2"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS13_OLAT2")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "BPI_BUS14_OLAT3"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS14_OLAT3")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "BPI_BUS15_OLAT4"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS15_OLAT4"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "BPI_BUS16_OLAT5"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS16_OLAT5"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "BPI_BUS17_ANT0"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS17_ANT0"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "BPI_BUS18_ANT1"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS18_ANT1"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "BPI_BUS19_ANT2"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS19_ANT2"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "BPI_BUS20_ANT3"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS20_ANT3"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "BPI_BUS21_ANT4"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS21_ANT4"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "MIPI1_D_SCLK"),
+ MTK_FUNCTION(2, "CONN_MIPI1_SCLK")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "MIPI1_D_SDATA"),
+ MTK_FUNCTION(2, "CONN_MIPI1_SDATA")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "MIPI0_D_SCLK"),
+ MTK_FUNCTION(2, "CONN_MIPI0_SCLK")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "MIPI0_D_SDATA"),
+ MTK_FUNCTION(2, "CONN_MIPI0_SDATA")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "SPMI_SCL"),
+ MTK_FUNCTION(2, "SCL10")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "SPMI_SDA"),
+ MTK_FUNCTION(2, "SDA10")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "AP_GOOD")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "MD_URXD0"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "CONN_BGF_UART0_RXD")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "MD_UTXD0"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "CONN_BGF_UART0_TXD"),
+ MTK_FUNCTION(6, "WIFI_TXD")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "ADSP_URXD0"),
+ MTK_FUNCTION(3, "MD32_0_RXD"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "TP_URXD1_AO"),
+ MTK_FUNCTION(6, "TP_URXD2_AO"),
+ MTK_FUNCTION(7, "MBISTREADEN_TRIGGER")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "ADSP_UTXD0"),
+ MTK_FUNCTION(3, "MD32_0_TXD"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "TP_UTXD1_AO"),
+ MTK_FUNCTION(6, "TP_UTXD2_AO"),
+ MTK_FUNCTION(7, "MBISTWRITEEN_TRIGGER")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "TDM_LRCK"),
+ MTK_FUNCTION(2, "I2S7_LRCK"),
+ MTK_FUNCTION(3, "I2S9_LRCK"),
+ MTK_FUNCTION(4, "SPI4_A_CLK"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JDI"),
+ MTK_FUNCTION(7, "IO_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "TDM_BCK"),
+ MTK_FUNCTION(2, "I2S7_BCK"),
+ MTK_FUNCTION(3, "I2S9_BCK"),
+ MTK_FUNCTION(4, "SPI4_A_CSB"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JINTP"),
+ MTK_FUNCTION(7, "IO_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "TDM_MCK"),
+ MTK_FUNCTION(2, "I2S7_MCK"),
+ MTK_FUNCTION(3, "I2S9_MCK"),
+ MTK_FUNCTION(4, "SPI4_A_MI"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JCK"),
+ MTK_FUNCTION(7, "IO_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "TDM_DATA0"),
+ MTK_FUNCTION(2, "I2S6_DI"),
+ MTK_FUNCTION(3, "I2S8_DI"),
+ MTK_FUNCTION(4, "SPI4_A_MO"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JDO"),
+ MTK_FUNCTION(7, "IO_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "TDM_DATA1"),
+ MTK_FUNCTION(2, "I2S7_DO"),
+ MTK_FUNCTION(3, "I2S9_DO"),
+ MTK_FUNCTION(4, "DP_TX_HPD"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JMS"),
+ MTK_FUNCTION(7, "IO_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "TDM_DATA2"),
+ MTK_FUNCTION(2, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(4, "SPI5_B_CLK"),
+ MTK_FUNCTION(5, "CLKM0"),
+ MTK_FUNCTION(7, "DAP_MD32_SWD")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "TDM_DATA3"),
+ MTK_FUNCTION(2, "DMIC1_DAT"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(4, "SPI5_B_CSB"),
+ MTK_FUNCTION(5, "DP_TX_HPD"),
+ MTK_FUNCTION(6, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(7, "DAP_MD32_SWCK")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "SPI0_A_MI"),
+ MTK_FUNCTION(2, "SCP_SPI0_MI"),
+ MTK_FUNCTION(5, "DFD_TDO"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "JTDO_SEL1")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "SPI0_A_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI0_CS"),
+ MTK_FUNCTION(5, "DFD_TMS"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "JTMS_SEL1")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "SPI0_A_MO"),
+ MTK_FUNCTION(2, "SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "SCP_SDA0"),
+ MTK_FUNCTION(5, "DFD_TDI"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "JTDI_SEL1")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "SPI0_A_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "SCP_SCL0"),
+ MTK_FUNCTION(5, "DFD_TCK_XI"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "JTCK_SEL1")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "DMIC_CLK"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(4, "SPI5_B_MI"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "JTRSTN_SEL1")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "DMIC_DAT"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(4, "SPI5_B_MO"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWD")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "I2S1_MCK"),
+ MTK_FUNCTION(2, "I2S3_MCK"),
+ MTK_FUNCTION(3, "I2S2_MCK"),
+ MTK_FUNCTION(4, "DP_TX_HPD"),
+ MTK_FUNCTION(5, "I2S2_MCK"),
+ MTK_FUNCTION(6, "SRCLKENAI0"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWCK")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "I2S1_BCK"),
+ MTK_FUNCTION(2, "I2S3_BCK"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "PCM0_CLK"),
+ MTK_FUNCTION(5, "I2S2_BCK"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TDO")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "I2S1_LRCK"),
+ MTK_FUNCTION(2, "I2S3_LRCK"),
+ MTK_FUNCTION(3, "I2S2_LRCK"),
+ MTK_FUNCTION(4, "PCM0_SYNC"),
+ MTK_FUNCTION(5, "I2S2_LRCK"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TDI")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "I2S2_DI"),
+ MTK_FUNCTION(2, "I2S0_DI"),
+ MTK_FUNCTION(3, "I2S2_DI2"),
+ MTK_FUNCTION(4, "PCM0_DI"),
+ MTK_FUNCTION(5, "I2S2_DI"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TMS")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "I2S1_DO"),
+ MTK_FUNCTION(2, "I2S3_DO"),
+ MTK_FUNCTION(3, "I2S5_DO"),
+ MTK_FUNCTION(4, "PCM0_DO"),
+ MTK_FUNCTION(5, "I2S2_DI2"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TCK")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "SPI2_MI"),
+ MTK_FUNCTION(2, "SCP_SPI2_MI"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_TRST_B")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "SPI2_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI2_CS"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_DBGI_N")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "SPI2_MO"),
+ MTK_FUNCTION(2, "SCP_SPI2_MO"),
+ MTK_FUNCTION(3, "SCP_SDA1"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_DBGACK_N")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "SPI2_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI2_CK"),
+ MTK_FUNCTION(3, "SCP_SCL1")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "SCL1"),
+ MTK_FUNCTION(2, "SCP_SCL0"),
+ MTK_FUNCTION(3, "SCP_SCL1")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(2, "SCP_SDA0"),
+ MTK_FUNCTION(3, "SCP_SDA1")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "SCL9")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "SDA9")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "SCL8")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "SDA8")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "SCL7"),
+ MTK_FUNCTION(2, "DMIC1_CLK")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "SDA7"),
+ MTK_FUNCTION(2, "DMIC1_DAT")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "CMFLASH0"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "TP_UCTS1_AO"),
+ MTK_FUNCTION(4, "UCTS0"),
+ MTK_FUNCTION(5, "SCL11"),
+ MTK_FUNCTION(6, "MD32_1_GPIO0")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "CMFLASH1"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "TP_URTS1_AO"),
+ MTK_FUNCTION(4, "URTS0"),
+ MTK_FUNCTION(5, "SDA11"),
+ MTK_FUNCTION(6, "MD32_1_GPIO1")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "CMFLASH2"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "TP_UCTS2_AO"),
+ MTK_FUNCTION(4, "UCTS1"),
+ MTK_FUNCTION(5, "SCL12"),
+ MTK_FUNCTION(6, "MD32_1_GPIO2")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "CMFLASH3"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "TP_URTS2_AO"),
+ MTK_FUNCTION(4, "URTS1"),
+ MTK_FUNCTION(5, "SDA12")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "CMVREF0"),
+ MTK_FUNCTION(2, "ANT_SEL10"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TDO"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TDO"),
+ MTK_FUNCTION(5, "SCL11"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "CMVREF1"),
+ MTK_FUNCTION(2, "ANT_SEL11"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TDI"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(5, "SDA11"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TDI"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "CMVREF2"),
+ MTK_FUNCTION(2, "ANT_SEL12"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TMS"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TMS"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "CMVREF3"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TCK"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TCK"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "CMVREF4"),
+ MTK_FUNCTION(2, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TRST"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_TRST_B"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "PWM_0"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "MD_URXD0"),
+ MTK_FUNCTION(4, "MD32_0_RXD"),
+ MTK_FUNCTION(5, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_DBGI_N"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "CMMCLK3"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "MD_UTXD0"),
+ MTK_FUNCTION(4, "MD32_0_TXD"),
+ MTK_FUNCTION(5, "CONN_BT_TXD"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_DBGACK_N"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "CMMCLK4"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "MD32_1_RXD"),
+ MTK_FUNCTION(5, "ILDO_DOUT0"),
+ MTK_FUNCTION(6, "CONN_BGF_UART0_RXD")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "CMMCLK5"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "MD32_1_TXD"),
+ MTK_FUNCTION(5, "ILDO_DOUT1"),
+ MTK_FUNCTION(6, "CONN_BGF_UART0_TXD")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "SCL4")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "SDA4")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "SCL2")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "SDA2")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "CMVREF0"),
+ MTK_FUNCTION(2, "SPI3_CLK"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TDO"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TDO"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TDO"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JDO")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "CMVREF1"),
+ MTK_FUNCTION(2, "SPI3_CSB"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TDI"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TDI"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TDI"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JDI")
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "CMVREF2"),
+ MTK_FUNCTION(2, "SPI3_MI"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TMS"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TMS"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TMS"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JMS")
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "CMVREF3"),
+ MTK_FUNCTION(2, "SPI3_MO"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TCK"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TCK"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TCK"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JCK")
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "CMVREF4"),
+ MTK_FUNCTION(2, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TRSTN"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TRSTN"),
+ MTK_FUNCTION(5, "MD32_1_JTAG_TRST"),
+ MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JINTP")
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "PWM_1"),
+ MTK_FUNCTION(2, "AGPS_SYNC"),
+ MTK_FUNCTION(3, "CMMCLK5"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_AICE_TMSC")
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "MD32_0_GPIO0"),
+ MTK_FUNCTION(6, "CONN_WF_MCU_AICE_TCKC")
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "CMMCLK1"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "MD32_0_GPIO1"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_AICE_TMSC")
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "MD32_0_GPIO2"),
+ MTK_FUNCTION(6, "CONN_BGF_MCU_AICE_TCKC")
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "DP_TX_HPD"),
+ MTK_FUNCTION(5, "DSI1_TE"),
+ MTK_FUNCTION(6, "MBISTREADEN_TRIGGER"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_A32")
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(5, "LCM1_RST"),
+ MTK_FUNCTION(6, "MBISTWRITEEN_TRIGGER"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "SPI1_A_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CK"),
+ MTK_FUNCTION(3, "MRG_CLK"),
+ MTK_FUNCTION(4, "AGPS_SYNC"),
+ MTK_FUNCTION(5, "SCL12"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "SPI1_A_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CS"),
+ MTK_FUNCTION(3, "MRG_SYNC"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "SDA12"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "SPI1_A_MI"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MI"),
+ MTK_FUNCTION(3, "MRG_DI"),
+ MTK_FUNCTION(4, "PTA_RXD"),
+ MTK_FUNCTION(5, "SCL13"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "SPI1_A_MO"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MO"),
+ MTK_FUNCTION(3, "MRG_DO"),
+ MTK_FUNCTION(4, "PTA_TXD"),
+ MTK_FUNCTION(5, "SDA13"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(2, "SCP_SCL0"),
+ MTK_FUNCTION(3, "SCP_SCL1")
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(2, "SCP_SDA0"),
+ MTK_FUNCTION(3, "SCP_SDA1")
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "ANT_SEL0"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "ANT_SEL1"),
+ MTK_FUNCTION(2, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "ANT_SEL2"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_CK"),
+ MTK_FUNCTION(3, "TP_URXD1_AO"),
+ MTK_FUNCTION(5, "UCTS0"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "ANT_SEL3"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_CS"),
+ MTK_FUNCTION(3, "TP_UTXD1_AO"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(5, "URTS0"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "ANT_SEL4"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_MI"),
+ MTK_FUNCTION(3, "TP_URXD2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "UCTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "ANT_SEL5"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_MO"),
+ MTK_FUNCTION(3, "TP_UTXD2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "URTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "ANT_SEL6"),
+ MTK_FUNCTION(2, "SPI0_B_CLK"),
+ MTK_FUNCTION(3, "TP_UCTS1_AO"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "MD_UCTS0"),
+ MTK_FUNCTION(6, "SCL12"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "ANT_SEL7"),
+ MTK_FUNCTION(2, "SPI0_B_CSB"),
+ MTK_FUNCTION(3, "TP_URTS1_AO"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "MD_URTS0"),
+ MTK_FUNCTION(6, "SDA12"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "ANT_SEL8"),
+ MTK_FUNCTION(2, "SPI0_B_MI"),
+ MTK_FUNCTION(3, "TP_UCTS2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "MD_UCTS1"),
+ MTK_FUNCTION(6, "SCL13")
+ ),
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "ANT_SEL9"),
+ MTK_FUNCTION(2, "SPI0_B_MO"),
+ MTK_FUNCTION(3, "TP_URTS2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "MD_URTS1"),
+ MTK_FUNCTION(6, "SDA13")
+ ),
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "CONN_TOP_CLK"),
+ MTK_FUNCTION(2, "AUXIF_CLK0"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "CONN_TOP_DATA"),
+ MTK_FUNCTION(2, "AUXIF_ST0"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "CONN_HRST_B"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "CONN_WB_PTA"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, 176),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "CONN_BT_CLK"),
+ MTK_FUNCTION(2, "AUXIF_CLK1"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, 177),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "CONN_BT_DATA"),
+ MTK_FUNCTION(2, "AUXIF_ST1"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 178),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL0"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 179),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL1"),
+ MTK_FUNCTION(2, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 180),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL2"),
+ MTK_FUNCTION(2, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 181),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL3"),
+ MTK_FUNCTION(2, "UFS_UNIPRO_SDA")
+ ),
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 182),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL4"),
+ MTK_FUNCTION(2, "UFS_UNIPRO_SCL")
+ ),
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 183),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO183"),
+ MTK_FUNCTION(1, "MSDC0_CMD")
+ ),
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 184),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO184"),
+ MTK_FUNCTION(1, "MSDC0_DAT0")
+ ),
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 185),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO185"),
+ MTK_FUNCTION(1, "MSDC0_DAT2")
+ ),
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 186),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO186"),
+ MTK_FUNCTION(1, "MSDC0_DAT4")
+ ),
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 187),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO187"),
+ MTK_FUNCTION(1, "MSDC0_DAT6")
+ ),
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(0, 188),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO188"),
+ MTK_FUNCTION(1, "MSDC0_DAT1")
+ ),
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(0, 189),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO189"),
+ MTK_FUNCTION(1, "MSDC0_DAT5")
+ ),
+ MTK_PIN(
+ 190, "GPIO190",
+ MTK_EINT_FUNCTION(0, 190),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO190"),
+ MTK_FUNCTION(1, "MSDC0_DAT7")
+ ),
+ MTK_PIN(
+ 191, "GPIO191",
+ MTK_EINT_FUNCTION(0, 191),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO191"),
+ MTK_FUNCTION(1, "MSDC0_DSL"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "DMIC_CLK"),
+ MTK_FUNCTION(5, "DSI1_TE")
+ ),
+ MTK_PIN(
+ 192, "GPIO192",
+ MTK_EINT_FUNCTION(0, 192),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO192"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(2, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "DMIC_DAT"),
+ MTK_FUNCTION(5, "LCM1_RST")
+ ),
+ MTK_PIN(
+ 193, "GPIO193",
+ MTK_EINT_FUNCTION(0, 193),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO193"),
+ MTK_FUNCTION(1, "MSDC0_DAT3")
+ ),
+ MTK_PIN(
+ 194, "GPIO194",
+ MTK_EINT_FUNCTION(0, 194),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO194"),
+ MTK_FUNCTION(1, "MSDC0_RSTB")
+ ),
+ MTK_PIN(
+ 195, "GPIO195",
+ MTK_EINT_FUNCTION(0, 195),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO195"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 196, "GPIO196",
+ MTK_EINT_FUNCTION(0, 196),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO196"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI2"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 197, "GPIO197",
+ MTK_EINT_FUNCTION(0, 197),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO197"),
+ MTK_FUNCTION(1, "AUD_NLE_MOSI1"),
+ MTK_FUNCTION(2, "AUD_CLK_MISO"),
+ MTK_FUNCTION(3, "I2S2_MCK"),
+ MTK_FUNCTION(4, "I2S6_MCK"),
+ MTK_FUNCTION(5, "I2S8_MCK"),
+ MTK_FUNCTION(6, "UFS_UNIPRO_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 198, "GPIO198",
+ MTK_EINT_FUNCTION(0, 198),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO198"),
+ MTK_FUNCTION(1, "AUD_NLE_MOSI0"),
+ MTK_FUNCTION(2, "AUD_SYNC_MISO"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "I2S6_BCK"),
+ MTK_FUNCTION(5, "I2S8_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 199, "GPIO199",
+ MTK_EINT_FUNCTION(0, 199),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO199"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO2"),
+ MTK_FUNCTION(3, "I2S2_DI2"),
+ MTK_FUNCTION(7, "DBG_MON_B32")
+ ),
+ MTK_PIN(
+ 200, "GPIO200",
+ MTK_EINT_FUNCTION(0, 200),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO200"),
+ MTK_FUNCTION(1, "SCL6"),
+ MTK_FUNCTION(2, "SCP_SCL0"),
+ MTK_FUNCTION(3, "SCP_SCL1"),
+ MTK_FUNCTION(4, "SCL_6306")
+ ),
+ MTK_PIN(
+ 201, "GPIO201",
+ MTK_EINT_FUNCTION(0, 201),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO201"),
+ MTK_FUNCTION(1, "SDA6"),
+ MTK_FUNCTION(2, "SCP_SDA0"),
+ MTK_FUNCTION(3, "SCP_SDA1"),
+ MTK_FUNCTION(4, "SDA_6306")
+ ),
+ MTK_PIN(
+ 202, "GPIO202",
+ MTK_EINT_FUNCTION(0, 202),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO202"),
+ MTK_FUNCTION(1, "SCL5")
+ ),
+ MTK_PIN(
+ 203, "GPIO203",
+ MTK_EINT_FUNCTION(0, 203),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO203"),
+ MTK_FUNCTION(1, "SDA5")
+ ),
+ MTK_PIN(
+ 204, "GPIO204",
+ MTK_EINT_FUNCTION(0, 204),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO204"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(2, "SPI4_C_CLK"),
+ MTK_FUNCTION(3, "SPI7_B_CLK")
+ ),
+ MTK_PIN(
+ 205, "GPIO205",
+ MTK_EINT_FUNCTION(0, 205),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO205"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(2, "SPI4_C_CSB"),
+ MTK_FUNCTION(3, "SPI7_B_CSB")
+ ),
+ MTK_PIN(
+ 206, "GPIO206",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO206"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ 207, "GPIO207",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO207"),
+ MTK_FUNCTION(1, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ 208, "GPIO208",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO208"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ 209, "GPIO209",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO209"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+ ),
+ MTK_PIN(
+ 210, "GPIO210",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO210"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+ ),
+ MTK_PIN(
+ 211, "GPIO211",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO211"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+ ),
+ MTK_PIN(
+ 212, "GPIO212",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO212"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+ ),
+ MTK_PIN(
+ 213, "GPIO213",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO213"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ 214, "GPIO214",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO214"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(3, "I2S1_MCK"),
+ MTK_FUNCTION(4, "I2S7_MCK"),
+ MTK_FUNCTION(5, "I2S9_MCK"),
+ MTK_FUNCTION(6, "UFS_UNIPRO_SCL")
+ ),
+ MTK_PIN(
+ 215, "GPIO215",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO215"),
+ MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(3, "I2S1_BCK"),
+ MTK_FUNCTION(4, "I2S7_BCK"),
+ MTK_FUNCTION(5, "I2S9_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 216, "GPIO216",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO216"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(3, "I2S1_LRCK"),
+ MTK_FUNCTION(4, "I2S7_LRCK"),
+ MTK_FUNCTION(5, "I2S9_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 217, "GPIO217",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO217"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(3, "I2S1_DO"),
+ MTK_FUNCTION(4, "I2S7_DO"),
+ MTK_FUNCTION(5, "I2S9_DO"),
+ MTK_FUNCTION(6, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 218, "GPIO218",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO218"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "VOW_DAT_MISO"),
+ MTK_FUNCTION(3, "I2S2_LRCK"),
+ MTK_FUNCTION(4, "I2S6_LRCK"),
+ MTK_FUNCTION(5, "I2S8_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 219, "GPIO219",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO219"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "VOW_CLK_MISO"),
+ MTK_FUNCTION(3, "I2S2_DI"),
+ MTK_FUNCTION(4, "I2S6_DI"),
+ MTK_FUNCTION(5, "I2S8_DI"),
+ MTK_FUNCTION(6, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 220, "GPIO220",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 221, "GPIO221",
+ MTK_EINT_FUNCTION(0, 217),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 222, "GPIO222",
+ MTK_EINT_FUNCTION(0, 218),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 223, "GPIO223",
+ MTK_EINT_FUNCTION(0, 219),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 224, "GPIO224",
+ MTK_EINT_FUNCTION(0, 220),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 225, "GPIO225",
+ MTK_EINT_FUNCTION(0, 222),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 226, "GPIO226",
+ MTK_EINT_FUNCTION(0, 223),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT6893_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h
new file mode 100644
index 000000000000..c2a7e239a234
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h
@@ -0,0 +1,3085 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 MediaTek Inc.
+ * Author: Guodong Liu <Guodong.Liu@mediatek.com>
+ */
+
+#ifndef __PINCTRL_MTK_MT8196_H
+#define __PINCTRL_MTK_MT8196_H
+
+#include "pinctrl-paris.h"
+#define EINT_INVALID_BASE 0xff
+
+static const struct mtk_pin_desc mtk_pins_mt8196[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "SPI3_A_MO"),
+ MTK_FUNCTION(4, "FMI2S_B_LRCK"),
+ MTK_FUNCTION(5, "SCP_DMIC1_CLK"),
+ MTK_FUNCTION(6, "TP_GPIO14_AO")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "DMIC1_DAT"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "SPI3_A_MI"),
+ MTK_FUNCTION(4, "FMI2S_B_DI"),
+ MTK_FUNCTION(5, "SCP_DMIC1_DAT"),
+ MTK_FUNCTION(6, "TP_GPIO15_AO")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "PWM_VLP"),
+ MTK_FUNCTION(2, "DSI_HSYNC"),
+ MTK_FUNCTION(5, "RG_TSFDC_LDO_EN"),
+ MTK_FUNCTION(6, "TP_GPIO8_AO")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "MD_INT0"),
+ MTK_FUNCTION(2, "DSI1_HSYNC"),
+ MTK_FUNCTION(5, "DA_TSFDC_LDO_MODE"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "DISP_PWM1"),
+ MTK_FUNCTION(2, "MD32_0_GPIO0")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "LCM1_RST"),
+ MTK_FUNCTION(2, "SPI7_A_CLK")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "DSI1_TE"),
+ MTK_FUNCTION(2, "SPI7_A_CSB")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(2, "SPI7_A_MO"),
+ MTK_FUNCTION(3, "GPS_PPS0")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(2, "SPI7_A_MI"),
+ MTK_FUNCTION(3, "EDP_TX_HPD")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(3, "I2SIN1_LRCK"),
+ MTK_FUNCTION(7, "RG_TSFDC_LDO_REFSEL0")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(3, "I2SOUT1_DO"),
+ MTK_FUNCTION(7, "RG_TSFDC_LDO_REFSEL1")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(4, "FMI2S_B_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(3, "I2SIN1_DI_B")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "EDP_TX_HPD"),
+ MTK_FUNCTION(2, "GPS_PPS1")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "SRCLKENA2"),
+ MTK_FUNCTION(2, "DSI2_TE"),
+ MTK_FUNCTION(3, "SPMI_P_TRIG_FLAG"),
+ MTK_FUNCTION(5, "MD_INT3"),
+ MTK_FUNCTION(6, "TP_GPIO8_AO")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPMI_M_TRIG_FLAG"),
+ MTK_FUNCTION(3, "UCTS0"),
+ MTK_FUNCTION(4, "MD_INT4"),
+ MTK_FUNCTION(5, "I2SOUT2_DO"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "DP_TX_HPD"),
+ MTK_FUNCTION(3, "URTS0"),
+ MTK_FUNCTION(4, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(5, "KPROW2"),
+ MTK_FUNCTION(6, "TP_GPIO10_AO")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "MD_INT0"),
+ MTK_FUNCTION(2, "DP_OC_EN"),
+ MTK_FUNCTION(3, "UCTS1"),
+ MTK_FUNCTION(4, "MD_NTN_URXD1"),
+ MTK_FUNCTION(5, "KPCOL2"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "DMIC1_CLK"),
+ MTK_FUNCTION(2, "DP_RAUX_SBU1"),
+ MTK_FUNCTION(3, "URTS1"),
+ MTK_FUNCTION(4, "MD_NTN_UTXD1"),
+ MTK_FUNCTION(5, "I2SIN2_DI"),
+ MTK_FUNCTION(6, "TP_UTXD_GNSS_VLP")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "DMIC1_DAT"),
+ MTK_FUNCTION(2, "DP_RAUX_SBU2"),
+ MTK_FUNCTION(3, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(4, "CLKM3_A"),
+ MTK_FUNCTION(5, "I2SIN2_BCK"),
+ MTK_FUNCTION(6, "TP_URXD_GNSS_VLP")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "LCM2_RST"),
+ MTK_FUNCTION(3, "GPS_PPS1"),
+ MTK_FUNCTION(4, "CLKM2_A")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "BPI_BUS11"),
+ MTK_FUNCTION(2, "PCIE_PERSTN_1P"),
+ MTK_FUNCTION(3, "DSI1_TE"),
+ MTK_FUNCTION(4, "DMIC_CLK"),
+ MTK_FUNCTION(5, "SCP_DMIC_CLK")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "BPI_BUS12"),
+ MTK_FUNCTION(2, "PCIE_CLKREQN_1P"),
+ MTK_FUNCTION(3, "DSI2_TE"),
+ MTK_FUNCTION(4, "DMIC_DAT"),
+ MTK_FUNCTION(5, "SCP_DMIC_DAT")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "BPI_BUS13"),
+ MTK_FUNCTION(2, "PCIE_WAKEN_1P"),
+ MTK_FUNCTION(3, "DSI3_TE"),
+ MTK_FUNCTION(4, "DMIC1_CLK"),
+ MTK_FUNCTION(5, "SCP_DMIC1_CLK")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "BPI_BUS14"),
+ MTK_FUNCTION(2, "LCM1_RST"),
+ MTK_FUNCTION(3, "AGPS_SYNC"),
+ MTK_FUNCTION(4, "DMIC1_DAT"),
+ MTK_FUNCTION(5, "SCP_DMIC1_DAT"),
+ MTK_FUNCTION(6, "DISP_PWM1")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "BPI_BUS15"),
+ MTK_FUNCTION(2, "LCM2_RST"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(4, "DMIC2_CLK"),
+ MTK_FUNCTION(6, "DISP_PWM2")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "BPI_BUS16"),
+ MTK_FUNCTION(2, "LCM3_RST"),
+ MTK_FUNCTION(4, "DMIC2_DAT"),
+ MTK_FUNCTION(6, "DISP_PWM3")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "BPI_BUS17"),
+ MTK_FUNCTION(2, "UTXD4"),
+ MTK_FUNCTION(6, "DISP_PWM4"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "BPI_BUS18"),
+ MTK_FUNCTION(2, "URXD4"),
+ MTK_FUNCTION(3, "SPI2_A_MI"),
+ MTK_FUNCTION(4, "CLKM0_A"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "BPI_BUS19"),
+ MTK_FUNCTION(2, "MD_NTN_UTXD1"),
+ MTK_FUNCTION(3, "SPI2_A_MO"),
+ MTK_FUNCTION(4, "CLKM1_A"),
+ MTK_FUNCTION(6, "UCTS4"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "BPI_BUS20"),
+ MTK_FUNCTION(2, "MD_NTN_URXD1"),
+ MTK_FUNCTION(3, "SPI2_A_CLK"),
+ MTK_FUNCTION(4, "CLKM2_A"),
+ MTK_FUNCTION(5, "DSI3_HSYNC"),
+ MTK_FUNCTION(6, "URTS4"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "BPI_BUS21"),
+ MTK_FUNCTION(3, "SPI2_A_CSB"),
+ MTK_FUNCTION(4, "CLKM3_A"),
+ MTK_FUNCTION(6, "EDP_TX_HPD"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "LCM4_RST"),
+ MTK_FUNCTION(2, "DP_TX_HPD"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TCK_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TCK_VLP"),
+ MTK_FUNCTION(6, "SPU0_TCK"),
+ MTK_FUNCTION(7, "IO_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "DSI4_TE"),
+ MTK_FUNCTION(2, "DP_OC_EN"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TRSTN_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TRSTN_VLP"),
+ MTK_FUNCTION(6, "SPU0_NTRST"),
+ MTK_FUNCTION(7, "IO_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "UCTS5"),
+ MTK_FUNCTION(2, "DP_RAUX_SBU1"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TDI_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDI_VLP"),
+ MTK_FUNCTION(6, "SPU0_TDI"),
+ MTK_FUNCTION(7, "IO_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "URTS5"),
+ MTK_FUNCTION(2, "DP_RAUX_SBU2"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TDO_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TDO_VLP"),
+ MTK_FUNCTION(6, "SPU0_TDO"),
+ MTK_FUNCTION(7, "IO_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "UTXD5"),
+ MTK_FUNCTION(3, "SSPM_JTAG_TMS_VLP"),
+ MTK_FUNCTION(4, "ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(5, "SCP_JTAG0_TMS_VLP"),
+ MTK_FUNCTION(6, "SPU0_TMS"),
+ MTK_FUNCTION(7, "IO_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "URXD5"),
+ MTK_FUNCTION(3, "MD_INT3"),
+ MTK_FUNCTION(4, "CLKM0_B"),
+ MTK_FUNCTION(5, "TP_GPIO5_AO"),
+ MTK_FUNCTION(6, "SPU0_UTX"),
+ MTK_FUNCTION(7, "DAP_MD32_SWCK")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(2, "SPMI_P_TRIG_FLAG"),
+ MTK_FUNCTION(3, "MD_INT4"),
+ MTK_FUNCTION(4, "CLKM1_B"),
+ MTK_FUNCTION(5, "TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "SPU0_URX"),
+ MTK_FUNCTION(7, "DAP_MD32_SWD")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "I2S_MCK0"),
+ MTK_FUNCTION(3, "GPS_PPS0"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "I2SIN6_0_BCK"),
+ MTK_FUNCTION(3, "SPI4_B_CLK"),
+ MTK_FUNCTION(4, "UCTS2"),
+ MTK_FUNCTION(5, "CCU1_UTXD"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "I2SIN6_0_LRCK"),
+ MTK_FUNCTION(3, "SPI4_B_CSB"),
+ MTK_FUNCTION(4, "URTS2"),
+ MTK_FUNCTION(5, "CCU1_URXD"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "I2SIN6_0_DI"),
+ MTK_FUNCTION(3, "SPI4_B_MI"),
+ MTK_FUNCTION(4, "URXD2"),
+ MTK_FUNCTION(5, "CCU1_URTS"),
+ MTK_FUNCTION(6, "MD32_0_RXD"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "I2SOUT6_0_DO"),
+ MTK_FUNCTION(3, "SPI4_B_MO"),
+ MTK_FUNCTION(4, "UTXD2"),
+ MTK_FUNCTION(5, "CCU1_UCTS"),
+ MTK_FUNCTION(6, "MD32_0_TXD"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(3, "SPI3_A_CLK"),
+ MTK_FUNCTION(6, "TP_GPIO10_AO")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(2, "DSI2_HSYNC"),
+ MTK_FUNCTION(3, "SPI3_A_CSB"),
+ MTK_FUNCTION(4, "PWM_VLP"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "SCP_SCL4"),
+ MTK_FUNCTION(2, "PWM_VLP"),
+ MTK_FUNCTION(4, "SCP_ILDO_DTEST1_VLP"),
+ MTK_FUNCTION(5, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "SCP_SDA4"),
+ MTK_FUNCTION(4, "SCP_ILDO_DTEST2_VLP"),
+ MTK_FUNCTION(5, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "SCP_SCL5"),
+ MTK_FUNCTION(2, "PWM_VLP"),
+ MTK_FUNCTION(3, "CCU0_UTXD"),
+ MTK_FUNCTION(4, "SCP_ILDO_DTEST3_VLP"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "SCP_SDA5"),
+ MTK_FUNCTION(3, "CCU0_URXD"),
+ MTK_FUNCTION(4, "SCP_ILDO_DTEST4_VLP"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "SCP_SCL6"),
+ MTK_FUNCTION(2, "PWM_VLP"),
+ MTK_FUNCTION(3, "CCU0_URTS"),
+ MTK_FUNCTION(4, "DSI_HSYNC"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "SCP_SDA6"),
+ MTK_FUNCTION(3, "CCU0_UCTS"),
+ MTK_FUNCTION(4, "DSI1_HSYNC"),
+ MTK_FUNCTION(6, "TP_GPIO5_AO")
+ ),
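+	/*
+	 * Pins flagged NO_EINT_SUPPORT (GPIO52-GPIO59 among others) have
+	 * no external-interrupt line; they appear in eint_pins_mt8196
+	 * further below with EINT_INVALID_BASE as their instance.
+	 */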
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "SCP_SCL1"),
+ MTK_FUNCTION(3, "TDM_DATA2")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "SCP_SDA1"),
+ MTK_FUNCTION(3, "TDM_DATA3")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(3, "TDM_MCK")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "AUD_CLK_MISO"),
+ MTK_FUNCTION(2, "I2SOUT2_BCK"),
+ MTK_FUNCTION(3, "TDM_BCK")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(2, "I2SOUT2_LRCK"),
+ MTK_FUNCTION(3, "TDM_LRCK")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(2, "I2SOUT2_DO"),
+ MTK_FUNCTION(3, "TDM_DATA0")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(3, "TDM_DATA1")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(3, "I2SIN1_BCK")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(6, "TP_GPIO13_AO")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "MCU_M_PMIC_POC_I")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "MCU_B_PMIC_POC_I")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "MFG_PMIC_POC_I")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "PRE_UVLO")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "DPM2PMIC"),
+ MTK_FUNCTION(2, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "CMFLASH0")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "DCXO_FPM_LPM")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "SPMI_M_SCL")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "SPMI_M_SDA")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "SPMI_P_SCL")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "SPMI_P_SDA")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(2, "MD_INT4")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "CMMCLK1")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "SCP_SPI0_CK"),
+ MTK_FUNCTION(2, "SPI6_B_CLK"),
+ MTK_FUNCTION(3, "PWM_VLP"),
+ MTK_FUNCTION(4, "I2SOUT5_BCK"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "SCP_SPI0_CS"),
+ MTK_FUNCTION(2, "SPI6_B_CSB"),
+ MTK_FUNCTION(4, "I2SOUT5_LRCK"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "SCP_SPI0_MO"),
+ MTK_FUNCTION(2, "SPI6_B_MO"),
+ MTK_FUNCTION(4, "I2SOUT5_DATA0"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "SCP_SPI0_MI"),
+ MTK_FUNCTION(2, "SPI6_B_MI"),
+ MTK_FUNCTION(4, "I2SOUT5_DATA1"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "SCP_SPI1_CK"),
+ MTK_FUNCTION(2, "SPI7_B_CLK"),
+ MTK_FUNCTION(4, "I2SIN5_DATA0"),
+ MTK_FUNCTION(5, "PWM_VLP"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "SCP_SPI1_CS"),
+ MTK_FUNCTION(2, "SPI7_B_CSB"),
+ MTK_FUNCTION(4, "I2SIN5_DATA1"),
+ MTK_FUNCTION(6, "TP_GPIO5_AO")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "SCP_SPI1_MO"),
+ MTK_FUNCTION(2, "SPI7_B_MO"),
+ MTK_FUNCTION(4, "I2SIN5_BCK"),
+ MTK_FUNCTION(6, "TP_GPIO6_AO")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "SCP_SPI1_MI"),
+ MTK_FUNCTION(2, "SPI7_B_MI"),
+ MTK_FUNCTION(4, "I2SIN5_LRCK"),
+ MTK_FUNCTION(6, "TP_GPIO7_AO")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(2, "LCM1_RST"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "CMFLASH2"),
+ MTK_FUNCTION(2, "SF_D0"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "KPCOL2"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "CMFLASH3"),
+ MTK_FUNCTION(2, "SF_D1"),
+ MTK_FUNCTION(4, "DISP_PWM1"),
+ MTK_FUNCTION(6, "TP_GPIO12_AO")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "CMFLASH1"),
+ MTK_FUNCTION(2, "SF_D2"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "KPROW2"),
+ MTK_FUNCTION(6, "TP_GPIO13_AO")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "I2S_MCK1"),
+ MTK_FUNCTION(2, "SF_D3"),
+ MTK_FUNCTION(4, "MD32_0_GPIO0"),
+ MTK_FUNCTION(5, "CLKM0_A"),
+ MTK_FUNCTION(6, "TP_GPIO14_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "I2SIN1_BCK"),
+ MTK_FUNCTION(2, "I2SIN4_BCK"),
+ MTK_FUNCTION(3, "SPI6_A_CLK"),
+ MTK_FUNCTION(4, "MD32_1_GPIO0"),
+ MTK_FUNCTION(5, "CLKM1_A"),
+ MTK_FUNCTION(6, "TP_GPIO15_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "I2SIN1_LRCK"),
+ MTK_FUNCTION(2, "I2SIN4_LRCK"),
+ MTK_FUNCTION(3, "SPI6_A_CSB"),
+ MTK_FUNCTION(4, "MD32_2_GPIO0"),
+ MTK_FUNCTION(5, "CLKM2_A"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "I2SIN1_DI_A"),
+ MTK_FUNCTION(2, "I2SIN4_DATA0"),
+ MTK_FUNCTION(3, "SPI6_A_MO"),
+ MTK_FUNCTION(4, "MD32_3_GPIO0"),
+ MTK_FUNCTION(5, "CLKM3_A"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "I2SOUT1_DO"),
+ MTK_FUNCTION(2, "I2SOUT4_DATA0"),
+ MTK_FUNCTION(3, "SPI6_A_MI"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(2, "LCM2_RST"),
+ MTK_FUNCTION(3, "AUD_DAC_26M_CLK"),
+ MTK_FUNCTION(4, "SPU0_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(2, "DSI2_TE"),
+ MTK_FUNCTION(4, "SPU0_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "SCL10"),
+ MTK_FUNCTION(2, "SF_CS"),
+ MTK_FUNCTION(3, "SCP_DMIC1_CLK"),
+ MTK_FUNCTION(4, "I2SIN5_DATA2"),
+ MTK_FUNCTION(5, "SCP_SCL_OIS"),
+ MTK_FUNCTION(6, "TP_GPIO10_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "SDA10"),
+ MTK_FUNCTION(2, "SF_CK"),
+ MTK_FUNCTION(3, "SCP_DMIC1_DAT"),
+ MTK_FUNCTION(4, "I2SIN5_DATA3"),
+ MTK_FUNCTION(5, "SCP_SDA_OIS"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "DISP_PWM"),
+ MTK_FUNCTION(2, "DSI1_TE"),
+ MTK_FUNCTION(5, "I2S_MCK0"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "SCL6"),
+ MTK_FUNCTION(2, "SPU1_SCL"),
+ MTK_FUNCTION(3, "AUD_DAC_26M_CLK"),
+ MTK_FUNCTION(4, "USB_DRVVBUS_2P"),
+ MTK_FUNCTION(5, "I2S_MCK1"),
+ MTK_FUNCTION(6, "IDDIG_2P"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "SDA6"),
+ MTK_FUNCTION(2, "SPU1_SDA"),
+ MTK_FUNCTION(3, "DISP_PWM2"),
+ MTK_FUNCTION(4, "VBUSVALID_2P"),
+ MTK_FUNCTION(5, "I2S_MCK2"),
+ MTK_FUNCTION(6, "VBUSVALID_3P"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "SCP_SPI3_CK"),
+ MTK_FUNCTION(2, "SPI3_B_CLK"),
+ MTK_FUNCTION(3, "MD_UTXD0"),
+ MTK_FUNCTION(4, "TP_UTXD1_VLP"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART0_TXD"),
+ MTK_FUNCTION(6, "TP_GPIO6_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "SCP_SPI3_CS"),
+ MTK_FUNCTION(2, "SPI3_B_CSB"),
+ MTK_FUNCTION(3, "MD_URXD0"),
+ MTK_FUNCTION(4, "TP_URXD1_VLP"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART0_RXD"),
+ MTK_FUNCTION(6, "TP_GPIO7_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "SCP_SPI3_MO"),
+ MTK_FUNCTION(2, "SPI3_B_MO"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "MD32PCM_UTXD_AO_VLP"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART1_TXD"),
+ MTK_FUNCTION(6, "TP_GPIO8_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "SCP_SPI3_MI"),
+ MTK_FUNCTION(2, "SPI3_B_MI"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "MD32PCM_URXD_AO_VLP"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART1_RXD"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "SPI1_CLK"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "MD_UCTS0"),
+ MTK_FUNCTION(4, "TP_UCTS1_VLP"),
+ MTK_FUNCTION(6, "SPU0_GPIO_O"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "SPI1_CSB"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "MD_URTS0"),
+ MTK_FUNCTION(4, "TP_URTS1_VLP"),
+ MTK_FUNCTION(6, "SPU0_GPIO_I"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "SPI1_MO"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "MD_UCTS1"),
+ MTK_FUNCTION(6, "SPU1_GPIO_O"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "SPI1_MI"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "MD_URTS1"),
+ MTK_FUNCTION(6, "SPU1_GPIO_I"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "SPI0_SPU_CLK"),
+ MTK_FUNCTION(2, "SPI4_A_CLK"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_DBG_UART_TX"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "SPI0_SPU_CSB"),
+ MTK_FUNCTION(2, "SPI4_A_CSB"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "SPI0_SPU_MO"),
+ MTK_FUNCTION(2, "SPI4_A_MO"),
+ MTK_FUNCTION(3, "LCM1_RST"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "SPI0_SPU_MI"),
+ MTK_FUNCTION(2, "SPI4_A_MI"),
+ MTK_FUNCTION(3, "DSI1_TE"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "SPI5_CLK"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "DP_TX_HPD"),
+ MTK_FUNCTION(4, "AD_ILDO_DTEST0")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "SPI5_CSB"),
+ MTK_FUNCTION(2, "VBUSVALID"),
+ MTK_FUNCTION(3, "DP_OC_EN"),
+ MTK_FUNCTION(4, "AD_ILDO_DTEST1")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "SPI5_MO"),
+ MTK_FUNCTION(2, "LCM2_RST"),
+ MTK_FUNCTION(3, "DP_RAUX_SBU1"),
+ MTK_FUNCTION(4, "AD_ILDO_DTEST2"),
+ MTK_FUNCTION(6, "IDDIG_3P")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "SPI5_MI"),
+ MTK_FUNCTION(2, "DSI2_TE"),
+ MTK_FUNCTION(3, "DP_RAUX_SBU2"),
+ MTK_FUNCTION(4, "AD_ILDO_DTEST3"),
+ MTK_FUNCTION(6, "USB_DRVVBUS_3P"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "AP_GOOD"),
+ MTK_FUNCTION(2, "CONN_TCXOENA_REQ")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(5, "I2SIN2_LRCK"),
+ MTK_FUNCTION(6, "TP_UTXD_MD_VCORE")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(6, "TP_URXD_MD_VCORE")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TCK"),
+ MTK_FUNCTION(4, "UDI_TCK"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JCK"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TCK_VLP"),
+ MTK_FUNCTION(7, "JTCK2_SEL1")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TMS"),
+ MTK_FUNCTION(4, "UDI_TMS"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JMS"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TMS_VLP"),
+ MTK_FUNCTION(7, "JTMS2_SEL1")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TDI"),
+ MTK_FUNCTION(4, "UDI_TDI_0"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JDI"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDI_VLP"),
+ MTK_FUNCTION(7, "JTDI2_SEL1")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TDO"),
+ MTK_FUNCTION(4, "UDI_TDO_0"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JDO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDO_VLP"),
+ MTK_FUNCTION(7, "JTDO2_SEL1")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "DSI2_HSYNC"),
+ MTK_FUNCTION(3, "HFRP_JTAG0_TRSTN"),
+ MTK_FUNCTION(4, "UDI_NTRST"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TRSTN_VLP"),
+ MTK_FUNCTION(7, "JTRSTN2_SEL1")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "DSI3_HSYNC"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JINTP")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "CLKM0_A"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JDI"),
+ MTK_FUNCTION(6, "TSFDC_SCK"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TDI_VCORE")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "CLKM1_B"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JMS"),
+ MTK_FUNCTION(6, "TSFDC_SDI"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TMS_VCORE")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JDO"),
+ MTK_FUNCTION(6, "TSFDC_SCF"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TDO_VCORE")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(6, "TSFDC_26M")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JCK"),
+ MTK_FUNCTION(6, "TSFDC_SDO"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TCK_VCORE")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JINTP"),
+ MTK_FUNCTION(6, "TSFDC_FOUT"),
+ MTK_FUNCTION(7, "SCP_JTAG0_TRSTN_VCORE")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "MIPI0_D_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS16"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "MIPI0_D_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS17"),
+ MTK_FUNCTION(4, "PCM0_LRCK"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "MIPI1_D_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS18"),
+ MTK_FUNCTION(4, "MD_GPS_BLANK"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "MIPI1_D_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS19"),
+ MTK_FUNCTION(4, "MD_URXD1_CONN"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "MIPI2_D_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS20"),
+ MTK_FUNCTION(4, "MD_UTXD1_CONN"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "MIPI2_D_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS21"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "MIPI3_D_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS22"),
+ MTK_FUNCTION(4, "TP_UTXD_GNSS_VLP"),
+ MTK_FUNCTION(5, "MD_UTXD1_CONN"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK_VCORE")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "MIPI3_D_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS23"),
+ MTK_FUNCTION(4, "TP_URXD_GNSS_VLP"),
+ MTK_FUNCTION(5, "MD_URXD1_CONN"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS_VCORE")
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "BPI_BUS0"),
+ MTK_FUNCTION(4, "PCIE_WAKEN_1P"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "BPI_BUS1"),
+ MTK_FUNCTION(4, "PCIE_PERSTN_1P"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "BPI_BUS2"),
+ MTK_FUNCTION(2, "AUD_DAC_26M_CLK"),
+ MTK_FUNCTION(4, "PCIE_CLKREQN_1P"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TRSTN_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "BPI_BUS3"),
+ MTK_FUNCTION(2, "AUD_DAC_26M_CLK"),
+ MTK_FUNCTION(4, "TP_UTXD_MD_VLP"),
+ MTK_FUNCTION(5, "TP_GPIO0_AO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TCK_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "BPI_BUS4"),
+ MTK_FUNCTION(2, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "TP_URXD_MD_VLP"),
+ MTK_FUNCTION(5, "TP_GPIO1_AO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TMS_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "BPI_BUS5"),
+ MTK_FUNCTION(2, "GPS_PPS0"),
+ MTK_FUNCTION(5, "TP_GPIO2_AO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDO_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "BPI_BUS6"),
+ MTK_FUNCTION(2, "GPS_PPS1"),
+ MTK_FUNCTION(5, "TP_GPIO3_AO"),
+ MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDI_VCORE")
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "BPI_BUS7"),
+ MTK_FUNCTION(2, "EDP_TX_HPD"),
+ MTK_FUNCTION(5, "AGPS_SYNC"),
+ MTK_FUNCTION(6, "SSPM_UTXD_AO_VCORE")
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "MD_UCNT_A_TGL"),
+ MTK_FUNCTION(6, "TP_URTS1_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "DIGRF_IRQ"),
+ MTK_FUNCTION(6, "TP_UCTS1_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "MIPI_M_SCLK"),
+ MTK_FUNCTION(4, "UCTS2"),
+ MTK_FUNCTION(6, "TP_UTXD_CONSYS_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "MIPI_M_SDATA"),
+ MTK_FUNCTION(4, "URTS2"),
+ MTK_FUNCTION(6, "TP_URXD_CONSYS_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "BPI_BUS8"),
+ MTK_FUNCTION(4, "UTXD2"),
+ MTK_FUNCTION(5, "CLKM0_A"),
+ MTK_FUNCTION(6, "SSPM_URXD_AO_VCORE"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "BPI_BUS9"),
+ MTK_FUNCTION(4, "URXD2"),
+ MTK_FUNCTION(5, "CLKM1_A"),
+ MTK_FUNCTION(6, "TP_UTXD1_VCORE")
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "BPI_BUS10"),
+ MTK_FUNCTION(2, "MD_INT0"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "CLKM2_A"),
+ MTK_FUNCTION(6, "TP_URXD1_VCORE")
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "MD_UTXD1"),
+ MTK_FUNCTION(5, "MBISTREADEN_TRIGGER"),
+ MTK_FUNCTION(6, "CONN_BG_GPS_MCU_DBG_UART_TX")
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "MD_URXD1"),
+ MTK_FUNCTION(5, "MBISTWRITEEN_TRIGGER")
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "MD_UTXD0"),
+ MTK_FUNCTION(3, "TP_UTXD1_VLP"),
+ MTK_FUNCTION(4, "ADSP_UTXD0"),
+ MTK_FUNCTION(5, "SSPM_UTXD_AO_VLP"),
+ MTK_FUNCTION(6, "HFRP_UTXD1")
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "MD_URXD0"),
+ MTK_FUNCTION(3, "TP_URXD1_VLP"),
+ MTK_FUNCTION(4, "ADSP_URXD0"),
+ MTK_FUNCTION(5, "SSPM_URXD_AO_VLP"),
+ MTK_FUNCTION(6, "HFRP_URXD1")
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "SCP_SCL0"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "SCP_SDA0"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "SCP_SCL2"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "SCP_SDA2"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "SCP_SPI2_CK"),
+ MTK_FUNCTION(2, "SPI2_B_CLK"),
+ MTK_FUNCTION(3, "PWM_VLP"),
+ MTK_FUNCTION(4, "SCP_SCL2"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "SCP_SPI2_CS"),
+ MTK_FUNCTION(2, "SPI2_B_CSB"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "SCP_SPI2_MO"),
+ MTK_FUNCTION(2, "SPI2_B_MO"),
+ MTK_FUNCTION(4, "SCP_SDA2"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "SCP_SPI2_MI"),
+ MTK_FUNCTION(2, "SPI2_B_MI"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "CONN_TCXOENA_REQ")
+ ),
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "CMFLASH3"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "MD_GPS_L5_BLANK"),
+ MTK_FUNCTION(4, "CLKM1_A"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "CMFLASH0"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "VBUSVALID_1P"),
+ MTK_FUNCTION(4, "MD32_2_RXD"),
+ MTK_FUNCTION(5, "DISP_PWM3")
+ ),
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "CMFLASH1"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "EDP_TX_HPD"),
+ MTK_FUNCTION(4, "MD32_2_TXD"),
+ MTK_FUNCTION(5, "DISP_PWM4")
+ ),
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "SCL5"),
+ MTK_FUNCTION(2, "LCM3_RST"),
+ MTK_FUNCTION(4, "MD_URXD1_CONN"),
+ MTK_FUNCTION(6, "TP_UTXD_GNSS_VCORE")
+ ),
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "SDA5"),
+ MTK_FUNCTION(2, "DSI3_TE"),
+ MTK_FUNCTION(4, "MD_UTXD1_CONN"),
+ MTK_FUNCTION(6, "TP_URXD_GNSS_VCORE")
+ ),
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 178),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "DMIC_CLK"),
+ MTK_FUNCTION(2, "SCP_DMIC_CLK"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(4, "CLKM2_B"),
+ MTK_FUNCTION(5, "TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "SPU1_UTX"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWCK")
+ ),
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 179),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "DMIC_DAT"),
+ MTK_FUNCTION(2, "SCP_DMIC_DAT"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(4, "CLKM3_B"),
+ MTK_FUNCTION(5, "TP_GPIO8_AO"),
+ MTK_FUNCTION(6, "SPU1_URX"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWD")
+ ),
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 180),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "IDDIG_1P"),
+ MTK_FUNCTION(2, "CMVREF0"),
+ MTK_FUNCTION(3, "GPS_PPS1"),
+ MTK_FUNCTION(4, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(5, "DISP_PWM1")
+ ),
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 181),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_1P"),
+ MTK_FUNCTION(2, "CMVREF1"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TRSTN"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TRSTN"),
+ MTK_FUNCTION(6, "SPU1_NTRST"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TRST_B")
+ ),
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 182),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "SCL11"),
+ MTK_FUNCTION(2, "CMVREF2"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TCK"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TCK"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TCK"),
+ MTK_FUNCTION(6, "SPU1_TCK"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TCK")
+ ),
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 183),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO183"),
+ MTK_FUNCTION(1, "SDA11"),
+ MTK_FUNCTION(2, "CMVREF3"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TMS"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TMS"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TMS"),
+ MTK_FUNCTION(6, "SPU1_TMS"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TMS")
+ ),
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 184),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO184"),
+ MTK_FUNCTION(1, "SCL12"),
+ MTK_FUNCTION(2, "CMVREF4"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TDO"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TDO"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TDO"),
+ MTK_FUNCTION(6, "SPU1_TDO"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TDO")
+ ),
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 185),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO185"),
+ MTK_FUNCTION(1, "SDA12"),
+ MTK_FUNCTION(2, "CMVREF5"),
+ MTK_FUNCTION(3, "MFG_EB_JTAG_TDI"),
+ MTK_FUNCTION(4, "ADSP_JTAG1_TDI"),
+ MTK_FUNCTION(5, "HFRP_JTAG1_TDI"),
+ MTK_FUNCTION(6, "SPU1_TDI"),
+ MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TDI")
+ ),
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 186),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO186"),
+ MTK_FUNCTION(1, "MD_GPS_L1_BLANK"),
+ MTK_FUNCTION(2, "PMSR_SMAP"),
+ MTK_FUNCTION(3, "TP_GPIO2_AO")
+ ),
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 187),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO187"),
+ MTK_FUNCTION(1, "MD_GPS_L5_BLANK"),
+ MTK_FUNCTION(3, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO188"),
+ MTK_FUNCTION(1, "SCL2"),
+ MTK_FUNCTION(2, "SCP_SCL8")
+ ),
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO189"),
+ MTK_FUNCTION(1, "SDA2"),
+ MTK_FUNCTION(2, "SCP_SDA8")
+ ),
+ MTK_PIN(
+ 190, "GPIO190",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO190"),
+ MTK_FUNCTION(1, "SCL4"),
+ MTK_FUNCTION(2, "SCP_SCL9"),
+ MTK_FUNCTION(6, "UDI_TDI_6")
+ ),
+ MTK_PIN(
+ 191, "GPIO191",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO191"),
+ MTK_FUNCTION(1, "SDA4"),
+ MTK_FUNCTION(2, "SCP_SDA9"),
+ MTK_FUNCTION(6, "UDI_TDI_7")
+ ),
+ MTK_PIN(
+ 192, "GPIO192",
+ MTK_EINT_FUNCTION(0, 192),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO192"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(4, "MD32_3_RXD")
+ ),
+ MTK_PIN(
+ 193, "GPIO193",
+ MTK_EINT_FUNCTION(0, 193),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO193"),
+ MTK_FUNCTION(3, "CLKM0_B"),
+ MTK_FUNCTION(4, "MD32_3_TXD"),
+ MTK_FUNCTION(6, "UDI_TDO_7")
+ ),
+ MTK_PIN(
+ 194, "GPIO194",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO194"),
+ MTK_FUNCTION(1, "SCL7"),
+ MTK_FUNCTION(2, "MD32_3_GPIO0"),
+ MTK_FUNCTION(3, "CLKM2_B"),
+ MTK_FUNCTION(6, "UDI_TDI_2")
+ ),
+ MTK_PIN(
+ 195, "GPIO195",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO195"),
+ MTK_FUNCTION(1, "SDA7"),
+ MTK_FUNCTION(3, "CLKM3_B"),
+ MTK_FUNCTION(6, "UDI_TDI_3")
+ ),
+ MTK_PIN(
+ 196, "GPIO196",
+ MTK_EINT_FUNCTION(0, 196),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO196"),
+ MTK_FUNCTION(1, "CMMCLK3")
+ ),
+ MTK_PIN(
+ 197, "GPIO197",
+ MTK_EINT_FUNCTION(0, 197),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO197"),
+ MTK_FUNCTION(3, "CLKM1_B"),
+ MTK_FUNCTION(6, "UDI_TDI_1")
+ ),
+ MTK_PIN(
+ 198, "GPIO198",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO198"),
+ MTK_FUNCTION(1, "SCL8"),
+ MTK_FUNCTION(6, "UDI_TDI_4")
+ ),
+ MTK_PIN(
+ 199, "GPIO199",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO199"),
+ MTK_FUNCTION(1, "SDA8"),
+ MTK_FUNCTION(6, "UDI_TDI_5")
+ ),
+ MTK_PIN(
+ 200, "GPIO200",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO200"),
+ MTK_FUNCTION(1, "SCL1")
+ ),
+ MTK_PIN(
+ 201, "GPIO201",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO201"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(7, "TSFDC_BG_COMP")
+ ),
+ MTK_PIN(
+ 202, "GPIO202",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO202"),
+ MTK_FUNCTION(1, "SCL9"),
+ MTK_FUNCTION(2, "SCP_SCL7"),
+ MTK_FUNCTION(6, "TP_GPIO15_AO")
+ ),
+ MTK_PIN(
+ 203, "GPIO203",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO203"),
+ MTK_FUNCTION(1, "SDA9"),
+ MTK_FUNCTION(2, "SCP_SDA7"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO")
+ ),
+ MTK_PIN(
+ 204, "GPIO204",
+ MTK_EINT_FUNCTION(0, 204),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO204"),
+ MTK_FUNCTION(1, "SCL13"),
+ MTK_FUNCTION(2, "CMVREF6"),
+ MTK_FUNCTION(3, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(5, "CLKM2_B"),
+ MTK_FUNCTION(6, "TP_GPIO12_AO")
+ ),
+ MTK_PIN(
+ 205, "GPIO205",
+ MTK_EINT_FUNCTION(0, 205),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO205"),
+ MTK_FUNCTION(1, "SDA13"),
+ MTK_FUNCTION(2, "CMVREF7"),
+ MTK_FUNCTION(3, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(5, "CLKM3_B"),
+ MTK_FUNCTION(6, "TP_GPIO13_AO")
+ ),
+ MTK_PIN(
+ 206, "GPIO206",
+ MTK_EINT_FUNCTION(0, 206),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO206"),
+ MTK_FUNCTION(2, "MD32_2_GPIO0"),
+ MTK_FUNCTION(5, "VBUSVALID"),
+ MTK_FUNCTION(6, "UDI_TDO_3")
+ ),
+ MTK_PIN(
+ 207, "GPIO207",
+ MTK_EINT_FUNCTION(0, 207),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO207"),
+ MTK_FUNCTION(1, "PCIE_WAKEN_2P"),
+ MTK_FUNCTION(2, "PMSR_SMAP_MAX"),
+ MTK_FUNCTION(4, "FMI2S_A_BCK"),
+ MTK_FUNCTION(6, "UDI_TDO_4")
+ ),
+ MTK_PIN(
+ 208, "GPIO208",
+ MTK_EINT_FUNCTION(0, 208),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO208"),
+ MTK_FUNCTION(1, "PCIE_CLKREQN_2P"),
+ MTK_FUNCTION(2, "PMSR_SMAP_MAX_W"),
+ MTK_FUNCTION(4, "FMI2S_A_LRCK"),
+ MTK_FUNCTION(5, "CLKM0_B"),
+ MTK_FUNCTION(6, "UDI_TDO_5")
+ ),
+ MTK_PIN(
+ 209, "GPIO209",
+ MTK_EINT_FUNCTION(0, 209),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO209"),
+ MTK_FUNCTION(1, "PCIE_PERSTN_2P"),
+ MTK_FUNCTION(2, "PMSR_SMAP"),
+ MTK_FUNCTION(4, "FMI2S_A_DI"),
+ MTK_FUNCTION(5, "CLKM1_B"),
+ MTK_FUNCTION(6, "UDI_TDO_6")
+ ),
+ MTK_PIN(
+ 210, "GPIO210",
+ MTK_EINT_FUNCTION(0, 210),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO210"),
+ MTK_FUNCTION(1, "CMMCLK4")
+ ),
+ MTK_PIN(
+ 211, "GPIO211",
+ MTK_EINT_FUNCTION(0, 211),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO211"),
+ MTK_FUNCTION(1, "CMMCLK5"),
+ MTK_FUNCTION(2, "CONN_TCXOENA_REQ")
+ ),
+ MTK_PIN(
+ 212, "GPIO212",
+ MTK_EINT_FUNCTION(0, 212),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO212"),
+ MTK_FUNCTION(1, "CMMCLK6"),
+ MTK_FUNCTION(2, "TP_GPIO10_AO"),
+ MTK_FUNCTION(5, "IDDIG"),
+ MTK_FUNCTION(6, "UDI_TDO_1")
+ ),
+ MTK_PIN(
+ 213, "GPIO213",
+ MTK_EINT_FUNCTION(0, 213),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO213"),
+ MTK_FUNCTION(1, "CMMCLK7"),
+ MTK_FUNCTION(2, "TP_GPIO11_AO"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(6, "UDI_TDO_2")
+ ),
+ MTK_PIN(
+ 214, "GPIO214",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO214"),
+ MTK_FUNCTION(1, "SCP_SCL3"),
+ MTK_FUNCTION(2, "SDA14_E1_SCL14_E2"),
+ MTK_FUNCTION(6, "GBE1_MDC"),
+ MTK_FUNCTION(7, "GBE0_MDC")
+ ),
+ MTK_PIN(
+ 215, "GPIO215",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO215"),
+ MTK_FUNCTION(1, "SCP_SDA3"),
+ MTK_FUNCTION(2, "SCL14_E1_SDA14_E2"),
+ MTK_FUNCTION(6, "GBE1_MDIO"),
+ MTK_FUNCTION(7, "GBE0_MDIO")
+ ),
+ MTK_PIN(
+ 216, "GPIO216",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO216"),
+ MTK_FUNCTION(1, "GPS_PPS0")
+ ),
+ MTK_PIN(
+ 217, "GPIO217",
+ MTK_EINT_FUNCTION(0, 217),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO217"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(6, "TP_GPIO12_AO")
+ ),
+ MTK_PIN(
+ 218, "GPIO218",
+ MTK_EINT_FUNCTION(0, 218),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO218"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "SPI0_WP"),
+ MTK_FUNCTION(3, "MBISTREADEN_TRIGGER"),
+ MTK_FUNCTION(5, "GPS_L5_ELNA_EN"),
+ MTK_FUNCTION(6, "TP_GPIO14_AO")
+ ),
+ MTK_PIN(
+ 219, "GPIO219",
+ MTK_EINT_FUNCTION(0, 219),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO219"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "SPI0_HOLD"),
+ MTK_FUNCTION(3, "MBISTWRITEEN_TRIGGER"),
+ MTK_FUNCTION(4, "SPMI_M_TRIG_FLAG"),
+ MTK_FUNCTION(5, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN_VLP"),
+ MTK_FUNCTION(7, "JTRSTN_SEL1")
+ ),
+ MTK_PIN(
+ 220, "GPIO220",
+ MTK_EINT_FUNCTION(0, 220),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO220"),
+ MTK_FUNCTION(1, "SPI0_CLK"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK_VLP"),
+ MTK_FUNCTION(7, "JTCK_SEL1")
+ ),
+ MTK_PIN(
+ 221, "GPIO221",
+ MTK_EINT_FUNCTION(0, 221),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO221"),
+ MTK_FUNCTION(1, "SPI0_CSB"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS_VLP"),
+ MTK_FUNCTION(7, "JTMS_SEL1")
+ ),
+ MTK_PIN(
+ 222, "GPIO222",
+ MTK_EINT_FUNCTION(0, 222),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO222"),
+ MTK_FUNCTION(1, "SPI0_MO"),
+ MTK_FUNCTION(2, "SCP_SCL7"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO_VLP"),
+ MTK_FUNCTION(7, "JTDO_SEL1")
+ ),
+ MTK_PIN(
+ 223, "GPIO223",
+ MTK_EINT_FUNCTION(0, 223),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO223"),
+ MTK_FUNCTION(1, "SPI0_MI"),
+ MTK_FUNCTION(2, "SCP_SDA7"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI_VLP"),
+ MTK_FUNCTION(7, "JTDI_SEL1")
+ ),
+ MTK_PIN(
+ 224, "GPIO224",
+ MTK_EINT_FUNCTION(0, 224),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO224"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "DMIC2_CLK"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS0"),
+ MTK_FUNCTION(4, "GBE0_TXER"),
+ MTK_FUNCTION(5, "GBE1_TXER"),
+ MTK_FUNCTION(6, "GBE1_AUX_PPS0"),
+ MTK_FUNCTION(7, "MD32_1_TXD")
+ ),
+ MTK_PIN(
+ 225, "GPIO225",
+ MTK_EINT_FUNCTION(0, 225),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO225"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "DMIC2_DAT"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS1"),
+ MTK_FUNCTION(4, "GBE0_RXER"),
+ MTK_FUNCTION(5, "GBE1_RXER"),
+ MTK_FUNCTION(6, "GBE1_AUX_PPS1"),
+ MTK_FUNCTION(7, "MD32_1_RXD")
+ ),
+ MTK_PIN(
+ 226, "GPIO226",
+ MTK_EINT_FUNCTION(0, 226),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO226"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "I2SIN3_BCK"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS2"),
+ MTK_FUNCTION(4, "GBE0_COL"),
+ MTK_FUNCTION(5, "GBE1_COL"),
+ MTK_FUNCTION(6, "GBE1_AUX_PPS2"),
+ MTK_FUNCTION(7, "GBE1_MDC")
+ ),
+ MTK_PIN(
+ 227, "GPIO227",
+ MTK_EINT_FUNCTION(0, 227),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO227"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "I2SIN3_LRCK"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS3"),
+ MTK_FUNCTION(4, "GBE0_INTR"),
+ MTK_FUNCTION(5, "GBE1_INTR"),
+ MTK_FUNCTION(6, "GBE1_AUX_PPS3"),
+ MTK_FUNCTION(7, "GBE1_MDIO")
+ ),
+ MTK_PIN(
+ 228, "GPIO228",
+ MTK_EINT_FUNCTION(0, 228),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO228"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(2, "I2SIN3_DI"),
+ MTK_FUNCTION(3, "GBE0_MDC"),
+ MTK_FUNCTION(4, "GBE1_MDC"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_AICE_TCKC")
+ ),
+ MTK_PIN(
+ 229, "GPIO229",
+ MTK_EINT_FUNCTION(0, 229),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO229"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(2, "I2SOUT3_DO"),
+ MTK_FUNCTION(3, "GBE0_MDIO"),
+ MTK_FUNCTION(4, "GBE1_MDIO"),
+ MTK_FUNCTION(5, "CONN_BG_GPS_MCU_AICE_TMSC"),
+ MTK_FUNCTION(7, "AVB_CLK2")
+ ),
+ MTK_PIN(
+ 230, "GPIO230",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO230"),
+ MTK_FUNCTION(1, "CONN_TOP_CLK")
+ ),
+ MTK_PIN(
+ 231, "GPIO231",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO231"),
+ MTK_FUNCTION(1, "CONN_TOP_DATA")
+ ),
+ MTK_PIN(
+ 232, "GPIO232",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO232"),
+ MTK_FUNCTION(1, "CONN_HRST_B")
+ ),
+ MTK_PIN(
+ 233, "GPIO233",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO233"),
+ MTK_FUNCTION(1, "I2SIN0_BCK")
+ ),
+ MTK_PIN(
+ 234, "GPIO234",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO234"),
+ MTK_FUNCTION(1, "I2SIN0_LRCK")
+ ),
+ MTK_PIN(
+ 235, "GPIO235",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO235"),
+ MTK_FUNCTION(1, "I2SIN0_DI")
+ ),
+ MTK_PIN(
+ 236, "GPIO236",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO236"),
+ MTK_FUNCTION(1, "I2SOUT0_DO")
+ ),
+ MTK_PIN(
+ 237, "GPIO237",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO237"),
+ MTK_FUNCTION(1, "CONN_UARTHUB_UART_TX"),
+ MTK_FUNCTION(3, "UTXD3")
+ ),
+ MTK_PIN(
+ 238, "GPIO238",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO238"),
+ MTK_FUNCTION(1, "CONN_UARTHUB_UART_RX"),
+ MTK_FUNCTION(3, "URXD3")
+ ),
+ MTK_PIN(
+ 239, "GPIO239",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO239"),
+ MTK_FUNCTION(1, "TP_UTXD_CONSYS_VLP"),
+ MTK_FUNCTION(2, "TP_URXD_CONSYS_VLP")
+ ),
+ MTK_PIN(
+ 240, "GPIO240",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO240"),
+ MTK_FUNCTION(1, "TP_URXD_CONSYS_VLP"),
+ MTK_FUNCTION(2, "TP_UTXD_CONSYS_VLP")
+ ),
+ MTK_PIN(
+ 241, "GPIO241",
+ MTK_EINT_FUNCTION(0, 241),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO241"),
+ MTK_FUNCTION(1, "PCIE_PERSTN")
+ ),
+ MTK_PIN(
+ 242, "GPIO242",
+ MTK_EINT_FUNCTION(0, 242),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO242"),
+ MTK_FUNCTION(1, "PCIE_WAKEN")
+ ),
+ MTK_PIN(
+ 243, "GPIO243",
+ MTK_EINT_FUNCTION(0, 243),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO243"),
+ MTK_FUNCTION(1, "PCIE_CLKREQN")
+ ),
+ MTK_PIN(
+ 244, "GPIO244",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO244"),
+ MTK_FUNCTION(1, "CONN_RST")
+ ),
+ MTK_PIN(
+ 245, "GPIO245",
+ MTK_EINT_FUNCTION(0, 245),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO245")
+ ),
+ MTK_PIN(
+ 246, "GPIO246",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO246"),
+ MTK_FUNCTION(1, "CONN_PTA_TXD0")
+ ),
+ MTK_PIN(
+ 247, "GPIO247",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO247"),
+ MTK_FUNCTION(1, "CONN_PTA_RXD0")
+ ),
+ MTK_PIN(
+ 248, "GPIO248",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO248"),
+ MTK_FUNCTION(3, "UCTS3")
+ ),
+ MTK_PIN(
+ 249, "GPIO249",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO249"),
+ MTK_FUNCTION(3, "URTS3")
+ ),
+ MTK_PIN(
+ 250, "GPIO250",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO250")
+ ),
+ MTK_PIN(
+ 251, "GPIO251",
+ MTK_EINT_FUNCTION(0, 251),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO251"),
+ MTK_FUNCTION(1, "IDDIG_1P")
+ ),
+ MTK_PIN(
+ 252, "GPIO252",
+ MTK_EINT_FUNCTION(0, 252),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO252"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_1P")
+ ),
+ MTK_PIN(
+ 253, "GPIO253",
+ MTK_EINT_FUNCTION(0, 253),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO253"),
+ MTK_FUNCTION(1, "VBUSVALID_1P")
+ ),
+ MTK_PIN(
+ 254, "GPIO254",
+ MTK_EINT_FUNCTION(0, 254),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO254"),
+ MTK_FUNCTION(1, "IDDIG_2P")
+ ),
+ MTK_PIN(
+ 255, "GPIO255",
+ MTK_EINT_FUNCTION(0, 255),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO255"),
+ MTK_FUNCTION(1, "USB_DRVVBUS_2P")
+ ),
+ MTK_PIN(
+ 256, "GPIO256",
+ MTK_EINT_FUNCTION(0, 256),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO256"),
+ MTK_FUNCTION(1, "VBUSVALID_2P")
+ ),
+ MTK_PIN(
+ 257, "GPIO257",
+ MTK_EINT_FUNCTION(0, 257),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO257"),
+ MTK_FUNCTION(1, "VBUSVALID_3P")
+ ),
+ MTK_PIN(
+ 258, "GPIO258",
+ MTK_EINT_FUNCTION(0, 258),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO258"),
+ MTK_FUNCTION(7, "AVB_CLK1")
+ ),
+ MTK_PIN(
+ 259, "GPIO259",
+ MTK_EINT_FUNCTION(0, 259),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO259"),
+ MTK_FUNCTION(1, "GBE0_TXD0"),
+ MTK_FUNCTION(2, "GBE1_TXD0")
+ ),
+ MTK_PIN(
+ 260, "GPIO260",
+ MTK_EINT_FUNCTION(0, 260),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO260"),
+ MTK_FUNCTION(1, "GBE0_TXD1"),
+ MTK_FUNCTION(2, "GBE1_TXD1")
+ ),
+ MTK_PIN(
+ 261, "GPIO261",
+ MTK_EINT_FUNCTION(0, 261),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO261"),
+ MTK_FUNCTION(1, "GBE0_TXC"),
+ MTK_FUNCTION(2, "GBE1_TXC")
+ ),
+ MTK_PIN(
+ 262, "GPIO262",
+ MTK_EINT_FUNCTION(0, 262),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO262"),
+ MTK_FUNCTION(1, "GBE0_TXEN"),
+ MTK_FUNCTION(2, "GBE1_TXEN")
+ ),
+ MTK_PIN(
+ 263, "GPIO263",
+ MTK_EINT_FUNCTION(0, 263),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO263"),
+ MTK_FUNCTION(1, "GBE0_RXD0"),
+ MTK_FUNCTION(2, "GBE1_RXD0"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS0")
+ ),
+ MTK_PIN(
+ 264, "GPIO264",
+ MTK_EINT_FUNCTION(0, 264),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO264"),
+ MTK_FUNCTION(1, "GBE0_RXD1"),
+ MTK_FUNCTION(2, "GBE1_RXD1"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS1")
+ ),
+ MTK_PIN(
+ 265, "GPIO265",
+ MTK_EINT_FUNCTION(0, 265),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO265"),
+ MTK_FUNCTION(1, "GBE0_RXC"),
+ MTK_FUNCTION(2, "GBE1_RXC"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS2")
+ ),
+ MTK_PIN(
+ 266, "GPIO266",
+ MTK_EINT_FUNCTION(0, 266),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO266"),
+ MTK_FUNCTION(1, "GBE0_RXDV"),
+ MTK_FUNCTION(2, "GBE1_RXDV"),
+ MTK_FUNCTION(3, "GBE0_AUX_PPS3")
+ ),
+ MTK_PIN(
+ 267, "GPIO267",
+ MTK_EINT_FUNCTION(0, 267),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO267"),
+ MTK_FUNCTION(1, "GBE0_TXD2"),
+ MTK_FUNCTION(2, "GBE1_TXD2"),
+ MTK_FUNCTION(3, "GBE0_RXER"),
+ MTK_FUNCTION(4, "GBE1_RXER")
+ ),
+ MTK_PIN(
+ 268, "GPIO268",
+ MTK_EINT_FUNCTION(0, 268),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO268"),
+ MTK_FUNCTION(1, "GBE0_TXD3"),
+ MTK_FUNCTION(2, "GBE1_TXD3")
+ ),
+ MTK_PIN(
+ 269, "GPIO269",
+ MTK_EINT_FUNCTION(0, 269),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO269"),
+ MTK_FUNCTION(1, "GBE0_RXD2"),
+ MTK_FUNCTION(2, "GBE1_RXD2"),
+ MTK_FUNCTION(3, "GBE0_MDC")
+ ),
+ MTK_PIN(
+ 270, "GPIO270",
+ MTK_EINT_FUNCTION(0, 270),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO270"),
+ MTK_FUNCTION(1, "GBE0_RXD3"),
+ MTK_FUNCTION(2, "GBE1_RXD3"),
+ MTK_FUNCTION(3, "GBE0_MDIO")
+ ),
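+	/*
+	 * Pins 271-292 are virtual EINT lines ("veint"): they carry an
+	 * EINT mapping but no GPIO mux function, hence MTK_FUNCTION(0, NULL).
+	 */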
+ MTK_PIN(
+ 271, "veint271",
+ MTK_EINT_FUNCTION(0, 271),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 272, "veint272",
+ MTK_EINT_FUNCTION(0, 272),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 273, "veint273",
+ MTK_EINT_FUNCTION(0, 273),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 274, "veint274",
+ MTK_EINT_FUNCTION(0, 274),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 275, "veint275",
+ MTK_EINT_FUNCTION(0, 275),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 276, "veint276",
+ MTK_EINT_FUNCTION(0, 276),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 277, "veint277",
+ MTK_EINT_FUNCTION(0, 277),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 278, "veint278",
+ MTK_EINT_FUNCTION(0, 278),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 279, "veint279",
+ MTK_EINT_FUNCTION(0, 279),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 280, "veint280",
+ MTK_EINT_FUNCTION(0, 280),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 281, "veint281",
+ MTK_EINT_FUNCTION(0, 281),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 282, "veint282",
+ MTK_EINT_FUNCTION(0, 282),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 283, "veint283",
+ MTK_EINT_FUNCTION(0, 283),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 284, "veint284",
+ MTK_EINT_FUNCTION(0, 284),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 285, "veint285",
+ MTK_EINT_FUNCTION(0, 285),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 286, "veint286",
+ MTK_EINT_FUNCTION(0, 286),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 287, "veint287",
+ MTK_EINT_FUNCTION(0, 287),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 288, "veint288",
+ MTK_EINT_FUNCTION(0, 288),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 289, "veint289",
+ MTK_EINT_FUNCTION(0, 289),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 290, "veint290",
+ MTK_EINT_FUNCTION(0, 290),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 291, "veint291",
+ MTK_EINT_FUNCTION(0, 291),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ ),
+ MTK_PIN(
+ 292, "veint292",
+ MTK_EINT_FUNCTION(0, 292),
+ DRV_GRP4,
+ MTK_FUNCTION(0, NULL)
+ )
+};
+
+static struct mtk_eint_pin eint_pins_mt8196[] = {
+ MTK_EINT_PIN(0, 2, 0, 1),
+ MTK_EINT_PIN(1, 2, 1, 1),
+ MTK_EINT_PIN(2, 2, 16, 0),
+ MTK_EINT_PIN(3, 2, 17, 0),
+ MTK_EINT_PIN(4, 2, 2, 1),
+ MTK_EINT_PIN(5, 2, 3, 1),
+ MTK_EINT_PIN(6, 2, 4, 1),
+ MTK_EINT_PIN(7, 2, 5, 1),
+ MTK_EINT_PIN(8, 2, 6, 1),
+ MTK_EINT_PIN(9, 2, 18, 0),
+ MTK_EINT_PIN(10, 2, 7, 1),
+ MTK_EINT_PIN(11, 2, 8, 1),
+ MTK_EINT_PIN(12, 2, 9, 1),
+ MTK_EINT_PIN(13, 1, 4, 0),
+ MTK_EINT_PIN(14, 0, 0, 1),
+ MTK_EINT_PIN(15, 1, 5, 0),
+ MTK_EINT_PIN(16, 1, 6, 0),
+ MTK_EINT_PIN(17, 1, 7, 0),
+ MTK_EINT_PIN(18, 1, 8, 0),
+ MTK_EINT_PIN(19, 1, 9, 0),
+ MTK_EINT_PIN(20, 0, 1, 1),
+ MTK_EINT_PIN(21, 0, 10, 0),
+ MTK_EINT_PIN(22, 0, 11, 0),
+ MTK_EINT_PIN(23, 0, 12, 0),
+ MTK_EINT_PIN(24, 0, 13, 0),
+ MTK_EINT_PIN(25, 0, 14, 0),
+ MTK_EINT_PIN(26, 0, 15, 0),
+ MTK_EINT_PIN(27, 0, 2, 1),
+ MTK_EINT_PIN(28, 0, 16, 0),
+ MTK_EINT_PIN(29, 0, 17, 0),
+ MTK_EINT_PIN(30, 0, 18, 0),
+ MTK_EINT_PIN(31, 0, 3, 1),
+ MTK_EINT_PIN(32, 0, 19, 0),
+ MTK_EINT_PIN(33, 0, 20, 0),
+ MTK_EINT_PIN(34, 0, 21, 0),
+ MTK_EINT_PIN(35, 0, 22, 0),
+ MTK_EINT_PIN(36, 0, 23, 0),
+ MTK_EINT_PIN(37, 0, 24, 0),
+ MTK_EINT_PIN(38, 0, 25, 0),
+ MTK_EINT_PIN(39, 2, 10, 1),
+ MTK_EINT_PIN(40, 2, 11, 1),
+ MTK_EINT_PIN(41, 2, 12, 1),
+ MTK_EINT_PIN(42, 2, 13, 1),
+ MTK_EINT_PIN(43, 2, 14, 1),
+ MTK_EINT_PIN(44, 2, 19, 0),
+ MTK_EINT_PIN(45, 2, 20, 0),
+ MTK_EINT_PIN(46, 2, 21, 0),
+ MTK_EINT_PIN(47, 2, 22, 0),
+ MTK_EINT_PIN(48, 2, 23, 0),
+ MTK_EINT_PIN(49, 2, 24, 0),
+ MTK_EINT_PIN(50, 2, 25, 0),
+ MTK_EINT_PIN(51, 2, 26, 0),
+ MTK_EINT_PIN(52, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(53, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(54, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(55, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(56, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(57, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(58, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(59, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(60, 2, 27, 0),
+ MTK_EINT_PIN(61, 2, 28, 0),
+ MTK_EINT_PIN(62, 2, 29, 0),
+ MTK_EINT_PIN(63, 2, 30, 0),
+ MTK_EINT_PIN(64, 2, 31, 0),
+ MTK_EINT_PIN(65, 2, 32, 0),
+ MTK_EINT_PIN(66, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(67, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(68, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(69, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(70, 2, 33, 0),
+ MTK_EINT_PIN(71, 2, 34, 0),
+ MTK_EINT_PIN(72, 2, 35, 0),
+ MTK_EINT_PIN(73, 2, 36, 0),
+ MTK_EINT_PIN(74, 2, 37, 0),
+ MTK_EINT_PIN(75, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(76, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(77, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(78, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(79, 2, 38, 0),
+ MTK_EINT_PIN(80, 2, 39, 0),
+ MTK_EINT_PIN(81, 2, 40, 0),
+ MTK_EINT_PIN(82, 2, 41, 0),
+ MTK_EINT_PIN(83, 2, 42, 0),
+ MTK_EINT_PIN(84, 2, 43, 0),
+ MTK_EINT_PIN(85, 2, 44, 0),
+ MTK_EINT_PIN(86, 2, 45, 0),
+ MTK_EINT_PIN(87, 2, 46, 0),
+ MTK_EINT_PIN(88, 2, 47, 0),
+ MTK_EINT_PIN(89, 2, 48, 0),
+ MTK_EINT_PIN(90, 2, 49, 0),
+ MTK_EINT_PIN(91, 2, 50, 0),
+ MTK_EINT_PIN(92, 2, 15, 1),
+ MTK_EINT_PIN(93, 2, 51, 0),
+ MTK_EINT_PIN(94, 2, 52, 0),
+ MTK_EINT_PIN(95, 2, 53, 0),
+ MTK_EINT_PIN(96, 2, 54, 0),
+ MTK_EINT_PIN(97, 2, 55, 0),
+ MTK_EINT_PIN(98, 2, 56, 0),
+ MTK_EINT_PIN(99, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(100, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(101, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(102, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(103, 2, 57, 0),
+ MTK_EINT_PIN(104, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(105, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(106, 1, 10, 0),
+ MTK_EINT_PIN(107, 1, 11, 0),
+ MTK_EINT_PIN(108, 1, 12, 0),
+ MTK_EINT_PIN(109, 1, 13, 0),
+ MTK_EINT_PIN(110, 1, 0, 1),
+ MTK_EINT_PIN(111, 1, 1, 1),
+ MTK_EINT_PIN(112, 1, 2, 1),
+ MTK_EINT_PIN(113, 1, 3, 1),
+ MTK_EINT_PIN(114, 1, 14, 0),
+ MTK_EINT_PIN(115, 1, 15, 0),
+ MTK_EINT_PIN(116, 1, 16, 0),
+ MTK_EINT_PIN(117, 1, 17, 0),
+ MTK_EINT_PIN(118, 1, 18, 0),
+ MTK_EINT_PIN(119, 1, 19, 0),
+ MTK_EINT_PIN(120, 1, 20, 0),
+ MTK_EINT_PIN(121, 1, 21, 0),
+ MTK_EINT_PIN(122, 1, 22, 0),
+ MTK_EINT_PIN(123, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(124, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(125, 1, 23, 0),
+ MTK_EINT_PIN(126, 1, 24, 0),
+ MTK_EINT_PIN(127, 1, 25, 0),
+ MTK_EINT_PIN(128, 1, 26, 0),
+ MTK_EINT_PIN(129, 1, 27, 0),
+ MTK_EINT_PIN(130, 1, 28, 0),
+ MTK_EINT_PIN(131, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(132, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(133, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(134, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(135, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(136, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(137, 0, 26, 0),
+ MTK_EINT_PIN(138, 0, 27, 0),
+ MTK_EINT_PIN(139, 0, 28, 0),
+ MTK_EINT_PIN(140, 0, 29, 0),
+ MTK_EINT_PIN(141, 0, 30, 0),
+ MTK_EINT_PIN(142, 0, 31, 0),
+ MTK_EINT_PIN(143, 0, 32, 0),
+ MTK_EINT_PIN(144, 0, 33, 0),
+ MTK_EINT_PIN(145, 0, 34, 0),
+ MTK_EINT_PIN(146, 0, 35, 0),
+ MTK_EINT_PIN(147, 0, 36, 0),
+ MTK_EINT_PIN(148, 0, 4, 1),
+ MTK_EINT_PIN(149, 0, 37, 0),
+ MTK_EINT_PIN(150, 0, 5, 1),
+ MTK_EINT_PIN(151, 0, 38, 0),
+ MTK_EINT_PIN(152, 0, 39, 0),
+ MTK_EINT_PIN(153, 0, 40, 0),
+ MTK_EINT_PIN(154, 0, 41, 0),
+ MTK_EINT_PIN(155, 0, 42, 0),
+ MTK_EINT_PIN(156, 0, 43, 0),
+ MTK_EINT_PIN(157, 0, 44, 0),
+ MTK_EINT_PIN(158, 0, 45, 0),
+ MTK_EINT_PIN(159, 0, 46, 0),
+ MTK_EINT_PIN(160, 0, 47, 0),
+ MTK_EINT_PIN(161, 0, 48, 0),
+ MTK_EINT_PIN(162, 0, 49, 0),
+ MTK_EINT_PIN(163, 0, 50, 0),
+ MTK_EINT_PIN(164, 0, 51, 0),
+ MTK_EINT_PIN(165, 0, 52, 0),
+ MTK_EINT_PIN(166, 0, 53, 0),
+ MTK_EINT_PIN(167, 0, 54, 0),
+ MTK_EINT_PIN(168, 0, 55, 0),
+ MTK_EINT_PIN(169, 0, 56, 0),
+ MTK_EINT_PIN(170, 0, 57, 0),
+ MTK_EINT_PIN(171, 0, 58, 0),
+ MTK_EINT_PIN(172, 0, 6, 1),
+ MTK_EINT_PIN(173, 0, 7, 1),
+ MTK_EINT_PIN(174, 0, 8, 1),
+ MTK_EINT_PIN(175, 0, 9, 1),
+ MTK_EINT_PIN(176, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(177, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(178, 0, 59, 0),
+ MTK_EINT_PIN(179, 0, 60, 0),
+ MTK_EINT_PIN(180, 0, 61, 0),
+ MTK_EINT_PIN(181, 0, 62, 0),
+ MTK_EINT_PIN(182, 0, 63, 0),
+ MTK_EINT_PIN(183, 0, 64, 0),
+ MTK_EINT_PIN(184, 0, 65, 0),
+ MTK_EINT_PIN(185, 0, 66, 0),
+ MTK_EINT_PIN(186, 3, 6, 0),
+ MTK_EINT_PIN(187, 3, 7, 0),
+ MTK_EINT_PIN(188, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(189, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(190, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(191, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(192, 3, 8, 0),
+ MTK_EINT_PIN(193, 3, 9, 0),
+ MTK_EINT_PIN(194, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(195, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(196, 3, 10, 0),
+ MTK_EINT_PIN(197, 3, 11, 0),
+ MTK_EINT_PIN(198, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(199, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(200, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(201, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(202, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(203, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(204, 3, 12, 0),
+ MTK_EINT_PIN(205, 3, 13, 0),
+ MTK_EINT_PIN(206, 3, 14, 0),
+ MTK_EINT_PIN(207, 3, 0, 1),
+ MTK_EINT_PIN(208, 3, 1, 1),
+ MTK_EINT_PIN(209, 3, 2, 1),
+ MTK_EINT_PIN(210, 3, 15, 0),
+ MTK_EINT_PIN(211, 3, 3, 1),
+ MTK_EINT_PIN(212, 3, 4, 1),
+ MTK_EINT_PIN(213, 3, 5, 1),
+ MTK_EINT_PIN(214, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(215, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(216, 3, 16, 0),
+ MTK_EINT_PIN(217, 3, 17, 0),
+ MTK_EINT_PIN(218, 3, 18, 0),
+ MTK_EINT_PIN(219, 3, 19, 0),
+ MTK_EINT_PIN(220, 3, 20, 0),
+ MTK_EINT_PIN(221, 3, 21, 0),
+ MTK_EINT_PIN(222, 3, 22, 0),
+ MTK_EINT_PIN(223, 3, 23, 0),
+ MTK_EINT_PIN(224, 3, 24, 0),
+ MTK_EINT_PIN(225, 3, 25, 0),
+ MTK_EINT_PIN(226, 3, 26, 0),
+ MTK_EINT_PIN(227, 3, 27, 0),
+ MTK_EINT_PIN(228, 3, 28, 0),
+ MTK_EINT_PIN(229, 3, 29, 0),
+ MTK_EINT_PIN(230, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(231, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(232, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(233, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(234, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(235, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(236, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(237, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(238, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(239, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(240, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(241, 3, 30, 0),
+ MTK_EINT_PIN(242, 3, 31, 0),
+ MTK_EINT_PIN(243, 3, 32, 0),
+ MTK_EINT_PIN(244, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(245, 3, 45, 0),
+ MTK_EINT_PIN(246, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(247, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(248, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(249, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(250, EINT_INVALID_BASE, 0, 0),
+ MTK_EINT_PIN(251, 0, 67, 0),
+ MTK_EINT_PIN(252, 0, 68, 0),
+ MTK_EINT_PIN(253, 0, 69, 0),
+ MTK_EINT_PIN(254, 0, 70, 0),
+ MTK_EINT_PIN(255, 0, 71, 0),
+ MTK_EINT_PIN(256, 0, 72, 0),
+ MTK_EINT_PIN(257, 0, 73, 0),
+ MTK_EINT_PIN(258, 0, 74, 0),
+ MTK_EINT_PIN(259, 3, 33, 0),
+ MTK_EINT_PIN(260, 3, 34, 0),
+ MTK_EINT_PIN(261, 3, 35, 0),
+ MTK_EINT_PIN(262, 3, 36, 0),
+ MTK_EINT_PIN(263, 3, 37, 0),
+ MTK_EINT_PIN(264, 3, 38, 0),
+ MTK_EINT_PIN(265, 3, 39, 0),
+ MTK_EINT_PIN(266, 3, 40, 0),
+ MTK_EINT_PIN(267, 3, 41, 0),
+ MTK_EINT_PIN(268, 3, 42, 0),
+ MTK_EINT_PIN(269, 3, 43, 0),
+ MTK_EINT_PIN(270, 3, 44, 0),
+ MTK_EINT_PIN(271, 4, 0, 0),
+ MTK_EINT_PIN(272, 4, 1, 0),
+ MTK_EINT_PIN(273, 4, 2, 0),
+ MTK_EINT_PIN(274, 4, 3, 0),
+ MTK_EINT_PIN(275, 4, 4, 0),
+ MTK_EINT_PIN(276, 4, 5, 0),
+ MTK_EINT_PIN(277, 4, 6, 0),
+ MTK_EINT_PIN(278, 4, 7, 0),
+ MTK_EINT_PIN(279, 4, 8, 0),
+ MTK_EINT_PIN(280, 4, 9, 0),
+ MTK_EINT_PIN(281, 4, 10, 0),
+ MTK_EINT_PIN(282, 4, 11, 0),
+ MTK_EINT_PIN(283, 4, 12, 0),
+ MTK_EINT_PIN(284, 4, 13, 0),
+ MTK_EINT_PIN(285, 4, 14, 0),
+ MTK_EINT_PIN(286, 4, 15, 0),
+ MTK_EINT_PIN(287, 4, 16, 0),
+ MTK_EINT_PIN(288, 4, 17, 0),
+ MTK_EINT_PIN(289, 4, 18, 0),
+ MTK_EINT_PIN(290, 4, 19, 0),
+ MTK_EINT_PIN(291, 4, 20, 0),
+ MTK_EINT_PIN(292, 4, 21, 0),
+};
+#endif /* __PINCTRL_MTK_MT8196_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 87e958d827bf..89ef4e530fcc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -840,9 +840,6 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
const struct mtk_pin_desc *desc;
int value, err;
- if (gpio >= hw->soc->npins)
- return -EINVAL;
-
/*
* "Virtual" GPIOs are always and only used for interrupts
* Since they are only used for interrupts, they are always inputs
@@ -868,9 +865,6 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
const struct mtk_pin_desc *desc;
int value, err;
- if (gpio >= hw->soc->npins)
- return -EINVAL;
-
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
@@ -880,38 +874,29 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return !!value;
}
-static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct mtk_pinctrl *hw = gpiochip_get_data(chip);
const struct mtk_pin_desc *desc;
- if (gpio >= hw->soc->npins)
- return;
-
desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
- mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
}
static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
{
- struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-
- if (gpio >= hw->soc->npins)
- return -EINVAL;
-
return pinctrl_gpio_direction_input(chip, gpio);
}
static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
int value)
{
- struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-
- if (gpio >= hw->soc->npins)
- return -EINVAL;
+ int ret;
- mtk_gpio_set(chip, gpio, value);
+ ret = mtk_gpio_set(chip, gpio, value);
+ if (ret)
+ return ret;
return pinctrl_gpio_direction_output(chip, gpio);
}
@@ -964,7 +949,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->direction_input = mtk_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
- chip->set = mtk_gpio_set;
+ chip->set_rv = mtk_gpio_set;
chip->to_irq = mtk_gpio_to_irq;
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
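
The .set -> .set_rv change in this pinctrl-paris hunk recurs throughout the series below: gpiolib now offers int-returning line setters, so a failed register write reaches the caller instead of being silently dropped or logged ad hoc in each driver. A minimal sketch of the converted shape, with foo_* names standing in for any of the drivers touched here (FOO_REG_OUT is an illustrative register):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define FOO_REG_OUT 0x0 /* illustrative output-value register */

struct foo_gpio {
	struct regmap *map;
};

/* int-returning setter: a failed register write propagates to gpiolib */
static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	return regmap_update_bits(foo->map, FOO_REG_OUT, BIT(offset),
				  value ? BIT(offset) : 0);
}

static void foo_gpio_setup(struct gpio_chip *gc)
{
	gc->set_rv = foo_gpio_set;	/* replaces the void gc->set */
}

The abx500 hunk further down shows the payoff: its private dev_err() on write failure disappears, since the error now travels up through the new callback.
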
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index 90639bc171f6..0315e224bce6 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -3,7 +3,7 @@ menuconfig PINCTRL_MESON
tristate "Amlogic SoC pinctrl drivers"
depends on ARCH_MESON || COMPILE_TEST
depends on OF
- default y
+ default ARCH_MESON
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -17,25 +17,25 @@ config PINCTRL_MESON8
bool "Meson 8 SoC pinctrl driver"
depends on ARM
select PINCTRL_MESON8_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON8B
bool "Meson 8b SoC pinctrl driver"
depends on ARM
select PINCTRL_MESON8_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_GXBB
tristate "Meson gxbb SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON8_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_GXL
tristate "Meson gxl SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON8_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON8_PMX
tristate
@@ -44,7 +44,7 @@ config PINCTRL_MESON_AXG
tristate "Meson axg Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_AXG_PMX
tristate
@@ -53,24 +53,24 @@ config PINCTRL_MESON_G12A
tristate "Meson g12a Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_A1
tristate "Meson a1 Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_MESON_S4
tristate "Meson s4 Soc pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_AMLOGIC_A4
bool "AMLOGIC pincontrol"
depends on ARM64
- default y
+ default ARCH_MESON
help
This is the driver for the pin controller found on Amlogic SoCs.
@@ -82,12 +82,12 @@ config PINCTRL_AMLOGIC_C3
tristate "Amlogic C3 SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
config PINCTRL_AMLOGIC_T7
tristate "Amlogic T7 SoC pinctrl driver"
depends on ARM64
select PINCTRL_MESON_AXG_PMX
- default y
+ default ARCH_MESON
endif
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
index ee7bbc72f9b3..385cc619df13 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
@@ -596,20 +596,6 @@ static int aml_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
-static inline const struct aml_pctl_group *
- aml_pctl_find_group_by_name(const struct aml_pinctrl *info,
- const char *name)
-{
- int i;
-
- for (i = 0; i < info->ngroups; i++) {
- if (!strcmp(info->groups[i].name, name))
- return &info->groups[i];
- }
-
- return NULL;
-}
-
static void aml_pin_dbg_show(struct pinctrl_dev *pcdev, struct seq_file *s,
unsigned int offset)
{
@@ -806,15 +792,15 @@ static int aml_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
value ? BIT(bit) : 0);
}
-static void aml_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int aml_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct aml_gpio_bank *bank = gpiochip_get_data(chip);
unsigned int bit, reg;
aml_gpio_calc_reg_and_bit(bank, AML_REG_OUT, gpio, &reg, &bit);
- regmap_update_bits(bank->reg_gpio, reg, BIT(bit),
- value ? BIT(bit) : 0);
+ return regmap_update_bits(bank->reg_gpio, reg, BIT(bit),
+ value ? BIT(bit) : 0);
}
static int aml_gpio_get(struct gpio_chip *chip, unsigned int gpio)
@@ -832,7 +818,7 @@ static const struct gpio_chip aml_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.set_config = gpiochip_generic_config,
- .set = aml_gpio_set,
+ .set_rv = aml_gpio_set,
.get = aml_gpio_get,
.direction_input = aml_gpio_direction_input,
.direction_output = aml_gpio_direction_output,
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index e5a32a0532ee..f5be61f2ede4 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -580,9 +580,9 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
gpio, value);
}
-static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+static int meson_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
- meson_pinconf_set_drive(gpiochip_get_data(chip), gpio, value);
+ return meson_pinconf_set_drive(gpiochip_get_data(chip), gpio, value);
}
static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
@@ -616,7 +616,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
- pc->chip.set = meson_gpio_set;
+ pc->chip.set_rv = meson_gpio_set;
pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 335744ac8310..a6b106984e12 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -358,9 +358,7 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev,
val = grp->val[func];
- regmap_update_bits(info->regmap, reg, mask, val);
-
- return 0;
+ return regmap_update_bits(info->regmap, reg, mask, val);
}
static int armada_37xx_pmx_set(struct pinctrl_dev *pctldev,
@@ -402,10 +400,13 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int reg = OUTPUT_EN;
unsigned int val, mask;
+ int ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
- regmap_read(info->regmap, reg, &val);
+ ret = regmap_read(info->regmap, reg, &val);
+ if (ret)
+ return ret;
if (val & mask)
return GPIO_LINE_DIRECTION_OUT;
@@ -417,22 +418,22 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
- unsigned int reg = OUTPUT_EN;
+ unsigned int en_offset = offset;
+ unsigned int reg = OUTPUT_VAL;
unsigned int mask, val, ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
+ val = value ? mask : 0;
- ret = regmap_update_bits(info->regmap, reg, mask, mask);
-
+ ret = regmap_update_bits(info->regmap, reg, mask, val);
if (ret)
return ret;
- reg = OUTPUT_VAL;
- val = value ? mask : 0;
- regmap_update_bits(info->regmap, reg, mask, val);
+ reg = OUTPUT_EN;
+ armada_37xx_update_reg(&reg, &en_offset);
- return 0;
+ return regmap_update_bits(info->regmap, reg, mask, mask);
}
static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -440,17 +441,20 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int reg = INPUT_VAL;
unsigned int val, mask;
+ int ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
- regmap_read(info->regmap, reg, &val);
+ ret = regmap_read(info->regmap, reg, &val);
+ if (ret)
+ return ret;
return (val & mask) != 0;
}
-static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int reg = OUTPUT_VAL;
@@ -460,7 +464,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
mask = BIT(offset);
val = value ? mask : 0;
- regmap_update_bits(info->regmap, reg, mask, val);
+ return regmap_update_bits(info->regmap, reg, mask, val);
}
static int armada_37xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -469,16 +473,17 @@ static int armada_37xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
{
struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct gpio_chip *chip = range->gc;
+ int ret;
dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
offset, range->name, offset, input ? "input" : "output");
if (input)
- armada_37xx_gpio_direction_input(chip, offset);
+ ret = armada_37xx_gpio_direction_input(chip, offset);
else
- armada_37xx_gpio_direction_output(chip, offset, 0);
+ ret = armada_37xx_gpio_direction_output(chip, offset, 0);
- return 0;
+ return ret;
}
static int armada_37xx_gpio_request_enable(struct pinctrl_dev *pctldev,
@@ -513,7 +518,7 @@ static const struct pinmux_ops armada_37xx_pmx_ops = {
static const struct gpio_chip armada_37xx_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set = armada_37xx_gpio_set,
+ .set_rv = armada_37xx_gpio_set,
.get = armada_37xx_gpio_get,
.get_direction = armada_37xx_gpio_get_direction,
.direction_input = armada_37xx_gpio_direction_input,
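
Beyond the set_rv conversion, the armada-37xx hunk also reorders direction_output: the output value is latched before the output driver is enabled, so a line being switched to output never briefly drives a stale level. A reduced sketch of the corrected sequence (register offsets are illustrative, and the real driver additionally remaps reg/offset per bank via armada_37xx_update_reg()):

#include <linux/bits.h>
#include <linux/regmap.h>

#define OUTPUT_EN  0x00 /* illustrative offsets */
#define OUTPUT_VAL 0x18

static int foo_direction_output(struct regmap *map, unsigned int offset,
				int value)
{
	unsigned int mask = BIT(offset);
	int ret;

	/* program the level first... */
	ret = regmap_update_bits(map, OUTPUT_VAL, mask, value ? mask : 0);
	if (ret)
		return ret;

	/* ...then enable the output driver */
	return regmap_update_bits(map, OUTPUT_EN, mask, mask);
}
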
diff --git a/drivers/pinctrl/nomadik/Kconfig b/drivers/pinctrl/nomadik/Kconfig
index aafecf348670..1b4fe2a4c302 100644
--- a/drivers/pinctrl/nomadik/Kconfig
+++ b/drivers/pinctrl/nomadik/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-if ARCH_U8500
+if (ARCH_U8500 || COMPILE_TEST)
config PINCTRL_ABX500
bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions"
@@ -10,11 +10,11 @@ config PINCTRL_ABX500
config PINCTRL_AB8500
bool "AB8500 pin controller driver"
- depends on PINCTRL_ABX500 && ARCH_U8500
+ depends on PINCTRL_ABX500 && (ARCH_U8500 || COMPILE_TEST)
config PINCTRL_AB8505
bool "AB8505 pin controller driver"
- depends on PINCTRL_ABX500 && ARCH_U8500
+ depends on PINCTRL_ABX500 && (ARCH_U8500 || COMPILE_TEST)
endif
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 8cd4ba5cf0bd..2f55f83127cf 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -167,14 +167,10 @@ out:
return bit;
}
-static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int abx500_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
- struct abx500_pinctrl *pct = gpiochip_get_data(chip);
- int ret;
-
- ret = abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
- if (ret < 0)
- dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
+ return abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
}
static int abx500_gpio_direction_output(struct gpio_chip *chip,
@@ -540,7 +536,7 @@ static const struct gpio_chip abx500gpio_chip = {
.direction_input = abx500_gpio_direction_input,
.get = abx500_gpio_get,
.direction_output = abx500_gpio_direction_output,
- .set = abx500_gpio_set,
+ .set_rv = abx500_gpio_set,
.to_irq = abx500_gpio_to_irq,
.dbg_show = abx500_gpio_dbg_show,
};
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index a171195b3615..e1ae71610526 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -142,4 +142,21 @@ int pinconf_generic_parse_dt_config(struct device_node *np,
int pinconf_generic_parse_dt_pinmux(struct device_node *np, struct device *dev,
unsigned int **pid, unsigned int **pmux,
unsigned int *npins);
+#else
+static inline int
+pinconf_generic_parse_dt_config(struct device_node *np,
+ struct pinctrl_dev *pctldev,
+ unsigned long **configs,
+ unsigned int *nconfigs)
+{
+ return -ENOTSUPP;
+}
+
+static inline int
+pinconf_generic_parse_dt_pinmux(struct device_node *np, struct device *dev,
+ unsigned int **pid, unsigned int **pmux,
+ unsigned int *npins)
+{
+ return -ENOTSUPP;
+}
#endif
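
The new #else branch in pinconf.h follows the usual kernel pattern for compiled-out features: static inline stubs with identical signatures keep call sites building unchanged and return -ENOTSUPP at run time. In generic form (CONFIG_FOO and foo_parse() are invented for illustration):

#include <linux/errno.h>

struct device_node;

#ifdef CONFIG_FOO
int foo_parse(struct device_node *np, unsigned long **configs,
	      unsigned int *nconfigs);
#else
static inline int foo_parse(struct device_node *np, unsigned long **configs,
			    unsigned int *nconfigs)
{
	return -ENOTSUPP;
}
#endif
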
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 1d7fdcdec4c8..5cf3db6d78b7 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -37,6 +37,10 @@
#include "pinctrl-utils.h"
#include "pinctrl-amd.h"
+#ifdef CONFIG_SUSPEND
+static struct amd_gpio *pinctrl_dev;
+#endif
+
static int amd_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
{
unsigned long flags;
@@ -101,7 +105,8 @@ static int amd_gpio_get_value(struct gpio_chip *gc, unsigned offset)
return !!(pin_reg & BIT(PIN_STS_OFF));
}
-static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
+static int amd_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
u32 pin_reg;
unsigned long flags;
@@ -115,6 +120,8 @@ static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
pin_reg &= ~BIT(OUTPUT_VALUE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+
+ return 0;
}
static int amd_gpio_set_debounce(struct amd_gpio *gpio_dev, unsigned int offset,
@@ -890,6 +897,44 @@ static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
}
}
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_ACPI)
+static void amd_gpio_check_pending(void)
+{
+ struct amd_gpio *gpio_dev = pinctrl_dev;
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ int i;
+
+ if (!pm_debug_messages_on)
+ return;
+
+ for (i = 0; i < desc->npins; i++) {
+ int pin = desc->pins[i].number;
+ u32 tmp;
+
+ tmp = readl(gpio_dev->base + pin * 4);
+ if (tmp & PIN_IRQ_PENDING)
+ pm_pr_dbg("%s: GPIO %d is active: 0x%x.\n", __func__, pin, tmp);
+ }
+}
+
+static struct acpi_s2idle_dev_ops pinctrl_amd_s2idle_dev_ops = {
+ .check = amd_gpio_check_pending,
+};
+
+static void amd_gpio_register_s2idle_ops(void)
+{
+ acpi_register_lps0_dev(&pinctrl_amd_s2idle_dev_ops);
+}
+
+static void amd_gpio_unregister_s2idle_ops(void)
+{
+ acpi_unregister_lps0_dev(&pinctrl_amd_s2idle_dev_ops);
+}
+#else
+static inline void amd_gpio_register_s2idle_ops(void) {}
+static inline void amd_gpio_unregister_s2idle_ops(void) {}
+#endif
+
#ifdef CONFIG_PM_SLEEP
static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
{
@@ -942,6 +987,9 @@ static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend
static int amd_gpio_suspend(struct device *dev)
{
+#ifdef CONFIG_SUSPEND
+ pinctrl_dev = dev_get_drvdata(dev);
+#endif
return amd_gpio_suspend_hibernate_common(dev, true);
}
@@ -1115,7 +1163,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
if (gpio_dev->irq < 0)
return gpio_dev->irq;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_SUSPEND
gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
sizeof(*gpio_dev->saved_regs),
GFP_KERNEL);
@@ -1128,7 +1176,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->gc.direction_input = amd_gpio_direction_input;
gpio_dev->gc.direction_output = amd_gpio_direction_output;
gpio_dev->gc.get = amd_gpio_get_value;
- gpio_dev->gc.set = amd_gpio_set_value;
+ gpio_dev->gc.set_rv = amd_gpio_set_value;
gpio_dev->gc.set_config = amd_gpio_set_config;
gpio_dev->gc.dbg_show = amd_gpio_dbg_show;
@@ -1181,6 +1229,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpio_dev);
acpi_register_wakeup_handler(gpio_dev->irq, amd_gpio_check_wake, gpio_dev);
+ amd_gpio_register_s2idle_ops();
dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
return ret;
@@ -1199,6 +1248,7 @@ static void amd_gpio_remove(struct platform_device *pdev)
gpiochip_remove(&gpio_dev->gc);
acpi_unregister_wakeup_handler(amd_gpio_check_wake, gpio_dev);
+ amd_gpio_unregister_s2idle_ops();
}
#ifdef CONFIG_ACPI
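
The pinctrl-amd changes hook into the ACPI s2idle machinery: a .check callback registered via acpi_register_lps0_dev() runs during suspend-to-idle wakeups, letting the driver log GPIOs with pending interrupts when PM debug messages are enabled. The registration pattern in isolation (bar_* names are illustrative):

#include <linux/acpi.h>
#include <linux/suspend.h>

static void bar_s2idle_check(void)
{
	/* only do the potentially chatty scan when PM debugging is on */
	if (!pm_debug_messages_on)
		return;

	/* walk hardware state here and pm_pr_dbg() anything pending */
}

static struct acpi_s2idle_dev_ops bar_s2idle_ops = {
	.check = bar_s2idle_check,
};

static int bar_register(void)
{
	return acpi_register_lps0_dev(&bar_s2idle_ops);
}

static void bar_unregister(void)
{
	acpi_unregister_lps0_dev(&bar_s2idle_ops);
}
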
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index f861e63f4115..0f551d67d482 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -66,7 +66,7 @@ struct apple_gpio_pinctrl {
#define REG_GPIOx_DRIVE_STRENGTH1 GENMASK(23, 22)
#define REG_IRQ(g, x) (0x800 + 0x40 * (g) + 4 * ((x) >> 5))
-struct regmap_config regmap_config = {
+static const struct regmap_config regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -79,13 +79,13 @@ struct regmap_config regmap_config = {
/* No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register. */
static void apple_gpio_set_reg(struct apple_gpio_pinctrl *pctl,
- unsigned int pin, u32 mask, u32 value)
+ unsigned int pin, u32 mask, u32 value)
{
regmap_update_bits(pctl->map, REG_GPIO(pin), mask, value);
}
static u32 apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl,
- unsigned int pin)
+ unsigned int pin)
{
int ret;
u32 val;
@@ -100,9 +100,9 @@ static u32 apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl,
/* Pin controller functions */
static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *node,
- struct pinctrl_map **map,
- unsigned *num_maps)
+ struct device_node *node,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
{
unsigned reserved_maps;
struct apple_gpio_pinctrl *pctl;
@@ -147,8 +147,8 @@ static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
group_name = pinctrl_generic_get_group_name(pctldev, pin);
function_name = pinmux_generic_get_function_name(pctl->pctldev, func);
ret = pinctrl_utils_add_map_mux(pctl->pctldev, map,
- &reserved_maps, num_maps,
- group_name, function_name);
+ &reserved_maps, num_maps,
+ group_name, function_name);
if (ret)
goto free_map;
}
@@ -171,7 +171,7 @@ static const struct pinctrl_ops apple_gpio_pinctrl_ops = {
/* Pin multiplexer functions */
static int apple_gpio_pinmux_set(struct pinctrl_dev *pctldev, unsigned func,
- unsigned group)
+ unsigned group)
{
struct apple_gpio_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
@@ -237,7 +237,7 @@ static int apple_gpio_direction_input(struct gpio_chip *chip, unsigned int offse
}
static int apple_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
+ unsigned int offset, int value)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
@@ -282,7 +282,7 @@ static void apple_gpio_irq_mask(struct irq_data *data)
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(gc);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
+ FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
gpiochip_disable_irq(gc, data->hwirq);
}
@@ -294,7 +294,7 @@ static void apple_gpio_irq_unmask(struct irq_data *data)
gpiochip_enable_irq(gc, data->hwirq);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, irqtype));
+ FIELD_PREP(REG_GPIOx_MODE, irqtype));
}
static unsigned int apple_gpio_irq_startup(struct irq_data *data)
@@ -303,7 +303,7 @@ static unsigned int apple_gpio_irq_startup(struct irq_data *data)
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_GRP,
- FIELD_PREP(REG_GPIOx_GRP, 0));
+ FIELD_PREP(REG_GPIOx_GRP, 0));
apple_gpio_direction_input(chip, data->hwirq);
apple_gpio_irq_unmask(data);
@@ -320,7 +320,7 @@ static int apple_gpio_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, irqtype));
+ FIELD_PREP(REG_GPIOx_MODE, irqtype));
if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(data, handle_level_irq);
@@ -429,7 +429,7 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev)
unsigned int npins;
const char **pin_names;
unsigned int *pin_nums;
- static const char* pinmux_functions[] = {
+ static const char *pinmux_functions[] = {
"gpio", "periph1", "periph2", "periph3"
};
unsigned int i, nirqs = 0;
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 8b01d312305a..ca8a54a43ff5 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -390,7 +390,7 @@ static int atmel_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static void atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
+static int atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
struct atmel_pin *pin = atmel_pioctrl->pins[offset];
@@ -398,10 +398,12 @@ static void atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
atmel_gpio_write(atmel_pioctrl, pin->bank,
val ? ATMEL_PIO_SODR : ATMEL_PIO_CODR,
BIT(pin->line));
+
+ return 0;
}
-static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
unsigned int bank;
@@ -431,6 +433,8 @@ static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
bits[word] >>= ATMEL_PIO_NPINS_PER_BANK;
#endif
}
+
+ return 0;
}
static struct gpio_chip atmel_gpio_chip = {
@@ -438,8 +442,8 @@ static struct gpio_chip atmel_gpio_chip = {
.get = atmel_gpio_get,
.get_multiple = atmel_gpio_get_multiple,
.direction_output = atmel_gpio_direction_output,
- .set = atmel_gpio_set,
- .set_multiple = atmel_gpio_set_multiple,
+ .set_rv = atmel_gpio_set,
+ .set_multiple_rv = atmel_gpio_set_multiple,
.to_irq = atmel_gpio_to_irq,
.base = 0,
};
@@ -609,8 +613,10 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
if (ret)
goto exit;
- pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
+ ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
group, func);
+ if (ret)
+ goto exit;
if (num_configs) {
ret = pinctrl_utils_add_map_configs(pctldev, map,
@@ -1206,7 +1212,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
dev_dbg(dev, "bank %i: irq=%d\n", i, ret);
}
- atmel_pioctrl->irq_domain = irq_domain_add_linear(dev->of_node,
+ atmel_pioctrl->irq_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node),
atmel_pioctrl->gpio_chip->ngpio,
&irq_domain_simple_ops, NULL);
if (!atmel_pioctrl->irq_domain)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 93ab277d9943..6c2727bd55bc 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1449,18 +1449,19 @@ static int at91_gpio_get(struct gpio_chip *chip, unsigned offset)
return (pdsr & mask) != 0;
}
-static void at91_gpio_set(struct gpio_chip *chip, unsigned offset,
- int val)
+static int at91_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct at91_gpio_chip *at91_gpio = gpiochip_get_data(chip);
void __iomem *pio = at91_gpio->regbase;
unsigned mask = 1 << offset;
writel_relaxed(mask, pio + (val ? PIO_SODR : PIO_CODR));
+
+ return 0;
}
-static void at91_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int at91_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
struct at91_gpio_chip *at91_gpio = gpiochip_get_data(chip);
void __iomem *pio = at91_gpio->regbase;
@@ -1472,6 +1473,8 @@ static void at91_gpio_set_multiple(struct gpio_chip *chip,
writel_relaxed(set_mask, pio + PIO_SODR);
writel_relaxed(clear_mask, pio + PIO_CODR);
+
+ return 0;
}
static int at91_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -1798,8 +1801,8 @@ static const struct gpio_chip at91_gpio_template = {
.direction_input = at91_gpio_direction_input,
.get = at91_gpio_get,
.direction_output = at91_gpio_direction_output,
- .set = at91_gpio_set,
- .set_multiple = at91_gpio_set_multiple,
+ .set_rv = at91_gpio_set,
+ .set_multiple_rv = at91_gpio_set_multiple,
.dbg_show = at91_gpio_dbg_show,
.can_sleep = false,
.ngpio = MAX_NB_GPIO_PER_BANK,
@@ -1819,12 +1822,16 @@ static int at91_gpio_probe(struct platform_device *pdev)
struct at91_gpio_chip *at91_chip = NULL;
struct gpio_chip *chip;
struct pinctrl_gpio_range *range;
+ int alias_idx;
int ret = 0;
int irq, i;
- int alias_idx = of_alias_get_id(np, "gpio");
uint32_t ngpio;
char **names;
+ alias_idx = of_alias_get_id(np, "gpio");
+ if (alias_idx < 0)
+ return alias_idx;
+
BUG_ON(alias_idx >= ARRAY_SIZE(gpio_chips));
if (gpio_chips[alias_idx])
return dev_err_probe(dev, -EBUSY, "%d slot is occupied.\n", alias_idx);
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index 2b4805e74eed..fff408b60c4a 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -192,34 +192,30 @@ static int axp20x_gpio_get_direction(struct gpio_chip *chip,
static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
- chip->set(chip, offset, value);
-
- return 0;
+ return chip->set_rv(chip, offset, value);
}
-static void axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct axp20x_pctl *pctl = gpiochip_get_data(chip);
int reg;
/* AXP209 has GPIO3 status sharing the settings register */
- if (offset == 3) {
- regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL,
- AXP20X_GPIO3_FUNCTIONS,
- value ? AXP20X_GPIO3_FUNCTION_OUT_HIGH :
- AXP20X_GPIO3_FUNCTION_OUT_LOW);
- return;
- }
+ if (offset == 3)
+ return regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL,
+ AXP20X_GPIO3_FUNCTIONS,
+ value ?
+ AXP20X_GPIO3_FUNCTION_OUT_HIGH :
+ AXP20X_GPIO3_FUNCTION_OUT_LOW);
reg = axp20x_gpio_get_reg(offset);
if (reg < 0)
- return;
+ return reg;
- regmap_update_bits(pctl->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- value ? AXP20X_GPIO_FUNCTION_OUT_HIGH :
- AXP20X_GPIO_FUNCTION_OUT_LOW);
+ return regmap_update_bits(pctl->regmap, reg, AXP20X_GPIO_FUNCTIONS,
+ value ? AXP20X_GPIO_FUNCTION_OUT_HIGH :
+ AXP20X_GPIO_FUNCTION_OUT_LOW);
}
static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset,
@@ -229,12 +225,11 @@ static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset,
int reg;
/* AXP209 GPIO3 settings have a different layout */
- if (offset == 3) {
+ if (offset == 3)
return regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL,
AXP20X_GPIO3_FUNCTIONS,
config == AXP20X_MUX_GPIO_OUT ? AXP20X_GPIO3_FUNCTION_OUT_LOW :
AXP20X_GPIO3_FUNCTION_INPUT);
- }
reg = axp20x_gpio_get_reg(offset);
if (reg < 0)
@@ -468,7 +463,7 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
pctl->chip.owner = THIS_MODULE;
pctl->chip.get = axp20x_gpio_get;
pctl->chip.get_direction = axp20x_gpio_get_direction;
- pctl->chip.set = axp20x_gpio_set;
+ pctl->chip.set_rv = axp20x_gpio_set;
pctl->chip.direction_input = pinctrl_gpio_direction_input;
pctl->chip.direction_output = axp20x_gpio_output;
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 3cfbcaee9e65..8a2fd632bdd4 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -742,14 +742,15 @@ static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
return reg_val ? 1 : 0;
}
-static void cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
- int val)
+static int cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, val ? bit : 0);
+ return cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit,
+ val ? bit : 0);
}
static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
@@ -908,12 +909,12 @@ static int cy8c95x0_gpio_get_multiple(struct gpio_chip *gc,
return cy8c95x0_read_regs_mask(chip, CY8C95X0_INPUT, bits, mask);
}
-static void cy8c95x0_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int cy8c95x0_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
- cy8c95x0_write_regs_mask(chip, CY8C95X0_OUTPUT, bits, mask);
+ return cy8c95x0_write_regs_mask(chip, CY8C95X0_OUTPUT, bits, mask);
}
static int cy8c95x0_add_pin_ranges(struct gpio_chip *gc)
@@ -938,10 +939,10 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
gc->direction_input = cy8c95x0_gpio_direction_input;
gc->direction_output = cy8c95x0_gpio_direction_output;
gc->get = cy8c95x0_gpio_get_value;
- gc->set = cy8c95x0_gpio_set_value;
+ gc->set_rv = cy8c95x0_gpio_set_value;
gc->get_direction = cy8c95x0_gpio_get_direction;
gc->get_multiple = cy8c95x0_gpio_get_multiple;
- gc->set_multiple = cy8c95x0_gpio_set_multiple;
+ gc->set_multiple_rv = cy8c95x0_gpio_set_multiple;
gc->set_config = gpiochip_generic_config;
gc->can_sleep = true;
gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index a9e48eac15f6..3c660471ec69 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -3800,12 +3800,14 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(irq_chip, desc);
}
-static void ingenic_gpio_set(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int ingenic_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
ingenic_gpio_set_value(jzgc, offset, value);
+
+ return 0;
}
static int ingenic_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -4449,7 +4451,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.fwnode = fwnode;
jzgc->gc.owner = THIS_MODULE;
- jzgc->gc.set = ingenic_gpio_set;
+ jzgc->gc.set_rv = ingenic_gpio_set;
jzgc->gc.get = ingenic_gpio_get;
jzgc->gc.direction_input = pinctrl_gpio_direction_input;
jzgc->gc.direction_output = ingenic_gpio_direction_output;
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
index b693f4787044..0d7cc8280ea2 100644
--- a/drivers/pinctrl/pinctrl-keembay.c
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -1268,7 +1268,7 @@ static void keembay_gpio_irq_handler(struct irq_desc *desc)
for_each_set_clump8(bit, clump, &reg, BITS_PER_TYPE(typeof(reg))) {
pin = clump & ~KEEMBAY_GPIO_IRQ_ENABLE;
val = keembay_read_pin(kpc->base0 + KEEMBAY_GPIO_DATA_IN, pin);
- kmb_irq = irq_linear_revmap(gc->irq.domain, pin);
+ kmb_irq = irq_find_mapping(gc->irq.domain, pin);
/* Checks if the interrupt is enabled */
if (val && (clump & KEEMBAY_GPIO_IRQ_ENABLE))
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 4d1f41488017..c2f4b16f42d2 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -636,6 +636,14 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ /*
+ * Reset the chip - we don't really know what state it's in, so reset
+ * all pins to input first to prevent surprises.
+ */
+ ret = mcp_write(mcp, MCP_IODIR, mcp->chip.ngpio == 16 ? 0xFFFF : 0xFF);
+ if (ret < 0)
+ return ret;
+
/* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
* and MCP_IOCON.HAEN = 1, so we work with all chips.
*/
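
The probe-time reset added to mcp23s08 is a defensive-initialization idiom: an I/O expander may survive a warm reboot with outputs still driven, so the driver forces every line back to input before any further setup. Reduced to its essence (FOO_REG_IODIR is an illustrative direction register where 1 = input, and ngpio is assumed to be at least 1):

#include <linux/bits.h>
#include <linux/regmap.h>

#define FOO_REG_IODIR 0x00 /* illustrative; one direction bit per line */

/* Park all ngpio lines as inputs so nothing is driven until configured. */
static int foo_park_inputs(struct regmap *map, unsigned int ngpio)
{
	return regmap_write(map, FOO_REG_IODIR, GENMASK(ngpio - 1, 0));
}
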
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index a60db93b61b1..88c2f14cfc6b 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -555,10 +555,10 @@ static int microchip_sgpio_get_direction(struct gpio_chip *gc, unsigned int gpio
return bank->is_input ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
-static void microchip_sgpio_set_value(struct gpio_chip *gc,
- unsigned int gpio, int value)
+static int microchip_sgpio_set_value(struct gpio_chip *gc, unsigned int gpio,
+ int value)
{
- microchip_sgpio_direction_output(gc, gpio, value);
+ return microchip_sgpio_direction_output(gc, gpio, value);
}
static int microchip_sgpio_get_value(struct gpio_chip *gc, unsigned int gpio)
@@ -858,7 +858,7 @@ static int microchip_sgpio_register_bank(struct device *dev,
gc->direction_input = microchip_sgpio_direction_input;
gc->direction_output = microchip_sgpio_direction_output;
gc->get = microchip_sgpio_get_value;
- gc->set = microchip_sgpio_set_value;
+ gc->set_rv = microchip_sgpio_set_value;
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->of_xlate = microchip_sgpio_of_xlate;
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 329d54b11529..fbb3d43746bb 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1950,17 +1950,18 @@ static int ocelot_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset % 32));
}
-static void ocelot_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ocelot_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ocelot_pinctrl *info = gpiochip_get_data(chip);
if (value)
- regmap_write(info->map, REG(OCELOT_GPIO_OUT_SET, info, offset),
- BIT(offset % 32));
- else
- regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset),
- BIT(offset % 32));
+ return regmap_write(info->map,
+ REG(OCELOT_GPIO_OUT_SET, info, offset),
+ BIT(offset % 32));
+
+ return regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset),
+ BIT(offset % 32));
}
static int ocelot_gpio_get_direction(struct gpio_chip *chip,
@@ -1996,7 +1997,7 @@ static int ocelot_gpio_direction_output(struct gpio_chip *chip,
static const struct gpio_chip ocelot_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set = ocelot_gpio_set,
+ .set_rv = ocelot_gpio_set,
.get = ocelot_gpio_get,
.get_direction = ocelot_gpio_get_direction,
.direction_input = pinctrl_gpio_direction_input,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 8c50e0091b32..e7bf60960961 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1186,12 +1186,14 @@ static int pistachio_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(gpio_readl(bank, reg) & BIT(offset));
}
-static void pistachio_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int pistachio_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct pistachio_gpio_bank *bank = gpiochip_get_data(chip);
gpio_mask_writel(bank, GPIO_OUTPUT, offset, !!value);
+
+ return 0;
}
static int pistachio_gpio_direction_input(struct gpio_chip *chip,
@@ -1326,7 +1328,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
.direction_input = pistachio_gpio_direction_input, \
.direction_output = pistachio_gpio_direction_output, \
.get = pistachio_gpio_get, \
- .set = pistachio_gpio_set, \
+ .set_rv = pistachio_gpio_set, \
.base = _pin_base, \
.ngpio = _npins, \
}, \
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index c42f1bf93404..fc0e330b1d11 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -325,26 +325,26 @@ static int rk805_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & pci->pin_cfg[offset].val_msk);
}
-static void rk805_gpio_set(struct gpio_chip *chip,
- unsigned int offset,
- int value)
+static int rk805_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct rk805_pctrl_info *pci = gpiochip_get_data(chip);
- int ret;
- ret = regmap_update_bits(pci->rk808->regmap,
- pci->pin_cfg[offset].reg,
- pci->pin_cfg[offset].val_msk,
- value ? pci->pin_cfg[offset].val_msk : 0);
- if (ret)
- dev_err(pci->dev, "set gpio%d value %d failed\n",
- offset, value);
+ return regmap_update_bits(pci->rk808->regmap,
+ pci->pin_cfg[offset].reg,
+ pci->pin_cfg[offset].val_msk,
+ value ? pci->pin_cfg[offset].val_msk : 0);
}
static int rk805_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
- rk805_gpio_set(chip, offset, value);
+ int ret;
+
+ ret = rk805_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
return pinctrl_gpio_direction_output(chip, offset);
}
@@ -378,7 +378,7 @@ static const struct gpio_chip rk805_gpio_chip = {
.free = gpiochip_generic_free,
.get_direction = rk805_gpio_get_direction,
.get = rk805_gpio_get,
- .set = rk805_gpio_set,
+ .set_rv = rk805_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = rk805_gpio_direction_output,
.can_sleep = true,
diff --git a/drivers/pinctrl/pinctrl-scmi.c b/drivers/pinctrl/pinctrl-scmi.c
index df4bbcd7d1d5..383681041e4c 100644
--- a/drivers/pinctrl/pinctrl-scmi.c
+++ b/drivers/pinctrl/pinctrl-scmi.c
@@ -507,6 +507,7 @@ static int pinctrl_scmi_get_pins(struct scmi_pinctrl *pmx,
static const char * const scmi_pinctrl_blocklist[] = {
"fsl,imx95",
+ "fsl,imx94",
NULL
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 5be14dc979e2..5cda6201b60f 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1611,15 +1611,16 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
/*
* We can use the register offset as the hardirq
- * number as irq_domain_add_simple maps them lazily.
+ * number as irq_domain_create_simple maps them lazily.
* This way we can easily support more than one
* interrupt per function if needed.
*/
num_irqs = pcs->size;
- pcs->domain = irq_domain_add_simple(np, num_irqs, 0,
- &pcs_irqdomain_ops,
- pcs_soc);
+ pcs->domain = irq_domain_create_simple(of_fwnode_handle(np),
+ num_irqs, 0,
+ &pcs_irqdomain_ops,
+ pcs_soc);
if (!pcs->domain) {
irq_set_chained_handler(pcs_soc->irq, NULL);
return -EINVAL;
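
irq_domain_add_simple() is one of the OF-flavoured irq-domain constructors being replaced by fwnode-based equivalents; as this hunk shows, the conversion is mechanical once the device_node is wrapped with of_fwnode_handle(). The keembay hunk's irq_linear_revmap() -> irq_find_mapping() switch earlier in this series is part of the same irqdomain API consolidation. A sketch (baz_* is illustrative):

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *baz_create_domain(struct device_node *np,
					    unsigned int nr_irqs,
					    const struct irq_domain_ops *ops,
					    void *host_data)
{
	/* was: irq_domain_add_simple(np, nr_irqs, 0, ops, host_data) */
	return irq_domain_create_simple(of_fwnode_handle(np), nr_irqs, 0,
					ops, host_data);
}
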
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index fe2d52e434db..8a2ef74862d3 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -374,11 +374,6 @@ static struct st_pio_control *st_get_pio_control(
}
/* Low level functions.. */
-static inline int st_gpio_bank(int gpio)
-{
- return gpio/ST_GPIO_PINS_PER_BANK;
-}
-
static inline int st_gpio_pin(int gpio)
{
return gpio%ST_GPIO_PINS_PER_BANK;
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index aae01120dc52..f4fdcaa043e6 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -115,14 +115,14 @@ static int stmfx_gpio_get(struct gpio_chip *gc, unsigned int offset)
return ret ? ret : !!(value & mask);
}
-static void stmfx_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int stmfx_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
u32 reg = value ? STMFX_REG_GPO_SET : STMFX_REG_GPO_CLR;
u32 mask = get_mask(offset);
- regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset),
- mask, mask);
+ return regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset),
+ mask, mask);
}
static int stmfx_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -161,8 +161,11 @@ static int stmfx_gpio_direction_output(struct gpio_chip *gc,
struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset);
u32 mask = get_mask(offset);
+ int ret;
- stmfx_gpio_set(gc, offset, value);
+ ret = stmfx_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
return regmap_write_bits(pctl->stmfx->map, reg, mask, mask);
}
@@ -694,7 +697,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_chip.direction_input = stmfx_gpio_direction_input;
pctl->gpio_chip.direction_output = stmfx_gpio_direction_output;
pctl->gpio_chip.get = stmfx_gpio_get;
- pctl->gpio_chip.set = stmfx_gpio_set;
+ pctl->gpio_chip.set_rv = stmfx_gpio_set;
pctl->gpio_chip.set_config = gpiochip_generic_config;
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 98262b8ce43a..d3a12c1c0de2 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -432,24 +432,25 @@ static int sx150x_gpio_oscio_set(struct sx150x_pinctrl *pctl,
(value ? 0x1f : 0x10));
}
-static void sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
if (sx150x_pin_is_oscio(pctl, offset))
- sx150x_gpio_oscio_set(pctl, value);
- else
- __sx150x_gpio_set(pctl, offset, value);
+ return sx150x_gpio_oscio_set(pctl, value);
+
+ return __sx150x_gpio_set(pctl, offset, value);
}
-static void sx150x_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask,
- unsigned long *bits)
+static int sx150x_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
{
struct sx150x_pinctrl *pctl = gpiochip_get_data(chip);
- regmap_write_bits(pctl->regmap, pctl->data->reg_data, *mask, *bits);
+ return regmap_write_bits(pctl->regmap, pctl->data->reg_data, *mask,
+ *bits);
}
static int sx150x_gpio_direction_input(struct gpio_chip *chip,
@@ -1175,7 +1176,7 @@ static int sx150x_probe(struct i2c_client *client)
pctl->gpio.direction_input = sx150x_gpio_direction_input;
pctl->gpio.direction_output = sx150x_gpio_direction_output;
pctl->gpio.get = sx150x_gpio_get;
- pctl->gpio.set = sx150x_gpio_set;
+ pctl->gpio.set_rv = sx150x_gpio_set;
pctl->gpio.set_config = gpiochip_generic_config;
pctl->gpio.parent = dev;
pctl->gpio.can_sleep = true;
@@ -1190,7 +1191,7 @@ static int sx150x_probe(struct i2c_client *client)
* would require locking that is not in place at this time.
*/
if (pctl->data->model != SX150X_789)
- pctl->gpio.set_multiple = sx150x_gpio_set_multiple;
+ pctl->gpio.set_multiple_rv = sx150x_gpio_set_multiple;
/* Add Interrupt support if an irq is specified */
if (client->irq > 0) {
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index d6bb8f58978d..4edb20e61951 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -823,7 +823,7 @@ static struct platform_driver tb10x_pinctrl_pdrv = {
.remove = tb10x_pinctrl_remove,
.driver = {
.name = "tb10x_pinctrl",
- .of_match_table = of_match_ptr(tb10x_pinctrl_dt_ids),
+ .of_match_table = tb10x_pinctrl_dt_ids,
}
};
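
Dropping of_match_ptr() in tb10x is deliberate: the macro evaluates to NULL when CONFIG_OF is disabled, which leaves the match table defined but unreferenced (an unused-variable warning at W=1) and also defeats ACPI PRP0001-based matching, which relies on the table pointer being set. Referencing the table directly avoids both; qux_* names below are illustrative:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>

static const struct of_device_id qux_dt_ids[] = {
	{ .compatible = "vendor,qux" },
	{ /* sentinel */ }
};

static struct platform_driver qux_pdrv = {
	.driver = {
		.name = "qux",
		.of_match_table = qux_dt_ids, /* not of_match_ptr(qux_dt_ids) */
	},
};
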
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c
index 20c3b9025044..3654913f1ae5 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c
@@ -629,7 +629,6 @@ static struct platform_driver apq8064_pinctrl_driver = {
.of_match_table = apq8064_pinctrl_of_match,
},
.probe = apq8064_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init apq8064_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c
index 3fc0a40762b6..27693cd64881 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8084.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c
@@ -1207,7 +1207,6 @@ static struct platform_driver apq8084_pinctrl_driver = {
.of_match_table = apq8084_pinctrl_of_match,
},
.probe = apq8084_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init apq8084_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index 1f7944dd829d..6ede3149b6e1 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -710,7 +710,6 @@ static struct platform_driver ipq4019_pinctrl_driver = {
.of_match_table = ipq4019_pinctrl_of_match,
},
.probe = ipq4019_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq4019_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5018.c b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
index e2951f81c3ee..10b99d5d8a11 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
@@ -754,7 +754,6 @@ static struct platform_driver ipq5018_pinctrl_driver = {
.of_match_table = ipq5018_pinctrl_of_match,
},
.probe = ipq5018_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5018_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
index 625f8014051f..1ac2fc09c119 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5332.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
@@ -834,7 +834,6 @@ static struct platform_driver ipq5332_pinctrl_driver = {
.of_match_table = ipq5332_pinctrl_of_match,
},
.probe = ipq5332_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5332_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
index 0d610b076da3..7ff1f8acc1a3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5424.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
@@ -791,7 +791,6 @@ static struct platform_driver ipq5424_pinctrl_driver = {
.of_match_table = ipq5424_pinctrl_of_match,
},
.probe = ipq5424_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5424_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
index 0ad08647dbcd..a4ba980252e1 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -1080,7 +1080,6 @@ static struct platform_driver ipq6018_pinctrl_driver = {
.of_match_table = ipq6018_pinctrl_of_match,
},
.probe = ipq6018_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq6018_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index e2bb94e86aef..0a9e357e64c6 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -631,7 +631,6 @@ static struct platform_driver ipq8064_pinctrl_driver = {
.of_match_table = ipq8064_pinctrl_of_match,
},
.probe = ipq8064_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq8064_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
index 337f3a1c92c1..482f13282fc2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8074.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
@@ -1041,7 +1041,6 @@ static struct platform_driver ipq8074_pinctrl_driver = {
.of_match_table = ipq8074_pinctrl_of_match,
},
.probe = ipq8074_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq8074_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq9574.c b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
index e2491617b236..89c05d8eb550 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq9574.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
@@ -799,7 +799,6 @@ static struct platform_driver ipq9574_pinctrl_driver = {
.of_match_table = ipq9574_pinctrl_of_match,
},
.probe = ipq9574_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq9574_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 7366aba5a199..57fefeb603f0 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -327,14 +327,14 @@ static int lpi_gpio_get(struct gpio_chip *chip, unsigned int pin)
LPI_GPIO_VALUE_IN_MASK;
}
-static void lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct lpi_pinctrl *state = gpiochip_get_data(chip);
unsigned long config;
config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
- lpi_config_set(state->ctrl, pin, &config, 1);
+ return lpi_config_set(state->ctrl, pin, &config, 1);
}
#ifdef CONFIG_DEBUG_FS
@@ -398,7 +398,7 @@ static const struct gpio_chip lpi_gpio_template = {
.direction_input = lpi_gpio_direction_input,
.direction_output = lpi_gpio_direction_output,
.get = lpi_gpio_get,
- .set = lpi_gpio_set,
+ .set_rv = lpi_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = lpi_gpio_dbg_show,
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
index e7cd3ef1cf3e..3e18ba124fed 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9607.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
@@ -1059,7 +1059,6 @@ static struct platform_driver mdm9607_pinctrl_driver = {
.of_match_table = mdm9607_pinctrl_of_match,
},
.probe = mdm9607_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init mdm9607_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
index 0a2ae383d3d5..bea1ca3d1b7f 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
@@ -446,7 +446,6 @@ static struct platform_driver mdm9615_pinctrl_driver = {
.of_match_table = mdm9615_pinctrl_of_match,
},
.probe = mdm9615_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init mdm9615_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 82f0cc43bbf4..5c4687de1464 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -44,7 +44,6 @@
* @pctrl: pinctrl handle.
* @chip: gpiochip handle.
* @desc: pin controller descriptor
- * @restart_nb: restart notifier block.
* @irq: parent irq for the TLMM irq_chip.
* @intr_target_use_scm: route irq to application cpu using scm calls
* @lock: Spinlock to protect register resources as well
@@ -64,7 +63,6 @@ struct msm_pinctrl {
struct pinctrl_dev *pctrl;
struct gpio_chip chip;
struct pinctrl_desc desc;
- struct notifier_block restart_nb;
int irq;
@@ -637,7 +635,7 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(val & BIT(g->in_bit));
}
-static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int msm_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
const struct msm_pingroup *g;
struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
@@ -656,6 +654,8 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
msm_writel_io(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
}
#ifdef CONFIG_DEBUG_FS
@@ -792,7 +792,7 @@ static const struct gpio_chip msm_gpio_template = {
.direction_output = msm_gpio_direction_output,
.get_direction = msm_gpio_get_direction,
.get = msm_gpio_get,
- .set = msm_gpio_set,
+ .set_rv = msm_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = msm_gpio_dbg_show,
@@ -1442,7 +1442,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
girq->handler = handle_bad_irq;
girq->parents[0] = pctrl->irq;
- ret = gpiochip_add_data(&pctrl->chip, pctrl);
+ ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "Failed register gpiochip\n");
return ret;
@@ -1463,7 +1463,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
dev_name(pctrl->dev), 0, 0, chip->ngpio);
if (ret) {
dev_err(pctrl->dev, "Failed to add pin range\n");
- gpiochip_remove(&pctrl->chip);
return ret;
}
}
@@ -1471,10 +1470,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return 0;
}
-static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
- void *data)
+static int msm_ps_hold_restart(struct sys_off_data *data)
{
- struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb);
+ struct msm_pinctrl *pctrl = data->cb_data;
writel(0, pctrl->regs[0] + PS_HOLD_OFFSET);
mdelay(1000);
@@ -1485,7 +1483,11 @@ static struct msm_pinctrl *poweroff_pctrl;
static void msm_ps_hold_poweroff(void)
{
- msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL);
+ struct sys_off_data data = {
+ .cb_data = poweroff_pctrl,
+ };
+
+ msm_ps_hold_restart(&data);
}
static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
@@ -1495,9 +1497,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
for (i = 0; i < pctrl->soc->nfunctions; i++)
if (!strcmp(func[i].name, "ps_hold")) {
- pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
- pctrl->restart_nb.priority = 128;
- if (register_restart_handler(&pctrl->restart_nb))
+ if (devm_register_sys_off_handler(pctrl->dev,
+ SYS_OFF_MODE_RESTART,
+ 128,
+ msm_ps_hold_restart,
+ pctrl))
dev_err(pctrl->dev,
"failed to setup restart handler.\n");
poweroff_pctrl = pctrl;
@@ -1594,15 +1598,5 @@ int msm_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL(msm_pinctrl_probe);
-void msm_pinctrl_remove(struct platform_device *pdev)
-{
- struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pctrl->chip);
-
- unregister_restart_handler(&pctrl->restart_nb);
-}
-EXPORT_SYMBOL(msm_pinctrl_remove);
-
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. TLMM driver");
MODULE_LICENSE("GPL v2");
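pinctrl-msm.c above replaces the open-coded restart notifier_block with devm_register_sys_off_handler() and registers its gpiochip through devm_gpiochip_add_data(), leaving msm_pinctrl_remove() with nothing to undo; that is why the function is deleted here and every per-SoC .remove = msm_pinctrl_remove initializer disappears in the files that follow. A sketch of the devm sys-off pattern with illustrative bar_* names (the callback signature, struct sys_off_data and SYS_OFF_MODE_RESTART are the real kernel API; priority 128 and the writel/mdelay shape match the handler above):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct bar_ctx {
        void __iomem *base;     /* illustrative driver context */
};

static int bar_restart(struct sys_off_data *data)
{
        struct bar_ctx *ctx = data->cb_data;

        writel(0, ctx->base);   /* illustrative: hit the reset register */
        mdelay(1000);           /* give the hardware time to act */

        return NOTIFY_DONE;
}

static int bar_setup_reset(struct device *dev, struct bar_ctx *ctx)
{
        /* Unregistered automatically when @dev goes away: no remove hook. */
        return devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, 128,
                                             bar_restart, ctx);
}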
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 63852ed70295..d7dc0947bb16 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -171,6 +171,5 @@ extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data);
-void msm_pinctrl_remove(struct platform_device *pdev);
#endif
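With the hunks above, the header loses the msm_pinctrl_remove() declaration for the same reason the definition went away: the gpiochip itself is now devm-managed. A reduced sketch of that registration (baz_* names are illustrative; devm_gpiochip_add_data() and dev_err_probe() are real kernel APIs):

#include <linux/device.h>
#include <linux/gpio/driver.h>

struct baz_pinctrl {
        struct gpio_chip chip;
};

static int baz_gpio_init(struct device *dev, struct baz_pinctrl *pctrl)
{
        int ret;

        /* Removed automatically on driver detach: no .remove counterpart. */
        ret = devm_gpiochip_add_data(dev, &pctrl->chip, pctrl);
        if (ret)
                return dev_err_probe(dev, ret, "failed to register gpiochip\n");

        return 0;
}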
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c
index 64fee70f1772..f9a957347340 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8226.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c
@@ -654,7 +654,6 @@ static struct platform_driver msm8226_pinctrl_driver = {
.of_match_table = msm8226_pinctrl_of_match,
},
.probe = msm8226_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8226_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 999a5f867eb5..4dbc19ffd80e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -981,7 +981,6 @@ static struct platform_driver msm8660_pinctrl_driver = {
.of_match_table = msm8660_pinctrl_of_match,
},
.probe = msm8660_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8660_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
index 756856d20d6b..0aa4f77b774f 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8909.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -929,7 +929,6 @@ static struct platform_driver msm8909_pinctrl_driver = {
.of_match_table = msm8909_pinctrl_of_match,
},
.probe = msm8909_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8909_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index cea5c54f92fe..0dfc6dd33d58 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -969,7 +969,6 @@ static struct platform_driver msm8916_pinctrl_driver = {
.of_match_table = msm8916_pinctrl_of_match,
},
.probe = msm8916_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8916_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
index 350636807b07..2e1a94ab18b2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8917.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -1607,7 +1607,6 @@ static struct platform_driver msm8917_pinctrl_driver = {
.of_match_table = msm8917_pinctrl_of_match,
},
.probe = msm8917_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8917_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c
index 998351bdfee1..956383341a7a 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8953.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c
@@ -1816,7 +1816,6 @@ static struct platform_driver msm8953_pinctrl_driver = {
.of_match_table = msm8953_pinctrl_of_match,
},
.probe = msm8953_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8953_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
index ebe230b3b437..a937ea867de7 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -1246,7 +1246,6 @@ static struct platform_driver msm8960_pinctrl_driver = {
.of_match_table = msm8960_pinctrl_of_match,
},
.probe = msm8960_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8960_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index c30d80e4e98c..3bcb03387781 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -1096,7 +1096,6 @@ static struct platform_driver msm8976_pinctrl_driver = {
.of_match_table = msm8976_pinctrl_of_match,
},
.probe = msm8976_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8976_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c
index b1a6759ab4a5..7a3b6cbccb68 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8994.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c
@@ -1343,7 +1343,6 @@ static struct platform_driver msm8994_pinctrl_driver = {
.of_match_table = msm8994_pinctrl_of_match,
},
.probe = msm8994_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8994_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c
index 1b5d80eaab83..d86d83106d3b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8996.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c
@@ -1920,7 +1920,6 @@ static struct platform_driver msm8996_pinctrl_driver = {
.of_match_table = msm8996_pinctrl_of_match,
},
.probe = msm8996_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8996_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8998.c b/drivers/pinctrl/qcom/pinctrl-msm8998.c
index b7cbf32b3125..1daee815888f 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8998.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8998.c
@@ -1535,7 +1535,6 @@ static struct platform_driver msm8998_pinctrl_driver = {
.of_match_table = msm8998_pinctrl_of_match,
},
.probe = msm8998_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8998_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 238c83f6ec4f..8253aa25775b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -1083,7 +1083,6 @@ static struct platform_driver msm8x74_pinctrl_driver = {
.of_match_table = msm8x74_pinctrl_of_match,
},
.probe = msm8x74_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8x74_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
index ba699eac9ee8..eeeec6434f6a 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
@@ -37,6 +37,8 @@
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
.oe_bit = 9, \
.in_bit = 0, \
.out_bit = 1, \
@@ -165,6 +167,10 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = {
PINCTRL_PIN(62, "GPIO_62"),
PINCTRL_PIN(63, "GPIO_63"),
PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
PINCTRL_PIN(69, "GPIO_69"),
PINCTRL_PIN(70, "GPIO_70"),
PINCTRL_PIN(71, "GPIO_71"),
@@ -179,12 +185,17 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = {
PINCTRL_PIN(80, "GPIO_80"),
PINCTRL_PIN(81, "GPIO_81"),
PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
PINCTRL_PIN(86, "GPIO_86"),
PINCTRL_PIN(87, "GPIO_87"),
PINCTRL_PIN(88, "GPIO_88"),
PINCTRL_PIN(89, "GPIO_89"),
PINCTRL_PIN(90, "GPIO_90"),
PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
PINCTRL_PIN(94, "GPIO_94"),
PINCTRL_PIN(95, "GPIO_95"),
PINCTRL_PIN(96, "GPIO_96"),
@@ -387,6 +398,7 @@ enum qcm2290_functions {
msm_mux_ddr_pxi1,
msm_mux_ddr_pxi2,
msm_mux_ddr_pxi3,
+ msm_mux_egpio,
msm_mux_gcc_gp1,
msm_mux_gcc_gp2,
msm_mux_gcc_gp3,
@@ -816,6 +828,13 @@ static const char * const sd_write_groups[] = {
static const char * const jitter_bist_groups[] = {
"gpio96", "gpio97",
};
+static const char * const egpio_groups[] = {
+ "gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103",
+ "gpio104", "gpio105", "gpio106", "gpio107", "gpio108", "gpio109",
+ "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115",
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121",
+ "gpio122", "gpio123", "gpio124", "gpio125", "gpio126",
+};
static const char * const ddr_pxi2_groups[] = {
"gpio102", "gpio103",
};
@@ -851,6 +870,7 @@ static const struct pinfunction qcm2290_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi1),
MSM_PIN_FUNCTION(ddr_pxi2),
MSM_PIN_FUNCTION(ddr_pxi3),
+ MSM_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
@@ -1037,35 +1057,35 @@ static const struct msm_pingroup qcm2290_groups[] = {
[95] = PINGROUP(95, nav_gpio, gp_pdm0, qdss_gpio, wlan1_adc1, _, _, _, _, _),
[96] = PINGROUP(96, qup4, nav_gpio, mdp_vsync, gp_pdm1, sd_write, jitter_bist, qdss_cti, qdss_cti, _),
[97] = PINGROUP(97, qup4, nav_gpio, mdp_vsync, gp_pdm2, jitter_bist, qdss_cti, qdss_cti, _, _),
- [98] = PINGROUP(98, _, _, _, _, _, _, _, _, _),
- [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _),
- [100] = PINGROUP(100, atest, _, _, _, _, _, _, _, _),
- [101] = PINGROUP(101, atest, _, _, _, _, _, _, _, _),
- [102] = PINGROUP(102, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, _),
- [103] = PINGROUP(103, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, _),
- [104] = PINGROUP(104, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, pwm_8, _, _),
- [105] = PINGROUP(105, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, _, _, _),
- [106] = PINGROUP(106, nav_gpio, gcc_gp3, qdss_gpio, _, _, _, _, _, _),
- [107] = PINGROUP(107, nav_gpio, gcc_gp2, qdss_gpio, _, _, _, _, _, _),
- [108] = PINGROUP(108, nav_gpio, _, _, _, _, _, _, _, _),
- [109] = PINGROUP(109, _, qdss_gpio, _, _, _, _, _, _, _),
- [110] = PINGROUP(110, _, qdss_gpio, _, _, _, _, _, _, _),
- [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
- [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
- [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _),
- [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _),
- [115] = PINGROUP(115, _, pwm_9, _, _, _, _, _, _, _),
- [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
- [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
- [118] = PINGROUP(118, _, _, _, _, _, _, _, _, _),
- [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
- [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
- [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _),
- [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _),
- [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
- [124] = PINGROUP(124, _, _, _, _, _, _, _, _, _),
- [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
- [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, _, _, _, _, _, _, _, egpio),
+ [99] = PINGROUP(99, _, _, _, _, _, _, _, _, egpio),
+ [100] = PINGROUP(100, atest, _, _, _, _, _, _, _, egpio),
+ [101] = PINGROUP(101, atest, _, _, _, _, _, _, _, egpio),
+ [102] = PINGROUP(102, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, egpio),
+ [103] = PINGROUP(103, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, egpio),
+ [104] = PINGROUP(104, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, pwm_8, _, egpio),
+ [105] = PINGROUP(105, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, _, _, egpio),
+ [106] = PINGROUP(106, nav_gpio, gcc_gp3, qdss_gpio, _, _, _, _, _, egpio),
+ [107] = PINGROUP(107, nav_gpio, gcc_gp2, qdss_gpio, _, _, _, _, _, egpio),
+ [108] = PINGROUP(108, nav_gpio, _, _, _, _, _, _, _, egpio),
+ [109] = PINGROUP(109, _, qdss_gpio, _, _, _, _, _, _, egpio),
+ [110] = PINGROUP(110, _, qdss_gpio, _, _, _, _, _, _, egpio),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, egpio),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, egpio),
+ [113] = PINGROUP(113, _, _, _, _, _, _, _, _, egpio),
+ [114] = PINGROUP(114, _, _, _, _, _, _, _, _, egpio),
+ [115] = PINGROUP(115, _, pwm_9, _, _, _, _, _, _, egpio),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, egpio),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, egpio),
+ [118] = PINGROUP(118, _, _, _, _, _, _, _, _, egpio),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, egpio),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, egpio),
+ [121] = PINGROUP(121, _, _, _, _, _, _, _, _, egpio),
+ [122] = PINGROUP(122, _, _, _, _, _, _, _, _, egpio),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, egpio),
+ [124] = PINGROUP(124, _, _, _, _, _, _, _, _, egpio),
+ [125] = PINGROUP(125, _, _, _, _, _, _, _, _, egpio),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, egpio),
[127] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x84004, 0, 0),
[128] = SDC_QDSD_PINGROUP(sdc1_clk, 0x84000, 13, 6),
[129] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x84000, 11, 3),
@@ -1095,6 +1115,7 @@ static const struct msm_pinctrl_soc_data qcm2290_pinctrl = {
.ngpios = 127,
.wakeirq_map = qcm2290_mpm_map,
.nwakeirq_map = ARRAY_SIZE(qcm2290_mpm_map),
+ .egpio_func = 9,
};
static int qcm2290_pinctrl_probe(struct platform_device *pdev)
@@ -1113,7 +1134,6 @@ static struct platform_driver qcm2290_pinctrl_driver = {
.of_match_table = qcm2290_pinctrl_of_match,
},
.probe = qcm2290_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcm2290_pinctrl_init(void)
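The qcm2290 hunks above un-reserve GPIOs 65-68, 83-85 and 92-93, and wire GPIOs 98-126 to the new egpio function; .egpio_func = 9 marks the ninth (last) entry of each PINGROUP() row as that selector, while .egpio_present = 11 and .egpio_enable = 12 name bits in each group's ctl register. A hedged sketch of the bookkeeping those two bits imply — the bit positions match the values above, but the ownership polarity of the enable bit is an assumption here, not taken from the driver:

#include <linux/bits.h>
#include <linux/types.h>

#define QCM2290_EGPIO_PRESENT   11      /* read-only: pin has an egpio path */
#define QCM2290_EGPIO_ENABLE    12      /* read-write: select the controller */

/* Assumed polarity: setting the enable bit hands the pin to the TLMM. */
static bool egpio_claim_for_tlmm(u32 *ctl)
{
        if (!(*ctl & BIT(QCM2290_EGPIO_PRESENT)))
                return false;   /* pin is not egpio-capable */

        *ctl |= BIT(QCM2290_EGPIO_ENABLE);
        return true;
}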
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index ae7224012f8a..54e3b4435349 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -1644,7 +1644,6 @@ static struct platform_driver qcs404_pinctrl_driver = {
.of_match_table = qcs404_pinctrl_of_match,
},
.probe = qcs404_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs404_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
index 23015b055f6a..2a943bc46a62 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
@@ -1062,7 +1062,7 @@ static const struct msm_pinctrl_soc_data qcs615_tlmm = {
.nfunctions = ARRAY_SIZE(qcs615_functions),
.groups = qcs615_groups,
.ngroups = ARRAY_SIZE(qcs615_groups),
- .ngpios = 123,
+ .ngpios = 124,
.tiles = qcs615_tiles,
.ntiles = ARRAY_SIZE(qcs615_tiles),
.wakeirq_map = qcs615_pdc_map,
@@ -1087,7 +1087,6 @@ static struct platform_driver qcs615_tlmm_driver = {
.of_match_table = qcs615_tlmm_of_match,
},
.probe = qcs615_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs615_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs8300.c b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
index ba6de944a859..d6437e26392b 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs8300.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
@@ -1204,7 +1204,7 @@ static const struct msm_pinctrl_soc_data qcs8300_pinctrl = {
.nfunctions = ARRAY_SIZE(qcs8300_functions),
.groups = qcs8300_groups,
.ngroups = ARRAY_SIZE(qcs8300_groups),
- .ngpios = 133,
+ .ngpios = 134,
.wakeirq_map = qcs8300_pdc_map,
.nwakeirq_map = ARRAY_SIZE(qcs8300_pdc_map),
.egpio_func = 11,
@@ -1227,7 +1227,6 @@ static struct platform_driver qcs8300_pinctrl_driver = {
.of_match_table = qcs8300_pinctrl_of_match,
},
.probe = qcs8300_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs8300_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
index b5808fcfb13c..9ecc4d40e4dc 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -145,7 +145,6 @@ static struct platform_driver qdf2xxx_pinctrl_driver = {
.acpi_match_table = qdf2xxx_acpi_ids,
},
.probe = qdf2xxx_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qdf2xxx_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
index 47bc529ef550..eacb89fa3888 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdu1000.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
@@ -1248,7 +1248,6 @@ static struct platform_driver qdu1000_tlmm_driver = {
.of_match_table = qdu1000_tlmm_of_match,
},
.probe = qdu1000_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qdu1000_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
index a5b38221aea8..1b62eb3e6620 100644
--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
@@ -1540,7 +1540,6 @@ static struct platform_driver sa8775p_pinctrl_driver = {
.of_match_table = sa8775p_pinctrl_of_match,
},
.probe = sa8775p_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sa8775p_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sar2130p.c b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
index 19a2e37826c7..3dd1b5e5cfee 100644
--- a/drivers/pinctrl/qcom/pinctrl-sar2130p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
@@ -1486,7 +1486,6 @@ static struct platform_driver sar2130p_tlmm_driver = {
.of_match_table = sar2130p_tlmm_of_match,
},
.probe = sar2130p_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sar2130p_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 6eb0c73791c0..c43fe10b71ad 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -1159,7 +1159,6 @@ static struct platform_driver sc7180_pinctrl_driver = {
.of_match_table = sc7180_pinctrl_of_match,
},
.probe = sc7180_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc7180_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 0c10eeb60b55..1b070e9d41f5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1505,7 +1505,6 @@ static struct platform_driver sc7280_pinctrl_driver = {
.of_match_table = sc7280_pinctrl_of_match,
},
.probe = sc7280_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc7280_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index d6a79ad41a40..26dd165d1543 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -1720,7 +1720,6 @@ static struct platform_driver sc8180x_pinctrl_driver = {
.acpi_match_table = sc8180x_pinctrl_acpi_match,
},
.probe = sc8180x_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc8180x_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
index 96f4fb5a5d29..6ccd7e5648d4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
@@ -1926,7 +1926,6 @@ static struct platform_driver sc8280xp_pinctrl_driver = {
.of_match_table = sc8280xp_pinctrl_of_match,
},
.probe = sc8280xp_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc8280xp_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index 907e4ffca5e7..1a78288f1bc8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -1442,7 +1442,6 @@ static struct platform_driver sdm660_pinctrl_driver = {
.of_match_table = sdm660_pinctrl_of_match,
},
.probe = sdm660_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm660_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index c76183ba95e1..0fe1fa94cd6d 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -1337,7 +1337,6 @@ static struct platform_driver sdm670_pinctrl_driver = {
.of_match_table = sdm670_pinctrl_of_match,
},
.probe = sdm670_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm670_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index cc05c415ed15..0446e291aa48 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1351,7 +1351,6 @@ static struct platform_driver sdm845_pinctrl_driver = {
.acpi_match_table = ACPI_PTR(sdm845_pinctrl_acpi_match),
},
.probe = sdm845_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm845_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 8826db9d21d0..2c17bf889146 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -990,7 +990,6 @@ static struct platform_driver sdx55_pinctrl_driver = {
.of_match_table = sdx55_pinctrl_of_match,
},
.probe = sdx55_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx55_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c
index f6f319c997fc..85b5c0206dbd 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx65.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c
@@ -939,7 +939,6 @@ static struct platform_driver sdx65_pinctrl_driver = {
.of_match_table = sdx65_pinctrl_of_match,
},
.probe = sdx65_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx65_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx75.c b/drivers/pinctrl/qcom/pinctrl-sdx75.c
index 3cfe8c7f04df..ab13a3a57a83 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx75.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx75.c
@@ -1124,7 +1124,6 @@ static struct platform_driver sdx75_pinctrl_driver = {
.of_match_table = sdx75_pinctrl_of_match,
},
.probe = sdx75_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx75_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm4450.c b/drivers/pinctrl/qcom/pinctrl-sm4450.c
index 622f20e6f6f8..1ecdf1ab4f27 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm4450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm4450.c
@@ -994,7 +994,6 @@ static struct platform_driver sm4450_tlmm_driver = {
.of_match_table = sm4450_tlmm_of_match,
},
.probe = sm4450_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
MODULE_DEVICE_TABLE(of, sm4450_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c
index 4e91c75ad952..c273efa43996 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6115.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c
@@ -907,7 +907,6 @@ static struct platform_driver sm6115_tlmm_driver = {
.of_match_table = sm6115_tlmm_of_match,
},
.probe = sm6115_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6115_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6125.c b/drivers/pinctrl/qcom/pinctrl-sm6125.c
index c188842047aa..5092f20e0c1b 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6125.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6125.c
@@ -1266,7 +1266,6 @@ static struct platform_driver sm6125_tlmm_driver = {
.of_match_table = sm6125_tlmm_of_match,
},
.probe = sm6125_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6125_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c
index f3828c07b134..ba4686c86c54 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c
@@ -1373,7 +1373,6 @@ static struct platform_driver sm6350_tlmm_driver = {
.of_match_table = sm6350_tlmm_of_match,
},
.probe = sm6350_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6350_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
index c82c8516932e..49031571e65e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6375.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -1516,7 +1516,6 @@ static struct platform_driver sm6375_tlmm_driver = {
.of_match_table = sm6375_tlmm_of_match,
},
.probe = sm6375_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6375_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
index 3c7fd8af6635..6e89966cd70e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
@@ -1255,7 +1255,6 @@ static struct platform_driver sm7150_tlmm_driver = {
.of_match_table = sm7150_tlmm_of_match,
},
.probe = sm7150_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm7150_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c
index 01aea9c70b7a..794ed99463f7 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c
@@ -1542,7 +1542,6 @@ static struct platform_driver sm8150_pinctrl_driver = {
.of_match_table = sm8150_pinctrl_of_match,
},
.probe = sm8150_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8150_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index e9961a49ff98..fb6f005d64f5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1351,7 +1351,6 @@ static struct platform_driver sm8250_pinctrl_driver = {
.of_match_table = sm8250_pinctrl_of_match,
},
.probe = sm8250_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8250_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index 9c69458bd910..c8a3f39ce6f1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1642,7 +1642,6 @@ static struct platform_driver sm8350_tlmm_driver = {
.of_match_table = sm8350_tlmm_of_match,
},
.probe = sm8350_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8350_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c
index d11bb1ee9e3d..f2e52d5a0f93 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c
@@ -1677,7 +1677,6 @@ static struct platform_driver sm8450_tlmm_driver = {
.of_match_table = sm8450_tlmm_of_match,
},
.probe = sm8450_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8450_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c
index 3c847d9cb5d9..1b4496cb39eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8550.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c
@@ -1762,7 +1762,6 @@ static struct platform_driver sm8550_tlmm_driver = {
.of_match_table = sm8550_tlmm_of_match,
},
.probe = sm8550_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8550_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650.c b/drivers/pinctrl/qcom/pinctrl-sm8650.c
index 104708252d12..449a0077f4b1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8650.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8650.c
@@ -1742,7 +1742,6 @@ static struct platform_driver sm8650_tlmm_driver = {
.of_match_table = sm8650_tlmm_of_match,
},
.probe = sm8650_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8650_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c
index b94fb4ee0ec3..8516693d1db5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8750.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c
@@ -1711,7 +1711,6 @@ static struct platform_driver sm8750_tlmm_driver = {
.of_match_table = sm8750_tlmm_of_match,
},
.probe = sm8750_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8750_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index c8ce61066070..bc082bfb52ef 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -764,14 +764,14 @@ static int pmic_gpio_get(struct gpio_chip *chip, unsigned pin)
return !!pad->out_value;
}
-static void pmic_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+static int pmic_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct pmic_gpio_state *state = gpiochip_get_data(chip);
unsigned long config;
config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
- pmic_gpio_config_set(state->ctrl, pin, &config, 1);
+ return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
}
static int pmic_gpio_of_xlate(struct gpio_chip *chip,
@@ -802,7 +802,7 @@ static const struct gpio_chip pmic_gpio_gpio_template = {
.direction_input = pmic_gpio_direction_input,
.direction_output = pmic_gpio_direction_output,
.get = pmic_gpio_get,
- .set = pmic_gpio_set,
+ .set_rv = pmic_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_gpio_of_xlate,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 7b28c5fb2402..ba9084978f90 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -600,14 +600,14 @@ static int pmic_mpp_get(struct gpio_chip *chip, unsigned pin)
return !!pad->out_value;
}
-static void pmic_mpp_set(struct gpio_chip *chip, unsigned pin, int value)
+static int pmic_mpp_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct pmic_mpp_state *state = gpiochip_get_data(chip);
unsigned long config;
config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
- pmic_mpp_config_set(state->ctrl, pin, &config, 1);
+ return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
}
static int pmic_mpp_of_xlate(struct gpio_chip *chip,
@@ -638,7 +638,7 @@ static const struct gpio_chip pmic_mpp_gpio_template = {
.direction_input = pmic_mpp_direction_input,
.direction_output = pmic_mpp_direction_output,
.get = pmic_mpp_get,
- .set = pmic_mpp_set,
+ .set_rv = pmic_mpp_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_mpp_of_xlate,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 82679417e25f..3a8014ebf064 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -507,7 +507,8 @@ static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
return ret;
}
-static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pm8xxx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
@@ -519,7 +520,7 @@ static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
val |= pin->open_drain << 1;
val |= pin->output_value;
- pm8xxx_write_bank(pctrl, pin, 1, val);
+ return pm8xxx_write_bank(pctrl, pin, 1, val);
}
static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
@@ -596,7 +597,7 @@ static const struct gpio_chip pm8xxx_gpio_template = {
.direction_input = pm8xxx_gpio_direction_input,
.direction_output = pm8xxx_gpio_direction_output,
.get = pm8xxx_gpio_get,
- .set = pm8xxx_gpio_set,
+ .set_rv = pm8xxx_gpio_set,
.of_xlate = pm8xxx_gpio_of_xlate,
.dbg_show = pm8xxx_gpio_dbg_show,
.owner = THIS_MODULE,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 4841bbfe4864..087c37d304fc 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -511,14 +511,15 @@ static int pm8xxx_mpp_get(struct gpio_chip *chip, unsigned offset)
return ret;
}
-static void pm8xxx_mpp_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pm8xxx_mpp_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct pm8xxx_mpp *pctrl = gpiochip_get_data(chip);
struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
pin->output_value = !!value;
- pm8xxx_mpp_update(pctrl, pin);
+ return pm8xxx_mpp_update(pctrl, pin);
}
static int pm8xxx_mpp_of_xlate(struct gpio_chip *chip,
@@ -633,7 +634,7 @@ static const struct gpio_chip pm8xxx_mpp_template = {
.direction_input = pm8xxx_mpp_direction_input,
.direction_output = pm8xxx_mpp_direction_output,
.get = pm8xxx_mpp_get,
- .set = pm8xxx_mpp_set,
+ .set_rv = pm8xxx_mpp_set,
.of_xlate = pm8xxx_mpp_of_xlate,
.dbg_show = pm8xxx_mpp_dbg_show,
.owner = THIS_MODULE,
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
index 419cb8facb2f..d4b215f34c39 100644
--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
@@ -1861,7 +1861,6 @@ static struct platform_driver x1e80100_pinctrl_driver = {
.of_match_table = x1e80100_pinctrl_of_match,
},
.probe = x1e80100_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init x1e80100_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/tlmm-test.c b/drivers/pinctrl/qcom/tlmm-test.c
index fd02bf3a76cb..7b99e89e0f67 100644
--- a/drivers/pinctrl/qcom/tlmm-test.c
+++ b/drivers/pinctrl/qcom/tlmm-test.c
@@ -547,6 +547,7 @@ static int tlmm_test_init(struct kunit *test)
struct tlmm_test_priv *priv;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
atomic_set(&priv->intr_count, 0);
atomic_set(&priv->thread_count, 0);
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 3c18d908b21e..e16034fc1bbf 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -42,6 +42,7 @@ config PINCTRL_RENESAS
select PINCTRL_RZG2L if ARCH_RZG2L
select PINCTRL_RZV2M if ARCH_R9A09G011
select PINCTRL_RZG2L if ARCH_R9A09G047
+ select PINCTRL_RZG2L if ARCH_R9A09G056
select PINCTRL_RZG2L if ARCH_R9A09G057
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index c72e250f4a15..78fa08ff0faa 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2230,135 +2230,146 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = {
PIN_CFG_IO_VMC_SD1)) },
};
-static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = {
- { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) },
- { "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN)) },
- { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_NOD)) },
- { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_NOD)) },
- { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_CKP", RZG2L_SINGLE_PIN_PACK(0x7, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "XSPI0_CKN", RZG2L_SINGLE_PIN_PACK(0x7, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "XSPI0_CS0N", RZG2L_SINGLE_PIN_PACK(0x7, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "XSPI0_DS", RZG2L_SINGLE_PIN_PACK(0x7, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_RESET0N", RZG2L_SINGLE_PIN_PACK(0x7, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "XSPI0_RSTO0N", RZG2L_SINGLE_PIN_PACK(0x7, 5, (PIN_CFG_PUPD)) },
- { "XSPI0_INT0N", RZG2L_SINGLE_PIN_PACK(0x7, 6, (PIN_CFG_PUPD)) },
- { "XSPI0_ECS0N", RZG2L_SINGLE_PIN_PACK(0x7, 7, (PIN_CFG_PUPD)) },
- { "XSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0x8, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0x8, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0x8, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0x8, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO4", RZG2L_SINGLE_PIN_PACK(0x8, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO5", RZG2L_SINGLE_PIN_PACK(0x8, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO6", RZG2L_SINGLE_PIN_PACK(0x8, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "XSPI0_IO7", RZG2L_SINGLE_PIN_PACK(0x8, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1CLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "SD1CMD", RZG2L_SINGLE_PIN_PACK(0xb, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1DAT0", RZG2L_SINGLE_PIN_PACK(0xc, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1DAT1", RZG2L_SINGLE_PIN_PACK(0xc, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1DAT2", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "SD1DAT3", RZG2L_SINGLE_PIN_PACK(0xc, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "PCIE0_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "PCIE1_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
- { "ET0_MDIO", RZG2L_SINGLE_PIN_PACK(0xf, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "ET0_MDC", RZG2L_SINGLE_PIN_PACK(0xf, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_PUPD)) },
- { "ET0_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_TXER", RZG2L_SINGLE_PIN_PACK(0x10, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_RXER", RZG2L_SINGLE_PIN_PACK(0x10, 3, (PIN_CFG_PUPD)) },
- { "ET0_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 4, (PIN_CFG_PUPD)) },
- { "ET0_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "ET0_CRS", RZG2L_SINGLE_PIN_PACK(0x10, 6, (PIN_CFG_PUPD)) },
- { "ET0_COL", RZG2L_SINGLE_PIN_PACK(0x10, 7, (PIN_CFG_PUPD)) },
- { "ET0_TXD0", RZG2L_SINGLE_PIN_PACK(0x11, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_TXD1", RZG2L_SINGLE_PIN_PACK(0x11, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_TXD2", RZG2L_SINGLE_PIN_PACK(0x11, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_TXD3", RZG2L_SINGLE_PIN_PACK(0x11, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET0_RXD0", RZG2L_SINGLE_PIN_PACK(0x11, 4, (PIN_CFG_PUPD)) },
- { "ET0_RXD1", RZG2L_SINGLE_PIN_PACK(0x11, 5, (PIN_CFG_PUPD)) },
- { "ET0_RXD2", RZG2L_SINGLE_PIN_PACK(0x11, 6, (PIN_CFG_PUPD)) },
- { "ET0_RXD3", RZG2L_SINGLE_PIN_PACK(0x11, 7, (PIN_CFG_PUPD)) },
- { "ET1_MDIO", RZG2L_SINGLE_PIN_PACK(0x12, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_IEN | PIN_CFG_PUPD)) },
- { "ET1_MDC", RZG2L_SINGLE_PIN_PACK(0x12, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x13, 0, (PIN_CFG_PUPD)) },
- { "ET1_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x13, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+static const struct {
+ struct rzg2l_dedicated_configs common[77];
+ struct rzg2l_dedicated_configs pcie1[1];
+} rzv2h_dedicated_pins = {
+ .common = {
+ { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) },
+ { "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN)) },
+ { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_NOD)) },
+ { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_CKP", RZG2L_SINGLE_PIN_PACK(0x7, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_CKN", RZG2L_SINGLE_PIN_PACK(0x7, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_CS0N", RZG2L_SINGLE_PIN_PACK(0x7, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_DS", RZG2L_SINGLE_PIN_PACK(0x7, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_RESET0N", RZG2L_SINGLE_PIN_PACK(0x7, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "XSPI0_RSTO0N", RZG2L_SINGLE_PIN_PACK(0x7, 5, (PIN_CFG_PUPD)) },
+ { "XSPI0_INT0N", RZG2L_SINGLE_PIN_PACK(0x7, 6, (PIN_CFG_PUPD)) },
+ { "XSPI0_ECS0N", RZG2L_SINGLE_PIN_PACK(0x7, 7, (PIN_CFG_PUPD)) },
+ { "XSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0x8, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0x8, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0x8, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0x8, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO4", RZG2L_SINGLE_PIN_PACK(0x8, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO5", RZG2L_SINGLE_PIN_PACK(0x8, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO6", RZG2L_SINGLE_PIN_PACK(0x8, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "XSPI0_IO7", RZG2L_SINGLE_PIN_PACK(0x8, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1CLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) },
+ { "SD1CMD", RZG2L_SINGLE_PIN_PACK(0xb, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT0", RZG2L_SINGLE_PIN_PACK(0xc, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT1", RZG2L_SINGLE_PIN_PACK(0xc, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT2", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "SD1DAT3", RZG2L_SINGLE_PIN_PACK(0xc, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "PCIE0_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 0, (PIN_CFG_IOLH_RZV2H |
+ PIN_CFG_SR)) },
+ { "ET0_MDIO", RZG2L_SINGLE_PIN_PACK(0xf, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "ET0_MDC", RZG2L_SINGLE_PIN_PACK(0xf, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
PIN_CFG_PUPD)) },
- { "ET1_TXER", RZG2L_SINGLE_PIN_PACK(0x13, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_RXER", RZG2L_SINGLE_PIN_PACK(0x13, 3, (PIN_CFG_PUPD)) },
- { "ET1_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 4, (PIN_CFG_PUPD)) },
- { "ET1_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD | PIN_CFG_OEN)) },
- { "ET1_CRS", RZG2L_SINGLE_PIN_PACK(0x13, 6, (PIN_CFG_PUPD)) },
- { "ET1_COL", RZG2L_SINGLE_PIN_PACK(0x13, 7, (PIN_CFG_PUPD)) },
- { "ET1_TXD0", RZG2L_SINGLE_PIN_PACK(0x14, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_TXD1", RZG2L_SINGLE_PIN_PACK(0x14, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_TXD2", RZG2L_SINGLE_PIN_PACK(0x14, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_TXD3", RZG2L_SINGLE_PIN_PACK(0x14, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
- PIN_CFG_PUPD)) },
- { "ET1_RXD0", RZG2L_SINGLE_PIN_PACK(0x14, 4, (PIN_CFG_PUPD)) },
- { "ET1_RXD1", RZG2L_SINGLE_PIN_PACK(0x14, 5, (PIN_CFG_PUPD)) },
- { "ET1_RXD2", RZG2L_SINGLE_PIN_PACK(0x14, 6, (PIN_CFG_PUPD)) },
- { "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) },
+ { "ET0_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_PUPD)) },
+ { "ET0_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_RZV2H |
+ PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXER", RZG2L_SINGLE_PIN_PACK(0x10, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_RXER", RZG2L_SINGLE_PIN_PACK(0x10, 3, (PIN_CFG_PUPD)) },
+ { "ET0_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 4, (PIN_CFG_PUPD)) },
+ { "ET0_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "ET0_CRS", RZG2L_SINGLE_PIN_PACK(0x10, 6, (PIN_CFG_PUPD)) },
+ { "ET0_COL", RZG2L_SINGLE_PIN_PACK(0x10, 7, (PIN_CFG_PUPD)) },
+ { "ET0_TXD0", RZG2L_SINGLE_PIN_PACK(0x11, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD1", RZG2L_SINGLE_PIN_PACK(0x11, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD2", RZG2L_SINGLE_PIN_PACK(0x11, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_TXD3", RZG2L_SINGLE_PIN_PACK(0x11, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET0_RXD0", RZG2L_SINGLE_PIN_PACK(0x11, 4, (PIN_CFG_PUPD)) },
+ { "ET0_RXD1", RZG2L_SINGLE_PIN_PACK(0x11, 5, (PIN_CFG_PUPD)) },
+ { "ET0_RXD2", RZG2L_SINGLE_PIN_PACK(0x11, 6, (PIN_CFG_PUPD)) },
+ { "ET0_RXD3", RZG2L_SINGLE_PIN_PACK(0x11, 7, (PIN_CFG_PUPD)) },
+ { "ET1_MDIO", RZG2L_SINGLE_PIN_PACK(0x12, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_IEN | PIN_CFG_PUPD)) },
+ { "ET1_MDC", RZG2L_SINGLE_PIN_PACK(0x12, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x13, 0, (PIN_CFG_PUPD)) },
+ { "ET1_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x13, 1, (PIN_CFG_IOLH_RZV2H |
+ PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXER", RZG2L_SINGLE_PIN_PACK(0x13, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXER", RZG2L_SINGLE_PIN_PACK(0x13, 3, (PIN_CFG_PUPD)) },
+ { "ET1_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 4, (PIN_CFG_PUPD)) },
+ { "ET1_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD | PIN_CFG_OEN)) },
+ { "ET1_CRS", RZG2L_SINGLE_PIN_PACK(0x13, 6, (PIN_CFG_PUPD)) },
+ { "ET1_COL", RZG2L_SINGLE_PIN_PACK(0x13, 7, (PIN_CFG_PUPD)) },
+ { "ET1_TXD0", RZG2L_SINGLE_PIN_PACK(0x14, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD1", RZG2L_SINGLE_PIN_PACK(0x14, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD2", RZG2L_SINGLE_PIN_PACK(0x14, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_TXD3", RZG2L_SINGLE_PIN_PACK(0x14, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR |
+ PIN_CFG_PUPD)) },
+ { "ET1_RXD0", RZG2L_SINGLE_PIN_PACK(0x14, 4, (PIN_CFG_PUPD)) },
+ { "ET1_RXD1", RZG2L_SINGLE_PIN_PACK(0x14, 5, (PIN_CFG_PUPD)) },
+ { "ET1_RXD2", RZG2L_SINGLE_PIN_PACK(0x14, 6, (PIN_CFG_PUPD)) },
+ { "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) },
+ },
+ .pcie1 = {
+ { "PCIE1_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 1, (PIN_CFG_IOLH_RZV2H |
+ PIN_CFG_SR)) },
+ },
};
static struct rzg2l_dedicated_configs rzg3e_dedicated_pins[] = {
@@ -3349,13 +3360,37 @@ static struct rzg2l_pinctrl_data r9a09g047_data = {
.bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
+static struct rzg2l_pinctrl_data r9a09g056_data = {
+ .port_pins = rzv2h_gpio_names,
+ .port_pin_configs = r9a09g057_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g057_gpio_configs),
+ .dedicated_pins = rzv2h_dedicated_pins.common,
+ .n_port_pins = ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT,
+ .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins.common),
+ .hwcfg = &rzv2h_hwcfg,
+ .variable_pin_cfg = r9a09g057_variable_pin_cfg,
+ .n_variable_pin_cfg = ARRAY_SIZE(r9a09g057_variable_pin_cfg),
+ .num_custom_params = ARRAY_SIZE(renesas_rzv2h_custom_bindings),
+ .custom_params = renesas_rzv2h_custom_bindings,
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = renesas_rzv2h_conf_items,
+#endif
+ .pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
+ .pmc_writeb = &rzv2h_pmc_writeb,
+ .oen_read = &rzv2h_oen_read,
+ .oen_write = &rzv2h_oen_write,
+ .hw_to_bias_param = &rzv2h_hw_to_bias_param,
+ .bias_param_to_hw = &rzv2h_bias_param_to_hw,
+};
+
static struct rzg2l_pinctrl_data r9a09g057_data = {
.port_pins = rzv2h_gpio_names,
.port_pin_configs = r9a09g057_gpio_configs,
.n_ports = ARRAY_SIZE(r9a09g057_gpio_configs),
- .dedicated_pins = rzv2h_dedicated_pins,
+ .dedicated_pins = rzv2h_dedicated_pins.common,
.n_port_pins = ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT,
- .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins),
+ .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins.common) +
+ ARRAY_SIZE(rzv2h_dedicated_pins.pcie1),
.hwcfg = &rzv2h_hwcfg,
.variable_pin_cfg = r9a09g057_variable_pin_cfg,
.n_variable_pin_cfg = ARRAY_SIZE(r9a09g057_variable_pin_cfg),
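For context (not part of this patch): r9a09g057_data points .dedicated_pins at rzv2h_dedicated_pins.common yet counts both arrays, so it relies on .pcie1 being laid out directly after .common inside the same struct. A minimal sketch of that layout trick, using hypothetical names:

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct pin_entry { const char *name; unsigned long cfg; };

static const struct {
	struct pin_entry common[2];	/* pins present on every variant */
	struct pin_entry pcie1[1];	/* only on SoCs with a PCIe1 block */
} pins = {
	.common	= { { "ET0_CRS", 0 }, { "ET0_COL", 0 } },
	.pcie1	= { { "PCIE1_RSTOUTB", 0 } },
};

/* R9A09G056: only the shared pins. */
static const struct pin_entry *g056_pins = pins.common;
static const size_t g056_n = ARRAY_SIZE(pins.common);

/*
 * R9A09G057: shared pins plus PCIe1. Iterating g057_n entries from
 * pins.common walks into pins.pcie1, which works because adjacent
 * arrays of the same element type leave no padding between them.
 */
static const struct pin_entry *g057_pins = pins.common;
static const size_t g057_n = ARRAY_SIZE(pins.common) + ARRAY_SIZE(pins.pcie1);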
@@ -3390,6 +3425,10 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = {
.data = &r9a09g047_data,
},
{
+ .compatible = "renesas,r9a09g056-pinctrl",
+ .data = &r9a09g056_data,
+ },
+ {
.compatible = "renesas,r9a09g057-pinctrl",
.data = &r9a09g057_data,
},
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index dd07720e32cc..9fd894729a7b 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -1419,8 +1419,8 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
.pin_banks = exynosautov920_pin_banks0,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks0),
.eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
.retention_data = &exynosautov920_retention_data,
}, {
/* pin-controller instance 1 AUD data */
@@ -1431,43 +1431,43 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
.pin_banks = exynosautov920_pin_banks2,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks2),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 3 HSI1 data */
.pin_banks = exynosautov920_pin_banks3,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks3),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 4 HSI2 data */
.pin_banks = exynosautov920_pin_banks4,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks4),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 5 HSI2UFS data */
.pin_banks = exynosautov920_pin_banks5,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks5),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 6 PERIC0 data */
.pin_banks = exynosautov920_pin_banks6,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks6),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
}, {
/* pin-controller instance 7 PERIC1 data */
.pin_banks = exynosautov920_pin_banks7,
.nr_banks = ARRAY_SIZE(exynosautov920_pin_banks7),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = exynosautov920_pinctrl_suspend,
+ .resume = exynosautov920_pinctrl_resume,
},
};
@@ -1762,15 +1762,15 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
.pin_banks = gs101_pin_alive,
.nr_banks = ARRAY_SIZE(gs101_pin_alive),
.eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (FAR_ALIVE) */
.pin_banks = gs101_pin_far_alive,
.nr_banks = ARRAY_SIZE(gs101_pin_far_alive),
.eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (GSACORE) */
.pin_banks = gs101_pin_gsacore,
@@ -1784,29 +1784,29 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
.pin_banks = gs101_pin_peric0,
.nr_banks = ARRAY_SIZE(gs101_pin_peric0),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (PERIC1) */
.pin_banks = gs101_pin_peric1,
.nr_banks = ARRAY_SIZE(gs101_pin_peric1),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (HSI1) */
.pin_banks = gs101_pin_hsi1,
.nr_banks = ARRAY_SIZE(gs101_pin_hsi1),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
}, {
/* pin banks of gs101 pin-controller (HSI2) */
.pin_banks = gs101_pin_hsi2,
.nr_banks = ARRAY_SIZE(gs101_pin_hsi2),
.eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
+ .suspend = gs101_pinctrl_suspend,
+ .resume = gs101_pinctrl_resume,
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 42093bae8bb7..f3e1c11abe55 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -370,6 +370,37 @@ struct exynos_eint_gpio_save {
u32 eint_mask;
};
+static void exynos_eint_update_flt_reg(void __iomem *reg, int cnt, int con)
+{
+ unsigned int val, shift;
+ int i;
+
+ val = readl(reg);
+ for (i = 0; i < cnt; i++) {
+ shift = i * EXYNOS_FLTCON_LEN;
+ val &= ~(EXYNOS_FLTCON_DIGITAL << shift);
+ val |= con << shift;
+ }
+ writel(val, reg);
+}
+
+/*
+ * Set the desired filter (digital or analog delay) and enable it for
+ * every pin in the bank. Note that the filter selection bitfield is
+ * only found on alive banks. The filter determines to what extent
+ * signal fluctuations received through the pad are considered glitches.
+ */
+static void exynos_eint_set_filter(struct samsung_pin_bank *bank, int filter)
+{
+ unsigned int off = EXYNOS_GPIO_EFLTCON_OFFSET + bank->eint_fltcon_offset;
+ void __iomem *reg = bank->drvdata->virt_base + off;
+ unsigned int con = EXYNOS_FLTCON_EN | filter;
+
+ for (int n = 0; n < bank->nr_pins; n += 4)
+ exynos_eint_update_flt_reg(reg + n,
+ min(bank->nr_pins - n, 4), con);
+}
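A worked example of the two helpers above, modeling the register word as a local variable and assuming an 8-pin alive bank whose FLT_WIDTH fields are already programmed to 0x3f:

/*
 * exynos_eint_set_filter(bank, EXYNOS_FLTCON_DIGITAL) updates two
 * 32-bit FLTCON words, at fltcon_offset + 0 (pins 0-3) and + 4
 * (pins 4-7), passing con = EXYNOS_FLTCON_EN | EXYNOS_FLTCON_DIGITAL.
 */
unsigned int con = 0x80 | 0x40;	/* FLT_EN | FLT_SEL (digital) */
unsigned int val = 0x3f3f3f3f;	/* widths programmed, filters disabled */
unsigned int shift;
int i;

for (i = 0; i < 4; i++) {
	shift = i * 8;			/* i * EXYNOS_FLTCON_LEN */
	val &= ~(0x40u << shift);	/* clear FLT_SEL only */
	val |= con << shift;		/* enable + select digital filter */
}
/* val is now 0xffffffff; FLT_WIDTH survives since only FLT_SEL is cleared */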
+
/*
* exynos_eint_gpio_init() - setup handling of external gpio interrupts.
* @d: driver data of samsung pinctrl driver.
@@ -762,153 +793,190 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
return 0;
}
-static void exynos_pinctrl_suspend_bank(
- struct samsung_pinctrl_drv_data *drvdata,
- struct samsung_pin_bank *bank)
+static void exynos_set_wakeup(struct samsung_pin_bank *bank)
{
- struct exynos_eint_gpio_save *save = bank->soc_priv;
- const void __iomem *regs = bank->eint_base;
+ struct exynos_irq_chip *irq_chip;
- if (clk_enable(bank->drvdata->pclk)) {
- dev_err(bank->gpio_chip.parent,
- "unable to enable clock for saving state\n");
- return;
+ if (bank->irq_chip) {
+ irq_chip = bank->irq_chip;
+ irq_chip->set_eint_wakeup_mask(bank->drvdata, irq_chip);
}
-
- save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
- + bank->eint_offset);
- save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset);
- save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset + 4);
- save->eint_mask = readl(regs + bank->irq_chip->eint_mask
- + bank->eint_offset);
-
- clk_disable(bank->drvdata->pclk);
-
- pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
- pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
- pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
- pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
-static void exynosauto_pinctrl_suspend_bank(struct samsung_pinctrl_drv_data *drvdata,
- struct samsung_pin_bank *bank)
+void exynos_pinctrl_suspend(struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
const void __iomem *regs = bank->eint_base;
- if (clk_enable(bank->drvdata->pclk)) {
- dev_err(bank->gpio_chip.parent,
- "unable to enable clock for saving state\n");
- return;
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+ save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset);
+ save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4);
+ save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
+
+ pr_debug("%s: save con %#010x\n",
+ bank->name, save->eint_con);
+ pr_debug("%s: save fltcon0 %#010x\n",
+ bank->name, save->eint_fltcon0);
+ pr_debug("%s: save fltcon1 %#010x\n",
+ bank->name, save->eint_fltcon1);
+ pr_debug("%s: save mask %#010x\n",
+ bank->name, save->eint_mask);
+ } else if (bank->eint_type == EINT_TYPE_WKUP) {
+ exynos_set_wakeup(bank);
}
-
- save->eint_con = readl(regs + bank->pctl_offset + bank->eint_con_offset);
- save->eint_mask = readl(regs + bank->pctl_offset + bank->eint_mask_offset);
-
- clk_disable(bank->drvdata->pclk);
-
- pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
- pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
-void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
+void gs101_pinctrl_suspend(struct samsung_pin_bank *bank)
{
- struct samsung_pin_bank *bank = drvdata->pin_banks;
- struct exynos_irq_chip *irq_chip = NULL;
- int i;
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
+ const void __iomem *regs = bank->eint_base;
- for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
- if (bank->eint_type == EINT_TYPE_GPIO) {
- if (bank->eint_con_offset)
- exynosauto_pinctrl_suspend_bank(drvdata, bank);
- else
- exynos_pinctrl_suspend_bank(drvdata, bank);
- }
- else if (bank->eint_type == EINT_TYPE_WKUP) {
- if (!irq_chip) {
- irq_chip = bank->irq_chip;
- irq_chip->set_eint_wakeup_mask(drvdata,
- irq_chip);
- }
- }
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+
+ save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + bank->eint_fltcon_offset);
+
+ /* fltcon1 register only exists for pins 4-7 */
+ if (bank->nr_pins > 4)
+ save->eint_fltcon1 = readl(regs +
+ EXYNOS_GPIO_EFLTCON_OFFSET
+ + bank->eint_fltcon_offset + 4);
+
+ save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
+
+ pr_debug("%s: save con %#010x\n",
+ bank->name, save->eint_con);
+ pr_debug("%s: save fltcon0 %#010x\n",
+ bank->name, save->eint_fltcon0);
+ if (bank->nr_pins > 4)
+ pr_debug("%s: save fltcon1 %#010x\n",
+ bank->name, save->eint_fltcon1);
+ pr_debug("%s: save mask %#010x\n",
+ bank->name, save->eint_mask);
+ } else if (bank->eint_type == EINT_TYPE_WKUP) {
+ exynos_set_wakeup(bank);
+ exynos_eint_set_filter(bank, EXYNOS_FLTCON_ANALOG);
}
}
-static void exynos_pinctrl_resume_bank(
- struct samsung_pinctrl_drv_data *drvdata,
- struct samsung_pin_bank *bank)
+void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
- void __iomem *regs = bank->eint_base;
+ const void __iomem *regs = bank->eint_base;
- if (clk_enable(bank->drvdata->pclk)) {
- dev_err(bank->gpio_chip.parent,
- "unable to enable clock for restoring state\n");
- return;
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ save->eint_con = readl(regs + bank->pctl_offset +
+ bank->eint_con_offset);
+ save->eint_mask = readl(regs + bank->pctl_offset +
+ bank->eint_mask_offset);
+ pr_debug("%s: save con %#010x\n",
+ bank->name, save->eint_con);
+ pr_debug("%s: save mask %#010x\n",
+ bank->name, save->eint_mask);
+ } else if (bank->eint_type == EINT_TYPE_WKUP) {
+ exynos_set_wakeup(bank);
}
+}
- pr_debug("%s: con %#010x => %#010x\n", bank->name,
- readl(regs + EXYNOS_GPIO_ECON_OFFSET
- + bank->eint_offset), save->eint_con);
- pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
- readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset), save->eint_fltcon0);
- pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
- readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset + 4), save->eint_fltcon1);
- pr_debug("%s: mask %#010x => %#010x\n", bank->name,
- readl(regs + bank->irq_chip->eint_mask
- + bank->eint_offset), save->eint_mask);
-
- writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
- + bank->eint_offset);
- writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset);
- writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
- + 2 * bank->eint_offset + 4);
- writel(save->eint_mask, regs + bank->irq_chip->eint_mask
- + bank->eint_offset);
+void gs101_pinctrl_resume(struct samsung_pin_bank *bank)
+{
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
- clk_disable(bank->drvdata->pclk);
+ void __iomem *regs = bank->eint_base;
+ void __iomem *eint_fltcfg0 = regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + bank->eint_fltcon_offset;
+
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ pr_debug("%s: con %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset), save->eint_con);
+
+ pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
+ readl(eint_fltcfg0), save->eint_fltcon0);
+
+ /* fltcon1 register only exists for pins 4-7 */
+ if (bank->nr_pins > 4)
+ pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
+ readl(eint_fltcfg0 + 4), save->eint_fltcon1);
+
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset), save->eint_mask);
+
+ writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+ writel(save->eint_fltcon0, eint_fltcfg0);
+
+ if (bank->nr_pins > 4)
+ writel(save->eint_fltcon1, eint_fltcfg0 + 4);
+ writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
+ } else if (bank->eint_type == EINT_TYPE_WKUP) {
+ exynos_eint_set_filter(bank, EXYNOS_FLTCON_DIGITAL);
+ }
}
-static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvdata,
- struct samsung_pin_bank *bank)
+void exynos_pinctrl_resume(struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
void __iomem *regs = bank->eint_base;
- if (clk_enable(bank->drvdata->pclk)) {
- dev_err(bank->gpio_chip.parent,
- "unable to enable clock for restoring state\n");
- return;
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ pr_debug("%s: con %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset), save->eint_con);
+ pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset), save->eint_fltcon0);
+ pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
+ readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4),
+ save->eint_fltcon1);
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset), save->eint_mask);
+
+ writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+ + bank->eint_offset);
+ writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset);
+ writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ + 2 * bank->eint_offset + 4);
+ writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
}
-
- pr_debug("%s: con %#010x => %#010x\n", bank->name,
- readl(regs + bank->pctl_offset + bank->eint_con_offset), save->eint_con);
- pr_debug("%s: mask %#010x => %#010x\n", bank->name,
- readl(regs + bank->pctl_offset + bank->eint_mask_offset), save->eint_mask);
-
- writel(save->eint_con, regs + bank->pctl_offset + bank->eint_con_offset);
- writel(save->eint_mask, regs + bank->pctl_offset + bank->eint_mask_offset);
-
- clk_disable(bank->drvdata->pclk);
}
-void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
+void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank)
{
- struct samsung_pin_bank *bank = drvdata->pin_banks;
- int i;
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
+ void __iomem *regs = bank->eint_base;
- for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
- if (bank->eint_type == EINT_TYPE_GPIO) {
- if (bank->eint_con_offset)
- exynosauto_pinctrl_resume_bank(drvdata, bank);
- else
- exynos_pinctrl_resume_bank(drvdata, bank);
- }
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ /* exynosautov920 has eint_con_offset for all but one bank */
+ if (!bank->eint_con_offset)
+ exynos_pinctrl_resume(bank);
+
+ pr_debug("%s: con %#010x => %#010x\n", bank->name,
+ readl(regs + bank->pctl_offset + bank->eint_con_offset),
+ save->eint_con);
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->pctl_offset +
+ bank->eint_mask_offset), save->eint_mask);
+
+ writel(save->eint_con,
+ regs + bank->pctl_offset + bank->eint_con_offset);
+ writel(save->eint_mask,
+ regs + bank->pctl_offset + bank->eint_mask_offset);
+ }
}
static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index b483270ddc53..362dc533186f 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -52,6 +52,26 @@
#define EXYNOS_EINT_MAX_PER_BANK 8
#define EXYNOS_EINT_NR_WKUP_EINT
+/*
+ * EINT filter configuration register (on alive banks) has
+ * the following layout.
+ *
+ * BitfieldName[PinNum][Bit:Bit]
+ * FLT_EN[3][31] FLT_SEL[3][30] FLT_WIDTH[3][29:24]
+ * FLT_EN[2][23] FLT_SEL[2][22] FLT_WIDTH[2][21:16]
+ * FLT_EN[1][15] FLT_SEL[1][14] FLT_WIDTH[1][13:8]
+ * FLT_EN[0][7] FLT_SEL[0][6] FLT_WIDTH[0][5:0]
+ *
+ * FLT_EN 0x0 = Disable, 0x1 = Enable
+ * FLT_SEL 0x0 = Analog delay filter, 0x1 = Digital filter (clock count)
+ * FLT_WIDTH Filtering width. Valid when FLT_SEL is 0x1
+ */
+
+#define EXYNOS_FLTCON_EN BIT(7)
+#define EXYNOS_FLTCON_DIGITAL BIT(6)
+#define EXYNOS_FLTCON_ANALOG (0 << 6)
+#define EXYNOS_FLTCON_LEN 8
+
#define EXYNOS_PIN_BANK_EINTN(pins, reg, id) \
{ \
.type = &bank_type_off, \
@@ -240,8 +260,12 @@ struct exynos_muxed_weint_data {
int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d);
int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d);
-void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata);
-void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata);
+void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank);
+void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank);
+void exynos_pinctrl_suspend(struct samsung_pin_bank *bank);
+void exynos_pinctrl_resume(struct samsung_pin_bank *bank);
+void gs101_pinctrl_suspend(struct samsung_pin_bank *bank);
+void gs101_pinctrl_resume(struct samsung_pin_bank *bank);
struct samsung_retention_ctrl *
exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata,
const struct samsung_retention_data *data);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 2896eb2de2c0..fe1ac82b9d79 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -570,15 +570,18 @@ static void samsung_gpio_set_value(struct gpio_chip *gc,
}
/* gpiolib gpio_set callback function */
-static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int samsung_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
struct samsung_pinctrl_drv_data *drvdata = bank->drvdata;
unsigned long flags;
+ int ret;
- if (clk_enable(drvdata->pclk)) {
+ ret = clk_enable(drvdata->pclk);
+ if (ret) {
dev_err(drvdata->dev, "failed to enable clock\n");
- return;
+ return ret;
}
raw_spin_lock_irqsave(&bank->slock, flags);
@@ -586,6 +589,8 @@ static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
raw_spin_unlock_irqrestore(&bank->slock, flags);
clk_disable(drvdata->pclk);
+
+ return 0;
}
/* gpiolib gpio_get callback function */
@@ -1062,7 +1067,7 @@ static int samsung_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
static const struct gpio_chip samsung_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set = samsung_gpio_set,
+ .set_rv = samsung_gpio_set,
.get = samsung_gpio_get,
.direction_input = samsung_gpio_direction_input,
.direction_output = samsung_gpio_direction_output,
@@ -1333,6 +1338,7 @@ err_put_banks:
static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
{
struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
+ struct samsung_pin_bank *bank;
int i;
i = clk_enable(drvdata->pclk);
@@ -1343,7 +1349,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
}
for (i = 0; i < drvdata->nr_banks; i++) {
- struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
+ bank = &drvdata->pin_banks[i];
const void __iomem *reg = bank->pctl_base + bank->pctl_offset;
const u8 *offs = bank->type->reg_offset;
const u8 *widths = bank->type->fld_width;
@@ -1371,10 +1377,14 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
}
}
+ for (i = 0; i < drvdata->nr_banks; i++) {
+ bank = &drvdata->pin_banks[i];
+ if (drvdata->suspend)
+ drvdata->suspend(bank);
+ }
+
clk_disable(drvdata->pclk);
- if (drvdata->suspend)
- drvdata->suspend(drvdata);
if (drvdata->retention_ctrl && drvdata->retention_ctrl->enable)
drvdata->retention_ctrl->enable(drvdata);
@@ -1392,6 +1402,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
{
struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev);
+ struct samsung_pin_bank *bank;
int ret;
int i;
@@ -1406,11 +1417,14 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
return ret;
}
- if (drvdata->resume)
- drvdata->resume(drvdata);
+ for (i = 0; i < drvdata->nr_banks; i++) {
+ bank = &drvdata->pin_banks[i];
+ if (drvdata->resume)
+ drvdata->resume(bank);
+ }
for (i = 0; i < drvdata->nr_banks; i++) {
- struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
+ bank = &drvdata->pin_banks[i];
void __iomem *reg = bank->pctl_base + bank->pctl_offset;
const u8 *offs = bank->type->reg_offset;
const u8 *widths = bank->type->fld_width;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 3cf758df7d69..fcc57c244d16 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -285,8 +285,8 @@ struct samsung_pin_ctrl {
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
void (*pud_value_init)(struct samsung_pinctrl_drv_data *drvdata);
- void (*suspend)(struct samsung_pinctrl_drv_data *);
- void (*resume)(struct samsung_pinctrl_drv_data *);
+ void (*suspend)(struct samsung_pin_bank *bank);
+ void (*resume)(struct samsung_pin_bank *bank);
};
/**
@@ -335,8 +335,8 @@ struct samsung_pinctrl_drv_data {
struct samsung_retention_ctrl *retention_ctrl;
- void (*suspend)(struct samsung_pinctrl_drv_data *);
- void (*resume)(struct samsung_pinctrl_drv_data *);
+ void (*suspend)(struct samsung_pin_bank *bank);
+ void (*resume)(struct samsung_pin_bank *bank);
};
/**
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
index 67e867b04a02..9996b1c4a07e 100644
--- a/drivers/pinctrl/spacemit/pinctrl-k1.c
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Yixun Lan <dlan@gentoo.org> */
#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -721,6 +722,7 @@ static int spacemit_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spacemit_pinctrl *pctrl;
+ struct clk *func_clk, *bus_clk;
const struct spacemit_pinctrl_data *pctrl_data;
int ret;
@@ -739,6 +741,14 @@ static int spacemit_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(pctrl->regs))
return PTR_ERR(pctrl->regs);
+ func_clk = devm_clk_get_enabled(dev, "func");
+ if (IS_ERR(func_clk))
+ return dev_err_probe(dev, PTR_ERR(func_clk), "failed to get func clock\n");
+
+ bus_clk = devm_clk_get_enabled(dev, "bus");
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n");
+
pctrl->pdesc.name = dev_name(dev);
pctrl->pdesc.pins = pctrl_data->pins;
pctrl->pdesc.npins = pctrl_data->npins;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index cc0b4d1d7cff..ba49d48c3a1d 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -228,11 +228,14 @@ static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
}
-static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int stm32_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
__stm32_gpio_set(bank, offset, value);
+
+ return 0;
}
static int stm32_gpio_direction_output(struct gpio_chip *chip,
@@ -308,7 +311,7 @@ static const struct gpio_chip stm32_gpio_template = {
.request = stm32_gpio_request,
.free = pinctrl_gpio_free,
.get = stm32_gpio_get,
- .set = stm32_gpio_set,
+ .set_rv = stm32_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = stm32_gpio_direction_output,
.to_irq = stm32_gpio_to_irq,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
index 1833078f6877..4e34b0cd3b73 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
@@ -143,7 +143,7 @@ static struct sunxi_desc_pin *init_pins_table(struct device *dev,
*/
static int prepare_function_table(struct device *dev, struct device_node *pnode,
struct sunxi_desc_pin *pins, int npins,
- const u8 *irq_bank_muxes)
+ unsigned pin_base, const u8 *irq_bank_muxes)
{
struct device_node *node;
struct property *prop;
@@ -166,7 +166,7 @@ static int prepare_function_table(struct device *dev, struct device_node *pnode,
*/
for (i = 0; i < npins; i++) {
struct sunxi_desc_pin *pin = &pins[i];
- int bank = pin->pin.number / PINS_PER_BANK;
+ int bank = (pin->pin.number - pin_base) / PINS_PER_BANK;
if (irq_bank_muxes[bank]) {
pin->variant++;
@@ -211,7 +211,7 @@ static int prepare_function_table(struct device *dev, struct device_node *pnode,
last_bank = 0;
for (i = 0; i < npins; i++) {
struct sunxi_desc_pin *pin = &pins[i];
- int bank = pin->pin.number / PINS_PER_BANK;
+ int bank = (pin->pin.number - pin_base) / PINS_PER_BANK;
int lastfunc = pin->variant + 1;
int irq_mux = irq_bank_muxes[bank];
@@ -353,7 +353,7 @@ int sunxi_pinctrl_dt_table_init(struct platform_device *pdev,
return PTR_ERR(pins);
ret = prepare_function_table(&pdev->dev, pnode, pins, desc->npins,
- irq_bank_muxes);
+ desc->pin_base, irq_bank_muxes);
if (ret)
return ret;
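The subtraction matters on controllers whose pin numbering does not start at zero (desc->pin_base > 0). A small arithmetic sketch, assuming the usual sunxi numbering where PINS_PER_BANK is 32 and an R_PIO controller starts at PL0 = pin 352:

unsigned int number = 354;	/* PL2 */
unsigned int pin_base = 352;	/* first pin of this controller */

int bank_old = number / 32;			/* 11: a bank index this
						 * controller does not have */
int bank_new = (number - pin_base) / 32;	/* 0: the controller's own
						 * first bank */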
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f1c5a991cf7b..bf8612d72daa 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1646,10 +1646,9 @@ int sunxi_pinctrl_init_with_flags(struct platform_device *pdev,
}
}
- pctl->domain = irq_domain_add_linear(node,
- pctl->desc->irq_banks * IRQ_PER_BANK,
- &sunxi_pinctrl_irq_domain_ops,
- pctl);
+ pctl->domain = irq_domain_create_linear(of_fwnode_handle(node),
+ pctl->desc->irq_banks * IRQ_PER_BANK,
+ &sunxi_pinctrl_irq_domain_ops, pctl);
if (!pctl->domain) {
dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
ret = -ENOMEM;
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
index b71c07d84662..5e3de0df756b 100644
--- a/drivers/pinctrl/uniphier/Kconfig
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -3,7 +3,7 @@ menuconfig PINCTRL_UNIPHIER
bool "UniPhier SoC pinctrl drivers"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && MFD_SYSCON
- default y
+ default ARCH_UNIPHIER
select PINMUX
select GENERIC_PINCONF
diff --git a/drivers/platform/arm64/Kconfig b/drivers/platform/arm64/Kconfig
index 0abe5377891b..06288aebc559 100644
--- a/drivers/platform/arm64/Kconfig
+++ b/drivers/platform/arm64/Kconfig
@@ -6,7 +6,7 @@
menuconfig ARM64_PLATFORM_DEVICES
bool "ARM64 Platform-Specific Device Drivers"
depends on ARM64 || COMPILE_TEST
- default y
+ default ARM64
help
Say Y here to get to see options for platform-specific device drivers
for arm64 based devices, primarily EC-like device drivers.
diff --git a/drivers/platform/arm64/acer-aspire1-ec.c b/drivers/platform/arm64/acer-aspire1-ec.c
index 2df42406430d..438532a047e6 100644
--- a/drivers/platform/arm64/acer-aspire1-ec.c
+++ b/drivers/platform/arm64/acer-aspire1-ec.c
@@ -366,7 +366,8 @@ static const struct power_supply_desc aspire_ec_adp_psy_desc = {
* USB-C DP Alt mode HPD.
*/
-static int aspire_ec_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+static int aspire_ec_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
{
return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
}
@@ -451,9 +452,9 @@ static int aspire_ec_probe(struct i2c_client *client)
int ret;
u8 tmp;
- ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
- if (!ec)
- return -ENOMEM;
+ ec = devm_drm_bridge_alloc(dev, struct aspire_ec, bridge, &aspire_ec_bridge_funcs);
+ if (IS_ERR(ec))
+ return PTR_ERR(ec);
ec->client = client;
i2c_set_clientdata(client, ec);
@@ -496,7 +497,6 @@ static int aspire_ec_probe(struct i2c_client *client)
fwnode = device_get_named_child_node(dev, "connector");
if (fwnode) {
INIT_WORK(&ec->work, aspire_ec_bridge_update_hpd_work);
- ec->bridge.funcs = &aspire_ec_bridge_funcs;
ec->bridge.of_node = to_of_node(fwnode);
ec->bridge.ops = DRM_BRIDGE_OP_HPD;
ec->bridge.type = DRM_MODE_CONNECTOR_USB;
diff --git a/drivers/platform/arm64/huawei-gaokun-ec.c b/drivers/platform/arm64/huawei-gaokun-ec.c
index 97c2607f8d9f..7e5aa7ca2403 100644
--- a/drivers/platform/arm64/huawei-gaokun-ec.c
+++ b/drivers/platform/arm64/huawei-gaokun-ec.c
@@ -651,7 +651,7 @@ static int gaokun_ec_resume(struct device *dev)
break;
msleep(100); /* EC need time to resume */
- };
+ }
ec->suspended = false;
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 1b2f2bd09662..10941ac37305 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -155,13 +155,14 @@ config CROS_EC_LPC
module will be called cros_ec_lpcs.
config CROS_EC_PROTO
- bool
+ tristate
help
ChromeOS EC communication protocol helpers.
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
- depends on LEDS_CLASS && (ACPI || CROS_EC || MFD_CROS_EC_DEV)
+ depends on LEDS_CLASS
+ depends on MFD_CROS_EC_DEV || (MFD_CROS_EC_DEV=n && ACPI)
help
This option enables support for the keyboard backlight LEDs on
select Chrome OS systems.
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 1a5a484563cc..b981a1bb5bd8 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -25,7 +25,8 @@ endif
obj-$(CONFIG_CROS_EC_TYPEC) += cros-ec-typec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
-obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
+cros-ec-proto-objs := cros_ec_proto.o cros_ec_trace.o
+obj-$(CONFIG_CROS_EC_PROTO) += cros-ec-proto.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
diff --git a/drivers/platform/chrome/chromeos_of_hw_prober.c b/drivers/platform/chrome/chromeos_of_hw_prober.c
index c6992f5cdc76..f3cd612e5584 100644
--- a/drivers/platform/chrome/chromeos_of_hw_prober.c
+++ b/drivers/platform/chrome/chromeos_of_hw_prober.c
@@ -57,7 +57,9 @@ static int chromeos_i2c_component_prober(struct device *dev, const void *_data)
}
DEFINE_CHROMEOS_I2C_PROBE_DATA_DUMB_BY_TYPE(touchscreen);
+DEFINE_CHROMEOS_I2C_PROBE_DATA_DUMB_BY_TYPE(trackpad);
+DEFINE_CHROMEOS_I2C_PROBE_CFG_SIMPLE_BY_TYPE(touchscreen);
DEFINE_CHROMEOS_I2C_PROBE_CFG_SIMPLE_BY_TYPE(trackpad);
static const struct chromeos_i2c_probe_data chromeos_i2c_probe_hana_trackpad = {
@@ -75,6 +77,17 @@ static const struct chromeos_i2c_probe_data chromeos_i2c_probe_hana_trackpad = {
},
};
+static const struct chromeos_i2c_probe_data chromeos_i2c_probe_squirtle_touchscreen = {
+ .cfg = &chromeos_i2c_probe_simple_touchscreen_cfg,
+ .opts = &(const struct i2c_of_probe_simple_opts) {
+ .res_node_compatible = "elan,ekth6a12nay",
+ .supply_name = "vcc33",
+ .gpio_name = "reset",
+ .post_power_on_delay_ms = 10,
+ .post_gpio_config_delay_ms = 300,
+ },
+};
+
static const struct hw_prober_entry hw_prober_platforms[] = {
{
.compatible = "google,hana",
@@ -84,6 +97,26 @@ static const struct hw_prober_entry hw_prober_platforms[] = {
.compatible = "google,hana",
.prober = chromeos_i2c_component_prober,
.data = &chromeos_i2c_probe_hana_trackpad,
+ }, {
+ .compatible = "google,spherion",
+ .prober = chromeos_i2c_component_prober,
+ .data = &chromeos_i2c_probe_hana_trackpad,
+ }, {
+ .compatible = "google,squirtle",
+ .prober = chromeos_i2c_component_prober,
+ .data = &chromeos_i2c_probe_dumb_trackpad,
+ }, {
+ .compatible = "google,squirtle",
+ .prober = chromeos_i2c_component_prober,
+ .data = &chromeos_i2c_probe_squirtle_touchscreen,
+ }, {
+ .compatible = "google,steelix",
+ .prober = chromeos_i2c_component_prober,
+ .data = &chromeos_i2c_probe_dumb_trackpad,
+ }, {
+ .compatible = "google,voltorb",
+ .prober = chromeos_i2c_component_prober,
+ .data = &chromeos_i2c_probe_dumb_trackpad,
},
};
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 92ac9a2f9c88..d10f9561990c 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -207,22 +207,15 @@ static ssize_t cros_ec_pdinfo_read(struct file *file,
char read_buf[EC_USB_PD_MAX_PORTS * 40], *p = read_buf;
struct cros_ec_debugfs *debug_info = file->private_data;
struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
- struct {
- struct cros_ec_command msg;
- union {
- struct ec_response_usb_pd_control_v1 resp;
- struct ec_params_usb_pd_control params;
- };
- } __packed ec_buf;
- struct cros_ec_command *msg;
- struct ec_response_usb_pd_control_v1 *resp;
- struct ec_params_usb_pd_control *params;
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ MAX(sizeof(struct ec_response_usb_pd_control_v1),
+ sizeof(struct ec_params_usb_pd_control)));
+ struct ec_response_usb_pd_control_v1 *resp =
+ (struct ec_response_usb_pd_control_v1 *)msg->data;
+ struct ec_params_usb_pd_control *params =
+ (struct ec_params_usb_pd_control *)msg->data;
int i;
- msg = &ec_buf.msg;
- params = (struct ec_params_usb_pd_control *)msg->data;
- resp = (struct ec_response_usb_pd_control_v1 *)msg->data;
-
msg->command = EC_CMD_USB_PD_CONTROL;
msg->version = 1;
msg->insize = sizeof(*resp);
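For context (not part of the patch): DEFINE_RAW_FLEX(), from <linux/overflow.h>, declares an on-stack, zero-initialized instance of a structure ending in a flexible-array member, with room for the requested number of trailing elements. Because cros_ec_command.data is a u8 array, passing a sizeof() reserves that many payload bytes, which is what lets these conversions drop the hand-rolled __packed wrapper structs and their memset() calls. A minimal sketch:

#include <linux/overflow.h>

struct demo_cmd {
	unsigned int command;
	unsigned char data[];	/* flexible-array member */
};

static void demo(void)
{
	/* msg is a struct demo_cmd * backed by zeroed stack storage
	 * with 16 bytes reserved for msg->data */
	DEFINE_RAW_FLEX(struct demo_cmd, msg, data, 16);

	msg->command = 0x42;
	msg->data[0] = 0xab;
}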
@@ -253,17 +246,15 @@ static ssize_t cros_ec_pdinfo_read(struct file *file,
static bool cros_ec_uptime_is_supported(struct cros_ec_device *ec_dev)
{
- struct {
- struct cros_ec_command cmd;
- struct ec_response_uptime_info resp;
- } __packed msg = {};
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_uptime_info));
int ret;
- msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
- msg.cmd.insize = sizeof(msg.resp);
+ msg->command = EC_CMD_GET_UPTIME_INFO;
+ msg->insize = sizeof(struct ec_response_uptime_info);
- ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
- if (ret == -EPROTO && msg.cmd.result == EC_RES_INVALID_COMMAND)
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret == -EPROTO && msg->result == EC_RES_INVALID_COMMAND)
return false;
/* Other errors maybe a transient error, do not rule about support. */
@@ -275,20 +266,17 @@ static ssize_t cros_ec_uptime_read(struct file *file, char __user *user_buf,
{
struct cros_ec_debugfs *debug_info = file->private_data;
struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
- struct {
- struct cros_ec_command cmd;
- struct ec_response_uptime_info resp;
- } __packed msg = {};
- struct ec_response_uptime_info *resp;
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_uptime_info));
+ struct ec_response_uptime_info *resp =
+ (struct ec_response_uptime_info *)msg->data;
char read_buf[32];
int ret;
- resp = (struct ec_response_uptime_info *)&msg.resp;
-
- msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
- msg.cmd.insize = sizeof(*resp);
+ msg->command = EC_CMD_GET_UPTIME_INFO;
+ msg->insize = sizeof(*resp);
- ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
if (ret < 0)
return ret;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 877b107fee4b..3e94a0a82173 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -139,12 +139,10 @@ static int cros_ec_xfer_command(struct cros_ec_device *ec_dev, struct cros_ec_co
static int cros_ec_wait_until_complete(struct cros_ec_device *ec_dev, uint32_t *result)
{
- struct {
- struct cros_ec_command msg;
- struct ec_response_get_comms_status status;
- } __packed buf;
- struct cros_ec_command *msg = &buf.msg;
- struct ec_response_get_comms_status *status = &buf.status;
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_get_comms_status));
+ struct ec_response_get_comms_status *status =
+ (struct ec_response_get_comms_status *)msg->data;
int ret = 0, i;
msg->version = 0;
@@ -757,16 +755,13 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
static int get_next_event(struct cros_ec_device *ec_dev)
{
- struct {
- struct cros_ec_command msg;
- struct ec_response_get_next_event_v3 event;
- } __packed buf;
- struct cros_ec_command *msg = &buf.msg;
- struct ec_response_get_next_event_v3 *event = &buf.event;
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_get_next_event_v3));
+ struct ec_response_get_next_event_v3 *event =
+ (struct ec_response_get_next_event_v3 *)msg->data;
int cmd_version = ec_dev->mkbp_event_supported - 1;
u32 size;
- memset(msg, 0, sizeof(*msg));
if (ec_dev->suspended) {
dev_dbg(ec_dev->dev, "Device suspended.\n");
return -EHOSTDOWN;
@@ -1157,3 +1152,6 @@ int cros_ec_get_cmd_versions(struct cros_ec_device *ec_dev, u16 cmd)
return resp.version_mask;
}
EXPORT_SYMBOL_GPL(cros_ec_get_cmd_versions);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC communication protocol helpers");
diff --git a/drivers/platform/chrome/cros_ec_proto_test_util.h b/drivers/platform/chrome/cros_ec_proto_test_util.h
index 414002271c9c..b17239f052c2 100644
--- a/drivers/platform/chrome/cros_ec_proto_test_util.h
+++ b/drivers/platform/chrome/cros_ec_proto_test_util.h
@@ -13,7 +13,6 @@ struct ec_xfer_mock {
struct kunit *test;
/* input */
- struct cros_ec_command msg;
void *i_data;
/* output */
@@ -21,6 +20,10 @@ struct ec_xfer_mock {
int result;
void *o_data;
u32 o_data_len;
+
+ /* input */
+ /* Must be last - ends in a flexible-array member. */
+ struct cros_ec_command msg;
};
extern int cros_kunit_ec_xfer_mock_default_result;
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index d2228720991f..7678e3d05fd3 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -22,8 +22,10 @@
#define DRV_NAME "cros-ec-typec"
-#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
- DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
+#define DP_PORT_VDO (DP_CAP_DFP_D | DP_CAP_RECEPTACLE | \
+ DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | \
+ BIT(DP_PIN_ASSIGN_D) | \
+ BIT(DP_PIN_ASSIGN_E)))
static void cros_typec_role_switch_quirk(struct fwnode_handle *fwnode)
{
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
index fc27bd7fc4b9..f4c2282129f5 100644
--- a/drivers/platform/chrome/cros_kbd_led_backlight.c
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -137,16 +137,12 @@ static int
keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
enum led_brightness brightness)
{
- struct {
- struct cros_ec_command msg;
- struct ec_params_pwm_set_keyboard_backlight params;
- } __packed buf;
- struct ec_params_pwm_set_keyboard_backlight *params = &buf.params;
- struct cros_ec_command *msg = &buf.msg;
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_params_pwm_set_keyboard_backlight));
+ struct ec_params_pwm_set_keyboard_backlight *params =
+ (struct ec_params_pwm_set_keyboard_backlight *)msg->data;
struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
- memset(&buf, 0, sizeof(buf));
-
msg->command = EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT;
msg->outsize = sizeof(*params);
@@ -158,17 +154,13 @@ keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
static enum led_brightness
keyboard_led_get_brightness_ec_pwm(struct led_classdev *cdev)
{
- struct {
- struct cros_ec_command msg;
- struct ec_response_pwm_get_keyboard_backlight resp;
- } __packed buf;
- struct ec_response_pwm_get_keyboard_backlight *resp = &buf.resp;
- struct cros_ec_command *msg = &buf.msg;
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_pwm_get_keyboard_backlight));
+ struct ec_response_pwm_get_keyboard_backlight *resp =
+ (struct ec_response_pwm_get_keyboard_backlight *)msg->data;
struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
int ret;
- memset(&buf, 0, sizeof(buf));
-
msg->command = EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT;
msg->insize = sizeof(*resp);
diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
index 13e37b49d9d0..61cff5f7e02e 100644
--- a/drivers/platform/cznic/Kconfig
+++ b/drivers/platform/cznic/Kconfig
@@ -76,6 +76,23 @@ config TURRIS_OMNIA_MCU_TRNG
Say Y here to add support for the true random number generator
provided by CZ.NIC's Turris Omnia MCU.
+config TURRIS_OMNIA_MCU_KEYCTL
+ bool "Turris Omnia MCU ECDSA message signing"
+ default y
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+ depends on TURRIS_OMNIA_MCU_GPIO
+ select TURRIS_SIGNING_KEY
+ help
+ Say Y here to add support for ECDSA message signing with board private
+ key (if available on the MCU). This is exposed via the keyctl()
+ syscall.
+
endif # TURRIS_OMNIA_MCU
+config TURRIS_SIGNING_KEY
+ tristate
+ depends on KEYS
+ depends on ASYMMETRIC_KEY_TYPE
+
endif # CZNIC_PLATFORMS
diff --git a/drivers/platform/cznic/Makefile b/drivers/platform/cznic/Makefile
index ce6d997f34d6..ccad7bec82e1 100644
--- a/drivers/platform/cznic/Makefile
+++ b/drivers/platform/cznic/Makefile
@@ -3,6 +3,9 @@
obj-$(CONFIG_TURRIS_OMNIA_MCU) += turris-omnia-mcu.o
turris-omnia-mcu-y := turris-omnia-mcu-base.o
turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_GPIO) += turris-omnia-mcu-gpio.o
+turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_KEYCTL) += turris-omnia-mcu-keyctl.o
turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP) += turris-omnia-mcu-sys-off-wakeup.o
turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_TRNG) += turris-omnia-mcu-trng.o
turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_WATCHDOG) += turris-omnia-mcu-watchdog.o
+
+obj-$(CONFIG_TURRIS_SIGNING_KEY) += turris-signing-key.o
diff --git a/drivers/platform/cznic/turris-omnia-mcu-base.c b/drivers/platform/cznic/turris-omnia-mcu-base.c
index 770e680b96f9..e8fc0d7b3343 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-base.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-base.c
@@ -392,6 +392,10 @@ static int omnia_mcu_probe(struct i2c_client *client)
if (err)
return err;
+ err = omnia_mcu_register_keyctl(mcu);
+ if (err)
+ return err;
+
return omnia_mcu_register_trng(mcu);
}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
index 5f35f7c5d5d7..c2df24ea8686 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-gpio.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/devm-helpers.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -195,7 +196,7 @@ static const struct omnia_gpio omnia_gpios[64] = {
};
/* mapping from interrupts to indexes of GPIOs in the omnia_gpios array */
-const u8 omnia_int_to_gpio_idx[32] = {
+static const u8 omnia_int_to_gpio_idx[32] = {
[__bf_shf(OMNIA_INT_CARD_DET)] = 4,
[__bf_shf(OMNIA_INT_MSATA_IND)] = 5,
[__bf_shf(OMNIA_INT_USB30_OVC)] = 6,
@@ -1093,3 +1094,21 @@ int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
return 0;
}
+
+int omnia_mcu_request_irq(struct omnia_mcu *mcu, u32 spec,
+ irq_handler_t thread_fn, const char *devname)
+{
+ u8 irq_idx;
+ int irq;
+
+ if (!spec)
+ return -EINVAL;
+
+ irq_idx = omnia_int_to_gpio_idx[ffs(spec) - 1];
+ irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx));
+ if (irq < 0)
+ return irq;
+
+ return devm_request_threaded_irq(&mcu->client->dev, irq, NULL,
+ thread_fn, IRQF_ONESHOT, devname, mcu);
+}
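A note on the helper above: the MCU's interrupt sources are modeled as GPIOs, so the OMNIA_INT_* bit in @spec is translated to a GPIO index through omnia_int_to_gpio_idx[] and then to a Linux IRQ with gpiod_to_irq(). Callers pass their interrupt bit and a threaded handler, as the TRNG conversion below does:

	err = omnia_mcu_request_irq(mcu, OMNIA_INT_TRNG, omnia_trng_irq_handler,
				    "turris-omnia-mcu-trng");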
diff --git a/drivers/platform/cznic/turris-omnia-mcu-keyctl.c b/drivers/platform/cznic/turris-omnia-mcu-keyctl.c
new file mode 100644
index 000000000000..dc40f942f082
--- /dev/null
+++ b/drivers/platform/cznic/turris-omnia-mcu-keyctl.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CZ.NIC's Turris Omnia MCU ECDSA message signing via keyctl
+ *
+ * 2025 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <crypto/sha2.h>
+#include <linux/cleanup.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/key.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/turris-omnia-mcu-interface.h>
+#include <linux/turris-signing-key.h>
+#include "turris-omnia-mcu.h"
+
+static irqreturn_t omnia_msg_signed_irq_handler(int irq, void *dev_id)
+{
+ u8 reply[1 + OMNIA_MCU_CRYPTO_SIGNATURE_LEN];
+ struct omnia_mcu *mcu = dev_id;
+ int err;
+
+ err = omnia_cmd_read(mcu->client, OMNIA_CMD_CRYPTO_COLLECT_SIGNATURE,
+ reply, sizeof(reply));
+ if (!err && reply[0] != OMNIA_MCU_CRYPTO_SIGNATURE_LEN)
+ err = -EIO;
+
+ guard(mutex)(&mcu->sign_lock);
+
+ if (mcu->sign_requested) {
+ mcu->sign_err = err;
+ if (!err)
+ memcpy(mcu->signature, &reply[1],
+ OMNIA_MCU_CRYPTO_SIGNATURE_LEN);
+ mcu->sign_requested = false;
+ complete(&mcu->msg_signed);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int omnia_mcu_sign(const struct key *key, const void *msg,
+ void *signature)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(turris_signing_key_get_dev(key));
+ u8 cmd[1 + SHA256_DIGEST_SIZE], reply;
+ int err;
+
+ scoped_guard(mutex, &mcu->sign_lock) {
+ if (mcu->sign_requested)
+ return -EBUSY;
+
+ cmd[0] = OMNIA_CMD_CRYPTO_SIGN_MESSAGE;
+ memcpy(&cmd[1], msg, SHA256_DIGEST_SIZE);
+
+ err = omnia_cmd_write_read(mcu->client, cmd, sizeof(cmd),
+ &reply, 1);
+ if (err)
+ return err;
+
+ if (!reply)
+ return -EBUSY;
+
+ mcu->sign_requested = true;
+ }
+
+ if (wait_for_completion_interruptible(&mcu->msg_signed))
+ return -EINTR;
+
+ guard(mutex)(&mcu->sign_lock);
+
+ if (mcu->sign_err)
+ return mcu->sign_err;
+
+ memcpy(signature, mcu->signature, OMNIA_MCU_CRYPTO_SIGNATURE_LEN);
+
+ /* forget the signature, for security */
+ memzero_explicit(mcu->signature, sizeof(mcu->signature));
+
+ return OMNIA_MCU_CRYPTO_SIGNATURE_LEN;
+}
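A summary of the handshake implemented by the two functions above, as a descriptive comment:

/*
 * omnia_mcu_sign() sends OMNIA_CMD_CRYPTO_SIGN_MESSAGE with the
 * SHA-256 digest and marks sign_requested under sign_lock. When the
 * MCU finishes, the MESSAGE_SIGNED interrupt fires; the threaded
 * handler above collects the signature, stores it and completes
 * msg_signed. The sleeping caller then copies the signature out,
 * still under sign_lock, and wipes the kernel copy with
 * memzero_explicit().
 */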
+
+static const void *omnia_mcu_get_public_key(const struct key *key)
+{
+ struct omnia_mcu *mcu = dev_get_drvdata(turris_signing_key_get_dev(key));
+
+ return mcu->board_public_key;
+}
+
+static const struct turris_signing_key_subtype omnia_signing_key_subtype = {
+ .key_size = 256,
+ .data_size = SHA256_DIGEST_SIZE,
+ .sig_size = OMNIA_MCU_CRYPTO_SIGNATURE_LEN,
+ .public_key_size = OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN,
+ .hash_algo = "sha256",
+ .get_public_key = omnia_mcu_get_public_key,
+ .sign = omnia_mcu_sign,
+};
+
+static int omnia_mcu_read_public_key(struct omnia_mcu *mcu)
+{
+ u8 reply[1 + OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN];
+ int err;
+
+ err = omnia_cmd_read(mcu->client, OMNIA_CMD_CRYPTO_GET_PUBLIC_KEY,
+ reply, sizeof(reply));
+ if (err)
+ return err;
+
+ if (reply[0] != OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN)
+ return -EIO;
+
+ memcpy(mcu->board_public_key, &reply[1],
+ OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN);
+
+ return 0;
+}
+
+int omnia_mcu_register_keyctl(struct omnia_mcu *mcu)
+{
+ struct device *dev = &mcu->client->dev;
+ char desc[48];
+ int err;
+
+ if (!(mcu->features & OMNIA_FEAT_CRYPTO))
+ return 0;
+
+ err = omnia_mcu_read_public_key(mcu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot read board public key\n");
+
+ err = devm_mutex_init(dev, &mcu->sign_lock);
+ if (err)
+ return err;
+
+ init_completion(&mcu->msg_signed);
+
+ err = omnia_mcu_request_irq(mcu, OMNIA_INT_MESSAGE_SIGNED,
+ omnia_msg_signed_irq_handler,
+ "turris-omnia-mcu-keyctl");
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot request MESSAGE_SIGNED IRQ\n");
+
+ sprintf(desc, "Turris Omnia SN %016llX MCU ECDSA key",
+ mcu->board_serial_number);
+
+ err = devm_turris_signing_key_create(dev, &omnia_signing_key_subtype,
+ desc);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot create signing key\n");
+
+ return 0;
+}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c
index 9a1d9292dc9a..e3826959e6de 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-trng.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c
@@ -5,12 +5,9 @@
* 2024 by Marek Behún <kabel@kernel.org>
*/
-#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/errno.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/driver.h>
#include <linux/hw_random.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -62,17 +59,12 @@ static int omnia_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
int omnia_mcu_register_trng(struct omnia_mcu *mcu)
{
struct device *dev = &mcu->client->dev;
- u8 irq_idx, dummy;
- int irq, err;
+ u8 dummy;
+ int err;
if (!(mcu->features & OMNIA_FEAT_TRNG))
return 0;
- irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)];
- irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx));
- if (irq < 0)
- return dev_err_probe(dev, irq, "Cannot get TRNG IRQ\n");
-
/*
* If someone else cleared the TRNG interrupt but did not read the
* entropy, a new interrupt won't be generated, and entropy collection
@@ -86,9 +78,8 @@ int omnia_mcu_register_trng(struct omnia_mcu *mcu)
init_completion(&mcu->trng_entropy_ready);
- err = devm_request_threaded_irq(dev, irq, NULL, omnia_trng_irq_handler,
- IRQF_ONESHOT, "turris-omnia-mcu-trng",
- mcu);
+ err = omnia_mcu_request_irq(mcu, OMNIA_INT_TRNG, omnia_trng_irq_handler,
+ "turris-omnia-mcu-trng");
if (err)
return dev_err_probe(dev, err, "Cannot request TRNG IRQ\n");
diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h
index 088541be3f4c..8473a3031917 100644
--- a/drivers/platform/cznic/turris-omnia-mcu.h
+++ b/drivers/platform/cznic/turris-omnia-mcu.h
@@ -12,11 +12,17 @@
#include <linux/gpio/driver.h>
#include <linux/hw_random.h>
#include <linux/if_ether.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/workqueue.h>
+enum {
+ OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN = 1 + 32,
+ OMNIA_MCU_CRYPTO_SIGNATURE_LEN = 64,
+};
+
struct i2c_client;
struct rtc_device;
@@ -55,6 +61,12 @@ struct rtc_device;
* @wdt: watchdog driver structure
* @trng: RNG driver structure
* @trng_entropy_ready: RNG entropy ready completion
+ * @msg_signed: message signed completion
+ * @sign_lock: mutex to protect message signing state
+ * @sign_requested: flag indicating that message signing was requested but not completed
+ * @sign_err: message signing error number, filled in interrupt handler
+ * @signature: message signing signature, filled in interrupt handler
+ * @board_public_key: board public key, if stored in MCU
*/
struct omnia_mcu {
struct i2c_client *client;
@@ -88,12 +100,22 @@ struct omnia_mcu {
struct hwrng trng;
struct completion trng_entropy_ready;
#endif
+
+#ifdef CONFIG_TURRIS_OMNIA_MCU_KEYCTL
+ struct completion msg_signed;
+ struct mutex sign_lock;
+ bool sign_requested;
+ int sign_err;
+ u8 signature[OMNIA_MCU_CRYPTO_SIGNATURE_LEN];
+ u8 board_public_key[OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN];
+#endif
};
#ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO
-extern const u8 omnia_int_to_gpio_idx[32];
extern const struct attribute_group omnia_mcu_gpio_group;
int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu);
+int omnia_mcu_request_irq(struct omnia_mcu *mcu, u32 spec,
+ irq_handler_t thread_fn, const char *devname);
#else
static inline int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
{
@@ -101,6 +123,15 @@ static inline int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
}
#endif
+#ifdef CONFIG_TURRIS_OMNIA_MCU_KEYCTL
+int omnia_mcu_register_keyctl(struct omnia_mcu *mcu);
+#else
+static inline int omnia_mcu_register_keyctl(struct omnia_mcu *mcu)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP
extern const struct attribute_group omnia_mcu_poweroff_group;
int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu);
diff --git a/drivers/platform/cznic/turris-signing-key.c b/drivers/platform/cznic/turris-signing-key.c
new file mode 100644
index 000000000000..3827178565e2
--- /dev/null
+++ b/drivers/platform/cznic/turris-signing-key.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Some of CZ.NIC's Turris devices support signing messages with a per-device unique asymmetric
+ * cryptographic key that was burned into the device at manufacture.
+ *
+ * This helper module exposes this message signing ability via the keyctl() syscall. Upon load, it
+ * creates the `.turris-signing-keys` keyring. A device-specific driver then has to create a signing
+ * key by calling devm_turris_signing_key_create().
+ *
+ * 2025 by Marek Behún <kabel@kernel.org>
+ */
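A hypothetical userspace sketch (not part of the patch) of using such a key through libkeyutils; the key id would be looked up via /proc/keys or keyctl(1), and the info string matches the "raw" encoding and per-subtype hash algorithm this module validates:

#include <keyutils.h>
#include <stdio.h>

static int sign_digest(key_serial_t key, const unsigned char digest[32],
		       unsigned char sig[64])
{
	/* 32-byte SHA-256 digest in, 64-byte ECDSA signature out
	 * (the sizes the Turris Omnia subtype advertises) */
	long n = keyctl_pkey_sign(key, "enc=raw hash=sha256",
				  digest, 32, sig, 64);
	if (n < 0) {
		perror("keyctl_pkey_sign");
		return -1;
	}
	return 0;
}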
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/key-type.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/turris-signing-key.h>
+
+static int turris_signing_key_instantiate(struct key *key,
+ struct key_preparsed_payload *payload)
+{
+ return 0;
+}
+
+static void turris_signing_key_describe(const struct key *key, struct seq_file *m)
+{
+ const struct turris_signing_key_subtype *subtype = dereference_key_rcu(key);
+
+ if (!subtype)
+ return;
+
+ seq_printf(m, "%s: %*phN", key->description, subtype->public_key_size,
+ subtype->get_public_key(key));
+}
+
+static long turris_signing_key_read(const struct key *key, char *buffer, size_t buflen)
+{
+ const struct turris_signing_key_subtype *subtype = dereference_key_rcu(key);
+
+ if (!subtype)
+ return -EIO;
+
+ if (buffer) {
+ if (buflen > subtype->public_key_size)
+ buflen = subtype->public_key_size;
+
+ memcpy(buffer, subtype->get_public_key(key), subtype->public_key_size);
+ }
+
+ return subtype->public_key_size;
+}
+
+static bool turris_signing_key_asym_valid_params(const struct turris_signing_key_subtype *subtype,
+ const struct kernel_pkey_params *params)
+{
+ if (params->encoding && strcmp(params->encoding, "raw"))
+ return false;
+
+ if (params->hash_algo && strcmp(params->hash_algo, subtype->hash_algo))
+ return false;
+
+ return true;
+}
+
+static int turris_signing_key_asym_query(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info)
+{
+ const struct turris_signing_key_subtype *subtype = dereference_key_rcu(params->key);
+
+ if (!subtype)
+ return -EIO;
+
+ if (!turris_signing_key_asym_valid_params(subtype, params))
+ return -EINVAL;
+
+ info->supported_ops = KEYCTL_SUPPORTS_SIGN;
+ info->key_size = subtype->key_size;
+ info->max_data_size = subtype->data_size;
+ info->max_sig_size = subtype->sig_size;
+ info->max_enc_size = 0;
+ info->max_dec_size = 0;
+
+ return 0;
+}
+
+static int turris_signing_key_asym_eds_op(struct kernel_pkey_params *params,
+ const void *in, void *out)
+{
+ const struct turris_signing_key_subtype *subtype = dereference_key_rcu(params->key);
+ int err;
+
+ if (!subtype)
+ return -EIO;
+
+ if (!turris_signing_key_asym_valid_params(subtype, params))
+ return -EINVAL;
+
+ if (params->op != kernel_pkey_sign)
+ return -EOPNOTSUPP;
+
+ if (params->in_len != subtype->data_size || params->out_len != subtype->sig_size)
+ return -EINVAL;
+
+ err = subtype->sign(params->key, in, out);
+ if (err)
+ return err;
+
+ return subtype->sig_size;
+}
+
+static struct key_type turris_signing_key_type = {
+ .name = "turris-signing-key",
+ .instantiate = turris_signing_key_instantiate,
+ .describe = turris_signing_key_describe,
+ .read = turris_signing_key_read,
+ .asym_query = turris_signing_key_asym_query,
+ .asym_eds_op = turris_signing_key_asym_eds_op,
+};
+
+static struct key *turris_signing_keyring;
+
+static void turris_signing_key_release(void *key)
+{
+ key_unlink(turris_signing_keyring, key);
+ key_put(key);
+}
+
+int
+devm_turris_signing_key_create(struct device *dev, const struct turris_signing_key_subtype *subtype,
+ const char *desc)
+{
+ struct key *key;
+ key_ref_t kref;
+
+ kref = key_create(make_key_ref(turris_signing_keyring, true),
+ turris_signing_key_type.name, desc, NULL, 0,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_SEARCH,
+ KEY_ALLOC_BUILT_IN | KEY_ALLOC_SET_KEEP | KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(kref))
+ return PTR_ERR(kref);
+
+ key = key_ref_to_ptr(kref);
+ key->payload.data[1] = dev;
+ rcu_assign_keypointer(key, subtype);
+
+ return devm_add_action_or_reset(dev, turris_signing_key_release, key);
+}
+EXPORT_SYMBOL_GPL(devm_turris_signing_key_create);
+
+static int turris_signing_key_init(void)
+{
+ int err;
+
+ err = register_key_type(&turris_signing_key_type);
+ if (err)
+ return err;
+
+ turris_signing_keyring = keyring_alloc(".turris-signing-keys",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW |
+ KEY_USR_READ | KEY_USR_SEARCH,
+ KEY_ALLOC_BUILT_IN | KEY_ALLOC_SET_KEEP |
+ KEY_ALLOC_NOT_IN_QUOTA,
+ NULL, NULL);
+ if (IS_ERR(turris_signing_keyring)) {
+ pr_err("Cannot allocate Turris keyring\n");
+
+ unregister_key_type(&turris_signing_key_type);
+
+ return PTR_ERR(turris_signing_keyring);
+ }
+
+ return 0;
+}
+module_init(turris_signing_key_init);
+
+static void turris_signing_key_exit(void)
+{
+ key_put(turris_signing_keyring);
+ unregister_key_type(&turris_signing_key_type);
+}
+module_exit(turris_signing_key_exit);
+
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
+MODULE_DESCRIPTION("CZ.NIC's Turris signing key helper");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/loongarch/loongson-laptop.c b/drivers/platform/loongarch/loongson-laptop.c
index 99203584949d..61b18ac206c9 100644
--- a/drivers/platform/loongarch/loongson-laptop.c
+++ b/drivers/platform/loongarch/loongson-laptop.c
@@ -56,8 +56,7 @@ static struct input_dev *generic_inputdev;
static acpi_handle hotkey_handle;
static struct key_entry hotkey_keycode_map[GENERIC_HOTKEY_MAP_MAX];
-int loongson_laptop_turn_on_backlight(void);
-int loongson_laptop_turn_off_backlight(void);
+static bool bl_powered;
static int loongson_laptop_backlight_update(struct backlight_device *bd);
/* 2. ACPI Helpers and device model */
@@ -354,16 +353,42 @@ static int ec_backlight_level(u8 level)
return level;
}
+static int ec_backlight_set_power(bool state)
+{
+ int status;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+
+ arg0.integer.value = state;
+ status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
+ if (ACPI_FAILURE(status)) {
+ pr_info("Loongson lvds error: 0x%x\n", status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int loongson_laptop_backlight_update(struct backlight_device *bd)
{
- int lvl = ec_backlight_level(bd->props.brightness);
+ bool target_powered = !backlight_is_blank(bd);
+ int ret = 0, lvl = ec_backlight_level(bd->props.brightness);
if (lvl < 0)
return -EIO;
+
if (ec_set_brightness(lvl))
return -EIO;
- return 0;
+ if (target_powered != bl_powered) {
+ ret = ec_backlight_set_power(target_powered);
+ if (ret < 0)
+ return ret;
+
+ bl_powered = target_powered;
+ }
+
+ return ret;
}
static int loongson_laptop_get_brightness(struct backlight_device *bd)
@@ -384,7 +409,7 @@ static const struct backlight_ops backlight_laptop_ops = {
static int laptop_backlight_register(void)
{
- int status = 0;
+ int status = 0, ret;
struct backlight_properties props;
memset(&props, 0, sizeof(props));
@@ -392,44 +417,20 @@ static int laptop_backlight_register(void)
if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
return -EIO;
- props.brightness = 1;
+ ret = ec_backlight_set_power(true);
+ if (ret)
+ return ret;
+
+ bl_powered = true;
+
props.max_brightness = status;
+ props.brightness = ec_get_brightness();
+ props.power = BACKLIGHT_POWER_ON;
props.type = BACKLIGHT_PLATFORM;
backlight_device_register("loongson_laptop",
NULL, NULL, &backlight_laptop_ops, &props);
- return 0;
-}
-
-int loongson_laptop_turn_on_backlight(void)
-{
- int status;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
-
- arg0.integer.value = 1;
- status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
- if (ACPI_FAILURE(status)) {
- pr_info("Loongson lvds error: 0x%x\n", status);
- return -ENODEV;
- }
-
- return 0;
-}
-
-int loongson_laptop_turn_off_backlight(void)
-{
- int status;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
-
- arg0.integer.value = 0;
- status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
- if (ACPI_FAILURE(status)) {
- pr_info("Loongson lvds error: 0x%x\n", status);
- return -ENODEV;
- }
return 0;
}
@@ -611,11 +612,17 @@ static int __init generic_acpi_laptop_init(void)
static void __exit generic_acpi_laptop_exit(void)
{
+ int i;
+
if (generic_inputdev) {
- if (input_device_registered)
- input_unregister_device(generic_inputdev);
- else
+ if (!input_device_registered) {
input_free_device(generic_inputdev);
+ } else {
+ input_unregister_device(generic_inputdev);
+
+ for (i = 0; i < ARRAY_SIZE(generic_sub_drivers); i++)
+ generic_subdriver_exit(&generic_sub_drivers[i]);
+ }
}
}
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index aa760f064a17..e3afbe62c7f6 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -27,6 +27,19 @@ config MLX_PLATFORM
If you have a Mellanox system, say Y or M here.
+config MLXREG_DPU
+ tristate "Nvidia Data Processor Unit platform driver support"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver provides support for the Nvidia BF3 Data Processor Units,
+ which are part of the SN4280 Ethernet smart switch systems, providing
+ a high-performance switching solution for Enterprise Data Centers
+ (EDC) building Ethernet-based clusters, High-Performance Computing
+ (HPC) and embedded environments.
+
+ If you have an Nvidia smart switch system, say Y or M here.
+
config MLXREG_HOTPLUG
tristate "Mellanox platform hotplug driver support"
depends on HWMON
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index ba56485cbe8c..e86723b44c2e 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_MLXBF_BOOTCTL) += mlxbf-bootctl.o
obj-$(CONFIG_MLXBF_PMC) += mlxbf-pmc.o
obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
+obj-$(CONFIG_MLXREG_DPU) += mlxreg-dpu.o
obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
obj-$(CONFIG_MLXREG_LC) += mlxreg-lc.o
diff --git a/drivers/platform/mellanox/mlx-platform.c b/drivers/platform/mellanox/mlx-platform.c
index 08b0430a2899..d0df18be93c7 100644
--- a/drivers/platform/mellanox/mlx-platform.c
+++ b/drivers/platform/mellanox/mlx-platform.c
@@ -38,6 +38,7 @@
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET 0x0b
#define MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET 0x17
#define MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET 0x19
+#define MLXPLAT_CPLD_LPC_REG_RESET_GP3_OFFSET 0x1b
#define MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET 0x1c
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
@@ -49,9 +50,11 @@
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
#define MLXPLAT_CPLD_LPC_REG_LED6_OFFSET 0x25
#define MLXPLAT_CPLD_LPC_REG_LED7_OFFSET 0x26
+#define MLXPLAT_CPLD_LPC_REG_LED8_OFFSET 0x27
#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b
#define MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET 0x2d
+#define MLXPLAT_CPLD_LPC_REG_GP1_RO_OFFSET 0x2c
#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e
#define MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET 0x2f
#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
@@ -71,12 +74,14 @@
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
+#define MLXPLAT_CPLD_LPC_REG_GP3_OFFSET 0x46
#define MLXPLAT_CPLD_LPC_REG_BRD_OFFSET 0x47
#define MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET 0x48
#define MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET 0x49
#define MLXPLAT_CPLD_LPC_REG_GWP_OFFSET 0x4a
#define MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET 0x4b
#define MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET 0x4c
+#define MLXPLAT_CPLD_LPC_REG_GPI_MASK_OFFSET 0x4e
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
@@ -88,15 +93,20 @@
#define MLXPLAT_CPLD_LPC_REG_PSU_OFFSET 0x58
#define MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET 0x59
#define MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET 0x5a
+#define MLXPLAT_CPLD_LPC_REG_PSU_AC_OFFSET 0x5e
#define MLXPLAT_CPLD_LPC_REG_PWR_OFFSET 0x64
#define MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET 0x65
#define MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET 0x66
+#define MLXPLAT_CPLD_LPC_REG_PSU_ALERT_OFFSET 0x6a
#define MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET 0x70
#define MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET 0x71
#define MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET 0x72
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
+#define MLXPLAT_CPLD_LPC_REG_FAN2_OFFSET 0x8b
+#define MLXPLAT_CPLD_LPC_REG_FAN2_EVENT_OFFSET 0x8c
+#define MLXPLAT_CPLD_LPC_REG_FAN2_MASK_OFFSET 0x8d
#define MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET 0x8e
#define MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET 0x8f
#define MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET 0x90
@@ -128,10 +138,15 @@
#define MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET 0xaa
#define MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET 0xab
#define MLXPLAT_CPLD_LPC_REG_LC_PWR_ON 0xb2
+#define MLXPLAT_CPLD_LPC_REG_TACHO19_OFFSET 0xb4
+#define MLXPLAT_CPLD_LPC_REG_TACHO20_OFFSET 0xb5
#define MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET 0xb6
#define MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET 0xb7
#define MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET 0xb8
#define MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET 0xb9
+#define MLXPLAT_CPLD_LPC_REG_TACHO17_OFFSET 0xba
+#define MLXPLAT_CPLD_LPC_REG_TACHO18_OFFSET 0xbb
+#define MLXPLAT_CPLD_LPC_REG_ASIC_CAP_OFFSET 0xc1
#define MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET 0xc2
#define MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT 0xc3
#define MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET 0xc4
@@ -182,6 +197,9 @@
#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
#define MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET 0xfd
+#define MLXPLAT_CPLD_LPC_REG_TACHO15_OFFSET 0xfe
+#define MLXPLAT_CPLD_LPC_REG_TACHO16_OFFSET 0xff
+
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
@@ -210,9 +228,15 @@
#define MLXPLAT_CPLD_AGGR_MASK_NG_DEF 0x04
#define MLXPLAT_CPLD_AGGR_MASK_COMEX BIT(0)
#define MLXPLAT_CPLD_AGGR_MASK_LC BIT(3)
+#define MLXPLAT_CPLD_AGGR_MASK_DPU_BRD BIT(4)
+#define MLXPLAT_CPLD_AGGR_MASK_DPU_CORE BIT(5)
#define MLXPLAT_CPLD_AGGR_MASK_MODULAR (MLXPLAT_CPLD_AGGR_MASK_NG_DEF | \
MLXPLAT_CPLD_AGGR_MASK_COMEX | \
MLXPLAT_CPLD_AGGR_MASK_LC)
+#define MLXPLAT_CPLD_AGGR_MASK_SMART_SW (MLXPLAT_CPLD_AGGR_MASK_COMEX | \
+ MLXPLAT_CPLD_AGGR_MASK_NG_DEF | \
+ MLXPLAT_CPLD_AGGR_MASK_DPU_BRD | \
+ MLXPLAT_CPLD_AGGR_MASK_DPU_CORE)
#define MLXPLAT_CPLD_AGGR_MASK_LC_PRSNT BIT(0)
#define MLXPLAT_CPLD_AGGR_MASK_LC_RDY BIT(1)
#define MLXPLAT_CPLD_AGGR_MASK_LC_PG BIT(2)
@@ -235,15 +259,21 @@
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_PSU_XDR_MASK GENMASK(7, 0)
+#define MLXPLAT_CPLD_PWR_XDR_MASK GENMASK(7, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_ASIC_XDR_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(6, 0)
+#define MLXPLAT_CPLD_FAN_XDR_MASK GENMASK(7, 0)
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
#define MLXPLAT_CPLD_EROT_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FU_CAP_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_BIOS_STATUS_MASK GENMASK(3, 1)
+#define MLXPLAT_CPLD_DPU_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_PWR_BUTTON_MASK BIT(0)
#define MLXPLAT_CPLD_LATCH_RST_MASK BIT(6)
#define MLXPLAT_CPLD_THERMAL1_PDB_MASK BIT(3)
@@ -267,6 +297,9 @@
/* Masks for aggregation for modular systems */
#define MLXPLAT_CPLD_LPC_LC_MASK GENMASK(7, 0)
+/* Masks for aggregation for smart switch systems */
+#define MLXPLAT_CPLD_LPC_SM_SW_MASK GENMASK(7, 0)
+
#define MLXPLAT_CPLD_HALT_MASK BIT(3)
#define MLXPLAT_CPLD_RESET_MASK GENMASK(7, 1)
@@ -297,15 +330,18 @@
#define MLXPLAT_CPLD_NR_NONE -1
#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10
#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4
+#define MLXPLAT_CPLD_PSU_XDR_NR 3
#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11
#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14
#define MLXPLAT_CPLD_NR_ASIC 3
#define MLXPLAT_CPLD_NR_LC_BASE 34
+#define MLXPLAT_CPLD_NR_DPU_BASE 18
#define MLXPLAT_CPLD_NR_LC_SET(nr) (MLXPLAT_CPLD_NR_LC_BASE + (nr))
#define MLXPLAT_CPLD_LC_ADDR 0x32
+#define MLXPLAT_CPLD_DPU_ADDR 0x68
/* Masks and default values for watchdogs */
#define MLXPLAT_CPLD_WD1_CLEAR_MASK GENMASK(7, 1)
@@ -320,6 +356,7 @@
#define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30
#define MLXPLAT_CPLD_WD3_DFLT_TIMEOUT 600
#define MLXPLAT_CPLD_WD_MAX_DEVS 2
+#define MLXPLAT_CPLD_DPU_MAX_DEVS 4
#define MLXPLAT_CPLD_LPC_SYSIRQ 17
@@ -346,6 +383,7 @@
* @pdev_io_regs - register access platform devices
* @pdev_fan - FAN platform devices
* @pdev_wd - array of watchdog platform devices
+ * @pdev_dpu - array of Data Processor Unit platform devices
* @regmap: device register map
* @hotplug_resources: system hotplug resources
* @hotplug_resources_size: size of system hotplug resources
@@ -360,6 +398,7 @@ struct mlxplat_priv {
struct platform_device *pdev_io_regs;
struct platform_device *pdev_fan;
struct platform_device *pdev_wd[MLXPLAT_CPLD_WD_MAX_DEVS];
+ struct platform_device *pdev_dpu[MLXPLAT_CPLD_DPU_MAX_DEVS];
void *regmap;
struct resource *hotplug_resources;
unsigned int hotplug_resources_size;
@@ -626,6 +665,21 @@ static struct i2c_board_info mlxplat_mlxcpld_pwr_ng800[] = {
},
};
+static struct i2c_board_info mlxplat_mlxcpld_xdr_pwr[] = {
+ {
+ I2C_BOARD_INFO("dps460", 0x5d),
+ },
+ {
+ I2C_BOARD_INFO("dps460", 0x5c),
+ },
+ {
+ I2C_BOARD_INFO("dps460", 0x5e),
+ },
+ {
+ I2C_BOARD_INFO("dps460", 0x5f),
+ },
+};
+
static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
{
I2C_BOARD_INFO("24c32", 0x50),
@@ -852,7 +906,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.items = mlxplat_mlxcpld_default_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -892,7 +946,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_wc_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_wc_data = {
.items = mlxplat_mlxcpld_default_wc_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_wc_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_wc_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -902,7 +956,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_wc_data = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
.items = mlxplat_mlxcpld_comex_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
@@ -949,7 +1003,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn21xx_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn21xx_data = {
.items = mlxplat_mlxcpld_msn21xx_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -1058,7 +1112,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn274x_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn274x_data = {
.items = mlxplat_mlxcpld_msn274x_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -1105,7 +1159,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = {
.items = mlxplat_mlxcpld_msn201x_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -1229,7 +1283,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_ng_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
.items = mlxplat_mlxcpld_default_ng_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -1389,7 +1443,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_ng800_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
.items = mlxplat_mlxcpld_ext_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -1399,7 +1453,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ng800_data = {
.items = mlxplat_mlxcpld_ng800_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_ng800_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ng800_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -2240,7 +2294,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_modular_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
.items = mlxplat_mlxcpld_modular_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_MODULAR,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -2272,7 +2326,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_chassis_blade_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_chassis_blade_data = {
.items = mlxplat_mlxcpld_chassis_blade_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_chassis_blade_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_chassis_blade_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
@@ -2363,13 +2417,434 @@ static struct mlxreg_core_item mlxplat_mlxcpld_rack_switch_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_rack_switch_data = {
.items = mlxplat_mlxcpld_rack_switch_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_rack_switch_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_rack_switch_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug data for the XDR and smart switch system families */
+static struct mlxreg_core_data mlxplat_mlxcpld_xdr_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .slot = 1,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .slot = 2,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(2),
+ .slot = 3,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(3),
+ .slot = 4,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu5",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(4),
+ .slot = 5,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu6",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(5),
+ .slot = 6,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu7",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(6),
+ .slot = 7,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu8",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(7),
+ .slot = 8,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_xdr_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .slot = 1,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .slot = 2,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(2),
+ .slot = 3,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(3),
+ .slot = 4,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr5",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(4),
+ .slot = 5,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_xdr_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_XDR_NR,
+ },
+ {
+ .label = "pwr6",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(5),
+ .slot = 6,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_xdr_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_XDR_NR,
+ },
+ {
+ .label = "pwr7",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(6),
+ .slot = 7,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_xdr_pwr[2],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_XDR_NR,
+ },
+ {
+ .label = "pwr8",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(7),
+ .slot = 8,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_xdr_pwr[3],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_XDR_NR,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_xdr_fan_items_data[] = {
+ {
+ .label = "fan1",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(0),
+ .slot = 1,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan2",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(1),
+ .slot = 2,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan3",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(2),
+ .slot = 3,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan4",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(3),
+ .slot = 4,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan5",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(4),
+ .slot = 5,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan6",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(5),
+ .slot = 6,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan7",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(6),
+ .slot = 7,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan8",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(7),
+ .slot = 8,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(7),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_xdr_asic1_items_data[] = {
+ {
+ .label = "asic1",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .slot = 1,
+ .capability = MLXPLAT_CPLD_LPC_REG_ASIC_CAP_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ }
+};
+
+/* Platform hotplug data for the smart switch system families */
+static struct mlxreg_core_data mlxplat_mlxcpld_smart_switch_dpu_ready_data[] = {
+ {
+ .label = "dpu1_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(0),
+ .slot = 1,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "dpu2_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(1),
+ .slot = 2,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "dpu3_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(2),
+ .slot = 3,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "dpu4_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(3),
+ .slot = 4,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_smart_switch_dpu_shtdn_ready_data[] = {
+ {
+ .label = "dpu1_shtdn_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(0),
+ .slot = 1,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "dpu2_shtdn_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(1),
+ .slot = 2,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "dpu3_shtdn_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(2),
+ .slot = 3,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "dpu4_shtdn_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(3),
+ .slot = 4,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_smart_switch_items[] = {
+ {
+ .data = mlxplat_mlxcpld_xdr_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_XDR_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_xdr_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_xdr_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_XDR_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_xdr_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_xdr_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_XDR_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_xdr_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_xdr_asic1_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_XDR_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_ASIC_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_xdr_asic1_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+ {
+ .data = mlxplat_mlxcpld_smart_switch_dpu_ready_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_DPU_CORE,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = MLXPLAT_CPLD_DPU_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_smart_switch_dpu_ready_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_smart_switch_dpu_shtdn_ready_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_DPU_CORE,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = MLXPLAT_CPLD_DPU_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_smart_switch_dpu_shtdn_ready_data),
+ .inversed = 1,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_smart_switch_data = {
+ .items = mlxplat_mlxcpld_smart_switch_items,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_smart_switch_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX |
+ MLXPLAT_CPLD_AGGR_MASK_DPU_BRD | MLXPLAT_CPLD_AGGR_MASK_DPU_CORE,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Smart switch Data Processor Unit (DPU) data */
+static struct i2c_board_info mlxplat_mlxcpld_smart_switch_dpu_devs[] = {
+ {
+ I2C_BOARD_INFO("mlxreg-dpu", MLXPLAT_CPLD_DPU_ADDR),
+ .irq = MLXPLAT_CPLD_LPC_SYSIRQ,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-dpu", MLXPLAT_CPLD_DPU_ADDR),
+ .irq = MLXPLAT_CPLD_LPC_SYSIRQ,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-dpu", MLXPLAT_CPLD_DPU_ADDR),
+ .irq = MLXPLAT_CPLD_LPC_SYSIRQ,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-dpu", MLXPLAT_CPLD_DPU_ADDR),
+ .irq = MLXPLAT_CPLD_LPC_SYSIRQ,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_smart_switch_dpu_data[] = {
+ {
+ .label = "dpu1",
+ .hpdev.brdinfo = &mlxplat_mlxcpld_smart_switch_dpu_devs[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_DPU_BASE,
+ .slot = 1,
+ },
+ {
+ .label = "dpu2",
+ .hpdev.brdinfo = &mlxplat_mlxcpld_smart_switch_dpu_devs[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_DPU_BASE + 1,
+ .slot = 2,
+ },
+ {
+ .label = "dpu3",
+ .hpdev.brdinfo = &mlxplat_mlxcpld_smart_switch_dpu_devs[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_DPU_BASE + 2,
+ .slot = 3,
+ },
+ {
+ .label = "dpu4",
+ .hpdev.brdinfo = &mlxplat_mlxcpld_smart_switch_dpu_devs[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_DPU_BASE + 3,
+ .slot = 4,
+ },
+};
+
/* Callback performs graceful shutdown after notification about power button event */
static int
mlxplat_mlxcpld_l1_switch_pwr_events_handler(void *handle, enum mlxreg_hotplug_kind kind,
@@ -2518,13 +2993,66 @@ static struct mlxreg_core_item mlxplat_mlxcpld_l1_switch_events_items[] = {
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_l1_switch_data = {
.items = mlxplat_mlxcpld_l1_switch_events_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_events_items),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_events_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT,
};
+/* Platform hotplug data for the 800G system family */
+static struct mlxreg_core_item mlxplat_mlxcpld_ng800_hi171_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_xdr_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_XDR_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_xdr_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ng800_hi171_data = {
+ .items = mlxplat_mlxcpld_ng800_hi171_items,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ng800_hi171_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -3162,6 +3690,180 @@ static struct mlxreg_core_platform_data mlxplat_l1_switch_led_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_led_data),
};
+/* Platform led data for XDR and smart switch systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_xdr_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 1,
+ },
+ {
+ .label = "fan1:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 1,
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 2,
+ },
+ {
+ .label = "fan2:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 2,
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 3,
+ },
+ {
+ .label = "fan3:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 3,
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 4,
+ },
+ {
+ .label = "fan4:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 4,
+ },
+ {
+ .label = "fan5:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 5,
+ },
+ {
+ .label = "fan5:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 5,
+ },
+ {
+ .label = "fan6:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 6,
+ },
+ {
+ .label = "fan6:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 6,
+ },
+ {
+ .label = "fan7:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 7,
+ },
+ {
+ .label = "fan7:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 7,
+ },
+ {
+ .label = "fan8:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 8,
+ },
+ {
+ .label = "fan8:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 8,
+ },
+ {
+ .label = "fan9:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 9,
+ },
+ {
+ .label = "fan9:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 9,
+ },
+ {
+ .label = "fan10:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED8_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 10,
+ },
+ {
+ .label = "fan10:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED8_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .slot = 10,
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_xdr_led_data = {
+ .data = mlxplat_mlxcpld_xdr_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_xdr_led_data),
+};
+
/* Platform register access default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
{
@@ -3838,6 +4540,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0644,
},
{
+ .label = "shutdown_unlock",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
.label = "erot1_ap_reset",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -4610,6 +5318,480 @@ static struct mlxreg_core_platform_data mlxplat_chassis_blade_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_chassis_blade_regs_io_data),
};
+/* Platform register access data for the smart switch system families */
+static struct mlxreg_core_data mlxplat_mlxcpld_smart_switch_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld3_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "kexec_activated",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "eth_switch_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "dpu1_rst",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP3_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu2_rst",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP3_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu3_rst",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP3_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu4_rst",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP3_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu1_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu2_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu3_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu4_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_dc_dc_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_asic_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_reload",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_pwr_converter_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_pwr_off",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_ac_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "port80",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_RO_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = MLXPLAT_CPLD_BIOS_STATUS_MASK,
+ .bit = 2,
+ .mode = 0444,
+ },
+ {
+ .label = "bios_start_retry",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "shutdown_unlock",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "fan_dir",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "dpu1_rst_en",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu2_rst_en",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu3_rst_en",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu4_rst_en",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "jtag_cap",
+ .reg = MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET,
+ .mask = MLXPLAT_CPLD_FU_CAP_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE,
+ .mask = GENMASK(1, 0),
+ .bit = 1,
+ .mode = 0644,
+ },
+ {
+ .label = "non_active_bios_select",
+ .reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_image_invert",
+ .reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "me_reboot",
+ .reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "dpu1_pwr_force",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP3_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu2_pwr_force",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP3_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu3_pwr_force",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP3_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "dpu4_pwr_force",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP3_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "ufm_done",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPI_MASK_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
+ .label = "psu1_ac_ok",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_AC_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "psu2_ac_ok",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_AC_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "psu1_no_alert",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_ALERT_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "psu2_no_alert",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_ALERT_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_pg_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "spi_chnl_select",
+ .reg = MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT,
+ .mask = GENMASK(7, 0),
+ .bit = 1,
+ .mode = 0644,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config3",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_smart_switch_regs_io_data = {
+ .data = mlxplat_mlxcpld_smart_switch_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_smart_switch_regs_io_data),
+};
+
/* Platform FAN default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
{
@@ -4751,6 +5933,185 @@ static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
};
+/* XDR and smart switch platform fan data */
+static struct mlxreg_core_data mlxplat_mlxcpld_xdr_fan_data[] = {
+ {
+ .label = "pwm1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET,
+ },
+ {
+ .label = "tacho1",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 1,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho2",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 2,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho3",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 3,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho4",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 4,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho5",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 5,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho6",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 6,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho7",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 7,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho8",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 8,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho9",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 9,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho10",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 10,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho11",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 11,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho12",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 12,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho13",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 13,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho14",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 14,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho15",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO15_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 15,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho16",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO16_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 16,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ },
+ {
+ .label = "tacho17",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO17_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 17,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN2_OFFSET,
+ },
+ {
+ .label = "tacho18",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO18_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 18,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN2_OFFSET,
+ },
+ {
+ .label = "tacho19",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO19_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 19,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN2_OFFSET,
+ },
+ {
+ .label = "tacho20",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO20_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
+ .slot = 20,
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN2_OFFSET,
+ },
+ {
+ .label = "conf",
+ .capability = MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_xdr_fan_data = {
+ .data = mlxplat_mlxcpld_xdr_fan_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_xdr_fan_data),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .version = 1,
+};
+
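+/*
+ * Editorial sketch (not part of this patch): a consumer such as
+ * mlxreg-fan would typically gate each tachometer entry above on its
+ * capability bit and presence register before exposing it. The helper
+ * below is hypothetical and only illustrates how the .capability,
+ * .slot and .reg_prsnt fields fit together.
+ *
+ * static bool fan_slot_connected(struct regmap *regmap,
+ *                                struct mlxreg_core_data *d)
+ * {
+ *	u32 cap;
+ *
+ *	if (regmap_read(regmap, d->capability, &cap))
+ *		return false;
+ *	return cap & BIT(d->slot - 1);
+ * }
+ */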
/* Watchdog type1: hardware implementation version1
* (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140 systems).
*/
@@ -4975,6 +6336,8 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
@@ -4983,12 +6346,14 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED8_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
@@ -5012,10 +6377,14 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_AC_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_ALERT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET:
@@ -5083,6 +6452,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
@@ -5094,15 +6465,18 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED8_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP1_RO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
@@ -5122,6 +6496,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GPI_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET:
@@ -5134,12 +6509,17 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_AC_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_ALERT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN2_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FAN2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET:
@@ -5213,6 +6593,13 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO15_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO16_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO17_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO18_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO19_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO20_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
@@ -5248,6 +6635,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
@@ -5259,13 +6648,16 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED8_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP1_RO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
@@ -5285,6 +6677,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GPI_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET:
@@ -5297,9 +6690,11 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_AC_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PSU_ALERT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
@@ -5370,6 +6765,13 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO15_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO16_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO17_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO18_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO19_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO20_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_ASIC_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
@@ -5431,6 +6833,14 @@ static const struct reg_default mlxplat_mlxcpld_regmap_eth_modular[] = {
MLXPLAT_CPLD_AGGR_MASK_LC_LOW },
};
+static const struct reg_default mlxplat_mlxcpld_regmap_smart_switch[] = {
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET, MLXPLAT_CPLD_LPC_SM_SW_MASK },
+};
+
struct mlxplat_mlxcpld_regmap_context {
void __iomem *base;
};
@@ -5539,6 +6949,20 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_eth_modular = {
.reg_write = mlxplat_mlxcpld_reg_write,
};
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_smart_switch = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_smart_switch,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_smart_switch),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
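+/*
+ * Editorial note: the reg_defaults table above seeds the flat
+ * register cache, so cached reads of the PWM control, watchdog
+ * action and AGGRCX mask registers return their power-on values
+ * without touching the hardware before the first write.
+ */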
static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(MLXPLAT_CPLD_LPC_SYSIRQ, "mlxreg-hotplug"),
};
@@ -5550,6 +6974,7 @@ static struct mlxreg_core_platform_data *mlxplat_regs_io;
static struct mlxreg_core_platform_data *mlxplat_fan;
static struct mlxreg_core_platform_data
*mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS];
+static struct mlxreg_core_data *mlxplat_dpu_data[MLXPLAT_CPLD_DPU_MAX_DEVS];
static const struct regmap_config *mlxplat_regmap_config;
static struct pci_dev *lpc_bridge;
static struct pci_dev *i2c_bridge;
@@ -5921,6 +7346,54 @@ static int __init mlxplat_dmi_l1_switch_matched(const struct dmi_system_id *dmi)
return mlxplat_register_platform_device();
}
+static int __init mlxplat_dmi_smart_switch_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_ng800_mux_data);
+ mlxplat_mux_data = mlxplat_ng800_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_smart_switch_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_xdr_led_data;
+ mlxplat_regs_io = &mlxplat_smart_switch_regs_io_data;
+ mlxplat_fan = &mlxplat_xdr_fan_data;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_smart_switch_dpu_data); i++)
+ mlxplat_dpu_data[i] = &mlxplat_mlxcpld_smart_switch_dpu_data[i];
+
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_smart_switch;
+
+ return mlxplat_register_platform_device();
+}
+
+static int __init mlxplat_dmi_ng400_hi171_matched(const struct dmi_system_id *dmi)
+{
+ unsigned int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_ng800_mux_data);
+ mlxplat_mux_data = mlxplat_ng800_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_ng800_hi171_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_xdr_fan_data;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type3); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type3[i];
+
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return mlxplat_register_platform_device();
+}
+
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
.callback = mlxplat_dmi_default_wc_matched,
@@ -6016,6 +7489,26 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_smart_switch_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0019"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_ng400_hi171_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0022"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI171"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_ng400_hi171_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0022"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI172"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -6390,10 +7883,27 @@ static int mlxplat_platdevs_init(struct mlxplat_priv *priv)
}
}
+ /* Add DPU drivers. */
+ for (i = 0; i < MLXPLAT_CPLD_DPU_MAX_DEVS; i++) {
+ if (!mlxplat_dpu_data[i])
+ continue;
+ priv->pdev_dpu[i] =
+ platform_device_register_resndata(&mlxplat_dev->dev, "mlxreg-dpu",
+ i, NULL, 0, mlxplat_dpu_data[i],
+ sizeof(*mlxplat_dpu_data[i]));
+ if (IS_ERR(priv->pdev_dpu[i])) {
+ err = PTR_ERR(priv->pdev_dpu[i]);
+ goto fail_platform_dpu_register;
+ }
+ }
+
return 0;
+fail_platform_dpu_register:
+ while (i--)
+ platform_device_unregister(priv->pdev_dpu[i]);
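+	/*
+	 * Editorial note: "while (i--)" and the replaced
+	 * "while (--i >= 0)" both unwind entries i-1..0; the postfix
+	 * form is preferred as it also stays correct for unsigned
+	 * counters.
+	 */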
fail_platform_wd_register:
- while (--i >= 0)
+ while (i--)
platform_device_unregister(priv->pdev_wd[i]);
fail_platform_fan_register:
if (mlxplat_regs_io)
@@ -6412,7 +7922,9 @@ static void mlxplat_platdevs_exit(struct mlxplat_priv *priv)
{
int i;
- for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0 ; i--)
+ for (i = MLXPLAT_CPLD_DPU_MAX_DEVS - 1; i >= 0; i--)
+ platform_device_unregister(priv->pdev_dpu[i]);
+ for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0; i--)
platform_device_unregister(priv->pdev_wd[i]);
if (priv->pdev_fan)
platform_device_unregister(priv->pdev_fan);
@@ -6457,7 +7969,7 @@ static int mlxplat_i2c_mux_topology_init(struct mlxplat_priv *priv)
return mlxplat_i2c_mux_complition_notify(priv, NULL, NULL);
fail_platform_mux_register:
- while (--i >= 0)
+ while (i--)
platform_device_unregister(priv->pdev_mux[i]);
return err;
}
@@ -6466,7 +7978,7 @@ static void mlxplat_i2c_mux_topology_exit(struct mlxplat_priv *priv)
{
int i;
- for (i = mlxplat_mux_num - 1; i >= 0 ; i--) {
+ for (i = mlxplat_mux_num - 1; i >= 0; i--) {
if (priv->pdev_mux[i])
platform_device_unregister(priv->pdev_mux[i]);
}
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 36a00692347d..900069eb186e 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -33,7 +33,7 @@
#define MLXBF_PMC_EVENT_SET_BF3 2
#define MLXBF_PMC_EVENT_INFO_LEN 100
-#define MLXBF_PMC_MAX_BLOCKS 30
+#define MLXBF_PMC_MAX_BLOCKS 40
#define MLXBF_PMC_MAX_ATTRS 70
#define MLXBF_PMC_INFO_SZ 4
#define MLXBF_PMC_REG_SIZE 8
@@ -139,6 +139,7 @@ struct mlxbf_pmc_block_info {
* @pdev: The kernel structure representing the device
* @total_blocks: Total number of blocks
* @tile_count: Number of tiles in the system
+ * @apt_enable: Info on enabled APTs
* @llt_enable: Info on enabled LLTs
* @mss_enable: Info on enabled MSSs
* @group_num: Group number assigned to each valid block
@@ -154,6 +155,7 @@ struct mlxbf_pmc_context {
struct platform_device *pdev;
u32 total_blocks;
u32 tile_count;
+ u8 apt_enable;
u8 llt_enable;
u8 mss_enable;
u32 group_num;
@@ -893,6 +895,107 @@ static const struct mlxbf_pmc_events mlxbf_pmc_clock_events[] = {
{ 0x6c, "REFERENCE_WINDOW_WIDTH_REF_156" },
};
+static const struct mlxbf_pmc_events mlxbf_pmc_gga_events[] = {
+ { 0, "GGA_PERF_DESC_WQE_STRB" },
+ { 5, "GGA_PERF_DESC_CQE_STRB" },
+ { 8, "GGA_PERF_DESC_TPT_REQUEST_STRB" },
+ { 17, "GGA_PERF_DESC_TPT_RESPONSESTRB" },
+ { 120, "GGA_PERF_DESC_ENGINE0_IN_DATA_STRB" },
+ { 121, "GGA_PERF_DESC_ENGINE1_IN_DATA_STRB" },
+ { 122, "GGA_PERF_DESC_ENGINE2_IN_DATA_STRB" },
+ { 123, "GGA_PERF_DESC_ENGINE3_IN_DATA_STRB" },
+ { 124, "GGA_PERF_DESC_ENGINE4_IN_DATA_STRB" },
+ { 125, "GGA_PERF_DESC_ENGINE5_IN_DATA_STRB" },
+ { 126, "GGA_PERF_DESC_ENGINE6_IN_DATA_STRB" },
+ { 127, "GGA_PERF_DESC_ENGINE7_IN_DATA_STRB" },
+ { 128, "GGA_PERF_DESC_ENGINE8_IN_DATA_STRB" },
+ { 129, "GGA_PERF_DESC_ENGINE9_IN_DATA_STRB" },
+ { 130, "GGA_PERF_DESC_ENGINE10_IN_DATA_STRB" },
+ { 131, "GGA_PERF_DESC_ENGINE11_IN_DATA_STRB" },
+ { 132, "GGA_PERF_DESC_ENGINE12_IN_DATA_STRB" },
+ { 133, "GGA_PERF_DESC_ENGINE13_IN_DATA_STRB" },
+ { 134, "GGA_PERF_DESC_ENGINE14_IN_DATA_STRB" },
+ { 195, "GGA_PERF_DESC_ENGINE0_OUT_DATA_STRB" },
+ { 196, "GGA_PERF_DESC_ENGINE1_OUT_DATA_STRB" },
+ { 197, "GGA_PERF_DESC_ENGINE2_OUT_DATA_STRB" },
+ { 198, "GGA_PERF_DESC_ENGINE3_OUT_DATA_STRB" },
+ { 199, "GGA_PERF_DESC_ENGINE4_OUT_DATA_STRB" },
+ { 200, "GGA_PERF_DESC_ENGINE5_OUT_DATA_STRB" },
+ { 201, "GGA_PERF_DESC_ENGINE6_OUT_DATA_STRB" },
+ { 202, "GGA_PERF_DESC_ENGINE7_OUT_DATA_STRB" },
+ { 203, "GGA_PERF_DESC_ENGINE8_OUT_DATA_STRB" },
+ { 204, "GGA_PERF_DESC_ENGINE9_OUT_DATA_STRB" },
+ { 205, "GGA_PERF_DESC_ENGINE10_OUT_DATA_STRB" },
+ { 206, "GGA_PERF_DESC_ENGINE11_OUT_DATA_STRB" },
+ { 207, "GGA_PERF_DESC_ENGINE12_OUT_DATA_STRB" },
+ { 208, "GGA_PERF_DESC_ENGINE13_OUT_DATA_STRB" },
+ { 209, "GGA_PERF_DESC_ENGINE14_OUT_DATA_STRB" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_apt_events[] = {
+ { 0, "APT_DATA_0" },
+ { 1, "APT_DATA_1" },
+ { 2, "APT_DATA_2" },
+ { 3, "APT_DATA_3" },
+ { 4, "APT_DATA_4" },
+ { 5, "APT_DATA_5" },
+ { 6, "APT_DATA_6" },
+ { 7, "APT_DATA_7" },
+ { 8, "APT_DATA_8" },
+ { 9, "APT_DATA_9" },
+ { 10, "APT_DATA_10" },
+ { 11, "APT_DATA_11" },
+ { 12, "APT_DATA_12" },
+ { 13, "APT_DATA_13" },
+ { 14, "APT_DATA_14" },
+ { 15, "APT_DATA_15" },
+ { 16, "APT_DATA_16" },
+ { 17, "APT_DATA_17" },
+ { 18, "APT_DATA_18" },
+ { 19, "APT_DATA_19" },
+ { 20, "APT_DATA_20" },
+ { 21, "APT_DATA_21" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_emi_events[] = {
+ { 0, "MCH_WR_IN_MCH_REQ_IN_STRB" },
+ { 10, "MCH_RD_IN_MCH_REQ_IN_STRB" },
+ { 20, "MCH_RD_RESP_DATA_MCH_RESP_OUT_STRB" },
+ { 98, "EMI_ARBITER_EARB2CTRL_STRB" },
+ { 99, "EMI_ARBITER_EARB2CTRL_RAS_STRB" },
+ { 100, "EMI_ARBITER_EARB2CTRL_CAS_STRB" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_prnf_events[] = {
+ { 0, "PRNF_DMA_RD_TLP_REQ" },
+ { 1, "PRNF_DMA_RD_ICMC_BYPASS_REQ" },
+ { 8, "PRNF_DMA_RD_TLP_SENT_TO_CHI" },
+ { 11, "PRNF_DMA_RD_CHI_RES" },
+ { 17, "PRNF_DMA_RD_TLP_RES_SENT" },
+ { 18, "PRNF_DMA_WR_WR0_SLICE_ALLOC_RO" },
+ { 19, "PRNF_DMA_WR_WR0_SLICE_ALLOC_NRO" },
+ { 24, "PRNF_DMA_WR_WR1_SLICE_ALLOC_RO" },
+ { 25, "PRNF_DMA_WR_WR1_SLICE_ALLOC_NRO" },
+ { 30, "PRNF_PIO_POSTED_REQ_PUSH" },
+ { 31, "PRNF_PIO_POSTED_REQ_POP" },
+ { 32, "PRNF_PIO_NP_REQ_PUSH" },
+ { 33, "PRNF_PIO_NP_REQ_POP" },
+ { 34, "PRNF_PIO_COMP_RO_PUSH" },
+ { 35, "PRNF_PIO_COMP_RO_POP" },
+ { 36, "PRNF_PIO_COMP_NRO_PUSH" },
+ { 37, "PRNF_PIO_COMP_NRO_POP" },
+};
+
+static const struct mlxbf_pmc_events mlxbf_pmc_msn_events[] = {
+ { 46, "MSN_CORE_MMA_WQE_DONE_PUSH_STRB" },
+ { 116, "MSN_CORE_MSN2MMA_WQE_STRB" },
+ { 164, "MSN_CORE_WQE_TOP_TILE_WQE_STRB" },
+ { 168, "MSN_CORE_TPT_TOP_GGA_REQ_STRB" },
+ { 171, "MSN_CORE_TPT_TOP_MMA_REQ_STRB" },
+ { 174, "MSN_CORE_TPT_TOP_GGA_RES_STRB" },
+ { 177, "MSN_CORE_TPT_TOP_MMA_RES_STRB" },
+};
+
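+/*
+ * Editorial note: as with the existing tables, each { id, name } pair
+ * above becomes a selectable event for its block; the new tables are
+ * hooked up by block-name substring in mlxbf_pmc_event_list() below.
+ */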
static struct mlxbf_pmc_context *pmc;
/* UUID used to probe ATF service. */
@@ -1069,6 +1172,21 @@ static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk, size
} else if (strstr(blk, "clock_measure")) {
events = mlxbf_pmc_clock_events;
size = ARRAY_SIZE(mlxbf_pmc_clock_events);
+ } else if (strstr(blk, "gga")) {
+ events = mlxbf_pmc_gga_events;
+ size = ARRAY_SIZE(mlxbf_pmc_gga_events);
+ } else if (strstr(blk, "apt")) {
+ events = mlxbf_pmc_apt_events;
+ size = ARRAY_SIZE(mlxbf_pmc_apt_events);
+ } else if (strstr(blk, "emi")) {
+ events = mlxbf_pmc_emi_events;
+ size = ARRAY_SIZE(mlxbf_pmc_emi_events);
+ } else if (strstr(blk, "prnf")) {
+ events = mlxbf_pmc_prnf_events;
+ size = ARRAY_SIZE(mlxbf_pmc_prnf_events);
+ } else if (strstr(blk, "msn")) {
+ events = mlxbf_pmc_msn_events;
+ size = ARRAY_SIZE(mlxbf_pmc_msn_events);
} else {
events = NULL;
size = 0;
@@ -2056,6 +2174,18 @@ static int mlxbf_pmc_map_counters(struct device *dev)
continue;
}
+ /* Create sysfs only for enabled EMI blocks */
+ if (strstr(pmc->block_name[i], "emi") &&
+ pmc->event_set == MLXBF_PMC_EVENT_SET_BF3) {
+ unsigned int emi_num;
+
+ if (sscanf(pmc->block_name[i], "emi%u", &emi_num) != 1)
+ continue;
+
+ if (!((pmc->mss_enable >> (emi_num / 2)) & 0x1))
+ continue;
+ }
+
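+	/*
+	 * Editorial note: the emi_num / 2 shift above implies two EMI
+	 * blocks per MSS; e.g. with mss_enable = 0x1 only "emi0" and
+	 * "emi1" get sysfs entries, while "emi2" and "emi3" are
+	 * skipped.
+	 */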
/* Create sysfs only for enabled LLT blocks */
if (strstr(pmc->block_name[i], "llt_miss")) {
unsigned int llt_num;
@@ -2075,6 +2205,17 @@ static int mlxbf_pmc_map_counters(struct device *dev)
continue;
}
+ /* Create sysfs only for enabled APT blocks */
+ if (strstr(pmc->block_name[i], "apt")) {
+ unsigned int apt_num;
+
+ if (sscanf(pmc->block_name[i], "apt%u", &apt_num) != 1)
+ continue;
+
+ if (!((pmc->apt_enable >> apt_num) & 0x1))
+ continue;
+ }
+
ret = device_property_read_u64_array(dev, pmc->block_name[i],
info, MLXBF_PMC_INFO_SZ);
if (ret)
@@ -2171,13 +2312,17 @@ static int mlxbf_pmc_probe(struct platform_device *pdev)
return -EFAULT;
if (device_property_read_u32(dev, "tile_num", &pmc->tile_count)) {
+ if (device_property_read_u8(dev, "apt_enable", &pmc->apt_enable)) {
+ dev_warn(dev, "Number of APTs undefined, ignoring blocks\n");
+ pmc->apt_enable = 0;
+ }
if (device_property_read_u8(dev, "llt_enable", &pmc->llt_enable)) {
- dev_err(dev, "Number of tiles/LLTs undefined\n");
- return -EINVAL;
+ dev_warn(dev, "Number of LLTs undefined, ignoring blocks\n");
+ pmc->llt_enable = 0;
}
if (device_property_read_u8(dev, "mss_enable", &pmc->mss_enable)) {
- dev_err(dev, "Number of tiles/MSSs undefined\n");
- return -EINVAL;
+ dev_warn(dev, "Number of MSSs undefined, ignoring blocks\n");
+ pmc->mss_enable = 0;
}
}
diff --git a/drivers/platform/mellanox/mlxreg-dpu.c b/drivers/platform/mellanox/mlxreg-dpu.c
new file mode 100644
index 000000000000..52260106a9f1
--- /dev/null
+++ b/drivers/platform/mellanox/mlxreg-dpu.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Nvidia Data Processing Unit platform driver
+ *
+ * Copyright (C) 2025 Nvidia Technologies Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxcpld.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* I2C bus IO offsets */
+#define MLXREG_DPU_REG_FPGA1_VER_OFFSET 0x2400
+#define MLXREG_DPU_REG_FPGA1_PN_OFFSET 0x2404
+#define MLXREG_DPU_REG_FPGA1_PN1_OFFSET 0x2405
+#define MLXREG_DPU_REG_PG_OFFSET 0x2414
+#define MLXREG_DPU_REG_PG_EVENT_OFFSET 0x2415
+#define MLXREG_DPU_REG_PG_MASK_OFFSET 0x2416
+#define MLXREG_DPU_REG_RESET_GP1_OFFSET 0x2417
+#define MLXREG_DPU_REG_RST_CAUSE1_OFFSET 0x241e
+#define MLXREG_DPU_REG_GP0_RO_OFFSET 0x242b
+#define MLXREG_DPU_REG_GP0_OFFSET 0x242e
+#define MLXREG_DPU_REG_GP1_OFFSET 0x242c
+#define MLXREG_DPU_REG_GP4_OFFSET 0x2438
+#define MLXREG_DPU_REG_AGGRCO_OFFSET 0x2442
+#define MLXREG_DPU_REG_AGGRCO_MASK_OFFSET 0x2443
+#define MLXREG_DPU_REG_HEALTH_OFFSET 0x244d
+#define MLXREG_DPU_REG_HEALTH_EVENT_OFFSET 0x244e
+#define MLXREG_DPU_REG_HEALTH_MASK_OFFSET 0x244f
+#define MLXREG_DPU_REG_FPGA1_MVER_OFFSET 0x24de
+#define MLXREG_DPU_REG_CONFIG3_OFFSET 0x24fd
+#define MLXREG_DPU_REG_MAX 0x3fff
+
+/* Power Good event masks. */
+#define MLXREG_DPU_PG_VDDIO_MASK BIT(0)
+#define MLXREG_DPU_PG_VDD_CPU_MASK BIT(1)
+#define MLXREG_DPU_PG_VDD_MASK BIT(2)
+#define MLXREG_DPU_PG_1V8_MASK BIT(3)
+#define MLXREG_DPU_PG_COMPARATOR_MASK BIT(4)
+#define MLXREG_DPU_PG_VDDQ_MASK BIT(5)
+#define MLXREG_DPU_PG_HVDD_MASK BIT(6)
+#define MLXREG_DPU_PG_DVDD_MASK BIT(7)
+#define MLXREG_DPU_PG_MASK (MLXREG_DPU_PG_DVDD_MASK | \
+ MLXREG_DPU_PG_HVDD_MASK | \
+ MLXREG_DPU_PG_VDDQ_MASK | \
+ MLXREG_DPU_PG_COMPARATOR_MASK | \
+ MLXREG_DPU_PG_1V8_MASK | \
+ MLXREG_DPU_PG_VDD_CPU_MASK | \
+ MLXREG_DPU_PG_VDD_MASK | \
+ MLXREG_DPU_PG_VDDIO_MASK)
+
+/* Health event masks. */
+#define MLXREG_DPU_HLTH_THERMAL_TRIP_MASK BIT(0)
+#define MLXREG_DPU_HLTH_UFM_UPGRADE_DONE_MASK BIT(1)
+#define MLXREG_DPU_HLTH_VDDQ_HOT_ALERT_MASK BIT(2)
+#define MLXREG_DPU_HLTH_VDD_CPU_HOT_ALERT_MASK BIT(3)
+#define MLXREG_DPU_HLTH_VDDQ_ALERT_MASK BIT(4)
+#define MLXREG_DPU_HLTH_VDD_CPU_ALERT_MASK BIT(5)
+#define MLXREG_DPU_HEALTH_MASK (MLXREG_DPU_HLTH_UFM_UPGRADE_DONE_MASK | \
+ MLXREG_DPU_HLTH_VDDQ_HOT_ALERT_MASK | \
+ MLXREG_DPU_HLTH_VDD_CPU_HOT_ALERT_MASK | \
+ MLXREG_DPU_HLTH_VDDQ_ALERT_MASK | \
+ MLXREG_DPU_HLTH_VDD_CPU_ALERT_MASK | \
+ MLXREG_DPU_HLTH_THERMAL_TRIP_MASK)
+
+/* Hotplug aggregation masks. */
+#define MLXREG_DPU_HEALTH_AGGR_MASK BIT(0)
+#define MLXREG_DPU_PG_AGGR_MASK BIT(1)
+#define MLXREG_DPU_AGGR_MASK (MLXREG_DPU_HEALTH_AGGR_MASK | \
+ MLXREG_DPU_PG_AGGR_MASK)
+
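+/*
+ * Editorial sketch (not part of this patch): the masks above form a
+ * two-level event scheme. A handler such as mlxreg-hotplug reads the
+ * aggregation register first and touches the group registers only
+ * when their aggregation bit is asserted, roughly:
+ *
+ *	regmap_read(regmap, MLXREG_DPU_REG_AGGRCO_OFFSET, &aggr);
+ *	if (aggr & MLXREG_DPU_PG_AGGR_MASK)
+ *		regmap_read(regmap, MLXREG_DPU_REG_PG_OFFSET, &pg);
+ *	if (aggr & MLXREG_DPU_HEALTH_AGGR_MASK)
+ *		regmap_read(regmap, MLXREG_DPU_REG_HEALTH_OFFSET, &health);
+ */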
+/* Voltage regulator firmware update status mask. */
+#define MLXREG_DPU_VOLTREG_UPD_MASK GENMASK(5, 4)
+
+#define MLXREG_DPU_NR_NONE (-1)
+
+/*
+ * enum mlxreg_dpu_type - Data Processing Unit types
+ *
+ * @MLXREG_DPU_BF3: DPU equipped with BF3 SoC;
+ */
+enum mlxreg_dpu_type {
+ MLXREG_DPU_BF3 = 0x0050,
+};
+
+/* Default register access data. */
+static struct mlxreg_core_data mlxreg_dpu_io_data[] = {
+ {
+ .label = "fpga1_version",
+ .reg = MLXREG_DPU_REG_FPGA1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "fpga1_pn",
+ .reg = MLXREG_DPU_REG_FPGA1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "fpga1_version_min",
+ .reg = MLXREG_DPU_REG_FPGA1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "perst_rst",
+ .reg = MLXREG_DPU_REG_RESET_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "usbphy_rst",
+ .reg = MLXREG_DPU_REG_RESET_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "phy_rst",
+ .reg = MLXREG_DPU_REG_RESET_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0644,
+ },
+ {
+ .label = "tpm_rst",
+ .reg = MLXREG_DPU_REG_RESET_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "reset_from_main_board",
+ .reg = MLXREG_DPU_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_reload",
+ .reg = MLXREG_DPU_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXREG_DPU_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_dpu_thermal",
+ .reg = MLXREG_DPU_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_pwr_off",
+ .reg = MLXREG_DPU_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "dpu_id",
+ .reg = MLXREG_DPU_REG_GP0_RO_OFFSET,
+ .bit = GENMASK(3, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXREG_DPU_REG_GP0_RO_OFFSET,
+ .mask = MLXREG_DPU_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "boot_progress",
+ .reg = MLXREG_DPU_REG_GP1_OFFSET,
+ .mask = GENMASK(3, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_upgrade",
+ .reg = MLXREG_DPU_REG_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxreg_dpu_default_regs_io_data = {
+ .data = mlxreg_dpu_io_data,
+ .counter = ARRAY_SIZE(mlxreg_dpu_io_data),
+};
+
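+/*
+ * Editorial note: entries above using
+ * .mask = GENMASK(7, 0) & ~BIT(n) appear to follow the mlxreg-io
+ * convention in which the cleared bit selects the single register
+ * bit backed by the sysfs attribute; e.g. "usbphy_rst" exposes only
+ * bit 1 of the reset register.
+ */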
+/* Default hotplug data. */
+static struct mlxreg_core_data mlxreg_dpu_power_events_items_data[] = {
+ {
+ .label = "pg_vddio",
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_VDDIO_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "pg_vdd_cpu",
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_VDD_CPU_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "pg_vdd",
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_VDD_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "pg_1v8",
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_1V8_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "pg_comparator",
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_COMPARATOR_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "pg_vddq",
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_VDDQ_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "pg_hvdd",
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_HVDD_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "pg_dvdd",
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_DVDD_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxreg_dpu_health_events_items_data[] = {
+ {
+ .label = "thermal_trip",
+ .reg = MLXREG_DPU_REG_HEALTH_OFFSET,
+ .mask = MLXREG_DPU_HLTH_THERMAL_TRIP_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "ufm_upgrade_done",
+ .reg = MLXREG_DPU_REG_HEALTH_OFFSET,
+ .mask = MLXREG_DPU_HLTH_UFM_UPGRADE_DONE_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "vddq_hot_alert",
+ .reg = MLXREG_DPU_REG_HEALTH_OFFSET,
+ .mask = MLXREG_DPU_HLTH_VDDQ_HOT_ALERT_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "vdd_cpu_hot_alert",
+ .reg = MLXREG_DPU_REG_HEALTH_OFFSET,
+ .mask = MLXREG_DPU_HLTH_VDD_CPU_HOT_ALERT_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "vddq_alert",
+ .reg = MLXREG_DPU_REG_HEALTH_OFFSET,
+ .mask = MLXREG_DPU_HLTH_VDDQ_ALERT_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+ {
+ .label = "vdd_cpu_alert",
+ .reg = MLXREG_DPU_REG_HEALTH_OFFSET,
+ .mask = MLXREG_DPU_HLTH_VDD_CPU_ALERT_MASK,
+ .hpdev.nr = MLXREG_DPU_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxreg_dpu_hotplug_items[] = {
+ {
+ .data = mlxreg_dpu_power_events_items_data,
+ .aggr_mask = MLXREG_DPU_PG_AGGR_MASK,
+ .reg = MLXREG_DPU_REG_PG_OFFSET,
+ .mask = MLXREG_DPU_PG_MASK,
+ .count = ARRAY_SIZE(mlxreg_dpu_power_events_items_data),
+ .health = false,
+ .inversed = 0,
+ },
+ {
+ .data = mlxreg_dpu_health_events_items_data,
+ .aggr_mask = MLXREG_DPU_HEALTH_AGGR_MASK,
+ .reg = MLXREG_DPU_REG_HEALTH_OFFSET,
+ .mask = MLXREG_DPU_HEALTH_MASK,
+ .count = ARRAY_SIZE(mlxreg_dpu_health_events_items_data),
+ .health = false,
+ .inversed = 0,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxreg_dpu_default_hotplug_data = {
+ .items = mlxreg_dpu_hotplug_items,
+ .count = ARRAY_SIZE(mlxreg_dpu_hotplug_items),
+ .cell = MLXREG_DPU_REG_AGGRCO_OFFSET,
+ .mask = MLXREG_DPU_AGGR_MASK,
+};
+
+/**
+ * struct mlxreg_dpu - device private data
+ * @dev: platform device
+ * @data: platform core data
+ * @io_data: register access platform data
+ * @io_regs: register access device
+ * @hotplug_data: hotplug platform data
+ * @hotplug: hotplug device
+ */
+struct mlxreg_dpu {
+ struct device *dev;
+ struct mlxreg_core_data *data;
+ struct mlxreg_core_platform_data *io_data;
+ struct platform_device *io_regs;
+ struct mlxreg_core_hotplug_platform_data *hotplug_data;
+ struct platform_device *hotplug;
+};
+
+static bool mlxreg_dpu_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_DPU_REG_PG_EVENT_OFFSET:
+ case MLXREG_DPU_REG_PG_MASK_OFFSET:
+ case MLXREG_DPU_REG_RESET_GP1_OFFSET:
+ case MLXREG_DPU_REG_GP0_OFFSET:
+ case MLXREG_DPU_REG_GP1_OFFSET:
+ case MLXREG_DPU_REG_GP4_OFFSET:
+ case MLXREG_DPU_REG_AGGRCO_OFFSET:
+ case MLXREG_DPU_REG_AGGRCO_MASK_OFFSET:
+ case MLXREG_DPU_REG_HEALTH_EVENT_OFFSET:
+ case MLXREG_DPU_REG_HEALTH_MASK_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool mlxreg_dpu_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_DPU_REG_FPGA1_VER_OFFSET:
+ case MLXREG_DPU_REG_FPGA1_PN_OFFSET:
+ case MLXREG_DPU_REG_FPGA1_PN1_OFFSET:
+ case MLXREG_DPU_REG_PG_OFFSET:
+ case MLXREG_DPU_REG_PG_EVENT_OFFSET:
+ case MLXREG_DPU_REG_PG_MASK_OFFSET:
+ case MLXREG_DPU_REG_RESET_GP1_OFFSET:
+ case MLXREG_DPU_REG_RST_CAUSE1_OFFSET:
+ case MLXREG_DPU_REG_GP0_RO_OFFSET:
+ case MLXREG_DPU_REG_GP0_OFFSET:
+ case MLXREG_DPU_REG_GP1_OFFSET:
+ case MLXREG_DPU_REG_GP4_OFFSET:
+ case MLXREG_DPU_REG_AGGRCO_OFFSET:
+ case MLXREG_DPU_REG_AGGRCO_MASK_OFFSET:
+ case MLXREG_DPU_REG_HEALTH_OFFSET:
+ case MLXREG_DPU_REG_HEALTH_EVENT_OFFSET:
+ case MLXREG_DPU_REG_HEALTH_MASK_OFFSET:
+ case MLXREG_DPU_REG_FPGA1_MVER_OFFSET:
+ case MLXREG_DPU_REG_CONFIG3_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool mlxreg_dpu_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_DPU_REG_FPGA1_VER_OFFSET:
+ case MLXREG_DPU_REG_FPGA1_PN_OFFSET:
+ case MLXREG_DPU_REG_FPGA1_PN1_OFFSET:
+ case MLXREG_DPU_REG_PG_OFFSET:
+ case MLXREG_DPU_REG_PG_EVENT_OFFSET:
+ case MLXREG_DPU_REG_PG_MASK_OFFSET:
+ case MLXREG_DPU_REG_RESET_GP1_OFFSET:
+ case MLXREG_DPU_REG_RST_CAUSE1_OFFSET:
+ case MLXREG_DPU_REG_GP0_RO_OFFSET:
+ case MLXREG_DPU_REG_GP0_OFFSET:
+ case MLXREG_DPU_REG_GP1_OFFSET:
+ case MLXREG_DPU_REG_GP4_OFFSET:
+ case MLXREG_DPU_REG_AGGRCO_OFFSET:
+ case MLXREG_DPU_REG_AGGRCO_MASK_OFFSET:
+ case MLXREG_DPU_REG_HEALTH_OFFSET:
+ case MLXREG_DPU_REG_HEALTH_EVENT_OFFSET:
+ case MLXREG_DPU_REG_HEALTH_MASK_OFFSET:
+ case MLXREG_DPU_REG_FPGA1_MVER_OFFSET:
+ case MLXREG_DPU_REG_CONFIG3_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+/* Configuration for the register map of a device with a 2-byte register address space. */
+static const struct regmap_config mlxreg_dpu_regmap_conf = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MLXREG_DPU_REG_MAX,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxreg_dpu_writeable_reg,
+ .readable_reg = mlxreg_dpu_readable_reg,
+ .volatile_reg = mlxreg_dpu_volatile_reg,
+};
+
+static int
+mlxreg_dpu_copy_hotplug_data(struct device *dev, struct mlxreg_dpu *mlxreg_dpu,
+ const struct mlxreg_core_hotplug_platform_data *hotplug_data)
+{
+ struct mlxreg_core_item *item;
+ int i;
+
+ mlxreg_dpu->hotplug_data = devm_kmemdup(dev, hotplug_data,
+ sizeof(*mlxreg_dpu->hotplug_data), GFP_KERNEL);
+ if (!mlxreg_dpu->hotplug_data)
+ return -ENOMEM;
+
+ mlxreg_dpu->hotplug_data->items = devm_kmemdup(dev, hotplug_data->items,
+ mlxreg_dpu->hotplug_data->count *
+ sizeof(*mlxreg_dpu->hotplug_data->items),
+ GFP_KERNEL);
+ if (!mlxreg_dpu->hotplug_data->items)
+ return -ENOMEM;
+
+ item = mlxreg_dpu->hotplug_data->items;
+ for (i = 0; i < hotplug_data->count; i++, item++) {
+ item->data = devm_kmemdup(dev, hotplug_data->items[i].data,
+ hotplug_data->items[i].count * sizeof(*item->data),
+ GFP_KERNEL);
+ if (!item->data)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
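+/*
+ * Editorial note: the deep copy above gives every DPU instance its
+ * own hotplug descriptor tree; mlxreg_dpu_config_init() below patches
+ * regmap and irq into the copy, so sharing the static tables between
+ * several DPUs would let the instances clobber each other.
+ */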
+static int mlxreg_dpu_config_init(struct mlxreg_dpu *mlxreg_dpu, void *regmap,
+ struct mlxreg_core_data *data, int irq)
+{
+ struct device *dev = &data->hpdev.client->dev;
+ u32 regval;
+ int err;
+
+ /* Validate DPU type. */
+ err = regmap_read(regmap, MLXREG_DPU_REG_CONFIG3_OFFSET, &regval);
+ if (err)
+ return err;
+
+ switch (regval) {
+ case MLXREG_DPU_BF3:
+ /* Copy platform specific hotplug data. */
+ err = mlxreg_dpu_copy_hotplug_data(dev, mlxreg_dpu,
+ &mlxreg_dpu_default_hotplug_data);
+ if (err)
+ return err;
+
+ mlxreg_dpu->io_data = &mlxreg_dpu_default_regs_io_data;
+
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ /* Register IO access driver. */
+ if (mlxreg_dpu->io_data) {
+ mlxreg_dpu->io_data->regmap = regmap;
+ mlxreg_dpu->io_regs =
+ platform_device_register_resndata(dev, "mlxreg-io",
+ data->slot, NULL, 0,
+ mlxreg_dpu->io_data,
+ sizeof(*mlxreg_dpu->io_data));
+ if (IS_ERR(mlxreg_dpu->io_regs)) {
+ dev_err(dev, "Failed to create regio for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr,
+ data->hpdev.brdinfo->addr);
+ return PTR_ERR(mlxreg_dpu->io_regs);
+ }
+ }
+
+ /* Register hotplug driver. */
+ if (mlxreg_dpu->hotplug_data && irq) {
+ mlxreg_dpu->hotplug_data->regmap = regmap;
+ mlxreg_dpu->hotplug_data->irq = irq;
+ mlxreg_dpu->hotplug =
+ platform_device_register_resndata(dev, "mlxreg-hotplug",
+ data->slot, NULL, 0,
+ mlxreg_dpu->hotplug_data,
+ sizeof(*mlxreg_dpu->hotplug_data));
+ if (IS_ERR(mlxreg_dpu->hotplug)) {
+ err = PTR_ERR(mlxreg_dpu->hotplug);
+ goto fail_register_hotplug;
+ }
+ }
+
+ return 0;
+
+fail_register_hotplug:
+ platform_device_unregister(mlxreg_dpu->io_regs);
+
+ return err;
+}
+
+static void mlxreg_dpu_config_exit(struct mlxreg_dpu *mlxreg_dpu)
+{
+ platform_device_unregister(mlxreg_dpu->hotplug);
+ platform_device_unregister(mlxreg_dpu->io_regs);
+}
+
+static int mlxreg_dpu_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_data *data;
+ struct mlxreg_dpu *mlxreg_dpu;
+ void *regmap;
+ int err;
+
+ data = dev_get_platdata(&pdev->dev);
+ if (!data || !data->hpdev.brdinfo)
+ return -EINVAL;
+
+ data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
+ if (!data->hpdev.adapter)
+ return -EPROBE_DEFER;
+
+ mlxreg_dpu = devm_kzalloc(&pdev->dev, sizeof(*mlxreg_dpu), GFP_KERNEL);
+ if (!mlxreg_dpu) {
+ err = -ENOMEM;
+ goto alloc_fail;
+ }
+
+ /* Create device at the top of DPU I2C tree. */
+ data->hpdev.client = i2c_new_client_device(data->hpdev.adapter,
+ data->hpdev.brdinfo);
+ if (IS_ERR(data->hpdev.client)) {
+ dev_err(&pdev->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ err = PTR_ERR(data->hpdev.client);
+ goto i2c_new_device_fail;
+ }
+
+ regmap = devm_regmap_init_i2c(data->hpdev.client, &mlxreg_dpu_regmap_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to create regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ err = PTR_ERR(regmap);
+ goto devm_regmap_init_i2c_fail;
+ }
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(regmap);
+ err = regcache_sync(regmap);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+ goto regcache_sync_fail;
+ }
+
+ mlxreg_dpu->data = data;
+ mlxreg_dpu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mlxreg_dpu);
+
+ err = mlxreg_dpu_config_init(mlxreg_dpu, regmap, data, data->hpdev.brdinfo->irq);
+ if (err)
+ goto mlxreg_dpu_config_init_fail;
+
+ return err;
+
+mlxreg_dpu_config_init_fail:
+regcache_sync_fail:
+devm_regmap_init_i2c_fail:
+ i2c_unregister_device(data->hpdev.client);
+i2c_new_device_fail:
+alloc_fail:
+ i2c_put_adapter(data->hpdev.adapter);
+ return err;
+}
+
+static void mlxreg_dpu_remove(struct platform_device *pdev)
+{
+ struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
+ struct mlxreg_dpu *mlxreg_dpu = platform_get_drvdata(pdev);
+
+ mlxreg_dpu_config_exit(mlxreg_dpu);
+ i2c_unregister_device(data->hpdev.client);
+ i2c_put_adapter(data->hpdev.adapter);
+}
+
+static struct platform_driver mlxreg_dpu_driver = {
+ .probe = mlxreg_dpu_probe,
+ .remove = mlxreg_dpu_remove,
+ .driver = {
+ .name = "mlxreg-dpu",
+ },
+};
+
+module_platform_driver(mlxreg_dpu_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia Data Processor Unit platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:mlxreg-dpu");
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index b347000e4329..d246772aafd6 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -262,7 +262,7 @@ static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
item = pdata->items;
/* Go over all kinds of items - psu, pwr, fan. */
- for (i = 0; i < pdata->counter; i++, item++) {
+ for (i = 0; i < pdata->count; i++, item++) {
if (item->capability) {
/*
* Read group capability register to get actual number
@@ -541,7 +541,7 @@ static void mlxreg_hotplug_work_handler(struct work_struct *work)
goto unmask_event;
/* Handle topology and health configuration changes. */
- for (i = 0; i < pdata->counter; i++, item++) {
+ for (i = 0; i < pdata->count; i++, item++) {
if (aggr_asserted & item->aggr_mask) {
if (item->health)
mlxreg_hotplug_health_work_helper(priv, item);
@@ -590,7 +590,7 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items;
- for (i = 0; i < pdata->counter; i++, item++) {
+ for (i = 0; i < pdata->count; i++, item++) {
 		/* Clear group presence event. */
ret = regmap_write(priv->regmap, item->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
@@ -674,7 +674,7 @@ static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
0);
/* Clear topology configurations. */
- for (i = 0; i < pdata->counter; i++, item++) {
+ for (i = 0; i < pdata->count; i++, item++) {
data = item->data;
 		/* Mask group presence event. */
regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
index 0c047aa2345b..db31c8bf2255 100644
--- a/drivers/platform/mellanox/nvsw-sn2201.c
+++ b/drivers/platform/mellanox/nvsw-sn2201.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -104,6 +105,9 @@
| NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF \
| NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF \
| NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF)
+#define NVSW_SN2201_CPLD_AGGR_BUSBAR_MASK_DEF \
+ (NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF \
+ | NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF)
#define NVSW_SN2201_CPLD_ASIC_MASK GENMASK(3, 1)
#define NVSW_SN2201_CPLD_PSU_MASK GENMASK(1, 0)
@@ -132,6 +136,7 @@
* @cpld_devs: I2C devices for cpld;
* @cpld_devs_num: number of I2C devices for cpld;
 * @main_mux_deferred_nr: I2C adapter number which must exist prior to creating the devices;
+ * @ext_pwr_source: true if the system is powered by an external power supply, false if internally;
*/
struct nvsw_sn2201 {
struct device *dev;
@@ -152,6 +157,7 @@ struct nvsw_sn2201 {
struct mlxreg_hotplug_device *cpld_devs;
int cpld_devs_num;
int main_mux_deferred_nr;
+ bool ext_pwr_source;
};
static bool nvsw_sn2201_writeable_reg(struct device *dev, unsigned int reg)
@@ -517,11 +523,40 @@ static struct mlxreg_core_item nvsw_sn2201_items[] = {
static
struct mlxreg_core_hotplug_platform_data nvsw_sn2201_hotplug = {
.items = nvsw_sn2201_items,
- .counter = ARRAY_SIZE(nvsw_sn2201_items),
+ .count = ARRAY_SIZE(nvsw_sn2201_items),
.cell = NVSW_SN2201_SYS_INT_STATUS_OFFSET,
.mask = NVSW_SN2201_CPLD_AGGR_MASK_DEF,
};
+static struct mlxreg_core_item nvsw_sn2201_busbar_items[] = {
+ {
+ .data = nvsw_sn2201_fan_items_data,
+ .aggr_mask = NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF,
+ .reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
+ .mask = NVSW_SN2201_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(nvsw_sn2201_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = nvsw_sn2201_sys_items_data,
+ .aggr_mask = NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
+ .mask = NVSW_SN2201_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(nvsw_sn2201_sys_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data nvsw_sn2201_busbar_hotplug = {
+ .items = nvsw_sn2201_busbar_items,
+ .count = ARRAY_SIZE(nvsw_sn2201_busbar_items),
+ .cell = NVSW_SN2201_SYS_INT_STATUS_OFFSET,
+ .mask = NVSW_SN2201_CPLD_AGGR_BUSBAR_MASK_DEF,
+};
+
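+/*
+ * Editorial note: compared to nvsw_sn2201_items, the busbar variant
+ * above drops the PSU and power hotplug groups, since a system fed
+ * from an external busbar has no internal power supplies to monitor;
+ * only the fan and ASIC groups remain.
+ */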
/* SN2201 static devices. */
static struct i2c_board_info nvsw_sn2201_static_devices[] = {
{
@@ -557,6 +592,9 @@ static struct i2c_board_info nvsw_sn2201_static_devices[] = {
{
I2C_BOARD_INFO("pmbus", 0x40),
},
+ {
+ I2C_BOARD_INFO("lm5066i", 0x15),
+ },
};
/* SN2201 default static board info. */
@@ -607,6 +645,58 @@ static struct mlxreg_hotplug_device nvsw_sn2201_static_brdinfo[] = {
},
};
+/* SN2201 default busbar static board info. */
+static struct mlxreg_hotplug_device nvsw_sn2201_busbar_static_brdinfo[] = {
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[0],
+ .nr = NVSW_SN2201_MAIN_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[1],
+ .nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[2],
+ .nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[3],
+ .nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[4],
+ .nr = NVSW_SN2201_MAIN_MUX_CH3_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[5],
+ .nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[6],
+ .nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[7],
+ .nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[8],
+ .nr = NVSW_SN2201_MAIN_MUX_CH6_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[9],
+ .nr = NVSW_SN2201_MAIN_MUX_CH6_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[10],
+ .nr = NVSW_SN2201_MAIN_MUX_CH7_NR,
+ },
+ {
+ .brdinfo = &nvsw_sn2201_static_devices[11],
+ .nr = NVSW_SN2201_MAIN_MUX_CH1_NR,
+ },
+};
+
/* LED default data. */
static struct mlxreg_core_data nvsw_sn2201_led_data[] = {
{
@@ -981,7 +1071,10 @@ static int nvsw_sn2201_config_init(struct nvsw_sn2201 *nvsw_sn2201, void *regmap
nvsw_sn2201->io_data = &nvsw_sn2201_regs_io;
nvsw_sn2201->led_data = &nvsw_sn2201_led;
nvsw_sn2201->wd_data = &nvsw_sn2201_wd;
- nvsw_sn2201->hotplug_data = &nvsw_sn2201_hotplug;
+ if (nvsw_sn2201->ext_pwr_source)
+ nvsw_sn2201->hotplug_data = &nvsw_sn2201_busbar_hotplug;
+ else
+ nvsw_sn2201->hotplug_data = &nvsw_sn2201_hotplug;
/* Register IO access driver. */
if (nvsw_sn2201->io_data) {
@@ -1198,12 +1291,18 @@ static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
static int nvsw_sn2201_probe(struct platform_device *pdev)
{
struct nvsw_sn2201 *nvsw_sn2201;
+ const char *sku;
int ret;
nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
if (!nvsw_sn2201)
return -ENOMEM;
+	/* Validate the system power source type - only the HI168 SKU supports external power. */
+ sku = dmi_get_system_info(DMI_PRODUCT_SKU);
+ if (sku && !strcmp(sku, "HI168"))
+ nvsw_sn2201->ext_pwr_source = true;
+
nvsw_sn2201->dev = &pdev->dev;
platform_set_drvdata(pdev, nvsw_sn2201);
ret = platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
@@ -1214,8 +1313,13 @@ static int nvsw_sn2201_probe(struct platform_device *pdev)
nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
nvsw_sn2201->cpld_devs = nvsw_sn2201_cpld_brdinfo;
- nvsw_sn2201->sn2201_devs = nvsw_sn2201_static_brdinfo;
- nvsw_sn2201->sn2201_devs_num = ARRAY_SIZE(nvsw_sn2201_static_brdinfo);
+ if (nvsw_sn2201->ext_pwr_source) {
+ nvsw_sn2201->sn2201_devs = nvsw_sn2201_busbar_static_brdinfo;
+ nvsw_sn2201->sn2201_devs_num = ARRAY_SIZE(nvsw_sn2201_busbar_static_brdinfo);
+ } else {
+ nvsw_sn2201->sn2201_devs = nvsw_sn2201_static_brdinfo;
+ nvsw_sn2201->sn2201_devs_num = ARRAY_SIZE(nvsw_sn2201_static_brdinfo);
+ }
return nvsw_sn2201_config_pre_init(nvsw_sn2201);
}
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index b629e82af97c..f775c6ca1ec1 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -6,7 +6,7 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
depends on ARM64 || X86 || COMPILE_TEST
- default y
+ default y if ARM64 || X86
help
Say Y here to get to see options for platform-specific device drivers
for Microsoft Surface devices. This option alone does not add any
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 43407e76476b..e5cbd58a99f3 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -779,6 +779,21 @@ config PCENGINES_APU2
To compile this driver as a module, choose M here: the module
will be called pcengines-apuv2.
+config PORTWELL_EC
+ tristate "Portwell Embedded Controller driver"
+ depends on X86 && HAS_IOPORT && WATCHDOG && GPIOLIB
+ select WATCHDOG_CORE
+ help
+ This driver provides support for the GPIO pins and watchdog timer
+ embedded in Portwell's EC.
+
+ Theoretically, this driver should work on multiple Portwell platforms,
+ but it has only been tested on the Portwell NANO-6064 board.
+ If you encounter any issues on other boards, please report them.
+
+ To compile this driver as a module, choose M here: the module
+ will be called portwell-ec.
+
config BARCO_P50_GPIO
tristate "Barco P50 GPIO driver for identify LED/button"
depends on GPIOLIB
@@ -1075,6 +1090,16 @@ config LENOVO_WMI_CAMERA
To compile this driver as a module, choose M here: the module
will be called lenovo-wmi-camera.
+config DASHARO_ACPI
+ tristate "Dasharo ACPI Platform Driver"
+ depends on ACPI
+ depends on HWMON
+ help
+ This driver provides HWMON support for devices running Dasharo
+ firmware.
+
+ If you have a device with Dasharo firmware, choose Y or M here.
+
source "drivers/platform/x86/x86-android-tablets/Kconfig"
config FW_ATTR_CLASS
@@ -1201,6 +1226,21 @@ config SEL3350_PLATFORM
To compile this driver as a module, choose M here: the module
will be called sel3350-platform.
+config OXP_EC
+ tristate "OneXPlayer EC platform control"
+ depends on ACPI_EC
+ depends on ACPI_BATTERY
+ depends on HWMON
+ depends on X86
+ help
+ Enables support for the platform EC of OneXPlayer and AOKZOE
+ handheld devices. This includes fan speed, fan controls, and
+	  disabling the default TDP behavior of the device. For legacy
+	  reasons, this driver also provides hwmon functionality to Ayaneo
+ devices and the OrangePi Neo.
+
+source "drivers/platform/x86/tuxedo/Kconfig"
+
endif # X86_PLATFORM_DEVICES
config P2SB
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 650dfbebb6c8..abbc2644ff6d 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -92,6 +92,9 @@ obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
# PC Engines
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
+# Portwell
+obj-$(CONFIG_PORTWELL_EC) += portwell-ec.o
+
# Barco
obj-$(CONFIG_BARCO_P50_GPIO) += barco-p50-gpio.o
@@ -112,6 +115,9 @@ obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
# Inspur
obj-$(CONFIG_INSPUR_PLATFORM_PROFILE) += inspur_platform_profile.o
+# Dasharo
+obj-$(CONFIG_DASHARO_ACPI) += dasharo-acpi.o
+
# Laptop drivers
obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
@@ -149,8 +155,14 @@ obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += siemens/
# Silicom
obj-$(CONFIG_SILICOM_PLATFORM) += silicom-platform.o
+# TUXEDO
+obj-y += tuxedo/
+
# Winmate
obj-$(CONFIG_WINMATE_FM07_KEYS) += winmate-fm07-keys.o
# SEL
obj-$(CONFIG_SEL3350_PLATFORM) += sel3350-platform.o
+
+# OneXPlayer
+obj-$(CONFIG_OXP_EC) += oxpec.o
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 4c3bb68e8fe4..5ce5ad3efe69 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -271,7 +271,7 @@ static const struct bios_settings bios_tbl[] __initconst = {
* this struct is used to instruct thermal layer to use bang_bang instead of
* default governor for acerhdf
*/
-static struct thermal_zone_params acerhdf_zone_params = {
+static const struct thermal_zone_params acerhdf_zone_params = {
.governor_name = "bang_bang",
};
@@ -426,7 +426,7 @@ static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
}
/* bind callback functions to thermalzone */
-static struct thermal_zone_device_ops acerhdf_dev_ops = {
+static const struct thermal_zone_device_ops acerhdf_dev_ops = {
.should_bind = acerhdf_should_bind,
.get_temp = acerhdf_get_ec_temp,
.change_mode = acerhdf_change_mode,
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index c3e086ea64fc..63e4bd985699 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -32,3 +32,14 @@ config AMD_WBRF
This mechanism will only be activated on platforms that advertise a
need for it.
+
+config AMD_ISP_PLATFORM
+ tristate "AMD ISP4 platform driver"
+ depends on I2C && X86_64 && ACPI
+ help
+ Platform driver for AMD platforms containing image signal processor
+ gen 4. Provides camera sensor module board information to allow
+ sensor and V4L drivers to work properly.
+
+ This driver can also be built as a module. If so, the module
+ will be called amd_isp4.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
index c6c40bdcbded..b0e284b5d497 100644
--- a/drivers/platform/x86/amd/Makefile
+++ b/drivers/platform/x86/amd/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_AMD_PMC) += pmc/
obj-$(CONFIG_AMD_HSMP) += hsmp/
obj-$(CONFIG_AMD_PMF) += pmf/
obj-$(CONFIG_AMD_WBRF) += wbrf.o
+obj-$(CONFIG_AMD_ISP_PLATFORM) += amd_isp4.o
diff --git a/drivers/platform/x86/amd/amd_isp4.c b/drivers/platform/x86/amd/amd_isp4.c
new file mode 100644
index 000000000000..0cc01441bcbb
--- /dev/null
+++ b/drivers/platform/x86/amd/amd_isp4.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * AMD ISP platform driver for sensor i2c-client instantiation
+ *
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#define AMDISP_OV05C10_I2C_ADDR 0x10
+#define AMDISP_OV05C10_HID "OMNI5C10"
+#define AMDISP_OV05C10_REMOTE_EP_NAME "ov05c10_isp_4_1_1"
+#define AMD_ISP_PLAT_DRV_NAME "amd-isp4"
+
+/*
+ * AMD ISP platform info definition holding the sensor-specific
+ * platform configuration used to prepare the amdisp platform.
+ */
+struct amdisp_platform_info {
+ struct i2c_board_info board_info;
+ const struct software_node **swnodes;
+};
+
+/*
+ * AMD ISP platform definition to configure the device properties
+ * missing in the ACPI table.
+ */
+struct amdisp_platform {
+ const struct amdisp_platform_info *pinfo;
+ struct i2c_board_info board_info;
+ struct notifier_block i2c_nb;
+ struct i2c_client *i2c_dev;
+ struct mutex lock; /* protects i2c client creation */
+};
+
+/* Top-level OV05C10 camera node property table */
+static const struct property_entry ov05c10_camera_props[] = {
+ PROPERTY_ENTRY_U32("clock-frequency", 24 * HZ_PER_MHZ),
+ { }
+};
+
+/* Root AMD ISP OV05C10 camera node definition */
+static const struct software_node camera_node = {
+ .name = AMDISP_OV05C10_HID,
+ .properties = ov05c10_camera_props,
+};
+
+/*
+ * AMD ISP OV05C10 Ports node definition. No properties defined for
+ * ports node for OV05C10.
+ */
+static const struct software_node ports = {
+ .name = "ports",
+ .parent = &camera_node,
+};
+
+/*
+ * AMD ISP OV05C10 Port node definition. No properties defined for
+ * port node for OV05C10.
+ */
+static const struct software_node port_node = {
+ .name = "port@",
+ .parent = &ports,
+};
+
+/*
+ * Remote endpoint AMD ISP node definition. No properties defined for
+ * remote endpoint node for OV05C10.
+ */
+static const struct software_node remote_ep_isp_node = {
+ .name = AMDISP_OV05C10_REMOTE_EP_NAME,
+};
+
+/*
+ * Remote endpoint reference for isp node included in the
+ * OV05C10 endpoint.
+ */
+static const struct software_node_ref_args ov05c10_refs[] = {
+ SOFTWARE_NODE_REFERENCE(&remote_ep_isp_node),
+};
+
+/* OV05C10 supports a single link frequency */
+static const u64 ov05c10_link_freqs[] = {
+ 925 * HZ_PER_MHZ,
+};
+
+/* OV05C10 supports only 2-lane configuration */
+static const u32 ov05c10_data_lanes[] = {
+ 1,
+ 2,
+};
+
+/* OV05C10 endpoint node properties table */
+static const struct property_entry ov05c10_endpoint_props[] = {
+ PROPERTY_ENTRY_U32("bus-type", 4),
+ PROPERTY_ENTRY_U32_ARRAY_LEN("data-lanes", ov05c10_data_lanes,
+ ARRAY_SIZE(ov05c10_data_lanes)),
+ PROPERTY_ENTRY_U64_ARRAY_LEN("link-frequencies", ov05c10_link_freqs,
+ ARRAY_SIZE(ov05c10_link_freqs)),
+ PROPERTY_ENTRY_REF_ARRAY("remote-endpoint", ov05c10_refs),
+ { }
+};
+
+/* AMD ISP endpoint node definition */
+static const struct software_node endpoint_node = {
+ .name = "endpoint",
+ .parent = &port_node,
+ .properties = ov05c10_endpoint_props,
+};
+
+/*
+ * The AMD ISP swnode graph uses 5 nodes whose relationship is fixed
+ * to match the structure that v4l2 expects for successful endpoint
+ * fwnode parsing (see the parsing sketch after the node table below).
+ *
+ * Only the node property_entries vary for each platform supporting
+ * different sensor modules.
+ */
+static const struct software_node *ov05c10_nodes[] = {
+ &camera_node,
+ &ports,
+ &port_node,
+ &endpoint_node,
+ &remote_ep_isp_node,
+ NULL
+};
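The fixed five-node layout above mirrors the ports/port/endpoint hierarchy that the v4l2 fwnode helpers expect to walk. Below is a minimal sketch of how a sensor driver could parse the endpoint this graph describes; the helper name and error handling are illustrative assumptions, not part of this patch:

#include <linux/property.h>
#include <media/v4l2-fwnode.h>

/* Hypothetical helper: parse the CSI-2 endpoint beneath the camera node. */
static int ov05c10_parse_endpoint(struct device *dev)
{
	struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
	struct fwnode_handle *ep;
	int ret;

	/* Walks camera_node -> ports -> port@ -> endpoint */
	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
	if (!ep)
		return -ENODEV;

	/* Fills bus type and data-lanes; link-frequencies need the _alloc_parse variant */
	ret = v4l2_fwnode_endpoint_parse(ep, &vep);
	fwnode_handle_put(ep);

	return ret;
}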
+
+/* OV05C10 specific AMD ISP platform configuration */
+static const struct amdisp_platform_info ov05c10_platform_config = {
+ .board_info = {
+ .dev_name = "ov05c10",
+ I2C_BOARD_INFO("ov05c10", AMDISP_OV05C10_I2C_ADDR),
+ },
+ .swnodes = ov05c10_nodes,
+};
+
+static const struct acpi_device_id amdisp_sensor_ids[] = {
+ { AMDISP_OV05C10_HID, (kernel_ulong_t)&ov05c10_platform_config },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amdisp_sensor_ids);
+
+static inline bool is_isp_i2c_adapter(struct i2c_adapter *adap)
+{
+ return !strcmp(adap->owner->name, "i2c_designware_amdisp");
+}
+
+static void instantiate_isp_i2c_client(struct amdisp_platform *isp4_platform,
+ struct i2c_adapter *adap)
+{
+ struct i2c_board_info *info = &isp4_platform->board_info;
+ struct i2c_client *i2c_dev;
+
+ guard(mutex)(&isp4_platform->lock);
+
+ if (isp4_platform->i2c_dev)
+ return;
+
+ i2c_dev = i2c_new_client_device(adap, info);
+ if (IS_ERR(i2c_dev)) {
+ dev_err(&adap->dev, "error %pe registering isp i2c_client\n", i2c_dev);
+ return;
+ }
+ isp4_platform->i2c_dev = i2c_dev;
+}
+
+static int isp_i2c_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct amdisp_platform *isp4_platform =
+ container_of(nb, struct amdisp_platform, i2c_nb);
+ struct device *dev = data;
+ struct i2c_client *client;
+ struct i2c_adapter *adap;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ adap = i2c_verify_adapter(dev);
+ if (!adap)
+ break;
+ if (is_isp_i2c_adapter(adap))
+ instantiate_isp_i2c_client(isp4_platform, adap);
+ break;
+ case BUS_NOTIFY_REMOVED_DEVICE:
+ client = i2c_verify_client(dev);
+ if (!client)
+ break;
+
+ scoped_guard(mutex, &isp4_platform->lock) {
+ if (isp4_platform->i2c_dev == client) {
+ dev_dbg(&client->adapter->dev, "amdisp i2c_client removed\n");
+ isp4_platform->i2c_dev = NULL;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
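(Both locking patterns above come from linux/cleanup.h: guard(mutex) in instantiate_isp_i2c_client() holds the lock until the function returns, while scoped_guard() in the notifier holds it only for the braced block, so neither path needs an explicit mutex_unlock() on early return.)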
+
+static struct amdisp_platform *prepare_amdisp_platform(struct device *dev,
+ const struct amdisp_platform_info *src)
+{
+ struct amdisp_platform *isp4_platform;
+ int ret;
+
+ isp4_platform = devm_kzalloc(dev, sizeof(*isp4_platform), GFP_KERNEL);
+ if (!isp4_platform)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_mutex_init(dev, &isp4_platform->lock);
+ if (ret)
+ return ERR_PTR(ret);
+
+ isp4_platform->board_info.dev_name = src->board_info.dev_name;
+ strscpy(isp4_platform->board_info.type, src->board_info.type);
+ isp4_platform->board_info.addr = src->board_info.addr;
+ isp4_platform->pinfo = src;
+
+ ret = software_node_register_node_group(src->swnodes);
+ if (ret)
+ return ERR_PTR(ret);
+
+ isp4_platform->board_info.swnode = src->swnodes[0];
+
+ return isp4_platform;
+}
+
+static int try_to_instantiate_i2c_client(struct device *dev, void *data)
+{
+ struct i2c_adapter *adap = i2c_verify_adapter(dev);
+ struct amdisp_platform *isp4_platform = data;
+
+ if (!isp4_platform || !adap)
+ return 0;
+ if (!adap->owner)
+ return 0;
+
+ if (is_isp_i2c_adapter(adap))
+ instantiate_isp_i2c_client(isp4_platform, adap);
+
+ return 0;
+}
+
+static int amd_isp_probe(struct platform_device *pdev)
+{
+ const struct amdisp_platform_info *pinfo;
+ struct amdisp_platform *isp4_platform;
+ int ret;
+
+ pinfo = device_get_match_data(&pdev->dev);
+ if (!pinfo)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "failed to get valid ACPI data\n");
+
+ isp4_platform = prepare_amdisp_platform(&pdev->dev, pinfo);
+ if (IS_ERR(isp4_platform))
+ return dev_err_probe(&pdev->dev, PTR_ERR(isp4_platform),
+ "failed to prepare AMD ISP platform fwnode\n");
+
+ isp4_platform->i2c_nb.notifier_call = isp_i2c_bus_notify;
+ ret = bus_register_notifier(&i2c_bus_type, &isp4_platform->i2c_nb);
+ if (ret)
+ goto error_unregister_sw_node;
+
+ /* check if adapter is already registered and create i2c client instance */
+ i2c_for_each_dev(isp4_platform, try_to_instantiate_i2c_client);
+
+ platform_set_drvdata(pdev, isp4_platform);
+ return 0;
+
+error_unregister_sw_node:
+ software_node_unregister_node_group(isp4_platform->pinfo->swnodes);
+ return ret;
+}
+
+static void amd_isp_remove(struct platform_device *pdev)
+{
+ struct amdisp_platform *isp4_platform = platform_get_drvdata(pdev);
+
+ bus_unregister_notifier(&i2c_bus_type, &isp4_platform->i2c_nb);
+ i2c_unregister_device(isp4_platform->i2c_dev);
+ software_node_unregister_node_group(isp4_platform->pinfo->swnodes);
+}
+
+static struct platform_driver amd_isp_platform_driver = {
+ .driver = {
+ .name = AMD_ISP_PLAT_DRV_NAME,
+ .acpi_match_table = amdisp_sensor_ids,
+ },
+ .probe = amd_isp_probe,
+ .remove = amd_isp_remove,
+};
+
+module_platform_driver(amd_isp_platform_driver);
+
+MODULE_AUTHOR("Benjamin Chan <benjamin.chan@amd.com>");
+MODULE_AUTHOR("Pratap Nirujogi <pratap.nirujogi@amd.com>");
+MODULE_DESCRIPTION("AMD ISP4 Platform Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/amd/hsmp/Kconfig b/drivers/platform/x86/amd/hsmp/Kconfig
index d6f7a62d55b5..2911120792e8 100644
--- a/drivers/platform/x86/amd/hsmp/Kconfig
+++ b/drivers/platform/x86/amd/hsmp/Kconfig
@@ -12,6 +12,7 @@ menu "AMD HSMP Driver"
config AMD_HSMP_ACPI
tristate "AMD HSMP ACPI device driver"
depends on ACPI
+ depends on HWMON || !HWMON
select AMD_HSMP
help
Host System Management Port (HSMP) interface is a mailbox interface
@@ -29,6 +30,7 @@ config AMD_HSMP_ACPI
config AMD_HSMP_PLAT
tristate "AMD HSMP platform device driver"
+ depends on HWMON || !HWMON
select AMD_HSMP
help
Host System Management Port (HSMP) interface is a mailbox interface
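(The "depends on HWMON || !HWMON" lines added above are the usual Kconfig idiom for an optional dependency: they rule out only the broken combination where the HSMP driver is built in while hwmon is a module, and permit every other configuration.)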
diff --git a/drivers/platform/x86/amd/hsmp/Makefile b/drivers/platform/x86/amd/hsmp/Makefile
index 0759bbcd13f6..ce8342e71f50 100644
--- a/drivers/platform/x86/amd/hsmp/Makefile
+++ b/drivers/platform/x86/amd/hsmp/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_AMD_HSMP) += hsmp_common.o
hsmp_common-y := hsmp.o
+hsmp_common-$(CONFIG_HWMON) += hwmon.o
obj-$(CONFIG_AMD_HSMP_PLAT) += amd_hsmp.o
amd_hsmp-y := plat.o
obj-$(CONFIG_AMD_HSMP_ACPI) += hsmp_acpi.o
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index c1eccb3c80c5..2f1faa82d13e 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -9,9 +9,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/amd_hsmp.h>
+#include <asm/amd/hsmp.h>
#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
#include <linux/ioport.h>
@@ -23,13 +26,11 @@
#include <uapi/asm-generic/errno-base.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
#include "hsmp.h"
-#define DRIVER_NAME "amd_hsmp"
-#define DRIVER_VERSION "2.3"
-#define ACPI_HSMP_DEVICE_HID "AMDI0097"
+#define DRIVER_NAME "hsmp_acpi"
/* These are the strings specified in ACPI table */
#define MSG_IDOFF_STR "MsgIdOffset"
@@ -38,6 +39,11 @@
static struct hsmp_plat_device *hsmp_pdev;
+struct hsmp_sys_attr {
+ struct device_attribute dattr;
+ u32 msg_id;
+};
+
static int amd_hsmp_acpi_rdwr(struct hsmp_socket *sock, u32 offset,
u32 *value, bool write)
{
@@ -245,6 +251,215 @@ static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj,
return 0;
}
+static umode_t hsmp_is_sock_dev_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int id)
+{
+ return attr->mode;
+}
+
+#define to_hsmp_sys_attr(_attr) container_of(_attr, struct hsmp_sys_attr, dattr)
+
+static ssize_t hsmp_msg_resp32_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", data);
+}
+
+#define DDR_MAX_BW_MASK GENMASK(31, 20)
+#define DDR_UTIL_BW_MASK GENMASK(19, 8)
+#define DDR_UTIL_BW_PERC_MASK GENMASK(7, 0)
+#define FW_VER_MAJOR_MASK GENMASK(23, 16)
+#define FW_VER_MINOR_MASK GENMASK(15, 8)
+#define FW_VER_DEBUG_MASK GENMASK(7, 0)
+#define FMAX_MASK GENMASK(31, 16)
+#define FMIN_MASK GENMASK(15, 0)
+#define FREQ_LIMIT_MASK GENMASK(31, 16)
+#define FREQ_SRC_IND_MASK GENMASK(15, 0)
+
+static ssize_t hsmp_ddr_max_bw_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n", FIELD_GET(DDR_MAX_BW_MASK, data));
+}
+
+static ssize_t hsmp_ddr_util_bw_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n", FIELD_GET(DDR_UTIL_BW_MASK, data));
+}
+
+static ssize_t hsmp_ddr_util_bw_perc_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n", FIELD_GET(DDR_UTIL_BW_PERC_MASK, data));
+}
+
+static ssize_t hsmp_msg_fw_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu.%lu.%lu\n",
+ FIELD_GET(FW_VER_MAJOR_MASK, data),
+ FIELD_GET(FW_VER_MINOR_MASK, data),
+ FIELD_GET(FW_VER_DEBUG_MASK, data));
+}
+
+static ssize_t hsmp_fclk_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data[2];
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, data, 2);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", data[0]);
+}
+
+static ssize_t hsmp_mclk_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data[2];
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, data, 2);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", data[1]);
+}
+
+static ssize_t hsmp_clk_fmax_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n", FIELD_GET(FMAX_MASK, data));
+}
+
+static ssize_t hsmp_clk_fmin_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n", FIELD_GET(FMIN_MASK, data));
+}
+
+static ssize_t hsmp_freq_limit_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%lu\n", FIELD_GET(FREQ_LIMIT_MASK, data));
+}
+
+static const char * const freqlimit_srcnames[] = {
+ "cHTC-Active",
+ "PROCHOT",
+ "TDC limit",
+ "PPT Limit",
+ "OPN Max",
+ "Reliability Limit",
+ "APML Agent",
+ "HSMP Agent",
+};
+
+static ssize_t hsmp_freq_limit_source_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hsmp_sys_attr *hattr = to_hsmp_sys_attr(attr);
+ struct hsmp_socket *sock = dev_get_drvdata(dev);
+ unsigned int index;
+ int len = 0;
+ u16 src_ind;
+ u32 data;
+ int ret;
+
+ ret = hsmp_msg_get_nargs(sock->sock_ind, hattr->msg_id, &data, 1);
+ if (ret)
+ return ret;
+
+ src_ind = FIELD_GET(FREQ_SRC_IND_MASK, data);
+ for (index = 0; index < ARRAY_SIZE(freqlimit_srcnames); index++) {
+ if (!src_ind)
+ break;
+ if (src_ind & 1)
+ len += sysfs_emit_at(buf, len, "%s\n", freqlimit_srcnames[index]);
+ src_ind >>= 1;
+ }
+ return len;
+}
+
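As a worked example of the decode loop above: a response whose low 16 bits are 0x0088 has bits 3 and 7 set, so the attribute would emit "PPT Limit" and "HSMP Agent" on separate lines.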
static int init_acpi(struct device *dev)
{
u16 sock_ind;
@@ -283,6 +498,12 @@ static int init_acpi(struct device *dev)
dev_err(dev, "Failed to init metric table\n");
}
+ ret = hsmp_create_sensor(dev, sock_ind);
+ if (ret)
+ dev_err(dev, "Failed to register HSMP sensors with hwmon\n");
+
+ dev_set_drvdata(dev, &hsmp_pdev->sock[sock_ind]);
+
return ret;
}
@@ -297,9 +518,52 @@ static const struct bin_attribute *hsmp_attr_list[] = {
NULL
};
+#define HSMP_DEV_ATTR(_name, _msg_id, _show, _mode) \
+static struct hsmp_sys_attr hattr_##_name = { \
+ .dattr = __ATTR(_name, _mode, _show, NULL), \
+ .msg_id = _msg_id, \
+}
+
+HSMP_DEV_ATTR(c0_residency_input, HSMP_GET_C0_PERCENT, hsmp_msg_resp32_show, 0444);
+HSMP_DEV_ATTR(prochot_status, HSMP_GET_PROC_HOT, hsmp_msg_resp32_show, 0444);
+HSMP_DEV_ATTR(smu_fw_version, HSMP_GET_SMU_VER, hsmp_msg_fw_ver_show, 0444);
+HSMP_DEV_ATTR(protocol_version, HSMP_GET_PROTO_VER, hsmp_msg_resp32_show, 0444);
+HSMP_DEV_ATTR(cclk_freq_limit_input, HSMP_GET_CCLK_THROTTLE_LIMIT, hsmp_msg_resp32_show, 0444);
+HSMP_DEV_ATTR(ddr_max_bw, HSMP_GET_DDR_BANDWIDTH, hsmp_ddr_max_bw_show, 0444);
+HSMP_DEV_ATTR(ddr_utilised_bw_input, HSMP_GET_DDR_BANDWIDTH, hsmp_ddr_util_bw_show, 0444);
+HSMP_DEV_ATTR(ddr_utilised_bw_perc_input, HSMP_GET_DDR_BANDWIDTH, hsmp_ddr_util_bw_perc_show, 0444);
+HSMP_DEV_ATTR(fclk_input, HSMP_GET_FCLK_MCLK, hsmp_fclk_show, 0444);
+HSMP_DEV_ATTR(mclk_input, HSMP_GET_FCLK_MCLK, hsmp_mclk_show, 0444);
+HSMP_DEV_ATTR(clk_fmax, HSMP_GET_SOCKET_FMAX_FMIN, hsmp_clk_fmax_show, 0444);
+HSMP_DEV_ATTR(clk_fmin, HSMP_GET_SOCKET_FMAX_FMIN, hsmp_clk_fmin_show, 0444);
+HSMP_DEV_ATTR(pwr_current_active_freq_limit, HSMP_GET_SOCKET_FREQ_LIMIT,
+ hsmp_freq_limit_show, 0444);
+HSMP_DEV_ATTR(pwr_current_active_freq_limit_source, HSMP_GET_SOCKET_FREQ_LIMIT,
+ hsmp_freq_limit_source_show, 0444);
+
+static struct attribute *hsmp_dev_attr_list[] = {
+ &hattr_c0_residency_input.dattr.attr,
+ &hattr_prochot_status.dattr.attr,
+ &hattr_smu_fw_version.dattr.attr,
+ &hattr_protocol_version.dattr.attr,
+ &hattr_cclk_freq_limit_input.dattr.attr,
+ &hattr_ddr_max_bw.dattr.attr,
+ &hattr_ddr_utilised_bw_input.dattr.attr,
+ &hattr_ddr_utilised_bw_perc_input.dattr.attr,
+ &hattr_fclk_input.dattr.attr,
+ &hattr_mclk_input.dattr.attr,
+ &hattr_clk_fmax.dattr.attr,
+ &hattr_clk_fmin.dattr.attr,
+ &hattr_pwr_current_active_freq_limit.dattr.attr,
+ &hattr_pwr_current_active_freq_limit_source.dattr.attr,
+ NULL
+};
+
static const struct attribute_group hsmp_attr_grp = {
.bin_attrs_new = hsmp_attr_list,
+ .attrs = hsmp_dev_attr_list,
.is_bin_visible = hsmp_is_sock_attr_visible,
+ .is_visible = hsmp_is_sock_dev_attr_visible,
};
static const struct attribute_group *hsmp_groups[] = {
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index a3ac09a90de4..885e2f8136fd 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -7,7 +7,7 @@
* This file provides a device implementation for HSMP interface
*/
-#include <asm/amd_hsmp.h>
+#include <asm/amd/hsmp.h>
#include <linux/acpi.h>
#include <linux/delay.h>
@@ -32,8 +32,6 @@
#define HSMP_WR true
#define HSMP_RD false
-#define DRIVER_VERSION "2.4"
-
/*
* When same message numbers are used for both GET and SET operation,
* bit:31 indicates whether its SET or GET operation.
@@ -99,7 +97,7 @@ static int __hsmp_send_message(struct hsmp_socket *sock, struct hsmp_message *ms
short_sleep = jiffies + msecs_to_jiffies(HSMP_SHORT_SLEEP);
timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
- while (time_before(jiffies, timeout)) {
+ while (true) {
ret = sock->amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_RD);
if (ret) {
dev_err(sock->dev, "Error %d reading mailbox status\n", ret);
@@ -108,6 +106,10 @@ static int __hsmp_send_message(struct hsmp_socket *sock, struct hsmp_message *ms
if (mbox_status != HSMP_STATUS_NOT_READY)
break;
+
+ if (!time_before(jiffies, timeout))
+ break;
+
if (time_before(jiffies, short_sleep))
usleep_range(50, 100);
else
@@ -212,13 +214,7 @@ int hsmp_send_message(struct hsmp_message *msg)
return -ENODEV;
sock = &hsmp_pdev.sock[msg->sock_ind];
- /*
- * The time taken by smu operation to complete is between
- * 10us to 1ms. Sometime it may take more time.
- * In SMP system timeout of 100 millisecs should
- * be enough for the previous thread to finish the operation
- */
- ret = down_timeout(&sock->hsmp_sem, msecs_to_jiffies(HSMP_MSG_TIMEOUT));
+ ret = down_interruptible(&sock->hsmp_sem);
if (ret < 0)
return ret;
@@ -230,6 +226,29 @@ int hsmp_send_message(struct hsmp_message *msg)
}
EXPORT_SYMBOL_NS_GPL(hsmp_send_message, "AMD_HSMP");
+int hsmp_msg_get_nargs(u16 sock_ind, u32 msg_id, u32 *data, u8 num_args)
+{
+ struct hsmp_message msg = {};
+ unsigned int i;
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+ msg.msg_id = msg_id;
+ msg.sock_ind = sock_ind;
+ msg.response_sz = num_args;
+
+ ret = hsmp_send_message(&msg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_args; i++)
+ data[i] = msg.args[i];
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hsmp_msg_get_nargs, "AMD_HSMP");
+
int hsmp_test(u16 sock_ind, u32 value)
{
struct hsmp_message msg = { 0 };
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h
index af8b21f821d6..36b5ceea9ac0 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.h
+++ b/drivers/platform/x86/amd/hsmp/hsmp.h
@@ -12,6 +12,7 @@
#include <linux/compiler_types.h>
#include <linux/device.h>
+#include <linux/hwmon.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
@@ -23,6 +24,9 @@
#define HSMP_CDEV_NAME "hsmp_cdev"
#define HSMP_DEVNODE_NAME "hsmp"
+#define ACPI_HSMP_DEVICE_HID "AMDI0097"
+
+#define DRIVER_VERSION "2.5"
struct hsmp_mbaddr_info {
u32 base_addr;
@@ -60,4 +64,10 @@ int hsmp_misc_register(struct device *dev);
int hsmp_get_tbl_dram_base(u16 sock_ind);
ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size);
struct hsmp_plat_device *get_hsmp_pdev(void);
+#if IS_REACHABLE(CONFIG_HWMON)
+int hsmp_create_sensor(struct device *dev, u16 sock_ind);
+#else
+static inline int hsmp_create_sensor(struct device *dev, u16 sock_ind) { return 0; }
+#endif
+int hsmp_msg_get_nargs(u16 sock_ind, u32 msg_id, u32 *data, u8 num_args);
#endif /* HSMP_H */
diff --git a/drivers/platform/x86/amd/hsmp/hwmon.c b/drivers/platform/x86/amd/hsmp/hwmon.c
new file mode 100644
index 000000000000..0cc9a742497f
--- /dev/null
+++ b/drivers/platform/x86/amd/hsmp/hwmon.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD HSMP hwmon support
+ * Copyright (c) 2025, AMD.
+ * All Rights Reserved.
+ *
+ * This file provides the hwmon implementation for the HSMP interface.
+ */
+
+#include <asm/amd/hsmp.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include "hsmp.h"
+
+#define HSMP_HWMON_NAME "amd_hsmp_hwmon"
+
+static int hsmp_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ u16 sock_ind = (uintptr_t)dev_get_drvdata(dev);
+ struct hsmp_message msg = {};
+
+ if (type != hwmon_power)
+ return -EOPNOTSUPP;
+
+ if (attr != hwmon_power_cap)
+ return -EOPNOTSUPP;
+
+ msg.num_args = 1;
+ msg.args[0] = val / MICROWATT_PER_MILLIWATT;
+ msg.msg_id = HSMP_SET_SOCKET_POWER_LIMIT;
+ msg.sock_ind = sock_ind;
+ return hsmp_send_message(&msg);
+}
+
+static int hsmp_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ u16 sock_ind = (uintptr_t)dev_get_drvdata(dev);
+ struct hsmp_message msg = {};
+ int ret;
+
+ if (type != hwmon_power)
+ return -EOPNOTSUPP;
+
+ msg.sock_ind = sock_ind;
+ msg.response_sz = 1;
+
+ switch (attr) {
+ case hwmon_power_input:
+ msg.msg_id = HSMP_GET_SOCKET_POWER;
+ break;
+ case hwmon_power_cap:
+ msg.msg_id = HSMP_GET_SOCKET_POWER_LIMIT;
+ break;
+ case hwmon_power_cap_max:
+ msg.msg_id = HSMP_GET_SOCKET_POWER_LIMIT_MAX;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = hsmp_send_message(&msg);
+ if (!ret)
+ *val = msg.args[0] * MICROWATT_PER_MILLIWATT;
+
+ return ret;
+}
+
+static umode_t hsmp_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_power)
+ return 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ return 0444;
+ case hwmon_power_cap:
+ return 0644;
+ case hwmon_power_cap_max:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops hsmp_hwmon_ops = {
+ .read = hsmp_hwmon_read,
+ .is_visible = hsmp_hwmon_is_visible,
+ .write = hsmp_hwmon_write,
+};
+
+static const struct hwmon_channel_info * const hsmp_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_CAP | HWMON_P_CAP_MAX),
+ NULL
+};
+
+static const struct hwmon_chip_info hsmp_chip_info = {
+ .ops = &hsmp_hwmon_ops,
+ .info = hsmp_info,
+};
+
+int hsmp_create_sensor(struct device *dev, u16 sock_ind)
+{
+ struct device *hwmon_dev;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, HSMP_HWMON_NAME,
+ (void *)(uintptr_t)sock_ind,
+ &hsmp_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+EXPORT_SYMBOL_NS(hsmp_create_sensor, "AMD_HSMP");
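A quick way to exercise the new hwmon node from userspace; a sketch assuming the device registered as hwmon0 (the index varies per system):

#include <stdio.h>

int main(void)
{
	/* power1_input reports socket power in microwatts */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_input", "r");
	long uw;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &uw) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("socket power: %ld mW\n", uw / 1000);
	return 0;
}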
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index b9782a078dbd..e3874c47ed9e 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -9,8 +9,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/amd_hsmp.h>
+#include <asm/amd/hsmp.h>
+#include <linux/acpi.h>
#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -18,12 +19,11 @@
#include <linux/platform_device.h>
#include <linux/sysfs.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
-#define DRIVER_VERSION "2.3"
/*
* To access specific HSMP mailbox register, s/w writes the SMN address of HSMP mailbox
@@ -189,6 +189,11 @@ static int init_platform_device(struct device *dev)
if (ret)
dev_err(dev, "Failed to init metric table\n");
}
+
+ /* Register with hwmon interface for reporting power */
+ ret = hsmp_create_sensor(dev, i);
+ if (ret)
+ dev_err(dev, "Failed to register HSMP sensors with hwmon\n");
}
return 0;
@@ -266,7 +271,7 @@ static bool legacy_hsmp_support(void)
}
case 0x1A:
switch (boot_cpu_data.x86_model) {
- case 0x00 ... 0x1F:
+ case 0x00 ... 0x0F:
return true;
default:
return false;
@@ -288,6 +293,9 @@ static int __init hsmp_plt_init(void)
return ret;
}
+ if (acpi_dev_present(ACPI_HSMP_DEVICE_HID, NULL, -1))
+ return -ENODEV;
+
hsmp_pdev = get_hsmp_pdev();
if (!hsmp_pdev)
return -ENOMEM;
diff --git a/drivers/platform/x86/amd/pmc/mp1_stb.c b/drivers/platform/x86/amd/pmc/mp1_stb.c
index c005f00988f7..3b9b9f30faa3 100644
--- a/drivers/platform/x86/amd/pmc/mp1_stb.c
+++ b/drivers/platform/x86/amd/pmc/mp1_stb.c
@@ -11,7 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index b4f49720c87f..f292111bd065 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -11,6 +11,7 @@
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <asm/amd/fch.h>
#include "pmc.h"
@@ -20,7 +21,7 @@ struct quirk_entry {
};
static struct quirk_entry quirk_s2idle_bug = {
- .s2idle_bug_mmio = 0xfed80380,
+ .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH,
};
static struct quirk_entry quirk_spurious_8042 = {
@@ -217,6 +218,22 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
}
},
+ {
+ .ident = "MECHREVO Wujie 14X (GX4HRXL)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
+ }
+ },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */
+ {
+ .ident = "PCSpecialist Lafite Pro V 14M",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 0329fafe14eb..0b9b23eb7c2c 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -28,7 +28,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
#include "pmc.h"
@@ -157,6 +157,8 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
return -ENOMEM;
}
+ memset_io(dev->smu_virt_addr, 0, sizeof(struct smu_metrics));
+
/* Start the logging */
amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false);
amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false);
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 96821101ec77..ef988605c4da 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -14,7 +14,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
-#include <asm/amd_node.h>
+#include <asm/amd/node.h>
#include "pmf.h"
/* PMF-SMU communication registers */
@@ -280,7 +280,7 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
dev_err(dev->dev, "Invalid CPU id: 0x%x", dev->cpu_id);
}
- dev->buf = kzalloc(dev->mtable_size, GFP_KERNEL);
+ dev->buf = devm_kzalloc(dev->dev, dev->mtable_size, GFP_KERNEL);
if (!dev->buf)
return -ENOMEM;
}
@@ -493,7 +493,6 @@ static void amd_pmf_remove(struct platform_device *pdev)
mutex_destroy(&dev->lock);
mutex_destroy(&dev->update_mutex);
mutex_destroy(&dev->cb_mutex);
- kfree(dev->buf);
}
static const struct attribute_group *amd_pmf_driver_groups[] = {
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 14b99d8b63d2..4f626ebcb619 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -334,6 +334,11 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
return 0;
}
+static inline bool amd_pmf_pb_valid(struct amd_pmf_dev *dev)
+{
+ return memchr_inv(dev->policy_buf, 0xff, dev->policy_sz);
+}
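(memchr_inv() returns NULL only when every byte of the buffer matches 0xff, so the helper above treats a fully-erased flash image as an invalid policy binary.)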
+
#ifdef CONFIG_AMD_PMF_DEBUG
static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev)
{
@@ -353,14 +358,22 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
return -EINVAL;
/* re-alloc to the new buffer length of the policy binary */
- new_policy_buf = memdup_user(buf, length);
- if (IS_ERR(new_policy_buf))
- return PTR_ERR(new_policy_buf);
+ new_policy_buf = devm_kzalloc(dev->dev, length, GFP_KERNEL);
+ if (!new_policy_buf)
+ return -ENOMEM;
- kfree(dev->policy_buf);
+ if (copy_from_user(new_policy_buf, buf, length)) {
+ devm_kfree(dev->dev, new_policy_buf);
+ return -EFAULT;
+ }
+
+ devm_kfree(dev->dev, dev->policy_buf);
dev->policy_buf = new_policy_buf;
dev->policy_sz = length;
+ if (!amd_pmf_pb_valid(dev))
+ return -EINVAL;
+
amd_pmf_hex_dump_pb(dev);
ret = amd_pmf_start_policy_engine(dev);
if (ret < 0)
@@ -407,12 +420,12 @@ static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_
rc = tee_client_open_session(ctx, &sess_arg, NULL);
if (rc < 0 || sess_arg.ret != 0) {
pr_err("Failed to open TEE session err:%#x, rc:%d\n", sess_arg.ret, rc);
- return rc;
+ return rc ?: -EINVAL;
}
*id = sess_arg.session;
- return rc;
+ return 0;
}
static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
@@ -447,7 +460,9 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
dev->tee_ctx = tee_client_open_context(NULL, amd_pmf_amdtee_ta_match, NULL, NULL);
if (IS_ERR(dev->tee_ctx)) {
dev_err(dev->dev, "Failed to open TEE context\n");
- return PTR_ERR(dev->tee_ctx);
+ ret = PTR_ERR(dev->tee_ctx);
+ dev->tee_ctx = NULL;
+ return ret;
}
ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
@@ -487,9 +502,12 @@ out_ctx:
static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
{
+ if (!dev->tee_ctx)
+ return;
tee_shm_free(dev->fw_shm_pool);
tee_client_close_session(dev->tee_ctx, dev->session_id);
tee_client_close_context(dev->tee_ctx);
+ dev->tee_ctx = NULL;
}
int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
@@ -512,58 +530,45 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
ret = amd_pmf_set_dram_addr(dev, true);
if (ret)
- goto err_cancel_work;
+ return ret;
dev->policy_base = devm_ioremap_resource(dev->dev, dev->res);
- if (IS_ERR(dev->policy_base)) {
- ret = PTR_ERR(dev->policy_base);
- goto err_free_dram_buf;
- }
+ if (IS_ERR(dev->policy_base))
+ return PTR_ERR(dev->policy_base);
- dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
- if (!dev->policy_buf) {
- ret = -ENOMEM;
- goto err_free_dram_buf;
- }
+ dev->policy_buf = devm_kzalloc(dev->dev, dev->policy_sz, GFP_KERNEL);
+ if (!dev->policy_buf)
+ return -ENOMEM;
memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
+ if (!amd_pmf_pb_valid(dev)) {
+ dev_info(dev->dev, "No Smart PC policy present\n");
+ return -EINVAL;
+ }
+
amd_pmf_hex_dump_pb(dev);
- dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
- if (!dev->prev_data) {
- ret = -ENOMEM;
- goto err_free_policy;
- }
+ dev->prev_data = devm_kzalloc(dev->dev, sizeof(*dev->prev_data), GFP_KERNEL);
+ if (!dev->prev_data)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
if (ret)
- goto err_free_prev_data;
+ return ret;
ret = amd_pmf_start_policy_engine(dev);
- switch (ret) {
- case TA_PMF_TYPE_SUCCESS:
- status = true;
- break;
- case TA_ERROR_CRYPTO_INVALID_PARAM:
- case TA_ERROR_CRYPTO_BIN_TOO_LARGE:
- amd_pmf_tee_deinit(dev);
- status = false;
- break;
- default:
- ret = -EINVAL;
- amd_pmf_tee_deinit(dev);
- goto err_free_prev_data;
- }
-
+ dev_dbg(dev->dev, "start policy engine ret: %d\n", ret);
+ status = ret == TA_PMF_TYPE_SUCCESS;
if (status)
break;
+ amd_pmf_tee_deinit(dev);
}
if (!status && !pb_side_load) {
ret = -EINVAL;
- goto err_free_prev_data;
+ goto err;
}
if (pb_side_load)
@@ -571,22 +576,12 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
ret = amd_pmf_register_input_device(dev);
if (ret)
- goto err_pmf_remove_pb;
+ goto err;
return 0;
-err_pmf_remove_pb:
- if (pb_side_load && dev->esbin)
- amd_pmf_remove_pb(dev);
- amd_pmf_tee_deinit(dev);
-err_free_prev_data:
- kfree(dev->prev_data);
-err_free_policy:
- kfree(dev->policy_buf);
-err_free_dram_buf:
- kfree(dev->buf);
-err_cancel_work:
- cancel_delayed_work_sync(&dev->pb_work);
+err:
+ amd_pmf_deinit_smart_pc(dev);
return ret;
}
@@ -600,11 +595,5 @@ void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
amd_pmf_remove_pb(dev);
cancel_delayed_work_sync(&dev->pb_work);
- kfree(dev->prev_data);
- dev->prev_data = NULL;
- kfree(dev->policy_buf);
- dev->policy_buf = NULL;
- kfree(dev->buf);
- dev->buf = NULL;
amd_pmf_tee_deinit(dev);
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 0c697b46f436..f7191fdded14 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -142,16 +142,20 @@ module_param(fnlock_default, bool, 0444);
#define ASUS_MINI_LED_2024_STRONG 0x01
#define ASUS_MINI_LED_2024_OFF 0x02
-/* Controls the power state of the USB0 hub on ROG Ally which input is on */
#define ASUS_USB0_PWR_EC0_CSEE "\\_SB.PCI0.SBRG.EC0.CSEE"
-/* 300ms so far seems to produce a reliable result on AC and battery */
-#define ASUS_USB0_PWR_EC0_CSEE_WAIT 1500
+/*
+ * The period, in ms, to wait after screen off/on/s2idle.check. The time
+ * chosen here greatly impacts the wake behaviour. Used in suspend/wake.
+ */
+#define ASUS_USB0_PWR_EC0_CSEE_WAIT 600
+#define ASUS_USB0_PWR_EC0_CSEE_OFF 0xB7
+#define ASUS_USB0_PWR_EC0_CSEE_ON 0xB8
static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
static int throttle_thermal_policy_write(struct asus_wmi *);
-static const struct dmi_system_id asus_ally_mcu_quirk[] = {
+static const struct dmi_system_id asus_rog_ally_device[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "RC71L"),
@@ -274,9 +278,6 @@ struct asus_wmi {
u32 tablet_switch_dev_id;
bool tablet_switch_inverted;
- /* The ROG Ally device requires the MCU USB device be disconnected before suspend */
- bool ally_mcu_usb_switch;
-
enum fan_type fan_type;
enum fan_type gpu_fan_type;
enum fan_type mid_fan_type;
@@ -336,6 +337,9 @@ struct asus_wmi {
struct asus_wmi_driver *driver;
};
+/* Global to allow setting externally without requiring driver data */
+static enum asus_ally_mcu_hack use_ally_mcu_hack = ASUS_WMI_ALLY_MCU_HACK_INIT;
+
/* WMI ************************************************************************/
static int asus_wmi_evaluate_method3(u32 method_id,
@@ -550,7 +554,7 @@ static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
return 0;
}
-static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
+int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
u32 *retval)
{
return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id,
@@ -1344,6 +1348,44 @@ static ssize_t nv_temp_target_show(struct device *dev,
static DEVICE_ATTR_RW(nv_temp_target);
/* Ally MCU Powersave ********************************************************/
+
+/*
+ * The HID driver needs to check the MCU version and disable this if the MCU FW
+ * version is >= the minimum requirement. Newer FW does not need the hacks.
+ */
+void set_ally_mcu_hack(enum asus_ally_mcu_hack status)
+{
+ use_ally_mcu_hack = status;
+ pr_debug("%s Ally MCU suspend quirk\n",
+ status == ASUS_WMI_ALLY_MCU_HACK_ENABLED ? "Enabled" : "Disabled");
+}
+EXPORT_SYMBOL_NS_GPL(set_ally_mcu_hack, "ASUS_WMI");
+
+/*
+ * mcu_powersave should always be enabled, as it is fixed in MCU FW versions:
+ * - v313 for Ally X
+ * - v319 for Ally 1
+ * The HID driver checks the MCU version and should set this when requirements match.
+ */
+void set_ally_mcu_powersave(bool enabled)
+{
+ int result, err;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MCU_POWERSAVE, enabled, &result);
+ if (err) {
+ pr_warn("Failed to set MCU powersave: %d\n", err);
+ return;
+ }
+ if (result > 1) {
+ pr_warn("Failed to set MCU powersave (result): 0x%x\n", result);
+ return;
+ }
+
+ pr_debug("%s MCU Powersave\n",
+ enabled ? "Enabled" : "Disabled");
+}
+EXPORT_SYMBOL_NS_GPL(set_ally_mcu_powersave, "ASUS_WMI");
+
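For reference, a HID-side consumer would import the symbol namespace and flip both knobs once it has read the MCU FW version. A sketch under stated assumptions: the header path, the version constant, and the _DISABLED enumerator (implied by the _INIT and _ENABLED values above) are not shown in this patch:

#include <linux/platform_data/x86/asus-wmi.h>	/* assumed location of the declarations */

MODULE_IMPORT_NS("ASUS_WMI");

#define ALLY_X_MIN_FW	313	/* hypothetical: first FW that needs no CSEE hack */

static void ally_apply_mcu_quirks(u32 fw_version)
{
	if (fw_version >= ALLY_X_MIN_FW) {
		/* ASUS_WMI_ALLY_MCU_HACK_DISABLED is assumed from the enum's other values */
		set_ally_mcu_hack(ASUS_WMI_ALLY_MCU_HACK_DISABLED);
		set_ally_mcu_powersave(true);
	}
}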
static ssize_t mcu_powersave_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -4712,6 +4754,21 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_platform;
+ if (use_ally_mcu_hack == ASUS_WMI_ALLY_MCU_HACK_INIT) {
+ if (acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
+ && dmi_check_system(asus_rog_ally_device))
+ use_ally_mcu_hack = ASUS_WMI_ALLY_MCU_HACK_ENABLED;
+ if (dmi_match(DMI_BOARD_NAME, "RC71")) {
+ /*
+ * These steps ensure the device is in a known-good state; this is
+ * especially important for the Ally 1 after a reboot.
+ */
+ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE,
+ ASUS_USB0_PWR_EC0_CSEE_ON);
+ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
+ }
+ }
+
/* ensure defaults for tunables */
asus->ppt_pl2_sppt = 5;
asus->ppt_pl1_spl = 5;
@@ -4725,8 +4782,6 @@ static int asus_wmi_add(struct platform_device *pdev)
asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
asus->oobe_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE);
- asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
- && dmi_check_system(asus_ally_mcu_quirk);
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE))
asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE;
@@ -4779,7 +4834,8 @@ static int asus_wmi_add(struct platform_device *pdev)
goto fail_leds;
asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
- if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+ if ((result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) ==
+ (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
asus->driver->wlan_ctrl_by_user = 1;
if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
@@ -4912,34 +4968,6 @@ static int asus_hotk_resume(struct device *device)
return 0;
}
-static int asus_hotk_resume_early(struct device *device)
-{
- struct asus_wmi *asus = dev_get_drvdata(device);
-
- if (asus->ally_mcu_usb_switch) {
- /* sleep required to prevent USB0 being yanked then reappearing rapidly */
- if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB8)))
- dev_err(device, "ROG Ally MCU failed to connect USB dev\n");
- else
- msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
- }
- return 0;
-}
-
-static int asus_hotk_prepare(struct device *device)
-{
- struct asus_wmi *asus = dev_get_drvdata(device);
-
- if (asus->ally_mcu_usb_switch) {
- /* sleep required to ensure USB0 is disabled before sleep continues */
- if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB7)))
- dev_err(device, "ROG Ally MCU failed to disconnect USB dev\n");
- else
- msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
- }
- return 0;
-}
-
static int asus_hotk_restore(struct device *device)
{
struct asus_wmi *asus = dev_get_drvdata(device);
@@ -4987,11 +5015,50 @@ static int asus_hotk_restore(struct device *device)
return 0;
}
+static int asus_hotk_prepare(struct device *device)
+{
+ if (use_ally_mcu_hack == ASUS_WMI_ALLY_MCU_HACK_ENABLED) {
+ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE,
+ ASUS_USB0_PWR_EC0_CSEE_OFF);
+ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
+ }
+ return 0;
+}
+
+#if defined(CONFIG_SUSPEND)
+static void asus_ally_s2idle_restore(void)
+{
+ if (use_ally_mcu_hack == ASUS_WMI_ALLY_MCU_HACK_ENABLED) {
+ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE,
+ ASUS_USB0_PWR_EC0_CSEE_ON);
+ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
+ }
+}
+
+/* Use only for Ally devices due to the wake_on_ac */
+static struct acpi_s2idle_dev_ops asus_ally_s2idle_dev_ops = {
+ .restore = asus_ally_s2idle_restore,
+};
+
+static void asus_s2idle_check_register(void)
+{
+ if (acpi_register_lps0_dev(&asus_ally_s2idle_dev_ops))
+ pr_warn("failed to register LPS0 sleep handler in asus-wmi\n");
+}
+
+static void asus_s2idle_check_unregister(void)
+{
+ acpi_unregister_lps0_dev(&asus_ally_s2idle_dev_ops);
+}
+#else
+static void asus_s2idle_check_register(void) {}
+static void asus_s2idle_check_unregister(void) {}
+#endif /* CONFIG_SUSPEND */
+
static const struct dev_pm_ops asus_pm_ops = {
.thaw = asus_hotk_thaw,
.restore = asus_hotk_restore,
.resume = asus_hotk_resume,
- .resume_early = asus_hotk_resume_early,
.prepare = asus_hotk_prepare,
};
@@ -5019,6 +5086,8 @@ static int asus_wmi_probe(struct platform_device *pdev)
return ret;
}
+ asus_s2idle_check_register();
+
return asus_wmi_add(pdev);
}
@@ -5051,6 +5120,8 @@ EXPORT_SYMBOL_GPL(asus_wmi_register_driver);
void asus_wmi_unregister_driver(struct asus_wmi_driver *driver)
{
+ asus_s2idle_check_unregister();
+
platform_device_unregister(driver->platform_device);
platform_driver_unregister(&driver->platform_driver);
used = false;
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
index 143d14548565..bb3393bbfb89 100644
--- a/drivers/platform/x86/barco-p50-gpio.c
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -268,15 +268,19 @@ static int p50_gpio_get(struct gpio_chip *gc, unsigned int offset)
return ret;
}
-static void p50_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int p50_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct p50_gpio *p50 = gpiochip_get_data(gc);
+ int ret;
mutex_lock(&p50->lock);
- p50_send_mbox_cmd(p50, P50_MBOX_CMD_WRITE_GPIO, gpio_params[offset], value);
+ ret = p50_send_mbox_cmd(p50, P50_MBOX_CMD_WRITE_GPIO,
+ gpio_params[offset], value);
mutex_unlock(&p50->lock);
+
+ return ret;
}
static int p50_gpio_probe(struct platform_device *pdev)
@@ -312,7 +316,7 @@ static int p50_gpio_probe(struct platform_device *pdev)
p50->gc.base = -1;
p50->gc.get_direction = p50_gpio_get_direction;
p50->gc.get = p50_gpio_get;
- p50->gc.set = p50_gpio_set;
+ p50->gc.set_rv = p50_gpio_set;
/* reset mbox */
diff --git a/drivers/platform/x86/dasharo-acpi.c b/drivers/platform/x86/dasharo-acpi.c
new file mode 100644
index 000000000000..f0c5136af29d
--- /dev/null
+++ b/drivers/platform/x86/dasharo-acpi.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Dasharo ACPI Driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+enum dasharo_feature {
+ DASHARO_FEATURE_TEMPERATURE = 0,
+ DASHARO_FEATURE_FAN_PWM,
+ DASHARO_FEATURE_FAN_TACH,
+ DASHARO_FEATURE_MAX
+};
+
+enum dasharo_temperature {
+ DASHARO_TEMPERATURE_CPU_PACKAGE = 0,
+ DASHARO_TEMPERATURE_CPU_CORE,
+ DASHARO_TEMPERATURE_GPU,
+ DASHARO_TEMPERATURE_BOARD,
+ DASHARO_TEMPERATURE_CHASSIS,
+ DASHARO_TEMPERATURE_MAX
+};
+
+enum dasharo_fan {
+ DASHARO_FAN_CPU = 0,
+ DASHARO_FAN_GPU,
+ DASHARO_FAN_CHASSIS,
+ DASHARO_FAN_MAX
+};
+
+#define MAX_GROUPS_PER_FEAT 8
+
+static const char * const dasharo_group_names[DASHARO_FEATURE_MAX][MAX_GROUPS_PER_FEAT] = {
+ [DASHARO_FEATURE_TEMPERATURE] = {
+ [DASHARO_TEMPERATURE_CPU_PACKAGE] = "CPU Package",
+ [DASHARO_TEMPERATURE_CPU_CORE] = "CPU Core",
+ [DASHARO_TEMPERATURE_GPU] = "GPU",
+ [DASHARO_TEMPERATURE_BOARD] = "Board",
+ [DASHARO_TEMPERATURE_CHASSIS] = "Chassis",
+ },
+ [DASHARO_FEATURE_FAN_PWM] = {
+ [DASHARO_FAN_CPU] = "CPU",
+ [DASHARO_FAN_GPU] = "GPU",
+ [DASHARO_FAN_CHASSIS] = "Chassis",
+ },
+ [DASHARO_FEATURE_FAN_TACH] = {
+ [DASHARO_FAN_CPU] = "CPU",
+ [DASHARO_FAN_GPU] = "GPU",
+ [DASHARO_FAN_CHASSIS] = "Chassis",
+ },
+};
+
+struct dasharo_capability {
+ unsigned int group;
+ unsigned int index;
+ char name[16];
+};
+
+#define MAX_CAPS_PER_FEAT 24
+
+struct dasharo_data {
+ struct platform_device *pdev;
+ int caps_found[DASHARO_FEATURE_MAX];
+ struct dasharo_capability capabilities[DASHARO_FEATURE_MAX][MAX_CAPS_PER_FEAT];
+};
+
+static int dasharo_get_feature_cap_count(struct dasharo_data *data, enum dasharo_feature feat, int cap)
+{
+ struct acpi_object_list obj_list;
+ union acpi_object obj[2];
+ acpi_handle handle;
+ acpi_status status;
+ u64 count;
+
+ obj[0].type = ACPI_TYPE_INTEGER;
+ obj[0].integer.value = feat;
+ obj[1].type = ACPI_TYPE_INTEGER;
+ obj[1].integer.value = cap;
+ obj_list.count = 2;
+ obj_list.pointer = &obj[0];
+
+ handle = ACPI_HANDLE(&data->pdev->dev);
+ status = acpi_evaluate_integer(handle, "GFCP", &obj_list, &count);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return count;
+}
+
+static int dasharo_read_channel(struct dasharo_data *data, char *method, enum dasharo_feature feat, int channel, long *value)
+{
+ struct acpi_object_list obj_list;
+ union acpi_object obj[2];
+ acpi_handle handle;
+ acpi_status status;
+ u64 val;
+
+ if (feat >= ARRAY_SIZE(data->capabilities))
+ return -EINVAL;
+
+ if (channel >= data->caps_found[feat])
+ return -EINVAL;
+
+ obj[0].type = ACPI_TYPE_INTEGER;
+ obj[0].integer.value = data->capabilities[feat][channel].group;
+ obj[1].type = ACPI_TYPE_INTEGER;
+ obj[1].integer.value = data->capabilities[feat][channel].index;
+ obj_list.count = 2;
+ obj_list.pointer = &obj[0];
+
+ handle = ACPI_HANDLE(&data->pdev->dev);
+ status = acpi_evaluate_integer(handle, method, &obj_list, &val);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ *value = val;
+ return 0;
+}
+
+static int dasharo_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dasharo_data *data = dev_get_drvdata(dev);
+ long value;
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ ret = dasharo_read_channel(data, "GTMP", DASHARO_FEATURE_TEMPERATURE, channel, &value);
+ if (!ret)
+ *val = value * MILLIDEGREE_PER_DEGREE;
+ break;
+ case hwmon_fan:
+ ret = dasharo_read_channel(data, "GFTH", DASHARO_FEATURE_FAN_TACH, channel, &value);
+ if (!ret)
+ *val = value;
+ break;
+ case hwmon_pwm:
+ ret = dasharo_read_channel(data, "GFDC", DASHARO_FEATURE_FAN_PWM, channel, &value);
+ if (!ret)
+ *val = value;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
+static int dasharo_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct dasharo_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ if (channel >= data->caps_found[DASHARO_FEATURE_TEMPERATURE])
+ return -EINVAL;
+
+ *str = data->capabilities[DASHARO_FEATURE_TEMPERATURE][channel].name;
+ break;
+ case hwmon_fan:
+ if (channel >= data->caps_found[DASHARO_FEATURE_FAN_TACH])
+ return -EINVAL;
+
+ *str = data->capabilities[DASHARO_FEATURE_FAN_TACH][channel].name;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t dasharo_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct dasharo_data *data = drvdata;
+
+ switch (type) {
+ case hwmon_temp:
+ if (channel < data->caps_found[DASHARO_FEATURE_TEMPERATURE])
+ return 0444;
+ break;
+ case hwmon_pwm:
+ if (channel < data->caps_found[DASHARO_FEATURE_FAN_PWM])
+ return 0444;
+ break;
+ case hwmon_fan:
+ if (channel < data->caps_found[DASHARO_FEATURE_FAN_TACH])
+ return 0444;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops dasharo_hwmon_ops = {
+ .is_visible = dasharo_hwmon_is_visible,
+ .read_string = dasharo_hwmon_read_string,
+ .read = dasharo_hwmon_read,
+};
+
+// Max 24 capabilities per feature
+static const struct hwmon_channel_info * const dasharo_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info dasharo_hwmon_chip_info = {
+ .ops = &dasharo_hwmon_ops,
+ .info = dasharo_hwmon_info,
+};
+
+static void dasharo_fill_feature_caps(struct dasharo_data *data, enum dasharo_feature feat)
+{
+ struct dasharo_capability *cap;
+ int cap_count = 0;
+ int count;
+
+ for (int group = 0; group < MAX_GROUPS_PER_FEAT; ++group) {
+ count = dasharo_get_feature_cap_count(data, feat, group);
+ if (count <= 0)
+ continue;
+
+ for (unsigned int i = 0; i < count; ++i) {
+ if (cap_count >= ARRAY_SIZE(data->capabilities[feat]))
+ break;
+
+ cap = &data->capabilities[feat][cap_count];
+ cap->group = group;
+ cap->index = i;
+ scnprintf(cap->name, sizeof(cap->name), "%s %d",
+ dasharo_group_names[feat][group], i);
+ cap_count++;
+ }
+ }
+ data->caps_found[feat] = cap_count;
+}
+
+static int dasharo_probe(struct platform_device *pdev)
+{
+ struct dasharo_data *data;
+ struct device *hwmon;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->pdev = pdev;
+
+ for (unsigned int i = 0; i < DASHARO_FEATURE_MAX; ++i)
+ dasharo_fill_feature_caps(data, i);
+
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev, "dasharo_acpi", data,
+ &dasharo_hwmon_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+
+static const struct acpi_device_id dasharo_device_ids[] = {
+ {"DSHR0001", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, dasharo_device_ids);
+
+static struct platform_driver dasharo_driver = {
+ .driver = {
+ .name = "dasharo-acpi",
+ .acpi_match_table = dasharo_device_ids,
+ },
+ .probe = dasharo_probe,
+};
+module_platform_driver(dasharo_driver);
+
+MODULE_DESCRIPTION("Dasharo ACPI Driver");
+MODULE_AUTHOR("Michał Kopeć <michal.kopec@3mdeb.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index f8a0dffcaab7..738c108c2163 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -22,6 +22,7 @@ config ALIENWARE_WMI
depends on DMI
depends on LEDS_CLASS
depends on NEW_LEDS
+ depends on HWMON
help
This is a driver for controlling Alienware WMI driven features.
@@ -171,7 +172,7 @@ config DELL_SMBIOS_SMM
config DELL_SMO8800
tristate "Dell Latitude freefall driver (ACPI SMO88XX)"
- default m
+ default m if ACPI
depends on I2C
depends on ACPI || COMPILE_TEST
help
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 08b82c151e07..20ec122a9fe0 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -8,11 +8,21 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/array_size.h>
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/kstrtox.h>
+#include <linux/minmax.h>
#include <linux/moduleparam.h>
#include <linux/platform_profile.h>
+#include <linux/pm.h>
+#include <linux/seq_file.h>
+#include <linux/units.h>
#include <linux/wmi.h>
#include "alienware-wmi.h"
@@ -24,16 +34,31 @@
#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
#define WMAX_METHOD_BRIGHTNESS 0x3
#define WMAX_METHOD_ZONE_CONTROL 0x4
-#define WMAX_METHOD_THERMAL_INFORMATION 0x14
-#define WMAX_METHOD_THERMAL_CONTROL 0x15
-#define WMAX_METHOD_GAME_SHIFT_STATUS 0x25
-#define WMAX_THERMAL_MODE_GMODE 0xAB
+#define AWCC_METHOD_GET_FAN_SENSORS 0x13
+#define AWCC_METHOD_THERMAL_INFORMATION 0x14
+#define AWCC_METHOD_THERMAL_CONTROL 0x15
+#define AWCC_METHOD_FWUP_GPIO_CONTROL 0x20
+#define AWCC_METHOD_READ_TOTAL_GPIOS 0x21
+#define AWCC_METHOD_READ_GPIO_STATUS 0x22
+#define AWCC_METHOD_GAME_SHIFT_STATUS 0x25
-#define WMAX_FAILURE_CODE 0xFFFFFFFF
-#define WMAX_THERMAL_TABLE_MASK GENMASK(7, 4)
-#define WMAX_THERMAL_MODE_MASK GENMASK(3, 0)
-#define WMAX_SENSOR_ID_MASK BIT(8)
+#define AWCC_FAILURE_CODE 0xFFFFFFFF
+#define AWCC_FAILURE_CODE_2 0xFFFFFFFE
+
+#define AWCC_SENSOR_ID_FLAG BIT(8)
+#define AWCC_THERMAL_MODE_MASK GENMASK(3, 0)
+#define AWCC_THERMAL_TABLE_MASK GENMASK(7, 4)
+#define AWCC_RESOURCE_ID_MASK GENMASK(7, 0)
+
+/* Arbitrary limit based on supported models */
+#define AWCC_MAX_RES_COUNT 16
+#define AWCC_ID_BITMAP_SIZE (U8_MAX + 1)
+#define AWCC_ID_BITMAP_LONGS BITS_TO_LONGS(AWCC_ID_BITMAP_SIZE)
+
+static bool force_hwmon;
+module_param_unsafe(force_hwmon, bool, 0);
+MODULE_PARM_DESC(force_hwmon, "Force probing for HWMON support without checking if the WMI backend is available");
static bool force_platform_profile;
module_param_unsafe(force_platform_profile, bool, 0);
@@ -44,16 +69,19 @@ module_param_unsafe(force_gmode, bool, 0);
MODULE_PARM_DESC(force_gmode, "Forces G-Mode when performance profile is selected");
struct awcc_quirks {
+ bool hwmon;
bool pprof;
bool gmode;
};
static struct awcc_quirks g_series_quirks = {
+ .hwmon = true,
.pprof = true,
.gmode = true,
};
static struct awcc_quirks generic_quirks = {
+ .hwmon = true,
.pprof = true,
.gmode = false,
};
@@ -91,7 +119,7 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
},
- .driver_data = &g_series_quirks,
+ .driver_data = &generic_quirks,
},
{
.ident = "Alienware m16 R2",
@@ -207,38 +235,59 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
},
};
-enum WMAX_THERMAL_INFORMATION_OPERATIONS {
- WMAX_OPERATION_SYS_DESCRIPTION = 0x02,
- WMAX_OPERATION_LIST_IDS = 0x03,
- WMAX_OPERATION_CURRENT_PROFILE = 0x0B,
+enum AWCC_GET_FAN_SENSORS_OPERATIONS {
+ AWCC_OP_GET_TOTAL_FAN_TEMPS = 0x01,
+ AWCC_OP_GET_FAN_TEMP_ID = 0x02,
+};
+
+enum AWCC_THERMAL_INFORMATION_OPERATIONS {
+ AWCC_OP_GET_SYSTEM_DESCRIPTION = 0x02,
+ AWCC_OP_GET_RESOURCE_ID = 0x03,
+ AWCC_OP_GET_TEMPERATURE = 0x04,
+ AWCC_OP_GET_FAN_RPM = 0x05,
+ AWCC_OP_GET_FAN_MIN_RPM = 0x08,
+ AWCC_OP_GET_FAN_MAX_RPM = 0x09,
+ AWCC_OP_GET_CURRENT_PROFILE = 0x0B,
+ AWCC_OP_GET_FAN_BOOST = 0x0C,
};
-enum WMAX_THERMAL_CONTROL_OPERATIONS {
- WMAX_OPERATION_ACTIVATE_PROFILE = 0x01,
+enum AWCC_THERMAL_CONTROL_OPERATIONS {
+ AWCC_OP_ACTIVATE_PROFILE = 0x01,
+ AWCC_OP_SET_FAN_BOOST = 0x02,
};
-enum WMAX_GAME_SHIFT_STATUS_OPERATIONS {
- WMAX_OPERATION_TOGGLE_GAME_SHIFT = 0x01,
- WMAX_OPERATION_GET_GAME_SHIFT_STATUS = 0x02,
+enum AWCC_GAME_SHIFT_STATUS_OPERATIONS {
+ AWCC_OP_TOGGLE_GAME_SHIFT = 0x01,
+ AWCC_OP_GET_GAME_SHIFT_STATUS = 0x02,
};
-enum WMAX_THERMAL_TABLES {
- WMAX_THERMAL_TABLE_BASIC = 0x90,
- WMAX_THERMAL_TABLE_USTT = 0xA0,
+enum AWCC_THERMAL_TABLES {
+ AWCC_THERMAL_TABLE_LEGACY = 0x9,
+ AWCC_THERMAL_TABLE_USTT = 0xA,
};
-enum wmax_thermal_mode {
- THERMAL_MODE_USTT_BALANCED,
- THERMAL_MODE_USTT_BALANCED_PERFORMANCE,
- THERMAL_MODE_USTT_COOL,
- THERMAL_MODE_USTT_QUIET,
- THERMAL_MODE_USTT_PERFORMANCE,
- THERMAL_MODE_USTT_LOW_POWER,
- THERMAL_MODE_BASIC_QUIET,
- THERMAL_MODE_BASIC_BALANCED,
- THERMAL_MODE_BASIC_BALANCED_PERFORMANCE,
- THERMAL_MODE_BASIC_PERFORMANCE,
- THERMAL_MODE_LAST,
+enum AWCC_SPECIAL_THERMAL_CODES {
+ AWCC_SPECIAL_PROFILE_CUSTOM = 0x00,
+ AWCC_SPECIAL_PROFILE_GMODE = 0xAB,
+};
+
+enum AWCC_TEMP_SENSOR_TYPES {
+ AWCC_TEMP_SENSOR_CPU = 0x01,
+ AWCC_TEMP_SENSOR_GPU = 0x06,
+};
+
+enum awcc_thermal_profile {
+ AWCC_PROFILE_USTT_BALANCED,
+ AWCC_PROFILE_USTT_BALANCED_PERFORMANCE,
+ AWCC_PROFILE_USTT_COOL,
+ AWCC_PROFILE_USTT_QUIET,
+ AWCC_PROFILE_USTT_PERFORMANCE,
+ AWCC_PROFILE_USTT_LOW_POWER,
+ AWCC_PROFILE_LEGACY_QUIET,
+ AWCC_PROFILE_LEGACY_BALANCED,
+ AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE,
+ AWCC_PROFILE_LEGACY_PERFORMANCE,
+ AWCC_PROFILE_LAST,
};
struct wmax_led_args {
@@ -263,23 +312,49 @@ struct wmax_u32_args {
u8 arg3;
};
+struct awcc_fan_data {
+ unsigned long auto_channels_temp;
+ const char *label;
+ u32 min_rpm;
+ u32 max_rpm;
+ u8 suspend_cache;
+ u8 id;
+};
+
struct awcc_priv {
struct wmi_device *wdev;
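+	/*
+	 * The 32-bit system description doubles as four consecutive u8
+	 * resource counts, hence the union below
+	 */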
+ union {
+ u32 system_description;
+ struct {
+ u8 fan_count;
+ u8 temp_count;
+ u8 unknown_count;
+ u8 profile_count;
+ };
+ u8 res_count[4];
+ };
+
struct device *ppdev;
- enum wmax_thermal_mode supported_thermal_profiles[PLATFORM_PROFILE_LAST];
+ u8 supported_profiles[PLATFORM_PROFILE_LAST];
+
+ struct device *hwdev;
+ struct awcc_fan_data **fan_data;
+ unsigned long temp_sensors[AWCC_ID_BITMAP_LONGS];
+
+ u32 gpio_count;
};
-static const enum platform_profile_option wmax_mode_to_platform_profile[THERMAL_MODE_LAST] = {
- [THERMAL_MODE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [THERMAL_MODE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [THERMAL_MODE_USTT_COOL] = PLATFORM_PROFILE_COOL,
- [THERMAL_MODE_USTT_QUIET] = PLATFORM_PROFILE_QUIET,
- [THERMAL_MODE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
- [THERMAL_MODE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER,
- [THERMAL_MODE_BASIC_QUIET] = PLATFORM_PROFILE_QUIET,
- [THERMAL_MODE_BASIC_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [THERMAL_MODE_BASIC_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [THERMAL_MODE_BASIC_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
+static const enum platform_profile_option awcc_mode_to_platform_profile[AWCC_PROFILE_LAST] = {
+ [AWCC_PROFILE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED,
+ [AWCC_PROFILE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ [AWCC_PROFILE_USTT_COOL] = PLATFORM_PROFILE_COOL,
+ [AWCC_PROFILE_USTT_QUIET] = PLATFORM_PROFILE_QUIET,
+ [AWCC_PROFILE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
+ [AWCC_PROFILE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER,
+ [AWCC_PROFILE_LEGACY_QUIET] = PLATFORM_PROFILE_QUIET,
+ [AWCC_PROFILE_LEGACY_BALANCED] = PLATFORM_PROFILE_BALANCED,
+ [AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ [AWCC_PROFILE_LEGACY_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
};
static struct awcc_quirks *awcc;
@@ -497,123 +572,686 @@ const struct attribute_group wmax_deepsleep_attribute_group = {
};
/*
- * Thermal Profile control
- * - Provides thermal profile control through the Platform Profile API
+ * AWCC Helpers
*/
-static bool is_wmax_thermal_code(u32 code)
+static bool is_awcc_thermal_profile_id(u8 code)
{
- if (code & WMAX_SENSOR_ID_MASK)
- return false;
+ u8 table = FIELD_GET(AWCC_THERMAL_TABLE_MASK, code);
+ u8 mode = FIELD_GET(AWCC_THERMAL_MODE_MASK, code);
- if ((code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_LAST)
+ if (mode >= AWCC_PROFILE_LAST)
return false;
- if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_BASIC &&
- (code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_BASIC_QUIET)
+ if (table == AWCC_THERMAL_TABLE_LEGACY && mode >= AWCC_PROFILE_LEGACY_QUIET)
return true;
- if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_USTT &&
- (code & WMAX_THERMAL_MODE_MASK) <= THERMAL_MODE_USTT_LOW_POWER)
+ if (table == AWCC_THERMAL_TABLE_USTT && mode <= AWCC_PROFILE_USTT_LOW_POWER)
return true;
return false;
}
-static int wmax_thermal_information(struct wmi_device *wdev, u8 operation,
- u8 arg, u32 *out_data)
+static int awcc_wmi_command(struct wmi_device *wdev, u32 method_id,
+ struct wmax_u32_args *args, u32 *out)
{
- struct wmax_u32_args in_args = {
+ int ret;
+
+ ret = alienware_wmi_command(wdev, method_id, args, sizeof(*args), out);
+ if (ret)
+ return ret;
+
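+	/* The AWCC interface signals errors in-band through magic output values */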
+ if (*out == AWCC_FAILURE_CODE || *out == AWCC_FAILURE_CODE_2)
+ return -EBADRQC;
+
+ return 0;
+}
+
+static int awcc_get_fan_sensors(struct wmi_device *wdev, u8 operation,
+ u8 fan_id, u8 index, u32 *out)
+{
+ struct wmax_u32_args args = {
+ .operation = operation,
+ .arg1 = fan_id,
+ .arg2 = index,
+ .arg3 = 0,
+ };
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_GET_FAN_SENSORS, &args, out);
+}
+
+static int awcc_thermal_information(struct wmi_device *wdev, u8 operation, u8 arg,
+ u32 *out)
+{
+ struct wmax_u32_args args = {
.operation = operation,
.arg1 = arg,
.arg2 = 0,
.arg3 = 0,
};
- int ret;
- ret = alienware_wmi_command(wdev, WMAX_METHOD_THERMAL_INFORMATION,
- &in_args, sizeof(in_args), out_data);
- if (ret < 0)
- return ret;
+ return awcc_wmi_command(wdev, AWCC_METHOD_THERMAL_INFORMATION, &args, out);
+}
- if (*out_data == WMAX_FAILURE_CODE)
- return -EBADRQC;
+static int awcc_fwup_gpio_control(struct wmi_device *wdev, u8 pin, u8 status)
+{
+ struct wmax_u32_args args = {
+ .operation = pin,
+ .arg1 = status,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+ u32 out;
- return 0;
+ return awcc_wmi_command(wdev, AWCC_METHOD_FWUP_GPIO_CONTROL, &args, &out);
}
-static int wmax_thermal_control(struct wmi_device *wdev, u8 profile)
+static int awcc_read_total_gpios(struct wmi_device *wdev, u32 *count)
{
- struct wmax_u32_args in_args = {
- .operation = WMAX_OPERATION_ACTIVATE_PROFILE,
- .arg1 = profile,
+ struct wmax_u32_args args = {};
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_READ_TOTAL_GPIOS, &args, count);
+}
+
+static int awcc_read_gpio_status(struct wmi_device *wdev, u8 pin, u32 *status)
+{
+ struct wmax_u32_args args = {
+ .operation = pin,
+ .arg1 = 0,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_READ_GPIO_STATUS, &args, status);
+}
+
+static int awcc_game_shift_status(struct wmi_device *wdev, u8 operation,
+ u32 *out)
+{
+ struct wmax_u32_args args = {
+ .operation = operation,
+ .arg1 = 0,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_GAME_SHIFT_STATUS, &args, out);
+}
+
+/**
+ * awcc_op_get_resource_id - Get the resource ID at a given index
+ * @wdev: AWCC WMI device
+ * @index: Index
+ * @out: Value returned by the WMI call
+ *
+ * Get the resource ID at a given @index. Resource IDs are listed in the
+ * following order:
+ *
+ * - Fan IDs
+ * - Sensor IDs
+ * - Unknown IDs
+ * - Thermal Profile IDs
+ *
+ * The total number of IDs of a given type can be obtained with
+ * AWCC_OP_GET_SYSTEM_DESCRIPTION.
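+ *
+ * For example, on a hypothetical model with two fans, two temperature
+ * sensors, no unknown IDs and four profiles, indices 0-1 return fan IDs,
+ * 2-3 return sensor IDs and 4-7 return thermal profile IDs.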
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int awcc_op_get_resource_id(struct wmi_device *wdev, u8 index, u8 *out)
+{
+ struct wmax_u32_args args = {
+ .operation = AWCC_OP_GET_RESOURCE_ID,
+ .arg1 = index,
.arg2 = 0,
.arg3 = 0,
};
u32 out_data;
int ret;
- ret = alienware_wmi_command(wdev, WMAX_METHOD_THERMAL_CONTROL,
- &in_args, sizeof(in_args), &out_data);
+ ret = awcc_wmi_command(wdev, AWCC_METHOD_THERMAL_INFORMATION, &args, &out_data);
if (ret)
return ret;
- if (out_data == WMAX_FAILURE_CODE)
- return -EBADRQC;
+ *out = FIELD_GET(AWCC_RESOURCE_ID_MASK, out_data);
return 0;
}
-static int wmax_game_shift_status(struct wmi_device *wdev, u8 operation,
- u32 *out_data)
+static int awcc_op_get_fan_rpm(struct wmi_device *wdev, u8 fan_id, u32 *out)
{
- struct wmax_u32_args in_args = {
- .operation = operation,
+ struct wmax_u32_args args = {
+ .operation = AWCC_OP_GET_FAN_RPM,
+ .arg1 = fan_id,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_THERMAL_INFORMATION, &args, out);
+}
+
+static int awcc_op_get_temperature(struct wmi_device *wdev, u8 temp_id, u32 *out)
+{
+ struct wmax_u32_args args = {
+ .operation = AWCC_OP_GET_TEMPERATURE,
+ .arg1 = temp_id,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_THERMAL_INFORMATION, &args, out);
+}
+
+static int awcc_op_get_fan_boost(struct wmi_device *wdev, u8 fan_id, u32 *out)
+{
+ struct wmax_u32_args args = {
+ .operation = AWCC_OP_GET_FAN_BOOST,
+ .arg1 = fan_id,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_THERMAL_INFORMATION, &args, out);
+}
+
+static int awcc_op_get_current_profile(struct wmi_device *wdev, u32 *out)
+{
+ struct wmax_u32_args args = {
+ .operation = AWCC_OP_GET_CURRENT_PROFILE,
.arg1 = 0,
.arg2 = 0,
.arg3 = 0,
};
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_THERMAL_INFORMATION, &args, out);
+}
+
+static int awcc_op_activate_profile(struct wmi_device *wdev, u8 profile)
+{
+ struct wmax_u32_args args = {
+ .operation = AWCC_OP_ACTIVATE_PROFILE,
+ .arg1 = profile,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+ u32 out;
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_THERMAL_CONTROL, &args, &out);
+}
+
+static int awcc_op_set_fan_boost(struct wmi_device *wdev, u8 fan_id, u8 boost)
+{
+ struct wmax_u32_args args = {
+ .operation = AWCC_OP_SET_FAN_BOOST,
+ .arg1 = fan_id,
+ .arg2 = boost,
+ .arg3 = 0,
+ };
+ u32 out;
+
+ return awcc_wmi_command(wdev, AWCC_METHOD_THERMAL_CONTROL, &args, &out);
+}
+
+/*
+ * HWMON
+ * - Provides temperature and fan speed monitoring as well as manual fan
+ * control
+ */
+static umode_t awcc_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct awcc_priv *priv = drvdata;
+ unsigned int temp_count;
+
+ switch (type) {
+ case hwmon_temp:
+ temp_count = bitmap_weight(priv->temp_sensors, AWCC_ID_BITMAP_SIZE);
+
+ return channel < temp_count ? 0444 : 0;
+ case hwmon_fan:
+ return channel < priv->fan_count ? 0444 : 0;
+ case hwmon_pwm:
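+		/* Only the read-only pwmX_auto_channels_temp attribute is exposed */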
+ return channel < priv->fan_count ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int awcc_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ const struct awcc_fan_data *fan;
+ u32 state;
int ret;
+ u8 temp;
- ret = alienware_wmi_command(wdev, WMAX_METHOD_GAME_SHIFT_STATUS,
- &in_args, sizeof(in_args), out_data);
- if (ret < 0)
- return ret;
+ switch (type) {
+ case hwmon_temp:
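+		/* Map the hwmon channel to the nth set sensor ID in the bitmap */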
+ temp = find_nth_bit(priv->temp_sensors, AWCC_ID_BITMAP_SIZE, channel);
- if (*out_data == WMAX_FAILURE_CODE)
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = awcc_op_get_temperature(priv->wdev, temp, &state);
+ if (ret)
+ return ret;
+
+ *val = state * MILLIDEGREE_PER_DEGREE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ break;
+ case hwmon_fan:
+ fan = priv->fan_data[channel];
+
+ switch (attr) {
+ case hwmon_fan_input:
+ ret = awcc_op_get_fan_rpm(priv->wdev, fan->id, &state);
+ if (ret)
+ return ret;
+
+ *val = state;
+ break;
+ case hwmon_fan_min:
+ *val = fan->min_rpm;
+ break;
+ case hwmon_fan_max:
+ *val = fan->max_rpm;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ break;
+ case hwmon_pwm:
+ fan = priv->fan_data[channel];
+
+ switch (attr) {
+ case hwmon_pwm_auto_channels_temp:
+ *val = fan->auto_channels_temp;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ break;
+ default:
return -EOPNOTSUPP;
+ }
return 0;
}
-static int thermal_profile_get(struct device *dev,
- enum platform_profile_option *profile)
+static int awcc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
{
struct awcc_priv *priv = dev_get_drvdata(dev);
- u32 out_data;
+ u8 temp;
+
+ switch (type) {
+ case hwmon_temp:
+ temp = find_nth_bit(priv->temp_sensors, AWCC_ID_BITMAP_SIZE, channel);
+
+ switch (temp) {
+ case AWCC_TEMP_SENSOR_CPU:
+ *str = "CPU";
+ break;
+ case AWCC_TEMP_SENSOR_GPU:
+ *str = "GPU";
+ break;
+ default:
+ *str = "Unknown";
+ break;
+ }
+
+ break;
+ case hwmon_fan:
+ *str = priv->fan_data[channel]->label;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops awcc_hwmon_ops = {
+ .is_visible = awcc_hwmon_is_visible,
+ .read = awcc_hwmon_read,
+ .read_string = awcc_hwmon_read_string,
+};
+
+static const struct hwmon_channel_info * const awcc_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_LABEL | HWMON_T_INPUT,
+ HWMON_T_LABEL | HWMON_T_INPUT,
+ HWMON_T_LABEL | HWMON_T_INPUT,
+ HWMON_T_LABEL | HWMON_T_INPUT,
+ HWMON_T_LABEL | HWMON_T_INPUT,
+ HWMON_T_LABEL | HWMON_T_INPUT
+ ),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_LABEL | HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_MAX,
+ HWMON_F_LABEL | HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_MAX,
+ HWMON_F_LABEL | HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_MAX,
+ HWMON_F_LABEL | HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_MAX,
+ HWMON_F_LABEL | HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_MAX,
+ HWMON_F_LABEL | HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_MAX
+ ),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP
+ ),
+ NULL
+};
+
+static const struct hwmon_chip_info awcc_hwmon_chip_info = {
+ .ops = &awcc_hwmon_ops,
+ .info = awcc_hwmon_info,
+};
+
+static ssize_t fan_boost_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ struct awcc_fan_data *fan = priv->fan_data[index];
+ u32 boost;
int ret;
- ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_CURRENT_PROFILE,
- 0, &out_data);
+ ret = awcc_op_get_fan_boost(priv->wdev, fan->id, &boost);
+ if (ret)
+ return ret;
- if (ret < 0)
+ return sysfs_emit(buf, "%u\n", boost);
+}
+
+static ssize_t fan_boost_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+ struct awcc_fan_data *fan = priv->fan_data[index];
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
return ret;
- if (out_data == WMAX_THERMAL_MODE_GMODE) {
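+	/* Boost is a u8 quantity, clamp out-of-range input instead of rejecting it */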
+ ret = awcc_op_set_fan_boost(priv->wdev, fan->id, clamp_val(val, 0, 255));
+
+ return ret ? ret : count;
+}
+
+static SENSOR_DEVICE_ATTR_RW(fan1_boost, fan_boost, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_boost, fan_boost, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_boost, fan_boost, 2);
+static SENSOR_DEVICE_ATTR_RW(fan4_boost, fan_boost, 3);
+static SENSOR_DEVICE_ATTR_RW(fan5_boost, fan_boost, 4);
+static SENSOR_DEVICE_ATTR_RW(fan6_boost, fan_boost, 5);
+
+static umode_t fan_boost_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct awcc_priv *priv = dev_get_drvdata(kobj_to_dev(kobj));
+
+ return n < priv->fan_count ? attr->mode : 0;
+}
+
+static bool fan_boost_group_visible(struct kobject *kobj)
+{
+ return true;
+}
+
+DEFINE_SYSFS_GROUP_VISIBLE(fan_boost);
+
+static struct attribute *fan_boost_attrs[] = {
+ &sensor_dev_attr_fan1_boost.dev_attr.attr,
+ &sensor_dev_attr_fan2_boost.dev_attr.attr,
+ &sensor_dev_attr_fan3_boost.dev_attr.attr,
+ &sensor_dev_attr_fan4_boost.dev_attr.attr,
+ &sensor_dev_attr_fan5_boost.dev_attr.attr,
+ &sensor_dev_attr_fan6_boost.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group fan_boost_group = {
+ .attrs = fan_boost_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(fan_boost),
+};
+
+static const struct attribute_group *awcc_hwmon_groups[] = {
+ &fan_boost_group,
+ NULL
+};
+
+static int awcc_hwmon_temps_init(struct wmi_device *wdev)
+{
+ struct awcc_priv *priv = dev_get_drvdata(&wdev->dev);
+ unsigned int i;
+ int ret;
+ u8 id;
+
+ for (i = 0; i < priv->temp_count; i++) {
+ /*
+		 * Temperature sensor IDs are listed after the fan IDs at
+ * offset `fan_count`
+ */
+ ret = awcc_op_get_resource_id(wdev, i + priv->fan_count, &id);
+ if (ret)
+ return ret;
+
+ __set_bit(id, priv->temp_sensors);
+ }
+
+ return 0;
+}
+
+static char *awcc_get_fan_label(unsigned long *fan_temps)
+{
+ unsigned int temp_count = bitmap_weight(fan_temps, AWCC_ID_BITMAP_SIZE);
+ char *label;
+ u8 temp_id;
+
+ switch (temp_count) {
+ case 0:
+ label = "Independent Fan";
+ break;
+ case 1:
+ temp_id = find_first_bit(fan_temps, AWCC_ID_BITMAP_SIZE);
+
+ switch (temp_id) {
+ case AWCC_TEMP_SENSOR_CPU:
+ label = "Processor Fan";
+ break;
+ case AWCC_TEMP_SENSOR_GPU:
+ label = "Video Fan";
+ break;
+ default:
+ label = "Unknown Fan";
+ break;
+ }
+
+ break;
+ default:
+ label = "Shared Fan";
+ break;
+ }
+
+ return label;
+}
+
+static int awcc_hwmon_fans_init(struct wmi_device *wdev)
+{
+ struct awcc_priv *priv = dev_get_drvdata(&wdev->dev);
+ unsigned long fan_temps[AWCC_ID_BITMAP_LONGS];
+ unsigned long gather[AWCC_ID_BITMAP_LONGS];
+ u32 min_rpm, max_rpm, temp_count, temp_id;
+ struct awcc_fan_data *fan_data;
+ unsigned int i, j;
+ int ret;
+ u8 id;
+
+ for (i = 0; i < priv->fan_count; i++) {
+ fan_data = devm_kzalloc(&wdev->dev, sizeof(*fan_data), GFP_KERNEL);
+ if (!fan_data)
+ return -ENOMEM;
+
+ /*
+ * Fan IDs are listed first at offset 0
+ */
+ ret = awcc_op_get_resource_id(wdev, i, &id);
+ if (ret)
+ return ret;
+
+ ret = awcc_thermal_information(wdev, AWCC_OP_GET_FAN_MIN_RPM, id,
+ &min_rpm);
+ if (ret)
+ return ret;
+
+ ret = awcc_thermal_information(wdev, AWCC_OP_GET_FAN_MAX_RPM, id,
+ &max_rpm);
+ if (ret)
+ return ret;
+
+ ret = awcc_get_fan_sensors(wdev, AWCC_OP_GET_TOTAL_FAN_TEMPS, id,
+ 0, &temp_count);
+ if (ret)
+ return ret;
+
+ bitmap_zero(fan_temps, AWCC_ID_BITMAP_SIZE);
+
+ for (j = 0; j < temp_count; j++) {
+ ret = awcc_get_fan_sensors(wdev, AWCC_OP_GET_FAN_TEMP_ID,
+ id, j, &temp_id);
+ if (ret)
+ break;
+
+ temp_id = FIELD_GET(AWCC_RESOURCE_ID_MASK, temp_id);
+ __set_bit(temp_id, fan_temps);
+ }
+
+ fan_data->id = id;
+ fan_data->min_rpm = min_rpm;
+ fan_data->max_rpm = max_rpm;
+ fan_data->label = awcc_get_fan_label(fan_temps);
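+		/*
+		 * Compress this fan's sensor IDs into their positions within
+		 * temp_sensors so auto_channels_temp matches the hwmon
+		 * temperature channel numbering
+		 */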
+ bitmap_gather(gather, fan_temps, priv->temp_sensors, AWCC_ID_BITMAP_SIZE);
+ bitmap_copy(&fan_data->auto_channels_temp, gather, BITS_PER_LONG);
+ priv->fan_data[i] = fan_data;
+ }
+
+ return 0;
+}
+
+static int awcc_hwmon_init(struct wmi_device *wdev)
+{
+ struct awcc_priv *priv = dev_get_drvdata(&wdev->dev);
+ int ret;
+
+ priv->fan_data = devm_kcalloc(&wdev->dev, priv->fan_count,
+ sizeof(*priv->fan_data), GFP_KERNEL);
+ if (!priv->fan_data)
+ return -ENOMEM;
+
+ ret = awcc_hwmon_temps_init(wdev);
+ if (ret)
+ return ret;
+
+ ret = awcc_hwmon_fans_init(wdev);
+ if (ret)
+ return ret;
+
+ priv->hwdev = devm_hwmon_device_register_with_info(&wdev->dev, "alienware_wmi",
+ priv, &awcc_hwmon_chip_info,
+ awcc_hwmon_groups);
+
+ return PTR_ERR_OR_ZERO(priv->hwdev);
+}
+
+static void awcc_hwmon_suspend(struct device *dev)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ struct awcc_fan_data *fan;
+ unsigned int i;
+ u32 boost;
+ int ret;
+
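+	/* Cache each fan's boost and drop it to zero for the duration of suspend */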
+ for (i = 0; i < priv->fan_count; i++) {
+ fan = priv->fan_data[i];
+
+ ret = awcc_thermal_information(priv->wdev, AWCC_OP_GET_FAN_BOOST,
+ fan->id, &boost);
+ if (ret)
+ dev_err(dev, "Failed to store Fan %u boost while suspending\n", i);
+
+ fan->suspend_cache = ret ? 0 : clamp_val(boost, 0, 255);
+
+		ret = awcc_op_set_fan_boost(priv->wdev, fan->id, 0);
+ if (ret)
+ dev_err(dev, "Failed to set Fan %u boost to 0 while suspending\n", i);
+ }
+}
+
+static void awcc_hwmon_resume(struct device *dev)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ struct awcc_fan_data *fan;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < priv->fan_count; i++) {
+ fan = priv->fan_data[i];
+
+ if (!fan->suspend_cache)
+ continue;
+
+ ret = awcc_op_set_fan_boost(priv->wdev, fan->id, fan->suspend_cache);
+ if (ret)
+ dev_err(dev, "Failed to restore Fan %u boost while resuming\n", i);
+ }
+}
+
+/*
+ * Thermal Profile control
+ * - Provides thermal profile control through the Platform Profile API
+ */
+static int awcc_platform_profile_get(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ u32 out_data;
+ int ret;
+
+ ret = awcc_op_get_current_profile(priv->wdev, &out_data);
+ if (ret)
+ return ret;
+
+ switch (out_data) {
+ case AWCC_SPECIAL_PROFILE_CUSTOM:
+ *profile = PLATFORM_PROFILE_CUSTOM;
+ return 0;
+ case AWCC_SPECIAL_PROFILE_GMODE:
*profile = PLATFORM_PROFILE_PERFORMANCE;
return 0;
+ default:
+ break;
}
- if (!is_wmax_thermal_code(out_data))
+ if (!is_awcc_thermal_profile_id(out_data))
return -ENODATA;
- out_data &= WMAX_THERMAL_MODE_MASK;
- *profile = wmax_mode_to_platform_profile[out_data];
+ out_data = FIELD_GET(AWCC_THERMAL_MODE_MASK, out_data);
+ *profile = awcc_mode_to_platform_profile[out_data];
return 0;
}
-static int thermal_profile_set(struct device *dev,
- enum platform_profile_option profile)
+static int awcc_platform_profile_set(struct device *dev,
+ enum platform_profile_option profile)
{
struct awcc_priv *priv = dev_get_drvdata(dev);
@@ -621,8 +1259,8 @@ static int thermal_profile_set(struct device *dev,
u32 gmode_status;
int ret;
- ret = wmax_game_shift_status(priv->wdev,
- WMAX_OPERATION_GET_GAME_SHIFT_STATUS,
+ ret = awcc_game_shift_status(priv->wdev,
+ AWCC_OP_GET_GAME_SHIFT_STATUS,
&gmode_status);
if (ret < 0)
@@ -630,8 +1268,8 @@ static int thermal_profile_set(struct device *dev,
if ((profile == PLATFORM_PROFILE_PERFORMANCE && !gmode_status) ||
(profile != PLATFORM_PROFILE_PERFORMANCE && gmode_status)) {
- ret = wmax_game_shift_status(priv->wdev,
- WMAX_OPERATION_TOGGLE_GAME_SHIFT,
+ ret = awcc_game_shift_status(priv->wdev,
+ AWCC_OP_TOGGLE_GAME_SHIFT,
&gmode_status);
if (ret < 0)
@@ -639,62 +1277,70 @@ static int thermal_profile_set(struct device *dev,
}
}
- return wmax_thermal_control(priv->wdev,
- priv->supported_thermal_profiles[profile]);
+ return awcc_op_activate_profile(priv->wdev, priv->supported_profiles[profile]);
}
-static int thermal_profile_probe(void *drvdata, unsigned long *choices)
+static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices)
{
enum platform_profile_option profile;
struct awcc_priv *priv = drvdata;
- enum wmax_thermal_mode mode;
- u8 sys_desc[4];
- u32 first_mode;
- u32 out_data;
+ enum awcc_thermal_profile mode;
+ u8 id, offset = 0;
int ret;
- ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_SYS_DESCRIPTION,
- 0, (u32 *) &sys_desc);
- if (ret < 0)
- return ret;
-
- first_mode = sys_desc[0] + sys_desc[1];
-
- for (u32 i = 0; i < sys_desc[3]; i++) {
- ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_LIST_IDS,
- i + first_mode, &out_data);
+ /*
+ * Thermal profile IDs are listed last at offset
+ * fan_count + temp_count + unknown_count
+ */
+ for (unsigned int i = 0; i < ARRAY_SIZE(priv->res_count) - 1; i++)
+ offset += priv->res_count[i];
+
+ for (unsigned int i = 0; i < priv->profile_count; i++) {
+ ret = awcc_op_get_resource_id(priv->wdev, i + offset, &id);
+ /*
+		 * Some devices report an incorrect number of thermal profiles,
+ * so the resource ID list may end prematurely
+ */
if (ret == -EBADRQC)
break;
if (ret)
return ret;
- if (!is_wmax_thermal_code(out_data))
+ if (!is_awcc_thermal_profile_id(id)) {
+ dev_dbg(&priv->wdev->dev, "Unmapped thermal profile ID 0x%02x\n", id);
continue;
+ }
- mode = out_data & WMAX_THERMAL_MODE_MASK;
- profile = wmax_mode_to_platform_profile[mode];
- priv->supported_thermal_profiles[profile] = out_data;
+ mode = FIELD_GET(AWCC_THERMAL_MODE_MASK, id);
+ profile = awcc_mode_to_platform_profile[mode];
+ priv->supported_profiles[profile] = id;
- set_bit(profile, choices);
+ __set_bit(profile, choices);
}
if (bitmap_empty(choices, PLATFORM_PROFILE_LAST))
return -ENODEV;
if (awcc->gmode) {
- priv->supported_thermal_profiles[PLATFORM_PROFILE_PERFORMANCE] =
- WMAX_THERMAL_MODE_GMODE;
+ priv->supported_profiles[PLATFORM_PROFILE_PERFORMANCE] =
+ AWCC_SPECIAL_PROFILE_GMODE;
- set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ __set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
}
+ /* Every model supports the "custom" profile */
+ priv->supported_profiles[PLATFORM_PROFILE_CUSTOM] =
+ AWCC_SPECIAL_PROFILE_CUSTOM;
+
+ __set_bit(PLATFORM_PROFILE_CUSTOM, choices);
+
return 0;
}
static const struct platform_profile_ops awcc_platform_profile_ops = {
- .probe = thermal_profile_probe,
- .profile_get = thermal_profile_get,
- .profile_set = thermal_profile_set,
+ .probe = awcc_platform_profile_probe,
+ .profile_get = awcc_platform_profile_get,
+ .profile_set = awcc_platform_profile_set,
};
static int awcc_platform_profile_init(struct wmi_device *wdev)
@@ -707,6 +1353,157 @@ static int awcc_platform_profile_init(struct wmi_device *wdev)
return PTR_ERR_OR_ZERO(priv->ppdev);
}
+/*
+ * DebugFS
+ */
+static int awcc_debugfs_system_description_read(struct seq_file *seq, void *data)
+{
+ struct device *dev = seq->private;
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+
+ seq_printf(seq, "0x%08x\n", priv->system_description);
+
+ return 0;
+}
+
+static int awcc_debugfs_hwmon_data_read(struct seq_file *seq, void *data)
+{
+ struct device *dev = seq->private;
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ const struct awcc_fan_data *fan;
+ unsigned int bit;
+
+ seq_printf(seq, "Number of fans: %u\n", priv->fan_count);
+ seq_printf(seq, "Number of temperature sensors: %u\n\n", priv->temp_count);
+
+ for (u32 i = 0; i < priv->fan_count; i++) {
+ fan = priv->fan_data[i];
+
+ seq_printf(seq, "Fan %u:\n", i);
+ seq_printf(seq, " ID: 0x%02x\n", fan->id);
+ seq_printf(seq, " Related temperature sensors bitmap: %lu\n",
+ fan->auto_channels_temp);
+ }
+
+ seq_puts(seq, "\nTemperature sensor IDs:\n");
+ for_each_set_bit(bit, priv->temp_sensors, AWCC_ID_BITMAP_SIZE)
+ seq_printf(seq, " 0x%02x\n", bit);
+
+ return 0;
+}
+
+static int awcc_debugfs_pprof_data_read(struct seq_file *seq, void *data)
+{
+ struct device *dev = seq->private;
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+
+ seq_printf(seq, "Number of thermal profiles: %u\n\n", priv->profile_count);
+
+ for (u32 i = 0; i < PLATFORM_PROFILE_LAST; i++) {
+ if (!priv->supported_profiles[i])
+ continue;
+
+ seq_printf(seq, "Platform profile %u:\n", i);
+ seq_printf(seq, " ID: 0x%02x\n", priv->supported_profiles[i]);
+ }
+
+ return 0;
+}
+
+static int awcc_gpio_pin_show(struct seq_file *seq, void *data)
+{
+ unsigned long pin = debugfs_get_aux_num(seq->file);
+ struct wmi_device *wdev = seq->private;
+ u32 status;
+ int ret;
+
+ ret = awcc_read_gpio_status(wdev, pin, &status);
+ if (ret)
+ return ret;
+
+ seq_printf(seq, "%u\n", status);
+
+ return 0;
+}
+
+static ssize_t awcc_gpio_pin_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long pin = debugfs_get_aux_num(file);
+ struct seq_file *seq = file->private_data;
+ struct wmi_device *wdev = seq->private;
+ bool status;
+ int ret;
+
+ if (!ppos || *ppos)
+ return -EINVAL;
+
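+	/* Accepts the usual kstrtobool spellings: 0/1, y/n, on/off */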
+ ret = kstrtobool_from_user(buf, count, &status);
+ if (ret)
+ return ret;
+
+ ret = awcc_fwup_gpio_control(wdev, pin, status);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+DEFINE_SHOW_STORE_ATTRIBUTE(awcc_gpio_pin);
+
+static void awcc_debugfs_remove(void *data)
+{
+ struct dentry *root = data;
+
+ debugfs_remove(root);
+}
+
+static void awcc_debugfs_init(struct wmi_device *wdev)
+{
+ struct awcc_priv *priv = dev_get_drvdata(&wdev->dev);
+ struct dentry *root, *gpio_ctl;
+ u32 gpio_count;
+ char name[64];
+ int ret;
+
+ scnprintf(name, sizeof(name), "%s-%s", "alienware-wmi", dev_name(&wdev->dev));
+ root = debugfs_create_dir(name, NULL);
+
+ debugfs_create_devm_seqfile(&wdev->dev, "system_description", root,
+ awcc_debugfs_system_description_read);
+
+ if (awcc->hwmon)
+ debugfs_create_devm_seqfile(&wdev->dev, "hwmon_data", root,
+ awcc_debugfs_hwmon_data_read);
+
+ if (awcc->pprof)
+ debugfs_create_devm_seqfile(&wdev->dev, "pprof_data", root,
+ awcc_debugfs_pprof_data_read);
+
+ ret = awcc_read_total_gpios(wdev, &gpio_count);
+ if (ret) {
+ dev_dbg(&wdev->dev, "Failed to get total GPIO Pin count\n");
+ goto out_add_action;
+ } else if (gpio_count > AWCC_MAX_RES_COUNT) {
+ dev_dbg(&wdev->dev, "Reported GPIO Pin count may be incorrect: %u\n", gpio_count);
+ goto out_add_action;
+ }
+
+ gpio_ctl = debugfs_create_dir("gpio_ctl", root);
+
+ priv->gpio_count = gpio_count;
+ debugfs_create_u32("total_gpios", 0444, gpio_ctl, &priv->gpio_count);
+
+ for (unsigned int i = 0; i < gpio_count; i++) {
+ scnprintf(name, sizeof(name), "pin%u", i);
+ debugfs_create_file_aux_num(name, 0644, gpio_ctl, wdev, i,
+ &awcc_gpio_pin_fops);
+ }
+
+out_add_action:
+ devm_add_action_or_reset(&wdev->dev, awcc_debugfs_remove, root);
+}
+
static int alienware_awcc_setup(struct wmi_device *wdev)
{
struct awcc_priv *priv;
@@ -716,15 +1513,37 @@ static int alienware_awcc_setup(struct wmi_device *wdev)
if (!priv)
return -ENOMEM;
+ ret = awcc_thermal_information(wdev, AWCC_OP_GET_SYSTEM_DESCRIPTION,
+ 0, &priv->system_description);
+ if (ret < 0)
+ return ret;
+
+ /* Sanity check */
+ for (unsigned int i = 0; i < ARRAY_SIZE(priv->res_count); i++) {
+ if (priv->res_count[i] > AWCC_MAX_RES_COUNT) {
+ dev_err(&wdev->dev, "Malformed system description: 0x%08x\n",
+ priv->system_description);
+ return -ENXIO;
+ }
+ }
+
priv->wdev = wdev;
dev_set_drvdata(&wdev->dev, priv);
+ if (awcc->hwmon) {
+ ret = awcc_hwmon_init(wdev);
+ if (ret)
+ return ret;
+ }
+
if (awcc->pprof) {
ret = awcc_platform_profile_init(wdev);
if (ret)
return ret;
}
+ awcc_debugfs_init(wdev);
+
return 0;
}
@@ -775,6 +1594,24 @@ static int wmax_wmi_probe(struct wmi_device *wdev, const void *context)
return ret;
}
+static int wmax_wmi_suspend(struct device *dev)
+{
+ if (awcc->hwmon)
+ awcc_hwmon_suspend(dev);
+
+ return 0;
+}
+
+static int wmax_wmi_resume(struct device *dev)
+{
+ if (awcc->hwmon)
+ awcc_hwmon_resume(dev);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(wmax_wmi_pm_ops, wmax_wmi_suspend, wmax_wmi_resume);
+
static const struct wmi_device_id alienware_wmax_device_id_table[] = {
{ WMAX_CONTROL_GUID, NULL },
{ },
@@ -785,6 +1622,7 @@ static struct wmi_driver alienware_wmax_wmi_driver = {
.driver = {
.name = "alienware-wmi-wmax",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = pm_sleep_ptr(&wmax_wmi_pm_ops),
},
.id_table = alienware_wmax_device_id_table,
.probe = wmax_wmi_probe,
@@ -799,6 +1637,13 @@ int __init alienware_wmax_wmi_init(void)
if (id)
awcc = id->driver_data;
+ if (force_hwmon) {
+ if (!awcc)
+ awcc = &empty_quirks;
+
+ awcc->hwmon = true;
+ }
+
if (force_platform_profile) {
if (!awcc)
awcc = &empty_quirks;
diff --git a/drivers/platform/x86/dell/dell-pc.c b/drivers/platform/x86/dell/dell-pc.c
index 483240bb36e7..48cc7511905a 100644
--- a/drivers/platform/x86/dell/dell-pc.c
+++ b/drivers/platform/x86/dell/dell-pc.c
@@ -11,19 +11,20 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/bits.h>
+#include <linux/device/faux.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_profile.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dell-smbios.h"
-static struct platform_device *platform_device;
+static struct faux_device *dell_pc_fdev;
static int supported_modes;
static const struct dmi_system_id dell_device_table[] __initconst = {
@@ -146,11 +147,6 @@ static int thermal_get_supported_modes(int *supported_bits)
dell_fill_request(&buffer, 0x0, 0, 0, 0);
ret = dell_send_request(&buffer, CLASS_INFO, SELECT_THERMAL_MANAGEMENT);
- /* Thermal function not supported */
- if (ret == -ENXIO) {
- *supported_bits = 0;
- return 0;
- }
if (ret)
return ret;
*supported_bits = FIELD_GET(DELL_THERMAL_SUPPORTED, buffer.output[1]);
@@ -233,13 +229,13 @@ static int thermal_platform_profile_get(struct device *dev,
static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
{
if (supported_modes & DELL_QUIET)
- set_bit(PLATFORM_PROFILE_QUIET, choices);
+ __set_bit(PLATFORM_PROFILE_QUIET, choices);
if (supported_modes & DELL_COOL_BOTTOM)
- set_bit(PLATFORM_PROFILE_COOL, choices);
+ __set_bit(PLATFORM_PROFILE_COOL, choices);
if (supported_modes & DELL_BALANCED)
- set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ __set_bit(PLATFORM_PROFILE_BALANCED, choices);
if (supported_modes & DELL_PERFORMANCE)
- set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ __set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
return 0;
}
@@ -250,68 +246,43 @@ static const struct platform_profile_ops dell_pc_platform_profile_ops = {
.profile_set = thermal_platform_profile_set,
};
-static int thermal_init(void)
+static int dell_pc_faux_probe(struct faux_device *fdev)
{
struct device *ppdev;
int ret;
- /* If thermal commands are not supported, exit without error */
if (!dell_smbios_class_is_supported(CLASS_INFO))
- return 0;
+ return -ENODEV;
- /* If thermal modes are not supported, exit without error */
ret = thermal_get_supported_modes(&supported_modes);
if (ret < 0)
return ret;
- if (!supported_modes)
- return 0;
-
- platform_device = platform_device_register_simple("dell-pc", PLATFORM_DEVID_NONE, NULL, 0);
- if (IS_ERR(platform_device))
- return PTR_ERR(platform_device);
-
- ppdev = devm_platform_profile_register(&platform_device->dev, "dell-pc",
- NULL, &dell_pc_platform_profile_ops);
- if (IS_ERR(ppdev)) {
- ret = PTR_ERR(ppdev);
- goto cleanup_platform_device;
- }
- return 0;
+ ppdev = devm_platform_profile_register(&fdev->dev, "dell-pc", NULL,
+ &dell_pc_platform_profile_ops);
-cleanup_platform_device:
- platform_device_unregister(platform_device);
-
- return ret;
+ return PTR_ERR_OR_ZERO(ppdev);
}
-static void thermal_cleanup(void)
-{
- platform_device_unregister(platform_device);
-}
+static const struct faux_device_ops dell_pc_faux_ops = {
+ .probe = dell_pc_faux_probe,
+};
static int __init dell_init(void)
{
- int ret;
-
if (!dmi_check_system(dell_device_table))
return -ENODEV;
- /* Do not fail module if thermal modes not supported, just skip */
- ret = thermal_init();
- if (ret)
- goto fail_thermal;
+ dell_pc_fdev = faux_device_create("dell-pc", NULL, &dell_pc_faux_ops);
+ if (!dell_pc_fdev)
+ return -ENODEV;
return 0;
-
-fail_thermal:
- thermal_cleanup();
- return ret;
}
static void __exit dell_exit(void)
{
- thermal_cleanup();
+ faux_device_destroy(dell_pc_fdev);
}
module_init(dell_init);
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index f27739da380f..67f3d7158403 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -8,6 +8,7 @@
#define pr_format(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/device/driver.h>
@@ -39,6 +40,33 @@
#define DELL_DDV_SUPPORTED_VERSION_MAX 3
#define DELL_DDV_GUID "8A42EA14-4F2A-FD45-6422-0087F7A7E608"
+/* Battery indices 1, 2 and 3 */
+#define DELL_DDV_NUM_BATTERIES 3
+
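+/* Smart Battery date encoding: day + (month << 5) + ((year - 1980) << 9) */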
+#define SBS_MANUFACTURE_YEAR_MASK GENMASK(15, 9)
+#define SBS_MANUFACTURE_MONTH_MASK GENMASK(8, 5)
+#define SBS_MANUFACTURE_DAY_MASK GENMASK(4, 0)
+
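+/* The failure mode lives in bits 11-8, a mode-specific code in the bits above it */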
+#define MA_FAILURE_MODE_MASK GENMASK(11, 8)
+#define MA_FAILURE_MODE_PERMANENT 0x9
+#define MA_FAILURE_MODE_OVERHEAT 0xA
+#define MA_FAILURE_MODE_OVERCURRENT 0xB
+
+#define MA_PERMANENT_FAILURE_CODE_MASK GENMASK(13, 12)
+#define MA_PERMANENT_FAILURE_FUSE_BLOWN 0x0
+#define MA_PERMANENT_FAILURE_CELL_IMBALANCE 0x1
+#define MA_PERMANENT_FAILURE_OVERVOLTAGE 0x2
+#define MA_PERMANENT_FAILURE_FET_FAILURE 0x3
+
+#define MA_OVERHEAT_FAILURE_CODE_MASK GENMASK(15, 12)
+#define MA_OVERHEAT_FAILURE_START 0x5
+#define MA_OVERHEAT_FAILURE_CHARGING 0x7
+#define MA_OVERHEAT_FAILURE_DISCHARGING 0x8
+
+#define MA_OVERCURRENT_FAILURE_CODE_MASK GENMASK(15, 12)
+#define MA_OVERCURRENT_FAILURE_CHARGING 0x6
+#define MA_OVERCURRENT_FAILURE_DISCHARGING 0xB
+
#define DELL_EPPID_LENGTH 20
#define DELL_EPPID_EXT_LENGTH 23
@@ -105,6 +133,8 @@ struct dell_wmi_ddv_sensors {
struct dell_wmi_ddv_data {
struct acpi_battery_hook hook;
struct device_attribute eppid_attr;
+ struct mutex translation_cache_lock; /* Protects the translation cache */
+ struct power_supply *translation_cache[DELL_DDV_NUM_BATTERIES];
struct dell_wmi_ddv_sensors fans;
struct dell_wmi_ddv_sensors temps;
struct wmi_device *wdev;
@@ -639,15 +669,78 @@ err_release:
return ret;
}
-static int dell_wmi_ddv_battery_index(struct acpi_device *acpi_dev, u32 *index)
+static int dell_wmi_ddv_battery_translate(struct dell_wmi_ddv_data *data,
+ struct power_supply *battery, u32 *index)
{
- const char *uid_str;
+ u32 serial_dec, serial_hex, serial;
+ union power_supply_propval val;
+ int ret;
+
+ guard(mutex)(&data->translation_cache_lock);
+
+ for (int i = 0; i < ARRAY_SIZE(data->translation_cache); i++) {
+ if (data->translation_cache[i] == battery) {
+ dev_dbg(&data->wdev->dev, "Translation cache hit for battery index %u\n",
+ i + 1);
+ *index = i + 1;
+ return 0;
+ }
+ }
- uid_str = acpi_device_uid(acpi_dev);
- if (!uid_str)
- return -ENODEV;
+ dev_dbg(&data->wdev->dev, "Translation cache miss\n");
+
+	/* Perform a translation between an ACPI battery and a battery index */
+
+ ret = power_supply_get_property(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Some devices display the serial number of the ACPI battery (string!) as a decimal
+ * number while other devices display it as a hexadecimal number. Because of this we
+ * have to check both cases.
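+	 * For example, the string "100" could mean either 100 (decimal) or
+	 * 0x100 = 256, so both interpretations are compared below.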
+ */
+ ret = kstrtou32(val.strval, 16, &serial_hex);
+ if (ret < 0)
+ return ret; /* Should never fail */
+
+ ret = kstrtou32(val.strval, 10, &serial_dec);
+ if (ret < 0)
+ serial_dec = 0; /* Can fail, thus we only mark serial_dec as invalid */
+
+ for (int i = 0; i < ARRAY_SIZE(data->translation_cache); i++) {
+ ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_SERIAL_NUMBER, i + 1,
+ &serial);
+ if (ret < 0)
+ return ret;
- return kstrtou32(uid_str, 10, index);
+ /* A serial number of 0 signals that this index is not associated with a battery */
+ if (!serial)
+ continue;
+
+ if (serial == serial_dec || serial == serial_hex) {
+ dev_dbg(&data->wdev->dev, "Translation cache update for battery index %u\n",
+ i + 1);
+ data->translation_cache[i] = battery;
+ *index = i + 1;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static void dell_wmi_battery_invalidate(struct dell_wmi_ddv_data *data,
+ struct power_supply *battery)
+{
+ guard(mutex)(&data->translation_cache_lock);
+
+ for (int i = 0; i < ARRAY_SIZE(data->translation_cache); i++) {
+ if (data->translation_cache[i] == battery) {
+ data->translation_cache[i] = NULL;
+ return;
+ }
+ }
}
static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -657,7 +750,7 @@ static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, cha
u32 index;
int ret;
- ret = dell_wmi_ddv_battery_index(to_acpi_device(dev->parent), &index);
+ ret = dell_wmi_ddv_battery_translate(data, to_power_supply(dev), &index);
if (ret < 0)
return ret;
@@ -676,6 +769,116 @@ static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, cha
return ret;
}
+static int dell_wmi_ddv_get_health(struct dell_wmi_ddv_data *data, u32 index,
+ union power_supply_propval *val)
+{
+ u32 value, code;
+ int ret;
+
+ ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_MANUFACTURER_ACCESS, index,
+ &value);
+ if (ret < 0)
+ return ret;
+
+ switch (FIELD_GET(MA_FAILURE_MODE_MASK, value)) {
+ case MA_FAILURE_MODE_PERMANENT:
+ code = FIELD_GET(MA_PERMANENT_FAILURE_CODE_MASK, value);
+ switch (code) {
+ case MA_PERMANENT_FAILURE_FUSE_BLOWN:
+ val->intval = POWER_SUPPLY_HEALTH_BLOWN_FUSE;
+ return 0;
+ case MA_PERMANENT_FAILURE_CELL_IMBALANCE:
+ val->intval = POWER_SUPPLY_HEALTH_CELL_IMBALANCE;
+ return 0;
+ case MA_PERMANENT_FAILURE_OVERVOLTAGE:
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ return 0;
+ case MA_PERMANENT_FAILURE_FET_FAILURE:
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ return 0;
+ default:
+ dev_notice_once(&data->wdev->dev, "Unknown permanent failure code %u\n",
+ code);
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ return 0;
+ }
+ case MA_FAILURE_MODE_OVERHEAT:
+ code = FIELD_GET(MA_OVERHEAT_FAILURE_CODE_MASK, value);
+ switch (code) {
+ case MA_OVERHEAT_FAILURE_START:
+ case MA_OVERHEAT_FAILURE_CHARGING:
+ case MA_OVERHEAT_FAILURE_DISCHARGING:
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ return 0;
+ default:
+ dev_notice_once(&data->wdev->dev, "Unknown overheat failure code %u\n",
+ code);
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ return 0;
+ }
+ case MA_FAILURE_MODE_OVERCURRENT:
+ code = FIELD_GET(MA_OVERCURRENT_FAILURE_CODE_MASK, value);
+ switch (code) {
+ case MA_OVERCURRENT_FAILURE_CHARGING:
+ case MA_OVERCURRENT_FAILURE_DISCHARGING:
+ val->intval = POWER_SUPPLY_HEALTH_OVERCURRENT;
+ return 0;
+ default:
+ dev_notice_once(&data->wdev->dev, "Unknown overcurrent failure code %u\n",
+ code);
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ return 0;
+ }
+ default:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ return 0;
+ }
+}
+
+static int dell_wmi_ddv_get_manufacture_date(struct dell_wmi_ddv_data *data, u32 index,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ u16 year, month, day;
+ u32 value;
+ int ret;
+
+ ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_MANUFACTURE_DATE,
+ index, &value);
+ if (ret < 0)
+ return ret;
+ if (value > U16_MAX)
+ return -ENXIO;
+
+ /*
+	 * Some devices report an invalid manufacture date value
+ * like 0.0.1980. Because of this we have to check the
+ * whole value before exposing parts of it to user space.
+ */
+ year = FIELD_GET(SBS_MANUFACTURE_YEAR_MASK, value) + 1980;
+ month = FIELD_GET(SBS_MANUFACTURE_MONTH_MASK, value);
+ if (month < 1 || month > 12)
+ return -ENODATA;
+
+ day = FIELD_GET(SBS_MANUFACTURE_DAY_MASK, value);
+ if (day < 1 || day > 31)
+ return -ENODATA;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ val->intval = year;
+ return 0;
+ case POWER_SUPPLY_PROP_MANUFACTURE_MONTH:
+ val->intval = month;
+ return 0;
+ case POWER_SUPPLY_PROP_MANUFACTURE_DAY:
+ val->intval = day;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int dell_wmi_ddv_get_property(struct power_supply *psy, const struct power_supply_ext *ext,
void *drvdata, enum power_supply_property psp,
union power_supply_propval *val)
@@ -684,11 +887,13 @@ static int dell_wmi_ddv_get_property(struct power_supply *psy, const struct powe
u32 index, value;
int ret;
- ret = dell_wmi_ddv_battery_index(to_acpi_device(psy->dev.parent), &index);
+ ret = dell_wmi_ddv_battery_translate(data, psy, &index);
if (ret < 0)
return ret;
switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ return dell_wmi_ddv_get_health(data, index, val);
case POWER_SUPPLY_PROP_TEMP:
ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_TEMPERATURE, index,
&value);
@@ -700,13 +905,21 @@ static int dell_wmi_ddv_get_property(struct power_supply *psy, const struct powe
*/
val->intval = value - 2732;
return 0;
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ case POWER_SUPPLY_PROP_MANUFACTURE_MONTH:
+ case POWER_SUPPLY_PROP_MANUFACTURE_DAY:
+ return dell_wmi_ddv_get_manufacture_date(data, index, psp, val);
default:
return -EINVAL;
}
}
static const enum power_supply_property dell_wmi_ddv_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
};
static const struct power_supply_ext dell_wmi_ddv_extension = {
@@ -719,13 +932,12 @@ static const struct power_supply_ext dell_wmi_ddv_extension = {
static int dell_wmi_ddv_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
{
struct dell_wmi_ddv_data *data = container_of(hook, struct dell_wmi_ddv_data, hook);
- u32 index;
int ret;
- /* Return 0 instead of error to avoid being unloaded */
- ret = dell_wmi_ddv_battery_index(to_acpi_device(battery->dev.parent), &index);
- if (ret < 0)
- return 0;
+ /*
+ * We cannot do the battery matching here since the battery might be absent, preventing
+ * us from reading the serial number.
+ */
ret = device_create_file(&battery->dev, &data->eppid_attr);
if (ret < 0)
@@ -749,11 +961,19 @@ static int dell_wmi_ddv_remove_battery(struct power_supply *battery, struct acpi
device_remove_file(&battery->dev, &data->eppid_attr);
power_supply_unregister_extension(battery, &dell_wmi_ddv_extension);
+ dell_wmi_battery_invalidate(data, battery);
+
return 0;
}
static int dell_wmi_ddv_battery_add(struct dell_wmi_ddv_data *data)
{
+ int ret;
+
+ ret = devm_mutex_init(&data->wdev->dev, &data->translation_cache_lock);
+ if (ret < 0)
+ return ret;
+
data->hook.name = "Dell DDV Battery Extension";
data->hook.add_battery = dell_wmi_ddv_add_battery;
data->hook.remove_battery = dell_wmi_ddv_remove_battery;
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
index 230e6ee96636..d8f1bf5e58a0 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
@@ -45,7 +45,7 @@ static ssize_t current_password_store(struct kobject *kobj,
int length;
length = strlen(buf);
- if (buf[length-1] == '\n')
+ if (length && buf[length - 1] == '\n')
length--;
/* firmware does verifiation of min/max password length,
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index e30ca325938c..9dd9f2cb074f 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -45,7 +45,7 @@
MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
MODULE_LICENSE("GPL");
-MODULE_VERSION("3.2");
+MODULE_VERSION("3.3");
#define BIOS_SCAN_LIMIT 0xffffffff
#define MAX_IMAGE_LENGTH 16
@@ -91,7 +91,7 @@ static void init_packet_head(void)
rbu_data.imagesize = 0;
}
-static int create_packet(void *data, size_t length)
+static int create_packet(void *data, size_t length) __must_hold(&rbu_data.lock)
{
struct packet_data *newpacket;
int ordernum = 0;
@@ -292,7 +292,7 @@ static int packet_read_list(char *data, size_t * pread_length)
remaining_bytes = *pread_length;
bytes_read = rbu_data.packet_read_count;
- list_for_each_entry(newpacket, (&packet_data_head.list)->next, list) {
+ list_for_each_entry(newpacket, &packet_data_head.list, list) {
bytes_copied = do_packet_read(pdest, newpacket,
remaining_bytes, bytes_read, &temp_count);
remaining_bytes -= bytes_copied;
@@ -315,14 +315,14 @@ static void packet_empty_list(void)
{
struct packet_data *newpacket, *tmp;
- list_for_each_entry_safe(newpacket, tmp, (&packet_data_head.list)->next, list) {
+ list_for_each_entry_safe(newpacket, tmp, &packet_data_head.list, list) {
list_del(&newpacket->list);
/*
* zero out the RBU packet memory before freeing
* to make sure there are no stale RBU packets left in memory
*/
- memset(newpacket->data, 0, rbu_data.packetsize);
+ memset(newpacket->data, 0, newpacket->length);
set_memory_wb((unsigned long)newpacket->data,
1 << newpacket->ordernum);
free_pages((unsigned long) newpacket->data,
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index f52fbc4924d4..d1908815f5a2 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1370,8 +1370,8 @@ static int eeepc_acpi_add(struct acpi_device *device)
if (!eeepc)
return -ENOMEM;
eeepc->handle = device->handle;
- strcpy(acpi_device_name(device), EEEPC_ACPI_DEVICE_NAME);
- strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
+ strscpy(acpi_device_name(device), EEEPC_ACPI_DEVICE_NAME);
+ strscpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
device->driver_data = eeepc;
eeepc->device = device;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index a0eae24ca9e6..162809140f68 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -17,13 +17,13 @@
/*
* fujitsu-laptop.c - Fujitsu laptop support, providing access to additional
* features made available on a range of Fujitsu laptops including the
- * P2xxx/P5xxx/S6xxx/S7xxx series.
+ * P2xxx/P5xxx/S2xxx/S6xxx/S7xxx series.
*
* This driver implements a vendor-specific backlight control interface for
* Fujitsu laptops and provides support for hotkeys present on certain Fujitsu
* laptops.
*
- * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
+ * This driver has been tested on a Fujitsu Lifebook S2110, S6410, S7020 and
* P8010. It should work on most P-series and S-series Lifebooks, but
* YMMV.
*
@@ -107,7 +107,11 @@
#define KEY2_CODE 0x411
#define KEY3_CODE 0x412
#define KEY4_CODE 0x413
-#define KEY5_CODE 0x420
+#define KEY5_CODE 0x414
+#define KEY6_CODE 0x415
+#define KEY7_CODE 0x416
+#define KEY8_CODE 0x417
+#define KEY9_CODE 0x420
/* Hotkey ringbuffer limits */
#define MAX_HOTKEY_RINGBUFFER_SIZE 100
@@ -560,7 +564,7 @@ static const struct key_entry keymap_default[] = {
{ KE_KEY, KEY2_CODE, { KEY_PROG2 } },
{ KE_KEY, KEY3_CODE, { KEY_PROG3 } },
{ KE_KEY, KEY4_CODE, { KEY_PROG4 } },
- { KE_KEY, KEY5_CODE, { KEY_RFKILL } },
+ { KE_KEY, KEY9_CODE, { KEY_RFKILL } },
/* Soft keys read from status flags */
{ KE_KEY, FLAG_RFKILL, { KEY_RFKILL } },
{ KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } },
@@ -584,6 +588,18 @@ static const struct key_entry keymap_p8010[] = {
{ KE_END, 0 }
};
+static const struct key_entry keymap_s2110[] = {
+ { KE_KEY, KEY1_CODE, { KEY_PROG1 } }, /* "A" */
+ { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, /* "B" */
+ { KE_KEY, KEY3_CODE, { KEY_WWW } }, /* "Internet" */
+ { KE_KEY, KEY4_CODE, { KEY_EMAIL } }, /* "E-mail" */
+ { KE_KEY, KEY5_CODE, { KEY_STOPCD } },
+ { KE_KEY, KEY6_CODE, { KEY_PLAYPAUSE } },
+ { KE_KEY, KEY7_CODE, { KEY_PREVIOUSSONG } },
+ { KE_KEY, KEY8_CODE, { KEY_NEXTSONG } },
+ { KE_END, 0 }
+};
+
static const struct key_entry *keymap = keymap_default;
static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id)
@@ -621,6 +637,15 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = {
},
.driver_data = (void *)keymap_p8010
},
+ {
+ .callback = fujitsu_laptop_dmi_keymap_override,
+ .ident = "Fujitsu LifeBook S2110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S2110"),
+ },
+ .driver_data = (void *)keymap_s2110
+ },
{}
};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ede483573fe0..b5e4da6a6779 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -15,6 +15,7 @@
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/i8042.h>
@@ -267,6 +268,20 @@ static void ideapad_shared_exit(struct ideapad_private *priv)
*/
#define IDEAPAD_EC_TIMEOUT 200 /* in ms */
+/*
+ * Some models (e.g., ThinkBook since 2024) have a low tolerance for being
+ * polled too frequently. Doing so may break the state machine in the EC,
+ * resulting in a hard shutdown.
+ *
+ * It is also observed that frequent polls may disturb the ongoing operation
+ * and notably delay the availability of EC response.
+ *
+ * These values are used as the delay before the first poll and the interval
+ * between subsequent polls to solve the above issues.
+ */
+#define IDEAPAD_EC_POLL_MIN_US 150
+#define IDEAPAD_EC_POLL_MAX_US 300
+
static int eval_int(acpi_handle handle, const char *name, unsigned long *res)
{
unsigned long long result;
@@ -383,7 +398,7 @@ static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *da
end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
while (time_before(jiffies, end_jiffies)) {
- schedule();
+ usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US);
err = eval_vpcr(handle, 1, &val);
if (err)
@@ -414,7 +429,7 @@ static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long dat
end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
while (time_before(jiffies, end_jiffies)) {
- schedule();
+ usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US);
err = eval_vpcr(handle, 1, &val);
if (err)
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
index 1ae50702bdb7..b73e582128c9 100644
--- a/drivers/platform/x86/intel/ifs/core.c
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "ifs.h"
@@ -115,13 +116,13 @@ static int __init ifs_init(void)
if (!m)
return -ENODEV;
- if (rdmsrl_safe(MSR_IA32_CORE_CAPS, &msrval))
+ if (rdmsrq_safe(MSR_IA32_CORE_CAPS, &msrval))
return -ENODEV;
if (!(msrval & MSR_IA32_CORE_CAPS_INTEGRITY_CAPS))
return -ENODEV;
- if (rdmsrl_safe(MSR_INTEGRITY_CAPS, &msrval))
+ if (rdmsrq_safe(MSR_INTEGRITY_CAPS, &msrval))
return -ENODEV;
ifs_pkg_auth = kmalloc_array(topology_max_packages(), sizeof(bool), GFP_KERNEL);
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index de54bd1a5970..50f1fdf7dfed 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -5,6 +5,7 @@
#include <linux/sizes.h>
#include <asm/cpu.h>
#include <asm/microcode.h>
+#include <asm/msr.h>
#include "ifs.h"
@@ -127,8 +128,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
ifsd = ifs_get_data(dev);
msrs = ifs_get_test_msrs(dev);
/* run scan hash copy */
- wrmsrl(msrs->copy_hashes, ifs_hash_ptr);
- rdmsrl(msrs->copy_hashes_status, hashes_status.data);
+ wrmsrq(msrs->copy_hashes, ifs_hash_ptr);
+ rdmsrq(msrs->copy_hashes_status, hashes_status.data);
/* enumerate the scan image information */
num_chunks = hashes_status.num_chunks;
@@ -149,8 +150,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work)
linear_addr = base + i * chunk_size;
linear_addr |= i;
- wrmsrl(msrs->copy_chunks, linear_addr);
- rdmsrl(msrs->copy_chunks_status, chunk_status.data);
+ wrmsrq(msrs->copy_chunks, linear_addr);
+ rdmsrq(msrs->copy_chunks_status, chunk_status.data);
ifsd->valid_chunks = chunk_status.valid_chunks;
err_code = chunk_status.error_code;
@@ -195,8 +196,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
msrs = ifs_get_test_msrs(dev);
if (need_copy_scan_hashes(ifsd)) {
- wrmsrl(msrs->copy_hashes, ifs_hash_ptr);
- rdmsrl(msrs->copy_hashes_status, hashes_status.data);
+ wrmsrq(msrs->copy_hashes, ifs_hash_ptr);
+ rdmsrq(msrs->copy_hashes_status, hashes_status.data);
/* enumerate the scan image information */
chunk_size = hashes_status.chunk_size * SZ_1K;
@@ -216,8 +217,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
}
if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) {
- wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE);
- rdmsrl(msrs->copy_chunks_status, chunk_status.data);
+ wrmsrq(msrs->test_ctrl, INVALIDATE_STRIDE);
+ rdmsrq(msrs->copy_chunks_status, chunk_status.data);
if (chunk_status.valid_chunks != 0) {
dev_err(dev, "Couldn't invalidate installed stride - %d\n",
chunk_status.valid_chunks);
@@ -238,9 +239,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev)
chunk_table[1] = linear_addr;
do {
local_irq_disable();
- wrmsrl(msrs->copy_chunks, (u64)chunk_table);
+ wrmsrq(msrs->copy_chunks, (u64)chunk_table);
local_irq_enable();
- rdmsrl(msrs->copy_chunks_status, chunk_status.data);
+ rdmsrq(msrs->copy_chunks_status, chunk_status.data);
err_code = chunk_status.error_code;
} while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count);
diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c
index f978dd05d4d8..dfc119d7354d 100644
--- a/drivers/platform/x86/intel/ifs/runtest.c
+++ b/drivers/platform/x86/intel/ifs/runtest.c
@@ -7,6 +7,7 @@
#include <linux/nmi.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
+#include <asm/msr.h>
#include "ifs.h"
@@ -209,8 +210,8 @@ static int doscan(void *data)
* take up to 200 milliseconds (in the case where all chunks
* are processed in a single pass) before it retires.
*/
- wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data);
- rdmsrl(MSR_SCAN_STATUS, status.data);
+ wrmsrq(MSR_ACTIVATE_SCAN, params->activate->data);
+ rdmsrq(MSR_SCAN_STATUS, status.data);
trace_ifs_status(ifsd->cur_batch, start, stop, status.data);
@@ -321,9 +322,9 @@ static int do_array_test(void *data)
first = cpumask_first(cpu_smt_mask(cpu));
if (cpu == first) {
- wrmsrl(MSR_ARRAY_BIST, command->data);
+ wrmsrq(MSR_ARRAY_BIST, command->data);
/* Pass back the result of the test */
- rdmsrl(MSR_ARRAY_BIST, command->data);
+ rdmsrq(MSR_ARRAY_BIST, command->data);
}
return 0;
@@ -374,8 +375,8 @@ static int do_array_test_gen1(void *status)
first = cpumask_first(cpu_smt_mask(cpu));
if (cpu == first) {
- wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS);
- rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status));
+ wrmsrq(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS);
+ rdmsrq(MSR_ARRAY_STATUS, *((u64 *)status));
}
return 0;
@@ -526,8 +527,8 @@ static int dosbaf(void *data)
* starts scan of each requested bundle. The core test happens
* during the "execution" of the WRMSR.
*/
- wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data);
- rdmsrl(MSR_SBAF_STATUS, status.data);
+ wrmsrq(MSR_ACTIVATE_SBAF, run_params->activate->data);
+ rdmsrq(MSR_SBAF_STATUS, status.data);
trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status);
/* Pass back the result of the test */
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 3b48cd7a4075..9bc24ed19c64 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -23,7 +23,7 @@
* ACPI mechanisms, this is not a real GPIO at all.
*
* This driver will bind to the INT0002 device, and register as a GPIO
- * controller, letting gpiolib-acpi.c call the _L02 handler as it would
+ * controller, letting gpiolib-acpi call the _L02 handler as it would
* for a real GPIO controller.
*/
@@ -65,9 +65,10 @@ static int int0002_gpio_get(struct gpio_chip *chip, unsigned int offset)
return 0;
}
-static void int0002_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int int0002_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
+ return 0;
}
static int int0002_gpio_direction_output(struct gpio_chip *chip,
@@ -192,7 +193,7 @@ static int int0002_probe(struct platform_device *pdev)
chip->parent = dev;
chip->owner = THIS_MODULE;
chip->get = int0002_gpio_get;
- chip->set = int0002_gpio_set;
+ chip->set_rv = int0002_gpio_set;
chip->direction_input = int0002_gpio_get;
chip->direction_output = int0002_gpio_direction_output;
chip->base = -1;
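The int0002 change follows gpiolib's move from the void .set() callback to the int-returning .set_rv(). A minimal sketch of the new-style callback and its registration (my_gpio_set and my_chip_init are illustrative names):

#include <linux/gpio/driver.h>

/* New-style setter: returns 0 on success or a negative errno. */
static int my_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	/* A virtual pin with nothing to write, as in int0002. */
	return 0;
}

static void my_chip_init(struct gpio_chip *chip)
{
	chip->set_rv = my_gpio_set;	/* replaces the legacy chip->set */
}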
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
index a8aba07bf1dc..103661e6685d 100644
--- a/drivers/platform/x86/intel/int3472/Makefile
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -1,7 +1,8 @@
obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \
intel_skl_int3472_tps68470.o \
intel_skl_int3472_common.o
-intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o
+intel_skl_int3472_discrete-y := discrete.o discrete_quirks.o \
+ clk_and_regulator.o led.o
intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o
intel_skl_int3472_common-y += common.o
diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
index 16e36ac0a7b4..476ec24d3702 100644
--- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
@@ -5,13 +5,11 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
-#include <linux/dmi.h>
#include <linux/gpio/consumer.h>
+#include <linux/platform_data/x86/int3472.h>
#include <linux/regulator/driver.h>
#include <linux/slab.h>
-#include "common.h"
-
/*
* 82c0d13a-78c5-4244-9bb1-eb8b539a8d11
* This _DSM GUID allows controlling the sensor clk when it is not controlled
@@ -118,7 +116,7 @@ static const struct clk_ops skl_int3472_clock_ops = {
.recalc_rate = skl_int3472_clk_recalc_rate,
};
-int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472)
+static int skl_int3472_register_clock(struct int3472_discrete_device *int3472)
{
struct acpi_device *adev = int3472->adev;
struct clk_init_data init = {
@@ -127,12 +125,6 @@ int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472)
};
int ret;
- if (int3472->clock.cl)
- return 0; /* A GPIO controlled clk has already been registered */
-
- if (!acpi_check_dsm(adev->handle, &img_clk_guid, 0, BIT(1)))
- return 0; /* DSM clock control is not available */
-
init.name = kasprintf(GFP_KERNEL, "%s-clk", acpi_dev_name(adev));
if (!init.name)
return -ENOMEM;
@@ -161,51 +153,26 @@ out_free_init_name:
return ret;
}
+int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472)
+{
+ if (int3472->clock.cl)
+ return 0; /* A GPIO controlled clk has already been registered */
+
+ if (!acpi_check_dsm(int3472->adev->handle, &img_clk_guid, 0, BIT(1)))
+ return 0; /* DSM clock control is not available */
+
+ return skl_int3472_register_clock(int3472);
+}
+
int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472,
struct gpio_desc *gpio)
{
- struct clk_init_data init = {
- .ops = &skl_int3472_clock_ops,
- .flags = CLK_GET_RATE_NOCACHE,
- };
- int ret;
-
if (int3472->clock.cl)
return -EBUSY;
int3472->clock.ena_gpio = gpio;
- init.name = kasprintf(GFP_KERNEL, "%s-clk",
- acpi_dev_name(int3472->adev));
- if (!init.name)
- return -ENOMEM;
-
- int3472->clock.frequency = skl_int3472_get_clk_frequency(int3472);
-
- int3472->clock.clk_hw.init = &init;
- int3472->clock.clk = clk_register(&int3472->adev->dev,
- &int3472->clock.clk_hw);
- if (IS_ERR(int3472->clock.clk)) {
- ret = PTR_ERR(int3472->clock.clk);
- goto out_free_init_name;
- }
-
- int3472->clock.cl = clkdev_create(int3472->clock.clk, NULL,
- int3472->sensor_name);
- if (!int3472->clock.cl) {
- ret = -ENOMEM;
- goto err_unregister_clk;
- }
-
- kfree(init.name);
- return 0;
-
-err_unregister_clk:
- clk_unregister(int3472->clock.clk);
-out_free_init_name:
- kfree(init.name);
-
- return ret;
+ return skl_int3472_register_clock(int3472);
}
void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472)
@@ -215,100 +182,78 @@ void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472)
clkdev_drop(int3472->clock.cl);
clk_unregister(int3472->clock.clk);
+ gpiod_put(int3472->clock.ena_gpio);
}
-/*
- * The INT3472 device is going to be the only supplier of a regulator for
- * the sensor device. But unlike the clk framework the regulator framework
- * does not allow matching by consumer-device-name only.
- *
- * Ideally all sensor drivers would use "avdd" as supply-id. But for drivers
- * where this cannot be changed because another supply-id is already used in
- * e.g. DeviceTree files an alias for the other supply-id can be added here.
- *
- * Do not forget to update GPIO_REGULATOR_SUPPLY_MAP_COUNT when changing this.
- */
-static const char * const skl_int3472_regulator_map_supplies[] = {
- "avdd",
- "AVDD",
-};
-
-static_assert(ARRAY_SIZE(skl_int3472_regulator_map_supplies) ==
- GPIO_REGULATOR_SUPPLY_MAP_COUNT);
-
-/*
- * On some models there is a single GPIO regulator which is shared between
- * sensors and only listed in the ACPI resources of one sensor.
- * This DMI table contains the name of the second sensor. This is used to add
- * entries for the second sensor to the supply_map.
- */
-static const struct dmi_system_id skl_int3472_regulator_second_sensor[] = {
- {
- /* Lenovo Miix 510-12IKB */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 510-12IKB"),
- },
- .driver_data = "i2c-OVTI2680:00",
- },
- { }
-};
-
int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
- struct gpio_desc *gpio)
+ struct gpio_desc *gpio,
+ unsigned int enable_time,
+ const char *supply_name,
+ const char *second_sensor)
{
struct regulator_init_data init_data = { };
+ struct int3472_gpio_regulator *regulator;
struct regulator_config cfg = { };
- const char *second_sensor = NULL;
- const struct dmi_system_id *id;
int i, j;
- id = dmi_first_match(skl_int3472_regulator_second_sensor);
- if (id)
- second_sensor = id->driver_data;
+ if (int3472->n_regulator_gpios >= INT3472_MAX_REGULATORS) {
+ dev_err(int3472->dev, "Too many regulators mapped\n");
+ return -EINVAL;
+ }
+
+ if (strlen(supply_name) >= GPIO_SUPPLY_NAME_LENGTH) {
+ dev_err(int3472->dev, "supply-name '%s' length too long\n", supply_name);
+ return -E2BIG;
+ }
+
+ regulator = &int3472->regulators[int3472->n_regulator_gpios];
+ string_upper(regulator->supply_name_upper, supply_name);
+
+ /* The below code assumes that map-count is 2 (upper- and lower-case) */
+ static_assert(GPIO_REGULATOR_SUPPLY_MAP_COUNT == 2);
- for (i = 0, j = 0; i < ARRAY_SIZE(skl_int3472_regulator_map_supplies); i++) {
- int3472->regulator.supply_map[j].supply = skl_int3472_regulator_map_supplies[i];
- int3472->regulator.supply_map[j].dev_name = int3472->sensor_name;
+ for (i = 0, j = 0; i < GPIO_REGULATOR_SUPPLY_MAP_COUNT; i++) {
+ const char *supply = i ? regulator->supply_name_upper : supply_name;
+
+ regulator->supply_map[j].supply = supply;
+ regulator->supply_map[j].dev_name = int3472->sensor_name;
j++;
if (second_sensor) {
- int3472->regulator.supply_map[j].supply =
- skl_int3472_regulator_map_supplies[i];
- int3472->regulator.supply_map[j].dev_name = second_sensor;
+ regulator->supply_map[j].supply = supply;
+ regulator->supply_map[j].dev_name = second_sensor;
j++;
}
}
init_data.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
- init_data.consumer_supplies = int3472->regulator.supply_map;
+ init_data.consumer_supplies = regulator->supply_map;
init_data.num_consumer_supplies = j;
- snprintf(int3472->regulator.regulator_name,
- sizeof(int3472->regulator.regulator_name), "%s-regulator",
- acpi_dev_name(int3472->adev));
- snprintf(int3472->regulator.supply_name,
- GPIO_REGULATOR_SUPPLY_NAME_LENGTH, "supply-0");
-
- int3472->regulator.rdesc = INT3472_REGULATOR(
- int3472->regulator.regulator_name,
- int3472->regulator.supply_name,
- &int3472_gpio_regulator_ops);
+ snprintf(regulator->regulator_name, sizeof(regulator->regulator_name), "%s-%s",
+ acpi_dev_name(int3472->adev), supply_name);
- int3472->regulator.gpio = gpio;
+ regulator->rdesc = INT3472_REGULATOR(regulator->regulator_name,
+ &int3472_gpio_regulator_ops,
+ enable_time, GPIO_REGULATOR_OFF_ON_DELAY);
cfg.dev = &int3472->adev->dev;
cfg.init_data = &init_data;
- cfg.ena_gpiod = int3472->regulator.gpio;
+ cfg.ena_gpiod = gpio;
- int3472->regulator.rdev = regulator_register(int3472->dev,
- &int3472->regulator.rdesc,
- &cfg);
+ regulator->rdev = regulator_register(int3472->dev, &regulator->rdesc, &cfg);
+ if (IS_ERR(regulator->rdev))
+ return PTR_ERR(regulator->rdev);
- return PTR_ERR_OR_ZERO(int3472->regulator.rdev);
+ int3472->regulators[int3472->n_regulator_gpios].ena_gpio = gpio;
+ int3472->n_regulator_gpios++;
+ return 0;
}
void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472)
{
- regulator_unregister(int3472->regulator.rdev);
+ for (int i = 0; i < int3472->n_regulator_gpios; i++) {
+ regulator_unregister(int3472->regulators[i].rdev);
+ gpiod_put(int3472->regulators[i].ena_gpio);
+ }
}
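The regulator rework above replaces the single hard-coded "avdd"/"AVDD" pair with a per-regulator supply name that is duplicated in upper case, and optionally duplicated again for a second sensor. A standalone sketch of that map expansion (names and sizes are illustrative):

#include <linux/regulator/machine.h>

/*
 * Build consumer-supply entries the way the loop above does: one entry
 * per case variant of the supply name, and a second pair when another
 * sensor shares the regulator. Returns the number of entries filled.
 */
static int build_supply_map(struct regulator_consumer_supply *map,
			    const char *supply, const char *supply_upper,
			    const char *sensor, const char *second_sensor)
{
	int i, j = 0;

	for (i = 0; i < 2; i++) {
		const char *s = i ? supply_upper : supply;

		map[j].supply = s;
		map[j].dev_name = sensor;
		j++;
		if (second_sensor) {
			map[j].supply = s;
			map[j].dev_name = second_sensor;
			j++;
		}
	}
	return j;
}

With "avdd", one sensor and no second sensor this yields two entries, matching GPIO_REGULATOR_SUPPLY_MAP_COUNT.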
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
index 1638be8fa71e..6dc38d5cbd0b 100644
--- a/drivers/platform/x86/intel/int3472/common.c
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -2,10 +2,9 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
+#include <linux/platform_data/x86/int3472.h>
#include <linux/slab.h>
-#include "common.h"
-
union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -29,7 +28,7 @@ union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev, char *i
return obj;
}
-EXPORT_SYMBOL_GPL(skl_int3472_get_acpi_buffer);
+EXPORT_SYMBOL_NS_GPL(skl_int3472_get_acpi_buffer, "INTEL_INT3472");
int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb)
{
@@ -53,7 +52,7 @@ out_free_obj:
kfree(obj);
return ret;
}
-EXPORT_SYMBOL_GPL(skl_int3472_fill_cldb);
+EXPORT_SYMBOL_NS_GPL(skl_int3472_fill_cldb, "INTEL_INT3472");
/* sensor_adev_ret may be NULL, name_ret must not be NULL */
int skl_int3472_get_sensor_adev_and_name(struct device *dev,
@@ -84,7 +83,7 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
return ret;
}
-EXPORT_SYMBOL_GPL(skl_int3472_get_sensor_adev_and_name);
+EXPORT_SYMBOL_NS_GPL(skl_int3472_get_sensor_adev_and_name, "INTEL_INT3472");
MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Device Driver library");
MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
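These exports move into the "INTEL_INT3472" symbol namespace, so any module calling them must import that namespace or modpost will refuse to link it. A minimal provider/consumer pairing sketch (my_helper is an invented symbol):

#include <linux/module.h>

/* Provider module: export the symbol into a named namespace. */
int my_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(my_helper, "INTEL_INT3472");

/* Consumer module: declare the namespace import once, at file scope. */
MODULE_IMPORT_NS("INTEL_INT3472");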
diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h
deleted file mode 100644
index 145dec66df64..000000000000
--- a/drivers/platform/x86/intel/int3472/common.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Author: Dan Scally <djrscally@gmail.com> */
-
-#ifndef _INTEL_SKL_INT3472_H
-#define _INTEL_SKL_INT3472_H
-
-#include <linux/clk-provider.h>
-#include <linux/gpio/machine.h>
-#include <linux/leds.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/types.h>
-
-/* FIXME drop this once the I2C_DEV_NAME_FORMAT macro has been added to include/linux/i2c.h */
-#ifndef I2C_DEV_NAME_FORMAT
-#define I2C_DEV_NAME_FORMAT "i2c-%s"
-#endif
-
-/* PMIC GPIO Types */
-#define INT3472_GPIO_TYPE_RESET 0x00
-#define INT3472_GPIO_TYPE_POWERDOWN 0x01
-#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b
-#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
-#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
-
-#define INT3472_PDEV_MAX_NAME_LEN 23
-#define INT3472_MAX_SENSOR_GPIOS 3
-
-#define GPIO_REGULATOR_NAME_LENGTH 21
-#define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9
-#define GPIO_REGULATOR_SUPPLY_MAP_COUNT 2
-
-#define INT3472_LED_MAX_NAME_LEN 32
-
-#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86
-
-#define INT3472_REGULATOR(_name, _supply, _ops) \
- (const struct regulator_desc) { \
- .name = _name, \
- .supply_name = _supply, \
- .type = REGULATOR_VOLTAGE, \
- .ops = _ops, \
- .owner = THIS_MODULE, \
- }
-
-#define to_int3472_clk(hw) \
- container_of(hw, struct int3472_clock, clk_hw)
-
-#define to_int3472_device(clk) \
- container_of(clk, struct int3472_discrete_device, clock)
-
-struct acpi_device;
-struct i2c_client;
-struct platform_device;
-
-struct int3472_cldb {
- u8 version;
- /*
- * control logic type
- * 0: UNKNOWN
- * 1: DISCRETE(CRD-D)
- * 2: PMIC TPS68470
- * 3: PMIC uP6641
- */
- u8 control_logic_type;
- u8 control_logic_id;
- u8 sensor_card_sku;
- u8 reserved[10];
- u8 clock_source;
- u8 reserved2[17];
-};
-
-struct int3472_discrete_device {
- struct acpi_device *adev;
- struct device *dev;
- struct acpi_device *sensor;
- const char *sensor_name;
-
- const struct int3472_sensor_config *sensor_config;
-
- struct int3472_gpio_regulator {
- /* SUPPLY_MAP_COUNT * 2 to make room for second sensor mappings */
- struct regulator_consumer_supply supply_map[GPIO_REGULATOR_SUPPLY_MAP_COUNT * 2];
- char regulator_name[GPIO_REGULATOR_NAME_LENGTH];
- char supply_name[GPIO_REGULATOR_SUPPLY_NAME_LENGTH];
- struct gpio_desc *gpio;
- struct regulator_dev *rdev;
- struct regulator_desc rdesc;
- } regulator;
-
- struct int3472_clock {
- struct clk *clk;
- struct clk_hw clk_hw;
- struct clk_lookup *cl;
- struct gpio_desc *ena_gpio;
- u32 frequency;
- u8 imgclk_index;
- } clock;
-
- struct int3472_pled {
- struct led_classdev classdev;
- struct led_lookup_data lookup;
- char name[INT3472_LED_MAX_NAME_LEN];
- struct gpio_desc *gpio;
- } pled;
-
- unsigned int ngpios; /* how many GPIOs have we seen */
- unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
- struct gpiod_lookup_table gpios;
-};
-
-union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
- char *id);
-int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
-int skl_int3472_get_sensor_adev_and_name(struct device *dev,
- struct acpi_device **sensor_adev_ret,
- const char **name_ret);
-
-int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472,
- struct gpio_desc *gpio);
-int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472);
-void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
-
-int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
- struct gpio_desc *gpio);
-void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472);
-
-int skl_int3472_register_pled(struct int3472_discrete_device *int3472, struct gpio_desc *gpio);
-void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472);
-
-#endif
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 30ff8f3ea1f5..4c0aed6e626f 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -5,18 +5,18 @@
#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
+#include <linux/platform_data/x86/int3472.h>
#include <linux/platform_device.h>
#include <linux/string_choices.h>
#include <linux/uuid.h>
-#include "common.h"
-
/*
* 79234640-9e10-4fea-a5c1-b5aa8b19756f
* This _DSM GUID returns information about the GPIO lines mapped to a
@@ -117,7 +117,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return ERR_PTR(ret);
gpiod_add_lookup_table(lookup);
- desc = devm_gpiod_get(int3472->dev, con_id, GPIOD_OUT_LOW);
+ desc = gpiod_get(int3472->dev, con_id, GPIOD_OUT_LOW);
gpiod_remove_lookup_table(lookup);
return desc;
@@ -142,12 +142,16 @@ struct int3472_gpio_map {
};
static const struct int3472_gpio_map int3472_gpio_map[] = {
+ /* mt9m114 designs declare a powerdown pin which controls the regulators */
+ { "INT33F0", INT3472_GPIO_TYPE_POWERDOWN, INT3472_GPIO_TYPE_POWER_ENABLE, false, "vdd" },
+ /* ov7251 driver / DT-bindings expect "enable" as con_id for reset */
{ "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
};
-static void int3472_get_con_id_and_polarity(struct acpi_device *adev, u8 *type,
+static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3472, u8 *type,
const char **con_id, unsigned long *gpio_flags)
{
+ struct acpi_device *adev = int3472->sensor;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(int3472_gpio_map); i++) {
@@ -162,6 +166,9 @@ static void int3472_get_con_id_and_polarity(struct acpi_device *adev, u8 *type,
if (!acpi_dev_hid_uid_match(adev, int3472_gpio_map[i].hid, NULL))
continue;
+ dev_dbg(int3472->dev, "mapping type 0x%02x pin to 0x%02x %s\n",
+ *type, int3472_gpio_map[i].type_to, int3472_gpio_map[i].con_id);
+
*type = int3472_gpio_map[i].type_to;
*gpio_flags = int3472_gpio_map[i].polarity_low ?
GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
@@ -187,7 +194,11 @@ static void int3472_get_con_id_and_polarity(struct acpi_device *adev, u8 *type,
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
- *con_id = "power-enable";
+ *con_id = "avdd";
+ *gpio_flags = GPIO_ACTIVE_HIGH;
+ break;
+ case INT3472_GPIO_TYPE_HANDSHAKE:
+ *con_id = "dvdd";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
default:
@@ -262,7 +273,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
- int3472_get_con_id_and_polarity(int3472->sensor, &type, &con_id, &gpio_flags);
+ int3472_get_con_id_and_polarity(int3472, &type, &con_id, &gpio_flags);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
/* Pin field is not really used under Windows and wraps around at 8 bits */
@@ -289,6 +300,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
case INT3472_GPIO_TYPE_CLK_ENABLE:
case INT3472_GPIO_TYPE_PRIVACY_LED:
case INT3472_GPIO_TYPE_POWER_ENABLE:
+ case INT3472_GPIO_TYPE_HANDSHAKE:
gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, con_id, gpio_flags);
if (IS_ERR(gpio)) {
ret = PTR_ERR(gpio);
@@ -310,15 +322,31 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
- ret = skl_int3472_register_regulator(int3472, gpio);
+ ret = skl_int3472_register_regulator(int3472, gpio,
+ GPIO_REGULATOR_ENABLE_TIME,
+ con_id,
+ int3472->quirks.avdd_second_sensor);
if (ret)
- err_msg = "Failed to map regulator to sensor\n";
+ err_msg = "Failed to map power-enable to sensor\n";
+
+ break;
+ case INT3472_GPIO_TYPE_HANDSHAKE:
+ /* Setups using a handshake pin need a 25 ms enable delay */
+ ret = skl_int3472_register_regulator(int3472, gpio,
+ 25 * USEC_PER_MSEC,
+ con_id, NULL);
+ if (ret)
+ err_msg = "Failed to map handshake to sensor\n";
break;
default: /* Never reached */
ret = -EINVAL;
break;
}
+
+ if (ret)
+ gpiod_put(gpio);
+
break;
default:
dev_warn(int3472->dev,
@@ -338,7 +366,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
return 1;
}
-static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
+int int3472_discrete_parse_crs(struct int3472_discrete_device *int3472)
{
LIST_HEAD(resource_list);
int ret;
@@ -363,28 +391,39 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
return 0;
}
+EXPORT_SYMBOL_NS_GPL(int3472_discrete_parse_crs, "INTEL_INT3472_DISCRETE");
-static void skl_int3472_discrete_remove(struct platform_device *pdev)
+void int3472_discrete_cleanup(struct int3472_discrete_device *int3472)
{
- struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
-
gpiod_remove_lookup_table(&int3472->gpios);
skl_int3472_unregister_clock(int3472);
skl_int3472_unregister_pled(int3472);
skl_int3472_unregister_regulator(int3472);
}
+EXPORT_SYMBOL_NS_GPL(int3472_discrete_cleanup, "INTEL_INT3472_DISCRETE");
+
+static void skl_int3472_discrete_remove(struct platform_device *pdev)
+{
+ int3472_discrete_cleanup(platform_get_drvdata(pdev));
+}
static int skl_int3472_discrete_probe(struct platform_device *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ const struct int3472_discrete_quirks *quirks = NULL;
struct int3472_discrete_device *int3472;
+ const struct dmi_system_id *id;
struct int3472_cldb cldb;
int ret;
if (!adev)
return -ENODEV;
+ id = dmi_first_match(skl_int3472_discrete_quirks);
+ if (id)
+ quirks = id->driver_data;
+
ret = skl_int3472_fill_cldb(adev, &cldb);
if (ret) {
dev_err(&pdev->dev, "Couldn't fill CLDB structure\n");
@@ -408,6 +447,9 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, int3472);
int3472->clock.imgclk_index = cldb.clock_source;
+ if (quirks)
+ int3472->quirks = *quirks;
+
ret = skl_int3472_get_sensor_adev_and_name(&pdev->dev, &int3472->sensor,
&int3472->sensor_name);
if (ret)
@@ -419,7 +461,7 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
*/
INIT_LIST_HEAD(&int3472->gpios.list);
- ret = skl_int3472_parse_crs(int3472);
+ ret = int3472_discrete_parse_crs(int3472);
if (ret) {
skl_int3472_discrete_remove(pdev);
return ret;
@@ -448,3 +490,4 @@ module_platform_driver(int3472_discrete);
MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI Discrete Device Driver");
MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("INTEL_INT3472");
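The con_id remapping above (e.g. reset → "enable" for the ov7251, power-enable → "avdd") exists so the sensor driver's ordinary resource lookups resolve against what INT3472 registered. A hedged consumer-side sketch of those lookups (my_sensor_probe_power is illustrative sensor-driver code, not part of INT3472):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>

static int my_sensor_probe_power(struct device *dev)
{
	struct regulator *avdd;
	struct gpio_desc *reset;
	struct clk *xvclk;

	/* Satisfied by the GPIO regulator INT3472 registered as "avdd". */
	avdd = devm_regulator_get(dev, "avdd");
	if (IS_ERR(avdd))
		return PTR_ERR(avdd);

	/* Satisfied by the remapped reset/enable GPIO lookup entry. */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* Satisfied by the clkdev lookup created against the sensor name. */
	xvclk = devm_clk_get_optional(dev, NULL);
	return PTR_ERR_OR_ZERO(xvclk);
}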
diff --git a/drivers/platform/x86/intel/int3472/discrete_quirks.c b/drivers/platform/x86/intel/int3472/discrete_quirks.c
new file mode 100644
index 000000000000..552869ef91ab
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/discrete_quirks.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Hans de Goede <hansg@kernel.org> */
+
+#include <linux/dmi.h>
+#include <linux/platform_data/x86/int3472.h>
+
+static const struct int3472_discrete_quirks lenovo_miix_510_quirks = {
+ .avdd_second_sensor = "i2c-OVTI2680:00",
+};
+
+const struct dmi_system_id skl_int3472_discrete_quirks[] = {
+ {
+ /* Lenovo Miix 510-12IKB */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 510-12IKB"),
+ },
+ .driver_data = (void *)&lenovo_miix_510_quirks,
+ },
+ { }
+};
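Adding a quirk for another machine follows the same shape; a hypothetical entry (all DMI strings and the sensor name below are invented):

#include <linux/dmi.h>
#include <linux/platform_data/x86/int3472.h>

static const struct int3472_discrete_quirks example_quirks = {
	.avdd_second_sensor = "i2c-OVTI5648:00",	/* invented sensor name */
};

/* Shape of an entry to add before the terminating { } of the table: */
static const struct dmi_system_id example_quirk_entry[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet 10"),
		},
		.driver_data = (void *)&example_quirks,
	},
	{ }
};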
diff --git a/drivers/platform/x86/intel/int3472/led.c b/drivers/platform/x86/intel/int3472/led.c
index 9cbed694e2ca..f1d6d7b0cb75 100644
--- a/drivers/platform/x86/intel/int3472/led.c
+++ b/drivers/platform/x86/intel/int3472/led.c
@@ -4,7 +4,7 @@
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/leds.h>
-#include "common.h"
+#include <linux/platform_data/x86/int3472.h>
static int int3472_pled_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
@@ -56,4 +56,5 @@ void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472)
led_remove_lookup(&int3472->pled.lookup);
led_classdev_unregister(&int3472->pled.classdev);
+ gpiod_put(int3472->pled.gpio);
}
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index 81ac4c691963..0133405697dc 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -8,10 +8,10 @@
#include <linux/mfd/tps68470.h>
#include <linux/platform_device.h>
#include <linux/platform_data/tps68470.h>
+#include <linux/platform_data/x86/int3472.h>
#include <linux/regmap.h>
#include <linux/string.h>
-#include "common.h"
#include "tps68470.h"
#define DESIGNED_FOR_CHROMEOS 1
@@ -261,4 +261,5 @@ module_i2c_driver(int3472_tps68470);
MODULE_DESCRIPTION("Intel SkyLake INT3472 ACPI TPS68470 Device Driver");
MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("INTEL_INT3472");
MODULE_SOFTDEP("pre: clk-tps68470 tps68470-regulator");
diff --git a/drivers/platform/x86/intel/pmc/Kconfig b/drivers/platform/x86/intel/pmc/Kconfig
index d2f651fbec2c..c6ef0bcf76af 100644
--- a/drivers/platform/x86/intel/pmc/Kconfig
+++ b/drivers/platform/x86/intel/pmc/Kconfig
@@ -8,6 +8,7 @@ config INTEL_PMC_CORE
depends on PCI
depends on ACPI
depends on INTEL_PMT_TELEMETRY
+ select INTEL_PMC_SSRAM_TELEMETRY
help
The Intel Platform Controller Hub for Intel Core SoCs provides access
to Power Management Controller registers via various interfaces. This
@@ -24,3 +25,6 @@ config INTEL_PMC_CORE
- SLPS0 Debug registers (Cannonlake/Icelake PCH)
- Low Power Mode registers (Tigerlake and beyond)
- PMC quirks as needed to enable SLPS0/S0ix
+
+config INTEL_PMC_SSRAM_TELEMETRY
+ tristate
diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile
index b148b40d09f5..5f68c8503a56 100644
--- a/drivers/platform/x86/intel/pmc/Makefile
+++ b/drivers/platform/x86/intel/pmc/Makefile
@@ -3,8 +3,12 @@
# Intel x86 Platform-Specific Drivers
#
-intel_pmc_core-y := core.o core_ssram.o spt.o cnp.o \
- icl.o tgl.o adl.o mtl.o arl.o lnl.o ptl.o
+intel_pmc_core-y := core.o spt.o cnp.o icl.o \
+ tgl.o adl.o mtl.o arl.o lnl.o ptl.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
intel_pmc_core_pltdrv-y := pltdrv.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o
+
+# Intel PMC SSRAM driver
+intel_pmc_ssram_telemetry-y += ssram_telemetry.o
+obj-$(CONFIG_INTEL_PMC_SSRAM_TELEMETRY) += intel_pmc_ssram_telemetry.o
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 320993bd6d31..9d66d65e7577 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -10,7 +10,6 @@
#include <linux/pci.h>
#include "core.h"
-#include "../pmt/telemetry.h"
/* PMC SSRAM PMT Telemetry GUID */
#define IOEP_LPM_REQ_GUID 0x5077612
@@ -651,29 +650,25 @@ static const struct pmc_reg_map arl_pchs_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
-#define PMC_DEVID_SOCM 0x777f
-#define PMC_DEVID_SOCS 0xae7f
-#define PMC_DEVID_IOEP 0x7ecf
-#define PMC_DEVID_PCHS 0x7f27
static struct pmc_info arl_pmc_info_list[] = {
{
.guid = IOEP_LPM_REQ_GUID,
- .devid = PMC_DEVID_IOEP,
+ .devid = PMC_DEVID_ARL_IOEP,
.map = &mtl_ioep_reg_map,
},
{
.guid = SOCS_LPM_REQ_GUID,
- .devid = PMC_DEVID_SOCS,
+ .devid = PMC_DEVID_ARL_SOCS,
.map = &arl_socs_reg_map,
},
{
.guid = PCHS_LPM_REQ_GUID,
- .devid = PMC_DEVID_PCHS,
+ .devid = PMC_DEVID_ARL_PCHS,
.map = &arl_pchs_reg_map,
},
{
.guid = SOCM_LPM_REQ_GUID,
- .devid = PMC_DEVID_SOCM,
+ .devid = PMC_DEVID_ARL_SOCM,
.map = &mtl_socm_reg_map,
},
{}
@@ -681,6 +676,7 @@ static struct pmc_info arl_pmc_info_list[] = {
#define ARL_NPU_PCI_DEV 0xad1d
#define ARL_GNA_PCI_DEV 0xae4c
+#define ARL_H_NPU_PCI_DEV 0x7d1d
#define ARL_H_GNA_PCI_DEV 0x774c
/*
* Set power state of select devices that do not have drivers to D3
@@ -694,7 +690,7 @@ static void arl_d3_fixup(void)
static void arl_h_d3_fixup(void)
{
- pmc_core_set_device_d3(ARL_NPU_PCI_DEV);
+ pmc_core_set_device_d3(ARL_H_NPU_PCI_DEV);
pmc_core_set_device_d3(ARL_H_GNA_PCI_DEV);
}
diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
index 2c5af158bbe2..efea4e1ba52b 100644
--- a/drivers/platform/x86/intel/pmc/cnp.c
+++ b/drivers/platform/x86/intel/pmc/cnp.c
@@ -10,6 +10,7 @@
#include <linux/smp.h>
#include <linux/suspend.h>
+#include <asm/msr.h>
#include "core.h"
/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
@@ -227,10 +228,10 @@ static void disable_c1_auto_demote(void *unused)
int cpunum = smp_processor_id();
u64 val;
- rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, val);
+ rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, val);
per_cpu(pkg_cst_config, cpunum) = val;
val &= ~NHM_C1_AUTO_DEMOTE;
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, val);
+ wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, val);
pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum, val);
}
@@ -239,7 +240,7 @@ static void restore_c1_auto_demote(void *unused)
{
int cpunum = smp_processor_id();
- wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum));
+ wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum));
pr_debug("%s: cpu:%d cst %llx\n", __func__, cpunum,
per_cpu(pkg_cst_config, cpunum));
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 7a1d11f2914f..540cd2fb0673 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -22,13 +22,14 @@
#include <linux/suspend.h>
#include <linux/units.h>
-#include <asm/cpuid.h>
+#include <asm/cpuid/api.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#include "core.h"
+#include "ssram_telemetry.h"
#include "../pmt/telemetry.h"
/* Maximum number of modes supported by platforms that have low power mode capability */
@@ -1082,7 +1083,7 @@ static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
unsigned int index;
for (index = 0; map[index].name ; index++) {
- if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
+ if (rdmsrq_safe(map[index].bit_mask, &pcstate_count))
continue;
pcstate_count *= 1000;
@@ -1345,6 +1346,198 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
}
}
+static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
+{
+ for (; list->map; ++list)
+ if (list->map == map)
+ return list->guid;
+
+ return 0;
+}
+
+/*
+ * This function retrieves low power mode requirement data from the PMC
+ * Low Power Mode (LPM) table.
+ *
+ * In telemetry space, the LPM table contains a 4 byte header followed
+ * by 8 consecutive mode blocks (one for each LPM mode). Each block
+ * has a 4 byte header followed by a set of registers that describe the
+ * IP state requirements for the given mode. The IP mapping is platform
+ * specific but the same for each block, making for easy analysis.
+ * Platforms only use a subset of the space to track the requirements
+ * for their IPs. Callers provide the requirement registers they use as
+ * a list of indices. Each requirement register is associated with an
+ * IP map that's maintained by the caller.
+ *
+ * Header
+ * +----+----------------------------+----------------------------+
+ * | 0 | REVISION | ENABLED MODES |
+ * +----+--------------+-------------+-------------+--------------+
+ *
+ * Low Power Mode 0 Block
+ * +----+--------------+-------------+-------------+--------------+
+ * | 1 | SUB ID | SIZE | MAJOR | MINOR |
+ * +----+--------------+-------------+-------------+--------------+
+ * | 2 | LPM0 Requirements 0 |
+ * +----+---------------------------------------------------------+
+ * | | ... |
+ * +----+---------------------------------------------------------+
+ * | 29 | LPM0 Requirements 27 |
+ * +----+---------------------------------------------------------+
+ *
+ * ...
+ *
+ * Low Power Mode 7 Block
+ * +----+--------------+-------------+-------------+--------------+
+ * | | SUB ID | SIZE | MAJOR | MINOR |
+ * +----+--------------+-------------+-------------+--------------+
+ * | 60 | LPM7 Requirements 0 |
+ * +----+---------------------------------------------------------+
+ * | | ... |
+ * +----+---------------------------------------------------------+
+ * | 87 | LPM7 Requirements 27 |
+ * +----+---------------------------------------------------------+
+ *
+ */
+static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct pci_dev *pcidev)
+{
+ struct telem_endpoint *ep;
+ const u8 *lpm_indices;
+ int num_maps, mode_offset = 0;
+ int ret, mode;
+ int lpm_size;
+ u32 guid;
+
+ lpm_indices = pmc->map->lpm_reg_index;
+ num_maps = pmc->map->lpm_num_maps;
+ lpm_size = LPM_MAX_NUM_MODES * num_maps;
+
+ guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
+ if (!guid)
+ return -ENXIO;
+
+ ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
+ if (IS_ERR(ep)) {
+ dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep);
+ return -EPROBE_DEFER;
+ }
+
+ pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev,
+ lpm_size * sizeof(u32),
+ GFP_KERNEL);
+ if (!pmc->lpm_req_regs) {
+ ret = -ENOMEM;
+ goto unregister_ep;
+ }
+
+ mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
+ pmc_for_each_mode(mode, pmcdev) {
+ u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps);
+ int m;
+
+ for (m = 0; m < num_maps; m++) {
+ u8 sample_id = lpm_indices[m] + mode_offset;
+
+ ret = pmt_telem_read32(ep, sample_id, req_offset, 1);
+ if (ret) {
+ dev_err(&pmcdev->pdev->dev,
+ "couldn't read Low Power Mode requirements: %d\n", ret);
+ goto unregister_ep;
+ }
+ ++req_offset;
+ }
+ mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET;
+ }
+
+unregister_ep:
+ pmt_telem_unregister_endpoint(ep);
+
+ return ret;
+}
+
+static int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev, int func)
+{
+ struct pci_dev *pcidev __free(pci_dev_put) = NULL;
+ unsigned int i;
+ int ret;
+
+ pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, func));
+ if (!pcidev)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ if (!pmcdev->pmcs[i])
+ continue;
+
+ ret = pmc_core_get_lpm_req(pmcdev, pmcdev->pmcs[i], pcidev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid)
+{
+ for (; list->map; ++list)
+ if (devid == list->devid)
+ return list->map;
+
+ return NULL;
+}
+
+static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
+{
+ struct pmc_ssram_telemetry pmc_ssram_telemetry;
+ const struct pmc_reg_map *map;
+ struct pmc *pmc;
+ int ret;
+
+ ret = pmc_ssram_telemetry_get_pmc_info(pmc_index, &pmc_ssram_telemetry);
+ if (ret)
+ return ret;
+
+ map = pmc_core_find_regmap(pmcdev->regmap_list, pmc_ssram_telemetry.devid);
+ if (!map)
+ return -ENODEV;
+
+ pmc = pmcdev->pmcs[pmc_index];
+ /* Memory for primary PMC has been allocated */
+ if (!pmc) {
+ pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
+ if (!pmc)
+ return -ENOMEM;
+ }
+
+ pmc->map = map;
+ pmc->base_addr = pmc_ssram_telemetry.base_addr;
+ pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
+
+ if (!pmc->regbase) {
+ devm_kfree(&pmcdev->pdev->dev, pmc);
+ return -ENOMEM;
+ }
+
+ pmcdev->pmcs[pmc_index] = pmc;
+
+ return 0;
+}
+
+static int pmc_core_ssram_get_reg_base(struct pmc_dev *pmcdev)
+{
+ int ret;
+
+ ret = pmc_core_pmc_add(pmcdev, PMC_IDX_MAIN);
+ if (ret)
+ return ret;
+
+ pmc_core_pmc_add(pmcdev, PMC_IDX_IOE);
+ pmc_core_pmc_add(pmcdev, PMC_IDX_PCH);
+
+ return 0;
+}
+
/*
* When supported, ssram init is used to discover all available PMCs.
* If ssram init fails, this function uses the legacy method to at least get the
@@ -1362,10 +1555,18 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
ssram = pmc_dev_info->regmap_list != NULL;
if (ssram) {
pmcdev->regmap_list = pmc_dev_info->regmap_list;
- ret = pmc_core_ssram_init(pmcdev, pmc_dev_info->pci_func);
+ ret = pmc_core_ssram_get_reg_base(pmcdev);
+ /*
+ * EAGAIN error code indicates Intel PMC SSRAM Telemetry driver
+ * has not finished probe and PMC info is not available yet. Try
+ * again later.
+ */
+ if (ret == -EAGAIN)
+ return -EPROBE_DEFER;
+
if (ret) {
dev_warn(&pmcdev->pdev->dev,
- "ssram init failed, %d, using legacy init\n", ret);
+ "Failed to get PMC info from SSRAM, %d, using legacy init\n", ret);
ssram = false;
}
}
@@ -1381,10 +1582,26 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
if (pmc_dev_info->dmu_guid)
pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid);
- if (ssram)
- return pmc_core_ssram_get_lpm_reqs(pmcdev);
+ if (ssram) {
+ ret = pmc_core_ssram_get_lpm_reqs(pmcdev, pmc_dev_info->pci_func);
+ if (ret)
+ goto unmap_regbase;
+ }
return 0;
+
+unmap_regbase:
+ for (unsigned int i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ struct pmc *pmc = pmcdev->pmcs[i];
+
+ if (pmc && pmc->regbase)
+ iounmap(pmc->regbase);
+ }
+
+ if (pmcdev->punit_ep)
+ pmt_telem_unregister_endpoint(pmcdev->punit_ep);
+
+ return ret;
}
static const struct x86_cpu_id intel_pmc_core_ids[] = {
@@ -1471,20 +1688,14 @@ static void pmc_core_clean_structure(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
struct pmc *pmc = pmcdev->pmcs[i];
- if (pmc)
+ if (pmc && pmc->regbase)
iounmap(pmc->regbase);
}
- if (pmcdev->ssram_pcidev) {
- pci_dev_put(pmcdev->ssram_pcidev);
- pci_disable_device(pmcdev->ssram_pcidev);
- }
-
if (pmcdev->punit_ep)
pmt_telem_unregister_endpoint(pmcdev->punit_ep);
platform_set_drvdata(pdev, NULL);
- mutex_destroy(&pmcdev->lock);
}
static int pmc_core_probe(struct platform_device *pdev)
@@ -1529,7 +1740,9 @@ static int pmc_core_probe(struct platform_device *pdev)
if (!pmcdev->pkgc_res_cnt)
return -ENOMEM;
- mutex_init(&pmcdev->lock);
+ ret = devm_mutex_init(&pdev->dev, &pmcdev->lock);
+ if (ret)
+ return ret;
if (pmc_dev_info->init)
ret = pmc_dev_info->init(pmcdev, pmc_dev_info);
@@ -1537,7 +1750,7 @@ static int pmc_core_probe(struct platform_device *pdev)
ret = generic_core_init(pmcdev, pmc_dev_info);
if (ret) {
- pmc_core_clean_structure(pdev);
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -1587,7 +1800,7 @@ static __maybe_unused int pmc_core_suspend(struct device *dev)
/* Save PKGC residency for checking later */
for (i = 0; i < pmcdev->num_of_pkgc; i++) {
- if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
+ if (rdmsrq_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
return -EIO;
}
@@ -1603,7 +1816,7 @@ static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
u64 deepest_pkgc_residency;
- if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
+ if (rdmsrq_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
return false;
if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
@@ -1655,7 +1868,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
for (i = 0; i < pmcdev->num_of_pkgc; i++) {
u64 pc_cnt;
- if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) {
+ if (!rdmsrq_safe(msr_map[i].bit_mask, &pc_cnt)) {
dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
msr_map[i].name, pmcdev->pkgc_res_cnt[i],
msr_map[i].name, pc_cnt);
@@ -1719,5 +1932,6 @@ static struct platform_driver pmc_core_driver = {
module_platform_driver(pmc_core_driver);
+MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");
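A worked reading of the LPM table layout documented above: with LPM_HEADER_OFFSET and LPM_MODE_OFFSET both 1 and LPM_REG_COUNT 28, the requirement registers of mode 0 occupy samples 2..29 and mode 1 starts at sample 31. A standalone sketch of the index computation that the loop performs incrementally via mode_offset, where reg stands for the platform's lpm_reg_index entry:

/* sample_id = header + (mode + 1) mode-headers + mode * LPM_REG_COUNT + reg */
#define LPM_HEADER_OFFSET	1
#define LPM_MODE_OFFSET		1
#define LPM_REG_COUNT		28

static unsigned int lpm_sample_id(unsigned int mode, unsigned int reg)
{
	return LPM_HEADER_OFFSET + (mode + 1) * LPM_MODE_OFFSET +
	       mode * LPM_REG_COUNT + reg;
}
/* lpm_sample_id(0, 0) == 2, lpm_sample_id(0, 27) == 29 and
 * lpm_sample_id(1, 0) == 31, matching the diagram's first blocks. */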
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index 945a1c440cca..4a94a4ee031e 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -24,6 +24,11 @@ struct telem_endpoint;
#define MAX_NUM_PMC 3
#define S0IX_BLK_SIZE 4
+/* PCH query */
+#define LPM_HEADER_OFFSET 1
+#define LPM_REG_COUNT 28
+#define LPM_MODE_OFFSET 1
+
/* Sunrise Point Power Management Controller PCI Device ID */
#define SPT_PMC_PCI_DEVICE_ID 0x9d21
#define SPT_PMC_BASE_ADDR_OFFSET 0x48
@@ -293,6 +298,25 @@ enum ppfear_regs {
#define PTL_PMC_LTR_CUR_PLT 0x1C2C
#define PTL_PCD_PMC_MMIO_REG_LEN 0x31A8
+/* SSRAM PMC Device ID */
+/* LNL */
+#define PMC_DEVID_LNL_SOCM 0xa87f
+
+/* PTL */
+#define PMC_DEVID_PTL_PCDH 0xe37f
+#define PMC_DEVID_PTL_PCDP 0xe47f
+
+/* ARL */
+#define PMC_DEVID_ARL_SOCM 0x777f
+#define PMC_DEVID_ARL_SOCS 0xae7f
+#define PMC_DEVID_ARL_IOEP 0x7ecf
+#define PMC_DEVID_ARL_PCHS 0x7f27
+
+/* MTL */
+#define PMC_DEVID_MTL_SOCM 0x7e7f
+#define PMC_DEVID_MTL_IOEP 0x7ecf
+#define PMC_DEVID_MTL_IOEM 0x7ebf
+
extern const char *pmc_lpm_modes[];
struct pmc_bit_map {
@@ -396,7 +420,6 @@ struct pmc {
* struct pmc_dev - pmc device structure
* @devs: pointer to an array of pmc pointers
* @pdev: pointer to platform_device struct
- * @ssram_pcidev: pointer to pci device struct for the PMC SSRAM
* @crystal_freq: crystal frequency from cpuid
* @dbgfs_dir: path to debugfs interface
* @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
@@ -416,7 +439,6 @@ struct pmc_dev {
struct pmc *pmcs[MAX_NUM_PMC];
struct dentry *dbgfs_dir;
struct platform_device *pdev;
- struct pci_dev *ssram_pcidev;
unsigned int crystal_freq;
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
@@ -485,7 +507,6 @@ extern const struct pmc_reg_map mtl_socm_reg_map;
extern const struct pmc_reg_map mtl_ioep_reg_map;
void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
-int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev);
int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
int pmc_core_resume_common(struct pmc_dev *pmcdev);
@@ -494,8 +515,6 @@ void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev);
void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid);
void pmc_core_set_device_d3(unsigned int device);
-int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func);
-
int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info);
extern struct pmc_dev_info spt_pmc_dev;
diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
deleted file mode 100644
index 739569803017..000000000000
--- a/drivers/platform/x86/intel/pmc/core_ssram.c
+++ /dev/null
@@ -1,332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This file contains functions to handle discovery of PMC metrics located
- * in the PMC SSRAM PCI device.
- *
- * Copyright (c) 2023, Intel Corporation.
- * All Rights Reserved.
- *
- */
-
-#include <linux/cleanup.h>
-#include <linux/intel_vsec.h>
-#include <linux/pci.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-#include "core.h"
-#include "../pmt/telemetry.h"
-
-#define SSRAM_HDR_SIZE 0x100
-#define SSRAM_PWRM_OFFSET 0x14
-#define SSRAM_DVSEC_OFFSET 0x1C
-#define SSRAM_DVSEC_SIZE 0x10
-#define SSRAM_PCH_OFFSET 0x60
-#define SSRAM_IOE_OFFSET 0x68
-#define SSRAM_DEVID_OFFSET 0x70
-
-/* PCH query */
-#define LPM_HEADER_OFFSET 1
-#define LPM_REG_COUNT 28
-#define LPM_MODE_OFFSET 1
-
-DEFINE_FREE(pmc_core_iounmap, void __iomem *, if (_T) iounmap(_T))
-
-static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
-{
- for (; list->map; ++list)
- if (list->map == map)
- return list->guid;
-
- return 0;
-}
-
-static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc)
-{
- struct telem_endpoint *ep;
- const u8 *lpm_indices;
- int num_maps, mode_offset = 0;
- int ret, mode;
- int lpm_size;
- u32 guid;
-
- lpm_indices = pmc->map->lpm_reg_index;
- num_maps = pmc->map->lpm_num_maps;
- lpm_size = LPM_MAX_NUM_MODES * num_maps;
-
- guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
- if (!guid)
- return -ENXIO;
-
- ep = pmt_telem_find_and_register_endpoint(pmcdev->ssram_pcidev, guid, 0);
- if (IS_ERR(ep)) {
- dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %ld",
- PTR_ERR(ep));
- return -EPROBE_DEFER;
- }
-
- pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev,
- lpm_size * sizeof(u32),
- GFP_KERNEL);
- if (!pmc->lpm_req_regs) {
- ret = -ENOMEM;
- goto unregister_ep;
- }
-
- /*
- * PMC Low Power Mode (LPM) table
- *
- * In telemetry space, the LPM table contains a 4 byte header followed
- * by 8 consecutive mode blocks (one for each LPM mode). Each block
- * has a 4 byte header followed by a set of registers that describe the
- * IP state requirements for the given mode. The IP mapping is platform
- * specific but the same for each block, making for easy analysis.
- * Platforms only use a subset of the space to track the requirements
- * for their IPs. Callers provide the requirement registers they use as
- * a list of indices. Each requirement register is associated with an
- * IP map that's maintained by the caller.
- *
- * Header
- * +----+----------------------------+----------------------------+
- * | 0 | REVISION | ENABLED MODES |
- * +----+--------------+-------------+-------------+--------------+
- *
- * Low Power Mode 0 Block
- * +----+--------------+-------------+-------------+--------------+
- * | 1 | SUB ID | SIZE | MAJOR | MINOR |
- * +----+--------------+-------------+-------------+--------------+
- * | 2 | LPM0 Requirements 0 |
- * +----+---------------------------------------------------------+
- * | | ... |
- * +----+---------------------------------------------------------+
- * | 29 | LPM0 Requirements 27 |
- * +----+---------------------------------------------------------+
- *
- * ...
- *
- * Low Power Mode 7 Block
- * +----+--------------+-------------+-------------+--------------+
- * | | SUB ID | SIZE | MAJOR | MINOR |
- * +----+--------------+-------------+-------------+--------------+
- * | 60 | LPM7 Requirements 0 |
- * +----+---------------------------------------------------------+
- * | | ... |
- * +----+---------------------------------------------------------+
- * | 87 | LPM7 Requirements 27 |
- * +----+---------------------------------------------------------+
- *
- */
- mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
- pmc_for_each_mode(mode, pmcdev) {
- u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps);
- int m;
-
- for (m = 0; m < num_maps; m++) {
- u8 sample_id = lpm_indices[m] + mode_offset;
-
- ret = pmt_telem_read32(ep, sample_id, req_offset, 1);
- if (ret) {
- dev_err(&pmcdev->pdev->dev,
- "couldn't read Low Power Mode requirements: %d\n", ret);
- devm_kfree(&pmcdev->pdev->dev, pmc->lpm_req_regs);
- goto unregister_ep;
- }
- ++req_offset;
- }
- mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET;
- }
-
-unregister_ep:
- pmt_telem_unregister_endpoint(ep);
-
- return ret;
-}
-
-int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev)
-{
- int ret, i;
-
- if (!pmcdev->ssram_pcidev)
- return -ENODEV;
-
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- if (!pmcdev->pmcs[i])
- continue;
-
- ret = pmc_core_get_lpm_req(pmcdev, pmcdev->pmcs[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void
-pmc_add_pmt(struct pmc_dev *pmcdev, u64 ssram_base, void __iomem *ssram)
-{
- struct pci_dev *pcidev = pmcdev->ssram_pcidev;
- struct intel_vsec_platform_info info = {};
- struct intel_vsec_header *headers[2] = {};
- struct intel_vsec_header header;
- void __iomem *dvsec;
- u32 dvsec_offset;
- u32 table, hdr;
-
- ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
- if (!ssram)
- return;
-
- dvsec_offset = readl(ssram + SSRAM_DVSEC_OFFSET);
- iounmap(ssram);
-
- dvsec = ioremap(ssram_base + dvsec_offset, SSRAM_DVSEC_SIZE);
- if (!dvsec)
- return;
-
- hdr = readl(dvsec + PCI_DVSEC_HEADER1);
- header.id = readw(dvsec + PCI_DVSEC_HEADER2);
- header.rev = PCI_DVSEC_HEADER1_REV(hdr);
- header.length = PCI_DVSEC_HEADER1_LEN(hdr);
- header.num_entries = readb(dvsec + INTEL_DVSEC_ENTRIES);
- header.entry_size = readb(dvsec + INTEL_DVSEC_SIZE);
-
- table = readl(dvsec + INTEL_DVSEC_TABLE);
- header.tbir = INTEL_DVSEC_TABLE_BAR(table);
- header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
- iounmap(dvsec);
-
- headers[0] = &header;
- info.caps = VSEC_CAP_TELEMETRY;
- info.headers = headers;
- info.base_addr = ssram_base;
- info.parent = &pmcdev->pdev->dev;
-
- intel_vsec_register(pcidev, &info);
-}
-
-static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid)
-{
- for (; list->map; ++list)
- if (devid == list->devid)
- return list->map;
-
- return NULL;
-}
-
-static inline u64 get_base(void __iomem *addr, u32 offset)
-{
- return lo_hi_readq(addr + offset) & GENMASK_ULL(63, 3);
-}
-
-static int
-pmc_core_pmc_add(struct pmc_dev *pmcdev, u64 pwrm_base,
- const struct pmc_reg_map *reg_map, int pmc_index)
-{
- struct pmc *pmc = pmcdev->pmcs[pmc_index];
-
- if (!pwrm_base)
- return -ENODEV;
-
- /* Memory for primary PMC has been allocated in core.c */
- if (!pmc) {
- pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
- if (!pmc)
- return -ENOMEM;
- }
-
- pmc->map = reg_map;
- pmc->base_addr = pwrm_base;
- pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
-
- if (!pmc->regbase) {
- devm_kfree(&pmcdev->pdev->dev, pmc);
- return -ENOMEM;
- }
-
- pmcdev->pmcs[pmc_index] = pmc;
-
- return 0;
-}
-
-static int
-pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset)
-{
- struct pci_dev *ssram_pcidev = pmcdev->ssram_pcidev;
- void __iomem __free(pmc_core_iounmap) *tmp_ssram = NULL;
- void __iomem __free(pmc_core_iounmap) *ssram = NULL;
- const struct pmc_reg_map *map;
- u64 ssram_base, pwrm_base;
- u16 devid;
-
- if (!pmcdev->regmap_list)
- return -ENOENT;
-
- ssram_base = ssram_pcidev->resource[0].start;
- tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
- if (!tmp_ssram)
- return -ENOMEM;
-
- if (pmc_idx != PMC_IDX_MAIN) {
- /*
- * The secondary PMC BARS (which are behind hidden PCI devices)
- * are read from fixed offsets in MMIO of the primary PMC BAR.
- * If a device is not present, the value will be 0.
- */
- ssram_base = get_base(tmp_ssram, offset);
- if (!ssram_base)
- return 0;
-
- ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
- if (!ssram)
- return -ENOMEM;
-
- } else {
- ssram = no_free_ptr(tmp_ssram);
- }
-
- pwrm_base = get_base(ssram, SSRAM_PWRM_OFFSET);
- devid = readw(ssram + SSRAM_DEVID_OFFSET);
-
- /* Find and register and PMC telemetry entries */
- pmc_add_pmt(pmcdev, ssram_base, ssram);
-
- map = pmc_core_find_regmap(pmcdev->regmap_list, devid);
- if (!map)
- return -ENODEV;
-
- return pmc_core_pmc_add(pmcdev, pwrm_base, map, pmc_idx);
-}
-
-int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func)
-{
- struct pci_dev *pcidev;
- int ret;
-
- pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, func));
- if (!pcidev)
- return -ENODEV;
-
- ret = pcim_enable_device(pcidev);
- if (ret)
- goto release_dev;
-
- pmcdev->ssram_pcidev = pcidev;
-
- ret = pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_MAIN, 0);
- if (ret)
- goto disable_dev;
-
- pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_IOE, SSRAM_IOE_OFFSET);
- pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_PCH, SSRAM_PCH_OFFSET);
-
- return 0;
-
-disable_dev:
- pmcdev->ssram_pcidev = NULL;
- pci_disable_device(pcidev);
-release_dev:
- pci_dev_put(pcidev);
-
- return ret;
-}
-MODULE_IMPORT_NS("INTEL_VSEC");
-MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index 8862829694a7..faa13a7ee688 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -10,7 +10,6 @@
#include <linux/pci.h>
#include "core.h"
-#include "../pmt/telemetry.h"
/* PMC SSRAM PMT Telemetry GUIDS */
#define SOCP_LPM_REQ_GUID 0x2625030
@@ -947,23 +946,20 @@ static const struct pmc_reg_map mtl_ioem_reg_map = {
.lpm_reg_index = MTL_LPM_REG_INDEX,
};
-#define PMC_DEVID_SOCM 0x7e7f
-#define PMC_DEVID_IOEP 0x7ecf
-#define PMC_DEVID_IOEM 0x7ebf
static struct pmc_info mtl_pmc_info_list[] = {
{
.guid = SOCP_LPM_REQ_GUID,
- .devid = PMC_DEVID_SOCM,
+ .devid = PMC_DEVID_MTL_SOCM,
.map = &mtl_socm_reg_map,
},
{
.guid = IOEP_LPM_REQ_GUID,
- .devid = PMC_DEVID_IOEP,
+ .devid = PMC_DEVID_MTL_IOEP,
.map = &mtl_ioep_reg_map,
},
{
.guid = IOEM_LPM_REQ_GUID,
- .devid = PMC_DEVID_IOEM,
+ .devid = PMC_DEVID_MTL_IOEM,
.map = &mtl_ioem_reg_map
},
{}
diff --git a/drivers/platform/x86/intel/pmc/ssram_telemetry.c b/drivers/platform/x86/intel/pmc/ssram_telemetry.c
new file mode 100644
index 000000000000..93579152188e
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/ssram_telemetry.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel PMC SSRAM Telemetry PCI Driver
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/intel_vsec.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "core.h"
+#include "ssram_telemetry.h"
+
+#define SSRAM_HDR_SIZE 0x100
+#define SSRAM_PWRM_OFFSET 0x14
+#define SSRAM_DVSEC_OFFSET 0x1C
+#define SSRAM_DVSEC_SIZE 0x10
+#define SSRAM_PCH_OFFSET 0x60
+#define SSRAM_IOE_OFFSET 0x68
+#define SSRAM_DEVID_OFFSET 0x70
+
+DEFINE_FREE(pmc_ssram_telemetry_iounmap, void __iomem *, if (_T) iounmap(_T))
+
+static struct pmc_ssram_telemetry *pmc_ssram_telems;
+static bool device_probed;
+
+static int
+pmc_ssram_telemetry_add_pmt(struct pci_dev *pcidev, u64 ssram_base, void __iomem *ssram)
+{
+ struct intel_vsec_platform_info info = {};
+ struct intel_vsec_header *headers[2] = {};
+ struct intel_vsec_header header;
+ void __iomem *dvsec;
+ u32 dvsec_offset;
+ u32 table, hdr;
+
+ dvsec_offset = readl(ssram + SSRAM_DVSEC_OFFSET);
+ dvsec = ioremap(ssram_base + dvsec_offset, SSRAM_DVSEC_SIZE);
+ if (!dvsec)
+ return -ENOMEM;
+
+ hdr = readl(dvsec + PCI_DVSEC_HEADER1);
+ header.id = readw(dvsec + PCI_DVSEC_HEADER2);
+ header.rev = PCI_DVSEC_HEADER1_REV(hdr);
+ header.length = PCI_DVSEC_HEADER1_LEN(hdr);
+ header.num_entries = readb(dvsec + INTEL_DVSEC_ENTRIES);
+ header.entry_size = readb(dvsec + INTEL_DVSEC_SIZE);
+
+ table = readl(dvsec + INTEL_DVSEC_TABLE);
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+ iounmap(dvsec);
+
+ headers[0] = &header;
+ info.caps = VSEC_CAP_TELEMETRY;
+ info.headers = headers;
+ info.base_addr = ssram_base;
+ info.parent = &pcidev->dev;
+
+ return intel_vsec_register(pcidev, &info);
+}
+
+static inline u64 get_base(void __iomem *addr, u32 offset)
+{
+ return lo_hi_readq(addr + offset) & GENMASK_ULL(63, 3);
+}
+
+static int
+pmc_ssram_telemetry_get_pmc(struct pci_dev *pcidev, unsigned int pmc_idx, u32 offset)
+{
+ void __iomem __free(pmc_ssram_telemetry_iounmap) *tmp_ssram = NULL;
+ void __iomem __free(pmc_ssram_telemetry_iounmap) *ssram = NULL;
+ u64 ssram_base, pwrm_base;
+ u16 devid;
+
+ ssram_base = pci_resource_start(pcidev, 0);
+ tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ if (!tmp_ssram)
+ return -ENOMEM;
+
+ if (pmc_idx != PMC_IDX_MAIN) {
+ /*
+ * The secondary PMC BARs (which are behind hidden PCI devices)
+ * are read from fixed offsets in MMIO of the primary PMC BAR.
+ * If a device is not present, the value will be 0.
+ */
+ ssram_base = get_base(tmp_ssram, offset);
+ if (!ssram_base)
+ return 0;
+
+ ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ if (!ssram)
+ return -ENOMEM;
+
+ } else {
+ ssram = no_free_ptr(tmp_ssram);
+ }
+
+ pwrm_base = get_base(ssram, SSRAM_PWRM_OFFSET);
+ devid = readw(ssram + SSRAM_DEVID_OFFSET);
+
+ pmc_ssram_telems[pmc_idx].devid = devid;
+ pmc_ssram_telems[pmc_idx].base_addr = pwrm_base;
+
+ /* Find and register any PMC telemetry entries */
+ return pmc_ssram_telemetry_add_pmt(pcidev, ssram_base, ssram);
+}
+
+/**
+ * pmc_ssram_telemetry_get_pmc_info() - Get a PMC devid and base_addr information
+ * @pmc_idx: Index of the PMC
+ * @pmc_ssram_telemetry: pmc_ssram_telemetry structure to store the PMC information
+ *
+ * Return:
+ * * 0 - Success
+ * * -EAGAIN - Probe function has not finished yet. Try again.
+ * * -EINVAL - Invalid pmc_idx
+ * * -ENODEV - PMC device is not available
+ */
+int pmc_ssram_telemetry_get_pmc_info(unsigned int pmc_idx,
+ struct pmc_ssram_telemetry *pmc_ssram_telemetry)
+{
+ /*
+ * PMCs are discovered in the probe function. If this function is called
+ * before the probe function completes, the result would be invalid. Use
+ * the device_probed variable to avoid this case. Return -EAGAIN to tell
+ * the consumer to call again later.
+ */
+ if (!device_probed)
+ return -EAGAIN;
+
+ /*
+ * Memory barrier is used to ensure the correct read order between
+ * device_probed variable and PMC info.
+ */
+ smp_rmb();
+ if (pmc_idx >= MAX_NUM_PMC)
+ return -EINVAL;
+
+ if (!pmc_ssram_telems || !pmc_ssram_telems[pmc_idx].devid)
+ return -ENODEV;
+
+ pmc_ssram_telemetry->devid = pmc_ssram_telems[pmc_idx].devid;
+ pmc_ssram_telemetry->base_addr = pmc_ssram_telems[pmc_idx].base_addr;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_ssram_telemetry_get_pmc_info);
+
+static int intel_pmc_ssram_telemetry_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+ int ret;
+
+ pmc_ssram_telems = devm_kzalloc(&pcidev->dev, sizeof(*pmc_ssram_telems) * MAX_NUM_PMC,
+ GFP_KERNEL);
+ if (!pmc_ssram_telems) {
+ ret = -ENOMEM;
+ goto probe_finish;
+ }
+
+ ret = pcim_enable_device(pcidev);
+ if (ret) {
+ dev_dbg(&pcidev->dev, "failed to enable PMC SSRAM device\n");
+ goto probe_finish;
+ }
+
+ ret = pmc_ssram_telemetry_get_pmc(pcidev, PMC_IDX_MAIN, 0);
+ if (ret)
+ goto probe_finish;
+
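+	/* Secondary PMCs are optional; their absence is not a probe failure */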
+ pmc_ssram_telemetry_get_pmc(pcidev, PMC_IDX_IOE, SSRAM_IOE_OFFSET);
+ pmc_ssram_telemetry_get_pmc(pcidev, PMC_IDX_PCH, SSRAM_PCH_OFFSET);
+
+probe_finish:
+	/*
+	 * The write barrier ensures the PMC info is fully written before
+	 * device_probed is set.
+	 */
+ smp_wmb();
+ device_probed = true;
+ return ret;
+}
+
+static const struct pci_device_id intel_pmc_ssram_telemetry_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_MTL_SOCM) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_ARL_SOCS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_ARL_SOCM) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_LNL_SOCM) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_PTL_PCDH) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_PTL_PCDP) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, intel_pmc_ssram_telemetry_pci_ids);
+
+static struct pci_driver intel_pmc_ssram_telemetry_driver = {
+ .name = "intel_pmc_ssram_telemetry",
+ .id_table = intel_pmc_ssram_telemetry_pci_ids,
+ .probe = intel_pmc_ssram_telemetry_probe,
+};
+module_pci_driver(intel_pmc_ssram_telemetry_driver);
+
+MODULE_IMPORT_NS("INTEL_VSEC");
+MODULE_AUTHOR("Xi Pardee <xi.pardee@intel.com>");
+MODULE_DESCRIPTION("Intel PMC SSRAM Telemetry driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/pmc/ssram_telemetry.h b/drivers/platform/x86/intel/pmc/ssram_telemetry.h
new file mode 100644
index 000000000000..daf8aeeb2275
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/ssram_telemetry.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel PMC SSRAM Telemetry PCI Driver Header File
+ *
+ * Copyright (c) 2024, Intel Corporation.
+ */
+
+#ifndef PMC_SSRAM_H
+#define PMC_SSRAM_H
+
+/**
+ * struct pmc_ssram_telemetry - Structure to keep PMC info in the SSRAM device
+ * @devid: device ID of the PMC device
+ * @base_addr: PWRM base address of the PMC device
+ */
+struct pmc_ssram_telemetry {
+ u16 devid;
+ u64 base_addr;
+};
+
+int pmc_ssram_telemetry_get_pmc_info(unsigned int pmc_idx,
+ struct pmc_ssram_telemetry *pmc_ssram_telemetry);
+
+#endif /* PMC_SSRAM_H */
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 31239a93dd71..71e104a068e9 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -21,6 +21,7 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#include "isst_if_common.h"
@@ -191,32 +192,13 @@ void isst_resume_common(void)
if (cb->registered)
isst_mbox_resume_command(cb, sst_cmd);
} else {
- wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
+ wrmsrq_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
sst_cmd->data);
}
}
}
EXPORT_SYMBOL_GPL(isst_resume_common);
-static void isst_restore_msr_local(int cpu)
-{
- struct isst_cmd *sst_cmd;
- int i;
-
- mutex_lock(&isst_hash_lock);
- for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
- if (!punit_msr_white_list[i])
- break;
-
- hash_for_each_possible(isst_hash, sst_cmd, hnode,
- punit_msr_white_list[i]) {
- if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
- wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
- }
- }
- mutex_unlock(&isst_hash_lock);
-}
-
/**
* isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
* @cmd: Pointer to the command structure to verify.
@@ -406,7 +388,7 @@ static int isst_if_cpu_online(unsigned int cpu)
isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);
- ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
+ ret = rdmsrq_safe(MSR_CPU_BUS_NUMBER, &data);
if (ret) {
/* This is not a fatal error on MSR mailbox only I/F */
isst_cpu_info[cpu].bus_info[0] = -1;
@@ -420,12 +402,12 @@ static int isst_if_cpu_online(unsigned int cpu)
if (isst_hpm_support) {
- ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
if (!ret)
goto set_punit_id;
}
- ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
+ ret = rdmsrq_safe(MSR_THREAD_ID_INFO, &data);
if (ret) {
isst_cpu_info[cpu].punit_cpu_id = -1;
return ret;
@@ -434,8 +416,6 @@ static int isst_if_cpu_online(unsigned int cpu)
set_punit_id:
isst_cpu_info[cpu].punit_cpu_id = data;
- isst_restore_msr_local(cpu);
-
return 0;
}
@@ -524,7 +504,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+ ret = wrmsrq_safe_on_cpu(msr_cmd->logical_cpu,
msr_cmd->msr,
msr_cmd->data);
*write_only = 1;
@@ -535,7 +515,7 @@ static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
} else {
u64 data;
- ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
+ ret = rdmsrq_safe_on_cpu(msr_cmd->logical_cpu,
msr_cmd->msr, &data);
if (!ret) {
msr_cmd->data = data;
@@ -831,8 +811,8 @@ static int __init isst_if_common_init(void)
u64 data;
/* Can fail only on some Skylake-X generations */
- if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
- rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data))
+ if (rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data) ||
+ rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data))
return -ENODEV;
}
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
index c4b7af00352b..22745b217c6f 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c
@@ -18,6 +18,7 @@
#include <uapi/linux/isst_if.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#include "isst_if_common.h"
@@ -39,7 +40,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
/* Poll for rb bit == 0 */
retries = OS_MAILBOX_RETRY_COUNT;
do {
- rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ rdmsrq(MSR_OS_MAILBOX_INTERFACE, data);
if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
ret = -EBUSY;
continue;
@@ -52,19 +53,19 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
return ret;
/* Write DATA register */
- wrmsrl(MSR_OS_MAILBOX_DATA, command_data);
+ wrmsrq(MSR_OS_MAILBOX_DATA, command_data);
/* Write command register */
data = BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT) |
(parameter & GENMASK_ULL(13, 0)) << 16 |
(sub_command << 8) |
command;
- wrmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ wrmsrq(MSR_OS_MAILBOX_INTERFACE, data);
/* Poll for rb bit == 0 */
retries = OS_MAILBOX_RETRY_COUNT;
do {
- rdmsrl(MSR_OS_MAILBOX_INTERFACE, data);
+ rdmsrq(MSR_OS_MAILBOX_INTERFACE, data);
if (data & BIT_ULL(MSR_OS_MAILBOX_BUSY_BIT)) {
ret = -EBUSY;
continue;
@@ -74,7 +75,7 @@ static int isst_if_send_mbox_cmd(u8 command, u8 sub_command, u32 parameter,
return -ENXIO;
if (response_data) {
- rdmsrl(MSR_OS_MAILBOX_DATA, data);
+ rdmsrq(MSR_OS_MAILBOX_DATA, data);
*response_data = data;
}
ret = 0;
@@ -176,11 +177,11 @@ static int __init isst_if_mbox_init(void)
return -ENODEV;
/* Check presence of mailbox MSRs */
- ret = rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data);
+ ret = rdmsrq_safe(MSR_OS_MAILBOX_INTERFACE, &data);
if (ret)
return ret;
- ret = rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data);
+ ret = rdmsrq_safe(MSR_OS_MAILBOX_DATA, &data);
if (ret)
return ret;
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
index 9978cdd19851..18c035710eb9 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/module.h>
+#include <asm/msr.h>
#include <uapi/linux/isst_if.h>
#include "isst_tpmi_core.h"
@@ -34,7 +35,7 @@
/* Supported SST hardware version by this driver */
#define ISST_MAJOR_VERSION 0
-#define ISST_MINOR_VERSION 1
+#define ISST_MINOR_VERSION 2
/*
* Used to indicate if value read from MMIO needs to get multiplied
@@ -380,7 +381,7 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai
return -ENODEV;
}
- if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) != ISST_MINOR_VERSION)
+ if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) > ISST_MINOR_VERSION)
dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n",
TPMI_MINOR_VERSION(pd_info->sst_header.interface_version));
@@ -556,7 +557,7 @@ static bool disable_dynamic_sst_features(void)
{
u64 value;
- rdmsrl(MSR_PM_ENABLE, value);
+ rdmsrq(MSR_PM_ENABLE, value);
return !(value & 0x1);
}
@@ -1016,6 +1017,7 @@ static int isst_if_set_perf_feature(void __user *argp)
#define SST_PP_INFO_10_OFFSET 80
#define SST_PP_INFO_11_OFFSET 88
+#define SST_PP_INFO_12_OFFSET 96
#define SST_PP_P1_SSE_START 0
#define SST_PP_P1_SSE_WIDTH 8
@@ -1068,6 +1070,15 @@ static int isst_if_set_perf_feature(void __user *argp)
#define SST_PP_CORE_RATIO_PM_FABRIC_START 48
#define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH 8
+#define SST_PP_CORE_RATIO_P0_FABRIC_1_START 0
+#define SST_PP_CORE_RATIO_P0_FABRIC_1_WIDTH 8
+
+#define SST_PP_CORE_RATIO_P1_FABRIC_1_START 8
+#define SST_PP_CORE_RATIO_P1_FABRIC_1_WIDTH 8
+
+#define SST_PP_CORE_RATIO_PM_FABRIC_1_START 16
+#define SST_PP_CORE_RATIO_PM_FABRIC_1_WIDTH 8
+
static int isst_if_get_perf_level_info(void __user *argp)
{
struct isst_perf_level_data_info perf_level;
@@ -1167,6 +1178,59 @@ static int isst_if_get_perf_level_info(void __user *argp)
return 0;
}
+static int isst_if_get_perf_level_fabric_info(void __user *argp)
+{
+ struct isst_perf_level_fabric_info perf_level_fabric;
+ struct tpmi_per_power_domain_info *power_domain_info;
+ int start = SST_PP_CORE_RATIO_P0_FABRIC_START;
+ int width = SST_PP_CORE_RATIO_P0_FABRIC_WIDTH;
+ int offset = SST_PP_INFO_11_OFFSET;
+ int i;
+
+ if (copy_from_user(&perf_level_fabric, argp, sizeof(perf_level_fabric)))
+ return -EFAULT;
+
+ power_domain_info = get_instance(perf_level_fabric.socket_id,
+ perf_level_fabric.power_domain_id);
+ if (!power_domain_info)
+ return -EINVAL;
+
+ if (perf_level_fabric.level > power_domain_info->max_level)
+ return -EINVAL;
+
+ if (power_domain_info->pp_header.feature_rev < 2)
+ return -EINVAL;
+
+ if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level_fabric.level)))
+ return -EINVAL;
+
+	/* For revision 2, the maximum number of fabrics is 2 */
+ perf_level_fabric.max_fabrics = 2;
+
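+	/* Fabric 0 fields are in PP_INFO_11; fabric 1 fields are in PP_INFO_12 */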
+ for (i = 0; i < perf_level_fabric.max_fabrics; i++) {
+ _read_pp_level_info("p0_fabric_freq_mhz", perf_level_fabric.p0_fabric_freq_mhz[i],
+ perf_level_fabric.level, offset, start, width,
+ SST_MUL_FACTOR_FREQ)
+ start += width;
+
+ _read_pp_level_info("p1_fabric_freq_mhz", perf_level_fabric.p1_fabric_freq_mhz[i],
+ perf_level_fabric.level, offset, start, width,
+ SST_MUL_FACTOR_FREQ)
+ start += width;
+
+ _read_pp_level_info("pm_fabric_freq_mhz", perf_level_fabric.pm_fabric_freq_mhz[i],
+ perf_level_fabric.level, offset, start, width,
+ SST_MUL_FACTOR_FREQ)
+ offset = SST_PP_INFO_12_OFFSET;
+ start = SST_PP_CORE_RATIO_P0_FABRIC_1_START;
+ }
+
+ if (copy_to_user(argp, &perf_level_fabric, sizeof(perf_level_fabric)))
+ return -EFAULT;
+
+ return 0;
+}
+
#define SST_PP_FUSED_CORE_COUNT_START 0
#define SST_PP_FUSED_CORE_COUNT_WIDTH 8
@@ -1328,9 +1392,14 @@ static int isst_if_get_tpmi_instance_count(void __user *argp)
#define SST_TF_INFO_0_OFFSET 0
#define SST_TF_INFO_1_OFFSET 8
#define SST_TF_INFO_2_OFFSET 16
+#define SST_TF_INFO_8_OFFSET 64
+#define SST_TF_INFO_8_BUCKETS 3
#define SST_TF_MAX_LP_CLIP_RATIOS TRL_MAX_LEVELS
+#define SST_TF_FEATURE_REV_START 4
+#define SST_TF_FEATURE_REV_WIDTH 8
+
#define SST_TF_LP_CLIP_RATIO_0_START 16
#define SST_TF_LP_CLIP_RATIO_0_WIDTH 8
@@ -1340,10 +1409,14 @@ static int isst_if_get_tpmi_instance_count(void __user *argp)
#define SST_TF_NUM_CORE_0_START 0
#define SST_TF_NUM_CORE_0_WIDTH 8
+#define SST_TF_NUM_MOD_0_START 0
+#define SST_TF_NUM_MOD_0_WIDTH 16
+
static int isst_if_get_turbo_freq_info(void __user *argp)
{
static struct isst_turbo_freq_info turbo_freq;
struct tpmi_per_power_domain_info *power_domain_info;
+ u8 feature_rev;
int i, j;
if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq)))
@@ -1360,6 +1433,10 @@ static int isst_if_get_turbo_freq_info(void __user *argp)
turbo_freq.max_trl_levels = TRL_MAX_LEVELS;
turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS;
+ _read_tf_level_info("feature_rev", feature_rev, turbo_freq.level,
+ SST_TF_INFO_0_OFFSET, SST_TF_FEATURE_REV_START,
+ SST_TF_FEATURE_REV_WIDTH, SST_MUL_FACTOR_NONE);
+
for (i = 0; i < turbo_freq.max_clip_freqs; ++i)
_read_tf_level_info("lp_clip*", turbo_freq.lp_clip_freq_mhz[i],
turbo_freq.level, SST_TF_INFO_0_OFFSET,
@@ -1376,12 +1453,32 @@ static int isst_if_get_turbo_freq_info(void __user *argp)
SST_MUL_FACTOR_FREQ)
}
+ if (feature_rev >= 2) {
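+		/*
+		 * Feature revision 2 reports 16-bit module counts in TF_INFO_8.
+		 * If all of them are zero, fall back to the TF_INFO_1 core counts.
+		 */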
+ bool has_tf_info_8 = false;
+
+ for (i = 0; i < SST_TF_INFO_8_BUCKETS; ++i) {
+ _read_tf_level_info("bucket_*_mod_count", turbo_freq.bucket_core_counts[i],
+ turbo_freq.level, SST_TF_INFO_8_OFFSET,
+ SST_TF_NUM_MOD_0_WIDTH * i, SST_TF_NUM_MOD_0_WIDTH,
+ SST_MUL_FACTOR_NONE)
+
+ if (turbo_freq.bucket_core_counts[i])
+ has_tf_info_8 = true;
+ }
+
+ if (has_tf_info_8)
+ goto done_core_count;
+ }
+
for (i = 0; i < TRL_MAX_BUCKETS; ++i)
_read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i],
turbo_freq.level, SST_TF_INFO_1_OFFSET,
SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH,
SST_MUL_FACTOR_NONE)
+
+done_core_count:
+
if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq)))
return -EFAULT;
@@ -1420,6 +1517,9 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
case ISST_IF_GET_PERF_LEVEL_INFO:
ret = isst_if_get_perf_level_info(argp);
break;
+ case ISST_IF_GET_PERF_LEVEL_FABRIC_INFO:
+ ret = isst_if_get_perf_level_fabric_info(argp);
+ break;
case ISST_IF_GET_PERF_LEVEL_CPU_MASK:
ret = isst_if_get_perf_level_mask(argp);
break;
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
index 2f01cd22a6ee..9d8247bb9cfa 100644
--- a/drivers/platform/x86/intel/tpmi_power_domains.c
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -74,6 +74,8 @@ static enum cpuhp_state tpmi_hp_state __read_mostly;
static cpumask_t *tpmi_power_domain_mask;
+static u16 *domain_die_map;
+
/* Lock to protect tpmi_power_domain_mask and tpmi_cpu_hash */
static DEFINE_MUTEX(tpmi_lock);
@@ -152,12 +154,21 @@ cpumask_t *tpmi_get_power_domain_mask(int cpu_no)
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_power_domain_mask, "INTEL_TPMI_POWER_DOMAIN");
+int tpmi_get_linux_die_id(int pkg_id, int domain_id)
+{
+ if (pkg_id >= topology_max_packages() || domain_id >= MAX_POWER_DOMAINS)
+ return -EINVAL;
+
+ return domain_die_map[pkg_id * MAX_POWER_DOMAINS + domain_id];
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_linux_die_id, "INTEL_TPMI_POWER_DOMAIN");
+
static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
{
u64 data;
int ret;
- ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
if (ret)
return ret;
@@ -189,6 +200,9 @@ static int tpmi_cpu_online(unsigned int cpu)
cpumask_set_cpu(cpu, &tpmi_power_domain_mask[index]);
hash_add(tpmi_cpu_hash, &info->hnode, info->punit_core_id);
+ domain_die_map[info->pkg_id * MAX_POWER_DOMAINS + info->punit_domain_id] =
+ topology_die_id(cpu);
+
return 0;
}
@@ -203,7 +217,7 @@ static int __init tpmi_init(void)
return -ENODEV;
/* Check for MSR 0x54 presence */
- ret = rdmsrl_safe(MSR_PM_LOGICAL_ID, &data);
+ ret = rdmsrq_safe(MSR_PM_LOGICAL_ID, &data);
if (ret)
return ret;
@@ -212,17 +226,30 @@ static int __init tpmi_init(void)
if (!tpmi_power_domain_mask)
return -ENOMEM;
+ domain_die_map = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS),
+ sizeof(*domain_die_map), GFP_KERNEL);
+ if (!domain_die_map) {
+ ret = -ENOMEM;
+ goto free_domain_mask;
+ }
+
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"platform/x86/tpmi_power_domains:online",
tpmi_cpu_online, NULL);
- if (ret < 0) {
- kfree(tpmi_power_domain_mask);
- return ret;
- }
+ if (ret < 0)
+ goto free_domain_map;
tpmi_hp_state = ret;
return 0;
+
+free_domain_map:
+ kfree(domain_die_map);
+
+free_domain_mask:
+ kfree(tpmi_power_domain_mask);
+
+ return ret;
}
module_init(tpmi_init)
@@ -230,6 +257,7 @@ static void __exit tpmi_exit(void)
{
cpuhp_remove_state(tpmi_hp_state);
kfree(tpmi_power_domain_mask);
+ kfree(domain_die_map);
}
module_exit(tpmi_exit)
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.h b/drivers/platform/x86/intel/tpmi_power_domains.h
index e35750dd9273..2fd0dd7afbd2 100644
--- a/drivers/platform/x86/intel/tpmi_power_domains.h
+++ b/drivers/platform/x86/intel/tpmi_power_domains.h
@@ -14,5 +14,6 @@ int tpmi_get_linux_cpu_number(int package_id, int die_id, int punit_core_id);
int tpmi_get_punit_core_number(int cpu_no);
int tpmi_get_power_domain_id(int cpu_no);
cpumask_t *tpmi_get_power_domain_mask(int cpu_no);
+int tpmi_get_linux_die_id(int pkg_id, int domain_id);
#endif
diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c
index 79a0bcdeffb8..b5af3e91ba04 100644
--- a/drivers/platform/x86/intel/turbo_max_3.c
+++ b/drivers/platform/x86/intel/turbo_max_3.c
@@ -17,6 +17,7 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#define MSR_OC_MAILBOX 0x150
#define MSR_OC_MAILBOX_CMD_OFFSET 32
@@ -41,14 +42,14 @@ static int get_oc_core_priority(unsigned int cpu)
value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
/* Set the busy bit to indicate OS is trying to issue command */
value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
- ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
+ ret = wrmsrq_safe(MSR_OC_MAILBOX, value);
if (ret) {
pr_debug("cpu %d OC mailbox write failed\n", cpu);
return ret;
}
for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
- ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
+ ret = rdmsrq_safe(MSR_OC_MAILBOX, &value);
if (ret) {
pr_debug("cpu %d OC mailbox read failed\n", cpu);
break;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 4e2c6a2d7e6e..65897fae17df 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -43,6 +43,29 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr
return sprintf(buf, "%u\n", data->package_id);
}
+#define MAX_UNCORE_AGENT_TYPES 4
+
+/* The order follows the AGENT_TYPE_* defines */
+static const char *agent_name[MAX_UNCORE_AGENT_TYPES] = {"core", "cache", "memory", "io"};
+
+static ssize_t show_agent_types(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct uncore_data *data = container_of(attr, struct uncore_data, agent_types_kobj_attr);
+ unsigned long agent_mask = data->agent_type_mask;
+ int agent, length = 0;
+
+ for_each_set_bit(agent, &agent_mask, MAX_UNCORE_AGENT_TYPES) {
+ if (length)
+ length += sysfs_emit_at(buf, length, " ");
+
+ length += sysfs_emit_at(buf, length, "%s", agent_name[agent]);
+ }
+
+ length += sysfs_emit_at(buf, length, "\n");
+
+ return length;
+}
+
static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index index)
{
unsigned int value;
@@ -120,6 +143,8 @@ show_uncore_attr(elc_high_threshold_enable,
UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE);
show_uncore_attr(elc_floor_freq_khz, UNCORE_INDEX_EFF_LAT_CTRL_FREQ);
+show_uncore_attr(die_id, UNCORE_INDEX_DIE_ID);
+
#define show_uncore_data(member_name) \
static ssize_t show_##member_name(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf)\
@@ -179,6 +204,15 @@ static int create_attr_group(struct uncore_data *data, char *name)
data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
init_attribute_root_ro(package_id);
data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
+ if (data->agent_type_mask) {
+ init_attribute_ro(agent_types);
+ data->uncore_attrs[index++] = &data->agent_types_kobj_attr.attr;
+ }
+ if (topology_max_dies_per_package() > 1 &&
+ data->agent_type_mask & AGENT_TYPE_CORE) {
+ init_attribute_ro(die_id);
+ data->uncore_attrs[index++] = &data->die_id_kobj_attr.attr;
+ }
}
data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
index 26c854cd5d97..70ae11519837 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -11,6 +11,18 @@
#include <linux/device.h>
+/*
+ * Define the uncore agents that are under uncore frequency control.
+ * Defined in the same order as specified in the TPMI UFS Specification.
+ * A single uncore frequency control may be common to more than one
+ * hardware agent, so these defines are used as a bit mask.
+ */
+
+#define AGENT_TYPE_CORE 0x01
+#define AGENT_TYPE_CACHE 0x02
+#define AGENT_TYPE_MEMORY 0x04
+#define AGENT_TYPE_IO 0x08
+
/**
* struct uncore_data - Encapsulate all uncore data
* @stored_uncore_data: Last user changed MSR 620 value, which will be restored
@@ -25,6 +37,7 @@
* @cluster_id: cluster id in a domain
* @instance_id: Unique instance id to append to directory name
* @name: Sysfs entry name for this instance
+ * @agent_type_mask: Bit mask of all hardware agents for this domain
* @uncore_attr_group: Attribute group storage
* @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
* @mix_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
@@ -41,6 +54,7 @@
* @elc_high_threshold_enable_kobj_attr:
Storage for kobject attribute elc_high_threshold_enable
* @elc_floor_freq_khz_kobj_attr: Storage for kobject attribute elc_floor_freq_khz
+ * @agent_types_kobj_attr: Storage for kobject attribute agent_types
* @uncore_attrs: Attribute storage for group creation
*
* This structure is used to encapsulate all data related to uncore sysfs
@@ -58,6 +72,7 @@ struct uncore_data {
int cluster_id;
int instance_id;
char name[32];
+ u16 agent_type_mask;
struct attribute_group uncore_attr_group;
struct kobj_attribute max_freq_khz_kobj_attr;
@@ -72,7 +87,9 @@ struct uncore_data {
struct kobj_attribute elc_high_threshold_percent_kobj_attr;
struct kobj_attribute elc_high_threshold_enable_kobj_attr;
struct kobj_attribute elc_floor_freq_khz_kobj_attr;
- struct attribute *uncore_attrs[13];
+ struct kobj_attribute agent_types_kobj_attr;
+ struct kobj_attribute die_id_kobj_attr;
+ struct attribute *uncore_attrs[15];
};
#define UNCORE_DOMAIN_ID_INVALID -1
@@ -85,6 +102,7 @@ enum uncore_index {
UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD,
UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE,
UNCORE_INDEX_EFF_LAT_CTRL_FREQ,
+ UNCORE_INDEX_DIE_ID,
};
int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value,
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index 4aa6c227ec82..44d9948ed224 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/intel_tpmi.h>
+#include "../tpmi_power_domains.h"
#include "uncore-frequency-common.h"
#define UNCORE_MAJOR_VERSION 0
@@ -49,6 +50,7 @@ struct tpmi_uncore_cluster_info {
bool root_domain;
bool elc_supported;
u8 __iomem *cluster_base;
+ u16 cdie_id;
struct uncore_data uncore_data;
struct tpmi_uncore_struct *uncore_root;
};
@@ -347,9 +349,31 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
return 0;
}
+/*
+ * Agent types as per the TPMI UFS Specification for UFS_STATUS
+ * Agent Type - Core Bit: 23
+ * Agent Type - Cache Bit: 24
+ * Agent Type - Memory Bit: 25
+ * Agent Type - IO Bit: 26
+ */
+
+#define UNCORE_AGENT_TYPES GENMASK_ULL(26, 23)
+
+/* Helper function to read agent type over MMIO and set the agent type mask */
+static void uncore_set_agent_type(struct tpmi_uncore_cluster_info *cluster_info)
+{
+ u64 status;
+
+ status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
+ cluster_info->uncore_data.agent_type_mask = FIELD_GET(UNCORE_AGENT_TYPES, status);
+}
+
/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */
static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
{
+ struct tpmi_uncore_cluster_info *cluster_info;
+ int ret;
+
switch (index) {
case UNCORE_INDEX_MIN_FREQ:
case UNCORE_INDEX_MAX_FREQ:
@@ -364,6 +388,16 @@ static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncor
case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
return read_eff_lat_ctrl(data, value, index);
+ case UNCORE_INDEX_DIE_ID:
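+		/* Translate the punit cdie ID to the Linux die ID */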
+ cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+ ret = tpmi_get_linux_die_id(cluster_info->uncore_data.package_id,
+ cluster_info->cdie_id);
+ if (ret < 0)
+ return ret;
+
+ *value = ret;
+ return 0;
+
default:
break;
}
@@ -413,6 +447,16 @@ static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
}
}
+static void set_cdie_id(int domain_id, struct tpmi_uncore_cluster_info *cluster_info,
+ struct intel_tpmi_plat_info *plat_info)
+{
+	cluster_info->cdie_id = domain_id;
+
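+	/* For compute domains, offset the domain ID by the first compute die in cdie_mask */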
+ if (plat_info->cdie_mask && cluster_info->uncore_data.agent_type_mask & AGENT_TYPE_CORE)
+ cluster_info->cdie_id = domain_id + ffs(plat_info->cdie_mask) - 1;
+}
+
#define UNCORE_VERSION_MASK GENMASK_ULL(7, 0)
#define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK GENMASK_ULL(15, 8)
#define UNCORE_CLUSTER_OFF_MASK GENMASK_ULL(7, 0)
@@ -467,10 +511,13 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
/* Get the package ID from the TPMI core */
plat_info = tpmi_get_platform_data(auxdev);
- if (plat_info)
- pkg = plat_info->package_id;
- else
+ if (unlikely(!plat_info)) {
dev_info(&auxdev->dev, "Platform information is NULL\n");
+ ret = -ENODEV;
+ goto err_rem_common;
+ }
+
+ pkg = plat_info->package_id;
for (i = 0; i < num_resources; ++i) {
struct tpmi_uncore_power_domain_info *pd_info;
@@ -552,12 +599,16 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
cluster_info->cluster_base = pd_info->uncore_base + mask;
+ uncore_set_agent_type(cluster_info);
+
cluster_info->uncore_data.package_id = pkg;
/* There are no dies like Cascade Lake */
cluster_info->uncore_data.die_id = 0;
cluster_info->uncore_data.domain_id = i;
cluster_info->uncore_data.cluster_id = j;
+ set_cdie_id(i, cluster_info, plat_info);
+
cluster_info->uncore_root = tpmi_uncore;
if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
@@ -631,5 +682,6 @@ module_auxiliary_driver(intel_uncore_aux_driver);
MODULE_IMPORT_NS("INTEL_TPMI");
MODULE_IMPORT_NS("INTEL_UNCORE_FREQUENCY");
+MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");
MODULE_DESCRIPTION("Intel TPMI UFS Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index bdee5d00f30b..2a6897035150 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -21,6 +21,7 @@
#include <linux/suspend.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
#include "uncore-frequency-common.h"
@@ -51,7 +52,7 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu
if (data->control_cpu < 0)
return -ENXIO;
- ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
if (ret)
return ret;
@@ -76,7 +77,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
if (data->control_cpu < 0)
return -ENXIO;
- ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
+ ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
if (ret)
return ret;
@@ -88,7 +89,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
}
- ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
+ ret = wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
if (ret)
return ret;
@@ -105,7 +106,7 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
if (data->control_cpu < 0)
return -ENXIO;
- ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
+ ret = rdmsrq_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
if (ret)
return ret;
@@ -212,7 +213,7 @@ static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
if (!data || !data->valid || !data->stored_uncore_data)
return 0;
- wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
+ wrmsrq_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
data->stored_uncore_data);
}
break;
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index db3c031d1757..055ca9f48fb4 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -332,13 +332,16 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
return have_devices;
}
-void intel_vsec_register(struct pci_dev *pdev,
+int intel_vsec_register(struct pci_dev *pdev,
struct intel_vsec_platform_info *info)
{
if (!pdev || !info || !info->headers)
- return;
+ return -EINVAL;
- intel_vsec_walk_header(pdev, info);
+ if (!intel_vsec_walk_header(pdev, info))
+ return -ENODEV;
+
+	return 0;
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_register, "INTEL_VSEC");
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 5d717b1c23cf..b1b2d9caba7b 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -370,7 +370,7 @@ static void ips_cpu_raise(struct ips_driver *ips)
if (!ips->cpu_turbo_enabled)
return;
- rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
cur_tdp_limit = turbo_override & TURBO_TDP_MASK;
new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */
@@ -382,12 +382,12 @@ static void ips_cpu_raise(struct ips_driver *ips)
thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);
turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
- wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
turbo_override |= new_tdp_limit;
- wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}
/**
@@ -405,7 +405,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
u64 turbo_override;
u16 cur_limit, new_limit;
- rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
cur_limit = turbo_override & TURBO_TDP_MASK;
new_limit = cur_limit - 8; /* 1W decrease */
@@ -417,12 +417,12 @@ static void ips_cpu_lower(struct ips_driver *ips)
thm_writew(THM_MPCPC, (new_limit * 10) / 8);
turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
- wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
turbo_override |= new_limit;
- wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}
/**
@@ -437,10 +437,10 @@ static void do_enable_cpu_turbo(void *data)
{
u64 perf_ctl;
- rdmsrl(IA32_PERF_CTL, perf_ctl);
+ rdmsrq(IA32_PERF_CTL, perf_ctl);
if (perf_ctl & IA32_PERF_TURBO_DIS) {
perf_ctl &= ~IA32_PERF_TURBO_DIS;
- wrmsrl(IA32_PERF_CTL, perf_ctl);
+ wrmsrq(IA32_PERF_CTL, perf_ctl);
}
}
@@ -475,10 +475,10 @@ static void do_disable_cpu_turbo(void *data)
{
u64 perf_ctl;
- rdmsrl(IA32_PERF_CTL, perf_ctl);
+ rdmsrq(IA32_PERF_CTL, perf_ctl);
if (!(perf_ctl & IA32_PERF_TURBO_DIS)) {
perf_ctl |= IA32_PERF_TURBO_DIS;
- wrmsrl(IA32_PERF_CTL, perf_ctl);
+ wrmsrq(IA32_PERF_CTL, perf_ctl);
}
}
@@ -934,7 +934,7 @@ static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
static void monitor_timeout(struct timer_list *t)
{
- struct ips_driver *ips = from_timer(ips, t, timer);
+ struct ips_driver *ips = timer_container_of(ips, t, timer);
wake_up_process(ips->monitor);
}
@@ -1215,7 +1215,7 @@ static int cpu_clamp_show(struct seq_file *m, void *data)
u64 turbo_override;
int tdp, tdc;
- rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
tdp = (int)(turbo_override & TURBO_TDP_MASK);
tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT);
@@ -1290,7 +1290,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
return NULL;
}
- rdmsrl(IA32_MISC_ENABLE, misc_en);
+ rdmsrq(IA32_MISC_ENABLE, misc_en);
/*
* If the turbo enable bit isn't set, we shouldn't try to enable/disable
* turbo manually or we'll get an illegal MSR access, even though
@@ -1312,7 +1312,7 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
return NULL;
}
- rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
+ rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_power);
tdp = turbo_power & TURBO_TDP_MASK;
/* Sanity check TDP against CPU */
@@ -1496,7 +1496,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
* Check PLATFORM_INFO MSR to make sure this chip is
* turbo capable.
*/
- rdmsrl(PLATFORM_INFO, platform_info);
+ rdmsrq(PLATFORM_INFO, platform_info);
if (!(platform_info & PLATFORM_TDP)) {
dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
return -ENODEV;
@@ -1529,7 +1529,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
ips->mgta_val = thm_readw(THM_MGTA);
/* Save turbo limits & ratios */
- rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
+ rdmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
ips_disable_cpu_turbo(ips);
ips->cpu_turbo_enabled = false;
@@ -1596,10 +1596,10 @@ static void ips_remove(struct pci_dev *dev)
if (ips->gpu_turbo_disable)
symbol_put(i915_gpu_turbo_disable);
- rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ rdmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN);
- wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
- wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
+ wrmsrq(TURBO_POWER_CURRENT_LIMIT, turbo_override);
+ wrmsrq(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
free_irq(ips->irq, ips);
pci_free_irq_vectors(dev);
diff --git a/drivers/platform/x86/oxpec.c b/drivers/platform/x86/oxpec.c
new file mode 100644
index 000000000000..06759036945d
--- /dev/null
+++ b/drivers/platform/x86/oxpec.c
@@ -0,0 +1,1054 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Platform driver for OneXPlayer and AOKZOE devices. For the time being,
+ * it also exposes fan controls for AYANEO and OrangePi handhelds via
+ * hwmon sysfs.
+ *
+ * Fan control is provided via a pwm interface in the range [0-255].
+ * Old AMD boards use [0-100] as the range in the EC; the written value
+ * is scaled to accommodate that. Newer boards like the mini PRO and
+ * AOKZOE are not scaled but have the same EC layout. Newer models
+ * like the 2 and X1 are [0-184] and are scaled to [0-255]. OrangePi
+ * boards are [1-244] and scaled to [0-255].
+ *
+ * Copyright (C) 2022 Joaquín I. Aramendía <samsagax@gmail.com>
+ * Copyright (C) 2024 Derek J. Clark <derekjohn.clark@gmail.com>
+ * Copyright (C) 2025 Antheas Kapenekakis <lkml@antheas.dev>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/processor.h>
+#include <acpi/battery.h>
+
+/* Handle ACPI lock mechanism */
+static u32 oxp_mutex;
+
+#define ACPI_LOCK_DELAY_MS 500
+
+static bool lock_global_acpi_lock(void)
+{
+ return ACPI_SUCCESS(acpi_acquire_global_lock(ACPI_LOCK_DELAY_MS, &oxp_mutex));
+}
+
+static bool unlock_global_acpi_lock(void)
+{
+ return ACPI_SUCCESS(acpi_release_global_lock(oxp_mutex));
+}
+
+enum oxp_board {
+ aok_zoe_a1 = 1,
+ aya_neo_2,
+ aya_neo_air,
+ aya_neo_air_1s,
+ aya_neo_air_plus_mendo,
+ aya_neo_air_pro,
+ aya_neo_flip,
+ aya_neo_geek,
+ aya_neo_kun,
+ orange_pi_neo,
+ oxp_2,
+ oxp_fly,
+ oxp_mini_amd,
+ oxp_mini_amd_a07,
+ oxp_mini_amd_pro,
+ oxp_x1,
+ oxp_g1,
+};
+
+static enum oxp_board board;
+static struct device *oxp_dev;
+
+/* Fan reading and PWM */
+#define OXP_SENSOR_FAN_REG 0x76 /* Fan reading is 2 registers long */
+#define OXP_2_SENSOR_FAN_REG 0x58 /* Fan reading is 2 registers long */
+#define OXP_SENSOR_PWM_ENABLE_REG 0x4A /* PWM enable is 1 register long */
+#define OXP_SENSOR_PWM_REG 0x4B /* PWM reading is 1 register long */
+#define PWM_MODE_AUTO 0x00
+#define PWM_MODE_MANUAL 0x01
+
+/* OrangePi fan reading and PWM */
+#define ORANGEPI_SENSOR_FAN_REG 0x78 /* Fan reading is 2 registers long */
+#define ORANGEPI_SENSOR_PWM_ENABLE_REG 0x40 /* PWM enable is 1 register long */
+#define ORANGEPI_SENSOR_PWM_REG 0x38 /* PWM reading is 1 register long */
+
+/*
+ * Turbo button takeover function.
+ * Different boards have different values and EC registers
+ * for the same function.
+ */
+#define OXP_TURBO_SWITCH_REG 0xF1 /* Mini Pro, OneXFly, AOKZOE */
+#define OXP_2_TURBO_SWITCH_REG 0xEB /* OXP2 and X1 */
+#define OXP_MINI_TURBO_SWITCH_REG 0x1E /* Mini AO7 */
+
+#define OXP_MINI_TURBO_TAKE_VAL 0x01 /* Mini AO7 */
+#define OXP_TURBO_TAKE_VAL 0x40 /* All other models */
+
+/* X1 Turbo LED */
+#define OXP_X1_TURBO_LED_REG 0x57
+
+#define OXP_X1_TURBO_LED_OFF 0x01
+#define OXP_X1_TURBO_LED_ON 0x02
+
+/* Battery extension settings */
+#define EC_CHARGE_CONTROL_BEHAVIOURS (BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) | \
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE) | \
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE_AWAKE))
+
+#define OXP_X1_CHARGE_LIMIT_REG 0xA3 /* X1 charge limit (%) */
+#define OXP_X1_CHARGE_INHIBIT_REG 0xA4 /* X1 bypass charging */
+
+#define OXP_X1_CHARGE_INHIBIT_MASK_AWAKE 0x01
+/* X1 Mask is 0x0A, F1Pro is 0x02 but the extra bit on the X1 does nothing. */
+#define OXP_X1_CHARGE_INHIBIT_MASK_OFF 0x02
+#define OXP_X1_CHARGE_INHIBIT_MASK_ALWAYS (OXP_X1_CHARGE_INHIBIT_MASK_AWAKE | \
+ OXP_X1_CHARGE_INHIBIT_MASK_OFF)
+
+static const struct dmi_system_id dmi_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 AR07"),
+ },
+ .driver_data = (void *)aok_zoe_a1,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 Pro"),
+ },
+ .driver_data = (void *)aok_zoe_a1,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
+ },
+ .driver_data = (void *)aya_neo_2,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
+ },
+ .driver_data = (void *)aya_neo_air,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
+ },
+ .driver_data = (void *)aya_neo_air_1s,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
+ },
+ .driver_data = (void *)aya_neo_air_plus_mendo,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
+ },
+ .driver_data = (void *)aya_neo_air_pro,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
+ },
+ .driver_data = (void *)aya_neo_flip,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
+ },
+ .driver_data = (void *)aya_neo_geek,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)aya_neo_kun,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"),
+ },
+ .driver_data = (void *)orange_pi_neo,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)oxp_mini_amd,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_MATCH(DMI_BOARD_NAME, "ONEXPLAYER 2"),
+ },
+ .driver_data = (void *)oxp_2,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1 EVA-01"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1 OLED"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1L"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1Pro"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER F1 EVA-02"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER G1 A"),
+ },
+ .driver_data = (void *)oxp_g1,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER G1 i"),
+ },
+ .driver_data = (void *)oxp_g1,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER mini A07"),
+ },
+ .driver_data = (void *)oxp_mini_amd_a07,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER Mini Pro"),
+ },
+ .driver_data = (void *)oxp_mini_amd_pro,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1 A"),
+ },
+ .driver_data = (void *)oxp_x1,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1 i"),
+ },
+ .driver_data = (void *)oxp_x1,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1 mini"),
+ },
+ .driver_data = (void *)oxp_x1,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1Pro"),
+ },
+ .driver_data = (void *)oxp_x1,
+ },
+ {},
+};
+
+/* Helper functions to handle EC read/write */
+static int read_from_ec(u8 reg, int size, long *val)
+{
+ u8 buffer;
+ int ret;
+ int i;
+
+ if (!lock_global_acpi_lock())
+ return -EBUSY;
+
+ *val = 0;
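+	/* EC values span consecutive registers, most significant byte first */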
+	for (i = 0; i < size; i++) {
+		ret = ec_read(reg + i, &buffer);
+		if (ret) {
+			unlock_global_acpi_lock();
+			return ret;
+		}
+		*val <<= 8;
+		*val += buffer;
+	}
+
+ if (!unlock_global_acpi_lock())
+ return -EBUSY;
+
+ return 0;
+}
+
+static int write_to_ec(u8 reg, u8 value)
+{
+ int ret;
+
+ if (!lock_global_acpi_lock())
+ return -EBUSY;
+
+ ret = ec_write(reg, value);
+
+ if (!unlock_global_acpi_lock())
+ return -EBUSY;
+
+ return ret;
+}
+
+/* Callbacks for turbo toggle attribute */
+static umode_t tt_toggle_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ switch (board) {
+ case aok_zoe_a1:
+ case oxp_2:
+ case oxp_fly:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ case oxp_x1:
+ case oxp_g1:
+ return attr->mode;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static ssize_t tt_toggle_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ u8 reg, mask, val;
+ long raw_val;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret)
+ return ret;
+
+ switch (board) {
+ case oxp_mini_amd_a07:
+ reg = OXP_MINI_TURBO_SWITCH_REG;
+ mask = OXP_MINI_TURBO_TAKE_VAL;
+ break;
+ case aok_zoe_a1:
+ case oxp_fly:
+ case oxp_mini_amd_pro:
+ reg = OXP_TURBO_SWITCH_REG;
+ mask = OXP_TURBO_TAKE_VAL;
+ break;
+ case oxp_2:
+ case oxp_x1:
+ case oxp_g1:
+ reg = OXP_2_TURBO_SWITCH_REG;
+ mask = OXP_TURBO_TAKE_VAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
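+	/* Read-modify-write the takeover bit, preserving the other bits */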
+ ret = read_from_ec(reg, 1, &raw_val);
+ if (ret)
+ return ret;
+
+ val = raw_val;
+ if (enable)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ ret = write_to_ec(reg, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t tt_toggle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 reg, mask;
+ int retval;
+ long val;
+
+ switch (board) {
+ case oxp_mini_amd_a07:
+ reg = OXP_MINI_TURBO_SWITCH_REG;
+ mask = OXP_MINI_TURBO_TAKE_VAL;
+ break;
+ case aok_zoe_a1:
+ case oxp_fly:
+ case oxp_mini_amd_pro:
+ reg = OXP_TURBO_SWITCH_REG;
+ mask = OXP_TURBO_TAKE_VAL;
+ break;
+ case oxp_2:
+ case oxp_x1:
+ case oxp_g1:
+ reg = OXP_2_TURBO_SWITCH_REG;
+ mask = OXP_TURBO_TAKE_VAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ retval = read_from_ec(reg, 1, &val);
+ if (retval)
+ return retval;
+
+ return sysfs_emit(buf, "%d\n", (val & mask) == mask);
+}
+
+static DEVICE_ATTR_RW(tt_toggle);
+
+/* Callbacks for turbo LED attribute */
+static umode_t tt_led_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ switch (board) {
+ case oxp_x1:
+ return attr->mode;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static ssize_t tt_led_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ u8 reg, val;
+ bool value;
+ int ret;
+
+ ret = kstrtobool(buf, &value);
+ if (ret)
+ return ret;
+
+ switch (board) {
+ case oxp_x1:
+ reg = OXP_X1_TURBO_LED_REG;
+ val = value ? OXP_X1_TURBO_LED_ON : OXP_X1_TURBO_LED_OFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = write_to_ec(reg, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t tt_led_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ long enval;
+ long val;
+ int ret;
+ u8 reg;
+
+ switch (board) {
+ case oxp_x1:
+ reg = OXP_X1_TURBO_LED_REG;
+ enval = OXP_X1_TURBO_LED_ON;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = read_from_ec(reg, 1, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val == enval);
+}
+
+static DEVICE_ATTR_RW(tt_led);
+
+/* Callbacks for charge behaviour attributes */
+static bool oxp_psy_ext_supported(void)
+{
+ switch (board) {
+ case oxp_x1:
+ case oxp_g1:
+ case oxp_fly:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static int oxp_psy_ext_get_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ long raw_val;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ ret = read_from_ec(OXP_X1_CHARGE_LIMIT_REG, 1, &raw_val);
+ if (ret)
+ return ret;
+ if (raw_val < 0 || raw_val > 100)
+ return -EINVAL;
+ val->intval = raw_val;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
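+		/*
+		 * Both inhibit bits set means inhibit always; only the awake
+		 * bit set means inhibit while awake; otherwise charge as usual.
+		 */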
+ ret = read_from_ec(OXP_X1_CHARGE_INHIBIT_REG, 1, &raw_val);
+ if (ret)
+ return ret;
+ if ((raw_val & OXP_X1_CHARGE_INHIBIT_MASK_ALWAYS) ==
+ OXP_X1_CHARGE_INHIBIT_MASK_ALWAYS)
+ val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
+ else if ((raw_val & OXP_X1_CHARGE_INHIBIT_MASK_AWAKE) ==
+ OXP_X1_CHARGE_INHIBIT_MASK_AWAKE)
+ val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE_AWAKE;
+ else
+ val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int oxp_psy_ext_set_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ long raw_val;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ if (val->intval < 0 || val->intval > 100)
+ return -EINVAL;
+ return write_to_ec(OXP_X1_CHARGE_LIMIT_REG, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO:
+ raw_val = 0;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE_AWAKE:
+ raw_val = OXP_X1_CHARGE_INHIBIT_MASK_AWAKE;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE:
+ raw_val = OXP_X1_CHARGE_INHIBIT_MASK_ALWAYS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return write_to_ec(OXP_X1_CHARGE_INHIBIT_REG, raw_val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int oxp_psy_prop_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp)
+{
+ return true;
+}
+
+static const enum power_supply_property oxp_psy_ext_props[] = {
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_ext oxp_psy_ext = {
+ .name = "oxp-charge-control",
+ .properties = oxp_psy_ext_props,
+ .num_properties = ARRAY_SIZE(oxp_psy_ext_props),
+ .charge_behaviours = EC_CHARGE_CONTROL_BEHAVIOURS,
+ .get_property = oxp_psy_ext_get_prop,
+ .set_property = oxp_psy_ext_set_prop,
+ .property_is_writeable = oxp_psy_prop_is_writeable,
+};
+
+static int oxp_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ return power_supply_register_extension(battery, &oxp_psy_ext, oxp_dev, NULL);
+}
+
+static int oxp_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ power_supply_unregister_extension(battery, &oxp_psy_ext);
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = oxp_add_battery,
+ .remove_battery = oxp_remove_battery,
+ .name = "OneXPlayer Battery",
+};
+
+/* PWM enable/disable functions */
+static int oxp_pwm_enable(void)
+{
+ switch (board) {
+ case orange_pi_neo:
+ return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
+ case aok_zoe_a1:
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_2:
+ case oxp_fly:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ case oxp_x1:
+ case oxp_g1:
+ return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int oxp_pwm_disable(void)
+{
+ switch (board) {
+ case orange_pi_neo:
+ return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
+ case aok_zoe_a1:
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_1s:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_2:
+ case oxp_fly:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ case oxp_x1:
+ case oxp_g1:
+ return write_to_ec(OXP_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int oxp_pwm_read(long *val)
+{
+ switch (board) {
+ case orange_pi_neo:
+ return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val);
+ case aok_zoe_a1:
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_1s:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_2:
+ case oxp_fly:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ case oxp_x1:
+ case oxp_g1:
+ return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Callbacks for hwmon interface */
+static umode_t oxp_ec_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ return 0444;
+ case hwmon_pwm:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+/* Fan speed read function */
+static int oxp_pwm_fan_speed(long *val)
+{
+ switch (board) {
+ case orange_pi_neo:
+ return read_from_ec(ORANGEPI_SENSOR_FAN_REG, 2, val);
+ case oxp_2:
+ case oxp_x1:
+ case oxp_g1:
+ return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val);
+ case aok_zoe_a1:
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_1s:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_fly:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ case oxp_mini_amd_pro:
+ return read_from_ec(OXP_SENSOR_FAN_REG, 2, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* PWM input read/write functions */
+static int oxp_pwm_input_write(long val)
+{
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ switch (board) {
+ case orange_pi_neo:
+ /* scale to range [1-244] */
+ val = ((val - 1) * 243 / 254) + 1;
+ return write_to_ec(ORANGEPI_SENSOR_PWM_REG, val);
+ case oxp_2:
+ case oxp_x1:
+ case oxp_g1:
+ /* scale to range [0-184] */
+ val = (val * 184) / 255;
+ return write_to_ec(OXP_SENSOR_PWM_REG, val);
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_1s:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ /* scale to range [0-100] */
+ val = (val * 100) / 255;
+ return write_to_ec(OXP_SENSOR_PWM_REG, val);
+ case aok_zoe_a1:
+ case oxp_fly:
+ case oxp_mini_amd_pro:
+ return write_to_ec(OXP_SENSOR_PWM_REG, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int oxp_pwm_input_read(long *val)
+{
+ int ret;
+
+ switch (board) {
+ case orange_pi_neo:
+ ret = read_from_ec(ORANGEPI_SENSOR_PWM_REG, 1, val);
+ if (ret)
+ return ret;
+ /* scale from range [1-244] */
+ *val = ((*val - 1) * 254 / 243) + 1;
+ break;
+ case oxp_2:
+ case oxp_x1:
+ case oxp_g1:
+ ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
+ if (ret)
+ return ret;
+ /* scale from range [0-184] */
+ *val = (*val * 255) / 184;
+ break;
+ case aya_neo_2:
+ case aya_neo_air:
+ case aya_neo_air_1s:
+ case aya_neo_air_plus_mendo:
+ case aya_neo_air_pro:
+ case aya_neo_flip:
+ case aya_neo_geek:
+ case aya_neo_kun:
+ case oxp_mini_amd:
+ case oxp_mini_amd_a07:
+ ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
+ if (ret)
+ return ret;
+ /* scale from range [0-100] */
+ *val = (*val * 255) / 100;
+ break;
+ case aok_zoe_a1:
+ case oxp_fly:
+ case oxp_mini_amd_pro:
+ default:
+ ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
+ if (ret)
+ return ret;
+ break;
+ }
+ return 0;
+}
+
+static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return oxp_pwm_fan_speed(val);
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return oxp_pwm_input_read(val);
+ case hwmon_pwm_enable:
+ ret = oxp_pwm_read(val);
+ if (ret)
+ return ret;
+
+ /* Check for auto and return 2 */
+ if (!*val) {
+ *val = 2;
+ return 0;
+ }
+
+ /* Return 0 if at full fan speed, 1 otherwise */
+ ret = oxp_pwm_fan_speed(val);
+ if (ret)
+ return ret;
+
+ if (*val == 255)
+ *val = 0;
+ else
+ *val = 1;
+
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ if (val == 1)
+ return oxp_pwm_enable();
+ else if (val == 2)
+ return oxp_pwm_disable();
+ else if (val != 0)
+ return -EINVAL;
+
+ /* Enable PWM and set to max speed */
+ ret = oxp_pwm_enable();
+ if (ret)
+ return ret;
+ return oxp_pwm_input_write(255);
+ case hwmon_pwm_input:
+ return oxp_pwm_input_write(val);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+/* Known sensors in the OXP EC controllers */
+static const struct hwmon_channel_info * const oxp_platform_sensors[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ NULL,
+};
+
+static struct attribute *oxp_tt_toggle_attrs[] = {
+ &dev_attr_tt_toggle.attr,
+ NULL
+};
+
+static const struct attribute_group oxp_tt_toggle_attribute_group = {
+ .is_visible = tt_toggle_is_visible,
+ .attrs = oxp_tt_toggle_attrs,
+};
+
+static struct attribute *oxp_tt_led_attrs[] = {
+ &dev_attr_tt_led.attr,
+ NULL
+};
+
+static const struct attribute_group oxp_tt_led_attribute_group = {
+ .is_visible = tt_led_is_visible,
+ .attrs = oxp_tt_led_attrs,
+};
+
+static const struct attribute_group *oxp_ec_groups[] = {
+ &oxp_tt_toggle_attribute_group,
+ &oxp_tt_led_attribute_group,
+ NULL
+};
+
+static const struct hwmon_ops oxp_ec_hwmon_ops = {
+ .is_visible = oxp_ec_hwmon_is_visible,
+ .read = oxp_platform_read,
+ .write = oxp_platform_write,
+};
+
+static const struct hwmon_chip_info oxp_ec_chip_info = {
+ .ops = &oxp_ec_hwmon_ops,
+ .info = oxp_platform_sensors,
+};
+
+/* Initialization logic */
+static int oxp_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *hwdev;
+ int ret;
+
+ oxp_dev = dev;
+ hwdev = devm_hwmon_device_register_with_info(dev, "oxp_ec", NULL,
+ &oxp_ec_chip_info, NULL);
+
+ if (IS_ERR(hwdev))
+ return PTR_ERR(hwdev);
+
+ if (oxp_psy_ext_supported()) {
+ ret = devm_battery_hook_register(dev, &battery_hook);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver oxp_platform_driver = {
+ .driver = {
+ .name = "oxp-platform",
+ .dev_groups = oxp_ec_groups,
+ },
+ .probe = oxp_platform_probe,
+};
+
+static struct platform_device *oxp_platform_device;
+
+static int __init oxp_platform_init(void)
+{
+ const struct dmi_system_id *dmi_entry;
+
+ dmi_entry = dmi_first_match(dmi_table);
+ if (!dmi_entry)
+ return -ENODEV;
+
+ board = (enum oxp_board)(unsigned long)dmi_entry->driver_data;
+
+ /*
+ * Have to check for an AMD processor here because DMI strings are the
+ * same between Intel and AMD boards on older OneXPlayer devices; the
+ * only way to tell them apart is the CPU. Old Intel boards have an
+ * unsupported EC.
+ */
+ if (board == oxp_mini_amd && boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+
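+ /*
+ * platform_create_bundle() registers the driver and creates the
+ * matching platform device in a single call.
+ */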
+ oxp_platform_device =
+ platform_create_bundle(&oxp_platform_driver,
+ oxp_platform_probe, NULL, 0, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(oxp_platform_device);
+}
+
+static void __exit oxp_platform_exit(void)
+{
+ platform_device_unregister(oxp_platform_device);
+ platform_driver_unregister(&oxp_platform_driver);
+}
+
+MODULE_DEVICE_TABLE(dmi, dmi_table);
+
+module_init(oxp_platform_init);
+module_exit(oxp_platform_exit);
+
+MODULE_AUTHOR("Joaquín Ignacio Aramendía <samsagax@gmail.com>");
+MODULE_DESCRIPTION("Platform driver that handles EC sensors of OneXPlayer devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 2987b4db6009..255317e6fec8 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -1033,8 +1033,8 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
pcc->handle = device->handle;
pcc->num_sifr = num_sifr;
device->driver_data = pcc;
- strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PCC_CLASS);
result = acpi_pcc_init_input(pcc);
if (result) {
diff --git a/drivers/platform/x86/portwell-ec.c b/drivers/platform/x86/portwell-ec.c
new file mode 100644
index 000000000000..8b788822237b
--- /dev/null
+++ b/drivers/platform/x86/portwell-ec.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * portwell-ec.c: Portwell embedded controller driver.
+ *
+ * Tested on:
+ * - Portwell NANO-6064
+ *
+ * This driver provides support for the GPIO and watchdog timer
+ * functionality of Portwell boards with an ITE embedded controller (EC).
+ * The EC is accessed through I/O ports and provides:
+ * - 8 GPIO pins for control and monitoring
+ * - Hardware watchdog with 1-15300 second timeout range
+ *
+ * It integrates with the Linux GPIO and watchdog subsystems, letting
+ * userspace monitor and drive the EC GPIO pins and control the watchdog.
+ *
+ * (C) Copyright 2025 Portwell, Inc.
+ * Author: Yen-Chi Huang (jesse.huang@portwell.com.tw)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/dmi.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/string.h>
+#include <linux/watchdog.h>
+
+#define PORTWELL_EC_IOSPACE 0xe300
+#define PORTWELL_EC_IOSPACE_LEN SZ_256
+
+#define PORTWELL_GPIO_PINS 8
+#define PORTWELL_GPIO_DIR_REG 0x2b
+#define PORTWELL_GPIO_VAL_REG 0x2c
+
+#define PORTWELL_WDT_EC_CONFIG_ADDR 0x06
+#define PORTWELL_WDT_CONFIG_ENABLE 0x1
+#define PORTWELL_WDT_CONFIG_DISABLE 0x0
+#define PORTWELL_WDT_EC_COUNT_MIN_ADDR 0x07
+#define PORTWELL_WDT_EC_COUNT_SEC_ADDR 0x08
+#define PORTWELL_WDT_EC_MAX_COUNT_SECOND (255 * 60)
+
+#define PORTWELL_EC_FW_VENDOR_ADDRESS 0x4d
+#define PORTWELL_EC_FW_VENDOR_LENGTH 3
+#define PORTWELL_EC_FW_VENDOR_NAME "PWG"
+
+static bool force;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force loading EC driver without checking DMI boardname");
+
+static const struct dmi_system_id pwec_dmi_table[] = {
+ {
+ .ident = "NANO-6064 series",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NANO-6064"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, pwec_dmi_table);
+
+/* Functions to access the EC via I/O space */
+
+static void pwec_write(u8 index, u8 data)
+{
+ outb(data, PORTWELL_EC_IOSPACE + index);
+}
+
+static u8 pwec_read(u8 address)
+{
+ return inb(PORTWELL_EC_IOSPACE + address);
+}
+
+/* GPIO functions */
+
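+/*
+ * The EC mirrors all eight pins in two registers: PORTWELL_GPIO_VAL_REG
+ * holds the pin levels and PORTWELL_GPIO_DIR_REG the directions, one bit
+ * per pin (bit set = input, bit clear = output).
+ */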
+static int pwec_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return pwec_read(PORTWELL_GPIO_VAL_REG) & BIT(offset) ? 1 : 0;
+}
+
+static int pwec_gpio_set_rv(struct gpio_chip *chip, unsigned int offset, int val)
+{
+ u8 tmp = pwec_read(PORTWELL_GPIO_VAL_REG);
+
+ if (val)
+ tmp |= BIT(offset);
+ else
+ tmp &= ~BIT(offset);
+ pwec_write(PORTWELL_GPIO_VAL_REG, tmp);
+
+ return 0;
+}
+
+static int pwec_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ u8 direction = pwec_read(PORTWELL_GPIO_DIR_REG) & BIT(offset);
+
+ if (direction)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+/*
+ * Changing direction causes issues on some boards,
+ * so direction_input and direction_output are disabled for now.
+ */
+
+static int pwec_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ return -EOPNOTSUPP;
+}
+
+static int pwec_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct gpio_chip pwec_gpio_chip = {
+ .label = "portwell-ec-gpio",
+ .get_direction = pwec_gpio_get_direction,
+ .direction_input = pwec_gpio_direction_input,
+ .direction_output = pwec_gpio_direction_output,
+ .get = pwec_gpio_get,
+ .set_rv = pwec_gpio_set_rv,
+ .base = -1,
+ .ngpio = PORTWELL_GPIO_PINS,
+};
+
+/* Watchdog functions */
+
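+/*
+ * The EC splits the countdown across two registers: whole minutes at
+ * PORTWELL_WDT_EC_COUNT_MIN_ADDR and remaining seconds at
+ * PORTWELL_WDT_EC_COUNT_SEC_ADDR. A 90 second timeout, for example, is
+ * written as min = 1, sec = 30.
+ */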
+static void pwec_wdt_write_timeout(unsigned int timeout)
+{
+ pwec_write(PORTWELL_WDT_EC_COUNT_MIN_ADDR, timeout / 60);
+ pwec_write(PORTWELL_WDT_EC_COUNT_SEC_ADDR, timeout % 60);
+}
+
+static int pwec_wdt_trigger(struct watchdog_device *wdd)
+{
+ pwec_wdt_write_timeout(wdd->timeout);
+ pwec_write(PORTWELL_WDT_EC_CONFIG_ADDR, PORTWELL_WDT_CONFIG_ENABLE);
+
+ return 0;
+}
+
+static int pwec_wdt_start(struct watchdog_device *wdd)
+{
+ return pwec_wdt_trigger(wdd);
+}
+
+static int pwec_wdt_stop(struct watchdog_device *wdd)
+{
+ pwec_write(PORTWELL_WDT_EC_CONFIG_ADDR, PORTWELL_WDT_CONFIG_DISABLE);
+ return 0;
+}
+
+static int pwec_wdt_set_timeout(struct watchdog_device *wdd, unsigned int timeout)
+{
+ wdd->timeout = timeout;
+ pwec_wdt_write_timeout(wdd->timeout);
+
+ return 0;
+}
+
+/* Ensure consistent min/sec read in case of second rollover. */
+static unsigned int pwec_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ u8 sec, min, old_min;
+
+ do {
+ old_min = pwec_read(PORTWELL_WDT_EC_COUNT_MIN_ADDR);
+ sec = pwec_read(PORTWELL_WDT_EC_COUNT_SEC_ADDR);
+ min = pwec_read(PORTWELL_WDT_EC_COUNT_MIN_ADDR);
+ } while (min != old_min);
+
+ return min * 60 + sec;
+}
+
+static const struct watchdog_ops pwec_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = pwec_wdt_start,
+ .stop = pwec_wdt_stop,
+ .ping = pwec_wdt_trigger,
+ .set_timeout = pwec_wdt_set_timeout,
+ .get_timeleft = pwec_wdt_get_timeleft,
+};
+
+static struct watchdog_device ec_wdt_dev = {
+ .info = &(struct watchdog_info){
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "Portwell EC watchdog",
+ },
+ .ops = &pwec_wdt_ops,
+ .timeout = 60,
+ .min_timeout = 1,
+ .max_timeout = PORTWELL_WDT_EC_MAX_COUNT_SECOND,
+};
+
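+/* Refuse to bind unless the EC reports the expected "PWG" firmware vendor. */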
+static int pwec_firmware_vendor_check(void)
+{
+ u8 buf[PORTWELL_EC_FW_VENDOR_LENGTH + 1];
+ u8 i;
+
+ for (i = 0; i < PORTWELL_EC_FW_VENDOR_LENGTH; i++)
+ buf[i] = pwec_read(PORTWELL_EC_FW_VENDOR_ADDRESS + i);
+ buf[PORTWELL_EC_FW_VENDOR_LENGTH] = '\0';
+
+ return !strcmp(PORTWELL_EC_FW_VENDOR_NAME, buf) ? 0 : -ENODEV;
+}
+
+static int pwec_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (!devm_request_region(&pdev->dev, PORTWELL_EC_IOSPACE,
+ PORTWELL_EC_IOSPACE_LEN, dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "failed to get IO region\n");
+ return -EBUSY;
+ }
+
+ ret = pwec_firmware_vendor_check();
+ if (ret < 0)
+ return ret;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &pwec_gpio_chip, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register Portwell EC GPIO\n");
+ return ret;
+ }
+
+ ret = devm_watchdog_register_device(&pdev->dev, &ec_wdt_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register Portwell EC Watchdog\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver pwec_driver = {
+ .driver = {
+ .name = "portwell-ec",
+ },
+ .probe = pwec_probe,
+};
+
+static struct platform_device *pwec_dev;
+
+static int __init pwec_init(void)
+{
+ int ret;
+
+ if (!dmi_check_system(pwec_dmi_table)) {
+ if (!force)
+ return -ENODEV;
+ pr_warn("force load portwell-ec without DMI check\n");
+ }
+
+ ret = platform_driver_register(&pwec_driver);
+ if (ret)
+ return ret;
+
+ pwec_dev = platform_device_register_simple("portwell-ec", -1, NULL, 0);
+ if (IS_ERR(pwec_dev)) {
+ platform_driver_unregister(&pwec_driver);
+ return PTR_ERR(pwec_dev);
+ }
+
+ return 0;
+}
+
+static void __exit pwec_exit(void)
+{
+ platform_device_unregister(pwec_dev);
+ platform_driver_unregister(&pwec_driver);
+}
+
+module_init(pwec_init);
+module_exit(pwec_exit);
+
+MODULE_AUTHOR("Yen-Chi Huang <jesse.huang@portwell.com.tw>");
+MODULE_DESCRIPTION("Portwell EC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/samsung-galaxybook.c b/drivers/platform/x86/samsung-galaxybook.c
index 5878a351993e..3c13e13d4885 100644
--- a/drivers/platform/x86/samsung-galaxybook.c
+++ b/drivers/platform/x86/samsung-galaxybook.c
@@ -1403,6 +1403,7 @@ static int galaxybook_probe(struct platform_device *pdev)
}
static const struct acpi_device_id galaxybook_device_ids[] = {
+ { "SAM0426" },
{ "SAM0427" },
{ "SAM0428" },
{ "SAM0429" },
diff --git a/drivers/platform/x86/silicom-platform.c b/drivers/platform/x86/silicom-platform.c
index c0910af16a3a..021f3fed197a 100644
--- a/drivers/platform/x86/silicom-platform.c
+++ b/drivers/platform/x86/silicom-platform.c
@@ -245,18 +245,19 @@ static int silicom_gpio_direction_input(struct gpio_chip *gc,
return direction == GPIO_LINE_DIRECTION_IN ? 0 : -EINVAL;
}
-static void silicom_gpio_set(struct gpio_chip *gc,
- unsigned int offset,
- int value)
+static int silicom_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
int direction = silicom_gpio_get_direction(gc, offset);
u8 *channels = gpiochip_get_data(gc);
int channel = channels[offset];
if (direction == GPIO_LINE_DIRECTION_IN)
- return;
+ return -EPERM;
silicom_mec_port_set(channel, !value);
+
+ return 0;
}
static int silicom_gpio_direction_output(struct gpio_chip *gc,
@@ -469,7 +470,7 @@ static struct gpio_chip silicom_gpio_chip = {
.direction_input = silicom_gpio_direction_input,
.direction_output = silicom_gpio_direction_output,
.get = silicom_gpio_get,
- .set = silicom_gpio_set,
+ .set_rv = silicom_gpio_set,
.base = -1,
.ngpio = ARRAY_SIZE(plat_0222_gpio_channels),
.names = plat_0222_gpio_names,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index b52390fbd743..56beebc38850 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -48,7 +48,6 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/sonypi.h>
-#include <linux/sony-laptop.h>
#include <linux/rfkill.h>
#ifdef CONFIG_SONYPI_COMPAT
#include <linux/poll.h>
@@ -3157,7 +3156,7 @@ static int sony_nc_add(struct acpi_device *device)
struct sony_nc_value *item;
sony_nc_acpi_device = device;
- strcpy(acpi_device_class(device), "sony/hotkey");
+ strscpy(acpi_device_class(device), "sony/hotkey");
sony_nc_acpi_handle = device->handle;
@@ -3327,8 +3326,10 @@ struct sony_pic_ioport {
};
struct sony_pic_irq {
- struct acpi_resource_irq irq;
struct list_head list;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct acpi_resource_irq irq;
};
struct sonypi_eventtypes {
@@ -3619,22 +3620,6 @@ static u8 sony_pic_call2(u8 dev, u8 fn)
return v1;
}
-static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
-{
- u8 v1;
-
- wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
- outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
- wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
- outb(fn, spic_dev.cur_ioport->io1.minimum);
- wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
- outb(v, spic_dev.cur_ioport->io1.minimum);
- v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
- dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n",
- dev, fn, v, v1);
- return v1;
-}
-
/*
* minidrivers for SPIC models
*/
@@ -3722,156 +3707,6 @@ out:
dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
}
-/* camera tests and poweron/poweroff */
-#define SONYPI_CAMERA_PICTURE 5
-#define SONYPI_CAMERA_CONTROL 0x10
-
-#define SONYPI_CAMERA_BRIGHTNESS 0
-#define SONYPI_CAMERA_CONTRAST 1
-#define SONYPI_CAMERA_HUE 2
-#define SONYPI_CAMERA_COLOR 3
-#define SONYPI_CAMERA_SHARPNESS 4
-
-#define SONYPI_CAMERA_EXPOSURE_MASK 0xC
-#define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3
-#define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30
-#define SONYPI_CAMERA_MUTE_MASK 0x40
-
-/* the rest don't need a loop until not 0xff */
-#define SONYPI_CAMERA_AGC 6
-#define SONYPI_CAMERA_AGC_MASK 0x30
-#define SONYPI_CAMERA_SHUTTER_MASK 0x7
-
-#define SONYPI_CAMERA_SHUTDOWN_REQUEST 7
-#define SONYPI_CAMERA_CONTROL 0x10
-
-#define SONYPI_CAMERA_STATUS 7
-#define SONYPI_CAMERA_STATUS_READY 0x2
-#define SONYPI_CAMERA_STATUS_POSITION 0x4
-
-#define SONYPI_DIRECTION_BACKWARDS 0x4
-
-#define SONYPI_CAMERA_REVISION 8
-#define SONYPI_CAMERA_ROMVERSION 9
-
-static int __sony_pic_camera_ready(void)
-{
- u8 v;
-
- v = sony_pic_call2(0x8f, SONYPI_CAMERA_STATUS);
- return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
-}
-
-static int __sony_pic_camera_off(void)
-{
- if (!camera) {
- pr_warn("camera control not enabled\n");
- return -ENODEV;
- }
-
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE,
- SONYPI_CAMERA_MUTE_MASK),
- ITERATIONS_SHORT);
-
- if (spic_dev.camera_power) {
- sony_pic_call2(0x91, 0);
- spic_dev.camera_power = 0;
- }
- return 0;
-}
-
-static int __sony_pic_camera_on(void)
-{
- int i, j, x;
-
- if (!camera) {
- pr_warn("camera control not enabled\n");
- return -ENODEV;
- }
-
- if (spic_dev.camera_power)
- return 0;
-
- for (j = 5; j > 0; j--) {
-
- for (x = 0; x < 100 && sony_pic_call2(0x91, 0x1); x++)
- msleep(10);
- sony_pic_call1(0x93);
-
- for (i = 400; i > 0; i--) {
- if (__sony_pic_camera_ready())
- break;
- msleep(10);
- }
- if (i)
- break;
- }
-
- if (j == 0) {
- pr_warn("failed to power on camera\n");
- return -ENODEV;
- }
-
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTROL,
- 0x5a),
- ITERATIONS_SHORT);
-
- spic_dev.camera_power = 1;
- return 0;
-}
-
-/* External camera command (exported to the motion eye v4l driver) */
-int sony_pic_camera_command(int command, u8 value)
-{
- if (!camera)
- return -EIO;
-
- mutex_lock(&spic_dev.lock);
-
- switch (command) {
- case SONY_PIC_COMMAND_SETCAMERA:
- if (value)
- __sony_pic_camera_on();
- else
- __sony_pic_camera_off();
- break;
- case SONY_PIC_COMMAND_SETCAMERABRIGHTNESS:
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_BRIGHTNESS, value),
- ITERATIONS_SHORT);
- break;
- case SONY_PIC_COMMAND_SETCAMERACONTRAST:
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_CONTRAST, value),
- ITERATIONS_SHORT);
- break;
- case SONY_PIC_COMMAND_SETCAMERAHUE:
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_HUE, value),
- ITERATIONS_SHORT);
- break;
- case SONY_PIC_COMMAND_SETCAMERACOLOR:
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_COLOR, value),
- ITERATIONS_SHORT);
- break;
- case SONY_PIC_COMMAND_SETCAMERASHARPNESS:
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_SHARPNESS, value),
- ITERATIONS_SHORT);
- break;
- case SONY_PIC_COMMAND_SETCAMERAPICTURE:
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_PICTURE, value),
- ITERATIONS_SHORT);
- break;
- case SONY_PIC_COMMAND_SETCAMERAAGC:
- wait_on_command(sony_pic_call3(0x90, SONYPI_CAMERA_AGC, value),
- ITERATIONS_SHORT);
- break;
- default:
- pr_err("sony_pic_camera_command invalid: %d\n", command);
- break;
- }
- mutex_unlock(&spic_dev.lock);
- return 0;
-}
-EXPORT_SYMBOL(sony_pic_camera_command);
-
/* gprs/edge modem (SZ460N and SZ210P), thanks to Joshua Wise */
static void __sony_pic_set_wwanpower(u8 state)
{
@@ -4677,7 +4512,7 @@ static int sony_pic_add(struct acpi_device *device)
struct sony_pic_irq *irq, *tmp_irq;
spic_dev.acpi_dev = device;
- strcpy(acpi_device_class(device), "sony/hotkey");
+ strscpy(acpi_device_class(device), "sony/hotkey");
sony_pic_detect_device_type(&spic_dev);
mutex_init(&spic_dev.lock);
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 0fc275e461be..00b1e7c79a3d 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -1061,8 +1061,8 @@ static ssize_t current_value_store(struct kobject *kobj,
ret = -EINVAL;
goto out;
}
- set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
- new_setting, tlmi_priv.pwd_admin->signature);
+ set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name,
+ new_setting, tlmi_priv.pwd_admin->signature);
if (!set_str) {
ret = -ENOMEM;
goto out;
@@ -1092,7 +1092,7 @@ static ssize_t current_value_store(struct kobject *kobj,
goto out;
}
- set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
+ set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name,
new_setting);
if (!set_str) {
ret = -ENOMEM;
@@ -1120,11 +1120,11 @@ static ssize_t current_value_store(struct kobject *kobj,
}
if (auth_str)
- set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
- new_setting, auth_str);
+ set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name,
+ new_setting, auth_str);
else
- set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
- new_setting);
+ set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name,
+ new_setting);
if (!set_str) {
ret = -ENOMEM;
goto out;
@@ -1629,9 +1629,6 @@ static int tlmi_analyze(struct wmi_device *wdev)
continue;
}
- /* It is not allowed to have '/' for file name. Convert it into '\'. */
- strreplace(item, '/', '\\');
-
/* Remove the value part */
strreplace(item, ',', '\0');
@@ -1644,11 +1641,16 @@ static int tlmi_analyze(struct wmi_device *wdev)
}
setting->wdev = wdev;
setting->index = i;
+
+ strscpy(setting->name, item);
+ /* It is not allowed to have '/' for file name. Convert it into '\'. */
+ strreplace(item, '/', '\\');
strscpy(setting->display_name, item);
+
/* If BIOS selections supported, load those */
if (tlmi_priv.can_get_bios_selections) {
- ret = tlmi_get_bios_selections(setting->display_name,
- &setting->possible_values);
+ ret = tlmi_get_bios_selections(setting->name,
+ &setting->possible_values);
if (ret || !setting->possible_values)
pr_info("Error retrieving possible values for %d : %s\n",
i, setting->display_name);
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index a80452482227..9b014644d316 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -90,6 +90,7 @@ struct tlmi_attr_setting {
struct kobject kobj;
struct wmi_device *wdev;
int index;
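+ /* Raw BIOS item name, as passed to WMI; display_name has '/' converted to '\' for sysfs */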
+ char name[TLMI_SETTINGS_MAXLEN];
char display_name[TLMI_SETTINGS_MAXLEN];
char *possible_values;
};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 5790095c175e..e7350c9fa3aa 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -182,6 +182,7 @@ enum tpacpi_hkey_event_t {
* directly in the sparse-keymap.
*/
TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
+ TP_HKEY_EV_CAMERASHUTTER_TOGGLE = 0x131b, /* Toggle Camera Shutter */
TP_HKEY_EV_DOUBLETAP_TOGGLE = 0x131c, /* Toggle trackpoint doubletap on/off */
TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile in 2024 systems */
TP_HKEY_EV_PROFILE_TOGGLE2 = 0x1401, /* Toggle platform profile in 2025 + systems */
@@ -231,6 +232,7 @@ enum tpacpi_hkey_event_t {
/* Thermal events */
TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
+ TP_HKEY_EV_ALARM_BAT_LIM_CHANGE = 0x6013, /* battery charge limit changed */
TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */
TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* windows; thermal table changed */
@@ -836,9 +838,9 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
}
ibm->acpi->device->driver_data = ibm;
- sprintf(acpi_device_class(ibm->acpi->device), "%s/%s",
- TPACPI_ACPI_EVENT_PREFIX,
- ibm->name);
+ scnprintf(acpi_device_class(ibm->acpi->device),
+ sizeof(acpi_device_class(ibm->acpi->device)),
+ "%s/%s", TPACPI_ACPI_EVENT_PREFIX, ibm->name);
status = acpi_install_notify_handler(*ibm->acpi->handle,
ibm->acpi->type, dispatch_acpi_notify, ibm);
@@ -2250,6 +2252,25 @@ static void tpacpi_input_send_tabletsw(void)
}
}
+#define GCES_NO_SHUTTER_DEVICE BIT(31)
+
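+/*
+ * Returns the shutter state reported by the GCES ACPI method, or a negative
+ * errno; a set GCES_NO_SHUTTER_DEVICE bit means no shutter hardware exists.
+ */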
+static int get_camera_shutter(void)
+{
+ acpi_handle gces_handle;
+ int output;
+
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, "GCES", &gces_handle)))
+ return -ENODEV;
+
+ if (!acpi_evalf(gces_handle, &output, NULL, "dd", 0))
+ return -EIO;
+
+ if (output & GCES_NO_SHUTTER_DEVICE)
+ return -ENODEV;
+
+ return output;
+}
+
static bool tpacpi_input_send_key(const u32 hkey, bool *send_acpi_ev)
{
bool known_ev;
@@ -3303,7 +3324,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
const struct key_entry *keymap;
bool radiosw_state = false;
bool tabletsw_state = false;
- int hkeyv, res, status;
+ int hkeyv, res, status, camera_shutter_state;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
"initializing hotkey subdriver\n");
@@ -3467,6 +3488,12 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (res)
return res;
+ camera_shutter_state = get_camera_shutter();
+ if (camera_shutter_state >= 0) {
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_CAMERA_LENS_COVER);
+ input_report_switch(tpacpi_inputdev, SW_CAMERA_LENS_COVER, camera_shutter_state);
+ }
+
if (tp_features.hotkey_wlsw) {
input_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);
input_report_switch(tpacpi_inputdev,
@@ -3777,6 +3804,10 @@ static bool hotkey_notify_6xxx(const u32 hkey, bool *send_acpi_ev)
pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
+ case TP_HKEY_EV_ALARM_BAT_LIM_CHANGE:
+ pr_debug("Battery Info: battery charge threshold changed\n");
+ /* User changed charging threshold. No action needed */
+ return true;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
@@ -11161,6 +11192,8 @@ static struct platform_driver tpacpi_hwmon_pdriver = {
*/
static bool tpacpi_driver_event(const unsigned int hkey_event)
{
+ int camera_shutter_state;
+
switch (hkey_event) {
case TP_HKEY_EV_BRGHT_UP:
case TP_HKEY_EV_BRGHT_DOWN:
@@ -11237,6 +11270,19 @@ static bool tpacpi_driver_event(const unsigned int hkey_event)
dytc_control_amt(!dytc_amt_active);
return true;
+ case TP_HKEY_EV_CAMERASHUTTER_TOGGLE:
+ camera_shutter_state = get_camera_shutter();
+ if (camera_shutter_state < 0) {
+ pr_err("Error retrieving camera shutter state after shutter event\n");
+ return true;
+ }
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_switch(tpacpi_inputdev, SW_CAMERA_LENS_COVER, camera_shutter_state);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ return true;
case TP_HKEY_EV_DOUBLETAP_TOGGLE:
tp_features.trackpoint_doubletap = !tp_features.trackpoint_doubletap;
return true;
@@ -11478,6 +11524,8 @@ static int __must_check __init get_thinkpad_model_data(
tp->vendor = PCI_VENDOR_ID_IBM;
else if (dmi_name_in_vendors("LENOVO"))
tp->vendor = PCI_VENDOR_ID_LENOVO;
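+ /* NEC LAVIE laptops are Lenovo-built and use the same interfaces */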
+ else if (dmi_name_in_vendors("NEC"))
+ tp->vendor = PCI_VENDOR_ID_LENOVO;
else
return 0;
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 20df1ebefc30..53fc2b364552 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -296,8 +296,8 @@ static int topstar_acpi_add(struct acpi_device *device)
if (!topstar)
return -ENOMEM;
- strcpy(acpi_device_name(device), "Topstar TPSACPI");
- strcpy(acpi_device_class(device), TOPSTAR_LAPTOP_CLASS);
+ strscpy(acpi_device_name(device), "Topstar TPSACPI");
+ strscpy(acpi_device_class(device), TOPSTAR_LAPTOP_CLASS);
device->driver_data = topstar;
topstar->device = device;
diff --git a/drivers/platform/x86/tuxedo/Kconfig b/drivers/platform/x86/tuxedo/Kconfig
new file mode 100644
index 000000000000..80be0947dddc
--- /dev/null
+++ b/drivers/platform/x86/tuxedo/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024-2025 Werner Sembach wse@tuxedocomputers.com
+#
+# TUXEDO X86 Platform Specific Drivers
+#
+
+source "drivers/platform/x86/tuxedo/nb04/Kconfig"
diff --git a/drivers/platform/x86/tuxedo/Makefile b/drivers/platform/x86/tuxedo/Makefile
new file mode 100644
index 000000000000..0afe0d0f455e
--- /dev/null
+++ b/drivers/platform/x86/tuxedo/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024-2025 Werner Sembach wse@tuxedocomputers.com
+#
+# TUXEDO X86 Platform Specific Drivers
+#
+
+obj-y += nb04/
diff --git a/drivers/platform/x86/tuxedo/nb04/Kconfig b/drivers/platform/x86/tuxedo/nb04/Kconfig
new file mode 100644
index 000000000000..9e7a9f9230d1
--- /dev/null
+++ b/drivers/platform/x86/tuxedo/nb04/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024-2025 Werner Sembach wse@tuxedocomputers.com
+#
+# TUXEDO X86 Platform Specific Drivers
+#
+
+config TUXEDO_NB04_WMI_AB
+ tristate "TUXEDO NB04 WMI AB Platform Driver"
+ depends on ACPI_WMI
+ depends on HID
+ help
+ This driver implements the WMI AB device found on TUXEDO notebooks
+ with board vendor NB04. This enables keyboard backlight control via a
+ virtual HID LampArray device.
+
+ When compiled as a module it will be called tuxedo_nb04_wmi_ab.
diff --git a/drivers/platform/x86/tuxedo/nb04/Makefile b/drivers/platform/x86/tuxedo/nb04/Makefile
new file mode 100644
index 000000000000..c963e0d60505
--- /dev/null
+++ b/drivers/platform/x86/tuxedo/nb04/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2024-2025 Werner Sembach wse@tuxedocomputers.com
+#
+# TUXEDO X86 Platform Specific Drivers
+#
+
+tuxedo_nb04_wmi_ab-y := wmi_ab.o
+tuxedo_nb04_wmi_ab-y += wmi_util.o
+obj-$(CONFIG_TUXEDO_NB04_WMI_AB) += tuxedo_nb04_wmi_ab.o
diff --git a/drivers/platform/x86/tuxedo/nb04/wmi_ab.c b/drivers/platform/x86/tuxedo/nb04/wmi_ab.c
new file mode 100644
index 000000000000..32d7756022c2
--- /dev/null
+++ b/drivers/platform/x86/tuxedo/nb04/wmi_ab.c
@@ -0,0 +1,923 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This driver implements the WMI AB device found on TUXEDO notebooks with board
+ * vendor NB04.
+ *
+ * Copyright (C) 2024-2025 Werner Sembach <wse@tuxedocomputers.com>
+ */
+
+#include <linux/dmi.h>
+#include <linux/hid.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+#include "wmi_util.h"
+
+static const struct wmi_device_id tuxedo_nb04_wmi_ab_device_ids[] = {
+ { .guid_string = "80C9BAA6-AC48-4538-9234-9F81A55E7C85" },
+ { }
+};
+MODULE_DEVICE_TABLE(wmi, tuxedo_nb04_wmi_ab_device_ids);
+
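+/*
+ * Report IDs of the feature reports making up the HID LampArray interface,
+ * see chapter 26 "Lighting and Illumination Page" of "HID Usage Tables v1.5".
+ */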
+enum {
+ LAMP_ARRAY_ATTRIBUTES_REPORT_ID = 0x01,
+ LAMP_ATTRIBUTES_REQUEST_REPORT_ID = 0x02,
+ LAMP_ATTRIBUTES_RESPONSE_REPORT_ID = 0x03,
+ LAMP_MULTI_UPDATE_REPORT_ID = 0x04,
+ LAMP_RANGE_UPDATE_REPORT_ID = 0x05,
+ LAMP_ARRAY_CONTROL_REPORT_ID = 0x06,
+};
+
+static u8 tux_report_descriptor[327] = {
+ 0x05, 0x59, // Usage Page (Lighting and Illumination)
+ 0x09, 0x01, // Usage (Lamp Array)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, LAMP_ARRAY_ATTRIBUTES_REPORT_ID, // Report ID (1)
+ 0x09, 0x02, // Usage (Lamp Array Attributes Report)
+ 0xa1, 0x02, // Collection (Logical)
+ 0x09, 0x03, // Usage (Lamp Count)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x27, 0xff, 0xff, 0x00, 0x00, // Logical Maximum (65535)
+ 0x75, 0x10, // Report Size (16)
+ 0x95, 0x01, // Report Count (1)
+ 0xb1, 0x03, // Feature (Cnst,Var,Abs)
+ 0x09, 0x04, // Usage (Bounding Box Width In Micrometers)
+ 0x09, 0x05, // Usage (Bounding Box Height In Micrometers)
+ 0x09, 0x06, // Usage (Bounding Box Depth In Micrometers)
+ 0x09, 0x07, // Usage (Lamp Array Kind)
+ 0x09, 0x08, // Usage (Min Update Interval In Microseconds)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x27, 0xff, 0xff, 0xff, 0x7f, // Logical Maximum (2147483647)
+ 0x75, 0x20, // Report Size (32)
+ 0x95, 0x05, // Report Count (5)
+ 0xb1, 0x03, // Feature (Cnst,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, LAMP_ATTRIBUTES_REQUEST_REPORT_ID, // Report ID (2)
+ 0x09, 0x20, // Usage (Lamp Attributes Request Report)
+ 0xa1, 0x02, // Collection (Logical)
+ 0x09, 0x21, // Usage (Lamp Id)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x27, 0xff, 0xff, 0x00, 0x00, // Logical Maximum (65535)
+ 0x75, 0x10, // Report Size (16)
+ 0x95, 0x01, // Report Count (1)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, LAMP_ATTRIBUTES_RESPONSE_REPORT_ID, // Report ID (3)
+ 0x09, 0x22, // Usage (Lamp Attributes Response Report)
+ 0xa1, 0x02, // Collection (Logical)
+ 0x09, 0x21, // Usage (Lamp Id)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x27, 0xff, 0xff, 0x00, 0x00, // Logical Maximum (65535)
+ 0x75, 0x10, // Report Size (16)
+ 0x95, 0x01, // Report Count (1)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0x09, 0x23, // Usage (Position X In Micrometers)
+ 0x09, 0x24, // Usage (Position Y In Micrometers)
+ 0x09, 0x25, // Usage (Position Z In Micrometers)
+ 0x09, 0x27, // Usage (Update Latency In Microseconds)
+ 0x09, 0x26, // Usage (Lamp Purposes)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x27, 0xff, 0xff, 0xff, 0x7f, // Logical Maximum (2147483647)
+ 0x75, 0x20, // Report Size (32)
+ 0x95, 0x05, // Report Count (5)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0x09, 0x28, // Usage (Red Level Count)
+ 0x09, 0x29, // Usage (Green Level Count)
+ 0x09, 0x2a, // Usage (Blue Level Count)
+ 0x09, 0x2b, // Usage (Intensity Level Count)
+ 0x09, 0x2c, // Usage (Is Programmable)
+ 0x09, 0x2d, // Usage (Input Binding)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x06, // Report Count (6)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, LAMP_MULTI_UPDATE_REPORT_ID, // Report ID (4)
+ 0x09, 0x50, // Usage (Lamp Multi Update Report)
+ 0xa1, 0x02, // Collection (Logical)
+ 0x09, 0x03, // Usage (Lamp Count)
+ 0x09, 0x55, // Usage (Lamp Update Flags)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x08, // Logical Maximum (8)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x02, // Report Count (2)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0x09, 0x21, // Usage (Lamp Id)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x27, 0xff, 0xff, 0x00, 0x00, // Logical Maximum (65535)
+ 0x75, 0x10, // Report Size (16)
+ 0x95, 0x08, // Report Count (8)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x20, // Report Count (32)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, LAMP_RANGE_UPDATE_REPORT_ID, // Report ID (5)
+ 0x09, 0x60, // Usage (Lamp Range Update Report)
+ 0xa1, 0x02, // Collection (Logical)
+ 0x09, 0x55, // Usage (Lamp Update Flags)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x08, // Logical Maximum (8)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x01, // Report Count (1)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0x09, 0x61, // Usage (Lamp Id Start)
+ 0x09, 0x62, // Usage (Lamp Id End)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x27, 0xff, 0xff, 0x00, 0x00, // Logical Maximum (65535)
+ 0x75, 0x10, // Report Size (16)
+ 0x95, 0x02, // Report Count (2)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0x09, 0x51, // Usage (Red Update Channel)
+ 0x09, 0x52, // Usage (Green Update Channel)
+ 0x09, 0x53, // Usage (Blue Update Channel)
+ 0x09, 0x54, // Usage (Intensity Update Channel)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x04, // Report Count (4)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, LAMP_ARRAY_CONTROL_REPORT_ID, // Report ID (6)
+ 0x09, 0x70, // Usage (Lamp Array Control Report)
+ 0xa1, 0x02, // Collection (Logical)
+ 0x09, 0x71, // Usage (Autonomous Mode)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x01, // Report Count (1)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0xc0 // End Collection
+};
+
+struct tux_kbl_map_entry_t {
+ u8 code;
+ struct {
+ u32 x;
+ u32 y;
+ u32 z;
+ } pos;
+};
+
+static const struct tux_kbl_map_entry_t sirius_16_ansii_kbl_map[] = {
+ { 0x29, { 25000, 53000, 5000 } },
+ { 0x3a, { 41700, 53000, 5000 } },
+ { 0x3b, { 58400, 53000, 5000 } },
+ { 0x3c, { 75100, 53000, 5000 } },
+ { 0x3d, { 91800, 53000, 5000 } },
+ { 0x3e, { 108500, 53000, 5000 } },
+ { 0x3f, { 125200, 53000, 5000 } },
+ { 0x40, { 141900, 53000, 5000 } },
+ { 0x41, { 158600, 53000, 5000 } },
+ { 0x42, { 175300, 53000, 5000 } },
+ { 0x43, { 192000, 53000, 5000 } },
+ { 0x44, { 208700, 53000, 5000 } },
+ { 0x45, { 225400, 53000, 5000 } },
+ { 0xf1, { 242100, 53000, 5000 } },
+ { 0x46, { 258800, 53000, 5000 } },
+ { 0x4c, { 275500, 53000, 5000 } },
+ { 0x4a, { 294500, 53000, 5000 } },
+ { 0x4d, { 311200, 53000, 5000 } },
+ { 0x4b, { 327900, 53000, 5000 } },
+ { 0x4e, { 344600, 53000, 5000 } },
+ { 0x35, { 24500, 67500, 5250 } },
+ { 0x1e, { 42500, 67500, 5250 } },
+ { 0x1f, { 61000, 67500, 5250 } },
+ { 0x20, { 79500, 67500, 5250 } },
+ { 0x21, { 98000, 67500, 5250 } },
+ { 0x22, { 116500, 67500, 5250 } },
+ { 0x23, { 135000, 67500, 5250 } },
+ { 0x24, { 153500, 67500, 5250 } },
+ { 0x25, { 172000, 67500, 5250 } },
+ { 0x26, { 190500, 67500, 5250 } },
+ { 0x27, { 209000, 67500, 5250 } },
+ { 0x2d, { 227500, 67500, 5250 } },
+ { 0x2e, { 246000, 67500, 5250 } },
+ { 0x2a, { 269500, 67500, 5250 } },
+ { 0x53, { 294500, 67500, 5250 } },
+ { 0x55, { 311200, 67500, 5250 } },
+ { 0x54, { 327900, 67500, 5250 } },
+ { 0x56, { 344600, 67500, 5250 } },
+ { 0x2b, { 31000, 85500, 5500 } },
+ { 0x14, { 51500, 85500, 5500 } },
+ { 0x1a, { 70000, 85500, 5500 } },
+ { 0x08, { 88500, 85500, 5500 } },
+ { 0x15, { 107000, 85500, 5500 } },
+ { 0x17, { 125500, 85500, 5500 } },
+ { 0x1c, { 144000, 85500, 5500 } },
+ { 0x18, { 162500, 85500, 5500 } },
+ { 0x0c, { 181000, 85500, 5500 } },
+ { 0x12, { 199500, 85500, 5500 } },
+ { 0x13, { 218000, 85500, 5500 } },
+ { 0x2f, { 236500, 85500, 5500 } },
+ { 0x30, { 255000, 85500, 5500 } },
+ { 0x31, { 273500, 85500, 5500 } },
+ { 0x5f, { 294500, 85500, 5500 } },
+ { 0x60, { 311200, 85500, 5500 } },
+ { 0x61, { 327900, 85500, 5500 } },
+ { 0x39, { 33000, 103500, 5750 } },
+ { 0x04, { 57000, 103500, 5750 } },
+ { 0x16, { 75500, 103500, 5750 } },
+ { 0x07, { 94000, 103500, 5750 } },
+ { 0x09, { 112500, 103500, 5750 } },
+ { 0x0a, { 131000, 103500, 5750 } },
+ { 0x0b, { 149500, 103500, 5750 } },
+ { 0x0d, { 168000, 103500, 5750 } },
+ { 0x0e, { 186500, 103500, 5750 } },
+ { 0x0f, { 205000, 103500, 5750 } },
+ { 0x33, { 223500, 103500, 5750 } },
+ { 0x34, { 242000, 103500, 5750 } },
+ { 0x28, { 267500, 103500, 5750 } },
+ { 0x5c, { 294500, 103500, 5750 } },
+ { 0x5d, { 311200, 103500, 5750 } },
+ { 0x5e, { 327900, 103500, 5750 } },
+ { 0x57, { 344600, 94500, 5625 } },
+ { 0xe1, { 37000, 121500, 6000 } },
+ { 0x1d, { 66000, 121500, 6000 } },
+ { 0x1b, { 84500, 121500, 6000 } },
+ { 0x06, { 103000, 121500, 6000 } },
+ { 0x19, { 121500, 121500, 6000 } },
+ { 0x05, { 140000, 121500, 6000 } },
+ { 0x11, { 158500, 121500, 6000 } },
+ { 0x10, { 177000, 121500, 6000 } },
+ { 0x36, { 195500, 121500, 6000 } },
+ { 0x37, { 214000, 121500, 6000 } },
+ { 0x38, { 232500, 121500, 6000 } },
+ { 0xe5, { 251500, 121500, 6000 } },
+ { 0x52, { 273500, 129000, 6125 } },
+ { 0x59, { 294500, 121500, 6000 } },
+ { 0x5a, { 311200, 121500, 6000 } },
+ { 0x5b, { 327900, 121500, 6000 } },
+ { 0xe0, { 28000, 139500, 6250 } },
+ { 0xfe, { 47500, 139500, 6250 } },
+ { 0xe3, { 66000, 139500, 6250 } },
+ { 0xe2, { 84500, 139500, 6250 } },
+ { 0x2c, { 140000, 139500, 6250 } },
+ { 0xe6, { 195500, 139500, 6250 } },
+ { 0x65, { 214000, 139500, 6250 } },
+ { 0xe4, { 234000, 139500, 6250 } },
+ { 0x50, { 255000, 147000, 6375 } },
+ { 0x51, { 273500, 147000, 6375 } },
+ { 0x4f, { 292000, 147000, 6375 } },
+ { 0x62, { 311200, 139500, 6250 } },
+ { 0x63, { 327900, 139500, 6250 } },
+ { 0x58, { 344600, 130500, 6125 } },
+};
+
+static const struct tux_kbl_map_entry_t sirius_16_iso_kbl_map[] = {
+ { 0x29, { 25000, 53000, 5000 } },
+ { 0x3a, { 41700, 53000, 5000 } },
+ { 0x3b, { 58400, 53000, 5000 } },
+ { 0x3c, { 75100, 53000, 5000 } },
+ { 0x3d, { 91800, 53000, 5000 } },
+ { 0x3e, { 108500, 53000, 5000 } },
+ { 0x3f, { 125200, 53000, 5000 } },
+ { 0x40, { 141900, 53000, 5000 } },
+ { 0x41, { 158600, 53000, 5000 } },
+ { 0x42, { 175300, 53000, 5000 } },
+ { 0x43, { 192000, 53000, 5000 } },
+ { 0x44, { 208700, 53000, 5000 } },
+ { 0x45, { 225400, 53000, 5000 } },
+ { 0xf1, { 242100, 53000, 5000 } },
+ { 0x46, { 258800, 53000, 5000 } },
+ { 0x4c, { 275500, 53000, 5000 } },
+ { 0x4a, { 294500, 53000, 5000 } },
+ { 0x4d, { 311200, 53000, 5000 } },
+ { 0x4b, { 327900, 53000, 5000 } },
+ { 0x4e, { 344600, 53000, 5000 } },
+ { 0x35, { 24500, 67500, 5250 } },
+ { 0x1e, { 42500, 67500, 5250 } },
+ { 0x1f, { 61000, 67500, 5250 } },
+ { 0x20, { 79500, 67500, 5250 } },
+ { 0x21, { 98000, 67500, 5250 } },
+ { 0x22, { 116500, 67500, 5250 } },
+ { 0x23, { 135000, 67500, 5250 } },
+ { 0x24, { 153500, 67500, 5250 } },
+ { 0x25, { 172000, 67500, 5250 } },
+ { 0x26, { 190500, 67500, 5250 } },
+ { 0x27, { 209000, 67500, 5250 } },
+ { 0x2d, { 227500, 67500, 5250 } },
+ { 0x2e, { 246000, 67500, 5250 } },
+ { 0x2a, { 269500, 67500, 5250 } },
+ { 0x53, { 294500, 67500, 5250 } },
+ { 0x55, { 311200, 67500, 5250 } },
+ { 0x54, { 327900, 67500, 5250 } },
+ { 0x56, { 344600, 67500, 5250 } },
+ { 0x2b, { 31000, 85500, 5500 } },
+ { 0x14, { 51500, 85500, 5500 } },
+ { 0x1a, { 70000, 85500, 5500 } },
+ { 0x08, { 88500, 85500, 5500 } },
+ { 0x15, { 107000, 85500, 5500 } },
+ { 0x17, { 125500, 85500, 5500 } },
+ { 0x1c, { 144000, 85500, 5500 } },
+ { 0x18, { 162500, 85500, 5500 } },
+ { 0x0c, { 181000, 85500, 5500 } },
+ { 0x12, { 199500, 85500, 5500 } },
+ { 0x13, { 218000, 85500, 5500 } },
+ { 0x2f, { 234500, 85500, 5500 } },
+ { 0x30, { 251000, 85500, 5500 } },
+ { 0x5f, { 294500, 85500, 5500 } },
+ { 0x60, { 311200, 85500, 5500 } },
+ { 0x61, { 327900, 85500, 5500 } },
+ { 0x39, { 33000, 103500, 5750 } },
+ { 0x04, { 57000, 103500, 5750 } },
+ { 0x16, { 75500, 103500, 5750 } },
+ { 0x07, { 94000, 103500, 5750 } },
+ { 0x09, { 112500, 103500, 5750 } },
+ { 0x0a, { 131000, 103500, 5750 } },
+ { 0x0b, { 149500, 103500, 5750 } },
+ { 0x0d, { 168000, 103500, 5750 } },
+ { 0x0e, { 186500, 103500, 5750 } },
+ { 0x0f, { 205000, 103500, 5750 } },
+ { 0x33, { 223500, 103500, 5750 } },
+ { 0x34, { 240000, 103500, 5750 } },
+ { 0x32, { 256500, 103500, 5750 } },
+ { 0x28, { 271500, 94500, 5750 } },
+ { 0x5c, { 294500, 103500, 5750 } },
+ { 0x5d, { 311200, 103500, 5750 } },
+ { 0x5e, { 327900, 103500, 5750 } },
+ { 0x57, { 344600, 94500, 5625 } },
+ { 0xe1, { 28000, 121500, 6000 } },
+ { 0x64, { 47500, 121500, 6000 } },
+ { 0x1d, { 66000, 121500, 6000 } },
+ { 0x1b, { 84500, 121500, 6000 } },
+ { 0x06, { 103000, 121500, 6000 } },
+ { 0x19, { 121500, 121500, 6000 } },
+ { 0x05, { 140000, 121500, 6000 } },
+ { 0x11, { 158500, 121500, 6000 } },
+ { 0x10, { 177000, 121500, 6000 } },
+ { 0x36, { 195500, 121500, 6000 } },
+ { 0x37, { 214000, 121500, 6000 } },
+ { 0x38, { 232500, 121500, 6000 } },
+ { 0xe5, { 251500, 121500, 6000 } },
+ { 0x52, { 273500, 129000, 6125 } },
+ { 0x59, { 294500, 121500, 6000 } },
+ { 0x5a, { 311200, 121500, 6000 } },
+ { 0x5b, { 327900, 121500, 6000 } },
+ { 0xe0, { 28000, 139500, 6250 } },
+ { 0xfe, { 47500, 139500, 6250 } },
+ { 0xe3, { 66000, 139500, 6250 } },
+ { 0xe2, { 84500, 139500, 6250 } },
+ { 0x2c, { 140000, 139500, 6250 } },
+ { 0xe6, { 195500, 139500, 6250 } },
+ { 0x65, { 214000, 139500, 6250 } },
+ { 0xe4, { 234000, 139500, 6250 } },
+ { 0x50, { 255000, 147000, 6375 } },
+ { 0x51, { 273500, 147000, 6375 } },
+ { 0x4f, { 292000, 147000, 6375 } },
+ { 0x62, { 311200, 139500, 6250 } },
+ { 0x63, { 327900, 139500, 6250 } },
+ { 0x58, { 344600, 130500, 6125 } },
+};
+
+struct tux_driver_data_t {
+ struct hid_device *hdev;
+};
+
+struct tux_hdev_driver_data_t {
+ u8 lamp_count;
+ const struct tux_kbl_map_entry_t *kbl_map;
+ u8 next_lamp_id;
+ union tux_wmi_xx_496in_80out_in_t next_kbl_set_multiple_keys_in;
+};
+
+static int tux_ll_start(struct hid_device *hdev)
+{
+ struct wmi_device *wdev = to_wmi_device(hdev->dev.parent);
+ struct tux_hdev_driver_data_t *driver_data;
+ union tux_wmi_xx_8in_80out_out_t out;
+ union tux_wmi_xx_8in_80out_in_t in;
+ u8 keyboard_type;
+ int ret;
+
+ driver_data = devm_kzalloc(&hdev->dev, sizeof(*driver_data), GFP_KERNEL);
+ if (!driver_data)
+ return -ENOMEM;
+
+ in.get_device_status_in.device_type = TUX_GET_DEVICE_STATUS_DEVICE_ID_KEYBOARD;
+ ret = tux_wmi_xx_8in_80out(wdev, TUX_GET_DEVICE_STATUS, &in, &out);
+ if (ret)
+ return ret;
+
+ keyboard_type = out.get_device_status_out.keyboard_physical_layout;
+ if (keyboard_type == TUX_GET_DEVICE_STATUS_KEYBOARD_LAYOUT_ANSII) {
+ driver_data->lamp_count = ARRAY_SIZE(sirius_16_ansii_kbl_map);
+ driver_data->kbl_map = sirius_16_ansii_kbl_map;
+ } else if (keyboard_type == TUX_GET_DEVICE_STATUS_KEYBOARD_LAYOUT_ISO) {
+ driver_data->lamp_count = ARRAY_SIZE(sirius_16_iso_kbl_map);
+ driver_data->kbl_map = sirius_16_iso_kbl_map;
+ } else {
+ return -EINVAL;
+ }
+ driver_data->next_lamp_id = 0;
+
+ dev_set_drvdata(&hdev->dev, driver_data);
+
+ return ret;
+}
+
+static void tux_ll_stop(struct hid_device *hdev __always_unused)
+{
+}
+
+static int tux_ll_open(struct hid_device *hdev __always_unused)
+{
+ return 0;
+}
+
+static void tux_ll_close(struct hid_device *hdev __always_unused)
+{
+}
+
+static int tux_ll_parse(struct hid_device *hdev)
+{
+ return hid_parse_report(hdev, tux_report_descriptor,
+ sizeof(tux_report_descriptor));
+}
+
+struct __packed lamp_array_attributes_report_t {
+ const u8 report_id;
+ u16 lamp_count;
+ u32 bounding_box_width_in_micrometers;
+ u32 bounding_box_height_in_micrometers;
+ u32 bounding_box_depth_in_micrometers;
+ u32 lamp_array_kind;
+ u32 min_update_interval_in_microseconds;
+};
+
+static int handle_lamp_array_attributes_report(struct hid_device *hdev,
+ struct lamp_array_attributes_report_t *rep)
+{
+ struct tux_hdev_driver_data_t *driver_data = dev_get_drvdata(&hdev->dev);
+
+ rep->lamp_count = driver_data->lamp_count;
+ rep->bounding_box_width_in_micrometers = 368000;
+ rep->bounding_box_height_in_micrometers = 266000;
+ rep->bounding_box_depth_in_micrometers = 30000;
+ /*
+ * LampArrayKindKeyboard, see "26.2.1 LampArrayKind Values" of
+ * "HID Usage Tables v1.5"
+ */
+ rep->lamp_array_kind = 1;
+ // A guessed value for the minimum update interval, in microseconds
+ rep->min_update_interval_in_microseconds = 500;
+
+ return sizeof(*rep);
+}
+
+struct __packed lamp_attributes_request_report_t {
+ const u8 report_id;
+ u16 lamp_id;
+};
+
+static int handle_lamp_attributes_request_report(struct hid_device *hdev,
+ struct lamp_attributes_request_report_t *rep)
+{
+ struct tux_hdev_driver_data_t *driver_data = dev_get_drvdata(&hdev->dev);
+
+ if (rep->lamp_id < driver_data->lamp_count)
+ driver_data->next_lamp_id = rep->lamp_id;
+ else
+ driver_data->next_lamp_id = 0;
+
+ return sizeof(*rep);
+}
+
+struct __packed lamp_attributes_response_report_t {
+ const u8 report_id;
+ u16 lamp_id;
+ u32 position_x_in_micrometers;
+ u32 position_y_in_micrometers;
+ u32 position_z_in_micrometers;
+ u32 update_latency_in_microseconds;
+ u32 lamp_purpose;
+ u8 red_level_count;
+ u8 green_level_count;
+ u8 blue_level_count;
+ u8 intensity_level_count;
+ u8 is_programmable;
+ u8 input_binding;
+};
+
+static int handle_lamp_attributes_response_report(struct hid_device *hdev,
+ struct lamp_attributes_response_report_t *rep)
+{
+ struct tux_hdev_driver_data_t *driver_data = dev_get_drvdata(&hdev->dev);
+ u16 lamp_id = driver_data->next_lamp_id;
+
+ rep->lamp_id = lamp_id;
+ // A guessed value for the update latency, in microseconds
+ rep->update_latency_in_microseconds = 100;
+ /*
+ * LampPurposeControl, see "26.3.1 LampPurposes Flags" of
+ * "HID Usage Tables v1.5"
+ */
+ rep->lamp_purpose = 1;
+ rep->red_level_count = 0xff;
+ rep->green_level_count = 0xff;
+ rep->blue_level_count = 0xff;
+ rep->intensity_level_count = 0xff;
+ rep->is_programmable = 1;
+
+ if (driver_data->kbl_map[lamp_id].code <= 0xe8) {
+ rep->input_binding = driver_data->kbl_map[lamp_id].code;
+ } else {
+ /*
+ * Everything bigger is reserved/undefined, see
+ * "10 Keyboard/Keypad Page (0x07)" of "HID Usage Tables v1.5"
+ * and should return 0, see "26.8.3 Lamp Attributes" of the same
+ * document.
+ */
+ rep->input_binding = 0;
+ }
+ rep->position_x_in_micrometers = driver_data->kbl_map[lamp_id].pos.x;
+ rep->position_y_in_micrometers = driver_data->kbl_map[lamp_id].pos.y;
+ rep->position_z_in_micrometers = driver_data->kbl_map[lamp_id].pos.z;
+
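+ /* The lamp id auto-advances after each read, wrapping at lamp_count. */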
+ driver_data->next_lamp_id = (driver_data->next_lamp_id + 1) % driver_data->lamp_count;
+
+ return sizeof(*rep);
+}
+
+#define LAMP_UPDATE_FLAGS_LAMP_UPDATE_COMPLETE BIT(0)
+
+struct __packed lamp_rgbi_tuple_t {
+ u8 red;
+ u8 green;
+ u8 blue;
+ u8 intensity;
+};
+
+#define LAMP_MULTI_UPDATE_REPORT_LAMP_COUNT_MAX 8
+
+struct __packed lamp_multi_update_report_t {
+ const u8 report_id;
+ u8 lamp_count;
+ u8 lamp_update_flags;
+ u16 lamp_id[LAMP_MULTI_UPDATE_REPORT_LAMP_COUNT_MAX];
+ struct lamp_rgbi_tuple_t update_channels[LAMP_MULTI_UPDATE_REPORT_LAMP_COUNT_MAX];
+};
+
+static int handle_lamp_multi_update_report(struct hid_device *hdev,
+ struct lamp_multi_update_report_t *rep)
+{
+ struct tux_hdev_driver_data_t *driver_data = dev_get_drvdata(&hdev->dev);
+ union tux_wmi_xx_496in_80out_in_t *next = &driver_data->next_kbl_set_multiple_keys_in;
+ struct tux_kbl_set_multiple_keys_in_rgb_config_t *rgb_configs_j;
+ struct wmi_device *wdev = to_wmi_device(hdev->dev.parent);
+ union tux_wmi_xx_496in_80out_out_t out;
+ u8 key_id, key_id_j, intensity_i, red_i, green_i, blue_i;
+ int ret;
+
+ /*
+ * Catch a misformatted lamp_multi_update_report and fail silently,
+ * as required by "HID Usage Tables v1.5".
+ */
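+ /* This bounds check is an added safeguard: lamp_count comes straight from userspace. */
+ if (rep->lamp_count > LAMP_MULTI_UPDATE_REPORT_LAMP_COUNT_MAX) {
+ hid_dbg(hdev, "Out of bounds lamp_count in lamp_multi_update_report. Skipping whole report!\n");
+ return sizeof(*rep);
+ }
+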
+ for (unsigned int i = 0; i < rep->lamp_count; ++i) {
+ if (rep->lamp_id[i] >= driver_data->lamp_count) {
+ hid_dbg(hdev, "Out of bounds lamp_id in lamp_multi_update_report. Skipping whole report!\n");
+ return sizeof(*rep);
+ }
+
+ for (unsigned int j = i + 1; j < rep->lamp_count; ++j) {
+ if (rep->lamp_id[i] == rep->lamp_id[j]) {
+ hid_dbg(hdev, "Duplicate lamp_id in lamp_multi_update_report. Skipping whole report!\n");
+ return sizeof(*rep);
+ }
+ }
+ }
+
+ for (unsigned int i = 0; i < rep->lamp_count; ++i) {
+ key_id = driver_data->kbl_map[rep->lamp_id[i]].code;
+
+ for (unsigned int j = 0;
+ j < TUX_KBL_SET_MULTIPLE_KEYS_LIGHTING_SETTINGS_COUNT_MAX;
+ ++j) {
+ rgb_configs_j = &next->kbl_set_multiple_keys_in.rgb_configs[j];
+ key_id_j = rgb_configs_j->key_id;
+ if (key_id_j != 0x00 && key_id_j != key_id)
+ continue;
+
+ if (key_id_j == 0x00)
+ next->kbl_set_multiple_keys_in.rgb_configs_cnt =
+ j + 1;
+ rgb_configs_j->key_id = key_id;
+ /*
+ * While this driver respects update_channel.intensity
+ * according to "HID Usage Tables v1.5" also on RGB
+ * leds, the Microsoft MacroPad reference implementation
+ * (https://github.com/microsoft/RP2040MacropadHidSample
+ * 1d6c3ad) does not and ignores it. If it turns out
+ * that Windows writes intensity = 0 for RGB leds
+ * instead of intensity = 255, this driver should also
+ * ignore the update_channel.intensity.
+ */
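+ /*
+ * Scale each channel by intensity, e.g. red = 200 at
+ * intensity = 128 is stored as 200 * 128 / 255 = 100.
+ */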
+ intensity_i = rep->update_channels[i].intensity;
+ red_i = rep->update_channels[i].red;
+ green_i = rep->update_channels[i].green;
+ blue_i = rep->update_channels[i].blue;
+ rgb_configs_j->red = red_i * intensity_i / 0xff;
+ rgb_configs_j->green = green_i * intensity_i / 0xff;
+ rgb_configs_j->blue = blue_i * intensity_i / 0xff;
+
+ break;
+ }
+ }
+
+ if (rep->lamp_update_flags & LAMP_UPDATE_FLAGS_LAMP_UPDATE_COMPLETE) {
+ ret = tux_wmi_xx_496in_80out(wdev, TUX_KBL_SET_MULTIPLE_KEYS,
+ next, &out);
+ memset(next, 0, sizeof(*next));
+ if (ret)
+ return ret;
+ }
+
+ return sizeof(*rep);
+}
+
+struct __packed lamp_range_update_report_t {
+ const u8 report_id;
+ u8 lamp_update_flags;
+ u16 lamp_id_start;
+ u16 lamp_id_end;
+ struct lamp_rgbi_tuple_t update_channel;
+};
+
+static int handle_lamp_range_update_report(struct hid_device *hdev,
+ struct lamp_range_update_report_t *rep)
+{
+ struct tux_hdev_driver_data_t *driver_data = dev_get_drvdata(&hdev->dev);
+ struct lamp_multi_update_report_t lamp_multi_update_report = {
+ .report_id = LAMP_MULTI_UPDATE_REPORT_ID,
+ };
+ struct lamp_rgbi_tuple_t *update_channels_j;
+ int ret;
+
+ /*
+ * Catch a misformatted lamp_range_update_report and fail silently,
+ * as required by "HID Usage Tables v1.5".
+ */
+ if (rep->lamp_id_start > rep->lamp_id_end) {
+ hid_dbg(hdev, "lamp_id_start > lamp_id_end in lamp_range_update_report. Skipping whole report!\n");
+ return sizeof(*rep);
+ }
+
+ if (rep->lamp_id_end > driver_data->lamp_count - 1) {
+ hid_dbg(hdev, "Out of bounds lamp_id_end in lamp_range_update_report. Skipping whole report!\n");
+ return sizeof(*rep);
+ }
+
+ /*
+ * Break handle_lamp_range_update_report call down to multiple
+ * handle_lamp_multi_update_report calls to easily ensure that mixing
+ * handle_lamp_range_update_report and handle_lamp_multi_update_report
+ * does not break things.
+ */
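+ /*
+ * E.g. lamp_id_start = 0, lamp_id_end = 19 is sent as three chunks of
+ * 8, 8 and 4 lamps, with the update-complete flag only on the last one.
+ */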
+ for (unsigned int i = rep->lamp_id_start; i < rep->lamp_id_end + 1;
+ i = i + LAMP_MULTI_UPDATE_REPORT_LAMP_COUNT_MAX) {
+ lamp_multi_update_report.lamp_count =
+ min(rep->lamp_id_end + 1 - i,
+ LAMP_MULTI_UPDATE_REPORT_LAMP_COUNT_MAX);
+ lamp_multi_update_report.lamp_update_flags =
+ i + LAMP_MULTI_UPDATE_REPORT_LAMP_COUNT_MAX >=
+ rep->lamp_id_end + 1 ?
+ LAMP_UPDATE_FLAGS_LAMP_UPDATE_COMPLETE : 0;
+
+ for (unsigned int j = 0; j < lamp_multi_update_report.lamp_count; ++j) {
+ lamp_multi_update_report.lamp_id[j] = i + j;
+ update_channels_j =
+ &lamp_multi_update_report.update_channels[j];
+ update_channels_j->red = rep->update_channel.red;
+ update_channels_j->green = rep->update_channel.green;
+ update_channels_j->blue = rep->update_channel.blue;
+ update_channels_j->intensity = rep->update_channel.intensity;
+ }
+
+ ret = handle_lamp_multi_update_report(hdev, &lamp_multi_update_report);
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(lamp_multi_update_report))
+ return -EIO;
+ }
+
+ return sizeof(*rep);
+}
+
+struct __packed lamp_array_control_report_t {
+ const u8 report_id;
+ u8 autonomous_mode;
+};
+
+static int handle_lamp_array_control_report(struct hid_device *hdev __always_unused,
+ struct lamp_array_control_report_t *rep)
+{
+ /*
+ * The keyboard's firmware doesn't have any built-in controls and the
+ * built-in effects are not implemented, so this is a no-op.
+ * According to the HID Documentation (HID Usage Tables v1.5) this
+ * function is optional and can be removed from the HID Report
+ * Descriptor, but it should first be confirmed that userspace respects
+ * this possibility too. The Microsoft MacroPad reference implementation
+ * (https://github.com/microsoft/RP2040MacropadHidSample 1d6c3ad)
+ * already deviates from the spec at another point, see
+ * handle_lamp_*_update_report.
+ */
+
+ return sizeof(*rep);
+}
+
+static int tux_ll_raw_request(struct hid_device *hdev, u8 reportnum, u8 *buf,
+ size_t len, unsigned char rtype, int reqtype)
+{
+ if (rtype != HID_FEATURE_REPORT)
+ return -EINVAL;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ switch (reportnum) {
+ case LAMP_ARRAY_ATTRIBUTES_REPORT_ID:
+ if (len != sizeof(struct lamp_array_attributes_report_t))
+ return -EINVAL;
+ return handle_lamp_array_attributes_report(hdev,
+ (struct lamp_array_attributes_report_t *)buf);
+ case LAMP_ATTRIBUTES_RESPONSE_REPORT_ID:
+ if (len != sizeof(struct lamp_attributes_response_report_t))
+ return -EINVAL;
+ return handle_lamp_attributes_response_report(hdev,
+ (struct lamp_attributes_response_report_t *)buf);
+ }
+ break;
+ case HID_REQ_SET_REPORT:
+ switch (reportnum) {
+ case LAMP_ATTRIBUTES_REQUEST_REPORT_ID:
+ if (len != sizeof(struct lamp_attributes_request_report_t))
+ return -EINVAL;
+ return handle_lamp_attributes_request_report(hdev,
+ (struct lamp_attributes_request_report_t *)buf);
+ case LAMP_MULTI_UPDATE_REPORT_ID:
+ if (len != sizeof(struct lamp_multi_update_report_t))
+ return -EINVAL;
+ return handle_lamp_multi_update_report(hdev,
+ (struct lamp_multi_update_report_t *)buf);
+ case LAMP_RANGE_UPDATE_REPORT_ID:
+ if (len != sizeof(struct lamp_range_update_report_t))
+ return -EINVAL;
+ return handle_lamp_range_update_report(hdev,
+ (struct lamp_range_update_report_t *)buf);
+ case LAMP_ARRAY_CONTROL_REPORT_ID:
+ if (len != sizeof(struct lamp_array_control_report_t))
+ return -EINVAL;
+ return handle_lamp_array_control_report(hdev,
+ (struct lamp_array_control_report_t *)buf);
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct hid_ll_driver tux_ll_driver = {
+ .start = &tux_ll_start,
+ .stop = &tux_ll_stop,
+ .open = &tux_ll_open,
+ .close = &tux_ll_close,
+ .parse = &tux_ll_parse,
+ .raw_request = &tux_ll_raw_request,
+};
+
+static int tux_virt_lamparray_add_device(struct wmi_device *wdev,
+ struct hid_device **hdev_out)
+{
+ struct hid_device *hdev;
+ int ret;
+
+ dev_dbg(&wdev->dev, "Adding TUXEDO NB04 Virtual LampArray device.\n");
+
+ hdev = hid_allocate_device();
+ if (IS_ERR(hdev))
+ return PTR_ERR(hdev);
+ *hdev_out = hdev;
+
+ strscpy(hdev->name, "TUXEDO NB04 RGB Lighting", sizeof(hdev->name));
+
+ hdev->ll_driver = &tux_ll_driver;
+ hdev->bus = BUS_VIRTUAL;
+ hdev->vendor = 0x21ba;
+ hdev->product = 0x0400;
+ hdev->dev.parent = &wdev->dev;
+
+ ret = hid_add_device(hdev);
+ if (ret)
+ hid_destroy_device(hdev);
+ return ret;
+}
+
+static int tux_probe(struct wmi_device *wdev, const void *context __always_unused)
+{
+ struct tux_driver_data_t *driver_data;
+
+ driver_data = devm_kzalloc(&wdev->dev, sizeof(*driver_data), GFP_KERNEL);
+ if (!driver_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, driver_data);
+
+ return tux_virt_lamparray_add_device(wdev, &driver_data->hdev);
+}
+
+static void tux_remove(struct wmi_device *wdev)
+{
+ struct tux_driver_data_t *driver_data = dev_get_drvdata(&wdev->dev);
+
+ hid_destroy_device(driver_data->hdev);
+}
+
+static struct wmi_driver tuxedo_nb04_wmi_tux_driver = {
+ .driver = {
+ .name = "tuxedo_nb04_wmi_ab",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = tuxedo_nb04_wmi_ab_device_ids,
+ .probe = tux_probe,
+ .remove = tux_remove,
+ .no_singleton = true,
+};
+
+/*
+ * We don't know if the WMI API is stable or how unique the GUID is for this
+ * ODM. To be on the safe side we therefore only run this driver on tested
+ * devices defined by this list.
+ */
+static const struct dmi_system_id tested_devices_dmi_table[] __initconst = {
+ {
+ // TUXEDO Sirius 16 Gen1
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "APX958"),
+ },
+ },
+ {
+ // TUXEDO Sirius 16 Gen2
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AHP958"),
+ },
+ },
+ { }
+};
+
+static int __init tuxedo_nb04_wmi_tux_init(void)
+{
+ if (!dmi_check_system(tested_devices_dmi_table))
+ return -ENODEV;
+
+ return wmi_driver_register(&tuxedo_nb04_wmi_tux_driver);
+}
+module_init(tuxedo_nb04_wmi_tux_init);
+
+static void __exit tuxedo_nb04_wmi_tux_exit(void)
+{
+	wmi_driver_unregister(&tuxedo_nb04_wmi_tux_driver);
+}
+module_exit(tuxedo_nb04_wmi_tux_exit);
+
+MODULE_DESCRIPTION("Virtual HID LampArray interface for TUXEDO NB04 devices");
+MODULE_AUTHOR("Werner Sembach <wse@tuxedocomputers.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/tuxedo/nb04/wmi_util.c b/drivers/platform/x86/tuxedo/nb04/wmi_util.c
new file mode 100644
index 000000000000..e894690da1a8
--- /dev/null
+++ b/drivers/platform/x86/tuxedo/nb04/wmi_util.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Helper functions that avoid code duplication when interacting with the
+ * TUXEDO NB04 WMI interfaces.
+ *
+ * Copyright (C) 2024-2025 Werner Sembach <wse@tuxedocomputers.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cleanup.h>
+#include <linux/wmi.h>
+
+#include "wmi_util.h"
+
+static int __wmi_method_acpi_object_out(struct wmi_device *wdev,
+ u32 wmi_method_id,
+ u8 *in,
+ acpi_size in_len,
+ union acpi_object **out)
+{
+ struct acpi_buffer acpi_buffer_in = { in_len, in };
+ struct acpi_buffer acpi_buffer_out = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ dev_dbg(&wdev->dev, "Evaluate WMI method: %u in:\n", wmi_method_id);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, in, in_len);
+
+ acpi_status status = wmidev_evaluate_method(wdev, 0, wmi_method_id,
+ &acpi_buffer_in,
+ &acpi_buffer_out);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&wdev->dev, "Failed to evaluate WMI method.\n");
+ return -EIO;
+ }
+ if (!acpi_buffer_out.pointer) {
+ dev_err(&wdev->dev, "Unexpected empty out buffer.\n");
+ return -ENODATA;
+ }
+
+ *out = acpi_buffer_out.pointer;
+
+ return 0;
+}
+
+static int __wmi_method_buffer_out(struct wmi_device *wdev,
+ u32 wmi_method_id,
+ u8 *in,
+ acpi_size in_len,
+ u8 *out,
+ acpi_size out_len)
+{
+ int ret;
+
+ union acpi_object *acpi_object_out __free(kfree) = NULL;
+
+ ret = __wmi_method_acpi_object_out(wdev, wmi_method_id,
+ in, in_len,
+ &acpi_object_out);
+ if (ret)
+ return ret;
+
+ if (acpi_object_out->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Unexpected out buffer type. Expected: %u Got: %u\n",
+ ACPI_TYPE_BUFFER, acpi_object_out->type);
+ return -EIO;
+ }
+ if (acpi_object_out->buffer.length < out_len) {
+ dev_err(&wdev->dev, "Unexpected out buffer length.\n");
+ return -EIO;
+ }
+
+ memcpy(out, acpi_object_out->buffer.pointer, out_len);
+
+ return 0;
+}
+
+int tux_wmi_xx_8in_80out(struct wmi_device *wdev,
+ enum tux_wmi_xx_8in_80out_methods method,
+ union tux_wmi_xx_8in_80out_in_t *in,
+ union tux_wmi_xx_8in_80out_out_t *out)
+{
+ return __wmi_method_buffer_out(wdev, method, in->raw, 8, out->raw, 80);
+}
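+
+/*
+ * A minimal caller sketch (illustrative only): querying the keyboard status
+ * through the 8-byte-in/80-byte-out method.
+ *
+ *	union tux_wmi_xx_8in_80out_in_t in = { 0 };
+ *	union tux_wmi_xx_8in_80out_out_t out;
+ *	u8 kbl_type;
+ *	int ret;
+ *
+ *	in.get_device_status_in.device_type =
+ *		TUX_GET_DEVICE_STATUS_DEVICE_ID_KEYBOARD;
+ *	ret = tux_wmi_xx_8in_80out(wdev, TUX_GET_DEVICE_STATUS, &in, &out);
+ *	if (!ret)
+ *		kbl_type = out.get_device_status_out.kbl_type;
+ */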
+
+int tux_wmi_xx_496in_80out(struct wmi_device *wdev,
+ enum tux_wmi_xx_496in_80out_methods method,
+ union tux_wmi_xx_496in_80out_in_t *in,
+ union tux_wmi_xx_496in_80out_out_t *out)
+{
+ return __wmi_method_buffer_out(wdev, method, in->raw, 496, out->raw, 80);
+}
diff --git a/drivers/platform/x86/tuxedo/nb04/wmi_util.h b/drivers/platform/x86/tuxedo/nb04/wmi_util.h
new file mode 100644
index 000000000000..c44093fd5093
--- /dev/null
+++ b/drivers/platform/x86/tuxedo/nb04/wmi_util.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper functions that avoid code duplication when interacting with the
+ * TUXEDO NB04 WMI interfaces.
+ *
+ * Copyright (C) 2024-2025 Werner Sembach <wse@tuxedocomputers.com>
+ */
+
+#ifndef TUXEDO_NB04_WMI_UTIL_H
+#define TUXEDO_NB04_WMI_UTIL_H
+
+#include <linux/wmi.h>
+
+#define TUX_GET_DEVICE_STATUS_DEVICE_ID_TOUCHPAD 1
+#define TUX_GET_DEVICE_STATUS_DEVICE_ID_KEYBOARD 2
+#define TUX_GET_DEVICE_STATUS_DEVICE_ID_APP_PAGES 3
+
+#define TUX_GET_DEVICE_STATUS_KBL_TYPE_NONE 0
+#define TUX_GET_DEVICE_STATUS_KBL_TYPE_PER_KEY 1
+#define TUX_GET_DEVICE_STATUS_KBL_TYPE_FOUR_ZONE 2
+#define TUX_GET_DEVICE_STATUS_KBL_TYPE_WHITE_ONLY 3
+
+#define TUX_GET_DEVICE_STATUS_KEYBOARD_LAYOUT_ANSII 0
+#define TUX_GET_DEVICE_STATUS_KEYBOARD_LAYOUT_ISO 1
+
+#define TUX_GET_DEVICE_STATUS_COLOR_ID_RED 1
+#define TUX_GET_DEVICE_STATUS_COLOR_ID_GREEN 2
+#define TUX_GET_DEVICE_STATUS_COLOR_ID_YELLOW 3
+#define TUX_GET_DEVICE_STATUS_COLOR_ID_BLUE 4
+#define TUX_GET_DEVICE_STATUS_COLOR_ID_PURPLE 5
+#define TUX_GET_DEVICE_STATUS_COLOR_ID_INDIGO 6
+#define TUX_GET_DEVICE_STATUS_COLOR_ID_WHITE 7
+
+#define TUX_GET_DEVICE_STATUS_APP_PAGES_DASHBOARD BIT(0)
+#define TUX_GET_DEVICE_STATUS_APP_PAGES_SYSTEMINFOS BIT(1)
+#define TUX_GET_DEVICE_STATUS_APP_PAGES_KBL BIT(2)
+#define TUX_GET_DEVICE_STATUS_APP_PAGES_HOTKEYS BIT(3)
+
+union tux_wmi_xx_8in_80out_in_t {
+ u8 raw[8];
+ struct __packed {
+ u8 device_type;
+ u8 reserved[7];
+ } get_device_status_in;
+};
+
+union tux_wmi_xx_8in_80out_out_t {
+ u8 raw[80];
+ struct __packed {
+ u16 return_status;
+ u8 device_enabled;
+ u8 kbl_type;
+ u8 kbl_side_bar_supported;
+ u8 keyboard_physical_layout;
+ u8 app_pages;
+ u8 per_key_kbl_default_color;
+ u8 four_zone_kbl_default_color_1;
+ u8 four_zone_kbl_default_color_2;
+ u8 four_zone_kbl_default_color_3;
+ u8 four_zone_kbl_default_color_4;
+ u8 light_bar_kbl_default_color;
+ u8 reserved_0[1];
+ u16 dedicated_gpu_id;
+ u8 reserved_1[64];
+ } get_device_status_out;
+};
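+
+/*
+ * Size check: the packed get_device_status_out fields add up to 16 bytes,
+ * and the trailing 64 reserved bytes bring the total to 80, matching the
+ * raw buffer.
+ */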
+
+enum tux_wmi_xx_8in_80out_methods {
+ TUX_GET_DEVICE_STATUS = 2,
+};
+
+#define TUX_KBL_SET_MULTIPLE_KEYS_LIGHTING_SETTINGS_COUNT_MAX 120
+
+union tux_wmi_xx_496in_80out_in_t {
+ u8 raw[496];
+ struct __packed {
+ u8 reserved[15];
+ u8 rgb_configs_cnt;
+ struct tux_kbl_set_multiple_keys_in_rgb_config_t {
+ u8 key_id;
+ u8 red;
+ u8 green;
+ u8 blue;
+ } rgb_configs[TUX_KBL_SET_MULTIPLE_KEYS_LIGHTING_SETTINGS_COUNT_MAX];
+ } kbl_set_multiple_keys_in;
+};
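+
+/*
+ * Size check: 15 reserved bytes + 1 count byte + 120 * 4 bytes of RGB
+ * configs = 496, matching the raw buffer exactly.
+ */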
+
+union tux_wmi_xx_496in_80out_out_t {
+ u8 raw[80];
+ struct __packed {
+ u8 return_value;
+ u8 reserved[79];
+ } kbl_set_multiple_keys_out;
+};
+
+enum tux_wmi_xx_496in_80out_methods {
+ TUX_KBL_SET_MULTIPLE_KEYS = 6,
+};
+
+int tux_wmi_xx_8in_80out(struct wmi_device *wdev,
+ enum tux_wmi_xx_8in_80out_methods method,
+ union tux_wmi_xx_8in_80out_in_t *in,
+ union tux_wmi_xx_8in_80out_out_t *out);
+int tux_wmi_xx_496in_80out(struct wmi_device *wdev,
+ enum tux_wmi_xx_496in_80out_methods method,
+ union tux_wmi_xx_496in_80out_in_t *in,
+ union tux_wmi_xx_496in_80out_out_t *out);
+
+#endif
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index df2bf1c58523..cb02222c978c 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -84,7 +84,6 @@ static int ebook_switch_add(struct acpi_device *device)
const struct acpi_device_id *id;
struct ebook_switch *button;
struct input_dev *input;
- char *name, *class;
int error;
button = kzalloc(sizeof(struct ebook_switch), GFP_KERNEL);
@@ -99,9 +98,6 @@ static int ebook_switch_add(struct acpi_device *device)
goto err_free_button;
}
- name = acpi_device_name(device);
- class = acpi_device_class(device);
-
id = acpi_match_acpi_device(ebook_device_ids, device);
if (!id) {
dev_err(&device->dev, "Unsupported hid\n");
@@ -109,12 +105,12 @@ static int ebook_switch_add(struct acpi_device *device)
goto err_free_input;
}
- strcpy(name, XO15_EBOOK_DEVICE_NAME);
- sprintf(class, "%s/%s", XO15_EBOOK_CLASS, XO15_EBOOK_SUBCLASS);
+ strscpy(acpi_device_name(device), XO15_EBOOK_DEVICE_NAME);
+ strscpy(acpi_device_class(device), XO15_EBOOK_CLASS "/" XO15_EBOOK_SUBCLASS);
snprintf(button->phys, sizeof(button->phys), "%s/button/input0", id->id);
- input->name = name;
+ input->name = acpi_device_name(device);
input->phys = button->phys;
input->id.bustype = BUS_HOST;
input->dev.parent = &device->dev;
diff --git a/drivers/pmdomain/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
index fbb2b4103930..55c8c9f66a1b 100644
--- a/drivers/pmdomain/amlogic/meson-ee-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
@@ -69,27 +69,27 @@ struct meson_ee_pwrc_domain_desc {
char *name;
unsigned int reset_names_count;
unsigned int clk_names_count;
- struct meson_ee_pwrc_top_domain *top_pd;
+ const struct meson_ee_pwrc_top_domain *top_pd;
unsigned int mem_pd_count;
- struct meson_ee_pwrc_mem_domain *mem_pd;
+ const struct meson_ee_pwrc_mem_domain *mem_pd;
bool (*is_powered_off)(struct meson_ee_pwrc_domain *pwrc_domain);
};
struct meson_ee_pwrc_domain_data {
unsigned int count;
- struct meson_ee_pwrc_domain_desc *domains;
+ const struct meson_ee_pwrc_domain_desc *domains;
};
/* TOP Power Domains */
-static struct meson_ee_pwrc_top_domain gx_pwrc_vpu = {
+static const struct meson_ee_pwrc_top_domain gx_pwrc_vpu = {
.sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(8),
.iso_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
.iso_mask = BIT(9),
};
-static struct meson_ee_pwrc_top_domain meson8_pwrc_vpu = {
+static const struct meson_ee_pwrc_top_domain meson8_pwrc_vpu = {
.sleep_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(8),
.iso_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
@@ -104,20 +104,20 @@ static struct meson_ee_pwrc_top_domain meson8_pwrc_vpu = {
.iso_mask = BIT(__bit), \
}
-static struct meson_ee_pwrc_top_domain sm1_pwrc_vpu = SM1_EE_PD(8);
-static struct meson_ee_pwrc_top_domain sm1_pwrc_nna = SM1_EE_PD(16);
-static struct meson_ee_pwrc_top_domain sm1_pwrc_usb = SM1_EE_PD(17);
-static struct meson_ee_pwrc_top_domain sm1_pwrc_pci = SM1_EE_PD(18);
-static struct meson_ee_pwrc_top_domain sm1_pwrc_ge2d = SM1_EE_PD(19);
+static const struct meson_ee_pwrc_top_domain sm1_pwrc_vpu = SM1_EE_PD(8);
+static const struct meson_ee_pwrc_top_domain sm1_pwrc_nna = SM1_EE_PD(16);
+static const struct meson_ee_pwrc_top_domain sm1_pwrc_usb = SM1_EE_PD(17);
+static const struct meson_ee_pwrc_top_domain sm1_pwrc_pci = SM1_EE_PD(18);
+static const struct meson_ee_pwrc_top_domain sm1_pwrc_ge2d = SM1_EE_PD(19);
-static struct meson_ee_pwrc_top_domain g12a_pwrc_nna = {
+static const struct meson_ee_pwrc_top_domain g12a_pwrc_nna = {
.sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(16) | BIT(17),
.iso_reg = GX_AO_RTI_GEN_PWR_ISO0,
.iso_mask = BIT(16) | BIT(17),
};
-static struct meson_ee_pwrc_top_domain g12a_pwrc_isp = {
+static const struct meson_ee_pwrc_top_domain g12a_pwrc_isp = {
.sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(18) | BIT(19),
.iso_reg = GX_AO_RTI_GEN_PWR_ISO0,
@@ -154,39 +154,39 @@ static struct meson_ee_pwrc_top_domain g12a_pwrc_isp = {
{ __reg, BIT(14) }, \
{ __reg, BIT(15) }
-static struct meson_ee_pwrc_mem_domain axg_pwrc_mem_vpu[] = {
+static const struct meson_ee_pwrc_mem_domain axg_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
-static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = {
+static const struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
VPU_MEMPD(HHI_VPU_MEM_PD_REG2),
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
-static struct meson_ee_pwrc_mem_domain gxbb_pwrc_mem_vpu[] = {
+static const struct meson_ee_pwrc_mem_domain gxbb_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
-static struct meson_ee_pwrc_mem_domain meson_pwrc_mem_eth[] = {
+static const struct meson_ee_pwrc_mem_domain meson_pwrc_mem_eth[] = {
{ HHI_MEM_PD_REG0, GENMASK(3, 2) },
};
-static struct meson_ee_pwrc_mem_domain meson8_pwrc_audio_dsp_mem[] = {
+static const struct meson_ee_pwrc_mem_domain meson8_pwrc_audio_dsp_mem[] = {
{ HHI_MEM_PD_REG0, GENMASK(1, 0) },
};
-static struct meson_ee_pwrc_mem_domain meson8_pwrc_mem_vpu[] = {
+static const struct meson_ee_pwrc_mem_domain meson8_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
-static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = {
+static const struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
VPU_MEMPD(HHI_VPU_MEM_PD_REG2),
@@ -198,28 +198,28 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = {
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
-static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_nna[] = {
+static const struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_nna[] = {
{ HHI_NANOQ_MEM_PD_REG0, 0xff },
{ HHI_NANOQ_MEM_PD_REG1, 0xff },
};
-static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_usb[] = {
+static const struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_usb[] = {
{ HHI_MEM_PD_REG0, GENMASK(31, 30) },
};
-static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_pcie[] = {
+static const struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_pcie[] = {
{ HHI_MEM_PD_REG0, GENMASK(29, 26) },
};
-static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_ge2d[] = {
+static const struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_ge2d[] = {
{ HHI_MEM_PD_REG0, GENMASK(25, 18) },
};
-static struct meson_ee_pwrc_mem_domain axg_pwrc_mem_audio[] = {
+static const struct meson_ee_pwrc_mem_domain axg_pwrc_mem_audio[] = {
{ HHI_MEM_PD_REG0, GENMASK(5, 4) },
};
-static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
+static const struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
{ HHI_MEM_PD_REG0, GENMASK(5, 4) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(1, 0) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(3, 2) },
@@ -235,12 +235,12 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(27, 26) },
};
-static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
+static const struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
{ G12A_HHI_NANOQ_MEM_PD_REG0, GENMASK(31, 0) },
{ G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(31, 0) },
};
-static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_isp[] = {
+static const struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_isp[] = {
{ G12A_HHI_ISP_MEM_PD_REG0, GENMASK(31, 0) },
{ G12A_HHI_ISP_MEM_PD_REG1, GENMASK(31, 0) },
};
@@ -270,14 +270,14 @@ static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_isp[] = {
static bool pwrc_ee_is_powered_off(struct meson_ee_pwrc_domain *pwrc_domain);
-static struct meson_ee_pwrc_domain_desc axg_pwrc_domains[] = {
+static const struct meson_ee_pwrc_domain_desc axg_pwrc_domains[] = {
[PWRC_AXG_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, axg_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 5, 2),
[PWRC_AXG_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
[PWRC_AXG_AUDIO_ID] = MEM_PD("AUDIO", axg_pwrc_mem_audio),
};
-static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
+static const struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
[PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 11, 2),
[PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
@@ -287,13 +287,13 @@ static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
pwrc_ee_is_powered_off),
};
-static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
+static const struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
[PWRC_GXBB_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, gxbb_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 12, 2),
[PWRC_GXBB_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
-static struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
+static const struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
[PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
meson8_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 0, 1),
@@ -303,7 +303,7 @@ static struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
meson8_pwrc_audio_dsp_mem),
};
-static struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
+static const struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
[PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
meson8_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 11, 1),
@@ -313,7 +313,7 @@ static struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
meson8_pwrc_audio_dsp_mem),
};
-static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
+static const struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
[PWRC_SM1_VPU_ID] = VPU_PD("VPU", &sm1_pwrc_vpu, sm1_pwrc_mem_vpu,
pwrc_ee_is_powered_off, 11, 2),
[PWRC_SM1_NNA_ID] = TOP_PD("NNA", &sm1_pwrc_nna, sm1_pwrc_mem_nna,
@@ -576,32 +576,32 @@ static void meson_ee_pwrc_shutdown(struct platform_device *pdev)
}
}
-static struct meson_ee_pwrc_domain_data meson_ee_g12a_pwrc_data = {
+static const struct meson_ee_pwrc_domain_data meson_ee_g12a_pwrc_data = {
.count = ARRAY_SIZE(g12a_pwrc_domains),
.domains = g12a_pwrc_domains,
};
-static struct meson_ee_pwrc_domain_data meson_ee_axg_pwrc_data = {
+static const struct meson_ee_pwrc_domain_data meson_ee_axg_pwrc_data = {
.count = ARRAY_SIZE(axg_pwrc_domains),
.domains = axg_pwrc_domains,
};
-static struct meson_ee_pwrc_domain_data meson_ee_gxbb_pwrc_data = {
+static const struct meson_ee_pwrc_domain_data meson_ee_gxbb_pwrc_data = {
.count = ARRAY_SIZE(gxbb_pwrc_domains),
.domains = gxbb_pwrc_domains,
};
-static struct meson_ee_pwrc_domain_data meson_ee_m8_pwrc_data = {
+static const struct meson_ee_pwrc_domain_data meson_ee_m8_pwrc_data = {
.count = ARRAY_SIZE(meson8_pwrc_domains),
.domains = meson8_pwrc_domains,
};
-static struct meson_ee_pwrc_domain_data meson_ee_m8b_pwrc_data = {
+static const struct meson_ee_pwrc_domain_data meson_ee_m8b_pwrc_data = {
.count = ARRAY_SIZE(meson8b_pwrc_domains),
.domains = meson8b_pwrc_domains,
};
-static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
+static const struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
.count = ARRAY_SIZE(sm1_pwrc_domains),
.domains = sm1_pwrc_domains,
};
diff --git a/drivers/pmdomain/arm/Kconfig b/drivers/pmdomain/arm/Kconfig
index efa139c34e08..afed10d382ad 100644
--- a/drivers/pmdomain/arm/Kconfig
+++ b/drivers/pmdomain/arm/Kconfig
@@ -2,7 +2,7 @@
config ARM_SCMI_PERF_DOMAIN
tristate "SCMI performance domain driver"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
- default y
+ default ARM_SCMI_PROTOCOL
select PM_GENERIC_DOMAINS if PM
help
This enables support for the SCMI performance domains which can be
@@ -14,7 +14,7 @@ config ARM_SCMI_PERF_DOMAIN
config ARM_SCMI_POWER_DOMAIN
tristate "SCMI power domain driver"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
- default y
+ default ARM_SCMI_PROTOCOL
select PM_GENERIC_DOMAINS if PM
help
This enables support for the SCMI power domains which can be
@@ -27,7 +27,7 @@ config ARM_SCMI_POWER_DOMAIN
config ARM_SCPI_POWER_DOMAIN
tristate "SCPI power domain driver"
depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF)
- default y
+ default ARM_SCPI_PROTOCOL
select PM_GENERIC_DOMAINS if PM
help
This enables support for the SCPI power domains which can be
diff --git a/drivers/pmdomain/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
index d3cd816979ac..f5289fd184d0 100644
--- a/drivers/pmdomain/bcm/bcm2835-power.c
+++ b/drivers/pmdomain/bcm/bcm2835-power.c
@@ -506,18 +506,10 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
struct device *dev = power->dev;
struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
- dom->clk = devm_clk_get(dev->parent, name);
- if (IS_ERR(dom->clk)) {
- int ret = PTR_ERR(dom->clk);
-
- if (ret == -EPROBE_DEFER)
- return ret;
-
- /* Some domains don't have a clk, so make sure that we
- * don't deref an error pointer later.
- */
- dom->clk = NULL;
- }
+ dom->clk = devm_clk_get_optional(dev->parent, name);
+ if (IS_ERR(dom->clk))
+ return dev_err_probe(dev, PTR_ERR(dom->clk), "Failed to get clock %s\n",
+ name);
dom->base.name = name;
dom->base.flags = GENPD_FLAG_ACTIVE_WAKEUP;
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 9b2f28b34bb5..ff5c7f2b69ce 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -304,10 +304,40 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
genpd->accounting_time = now;
}
+
+static void genpd_reflect_residency(struct generic_pm_domain *genpd)
+{
+ struct genpd_governor_data *gd = genpd->gd;
+ struct genpd_power_state *state, *next_state;
+ unsigned int state_idx;
+ s64 sleep_ns, target_ns;
+
+ if (!gd || !gd->reflect_residency)
+ return;
+
+ sleep_ns = ktime_to_ns(ktime_sub(ktime_get(), gd->last_enter));
+ state_idx = genpd->state_idx;
+ state = &genpd->states[state_idx];
+ target_ns = state->power_off_latency_ns + state->residency_ns;
+
+ if (sleep_ns < target_ns) {
+ state->above++;
+	} else if (state_idx < (genpd->state_count - 1)) {
+ next_state = &genpd->states[state_idx + 1];
+ target_ns = next_state->power_off_latency_ns +
+ next_state->residency_ns;
+
+ if (sleep_ns >= target_ns)
+ state->below++;
+ }
+
+ gd->reflect_residency = false;
+}
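+
+/*
+ * Worked example for the counters above: with a power-off latency of 1 ms
+ * and a residency of 10 ms, a 5 ms sleep bumps "above" (the state was too
+ * deep to pay off), while a sleep that also satisfies the next-deeper
+ * state's latency + residency bumps "below" (a deeper state would have
+ * been the better pick).
+ */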
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
+static inline void genpd_reflect_residency(struct generic_pm_domain *genpd) {}
#endif
static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
@@ -728,6 +758,31 @@ int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
+/**
+ * pm_genpd_inc_rejected() - Adjust the rejected/usage counts for an idle-state.
+ *
+ * @genpd: The PM domain the idle-state belongs to.
+ * @state_idx: The index of the idle-state that failed.
+ *
+ * In some special cases the ->power_off() callback powers off the PM domain
+ * asynchronously, which means it may return zero to indicate success even
+ * though the actual power-off later fails. To keep the rejected/usage counts
+ * in the idle-state statistics correct, users can call this function to
+ * adjust the values.
+ *
+ * Callers must guarantee that the genpd doesn't get removed while this
+ * routine is being called.
+ */
+void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx)
+{
+ genpd_lock(genpd);
+	genpd->states[state_idx].rejected++;
+	genpd->states[state_idx].usage--;
+ genpd_unlock(genpd);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_inc_rejected);
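+
+/*
+ * A minimal caller sketch (hypothetical platform code): a ->power_off()
+ * implementation that completes asynchronously can fix up the statistics
+ * from its completion handler once the firmware reports a failure.
+ *
+ *	static void my_pd_off_done(struct my_pd *pd, int fw_err)
+ *	{
+ *		if (fw_err)
+ *			pm_genpd_inc_rejected(&pd->genpd, pd->off_state_idx);
+ *	}
+ */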
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -853,31 +908,24 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
* If all of the @genpd's devices have been suspended and all of its subdomains
* have been powered down, remove power from @genpd.
*/
-static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
- unsigned int depth)
+static void genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+ unsigned int depth)
{
struct pm_domain_data *pdd;
struct gpd_link *link;
unsigned int not_suspended = 0;
- int ret;
/*
* Do not try to power off the domain in the following situations:
- * (1) The domain is already in the "power off" state.
- * (2) System suspend is in progress.
+	 * - The domain is already in the "power off" state.
+	 * - System suspend is in progress.
+	 * - The domain is configured as always on.
+	 * - The domain has a subdomain being powered on.
*/
- if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
- return 0;
-
- /*
- * Abort power off for the PM domain in the following situations:
- * (1) The domain is configured as always on.
- * (2) When the domain has a subdomain being powered on.
- */
- if (genpd_is_always_on(genpd) ||
- genpd_is_rpm_always_on(genpd) ||
- atomic_read(&genpd->sd_count) > 0)
- return -EBUSY;
+ if (!genpd_status_on(genpd) || genpd->prepared_count > 0 ||
+ genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd) ||
+ atomic_read(&genpd->sd_count) > 0)
+ return;
/*
* The children must be in their deepest (powered-off) states to allow
@@ -888,7 +936,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
list_for_each_entry(link, &genpd->parent_links, parent_node) {
struct generic_pm_domain *child = link->child;
if (child->state_idx < child->state_count - 1)
- return -EBUSY;
+ return;
}
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
@@ -902,15 +950,15 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
/* The device may need its PM domain to stay powered on. */
if (to_gpd_data(pdd)->rpm_always_on)
- return -EBUSY;
+ return;
}
if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
- return -EBUSY;
+ return;
if (genpd->gov && genpd->gov->power_down_ok) {
if (!genpd->gov->power_down_ok(&genpd->domain))
- return -EAGAIN;
+ return;
}
/* Default to shallowest state. */
@@ -919,12 +967,11 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
/* Don't power off, if a child domain is waiting to power on. */
if (atomic_read(&genpd->sd_count) > 0)
- return -EBUSY;
+ return;
- ret = _genpd_power_off(genpd, true);
- if (ret) {
+ if (_genpd_power_off(genpd, true)) {
genpd->states[genpd->state_idx].rejected++;
- return ret;
+ return;
}
genpd->status = GENPD_STATE_OFF;
@@ -937,8 +984,6 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
genpd_power_off(link->parent, false, depth + 1);
genpd_unlock(link->parent);
}
-
- return 0;
}
/**
@@ -957,6 +1002,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
if (genpd_status_on(genpd))
return 0;
+	/* Reflect the entered idle-state's residency in debugfs. */
+ genpd_reflect_residency(genpd);
+
/*
* The list is guaranteed not to change while the loop below is being
* executed, unless one of the parents' .power_on() callbacks fiddles
@@ -1450,7 +1498,7 @@ static int genpd_finish_suspend(struct device *dev,
if (ret)
return ret;
- if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
+ if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
return 0;
if (genpd->dev_ops.stop && genpd->dev_ops.start &&
@@ -1505,7 +1553,7 @@ static int genpd_finish_resume(struct device *dev,
if (IS_ERR(genpd))
return -EINVAL;
- if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
+ if (device_awake_path(dev) && genpd_is_active_wakeup(genpd))
return resume_noirq(dev);
genpd_lock(genpd);
@@ -2229,8 +2277,10 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
return 0;
put:
put_device(&genpd->dev);
- if (genpd->free_states == genpd_free_default_power_state)
+ if (genpd->free_states == genpd_free_default_power_state) {
kfree(genpd->states);
+ genpd->states = NULL;
+ }
free:
if (genpd_is_cpu_domain(genpd))
free_cpumask_var(genpd->cpus);
@@ -2293,6 +2343,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->provider = NULL;
genpd->device_id = -ENXIO;
genpd->has_provider = false;
+ genpd->opp_table = NULL;
genpd->accounting_time = ktime_get_mono_fast_ns();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
@@ -2567,7 +2618,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
- if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
+ if (genpd->opp_table) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
@@ -2647,7 +2698,7 @@ error:
genpd->provider = NULL;
genpd->has_provider = false;
- if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
+ if (genpd->opp_table) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
@@ -2679,11 +2730,10 @@ void of_genpd_del_provider(struct device_node *np)
if (gpd->provider == &np->fwnode) {
gpd->has_provider = false;
- if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
- continue;
-
- dev_pm_opp_put_opp_table(gpd->opp_table);
- dev_pm_opp_of_remove_table(&gpd->dev);
+ if (gpd->opp_table) {
+ dev_pm_opp_put_opp_table(gpd->opp_table);
+ dev_pm_opp_of_remove_table(&gpd->dev);
+ }
}
}
@@ -3126,7 +3176,7 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
/* Verify that the index is within a valid range. */
num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
- if (index >= num_domains)
+ if (num_domains < 0 || index >= num_domains)
return NULL;
/* Allocate and register device on the genpd bus. */
@@ -3492,7 +3542,7 @@ static int idle_states_show(struct seq_file *s, void *data)
if (ret)
return -ERESTARTSYS;
- seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
+ seq_puts(s, "State Time Spent(ms) Usage Rejected Above Below\n");
for (i = 0; i < genpd->state_count; i++) {
struct genpd_power_state *state = &genpd->states[i];
@@ -3512,9 +3562,10 @@ static int idle_states_show(struct seq_file *s, void *data)
snprintf(state_name, ARRAY_SIZE(state_name), "S%-13d", i);
do_div(idle_time, NSEC_PER_MSEC);
- seq_printf(s, "%-14s %-14llu %-14llu %llu\n",
+ seq_printf(s, "%-14s %-14llu %-10llu %-10llu %-10llu %llu\n",
state->name ?: state_name, idle_time,
- state->usage, state->rejected);
+ state->usage, state->rejected, state->above,
+ state->below);
}
genpd_unlock(genpd);
diff --git a/drivers/pmdomain/governor.c b/drivers/pmdomain/governor.c
index d1a10eeebd16..c1e148657c87 100644
--- a/drivers/pmdomain/governor.c
+++ b/drivers/pmdomain/governor.c
@@ -392,6 +392,8 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
if (idle_duration_ns >= (genpd->states[i].residency_ns +
genpd->states[i].power_off_latency_ns)) {
genpd->state_idx = i;
+ genpd->gd->last_enter = now;
+ genpd->gd->reflect_residency = true;
return true;
}
} while (--i >= 0);
diff --git a/drivers/pmdomain/mediatek/mt6893-pm-domains.h b/drivers/pmdomain/mediatek/mt6893-pm-domains.h
new file mode 100644
index 000000000000..c9e2aa511448
--- /dev/null
+++ b/drivers/pmdomain/mediatek/mt6893-pm-domains.h
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Collabora Ltd
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __PMDOMAIN_MEDIATEK_MT6893_PM_DOMAINS_H
+#define __PMDOMAIN_MEDIATEK_MT6893_PM_DOMAINS_H
+
+#include <linux/soc/mediatek/infracfg.h>
+#include <dt-bindings/power/mediatek,mt6893-power.h>
+#include "mtk-pm-domains.h"
+
+#define MT6893_TOP_AXI_PROT_EN_MCU_STA1 0x2e4
+#define MT6893_TOP_AXI_PROT_EN_MCU_SET 0x2c4
+#define MT6893_TOP_AXI_PROT_EN_MCU_CLR 0x2c8
+#define MT6893_TOP_AXI_PROT_EN_VDNR_1_SET 0xba4
+#define MT6893_TOP_AXI_PROT_EN_VDNR_1_CLR 0xba8
+#define MT6893_TOP_AXI_PROT_EN_VDNR_1_STA1 0xbb0
+#define MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xbb8
+#define MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xbbc
+#define MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1 0xbc4
+
+#define MT6893_TOP_AXI_PROT_EN_1_MFG1_STEP1 GENMASK(21, 19)
+#define MT6893_TOP_AXI_PROT_EN_2_MFG1_STEP2 GENMASK(6, 5)
+#define MT6893_TOP_AXI_PROT_EN_MFG1_STEP3 GENMASK(22, 21)
+#define MT6893_TOP_AXI_PROT_EN_2_MFG1_STEP4 BIT(7)
+#define MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1_STEP5 GENMASK(19, 17)
+#define MT6893_TOP_AXI_PROT_EN_MM_VDEC0_STEP1 BIT(24)
+#define MT6893_TOP_AXI_PROT_EN_MM_2_VDEC0_STEP2 BIT(25)
+#define MT6893_TOP_AXI_PROT_EN_MM_VDEC1_STEP1 BIT(6)
+#define MT6893_TOP_AXI_PROT_EN_MM_VDEC1_STEP2 BIT(7)
+#define MT6893_TOP_AXI_PROT_EN_MM_VENC0_STEP1 BIT(26)
+#define MT6893_TOP_AXI_PROT_EN_MM_2_VENC0_STEP2 BIT(0)
+#define MT6893_TOP_AXI_PROT_EN_MM_VENC0_STEP3 BIT(27)
+#define MT6893_TOP_AXI_PROT_EN_MM_2_VENC0_STEP4 BIT(1)
+#define MT6893_TOP_AXI_PROT_EN_MM_VENC1_STEP1 GENMASK(30, 28)
+#define MT6893_TOP_AXI_PROT_EN_MM_VENC1_STEP2 GENMASK(31, 29)
+#define MT6893_TOP_AXI_PROT_EN_MDP_STEP1 BIT(10)
+#define MT6893_TOP_AXI_PROT_EN_MM_MDP_STEP2 (BIT(2) | BIT(4) | BIT(6) | \
+ BIT(8) | BIT(18) | BIT(22) | \
+ BIT(28) | BIT(30))
+#define MT6893_TOP_AXI_PROT_EN_MM_2_MDP_STEP3 (BIT(0) | BIT(2) | BIT(4) | \
+ BIT(6) | BIT(8))
+#define MT6893_TOP_AXI_PROT_EN_MDP_STEP4 BIT(23)
+#define MT6893_TOP_AXI_PROT_EN_MM_MDP_STEP5 (BIT(3) | BIT(5) | BIT(7) | \
+ BIT(9) | BIT(19) | BIT(23) | \
+ BIT(29) | BIT(31))
+#define MT6893_TOP_AXI_PROT_EN_MM_2_MDP_STEP6 (BIT(1) | BIT(7) | BIT(9) | BIT(11))
+#define MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MDP_STEP7 BIT(20)
+#define MT6893_TOP_AXI_PROT_EN_MM_DISP_STEP1 (BIT(0) | BIT(6) | BIT(8) | \
+ BIT(10) | BIT(12) | BIT(14) | \
+ BIT(16) | BIT(20) | BIT(24) | \
+ BIT(26))
+#define MT6893_TOP_AXI_PROT_EN_DISP_STEP2 BIT(6)
+#define MT6893_TOP_AXI_PROT_EN_MM_DISP_STEP3 (BIT(1) | BIT(7) | BIT(9) | \
+ BIT(15) | BIT(17) | BIT(21) | \
+ BIT(25) | BIT(27))
+#define MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_DISP_STEP4 BIT(21)
+#define MT6893_TOP_AXI_PROT_EN_2_ADSP BIT(3)
+#define MT6893_TOP_AXI_PROT_EN_2_CAM_STEP1 BIT(1)
+#define MT6893_TOP_AXI_PROT_EN_MM_CAM_STEP2 (BIT(0) | BIT(2) | BIT(4))
+#define MT6893_TOP_AXI_PROT_EN_1_CAM_STEP3 BIT(22)
+#define MT6893_TOP_AXI_PROT_EN_MM_CAM_STEP4 (BIT(1) | BIT(3) | BIT(5))
+#define MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWA_STEP1 BIT(18)
+#define MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWA_STEP2 BIT(19)
+#define MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWB_STEP1 BIT(20)
+#define MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWB_STEP2 BIT(21)
+#define MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWC_STEP1 BIT(22)
+#define MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWC_STEP2 BIT(23)
+
+/*
+ * MT6893 Power Domain (MTCMOS) support
+ *
+ * The register layout for this IP is very similar to MT8192's, so where
+ * possible the same definitions are reused to avoid duplication.
+ * Where the bus protection bits are also the same, the entire set is reused.
+ */
+static const struct scpsys_domain_data scpsys_domain_data_mt6893[] = {
+ [MT6893_POWER_DOMAIN_CONN] = {
+ .name = "conn",
+ .sta_mask = BIT(1),
+ .ctl_offs = 0x304,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_CONN,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_CONN_2ND,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_1_CONN,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_MFG0] = {
+ .name = "mfg0",
+ .sta_mask = BIT(2),
+ .ctl_offs = 0x308,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
+ },
+ [MT6893_POWER_DOMAIN_MFG1] = {
+ .name = "mfg1",
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x30c,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_1_MFG1_STEP1,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_2_MFG1_STEP2,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MFG1_STEP3,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_2_MFG1_STEP4,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1_STEP5,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY,
+ },
+ [MT6893_POWER_DOMAIN_MFG2] = {
+ .name = "mfg2",
+ .sta_mask = BIT(4),
+ .ctl_offs = 0x310,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_MFG3] = {
+ .name = "mfg3",
+ .sta_mask = BIT(5),
+ .ctl_offs = 0x314,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_MFG4] = {
+ .name = "mfg4",
+ .sta_mask = BIT(6),
+ .ctl_offs = 0x318,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_MFG5] = {
+ .name = "mfg5",
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x31c,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_MFG6] = {
+ .name = "mfg6",
+ .sta_mask = BIT(8),
+ .ctl_offs = 0x320,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = BIT(12),
+ .ctl_offs = 0x330,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_MM_2_ISP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_MM_2_ISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_ISP2] = {
+ .name = "isp2",
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x334,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_MM_ISP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_MM_ISP2_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_IPE] = {
+ .name = "ipe",
+ .sta_mask = BIT(14),
+ .ctl_offs = 0x338,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_MM_IPE,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_MM_IPE_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_VDEC0] = {
+ .name = "vdec0",
+ .sta_mask = BIT(15),
+ .ctl_offs = 0x33c,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_VDEC0_STEP1,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_2_VDEC0_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+				    MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_VDEC1] = {
+ .name = "vdec1",
+ .sta_mask = BIT(16),
+ .ctl_offs = 0x340,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_VDEC1_STEP1,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_VDEC1_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_VENC0] = {
+ .name = "venc0",
+ .sta_mask = BIT(17),
+ .ctl_offs = 0x344,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_VENC0_STEP1,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_2_VENC0_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_VENC0_STEP3,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_2_VENC0_STEP4,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_VENC1] = {
+ .name = "venc1",
+ .sta_mask = BIT(18),
+ .ctl_offs = 0x348,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_VENC1_STEP1,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_VENC1_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT6893_POWER_DOMAIN_MDP] = {
+ .name = "mdp",
+ .sta_mask = BIT(19),
+ .ctl_offs = 0x34c,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MDP_STEP1,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_MDP_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_2_MDP_STEP3,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MDP_STEP4,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_MDP_STEP5,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_2_MDP_STEP6,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MDP_STEP7,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_DISP] = {
+ .name = "disp",
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x350,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_DISP_STEP1,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_DISP_STEP2,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_DISP_STEP3,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_DISP_STEP4,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR,
+ MT6893_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = BIT(21),
+ .ctl_offs = 0x354,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT8192_TOP_AXI_PROT_EN_2_AUDIO,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_ADSP] = {
+ .name = "audio",
+ .sta_mask = BIT(22),
+ .ctl_offs = 0x358,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(9),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_2_ADSP,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_CAM] = {
+ .name = "cam",
+ .sta_mask = BIT(23),
+ .ctl_offs = 0x35c,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_2_CAM_STEP1,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_CAM_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_1_CAM_STEP3,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_CAM_STEP4,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_CAM_RAWA] = {
+ .name = "cam_rawa",
+ .sta_mask = BIT(24),
+ .ctl_offs = 0x360,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWA_STEP1,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWA_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_CAM_RAWB] = {
+ .name = "cam_rawb",
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x364,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWB_STEP1,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWB_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_CAM_RAWC] = {
+ .name = "cam_rawc",
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x368,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .bp_cfg = {
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWC_STEP1,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(INFRA,
+ MT6893_TOP_AXI_PROT_EN_MM_CAM_RAWC_STEP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT6893_POWER_DOMAIN_DP_TX] = {
+ .name = "dp_tx",
+ .sta_mask = BIT(27),
+ .ctl_offs = 0x3ac,
+ .pwr_sta_offs = 0x16c,
+ .pwr_sta2nd_offs = 0x170,
+ .sram_pdn_bits = BIT(8),
+ .sram_pdn_ack_bits = BIT(12),
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+};
+
+static const struct scpsys_soc_data mt6893_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt6893,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt6893),
+};
+
+#endif /* __PMDOMAIN_MEDIATEK_MT6893_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index b866b006af69..a58ed7e2d9a4 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
@@ -18,6 +18,7 @@
#include "mt6735-pm-domains.h"
#include "mt6795-pm-domains.h"
+#include "mt6893-pm-domains.h"
#include "mt8167-pm-domains.h"
#include "mt8173-pm-domains.h"
#include "mt8183-pm-domains.h"
@@ -397,20 +398,26 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
if (IS_ERR(pd->infracfg))
- return ERR_CAST(pd->infracfg);
+ return dev_err_cast_probe(scpsys->dev, pd->infracfg,
+ "%pOF: failed to get infracfg regmap\n",
+ node);
smi_node = of_parse_phandle(node, "mediatek,smi", 0);
if (smi_node) {
pd->smi = device_node_to_regmap(smi_node);
of_node_put(smi_node);
if (IS_ERR(pd->smi))
- return ERR_CAST(pd->smi);
+ return dev_err_cast_probe(scpsys->dev, pd->smi,
+ "%pOF: failed to get SMI regmap\n",
+ node);
}
if (MTK_SCPD_CAPS(pd, MTK_SCPD_HAS_INFRA_NAO)) {
pd->infracfg_nao = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
if (IS_ERR(pd->infracfg_nao))
- return ERR_CAST(pd->infracfg_nao);
+ return dev_err_cast_probe(scpsys->dev, pd->infracfg_nao,
+ "%pOF: failed to get infracfg-nao regmap\n",
+ node);
} else {
pd->infracfg_nao = NULL;
}
@@ -618,6 +625,10 @@ static const struct of_device_id scpsys_of_match[] = {
.data = &mt6795_scpsys_data,
},
{
+ .compatible = "mediatek,mt6893-power-controller",
+ .data = &mt6893_scpsys_data,
+ },
+ {
.compatible = "mediatek,mt8167-power-controller",
.data = &mt8167_scpsys_data,
},
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.h b/drivers/pmdomain/mediatek/mtk-pm-domains.h
index 2ac96804b985..7085fa2976e9 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.h
@@ -44,7 +44,7 @@
#define PWR_STATUS_AUDIO BIT(24)
#define PWR_STATUS_USB BIT(25)
-#define SPM_MAX_BUS_PROT_DATA 6
+#define SPM_MAX_BUS_PROT_DATA 7
enum scpsys_bus_prot_flags {
BUS_PROT_REG_UPDATE = BIT(1),
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index dfd0f80154e4..078323b85b56 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -360,6 +360,21 @@ static const struct rpmhpd_desc sdx75_desc = {
.num_pds = ARRAY_SIZE(sdx75_rpmhpds),
};
+/* SM4450 RPMH powerdomains */
+static struct rpmhpd *sm4450_rpmhpds[] = {
+ [RPMHPD_CX] = &cx,
+ [RPMHPD_CX_AO] = &cx_ao,
+ [RPMHPD_EBI] = &ebi,
+ [RPMHPD_LMX] = &lmx,
+ [RPMHPD_MSS] = &mss,
+ [RPMHPD_MX] = &mx,
+};
+
+static const struct rpmhpd_desc sm4450_desc = {
+ .rpmhpds = sm4450_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm4450_rpmhpds),
+};
+
/* SM6350 RPMH powerdomains */
static struct rpmhpd *sm6350_rpmhpds[] = {
[SM6350_CX] = &cx_w_mx_parent,
@@ -724,6 +739,7 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
{ .compatible = "qcom,sdx65-rpmhpd", .data = &sdx65_desc},
{ .compatible = "qcom,sdx75-rpmhpd", .data = &sdx75_desc},
+ { .compatible = "qcom,sm4450-rpmhpd", .data = &sm4450_desc },
{ .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc },
{ .compatible = "qcom,sm7150-rpmhpd", .data = &sm7150_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
index 66409cff2083..e001b5c25bed 100644
--- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
@@ -338,11 +338,6 @@ static int __init rcar_gen4_sysc_pd_init(void)
struct rcar_gen4_sysc_pd *pd;
size_t n;
- if (!area->name) {
- /* Skip NULLified area */
- continue;
- }
-
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index dce1a6d37e80..047495f54e8a 100644
--- a/drivers/pmdomain/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
@@ -396,11 +396,6 @@ static int __init rcar_sysc_pd_init(void)
struct rcar_sysc_pd *pd;
size_t n;
- if (!area->name) {
- /* Skip NULLified area */
- continue;
- }
-
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index 03bcf79a461f..4cce407bb1eb 100644
--- a/drivers/pmdomain/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
@@ -2,7 +2,7 @@
/*
* Rockchip Generic power domain support.
*
- * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
+ * Copyright (c) 2015 Rockchip Electronics Co., Ltd.
*/
#include <linux/arm-smccc.h>
@@ -35,6 +35,7 @@
#include <dt-bindings/power/rk3366-power.h>
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>
+#include <dt-bindings/power/rockchip,rk3562-power.h>
#include <dt-bindings/power/rk3568-power.h>
#include <dt-bindings/power/rockchip,rk3576-power.h>
#include <dt-bindings/power/rk3588-power.h>
@@ -135,6 +136,20 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
+#define DOMAIN_M_G_SD(_name, pwr, status, req, idle, ack, g_mask, mem, wakeup, keepon) \
+{ \
+ .name = _name, \
+ .pwr_w_mask = (pwr) << 16, \
+ .pwr_mask = (pwr), \
+ .status_mask = (status), \
+ .req_w_mask = (req) << 16, \
+ .req_mask = (req), \
+ .idle_mask = (idle), \
+ .ack_mask = (ack), \
+ .clk_ungate_mask = (g_mask), \
+ .active_wakeup = wakeup, \
+}
+
#define DOMAIN_M_O_R(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, wakeup, regulator) \
{ \
.name = _name, \
@@ -201,6 +216,9 @@ struct rockchip_pmu {
#define DOMAIN_RK3399(name, pwr, status, req, wakeup) \
DOMAIN(name, pwr, status, req, req, req, wakeup)
+#define DOMAIN_RK3562(name, pwr, req, g_mask, mem, wakeup) \
+ DOMAIN_M_G_SD(name, pwr, pwr, req, req, req, g_mask, mem, wakeup, false)
+
#define DOMAIN_RK3568(name, pwr, req, wakeup) \
DOMAIN_M(name, pwr, pwr, req, req, req, wakeup)
@@ -1197,6 +1215,18 @@ static const struct rockchip_domain_info rk3399_pm_domains[] = {
[RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399("sdioaudio", BIT(31), BIT(31), BIT(29), true),
};
+static const struct rockchip_domain_info rk3562_pm_domains[] = {
+ /* name pwr req g_mask mem wakeup */
+ [RK3562_PD_GPU] = DOMAIN_RK3562("gpu", BIT(0), BIT(1), BIT(1), 0, false),
+ [RK3562_PD_NPU] = DOMAIN_RK3562("npu", BIT(1), BIT(2), BIT(2), 0, false),
+ [RK3562_PD_VDPU] = DOMAIN_RK3562("vdpu", BIT(2), BIT(6), BIT(6), 0, false),
+ [RK3562_PD_VEPU] = DOMAIN_RK3562("vepu", BIT(3), BIT(7), BIT(7) | BIT(3), 0, false),
+ [RK3562_PD_RGA] = DOMAIN_RK3562("rga", BIT(4), BIT(5), BIT(5) | BIT(4), 0, false),
+ [RK3562_PD_VI] = DOMAIN_RK3562("vi", BIT(5), BIT(3), BIT(3), 0, false),
+ [RK3562_PD_VO] = DOMAIN_RK3562("vo", BIT(6), BIT(4), BIT(4), 16, false),
+ [RK3562_PD_PHP] = DOMAIN_RK3562("php", BIT(7), BIT(8), BIT(8), 0, false),
+};
+
static const struct rockchip_domain_info rk3568_pm_domains[] = {
[RK3568_PD_NPU] = DOMAIN_RK3568("npu", BIT(1), BIT(2), false),
[RK3568_PD_GPU] = DOMAIN_RK3568("gpu", BIT(0), BIT(1), false),
@@ -1398,6 +1428,18 @@ static const struct rockchip_pmu_info rk3399_pmu = {
.domain_info = rk3399_pm_domains,
};
+static const struct rockchip_pmu_info rk3562_pmu = {
+ .pwr_offset = 0x210,
+ .status_offset = 0x230,
+ .req_offset = 0x110,
+ .idle_offset = 0x128,
+ .ack_offset = 0x120,
+ .clk_ungate_offset = 0x140,
+
+ .num_domains = ARRAY_SIZE(rk3562_pm_domains),
+ .domain_info = rk3562_pm_domains,
+};
+
static const struct rockchip_pmu_info rk3568_pmu = {
.pwr_offset = 0xa0,
.status_offset = 0x98,
@@ -1497,6 +1539,10 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = {
.data = (void *)&rk3399_pmu,
},
{
+ .compatible = "rockchip,rk3562-power-controller",
+ .data = (void *)&rk3562_pmu,
+ },
+ {
.compatible = "rockchip,rk3568-power-controller",
.data = (void *)&rk3568_pmu,
},
diff --git a/drivers/pmdomain/sunxi/Kconfig b/drivers/pmdomain/sunxi/Kconfig
index 17781bf8d86d..43eecb3ea981 100644
--- a/drivers/pmdomain/sunxi/Kconfig
+++ b/drivers/pmdomain/sunxi/Kconfig
@@ -8,3 +8,13 @@ config SUN20I_PPU
help
Say y to enable the PPU power domain driver. This saves power
when certain peripherals, such as the video engine, are idle.
+
+config SUN50I_H6_PRCM_PPU
+ tristate "Allwinner H6 PRCM power domain driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y to enable the Allwinner H6/H616 PRCM power domain driver.
+ This is required to enable the Mali GPU in the H616 SoC; it is
+ optional for the H6.
diff --git a/drivers/pmdomain/sunxi/Makefile b/drivers/pmdomain/sunxi/Makefile
index ec1d7a2fb21d..c1343e123759 100644
--- a/drivers/pmdomain/sunxi/Makefile
+++ b/drivers/pmdomain/sunxi/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_SUN20I_PPU) += sun20i-ppu.o
+obj-$(CONFIG_SUN50I_H6_PRCM_PPU) += sun50i-h6-prcm-ppu.o
diff --git a/drivers/pmdomain/sunxi/sun50i-h6-prcm-ppu.c b/drivers/pmdomain/sunxi/sun50i-h6-prcm-ppu.c
new file mode 100644
index 000000000000..d59644499dfe
--- /dev/null
+++ b/drivers/pmdomain/sunxi/sun50i-h6-prcm-ppu.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) Arm Ltd. 2024
+ *
+ * Allwinner H6/H616 PRCM power domain driver.
+ * This covers a few registers inside the PRCM (Power Reset Clock Management)
+ * block that control some power rails, most prominently for the Mali GPU.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset.h>
+
+/*
+ * The PRCM block covers multiple devices, starting with some clocks,
+ * then followed by the power rails.
+ * The clocks are covered by a different driver, so this driver's MMIO range
+ * starts later in the PRCM MMIO frame, not at the beginning of it.
+ * To keep the register offsets consistent with other PRCM documentation,
+ * express the registers relative to the beginning of the whole PRCM, and
+ * subtract the PPU offset this driver is bound to.
+ */
+#define PD_H6_PPU_OFFSET 0x250
+#define PD_H6_VDD_SYS_REG 0x250
+#define PD_H616_ANA_VDD_GATE BIT(4)
+#define PD_H6_CPUS_VDD_GATE BIT(3)
+#define PD_H6_AVCC_VDD_GATE BIT(2)
+#define PD_H6_GPU_REG 0x254
+#define PD_H6_GPU_GATE BIT(0)
+
+struct sun50i_h6_ppu_pd {
+ struct generic_pm_domain genpd;
+ void __iomem *reg;
+ u32 gate_mask;
+ bool negated;
+};
+
+#define FLAG_PPU_ALWAYS_ON BIT(0)
+#define FLAG_PPU_NEGATED BIT(1)
+
+struct sun50i_h6_ppu_desc {
+ const char *name;
+ u32 offset;
+ u32 mask;
+ unsigned int flags;
+};
+
+static const struct sun50i_h6_ppu_desc sun50i_h6_ppus[] = {
+ { "AVCC", PD_H6_VDD_SYS_REG, PD_H6_AVCC_VDD_GATE },
+ { "CPUS", PD_H6_VDD_SYS_REG, PD_H6_CPUS_VDD_GATE },
+ { "GPU", PD_H6_GPU_REG, PD_H6_GPU_GATE },
+};
+static const struct sun50i_h6_ppu_desc sun50i_h616_ppus[] = {
+ { "PLL", PD_H6_VDD_SYS_REG, PD_H6_AVCC_VDD_GATE,
+ FLAG_PPU_ALWAYS_ON | FLAG_PPU_NEGATED },
+ { "ANA", PD_H6_VDD_SYS_REG, PD_H616_ANA_VDD_GATE, FLAG_PPU_ALWAYS_ON },
+ { "GPU", PD_H6_GPU_REG, PD_H6_GPU_GATE, FLAG_PPU_NEGATED },
+};
+
+struct sun50i_h6_ppu_data {
+ const struct sun50i_h6_ppu_desc *descs;
+ int nr_domains;
+};
+
+static const struct sun50i_h6_ppu_data sun50i_h6_ppu_data = {
+ .descs = sun50i_h6_ppus,
+ .nr_domains = ARRAY_SIZE(sun50i_h6_ppus),
+};
+
+static const struct sun50i_h6_ppu_data sun50i_h616_ppu_data = {
+ .descs = sun50i_h616_ppus,
+ .nr_domains = ARRAY_SIZE(sun50i_h616_ppus),
+};
+
+#define to_sun50i_h6_ppu_pd(_genpd) \
+ container_of(_genpd, struct sun50i_h6_ppu_pd, genpd)
+
+static bool sun50i_h6_ppu_power_status(const struct sun50i_h6_ppu_pd *pd)
+{
+ bool bit = readl(pd->reg) & pd->gate_mask;
+
+ return bit ^ pd->negated;
+}
+
+static int sun50i_h6_ppu_pd_set_power(const struct sun50i_h6_ppu_pd *pd,
+ bool set_bit)
+{
+ u32 reg = readl(pd->reg);
+
+ if (set_bit)
+ writel(reg | pd->gate_mask, pd->reg);
+ else
+ writel(reg & ~pd->gate_mask, pd->reg);
+
+ return 0;
+}
+
+static int sun50i_h6_ppu_pd_power_on(struct generic_pm_domain *genpd)
+{
+ const struct sun50i_h6_ppu_pd *pd = to_sun50i_h6_ppu_pd(genpd);
+
+ return sun50i_h6_ppu_pd_set_power(pd, !pd->negated);
+}
+
+static int sun50i_h6_ppu_pd_power_off(struct generic_pm_domain *genpd)
+{
+ const struct sun50i_h6_ppu_pd *pd = to_sun50i_h6_ppu_pd(genpd);
+
+ return sun50i_h6_ppu_pd_set_power(pd, pd->negated);
+}
+
+static int sun50i_h6_ppu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct genpd_onecell_data *ppu;
+ struct sun50i_h6_ppu_pd *pds;
+ const struct sun50i_h6_ppu_data *data;
+ void __iomem *base;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ pds = devm_kcalloc(dev, data->nr_domains, sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return -ENOMEM;
+
+ ppu = devm_kzalloc(dev, sizeof(*ppu), GFP_KERNEL);
+ if (!ppu)
+ return -ENOMEM;
+
+ ppu->num_domains = data->nr_domains;
+ ppu->domains = devm_kcalloc(dev, data->nr_domains,
+ sizeof(*ppu->domains), GFP_KERNEL);
+ if (!ppu->domains)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ppu);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ for (i = 0; i < data->nr_domains; i++) {
+ struct sun50i_h6_ppu_pd *pd = &pds[i];
+ const struct sun50i_h6_ppu_desc *desc = &data->descs[i];
+
+ pd->genpd.name = desc->name;
+ pd->genpd.power_off = sun50i_h6_ppu_pd_power_off;
+ pd->genpd.power_on = sun50i_h6_ppu_pd_power_on;
+ if (desc->flags & FLAG_PPU_ALWAYS_ON)
+ pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
+ pd->negated = !!(desc->flags & FLAG_PPU_NEGATED);
+ pd->reg = base + desc->offset - PD_H6_PPU_OFFSET;
+ pd->gate_mask = desc->mask;
+
+ ret = pm_genpd_init(&pd->genpd, NULL,
+ !sun50i_h6_ppu_power_status(pd));
+ if (ret) {
+ dev_warn(dev, "Failed to add %s power domain: %d\n",
+ desc->name, ret);
+ goto out_remove_pds;
+ }
+ ppu->domains[i] = &pd->genpd;
+ }
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, ppu);
+ if (!ret)
+ return 0;
+
+ dev_warn(dev, "Failed to add provider: %d\n", ret);
+out_remove_pds:
+ for (i--; i >= 0; i--)
+ pm_genpd_remove(&pds[i].genpd);
+
+ return ret;
+}
+
+static const struct of_device_id sun50i_h6_ppu_of_match[] = {
+ { .compatible = "allwinner,sun50i-h6-prcm-ppu",
+ .data = &sun50i_h6_ppu_data },
+ { .compatible = "allwinner,sun50i-h616-prcm-ppu",
+ .data = &sun50i_h616_ppu_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun50i_h6_ppu_of_match);
+
+static struct platform_driver sun50i_h6_ppu_driver = {
+ .probe = sun50i_h6_ppu_probe,
+ .driver = {
+ .name = "sun50i-h6-prcm-ppu",
+ .of_match_table = sun50i_h6_ppu_of_match,
+ /* Power domains cannot be removed while they are in use. */
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(sun50i_h6_ppu_driver);
+
+MODULE_AUTHOR("Andre Przywara <andre.przywara@arm.com>");
+MODULE_DESCRIPTION("Allwinner H6 PRCM power domain driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pmdomain/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c
index 79d165331d8c..5142f064bf5c 100644
--- a/drivers/pmdomain/ti/omap_prm.c
+++ b/drivers/pmdomain/ti/omap_prm.c
@@ -18,7 +18,9 @@
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
-
+#if IS_ENABLED(CONFIG_SUSPEND)
+#include <linux/suspend.h>
+#endif
#include <linux/platform_data/ti-prm.h>
enum omap_prm_domain_mode {
@@ -88,6 +90,7 @@ struct omap_reset_data {
#define OMAP_PRM_HAS_RSTST BIT(1)
#define OMAP_PRM_HAS_NO_CLKDM BIT(2)
#define OMAP_PRM_RET_WHEN_IDLE BIT(3)
+#define OMAP_PRM_ON_WHEN_STANDBY BIT(4)
#define OMAP_PRM_HAS_RESETS (OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_RSTST)
@@ -404,7 +407,8 @@ static const struct omap_prm_data am3_prm_data[] = {
.name = "per", .base = 0x44e00c00,
.pwrstctrl = 0xc, .pwrstst = 0x8, .dmap = &omap_prm_noinact,
.rstctrl = 0x0, .rstmap = am3_per_rst_map,
- .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp"
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_ON_WHEN_STANDBY,
+ .clkdm_name = "pruss_ocp",
},
{
.name = "wkup", .base = 0x44e00d00,
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 6085a1471de2..6e1d4bfd28ac 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -290,7 +290,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
#ifdef CONFIG_AMD_NB
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
{
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 60bf0ca64cf3..e71f0af4e378 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -216,6 +216,19 @@ config POWER_RESET_ST
help
Reset support for STMicroelectronics boards.
+config POWER_RESET_TORADEX_EC
+ tristate "Toradex Embedded Controller power-off and reset driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver supports power-off and reset for Toradex SMARC SoMs,
+ for example the SMARC iMX8MP and SMARC iMX95, using the Toradex
+ Embedded Controller (EC).
+
+ Say Y here if you have a Toradex SMARC SoM.
+
+ If unsure, say N.
+
config POWER_RESET_TPS65086
bool "TPS65086 restart driver"
depends on MFD_TPS65086
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 10782d32e1da..1b9b63a1a873 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_REGULATOR) += regulator-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
obj-$(CONFIG_POWER_RESET_ST) += st-poweroff.o
+obj-$(CONFIG_POWER_RESET_TORADEX_EC) += tdx-ec-poweroff.o
obj-$(CONFIG_POWER_RESET_TPS65086) += tps65086-restart.o
obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o
obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 036b18a1f90f..511f5a8f8961 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -129,12 +129,11 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
" str %4, [%0, %6]\n\t"
/* Disable SDRAM1 accesses */
"1: tst %1, #0\n\t"
- " beq 2f\n\t"
" strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
/* Power down SDRAM1 */
" strne %4, [%1, %6]\n\t"
/* Reset CPU */
- "2: str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
+ " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
" b .\n\t"
:
@@ -145,7 +144,7 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
"r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN),
"r" (reset->data->reset_args),
"r" (reset->ramc_lpr)
- : "r4");
+ );
return NOTIFY_DONE;
}
diff --git a/drivers/power/reset/reboot-mode.c b/drivers/power/reset/reboot-mode.c
index b4076b10b893..fba53f638da0 100644
--- a/drivers/power/reset/reboot-mode.c
+++ b/drivers/power/reset/reboot-mode.c
@@ -23,20 +23,29 @@ static unsigned int get_reboot_mode_magic(struct reboot_mode_driver *reboot,
const char *cmd)
{
const char *normal = "normal";
- int magic = 0;
struct mode_info *info;
+ char cmd_[110];
if (!cmd)
cmd = normal;
- list_for_each_entry(info, &reboot->head, list) {
- if (!strcmp(info->mode, cmd)) {
- magic = info->magic;
- break;
- }
- }
+ list_for_each_entry(info, &reboot->head, list)
+ if (!strcmp(info->mode, cmd))
+ return info->magic;
+
+ /* try to match again, replacing characters impossible in DT */
+ if (strscpy(cmd_, cmd, sizeof(cmd_)) == -E2BIG)
+ return 0;
- return magic;
+ strreplace(cmd_, ' ', '-');
+ strreplace(cmd_, ',', '-');
+ strreplace(cmd_, '/', '-');
+
+ list_for_each_entry(info, &reboot->head, list)
+ if (!strcmp(info->mode, cmd_))
+ return info->magic;
+
+ return 0;
}
static int reboot_mode_notify(struct notifier_block *this,
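Since DT node names cannot contain ' ', ',' or '/', the second matching pass canonicalises the reboot command first; the normalisation in isolation (the same strreplace() calls as above, wrapped in a hypothetical helper):

    #include <linux/string.h>

    static void normalize_reboot_cmd(char *cmd)
    {
        /* map characters that are invalid in DT property names to '-' */
        strreplace(cmd, ' ', '-');
        strreplace(cmd, ',', '-');
        strreplace(cmd, '/', '-');
    }

So "reboot bootloader,fastboot" can now match a mode declared as mode-bootloader-fastboot in the device tree.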
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index d623d77e657e..2e2cf5f62d73 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -14,11 +14,24 @@
#include <linux/reboot.h>
#include <linux/regmap.h>
-struct syscon_reboot_context {
- struct regmap *map;
+struct reboot_mode_bits {
u32 offset;
- u32 value;
u32 mask;
+ u32 value;
+ bool valid;
+};
+
+struct reboot_data {
+ struct reboot_mode_bits mode_bits[REBOOT_SOFT + 1];
+ struct reboot_mode_bits catchall;
+};
+
+struct syscon_reboot_context {
+ struct regmap *map;
+
+ const struct reboot_data *rd; /* from of match data, if any */
+ struct reboot_mode_bits catchall; /* from DT */
+
struct notifier_block restart_handler;
};
@@ -28,9 +41,21 @@ static int syscon_restart_handle(struct notifier_block *this,
struct syscon_reboot_context *ctx =
container_of(this, struct syscon_reboot_context,
restart_handler);
+ const struct reboot_mode_bits *mode_bits;
+
+ if (ctx->rd) {
+ if (mode < ARRAY_SIZE(ctx->rd->mode_bits) &&
+ ctx->rd->mode_bits[mode].valid)
+ mode_bits = &ctx->rd->mode_bits[mode];
+ else
+ mode_bits = &ctx->rd->catchall;
+ } else {
+ mode_bits = &ctx->catchall;
+ }
/* Issue the reboot */
- regmap_update_bits(ctx->map, ctx->offset, ctx->mask, ctx->value);
+ regmap_update_bits(ctx->map, mode_bits->offset, mode_bits->mask,
+ mode_bits->value);
mdelay(1000);
@@ -42,7 +67,6 @@ static int syscon_reboot_probe(struct platform_device *pdev)
{
struct syscon_reboot_context *ctx;
struct device *dev = &pdev->dev;
- int mask_err, value_err;
int priority;
int err;
@@ -60,24 +84,33 @@ static int syscon_reboot_probe(struct platform_device *pdev)
if (of_property_read_s32(pdev->dev.of_node, "priority", &priority))
priority = 192;
- if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset))
- if (of_property_read_u32(pdev->dev.of_node, "reg", &ctx->offset))
- return -EINVAL;
+ ctx->rd = of_device_get_match_data(dev);
+ if (!ctx->rd) {
+ int mask_err, value_err;
- value_err = of_property_read_u32(pdev->dev.of_node, "value", &ctx->value);
- mask_err = of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask);
- if (value_err && mask_err) {
- dev_err(dev, "unable to read 'value' and 'mask'");
- return -EINVAL;
- }
+ if (of_property_read_u32(pdev->dev.of_node, "offset",
+ &ctx->catchall.offset) &&
+ of_property_read_u32(pdev->dev.of_node, "reg",
+ &ctx->catchall.offset))
+ return -EINVAL;
- if (value_err) {
- /* support old binding */
- ctx->value = ctx->mask;
- ctx->mask = 0xFFFFFFFF;
- } else if (mask_err) {
- /* support value without mask*/
- ctx->mask = 0xFFFFFFFF;
+ value_err = of_property_read_u32(pdev->dev.of_node, "value",
+ &ctx->catchall.value);
+ mask_err = of_property_read_u32(pdev->dev.of_node, "mask",
+ &ctx->catchall.mask);
+ if (value_err && mask_err) {
+ dev_err(dev, "unable to read 'value' and 'mask'");
+ return -EINVAL;
+ }
+
+ if (value_err) {
+ /* support old binding */
+ ctx->catchall.value = ctx->catchall.mask;
+ ctx->catchall.mask = 0xFFFFFFFF;
+ } else if (mask_err) {
+ /* support value without mask */
+ ctx->catchall.mask = 0xFFFFFFFF;
+ }
}
ctx->restart_handler.notifier_call = syscon_restart_handle;
@@ -89,7 +122,30 @@ static int syscon_reboot_probe(struct platform_device *pdev)
return err;
}
+static const struct reboot_data gs101_reboot_data = {
+ .mode_bits = {
+ [REBOOT_WARM] = {
+ .offset = 0x3a00, /* SYSTEM_CONFIGURATION */
+ .mask = 0x00000002, /* SWRESET_SYSTEM */
+ .value = 0x00000002,
+ .valid = true,
+ },
+ [REBOOT_SOFT] = {
+ .offset = 0x3a00, /* SYSTEM_CONFIGURATION */
+ .mask = 0x00000002, /* SWRESET_SYSTEM */
+ .value = 0x00000002,
+ .valid = true,
+ },
+ },
+ .catchall = {
+ .offset = 0x3e9c, /* PAD_CTRL_PWR_HOLD */
+ .mask = 0x00000100,
+ .value = 0x00000000,
+ },
+};
+
static const struct of_device_id syscon_reboot_of_match[] = {
+ { .compatible = "google,gs101-reboot", .data = &gs101_reboot_data },
{ .compatible = "syscon-reboot" },
{}
};
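With the gs101 match data in place, the selection in syscon_restart_handle() reduces to the following (a condensed restatement of the new logic, not extra code):

    /* mode comes from the restart notifier chain */
    if (ctx->rd && mode <= REBOOT_SOFT && ctx->rd->mode_bits[mode].valid)
        bits = &ctx->rd->mode_bits[mode]; /* gs101: SWRESET_SYSTEM for warm/soft */
    else if (ctx->rd)
        bits = &ctx->rd->catchall;        /* gs101: drop PAD_CTRL_PWR_HOLD */
    else
        bits = &ctx->catchall;            /* legacy DT offset/mask/value */
    regmap_update_bits(ctx->map, bits->offset, bits->mask, bits->value);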
diff --git a/drivers/power/reset/tdx-ec-poweroff.c b/drivers/power/reset/tdx-ec-poweroff.c
new file mode 100644
index 000000000000..3302a127fce5
--- /dev/null
+++ b/drivers/power/reset/tdx-ec-poweroff.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toradex Embedded Controller driver
+ *
+ * Copyright (C) 2025 Toradex
+ *
+ * Author: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define EC_CHIP_ID_REG 0x00
+#define EC_CHIP_ID_SMARC_IMX95 0x11
+#define EC_CHIP_ID_SMARC_IMX8MP 0x12
+
+#define EC_VERSION_REG_MAJOR 0x01
+#define EC_VERSION_REG_MINOR 0x02
+#define EC_ID_VERSION_LEN 3
+
+#define EC_CMD_REG 0xD0
+#define EC_CMD_POWEROFF 0x01
+#define EC_CMD_RESET 0x02
+
+#define EC_REG_MAX 0xD0
+
+static const struct regmap_range volatile_ranges[] = {
+ regmap_reg_range(EC_CMD_REG, EC_CMD_REG),
+};
+
+static const struct regmap_access_table volatile_table = {
+ .yes_ranges = volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(volatile_ranges),
+};
+
+static const struct regmap_range read_ranges[] = {
+ regmap_reg_range(EC_CHIP_ID_REG, EC_VERSION_REG_MINOR),
+};
+
+static const struct regmap_access_table read_table = {
+ .yes_ranges = read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(read_ranges),
+};
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = EC_REG_MAX,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &read_table,
+ .volatile_table = &volatile_table,
+};
+
+static int tdx_ec_cmd(struct regmap *regmap, u8 cmd)
+{
+ int err = regmap_write(regmap, EC_CMD_REG, cmd);
+
+ if (err)
+ dev_err(regmap_get_device(regmap), "Failed to send command 0x%02X: %d\n", cmd, err);
+
+ return err;
+}
+
+static int tdx_ec_power_off(struct sys_off_data *data)
+{
+ struct regmap *regmap = data->cb_data;
+ int err;
+
+ err = tdx_ec_cmd(regmap, EC_CMD_POWEROFF);
+
+ return err ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+static int tdx_ec_restart(struct sys_off_data *data)
+{
+ struct regmap *regmap = data->cb_data;
+ int err;
+
+ err = tdx_ec_cmd(regmap, EC_CMD_RESET);
+
+ return err ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+static int tdx_ec_register_power_off_restart(struct device *dev, struct regmap *regmap)
+{
+ int err;
+
+ err = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_FIRMWARE,
+ tdx_ec_restart, regmap);
+ if (err)
+ return err;
+
+ return devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ tdx_ec_power_off, regmap);
+}
+
+static int tdx_ec_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ u8 reg_val[EC_ID_VERSION_LEN];
+ struct regmap *regmap;
+ int err;
+
+ regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ err = regmap_bulk_read(regmap, EC_CHIP_ID_REG, &reg_val, EC_ID_VERSION_LEN);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot read id and version registers\n");
+
+ dev_info(dev, "Toradex Embedded Controller id %x - Firmware %u.%u\n",
+ reg_val[0], reg_val[1], reg_val[2]);
+
+ err = tdx_ec_register_power_off_restart(dev, regmap);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Cannot register system restart handler\n");
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused of_tdx_ec_match[] = {
+ { .compatible = "toradex,smarc-ec" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tdx_ec_match);
+
+static struct i2c_driver tdx_ec_driver = {
+ .probe = tdx_ec_probe,
+ .driver = {
+ .name = "toradex-smarc-ec",
+ .of_match_table = of_tdx_ec_match,
+ },
+};
+module_i2c_driver(tdx_ec_driver);
+
+MODULE_AUTHOR("Emanuele Ghidoli <emanuele.ghidoli@toradex.com>");
+MODULE_DESCRIPTION("Toradex SMARC Embedded Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 8dbd39afa43c..79ddb006e2da 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -107,6 +107,18 @@ config BATTERY_ACT8945A
Say Y here to enable support for power supply provided by
Active-semi ActivePath ACT8945A charger.
+config BATTERY_CHAGALL
+ tristate "Pegatron Chagall battery driver"
+ depends on I2C
+ depends on LEDS_CLASS
+ help
+ Say Y to include support for the Cypress CG7153AM IC based battery
+ fuel gauge running custom firmware, as found in the Pegatron Chagall
+ tablet line.
+
+ This driver can also be built as a module. If so, the module will be
+ called chagall-battery.
+
config BATTERY_CPCAP
tristate "Motorola CPCAP PMIC battery driver"
depends on MFD_CPCAP && IIO
@@ -161,6 +173,16 @@ config BATTERY_DS2782
Say Y here to enable support for the DS2782/DS2786 standalone battery
gas-gauge.
+config BATTERY_HUAWEI_GAOKUN
+ tristate "Huawei Matebook E Go power supply"
+ depends on EC_HUAWEI_GAOKUN
+ help
+ This driver enables battery and adapter support on the Huawei Matebook
+ E Go, which is an sc8280xp-based 2-in-1 tablet.
+
+ To compile the driver as a module, choose M here: the module will be
+ called huawei-gaokun-battery.
+
config BATTERY_LEGO_EV3
tristate "LEGO MINDSTORMS EV3 battery"
depends on OF && IIO && GPIOLIB && (ARCH_DAVINCI_DA850 || COMPILE_TEST)
@@ -595,6 +617,21 @@ config CHARGER_MAX77976
This driver can also be built as a module. If so, the module will be
called max77976_charger.
+config CHARGER_MAX8971
+ tristate "Maxim MAX8971 battery charger driver"
+ depends on I2C
+ depends on EXTCON || !EXTCON
+ select REGMAP_I2C
+ help
+ The MAX8971 is a compact, high-frequency, high-efficiency switch-mode
+ charger for a one-cell lithium-ion (Li+) battery. It delivers up to
+ 1.55A of current to the battery from inputs up to 7.5V and withstands
+ transient inputs up to 22V.
+
+ Say Y to enable support for the Maxim MAX8971 battery charger.
+ This driver can also be built as a module. If so, the module will be
+ called max8971_charger.
+
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 61677be328b0..4f5f8e3507f8 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_CHARGER_ADP5061) += adp5061.o
obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o
obj-$(CONFIG_BATTERY_AXP20X) += axp20x_battery.o
obj-$(CONFIG_CHARGER_AXP20X) += axp20x_ac_power.o
+obj-$(CONFIG_BATTERY_CHAGALL) += chagall-battery.o
obj-$(CONFIG_BATTERY_CPCAP) += cpcap-battery.o
obj-$(CONFIG_BATTERY_CW2015) += cw2015_battery.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
obj-$(CONFIG_BATTERY_GAUGE_LTC2941) += ltc2941-battery-gauge.o
obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
+obj-$(CONFIG_BATTERY_HUAWEI_GAOKUN) += huawei-gaokun-battery.o
obj-$(CONFIG_BATTERY_LEGO_EV3) += lego_ev3_battery.o
obj-$(CONFIG_BATTERY_LENOVO_YOGA_C630) += lenovo_yoga_c630_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
@@ -81,6 +83,7 @@ obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
obj-$(CONFIG_CHARGER_MAX77705) += max77705_charger.o
obj-$(CONFIG_CHARGER_MAX77976) += max77976_charger.o
+obj-$(CONFIG_CHARGER_MAX8971) += max8971_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_MP2629) += mp2629_charger.o
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index f0d97ab45bd8..1867beadd7af 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -207,6 +207,7 @@ enum bq24190_chip {
BQ24190,
BQ24192,
BQ24192i,
+ BQ24193,
BQ24196,
BQ24296,
BQ24297,
@@ -2021,6 +2022,17 @@ static const struct bq24190_chip_info bq24190_chip_info_tbl[] = {
.get_ntc_status = bq24190_charger_get_ntc_status,
.set_otg_vbus = bq24190_set_otg_vbus,
},
+ [BQ24193] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
[BQ24196] = {
.ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
#ifdef CONFIG_REGULATOR
@@ -2308,6 +2320,7 @@ static const struct i2c_device_id bq24190_i2c_ids[] = {
{ "bq24190", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24190] },
{ "bq24192", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192] },
{ "bq24192i", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192i] },
+ { "bq24193", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24193] },
{ "bq24196", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24196] },
{ "bq24296", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24296] },
{ "bq24297", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24297] },
@@ -2319,6 +2332,7 @@ static const struct of_device_id bq24190_of_match[] = {
{ .compatible = "ti,bq24190", .data = &bq24190_chip_info_tbl[BQ24190] },
{ .compatible = "ti,bq24192", .data = &bq24190_chip_info_tbl[BQ24192] },
{ .compatible = "ti,bq24192i", .data = &bq24190_chip_info_tbl[BQ24192i] },
+ { .compatible = "ti,bq24193", .data = &bq24190_chip_info_tbl[BQ24193] },
{ .compatible = "ti,bq24196", .data = &bq24190_chip_info_tbl[BQ24196] },
{ .compatible = "ti,bq24296", .data = &bq24190_chip_info_tbl[BQ24296] },
{ .compatible = "ti,bq24297", .data = &bq24190_chip_info_tbl[BQ24297] },
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 2f31d750a4c1..93dcebbe1141 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -2131,7 +2131,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
mutex_unlock(&di->lock);
if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
- return -ENODEV;
+ return di->cache.flags;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index ba0d22d90429..868e95f0887e 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -6,6 +6,7 @@
* Andrew F. Davis <afd@ti.com>
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -31,6 +32,7 @@ static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg,
struct i2c_msg msg[2];
u8 data[2];
int ret;
+ int retry = 0;
if (!client->adapter)
return -ENODEV;
@@ -47,7 +49,16 @@ static int bq27xxx_battery_i2c_read(struct bq27xxx_device_info *di, u8 reg,
else
msg[1].len = 2;
- ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ do {
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret == -EBUSY && ++retry < 3) {
+ /* sleep 10 milliseconds when busy */
+ usleep_range(10000, 11000);
+ continue;
+ }
+ break;
+ } while (1);
+
if (ret < 0)
return ret;
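The loop above makes at most three attempts, sleeping 10 ms between them; the pattern in isolation (with a hypothetical xfer() standing in for the i2c_transfer() call):

    int ret, retry = 0;

    do {
        ret = xfer();                       /* i2c_transfer() in the driver */
        if (ret == -EBUSY && ++retry < 3) {
            usleep_range(10000, 11000);     /* fuel gauge busy: back off 10 ms */
            continue;
        }
        break;
    } while (1);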
diff --git a/drivers/power/supply/chagall-battery.c b/drivers/power/supply/chagall-battery.c
new file mode 100644
index 000000000000..8b05422aca6f
--- /dev/null
+++ b/drivers/power/supply/chagall-battery.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/devm-helpers.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+#define CHAGALL_REG_LED_AMBER 0x60
+#define CHAGALL_REG_LED_WHITE 0x70
+#define CHAGALL_REG_BATTERY_TEMPERATURE 0xa2
+#define CHAGALL_REG_BATTERY_VOLTAGE 0xa4
+#define CHAGALL_REG_BATTERY_CURRENT 0xa6
+#define CHAGALL_REG_BATTERY_CAPACITY 0xa8
+#define CHAGALL_REG_BATTERY_CHARGING_CURRENT 0xaa
+#define CHAGALL_REG_BATTERY_CHARGING_VOLTAGE 0xac
+#define CHAGALL_REG_BATTERY_STATUS 0xae
+#define BATTERY_DISCHARGING BIT(6)
+#define BATTERY_FULL_CHARGED BIT(5)
+#define BATTERY_FULL_DISCHARGED BIT(4)
+#define CHAGALL_REG_BATTERY_REMAIN_CAPACITY 0xb0
+#define CHAGALL_REG_BATTERY_FULL_CAPACITY 0xb2
+#define CHAGALL_REG_MAX_COUNT 0xb4
+
+#define CHAGALL_BATTERY_DATA_REFRESH 5000
+#define TEMP_CELSIUS_OFFSET 2731
+
+static const struct regmap_config chagall_battery_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = CHAGALL_REG_MAX_COUNT,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+struct chagall_battery_data {
+ struct regmap *regmap;
+ struct led_classdev amber_led;
+ struct led_classdev white_led;
+ struct power_supply *battery;
+ struct delayed_work poll_work;
+ u16 last_state;
+};
+
+static void chagall_led_set_brightness_amber(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct chagall_battery_data *cg =
+ container_of(led, struct chagall_battery_data, amber_led);
+
+ regmap_write(cg->regmap, CHAGALL_REG_LED_AMBER, brightness);
+}
+
+static void chagall_led_set_brightness_white(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct chagall_battery_data *cg =
+ container_of(led, struct chagall_battery_data, white_led);
+
+ regmap_write(cg->regmap, CHAGALL_REG_LED_WHITE, brightness);
+}
+
+static const enum power_supply_property chagall_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+};
+
+static const unsigned int chagall_battery_prop_offs[] = {
+ [POWER_SUPPLY_PROP_STATUS] = CHAGALL_REG_BATTERY_STATUS,
+ [POWER_SUPPLY_PROP_VOLTAGE_NOW] = CHAGALL_REG_BATTERY_VOLTAGE,
+ [POWER_SUPPLY_PROP_VOLTAGE_MAX] = CHAGALL_REG_BATTERY_CHARGING_VOLTAGE,
+ [POWER_SUPPLY_PROP_CURRENT_NOW] = CHAGALL_REG_BATTERY_CURRENT,
+ [POWER_SUPPLY_PROP_CURRENT_MAX] = CHAGALL_REG_BATTERY_CHARGING_CURRENT,
+ [POWER_SUPPLY_PROP_CAPACITY] = CHAGALL_REG_BATTERY_CAPACITY,
+ [POWER_SUPPLY_PROP_TEMP] = CHAGALL_REG_BATTERY_TEMPERATURE,
+ [POWER_SUPPLY_PROP_CHARGE_FULL] = CHAGALL_REG_BATTERY_FULL_CAPACITY,
+ [POWER_SUPPLY_PROP_CHARGE_NOW] = CHAGALL_REG_BATTERY_REMAIN_CAPACITY,
+};
+
+static int chagall_battery_get_value(struct chagall_battery_data *cg,
+ enum power_supply_property psp, u32 *val)
+{
+ if (psp >= ARRAY_SIZE(chagall_battery_prop_offs))
+ return -EINVAL;
+ if (!chagall_battery_prop_offs[psp])
+ return -EINVAL;
+
+ /* Battery data is stored in two consecutive registers, little-endian */
+ return regmap_bulk_read(cg->regmap, chagall_battery_prop_offs[psp], val, 2);
+}
+
+static int chagall_battery_get_status(u32 status_reg)
+{
+ if (status_reg & BATTERY_FULL_CHARGED)
+ return POWER_SUPPLY_STATUS_FULL;
+ else if (status_reg & BATTERY_DISCHARGING)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ return POWER_SUPPLY_STATUS_CHARGING;
+}
+
+static int chagall_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct chagall_battery_data *cg = power_supply_get_drvdata(psy);
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+
+ default:
+ ret = chagall_battery_get_value(cg, psp, &val->intval);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval -= TEMP_CELSIUS_OFFSET;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval *= 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = chagall_battery_get_status(val->intval);
+ break;
+
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ return 0;
+}
+
+static void chagall_battery_poll_work(struct work_struct *work)
+{
+ struct chagall_battery_data *cg =
+ container_of(work, struct chagall_battery_data, poll_work.work);
+ u32 state;
+ int ret;
+
+ ret = chagall_battery_get_value(cg, POWER_SUPPLY_PROP_STATUS, &state);
+ if (ret)
+ return;
+
+ state = chagall_battery_get_status(state);
+
+ if (cg->last_state != state) {
+ cg->last_state = state;
+ power_supply_changed(cg->battery);
+ }
+
+ /* keep polling; a uevent is sent whenever the status changes */
+ schedule_delayed_work(&cg->poll_work,
+ msecs_to_jiffies(CHAGALL_BATTERY_DATA_REFRESH));
+}
+
+static const struct power_supply_desc chagall_battery_desc = {
+ .name = "chagall-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = chagall_battery_properties,
+ .num_properties = ARRAY_SIZE(chagall_battery_properties),
+ .get_property = chagall_battery_get_property,
+ .external_power_changed = power_supply_changed,
+};
+
+static int chagall_battery_probe(struct i2c_client *client)
+{
+ struct chagall_battery_data *cg;
+ struct device *dev = &client->dev;
+ struct power_supply_config cfg = { };
+ int ret;
+
+ cg = devm_kzalloc(dev, sizeof(*cg), GFP_KERNEL);
+ if (!cg)
+ return -ENOMEM;
+
+ cfg.drv_data = cg;
+ cfg.fwnode = dev_fwnode(dev);
+
+ i2c_set_clientdata(client, cg);
+
+ cg->regmap = devm_regmap_init_i2c(client, &chagall_battery_regmap_config);
+ if (IS_ERR(cg->regmap))
+ return dev_err_probe(dev, PTR_ERR(cg->regmap), "cannot allocate regmap\n");
+
+ cg->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
+ cg->battery = devm_power_supply_register(dev, &chagall_battery_desc, &cfg);
+ if (IS_ERR(cg->battery))
+ return dev_err_probe(dev, PTR_ERR(cg->battery),
+ "failed to register power supply\n");
+
+ cg->amber_led.name = "power::amber";
+ cg->amber_led.max_brightness = 1;
+ cg->amber_led.flags = LED_CORE_SUSPENDRESUME;
+ cg->amber_led.brightness_set = chagall_led_set_brightness_amber;
+ cg->amber_led.default_trigger = "chagall-battery-charging";
+
+ ret = devm_led_classdev_register(dev, &cg->amber_led);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register amber LED\n");
+
+ cg->white_led.name = "power::white";
+ cg->white_led.max_brightness = 1;
+ cg->white_led.flags = LED_CORE_SUSPENDRESUME;
+ cg->white_led.brightness_set = chagall_led_set_brightness_white;
+ cg->white_led.default_trigger = "chagall-battery-full";
+
+ ret = devm_led_classdev_register(dev, &cg->white_led);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register white LED\n");
+
+ led_set_brightness(&cg->amber_led, LED_OFF);
+ led_set_brightness(&cg->white_led, LED_OFF);
+
+ ret = devm_delayed_work_autocancel(dev, &cg->poll_work, chagall_battery_poll_work);
+ if (ret)
+ return ret;
+
+ schedule_delayed_work(&cg->poll_work, msecs_to_jiffies(CHAGALL_BATTERY_DATA_REFRESH));
+
+ return 0;
+}
+
+static int __maybe_unused chagall_battery_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct chagall_battery_data *cg = i2c_get_clientdata(client);
+
+ cancel_delayed_work_sync(&cg->poll_work);
+
+ return 0;
+}
+
+static int __maybe_unused chagall_battery_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct chagall_battery_data *cg = i2c_get_clientdata(client);
+
+ schedule_delayed_work(&cg->poll_work, msecs_to_jiffies(CHAGALL_BATTERY_DATA_REFRESH));
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(chagall_battery_pm_ops,
+ chagall_battery_suspend, chagall_battery_resume);
+
+static const struct of_device_id chagall_of_match[] = {
+ { .compatible = "pegatron,chagall-ec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, chagall_of_match);
+
+static struct i2c_driver chagall_battery_driver = {
+ .driver = {
+ .name = "chagall-battery",
+ .pm = &chagall_battery_pm_ops,
+ .of_match_table = chagall_of_match,
+ },
+ .probe = chagall_battery_probe,
+};
+module_i2c_driver(chagall_battery_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Pegatron Chagall fuel gauge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/collie_battery.c b/drivers/power/supply/collie_battery.c
index 68390bd1004f..3daf7befc0bf 100644
--- a/drivers/power/supply/collie_battery.c
+++ b/drivers/power/supply/collie_battery.c
@@ -440,6 +440,7 @@ err_put_gpio_full:
static void collie_bat_remove(struct ucb1x00_dev *dev)
{
+ device_init_wakeup(&ucb->dev, 0);
free_irq(gpiod_to_irq(collie_bat_main.gpio_full), &collie_bat_main);
power_supply_unregister(collie_bat_bu.psy);
power_supply_unregister(collie_bat_main.psy);
diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
index 02d5bdbe2e8d..53e6a77e03fc 100644
--- a/drivers/power/supply/cros_charge-control.c
+++ b/drivers/power/supply/cros_charge-control.c
@@ -47,29 +47,20 @@ struct cros_chctl_priv {
static int cros_chctl_send_charge_control_cmd(struct cros_ec_device *cros_ec,
u8 cmd_version, struct ec_params_charge_control *req)
{
+ int ret;
static const u8 outsizes[] = {
[1] = offsetof(struct ec_params_charge_control, cmd),
[2] = sizeof(struct ec_params_charge_control),
[3] = sizeof(struct ec_params_charge_control),
};
- struct {
- struct cros_ec_command msg;
- union {
- struct ec_params_charge_control req;
- struct ec_response_charge_control resp;
- } __packed data;
- } __packed buf = {
- .msg = {
- .command = EC_CMD_CHARGE_CONTROL,
- .version = cmd_version,
- .insize = 0,
- .outsize = outsizes[cmd_version],
- },
- .data.req = *req,
- };
+ ret = cros_ec_cmd(cros_ec, cmd_version, EC_CMD_CHARGE_CONTROL, req,
+ outsizes[cmd_version], NULL, 0);
+
+ if (ret < 0)
+ return ret;
- return cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+ return 0;
}
static int cros_chctl_configure_ec(struct cros_chctl_priv *priv)
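The rewrite drops the hand-built cros_ec_command buffer in favour of the cros_ec_cmd() helper; for a v1 command the call above reduces to the following (restating the new code path with the version literal inlined):

    /* v1 sends only the fields up to 'cmd'; no response payload expected */
    ret = cros_ec_cmd(cros_ec, 1, EC_CMD_CHARGE_CONTROL,
                      req, offsetof(struct ec_params_charge_control, cmd),
                      NULL, 0);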
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 1dfd5b0cb30d..1b2da9b5fb65 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -366,7 +366,9 @@ static int gpio_charger_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpio_charger);
- device_init_wakeup(dev, 1);
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
return 0;
}
diff --git a/drivers/power/supply/huawei-gaokun-battery.c b/drivers/power/supply/huawei-gaokun-battery.c
new file mode 100644
index 000000000000..e4dfec3b4241
--- /dev/null
+++ b/drivers/power/supply/huawei-gaokun-battery.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * huawei-gaokun-battery - A power supply driver for HUAWEI Matebook E Go
+ *
+ * Copyright (C) 2024 Pengyu Luo <mitltlatltl@gmail.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_data/huawei-gaokun-ec.h>
+#include <linux/power_supply.h>
+#include <linux/sprintf.h>
+
+/* -------------------------------------------------------------------------- */
+/* String Data Reg */
+
+#define EC_BAT_VENDOR 0x01 /* from 0x01 to 0x0F, SUNWODA */
+#define EC_BAT_MODEL 0x11 /* from 0x11 to 0x1F, HB30A8P9ECW-22T */
+
+#define EC_ADP_STATUS 0x81
+#define EC_AC_STATUS BIT(0)
+#define EC_BAT_PRESENT BIT(1) /* BATC._STA */
+
+#define EC_BAT_STATUS 0x82 /* _BST */
+#define EC_BAT_DISCHARGING BIT(0)
+#define EC_BAT_CHARGING BIT(1)
+#define EC_BAT_CRITICAL BIT(2) /* Low Battery Level */
+#define EC_BAT_FULL BIT(3)
+
+/* -------------------------------------------------------------------------- */
+/* Word Data Reg */
+
+/* 0x5A: ?
+ * 0x5C: ?
+ * 0x5E: ?
+ * 0X60: ?
+ * 0x84: ?
+ */
+
+#define EC_BAT_STATUS_START 0x90
+#define EC_BAT_PERCENTAGE 0x90
+#define EC_BAT_VOLTAGE 0x92
+#define EC_BAT_CAPACITY 0x94
+#define EC_BAT_FULL_CAPACITY 0x96
+/* 0x98: ? */
+#define EC_BAT_CURRENT 0x9A
+/* 0x9C: ? */
+
+#define EC_BAT_INFO_START 0xA0
+/* 0xA0: POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT? */
+#define EC_BAT_DESIGN_CAPACITY 0xA2
+#define EC_BAT_DESIGN_VOLTAGE 0xA4
+#define EC_BAT_SERIAL_NUMBER 0xA6
+#define EC_BAT_CYCLE_COUNT 0xAA
+
+/* -------------------------------------------------------------------------- */
+/* Battery Event ID */
+
+#define EC_EVENT_BAT_A0 0xA0
+#define EC_EVENT_BAT_A1 0xA1
+#define EC_EVENT_BAT_A2 0xA2
+#define EC_EVENT_BAT_A3 0xA3
+#define EC_EVENT_BAT_B1 0xB1
+/* Events B1, A0 and A1 repeat about every 1s, 2s and 3s respectively */
+
+/* ACPI _BIX field, Min sampling time, the duration between two _BST */
+#define CACHE_TIME 2000 /* cache time in milliseconds */
+
+#define MILLI_TO_MICRO 1000
+
+#define SMART_CHARGE_MODE 0
+#define SMART_CHARGE_DELAY 1
+#define SMART_CHARGE_START 2
+#define SMART_CHARGE_END 3
+
+#define NO_DELAY_MODE 1
+#define DELAY_MODE 4
+
+struct gaokun_psy_bat_status {
+ __le16 percentage_now; /* 0x90 */
+ __le16 voltage_now;
+ __le16 capacity_now;
+ __le16 full_capacity;
+ __le16 unknown1;
+ __le16 rate_now;
+ __le16 unknown2; /* 0x9C */
+} __packed;
+
+struct gaokun_psy_bat_info {
+ __le16 unknown3; /* 0xA0 */
+ __le16 design_capacity;
+ __le16 design_voltage;
+ __le16 serial_number;
+ __le16 padding2;
+ __le16 cycle_count; /* 0xAA */
+} __packed;
+
+struct gaokun_psy {
+ struct gaokun_ec *ec;
+ struct device *dev;
+ struct notifier_block nb;
+
+ struct power_supply *bat_psy;
+ struct power_supply *adp_psy;
+
+ unsigned long update_time;
+ struct gaokun_psy_bat_status status;
+ struct gaokun_psy_bat_info info;
+
+ char battery_model[0x10]; /* HB30A8P9ECW-22T, the real one is XXX-22A */
+ char battery_serial[0x10];
+ char battery_vendor[0x10];
+
+ int charge_now;
+ int online;
+ int bat_present;
+};
+
+/* -------------------------------------------------------------------------- */
+/* Adapter */
+
+static int gaokun_psy_get_adp_status(struct gaokun_psy *ecbat)
+{
+ /* _PSR */
+ int ret;
+ u8 online;
+
+ ret = gaokun_ec_psy_read_byte(ecbat->ec, EC_ADP_STATUS, &online);
+ if (ret)
+ return ret;
+
+ ecbat->online = !!(online & EC_AC_STATUS);
+
+ return 0;
+}
+
+static int gaokun_psy_get_adp_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct gaokun_psy *ecbat = power_supply_get_drvdata(psy);
+ int ret;
+
+ ret = gaokun_psy_get_adp_status(ecbat);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = ecbat->online;
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = POWER_SUPPLY_USB_TYPE_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum power_supply_property gaokun_psy_adp_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static const struct power_supply_desc gaokun_psy_adp_desc = {
+ .name = "gaokun-ec-adapter",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_C),
+ .get_property = gaokun_psy_get_adp_property,
+ .properties = gaokun_psy_adp_props,
+ .num_properties = ARRAY_SIZE(gaokun_psy_adp_props),
+};
+
+/* -------------------------------------------------------------------------- */
+/* Battery */
+
+static inline void gaokun_psy_get_bat_present(struct gaokun_psy *ecbat)
+{
+ int ret;
+ u8 present;
+
+ /* Some kind of initialization */
+ gaokun_ec_write(ecbat->ec, (u8 []){0x02, 0xB2, 1, 0x90});
+
+ ret = gaokun_ec_psy_read_byte(ecbat->ec, EC_ADP_STATUS, &present);
+
+ ecbat->bat_present = ret ? false : !!(present & EC_BAT_PRESENT);
+}
+
+static inline int gaokun_psy_bat_present(struct gaokun_psy *ecbat)
+{
+ return ecbat->bat_present;
+}
+
+static int gaokun_psy_get_bat_info(struct gaokun_psy *ecbat)
+{
+ /* _BIX */
+ if (!gaokun_psy_bat_present(ecbat))
+ return 0;
+
+ return gaokun_ec_psy_multi_read(ecbat->ec, EC_BAT_INFO_START,
+ sizeof(ecbat->info), (u8 *)&ecbat->info);
+}
+
+static void gaokun_psy_update_bat_charge(struct gaokun_psy *ecbat)
+{
+ u8 charge;
+
+ gaokun_ec_psy_read_byte(ecbat->ec, EC_BAT_STATUS, &charge);
+
+ switch (charge) {
+ case EC_BAT_CHARGING:
+ ecbat->charge_now = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case EC_BAT_DISCHARGING:
+ ecbat->charge_now = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case EC_BAT_FULL:
+ ecbat->charge_now = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ dev_warn(ecbat->dev, "unknown charge state %d\n", charge);
+ }
+}
+
+static int gaokun_psy_get_bat_status(struct gaokun_psy *ecbat)
+{
+ /* _BST */
+ int ret;
+
+ if (time_before(jiffies, ecbat->update_time +
+ msecs_to_jiffies(CACHE_TIME)))
+ return 0;
+
+ gaokun_psy_update_bat_charge(ecbat);
+ ret = gaokun_ec_psy_multi_read(ecbat->ec, EC_BAT_STATUS_START,
+ sizeof(ecbat->status), (u8 *)&ecbat->status);
+
+ ecbat->update_time = jiffies;
+
+ return ret;
+}
+
+static void gaokun_psy_init(struct gaokun_psy *ecbat)
+{
+ gaokun_psy_get_bat_present(ecbat);
+ if (!gaokun_psy_bat_present(ecbat))
+ return;
+
+ gaokun_psy_get_bat_info(ecbat);
+
+ snprintf(ecbat->battery_serial, sizeof(ecbat->battery_serial),
+ "%d", le16_to_cpu(ecbat->info.serial_number));
+
+ gaokun_ec_psy_multi_read(ecbat->ec, EC_BAT_VENDOR,
+ sizeof(ecbat->battery_vendor) - 1,
+ ecbat->battery_vendor);
+
+ gaokun_ec_psy_multi_read(ecbat->ec, EC_BAT_MODEL,
+ sizeof(ecbat->battery_model) - 1,
+ ecbat->battery_model);
+
+ ecbat->battery_model[14] = 'A'; /* FIX UP */
+}
+
+static int gaokun_psy_get_bat_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct gaokun_psy *ecbat = power_supply_get_drvdata(psy);
+ u8 buf[GAOKUN_SMART_CHARGE_DATA_SIZE];
+ int ret;
+
+ if (gaokun_psy_bat_present(ecbat))
+ gaokun_psy_get_bat_status(ecbat);
+ else if (psp != POWER_SUPPLY_PROP_PRESENT)
+ return -ENODEV;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = ecbat->charge_now;
+ break;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = ecbat->bat_present;
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = le16_to_cpu(ecbat->info.cycle_count);
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = le16_to_cpu(ecbat->info.design_voltage) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = le16_to_cpu(ecbat->status.voltage_now) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = (s16)le16_to_cpu(ecbat->status.rate_now) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = le16_to_cpu(ecbat->info.design_capacity) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = le16_to_cpu(ecbat->status.full_capacity) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = le16_to_cpu(ecbat->status.capacity_now) * MILLI_TO_MICRO;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ ret = gaokun_ec_psy_get_smart_charge(ecbat->ec, buf);
+ if (ret)
+ return ret;
+
+ if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD)
+ val->intval = buf[SMART_CHARGE_START];
+ else
+ val->intval = buf[SMART_CHARGE_END];
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = le16_to_cpu(ecbat->status.percentage_now);
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = ecbat->battery_model;
+ break;
+
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = ecbat->battery_vendor;
+ break;
+
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = ecbat->battery_serial;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gaokun_psy_set_bat_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct gaokun_psy *ecbat = power_supply_get_drvdata(psy);
+ u8 buf[GAOKUN_SMART_CHARGE_DATA_SIZE];
+ int ret;
+
+ if (!gaokun_psy_bat_present(ecbat))
+ return -ENODEV;
+
+ ret = gaokun_ec_psy_get_smart_charge(ecbat->ec, buf);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ /*
+ * Resetting the other threshold makes setting a single threshold more
+ * likely to succeed. But setting start = end behaves strangely (failure).
+ */
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ buf[SMART_CHARGE_START] = val->intval;
+ if (buf[SMART_CHARGE_START] > buf[SMART_CHARGE_END])
+ buf[SMART_CHARGE_END] = buf[SMART_CHARGE_START] + 1;
+ return gaokun_ec_psy_set_smart_charge(ecbat->ec, buf);
+
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ buf[SMART_CHARGE_END] = val->intval;
+ if (buf[SMART_CHARGE_END] < buf[SMART_CHARGE_START])
+ buf[SMART_CHARGE_START] = buf[SMART_CHARGE_END] - 1;
+ return gaokun_ec_psy_set_smart_charge(ecbat->ec, buf);
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gaokun_psy_is_bat_property_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD ||
+ psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD;
+}
+
+static enum power_supply_property gaokun_psy_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+static const struct power_supply_desc gaokun_psy_bat_desc = {
+ .name = "gaokun-ec-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = gaokun_psy_get_bat_property,
+ .set_property = gaokun_psy_set_bat_property,
+ .property_is_writeable = gaokun_psy_is_bat_property_writeable,
+ .properties = gaokun_psy_bat_props,
+ .num_properties = ARRAY_SIZE(gaokun_psy_bat_props),
+};
+
+/* -------------------------------------------------------------------------- */
+/* Sysfs */
+
+/*
+ * Note that HUAWEI calls them SBAC/GBAC and SBCM/GBCM in the DSDT; they are
+ * Set/Get Battery Adaptive Charging and Set/Get Battery Charging Mode.
+ */
+
+/* battery adaptive charge */
+static ssize_t battery_adaptive_charge_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct gaokun_psy *ecbat = power_supply_get_drvdata(psy);
+ int ret;
+ bool on;
+
+ ret = gaokun_ec_psy_get_smart_charge_enable(ecbat->ec, &on);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", on);
+}
+
+static ssize_t battery_adaptive_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct gaokun_psy *ecbat = power_supply_get_drvdata(psy);
+ int ret;
+ bool on;
+
+ if (kstrtobool(buf, &on))
+ return -EINVAL;
+
+ ret = gaokun_ec_psy_set_smart_charge_enable(ecbat->ec, on);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(battery_adaptive_charge);
+
+static inline int get_charge_delay(u8 buf[GAOKUN_SMART_CHARGE_DATA_SIZE])
+{
+ return buf[SMART_CHARGE_MODE] == NO_DELAY_MODE ? 0 : buf[SMART_CHARGE_DELAY];
+}
+
+static inline void
+set_charge_delay(u8 buf[GAOKUN_SMART_CHARGE_DATA_SIZE], u8 delay)
+{
+ if (delay) {
+ buf[SMART_CHARGE_DELAY] = delay;
+ buf[SMART_CHARGE_MODE] = DELAY_MODE;
+ } else {
+ /* Never write a zero delay; there is a dedicated mode for that. */
+ buf[SMART_CHARGE_MODE] = NO_DELAY_MODE;
+ }
+}
+
+/* Smart charge */
+static ssize_t smart_charge_delay_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct gaokun_psy *ecbat = power_supply_get_drvdata(psy);
+ u8 bf[GAOKUN_SMART_CHARGE_DATA_SIZE];
+ int ret;
+
+ ret = gaokun_ec_psy_get_smart_charge(ecbat->ec, bf);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", get_charge_delay(bf));
+}
+
+static ssize_t smart_charge_delay_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct gaokun_psy *ecbat = power_supply_get_drvdata(psy);
+ u8 bf[GAOKUN_SMART_CHARGE_DATA_SIZE];
+ u8 delay;
+ int ret;
+
+ if (kstrtou8(buf, 10, &delay))
+ return -EINVAL;
+
+ ret = gaokun_ec_psy_get_smart_charge(ecbat->ec, bf);
+ if (ret)
+ return ret;
+
+ set_charge_delay(bf, delay);
+
+ ret = gaokun_ec_psy_set_smart_charge(ecbat->ec, bf);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(smart_charge_delay);
+
+static struct attribute *gaokun_psy_features_attrs[] = {
+ &dev_attr_battery_adaptive_charge.attr,
+ &dev_attr_smart_charge_delay.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gaokun_psy_features);
+
+static int gaokun_psy_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct gaokun_psy *ecbat = container_of(nb, struct gaokun_psy, nb);
+
+ switch (action) {
+ case EC_EVENT_BAT_A2:
+ case EC_EVENT_BAT_B1:
+ gaokun_psy_get_bat_info(ecbat);
+ return NOTIFY_OK;
+
+ case EC_EVENT_BAT_A0:
+ gaokun_psy_get_adp_status(ecbat);
+ power_supply_changed(ecbat->adp_psy);
+ msleep(10);
+ fallthrough;
+
+ case EC_EVENT_BAT_A1:
+ case EC_EVENT_BAT_A3:
+ if (action == EC_EVENT_BAT_A3) {
+ gaokun_psy_get_bat_info(ecbat);
+ msleep(100);
+ }
+ gaokun_psy_get_bat_status(ecbat);
+ power_supply_changed(ecbat->bat_psy);
+ return NOTIFY_OK;
+
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static int gaokun_psy_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct gaokun_ec *ec = adev->dev.platform_data;
+ struct power_supply_config psy_cfg = {};
+ struct device *dev = &adev->dev;
+ struct gaokun_psy *ecbat;
+
+ ecbat = devm_kzalloc(&adev->dev, sizeof(*ecbat), GFP_KERNEL);
+ if (!ecbat)
+ return -ENOMEM;
+
+ ecbat->ec = ec;
+ ecbat->dev = dev;
+ ecbat->nb.notifier_call = gaokun_psy_notify;
+
+ auxiliary_set_drvdata(adev, ecbat);
+
+ psy_cfg.drv_data = ecbat;
+ ecbat->adp_psy = devm_power_supply_register(dev, &gaokun_psy_adp_desc,
+ &psy_cfg);
+ if (IS_ERR(ecbat->adp_psy))
+ return dev_err_probe(dev, PTR_ERR(ecbat->adp_psy),
+ "Failed to register AC power supply\n");
+
+ psy_cfg.supplied_to = (char **)&gaokun_psy_bat_desc.name;
+ psy_cfg.num_supplicants = 1;
+ psy_cfg.no_wakeup_source = true;
+ psy_cfg.attr_grp = gaokun_psy_features_groups;
+ ecbat->bat_psy = devm_power_supply_register(dev, &gaokun_psy_bat_desc,
+ &psy_cfg);
+ if (IS_ERR(ecbat->bat_psy))
+ return dev_err_probe(dev, PTR_ERR(ecbat->bat_psy),
+ "Failed to register battery power supply\n");
+ gaokun_psy_init(ecbat);
+
+ return gaokun_ec_register_notify(ec, &ecbat->nb);
+}
+
+static void gaokun_psy_remove(struct auxiliary_device *adev)
+{
+ struct gaokun_psy *ecbat = auxiliary_get_drvdata(adev);
+
+ gaokun_ec_unregister_notify(ecbat->ec, &ecbat->nb);
+}
+
+static const struct auxiliary_device_id gaokun_psy_id_table[] = {
+ { .name = GAOKUN_MOD_NAME "." GAOKUN_DEV_PSY, },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, gaokun_psy_id_table);
+
+static struct auxiliary_driver gaokun_psy_driver = {
+ .name = GAOKUN_DEV_PSY,
+ .id_table = gaokun_psy_id_table,
+ .probe = gaokun_psy_probe,
+ .remove = gaokun_psy_remove,
+};
+
+module_auxiliary_driver(gaokun_psy_driver);
+
+MODULE_DESCRIPTION("HUAWEI Matebook E Go psy driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 51310f6e4803..c1640bc6accd 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -410,8 +410,9 @@ static int max17040_get_property(struct power_supply *psy,
if (!chip->channel_temp)
return -ENODATA;
- iio_read_channel_processed_scale(chip->channel_temp,
- &val->intval, 10);
+ iio_read_channel_processed(chip->channel_temp, &val->intval);
+ val->intval /= 100; /* Convert from milli- to deci-degree */
+
break;
default:
return -EINVAL;
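The replacement works because IIO processed temperature reads are in millidegrees Celsius while POWER_SUPPLY_PROP_TEMP is in tenths of a degree; worked through with an assumed reading:

    /* e.g. the ADC channel reports 25000 (millidegrees Celsius) */
    val->intval = 25000;
    val->intval /= 100;   /* 250 deci-degrees, i.e. 25.0 degrees Celsius */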
diff --git a/drivers/power/supply/max77705_charger.c b/drivers/power/supply/max77705_charger.c
index eec5e9ef795e..329b430d0e50 100644
--- a/drivers/power/supply/max77705_charger.c
+++ b/drivers/power/supply/max77705_charger.c
@@ -545,20 +545,28 @@ static int max77705_charger_probe(struct i2c_client *i2c)
return dev_err_probe(dev, ret, "failed to add irq chip\n");
chg->wqueue = create_singlethread_workqueue(dev_name(dev));
- if (IS_ERR(chg->wqueue))
- return dev_err_probe(dev, PTR_ERR(chg->wqueue), "failed to create workqueue\n");
+ if (!chg->wqueue)
+ return dev_err_probe(dev, -ENOMEM, "failed to create workqueue\n");
ret = devm_work_autocancel(dev, &chg->chgin_work, max77705_chgin_isr_work);
- if (ret)
- return dev_err_probe(dev, ret, "failed to initialize interrupt work\n");
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to initialize interrupt work\n");
+ goto destroy_wq;
+ }
max77705_charger_initialize(chg);
ret = max77705_charger_enable(chg);
- if (ret)
- return dev_err_probe(dev, ret, "failed to enable charge\n");
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to enable charge\n");
+ goto destroy_wq;
+ }
return devm_add_action_or_reset(dev, max77705_charger_disable, chg);
+
+destroy_wq:
+ destroy_workqueue(chg->wqueue);
+ return ret;
}
static const struct of_device_id max77705_charger_of_match[] = {
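The first hunk matters because create_singlethread_workqueue() returns NULL on failure rather than an ERR_PTR, so the old IS_ERR() check could never fire; and since the workqueue is not devm-managed, the later error paths must tear it down by hand. The corrected pattern in isolation (with a hypothetical do_setup() standing in for the remaining probe steps):

    wq = create_singlethread_workqueue(dev_name(dev));
    if (!wq)                    /* NULL, never an ERR_PTR */
        return -ENOMEM;

    ret = do_setup();           /* any step that can still fail */
    if (ret)
        goto destroy_wq;
    return 0;

    destroy_wq:
    destroy_workqueue(wq);      /* undo the allocation on every error path */
    return ret;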
diff --git a/drivers/power/supply/max8971_charger.c b/drivers/power/supply/max8971_charger.c
new file mode 100644
index 000000000000..26416d26f235
--- /dev/null
+++ b/drivers/power/supply/max8971_charger.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/devm-helpers.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_graph.h>
+#include <linux/property.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define MAX8971_REG_CHGINT 0x0f
+#define MAX8971_REG_CHG_RST BIT(0)
+#define MAX8971_REG_CHGINT_MASK 0x01
+#define MAX8971_AICL_MASK BIT(7)
+#define MAX8971_REG_CHG_STAT 0x02
+#define MAX8971_CHG_MASK BIT(3)
+#define MAX8971_REG_DETAILS1 0x03
+#define MAX8971_REG_DETAILS2 0x04
+#define MAX8971_REG_CHGCNTL1 0x05
+#define MAX8971_REG_FCHGCRNT 0x06
+#define MAX8971_REG_DCCRNT 0x07
+#define MAX8971_CHGRSTRT_MASK BIT(6)
+#define MAX8971_REG_TOPOFF 0x08
+#define MAX8971_REG_TEMPREG 0x09
+#define MAX8971_REG_PROTCMD 0x0a
+#define MAX8971_CHGPROT_LOCKED 0x00
+#define MAX8971_CHGPROT_UNLOCKED 0x03
+
+#define MAX8971_FCHGT_DEFAULT 2
+#define MAX8971_TOPOFFT_DEFAULT 3
+
+static const char *max8971_manufacturer = "Maxim Integrated";
+static const char *max8971_model = "MAX8971";
+
+enum max8971_charging_state {
+ MAX8971_CHARGING_DEAD_BATTERY,
+ MAX8971_CHARGING_PREQUALIFICATION,
+ MAX8971_CHARGING_FAST_CONST_CURRENT,
+ MAX8971_CHARGING_FAST_CONST_VOLTAGE,
+ MAX8971_CHARGING_TOP_OFF,
+ MAX8971_CHARGING_DONE,
+ MAX8971_CHARGING_TIMER_FAULT,
+ MAX8971_CHARGING_SUSPENDED_THERMAL,
+ MAX8971_CHARGING_OFF,
+ MAX8971_CHARGING_THERMAL_LOOP,
+};
+
+enum max8971_health_state {
+ MAX8971_HEALTH_UNKNOWN,
+ MAX8971_HEALTH_COLD,
+ MAX8971_HEALTH_COOL,
+ MAX8971_HEALTH_WARM,
+ MAX8971_HEALTH_HOT,
+ MAX8971_HEALTH_OVERHEAT,
+};
+
+/* Fast-Charge current limit, 250..1550 mA, 50 mA steps */
+#define MAX8971_CHG_CC_STEP 50000U
+#define MAX8971_CHG_CC_MIN 250000U
+#define MAX8971_CHG_CC_MAX 1550000U
+
+/* Input current limit, 250..1500 mA, 25 mA steps */
+#define MAX8971_DCILMT_STEP 25000U
+#define MAX8971_DCILMT_MIN 250000U
+#define MAX8971_DCILMT_MAX 1500000U
+
+enum max8971_field_idx {
+ THM_DTLS, /* DETAILS1 */
+ BAT_DTLS, CHG_DTLS, /* DETAILS2 */
+ CHG_CC, FCHG_T, /* FCHGCRNT */
+ DCI_LMT, /* DCCRNT */
+ TOPOFF_T, TOPOFF_S, /* TOPOFF */
+ CPROT, /* PROTCMD */
+ MAX8971_N_REGMAP_FIELDS
+};
+
+static const struct reg_field max8971_reg_field[MAX8971_N_REGMAP_FIELDS] = {
+ [THM_DTLS] = REG_FIELD(MAX8971_REG_DETAILS1, 0, 2),
+ [BAT_DTLS] = REG_FIELD(MAX8971_REG_DETAILS2, 4, 5),
+ [CHG_DTLS] = REG_FIELD(MAX8971_REG_DETAILS2, 0, 3),
+ [CHG_CC] = REG_FIELD(MAX8971_REG_FCHGCRNT, 0, 4),
+ [FCHG_T] = REG_FIELD(MAX8971_REG_FCHGCRNT, 5, 7),
+ [DCI_LMT] = REG_FIELD(MAX8971_REG_DCCRNT, 0, 5),
+ [TOPOFF_T] = REG_FIELD(MAX8971_REG_TOPOFF, 5, 7),
+ [TOPOFF_S] = REG_FIELD(MAX8971_REG_TOPOFF, 2, 3),
+ [CPROT] = REG_FIELD(MAX8971_REG_PROTCMD, 2, 3),
+};
+
+static const struct regmap_config max8971_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX8971_REG_CHGINT,
+};
+
+struct max8971_data {
+ struct device *dev;
+ struct power_supply *psy_mains;
+
+ struct extcon_dev *edev;
+ struct notifier_block extcon_nb;
+ struct delayed_work extcon_work;
+
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX8971_N_REGMAP_FIELDS];
+
+ enum power_supply_usb_type usb_type;
+
+ u32 fchgt;
+ u32 tofft;
+ u32 toffs;
+
+ bool present;
+};
+
+static int max8971_get_status(struct max8971_data *priv, int *val)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_field_read(priv->rfield[CHG_DTLS], &regval);
+ if (err)
+ return err;
+
+ switch (regval) {
+ case MAX8971_CHARGING_DEAD_BATTERY:
+ case MAX8971_CHARGING_PREQUALIFICATION:
+ case MAX8971_CHARGING_FAST_CONST_CURRENT:
+ case MAX8971_CHARGING_FAST_CONST_VOLTAGE:
+ case MAX8971_CHARGING_TOP_OFF:
+ case MAX8971_CHARGING_THERMAL_LOOP:
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case MAX8971_CHARGING_DONE:
+ *val = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case MAX8971_CHARGING_TIMER_FAULT:
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case MAX8971_CHARGING_OFF:
+ case MAX8971_CHARGING_SUSPENDED_THERMAL:
+ *val = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ default:
+ *val = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int max8971_get_charge_type(struct max8971_data *priv, int *val)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_field_read(priv->rfield[CHG_DTLS], &regval);
+ if (err)
+ return err;
+
+ switch (regval) {
+ case MAX8971_CHARGING_DEAD_BATTERY:
+ case MAX8971_CHARGING_PREQUALIFICATION:
+ *val = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case MAX8971_CHARGING_FAST_CONST_CURRENT:
+ case MAX8971_CHARGING_FAST_CONST_VOLTAGE:
+ *val = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case MAX8971_CHARGING_TOP_OFF:
+ case MAX8971_CHARGING_THERMAL_LOOP:
+ *val = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ break;
+ case MAX8971_CHARGING_DONE:
+ case MAX8971_CHARGING_TIMER_FAULT:
+ case MAX8971_CHARGING_SUSPENDED_THERMAL:
+ case MAX8971_CHARGING_OFF:
+ *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ default:
+ *val = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int max8971_get_health(struct max8971_data *priv, int *val)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_field_read(priv->rfield[THM_DTLS], &regval);
+ if (err)
+ return err;
+
+ switch (regval) {
+ case MAX8971_HEALTH_COLD:
+ *val = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ case MAX8971_HEALTH_COOL:
+ *val = POWER_SUPPLY_HEALTH_COOL;
+ break;
+ case MAX8971_HEALTH_WARM:
+ *val = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case MAX8971_HEALTH_HOT:
+ *val = POWER_SUPPLY_HEALTH_HOT;
+ break;
+ case MAX8971_HEALTH_OVERHEAT:
+ *val = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ case MAX8971_HEALTH_UNKNOWN:
+ default:
+ *val = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int max8971_get_online(struct max8971_data *priv, int *val)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(priv->regmap, MAX8971_REG_CHG_STAT, &regval);
+ if (err)
+ return err;
+
+ if (priv->present)
+ /* CHG_OK bit is 0 when charger is online */
+ *val = !(regval & MAX8971_CHG_MASK);
+ else
+ *val = priv->present;
+
+ return 0;
+}
+
+static int max8971_get_integer(struct max8971_data *priv, enum max8971_field_idx fidx,
+ u32 clamp_min, u32 clamp_max, u32 mult, int *val)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_field_read(priv->rfield[fidx], &regval);
+ if (err)
+ return err;
+
+ *val = clamp_val(regval * mult, clamp_min, clamp_max);
+
+ return 0;
+}
+
+static int max8971_set_integer(struct max8971_data *priv, enum max8971_field_idx fidx,
+ u32 clamp_min, u32 clamp_max, u32 div, int val)
+{
+ u32 regval;
+
+ regval = clamp_val(val, clamp_min, clamp_max) / div;
+
+ return regmap_field_write(priv->rfield[fidx], regval);
+}
+
+static int max8971_get_property(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max8971_data *priv = power_supply_get_drvdata(psy);
+ int err = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ err = max8971_get_status(priv, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ err = max8971_get_charge_type(priv, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = priv->usb_type;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ err = max8971_get_health(priv, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ err = max8971_get_online(priv, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = priv->present;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ val->intval = MAX8971_CHG_CC_MAX;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ err = max8971_get_integer(priv, CHG_CC, MAX8971_CHG_CC_MIN, MAX8971_CHG_CC_MAX,
+ MAX8971_CHG_CC_STEP, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ err = max8971_get_integer(priv, DCI_LMT, MAX8971_DCILMT_MIN, MAX8971_DCILMT_MAX,
+ MAX8971_DCILMT_STEP, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = max8971_model;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = max8971_manufacturer;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int max8971_set_property(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct max8971_data *priv = power_supply_get_drvdata(psy);
+ int err = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ err = max8971_set_integer(priv, CHG_CC, MAX8971_CHG_CC_MIN, MAX8971_CHG_CC_MAX,
+ MAX8971_CHG_CC_STEP, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ err = max8971_set_integer(priv, DCI_LMT, MAX8971_DCILMT_MIN, MAX8971_DCILMT_MAX,
+ MAX8971_DCILMT_STEP, val->intval);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int max8971_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static enum power_supply_property max8971_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static const struct power_supply_desc max8971_charger_desc = {
+ .name = "max8971-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN) |
+ BIT(POWER_SUPPLY_USB_TYPE_SDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_DCP) |
+ BIT(POWER_SUPPLY_USB_TYPE_CDP) |
+ BIT(POWER_SUPPLY_USB_TYPE_ACA),
+ .properties = max8971_properties,
+ .num_properties = ARRAY_SIZE(max8971_properties),
+ .get_property = max8971_get_property,
+ .set_property = max8971_set_property,
+ .property_is_writeable = max8971_property_is_writeable,
+};
+
+static void max8971_update_config(struct max8971_data *priv)
+{
+ regmap_field_write(priv->rfield[CPROT], MAX8971_CHGPROT_UNLOCKED);
+
+ if (priv->fchgt != MAX8971_FCHGT_DEFAULT)
+ regmap_field_write(priv->rfield[FCHG_T], priv->fchgt);
+
+ regmap_write_bits(priv->regmap, MAX8971_REG_DCCRNT, MAX8971_CHGRSTRT_MASK,
+ MAX8971_CHGRSTRT_MASK);
+
+ if (priv->tofft != MAX8971_TOPOFFT_DEFAULT)
+ regmap_field_write(priv->rfield[TOPOFF_T], priv->tofft);
+
+ if (priv->toffs)
+ regmap_field_write(priv->rfield[TOPOFF_S], priv->toffs);
+
+ regmap_field_write(priv->rfield[CPROT], MAX8971_CHGPROT_LOCKED);
+}
+
+static ssize_t fast_charge_timer_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct max8971_data *priv = power_supply_get_drvdata(psy);
+ u32 regval;
+ int err;
+
+ err = regmap_field_read(priv->rfield[FCHG_T], &regval);
+ if (err)
+ return err;
+
+ switch (regval) {
+ case 0x1 ... 0x7:
+ /* Reported time is the register value plus 3 hours */
+ regval += 3;
+ break;
+ case 0x0:
+ default:
+ regval = 0;
+ break;
+ }
+
+ return sysfs_emit(buf, "%u\n", regval);
+}
+
+static ssize_t fast_charge_timer_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct max8971_data *priv = power_supply_get_drvdata(psy);
+ unsigned long hours;
+ int val, err;
+
+ err = kstrtoul(buf, 10, &hours);
+ if (err)
+ return err;
+
+ val = hours - 3;
+ if (val <= 0 || val > 7)
+ priv->fchgt = 0;
+ else
+ priv->fchgt = val;
+
+ max8971_update_config(priv);
+
+ return count;
+}
+
+static ssize_t top_off_threshold_current_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct max8971_data *priv = power_supply_get_drvdata(psy);
+ u32 regval, val;
+ int err;
+
+ err = regmap_field_read(priv->rfield[TOPOFF_S], &regval);
+ if (err)
+ return err;
+
+ /* 50 mA minimum with 50 mA steps; reported in uA */
+ val = regval * 50 + 50;
+ val *= 1000;
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t top_off_threshold_current_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct max8971_data *priv = power_supply_get_drvdata(psy);
+ unsigned long uamp;
+ int err;
+
+ err = kstrtoul(buf, 10, &uamp);
+ if (err)
+ return err;
+
+ if (uamp < 50000 || uamp > 200000)
+ return -EINVAL;
+
+ priv->toffs = uamp / 50000 - 1;
+
+ max8971_update_config(priv);
+
+ return count;
+}
+
+static ssize_t top_off_timer_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct max8971_data *priv = power_supply_get_drvdata(psy);
+ u32 regval;
+ int err;
+
+ err = regmap_field_read(priv->rfield[TOPOFF_T], &regval);
+ if (err)
+ return err;
+
+ /* 10 min intervals */
+ regval *= 10;
+
+ return sysfs_emit(buf, "%u\n", regval);
+}
+
+static ssize_t top_off_timer_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct power_supply *psy = to_power_supply(dev);
+ struct max8971_data *priv = power_supply_get_drvdata(psy);
+ unsigned long minutes;
+ int err;
+
+ err = kstrtoul(buf, 10, &minutes);
+ if (err)
+ return err;
+
+ if (minutes > 70)
+ return -EINVAL;
+
+ priv->tofft = minutes / 10;
+
+ max8971_update_config(priv);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(fast_charge_timer);
+static DEVICE_ATTR_RW(top_off_threshold_current);
+static DEVICE_ATTR_RW(top_off_timer);
+
+static struct attribute *max8971_attrs[] = {
+ &dev_attr_fast_charge_timer.attr,
+ &dev_attr_top_off_threshold_current.attr,
+ &dev_attr_top_off_timer.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(max8971);
+
+static void max8971_extcon_evt_worker(struct work_struct *work)
+{
+ struct max8971_data *priv =
+ container_of(work, struct max8971_data, extcon_work.work);
+ struct device *dev = priv->dev;
+ struct extcon_dev *edev = priv->edev;
+ u32 chgcc, dcilmt;
+
+ if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
+ dev_dbg(dev, "USB SDP charger is connected\n");
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ chgcc = 500000;
+ dcilmt = 500000;
+ } else if (extcon_get_state(edev, EXTCON_USB) > 0) {
+ dev_dbg(dev, "USB charger is connected\n");
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ chgcc = 500000;
+ dcilmt = 500000;
+ } else if (extcon_get_state(edev, EXTCON_DISP_MHL) > 0) {
+ dev_dbg(dev, "MHL plug is connected\n");
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ chgcc = 500000;
+ dcilmt = 500000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_DCP) > 0) {
+ dev_dbg(dev, "USB DCP charger is connected\n");
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ chgcc = 900000;
+ dcilmt = 1200000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_FAST) > 0) {
+ dev_dbg(dev, "USB FAST charger is connected\n");
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_ACA;
+ chgcc = 900000;
+ dcilmt = 1200000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_SLOW) > 0) {
+ dev_dbg(dev, "USB SLOW charger is connected\n");
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_ACA;
+ chgcc = 900000;
+ dcilmt = 1200000;
+ } else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
+ dev_dbg(dev, "USB CDP charger is connected\n");
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+ chgcc = 900000;
+ dcilmt = 1200000;
+ } else {
+ dev_dbg(dev, "USB state is unknown\n");
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ return;
+ }
+
+ regmap_field_write(priv->rfield[CPROT], MAX8971_CHGPROT_UNLOCKED);
+
+ max8971_set_integer(priv, CHG_CC, MAX8971_CHG_CC_MIN, MAX8971_CHG_CC_MAX,
+ MAX8971_CHG_CC_STEP, chgcc);
+ max8971_set_integer(priv, DCI_LMT, MAX8971_DCILMT_MIN, MAX8971_DCILMT_MAX,
+ MAX8971_DCILMT_STEP, dcilmt);
+
+ regmap_field_write(priv->rfield[CPROT], MAX8971_CHGPROT_LOCKED);
+}
+
+static int extcon_get_charger_type(struct notifier_block *nb,
+ unsigned long state, void *data)
+{
+ struct max8971_data *priv =
+ container_of(nb, struct max8971_data, extcon_nb);
+ schedule_delayed_work(&priv->extcon_work, 0);
+
+ return NOTIFY_OK;
+}
+
+static irqreturn_t max8971_interrupt(int irq, void *dev_id)
+{
+ struct max8971_data *priv = dev_id;
+ struct device *dev = priv->dev;
+ int err, state;
+
+ err = regmap_read(priv->regmap, MAX8971_REG_CHGINT, &state);
+ if (err)
+ dev_err(dev, "interrupt reg read failed %d\n", err);
+
+ err = regmap_write_bits(priv->regmap, MAX8971_REG_CHGINT_MASK,
+ MAX8971_AICL_MASK, MAX8971_AICL_MASK);
+ if (err)
+ dev_err(dev, "failed to mask IRQ\n");
+
+ /* set presence prop */
+ priv->present = state & MAX8971_REG_CHG_RST;
+
+ /* the chip resets to its defaults on every plug event */
+ if (priv->present)
+ max8971_update_config(priv);
+
+ /* update supply status */
+ power_supply_changed(priv->psy_mains);
+
+ return IRQ_HANDLED;
+}
+
+static int max8971_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max8971_data *priv;
+ struct device_node *extcon;
+ struct power_supply_config cfg = { };
+ int err, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+
+ i2c_set_clientdata(client, priv);
+
+ priv->regmap = devm_regmap_init_i2c(client, &max8971_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap), "cannot allocate regmap\n");
+
+ for (i = 0; i < MAX8971_N_REGMAP_FIELDS; i++) {
+ priv->rfield[i] = devm_regmap_field_alloc(dev, priv->regmap, max8971_reg_field[i]);
+ if (IS_ERR(priv->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(priv->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
+
+ cfg.attr_grp = max8971_groups;
+ cfg.drv_data = priv;
+ cfg.fwnode = dev_fwnode(dev);
+
+ priv->psy_mains = devm_power_supply_register(dev, &max8971_charger_desc, &cfg);
+ if (IS_ERR(priv->psy_mains))
+ return dev_err_probe(dev, PTR_ERR(priv->psy_mains),
+ "failed to register mains supply\n");
+
+ err = regmap_write_bits(priv->regmap, MAX8971_REG_CHGINT_MASK, MAX8971_AICL_MASK,
+ MAX8971_AICL_MASK);
+ if (err)
+ return dev_err_probe(dev, err, "failed to mask IRQ\n");
+
+ err = devm_request_threaded_irq(dev, client->irq, NULL, &max8971_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, client->name, priv);
+ if (err)
+ return dev_err_probe(dev, err, "failed to register IRQ %d\n", client->irq);
+
+ extcon = of_graph_get_remote_node(dev->of_node, -1, -1);
+ if (!extcon)
+ return 0;
+
+ priv->edev = extcon_find_edev_by_node(extcon);
+ of_node_put(extcon);
+ if (IS_ERR(priv->edev))
+ return dev_err_probe(dev, PTR_ERR(priv->edev), "failed to find extcon\n");
+
+ err = devm_delayed_work_autocancel(dev, &priv->extcon_work,
+ max8971_extcon_evt_worker);
+ if (err)
+ return dev_err_probe(dev, err, "failed to add extcon evt stop action\n");
+
+ priv->extcon_nb.notifier_call = extcon_get_charger_type;
+
+ err = devm_extcon_register_notifier_all(dev, priv->edev, &priv->extcon_nb);
+ if (err)
+ return dev_err_probe(dev, err, "failed to register notifier\n");
+
+ /* Schedule the initial configuration work with a 1 s delay */
+ schedule_delayed_work(&priv->extcon_work, msecs_to_jiffies(1000));
+
+ return 0;
+}
+
+static int __maybe_unused max8971_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max8971_data *priv = i2c_get_clientdata(client);
+
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(max8971_pm_ops, NULL, max8971_resume);
+
+static const struct of_device_id max8971_match_ids[] = {
+ { .compatible = "maxim,max8971" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max8971_match_ids);
+
+static const struct i2c_device_id max8971_i2c_id[] = {
+ { "max8971" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max8971_i2c_id);
+
+static struct i2c_driver max8971_driver = {
+ .driver = {
+ .name = "max8971-charger",
+ .of_match_table = max8971_match_ids,
+ .pm = &max8971_pm_ops,
+ },
+ .probe = max8971_probe,
+ .id_table = max8971_i2c_id,
+};
+module_i2c_driver(max8971_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("MAX8971 Charger Driver");
+MODULE_LICENSE("GPL");
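
The fast_charge_timer handlers above encode hours into the 3-bit FCHG_T field: register values 1..7 stand for 4..10 hours and 0 disables the timer. A standalone sketch of that round-trip (helper names are hypothetical):

/* Sketch of the MAX8971 FCHG_T encoding; helper names are hypothetical. */
#include <stdio.h>

static unsigned int fchgt_to_hours(unsigned int regval)
{
	return (regval >= 1 && regval <= 7) ? regval + 3 : 0; /* 0 = disabled */
}

static unsigned int hours_to_fchgt(unsigned long hours)
{
	long val = (long)hours - 3;

	return (val <= 0 || val > 7) ? 0 : (unsigned int)val;
}

int main(void)
{
	for (unsigned long h = 3; h <= 11; h++)
		printf("%lu h -> reg %u -> %u h\n", h, hours_to_fchgt(h),
		       fchgt_to_hours(hours_to_fchgt(h)));
	return 0;
}
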
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 439dd0bf8644..18e5e84a81c6 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -110,6 +110,8 @@ static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
[POWER_SUPPLY_HEALTH_COOL] = "Cool",
[POWER_SUPPLY_HEALTH_HOT] = "Hot",
[POWER_SUPPLY_HEALTH_NO_BATTERY] = "No battery",
+ [POWER_SUPPLY_HEALTH_BLOWN_FUSE] = "Blown fuse",
+ [POWER_SUPPLY_HEALTH_CELL_IMBALANCE] = "Cell imbalance",
};
static const char * const POWER_SUPPLY_TECHNOLOGY_TEXT[] = {
@@ -138,9 +140,10 @@ static const char * const POWER_SUPPLY_SCOPE_TEXT[] = {
};
static const char * const POWER_SUPPLY_CHARGE_BEHAVIOUR_TEXT[] = {
- [POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO] = "auto",
- [POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE] = "inhibit-charge",
- [POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE] = "force-discharge",
+ [POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO] = "auto",
+ [POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE] = "inhibit-charge",
+ [POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE_AWAKE] = "inhibit-charge-awake",
+ [POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE] = "force-discharge",
};
static struct power_supply_attr power_supply_attrs[] __ro_after_init = {
@@ -321,6 +324,27 @@ static ssize_t power_supply_show_charge_behaviour(struct device *dev,
value->intval, buf);
}
+static ssize_t power_supply_show_charge_types(struct device *dev,
+ struct power_supply *psy,
+ enum power_supply_charge_type current_type,
+ char *buf)
+{
+ struct power_supply_ext_registration *reg;
+
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (power_supply_ext_has_property(reg->ext,
+ POWER_SUPPLY_PROP_CHARGE_TYPES))
+ return power_supply_charge_types_show(dev,
+ reg->ext->charge_types,
+ current_type, buf);
+ }
+ }
+
+ return power_supply_charge_types_show(dev, psy->desc->charge_types,
+ current_type, buf);
+}
+
static ssize_t power_supply_format_property(struct device *dev,
bool uevent,
struct device_attribute *attr,
@@ -365,7 +389,7 @@ static ssize_t power_supply_format_property(struct device *dev,
case POWER_SUPPLY_PROP_CHARGE_TYPES:
if (uevent) /* no possible values in uevents */
goto default_format;
- ret = power_supply_charge_types_show(dev, psy->desc->charge_types,
+ ret = power_supply_show_charge_types(dev, psy,
value.intval, buf);
break;
case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
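
The lookup above gives registered extensions precedence over the core descriptor: the first extension advertising POWER_SUPPLY_PROP_CHARGE_TYPES supplies the mask, otherwise psy->desc->charge_types is used. A simplified standalone sketch of that precedence rule (types and values are stand-ins):

/* Sketch: extension-before-core property lookup; types are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct ext {
	unsigned int charge_types;
	bool has_charge_types;
};

static unsigned int effective_charge_types(const struct ext *exts, int n,
					   unsigned int core_types)
{
	for (int i = 0; i < n; i++)
		if (exts[i].has_charge_types)
			return exts[i].charge_types; /* first extension wins */
	return core_types; /* fall back to the core descriptor */
}

int main(void)
{
	struct ext exts[] = { { 0x0, false }, { 0x6, true } };

	printf("0x%x\n", effective_charge_types(exts, 2, 0x1)); /* prints 0x6 */
	return 0;
}
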
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
index 74a8d8ed8d9f..c2f8f2e24398 100644
--- a/drivers/power/supply/qcom_pmi8998_charger.c
+++ b/drivers/power/supply/qcom_pmi8998_charger.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Linaro Ltd.
- * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ * Author: Casey Connolly <casey.connolly@linaro.org>
*
* This driver is for the switch-mode battery charger and boost
* hardware found in pmi8998 and related PMICs.
@@ -1045,6 +1045,6 @@ static struct platform_driver qcom_spmi_smb2 = {
module_platform_driver(qcom_spmi_smb2);
-MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>");
+MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
MODULE_DESCRIPTION("Qualcomm SMB2 Charger Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 945c7720c4ae..1251022eb052 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -1088,7 +1088,7 @@ static int rk817_charger_probe(struct platform_device *pdev)
rk817_bat_calib_vol(charger);
pscfg.drv_data = charger;
- pscfg.fwnode = node ? &node->fwnode : NULL;
+ pscfg.fwnode = &node->fwnode;
/*
* Get sample resistor value. Note only values of 10000 or 20000
diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c
index bd966abb4df5..e7f843f12c98 100644
--- a/drivers/power/supply/rt9471.c
+++ b/drivers/power/supply/rt9471.c
@@ -192,12 +192,12 @@ static const struct reg_field rt9471_reg_fields[F_MAX_FIELDS] = {
};
static const struct linear_range rt9471_chg_ranges[RT9471_MAX_RANGES] = {
- [RT9471_RANGE_AICR] = { .min = 50000, .min_sel = 1, .max_sel = 63, .step = 50000 },
- [RT9471_RANGE_MIVR] = { .min = 3900000, .min_sel = 0, .max_sel = 15, .step = 100000 },
- [RT9471_RANGE_IPRE] = { .min = 50000, .min_sel = 0, .max_sel = 15, .step = 50000 },
- [RT9471_RANGE_VCHG] = { .min = 3900000, .min_sel = 0, .max_sel = 80, .step = 10000 },
- [RT9471_RANGE_ICHG] = { .min = 0, .min_sel = 0, .max_sel = 63, .step = 50000 },
- [RT9471_RANGE_IEOC] = { .min = 50000, .min_sel = 0, .max_sel = 15, .step = 50000 },
+ [RT9471_RANGE_AICR] = LINEAR_RANGE(50000, 1, 63, 50000),
+ [RT9471_RANGE_MIVR] = LINEAR_RANGE(3900000, 0, 15, 100000),
+ [RT9471_RANGE_IPRE] = LINEAR_RANGE(50000, 0, 15, 50000),
+ [RT9471_RANGE_VCHG] = LINEAR_RANGE(3900000, 0, 80, 10000),
+ [RT9471_RANGE_ICHG] = LINEAR_RANGE(0, 0, 63, 50000),
+ [RT9471_RANGE_IEOC] = LINEAR_RANGE(50000, 0, 15, 50000),
};
static int rt9471_set_value_by_field_range(struct rt9471_chip *chip,
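
LINEAR_RANGE(min, min_sel, max_sel, step) describes the same mapping the open-coded initializers did: a selector sel in [min_sel, max_sel] corresponds to min + (sel - min_sel) * step. A standalone sketch using the AICR row as a worked example (the struct is a simplified stand-in):

/* Sketch: the selector -> value mapping that LINEAR_RANGE() describes. */
#include <stdio.h>

struct linear_range_sketch {	/* simplified stand-in for struct linear_range */
	unsigned int min, min_sel, max_sel, step;
};

static unsigned int sel_to_value(const struct linear_range_sketch *r,
				 unsigned int sel)
{
	return r->min + (sel - r->min_sel) * r->step;
}

int main(void)
{
	/* AICR: 50 mA at selector 1, 50 mA per step, selectors 1..63 */
	struct linear_range_sketch aicr = { 50000, 1, 63, 50000 };

	printf("sel 1  -> %u uA\n", sel_to_value(&aicr, 1));  /* 50000 */
	printf("sel 63 -> %u uA\n", sel_to_value(&aicr, 63)); /* 3150000 */
	return 0;
}
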
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 2a975a110f48..5bfdfcf6013b 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -37,6 +37,8 @@ static int battery_charge_counter = -1000;
static int battery_current = -1600;
static enum power_supply_charge_behaviour battery_charge_behaviour =
POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+static enum power_supply_charge_type battery_charge_types =
+ POWER_SUPPLY_CHARGE_TYPE_STANDARD;
static bool battery_extension;
static bool module_initialized;
@@ -87,7 +89,7 @@ static int test_power_get_battery_property(struct power_supply *psy,
val->intval = battery_status;
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ val->intval = battery_charge_types;
break;
case POWER_SUPPLY_PROP_HEALTH:
val->intval = battery_health;
@@ -129,6 +131,9 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
val->intval = battery_charge_behaviour;
break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
+ val->intval = battery_charge_types;
+ break;
default:
pr_info("%s: some properties deliberately report errors.\n",
__func__);
@@ -140,7 +145,7 @@ static int test_power_get_battery_property(struct power_supply *psy,
static int test_power_battery_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- return psp == POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR;
+ return psp == POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR || psp == POWER_SUPPLY_PROP_CHARGE_TYPES;
}
static int test_power_set_battery_property(struct power_supply *psy,
@@ -156,6 +161,14 @@ static int test_power_set_battery_property(struct power_supply *psy,
}
battery_charge_behaviour = val->intval;
break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPES:
+ if (val->intval < 0 ||
+ val->intval >= BITS_PER_TYPE(typeof(psy->desc->charge_types)) ||
+ !(BIT(val->intval) & psy->desc->charge_types)) {
+ return -EINVAL;
+ }
+ battery_charge_types = val->intval;
+ break;
default:
return -EINVAL;
}
@@ -188,6 +201,7 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ POWER_SUPPLY_PROP_CHARGE_TYPES,
};
static char *test_power_ac_supplied_to[] = {
@@ -214,7 +228,10 @@ static const struct power_supply_desc test_power_desc[] = {
.property_is_writeable = test_power_battery_property_is_writeable,
.charge_behaviours = BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO)
| BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE)
+ | BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE_AWAKE)
| BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE),
+ .charge_types = BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD)
+ | BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE)
},
[TEST_USB] = {
.name = "test_usb",
diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c
index 538055b29dec..6acdba7885ca 100644
--- a/drivers/power/supply/wm831x_power.c
+++ b/drivers/power/supply/wm831x_power.c
@@ -89,7 +89,7 @@ static int wm831x_wall_get_prop(struct power_supply *psy,
return ret;
}
-static enum power_supply_property wm831x_wall_props[] = {
+static const enum power_supply_property wm831x_wall_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
@@ -120,7 +120,7 @@ static int wm831x_usb_get_prop(struct power_supply *psy,
return ret;
}
-static enum power_supply_property wm831x_usb_props[] = {
+static const enum power_supply_property wm831x_usb_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
@@ -171,21 +171,21 @@ struct chg_map {
int reg_val;
};
-static struct chg_map trickle_ilims[] = {
+static const struct chg_map trickle_ilims[] = {
{ 50, 0 << WM831X_CHG_TRKL_ILIM_SHIFT },
{ 100, 1 << WM831X_CHG_TRKL_ILIM_SHIFT },
{ 150, 2 << WM831X_CHG_TRKL_ILIM_SHIFT },
{ 200, 3 << WM831X_CHG_TRKL_ILIM_SHIFT },
};
-static struct chg_map vsels[] = {
+static const struct chg_map vsels[] = {
{ 4050, 0 << WM831X_CHG_VSEL_SHIFT },
{ 4100, 1 << WM831X_CHG_VSEL_SHIFT },
{ 4150, 2 << WM831X_CHG_VSEL_SHIFT },
{ 4200, 3 << WM831X_CHG_VSEL_SHIFT },
};
-static struct chg_map fast_ilims[] = {
+static const struct chg_map fast_ilims[] = {
{ 0, 0 << WM831X_CHG_FAST_ILIM_SHIFT },
{ 50, 1 << WM831X_CHG_FAST_ILIM_SHIFT },
{ 100, 2 << WM831X_CHG_FAST_ILIM_SHIFT },
@@ -204,7 +204,7 @@ static struct chg_map fast_ilims[] = {
{ 1000, 15 << WM831X_CHG_FAST_ILIM_SHIFT },
};
-static struct chg_map eoc_iterms[] = {
+static const struct chg_map eoc_iterms[] = {
{ 20, 0 << WM831X_CHG_ITERM_SHIFT },
{ 30, 1 << WM831X_CHG_ITERM_SHIFT },
{ 40, 2 << WM831X_CHG_ITERM_SHIFT },
@@ -215,7 +215,7 @@ static struct chg_map eoc_iterms[] = {
{ 90, 7 << WM831X_CHG_ITERM_SHIFT },
};
-static struct chg_map chg_times[] = {
+static const struct chg_map chg_times[] = {
{ 60, 0 << WM831X_CHG_TIME_SHIFT },
{ 90, 1 << WM831X_CHG_TIME_SHIFT },
{ 120, 2 << WM831X_CHG_TIME_SHIFT },
@@ -235,7 +235,7 @@ static struct chg_map chg_times[] = {
};
static void wm831x_battery_apply_config(struct wm831x *wm831x,
- struct chg_map *map, int count, int val,
+ const struct chg_map *map, int count, int val,
int *reg, const char *name,
const char *units)
{
@@ -462,7 +462,7 @@ static int wm831x_bat_get_prop(struct power_supply *psy,
return ret;
}
-static enum power_supply_property wm831x_bat_props[] = {
+static const enum power_supply_property wm831x_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -470,7 +470,7 @@ static enum power_supply_property wm831x_bat_props[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
};
-static const char *wm831x_bat_irqs[] = {
+static const char * const wm831x_bat_irqs[] = {
"BATT HOT",
"BATT COLD",
"BATT FAIL",
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 5ab3feb29686..e3be40adc0d7 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -28,6 +28,7 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
+#include <asm/msr.h>
/* bitmasks for RAPL MSRs, used by primitive access functions */
#define ENERGY_STATUS_MASK 0xffffffff
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index 2b81aabdb0db..8ad2115d65f6 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -24,6 +24,7 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/msr.h>
/* Local defines */
#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
@@ -103,7 +104,7 @@ static int rapl_cpu_down_prep(unsigned int cpu)
static int rapl_msr_read_raw(int cpu, struct reg_action *ra)
{
- if (rdmsrl_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
+ if (rdmsrq_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu);
return -EIO;
}
@@ -116,14 +117,14 @@ static void rapl_msr_update_func(void *info)
struct reg_action *ra = info;
u64 val;
- ra->err = rdmsrl_safe(ra->reg.msr, &val);
+ ra->err = rdmsrq_safe(ra->reg.msr, &val);
if (ra->err)
return;
val &= ~ra->mask;
val |= ra->value;
- ra->err = wrmsrl_safe(ra->reg.msr, val);
+ ra->err = wrmsrq_safe(ra->reg.msr, val);
}
static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index 374ceefd6f2a..47d9891de368 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -98,7 +98,7 @@ static void pps_gpio_echo_timer_callback(struct timer_list *t)
{
const struct pps_gpio_device_data *info;
- info = from_timer(info, t, echo_timer);
+ info = timer_container_of(info, t, echo_timer);
gpiod_set_value(info->echo_pin, 0);
}
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 07bf7f9aae01..204278eb215e 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -44,7 +44,7 @@ config PTP_1588_CLOCK_DTE
depends on PTP_1588_CLOCK
depends on NET && HAS_IOMEM
depends on ARCH_BCM_MOBILE || (ARCH_BCM_IPROC && !(ARCH_BCM_NSP || ARCH_BCM_5301X)) || COMPILE_TEST
- default y
+ default y if ARCH_BCM_MOBILE || ARCH_BCM_IPROC
help
This driver adds support for using the Digital timing engine
(DTE) in the Broadcom SoC's as a PTP clock.
@@ -59,7 +59,7 @@ config PTP_1588_CLOCK_QORIQ
tristate "Freescale QorIQ 1588 timer as PTP clock"
depends on GIANFAR || FSL_DPAA_ETH || FSL_DPAA2_ETH || FSL_ENETC || FSL_ENETC_VF || COMPILE_TEST
depends on PTP_1588_CLOCK
- default y
+ default y if GIANFAR || FSL_DPAA_ETH || FSL_DPAA2_ETH || FSL_ENETC || FSL_ENETC_VF
help
This driver adds support for using the Freescale QorIQ 1588
timer as a PTP clock. This clock is only useful if your PTP
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 4380e6ddb849..4bf421765d03 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -162,6 +162,7 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
{
struct ptp_clock *ptp =
container_of(pccontext->clk, struct ptp_clock, clock);
+ unsigned int i, pin_index, supported_extts_flags;
struct ptp_sys_offset_extended *extoff = NULL;
struct ptp_sys_offset_precise precise_offset;
struct system_device_crosststamp xtstamp;
@@ -172,7 +173,6 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
struct ptp_clock_request req;
struct ptp_clock_caps caps;
struct ptp_clock_time *pct;
- unsigned int i, pin_index;
struct ptp_pin_desc pd;
struct timespec64 ts;
int enable, err = 0;
@@ -240,6 +240,18 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
err = -EINVAL;
break;
}
+ supported_extts_flags = ptp->info->supported_extts_flags;
+ /* The PTP_ENABLE_FEATURE flag is always supported. */
+ supported_extts_flags |= PTP_ENABLE_FEATURE;
+ /* If the driver does not support strictly checking flags, the
+ * PTP_RISING_EDGE and PTP_FALLING_EDGE flags are merely
+ * hints which are not enforced.
+ */
+ if (!(supported_extts_flags & PTP_STRICT_FLAGS))
+ supported_extts_flags |= PTP_EXTTS_EDGES;
+ /* Reject unsupported flags */
+ if (req.extts.flags & ~supported_extts_flags)
+ return -EOPNOTSUPP;
req.type = PTP_CLK_REQ_EXTTS;
enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
if (mutex_lock_interruptible(&ptp->pincfg_mux))
@@ -312,6 +324,8 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
err = -EINVAL;
break;
}
+ if (req.perout.flags & ~ptp->info->supported_perout_flags)
+ return -EOPNOTSUPP;
req.type = PTP_CLK_REQ_PEROUT;
enable = req.perout.period.sec || req.perout.period.nsec;
if (mutex_lock_interruptible(&ptp->pincfg_mux))
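
The check added above composes the effective flag mask before comparing: PTP_ENABLE_FEATURE is always allowed, and unless the driver sets PTP_STRICT_FLAGS the edge flags are treated as unenforced hints. A standalone sketch of the same logic (flag values mirror include/uapi/linux/ptp_clock.h):

/* Sketch: EXTTS flag validation against a driver's advertised mask. */
#include <stdio.h>

#define PTP_ENABLE_FEATURE (1U << 0)
#define PTP_RISING_EDGE    (1U << 1)
#define PTP_FALLING_EDGE   (1U << 2)
#define PTP_STRICT_FLAGS   (1U << 3)
#define PTP_EXTTS_EDGES    (PTP_RISING_EDGE | PTP_FALLING_EDGE)

static int extts_flags_ok(unsigned int requested, unsigned int supported)
{
	supported |= PTP_ENABLE_FEATURE;     /* always supported */
	if (!(supported & PTP_STRICT_FLAGS)) /* edges are mere hints */
		supported |= PTP_EXTTS_EDGES;
	return !(requested & ~supported);
}

int main(void)
{
	/* A driver advertising strict rising-edge support only. */
	unsigned int caps = PTP_RISING_EDGE | PTP_STRICT_FLAGS;

	printf("%d\n", extts_flags_ok(PTP_ENABLE_FEATURE | PTP_RISING_EDGE, caps));  /* 1 */
	printf("%d\n", extts_flags_ok(PTP_ENABLE_FEATURE | PTP_FALLING_EDGE, caps)); /* 0 */
	return 0;
}
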
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 35a5994bf64f..36f57d7b4a66 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -121,7 +121,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
struct ptp_clock_info *ops;
int err = -EOPNOTSUPP;
- if (ptp_clock_freerun(ptp)) {
+ if (tx->modes & (ADJ_SETOFFSET | ADJ_FREQUENCY | ADJ_OFFSET) &&
+ ptp_clock_freerun(ptp)) {
pr_err("ptp: physical clock is free running\n");
return -EBUSY;
}
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index fbb3fa8fc60b..b8d4df8c6da2 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -283,18 +283,6 @@ static int idtcm_extts_enable(struct idtcm_channel *channel,
idtcm = channel->idtcm;
old_mask = idtcm->extts_mask;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
- /* Reject requests to enable time stamping on falling edge */
- if ((rq->extts.flags & PTP_ENABLE_FEATURE) &&
- (rq->extts.flags & PTP_FALLING_EDGE))
- return -EOPNOTSUPP;
-
if (index >= MAX_TOD)
return -EINVAL;
@@ -2043,6 +2031,7 @@ static const struct ptp_clock_info idtcm_caps = {
.n_per_out = 12,
.n_ext_ts = MAX_TOD,
.n_pins = MAX_REF_CLK,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_STRICT_FLAGS,
.adjphase = &idtcm_adjphase,
.getmaxphase = &idtcm_getmaxphase,
.adjfine = &idtcm_adjfine,
@@ -2060,6 +2049,7 @@ static const struct ptp_clock_info idtcm_caps_deprecated = {
.n_per_out = 12,
.n_ext_ts = MAX_TOD,
.n_pins = MAX_REF_CLK,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_STRICT_FLAGS,
.adjphase = &idtcm_adjphase,
.getmaxphase = &idtcm_getmaxphase,
.adjfine = &idtcm_adjfine,
diff --git a/drivers/ptp/ptp_fc3.c b/drivers/ptp/ptp_fc3.c
index cfced36c70bc..70002500170e 100644
--- a/drivers/ptp/ptp_fc3.c
+++ b/drivers/ptp/ptp_fc3.c
@@ -592,6 +592,7 @@ static const struct ptp_clock_info idtfc3_caps = {
.max_adj = MAX_FFO_PPB,
.n_per_out = 1,
.n_ext_ts = 1,
+ .supported_extts_flags = PTP_STRICT_FLAGS | PTP_EXT_OFFSET,
.adjphase = &idtfc3_adjphase,
.adjfine = &idtfc3_adjfine,
.adjtime = &idtfc3_adjtime,
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index b2fd94d4f863..f01c50dfa44e 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -246,18 +246,6 @@ static int idt82p33_extts_enable(struct idt82p33_channel *channel,
idt82p33 = channel->idt82p33;
old_mask = idt82p33->extts_mask;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
- /* Reject requests to enable time stamping on falling edge */
- if ((rq->extts.flags & PTP_ENABLE_FEATURE) &&
- (rq->extts.flags & PTP_FALLING_EDGE))
- return -EOPNOTSUPP;
-
if (index >= MAX_PHC_PLL)
return -EINVAL;
@@ -1187,6 +1175,9 @@ static void idt82p33_caps_init(u32 index, struct ptp_clock_info *caps,
caps->pin_config = pin_cfg;
+ caps->supported_extts_flags = PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS;
+
for (i = 0; i < max_pins; ++i) {
ppd = &pin_cfg[i];
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 2ccdca4f6960..1e7f72e57557 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -315,6 +315,8 @@ struct ptp_ocp_serial_port {
#define OCP_BOARD_ID_LEN 13
#define OCP_SERIAL_LEN 6
#define OCP_SMA_NUM 4
+#define OCP_SIGNAL_NUM 4
+#define OCP_FREQ_NUM 4
enum {
PORT_GNSS,
@@ -342,8 +344,8 @@ struct ptp_ocp {
struct dcf_master_reg __iomem *dcf_out;
struct dcf_slave_reg __iomem *dcf_in;
struct tod_reg __iomem *nmea_out;
- struct frequency_reg __iomem *freq_in[4];
- struct ptp_ocp_ext_src *signal_out[4];
+ struct frequency_reg __iomem *freq_in[OCP_FREQ_NUM];
+ struct ptp_ocp_ext_src *signal_out[OCP_SIGNAL_NUM];
struct ptp_ocp_ext_src *pps;
struct ptp_ocp_ext_src *ts0;
struct ptp_ocp_ext_src *ts1;
@@ -378,10 +380,12 @@ struct ptp_ocp {
u32 utc_tai_offset;
u32 ts_window_adjust;
u64 fw_cap;
- struct ptp_ocp_signal signal[4];
+ struct ptp_ocp_signal signal[OCP_SIGNAL_NUM];
struct ptp_ocp_sma_connector sma[OCP_SMA_NUM];
const struct ocp_sma_op *sma_op;
struct dpll_device *dpll;
+ int signals_nr;
+ int freq_in_nr;
};
#define OCP_REQ_TIMESTAMP BIT(0)
@@ -1522,7 +1526,7 @@ ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val)
static void
ptp_ocp_watchdog(struct timer_list *t)
{
- struct ptp_ocp *bp = from_timer(bp, t, watchdog);
+ struct ptp_ocp *bp = timer_container_of(bp, t, watchdog);
unsigned long flags;
u32 status, utc_offset;
@@ -2372,7 +2376,7 @@ ptp_ocp_attr_group_add(struct ptp_ocp *bp,
if (attr_tbl[i].cap & bp->fw_cap)
count++;
- bp->attr_group = kcalloc(count + 1, sizeof(struct attribute_group *),
+ bp->attr_group = kcalloc(count + 1, sizeof(*bp->attr_group),
GFP_KERNEL);
if (!bp->attr_group)
return -ENOMEM;
@@ -2697,6 +2701,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->eeprom_map = fb_eeprom_map;
bp->fw_version = ioread32(&bp->image->version);
bp->sma_op = &ocp_fb_sma_op;
+ bp->signals_nr = 4;
+ bp->freq_in_nr = 4;
ptp_ocp_fb_set_version(bp);
@@ -2862,6 +2868,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->fw_version = ioread32(&bp->reg->version);
bp->fw_tag = 2;
bp->sma_op = &ocp_art_sma_op;
+ bp->signals_nr = 4;
+ bp->freq_in_nr = 4;
/* Enable MAC serial port during initialisation */
iowrite32(1, &bp->board_config->mro50_serial_activate);
@@ -2888,6 +2896,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->flash_start = 0xA00000;
bp->eeprom_map = fb_eeprom_map;
bp->sma_op = &ocp_adva_sma_op;
+ bp->signals_nr = 2;
+ bp->freq_in_nr = 2;
version = ioread32(&bp->image->version);
/* if lower 16 bits are empty, this is the fw loader. */
@@ -4008,7 +4018,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
{
struct signal_reg __iomem *reg = bp->signal_out[nr]->mem;
struct ptp_ocp_signal *signal = &bp->signal[nr];
- char label[8];
+ char label[16];
bool on;
u32 val;
@@ -4031,7 +4041,7 @@ static void
_frequency_summary_show(struct seq_file *s, int nr,
struct frequency_reg __iomem *reg)
{
- char label[8];
+ char label[16];
bool on;
u32 val;
@@ -4175,11 +4185,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
}
if (bp->fw_cap & OCP_CAP_SIGNAL)
- for (i = 0; i < 4; i++)
+ for (i = 0; i < bp->signals_nr; i++)
_signal_summary_show(s, bp, i);
if (bp->fw_cap & OCP_CAP_FREQ)
- for (i = 0; i < 4; i++)
+ for (i = 0; i < bp->freq_in_nr; i++)
_frequency_summary_show(s, i, bp->freq_in[i]);
if (bp->irig_out) {
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 18934e28469e..a6aad743c282 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -100,10 +100,20 @@ static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
{
bool in_use = false;
+ /* Virtual clocks can't be stacked on top of virtual clocks.
+ * Avoid acquiring the n_vclocks_mux on virtual clocks, to allow this
+ * function to be called from code paths where the n_vclocks_mux of the
+ * parent physical clock is already held. Functionally that's not an
+ * issue, but lockdep would complain, because they have the same lock
+ * class.
+ */
+ if (ptp->is_virtual_clock)
+ return false;
+
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return true;
- if (!ptp->is_virtual_clock && ptp->n_vclocks)
+ if (ptp->n_vclocks)
in_use = true;
mutex_unlock(&ptp->n_vclocks_mux);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 4731d5b90d7e..d9bcd1e8413e 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -114,6 +114,16 @@ config PWM_AXI_PWMGEN
To compile this driver as a module, choose M here: the module will be
called pwm-axi-pwmgen.
+config PWM_BCM2835
+ tristate "BCM2835 PWM support"
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ PWM framework driver for BCM2835 controller (Raspberry Pi)
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-bcm2835.
+
config PWM_BCM_IPROC
tristate "iProc PWM support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -137,16 +147,6 @@ config PWM_BCM_KONA
To compile this driver as a module, choose M here: the module
will be called pwm-bcm-kona.
-config PWM_BCM2835
- tristate "BCM2835 PWM support"
- depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
- depends on HAS_IOMEM
- help
- PWM framework driver for BCM2835 controller (Raspberry Pi)
-
- To compile this driver as a module, choose M here: the module
- will be called pwm-bcm2835.
-
config PWM_BERLIN
tristate "Marvell Berlin PWM support"
depends on ARCH_BERLIN || COMPILE_TEST
@@ -351,6 +351,18 @@ config PWM_KEEMBAY
To compile this driver as a module, choose M here: the module
will be called pwm-keembay.
+config PWM_LOONGSON
+ tristate "Loongson PWM support"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ depends on COMMON_CLK
+ help
+ Generic PWM framework driver for the Loongson family.
+ It can be found on Loongson-2K series CPUs and Loongson LS7A
+ bridge chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-loongson.
+
config PWM_LP3943
tristate "TI/National Semiconductor LP3943 PWM support"
depends on MFD_LP3943
@@ -411,26 +423,17 @@ config PWM_LPSS_PLATFORM
To compile this driver as a module, choose M here: the module
will be called pwm-lpss-platform.
-config PWM_MESON
- tristate "Amlogic Meson PWM driver"
- depends on ARCH_MESON || COMPILE_TEST
- depends on COMMON_CLK && HAS_IOMEM
- help
- The platform driver for Amlogic Meson PWM controller.
-
- To compile this driver as a module, choose M here: the module
- will be called pwm-meson.
-
-config PWM_MTK_DISP
- tristate "MediaTek display PWM driver"
- depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on HAS_IOMEM
+config PWM_MC33XS2410
+ tristate "MC33XS2410 PWM support"
+ depends on OF
+ depends on SPI
help
- Generic PWM framework driver for MediaTek disp-pwm device.
- The PWM is used to control the backlight brightness for display.
+ NXP MC33XS2410 high-side switch driver. The MC33XS2410 is a
+ four-channel high-side switch that operates from 3.0 V to 60 V
+ and is configured over SPI.
To compile this driver as a module, choose M here: the module
- will be called pwm-mtk-disp.
+ will be called pwm-mc33xs2410.
config PWM_MEDIATEK
tristate "MediaTek PWM support"
@@ -442,6 +445,16 @@ config PWM_MEDIATEK
To compile this driver as a module, choose M here: the module
will be called pwm-mediatek.
+config PWM_MESON
+ tristate "Amlogic Meson PWM driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK && HAS_IOMEM
+ help
+ The platform driver for Amlogic Meson PWM controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-meson.
+
config PWM_MICROCHIP_CORE
tristate "Microchip corePWM PWM support"
depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
@@ -452,6 +465,17 @@ config PWM_MICROCHIP_CORE
To compile this driver as a module, choose M here: the module
will be called pwm-microchip-core.
+config PWM_MTK_DISP
+ tristate "MediaTek display PWM driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Generic PWM framework driver for MediaTek disp-pwm device.
+ The PWM is used to control the backlight brightness for display.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-mtk-disp.
+
config PWM_MXS
tristate "Freescale MXS PWM support"
depends on ARCH_MXS || COMPILE_TEST
@@ -510,7 +534,7 @@ config PWM_RASPBERRYPI_POE
Enable Raspberry Pi firmware controller PWM bus used to control the
official RPI PoE hat
-config PWM_RCAR
+config PWM_RENESAS_RCAR
tristate "Renesas R-Car PWM support"
depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_IOMEM
@@ -521,6 +545,28 @@ config PWM_RCAR
To compile this driver as a module, choose M here: the module
will be called pwm-rcar.
+config PWM_RENESAS_RZG2L_GPT
+ tristate "Renesas RZ/G2L General PWM Timer support"
+ depends on ARCH_RZG2L || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver exposes the General PWM Timer controller found in Renesas
+ RZ/G2L-like chips through the PWM API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-rzg2l-gpt.
+
+config PWM_RENESAS_RZ_MTU3
+ tristate "Renesas RZ/G2L MTU3a PWM Timer support"
+ depends on RZ_MTU3
+ depends on HAS_IOMEM
+ help
+ This driver exposes the MTU3a PWM Timer controller found in Renesas
+ RZ/G2L-like chips through the PWM API.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-rz-mtu3.
+
config PWM_RENESAS_TPU
tristate "Renesas TPU PWM support"
depends on ARCH_RENESAS || COMPILE_TEST
@@ -540,17 +586,6 @@ config PWM_ROCKCHIP
Generic PWM framework driver for the PWM controller found on
Rockchip SoCs.
-config PWM_RZ_MTU3
- tristate "Renesas RZ/G2L MTU3a PWM Timer support"
- depends on RZ_MTU3
- depends on HAS_IOMEM
- help
- This driver exposes the MTU3a PWM Timer controller found in Renesas
- RZ/G2L like chips through the PWM API.
-
- To compile this driver as a module, choose M here: the module
- will be called pwm-rz-mtu3.
-
config PWM_SAMSUNG
tristate "Samsung PWM support"
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 539e0def3f82..96160f4257fc 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -7,9 +7,9 @@ obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
obj-$(CONFIG_PWM_AXI_PWMGEN) += pwm-axi-pwmgen.o
+obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
obj-$(CONFIG_PWM_BCM_IPROC) += pwm-bcm-iproc.o
obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
-obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
obj-$(CONFIG_PWM_BERLIN) += pwm-berlin.o
obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o
obj-$(CONFIG_PWM_CLK) += pwm-clk.o
@@ -30,14 +30,16 @@ obj-$(CONFIG_PWM_INTEL_LGM) += pwm-intel-lgm.o
obj-$(CONFIG_PWM_IQS620A) += pwm-iqs620a.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
obj-$(CONFIG_PWM_KEEMBAY) += pwm-keembay.o
+obj-$(CONFIG_PWM_LOONGSON) += pwm-loongson.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o
-obj-$(CONFIG_PWM_MESON) += pwm-meson.o
+obj-$(CONFIG_PWM_MC33XS2410) += pwm-mc33xs2410.o
obj-$(CONFIG_PWM_MEDIATEK) += pwm-mediatek.o
+obj-$(CONFIG_PWM_MESON) += pwm-meson.o
obj-$(CONFIG_PWM_MICROCHIP_CORE) += pwm-microchip-core.o
obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o
obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
@@ -46,10 +48,11 @@ obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o
obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
obj-$(CONFIG_PWM_RASPBERRYPI_POE) += pwm-raspberrypi-poe.o
-obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o
+obj-$(CONFIG_PWM_RENESAS_RCAR) += pwm-rcar.o
+obj-$(CONFIG_PWM_RENESAS_RZG2L_GPT) += pwm-rzg2l-gpt.o
+obj-$(CONFIG_PWM_RENESAS_RZ_MTU3) += pwm-rz-mtu3.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
-obj-$(CONFIG_PWM_RZ_MTU3) += pwm-rz-mtu3.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o
obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 0387bd838487..4d842c692194 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -216,21 +216,28 @@ static int __pwm_write_waveform(struct pwm_chip *chip, struct pwm_device *pwm, c
*
* Typically a given waveform cannot be implemented exactly by hardware, e.g.
* because hardware only supports coarse period resolution or no duty_offset.
- * This function returns the actually implemented waveform if you pass wf to
- * pwm_set_waveform_might_sleep now.
+ * This function returns the actually implemented waveform if you pass @wf to
+ * pwm_set_waveform_might_sleep() now.
*
* Note however that the world doesn't stop turning when you call it, so when
- * doing
+ * doing::
*
- * pwm_round_waveform_might_sleep(mypwm, &wf);
- * pwm_set_waveform_might_sleep(mypwm, &wf, true);
+ * pwm_round_waveform_might_sleep(mypwm, &wf);
+ * pwm_set_waveform_might_sleep(mypwm, &wf, true);
*
* the latter might fail, e.g. because an input clock changed its rate between
* these two calls and the waveform determined by
* pwm_round_waveform_might_sleep() cannot be implemented any more.
*
- * Returns 0 on success, 1 if there is no valid hardware configuration matching
- * the input waveform under the PWM rounding rules or a negative errno.
+ * Usually all values passed in @wf are rounded down to the nearest possible
+ * value (in the order period_length_ns, duty_length_ns and then
+ * duty_offset_ns). Only if this isn't possible, a value might grow. See the
+ * documentation for pwm_set_waveform_might_sleep() for a more formal
+ * description.
+ *
+ * Returns: 0 on success, 1 if at least one value had to be rounded up or a
+ * negative errno.
+ * Context: May sleep.
*/
int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf)
{
@@ -270,10 +277,10 @@ int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *
wf_req.duty_length_ns, wf_req.period_length_ns, wf_req.duty_offset_ns, ret_tohw);
if (IS_ENABLED(CONFIG_PWM_DEBUG) &&
- ret_tohw == 0 && !pwm_check_rounding(&wf_req, wf))
- dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu]\n",
+ (ret_tohw == 0) != pwm_check_rounding(&wf_req, wf))
+ dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu], ret: %d\n",
wf_req.duty_length_ns, wf_req.period_length_ns, wf_req.duty_offset_ns,
- wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns);
+ wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns, ret_tohw);
return ret_tohw;
}
@@ -287,6 +294,9 @@ EXPORT_SYMBOL_GPL(pwm_round_waveform_might_sleep);
*
* Stores the current configuration of the PWM in @wf. Note this is the
* equivalent of pwm_get_state_hw() (and not pwm_get_state()) for pwm_waveform.
+ *
+ * Returns: 0 on success or a negative errno
+ * Context: May sleep.
*/
int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf)
{
@@ -341,10 +351,10 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
if (err)
return err;
- if (IS_ENABLED(CONFIG_PWM_DEBUG) && ret_tohw == 0 && !pwm_check_rounding(wf, &wf_rounded))
- dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu]\n",
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && (ret_tohw == 0) != pwm_check_rounding(wf, &wf_rounded))
+ dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu], ret: %d\n",
wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
- wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns);
+ wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns, ret_tohw);
if (exact && pwmwfcmp(wf, &wf_rounded)) {
dev_dbg(&chip->dev, "Requested no rounding, but %llu/%llu [+%llu] -> %llu/%llu [+%llu]\n",
@@ -395,13 +405,37 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
*
* Typically a requested waveform cannot be implemented exactly, e.g. because
* you requested .period_length_ns = 100 ns, but the hardware can only set
- * periods that are a multiple of 8.5 ns. With that hardware passing exact =
- * true results in pwm_set_waveform_might_sleep() failing and returning 1. If
- * exact = false you get a period of 93.5 ns (i.e. the biggest period not bigger
- * than the requested value).
- * Note that even with exact = true, some rounding by less than 1 is
+ * periods that are a multiple of 8.5 ns. With that hardware passing @exact =
+ * true results in pwm_set_waveform_might_sleep() failing and returning -EDOM.
+ * If @exact = false you get a period of 93.5 ns (i.e. the biggest period not
+ * bigger than the requested value).
+ * Note that even with @exact = true, some rounding by less than 1 ns is
* possible/needed. In the above example requesting .period_length_ns = 94 and
- * exact = true, you get the hardware configured with period = 93.5 ns.
+ * @exact = true, you get the hardware configured with period = 93.5 ns.
+ *
+ * Let C be the set of possible hardware configurations for a given PWM device,
+ * consisting of tuples (p, d, o) where p is the period length, d is the duty
+ * length and o the duty offset.
+ *
+ * The following algorithm is implemented to pick the hardware setting
+ * (p, d, o) ∈ C for a given request (p', d', o') with @exact = false::
+ *
+ * p = max( { ṗ | ∃ ḋ, ȯ : (ṗ, ḋ, ȯ) ∈ C ∧ ṗ ≤ p' } ∪ { min({ ṗ | ∃ ḋ, ȯ : (ṗ, ḋ, ȯ) ∈ C }) })
+ * d = max( { ḋ | ∃ ȯ : (p, ḋ, ȯ) ∈ C ∧ ḋ ≤ d' } ∪ { min({ ḋ | ∃ ȯ : (p, ḋ, ȯ) ∈ C }) })
+ * o = max( { ȯ | (p, d, ȯ) ∈ C ∧ ȯ ≤ o' } ∪ { min({ ȯ | (p, d, ȯ) ∈ C }) })
+ *
+ * In words: The chosen period length is the maximal possible period length
+ * not bigger than the requested period length and, if that doesn't exist,
+ * the minimal period length. The chosen duty length is the maximal possible
+ * duty length that is compatible with the chosen period length and isn't
+ * bigger than the requested duty length. Again, if such a value doesn't
+ * exist, the minimal duty length compatible with the chosen period is
+ * picked. After that the duty offset compatible with the chosen period and
+ * duty length is chosen in the same way.
+ *
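+ * As a worked example, consider hypothetical hardware that supports periods
+ * of 10, 20, 30, 40 and 50 ns, for each period all even duty lengths up to
+ * the period, and only duty offset 0. A request for (47, 7, 0) with
+ * @exact = false is then implemented as (40, 6, 0).
+ *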
+ * Returns: 0 on success, -EDOM if setting failed due to the exact waveform not
+ * being possible (if @exact), or a different negative errno on failure.
+ * Context: May sleep.
*/
int pwm_set_waveform_might_sleep(struct pwm_device *pwm,
const struct pwm_waveform *wf, bool exact)
@@ -428,6 +462,19 @@ int pwm_set_waveform_might_sleep(struct pwm_device *pwm,
err = __pwm_set_waveform(pwm, wf, exact);
}
+ /*
+ * map err == 1 to -EDOM for exact requests and 0 for !exact ones. Also
+ * make sure that -EDOM is only returned in exactly that case. Note that
+ * __pwm_set_waveform() should never return -EDOM which justifies the
+ * unlikely().
+ */
+ if (unlikely(err == -EDOM))
+ err = -EINVAL;
+ else if (exact && err == 1)
+ err = -EDOM;
+ else if (err == 1)
+ err = 0;
+
return err;
}
EXPORT_SYMBOL_GPL(pwm_set_waveform_might_sleep);
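
A minimal consumer-side sketch of the semantics documented above (the function
name example_set_waveform() is hypothetical; the pwm_device is assumed to come
from devm_pwm_get() in the caller's probe path): first request the waveform
exactly, then fall back to the rounded configuration if the hardware cannot
hit it.

	static int example_set_waveform(struct pwm_device *mypwm)
	{
		struct pwm_waveform wf = {
			.period_length_ns = 100,
			.duty_length_ns = 50,
			.duty_offset_ns = 0,
		};
		int err;

		/* Ask for the exact waveform; -EDOM means it cannot be hit */
		err = pwm_set_waveform_might_sleep(mypwm, &wf, true);
		if (err == -EDOM)
			/* Accept the rounded-down configuration instead */
			err = pwm_set_waveform_might_sleep(mypwm, &wf, false);

		return err;
	}
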
@@ -561,11 +608,6 @@ static bool pwm_state_valid(const struct pwm_state *state)
return true;
}
-/**
- * __pwm_apply() - atomically apply a new state to a PWM device
- * @pwm: PWM device
- * @state: new state to apply
- */
static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state)
{
struct pwm_chip *chip;
@@ -674,6 +716,9 @@ static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state)
* Cannot be used in atomic context.
* @pwm: PWM device
* @state: new state to apply
+ *
+ * Returns: 0 on success, or a negative errno
+ * Context: May sleep.
*/
int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state)
{
@@ -715,6 +760,9 @@ EXPORT_SYMBOL_GPL(pwm_apply_might_sleep);
* Not all PWM devices support this function, check with pwm_might_sleep().
* @pwm: PWM device
* @state: new state to apply
+ *
+ * Returns: 0 on success, or a negative errno
+ * Context: Any
*/
int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state)
{
@@ -788,6 +836,9 @@ EXPORT_SYMBOL_GPL(pwm_get_state_hw);
* This function will adjust the PWM config to the PWM arguments provided
* by the DT or PWM lookup table. This is particularly useful to adapt
* the bootloader config to the Linux one.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ * Context: May sleep.
*/
int pwm_adjust_config(struct pwm_device *pwm)
{
@@ -2221,25 +2272,28 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
for (i = 0; i < chip->npwm; i++) {
struct pwm_device *pwm = &chip->pwms[i];
- struct pwm_state state;
+ struct pwm_state state, hwstate;
pwm_get_state(pwm, &state);
+ pwm_get_state_hw(pwm, &hwstate);
seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
if (test_bit(PWMF_REQUESTED, &pwm->flags))
seq_puts(s, " requested");
- if (state.enabled)
- seq_puts(s, " enabled");
+ seq_puts(s, "\n");
- seq_printf(s, " period: %llu ns", state.period);
- seq_printf(s, " duty: %llu ns", state.duty_cycle);
- seq_printf(s, " polarity: %s",
+ seq_printf(s, " requested configuration: %3sabled, %llu/%llu ns, %s polarity",
+ state.enabled ? "en" : "dis", state.duty_cycle, state.period,
state.polarity ? "inverse" : "normal");
-
if (state.usage_power)
- seq_puts(s, " usage_power");
+ seq_puts(s, ", usage_power");
+ seq_puts(s, "\n");
+
+ seq_printf(s, " actual configuration: %3sabled, %llu/%llu ns, %s polarity",
+ hwstate.enabled ? "en" : "dis", hwstate.duty_cycle, hwstate.period,
+ hwstate.polarity ? "inverse" : "normal");
seq_puts(s, "\n");
}
diff --git a/drivers/pwm/pwm-adp5585.c b/drivers/pwm/pwm-adp5585.c
index 40472ac5db64..d79106d12181 100644
--- a/drivers/pwm/pwm-adp5585.c
+++ b/drivers/pwm/pwm-adp5585.c
@@ -20,6 +20,7 @@
#include <linux/mfd/adp5585.h>
#include <linux/minmax.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c
index 4337c8f5acf0..60dcd3542373 100644
--- a/drivers/pwm/pwm-axi-pwmgen.c
+++ b/drivers/pwm/pwm-axi-pwmgen.c
@@ -257,7 +257,7 @@ static int axi_pwmgen_probe(struct platform_device *pdev)
struct regmap *regmap;
struct pwm_chip *chip;
struct axi_pwmgen_ddata *ddata;
- struct clk *clk;
+ struct clk *axi_clk, *clk;
void __iomem *io_base;
int ret;
@@ -280,9 +280,26 @@ static int axi_pwmgen_probe(struct platform_device *pdev)
ddata = pwmchip_get_drvdata(chip);
ddata->regmap = regmap;
- clk = devm_clk_get_enabled(dev, NULL);
+ /*
+ * Using NULL here instead of "axi" for backwards compatibility. There
+ * are some dtbs that don't give clock-names and have the "ext" clock
+	 * as the one and only clock (due to a mistake in the original bindings).
+ */
+ axi_clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(axi_clk))
+ return dev_err_probe(dev, PTR_ERR(axi_clk), "failed to get axi clock\n");
+
+ clk = devm_clk_get_optional_enabled(dev, "ext");
if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get ext clock\n");
+
+ /*
+ * If there is no "ext" clock, it means the HDL was compiled with
+ * ASYNC_CLK_EN=0. In this case, the AXI clock is also used for the
+ * PWM output clock.
+ */
+ if (!clk)
+ clk = axi_clk;
ret = devm_clk_rate_exclusive_get(dev, clk);
if (ret)
diff --git a/drivers/pwm/pwm-loongson.c b/drivers/pwm/pwm-loongson.c
new file mode 100644
index 000000000000..1ba16168cbb4
--- /dev/null
+++ b/drivers/pwm/pwm-loongson.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2025 Loongson Technology Corporation Limited.
+ *
+ * Loongson PWM driver
+ *
+ * For Loongson's PWM IP block documentation please refer to Chapter 11 of
+ * Reference Manual: https://loongson.github.io/LoongArch-Documentation/Loongson-7A1000-usermanual-EN.pdf
+ *
+ * Author: Juxin Gao <gaojuxin@loongson.cn>
+ * Further cleanup and restructuring by:
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ *
+ * Limitations:
+ * - If both DUTY and PERIOD are set to 0, the output is a constant low signal.
+ * - When disabled the output is driven to 0 independent of the configured
+ * polarity.
+ * - If the register is reconfigured while PWM is running, it does not complete
+ * the currently running period.
+ * - Disabling the PWM stops the output immediately (without waiting for the
+ *   current period to complete first).
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/units.h>
+
+/* Loongson PWM registers */
+#define LOONGSON_PWM_REG_DUTY 0x4 /* Low Pulse Buffer Register */
+#define LOONGSON_PWM_REG_PERIOD 0x8 /* Pulse Period Buffer Register */
+#define LOONGSON_PWM_REG_CTRL 0xc /* Control Register */
+
+/* Control register bits */
+#define LOONGSON_PWM_CTRL_REG_EN BIT(0) /* Counter Enable Bit */
+#define LOONGSON_PWM_CTRL_REG_OE BIT(3) /* Pulse Output Enable Control Bit, Valid Low */
+#define LOONGSON_PWM_CTRL_REG_SINGLE BIT(4) /* Single Pulse Control Bit */
+#define LOONGSON_PWM_CTRL_REG_INTE BIT(5) /* Interrupt Enable Bit */
+#define LOONGSON_PWM_CTRL_REG_INT BIT(6) /* Interrupt Bit */
+#define LOONGSON_PWM_CTRL_REG_RST BIT(7) /* Counter Reset Bit */
+#define LOONGSON_PWM_CTRL_REG_CAPTE BIT(8) /* Measurement Pulse Enable Bit */
+#define LOONGSON_PWM_CTRL_REG_INVERT BIT(9) /* Output flip-flop Enable Bit */
+#define LOONGSON_PWM_CTRL_REG_DZONE BIT(10) /* Anti-dead Zone Enable Bit */
+
+/* default input clk frequency for the ACPI case */
+#define LOONGSON_PWM_FREQ_DEFAULT 50000 /* Hz */
+
+struct pwm_loongson_ddata {
+ struct clk *clk;
+ void __iomem *base;
+ u64 clk_rate;
+};
+
+static inline __pure struct pwm_loongson_ddata *to_pwm_loongson_ddata(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
+
+static inline u32 pwm_loongson_readl(struct pwm_loongson_ddata *ddata, u32 offset)
+{
+ return readl(ddata->base + offset);
+}
+
+static inline void pwm_loongson_writel(struct pwm_loongson_ddata *ddata,
+ u32 val, u32 offset)
+{
+ writel(val, ddata->base + offset);
+}
+
+static int pwm_loongson_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ u16 val;
+ struct pwm_loongson_ddata *ddata = to_pwm_loongson_ddata(chip);
+
+ val = pwm_loongson_readl(ddata, LOONGSON_PWM_REG_CTRL);
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ /* Duty cycle defines LOW period of PWM */
+ val |= LOONGSON_PWM_CTRL_REG_INVERT;
+ else
+ /* Duty cycle defines HIGH period of PWM */
+ val &= ~LOONGSON_PWM_CTRL_REG_INVERT;
+
+ pwm_loongson_writel(ddata, val, LOONGSON_PWM_REG_CTRL);
+
+ return 0;
+}
+
+static void pwm_loongson_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ u32 val;
+ struct pwm_loongson_ddata *ddata = to_pwm_loongson_ddata(chip);
+
+ val = pwm_loongson_readl(ddata, LOONGSON_PWM_REG_CTRL);
+ val &= ~LOONGSON_PWM_CTRL_REG_EN;
+ pwm_loongson_writel(ddata, val, LOONGSON_PWM_REG_CTRL);
+}
+
+static int pwm_loongson_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ u32 val;
+ struct pwm_loongson_ddata *ddata = to_pwm_loongson_ddata(chip);
+
+ val = pwm_loongson_readl(ddata, LOONGSON_PWM_REG_CTRL);
+ val |= LOONGSON_PWM_CTRL_REG_EN;
+ pwm_loongson_writel(ddata, val, LOONGSON_PWM_REG_CTRL);
+
+ return 0;
+}
+
+static int pwm_loongson_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ u64 duty_ns, u64 period_ns)
+{
+ u64 duty, period;
+ struct pwm_loongson_ddata *ddata = to_pwm_loongson_ddata(chip);
+
+ /* duty = duty_ns * ddata->clk_rate / NSEC_PER_SEC */
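+	/*
+	 * Illustrative example: with the 50 kHz ACPI fallback clock, a duty
+	 * of 1 ms maps to 1000000 * 50000 / 10^9 = 50 counter ticks.
+	 */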
+ duty = mul_u64_u64_div_u64(duty_ns, ddata->clk_rate, NSEC_PER_SEC);
+ if (duty > U32_MAX)
+ duty = U32_MAX;
+
+ /* period = period_ns * ddata->clk_rate / NSEC_PER_SEC */
+ period = mul_u64_u64_div_u64(period_ns, ddata->clk_rate, NSEC_PER_SEC);
+ if (period > U32_MAX)
+ period = U32_MAX;
+
+ pwm_loongson_writel(ddata, duty, LOONGSON_PWM_REG_DUTY);
+ pwm_loongson_writel(ddata, period, LOONGSON_PWM_REG_PERIOD);
+
+ return 0;
+}
+
+static int pwm_loongson_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int ret;
+ bool enabled = pwm->state.enabled;
+
+ if (!state->enabled) {
+ if (enabled)
+ pwm_loongson_disable(chip, pwm);
+ return 0;
+ }
+
+ ret = pwm_loongson_set_polarity(chip, pwm, state->polarity);
+ if (ret)
+ return ret;
+
+ ret = pwm_loongson_config(chip, pwm, state->duty_cycle, state->period);
+ if (ret)
+ return ret;
+
+ if (!enabled && state->enabled)
+ ret = pwm_loongson_enable(chip, pwm);
+
+ return ret;
+}
+
+static int pwm_loongson_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ u32 duty, period, ctrl;
+ struct pwm_loongson_ddata *ddata = to_pwm_loongson_ddata(chip);
+
+ duty = pwm_loongson_readl(ddata, LOONGSON_PWM_REG_DUTY);
+ period = pwm_loongson_readl(ddata, LOONGSON_PWM_REG_PERIOD);
+ ctrl = pwm_loongson_readl(ddata, LOONGSON_PWM_REG_CTRL);
+
+ /* duty & period have a max of 2^32, so we can't overflow */
+ state->duty_cycle = DIV64_U64_ROUND_UP((u64)duty * NSEC_PER_SEC, ddata->clk_rate);
+ state->period = DIV64_U64_ROUND_UP((u64)period * NSEC_PER_SEC, ddata->clk_rate);
+ state->polarity = (ctrl & LOONGSON_PWM_CTRL_REG_INVERT) ? PWM_POLARITY_INVERSED :
+ PWM_POLARITY_NORMAL;
+ state->enabled = (ctrl & LOONGSON_PWM_CTRL_REG_EN) ? true : false;
+
+ return 0;
+}
+
+static const struct pwm_ops pwm_loongson_ops = {
+ .apply = pwm_loongson_apply,
+ .get_state = pwm_loongson_get_state,
+};
+
+static int pwm_loongson_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct pwm_chip *chip;
+ struct pwm_loongson_ddata *ddata;
+ struct device *dev = &pdev->dev;
+
+ chip = devm_pwmchip_alloc(dev, 1, sizeof(*ddata));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ddata = to_pwm_loongson_ddata(chip);
+
+ ddata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
+
+ ddata->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(ddata->clk))
+ return dev_err_probe(dev, PTR_ERR(ddata->clk),
+ "Failed to get pwm clock\n");
+ if (ddata->clk) {
+ ret = devm_clk_rate_exclusive_get(dev, ddata->clk);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get exclusive rate\n");
+
+ ddata->clk_rate = clk_get_rate(ddata->clk);
+ if (!ddata->clk_rate)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get frequency\n");
+ } else {
+ ddata->clk_rate = LOONGSON_PWM_FREQ_DEFAULT;
+ }
+
+ /* This check is done to prevent an overflow in .apply */
+ if (ddata->clk_rate > NSEC_PER_SEC)
+ return dev_err_probe(dev, -EINVAL, "PWM clock out of range\n");
+
+ chip->ops = &pwm_loongson_ops;
+ chip->atomic = true;
+ dev_set_drvdata(dev, chip);
+
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
+
+ return 0;
+}
+
+static int pwm_loongson_suspend(struct device *dev)
+{
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct pwm_loongson_ddata *ddata = to_pwm_loongson_ddata(chip);
+ struct pwm_device *pwm = &chip->pwms[0];
+
+ if (pwm->state.enabled)
+ return -EBUSY;
+
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+
+static int pwm_loongson_resume(struct device *dev)
+{
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct pwm_loongson_ddata *ddata = to_pwm_loongson_ddata(chip);
+
+ return clk_prepare_enable(ddata->clk);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pwm_loongson_pm_ops, pwm_loongson_suspend,
+ pwm_loongson_resume);
+
+static const struct of_device_id pwm_loongson_of_ids[] = {
+ { .compatible = "loongson,ls7a-pwm" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, pwm_loongson_of_ids);
+
+static const struct acpi_device_id pwm_loongson_acpi_ids[] = {
+ { "LOON0006" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, pwm_loongson_acpi_ids);
+
+static struct platform_driver pwm_loongson_driver = {
+ .probe = pwm_loongson_probe,
+ .driver = {
+ .name = "loongson-pwm",
+ .pm = pm_ptr(&pwm_loongson_pm_ops),
+ .of_match_table = pwm_loongson_of_ids,
+ .acpi_match_table = pwm_loongson_acpi_ids,
+ },
+};
+module_platform_driver(pwm_loongson_driver);
+
+MODULE_DESCRIPTION("Loongson PWM driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-mc33xs2410.c b/drivers/pwm/pwm-mc33xs2410.c
new file mode 100644
index 000000000000..a1ac3445ccdb
--- /dev/null
+++ b/drivers/pwm/pwm-mc33xs2410.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
+ *
+ * Reference Manual : https://www.nxp.com/docs/en/data-sheet/MC33XS2410.pdf
+ *
+ * Limitations:
+ * - Supports frequencies between 0.5 Hz and 2048 Hz with the following steps:
+ * - 0.5 Hz steps from 0.5 Hz to 32 Hz
+ * - 2 Hz steps from 2 Hz to 128 Hz
+ * - 8 Hz steps from 8 Hz to 512 Hz
+ * - 32 Hz steps from 32 Hz to 2048 Hz
+ * - Cannot generate a 0 % duty cycle.
+ * - Always produces low output if disabled.
+ * - Configuration isn't atomic. When changing polarity, duty cycle or period,
+ *   the new values take effect immediately while the counters are unaffected,
+ *   so the output pin temporarily shows a signal that is neither the old nor
+ *   the new state, but something in between.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pwm.h>
+
+#include <linux/spi/spi.h>
+
+#define MC33XS2410_GLB_CTRL 0x00
+#define MC33XS2410_GLB_CTRL_MODE GENMASK(7, 6)
+#define MC33XS2410_GLB_CTRL_MODE_NORMAL FIELD_PREP(MC33XS2410_GLB_CTRL_MODE, 1)
+
+#define MC33XS2410_PWM_CTRL1 0x05
+/* chan in { 1 ... 4 } */
+#define MC33XS2410_PWM_CTRL1_POL_INV(chan) BIT((chan) + 1)
+
+#define MC33XS2410_PWM_CTRL3 0x07
+/* chan in { 1 ... 4 } */
+#define MC33XS2410_PWM_CTRL3_EN(chan) BIT(4 + (chan) - 1)
+
+/* chan in { 1 ... 4 } */
+#define MC33XS2410_PWM_FREQ(chan) (0x08 + (chan) - 1)
+#define MC33XS2410_PWM_FREQ_STEP GENMASK(7, 6)
+#define MC33XS2410_PWM_FREQ_COUNT GENMASK(5, 0)
+
+/* chan in { 1 ... 4 } */
+#define MC33XS2410_PWM_DC(chan) (0x0c + (chan) - 1)
+
+#define MC33XS2410_WDT 0x14
+
+#define MC33XS2410_PWM_MIN_PERIOD 488282
+/* step in { 0 ... 3 } */
+#define MC33XS2410_PWM_MAX_PERIOD(step) (2000000000 >> (2 * (step)))
+
+#define MC33XS2410_FRAME_IN_ADDR GENMASK(15, 8)
+#define MC33XS2410_FRAME_IN_DATA GENMASK(7, 0)
+#define MC33XS2410_FRAME_IN_ADDR_WR BIT(7)
+#define MC33XS2410_FRAME_IN_DATA_RD BIT(7)
+#define MC33XS2410_FRAME_OUT_DATA GENMASK(13, 0)
+
+#define MC33XS2410_MAX_TRANSFERS 5
+
+static int mc33xs2410_write_regs(struct spi_device *spi, u8 *reg, u8 *val,
+ unsigned int len)
+{
+ u16 tx[MC33XS2410_MAX_TRANSFERS];
+ int i;
+
+ if (len > MC33XS2410_MAX_TRANSFERS)
+ return -EINVAL;
+
+ for (i = 0; i < len; i++)
+ tx[i] = FIELD_PREP(MC33XS2410_FRAME_IN_DATA, val[i]) |
+ FIELD_PREP(MC33XS2410_FRAME_IN_ADDR,
+ MC33XS2410_FRAME_IN_ADDR_WR | reg[i]);
+
+ return spi_write(spi, tx, len * 2);
+}
+
+static int mc33xs2410_read_regs(struct spi_device *spi, u8 *reg, u8 flag,
+ u16 *val, unsigned int len)
+{
+ u16 tx[MC33XS2410_MAX_TRANSFERS];
+ u16 rx[MC33XS2410_MAX_TRANSFERS];
+ struct spi_transfer t = {
+ .tx_buf = tx,
+ .rx_buf = rx,
+ };
+ int i, ret;
+
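+	/*
+	 * The device answers a read request in the following SPI frame, so
+	 * one extra transfer is needed to clock out the last response; the
+	 * received words are consumed starting at rx[1] below.
+	 */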
+ len++;
+ if (len > MC33XS2410_MAX_TRANSFERS)
+ return -EINVAL;
+
+ t.len = len * 2;
+ for (i = 0; i < len - 1; i++)
+ tx[i] = FIELD_PREP(MC33XS2410_FRAME_IN_DATA, flag) |
+ FIELD_PREP(MC33XS2410_FRAME_IN_ADDR, reg[i]);
+
+ ret = spi_sync_transfer(spi, &t, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = 1; i < len; i++)
+ val[i - 1] = FIELD_GET(MC33XS2410_FRAME_OUT_DATA, rx[i]);
+
+ return 0;
+}
+
+static int mc33xs2410_write_reg(struct spi_device *spi, u8 reg, u8 val)
+{
+ return mc33xs2410_write_regs(spi, &reg, &val, 1);
+}
+
+static int mc33xs2410_read_reg(struct spi_device *spi, u8 reg, u16 *val, u8 flag)
+{
+ return mc33xs2410_read_regs(spi, &reg, flag, val, 1);
+}
+
+static int mc33xs2410_read_reg_ctrl(struct spi_device *spi, u8 reg, u16 *val)
+{
+ return mc33xs2410_read_reg(spi, reg, val, MC33XS2410_FRAME_IN_DATA_RD);
+}
+
+static int mc33xs2410_modify_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val)
+{
+ u16 tmp;
+ int ret;
+
+ ret = mc33xs2410_read_reg_ctrl(spi, reg, &tmp);
+ if (ret < 0)
+ return ret;
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+
+ return mc33xs2410_write_reg(spi, reg, tmp);
+}
+
+static u8 mc33xs2410_pwm_get_freq(u64 period)
+{
+ u8 step, count;
+
+ /*
+ * Check which step [0 .. 3] is appropriate for the given period. The
+ * period ranges for the different step values overlap. Prefer big step
+	 * values as these allow more fine-grained period and duty cycle
+ * selection.
+ */
+
+ switch (period) {
+ case MC33XS2410_PWM_MIN_PERIOD ... MC33XS2410_PWM_MAX_PERIOD(3):
+ step = 3;
+ break;
+ case MC33XS2410_PWM_MAX_PERIOD(3) + 1 ... MC33XS2410_PWM_MAX_PERIOD(2):
+ step = 2;
+ break;
+ case MC33XS2410_PWM_MAX_PERIOD(2) + 1 ... MC33XS2410_PWM_MAX_PERIOD(1):
+ step = 1;
+ break;
+ case MC33XS2410_PWM_MAX_PERIOD(1) + 1 ... MC33XS2410_PWM_MAX_PERIOD(0):
+ step = 0;
+ break;
+ }
+
+ /*
+ * Round up here because a higher count results in a higher frequency
+ * and so a smaller period.
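+	 *
+	 * Illustrative example: period = 10^9 ns selects step = 0 (0.5 Hz
+	 * granularity) and DIV_ROUND_UP(2 * 10^9, 10^9) = 2, so the register
+	 * encodes code 1, i.e. (1 + 1) * 0.5 Hz = 1 Hz.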
+ */
+ count = DIV_ROUND_UP((u32)MC33XS2410_PWM_MAX_PERIOD(step), (u32)period);
+ return FIELD_PREP(MC33XS2410_PWM_FREQ_STEP, step) |
+ FIELD_PREP(MC33XS2410_PWM_FREQ_COUNT, count - 1);
+}
+
+static u64 mc33xs2410_pwm_get_period(u8 reg)
+{
+ u32 doubled_freq, code, doubled_steps;
+
+ /*
+ * steps:
+ * - 0 = 0.5Hz
+ * - 1 = 2Hz
+ * - 2 = 8Hz
+ * - 3 = 32Hz
+ * frequency = (code + 1) x steps.
+ *
+	 * To avoid losing precision with the fractional 0.5 Hz step, scale the
+	 * step value by two for now and keep in mind when calculating the
+	 * period that the frequency has been doubled.
+ */
+ doubled_steps = 1 << (FIELD_GET(MC33XS2410_PWM_FREQ_STEP, reg) * 2);
+ code = FIELD_GET(MC33XS2410_PWM_FREQ_COUNT, reg);
+ doubled_freq = (code + 1) * doubled_steps;
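+	/*
+	 * E.g. step = 0 (0.5 Hz granularity) and code = 1 give
+	 * doubled_freq = 2, i.e. 1 Hz, and so a period of 10^9 ns below.
+	 */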
+
+ /* Convert frequency to period, considering the doubled frequency. */
+ return DIV_ROUND_UP(2 * NSEC_PER_SEC, doubled_freq);
+}
+
+/*
+ * The hardware cannot generate a 0% relative duty cycle for normal and inversed
+ * polarity. For normal polarity, the channel must be disabled; the device then
+ * emits a constant low signal.
+ * For inverted polarity, the channel must be enabled, the polarity must be set
+ * to normal and the relative duty cycle must be set to 100%. The device then
+ * emits a constant high signal.
+ */
+static int mc33xs2410_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct spi_device *spi = pwmchip_get_drvdata(chip);
+ u8 reg[4] = {
+ MC33XS2410_PWM_FREQ(pwm->hwpwm + 1),
+ MC33XS2410_PWM_DC(pwm->hwpwm + 1),
+ MC33XS2410_PWM_CTRL1,
+ MC33XS2410_PWM_CTRL3
+ };
+ u64 period, duty_cycle;
+ int ret, rel_dc;
+ u16 rd_val[2];
+ u8 wr_val[4];
+ u8 mask;
+
+ period = min(state->period, MC33XS2410_PWM_MAX_PERIOD(0));
+ if (period < MC33XS2410_PWM_MIN_PERIOD)
+ return -EINVAL;
+
+ ret = mc33xs2410_read_regs(spi, &reg[2], MC33XS2410_FRAME_IN_DATA_RD, rd_val, 2);
+ if (ret < 0)
+ return ret;
+
+ /* Frequency */
+ wr_val[0] = mc33xs2410_pwm_get_freq(period);
+ /* Continue calculations with the possibly truncated period */
+ period = mc33xs2410_pwm_get_period(wr_val[0]);
+
+ /* Duty cycle */
+ duty_cycle = min(period, state->duty_cycle);
+ rel_dc = div64_u64(duty_cycle * 256, period) - 1;
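+	/*
+	 * rel_dc is the relative duty cycle scaled to [0, 255]; -1 signals a
+	 * requested duty cycle below 1/256 of the period, which the hardware
+	 * cannot generate and which is special-cased below.
+	 */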
+ if (rel_dc >= 0)
+ wr_val[1] = rel_dc;
+ else if (state->polarity == PWM_POLARITY_NORMAL)
+ wr_val[1] = 0;
+ else
+ wr_val[1] = 255;
+
+ /* Polarity */
+ mask = MC33XS2410_PWM_CTRL1_POL_INV(pwm->hwpwm + 1);
+ if (state->polarity == PWM_POLARITY_INVERSED && rel_dc >= 0)
+ wr_val[2] = rd_val[0] | mask;
+ else
+ wr_val[2] = rd_val[0] & ~mask;
+
+ /* Enable */
+ mask = MC33XS2410_PWM_CTRL3_EN(pwm->hwpwm + 1);
+ if (state->enabled &&
+ !(state->polarity == PWM_POLARITY_NORMAL && rel_dc < 0))
+ wr_val[3] = rd_val[1] | mask;
+ else
+ wr_val[3] = rd_val[1] & ~mask;
+
+ return mc33xs2410_write_regs(spi, reg, wr_val, 4);
+}
+
+static int mc33xs2410_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct spi_device *spi = pwmchip_get_drvdata(chip);
+ u8 reg[4] = {
+ MC33XS2410_PWM_FREQ(pwm->hwpwm + 1),
+ MC33XS2410_PWM_DC(pwm->hwpwm + 1),
+ MC33XS2410_PWM_CTRL1,
+ MC33XS2410_PWM_CTRL3,
+ };
+ u16 val[4];
+ int ret;
+
+ ret = mc33xs2410_read_regs(spi, reg, MC33XS2410_FRAME_IN_DATA_RD, val,
+ ARRAY_SIZE(reg));
+ if (ret < 0)
+ return ret;
+
+ state->period = mc33xs2410_pwm_get_period(val[0]);
+ state->polarity = (val[2] & MC33XS2410_PWM_CTRL1_POL_INV(pwm->hwpwm + 1)) ?
+ PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
+ state->enabled = !!(val[3] & MC33XS2410_PWM_CTRL3_EN(pwm->hwpwm + 1));
+ state->duty_cycle = DIV_ROUND_UP_ULL((val[1] + 1) * state->period, 256);
+
+ return 0;
+}
+
+static const struct pwm_ops mc33xs2410_pwm_ops = {
+ .apply = mc33xs2410_pwm_apply,
+ .get_state = mc33xs2410_pwm_get_state,
+};
+
+static int mc33xs2410_reset(struct device *dev)
+{
+ struct gpio_desc *reset_gpio;
+
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR_OR_NULL(reset_gpio))
+ return PTR_ERR_OR_ZERO(reset_gpio);
+
+ /* Wake-up time */
+ fsleep(10000);
+
+ return 0;
+}
+
+static int mc33xs2410_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct pwm_chip *chip;
+ int ret;
+
+ chip = devm_pwmchip_alloc(dev, 4, 0);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ spi->bits_per_word = 16;
+ spi->mode |= SPI_CS_WORD;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ pwmchip_set_drvdata(chip, spi);
+ chip->ops = &mc33xs2410_pwm_ops;
+
+ /*
+ * Deasserts the reset of the device. Shouldn't change the output signal
+	 * if the device was set up prior to probing.
+ */
+ ret = mc33xs2410_reset(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable watchdog and keep in mind that the watchdog won't trigger a
+	 * reset of the machine when running into a timeout; instead the
+ * control over the outputs is handed over to the INx input logic
+ * signals of the device. Disabling it here just deactivates this
+ * feature until a proper solution is found.
+ */
+ ret = mc33xs2410_write_reg(spi, MC33XS2410_WDT, 0x0);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to disable watchdog\n");
+
+ /* Transition to normal mode */
+ ret = mc33xs2410_modify_reg(spi, MC33XS2410_GLB_CTRL,
+ MC33XS2410_GLB_CTRL_MODE,
+ MC33XS2410_GLB_CTRL_MODE_NORMAL);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to transition to normal mode\n");
+
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add pwm chip\n");
+
+ return 0;
+}
+
+static const struct spi_device_id mc33xs2410_spi_id[] = {
+ { "mc33xs2410" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mc33xs2410_spi_id);
+
+static const struct of_device_id mc33xs2410_of_match[] = {
+ { .compatible = "nxp,mc33xs2410" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mc33xs2410_of_match);
+
+static struct spi_driver mc33xs2410_driver = {
+ .driver = {
+ .name = "mc33xs2410-pwm",
+ .of_match_table = mc33xs2410_of_match,
+ },
+ .probe = mc33xs2410_probe,
+ .id_table = mc33xs2410_spi_id,
+};
+module_spi_driver(mc33xs2410_driver);
+
+MODULE_DESCRIPTION("NXP MC33XS2410 high-side switch driver");
+MODULE_AUTHOR("Dimitri Fedrau <dimitri.fedrau@liebherr.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 98e6c1533312..8c6bf3d49753 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -6,7 +6,7 @@
* PWM output is achieved by calculating a clock that permits calculating
* two periods (low and high). The counter then has to be set to switch after
* N cycles for the first half period.
- * The hardware has no "polarity" setting. This driver reverses the period
+ * Some hardware revisions have no "polarity" setting. This driver reverses the period
* cycles (the low length is inverted with the high length) for
* PWM_POLARITY_INVERSED. This means that .get_state cannot read the polarity
* from the hardware.
@@ -56,6 +56,10 @@
#define MISC_B_CLK_SEL_SHIFT 6
#define MISC_A_CLK_SEL_SHIFT 4
#define MISC_CLK_SEL_MASK 0x3
+#define MISC_B_CONSTANT_EN BIT(29)
+#define MISC_A_CONSTANT_EN BIT(28)
+#define MISC_B_INVERT_EN BIT(27)
+#define MISC_A_INVERT_EN BIT(26)
#define MISC_B_EN BIT(1)
#define MISC_A_EN BIT(0)
@@ -68,6 +72,8 @@ static struct meson_pwm_channel_data {
u8 clk_div_shift;
u8 clk_en_shift;
u32 pwm_en_mask;
+ u32 const_en_mask;
+ u32 inv_en_mask;
} meson_pwm_per_channel_data[MESON_NUM_PWMS] = {
{
.reg_offset = REG_PWM_A,
@@ -75,6 +81,8 @@ static struct meson_pwm_channel_data {
.clk_div_shift = MISC_A_CLK_DIV_SHIFT,
.clk_en_shift = MISC_A_CLK_EN_SHIFT,
.pwm_en_mask = MISC_A_EN,
+ .const_en_mask = MISC_A_CONSTANT_EN,
+ .inv_en_mask = MISC_A_INVERT_EN,
},
{
.reg_offset = REG_PWM_B,
@@ -82,6 +90,8 @@ static struct meson_pwm_channel_data {
.clk_div_shift = MISC_B_CLK_DIV_SHIFT,
.clk_en_shift = MISC_B_CLK_EN_SHIFT,
.pwm_en_mask = MISC_B_EN,
+ .const_en_mask = MISC_B_CONSTANT_EN,
+ .inv_en_mask = MISC_B_INVERT_EN,
}
};
@@ -89,6 +99,8 @@ struct meson_pwm_channel {
unsigned long rate;
unsigned int hi;
unsigned int lo;
+ bool constant;
+ bool inverted;
struct clk_mux mux;
struct clk_divider div;
@@ -99,6 +111,8 @@ struct meson_pwm_channel {
struct meson_pwm_data {
const char *const parent_names[MESON_NUM_MUX_PARENTS];
int (*channels_init)(struct pwm_chip *chip);
+ bool has_constant;
+ bool has_polarity;
};
struct meson_pwm {
@@ -160,7 +174,7 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
* Fixing this needs some care however as some machines might rely on
* this.
*/
- if (state->polarity == PWM_POLARITY_INVERSED)
+ if (state->polarity == PWM_POLARITY_INVERSED && !meson->data->has_polarity)
duty = period - duty;
freq = div64_u64(NSEC_PER_SEC * 0xffffULL, period);
@@ -187,9 +201,11 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
if (duty == period) {
channel->hi = cnt;
channel->lo = 0;
+ channel->constant = true;
} else if (duty == 0) {
channel->hi = 0;
channel->lo = cnt;
+ channel->constant = true;
} else {
duty_cnt = mul_u64_u64_div_u64(fin_freq, duty, NSEC_PER_SEC);
@@ -197,6 +213,7 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
channel->hi = duty_cnt;
channel->lo = cnt - duty_cnt;
+ channel->constant = false;
}
channel->rate = fin_freq;
@@ -227,6 +244,19 @@ static void meson_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
value = readl(meson->base + REG_MISC_AB);
value |= channel_data->pwm_en_mask;
+
+ if (meson->data->has_constant) {
+ value &= ~channel_data->const_en_mask;
+ if (channel->constant)
+ value |= channel_data->const_en_mask;
+ }
+
+ if (meson->data->has_polarity) {
+ value &= ~channel_data->inv_en_mask;
+ if (channel->inverted)
+ value |= channel_data->inv_en_mask;
+ }
+
writel(value, meson->base + REG_MISC_AB);
spin_unlock_irqrestore(&meson->lock, flags);
@@ -235,13 +265,24 @@ static void meson_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
static void meson_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct meson_pwm *meson = to_meson_pwm(chip);
+ struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
+ struct meson_pwm_channel_data *channel_data;
unsigned long flags;
u32 value;
+ channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
+
spin_lock_irqsave(&meson->lock, flags);
value = readl(meson->base + REG_MISC_AB);
- value &= ~meson_pwm_per_channel_data[pwm->hwpwm].pwm_en_mask;
+ value &= ~channel_data->pwm_en_mask;
+
+ if (meson->data->has_polarity) {
+ value &= ~channel_data->inv_en_mask;
+ if (channel->inverted)
+ value |= channel_data->inv_en_mask;
+ }
+
writel(value, meson->base + REG_MISC_AB);
spin_unlock_irqrestore(&meson->lock, flags);
@@ -254,10 +295,12 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
int err = 0;
+ channel->inverted = (state->polarity == PWM_POLARITY_INVERSED);
+
if (!state->enabled) {
- if (state->polarity == PWM_POLARITY_INVERSED) {
+ if (channel->inverted && !meson->data->has_polarity) {
/*
- * This IP block revision doesn't have an "always high"
+			 * Some IP block revisions don't have an "always high"
* setting which we can use for "inverted disabled".
* Instead we achieve this by setting mux parent with
* highest rate and minimum divider value, resulting
@@ -271,6 +314,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
channel->rate = ULONG_MAX;
channel->hi = ~0;
channel->lo = 0;
+ channel->constant = true;
meson_pwm_enable(chip, pwm);
} else {
@@ -287,21 +331,9 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
-static u64 meson_pwm_cnt_to_ns(struct pwm_chip *chip, struct pwm_device *pwm,
- u32 cnt)
+static u64 meson_pwm_cnt_to_ns(unsigned long fin_freq, u32 cnt)
{
- struct meson_pwm *meson = to_meson_pwm(chip);
- struct meson_pwm_channel *channel;
- unsigned long fin_freq;
-
- /* to_meson_pwm() can only be used after .get_state() is called */
- channel = &meson->channels[pwm->hwpwm];
-
- fin_freq = clk_get_rate(channel->clk);
- if (fin_freq == 0)
- return 0;
-
- return div64_ul(NSEC_PER_SEC * (u64)cnt, fin_freq);
+ return fin_freq ? div64_ul(NSEC_PER_SEC * (u64)cnt, fin_freq) : 0;
}
static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -309,23 +341,27 @@ static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel_data *channel_data;
- struct meson_pwm_channel *channel;
+ unsigned long fin_freq;
+ unsigned int hi, lo;
u32 value;
- channel = &meson->channels[pwm->hwpwm];
channel_data = &meson_pwm_per_channel_data[pwm->hwpwm];
+ fin_freq = clk_get_rate(meson->channels[pwm->hwpwm].clk);
value = readl(meson->base + REG_MISC_AB);
state->enabled = value & channel_data->pwm_en_mask;
- value = readl(meson->base + channel_data->reg_offset);
- channel->lo = FIELD_GET(PWM_LOW_MASK, value);
- channel->hi = FIELD_GET(PWM_HIGH_MASK, value);
+ if (meson->data->has_polarity && (value & channel_data->inv_en_mask))
+ state->polarity = PWM_POLARITY_INVERSED;
+ else
+ state->polarity = PWM_POLARITY_NORMAL;
- state->period = meson_pwm_cnt_to_ns(chip, pwm, channel->lo + channel->hi);
- state->duty_cycle = meson_pwm_cnt_to_ns(chip, pwm, channel->hi);
+ value = readl(meson->base + channel_data->reg_offset);
+ lo = FIELD_GET(PWM_LOW_MASK, value);
+ hi = FIELD_GET(PWM_HIGH_MASK, value);
- state->polarity = PWM_POLARITY_NORMAL;
+ state->period = meson_pwm_cnt_to_ns(fin_freq, lo + hi);
+ state->duty_cycle = meson_pwm_cnt_to_ns(fin_freq, hi);
return 0;
}
@@ -508,29 +544,52 @@ static const struct meson_pwm_data pwm_gxbb_ao_data = {
static const struct meson_pwm_data pwm_axg_ee_data = {
.parent_names = { "xtal", "fclk_div5", "fclk_div4", "fclk_div3" },
.channels_init = meson_pwm_init_channels_meson8b_legacy,
+ .has_constant = true,
+ .has_polarity = true,
};
static const struct meson_pwm_data pwm_axg_ao_data = {
.parent_names = { "xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5" },
.channels_init = meson_pwm_init_channels_meson8b_legacy,
+ .has_constant = true,
+ .has_polarity = true,
+};
+
+static const struct meson_pwm_data pwm_g12a_ee_data = {
+ .parent_names = { "xtal", NULL, "fclk_div4", "fclk_div3" },
+ .channels_init = meson_pwm_init_channels_meson8b_legacy,
+ .has_constant = true,
+ .has_polarity = true,
};
static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
.parent_names = { "xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5" },
.channels_init = meson_pwm_init_channels_meson8b_legacy,
+ .has_constant = true,
+ .has_polarity = true,
};
static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
.parent_names = { "xtal", "g12a_ao_clk81", NULL, NULL },
.channels_init = meson_pwm_init_channels_meson8b_legacy,
+ .has_constant = true,
+ .has_polarity = true,
};
static const struct meson_pwm_data pwm_meson8_v2_data = {
.channels_init = meson_pwm_init_channels_meson8b_v2,
};
+static const struct meson_pwm_data pwm_meson_axg_v2_data = {
+ .channels_init = meson_pwm_init_channels_meson8b_v2,
+ .has_constant = true,
+ .has_polarity = true,
+};
+
static const struct meson_pwm_data pwm_s4_data = {
.channels_init = meson_pwm_init_channels_s4,
+ .has_constant = true,
+ .has_polarity = true,
};
static const struct of_device_id meson_pwm_matches[] = {
@@ -538,6 +597,14 @@ static const struct of_device_id meson_pwm_matches[] = {
.compatible = "amlogic,meson8-pwm-v2",
.data = &pwm_meson8_v2_data
},
+ {
+ .compatible = "amlogic,meson-axg-pwm-v2",
+ .data = &pwm_meson_axg_v2_data
+ },
+ {
+ .compatible = "amlogic,meson-g12-pwm-v2",
+ .data = &pwm_meson_axg_v2_data
+ },
/* The following compatibles are obsolete */
{
.compatible = "amlogic,meson8b-pwm",
@@ -561,7 +628,7 @@ static const struct of_device_id meson_pwm_matches[] = {
},
{
.compatible = "amlogic,meson-g12a-ee-pwm",
- .data = &pwm_meson8b_data
+ .data = &pwm_g12a_ee_data
},
{
.compatible = "amlogic,meson-g12a-ao-pwm-ab",
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 5162f3991644..eb03ccd5b688 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -263,12 +263,14 @@ static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
return pca9685_pwm_get_duty(chip, offset) != 0;
}
-static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
- int value)
+static int pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
+ int value)
{
struct pwm_chip *chip = gpiochip_get_data(gpio);
pca9685_pwm_set_duty(chip, offset, value ? PCA9685_COUNTER_RANGE : 0);
+
+ return 0;
}
static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
@@ -321,7 +323,7 @@ static int pca9685_pwm_gpio_probe(struct pwm_chip *chip)
pca->gpio.direction_input = pca9685_pwm_gpio_direction_input;
pca->gpio.direction_output = pca9685_pwm_gpio_direction_output;
pca->gpio.get = pca9685_pwm_gpio_get;
- pca->gpio.set = pca9685_pwm_gpio_set;
+ pca->gpio.set_rv = pca9685_pwm_gpio_set;
pca->gpio.base = -1;
pca->gpio.ngpio = PCA9685_MAXCHAN;
pca->gpio.can_sleep = true;
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 430bd6a709e9..8a4a3d2df30d 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -160,24 +160,24 @@ static int pwm_probe(struct platform_device *pdev)
const struct platform_device_id *id = platform_get_device_id(pdev);
struct pwm_chip *chip;
struct pxa_pwm_chip *pc;
+ struct device *dev = &pdev->dev;
int ret = 0;
if (IS_ENABLED(CONFIG_OF) && id == NULL)
- id = of_device_get_match_data(&pdev->dev);
+ id = of_device_get_match_data(dev);
if (id == NULL)
return -EINVAL;
- chip = devm_pwmchip_alloc(&pdev->dev,
- (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1,
+ chip = devm_pwmchip_alloc(dev, (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1,
sizeof(*pc));
if (IS_ERR(chip))
return PTR_ERR(chip);
pc = to_pxa_pwm_chip(chip);
- pc->clk = devm_clk_get(&pdev->dev, NULL);
+ pc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(pc->clk))
- return PTR_ERR(pc->clk);
+ return dev_err_probe(dev, PTR_ERR(pc->clk), "Failed to get clock\n");
chip->ops = &pxa_pwm_ops;
@@ -188,11 +188,9 @@ static int pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = devm_pwmchip_add(&pdev->dev, chip);
- if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
- return ret;
- }
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "pwmchip_add() failed\n");
return 0;
}
diff --git a/drivers/pwm/pwm-rzg2l-gpt.c b/drivers/pwm/pwm-rzg2l-gpt.c
new file mode 100644
index 000000000000..360c8bf3b190
--- /dev/null
+++ b/drivers/pwm/pwm-rzg2l-gpt.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L General PWM Timer (GPT) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ *
+ * Hardware manual for this IP can be found here
+ * https://www.renesas.com/eu/en/document/mah/rzg2l-group-rzg2lc-group-users-manual-hardware-0?language=en
+ *
+ * Limitations:
+ * - Counter must be stopped before modifying Mode and Prescaler.
+ * - When PWM is disabled, the output is driven to inactive.
+ * - While the hardware supports both polarities, the driver (for now)
+ * only handles normal polarity.
+ * - General PWM Timer (GPT) has 8 HW channels for PWM operations and
+ *   each HW channel has 2 IOs.
+ * - Each IO is modelled as an independent PWM channel.
+ * - When both channels are used, disabling the channel on one stops the
+ * other.
+ * - When both channels are used, the period of both IOs in the HW channel
+ *   must be the same (for now).
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/reset.h>
+#include <linux/time.h>
+#include <linux/units.h>
+
+#define RZG2L_GET_CH(hwpwm) ((hwpwm) / 2)
+#define RZG2L_GET_CH_OFFS(ch) (0x100 * (ch))
+
+#define RZG2L_GTCR(ch) (0x2c + RZG2L_GET_CH_OFFS(ch))
+#define RZG2L_GTUDDTYC(ch) (0x30 + RZG2L_GET_CH_OFFS(ch))
+#define RZG2L_GTIOR(ch) (0x34 + RZG2L_GET_CH_OFFS(ch))
+#define RZG2L_GTBER(ch) (0x40 + RZG2L_GET_CH_OFFS(ch))
+#define RZG2L_GTCNT(ch) (0x48 + RZG2L_GET_CH_OFFS(ch))
+#define RZG2L_GTCCR(ch, sub_ch) (0x4c + RZG2L_GET_CH_OFFS(ch) + 4 * (sub_ch))
+#define RZG2L_GTPR(ch) (0x64 + RZG2L_GET_CH_OFFS(ch))
+
+#define RZG2L_GTCR_CST BIT(0)
+#define RZG2L_GTCR_MD GENMASK(18, 16)
+#define RZG2L_GTCR_TPCS GENMASK(26, 24)
+
+#define RZG2L_GTCR_MD_SAW_WAVE_PWM_MODE FIELD_PREP(RZG2L_GTCR_MD, 0)
+
+#define RZG2L_GTUDDTYC_UP BIT(0)
+#define RZG2L_GTUDDTYC_UDF BIT(1)
+#define RZG2L_GTUDDTYC_UP_COUNTING (RZG2L_GTUDDTYC_UP | RZG2L_GTUDDTYC_UDF)
+
+#define RZG2L_GTIOR_GTIOA GENMASK(4, 0)
+#define RZG2L_GTIOR_GTIOB GENMASK(20, 16)
+#define RZG2L_GTIOR_GTIOx(sub_ch) ((sub_ch) ? RZG2L_GTIOR_GTIOB : RZG2L_GTIOR_GTIOA)
+#define RZG2L_GTIOR_OAE BIT(8)
+#define RZG2L_GTIOR_OBE BIT(24)
+#define RZG2L_GTIOR_OxE(sub_ch) ((sub_ch) ? RZG2L_GTIOR_OBE : RZG2L_GTIOR_OAE)
+
+#define RZG2L_INIT_OUT_HI_OUT_HI_END_TOGGLE 0x1b
+#define RZG2L_GTIOR_GTIOA_OUT_HI_END_TOGGLE_CMP_MATCH \
+ (RZG2L_INIT_OUT_HI_OUT_HI_END_TOGGLE | RZG2L_GTIOR_OAE)
+#define RZG2L_GTIOR_GTIOB_OUT_HI_END_TOGGLE_CMP_MATCH \
+ (FIELD_PREP(RZG2L_GTIOR_GTIOB, RZG2L_INIT_OUT_HI_OUT_HI_END_TOGGLE) | RZG2L_GTIOR_OBE)
+
+#define RZG2L_GTIOR_GTIOx_OUT_HI_END_TOGGLE_CMP_MATCH(sub_ch) \
+ ((sub_ch) ? RZG2L_GTIOR_GTIOB_OUT_HI_END_TOGGLE_CMP_MATCH : \
+ RZG2L_GTIOR_GTIOA_OUT_HI_END_TOGGLE_CMP_MATCH)
+
+#define RZG2L_MAX_HW_CHANNELS 8
+#define RZG2L_CHANNELS_PER_IO 2
+#define RZG2L_MAX_PWM_CHANNELS (RZG2L_MAX_HW_CHANNELS * RZG2L_CHANNELS_PER_IO)
+#define RZG2L_MAX_SCALE_FACTOR 1024
+#define RZG2L_MAX_TICKS ((u64)U32_MAX * RZG2L_MAX_SCALE_FACTOR)
+
+struct rzg2l_gpt_chip {
+ void __iomem *mmio;
+ struct mutex lock; /* lock to protect shared channel resources */
+ unsigned long rate_khz;
+ u32 period_ticks[RZG2L_MAX_HW_CHANNELS];
+ u32 channel_request_count[RZG2L_MAX_HW_CHANNELS];
+ u32 channel_enable_count[RZG2L_MAX_HW_CHANNELS];
+};
+
+static inline struct rzg2l_gpt_chip *to_rzg2l_gpt_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
+
+static inline unsigned int rzg2l_gpt_subchannel(unsigned int hwpwm)
+{
+ return hwpwm & 0x1;
+}
+
+static void rzg2l_gpt_write(struct rzg2l_gpt_chip *rzg2l_gpt, u32 reg, u32 data)
+{
+ writel(data, rzg2l_gpt->mmio + reg);
+}
+
+static u32 rzg2l_gpt_read(struct rzg2l_gpt_chip *rzg2l_gpt, u32 reg)
+{
+ return readl(rzg2l_gpt->mmio + reg);
+}
+
+static void rzg2l_gpt_modify(struct rzg2l_gpt_chip *rzg2l_gpt, u32 reg, u32 clr,
+ u32 set)
+{
+ rzg2l_gpt_write(rzg2l_gpt, reg,
+ (rzg2l_gpt_read(rzg2l_gpt, reg) & ~clr) | set);
+}
+
+static u8 rzg2l_gpt_calculate_prescale(struct rzg2l_gpt_chip *rzg2l_gpt,
+ u64 period_ticks)
+{
+ u32 prescaled_period_ticks;
+ u8 prescale;
+
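+	/*
+	 * Each prescale step divides the count clock by four, so the period
+	 * must fit into 32 bits after shifting by 2 * prescale. E.g.
+	 * period_ticks = 2^34 gives prescaled_period_ticks = 4 and
+	 * prescale = (fls(4) + 1) / 2 = 2, i.e. a divide-by-16 count clock.
+	 */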
+ prescaled_period_ticks = period_ticks >> 32;
+ if (prescaled_period_ticks >= 256)
+ prescale = 5;
+ else
+ prescale = (fls(prescaled_period_ticks) + 1) / 2;
+
+ return prescale;
+}
+
+static int rzg2l_gpt_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct rzg2l_gpt_chip *rzg2l_gpt = to_rzg2l_gpt_chip(chip);
+ u32 ch = RZG2L_GET_CH(pwm->hwpwm);
+
+ guard(mutex)(&rzg2l_gpt->lock);
+ rzg2l_gpt->channel_request_count[ch]++;
+
+ return 0;
+}
+
+static void rzg2l_gpt_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct rzg2l_gpt_chip *rzg2l_gpt = to_rzg2l_gpt_chip(chip);
+ u32 ch = RZG2L_GET_CH(pwm->hwpwm);
+
+ guard(mutex)(&rzg2l_gpt->lock);
+ rzg2l_gpt->channel_request_count[ch]--;
+}
+
+static bool rzg2l_gpt_is_ch_enabled(struct rzg2l_gpt_chip *rzg2l_gpt, u8 hwpwm)
+{
+ u8 ch = RZG2L_GET_CH(hwpwm);
+ u32 val;
+
+ val = rzg2l_gpt_read(rzg2l_gpt, RZG2L_GTCR(ch));
+ if (!(val & RZG2L_GTCR_CST))
+ return false;
+
+ val = rzg2l_gpt_read(rzg2l_gpt, RZG2L_GTIOR(ch));
+
+ return val & RZG2L_GTIOR_OxE(rzg2l_gpt_subchannel(hwpwm));
+}
+
+/* Caller holds the lock while calling rzg2l_gpt_enable() */
+static void rzg2l_gpt_enable(struct rzg2l_gpt_chip *rzg2l_gpt,
+ struct pwm_device *pwm)
+{
+ u8 sub_ch = rzg2l_gpt_subchannel(pwm->hwpwm);
+ u32 val = RZG2L_GTIOR_GTIOx(sub_ch) | RZG2L_GTIOR_OxE(sub_ch);
+ u8 ch = RZG2L_GET_CH(pwm->hwpwm);
+
+ /* Enable pin output */
+ rzg2l_gpt_modify(rzg2l_gpt, RZG2L_GTIOR(ch), val,
+ RZG2L_GTIOR_GTIOx_OUT_HI_END_TOGGLE_CMP_MATCH(sub_ch));
+
+ if (!rzg2l_gpt->channel_enable_count[ch])
+ rzg2l_gpt_modify(rzg2l_gpt, RZG2L_GTCR(ch), 0, RZG2L_GTCR_CST);
+
+ rzg2l_gpt->channel_enable_count[ch]++;
+}
+
+/* Caller holds the lock while calling rzg2l_gpt_disable() */
+static void rzg2l_gpt_disable(struct rzg2l_gpt_chip *rzg2l_gpt,
+ struct pwm_device *pwm)
+{
+ u8 sub_ch = rzg2l_gpt_subchannel(pwm->hwpwm);
+ u8 ch = RZG2L_GET_CH(pwm->hwpwm);
+
+	/* Stop counting; the GTIOCx pin outputs low when counting stops */
+ rzg2l_gpt->channel_enable_count[ch]--;
+
+ if (!rzg2l_gpt->channel_enable_count[ch])
+ rzg2l_gpt_modify(rzg2l_gpt, RZG2L_GTCR(ch), RZG2L_GTCR_CST, 0);
+
+ /* Disable pin output */
+ rzg2l_gpt_modify(rzg2l_gpt, RZG2L_GTIOR(ch), RZG2L_GTIOR_OxE(sub_ch), 0);
+}
+
+static u64 rzg2l_gpt_calculate_period_or_duty(struct rzg2l_gpt_chip *rzg2l_gpt,
+ u32 val, u8 prescale)
+{
+ u64 tmp;
+
+ /*
+	 * The calculation doesn't overflow a u64 because prescale ≤ 5 and so
+ * tmp = val << (2 * prescale) * USEC_PER_SEC
+ * < 2^32 * 2^10 * 10^6
+ * < 2^32 * 2^10 * 2^20
+ * = 2^62
+ */
+ tmp = (u64)val << (2 * prescale);
+ tmp *= USEC_PER_SEC;
+
+ return DIV64_U64_ROUND_UP(tmp, rzg2l_gpt->rate_khz);
+}
+
+static int rzg2l_gpt_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct rzg2l_gpt_chip *rzg2l_gpt = to_rzg2l_gpt_chip(chip);
+
+ state->enabled = rzg2l_gpt_is_ch_enabled(rzg2l_gpt, pwm->hwpwm);
+ if (state->enabled) {
+ u32 sub_ch = rzg2l_gpt_subchannel(pwm->hwpwm);
+ u32 ch = RZG2L_GET_CH(pwm->hwpwm);
+ u8 prescale;
+ u32 val;
+
+ val = rzg2l_gpt_read(rzg2l_gpt, RZG2L_GTCR(ch));
+ prescale = FIELD_GET(RZG2L_GTCR_TPCS, val);
+
+ val = rzg2l_gpt_read(rzg2l_gpt, RZG2L_GTPR(ch));
+ state->period = rzg2l_gpt_calculate_period_or_duty(rzg2l_gpt, val, prescale);
+
+ val = rzg2l_gpt_read(rzg2l_gpt, RZG2L_GTCCR(ch, sub_ch));
+ state->duty_cycle = rzg2l_gpt_calculate_period_or_duty(rzg2l_gpt, val, prescale);
+ if (state->duty_cycle > state->period)
+ state->duty_cycle = state->period;
+ }
+
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ return 0;
+}
+
+static u32 rzg2l_gpt_calculate_pv_or_dc(u64 period_or_duty_cycle, u8 prescale)
+{
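+	/* Scale by the 4^prescale clock divider and clamp to the 32-bit counter */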
+ return min_t(u64, DIV_ROUND_DOWN_ULL(period_or_duty_cycle, 1 << (2 * prescale)),
+ U32_MAX);
+}
+
+/* Caller holds the lock while calling rzg2l_gpt_config() */
+static int rzg2l_gpt_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct rzg2l_gpt_chip *rzg2l_gpt = to_rzg2l_gpt_chip(chip);
+ u8 sub_ch = rzg2l_gpt_subchannel(pwm->hwpwm);
+ u8 ch = RZG2L_GET_CH(pwm->hwpwm);
+ u64 period_ticks, duty_ticks;
+ unsigned long pv, dc;
+ u8 prescale;
+
+ /* Limit period/duty cycle to max value supported by the HW */
+ period_ticks = mul_u64_u64_div_u64(state->period, rzg2l_gpt->rate_khz, USEC_PER_SEC);
+ if (period_ticks > RZG2L_MAX_TICKS)
+ period_ticks = RZG2L_MAX_TICKS;
+ /*
+ * GPT counter is shared by the two IOs of a single channel, so
+ * prescale and period can NOT be modified when there are multiple IOs
+ * in use with different settings.
+ */
+ if (rzg2l_gpt->channel_request_count[ch] > 1) {
+ if (period_ticks < rzg2l_gpt->period_ticks[ch])
+ return -EBUSY;
+ else
+ period_ticks = rzg2l_gpt->period_ticks[ch];
+ }
+
+ prescale = rzg2l_gpt_calculate_prescale(rzg2l_gpt, period_ticks);
+ pv = rzg2l_gpt_calculate_pv_or_dc(period_ticks, prescale);
+
+ duty_ticks = mul_u64_u64_div_u64(state->duty_cycle, rzg2l_gpt->rate_khz, USEC_PER_SEC);
+ if (duty_ticks > period_ticks)
+ duty_ticks = period_ticks;
+ dc = rzg2l_gpt_calculate_pv_or_dc(duty_ticks, prescale);
+
+ /*
+	 * GPT counter is shared by multiple channels, so we cache the period
+	 * ticks from the first enabled channel and use the same value for
+	 * both channels.
+ */
+ rzg2l_gpt->period_ticks[ch] = period_ticks;
+
+ /*
+ * Counter must be stopped before modifying mode, prescaler, timer
+ * counter and buffer enable registers. These registers are shared
+ * between both channels. So allow updating these registers only for the
+ * first enabled channel.
+ */
+ if (rzg2l_gpt->channel_enable_count[ch] <= 1) {
+ rzg2l_gpt_modify(rzg2l_gpt, RZG2L_GTCR(ch), RZG2L_GTCR_CST, 0);
+
+ /* GPT set operating mode (saw-wave up-counting) */
+ rzg2l_gpt_modify(rzg2l_gpt, RZG2L_GTCR(ch), RZG2L_GTCR_MD,
+ RZG2L_GTCR_MD_SAW_WAVE_PWM_MODE);
+
+ /* Set count direction */
+ rzg2l_gpt_write(rzg2l_gpt, RZG2L_GTUDDTYC(ch), RZG2L_GTUDDTYC_UP_COUNTING);
+
+ /* Select count clock */
+ rzg2l_gpt_modify(rzg2l_gpt, RZG2L_GTCR(ch), RZG2L_GTCR_TPCS,
+ FIELD_PREP(RZG2L_GTCR_TPCS, prescale));
+
+ /* Set period */
+ rzg2l_gpt_write(rzg2l_gpt, RZG2L_GTPR(ch), pv);
+ }
+
+ /* Set duty cycle */
+ rzg2l_gpt_write(rzg2l_gpt, RZG2L_GTCCR(ch, sub_ch), dc);
+
+ if (rzg2l_gpt->channel_enable_count[ch] <= 1) {
+ /* Set initial value for counter */
+ rzg2l_gpt_write(rzg2l_gpt, RZG2L_GTCNT(ch), 0);
+
+ /* Set no buffer operation */
+ rzg2l_gpt_write(rzg2l_gpt, RZG2L_GTBER(ch), 0);
+
+ /* Restart the counter after updating the registers */
+ rzg2l_gpt_modify(rzg2l_gpt, RZG2L_GTCR(ch),
+ RZG2L_GTCR_CST, RZG2L_GTCR_CST);
+ }
+
+ return 0;
+}
+
+static int rzg2l_gpt_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct rzg2l_gpt_chip *rzg2l_gpt = to_rzg2l_gpt_chip(chip);
+ bool enabled = pwm->state.enabled;
+ int ret;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ guard(mutex)(&rzg2l_gpt->lock);
+ if (!state->enabled) {
+ if (enabled)
+ rzg2l_gpt_disable(rzg2l_gpt, pwm);
+
+ return 0;
+ }
+
+ ret = rzg2l_gpt_config(chip, pwm, state);
+ if (!ret && !enabled)
+ rzg2l_gpt_enable(rzg2l_gpt, pwm);
+
+ return ret;
+}
+
+static const struct pwm_ops rzg2l_gpt_ops = {
+ .request = rzg2l_gpt_request,
+ .free = rzg2l_gpt_free,
+ .get_state = rzg2l_gpt_get_state,
+ .apply = rzg2l_gpt_apply,
+};
+
+static int rzg2l_gpt_probe(struct platform_device *pdev)
+{
+ struct rzg2l_gpt_chip *rzg2l_gpt;
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ struct pwm_chip *chip;
+ unsigned long rate;
+ struct clk *clk;
+ int ret;
+
+ chip = devm_pwmchip_alloc(dev, RZG2L_MAX_PWM_CHANNELS, sizeof(*rzg2l_gpt));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ rzg2l_gpt = to_rzg2l_gpt_chip(chip);
+
+ rzg2l_gpt->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rzg2l_gpt->mmio))
+ return PTR_ERR(rzg2l_gpt->mmio);
+
+ rstc = devm_reset_control_get_exclusive_deasserted(dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(dev, PTR_ERR(rstc), "Cannot deassert reset control\n");
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Cannot get clock\n");
+
+ ret = devm_clk_rate_exclusive_get(dev, clk);
+ if (ret)
+ return ret;
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+		return dev_err_probe(dev, -EINVAL, "The gpt clk rate is 0\n");
+
+ /*
+ * Refuse clk rates > 1 GHz to prevent overflow later for computing
+ * period and duty cycle.
+ */
+ if (rate > NSEC_PER_SEC)
+		return dev_err_probe(dev, -EINVAL, "The gpt clk rate is > 1 GHz\n");
+
+ /*
+	 * The rate is at most 10^6 kHz (rates above 1 GHz were rejected
+	 * above), so 2^32 * 2^10 (prescaler) * 10^6 (rate_khz) < 2^64.
+	 * Also make sure the rate is a multiple of 1000 so that rate_khz is
+	 * exact.
+ */
+ rzg2l_gpt->rate_khz = rate / KILO;
+ if (rzg2l_gpt->rate_khz * KILO != rate)
+		return dev_err_probe(dev, -EINVAL, "Rate is not a multiple of 1000\n");
+
+ mutex_init(&rzg2l_gpt->lock);
+
+ chip->ops = &rzg2l_gpt_ops;
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
+
+ return 0;
+}
+
+static const struct of_device_id rzg2l_gpt_of_table[] = {
+ { .compatible = "renesas,rzg2l-gpt", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzg2l_gpt_of_table);
+
+static struct platform_driver rzg2l_gpt_driver = {
+ .driver = {
+ .name = "pwm-rzg2l-gpt",
+ .of_match_table = rzg2l_gpt_of_table,
+ },
+ .probe = rzg2l_gpt_probe,
+};
+module_platform_driver(rzg2l_gpt_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L General PWM Timer (GPT) Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 5832dce8ed9d..4789eafb8bac 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -20,6 +20,7 @@
struct stm32_pwm_lp {
struct clk *clk;
struct regmap *regmap;
+ unsigned int num_cc_chans;
};
static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
@@ -30,13 +31,101 @@ static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
/* STM32 Low-Power Timer is preceded by a configurable power-of-2 prescaler */
#define STM32_LPTIM_MAX_PRESCALER 128
+static int stm32_pwm_lp_update_allowed(struct stm32_pwm_lp *priv, int channel)
+{
+ int ret;
+ u32 ccmr1;
+ unsigned long ccmr;
+
+ /* Only one PWM on this LPTIMER: enable, prescaler and reload value can be changed */
+ if (!priv->num_cc_chans)
+ return true;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (ret)
+ return ret;
+ ccmr = ccmr1 & (STM32_LPTIM_CC1E | STM32_LPTIM_CC2E);
+
+ /* More than one channel enabled: enable, prescaler or ARR value can't be changed */
+ if (bitmap_weight(&ccmr, sizeof(u32) * BITS_PER_BYTE) > 1)
+ return false;
+
+ /*
+ * Only one channel is enabled (or none): check status on the other channel, to
+ * report if enable, prescaler or ARR value can be changed.
+ */
+ if (channel)
+ return !(ccmr1 & STM32_LPTIM_CC1E);
+ else
+ return !(ccmr1 & STM32_LPTIM_CC2E);
+}
+
+static int stm32_pwm_lp_compare_channel_apply(struct stm32_pwm_lp *priv, int channel,
+ bool enable, enum pwm_polarity polarity)
+{
+ u32 ccmr1, val, mask;
+ bool reenable;
+ int ret;
+
+ /* No dedicated CC channel: nothing to do */
+ if (!priv->num_cc_chans)
+ return 0;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (ret)
+ return ret;
+
+ if (channel) {
+ /* Must disable CC channel (CCxE) to modify polarity (CCxP), then re-enable */
+ reenable = (enable && FIELD_GET(STM32_LPTIM_CC2E, ccmr1)) &&
+ (polarity != FIELD_GET(STM32_LPTIM_CC2P, ccmr1));
+
+ mask = STM32_LPTIM_CC2SEL | STM32_LPTIM_CC2E | STM32_LPTIM_CC2P;
+ val = FIELD_PREP(STM32_LPTIM_CC2P, polarity);
+ val |= FIELD_PREP(STM32_LPTIM_CC2E, enable);
+ } else {
+ reenable = (enable && FIELD_GET(STM32_LPTIM_CC1E, ccmr1)) &&
+ (polarity != FIELD_GET(STM32_LPTIM_CC1P, ccmr1));
+
+ mask = STM32_LPTIM_CC1SEL | STM32_LPTIM_CC1E | STM32_LPTIM_CC1P;
+ val = FIELD_PREP(STM32_LPTIM_CC1P, polarity);
+ val |= FIELD_PREP(STM32_LPTIM_CC1E, enable);
+ }
+
+ if (reenable) {
+ u32 cfgr, presc;
+ unsigned long rate;
+ unsigned int delay_us;
+
+ ret = regmap_update_bits(priv->regmap, STM32_LPTIM_CCMR1,
+ channel ? STM32_LPTIM_CC2E : STM32_LPTIM_CC1E, 0);
+ if (ret)
+ return ret;
+ /*
+ * After a write to the LPTIM_CCMRx register, a new write operation can only be
+ * performed after a delay of at least (PRESC × 3) clock cycles
+ */
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
+ if (ret)
+ return ret;
+ presc = FIELD_GET(STM32_LPTIM_PRESC, cfgr);
+ rate = clk_get_rate(priv->clk) >> presc;
+ if (!rate)
+ return -EINVAL;
+ delay_us = 3 * DIV_ROUND_UP(USEC_PER_SEC, rate);
+ usleep_range(delay_us, delay_us * 2);
+ }
+
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CCMR1, mask, val);
+}
+
static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip);
unsigned long long prd, div, dty;
struct pwm_state cstate;
- u32 val, mask, cfgr, presc = 0;
+ u32 arr, val, mask, cfgr, presc = 0;
bool reenable;
int ret;
@@ -45,10 +134,28 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
if (cstate.enabled) {
- /* Disable LP timer */
- ret = regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ /* Disable CC channel if any */
+ ret = stm32_pwm_lp_compare_channel_apply(priv, pwm->hwpwm, false,
+ state->polarity);
if (ret)
return ret;
+ ret = regmap_write(priv->regmap, pwm->hwpwm ?
+ STM32_LPTIM_CCR2 : STM32_LPTIM_CMP, 0);
+ if (ret)
+ return ret;
+
+ /* Check if the timer can be disabled */
+ ret = stm32_pwm_lp_update_allowed(priv, pwm->hwpwm);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ /* Disable LP timer */
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ if (ret)
+ return ret;
+ }
+
/* disable clock to PWM counter */
clk_disable(priv->clk);
}
@@ -79,6 +186,23 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
dty = prd * state->duty_cycle;
do_div(dty, state->period);
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ /*
+	 * When several channels are used, they share the same prescaler and reload value.
+	 * Check whether these can still be changed; if not, the requested values must match the current ones.
+ */
+ if (!stm32_pwm_lp_update_allowed(priv, pwm->hwpwm)) {
+ ret = regmap_read(priv->regmap, STM32_LPTIM_ARR, &arr);
+ if (ret)
+ return ret;
+
+ if ((FIELD_GET(STM32_LPTIM_PRESC, cfgr) != presc) || (arr != prd - 1))
+ return -EBUSY;
+ }
+
if (!cstate.enabled) {
/* enable clock to drive PWM counter */
ret = clk_enable(priv->clk);
@@ -86,15 +210,20 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
- ret = regmap_read(priv->regmap, STM32_LPTIM_CFGR, &cfgr);
- if (ret)
- goto err;
-
if ((FIELD_GET(STM32_LPTIM_PRESC, cfgr) != presc) ||
- (FIELD_GET(STM32_LPTIM_WAVPOL, cfgr) != state->polarity)) {
+ ((FIELD_GET(STM32_LPTIM_WAVPOL, cfgr) != state->polarity) && !priv->num_cc_chans)) {
val = FIELD_PREP(STM32_LPTIM_PRESC, presc);
- val |= FIELD_PREP(STM32_LPTIM_WAVPOL, state->polarity);
- mask = STM32_LPTIM_PRESC | STM32_LPTIM_WAVPOL;
+ mask = STM32_LPTIM_PRESC;
+
+ if (!priv->num_cc_chans) {
+ /*
+			 * WAVPOL bit is only available when no capture compare channel is used,
+ * e.g. on LPTIMER instances that have only one output channel. CCMR1 is
+ * used otherwise.
+ */
+ val |= FIELD_PREP(STM32_LPTIM_WAVPOL, state->polarity);
+ mask |= STM32_LPTIM_WAVPOL;
+ }
/* Must disable LP timer to modify CFGR */
reenable = true;
@@ -120,20 +249,27 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
goto err;
- ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, prd - (1 + dty));
+ /* Write CMP/CCRx register and ensure it's been properly written */
+ ret = regmap_write(priv->regmap, pwm->hwpwm ? STM32_LPTIM_CCR2 : STM32_LPTIM_CMP,
+ prd - (1 + dty));
if (ret)
goto err;
- /* ensure CMP & ARR registers are properly written */
- ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+ /* ensure ARR and CMP/CCRx registers are properly written */
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, pwm->hwpwm ?
+ (val & STM32_LPTIM_CMP2_ARROK) == STM32_LPTIM_CMP2_ARROK :
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret) {
dev_err(pwmchip_parent(chip), "ARR/CMP registers write issue\n");
goto err;
}
- ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
- STM32_LPTIM_CMPOKCF_ARROKCF);
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ICR, pwm->hwpwm ?
+ STM32_LPTIM_CMP2OKCF_ARROKCF : STM32_LPTIM_CMPOKCF_ARROKCF);
+ if (ret)
+ goto err;
+
+ ret = stm32_pwm_lp_compare_channel_apply(priv, pwm->hwpwm, true, state->polarity);
if (ret)
goto err;
@@ -161,11 +297,22 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
{
struct stm32_pwm_lp *priv = to_stm32_pwm_lp(chip);
unsigned long rate = clk_get_rate(priv->clk);
- u32 val, presc, prd;
+ u32 val, presc, prd, ccmr1;
+ bool enabled;
u64 tmp;
regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
- state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
+ enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val);
+ if (priv->num_cc_chans) {
+	/* There's a CC channel; also check whether it's enabled */
+ regmap_read(priv->regmap, STM32_LPTIM_CCMR1, &ccmr1);
+ if (pwm->hwpwm)
+ enabled &= !!FIELD_GET(STM32_LPTIM_CC2E, ccmr1);
+ else
+ enabled &= !!FIELD_GET(STM32_LPTIM_CC1E, ccmr1);
+ }
+ state->enabled = enabled;
+
/* Keep PWM counter clock refcount in sync with PWM initial state */
if (state->enabled) {
int ret = clk_enable(priv->clk);
@@ -176,14 +323,21 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val);
presc = FIELD_GET(STM32_LPTIM_PRESC, val);
- state->polarity = FIELD_GET(STM32_LPTIM_WAVPOL, val);
+ if (priv->num_cc_chans) {
+ if (pwm->hwpwm)
+ state->polarity = FIELD_GET(STM32_LPTIM_CC2P, ccmr1);
+ else
+ state->polarity = FIELD_GET(STM32_LPTIM_CC1P, ccmr1);
+ } else {
+ state->polarity = FIELD_GET(STM32_LPTIM_WAVPOL, val);
+ }
regmap_read(priv->regmap, STM32_LPTIM_ARR, &prd);
tmp = prd + 1;
tmp = (tmp << presc) * NSEC_PER_SEC;
state->period = DIV_ROUND_CLOSEST_ULL(tmp, rate);
- regmap_read(priv->regmap, STM32_LPTIM_CMP, &val);
+ regmap_read(priv->regmap, pwm->hwpwm ? STM32_LPTIM_CCR2 : STM32_LPTIM_CMP, &val);
tmp = prd - val;
tmp = (tmp << presc) * NSEC_PER_SEC;
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, rate);
@@ -201,15 +355,25 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
struct stm32_pwm_lp *priv;
struct pwm_chip *chip;
+ unsigned int npwm;
int ret;
- chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (!ddata->num_cc_chans) {
+ /* No dedicated CC channel, so there's only one PWM channel */
+ npwm = 1;
+ } else {
+ /* There are dedicated CC channels, each with one PWM output */
+ npwm = ddata->num_cc_chans;
+ }
+
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*priv));
if (IS_ERR(chip))
return PTR_ERR(chip);
priv = to_stm32_pwm_lp(chip);
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
+ priv->num_cc_chans = ddata->num_cc_chans;
chip->ops = &stm32_pwm_lp_ops;
ret = devm_pwmchip_add(&pdev->dev, chip);
@@ -225,12 +389,15 @@ static int stm32_pwm_lp_suspend(struct device *dev)
{
struct pwm_chip *chip = dev_get_drvdata(dev);
struct pwm_state state;
-
- pwm_get_state(&chip->pwms[0], &state);
- if (state.enabled) {
- dev_err(dev, "The consumer didn't stop us (%s)\n",
- chip->pwms[0].label);
- return -EBUSY;
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; i++) {
+ pwm_get_state(&chip->pwms[i], &state);
+ if (state.enabled) {
+ dev_err(dev, "The consumer didn't stop us (%s)\n",
+ chip->pwms[i].label);
+ return -EBUSY;
+ }
}
return pinctrl_pm_select_sleep_state(dev);
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index ec2c05c9ee7a..4b148f0afeb9 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -88,7 +88,7 @@ static int stm32_pwm_round_waveform_tohw(struct pwm_chip *chip,
rate = clk_get_rate(priv->clk);
- if (active_channels(priv) & ~(1 << ch * 4)) {
+ if (active_channels(priv) & ~TIM_CCER_CCxE(ch + 1)) {
u64 arr;
/*
@@ -180,11 +180,11 @@ static int stm32_pwm_round_waveform_tohw(struct pwm_chip *chip,
wfhw->ccr = min_t(u64, ccr, wfhw->arr + 1);
+out:
dev_dbg(&chip->dev, "pwm#%u: %lld/%lld [+%lld] @%lu -> CCER: %08x, PSC: %08x, ARR: %08x, CCR: %08x\n",
pwm->hwpwm, wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
rate, wfhw->ccer, wfhw->psc, wfhw->arr, wfhw->ccr);
-out:
clk_disable(priv->clk);
return ret;
@@ -213,10 +213,10 @@ static int stm32_pwm_round_waveform_fromhw(struct pwm_chip *chip,
{
const struct stm32_pwm_waveform *wfhw = _wfhw;
struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
+ unsigned long rate = clk_get_rate(priv->clk);
unsigned int ch = pwm->hwpwm;
if (wfhw->ccer & TIM_CCER_CCxE(ch + 1)) {
- unsigned long rate = clk_get_rate(priv->clk);
u64 ccr_ns;
/* The result doesn't overflow for rate >= 15259 */
@@ -236,17 +236,16 @@ static int stm32_pwm_round_waveform_fromhw(struct pwm_chip *chip,
wf->duty_length_ns = ccr_ns;
wf->duty_offset_ns = 0;
}
-
- dev_dbg(&chip->dev, "pwm#%u: CCER: %08x, PSC: %08x, ARR: %08x, CCR: %08x @%lu -> %lld/%lld [+%lld]\n",
- pwm->hwpwm, wfhw->ccer, wfhw->psc, wfhw->arr, wfhw->ccr, rate,
- wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns);
-
} else {
*wf = (struct pwm_waveform){
.period_length_ns = 0,
};
}
+ dev_dbg(&chip->dev, "pwm#%u: CCER: %08x, PSC: %08x, ARR: %08x, CCR: %08x @%lu -> %lld/%lld [+%lld]\n",
+ pwm->hwpwm, wfhw->ccer, wfhw->psc, wfhw->arr, wfhw->ccr, rate,
+ wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns);
+
return 0;
}
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index cbf531d0ba68..995cfeca972b 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -98,18 +98,6 @@ MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
/*
- * An internal DMA coherent buffer
- */
-struct mport_dma_buf {
- void *ib_base;
- dma_addr_t ib_phys;
- u32 ib_size;
- u64 ib_rio_base;
- bool ib_map;
- struct file *filp;
-};
-
-/*
* Internal memory mapping structure
*/
enum rio_mport_map_dir {
@@ -131,14 +119,6 @@ struct rio_mport_mapping {
struct file *filp;
};
-struct rio_mport_dma_map {
- int valid;
- u64 length;
- void *vaddr;
- dma_addr_t paddr;
-};
-
-#define MPORT_MAX_DMA_BUFS 16
#define MPORT_EVENT_DEPTH 10
/*
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 9544b8ee0c96..46daf32ea13b 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1775,19 +1775,6 @@ struct dma_chan *rio_request_mport_dma(struct rio_mport *mport)
EXPORT_SYMBOL_GPL(rio_request_mport_dma);
/**
- * rio_request_dma - request RapidIO capable DMA channel that supports
- * specified target RapidIO device.
- * @rdev: RIO device associated with DMA transfer
- *
- * Returns pointer to allocated DMA channel or NULL if failed.
- */
-struct dma_chan *rio_request_dma(struct rio_dev *rdev)
-{
- return rio_request_mport_dma(rdev->net->hport);
-}
-EXPORT_SYMBOL_GPL(rio_request_dma);
-
-/**
* rio_release_dma - release specified DMA channel
* @dchan: DMA channel to release
*/
@@ -1834,57 +1821,9 @@ struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
}
EXPORT_SYMBOL_GPL(rio_dma_prep_xfer);
-/**
- * rio_dma_prep_slave_sg - RapidIO specific wrapper
- * for device_prep_slave_sg callback defined by DMAENGINE.
- * @rdev: RIO device control structure
- * @dchan: DMA channel to configure
- * @data: RIO specific data descriptor
- * @direction: DMA data transfer direction (TO or FROM the device)
- * @flags: dmaengine defined flags
- *
- * Initializes RapidIO capable DMA channel for the specified data transfer.
- * Uses DMA channel private extension to pass information related to remote
- * target RIO device.
- *
- * Returns: pointer to DMA transaction descriptor if successful,
- * error-valued pointer or NULL if failed.
- */
-struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
- struct dma_chan *dchan, struct rio_dma_data *data,
- enum dma_transfer_direction direction, unsigned long flags)
-{
- return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags);
-}
-EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
-
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
/**
- * rio_find_mport - find RIO mport by its ID
- * @mport_id: number (ID) of mport device
- *
- * Given a RIO mport number, the desired mport is located
- * in the global list of mports. If the mport is found, a pointer to its
- * data structure is returned. If no mport is found, %NULL is returned.
- */
-struct rio_mport *rio_find_mport(int mport_id)
-{
- struct rio_mport *port;
-
- mutex_lock(&rio_mport_list_lock);
- list_for_each_entry(port, &rio_mports, node) {
- if (port->id == mport_id)
- goto found;
- }
- port = NULL;
-found:
- mutex_unlock(&rio_mport_list_lock);
-
- return port;
-}
-
-/**
* rio_register_scan - enumeration/discovery method registration interface
* @mport_id: mport device ID for which fabric scan routine has to be set
* (RIO_MPORT_ANY = set for all available mports)
@@ -1962,48 +1901,6 @@ err_out:
EXPORT_SYMBOL_GPL(rio_register_scan);
/**
- * rio_unregister_scan - removes enumeration/discovery method from mport
- * @mport_id: mport device ID for which fabric scan routine has to be
- * unregistered (RIO_MPORT_ANY = apply to all mports that use
- * the specified scan_ops)
- * @scan_ops: enumeration/discovery operations structure
- *
- * Removes enumeration or discovery method assigned to the specified mport
- * device. If RIO_MPORT_ANY is specified, removes the specified operations from
- * all mports that have them attached.
- */
-int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops)
-{
- struct rio_mport *port;
- struct rio_scan_node *scan;
-
- pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id);
-
- if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS)
- return -EINVAL;
-
- mutex_lock(&rio_mport_list_lock);
-
- list_for_each_entry(port, &rio_mports, node)
- if (port->id == mport_id ||
- (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops))
- port->nscan = NULL;
-
- list_for_each_entry(scan, &rio_scans, node) {
- if (scan->mport_id == mport_id) {
- list_del(&scan->node);
- kfree(scan);
- break;
- }
- }
-
- mutex_unlock(&rio_mport_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rio_unregister_scan);
-
-/**
* rio_mport_scan - execute enumeration/discovery on the specified mport
* @mport_id: number (ID) of mport device
*/
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index f482de0d0370..a0e2a09ddb8e 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -41,9 +41,7 @@ extern void rio_del_device(struct rio_dev *rdev, enum rio_device_state state);
extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u8 port_num);
extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
-extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops);
extern void rio_attach_device(struct rio_dev *rdev);
-extern struct rio_mport *rio_find_mport(int mport_id);
extern int rio_mport_scan(int mport_id);
/* Structures internal to the RIO core code */
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 9135227301c8..66464674223f 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -198,12 +198,6 @@ struct cm_peer {
struct rio_dev *rdev;
};
-struct rio_cm_work {
- struct work_struct work;
- struct cm_dev *cm;
- void *data;
-};
-
struct conn_req {
struct list_head node;
u32 destid; /* requester destID */
@@ -789,6 +783,9 @@ static int riocm_ch_send(u16 ch_id, void *buf, int len)
if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
return -EINVAL;
+ if (len < sizeof(struct rio_ch_chan_hdr))
+ return -EINVAL; /* insufficient data from user */
+
ch = riocm_get_channel(ch_id);
if (!ch) {
riocm_error("%s(%d) ch_%d not found", current->comm,
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index d096b58cd0ae..2b6279d32774 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -17,8 +17,8 @@
#include <linux/bitops.h>
#include <linux/ras.h>
-#include <asm/amd_nb.h>
-#include <asm/amd_node.h>
+#include <asm/amd/nb.h>
+#include <asm/amd/node.h>
#include "reg_fields.h"
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 05e32d764028..6d8988387da4 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -122,6 +122,17 @@ config REGULATOR_AD5398
This driver supports AD5398 and AD5821 current regulator chips.
If building into module, its name is ad5398.ko.
+config REGULATOR_ADP5055
+ tristate "Analog Devices ADP5055 Triple Buck Regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver controls an Analog Devices ADP5055 with triple buck
+ regulators using an I2C interface.
+
+ Say M here if you want to include support for the regulator as a
+ module.
+
config REGULATOR_ANATOP
tristate "Freescale i.MX on-chip ANATOP LDO regulators"
depends on ARCH_MXC || COMPILE_TEST
@@ -1579,10 +1590,16 @@ config REGULATOR_TPS65219
tristate "TI TPS65219 Power regulators"
depends on MFD_TPS65219 && OF
help
- This driver supports TPS65219 voltage regulator chips.
+ This driver supports TPS65219, TPS65215, and TPS65214 voltage
+ regulator chips.
TPS65219 series of PMICs have 3 single phase BUCKs & 4 LDOs
- voltage regulators. It supports software based voltage control
- for different voltage domains.
+ voltage regulators.
+ TPS65215 PMIC has 3 single phase BUCKs & 2 LDOs.
+ TPS65214 PMIC has 3 synchronous stepdown DC-DC converters & 2
+ LDOs. One LDO supports a maximum output current of 300 mA and the
+	  other a maximum of 500 mA.
+ All 3 PMICs support software based voltage control for different
+ voltage domains.
config REGULATOR_TPS6594
tristate "TI TPS6594 Power regulators"
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 524e026c0273..c0bc7a0f4e67 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o
obj-$(CONFIG_REGULATOR_ACT8945A) += act8945a-regulator.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
+obj-$(CONFIG_REGULATOR_ADP5055) += adp5055-regulator.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA_LDO1) += arizona-ldo1.o
obj-$(CONFIG_REGULATOR_ARIZONA_MICSUPP) += arizona-micsupp.o
diff --git a/drivers/regulator/adp5055-regulator.c b/drivers/regulator/adp5055-regulator.c
new file mode 100644
index 000000000000..4b004a6b2f84
--- /dev/null
+++ b/drivers/regulator/adp5055-regulator.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Regulator driver for Analog Devices ADP5055
+//
+// Copyright (C) 2025 Analog Devices, Inc.
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+// ADP5055 Register Map.
+
+#define ADP5055_CTRL123 0xD1
+#define ADP5055_CTRL_MODE1 0xD3
+#define ADP5055_CTRL_MODE2 0xD4
+#define ADP5055_DLY0 0xD5
+#define ADP5055_DLY1 0xD6
+#define ADP5055_DLY2 0xD7
+#define ADP5055_VID0 0xD8
+#define ADP5055_VID1 0xD9
+#define ADP5055_VID2 0xDA
+#define ADP5055_DVS_LIM0 0xDC
+#define ADP5055_DVS_LIM1 0xDD
+#define ADP5055_DVS_LIM2 0xDE
+#define ADP5055_FT_CFG 0xDF
+#define ADP5055_PG_CFG 0xE0
+
+// ADP5055 Field Masks.
+
+#define ADP5055_MASK_EN_MODE BIT(0)
+#define ADP5055_MASK_OCP_BLANKING BIT(7)
+#define ADP5055_MASK_PSM BIT(4)
+#define ADP5055_MASK_DIS2 BIT(2)
+#define ADP5055_MASK_DIS1 BIT(1)
+#define ADP5055_MASK_DIS0 BIT(0)
+#define ADP5055_MASK_DIS_DLY GENMASK(6, 4)
+#define ADP5055_MASK_EN_DLY GENMASK(2, 0)
+#define ADP5055_MASK_DVS_LIM_UPPER GENMASK(7, 4)
+#define ADP5055_MASK_DVS_LIM_LOWER GENMASK(3, 0)
+#define ADP5055_MASK_FAST_TRANSIENT2 GENMASK(5, 4)
+#define ADP5055_MASK_FAST_TRANSIENT1 GENMASK(3, 2)
+#define ADP5055_MASK_FAST_TRANSIENT0 GENMASK(1, 0)
+#define ADP5055_MASK_DLY_PWRGD BIT(4)
+#define ADP5055_MASK_PWRGD2 BIT(2)
+#define ADP5055_MASK_PWRGD1 BIT(1)
+#define ADP5055_MASK_PWRGD0 BIT(0)
+
+#define ADP5055_MIN_VOUT 408000
+#define ADP5055_NUM_CH 3
+
+struct adp5055 {
+ struct device *dev;
+ struct regmap *regmap;
+ u32 tset;
+ struct gpio_desc *en_gpiod[ADP5055_NUM_CH];
+ bool en_mode_software;
+ int dvs_limit_upper[ADP5055_NUM_CH];
+ int dvs_limit_lower[ADP5055_NUM_CH];
+ u32 fast_transient[ADP5055_NUM_CH];
+ bool mask_power_good[ADP5055_NUM_CH];
+};
+
+static const unsigned int adp5055_tset_vals[] = {
+ 2600,
+ 20800,
+};
+
+static const unsigned int adp5055_enable_delay_vals_2_6[] = {
+ 0,
+ 2600,
+ 5200,
+ 7800,
+ 10400,
+ 13000,
+ 15600,
+ 18200,
+};
+
+static const unsigned int adp5055_enable_delay_vals_20_8[] = {
+ 0,
+ 20800,
+ 41600,
+ 62400,
+ 83200,
+ 104000,
+ 124800,
+ 145600,
+};
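
Both enable-delay tables above follow delay[i] = i * tset across the eight values a 3-bit EN_DLY/DIS_DLY field can hold:

    /* spot check (not in the driver): index 5 gives 5 * 2600 = 13000 us,
     * or 5 * 20800 = 104000 us with the long tset */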
+
+static const char * const adp5055_fast_transient_vals[] = {
+ "none",
+ "3G_1.5%",
+ "5G_1.5%",
+ "5G_2.5%",
+};
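
Each entry's index in this table is what lands in the 2-bit FAST_TRANSIENT fields; e.g. "5G_2.5%" is index 3 (also the default), and with all three channels at 3 the FT_CFG write evaluates to (hand-computed sketch):

    val = FIELD_PREP(ADP5055_MASK_FAST_TRANSIENT2, 3) |	/* 0x30 */
          FIELD_PREP(ADP5055_MASK_FAST_TRANSIENT1, 3) |	/* 0x0c */
          FIELD_PREP(ADP5055_MASK_FAST_TRANSIENT0, 3);	/* 0x03, total 0x3f */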
+
+static int adp5055_get_prop_index(const u32 *table, size_t table_size,
+ u32 value)
+{
+ int i;
+
+ for (i = 0; i < table_size; i++)
+ if (table[i] == value)
+ return i;
+
+ return -EINVAL;
+}
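
Usage sketch for the lookup helper above (values hypothetical):

    int idx = adp5055_get_prop_index(adp5055_tset_vals,
    				     ARRAY_SIZE(adp5055_tset_vals), 20800);
    /* idx == 1; an unlisted value such as 5000 would return -EINVAL */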
+
+static const struct regmap_range adp5055_reg_ranges[] = {
+ regmap_reg_range(0xD1, 0xE0),
+};
+
+static const struct regmap_access_table adp5055_access_ranges_table = {
+ .yes_ranges = adp5055_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(adp5055_reg_ranges),
+};
+
+static const struct regmap_config adp5055_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xE0,
+ .wr_table = &adp5055_access_ranges_table,
+ .rd_table = &adp5055_access_ranges_table,
+};
+
+static const struct linear_range adp5055_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(ADP5055_MIN_VOUT, 0, 255, 1500),
+};
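
The single linear range above encodes VOUT(sel) = 408000 + sel * 1500 microvolts for sel = 0..255, i.e. 408 mV up to 790.5 mV. A hypothetical decode helper, for illustration only:

    static int adp5055_sel_to_uv(unsigned int sel)	/* not part of the driver */
    {
    	if (sel > 255)
    		return -EINVAL;

    	return ADP5055_MIN_VOUT + sel * 1500;	/* e.g. sel = 128 -> 600000 uV */
    }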
+
+static int adp5055_parse_fw(struct device *dev, struct adp5055 *adp5055)
+{
+ int i, ret;
+ struct regmap *regmap = adp5055->regmap;
+ int val;
+ bool ocp_blanking;
+ bool delay_power_good;
+
+ ret = device_property_read_u32(dev, "adi,tset-us", &adp5055->tset);
+ if (!ret) {
+ ret = adp5055_get_prop_index(adp5055_tset_vals,
+ ARRAY_SIZE(adp5055_tset_vals), adp5055->tset);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to initialize tset.");
+ adp5055->tset = adp5055_tset_vals[ret];
+ }
+
+ ocp_blanking = device_property_read_bool(dev, "adi,ocp-blanking");
+
+ delay_power_good = device_property_read_bool(dev,
+ "adi,delay-power-good");
+
+ for (i = 0; i < ADP5055_NUM_CH; i++) {
+ val = FIELD_PREP(ADP5055_MASK_DVS_LIM_UPPER,
+ DIV_ROUND_CLOSEST_ULL(192000 - adp5055->dvs_limit_upper[i], 12000));
+ val |= FIELD_PREP(ADP5055_MASK_DVS_LIM_LOWER,
+ DIV_ROUND_CLOSEST_ULL(adp5055->dvs_limit_lower[i] + 190500, 12000));
+ ret = regmap_write(regmap, ADP5055_DVS_LIM0 + i, val);
+ if (ret)
+ return ret;
+ }
+
+ val = FIELD_PREP(ADP5055_MASK_EN_MODE, adp5055->en_mode_software);
+ ret = regmap_write(regmap, ADP5055_CTRL_MODE1, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(ADP5055_MASK_OCP_BLANKING, ocp_blanking);
+ ret = regmap_update_bits(regmap, ADP5055_CTRL_MODE2,
+ ADP5055_MASK_OCP_BLANKING, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(ADP5055_MASK_FAST_TRANSIENT2, adp5055->fast_transient[2]);
+ val |= FIELD_PREP(ADP5055_MASK_FAST_TRANSIENT1, adp5055->fast_transient[1]);
+ val |= FIELD_PREP(ADP5055_MASK_FAST_TRANSIENT0, adp5055->fast_transient[0]);
+ ret = regmap_write(regmap, ADP5055_FT_CFG, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(ADP5055_MASK_DLY_PWRGD, delay_power_good);
+ val |= FIELD_PREP(ADP5055_MASK_PWRGD2, adp5055->mask_power_good[2]);
+ val |= FIELD_PREP(ADP5055_MASK_PWRGD1, adp5055->mask_power_good[1]);
+ val |= FIELD_PREP(ADP5055_MASK_PWRGD0, adp5055->mask_power_good[0]);
+ ret = regmap_write(regmap, ADP5055_PG_CFG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
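
A worked example of the DVS_LIMx encoding in the loop above, with hypothetical device-tree limits:

    /*
     * adi,dvs-limit-upper-microvolt = 120000:
     *	DIV_ROUND_CLOSEST_ULL(192000 - 120000, 12000) = 6
     * adi,dvs-limit-lower-microvolt = -130500:
     *	DIV_ROUND_CLOSEST_ULL(-130500 + 190500, 12000) = 5
     * val = FIELD_PREP(GENMASK(7, 4), 6) | FIELD_PREP(GENMASK(3, 0), 5) = 0x65
     */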
+
+static int adp5055_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct adp5055 *adp5055 = config->driver_data;
+ int id, ret, pval, i;
+
+ id = desc->id;
+
+ if (of_property_read_bool(np, "enable-gpios")) {
+ adp5055->en_gpiod[id] = devm_fwnode_gpiod_get(config->dev,
+ of_fwnode_handle(np), "enable",
+ GPIOD_OUT_LOW, "enable");
+ if (IS_ERR(adp5055->en_gpiod[id]))
+ return dev_err_probe(config->dev, PTR_ERR(adp5055->en_gpiod[id]),
+ "Failed to get enable GPIO\n");
+
+ config->ena_gpiod = adp5055->en_gpiod[id];
+ } else {
+ adp5055->en_mode_software = true;
+ }
+
+ ret = of_property_read_u32(np, "adi,dvs-limit-upper-microvolt", &pval);
+ if (ret)
+ adp5055->dvs_limit_upper[id] = 192000;
+ else
+ adp5055->dvs_limit_upper[id] = pval;
+
+ if (adp5055->dvs_limit_upper[id] > 192000 || adp5055->dvs_limit_upper[id] < 12000)
+		return dev_err_probe(config->dev, -EINVAL,
+				"adi,dvs-limit-upper-microvolt value out of range.");
+
+ ret = of_property_read_u32(np, "adi,dvs-limit-lower-microvolt", &pval);
+ if (ret)
+ adp5055->dvs_limit_lower[id] = -190500;
+ else
+ adp5055->dvs_limit_lower[id] = pval;
+
+ if (adp5055->dvs_limit_lower[id] > -10500 || adp5055->dvs_limit_lower[id] < -190500)
+		return dev_err_probe(config->dev, -EINVAL,
+				"adi,dvs-limit-lower-microvolt value out of range.");
+
+ for (i = 0; i < 4; i++) {
+ ret = of_property_match_string(np, "adi,fast-transient",
+ adp5055_fast_transient_vals[i]);
+ if (!ret)
+ break;
+ }
+
+ if (ret < 0)
+ adp5055->fast_transient[id] = 3;
+ else
+ adp5055->fast_transient[id] = i;
+
+ adp5055->mask_power_good[id] = of_property_read_bool(np, "adi,mask-power-good");
+
+ return 0;
+}
+
+static int adp5055_set_mode(struct regulator_dev *rdev, u32 mode)
+{
+ struct adp5055 *adp5055 = rdev_get_drvdata(rdev);
+ int id, ret;
+
+ id = rdev_get_id(rdev);
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ ret = regmap_update_bits(adp5055->regmap, ADP5055_CTRL_MODE2,
+ ADP5055_MASK_PSM << id, 0);
+ break;
+ case REGULATOR_MODE_IDLE:
+ ret = regmap_update_bits(adp5055->regmap, ADP5055_CTRL_MODE2,
+ ADP5055_MASK_PSM << id, ADP5055_MASK_PSM << id);
+ break;
+ default:
+ return dev_err_probe(&rdev->dev, -EINVAL,
+ "Unsupported mode: %d\n", mode);
+ }
+
+ return ret;
+}
+
+static unsigned int adp5055_get_mode(struct regulator_dev *rdev)
+{
+ struct adp5055 *adp5055 = rdev_get_drvdata(rdev);
+ int id, ret, regval;
+
+ id = rdev_get_id(rdev);
+
+ ret = regmap_read(adp5055->regmap, ADP5055_CTRL_MODE2, &regval);
+ if (ret)
+ return ret;
+
+ if (regval & (ADP5055_MASK_PSM << id))
+ return REGULATOR_MODE_IDLE;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops adp5055_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = adp5055_set_mode,
+ .get_mode = adp5055_get_mode,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+};
+
+#define ADP5055_REG_(_name, _id, _ch, _ops) \
+ [_id] = { \
+ .name = _name, \
+ .of_match = of_match_ptr(_name), \
+ .of_parse_cb = adp5055_of_parse_cb, \
+ .id = _id, \
+ .ops = _ops, \
+ .linear_ranges = adp5055_voltage_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(adp5055_voltage_ranges), \
+ .vsel_reg = ADP5055_VID##_ch, \
+ .vsel_mask = GENMASK(7, 0), \
+ .enable_reg = ADP5055_CTRL123, \
+ .enable_mask = BIT(_ch), \
+ .active_discharge_on = ADP5055_MASK_DIS##_id, \
+ .active_discharge_off = 0, \
+ .active_discharge_mask = ADP5055_MASK_DIS##_id, \
+ .active_discharge_reg = ADP5055_CTRL_MODE2, \
+ .ramp_reg = ADP5055_DLY##_ch, \
+ .ramp_mask = ADP5055_MASK_EN_DLY, \
+ .n_ramp_values = ARRAY_SIZE(adp5055_enable_delay_vals_2_6), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+#define ADP5055_REG(_name, _id, _ch) \
+ ADP5055_REG_(_name, _id, _ch, &adp5055_ops)
+
+static struct regulator_desc adp5055_regulators[] = {
+ ADP5055_REG("buck0", 0, 0),
+ ADP5055_REG("buck1", 1, 1),
+ ADP5055_REG("buck2", 2, 2),
+};
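
Hand-expanding ADP5055_REG("buck0", 0, 0) shows how the _id/_ch token pasting picks the per-channel registers (abridged sketch; the common linear-range, type and owner fields are omitted here):

    .name = "buck0",
    .vsel_reg = ADP5055_VID0,			/* from _ch = 0 */
    .enable_reg = ADP5055_CTRL123,
    .enable_mask = BIT(0),			/* from _ch = 0 */
    .active_discharge_mask = ADP5055_MASK_DIS0,	/* from _id = 0 */
    .ramp_reg = ADP5055_DLY0,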
+
+static int adp5055_probe(struct i2c_client *client)
+{
+ struct regulator_init_data *init_data;
+ struct device *dev = &client->dev;
+ struct adp5055 *adp5055;
+ int i, ret;
+
+ init_data = of_get_regulator_init_data(dev, client->dev.of_node,
+ &adp5055_regulators[0]);
+ if (!init_data)
+ return -EINVAL;
+
+ adp5055 = devm_kzalloc(dev, sizeof(struct adp5055), GFP_KERNEL);
+ if (!adp5055)
+ return -ENOMEM;
+
+ adp5055->tset = 2600;
+ adp5055->en_mode_software = false;
+
+ adp5055->regmap = devm_regmap_init_i2c(client, &adp5055_regmap_config);
+ if (IS_ERR(adp5055->regmap))
+ return dev_err_probe(dev, PTR_ERR(adp5055->regmap), "Failed to allocate reg map");
+
+ for (i = 0; i < ADP5055_NUM_CH; i++) {
+ const struct regulator_desc *desc;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ if (adp5055->tset == 2600)
+ adp5055_regulators[i].ramp_delay_table = adp5055_enable_delay_vals_2_6;
+ else
+ adp5055_regulators[i].ramp_delay_table = adp5055_enable_delay_vals_20_8;
+
+ desc = &adp5055_regulators[i];
+
+ config.dev = dev;
+ config.driver_data = adp5055;
+ config.regmap = adp5055->regmap;
+ config.init_data = init_data;
+
+ rdev = devm_regulator_register(dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register %s\n", desc->name);
+ }
+ }
+
+ ret = adp5055_parse_fw(dev, adp5055);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id adp5055_of_match[] = {
+ { .compatible = "adi,adp5055", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adp5055_of_match);
+
+static const struct i2c_device_id adp5055_ids[] = {
+ { .name = "adp5055"},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, adp5055_ids);
+
+static struct i2c_driver adp5055_driver = {
+ .driver = {
+ .name = "adp5055",
+ .of_match_table = adp5055_of_match,
+ },
+ .probe = adp5055_probe,
+ .id_table = adp5055_ids,
+};
+module_i2c_driver(adp5055_driver);
+
+MODULE_DESCRIPTION("ADP5055 Voltage Regulator Driver");
+MODULE_AUTHOR("Alexis Czezar Torreno <alexisczezar.torreno@analog.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 9f0cda46b015..50414f4cb109 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -18,112 +18,236 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
-/* I2C slave 0 registers */
-#define BCM590XX_RFLDOPMCTRL1 0x60
-#define BCM590XX_IOSR1PMCTRL1 0x7a
-#define BCM590XX_IOSR2PMCTRL1 0x7c
-#define BCM590XX_CSRPMCTRL1 0x7e
-#define BCM590XX_SDSR1PMCTRL1 0x82
-#define BCM590XX_SDSR2PMCTRL1 0x86
-#define BCM590XX_MSRPMCTRL1 0x8a
-#define BCM590XX_VSRPMCTRL1 0x8e
-#define BCM590XX_RFLDOCTRL 0x96
-#define BCM590XX_CSRVOUT1 0xc0
-
-/* I2C slave 1 registers */
-#define BCM590XX_GPLDO5PMCTRL1 0x16
-#define BCM590XX_GPLDO6PMCTRL1 0x18
-#define BCM590XX_GPLDO1CTRL 0x1a
-#define BCM590XX_GPLDO2CTRL 0x1b
-#define BCM590XX_GPLDO3CTRL 0x1c
-#define BCM590XX_GPLDO4CTRL 0x1d
-#define BCM590XX_GPLDO5CTRL 0x1e
-#define BCM590XX_GPLDO6CTRL 0x1f
-#define BCM590XX_OTG_CTRL 0x40
-#define BCM590XX_GPLDO1PMCTRL1 0x57
-#define BCM590XX_GPLDO2PMCTRL1 0x59
-#define BCM590XX_GPLDO3PMCTRL1 0x5b
-#define BCM590XX_GPLDO4PMCTRL1 0x5d
-
#define BCM590XX_REG_ENABLE BIT(7)
#define BCM590XX_VBUS_ENABLE BIT(2)
#define BCM590XX_LDO_VSEL_MASK GENMASK(5, 3)
#define BCM590XX_SR_VSEL_MASK GENMASK(5, 0)
+enum bcm590xx_reg_type {
+ BCM590XX_REG_TYPE_LDO,
+ BCM590XX_REG_TYPE_GPLDO,
+ BCM590XX_REG_TYPE_SR,
+ BCM590XX_REG_TYPE_VBUS
+};
+
+struct bcm590xx_reg_data {
+ enum bcm590xx_reg_type type;
+ enum bcm590xx_regmap_type regmap;
+ const struct regulator_desc desc;
+};
+
+struct bcm590xx_reg {
+ struct bcm590xx *mfd;
+ unsigned int n_regulators;
+ const struct bcm590xx_reg_data *regs;
+};
+
+static const struct regulator_ops bcm590xx_ops_ldo = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+};
+
+/*
+ * LDO ops without voltage selection, used for MICLDO on BCM59054.
+ * (These are currently the same as VBUS ops, but will be different
+ * in the future once full PMMODE support is implemented.)
+ */
+static const struct regulator_ops bcm590xx_ops_ldo_novolt = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_ops bcm590xx_ops_dcdc = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_ops bcm590xx_ops_vbus = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+#define BCM590XX_REG_DESC(_model, _name, _name_lower) \
+ .id = _model##_REG_##_name, \
+ .name = #_name_lower, \
+ .of_match = of_match_ptr(#_name_lower), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE \
+
+#define BCM590XX_LDO_DESC(_model, _model_lower, _name, _name_lower, _table) \
+ BCM590XX_REG_DESC(_model, _name, _name_lower), \
+ .ops = &bcm590xx_ops_ldo, \
+ .n_voltages = ARRAY_SIZE(_model_lower##_##_table), \
+ .volt_table = _model_lower##_##_table, \
+ .vsel_reg = _model##_##_name##CTRL, \
+ .vsel_mask = BCM590XX_LDO_VSEL_MASK, \
+ .enable_reg = _model##_##_name##PMCTRL1, \
+ .enable_mask = BCM590XX_REG_ENABLE, \
+ .enable_is_inverted = true
+
+#define BCM590XX_SR_DESC(_model, _model_lower, _name, _name_lower, _ranges) \
+ BCM590XX_REG_DESC(_model, _name, _name_lower), \
+ .ops = &bcm590xx_ops_dcdc, \
+ .n_voltages = 64, \
+ .linear_ranges = _model_lower##_##_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(_model_lower##_##_ranges), \
+ .vsel_reg = _model##_##_name##VOUT1, \
+ .vsel_mask = BCM590XX_SR_VSEL_MASK, \
+ .enable_reg = _model##_##_name##PMCTRL1, \
+ .enable_mask = BCM590XX_REG_ENABLE, \
+ .enable_is_inverted = true
+
+#define BCM59056_REG_DESC(_name, _name_lower) \
+ BCM590XX_REG_DESC(BCM59056, _name, _name_lower)
+#define BCM59056_LDO_DESC(_name, _name_lower, _table) \
+ BCM590XX_LDO_DESC(BCM59056, bcm59056, _name, _name_lower, _table)
+#define BCM59056_SR_DESC(_name, _name_lower, _ranges) \
+ BCM590XX_SR_DESC(BCM59056, bcm59056, _name, _name_lower, _ranges)
+
+#define BCM59054_REG_DESC(_name, _name_lower) \
+ BCM590XX_REG_DESC(BCM59054, _name, _name_lower)
+#define BCM59054_LDO_DESC(_name, _name_lower, _table) \
+ BCM590XX_LDO_DESC(BCM59054, bcm59054, _name, _name_lower, _table)
+#define BCM59054_SR_DESC(_name, _name_lower, _ranges) \
+ BCM590XX_SR_DESC(BCM59054, bcm59054, _name, _name_lower, _ranges)
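
For reference when reading the tables below, BCM59056_LDO_DESC(RFLDO, rfldo, ldo_a_table) expands (hand-derived from the macros above) to:

    .id = BCM59056_REG_RFLDO,
    .name = "rfldo",
    .of_match = of_match_ptr("rfldo"),
    .regulators_node = of_match_ptr("regulators"),
    .type = REGULATOR_VOLTAGE,
    .owner = THIS_MODULE,
    .ops = &bcm590xx_ops_ldo,
    .n_voltages = ARRAY_SIZE(bcm59056_ldo_a_table),
    .volt_table = bcm59056_ldo_a_table,
    .vsel_reg = BCM59056_RFLDOCTRL,
    .vsel_mask = BCM590XX_LDO_VSEL_MASK,
    .enable_reg = BCM59056_RFLDOPMCTRL1,
    .enable_mask = BCM590XX_REG_ENABLE,
    .enable_is_inverted = true,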
+
+/* BCM59056 data */
+
+/* I2C slave 0 registers */
+#define BCM59056_RFLDOPMCTRL1 0x60
+#define BCM59056_CAMLDO1PMCTRL1 0x62
+#define BCM59056_CAMLDO2PMCTRL1 0x64
+#define BCM59056_SIMLDO1PMCTRL1 0x66
+#define BCM59056_SIMLDO2PMCTRL1 0x68
+#define BCM59056_SDLDOPMCTRL1 0x6a
+#define BCM59056_SDXLDOPMCTRL1 0x6c
+#define BCM59056_MMCLDO1PMCTRL1 0x6e
+#define BCM59056_MMCLDO2PMCTRL1 0x70
+#define BCM59056_AUDLDOPMCTRL1 0x72
+#define BCM59056_MICLDOPMCTRL1 0x74
+#define BCM59056_USBLDOPMCTRL1 0x76
+#define BCM59056_VIBLDOPMCTRL1 0x78
+#define BCM59056_IOSR1PMCTRL1 0x7a
+#define BCM59056_IOSR2PMCTRL1 0x7c
+#define BCM59056_CSRPMCTRL1 0x7e
+#define BCM59056_SDSR1PMCTRL1 0x82
+#define BCM59056_SDSR2PMCTRL1 0x86
+#define BCM59056_MSRPMCTRL1 0x8a
+#define BCM59056_VSRPMCTRL1 0x8e
+#define BCM59056_RFLDOCTRL 0x96
+#define BCM59056_CAMLDO1CTRL 0x97
+#define BCM59056_CAMLDO2CTRL 0x98
+#define BCM59056_SIMLDO1CTRL 0x99
+#define BCM59056_SIMLDO2CTRL 0x9a
+#define BCM59056_SDLDOCTRL 0x9b
+#define BCM59056_SDXLDOCTRL 0x9c
+#define BCM59056_MMCLDO1CTRL 0x9d
+#define BCM59056_MMCLDO2CTRL 0x9e
+#define BCM59056_AUDLDOCTRL 0x9f
+#define BCM59056_MICLDOCTRL 0xa0
+#define BCM59056_USBLDOCTRL 0xa1
+#define BCM59056_VIBLDOCTRL 0xa2
+#define BCM59056_CSRVOUT1 0xc0
+#define BCM59056_IOSR1VOUT1 0xc3
+#define BCM59056_IOSR2VOUT1 0xc6
+#define BCM59056_MSRVOUT1 0xc9
+#define BCM59056_SDSR1VOUT1 0xcc
+#define BCM59056_SDSR2VOUT1 0xcf
+#define BCM59056_VSRVOUT1 0xd2
+
+/* I2C slave 1 registers */
+#define BCM59056_GPLDO5PMCTRL1 0x16
+#define BCM59056_GPLDO6PMCTRL1 0x18
+#define BCM59056_GPLDO1CTRL 0x1a
+#define BCM59056_GPLDO2CTRL 0x1b
+#define BCM59056_GPLDO3CTRL 0x1c
+#define BCM59056_GPLDO4CTRL 0x1d
+#define BCM59056_GPLDO5CTRL 0x1e
+#define BCM59056_GPLDO6CTRL 0x1f
+#define BCM59056_OTG_CTRL 0x40
+#define BCM59056_GPLDO1PMCTRL1 0x57
+#define BCM59056_GPLDO2PMCTRL1 0x59
+#define BCM59056_GPLDO3PMCTRL1 0x5b
+#define BCM59056_GPLDO4PMCTRL1 0x5d
+
/*
* RFLDO to VSR regulators are
* accessed via I2C slave 0
*/
/* LDO regulator IDs */
-#define BCM590XX_REG_RFLDO 0
-#define BCM590XX_REG_CAMLDO1 1
-#define BCM590XX_REG_CAMLDO2 2
-#define BCM590XX_REG_SIMLDO1 3
-#define BCM590XX_REG_SIMLDO2 4
-#define BCM590XX_REG_SDLDO 5
-#define BCM590XX_REG_SDXLDO 6
-#define BCM590XX_REG_MMCLDO1 7
-#define BCM590XX_REG_MMCLDO2 8
-#define BCM590XX_REG_AUDLDO 9
-#define BCM590XX_REG_MICLDO 10
-#define BCM590XX_REG_USBLDO 11
-#define BCM590XX_REG_VIBLDO 12
+#define BCM59056_REG_RFLDO 0
+#define BCM59056_REG_CAMLDO1 1
+#define BCM59056_REG_CAMLDO2 2
+#define BCM59056_REG_SIMLDO1 3
+#define BCM59056_REG_SIMLDO2 4
+#define BCM59056_REG_SDLDO 5
+#define BCM59056_REG_SDXLDO 6
+#define BCM59056_REG_MMCLDO1 7
+#define BCM59056_REG_MMCLDO2 8
+#define BCM59056_REG_AUDLDO 9
+#define BCM59056_REG_MICLDO 10
+#define BCM59056_REG_USBLDO 11
+#define BCM59056_REG_VIBLDO 12
/* DCDC regulator IDs */
-#define BCM590XX_REG_CSR 13
-#define BCM590XX_REG_IOSR1 14
-#define BCM590XX_REG_IOSR2 15
-#define BCM590XX_REG_MSR 16
-#define BCM590XX_REG_SDSR1 17
-#define BCM590XX_REG_SDSR2 18
-#define BCM590XX_REG_VSR 19
+#define BCM59056_REG_CSR 13
+#define BCM59056_REG_IOSR1 14
+#define BCM59056_REG_IOSR2 15
+#define BCM59056_REG_MSR 16
+#define BCM59056_REG_SDSR1 17
+#define BCM59056_REG_SDSR2 18
+#define BCM59056_REG_VSR 19
/*
* GPLDO1 to VBUS regulators are
* accessed via I2C slave 1
*/
-#define BCM590XX_REG_GPLDO1 20
-#define BCM590XX_REG_GPLDO2 21
-#define BCM590XX_REG_GPLDO3 22
-#define BCM590XX_REG_GPLDO4 23
-#define BCM590XX_REG_GPLDO5 24
-#define BCM590XX_REG_GPLDO6 25
-#define BCM590XX_REG_VBUS 26
+#define BCM59056_REG_GPLDO1 20
+#define BCM59056_REG_GPLDO2 21
+#define BCM59056_REG_GPLDO3 22
+#define BCM59056_REG_GPLDO4 23
+#define BCM59056_REG_GPLDO5 24
+#define BCM59056_REG_GPLDO6 25
+#define BCM59056_REG_VBUS 26
-#define BCM590XX_NUM_REGS 27
-
-#define BCM590XX_REG_IS_LDO(n) (n < BCM590XX_REG_CSR)
-#define BCM590XX_REG_IS_GPLDO(n) \
- ((n > BCM590XX_REG_VSR) && (n < BCM590XX_REG_VBUS))
-#define BCM590XX_REG_IS_VBUS(n) (n == BCM590XX_REG_VBUS)
+#define BCM59056_NUM_REGS 27
/* LDO group A: supported voltages in microvolts */
-static const unsigned int ldo_a_table[] = {
+static const unsigned int bcm59056_ldo_a_table[] = {
1200000, 1800000, 2500000, 2700000, 2800000,
2900000, 3000000, 3300000,
};
/* LDO group C: supported voltages in microvolts */
-static const unsigned int ldo_c_table[] = {
+static const unsigned int bcm59056_ldo_c_table[] = {
3100000, 1800000, 2500000, 2700000, 2800000,
2900000, 3000000, 3300000,
};
-static const unsigned int ldo_vbus[] = {
- 5000000,
-};
-
/* DCDC group CSR: supported voltages in microvolts */
-static const struct linear_range dcdc_csr_ranges[] = {
+static const struct linear_range bcm59056_dcdc_csr_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
REGULATOR_LINEAR_RANGE(1360000, 51, 55, 20000),
REGULATOR_LINEAR_RANGE(900000, 56, 63, 0),
};
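
Decoding sketch for the CSR ranges above (selector values hypothetical):

    /*
     * sel = 10 -> 860000 + (10 - 2) * 10000 = 940000 uV
     * sel = 53 -> 1360000 + (53 - 51) * 20000 = 1400000 uV
     * sel = 60 -> 900000 uV (zero-step range)
     */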
/* DCDC group IOSR1: supported voltages in microvolts */
-static const struct linear_range dcdc_iosr1_ranges[] = {
+static const struct linear_range bcm59056_dcdc_iosr1_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 51, 10000),
REGULATOR_LINEAR_RANGE(1500000, 52, 52, 0),
REGULATOR_LINEAR_RANGE(1800000, 53, 53, 0),
@@ -131,155 +255,854 @@ static const struct linear_range dcdc_iosr1_ranges[] = {
};
/* DCDC group SDSR1: supported voltages in microvolts */
-static const struct linear_range dcdc_sdsr1_ranges[] = {
+static const struct linear_range bcm59056_dcdc_sdsr1_ranges[] = {
REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
REGULATOR_LINEAR_RANGE(1340000, 51, 51, 0),
REGULATOR_LINEAR_RANGE(900000, 52, 63, 0),
};
-struct bcm590xx_info {
- const char *name;
- const char *vin_name;
- u8 n_voltages;
- const unsigned int *volt_table;
- u8 n_linear_ranges;
- const struct linear_range *linear_ranges;
-};
+static const struct bcm590xx_reg_data bcm59056_regs[BCM59056_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(RFLDO, rfldo, ldo_a_table),
+ },
+ },
-#define BCM590XX_REG_TABLE(_name, _table) \
- { \
- .name = #_name, \
- .n_voltages = ARRAY_SIZE(_table), \
- .volt_table = _table, \
- }
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(CAMLDO1, camldo1, ldo_c_table),
+ },
+ },
-#define BCM590XX_REG_RANGES(_name, _ranges) \
- { \
- .name = #_name, \
- .n_voltages = 64, \
- .n_linear_ranges = ARRAY_SIZE(_ranges), \
- .linear_ranges = _ranges, \
- }
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(CAMLDO2, camldo2, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SIMLDO1, simldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SIMLDO2, simldo2, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SDLDO, sdldo, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(SDXLDO, sdxldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MMCLDO1, mmcldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MMCLDO2, mmcldo2, ldo_a_table),
+ },
+ },
-static struct bcm590xx_info bcm590xx_regs[] = {
- BCM590XX_REG_TABLE(rfldo, ldo_a_table),
- BCM590XX_REG_TABLE(camldo1, ldo_c_table),
- BCM590XX_REG_TABLE(camldo2, ldo_c_table),
- BCM590XX_REG_TABLE(simldo1, ldo_a_table),
- BCM590XX_REG_TABLE(simldo2, ldo_a_table),
- BCM590XX_REG_TABLE(sdldo, ldo_c_table),
- BCM590XX_REG_TABLE(sdxldo, ldo_a_table),
- BCM590XX_REG_TABLE(mmcldo1, ldo_a_table),
- BCM590XX_REG_TABLE(mmcldo2, ldo_a_table),
- BCM590XX_REG_TABLE(audldo, ldo_a_table),
- BCM590XX_REG_TABLE(micldo, ldo_a_table),
- BCM590XX_REG_TABLE(usbldo, ldo_a_table),
- BCM590XX_REG_TABLE(vibldo, ldo_c_table),
- BCM590XX_REG_RANGES(csr, dcdc_csr_ranges),
- BCM590XX_REG_RANGES(iosr1, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(iosr2, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(msr, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(sdsr1, dcdc_sdsr1_ranges),
- BCM590XX_REG_RANGES(sdsr2, dcdc_iosr1_ranges),
- BCM590XX_REG_RANGES(vsr, dcdc_iosr1_ranges),
- BCM590XX_REG_TABLE(gpldo1, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo2, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo3, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo4, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo5, ldo_a_table),
- BCM590XX_REG_TABLE(gpldo6, ldo_a_table),
- BCM590XX_REG_TABLE(vbus, ldo_vbus),
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(AUDLDO, audldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(MICLDO, micldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(USBLDO, usbldo, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_LDO_DESC(VIBLDO, vibldo, ldo_c_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(IOSR1, iosr1, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(IOSR2, iosr2, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(MSR, msr, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(SDSR1, sdsr1, dcdc_sdsr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(SDSR2, sdsr2, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59056_SR_DESC(VSR, vsr, dcdc_iosr1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO1, gpldo1, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO2, gpldo2, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO3, gpldo3, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO4, gpldo4, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO5, gpldo5, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_LDO_DESC(GPLDO6, gpldo6, ldo_a_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59056_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59056_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
-struct bcm590xx_reg {
- struct regulator_desc *desc;
- struct bcm590xx *mfd;
+/* BCM59054 data */
+
+/* I2C slave 0 registers */
+#define BCM59054_RFLDOPMCTRL1 0x60
+#define BCM59054_CAMLDO1PMCTRL1 0x62
+#define BCM59054_CAMLDO2PMCTRL1 0x64
+#define BCM59054_SIMLDO1PMCTRL1 0x66
+#define BCM59054_SIMLDO2PMCTRL1 0x68
+#define BCM59054_SDLDOPMCTRL1 0x6a
+#define BCM59054_SDXLDOPMCTRL1 0x6c
+#define BCM59054_MMCLDO1PMCTRL1 0x6e
+#define BCM59054_MMCLDO2PMCTRL1 0x70
+#define BCM59054_AUDLDOPMCTRL1 0x72
+#define BCM59054_MICLDOPMCTRL1 0x74
+#define BCM59054_USBLDOPMCTRL1 0x76
+#define BCM59054_VIBLDOPMCTRL1 0x78
+#define BCM59054_IOSR1PMCTRL1 0x7a
+#define BCM59054_IOSR2PMCTRL1 0x7c
+#define BCM59054_CSRPMCTRL1 0x7e
+#define BCM59054_SDSR1PMCTRL1 0x82
+#define BCM59054_SDSR2PMCTRL1 0x86
+#define BCM59054_MMSRPMCTRL1 0x8a
+#define BCM59054_VSRPMCTRL1 0x8e
+#define BCM59054_RFLDOCTRL 0x96
+#define BCM59054_CAMLDO1CTRL 0x97
+#define BCM59054_CAMLDO2CTRL 0x98
+#define BCM59054_SIMLDO1CTRL 0x99
+#define BCM59054_SIMLDO2CTRL 0x9a
+#define BCM59054_SDLDOCTRL 0x9b
+#define BCM59054_SDXLDOCTRL 0x9c
+#define BCM59054_MMCLDO1CTRL 0x9d
+#define BCM59054_MMCLDO2CTRL 0x9e
+#define BCM59054_AUDLDOCTRL 0x9f
+#define BCM59054_MICLDOCTRL 0xa0
+#define BCM59054_USBLDOCTRL 0xa1
+#define BCM59054_VIBLDOCTRL 0xa2
+#define BCM59054_CSRVOUT1 0xc0
+#define BCM59054_IOSR1VOUT1 0xc3
+#define BCM59054_IOSR2VOUT1 0xc6
+#define BCM59054_MMSRVOUT1 0xc9
+#define BCM59054_SDSR1VOUT1 0xcc
+#define BCM59054_SDSR2VOUT1 0xcf
+#define BCM59054_VSRVOUT1 0xd2
+
+/* I2C slave 1 registers */
+#define BCM59054_LVLDO1PMCTRL1 0x16
+#define BCM59054_LVLDO2PMCTRL1 0x18
+#define BCM59054_GPLDO1CTRL 0x1a
+#define BCM59054_GPLDO2CTRL 0x1b
+#define BCM59054_GPLDO3CTRL 0x1c
+#define BCM59054_TCXLDOCTRL 0x1d
+#define BCM59054_LVLDO1CTRL 0x1e
+#define BCM59054_LVLDO2CTRL 0x1f
+#define BCM59054_OTG_CTRL 0x40
+#define BCM59054_GPLDO1PMCTRL1 0x57
+#define BCM59054_GPLDO2PMCTRL1 0x59
+#define BCM59054_GPLDO3PMCTRL1 0x5b
+#define BCM59054_TCXLDOPMCTRL1 0x5d
+
+/*
+ * RFLDO to VSR regulators are
+ * accessed via I2C slave 0
+ */
+
+/* LDO regulator IDs */
+#define BCM59054_REG_RFLDO 0
+#define BCM59054_REG_CAMLDO1 1
+#define BCM59054_REG_CAMLDO2 2
+#define BCM59054_REG_SIMLDO1 3
+#define BCM59054_REG_SIMLDO2 4
+#define BCM59054_REG_SDLDO 5
+#define BCM59054_REG_SDXLDO 6
+#define BCM59054_REG_MMCLDO1 7
+#define BCM59054_REG_MMCLDO2 8
+#define BCM59054_REG_AUDLDO 9
+#define BCM59054_REG_MICLDO 10
+#define BCM59054_REG_USBLDO 11
+#define BCM59054_REG_VIBLDO 12
+
+/* DCDC regulator IDs */
+#define BCM59054_REG_CSR 13
+#define BCM59054_REG_IOSR1 14
+#define BCM59054_REG_IOSR2 15
+#define BCM59054_REG_MMSR 16
+#define BCM59054_REG_SDSR1 17
+#define BCM59054_REG_SDSR2 18
+#define BCM59054_REG_VSR 19
+
+/*
+ * GPLDO1 to VBUS regulators are
+ * accessed via I2C slave 1
+ */
+
+#define BCM59054_REG_GPLDO1 20
+#define BCM59054_REG_GPLDO2 21
+#define BCM59054_REG_GPLDO3 22
+#define BCM59054_REG_TCXLDO 23
+#define BCM59054_REG_LVLDO1 24
+#define BCM59054_REG_LVLDO2 25
+#define BCM59054_REG_VBUS 26
+
+#define BCM59054_NUM_REGS 27
+
+/* LDO group 1: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_1_table[] = {
+ 1200000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
};
-static int bcm590xx_get_vsel_register(int id)
-{
- if (BCM590XX_REG_IS_LDO(id))
- return BCM590XX_RFLDOCTRL + id;
- else if (BCM590XX_REG_IS_GPLDO(id))
- return BCM590XX_GPLDO1CTRL + id;
- else
- return BCM590XX_CSRVOUT1 + (id - BCM590XX_REG_CSR) * 3;
-}
+/* LDO group 2: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_2_table[] = {
+ 3100000, 1800000, 2500000, 2700000, 2800000,
+ 2900000, 3000000, 3300000,
+};
-static int bcm590xx_get_enable_register(int id)
-{
- int reg = 0;
-
- if (BCM590XX_REG_IS_LDO(id))
- reg = BCM590XX_RFLDOPMCTRL1 + id * 2;
- else if (BCM590XX_REG_IS_GPLDO(id))
- reg = BCM590XX_GPLDO1PMCTRL1 + id * 2;
- else
- switch (id) {
- case BCM590XX_REG_CSR:
- reg = BCM590XX_CSRPMCTRL1;
- break;
- case BCM590XX_REG_IOSR1:
- reg = BCM590XX_IOSR1PMCTRL1;
- break;
- case BCM590XX_REG_IOSR2:
- reg = BCM590XX_IOSR2PMCTRL1;
- break;
- case BCM590XX_REG_MSR:
- reg = BCM590XX_MSRPMCTRL1;
- break;
- case BCM590XX_REG_SDSR1:
- reg = BCM590XX_SDSR1PMCTRL1;
- break;
- case BCM590XX_REG_SDSR2:
- reg = BCM590XX_SDSR2PMCTRL1;
- break;
- case BCM590XX_REG_VSR:
- reg = BCM590XX_VSRPMCTRL1;
- break;
- case BCM590XX_REG_VBUS:
- reg = BCM590XX_OTG_CTRL;
- break;
- }
+/* LDO group 3: supported voltages in microvolts */
+static const unsigned int bcm59054_ldo_3_table[] = {
+ 1000000, 1107000, 1143000, 1214000, 1250000,
+ 1464000, 1500000, 1786000,
+};
+/* DCDC group SR: supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_sr_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 1, 0),
+ REGULATOR_LINEAR_RANGE(860000, 2, 60, 10000),
+ REGULATOR_LINEAR_RANGE(1500000, 61, 61, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 62, 62, 0),
+ REGULATOR_LINEAR_RANGE(900000, 63, 63, 0),
+};
- return reg;
-}
+/* DCDC group VSR (BCM59054A1): supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_vsr_a1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0, 1, 0),
+ REGULATOR_LINEAR_RANGE(860000, 2, 59, 10000),
+ REGULATOR_LINEAR_RANGE(1700000, 60, 60, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 61, 61, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 62, 62, 0),
+ REGULATOR_LINEAR_RANGE(1600000, 63, 63, 0),
+};
-static const struct regulator_ops bcm590xx_ops_ldo = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_iterate,
+/* DCDC group CSR: supported voltages in microvolts */
+static const struct linear_range bcm59054_dcdc_csr_ranges[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 1, 100000),
+ REGULATOR_LINEAR_RANGE(860000, 2, 60, 10000),
+ REGULATOR_LINEAR_RANGE(900000, 61, 63, 0),
};
-static const struct regulator_ops bcm590xx_ops_dcdc = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
+static const struct bcm590xx_reg_data bcm59054_regs[BCM59054_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(RFLDO, rfldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO1, camldo1, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO2, camldo2, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO1, simldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO2, simldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDLDO, sdldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDXLDO, sdxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO1, mmcldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO2, mmcldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(AUDLDO, audldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_REG_DESC(MICLDO, micldo),
+ .ops = &bcm590xx_ops_ldo_novolt,
+ /* MICLDO is locked at 1.8V */
+ .n_voltages = 1,
+ .fixed_uV = 1800000,
+ .enable_reg = BCM59054_MICLDOPMCTRL1,
+ .enable_mask = BCM590XX_REG_ENABLE,
+ .enable_is_inverted = true,
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(USBLDO, usbldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(VIBLDO, vibldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR1, iosr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR2, iosr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(MMSR, mmsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR1, sdsr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR2, sdsr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(VSR, vsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO1, gpldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO2, gpldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO3, gpldo3, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(TCXLDO, tcxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO1, lvldo1, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO2, lvldo2, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59054_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
-static const struct regulator_ops bcm590xx_ops_vbus = {
- .is_enabled = regulator_is_enabled_regmap,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
+/*
+ * BCM59054A1 regulators; same as previous revision, but with different
+ * VSR voltage table.
+ */
+static const struct bcm590xx_reg_data bcm59054_a1_regs[BCM59054_NUM_REGS] = {
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(RFLDO, rfldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO1, camldo1, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(CAMLDO2, camldo2, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO1, simldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SIMLDO2, simldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDLDO, sdldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(SDXLDO, sdxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO1, mmcldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(MMCLDO2, mmcldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(AUDLDO, audldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_REG_DESC(MICLDO, micldo),
+ .ops = &bcm590xx_ops_ldo_novolt,
+ /* MICLDO is locked at 1.8V */
+ .n_voltages = 1,
+ .fixed_uV = 1800000,
+ .enable_reg = BCM59054_MICLDOPMCTRL1,
+ .enable_mask = BCM590XX_REG_ENABLE,
+ .enable_is_inverted = true,
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(USBLDO, usbldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_LDO,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_LDO_DESC(VIBLDO, vibldo, ldo_2_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(CSR, csr, dcdc_csr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR1, iosr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(IOSR2, iosr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(MMSR, mmsr, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR1, sdsr1, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(SDSR2, sdsr2, dcdc_sr_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_SR,
+ .regmap = BCM590XX_REGMAP_PRI,
+ .desc = {
+ BCM59054_SR_DESC(VSR, vsr, dcdc_vsr_a1_ranges),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO1, gpldo1, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO2, gpldo2, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(GPLDO3, gpldo3, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(TCXLDO, tcxldo, ldo_1_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO1, lvldo1, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_GPLDO,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_LDO_DESC(LVLDO2, lvldo2, ldo_3_table),
+ },
+ },
+
+ {
+ .type = BCM590XX_REG_TYPE_VBUS,
+ .regmap = BCM590XX_REGMAP_SEC,
+ .desc = {
+ BCM59054_REG_DESC(VBUS, vbus),
+ .ops = &bcm590xx_ops_vbus,
+ .n_voltages = 1,
+ .fixed_uV = 5000000,
+ .enable_reg = BCM59054_OTG_CTRL,
+ .enable_mask = BCM590XX_VBUS_ENABLE,
+ },
+ },
};
static int bcm590xx_probe(struct platform_device *pdev)
{
struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
struct bcm590xx_reg *pmu;
+ const struct bcm590xx_reg_data *info;
struct regulator_config config = { };
- struct bcm590xx_info *info;
struct regulator_dev *rdev;
- int i;
+ unsigned int i;
pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
if (!pmu)
@@ -287,65 +1110,53 @@ static int bcm590xx_probe(struct platform_device *pdev)
pmu->mfd = bcm590xx;
- platform_set_drvdata(pdev, pmu);
-
- pmu->desc = devm_kcalloc(&pdev->dev,
- BCM590XX_NUM_REGS,
- sizeof(struct regulator_desc),
- GFP_KERNEL);
- if (!pmu->desc)
- return -ENOMEM;
+ switch (pmu->mfd->pmu_id) {
+ case BCM590XX_PMUID_BCM59054:
+ pmu->n_regulators = BCM59054_NUM_REGS;
+ if (pmu->mfd->rev_analog == BCM59054_REV_ANALOG_A1)
+ pmu->regs = bcm59054_a1_regs;
+ else
+ pmu->regs = bcm59054_regs;
+ break;
+ case BCM590XX_PMUID_BCM59056:
+ pmu->n_regulators = BCM59056_NUM_REGS;
+ pmu->regs = bcm59056_regs;
+ break;
+ default:
+ dev_err(bcm590xx->dev,
+ "unknown device type, could not initialize\n");
+ return -EINVAL;
+ }
- info = bcm590xx_regs;
-
- for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
- /* Register the regulators */
- pmu->desc[i].name = info->name;
- pmu->desc[i].of_match = of_match_ptr(info->name);
- pmu->desc[i].regulators_node = of_match_ptr("regulators");
- pmu->desc[i].supply_name = info->vin_name;
- pmu->desc[i].id = i;
- pmu->desc[i].volt_table = info->volt_table;
- pmu->desc[i].n_voltages = info->n_voltages;
- pmu->desc[i].linear_ranges = info->linear_ranges;
- pmu->desc[i].n_linear_ranges = info->n_linear_ranges;
-
- if ((BCM590XX_REG_IS_LDO(i)) || (BCM590XX_REG_IS_GPLDO(i))) {
- pmu->desc[i].ops = &bcm590xx_ops_ldo;
- pmu->desc[i].vsel_mask = BCM590XX_LDO_VSEL_MASK;
- } else if (BCM590XX_REG_IS_VBUS(i))
- pmu->desc[i].ops = &bcm590xx_ops_vbus;
- else {
- pmu->desc[i].ops = &bcm590xx_ops_dcdc;
- pmu->desc[i].vsel_mask = BCM590XX_SR_VSEL_MASK;
- }
+ platform_set_drvdata(pdev, pmu);
- if (BCM590XX_REG_IS_VBUS(i))
- pmu->desc[i].enable_mask = BCM590XX_VBUS_ENABLE;
- else {
- pmu->desc[i].vsel_reg = bcm590xx_get_vsel_register(i);
- pmu->desc[i].enable_is_inverted = true;
- pmu->desc[i].enable_mask = BCM590XX_REG_ENABLE;
- }
- pmu->desc[i].enable_reg = bcm590xx_get_enable_register(i);
- pmu->desc[i].type = REGULATOR_VOLTAGE;
- pmu->desc[i].owner = THIS_MODULE;
+ /* Register the regulators */
+ for (i = 0; i < pmu->n_regulators; i++) {
+ info = &pmu->regs[i];
config.dev = bcm590xx->dev;
config.driver_data = pmu;
- if (BCM590XX_REG_IS_GPLDO(i) || BCM590XX_REG_IS_VBUS(i))
- config.regmap = bcm590xx->regmap_sec;
- else
- config.regmap = bcm590xx->regmap_pri;
- rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
- &config);
- if (IS_ERR(rdev)) {
+ switch (info->regmap) {
+ case BCM590XX_REGMAP_PRI:
+ config.regmap = bcm590xx->regmap_pri;
+ break;
+ case BCM590XX_REGMAP_SEC:
+ config.regmap = bcm590xx->regmap_sec;
+ break;
+ default:
dev_err(bcm590xx->dev,
- "failed to register %s regulator\n",
+ "invalid regmap for %s regulator; this is a driver bug\n",
pdev->name);
- return PTR_ERR(rdev);
+ return -EINVAL;
}
+
+ rdev = devm_regulator_register(&pdev->dev, &info->desc,
+ &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(bcm590xx->dev, PTR_ERR(rdev),
+ "failed to register %s regulator\n",
+ pdev->name);
}
return 0;
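
A note on the BCM59054_REG_DESC/LDO_DESC/SR_DESC helpers used throughout the tables above: their definitions live in an earlier part of this patch, outside the hunks shown here. As a rough sketch of the pattern — field names inferred from the MICLDO and VBUS entries, which override the defaults by hand, so the exact expansion is an assumption — the LDO variant plausibly looks like:

/*
 * Illustrative only; not the actual macro from the patch. The real
 * helper presumably also token-pastes _model into .vsel_reg/.enable_reg.
 */
#define BCM59054_LDO_DESC(_model, _name, _table)		\
	.name = #_name,						\
	.of_match = of_match_ptr(#_name),			\
	.regulators_node = of_match_ptr("regulators"),		\
	.ops = &bcm590xx_ops_ldo,				\
	.type = REGULATOR_VOLTAGE,				\
	.volt_table = _table,					\
	.n_voltages = ARRAY_SIZE(_table),			\
	.vsel_mask = BCM590XX_LDO_VSEL_MASK,			\
	.enable_mask = BCM590XX_REG_ENABLE,			\
	.enable_is_inverted = true,				\
	.owner = THIS_MODULE
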
diff --git a/drivers/regulator/bd96801-regulator.c b/drivers/regulator/bd96801-regulator.c
index 3a9d772491a8..24d21172298b 100644
--- a/drivers/regulator/bd96801-regulator.c
+++ b/drivers/regulator/bd96801-regulator.c
@@ -83,6 +83,7 @@ enum {
#define BD96801_LDO6_VSEL_REG 0x26
#define BD96801_LDO7_VSEL_REG 0x27
#define BD96801_BUCK_VSEL_MASK 0x1F
+#define BD96805_BUCK_VSEL_MASK 0x3f
#define BD96801_LDO_VSEL_MASK 0xff
#define BD96801_MASK_RAMP_DELAY 0xc0
@@ -90,6 +91,7 @@ enum {
#define BD96801_BUCK_INT_VOUT_MASK 0xff
#define BD96801_BUCK_VOLTS 256
+#define BD96805_BUCK_VOLTS 64
#define BD96801_LDO_VOLTS 256
#define BD96801_OVP_MASK 0x03
@@ -160,6 +162,30 @@ static const struct linear_range bd96801_buck_init_volts[] = {
REGULATOR_LINEAR_RANGE(3300000 - 150000, 0xed, 0xff, 0),
};
+/* BD96802 uses the same buck voltage ranges as BD96801 */
+#define bd96802_tune_volts bd96801_tune_volts
+#define bd96802_buck_init_volts bd96801_buck_init_volts
+
+/*
+ * On BD96805 we have a similar "negative tuning range" to the one on
+ * BD96801, except that the max tuning is -310 ... +310 mV (instead of
+ * 150 mV). We use the same approach as with the BD96801 ranges.
+ */
+static const struct linear_range bd96805_tune_volts[] = {
+ REGULATOR_LINEAR_RANGE(310000, 0x00, 0x1F, 10000),
+ REGULATOR_LINEAR_RANGE(0, 0x20, 0x3F, 10000),
+};
+
+static const struct linear_range bd96805_buck_init_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000 - 310000, 0x00, 0xc8, 5000),
+ REGULATOR_LINEAR_RANGE(1550000 - 310000, 0xc9, 0xec, 50000),
+ REGULATOR_LINEAR_RANGE(3300000 - 310000, 0xed, 0xff, 0),
+};
+
+/* BD96806 uses the same buck voltage ranges as BD96805 */
+#define bd96806_tune_volts bd96805_tune_volts
+#define bd96806_buck_init_volts bd96805_buck_init_volts
+
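
To make the offset convention concrete (a worked example, assuming the BD96805 combines init and tune values the same way the BD96801 code does): the init tables are shifted down by the maximum negative tune so the unsigned tune word can be added directly:

	real_uV = (init_uV - 310000) + tune_uV,    tune_uV in [0, 620000]

which spans init_uV ± 310 mV. For a nominal 1.0 V rail, the init value is stored as 1000000 - 310000 = 690000, and the centred tune selector 0x00 (which bd96805_tune_volts maps to 310000) gives 690000 + 310000 = 1000000 uV.
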
static const struct linear_range bd96801_ldo_int_volts[] = {
REGULATOR_LINEAR_RANGE(300000, 0x00, 0x78, 25000),
REGULATOR_LINEAR_RANGE(3300000, 0x79, 0xff, 0),
@@ -198,89 +224,89 @@ struct bd96801_irqinfo {
static const struct bd96801_irqinfo buck1_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-h", 500,
- "bd96801-buck1-overcurr-h"),
+ "buck1-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-l", 500,
- "bd96801-buck1-overcurr-l"),
+ "buck1-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck1-over-curr-n", 500,
- "bd96801-buck1-overcurr-n"),
+ "buck1-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck1-over-voltage", 500,
- "bd96801-buck1-overvolt"),
+ "buck1-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck1-under-voltage", 500,
- "bd96801-buck1-undervolt"),
+ "buck1-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck1-over-temp", 500,
- "bd96801-buck1-thermal")
+ "buck1-thermal")
};
static const struct bd96801_irqinfo buck2_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-h", 500,
- "bd96801-buck2-overcurr-h"),
+ "buck2-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-l", 500,
- "bd96801-buck2-overcurr-l"),
+ "buck2-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck2-over-curr-n", 500,
- "bd96801-buck2-overcurr-n"),
+ "buck2-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck2-over-voltage", 500,
- "bd96801-buck2-overvolt"),
+ "buck2-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck2-under-voltage", 500,
- "bd96801-buck2-undervolt"),
+ "buck2-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck2-over-temp", 500,
- "bd96801-buck2-thermal")
+ "buck2-thermal")
};
static const struct bd96801_irqinfo buck3_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-h", 500,
- "bd96801-buck3-overcurr-h"),
+ "buck3-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-l", 500,
- "bd96801-buck3-overcurr-l"),
+ "buck3-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck3-over-curr-n", 500,
- "bd96801-buck3-overcurr-n"),
+ "buck3-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck3-over-voltage", 500,
- "bd96801-buck3-overvolt"),
+ "buck3-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck3-under-voltage", 500,
- "bd96801-buck3-undervolt"),
+ "buck3-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck3-over-temp", 500,
- "bd96801-buck3-thermal")
+ "buck3-thermal")
};
static const struct bd96801_irqinfo buck4_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-h", 500,
- "bd96801-buck4-overcurr-h"),
+ "buck4-overcurr-h"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-l", 500,
- "bd96801-buck4-overcurr-l"),
+ "buck4-overcurr-l"),
BD96801_IRQINFO(BD96801_PROT_OCP, "buck4-over-curr-n", 500,
- "bd96801-buck4-overcurr-n"),
+ "buck4-overcurr-n"),
BD96801_IRQINFO(BD96801_PROT_OVP, "buck4-over-voltage", 500,
- "bd96801-buck4-overvolt"),
+ "buck4-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "buck4-under-voltage", 500,
- "bd96801-buck4-undervolt"),
+ "buck4-undervolt"),
BD96801_IRQINFO(BD96801_PROT_TEMP, "buck4-over-temp", 500,
- "bd96801-buck4-thermal")
+ "buck4-thermal")
};
static const struct bd96801_irqinfo ldo5_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo5-overcurr", 500,
- "bd96801-ldo5-overcurr"),
+ "ldo5-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo5-over-voltage", 500,
- "bd96801-ldo5-overvolt"),
+ "ldo5-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo5-under-voltage", 500,
- "bd96801-ldo5-undervolt"),
+ "ldo5-undervolt"),
};
static const struct bd96801_irqinfo ldo6_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo6-overcurr", 500,
- "bd96801-ldo6-overcurr"),
+ "ldo6-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo6-over-voltage", 500,
- "bd96801-ldo6-overvolt"),
+ "ldo6-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo6-under-voltage", 500,
- "bd96801-ldo6-undervolt"),
+ "ldo6-undervolt"),
};
static const struct bd96801_irqinfo ldo7_irqinfo[] = {
BD96801_IRQINFO(BD96801_PROT_OCP, "ldo7-overcurr", 500,
- "bd96801-ldo7-overcurr"),
+ "ldo7-overcurr"),
BD96801_IRQINFO(BD96801_PROT_OVP, "ldo7-over-voltage", 500,
- "bd96801-ldo7-overvolt"),
+ "ldo7-overvolt"),
BD96801_IRQINFO(BD96801_PROT_UVP, "ldo7-under-voltage", 500,
- "bd96801-ldo7-undervolt"),
+ "ldo7-undervolt"),
};
struct bd96801_irq_desc {
@@ -302,6 +328,7 @@ struct bd96801_pmic_data {
struct bd96801_regulator_data regulator_data[BD96801_NUM_REGULATORS];
struct regmap *regmap;
int fatal_ind;
+ int num_regulators;
};
static int ldo_map_notif(int irq, struct regulator_irq_data *rid,
@@ -503,6 +530,70 @@ static int bd96801_walk_regulator_dt(struct device *dev, struct regmap *regmap,
 * case later. What we can easily do to prepare is to not use static global
* data for regulators though.
*/
+static const struct bd96801_pmic_data bd96802_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96802_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96802_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96802_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96802_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96802_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96802_tune_volts),
+ .n_voltages = BD96801_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96801_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96802_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96802_buck_init_volts),
+ },
+ },
+ .num_regulators = 2,
+};
+
static const struct bd96801_pmic_data bd96801_data = {
.regulator_data = {
{
@@ -688,11 +779,265 @@ static const struct bd96801_pmic_data bd96801_data = {
.ldo_vol_lvl = BD96801_LDO7_VOL_LVL_REG,
},
},
+ .num_regulators = 7,
};
-static int initialize_pmic_data(struct device *dev,
+static const struct bd96801_pmic_data bd96805_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ }, {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("buck3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK3,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK3_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK3_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK3_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck3_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck3_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("buck4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK4,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96805_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96805_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK4_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK4_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK4_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck4_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck4_irqinfo),
+ },
+ .init_ranges = bd96805_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96805_buck_init_volts),
+ }, {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("ldo5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO5,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO5_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO5_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo5_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo5_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO5_VOL_LVL_REG,
+ }, {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("ldo6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO6,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO6_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO6_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo6_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo6_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO6_VOL_LVL_REG,
+ }, {
+ .desc = {
+ .name = "ldo7",
+ .of_match = of_match_ptr("ldo7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_LDO7,
+ .ops = &bd96801_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96801_ldo_int_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96801_ldo_int_volts),
+ .n_voltages = BD96801_LDO_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_LDO7_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_LDO7_VSEL_REG,
+ .vsel_mask = BD96801_LDO_VSEL_MASK,
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&ldo7_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(ldo7_irqinfo),
+ },
+ .ldo_vol_lvl = BD96801_LDO7_VOL_LVL_REG,
+ },
+ },
+ .num_regulators = 7,
+};
+
+static const struct bd96801_pmic_data bd96806_data = {
+ .regulator_data = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("buck1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK1,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96806_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96806_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK1_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK1_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK1_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .init_ranges = bd96806_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96806_buck_init_volts),
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck1_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck1_irqinfo),
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("buck2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD96801_BUCK2,
+ .ops = &bd96801_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .linear_ranges = bd96806_tune_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd96806_tune_volts),
+ .n_voltages = BD96805_BUCK_VOLTS,
+ .enable_reg = BD96801_REG_ENABLE,
+ .enable_mask = BD96801_BUCK2_EN_MASK,
+ .enable_is_inverted = true,
+ .vsel_reg = BD96801_BUCK2_VSEL_REG,
+ .vsel_mask = BD96805_BUCK_VSEL_MASK,
+ .ramp_reg = BD96801_BUCK2_VSEL_REG,
+ .ramp_mask = BD96801_MASK_RAMP_DELAY,
+ .ramp_delay_table = &buck_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(buck_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .irq_desc = {
+ .irqinfo = (struct bd96801_irqinfo *)&buck2_irqinfo[0],
+ .num_irqs = ARRAY_SIZE(buck2_irqinfo),
+ },
+ .init_ranges = bd96806_buck_init_volts,
+ .num_ranges = ARRAY_SIZE(bd96806_buck_init_volts),
+ },
+ },
+ .num_regulators = 2,
+};
+
+static int initialize_pmic_data(struct platform_device *pdev,
struct bd96801_pmic_data *pdata)
{
+ struct device *dev = &pdev->dev;
int r, i;
/*
@@ -700,7 +1045,7 @@ static int initialize_pmic_data(struct device *dev,
* wish to modify IRQ information independently for each driver
* instance.
*/
- for (r = 0; r < BD96801_NUM_REGULATORS; r++) {
+ for (r = 0; r < pdata->num_regulators; r++) {
const struct bd96801_irqinfo *template;
struct bd96801_irqinfo *new;
int num_infos;
@@ -741,8 +1086,7 @@ static int bd96801_rdev_errb_irqs(struct platform_device *pdev,
int i;
void *retp;
static const char * const single_out_errb_irqs[] = {
- "bd96801-%s-pvin-err", "bd96801-%s-ovp-err",
- "bd96801-%s-uvp-err", "bd96801-%s-shdn-err",
+ "%s-pvin-err", "%s-ovp-err", "%s-uvp-err", "%s-shdn-err",
};
for (i = 0; i < ARRAY_SIZE(single_out_errb_irqs); i++) {
@@ -779,12 +1123,10 @@ static int bd96801_global_errb_irqs(struct platform_device *pdev,
int i, num_irqs;
void *retp;
static const char * const global_errb_irqs[] = {
- "bd96801-otp-err", "bd96801-dbist-err", "bd96801-eep-err",
- "bd96801-abist-err", "bd96801-prstb-err", "bd96801-drmoserr1",
- "bd96801-drmoserr2", "bd96801-slave-err", "bd96801-vref-err",
- "bd96801-tsd", "bd96801-uvlo-err", "bd96801-ovlo-err",
- "bd96801-osc-err", "bd96801-pon-err", "bd96801-poff-err",
- "bd96801-cmd-shdn-err", "bd96801-int-shdn-err"
+ "otp-err", "dbist-err", "eep-err", "abist-err", "prstb-err",
+ "drmoserr1", "drmoserr2", "slave-err", "vref-err", "tsd",
+ "uvlo-err", "ovlo-err", "osc-err", "pon-err", "poff-err",
+ "cmd-shdn-err", "int-shdn-err"
};
num_irqs = ARRAY_SIZE(global_errb_irqs);
@@ -869,6 +1211,7 @@ static int bd96801_probe(struct platform_device *pdev)
{
struct regulator_dev *ldo_errs_rdev_arr[BD96801_NUM_LDOS];
struct regulator_dev *all_rdevs[BD96801_NUM_REGULATORS];
+ struct bd96801_pmic_data *pdata_template;
struct bd96801_regulator_data *rdesc;
struct regulator_config config = {};
int ldo_errs_arr[BD96801_NUM_LDOS];
@@ -881,12 +1224,16 @@ static int bd96801_probe(struct platform_device *pdev)
parent = pdev->dev.parent;
- pdata = devm_kmemdup(&pdev->dev, &bd96801_data, sizeof(bd96801_data),
+ pdata_template = (struct bd96801_pmic_data *)platform_get_device_id(pdev)->driver_data;
+ if (!pdata_template)
+ return -ENODEV;
+
+ pdata = devm_kmemdup(&pdev->dev, pdata_template, sizeof(bd96801_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (initialize_pmic_data(&pdev->dev, pdata))
+ if (initialize_pmic_data(pdev, pdata))
return -ENOMEM;
pdata->regmap = dev_get_regmap(parent, NULL);
@@ -909,11 +1256,11 @@ static int bd96801_probe(struct platform_device *pdev)
use_errb = true;
ret = bd96801_walk_regulator_dt(&pdev->dev, pdata->regmap, rdesc,
- BD96801_NUM_REGULATORS);
+ pdata->num_regulators);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(pdata->regulator_data); i++) {
+ for (i = 0; i < pdata->num_regulators; i++) {
struct regulator_dev *rdev;
struct bd96801_irq_desc *idesc = &rdesc[i].irq_desc;
int j;
@@ -926,6 +1273,7 @@ static int bd96801_probe(struct platform_device *pdev)
rdesc[i].desc.name);
return PTR_ERR(rdev);
}
+
all_rdevs[i] = rdev;
/*
* LDOs don't have own temperature monitoring. If temperature
@@ -956,12 +1304,12 @@ static int bd96801_probe(struct platform_device *pdev)
if (temp_notif_ldos) {
int irq;
struct regulator_irq_desc tw_desc = {
- .name = "bd96801-core-thermal",
+ .name = "core-thermal",
.irq_off_ms = 500,
.map_event = ldo_map_notif,
};
- irq = platform_get_irq_byname(pdev, "bd96801-core-thermal");
+ irq = platform_get_irq_byname(pdev, "core-thermal");
if (irq < 0)
return irq;
@@ -975,14 +1323,17 @@ static int bd96801_probe(struct platform_device *pdev)
if (use_errb)
return bd96801_global_errb_irqs(pdev, all_rdevs,
- ARRAY_SIZE(all_rdevs));
+ pdata->num_regulators);
return 0;
}
static const struct platform_device_id bd96801_pmic_id[] = {
- { "bd96801-regulator", },
- { }
+ { "bd96801-regulator", (kernel_ulong_t)&bd96801_data },
+ { "bd96802-regulator", (kernel_ulong_t)&bd96802_data },
+ { "bd96805-regulator", (kernel_ulong_t)&bd96805_data },
+ { "bd96806-regulator", (kernel_ulong_t)&bd96806_data },
+ { },
};
MODULE_DEVICE_TABLE(platform, bd96801_pmic_id);
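
How the right table gets picked at probe time: the parent MFD driver (not part of this diff) registers a platform device whose name matches one of the ids above, and probe then recovers the per-variant data via platform_get_device_id(pdev)->driver_data, exactly as in the hunk above. A minimal sketch of the parent side, assuming an mfd_cell named after the chip — the cell name is an assumption, since the real MFD patch is elsewhere:

#include <linux/mfd/core.h>

/* Hypothetical parent-side cell list; the real MFD driver is not shown here */
static const struct mfd_cell bd96802_cells[] = {
	{ .name = "bd96802-regulator" },	/* matched against bd96801_pmic_id[] */
};
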
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 90629a756693..7a248dc8d2e2 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2617,7 +2617,7 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
mutex_lock(&regulator_list_mutex);
list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
- if (pin->gpiod == gpiod) {
+ if (gpiod_is_equal(pin->gpiod, gpiod)) {
rdev_dbg(rdev, "GPIO is already used\n");
goto update_ena_gpio_to_rdev;
}
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 17527a3f53b4..ef161eb0ca27 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -1129,7 +1129,7 @@ static int da9121_i2c_probe(struct i2c_client *i2c)
}
chip->pdata = i2c->dev.platform_data;
- chip->subvariant_id = (enum da9121_subvariant)i2c_get_match_data(i2c);
+ chip->subvariant_id = (kernel_ulong_t)i2c_get_match_data(i2c);
ret = da9121_assign_chip_model(i2c, chip);
if (ret < 0)
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index bd9447dac596..c282236959b1 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -147,6 +147,7 @@ struct fan53555_device_info {
unsigned int slew_mask;
const unsigned int *ramp_delay_table;
unsigned int n_ramp_values;
+ unsigned int enable_time;
unsigned int slew_rate;
};
@@ -282,6 +283,7 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates);
+ di->enable_time = 250;
di->vsel_count = FAN53526_NVOLTAGES;
return 0;
@@ -296,10 +298,12 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
case FAN53555_CHIP_REV_00:
di->vsel_min = 600000;
di->vsel_step = 10000;
+ di->enable_time = 400;
break;
case FAN53555_CHIP_REV_13:
di->vsel_min = 800000;
di->vsel_step = 10000;
+ di->enable_time = 400;
break;
default:
dev_err(di->dev,
@@ -311,13 +315,19 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
case FAN53555_CHIP_ID_01:
case FAN53555_CHIP_ID_03:
case FAN53555_CHIP_ID_05:
+ di->vsel_min = 600000;
+ di->vsel_step = 10000;
+ di->enable_time = 400;
+ break;
case FAN53555_CHIP_ID_08:
di->vsel_min = 600000;
di->vsel_step = 10000;
+ di->enable_time = 175;
break;
case FAN53555_CHIP_ID_04:
di->vsel_min = 603000;
di->vsel_step = 12826;
+ di->enable_time = 400;
break;
default:
dev_err(di->dev,
@@ -350,6 +360,7 @@ static int fan53555_voltages_setup_rockchip(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates);
+ di->enable_time = 360;
di->vsel_count = FAN53555_NVOLTAGES;
return 0;
@@ -372,6 +383,7 @@ static int rk8602_voltages_setup_rockchip(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates);
+ di->enable_time = 360;
di->vsel_count = RK8602_NVOLTAGES;
return 0;
@@ -395,6 +407,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates);
+ di->enable_time = 400;
di->vsel_count = FAN53555_NVOLTAGES;
return 0;
@@ -594,6 +607,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->ramp_mask = di->slew_mask;
rdesc->ramp_delay_table = di->ramp_delay_table;
rdesc->n_ramp_values = di->n_ramp_values;
+ rdesc->enable_time = di->enable_time;
rdesc->owner = THIS_MODULE;
rdev = devm_regulator_register(di->dev, &di->desc, config);
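
For context on the enable_time additions above: when a descriptor sets enable_time (in microseconds) and the ops provide no ->enable_time() callback, the regulator core waits that long after setting the enable bit before regulator_enable() returns. From a consumer's point of view (a sketch; "vdd" is a hypothetical supply name):

#include <linux/regulator/consumer.h>

static int power_up(struct device *dev)
{
	struct regulator *vdd = devm_regulator_get(dev, "vdd");
	int ret;

	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	ret = regulator_enable(vdd);
	if (ret)
		return ret;

	/*
	 * The core has already waited the descriptor's enable_time
	 * (e.g. 400 us for several FAN53555 variants above), so the
	 * rail is in regulation by the time regulator_enable() returns.
	 */
	return 0;
}
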
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 65927fa2ef16..75bd53445ba7 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -240,7 +240,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
struct regulator_config cfg = { };
struct regulator_dev *rdev;
enum gpiod_flags gflags;
- int ptr, ret, state, i;
+ int ptr, state, i;
drvdata = devm_kzalloc(dev, sizeof(struct gpio_regulator_data),
GFP_KERNEL);
@@ -345,11 +345,9 @@ static int gpio_regulator_probe(struct platform_device *pdev)
return PTR_ERR(cfg.ena_gpiod);
rdev = devm_regulator_register(dev, &drvdata->desc, &cfg);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
- dev_err(dev, "Failed to register regulator: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register regulator\n");
platform_set_drvdata(pdev, drvdata);
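
One behavioral note on the dev_err_probe() conversion above: besides saving lines, it silences the error message when the failure is -EPROBE_DEFER (that case is logged at debug level and recorded as the deferral reason, visible in /sys/kernel/debug/devices_deferred). Roughly:

/* Roughly what the dev_err_probe() call above does */
if (PTR_ERR(rdev) != -EPROBE_DEFER)
	dev_err(dev, "Failed to register regulator: %pe\n", rdev);
return PTR_ERR(rdev);
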
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 5e7171b9065a..41fd15adfd1f 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -40,11 +40,14 @@ static int max14577_reg_get_current_limit(struct regulator_dev *rdev)
struct max14577 *max14577 = rdev_get_drvdata(rdev);
const struct maxim_charger_current *limits =
&maxim_charger_currents[max14577->dev_type];
+ int ret;
if (rdev_get_id(rdev) != MAX14577_CHARGER)
return -EINVAL;
- max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, &reg_data);
+ ret = max14577_read_reg(rmap, MAX14577_CHG_REG_CHG_CTRL4, &reg_data);
+ if (ret < 0)
+ return ret;
if ((reg_data & CHGCTRL4_MBCICHWRCL_MASK) == 0)
return limits->min;
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index 59eb23d467ec..fcdd2d0317a5 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -5,6 +5,7 @@
// Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@idesonboard.com>
// Copyright (C) 2018 Avnet, Inc.
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -28,7 +29,7 @@
#define MAX20086_REG_ADC4 0x09
/* DEVICE IDs */
-#define MAX20086_DEVICE_ID_MAX20086 0x40
+#define MAX20086_DEVICE_ID_MAX20086 0x30
#define MAX20086_DEVICE_ID_MAX20087 0x20
#define MAX20086_DEVICE_ID_MAX20088 0x10
#define MAX20086_DEVICE_ID_MAX20089 0x00
@@ -132,23 +133,27 @@ static int max20086_regulators_register(struct max20086 *chip)
static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
{
- struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { };
- struct device_node *node;
+ struct of_regulator_match *matches;
unsigned int i;
int ret;
- node = of_get_child_by_name(chip->dev->of_node, "regulators");
+ struct device_node *node __free(device_node) =
+ of_get_child_by_name(chip->dev->of_node, "regulators");
if (!node) {
dev_err(chip->dev, "regulators node not found\n");
return -ENODEV;
}
+ matches = devm_kcalloc(chip->dev, chip->info->num_outputs,
+ sizeof(*matches), GFP_KERNEL);
+ if (!matches)
+ return -ENOMEM;
+
for (i = 0; i < chip->info->num_outputs; ++i)
matches[i].name = max20086_output_names[i];
ret = of_regulator_match(chip->dev, node, matches,
chip->info->num_outputs);
- of_node_put(node);
if (ret < 0) {
dev_err(chip->dev, "Failed to match regulators\n");
return -EINVAL;
@@ -259,7 +264,7 @@ static int max20086_i2c_probe(struct i2c_client *i2c)
* shutdown.
*/
flags = boot_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
- chip->ena_gpiod = devm_gpiod_get(chip->dev, "enable", flags);
+ chip->ena_gpiod = devm_gpiod_get_optional(chip->dev, "enable", flags);
if (IS_ERR(chip->ena_gpiod)) {
ret = PTR_ERR(chip->ena_gpiod);
dev_err(chip->dev, "Failed to get enable GPIO: %d\n", ret);
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index a56f3ab754fa..14d19a6d6655 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -9,6 +9,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/reboot.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -33,6 +34,7 @@ struct pca9450 {
struct device *dev;
struct regmap *regmap;
struct gpio_desc *sd_vsel_gpio;
+ struct notifier_block restart_nb;
enum pca9450_chip_type type;
unsigned int rcnt;
int irq;
@@ -965,6 +967,25 @@ static irqreturn_t pca9450_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static int pca9450_i2c_restart_handler(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct pca9450 *pca9450 = container_of(nb, struct pca9450, restart_nb);
+ struct i2c_client *i2c = container_of(pca9450->dev, struct i2c_client, dev);
+
+ dev_dbg(&i2c->dev, "Restarting device..\n");
+ if (i2c_smbus_write_byte_data(i2c, PCA9450_REG_SWRST, SW_RST_COMMAND) == 0) {
+ /* tRESTART is 250 ms, so 300 ms should be enough to make sure it happened */
+ mdelay(300);
+ /* When we get here, the PMIC didn't power cycle for some reason, so warn. */
+ dev_warn(&i2c->dev, "Device didn't respond to restart command\n");
+ } else {
+ dev_err(&i2c->dev, "Restart command failed\n");
+ }
+
+ return 0;
+}
+
static int pca9450_i2c_probe(struct i2c_client *i2c)
{
enum pca9450_chip_type type = (unsigned int)(uintptr_t)
@@ -1107,6 +1128,12 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450->sd_vsel_fixed_low =
of_property_read_bool(ldo5->dev.of_node, "nxp,sd-vsel-fixed-low");
+ pca9450->restart_nb.notifier_call = pca9450_i2c_restart_handler;
+ pca9450->restart_nb.priority = PCA9450_RESTART_HANDLER_PRIORITY;
+
+ if (register_restart_handler(&pca9450->restart_nb))
+ dev_warn(&i2c->dev, "Failed to register restart handler\n");
+
dev_info(&i2c->dev, "%s probed.\n",
type == PCA9450_TYPE_PCA9450A ? "pca9450a" :
(type == PCA9450_TYPE_PCA9451A ? "pca9451a" : "pca9450bc"));
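
Two general points about the restart handler above (kernel-wide notifier behavior, not specific to this patch): restart handlers are typically invoked with interrupts disabled, which is why the handler busy-waits with mdelay() rather than sleeping, and returning 0 is the same as NOTIFY_DONE, which lets any remaining handlers run. The expected shape of such a callback:

/* Generic shape of a restart notifier callback */
static int my_restart_handler(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	/* trigger the hardware reset here; atomic context, so mdelay() only */
	return NOTIFY_DONE;	/* let lower-priority handlers try as well */
}
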
diff --git a/drivers/regulator/pf9453-regulator.c b/drivers/regulator/pf9453-regulator.c
index ed6bf0f6c4fe..be627f49b617 100644
--- a/drivers/regulator/pf9453-regulator.c
+++ b/drivers/regulator/pf9453-regulator.c
@@ -214,7 +214,7 @@ static const struct regmap_config pf9453_regmap_config = {
.val_bits = 8,
.volatile_table = &pf9453_volatile_regs,
.max_register = PF9453_MAX_REG - 1,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/*
@@ -412,6 +412,7 @@ static int find_closest_bigger(unsigned int target, const unsigned int *table,
* pf9453_regulator_set_ramp_delay_regmap
*
* @rdev: regulator to operate on
+ * @ramp_delay: desired ramp delay value in microseconds
*
* Regulators that use regmap for their register I/O can set the ramp_reg
* and ramp_mask fields in their descriptor and then use this as their
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index d66a0f61637e..c1a41ce70b36 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -400,7 +400,7 @@ struct spmi_voltage_range {
* so that range[i].set_point_max_uV < range[i+1].set_point_min_uV.
*/
struct spmi_voltage_set_points {
- struct spmi_voltage_range *range;
+ const struct spmi_voltage_range *range;
int count;
unsigned n_voltages;
};
@@ -474,6 +474,9 @@ struct spmi_regulator_data {
.set_point_max_uV = _set_point_max_uV, \
.step_uV = _step_uV, \
.range_sel = _range_sel, \
+ .n_voltages = (_set_point_max_uV != 0) ? \
+ ((_set_point_max_uV - _set_point_min_uV) / _step_uV) + 1 : \
+ 0, \
}
#define DEFINE_SPMI_SET_POINTS(name) \
@@ -489,110 +492,110 @@ struct spmi_voltage_set_points name##_set_points = { \
* increasing and unique. The set_voltage callback functions expect these
* properties to hold.
*/
-static struct spmi_voltage_range pldo_ranges[] = {
+static const struct spmi_voltage_range pldo_ranges[] = {
SPMI_VOLTAGE_RANGE(2, 750000, 750000, 1537500, 1537500, 12500),
SPMI_VOLTAGE_RANGE(3, 1500000, 1550000, 3075000, 3075000, 25000),
SPMI_VOLTAGE_RANGE(4, 1750000, 3100000, 4900000, 4900000, 50000),
};
-static struct spmi_voltage_range nldo1_ranges[] = {
+static const struct spmi_voltage_range nldo1_ranges[] = {
SPMI_VOLTAGE_RANGE(2, 750000, 750000, 1537500, 1537500, 12500),
};
-static struct spmi_voltage_range nldo2_ranges[] = {
+static const struct spmi_voltage_range nldo2_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 375000, 0, 0, 1537500, 12500),
SPMI_VOLTAGE_RANGE(1, 375000, 375000, 768750, 768750, 6250),
SPMI_VOLTAGE_RANGE(2, 750000, 775000, 1537500, 1537500, 12500),
};
-static struct spmi_voltage_range nldo3_ranges[] = {
+static const struct spmi_voltage_range nldo3_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 375000, 375000, 1537500, 1537500, 12500),
SPMI_VOLTAGE_RANGE(1, 375000, 0, 0, 1537500, 12500),
SPMI_VOLTAGE_RANGE(2, 750000, 0, 0, 1537500, 12500),
};
-static struct spmi_voltage_range ln_ldo_ranges[] = {
+static const struct spmi_voltage_range ln_ldo_ranges[] = {
SPMI_VOLTAGE_RANGE(1, 690000, 690000, 1110000, 1110000, 60000),
SPMI_VOLTAGE_RANGE(0, 1380000, 1380000, 2220000, 2220000, 120000),
};
-static struct spmi_voltage_range smps_ranges[] = {
+static const struct spmi_voltage_range smps_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 375000, 375000, 1562500, 1562500, 12500),
SPMI_VOLTAGE_RANGE(1, 1550000, 1575000, 3125000, 3125000, 25000),
};
-static struct spmi_voltage_range ftsmps_ranges[] = {
+static const struct spmi_voltage_range ftsmps_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 0, 350000, 1275000, 1275000, 5000),
SPMI_VOLTAGE_RANGE(1, 0, 1280000, 2040000, 2040000, 10000),
};
-static struct spmi_voltage_range ftsmps2p5_ranges[] = {
+static const struct spmi_voltage_range ftsmps2p5_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 80000, 350000, 1355000, 1355000, 5000),
SPMI_VOLTAGE_RANGE(1, 160000, 1360000, 2200000, 2200000, 10000),
};
-static struct spmi_voltage_range ftsmps426_ranges[] = {
+static const struct spmi_voltage_range ftsmps426_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 0, 320000, 1352000, 1352000, 4000),
};
-static struct spmi_voltage_range boost_ranges[] = {
+static const struct spmi_voltage_range boost_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 4000000, 4000000, 5550000, 5550000, 50000),
};
-static struct spmi_voltage_range boost_byp_ranges[] = {
+static const struct spmi_voltage_range boost_byp_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 2500000, 2500000, 5200000, 5650000, 50000),
};
-static struct spmi_voltage_range ult_lo_smps_ranges[] = {
+static const struct spmi_voltage_range ult_lo_smps_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 375000, 375000, 1562500, 1562500, 12500),
SPMI_VOLTAGE_RANGE(1, 750000, 0, 0, 1525000, 25000),
};
-static struct spmi_voltage_range ult_ho_smps_ranges[] = {
+static const struct spmi_voltage_range ult_ho_smps_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1550000, 1550000, 2325000, 2325000, 25000),
};
-static struct spmi_voltage_range ult_nldo_ranges[] = {
+static const struct spmi_voltage_range ult_nldo_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 375000, 375000, 1537500, 1537500, 12500),
};
-static struct spmi_voltage_range ult_pldo_ranges[] = {
+static const struct spmi_voltage_range ult_pldo_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1750000, 1750000, 3337500, 3337500, 12500),
};
-static struct spmi_voltage_range pldo660_ranges[] = {
+static const struct spmi_voltage_range pldo660_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1504000, 1504000, 3544000, 3544000, 8000),
};
-static struct spmi_voltage_range nldo660_ranges[] = {
+static const struct spmi_voltage_range nldo660_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 320000, 320000, 1304000, 1304000, 8000),
};
-static struct spmi_voltage_range ht_lvpldo_ranges[] = {
+static const struct spmi_voltage_range ht_lvpldo_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1504000, 1504000, 2000000, 2000000, 8000),
};
-static struct spmi_voltage_range ht_nldo_ranges[] = {
+static const struct spmi_voltage_range ht_nldo_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 312000, 312000, 1304000, 1304000, 8000),
};
-static struct spmi_voltage_range hfs430_ranges[] = {
+static const struct spmi_voltage_range hfs430_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 320000, 320000, 2040000, 2040000, 8000),
};
-static struct spmi_voltage_range ht_p150_ranges[] = {
+static const struct spmi_voltage_range ht_p150_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1616000, 1616000, 3304000, 3304000, 8000),
};
-static struct spmi_voltage_range ht_p600_ranges[] = {
+static const struct spmi_voltage_range ht_p600_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1704000, 1704000, 1896000, 1896000, 8000),
};
-static struct spmi_voltage_range nldo_510_ranges[] = {
+static const struct spmi_voltage_range nldo_510_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 320000, 320000, 1304000, 1304000, 8000),
};
-static struct spmi_voltage_range ftsmps510_ranges[] = {
+static const struct spmi_voltage_range ftsmps510_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 300000, 300000, 1372000, 1372000, 4000),
};
@@ -1676,18 +1679,10 @@ static const struct spmi_regulator_mapping supported_regulators[] = {
static void spmi_calculate_num_voltages(struct spmi_voltage_set_points *points)
{
- unsigned int n;
- struct spmi_voltage_range *range = points->range;
-
- for (; range < points->range + points->count; range++) {
- n = 0;
- if (range->set_point_max_uV) {
- n = range->set_point_max_uV - range->set_point_min_uV;
- n = (n / range->step_uV) + 1;
- }
- range->n_voltages = n;
- points->n_voltages += n;
- }
+ const struct spmi_voltage_range *range = points->range;
+
+ for (; range < points->range + points->count; range++)
+ points->n_voltages += range->n_voltages;
}
static int spmi_regulator_match(struct spmi_regulator *vreg, u16 force_type)
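
To check the new compile-time n_voltages arithmetic against one entry: the first pldo range, SPMI_VOLTAGE_RANGE(2, 750000, 750000, 1537500, 1537500, 12500), yields (1537500 - 750000) / 12500 + 1 = 63 + 1 = 64 set points, while ranges with set_point_max_uV == 0 (such as the first nldo2 range) contribute 0 — the same results the removed runtime loop produced, now computable for const tables.
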
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index 6c3b6bfac961..58dbf8bffa5d 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -6,12 +6,14 @@
*/
#include <linux/backlight.h>
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -93,7 +95,7 @@ static int attiny_lcd_power_enable(struct regulator_dev *rdev)
{
struct attiny_lcd *state = rdev_get_drvdata(rdev);
- mutex_lock(&state->lock);
+ guard(mutex)(&state->lock);
/* Ensure bridge, and tp stay in reset */
attiny_set_port_state(state, REG_PORTC, 0);
@@ -114,8 +116,6 @@ static int attiny_lcd_power_enable(struct regulator_dev *rdev)
msleep(80);
- mutex_unlock(&state->lock);
-
return 0;
}
@@ -123,7 +123,7 @@ static int attiny_lcd_power_disable(struct regulator_dev *rdev)
{
struct attiny_lcd *state = rdev_get_drvdata(rdev);
- mutex_lock(&state->lock);
+ guard(mutex)(&state->lock);
regmap_write(rdev->regmap, REG_PWM, 0);
usleep_range(5000, 10000);
@@ -135,8 +135,6 @@ static int attiny_lcd_power_disable(struct regulator_dev *rdev)
attiny_set_port_state(state, REG_PORTC, 0);
msleep(30);
- mutex_unlock(&state->lock);
-
return 0;
}
@@ -144,19 +142,17 @@ static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev)
{
struct attiny_lcd *state = rdev_get_drvdata(rdev);
unsigned int data;
- int ret, i;
-
- mutex_lock(&state->lock);
-
- for (i = 0; i < 10; i++) {
- ret = regmap_read(rdev->regmap, REG_PORTC, &data);
- if (!ret)
- break;
- usleep_range(10000, 12000);
+ int ret = 0, i;
+
+ scoped_guard(mutex, &state->lock) {
+ for (i = 0; i < 10; i++) {
+ ret = regmap_read(rdev->regmap, REG_PORTC, &data);
+ if (!ret)
+ break;
+ usleep_range(10000, 12000);
+ }
}
- mutex_unlock(&state->lock);
-
if (ret < 0)
return ret;
@@ -189,7 +185,7 @@ static int attiny_update_status(struct backlight_device *bl)
int brightness = backlight_get_brightness(bl);
int ret, i;
- mutex_lock(&state->lock);
+ guard(mutex)(&state->lock);
for (i = 0; i < 10; i++) {
ret = regmap_write(regmap, REG_PWM, brightness);
@@ -197,8 +193,6 @@ static int attiny_update_status(struct backlight_device *bl)
break;
}
- mutex_unlock(&state->lock);
-
return ret;
}
@@ -211,15 +205,12 @@ static int attiny_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
return GPIO_LINE_DIRECTION_OUT;
}
-static void attiny_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
+static int attiny_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
{
struct attiny_lcd *state = gpiochip_get_data(gc);
u8 last_val;
- if (off >= NUM_GPIO)
- return;
-
- mutex_lock(&state->lock);
+ guard(mutex)(&state->lock);
last_val = attiny_get_port_state(state, mappings[off].reg);
if (val)
@@ -242,7 +233,7 @@ static void attiny_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
msleep(100);
}
- mutex_unlock(&state->lock);
+ return 0;
}
static int attiny_i2c_read(struct i2c_client *client, u8 reg, unsigned int *buf)
@@ -296,7 +287,10 @@ static int attiny_i2c_probe(struct i2c_client *i2c)
if (!state)
return -ENOMEM;
- mutex_init(&state->lock);
+ ret = devm_mutex_init(&i2c->dev, &state->lock);
+ if (ret)
+ return ret;
+
i2c_set_clientdata(i2c, state);
regmap = devm_regmap_init_i2c(i2c, &attiny_regmap_config);
@@ -304,13 +298,13 @@ static int attiny_i2c_probe(struct i2c_client *i2c)
ret = PTR_ERR(regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- goto error;
+ return ret;
}
ret = attiny_i2c_read(i2c, REG_ID, &data);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to read REG_ID reg: %d\n", ret);
- goto error;
+ return ret;
}
switch (data) {
@@ -319,8 +313,7 @@ static int attiny_i2c_probe(struct i2c_client *i2c)
break;
default:
dev_err(&i2c->dev, "Unknown Atmel firmware revision: 0x%02x\n", data);
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
regmap_write(regmap, REG_POWERON, 0);
@@ -336,8 +329,7 @@ static int attiny_i2c_probe(struct i2c_client *i2c)
rdev = devm_regulator_register(&i2c->dev, &attiny_regulator, &config);
if (IS_ERR(rdev)) {
dev_err(&i2c->dev, "Failed to register ATTINY regulator\n");
- ret = PTR_ERR(rdev);
- goto error;
+ return PTR_ERR(rdev);
}
props.type = BACKLIGHT_RAW;
@@ -348,10 +340,8 @@ static int attiny_i2c_probe(struct i2c_client *i2c)
bl = devm_backlight_device_register(&i2c->dev, dev_name(&i2c->dev),
&i2c->dev, state, &attiny_bl,
&props);
- if (IS_ERR(bl)) {
- ret = PTR_ERR(bl);
- goto error;
- }
+ if (IS_ERR(bl))
+ return PTR_ERR(bl);
bl->props.brightness = 0xff;
@@ -361,31 +351,17 @@ static int attiny_i2c_probe(struct i2c_client *i2c)
state->gc.base = -1;
state->gc.ngpio = NUM_GPIO;
- state->gc.set = attiny_gpio_set;
+ state->gc.set_rv = attiny_gpio_set;
state->gc.get_direction = attiny_gpio_get_direction;
state->gc.can_sleep = true;
ret = devm_gpiochip_add_data(&i2c->dev, &state->gc, state);
- if (ret) {
+ if (ret)
dev_err(&i2c->dev, "Failed to create gpiochip: %d\n", ret);
- goto error;
- }
-
- return 0;
-
-error:
- mutex_destroy(&state->lock);
return ret;
}
-static void attiny_i2c_remove(struct i2c_client *client)
-{
- struct attiny_lcd *state = i2c_get_clientdata(client);
-
- mutex_destroy(&state->lock);
-}
-
static const struct of_device_id attiny_dt_ids[] = {
{ .compatible = "raspberrypi,7inch-touchscreen-panel-regulator" },
{},
@@ -399,7 +375,6 @@ static struct i2c_driver attiny_regulator_driver = {
.of_match_table = attiny_dt_ids,
},
.probe = attiny_i2c_probe,
- .remove = attiny_i2c_remove,
};
module_i2c_driver(attiny_regulator_driver);
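
The guard(mutex)/scoped_guard() conversions above rely on the scope-based helpers from linux/cleanup.h: the unlock runs automatically when the guard leaves scope, which is what lets the error paths simply return, and switching to devm_mutex_init() is what lets the mutex_destroy()-on-remove code go away. A minimal self-contained sketch (demo() and some_condition() are illustrative):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static bool some_condition(void);	/* illustrative */

static int demo(void)
{
	guard(mutex)(&demo_lock);	/* unlocked on every return below */

	if (!some_condition())
		return -EINVAL;		/* no explicit mutex_unlock() needed */

	return 0;
}
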
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index d25cd81e3f36..fe2631378ccd 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -5,7 +5,7 @@
#include <linux/cleanup.h>
#include <linux/err.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -35,8 +35,8 @@ struct s5m8767_info {
u8 buck2_vol[8];
u8 buck3_vol[8];
u8 buck4_vol[8];
- int buck_gpios[3];
- int buck_ds[3];
+ struct gpio_desc *buck_gpios[3];
+ struct gpio_desc *buck_ds[3];
int buck_gpioindex;
};
@@ -272,9 +272,9 @@ static inline int s5m8767_set_high(struct s5m8767_info *s5m8767)
{
int temp_index = s5m8767->buck_gpioindex;
- gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
- gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1);
- gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1);
+ gpiod_set_value(s5m8767->buck_gpios[0], !!(temp_index & BIT(2)));
+ gpiod_set_value(s5m8767->buck_gpios[1], !!(temp_index & BIT(1)));
+ gpiod_set_value(s5m8767->buck_gpios[2], !!(temp_index & BIT(0)));
return 0;
}
@@ -283,9 +283,9 @@ static inline int s5m8767_set_low(struct s5m8767_info *s5m8767)
{
int temp_index = s5m8767->buck_gpioindex;
- gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1);
- gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1);
- gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
+ gpiod_set_value(s5m8767->buck_gpios[2], !!(temp_index & BIT(0)));
+ gpiod_set_value(s5m8767->buck_gpios[1], !!(temp_index & BIT(1)));
+ gpiod_set_value(s5m8767->buck_gpios[0], !!(temp_index & BIT(2)));
return 0;
}
@@ -482,42 +482,6 @@ static int s5m8767_enable_ext_control(struct s5m8767_info *s5m8767,
#ifdef CONFIG_OF
-static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev,
- struct sec_platform_data *pdata,
- struct device_node *pmic_np)
-{
- int i, gpio;
-
- for (i = 0; i < 3; i++) {
- gpio = of_get_named_gpio(pmic_np,
- "s5m8767,pmic-buck-dvs-gpios", i);
- if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
- return -EINVAL;
- }
- pdata->buck_gpios[i] = gpio;
- }
- return 0;
-}
-
-static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev,
- struct sec_platform_data *pdata,
- struct device_node *pmic_np)
-{
- int i, gpio;
-
- for (i = 0; i < 3; i++) {
- gpio = of_get_named_gpio(pmic_np,
- "s5m8767,pmic-buck-ds-gpios", i);
- if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
- return -EINVAL;
- }
- pdata->buck_ds[i] = gpio;
- }
- return 0;
-}
-
static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
struct sec_platform_data *pdata)
{
@@ -525,7 +489,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
struct device_node *pmic_np, *reg_np;
struct sec_regulator_data *rdata;
struct sec_opmode_data *rmode;
- unsigned int i, dvs_voltage_nr = 8, ret;
+ unsigned int i, dvs_voltage_nr = 8;
pmic_np = iodev->dev->of_node;
if (!pmic_np) {
@@ -635,10 +599,6 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
pdata->buck4_gpiodvs) {
- ret = s5m8767_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
- if (ret)
- return -EINVAL;
-
if (of_property_read_u32(pmic_np,
"s5m8767,pmic-buck-default-dvs-idx",
&pdata->buck_default_idx)) {
@@ -652,10 +612,6 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
}
}
- ret = s5m8767_pmic_dt_parse_ds_gpio(iodev, pdata, pmic_np);
- if (ret)
- return -EINVAL;
-
pdata->buck2_ramp_enable = of_property_read_bool(pmic_np, "s5m8767,pmic-buck2-ramp-enable");
pdata->buck3_ramp_enable = of_property_read_bool(pmic_np, "s5m8767,pmic-buck3-ramp-enable");
pdata->buck4_ramp_enable = of_property_read_bool(pmic_np, "s5m8767,pmic-buck4-ramp-enable");
@@ -684,6 +640,8 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct s5m8767_info *s5m8767;
int i, ret, buck_init;
+ const char *gpiods_names[3] = { "S5M8767 DS2", "S5M8767 DS3", "S5M8767 DS4" };
+ const char *gpiodvs_names[3] = { "S5M8767 SET1", "S5M8767 SET2", "S5M8767 SET3" };
if (!pdata) {
dev_err(pdev->dev.parent, "Platform data not supplied\n");
@@ -731,12 +689,6 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
s5m8767->buck2_gpiodvs = pdata->buck2_gpiodvs;
s5m8767->buck3_gpiodvs = pdata->buck3_gpiodvs;
s5m8767->buck4_gpiodvs = pdata->buck4_gpiodvs;
- s5m8767->buck_gpios[0] = pdata->buck_gpios[0];
- s5m8767->buck_gpios[1] = pdata->buck_gpios[1];
- s5m8767->buck_gpios[2] = pdata->buck_gpios[2];
- s5m8767->buck_ds[0] = pdata->buck_ds[0];
- s5m8767->buck_ds[1] = pdata->buck_ds[1];
- s5m8767->buck_ds[2] = pdata->buck_ds[2];
s5m8767->ramp_delay = pdata->buck_ramp_delay;
s5m8767->buck2_ramp = pdata->buck2_ramp_enable;
@@ -787,58 +739,36 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
pdata->buck4_gpiodvs) {
+ for (i = 0; i < 3; i++) {
+ enum gpiod_flags flags;
- if (!gpio_is_valid(pdata->buck_gpios[0]) ||
- !gpio_is_valid(pdata->buck_gpios[1]) ||
- !gpio_is_valid(pdata->buck_gpios[2])) {
- dev_err(&pdev->dev, "GPIO NOT VALID\n");
- return -EINVAL;
- }
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[0],
- "S5M8767 SET1");
- if (ret)
- return ret;
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[1],
- "S5M8767 SET2");
- if (ret)
- return ret;
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[2],
- "S5M8767 SET3");
- if (ret)
- return ret;
+ if (s5m8767->buck_gpioindex & BIT(2 - i))
+ flags = GPIOD_OUT_HIGH;
+ else
+ flags = GPIOD_OUT_LOW;
+
+ s5m8767->buck_gpios[i] = devm_gpiod_get_index(iodev->dev,
+ "s5m8767,pmic-buck-dvs", i,
+ flags);
+ if (IS_ERR(s5m8767->buck_gpios[i])) {
+ return dev_err_probe(iodev->dev, PTR_ERR(s5m8767->buck_gpios[i]),
+ "invalid gpio[%d]\n", i);
+ }
- /* SET1 GPIO */
- gpio_direction_output(pdata->buck_gpios[0],
- (s5m8767->buck_gpioindex >> 2) & 0x1);
- /* SET2 GPIO */
- gpio_direction_output(pdata->buck_gpios[1],
- (s5m8767->buck_gpioindex >> 1) & 0x1);
- /* SET3 GPIO */
- gpio_direction_output(pdata->buck_gpios[2],
- (s5m8767->buck_gpioindex >> 0) & 0x1);
+ gpiod_set_consumer_name(s5m8767->buck_gpios[i], gpiodvs_names[i]);
+ }
}
- ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[0], "S5M8767 DS2");
- if (ret)
- return ret;
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[1], "S5M8767 DS3");
- if (ret)
- return ret;
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[2], "S5M8767 DS4");
- if (ret)
- return ret;
-
- /* DS2 GPIO */
- gpio_direction_output(pdata->buck_ds[0], 0x0);
- /* DS3 GPIO */
- gpio_direction_output(pdata->buck_ds[1], 0x0);
- /* DS4 GPIO */
- gpio_direction_output(pdata->buck_ds[2], 0x0);
+ for (i = 0; i < 3; i++) {
+ s5m8767->buck_ds[i] = devm_gpiod_get_index(iodev->dev,
+ "s5m8767,pmic-buck-ds", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(s5m8767->buck_ds[i])) {
+ return dev_err_probe(iodev->dev, PTR_ERR(s5m8767->buck_ds[i]),
+ "can't get GPIO %d\n", i);
+ }
+ gpiod_set_consumer_name(s5m8767->buck_ds[i], gpiods_names[i]);
+ }
regmap_update_bits(s5m8767->iodev->regmap_pmic,
S5M8767_REG_BUCK2CTRL, 1 << 1,
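A note on the descriptor conversion above: once probe holds GPIO descriptors, a later
DVS index change only needs a gpiod_set_value() per SET line. The following is a
minimal sketch of such an update helper; the helper name is hypothetical and not part
of this patch, but it mirrors the BIT(2 - i) mapping used at probe time (SET1 carries
the most significant bit of the index):

	/* Hypothetical sketch: drive a 3-bit DVS index onto SET1..SET3 */
	static void s5m8767_apply_dvs_index(struct s5m8767_info *s5m8767, int idx)
	{
		int i;

		for (i = 0; i < 3; i++)
			gpiod_set_value(s5m8767->buck_gpios[i],
					!!(idx & BIT(2 - i)));
	}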
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
index aa65077f9d41..b16b300d7f45 100644
--- a/drivers/regulator/tps65219-regulator.c
+++ b/drivers/regulator/tps65219-regulator.c
@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
//
-// tps65219-regulator.c
-//
-// Regulator driver for TPS65219 PMIC
+// TPS65214/TPS65215/TPS65219 PMIC Regulator Driver
//
// Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+// Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
//
// This implementation derived from tps65218 authored by
// "J Keerthy <j-keerthy@ti.com>"
@@ -30,6 +29,11 @@ struct tps65219_regulator_irq_type {
unsigned long event;
};
+static struct tps65219_regulator_irq_type tps65215_regulator_irq_types[] = {
+ { "SENSOR_3_WARM", "SENSOR3", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN},
+ { "SENSOR_3_HOT", "SENSOR3", "hot temperature", REGULATOR_EVENT_OVER_TEMP},
+};
+
static struct tps65219_regulator_irq_type tps65219_regulator_irq_types[] = {
{ "LDO3_SCG", "LDO3", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
{ "LDO3_OC", "LDO3", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
@@ -37,6 +41,16 @@ static struct tps65219_regulator_irq_type tps65219_regulator_irq_types[] = {
{ "LDO4_SCG", "LDO4", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
{ "LDO4_OC", "LDO4", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
{ "LDO4_UV", "LDO4", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "LDO3_RV", "LDO3", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO4_RV", "LDO4", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO3_RV_SD", "LDO3", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO4_RV_SD", "LDO4", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "SENSOR_3_WARM", "SENSOR3", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN},
+ { "SENSOR_3_HOT", "SENSOR3", "hot temperature", REGULATOR_EVENT_OVER_TEMP},
+};
+
+/* All of TPS65214's irq types are the same as common_regulator_irq_types */
+static struct tps65219_regulator_irq_type common_regulator_irq_types[] = {
{ "LDO1_SCG", "LDO1", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
{ "LDO1_OC", "LDO1", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
{ "LDO1_UV", "LDO1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
@@ -60,8 +74,6 @@ static struct tps65219_regulator_irq_type tps65219_regulator_irq_types[] = {
{ "BUCK3_RV", "BUCK3", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
{ "LDO1_RV", "LDO1", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
{ "LDO2_RV", "LDO2", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
- { "LDO3_RV", "LDO3", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
- { "LDO4_RV", "LDO4", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
{ "BUCK1_RV_SD", "BUCK1", "residual voltage on shutdown",
REGULATOR_EVENT_OVER_VOLTAGE_WARN },
{ "BUCK2_RV_SD", "BUCK2", "residual voltage on shutdown",
@@ -70,13 +82,9 @@ static struct tps65219_regulator_irq_type tps65219_regulator_irq_types[] = {
REGULATOR_EVENT_OVER_VOLTAGE_WARN },
{ "LDO1_RV_SD", "LDO1", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
{ "LDO2_RV_SD", "LDO2", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
- { "LDO3_RV_SD", "LDO3", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
- { "LDO4_RV_SD", "LDO4", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
- { "SENSOR_3_WARM", "SENSOR3", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN},
{ "SENSOR_2_WARM", "SENSOR2", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
{ "SENSOR_1_WARM", "SENSOR1", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
{ "SENSOR_0_WARM", "SENSOR0", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
- { "SENSOR_3_HOT", "SENSOR3", "hot temperature", REGULATOR_EVENT_OVER_TEMP},
{ "SENSOR_2_HOT", "SENSOR2", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
{ "SENSOR_1_HOT", "SENSOR1", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
{ "SENSOR_0_HOT", "SENSOR0", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
@@ -125,12 +133,28 @@ static const struct linear_range bucks_ranges[] = {
REGULATOR_LINEAR_RANGE(3400000, 0x34, 0x3f, 0),
};
-static const struct linear_range ldos_1_2_ranges[] = {
+static const struct linear_range ldo_1_range[] = {
REGULATOR_LINEAR_RANGE(600000, 0x0, 0x37, 50000),
REGULATOR_LINEAR_RANGE(3400000, 0x38, 0x3f, 0),
};
-static const struct linear_range ldos_3_4_ranges[] = {
+static const struct linear_range tps65214_ldo_1_2_range[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2, 0),
+ REGULATOR_LINEAR_RANGE(650000, 0x3, 0x37, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x38, 0x3F, 0),
+};
+
+static const struct linear_range tps65215_ldo_2_range[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x0, 0xC, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x36, 0x3F, 0),
+};
+
+static const struct linear_range tps65219_ldo_2_range[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x37, 50000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x38, 0x3f, 0),
+};
+
+static const struct linear_range tps65219_ldos_3_4_range[] = {
REGULATOR_LINEAR_RANGE(1200000, 0x0, 0xC, 0),
REGULATOR_LINEAR_RANGE(1250000, 0xD, 0x35, 50000),
REGULATOR_LINEAR_RANGE(3300000, 0x36, 0x3F, 0),
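To make the selector encoding in these tables concrete, here is a worked decode of
tps65214_ldo_1_2_range, derived purely from the range entries above:

	/*
	 * sel 0x00..0x02 -> 600000 uV (flat range)
	 * sel 0x03       -> 650000 uV, then +50000 uV per step up to
	 * sel 0x37       -> 650000 + (0x37 - 0x03) * 50000 = 3250000 uV
	 * sel 0x38..0x3F -> 3300000 uV (flat range)
	 */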
@@ -174,7 +198,7 @@ static unsigned int tps65219_get_mode(struct regulator_dev *dev)
}
/* Operations permitted on BUCK1/2/3 */
-static const struct regulator_ops tps65219_bucks_ops = {
+static const struct regulator_ops bucks_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -189,7 +213,7 @@ static const struct regulator_ops tps65219_bucks_ops = {
};
/* Operations permitted on LDO1/2 */
-static const struct regulator_ops tps65219_ldos_1_2_ops = {
+static const struct regulator_ops ldos_1_2_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -204,7 +228,7 @@ static const struct regulator_ops tps65219_ldos_1_2_ops = {
};
/* Operations permitted on LDO3/4 */
-static const struct regulator_ops tps65219_ldos_3_4_ops = {
+static const struct regulator_ops ldos_3_4_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -216,55 +240,98 @@ static const struct regulator_ops tps65219_ldos_3_4_ops = {
.map_voltage = regulator_map_voltage_linear_range,
};
-static const struct regulator_desc regulators[] = {
+static const struct regulator_desc common_regs[] = {
TPS65219_REGULATOR("BUCK1", "buck1", TPS65219_BUCK_1,
- REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ REGULATOR_VOLTAGE, bucks_ops, 64,
TPS65219_REG_BUCK1_VOUT,
TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
TPS65219_REG_ENABLE_CTRL,
TPS65219_ENABLE_BUCK1_EN_MASK, 0, 0, bucks_ranges,
3, 4000, 0, NULL, 0, 0),
TPS65219_REGULATOR("BUCK2", "buck2", TPS65219_BUCK_2,
- REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ REGULATOR_VOLTAGE, bucks_ops, 64,
TPS65219_REG_BUCK2_VOUT,
TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
TPS65219_REG_ENABLE_CTRL,
TPS65219_ENABLE_BUCK2_EN_MASK, 0, 0, bucks_ranges,
3, 4000, 0, NULL, 0, 0),
TPS65219_REGULATOR("BUCK3", "buck3", TPS65219_BUCK_3,
- REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ REGULATOR_VOLTAGE, bucks_ops, 64,
TPS65219_REG_BUCK3_VOUT,
TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
TPS65219_REG_ENABLE_CTRL,
TPS65219_ENABLE_BUCK3_EN_MASK, 0, 0, bucks_ranges,
3, 0, 0, NULL, 0, 0),
+};
+
+static const struct regulator_desc tps65214_regs[] = {
+	/* TPS65214's LDO3 pin maps to TPS65219's LDO3 pin */
+ TPS65219_REGULATOR("LDO1", "ldo1", TPS65214_LDO_1,
+ REGULATOR_VOLTAGE, ldos_3_4_ops, 64,
+ TPS65214_REG_LDO1_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO3_EN_MASK, 0, 0, tps65214_ldo_1_2_range,
+ 3, 0, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("LDO2", "ldo2", TPS65214_LDO_2,
+ REGULATOR_VOLTAGE, ldos_3_4_ops, 64,
+ TPS65214_REG_LDO2_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO2_EN_MASK, 0, 0, tps65214_ldo_1_2_range,
+ 3, 0, 0, NULL, 0, 0),
+};
+
+static const struct regulator_desc tps65215_regs[] = {
+ /*
+	 * TPS65215's LDO1 is the same as TPS65219's LDO1. LDO1 is
+	 * configurable as a load switch and supports bypass mode.
+	 * TPS65215's LDO2 is the same as TPS65219's LDO3.
+ */
TPS65219_REGULATOR("LDO1", "ldo1", TPS65219_LDO_1,
- REGULATOR_VOLTAGE, tps65219_ldos_1_2_ops, 64,
+ REGULATOR_VOLTAGE, ldos_1_2_ops, 64,
TPS65219_REG_LDO1_VOUT,
TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
TPS65219_REG_ENABLE_CTRL,
- TPS65219_ENABLE_LDO1_EN_MASK, 0, 0, ldos_1_2_ranges,
+ TPS65219_ENABLE_LDO1_EN_MASK, 0, 0, ldo_1_range,
+ 2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
+ TPS65219_REGULATOR("LDO2", "ldo2", TPS65215_LDO_2,
+ REGULATOR_VOLTAGE, ldos_3_4_ops, 64,
+ TPS65215_REG_LDO2_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65215_ENABLE_LDO2_EN_MASK, 0, 0, tps65215_ldo_2_range,
+ 2, 0, 0, NULL, 0, 0),
+};
+
+static const struct regulator_desc tps65219_regs[] = {
+ TPS65219_REGULATOR("LDO1", "ldo1", TPS65219_LDO_1,
+ REGULATOR_VOLTAGE, ldos_1_2_ops, 64,
+ TPS65219_REG_LDO1_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO1_EN_MASK, 0, 0, ldo_1_range,
2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
TPS65219_REGULATOR("LDO2", "ldo2", TPS65219_LDO_2,
- REGULATOR_VOLTAGE, tps65219_ldos_1_2_ops, 64,
+ REGULATOR_VOLTAGE, ldos_1_2_ops, 64,
TPS65219_REG_LDO2_VOUT,
TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
TPS65219_REG_ENABLE_CTRL,
- TPS65219_ENABLE_LDO2_EN_MASK, 0, 0, ldos_1_2_ranges,
+ TPS65219_ENABLE_LDO2_EN_MASK, 0, 0, tps65219_ldo_2_range,
2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
TPS65219_REGULATOR("LDO3", "ldo3", TPS65219_LDO_3,
- REGULATOR_VOLTAGE, tps65219_ldos_3_4_ops, 64,
+ REGULATOR_VOLTAGE, ldos_3_4_ops, 64,
TPS65219_REG_LDO3_VOUT,
TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
TPS65219_REG_ENABLE_CTRL,
- TPS65219_ENABLE_LDO3_EN_MASK, 0, 0, ldos_3_4_ranges,
+ TPS65219_ENABLE_LDO3_EN_MASK, 0, 0, tps65219_ldos_3_4_range,
3, 0, 0, NULL, 0, 0),
TPS65219_REGULATOR("LDO4", "ldo4", TPS65219_LDO_4,
- REGULATOR_VOLTAGE, tps65219_ldos_3_4_ops, 64,
+ REGULATOR_VOLTAGE, ldos_3_4_ops, 64,
TPS65219_REG_LDO4_VOUT,
TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
TPS65219_REG_ENABLE_CTRL,
- TPS65219_ENABLE_LDO4_EN_MASK, 0, 0, ldos_3_4_ranges,
+ TPS65219_ENABLE_LDO4_EN_MASK, 0, 0, tps65219_ldos_3_4_range,
3, 0, 0, NULL, 0, 0),
};
@@ -287,64 +354,141 @@ static irqreturn_t tps65219_regulator_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+struct tps65219_chip_data {
+ size_t rdesc_size;
+ size_t common_rdesc_size;
+ size_t dev_irq_size;
+ size_t common_irq_size;
+ const struct regulator_desc *rdesc;
+ const struct regulator_desc *common_rdesc;
+ struct tps65219_regulator_irq_type *irq_types;
+ struct tps65219_regulator_irq_type *common_irq_types;
+};
+
+static struct tps65219_chip_data chip_info_table[] = {
+ [TPS65214] = {
+ .rdesc = tps65214_regs,
+ .rdesc_size = ARRAY_SIZE(tps65214_regs),
+ .common_rdesc = common_regs,
+ .common_rdesc_size = ARRAY_SIZE(common_regs),
+ .irq_types = NULL,
+ .dev_irq_size = 0,
+ .common_irq_types = common_regulator_irq_types,
+ .common_irq_size = ARRAY_SIZE(common_regulator_irq_types),
+ },
+ [TPS65215] = {
+ .rdesc = tps65215_regs,
+ .rdesc_size = ARRAY_SIZE(tps65215_regs),
+ .common_rdesc = common_regs,
+ .common_rdesc_size = ARRAY_SIZE(common_regs),
+ .irq_types = tps65215_regulator_irq_types,
+ .dev_irq_size = ARRAY_SIZE(tps65215_regulator_irq_types),
+ .common_irq_types = common_regulator_irq_types,
+ .common_irq_size = ARRAY_SIZE(common_regulator_irq_types),
+ },
+ [TPS65219] = {
+ .rdesc = tps65219_regs,
+ .rdesc_size = ARRAY_SIZE(tps65219_regs),
+ .common_rdesc = common_regs,
+ .common_rdesc_size = ARRAY_SIZE(common_regs),
+ .irq_types = tps65219_regulator_irq_types,
+ .dev_irq_size = ARRAY_SIZE(tps65219_regulator_irq_types),
+ .common_irq_types = common_regulator_irq_types,
+ .common_irq_size = ARRAY_SIZE(common_regulator_irq_types),
+ },
+};
+
static int tps65219_regulator_probe(struct platform_device *pdev)
{
- struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct tps65219_regulator_irq_data *irq_data;
+ struct tps65219_regulator_irq_type *irq_type;
+ struct tps65219_chip_data *pmic;
struct regulator_dev *rdev;
- struct regulator_config config = { };
- int i;
int error;
int irq;
- struct tps65219_regulator_irq_data *irq_data;
- struct tps65219_regulator_irq_type *irq_type;
+ int i;
+
+ struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ enum pmic_id chip = platform_get_device_id(pdev)->driver_data;
+
+ pmic = &chip_info_table[chip];
config.dev = tps->dev;
config.driver_data = tps;
config.regmap = tps->regmap;
- for (i = 0; i < ARRAY_SIZE(regulators); i++) {
- rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ for (i = 0; i < pmic->common_rdesc_size; i++) {
+ rdev = devm_regulator_register(&pdev->dev, &pmic->common_rdesc[i],
+ &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(tps->dev, PTR_ERR(rdev),
+ "Failed to register %s regulator\n",
+ pmic->common_rdesc[i].name);
+ }
+
+ for (i = 0; i < pmic->rdesc_size; i++) {
+ rdev = devm_regulator_register(&pdev->dev, &pmic->rdesc[i],
&config);
if (IS_ERR(rdev))
return dev_err_probe(tps->dev, PTR_ERR(rdev),
- "Failed to register %s regulator\n",
- regulators[i].name);
+ "Failed to register %s regulator\n",
+ pmic->rdesc[i].name);
}
- irq_data = devm_kmalloc(tps->dev,
- ARRAY_SIZE(tps65219_regulator_irq_types) *
- sizeof(struct tps65219_regulator_irq_data),
- GFP_KERNEL);
+	irq_data = devm_kmalloc(tps->dev,
+				pmic->common_irq_size * sizeof(*irq_data),
+				GFP_KERNEL);
if (!irq_data)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(tps65219_regulator_irq_types); ++i) {
- irq_type = &tps65219_regulator_irq_types[i];
-
+ for (i = 0; i < pmic->common_irq_size; ++i) {
+ irq_type = &pmic->common_irq_types[i];
irq = platform_get_irq_byname(pdev, irq_type->irq_name);
if (irq < 0)
return -EINVAL;
irq_data[i].dev = tps->dev;
irq_data[i].type = irq_type;
+ error = devm_request_threaded_irq(tps->dev, irq, NULL,
+ tps65219_regulator_irq_handler,
+ IRQF_ONESHOT,
+ irq_type->irq_name,
+ &irq_data[i]);
+ if (error)
+			return dev_err_probe(tps->dev, error,
+ "Failed to request %s IRQ %d: %d\n",
+ irq_type->irq_name, irq, error);
+ }
+
+	irq_data = devm_kmalloc(tps->dev,
+				pmic->dev_irq_size * sizeof(*irq_data),
+				GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+ for (i = 0; i < pmic->dev_irq_size; ++i) {
+ irq_type = &pmic->irq_types[i];
+ irq = platform_get_irq_byname(pdev, irq_type->irq_name);
+ if (irq < 0)
+ return -EINVAL;
+
+ irq_data[i].dev = tps->dev;
+ irq_data[i].type = irq_type;
error = devm_request_threaded_irq(tps->dev, irq, NULL,
tps65219_regulator_irq_handler,
IRQF_ONESHOT,
irq_type->irq_name,
&irq_data[i]);
- if (error) {
- dev_err(tps->dev, "failed to request %s IRQ %d: %d\n",
- irq_type->irq_name, irq, error);
- return error;
- }
+ if (error)
+			return dev_err_probe(tps->dev, error,
+ "Failed to request %s IRQ %d: %d\n",
+ irq_type->irq_name, irq, error);
}
return 0;
}
static const struct platform_device_id tps65219_regulator_id_table[] = {
- { "tps65219-regulator", },
+ { "tps65214-regulator", TPS65214 },
+ { "tps65215-regulator", TPS65215 },
+ { "tps65219-regulator", TPS65219 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, tps65219_regulator_id_table);
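The id table only takes effect if the parent MFD driver registers child platform
devices with matching names. A sketch of the parent-side hookup follows; the cell
name is an assumption inferred from the id strings above, not taken from this patch:

	#include <linux/mfd/core.h>

	/* Hypothetical parent-side cell list for a TPS65215 variant */
	static const struct mfd_cell tps65215_cells[] = {
		{ .name = "tps65215-regulator" },
	};

Registered via devm_mfd_add_devices(), this creates a "tps65215-regulator" platform
device, which the id table above then matches to the TPS65215 chip data.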
@@ -361,5 +505,5 @@ static struct platform_driver tps65219_regulator_driver = {
module_platform_driver(tps65219_regulator_driver);
MODULE_AUTHOR("Jerome Neanne <j-neanne@baylibre.com>");
-MODULE_DESCRIPTION("TPS65219 voltage regulator driver");
+MODULE_DESCRIPTION("TPS65214/TPS65215/TPS65219 Regulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 5ff4e2fee4ab..1c7598b8475d 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
-obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
-obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o
-obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o ti_k3_common.o
+obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o ti_k3_common.o
+obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o ti_k3_common.o
obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 90cb1fc13e71..5ee622bf5352 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -36,9 +36,18 @@ module_param_named(no_mailboxes, no_mailboxes, int, 0644);
MODULE_PARM_DESC(no_mailboxes,
"There is no mailbox between cores, so ignore remote proc reply after start, default is 0 (off).");
+/* Flag indicating that the remote is up and running */
#define REMOTE_IS_READY BIT(0)
+/* Flag indicating that the host should wait for a firmware-ready response */
+#define WAIT_FW_READY BIT(1)
#define REMOTE_READY_WAIT_MAX_RETRIES 500
+/*
+ * This flag is set in the DSP resource table's features field to indicate
+ * that the firmware requires the host NOT to wait for a FW_READY response.
+ */
+#define FEATURE_DONT_WAIT_FW_READY BIT(0)
+
/* att flags */
/* DSP own area */
#define ATT_OWN BIT(31)
@@ -73,6 +82,10 @@ MODULE_PARM_DESC(no_mailboxes,
#define IMX8ULP_SIP_HIFI_XRDC 0xc200000e
+#define FW_RSC_NXP_S_MAGIC ((uint32_t)'n' << 24 | \
+ (uint32_t)'x' << 16 | \
+ (uint32_t)'p' << 8 | \
+ (uint32_t)'s')
/*
* enum - Predefined Mailbox Messages
*
@@ -139,6 +152,24 @@ struct imx_dsp_rproc_dcfg {
int (*reset)(struct imx_dsp_rproc *priv);
};
+/**
+ * struct fw_rsc_imx_dsp - i.MX DSP specific info
+ *
+ * @len: length of the resource entry
+ * @magic_num: 32-bit magic number
+ * @version: version of data structure
+ * @features: feature flags supported by the i.MX DSP firmware
+ *
+ * This represents a DSP-specific resource in the firmware's
+ * resource table, providing information on supported features.
+ */
+struct fw_rsc_imx_dsp {
+ uint32_t len;
+ uint32_t magic_num;
+ uint32_t version;
+ uint32_t features;
+} __packed;
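As an illustration of the contract this struct encodes, firmware could embed the
following entry. This is a sketch: it assumes the entry is emitted as a vendor-range
resource (type between RSC_VENDOR_START and RSC_VENDOR_END), since that is what routes
a table entry to the driver's .handle_rsc callback.

	/* Hypothetical firmware-side declaration of the DSP vendor resource */
	static const struct fw_rsc_imx_dsp imx_dsp_vendor_rsc = {
		.len       = sizeof(struct fw_rsc_imx_dsp),
		.magic_num = FW_RSC_NXP_S_MAGIC,
		.version   = 0,
		.features  = FEATURE_DONT_WAIT_FW_READY, /* host must not block on fw_ready */
	};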
+
static const struct imx_rproc_att imx_dsp_rproc_att_imx8qm[] = {
/* dev addr , sys addr , size , flags */
{ 0x596e8000, 0x556e8000, 0x00008000, ATT_OWN },
@@ -297,6 +328,66 @@ static int imx_dsp_rproc_ready(struct rproc *rproc)
return -ETIMEDOUT;
}
+/**
+ * imx_dsp_rproc_handle_rsc() - Handle DSP-specific resource table entries
+ * @rproc: remote processor instance
+ * @rsc_type: resource type identifier
+ * @rsc: pointer to the resource entry
+ * @offset: offset of the resource entry
+ * @avail: available space in the resource table
+ *
+ * Parse the DSP-specific resource entry and update flags accordingly.
+ * If the WAIT_FW_READY feature is set, the host must wait for the firmware
+ * to signal readiness before proceeding with execution.
+ *
+ * Return: RSC_HANDLED if processed successfully, RSC_IGNORED otherwise.
+ */
+static int imx_dsp_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type,
+ void *rsc, int offset, int avail)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct fw_rsc_imx_dsp *imx_dsp_rsc = rsc;
+ struct device *dev = rproc->dev.parent;
+
+ if (!imx_dsp_rsc) {
+ dev_dbg(dev, "Invalid fw_rsc_imx_dsp.\n");
+ return RSC_IGNORED;
+ }
+
+ /* Make sure resource isn't truncated */
+ if (sizeof(struct fw_rsc_imx_dsp) > avail ||
+ sizeof(struct fw_rsc_imx_dsp) != imx_dsp_rsc->len) {
+ dev_dbg(dev, "Resource fw_rsc_imx_dsp is truncated.\n");
+ return RSC_IGNORED;
+ }
+
+ /*
+	 * If the FW_RSC_NXP_S_MAGIC number is not found, fall back to
+	 * waiting for the fw_ready reply (the default workflow).
+ */
+ if (imx_dsp_rsc->magic_num != FW_RSC_NXP_S_MAGIC) {
+ dev_dbg(dev, "Invalid resource table magic number.\n");
+ return RSC_IGNORED;
+ }
+
+ /*
+	 * For now, in struct fw_rsc_imx_dsp version 0, only
+	 * FEATURE_DONT_WAIT_FW_READY is defined.
+	 *
+	 * Bump the version when adding new features.
+ */
+ if (imx_dsp_rsc->version > 0) {
+ dev_warn(dev, "Unexpected fw_rsc_imx_dsp version %d.\n",
+ imx_dsp_rsc->version);
+ return RSC_IGNORED;
+ }
+
+ if (imx_dsp_rsc->features & FEATURE_DONT_WAIT_FW_READY)
+ priv->flags &= ~WAIT_FW_READY;
+
+ return RSC_HANDLED;
+}
+
/*
* Start function for rproc_ops
*
@@ -335,8 +426,8 @@ static int imx_dsp_rproc_start(struct rproc *rproc)
if (ret)
dev_err(dev, "Failed to enable remote core!\n");
- else
- ret = imx_dsp_rproc_ready(rproc);
+ else if (priv->flags & WAIT_FW_READY)
+ return imx_dsp_rproc_ready(rproc);
return ret;
}
@@ -939,6 +1030,7 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
.kick = imx_dsp_rproc_kick,
.load = imx_dsp_rproc_elf_load_segments,
.parse_fw = imx_dsp_rproc_parse_fw,
+ .handle_rsc = imx_dsp_rproc_handle_rsc,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
@@ -1058,6 +1150,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
priv = rproc->priv;
priv->rproc = rproc;
priv->dsp_dcfg = dsp_dcfg;
+ /* By default, host waits for fw_ready reply */
+ priv->flags |= WAIT_FW_READY;
if (no_mailboxes)
imx_dsp_rproc_mbox_init = imx_dsp_rproc_mbox_no_alloc;
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 775b056d795a..2c7e519a2254 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -456,7 +456,8 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
if (wcnss->num_pds) {
info += wcnss->num_pds;
/* Handle single power domain case */
- num_vregs += num_pd_vregs - wcnss->num_pds;
+ if (wcnss->num_pds < num_pd_vregs)
+ num_vregs += num_pd_vregs - wcnss->num_pds;
} else {
num_vregs += num_pd_vregs;
}
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index b989718776bd..2b52b403eb3f 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -196,6 +196,7 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
err_device_del:
device_del(&iris->dev);
+ put_device(&iris->dev);
return ERR_PTR(ret);
}
@@ -203,4 +204,5 @@ err_device_del:
void qcom_iris_remove(struct qcom_iris *iris)
{
device_del(&iris->dev);
+ put_device(&iris->dev);
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index b21eedefff87..81b2ccf988e8 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1617,7 +1617,7 @@ static int rproc_attach(struct rproc *rproc)
ret = rproc_set_rsc_table(rproc);
if (ret) {
dev_err(dev, "can't load resource table: %d\n", ret);
- goto unprepare_device;
+ goto clean_up_resources;
}
/* reset max_notifyid */
@@ -1634,7 +1634,7 @@ static int rproc_attach(struct rproc *rproc)
ret = rproc_handle_resources(rproc, rproc_loading_handlers);
if (ret) {
dev_err(dev, "Failed to process resources: %d\n", ret);
- goto unprepare_device;
+ goto clean_up_resources;
}
/* Allocate carveout resources associated to rproc */
@@ -1653,9 +1653,9 @@ static int rproc_attach(struct rproc *rproc)
clean_up_resources:
rproc_resource_cleanup(rproc);
-unprepare_device:
/* release HW resources if needed */
rproc_unprepare_device(rproc);
+ kfree(rproc->clean_table);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
@@ -2025,7 +2025,6 @@ int rproc_shutdown(struct rproc *rproc)
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
- rproc->table_sz = 0;
out:
mutex_unlock(&rproc->lock);
return ret;
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index b02b36a3f515..431648607d53 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -835,6 +835,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct stm32_rproc *ddata;
struct device_node *np = dev->of_node;
+	const char *fw_name = NULL;	/* stays NULL when "firmware-name" is absent */
struct rproc *rproc;
unsigned int state;
int ret;
@@ -843,7 +844,12 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (ret)
return ret;
- rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ /* Look for an optional firmware name */
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, fw_name, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c
new file mode 100644
index 000000000000..d5dccc81d460
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_common.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 Remote Processor(s) driver common code
+ *
+ * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and
+ * ti_k3_m4_remoteproc.c.
+ *
+ * ti_k3_r5_remoteproc.c:
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_dsp_remoteproc.c:
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_m4_remoteproc.c:
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+#include "ti_k3_common.h"
+
+/**
+ * k3_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the K3 mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+void k3_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_rproc *kproc = container_of(client, struct k3_rproc, client);
+ struct device *dev = kproc->rproc->dev.parent;
+ struct rproc *rproc = kproc->rproc;
+ u32 msg = (u32)(uintptr_t)(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", rproc->name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+EXPORT_SYMBOL_GPL(k3_rproc_mbox_callback);
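For reference, the "deliberately very large" out-of-band codes referred to above come
from omap_remoteproc.h. A representative subset is listed here; the exact values are
quoted from memory and should be treated as assumptions:

	/*
	 * RP_MBOX_READY        = 0xFFFFFF00   remote side is up
	 * RP_MBOX_CRASH        = 0xFFFFFF02   remote hit an exception
	 * RP_MBOX_ECHO_REQUEST = 0xFFFFFF03
	 * RP_MBOX_ECHO_REPLY   = 0xFFFFFF04
	 *
	 * Anything at or above RP_MBOX_READY and below RP_MBOX_END_MSG is
	 * treated as a control message; everything else is interpreted as
	 * a virtqueue index.
	 */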
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid argument is unused and inconsequential: the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
+ * and the remote processor is expected to process both its Tx and Rx
+ * virtqueues.
+ */
+void k3_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 msg = (u32)vqid;
+ int ret;
+
+ /*
+ * Send the index of the triggered virtqueue in the mailbox payload.
+ * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
+ * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_kick);
+
+/* Put the remote processor into reset */
+int k3_rproc_reset(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset) {
+ ret = reset_control_assert(kproc->reset);
+ if (ret)
+ dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
+ } else {
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_reset);
+
+/* Release the remote processor from reset */
+int k3_rproc_release(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset) {
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret));
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(dev, "module-reset assert back failed\n");
+ }
+ } else {
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_release);
+
+int k3_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
+
+ /*
+	 * Ping the remote processor; this is only for sanity's sake for now,
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_request_mbox);
+
+/*
+ * The K3 DSP and M4 cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on remote cores to allow loading into the
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the remote cores to run.
+ */
+int k3_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+	/* If the core is already running, no need to deassert the module reset */
+ if (rproc->state == RPROC_DETACHED)
+ return 0;
+
+ /*
+ * Ensure the local reset is asserted so the core doesn't
+ * execute bogus code when the module reset is released.
+ */
+ if (kproc->data->uses_lreset) {
+ ret = k3_rproc_reset(kproc);
+ if (ret)
+ return ret;
+
+ ret = reset_control_status(kproc->reset);
+		if (ret < 0)
+			return ret;
+		if (ret == 0) {
+			dev_err(dev, "local reset still not asserted\n");
+			return -EINVAL;
+		}
+ }
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_prepare);
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable K3 DSP and M4 cores. This completes the second
+ * portion of powering down the remote core. The cores themselves are only
+ * halted in the .stop() callback through the local reset, and the .unprepare()
+ * ops is invoked by the remoteproc core after the remoteproc is stopped to
+ * balance the global reset.
+ */
+int k3_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+	/* If the core is going to be detached, do not assert the module reset */
+ if (rproc->state == RPROC_DETACHED)
+ return 0;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_unprepare);
+
+/*
+ * Power up the remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met. This callback is invoked only in remoteproc mode.
+ */
+int k3_rproc_start(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+
+ return k3_rproc_release(kproc);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_start);
+
+/*
+ * Stop the remote processor.
+ *
+ * This function puts the remote processor into reset, and finishes processing
+ * of any pending messages. This callback is invoked only in remoteproc mode.
+ */
+int k3_rproc_stop(struct rproc *rproc)
+{
+ struct k3_rproc *kproc = rproc->priv;
+
+ return k3_rproc_reset(kproc);
+}
+EXPORT_SYMBOL_GPL(k3_rproc_stop);
+
+/*
+ * Attach to a running remote processor (IPC-only mode)
+ *
+ * The rproc attach callback is a NOP. The remote processor is already booted,
+ * and all required resources have been acquired during probe routine, so there
+ * is no need to issue any TI-SCI commands to boot the remote cores in IPC-only
+ * mode. This callback is invoked only in IPC-only mode and exists because
+ * rproc_validate() checks for its existence.
+ */
+int k3_rproc_attach(struct rproc *rproc) { return 0; }
+EXPORT_SYMBOL_GPL(k3_rproc_attach);
+
+/*
+ * Detach from a running remote processor (IPC-only mode)
+ *
+ * The rproc detach callback is a NOP. The remote processor is not stopped and
+ * will be left in booted state in IPC-only mode. This callback is invoked only
+ * in IPC-only mode and exists for sanity's sake.
+ */
+int k3_rproc_detach(struct rproc *rproc) { return 0; }
+EXPORT_SYMBOL_GPL(k3_rproc_detach);
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted remote processor in IPC-only
+ * mode. The remote processor firmwares follow a design-by-contract approach
+ * and are expected to have the resource table at the base of the DDR region
+ * reserved for firmware usage. This provides flexibility for the remote
+ * processor to be booted by different bootloaders that may or may not have the
+ * ability to publish the resource table address and size through a DT
+ * property.
+ */
+struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+EXPORT_SYMBOL_GPL(k3_get_loaded_rsc_table);
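On the firmware side, the design-by-contract described above is usually satisfied by
linking the resource table at the very start of the DDR region reserved for the
firmware. A minimal, self-contained sketch follows; the ".resource_table" section name
is the common convention, not something this patch mandates:

	#include <stdint.h>

	/* Layout mirrored from struct resource_table in the kernel's remoteproc.h */
	struct resource_table {
		uint32_t ver;
		uint32_t num;
		uint32_t reserved[2];
		uint32_t offset[];
	} __attribute__((__packed__));

	/* Linked at the base of the firmware's DDR region by the linker script */
	struct resource_table rsc_table
		__attribute__((__section__(".resource_table"))) = {
		.ver = 1,	/* remoteproc core only accepts version 1 */
		.num = 0,	/* real firmwares add a vdev and an optional trace */
	};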
+
+/*
+ * Custom function to translate a remote processor device address (internal
+ * RAMs only) to a kernel virtual address. The remote processors can access
+ * their RAMs at either an internal address visible only from a remote
+ * processor, or at the SoC-level bus address. Both these addresses need to be
+ * looked through for translation. The translated addresses can be used either
+ * by the remoteproc core for loading (when using kernel remoteproc loader), or
+ * by any rpmsg bus drivers.
+ */
+void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ /* handle rproc-view addresses */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses */
+ if (da >= bus_addr && (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_da_to_va);
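A worked example of the two lookup branches, with hypothetical addresses:

	/*
	 * Suppose one internal RAM entry has:
	 *   dev_addr = 0x00800000 (core-local view)
	 *   bus_addr = 0x4d80800000 (SoC view), size = 0x80000
	 *
	 * da = 0x00800100   -> rproc-view branch: offset 0x100, va = cpu_addr + 0x100
	 * da = 0x4d80800100 -> SoC-view branch: same offset, same va
	 * da = 0x00880000   -> out of range for this entry; falls through to the
	 *                      reserved-memory loop and may still translate there
	 */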
+
+int k3_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc)
+{
+ const struct k3_rproc_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ data->mems[i].name);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ data->mems[i].name);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ data->mems[i].name, &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_rproc_of_get_memories);
+
+void k3_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+EXPORT_SYMBOL_GPL(k3_mem_release);
+
+int k3_reserved_mem_init(struct k3_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems < 0) {
+ dev_err(dev, "device does not reserved memory regions (%d)\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(dev, k3_mem_release, dev);
+ if (ret)
+ return ret;
+
+ num_rmems--;
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ of_node_put(rmem_np);
+ if (!rmem)
+ return -EINVAL;
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_reserved_mem_init);
+
+void k3_release_tsp(void *data)
+{
+ struct ti_sci_proc *tsp = data;
+
+ ti_sci_proc_release(tsp);
+}
+EXPORT_SYMBOL_GPL(k3_release_tsp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 common Remoteproc code");
diff --git a/drivers/remoteproc/ti_k3_common.h b/drivers/remoteproc/ti_k3_common.h
new file mode 100644
index 000000000000..aee3c28dbe51
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_common.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TI K3 Remote Processor(s) driver common code
+ *
+ * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and
+ * ti_k3_m4_remoteproc.c.
+ *
+ * ti_k3_r5_remoteproc.c:
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_dsp_remoteproc.c:
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ *
+ * ti_k3_m4_remoteproc.c:
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#ifndef REMOTEPROC_TI_K3_COMMON_H
+#define REMOTEPROC_TI_K3_COMMON_H
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct k3_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from remote processor view
+ * @size: Size of the memory region
+ */
+struct k3_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_rproc_mem_data - memory definitions for a remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_rproc_dev_data - device data structure for a remote processor
+ * @mems: pointer to memory definitions for a remote processor
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_rproc_dev_data {
+ const struct k3_rproc_mem_data *mems;
+ u32 num_mems;
+ u32 boot_align_addr;
+ bool uses_lreset;
+};
+
+/**
+ * struct k3_rproc - k3 remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to DSP-specific device data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ * @priv: void pointer to carry any private data
+ */
+struct k3_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_rproc_mem *mem;
+ int num_mems;
+ struct k3_rproc_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_rproc_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+ void *priv;
+};
+
+void k3_rproc_mbox_callback(struct mbox_client *client, void *data);
+void k3_rproc_kick(struct rproc *rproc, int vqid);
+int k3_rproc_reset(struct k3_rproc *kproc);
+int k3_rproc_release(struct k3_rproc *kproc);
+int k3_rproc_request_mbox(struct rproc *rproc);
+int k3_rproc_prepare(struct rproc *rproc);
+int k3_rproc_unprepare(struct rproc *rproc);
+int k3_rproc_start(struct rproc *rproc);
+int k3_rproc_stop(struct rproc *rproc);
+int k3_rproc_attach(struct rproc *rproc);
+int k3_rproc_detach(struct rproc *rproc);
+struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz);
+void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len,
+ bool *is_iomem);
+int k3_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc);
+void k3_mem_release(void *data);
+int k3_reserved_mem_init(struct k3_rproc *kproc);
+void k3_release_tsp(void *data);
+#endif /* REMOTEPROC_TI_K3_COMMON_H */
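To see how these prototypes are meant to be consumed, here is a sketch of a client
driver wiring the shared helpers into its rproc_ops. This is a plausible usage, not
code from the patch; the real DSP/M4/R5 drivers override .start and friends where
core-specific work is needed:

	static const struct rproc_ops k3_client_like_ops = {
		.prepare              = k3_rproc_prepare,
		.unprepare            = k3_rproc_unprepare,
		.start                = k3_rproc_start,
		.stop                 = k3_rproc_stop,
		.attach               = k3_rproc_attach,
		.detach               = k3_rproc_detach,
		.kick                 = k3_rproc_kick,
		.da_to_va             = k3_rproc_da_to_va,
		.get_loaded_rsc_table = k3_get_loaded_rsc_table,
	};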
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index a695890254ff..7a72933bd403 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -20,291 +20,7 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
-
-#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
-
-/**
- * struct k3_dsp_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address of the memory region from DSP view
- * @size: Size of the memory region
- */
-struct k3_dsp_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
-/**
- * struct k3_dsp_mem_data - memory definitions for a DSP
- * @name: name for this memory entry
- * @dev_addr: device address for the memory entry
- */
-struct k3_dsp_mem_data {
- const char *name;
- const u32 dev_addr;
-};
-
-/**
- * struct k3_dsp_dev_data - device data structure for a DSP
- * @mems: pointer to memory definitions for a DSP
- * @num_mems: number of memory regions in @mems
- * @boot_align_addr: boot vector address alignment granularity
- * @uses_lreset: flag to denote the need for local reset management
- */
-struct k3_dsp_dev_data {
- const struct k3_dsp_mem_data *mems;
- u32 num_mems;
- u32 boot_align_addr;
- bool uses_lreset;
-};
-
-/**
- * struct k3_dsp_rproc - k3 DSP remote processor driver structure
- * @dev: cached device pointer
- * @rproc: remoteproc device handle
- * @mem: internal memory regions data
- * @num_mems: number of internal memory regions
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- * @reset: reset control handle
- * @data: pointer to DSP-specific device data
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- */
-struct k3_dsp_rproc {
- struct device *dev;
- struct rproc *rproc;
- struct k3_dsp_mem *mem;
- int num_mems;
- struct k3_dsp_mem *rmem;
- int num_rmems;
- struct reset_control *reset;
- const struct k3_dsp_dev_data *data;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
- struct mbox_chan *mbox;
- struct mbox_client client;
-};
-
-/**
- * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the OMAP mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
- client);
- struct device *dev = kproc->rproc->dev.parent;
- const char *name = kproc->rproc->name;
- u32 msg = omap_mbox_message(data);
-
- /* Do not forward messages from a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 DSP rproc %s crashed\n", name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > kproc->rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
-
-/*
- * Kick the remote processor to notify about pending unprocessed messages.
- * The vqid usage is not used and is inconsequential, as the kick is performed
- * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
- * the remote processor is expected to process both its Tx and Rx virtqueues.
- */
-static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = rproc->dev.parent;
- mbox_msg_t msg = (mbox_msg_t)vqid;
- int ret;
-
- /* Do not forward messages to a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- /* send the index of the triggered virtqueue in the mailbox payload */
- ret = mbox_send_message(kproc->mbox, (void *)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message (%pe)\n",
- ERR_PTR(ret));
-}
-
-/* Put the DSP processor into reset */
-static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
- return ret;
- }
-
- if (kproc->data->uses_lreset)
- return ret;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
- if (reset_control_deassert(kproc->reset))
- dev_warn(dev, "local-reset deassert back failed\n");
- }
-
- return ret;
-}
-
-/* Release the DSP processor from reset */
-static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- if (kproc->data->uses_lreset)
- goto lreset;
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
- return ret;
- }
-
-lreset:
- ret = reset_control_deassert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret));
- if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id))
- dev_warn(dev, "module-reset assert back failed\n");
- }
-
- return ret;
-}
-
-static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct mbox_client *client = &kproc->client;
- struct device *dev = kproc->dev;
- int ret;
-
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_dsp_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
- mbox_free_channel(kproc->mbox);
- return ret;
- }
-
- return 0;
-}
-/*
- * The C66x DSP cores have a local reset that affects only the CPU, and a
- * generic module reset that powers on the device and allows the DSP internal
- * memories to be accessed while the local reset is asserted. This function is
- * used to release the global reset on C66x DSPs to allow loading into the DSP
- * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
- * firmware loading, and is followed by the .start() ops after loading to
- * actually let the C66x DSP cores run. This callback is invoked only in
- * remoteproc mode.
- */
-static int k3_dsp_rproc_prepare(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret)
- dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading (%pe)\n",
- ERR_PTR(ret));
-
- return ret;
-}
-
-/*
- * This function implements the .unprepare() ops and performs the complimentary
- * operations to that of the .prepare() ops. The function is used to assert the
- * global reset on applicable C66x cores. This completes the second portion of
- * powering down the C66x DSP cores. The cores themselves are only halted in the
- * .stop() callback through the local reset, and the .unprepare() ops is invoked
- * by the remoteproc core after the remoteproc is stopped to balance the global
- * reset. This callback is invoked only in remoteproc mode.
- */
-static int k3_dsp_rproc_unprepare(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret)
- dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
-
- return ret;
-}
+#include "ti_k3_common.h"
/*
* Power up the DSP remote processor.
@@ -315,7 +31,7 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
*/
static int k3_dsp_rproc_start(struct rproc *rproc)
{
- struct k3_dsp_rproc *kproc = rproc->priv;
+ struct k3_rproc *kproc = rproc->priv;
struct device *dev = kproc->dev;
u32 boot_addr;
int ret;
@@ -332,288 +48,30 @@ static int k3_dsp_rproc_start(struct rproc *rproc)
if (ret)
return ret;
- ret = k3_dsp_rproc_release(kproc);
+ /* Call the K3 common start function after doing DSP specific stuff */
+ ret = k3_rproc_start(rproc);
if (ret)
return ret;
return 0;
}
-/*
- * Stop the DSP remote processor.
- *
- * This function puts the DSP processor into reset, and finishes processing
- * of any pending messages. This callback is invoked only in remoteproc mode.
- */
-static int k3_dsp_rproc_stop(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
-
- k3_dsp_rproc_reset(kproc);
-
- return 0;
-}
-
-/*
- * Attach to a running DSP remote processor (IPC-only mode)
- *
- * This rproc attach callback is a NOP. The remote processor is already booted,
- * and all required resources have been acquired during probe routine, so there
- * is no need to issue any TI-SCI commands to boot the DSP core. This callback
- * is invoked only in IPC-only mode and exists because rproc_validate() checks
- * for its existence.
- */
-static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; }
-
-/*
- * Detach from a running DSP remote processor (IPC-only mode)
- *
- * This rproc detach callback is a NOP. The DSP core is not stopped and will be
- * left to continue to run its booted firmware. This callback is invoked only in
- * IPC-only mode and exists for sanity sake.
- */
-static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; }
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
- * firmwares follow a design-by-contract approach and are expected to have the
- * resource table at the base of the DDR region reserved for firmware usage.
- * This provides flexibility for the remote processor to be booted by different
- * bootloaders that may or may not have the ability to publish the resource table
- * address and size through a DT property. This callback is invoked only in
- * IPC-only mode.
- */
-static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
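
The design-by-contract layout described above places the resource table at the
base of the first DDR carveout, capped at 256 bytes. The struct below is a
trimmed model of the remoteproc table header (the real definition lives in the
remoteproc headers), and the carveout contents are made up for the
demonstration:

#include <stdint.h>
#include <stdio.h>

/* Trimmed model of a remoteproc resource table header. */
struct rsc_table_hdr {
	uint32_t ver;        /* table format version */
	uint32_t num;        /* number of resource entries */
	uint32_t reserved[2];
	uint32_t offset[];   /* byte offset of each entry from table base */
};

int main(void)
{
	/* Firmware places the table at the very base of its first DDR
	 * carveout; the driver ioremaps that region and casts the pointer. */
	static const uint32_t carveout_base[64] = { 1, 1 }; /* ver=1, num=1 */
	const struct rsc_table_hdr *tbl = (const void *)carveout_base;

	printf("rsc table: ver=%u entries=%u (size capped at 256 bytes)\n",
	       (unsigned)tbl->ver, (unsigned)tbl->num);
	return 0;
}
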
-
-/*
- * Custom function to translate a DSP device address (internal RAMs only) to a
- * kernel virtual address. The DSPs can access their RAMs at either an internal
- * address visible only from a DSP, or at the SoC-level bus address. Both these
- * addresses need to be looked through for translation. The translated addresses
- * can be used either by the remoteproc core for loading (when using kernel
- * remoteproc loader), or by any rpmsg bus drivers.
- */
-static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- void __iomem *va = NULL;
- phys_addr_t bus_addr;
- u32 dev_addr, offset;
- size_t size;
- int i;
-
- if (len == 0)
- return NULL;
-
- for (i = 0; i < kproc->num_mems; i++) {
- bus_addr = kproc->mem[i].bus_addr;
- dev_addr = kproc->mem[i].dev_addr;
- size = kproc->mem[i].size;
-
- if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
- /* handle DSP-view addresses */
- if (da >= dev_addr &&
- ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- } else {
- /* handle SoC-view addresses */
- if (da >= bus_addr &&
- (da + len) <= (bus_addr + size)) {
- offset = da - bus_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
- }
-
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
-}
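
The translation loop above boils down to a windowed range check applied to two
views: the DSP-view device address or the SoC-view bus address, selected by
whether the incoming address falls below the local-address boundary. A compact
stand-alone model follows; the boundary value and region numbers are
illustrative (the driver uses KEYSTONE_RPROC_LOCAL_ADDRESS_MASK for the same
split):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mem_win {
	uint64_t dev_addr; /* address as seen by the DSP */
	uint64_t bus_addr; /* SoC-level bus address */
	size_t size;
	uint8_t *cpu_addr; /* kernel VA in the driver; a plain buffer here */
};

#define LOCAL_ADDR_LIMIT 0x10000000ULL /* illustrative boundary */

static void *da_to_va(const struct mem_win *w, int n, uint64_t da, size_t len)
{
	for (int i = 0; i < n; i++) {
		uint64_t base = (da < LOCAL_ADDR_LIMIT) ? w[i].dev_addr
							: w[i].bus_addr;
		if (da >= base && da + len <= base + w[i].size)
			return w[i].cpu_addr + (da - base);
	}
	return NULL; /* no window matched */
}

int main(void)
{
	static uint8_t l2[64 * 1024];
	const struct mem_win win = {
		.dev_addr = 0x800000,     /* DSP-view L2SRAM */
		.bus_addr = 0x4d80800000, /* hypothetical SoC-view alias */
		.size = sizeof(l2),
		.cpu_addr = l2,
	};

	/* The same byte is reachable through either view. */
	memset(da_to_va(&win, 1, 0x800100, 4), 0xab, 4);
	printf("%#x\n", *(uint8_t *)da_to_va(&win, 1, 0x4d80800100ULL, 1));
	return 0;
}
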
-
static const struct rproc_ops k3_dsp_rproc_ops = {
- .start = k3_dsp_rproc_start,
- .stop = k3_dsp_rproc_stop,
- .kick = k3_dsp_rproc_kick,
- .da_to_va = k3_dsp_rproc_da_to_va,
+ .start = k3_dsp_rproc_start,
+ .stop = k3_rproc_stop,
+ .attach = k3_rproc_attach,
+ .detach = k3_rproc_detach,
+ .kick = k3_rproc_kick,
+ .da_to_va = k3_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_get_loaded_rsc_table,
};
-static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
- struct k3_dsp_rproc *kproc)
-{
- const struct k3_dsp_dev_data *data = kproc->data;
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems = 0;
- int i;
-
- num_mems = kproc->data->num_mems;
- kproc->mem = devm_kcalloc(kproc->dev, num_mems,
- sizeof(*kproc->mem), GFP_KERNEL);
- if (!kproc->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- data->mems[i].name);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- data->mems[i].name);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- data->mems[i].name);
- return -EBUSY;
- }
-
- kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!kproc->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n",
- data->mems[i].name);
- return -ENOMEM;
- }
- kproc->mem[i].bus_addr = res->start;
- kproc->mem[i].dev_addr = data->mems[i].dev_addr;
- kproc->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- data->mems[i].name, &kproc->mem[i].bus_addr,
- kproc->mem[i].size, kproc->mem[i].cpu_addr,
- kproc->mem[i].dev_addr);
- }
- kproc->num_mems = num_mems;
-
- return 0;
-}
-
-static void k3_dsp_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev->of_node;
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems < 0) {
- dev_err(dev, "device does not reserved memory regions (%pe)\n",
- ERR_PTR(num_rmems));
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool (%pe)\n",
- ERR_PTR(ret));
- return ret;
- }
- ret = devm_add_action_or_reset(dev, k3_dsp_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /* 64-bit address regions currently not supported */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
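
The convention enforced above (memory-region entry 0 backs the vring DMA pool,
every later entry becomes a static carveout addressed through a truncated
32-bit device address) is simple enough to model on its own; all region values
below are hypothetical:

#include <stdint.h>
#include <stdio.h>

struct rmem { uint64_t base; uint64_t size; };

int main(void)
{
	/* Hypothetical memory-region entries from DT, in phandle order. */
	const struct rmem regions[] = {
		{ 0xa0000000, 0x100000 }, /* [0] vring DMA pool */
		{ 0xa0100000, 0xf00000 }, /* [1] carveout, rsc table at base */
	};
	const int n = sizeof(regions) / sizeof(regions[0]);

	if (n < 2) { /* driver requires the DMA pool plus one carveout */
		fprintf(stderr, "need at least two memory regions\n");
		return 1;
	}

	printf("DMA pool: base %#llx size %#llx\n",
	       (unsigned long long)regions[0].base,
	       (unsigned long long)regions[0].size);
	for (int i = 1; i < n; i++) {
		/* 64-bit regions unsupported: device address is truncated */
		uint32_t dev_addr = (uint32_t)regions[i].base;
		printf("carveout %d: da %#x size %#llx\n", i, dev_addr,
		       (unsigned long long)regions[i].size);
	}
	return 0;
}
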
-
-static void k3_dsp_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct k3_dsp_dev_data *data;
- struct k3_dsp_rproc *kproc;
+ const struct k3_rproc_dev_data *data;
+ struct k3_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
bool p_state = false;
@@ -635,15 +93,15 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
rproc->has_iommu = false;
rproc->recovery_disabled = true;
if (data->uses_lreset) {
- rproc->ops->prepare = k3_dsp_rproc_prepare;
- rproc->ops->unprepare = k3_dsp_rproc_unprepare;
+ rproc->ops->prepare = k3_rproc_prepare;
+ rproc->ops->unprepare = k3_rproc_unprepare;
}
kproc = rproc->priv;
kproc->rproc = rproc;
kproc->dev = dev;
kproc->data = data;
- ret = k3_dsp_rproc_request_mbox(rproc);
+ ret = k3_rproc_request_mbox(rproc);
if (ret)
return ret;
@@ -671,15 +129,15 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
return ret;
}
- ret = devm_add_action_or_reset(dev, k3_dsp_release_tsp, kproc->tsp);
+ ret = devm_add_action_or_reset(dev, k3_release_tsp, kproc->tsp);
if (ret)
return ret;
- ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+ ret = k3_rproc_of_get_memories(pdev, kproc);
if (ret)
return ret;
- ret = k3_dsp_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret)
return dev_err_probe(dev, ret, "reserved memory init failed\n");
@@ -692,30 +150,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
if (p_state) {
dev_info(dev, "configured DSP for IPC-only mode\n");
rproc->state = RPROC_DETACHED;
- /* override rproc ops with only required IPC-only mode ops */
- rproc->ops->prepare = NULL;
- rproc->ops->unprepare = NULL;
- rproc->ops->start = NULL;
- rproc->ops->stop = NULL;
- rproc->ops->attach = k3_dsp_rproc_attach;
- rproc->ops->detach = k3_dsp_rproc_detach;
- rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
} else {
dev_info(dev, "configured DSP for remoteproc mode\n");
- /*
- * ensure the DSP local reset is asserted to ensure the DSP
- * doesn't execute bogus code in .prepare() when the module
- * reset is released.
- */
- if (data->uses_lreset) {
- ret = reset_control_status(kproc->reset);
- if (ret < 0) {
- return dev_err_probe(dev, ret, "failed to get reset status\n");
- } else if (ret == 0) {
- dev_warn(dev, "local reset is deasserted for device\n");
- k3_dsp_rproc_reset(kproc);
- }
- }
}
ret = devm_rproc_add(dev, rproc);
@@ -729,7 +165,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
static void k3_dsp_rproc_remove(struct platform_device *pdev)
{
- struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+ struct k3_rproc *kproc = platform_get_drvdata(pdev);
struct rproc *rproc = kproc->rproc;
struct device *dev = &pdev->dev;
int ret;
@@ -743,37 +179,37 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
mbox_free_channel(kproc->mbox);
}
-static const struct k3_dsp_mem_data c66_mems[] = {
+static const struct k3_rproc_mem_data c66_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
{ .name = "l1pram", .dev_addr = 0xe00000 },
{ .name = "l1dram", .dev_addr = 0xf00000 },
};
/* C71x cores only have an L1P cache; there are no L1P SRAMs */
-static const struct k3_dsp_mem_data c71_mems[] = {
+static const struct k3_rproc_mem_data c71_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
{ .name = "l1dram", .dev_addr = 0xe00000 },
};
-static const struct k3_dsp_mem_data c7xv_mems[] = {
+static const struct k3_rproc_mem_data c7xv_mems[] = {
{ .name = "l2sram", .dev_addr = 0x800000 },
};
-static const struct k3_dsp_dev_data c66_data = {
+static const struct k3_rproc_dev_data c66_data = {
.mems = c66_mems,
.num_mems = ARRAY_SIZE(c66_mems),
.boot_align_addr = SZ_1K,
.uses_lreset = true,
};
-static const struct k3_dsp_dev_data c71_data = {
+static const struct k3_rproc_dev_data c71_data = {
.mems = c71_mems,
.num_mems = ARRAY_SIZE(c71_mems),
.boot_align_addr = SZ_2M,
.uses_lreset = false,
};
-static const struct k3_dsp_dev_data c7xv_data = {
+static const struct k3_rproc_dev_data c7xv_data = {
.mems = c7xv_mems,
.num_mems = ARRAY_SIZE(c7xv_mems),
.boot_align_addr = SZ_2M,
diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
index a16fb165fced..3a11fd24eb52 100644
--- a/drivers/remoteproc/ti_k3_m4_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
@@ -19,552 +19,35 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
-
-#define K3_M4_IRAM_DEV_ADDR 0x00000
-#define K3_M4_DRAM_DEV_ADDR 0x30000
-
-/**
- * struct k3_m4_rproc_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address of the memory region from remote processor view
- * @size: Size of the memory region
- */
-struct k3_m4_rproc_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
-/**
- * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
- * @name: name for this memory entry
- * @dev_addr: device address for the memory entry
- */
-struct k3_m4_rproc_mem_data {
- const char *name;
- const u32 dev_addr;
-};
-
-/**
- * struct k3_m4_rproc - k3 remote processor driver structure
- * @dev: cached device pointer
- * @mem: internal memory regions data
- * @num_mems: number of internal memory regions
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- * @reset: reset control handle
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- */
-struct k3_m4_rproc {
- struct device *dev;
- struct k3_m4_rproc_mem *mem;
- int num_mems;
- struct k3_m4_rproc_mem *rmem;
- int num_rmems;
- struct reset_control *reset;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
- struct mbox_chan *mbox;
- struct mbox_client client;
-};
-
-/**
- * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the K3 mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct device *dev = client->dev;
- struct rproc *rproc = dev_get_drvdata(dev);
- u32 msg = (u32)(uintptr_t)(data);
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", rproc->name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
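
The dispatch above works because the out-of-band RP_MBOX_* values sit just
below UINT32_MAX, far away from any plausible virtqueue index. A stand-alone
model of the same disambiguation; the numeric values below follow that scheme
but are restated here as assumptions, not copied from the header:

#include <stdint.h>
#include <stdio.h>

#define RP_MBOX_READY      0xFFFFFF00u /* illustrative out-of-band values */
#define RP_MBOX_CRASH      0xFFFFFF02u
#define RP_MBOX_ECHO_REPLY 0xFFFFFF04u
#define RP_MBOX_END_MSG    0xFFFFFF08u

static void handle_mbox(uint32_t msg, uint32_t max_notifyid)
{
	if (msg == RP_MBOX_CRASH) {
		puts("remote crashed (recovery unsupported, log only)");
	} else if (msg == RP_MBOX_ECHO_REPLY) {
		puts("echo reply received");
	} else if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) {
		/* other valid out-of-band messages: silently accepted */
	} else if (msg > max_notifyid) {
		printf("dropping unknown message %#x\n", msg);
	} else {
		printf("kick virtqueue %u\n", msg); /* rproc_vq_interrupt() here */
	}
}

int main(void)
{
	handle_mbox(1, 3);                  /* a vring index */
	handle_mbox(RP_MBOX_ECHO_REPLY, 3); /* out-of-band event */
	handle_mbox(42, 3);                 /* above max_notifyid: dropped */
	return 0;
}
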
-
-/*
- * Kick the remote processor to notify about pending unprocessed messages.
- * The vqid argument is unused and inconsequential, as the kick is performed
- * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
- * the remote processor is expected to process both its Tx and Rx virtqueues.
- */
-static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- u32 msg = (u32)vqid;
- int ret;
-
- /*
- * Send the index of the triggered virtqueue in the mailbox payload.
- * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
- * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
- */
- ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message, status = %d\n",
- ret);
-}
-
-static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- int ret;
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * The M4 cores have a local reset that affects only the CPU, and a
- * generic module reset that powers on the device and allows the internal
- * memories to be accessed while the local reset is asserted. This function is
- * used to release the global reset on remote cores to allow loading into the
- * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
- * firmware loading, and is followed by the .start() ops after loading to
- * actually let the remote cores run.
- */
-static int k3_m4_rproc_prepare(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- /* If the core is already running, there is no need to deassert the module reset */
- if (rproc->state == RPROC_DETACHED)
- return 0;
-
- /*
- * Ensure the local reset is asserted so the core doesn't
- * execute bogus code when the module reset is released.
- */
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "could not assert local reset\n");
- return ret;
- }
-
- ret = reset_control_status(kproc->reset);
- if (ret <= 0) {
- dev_err(dev, "local reset still not asserted\n");
- return ret;
- }
-
- ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
- return ret;
- }
-
- return 0;
-}
-
-/*
- * This function implements the .unprepare() ops and performs the complementary
- * operations to that of the .prepare() ops. The function is used to assert the
- * global reset on applicable cores. This completes the second portion of
- * powering down the remote core. The cores themselves are only halted in the
- * .stop() callback through the local reset, and the .unprepare() ops is invoked
- * by the remoteproc core after the remoteproc is stopped to balance the global
- * reset.
- */
-static int k3_m4_rproc_unprepare(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- /* If the core is going to be detached, do not assert the module reset */
- if (rproc->state == RPROC_ATTACHED)
- return 0;
-
- ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
- kproc->ti_sci_id);
- if (ret) {
- dev_err(dev, "module-reset assert failed\n");
- return ret;
- }
-
- return 0;
-}
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for a booted remote processor in IPC-only
- * mode. The remote processor firmwares follow a design-by-contract approach
- * and are expected to have the resource table at the base of the DDR region
- * reserved for firmware usage. This provides flexibility for the remote
- * processor to be booted by different bootloaders that may or may not have the
- * ability to publish the resource table address and size through a DT
- * property.
- */
-static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
-
-/*
- * Custom function to translate a remote processor device address (internal
- * RAMs only) to a kernel virtual address. The remote processors can access
- * their RAMs at either an internal address visible only from a remote
- * processor, or at the SoC-level bus address. Both these addresses need to be
- * looked through for translation. The translated addresses can be used either
- * by the remoteproc core for loading (when using kernel remoteproc loader), or
- * by any rpmsg bus drivers.
- */
-static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- void __iomem *va = NULL;
- phys_addr_t bus_addr;
- u32 dev_addr, offset;
- size_t size;
- int i;
-
- if (len == 0)
- return NULL;
-
- for (i = 0; i < kproc->num_mems; i++) {
- bus_addr = kproc->mem[i].bus_addr;
- dev_addr = kproc->mem[i].dev_addr;
- size = kproc->mem[i].size;
-
- /* handle M4-view addresses */
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
-
- /* handle SoC-view addresses */
- if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
- offset = da - bus_addr;
- va = kproc->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
-}
-
-static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
- struct k3_m4_rproc *kproc)
-{
- static const char * const mem_names[] = { "iram", "dram" };
- static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems;
- int i;
-
- num_mems = ARRAY_SIZE(mem_names);
- kproc->mem = devm_kcalloc(kproc->dev, num_mems,
- sizeof(*kproc->mem), GFP_KERNEL);
- if (!kproc->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- mem_names[i]);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- mem_names[i]);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- mem_names[i]);
- return -EBUSY;
- }
-
- kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!kproc->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n",
- mem_names[i]);
- return -ENOMEM;
- }
- kproc->mem[i].bus_addr = res->start;
- kproc->mem[i].dev_addr = mem_addrs[i];
- kproc->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- mem_names[i], &kproc->mem[i].bus_addr,
- kproc->mem[i].size, kproc->mem[i].cpu_addr,
- kproc->mem[i].dev_addr);
- }
- kproc->num_mems = num_mems;
-
- return 0;
-}
-
-static void k3_m4_rproc_dev_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev->of_node;
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems < 0) {
- dev_err(dev, "device does not reserved memory regions (%d)\n",
- num_rmems);
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /* 64-bit address regions currently not supported */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
-
-static void k3_m4_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
-/*
- * Power up the M4 remote processor.
- *
- * This function will be invoked only after the firmware for this rproc
- * was loaded, parsed successfully, and all of its resource requirements
- * were met. This callback is invoked only in remoteproc mode.
- */
-static int k3_m4_rproc_start(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_m4_rproc_ping_mbox(kproc);
- if (ret)
- return ret;
-
- ret = reset_control_deassert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Stop the M4 remote processor.
- *
- * This function puts the M4 processor into reset, and finishes processing
- * of any pending messages. This callback is invoked only in remoteproc mode.
- */
-static int k3_m4_rproc_stop(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = reset_control_assert(kproc->reset);
- if (ret) {
- dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-/*
- * Attach to a running M4 remote processor (IPC-only mode)
- *
- * The remote processor is already booted, so there is no need to issue any
- * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
- * mode.
- */
-static int k3_m4_rproc_attach(struct rproc *rproc)
-{
- struct k3_m4_rproc *kproc = rproc->priv;
- int ret;
-
- ret = k3_m4_rproc_ping_mbox(kproc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/*
- * Detach from a running M4 remote processor (IPC-only mode)
- *
- * This rproc detach callback performs the opposite operation to the attach
- * callback; the M4 core is not stopped and will be left to continue
- * running its booted firmware. This callback is invoked only in IPC-only mode.
- */
-static int k3_m4_rproc_detach(struct rproc *rproc)
-{
- return 0;
-}
+#include "ti_k3_common.h"
static const struct rproc_ops k3_m4_rproc_ops = {
- .prepare = k3_m4_rproc_prepare,
- .unprepare = k3_m4_rproc_unprepare,
- .start = k3_m4_rproc_start,
- .stop = k3_m4_rproc_stop,
- .attach = k3_m4_rproc_attach,
- .detach = k3_m4_rproc_detach,
- .kick = k3_m4_rproc_kick,
- .da_to_va = k3_m4_rproc_da_to_va,
- .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
+ .prepare = k3_rproc_prepare,
+ .unprepare = k3_rproc_unprepare,
+ .start = k3_rproc_start,
+ .stop = k3_rproc_stop,
+ .attach = k3_rproc_attach,
+ .detach = k3_rproc_detach,
+ .kick = k3_rproc_kick,
+ .da_to_va = k3_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_get_loaded_rsc_table,
};
static int k3_m4_rproc_probe(struct platform_device *pdev)
{
+ const struct k3_rproc_dev_data *data;
struct device *dev = &pdev->dev;
- struct k3_m4_rproc *kproc;
+ struct k3_rproc *kproc;
struct rproc *rproc;
const char *fw_name;
bool r_state = false;
bool p_state = false;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
if (ret)
return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
@@ -578,6 +61,8 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
rproc->recovery_disabled = true;
kproc = rproc->priv;
kproc->dev = dev;
+ kproc->rproc = rproc;
+ kproc->data = data;
platform_set_drvdata(pdev, rproc);
kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
@@ -601,15 +86,15 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
ret = ti_sci_proc_request(kproc->tsp);
if (ret < 0)
return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
- ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
+ ret = devm_add_action_or_reset(dev, k3_release_tsp, kproc->tsp);
if (ret)
return ret;
- ret = k3_m4_rproc_of_get_memories(pdev, kproc);
+ ret = k3_rproc_of_get_memories(pdev, kproc);
if (ret)
return ret;
- ret = k3_m4_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret)
return dev_err_probe(dev, ret, "reserved memory init failed\n");
@@ -627,15 +112,9 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
dev_info(dev, "configured M4F for remoteproc mode\n");
}
- kproc->client.dev = dev;
- kproc->client.tx_done = NULL;
- kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
- kproc->client.tx_block = false;
- kproc->client.knows_txdone = false;
- kproc->mbox = mbox_request_channel(&kproc->client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
+ ret = k3_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
ret = devm_rproc_add(dev, rproc);
if (ret)
@@ -645,8 +124,20 @@ static int k3_m4_rproc_probe(struct platform_device *pdev)
return 0;
}
+static const struct k3_rproc_mem_data am64_m4_mems[] = {
+ { .name = "iram", .dev_addr = 0x0 },
+ { .name = "dram", .dev_addr = 0x30000 },
+};
+
+static const struct k3_rproc_dev_data am64_m4_data = {
+ .mems = am64_m4_mems,
+ .num_mems = ARRAY_SIZE(am64_m4_mems),
+ .boot_align_addr = SZ_1K,
+ .uses_lreset = true,
+};
+
static const struct of_device_id k3_m4_of_match[] = {
- { .compatible = "ti,am64-m4fss", },
+ { .compatible = "ti,am64-m4fss", .data = &am64_m4_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_m4_of_match);
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index dbc513c5569c..e34c04c135fc 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -26,6 +26,7 @@
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
+#include "ti_k3_common.h"
/* This address can either be for ATCM or BTCM with the other at address 0x0 */
#define K3_R5_TCM_DEV_ADDR 0x41010000
@@ -55,20 +56,6 @@
/* Applicable to only AM64x SoCs */
#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
-/**
- * struct k3_r5_mem - internal memory structure
- * @cpu_addr: MPU virtual address of the memory region
- * @bus_addr: Bus address used to access the memory region
- * @dev_addr: Device address from remoteproc view
- * @size: Size of the memory region
- */
-struct k3_r5_mem {
- void __iomem *cpu_addr;
- phys_addr_t bus_addr;
- u32 dev_addr;
- size_t size;
-};
-
/*
* Not all cluster mode values are applicable on all SoCs. The following
* are the modes supported on various SoCs:
@@ -90,12 +77,14 @@ enum cluster_mode {
* @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
* @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
* @is_single_core: flag to denote if SoC/IP has only single core R5
+ * @core_data: pointer to R5-core-specific device data
*/
struct k3_r5_soc_data {
bool tcm_is_double;
bool tcm_ecc_autoinit;
bool single_cpu_mode;
bool is_single_core;
+ const struct k3_rproc_dev_data *core_data;
};
/**
@@ -118,15 +107,10 @@ struct k3_r5_cluster {
* struct k3_r5_core - K3 R5 core structure
* @elem: linked list item
* @dev: cached device pointer
- * @rproc: rproc handle representing this core
- * @mem: internal memory regions data
+ * @kproc: K3 rproc handle representing this core
+ * @cluster: cached pointer to parent cluster structure
* @sram: on-chip SRAM memory regions data
- * @num_mems: number of internal memory regions
* @num_sram: number of on-chip SRAM memory regions
- * @reset: reset control handle
- * @tsp: TI-SCI processor control handle
- * @ti_sci: TI-SCI handle
- * @ti_sci_id: TI-SCI device identifier
* @atcm_enable: flag to control ATCM enablement
* @btcm_enable: flag to control BTCM enablement
* @loczrama: flag to dictate which TCM is at device address 0x0
@@ -135,157 +119,58 @@ struct k3_r5_cluster {
struct k3_r5_core {
struct list_head elem;
struct device *dev;
- struct rproc *rproc;
- struct k3_r5_mem *mem;
- struct k3_r5_mem *sram;
- int num_mems;
+ struct k3_rproc *kproc;
+ struct k3_r5_cluster *cluster;
+ struct k3_rproc_mem *sram;
int num_sram;
- struct reset_control *reset;
- struct ti_sci_proc *tsp;
- const struct ti_sci_handle *ti_sci;
- u32 ti_sci_id;
u32 atcm_enable;
u32 btcm_enable;
u32 loczrama;
bool released_from_reset;
};
-/**
- * struct k3_r5_rproc - K3 remote processor state
- * @dev: cached device pointer
- * @cluster: cached pointer to parent cluster structure
- * @mbox: mailbox channel handle
- * @client: mailbox client to request the mailbox channel
- * @rproc: rproc handle
- * @core: cached pointer to r5 core structure being used
- * @rmem: reserved memory regions data
- * @num_rmems: number of reserved memory regions
- */
-struct k3_r5_rproc {
- struct device *dev;
- struct k3_r5_cluster *cluster;
- struct mbox_chan *mbox;
- struct mbox_client client;
- struct rproc *rproc;
- struct k3_r5_core *core;
- struct k3_r5_mem *rmem;
- int num_rmems;
-};
-
-/**
- * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
- * @client: mailbox client pointer used for requesting the mailbox channel
- * @data: mailbox payload
- *
- * This handler is invoked by the OMAP mailbox driver whenever a mailbox
- * message is received. Usually, the mailbox payload simply contains
- * the index of the virtqueue that is kicked by the remote processor,
- * and we let remoteproc core handle it.
- *
- * In addition to virtqueue indices, we also have some out-of-band values
- * that indicate different events. Those values are deliberately very
- * large so they don't coincide with virtqueue indices.
- */
-static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
-{
- struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
- client);
- struct device *dev = kproc->rproc->dev.parent;
- const char *name = kproc->rproc->name;
- u32 msg = omap_mbox_message(data);
-
- /* Do not forward message from a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- dev_dbg(dev, "mbox msg: 0x%x\n", msg);
-
- switch (msg) {
- case RP_MBOX_CRASH:
- /*
- * remoteproc detected an exception, but error recovery is not
- * supported. So, just log this for now
- */
- dev_err(dev, "K3 R5F rproc %s crashed\n", name);
- break;
- case RP_MBOX_ECHO_REPLY:
- dev_info(dev, "received echo reply from %s\n", name);
- break;
- default:
- /* silently handle all other valid messages */
- if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
- return;
- if (msg > kproc->rproc->max_notifyid) {
- dev_dbg(dev, "dropping unknown message 0x%x", msg);
- return;
- }
- /* msg contains the index of the triggered vring */
- if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
- dev_dbg(dev, "no message was found in vqid %d\n", msg);
- }
-}
-
-/* kick a virtqueue */
-static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = rproc->dev.parent;
- mbox_msg_t msg = (mbox_msg_t)vqid;
- int ret;
-
- /* Do not forward message to a detached core */
- if (kproc->rproc->state == RPROC_DETACHED)
- return;
-
- /* send the index of the triggered virtqueue in the mailbox payload */
- ret = mbox_send_message(kproc->mbox, (void *)msg);
- if (ret < 0)
- dev_err(dev, "failed to send mailbox message, status = %d\n",
- ret);
-}
-
-static int k3_r5_split_reset(struct k3_r5_core *core)
+static int k3_r5_split_reset(struct k3_rproc *kproc)
{
int ret;
- ret = reset_control_assert(core->reset);
+ ret = reset_control_assert(kproc->reset);
if (ret) {
- dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ dev_err(kproc->dev, "local-reset assert failed, ret = %d\n",
ret);
return ret;
}
- ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id);
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
- dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ dev_err(kproc->dev, "module-reset assert failed, ret = %d\n",
ret);
- if (reset_control_deassert(core->reset))
- dev_warn(core->dev, "local-reset deassert back failed\n");
+ if (reset_control_deassert(kproc->reset))
+ dev_warn(kproc->dev, "local-reset deassert back failed\n");
}
return ret;
}
-static int k3_r5_split_release(struct k3_r5_core *core)
+static int k3_r5_split_release(struct k3_rproc *kproc)
{
int ret;
- ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
- core->ti_sci_id);
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
- dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ dev_err(kproc->dev, "module-reset deassert failed, ret = %d\n",
ret);
return ret;
}
- ret = reset_control_deassert(core->reset);
+ ret = reset_control_deassert(kproc->reset);
if (ret) {
- dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
+ dev_err(kproc->dev, "local-reset deassert failed, ret = %d\n",
ret);
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
- dev_warn(core->dev, "module-reset assert back failed\n");
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(kproc->dev, "module-reset assert back failed\n");
}
return ret;
@@ -294,11 +179,12 @@ static int k3_r5_split_release(struct k3_r5_core *core)
static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
{
struct k3_r5_core *core;
+ struct k3_rproc *kproc;
int ret;
/* assert local reset on all applicable cores */
list_for_each_entry(core, &cluster->cores, elem) {
- ret = reset_control_assert(core->reset);
+ ret = reset_control_assert(core->kproc->reset);
if (ret) {
dev_err(core->dev, "local-reset assert failed, ret = %d\n",
ret);
@@ -309,8 +195,9 @@ static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
/* disable PSC modules on all applicable cores */
list_for_each_entry(core, &cluster->cores, elem) {
- ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id);
+ kproc = core->kproc;
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset assert failed, ret = %d\n",
ret);
@@ -322,14 +209,15 @@ static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
unroll_module_reset:
list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
+ kproc = core->kproc;
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_local_reset:
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- if (reset_control_deassert(core->reset))
+ if (reset_control_deassert(core->kproc->reset))
dev_warn(core->dev, "local-reset deassert back failed\n");
}
@@ -339,12 +227,14 @@ unroll_local_reset:
static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
{
struct k3_r5_core *core;
+ struct k3_rproc *kproc;
int ret;
/* enable PSC modules on all applicable cores */
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
- core->ti_sci_id);
+ kproc = core->kproc;
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
if (ret) {
dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
ret);
@@ -355,7 +245,7 @@ static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
/* deassert local reset on all applicable cores */
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = reset_control_deassert(core->reset);
+ ret = reset_control_deassert(core->kproc->reset);
if (ret) {
dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
ret);
@@ -367,67 +257,33 @@ static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
unroll_local_reset:
list_for_each_entry_continue(core, &cluster->cores, elem) {
- if (reset_control_assert(core->reset))
+ if (reset_control_assert(core->kproc->reset))
dev_warn(core->dev, "local-reset assert back failed\n");
}
core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
unroll_module_reset:
list_for_each_entry_from(core, &cluster->cores, elem) {
- if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
- core->ti_sci_id))
+ kproc = core->kproc;
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
dev_warn(core->dev, "module-reset assert back failed\n");
}
return ret;
}
-static inline int k3_r5_core_halt(struct k3_r5_core *core)
+static inline int k3_r5_core_halt(struct k3_rproc *kproc)
{
- return ti_sci_proc_set_control(core->tsp,
+ return ti_sci_proc_set_control(kproc->tsp,
PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
}
-static inline int k3_r5_core_run(struct k3_r5_core *core)
+static inline int k3_r5_core_run(struct k3_rproc *kproc)
{
- return ti_sci_proc_set_control(core->tsp,
+ return ti_sci_proc_set_control(kproc->tsp,
0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
}
-static int k3_r5_rproc_request_mbox(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct mbox_client *client = &kproc->client;
- struct device *dev = kproc->dev;
- int ret;
-
- client->dev = dev;
- client->tx_done = NULL;
- client->rx_callback = k3_r5_rproc_mbox_callback;
- client->tx_block = false;
- client->knows_txdone = false;
-
- kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox))
- return dev_err_probe(dev, PTR_ERR(kproc->mbox),
- "mbox_request_channel failed\n");
-
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
- mbox_free_channel(kproc->mbox);
- return ret;
- }
-
- return 0;
-}
-
/*
* The R5F cores have controls for both a reset and a halt/run. The code
* execution from DDR requires the initial boot-strapping code to be run
@@ -446,16 +302,39 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
*/
static int k3_r5_rproc_prepare(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv, *core0, *core1;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
u32 ctrl = 0, cfg = 0, stat = 0;
u64 boot_vec = 0;
bool mem_init_dis;
int ret;
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
+ /*
+ * R5 cores must be powered on sequentially; core0 should be in a
+ * higher power state than core1 in a cluster. So, wait (with a 2 sec
+ * timeout) for core0 to power up before proceeding to core1. This
+ * waiting mechanism is necessary because rproc_auto_boot_callback() for
+ * core1 can be called before core0's due to thread execution order.
+ *
+ * By placing the wait mechanism here in .prepare() ops, this condition
+ * is enforced for rproc boot requests from sysfs as well.
+ */
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1 &&
+ !core0->released_from_reset) {
+ ret = wait_event_interruptible_timeout(cluster->core_transition,
+ core0->released_from_reset,
+ msecs_to_jiffies(2000));
+ if (ret <= 0) {
+ dev_err(dev, "can not power up core1 before core0");
+ return -EPERM;
+ }
+ }
+
+ ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl, &stat);
if (ret < 0)
return ret;
mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
@@ -463,7 +342,7 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
/* Re-use LockStep-mode reset logic for Single-CPU mode */
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
- k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
+ k3_r5_lockstep_release(cluster) : k3_r5_split_release(kproc);
if (ret) {
dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
ret);
@@ -471,6 +350,14 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
}
/*
+ * Notify all threads in the wait queue when core0 state has changed so
+ * that threads waiting for this condition can be executed.
+ */
+ core->released_from_reset = true;
+ if (core == core0)
+ wake_up_interruptible(&cluster->core_transition);
+
+ /*
* Newer IP revisions like on J7200 SoCs support h/w auto-initialization
* of TCMs, so there is no need to perform the s/w memzero. This bit is
* configurable through System Firmware, the default value does perform
@@ -487,10 +374,10 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
* can be effective on all TCM addresses.
*/
dev_dbg(dev, "zeroing out ATCM memory\n");
- memset_io(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+ memset_io(kproc->mem[0].cpu_addr, 0x00, kproc->mem[0].size);
dev_dbg(dev, "zeroing out BTCM memory\n");
- memset_io(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+ memset_io(kproc->mem[1].cpu_addr, 0x00, kproc->mem[1].size);
return 0;
}
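
The core0-before-core1 ordering introduced in this hunk is a standard
wait-queue handshake: the late core sleeps on a condition, the early core
flips released_from_reset and wakes every waiter. The same pattern, modeled
with pthreads instead of kernel wait queues (the 2-second timeout is elided
for brevity):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t core_transition = PTHREAD_COND_INITIALIZER;
static bool core0_released;

static void *core0_boot(void *arg)
{
	pthread_mutex_lock(&lock);
	core0_released = true;                    /* released_from_reset = true */
	pthread_cond_broadcast(&core_transition); /* wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *core1_boot(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!core0_released) /* wait_event_interruptible_timeout() analogue */
		pthread_cond_wait(&core_transition, &lock);
	pthread_mutex_unlock(&lock);
	puts("core1 booting after core0");
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	/* Start core1's thread first on purpose: it must still boot second. */
	pthread_create(&t1, NULL, core1_boot, NULL);
	pthread_create(&t0, NULL, core0_boot, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	return 0;
}
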
@@ -513,19 +400,47 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
*/
static int k3_r5_rproc_unprepare(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv, *core0, *core1;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
int ret;
+ /*
+ * Ensure power-down of cores is sequential in split mode. Core1 must
+ * power down before Core0 to maintain the expected state. By placing
+ * the wait mechanism here in .unprepare() ops, this condition is
+ * enforced for rproc stop or shutdown requests from sysfs and device
+ * removal as well.
+ */
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core0 &&
+ core1->released_from_reset) {
+ ret = wait_event_interruptible_timeout(cluster->core_transition,
+ !core1->released_from_reset,
+ msecs_to_jiffies(2000));
+ if (ret <= 0) {
+ dev_err(dev, "can not power down core0 before core1");
+ return -EPERM;
+ }
+ }
+
/* Re-use LockStep-mode reset logic for Single-CPU mode */
ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU) ?
- k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
+ k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(kproc);
if (ret)
dev_err(dev, "unable to disable cores, ret = %d\n", ret);
+ /*
+ * Notify all threads in the wait queue when core1 state has changed so
+ * that threads waiting for this condition can be executed.
+ */
+ core->released_from_reset = false;
+ if (core == core1)
+ wake_up_interruptible(&cluster->core_transition);
+
return ret;
}
@@ -548,10 +463,10 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc)
*/
static int k3_r5_rproc_start(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
- struct k3_r5_core *core0, *core;
u32 boot_addr;
int ret;
@@ -560,41 +475,28 @@ static int k3_r5_rproc_start(struct rproc *rproc)
dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
/* boot vector need not be programmed for Core1 in LockStep mode */
- core = kproc->core;
- ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+ ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
if (ret)
return ret;
/* unhalt/run all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry_reverse(core, &cluster->cores, elem) {
- ret = k3_r5_core_run(core);
+ ret = k3_r5_core_run(core->kproc);
if (ret)
goto unroll_core_run;
}
} else {
- /* do not allow core 1 to start before core 0 */
- core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
- elem);
- if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
- dev_err(dev, "%s: can not start core 1 before core 0\n",
- __func__);
- return -EPERM;
- }
-
- ret = k3_r5_core_run(core);
+ ret = k3_r5_core_run(core->kproc);
if (ret)
return ret;
-
- core->released_from_reset = true;
- wake_up_interruptible(&cluster->core_transition);
}
return 0;
unroll_core_run:
list_for_each_entry_continue(core, &cluster->cores, elem) {
- if (k3_r5_core_halt(core))
+ if (k3_r5_core_halt(core->kproc))
dev_warn(core->dev, "core halt back failed\n");
}
return ret;
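
The unroll_core_run label above is the usual all-or-nothing rollback over a
list: on the first failure, walk back across the cores that already started
and halt them again. The same pattern over a plain array, with a core that
fails on purpose:

#include <stdio.h>

static int core_run(int i)   { return i == 2 ? -1 : 0; } /* core 2 fails */
static void core_halt(int i) { printf("halting core %d\n", i); }

static int start_all(int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = core_run(i);
		if (ret)
			goto unroll; /* stop and undo what already succeeded */
	}
	return 0;

unroll:
	while (--i >= 0) /* reverse walk over the started cores */
		core_halt(i);
	return ret;
}

int main(void)
{
	return start_all(4) ? 1 : 0;
}
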
@@ -626,33 +528,22 @@ unroll_core_run:
*/
static int k3_r5_rproc_stop(struct rproc *rproc)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct device *dev = kproc->dev;
- struct k3_r5_core *core1, *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
int ret;
/* halt all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
list_for_each_entry(core, &cluster->cores, elem) {
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret) {
core = list_prev_entry(core, elem);
goto unroll_core_halt;
}
}
} else {
- /* do not allow core 0 to stop before core 1 */
- core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
- elem);
- if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
- dev_err(dev, "%s: can not stop core 0 before core 1\n",
- __func__);
- ret = -EPERM;
- goto out;
- }
-
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret)
goto out;
}
@@ -661,7 +552,7 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
unroll_core_halt:
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- if (k3_r5_core_run(core))
+ if (k3_r5_core_run(core->kproc))
dev_warn(core->dev, "core run back failed\n");
}
out:
@@ -669,58 +560,6 @@ out:
}
/*
- * Attach to a running R5F remote processor (IPC-only mode)
- *
- * The R5F attach callback is a NOP. The remote processor is already booted, and
- * all required resources have been acquired during probe routine, so there is
- * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
- * This callback is invoked only in IPC-only mode and exists because
- * rproc_validate() checks for its existence.
- */
-static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
-
-/*
- * Detach from a running R5F remote processor (IPC-only mode)
- *
- * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
- * left in booted state in IPC-only mode. This callback is invoked only in
- * IPC-only mode and exists for sanity sake.
- */
-static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
-
-/*
- * This function implements the .get_loaded_rsc_table() callback and is used
- * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
- * firmwares follow a design-by-contract approach and are expected to have the
- * resource table at the base of the DDR region reserved for firmware usage.
- * This provides flexibility for the remote processor to be booted by different
- * bootloaders that may or may not have the ability to publish the resource table
- * address and size through a DT property. This callback is invoked only in
- * IPC-only mode.
- */
-static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
- size_t *rsc_table_sz)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- if (!kproc->rmem[0].cpu_addr) {
- dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
- return ERR_PTR(-ENOMEM);
- }
-
- /*
- * NOTE: The resource table size is currently hard-coded to a maximum
- * of 256 bytes. The most common resource table usage for K3 firmwares
- * is to only have the vdev resource entry and an optional trace entry.
- * The exact size could be computed based on resource table address, but
- * the hard-coded value suffices to support the IPC-only mode.
- */
- *rsc_table_sz = 256;
- return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
-}
-
-/*
* Internal Memory translation helper
*
* Custom function implementing the rproc .da_to_va ops to provide address
@@ -730,10 +569,9 @@ static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
*/
static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct k3_r5_core *core = kproc->core;
+ struct k3_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->priv;
void __iomem *va = NULL;
- phys_addr_t bus_addr;
u32 dev_addr, offset;
size_t size;
int i;
@@ -741,27 +579,6 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool
if (len == 0)
return NULL;
- /* handle both R5 and SoC views of ATCM and BTCM */
- for (i = 0; i < core->num_mems; i++) {
- bus_addr = core->mem[i].bus_addr;
- dev_addr = core->mem[i].dev_addr;
- size = core->mem[i].size;
-
- /* handle R5-view addresses of TCMs */
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = core->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
-
- /* handle SoC-view addresses of TCMs */
- if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
- offset = da - bus_addr;
- va = core->mem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
/* handle any SRAM regions using SoC-view addresses */
for (i = 0; i < core->num_sram; i++) {
dev_addr = core->sram[i].dev_addr;
@@ -774,19 +591,8 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool
}
}
- /* handle static DDR reserved memory regions */
- for (i = 0; i < kproc->num_rmems; i++) {
- dev_addr = kproc->rmem[i].dev_addr;
- size = kproc->rmem[i].size;
-
- if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
- offset = da - dev_addr;
- va = kproc->rmem[i].cpu_addr + offset;
- return (__force void *)va;
- }
- }
-
- return NULL;
+ /* handle both TCM and DDR memory regions */
+ return k3_rproc_da_to_va(rproc, da, len, is_iomem);
}
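
The window check performed by the SRAM loop above (and by the shared k3_rproc_da_to_va() helper it now falls through to) is the same in every branch: a device address is served by a region only if the whole [da, da + len) range fits inside it, and the CPU-side address is the region mapping plus the offset. A minimal stand-alone sketch of that check (our names, plain C, not driver code):

#include <stddef.h>
#include <stdint.h>

/* Return the CPU-side address for device address 'da' of length 'len'
 * if the range fits entirely inside one region, NULL otherwise. */
static void *da_to_va_one(uint64_t da, size_t len, uint64_t dev_addr,
			  size_t size, void *cpu_addr)
{
	if (len == 0 || da < dev_addr || da + len > dev_addr + size)
		return NULL;
	return (char *)cpu_addr + (da - dev_addr);
}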
static const struct rproc_ops k3_r5_rproc_ops = {
@@ -794,7 +600,7 @@ static const struct rproc_ops k3_r5_rproc_ops = {
.unprepare = k3_r5_rproc_unprepare,
.start = k3_r5_rproc_start,
.stop = k3_r5_rproc_stop,
- .kick = k3_r5_rproc_kick,
+ .kick = k3_rproc_kick,
.da_to_va = k3_r5_rproc_da_to_va,
};
@@ -833,11 +639,11 @@ static const struct rproc_ops k3_r5_rproc_ops = {
 * both the cores with the same settings, before reconfiguring again for
* LockStep mode.
*/
-static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
+static int k3_r5_rproc_configure(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *temp, *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *dev = kproc->dev;
- struct k3_r5_core *core0, *core, *temp;
u32 ctrl = 0, cfg = 0, stat = 0;
u32 set_cfg = 0, clr_cfg = 0;
u64 boot_vec = 0;
@@ -851,10 +657,10 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
cluster->mode == CLUSTER_MODE_SINGLECORE) {
core = core0;
} else {
- core = kproc->core;
+ core = kproc->priv;
}
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ ret = ti_sci_proc_get_status(core->kproc->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0)
return ret;
@@ -924,7 +730,7 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
* and TEINIT config is only allowed with Core0.
*/
list_for_each_entry(temp, &cluster->cores, elem) {
- ret = k3_r5_core_halt(temp);
+ ret = k3_r5_core_halt(temp->kproc);
if (ret)
goto out;
@@ -932,7 +738,7 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
}
- ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(temp->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
if (ret)
goto out;
@@ -940,14 +746,14 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
clr_cfg = 0;
- ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
} else {
- ret = k3_r5_core_halt(core);
+ ret = k3_r5_core_halt(core->kproc);
if (ret)
goto out;
- ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec,
set_cfg, clr_cfg);
}
@@ -955,93 +761,6 @@ out:
return ret;
}
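
Throughout k3_r5_rproc_configure() the boot configuration is expressed as a pair of bitmasks passed to ti_sci_proc_set_config(): bits in set_cfg are switched on, bits in clr_cfg are switched off, and all other bits are preserved. A one-line model of that update (a sketch of the assumed set/clear semantics, not the TI-SCI implementation):

#include <linux/types.h>

/* Set/clear mask update as used for the R5F boot config words:
 * clear first, then set, leaving unlisted bits untouched. */
static inline u32 apply_set_clr(u32 cfg, u32 set_cfg, u32 clr_cfg)
{
	return (cfg & ~clr_cfg) | set_cfg;
}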
-static void k3_r5_mem_release(void *data)
-{
- struct device *dev = data;
-
- of_reserved_mem_device_release(dev);
-}
-
-static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
-{
- struct device *dev = kproc->dev;
- struct device_node *np = dev_of_node(dev);
- struct device_node *rmem_np;
- struct reserved_mem *rmem;
- int num_rmems;
- int ret, i;
-
- num_rmems = of_property_count_elems_of_size(np, "memory-region",
- sizeof(phandle));
- if (num_rmems <= 0) {
- dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
- num_rmems);
- return -EINVAL;
- }
- if (num_rmems < 2) {
- dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
- num_rmems);
- return -EINVAL;
- }
-
- /* use reserved memory region 0 for vring DMA allocations */
- ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
- if (ret) {
- dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
- ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev);
- if (ret)
- return ret;
-
- num_rmems--;
- kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem)
- return -ENOMEM;
-
- /* use remaining reserved memory regions for static carveouts */
- for (i = 0; i < num_rmems; i++) {
- rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np)
- return -EINVAL;
-
- rmem = of_reserved_mem_lookup(rmem_np);
- of_node_put(rmem_np);
- if (!rmem)
- return -EINVAL;
-
- kproc->rmem[i].bus_addr = rmem->base;
- /*
- * R5Fs do not have an MMU, but have a Region Address Translator
- * (RAT) module that provides a fixed entry translation between
- * the 32-bit processor addresses to 64-bit bus addresses. The
- * RAT is programmable only by the R5F cores. Support for RAT
- * is currently not supported, so 64-bit address regions are not
- * supported. The absence of MMUs implies that the R5F device
- * addresses/supported memory regions are restricted to 32-bit
- * bus addresses, and are identical
- */
- kproc->rmem[i].dev_addr = (u32)rmem->base;
- kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
- if (!kproc->rmem[i].cpu_addr) {
- dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
- i + 1, &rmem->base, &rmem->size);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i + 1, &kproc->rmem[i].bus_addr,
- kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
- kproc->rmem[i].dev_addr);
- }
- kproc->num_rmems = num_rmems;
-
- return 0;
-}
-
/*
* Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
* split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
@@ -1055,12 +774,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
* supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
* half the original size in Split mode.
*/
-static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
+static void k3_r5_adjust_tcm_sizes(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_r5_core *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *cdev = core->dev;
- struct k3_r5_core *core0;
if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
cluster->mode == CLUSTER_MODE_SINGLECPU ||
@@ -1070,14 +788,14 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
if (core == core0) {
- WARN_ON(core->mem[0].size != SZ_64K);
- WARN_ON(core->mem[1].size != SZ_64K);
+ WARN_ON(kproc->mem[0].size != SZ_64K);
+ WARN_ON(kproc->mem[1].size != SZ_64K);
- core->mem[0].size /= 2;
- core->mem[1].size /= 2;
+ kproc->mem[0].size /= 2;
+ kproc->mem[1].size /= 2;
dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
- core->mem[0].size, core->mem[1].size);
+ kproc->mem[0].size, kproc->mem[1].size);
}
}
@@ -1094,24 +812,23 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
* actual values configured by bootloader. The driver internal device memory
* addresses for TCMs are also updated.
*/
-static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+static int k3_r5_rproc_configure_mode(struct k3_rproc *kproc)
{
- struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct k3_r5_core *core0, *core = kproc->priv;
+ struct k3_r5_cluster *cluster = core->cluster;
struct device *cdev = core->dev;
bool r_state = false, c_state = false, lockstep_en = false, single_cpu = false;
u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
u64 boot_vec = 0;
u32 atcm_enable, btcm_enable, loczrama;
- struct k3_r5_core *core0;
enum cluster_mode mode = cluster->mode;
int reset_ctrl_status;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
- ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
- &r_state, &c_state);
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ &r_state, &c_state);
if (ret) {
dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
ret);
@@ -1122,7 +839,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
r_state, c_state);
}
- reset_ctrl_status = reset_control_status(core->reset);
+ reset_ctrl_status = reset_control_status(kproc->reset);
if (reset_ctrl_status < 0) {
dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
reset_ctrl_status);
@@ -1135,7 +852,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
*/
core->released_from_reset = c_state;
- ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0) {
dev_err(cdev, "failed to get initial processor status, ret = %d\n",
@@ -1170,10 +887,10 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
kproc->rproc->ops->unprepare = NULL;
kproc->rproc->ops->start = NULL;
kproc->rproc->ops->stop = NULL;
- kproc->rproc->ops->attach = k3_r5_rproc_attach;
- kproc->rproc->ops->detach = k3_r5_rproc_detach;
+ kproc->rproc->ops->attach = k3_rproc_attach;
+ kproc->rproc->ops->detach = k3_rproc_detach;
kproc->rproc->ops->get_loaded_rsc_table =
- k3_r5_get_loaded_rsc_table;
+ k3_get_loaded_rsc_table;
} else if (!c_state) {
dev_info(cdev, "configured R5F for remoteproc mode\n");
ret = 0;
@@ -1192,19 +909,121 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
core->atcm_enable = atcm_enable;
core->btcm_enable = btcm_enable;
core->loczrama = loczrama;
- core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
- core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
+ kproc->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
+ kproc->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
}
return ret;
}
+static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
+ struct k3_rproc *kproc)
+{
+ const struct k3_rproc_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct k3_r5_core *core = kproc->priv;
+ int num_mems;
+ int i, ret;
+
+ num_mems = data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems, sizeof(*kproc->mem),
+ GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ ret = k3_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_mems; i++) {
+ /*
+ * TODO:
+ * The R5F cores can place ATCM & BTCM anywhere in their address
+ * space based on the corresponding Region Registers in the System
+ * Control coprocessor. For now, place ATCM and BTCM at
+ * addresses 0 and 0x41010000 (same as the bus address on AM65x
+ * SoCs) based on the loczrama setting, overriding the default
+ * assignment done by k3_rproc_of_get_memories().
+ */
+ if (!strcmp(data->mems[i].name, "atcm")) {
+ kproc->mem[i].dev_addr = core->loczrama ?
+ 0 : K3_R5_TCM_DEV_ADDR;
+ } else {
+ kproc->mem[i].dev_addr = core->loczrama ?
+ K3_R5_TCM_DEV_ADDR : 0;
+ }
+
+ dev_dbg(dev, "Updating bus addr %pa of memory %5s\n",
+ &kproc->mem[i].bus_addr, data->mems[i].name);
+ }
+
+ return 0;
+}
+
+static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *sram_np;
+ struct resource res;
+ int num_sram;
+ int i, ret;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
+ num_sram);
+ return 0;
+ }
+
+ core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
+ if (!core->sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np)
+ return -EINVAL;
+
+ if (!of_device_is_available(sram_np)) {
+ of_node_put(sram_np);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &res);
+ of_node_put(sram_np);
+ if (ret)
+ return -EINVAL;
+
+ core->sram[i].bus_addr = res.start;
+ core->sram[i].dev_addr = res.start;
+ core->sram[i].size = resource_size(&res);
+ core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
+ resource_size(&res));
+ if (!core->sram[i].cpu_addr) {
+ dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
+ i, &res.start);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i, &core->sram[i].bus_addr,
+ core->sram[i].size, core->sram[i].cpu_addr,
+ core->sram[i].dev_addr);
+ }
+ core->num_sram = num_sram;
+
+ return 0;
+}
+
static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- struct k3_r5_rproc *kproc;
+ struct k3_rproc *kproc;
struct k3_r5_core *core, *core1;
+ struct device_node *np;
struct device *cdev;
const char *fw_name;
struct rproc *rproc;
@@ -1213,6 +1032,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry(core, &cluster->cores, elem) {
cdev = core->dev;
+ np = dev_of_node(cdev);
ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
if (ret) {
dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
@@ -1233,13 +1053,66 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
rproc->recovery_disabled = true;
kproc = rproc->priv;
- kproc->cluster = cluster;
- kproc->core = core;
+ kproc->priv = core;
kproc->dev = cdev;
kproc->rproc = rproc;
- core->rproc = rproc;
+ kproc->data = cluster->soc_data->core_data;
+ core->kproc = kproc;
+
+ kproc->ti_sci = devm_ti_sci_get_by_phandle(cdev, "ti,sci");
+ if (IS_ERR(kproc->ti_sci)) {
+ ret = dev_err_probe(cdev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
+ kproc->ti_sci = NULL;
+ goto out;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret) {
+ dev_err(cdev, "missing 'ti,sci-dev-id' property\n");
+ goto out;
+ }
- ret = k3_r5_rproc_request_mbox(rproc);
+ kproc->reset = devm_reset_control_get_exclusive(cdev, NULL);
+ if (IS_ERR_OR_NULL(kproc->reset)) {
+ ret = PTR_ERR_OR_ZERO(kproc->reset);
+ if (!ret)
+ ret = -ENODEV;
+ dev_err_probe(cdev, ret, "failed to get reset handle\n");
+ goto out;
+ }
+
+ kproc->tsp = ti_sci_proc_of_get_tsp(cdev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp)) {
+ ret = dev_err_probe(cdev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
+ goto out;
+ }
+
+ ret = k3_r5_core_of_get_internal_memories(to_platform_device(cdev), kproc);
+ if (ret) {
+ dev_err(cdev, "failed to get internal memories, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0) {
+ dev_err(cdev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto out;
+ }
+
+ ret = devm_add_action_or_reset(cdev, k3_release_tsp, kproc->tsp);
+ if (ret)
+ goto out;
+ }
+
+ list_for_each_entry(core, &cluster->cores, elem) {
+ cdev = core->dev;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
+
+ ret = k3_rproc_request_mbox(rproc);
if (ret)
return ret;
@@ -1251,7 +1124,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
ret = k3_r5_rproc_configure(kproc);
if (ret) {
- dev_err(dev, "initial configure failed, ret = %d\n",
+ dev_err(cdev, "initial configure failed, ret = %d\n",
ret);
goto out;
}
@@ -1259,16 +1132,16 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
init_rmem:
k3_r5_adjust_tcm_sizes(kproc);
- ret = k3_r5_reserved_mem_init(kproc);
+ ret = k3_reserved_mem_init(kproc);
if (ret) {
- dev_err(dev, "reserved memory init failed, ret = %d\n",
+ dev_err(cdev, "reserved memory init failed, ret = %d\n",
ret);
goto out;
}
- ret = devm_rproc_add(dev, rproc);
+ ret = devm_rproc_add(cdev, rproc);
if (ret) {
- dev_err_probe(dev, ret, "rproc_add failed\n");
+ dev_err_probe(cdev, ret, "rproc_add failed\n");
goto out;
}
@@ -1279,26 +1152,6 @@ init_rmem:
cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE)
break;
-
- /*
- * R5 cores require to be powered on sequentially, core0
- * should be in higher power state than core1 in a cluster
- * So, wait for current core to power up before proceeding
- * to next core and put timeout of 2sec for each core.
- *
- * This waiting mechanism is necessary because
- * rproc_auto_boot_callback() for core1 can be called before
- * core0 due to thread execution order.
- */
- ret = wait_event_interruptible_timeout(cluster->core_transition,
- core->released_from_reset,
- msecs_to_jiffies(2000));
- if (ret <= 0) {
- dev_err(dev,
- "Timed out waiting for %s core to power up!\n",
- rproc->name);
- goto out;
- }
}
return 0;
@@ -1317,8 +1170,8 @@ out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
core = list_prev_entry(core, elem);
- rproc = core->rproc;
- kproc = rproc->priv;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
goto err_split;
}
return ret;
@@ -1327,7 +1180,7 @@ out:
static void k3_r5_cluster_rproc_exit(void *data)
{
struct k3_r5_cluster *cluster = platform_get_drvdata(data);
- struct k3_r5_rproc *kproc;
+ struct k3_rproc *kproc;
struct k3_r5_core *core;
struct rproc *rproc;
int ret;
@@ -1343,8 +1196,8 @@ static void k3_r5_cluster_rproc_exit(void *data)
list_last_entry(&cluster->cores, struct k3_r5_core, elem);
list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
- rproc = core->rproc;
- kproc = rproc->priv;
+ kproc = core->kproc;
+ rproc = kproc->rproc;
if (rproc->state == RPROC_ATTACHED) {
ret = rproc_detach(rproc);
@@ -1358,142 +1211,6 @@ static void k3_r5_cluster_rproc_exit(void *data)
}
}
-static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
- struct k3_r5_core *core)
-{
- static const char * const mem_names[] = {"atcm", "btcm"};
- struct device *dev = &pdev->dev;
- struct resource *res;
- int num_mems;
- int i;
-
- num_mems = ARRAY_SIZE(mem_names);
- core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
- if (!core->mem)
- return -ENOMEM;
-
- for (i = 0; i < num_mems; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- mem_names[i]);
- if (!res) {
- dev_err(dev, "found no memory resource for %s\n",
- mem_names[i]);
- return -EINVAL;
- }
- if (!devm_request_mem_region(dev, res->start,
- resource_size(res),
- dev_name(dev))) {
- dev_err(dev, "could not request %s region for resource\n",
- mem_names[i]);
- return -EBUSY;
- }
-
- /*
- * TCMs are designed in general to support RAM-like backing
- * memories. So, map these as Normal Non-Cached memories. This
- * also avoids/fixes any potential alignment faults due to
- * unaligned data accesses when using memcpy() or memset()
- * functions (normally seen with device type memory).
- */
- core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
- resource_size(res));
- if (!core->mem[i].cpu_addr) {
- dev_err(dev, "failed to map %s memory\n", mem_names[i]);
- return -ENOMEM;
- }
- core->mem[i].bus_addr = res->start;
-
- /*
- * TODO:
- * The R5F cores can place ATCM & BTCM anywhere in its address
- * based on the corresponding Region Registers in the System
- * Control coprocessor. For now, place ATCM and BTCM at
- * addresses 0 and 0x41010000 (same as the bus address on AM65x
- * SoCs) based on loczrama setting
- */
- if (!strcmp(mem_names[i], "atcm")) {
- core->mem[i].dev_addr = core->loczrama ?
- 0 : K3_R5_TCM_DEV_ADDR;
- } else {
- core->mem[i].dev_addr = core->loczrama ?
- K3_R5_TCM_DEV_ADDR : 0;
- }
- core->mem[i].size = resource_size(res);
-
- dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- mem_names[i], &core->mem[i].bus_addr,
- core->mem[i].size, core->mem[i].cpu_addr,
- core->mem[i].dev_addr);
- }
- core->num_mems = num_mems;
-
- return 0;
-}
-
-static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
- struct k3_r5_core *core)
-{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct device_node *sram_np;
- struct resource res;
- int num_sram;
- int i, ret;
-
- num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
- if (num_sram <= 0) {
- dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
- num_sram);
- return 0;
- }
-
- core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
- if (!core->sram)
- return -ENOMEM;
-
- for (i = 0; i < num_sram; i++) {
- sram_np = of_parse_phandle(np, "sram", i);
- if (!sram_np)
- return -EINVAL;
-
- if (!of_device_is_available(sram_np)) {
- of_node_put(sram_np);
- return -EINVAL;
- }
-
- ret = of_address_to_resource(sram_np, 0, &res);
- of_node_put(sram_np);
- if (ret)
- return -EINVAL;
-
- core->sram[i].bus_addr = res.start;
- core->sram[i].dev_addr = res.start;
- core->sram[i].size = resource_size(&res);
- core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
- resource_size(&res));
- if (!core->sram[i].cpu_addr) {
- dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
- i, &res.start);
- return -ENOMEM;
- }
-
- dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
- i, &core->sram[i].bus_addr,
- core->sram[i].size, core->sram[i].cpu_addr,
- core->sram[i].dev_addr);
- }
- core->num_sram = num_sram;
-
- return 0;
-}
-
-static void k3_r5_release_tsp(void *data)
-{
- struct ti_sci_proc *tsp = data;
-
- ti_sci_proc_release(tsp);
-}
-
static int k3_r5_core_of_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1539,58 +1256,12 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
goto err;
}
- core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
- if (IS_ERR(core->ti_sci)) {
- ret = dev_err_probe(dev, PTR_ERR(core->ti_sci), "failed to get ti-sci handle\n");
- core->ti_sci = NULL;
- goto err;
- }
-
- ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
- if (ret) {
- dev_err(dev, "missing 'ti,sci-dev-id' property\n");
- goto err;
- }
-
- core->reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR_OR_NULL(core->reset)) {
- ret = PTR_ERR_OR_ZERO(core->reset);
- if (!ret)
- ret = -ENODEV;
- dev_err_probe(dev, ret, "failed to get reset handle\n");
- goto err;
- }
-
- core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci);
- if (IS_ERR(core->tsp)) {
- ret = dev_err_probe(dev, PTR_ERR(core->tsp),
- "failed to construct ti-sci proc control\n");
- goto err;
- }
-
- ret = k3_r5_core_of_get_internal_memories(pdev, core);
- if (ret) {
- dev_err(dev, "failed to get internal memories, ret = %d\n",
- ret);
- goto err;
- }
-
ret = k3_r5_core_of_get_sram_memories(pdev, core);
if (ret) {
dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
goto err;
}
- ret = ti_sci_proc_request(core->tsp);
- if (ret < 0) {
- dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
- goto err;
- }
-
- ret = devm_add_action_or_reset(dev, k3_r5_release_tsp, core->tsp);
- if (ret)
- goto err;
-
platform_set_drvdata(pdev, core);
devres_close_group(dev, k3_r5_core_of_init);
@@ -1652,6 +1323,7 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
}
core = platform_get_drvdata(cpdev);
+ core->cluster = cluster;
put_device(&cpdev->dev);
list_add_tail(&core->elem, &cluster->cores);
}
@@ -1749,11 +1421,24 @@ static int k3_r5_probe(struct platform_device *pdev)
return 0;
}
+static const struct k3_rproc_mem_data r5_mems[] = {
+ { .name = "atcm", .dev_addr = 0x0 },
+ { .name = "btcm", .dev_addr = K3_R5_TCM_DEV_ADDR },
+};
+
+static const struct k3_rproc_dev_data r5_data = {
+ .mems = r5_mems,
+ .num_mems = ARRAY_SIZE(r5_mems),
+ .boot_align_addr = 0,
+ .uses_lreset = true,
+};
+
static const struct k3_r5_soc_data am65_j721e_soc_data = {
.tcm_is_double = false,
.tcm_ecc_autoinit = false,
.single_cpu_mode = false,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
@@ -1761,6 +1446,7 @@ static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data am64_soc_data = {
@@ -1768,6 +1454,7 @@ static const struct k3_r5_soc_data am64_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = true,
.is_single_core = false,
+ .core_data = &r5_data,
};
static const struct k3_r5_soc_data am62_soc_data = {
@@ -1775,6 +1462,7 @@ static const struct k3_r5_soc_data am62_soc_data = {
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
.is_single_core = true,
+ .core_data = &r5_data,
};
static const struct of_device_id k3_r5_of_match[] = {
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 5aeedeaf3c41..1af89782e116 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -380,6 +380,18 @@ static int zynqmp_r5_rproc_start(struct rproc *rproc)
dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
+ /* Request node before starting RPU core if new version of API is supported */
+ if (zynqmp_pm_feature(PM_REQUEST_NODE) > 1) {
+ ret = zynqmp_pm_request_node(r5_core->pm_domain_id,
+ ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret < 0) {
+ dev_err(r5_core->dev, "failed to request 0x%x",
+ r5_core->pm_domain_id);
+ return ret;
+ }
+ }
+
ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
if (ret)
@@ -401,10 +413,30 @@ static int zynqmp_r5_rproc_stop(struct rproc *rproc)
struct zynqmp_r5_core *r5_core = rproc->priv;
int ret;
+ /* Use release node API to stop core if new version of API is supported */
+ if (zynqmp_pm_feature(PM_RELEASE_NODE) > 1) {
+ ret = zynqmp_pm_release_node(r5_core->pm_domain_id);
+ if (ret)
+ dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check the expected version of the EEMI call before issuing it. This
+ * avoids error or warning prints from the firmware when it does not
+ * support the call.
+ */
+ if (zynqmp_pm_feature(PM_FORCE_POWERDOWN) != 1) {
+ dev_dbg(r5_core->dev, "EEMI interface %d ver 1 not supported\n",
+ PM_FORCE_POWERDOWN);
+ return -EOPNOTSUPP;
+ }
+
+ /* maintain force power down for backward compatibility */
ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret)
- dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
+ dev_err(r5_core->dev, "core force power down failed\n");
return ret;
}
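
The stop path above is now a three-step ladder keyed off the API versions the firmware advertises. Condensed into a sketch (the zynqmp_pm_*() calls are the real EEMI wrappers; the condensed ladder itself is ours):

#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>

/* Prefer the v2+ release-node call, fall back to the v1 force-powerdown
 * call, and give up when the firmware implements neither. */
static int r5_stop_sketch(u32 pm_domain_id)
{
	if (zynqmp_pm_feature(PM_RELEASE_NODE) > 1)
		return zynqmp_pm_release_node(pm_domain_id);

	if (zynqmp_pm_feature(PM_FORCE_POWERDOWN) != 1)
		return -EOPNOTSUPP;

	return zynqmp_pm_force_pwrdwn(pm_domain_id,
				      ZYNQMP_PM_REQUEST_ACK_BLOCKING);
}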
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 99f6f9784e68..d85be5899da6 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -225,6 +225,13 @@ config RESET_RZG2L_USBPHY_CTRL
Support for USBPHY Control found on RZ/G2L family. It mainly
controls reset and power down of the USB/PHY.
+config RESET_RZV2H_USB2PHY
+ tristate "Renesas RZ/V2H(P) (and similar SoCs) USB2PHY Reset driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Support for the USB2PHY port reset control found on the RZ/V2H(P)
+ SoC (and similar SoCs).
+
config RESET_SCMI
tristate "Reset driver controlled via ARM SCMI interface"
depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
@@ -279,6 +286,16 @@ config RESET_SUNXI
help
This enables the reset driver for Allwinner SoCs.
+config RESET_TH1520
+ tristate "T-HEAD 1520 reset controller"
+ depends on ARCH_THEAD || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This driver provides support for the T-HEAD TH1520 SoC reset controller,
+ which manages hardware reset lines for SoC components such as the GPU.
+ Enable this option if you need to control hardware resets on TH1520-based
+ systems.
+
config RESET_TI_SCI
tristate "TI System Control Interface (TI-SCI) reset driver"
depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 31f9904d13f9..91e6348e3351 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -31,11 +31,13 @@ obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o
obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o
+obj-$(CONFIG_RESET_RZV2H_USB2PHY) += reset-rzv2h-usb2phy.o
obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
+obj-$(CONFIG_RESET_TH1520) += reset-th1520.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_TI_TPS380X) += reset-tps380x.o
diff --git a/drivers/reset/reset-rzv2h-usb2phy.c b/drivers/reset/reset-rzv2h-usb2phy.c
new file mode 100644
index 000000000000..ae643575b067
--- /dev/null
+++ b/drivers/reset/reset-rzv2h-usb2phy.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2H(P) USB2PHY Port reset control driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/reset-controller.h>
+
+struct rzv2h_usb2phy_regval {
+ u16 reg;
+ u16 val;
+};
+
+struct rzv2h_usb2phy_reset_of_data {
+ const struct rzv2h_usb2phy_regval *init_vals;
+ unsigned int init_val_count;
+
+ u16 reset_reg;
+ u16 reset_assert_val;
+ u16 reset_deassert_val;
+ u16 reset_status_bits;
+ u16 reset_release_val;
+
+ u16 reset2_reg;
+ u16 reset2_acquire_val;
+ u16 reset2_release_val;
+};
+
+struct rzv2h_usb2phy_reset_priv {
+ const struct rzv2h_usb2phy_reset_of_data *data;
+ void __iomem *base;
+ struct device *dev;
+ struct reset_controller_dev rcdev;
+ spinlock_t lock; /* protects register accesses */
+};
+
+static inline struct rzv2h_usb2phy_reset_priv
+*rzv2h_usbphy_rcdev_to_priv(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct rzv2h_usb2phy_reset_priv, rcdev);
+}
+
+/* This function must be called only after pm_runtime_resume_and_get() has been called */
+static void rzv2h_usbphy_assert_helper(struct rzv2h_usb2phy_reset_priv *priv)
+{
+ const struct rzv2h_usb2phy_reset_of_data *data = priv->data;
+
+ scoped_guard(spinlock, &priv->lock) {
+ writel(data->reset2_acquire_val, priv->base + data->reset2_reg);
+ writel(data->reset_assert_val, priv->base + data->reset_reg);
+ }
+
+ usleep_range(11, 20);
+}
+
+static int rzv2h_usbphy_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev);
+ struct device *dev = priv->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ return ret;
+ }
+
+ rzv2h_usbphy_assert_helper(priv);
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rzv2h_usbphy_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev);
+ const struct rzv2h_usb2phy_reset_of_data *data = priv->data;
+ struct device *dev = priv->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ return ret;
+ }
+
+ scoped_guard(spinlock, &priv->lock) {
+ writel(data->reset_deassert_val, priv->base + data->reset_reg);
+ writel(data->reset2_release_val, priv->base + data->reset2_reg);
+ writel(data->reset_release_val, priv->base + data->reset_reg);
+ }
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int rzv2h_usbphy_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev);
+ struct device *dev = priv->dev;
+ int ret;
+ u32 reg;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_resume_and_get failed\n");
+ return ret;
+ }
+
+ reg = readl(priv->base + priv->data->reset_reg);
+
+ pm_runtime_put(dev);
+
+ return (reg & priv->data->reset_status_bits) == priv->data->reset_status_bits;
+}
+
+static const struct reset_control_ops rzv2h_usbphy_reset_ops = {
+ .assert = rzv2h_usbphy_reset_assert,
+ .deassert = rzv2h_usbphy_reset_deassert,
+ .status = rzv2h_usbphy_reset_status,
+};
+
+static int rzv2h_usb2phy_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ /* No special handling needed, we have only one reset line per device */
+ return 0;
+}
+
+static int rzv2h_usb2phy_reset_probe(struct platform_device *pdev)
+{
+ const struct rzv2h_usb2phy_reset_of_data *data;
+ struct rzv2h_usb2phy_reset_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ int error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(dev);
+ priv->data = data;
+ priv->dev = dev;
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ rstc = devm_reset_control_get_shared_deasserted(dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(dev, PTR_ERR(rstc),
+ "failed to get deasserted reset\n");
+
+ spin_lock_init(&priv->lock);
+
+ error = devm_pm_runtime_enable(dev);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to enable pm_runtime\n");
+
+ error = pm_runtime_resume_and_get(dev);
+ if (error)
+ return dev_err_probe(dev, error, "pm_runtime_resume_and_get failed\n");
+
+ for (unsigned int i = 0; i < data->init_val_count; i++)
+ writel(data->init_vals[i].val, priv->base + data->init_vals[i].reg);
+
+ /* keep usb2phy in asserted state */
+ rzv2h_usbphy_assert_helper(priv);
+
+ pm_runtime_put(dev);
+
+ priv->rcdev.ops = &rzv2h_usbphy_reset_ops;
+ priv->rcdev.of_reset_n_cells = 0;
+ priv->rcdev.nr_resets = 1;
+ priv->rcdev.of_xlate = rzv2h_usb2phy_reset_of_xlate;
+ priv->rcdev.of_node = dev->of_node;
+ priv->rcdev.dev = dev;
+
+ return devm_reset_controller_register(dev, &priv->rcdev);
+}
+
+/*
+ * Initialization values required to prepare the PHY to accept
+ * assert and deassert requests.
+ */
+static const struct rzv2h_usb2phy_regval rzv2h_init_vals[] = {
+ { .reg = 0xc10, .val = 0x67c },
+ { .reg = 0xc14, .val = 0x1f },
+ { .reg = 0x600, .val = 0x909 },
+};
+
+static const struct rzv2h_usb2phy_reset_of_data rzv2h_reset_of_data = {
+ .init_vals = rzv2h_init_vals,
+ .init_val_count = ARRAY_SIZE(rzv2h_init_vals),
+ .reset_reg = 0,
+ .reset_assert_val = 0x206,
+ .reset_status_bits = BIT(2),
+ .reset_deassert_val = 0x200,
+ .reset_release_val = 0x0,
+ .reset2_reg = 0xb04,
+ .reset2_acquire_val = 0x303,
+ .reset2_release_val = 0x3,
+};
+
+static const struct of_device_id rzv2h_usb2phy_reset_of_match[] = {
+ { .compatible = "renesas,r9a09g057-usb2phy-reset", .data = &rzv2h_reset_of_data },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2h_usb2phy_reset_of_match);
+
+static struct platform_driver rzv2h_usb2phy_reset_driver = {
+ .driver = {
+ .name = "rzv2h_usb2phy_reset",
+ .of_match_table = rzv2h_usb2phy_reset_of_match,
+ },
+ .probe = rzv2h_usb2phy_reset_probe,
+};
+module_platform_driver(rzv2h_usb2phy_reset_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2H(P) USB2PHY Control");
diff --git a/drivers/reset/reset-th1520.c b/drivers/reset/reset-th1520.c
new file mode 100644
index 000000000000..7874f0693e1b
--- /dev/null
+++ b/drivers/reset/reset-th1520.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/reset/thead,th1520-reset.h>
+
+/* register offset in VOSYS_REGMAP */
+#define TH1520_GPU_RST_CFG 0x0
+#define TH1520_GPU_RST_CFG_MASK GENMASK(1, 0)
+
+/* register values */
+#define TH1520_GPU_SW_GPU_RST BIT(0)
+#define TH1520_GPU_SW_CLKGEN_RST BIT(1)
+
+struct th1520_reset_priv {
+ struct reset_controller_dev rcdev;
+ struct regmap *map;
+};
+
+struct th1520_reset_map {
+ u32 bit;
+ u32 reg;
+};
+
+static const struct th1520_reset_map th1520_resets[] = {
+ [TH1520_RESET_ID_GPU] = {
+ .bit = TH1520_GPU_SW_GPU_RST,
+ .reg = TH1520_GPU_RST_CFG,
+ },
+ [TH1520_RESET_ID_GPU_CLKGEN] = {
+ .bit = TH1520_GPU_SW_CLKGEN_RST,
+ .reg = TH1520_GPU_RST_CFG,
+ }
+};
+
+static inline struct th1520_reset_priv *
+to_th1520_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct th1520_reset_priv, rcdev);
+}
+
+static int th1520_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct th1520_reset_priv *priv = to_th1520_reset(rcdev);
+ const struct th1520_reset_map *reset;
+
+ reset = &th1520_resets[id];
+
+ return regmap_update_bits(priv->map, reset->reg, reset->bit, 0);
+}
+
+static int th1520_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct th1520_reset_priv *priv = to_th1520_reset(rcdev);
+ const struct th1520_reset_map *reset;
+
+ reset = &th1520_resets[id];
+
+ return regmap_update_bits(priv->map, reset->reg, reset->bit,
+ reset->bit);
+}
+
+static const struct reset_control_ops th1520_reset_ops = {
+ .assert = th1520_reset_assert,
+ .deassert = th1520_reset_deassert,
+};
+
+static const struct regmap_config th1520_reset_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static int th1520_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct th1520_reset_priv *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->map = devm_regmap_init_mmio(dev, base,
+ &th1520_reset_regmap_config);
+ if (IS_ERR(priv->map))
+ return PTR_ERR(priv->map);
+
+ /* Initialize GPU resets to asserted state */
+ ret = regmap_update_bits(priv->map, TH1520_GPU_RST_CFG,
+ TH1520_GPU_RST_CFG_MASK, 0);
+ if (ret)
+ return ret;
+
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.nr_resets = ARRAY_SIZE(th1520_resets);
+ priv->rcdev.ops = &th1520_reset_ops;
+ priv->rcdev.of_node = dev->of_node;
+
+ return devm_reset_controller_register(dev, &priv->rcdev);
+}
+
+static const struct of_device_id th1520_reset_match[] = {
+ { .compatible = "thead,th1520-reset" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, th1520_reset_match);
+
+static struct platform_driver th1520_reset_driver = {
+ .driver = {
+ .name = "th1520-reset",
+ .of_match_table = th1520_reset_match,
+ },
+ .probe = th1520_reset_probe,
+};
+module_platform_driver(th1520_reset_driver);
+
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 SoC reset controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 40d386809d6b..87c944d4b4f3 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -746,7 +746,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
__le32 hdr[5] = { cpu_to_le32(len), };
int tlen = sizeof(hdr) + len;
unsigned long flags;
- int ret;
+ int ret = 0;
/* Word aligned channels only accept word size aligned data */
if (channel->info_word && len % 4)
@@ -1369,7 +1369,8 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
if (IS_ERR(edge->mbox_chan)) {
if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
- ret = PTR_ERR(edge->mbox_chan);
+ ret = dev_err_probe(dev, PTR_ERR(edge->mbox_chan),
+ "failed to acquire IPC mailbox\n");
goto put_node;
}
@@ -1386,6 +1387,7 @@ static int qcom_smd_parse_edge(struct device *dev,
of_node_put(syscon_np);
if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
+ dev_err(dev, "failed to get regmap from syscon: %d\n", ret);
goto put_node;
}
@@ -1501,10 +1503,8 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
}
ret = qcom_smd_parse_edge(&edge->dev, node, edge);
- if (ret) {
- dev_err(&edge->dev, "failed to parse smd edge\n");
+ if (ret)
goto unregister_dev;
- }
ret = qcom_smd_create_chrdev(edge);
if (ret) {
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 207b64c0a2fe..6ee36adcbdba 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -194,38 +194,6 @@ int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
EXPORT_SYMBOL(rpmsg_sendto);
/**
- * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
- * @ept: the rpmsg endpoint
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @ept
- * endpoint belongs to.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Return: 0 on success and an appropriate error value on failure.
- */
-int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len)
-{
- if (WARN_ON(!ept))
- return -EINVAL;
- if (!ept->ops->send_offchannel)
- return -ENXIO;
-
- return ept->ops->send_offchannel(ept, src, dst, data, len);
-}
-EXPORT_SYMBOL(rpmsg_send_offchannel);
-
-/**
* rpmsg_trysend() - send a message across to the remote processor
* @ept: the rpmsg endpoint
* @data: payload of message
@@ -302,37 +270,6 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
EXPORT_SYMBOL(rpmsg_poll);
/**
- * rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses
- * @ept: the rpmsg endpoint
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @ept
- * endpoint belongs to.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Return: 0 on success and an appropriate error value on failure.
- */
-int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len)
-{
- if (WARN_ON(!ept))
- return -EINVAL;
- if (!ept->ops->trysend_offchannel)
- return -ENXIO;
-
- return ept->ops->trysend_offchannel(ept, src, dst, data, len);
-}
-EXPORT_SYMBOL(rpmsg_trysend_offchannel);
-
-/**
* rpmsg_set_flow_control() - request remote to pause/resume transmission
* @ept: the rpmsg endpoint
* @pause: pause transmission
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 42c7007be1b5..397e4926bd02 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -50,10 +50,8 @@ struct rpmsg_device_ops {
* @destroy_ept: see @rpmsg_destroy_ept(), required
* @send: see @rpmsg_send(), required
* @sendto: see @rpmsg_sendto(), optional
- * @send_offchannel: see @rpmsg_send_offchannel(), optional
* @trysend: see @rpmsg_trysend(), required
* @trysendto: see @rpmsg_trysendto(), optional
- * @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
* @poll: see @rpmsg_poll(), optional
* @set_flow_control: see @rpmsg_set_flow_control(), optional
* @get_mtu: see @rpmsg_get_mtu(), optional
@@ -67,13 +65,9 @@ struct rpmsg_endpoint_ops {
int (*send)(struct rpmsg_endpoint *ept, void *data, int len);
int (*sendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
- int (*send_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
int (*trysend)(struct rpmsg_endpoint *ept, void *data, int len);
int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
- int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
int (*set_flow_control)(struct rpmsg_endpoint *ept, bool pause, u32 dst);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 89d7a3b8c48b..4730b1c8b322 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -141,13 +141,9 @@ static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
u32 dst);
-static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len);
static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
int len, u32 dst);
-static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len);
static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept);
static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
struct rpmsg_channel_info *chinfo);
@@ -156,10 +152,8 @@ static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
.destroy_ept = virtio_rpmsg_destroy_ept,
.send = virtio_rpmsg_send,
.sendto = virtio_rpmsg_sendto,
- .send_offchannel = virtio_rpmsg_send_offchannel,
.trysend = virtio_rpmsg_trysend,
.trysendto = virtio_rpmsg_trysendto,
- .trysend_offchannel = virtio_rpmsg_trysend_offchannel,
.get_mtu = virtio_rpmsg_get_mtu,
};
@@ -545,7 +539,7 @@ static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
* the function will immediately fail, and -ENOMEM will be returned.
*
* Normally drivers shouldn't use this function directly; instead, drivers
- * should use the appropriate rpmsg_{try}send{to, _offchannel} API
+ * should use the appropriate rpmsg_{try}send{to} API
* (see include/linux/rpmsg.h).
*
* Return: 0 on success and an appropriate error value on failure.
@@ -665,14 +659,6 @@ static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
}
-static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
-{
- struct rpmsg_device *rpdev = ept->rpdev;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
struct rpmsg_device *rpdev = ept->rpdev;
@@ -690,14 +676,6 @@ static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
}
-static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
-{
- struct rpmsg_device *rpdev = ept->rpdev;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
-
static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept)
{
struct rpmsg_device *rpdev = ept->rpdev;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 838bdc138ffe..9aec922613ce 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1388,6 +1388,18 @@ config RTC_DRV_ASM9260
This driver can also be built as a module. If so, the module
will be called rtc-asm9260.
+config RTC_DRV_CV1800
+ tristate "Sophgo CV1800 RTC"
+ depends on SOPHGO_CV1800_RTCSYS || COMPILE_TEST
+ select MFD_SYSCON
+ select REGMAP
+ help
+ If you say yes here you get support for the RTC driver of the
+ Sophgo CV1800 series SoCs.
+
+ This driver can also be built as a module. If so, the module will be
+ called rtc-cv1800.
+
config RTC_DRV_DIGICOLOR
tristate "Conexant Digicolor RTC"
depends on ARCH_DIGICOLOR || COMPILE_TEST
@@ -2088,7 +2100,7 @@ config RTC_DRV_AMLOGIC_A4
tristate "Amlogic RTC"
depends on ARCH_MESON || COMPILE_TEST
select REGMAP_MMIO
- default y
+ default ARCH_MESON
help
If you say yes here you get support for the RTC block on the
Amlogic A113L2(A4) and A113X2(A5) SoCs.
@@ -2096,4 +2108,15 @@ config RTC_DRV_AMLOGIC_A4
This driver can also be built as a module. If so, the module
will be called "rtc-amlogic-a4".
+config RTC_DRV_S32G
+ tristate "RTC driver for S32G2/S32G3 SoCs"
+ depends on ARCH_S32 || COMPILE_TEST
+ depends on COMMON_CLK
+ help
+ Say yes to enable the RTC driver for platforms based on the
+ S32G2/S32G3 SoC family.
+
+ This RTC module can be used as a wakeup source.
+ Please note that it is not battery-powered.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 31473b3276d9..4619aa2ac469 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_RTC_DRV_CADENCE) += rtc-cadence.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_CPCAP) += rtc-cpcap.o
obj-$(CONFIG_RTC_DRV_CROS_EC) += rtc-cros-ec.o
+obj-$(CONFIG_RTC_DRV_CV1800) += rtc-cv1800.o
obj-$(CONFIG_RTC_DRV_DA9052) += rtc-da9052.o
obj-$(CONFIG_RTC_DRV_DA9055) += rtc-da9055.o
obj-$(CONFIG_RTC_DRV_DA9063) += rtc-da9063.o
@@ -160,6 +161,7 @@ obj-$(CONFIG_RTC_DRV_RX8111) += rtc-rx8111.o
obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
obj-$(CONFIG_RTC_DRV_RZN1) += rtc-rzn1.o
obj-$(CONFIG_RTC_DRV_RENESAS_RTCA3) += rtc-renesas-rtca3.o
+obj-$(CONFIG_RTC_DRV_S32G) += rtc-s32g.o
obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index b88cd4fb295b..b1a2be1f9e3b 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -326,7 +326,7 @@ static void rtc_device_get_offset(struct rtc_device *rtc)
*
* Otherwise the offset seconds should be 0.
*/
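+ /*
+ * Note: start_secs is signed (time64_t) while range_max is unsigned
+ * (timeu64_t), so the comparison against range_max is done only for
+ * a non-negative start_secs; otherwise a negative start (a pre-1970
+ * start date) would be promoted to a huge unsigned value.
+ */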
- if (rtc->start_secs > rtc->range_max ||
+ if ((rtc->start_secs >= 0 && rtc->start_secs > rtc->range_max) ||
rtc->start_secs + range_secs - 1 < rtc->range_min)
rtc->offset_secs = rtc->start_secs - rtc->range_min;
else if (rtc->start_secs > rtc->range_min)
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 0eeae5bcc3aa..baf1a8ca8b2b 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -72,7 +72,7 @@ static void rtc_uie_task(struct work_struct *work)
static void rtc_uie_timer(struct timer_list *t)
{
- struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
+ struct rtc_device *rtc = timer_container_of(rtc, t, uie_timer);
unsigned long flags;
spin_lock_irqsave(&rtc->irq_lock, flags);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index aaf76406cd7d..dc741ba29fa3 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -205,7 +205,7 @@ static int rtc_read_alarm_internal(struct rtc_device *rtc,
mutex_unlock(&rtc->ops_lock);
- trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
+ trace_rtc_read_alarm(err ? 0 : rtc_tm_to_time64(&alarm->time), err);
return err;
}
diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c
index fe361652727a..13b5b1f20465 100644
--- a/drivers/rtc/lib.c
+++ b/drivers/rtc/lib.c
@@ -46,24 +46,38 @@ EXPORT_SYMBOL(rtc_year_days);
* rtc_time64_to_tm - converts time64_t to rtc_time.
*
* @time: The number of seconds since 01-01-1970 00:00:00.
- * (Must be positive.)
+ * Works for values since at least 1900
* @tm: Pointer to the struct rtc_time.
*/
void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
{
- unsigned int secs;
- int days;
+ int days, secs;
u64 u64tmp;
u32 u32tmp, udays, century, day_of_century, year_of_century, year,
day_of_year, month, day;
bool is_Jan_or_Feb, is_leap_year;
- /* time must be positive */
+ /*
+ * Get days and seconds while preserving the sign to
+ * handle negative time values (dates before 1970-01-01)
+ */
days = div_s64_rem(time, 86400, &secs);
+ /*
+ * We need 0 <= secs < 86400, which isn't guaranteed for negative
+ * values of time. Fix up accordingly.
+ */
+ if (secs < 0) {
+ days -= 1;
+ secs += 86400;
+ }
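+ /*
+ * Worked example: time = -1 (1969-12-31 23:59:59 UTC) gives
+ * days = 0, secs = -1 from div_s64_rem(); the fixup above turns
+ * that into days = -1, secs = 86399 -- the last second of the
+ * previous day, as required.
+ */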
+
/* day of the week, 1970-01-01 was a Thursday */
tm->tm_wday = (days + 4) % 7;
+ /* Ensure tm_wday is always non-negative */
+ if (tm->tm_wday < 0)
+ tm->tm_wday += 7;
/*
* The following algorithm is, basically, Proposition 6.3 of Neri
@@ -93,7 +107,7 @@ void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
* thus, is slightly different from [1].
*/
- udays = ((u32) days) + 719468;
+ udays = days + 719468;
u32tmp = 4 * udays + 3;
century = u32tmp / 146097;
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
index c30c759662e3..0eebad1fe2a0 100644
--- a/drivers/rtc/lib_test.c
+++ b/drivers/rtc/lib_test.c
@@ -6,8 +6,10 @@
/*
* Advance a date by one day.
*/
-static void advance_date(int *year, int *month, int *mday, int *yday)
+static void advance_date(int *year, int *month, int *mday, int *yday, int *wday)
{
+ *wday = (*wday + 1) % 7;
+
if (*mday != rtc_month_days(*month - 1, *year)) {
++*mday;
++*yday;
@@ -39,35 +41,38 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test, int years)
*/
time64_t total_secs = ((time64_t)years) / 400 * 146097 * 86400;
- int year = 1970;
+ int year = 1900;
int month = 1;
int mday = 1;
int yday = 1;
+ int wday = 1; /* Jan 1st 1900 was a Monday */
struct rtc_time result;
time64_t secs;
- s64 days;
+ const time64_t sec_offset = RTC_TIMESTAMP_BEGIN_1900 + ((1 * 60) + 2) * 60 + 3;
for (secs = 0; secs <= total_secs; secs += 86400) {
- rtc_time64_to_tm(secs, &result);
-
- days = div_s64(secs, 86400);
+ rtc_time64_to_tm(secs + sec_offset, &result);
- #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
- year, month, mday, yday, days
+ #define FAIL_MSG "%d/%02d/%02d (%2d, %d) : %lld", \
+ year, month, mday, yday, wday, secs + sec_offset
KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
KUNIT_ASSERT_EQ_MSG(test, month - 1, result.tm_mon, FAIL_MSG);
KUNIT_ASSERT_EQ_MSG(test, mday, result.tm_mday, FAIL_MSG);
KUNIT_ASSERT_EQ_MSG(test, yday, result.tm_yday, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, 1, result.tm_hour, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, 2, result.tm_min, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, 3, result.tm_sec, FAIL_MSG);
+ KUNIT_ASSERT_EQ_MSG(test, wday, result.tm_wday, FAIL_MSG);
- advance_date(&year, &month, &mday, &yday);
+ advance_date(&year, &month, &mday, &yday, &wday);
}
}
/*
- * Checks every day in a 160000 years interval starting on 1970-01-01
+ * Checks every day in a 160000-year interval starting on 1900-01-01
* against the expected result.
*/
static void rtc_time64_to_tm_test_date_range_160000(struct kunit *test)
@@ -76,7 +81,7 @@ static void rtc_time64_to_tm_test_date_range_160000(struct kunit *test)
}
/*
- * Checks every day in a 1000 years interval starting on 1970-01-01
+ * Checks every day in a 1000-year interval starting on 1900-01-01
* against the expected result.
*/
static void rtc_time64_to_tm_test_date_range_1000(struct kunit *test)
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index f6b0102a843a..643734dbae33 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -654,4 +654,3 @@ module_platform_driver_probe(at91_rtc_driver, at91_rtc_probe);
MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("RTC driver for Atmel AT91RM9200");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:at91_rtc");
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index 568a89e79c11..c170345ac076 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -320,7 +320,6 @@ static struct platform_driver cpcap_rtc_driver = {
module_platform_driver(cpcap_rtc_driver);
-MODULE_ALIAS("platform:cpcap-rtc");
MODULE_DESCRIPTION("CPCAP RTC driver");
MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-cv1800.c b/drivers/rtc/rtc-cv1800.c
new file mode 100644
index 000000000000..678c2c10bf58
--- /dev/null
+++ b/drivers/rtc/rtc-cv1800.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * rtc-cv1800.c: RTC driver for Sophgo cv1800 RTC
+ *
+ * Author: Jingbao Qiu <qiujingbao.dlmu@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define SEC_PULSE_GEN 0x1004
+#define ALARM_TIME 0x1008
+#define ALARM_ENABLE 0x100C
+#define SET_SEC_CNTR_VAL 0x1010
+#define SET_SEC_CNTR_TRIG 0x1014
+#define SEC_CNTR_VAL 0x1018
+
+/*
+ * The MACRO registers live in the VDDBKUP domain
+ * and do not power down
+ */
+#define MACRO_RO_T 0x14A8
+#define MACRO_RG_SET_T 0x1498
+
+#define ALARM_ENABLE_MASK BIT(0)
+#define SEL_SEC_PULSE BIT(31)
+
+struct cv1800_rtc_priv {
+ struct rtc_device *rtc_dev;
+ struct regmap *rtc_map;
+ struct clk *clk;
+ int irq;
+};
+
+static bool cv1800_rtc_enabled(struct device *dev)
+{
+ struct cv1800_rtc_priv *info = dev_get_drvdata(dev);
+ u32 reg;
+
+ regmap_read(info->rtc_map, SEC_PULSE_GEN, &reg);
+
+ return (reg & SEL_SEC_PULSE) == 0;
+}
+
+static void cv1800_rtc_enable(struct device *dev)
+{
+ struct cv1800_rtc_priv *info = dev_get_drvdata(dev);
+
+ /* Sec pulse generated internally */
+ regmap_update_bits(info->rtc_map, SEC_PULSE_GEN, SEL_SEC_PULSE, 0);
+}
+
+static int cv1800_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct cv1800_rtc_priv *info = dev_get_drvdata(dev);
+
+ regmap_write(info->rtc_map, ALARM_ENABLE, enabled);
+
+ return 0;
+}
+
+static int cv1800_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct cv1800_rtc_priv *info = dev_get_drvdata(dev);
+ unsigned long alarm_time;
+
+ alarm_time = rtc_tm_to_time64(&alrm->time);
+
+ cv1800_rtc_alarm_irq_enable(dev, 0);
+
+ regmap_write(info->rtc_map, ALARM_TIME, alarm_time);
+
+ cv1800_rtc_alarm_irq_enable(dev, alrm->enabled);
+
+ return 0;
+}
+
+static int cv1800_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct cv1800_rtc_priv *info = dev_get_drvdata(dev);
+ u32 enabled;
+ u32 time;
+
+ if (!cv1800_rtc_enabled(dev)) {
+ alarm->enabled = 0;
+ return 0;
+ }
+
+ regmap_read(info->rtc_map, ALARM_ENABLE, &enabled);
+
+ alarm->enabled = enabled & ALARM_ENABLE_MASK;
+
+ regmap_read(info->rtc_map, ALARM_TIME, &time);
+
+ rtc_time64_to_tm(time, &alarm->time);
+
+ return 0;
+}
+
+static int cv1800_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct cv1800_rtc_priv *info = dev_get_drvdata(dev);
+ u32 sec;
+
+ if (!cv1800_rtc_enabled(dev))
+ return -EINVAL;
+
+ regmap_read(info->rtc_map, SEC_CNTR_VAL, &sec);
+
+ rtc_time64_to_tm(sec, tm);
+
+ return 0;
+}
+
+static int cv1800_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct cv1800_rtc_priv *info = dev_get_drvdata(dev);
+ unsigned long sec;
+
+ sec = rtc_tm_to_time64(tm);
+
+ regmap_write(info->rtc_map, SET_SEC_CNTR_VAL, sec);
+ regmap_write(info->rtc_map, SET_SEC_CNTR_TRIG, 1);
+
+ regmap_write(info->rtc_map, MACRO_RG_SET_T, sec);
+
+ cv1800_rtc_enable(dev);
+
+ return 0;
+}
+
+static irqreturn_t cv1800_rtc_irq_handler(int irq, void *dev_id)
+{
+ struct cv1800_rtc_priv *info = dev_id;
+
+ rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ regmap_write(info->rtc_map, ALARM_ENABLE, 0);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops cv1800_rtc_ops = {
+ .read_time = cv1800_rtc_read_time,
+ .set_time = cv1800_rtc_set_time,
+ .read_alarm = cv1800_rtc_read_alarm,
+ .set_alarm = cv1800_rtc_set_alarm,
+ .alarm_irq_enable = cv1800_rtc_alarm_irq_enable,
+};
+
+static int cv1800_rtc_probe(struct platform_device *pdev)
+{
+ struct cv1800_rtc_priv *rtc;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->rtc_map = device_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(rtc->rtc_map))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->rtc_map),
+ "cannot get parent regmap\n");
+
+ rtc->irq = platform_get_irq(pdev, 0);
+ if (rtc->irq < 0)
+ return rtc->irq;
+
+ rtc->clk = devm_clk_get_enabled(pdev->dev.parent, "rtc");
+ if (IS_ERR(rtc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->clk),
+ "rtc clk not found\n");
+
+ platform_set_drvdata(pdev, rtc);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
+ rtc->rtc_dev->ops = &cv1800_rtc_ops;
+ rtc->rtc_dev->range_max = U32_MAX;
+
+ ret = devm_request_irq(&pdev->dev, rtc->irq, cv1800_rtc_irq_handler,
+ IRQF_TRIGGER_HIGH, "rtc alarm", rtc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "cannot register interrupt handler\n");
+
+ return devm_rtc_register_device(rtc->rtc_dev);
+}
+
+static const struct platform_device_id cv1800_rtc_id[] = {
+ { .name = "cv1800b-rtc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, cv1800_rtc_id);
+
+static struct platform_driver cv1800_rtc_driver = {
+ .driver = {
+ .name = "sophgo-cv1800-rtc",
+ },
+ .probe = cv1800_rtc_probe,
+ .id_table = cv1800_rtc_id,
+};
+
+module_platform_driver(cv1800_rtc_driver);
+MODULE_AUTHOR("Jingbao Qiu");
+MODULE_DESCRIPTION("Sophgo cv1800 RTC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 859397541f29..557c9b29dcc1 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -194,26 +194,17 @@ static void da9063_tm_to_data(struct rtc_time *tm, u8 *data,
config->rtc_count_year_mask;
}
-static int da9063_rtc_stop_alarm(struct device *dev)
-{
- struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
- const struct da9063_compatible_rtc_regmap *config = rtc->config;
-
- return regmap_update_bits(rtc->regmap,
- config->rtc_alarm_year_reg,
- config->rtc_alarm_on_mask,
- 0);
-}
-
-static int da9063_rtc_start_alarm(struct device *dev)
+static int da9063_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
{
struct da9063_compatible_rtc *rtc = dev_get_drvdata(dev);
const struct da9063_compatible_rtc_regmap *config = rtc->config;
+ u8 set_bit = enabled ? config->rtc_alarm_on_mask : 0;
return regmap_update_bits(rtc->regmap,
config->rtc_alarm_year_reg,
config->rtc_alarm_on_mask,
- config->rtc_alarm_on_mask);
+ set_bit);
}
static int da9063_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -312,7 +303,7 @@ static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
da9063_tm_to_data(&alrm->time, data, rtc);
- ret = da9063_rtc_stop_alarm(dev);
+ ret = da9063_rtc_alarm_irq_enable(dev, 0);
if (ret < 0) {
dev_err(dev, "Failed to stop alarm: %d\n", ret);
return ret;
@@ -330,7 +321,7 @@ static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
if (alrm->enabled) {
- ret = da9063_rtc_start_alarm(dev);
+ ret = da9063_rtc_alarm_irq_enable(dev, 1);
if (ret < 0) {
dev_err(dev, "Failed to start alarm: %d\n", ret);
return ret;
@@ -340,15 +331,6 @@ static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
-static int da9063_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- if (enabled)
- return da9063_rtc_start_alarm(dev);
- else
- return da9063_rtc_stop_alarm(dev);
-}
-
static irqreturn_t da9063_alarm_event(int irq, void *data)
{
struct da9063_compatible_rtc *rtc = data;
@@ -513,4 +495,3 @@ module_platform_driver(da9063_rtc_driver);
MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
MODULE_DESCRIPTION("Real time clock device driver for Dialog DA9063");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DA9063_DRVNAME_RTC);
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 44bba356268c..11fce47be780 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -437,4 +437,3 @@ module_platform_driver(jz4740_rtc_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
-MODULE_ALIAS("platform:jz4740-rtc");
diff --git a/drivers/rtc/rtc-loongson.c b/drivers/rtc/rtc-loongson.c
index 97e5625c064c..2ca7ffd5d7a9 100644
--- a/drivers/rtc/rtc-loongson.c
+++ b/drivers/rtc/rtc-loongson.c
@@ -129,6 +129,14 @@ static u32 loongson_rtc_handler(void *id)
{
struct loongson_rtc_priv *priv = (struct loongson_rtc_priv *)id;
+ rtc_update_irq(priv->rtcdev, 1, RTC_AF | RTC_IRQF);
+
+ /*
+ * The TOY_MATCH0_REG should be cleared 0 here,
+ * otherwise the interrupt cannot be cleared.
+ */
+ regmap_write(priv->regmap, TOY_MATCH0_REG, 0);
+
spin_lock(&priv->lock);
/* Disable RTC alarm wakeup and interrupt */
writel(readl(priv->pm_base + PM1_EN_REG) & ~RTC_EN,
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 1f58ae8b151e..c568639d2151 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/string.h>
+#include <linux/delay.h>
#ifdef CONFIG_RTC_DRV_M41T80_WDT
#include <linux/fs.h>
#include <linux/ioctl.h>
@@ -204,14 +205,14 @@ static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
return flags;
if (flags & M41T80_FLAGS_OF) {
- dev_err(&client->dev, "Oscillator failure, data is invalid.\n");
+ dev_err(&client->dev, "Oscillator failure, time may not be accurate, write time to RTC to fix it.\n");
return -EINVAL;
}
err = i2c_smbus_read_i2c_block_data(client, M41T80_REG_SSEC,
sizeof(buf), buf);
if (err < 0) {
- dev_err(&client->dev, "Unable to read date\n");
+ dev_dbg(&client->dev, "Unable to read date\n");
return err;
}
@@ -227,21 +228,31 @@ static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *in_tm)
{
struct i2c_client *client = to_i2c_client(dev);
struct m41t80_data *clientdata = i2c_get_clientdata(client);
+ struct rtc_time tm = *in_tm;
unsigned char buf[8];
int err, flags;
+ time64_t time = 0;
+ flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (flags < 0)
+ return flags;
+ if (flags & M41T80_FLAGS_OF) {
+ /* add 4 sec of oscillator stabilization time, otherwise we are 4 sec behind */
+ time = rtc_tm_to_time64(&tm);
+ rtc_time64_to_tm(time + 4, &tm);
+ }
buf[M41T80_REG_SSEC] = 0;
- buf[M41T80_REG_SEC] = bin2bcd(tm->tm_sec);
- buf[M41T80_REG_MIN] = bin2bcd(tm->tm_min);
- buf[M41T80_REG_HOUR] = bin2bcd(tm->tm_hour);
- buf[M41T80_REG_DAY] = bin2bcd(tm->tm_mday);
- buf[M41T80_REG_MON] = bin2bcd(tm->tm_mon + 1);
- buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year - 100);
- buf[M41T80_REG_WDAY] = tm->tm_wday;
+ buf[M41T80_REG_SEC] = bin2bcd(tm.tm_sec);
+ buf[M41T80_REG_MIN] = bin2bcd(tm.tm_min);
+ buf[M41T80_REG_HOUR] = bin2bcd(tm.tm_hour);
+ buf[M41T80_REG_DAY] = bin2bcd(tm.tm_mday);
+ buf[M41T80_REG_MON] = bin2bcd(tm.tm_mon + 1);
+ buf[M41T80_REG_YEAR] = bin2bcd(tm.tm_year - 100);
+ buf[M41T80_REG_WDAY] = tm.tm_wday;
/* If the square wave output is controlled in the weekday register */
if (clientdata->features & M41T80_FEATURE_SQ_ALT) {
@@ -257,20 +268,37 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_SSEC,
sizeof(buf), buf);
if (err < 0) {
- dev_err(&client->dev, "Unable to write to date registers\n");
+ dev_dbg(&client->dev, "Unable to write to date registers\n");
return err;
}
-
- /* Clear the OF bit of Flags Register */
- flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
- if (flags < 0)
- return flags;
-
- err = i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS,
- flags & ~M41T80_FLAGS_OF);
- if (err < 0) {
- dev_err(&client->dev, "Unable to write flags register\n");
- return err;
+ if (flags & M41T80_FLAGS_OF) {
+ /* OF cannot be immediately reset: oscillator has to be restarted. */
+ dev_warn(&client->dev, "OF bit is still set, kickstarting clock.\n");
+ err = i2c_smbus_write_byte_data(client, M41T80_REG_SEC, M41T80_SEC_ST);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Can't set ST bit\n");
+ return err;
+ }
+ err = i2c_smbus_write_byte_data(client, M41T80_REG_SEC, flags & ~M41T80_SEC_ST);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Can't clear ST bit\n");
+ return err;
+ }
+ /* oscillator must run for 4sec before we attempt to reset OF bit */
+ msleep(4000);
+ /* Clear the OF bit of Flags Register */
+ err = i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS, flags & ~M41T80_FLAGS_OF);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write flags register\n");
+ return err;
+ }
+ flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (flags < 0) {
+ return flags;
+ } else if (flags & M41T80_FLAGS_OF) {
+ dev_dbg(&client->dev, "Can't clear the OF bit check battery\n");
+ return err;
+ }
}
return err;
@@ -308,7 +336,7 @@ static int m41t80_alarm_irq_enable(struct device *dev, unsigned int enabled)
retval = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, flags);
if (retval < 0) {
- dev_err(dev, "Unable to enable alarm IRQ %d\n", retval);
+ dev_dbg(dev, "Unable to enable alarm IRQ %d\n", retval);
return retval;
}
return 0;
@@ -333,7 +361,7 @@ static int m41t80_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
err = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
ret & ~(M41T80_ALMON_AFE));
if (err < 0) {
- dev_err(dev, "Unable to clear AFE bit\n");
+ dev_dbg(dev, "Unable to clear AFE bit\n");
return err;
}
@@ -347,7 +375,7 @@ static int m41t80_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
err = i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS,
ret & ~(M41T80_FLAGS_AF));
if (err < 0) {
- dev_err(dev, "Unable to clear AF bit\n");
+ dev_dbg(dev, "Unable to clear AF bit\n");
return err;
}
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 6979d225a78e..692c00ff544b 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -332,6 +332,7 @@ static const struct mtk_rtc_data mt6397_rtc_data = {
static const struct of_device_id mt6397_rtc_of_match[] = {
{ .compatible = "mediatek,mt6323-rtc", .data = &mt6397_rtc_data },
+ { .compatible = "mediatek,mt6357-rtc", .data = &mt6358_rtc_data },
{ .compatible = "mediatek,mt6358-rtc", .data = &mt6358_rtc_data },
{ .compatible = "mediatek,mt6397-rtc", .data = &mt6397_rtc_data },
{ }
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 5a084d426e58..b2611697fa5e 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -285,7 +285,7 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
buf[2] = bin2bcd(tm->time.tm_mday);
buf[3] = tm->time.tm_wday & 0x07;
- err = regmap_bulk_write(pcf8563->regmap, PCF8563_REG_SC, buf,
+ err = regmap_bulk_write(pcf8563->regmap, PCF8563_REG_AMN, buf,
sizeof(buf));
if (err)
return err;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 3c1dddcc81df..e624f848c22b 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -576,13 +576,20 @@ static int pm8xxx_rtc_probe_offset(struct pm8xxx_rtc *rtc_dd)
}
/* Use UEFI storage as fallback if available */
- if (efivar_is_available()) {
- rc = pm8xxx_rtc_read_uefi_offset(rtc_dd);
- if (rc == 0)
- rtc_dd->use_uefi = true;
+ rtc_dd->use_uefi = of_property_read_bool(rtc_dd->dev->of_node,
+ "qcom,uefi-rtc-info");
+ if (!rtc_dd->use_uefi)
+ return 0;
+
+ if (!efivar_is_available()) {
+ if (IS_ENABLED(CONFIG_EFI))
+ return -EPROBE_DEFER;
+
+ dev_warn(rtc_dd->dev, "efivars not available\n");
+ rtc_dd->use_uefi = false;
}
- return 0;
+ return pm8xxx_rtc_read_uefi_offset(rtc_dd);
}
static int pm8xxx_rtc_probe(struct platform_device *pdev)
@@ -676,7 +683,6 @@ static struct platform_driver pm8xxx_rtc_driver = {
module_platform_driver(pm8xxx_rtc_driver);
-MODULE_ALIAS("platform:rtc-pm8xxx");
MODULE_DESCRIPTION("PMIC8xxx RTC driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anirudh Ghayal <aghayal@codeaurora.org>");
diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
index eeb9612a666f..c4ed43735457 100644
--- a/drivers/rtc/rtc-rzn1.c
+++ b/drivers/rtc/rtc-rzn1.c
@@ -12,6 +12,7 @@
*/
#include <linux/bcd.h>
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -22,9 +23,9 @@
#include <linux/spinlock.h>
#define RZN1_RTC_CTL0 0x00
-#define RZN1_RTC_CTL0_SLSB_SUBU 0
#define RZN1_RTC_CTL0_SLSB_SCMP BIT(4)
#define RZN1_RTC_CTL0_AMPM BIT(5)
+#define RZN1_RTC_CTL0_CEST BIT(6)
#define RZN1_RTC_CTL0_CE BIT(7)
#define RZN1_RTC_CTL1 0x04
@@ -49,6 +50,8 @@
#define RZN1_RTC_SUBU_DEV BIT(7)
#define RZN1_RTC_SUBU_DECR BIT(6)
+#define RZN1_RTC_SCMP 0x3c
+
#define RZN1_RTC_ALM 0x40
#define RZN1_RTC_ALH 0x44
#define RZN1_RTC_ALW 0x48
@@ -356,7 +359,7 @@ static int rzn1_rtc_set_offset(struct device *dev, long offset)
return 0;
}
-static const struct rtc_class_ops rzn1_rtc_ops = {
+static const struct rtc_class_ops rzn1_rtc_ops_subu = {
.read_time = rzn1_rtc_read_time,
.set_time = rzn1_rtc_set_time,
.read_alarm = rzn1_rtc_read_alarm,
@@ -366,11 +369,21 @@ static const struct rtc_class_ops rzn1_rtc_ops = {
.set_offset = rzn1_rtc_set_offset,
};
+static const struct rtc_class_ops rzn1_rtc_ops_scmp = {
+ .read_time = rzn1_rtc_read_time,
+ .set_time = rzn1_rtc_set_time,
+ .read_alarm = rzn1_rtc_read_alarm,
+ .set_alarm = rzn1_rtc_set_alarm,
+ .alarm_irq_enable = rzn1_rtc_alarm_irq_enable,
+};
+
static int rzn1_rtc_probe(struct platform_device *pdev)
{
struct rzn1_rtc *rtc;
- int irq;
- int ret;
+ u32 val, scmp_val = 0;
+ struct clk *xtal;
+ unsigned long rate;
+ int irq, ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
@@ -393,7 +406,6 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
rtc->rtcdev->alarm_offset_max = 7 * 86400;
- rtc->rtcdev->ops = &rzn1_rtc_ops;
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret < 0)
@@ -402,12 +414,44 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- /*
- * Ensure the clock counter is enabled.
- * Set 24-hour mode and possible oscillator offset compensation in SUBU mode.
- */
- writel(RZN1_RTC_CTL0_CE | RZN1_RTC_CTL0_AMPM | RZN1_RTC_CTL0_SLSB_SUBU,
- rtc->base + RZN1_RTC_CTL0);
+ /* Only switch to SCMP if we have an xtal clock with a valid rate other than 32768 Hz */
+ xtal = devm_clk_get_optional(&pdev->dev, "xtal");
+ if (IS_ERR(xtal)) {
+ ret = PTR_ERR(xtal);
+ goto dis_runtime_pm;
+ } else if (xtal) {
+ rate = clk_get_rate(xtal);
+
+ if (rate < 32000 || rate > BIT(22)) {
+ ret = -EOPNOTSUPP;
+ goto dis_runtime_pm;
+ }
+
+ if (rate != 32768)
+ scmp_val = RZN1_RTC_CTL0_SLSB_SCMP;
+ }
+
+ /* Disable controller during SUBU/SCMP setup */
+ val = readl(rtc->base + RZN1_RTC_CTL0) & ~RZN1_RTC_CTL0_CE;
+ writel(val, rtc->base + RZN1_RTC_CTL0);
+ /* Wait 2-4 32k clock cycles for the disabled controller */
+ ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL0, val,
+ !(val & RZN1_RTC_CTL0_CEST), 62, 123);
+ if (ret)
+ goto dis_runtime_pm;
+
+ /* Set desired modes leaving the controller disabled */
+ writel(RZN1_RTC_CTL0_AMPM | scmp_val, rtc->base + RZN1_RTC_CTL0);
+
+ if (scmp_val) {
+ writel(rate - 1, rtc->base + RZN1_RTC_SCMP);
+ rtc->rtcdev->ops = &rzn1_rtc_ops_scmp;
+ } else {
+ rtc->rtcdev->ops = &rzn1_rtc_ops_subu;
+ }
+
+ /* Enable controller finally */
+ writel(RZN1_RTC_CTL0_CE | RZN1_RTC_CTL0_AMPM | scmp_val, rtc->base + RZN1_RTC_CTL0);
/* Disable all interrupts */
writel(0, rtc->base + RZN1_RTC_CTL1);
@@ -444,6 +488,11 @@ dis_runtime_pm:
static void rzn1_rtc_remove(struct platform_device *pdev)
{
+ struct rzn1_rtc *rtc = platform_get_drvdata(pdev);
+
+ /* Disable all interrupts */
+ writel(0, rtc->base + RZN1_RTC_CTL1);
+
pm_runtime_put(&pdev->dev);
}
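To make the SCMP arithmetic above concrete: any xtal rate in [32000 Hz, 2^22 Hz] other than 32768 Hz selects SCMP mode and programs rate - 1 into RZN1_RTC_SCMP, so the compare counter wraps exactly once per second. A standalone sketch (not from the patch) with an assumed 4 MHz crystal:

#include <stdio.h>

int main(void)
{
	unsigned long rate = 4000000;	/* assumed xtal rate in Hz */

	if (rate < 32000 || rate > (1UL << 22))
		return 1;	/* the driver bails out with -EOPNOTSUPP */

	if (rate != 32768)
		printf("SCMP mode, RZN1_RTC_SCMP = %lu\n", rate - 1);	/* 3999999 */
	else
		printf("SUBU mode (32.768 kHz crystal)\n");
	return 0;
}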
diff --git a/drivers/rtc/rtc-s32g.c b/drivers/rtc/rtc-s32g.c
new file mode 100644
index 000000000000..3a0818e972eb
--- /dev/null
+++ b/drivers/rtc/rtc-s32g.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define RTCC_OFFSET 0x4ul
+#define RTCS_OFFSET 0x8ul
+#define APIVAL_OFFSET 0x10ul
+
+/* RTCC fields */
+#define RTCC_CNTEN BIT(31)
+#define RTCC_APIEN BIT(15)
+#define RTCC_APIIE BIT(14)
+#define RTCC_CLKSEL_MASK GENMASK(13, 12)
+#define RTCC_DIV512EN BIT(11)
+#define RTCC_DIV32EN BIT(10)
+
+/* RTCS fields */
+#define RTCS_INV_API BIT(17)
+#define RTCS_APIF BIT(13)
+
+#define APIVAL_MAX_VAL GENMASK(31, 0)
+#define RTC_SYNCH_TIMEOUT (100 * USEC_PER_MSEC)
+
+/*
+ * On S32G2 and S32G3 SoCs, RTC clock source1 is reserved
+ * and must not be used.
+ */
+#define RTC_CLK_SRC1_RESERVED BIT(1)
+
+/*
+ * The S32G RTC module has 512 and 32 hardware frequency divisors
+ * (DIV512 and DIV32) which can be combined to achieve higher
+ * counter ranges by lowering the RTC frequency.
+ */
+enum {
+ DIV1 = 1,
+ DIV32 = 32,
+ DIV512 = 512,
+ DIV512_32 = 16384
+};
+
+static const char *const rtc_clk_src[] = {
+ "source0",
+ "source1",
+ "source2",
+ "source3"
+};
+
+struct rtc_priv {
+ struct rtc_device *rdev;
+ void __iomem *rtc_base;
+ struct clk *ipg;
+ struct clk *clk_src;
+ const struct rtc_soc_data *rtc_data;
+ u64 rtc_hz;
+ time64_t sleep_sec;
+ int irq;
+ u32 clk_src_idx;
+};
+
+struct rtc_soc_data {
+ u32 clk_div;
+ u32 reserved_clk_mask;
+};
+
+static const struct rtc_soc_data rtc_s32g2_data = {
+ .clk_div = DIV512_32,
+ .reserved_clk_mask = RTC_CLK_SRC1_RESERVED,
+};
+
+static irqreturn_t s32g_rtc_handler(int irq, void *dev)
+{
+ struct rtc_priv *priv = platform_get_drvdata(dev);
+ u32 status;
+
+ status = readl(priv->rtc_base + RTCS_OFFSET);
+
+ if (status & RTCS_APIF) {
+ writel(0x0, priv->rtc_base + APIVAL_OFFSET);
+ writel(status | RTCS_APIF, priv->rtc_base + RTCS_OFFSET);
+ }
+
+ rtc_update_irq(priv->rdev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * This function does not really read the time from the RTC since the S32G
+ * RTC has several limitations. Thus, to set up the alarm, use the system time.
+ */
+static int s32g_rtc_read_time(struct device *dev,
+ struct rtc_time *tm)
+{
+ struct rtc_priv *priv = dev_get_drvdata(dev);
+ time64_t sec;
+
+ if (check_add_overflow(ktime_get_real_seconds(),
+ priv->sleep_sec, &sec))
+ return -ERANGE;
+
+ rtc_time64_to_tm(sec, tm);
+
+ return 0;
+}
+
+static int s32g_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_priv *priv = dev_get_drvdata(dev);
+ u32 rtcc, rtcs;
+
+ rtcc = readl(priv->rtc_base + RTCC_OFFSET);
+ rtcs = readl(priv->rtc_base + RTCS_OFFSET);
+
+ alrm->enabled = rtcc & RTCC_APIIE;
+ if (alrm->enabled)
+ alrm->pending = !(rtcs & RTCS_APIF);
+
+ return 0;
+}
+
+static int s32g_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct rtc_priv *priv = dev_get_drvdata(dev);
+ u32 rtcc;
+
+ /*
+ * RTC API functionality is used both for triggering interrupts
+ * and as a wakeup event. Hence it should always be enabled.
+ */
+ rtcc = readl(priv->rtc_base + RTCC_OFFSET);
+ rtcc |= RTCC_APIEN | RTCC_APIIE;
+ writel(rtcc, priv->rtc_base + RTCC_OFFSET);
+
+ return 0;
+}
+
+static int s32g_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_priv *priv = dev_get_drvdata(dev);
+ unsigned long long cycles;
+ long long t_offset;
+ time64_t alrm_time;
+ u32 rtcs;
+ int ret;
+
+ alrm_time = rtc_tm_to_time64(&alrm->time);
+ t_offset = alrm_time - ktime_get_real_seconds() - priv->sleep_sec;
+ if (t_offset < 0)
+ return -ERANGE;
+
+ cycles = t_offset * priv->rtc_hz;
+ if (cycles > APIVAL_MAX_VAL)
+ return -ERANGE;
+
+ /*
+ * APIVAL could have been reset from the IRQ handler.
+ * Hence, we wait in case there is a synchronization process.
+ */
+ ret = read_poll_timeout(readl, rtcs, !(rtcs & RTCS_INV_API),
+ 0, RTC_SYNCH_TIMEOUT, false, priv->rtc_base + RTCS_OFFSET);
+ if (ret)
+ return ret;
+
+ writel(cycles, priv->rtc_base + APIVAL_OFFSET);
+
+ return read_poll_timeout(readl, rtcs, !(rtcs & RTCS_INV_API),
+ 0, RTC_SYNCH_TIMEOUT, false, priv->rtc_base + RTCS_OFFSET);
+}
+
+/*
+ * Disable the 32-bit free running counter.
+ * This allows Clock Source and Divisors selection
+ * to be performed without causing synchronization issues.
+ */
+static void s32g_rtc_disable(struct rtc_priv *priv)
+{
+ u32 rtcc = readl(priv->rtc_base + RTCC_OFFSET);
+
+ rtcc &= ~RTCC_CNTEN;
+ writel(rtcc, priv->rtc_base + RTCC_OFFSET);
+}
+
+static void s32g_rtc_enable(struct rtc_priv *priv)
+{
+ u32 rtcc = readl(priv->rtc_base + RTCC_OFFSET);
+
+ rtcc |= RTCC_CNTEN;
+ writel(rtcc, priv->rtc_base + RTCC_OFFSET);
+}
+
+static int rtc_clk_src_setup(struct rtc_priv *priv)
+{
+ u32 rtcc;
+
+ rtcc = FIELD_PREP(RTCC_CLKSEL_MASK, priv->clk_src_idx);
+
+ switch (priv->rtc_data->clk_div) {
+ case DIV512_32:
+ rtcc |= RTCC_DIV512EN;
+ rtcc |= RTCC_DIV32EN;
+ break;
+ case DIV512:
+ rtcc |= RTCC_DIV512EN;
+ break;
+ case DIV32:
+ rtcc |= RTCC_DIV32EN;
+ break;
+ case DIV1:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rtcc |= RTCC_APIEN | RTCC_APIIE;
+ /*
+ * Make sure the CNTEN is 0 before we configure
+ * the clock source and dividers.
+ */
+ s32g_rtc_disable(priv);
+ writel(rtcc, priv->rtc_base + RTCC_OFFSET);
+ s32g_rtc_enable(priv);
+
+ return 0;
+}
+
+static const struct rtc_class_ops rtc_ops = {
+ .read_time = s32g_rtc_read_time,
+ .read_alarm = s32g_rtc_read_alarm,
+ .set_alarm = s32g_rtc_set_alarm,
+ .alarm_irq_enable = s32g_rtc_alarm_irq_enable,
+};
+
+static int rtc_clk_dts_setup(struct rtc_priv *priv,
+ struct device *dev)
+{
+ u32 i;
+
+ priv->ipg = devm_clk_get_enabled(dev, "ipg");
+ if (IS_ERR(priv->ipg))
+ return dev_err_probe(dev, PTR_ERR(priv->ipg),
+ "Failed to get 'ipg' clock\n");
+
+ for (i = 0; i < ARRAY_SIZE(rtc_clk_src); i++) {
+ if (priv->rtc_data->reserved_clk_mask & BIT(i))
+ return -EOPNOTSUPP;
+
+ priv->clk_src = devm_clk_get_enabled(dev, rtc_clk_src[i]);
+ if (!IS_ERR(priv->clk_src)) {
+ priv->clk_src_idx = i;
+ break;
+ }
+ }
+
+ if (IS_ERR(priv->clk_src))
+ return dev_err_probe(dev, PTR_ERR(priv->clk_src),
+ "Failed to get rtc module clock source\n");
+
+ return 0;
+}
+
+static int s32g_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtc_priv *priv;
+ unsigned long rtc_hz;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->rtc_data = of_device_get_match_data(dev);
+ if (!priv->rtc_data)
+ return -ENODEV;
+
+ priv->rtc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->rtc_base))
+ return PTR_ERR(priv->rtc_base);
+
+ device_init_wakeup(dev, true);
+
+ ret = rtc_clk_dts_setup(priv, dev);
+ if (ret)
+ return ret;
+
+ priv->rdev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(priv->rdev))
+ return PTR_ERR(priv->rdev);
+
+ ret = rtc_clk_src_setup(priv);
+ if (ret)
+ return ret;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0) {
+ ret = priv->irq;
+ goto disable_rtc;
+ }
+
+ rtc_hz = clk_get_rate(priv->clk_src);
+ if (!rtc_hz) {
+ dev_err(dev, "Failed to get RTC frequency\n");
+ ret = -EINVAL;
+ goto disable_rtc;
+ }
+
+ priv->rtc_hz = DIV_ROUND_UP(rtc_hz, priv->rtc_data->clk_div);
+
+ platform_set_drvdata(pdev, priv);
+ priv->rdev->ops = &rtc_ops;
+
+ ret = devm_request_irq(dev, priv->irq,
+ s32g_rtc_handler, 0, dev_name(dev), pdev);
+ if (ret) {
+ dev_err(dev, "Request interrupt %d failed, error: %d\n",
+ priv->irq, ret);
+ goto disable_rtc;
+ }
+
+ ret = devm_rtc_register_device(priv->rdev);
+ if (ret)
+ goto disable_rtc;
+
+ return 0;
+
+disable_rtc:
+ s32g_rtc_disable(priv);
+ return ret;
+}
+
+static int s32g_rtc_suspend(struct device *dev)
+{
+ struct rtc_priv *priv = dev_get_drvdata(dev);
+ u32 apival = readl(priv->rtc_base + APIVAL_OFFSET);
+
+ if (check_add_overflow(priv->sleep_sec, div64_u64(apival, priv->rtc_hz),
+ &priv->sleep_sec)) {
+ dev_warn(dev, "Overflow on sleep cycles occurred. Resetting to 0.\n");
+ priv->sleep_sec = 0;
+ }
+
+ return 0;
+}
+
+static int s32g_rtc_resume(struct device *dev)
+{
+ struct rtc_priv *priv = dev_get_drvdata(dev);
+
+ /*
+ * The transition from resume to run is a reset event, which leads
+ * to the RTC registers being reset after resume from suspend. It is
+ * uncommon, but this behaviour has been observed on the S32G RTC
+ * after issuing a Suspend to RAM operation. Thus, reconfigure the
+ * RTC registers on the resume path.
+ */
+ return rtc_clk_src_setup(priv);
+}
+
+static const struct of_device_id rtc_dt_ids[] = {
+ { .compatible = "nxp,s32g2-rtc", .data = &rtc_s32g2_data },
+ { /* sentinel */ },
+};
+
+static DEFINE_SIMPLE_DEV_PM_OPS(s32g_rtc_pm_ops,
+ s32g_rtc_suspend, s32g_rtc_resume);
+
+static struct platform_driver s32g_rtc_driver = {
+ .driver = {
+ .name = "s32g-rtc",
+ .pm = pm_sleep_ptr(&s32g_rtc_pm_ops),
+ .of_match_table = rtc_dt_ids,
+ },
+ .probe = s32g_rtc_probe,
+};
+module_platform_driver(s32g_rtc_driver);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP RTC driver for S32G2/S32G3");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 58c957eb753d..5dd575865adf 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -609,4 +609,3 @@ module_platform_driver(s3c_rtc_driver);
MODULE_DESCRIPTION("Samsung S3C RTC Driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-rtc");
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 9ea40f40188f..f15ef3aa82a0 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -5,6 +5,7 @@
* Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2006 Jamie Lenehan
* Copyright (C) 2008 Angelo Castello
+ * Copyright (C) 2025 Wolfram Sang, Renesas Electronics Corporation
*
* Based on the old arch/sh/kernel/cpu/rtc.c by:
*
@@ -31,7 +32,7 @@
/* Default values for RZ/A RTC */
#define rtc_reg_size sizeof(u16)
#define RTC_BIT_INVERTED 0 /* no chip bugs */
-#define RTC_CAP_4_DIGIT_YEAR (1 << 0)
+#define RTC_CAP_4_DIGIT_YEAR BIT(0)
#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
#endif
@@ -70,62 +71,35 @@
*/
/* ALARM Bits - or with BCD encoded value */
-#define AR_ENB 0x80 /* Enable for alarm cmp */
-
-/* Period Bits */
-#define PF_HP 0x100 /* Enable Half Period to support 8,32,128Hz */
-#define PF_COUNT 0x200 /* Half periodic counter */
-#define PF_OXS 0x400 /* Periodic One x Second */
-#define PF_KOU 0x800 /* Kernel or User periodic request 1=kernel */
-#define PF_MASK 0xf00
+#define AR_ENB BIT(7) /* Enable for alarm cmp */
/* RCR1 Bits */
-#define RCR1_CF 0x80 /* Carry Flag */
-#define RCR1_CIE 0x10 /* Carry Interrupt Enable */
-#define RCR1_AIE 0x08 /* Alarm Interrupt Enable */
-#define RCR1_AF 0x01 /* Alarm Flag */
+#define RCR1_CF BIT(7) /* Carry Flag */
+#define RCR1_CIE BIT(4) /* Carry Interrupt Enable */
+#define RCR1_AIE BIT(3) /* Alarm Interrupt Enable */
+#define RCR1_AF BIT(0) /* Alarm Flag */
/* RCR2 Bits */
-#define RCR2_PEF 0x80 /* PEriodic interrupt Flag */
-#define RCR2_PESMASK 0x70 /* Periodic interrupt Set */
-#define RCR2_RTCEN 0x08 /* ENable RTC */
-#define RCR2_ADJ 0x04 /* ADJustment (30-second) */
-#define RCR2_RESET 0x02 /* Reset bit */
-#define RCR2_START 0x01 /* Start bit */
+#define RCR2_RTCEN BIT(3) /* ENable RTC */
+#define RCR2_ADJ BIT(2) /* ADJustment (30-second) */
+#define RCR2_RESET BIT(1) /* Reset bit */
+#define RCR2_START BIT(0) /* Start bit */
struct sh_rtc {
void __iomem *regbase;
- unsigned long regsize;
- struct resource *res;
int alarm_irq;
- int periodic_irq;
- int carry_irq;
struct clk *clk;
struct rtc_device *rtc_dev;
- spinlock_t lock;
+ spinlock_t lock; /* protecting register access */
unsigned long capabilities; /* See asm/rtc.h for cap bits */
- unsigned short periodic_freq;
};
-static int __sh_rtc_interrupt(struct sh_rtc *rtc)
+static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
{
+ struct sh_rtc *rtc = dev_id;
unsigned int tmp, pending;
- tmp = readb(rtc->regbase + RCR1);
- pending = tmp & RCR1_CF;
- tmp &= ~RCR1_CF;
- writeb(tmp, rtc->regbase + RCR1);
-
- /* Users have requested One x Second IRQ */
- if (pending && rtc->periodic_freq & PF_OXS)
- rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
-
- return pending;
-}
-
-static int __sh_rtc_alarm(struct sh_rtc *rtc)
-{
- unsigned int tmp, pending;
+ spin_lock(&rtc->lock);
tmp = readb(rtc->regbase + RCR1);
pending = tmp & RCR1_AF;
@@ -135,84 +109,12 @@ static int __sh_rtc_alarm(struct sh_rtc *rtc)
if (pending)
rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
- return pending;
-}
-
-static int __sh_rtc_periodic(struct sh_rtc *rtc)
-{
- unsigned int tmp, pending;
-
- tmp = readb(rtc->regbase + RCR2);
- pending = tmp & RCR2_PEF;
- tmp &= ~RCR2_PEF;
- writeb(tmp, rtc->regbase + RCR2);
-
- if (!pending)
- return 0;
-
- /* Half period enabled than one skipped and the next notified */
- if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT))
- rtc->periodic_freq &= ~PF_COUNT;
- else {
- if (rtc->periodic_freq & PF_HP)
- rtc->periodic_freq |= PF_COUNT;
- rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
- }
-
- return pending;
-}
-
-static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
-{
- struct sh_rtc *rtc = dev_id;
- int ret;
-
- spin_lock(&rtc->lock);
- ret = __sh_rtc_interrupt(rtc);
- spin_unlock(&rtc->lock);
-
- return IRQ_RETVAL(ret);
-}
-
-static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
-{
- struct sh_rtc *rtc = dev_id;
- int ret;
-
- spin_lock(&rtc->lock);
- ret = __sh_rtc_alarm(rtc);
- spin_unlock(&rtc->lock);
-
- return IRQ_RETVAL(ret);
-}
-
-static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
-{
- struct sh_rtc *rtc = dev_id;
- int ret;
-
- spin_lock(&rtc->lock);
- ret = __sh_rtc_periodic(rtc);
- spin_unlock(&rtc->lock);
-
- return IRQ_RETVAL(ret);
-}
-
-static irqreturn_t sh_rtc_shared(int irq, void *dev_id)
-{
- struct sh_rtc *rtc = dev_id;
- int ret;
-
- spin_lock(&rtc->lock);
- ret = __sh_rtc_interrupt(rtc);
- ret |= __sh_rtc_alarm(rtc);
- ret |= __sh_rtc_periodic(rtc);
spin_unlock(&rtc->lock);
- return IRQ_RETVAL(ret);
+ return IRQ_RETVAL(pending);
}
-static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
+static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
unsigned int tmp;
@@ -229,45 +131,7 @@ static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
writeb(tmp, rtc->regbase + RCR1);
spin_unlock_irq(&rtc->lock);
-}
-
-static int sh_rtc_proc(struct device *dev, struct seq_file *seq)
-{
- struct sh_rtc *rtc = dev_get_drvdata(dev);
- unsigned int tmp;
-
- tmp = readb(rtc->regbase + RCR1);
- seq_printf(seq, "carry_IRQ\t: %s\n", (tmp & RCR1_CIE) ? "yes" : "no");
-
- tmp = readb(rtc->regbase + RCR2);
- seq_printf(seq, "periodic_IRQ\t: %s\n",
- (tmp & RCR2_PESMASK) ? "yes" : "no");
-
- return 0;
-}
-
-static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
-{
- struct sh_rtc *rtc = dev_get_drvdata(dev);
- unsigned int tmp;
-
- spin_lock_irq(&rtc->lock);
-
- tmp = readb(rtc->regbase + RCR1);
-
- if (!enable)
- tmp &= ~RCR1_CIE;
- else
- tmp |= RCR1_CIE;
-
- writeb(tmp, rtc->regbase + RCR1);
-
- spin_unlock_irq(&rtc->lock);
-}
-static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- sh_rtc_setaie(dev, enabled);
return 0;
}
@@ -320,14 +184,8 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_sec--;
#endif
- /* only keep the carry interrupt enabled if UIE is on */
- if (!(rtc->periodic_freq & PF_OXS))
- sh_rtc_setcie(dev, 0);
-
- dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
- "mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour,
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
+ __func__, tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
return 0;
@@ -461,16 +319,17 @@ static const struct rtc_class_ops sh_rtc_ops = {
.set_time = sh_rtc_set_time,
.read_alarm = sh_rtc_read_alarm,
.set_alarm = sh_rtc_set_alarm,
- .proc = sh_rtc_proc,
.alarm_irq_enable = sh_rtc_alarm_irq_enable,
};
static int __init sh_rtc_probe(struct platform_device *pdev)
{
struct sh_rtc *rtc;
- struct resource *res;
+ struct resource *res, *req_res;
char clk_name[14];
int clk_id, ret;
+ unsigned int tmp;
+ resource_size_t regsize;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (unlikely(!rtc))
@@ -478,34 +337,32 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
spin_lock_init(&rtc->lock);
- /* get periodic/carry/alarm irqs */
ret = platform_get_irq(pdev, 0);
if (unlikely(ret <= 0)) {
dev_err(&pdev->dev, "No IRQ resource\n");
return -ENOENT;
}
- rtc->periodic_irq = ret;
- rtc->carry_irq = platform_get_irq(pdev, 1);
- rtc->alarm_irq = platform_get_irq(pdev, 2);
+ if (!pdev->dev.of_node)
+ rtc->alarm_irq = platform_get_irq(pdev, 2);
+ else
+ rtc->alarm_irq = ret;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(res == NULL)) {
+ if (!res) {
dev_err(&pdev->dev, "No IO resource\n");
return -ENOENT;
}
- rtc->regsize = resource_size(res);
-
- rtc->res = devm_request_mem_region(&pdev->dev, res->start,
- rtc->regsize, pdev->name);
- if (unlikely(!rtc->res))
+ regsize = resource_size(res);
+ req_res = devm_request_mem_region(&pdev->dev, res->start, regsize, pdev->name);
+ if (!req_res)
return -EBUSY;
- rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start, rtc->regsize);
- if (unlikely(!rtc->regbase))
+ rtc->regbase = devm_ioremap(&pdev->dev, req_res->start, regsize);
+ if (!rtc->regbase)
return -EINVAL;
if (!pdev->dev.of_node) {
@@ -515,8 +372,9 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
clk_id = 0;
snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id);
- } else
+ } else {
snprintf(clk_name, sizeof(clk_name), "fck");
+ }
rtc->clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(rtc->clk)) {
@@ -550,51 +408,19 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
}
#endif
- if (rtc->carry_irq <= 0) {
- /* register shared periodic/carry/alarm irq */
- ret = devm_request_irq(&pdev->dev, rtc->periodic_irq,
- sh_rtc_shared, 0, "sh-rtc", rtc);
- if (unlikely(ret)) {
- dev_err(&pdev->dev,
- "request IRQ failed with %d, IRQ %d\n", ret,
- rtc->periodic_irq);
- goto err_unmap;
- }
- } else {
- /* register periodic/carry/alarm irqs */
- ret = devm_request_irq(&pdev->dev, rtc->periodic_irq,
- sh_rtc_periodic, 0, "sh-rtc period", rtc);
- if (unlikely(ret)) {
- dev_err(&pdev->dev,
- "request period IRQ failed with %d, IRQ %d\n",
- ret, rtc->periodic_irq);
- goto err_unmap;
- }
-
- ret = devm_request_irq(&pdev->dev, rtc->carry_irq,
- sh_rtc_interrupt, 0, "sh-rtc carry", rtc);
- if (unlikely(ret)) {
- dev_err(&pdev->dev,
- "request carry IRQ failed with %d, IRQ %d\n",
- ret, rtc->carry_irq);
- goto err_unmap;
- }
-
- ret = devm_request_irq(&pdev->dev, rtc->alarm_irq,
- sh_rtc_alarm, 0, "sh-rtc alarm", rtc);
- if (unlikely(ret)) {
- dev_err(&pdev->dev,
- "request alarm IRQ failed with %d, IRQ %d\n",
- ret, rtc->alarm_irq);
- goto err_unmap;
- }
+ ret = devm_request_irq(&pdev->dev, rtc->alarm_irq, sh_rtc_alarm, 0, "sh-rtc", rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "request alarm IRQ failed with %d, IRQ %d\n",
+ ret, rtc->alarm_irq);
+ goto err_unmap;
}
platform_set_drvdata(pdev, rtc);
/* everything disabled by default */
- sh_rtc_setaie(&pdev->dev, 0);
- sh_rtc_setcie(&pdev->dev, 0);
+ tmp = readb(rtc->regbase + RCR1);
+ tmp &= ~(RCR1_CIE | RCR1_AIE);
+ writeb(tmp, rtc->regbase + RCR1);
rtc->rtc_dev->ops = &sh_rtc_ops;
rtc->rtc_dev->max_user_freq = 256;
@@ -624,36 +450,27 @@ static void __exit sh_rtc_remove(struct platform_device *pdev)
{
struct sh_rtc *rtc = platform_get_drvdata(pdev);
- sh_rtc_setaie(&pdev->dev, 0);
- sh_rtc_setcie(&pdev->dev, 0);
+ sh_rtc_alarm_irq_enable(&pdev->dev, 0);
clk_disable(rtc->clk);
}
-static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
+static int __maybe_unused sh_rtc_suspend(struct device *dev)
{
struct sh_rtc *rtc = dev_get_drvdata(dev);
- irq_set_irq_wake(rtc->periodic_irq, enabled);
-
- if (rtc->carry_irq > 0) {
- irq_set_irq_wake(rtc->carry_irq, enabled);
- irq_set_irq_wake(rtc->alarm_irq, enabled);
- }
-}
-
-static int __maybe_unused sh_rtc_suspend(struct device *dev)
-{
if (device_may_wakeup(dev))
- sh_rtc_set_irq_wake(dev, 1);
+ irq_set_irq_wake(rtc->alarm_irq, 1);
return 0;
}
static int __maybe_unused sh_rtc_resume(struct device *dev)
{
+ struct sh_rtc *rtc = dev_get_drvdata(dev);
+
if (device_may_wakeup(dev))
- sh_rtc_set_irq_wake(dev, 0);
+ irq_set_irq_wake(rtc->alarm_irq, 0);
return 0;
}
@@ -684,8 +501,8 @@ static struct platform_driver sh_rtc_platform_driver __refdata = {
module_platform_driver_probe(sh_rtc_platform_driver, sh_rtc_probe);
MODULE_DESCRIPTION("SuperH on-chip RTC driver");
-MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, "
- "Jamie Lenehan <lenehan@twibble.org>, "
- "Angelo Castello <angelo.castello@st.com>");
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
+MODULE_AUTHOR("Jamie Lenehan <lenehan@twibble.org>");
+MODULE_AUTHOR("Angelo Castello <angelo.castello@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 1b715db47160..ef8fb88aab48 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -1283,7 +1283,6 @@ static struct platform_driver stm32_rtc_driver = {
module_platform_driver(stm32_rtc_driver);
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 Real Time Clock driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index a9f5b9466fb5..94f995febe5b 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -107,7 +107,7 @@ static const struct rtc_class_ops test_rtc_ops = {
static void test_rtc_alarm_handler(struct timer_list *t)
{
- struct rtc_test_data *rtd = from_timer(rtd, t, alarm);
+ struct rtc_test_data *rtd = timer_container_of(rtd, t, alarm);
rtc_update_irq(rtd->rtc, 1, RTC_AF | RTC_IRQF);
}
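The from_timer() to timer_container_of() conversions that recur from here on are a pure rename: both recover the structure embedding the timer_list handed to the callback. A minimal sketch (struct and function names are illustrative, not from the patch):

#include <linux/timer.h>

struct example_dev {
	struct timer_list timer;
	int pending;
};

static void example_timeout(struct timer_list *t)
{
	/* map the timer_list back to the example_dev embedding it */
	struct example_dev *dev = timer_container_of(dev, t, timer);

	dev->pending = 0;
}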
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index cf36d3bafeca..b16efecfde4b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1493,7 +1493,7 @@ static void dasd_device_timeout(struct timer_list *t)
unsigned long flags;
struct dasd_device *device;
- device = from_timer(device, t, timer);
+ device = timer_container_of(device, t, timer);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
@@ -2677,7 +2677,7 @@ static void dasd_block_timeout(struct timer_list *t)
unsigned long flags;
struct dasd_block *block;
- block = from_timer(block, t, timer);
+ block = timer_container_of(block, t, timer);
spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 7248e547fefb..cdc7b2f16b88 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -314,7 +314,7 @@ dcssblk_load_segment(char *name, struct segment_info **seg_info)
if (*seg_info == NULL)
return -ENOMEM;
- strcpy((*seg_info)->segment_name, name);
+ strscpy((*seg_info)->segment_name, name);
/* load the segment */
rc = segment_load(name, SEGMENT_SHARED,
@@ -612,7 +612,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = -ENOMEM;
goto out;
}
- strcpy(dev_info->segment_name, local_buf);
+ strscpy(dev_info->segment_name, local_buf);
dev_info->segment_type = seg_info->segment_type;
INIT_LIST_HEAD(&dev_info->seg_list);
}
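The strcpy() to strscpy() conversions here use the two-argument form, which derives the bound from the destination at compile time and therefore requires the destination to be a real array, not a pointer. An illustrative sketch (names are not from the patch):

#include <linux/string.h>

struct seg_example {
	char segment_name[8];
};

static void seg_example_set_name(struct seg_example *seg, const char *name)
{
	/* bounded, always NUL-terminated; long names are truncated */
	strscpy(seg->segment_name, name);	/* same as passing sizeof(seg->segment_name) */
}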
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 6a61c0a595d9..56e43d43c713 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -284,7 +284,7 @@ static void raw3215_start_io(struct raw3215_info *raw)
*/
static void raw3215_timeout(struct timer_list *t)
{
- struct raw3215_info *raw = from_timer(raw, t, timer);
+ struct raw3215_info *raw = timer_container_of(raw, t, timer);
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 34f3820d7f74..b78b86e8f281 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -102,6 +102,7 @@ struct tty3270 {
/* Input stuff. */
char *prompt; /* Output string for input area. */
+ size_t prompt_sz; /* Size of output string. */
char *input; /* Input string for read request. */
struct raw3270_request *read; /* Single read request. */
struct raw3270_request *kreset; /* Single keyboard reset request. */
@@ -206,7 +207,7 @@ static int tty3270_input_size(int cols)
static void tty3270_update_prompt(struct tty3270 *tp, char *input)
{
- strcpy(tp->prompt, input);
+ strscpy(tp->prompt, input, tp->prompt_sz);
tp->update_flags |= TTY_UPDATE_INPUT;
tty3270_set_timer(tp, 1);
}
@@ -524,7 +525,7 @@ static void tty3270_update_lines_all(struct tty3270 *tp, struct raw3270_request
*/
static void tty3270_update(struct timer_list *t)
{
- struct tty3270 *tp = from_timer(tp, t, timer);
+ struct tty3270 *tp = timer_container_of(tp, t, timer);
struct raw3270_request *wrq;
u8 cmd = TC_WRITE;
int rc, len;
@@ -971,6 +972,7 @@ static void tty3270_resize(struct raw3270_view *view,
char *old_input, *new_input;
struct tty_struct *tty;
struct winsize ws;
+ size_t prompt_sz;
int new_allocated, old_allocated = tp->allocated_lines;
if (old_model == new_model &&
@@ -982,10 +984,11 @@ static void tty3270_resize(struct raw3270_view *view,
return;
}
- new_input = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL | GFP_DMA);
+ prompt_sz = tty3270_input_size(new_cols);
+ new_input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA);
if (!new_input)
return;
- new_prompt = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL);
+ new_prompt = kzalloc(prompt_sz, GFP_KERNEL);
if (!new_prompt)
goto out_input;
screen = tty3270_alloc_screen(tp, new_rows, new_cols, &new_allocated);
@@ -1010,6 +1013,7 @@ static void tty3270_resize(struct raw3270_view *view,
old_rcl_lines = tp->rcl_lines;
tp->input = new_input;
tp->prompt = new_prompt;
+ tp->prompt_sz = prompt_sz;
tp->rcl_lines = new_rcl_lines;
tp->rcl_read_index = 0;
tp->rcl_write_index = 0;
@@ -1096,6 +1100,7 @@ static int
tty3270_create_view(int index, struct tty3270 **newtp)
{
struct tty3270 *tp;
+ size_t prompt_sz;
int rc;
if (tty3270_max_index < index + 1)
@@ -1125,17 +1130,19 @@ tty3270_create_view(int index, struct tty3270 **newtp)
goto out_free_screen;
}
- tp->input = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL | GFP_DMA);
+ prompt_sz = tty3270_input_size(tp->view.cols);
+ tp->input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA);
if (!tp->input) {
rc = -ENOMEM;
goto out_free_converted_line;
}
- tp->prompt = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL);
+ tp->prompt = kzalloc(prompt_sz, GFP_KERNEL);
if (!tp->prompt) {
rc = -ENOMEM;
goto out_free_input;
}
+ tp->prompt_sz = prompt_sz;
tp->rcl_lines = tty3270_alloc_recall(tp->view.cols);
if (!tp->rcl_lines) {
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
index 711f6982438e..f41b39c9d267 100644
--- a/drivers/s390/char/diag_ftp.c
+++ b/drivers/s390/char/diag_ftp.c
@@ -159,7 +159,7 @@ ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
goto out;
}
- len = strscpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
+ len = strscpy(ldfpl->fident, ftp->fname);
if (len < 0) {
len = -EINVAL;
goto out_free;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 48e8417a5cff..fa063e84eafc 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -821,7 +821,7 @@ tape_delayed_next_request(struct work_struct *work)
static void tape_long_busy_timeout(struct timer_list *t)
{
- struct tape_device *device = from_timer(device, t, lb_timeout);
+ struct tape_device *device = timer_container_of(device, t, lb_timeout);
struct tape_request *request;
spin_lock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index b76038632883..7ff177406bc3 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -35,7 +35,8 @@
static void
tape_std_assign_timeout(struct timer_list *t)
{
- struct tape_request * request = from_timer(request, t, timer);
+ struct tape_request * request = timer_container_of(request, t,
+ timer);
struct tape_device * device = request->device;
int rc;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index dac85294d2f5..e284eea331d7 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -255,7 +255,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
/*
* The recording command needs to be called with option QID
- * for guests that have previlege classes A or B.
+ * for guests that have privilege classes A or B.
* Purging has to be done as a separate step, because recording
* can't be switched on as long as records are on the queue.
* Doing both at the same time doesn't work.
@@ -557,7 +557,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
/*
* The recording command needs to be called with option QID
- * for guests that have previlege classes A or B.
+ * for guests that have privilege classes A or B.
* Other guests will not recognize the command and we have to
* issue the same command without the QID parameter.
*/
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e1b1fbdabb1b..e849d3271b0e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -98,7 +98,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
void
ccw_device_timeout(struct timer_list *t)
{
- struct ccw_device_private *priv = from_timer(priv, t, timer);
+ struct ccw_device_private *priv = timer_container_of(priv, t, timer);
struct ccw_device *cdev = priv->cdev;
spin_lock_irq(cdev->ccwlock);
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index ac382355dc04..37ea30be710c 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -98,7 +98,7 @@ static int eadm_subchannel_clear(struct subchannel *sch)
static void eadm_subchannel_timeout(struct timer_list *t)
{
- struct eadm_private *private = from_timer(private, t, timer);
+ struct eadm_private *private = timer_container_of(private, t, timer);
struct subchannel *sch = private->sch;
spin_lock_irq(&sch->lock);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1564cd7e3f59..bd95bd390b5c 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -41,6 +41,7 @@
#include <linux/module.h>
#include <asm/uv.h>
#include <asm/chsc.h>
+#include <linux/mempool.h>
#include "ap_bus.h"
#include "ap_debug.h"
@@ -103,6 +104,27 @@ static struct ap_config_info *const ap_qci_info_old = &qci[1];
debug_info_t *ap_dbf_info;
/*
+ * There is a need for a do-not-allocate-memory path through the AP bus
+ * layer. The pkey layer may be triggered via the in-kernel interface from
+ * a protected key crypto algorithm (namely PAES) to convert a secure key
+ * into a protected key. This happens in a workqueue context, so sleeping
+ * is allowed but memory allocations causing IO operations are not permitted.
+ * To accomplish this, an AP message memory pool with pre-allocated space
+ * is established. When ap_init_apmsg() is called with the AP_MSG_FLAG_MEMPOOL
+ * flag set, the ap message buffer is allocated from the ap_msg_pool instead
+ * of via kmalloc(). The pool only holds a limited amount of buffers:
+ * ap_msg_pool_min_items items of size AP_DEFAULT_MAX_MSG_SIZE, and exactly
+ * one of these items (if available) is handed out per call. When the pool
+ * is exhausted and the flag is set, ap_init_apmsg() returns -ENOMEM without
+ * any attempt to allocate memory and the caller has to deal with that.
+ */
+static mempool_t *ap_msg_pool;
+static unsigned int ap_msg_pool_min_items = 8;
+module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440);
+MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items");
+
+/*
* AP bus rescan related things.
*/
static bool ap_scan_bus(void);
@@ -406,7 +428,7 @@ void ap_wait(enum ap_sm_wait wait)
*/
void ap_request_timeout(struct timer_list *t)
{
- struct ap_queue *aq = from_timer(aq, t, timeout);
+ struct ap_queue *aq = timer_container_of(aq, t, timeout);
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
@@ -547,6 +569,48 @@ static void ap_poll_thread_stop(void)
#define is_card_dev(x) ((x)->parent == ap_root_device)
#define is_queue_dev(x) ((x)->parent != ap_root_device)
+/*
+ * ap_init_apmsg() - Initialize ap_message.
+ */
+int ap_init_apmsg(struct ap_message *ap_msg, u32 flags)
+{
+ unsigned int maxmsgsize;
+
+ memset(ap_msg, 0, sizeof(*ap_msg));
+ ap_msg->flags = flags;
+
+ if (flags & AP_MSG_FLAG_MEMPOOL) {
+ ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE;
+ return 0;
+ }
+
+ maxmsgsize = atomic_read(&ap_max_msg_size);
+ ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->bufsize = maxmsgsize;
+
+ return 0;
+}
+EXPORT_SYMBOL(ap_init_apmsg);
+
+/*
+ * ap_release_apmsg() - Release ap_message.
+ */
+void ap_release_apmsg(struct ap_message *ap_msg)
+{
+ if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) {
+ memzero_explicit(ap_msg->msg, ap_msg->bufsize);
+ mempool_free(ap_msg->msg, ap_msg_pool);
+ } else {
+ kfree_sensitive(ap_msg->msg);
+ }
+}
+EXPORT_SYMBOL(ap_release_apmsg);
+
/**
* ap_bus_match()
* @dev: Pointer to device
@@ -2431,6 +2495,14 @@ static int __init ap_module_init(void)
/* init ap_queue hashtable */
hash_init(ap_queues);
+ /* create ap msg buffer memory pool */
+ ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items,
+ AP_DEFAULT_MAX_MSG_SIZE);
+ if (!ap_msg_pool) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
/* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
@@ -2477,6 +2549,7 @@ out_device:
out_bus:
bus_unregister(&ap_bus_type);
out:
+ mempool_destroy(ap_msg_pool);
ap_debug_exit();
return rc;
}
@@ -2487,6 +2560,7 @@ static void __exit ap_module_exit(void)
ap_irq_exit();
root_device_unregister(ap_root_device);
bus_unregister(&ap_bus_type);
+ mempool_destroy(ap_msg_pool);
ap_debug_exit();
}
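The lifecycle behind the pool introduced above, reduced to a sketch (not from the patch; the item size is a stand-in for AP_DEFAULT_MAX_MSG_SIZE). The key property is that mempool_alloc_preallocated() only hands out pre-allocated elements and never falls back into the allocator, which is what makes the no-IO path possible:

#include <linux/errno.h>
#include <linux/mempool.h>

#define EXAMPLE_ITEMS	8	/* mirrors ap_msg_pool_min_items */
#define EXAMPLE_SIZE	12288	/* stand-in for AP_DEFAULT_MAX_MSG_SIZE */

static mempool_t *example_pool;

static int example_pool_init(void)
{
	example_pool = mempool_create_kmalloc_pool(EXAMPLE_ITEMS, EXAMPLE_SIZE);
	return example_pool ? 0 : -ENOMEM;
}

static void *example_get_buf(void)
{
	/* returns NULL (no sleeping, no IO) once all items are in use */
	return mempool_alloc_preallocated(example_pool);
}

static void example_put_buf(void *buf)
{
	mempool_free(buf, example_pool);
}

static void example_pool_exit(void)
{
	mempool_destroy(example_pool);
}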
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index f4622ee4d894..88b625ba1978 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -214,6 +214,11 @@ struct ap_queue {
typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
+struct ap_response_type {
+ struct completion work;
+ int type;
+};
+
struct ap_message {
struct list_head list; /* Request queueing. */
unsigned long psmid; /* Message id. */
@@ -222,7 +227,7 @@ struct ap_message {
size_t bufsize; /* allocated msg buffer size */
u16 flags; /* Flags, see AP_MSG_FLAG_xxx */
int rc; /* Return code for this message */
- void *private; /* ap driver private pointer. */
+ struct ap_response_type response;
/* receive is called from tasklet context */
void (*receive)(struct ap_queue *, struct ap_message *,
struct ap_message *);
@@ -231,27 +236,10 @@ struct ap_message {
#define AP_MSG_FLAG_SPECIAL 0x0001 /* flag msg as 'special' with NQAP */
#define AP_MSG_FLAG_USAGE 0x0002 /* CCA, EP11: usage (no admin) msg */
#define AP_MSG_FLAG_ADMIN 0x0004 /* CCA, EP11: admin (=control) msg */
+#define AP_MSG_FLAG_MEMPOOL 0x0008 /* ap msg buffer allocated via mempool */
-/**
- * ap_init_message() - Initialize ap_message.
- * Initialize a message before using. Otherwise this might result in
- * unexpected behaviour.
- */
-static inline void ap_init_message(struct ap_message *ap_msg)
-{
- memset(ap_msg, 0, sizeof(*ap_msg));
-}
-
-/**
- * ap_release_message() - Release ap_message.
- * Releases all memory used internal within the ap_message struct
- * Currently this is the message and private field.
- */
-static inline void ap_release_message(struct ap_message *ap_msg)
-{
- kfree_sensitive(ap_msg->msg);
- kfree_sensitive(ap_msg->private);
-}
+int ap_init_apmsg(struct ap_message *ap_msg, u32 flags);
+void ap_release_apmsg(struct ap_message *ap_msg);
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 3a39e167bdbf..cef60770f68b 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -24,7 +24,8 @@
*/
static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags)
{
int rc;
@@ -32,14 +33,14 @@ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
rc = pkey_handler_key_to_protkey(apqns, nr_apqns,
key, keylen,
protkey, protkeylen,
- protkeytype);
+ protkeytype, xflags);
/* if this did not work, try the slowpath way */
if (rc == -ENODEV) {
rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns,
key, keylen,
protkey, protkeylen,
- protkeytype);
+ protkeytype, xflags);
if (rc)
rc = -ENODEV;
}
@@ -52,16 +53,16 @@ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
* In-Kernel function: Transform a key blob (of any type) into a protected key
*/
int pkey_key2protkey(const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags)
{
int rc;
rc = key2protkey(NULL, 0, key, keylen,
- protkey, protkeylen, protkeytype);
+ protkey, protkeylen, protkeytype, xflags);
if (rc == -ENODEV) {
pkey_handler_request_modules();
rc = key2protkey(NULL, 0, key, keylen,
- protkey, protkeylen, protkeytype);
+ protkey, protkeylen, protkeytype, xflags);
}
return rc;
@@ -103,7 +104,7 @@ static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs)
keybuflen = sizeof(kgs.seckey.seckey);
rc = pkey_handler_gen_key(&apqn, 1,
kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0,
- kgs.seckey.seckey, &keybuflen, NULL);
+ kgs.seckey.seckey, &keybuflen, NULL, 0);
pr_debug("gen_key()=%d\n", rc);
if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs)))
rc = -EFAULT;
@@ -129,7 +130,7 @@ static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs)
kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0,
kcs.clrkey.clrkey,
pkey_keytype_aes_to_size(kcs.keytype),
- kcs.seckey.seckey, &keybuflen, NULL);
+ kcs.seckey.seckey, &keybuflen, NULL, 0);
pr_debug("clr_to_key()=%d\n", rc);
if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs)))
rc = -EFAULT;
@@ -154,7 +155,8 @@ static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp)
ksp.seckey.seckey,
sizeof(ksp.seckey.seckey),
ksp.protkey.protkey,
- &ksp.protkey.len, &ksp.protkey.type);
+ &ksp.protkey.len, &ksp.protkey.type,
+ 0);
pr_debug("key_to_protkey()=%d\n", rc);
if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
rc = -EFAULT;
@@ -198,7 +200,7 @@ static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp)
rc = key2protkey(NULL, 0,
tmpbuf, sizeof(*t) + keylen,
kcp.protkey.protkey,
- &kcp.protkey.len, &kcp.protkey.type);
+ &kcp.protkey.len, &kcp.protkey.type, 0);
pr_debug("key2protkey()=%d\n", rc);
kfree_sensitive(tmpbuf);
@@ -228,12 +230,12 @@ static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc)
rc = pkey_handler_apqns_for_key(kfc.seckey.seckey,
sizeof(kfc.seckey.seckey),
PKEY_FLAGS_MATCH_CUR_MKVP,
- apqns, &nr_apqns);
+ apqns, &nr_apqns, 0);
if (rc == -ENODEV)
rc = pkey_handler_apqns_for_key(kfc.seckey.seckey,
sizeof(kfc.seckey.seckey),
PKEY_FLAGS_MATCH_ALT_MKVP,
- apqns, &nr_apqns);
+ apqns, &nr_apqns, 0);
pr_debug("apqns_for_key()=%d\n", rc);
if (rc) {
kfree(apqns);
@@ -262,7 +264,7 @@ static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp)
sizeof(ksp.seckey.seckey),
ksp.protkey.protkey,
&ksp.protkey.len,
- &ksp.protkey.type);
+ &ksp.protkey.type, 0);
pr_debug("key_to_protkey()=%d\n", rc);
if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
rc = -EFAULT;
@@ -285,7 +287,7 @@ static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk)
rc = pkey_handler_verify_key(kvk.seckey.seckey,
sizeof(kvk.seckey.seckey),
&kvk.cardnr, &kvk.domain,
- &keytype, &keybitsize, &flags);
+ &keytype, &keybitsize, &flags, 0);
pr_debug("verify_key()=%d\n", rc);
if (!rc && keytype != PKEY_TYPE_CCA_DATA)
rc = -EINVAL;
@@ -312,7 +314,7 @@ static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp)
rc = pkey_handler_gen_key(NULL, 0, kgp.keytype,
PKEY_TYPE_PROTKEY, 0, 0,
kgp.protkey.protkey, &kgp.protkey.len,
- &kgp.protkey.type);
+ &kgp.protkey.type, 0);
pr_debug("gen_key()=%d\n", rc);
if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp)))
rc = -EFAULT;
@@ -354,7 +356,7 @@ static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp)
memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len);
rc = pkey_handler_verify_key(tmpbuf, sizeof(*t),
- NULL, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL, 0);
pr_debug("verify_key()=%d\n", rc);
kfree_sensitive(tmpbuf);
@@ -377,7 +379,7 @@ static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp)
ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = key2protkey(NULL, 0, kkey, ktp.keylen,
ktp.protkey.protkey, &ktp.protkey.len,
- &ktp.protkey.type);
+ &ktp.protkey.type, 0);
pr_debug("key2protkey()=%d\n", rc);
kfree_sensitive(kkey);
if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
@@ -414,7 +416,7 @@ static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs)
}
rc = pkey_handler_gen_key(apqns, kgs.apqn_entries,
u, kgs.type, kgs.size, kgs.keygenflags,
- kkey, &klen, NULL);
+ kkey, &klen, NULL, 0);
pr_debug("gen_key()=%d\n", rc);
kfree(apqns);
if (rc) {
@@ -471,7 +473,7 @@ static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs)
rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries,
u, kcs.type, kcs.size, kcs.keygenflags,
kcs.clrkey.clrkey, kcs.size / 8,
- kkey, &klen, NULL);
+ kkey, &klen, NULL, 0);
pr_debug("clr_to_key()=%d\n", rc);
kfree(apqns);
if (rc) {
@@ -514,7 +516,7 @@ static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk)
rc = pkey_handler_verify_key(kkey, kvk.keylen,
&kvk.cardnr, &kvk.domain,
- &kvk.type, &kvk.size, &kvk.flags);
+ &kvk.type, &kvk.size, &kvk.flags, 0);
pr_debug("verify_key()=%d\n", rc);
kfree_sensitive(kkey);
@@ -544,7 +546,7 @@ static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp)
ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen,
ktp.protkey.protkey, &ktp.protkey.len,
- &ktp.protkey.type);
+ &ktp.protkey.type, 0);
pr_debug("key2protkey()=%d\n", rc);
kfree(apqns);
kfree_sensitive(kkey);
@@ -579,7 +581,7 @@ static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak)
return PTR_ERR(kkey);
}
rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags,
- apqns, &nr_apqns);
+ apqns, &nr_apqns, 0);
pr_debug("apqns_for_key()=%d\n", rc);
kfree_sensitive(kkey);
if (rc && rc != -ENOSPC) {
@@ -626,7 +628,7 @@ static int pkey_ioctl_apqns4kt(struct pkey_apqns4keytype __user *uat)
}
rc = pkey_handler_apqns_for_keytype(kat.type,
kat.cur_mkvp, kat.alt_mkvp,
- kat.flags, apqns, &nr_apqns);
+ kat.flags, apqns, &nr_apqns, 0);
pr_debug("apqns_for_keytype()=%d\n", rc);
if (rc && rc != -ENOSPC) {
kfree(apqns);
@@ -678,7 +680,7 @@ static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp)
return -ENOMEM;
}
rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen,
- protkey, &protkeylen, &ktp.pkeytype);
+ protkey, &protkeylen, &ktp.pkeytype, 0);
pr_debug("key2protkey()=%d\n", rc);
kfree(apqns);
kfree_sensitive(kkey);
diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c
index 64a376501d26..9e6f319acc63 100644
--- a/drivers/s390/crypto/pkey_base.c
+++ b/drivers/s390/crypto/pkey_base.c
@@ -150,7 +150,8 @@ EXPORT_SYMBOL(pkey_handler_put);
int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags)
{
const struct pkey_handler *h;
int rc = -ENODEV;
@@ -159,7 +160,7 @@ int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
if (h && h->key_to_protkey) {
rc = h->key_to_protkey(apqns, nr_apqns, key, keylen,
protkey, protkeylen,
- protkeytype);
+ protkeytype, xflags);
}
pkey_handler_put(h);
@@ -177,7 +178,7 @@ int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
- u32 *protkeytype)
+ u32 *protkeytype, u32 xflags)
{
const struct pkey_handler *h, *htmp[10];
int i, n = 0, rc = -ENODEV;
@@ -199,7 +200,7 @@ int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
rc = h->slowpath_key_to_protkey(apqns, nr_apqns,
key, keylen,
protkey, protkeylen,
- protkeytype);
+ protkeytype, xflags);
module_put(h->module);
}
@@ -210,7 +211,7 @@ EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey);
int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
- u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags)
{
const struct pkey_handler *h;
int rc = -ENODEV;
@@ -219,7 +220,7 @@ int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
if (h && h->gen_key) {
rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype,
keybitsize, flags,
- keybuf, keybuflen, keyinfo);
+ keybuf, keybuflen, keyinfo, xflags);
}
pkey_handler_put(h);
@@ -231,7 +232,8 @@ int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
- u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo,
+ u32 xflags)
{
const struct pkey_handler *h;
int rc = -ENODEV;
@@ -240,7 +242,7 @@ int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
if (h && h->clr_to_key) {
rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype,
keybitsize, flags, clrkey, clrkeylen,
- keybuf, keybuflen, keyinfo);
+ keybuf, keybuflen, keyinfo, xflags);
}
pkey_handler_put(h);
@@ -250,7 +252,8 @@ EXPORT_SYMBOL(pkey_handler_clr_to_key);
int pkey_handler_verify_key(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
- u32 *keytype, u32 *keybitsize, u32 *flags)
+ u32 *keytype, u32 *keybitsize, u32 *flags,
+ u32 xflags)
{
const struct pkey_handler *h;
int rc = -ENODEV;
@@ -258,7 +261,7 @@ int pkey_handler_verify_key(const u8 *key, u32 keylen,
h = pkey_handler_get_keybased(key, keylen);
if (h && h->verify_key) {
rc = h->verify_key(key, keylen, card, dom,
- keytype, keybitsize, flags);
+ keytype, keybitsize, flags, xflags);
}
pkey_handler_put(h);
@@ -267,14 +270,16 @@ int pkey_handler_verify_key(const u8 *key, u32 keylen,
EXPORT_SYMBOL(pkey_handler_verify_key);
int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags)
{
const struct pkey_handler *h;
int rc = -ENODEV;
h = pkey_handler_get_keybased(key, keylen);
if (h && h->apqns_for_key)
- rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns);
+ rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns,
+ xflags);
pkey_handler_put(h);
return rc;
@@ -283,7 +288,8 @@ EXPORT_SYMBOL(pkey_handler_apqns_for_key);
int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags)
{
const struct pkey_handler *h;
int rc = -ENODEV;
@@ -292,7 +298,7 @@ int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype,
if (h && h->apqns_for_keytype) {
rc = h->apqns_for_keytype(keysubtype,
cur_mkvp, alt_mkvp, flags,
- apqns, nr_apqns);
+ apqns, nr_apqns, xflags);
}
pkey_handler_put(h);
diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h
index 7347647dfaa7..9cdb3e74477f 100644
--- a/drivers/s390/crypto/pkey_base.h
+++ b/drivers/s390/crypto/pkey_base.h
@@ -159,29 +159,33 @@ struct pkey_handler {
bool (*is_supported_keytype)(enum pkey_key_type);
int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
- u32 *protkeytype);
+ u32 *protkeytype, u32 xflags);
int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
- u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags);
int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
- u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags);
int (*verify_key)(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
- u32 *keytype, u32 *keybitsize, u32 *flags);
+ u32 *keytype, u32 *keybitsize, u32 *flags,
+ u32 xflags);
int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns);
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags);
int (*apqns_for_keytype)(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns);
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags);
/* used internally by pkey base */
struct list_head list;
};
@@ -199,29 +203,34 @@ void pkey_handler_put(const struct pkey_handler *handler);
int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
- u32 *protkeytype);
+ u32 *protkeytype, u32 xflags);
int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
- u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags);
int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 keysubtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
- u8 *keybuf, u32 *keybuflen, u32 *keyinfo);
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo,
+ u32 xflags);
int pkey_handler_verify_key(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
- u32 *keytype, u32 *keybitsize, u32 *flags);
+ u32 *keytype, u32 *keybitsize, u32 *flags,
+ u32 xflags);
int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns);
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags);
int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns);
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags);
/*
* Unconditional try to load all handler modules
diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c
index cda22db31f6c..6c7897a93f27 100644
--- a/drivers/s390/crypto/pkey_cca.c
+++ b/drivers/s390/crypto/pkey_cca.c
@@ -70,12 +70,15 @@ static bool is_cca_keytype(enum pkey_key_type key_type)
}
static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+ struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
- u32 _nr_apqns, *_apqns = NULL;
+ u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns);
+ u32 xflags;
int rc;
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
if (!flags)
flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP;
@@ -107,9 +110,9 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags,
/* unknown CCA internal token type */
return -EINVAL;
}
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, AES_MK_SET,
- cur_mkvp, old_mkvp, 1);
+ cur_mkvp, old_mkvp, xflags);
if (rc)
goto out;
@@ -126,9 +129,9 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags,
/* unknown CCA internal 2 token type */
return -EINVAL;
}
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, APKA_MK_SET,
- cur_mkvp, old_mkvp, 1);
+ cur_mkvp, old_mkvp, xflags);
if (rc)
goto out;
@@ -147,18 +150,21 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags,
*nr_apqns = _nr_apqns;
out:
- kfree(_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int cca_apqns4type(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 pflags)
{
- u32 _nr_apqns, *_apqns = NULL;
+ u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns);
+ u32 xflags;
int rc;
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
zcrypt_wait_api_operational();
if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
@@ -171,9 +177,9 @@ static int cca_apqns4type(enum pkey_key_type ktype,
old_mkvp = *((u64 *)alt_mkvp);
if (ktype == PKEY_TYPE_CCA_CIPHER)
minhwtype = ZCRYPT_CEX6;
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, AES_MK_SET,
- cur_mkvp, old_mkvp, 1);
+ cur_mkvp, old_mkvp, xflags);
if (rc)
goto out;
@@ -184,9 +190,9 @@ static int cca_apqns4type(enum pkey_key_type ktype,
cur_mkvp = *((u64 *)cur_mkvp);
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = *((u64 *)alt_mkvp);
- rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
ZCRYPT_CEX7, APKA_MK_SET,
- cur_mkvp, old_mkvp, 1);
+ cur_mkvp, old_mkvp, xflags);
if (rc)
goto out;
@@ -205,19 +211,22 @@ static int cca_apqns4type(enum pkey_key_type ktype,
*nr_apqns = _nr_apqns;
out:
- kfree(_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 pflags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
- struct pkey_apqn *local_apqns = NULL;
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
+ u32 xflags;
int i, rc;
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
if (keylen < sizeof(*hdr))
return -EINVAL;
@@ -253,14 +262,10 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
- local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!local_apqns)
- return -ENOMEM;
- rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns);
+ rc = cca_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags);
if (rc)
goto out;
- apqns = local_apqns;
+ apqns = _apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
@@ -268,16 +273,16 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
hdr->version == TOKVER_CCA_AES) {
rc = cca_sec2protkey(apqns[i].card, apqns[i].domain,
key, protkey,
- protkeylen, protkeytype);
+ protkeylen, protkeytype, xflags);
} else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_VLSC) {
rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain,
key, protkey,
- protkeylen, protkeytype);
+ protkeylen, protkeytype, xflags);
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain,
key, protkey,
- protkeylen, protkeytype);
+ protkeylen, protkeytype, xflags);
} else {
rc = -EINVAL;
break;
@@ -285,7 +290,6 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
}
out:
- kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
@@ -302,10 +306,13 @@ out:
static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 subtype,
u32 keybitsize, u32 flags,
- u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags)
{
- struct pkey_apqn *local_apqns = NULL;
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
int i, len, rc;
+ u32 xflags;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
/* check keytype, subtype, keybitsize */
switch (keytype) {
@@ -340,32 +347,27 @@ static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
- local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!local_apqns)
- return -ENOMEM;
rc = cca_apqns4type(subtype, NULL, NULL, 0,
- local_apqns, &nr_apqns);
+ _apqns, &nr_apqns, pflags);
if (rc)
goto out;
- apqns = local_apqns;
+ apqns = _apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
if (subtype == PKEY_TYPE_CCA_CIPHER) {
rc = cca_gencipherkey(apqns[i].card, apqns[i].domain,
keybitsize, flags,
- keybuf, keybuflen);
+ keybuf, keybuflen, xflags);
} else {
/* PKEY_TYPE_CCA_DATA */
rc = cca_genseckey(apqns[i].card, apqns[i].domain,
- keybitsize, keybuf);
+ keybitsize, keybuf, xflags);
*keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
}
}
out:
- kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
@@ -383,10 +385,13 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 subtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
- u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags)
{
- struct pkey_apqn *local_apqns = NULL;
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
int i, len, rc;
+ u32 xflags;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
/* check keytype, subtype, clrkeylen, keybitsize */
switch (keytype) {
@@ -426,44 +431,42 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
- local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!local_apqns)
- return -ENOMEM;
rc = cca_apqns4type(subtype, NULL, NULL, 0,
- local_apqns, &nr_apqns);
+ _apqns, &nr_apqns, pflags);
if (rc)
goto out;
- apqns = local_apqns;
+ apqns = _apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
if (subtype == PKEY_TYPE_CCA_CIPHER) {
rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain,
keybitsize, flags, clrkey,
- keybuf, keybuflen);
+ keybuf, keybuflen, xflags);
} else {
/* PKEY_TYPE_CCA_DATA */
rc = cca_clr2seckey(apqns[i].card, apqns[i].domain,
- keybitsize, clrkey, keybuf);
+ keybitsize, clrkey, keybuf, xflags);
*keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
}
}
out:
- kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int cca_verifykey(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
- u32 *keytype, u32 *keybitsize, u32 *flags)
+ u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
- u32 nr_apqns, *apqns = NULL;
+ u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns);
+ u32 xflags;
int rc;
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
if (keylen < sizeof(*hdr))
return -EINVAL;
@@ -478,15 +481,16 @@ static int cca_verifykey(const u8 *key, u32 keylen,
goto out;
*keytype = PKEY_TYPE_CCA_DATA;
*keybitsize = t->bitsize;
- rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ rc = cca_findcard2(apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX3C, AES_MK_SET,
- t->mkvp, 0, 1);
+ t->mkvp, 0, xflags);
if (!rc)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (rc == -ENODEV) {
- rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ nr_apqns = ARRAY_SIZE(apqns);
+ rc = cca_findcard2(apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX3C, AES_MK_SET,
- 0, t->mkvp, 1);
+ 0, t->mkvp, xflags);
if (!rc)
*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
}
@@ -511,15 +515,16 @@ static int cca_verifykey(const u8 *key, u32 keylen,
*keybitsize = PKEY_SIZE_AES_192;
else if (!t->plfver && t->wpllen == 640)
*keybitsize = PKEY_SIZE_AES_256;
- rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ rc = cca_findcard2(apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX6, AES_MK_SET,
- t->mkvp0, 0, 1);
+ t->mkvp0, 0, xflags);
if (!rc)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (rc == -ENODEV) {
- rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom,
+ nr_apqns = ARRAY_SIZE(apqns);
+ rc = cca_findcard2(apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX6, AES_MK_SET,
- 0, t->mkvp0, 1);
+ 0, t->mkvp0, xflags);
if (!rc)
*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
}
@@ -535,7 +540,6 @@ static int cca_verifykey(const u8 *key, u32 keylen,
}
out:
- kfree(apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
@@ -551,12 +555,12 @@ static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
- u32 *protkeytype)
+ u32 *protkeytype, u32 pflags)
{
const struct keytoken_header *hdr = (const struct keytoken_header *)key;
const struct clearkeytoken *t = (const struct clearkeytoken *)key;
+ u8 tmpbuf[SECKEYBLOBSIZE]; /* 64 bytes */
u32 tmplen, keysize = 0;
- u8 *tmpbuf;
int i, rc;
if (keylen < sizeof(*hdr))
@@ -568,26 +572,20 @@ static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns,
if (!keysize || t->len != keysize)
return -EINVAL;
- /* alloc tmp key buffer */
- tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC);
- if (!tmpbuf)
- return -ENOMEM;
-
/* try two times in case of failure */
for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
tmplen = SECKEYBLOBSIZE;
rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA,
8 * keysize, 0, t->clearkey, t->len,
- tmpbuf, &tmplen, NULL);
+ tmpbuf, &tmplen, NULL, pflags);
pr_debug("cca_clr2key()=%d\n", rc);
if (rc)
continue;
rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen,
- protkey, protkeylen, protkeytype);
+ protkey, protkeylen, protkeytype, pflags);
pr_debug("cca_key2protkey()=%d\n", rc);
}
- kfree(tmpbuf);
pr_debug("rc=%d\n", rc);
return rc;
}
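Note that every converted handler in this file repeats the same one-line mapping from the pkey flag space to the zcrypt flag space. A hypothetical helper (not part of the patch) that would factor out this idiom:

	/* Hypothetical helper, not in the patch: translate the pkey
	 * "no memory allocation" xflag into its zcrypt counterpart.
	 */
	static inline u32 pkey_to_zcrypt_xflags(u32 pflags)
	{
		return (pflags & PKEY_XFLAG_NOMEMALLOC) ?
			ZCRYPT_XFLAG_NOMEMALLOC : 0;
	}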
diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c
index 5b033ca3e828..6b23adc560c8 100644
--- a/drivers/s390/crypto/pkey_ep11.c
+++ b/drivers/s390/crypto/pkey_ep11.c
@@ -70,12 +70,15 @@ static bool is_ep11_keytype(enum pkey_key_type key_type)
}
static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+ struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
- u32 _nr_apqns, *_apqns = NULL;
+ u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns);
+ u32 xflags;
int rc;
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
if (!flags)
flags = PKEY_FLAGS_MATCH_CUR_MKVP;
@@ -98,8 +101,8 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags,
minhwtype = ZCRYPT_CEX7;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
}
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, api, kb->wkvp);
+ rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp, xflags);
if (rc)
goto out;
@@ -115,8 +118,8 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags,
minhwtype = ZCRYPT_CEX7;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
}
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, api, kb->wkvp);
+ rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp, xflags);
if (rc)
goto out;
@@ -135,18 +138,20 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags,
*nr_apqns = _nr_apqns;
out:
- kfree(_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int ep11_apqns4type(enum pkey_key_type ktype,
u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
- struct pkey_apqn *apqns, size_t *nr_apqns)
+ struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags)
{
- u32 _nr_apqns, *_apqns = NULL;
+ u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns);
+ u32 xflags;
int rc;
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
zcrypt_wait_api_operational();
if (ktype == PKEY_TYPE_EP11 ||
@@ -158,8 +163,8 @@ static int ep11_apqns4type(enum pkey_key_type ktype,
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
wkvp = cur_mkvp;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, api, wkvp);
+ rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, api, wkvp, xflags);
if (rc)
goto out;
@@ -178,19 +183,22 @@ static int ep11_apqns4type(enum pkey_key_type ktype,
*nr_apqns = _nr_apqns;
out:
- kfree(_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 pflags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
- struct pkey_apqn *local_apqns = NULL;
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
+ u32 xflags;
int i, rc;
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
if (keylen < sizeof(*hdr))
return -EINVAL;
@@ -225,14 +233,10 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
- local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!local_apqns)
- return -ENOMEM;
- rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns);
+ rc = ep11_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags);
if (rc)
goto out;
- apqns = local_apqns;
+ apqns = _apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
@@ -241,19 +245,19 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
key, hdr->len, protkey,
- protkeylen, protkeytype);
+ protkeylen, protkeytype, xflags);
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
key, hdr->len, protkey,
- protkeylen, protkeytype);
+ protkeylen, protkeytype, xflags);
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
key, hdr->len, protkey,
- protkeylen, protkeytype);
+ protkeylen, protkeytype, xflags);
} else {
rc = -EINVAL;
break;
@@ -261,7 +265,6 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
}
out:
- kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
@@ -278,10 +281,13 @@ out:
static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 subtype,
u32 keybitsize, u32 flags,
- u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags)
{
- struct pkey_apqn *local_apqns = NULL;
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
int i, len, rc;
+ u32 xflags;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
/* check keytype, subtype, keybitsize */
switch (keytype) {
@@ -316,25 +322,20 @@ static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
- local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!local_apqns)
- return -ENOMEM;
rc = ep11_apqns4type(subtype, NULL, NULL, 0,
- local_apqns, &nr_apqns);
+ _apqns, &nr_apqns, pflags);
if (rc)
goto out;
- apqns = local_apqns;
+ apqns = _apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
rc = ep11_genaeskey(apqns[i].card, apqns[i].domain,
keybitsize, flags,
- keybuf, keybuflen, subtype);
+ keybuf, keybuflen, subtype, xflags);
}
out:
- kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
@@ -352,10 +353,13 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
u32 keytype, u32 subtype,
u32 keybitsize, u32 flags,
const u8 *clrkey, u32 clrkeylen,
- u8 *keybuf, u32 *keybuflen, u32 *_keyinfo)
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags)
{
- struct pkey_apqn *local_apqns = NULL;
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
int i, len, rc;
+ u32 xflags;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
/* check keytype, subtype, clrkeylen, keybitsize */
switch (keytype) {
@@ -395,37 +399,35 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
if (!apqns || (nr_apqns == 1 &&
apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
nr_apqns = MAXAPQNSINLIST;
- local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn),
- GFP_KERNEL);
- if (!local_apqns)
- return -ENOMEM;
rc = ep11_apqns4type(subtype, NULL, NULL, 0,
- local_apqns, &nr_apqns);
+ _apqns, &nr_apqns, pflags);
if (rc)
goto out;
- apqns = local_apqns;
+ apqns = _apqns;
}
for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain,
keybitsize, flags, clrkey,
- keybuf, keybuflen, subtype);
+ keybuf, keybuflen, subtype, xflags);
}
out:
- kfree(local_apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
static int ep11_verifykey(const u8 *key, u32 keylen,
u16 *card, u16 *dom,
- u32 *keytype, u32 *keybitsize, u32 *flags)
+ u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags)
{
struct keytoken_header *hdr = (struct keytoken_header *)key;
- u32 nr_apqns, *apqns = NULL;
+ u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns);
+ u32 xflags;
int rc;
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
if (keylen < sizeof(*hdr))
return -EINVAL;
@@ -443,9 +445,9 @@ static int ep11_verifykey(const u8 *key, u32 keylen,
*keybitsize = kb->head.bitlen;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom,
+ rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX7, api,
- ep11_kb_wkvp(key, keylen));
+ ep11_kb_wkvp(key, keylen), xflags);
if (rc)
goto out;
@@ -467,9 +469,9 @@ static int ep11_verifykey(const u8 *key, u32 keylen,
*keybitsize = kh->bitlen;
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
- rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom,
+ rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom,
ZCRYPT_CEX7, api,
- ep11_kb_wkvp(key, keylen));
+ ep11_kb_wkvp(key, keylen), xflags);
if (rc)
goto out;
@@ -484,7 +486,6 @@ static int ep11_verifykey(const u8 *key, u32 keylen,
}
out:
- kfree(apqns);
pr_debug("rc=%d\n", rc);
return rc;
}
@@ -500,12 +501,12 @@ static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns,
size_t nr_apqns,
const u8 *key, u32 keylen,
u8 *protkey, u32 *protkeylen,
- u32 *protkeytype)
+ u32 *protkeytype, u32 pflags)
{
const struct keytoken_header *hdr = (const struct keytoken_header *)key;
const struct clearkeytoken *t = (const struct clearkeytoken *)key;
+ u8 tmpbuf[MAXEP11AESKEYBLOBSIZE]; /* 336 bytes */
u32 tmplen, keysize = 0;
- u8 *tmpbuf;
int i, rc;
if (keylen < sizeof(*hdr))
@@ -517,26 +518,20 @@ static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns,
if (!keysize || t->len != keysize)
return -EINVAL;
- /* alloc tmp key buffer */
- tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC);
- if (!tmpbuf)
- return -ENOMEM;
-
/* try two times in case of failure */
for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
tmplen = MAXEP11AESKEYBLOBSIZE;
rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11,
8 * keysize, 0, t->clearkey, t->len,
- tmpbuf, &tmplen, NULL);
+ tmpbuf, &tmplen, NULL, pflags);
pr_debug("ep11_clr2key()=%d\n", rc);
if (rc)
continue;
rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen,
- protkey, protkeylen, protkeytype);
+ protkey, protkeylen, protkeytype, pflags);
pr_debug("ep11_key2protkey()=%d\n", rc);
}
- kfree(tmpbuf);
pr_debug("rc=%d\n", rc);
return rc;
}
diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c
index 835d59f4fbc5..7eca9f1340bd 100644
--- a/drivers/s390/crypto/pkey_pckmo.c
+++ b/drivers/s390/crypto/pkey_pckmo.c
@@ -406,7 +406,8 @@ out:
static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns,
size_t _nr_apqns,
const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *keyinfo)
+ u8 *protkey, u32 *protkeylen, u32 *keyinfo,
+ u32 _xflags __always_unused)
{
return pckmo_key2protkey(key, keylen,
protkey, protkeylen, keyinfo);
@@ -415,7 +416,8 @@ static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns,
static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns,
u32 keytype, u32 keysubtype,
u32 _keybitsize, u32 _flags,
- u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo,
+ u32 _xflags __always_unused)
{
return pckmo_gen_protkey(keytype, keysubtype,
keybuf, keybuflen, keyinfo);
@@ -423,7 +425,8 @@ static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns,
static int pkey_pckmo_verifykey(const u8 *key, u32 keylen,
u16 *_card, u16 *_dom,
- u32 *_keytype, u32 *_keybitsize, u32 *_flags)
+ u32 *_keytype, u32 *_keybitsize,
+ u32 *_flags, u32 _xflags __always_unused)
{
return pckmo_verify_key(key, keylen);
}
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
index 57edc97bafd2..cea772973649 100644
--- a/drivers/s390/crypto/pkey_sysfs.c
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -29,13 +29,13 @@ static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype,
rc = pkey_handler_gen_key(NULL, 0,
keytype, keysubtype,
keybitsize, flags,
- keybuf, keybuflen, keyinfo);
+ keybuf, keybuflen, keyinfo, 0);
if (rc == -ENODEV) {
pkey_handler_request_modules();
rc = pkey_handler_gen_key(NULL, 0,
keytype, keysubtype,
keybitsize, flags,
- keybuf, keybuflen, keyinfo);
+ keybuf, keybuflen, keyinfo, 0);
}
return rc;
diff --git a/drivers/s390/crypto/pkey_uv.c b/drivers/s390/crypto/pkey_uv.c
index 805817b14354..e5c6e01acaf3 100644
--- a/drivers/s390/crypto/pkey_uv.c
+++ b/drivers/s390/crypto/pkey_uv.c
@@ -21,6 +21,12 @@ MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key UV handler");
/*
+ * One pre-allocated uv_secret_list for use with uv_find_secret()
+ */
+static struct uv_secret_list *uv_list;
+static DEFINE_MUTEX(uv_list_mutex);
+
+/*
* UV secret token struct and defines.
*/
@@ -85,13 +91,26 @@ static bool is_uv_keytype(enum pkey_key_type keytype)
}
}
+static int get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
+ struct uv_secret_list_item_hdr *secret)
+{
+ int rc;
+
+ mutex_lock(&uv_list_mutex);
+ memset(uv_list, 0, sizeof(*uv_list));
+ rc = uv_find_secret(secret_id, uv_list, secret);
+ mutex_unlock(&uv_list_mutex);
+
+ return rc;
+}
+
static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN],
u16 *secret_type, u8 *buf, u32 *buflen)
{
struct uv_secret_list_item_hdr secret_meta_data;
int rc;
- rc = uv_get_secret_metadata(secret_id, &secret_meta_data);
+ rc = get_secret_metadata(secret_id, &secret_meta_data);
if (rc)
return rc;
@@ -172,7 +191,8 @@ static int uv_get_size_and_type(u16 secret_type, u32 *pkeysize, u32 *pkeytype)
static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused,
size_t _nr_apqns __always_unused,
const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *keyinfo)
+ u8 *protkey, u32 *protkeylen, u32 *keyinfo,
+ u32 _xflags __always_unused)
{
struct uvsecrettoken *t = (struct uvsecrettoken *)key;
u32 pkeysize, pkeytype;
@@ -214,7 +234,8 @@ out:
static int uv_verifykey(const u8 *key, u32 keylen,
u16 *_card __always_unused,
u16 *_dom __always_unused,
- u32 *keytype, u32 *keybitsize, u32 *flags)
+ u32 *keytype, u32 *keybitsize, u32 *flags,
+ u32 xflags __always_unused)
{
struct uvsecrettoken *t = (struct uvsecrettoken *)key;
struct uv_secret_list_item_hdr secret_meta_data;
@@ -225,7 +246,7 @@ static int uv_verifykey(const u8 *key, u32 keylen,
if (rc)
goto out;
- rc = uv_get_secret_metadata(t->secret_id, &secret_meta_data);
+ rc = get_secret_metadata(t->secret_id, &secret_meta_data);
if (rc)
goto out;
@@ -263,13 +284,23 @@ static struct pkey_handler uv_handler = {
*/
static int __init pkey_uv_init(void)
{
+ int rc;
+
if (!is_prot_virt_guest())
return -ENODEV;
if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list))
return -ENODEV;
- return pkey_handler_register(&uv_handler);
+ uv_list = kmalloc(sizeof(*uv_list), GFP_KERNEL);
+ if (!uv_list)
+ return -ENOMEM;
+
+ rc = pkey_handler_register(&uv_handler);
+ if (rc)
+ kfree(uv_list);
+
+ return rc;
}
/*
@@ -278,6 +309,9 @@ static int __init pkey_uv_init(void)
static void __exit pkey_uv_exit(void)
{
pkey_handler_unregister(&uv_handler);
+ mutex_lock(&uv_list_mutex);
+ kvfree(uv_list);
+ mutex_unlock(&uv_list_mutex);
}
module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 5020696f1379..89baa87a13fc 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -50,6 +50,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
+unsigned int zcrypt_mempool_threshold = 5;
+module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440);
+MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimum number of items (min: 1)");
+
/*
* zcrypt tracepoint functions
*/
@@ -642,16 +646,17 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
unsigned int wgt = 0, pref_wgt = 0;
- unsigned int func_code;
- int cpen, qpen, qid = 0, rc = -ENODEV;
+ unsigned int func_code = 0;
+ int cpen, qpen, qid = 0, rc;
struct module *mod;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
- ap_init_message(&ap_msg);
+ rc = ap_init_apmsg(&ap_msg, 0);
+ if (rc)
+ goto out;
if (mex->outputdatalength < mex->inputdatalength) {
- func_code = 0;
rc = -EINVAL;
goto out;
}
@@ -728,7 +733,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
out:
- ap_release_message(&ap_msg);
+ ap_release_apmsg(&ap_msg);
if (tr) {
tr->last_rc = rc;
tr->last_qid = qid;
@@ -746,16 +751,17 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
unsigned int wgt = 0, pref_wgt = 0;
- unsigned int func_code;
- int cpen, qpen, qid = 0, rc = -ENODEV;
+ unsigned int func_code = 0;
+ int cpen, qpen, qid = 0, rc;
struct module *mod;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
- ap_init_message(&ap_msg);
+ rc = ap_init_apmsg(&ap_msg, 0);
+ if (rc)
+ goto out;
if (crt->outputdatalength < crt->inputdatalength) {
- func_code = 0;
rc = -EINVAL;
goto out;
}
@@ -832,7 +838,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
out:
- ap_release_message(&ap_msg);
+ ap_release_apmsg(&ap_msg);
if (tr) {
tr->last_rc = rc;
tr->last_qid = qid;
@@ -842,23 +848,28 @@ out:
return rc;
}
-static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
+static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms,
struct zcrypt_track *tr,
struct ica_xcRB *xcrb)
{
+ bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
unsigned int wgt = 0, pref_wgt = 0;
- unsigned int func_code;
+ unsigned int func_code = 0;
unsigned short *domain, tdom;
- int cpen, qpen, qid = 0, rc = -ENODEV;
+ int cpen, qpen, qid = 0, rc;
struct module *mod;
trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);
xcrb->status = 0;
- ap_init_message(&ap_msg);
+
+ rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
+ AP_MSG_FLAG_MEMPOOL : 0);
+ if (rc)
+ goto out;
rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
@@ -962,7 +973,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
out:
- ap_release_message(&ap_msg);
+ ap_release_apmsg(&ap_msg);
if (tr) {
tr->last_rc = rc;
tr->last_qid = qid;
@@ -972,7 +983,7 @@ out:
return rc;
}
-long zcrypt_send_cprb(struct ica_xcRB *xcrb)
+long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
{
struct zcrypt_track tr;
int rc;
@@ -980,13 +991,13 @@ long zcrypt_send_cprb(struct ica_xcRB *xcrb)
memset(&tr, 0, sizeof(tr));
do {
- rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
+ rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
/* on ENODEV failure: retry once again after a requested rescan */
if (rc == -ENODEV && zcrypt_process_rescan())
do {
- rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
+ rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
@@ -1024,50 +1035,50 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
return false;
}
-static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
+static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms,
struct zcrypt_track *tr,
struct ep11_urb *xcrb)
{
+ bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- struct ep11_target_dev *targets;
+ struct ep11_target_dev *targets = NULL;
unsigned short target_num;
unsigned int wgt = 0, pref_wgt = 0;
- unsigned int func_code, domain;
+ unsigned int func_code = 0, domain;
struct ap_message ap_msg;
- int cpen, qpen, qid = 0, rc = -ENODEV;
+ int cpen, qpen, qid = 0, rc;
struct module *mod;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
- ap_init_message(&ap_msg);
+ rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
+ AP_MSG_FLAG_MEMPOOL : 0);
+ if (rc)
+ goto out;
target_num = (unsigned short)xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
- targets = NULL;
+ rc = -ENOMEM;
if (target_num != 0) {
- struct ep11_target_dev __user *uptr;
-
- targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
- if (!targets) {
- func_code = 0;
- rc = -ENOMEM;
- goto out;
- }
-
- uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
- if (z_copy_from_user(userspace, targets, uptr,
- target_num * sizeof(*targets))) {
- func_code = 0;
- rc = -EFAULT;
- goto out_free;
+ if (userspace) {
+ targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
+ if (!targets)
+ goto out;
+ if (copy_from_user(targets, xcrb->targets,
+ target_num * sizeof(*targets))) {
+ rc = -EFAULT;
+ goto out;
+ }
+ } else {
+ targets = (struct ep11_target_dev __force __kernel *)xcrb->targets;
}
}
rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
- goto out_free;
+ goto out;
print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
ap_msg.msg, ap_msg.len, false);
@@ -1075,11 +1086,11 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
if (!test_bit_inv(domain, perms->adm)) {
rc = -ENODEV;
- goto out_free;
+ goto out;
}
} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
rc = -EOPNOTSUPP;
- goto out_free;
+ goto out;
}
}
@@ -1147,7 +1158,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
pr_debug("no match for address ff.ffff => ENODEV\n");
}
rc = -ENODEV;
- goto out_free;
+ goto out;
}
qid = pref_zq->queue->qid;
@@ -1161,10 +1172,10 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
-out_free:
- kfree(targets);
out:
- ap_release_message(&ap_msg);
+ if (userspace)
+ kfree(targets);
+ ap_release_apmsg(&ap_msg);
if (tr) {
tr->last_rc = rc;
tr->last_qid = qid;
@@ -1174,7 +1185,7 @@ out:
return rc;
}
-long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
{
struct zcrypt_track tr;
int rc;
@@ -1182,13 +1193,13 @@ long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
memset(&tr, 0, sizeof(tr));
do {
- rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
+ rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
/* on ENODEV failure: retry once again after a requested rescan */
if (rc == -ENODEV && zcrypt_process_rescan())
do {
- rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
+ rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
@@ -1204,7 +1215,7 @@ static long zcrypt_rng(char *buffer)
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
unsigned int wgt = 0, pref_wgt = 0;
- unsigned int func_code;
+ unsigned int func_code = 0;
struct ap_message ap_msg;
unsigned int domain;
int qid = 0, rc = -ENODEV;
@@ -1212,7 +1223,9 @@ static long zcrypt_rng(char *buffer)
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
- ap_init_message(&ap_msg);
+ rc = ap_init_apmsg(&ap_msg, 0);
+ if (rc)
+ goto out;
rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
if (rc)
goto out;
@@ -1258,7 +1271,7 @@ static long zcrypt_rng(char *buffer)
spin_unlock(&zcrypt_list_lock);
out:
- ap_release_message(&ap_msg);
+ ap_release_apmsg(&ap_msg);
trace_s390_zcrypt_rep(buffer, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
@@ -1291,19 +1304,25 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
spin_unlock(&zcrypt_list_lock);
}
-void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
+ int maxcard, int maxqueue)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
struct zcrypt_device_status_ext *stat;
int card, queue;
+ maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT);
+ maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT);
+
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
queue = AP_QID_QUEUE(zq->queue->qid);
- stat = &devstatus[card * AP_DOMAINS + queue];
+ if (card >= maxcard || queue >= maxqueue)
+ continue;
+ stat = &devstatus[card * maxqueue + queue];
stat->hwtype = zc->card->ap_dev.device_type;
stat->functions = zc->card->hwinfo.fac >> 26;
stat->qid = zq->queue->qid;
@@ -1523,6 +1542,7 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
int rc;
struct ica_xcRB xcrb;
struct zcrypt_track tr;
+ u32 xflags = ZCRYPT_XFLAG_USERSPACE;
struct ica_xcRB __user *uxcrb = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
@@ -1530,13 +1550,13 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
return -EFAULT;
do {
- rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
+ rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
/* on ENODEV failure: retry once again after a requested rescan */
if (rc == -ENODEV && zcrypt_process_rescan())
do {
- rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
+ rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
@@ -1553,6 +1573,7 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
int rc;
struct ep11_urb xcrb;
struct zcrypt_track tr;
+ u32 xflags = ZCRYPT_XFLAG_USERSPACE;
struct ep11_urb __user *uxcrb = (void __user *)arg;
memset(&tr, 0, sizeof(tr));
@@ -1560,13 +1581,13 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
return -EFAULT;
do {
- rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+ rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
/* on ENODEV failure: retry once again after a requested rescan */
if (rc == -ENODEV && zcrypt_process_rescan())
do {
- rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+ rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
@@ -1607,7 +1628,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
GFP_KERNEL);
if (!device_status)
return -ENOMEM;
- zcrypt_device_status_mask_ext(device_status);
+ zcrypt_device_status_mask_ext(device_status,
+ MAX_ZDEV_CARDIDS_EXT,
+ MAX_ZDEV_DOMAINS_EXT);
if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
@@ -1827,6 +1850,7 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
+ u32 xflags = ZCRYPT_XFLAG_USERSPACE;
struct compat_ica_xcrb xcrb32;
struct zcrypt_track tr;
struct ica_xcRB xcrb64;
@@ -1856,13 +1880,13 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
xcrb64.priority_window = xcrb32.priority_window;
xcrb64.status = xcrb32.status;
do {
- rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
+ rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
/* on ENODEV failure: retry once again after a requested rescan */
if (rc == -ENODEV && zcrypt_process_rescan())
do {
- rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
+ rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64);
} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
@@ -2132,13 +2156,27 @@ int __init zcrypt_api_init(void)
{
int rc;
+ /* make sure the mempool threshold is >= 1 */
+ if (zcrypt_mempool_threshold < 1) {
+ rc = -EINVAL;
+ goto out;
+ }
+
rc = zcrypt_debug_init();
if (rc)
goto out;
rc = zcdn_init();
if (rc)
- goto out;
+ goto out_zcdn_init_failed;
+
+ rc = zcrypt_ccamisc_init();
+ if (rc)
+ goto out_ccamisc_init_failed;
+
+ rc = zcrypt_ep11misc_init();
+ if (rc)
+ goto out_ep11misc_init_failed;
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
@@ -2151,7 +2189,12 @@ int __init zcrypt_api_init(void)
return 0;
out_misc_register_failed:
+ zcrypt_ep11misc_exit();
+out_ep11misc_init_failed:
+ zcrypt_ccamisc_exit();
+out_ccamisc_init_failed:
zcdn_exit();
+out_zcdn_init_failed:
zcrypt_debug_exit();
out:
return rc;
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 4ed481df57ca..6ef8850a42df 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -76,6 +76,13 @@ struct zcrypt_track {
#define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000
#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000
+/*
+ * xflags - flag bits to be passed in the xflags parameter of
+ * zcrypt_send_cprb() and zcrypt_send_ep11_cprb().
+ */
+#define ZCRYPT_XFLAG_USERSPACE 0x0001 /* data ptrs address userspace */
+#define ZCRYPT_XFLAG_NOMEMALLOC 0x0002 /* do not allocate memory via kmalloc */
+
struct zcrypt_ops {
long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *,
struct ap_message *);
@@ -132,6 +139,8 @@ extern atomic_t zcrypt_rescan_req;
extern spinlock_t zcrypt_list_lock;
extern struct list_head zcrypt_card_list;
+extern unsigned int zcrypt_mempool_threshold;
+
#define for_each_zcrypt_card(_zc) \
list_for_each_entry(_zc, &zcrypt_card_list, list)
@@ -161,9 +170,10 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
-long zcrypt_send_cprb(struct ica_xcRB *xcRB);
-long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
-void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
+long zcrypt_send_cprb(struct ica_xcRB *xcRB, u32 xflags);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb, u32 xflags);
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
+ int maxcard, int maxqueue);
int zcrypt_device_status_ext(int card, int queue,
struct zcrypt_device_status_ext *devstatus);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 43a27cb3db84..b975a3728c23 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
+#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
@@ -29,16 +30,31 @@
/* Size of vardata block used for some of the cca requests/replies */
#define VARDATASIZE 4096
-struct cca_info_list_entry {
- struct list_head list;
- u16 cardnr;
- u16 domain;
- struct cca_info info;
-};
+/*
+ * CPRB memory pool held for urgent cases where no memory
+ * can be allocated via kmalloc. This pool is only used
+ * when alloc_and_prep_cprbmem() is called with the xflag
+ * ZCRYPT_XFLAG_NOMEMALLOC. The CPRB memory needs to hold
+ * space for the request AND the reply!
+ */
+#define CPRB_MEMPOOL_ITEM_SIZE (16 * 1024)
+static mempool_t *cprb_mempool;
-/* a list with cca_info_list_entry entries */
-static LIST_HEAD(cca_info_list);
-static DEFINE_SPINLOCK(cca_info_list_lock);
+/*
+ * This is pre-allocated memory for the device status array
+ * used within the findcard() functions. It is currently
+ * 128 * 128 * 4 bytes = 64 KB in size. Usage of this memory is
+ * controlled via dev_status_mem_mutex. Needs adaptation if more
+ * than 128 cards or domains are to be supported.
+ */
+#define ZCRYPT_DEV_STATUS_CARD_MAX 128
+#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128
+#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \
+ ZCRYPT_DEV_STATUS_QUEUE_MAX)
+#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \
+ sizeof(struct zcrypt_device_status_ext))
+static void *dev_status_mem;
+static DEFINE_MUTEX(dev_status_mem_mutex);
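How this pre-allocated array is consumed is not visible in this excerpt; presumably the findcard() paths take dev_status_mem_mutex, reuse dev_status_mem, and call the now-bounded zcrypt_device_status_mask_ext(). A sketch under that assumption:

	/* Assumed usage: borrow the pre-allocated 64 KB status array
	 * instead of allocating one per findcard() invocation.
	 */
	struct zcrypt_device_status_ext *device_status;

	mutex_lock(&dev_status_mem_mutex);
	memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE);
	device_status = (struct zcrypt_device_status_ext *)dev_status_mem;
	zcrypt_device_status_mask_ext(device_status,
				      ZCRYPT_DEV_STATUS_CARD_MAX,
				      ZCRYPT_DEV_STATUS_QUEUE_MAX);
	/* ... scan device_status for matching cards/domains ... */
	mutex_unlock(&dev_status_mem_mutex);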
/*
* Simple check if the token is a valid CCA secure AES data key
@@ -219,19 +235,27 @@ EXPORT_SYMBOL(cca_check_sececckeytoken);
static int alloc_and_prep_cprbmem(size_t paramblen,
u8 **p_cprb_mem,
struct CPRBX **p_req_cprb,
- struct CPRBX **p_rep_cprb)
+ struct CPRBX **p_rep_cprb,
+ u32 xflags)
{
- u8 *cprbmem;
+ u8 *cprbmem = NULL;
size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
+ size_t len = 2 * cprbplusparamblen;
struct CPRBX *preqcblk, *prepcblk;
/*
* allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block
*/
- cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
+ if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) {
+ if (len <= CPRB_MEMPOOL_ITEM_SIZE)
+ cprbmem = mempool_alloc_preallocated(cprb_mempool);
+ } else {
+ cprbmem = kmalloc(len, GFP_KERNEL);
+ }
if (!cprbmem)
return -ENOMEM;
+ memset(cprbmem, 0, len);
preqcblk = (struct CPRBX *)cprbmem;
prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen);
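The resulting buffer layout, as a sketch of what the two returned pointers reference (one contiguous allocation holds request and reply):

	cprbmem:
	[ req CPRB | request param block ][ rep CPRB | reply param block ]
	^ preqcblk                        ^ prepcblk
	|<--- cprbplusparamblen --------->|<--- cprbplusparamblen ------>|

Note the two allocator legs above: mempool_alloc_preallocated() never calls into the allocator and returns NULL as soon as the pool is drained, which is exactly the no-kmalloc guarantee that ZCRYPT_XFLAG_NOMEMALLOC asks for; the default leg stays plain kmalloc(GFP_KERNEL).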
@@ -261,11 +285,15 @@ static int alloc_and_prep_cprbmem(size_t paramblen,
* with zeros before freeing (useful if there was some
* clear key material in there).
*/
-static void free_cprbmem(void *mem, size_t paramblen, int scrub)
+static void free_cprbmem(void *mem, size_t paramblen, bool scrub, u32 xflags)
{
- if (scrub)
+ if (mem && scrub)
memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
- kfree(mem);
+
+ if (xflags & ZCRYPT_XFLAG_NOMEMALLOC)
+ mempool_free(mem, cprb_mempool);
+ else
+ kfree(mem);
}
/*
@@ -290,7 +318,7 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb,
* Generate (random) CCA AES DATA secure key.
*/
int cca_genseckey(u16 cardnr, u16 domain,
- u32 keybitsize, u8 *seckey)
+ u32 keybitsize, u8 *seckey, u32 xflags)
{
int i, rc, keysize;
int seckeysize;
@@ -332,7 +360,8 @@ int cca_genseckey(u16 cardnr, u16 domain,
} __packed * prepparm;
/* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
if (rc)
return rc;
@@ -379,7 +408,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -424,7 +453,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
out:
- free_cprbmem(mem, PARMBSIZE, 0);
+ free_cprbmem(mem, PARMBSIZE, false, xflags);
return rc;
}
EXPORT_SYMBOL(cca_genseckey);
@@ -433,7 +462,7 @@ EXPORT_SYMBOL(cca_genseckey);
 * Generate a CCA AES DATA secure key with given key value.
*/
int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
- const u8 *clrkey, u8 *seckey)
+ const u8 *clrkey, u8 *seckey, u32 xflags)
{
int rc, keysize, seckeysize;
u8 *mem, *ptr;
@@ -473,7 +502,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
} __packed * prepparm;
/* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
if (rc)
return rc;
@@ -517,7 +547,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -563,7 +593,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
out:
- free_cprbmem(mem, PARMBSIZE, 1);
+ free_cprbmem(mem, PARMBSIZE, true, xflags);
return rc;
}
EXPORT_SYMBOL(cca_clr2seckey);
@@ -573,7 +603,7 @@ EXPORT_SYMBOL(cca_clr2seckey);
*/
int cca_sec2protkey(u16 cardnr, u16 domain,
const u8 *seckey, u8 *protkey, u32 *protkeylen,
- u32 *protkeytype)
+ u32 *protkeytype, u32 xflags)
{
int rc;
u8 *mem, *ptr;
@@ -619,7 +649,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
} __packed * prepparm;
/* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
if (rc)
return rc;
@@ -644,7 +675,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -712,7 +743,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
*protkeylen = prepparm->lv3.ckb.len;
out:
- free_cprbmem(mem, PARMBSIZE, 0);
+ free_cprbmem(mem, PARMBSIZE, true, xflags);
return rc;
}
EXPORT_SYMBOL(cca_sec2protkey);
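Worth noting (not spelled out in the patch text): the scrub argument here flips from 0 to true, as it also does in cca_cipher2protkey() and cca_ecc2protkey() below. The reply buffer carries protected-key material, and with the mempool in play the memory is recycled rather than freed, so it is now zeroized before going back to the pool.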
@@ -737,7 +768,7 @@ static const u8 aes_cipher_key_skeleton[] = {
* Generate (random) CCA AES CIPHER secure key.
*/
int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, u32 *keybufsize)
+ u8 *keybuf, u32 *keybufsize, u32 xflags)
{
int rc;
u8 *mem, *ptr;
@@ -813,7 +844,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
struct cipherkeytoken *t;
/* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
if (rc)
return rc;
@@ -872,7 +904,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -923,7 +955,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
*keybufsize = t->len;
out:
- free_cprbmem(mem, PARMBSIZE, 0);
+ free_cprbmem(mem, PARMBSIZE, false, xflags);
return rc;
}
EXPORT_SYMBOL(cca_gencipherkey);
@@ -938,7 +970,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
const u8 *clr_key_value,
int clr_key_bit_size,
u8 *key_token,
- int *key_token_size)
+ int *key_token_size,
+ u32 xflags)
{
int rc, n;
u8 *mem, *ptr;
@@ -989,7 +1022,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1;
/* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
if (rc)
return rc;
@@ -1038,7 +1072,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -1077,7 +1111,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
*key_token_size = t->len;
out:
- free_cprbmem(mem, PARMBSIZE, 0);
+ free_cprbmem(mem, PARMBSIZE, false, xflags);
return rc;
}
@@ -1085,23 +1119,31 @@ out:
* Build CCA AES CIPHER secure key with a given clear key value.
*/
int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, u32 *keybufsize)
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 xflags)
{
int rc;
- u8 *token;
+ void *mem;
int tokensize;
- u8 exorbuf[32];
+ u8 *token, exorbuf[32];
struct cipherkeytoken *t;
/* fill exorbuf with random data */
get_random_bytes(exorbuf, sizeof(exorbuf));
- /* allocate space for the key token to build */
- token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL);
- if (!token)
+ /*
+ * Allocate space for the key token to build.
+	 * As we only need up to MAXCCAVLSCTOKENSIZE bytes for this,
+	 * the already existing cprb mempool is used to satisfy this
+	 * short term memory requirement.
+ */
+ mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
/* prepare the token with the key skeleton */
+ token = (u8 *)mem;
tokensize = SIZEOF_SKELETON;
memcpy(token, aes_cipher_key_skeleton, tokensize);
@@ -1120,28 +1162,28 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
* 4/4 COMPLETE the secure cipher key import
*/
rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
- exorbuf, keybitsize, token, &tokensize);
+ exorbuf, keybitsize, token, &tokensize, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
__func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
- clrkey, keybitsize, token, &tokensize);
+ clrkey, keybitsize, token, &tokensize, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
__func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
- exorbuf, keybitsize, token, &tokensize);
+ exorbuf, keybitsize, token, &tokensize, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
__func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
- NULL, keybitsize, token, &tokensize);
+ NULL, keybitsize, token, &tokensize, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
__func__, rc);
@@ -1158,7 +1200,7 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
*keybufsize = tokensize;
out:
- kfree(token);
+ mempool_free(mem, cprb_mempool);
return rc;
}
EXPORT_SYMBOL(cca_clr2cipherkey);
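One detail worth calling out: the free path here is unconditionally mempool_free(). Unlike alloc_and_prep_cprbmem(), both allocation branches above return pool-managed memory (mempool_alloc() with GFP_KERNEL simply uses the pool's kmalloc-backed allocator), and mempool_free() does the right thing for either: it refills the pool if it is below its minimum, otherwise it kfrees the element.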
@@ -1167,7 +1209,8 @@ EXPORT_SYMBOL(cca_clr2cipherkey);
 * Derive protected key from CCA AES cipher secure key.
*/
int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags)
{
int rc;
u8 *mem, *ptr;
@@ -1219,7 +1262,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
int keytoklen = ((struct cipherkeytoken *)ckey)->len;
/* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
if (rc)
return rc;
@@ -1249,7 +1293,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -1323,7 +1367,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
*protkeylen = prepparm->vud.ckb.keylen;
out:
- free_cprbmem(mem, PARMBSIZE, 0);
+ free_cprbmem(mem, PARMBSIZE, true, xflags);
return rc;
}
EXPORT_SYMBOL(cca_cipher2protkey);
@@ -1332,7 +1376,7 @@ EXPORT_SYMBOL(cca_cipher2protkey);
* Derive protected key from CCA ECC secure private key.
*/
int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags)
{
int rc;
u8 *mem, *ptr;
@@ -1382,7 +1426,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
int keylen = ((struct eccprivkeytoken *)key)->len;
/* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
if (rc)
return rc;
@@ -1412,7 +1457,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -1470,7 +1515,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
*protkeytype = PKEY_KEYTYPE_ECC;
out:
- free_cprbmem(mem, PARMBSIZE, 0);
+ free_cprbmem(mem, PARMBSIZE, true, xflags);
return rc;
}
EXPORT_SYMBOL(cca_ecc2protkey);
@@ -1481,7 +1526,8 @@ EXPORT_SYMBOL(cca_ecc2protkey);
int cca_query_crypto_facility(u16 cardnr, u16 domain,
const char *keyword,
u8 *rarray, size_t *rarraylen,
- u8 *varray, size_t *varraylen)
+ u8 *varray, size_t *varraylen,
+ u32 xflags)
{
int rc;
u16 len;
@@ -1505,7 +1551,8 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
} __packed * prepparm;
/* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
+ rc = alloc_and_prep_cprbmem(parmbsize, &mem,
+ &preqcblk, &prepcblk, xflags);
if (rc)
return rc;
@@ -1526,7 +1573,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -1573,94 +1620,21 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
}
out:
- free_cprbmem(mem, parmbsize, 0);
+ free_cprbmem(mem, parmbsize, false, xflags);
return rc;
}
EXPORT_SYMBOL(cca_query_crypto_facility);
-static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci)
-{
- int rc = -ENOENT;
- struct cca_info_list_entry *ptr;
-
- spin_lock_bh(&cca_info_list_lock);
- list_for_each_entry(ptr, &cca_info_list, list) {
- if (ptr->cardnr == cardnr && ptr->domain == domain) {
- memcpy(ci, &ptr->info, sizeof(*ci));
- rc = 0;
- break;
- }
- }
- spin_unlock_bh(&cca_info_list_lock);
-
- return rc;
-}
-
-static void cca_info_cache_update(u16 cardnr, u16 domain,
- const struct cca_info *ci)
-{
- int found = 0;
- struct cca_info_list_entry *ptr;
-
- spin_lock_bh(&cca_info_list_lock);
- list_for_each_entry(ptr, &cca_info_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- memcpy(&ptr->info, ci, sizeof(*ci));
- found = 1;
- break;
- }
- }
- if (!found) {
- ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
- if (!ptr) {
- spin_unlock_bh(&cca_info_list_lock);
- return;
- }
- ptr->cardnr = cardnr;
- ptr->domain = domain;
- memcpy(&ptr->info, ci, sizeof(*ci));
- list_add(&ptr->list, &cca_info_list);
- }
- spin_unlock_bh(&cca_info_list_lock);
-}
-
-static void cca_info_cache_scrub(u16 cardnr, u16 domain)
-{
- struct cca_info_list_entry *ptr;
-
- spin_lock_bh(&cca_info_list_lock);
- list_for_each_entry(ptr, &cca_info_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- list_del(&ptr->list);
- kfree(ptr);
- break;
- }
- }
- spin_unlock_bh(&cca_info_list_lock);
-}
-
-static void __exit mkvp_cache_free(void)
-{
- struct cca_info_list_entry *ptr, *pnext;
-
- spin_lock_bh(&cca_info_list_lock);
- list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) {
- list_del(&ptr->list);
- kfree(ptr);
- }
- spin_unlock_bh(&cca_info_list_lock);
-}
-
/*
- * Fetch cca_info values via query_crypto_facility from adapter.
+ * Fetch cca_info values about a CCA queue via
+ * query_crypto_facility from adapter.
*/
-static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
+int cca_get_info(u16 cardnr, u16 domain, struct cca_info *ci, u32 xflags)
{
+ void *mem;
int rc, found = 0;
size_t rlen, vlen;
- u8 *rarray, *varray, *pg;
+ u8 *rarray, *varray;
struct zcrypt_device_status_ext devstat;
memset(ci, 0, sizeof(*ci));
@@ -1671,17 +1645,22 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
return rc;
ci->hwtype = devstat.hwtype;
- /* prep page for rule array and var array use */
- pg = (u8 *)__get_free_page(GFP_KERNEL);
- if (!pg)
+ /*
+ * Prep memory for rule array and var array use.
+ * Use the cprb mempool for this.
+ */
+ mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- rarray = pg;
- varray = pg + PAGE_SIZE / 2;
+ rarray = (u8 *)mem;
+ varray = (u8 *)mem + PAGE_SIZE / 2;
rlen = vlen = PAGE_SIZE / 2;
/* QF for this card/domain */
rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
- rarray, &rlen, varray, &vlen);
+ rarray, &rlen, varray, &vlen, xflags);
if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) {
memcpy(ci->serial, rarray, 8);
ci->new_asym_mk_state = (char)rarray[4 * 8];
@@ -1708,7 +1687,7 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
goto out;
rlen = vlen = PAGE_SIZE / 2;
rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
- rarray, &rlen, varray, &vlen);
+ rarray, &rlen, varray, &vlen, xflags);
if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) {
ci->new_apka_mk_state = (char)rarray[10 * 8];
ci->cur_apka_mk_state = (char)rarray[11 * 8];
@@ -1723,177 +1702,32 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
}
out:
- free_page((unsigned long)pg);
+ mempool_free(mem, cprb_mempool);
return found == 2 ? 0 : -ENOENT;
}
-
-/*
- * Fetch cca information about a CCA queue.
- */
-int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify)
-{
- int rc;
-
- rc = cca_info_cache_fetch(card, dom, ci);
- if (rc || verify) {
- rc = fetch_cca_info(card, dom, ci);
- if (rc == 0)
- cca_info_cache_update(card, dom, ci);
- }
-
- return rc;
-}
EXPORT_SYMBOL(cca_get_info);
-/*
- * Search for a matching crypto card based on the
- * Master Key Verification Pattern given.
- */
-static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
- int verify, int minhwtype)
-{
- struct zcrypt_device_status_ext *device_status;
- u16 card, dom;
- struct cca_info ci;
- int i, rc, oi = -1;
-
- /* mkvp must not be zero, minhwtype needs to be >= 0 */
- if (mkvp == 0 || minhwtype < 0)
- return -EINVAL;
-
- /* fetch status of all crypto cards */
- device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
- if (!device_status)
- return -ENOMEM;
- zcrypt_device_status_mask_ext(device_status);
-
- /* walk through all crypto cards */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- if (device_status[i].online &&
- device_status[i].functions & 0x04) {
- /* enabled CCA card, check current mkvp from cache */
- if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
- ci.hwtype >= minhwtype &&
- ci.cur_aes_mk_state == '2' &&
- ci.cur_aes_mkvp == mkvp) {
- if (!verify)
- break;
- /* verify: refresh card info */
- if (fetch_cca_info(card, dom, &ci) == 0) {
- cca_info_cache_update(card, dom, &ci);
- if (ci.hwtype >= minhwtype &&
- ci.cur_aes_mk_state == '2' &&
- ci.cur_aes_mkvp == mkvp)
- break;
- }
- }
- } else {
- /* Card is offline and/or not a CCA card. */
- /* del mkvp entry from cache if it exists */
- cca_info_cache_scrub(card, dom);
- }
- }
- if (i >= MAX_ZDEV_ENTRIES_EXT) {
- /* nothing found, so this time without cache */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- if (!(device_status[i].online &&
- device_status[i].functions & 0x04))
- continue;
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- /* fresh fetch mkvp from adapter */
- if (fetch_cca_info(card, dom, &ci) == 0) {
- cca_info_cache_update(card, dom, &ci);
- if (ci.hwtype >= minhwtype &&
- ci.cur_aes_mk_state == '2' &&
- ci.cur_aes_mkvp == mkvp)
- break;
- if (ci.hwtype >= minhwtype &&
- ci.old_aes_mk_state == '2' &&
- ci.old_aes_mkvp == mkvp &&
- oi < 0)
- oi = i;
- }
- }
- if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
- /* old mkvp matched, use this card then */
- card = AP_QID_CARD(device_status[oi].qid);
- dom = AP_QID_QUEUE(device_status[oi].qid);
- }
- }
- if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
- if (pcardnr)
- *pcardnr = card;
- if (pdomain)
- *pdomain = dom;
- rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1);
- } else {
- rc = -ENODEV;
- }
-
- kvfree(device_status);
- return rc;
-}
-
-/*
- * Search for a matching crypto card based on the Master Key
- * Verification Pattern provided inside a secure key token.
- */
-int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
-{
- u64 mkvp;
- int minhwtype = 0;
- const struct keytoken_header *hdr = (struct keytoken_header *)key;
-
- if (hdr->type != TOKTYPE_CCA_INTERNAL)
- return -EINVAL;
-
- switch (hdr->version) {
- case TOKVER_CCA_AES:
- mkvp = ((struct secaeskeytoken *)key)->mkvp;
- break;
- case TOKVER_CCA_VLSC:
- mkvp = ((struct cipherkeytoken *)key)->mkvp0;
- minhwtype = AP_DEVICE_TYPE_CEX6;
- break;
- default:
- return -EINVAL;
- }
-
- return findcard(mkvp, pcardnr, pdomain, verify, minhwtype);
-}
-EXPORT_SYMBOL(cca_findcard);
-
-int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
- int verify)
+ u32 xflags)
{
struct zcrypt_device_status_ext *device_status;
- u32 *_apqns = NULL, _nr_apqns = 0;
- int i, card, dom, curmatch, oldmatch, rc = 0;
+ int i, card, dom, curmatch, oldmatch;
struct cca_info ci;
+ u32 _nr_apqns = 0;
- /* fetch status of all crypto cards */
- device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
- if (!device_status)
- return -ENOMEM;
- zcrypt_device_status_mask_ext(device_status);
+ /* occupy the device status memory */
+ mutex_lock(&dev_status_mem_mutex);
+ memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE);
+ device_status = (struct zcrypt_device_status_ext *)dev_status_mem;
- /* allocate 1k space for up to 256 apqns */
- _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
- if (!_apqns) {
- kvfree(device_status);
- return -ENOMEM;
- }
+ /* fetch crypto device status into this struct */
+ zcrypt_device_status_mask_ext(device_status,
+ ZCRYPT_DEV_STATUS_CARD_MAX,
+ ZCRYPT_DEV_STATUS_QUEUE_MAX);
 /* walk through all the crypto apqns */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) {
card = AP_QID_CARD(device_status[i].qid);
dom = AP_QID_QUEUE(device_status[i].qid);
/* check online state */
@@ -1909,7 +1743,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
if (domain != 0xFFFF && dom != domain)
continue;
/* get cca info on this apqn */
- if (cca_get_info(card, dom, &ci, verify))
+ if (cca_get_info(card, dom, &ci, xflags))
continue;
/* current master key needs to be valid */
if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2')
@@ -1939,27 +1773,41 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
continue;
}
 /* apqn passed all filtering criteria, add to the array */
- if (_nr_apqns < 256)
- _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
+ if (_nr_apqns < *nr_apqns)
+ apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
}
- /* nothing found ? */
- if (!_nr_apqns) {
- kfree(_apqns);
- rc = -ENODEV;
- } else {
- /* no re-allocation, simple return the _apqns array */
- *apqns = _apqns;
- *nr_apqns = _nr_apqns;
- rc = 0;
- }
+ *nr_apqns = _nr_apqns;
- kvfree(device_status);
- return rc;
+ /* release the device status memory */
+ mutex_unlock(&dev_status_mem_mutex);
+
+ return _nr_apqns ? 0 : -ENODEV;
}
EXPORT_SYMBOL(cca_findcard2);
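A hedged usage sketch of the new caller-provided array contract (the 0xFFFF wildcards and the card<<16|domain packing are from this patch; the helper itself and the array size are illustrative):

	/* illustrative: find an apqn whose current AES MKVP matches mkvp */
	static int find_cca_apqn(u64 mkvp, u16 *pcard, u16 *pdom)
	{
		u32 apqns[64], nr_apqns = ARRAY_SIZE(apqns);
		int rc;

		rc = cca_findcard2(apqns, &nr_apqns, 0xFFFF, 0xFFFF,
				   0, AES_MK_SET, mkvp, 0, 0);
		if (rc)
			return rc;	/* -ENODEV when nothing matched */
		*pcard = apqns[0] >> 16;	/* high 16 bits: card */
		*pdom = apqns[0] & 0xFFFF;	/* low 16 bits: domain */
		return 0;
	}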
-void __exit zcrypt_ccamisc_exit(void)
+int __init zcrypt_ccamisc_init(void)
+{
+ /* Pre-allocate a small memory pool for cca cprbs. */
+ cprb_mempool = mempool_create_kmalloc_pool(zcrypt_mempool_threshold,
+ CPRB_MEMPOOL_ITEM_SIZE);
+ if (!cprb_mempool)
+ return -ENOMEM;
+
+	/* Pre-allocate the crypto device status array used in findcard() */
+ dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL);
+ if (!dev_status_mem) {
+ mempool_destroy(cprb_mempool);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void zcrypt_ccamisc_exit(void)
{
- mkvp_cache_free();
+ mutex_lock(&dev_status_mem_mutex);
+ kvfree(dev_status_mem);
+ mutex_unlock(&dev_status_mem_mutex);
+ mempool_destroy(cprb_mempool);
}
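For reference, mempool_create_kmalloc_pool(min_nr, size) is, per <linux/mempool.h>, shorthand for the long-hand form sketched below, so this pool pins zcrypt_mempool_threshold elements of 16 KB each at init time:

	/* roughly equivalent long-hand form of the pool creation above */
	cprb_mempool = mempool_create(zcrypt_mempool_threshold,
				      mempool_kmalloc, mempool_kfree,
				      (void *)CPRB_MEMPOOL_ITEM_SIZE);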
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 26bdca702523..1ecc4e37e9ad 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -160,44 +160,47 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
/*
* Generate (random) CCA AES DATA secure key.
*/
-int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey);
+int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey,
+ u32 xflags);
/*
* Generate CCA AES DATA secure key with given clear key value.
*/
int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
- const u8 *clrkey, u8 *seckey);
+ const u8 *clrkey, u8 *seckey, u32 xflags);
/*
 * Derive protected key from a CCA AES DATA secure key.
*/
int cca_sec2protkey(u16 cardnr, u16 domain,
const u8 *seckey, u8 *protkey, u32 *protkeylen,
- u32 *protkeytype);
+ u32 *protkeytype, u32 xflags);
/*
* Generate (random) CCA AES CIPHER secure key.
*/
int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, u32 *keybufsize);
+ u8 *keybuf, u32 *keybufsize, u32 xflags);
/*
 * Derive protected key from CCA AES cipher secure key.
*/
int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
/*
* Build CCA AES CIPHER secure key with a given clear key value.
*/
int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, u32 *keybufsize);
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
+ u32 xflags);
/*
 * Derive protected key from CCA ECC secure private key.
*/
int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags);
/*
* Query cryptographic facility from CCA adapter
@@ -205,16 +208,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
int cca_query_crypto_facility(u16 cardnr, u16 domain,
const char *keyword,
u8 *rarray, size_t *rarraylen,
- u8 *varray, size_t *varraylen);
-
-/*
- * Search for a matching crypto card based on the Master Key
- * Verification Pattern provided inside a secure key.
- * Works with CCA AES data and cipher keys.
- * Returns < 0 on failure, 0 if CURRENT MKVP matches and
- * 1 if OLD MKVP matches.
- */
-int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
+ u8 *varray, size_t *varraylen,
+ u32 xflags);
/*
 * Build a list of cca apqns meeting the following constraints:
@@ -224,21 +219,16 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
* - if minhwtype > 0 only apqns with hwtype >= minhwtype
* - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp
* - if old_mkvp != 0 only apqns where old_mkvp == mkvp
- * - if verify is enabled and a cur_mkvp and/or old_mkvp
- * value is given, then refetch the cca_info and make sure the current
- * cur_mkvp or old_mkvp values of the apqn are used.
* The mktype determines which set of master keys to use:
* 0 = AES_MK_SET - AES MK set, 1 = APKA MK_SET - APKA MK set
- * The array of apqn entries is allocated with kmalloc and returned in *apqns;
- * the number of apqns stored into the list is returned in *nr_apqns. One apqn
- * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and
- * may be casted to struct pkey_apqn. The return value is either 0 for success
- * or a negative errno value. If no apqn meeting the criteria is found,
- * -ENODEV is returned.
+ * The caller should set *nr_apqns to the nr of elements available in *apqns.
+ * On return *nr_apqns is then updated with the nr of apqns filled into *apqns.
+ * The return value is either 0 for success or a negative errno value.
+ * If no apqn meeting the criteria is found, -ENODEV is returned.
*/
-int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
- int verify);
+ u32 xflags);
#define AES_MK_SET 0
#define APKA_MK_SET 1
@@ -270,8 +260,9 @@ struct cca_info {
/*
 * Fetch cca information about a CCA queue.
*/
-int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify);
+int cca_get_info(u16 card, u16 dom, struct cca_info *ci, u32 xflags);
+int zcrypt_ccamisc_init(void);
void zcrypt_ccamisc_exit(void);
#endif /* _ZCRYPT_CCAMISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 64df7d2f6266..6ba7fbddd3f7 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -79,14 +79,13 @@ static ssize_t cca_serialnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = dev_get_drvdata(dev);
- struct cca_info ci;
struct ap_card *ac = to_ap_card(dev);
+ struct cca_info ci;
memset(&ci, 0, sizeof(ci));
if (ap_domain_index >= 0)
- cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+ cca_get_info(ac->id, ap_domain_index, &ci, 0);
return sysfs_emit(buf, "%s\n", ci.serial);
}
@@ -110,17 +109,17 @@ static ssize_t cca_mkvps_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ static const char * const new_state[] = { "empty", "partial", "full" };
+ static const char * const cao_state[] = { "invalid", "valid" };
struct zcrypt_queue *zq = dev_get_drvdata(dev);
- int n = 0;
struct cca_info ci;
- static const char * const cao_state[] = { "invalid", "valid" };
- static const char * const new_state[] = { "empty", "partial", "full" };
+ int n = 0;
memset(&ci, 0, sizeof(ci));
cca_get_info(AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- &ci, zq->online);
+ &ci, 0);
if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n",
@@ -210,13 +209,12 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = dev_get_drvdata(dev);
- struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
+ struct ep11_card_info ci;
memset(&ci, 0, sizeof(ci));
- ep11_get_card_info(ac->id, &ci, zc->online);
+ ep11_get_card_info(ac->id, &ci, 0);
if (ci.API_ord_nr > 0)
return sysfs_emit(buf, "%u\n", ci.API_ord_nr);
@@ -231,13 +229,12 @@ static ssize_t ep11_fw_version_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = dev_get_drvdata(dev);
- struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
+ struct ep11_card_info ci;
memset(&ci, 0, sizeof(ci));
- ep11_get_card_info(ac->id, &ci, zc->online);
+ ep11_get_card_info(ac->id, &ci, 0);
if (ci.FW_version > 0)
return sysfs_emit(buf, "%d.%d\n",
@@ -254,13 +251,12 @@ static ssize_t ep11_serialnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = dev_get_drvdata(dev);
- struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
+ struct ep11_card_info ci;
memset(&ci, 0, sizeof(ci));
- ep11_get_card_info(ac->id, &ci, zc->online);
+ ep11_get_card_info(ac->id, &ci, 0);
if (ci.serial[0])
return sysfs_emit(buf, "%16.16s\n", ci.serial);
@@ -291,14 +287,13 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = dev_get_drvdata(dev);
- int i, n = 0;
- struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
+ struct ep11_card_info ci;
+ int i, n = 0;
memset(&ci, 0, sizeof(ci));
- ep11_get_card_info(ac->id, &ci, zc->online);
+ ep11_get_card_info(ac->id, &ci, 0);
for (i = 0; ep11_op_modes[i].mode_txt; i++) {
if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
@@ -348,7 +343,7 @@ static ssize_t ep11_mkvps_show(struct device *dev,
if (zq->online)
ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- &di);
+ &di, 0);
if (di.cur_wk_state == '0') {
n = sysfs_emit(buf, "WK CUR: %s -\n",
@@ -395,7 +390,7 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
if (zq->online)
ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
AP_QID_QUEUE(zq->queue->qid),
- &di);
+ &di, 0);
for (i = 0; ep11_op_modes[i].mode_txt; i++) {
if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index cb7e6da43602..2f50fc7b8f61 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -10,9 +10,10 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
+#include <linux/mempool.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/random.h>
+#include <linux/slab.h>
#include <asm/zcrypt.h>
#include <asm/pkey.h>
#include <crypto/aes.h>
@@ -30,85 +31,29 @@
static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
-/* ep11 card info cache */
-struct card_list_entry {
- struct list_head list;
- u16 cardnr;
- struct ep11_card_info info;
-};
-static LIST_HEAD(card_list);
-static DEFINE_SPINLOCK(card_list_lock);
-
-static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
-{
- int rc = -ENOENT;
- struct card_list_entry *ptr;
-
- spin_lock_bh(&card_list_lock);
- list_for_each_entry(ptr, &card_list, list) {
- if (ptr->cardnr == cardnr) {
- memcpy(ci, &ptr->info, sizeof(*ci));
- rc = 0;
- break;
- }
- }
- spin_unlock_bh(&card_list_lock);
-
- return rc;
-}
-
-static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
-{
- int found = 0;
- struct card_list_entry *ptr;
-
- spin_lock_bh(&card_list_lock);
- list_for_each_entry(ptr, &card_list, list) {
- if (ptr->cardnr == cardnr) {
- memcpy(&ptr->info, ci, sizeof(*ci));
- found = 1;
- break;
- }
- }
- if (!found) {
- ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
- if (!ptr) {
- spin_unlock_bh(&card_list_lock);
- return;
- }
- ptr->cardnr = cardnr;
- memcpy(&ptr->info, ci, sizeof(*ci));
- list_add(&ptr->list, &card_list);
- }
- spin_unlock_bh(&card_list_lock);
-}
-
-static void card_cache_scrub(u16 cardnr)
-{
- struct card_list_entry *ptr;
-
- spin_lock_bh(&card_list_lock);
- list_for_each_entry(ptr, &card_list, list) {
- if (ptr->cardnr == cardnr) {
- list_del(&ptr->list);
- kfree(ptr);
- break;
- }
- }
- spin_unlock_bh(&card_list_lock);
-}
-
-static void __exit card_cache_free(void)
-{
- struct card_list_entry *ptr, *pnext;
+/*
+ * Cprb memory pool held for urgent cases where no memory
+ * can be allocated via kmalloc. This pool is only used when
+ * alloc_cprbmem() is called with the xflag ZCRYPT_XFLAG_NOMEMALLOC.
+ */
+#define CPRB_MEMPOOL_ITEM_SIZE (8 * 1024)
+static mempool_t *cprb_mempool;
- spin_lock_bh(&card_list_lock);
- list_for_each_entry_safe(ptr, pnext, &card_list, list) {
- list_del(&ptr->list);
- kfree(ptr);
- }
- spin_unlock_bh(&card_list_lock);
-}
+/*
+ * This is pre-allocated memory for the device status array
+ * used within the ep11_findcard2() function. It is currently
+ * 128 * 128 * 4 bytes = 64 KB in size. Usage of this memory is
+ * controlled via dev_status_mem_mutex. Needs adaptation if more
+ * than 128 cards or domains are to be supported.
+ */
+#define ZCRYPT_DEV_STATUS_CARD_MAX 128
+#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128
+#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \
+ ZCRYPT_DEV_STATUS_QUEUE_MAX)
+#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \
+ sizeof(struct zcrypt_device_status_ext))
+static void *dev_status_mem;
+static DEFINE_MUTEX(dev_status_mem_mutex);
static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver,
struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
@@ -411,14 +356,20 @@ EXPORT_SYMBOL(ep11_check_aes_key);
/*
* Allocate and prepare ep11 cprb plus additional payload.
*/
-static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+static void *alloc_cprbmem(size_t payload_len, u32 xflags)
{
size_t len = sizeof(struct ep11_cprb) + payload_len;
- struct ep11_cprb *cprb;
+ struct ep11_cprb *cprb = NULL;
- cprb = kzalloc(len, GFP_KERNEL);
+ if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) {
+ if (len <= CPRB_MEMPOOL_ITEM_SIZE)
+ cprb = mempool_alloc_preallocated(cprb_mempool);
+ } else {
+ cprb = kmalloc(len, GFP_KERNEL);
+ }
if (!cprb)
return NULL;
+ memset(cprb, 0, len);
cprb->cprb_len = sizeof(struct ep11_cprb);
cprb->cprb_ver_id = 0x04;
@@ -430,6 +381,20 @@ static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
}
/*
+ * Free ep11 cprb buffer space.
+ */
+static void free_cprbmem(void *mem, size_t payload_len, bool scrub, u32 xflags)
+{
+ if (mem && scrub)
+ memzero_explicit(mem, sizeof(struct ep11_cprb) + payload_len);
+
+ if (xflags & ZCRYPT_XFLAG_NOMEMALLOC)
+ mempool_free(mem, cprb_mempool);
+ else
+ kfree(mem);
+}
+
+/*
* Some helper functions related to ASN1 encoding.
* Limited to length info <= 2 byte.
*/
@@ -489,6 +454,7 @@ static inline void prep_urb(struct ep11_urb *u,
struct ep11_cprb *req, size_t req_len,
struct ep11_cprb *rep, size_t rep_len)
{
+ memset(u, 0, sizeof(*u));
u->targets = (u8 __user *)t;
u->targets_num = nt;
u->req = (u8 __user *)req;
@@ -583,7 +549,7 @@ static int check_reply_cprb(const struct ep11_cprb *rep, const char *func)
* Helper function which does an ep11 query with given query type.
*/
static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
- size_t buflen, u8 *buf)
+ size_t buflen, u8 *buf, u32 xflags)
{
struct ep11_info_req_pl {
struct pl_head head;
@@ -605,11 +571,11 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
- struct ep11_urb *urb = NULL;
+ struct ep11_urb urb;
int api = EP11_API_V1, rc = -ENOMEM;
/* request cprb and payload */
- req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+ req = alloc_cprbmem(sizeof(struct ep11_info_req_pl), xflags);
if (!req)
goto out;
req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req));
@@ -621,22 +587,19 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
req_pl->query_subtype_len = sizeof(u32);
/* reply cprb and payload */
- rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+ rep = alloc_cprbmem(sizeof(struct ep11_info_rep_pl) + buflen, xflags);
if (!rep)
goto out;
rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(*urb), GFP_KERNEL);
- if (!urb)
- goto out;
target.ap_id = cardnr;
target.dom_id = domain;
- prep_urb(urb, &target, 1,
+ prep_urb(&urb, &target, 1,
req, sizeof(*req) + sizeof(*req_pl),
rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
- rc = zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)cardnr, (int)domain, rc);
@@ -667,16 +630,15 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
out:
- kfree(req);
- kfree(rep);
- kfree(urb);
+ free_cprbmem(req, 0, false, xflags);
+ free_cprbmem(rep, 0, false, xflags);
return rc;
}
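The urb handling above shows the recurring pattern of this file's changes: struct ep11_urb is small, so a kmalloc'ed object becomes a stack variable, and the memset() added to prep_urb() keeps the now-uninitialized stack structure fully cleared. The shape at every converted call site (lines taken from ep11_query_info() above):

	struct ep11_urb urb;	/* now on the caller's stack */

	prep_urb(&urb, &target, 1,
		 req, sizeof(*req) + sizeof(*req_pl),
		 rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
	rc = zcrypt_send_ep11_cprb(&urb, xflags);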
/*
* Provide information about an EP11 card.
*/
-int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags)
{
int rc;
struct ep11_module_query_info {
@@ -706,30 +668,26 @@ int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
u32 max_CP_index;
} __packed * pmqi = NULL;
- rc = card_cache_fetch(card, info);
- if (rc || verify) {
- pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
- if (!pmqi)
- return -ENOMEM;
- rc = ep11_query_info(card, AUTOSEL_DOM,
- 0x01 /* module info query */,
- sizeof(*pmqi), (u8 *)pmqi);
- if (rc) {
- if (rc == -ENODEV)
- card_cache_scrub(card);
- goto out;
- }
- memset(info, 0, sizeof(*info));
- info->API_ord_nr = pmqi->API_ord_nr;
- info->FW_version =
- (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
- memcpy(info->serial, pmqi->serial, sizeof(info->serial));
- info->op_mode = pmqi->op_mode;
- card_cache_update(card, info);
- }
+ /* use the cprb mempool to satisfy this short term mem alloc */
+ pmqi = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_KERNEL);
+ if (!pmqi)
+ return -ENOMEM;
+ rc = ep11_query_info(card, AUTOSEL_DOM,
+ 0x01 /* module info query */,
+ sizeof(*pmqi), (u8 *)pmqi, xflags);
+ if (rc)
+ goto out;
+
+ memset(info, 0, sizeof(*info));
+ info->API_ord_nr = pmqi->API_ord_nr;
+ info->FW_version = (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+ memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+ info->op_mode = pmqi->op_mode;
out:
- kfree(pmqi);
+ mempool_free(pmqi, cprb_mempool);
return rc;
}
EXPORT_SYMBOL(ep11_get_card_info);
@@ -737,7 +695,8 @@ EXPORT_SYMBOL(ep11_get_card_info);
/*
* Provide information about a domain within an EP11 card.
*/
-int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+int ep11_get_domain_info(u16 card, u16 domain,
+ struct ep11_domain_info *info, u32 xflags)
{
int rc;
struct ep11_domain_query_info {
@@ -746,36 +705,32 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
u8 new_WK_VP[32];
u32 dom_flags;
u64 op_mode;
- } __packed * p_dom_info;
-
- p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
- if (!p_dom_info)
- return -ENOMEM;
+ } __packed dom_query_info;
rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
- sizeof(*p_dom_info), (u8 *)p_dom_info);
+ sizeof(dom_query_info), (u8 *)&dom_query_info,
+ xflags);
if (rc)
goto out;
memset(info, 0, sizeof(*info));
info->cur_wk_state = '0';
info->new_wk_state = '0';
- if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
- if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+ if (dom_query_info.dom_flags & 0x10 /* left imprint mode */) {
+ if (dom_query_info.dom_flags & 0x02 /* cur wk valid */) {
info->cur_wk_state = '1';
- memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+ memcpy(info->cur_wkvp, dom_query_info.cur_WK_VP, 32);
}
- if (p_dom_info->dom_flags & 0x04 || /* new wk present */
- p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+ if (dom_query_info.dom_flags & 0x04 || /* new wk present */
+ dom_query_info.dom_flags & 0x08 /* new wk committed */) {
info->new_wk_state =
- p_dom_info->dom_flags & 0x08 ? '2' : '1';
- memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+ dom_query_info.dom_flags & 0x08 ? '2' : '1';
+ memcpy(info->new_wkvp, dom_query_info.new_WK_VP, 32);
}
}
- info->op_mode = p_dom_info->op_mode;
+ info->op_mode = dom_query_info.op_mode;
out:
- kfree(p_dom_info);
return rc;
}
EXPORT_SYMBOL(ep11_get_domain_info);
@@ -788,7 +743,7 @@ EXPORT_SYMBOL(ep11_get_domain_info);
static int _ep11_genaeskey(u16 card, u16 domain,
u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize)
+ u8 *keybuf, size_t *keybufsize, u32 xflags)
{
struct keygen_req_pl {
struct pl_head head;
@@ -823,7 +778,7 @@ static int _ep11_genaeskey(u16 card, u16 domain,
struct ep11_cprb *req = NULL, *rep = NULL;
size_t req_pl_size, pinblob_size = 0;
struct ep11_target_dev target;
- struct ep11_urb *urb = NULL;
+ struct ep11_urb urb;
int api, rc = -ENOMEM;
u8 *p;
@@ -851,7 +806,7 @@ static int _ep11_genaeskey(u16 card, u16 domain,
pinblob_size = EP11_PINBLOB_V1_BYTES;
}
req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size);
- req = alloc_cprb(req_pl_size);
+ req = alloc_cprbmem(req_pl_size, xflags);
if (!req)
goto out;
req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req));
@@ -877,22 +832,19 @@ static int _ep11_genaeskey(u16 card, u16 domain,
*p++ = pinblob_size;
/* reply cprb and payload */
- rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+ rep = alloc_cprbmem(sizeof(struct keygen_rep_pl), xflags);
if (!rep)
goto out;
rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(*urb), GFP_KERNEL);
- if (!urb)
- goto out;
target.ap_id = card;
target.dom_id = domain;
- prep_urb(urb, &target, 1,
+ prep_urb(&urb, &target, 1,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
- rc = zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)card, (int)domain, rc);
@@ -925,14 +877,13 @@ static int _ep11_genaeskey(u16 card, u16 domain,
*keybufsize = rep_pl->data_len;
out:
- kfree(req);
- kfree(rep);
- kfree(urb);
+ free_cprbmem(req, 0, false, xflags);
+ free_cprbmem(rep, sizeof(struct keygen_rep_pl), true, xflags);
return rc;
}
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, u32 *keybufsize, u32 keybufver)
+ u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags)
{
struct ep11kblob_header *hdr;
size_t hdr_size, pl_size;
@@ -953,7 +904,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
return rc;
rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags,
- pl, &pl_size);
+ pl, &pl_size, xflags);
if (rc)
return rc;
@@ -973,7 +924,8 @@ static int ep11_cryptsingle(u16 card, u16 domain,
u16 mode, u32 mech, const u8 *iv,
const u8 *key, size_t keysize,
const u8 *inbuf, size_t inbufsize,
- u8 *outbuf, size_t *outbufsize)
+ u8 *outbuf, size_t *outbufsize,
+ u32 xflags)
{
struct crypt_req_pl {
struct pl_head head;
@@ -1000,8 +952,8 @@ static int ep11_cryptsingle(u16 card, u16 domain,
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
- struct ep11_urb *urb = NULL;
- size_t req_pl_size, rep_pl_size;
+ struct ep11_urb urb;
+ size_t req_pl_size, rep_pl_size = 0;
int n, api = EP11_API_V1, rc = -ENOMEM;
u8 *p;
@@ -1012,7 +964,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
/* request cprb and payload */
req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+ ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
- req = alloc_cprb(req_pl_size);
+ req = alloc_cprbmem(req_pl_size, xflags);
if (!req)
goto out;
req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req));
@@ -1034,22 +986,19 @@ static int ep11_cryptsingle(u16 card, u16 domain,
/* reply cprb and payload, assume out data size <= in data size + 32 */
rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
- rep = alloc_cprb(rep_pl_size);
+ rep = alloc_cprbmem(rep_pl_size, xflags);
if (!rep)
goto out;
rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(*urb), GFP_KERNEL);
- if (!urb)
- goto out;
target.ap_id = card;
target.dom_id = domain;
- prep_urb(urb, &target, 1,
+ prep_urb(&urb, &target, 1,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + rep_pl_size);
- rc = zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)card, (int)domain, rc);
@@ -1095,9 +1044,8 @@ static int ep11_cryptsingle(u16 card, u16 domain,
*outbufsize = n;
out:
- kfree(req);
- kfree(rep);
- kfree(urb);
+ free_cprbmem(req, req_pl_size, true, xflags);
+ free_cprbmem(rep, rep_pl_size, true, xflags);
return rc;
}
@@ -1106,7 +1054,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
const u8 *enckey, size_t enckeysize,
u32 mech, const u8 *iv,
u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize)
+ u8 *keybuf, size_t *keybufsize, u32 xflags)
{
struct uw_req_pl {
struct pl_head head;
@@ -1143,7 +1091,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
struct ep11_cprb *req = NULL, *rep = NULL;
size_t req_pl_size, pinblob_size = 0;
struct ep11_target_dev target;
- struct ep11_urb *urb = NULL;
+ struct ep11_urb urb;
int api, rc = -ENOMEM;
u8 *p;
@@ -1161,7 +1109,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+ ASN1TAGLEN(keksize) + ASN1TAGLEN(0)
+ ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize);
- req = alloc_cprb(req_pl_size);
+ req = alloc_cprbmem(req_pl_size, xflags);
if (!req)
goto out;
req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req));
@@ -1197,22 +1145,19 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
p += asn1tag_write(p, 0x04, enckey, enckeysize);
/* reply cprb and payload */
- rep = alloc_cprb(sizeof(struct uw_rep_pl));
+ rep = alloc_cprbmem(sizeof(struct uw_rep_pl), xflags);
if (!rep)
goto out;
rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(*urb), GFP_KERNEL);
- if (!urb)
- goto out;
target.ap_id = card;
target.dom_id = domain;
- prep_urb(urb, &target, 1,
+ prep_urb(&urb, &target, 1,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
- rc = zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)card, (int)domain, rc);
@@ -1245,9 +1190,8 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
*keybufsize = rep_pl->data_len;
out:
- kfree(req);
- kfree(rep);
- kfree(urb);
+ free_cprbmem(req, req_pl_size, true, xflags);
+ free_cprbmem(rep, sizeof(struct uw_rep_pl), true, xflags);
return rc;
}
@@ -1257,7 +1201,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
u32 mech, const u8 *iv,
u32 keybitsize, u32 keygenflags,
u8 *keybuf, u32 *keybufsize,
- u8 keybufver)
+ u8 keybufver, u32 xflags)
{
struct ep11kblob_header *hdr;
size_t hdr_size, pl_size;
@@ -1271,7 +1215,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize,
mech, iv, keybitsize, keygenflags,
- pl, &pl_size);
+ pl, &pl_size, xflags);
if (rc)
return rc;
@@ -1290,7 +1234,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
static int _ep11_wrapkey(u16 card, u16 domain,
const u8 *key, size_t keysize,
u32 mech, const u8 *iv,
- u8 *databuf, size_t *datasize)
+ u8 *databuf, size_t *datasize, u32 xflags)
{
struct wk_req_pl {
struct pl_head head;
@@ -1319,7 +1263,7 @@ static int _ep11_wrapkey(u16 card, u16 domain,
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
- struct ep11_urb *urb = NULL;
+ struct ep11_urb urb;
size_t req_pl_size;
int api, rc = -ENOMEM;
u8 *p;
@@ -1327,7 +1271,7 @@ static int _ep11_wrapkey(u16 card, u16 domain,
/* request cprb and payload */
req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ ASN1TAGLEN(keysize) + 4;
- req = alloc_cprb(req_pl_size);
+ req = alloc_cprbmem(req_pl_size, xflags);
if (!req)
goto out;
if (!mech || mech == 0x80060001)
@@ -1357,22 +1301,19 @@ static int _ep11_wrapkey(u16 card, u16 domain,
*p++ = 0;
/* reply cprb and payload */
- rep = alloc_cprb(sizeof(struct wk_rep_pl));
+ rep = alloc_cprbmem(sizeof(struct wk_rep_pl), xflags);
if (!rep)
goto out;
rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep));
/* urb and target */
- urb = kmalloc(sizeof(*urb), GFP_KERNEL);
- if (!urb)
- goto out;
target.ap_id = card;
target.dom_id = domain;
- prep_urb(urb, &target, 1,
+ prep_urb(&urb, &target, 1,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
- rc = zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
__func__, (int)card, (int)domain, rc);
@@ -1405,18 +1346,18 @@ static int _ep11_wrapkey(u16 card, u16 domain,
*datasize = rep_pl->data_len;
out:
- kfree(req);
- kfree(rep);
- kfree(urb);
+ free_cprbmem(req, req_pl_size, true, xflags);
+ free_cprbmem(rep, sizeof(struct wk_rep_pl), true, xflags);
return rc;
}
int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
- u32 keytype)
+ u32 keytype, u32 xflags)
{
int rc;
- u8 encbuf[64], *kek = NULL;
+ void *mem;
+ u8 encbuf[64], *kek;
size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) {
@@ -1427,18 +1368,24 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
return -EINVAL;
}
- /* allocate memory for the temp kek */
+ /*
+ * Allocate space for the temp kek.
+	 * As we only need up to MAXEP11AESKEYBLOBSIZE bytes for this,
+	 * the already existing cprb mempool is used to satisfy this
+	 * short term memory requirement.
+ */
+ mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ kek = (u8 *)mem;
keklen = MAXEP11AESKEYBLOBSIZE;
- kek = kmalloc(keklen, GFP_ATOMIC);
- if (!kek) {
- rc = -ENOMEM;
- goto out;
- }
/* Step 1: generate AES 256 bit random kek key */
rc = _ep11_genaeskey(card, domain, 256,
0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
- kek, &keklen);
+ kek, &keklen, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n",
__func__, rc);
@@ -1447,7 +1394,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
/* Step 2: encrypt clear key value with the kek key */
rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
- clrkey, clrkeylen, encbuf, &encbuflen);
+ clrkey, clrkeylen, encbuf, &encbuflen, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n",
__func__, rc);
@@ -1457,22 +1404,23 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
/* Step 3: import the encrypted key value as a new key */
rc = ep11_unwrapkey(card, domain, kek, keklen,
encbuf, encbuflen, 0, def_iv,
- keybitsize, 0, keybuf, keybufsize, keytype);
+ keybitsize, 0, keybuf, keybufsize, keytype, xflags);
if (rc) {
- ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n",
+ ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n",
__func__, rc);
goto out;
}
out:
- kfree(kek);
+ mempool_free(mem, cprb_mempool);
return rc;
}
EXPORT_SYMBOL(ep11_clr2keyblob);
int ep11_kblob2protkey(u16 card, u16 dom,
const u8 *keyblob, u32 keybloblen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags)
{
struct ep11kblob_header *hdr;
struct ep11keyblob *key;
@@ -1498,15 +1446,29 @@ int ep11_kblob2protkey(u16 card, u16 dom,
}
/* !!! hdr is no longer a valid header !!! */
- /* alloc temp working buffer */
+ /* need a temp working buffer */
wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
- wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
- if (!wkbuf)
- return -ENOMEM;
+ if (wkbuflen > CPRB_MEMPOOL_ITEM_SIZE) {
+ /* this should never happen */
+ rc = -ENOMEM;
+ ZCRYPT_DBF_WARN("%s wkbuflen %d > cprb mempool item size %d, rc=%d\n",
+ __func__, (int)wkbuflen, CPRB_MEMPOOL_ITEM_SIZE, rc);
+ return rc;
+ }
+ /* use the cprb mempool to satisfy this short term mem allocation */
+ wkbuf = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_ATOMIC);
+ if (!wkbuf) {
+ rc = -ENOMEM;
+ ZCRYPT_DBF_WARN("%s allocating tmp buffer via cprb mempool failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
/* ep11 secure key -> protected key + info */
rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
- 0, def_iv, wkbuf, &wkbuflen);
+ 0, def_iv, wkbuf, &wkbuflen, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n",
__func__, rc);
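On the wkbuflen computation above: (keylen + AES_BLOCK_SIZE) & ~(AES_BLOCK_SIZE - 1) rounds keylen up with between 1 and 16 bytes of headroom; e.g. keylen 100 gives (100 + 16) & ~15 = 112, and keylen 96 also gives 112. Real key blob lengths stay far below the 8 KB pool element, so the new wkbuflen > CPRB_MEMPOOL_ITEM_SIZE guard should indeed never fire; it merely keeps a corrupted length field from overrunning a pool element.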
@@ -1573,37 +1535,32 @@ int ep11_kblob2protkey(u16 card, u16 dom,
*protkeylen = wki->pkeysize;
out:
- kfree(wkbuf);
+ mempool_free(wkbuf, cprb_mempool);
return rc;
}
EXPORT_SYMBOL(ep11_kblob2protkey);
-int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
- int minhwtype, int minapi, const u8 *wkvp)
+int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp, u32 xflags)
{
struct zcrypt_device_status_ext *device_status;
- u32 *_apqns = NULL, _nr_apqns = 0;
- int i, card, dom, rc = -ENOMEM;
struct ep11_domain_info edi;
struct ep11_card_info eci;
+ u32 _nr_apqns = 0;
+ int i, card, dom;
- /* fetch status of all crypto cards */
- device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
- if (!device_status)
- return -ENOMEM;
- zcrypt_device_status_mask_ext(device_status);
+ /* occupy the device status memory */
+ mutex_lock(&dev_status_mem_mutex);
+ memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE);
+ device_status = (struct zcrypt_device_status_ext *)dev_status_mem;
- /* allocate 1k space for up to 256 apqns */
- _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
- if (!_apqns) {
- kvfree(device_status);
- return -ENOMEM;
- }
+ /* fetch crypto device status into this struct */
+ zcrypt_device_status_mask_ext(device_status,
+ ZCRYPT_DEV_STATUS_CARD_MAX,
+ ZCRYPT_DEV_STATUS_QUEUE_MAX);
 	/* walk through all the crypto apqns */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) {
card = AP_QID_CARD(device_status[i].qid);
dom = AP_QID_QUEUE(device_status[i].qid);
/* check online state */
@@ -1623,14 +1580,14 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
continue;
/* check min api version if given */
if (minapi > 0) {
- if (ep11_get_card_info(card, &eci, 0))
+ if (ep11_get_card_info(card, &eci, xflags))
continue;
if (minapi > eci.API_ord_nr)
continue;
}
/* check wkvp if given */
if (wkvp) {
- if (ep11_get_domain_info(card, dom, &edi))
+ if (ep11_get_domain_info(card, dom, &edi, xflags))
continue;
if (edi.cur_wk_state != '1')
continue;
@@ -1638,27 +1595,40 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
continue;
}
 		/* apqn passed all filtering criteria, add to the array */
- if (_nr_apqns < 256)
- _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
+ if (_nr_apqns < *nr_apqns)
+ apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
}
- /* nothing found ? */
- if (!_nr_apqns) {
- kfree(_apqns);
- rc = -ENODEV;
- } else {
- /* no re-allocation, simple return the _apqns array */
- *apqns = _apqns;
- *nr_apqns = _nr_apqns;
- rc = 0;
- }
+ *nr_apqns = _nr_apqns;
- kvfree(device_status);
- return rc;
+ mutex_unlock(&dev_status_mem_mutex);
+
+ return _nr_apqns ? 0 : -ENODEV;
}
EXPORT_SYMBOL(ep11_findcard2);
-void __exit zcrypt_ep11misc_exit(void)
+int __init zcrypt_ep11misc_init(void)
+{
+ /* Pre-allocate a small memory pool for ep11 cprbs. */
+ cprb_mempool = mempool_create_kmalloc_pool(2 * zcrypt_mempool_threshold,
+ CPRB_MEMPOOL_ITEM_SIZE);
+ if (!cprb_mempool)
+ return -ENOMEM;
+
+	/* Pre-allocate the device status memory used in ep11_findcard2() */
+ dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL);
+ if (!dev_status_mem) {
+ mempool_destroy(cprb_mempool);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void zcrypt_ep11misc_exit(void)
{
- card_cache_free();
+ mutex_lock(&dev_status_mem_mutex);
+ kvfree(dev_status_mem);
+ mutex_unlock(&dev_status_mem_mutex);
+ mempool_destroy(cprb_mempool);
}
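The new init/exit pair is expected to be wired into the zcrypt module setup path. A hedged sketch of such a call site (the surrounding function names are illustrative, not part of this patch):

	static int __init example_zcrypt_init(void)
	{
		int rc;

		rc = zcrypt_ep11misc_init();	/* cprb mempool + status buffer */
		if (rc)
			return rc;
		/* ... further init steps ... */
		return 0;
	}

	static void __exit example_zcrypt_exit(void)
	{
		zcrypt_ep11misc_exit();		/* tear down in reverse order */
	}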
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
index 9f1bdffdec68..b5e6fd861815 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.h
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -104,25 +104,26 @@ struct ep11_domain_info {
/*
* Provide information about an EP11 card.
*/
-int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags);
/*
* Provide information about a domain within an EP11 card.
*/
-int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+int ep11_get_domain_info(u16 card, u16 domain,
+ struct ep11_domain_info *info, u32 xflags);
/*
* Generate (random) EP11 AES secure key.
*/
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, u32 *keybufsize, u32 keybufver);
+ u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags);
/*
* Generate EP11 AES secure key with given clear key value.
*/
int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
- u32 keytype);
+ u32 keytype, u32 xflags);
/*
 * Build a list of ep11 apqns meeting the following constraints:
@@ -136,22 +137,22 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
* key for this domain. When a wkvp is given there will always be a re-fetch
 * of the domain info for the potential apqn - so this triggers a request/
 * reply cycle to each eligible apqn.
- * The array of apqn entries is allocated with kmalloc and returned in *apqns;
- * the number of apqns stored into the list is returned in *nr_apqns. One apqn
- * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and
- * may be casted to struct pkey_apqn. The return value is either 0 for success
- * or a negative errno value. If no apqn meeting the criteria is found,
- * -ENODEV is returned.
+ * The caller should set *nr_apqns to the number of elements available in the
+ * apqns array. On return *nr_apqns is updated to the number of apqns filled in.
+ * The return value is either 0 for success or a negative errno value.
+ * If no apqn meeting the criteria is found, -ENODEV is returned.
*/
-int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
- int minhwtype, int minapi, const u8 *wkvp);
+int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp, u32 xflags);
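A hedged usage sketch of the reworked interface: the caller now owns the apqn array and passes its capacity in *nr_apqns; the function reports back how many entries it filled. The array size and wildcard filter values here are illustrative assumptions:

	u32 apqns[256], nr_apqns = ARRAY_SIZE(apqns);
	int rc;

	rc = ep11_findcard2(apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			    0, 0, NULL, 0);
	if (rc)
		return rc;	/* -ENODEV if no apqn matched */
	/* apqns[0..nr_apqns-1] each hold 16 bit cardnr | 16 bit domain */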
/*
 * Derive protected key from EP11 key blob (AES and ECC keys).
*/
int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
+int zcrypt_ep11misc_init(void);
void zcrypt_ep11misc_exit(void);
#endif /* _ZCRYPT_EP11MISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index adc65eddaa1e..fc0a2a053dc2 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -438,7 +438,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq,
msg->len = sizeof(error_reply);
}
out:
- complete((struct completion *)msg->private);
+ complete(&msg->response.work);
}
static atomic_t zcrypt_step = ATOMIC_INIT(0);
@@ -449,30 +449,30 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxA device to the request distributor
* @mex: pointer to the modexpo request buffer
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer of size ap_msg->bufsize is
+ * available within ap_msg. The caller also has to make sure that
+ * ap_release_apmsg() is always called, even on failure.
*/
static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex,
struct ap_message *ap_msg)
{
- struct completion work;
int rc;
- ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE;
- ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
- if (!ap_msg->msg)
- return -ENOMEM;
+ if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE)
+ return -EMSGSIZE;
ap_msg->receive = zcrypt_msgtype50_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = &work;
rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
if (rc)
goto out;
- init_completion(&work);
+ init_completion(&ap_msg->response.work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
- rc = wait_for_completion_interruptible(&work);
+ rc = wait_for_completion_interruptible(&ap_msg->response.work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
@@ -485,7 +485,6 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
}
out:
- ap_msg->private = NULL;
if (rc)
pr_debug("send me cprb at dev=%02x.%04x rc=%d\n",
AP_QID_CARD(zq->queue->qid),
@@ -499,30 +498,30 @@ out:
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxA device to the request distributor
 * @crt: pointer to the modexpo_crt request buffer
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer of size ap_msg->bufsize is
+ * available within ap_msg. The caller also has to make sure that
+ * ap_release_apmsg() is always called, even on failure.
*/
static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt,
struct ap_message *ap_msg)
{
- struct completion work;
int rc;
- ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE;
- ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
- if (!ap_msg->msg)
- return -ENOMEM;
+ if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE)
+ return -EMSGSIZE;
ap_msg->receive = zcrypt_msgtype50_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = &work;
rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
if (rc)
goto out;
- init_completion(&work);
+ init_completion(&ap_msg->response.work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
- rc = wait_for_completion_interruptible(&work);
+ rc = wait_for_completion_interruptible(&ap_msg->response.work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
@@ -535,7 +534,6 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
}
out:
- ap_msg->private = NULL;
if (rc)
pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n",
AP_QID_CARD(zq->queue->qid),
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index b64c9d9fc613..9cefbb30960f 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -31,11 +31,6 @@
#define CEIL4(x) ((((x) + 3) / 4) * 4)
-struct response_type {
- struct completion work;
- int type;
-};
-
#define CEXXC_RESPONSE_TYPE_ICA 0
#define CEXXC_RESPONSE_TYPE_XCRB 1
#define CEXXC_RESPONSE_TYPE_EP11 2
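The removal of the local struct response_type reflects that completion and response type now live inside struct ap_message itself. A minimal sketch of the resulting send/wait pattern, assuming an already prepared ap_msg:

	struct ap_response_type *resp_type = &ap_msg->response;
	int rc;

	resp_type->type = CEXXC_RESPONSE_TYPE_XCRB;
	init_completion(&resp_type->work);
	rc = ap_queue_message(zq->queue, ap_msg);
	if (!rc)
		rc = wait_for_completion_interruptible(&resp_type->work);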
@@ -856,7 +851,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct response_type *resp_type = msg->private;
+ struct ap_response_type *resp_type = &msg->response;
struct type86x_reply *t86r;
int len;
@@ -920,7 +915,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct response_type *resp_type = msg->private;
+ struct ap_response_type *resp_type = &msg->response;
struct type86_ep11_reply *t86r;
int len;
@@ -967,9 +962,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
struct ica_rsa_modexpo *mex,
struct ap_message *ap_msg)
{
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_ICA,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
int rc;
ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL);
@@ -979,15 +972,15 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = &resp_type;
rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex);
if (rc)
goto out_free;
- init_completion(&resp_type.work);
+ resp_type->type = CEXXC_RESPONSE_TYPE_ICA;
+ init_completion(&resp_type->work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out_free;
- rc = wait_for_completion_interruptible(&resp_type.work);
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
@@ -1001,7 +994,6 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
out_free:
free_page((unsigned long)ap_msg->msg);
- ap_msg->private = NULL;
ap_msg->msg = NULL;
return rc;
}
@@ -1017,9 +1009,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
struct ica_rsa_modexpo_crt *crt,
struct ap_message *ap_msg)
{
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_ICA,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
int rc;
ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL);
@@ -1029,15 +1019,15 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = &resp_type;
rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt);
if (rc)
goto out_free;
- init_completion(&resp_type.work);
+ resp_type->type = CEXXC_RESPONSE_TYPE_ICA;
+ init_completion(&resp_type->work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out_free;
- rc = wait_for_completion_interruptible(&resp_type.work);
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
@@ -1051,7 +1041,6 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
out_free:
free_page((unsigned long)ap_msg->msg);
- ap_msg->private = NULL;
ap_msg->msg = NULL;
return rc;
}
@@ -1061,28 +1050,21 @@ out_free:
* Prepare a CCA AP msg: fetch the required data from userspace,
* prepare the AP msg, fill some info into the ap_message struct,
* extract some data from the CPRB and give back to the caller.
- * This function allocates memory and needs an ap_msg prepared
- * by the caller with ap_init_message(). Also the caller has to
- * make sure ap_release_message() is always called even on failure.
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer of size ap_msg->bufsize is
+ * available within ap_msg. The caller also has to make sure that
+ * ap_release_apmsg() is always called, even on failure.
*/
int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb,
struct ap_message *ap_msg,
unsigned int *func_code, unsigned short **dom)
{
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_XCRB,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
- ap_msg->bufsize = atomic_read(&ap_max_msg_size);
- ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
- if (!ap_msg->msg)
- return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private)
- return -ENOMEM;
+ resp_type->type = CEXXC_RESPONSE_TYPE_XCRB;
return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom);
}
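A hedged caller-side sketch of the documented contract; the exact ap_init_apmsg()/ap_release_apmsg() signatures are assumed from the comment above and are not part of this hunk:

	struct ap_message ap_msg;
	int rc;

	rc = ap_init_apmsg(&ap_msg, 0);	/* assumed: allocates ap_msg.msg of ap_msg.bufsize */
	if (rc)
		return rc;
	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &dom);
	/* ... queue the msg and wait for the response ... */
	ap_release_apmsg(&ap_msg);	/* must run even on failure */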
@@ -1097,7 +1079,7 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
struct ica_xcRB *xcrb,
struct ap_message *ap_msg)
{
- struct response_type *rtype = ap_msg->private;
+ struct ap_response_type *resp_type = &ap_msg->response;
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
@@ -1128,11 +1110,11 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
msg->hdr.fromcardlen1 -= delta;
}
- init_completion(&rtype->work);
+ init_completion(&resp_type->work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
- rc = wait_for_completion_interruptible(&rtype->work);
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
@@ -1158,28 +1140,21 @@ out:
* Prepare an EP11 AP msg: fetch the required data from userspace,
* prepare the AP msg, fill some info into the ap_message struct,
* extract some data from the CPRB and give back to the caller.
- * This function allocates memory and needs an ap_msg prepared
- * by the caller with ap_init_message(). Also the caller has to
- * make sure ap_release_message() is always called even on failure.
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer of size ap_msg->bufsize is
+ * available within ap_msg. The caller also has to make sure that
+ * ap_release_apmsg() is always called, even on failure.
*/
int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb,
struct ap_message *ap_msg,
unsigned int *func_code, unsigned int *domain)
{
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_EP11,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
- ap_msg->bufsize = atomic_read(&ap_max_msg_size);
- ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
- if (!ap_msg->msg)
- return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private)
- return -ENOMEM;
+ resp_type->type = CEXXC_RESPONSE_TYPE_EP11;
return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb,
func_code, domain);
}
@@ -1197,7 +1172,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
{
int rc;
unsigned int lfmt;
- struct response_type *rtype = ap_msg->private;
+ struct ap_response_type *resp_type = &ap_msg->response;
struct {
struct type6_hdr hdr;
struct ep11_cprb cprbx;
@@ -1251,11 +1226,11 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
msg->hdr.fromcardlen1 = zq->reply.bufsize -
sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext);
- init_completion(&rtype->work);
+ init_completion(&resp_type->work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
- rc = wait_for_completion_interruptible(&rtype->work);
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
@@ -1276,23 +1251,25 @@ out:
return rc;
}
+/*
+ * Prepare a CEXXC get random request ap message.
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer of size ap_max_msg_size is
+ * available within ap_msg. The caller also has to make sure that
+ * ap_release_apmsg() is always called, even on failure.
+ */
int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code,
unsigned int *domain)
{
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_XCRB,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
- ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE;
- ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
- if (!ap_msg->msg)
- return -ENOMEM;
+ if (ap_msg->bufsize < AP_DEFAULT_MAX_MSG_SIZE)
+ return -EMSGSIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private)
- return -ENOMEM;
+
+ resp_type->type = CEXXC_RESPONSE_TYPE_XCRB;
rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
@@ -1319,16 +1296,16 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
short int verb_length;
short int key_length;
} __packed * msg = ap_msg->msg;
- struct response_type *rtype = ap_msg->private;
+ struct ap_response_type *resp_type = &ap_msg->response;
int rc;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
- init_completion(&rtype->work);
+ init_completion(&resp_type->work);
rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out;
- rc = wait_for_completion_interruptible(&rtype->work);
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 9e580ef69bda..aaa1eea6149b 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -179,7 +179,7 @@ void ctcmpc_dumpit(char *buf, int len)
ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
addr, boff, bhex, basc);
dup = 0;
- strcpy(duphex, bhex);
+ strscpy(duphex, bhex);
} else
dup++;
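The strcpy() to strscpy() conversion relies on the two-argument form of strscpy(), which derives the bound via sizeof() when the destination is a fixed-size array, e.g. (array size illustrative):

	char duphex[64];

	strscpy(duphex, bhex);	/* bounded copy, always NUL-terminated */
	/* equivalent to: strscpy(duphex, bhex, sizeof(duphex)); */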
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 5fcdce116862..6a12d2422540 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -132,7 +132,7 @@ fsm_getstate_str(fsm_instance *fi)
static void
fsm_expire_timer(struct timer_list *t)
{
- fsm_timer *this = from_timer(this, t, tl);
+ fsm_timer *this = timer_container_of(this, t, tl);
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
this->fi->name, this);
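timer_container_of() is the renamed from_timer() helper: both resolve the structure embedding a timer_list via container_of(). A minimal sketch with illustrative struct and field names:

	struct my_dev {
		struct timer_list tl;
		/* ... */
	};

	static void my_timer_fn(struct timer_list *t)
	{
		/* map the timer_list pointer back to its enclosing my_dev */
		struct my_dev *dev = timer_container_of(dev, t, tl);
		/* ... handle the expiry using dev ... */
	}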
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 60ed70a39d2c..b7f15f303ea2 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -611,7 +611,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ism->dev.parent = &pdev->dev;
ism->dev.release = ism_dev_release;
device_initialize(&ism->dev);
- dev_set_name(&ism->dev, dev_name(&pdev->dev));
+ dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
ret = device_add(&ism->dev);
if (ret)
goto err_dev;
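The dev_set_name() change is a format-string hardening: the function takes a printf-style format, so a device name that might contain '%' must never be passed as the format itself. Pinning the format to "%s" treats the name purely as data:

	/* risky if the name ever contains a '%' conversion: */
	dev_set_name(&ism->dev, dev_name(&pdev->dev));
	/* safe: */
	dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));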
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f5cfaebfb7c9..fe9f48c315d9 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2619,7 +2619,8 @@ err_qdio_bufs:
static void qeth_tx_completion_timer(struct timer_list *timer)
{
- struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
+ struct qeth_qdio_out_q *queue = timer_container_of(queue, timer,
+ timer);
napi_schedule(&queue->napi);
QETH_TXQ_STAT_INC(queue, completion_timer);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 22074e81bd38..dc2265ebb11b 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -312,15 +312,13 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
{
- char name[TASK_COMM_LEN];
-
- snprintf(name, sizeof(name), "zfcp_q_%s",
- dev_name(&adapter->ccw_device->dev));
- adapter->work_queue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ adapter->work_queue =
+ alloc_ordered_workqueue("zfcp_q_%s", WQ_MEM_RECLAIM,
+ dev_name(&adapter->ccw_device->dev));
+ if (!adapter->work_queue)
+ return -ENOMEM;
- if (adapter->work_queue)
- return 0;
- return -ENOMEM;
+ return 0;
}
static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
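alloc_ordered_workqueue() accepts a printf-style name format itself, which is what lets the zfcp change above drop the intermediate snprintf() buffer:

	struct workqueue_struct *wq;

	/* the workqueue core formats the name, no local buffer needed */
	wq = alloc_ordered_workqueue("zfcp_q_%s", WQ_MEM_RECLAIM,
				     dev_name(&adapter->ccw_device->dev));
	if (!wq)
		return -ENOMEM;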
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 78d52a4c55f5..ffd994416995 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -615,7 +615,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
*/
void zfcp_erp_timeout_handler(struct timer_list *t)
{
- struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+ struct zfcp_fsf_req *fsf_req = timer_container_of(fsf_req, t, timer);
struct zfcp_erp_action *act;
if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
@@ -629,7 +629,7 @@ void zfcp_erp_timeout_handler(struct timer_list *t)
static void zfcp_erp_memwait_handler(struct timer_list *t)
{
- struct zfcp_erp_action *act = from_timer(act, t, timer);
+ struct zfcp_erp_action *act = timer_container_of(act, t, timer);
zfcp_erp_notify(act, 0);
}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index d5f5f563881e..c5bba1be88f4 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(ber_stop,
static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
- struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+ struct zfcp_fsf_req *fsf_req = timer_container_of(fsf_req, t, timer);
struct zfcp_adapter *adapter = fsf_req->adapter;
zfcp_qdio_siosl(adapter);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 0957e3f8b46e..f2410bc44ad3 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -102,7 +102,8 @@ static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
static void zfcp_qdio_request_timer(struct timer_list *timer)
{
- struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);
+ struct zfcp_qdio *qdio = timer_container_of(qdio, timer,
+ request_timer);
tasklet_schedule(&qdio->request_tasklet);
}
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 41e36af35488..90a84ae98b97 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -449,6 +449,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
+ flush_work(&port->rport_work);
+
retval = zfcp_unit_add(port, fcp_lun);
if (retval)
return retval;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 5a3c670aec27..5522310bab8d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -403,6 +403,7 @@ config SCSI_ACARD
config SCSI_AHA152X
tristate "Adaptec AHA152X/2825 support"
depends on ISA && SCSI
+ depends on !HIGHMEM
select SCSI_SPI_ATTRS
select CHECK_SIGNATURE
help
@@ -795,6 +796,7 @@ config SCSI_PPA
tristate "IOMEGA parallel port (ppa - older drives)"
depends on SCSI && PARPORT_PC
depends on HAS_IOPORT
+ depends on !HIGHMEM
help
This driver supports older versions of IOMEGA's parallel port ZIP
drive (a 100 MB removable media device).
@@ -822,6 +824,7 @@ config SCSI_PPA
config SCSI_IMM
tristate "IOMEGA parallel port (imm - newer drives)"
depends on SCSI && PARPORT_PC
+ depends on !HIGHMEM
help
This driver supports newer versions of IOMEGA's parallel port ZIP
drive (a 100 MB removable media device).
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 8c384c25dca1..0a5888b53d6d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -93,7 +93,6 @@ enum {
#define AAC_NUM_MGT_FIB 8
#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
-#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
#define AAC_MAX_LUN 256
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index ffef61c4aa01..7d9a4dce236b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -48,15 +48,7 @@
static int fib_map_alloc(struct aac_dev *dev)
{
- if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
- dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
- else
- dev->max_cmd_size = dev->max_fib_size;
- if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) {
- dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
- } else {
- dev->max_cmd_size = dev->max_fib_size;
- }
+ dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
dprintk((KERN_INFO
"allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 4276f868cd91..e94c0a19c435 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -746,7 +746,6 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
/* need to have host registered before triggering any interrupt */
list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list);
- shpnt->no_highmem = true;
shpnt->io_port = setup->io_port;
shpnt->n_io_port = IO_RANGE;
shpnt->irq = setup->irq;
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index f9372a81cd4e..6b87ea004e53 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -8784,7 +8784,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
static void
ahd_stat_timer(struct timer_list *t)
{
- struct ahd_softc *ahd = from_timer(ahd, t, stat_timer);
+ struct ahd_softc *ahd = timer_container_of(ahd, t, stat_timer);
u_long s;
int enint_coal;
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 68214a58b160..08c8dad9ad62 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -862,7 +862,7 @@ void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
*/
void asd_ascb_timedout(struct timer_list *t)
{
- struct asd_ascb *ascb = from_timer(ascb, t, timer);
+ struct asd_ascb *ascb = timer_container_of(ascb, t, timer);
struct asd_seq_data *seq = &ascb->ha->seq;
unsigned long flags;
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index d45dbf98f25e..28ac92b041fe 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -70,7 +70,7 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
static void asd_clear_nexus_timedout(struct timer_list *t)
{
- struct asd_ascb *ascb = from_timer(ascb, t, timer);
+ struct asd_ascb *ascb = timer_container_of(ascb, t, timer);
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("%s: here\n", __func__);
@@ -244,7 +244,7 @@ static int asd_clear_nexus_index(struct sas_task *task)
static void asd_tmf_timedout(struct timer_list *t)
{
- struct asd_ascb *ascb = from_timer(ascb, t, timer);
+ struct asd_ascb *ascb = timer_container_of(ascb, t, timer);
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("tmf timed out\n");
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index b450b1fc6bbb..fb57343a97bd 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -3935,7 +3935,8 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
static void arcmsr_set_iop_datetime(struct timer_list *t)
{
- struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
+ struct AdapterControlBlock *pacb = timer_container_of(pacb, t,
+ refresh_timer);
unsigned int next_time;
struct tm tm;
@@ -4263,7 +4264,8 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
static void arcmsr_request_device_map(struct timer_list *t)
{
- struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
+ struct AdapterControlBlock *acb = timer_container_of(acb, t,
+ eternal_timer);
if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
} else {
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index e0b55d869a35..b1a749ab18f8 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2327,7 +2327,7 @@ DEF_SCSI_QCMD(fas216_noqueue_command)
*/
static void fas216_eh_timer(struct timer_list *t)
{
- FAS216_Info *info = from_timer(info, t, eh_timer);
+ FAS216_Info *info = timer_container_of(info, t, eh_timer);
fas216_log(info, LOG_ERROR, "error handling timed out\n");
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 7d1b767d87fb..dc88bc46dcc0 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5240,7 +5240,7 @@ static void beiscsi_eqd_update_work(struct work_struct *work)
static void beiscsi_hw_tpe_check(struct timer_list *t)
{
- struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
+ struct beiscsi_hba *phba = timer_container_of(phba, t, hw_check);
u32 wait;
/* if not TPE, do nothing */
@@ -5257,7 +5257,7 @@ static void beiscsi_hw_tpe_check(struct timer_list *t)
static void beiscsi_hw_health_check(struct timer_list *t)
{
- struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
+ struct beiscsi_hba *phba = timer_container_of(phba, t, hw_check);
beiscsi_detect_ue(phba);
if (beiscsi_detect_ue(phba)) {
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 598f2fc93ef2..ff9adfc0b332 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -685,7 +685,8 @@ ext:
void
bfad_bfa_tmo(struct timer_list *t)
{
- struct bfad_s *bfad = from_timer(bfad, t, hal_tmo);
+ struct bfad_s *bfad = timer_container_of(bfad, t,
+ hal_tmo);
unsigned long flags;
struct list_head doneq;
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
index ecdc0f0f4f4e..3cf7e08df809 100644
--- a/drivers/scsi/bnx2fc/Kconfig
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -5,7 +5,6 @@ config SCSI_BNX2X_FCOE
depends on (IPV6 || IPV6=n)
depends on LIBFC
depends on LIBFCOE
- depends on MMU
select NETDEVICES
select ETHERNET
select NET_VENDOR_BROADCOM
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index de6574cccf58..58da993251e9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -837,7 +837,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
static void bnx2fc_destroy_timer(struct timer_list *t)
{
- struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer);
+ struct bnx2fc_hba *hba = timer_container_of(hba, t, destroy_timer);
printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - "
"Destroy compl not received!!\n");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index b8227cfef64f..77dcdfc412b1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -30,7 +30,7 @@ static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
static void bnx2fc_upld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
+ struct bnx2fc_rport *tgt = timer_container_of(tgt, t, upld_timer);
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
/* fake upload completion */
@@ -43,7 +43,7 @@ static void bnx2fc_upld_timer(struct timer_list *t)
static void bnx2fc_ofld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
+ struct bnx2fc_rport *tgt = timer_container_of(tgt, t, ofld_timer);
BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
/* NOTE: This function should never be called, as
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 0cc06c2ce0b8..75ace2302fed 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -4,7 +4,6 @@ config SCSI_BNX2_ISCSI
depends on NET
depends on PCI
depends on (IPV6 || IPV6=n)
- depends on MMU
select SCSI_ISCSI_ATTRS
select NETDEVICES
select ETHERNET
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 6c864b093ac9..40db5190a222 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -685,7 +685,7 @@ void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
*/
void bnx2i_ep_ofld_timer(struct timer_list *t)
{
- struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer);
+ struct bnx2i_endpoint *ep = timer_container_of(ep, t, ofld_timer);
if (ep->state == EP_STATE_OFLD_START) {
printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index beded091dff1..7aa418ebfe01 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3738,7 +3738,7 @@ csio_mberr_worker(void *data)
static void
csio_hw_mb_timer(struct timer_list *t)
{
- struct csio_mbm *mbm = from_timer(mbm, t, timer);
+ struct csio_mbm *mbm = timer_container_of(mbm, t, timer);
struct csio_hw *hw = mbm->hw;
struct csio_mb *mbp = NULL;
@@ -4107,7 +4107,7 @@ csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
static void
csio_mgmt_tmo_handler(struct timer_list *t)
{
- struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer);
+ struct csio_mgmtm *mgmtm = timer_container_of(mgmtm, t, mgmt_timer);
struct list_head *tmp;
struct csio_ioreq *io_req;
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 461d38e2fb19..69de9657f7cb 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -547,7 +547,7 @@ static int act_open_rpl_status_to_errno(int status)
static void act_open_retry_timer(struct timer_list *t)
{
- struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
+ struct cxgbi_sock *csk = timer_container_of(csk, t, retry_timer);
struct sk_buff *skb;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index aaba294ecb58..42676627c3af 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -988,7 +988,7 @@ static int act_open_rpl_status_to_errno(int status)
static void csk_act_open_retry_timer(struct timer_list *t)
{
struct sk_buff *skb = NULL;
- struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
+ struct cxgbi_sock *csk = timer_container_of(csk, t, retry_timer);
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
struct l2t_entry *);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 8dc6be9a00c1..386c8359e1cc 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -83,65 +83,6 @@
/*#define DC395x_NO_SYNC*/
/*#define DC395x_NO_WIDE*/
-/*---------------------------------------------------------------------------
- Debugging
- ---------------------------------------------------------------------------*/
-/*
- * Types of debugging that can be enabled and disabled
- */
-#define DBG_KG 0x0001
-#define DBG_0 0x0002
-#define DBG_1 0x0004
-#define DBG_SG 0x0020
-#define DBG_FIFO 0x0040
-#define DBG_PIO 0x0080
-
-
-/*
- * Set set of things to output debugging for.
- * Undefine to remove all debugging
- */
-/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
-/*#define DEBUG_MASK DBG_0*/
-
-
-/*
- * Output a kernel mesage at the specified level and append the
- * driver name and a ": " to the start of the message
- */
-#define dprintkl(level, format, arg...) \
- printk(level DC395X_NAME ": " format , ## arg)
-
-
-#ifdef DEBUG_MASK
-/*
- * print a debug message - this is formated with KERN_DEBUG, then the
- * driver name followed by a ": " and then the message is output.
- * This also checks that the specified debug level is enabled before
- * outputing the message
- */
-#define dprintkdbg(type, format, arg...) \
- do { \
- if ((type) & (DEBUG_MASK)) \
- dprintkl(KERN_DEBUG , format , ## arg); \
- } while (0)
-
-/*
- * Check if the specified type of debugging is enabled
- */
-#define debug_enabled(type) ((DEBUG_MASK) & (type))
-
-#else
-/*
- * No debugging. Do nothing
- */
-#define dprintkdbg(type, format, arg...) \
- do {} while (0)
-#define debug_enabled(type) (0)
-
-#endif
-
-
#ifndef PCI_VENDOR_ID_TEKRAM
#define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */
#endif
@@ -432,7 +373,6 @@ static void *dc395x_scsi_phase1[] = {
/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
-static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
/*---------------------------------------------------------------------------
@@ -564,7 +504,6 @@ static void set_safe_settings(void)
{
int i;
- dprintkl(KERN_INFO, "Using safe settings.\n");
for (i = 0; i < CFG_NUM; i++)
{
cfg_data[i].value = cfg_data[i].safe;
@@ -581,15 +520,6 @@ static void fix_settings(void)
{
int i;
- dprintkdbg(DBG_1,
- "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
- "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
- cfg_data[CFG_ADAPTER_ID].value,
- cfg_data[CFG_MAX_SPEED].value,
- cfg_data[CFG_DEV_MODE].value,
- cfg_data[CFG_ADAPTER_MODE].value,
- cfg_data[CFG_TAGS].value,
- cfg_data[CFG_RESET_DELAY].value);
for (i = 0; i < CFG_NUM; i++)
{
if (cfg_data[i].value < cfg_data[i].min
@@ -821,9 +751,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
static void waiting_timeout(struct timer_list *t)
{
unsigned long flags;
- struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
- dprintkdbg(DBG_1,
- "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
+ struct AdapterCtlBlk *acb = timer_container_of(acb, t, waiting_timer);
DC395x_LOCK_IO(acb->scsi_host, flags);
waiting_process_next(acb);
DC395x_UNLOCK_IO(acb->scsi_host, flags);
@@ -864,8 +792,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
{
int nseg;
enum dma_data_direction dir = cmd->sc_data_direction;
- dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
- cmd, dcb->target_id, dcb->target_lun);
srb->dcb = dcb;
srb->cmd = cmd;
@@ -887,12 +813,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
- if (dir == DMA_NONE || !nseg) {
- dprintkdbg(DBG_0,
- "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
- cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
- srb->segment_x[0].address);
- } else {
+ if (!(dir == DMA_NONE || !nseg)) {
int i;
u32 reqlen = scsi_bufflen(cmd);
struct scatterlist *sg;
@@ -900,11 +821,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
srb->sg_count = nseg;
- dprintkdbg(DBG_0,
- "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
- reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
- srb->sg_count);
-
scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
u32 busaddr = (u32)sg_dma_address(sg);
u32 seglen = (u32)sg->length;
@@ -933,8 +849,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
- dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
- srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
}
srb->request_length = srb->total_xfer_length;
@@ -966,8 +880,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
struct ScsiReqBlk *srb;
struct AdapterCtlBlk *acb =
(struct AdapterCtlBlk *)cmd->device->host->hostdata;
- dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
- cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
/* Assume BAD_TARGET; will be cleared later */
set_host_byte(cmd, DID_BAD_TARGET);
@@ -975,37 +887,26 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
/* ignore invalid targets */
if (cmd->device->id >= acb->scsi_host->max_id ||
cmd->device->lun >= acb->scsi_host->max_lun ||
- cmd->device->lun >31) {
+ cmd->device->lun > 31)
goto complete;
- }
/* does the specified lun on the specified device exist */
- if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
- dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
- cmd->device->id, (u8)cmd->device->lun);
+ if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun)))
goto complete;
- }
/* do we have a DCB for the device */
dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
- if (!dcb) {
- /* should never happen */
- dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
- cmd->device->id, (u8)cmd->device->lun);
+ if (!dcb)
goto complete;
- }
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_GOOD);
srb = list_first_entry_or_null(&acb->srb_free_list,
- struct ScsiReqBlk, list);
+ struct ScsiReqBlk, list);
+
if (!srb) {
- /*
- * Return 1 since we are unable to queue this command at this
- * point in time.
- */
- dprintkdbg(DBG_0, "queue_command: No free srb's\n");
+ /* should never happen */
return 1;
}
list_del(&srb->list);
@@ -1020,7 +921,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
/* process immediately */
send_srb(acb, srb);
}
- dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
return 0;
complete:
@@ -1036,82 +936,8 @@ complete:
static DEF_SCSI_QCMD(dc395x_queue_command)
-static void dump_register_info(struct AdapterCtlBlk *acb,
- struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
- u16 pstat;
- struct pci_dev *dev = acb->dev;
- pci_read_config_word(dev, PCI_STATUS, &pstat);
- if (!dcb)
- dcb = acb->active_dcb;
- if (!srb && dcb)
- srb = dcb->active_srb;
- if (srb) {
- if (!srb->cmd)
- dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
- srb, srb->cmd);
- else
- dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
- "cmnd=0x%02x <%02i-%i>\n",
- srb, srb->cmd,
- srb->cmd->cmnd[0], srb->cmd->device->id,
- (u8)srb->cmd->device->lun);
- printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
- srb->segment_x, srb->sg_count, srb->sg_index,
- srb->total_xfer_length);
- printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
- srb->state, srb->status, srb->scsi_phase,
- (acb->active_dcb) ? "" : "not");
- }
- dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
- "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
- "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
- "config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
- DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
- DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
- DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
- DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
- DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
- DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
- DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
- DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
- DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
- DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
- DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
- DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
- dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
- "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
- "ctctr=0x%08x addr=0x%08x:0x%08x}\n",
- DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
- DC395x_read8(acb, TRM_S1040_DMA_STATUS),
- DC395x_read8(acb, TRM_S1040_DMA_INTEN),
- DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
- DC395x_read32(acb, TRM_S1040_DMA_XCNT),
- DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
- DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
- DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
- dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
- "pci{status=0x%04x}\n",
- DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
- DC395x_read8(acb, TRM_S1040_GEN_STATUS),
- DC395x_read8(acb, TRM_S1040_GEN_TIMER),
- pstat);
-}
-
-
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
-#if debug_enabled(DBG_FIFO)
- u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
- u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
- if (!(fifocnt & 0x40))
- dprintkdbg(DBG_FIFO,
- "clear_fifo: (%i bytes) on phase %02x in %s\n",
- fifocnt & 0x3f, lines, txt);
-#endif
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
@@ -1120,7 +946,6 @@ static void reset_dev_param(struct AdapterCtlBlk *acb)
{
struct DeviceCtlBlk *dcb;
struct NvRamType *eeprom = &acb->eeprom;
- dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
list_for_each_entry(dcb, &acb->dcb_list, list) {
u8 period_index;
@@ -1148,9 +973,6 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterCtlBlk *acb =
(struct AdapterCtlBlk *)cmd->device->host->hostdata;
- dprintkl(KERN_INFO,
- "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
- cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
if (timer_pending(&acb->waiting_timer))
timer_delete(&acb->waiting_timer);
@@ -1216,14 +1038,10 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
(struct AdapterCtlBlk *)cmd->device->host->hostdata;
struct DeviceCtlBlk *dcb;
struct ScsiReqBlk *srb;
- dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
- cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
- if (!dcb) {
- dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
+ if (!dcb)
return FAILED;
- }
srb = find_cmd(cmd, &dcb->srb_waiting_list);
if (srb) {
@@ -1232,16 +1050,12 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
pci_unmap_srb(acb, srb);
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
- dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
set_host_byte(cmd, DID_ABORT);
return SUCCESS;
}
srb = find_cmd(cmd, &dcb->srb_going_list);
if (srb) {
- dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
/* XXX: Should abort the command here */
- } else {
- dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
}
return FAILED;
}
@@ -1253,10 +1067,6 @@ static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
{
u8 *ptr = srb->msgout_buf + srb->msg_count;
if (srb->msg_count > 1) {
- dprintkl(KERN_INFO,
- "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
- srb->msg_count, srb->msgout_buf[0],
- srb->msgout_buf[1]);
return;
}
if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
@@ -1278,13 +1088,9 @@ static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
(acb->config & HCC_WIDE_CARD)) ? 1 : 0;
u8 *ptr = srb->msgout_buf + srb->msg_count;
- if (srb->msg_count > 1) {
- dprintkl(KERN_INFO,
- "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
- srb->msg_count, srb->msgout_buf[0],
- srb->msgout_buf[1]);
+ if (srb->msg_count > 1)
return;
- }
+
srb->msg_count += spi_populate_width_msg(ptr, wide);
srb->state |= SRB_DO_WIDE_NEGO;
}
@@ -1316,11 +1122,9 @@ void selection_timeout_missed(unsigned long ptr)
unsigned long flags;
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
struct ScsiReqBlk *srb;
- dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
- if (!acb->active_dcb || !acb->active_dcb->active_srb) {
- dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
+ if (!acb->active_dcb || !acb->active_dcb->active_srb)
return;
- }
+
DC395x_LOCK_IO(acb->scsi_host, flags);
srb = acb->active_dcb->active_srb;
disconnect(acb);
@@ -1335,8 +1139,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
u16 __maybe_unused s_stat2, return_code;
u8 s_stat, scsicommand, i, identify_message;
u8 *ptr;
- dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
- dcb->target_id, dcb->target_lun, srb);
srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
@@ -1345,8 +1147,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
#if 1
if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
- dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
- s_stat, s_stat2);
/*
* Try anyway?
*
@@ -1361,24 +1161,16 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
return 1;
}
#endif
- if (acb->active_dcb) {
- dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
- "command while another command (0x%p) is active.",
- srb->cmd,
- acb->active_dcb->active_srb ?
- acb->active_dcb->active_srb->cmd : NULL);
+ if (acb->active_dcb)
return 1;
- }
- if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
- dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
+
+ if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT)
return 1;
- }
+
/* Allow starting of SCSI commands half a second before we allow the mid-level
* to queue them again after a reset */
- if (time_before(jiffies, acb->last_reset - HZ / 2)) {
- dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
+ if (time_before(jiffies, acb->last_reset - HZ / 2))
return 1;
- }
/* Flush FIFO */
clear_fifo(acb, "start_scsi");
@@ -1442,10 +1234,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
tag_number++;
}
if (tag_number >= dcb->max_command) {
- dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
- "Out of tags target=<%02i-%i>)\n",
- srb->cmd, srb->cmd->device->id,
- (u8)srb->cmd->device->lun);
srb->state = SRB_READY;
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
DO_HWRESELECT);
@@ -1462,9 +1250,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
#endif
/*polling:*/
/* Send CDB ..command block ......... */
- dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
- srb->cmd->cmnd[0], srb->tag_number);
if (srb->flag & AUTO_REQSENSE) {
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
@@ -1486,8 +1271,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
* we caught an interrupt (must be reset or reselection ... )
* : Let's process it first!
*/
- dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
- srb->cmd, dcb->target_id, dcb->target_lun);
srb->state = SRB_READY;
free_tag(dcb, srb);
srb->msg_count = 0;
@@ -1551,14 +1334,6 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
/* This acknowledges the IRQ */
scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
- if ((scsi_status & 0x2007) == 0x2002)
- dprintkl(KERN_DEBUG,
- "COP after COP completed? %04x\n", scsi_status);
- if (debug_enabled(DBG_KG)) {
- if (scsi_intstatus & INT_SELTIMEOUT)
- dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
- }
- /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */
if (timer_pending(&acb->selto_timer))
timer_delete(&acb->selto_timer);
@@ -1571,27 +1346,21 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
reselect(acb);
goto out_unlock;
}
- if (scsi_intstatus & INT_SELECT) {
- dprintkl(KERN_INFO, "Host does not support target mode!\n");
+ if (scsi_intstatus & INT_SELECT)
goto out_unlock;
- }
+
if (scsi_intstatus & INT_SCSIRESET) {
scsi_reset_detect(acb);
goto out_unlock;
}
if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
dcb = acb->active_dcb;
- if (!dcb) {
- dprintkl(KERN_DEBUG,
- "Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
- scsi_status, scsi_intstatus);
+ if (!dcb)
goto out_unlock;
- }
+
srb = dcb->active_srb;
- if (dcb->flag & ABORT_DEV_) {
- dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
+ if (dcb->flag & ABORT_DEV_)
enable_msgout_abort(acb, srb);
- }
/* software sequential machine */
phase = (u16)srb->scsi_phase;
@@ -1659,9 +1428,7 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
}
else if (dma_status & 0x20) {
/* Error from the DMA engine */
- dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
- dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
if (acb->active_dcb) {
acb->active_dcb-> flag |= ABORT_DEV_;
if (acb->active_dcb->active_srb)
@@ -1669,7 +1436,6 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
}
DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
- dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
acb = NULL;
#endif
handled = IRQ_HANDLED;
@@ -1682,7 +1448,6 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
*pscsi_status = PH_BUS_FREE; /*.. initial phase */
@@ -1696,18 +1461,12 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
{
u16 i;
u8 *ptr;
- dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
clear_fifo(acb, "msgout_phase1");
- if (!(srb->state & SRB_MSGOUT)) {
+ if (!(srb->state & SRB_MSGOUT))
srb->state |= SRB_MSGOUT;
- dprintkl(KERN_DEBUG,
- "msgout_phase1: (0x%p) Phase unexpected\n",
- srb->cmd); /* So what ? */
- }
+
if (!srb->msg_count) {
- dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
- srb->cmd);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP);
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
/* it's important for atn stop */
@@ -1728,7 +1487,6 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
@@ -1739,7 +1497,6 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
struct DeviceCtlBlk *dcb;
u8 *ptr;
u16 i;
- dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
clear_fifo(acb, "command_phase1");
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
@@ -1768,26 +1525,6 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
/*
- * Verify that the remaining space in the hw sg lists is the same as
- * the count of remaining bytes in srb->total_xfer_length
- */
-static void sg_verify_length(struct ScsiReqBlk *srb)
-{
- if (debug_enabled(DBG_SG)) {
- unsigned len = 0;
- unsigned idx = srb->sg_index;
- struct SGentry *psge = srb->segment_x + idx;
- for (; idx < srb->sg_count; psge++, idx++)
- len += psge->length;
- if (len != srb->total_xfer_length)
- dprintkdbg(DBG_SG,
- "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
- srb->total_xfer_length, len);
- }
-}
-
-
-/*
* Compute the next Scatter Gather list index and adjust its length
* and address if necessary
*/
@@ -1797,15 +1534,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
u32 xferred = srb->total_xfer_length - left; /* bytes transferred */
struct SGentry *psge = srb->segment_x + srb->sg_index;
- dprintkdbg(DBG_0,
- "sg_update_list: Transferred %i of %i bytes, %i remain\n",
- xferred, srb->total_xfer_length, left);
if (xferred == 0) {
/* nothing to update since we did not transfer any data */
return;
}
- sg_verify_length(srb);
srb->total_xfer_length = left; /* update remaining count */
for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
if (xferred >= psge->length) {
@@ -1826,7 +1559,6 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
}
psge++;
}
- sg_verify_length(srb);
}
@@ -1882,8 +1614,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
struct DeviceCtlBlk *dcb = srb->dcb;
u16 scsi_status = *pscsi_status;
u32 d_left_counter = 0;
- dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
/*
* KG: We need to drain the buffers before we draw any conclusions!
@@ -1897,14 +1627,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
* KG: Stop DMA engine pushing more data into the SCSI FIFO
* If we need more data, the DMA SG list will be freshly set up, anyway
*/
- dprintkdbg(DBG_PIO, "data_out_phase0: "
- "DMA{fifocnt=0x%02x fifostat=0x%02x} "
- "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
- DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
- DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
- DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
- srb->total_xfer_length);
DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);
if (!(srb->state & SRB_XFERPAD)) {
@@ -1928,16 +1650,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
if (dcb->sync_period & WIDE_SYNC)
d_left_counter <<= 1;
- dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
- "SCSI{fifocnt=0x%02x cnt=0x%08x} "
- "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
- DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
- (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
- DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
- DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
- DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
}
/*
* calculate all the residue data that has not yet been transferred
@@ -1958,9 +1670,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
&& scsi_bufflen(srb->cmd) % 2) {
d_left_counter = 0;
- dprintkl(KERN_INFO,
- "data_out_phase0: Discard 1 byte (0x%02x)\n",
- scsi_status);
}
/*
* KG: Oops again. Same thinko as above: The SCSI might have been
@@ -1991,8 +1700,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
|| ((oldxferred & ~PAGE_MASK) ==
(PAGE_SIZE - diff))
) {
- dprintkl(KERN_INFO, "data_out_phase0: "
- "Work around chip bug (%i)?\n", diff);
d_left_counter =
srb->total_xfer_length - diff;
sg_update_list(srb, d_left_counter);
@@ -2003,17 +1710,14 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
}
}
}
- if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
+ if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT)
cleanup_after_transfer(acb, srb);
- }
}
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
clear_fifo(acb, "data_out_phase1");
/* do prepare before transfer when data out phase */
data_io_transfer(acb, srb, XFERDATAOUT);
@@ -2024,8 +1728,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
{
u16 scsi_status = *pscsi_status;
- dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
/*
* KG: DataIn is much trickier than DataOut. When the device is finished
@@ -2045,8 +1747,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
unsigned int sc, fc;
if (scsi_status & PARITYERROR) {
- dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
- "Parity Error\n", srb->cmd);
srb->status |= PARITY_ERROR;
}
/*
@@ -2058,26 +1758,14 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
#if 0
int ctr = 6000000;
- dprintkl(KERN_DEBUG,
- "DIP0: Wait for DMA FIFO to flush ...\n");
/*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
/*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
while (!
(DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
0x80) && --ctr);
- if (ctr < 6000000 - 1)
- dprintkl(KERN_DEBUG
- "DIP0: Had to wait for DMA ...\n");
- if (!ctr)
- dprintkl(KERN_ERR,
- "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
/*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
#endif
- dprintkdbg(DBG_KG, "data_in_phase0: "
- "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
- DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
}
/* Now: Check remaining data: The SCSI counters should tell us ... */
sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
@@ -2085,17 +1773,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
d_left_counter = sc + ((fc & 0x1f)
<< ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
0));
- dprintkdbg(DBG_KG, "data_in_phase0: "
- "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
- "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
- "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
- fc,
- (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
- sc,
- fc,
- DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
- DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
- srb->total_xfer_length, d_left_counter);
#if DC395x_LASTPIO
/* KG: Less than or equal to 4 bytes cannot be transferred via DMA, it seems. */
if (d_left_counter
@@ -2104,12 +1781,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
/*u32 addr = (srb->segment_x[srb->sg_index].address); */
/*sg_update_list (srb, d_left_counter); */
- dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
- "for remaining %i bytes:",
- fc & 0x1f,
- (srb->dcb->sync_period & WIDE_SYNC) ?
- "words" : "bytes",
- srb->total_xfer_length);
if (srb->dcb->sync_period & WIDE_SYNC)
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
CFG2_WIDEFIFO);
@@ -2133,9 +1804,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
*virt++ = byte;
- if (debug_enabled(DBG_PIO))
- printk(" %02x", byte);
-
d_left_counter--;
sg_subtract_one(srb);
@@ -2158,8 +1826,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
*virt++ = byte;
srb->total_xfer_length--;
- if (debug_enabled(DBG_PIO))
- printk(" %02x", byte);
}
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
@@ -2168,10 +1834,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
scsi_kunmap_atomic_sg(base);
local_irq_restore(flags);
}
- /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
/*srb->total_xfer_length = 0; */
- if (debug_enabled(DBG_PIO))
- printk("\n");
}
#endif /* DC395x_LASTPIO */
@@ -2207,9 +1870,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
TempDMAstatus =
DC395x_read8(acb, TRM_S1040_DMA_STATUS);
} while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
- if (!ctr)
- dprintkl(KERN_ERR,
- "Deadlock in DataInPhase0 waiting for DMA!!\n");
srb->total_xfer_length = 0;
#endif
srb->total_xfer_length = d_left_counter;
@@ -2226,17 +1886,14 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
}
}
/* KG: The target may decide to disconnect: Empty FIFO before! */
- if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
+ if ((*pscsi_status & PHASEMASK) != PH_DATA_IN)
cleanup_after_transfer(acb, srb);
- }
}
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
data_io_transfer(acb, srb, XFERDATAIN);
}
@@ -2246,13 +1903,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
{
struct DeviceCtlBlk *dcb = srb->dcb;
u8 bval;
- dprintkdbg(DBG_0,
- "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
- ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
- srb->total_xfer_length, srb->sg_index, srb->sg_count);
- if (srb == acb->tmp_srb)
- dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
+
if (srb->sg_index >= srb->sg_count) {
/* can't happen? out of bounds error */
return;
@@ -2265,9 +1916,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
* Maybe, even ABORTXFER would be appropriate
*/
if (dma_status & XFERPENDING) {
- dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
- "Expect trouble!\n");
- dump_register_info(acb, dcb, srb);
DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
}
/* clear_fifo(acb, "IO"); */
@@ -2346,9 +1994,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
left_io -= len;
while (len--) {
- if (debug_enabled(DBG_PIO))
- printk(" %02x", *virt);
-
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
sg_subtract_one(srb);
@@ -2360,14 +2005,10 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
if (srb->dcb->sync_period & WIDE_SYNC) {
if (ln % 2) {
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
- if (debug_enabled(DBG_PIO))
- printk(" |00");
}
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
}
/*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
- if (debug_enabled(DBG_PIO))
- printk("\n");
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
SCMD_FIFO_OUT);
}
@@ -2419,8 +2060,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */
srb->state = SRB_COMPLETED;
@@ -2433,8 +2072,6 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
srb->state = SRB_STATUS;
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
@@ -2464,9 +2101,6 @@ static inline void msgin_reject(struct AdapterCtlBlk *acb,
DC395x_ENABLE_MSGOUT;
srb->state &= ~SRB_MSGIN;
srb->state |= SRB_MSGOUT;
- dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
- srb->msgin_buf[0],
- srb->dcb->target_id, srb->dcb->target_lun);
}
@@ -2475,13 +2109,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
{
struct ScsiReqBlk *srb = NULL;
struct ScsiReqBlk *i;
- dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
- srb->cmd, tag, srb);
-
- if (!(dcb->tag_mask & (1 << tag)))
- dprintkl(KERN_DEBUG,
- "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
- dcb->tag_mask, tag);
if (list_empty(&dcb->srb_going_list))
goto mingx0;
@@ -2494,8 +2121,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
if (!srb)
goto mingx0;
- dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
if (dcb->flag & ABORT_DEV_) {
/*srb->state = SRB_ABORT_SENT; */
enable_msgout_abort(acb, srb);
@@ -2518,7 +2143,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
srb->msgout_buf[0] = ABORT_TASK;
srb->msg_count = 1;
DC395x_ENABLE_MSGOUT;
- dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
return srb;
}
@@ -2537,8 +2161,6 @@ static inline void reprogram_regs(struct AdapterCtlBlk *acb,
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
struct DeviceCtlBlk *dcb = srb->dcb;
- dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
dcb->sync_mode |= SYNC_NEGO_DONE;
@@ -2551,7 +2173,6 @@ static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
&& !(dcb->sync_mode & WIDE_NEGO_DONE)) {
build_wdtr(acb, dcb, srb);
DC395x_ENABLE_MSGOUT;
- dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
}
}
@@ -2562,12 +2183,6 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct DeviceCtlBlk *dcb = srb->dcb;
u8 bval;
int fact;
- dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
- "(%02i.%01i MHz) Offset %i\n",
- dcb->target_id, srb->msgin_buf[3] << 2,
- (250 / srb->msgin_buf[3]),
- ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
- srb->msgin_buf[4]);
if (srb->msgin_buf[4] > 15)
srb->msgin_buf[4] = 15;
@@ -2584,10 +2199,7 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
|| dcb->min_nego_period >
clock_period[bval]))
bval++;
- if (srb->msgin_buf[3] < clock_period[bval])
- dprintkl(KERN_INFO,
- "msgin_set_sync: Increase sync nego period to %ins\n",
- clock_period[bval] << 2);
+
srb->msgin_buf[3] = clock_period[bval];
dcb->sync_period &= 0xf0;
dcb->sync_period |= ALT_SYNC | bval;
@@ -2598,18 +2210,8 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
else
fact = 250;
- dprintkl(KERN_INFO,
- "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
- dcb->target_id, (fact == 500) ? "Wide16" : "",
- dcb->min_nego_period << 2, dcb->sync_offset,
- (fact / dcb->min_nego_period),
- ((fact % dcb->min_nego_period) * 10 +
- dcb->min_nego_period / 2) / dcb->min_nego_period);
-
if (!(srb->state & SRB_DO_SYNC_NEGO)) {
/* Reply with corrected SDTR Message */
- dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
- srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
memcpy(srb->msgout_buf, srb->msgin_buf, 5);
srb->msg_count = 5;
@@ -2620,7 +2222,6 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
&& !(dcb->sync_mode & WIDE_NEGO_DONE)) {
build_wdtr(acb, dcb, srb);
DC395x_ENABLE_MSGOUT;
- dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
}
}
srb->state &= ~SRB_DO_SYNC_NEGO;
@@ -2634,7 +2235,6 @@ static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
struct ScsiReqBlk *srb)
{
struct DeviceCtlBlk *dcb = srb->dcb;
- dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
dcb->sync_period &= ~WIDE_SYNC;
dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
@@ -2645,7 +2245,6 @@ static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
&& !(dcb->sync_mode & SYNC_NEGO_DONE)) {
build_sdtr(acb, dcb, srb);
DC395x_ENABLE_MSGOUT;
- dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
}
}
@@ -2654,15 +2253,11 @@ static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct DeviceCtlBlk *dcb = srb->dcb;
u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
&& acb->config & HCC_WIDE_CARD) ? 1 : 0;
- dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
if (srb->msgin_buf[3] > wide)
srb->msgin_buf[3] = wide;
/* Completed */
if (!(srb->state & SRB_DO_WIDE_NEGO)) {
- dprintkl(KERN_DEBUG,
- "msgin_set_wide: Wide nego initiated <%02i>\n",
- dcb->target_id);
memcpy(srb->msgout_buf, srb->msgin_buf, 4);
srb->msg_count = 4;
srb->state |= SRB_DO_WIDE_NEGO;
@@ -2676,15 +2271,11 @@ static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
dcb->sync_period &= ~WIDE_SYNC;
srb->state &= ~SRB_DO_WIDE_NEGO;
/*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
- dprintkdbg(DBG_1,
- "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
- (8 << srb->msgin_buf[3]), dcb->target_id);
reprogram_regs(acb, dcb);
if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
&& !(dcb->sync_mode & SYNC_NEGO_DONE)) {
build_sdtr(acb, dcb, srb);
DC395x_ENABLE_MSGOUT;
- dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
}
}
@@ -2705,7 +2296,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
struct DeviceCtlBlk *dcb = acb->active_dcb;
- dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
@@ -2759,7 +2349,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
case IGNORE_WIDE_RESIDUE:
/* Discard wide residual */
- dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
break;
case COMMAND_COMPLETE:
@@ -2771,20 +2360,12 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
* SAVE POINTER may be ignored as we have the struct
* ScsiReqBlk* associated with the scsi command.
*/
- dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
- "SAVE POINTER rem=%i Ignore\n",
- srb->cmd, srb->total_xfer_length);
break;
case RESTORE_POINTERS:
- dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
break;
case ABORT:
- dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
- "<%02i-%i> ABORT msg\n",
- srb->cmd, dcb->target_id,
- dcb->target_lun);
dcb->flag |= ABORT_DEV_;
enable_msgout_abort(acb, srb);
break;
@@ -2792,7 +2373,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
default:
/* reject unknown messages */
if (srb->msgin_buf[0] & IDENTIFY_BASE) {
- dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
srb->msg_count = 1;
srb->msgout_buf[0] = dcb->identify_msg;
DC395x_ENABLE_MSGOUT;
@@ -2815,7 +2395,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
- dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
clear_fifo(acb, "msgin_phase1");
DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
if (!(srb->state & SRB_MSGIN)) {
@@ -2869,7 +2448,6 @@ static void disconnect(struct AdapterCtlBlk *acb)
struct ScsiReqBlk *srb;
if (!dcb) {
- dprintkl(KERN_ERR, "disconnect: No such device\n");
udelay(500);
/* Suspend queue for a while */
acb->last_reset =
@@ -2881,21 +2459,16 @@ static void disconnect(struct AdapterCtlBlk *acb)
}
srb = dcb->active_srb;
acb->active_dcb = NULL;
- dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
srb->scsi_phase = PH_BUS_FREE; /* initial phase */
clear_fifo(acb, "disconnect");
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
if (srb->state & SRB_UNEXPECT_RESEL) {
- dprintkl(KERN_ERR,
- "disconnect: Unexpected reselection <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
srb->state = 0;
waiting_process_next(acb);
} else if (srb->state & SRB_ABORT_SENT) {
dcb->flag &= ~ABORT_DEV_;
acb->last_reset = jiffies + HZ / 2 + 1;
- dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
waiting_process_next(acb);
} else {
@@ -2910,16 +2483,10 @@ static void disconnect(struct AdapterCtlBlk *acb)
if (srb->state != SRB_START_
&& srb->state != SRB_MSGOUT) {
srb->state = SRB_READY;
- dprintkl(KERN_DEBUG,
- "disconnect: (0x%p) Unexpected\n",
- srb->cmd);
srb->target_status = SCSI_STAT_SEL_TIMEOUT;
goto disc1;
} else {
/* Normal selection timeout */
- dprintkdbg(DBG_KG, "disconnect: (0x%p) "
- "<%02i-%i> SelTO\n", srb->cmd,
- dcb->target_id, dcb->target_lun);
if (srb->retry_count++ > DC395x_MAX_RETRIES
|| acb->scan_devices) {
srb->target_status =
@@ -2928,9 +2495,6 @@ static void disconnect(struct AdapterCtlBlk *acb)
}
free_tag(dcb, srb);
list_move(&srb->list, &dcb->srb_waiting_list);
- dprintkdbg(DBG_KG,
- "disconnect: (0x%p) Retry\n",
- srb->cmd);
waiting_set_timer(acb, HZ / 20);
}
} else if (srb->state & SRB_DISCONNECT) {
@@ -2939,9 +2503,6 @@ static void disconnect(struct AdapterCtlBlk *acb)
* SRB_DISCONNECT (This is what we expect!)
*/
if (bval & 0x40) {
- dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
- " 0x%02x: ACK set! Other controllers?\n",
- bval);
/* It could come from another initiator, therefore don't do much! */
} else
waiting_process_next(acb);
@@ -2965,7 +2526,6 @@ static void reselect(struct AdapterCtlBlk *acb)
struct ScsiReqBlk *srb = NULL;
u16 rsel_tar_lun_id;
u8 id, lun;
- dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
clear_fifo(acb, "reselect");
/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
@@ -2974,18 +2534,11 @@ static void reselect(struct AdapterCtlBlk *acb)
if (dcb) { /* Arbitration lost but Reselection win */
srb = dcb->active_srb;
if (!srb) {
- dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
- "but active_srb == NULL\n");
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
return;
}
/* Why the if ? */
if (!acb->scan_devices) {
- dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
- "Arb lost but Resel win rsel=%i stat=0x%04x\n",
- srb->cmd, dcb->target_id,
- dcb->target_lun, rsel_tar_lun_id,
- DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
/*srb->state |= SRB_DISCONNECT; */
srb->state = SRB_READY;
@@ -2997,25 +2550,15 @@ static void reselect(struct AdapterCtlBlk *acb)
}
}
/* Read Reselected Target Id and LUN */
- if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
- dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
- "Got %i!\n", rsel_tar_lun_id);
id = rsel_tar_lun_id & 0xff;
lun = (rsel_tar_lun_id >> 8) & 7;
dcb = find_dcb(acb, id, lun);
if (!dcb) {
- dprintkl(KERN_ERR, "reselect: From non existent device "
- "<%02i-%i>\n", id, lun);
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
return;
}
acb->active_dcb = dcb;
- if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
- dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
- "disconnection? <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
-
if (dcb->sync_mode & EN_TAG_QUEUEING) {
srb = acb->tmp_srb;
dcb->active_srb = srb;
@@ -3026,9 +2569,6 @@ static void reselect(struct AdapterCtlBlk *acb)
/*
* abort command
*/
- dprintkl(KERN_DEBUG,
- "reselect: w/o disconnected cmds <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
srb = acb->tmp_srb;
srb->state = SRB_UNEXPECT_RESEL;
dcb->active_srb = srb;
@@ -3045,7 +2585,6 @@ static void reselect(struct AdapterCtlBlk *acb)
srb->scsi_phase = PH_BUS_FREE; /* initial phase */
/* Program HA ID, target ID, period and offset */
- dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); /* host ID */
DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */
DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */
@@ -3111,12 +2650,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
if (scsi_sg_count(cmd) && dir != DMA_NONE) {
/* unmap DC395x SG list */
- dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
- srb->sg_bus_addr, SEGMENTX_LEN);
dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
DMA_TO_DEVICE);
- dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
- scsi_sg_count(cmd), scsi_bufflen(cmd));
/* unmap the sg segments */
scsi_dma_unmap(cmd);
}
@@ -3130,8 +2665,6 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
if (!(srb->flag & AUTO_REQSENSE))
return;
/* Unmap sense buffer */
- dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
- srb->segment_x[0].address);
dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
srb->segment_x[0].length, DMA_FROM_DEVICE);
/* Restore SG stuff */
@@ -3155,16 +2688,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
enum dma_data_direction dir = cmd->sc_data_direction;
int ckc_only = 1;
- dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
- srb->cmd->device->id, (u8)srb->cmd->device->lun);
- dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
- srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
- scsi_sgtalbe(cmd));
status = srb->target_status;
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_GOOD);
if (srb->flag & AUTO_REQSENSE) {
- dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
pci_unmap_srb_sense(acb, srb);
/*
** target status..........................
@@ -3172,57 +2699,11 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->flag &= ~AUTO_REQSENSE;
srb->adapter_status = 0;
srb->target_status = SAM_STAT_CHECK_CONDITION;
- if (debug_enabled(DBG_1)) {
- switch (cmd->sense_buffer[2] & 0x0f) {
- case NOT_READY:
- dprintkl(KERN_DEBUG,
- "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- case UNIT_ATTENTION:
- dprintkl(KERN_DEBUG,
- "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- case ILLEGAL_REQUEST:
- dprintkl(KERN_DEBUG,
- "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- case MEDIUM_ERROR:
- dprintkl(KERN_DEBUG,
- "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- case HARDWARE_ERROR:
- dprintkl(KERN_DEBUG,
- "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
- cmd->cmnd[0], dcb->target_id,
- dcb->target_lun, status, acb->scan_devices);
- break;
- }
- if (cmd->sense_buffer[7] >= 6)
- printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
- "(0x%08x 0x%08x)\n",
- cmd->sense_buffer[2], cmd->sense_buffer[12],
- cmd->sense_buffer[13],
- *((unsigned int *)(cmd->sense_buffer + 3)),
- *((unsigned int *)(cmd->sense_buffer + 8)));
- else
- printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
- cmd->sense_buffer[2],
- *((unsigned int *)(cmd->sense_buffer + 3)));
- }
if (status == SAM_STAT_CHECK_CONDITION) {
set_host_byte(cmd, DID_BAD_TARGET);
goto ckc_e;
}
- dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
@@ -3239,8 +2720,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
return;
} else if (status == SAM_STAT_TASK_SET_FULL) {
tempcnt = (u8)list_size(&dcb->srb_going_list);
- dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
- dcb->target_id, dcb->target_lun, tempcnt);
if (tempcnt > 1)
tempcnt--;
dcb->max_command = tempcnt;
@@ -3314,21 +2793,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/* Here is the info for Doug Gilbert's sg3 ... */
scsi_set_resid(cmd, srb->total_xfer_length);
- if (debug_enabled(DBG_KG)) {
- if (srb->total_xfer_length)
- dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
- "cmnd=0x%02x Missed %i bytes\n",
- cmd, cmd->device->id, (u8)cmd->device->lun,
- cmd->cmnd[0], srb->total_xfer_length);
- }
if (srb != acb->tmp_srb) {
/* Add to free list */
- dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
- cmd, cmd->result);
list_move_tail(&srb->list, &acb->srb_free_list);
- } else {
- dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
}
scsi_done(cmd);
@@ -3341,7 +2809,6 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
struct scsi_cmnd *cmd, u8 force)
{
struct DeviceCtlBlk *dcb;
- dprintkl(KERN_INFO, "doing_srb_done: pids ");
list_for_each_entry(dcb, &acb->dcb_list, list) {
struct ScsiReqBlk *srb;
@@ -3365,15 +2832,6 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
scsi_done(p);
}
}
- if (!list_empty(&dcb->srb_going_list))
- dprintkl(KERN_DEBUG,
- "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
- if (dcb->tag_mask)
- dprintkl(KERN_DEBUG,
- "tag_mask for <%02i-%i> should be empty, is %08x!\n",
- dcb->target_id, dcb->target_lun,
- dcb->tag_mask);
/* Waiting queue */
list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
@@ -3392,19 +2850,13 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
scsi_done(cmd);
}
}
- if (!list_empty(&dcb->srb_waiting_list))
- dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
- list_size(&dcb->srb_waiting_list), dcb->target_id,
- dcb->target_lun);
dcb->flag &= ~ABORT_DEV_;
}
- printk("\n");
}
static void reset_scsi_bus(struct AdapterCtlBlk *acb)
{
- dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
@@ -3451,7 +2903,6 @@ static void set_basic_config(struct AdapterCtlBlk *acb)
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
- dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
/* delay half a second */
if (timer_pending(&acb->waiting_timer))
timer_delete(&acb->waiting_timer);
@@ -3488,8 +2939,6 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
struct scsi_cmnd *cmd = srb->cmd;
- dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
- cmd, cmd->device->id, (u8)cmd->device->lun);
srb->flag |= AUTO_REQSENSE;
srb->adapter_status = 0;
@@ -3511,16 +2960,10 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
- dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
- cmd->sense_buffer, srb->segment_x[0].address,
- SCSI_SENSE_BUFFERSIZE);
srb->sg_count = 1;
srb->sg_index = 0;
if (start_scsi(acb, dcb, srb)) {	/* Should only happen if somebody else grabs the bus */
- dprintkl(KERN_DEBUG,
- "request_sense: (0x%p) failed <%02i-%i>\n",
- srb->cmd, dcb->target_id, dcb->target_lun);
list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 100);
}
@@ -3548,7 +2991,6 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb;
dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
- dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
if (!dcb)
return NULL;
dcb->acb = NULL;
@@ -3598,10 +3040,6 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
return NULL;
}
- dprintkdbg(DBG_1,
- "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
- dcb->target_id, dcb->target_lun,
- p->target_id, p->target_lun);
dcb->sync_mode = p->sync_mode;
dcb->sync_period = p->sync_period;
dcb->min_nego_period = p->min_nego_period;
@@ -3651,8 +3089,6 @@ static void adapter_remove_device(struct AdapterCtlBlk *acb,
{
struct DeviceCtlBlk *i;
struct DeviceCtlBlk *tmp;
- dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
- dcb->target_id, dcb->target_lun);
/* fix up any pointers to this device that we have in the adapter */
if (acb->active_dcb == dcb)
@@ -3685,10 +3121,6 @@ static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb)
{
if (list_size(&dcb->srb_going_list) > 1) {
- dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
- "Won't remove because of %i active requests.\n",
- dcb->target_id, dcb->target_lun,
- list_size(&dcb->srb_going_list));
return;
}
adapter_remove_device(acb, dcb);
@@ -3706,8 +3138,6 @@ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
{
struct DeviceCtlBlk *dcb;
struct DeviceCtlBlk *tmp;
- dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
- list_size(&acb->dcb_list));
list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
adapter_remove_and_free_device(acb, dcb);
@@ -4002,8 +3432,6 @@ static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
* Checksum is wrong.
* Load a set of defaults into the eeprom buffer
*/
- dprintkl(KERN_WARNING,
- "EEProm checksum error: using default values and options.\n");
eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
@@ -4055,15 +3483,6 @@ static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
**/
static void print_eeprom_settings(struct NvRamType *eeprom)
{
- dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
- eeprom->scsi_id,
- eeprom->target[0].period,
- clock_speed[eeprom->target[0].period] / 10,
- clock_speed[eeprom->target[0].period] % 10,
- eeprom->target[0].cfg0);
- dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
- eeprom->channel_cfg, eeprom->max_tag,
- 1 << eeprom->max_tag, eeprom->delay_time);
}
@@ -4094,15 +3513,12 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
- dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
while (pages--) {
ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!ptr) {
adapter_sg_tables_free(acb);
return 1;
}
- dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
- PAGE_SIZE, ptr, srb_idx);
i = 0;
while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
acb->srb_array[srb_idx++].segment_x =
@@ -4111,8 +3527,6 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
if (i < srbs_per_page)
acb->srb.segment_x =
ptr + (i * DC395x_MAX_SG_LISTENTRY);
- else
- dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
return 0;
}
@@ -4132,8 +3546,6 @@ static void adapter_print_config(struct AdapterCtlBlk *acb)
u8 bval;
bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
- dprintkl(KERN_INFO, "%sConnectors: ",
- ((bval & WIDESCSI) ? "(Wide) " : ""));
if (!(bval & CON5068))
printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
if (!(bval & CON68))
@@ -4293,7 +3705,6 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
acb->config |= HCC_SCSI_RESET;
if (acb->config & HCC_SCSI_RESET) {
- dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
/*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
@@ -4327,7 +3738,6 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
u32 io_port_len, unsigned int irq)
{
if (!request_region(io_port, io_port_len, DC395X_NAME)) {
- dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
goto failed;
}
/* store port base to indicate we have registered it */
@@ -4336,7 +3746,6 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
/* release the region we just claimed */
- dprintkl(KERN_INFO, "Failed to register IRQ\n");
goto failed;
}
/* store irq to indicate we have registered it */
@@ -4353,18 +3762,12 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
adapter_print_config(acb);
if (adapter_sg_tables_alloc(acb)) {
- dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
goto failed;
}
adapter_init_scsi_host(acb->scsi_host);
adapter_init_chip(acb);
set_basic_config(acb);
- dprintkdbg(DBG_0,
- "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
- "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
- acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
- sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
return 0;
failed:
@@ -4528,14 +3931,6 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
seq_putc(m, '\n');
}
- if (debug_enabled(DBG_1)) {
- seq_printf(m, "DCB list for ACB %p:\n", acb);
- list_for_each_entry(dcb, &acb->dcb_list, list) {
- seq_printf(m, "%p -> ", dcb);
- }
- seq_puts(m, "END\n");
- }
-
DC395x_UNLOCK_IO(acb->scsi_host, flags);
return 0;
}
@@ -4560,21 +3955,6 @@ static const struct scsi_host_template dc395x_driver_template = {
/**
- * banner_display - Display banner on first instance of driver
- * initialized.
- **/
-static void banner_display(void)
-{
- static int banner_done = 0;
- if (!banner_done)
- {
- dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
- banner_done = 1;
- }
-}
-
-
-/**
* dc395x_init_one - Initialise a single instance of the adapter.
*
* The PCI layer will call this once for each instance of the adapter
@@ -4595,33 +3975,25 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
unsigned int io_port_len;
unsigned int irq;
- dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
- banner_display();
-
if (pci_enable_device(dev))
- {
- dprintkl(KERN_INFO, "PCI Enable device failed.\n");
return -ENODEV;
- }
+
io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
io_port_len = pci_resource_len(dev, 0);
irq = dev->irq;
- dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);
/* allocate scsi host information (includes out adapter) */
scsi_host = scsi_host_alloc(&dc395x_driver_template,
sizeof(struct AdapterCtlBlk));
- if (!scsi_host) {
- dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
+ if (!scsi_host)
goto fail;
- }
+
acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
acb->scsi_host = scsi_host;
acb->dev = dev;
/* initialise the adapter and everything we need */
if (adapter_init(acb, io_port_base, io_port_len, irq)) {
- dprintkl(KERN_INFO, "adapter init failed\n");
acb = NULL;
goto fail;
}
@@ -4629,10 +4001,9 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_master(dev);
/* get the scsi mid level to scan for new devices on the bus */
- if (scsi_add_host(scsi_host, &dev->dev)) {
- dprintkl(KERN_ERR, "scsi_add_host failed\n");
+ if (scsi_add_host(scsi_host, &dev->dev))
goto fail;
- }
+
pci_set_drvdata(dev, scsi_host);
scsi_scan_host(scsi_host);
@@ -4659,8 +4030,6 @@ static void dc395x_remove_one(struct pci_dev *dev)
struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
- dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);
-
scsi_remove_host(scsi_host);
adapter_uninit(acb);
pci_disable_device(dev);
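
The dc395x hunks above strip the driver's private dprintkdbg()/dprintkl() tracing wholesale. A minimal sketch of what a call site could use instead, if tracing is still wanted, is the kernel's generic dynamic-debug helper; the function below is hypothetical and only illustrates the pattern (acb->dev is the driver's struct pci_dev *):

	/* Hypothetical illustration -- not part of this patch. dev_dbg()
	 * is compiled out unless dynamic debug enables this call site. */
	static void example_trace_srb(struct AdapterCtlBlk *acb,
				      struct ScsiReqBlk *srb)
	{
		dev_dbg(&acb->dev->dev, "srb %p <%02i-%i> remain=%i\n",
			srb->cmd, srb->cmd->device->id,
			(u8)srb->cmd->device->lun, srb->total_xfer_length);
	}
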
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index 5a5525054d71..5b079b8b7a08 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -1120,7 +1120,7 @@ int
efct_hw_parse_filter(struct efct_hw *hw, void *value)
{
int rc = 0;
- char *p = NULL;
+ char *p = NULL, *pp = NULL;
char *token;
u32 idx = 0;
@@ -1132,6 +1132,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value)
efc_log_err(hw->os, "p is NULL\n");
return -ENOMEM;
}
+ pp = p;
idx = 0;
while ((token = strsep(&p, ",")) && *token) {
@@ -1144,7 +1145,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value)
if (idx == ARRAY_SIZE(hw->config.filter_def))
break;
}
- kfree(p);
+ kfree(pp);
return rc;
}
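
The efct_hw_parse_filter() hunk fixes a classic strsep() pitfall: strsep() advances the pointer it is handed, so by the time the loop ends, p no longer points at the kstrdup() allocation (it is usually NULL), and kfree(p) silently leaks the buffer. Keeping the original pointer in pp and freeing that is the fix. A self-contained userspace sketch of the same bug class, assuming nothing beyond libc:

	#include <stdlib.h>
	#include <string.h>

	static void parse(const char *value)
	{
		char *cursor = strdup(value);	/* analogous to the kstrdup() above */
		char *start = cursor;		/* keep the original allocation */
		char *token;

		while ((token = strsep(&cursor, ",")) && *token)
			;			/* consume token */

		free(start);	/* free(cursor) would leak: cursor is NULL after the loop */
	}
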
diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c
index 2aca60f6428e..dfe05fab7b42 100644
--- a/drivers/scsi/elx/efct/efct_xport.c
+++ b/drivers/scsi/elx/efct/efct_xport.c
@@ -180,7 +180,7 @@ efct_xport_config_stats_timer(struct efct *efct);
static void
efct_xport_stats_timer_cb(struct timer_list *t)
{
- struct efct_xport *xport = from_timer(xport, t, stats_timer);
+ struct efct_xport *xport = timer_container_of(xport, t, stats_timer);
struct efct *efct = xport->efct;
efct_xport_config_stats_timer(efct);
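
This and the following hunks are a mechanical rename: from_timer() became timer_container_of(), the timer-specific spelling of container_of() that maps an embedded timer_list back to its enclosing structure. A minimal sketch of the pattern, using a hypothetical context struct:

	struct my_ctx {
		struct timer_list timer;	/* embedded timer */
		unsigned int hits;
	};

	static void my_timer_cb(struct timer_list *t)
	{
		/* recover the struct my_ctx that contains 't' */
		struct my_ctx *ctx = timer_container_of(ctx, t, timer);

		ctx->hits++;
	}
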
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
index 84bc81d7ce76..1786cee08729 100644
--- a/drivers/scsi/elx/libefc/efc_els.c
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -147,7 +147,7 @@ efc_els_retry(struct efc_els_io_req *els);
static void
efc_els_delay_timer_cb(struct timer_list *t)
{
- struct efc_els_io_req *els = from_timer(els, t, delay_timer);
+ struct efc_els_io_req *els = timer_container_of(els, t, delay_timer);
/* Retry delay timer expired, retry the ELS request */
efc_els_retry(els);
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
index cf7e738c4edc..4ed9f46ded65 100644
--- a/drivers/scsi/elx/libefc/efc_fabric.c
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -886,7 +886,7 @@ __efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
static void
gidpt_delay_timer_cb(struct timer_list *t)
{
- struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
+ struct efc_node *node = timer_container_of(node, t, gidpt_delay_timer);
timer_delete(&node->gidpt_delay_timer);
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 5e7fb110bc3f..d9a231fc0e0d 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -3804,7 +3804,7 @@ sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
wr_obj->desired_write_len_dword = cpu_to_le32(dwflags);
wr_obj->write_offset = cpu_to_le32(offset);
- strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1);
+ strscpy(wr_obj->object_name, obj_name);
wr_obj->host_buffer_descriptor_count = cpu_to_le32(1);
bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor;
@@ -3833,7 +3833,7 @@ sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name)
SLI4_SUBSYSTEM_COMMON, CMD_V0,
SLI4_RQST_PYLD_LEN(cmn_delete_object));
- strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1);
+ strscpy(req->object_name, obj_name);
return 0;
}
@@ -3856,7 +3856,7 @@ sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len,
cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN);
rd_obj->read_offset = cpu_to_le32(offset);
- strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1);
+ strscpy(rd_obj->object_name, obj_name);
rd_obj->host_buffer_descriptor_count = cpu_to_le32(1);
bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor;
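
The three sli4.c hunks replace strncpy(), which neither guarantees NUL termination nor signals truncation, with strscpy(), which always terminates the destination and returns the copied length or -E2BIG on truncation. The two-argument form used here derives the bound from the destination array's type. A sketch with a hypothetical fixed-size name field:

	struct obj_req {
		char object_name[104];		/* hypothetical size */
	};

	static void set_name(struct obj_req *req, const char *name)
	{
		/* always NUL-terminated; sizeof(req->object_name) is implied */
		if (strscpy(req->object_name, name) < 0)
			pr_warn("object name truncated\n");
	}
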
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 44871746944a..3f31875ff46e 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -1585,7 +1585,7 @@ void esas2r_kickoff_timer(struct esas2r_adapter *a)
static void esas2r_timer_callback(struct timer_list *t)
{
- struct esas2r_adapter *a = from_timer(a, t, timer);
+ struct esas2r_adapter *a = timer_container_of(a, t, timer);
set_bit(AF2_TIMER_TICK, &a->flags2);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 56d270526c9c..8e4241c295e3 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1773,7 +1773,7 @@ unlock:
*/
static void fcoe_ctlr_timeout(struct timer_list *t)
{
- struct fcoe_ctlr *fip = from_timer(fip, t, timer);
+ struct fcoe_ctlr *fip = timer_container_of(fip, t, timer);
schedule_work(&fip->timer_work);
}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index a48d24af9ac3..2f478426f16e 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -447,7 +447,7 @@ EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
*/
void fcoe_queue_timer(struct timer_list *t)
{
- struct fcoe_port *port = from_timer(port, t, timer);
+ struct fcoe_port *port = timer_container_of(port, t, timer);
fcoe_check_wait_queue(port->lport, NULL);
}
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
index c2b6f4eb338e..f8ab69c51dab 100644
--- a/drivers/scsi/fnic/fdls_disc.c
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -2074,7 +2074,8 @@ static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
void fdls_fabric_timer_callback(struct timer_list *t)
{
- struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, retry_timer);
+ struct fnic_fdls_fabric_s *fabric = timer_container_of(fabric, t,
+ retry_timer);
struct fnic_iport_s *iport =
container_of(fabric, struct fnic_iport_s, fabric);
struct fnic *fnic = iport->fnic;
@@ -2246,7 +2247,8 @@ void fdls_fabric_timer_callback(struct timer_list *t)
void fdls_fdmi_timer_callback(struct timer_list *t)
{
- struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, fdmi_timer);
+ struct fnic_fdls_fabric_s *fabric = timer_container_of(fabric, t,
+ fdmi_timer);
struct fnic_iport_s *iport =
container_of(fabric, struct fnic_iport_s, fabric);
struct fnic *fnic = iport->fnic;
@@ -2323,7 +2325,7 @@ static void fdls_send_delete_tport_msg(struct fnic_tport_s *tport)
static void fdls_tport_timer_callback(struct timer_list *t)
{
- struct fnic_tport_s *tport = from_timer(tport, t, retry_timer);
+ struct fnic_tport_s *tport = timer_container_of(tport, t, retry_timer);
struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
struct fnic *fnic = iport->fnic;
uint16_t oxid;
diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c
index 6e7c0b00eb41..ce62ab1180bd 100644
--- a/drivers/scsi/fnic/fip.c
+++ b/drivers/scsi/fnic/fip.c
@@ -200,7 +200,7 @@ void fnic_fcoe_start_fcf_discovery(struct fnic *fnic)
return;
}
- memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+ eth_zero_addr(iport->selected_fcf.fcf_mac);
pdisc_sol = (struct fip_discovery *) frame;
*pdisc_sol = (struct fip_discovery) {
@@ -588,12 +588,12 @@ void fnic_common_fip_cleanup(struct fnic *fnic)
if (!is_zero_ether_addr(iport->fpma))
vnic_dev_del_addr(fnic->vdev, iport->fpma);
- memset(iport->fpma, 0, ETH_ALEN);
+ eth_zero_addr(iport->fpma);
iport->fcid = 0;
iport->r_a_tov = 0;
iport->e_d_tov = 0;
- memset(fnic->iport.fcfmac, 0, ETH_ALEN);
- memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+ eth_zero_addr(fnic->iport.fcfmac);
+ eth_zero_addr(iport->selected_fcf.fcf_mac);
iport->selected_fcf.fcf_priority = 0;
iport->selected_fcf.fka_adv_period = 0;
iport->selected_fcf.ka_disabled = 0;
@@ -777,7 +777,7 @@ void fnic_work_on_fip_timer(struct work_struct *work)
*/
void fnic_handle_fip_timer(struct timer_list *t)
{
- struct fnic *fnic = from_timer(fnic, t, retry_fip_timer);
+ struct fnic *fnic = timer_container_of(fnic, t, retry_fip_timer);
INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fip_timer);
queue_work(fnic_fip_queue, &fnic->fip_timer_work);
@@ -790,7 +790,7 @@ void fnic_handle_fip_timer(struct timer_list *t)
void fnic_handle_enode_ka_timer(struct timer_list *t)
{
uint8_t *frame;
- struct fnic *fnic = from_timer(fnic, t, enode_ka_timer);
+ struct fnic *fnic = timer_container_of(fnic, t, enode_ka_timer);
struct fnic_iport_s *iport = &fnic->iport;
struct fip_enode_ka *penode_ka;
@@ -843,7 +843,7 @@ void fnic_handle_enode_ka_timer(struct timer_list *t)
void fnic_handle_vn_ka_timer(struct timer_list *t)
{
uint8_t *frame;
- struct fnic *fnic = from_timer(fnic, t, vn_ka_timer);
+ struct fnic *fnic = timer_container_of(fnic, t, vn_ka_timer);
struct fnic_iport_s *iport = &fnic->iport;
struct fip_vn_port_ka *pvn_port_ka;
@@ -998,7 +998,7 @@ void fnic_work_on_fcs_ka_timer(struct work_struct *work)
*/
void fnic_handle_fcs_ka_timer(struct timer_list *t)
{
- struct fnic *fnic = from_timer(fnic, t, fcs_ka_timer);
+ struct fnic *fnic = timer_container_of(fnic, t, fcs_ka_timer);
INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fcs_ka_timer);
queue_work(fnic_fip_queue, &fnic->fip_timer_work);
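
The fip.c hunks swap open-coded memset(..., 0, ETH_ALEN) for eth_zero_addr(), the etherdevice helper with identical behaviour that also reads naturally next to is_zero_ether_addr() checks like the one earlier in fnic_common_fip_cleanup(). Minimal sketch:

	#include <linux/etherdevice.h>

	static void clear_mac(u8 *mac)
	{
		eth_zero_addr(mac);		/* == memset(mac, 0, ETH_ALEN) */
		WARN_ON(!is_zero_ether_addr(mac));
	}
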
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 9a357ff42085..4cc4077ea53c 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -446,7 +446,7 @@ static int fnic_notify_set(struct fnic *fnic)
static void fnic_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = from_timer(fnic, t, notify_timer);
+ struct fnic *fnic = timer_container_of(fnic, t, notify_timer);
fnic_handle_link_event(fnic);
mod_timer(&fnic->notify_timer,
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index e17f5d8226bf..1323ed8aa717 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -46,6 +46,13 @@
#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10
#define HISI_SAS_FIFO_DATA_DW_SIZE 32
+#define HISI_SAS_REG_MEM_SIZE 4
+#define HISI_SAS_MAX_CDB_LEN 16
+#define HISI_SAS_BLK_QUEUE_DEPTH 64
+
+#define BYTE_TO_DW 4
+#define BYTE_TO_DDW 8
+
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -92,6 +99,8 @@
#define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ)
#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)
+#define HISI_SAS_DELAY_FOR_PHY_DISABLE 100
+#define NAME_BUF_SIZE 256
struct hisi_hba;
@@ -167,6 +176,8 @@ struct hisi_sas_debugfs_fifo {
u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE];
};
+#define FRAME_RCVD_BUF 32
+#define SAS_PHY_RESV_SIZE 2
struct hisi_sas_phy {
struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
@@ -178,10 +189,10 @@ struct hisi_sas_phy {
spinlock_t lock;
u64 port_id; /* from hw */
u64 frame_rcvd_size;
- u8 frame_rcvd[32];
+ u8 frame_rcvd[FRAME_RCVD_BUF];
u8 phy_attached;
u8 in_reset;
- u8 reserved[2];
+ u8 reserved[SAS_PHY_RESV_SIZE];
u32 phy_type;
u32 code_violation_err_count;
enum sas_linkrate minimum_linkrate;
@@ -348,7 +359,8 @@ struct hisi_sas_hw {
const struct scsi_host_template *sht;
};
-#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
+#define HISI_SAS_MAX_DEBUGFS_DUMP 50
+#define HISI_SAS_DEFAULT_DEBUGFS_DUMP 1
struct hisi_sas_debugfs_cq {
struct hisi_sas_cq *cq;
@@ -448,12 +460,12 @@ struct hisi_hba {
dma_addr_t sata_breakpoint_dma;
struct hisi_sas_slot *slot_info;
unsigned long flags;
- const struct hisi_sas_hw *hw; /* Low level hw interface */
+ const struct hisi_sas_hw *hw; /* Low level hw interface */
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
struct work_struct rst_work;
u32 phy_state;
- u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
- u32 intr_coal_count; /* Interrupt count to coalesce */
+ u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
+ u32 intr_coal_count; /* Interrupt count to coalesce */
int cq_nvecs;
@@ -528,12 +540,13 @@ struct hisi_sas_cmd_hdr {
__le64 dif_prd_table_addr;
};
+#define ITCT_RESV_DDW 12
struct hisi_sas_itct {
__le64 qw0;
__le64 sas_addr;
__le64 qw2;
__le64 qw3;
- __le64 qw4_15[12];
+ __le64 qw4_15[ITCT_RESV_DDW];
};
struct hisi_sas_iost {
@@ -543,22 +556,26 @@ struct hisi_sas_iost {
__le64 qw3;
};
+#define ERROR_RECORD_BUF_DW 4
struct hisi_sas_err_record {
- u32 data[4];
+ u32 data[ERROR_RECORD_BUF_DW];
};
+#define FIS_RESV_DW 3
struct hisi_sas_initial_fis {
struct hisi_sas_err_record err_record;
struct dev_to_host_fis fis;
- u32 rsvd[3];
+ u32 rsvd[FIS_RESV_DW];
};
+#define BREAKPOINT_DATA_SIZE 128
struct hisi_sas_breakpoint {
- u8 data[128];
+ u8 data[BREAKPOINT_DATA_SIZE];
};
+#define BREAKPOINT_TAG_NUM 32
struct hisi_sas_sata_breakpoint {
- struct hisi_sas_breakpoint tag[32];
+ struct hisi_sas_breakpoint tag[BREAKPOINT_TAG_NUM];
};
struct hisi_sas_sge {
@@ -569,13 +586,15 @@ struct hisi_sas_sge {
__le32 data_off;
};
+#define SMP_CMD_TABLE_SIZE 44
struct hisi_sas_command_table_smp {
- u8 bytes[44];
+ u8 bytes[SMP_CMD_TABLE_SIZE];
};
+#define DUMMY_BUF_SIZE 12
struct hisi_sas_command_table_stp {
struct host_to_dev_fis command_fis;
- u8 dummy[12];
+ u8 dummy[DUMMY_BUF_SIZE];
u8 atapi_cdb[ATAPI_CDB_LEN];
};
@@ -589,12 +608,13 @@ struct hisi_sas_sge_dif_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT];
} __aligned(16);
+#define PROT_BUF_SIZE 7
struct hisi_sas_command_table_ssp {
struct ssp_frame_hdr hdr;
union {
struct {
struct ssp_command_iu task;
- u32 prot[7];
+ u32 prot[PROT_BUF_SIZE];
};
struct ssp_tmf_iu ssp_task;
struct xfer_rdy_iu xfer_rdy;
@@ -608,9 +628,10 @@ union hisi_sas_command_table {
struct hisi_sas_command_table_stp stp;
} __aligned(16);
+#define IU_BUF_SIZE 1024
struct hisi_sas_status_buffer {
struct hisi_sas_err_record err;
- u8 iu[1024];
+ u8 iu[IU_BUF_SIZE];
} __aligned(16);
struct hisi_sas_slot_buf_table {
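
The hisi_sas.h hunks replace bare array sizes with named macros (FRAME_RCVD_BUF, ITCT_RESV_DDW, IU_BUF_SIZE, ...). Once the sizes are named, the hardware-layout assumptions they encode can also be asserted at compile time; a sketch under that assumption, not part of the patch:

	static inline void hisi_sas_layout_checks(void)
	{
		/* ITCT is 4 leading qwords plus ITCT_RESV_DDW reserved ones */
		BUILD_BUG_ON(sizeof(struct hisi_sas_itct) !=
			     (4 + ITCT_RESV_DDW) * sizeof(__le64));
	}
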
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 944cf2fb0561..d1a4cc69d408 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -7,6 +7,16 @@
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"
+#define LINK_RATE_BIT_MASK 2
+#define FIS_BUF_SIZE 20
+#define WAIT_CMD_COMPLETE_DELAY 100
+#define WAIT_CMD_COMPLETE_TMROUT 5000
+#define DELAY_FOR_LINK_READY 2000
+#define BLK_CNT_OPTIMIZE_MARK 64
+#define HZ_TO_MHZ 1000000
+#define DELAY_FOR_SOFTRESET_MAX 1000
+#define DELAY_FOR_SOFTRESET_MIN 900
+
#define DEV_IS_GONE(dev) \
((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
@@ -114,12 +124,10 @@ u8 hisi_sas_get_ata_protocol(struct sas_task *task)
}
default:
- {
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return hisi_sas_get_ata_protocol_from_tf(qc);
}
- }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
@@ -131,7 +139,7 @@ void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_status_buffer *status_buf =
hisi_sas_status_buf_addr_mem(slot);
u8 *iu = &status_buf->iu[0];
- struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
+ struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
resp->frame_len = sizeof(struct dev_to_host_fis);
memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
@@ -151,7 +159,7 @@ u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
max -= SAS_LINK_RATE_1_5_GBPS;
for (i = 0; i <= max; i++)
- rate |= 1 << (i * 2);
+ rate |= 1 << (i * LINK_RATE_BIT_MASK);
return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
@@ -900,7 +908,7 @@ int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
if (ret)
return ret;
if (!dev_is_sata(dev))
- sas_change_queue_depth(sdev, 64);
+ sas_change_queue_depth(sdev, HISI_SAS_BLK_QUEUE_DEPTH);
return 0;
}
@@ -1008,7 +1016,7 @@ EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
- struct hisi_sas_phy *phy = from_timer(phy, t, timer);
+ struct hisi_sas_phy *phy = timer_container_of(phy, t, timer);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct device *dev = hisi_hba->dev;
int phy_no = phy->sas_phy.id;
@@ -1262,7 +1270,7 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
sas_phy->phy->minimum_linkrate = min;
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
@@ -1292,7 +1300,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_LINK_RESET:
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
break;
@@ -1347,7 +1355,7 @@ static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
- u8 fis[20] = {0};
+ u8 fis[FIS_BUF_SIZE] = {0};
struct ata_port *ap = device->sata_dev.ap;
struct ata_link *link;
int rc = TMF_RESP_FUNC_FAILED;
@@ -1364,7 +1372,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
}
if (rc == TMF_RESP_FUNC_COMPLETE) {
- usleep_range(900, 1000);
+ usleep_range(DELAY_FOR_SOFTRESET_MIN, DELAY_FOR_SOFTRESET_MAX);
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
@@ -1494,7 +1502,7 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
struct ata_link *link;
- u8 fis[20] = {0};
+ u8 fis[FIS_BUF_SIZE] = {0};
int i;
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -1561,7 +1569,9 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
- hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
+ hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba,
+ WAIT_CMD_COMPLETE_DELAY,
+ WAIT_CMD_COMPLETE_TMROUT);
/*
* hisi_hba->timer is only used for v1/v2 hw, and check hw->sht
@@ -1862,7 +1872,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
smp_ata_check_ready_type);
} else {
- msleep(2000);
+ msleep(DELAY_FOR_LINK_READY);
}
return rc;
@@ -1885,33 +1895,14 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
}
hisi_sas_dereg_device(hisi_hba, device);
- rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
- struct sas_phy *local_phy;
-
+ if (dev_is_sata(device)) {
rc = hisi_sas_softreset_ata_disk(device);
- switch (rc) {
- case -ECOMM:
- rc = -ENODEV;
- break;
- case TMF_RESP_FUNC_FAILED:
- case -EMSGSIZE:
- case -EIO:
- local_phy = sas_get_local_phy(device);
- rc = sas_phy_enable(local_phy, 0);
- if (!rc) {
- local_phy->enabled = 0;
- dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
- SAS_ADDR(device->sas_addr), rc);
- rc = -ENODEV;
- }
- sas_put_local_phy(local_phy);
- break;
- default:
- break;
- }
+ if (rc == TMF_RESP_FUNC_FAILED)
+ dev_err(dev, "ata disk %016llx reset (%d)\n",
+ SAS_ADDR(device->sas_addr), rc);
}
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
hisi_sas_release_task(hisi_hba, device);
@@ -1934,12 +1925,9 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
hisi_sas_dereg_device(hisi_hba, device);
if (dev_is_sata(device)) {
- struct sas_phy *phy;
-
- phy = sas_get_local_phy(device);
+ struct sas_phy *phy = sas_get_local_phy(device);
rc = sas_phy_reset(phy, true);
-
if (rc == 0)
hisi_sas_release_task(hisi_hba, device);
sas_put_local_phy(phy);
@@ -2123,7 +2111,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
hisi_sas_port_notify_formed(sas_phy);
} else {
- struct hisi_sas_port *port = phy->port;
+ struct hisi_sas_port *port = phy->port;
if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
phy->in_reset) {
@@ -2296,12 +2284,14 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
goto err_out;
/* roundup to avoid overly large block size */
- max_command_entries_ru = roundup(max_command_entries, 64);
+ max_command_entries_ru = roundup(max_command_entries,
+ BLK_CNT_OPTIMIZE_MARK);
if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
else
sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
- sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
+
+ sz_slot_buf_ru = roundup(sz_slot_buf_ru, BLK_CNT_OPTIMIZE_MARK);
s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
slots_per_blk = s / sz_slot_buf_ru;
@@ -2466,7 +2456,8 @@ int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
if (IS_ERR(refclk))
dev_dbg(dev, "no ref clk property\n");
else
- hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
+ hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) /
+ HZ_TO_MHZ;
if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
dev_err(dev, "could not get property phy-count\n");
@@ -2588,7 +2579,7 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
shost->max_channel = 1;
- shost->max_cmd_len = 16;
+ shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
if (hisi_hba->hw->slot_index_alloc) {
shost->can_queue = HISI_SAS_MAX_COMMANDS;
shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 6621d633b2cc..fa94d7110714 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -795,7 +795,7 @@ static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
static void start_phys_v1_hw(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
+ struct hisi_hba *hisi_hba = timer_container_of(hisi_hba, t, timer);
int i;
for (i = 0; i < hisi_hba->n_phy; i++) {
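This and the many similar hunks below are part of a mechanical rename: from_timer() becomes timer_container_of(), and both recover the structure that embeds the timer_list via container_of(). A kernel-context sketch of the callback pattern (the struct and field names are illustrative, not the driver's):

/* The timer_list is embedded in a driver structure; the callback gets
 * only the timer pointer and recovers the outer structure from it.
 */
struct my_dev {
	int phy_count;
	struct timer_list timer;
};

static void my_timeout(struct timer_list *t)
{
	struct my_dev *dev = timer_container_of(dev, t, timer);

	/* dev now points at the structure that embeds 't' */
	(void)dev->phy_count;
}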
@@ -1759,7 +1759,7 @@ static const struct scsi_host_template sht_v1_hw = {
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v1_hw_groups,
- .host_reset = hisi_sas_host_reset,
+ .host_reset = hisi_sas_host_reset,
};
static const struct hisi_sas_hw hisi_sas_v1_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 1e9830940f84..24cd172905f3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1328,7 +1328,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
static void link_timeout_enable_link(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
+ struct hisi_hba *hisi_hba = timer_container_of(hisi_hba, t, timer);
int i, reg_val;
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -1349,7 +1349,7 @@ static void link_timeout_enable_link(struct timer_list *t)
static void link_timeout_disable_link(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
+ struct hisi_hba *hisi_hba = timer_container_of(hisi_hba, t, timer);
int i, reg_val;
reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -2581,7 +2581,8 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
{
- struct hisi_sas_slot *slot = from_timer(slot, t, internal_abort_timer);
+ struct hisi_sas_slot *slot = timer_container_of(slot, t,
+ internal_abort_timer);
struct hisi_sas_port *port = slot->port;
struct asd_sas_port *asd_sas_port;
struct asd_sas_phy *sas_phy;
@@ -2771,7 +2772,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
>> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
while (irq_msk) {
- if (irq_msk & 1) {
+ if (irq_msk & 1) {
u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT0);
@@ -3111,7 +3112,7 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-static irqreturn_t cq_thread_v2_hw(int irq_no, void *p)
+static irqreturn_t cq_thread_v2_hw(int irq_no, void *p)
{
struct hisi_sas_cq *cq = p;
struct hisi_hba *hisi_hba = cq->hisi_hba;
@@ -3499,7 +3500,7 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
* numbered drive in the fourth byte.
* See SFF-8485 Rev. 0.7 Table 24.
*/
- void __iomem *reg_addr = hisi_hba->sgpio_regs +
+ void __iomem *reg_addr = hisi_hba->sgpio_regs +
reg_index * 4 + phy_no;
int data_idx = phy_no + 3 - (phy_no % 4) * 2;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 08dac9ae2f10..bc5d5356dd00 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -466,6 +466,12 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+/* debugfs */
+#define TWO_PARA_PER_LINE 2
+#define FOUR_PARA_PER_LINE 4
+#define DUMP_BUF_SIZE 8
+#define BIST_BUF_SIZE 16
+
struct hisi_sas_protect_iu_v3_hw {
u32 dw0;
u32 lbrtcv;
@@ -536,6 +542,43 @@ struct hisi_sas_err_record_v3 {
#define BASE_VECTORS_V3_HW 16
#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
+#define IRQ_PHY_UP_DOWN_INDEX 1
+#define IRQ_CHL_INDEX 2
+#define IRQ_AXI_INDEX 11
+
+#define DELAY_FOR_RESET_HW 100
+#define HDR_SG_MOD 0x2
+#define LUN_SIZE 8
+#define ATTR_PRIO_REGION 9
+#define CDB_REGION 12
+#define PRIO_OFF 3
+#define TMF_REGION 10
+#define TAG_MSB 12
+#define TAG_LSB 13
+#define SMP_FRAME_TYPE 2
+#define SMP_CRC_SIZE 4
+#define HDR_TAG_OFF 3
+#define HOST_NO_OFF 6
+#define PHY_NO_OFF 7
+#define IDENTIFY_REG_READ 6
+#define LINK_RESET_TIMEOUT_OFF 4
+#define DECIMALISM_FLAG 10
+#define WAIT_RETRY 100
+#define WAIT_TMROUT 5000
+
+#define ID_DWORD0_INDEX 0
+#define ID_DWORD1_INDEX 1
+#define ID_DWORD2_INDEX 2
+#define ID_DWORD3_INDEX 3
+#define ID_DWORD4_INDEX 4
+#define ID_DWORD5_INDEX 5
+#define TICKS_BIT_INDEX 24
+#define COUNT_BIT_INDEX 8
+
+#define PORT_REG_LENGTH 0x100
+#define GLOBAL_REG_LENGTH 0x800
+#define AXI_REG_LENGTH 0x61
+#define RAS_REG_LENGTH 0x10
#define CHNL_INT_STS_MSK 0xeeeeeeee
#define CHNL_INT_STS_PHY_MSK 0xe
@@ -811,17 +854,17 @@ static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
identify_buffer = (u32 *)(&identify_frame);
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
- __swab32(identify_buffer[0]));
+ __swab32(identify_buffer[ID_DWORD0_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
- __swab32(identify_buffer[1]));
+ __swab32(identify_buffer[ID_DWORD1_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
- __swab32(identify_buffer[2]));
+ __swab32(identify_buffer[ID_DWORD2_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
- __swab32(identify_buffer[3]));
+ __swab32(identify_buffer[ID_DWORD3_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
- __swab32(identify_buffer[4]));
+ __swab32(identify_buffer[ID_DWORD4_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
- __swab32(identify_buffer[5]));
+ __swab32(identify_buffer[ID_DWORD5_INDEX]));
}
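For reference, the six writes above can also be read as one loop over a small register table. The sketch below is illustrative only; the helpers and register names are the driver's, but the loop form is not the patch's code:

static void write_id_frame(struct hisi_hba *hisi_hba, int phy_no,
			   const u32 *identify_buffer)
{
	static const u32 tx_id_regs[] = {
		TX_ID_DWORD0, TX_ID_DWORD1, TX_ID_DWORD2,
		TX_ID_DWORD3, TX_ID_DWORD4, TX_ID_DWORD5,
	};
	int i;

	/* each dword is byte-swapped before hitting the register */
	for (i = ID_DWORD0_INDEX; i <= ID_DWORD5_INDEX; i++)
		hisi_sas_phy_write32(hisi_hba, phy_no, tx_id_regs[i],
				     __swab32(identify_buffer[i]));
}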
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
@@ -941,7 +984,7 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
/* Disable all of the PHYs */
hisi_sas_stop_phys(hisi_hba);
- udelay(50);
+ udelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
/* Ensure axi bus idle */
ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
@@ -981,7 +1024,7 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
return rc;
}
- msleep(100);
+ msleep(DELAY_FOR_RESET_HW);
init_reg_v3_hw(hisi_hba);
if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
@@ -1030,7 +1073,7 @@ static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
cfg &= ~PHY_CFG_ENA_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
- mdelay(50);
+ mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
state = hisi_sas_read32(hisi_hba, PHY_STATE);
if (state & BIT(phy_no)) {
@@ -1066,7 +1109,7 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
txid_auto | TX_HARDRST_MSK);
}
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
@@ -1111,7 +1154,8 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
for (i = 0; i < hisi_hba->n_phy; i++)
if (phy_state & BIT(i))
- if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ if (((phy_port_num_ma >> (i * HISI_SAS_REG_MEM_SIZE)) & 0xf) ==
+ port_id)
bitmap |= BIT(i);
return bitmap;
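PHY_PORT_NUM_MA packs one 4-bit port id per phy, so the shift width here is 4 bits per phy. Note that the patch reuses HISI_SAS_REG_MEM_SIZE (the 4-byte register stride, as inferred from the other hunks) for that width because both values happen to be 4. A sketch of the extraction:

static u32 port_id_of_phy(u32 phy_port_num_ma, int phy_no)
{
	/* each phy owns one 4-bit field in the register */
	return (phy_port_num_ma >> (phy_no * HISI_SAS_REG_MEM_SIZE)) & 0xf;
}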
@@ -1308,10 +1352,10 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
/* map itct entry */
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
- dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
- + 3) / 4) << CMD_HDR_CFL_OFF) |
- ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
- (2 << CMD_HDR_SG_MOD_OFF);
+ dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) +
+ 3) / BYTE_TO_DW) << CMD_HDR_CFL_OFF) |
+ ((HISI_SAS_MAX_SSP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_MRFL_OFF) |
+ (HDR_SG_MOD << CMD_HDR_SG_MOD_OFF);
hdr->dw2 = cpu_to_le32(dw2);
hdr->transfer_tags = cpu_to_le32(slot->idx);
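dw2 packs three fields: the command frame length in dwords (rounded up), the maximum response frame length in dwords, and the scatter-gather mode. A kernel-context sketch of the same packing, assuming BYTE_TO_DW is 4 (bytes per 32-bit dword), so "(len + 3) / 4" is simply a round-up:

u32 frame_bytes = sizeof(struct ssp_command_iu) +
		  sizeof(struct ssp_frame_hdr);
u32 cfl = DIV_ROUND_UP(frame_bytes, BYTE_TO_DW);	/* frame len, dwords */
u32 dw2 = (cfl << CMD_HDR_CFL_OFF) |
	  ((HISI_SAS_MAX_SSP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_MRFL_OFF) |
	  (HDR_SG_MOD << CMD_HDR_SG_MOD_OFF);		/* SG mode 0x2 */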
@@ -1331,18 +1375,19 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
sizeof(struct ssp_frame_hdr);
- memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+ memcpy(buf_cmd, &task->ssp_task.LUN, LUN_SIZE);
if (!tmf) {
- buf_cmd[9] = ssp_task->task_attr;
- memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ buf_cmd[ATTR_PRIO_REGION] = ssp_task->task_attr;
+ memcpy(buf_cmd + CDB_REGION, scsi_cmnd->cmnd,
+ scsi_cmnd->cmd_len);
} else {
- buf_cmd[10] = tmf->tmf;
+ buf_cmd[TMF_REGION] = tmf->tmf;
switch (tmf->tmf) {
case TMF_ABORT_TASK:
case TMF_QUERY_TASK:
- buf_cmd[12] =
+ buf_cmd[TAG_MSB] =
(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
- buf_cmd[13] =
+ buf_cmd[TAG_LSB] =
tmf->tag_of_task_to_be_managed & 0xff;
break;
default:
@@ -1375,7 +1420,8 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
unsigned int interval = scsi_prot_interval(scsi_cmnd);
unsigned int ilog2_interval = ilog2(interval);
- len = (task->total_xfer_len >> ilog2_interval) * 8;
+ len = (task->total_xfer_len >> ilog2_interval) *
+ BYTE_TO_DDW;
}
}
@@ -1395,6 +1441,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
unsigned int req_len;
+ u32 cfl;
/* req */
sg_req = &task->smp_task.smp_req;
@@ -1405,7 +1452,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
/* dw0 */
hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
(1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
- (2 << CMD_HDR_CMD_OFF)); /* smp */
+ (SMP_FRAME_TYPE << CMD_HDR_CMD_OFF)); /* smp */
/* map itct entry */
hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
@@ -1413,8 +1460,9 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
(DIR_NO_DATA << CMD_HDR_DIR_OFF));
/* dw2 */
- hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
- (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
+ cfl = (req_len - SMP_CRC_SIZE) / BYTE_TO_DW;
+ hdr->dw2 = cpu_to_le32((cfl << CMD_HDR_CFL_OFF) |
+ (HISI_SAS_MAX_SMP_RESP_SZ / BYTE_TO_DW <<
CMD_HDR_MRFL_OFF));
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
@@ -1479,12 +1527,13 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct ata_queued_cmd *qc = task->uldd_task;
hdr_tag = qc->tag;
- task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ task->ata_task.fis.sector_count |=
+ (u8)(hdr_tag << HDR_TAG_OFF);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
- dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
- 2 << CMD_HDR_SG_MOD_OFF;
+ dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_CFL_OFF |
+ HDR_SG_MOD << CMD_HDR_SG_MOD_OFF;
hdr->dw2 = cpu_to_le32(dw2);
/* dw3 */
@@ -1544,9 +1593,9 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
- port_id = (port_id >> (4 * phy_no)) & 0xf;
+ port_id = (port_id >> (HISI_SAS_REG_MEM_SIZE * phy_no)) & 0xf;
link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
- link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ link_rate = (link_rate >> (phy_no * HISI_SAS_REG_MEM_SIZE)) & 0xf;
if (port_id == 0xf) {
dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
@@ -1579,8 +1628,8 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
sas_phy->oob_mode = SATA_OOB_MODE;
attached_sas_addr[0] = 0x50;
- attached_sas_addr[6] = shost->host_no;
- attached_sas_addr[7] = phy_no;
+ attached_sas_addr[HOST_NO_OFF] = shost->host_no;
+ attached_sas_addr[PHY_NO_OFF] = phy_no;
memcpy(sas_phy->attached_sas_addr,
attached_sas_addr,
SAS_ADDR_SIZE);
@@ -1596,7 +1645,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
(struct sas_identify_frame *)frame_rcvd;
dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < IDENTIFY_REG_READ; i++) {
u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
RX_IDAF_DWORD0 + (i * 4));
frame_rcvd[i] = __swab32(idaf);
@@ -1701,7 +1750,7 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
& 0x11111111;
while (irq_msk) {
- if (irq_msk & 1) {
+ if (irq_msk & 1) {
u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
CHL_INT0);
u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -1866,7 +1915,7 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
phy_no, reg_value);
- if (reg_value & BIT(4))
+ if (reg_value & BIT(LINK_RESET_TIMEOUT_OFF))
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}
@@ -1924,8 +1973,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
u32 irq_msk;
int phy_no = 0;
- irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
- & CHNL_INT_STS_MSK;
+ irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & CHNL_INT_STS_MSK;
while (irq_msk) {
if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH)))
@@ -2570,7 +2618,6 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
if (vectors < 0)
return -ENOENT;
-
hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt;
shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt;
@@ -2583,7 +2630,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
struct pci_dev *pdev = hisi_hba->pci_dev;
int rc, i;
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX),
int_phy_up_down_bcast_v3_hw, 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -2591,7 +2638,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
return -ENOENT;
}
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_CHL_INDEX),
int_chnl_int_v3_hw, 0,
DRV_NAME " channel", hisi_hba);
if (rc) {
@@ -2599,7 +2646,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
return -ENOENT;
}
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_AXI_INDEX),
fatal_axi_int_v3_hw, 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -2612,7 +2659,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+ int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW :
+ BASE_VECTORS_V3_HW + i;
unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
IRQF_ONESHOT;
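The v3 hardware reserves the first BASE_VECTORS_V3_HW (16) MSI-X vectors for fixed-purpose interrupts (phy up/down at index 1, channel at 2, fatal AXI at 11, per the new IRQ_*_INDEX macros); completion-queue vectors follow, unless interrupt conversion folds every CQ onto the shared base vector. A sketch of that numbering:

static int cq_vector_nr(bool intr_conv, int cq_index)
{
	/* shared mode: all CQs on the base vector; else one per CQ */
	return intr_conv ? BASE_VECTORS_V3_HW : BASE_VECTORS_V3_HW + cq_index;
}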
@@ -2670,14 +2718,14 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
struct pci_dev *pdev = hisi_hba->pci_dev;
int i;
- synchronize_irq(pci_irq_vector(pdev, 1));
- synchronize_irq(pci_irq_vector(pdev, 2));
- synchronize_irq(pci_irq_vector(pdev, 11));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_CHL_INDEX));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_AXI_INDEX));
for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
for (i = 0; i < hisi_hba->cq_nvecs; i++)
- synchronize_irq(pci_irq_vector(pdev, i + 16));
+ synchronize_irq(pci_irq_vector(pdev, i + BASE_VECTORS_V3_HW));
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
@@ -2709,7 +2757,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_stop_phys(hisi_hba);
- mdelay(10);
+ mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
AM_CTRL_GLOBAL);
@@ -2846,13 +2894,13 @@ static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
u32 intr_coal_ticks;
int ret;
- ret = kstrtou32(buf, 10, &intr_coal_ticks);
+ ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_ticks);
if (ret) {
dev_err(dev, "Input data of interrupt coalesce unmatch\n");
return -EINVAL;
}
- if (intr_coal_ticks >= BIT(24)) {
+ if (intr_coal_ticks >= BIT(TICKS_BIT_INDEX)) {
dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
return -EINVAL;
}
@@ -2885,13 +2933,13 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
u32 intr_coal_count;
int ret;
- ret = kstrtou32(buf, 10, &intr_coal_count);
+ ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_count);
if (ret) {
dev_err(dev, "Input data of interrupt coalesce unmatch\n");
return -EINVAL;
}
- if (intr_coal_count >= BIT(8)) {
+ if (intr_coal_count >= BIT(COUNT_BIT_INDEX)) {
dev_err(dev, "intr_coal_count must be less than 2^8!\n");
return -EINVAL;
}
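Both stores above follow the same validation pattern: parse the decimal sysfs input, then reject any value that does not fit in the hardware field (24 bits for ticks, 8 for count). A generic kernel-context sketch, with the field width as a parameter:

static int parse_coal_field(const char *buf, unsigned int width, u32 *out)
{
	u32 val;

	if (kstrtou32(buf, 10, &val))	/* base 10, a.k.a. DECIMALISM_FLAG */
		return -EINVAL;
	if (val >= BIT(width))		/* must fit in 'width' bits */
		return -EINVAL;
	*out = val;
	return 0;
}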
@@ -3023,7 +3071,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
.lu = debugfs_port_reg_lu,
- .count = 0x100,
+ .count = PORT_REG_LENGTH,
.base_off = PORT_BASE,
};
@@ -3097,7 +3145,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
.lu = debugfs_global_reg_lu,
- .count = 0x800,
+ .count = GLOBAL_REG_LENGTH,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
@@ -3110,7 +3158,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
.lu = debugfs_axi_reg_lu,
- .count = 0x61,
+ .count = AXI_REG_LENGTH,
.base_off = AXI_MASTER_CFG_BASE,
};
@@ -3127,7 +3175,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
.lu = debugfs_ras_reg_lu,
- .count = 0x10,
+ .count = RAS_REG_LENGTH,
.base_off = RAS_BASE,
};
@@ -3136,7 +3184,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
struct Scsi_Host *shost = hisi_hba->shost;
scsi_block_requests(shost);
- wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
+ wait_cmds_complete_timeout_v3_hw(hisi_hba, WAIT_RETRY, WAIT_TMROUT);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
hisi_sas_sync_cqs(hisi_hba);
@@ -3177,7 +3225,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
return;
}
- memset(buf, 0, cache_dw_size * 4);
+ memset(buf, 0, cache_dw_size * BYTE_TO_DW);
buf[0] = val;
for (i = 1; i < cache_dw_size; i++)
@@ -3224,7 +3272,7 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
/* init OOB link rate as 1.5 Gbits */
reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
- reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ reg_val |= (SAS_LINK_RATE_1_5_GBPS << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);
/* enable PHY */
@@ -3233,6 +3281,9 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
#define SAS_PHY_BIST_CODE_INIT 0x1
#define SAS_PHY_BIST_CODE1_INIT 0X80
+#define SAS_PHY_BIST_INIT_DELAY 100
+#define SAS_PHY_BIST_LOOP_TEST_0 1
+#define SAS_PHY_BIST_LOOP_TEST_1 2
static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
u32 reg_val, mode_tmp;
@@ -3251,12 +3302,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
fix_code[FIXED_CODE_1]);
- mode_tmp = path_mode ? 2 : 1;
+ mode_tmp = path_mode ? SAS_PHY_BIST_LOOP_TEST_1 :
+ SAS_PHY_BIST_LOOP_TEST_0;
if (enable) {
/* some preparations before bist test */
hisi_sas_bist_test_prep_v3_hw(hisi_hba);
- /* set linkrate of bit test*/
+ /* set linkrate of bit test */
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
PROG_PHY_LINK_RATE);
reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
@@ -3294,13 +3346,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
SAS_PHY_BIST_CODE1_INIT);
}
- mdelay(100);
+ mdelay(SAS_PHY_BIST_INIT_DELAY);
reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
reg_val);
/* clear error bit */
- mdelay(100);
+ mdelay(SAS_PHY_BIST_INIT_DELAY);
hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
} else {
/* disable bist test and recover it */
@@ -3354,7 +3406,7 @@ static const struct scsi_host_template sht_v3_hw = {
.shost_groups = host_v3_hw_groups,
.sdev_groups = sdev_groups_v3_hw,
.tag_alloc_policy_rr = true,
- .host_reset = hisi_sas_host_reset,
+ .host_reset = hisi_sas_host_reset,
.host_tagset = 1,
.mq_poll = queue_complete_v3_hw,
};
@@ -3496,7 +3548,7 @@ static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba)
for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
for (i = 0; i < port->count; i++, databuf++) {
- offset = port->base_off + 4 * i;
+ offset = port->base_off + HISI_SAS_REG_MEM_SIZE * i;
*databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt,
offset);
}
@@ -3510,7 +3562,8 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i);
}
static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3521,7 +3574,9 @@ static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < axi->count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i +
+ axi->base_off);
}
static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3532,7 +3587,9 @@ static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < ras->count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i +
+ ras->base_off);
}
static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3595,12 +3652,11 @@ static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
int i;
for (i = 0; i < reg->count; i++) {
- int off = i * 4;
+ int off = i * HISI_SAS_REG_MEM_SIZE;
const char *name;
name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
reg->lu);
-
if (name)
seq_printf(s, "0x%08x 0x%08x %s\n", off,
regs_val[i], name);
@@ -3673,9 +3729,9 @@ static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index,
/* completion header size not fixed per HW version */
seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 8; i++, ptr++) {
+ for (i = 1; i <= sz / BYTE_TO_DDW; i++, ptr++) {
seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
- if (!(i % 2))
+ if (!(i % TWO_PARA_PER_LINE))
seq_puts(s, "\n\t");
}
@@ -3689,9 +3745,9 @@ static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index,
/* completion header size not fixed per HW version */
seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 4; i++, ptr++) {
+ for (i = 1; i <= sz / BYTE_TO_DW; i++, ptr++) {
seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
- if (!(i % 4))
+ if (!(i % FOUR_PARA_PER_LINE))
seq_puts(s, "\n\t");
}
seq_puts(s, "\n");
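The two row printers differ only in word size and words per line: 64-bit values go two per line (TWO_PARA_PER_LINE), 32-bit values four per line (FOUR_PARA_PER_LINE), each row continued with "\n\t". A runnable user-space analogue of the 32-bit variant:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define FOUR_PARA_PER_LINE 4	/* words printed per output line */

static void show_row_32(int index, size_t nwords, const uint32_t *ptr)
{
	size_t i;

	printf("index %04d:\n\t", index);
	for (i = 1; i <= nwords; i++, ptr++) {
		printf(" 0x%08x", *ptr);
		if (!(i % FOUR_PARA_PER_LINE))
			printf("\n\t");	/* start the next row */
	}
	printf("\n");
}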
@@ -3776,7 +3832,7 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
struct hisi_sas_iost_itct_cache *iost_cache =
debugfs_iost_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW;
int i, tab_idx;
__le64 *iost;
@@ -3824,7 +3880,7 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
struct hisi_sas_iost_itct_cache *itct_cache =
debugfs_itct_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW;
int i, tab_idx;
__le64 *itct;
@@ -3853,12 +3909,12 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
u64 *debugfs_timestamp;
struct dentry *dump_dentry;
struct dentry *dentry;
- char name[256];
+ char name[NAME_BUF_SIZE];
int p;
int c;
int d;
- snprintf(name, 256, "%d", index);
+ snprintf(name, NAME_BUF_SIZE, "%d", index);
dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
@@ -3874,7 +3930,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create port dir and files */
dentry = debugfs_create_dir("port", dump_dentry);
for (p = 0; p < hisi_hba->n_phy; p++) {
- snprintf(name, 256, "%d", p);
+ snprintf(name, NAME_BUF_SIZE, "%d", p);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_port_reg[index][p],
@@ -3884,7 +3940,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create CQ dir and files */
dentry = debugfs_create_dir("cq", dump_dentry);
for (c = 0; c < hisi_hba->queue_count; c++) {
- snprintf(name, 256, "%d", c);
+ snprintf(name, NAME_BUF_SIZE, "%d", c);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_cq[index][c],
@@ -3894,7 +3950,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create DQ dir and files */
dentry = debugfs_create_dir("dq", dump_dentry);
for (d = 0; d < hisi_hba->queue_count; d++) {
- snprintf(name, 256, "%d", d);
+ snprintf(name, NAME_BUF_SIZE, "%d", d);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_dq[index][d],
@@ -3931,9 +3987,9 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
size_t count, loff_t *ppos)
{
struct hisi_hba *hisi_hba = file->f_inode->i_private;
- char buf[8];
+ char buf[DUMP_BUF_SIZE];
- if (count > 8)
+ if (count > DUMP_BUF_SIZE)
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
@@ -3997,7 +4053,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -4014,7 +4070,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name,
- pkbuf, 16)) {
+ pkbuf, BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_linkrate =
debugfs_loop_linkrate_v3_hw[i].value;
found = true;
@@ -4072,7 +4128,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -4089,7 +4145,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name,
- pkbuf, 16)) {
+ pkbuf, BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_code_mode =
debugfs_loop_code_mode_v3_hw[i].value;
found = true;
@@ -4204,7 +4260,7 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -4220,7 +4276,8 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
pkbuf = strstrip(kbuf);
for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
- if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) {
+ if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf,
+ BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_mode =
debugfs_loop_modes_v3_hw[i].value;
found = true;
@@ -4499,8 +4556,9 @@ static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
debugfs_read_fifo_data_v3_hw(phy);
- debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4,
- (__le32 *)phy->fifo.rd_data);
+ debugfs_show_row_32_v3_hw(s, 0,
+ HISI_SAS_FIFO_DATA_DW_SIZE * HISI_SAS_REG_MEM_SIZE,
+ (__le32 *)phy->fifo.rd_data);
return 0;
}
@@ -4632,14 +4690,14 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
struct hisi_sas_debugfs_regs *regs =
&hisi_hba->debugfs_regs[dump_index][r];
- sz = debugfs_reg_array_v3_hw[r]->count * 4;
+ sz = debugfs_reg_array_v3_hw[r]->count * HISI_SAS_REG_MEM_SIZE;
regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
if (!regs->data)
goto fail;
regs->hisi_hba = hisi_hba;
}
- sz = debugfs_port_reg.count * 4;
+ sz = debugfs_port_reg.count * HISI_SAS_REG_MEM_SIZE;
for (p = 0; p < hisi_hba->n_phy; p++) {
struct hisi_sas_debugfs_port *port =
&hisi_hba->debugfs_port_reg[dump_index][p];
@@ -4749,11 +4807,11 @@ static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct dentry *dir = debugfs_create_dir("phy_down_cnt",
hisi_hba->debugfs_dir);
- char name[16];
+ char name[NAME_BUF_SIZE];
int phy_no;
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
- snprintf(name, 16, "%d", phy_no);
+ snprintf(name, NAME_BUF_SIZE, "%d", phy_no);
debugfs_create_file(name, 0600, dir,
&hisi_hba->phy[phy_no],
&debugfs_phy_down_cnt_v3_hw_fops);
@@ -4938,7 +4996,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
shost->max_channel = 1;
- shost->max_cmd_len = 16;
+ shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
if (hisi_hba->iopoll_q_cnt)
@@ -5016,12 +5074,13 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
int i;
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_CHL_INDEX), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_AXI_INDEX), hisi_hba);
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+ int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW :
+ BASE_VECTORS_V3_HW + i;
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
@@ -5051,9 +5110,11 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = hisi_hba->shost;
struct device *dev = hisi_hba->dev;
int rc;
+ wait_event(shost->host_wait, !scsi_host_in_recovery(shost));
dev_info(dev, "FLR prepare\n");
down(&hisi_hba->sem);
set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4c493b06062a..862ab0fbc893 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1697,7 +1697,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
**/
static void ibmvfc_timeout(struct timer_list *t)
{
- struct ibmvfc_event *evt = from_timer(evt, t, timer);
+ struct ibmvfc_event *evt = timer_container_of(evt, t, timer);
struct ibmvfc_host *vhost = evt->vhost;
dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
ibmvfc_reset_host(vhost);
@@ -4630,7 +4630,7 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
**/
static void ibmvfc_adisc_timeout(struct timer_list *t)
{
- struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
+ struct ibmvfc_target *tgt = timer_container_of(tgt, t, timer);
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d65a45860b33..3d65a498b701 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -845,7 +845,8 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
*/
static void ibmvscsi_timeout(struct timer_list *t)
{
- struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer);
+ struct srp_event_struct *evt_struct = timer_container_of(evt_struct,
+ t, timer);
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 1d4c7310f1a6..0821cf994b98 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1224,7 +1224,6 @@ static int __imm_attach(struct parport *pb)
host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
if (!host)
goto out1;
- host->no_highmem = true;
host->io_port = pb->base;
host->n_io_port = ports;
host->dma_channel = -1;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d89135fb8faa..b29bec6abd72 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2589,7 +2589,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_timeout(struct timer_list *t)
{
- struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
+ struct ipr_cmnd *ipr_cmd = timer_container_of(ipr_cmd, t, timer);
unsigned long lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
@@ -2622,7 +2622,7 @@ static void ipr_timeout(struct timer_list *t)
**/
static void ipr_oper_timeout(struct timer_list *t)
{
- struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
+ struct ipr_cmnd *ipr_cmd = timer_container_of(ipr_cmd, t, timer);
unsigned long lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
@@ -5151,7 +5151,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_abort_timeout(struct timer_list *t)
{
- struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
+ struct ipr_cmnd *ipr_cmd = timer_container_of(ipr_cmd, t, timer);
struct ipr_cmnd *reset_cmd;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_cmd_pkt *cmd_pkt;
@@ -7476,7 +7476,7 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_reset_timer_done(struct timer_list *t)
{
- struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
+ struct ipr_cmnd *ipr_cmd = timer_container_of(ipr_cmd, t, timer);
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
unsigned long lock_flags = 0;
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index c108b5b940c3..6d2f4c831df7 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -958,7 +958,7 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
static void phy_startup_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = from_timer(tmr, t, timer);
+ struct sci_timer *tmr = timer_container_of(tmr, t, timer);
struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
unsigned long flags;
enum sci_status status;
@@ -1592,7 +1592,7 @@ static const struct sci_base_state sci_controller_state_table[] = {
static void controller_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = from_timer(tmr, t, timer);
+ struct sci_timer *tmr = timer_container_of(tmr, t, timer);
struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
struct sci_base_state_machine *sm = &ihost->sm;
unsigned long flags;
@@ -1737,7 +1737,7 @@ static u8 max_spin_up(struct isci_host *ihost)
static void power_control_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = from_timer(tmr, t, timer);
+ struct sci_timer *tmr = timer_container_of(tmr, t, timer);
struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
struct isci_phy *iphy;
unsigned long flags;
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 743a3c64b0da..88237ec8b15f 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -317,7 +317,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
static void phy_sata_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = from_timer(tmr, t, timer);
+ struct sci_timer *tmr = timer_container_of(tmr, t, timer);
struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
struct isci_host *ihost = iphy->owning_port->owning_controller;
unsigned long flags;
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 1609aba1c9c1..10bd2aac2cb4 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -775,7 +775,7 @@ bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
static void port_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = from_timer(tmr, t, timer);
+ struct sci_timer *tmr = timer_container_of(tmr, t, timer);
struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
struct isci_host *ihost = iport->owning_controller;
unsigned long flags;
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index c382a257b51b..3b4820defe63 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -321,7 +321,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
static void mpc_agent_timeout(struct timer_list *t)
{
u8 index;
- struct sci_timer *tmr = from_timer(tmr, t, timer);
+ struct sci_timer *tmr = timer_container_of(tmr, t, timer);
struct sci_port_configuration_agent *port_agent;
struct isci_host *ihost;
unsigned long flags;
@@ -659,7 +659,7 @@ static void sci_apc_agent_link_down(
static void apc_agent_timeout(struct timer_list *t)
{
u32 index;
- struct sci_timer *tmr = from_timer(tmr, t, timer);
+ struct sci_timer *tmr = timer_container_of(tmr, t, timer);
struct sci_port_configuration_agent *port_agent;
struct isci_host *ihost;
unsigned long flags;
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 287e1ba8ddd7..82deb6a83a8c 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -392,36 +392,6 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
}
}
-enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
-{
- struct sci_base_state_machine *sm = &idev->sm;
- enum sci_remote_device_states state = sm->current_state_id;
-
- switch (state) {
- case SCI_DEV_INITIAL:
- case SCI_DEV_STOPPED:
- case SCI_DEV_STARTING:
- case SCI_SMP_DEV_IDLE:
- case SCI_SMP_DEV_CMD:
- case SCI_DEV_STOPPING:
- case SCI_DEV_FAILED:
- case SCI_DEV_RESETTING:
- case SCI_DEV_FINAL:
- default:
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
- __func__, dev_state_name(state));
- return SCI_FAILURE_INVALID_STATE;
- case SCI_DEV_READY:
- case SCI_STP_DEV_IDLE:
- case SCI_STP_DEV_CMD:
- case SCI_STP_DEV_NCQ:
- case SCI_STP_DEV_NCQ_ERROR:
- case SCI_STP_DEV_AWAIT_RESET:
- sci_change_state(sm, SCI_DEV_RESETTING);
- return SCI_SUCCESS;
- }
-}
-
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 561ae3f2cbbd..c1fdf45751cd 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -160,21 +160,6 @@ enum sci_status sci_remote_device_stop(
u32 timeout);
/**
- * sci_remote_device_reset() - This method will reset the device making it
- * ready for operation. This method must be called anytime the device is
- * reset either through a SMP phy control or a port hard reset request.
- * @remote_device: This parameter specifies the device to be reset.
- *
- * This method does not actually cause the device hardware to be reset. This
- * method resets the software object so that it will be operational after a
- * device hardware reset completes. An indication of whether the device reset
- * was accepted. SCI_SUCCESS This value is returned if the device reset is
- * started.
- */
-enum sci_status sci_remote_device_reset(
- struct isci_remote_device *idev);
-
-/**
* enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
* @SCI_DEV_INITIAL: Simply the initial state for the base remote device
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index e705c30b4e1b..16d0f02af1e4 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1283,7 +1283,7 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
*/
static void fc_lun_reset_send(struct timer_list *t)
{
- struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
+ struct fc_fcp_pkt *fsp = timer_container_of(fsp, t, timer);
struct fc_lport *lport = fsp->lp;
if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
@@ -1416,7 +1416,7 @@ static void fc_fcp_cleanup(struct fc_lport *lport)
*/
static void fc_fcp_timeout(struct timer_list *t)
{
- struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
+ struct fc_fcp_pkt *fsp = timer_container_of(fsp, t, timer);
struct fc_rport *rport = fsp->rport;
struct fc_rport_libfc_priv *rpriv = rport->dd_data;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 1ddaf7228340..392d57e054db 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1898,7 +1898,8 @@ EXPORT_SYMBOL_GPL(iscsi_target_alloc);
static void iscsi_tmf_timedout(struct timer_list *t)
{
- struct iscsi_session *session = from_timer(session, t, tmf_timer);
+ struct iscsi_session *session = timer_container_of(session, t,
+ tmf_timer);
spin_lock(&session->frwd_lock);
if (session->tmf_state == TMF_QUEUED) {
@@ -2240,7 +2241,7 @@ EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
static void iscsi_check_transport_timeouts(struct timer_list *t)
{
- struct iscsi_conn *conn = from_timer(conn, t, transport_timer);
+ struct iscsi_conn *conn = timer_container_of(conn, t, transport_timer);
struct iscsi_session *session = conn->session;
unsigned long recv_timeout, next_timeout = 0, last_recv;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index feb2461b90e8..928723c90b75 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -865,7 +865,7 @@ void sas_task_internal_done(struct sas_task *task)
void sas_task_internal_timedout(struct timer_list *t)
{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task_slow *slow = timer_container_of(slow, t, timer);
struct sas_task *task = slow->task;
bool is_completed = true;
unsigned long flags;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 397216ff2c7e..54ee8ecec3b3 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -291,6 +291,138 @@ buffer_done:
return len;
}
+static ssize_t
+lpfc_vmid_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vmid *vmp;
+ int len = 0, i, j, k, cpu;
+ char hxstr[LPFC_MAX_VMID_SIZE * 3] = {0};
+ struct timespec64 curr_tm;
+ struct lpfc_vmid_priority_range *vr;
+ u64 *lta, rct_acc = 0, max_lta = 0;
+ struct tm tm_val;
+
+ ktime_get_ts64(&curr_tm);
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "Key 'vmid':\n");
+
+ /* if enabled continue, else return */
+ if (lpfc_is_vmid_enabled(phba)) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "lpfc VMID Page: ON\n\n");
+ } else {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "lpfc VMID Page: OFF\n\n");
+ return len;
+ }
+
+ /* if using priority tagging */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "VMID priority ranges:\n");
+ vr = vport->vmid_priority.vmid_range;
+ for (i = 0; i < vport->vmid_priority.num_descriptors; ++i) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "\t[x%x - x%x], qos: x%x\n",
+ vr->low, vr->high, vr->qos);
+ vr++;
+ }
+ }
+
+ for (i = 0; i < phba->cfg_max_vmid; i++) {
+ vmp = &vport->vmid[i];
+ max_lta = 0;
+
+ /* only if the slot is used */
+ if (!(vmp->flag & LPFC_VMID_SLOT_USED) ||
+ !(vmp->flag & LPFC_VMID_REGISTERED))
+ continue;
+
+ /* if using priority tagging */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "VEM ID: %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ vport->lpfc_vmid_host_uuid[0],
+ vport->lpfc_vmid_host_uuid[1],
+ vport->lpfc_vmid_host_uuid[2],
+ vport->lpfc_vmid_host_uuid[3],
+ vport->lpfc_vmid_host_uuid[4],
+ vport->lpfc_vmid_host_uuid[5],
+ vport->lpfc_vmid_host_uuid[6],
+ vport->lpfc_vmid_host_uuid[7],
+ vport->lpfc_vmid_host_uuid[8],
+ vport->lpfc_vmid_host_uuid[9],
+ vport->lpfc_vmid_host_uuid[10],
+ vport->lpfc_vmid_host_uuid[11],
+ vport->lpfc_vmid_host_uuid[12],
+ vport->lpfc_vmid_host_uuid[13],
+ vport->lpfc_vmid_host_uuid[14],
+ vport->lpfc_vmid_host_uuid[15]);
+ }
+
+ /* IO stats */
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "ID00 READs:%llx WRITEs:%llx\n",
+ vmp->io_rd_cnt,
+ vmp->io_wr_cnt);
+ for (j = 0, k = 0; j < strlen(vmp->host_vmid); j++, k += 3)
+ sprintf((char *)(hxstr + k), "%2x ", vmp->host_vmid[j]);
+ /* UUIDs */
+ len += scnprintf(buf + len, PAGE_SIZE - len, "UUID:\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", hxstr);
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "String (%s)\n",
+ vmp->host_vmid);
+
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "CS_CTL VMID: 0x%x\n",
+ vmp->un.cs_ctl_vmid);
+ else
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Application id: 0x%x\n",
+ vmp->un.app_id);
+
+ /* calculate the last access time */
+ for_each_possible_cpu(cpu) {
+ lta = per_cpu_ptr(vmp->last_io_time, cpu);
+ if (!lta)
+ continue;
+
+ /* if last access time is less than timeout */
+ if (time_after((unsigned long)*lta, jiffies))
+ continue;
+
+ if (*lta > max_lta)
+ max_lta = *lta;
+ }
+
+ rct_acc = jiffies_to_msecs(jiffies - max_lta) / 1000;
+ /* current time */
+ time64_to_tm(ktime_get_real_seconds(),
+ -(sys_tz.tz_minuteswest * 60) - rct_acc, &tm_val);
+
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Last Access Time :"
+ "%ld-%d-%dT%02d:%02d:%02d\n\n",
+ 1900 + tm_val.tm_year, tm_val.tm_mon + 1,
+ tm_val.tm_mday, tm_val.tm_hour,
+ tm_val.tm_min, tm_val.tm_sec);
+
+ if (len >= PAGE_SIZE)
+ return len;
+
+ memset(hxstr, 0, LPFC_MAX_VMID_SIZE * 3);
+ }
+ return len;
+}
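The new attribute uses the standard one-page sysfs pattern: every write goes through scnprintf(buf + len, PAGE_SIZE - len, ...), which cannot overrun the page and returns the number of bytes actually emitted. A reduced kernel-context sketch of the idiom (the loop body is illustrative):

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int len = 0, i;

	for (i = 0; i < 8; i++) {
		len += scnprintf(buf + len, PAGE_SIZE - len,
				 "item %d\n", i);
		if (len >= PAGE_SIZE)	/* page full: stop early */
			break;
	}
	return len;
}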
+
/**
* lpfc_drvr_version_show - Return the Emulex driver string with version number
* @dev: class unused variable.
@@ -3011,6 +3143,7 @@ static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
NULL);
static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
+static DEVICE_ATTR_RO(lpfc_vmid_info);
#define WWN_SZ 8
/**
@@ -6117,6 +6250,7 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_vmid_inactivity_timeout.attr,
&dev_attr_lpfc_vmid_app_header.attr,
&dev_attr_lpfc_vmid_priority_tagging.attr,
+ &dev_attr_lpfc_vmid_info.attr,
NULL,
};
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index c8f8496bbdf8..d61d979f9b77 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2687,8 +2687,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
evt->wait_time_stamp = jiffies;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
- msecs_to_jiffies(1000 *
- ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+ secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
if (list_empty(&evt->events_to_see))
ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
else {
@@ -3258,8 +3257,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
evt->waiting = 1;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
- msecs_to_jiffies(1000 *
- ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+ secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
evt->waiting = 0;
if (list_empty(&evt->events_to_see)) {
rc = (time_left) ? -EINTR : -ETIMEDOUT;
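Both hunks above replace msecs_to_jiffies(1000 * secs) with secs_to_jiffies(secs); the results are identical for whole seconds, the latter just skips the intermediate millisecond multiply. Roughly, as a fragment:

/* equivalent for any whole-second value that does not overflow */
unsigned long a = msecs_to_jiffies(1000 * (fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
unsigned long b = secs_to_jiffies(fc_ratov * 2 + LPFC_DRVR_TIMEOUT);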
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 12c67cdd7c19..530dddd39bab 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -3433,7 +3433,8 @@ fdmi_cmd_exit:
void
lpfc_delayed_disc_tmo(struct timer_list *t)
{
- struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo);
+ struct lpfc_vport *vport = timer_container_of(vport, t,
+ delayed_disc_tmo);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long iflag;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 375a879c31f1..b1a61eca8295 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4378,7 +4378,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
void
lpfc_els_retry_delay(struct timer_list *t)
{
- struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
+ struct lpfc_nodelist *ndlp = timer_container_of(ndlp, t,
+ nlp_delayfunc);
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
unsigned long flags;
@@ -9385,7 +9386,7 @@ out:
void
lpfc_els_timeout(struct timer_list *t)
{
- struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
+ struct lpfc_vport *vport = timer_container_of(vport, t, els_tmofunc);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long iflag;
@@ -11594,7 +11595,8 @@ err:
void
lpfc_fabric_block_timeout(struct timer_list *t)
{
- struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
+ struct lpfc_hba *phba = timer_container_of(phba, t,
+ fabric_block_timer);
unsigned long iflags;
uint32_t tmo_posted;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 179be6c5a43e..b88e54a7e65c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -161,7 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
unsigned long iflags;
- bool nvme_reg = false;
+ bool drop_initial_node_ref = false;
ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
if (!ndlp)
@@ -188,8 +188,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
- if (ndlp->fc4_xpt_flags & NVME_XPT_REGD)
- nvme_reg = true;
+ /* Only 1 thread can drop the initial node reference.
+ * If not registered for NVME and NLP_DROPPED flag is
+ * clear, remove the initial reference.
+ */
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ drop_initial_node_ref = true;
/* The scsi_transport is done with the rport so lpfc cannot
* call to unregister.
@@ -200,13 +205,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
* unregister calls were made to the scsi and nvme
* transports and refcnt was already decremented. Clear
- * the NLP_XPT_REGD flag only if the NVME Rport is
+ * the NLP_XPT_REGD flag only if the NVME nrport is
* confirmed unregistered.
*/
- if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
- ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
spin_unlock_irqrestore(&ndlp->lock, iflags);
- lpfc_nlp_put(ndlp); /* may free ndlp */
+
+ /* Release scsi transport reference */
+ lpfc_nlp_put(ndlp);
} else {
spin_unlock_irqrestore(&ndlp->lock, iflags);
}
@@ -214,14 +222,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_unlock_irqrestore(&ndlp->lock, iflags);
}
- /* Only 1 thread can drop the initial node reference. If
- * another thread has set NLP_DROPPED, this thread is done.
- */
- if (nvme_reg || test_bit(NLP_DROPPED, &ndlp->nlp_flag))
- return;
-
- set_bit(NLP_DROPPED, &ndlp->nlp_flag);
- lpfc_nlp_put(ndlp);
+ if (drop_initial_node_ref)
+ lpfc_nlp_put(ndlp);
return;
}
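The refactor hinges on test_and_set_bit() being atomic: exactly one thread observes NLP_DROPPED clear and so performs the final put of the initial node reference. A self-contained user-space sketch of the same idiom using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag dropped = ATOMIC_FLAG_INIT;

/* returns true for exactly one caller, however many race here */
static bool try_drop_initial_ref(void)
{
	return !atomic_flag_test_and_set(&dropped);
}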
@@ -4695,9 +4697,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
vport->phba->nport_event_cnt++;
if (vport->phba->nvmet_support == 0) {
- /* Start devloss if target. */
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- lpfc_nvme_unregister_port(vport, ndlp);
+ lpfc_nvme_unregister_port(vport, ndlp);
} else {
/* NVMET has no upcall. */
lpfc_nlp_put(ndlp);
@@ -5053,7 +5053,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
case CMD_GEN_REQUEST64_CR:
if (iocb->ndlp == ndlp)
return 1;
- fallthrough;
+ break;
case CMD_ELS_REQUEST64_CR:
if (remote_id == ndlp->nlp_DID)
return 1;
@@ -6059,7 +6059,7 @@ lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
void
lpfc_disc_timeout(struct timer_list *t)
{
- struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
+ struct lpfc_vport *vport = timer_container_of(vport, t, fc_disctmo);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long flags = 0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90021653e59e..20fa450def03 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1196,7 +1196,7 @@ lpfc_hb_timeout(struct timer_list *t)
uint32_t tmo_posted;
unsigned long iflag;
- phba = from_timer(phba, t, hb_tmofunc);
+ phba = timer_container_of(phba, t, hb_tmofunc);
/* Check for heart beat timeout conditions */
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
@@ -1228,7 +1228,7 @@ lpfc_rrq_timeout(struct timer_list *t)
{
struct lpfc_hba *phba;
- phba = from_timer(phba, t, rrq_tmr);
+ phba = timer_container_of(phba, t, rrq_tmr);
if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
return;
@@ -1907,6 +1907,9 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
uint32_t intr_mode;
LPFC_MBOXQ_t *mboxq;
+ /* Notify the transport that the targets are going offline. */
+ lpfc_scsi_dev_block(phba);
+
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) {
/*
@@ -5128,7 +5131,7 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
- struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
+ struct lpfc_hba *phba = timer_container_of(phba, t, fcf.redisc_wait);
/* Don't send FCF rediscovery event if timer cancelled */
spin_lock_irq(&phba->hbalock);
@@ -5159,7 +5162,8 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
static void
lpfc_vmid_poll(struct timer_list *t)
{
- struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
+ struct lpfc_hba *phba = timer_container_of(phba, t,
+ inactive_vmid_poll);
u32 wake_up = 0;
/* check if there is a need to issue QFPA */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index b1adb9f59097..a6647dd360d1 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2508,7 +2508,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"6031 RemotePort Registration failed "
"err: %d, DID x%06x ref %u\n",
ret, ndlp->nlp_DID, kref_read(&ndlp->kref));
- lpfc_nlp_put(ndlp);
+
+ /* Only release reference if one was taken for this request */
+ if (!oldrport)
+ lpfc_nlp_put(ndlp);
}
return ret;
@@ -2614,7 +2617,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* clear any rport state until the transport calls back.
*/
- if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ if ((ndlp->nlp_type & NLP_NVME_TARGET) ||
+ (remoteport->port_role & FC_PORT_ROLE_NVME_TARGET)) {
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9edf80b14b1a..8acb744febcd 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5190,7 +5190,7 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
**/
void lpfc_poll_timeout(struct timer_list *t)
{
- struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
+ struct lpfc_hba *phba = timer_container_of(phba, t, fcp_poll_timer);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6574f9e74476..47bbcb78fb4d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -3925,13 +3925,20 @@ void lpfc_poll_eratt(struct timer_list *t)
uint32_t eratt = 0;
uint64_t sli_intr, cnt;
- phba = from_timer(phba, t, eratt_poll);
- if (!test_bit(HBA_SETUP, &phba->hba_flag))
- return;
+ phba = timer_container_of(phba, t, eratt_poll);
if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
return;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !test_bit(HBA_SETUP, &phba->hba_flag)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "0663 HBA still initializing 0x%lx, restart "
+ "timer\n",
+ phba->hba_flag);
+ goto restart_timer;
+ }
+
/* Here we will also keep track of interrupts per sec of the hba */
sli_intr = phba->sli.slistat.sli_intr;
@@ -3950,13 +3957,16 @@ void lpfc_poll_eratt(struct timer_list *t)
/* Check chip HA register for error event */
eratt = lpfc_sli_check_eratt(phba);
- if (eratt)
+ if (eratt) {
/* Tell the worker thread there is work to do */
lpfc_worker_wake_up(phba);
- else
- /* Restart the timer for next eratt poll */
- mod_timer(&phba->eratt_poll,
- jiffies + secs_to_jiffies(phba->eratt_poll_interval));
+ return;
+ }
+
+restart_timer:
+ /* Restart the timer for next eratt poll */
+ mod_timer(&phba->eratt_poll,
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
return;
}
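
The reworked poll above changes the timer from "re-arm only on the no-error
path" to "re-arm on every path that has not handed off to the worker", so an
early bail-out while the HBA is still initializing no longer stops the poll
permanently. The control flow, reduced to a sketch with illustrative predicates:

	static void eratt_poll_sketch(struct timer_list *t)
	{
		struct my_hba *phba = timer_container_of(phba, t, eratt_poll);

		if (unloading(phba))
			return;			/* tearing down: let the timer die */

		if (!setup_complete(phba))
			goto restart_timer;	/* not ready yet: just poll again */

		if (error_pending(phba)) {
			wake_worker(phba);	/* worker takes over; no re-arm */
			return;
		}

	restart_timer:
		mod_timer(&phba->eratt_poll,
			  jiffies + secs_to_jiffies(poll_interval));
	}
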
@@ -6003,9 +6013,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
- memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
- strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+ memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
sizeof(phba->BIOSVersion));
+ phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
@@ -9115,7 +9125,7 @@ out_free_mbox:
void
lpfc_mbox_timeout(struct timer_list *t)
{
- struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
+ struct lpfc_hba *phba = timer_container_of(phba, t, sli.mbox_tmo);
unsigned long iflag;
uint32_t tmo_posted;
@@ -15651,7 +15661,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
- struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+ struct lpfc_hba *phba = timer_container_of(phba, t, cpuhp_poll_timer);
struct lpfc_queue *eq;
rcu_read_lock();
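
The BIOSVersion change above swaps strlcat() for a bounded memcpy() plus an
explicit terminator: strlcat() runs strlen() on the source, which may read past
a firmware-supplied field that is not guaranteed to be NUL-terminated. The safe
pattern for copying such a field into a fixed buffer, assuming the source field
is at least as large as the destination:

	char bios_ver[32];

	memcpy(bios_ver, fw_field, sizeof(bios_ver));	/* bounded, no strlen(src) */
	bios_ver[sizeof(bios_ver) - 1] = '\0';		/* force NUL termination */
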
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 638b50f35287..749688aa8a82 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.8"
+#define LPFC_DRIVER_VERSION "14.4.0.9"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index cc56a7334319..2797aa75a689 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -505,7 +505,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
wait_event_timeout(waitq,
!test_bit(NLP_WAIT_FOR_LOGO,
&ndlp->save_flags),
- msecs_to_jiffies(phba->fc_ratov * 2000));
+ secs_to_jiffies(phba->fc_ratov * 2));
if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
goto logo_cmpl;
@@ -703,7 +703,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
wait_event_timeout(waitq,
!test_bit(NLP_WAIT_FOR_DA_ID,
&ndlp->save_flags),
- msecs_to_jiffies(phba->fc_ratov * 2000));
+ secs_to_jiffies(phba->fc_ratov * 2));
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
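
Since fc_ratov is held in seconds, the two timeout expressions in these hunks
compute the same number of jiffies; secs_to_jiffies() just skips the
millisecond detour:

	/* msecs_to_jiffies(fc_ratov * 2000) == secs_to_jiffies(fc_ratov * 2) */
	unsigned long tmo = secs_to_jiffies(fc_ratov * 2);
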
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index d533a8aa72cc..b610cad83321 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -3836,7 +3836,7 @@ megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
static void
megaraid_sysfs_get_ldmap_timeout(struct timer_list *t)
{
- struct uioc_timeout *timeout = from_timer(timeout, t, timer);
+ struct uioc_timeout *timeout = timer_container_of(timeout, t, timer);
uioc_t *uioc = timeout->uioc;
adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
@@ -3952,7 +3952,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
timer_delete_sync(&timeout.timer);
- destroy_timer_on_stack(&timeout.timer);
+ timer_destroy_on_stack(&timeout.timer);
mutex_unlock(&raid_dev->sysfs_mtx);
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 1f2cd15e3361..87184e2538b0 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -704,7 +704,7 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
wait_event(wait_q, (kioc->status != -ENODATA));
if (timeout.timer.function) {
timer_delete_sync(&timeout.timer);
- destroy_timer_on_stack(&timeout.timer);
+ timer_destroy_on_stack(&timeout.timer);
}
/*
@@ -783,7 +783,7 @@ ioctl_done(uioc_t *kioc)
static void
lld_timedout(struct timer_list *t)
{
- struct uioc_timeout *timeout = from_timer(timeout, t, timer);
+ struct uioc_timeout *timeout = timer_container_of(timeout, t, timer);
uioc_t *kioc = timeout->uioc;
kioc->status = -ETIME;
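
destroy_timer_on_stack() is renamed timer_destroy_on_stack() here, matching the
rest of the timer_* namespace. The full lifecycle of an on-stack timer, sketched
with an illustrative callback:

	struct stack_timeout {
		struct timer_list timer;
	};

	static void stack_timeout_fn(struct timer_list *t)
	{
		struct stack_timeout *to = timer_container_of(to, t, timer);
		/* ... flag the waiter using fields of *to ... */
	}

	static void timed_operation(void)
	{
		struct stack_timeout to;

		timer_setup_on_stack(&to.timer, stack_timeout_fn, 0);
		to.timer.expires = jiffies + secs_to_jiffies(30);
		add_timer(&to.timer);

		/* ... wait for completion or timeout ... */

		timer_delete_sync(&to.timer);		/* callback can no longer run */
		timer_destroy_on_stack(&to.timer);	/* required before the frame dies */
	}
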
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5e33d411fa3d..3aac0e17cb00 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2724,7 +2724,7 @@ out:
static void megasas_sriov_heartbeat_handler(struct timer_list *t)
{
struct megasas_instance *instance =
- from_timer(instance, t, sriov_heartbeat_timer);
+ timer_container_of(instance, t, sriov_heartbeat_timer);
if (instance->hb_host_mem->HB.fwCounter !=
instance->hb_host_mem->HB.driverCounter) {
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index c186b892150f..ce444efd859e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -985,6 +985,10 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
goto out;
}
}
+ dprint_event_bh(mrioc,
+ "exposed target device with handle(0x%04x), perst_id(%d)\n",
+ tgtdev->dev_handle, perst_id);
+ goto out;
} else
mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
@@ -1344,9 +1348,9 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
(struct mpi3_event_data_device_status_change *)fwevt->event_data;
dev_handle = le16_to_cpu(evtdata->dev_handle);
- ioc_info(mrioc,
- "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
- __func__, dev_handle, evtdata->reason_code);
+ dprint_event_bh(mrioc,
+ "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n",
+ dev_handle, evtdata->reason_code);
switch (evtdata->reason_code) {
case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
delete = 1;
@@ -1365,8 +1369,13 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
}
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
- if (!tgtdev)
+ if (!tgtdev) {
+ dprint_event_bh(mrioc,
+ "processing device status change event bottom half,\n"
+ "cannot identify target device for handle(0x%04x), rc(0x%02x)\n",
+ dev_handle, evtdata->reason_code);
goto out;
+ }
if (uhide) {
tgtdev->is_hidden = 0;
if (!tgtdev->host_exposed)
@@ -1406,12 +1415,17 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
perst_id = le16_to_cpu(dev_pg0->persistent_id);
dev_handle = le16_to_cpu(dev_pg0->dev_handle);
- ioc_info(mrioc,
- "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
- __func__, dev_handle, perst_id);
+ dprint_event_bh(mrioc,
+ "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n",
+ dev_handle, perst_id);
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
- if (!tgtdev)
+ if (!tgtdev) {
+ dprint_event_bh(mrioc,
+ "cannot identify target device for device info\n"
+ "change event handle(0x%04x), perst_id(%d)\n",
+ dev_handle, perst_id);
goto out;
+ }
mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
if (!tgtdev->is_hidden && !tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
@@ -2012,8 +2026,11 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_fwevt_del_from_list(mrioc, fwevt);
mrioc->current_event = fwevt;
- if (mrioc->stop_drv_processing)
+ if (mrioc->stop_drv_processing) {
+ dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler\n"
+ "due to stop_drv_processing\n", fwevt->event_id);
goto out;
+ }
if (mrioc->unrecoverable) {
dprint_event_bh(mrioc,
@@ -2025,6 +2042,9 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
if (!fwevt->process_evt)
goto evt_ack;
+ dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n",
+ fwevt->event_id);
+
switch (fwevt->event_id) {
case MPI3_EVENT_DEVICE_ADDED:
{
@@ -2763,6 +2783,9 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
goto out;
dev_handle = le16_to_cpu(evtdata->dev_handle);
+ dprint_event_th(mrioc,
+ "device status change event top half with rc(0x%02x) for handle(0x%04x)\n",
+ evtdata->reason_code, dev_handle);
switch (evtdata->reason_code) {
case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
@@ -2786,8 +2809,12 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
}
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
- if (!tgtdev)
+ if (!tgtdev) {
+ dprint_event_th(mrioc,
+ "processing device status change event could not identify device for handle(0x%04x)\n",
+ dev_handle);
goto out;
+ }
if (hide)
tgtdev->is_hidden = hide;
if (tgtdev->starget && tgtdev->starget->hostdata) {
@@ -2863,13 +2890,13 @@ static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
if (shutdown_timeout <= 0) {
- ioc_warn(mrioc,
+ dprint_event_th(mrioc,
"%s :Invalid Shutdown Timeout received = %d\n",
__func__, shutdown_timeout);
return;
}
- ioc_info(mrioc,
+ dprint_event_th(mrioc,
"%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
__func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
mrioc->facts.shutdown_timeout = shutdown_timeout;
@@ -2945,9 +2972,9 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
* @mrioc: Adapter instance reference
* @event_reply: event data
*
- * Identify whteher the event has to handled and acknowledged
- * and either process the event in the tophalf and/or schedule a
- * bottom half through mpi3mr_fwevt_worker.
+ * Identifies whether the event has to be handled and acknowledged,
+ * and either processes the event in the top-half and/or schedules a
+ * bottom-half through mpi3mr_fwevt_worker().
*
* Return: Nothing
*/
@@ -2974,9 +3001,11 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
struct mpi3_device_page0 *dev_pg0 =
(struct mpi3_device_page0 *)event_reply->event_data;
if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
- ioc_err(mrioc,
- "%s :Failed to add device in the device add event\n",
- __func__);
+ dprint_event_th(mrioc,
+ "failed to process device added event for handle(0x%04x),\n"
+ "perst_id(%d) in the event top half handler\n",
+ le16_to_cpu(dev_pg0->dev_handle),
+ le16_to_cpu(dev_pg0->persistent_id));
else
process_evt_bh = 1;
break;
@@ -3039,11 +3068,15 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
break;
}
if (process_evt_bh || ack_req) {
+ dprint_event_th(mrioc,
+ "scheduling bottom half handler for event(0x%02x),ack_required=%d\n",
+ evt_type, ack_req);
sz = event_reply->event_data_length * 4;
fwevt = mpi3mr_alloc_fwevt(sz);
if (!fwevt) {
- ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
- __func__, __FILE__, __LINE__, __func__);
+ dprint_event_th(mrioc,
+ "failed to schedule bottom half handler for\n"
+ "event(0x%02x), ack_required=%d\n", evt_type, ack_req);
return;
}
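
The mpi3mr hunks above move event logging from unconditional ioc_info()/
ioc_warn() to the dprint_event_bh()/dprint_event_th() helpers, which the driver
gates on its logging_level mask so event-path chatter becomes opt-in. A generic
sketch of such a gated macro; the mask bit and field names here are illustrative,
not mpi3mr's actual definitions:

	#define MYDRV_DEBUG_EVENT_BH	0x0008	/* illustrative logging_level bit */

	#define dprint_event_sketch(ioc, fmt, ...)				\
	do {									\
		if ((ioc)->logging_level & MYDRV_DEBUG_EVENT_BH)		\
			pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__);	\
	} while (0)
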
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 063b10dd8251..02fc204b9bf7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -2869,8 +2869,9 @@ _ctl_get_mpt_mctp_passthru_adapter(int dev_index)
if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
if (count == dev_index) {
spin_unlock(&gioc_lock);
- return 0;
+ return ioc;
}
+ count++;
}
}
spin_unlock(&gioc_lock);
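
The _ctl_get_mpt_mctp_passthru_adapter() fix above repairs two bugs in one loop:
the function returned 0 (NULL) instead of the matched adapter, and count was
never incremented, so any dev_index other than 0 could never match. The corrected
shape of an "Nth element matching a predicate" walk; the lock and capability
names come from the surrounding hunk, the rest is a sketch:

	struct MPT3SAS_ADAPTER *find_nth_mctp_adapter(int dev_index)
	{
		struct MPT3SAS_ADAPTER *ioc;
		int count = 0;

		spin_lock(&gioc_lock);
		list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
			if (!(ioc->facts.IOCCapabilities &
			      MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU))
				continue;
			if (count == dev_index) {
				spin_unlock(&gioc_lock);
				return ioc;	/* the adapter itself, not 0 */
			}
			count++;		/* count only capable adapters */
		}
		spin_unlock(&gioc_lock);
		return NULL;
	}
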
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
index c25a5dfe7889..749f616b21af 100644
--- a/drivers/scsi/mvsas/mv_64xx.h
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -101,8 +101,8 @@ enum sas_sata_vsp_regs {
VSR_PHY_MODE9 = 0x09, /* Test */
VSR_PHY_MODE10 = 0x0A, /* Power */
VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
- VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */
- VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */
+ VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
+ VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
};
enum chip_register_bits {
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index 8ef174cd4d37..3e4124177b2a 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -215,7 +215,7 @@ enum hw_register_bits {
/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
- PHYEV_DCDR_ERR = (1U << 23), /* STP Deocder Error */
+ PHYEV_DCDR_ERR = (1U << 23), /* STP Decoder Error */
PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
PHYEV_AN = (1U << 18), /* SATA async notification */
@@ -347,7 +347,7 @@ enum sas_cmd_port_registers {
CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
- CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */
+ CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 52ac10226cb0..6c46654b9cd9 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1755,7 +1755,7 @@ static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
static void mvs_sig_time_out(struct timer_list *t)
{
- struct mvs_phy *phy = from_timer(phy, t, timer);
+ struct mvs_phy *phy = timer_container_of(phy, t, timer);
struct mvs_info *mvi = phy->mvi;
u8 phy_no;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 14ac81ec0aa0..34ba9b137789 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -7923,7 +7923,7 @@ irqreturn_t ncr53c8xx_intr(int irq, void *dev_id)
static void ncr53c8xx_timeout(struct timer_list *t)
{
- struct ncb *np = from_timer(np, t, timer);
+ struct ncb *np = timer_container_of(np, t, timer);
unsigned long flags;
struct scsi_cmnd *done_list;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 85ff95c6543a..7618f9cc9986 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -644,7 +644,7 @@ static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
#define FLASH_CMD_SET_NVMD 0x02
struct flash_command {
- u8 command[8];
+ u8 command[8] __nonstring;
int code;
};
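
The __nonstring annotation marks command[8] as a byte array rather than a
NUL-terminated string, silencing GCC's -Wunterminated-string-initialization
when an 8-character literal exactly fills the field. For example, with
illustrative values:

	struct flash_command_sketch {
		u8 command[8] __nonstring;	/* 8-byte tag, deliberately no NUL */
		int code;
	};

	static const struct flash_command_sketch cmd_table[] = {
		{ "FLASH_WR", 0x01 },	/* exactly 8 bytes; fine with __nonstring */
	};
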
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e0aeb206df8d..33f403e307eb 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -544,7 +544,7 @@ static void pmcraid_ioa_reset(struct pmcraid_cmd *);
*/
static void pmcraid_bist_done(struct timer_list *t)
{
- struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
+ struct pmcraid_cmd *cmd = timer_container_of(cmd, t, timer);
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
int rc;
@@ -601,7 +601,7 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
*/
static void pmcraid_reset_alert_done(struct timer_list *t)
{
- struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
+ struct pmcraid_cmd *cmd = timer_container_of(cmd, t, timer);
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 status = ioread32(pinstance->ioa_status);
unsigned long lock_flags;
@@ -685,7 +685,7 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
*/
static void pmcraid_timeout_handler(struct timer_list *t)
{
- struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
+ struct pmcraid_cmd *cmd = timer_container_of(cmd, t, timer);
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index a06329b47851..1ed3171f1797 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -1104,7 +1104,6 @@ static int __ppa_attach(struct parport *pb)
host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
if (!host)
goto out1;
- host->no_highmem = true;
host->io_port = pb->base;
host->n_io_port = ports;
host->dma_channel = -1;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 436bd29d5eba..6b1ebab36fa3 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -699,7 +699,7 @@ static u32 qedf_get_login_failures(void *cookie)
}
static struct qed_fcoe_cb_ops qedf_cb_ops = {
- {
+ .common = {
.link_update = qedf_link_update,
.bw_update = qedf_bw_update,
.schedule_recovery_handler = qedf_schedule_recovery_handler,
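
Initializing the embedded ops struct by name instead of by position documents
which member is meant and keeps the initializer valid if the outer struct ever
gains or reorders fields. The general pattern, with hypothetical handler names:

	struct common_ops {
		void (*link_update)(void *dev);
	};

	struct proto_ops {
		struct common_ops common;	/* embedded; happens to be first */
		void (*proto_event)(void *dev);
	};

	static struct proto_ops ops = {
		.common = {
			.link_update = my_link_update,	/* explicit nesting */
		},
		.proto_event = my_proto_event,
	};
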
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
index 2ebef4d20b5b..2f3e044b818f 100644
--- a/drivers/scsi/qedi/qedi_dbg.c
+++ b/drivers/scsi/qedi/qedi_dbg.c
@@ -103,25 +103,3 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
ret:
va_end(va);
}
-
-int
-qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
-{
- int ret = 0;
-
- for (; iter->name; iter++) {
- ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
- iter->attr);
- if (ret)
- pr_err("Unable to create sysfs %s attr, err(%d).\n",
- iter->name, ret);
- }
- return ret;
-}
-
-void
-qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
-{
- for (; iter->name; iter++)
- sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
-}
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index 5a1ec4542183..864951865869 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -87,18 +87,6 @@ void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
u32 info, const char *fmt, ...);
-struct Scsi_Host;
-
-struct sysfs_bin_attrs {
- char *name;
- const struct bin_attribute *attr;
-};
-
-int qedi_create_sysfs_attr(struct Scsi_Host *shost,
- struct sysfs_bin_attrs *iter);
-void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
- struct sysfs_bin_attrs *iter);
-
/* DebugFS related code */
struct qedi_list_of_funcs {
char *oper_str;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 772218445a56..5e10441f2e22 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -45,7 +45,6 @@ int qedi_iscsi_cleanup_task(struct iscsi_task *task,
void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
struct qedi_cmd *qedi_cmd);
-void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e87885cc701c..b168bb2178e9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1877,14 +1877,6 @@ void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
WARN_ON(1);
}
-void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
-{
- *proto_itt = qedi->itt_map[tid].itt;
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
- "Get itt map tid [0x%x with proto itt[0x%x]",
- tid, *proto_itt);
-}
-
struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
{
struct qedi_cmd *cmd = NULL;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 078a9c80bce2..6af018f1ca22 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -721,7 +721,7 @@ enum action {
static void qla1280_mailbox_timeout(struct timer_list *t)
{
- struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
+ struct scsi_qla_host *ha = timer_container_of(ha, t, mailbox_timer);
struct device_reg __iomem *reg;
reg = ha->iobase;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 691ef827a5ab..5136549005e7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2706,59 +2706,6 @@ ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
}
/*
- * This function is for formatting and logging log messages.
- * It is to be used when vha is available. It formats the message
- * and logs it to the messages file. All the messages will be logged
- * irrespective of value of ql2xextended_error_logging.
- * parameters:
- * level: The level of the log messages to be printed in the
- * messages file.
- * vha: Pointer to the scsi_qla_host_t
- * id: This is a unique id for the level. It identifies the
- * part of the code from where the message originated.
- * msg: The message to be displayed.
- */
-void
-ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
- const char *fmt, ...)
-{
- va_list va;
- struct va_format vaf;
- char pbuf[128];
-
- if (level > ql_errlev)
- return;
-
- ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
-
- if (!pbuf[0]) /* set by ql_ktrace */
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
- qpair ? qpair->vha : NULL, id);
-
- va_start(va, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &va;
-
- switch (level) {
- case ql_log_fatal: /* FATAL LOG */
- pr_crit("%s%pV", pbuf, &vaf);
- break;
- case ql_log_warn:
- pr_err("%s%pV", pbuf, &vaf);
- break;
- case ql_log_info:
- pr_warn("%s%pV", pbuf, &vaf);
- break;
- default:
- pr_info("%s%pV", pbuf, &vaf);
- break;
- }
-
- va_end(va);
-}
-
-/*
* This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message
* and logs it to the messages file.
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 54f0a412226f..5f4a8c9ae6ba 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -334,9 +334,6 @@ ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...);
-void __attribute__((format (printf, 4, 5)))
-ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
-
/* Debug Levels */
/* The 0x40000000 is the max value any debug level can have
* as ql2xextended_error_logging is of type signed int
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index dcde55c8ee5d..91bbd3b75bff 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -94,7 +94,8 @@ static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
/* timeout called when no traffic and delayed rx sa_index delete */
static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
{
- struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
+ struct edif_list_entry *edif_entry = timer_container_of(edif_entry, t,
+ timer);
fc_port_t *fcport = edif_entry->fcport;
struct scsi_qla_host *vha = fcport->vha;
struct edif_sa_ctl *sa_ctl;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e556f57c91af..03e50e8fc08d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -164,10 +164,8 @@ extern int ql2xsmartsan;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xextended_error_logging_ktrace;
-extern int ql2xiidmaenable;
extern int ql2xmqsupport;
extern int ql2xfwloadbin;
-extern int ql2xetsenable;
extern int ql2xshiftctondsd;
extern int ql2xdbwr;
extern int ql2xasynctmfenable;
@@ -720,7 +718,6 @@ extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
-extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
extern size_t qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
@@ -822,7 +819,6 @@ extern int qlafx00_rescan_isp(scsi_qla_host_t *);
/* PCI related functions */
extern int qla82xx_pci_config(struct scsi_qla_host *);
extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
-extern int qla82xx_pci_region_offset(struct pci_dev *, int);
extern int qla82xx_iospace_config(struct qla_hw_data *);
/* Initialization related functions */
@@ -866,7 +862,6 @@ extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
/* ISP 8021 IDC */
extern void qla82xx_clear_drv_active(struct qla_hw_data *);
-extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
extern int qla82xx_idc_lock(struct qla_hw_data *);
extern void qla82xx_idc_unlock(struct qla_hw_data *);
extern int qla82xx_device_state_handler(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index d2bddca7045a..51c7cea71f90 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2626,96 +2626,6 @@ qla2x00_port_speed_capability(uint16_t speed)
}
/**
- * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
- * @vha: HA context
- * @list: switch info entries to populate
- *
- * Returns 0 on success.
- */
-int
-qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
-{
- int rval;
- uint16_t i;
- struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
-
- if (!IS_IIDMA_CAPABLE(ha))
- return QLA_FUNCTION_FAILED;
- if (!ha->flags.gpsc_supported)
- return QLA_FUNCTION_FAILED;
-
- rval = qla2x00_mgmt_svr_login(vha);
- if (rval)
- return rval;
-
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = GPSC_REQ_SIZE;
- arg.rsp_size = GPSC_RSP_SIZE;
- arg.nport_handle = vha->mgmt_svr_loop_id;
-
- for (i = 0; i < ha->max_fibre_devices; i++) {
- /* Issue GFPN_ID */
- /* Prepare common MS IOCB */
- ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
-
- /* Prepare CT request */
- ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
- GPSC_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
-
- /* Prepare CT arguments -- port_name */
- memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
- WWN_SIZE);
-
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
- if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2059,
- "GPSC issue IOCB failed (%d).\n", rval);
- } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
- "GPSC")) != QLA_SUCCESS) {
- /* FM command unsupported? */
- if (rval == QLA_INVALID_COMMAND &&
- (ct_rsp->header.reason_code ==
- CT_REASON_INVALID_COMMAND_CODE ||
- ct_rsp->header.reason_code ==
- CT_REASON_COMMAND_UNSUPPORTED)) {
- ql_dbg(ql_dbg_disc, vha, 0x205a,
- "GPSC command unsupported, disabling "
- "query.\n");
- ha->flags.gpsc_supported = 0;
- rval = QLA_FUNCTION_FAILED;
- break;
- }
- rval = QLA_FUNCTION_FAILED;
- } else {
- list->fp_speed = qla2x00_port_speed_capability(
- be16_to_cpu(ct_rsp->rsp.gpsc.speed));
- ql_dbg(ql_dbg_disc, vha, 0x205b,
- "GPSC ext entry - fpn "
- "%8phN speeds=%04x speed=%04x.\n",
- list[i].fabric_port_name,
- be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
- be16_to_cpu(ct_rsp->rsp.gpsc.speed));
- }
-
- /* Last device exit. */
- if (list[i].d_id.b.rsvd_1 != 0)
- break;
- }
-
- return (rval);
-}
-
-/**
* qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
*
* @vha: HA context
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0c2dd782b675..514934dd6f80 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -45,7 +45,7 @@ static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
void
qla2x00_sp_timeout(struct timer_list *t)
{
- srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
+ srb_t *sp = timer_container_of(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
scsi_qla_host_t *vha = sp->vha;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 6dfb70edb9a6..0cd3db8ed4ef 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1099,11 +1099,6 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
unsigned offset, n;
struct qla_hw_data *ha = vha->hw;
- struct crb_addr_pair {
- long addr;
- long data;
- };
-
/* Halt all the individual PEGs and other blocks of the ISP */
qla82xx_rom_lock(ha);
@@ -1595,25 +1590,6 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
return (u8 *)&ha->hablob->fw->data[offset];
}
-/* PCI related functions */
-int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
-{
- unsigned long val = 0;
- u32 control;
-
- switch (region) {
- case 0:
- val = 0;
- break;
- case 1:
- pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
- val = control + QLA82XX_MSIX_TBL_SPACE;
- break;
- }
- return val;
-}
-
-
int
qla82xx_iospace_config(struct qla_hw_data *ha)
{
@@ -2934,32 +2910,6 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
}
}
-/*
-* qla82xx_wait_for_state_change
-* Wait for device state to change from given current state
-*
-* Note:
-* IDC lock must not be held upon entry
-*
-* Return:
-* Changed device state.
-*/
-uint32_t
-qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
-{
- struct qla_hw_data *ha = vha->hw;
- uint32_t dev_state;
-
- do {
- msleep(1000);
- qla82xx_idc_lock(ha);
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla82xx_idc_unlock(ha);
- } while (dev_state == curr_state);
-
- return dev_state;
-}
-
void
qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
{
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b44d134e7105..d4b484c0fd9d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -176,12 +176,6 @@ MODULE_PARM_DESC(ql2xenablehba_err_chk,
" 1 -- Error isolation enabled only for DIX Type 0\n"
" 2 -- Error isolation enabled for all Types\n");
-int ql2xiidmaenable = 1;
-module_param(ql2xiidmaenable, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xiidmaenable,
- "Enables iIDMA settings "
- "Default is 1 - perform iIDMA. 0 - no iIDMA.");
-
int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
@@ -199,12 +193,6 @@ MODULE_PARM_DESC(ql2xfwloadbin,
" 1 -- load firmware from flash.\n"
" 0 -- use default semantics.\n");
-int ql2xetsenable;
-module_param(ql2xetsenable, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xetsenable,
- "Enables firmware ETS burst."
- "Default is 0 - skip ETS enablement.");
-
int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
@@ -7392,7 +7380,7 @@ static void qla_wind_down_chip(scsi_qla_host_t *vha)
void
qla2x00_timer(struct timer_list *t)
{
- scsi_qla_host_t *vha = from_timer(vha, t, timer);
+ scsi_qla_host_t *vha = timer_container_of(vha, t, timer);
unsigned long cpu_flags = 0;
int start_dpc = 0;
int index;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 11eadb3bd36e..1e81582085e3 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1454,50 +1454,6 @@ static struct fc_port *qlt_create_sess(
return sess;
}
-/*
- * max_gen - specifies maximum session generation
- * at which this deletion requestion is still valid
- */
-void
-qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
-{
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct fc_port *sess = fcport;
- unsigned long flags;
-
- if (!vha->hw->tgt.tgt_ops)
- return;
-
- if (!tgt)
- return;
-
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- if (tgt->tgt_stop) {
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- return;
- }
- if (!sess->se_sess) {
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- return;
- }
-
- if (max_gen - sess->generation < 0) {
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
- "Ignoring stale deletion request for se_sess %p / sess %p"
- " for port %8phC, req_gen %d, sess_gen %d\n",
- sess->se_sess, sess, sess->port_name, max_gen,
- sess->generation);
- return;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
-
- sess->local = 1;
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- qlt_schedule_sess_for_deletion(sess);
-}
-
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
struct qla_hw_data *ha = tgt->ha;
@@ -5539,81 +5495,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
-int
-qlt_free_qfull_cmds(struct qla_qpair *qpair)
-{
- struct scsi_qla_host *vha = qpair->vha;
- struct qla_hw_data *ha = vha->hw;
- unsigned long flags;
- struct qla_tgt_cmd *cmd, *tcmd;
- struct list_head free_list, q_full_list;
- int rc = 0;
-
- if (list_empty(&ha->tgt.q_full_list))
- return 0;
-
- INIT_LIST_HEAD(&free_list);
- INIT_LIST_HEAD(&q_full_list);
-
- spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
- if (list_empty(&ha->tgt.q_full_list)) {
- spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
- return 0;
- }
-
- list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
- spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
-
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
- if (cmd->q_full)
- /* cmd->state is a borrowed field to hold status */
- rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
- else if (cmd->term_exchg)
- rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
-
- if (rc == -ENOMEM)
- break;
-
- if (cmd->q_full)
- ql_dbg(ql_dbg_io, vha, 0x3006,
- "%s: busy sent for ox_id[%04x]\n", __func__,
- be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
- else if (cmd->term_exchg)
- ql_dbg(ql_dbg_io, vha, 0x3007,
- "%s: Term exchg sent for ox_id[%04x]\n", __func__,
- be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
- else
- ql_dbg(ql_dbg_io, vha, 0x3008,
- "%s: Unexpected cmd in QFull list %p\n", __func__,
- cmd);
-
- list_move_tail(&cmd->cmd_list, &free_list);
-
- /* piggy back on hardware_lock for protection */
- vha->hw->tgt.num_qfull_cmds_alloc--;
- }
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- cmd = NULL;
-
- list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
- list_del(&cmd->cmd_list);
- /* This cmd was never sent to TCM. There is no need
- * to schedule free or call free_cmd
- */
- qlt_free_cmd(cmd);
- }
-
- if (!list_empty(&q_full_list)) {
- spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
- list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
- spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
- }
-
- return rc;
-}
-
static void
qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
uint16_t status)
@@ -7091,16 +6972,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
void
-qlt_83xx_iospace_config(struct qla_hw_data *ha)
-{
- if (!QLA_TGT_MODE_ENABLED())
- return;
-
- ha->msix_count += 1; /* For ATIO Q */
-}
-
-
-void
qlt_modify_vp_config(struct scsi_qla_host *vha,
struct vp_config_entry_24xx *vpmod)
{
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 354fca2e7feb..15a59c125c53 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1014,7 +1014,6 @@ extern int qlt_lport_register(void *, u64, u64, u64,
extern void qlt_lport_deregister(struct scsi_qla_host *);
extern void qlt_unreg_sess(struct fc_port *);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
-extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
extern void qlt_exit(void);
extern void qlt_free_session_done(struct work_struct *);
@@ -1082,8 +1081,6 @@ extern void qlt_mem_free(struct qla_hw_data *);
extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
-extern void qlt_83xx_iospace_config(struct qla_hw_data *);
-extern int qlt_free_qfull_cmds(struct qla_qpair *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 47adff9f0506..da2fc66ffedd 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -973,11 +973,6 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
unsigned long off;
unsigned offset, n;
- struct crb_addr_pair {
- long addr;
- long data;
- };
-
/* Halt all the individual PEGs and other blocks of the ISP */
qla4_82xx_rom_lock(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index d540d66e6ffc..d4141656b204 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4551,7 +4551,7 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
**/
static void qla4xxx_timer(struct timer_list *t)
{
- struct scsi_qla_host *ha = from_timer(ha, t, timer);
+ struct scsi_qla_host *ha = timer_container_of(ha, t, timer);
int start_dpc = 0;
uint16_t w;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f0eec4708ddd..aef33d1e346a 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -162,7 +162,7 @@ static const char *sdebug_version_date = "20210520";
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_ATOMIC_WR 0
-#define DEF_ATOMIC_WR_MAX_LENGTH 8192
+#define DEF_ATOMIC_WR_MAX_LENGTH 128
#define DEF_ATOMIC_WR_ALIGN 2
#define DEF_ATOMIC_WR_GRAN 2
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH)
@@ -294,6 +294,14 @@ struct tape_block {
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
+/* Device selection bit mask */
+#define DS_ALL 0xffffffff
+#define DS_SBC (1 << TYPE_DISK)
+#define DS_SSC (1 << TYPE_TAPE)
+#define DS_ZBC (1 << TYPE_ZBC)
+
+#define DS_NO_SSC (DS_ALL & ~DS_SSC)
+
#define SDEBUG_MAX_PARTS 4
#define SDEBUG_MAX_CMD_LEN 32
@@ -472,6 +480,7 @@ struct opcode_info_t {
/* for terminating element */
u8 opcode; /* if num_attached > 0, preferred */
u16 sa; /* service action */
+ u32 devsel; /* device type mask for this definition */
u32 flags; /* OR-ed set of SDEB_F_* */
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
const struct opcode_info_t *arrp; /* num_attached elements or NULL */
@@ -519,7 +528,8 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_FILEMARKS = 35,
SDEB_I_SPACE = 36,
SDEB_I_FORMAT_MEDIUM = 37,
- SDEB_I_LAST_ELEM_P1 = 38, /* keep this last (previous + 1) */
+ SDEB_I_ERASE = 38,
+ SDEB_I_LAST_ELEM_P1 = 39, /* keep this last (previous + 1) */
};
@@ -530,7 +540,7 @@ static const unsigned char opcode_ind_arr[256] = {
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
- 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
+ 0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
@@ -585,7 +595,9 @@ static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -613,8 +625,10 @@ static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *);
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
@@ -629,113 +643,121 @@ static void sdebug_erase_all_stores(bool apart_from_first);
* should be placed in opcode_info_arr[], the others should be placed here.
*/
static const struct opcode_info_t msense_iarr[] = {
- {0, 0x1a, 0, F_D_IN, NULL, NULL,
+ {0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL,
{6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t mselect_iarr[] = {
- {0, 0x15, 0, F_D_OUT, NULL, NULL,
+ {0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL,
{6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t read_iarr[] = {
- {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
+ {0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
+ {0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */
{6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
+ {0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */
+ {6, 0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
{12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
0xc7, 0, 0, 0, 0} },
};
static const struct opcode_info_t write_iarr[] = {
- {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
+ {0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
- {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
+ {0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */
NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0} },
- {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
+ {0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */
+ NULL, {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0} },
+ {0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xbf, 0xc7, 0, 0, 0, 0} },
};
static const struct opcode_info_t verify_iarr[] = {
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
+ {0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t sa_in_16_iarr[] = {
- {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
+ {0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
- {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
+ {0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
{16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
0, 0} }, /* GET STREAM STATUS */
};
static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
- {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
+ {0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
- {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ {0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
};
static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
- {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
+ {0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
{12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
- {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
+ {0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
{12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
static const struct opcode_info_t write_same_iarr[] = {
- {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
+ {0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
};
static const struct opcode_info_t reserve_iarr[] = {
- {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
+ {0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RESERVE(6) */
{6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t release_iarr[] = {
- {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
+ {0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RELEASE(6) */
{6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static const struct opcode_info_t sync_cache_iarr[] = {
- {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
+ {0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
{16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};
static const struct opcode_info_t pre_fetch_iarr[] = {
- {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
+ {0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
{16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
+ {0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL,
+ {10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} }, /* READ POSITION (10) */
};
static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
- {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
+ {0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
{16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
- {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
+ {0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
{16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
- {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
+ {0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
{16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
};
static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
- {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
+ {0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
{16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
@@ -746,130 +768,132 @@ static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
* REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
+ {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
+ {0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
{6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
+ {0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
{12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
0, 0} }, /* REPORT LUNS */
- {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
+ {0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL,
{6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
+ {0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
- {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
+ {ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN, /* MODE SENSE(10) */
resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
+ {ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT, /* MODE SELECT(10) */
resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
+ {0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
{10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
0, 0, 0} },
- {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
+ {0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
{10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
0, 0} },
- {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
+ {ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */
resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
- {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
+ {ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
resp_write_dt0, write_iarr, /* WRITE(16) */
{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
- {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
+ {0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
+ {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN,
resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
- {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ {0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
- {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
+ {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN,
resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
+ {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {ARRAY_SIZE(verify_iarr), 0x8f, 0,
+ {ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC,
F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
- {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
+ {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
{32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
0xff, 0xff} },
- {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
+ {ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT,
NULL, reserve_iarr, /* RESERVE(10) <no response function> */
{10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
- {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
+ {ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT,
NULL, release_iarr, /* RELEASE(10) <no response function> */
{10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
/* 20 */
- {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
+ {0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */
{6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1, 0, 0, resp_rewind, NULL,
+ {0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL,
{6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
+ {0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
+ {0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL, /* SEND DIAGNOSTIC */
{6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
+ {0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
{10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
- {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
+ {0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL,
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* WRITE_BUFFER */
- {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
+ {ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO,
resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
0, 0, 0, 0, 0} },
- {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
+ {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS,
resp_sync_cache, sync_cache_iarr,
{10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* SYNC_CACHE (10) */
- {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
+ {0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
- {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
+ {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO,
resp_pre_fetch, pre_fetch_iarr,
{10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* PRE-FETCH (10) */
/* READ POSITION (10) */
/* 30 */
- {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
+ {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
- {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
+ {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS,
resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* 32 */
- {0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
+ {0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO,
resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
- {0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
+ {0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2b, 0, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
+ {0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
{10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
+ {0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
{6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x11, 0, F_D_IN, resp_space, NULL, /* SPACE (6) */
+ {0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL, /* SPACE (6) */
{6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x4, 0, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
+ {0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
{6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-/* 38 */
+ {0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL, /* ERASE (6) */
+ {6, 0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+/* 39 */
/* sentinel */
- {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
+ {0xff, 0, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
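
The new fourth column in every opcode_info_t initializer above is a device-type selector mask, consulted wherever the table is searched. A minimal sketch of the scheme, assuming one selector bit per SCSI peripheral type (the exact DS_* definitions are not visible in this hunk; the shape below is inferred from the sdebug_get_devsel() helper added further down):

#include <stdbool.h>
#include <stdint.h>

/* Assumed layout: bit N selects peripheral device type N
 * (TYPE_DISK == 0x00, TYPE_TAPE == 0x01, ...).
 */
#define DS_ALL     0xffffffffu
#define DS_SSC     (1u << 0x01)          /* tape (SSC) devices only */
#define DS_NO_SSC  (DS_ALL & ~DS_SSC)    /* everything except tape */

/* An opcode entry applies to a device when the device's selector bit
 * intersects the entry's devsel mask.
 */
static inline bool opcode_matches(uint32_t devsel, uint32_t entry_devsel)
{
	return (devsel & entry_devsel) != 0;
}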
@@ -1015,6 +1039,19 @@ static const int condition_met_result = SAM_STAT_CONDITION_MET;
static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);
+static u32 sdebug_get_devsel(struct scsi_device *sdp)
+{
+ unsigned char devtype = sdp->type;
+ u32 devsel;
+
+ if (devtype < 32)
+ devsel = (1 << devtype);
+ else
+ devsel = DS_ALL;
+
+ return devsel;
+}
+
static void sdebug_err_free(struct rcu_head *head)
{
struct sdebug_err_inject *inject =
@@ -2032,13 +2069,19 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char *cmd = scp->cmnd;
u32 alloc_len, n;
int ret;
- bool have_wlun, is_disk, is_zbc, is_disk_zbc;
+ bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape;
alloc_len = get_unaligned_be16(cmd + 3);
arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
- is_disk = (sdebug_ptype == TYPE_DISK);
+ if (scp->device->type >= 32) {
+ is_disk = (sdebug_ptype == TYPE_DISK);
+ is_tape = (sdebug_ptype == TYPE_TAPE);
+ } else {
+ is_disk = (scp->device->type == TYPE_DISK);
+ is_tape = (scp->device->type == TYPE_TAPE);
+ }
is_zbc = devip->zoned;
is_disk_zbc = (is_disk || is_zbc);
have_wlun = scsi_is_wlun(scp->device->lun);
@@ -2047,7 +2090,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
else
- pq_pdt = (sdebug_ptype & 0x1f);
+ pq_pdt = ((scp->device->type >= 32 ?
+ sdebug_ptype : scp->device->type) & 0x1f);
arr[0] = pq_pdt;
if (0x2 & cmd[1]) { /* CMDDT bit set */
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
@@ -2170,7 +2214,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (is_disk) { /* SBC-4 no version claimed */
put_unaligned_be16(0x600, arr + n);
n += 2;
- } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
+ } else if (is_tape) { /* SSC-4 rev 3 */
put_unaligned_be16(0x525, arr + n);
n += 2;
} else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
@@ -2279,7 +2323,7 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
changing = (stopped_state != want_stop);
if (changing)
atomic_xchg(&devip->stopped, want_stop);
- if (sdebug_ptype == TYPE_TAPE && !want_stop) {
+ if (scp->device->type == TYPE_TAPE && !want_stop) {
int i;
set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
@@ -2454,11 +2498,12 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
u8 reporting_opts, req_opcode, sdeb_i, supp;
u16 req_sa, u;
u32 alloc_len, a_len;
- int k, offset, len, errsts, count, bump, na;
+ int k, offset, len, errsts, bump, na;
const struct opcode_info_t *oip;
const struct opcode_info_t *r_oip;
u8 *arr;
u8 *cmd = scp->cmnd;
+ u32 devsel = sdebug_get_devsel(scp->device);
rctd = !!(cmd[2] & 0x80);
reporting_opts = cmd[2] & 0x7;
@@ -2481,34 +2526,30 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
}
switch (reporting_opts) {
case 0: /* all commands */
- /* count number of commands */
- for (count = 0, oip = opcode_info_arr;
- oip->num_attached != 0xff; ++oip) {
- if (F_INV_OP & oip->flags)
- continue;
- count += (oip->num_attached + 1);
- }
bump = rctd ? 20 : 8;
- put_unaligned_be32(count * bump, arr);
for (offset = 4, oip = opcode_info_arr;
oip->num_attached != 0xff && offset < a_len; ++oip) {
if (F_INV_OP & oip->flags)
continue;
+ if ((devsel & oip->devsel) != 0) {
+ arr[offset] = oip->opcode;
+ put_unaligned_be16(oip->sa, arr + offset + 2);
+ if (rctd)
+ arr[offset + 5] |= 0x2;
+ if (FF_SA & oip->flags)
+ arr[offset + 5] |= 0x1;
+ put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
+ if (rctd)
+ put_unaligned_be16(0xa, arr + offset + 8);
+ offset += bump;
+ }
na = oip->num_attached;
- arr[offset] = oip->opcode;
- put_unaligned_be16(oip->sa, arr + offset + 2);
- if (rctd)
- arr[offset + 5] |= 0x2;
- if (FF_SA & oip->flags)
- arr[offset + 5] |= 0x1;
- put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
- if (rctd)
- put_unaligned_be16(0xa, arr + offset + 8);
r_oip = oip;
for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
if (F_INV_OP & oip->flags)
continue;
- offset += bump;
+ if ((devsel & oip->devsel) == 0)
+ continue;
arr[offset] = oip->opcode;
put_unaligned_be16(oip->sa, arr + offset + 2);
if (rctd)
@@ -2516,14 +2557,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
if (FF_SA & oip->flags)
arr[offset + 5] |= 0x1;
put_unaligned_be16(oip->len_mask[0],
- arr + offset + 6);
+ arr + offset + 6);
if (rctd)
put_unaligned_be16(0xa,
arr + offset + 8);
+ offset += bump;
}
oip = r_oip;
- offset += bump;
}
+ put_unaligned_be32(offset - 4, arr);
break;
case 1: /* one command: opcode only */
case 2: /* one command: opcode plus service action */
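
With per-device filtering, the REPORT SUPPORTED OPERATION CODES "all commands" response can no longer be sized by the old pre-count pass, which ignored devsel; the rewritten loop appends only eligible descriptors and derives the payload length from the final offset. A hedged standalone sketch of that filter-then-measure pattern (emit_all_commands and struct op are illustrative names, not driver symbols):

#include <stddef.h>
#include <stdint.h>

struct op { uint8_t opcode; uint32_t devsel; };

/* Append only entries whose devsel intersects the requesting device's
 * selector, then store the big-endian descriptor-list length (bytes
 * after the 4-byte header) once the final offset is known.
 */
static size_t emit_all_commands(uint8_t *arr, size_t a_len,
				const struct op *ops, size_t n,
				uint32_t devsel, size_t bump)
{
	size_t offset = 4;	/* 4-byte header precedes the descriptors */

	for (size_t i = 0; i < n && offset + bump <= a_len; i++) {
		if (!(ops[i].devsel & devsel))
			continue;	/* not supported for this device type */
		arr[offset] = ops[i].opcode;	/* descriptor stub */
		offset += bump;
	}
	arr[0] = (offset - 4) >> 24;
	arr[1] = (offset - 4) >> 16;
	arr[2] = (offset - 4) >> 8;
	arr[3] = (offset - 4) & 0xff;
	return offset;
}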
@@ -2549,13 +2591,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
return check_condition_result;
}
if (0 == (FF_SA & oip->flags) &&
- req_opcode == oip->opcode)
+ (devsel & oip->devsel) != 0 &&
+ req_opcode == oip->opcode)
supp = 3;
else if (0 == (FF_SA & oip->flags)) {
na = oip->num_attached;
for (k = 0, oip = oip->arrp; k < na;
++k, ++oip) {
- if (req_opcode == oip->opcode)
+ if (req_opcode == oip->opcode &&
+ (devsel & oip->devsel) != 0)
break;
}
supp = (k >= na) ? 1 : 3;
@@ -2563,7 +2607,8 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp,
na = oip->num_attached;
for (k = 0, oip = oip->arrp; k < na;
++k, ++oip) {
- if (req_sa == oip->sa)
+ if (req_sa == oip->sa &&
+ (devsel & oip->devsel) != 0)
break;
}
supp = (k >= na) ? 1 : 3;
@@ -2914,9 +2959,9 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
subpcode = cmd[3];
msense_6 = (MODE_SENSE == cmd[0]);
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
- is_disk = (sdebug_ptype == TYPE_DISK);
+ is_disk = (scp->device->type == TYPE_DISK);
is_zbc = devip->zoned;
- is_tape = (sdebug_ptype == TYPE_TAPE);
+ is_tape = (scp->device->type == TYPE_TAPE);
if ((is_disk || is_zbc || is_tape) && !dbd)
bd_len = llbaa ? 16 : 8;
else
@@ -3131,7 +3176,7 @@ static int resp_mode_select(struct scsi_cmnd *scp,
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
off = (mselect6 ? 4 : 8);
- if (sdebug_ptype == TYPE_TAPE) {
+ if (scp->device->type == TYPE_TAPE) {
int blksize;
if (bd_len != 8) {
@@ -3196,7 +3241,7 @@ static int resp_mode_select(struct scsi_cmnd *scp,
}
break;
case 0xf: /* Compression mode page */
- if (sdebug_ptype != TYPE_TAPE)
+ if (scp->device->type != TYPE_TAPE)
goto bad_pcode;
if ((arr[off + 2] & 0x40) != 0) {
devip->tape_dce = (arr[off + 2] & 0x80) != 0;
@@ -3204,7 +3249,7 @@ static int resp_mode_select(struct scsi_cmnd *scp,
}
break;
case 0x11: /* Medium Partition Mode Page (tape) */
- if (sdebug_ptype == TYPE_TAPE) {
+ if (scp->device->type == TYPE_TAPE) {
int fld;
fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
@@ -3563,6 +3608,30 @@ is_eop:
return check_condition_result;
}
+enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
+static int resp_read_position(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ int all_length;
+ unsigned char arr[SDEBUG_READ_POSITION_ARR_SZ];
+ unsigned int pos;
+
+ all_length = get_unaligned_be16(cmd + 7);
+ if ((cmd[1] & 0xfe) != 0 ||
+ all_length != 0) { /* only short form */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB,
+ all_length ? 7 : 1, 0);
+ return check_condition_result;
+ }
+ memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
+ arr[1] = devip->tape_partition;
+ pos = devip->tape_location[devip->tape_partition];
+ put_unaligned_be32(pos, arr + 4);
+ put_unaligned_be32(pos, arr + 8);
+ return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ);
+}
+
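
resp_read_position() above implements only the 20-byte short form of READ POSITION: byte 1 carries the active partition, and the same block address is reported as both the first and last logical object location, since the emulation holds no buffered blocks in flight. A hedged sketch of the layout it fills (byte meanings as understood here; consult SSC for the authoritative definitions):

#include <stdint.h>
#include <string.h>

/* Short-form READ POSITION data, 20 bytes:
 *   byte  0      flags (all zero here: BOP/EOP not reported)
 *   byte  1      current partition number
 *   bytes 4..7   first logical object location (big endian)
 *   bytes 8..11  last logical object location (same value here)
 *   rest         zero (no blocks/bytes held in a device buffer)
 */
static void fill_short_read_position(uint8_t arr[20],
				     uint8_t partition, uint32_t pos)
{
	memset(arr, 0, 20);
	arr[1] = partition;
	arr[4] = pos >> 24; arr[5] = pos >> 16; arr[6] = pos >> 8; arr[7] = pos;
	arr[8] = pos >> 24; arr[9] = pos >> 16; arr[10] = pos >> 8; arr[11] = pos;
}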
static int resp_rewind(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -3604,10 +3673,6 @@ static int resp_format_medium(struct scsi_cmnd *scp,
int res = 0;
unsigned char *cmd = scp->cmnd;
- if (sdebug_ptype != TYPE_TAPE) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1);
- return check_condition_result;
- }
if (cmd[2] > 2) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
return check_condition_result;
@@ -3631,6 +3696,19 @@ static int resp_format_medium(struct scsi_cmnd *scp,
return 0;
}
+static int resp_erase(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int partition = devip->tape_partition;
+ int pos = devip->tape_location[partition];
+ struct tape_block *blp;
+
+ blp = devip->tape_blocks[partition] + pos;
+ blp->fl_size = TAPE_BLOCK_EOD_FLAG;
+
+ return 0;
+}
+
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
return devip->nr_zones != 0;
@@ -4467,9 +4545,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
- if (sdebug_ptype == TYPE_TAPE)
- return resp_read_tape(scp, devip);
-
switch (cmd[0]) {
case READ_16:
ei_lba = 0;
@@ -4839,9 +4914,6 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
- if (sdebug_ptype == TYPE_TAPE)
- return resp_write_tape(scp, devip);
-
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -5573,7 +5645,6 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
*
* The pcode 0x34 is also used for READ POSITION by tape devices.
*/
-enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -5585,31 +5656,6 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdeb_store_info *sip = devip2sip(devip, true);
u8 *fsp = sip->storep;
- if (sdebug_ptype == TYPE_TAPE) {
- if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */
- int all_length;
- unsigned char arr[20];
- unsigned int pos;
-
- all_length = get_unaligned_be16(cmd + 7);
- if ((cmd[1] & 0xfe) != 0 ||
- all_length != 0) { /* only short form */
- mk_sense_invalid_fld(scp, SDEB_IN_CDB,
- all_length ? 7 : 1, 0);
- return check_condition_result;
- }
- memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
- arr[1] = devip->tape_partition;
- pos = devip->tape_location[devip->tape_partition];
- put_unaligned_be32(pos, arr + 4);
- put_unaligned_be32(pos, arr + 8);
- return fill_from_dev_buffer(scp, arr,
- SDEBUG_READ_POSITION_ARR_SZ);
- }
- mk_sense_invalid_opcode(scp);
- return check_condition_result;
- }
-
if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
lba = get_unaligned_be32(cmd + 2);
nblks = get_unaligned_be16(cmd + 7);
@@ -6645,7 +6691,7 @@ static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
debugfs_remove(devip->debugfs_entry);
- if (sdebug_ptype == TYPE_TAPE) {
+ if (sdp->type == TYPE_TAPE) {
kfree(devip->tape_blocks[0]);
devip->tape_blocks[0] = NULL;
}
@@ -6833,18 +6879,16 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
{
- if (sdebug_ptype == TYPE_TAPE) {
- int i;
+ int i;
- devip->tape_blksize = TAPE_DEF_BLKSIZE;
- devip->tape_density = TAPE_DEF_DENSITY;
- devip->tape_partition = 0;
- devip->tape_dce = 0;
- for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
- devip->tape_location[i] = 0;
- devip->tape_pending_nbr_partitions = -1;
- /* Don't reset partitioning? */
- }
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_partition = 0;
+ devip->tape_dce = 0;
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_pending_nbr_partitions = -1;
+ /* Don't reset partitioning? */
}
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
@@ -6862,7 +6906,8 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
scsi_debug_stop_all_queued(sdp);
if (devip) {
set_bit(SDEBUG_UA_POR, devip->uas_bm);
- scsi_tape_reset_clear(devip);
+ if (SCpnt->device->type == TYPE_TAPE)
+ scsi_tape_reset_clear(devip);
}
if (sdebug_fail_lun_reset(SCpnt)) {
@@ -6901,7 +6946,8 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if (devip->target == sdp->id) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- scsi_tape_reset_clear(devip);
+ if (SCpnt->device->type == TYPE_TAPE)
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6933,7 +6979,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- scsi_tape_reset_clear(devip);
+ if (SCpnt->device->type == TYPE_TAPE)
+ scsi_tape_reset_clear(devip);
++k;
}
@@ -6957,7 +7004,8 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
- scsi_tape_reset_clear(devip);
+ if (SCpnt->device->type == TYPE_TAPE)
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -9173,6 +9221,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
u32 flags;
u16 sa;
u8 opcode = cmd[0];
+ u32 devsel = sdebug_get_devsel(scp->device);
bool has_wlun_rl;
bool inject_now;
int ret = 0;
@@ -9252,12 +9301,14 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
else
sa = get_unaligned_be16(cmd + 8);
for (k = 0; k <= na; oip = r_oip->arrp + k++) {
- if (opcode == oip->opcode && sa == oip->sa)
+ if (opcode == oip->opcode && sa == oip->sa &&
+ (devsel & oip->devsel) != 0)
break;
}
} else { /* since no service action only check opcode */
for (k = 0; k <= na; oip = r_oip->arrp + k++) {
- if (opcode == oip->opcode)
+ if (opcode == oip->opcode &&
+ (devsel & oip->devsel) != 0)
break;
}
}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 90f1393a23f8..8ff679e67bbf 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -486,33 +486,6 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
}
/**
- * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
- * @vendor: vendor string
- * @model: model (product) string
- * @key: specify list to use
- *
- * Description:
- * Remove and destroy one dev_info entry for @vendor, @model
- * in list specified by @key.
- *
- * Returns: 0 OK, -error on failure.
- **/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model,
- enum scsi_devinfo_key key)
-{
- struct scsi_dev_info_list *found;
-
- found = scsi_dev_info_list_find(vendor, model, key);
- if (IS_ERR(found))
- return PTR_ERR(found);
-
- list_del(&found->dev_info_list);
- kfree(found);
- return 0;
-}
-EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
-
-/**
* scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
* @dev_list: string of device flags to add
*
@@ -863,7 +836,7 @@ int __init scsi_init_devinfo(void)
goto out;
for (i = 0; scsi_static_device_list[i].vendor; i++) {
- error = scsi_dev_info_list_add(1 /* compatibile */,
+ error = scsi_dev_info_list_add(1 /* compatible */,
scsi_static_device_list[i].vendor,
scsi_static_device_list[i].model,
NULL,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 376b8897ab90..746ff6a1f309 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -665,7 +665,8 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
* if the device is in the process of becoming ready, we
* should retry.
*/
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
+ if ((sshdr.asc == 0x04) &&
+ (sshdr.ascq == 0x01 || sshdr.ascq == 0x0a))
return NEEDS_RETRY;
/*
* if the device is not started, we need to wake
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 2fa45556e1ea..0ddc95bafc71 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -601,7 +601,7 @@ static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write,
}
if (bytes) {
- err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
+ err = blk_rq_map_kern(rq, buffer, bytes, GFP_NOIO);
if (err)
goto error;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1b43013d72c0..144c72f0737a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -313,8 +313,7 @@ retry:
return PTR_ERR(req);
if (bufflen) {
- ret = blk_rq_map_kern(sdev->request_queue, req,
- buffer, bufflen, GFP_NOIO);
+ ret = blk_rq_map_kern(req, buffer, bufflen, GFP_NOIO);
if (ret)
goto out;
}
@@ -2004,9 +2003,6 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
lim->dma_alignment = max_t(unsigned int,
shost->dma_alignment, dma_get_cache_alignment() - 1);
- if (shost->no_highmem)
- lim->features |= BLK_FEAT_BOUNCE_HIGH;
-
/*
* Propagate the DMA formation properties to the dma-mapping layer as
* a courtesy service to the LLDDs. This needs to check that the buses
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 9fc397a9ce7a..5b2b19f5e8ec 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -79,8 +79,6 @@ extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
blist_flags_t flags,
enum scsi_devinfo_key key);
-extern int scsi_dev_info_list_del_keyed(char *vendor, char *model,
- enum scsi_devinfo_key key);
extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name);
extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 082f76e76721..6b165a3ec6de 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3509,7 +3509,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
* state as the LLDD would not have had an rport
* reference to pass us.
*
- * Take no action on the del_timer failure as the state
+ * Take no action on the timer_delete() failure as the state
* machine state change will validate the
* transaction.
*/
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0b8c91bf793f..c75a806496d6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3499,7 +3499,7 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.new_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_new_fnode;
}
index = transport->new_flashnode(shost, data, len);
@@ -3509,7 +3509,6 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
else
err = -EIO;
-put_host:
scsi_host_put(shost);
exit_new_fnode:
@@ -3534,7 +3533,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.del_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_del_fnode;
}
idx = ev->u.del_flashnode.flashnode_idx;
@@ -3576,7 +3575,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.login_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_login_fnode;
}
idx = ev->u.login_flashnode.flashnode_idx;
@@ -3628,7 +3627,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.logout_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_logout_fnode;
}
idx = ev->u.logout_flashnode.flashnode_idx;
@@ -3678,7 +3677,7 @@ static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.logout_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_logout_sid;
}
session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 950d8c9fb884..3f6e87705b62 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3215,7 +3215,7 @@ static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id)
return false;
if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status))
return false;
- return buf.h.stream_status[0].perm;
+ return buf.s.perm;
}
static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 7a447ff600d2..a8db66428f80 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -169,6 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
unsigned int nr_zones, size_t *buflen)
{
struct request_queue *q = sdkp->disk->queue;
+ unsigned int max_segments;
size_t bufsize;
void *buf;
@@ -180,12 +181,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
* Furthermore, since the report zone command cannot be split, make
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
+ * Since max segments can be larger than the max inline bio vectors,
+ * further limit the allocated buffer to BIO_MAX_INLINE_VECS.
*/
nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
- bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+ max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q));
+ bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT);
while (bufsize >= SECTOR_SIZE) {
buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
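
The extra clamp matters because the report buffer is mapped through an inline bio vector: assuming 4 KiB pages and BIO_MAX_INLINE_VECS == 1024 (an assumption; the constant lives in the block layer), a queue advertising 2048 segments would otherwise permit an 8 MiB buffer, while the inline vector can map at most 1024 pages, i.e. 4 MiB. A hedged sketch of the clamping order used above:

#include <stddef.h>

#define EX_PAGE_SHIFT 12		/* 4 KiB pages, assumed for the example */
#define EX_BIO_MAX_INLINE_VECS 1024	/* assumed value of BIO_MAX_INLINE_VECS */

static size_t clamp_report_bufsize(size_t bufsize, size_t max_hw_bytes,
				   unsigned int queue_max_segments)
{
	unsigned int max_segments;

	if (bufsize > max_hw_bytes)			/* HBA transfer limit */
		bufsize = max_hw_bytes;
	max_segments = queue_max_segments < EX_BIO_MAX_INLINE_VECS ?
		queue_max_segments : EX_BIO_MAX_INLINE_VECS;
	if (bufsize > ((size_t)max_segments << EX_PAGE_SHIFT))
		bufsize = (size_t)max_segments << EX_PAGE_SHIFT;
	return bufsize;				/* 2048 segs -> 4 MiB, not 8 MiB */
}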
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index effb7e768165..3c02a5f7b5f3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1658,8 +1658,7 @@ static void register_sg_sysctls(void)
static void unregister_sg_sysctls(void)
{
- if (hdr)
- unregister_sysctl_table(hdr);
+ unregister_sysctl_table(hdr);
}
#else
#define register_sg_sysctls() do { } while (0)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8a26eca4fdc9..3d40a63e378d 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.30-031"
+#define DRIVER_VERSION "2.1.34-035"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 30
-#define DRIVER_REVISION 31
+#define DRIVER_RELEASE 34
+#define DRIVER_REVISION 35
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -68,6 +68,7 @@ static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
@@ -2011,18 +2012,31 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"-:-");
- if (pqi_is_logical_device(device))
+ if (pqi_is_logical_device(device)) {
count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" %08x%08x",
*((u32 *)&device->scsi3addr),
*((u32 *)&device->scsi3addr[4]));
- else
+ } else if (ctrl_info->rpl_extended_format_4_5_supported) {
+ if (device->device_type == SA_DEVICE_TYPE_NVME)
+ count += scnprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %016llx%016llx",
+ get_unaligned_be64(&device->wwid[0]),
+ get_unaligned_be64(&device->wwid[8]));
+ else
+ count += scnprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %016llx",
+ get_unaligned_be64(&device->wwid[0]));
+ } else {
count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
- " %016llx%016llx",
- get_unaligned_be64(&device->wwid[0]),
- get_unaligned_be64(&device->wwid[8]));
+ " %016llx",
+ get_unaligned_be64(&device->wwid[0]));
+ }
+
count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
" %s %.8s %.16s ",
@@ -3811,7 +3825,8 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
int num_interrupts;
u32 heartbeat_count;
- struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
+ struct pqi_ctrl_info *ctrl_info = timer_container_of(ctrl_info, t,
+ heartbeat_timer);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
@@ -5990,7 +6005,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
pqi_stream_data->next_lba = rmd.first_block +
rmd.block_cnt;
pqi_stream_data->last_accessed = jiffies;
- per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
+ per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++;
return true;
}
@@ -6069,7 +6084,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
+ per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -9129,6 +9144,7 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_all_outstanding_requests(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_take_ctrl_devices_offline(ctrl_info);
}
static void pqi_ctrl_offline_worker(struct work_struct *work)
@@ -9203,6 +9219,27 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
schedule_work(&ctrl_info->ctrl_offline_work);
}
+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list);
+ if (rc)
+ continue;
+
+ /*
+ * Only devices that were exposed to the SCSI midlayer have a
+ * valid sdev pointer; skip the rest.
+ */
+ if (device->sdev)
+ scsi_device_set_state(device->sdev, SDEV_OFFLINE);
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
@@ -9711,6 +9748,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x00a3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1ff9, 0x00a1)
},
{
@@ -10047,6 +10088,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4044)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4084)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4094)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4140)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4240)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
@@ -10263,6 +10328,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1018, 0x8238)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f3f, 0x0610)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0220)
},
{
@@ -10271,10 +10344,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0222)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0223)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0224)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0225)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0520)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0521)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0522)
},
{
@@ -10295,6 +10388,26 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0624)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0625)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0626)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0627)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0628)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1014, 0x0718)
},
{
@@ -10323,6 +10436,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ded, 0x3301)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1ff9, 0x0045)
},
{
@@ -10471,6 +10588,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x100e)
},
{
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 35db061ae3ec..d9e59204a9c3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -362,7 +362,7 @@ MODULE_PARM_DESC(ring_avail_percent_lowater,
/*
* Timeout in seconds for all devices managed by this driver.
*/
-static int storvsc_timeout = 180;
+static const int storvsc_timeout = 180;
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct scsi_transport_template *fc_transport_template;
@@ -768,7 +768,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
return;
}
- t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0) {
dev_err(dev, "Failed to create sub-channel: timed out\n");
return;
@@ -833,7 +833,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
if (ret != 0)
return ret;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0)
return -ETIMEDOUT;
@@ -1350,6 +1350,8 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
return ret;
ret = storvsc_channel_init(device, is_fc);
+ if (ret)
+ vmbus_close(device->channel);
return ret;
}
@@ -1668,7 +1670,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
if (ret != 0)
return FAILED;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0)
return TIMEOUT_ERROR;
@@ -1819,6 +1821,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+ payload->rangecount = 1;
payload->range.len = length;
payload->range.offset = offset_in_hvpg;
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 1a6eb72ca281..57637a81776d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -545,7 +545,7 @@ static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
*/
static void sym53c8xx_timer(struct timer_list *t)
{
- struct sym_hcb *np = from_timer(np, t, s.timer);
+ struct sym_hcb *np = timer_container_of(np, t, s.timer);
unsigned long flags;
spin_lock_irqsave(np->s.host->host_lock, flags);
diff --git a/drivers/sh/intc/irqdomain.c b/drivers/sh/intc/irqdomain.c
index 3968f1c3c5c3..ed7a570ffdf2 100644
--- a/drivers/sh/intc/irqdomain.c
+++ b/drivers/sh/intc/irqdomain.c
@@ -59,10 +59,9 @@ void __init intc_irq_domain_init(struct intc_desc_int *d,
* tree penalty for linear cases with non-zero hwirq bases.
*/
if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1))
- d->domain = irq_domain_add_linear(NULL, hw->nr_vectors,
- &intc_evt_ops, NULL);
+ d->domain = irq_domain_create_linear(NULL, hw->nr_vectors, &intc_evt_ops, NULL);
else
- d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL);
+ d->domain = irq_domain_create_tree(NULL, &intc_evt_ops, NULL);
BUG_ON(!d->domain);
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 6a8daeb8c4b9..a2d65adffb80 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -23,11 +23,13 @@ source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
source "drivers/soc/samsung/Kconfig"
+source "drivers/soc/sophgo/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
+source "drivers/soc/vt8500/Kconfig"
source "drivers/soc/xilinx/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 2037a8695cb2..c9e689080ceb 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -29,9 +29,11 @@ obj-y += qcom/
obj-y += renesas/
obj-y += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
+obj-y += sophgo/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-y += versatile/
+obj-y += vt8500/
obj-y += xilinx/
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index a6453ffeb753..d862e30a244e 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -14,11 +14,6 @@
static DEFINE_MUTEX(measure_lock);
-#define MSR_CLK_DUTY 0x0
-#define MSR_CLK_REG0 0x4
-#define MSR_CLK_REG1 0x8
-#define MSR_CLK_REG2 0xc
-
#define MSR_DURATION GENMASK(15, 0)
#define MSR_ENABLE BIT(16)
#define MSR_CONT BIT(17) /* continuous measurement */
@@ -33,23 +28,34 @@ static DEFINE_MUTEX(measure_lock);
#define DIV_STEP 32
#define DIV_MAX 640
-#define CLK_MSR_MAX 128
-
struct meson_msr_id {
struct meson_msr *priv;
unsigned int id;
const char *name;
};
+struct msr_reg_offset {
+ unsigned int duty_val;
+ unsigned int freq_ctrl;
+ unsigned int duty_ctrl;
+ unsigned int freq_val;
+};
+
+struct meson_msr_data {
+ struct meson_msr_id *msr_table;
+ unsigned int msr_count;
+ const struct msr_reg_offset *reg;
+};
+
struct meson_msr {
struct regmap *regmap;
- struct meson_msr_id msr_table[CLK_MSR_MAX];
+ struct meson_msr_data data;
};
#define CLK_MSR_ID(__id, __name) \
[__id] = {.id = __id, .name = __name,}
-static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_m8[] = {
CLK_MSR_ID(0, "ring_osc_out_ee0"),
CLK_MSR_ID(1, "ring_osc_out_ee1"),
CLK_MSR_ID(2, "ring_osc_out_ee2"),
@@ -98,7 +104,7 @@ static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = {
CLK_MSR_ID(63, "mipi_csi_cfg"),
};
-static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_gx[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -168,7 +174,7 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
CLK_MSR_ID(82, "ge2d"),
};
-static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_axg[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -242,7 +248,7 @@ static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
CLK_MSR_ID(109, "audio_locker_in"),
};
-static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_g12a[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -358,7 +364,7 @@ static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
CLK_MSR_ID(122, "audio_pdm_dclk"),
};
-static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
+static const struct meson_msr_id clk_msr_sm1[] = {
CLK_MSR_ID(0, "ring_osc_out_ee_0"),
CLK_MSR_ID(1, "ring_osc_out_ee_1"),
CLK_MSR_ID(2, "ring_osc_out_ee_2"),
@@ -488,10 +494,304 @@ static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
CLK_MSR_ID(127, "csi2_data"),
};
+static const struct meson_msr_id clk_msr_c3[] = {
+ CLK_MSR_ID(0, "sys_clk"),
+ CLK_MSR_ID(1, "axi_clk"),
+ CLK_MSR_ID(2, "rtc_clk"),
+ CLK_MSR_ID(3, "p20_usb2_ckout"),
+ CLK_MSR_ID(4, "eth_mpll_test"),
+ CLK_MSR_ID(5, "sys_pll"),
+ CLK_MSR_ID(6, "cpu_clk_div16"),
+ CLK_MSR_ID(7, "ts_pll"),
+ CLK_MSR_ID(8, "fclk_div2"),
+ CLK_MSR_ID(9, "fclk_div2p5"),
+ CLK_MSR_ID(10, "fclk_div3"),
+ CLK_MSR_ID(11, "fclk_div4"),
+ CLK_MSR_ID(12, "fclk_div5"),
+ CLK_MSR_ID(13, "fclk_div7"),
+ CLK_MSR_ID(15, "fclk_50m"),
+ CLK_MSR_ID(16, "sys_oscin32k_i"),
+ CLK_MSR_ID(17, "mclk_pll"),
+ CLK_MSR_ID(19, "hifi_pll"),
+ CLK_MSR_ID(20, "gp0_pll"),
+ CLK_MSR_ID(21, "gp1_pll"),
+ CLK_MSR_ID(22, "eth_mppll_50m_ckout"),
+ CLK_MSR_ID(23, "sys_pll_div16"),
+ CLK_MSR_ID(24, "ddr_dpll_pt_clk"),
+ CLK_MSR_ID(26, "nna_core"),
+ CLK_MSR_ID(27, "rtc_sec_pulse_out"),
+ CLK_MSR_ID(28, "rtc_osc_clk_out"),
+ CLK_MSR_ID(29, "debug_in_clk"),
+ CLK_MSR_ID(30, "mod_eth_phy_ref_clk"),
+ CLK_MSR_ID(31, "mod_eth_tx_clk"),
+ CLK_MSR_ID(32, "eth_125m"),
+ CLK_MSR_ID(33, "eth_rmii"),
+ CLK_MSR_ID(34, "co_clkin_to_mac"),
+ CLK_MSR_ID(36, "co_rx_clk"),
+ CLK_MSR_ID(37, "co_tx_clk"),
+ CLK_MSR_ID(38, "eth_phy_rxclk"),
+ CLK_MSR_ID(39, "eth_phy_plltxclk"),
+ CLK_MSR_ID(40, "ephy_test_clk"),
+ CLK_MSR_ID(66, "vapb"),
+ CLK_MSR_ID(67, "ge2d"),
+ CLK_MSR_ID(68, "dewarpa"),
+ CLK_MSR_ID(70, "mipi_dsi_meas"),
+ CLK_MSR_ID(71, "dsi_phy"),
+ CLK_MSR_ID(79, "rama"),
+ CLK_MSR_ID(94, "vc9000e_core"),
+ CLK_MSR_ID(95, "vc9000e_sys"),
+ CLK_MSR_ID(96, "vc9000e_aclk"),
+ CLK_MSR_ID(97, "hcodec"),
+ CLK_MSR_ID(106, "deskew_pll_clk_div32_out"),
+ CLK_MSR_ID(107, "mipi_csi_phy_clk_out[0]"),
+ CLK_MSR_ID(108, "mipi_csi_phy_clk_out[1]"),
+ CLK_MSR_ID(110, "spifc"),
+ CLK_MSR_ID(111, "saradc"),
+ CLK_MSR_ID(112, "ts"),
+ CLK_MSR_ID(113, "sd_emmc_c"),
+ CLK_MSR_ID(114, "sd_emmc_b"),
+ CLK_MSR_ID(115, "sd_emmc_a"),
+ CLK_MSR_ID(116, "gpio_msr_clk"),
+ CLK_MSR_ID(117, "spicc_b"),
+ CLK_MSR_ID(118, "spicc_a"),
+ CLK_MSR_ID(122, "mod_audio_pdm_dclk_o"),
+ CLK_MSR_ID(124, "o_earcrx_dmac_clk"),
+ CLK_MSR_ID(125, "o_earcrx_cmdc_clk"),
+ CLK_MSR_ID(126, "o_earctx_dmac_clk"),
+ CLK_MSR_ID(127, "o_earctx_cmdc_clk"),
+ CLK_MSR_ID(128, "o_tohdmitx_bclk"),
+ CLK_MSR_ID(129, "o_tohdmitx_mclk"),
+ CLK_MSR_ID(130, "o_tohdmitx_spdif_clk"),
+ CLK_MSR_ID(131, "o_toacodec_bclk"),
+ CLK_MSR_ID(132, "o_toacodec_mclk"),
+ CLK_MSR_ID(133, "o_spdifout_b_mst_clk"),
+ CLK_MSR_ID(134, "o_spdifout_mst_clk"),
+ CLK_MSR_ID(135, "o_spdifin_mst_clk"),
+ CLK_MSR_ID(136, "o_audio_mclk"),
+ CLK_MSR_ID(137, "o_vad_clk"),
+ CLK_MSR_ID(138, "o_tdmout_d_sclk"),
+ CLK_MSR_ID(139, "o_tdmout_c_sclk"),
+ CLK_MSR_ID(140, "o_tdmout_b_sclk"),
+ CLK_MSR_ID(141, "o_tdmout_a_sclk"),
+ CLK_MSR_ID(142, "o_tdminb_1b_sclk"),
+ CLK_MSR_ID(143, "o_tdmin_1b_sclk"),
+ CLK_MSR_ID(144, "o_tdmin_d_sclk"),
+ CLK_MSR_ID(145, "o_tdmin_c_sclk"),
+ CLK_MSR_ID(146, "o_tdmin_b_sclk"),
+ CLK_MSR_ID(147, "o_tdmin_a_sclk"),
+ CLK_MSR_ID(148, "o_resampleb_clk"),
+ CLK_MSR_ID(149, "o_resamplea_clk"),
+ CLK_MSR_ID(150, "o_pdmb_sysclk"),
+ CLK_MSR_ID(151, "o_pdmb_dclk"),
+ CLK_MSR_ID(152, "o_pdm_sysclk"),
+ CLK_MSR_ID(153, "o_pdm_dclk"),
+ CLK_MSR_ID(154, "c_alockerb_out_clk"),
+ CLK_MSR_ID(155, "c_alockerb_in_clk"),
+ CLK_MSR_ID(156, "c_alocker_out_clk"),
+ CLK_MSR_ID(157, "c_alocker_in_clk"),
+ CLK_MSR_ID(158, "audio_mst_clk[34]"),
+ CLK_MSR_ID(159, "audio_mst_clk[35]"),
+ CLK_MSR_ID(160, "pwm_n"),
+ CLK_MSR_ID(161, "pwm_m"),
+ CLK_MSR_ID(162, "pwm_l"),
+ CLK_MSR_ID(163, "pwm_k"),
+ CLK_MSR_ID(164, "pwm_j"),
+ CLK_MSR_ID(165, "pwm_i"),
+ CLK_MSR_ID(166, "pwm_h"),
+ CLK_MSR_ID(167, "pwm_g"),
+ CLK_MSR_ID(168, "pwm_f"),
+ CLK_MSR_ID(169, "pwm_e"),
+ CLK_MSR_ID(170, "pwm_d"),
+ CLK_MSR_ID(171, "pwm_c"),
+ CLK_MSR_ID(172, "pwm_b"),
+ CLK_MSR_ID(173, "pwm_a"),
+ CLK_MSR_ID(174, "AU_DAC1_CLK_TO_GPIO"),
+ CLK_MSR_ID(175, "AU_ADC_CLK_TO_GPIO"),
+ CLK_MSR_ID(176, "rng_ring_osc_clk[0]"),
+ CLK_MSR_ID(177, "rng_ring_osc_clk[1]"),
+ CLK_MSR_ID(178, "rng_ring_osc_clk[2]"),
+ CLK_MSR_ID(179, "rng_ring_osc_clk[3]"),
+ CLK_MSR_ID(180, "sys_cpu_ring_osc_clk[0]"),
+ CLK_MSR_ID(181, "sys_cpu_ring_osc_clk[1]"),
+ CLK_MSR_ID(182, "sys_cpu_ring_osc_clk[2]"),
+ CLK_MSR_ID(183, "sys_cpu_ring_osc_clk[3]"),
+ CLK_MSR_ID(184, "sys_cpu_ring_osc_clk[4]"),
+ CLK_MSR_ID(185, "sys_cpu_ring_osc_clk[5]"),
+ CLK_MSR_ID(186, "sys_cpu_ring_osc_clk[6]"),
+ CLK_MSR_ID(187, "sys_cpu_ring_osc_clk[7]"),
+ CLK_MSR_ID(188, "sys_cpu_ring_osc_clk[8]"),
+ CLK_MSR_ID(189, "sys_cpu_ring_osc_clk[9]"),
+ CLK_MSR_ID(190, "sys_cpu_ring_osc_clk[10]"),
+ CLK_MSR_ID(191, "sys_cpu_ring_osc_clk[11]"),
+ CLK_MSR_ID(192, "am_ring_osc_clk_out[12](dmc)"),
+ CLK_MSR_ID(193, "am_ring_osc_clk_out[13](rama)"),
+ CLK_MSR_ID(194, "am_ring_osc_clk_out[14](nna)"),
+ CLK_MSR_ID(195, "am_ring_osc_clk_out[15](nna)"),
+ CLK_MSR_ID(200, "rng_ring_osc_clk_1[0]"),
+ CLK_MSR_ID(201, "rng_ring_osc_clk_1[1]"),
+ CLK_MSR_ID(202, "rng_ring_osc_clk_1[2]"),
+ CLK_MSR_ID(203, "rng_ring_osc_clk_1[3]"),
+};
+
+static const struct meson_msr_id clk_msr_s4[] = {
+ CLK_MSR_ID(0, "sys_clk"),
+ CLK_MSR_ID(1, "axi_clk"),
+ CLK_MSR_ID(2, "rtc_clk"),
+ CLK_MSR_ID(5, "mali"),
+ CLK_MSR_ID(6, "cpu_clk_div16"),
+ CLK_MSR_ID(7, "ceca_clk"),
+ CLK_MSR_ID(8, "cecb_clk"),
+ CLK_MSR_ID(10, "fclk_div5"),
+ CLK_MSR_ID(11, "mpll0"),
+ CLK_MSR_ID(12, "mpll1"),
+ CLK_MSR_ID(13, "mpll2"),
+ CLK_MSR_ID(14, "mpll3"),
+ CLK_MSR_ID(15, "fclk_50m"),
+ CLK_MSR_ID(16, "pcie_clk_inp"),
+ CLK_MSR_ID(17, "pcie_clk_inn"),
+ CLK_MSR_ID(18, "mpll_clk_test_out"),
+ CLK_MSR_ID(19, "hifi_pll"),
+ CLK_MSR_ID(20, "gp0_pll"),
+ CLK_MSR_ID(21, "gp1_pll"),
+ CLK_MSR_ID(22, "eth_mppll_50m_ckout"),
+ CLK_MSR_ID(23, "sys_pll_div16"),
+ CLK_MSR_ID(24, "ddr_dpll_pt_clk"),
+ CLK_MSR_ID(30, "mod_eth_phy_ref_clk"),
+ CLK_MSR_ID(31, "mod_eth_tx_clk"),
+ CLK_MSR_ID(32, "eth_125m"),
+ CLK_MSR_ID(33, "eth_rmii"),
+ CLK_MSR_ID(34, "co_clkin_to_mac"),
+ CLK_MSR_ID(35, "mod_eth_rx_clk_rmii"),
+ CLK_MSR_ID(36, "co_rx_clk"),
+ CLK_MSR_ID(37, "co_tx_clk"),
+ CLK_MSR_ID(38, "eth_phy_rxclk"),
+ CLK_MSR_ID(39, "eth_phy_plltxclk"),
+ CLK_MSR_ID(40, "ephy_test_clk"),
+ CLK_MSR_ID(50, "vid_pll_div_clk_out"),
+ CLK_MSR_ID(51, "enci"),
+ CLK_MSR_ID(52, "encp"),
+ CLK_MSR_ID(53, "encl"),
+ CLK_MSR_ID(54, "vdac"),
+ CLK_MSR_ID(55, "cdac_clk_c"),
+ CLK_MSR_ID(56, "mod_tcon_clko"),
+ CLK_MSR_ID(57, "lcd_an_clk_ph2"),
+ CLK_MSR_ID(58, "lcd_an_clk_ph3"),
+ CLK_MSR_ID(59, "hdmitx_pixel"),
+ CLK_MSR_ID(60, "vdin_meas"),
+ CLK_MSR_ID(61, "vpu"),
+ CLK_MSR_ID(62, "vpu_clkb"),
+ CLK_MSR_ID(63, "vpu_clkb_tmp"),
+ CLK_MSR_ID(64, "vpu_clkc"),
+ CLK_MSR_ID(65, "vid_lock"),
+ CLK_MSR_ID(66, "vapb"),
+ CLK_MSR_ID(67, "ge2d"),
+ CLK_MSR_ID(68, "cts_hdcp22_esmclk"),
+ CLK_MSR_ID(69, "cts_hdcp22_skpclk"),
+ CLK_MSR_ID(76, "hdmitx_tmds"),
+ CLK_MSR_ID(77, "hdmitx_sys_clk"),
+ CLK_MSR_ID(78, "hdmitx_fe_clk"),
+ CLK_MSR_ID(79, "rama"),
+ CLK_MSR_ID(93, "vdec"),
+ CLK_MSR_ID(99, "hevcf"),
+ CLK_MSR_ID(100, "demod_core"),
+ CLK_MSR_ID(101, "adc_extclk_in"),
+ CLK_MSR_ID(102, "cts_demod_core_t2_clk"),
+ CLK_MSR_ID(103, "adc_dpll_intclk"),
+ CLK_MSR_ID(104, "adc_dpll_clk_b3"),
+ CLK_MSR_ID(105, "s2_adc_clk"),
+ CLK_MSR_ID(106, "deskew_pll_clk_div32_out"),
+ CLK_MSR_ID(110, "sc"),
+ CLK_MSR_ID(111, "sar_adc"),
+ CLK_MSR_ID(113, "sd_emmc_c"),
+ CLK_MSR_ID(114, "sd_emmc_b"),
+ CLK_MSR_ID(115, "sd_emmc_a"),
+ CLK_MSR_ID(116, "gpio_msr_clk"),
+ CLK_MSR_ID(118, "spicc0"),
+ CLK_MSR_ID(121, "ts"),
+ CLK_MSR_ID(130, "audio_vad_clk"),
+ CLK_MSR_ID(131, "acodec_dac_clk_x128"),
+ CLK_MSR_ID(132, "audio_locker_in_clk"),
+ CLK_MSR_ID(133, "audio_locker_out_clk"),
+ CLK_MSR_ID(134, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(135, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(136, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(137, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(138, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(139, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(140, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(141, "audio_resamplea_clk"),
+ CLK_MSR_ID(142, "audio_pdm_sysclk"),
+ CLK_MSR_ID(143, "audio_spdifout_b_mst_clk"),
+ CLK_MSR_ID(144, "audio_spdifout_mst_clk"),
+ CLK_MSR_ID(145, "audio_spdifin_mst_clk"),
+ CLK_MSR_ID(146, "audio_pdm_dclk"),
+ CLK_MSR_ID(147, "audio_resampleb_clk"),
+ CLK_MSR_ID(160, "pwm_j"),
+ CLK_MSR_ID(161, "pwm_i"),
+ CLK_MSR_ID(162, "pwm_h"),
+ CLK_MSR_ID(163, "pwm_g"),
+ CLK_MSR_ID(164, "pwm_f"),
+ CLK_MSR_ID(165, "pwm_e"),
+ CLK_MSR_ID(166, "pwm_d"),
+ CLK_MSR_ID(167, "pwm_c"),
+ CLK_MSR_ID(168, "pwm_b"),
+ CLK_MSR_ID(169, "pwm_a"),
+ CLK_MSR_ID(176, "rng_ring_0"),
+ CLK_MSR_ID(177, "rng_ring_1"),
+ CLK_MSR_ID(178, "rng_ring_2"),
+ CLK_MSR_ID(179, "rng_ring_3"),
+ CLK_MSR_ID(180, "dmc_osc_ring(LVT16)"),
+ CLK_MSR_ID(181, "gpu_osc_ring0(LVT16)"),
+ CLK_MSR_ID(182, "gpu_osc_ring1(ULVT16)"),
+ CLK_MSR_ID(183, "gpu_osc_ring2(SLVT16)"),
+ CLK_MSR_ID(184, "vpu_osc_ring0(SVT24)"),
+ CLK_MSR_ID(185, "vpu_osc_ring1(LVT20)"),
+ CLK_MSR_ID(186, "vpu_osc_ring2(LVT16)"),
+ CLK_MSR_ID(187, "dos_osc_ring0(SVT24)"),
+ CLK_MSR_ID(188, "dos_osc_ring1(SVT16)"),
+ CLK_MSR_ID(189, "dos_osc_ring2(LVT16)"),
+ CLK_MSR_ID(190, "dos_osc_ring3(ULVT20)"),
+ CLK_MSR_ID(192, "axi_sram_osc_ring(SVT16)"),
+ CLK_MSR_ID(193, "demod_osc_ring0"),
+ CLK_MSR_ID(194, "demod_osc_ring1"),
+ CLK_MSR_ID(195, "sar_osc_ring"),
+ CLK_MSR_ID(196, "sys_cpu_osc_ring0"),
+ CLK_MSR_ID(197, "sys_cpu_osc_ring1"),
+ CLK_MSR_ID(198, "sys_cpu_osc_ring2"),
+ CLK_MSR_ID(199, "sys_cpu_osc_ring3"),
+ CLK_MSR_ID(200, "sys_cpu_osc_ring4"),
+ CLK_MSR_ID(201, "sys_cpu_osc_ring5"),
+ CLK_MSR_ID(202, "sys_cpu_osc_ring6"),
+ CLK_MSR_ID(203, "sys_cpu_osc_ring7"),
+ CLK_MSR_ID(204, "sys_cpu_osc_ring8"),
+ CLK_MSR_ID(205, "sys_cpu_osc_ring9"),
+ CLK_MSR_ID(206, "sys_cpu_osc_ring10"),
+ CLK_MSR_ID(207, "sys_cpu_osc_ring11"),
+ CLK_MSR_ID(208, "sys_cpu_osc_ring12"),
+ CLK_MSR_ID(209, "sys_cpu_osc_ring13"),
+ CLK_MSR_ID(210, "sys_cpu_osc_ring14"),
+ CLK_MSR_ID(211, "sys_cpu_osc_ring15"),
+ CLK_MSR_ID(212, "sys_cpu_osc_ring16"),
+ CLK_MSR_ID(213, "sys_cpu_osc_ring17"),
+ CLK_MSR_ID(214, "sys_cpu_osc_ring18"),
+ CLK_MSR_ID(215, "sys_cpu_osc_ring19"),
+ CLK_MSR_ID(216, "sys_cpu_osc_ring20"),
+ CLK_MSR_ID(217, "sys_cpu_osc_ring21"),
+ CLK_MSR_ID(218, "sys_cpu_osc_ring22"),
+ CLK_MSR_ID(219, "sys_cpu_osc_ring23"),
+ CLK_MSR_ID(220, "sys_cpu_osc_ring24"),
+ CLK_MSR_ID(221, "sys_cpu_osc_ring25"),
+ CLK_MSR_ID(222, "sys_cpu_osc_ring26"),
+ CLK_MSR_ID(223, "sys_cpu_osc_ring27"),
+};
+
static int meson_measure_id(struct meson_msr_id *clk_msr_id,
- unsigned int duration)
+ unsigned int duration)
{
struct meson_msr *priv = clk_msr_id->priv;
+ const struct msr_reg_offset *reg = priv->data.reg;
unsigned int val;
int ret;
@@ -499,22 +799,22 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
if (ret)
return ret;
- regmap_write(priv->regmap, MSR_CLK_REG0, 0);
+ regmap_write(priv->regmap, reg->freq_ctrl, 0);
/* Set measurement duration */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_DURATION,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_DURATION,
FIELD_PREP(MSR_DURATION, duration - 1));
/* Set ID */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_CLK_SRC,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_CLK_SRC,
FIELD_PREP(MSR_CLK_SRC, clk_msr_id->id));
/* Enable & Start */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0,
+ regmap_update_bits(priv->regmap, reg->freq_ctrl,
MSR_RUN | MSR_ENABLE,
MSR_RUN | MSR_ENABLE);
- ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0,
+ ret = regmap_read_poll_timeout(priv->regmap, reg->freq_ctrl,
val, !(val & MSR_BUSY), 10, 10000);
if (ret) {
mutex_unlock(&measure_lock);
@@ -522,10 +822,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
}
/* Disable */
- regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0);
+ regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_ENABLE, 0);
/* Get the value in multiple of gate time counts */
- regmap_read(priv->regmap, MSR_CLK_REG2, &val);
+ regmap_read(priv->regmap, reg->freq_val, &val);
mutex_unlock(&measure_lock);
@@ -573,13 +873,14 @@ DEFINE_SHOW_ATTRIBUTE(clk_msr);
static int clk_msr_summary_show(struct seq_file *s, void *data)
{
struct meson_msr_id *msr_table = s->private;
+ unsigned int msr_count = msr_table->priv->data.msr_count;
unsigned int precision = 0;
int val, i;
seq_puts(s, " clock rate precision\n");
seq_puts(s, "---------------------------------------------\n");
- for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
+ for (i = 0 ; i < msr_count ; ++i) {
if (!msr_table[i].name)
continue;
@@ -595,18 +896,18 @@ static int clk_msr_summary_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(clk_msr_summary);
-static const struct regmap_config meson_clk_msr_regmap_config = {
+static struct regmap_config meson_clk_msr_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = MSR_CLK_REG2,
};
static int meson_msr_probe(struct platform_device *pdev)
{
- const struct meson_msr_id *match_data;
+ const struct meson_msr_data *match_data;
struct meson_msr *priv;
struct dentry *root, *clks;
+ struct resource *res;
void __iomem *base;
int i;
@@ -621,60 +922,142 @@ static int meson_msr_probe(struct platform_device *pdev)
return -ENODEV;
}
- memcpy(priv->msr_table, match_data, sizeof(priv->msr_table));
+ priv->data.msr_table = devm_kcalloc(&pdev->dev,
+ match_data->msr_count,
+ sizeof(struct meson_msr_id),
+ GFP_KERNEL);
+ if (!priv->data.msr_table)
+ return -ENOMEM;
- base = devm_platform_ioremap_resource(pdev, 0);
+ memcpy(priv->data.msr_table, match_data->msr_table,
+ match_data->msr_count * sizeof(struct meson_msr_id));
+ priv->data.msr_count = match_data->msr_count;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
+ meson_clk_msr_regmap_config.max_register = resource_size(res) - 4;
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_clk_msr_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
+ priv->data.reg = devm_kzalloc(&pdev->dev, sizeof(struct msr_reg_offset),
+ GFP_KERNEL);
+ if (!priv->data.reg)
+ return -ENOMEM;
+
+ memcpy((void *)priv->data.reg, match_data->reg,
+ sizeof(struct msr_reg_offset));
+
root = debugfs_create_dir("meson-clk-msr", NULL);
clks = debugfs_create_dir("clks", root);
debugfs_create_file("measure_summary", 0444, root,
- priv->msr_table, &clk_msr_summary_fops);
+ priv->data.msr_table, &clk_msr_summary_fops);
- for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
- if (!priv->msr_table[i].name)
+ for (i = 0 ; i < priv->data.msr_count ; ++i) {
+ if (!priv->data.msr_table[i].name)
continue;
- priv->msr_table[i].priv = priv;
+ priv->data.msr_table[i].priv = priv;
- debugfs_create_file(priv->msr_table[i].name, 0444, clks,
- &priv->msr_table[i], &clk_msr_fops);
+ debugfs_create_file(priv->data.msr_table[i].name, 0444, clks,
+ &priv->data.msr_table[i], &clk_msr_fops);
}
return 0;
}
+static const struct msr_reg_offset msr_reg_offset = {
+ .duty_val = 0x0,
+ .freq_ctrl = 0x4,
+ .duty_ctrl = 0x8,
+ .freq_val = 0xc,
+};
+
+static const struct meson_msr_data clk_msr_gx_data = {
+ .msr_table = (void *)clk_msr_gx,
+ .msr_count = ARRAY_SIZE(clk_msr_gx),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_m8_data = {
+ .msr_table = (void *)clk_msr_m8,
+ .msr_count = ARRAY_SIZE(clk_msr_m8),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_axg_data = {
+ .msr_table = (void *)clk_msr_axg,
+ .msr_count = ARRAY_SIZE(clk_msr_axg),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_g12a_data = {
+ .msr_table = (void *)clk_msr_g12a,
+ .msr_count = ARRAY_SIZE(clk_msr_g12a),
+ .reg = &msr_reg_offset,
+};
+
+static const struct meson_msr_data clk_msr_sm1_data = {
+ .msr_table = (void *)clk_msr_sm1,
+ .msr_count = ARRAY_SIZE(clk_msr_sm1),
+ .reg = &msr_reg_offset,
+};
+
+static const struct msr_reg_offset msr_reg_offset_v2 = {
+ .freq_ctrl = 0x0,
+ .duty_ctrl = 0x4,
+ .freq_val = 0x8,
+ .duty_val = 0x18,
+};
+
+static const struct meson_msr_data clk_msr_c3_data = {
+ .msr_table = (void *)clk_msr_c3,
+ .msr_count = ARRAY_SIZE(clk_msr_c3),
+ .reg = &msr_reg_offset_v2,
+};
+
+static const struct meson_msr_data clk_msr_s4_data = {
+ .msr_table = (void *)clk_msr_s4,
+ .msr_count = ARRAY_SIZE(clk_msr_s4),
+ .reg = &msr_reg_offset_v2,
+};
+
static const struct of_device_id meson_msr_match_table[] = {
{
.compatible = "amlogic,meson-gx-clk-measure",
- .data = (void *)clk_msr_gx,
+ .data = &clk_msr_gx_data,
},
{
.compatible = "amlogic,meson8-clk-measure",
- .data = (void *)clk_msr_m8,
+ .data = &clk_msr_m8_data,
},
{
.compatible = "amlogic,meson8b-clk-measure",
- .data = (void *)clk_msr_m8,
+ .data = &clk_msr_m8_data,
},
{
.compatible = "amlogic,meson-axg-clk-measure",
- .data = (void *)clk_msr_axg,
+ .data = &clk_msr_axg_data,
},
{
.compatible = "amlogic,meson-g12a-clk-measure",
- .data = (void *)clk_msr_g12a,
+ .data = &clk_msr_g12a_data,
},
{
.compatible = "amlogic,meson-sm1-clk-measure",
- .data = (void *)clk_msr_sm1,
+ .data = &clk_msr_sm1_data,
+ },
+ {
+ .compatible = "amlogic,c3-clk-measure",
+ .data = &clk_msr_c3_data,
+ },
+ {
+ .compatible = "amlogic,s4-clk-measure",
+ .data = &clk_msr_s4_data,
},
{ /* sentinel */ }
};
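
With the table, count, and register layout bundled into meson_msr_data, supporting a further SoC reduces to three pieces: an ID table, a register-offset struct (or reuse of an existing one), and a match entry. A hypothetical sketch, with every name invented for illustration:

/* Hypothetical new SoC hookup; names are illustrative only. */
static const struct meson_msr_id clk_msr_newsoc[] = {
	CLK_MSR_ID(0, "sys_clk"),
	CLK_MSR_ID(1, "axi_clk"),
};

static const struct meson_msr_data clk_msr_newsoc_data = {
	.msr_table = (void *)clk_msr_newsoc,
	.msr_count = ARRAY_SIZE(clk_msr_newsoc),
	.reg	   = &msr_reg_offset_v2,	/* reuse the v2 register layout */
};

/* ...plus one more entry in meson_msr_match_table:
 *	{ .compatible = "amlogic,newsoc-clk-measure", .data = &clk_msr_newsoc_data },
 */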
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index 9ab5ba9cf1d6..ef8f355589a5 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -166,7 +166,7 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
int rc;
lpc_snoop->irq = platform_get_irq(pdev, 0);
- if (!lpc_snoop->irq)
+ if (lpc_snoop->irq < 0)
return -ENODEV;
rc = devm_request_irq(dev, lpc_snoop->irq,
@@ -200,11 +200,15 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
lpc_snoop->chan[channel].miscdev.name =
devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
+ if (!lpc_snoop->chan[channel].miscdev.name) {
+ rc = -ENOMEM;
+ goto err_free_fifo;
+ }
lpc_snoop->chan[channel].miscdev.fops = &snoop_fops;
lpc_snoop->chan[channel].miscdev.parent = dev;
rc = misc_register(&lpc_snoop->chan[channel].miscdev);
if (rc)
- return rc;
+ goto err_free_fifo;
/* Enable LPC snoop channel at requested port */
switch (channel) {
@@ -221,7 +225,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
hicrb_en = HICRB_ENSNP1D;
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto err_misc_deregister;
}
regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
@@ -231,6 +236,12 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
regmap_update_bits(lpc_snoop->regmap, HICRB,
hicrb_en, hicrb_en);
+ return 0;
+
+err_misc_deregister:
+ misc_deregister(&lpc_snoop->chan[channel].miscdev);
+err_free_fifo:
+ kfifo_free(&lpc_snoop->chan[channel].fifo);
return rc;
}
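
The error paths above now follow the usual kernel goto-unwind ladder: each failure jumps to a label that releases exactly what has been acquired so far, in reverse order. A generic self-contained sketch of the shape (all functions are stubs invented for the example; enable_hw fails on purpose to exercise the unwind):

static int acquire_fifo(void)        { return 0; }
static void free_fifo(void)          { }
static int register_miscdev(void)    { return 0; }
static void deregister_miscdev(void) { }
static int enable_hw(void)           { return -1; /* force unwind */ }

int setup_channel(void)
{
	int rc;

	rc = acquire_fifo();		/* step 1 */
	if (rc)
		return rc;		/* nothing to undo yet */
	rc = register_miscdev();	/* step 2 */
	if (rc)
		goto err_free_fifo;
	rc = enable_hw();		/* step 3 */
	if (rc)
		goto err_misc_deregister;
	return 0;

err_misc_deregister:			/* undo step 2 */
	deregister_miscdev();
err_free_fifo:				/* undo step 1 */
	free_fifo();
	return rc;
}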
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index 6202dbcd20a8..7bbd3f940e4d 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -257,10 +257,9 @@ static void pmu_irq_handler(struct irq_desc *desc)
* So, let's structure the code so that the window is as small as
* possible.
*/
- irq_gc_lock(gc);
+ guard(raw_spinlock)(&gc->lock);
done &= readl_relaxed(base + PMC_IRQ_CAUSE);
writel_relaxed(done, base + PMC_IRQ_CAUSE);
- irq_gc_unlock(gc);
}
static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
@@ -274,8 +273,8 @@ static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
writel(0, pmu->pmc_base + PMC_IRQ_MASK);
writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
- domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_create_linear(of_fwnode_handle(pmu->of_node), NR_PMU_IRQS,
+ &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("%s: unable to add irq domain\n", name);
return -ENOMEM;
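
The guard() conversion above relies on <linux/cleanup.h> scope-based locking: the raw spinlock is released automatically when the scope exits, so the explicit unlock disappears and an early return can never leak the lock. A minimal sketch of the pattern:

#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/spinlock.h>

static u32 ack_pending_example(raw_spinlock_t *lock, void __iomem *cause, u32 done)
{
	guard(raw_spinlock)(lock);	/* dropped automatically at scope exit */

	done &= readl_relaxed(cause);
	writel_relaxed(done, cause);
	return done;
}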
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index a1e0bc8c1757..47870e29c290 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -36,7 +36,7 @@ config FSL_MC_DPIO
config DPAA2_CONSOLE
tristate "QorIQ DPAA2 console driver"
depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
- default y
+ default ARCH_LAYERSCAPE
help
Console driver for DPAA2 platforms. Exports 2 char devices,
/dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 4dc8aba33d9b..9be240999f87 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1270,7 +1270,7 @@ static int qman_create_portal(struct qman_portal *portal,
qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
- portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
+ portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL);
if (!portal->cgrs)
goto fail_cgrs;
/* initial snapshot is no-depletion */
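
The one-line qman fix is the standard sizeof(*dst) idiom: taking the element size from the destination pointer itself, rather than from a similarly named local, keeps the allocation correct even if one of the two declarations later changes type. Side by side (a sketch):

	/* fragile: 'cgrs' is a different local than 'portal->cgrs' */
	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);

	/* idiomatic: the size always tracks the destination */
	portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL);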
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 77bf0e83ffcc..4068b501a3a3 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -232,11 +232,6 @@ static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
iowrite32be(value, base + (reg >> 2));
}
-static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
-{
- return irq_get_chip_data(virq);
-}
-
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
{
return irq_data_get_irq_chip_data(d);
@@ -446,8 +441,8 @@ static int qe_ic_init(struct platform_device *pdev)
high_handler = NULL;
}
- qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
- &qe_ic_host_ops, qe_ic);
+ qe_ic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
if (qe_ic->irqhost == NULL) {
dev_err(dev, "failed to add irq domain\n");
return -ENODEV;
@@ -455,13 +450,11 @@ static int qe_ic_init(struct platform_device *pdev)
qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
- irq_set_handler_data(qe_ic->virq_low, qe_ic);
- irq_set_chained_handler(qe_ic->virq_low, low_handler);
+ irq_set_chained_handler_and_data(qe_ic->virq_low, low_handler, qe_ic);
- if (high_handler) {
- irq_set_handler_data(qe_ic->virq_high, qe_ic);
- irq_set_chained_handler(qe_ic->virq_high, high_handler);
- }
+ if (high_handler)
+ irq_set_chained_handler_and_data(qe_ic->virq_high,
+ high_handler, qe_ic);
return 0;
}
static const struct of_device_id qe_ic_ids[] = {
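
Besides being shorter, irq_set_chained_handler_and_data() installs the flow handler and its data in one operation under the irq descriptor lock instead of two separate calls. The transformation in general form (a sketch; virq, handler and data are placeholders):

	irq_set_handler_data(virq, data);
	irq_set_chained_handler(virq, handler);

becomes

	irq_set_chained_handler_and_data(virq, handler, data);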
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 444a8f59b7da..7fc353732d55 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -167,10 +167,6 @@ static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
{
- struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
-
- if (cl_info->pcc_comm_addr)
- iounmap(cl_info->pcc_comm_addr);
pcc_mbox_free_channel(hdev->cl_info.pcc_chan);
}
@@ -179,6 +175,7 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
struct mbox_client *cl = &cl_info->client;
struct pcc_mbox_chan *pcc_chan;
+ struct mbox_chan *mbox_chan;
struct device *dev = hdev->dev;
int rc;
@@ -196,7 +193,7 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
goto out;
}
cl_info->pcc_chan = pcc_chan;
- cl_info->mbox_chan = pcc_chan->mchan;
+ mbox_chan = pcc_chan->mchan;
/*
* pcc_chan->latency is just a nominal value. In reality the remote
@@ -206,34 +203,24 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
cl_info->deadline_us =
HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
if (!hdev->verspec_data->has_txdone_irq &&
- cl_info->mbox_chan->mbox->txdone_irq) {
+ mbox_chan->mbox->txdone_irq) {
dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
rc = -EINVAL;
goto err_mbx_channel_free;
} else if (hdev->verspec_data->has_txdone_irq &&
- !cl_info->mbox_chan->mbox->txdone_irq) {
+ !mbox_chan->mbox->txdone_irq) {
dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
rc = -EINVAL;
goto err_mbx_channel_free;
}
- if (!pcc_chan->shmem_base_addr ||
- pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) {
- dev_err(dev, "The base address or size (%llu) of PCC communication region is invalid.\n",
- pcc_chan->shmem_size);
+ if (pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) {
+ dev_err(dev, "Size (%llu) of PCC communication region must be %d bytes.\n",
+ pcc_chan->shmem_size, HCCS_PCC_SHARE_MEM_BYTES);
rc = -EINVAL;
goto err_mbx_channel_free;
}
- cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
- pcc_chan->shmem_size);
- if (!cl_info->pcc_comm_addr) {
- dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n",
- hdev->chan_id);
- rc = -ENOMEM;
- goto err_mbx_channel_free;
- }
-
return 0;
err_mbx_channel_free:
@@ -246,7 +233,7 @@ static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
{
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
struct acpi_pcct_shared_memory __iomem *comm_base =
- cl_info->pcc_comm_addr;
+ cl_info->pcc_chan->shmem;
u16 status;
int ret;
@@ -289,7 +276,7 @@ static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
.status = 0,
};
- memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
+ memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp,
sizeof(struct acpi_pcct_shared_memory));
/* Copy the message to the PCC comm space */
@@ -309,7 +296,7 @@ static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
.command = cmd,
};
- memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
+ memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp,
sizeof(struct acpi_pcct_ext_pcc_shared_memory));
/* Copy the message to the PCC comm space */
@@ -321,12 +308,13 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
{
const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+ struct mbox_chan *mbox_chan = cl_info->pcc_chan->mchan;
struct hccs_fw_inner_head *fw_inner_head;
void __iomem *comm_space;
u16 space_size;
int ret;
- comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size;
+ comm_space = cl_info->pcc_chan->shmem + verspec_data->shared_mem_size;
space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
comm_space, space_size);
@@ -334,7 +322,7 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
reinit_completion(&cl_info->done);
/* Ring doorbell */
- ret = mbox_send_message(cl_info->mbox_chan, &cmd);
+ ret = mbox_send_message(mbox_chan, &cmd);
if (ret < 0) {
dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n",
ret);
@@ -356,9 +344,9 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
end:
if (verspec_data->has_txdone_irq)
- mbox_chan_txdone(cl_info->mbox_chan, ret);
+ mbox_chan_txdone(mbox_chan, ret);
else
- mbox_client_txdone(cl_info->mbox_chan, ret);
+ mbox_client_txdone(mbox_chan, ret);
return ret;
}
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.h b/drivers/soc/hisilicon/kunpeng_hccs.h
index dc267136919b..f0a9a5618d97 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.h
+++ b/drivers/soc/hisilicon/kunpeng_hccs.h
@@ -60,10 +60,8 @@ struct hccs_chip_info {
struct hccs_mbox_client_info {
struct mbox_client client;
- struct mbox_chan *mbox_chan;
struct pcc_mbox_chan *pcc_chan;
u64 deadline_us;
- void __iomem *pcc_comm_addr;
struct completion done;
};
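
After this change the PCC mailbox core owns the shared-memory mapping: clients read it through pcc_chan->shmem instead of calling ioremap() on pcc_chan->shmem_base_addr themselves. A minimal client-side sketch (the function and subspace id are hypothetical):

#include <acpi/pcc.h>
#include <linux/io.h>

static int pcc_client_example(struct mbox_client *cl, int subspace_id)
{
	struct pcc_mbox_chan *pcc_chan;

	pcc_chan = pcc_mbox_request_channel(cl, subspace_id);
	if (IS_ERR(pcc_chan))
		return PTR_ERR(pcc_chan);

	/* Already ioremapped by the PCC core. */
	memset_io(pcc_chan->shmem, 0, pcc_chan->shmem_size);

	pcc_mbox_free_channel(pcc_chan);
	return 0;
}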
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 3ed8161d7d28..04a1b60f2f2b 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -24,13 +24,21 @@
#define OCOTP_UID_HIGH 0x420
#define IMX8MP_OCOTP_UID_OFFSET 0x10
+#define IMX8MP_OCOTP_UID_HIGH 0xE00
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800
struct imx8_soc_data {
char *name;
- int (*soc_revision)(u32 *socrev, u64 *socuid);
+ const char *ocotp_compatible;
+ int (*soc_revision)(struct platform_device *pdev, u32 *socrev);
+ int (*soc_uid)(struct platform_device *pdev, u64 *socuid);
+};
+
+struct imx8_soc_drvdata {
+ void __iomem *ocotp_base;
+ struct clk *clk;
};
#ifdef CONFIG_HAVE_ARM_SMCCC
@@ -49,30 +57,24 @@ static u32 imx8mq_soc_revision_from_atf(void)
static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
#endif
-static int imx8mq_soc_revision(u32 *socrev, u64 *socuid)
+static int imx8m_soc_uid(struct platform_device *pdev, u64 *socuid)
{
- struct device_node *np __free(device_node) =
- of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
- void __iomem *ocotp_base;
- u32 magic;
- u32 rev;
- struct clk *clk;
- int ret;
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
- if (!np)
- return -EINVAL;
-
- ocotp_base = of_iomap(np, 0);
- if (!ocotp_base)
- return -EINVAL;
+ *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ *socuid <<= 32;
+ *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
- clk = of_clk_get_by_name(np, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto err_clk;
- }
+ return 0;
+}
- clk_prepare_enable(clk);
+static int imx8mq_soc_revision(struct platform_device *pdev, u32 *socrev)
+{
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
+ u32 magic;
+ u32 rev;
/*
* SOC revision on older imx8mq is not available in fuses so query
@@ -85,98 +87,109 @@ static int imx8mq_soc_revision(u32 *socrev, u64 *socuid)
rev = REV_B1;
}
- *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
- *socuid <<= 32;
- *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
-
*socrev = rev;
- clk_disable_unprepare(clk);
- clk_put(clk);
- iounmap(ocotp_base);
-
return 0;
+}
-err_clk:
- iounmap(ocotp_base);
- return ret;
+static int imx8mp_soc_uid(struct platform_device *pdev, u64 *socuid)
+{
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ void __iomem *ocotp_base = drvdata->ocotp_base;
+
+ socuid[0] = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + IMX8MP_OCOTP_UID_OFFSET);
+ socuid[0] <<= 32;
+ socuid[0] |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + IMX8MP_OCOTP_UID_OFFSET);
+
+ socuid[1] = readl_relaxed(ocotp_base + IMX8MP_OCOTP_UID_HIGH + 0x10);
+ socuid[1] <<= 32;
+ socuid[1] |= readl_relaxed(ocotp_base + IMX8MP_OCOTP_UID_HIGH);
+
+ return 0;
}
-static int imx8mm_soc_uid(u64 *socuid)
+static int imx8mm_soc_revision(struct platform_device *pdev, u32 *socrev)
{
struct device_node *np __free(device_node) =
- of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
- void __iomem *ocotp_base;
- struct clk *clk;
- int ret = 0;
- u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
- IMX8MP_OCOTP_UID_OFFSET : 0;
+ of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ void __iomem *anatop_base;
if (!np)
return -EINVAL;
- ocotp_base = of_iomap(np, 0);
- if (!ocotp_base)
+ anatop_base = of_iomap(np, 0);
+ if (!anatop_base)
return -EINVAL;
- clk = of_clk_get_by_name(np, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto err_clk;
- }
-
- clk_prepare_enable(clk);
-
- *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
- *socuid <<= 32;
- *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+ *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
- clk_disable_unprepare(clk);
- clk_put(clk);
+ iounmap(anatop_base);
-err_clk:
- iounmap(ocotp_base);
- return ret;
+ return 0;
}
-static int imx8mm_soc_revision(u32 *socrev, u64 *socuid)
+static int imx8m_soc_prepare(struct platform_device *pdev, const char *ocotp_compatible)
{
struct device_node *np __free(device_node) =
- of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
- void __iomem *anatop_base;
+ of_find_compatible_node(NULL, NULL, ocotp_compatible);
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
+ int ret = 0;
if (!np)
return -EINVAL;
- anatop_base = of_iomap(np, 0);
- if (!anatop_base)
+ drvdata->ocotp_base = of_iomap(np, 0);
+ if (!drvdata->ocotp_base)
return -EINVAL;
- *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+ drvdata->clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(drvdata->clk)) {
+ ret = PTR_ERR(drvdata->clk);
+ goto err_clk;
+ }
- iounmap(anatop_base);
+ return clk_prepare_enable(drvdata->clk);
+
+err_clk:
+ iounmap(drvdata->ocotp_base);
+ return ret;
+}
+
+static void imx8m_soc_unprepare(struct platform_device *pdev)
+{
+ struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev);
- return imx8mm_soc_uid(socuid);
+ clk_disable_unprepare(drvdata->clk);
+ clk_put(drvdata->clk);
+ iounmap(drvdata->ocotp_base);
}
static const struct imx8_soc_data imx8mq_soc_data = {
.name = "i.MX8MQ",
+ .ocotp_compatible = "fsl,imx8mq-ocotp",
.soc_revision = imx8mq_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mm_soc_data = {
.name = "i.MX8MM",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mn_soc_data = {
.name = "i.MX8MN",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8m_soc_uid,
};
static const struct imx8_soc_data imx8mp_soc_data = {
.name = "i.MX8MP",
+ .ocotp_compatible = "fsl,imx8mm-ocotp",
.soc_revision = imx8mm_soc_revision,
+ .soc_uid = imx8mp_soc_uid,
};
static __maybe_unused const struct of_device_id imx8_soc_match[] = {
@@ -207,17 +220,24 @@ static int imx8m_soc_probe(struct platform_device *pdev)
struct soc_device_attribute *soc_dev_attr;
struct platform_device *cpufreq_dev;
const struct imx8_soc_data *data;
+ struct imx8_soc_drvdata *drvdata;
struct device *dev = &pdev->dev;
const struct of_device_id *id;
struct soc_device *soc_dev;
u32 soc_rev = 0;
- u64 soc_uid = 0;
+ u64 soc_uid[2] = {0, 0};
int ret;
soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, drvdata);
+
soc_dev_attr->family = "Freescale i.MX";
ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
@@ -231,18 +251,37 @@ static int imx8m_soc_probe(struct platform_device *pdev)
data = id->data;
if (data) {
soc_dev_attr->soc_id = data->name;
+ ret = imx8m_soc_prepare(pdev, data->ocotp_compatible);
+ if (ret)
+ return ret;
+
if (data->soc_revision) {
- ret = data->soc_revision(&soc_rev, &soc_uid);
- if (ret)
+ ret = data->soc_revision(pdev, &soc_rev);
+ if (ret) {
+ imx8m_soc_unprepare(pdev);
+ return ret;
+ }
+ }
+ if (data->soc_uid) {
+ ret = data->soc_uid(pdev, soc_uid);
+ if (ret) {
+ imx8m_soc_unprepare(pdev);
return ret;
+ }
}
+ imx8m_soc_unprepare(pdev);
}
soc_dev_attr->revision = imx8_revision(dev, soc_rev);
if (!soc_dev_attr->revision)
return -ENOMEM;
- soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX", soc_uid);
+ if (soc_uid[1])
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX%016llX",
+ soc_uid[1], soc_uid[0]);
+ else
+ soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX",
+ soc_uid[0]);
if (!soc_dev_attr->serial_number)
return -ENOMEM;
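
The serial number is now printed as two zero-padded 64-bit halves, high half first, each half assembled from two 32-bit fuse words. A standalone worked example with made-up fuse values (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t uid[2];

	/* each half is UID_HIGH:UID_LOW from the OCOTP fuses */
	uid[0] = ((uint64_t)0x12345678 << 32) | 0x9abcdef0;
	uid[1] = ((uint64_t)0x00000001 << 32) | 0x00000002;	/* i.MX8MP only */

	if (uid[1])	/* prints 0000000100000002123456789ABCDEF0 */
		printf("%016llX%016llX\n",
		       (unsigned long long)uid[1], (unsigned long long)uid[0]);
	else
		printf("%016llX\n", (unsigned long long)uid[0]);
	return 0;
}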
diff --git a/drivers/soc/mediatek/mtk-dvfsrc.c b/drivers/soc/mediatek/mtk-dvfsrc.c
index 83bf46fdcf2d..41add5636b03 100644
--- a/drivers/soc/mediatek/mtk-dvfsrc.c
+++ b/drivers/soc/mediatek/mtk-dvfsrc.c
@@ -446,6 +446,46 @@ static int mtk_dvfsrc_probe(struct platform_device *pdev)
return 0;
}
+static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v1 = { 0, 0, 0 };
+static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v2 = {
+ .max_dram_nom_bw = 255,
+ .max_dram_peak_bw = 255,
+ .max_dram_hrt_bw = 1023,
+};
+
+static const struct dvfsrc_opp dvfsrc_opp_mt6893_lp4[] = {
+ { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 },
+ { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
+ { 0, 2 }, { 1, 2 }, { 2, 2 }, { 3, 2 },
+ { 0, 3 }, { 1, 3 }, { 2, 3 }, { 3, 3 },
+ { 1, 4 }, { 2, 4 }, { 3, 4 }, { 2, 5 },
+ { 3, 5 }, { 3, 6 }, { 4, 6 }, { 4, 7 },
+};
+
+static const struct dvfsrc_opp_desc dvfsrc_opp_mt6893_desc[] = {
+ [0] = {
+ .opps = dvfsrc_opp_mt6893_lp4,
+ .num_opp = ARRAY_SIZE(dvfsrc_opp_mt6893_lp4),
+ }
+};
+
+static const struct dvfsrc_soc_data mt6893_data = {
+ .opps_desc = dvfsrc_opp_mt6893_desc,
+ .regs = dvfsrc_mt8195_regs,
+ .get_target_level = dvfsrc_get_target_level_v2,
+ .get_current_level = dvfsrc_get_current_level_v2,
+ .get_vcore_level = dvfsrc_get_vcore_level_v2,
+ .get_vscp_level = dvfsrc_get_vscp_level_v2,
+ .set_dram_bw = dvfsrc_set_dram_bw_v1,
+ .set_dram_peak_bw = dvfsrc_set_dram_peak_bw_v1,
+ .set_dram_hrt_bw = dvfsrc_set_dram_hrt_bw_v1,
+ .set_vcore_level = dvfsrc_set_vcore_level_v2,
+ .set_vscp_level = dvfsrc_set_vscp_level_v2,
+ .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2,
+ .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
+ .bw_constraints = &dvfsrc_bw_constr_v2,
+};
+
static const struct dvfsrc_opp dvfsrc_opp_mt8183_lp4[] = {
{ 0, 0 }, { 0, 1 }, { 0, 2 }, { 1, 2 },
};
@@ -469,8 +509,6 @@ static const struct dvfsrc_opp_desc dvfsrc_opp_mt8183_desc[] = {
}
};
-static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_mt8183 = { 0, 0, 0 };
-
static const struct dvfsrc_soc_data mt8183_data = {
.opps_desc = dvfsrc_opp_mt8183_desc,
.regs = dvfsrc_mt8183_regs,
@@ -482,7 +520,7 @@ static const struct dvfsrc_soc_data mt8183_data = {
.set_vcore_level = dvfsrc_set_vcore_level_v1,
.wait_for_opp_level = dvfsrc_wait_for_opp_level_v1,
.wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
- .bw_constraints = &dvfsrc_bw_constr_mt8183,
+ .bw_constraints = &dvfsrc_bw_constr_v1,
};
static const struct dvfsrc_opp dvfsrc_opp_mt8195_lp4[] = {
@@ -501,12 +539,6 @@ static const struct dvfsrc_opp_desc dvfsrc_opp_mt8195_desc[] = {
}
};
-static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_mt8195 = {
- .max_dram_nom_bw = 255,
- .max_dram_peak_bw = 255,
- .max_dram_hrt_bw = 1023,
-};
-
static const struct dvfsrc_soc_data mt8195_data = {
.opps_desc = dvfsrc_opp_mt8195_desc,
.regs = dvfsrc_mt8195_regs,
@@ -521,10 +553,11 @@ static const struct dvfsrc_soc_data mt8195_data = {
.set_vscp_level = dvfsrc_set_vscp_level_v2,
.wait_for_opp_level = dvfsrc_wait_for_opp_level_v2,
.wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1,
- .bw_constraints = &dvfsrc_bw_constr_mt8195,
+ .bw_constraints = &dvfsrc_bw_constr_v2,
};
static const struct of_device_id mtk_dvfsrc_of_match[] = {
+ { .compatible = "mediatek,mt6893-dvfsrc", .data = &mt6893_data },
{ .compatible = "mediatek,mt8183-dvfsrc", .data = &mt8183_data },
{ .compatible = "mediatek,mt8195-dvfsrc", .data = &mt8195_data },
{ /* sentinel */ }
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index 2310afa77b76..c467b55b4174 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -21,20 +21,63 @@
#include <soc/qcom/ice.h>
-#define AES_256_XTS_KEY_SIZE 64
+#define AES_256_XTS_KEY_SIZE 64 /* for raw keys only */
+#define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE 100 /* assuming HWKM v2 */
/* QCOM ICE registers */
+
+#define QCOM_ICE_REG_CONTROL 0x0000
+#define QCOM_ICE_LEGACY_MODE_ENABLED BIT(0)
+
#define QCOM_ICE_REG_VERSION 0x0008
+
#define QCOM_ICE_REG_FUSE_SETTING 0x0010
+#define QCOM_ICE_FUSE_SETTING_MASK BIT(0)
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK BIT(1)
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK BIT(2)
+
#define QCOM_ICE_REG_BIST_STATUS 0x0070
+#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
+
#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
-/* BIST ("built-in self-test") status flags */
-#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
+#define QCOM_ICE_REG_CRYPTOCFG_BASE 0x4040
+#define QCOM_ICE_REG_CRYPTOCFG_SIZE 0x80
+#define QCOM_ICE_REG_CRYPTOCFG(slot) (QCOM_ICE_REG_CRYPTOCFG_BASE + \
+ QCOM_ICE_REG_CRYPTOCFG_SIZE * (slot))
+union crypto_cfg {
+ __le32 regval;
+ struct {
+ u8 dusize;
+ u8 capidx;
+ u8 reserved;
+#define QCOM_ICE_HWKM_CFG_ENABLE_VAL BIT(7)
+ u8 cfge;
+ };
+};
-#define QCOM_ICE_FUSE_SETTING_MASK 0x1
-#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
-#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+/* QCOM ICE HWKM (Hardware Key Manager) registers */
+
+#define HWKM_OFFSET 0x8000
+
+#define QCOM_ICE_REG_HWKM_TZ_KM_CTL (HWKM_OFFSET + 0x1000)
+#define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL (BIT(1) | BIT(2))
+
+#define QCOM_ICE_REG_HWKM_TZ_KM_STATUS (HWKM_OFFSET + 0x1004)
+#define QCOM_ICE_HWKM_KT_CLEAR_DONE BIT(0)
+#define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE BIT(1)
+#define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE BIT(2)
+#define QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 BIT(7)
+#define QCOM_ICE_HWKM_BIST_DONE_V2 BIT(9)
+
+#define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008)
+#define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL BIT(3)
+
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_0 (HWKM_OFFSET + 0x5000)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_1 (HWKM_OFFSET + 0x5004)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_2 (HWKM_OFFSET + 0x5008)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_3 (HWKM_OFFSET + 0x500C)
+#define QCOM_ICE_REG_HWKM_BANK0_BBAC_4 (HWKM_OFFSET + 0x5010)
#define qcom_ice_writel(engine, val, reg) \
writel((val), (engine)->base + (reg))
@@ -42,11 +85,18 @@
#define qcom_ice_readl(engine, reg) \
readl((engine)->base + (reg))
+static bool qcom_ice_use_wrapped_keys;
+module_param_named(use_wrapped_keys, qcom_ice_use_wrapped_keys, bool, 0660);
+MODULE_PARM_DESC(use_wrapped_keys,
+ "Support wrapped keys instead of raw keys, if available on the platform");
+
struct qcom_ice {
struct device *dev;
void __iomem *base;
struct clk *core_clk;
+ bool use_hwkm;
+ bool hwkm_init_complete;
};
static bool qcom_ice_check_supported(struct qcom_ice *ice)
@@ -76,6 +126,35 @@ static bool qcom_ice_check_supported(struct qcom_ice *ice)
return false;
}
+ /*
+ * Check for HWKM support and decide whether to use it or not. ICE
+ * v3.2.1 and later have HWKM v2. ICE v3.2.0 has HWKM v1. Earlier ICE
+ * versions don't have HWKM at all. However, for HWKM to be fully
+ * usable by Linux, the TrustZone software also needs to support certain
+ * SCM calls including the ones to generate and prepare keys. That
+ * effectively makes the earliest supported SoC be SM8650, which has
+ * HWKM v2. Therefore, this driver doesn't include support for HWKM v1,
+ * and it checks for the SCM call support before it decides to use HWKM.
+ *
+ * Also, since HWKM and legacy mode are mutually exclusive, and
+ * ICE-capable storage driver(s) need to know early on whether to
+ * advertise support for raw keys or wrapped keys, HWKM cannot be used
+ * unconditionally. A module parameter is used to opt into using it.
+ */
+ if ((major >= 4 ||
+ (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)))) &&
+ qcom_scm_has_wrapped_key_support()) {
+ if (qcom_ice_use_wrapped_keys) {
+ dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n");
+ ice->use_hwkm = true;
+ } else {
+ dev_info(dev, "Not using HWKM. Supporting raw keys only.\n");
+ }
+ } else if (qcom_ice_use_wrapped_keys) {
+ dev_warn(dev, "A supported HWKM is not present. Ignoring qcom_ice.use_wrapped_keys=1.\n");
+ } else {
+ dev_info(dev, "A supported HWKM is not present. Supporting raw keys only.\n");
+ }
return true;
}
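
The version gate described in the comment above, factored out as a standalone predicate for readability (a sketch; the driver open-codes this in the condition):

#include <linux/types.h>

static bool ice_has_hwkm_v2(u32 major, u32 minor, u32 step)
{
	if (major >= 4)
		return true;
	/* HWKM v2 first appeared in ICE v3.2.1 */
	return major == 3 && (minor >= 3 || (minor == 2 && step >= 1));
}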
@@ -123,17 +202,71 @@ static int qcom_ice_wait_bist_status(struct qcom_ice *ice)
err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS,
regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
50, 5000);
- if (err)
+ if (err) {
dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n");
+ return err;
+ }
- return err;
+ if (ice->use_hwkm &&
+ qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_STATUS) !=
+ (QCOM_ICE_HWKM_KT_CLEAR_DONE |
+ QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE |
+ QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE |
+ QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 |
+ QCOM_ICE_HWKM_BIST_DONE_V2)) {
+ dev_err(ice->dev, "HWKM self-test error!\n");
+ /*
+ * Too late to revoke use_hwkm here, as it was already
+ * propagated up the stack into the crypto capabilities.
+ */
+ }
+ return 0;
+}
+
+static void qcom_ice_hwkm_init(struct qcom_ice *ice)
+{
+ u32 regval;
+
+ if (!ice->use_hwkm)
+ return;
+
+ BUILD_BUG_ON(QCOM_ICE_HWKM_WRAPPED_KEY_SIZE >
+ BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE);
+ /*
+ * When ICE is in HWKM mode, it only supports wrapped keys.
+ * When ICE is in legacy mode, it only supports raw keys.
+ *
+ * Put ICE in HWKM mode. ICE defaults to legacy mode.
+ */
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL);
+ regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED;
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL);
+
+ /* Disable CRC checks. This HWKM feature is not used. */
+ qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL,
+ QCOM_ICE_REG_HWKM_TZ_KM_CTL);
+
+ /*
+ * Allow the HWKM slave to read and write the keyslots in the ICE HWKM
+ * slave. Without this, TrustZone cannot program keys into ICE.
+ */
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_0);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_1);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_2);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_3);
+ qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_4);
+
+ /* Clear the HWKM response FIFO. */
+ qcom_ice_writel(ice, QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL,
+ QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS);
+ ice->hwkm_init_complete = true;
}
int qcom_ice_enable(struct qcom_ice *ice)
{
qcom_ice_low_power_mode_enable(ice);
qcom_ice_optimization_enable(ice);
-
+ qcom_ice_hwkm_init(ice);
return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_enable);
@@ -149,7 +282,7 @@ int qcom_ice_resume(struct qcom_ice *ice)
err);
return err;
}
-
+ qcom_ice_hwkm_init(ice);
return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_resume);
@@ -157,15 +290,58 @@ EXPORT_SYMBOL_GPL(qcom_ice_resume);
int qcom_ice_suspend(struct qcom_ice *ice)
{
clk_disable_unprepare(ice->core_clk);
+ ice->hwkm_init_complete = false;
return 0;
}
EXPORT_SYMBOL_GPL(qcom_ice_suspend);
-int qcom_ice_program_key(struct qcom_ice *ice,
- u8 algorithm_id, u8 key_size,
- const u8 crypto_key[], u8 data_unit_size,
- int slot)
+static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot)
+{
+ return slot * 2;
+}
+
+static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot,
+ const struct blk_crypto_key *bkey)
+{
+ struct device *dev = ice->dev;
+ union crypto_cfg cfg = {
+ .dusize = bkey->crypto_cfg.data_unit_size / 512,
+ .capidx = QCOM_SCM_ICE_CIPHER_AES_256_XTS,
+ .cfge = QCOM_ICE_HWKM_CFG_ENABLE_VAL,
+ };
+ int err;
+
+ if (!ice->use_hwkm) {
+ dev_err_ratelimited(dev, "Got wrapped key when not using HWKM\n");
+ return -EINVAL;
+ }
+ if (!ice->hwkm_init_complete) {
+ dev_err_ratelimited(dev, "HWKM not yet initialized\n");
+ return -EINVAL;
+ }
+
+ /* Clear CFGE before programming the key. */
+ qcom_ice_writel(ice, 0x0, QCOM_ICE_REG_CRYPTOCFG(slot));
+
+ /* Call into TrustZone to program the wrapped key using HWKM. */
+ err = qcom_scm_ice_set_key(translate_hwkm_slot(ice, slot), bkey->bytes,
+ bkey->size, cfg.capidx, cfg.dusize);
+ if (err) {
+ dev_err_ratelimited(dev,
+ "qcom_scm_ice_set_key failed; err=%d, slot=%u\n",
+ err, slot);
+ return err;
+ }
+
+ /* Set CFGE after programming the key. */
+ qcom_ice_writel(ice, le32_to_cpu(cfg.regval),
+ QCOM_ICE_REG_CRYPTOCFG(slot));
+ return 0;
+}
+
+int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot,
+ const struct blk_crypto_key *blk_key)
{
struct device *dev = ice->dev;
union {
@@ -176,15 +352,26 @@ int qcom_ice_program_key(struct qcom_ice *ice,
int err;
/* Only AES-256-XTS has been tested so far. */
- if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS ||
- key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) {
- dev_err_ratelimited(dev,
- "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
- algorithm_id, key_size);
+ if (blk_key->crypto_cfg.crypto_mode !=
+ BLK_ENCRYPTION_MODE_AES_256_XTS) {
+ dev_err_ratelimited(dev, "Unsupported crypto mode: %d\n",
+ blk_key->crypto_cfg.crypto_mode);
+ return -EINVAL;
+ }
+
+ if (blk_key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
+ return qcom_ice_program_wrapped_key(ice, slot, blk_key);
+
+ if (ice->use_hwkm) {
+ dev_err_ratelimited(dev, "Got raw key when using HWKM\n");
return -EINVAL;
}
- memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE);
+ if (blk_key->size != AES_256_XTS_KEY_SIZE) {
+ dev_err_ratelimited(dev, "Incorrect key size\n");
+ return -EINVAL;
+ }
+ memcpy(key.bytes, blk_key->bytes, AES_256_XTS_KEY_SIZE);
/* The SCM call requires that the key words are encoded in big endian */
for (i = 0; i < ARRAY_SIZE(key.words); i++)
@@ -192,7 +379,7 @@ int qcom_ice_program_key(struct qcom_ice *ice,
err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
QCOM_SCM_ICE_CIPHER_AES_256_XTS,
- data_unit_size);
+ blk_key->crypto_cfg.data_unit_size / 512);
memzero_explicit(&key, sizeof(key));
@@ -202,10 +389,131 @@ EXPORT_SYMBOL_GPL(qcom_ice_program_key);
int qcom_ice_evict_key(struct qcom_ice *ice, int slot)
{
+ if (ice->hwkm_init_complete)
+ slot = translate_hwkm_slot(ice, slot);
return qcom_scm_ice_invalidate_key(slot);
}
EXPORT_SYMBOL_GPL(qcom_ice_evict_key);
+/**
+ * qcom_ice_get_supported_key_type() - Get the supported key type
+ * @ice: ICE driver data
+ *
+ * Return: the blk-crypto key type that the ICE driver is configured to use.
+ * This is the key type that ICE-capable storage drivers should advertise as
+ * supported in the crypto capabilities of any disks they register.
+ */
+enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice)
+{
+ if (ice->use_hwkm)
+ return BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
+ return BLK_CRYPTO_KEY_TYPE_RAW;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_get_supported_key_type);
+
+/**
+ * qcom_ice_derive_sw_secret() - Derive software secret from wrapped key
+ * @ice: ICE driver data
+ * @eph_key: an ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: output buffer for the software secret
+ *
+ * Use HWKM to derive the "software secret" from a hardware-wrapped key that is
+ * given in ephemerally-wrapped form.
+ *
+ * Return: 0 on success; -EBADMSG if the given ephemerally-wrapped key is
+ * invalid; or another -errno value.
+ */
+int qcom_ice_derive_sw_secret(struct qcom_ice *ice,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ int err = qcom_scm_derive_sw_secret(eph_key, eph_key_size,
+ sw_secret,
+ BLK_CRYPTO_SW_SECRET_SIZE);
+ if (err == -EIO || err == -EINVAL)
+ err = -EBADMSG; /* probably invalid key */
+ return err;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_derive_sw_secret);
+
+/**
+ * qcom_ice_generate_key() - Generate a wrapped key for inline encryption
+ * @ice: ICE driver data
+ * @lt_key: output buffer for the long-term wrapped key
+ *
+ * Use HWKM to generate a new key and return it as a long-term wrapped key.
+ *
+ * Return: the size of the resulting wrapped key on success; -errno on failure.
+ */
+int qcom_ice_generate_key(struct qcom_ice *ice,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_generate_ice_key(lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_generate_key);
+
+/**
+ * qcom_ice_prepare_key() - Prepare a wrapped key for inline encryption
+ * @ice: ICE driver data
+ * @lt_key: a long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: output buffer for the ephemerally-wrapped key
+ *
+ * Use HWKM to re-wrap a long-term wrapped key with the per-boot ephemeral key.
+ *
+ * Return: the size of the resulting wrapped key on success; -EBADMSG if the
+ * given long-term wrapped key is invalid; or another -errno value.
+ */
+int qcom_ice_prepare_key(struct qcom_ice *ice,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_prepare_ice_key(lt_key, lt_key_size,
+ eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ if (err == -EIO || err == -EINVAL)
+ err = -EBADMSG; /* probably invalid key */
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_prepare_key);
+
+/**
+ * qcom_ice_import_key() - Import a raw key for inline encryption
+ * @ice: ICE driver data
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: output buffer for the long-term wrapped key
+ *
+ * Use HWKM to import a raw key and return it as a long-term wrapped key.
+ *
+ * Return: the size of the resulting wrapped key on success; -errno on failure.
+ */
+int qcom_ice_import_key(struct qcom_ice *ice,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int err;
+
+ err = qcom_scm_import_ice_key(raw_key, raw_key_size,
+ lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE);
+ if (err)
+ return err;
+
+ return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_import_key);
+
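
Taken together, the four new exports implement the wrapped-key life cycle. A caller-side sketch (hypothetical function, using only the APIs defined above): generate a long-term wrapped key once, persist it, then on each boot re-wrap it with the per-boot ephemeral key before it can be programmed into a keyslot.

static int wrapped_key_lifecycle_example(struct qcom_ice *ice)
{
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int lt_size, eph_size;

	lt_size = qcom_ice_generate_key(ice, lt_key);
	if (lt_size < 0)
		return lt_size;	/* on success, lt_key is stored persistently */

	eph_size = qcom_ice_prepare_key(ice, lt_key, lt_size, eph_key);
	if (eph_size < 0)
		return eph_size;	/* -EBADMSG: the stored key is invalid */

	/*
	 * eph_key now backs a blk_crypto_key of type
	 * BLK_CRYPTO_KEY_TYPE_HW_WRAPPED and eventually reaches
	 * qcom_ice_program_key() above.
	 */
	return 0;
}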
static struct qcom_ice *qcom_ice_create(struct device *dev,
void __iomem *base)
{
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 56823b6a2fac..192edc3f64dc 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -35,6 +35,11 @@
#define ATTR0_RES_WAYS_MASK GENMASK(15, 0)
#define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16)
#define ATTR0_BONUS_WAYS_SHIFT 16
+#define ATTR2_PROBE_TARGET_WAYS_MASK BIT(4)
+#define ATTR2_FIXED_SIZE_MASK BIT(8)
+#define ATTR2_PRIORITY_MASK GENMASK(14, 12)
+#define ATTR2_PARENT_SCID_MASK GENMASK(21, 16)
+#define ATTR2_IN_A_GROUP_MASK BIT(24)
#define LLCC_STATUS_READ_DELAY 100
#define CACHE_LINE_SIZE_SHIFT 6
@@ -49,6 +54,10 @@
#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n)
+#define LLCC_V6_TRP_ATTR0_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR0_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR1_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR1_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR2_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR2_CFG] + SZ_64 * (n))
+#define LLCC_V6_TRP_ATTR3_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR3_CFG] + SZ_64 * (n))
#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
#define LLCC_TRP_PCB_ACT 0x21f04
@@ -66,6 +75,7 @@
#define LLCC_VERSION_2_0_0_0 0x02000000
#define LLCC_VERSION_2_1_0_0 0x02010000
#define LLCC_VERSION_4_1_0_0 0x04010000
+#define LLCC_VERSION_6_0_0_0 0x06000000
/**
* struct llcc_slice_config - Data associated with the llcc slice
@@ -106,6 +116,7 @@
* ovcap_en.
* @vict_prio: When current scid is under-capacity, allocate over other
* lower-than victim priority-line threshold scid.
+ * @parent_slice_id: For grouped slices, specifies the slice id of the parent.
*/
struct llcc_slice_config {
u32 usecase_id;
@@ -130,6 +141,7 @@ struct llcc_slice_config {
bool ovcap_en;
bool ovcap_prio;
bool vict_prio;
+ u32 parent_slice_id;
};
struct qcom_llcc_config {
@@ -153,6 +165,21 @@ struct qcom_sct_config {
enum llcc_reg_offset {
LLCC_COMMON_HW_INFO,
LLCC_COMMON_STATUS0,
+ LLCC_TRP_ATTR0_CFG,
+ LLCC_TRP_ATTR1_CFG,
+ LLCC_TRP_ATTR2_CFG,
+ LLCC_TRP_ATTR3_CFG,
+ LLCC_TRP_SID_DIS_CAP_ALLOC,
+ LLCC_TRP_ALGO_STALE_EN,
+ LLCC_TRP_ALGO_STALE_CAP_EN,
+ LLCC_TRP_ALGO_MRU0,
+ LLCC_TRP_ALGO_MRU1,
+ LLCC_TRP_ALGO_ALLOC0,
+ LLCC_TRP_ALGO_ALLOC1,
+ LLCC_TRP_ALGO_ALLOC2,
+ LLCC_TRP_ALGO_ALLOC3,
+ LLCC_TRP_WRS_EN,
+ LLCC_TRP_WRS_CACHEABLE_EN,
};
static const struct llcc_slice_config ipq5424_data[] = {
@@ -2662,6 +2689,263 @@ static const struct llcc_slice_config sm8650_data[] = {
},
};
+static const struct llcc_slice_config sm8750_data[] = {
+ {
+ .usecase_id = LLCC_CPUSS,
+ .slice_id = 1,
+ .max_cap = 5120,
+ .priority = 1,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ }, {
+ .usecase_id = LLCC_MDMHPFX,
+ .slice_id = 24,
+ .max_cap = 1024,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDSC0,
+ .slice_id = 2,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_AUDIO,
+ .slice_id = 35,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MDMHPGRW,
+ .slice_id = 25,
+ .max_cap = 1024,
+ .priority = 5,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MODHW,
+ .slice_id = 26,
+ .max_cap = 1024,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CMPT,
+ .slice_id = 34,
+ .max_cap = 4096,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_GPUHTW,
+ .slice_id = 11,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_GPU,
+ .slice_id = 9,
+ .max_cap = 5632,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .write_scid_en = true,
+ .write_scid_cacheable_en = true
+ }, {
+ .usecase_id = LLCC_MMUHWT,
+ .slice_id = 18,
+ .max_cap = 768,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_DISP,
+ .slice_id = 16,
+ .max_cap = 7168,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .stale_en = true,
+ }, {
+ .usecase_id = LLCC_VIDFW,
+ .slice_id = 17,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CAMFW,
+ .slice_id = 20,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_MDMPNG,
+ .slice_id = 27,
+ .max_cap = 256,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xf0000000,
+ }, {
+ .usecase_id = LLCC_AUDHW,
+ .slice_id = 22,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CVP,
+ .slice_id = 8,
+ .max_cap = 800,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .vict_prio = true,
+ }, {
+ .usecase_id = LLCC_MODPE,
+ .slice_id = 29,
+ .max_cap = 256,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xf0000000,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_WRCACHE,
+ .slice_id = 31,
+ .max_cap = 512,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ }, {
+ .usecase_id = LLCC_CVPFW,
+ .slice_id = 19,
+ .max_cap = 64,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_CMPTHCP,
+ .slice_id = 15,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_LCPDARE,
+ .slice_id = 30,
+ .max_cap = 128,
+ .priority = 5,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .alloc_oneway_en = true,
+ }, {
+ .usecase_id = LLCC_AENPU,
+ .slice_id = 3,
+ .max_cap = 3072,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ }, {
+ .usecase_id = LLCC_ISLAND1,
+ .slice_id = 12,
+ .max_cap = 7936,
+ .priority = 7,
+ .fixed_size = true,
+ .bonus_ways = 0x7fffffff,
+ }, {
+ .usecase_id = LLCC_DISP_WB,
+ .slice_id = 23,
+ .max_cap = 512,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDVSP,
+ .slice_id = 4,
+ .max_cap = 256,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ }, {
+ .usecase_id = LLCC_VIDDEC,
+ .slice_id = 5,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .cache_mode = 2,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMOFE,
+ .slice_id = 33,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTIP,
+ .slice_id = 13,
+ .max_cap = 1024,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMSRTIP,
+ .slice_id = 14,
+ .max_cap = 6144,
+ .priority = 4,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMRTRF,
+ .slice_id = 7,
+ .max_cap = 3584,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CAMSRTRF,
+ .slice_id = 21,
+ .max_cap = 6144,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .stale_en = true,
+ .ovcap_prio = true,
+ .parent_slice_id = 33,
+ }, {
+ .usecase_id = LLCC_CPUSSMPAM,
+ .slice_id = 6,
+ .max_cap = 2048,
+ .priority = 1,
+ .fixed_size = true,
+ .bonus_ways = 0xffffffff,
+ .activate_on_init = true,
+ .write_scid_en = true,
+ },
+};
+
static const struct llcc_slice_config qcs615_data[] = {
{
.usecase_id = LLCC_CPUSS,
@@ -3161,6 +3445,33 @@ static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = {
.drp_ecc_db_err_syn0 = 0x52120,
};
+static const struct llcc_edac_reg_offset llcc_v6_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x47448,
+ .trp_ecc_error_status1 = 0x47450,
+ .trp_ecc_sb_err_syn0 = 0x47490,
+ .trp_ecc_db_err_syn0 = 0x474d0,
+ .trp_ecc_error_cntr_clear = 0x47444,
+ .trp_interrupt_0_status = 0x47600,
+ .trp_interrupt_0_clear = 0x47604,
+ .trp_interrupt_0_enable = 0x47608,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x6400c,
+ .cmn_interrupt_0_enable = 0x6401c,
+ .cmn_interrupt_2_enable = 0x6403c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x80000,
+ .drp_ecc_error_cntr_clear = 0x80004,
+ .drp_interrupt_status = 0x80020,
+ .drp_interrupt_clear = 0x80028,
+ .drp_interrupt_enable = 0x8002c,
+ .drp_ecc_error_status0 = 0x820f4,
+ .drp_ecc_error_status1 = 0x820f8,
+ .drp_ecc_sb_err_syn0 = 0x820fc,
+ .drp_ecc_db_err_syn0 = 0x82120,
+};
+
/* LLCC register offset starting from v1.0.0 */
static const u32 llcc_v1_reg_offset[] = {
[LLCC_COMMON_HW_INFO] = 0x00030000,
@@ -3173,6 +3484,27 @@ static const u32 llcc_v2_1_reg_offset[] = {
[LLCC_COMMON_STATUS0] = 0x0003400c,
};
+/* LLCC register offset starting from v6.0.0 */
+static const u32 llcc_v6_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00064000,
+ [LLCC_COMMON_STATUS0] = 0x0006400c,
+ [LLCC_TRP_ATTR0_CFG] = 0x00041000,
+ [LLCC_TRP_ATTR1_CFG] = 0x00041008,
+ [LLCC_TRP_ATTR2_CFG] = 0x00041010,
+ [LLCC_TRP_ATTR3_CFG] = 0x00041014,
+ [LLCC_TRP_SID_DIS_CAP_ALLOC] = 0x00042000,
+ [LLCC_TRP_ALGO_STALE_EN] = 0x00042008,
+ [LLCC_TRP_ALGO_STALE_CAP_EN] = 0x00042010,
+ [LLCC_TRP_ALGO_MRU0] = 0x00042018,
+ [LLCC_TRP_ALGO_MRU1] = 0x00042020,
+ [LLCC_TRP_ALGO_ALLOC0] = 0x00042028,
+ [LLCC_TRP_ALGO_ALLOC1] = 0x00042030,
+ [LLCC_TRP_ALGO_ALLOC2] = 0x00042038,
+ [LLCC_TRP_ALGO_ALLOC3] = 0x00042040,
+ [LLCC_TRP_WRS_EN] = 0x00042080,
+ [LLCC_TRP_WRS_CACHEABLE_EN] = 0x00042088,
+};
+
static const struct qcom_llcc_config qcs615_cfg[] = {
{
.sct_data = qcs615_data,
@@ -3379,6 +3711,16 @@ static const struct qcom_llcc_config sm8650_cfg[] = {
},
};
+static const struct qcom_llcc_config sm8750_cfg[] = {
+ {
+ .sct_data = sm8750_data,
+ .size = ARRAY_SIZE(sm8750_data),
+ .skip_llcc_cfg = false,
+ .reg_offset = llcc_v6_reg_offset,
+ .edac_reg_offset = &llcc_v6_edac_reg_offset,
+ },
+};
+
static const struct qcom_llcc_config x1e80100_cfg[] = {
{
.sct_data = x1e80100_data,
@@ -3489,6 +3831,11 @@ static const struct qcom_sct_config sm8650_cfgs = {
.num_config = ARRAY_SIZE(sm8650_cfg),
};
+static const struct qcom_sct_config sm8750_cfgs = {
+ .llcc_config = sm8750_cfg,
+ .num_config = ARRAY_SIZE(sm8750_cfg),
+};
+
static const struct qcom_sct_config x1e80100_cfgs = {
.llcc_config = x1e80100_cfg,
.num_config = ARRAY_SIZE(x1e80100_cfg),
@@ -3869,6 +4216,139 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
return ret;
}
+static int _qcom_llcc_cfg_program_v6(const struct llcc_slice_config *config,
+ const struct qcom_llcc_config *cfg)
+{
+ u32 stale_en, stale_cap_en, mru_uncap_en, mru_rollover;
+ u32 alloc_oneway_en, ovcap_en, ovcap_prio, vict_prio;
+ u32 attr0_cfg, attr1_cfg, attr2_cfg, attr3_cfg;
+ u32 attr0_val, attr1_val, attr2_val, attr3_val;
+ u32 slice_offset, reg_offset;
+ struct llcc_slice_desc *desc;
+ u32 wren, wr_cache_en;
+ int ret;
+
+ attr0_cfg = LLCC_V6_TRP_ATTR0_CFGn(config->slice_id);
+ attr1_cfg = LLCC_V6_TRP_ATTR1_CFGn(config->slice_id);
+ attr2_cfg = LLCC_V6_TRP_ATTR2_CFGn(config->slice_id);
+ attr3_cfg = LLCC_V6_TRP_ATTR3_CFGn(config->slice_id);
+
+ attr0_val = config->res_ways;
+ attr1_val = config->bonus_ways;
+ attr2_val = config->cache_mode;
+ attr2_val |= FIELD_PREP(ATTR2_PROBE_TARGET_WAYS_MASK, config->probe_target_ways);
+ attr2_val |= FIELD_PREP(ATTR2_FIXED_SIZE_MASK, config->fixed_size);
+ attr2_val |= FIELD_PREP(ATTR2_PRIORITY_MASK, config->priority);
+
+ if (config->parent_slice_id && config->fixed_size) {
+ attr2_val |= FIELD_PREP(ATTR2_PARENT_SCID_MASK, config->parent_slice_id);
+ attr2_val |= ATTR2_IN_A_GROUP_MASK;
+ }
+
+ attr3_val = MAX_CAP_TO_BYTES(config->max_cap);
+ attr3_val /= drv_data->num_banks;
+ attr3_val >>= CACHE_LINE_SIZE_SHIFT;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr2_cfg, attr2_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(drv_data->bcast_regmap, attr3_cfg, attr3_val);
+ if (ret)
+ return ret;
+
+ slice_offset = config->slice_id % 32;
+ reg_offset = (config->slice_id / 32) * 4;
+
+ wren = config->write_scid_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_WRS_EN] + reg_offset,
+ BIT(slice_offset), wren);
+ if (ret)
+ return ret;
+
+ wr_cache_en = config->write_scid_cacheable_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_WRS_CACHEABLE_EN] + reg_offset,
+ BIT(slice_offset), wr_cache_en);
+ if (ret)
+ return ret;
+
+ stale_en = config->stale_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_STALE_EN] + reg_offset,
+ BIT(slice_offset), stale_en);
+ if (ret)
+ return ret;
+
+ stale_cap_en = config->stale_cap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_STALE_CAP_EN] + reg_offset,
+ BIT(slice_offset), stale_cap_en);
+ if (ret)
+ return ret;
+
+ mru_uncap_en = config->mru_uncap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_MRU0] + reg_offset,
+ BIT(slice_offset), mru_uncap_en);
+ if (ret)
+ return ret;
+
+ mru_rollover = config->mru_rollover << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_MRU1] + reg_offset,
+ BIT(slice_offset), mru_rollover);
+ if (ret)
+ return ret;
+
+ alloc_oneway_en = config->alloc_oneway_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC0] + reg_offset,
+ BIT(slice_offset), alloc_oneway_en);
+ if (ret)
+ return ret;
+
+ ovcap_en = config->ovcap_en << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC1] + reg_offset,
+ BIT(slice_offset), ovcap_en);
+ if (ret)
+ return ret;
+
+ ovcap_prio = config->ovcap_prio << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC2] + reg_offset,
+ BIT(slice_offset), ovcap_prio);
+ if (ret)
+ return ret;
+
+ vict_prio = config->vict_prio << slice_offset;
+ ret = regmap_update_bits(drv_data->bcast_regmap,
+ cfg->reg_offset[LLCC_TRP_ALGO_ALLOC3] + reg_offset,
+ BIT(slice_offset), vict_prio);
+ if (ret)
+ return ret;
+
+ if (config->activate_on_init) {
+ desc = llcc_slice_getd(config->usecase_id);
+ if (PTR_ERR_OR_ZERO(desc))
+ return -EINVAL;
+
+ ret = llcc_slice_activate(desc);
+ }
+
+ return ret;
+}
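
The slice_offset/reg_offset arithmetic above packs one enable bit per SCID into consecutive 32-bit words. A short worked example:

/*
 * Each TRP algorithm bank (WRS_EN, ALGO_STALE_EN, ALGO_MRU0, ...) is an
 * array of 32-bit words with one bit per SCID. For slice_id = 33:
 *
 *	slice_offset = 33 % 32       = 1;   bit 1 within the word
 *	reg_offset   = (33 / 32) * 4 = 4;   second word of the bank
 *
 * so e.g. its stale_en bit is bit 1 of the word at
 * cfg->reg_offset[LLCC_TRP_ALGO_STALE_EN] + 4.
 */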
+
static int qcom_llcc_cfg_program(struct platform_device *pdev,
const struct qcom_llcc_config *cfg)
{
@@ -3880,10 +4360,18 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev,
sz = drv_data->cfg_size;
llcc_table = drv_data->cfg;
- for (i = 0; i < sz; i++) {
- ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
- if (ret)
- return ret;
+ if (drv_data->version >= LLCC_VERSION_6_0_0_0) {
+ for (i = 0; i < sz; i++) {
+ ret = _qcom_llcc_cfg_program_v6(&llcc_table[i], cfg);
+ if (ret)
+ return ret;
+ }
+ } else {
+ for (i = 0; i < sz; i++) {
+ ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
+ if (ret)
+ return ret;
+ }
}
return ret;
@@ -4102,6 +4590,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfgs },
{ .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfgs },
{ .compatible = "qcom,sm8650-llcc", .data = &sm8650_cfgs },
+ { .compatible = "qcom,sm8750-llcc", .data = &sm8750_cfgs },
{ .compatible = "qcom,x1e80100-llcc", .data = &x1e80100_cfgs },
{ }
};
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index cde19cdfd3c7..0a6d325b195c 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -371,15 +371,11 @@ static void pmic_glink_remove(struct platform_device *pdev)
__pmic_glink = NULL;
}
-static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
- BIT(PMIC_GLINK_CLIENT_ALTMODE);
-
static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
BIT(PMIC_GLINK_CLIENT_ALTMODE) |
BIT(PMIC_GLINK_CLIENT_UCSI);
static const struct of_device_id pmic_glink_of_match[] = {
- { .compatible = "qcom,sc8280xp-pmic-glink", .data = &pmic_glink_sc8280xp_client_mask },
{ .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask },
{}
};
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index bd06ce161804..7f11acd33323 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -218,21 +218,29 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
{
struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work);
struct pmic_glink_altmode *altmode = alt_port->altmode;
+ enum drm_connector_status conn_status;
typec_switch_set(alt_port->typec_switch, alt_port->orientation);
- if (alt_port->svid == USB_TYPEC_DP_SID && alt_port->mode == 0xff)
- pmic_glink_altmode_safe(altmode, alt_port);
- else if (alt_port->svid == USB_TYPEC_DP_SID)
- pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode,
- alt_port->hpd_state, alt_port->hpd_irq);
- else
- pmic_glink_altmode_enable_usb(altmode, alt_port);
+ if (alt_port->svid == USB_TYPEC_DP_SID) {
+ if (alt_port->mode == 0xff) {
+ pmic_glink_altmode_safe(altmode, alt_port);
+ } else {
+ pmic_glink_altmode_enable_dp(altmode, alt_port,
+ alt_port->mode,
+ alt_port->hpd_state,
+ alt_port->hpd_irq);
+ }
- drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
- alt_port->hpd_state ?
- connector_status_connected :
- connector_status_disconnected);
+ if (alt_port->hpd_state)
+ conn_status = connector_status_connected;
+ else
+ conn_status = connector_status_disconnected;
+
+ drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, conn_status);
+ } else {
+ pmic_glink_altmode_enable_usb(altmode, alt_port);
+ }
pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
}
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 1d1c438be3e7..3abea241b1c4 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -488,6 +488,16 @@ static const struct qcom_pdm_domain_data *sm6350_domains[] = {
NULL,
};
+static const struct qcom_pdm_domain_data *sm7150_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &cdsp_root_pd,
+ &mpss_root_pd_gps,
+ &mpss_wlan_pd,
+ NULL,
+};
+
static const struct qcom_pdm_domain_data *sm8150_domains[] = {
&adsp_audio_pd,
&adsp_root_pd,
@@ -565,6 +575,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm4250", .data = sm6115_domains, },
{ .compatible = "qcom,sm6115", .data = sm6115_domains, },
{ .compatible = "qcom,sm6350", .data = sm6350_domains, },
+ { .compatible = "qcom,sm7150", .data = sm7150_domains, },
{ .compatible = "qcom,sm7225", .data = sm6350_domains, },
{ .compatible = "qcom,sm7325", .data = sc7280_domains, },
{ .compatible = "qcom,sm8150", .data = sm8150_domains, },
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 592819701809..cf425930539e 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -86,7 +86,7 @@
#define SMEM_GLOBAL_HOST 0xfffe
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 20
+#define SMEM_HOST_COUNT 25
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index a3e88ced328a..cb515c2340c1 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -399,7 +399,7 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
struct smp2p_entry *entry,
struct device_node *node)
{
- entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+ entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smp2p_irq_ops, entry);
if (!entry->domain) {
dev_err(smp2p->dev, "failed to add irq_domain\n");
return -ENOMEM;
@@ -575,7 +575,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
smp2p->mbox_client.knows_txdone = true;
smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
if (IS_ERR(smp2p->mbox_chan)) {
- if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
+ if (PTR_ERR(smp2p->mbox_chan) != -ENOENT)
return PTR_ERR(smp2p->mbox_chan);
smp2p->mbox_chan = NULL;
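
This is the same mechanical conversion applied to the dove PMU and QE IC patches earlier in this series (and to smsm just below): the linear domain is created against a generic fwnode handle rather than a device_node, letting the irqdomain core treat OF and non-OF firmware uniformly. In general form (a sketch; node, ops and host_data are placeholders):

	domain = irq_domain_add_linear(node, 32, &ops, host_data);

becomes

	domain = irq_domain_create_linear(of_fwnode_handle(node), 32,
					  &ops, host_data);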
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index e803ea342c97..021e9d1f61dc 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -456,7 +456,7 @@ static int smsm_inbound_entry(struct qcom_smsm *smsm,
return ret;
}
- entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
+ entry->domain = irq_domain_create_linear(of_fwnode_handle(node), 32, &smsm_irq_ops, entry);
if (!entry->domain) {
dev_err(smsm->dev, "failed to add irq_domain\n");
return -ENOMEM;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 18d7f1be9093..8c4147737c35 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -444,6 +444,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ5302) },
{ qcom_board_id(QCS8550) },
{ qcom_board_id(QCM8550) },
+ { qcom_board_id(SM8750) },
{ qcom_board_id(IPQ5300) },
{ qcom_board_id(IPQ5321) },
{ qcom_board_id(IPQ5424) },
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 49648cf28bd2..fbc3b69d21a7 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -65,17 +65,20 @@ if ARM && ARCH_RENESAS
config ARCH_EMEV2
bool "ARM32 Platform support for Emma Mobile EV2"
+ default ARCH_RENESAS
select HAVE_ARM_SCU if SMP
select SYS_SUPPORTS_EM_STI
config ARCH_R8A7794
bool "ARM32 Platform support for R-Car E2"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_814220
select SYSC_R8A7794
config ARCH_R8A7779
bool "ARM32 Platform support for R-Car H1"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN1
select ARM_ERRATA_754322
select ARM_GLOBAL_TIMER
@@ -85,6 +88,7 @@ config ARCH_R8A7779
config ARCH_R8A7790
bool "ARM32 Platform support for R-Car H2"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
@@ -93,11 +97,13 @@ config ARCH_R8A7790
config ARCH_R8A7778
bool "ARM32 Platform support for R-Car M1A"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN1
select ARM_ERRATA_754322
config ARCH_R8A7793
bool "ARM32 Platform support for R-Car M2-N"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select I2C
@@ -105,6 +111,7 @@ config ARCH_R8A7793
config ARCH_R8A7791
bool "ARM32 Platform support for R-Car M2-W"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select I2C
@@ -112,18 +119,21 @@ config ARCH_R8A7791
config ARCH_R8A7792
bool "ARM32 Platform support for R-Car V2H"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select SYSC_R8A7792
config ARCH_R8A7740
bool "ARM32 Platform support for R-Mobile A1"
+ default ARCH_RENESAS
select ARCH_RMOBILE
select ARM_ERRATA_754322
select RENESAS_INTC_IRQPIN
config ARCH_R8A73A4
bool "ARM32 Platform support for R-Mobile APE6"
+ default ARCH_RENESAS
select ARCH_RMOBILE
select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
@@ -132,6 +142,7 @@ config ARCH_R8A73A4
config ARCH_R7S72100
bool "ARM32 Platform support for RZ/A1H"
+ default ARCH_RENESAS
select ARM_ERRATA_754322
select PM
select PM_GENERIC_DOMAINS
@@ -141,6 +152,7 @@ config ARCH_R7S72100
config ARCH_R7S9210
bool "ARM32 Platform support for RZ/A2"
+ default ARCH_RENESAS
select PM
select PM_GENERIC_DOMAINS
select RENESAS_OSTM
@@ -148,18 +160,21 @@ config ARCH_R7S9210
config ARCH_R8A77470
bool "ARM32 Platform support for RZ/G1C"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_814220
select SYSC_R8A77470
config ARCH_R8A7745
bool "ARM32 Platform support for RZ/G1E"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_814220
select SYSC_R8A7745
config ARCH_R8A7742
bool "ARM32 Platform support for RZ/G1H"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
@@ -167,23 +182,27 @@ config ARCH_R8A7742
config ARCH_R8A7743
bool "ARM32 Platform support for RZ/G1M"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select SYSC_R8A7743
config ARCH_R8A7744
bool "ARM32 Platform support for RZ/G1N"
+ default ARCH_RENESAS
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select SYSC_R8A7743
config ARCH_R9A06G032
bool "ARM32 Platform support for RZ/N1D"
+ default ARCH_RENESAS
select ARCH_RZN1
select ARM_ERRATA_814220
config ARCH_SH73A0
bool "ARM32 Platform support for SH-Mobile AG5"
+ default ARCH_RENESAS
select ARCH_RMOBILE
select ARM_ERRATA_754322
select ARM_GLOBAL_TIMER
@@ -197,6 +216,7 @@ if ARM64
config ARCH_R8A77995
bool "ARM64 Platform support for R-Car D3"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77995
help
@@ -205,6 +225,7 @@ config ARCH_R8A77995
config ARCH_R8A77990
bool "ARM64 Platform support for R-Car E3"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77990
help
@@ -213,6 +234,7 @@ config ARCH_R8A77990
config ARCH_R8A77951
bool "ARM64 Platform support for R-Car H3 ES2.0+"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
@@ -222,6 +244,7 @@ config ARCH_R8A77951
config ARCH_R8A77965
bool "ARM64 Platform support for R-Car M3-N"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77965
help
@@ -230,6 +253,7 @@ config ARCH_R8A77965
config ARCH_R8A77960
bool "ARM64 Platform support for R-Car M3-W"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77960
help
@@ -237,6 +261,7 @@ config ARCH_R8A77960
config ARCH_R8A77961
bool "ARM64 Platform support for R-Car M3-W+"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77961
help
@@ -245,6 +270,7 @@ config ARCH_R8A77961
config ARCH_R8A779F0
bool "ARM64 Platform support for R-Car S4-8"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN4
select SYSC_R8A779F0
help
@@ -252,6 +278,7 @@ config ARCH_R8A779F0
config ARCH_R8A77980
bool "ARM64 Platform support for R-Car V3H"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77980
help
@@ -259,6 +286,7 @@ config ARCH_R8A77980
config ARCH_R8A77970
bool "ARM64 Platform support for R-Car V3M"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A77970
help
@@ -266,6 +294,7 @@ config ARCH_R8A77970
config ARCH_R8A779A0
bool "ARM64 Platform support for R-Car V3U"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN4
select SYSC_R8A779A0
help
@@ -273,6 +302,7 @@ config ARCH_R8A779A0
config ARCH_R8A779G0
bool "ARM64 Platform support for R-Car V4H"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN4
select SYSC_R8A779G0
help
@@ -280,6 +310,7 @@ config ARCH_R8A779G0
config ARCH_R8A779H0
bool "ARM64 Platform support for R-Car V4M"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN4
select SYSC_R8A779H0
help
@@ -287,6 +318,7 @@ config ARCH_R8A779H0
config ARCH_R8A774C0
bool "ARM64 Platform support for RZ/G2E"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A774C0
help
@@ -294,6 +326,7 @@ config ARCH_R8A774C0
config ARCH_R8A774E1
bool "ARM64 Platform support for RZ/G2H"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A774E1
help
@@ -301,6 +334,7 @@ config ARCH_R8A774E1
config ARCH_R8A774A1
bool "ARM64 Platform support for RZ/G2M"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A774A1
help
@@ -308,6 +342,7 @@ config ARCH_R8A774A1
config ARCH_R8A774B1
bool "ARM64 Platform support for RZ/G2N"
+ default y if ARCH_RENESAS
select ARCH_RCAR_GEN3
select SYSC_R8A774B1
help
@@ -315,24 +350,28 @@ config ARCH_R8A774B1
config ARCH_R9A07G043
bool "ARM64 Platform support for RZ/G2UL"
+ default y if ARCH_RENESAS
select ARCH_RZG2L
help
This enables support for the Renesas RZ/G2UL SoC variants.
config ARCH_R9A07G044
bool "ARM64 Platform support for RZ/G2L"
+ default y if ARCH_RENESAS
select ARCH_RZG2L
help
This enables support for the Renesas RZ/G2L SoC variants.
config ARCH_R9A07G054
bool "ARM64 Platform support for RZ/V2L"
+ default y if ARCH_RENESAS
select ARCH_RZG2L
help
This enables support for the Renesas RZ/V2L SoC variants.
config ARCH_R9A08G045
bool "ARM64 Platform support for RZ/G3S"
+ default y if ARCH_RENESAS
select ARCH_RZG2L
select SYSC_R9A08G045
help
@@ -340,6 +379,7 @@ config ARCH_R9A08G045
config ARCH_R9A09G011
bool "ARM64 Platform support for RZ/V2M"
+ default y if ARCH_RENESAS
select PM
select PM_GENERIC_DOMAINS
select PWC_RZV2M
@@ -348,12 +388,21 @@ config ARCH_R9A09G011
config ARCH_R9A09G047
bool "ARM64 Platform support for RZ/G3E"
+ default y if ARCH_RENESAS
select SYS_R9A09G047
help
This enables support for the Renesas RZ/G3E SoC variants.
+config ARCH_R9A09G056
+ bool "ARM64 Platform support for RZ/V2N"
+ default y if ARCH_RENESAS
+ select SYS_R9A09G056
+ help
+ This enables support for the Renesas RZ/V2N SoC variants.
+
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
+ default y if ARCH_RENESAS
select RENESAS_RZV2H_ICU
select SYS_R9A09G057
help
@@ -397,6 +446,10 @@ config SYS_R9A09G047
bool "Renesas RZ/G3E System controller support" if COMPILE_TEST
select SYSC_RZ
+config SYS_R9A09G056
+ bool "Renesas RZ/V2N System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
config SYS_R9A09G057
bool "Renesas RZ/V2H System controller support" if COMPILE_TEST
select SYSC_RZ
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 81d4c5726e4c..3bdcc6a395d5 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
obj-$(CONFIG_SYSC_R9A08G045) += r9a08g045-sysc.o
obj-$(CONFIG_SYS_R9A09G047) += r9a09g047-sys.o
+obj-$(CONFIG_SYS_R9A09G056) += r9a09g056-sys.o
obj-$(CONFIG_SYS_R9A09G057) += r9a09g057-sys.o
# Family
diff --git a/drivers/soc/renesas/r9a09g056-sys.c b/drivers/soc/renesas/r9a09g056-sys.c
new file mode 100644
index 000000000000..3ad1422eba36
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g056-sys.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/V2N System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+#define SYS_LSI_MODE_SEC_EN BIT(16)
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHz
+ * [0,1] => 1.5GHz
+ * [1,0] => 1.6GHz
+ * [1,1] => 1.7GHz
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_GPU_DIS BIT(0)
+#define SYS_LSI_PRR_ISP_DIS BIT(4)
+
+#define SYS_RZV2N_FEATURE_G31 BIT(0)
+#define SYS_RZV2N_FEATURE_C55 BIT(1)
+#define SYS_RZV2N_FEATURE_SEC BIT(2)
+
+static void rzv2n_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ u32 prr_val, mode_val;
+ u8 feature_flags;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check GPU, ISP and Cryptographic configuration */
+ feature_flags = !(prr_val & SYS_LSI_PRR_GPU_DIS) ? SYS_RZV2N_FEATURE_G31 : 0;
+ feature_flags |= !(prr_val & SYS_LSI_PRR_ISP_DIS) ? SYS_RZV2N_FEATURE_C55 : 0;
+ feature_flags |= (mode_val & SYS_LSI_MODE_SEC_EN) ? SYS_RZV2N_FEATURE_SEC : 0;
+
+ dev_info(dev, "Detected Renesas %s %sn%d Rev %s%s%s%s%s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, 41 + feature_flags, soc_dev_attr->revision,
+ feature_flags ? " with" : "",
+ feature_flags & SYS_RZV2N_FEATURE_G31 ? " GE3D (Mali-G31)" : "",
+ feature_flags & SYS_RZV2N_FEATURE_SEC ? " Cryptographic engine" : "",
+ feature_flags & SYS_RZV2N_FEATURE_C55 ? " ISP (Mali-C55)" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzv2n_sys_soc_id_init_data __initconst = {
+ .family = "RZ/V2N",
+ .id = 0x867d447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzv2n_sys_print_id,
+};
+
+const struct rz_sysc_init_data rzv2n_sys_init_data = {
+ .soc_id_init_data = &rzv2n_sys_soc_id_init_data,
+};
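
The print_id callback above derives the printed part suffix from the three feature bits (41 + feature_flags, i.e. n41 through n48). A worked sketch of that decoding; the helper name is hypothetical, not part of the patch:

	#include <linux/bits.h>
	#include <linux/types.h>

	/* Returns the numeric suffix printed via "%sn%d" above. */
	static u8 rzv2n_part_suffix(u32 prr_val, u32 mode_val)
	{
		u8 flags = 0;

		flags |= !(prr_val & BIT(0)) ? BIT(0) : 0;  /* GE3D (Mali-G31) */
		flags |= !(prr_val & BIT(4)) ? BIT(1) : 0;  /* ISP (Mali-C55)  */
		flags |= (mode_val & BIT(16)) ? BIT(2) : 0; /* Crypto engine   */

		return 41 + flags;	/* all features present -> 48 */
	}
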
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
index 14db508f669f..ffa65fb4dade 100644
--- a/drivers/soc/renesas/rz-sysc.c
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -88,6 +88,9 @@ static const struct of_device_id rz_sysc_match[] = {
#ifdef CONFIG_SYS_R9A09G047
{ .compatible = "renesas,r9a09g047-sys", .data = &rzg3e_sys_init_data },
#endif
+#ifdef CONFIG_SYS_R9A09G056
+ { .compatible = "renesas,r9a09g056-sys", .data = &rzv2n_sys_init_data },
+#endif
#ifdef CONFIG_SYS_R9A09G057
{ .compatible = "renesas,r9a09g057-sys", .data = &rzv2h_sys_init_data },
#endif
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
index aa83948c5117..56bc047a1bff 100644
--- a/drivers/soc/renesas/rz-sysc.h
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -42,5 +42,6 @@ struct rz_sysc_init_data {
extern const struct rz_sysc_init_data rzg3e_sys_init_data;
extern const struct rz_sysc_init_data rzg3s_sysc_init_data;
extern const struct rz_sysc_init_data rzv2h_sys_init_data;
+extern const struct rz_sysc_init_data rzv2n_sys_init_data;
#endif /* __SOC_RENESAS_RZ_SYSC_H__ */
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index c40313886a01..a77288f49d24 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -7,6 +7,7 @@
#include <linux/array_size.h>
#include <linux/arm-smccc.h>
+#include <linux/cpuhotplug.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
@@ -33,6 +34,7 @@ struct exynos_pmu_context {
struct device *dev;
const struct exynos_pmu_data *pmu_data;
struct regmap *pmureg;
+ struct regmap *pmuintrgen;
};
void __iomem *pmu_base_addr;
@@ -222,7 +224,8 @@ static const struct regmap_config regmap_smccfg = {
};
static const struct exynos_pmu_data gs101_pmu_data = {
- .pmu_secure = true
+ .pmu_secure = true,
+ .pmu_cpuhp = true,
};
/*
@@ -326,6 +329,59 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
+/*
+ * CPU_INFORM register hint values which are used by
+ * EL3 firmware (el3mon).
+ */
+#define CPU_INFORM_CLEAR 0
+#define CPU_INFORM_C2 1
+
+static int gs101_cpuhp_pmu_online(unsigned int cpu)
+{
+ unsigned int cpuhint = smp_processor_id();
+ u32 reg, mask;
+
+ /* clear cpu inform hint */
+ regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
+ CPU_INFORM_CLEAR);
+
+ mask = BIT(cpu);
+
+ regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
+ mask, (0 << cpu));
+
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, &reg);
+
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR,
+ reg & mask);
+
+ return 0;
+}
+
+static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+{
+ u32 reg, mask;
+ unsigned int cpuhint = smp_processor_id();
+
+ /* set cpu inform hint */
+ regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
+ CPU_INFORM_C2);
+
+ mask = BIT(cpu);
+ regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
+ mask, BIT(cpu));
+
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
+ reg & mask);
+
+ mask = (BIT(cpu + 8));
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
+ reg & mask);
+ return 0;
+}
+
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -378,6 +434,26 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_context->pmureg = regmap;
pmu_context->dev = dev;
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
+ pmu_context->pmuintrgen = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "google,pmu-intr-gen-syscon");
+ if (IS_ERR(pmu_context->pmuintrgen)) {
+ /*
+			 * To maintain support for older DTs that didn't specify the syscon
+			 * phandle, just issue a warning rather than failing to probe.
+ */
+ dev_warn(&pdev->dev, "pmu-intr-gen syscon unavailable\n");
+ } else {
+ cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,
+ "soc/exynos-pmu:prepare",
+ gs101_cpuhp_pmu_online, NULL);
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "soc/exynos-pmu:online",
+ NULL, gs101_cpuhp_pmu_offline);
+ }
+ }
+
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
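
The gs101 hooks above are registered through two dynamic cpuhp states: the online callback runs at the BP "prepare" stage and the offline callback runs as the CPU goes down. A minimal sketch of the registration pattern (the regmap writes and error handling are omitted; names are placeholders):

	#include <linux/cpuhotplug.h>

	static int example_cpu_online(unsigned int cpu)
	{
		/* undo the "powering down" hint for @cpu */
		return 0;
	}

	static int example_cpu_offline(unsigned int cpu)
	{
		/* set the "powering down" hint for @cpu */
		return 0;
	}

	static int example_register(void)
	{
		int ret;

		/* *_DYN states return the allocated state number (> 0) */
		ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "example:prepare",
					example_cpu_online, NULL);
		if (ret < 0)
			return ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
					NULL, example_cpu_offline);
		return ret < 0 ? ret : 0;
	}
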
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 0a49a2c9a08e..0938bb4fe15f 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -22,6 +22,7 @@ struct exynos_pmu_data {
const struct exynos_pmu_conf *pmu_config;
const struct exynos_pmu_conf *pmu_config_extra;
bool pmu_secure;
+ bool pmu_cpuhp;
void (*pmu_init)(void);
void (*powerdown_conf)(enum sys_powerdown);
diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
index c5661ac19f7b..5f7bdf3bab05 100644
--- a/drivers/soc/samsung/exynos-usi.c
+++ b/drivers/soc/samsung/exynos-usi.c
@@ -233,7 +233,7 @@ static void exynos_usi_unconfigure(void *data)
/* Make sure that we've stopped providing the clock to USI IP */
val = readl(usi->regs + USI_OPTION);
val &= ~USI_OPTION_CLKREQ_ON;
- val |= ~USI_OPTION_CLKSTOP_ON;
+ val |= USI_OPTION_CLKSTOP_ON;
writel(val, usi->regs + USI_OPTION);
/* Set USI block state to reset */
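
The exynos-usi hunk above fixes a bitwise-operator bug: OR-ing with the complement (~USI_OPTION_CLKSTOP_ON) sets every bit except the intended one. A small userspace demonstration, with placeholder bit positions:

	#include <stdint.h>
	#include <stdio.h>

	#define CLKREQ_ON  (1u << 1)	/* illustrative positions only */
	#define CLKSTOP_ON (1u << 2)

	int main(void)
	{
		uint32_t val = CLKREQ_ON;

		/* buggy: prints 0xfffffffb (all bits except CLKSTOP_ON) */
		printf("0x%08x\n", (val & ~CLKREQ_ON) | ~CLKSTOP_ON);
		/* fixed: prints 0x00000004 (only CLKSTOP_ON) */
		printf("0x%08x\n", (val & ~CLKREQ_ON) | CLKSTOP_ON);
		return 0;
	}
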
diff --git a/drivers/soc/sophgo/Kconfig b/drivers/soc/sophgo/Kconfig
new file mode 100644
index 000000000000..45f78b270c91
--- /dev/null
+++ b/drivers/soc/sophgo/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Sophgo SoC drivers
+#
+
+if ARCH_SOPHGO || COMPILE_TEST
+menu "Sophgo SoC drivers"
+
+config SOPHGO_CV1800_RTCSYS
+ tristate "Sophgo CV1800 RTC MFD"
+ select MFD_CORE
+ help
+	  If you say yes here you get support for the RTC MFD driver for the
+	  Sophgo CV1800 series SoCs. The RTC module comprises a 32kHz
+	  oscillator, a Power-on-Reset (PoR) sub-module, and a HW state
+	  machine to control chip power-on, power-off and reset. Furthermore,
+	  the 8051 subsystem is located within RTCSYS, including its
+	  associated SRAM block.
+
+ This driver can also be built as a module. If so, the module will be
+ called cv1800-rtcsys.
+
+config SOPHGO_SG2044_TOPSYS
+ tristate "Sophgo SG2044 TOP syscon driver"
+ select MFD_CORE
+ help
+	  This is the core driver for the Sophgo SG2044 TOP system
+	  controller device. This driver provides the PLL clock device
+	  for the SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called sg2044-topsys.
+
+endmenu
+endif
diff --git a/drivers/soc/sophgo/Makefile b/drivers/soc/sophgo/Makefile
new file mode 100644
index 000000000000..27f68df22c4d
--- /dev/null
+++ b/drivers/soc/sophgo/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SOPHGO_CV1800_RTCSYS) += cv1800-rtcsys.o
+obj-$(CONFIG_SOPHGO_SG2044_TOPSYS) += sg2044-topsys.o
diff --git a/drivers/soc/sophgo/cv1800-rtcsys.c b/drivers/soc/sophgo/cv1800-rtcsys.c
new file mode 100644
index 000000000000..fdae2e2a61c5
--- /dev/null
+++ b/drivers/soc/sophgo/cv1800-rtcsys.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Sophgo CV1800 series SoC RTC subsystem
+ *
+ * The RTC module comprises a 32kHz oscillator, a Power-on-Reset (PoR)
+ * sub-module, and a HW state machine to control chip power-on, power-off and
+ * reset. Furthermore, the 8051 subsystem is located within RTCSYS, including
+ * its associated SRAM block.
+ *
+ * Copyright (C) 2025 Alexander Sverdlin <alexander.sverdlin@gmail.com>
+ *
+ */
+
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+static struct resource cv1800_rtcsys_irq_resources[] = {
+ DEFINE_RES_IRQ_NAMED(0, "alarm"),
+};
+
+static const struct mfd_cell cv1800_rtcsys_subdev[] = {
+ {
+ .name = "cv1800b-rtc",
+ .num_resources = 1,
+ .resources = &cv1800_rtcsys_irq_resources[0],
+ },
+};
+
+static int cv1800_rtcsys_probe(struct platform_device *pdev)
+{
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "alarm");
+ if (irq < 0)
+ return irq;
+ cv1800_rtcsys_irq_resources[0].start = irq;
+ cv1800_rtcsys_irq_resources[0].end = irq;
+
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ cv1800_rtcsys_subdev,
+ ARRAY_SIZE(cv1800_rtcsys_subdev),
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id cv1800_rtcsys_of_match[] = {
+ { .compatible = "sophgo,cv1800b-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cv1800_rtcsys_of_match);
+
+static struct platform_driver cv1800_rtcsys_mfd = {
+ .probe = cv1800_rtcsys_probe,
+ .driver = {
+ .name = "cv1800_rtcsys",
+ .of_match_table = cv1800_rtcsys_of_match,
+ },
+};
+module_platform_driver(cv1800_rtcsys_mfd);
+
+MODULE_AUTHOR("Alexander Sverdlin <alexander.sverdlin@gmail.com>");
+MODULE_DESCRIPTION("Sophgo CV1800 series SoC RTC subsystem driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/sophgo/sg2044-topsys.c b/drivers/soc/sophgo/sg2044-topsys.c
new file mode 100644
index 000000000000..179f2620b2a9
--- /dev/null
+++ b/drivers/soc/sophgo/sg2044-topsys.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2044 multi-function system controller driver
+ *
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/mfd/core.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/resource.h>
+
+static const struct mfd_cell sg2044_topsys_subdev[] = {
+ {
+ .name = "sg2044-pll",
+ },
+};
+
+static int sg2044_topsys_probe(struct platform_device *pdev)
+{
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ sg2044_topsys_subdev,
+ ARRAY_SIZE(sg2044_topsys_subdev),
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id sg2044_topsys_of_match[] = {
+ { .compatible = "sophgo,sg2044-top-syscon" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_topsys_of_match);
+
+static struct platform_driver sg2044_topsys_driver = {
+ .probe = sg2044_topsys_probe,
+ .driver = {
+ .name = "sg2044-topsys",
+ .of_match_table = sg2044_topsys_of_match,
+ },
+};
+module_platform_driver(sg2044_topsys_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo SG2044 multi-function system controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 51b9d852bb6a..e0d67bfe955c 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -2500,8 +2500,9 @@ static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
pmc->irq.irq_set_type = pmc->soc->irq_set_type;
pmc->irq.irq_set_wake = pmc->soc->irq_set_wake;
- pmc->domain = irq_domain_add_hierarchy(parent, 0, 96, pmc->dev->of_node,
- &tegra_pmc_irq_domain_ops, pmc);
+ pmc->domain = irq_domain_create_hierarchy(parent, 0, 96,
+ of_fwnode_handle(pmc->dev->of_node),
+ &tegra_pmc_irq_domain_ops, pmc);
if (!pmc->domain) {
dev_err(pmc->dev, "failed to allocate domain\n");
return -ENOMEM;
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 82a15cad1c6c..7602b8a909b0 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -1291,7 +1291,7 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
mutex_lock(&k3_ringacc_list_lock);
list_for_each_entry(entry, &k3_ringacc_list, list)
- if (entry->dev->of_node == ringacc_np) {
+ if (device_match_of_node(entry->dev, ringacc_np)) {
ringacc = entry;
break;
}
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 704039eb3c07..d716be113c84 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -43,6 +43,7 @@
#define JTAG_ID_PARTNO_AM62AX 0xBB8D
#define JTAG_ID_PARTNO_AM62PX 0xBB9D
#define JTAG_ID_PARTNO_J722S 0xBBA0
+#define JTAG_ID_PARTNO_AM62LX 0xBBA7
static const struct k3_soc_id {
unsigned int id;
@@ -58,6 +59,7 @@ static const struct k3_soc_id {
{ JTAG_ID_PARTNO_AM62AX, "AM62AX" },
{ JTAG_ID_PARTNO_AM62PX, "AM62PX" },
{ JTAG_ID_PARTNO_J722S, "J722S" },
+ { JTAG_ID_PARTNO_AM62LX, "AM62LX" },
};
static const char * const j721e_rev_string_map[] = {
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index ea52425864a9..6e56e7609ccd 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -252,8 +252,7 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
return qh;
err:
- if (qh->stats)
- free_percpu(qh->stats);
+ free_percpu(qh->stats);
devm_kfree(inst->kdev->dev, qh);
return ERR_PTR(ret);
}
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index c36364522157..193266f5e3f9 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -103,19 +103,15 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
if (ret)
return ret;
- msi_lock_descs(dev);
+ guard(msi_descs_lock)(dev);
nvec = ti_sci_inta_msi_alloc_descs(dev, res);
- if (nvec <= 0) {
- ret = nvec;
- goto unlock;
- }
+ if (nvec <= 0)
+ return nvec;
/* Use alloc ALL as it's unclear whether there are gaps in the indices */
ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec);
if (ret)
dev_err(dev, "Failed to allocate IRQs %d\n", ret);
-unlock:
- msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
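
The ti_sci_inta_msi hunk above replaces the manual msi_lock_descs()/msi_unlock_descs() pair with a scope-based guard, which releases the lock on every return path and lets the early-exit goto disappear. A sketch of the general guard() pattern, using a plain mutex rather than the msi_descs_lock guard class:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);
	DEFINE_GUARD(example, struct mutex *, mutex_lock(_T), mutex_unlock(_T));

	static int example_locked_op(int nvec)
	{
		guard(example)(&example_lock);

		if (nvec <= 0)
			return nvec;	/* unlock happens automatically */

		/* ... allocate @nvec interrupts while holding the lock ... */
		return 0;
	}
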
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 79dde9a7ec63..5845fc652adc 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -644,11 +644,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
- if (IS_ERR(m3_ipc->mbox)) {
- dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
- PTR_ERR(m3_ipc->mbox));
- return PTR_ERR(m3_ipc->mbox);
- }
+ if (IS_ERR(m3_ipc->mbox))
+ return dev_err_probe(dev, PTR_ERR(m3_ipc->mbox),
+ "IPC Request for A8->M3 Channel failed!\n");
if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
dev_err(&pdev->dev, "could not get rproc phandle\n");
diff --git a/drivers/soc/vt8500/Kconfig b/drivers/soc/vt8500/Kconfig
new file mode 100644
index 000000000000..b4cc0ba1128b
--- /dev/null
+++ b/drivers/soc/vt8500/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_VT8500 || COMPILE_TEST
+
+menu "VIA/WonderMedia SoC drivers"
+
+config WMT_SOCINFO
+ bool "VIA/WonderMedia SoC Information driver"
+ default ARCH_VT8500
+ select SOC_BUS
+ help
+	  Say yes to support decoding of VIA/WonderMedia system configuration
+	  register information. This currently includes just the chip ID
+	  register, which helps identify the exact hardware revision of the
+	  SoC the kernel is running on (to know if any revision-specific
+	  quirks are required).
+
+endmenu
+
+endif
diff --git a/drivers/soc/vt8500/Makefile b/drivers/soc/vt8500/Makefile
new file mode 100644
index 000000000000..05964c5f2890
--- /dev/null
+++ b/drivers/soc/vt8500/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_WMT_SOCINFO) += wmt-socinfo.o
diff --git a/drivers/soc/vt8500/wmt-socinfo.c b/drivers/soc/vt8500/wmt-socinfo.c
new file mode 100644
index 000000000000..461f8c1ae56e
--- /dev/null
+++ b/drivers/soc/vt8500/wmt-socinfo.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2025 Alexey Charkov <alchark@gmail.com>
+ * Based on aspeed-socinfo.c
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+
+static const struct {
+ const char *name;
+ const u32 id;
+} chip_id_table[] = {
+ /* VIA */
+ { "VT8420", 0x3300 },
+ { "VT8430", 0x3357 },
+ { "VT8500", 0x3400 },
+
+ /* WonderMedia */
+ { "WM8425", 0x3429 },
+ { "WM8435", 0x3437 },
+ { "WM8440", 0x3451 },
+ { "WM8505", 0x3426 },
+ { "WM8650", 0x3465 },
+ { "WM8750", 0x3445 },
+ { "WM8850", 0x3481 },
+ { "WM8880", 0x3498 },
+};
+
+static const char *sccid_to_name(u32 sccid)
+{
+ u32 id = sccid >> 16;
+ unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(chip_id_table); ++i) {
+ if (chip_id_table[i].id == id)
+ return chip_id_table[i].name;
+ }
+
+ return "Unknown";
+}
+
+static int wmt_socinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ char letter, digit;
+ void __iomem *reg;
+ u32 sccid;
+
+ reg = devm_of_iomap(&pdev->dev, np, 0, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ sccid = readl(reg);
+
+ attrs = devm_kzalloc(&pdev->dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ /*
+ * Machine: VIA APC Rock
+ * Family: WM8850
+ * Revision: A2
+ * SoC ID: raw silicon revision id (34810103 in hexadecimal)
+ */
+
+ attrs->family = sccid_to_name(sccid);
+
+ letter = (sccid >> 8) & 0xf;
+ letter = (letter - 1) + 'A';
+ digit = sccid & 0xff;
+ digit = (digit - 1) + '0';
+ attrs->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%c%c", letter, digit);
+
+ attrs->soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%08x", sccid);
+
+ if (!attrs->revision || !attrs->soc_id)
+ return -ENOMEM;
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ dev_info(&pdev->dev,
+ "VIA/WonderMedia %s rev %s (%s)\n",
+ attrs->family,
+ attrs->revision,
+ attrs->soc_id);
+
+ platform_set_drvdata(pdev, soc_dev);
+ return 0;
+}
+
+static void wmt_socinfo_remove(struct platform_device *pdev)
+{
+ struct soc_device *soc_dev = platform_get_drvdata(pdev);
+
+ soc_device_unregister(soc_dev);
+}
+
+static const struct of_device_id wmt_socinfo_ids[] = {
+ { .compatible = "via,vt8500-scc-id" },
+ { /* Sentinel */ },
+};
+
+static struct platform_driver wmt_socinfo = {
+ .probe = wmt_socinfo_probe,
+ .remove = wmt_socinfo_remove,
+ .driver = {
+ .name = "wmt-socinfo",
+ .of_match_table = wmt_socinfo_ids,
+ },
+};
+module_platform_driver(wmt_socinfo);
+
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
+MODULE_DESCRIPTION("VIA/WonderMedia socinfo driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 6f8a20014e76..68db4b67a86f 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -56,6 +56,8 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
return ret;
}
+ ida_init(&bus->slave_ida);
+
ret = sdw_master_device_add(bus, parent, fwnode);
if (ret < 0) {
dev_err(parent, "Failed to add master device at link %d\n",
@@ -122,6 +124,10 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
+ ret = sdw_irq_create(bus, fwnode);
+ if (ret)
+ return ret;
+
/*
* SDW is an enumerable bus, but devices can be powered off. So,
* they won't be able to report as present.
@@ -138,6 +144,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
if (ret < 0) {
dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
+ sdw_irq_delete(bus);
return ret;
}
@@ -156,10 +163,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
bus->params.curr_bank = SDW_BANK0;
bus->params.next_bank = SDW_BANK1;
- ret = sdw_irq_create(bus, fwnode);
- if (ret)
- return ret;
-
return 0;
}
EXPORT_SYMBOL(sdw_bus_master_add);
@@ -750,41 +753,36 @@ err:
static int sdw_assign_device_num(struct sdw_slave *slave)
{
struct sdw_bus *bus = slave->bus;
- int ret, dev_num;
- bool new_device = false;
+ struct device *dev = bus->dev;
+ int ret;
/* check first if device number is assigned, if so reuse that */
if (!slave->dev_num) {
if (!slave->dev_num_sticky) {
+ int dev_num;
+
mutex_lock(&slave->bus->bus_lock);
dev_num = sdw_get_device_num(slave);
mutex_unlock(&slave->bus->bus_lock);
if (dev_num < 0) {
- dev_err(bus->dev, "Get dev_num failed: %d\n",
- dev_num);
+ dev_err(dev, "Get dev_num failed: %d\n", dev_num);
return dev_num;
}
- slave->dev_num = dev_num;
+
slave->dev_num_sticky = dev_num;
- new_device = true;
} else {
- slave->dev_num = slave->dev_num_sticky;
+ dev_dbg(dev, "Slave already registered, reusing dev_num: %d\n",
+ slave->dev_num_sticky);
}
}
- if (!new_device)
- dev_dbg(bus->dev,
- "Slave already registered, reusing dev_num:%d\n",
- slave->dev_num);
-
/* Clear the slave->dev_num to transfer message on device 0 */
- dev_num = slave->dev_num;
slave->dev_num = 0;
- ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
+ ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, slave->dev_num_sticky);
if (ret < 0) {
- dev_err(bus->dev, "Program device_num %d failed: %d\n",
- dev_num, ret);
+ dev_err(dev, "Program device_num %d failed: %d\n",
+ slave->dev_num_sticky, ret);
return ret;
}
@@ -792,7 +790,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
slave->dev_num = slave->dev_num_sticky;
if (bus->ops && bus->ops->new_peripheral_assigned)
- bus->ops->new_peripheral_assigned(bus, slave, dev_num);
+ bus->ops->new_peripheral_assigned(bus, slave, slave->dev_num);
return 0;
}
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index e98d5db81b1c..75d6f16efced 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -105,9 +105,17 @@ static int sdw_drv_probe(struct device *dev)
if (ret)
return ret;
+ ret = ida_alloc_max(&slave->bus->slave_ida, SDW_FW_MAX_DEVICES, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to allocated ID: %d\n", ret);
+ return ret;
+ }
+ slave->index = ret;
+
ret = drv->probe(slave, id);
if (ret) {
dev_pm_domain_detach(dev, false);
+ ida_free(&slave->bus->slave_ida, slave->index);
return ret;
}
@@ -174,6 +182,8 @@ static int sdw_drv_remove(struct device *dev)
dev_pm_domain_detach(dev, false);
+ ida_free(&slave->bus->slave_ida, slave->index);
+
return ret;
}
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index 1cfaccf43eac..c18f0c16f929 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -204,6 +204,13 @@ static void _sdw_compute_port_params(struct sdw_bus *bus,
port_bo = 1;
list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ /*
+ * Only runtimes with CONFIGURED, PREPARED, ENABLED, and DISABLED
+ * states should be included in the bandwidth calculation.
+ */
+ if (m_rt->stream->state > SDW_STREAM_DISABLED ||
+ m_rt->stream->state < SDW_STREAM_CONFIGURED)
+ continue;
sdw_compute_master_ports(m_rt, &params[i], &port_bo, hstop);
}
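
The new filter above relies on the stream-state enum being ordered so that the states that consume bandwidth (CONFIGURED through DISABLED) form a contiguous range. A sketch of that assumption (illustrative enum, not the sdw_stream_state definition):

	enum example_stream_state {
		EX_STREAM_ALLOCATED,
		EX_STREAM_CONFIGURED,	/* first state counted for bandwidth */
		EX_STREAM_PREPARED,
		EX_STREAM_ENABLED,
		EX_STREAM_DISABLED,	/* last state counted for bandwidth */
		EX_STREAM_DEPREPARED,
	};

	static bool example_counts_bandwidth(enum example_stream_state s)
	{
		return s >= EX_STREAM_CONFIGURED && s <= EX_STREAM_DISABLED;
	}
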
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index d44e70d3c4e3..86abc465260f 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -22,6 +22,7 @@ struct hdac_bus;
* @shim_lock: mutex to handle access to shared SHIM registers
* @shim_mask: global pointer to check SHIM register initialization
* @clock_stop_quirks: mask defining requested behavior on pm_suspend
+ * @mic_privacy: ACE version supports microphone privacy
* @link_mask: global mask needed for power-up/down sequences
* @cdns: Cadence master descriptor
* @list: used to walk-through all masters exposed by the same controller
@@ -42,6 +43,7 @@ struct sdw_intel_link_res {
struct mutex *shim_lock; /* protect shared registers */
u32 *shim_mask;
u32 clock_stop_quirks;
+ bool mic_privacy;
u32 link_mask;
struct sdw_cdns *cdns;
struct list_head list;
diff --git a/drivers/soundwire/intel_ace2x_debugfs.c b/drivers/soundwire/intel_ace2x_debugfs.c
index 206a8d511ebd..fda8f0daaa96 100644
--- a/drivers/soundwire/intel_ace2x_debugfs.c
+++ b/drivers/soundwire/intel_ace2x_debugfs.c
@@ -76,6 +76,12 @@ static int intel_reg_show(struct seq_file *s_file, void *data)
ret += intel_sprintf(vs_s, false, buf, ret, SDW_SHIM2_INTEL_VS_IOCTL);
ret += intel_sprintf(vs_s, false, buf, ret, SDW_SHIM2_INTEL_VS_ACTMCTL);
+ if (sdw->link_res->mic_privacy) {
+ ret += scnprintf(buf + ret, RD_BUF - ret, "\nVS PVCCS\n");
+ ret += intel_sprintf(vs_s, false, buf, ret,
+ SDW_SHIM2_INTEL_VS_PVCCS);
+ }
+
seq_printf(s_file, "%s", buf);
kfree(buf);
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 5f53666514a4..4ffdabaf9693 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -77,6 +77,7 @@ static struct sdw_intel_link_dev *intel_link_dev_register(struct sdw_intel_res *
link->shim = res->mmio_base + SDW_SHIM2_GENERIC_BASE(link_id);
link->shim_vs = res->mmio_base + SDW_SHIM2_VS_BASE(link_id);
link->shim_lock = res->eml_lock;
+ link->mic_privacy = res->mic_privacy;
}
link->ops = res->ops;
diff --git a/drivers/soundwire/irq.c b/drivers/soundwire/irq.c
index c237e6d0766b..f18be37efef8 100644
--- a/drivers/soundwire/irq.c
+++ b/drivers/soundwire/irq.c
@@ -31,7 +31,7 @@ int sdw_irq_create(struct sdw_bus *bus,
{
bus->irq_chip.name = dev_name(bus->dev);
- bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES,
+ bus->domain = irq_domain_create_linear(fwnode, SDW_FW_MAX_DEVICES,
&sdw_domain_ops, bus);
if (!bus->domain) {
dev_err(bus->dev, "Failed to add IRQ domain\n");
@@ -50,12 +50,12 @@ static void sdw_irq_dispose_mapping(void *data)
{
struct sdw_slave *slave = data;
- irq_dispose_mapping(irq_find_mapping(slave->bus->domain, slave->dev_num));
+ irq_dispose_mapping(slave->irq);
}
void sdw_irq_create_mapping(struct sdw_slave *slave)
{
- slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num);
+ slave->irq = irq_create_mapping(slave->bus->domain, slave->index);
if (!slave->irq)
dev_warn(&slave->dev, "Failed to map IRQ\n");
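
Taken together, the bus_type.c and irq.c hunks above move SoundWire slave IRQ mappings from the enumeration-time dev_num (which can be reused) to a stable per-slave index allocated from the bus IDA at driver probe. A simplified sketch of the lifecycle (fields and names condensed; not the real sdw_slave layout):

	#include <linux/idr.h>
	#include <linux/irqdomain.h>

	struct example_slave {
		struct irq_domain *domain;
		struct ida *slave_ida;
		int index;
		int irq;
	};

	static int example_slave_probe(struct example_slave *s, int max_devices)
	{
		/* stable, bus-local identity for the slave's IRQ mapping */
		s->index = ida_alloc_max(s->slave_ida, max_devices, GFP_KERNEL);
		if (s->index < 0)
			return s->index;

		s->irq = irq_create_mapping(s->domain, s->index);
		return 0;
	}

	static void example_slave_remove(struct example_slave *s)
	{
		/* dispose by virq, as sdw_irq_dispose_mapping() now does */
		irq_dispose_mapping(s->irq);
		ida_free(s->slave_ida, s->index);
	}
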
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ed38f6d41f47..c51da3fc3604 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1266,7 +1266,9 @@ config SPI_ZYNQMP_GQSPI
config SPI_AMD
tristate "AMD SPI controller"
- depends on SPI_MASTER || COMPILE_TEST
+ depends on PCI
+ depends on SPI_MASTER || X86 || COMPILE_TEST
+ depends on SPI_MEM
help
Enables SPI controller driver for AMD SoC.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c3a1a47b3bf4..4ea89f6fc531 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -162,7 +162,7 @@ obj-$(CONFIG_SPI_XLP) += spi-xlp.o
obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o
obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
-obj-$(CONFIG_SPI_AMD) += spi-amd.o
+obj-$(CONFIG_SPI_AMD) += spi-amd.o spi-amd-pci.o
# SPI slave protocol handlers
obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 244ac0106862..2f6797324227 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -1287,9 +1287,9 @@ static int atmel_qspi_dma_init(struct spi_controller *ctrl)
aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx");
if (IS_ERR(aq->rx_chan)) {
- aq->rx_chan = NULL;
- return dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
- "RX DMA channel is not available\n");
+ ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
+ "RX DMA channel is not available\n");
+ goto null_rx_chan;
}
aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx");
@@ -1310,8 +1310,9 @@ static int atmel_qspi_dma_init(struct spi_controller *ctrl)
release_rx_chan:
dma_release_channel(aq->rx_chan);
- aq->rx_chan = NULL;
aq->tx_chan = NULL;
+null_rx_chan:
+ aq->rx_chan = NULL;
return ret;
}
@@ -1436,22 +1437,17 @@ static int atmel_qspi_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
+ devm_pm_runtime_set_active_enabled(&pdev->dev);
+ devm_pm_runtime_get_noresume(&pdev->dev);
err = atmel_qspi_init(aq);
if (err)
goto dma_release;
err = spi_register_controller(ctrl);
- if (err) {
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
+ if (err)
goto dma_release;
- }
+
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
@@ -1530,10 +1526,6 @@ static void atmel_qspi_remove(struct platform_device *pdev)
*/
dev_warn(&pdev->dev, "Failed to resume device on remove\n");
}
-
- pm_runtime_disable(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
}
static int __maybe_unused atmel_qspi_suspend(struct device *dev)
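
The atmel-quadspi conversion above hands runtime-PM teardown to devres, so the error paths and remove() no longer undo it by hand. A minimal probe skeleton, under the assumption that the devm_pm_runtime_*() helpers used in this patch return 0 on success:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_probe(struct platform_device *pdev)
	{
		int err;

		pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
		pm_runtime_use_autosuspend(&pdev->dev);

		/* devres undoes the enable/active state and the noresume
		 * reference on detach; no manual cleanup needed anywhere */
		err = devm_pm_runtime_set_active_enabled(&pdev->dev);
		if (err)
			return err;
		err = devm_pm_runtime_get_noresume(&pdev->dev);
		if (err)
			return err;

		/* ... hardware init and controller registration ... */

		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_autosuspend(&pdev->dev);
		return 0;
	}
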
diff --git a/drivers/spi/spi-amd-pci.c b/drivers/spi/spi-amd-pci.c
new file mode 100644
index 000000000000..e5faab414c17
--- /dev/null
+++ b/drivers/spi/spi-amd-pci.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD SPI controller driver
+ *
+ * Copyright (c) 2025, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Krishnamoorthi M <krishnamoorthi.m@amd.com>
+ * Akshata MukundShetty <akshata.mukundshetty@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/pci.h>
+
+#include "spi-amd.h"
+
+#define AMD_PCI_DEVICE_ID_LPC_BRIDGE 0x1682
+#define AMD_PCI_LPC_SPI_BASE_ADDR_REG 0xA0
+#define AMD_SPI_BASE_ADDR_MASK ~0xFF
+#define AMD_HID2_PCI_BAR_OFFSET 0x00002000
+#define AMD_HID2_MEM_SIZE 0x200
+
+static struct pci_device_id pci_spi_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_PCI_DEVICE_ID_LPC_BRIDGE) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci_spi_ids);
+
+static int amd_spi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *host;
+ struct amd_spi *amd_spi;
+ u32 io_base_addr;
+
+ /* Allocate storage for host and driver private data */
+ host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
+ if (!host)
+ return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
+
+ amd_spi = spi_controller_get_devdata(host);
+
+ pci_read_config_dword(pdev, AMD_PCI_LPC_SPI_BASE_ADDR_REG, &io_base_addr);
+ io_base_addr = (io_base_addr & AMD_SPI_BASE_ADDR_MASK) + AMD_HID2_PCI_BAR_OFFSET;
+ amd_spi->io_remap_addr = devm_ioremap(dev, io_base_addr, AMD_HID2_MEM_SIZE);
+
+ if (!amd_spi->io_remap_addr)
+ return dev_err_probe(dev, -ENOMEM,
+ "ioremap of SPI registers failed\n");
+
+ dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
+
+ amd_spi->version = AMD_HID2_SPI;
+ host->bus_num = 2;
+
+ return amd_spi_probe_common(dev, host);
+}
+
+static struct pci_driver amd_spi_pci_driver = {
+ .name = "amd_spi_pci",
+ .id_table = pci_spi_ids,
+ .probe = amd_spi_pci_probe,
+};
+
+module_pci_driver(amd_spi_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD HID2 SPI Controller Driver");
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 17fc0b17e756..02e7fe095a0b 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -17,6 +17,8 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
+#include "spi-amd.h"
+
#define AMD_SPI_CTRL0_REG 0x00
#define AMD_SPI_EXEC_CMD BIT(16)
#define AMD_SPI_FIFO_CLEAR BIT(20)
@@ -52,10 +54,13 @@
#define AMD_SPI_SPD7_MASK GENMASK(13, AMD_SPI_SPD7_SHIFT)
#define AMD_SPI_HID2_INPUT_RING_BUF0 0X100
+#define AMD_SPI_HID2_OUTPUT_BUF0 0x140
#define AMD_SPI_HID2_CNTRL 0x150
#define AMD_SPI_HID2_INT_STATUS 0x154
#define AMD_SPI_HID2_CMD_START 0x156
#define AMD_SPI_HID2_INT_MASK 0x158
+#define AMD_SPI_HID2_WRITE_CNTRL0 0x160
+#define AMD_SPI_HID2_WRITE_CNTRL1 0x164
#define AMD_SPI_HID2_READ_CNTRL0 0x170
#define AMD_SPI_HID2_READ_CNTRL1 0x174
#define AMD_SPI_HID2_READ_CNTRL2 0x180
@@ -81,17 +86,9 @@
#define AMD_SPI_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */
#define AMD_SPI_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */
-/**
- * enum amd_spi_versions - SPI controller versions
- * @AMD_SPI_V1: AMDI0061 hardware version
- * @AMD_SPI_V2: AMDI0062 hardware version
- * @AMD_HID2_SPI: AMDI0063 hardware version
- */
-enum amd_spi_versions {
- AMD_SPI_V1 = 1,
- AMD_SPI_V2,
- AMD_HID2_SPI,
-};
+/* SPINAND write command opcodes */
+#define AMD_SPI_OP_PP 0x02 /* Page program */
+#define AMD_SPI_OP_PP_RANDOM 0x84 /* Page program (random data) */
enum amd_spi_speed {
F_66_66MHz,
@@ -118,22 +115,6 @@ struct amd_spi_freq {
u32 spd7_val;
};
-/**
- * struct amd_spi - SPI driver instance
- * @io_remap_addr: Start address of the SPI controller registers
- * @phy_dma_buf: Physical address of DMA buffer
- * @dma_virt_addr: Virtual address of DMA buffer
- * @version: SPI controller hardware version
- * @speed_hz: Device frequency
- */
-struct amd_spi {
- void __iomem *io_remap_addr;
- dma_addr_t phy_dma_buf;
- void *dma_virt_addr;
- enum amd_spi_versions version;
- unsigned int speed_hz;
-};
-
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
{
return readb((u8 __iomem *)amd_spi->io_remap_addr + idx);
@@ -445,6 +426,17 @@ static inline bool amd_is_spi_read_cmd(const u16 op)
}
}
+static inline bool amd_is_spi_write_cmd(const u16 op)
+{
+ switch (op) {
+ case AMD_SPI_OP_PP:
+ case AMD_SPI_OP_PP_RANDOM:
+ return true;
+ default:
+ return false;
+ }
+}
+
static bool amd_spi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
@@ -455,7 +447,7 @@ static bool amd_spi_supports_op(struct spi_mem *mem,
return false;
/* AMD SPI controllers support quad mode only for read operations */
- if (amd_is_spi_read_cmd(op->cmd.opcode)) {
+ if (amd_is_spi_read_cmd(op->cmd.opcode) || amd_is_spi_write_cmd(op->cmd.opcode)) {
if (op->data.buswidth > 4)
return false;
@@ -464,7 +456,8 @@ static bool amd_spi_supports_op(struct spi_mem *mem,
* doesn't support 4-byte address commands.
*/
if (amd_spi->version == AMD_HID2_SPI) {
- if (amd_is_spi_read_cmd_4b(op->cmd.opcode) ||
+ if ((amd_is_spi_read_cmd_4b(op->cmd.opcode) ||
+ amd_is_spi_write_cmd(op->cmd.opcode)) &&
op->data.nbytes > AMD_SPI_HID2_DMA_SIZE)
return false;
} else if (op->data.nbytes > AMD_SPI_MAX_DATA) {
@@ -490,7 +483,8 @@ static int amd_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
* controller index mode supports maximum of 64 bytes in a single
* transaction.
*/
- if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode))
+ if (amd_spi->version == AMD_HID2_SPI && (amd_is_spi_read_cmd(op->cmd.opcode) ||
+ amd_is_spi_write_cmd(op->cmd.opcode)))
op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_HID2_DMA_SIZE);
else
op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_MAX_DATA);
@@ -514,32 +508,96 @@ static void amd_spi_set_addr(struct amd_spi *amd_spi,
}
}
+static void amd_spi_hiddma_write(struct amd_spi *amd_spi, const struct spi_mem_op *op)
+{
+ u16 hid_cmd_start, val;
+ u32 hid_regval;
+
+ /*
+ * Program the HID2 output Buffer0. 4k aligned buf_memory_addr[31:12],
+ * buf_size[2:0].
+ */
+ hid_regval = amd_spi->phy_dma_buf | BIT(0);
+ amd_spi_writereg32(amd_spi, AMD_SPI_HID2_OUTPUT_BUF0, hid_regval);
+
+ /* Program max write length in hid2_write_control1 register */
+ hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_WRITE_CNTRL1);
+ hid_regval = (hid_regval & ~GENMASK(15, 0)) | ((op->data.nbytes) + 3);
+ amd_spi_writereg32(amd_spi, AMD_SPI_HID2_WRITE_CNTRL1, hid_regval);
+
+ /* Set cmd start bit in hid2_cmd_start register to trigger HID basic write operation */
+ hid_cmd_start = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_CMD_START);
+ amd_spi_writereg16(amd_spi, AMD_SPI_HID2_CMD_START, (hid_cmd_start | BIT(2)));
+
+ /* Check interrupt status of HIDDMA basic write operation in hid2_int_status register */
+ readw_poll_timeout(amd_spi->io_remap_addr + AMD_SPI_HID2_INT_STATUS, val,
+ (val & BIT(2)), AMD_SPI_IO_SLEEP_US, AMD_SPI_IO_TIMEOUT_US);
+
+ /* Clear the interrupts by writing to hid2_int_status register */
+ val = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_INT_STATUS);
+ amd_spi_writereg16(amd_spi, AMD_SPI_HID2_INT_STATUS, val);
+}
+
static void amd_spi_mem_data_out(struct amd_spi *amd_spi,
const struct spi_mem_op *op)
{
int base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes;
u64 *buf_64 = (u64 *)op->data.buf.out;
+ u64 addr_val = op->addr.val;
u32 nbytes = op->data.nbytes;
u32 left_data = nbytes;
u8 *buf;
int i;
- amd_spi_set_opcode(amd_spi, op->cmd.opcode);
- amd_spi_set_addr(amd_spi, op);
+ /*
+	 * Condition for using HID write mode: use HID write only when writing
+	 * complete page data; use index mode otherwise.
+ */
+ if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_write_cmd(op->cmd.opcode)) {
+ u64 *dma_buf64 = (u64 *)(amd_spi->dma_virt_addr + op->addr.nbytes + op->cmd.nbytes);
+ u8 *dma_buf = (u8 *)amd_spi->dma_virt_addr;
- for (i = 0; left_data >= 8; i++, left_data -= 8)
- amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8), *buf_64++);
+ /* Copy opcode and address to DMA buffer */
+ *dma_buf = op->cmd.opcode;
- buf = (u8 *)buf_64;
- for (i = 0; i < left_data; i++) {
- amd_spi_writereg8(amd_spi, base_addr + op->dummy.nbytes + nbytes + i - left_data,
- buf[i]);
- }
+ dma_buf = (u8 *)dma_buf64;
+ for (i = 0; i < op->addr.nbytes; i++) {
+ *--dma_buf = addr_val & GENMASK(7, 0);
+ addr_val >>= 8;
+ }
- amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes);
- amd_spi_set_rx_count(amd_spi, 0);
- amd_spi_clear_fifo_ptr(amd_spi);
- amd_spi_execute_opcode(amd_spi);
+ /* Copy data to DMA buffer */
+ while (left_data >= 8) {
+ *dma_buf64++ = *buf_64++;
+ left_data -= 8;
+ }
+
+ buf = (u8 *)buf_64;
+ dma_buf = (u8 *)dma_buf64;
+ while (left_data--)
+ *dma_buf++ = *buf++;
+
+ amd_spi_hiddma_write(amd_spi, op);
+ } else {
+ amd_spi_set_opcode(amd_spi, op->cmd.opcode);
+ amd_spi_set_addr(amd_spi, op);
+
+ for (i = 0; left_data >= 8; i++, left_data -= 8)
+ amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8),
+ *buf_64++);
+
+ buf = (u8 *)buf_64;
+ for (i = 0; i < left_data; i++) {
+ amd_spi_writereg8(amd_spi,
+ base_addr + op->dummy.nbytes + nbytes + i - left_data,
+ buf[i]);
+ }
+
+ amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes);
+ amd_spi_set_rx_count(amd_spi, 0);
+ amd_spi_clear_fifo_ptr(amd_spi);
+ amd_spi_execute_opcode(amd_spi);
+ }
}
static void amd_spi_hiddma_read(struct amd_spi *amd_spi, const struct spi_mem_op *op)
@@ -616,15 +674,21 @@ static void amd_spi_mem_data_in(struct amd_spi *amd_spi,
* Use index mode otherwise.
*/
if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode)) {
+ u64 *dma_buf64 = (u64 *)amd_spi->dma_virt_addr;
+ u8 *dma_buf;
+
amd_spi_hiddma_read(amd_spi, op);
- for (i = 0; left_data >= 8; i++, left_data -= 8)
- *buf_64++ = readq((u8 __iomem *)amd_spi->dma_virt_addr + (i * 8));
+ /* Copy data from DMA buffer */
+ while (left_data >= 8) {
+ *buf_64++ = *dma_buf64++;
+ left_data -= 8;
+ }
buf = (u8 *)buf_64;
- for (i = 0; i < left_data; i++)
- buf[i] = readb((u8 __iomem *)amd_spi->dma_virt_addr +
- (nbytes - left_data + i));
+ dma_buf = (u8 *)dma_buf64;
+ while (left_data--)
+ *buf++ = *dma_buf++;
/* Reset HID RX memory logic */
data = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL);
@@ -728,51 +792,38 @@ static int amd_spi_setup_hiddma(struct amd_spi *amd_spi, struct device *dev)
{
u32 hid_regval;
- /* Allocate DMA buffer to use for HID basic read operation */
- amd_spi->dma_virt_addr = dma_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE,
- &amd_spi->phy_dma_buf, GFP_KERNEL);
+	/*
+	 * Allocate a DMA buffer to use for HID basic read and write operations.
+	 * For write operations, the DMA buffer should include the opcode,
+	 * address bytes and dummy bytes (if any) in addition to the data bytes.
+	 * Additionally, the hardware requires that the buffer address be 4K
+	 * aligned. So, allocate a DMA buffer of size 2 * AMD_SPI_HID2_DMA_SIZE.
+	 */
+ amd_spi->dma_virt_addr = dmam_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE * 2,
+ &amd_spi->phy_dma_buf, GFP_KERNEL);
if (!amd_spi->dma_virt_addr)
return -ENOMEM;
/*
* Enable interrupts and set mask bits in hid2_int_mask register to generate interrupt
- * properly for HIDDMA basic read operations.
+ * properly for HIDDMA basic read and write operations.
*/
hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_INT_MASK);
- hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(19);
+ hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(18) | BIT(19);
amd_spi_writereg32(amd_spi, AMD_SPI_HID2_INT_MASK, hid_regval);
- /* Configure buffer unit(4k) in hid2_control register */
+ /* Configure buffer unit(4k) and write threshold in hid2_control register */
hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL);
- amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, hid_regval & ~BIT(3));
+ amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, (hid_regval | GENMASK(13, 12)) & ~BIT(3));
return 0;
}
-static int amd_spi_probe(struct platform_device *pdev)
+int amd_spi_probe_common(struct device *dev, struct spi_controller *host)
{
- struct device *dev = &pdev->dev;
- struct spi_controller *host;
- struct amd_spi *amd_spi;
+ struct amd_spi *amd_spi = spi_controller_get_devdata(host);
int err;
- /* Allocate storage for host and driver private data */
- host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
- if (!host)
- return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
-
- amd_spi = spi_controller_get_devdata(host);
- amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(amd_spi->io_remap_addr))
- return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
- "ioremap of SPI registers failed\n");
-
- dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
-
- amd_spi->version = (uintptr_t) device_get_match_data(dev);
-
/* Initialize the spi_controller fields */
- host->bus_num = (amd_spi->version == AMD_HID2_SPI) ? 2 : 0;
host->num_chipselect = 4;
host->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD;
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
@@ -795,6 +846,32 @@ static int amd_spi_probe(struct platform_device *pdev)
return err;
}
+EXPORT_SYMBOL_GPL(amd_spi_probe_common);
+
+static int amd_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *host;
+ struct amd_spi *amd_spi;
+
+ /* Allocate storage for host and driver private data */
+ host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
+ if (!host)
+ return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
+
+ amd_spi = spi_controller_get_devdata(host);
+ amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(amd_spi->io_remap_addr))
+ return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
+ "ioremap of SPI registers failed\n");
+
+ dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
+
+ amd_spi->version = (uintptr_t)device_get_match_data(dev);
+ host->bus_num = 0;
+
+ return amd_spi_probe_common(dev, host);
+}
#ifdef CONFIG_ACPI
static const struct acpi_device_id spi_acpi_match[] = {
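
amd_spi_mem_data_out() stages the whole transaction in the 4K-aligned DMA buffer for HID writes: the opcode at offset 0, the address bytes written MSB-first immediately after it, then the payload. A sketch of that layout with a hypothetical helper (assumes a 1-byte opcode, as in the loop above):

	#include <linux/string.h>
	#include <linux/types.h>

	static void example_stage_write(u8 *dma_buf, u8 opcode, u64 addr,
					unsigned int addr_nbytes,
					const u8 *data, unsigned int len)
	{
		unsigned int i;

		dma_buf[0] = opcode;
		for (i = 0; i < addr_nbytes; i++)	/* MSB first */
			dma_buf[1 + i] = addr >> (8 * (addr_nbytes - 1 - i));
		memcpy(dma_buf + 1 + addr_nbytes, data, len);
	}
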
diff --git a/drivers/spi/spi-amd.h b/drivers/spi/spi-amd.h
new file mode 100644
index 000000000000..5f39ce7b5587
--- /dev/null
+++ b/drivers/spi/spi-amd.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AMD SPI controller driver common stuff
+ *
+ * Copyright (c) 2025, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Krishnamoorthi M <krishnamoorthi.m@amd.com>
+ */
+
+#ifndef SPI_AMD_H
+#define SPI_AMD_H
+
+/**
+ * enum amd_spi_versions - SPI controller versions
+ * @AMD_SPI_V1: AMDI0061 hardware version
+ * @AMD_SPI_V2: AMDI0062 hardware version
+ * @AMD_HID2_SPI: AMDI0063 hardware version
+ */
+enum amd_spi_versions {
+ AMD_SPI_V1 = 1,
+ AMD_SPI_V2,
+ AMD_HID2_SPI,
+};
+
+/**
+ * struct amd_spi - SPI driver instance
+ * @io_remap_addr: Start address of the SPI controller registers
+ * @phy_dma_buf: Physical address of DMA buffer
+ * @dma_virt_addr: Virtual address of DMA buffer
+ * @version: SPI controller hardware version
+ * @speed_hz: Device frequency
+ */
+struct amd_spi {
+ void __iomem *io_remap_addr;
+ dma_addr_t phy_dma_buf;
+ void *dma_virt_addr;
+ enum amd_spi_versions version;
+ unsigned int speed_hz;
+};
+
+int amd_spi_probe_common(struct device *dev, struct spi_controller *host);
+
+#endif /* SPI_AMD_H */
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index da9840957778..8cc19934b48b 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -14,6 +14,7 @@
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/overflow.h>
@@ -140,6 +141,8 @@ struct spi_engine_offload {
struct spi_engine *spi_engine;
unsigned long flags;
unsigned int offload_num;
+ unsigned int spi_mode_config;
+ u8 bits_per_word;
};
struct spi_engine {
@@ -159,6 +162,7 @@ struct spi_engine {
unsigned int offload_sdo_mem_size;
struct spi_offload *offload;
u32 offload_caps;
+ bool offload_requires_sync;
};
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
@@ -265,6 +269,8 @@ static int spi_engine_precompile_message(struct spi_message *msg)
{
unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
struct spi_transfer *xfer;
+ u8 min_bits_per_word = U8_MAX;
+ u8 max_bits_per_word = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* If we have an offload transfer, we can't rx to buffer */
@@ -273,6 +279,24 @@ static int spi_engine_precompile_message(struct spi_message *msg)
clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
+
+ if (xfer->len) {
+ min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word);
+ max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word);
+ }
+ }
+
+ /*
+ * If all xfers in the message use the same bits_per_word, we can
+ * provide some optimization when using SPI offload.
+ */
+ if (msg->offload) {
+ struct spi_engine_offload *priv = msg->offload->priv;
+
+ if (min_bits_per_word == max_bits_per_word)
+ priv->bits_per_word = min_bits_per_word;
+ else
+ priv->bits_per_word = 0;
}
return 0;
@@ -283,6 +307,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
{
struct spi_device *spi = msg->spi;
struct spi_controller *host = spi->controller;
+ struct spi_engine_offload *priv;
struct spi_transfer *xfer;
int clk_div, new_clk_div, inst_ns;
bool keep_cs = false;
@@ -296,9 +321,24 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
clk_div = 1;
- spi_engine_program_add_cmd(p, dry,
- SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
- spi_engine_get_config(spi)));
+ /*
+	 * As an optimization, SPI offload sets this once when the offload is
+	 * enabled instead of repeating the instruction in each message.
+ */
+ if (msg->offload) {
+ priv = msg->offload->priv;
+ priv->spi_mode_config = spi_engine_get_config(spi);
+
+ /*
+ * If all xfers use the same bits_per_word, it can be optimized
+ * in the same way.
+ */
+ bits_per_word = priv->bits_per_word;
+ } else {
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
+ spi_engine_get_config(spi)));
+ }
xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);
@@ -663,6 +703,8 @@ static void spi_engine_offload_unprepare(struct spi_offload *offload)
static int spi_engine_optimize_message(struct spi_message *msg)
{
+ struct spi_controller *host = msg->spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
struct spi_engine_program p_dry, *p;
int ret;
@@ -679,8 +721,13 @@ static int spi_engine_optimize_message(struct spi_message *msg)
spi_engine_compile_message(msg, false, p);
- spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
- msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
+ /*
+ * Non-offload needs SYNC for completion interrupt. Older versions of
+ * the IP core also need SYNC for offload to work properly.
+ */
+ if (!msg->offload || spi_engine->offload_requires_sync)
+ spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
+ msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
msg->opt_state = p;
@@ -739,12 +786,16 @@ static int spi_engine_setup(struct spi_device *device)
{
struct spi_controller *host = device->controller;
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ unsigned int reg;
if (device->mode & SPI_CS_HIGH)
spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0));
else
spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0));
+ writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
@@ -755,7 +806,11 @@ static int spi_engine_setup(struct spi_device *device)
writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
- return 0;
+ writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ return readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
+ reg, reg == 1, 1, 1000);
}
static int spi_engine_transfer_one_message(struct spi_controller *host,
@@ -833,6 +888,27 @@ static int spi_engine_trigger_enable(struct spi_offload *offload)
struct spi_engine_offload *priv = offload->priv;
struct spi_engine *spi_engine = priv->spi_engine;
unsigned int reg;
+ int ret;
+
+ writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
+ priv->spi_mode_config),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ if (priv->bits_per_word)
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
+ priv->bits_per_word),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+
+ ret = readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
+ reg, reg == 1, 1, 1000);
+ if (ret)
+ return ret;
reg = readl_relaxed(spi_engine->base +
SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
@@ -987,6 +1063,9 @@ static int spi_engine_probe(struct platform_device *pdev)
spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
}
+ /* IP v1.5 dropped the requirement for SYNC in offload messages. */
+ spi_engine->offload_requires_sync = ADI_AXI_PCORE_VER_MINOR(version) < 5;
+
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
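The setup() and trigger_enable() paths above share one pattern: queue a SYNC instruction through the command FIFO, then poll the SYNC_ID register until the core echoes the ID back, proving that every previously queued instruction has executed. A minimal sketch of that handshake, with placeholder offsets and ID (the real SPI_ENGINE_* encodings live in the driver):

#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_REG_CMD_FIFO	0xe4	/* placeholder offsets */
#define EXAMPLE_REG_SYNC_ID	0xe8

static int example_wait_cmd_fifo_drained(void __iomem *base, u32 sync_cmd,
					 u32 sync_id)
{
	u32 reg;

	/* The core writes the ID to SYNC_ID only after all instructions
	 * queued ahead of the SYNC have completed. */
	writel_relaxed(sync_cmd, base + EXAMPLE_REG_CMD_FIFO);

	/* Poll every 1 us, time out after 1 ms, as the driver does. */
	return readl_relaxed_poll_timeout(base + EXAMPLE_REG_SYNC_ID, reg,
					  reg == sync_id, 1, 1000);
}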
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 644b44d2aef2..18261cbd413b 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -745,7 +745,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
- reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ reset = devm_reset_control_get_optional_shared(dev, NULL);
if (IS_ERR(reset))
return PTR_ERR(reset);
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index c8f64ec69344..b56210734caa 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -523,7 +523,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
- reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ reset = devm_reset_control_get_optional_shared(dev, NULL);
if (IS_ERR(reset))
return PTR_ERR(reset);
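Both bcm63xx hunks switch the optional reset line from exclusive to shared, which matters when the same reset line also feeds other peripherals: shared reset controls are reference-counted, so no single consumer can yank the line from under the others. A consumer-side sketch, assuming an optional and possibly shared reset:

#include <linux/reset.h>

static int example_get_shared_reset(struct device *dev)
{
	struct reset_control *rst;

	/* NULL (not an error) when the firmware describes no reset line. */
	rst = devm_reset_control_get_optional_shared(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Refcounted for shared resets: the line is deasserted for the
	 * first consumer and only reasserted once every consumer lets go. */
	return reset_control_deassert(rst);
}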
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index c90462783b3f..fe0f122f07b0 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1949,7 +1949,7 @@ static int cqspi_probe(struct platform_device *pdev)
host->num_chipselect = cqspi->num_chipselect;
- if (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)
+ if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET))
cqspi_device_reset(cqspi);
if (cqspi->use_direct_mode) {
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index 337aef12abcc..367ae7120bb3 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -34,7 +34,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
if (ret)
goto error;
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
if (ret)
goto error;
@@ -78,7 +78,6 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
return 0;
error:
- pci_release_regions(pdev);
spi_controller_put(host);
return ret;
}
@@ -92,7 +91,6 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
if (!p)
return;
- pci_release_regions(pdev);
/* Put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
}
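pcim_request_all_regions() is the managed counterpart of pci_request_regions(), which is why the explicit pci_release_regions() calls disappear from both the error path and remove(). The resulting probe skeleton, sketched:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *base;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Regions are released automatically when the driver unbinds. */
	ret = pcim_request_all_regions(pdev, "example");
	if (ret)
		return ret;

	base = pcim_iomap(pdev, 0, 0);	/* map all of BAR 0 */
	if (!base)
		return -ENOMEM;

	return 0;
}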
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index ceefc253c549..b28a840b3b04 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -237,7 +237,9 @@ static int cs42l43_get_speaker_id_gpios(struct cs42l43_spi *priv, int *result)
int i, ret;
descs = gpiod_get_array_optional(priv->dev, "spk-id", GPIOD_IN);
- if (IS_ERR_OR_NULL(descs))
+ if (!descs)
+ return 0;
+ else if (IS_ERR(descs))
return PTR_ERR(descs);
spkid = 0;
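gpiod_get_array_optional() has two distinct "nothing useful" outcomes that the old IS_ERR_OR_NULL() test conflated: NULL means the GPIOs simply are not described (fine for an optional lookup), while an ERR_PTR means a real failure such as -EPROBE_DEFER that must be propagated. The new code spells that out; a condensed sketch:

#include <linux/gpio/consumer.h>

static int example_read_spk_id(struct device *dev)
{
	struct gpio_descs *descs;

	descs = gpiod_get_array_optional(dev, "spk-id", GPIOD_IN);
	if (!descs)
		return 0;		/* property absent: nothing to do */
	if (IS_ERR(descs))
		return PTR_ERR(descs);	/* real error, e.g. -EPROBE_DEFER */

	/* ... read descs->desc[0..ndescs-1] here ... */
	gpiod_put_array(descs);
	return 0;
}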
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index 941ecc6f59f8..b3b883cb9541 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -423,7 +423,7 @@ static int dw_spi_transfer_one(struct spi_controller *host,
int ret;
dws->dma_mapped = 0;
- dws->n_bytes = roundup_pow_of_two(BITS_TO_BYTES(transfer->bits_per_word));
+ dws->n_bytes = spi_bpw_to_bytes(transfer->bits_per_word);
dws->tx = (void *)transfer->tx_buf;
dws->tx_len = transfer->len / dws->n_bytes;
dws->rx = transfer->rx_buf;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 067c954cb6ea..863781ba6c16 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
-// Copyright 2020 NXP
+// Copyright 2020-2025 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
@@ -62,6 +62,7 @@
#define SPI_SR_TFIWF BIT(18)
#define SPI_SR_RFDF BIT(17)
#define SPI_SR_CMDFFF BIT(16)
+#define SPI_SR_TXRXS BIT(30)
#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
SPI_SR_TFUF | SPI_SR_TFFF | \
SPI_SR_CMDTCF | SPI_SR_SPEF | \
@@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *transfer;
bool cs = false;
int status = 0;
+ u32 val = 0;
+ bool cs_change = false;
message->actual_length = 0;
+ /* Put DSPI in running mode if halted. */
+ regmap_read(dspi->regmap, SPI_MCR, &val);
+ if (val & SPI_MCR_HALT) {
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
+ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
+ !(val & SPI_SR_TXRXS))
+ ;
+ }
+
list_for_each_entry(transfer, &message->transfers, transfer_list) {
dspi->cur_transfer = transfer;
dspi->cur_msg = message;
@@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
+ cs_change = transfer->cs_change;
dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
dspi->len = transfer->len;
@@ -962,6 +975,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
dspi->progress, !dspi->irq);
@@ -988,6 +1003,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi_deassert_cs(spi, &cs);
}
+ if (status || !cs_change) {
+ /* Put DSPI in stop mode */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_HALT, SPI_MCR_HALT);
+ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
+ val & SPI_SR_TXRXS)
+ ;
+ }
+
message->status = status;
spi_finalize_current_message(ctlr);
@@ -1167,6 +1191,20 @@ static int dspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+static const struct regmap_range dspi_yes_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_MCR),
+ regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
+ regmap_reg_range(SPI_SR, SPI_TXFR3),
+ regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
+ regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_access_table dspi_access_table = {
+ .yes_ranges = dspi_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges),
+};
+
static const struct regmap_range dspi_volatile_ranges[] = {
regmap_reg_range(SPI_MCR, SPI_TCR),
regmap_reg_range(SPI_SR, SPI_SR),
@@ -1184,6 +1222,8 @@ static const struct regmap_config dspi_regmap_config = {
.reg_stride = 4,
.max_register = 0x88,
.volatile_table = &dspi_volatile_table,
+ .rd_table = &dspi_access_table,
+ .wr_table = &dspi_access_table,
};
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
@@ -1205,6 +1245,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
.reg_stride = 4,
.max_register = 0x13c,
.volatile_table = &dspi_xspi_volatile_table,
+ .rd_table = &dspi_access_table,
+ .wr_table = &dspi_access_table,
},
{
.name = "pushr",
@@ -1227,6 +1269,8 @@ static int dspi_init(struct fsl_dspi *dspi)
if (!spi_controller_is_target(dspi->ctlr))
mcr |= SPI_MCR_HOST;
+ mcr |= SPI_MCR_HALT;
+
regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
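The DSPI module is now initialized halted (SPI_MCR_HALT) and only un-halted for the duration of a message, with SPI_SR_TXRXS polled to confirm the module actually changed state. A sketch of the same idea with a bounded wait via regmap_read_poll_timeout() (the patch open-codes the loop; the 1 ms timeout here is an illustrative choice):

#include <linux/regmap.h>

/* SPI_MCR, SPI_MCR_HALT, SPI_SR and SPI_SR_TXRXS as defined in the driver. */
static int example_dspi_set_running(struct regmap *map, bool run)
{
	u32 sr;

	regmap_update_bits(map, SPI_MCR, SPI_MCR_HALT,
			   run ? 0 : SPI_MCR_HALT);

	/* TXRXS is set by hardware while the module is running. */
	return regmap_read_poll_timeout(map, SPI_SR, sr,
					run == !!(sr & SPI_SR_TXRXS),
					0, 1000);
}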
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index b5ecffcaf795..c887abb028d7 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -264,14 +264,14 @@ static const struct fsl_qspi_devtype_data ls2080a_data = {
struct fsl_qspi {
void __iomem *iobase;
void __iomem *ahb_addr;
- u32 memmap_phy;
- struct clk *clk, *clk_en;
- struct device *dev;
- struct completion c;
const struct fsl_qspi_devtype_data *devtype_data;
struct mutex lock;
+ struct completion c;
+ struct clk *clk, *clk_en;
struct pm_qos_request pm_qos_req;
+ struct device *dev;
int selected;
+ u32 memmap_phy;
};
static inline int needs_swap_endian(struct fsl_qspi *q)
@@ -844,13 +844,18 @@ static const struct spi_controller_mem_caps fsl_qspi_mem_caps = {
.per_op_freq = true,
};
-static void fsl_qspi_cleanup(void *data)
+static void fsl_qspi_disable(void *data)
{
struct fsl_qspi *q = data;
/* disable the hardware */
qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
+}
+
+static void fsl_qspi_cleanup(void *data)
+{
+ struct fsl_qspi *q = data;
fsl_qspi_clk_disable_unprep(q);
@@ -866,7 +871,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
struct fsl_qspi *q;
int ret;
- ctlr = spi_alloc_host(&pdev->dev, sizeof(*q));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*q));
if (!ctlr)
return -ENOMEM;
@@ -876,68 +881,60 @@ static int fsl_qspi_probe(struct platform_device *pdev)
q = spi_controller_get_devdata(ctlr);
q->dev = dev;
q->devtype_data = of_device_get_match_data(dev);
- if (!q->devtype_data) {
- ret = -ENODEV;
- goto err_put_ctrl;
- }
+ if (!q->devtype_data)
+ return -ENODEV;
platform_set_drvdata(pdev, q);
/* find the resources */
q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI");
- if (IS_ERR(q->iobase)) {
- ret = PTR_ERR(q->iobase);
- goto err_put_ctrl;
- }
+ if (IS_ERR(q->iobase))
+ return PTR_ERR(q->iobase);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
- if (!res) {
- ret = -EINVAL;
- goto err_put_ctrl;
- }
+ if (!res)
+ return -EINVAL;
q->memmap_phy = res->start;
/* Since there are 4 cs, map size required is 4 times ahb_buf_size */
q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
(q->devtype_data->ahb_buf_size * 4));
- if (!q->ahb_addr) {
- ret = -ENOMEM;
- goto err_put_ctrl;
- }
+ if (!q->ahb_addr)
+ return -ENOMEM;
/* find the clocks */
q->clk_en = devm_clk_get(dev, "qspi_en");
- if (IS_ERR(q->clk_en)) {
- ret = PTR_ERR(q->clk_en);
- goto err_put_ctrl;
- }
+ if (IS_ERR(q->clk_en))
+ return PTR_ERR(q->clk_en);
q->clk = devm_clk_get(dev, "qspi");
- if (IS_ERR(q->clk)) {
- ret = PTR_ERR(q->clk);
- goto err_put_ctrl;
- }
+ if (IS_ERR(q->clk))
+ return PTR_ERR(q->clk);
+
+ mutex_init(&q->lock);
ret = fsl_qspi_clk_prep_enable(q);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- goto err_put_ctrl;
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q);
+ if (ret)
+ return ret;
+
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto err_disable_clk;
+ return ret;
ret = devm_request_irq(dev, ret,
fsl_qspi_irq_handler, 0, pdev->name, q);
if (ret) {
dev_err(dev, "failed to request irq: %d\n", ret);
- goto err_disable_clk;
+ return ret;
}
- mutex_init(&q->lock);
-
ctlr->bus_num = -1;
ctlr->num_chipselect = 4;
ctlr->mem_ops = &fsl_qspi_mem_ops;
@@ -947,23 +944,15 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = np;
- ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q);
+ ret = devm_add_action_or_reset(dev, fsl_qspi_disable, q);
if (ret)
- goto err_put_ctrl;
+ return ret;
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
- goto err_put_ctrl;
+ return ret;
return 0;
-
-err_disable_clk:
- fsl_qspi_clk_disable_unprep(q);
-
-err_put_ctrl:
- spi_controller_put(ctlr);
-
- return ret;
}
static int fsl_qspi_suspend(struct device *dev)
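The probe conversion leans on the fact that devm actions run in reverse order of registration: fsl_qspi_cleanup (clock teardown) is registered right after the clocks are enabled, and fsl_qspi_disable (hardware disable, which needs live clocks) is registered later, so on unbind the controller is disabled first and the clocks are gated last. A minimal sketch of that ordering rule:

#include <linux/device.h>

static void example_undo_early(void *data) { /* runs last on unbind */ }
static void example_undo_late(void *data)  { /* runs first on unbind */ }

static int example_probe(struct device *dev)
{
	int ret;

	ret = devm_add_action_or_reset(dev, example_undo_early, NULL);
	if (ret)
		return ret;	/* the action already ran on failure */

	return devm_add_action_or_reset(dev, example_undo_late, NULL);
}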
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 405deb6677c1..ea5f1b10b79e 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -46,7 +46,7 @@ struct spi_gpio {
static inline struct spi_gpio *__pure
spi_to_spi_gpio(const struct spi_device *spi)
{
- const struct spi_bitbang *bang;
+ struct spi_bitbang *bang;
struct spi_gpio *spi_gpio;
bang = spi_controller_get_devdata(spi->controller);
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 4d9ffec900bb..4b63cb98df9c 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -44,6 +44,7 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_spi_boardinfo *info;
+ void __iomem *base;
int ret;
ret = pcim_enable_device(pdev);
@@ -56,7 +57,12 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
info->data = pdev;
- return intel_spi_probe(&pdev->dev, &pdev->resource[0], info);
+
+ base = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return intel_spi_probe(&pdev->dev, base, info);
}
static const struct pci_device_id intel_spi_pci_ids[] = {
diff --git a/drivers/spi/spi-intel-platform.c b/drivers/spi/spi-intel-platform.c
index 0974cca83a5d..6cbed0b2e063 100644
--- a/drivers/spi/spi-intel-platform.c
+++ b/drivers/spi/spi-intel-platform.c
@@ -14,14 +14,17 @@
static int intel_spi_platform_probe(struct platform_device *pdev)
{
struct intel_spi_boardinfo *info;
- struct resource *mem;
+ void __iomem *base;
info = dev_get_platdata(&pdev->dev);
if (!info)
return -EINVAL;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- return intel_spi_probe(&pdev->dev, mem, info);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ return intel_spi_probe(&pdev->dev, base, info);
}
static struct platform_driver intel_spi_platform_driver = {
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index b0dcdb6fb8fa..5d5a546c62ea 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -1467,13 +1467,13 @@ EXPORT_SYMBOL_GPL(intel_spi_groups);
/**
* intel_spi_probe() - Probe the Intel SPI flash controller
* @dev: Pointer to the parent device
- * @mem: MMIO resource
+ * @base: iomapped MMIO resource
* @info: Platform specific information
*
* Probes Intel SPI flash controller and creates the flash chip device.
* Returns %0 on success and negative errno in case of failure.
*/
-int intel_spi_probe(struct device *dev, struct resource *mem,
+int intel_spi_probe(struct device *dev, void __iomem *base,
const struct intel_spi_boardinfo *info)
{
struct spi_controller *host;
@@ -1488,10 +1488,7 @@ int intel_spi_probe(struct device *dev, struct resource *mem,
ispi = spi_controller_get_devdata(host);
- ispi->base = devm_ioremap_resource(dev, mem);
- if (IS_ERR(ispi->base))
- return PTR_ERR(ispi->base);
-
+ ispi->base = base;
ispi->dev = dev;
ispi->host = host;
ispi->info = info;
diff --git a/drivers/spi/spi-intel.h b/drivers/spi/spi-intel.h
index c5f35060dd63..53b292c6ea0c 100644
--- a/drivers/spi/spi-intel.h
+++ b/drivers/spi/spi-intel.h
@@ -11,11 +11,9 @@
#include <linux/platform_data/x86/spi-intel.h>
-struct resource;
-
extern const struct attribute_group *intel_spi_groups[];
-int intel_spi_probe(struct device *dev, struct resource *mem,
+int intel_spi_probe(struct device *dev, void __iomem *base,
const struct intel_spi_boardinfo *info);
#endif /* SPI_INTEL_H */
diff --git a/drivers/spi/spi-loongson-core.c b/drivers/spi/spi-loongson-core.c
index 4fec226456d1..b46f072a0387 100644
--- a/drivers/spi/spi-loongson-core.c
+++ b/drivers/spi/spi-loongson-core.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 31a878d9458d..7dd92deffe3f 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -420,7 +420,7 @@ MODULE_LICENSE("GPL");
static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
{
/* limit the hex_dump */
- if (len < 1024) {
+ if (len <= 1024) {
print_hex_dump(KERN_INFO, pre,
DUMP_PREFIX_OFFSET, 16, 1,
ptr, len, 0);
@@ -494,8 +494,8 @@ struct rx_ranges {
static int rx_ranges_cmp(void *priv, const struct list_head *a,
const struct list_head *b)
{
- struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
- struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);
+ const struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
+ const struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);
if (rx_a->start > rx_b->start)
return 1;
@@ -635,8 +635,8 @@ static int spi_test_check_loopback_result(struct spi_device *spi,
} else {
/* first byte received */
txb = ((u8 *)xfer->rx_buf)[0];
- /* first byte may be 0 or xff */
- if (!((txb == 0) || (txb == 0xff))) {
+ /* first byte may be 0 or 0xff */
+ if (txb != 0 && txb != 0xff) {
dev_err(&spi->dev,
"loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n",
txb);
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index df74ad5060f8..6b9137307533 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -21,18 +21,26 @@
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/dma-mapping.h>
/*
- * The Meson SPICC controller could support DMA based transfers, but is not
- * implemented by the vendor code, and while having the registers documentation
- * it has never worked on the GXL Hardware.
- * The PIO mode is the only mode implemented, and due to badly designed HW :
- * - all transfers are cutted in 16 words burst because the FIFO hangs on
- * TX underflow, and there is no TX "Half-Empty" interrupt, so we go by
- * FIFO max size chunk only
- * - CS management is dumb, and goes UP between every burst, so is really a
- * "Data Valid" signal than a Chip Select, GPIO link should be used instead
- * to have a CS go down over the full transfer
+ * There are two modes for data transmission: PIO and DMA.
+ * When bits_per_word is 8, 16, 24, or 32, data is transferred using PIO mode.
+ * When bits_per_word is 64, DMA mode is used by default.
+ *
+ * DMA achieves a transfer with one or more SPI bursts, each of which is made
+ * up of one or more DMA bursts. The DMA burst mechanism works as follows:
+ * For TX, when the number of words in TXFIFO is less than the preset
+ * reading threshold, SPICC starts a reading DMA burst, which reads the preset
+ * number of words from the TX buffer and writes them into TXFIFO.
+ * For RX, when the number of words in RXFIFO is greater than the preset
+ * writing threshold, SPICC starts a writing DMA burst, which reads the
+ * preset number of words from RXFIFO and writes them into the RX buffer.
+ * DMA works if the transfer meets the following conditions:
+ * - 64 bits per word
+ * - The transfer length in words must be a multiple of dma_burst_len, and
+ *   dma_burst_len should be one of 8, 7, ..., 2; otherwise the transfer is
+ *   split into several SPI bursts by this driver
*/
#define SPICC_MAX_BURST 128
@@ -128,6 +136,23 @@
#define SPICC_DWADDR 0x24 /* Write Address of DMA */
+#define SPICC_LD_CNTL0 0x28
+#define VSYNC_IRQ_SRC_SELECT BIT(0)
+#define DMA_EN_SET_BY_VSYNC BIT(2)
+#define XCH_EN_SET_BY_VSYNC BIT(3)
+#define DMA_READ_COUNTER_EN BIT(4)
+#define DMA_WRITE_COUNTER_EN BIT(5)
+#define DMA_RADDR_LOAD_BY_VSYNC BIT(6)
+#define DMA_WADDR_LOAD_BY_VSYNC BIT(7)
+#define DMA_ADDR_LOAD_FROM_LD_ADDR BIT(8)
+
+#define SPICC_LD_CNTL1 0x2c
+#define DMA_READ_COUNTER GENMASK(15, 0)
+#define DMA_WRITE_COUNTER GENMASK(31, 16)
+#define DMA_BURST_LEN_DEFAULT 8
+#define DMA_BURST_COUNT_MAX 0xffff
+#define SPI_BURST_LEN_MAX (DMA_BURST_LEN_DEFAULT * DMA_BURST_COUNT_MAX)
+
#define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */
#define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0)
#define SPICC_ENH_DATARATE_MASK GENMASK(23, 16)
@@ -171,6 +196,9 @@ struct meson_spicc_device {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_idle_high;
struct pinctrl_state *pins_idle_low;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
+ bool using_dma;
};
#define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div)
@@ -202,6 +230,148 @@ static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0);
}
+static int meson_spicc_dma_map(struct meson_spicc_device *spicc,
+ struct spi_transfer *t)
+{
+ struct device *dev = spicc->host->dev.parent;
+
+ if (!(t->tx_buf && t->rx_buf))
+ return -EINVAL;
+
+ t->tx_dma = dma_map_single(dev, (void *)t->tx_buf, t->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, t->tx_dma))
+ return -ENOMEM;
+
+ t->rx_dma = dma_map_single(dev, t->rx_buf, t->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, t->rx_dma))
+ return -ENOMEM;
+
+ spicc->tx_dma = t->tx_dma;
+ spicc->rx_dma = t->rx_dma;
+
+ return 0;
+}
+
+static void meson_spicc_dma_unmap(struct meson_spicc_device *spicc,
+ struct spi_transfer *t)
+{
+ struct device *dev = spicc->host->dev.parent;
+
+ if (t->tx_dma)
+ dma_unmap_single(dev, t->tx_dma, t->len, DMA_TO_DEVICE);
+ if (t->rx_dma)
+ dma_unmap_single(dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
+}
+
+/*
+ * According to the remaining length in words, calculate a suitable SPI burst
+ * length and a DMA burst length for the current SPI burst
+ */
+static u32 meson_spicc_calc_dma_len(struct meson_spicc_device *spicc,
+ u32 len, u32 *dma_burst_len)
+{
+ u32 i;
+
+ if (len <= spicc->data->fifo_size) {
+ *dma_burst_len = len;
+ return len;
+ }
+
+ *dma_burst_len = DMA_BURST_LEN_DEFAULT;
+
+ if (len == (SPI_BURST_LEN_MAX + 1))
+ return SPI_BURST_LEN_MAX - DMA_BURST_LEN_DEFAULT;
+
+ if (len >= SPI_BURST_LEN_MAX)
+ return SPI_BURST_LEN_MAX;
+
+ for (i = DMA_BURST_LEN_DEFAULT; i > 1; i--)
+ if ((len % i) == 0) {
+ *dma_burst_len = i;
+ return len;
+ }
+
+ i = len % DMA_BURST_LEN_DEFAULT;
+ len -= i;
+
+ if (i == 1)
+ len -= DMA_BURST_LEN_DEFAULT;
+
+ return len;
+}
+
+static void meson_spicc_setup_dma(struct meson_spicc_device *spicc)
+{
+ unsigned int len;
+ unsigned int dma_burst_len, dma_burst_count;
+ unsigned int count_en = 0;
+ unsigned int txfifo_thres = 0;
+ unsigned int read_req = 0;
+ unsigned int rxfifo_thres = 31;
+ unsigned int write_req = 0;
+ unsigned int ld_ctr1 = 0;
+
+ writel_relaxed(spicc->tx_dma, spicc->base + SPICC_DRADDR);
+ writel_relaxed(spicc->rx_dma, spicc->base + SPICC_DWADDR);
+
+ /* Set the max burst length to support a transmission with a length of
+ * no more than 1024 bytes (128 words), which must use CS management
+ * because of some strict timing requirements
+ */
+ writel_bits_relaxed(SPICC_BURSTLENGTH_MASK, SPICC_BURSTLENGTH_MASK,
+ spicc->base + SPICC_CONREG);
+
+ len = meson_spicc_calc_dma_len(spicc, spicc->xfer_remain,
+ &dma_burst_len);
+ spicc->xfer_remain -= len;
+ dma_burst_count = DIV_ROUND_UP(len, dma_burst_len);
+ dma_burst_len--;
+
+ if (spicc->tx_dma) {
+ spicc->tx_dma += len;
+ count_en |= DMA_READ_COUNTER_EN;
+ txfifo_thres = spicc->data->fifo_size - dma_burst_len;
+ read_req = dma_burst_len;
+ ld_ctr1 |= FIELD_PREP(DMA_READ_COUNTER, dma_burst_count);
+ }
+
+ if (spicc->rx_dma) {
+ spicc->rx_dma += len;
+ count_en |= DMA_WRITE_COUNTER_EN;
+ rxfifo_thres = dma_burst_len;
+ write_req = dma_burst_len;
+ ld_ctr1 |= FIELD_PREP(DMA_WRITE_COUNTER, dma_burst_count);
+ }
+
+ writel_relaxed(count_en, spicc->base + SPICC_LD_CNTL0);
+ writel_relaxed(ld_ctr1, spicc->base + SPICC_LD_CNTL1);
+ writel_relaxed(SPICC_DMA_ENABLE
+ | SPICC_DMA_URGENT
+ | FIELD_PREP(SPICC_TXFIFO_THRESHOLD_MASK, txfifo_thres)
+ | FIELD_PREP(SPICC_READ_BURST_MASK, read_req)
+ | FIELD_PREP(SPICC_RXFIFO_THRESHOLD_MASK, rxfifo_thres)
+ | FIELD_PREP(SPICC_WRITE_BURST_MASK, write_req),
+ spicc->base + SPICC_DMAREG);
+}
+
+static irqreturn_t meson_spicc_dma_irq(struct meson_spicc_device *spicc)
+{
+ if (readl_relaxed(spicc->base + SPICC_DMAREG) & SPICC_DMA_ENABLE)
+ return IRQ_HANDLED;
+
+ if (spicc->xfer_remain) {
+ meson_spicc_setup_dma(spicc);
+ } else {
+ writel_bits_relaxed(SPICC_SMC, 0, spicc->base + SPICC_CONREG);
+ writel_relaxed(0, spicc->base + SPICC_INTREG);
+ writel_relaxed(0, spicc->base + SPICC_DMAREG);
+ meson_spicc_dma_unmap(spicc, spicc->xfer);
+ complete(&spicc->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
{
return !!FIELD_GET(SPICC_TF,
@@ -293,6 +463,9 @@ static irqreturn_t meson_spicc_irq(int irq, void *data)
writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG);
+ if (spicc->using_dma)
+ return meson_spicc_dma_irq(spicc);
+
/* Empty RX FIFO */
meson_spicc_rx(spicc);
@@ -426,9 +599,6 @@ static int meson_spicc_transfer_one(struct spi_controller *host,
meson_spicc_reset_fifo(spicc);
- /* Setup burst */
- meson_spicc_setup_burst(spicc);
-
/* Setup wait for completion */
reinit_completion(&spicc->done);
@@ -442,11 +612,36 @@ static int meson_spicc_transfer_one(struct spi_controller *host,
/* Increase it twice and add 200 ms tolerance */
timeout += timeout + 200;
- /* Start burst */
- writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
+ if (xfer->bits_per_word == 64) {
+ int ret;
+
+ /* dma_burst_len 1 can't trigger a dma burst */
+ if (xfer->len < 16)
+ return -EINVAL;
+
+ ret = meson_spicc_dma_map(spicc, xfer);
+ if (ret) {
+ meson_spicc_dma_unmap(spicc, xfer);
+ dev_err(host->dev.parent, "dma map failed\n");
+ return ret;
+ }
+
+ spicc->using_dma = true;
+ spicc->xfer_remain = DIV_ROUND_UP(xfer->len, spicc->bytes_per_word);
+ meson_spicc_setup_dma(spicc);
+ writel_relaxed(SPICC_TE_EN, spicc->base + SPICC_INTREG);
+ writel_bits_relaxed(SPICC_SMC, SPICC_SMC, spicc->base + SPICC_CONREG);
+ } else {
+ spicc->using_dma = false;
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc);
- /* Enable interrupts */
- writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
+ /* Start burst */
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
+
+ /* Enable interrupts */
+ writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
+ }
if (!wait_for_completion_timeout(&spicc->done, msecs_to_jiffies(timeout)))
return -ETIMEDOUT;
@@ -545,6 +740,14 @@ static int meson_spicc_setup(struct spi_device *spi)
if (!spi->controller_state)
spi->controller_state = spi_controller_get_devdata(spi->controller);
+ /* DMA is used for 64 bits per word; the other word sizes use PIO */
+ if (spi->bits_per_word != 8 &&
+ spi->bits_per_word != 16 &&
+ spi->bits_per_word != 24 &&
+ spi->bits_per_word != 32 &&
+ spi->bits_per_word != 64)
+ return -EINVAL;
+
return 0;
}
@@ -853,10 +1056,6 @@ static int meson_spicc_probe(struct platform_device *pdev)
host->num_chipselect = 4;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LOOP;
- host->bits_per_word_mask = SPI_BPW_MASK(32) |
- SPI_BPW_MASK(24) |
- SPI_BPW_MASK(16) |
- SPI_BPW_MASK(8);
host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX);
host->min_speed_hz = spicc->data->min_speed_hz;
host->max_speed_hz = spicc->data->max_speed_hz;
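A worked example of the burst-splitting rule described in the header comment, iterating meson_spicc_calc_dma_len() for a hypothetical 101-word transfer with a 16-word FIFO:

/* Illustrative only; assumes spicc->data->fifo_size == 16. */
u32 remain = 101, chunk, dma_burst_len;

while (remain) {
	chunk = meson_spicc_calc_dma_len(spicc, remain, &dma_burst_len);
	/*
	 * Pass 1: 101 has no divisor in 8..2 and 101 % 8 == 5, so 96 words
	 *         go out with dma_burst_len = 8.
	 * Pass 2: the remaining 5 words fit in the FIFO, so chunk = 5 with
	 *         dma_burst_len = 5.
	 */
	remain -= chunk;
}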
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index bad6b30bab0e..e63c77e41823 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -48,6 +48,8 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
@@ -57,6 +59,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
+/* Runtime PM autosuspend delay */
+#define FSPI_RPM_TIMEOUT 50 /* 50ms */
+
/* Registers used by the driver */
#define FSPI_MCR0 0x00
#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
@@ -394,6 +399,8 @@ struct nxp_fspi {
struct mutex lock;
struct pm_qos_request pm_qos_req;
int selected;
+#define FSPI_NEED_INIT (1 << 0)
+ int flags;
};
static inline int needs_ip_only(struct nxp_fspi *f)
@@ -627,15 +634,15 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
return 0;
}
-static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
+static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
{
if (is_acpi_node(dev_fwnode(f->dev)))
- return 0;
+ return;
clk_disable_unprepare(f->clk);
clk_disable_unprepare(f->clk_en);
- return 0;
+ return;
}
static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
@@ -925,7 +932,13 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->controller);
int err = 0;
- mutex_lock(&f->lock);
+ guard(mutex)(&f->lock);
+
+ err = pm_runtime_get_sync(f->dev);
+ if (err < 0) {
+ dev_err(f->dev, "Failed to enable clock %d\n", __LINE__);
+ return err;
+ }
/* Wait for controller being ready. */
err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
@@ -955,7 +968,8 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Invalidate the data in the AHB buffer. */
nxp_fspi_invalid(f);
- mutex_unlock(&f->lock);
+ pm_runtime_mark_last_busy(f->dev);
+ pm_runtime_put_autosuspend(f->dev);
return err;
}
@@ -1154,6 +1168,24 @@ static const struct spi_controller_mem_caps nxp_fspi_mem_caps = {
.per_op_freq = true,
};
+static void nxp_fspi_cleanup(void *data)
+{
+ struct nxp_fspi *f = data;
+
+ /* enable the clock first since registers are accessed below */
+ pm_runtime_get_sync(f->dev);
+
+ /* disable the hardware */
+ fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);
+
+ pm_runtime_disable(f->dev);
+ pm_runtime_put_noidle(f->dev);
+ nxp_fspi_clk_disable_unprep(f);
+
+ if (f->ahb_addr)
+ iounmap(f->ahb_addr);
+}
+
static int nxp_fspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -1161,10 +1193,10 @@ static int nxp_fspi_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct resource *res;
struct nxp_fspi *f;
- int ret;
+ int ret, irq;
u32 reg;
- ctlr = spi_alloc_host(&pdev->dev, sizeof(*f));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*f));
if (!ctlr)
return -ENOMEM;
@@ -1174,10 +1206,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
f = spi_controller_get_devdata(ctlr);
f->dev = dev;
f->devtype_data = (struct nxp_fspi_devtype_data *)device_get_match_data(dev);
- if (!f->devtype_data) {
- ret = -ENODEV;
- goto err_put_ctrl;
- }
+ if (!f->devtype_data)
+ return -ENODEV;
platform_set_drvdata(pdev, f);
@@ -1186,11 +1216,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
f->iobase = devm_platform_ioremap_resource(pdev, 0);
else
f->iobase = devm_platform_ioremap_resource_byname(pdev, "fspi_base");
-
- if (IS_ERR(f->iobase)) {
- ret = PTR_ERR(f->iobase);
- goto err_put_ctrl;
- }
+ if (IS_ERR(f->iobase))
+ return PTR_ERR(f->iobase);
/* find the resources - controller memory mapped space */
if (is_acpi_node(dev_fwnode(f->dev)))
@@ -1198,11 +1225,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
else
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "fspi_mmap");
-
- if (!res) {
- ret = -ENODEV;
- goto err_put_ctrl;
- }
+ if (!res)
+ return -ENODEV;
/* assign memory mapped starting address and mapped size. */
f->memmap_phy = res->start;
@@ -1211,100 +1235,109 @@ static int nxp_fspi_probe(struct platform_device *pdev)
/* find the clocks */
if (dev_of_node(&pdev->dev)) {
f->clk_en = devm_clk_get(dev, "fspi_en");
- if (IS_ERR(f->clk_en)) {
- ret = PTR_ERR(f->clk_en);
- goto err_put_ctrl;
- }
+ if (IS_ERR(f->clk_en))
+ return PTR_ERR(f->clk_en);
f->clk = devm_clk_get(dev, "fspi");
- if (IS_ERR(f->clk)) {
- ret = PTR_ERR(f->clk);
- goto err_put_ctrl;
- }
-
- ret = nxp_fspi_clk_prep_enable(f);
- if (ret) {
- dev_err(dev, "can not enable the clock\n");
- goto err_put_ctrl;
- }
+ if (IS_ERR(f->clk))
+ return PTR_ERR(f->clk);
}
+ /* find the irq */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get irq source");
+
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, FSPI_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+
+ /* enable clock */
+ ret = pm_runtime_get_sync(f->dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to enable clock");
+
/* Clear potential interrupts */
reg = fspi_readl(f, f->iobase + FSPI_INTR);
if (reg)
fspi_writel(f, reg, f->iobase + FSPI_INTR);
- /* find the irq */
- ret = platform_get_irq(pdev, 0);
+ nxp_fspi_default_setup(f);
+
+ ret = pm_runtime_put_sync(dev);
if (ret < 0)
- goto err_disable_clk;
+ return dev_err_probe(dev, ret, "Failed to disable clock");
- ret = devm_request_irq(dev, ret,
+ ret = devm_request_irq(dev, irq,
nxp_fspi_irq_handler, 0, pdev->name, f);
- if (ret) {
- dev_err(dev, "failed to request irq: %d\n", ret);
- goto err_disable_clk;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
- mutex_init(&f->lock);
+ devm_mutex_init(dev, &f->lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
ctlr->mem_ops = &nxp_fspi_mem_ops;
ctlr->mem_caps = &nxp_fspi_mem_caps;
-
- nxp_fspi_default_setup(f);
-
ctlr->dev.of_node = np;
- ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ ret = devm_add_action_or_reset(dev, nxp_fspi_cleanup, f);
if (ret)
- goto err_destroy_mutex;
+ return dev_err_probe(dev, ret, "Failed to register nxp_fspi_cleanup\n");
- return 0;
+ return devm_spi_register_controller(&pdev->dev, ctlr);
+}
-err_destroy_mutex:
- mutex_destroy(&f->lock);
+static int nxp_fspi_runtime_suspend(struct device *dev)
+{
+ struct nxp_fspi *f = dev_get_drvdata(dev);
-err_disable_clk:
nxp_fspi_clk_disable_unprep(f);
-err_put_ctrl:
- spi_controller_put(ctlr);
-
- dev_err(dev, "NXP FSPI probe failed\n");
- return ret;
+ return 0;
}
-static void nxp_fspi_remove(struct platform_device *pdev)
+static int nxp_fspi_runtime_resume(struct device *dev)
{
- struct nxp_fspi *f = platform_get_drvdata(pdev);
-
- /* disable the hardware */
- fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);
+ struct nxp_fspi *f = dev_get_drvdata(dev);
+ int ret;
- nxp_fspi_clk_disable_unprep(f);
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return ret;
- mutex_destroy(&f->lock);
+ if (f->flags & FSPI_NEED_INIT) {
+ nxp_fspi_default_setup(f);
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ dev_err(dev, "select flexspi default pinctrl failed!\n");
+ f->flags &= ~FSPI_NEED_INIT;
+ }
- if (f->ahb_addr)
- iounmap(f->ahb_addr);
+ return ret;
}
static int nxp_fspi_suspend(struct device *dev)
{
- return 0;
-}
-
-static int nxp_fspi_resume(struct device *dev)
-{
struct nxp_fspi *f = dev_get_drvdata(dev);
+ int ret;
- nxp_fspi_default_setup(f);
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret) {
+ dev_err(dev, "select flexspi sleep pinctrl failed!\n");
+ return ret;
+ }
- return 0;
+ f->flags |= FSPI_NEED_INIT;
+
+ return pm_runtime_force_suspend(dev);
}
+static const struct dev_pm_ops nxp_fspi_pm_ops = {
+ RUNTIME_PM_OPS(nxp_fspi_runtime_suspend, nxp_fspi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(nxp_fspi_suspend, pm_runtime_force_resume)
+};
+
static const struct of_device_id nxp_fspi_dt_ids[] = {
{ .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
{ .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, },
@@ -1324,20 +1357,14 @@ static const struct acpi_device_id nxp_fspi_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, nxp_fspi_acpi_ids);
#endif
-static const struct dev_pm_ops nxp_fspi_pm_ops = {
- .suspend = nxp_fspi_suspend,
- .resume = nxp_fspi_resume,
-};
-
static struct platform_driver nxp_fspi_driver = {
.driver = {
.name = "nxp-fspi",
.of_match_table = nxp_fspi_dt_ids,
.acpi_match_table = ACPI_PTR(nxp_fspi_acpi_ids),
- .pm = &nxp_fspi_pm_ops,
+ .pm = pm_ptr(&nxp_fspi_pm_ops),
},
.probe = nxp_fspi_probe,
- .remove = nxp_fspi_remove,
};
module_platform_driver(nxp_fspi_driver);
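Every register-touching path in the driver now brackets the access with a runtime-PM reference, and the controller clock-gates itself FSPI_RPM_TIMEOUT (50 ms) after the last access. The canonical access pattern, sketched (the put_noidle on failure is the usual idiom, since pm_runtime_get_sync() takes a usage count even when resume fails):

#include <linux/pm_runtime.h>

static int example_hw_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume + clocks on */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... register access ... */

	pm_runtime_mark_last_busy(dev);	/* rearm the autosuspend timer */
	pm_runtime_put_autosuspend(dev);
	return 0;
}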
diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c
index 6bad042fe437..d336f4d228d5 100644
--- a/drivers/spi/spi-offload.c
+++ b/drivers/spi/spi-offload.c
@@ -183,9 +183,6 @@ static struct spi_offload_trigger
guard(mutex)(&trigger->lock);
- if (!trigger->ops)
- return ERR_PTR(-ENODEV);
-
if (trigger->ops->request) {
ret = trigger->ops->request(trigger, type, args->args, args->nargs);
if (ret)
@@ -300,7 +297,7 @@ int spi_offload_trigger_enable(struct spi_offload *offload,
if (trigger->ops->enable) {
ret = trigger->ops->enable(trigger, config);
if (ret) {
- if (offload->ops->trigger_disable)
+ if (offload->ops && offload->ops->trigger_disable)
offload->ops->trigger_disable(offload);
return ret;
}
@@ -434,7 +431,7 @@ int devm_spi_offload_trigger_register(struct device *dev,
{
struct spi_offload_trigger *trigger;
- if (!info->fwnode || !info->ops)
+ if (!info->fwnode || !info->ops || !info->ops->match)
return -EINVAL;
trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
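Requiring ops->match at registration time lets the request path drop its runtime NULL check. A hypothetical provider, sketched; only the fields this patch actually tests (fwnode, ops, ops->match) are known from the hunk above, the remaining names and signatures are assumptions:

/* Assumed match signature and trigger type constant. */
static bool example_match(struct spi_offload_trigger *trigger,
			  enum spi_offload_trigger_type type,
			  u64 *args, u32 nargs)
{
	return type == SPI_OFFLOAD_TRIGGER_PERIODIC;
}

static const struct spi_offload_trigger_ops example_ops = {
	.match = example_match,		/* now mandatory */
};

static int example_register(struct device *dev)
{
	struct spi_offload_trigger_info info = {
		.fwnode = dev_fwnode(dev),
		.ops = &example_ops,
	};

	return devm_spi_offload_trigger_register(dev, &info);
}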
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 29c616e2c408..70bb74b3bd9c 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -134,6 +134,7 @@ struct omap2_mcspi {
size_t max_xfer_len;
u32 ref_clk_hz;
bool use_multi_mode;
+ bool last_msg_kept_cs;
};
struct omap2_mcspi_cs {
@@ -1269,6 +1270,10 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
* multi-mode is applicable.
*/
mcspi->use_multi_mode = true;
+
+ if (mcspi->last_msg_kept_cs)
+ mcspi->use_multi_mode = false;
+
list_for_each_entry(tr, &msg->transfers, transfer_list) {
if (!tr->bits_per_word)
bits_per_word = msg->spi->bits_per_word;
@@ -1287,18 +1292,19 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
mcspi->use_multi_mode = false;
}
- /* Check if transfer asks to change the CS status after the transfer */
- if (!tr->cs_change)
- mcspi->use_multi_mode = false;
-
- /*
- * If at least one message is not compatible, switch back to single mode
- *
- * The bits_per_word of certain transfer can be different, but it will have no
- * impact on the signal itself.
- */
- if (!mcspi->use_multi_mode)
- break;
+ if (list_is_last(&tr->transfer_list, &msg->transfers)) {
+ /* Check if transfer asks to keep the CS status after the whole message */
+ if (tr->cs_change) {
+ mcspi->use_multi_mode = false;
+ mcspi->last_msg_kept_cs = true;
+ } else {
+ mcspi->last_msg_kept_cs = false;
+ }
+ } else {
+ /* Check if transfer asks to change the CS status after the transfer */
+ if (!tr->cs_change)
+ mcspi->use_multi_mode = false;
+ }
}
omap2_mcspi_set_mode(ctlr);
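Condensed, the CS rule the rewritten loop implements: every transfer except the last must set cs_change (CS has to toggle between words for multi-mode), while cs_change on the last transfer keeps CS asserted past the message, which forces single mode now and, through last_msg_kept_cs, also for the next message. A sketch of the predicate, assuming the loop context above:

bool multi_ok = !mcspi->last_msg_kept_cs;

list_for_each_entry(tr, &msg->transfers, transfer_list) {
	bool last = list_is_last(&tr->transfer_list, &msg->transfers);

	if (last)
		mcspi->last_msg_kept_cs = tr->cs_change;
	if (tr->cs_change == last)	/* mid needs it, last must not */
		multi_ok = false;
}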
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index fc98979eba48..e27642c4dea4 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -685,6 +685,17 @@ static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev)
return pci1xxxx_spi_isr_io(irq, dev);
}
+static irqreturn_t pci1xxxx_spi_shared_isr(int irq, void *dev)
+{
+ struct pci1xxxx_spi *par = dev;
+ u8 i = 0;
+
+ for (i = 0; i < par->total_hw_instances; i++)
+ pci1xxxx_spi_isr(irq, par->spi_int[i]);
+
+ return IRQ_HANDLED;
+}
+
static bool pci1xxxx_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
@@ -702,6 +713,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
struct device *dev = &pdev->dev;
struct pci1xxxx_spi *spi_bus;
struct spi_controller *spi_host;
+ int num_vector = 0;
u32 regval;
int ret;
@@ -741,21 +753,19 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
if (ret)
return -ENOMEM;
- ret = pci_request_regions(pdev, DRV_NAME);
+ ret = pcim_request_all_regions(pdev, DRV_NAME);
if (ret)
return -ENOMEM;
spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
- if (!spi_bus->reg_base) {
- ret = -EINVAL;
- goto error;
- }
+ if (!spi_bus->reg_base)
+ return -EINVAL;
- ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt,
- PCI_IRQ_ALL_TYPES);
- if (ret < 0) {
+ num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt,
+ PCI_IRQ_INTX | PCI_IRQ_MSI);
+ if (num_vector < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors\n");
- goto error;
+ return num_vector;
}
init_completion(&spi_sub_ptr->spi_xfer_done);
@@ -767,19 +777,24 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
spi_sub_ptr->irq = pci_irq_vector(pdev, 0);
- ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
- pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
- pci_name(pdev), spi_sub_ptr);
+ if (num_vector >= hw_inst_cnt)
+ ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+ pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
+ pci_name(pdev), spi_sub_ptr);
+ else
+ ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+ pci1xxxx_spi_shared_isr,
+ PCI1XXXX_IRQ_FLAGS | IRQF_SHARED,
+ pci_name(pdev), spi_bus);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to request irq : %d",
spi_sub_ptr->irq);
- ret = -ENODEV;
- goto error;
+ return -ENODEV;
}
ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq);
if (ret && ret != -EOPNOTSUPP)
- goto error;
+ return ret;
/* This register is only applicable for 1st instance */
regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
@@ -801,15 +816,16 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
regval &= ~SPI_INTR;
writel(regval, spi_bus->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
- spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
- ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
- pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
- pci_name(pdev), spi_sub_ptr);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to request irq : %d",
- spi_sub_ptr->irq);
- ret = -ENODEV;
- goto error;
+ if (num_vector >= hw_inst_cnt) {
+ spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
+ ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+ pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
+ pci_name(pdev), spi_sub_ptr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to request irq : %d",
+ spi_sub_ptr->irq);
+ return -ENODEV;
+ }
}
}
@@ -828,15 +844,11 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
spi_controller_set_devdata(spi_host, spi_sub_ptr);
ret = devm_spi_register_controller(dev, spi_host);
if (ret)
- goto error;
+ return ret;
}
pci_set_drvdata(pdev, spi_bus);
return 0;
-
-error:
- pci_release_regions(pdev);
- return ret;
}
static void store_restore_config(struct pci1xxxx_spi *spi_ptr,
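pci_alloc_irq_vectors() with a (1, hw_inst_cnt) range may grant anything in between, and the probe now branches on the grant instead of demanding the maximum. The contract, sketched:

int nvec = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt,
				 PCI_IRQ_INTX | PCI_IRQ_MSI);
if (nvec < 0)
	return nvec;			/* not even one vector */

if (nvec >= hw_inst_cnt) {
	/* dedicated vector per SPI instance: per-instance handlers */
} else {
	/* fewer vectors than instances: one IRQF_SHARED handler fans
	 * out to all instances (pci1xxxx_spi_shared_isr above) */
}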
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index 94948c8781e8..77d9cc65477a 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -116,7 +116,6 @@ struct qpic_spi_nand {
struct nand_ecc_engine ecc_eng;
u8 *data_buf;
u8 *oob_buf;
- u32 wlen;
__le32 addr1;
__le32 addr2;
__le32 cmd;
@@ -131,9 +130,9 @@ static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
int is_last_read_loc)
{
__le32 locreg_val;
- u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
- ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
- << READ_LOCATION_LAST));
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
locreg_val = cpu_to_le32(val);
@@ -152,9 +151,9 @@ static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
int is_last_read_loc)
{
__le32 locreg_val;
- u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
- ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
- << READ_LOCATION_LAST));
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
locreg_val = cpu_to_le32(val);
@@ -250,9 +249,11 @@ static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
{
struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct nand_ecc_props *reqs = &nand->ecc.requirements;
+ struct nand_ecc_props *user = &nand->ecc.user_conf;
struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
struct mtd_info *mtd = nanddev_to_mtd(nand);
- int cwperpage, bad_block_byte;
+ int cwperpage, bad_block_byte, ret;
struct qpic_ecc *ecc_cfg;
cwperpage = mtd->writesize / NANDC_STEP_SIZE;
@@ -261,11 +262,39 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
if (!ecc_cfg)
return -ENOMEM;
- snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
+
+ if (user->step_size && user->strength) {
+ ecc_cfg->step_size = user->step_size;
+ ecc_cfg->strength = user->strength;
+ } else if (reqs->step_size && reqs->strength) {
+ ecc_cfg->step_size = reqs->step_size;
+ ecc_cfg->strength = reqs->strength;
+ } else {
+ /* use defaults */
+ ecc_cfg->step_size = NANDC_STEP_SIZE;
+ ecc_cfg->strength = 4;
+ }
+
+ if (ecc_cfg->step_size != NANDC_STEP_SIZE) {
+ dev_err(snandc->dev,
+ "only %u bytes ECC step size is supported\n",
+ NANDC_STEP_SIZE);
+ ret = -EOPNOTSUPP;
+ goto err_free_ecc_cfg;
+ }
+
+ if (ecc_cfg->strength != 4) {
+ dev_err(snandc->dev,
+ "only 4 bits ECC strength is supported\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_ecc_cfg;
+ }
+
+ snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize,
GFP_KERNEL);
if (!snandc->qspi->oob_buf) {
- kfree(ecc_cfg);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_ecc_cfg;
}
memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
@@ -280,8 +309,6 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
ecc_cfg->steps = 4;
- ecc_cfg->strength = 4;
- ecc_cfg->step_size = 512;
ecc_cfg->cw_data = 516;
ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
@@ -325,7 +352,7 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
FIELD_PREP(ECC_MODE_MASK, 0) |
FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
- ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ ecc_cfg->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
ecc_cfg->clrflashstatus = FS_READY_BSY_N;
ecc_cfg->clrreadstatus = 0xc0;
@@ -339,6 +366,10 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
ecc_cfg->strength, ecc_cfg->step_size);
return 0;
+
+err_free_ecc_cfg:
+ kfree(ecc_cfg);
+ return ret;
}
static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
@@ -452,7 +483,8 @@ static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
snandc->regs->cmd = snandc->qspi->cmd;
snandc->regs->addr0 = snandc->qspi->addr1;
snandc->regs->addr1 = snandc->qspi->addr2;
- snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE));
+ snandc->regs->cfg0 = cpu_to_le32((ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, 0));
snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
snandc->regs->exec = cpu_to_le32(1);
@@ -493,6 +525,22 @@ static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *sna
qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
}
+static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
+{
+ int i;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
const struct spi_mem_op *op)
{
@@ -513,8 +561,8 @@ static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
snandc->regs->addr1 = snandc->qspi->addr2;
- cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- 0 << CW_PER_PAGE;
+ cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, 0);
cfg1 = ecc_cfg->cfg1_raw;
ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
@@ -538,11 +586,9 @@ static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
return ret;
}
- qcom_nandc_dev_to_mem(snandc, true);
- u32 flash = le32_to_cpu(snandc->reg_read_buf[0]);
-
- if (flash & (FS_OP_ERR | FS_MPU_ERR))
- return -EIO;
+ ret = qcom_spi_check_raw_flash_errors(snandc, 1);
+ if (ret)
+ return ret;
bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
@@ -556,7 +602,7 @@ static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
return ret;
}
-static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
+static int qcom_spi_check_error(struct qcom_nand_controller *snandc)
{
struct snandc_read_status *buf;
struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
@@ -573,15 +619,6 @@ static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_bu
for (i = 0; i < num_cw; i++, buf++) {
u32 flash, buffer, erased_cw;
- int data_len, oob_len;
-
- if (i == (num_cw - 1)) {
- data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
- oob_len = num_cw << 2;
- } else {
- data_len = ecc_cfg->cw_data;
- oob_len = 0;
- }
flash = le32_to_cpu(buf->snandc_flash);
buffer = le32_to_cpu(buf->snandc_buffer);
@@ -602,14 +639,23 @@ static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_bu
unsigned int stat;
stat = buffer & BS_CORRECTABLE_ERR_MSK;
+
+ /*
+ * The exact number of the corrected bits is
+ * unknown because the hardware only reports the
+ * number of the corrected bytes.
+ *
+ * Since we have no better solution at the moment,
+ * report that value as the number of bit errors
+ * even though it is inaccurate in most cases.
+ */
+ if (stat && stat != ecc_cfg->strength)
+ dev_warn_once(snandc->dev,
+ "Warning: due to hw limitation, the reported number of the corrected bits may be inaccurate\n");
+
snandc->qspi->ecc_stats.corrected += stat;
max_bitflips = max(max_bitflips, stat);
}
-
- if (data_buf)
- data_buf += data_len;
- if (oob_buf)
- oob_buf += oob_len + ecc_cfg->bytes;
}
if (flash_op_err)
@@ -623,22 +669,6 @@ static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_bu
return 0;
}
-static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
-{
- int i;
-
- qcom_nandc_dev_to_mem(snandc, true);
-
- for (i = 0; i < cw_cnt; i++) {
- u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
-
- if (flash & (FS_OP_ERR | FS_MPU_ERR))
- return -EIO;
- }
-
- return 0;
-}
-
static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
u8 *oob_buf, int cw)
{
@@ -656,8 +686,8 @@ static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_bu
qcom_clear_bam_transaction(snandc);
raw_cw = num_cw - 1;
- cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- 0 << CW_PER_PAGE;
+ cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, 0);
cfg1 = ecc_cfg->cfg1_raw;
ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
@@ -763,22 +793,19 @@ static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
const struct spi_mem_op *op)
{
struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
- u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
+ u8 *data_buf = NULL, *oob_buf = NULL;
int ret, i;
u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
data_buf = op->data.buf.in;
- data_buf_start = data_buf;
-
oob_buf = snandc->qspi->oob_buf;
- oob_buf_start = oob_buf;
snandc->buf_count = 0;
snandc->buf_start = 0;
qcom_clear_read_regs(snandc);
- cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
cfg1 = ecc_cfg->cfg1;
ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
@@ -852,29 +879,26 @@ static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
return ret;
}
- return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
+ return qcom_spi_check_error(snandc);
}
static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
const struct spi_mem_op *op)
{
struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
- u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
+ u8 *oob_buf = NULL;
int ret, i;
u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
oob_buf = op->data.buf.in;
- oob_buf_start = oob_buf;
-
- data_buf_start = data_buf;
snandc->buf_count = 0;
snandc->buf_start = 0;
qcom_clear_read_regs(snandc);
qcom_clear_bam_transaction(snandc);
- cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
cfg1 = ecc_cfg->cfg1;
ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
@@ -934,7 +958,7 @@ static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
return ret;
}
- return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
+ return qcom_spi_check_error(snandc);
}
static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
@@ -984,8 +1008,8 @@ static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
int num_cw = snandc->qspi->num_cw;
u32 cfg0, cfg1, ecc_bch_cfg;
- cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
cfg1 = ecc_cfg->cfg1_raw;
ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
@@ -1067,8 +1091,8 @@ static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
int num_cw = snandc->qspi->num_cw;
u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
- cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
cfg1 = ecc_cfg->cfg1;
ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
@@ -1144,8 +1168,8 @@ static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
int num_cw = snandc->qspi->num_cw;
u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
- cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
cfg1 = ecc_cfg->cfg1;
ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
@@ -1374,8 +1398,10 @@ static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_
}
ret = qcom_submit_descs(snandc);
- if (ret)
+ if (ret) {
dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode);
+ return ret;
+ }
if (copy) {
qcom_nandc_dev_to_mem(snandc, true);
@@ -1389,7 +1415,7 @@ static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_
memcpy(op->data.buf.in, &val, snandc->buf_count);
}
- return ret;
+ return 0;
}
static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
@@ -1604,6 +1630,7 @@ static void qcom_spi_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq9574_snandc_props = {
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
.supports_bam = true,
};
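The recurring conversion in this file replaces open-coded shift-and-mask arithmetic with FIELD_PREP() against *_MASK definitions, so the shift is derived from the mask at compile time and cannot drift out of sync with it. A minimal sketch with a made-up mask:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_CW_PER_PAGE_MASK	GENMASK(6, 4)	/* placeholder */

static u32 example_set_cw_per_page(u32 cfg, u32 num_cw)
{
	/* Equivalent to (cfg & ~mask) | ((num_cw - 1) << 4), with the
	 * shift inferred from the mask. */
	return (cfg & ~EXAMPLE_CW_PER_PAGE_MASK) |
	       FIELD_PREP(EXAMPLE_CW_PER_PAGE_MASK, num_cw - 1);
}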
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index e0c66a24a3cb..627cffea5d5c 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -75,6 +75,19 @@ static bool rpcif_spi_mem_supports_op(struct spi_mem *mem,
return true;
}
+static ssize_t xspi_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+{
+ struct rpcif *rpc = spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (offs + desc->info.offset + len > U32_MAX)
+ return -EINVAL;
+
+ rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len);
+
+ return xspi_dirmap_write(rpc->dev, offs, len, buf);
+}
+
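For context, a hedged sketch of how an spi-mem client would reach this new write path once dirmap_create() accepts SPI_MEM_DATA_OUT templates on xSPI; the 0x02 page-program opcode and the mem, buf and len variables are illustrative assumptions, not taken from this driver:

struct spi_mem_dirmap_info info = {
	.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x02, 1),
			      SPI_MEM_OP_ADDR(3, 0, 1),
			      SPI_MEM_OP_NO_DUMMY,
			      SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
	.offset = 0,
	.length = SZ_16M,
};
struct spi_mem_dirmap_desc *desc;
ssize_t written;

desc = spi_mem_dirmap_create(mem, &info);
if (IS_ERR(desc))
	return PTR_ERR(desc);

written = spi_mem_dirmap_write(desc, 0x1000, len, buf);	/* lands in xspi_dirmap_write() */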
static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
@@ -103,7 +116,7 @@ static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc)
if (!rpc->dirmap)
return -EOPNOTSUPP;
- if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ if (!rpc->xspi && desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
return -EOPNOTSUPP;
return 0;
@@ -125,6 +138,7 @@ static const struct spi_controller_mem_ops rpcif_spi_mem_ops = {
.exec_op = rpcif_spi_mem_exec_op,
.dirmap_create = rpcif_spi_mem_dirmap_create,
.dirmap_read = rpcif_spi_mem_dirmap_read,
+ .dirmap_write = xspi_spi_mem_dirmap_write,
};
static int rpcif_spi_probe(struct platform_device *pdev)
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 8a98c313548e..94a867967e02 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -62,135 +63,6 @@ struct sh_msiof_spi_priv {
#define MAX_SS 3 /* Maximum number of native chip selects */
-#define SITMDR1 0x00 /* Transmit Mode Register 1 */
-#define SITMDR2 0x04 /* Transmit Mode Register 2 */
-#define SITMDR3 0x08 /* Transmit Mode Register 3 */
-#define SIRMDR1 0x10 /* Receive Mode Register 1 */
-#define SIRMDR2 0x14 /* Receive Mode Register 2 */
-#define SIRMDR3 0x18 /* Receive Mode Register 3 */
-#define SITSCR 0x20 /* Transmit Clock Select Register */
-#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
-#define SICTR 0x28 /* Control Register */
-#define SIFCTR 0x30 /* FIFO Control Register */
-#define SISTR 0x40 /* Status Register */
-#define SIIER 0x44 /* Interrupt Enable Register */
-#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
-#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
-#define SITFDR 0x50 /* Transmit FIFO Data Register */
-#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
-#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
-#define SIRFDR 0x60 /* Receive FIFO Data Register */
-
-/* SITMDR1 and SIRMDR1 */
-#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
-#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */
-#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */
-#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */
-#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
-#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
-#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
-#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
-#define SIMDR1_FLD_SHIFT 2
-#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
-/* SITMDR1 */
-#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
-#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */
-#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
-
-/* SITMDR2 and SIRMDR2 */
-#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
-#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
-#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */
-
-/* SITSCR and SIRSCR */
-#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */
-#define SISCR_BRPS(i) (((i) - 1) << 8)
-#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
-#define SISCR_BRDV_DIV_2 0
-#define SISCR_BRDV_DIV_4 1
-#define SISCR_BRDV_DIV_8 2
-#define SISCR_BRDV_DIV_16 3
-#define SISCR_BRDV_DIV_32 4
-#define SISCR_BRDV_DIV_1 7
-
-/* SICTR */
-#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
-#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
-#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
-#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */
-#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
-#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
-#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
-#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
-#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */
-#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */
-#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */
-#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */
-#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
-#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
-#define SICTR_TXE BIT(9) /* Transmit Enable */
-#define SICTR_RXE BIT(8) /* Receive Enable */
-#define SICTR_TXRST BIT(1) /* Transmit Reset */
-#define SICTR_RXRST BIT(0) /* Receive Reset */
-
-/* SIFCTR */
-#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */
-#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */
-#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */
-#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */
-#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */
-#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */
-#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */
-#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */
-#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
-#define SIFCTR_TFUA_SHIFT 20
-#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
-#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */
-#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */
-#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */
-#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */
-#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */
-#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */
-#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */
-#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */
-#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */
-#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
-#define SIFCTR_RFUA_SHIFT 4
-#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT)
-
-/* SISTR */
-#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
-#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
-#define SISTR_TEOF BIT(23) /* Frame Transmission End */
-#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
-#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
-#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
-#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
-#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
-#define SISTR_REOF BIT(7) /* Frame Reception End */
-#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
-#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
-#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
-
-/* SIIER */
-#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
-#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
-#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
-#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
-#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
-#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
-#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
-#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
-#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
-#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
-#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
-#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
-#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
-#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
-
-
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
switch (reg_offs) {
@@ -255,11 +127,6 @@ static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
100);
}
-static const u32 sh_msiof_spi_div_array[] = {
- SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
- SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
-};
-
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
struct spi_transfer *t)
{
@@ -298,7 +165,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
t->effective_speed_hz = parent_rate / (brps << div_pow);
- scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
+ /* div_pow == 0 maps to SISCR_BRDV_DIV_1 == all ones */
+ scr = FIELD_PREP(SISCR_BRDV, div_pow - 1) |
+ FIELD_PREP(SISCR_BRPS, brps - 1);
sh_msiof_write(p, SITSCR, scr);
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, SIRSCR, scr);
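The removed lookup table encoded the one irregularity in the BRDV field; the new form leans on FIELD_PREP()'s masking instead. A worked example, per the SISCR_BRDV_MASK GENMASK(2, 0) definition removed above:

u32 div_pow = 0;
u32 scr;

/* div_pow 1..5 map to BRDV values 0..4 (divide by 2..32), while
 * div_pow == 0 underflows and is masked down to 0x7, i.e.
 * SISCR_BRDV_DIV_1. This only works with a runtime value: a
 * constant 0 - 1 would trip FIELD_PREP()'s compile-time range check.
 */
scr = FIELD_PREP(SISCR_BRDV, div_pow - 1);	/* == 0x7, divide by 1 */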
@@ -340,18 +209,19 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return 0;
}
- val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
- val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;
+ val = FIELD_PREP(SIMDR1_DTDL, sh_msiof_get_delay_bit(p->info->dtdl)) |
+ FIELD_PREP(SIMDR1_SYNCDL,
+ sh_msiof_get_delay_bit(p->info->syncdl));
return val;
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
- u32 cpol, u32 cpha,
- u32 tx_hi_z, u32 lsb_first, u32 cs_high)
+ bool cpol, bool cpha, bool tx_hi_z,
+ bool lsb_first, bool cs_high)
{
+ bool edge;
u32 tmp;
- int edge;
/*
* CPOL CPHA TSCKIZ RSCKIZ TEDG REDG
@@ -360,16 +230,18 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
* 1 0 11 11 0 0
* 1 1 11 11 1 1
*/
- tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
- tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
- tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
+ tmp = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI) |
+ FIELD_PREP(SIMDR1_FLD, 1) | SIMDR1_XXSTP |
+ FIELD_PREP(SIMDR1_SYNCAC, !cs_high) |
+ FIELD_PREP(SIMDR1_BITLSB, lsb_first);
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_target(p->ctlr)) {
sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
sh_msiof_write(p, SITMDR1,
tmp | SIMDR1_TRMD | SITMDR1_PCON |
- (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
+ FIELD_PREP(SITMDR1_SYNCCH,
+ ss < MAX_SS ? ss : 0));
}
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
@@ -378,30 +250,42 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
sh_msiof_write(p, SIRMDR1, tmp);
tmp = 0;
- tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
- tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;
+ tmp |= SICTR_TSCKIZ_SCK | FIELD_PREP(SICTR_TSCKIZ_POL, cpol);
+ tmp |= SICTR_RSCKIZ_SCK | FIELD_PREP(SICTR_RSCKIZ_POL, cpol);
edge = cpol ^ !cpha;
- tmp |= edge << SICTR_TEDG_SHIFT;
- tmp |= edge << SICTR_REDG_SHIFT;
- tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
+ tmp |= FIELD_PREP(SICTR_TEDG, edge);
+ tmp |= FIELD_PREP(SICTR_REDG, edge);
+ tmp |= FIELD_PREP(SICTR_TXDIZ,
+ tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW);
sh_msiof_write(p, SICTR, tmp);
}
static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
- u32 bits, u32 words)
+ u32 bits, u32 words1, u32 words2)
{
- u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);
+ u32 dr2 = FIELD_PREP(SIMDR2_GRP, words2 ? 1 : 0) |
+ FIELD_PREP(SIMDR2_BITLEN1, bits - 1) |
+ FIELD_PREP(SIMDR2_WDLEN1, words1 - 1);
if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, SITMDR2, dr2);
else
- sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);
+ sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK);
if (rx_buf)
sh_msiof_write(p, SIRMDR2, dr2);
+
+ if (words2) {
+ u32 dr3 = FIELD_PREP(SIMDR3_BITLEN2, bits - 1) |
+ FIELD_PREP(SIMDR3_WDLEN2, words2 - 1);
+
+ sh_msiof_write(p, SITMDR3, dr3);
+ if (rx_buf)
+ sh_msiof_write(p, SIRMDR3, dr3);
+ }
}
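With the second word group in play, a long DMA transfer no longer has to be capped at WDLEN1's 256-word maximum. A worked example, assuming 256-word FIFOs (R-Car Gen4):

/* 1200-byte DMA transfer, 32-bit words, max_wdlen == 256:
 *
 *   words1 = min(1200 / 4, 256) = 256  ->  SITMDR2/SIRMDR2 (group 1)
 *   words2 = 1200 / 4 - 256     =  44  ->  SITMDR3/SIRMDR3 (group 2)
 *
 * words2 == 0 preserves the old single-group behaviour and leaves
 * the SIMDR3 registers unwritten.
 */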
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
@@ -411,140 +295,154 @@ static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u8 *buf_8 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}
static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u16 *buf_16 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}
static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u16 *buf_16 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}
static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u32 *buf_32 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}
static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u32 *buf_32 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}
static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf, unsigned int words,
+ unsigned int fs)
{
const u32 *buf_32 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}
static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
- const void *tx_buf, int words, int fs)
+ const void *tx_buf,
+ unsigned int words, unsigned int fs)
{
const u32 *buf_32 = tx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}
static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u8 *buf_8 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u16 *buf_16 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u16 *buf_16 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}
static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u32 *buf_32 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}
static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u32 *buf_32 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}
static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u32 *buf_32 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}
static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
- void *rx_buf, int words, int fs)
+ void *rx_buf, unsigned int words,
+ unsigned int fs)
{
u32 *buf_32 = rx_buf;
- int k;
+ unsigned int k;
for (k = 0; k < words; k++)
put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
@@ -564,12 +462,12 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
/* Configure native chip select mode/polarity early */
- clr = SIMDR1_SYNCMD_MASK;
- set = SIMDR1_SYNCMD_SPI;
+ clr = SIMDR1_SYNCMD;
+ set = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI);
if (spi->mode & SPI_CS_HIGH)
- clr |= BIT(SIMDR1_SYNCAC_SHIFT);
+ clr |= SIMDR1_SYNCAC;
else
- set |= BIT(SIMDR1_SYNCAC_SHIFT);
+ set |= SIMDR1_SYNCAC;
pm_runtime_get_sync(&p->pdev->dev);
tmp = sh_msiof_read(p, SITMDR1) & ~clr;
sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
@@ -586,7 +484,8 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
{
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
const struct spi_device *spi = msg->spi;
- u32 ss, cs_high;
+ bool cs_high;
+ u32 ss;
/* Configure pins before asserting CS */
if (spi_get_csgpiod(spi, 0)) {
@@ -594,12 +493,11 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
cs_high = p->native_cs_high;
} else {
ss = spi_get_chipselect(spi, 0);
- cs_high = !!(spi->mode & SPI_CS_HIGH);
+ cs_high = spi->mode & SPI_CS_HIGH;
}
- sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST), cs_high);
+ sh_msiof_spi_set_pin_regs(p, ss, spi->mode & SPI_CPOL,
+ spi->mode & SPI_CPHA, spi->mode & SPI_3WIRE,
+ spi->mode & SPI_LSB_FIRST, cs_high);
return 0;
}
@@ -672,20 +570,22 @@ static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
void (*tx_fifo)(struct sh_msiof_spi_priv *,
- const void *, int, int),
+ const void *, unsigned int,
+ unsigned int),
void (*rx_fifo)(struct sh_msiof_spi_priv *,
- void *, int, int),
+ void *, unsigned int,
+ unsigned int),
const void *tx_buf, void *rx_buf,
- int words, int bits)
+ unsigned int words, unsigned int bits)
{
- int fifo_shift;
+ unsigned int fifo_shift;
int ret;
/* limit maximum word transfer to rx/tx fifo size */
if (tx_buf)
- words = min_t(int, words, p->tx_fifo_size);
+ words = min(words, p->tx_fifo_size);
if (rx_buf)
- words = min_t(int, words, p->rx_fifo_size);
+ words = min(words, p->rx_fifo_size);
/* the fifo contents need shifting */
fifo_shift = 32 - bits;
@@ -694,7 +594,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
sh_msiof_write(p, SIFCTR, 0);
/* setup msiof transfer mode registers */
- sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
+ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words, 0);
sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);
/* write tx fifo */
@@ -744,10 +644,12 @@ static void sh_msiof_dma_complete(void *arg)
}
static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
- void *rx, unsigned int len)
+ void *rx, unsigned int len,
+ unsigned int max_wdlen)
{
u32 ier_bits = 0;
struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ unsigned int words1, words2;
dma_cookie_t cookie;
int ret;
@@ -789,10 +691,14 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
/* 1 stage FIFO watermarks for DMA */
- sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);
+ sh_msiof_write(p, SIFCTR,
+ FIELD_PREP(SIFCTR_TFWM, SIFCTR_TFWM_1) |
+ FIELD_PREP(SIFCTR_RFWM, SIFCTR_RFWM_1));
/* setup msiof transfer mode registers (32-bit words) */
- sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
+ words1 = min(len / 4, max_wdlen);
+ words2 = len / 4 - words1;
+ sh_msiof_spi_set_mode_regs(p, tx, rx, 32, words1, words2);
sh_msiof_write(p, SIIER, ier_bits);
@@ -911,9 +817,12 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *t)
{
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
+ unsigned int max_wdlen = FIELD_MAX(SIMDR2_WDLEN1) + 1;
void (*copy32)(u32 *, const u32 *, unsigned int);
- void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
- void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
+ void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, unsigned int,
+ unsigned int);
+ void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, unsigned int,
+ unsigned int);
const void *tx_buf = t->tx_buf;
void *rx_buf = t->rx_buf;
unsigned int len = t->len;
@@ -931,17 +840,17 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
if (!spi_controller_is_target(p->ctlr))
sh_msiof_spi_set_clk_regs(p, t);
+ if (tx_buf)
+ max_wdlen = min(max_wdlen, p->tx_fifo_size);
+ if (rx_buf)
+ max_wdlen = min(max_wdlen, p->rx_fifo_size);
+
while (ctlr->dma_tx && len > 15) {
/*
* DMA supports 32-bit words only, hence pack 8-bit and 16-bit
* words, with byte resp. word swapping.
*/
- unsigned int l = 0;
-
- if (tx_buf)
- l = min(round_down(len, 4), p->tx_fifo_size * 4);
- if (rx_buf)
- l = min(round_down(len, 4), p->rx_fifo_size * 4);
+ unsigned int l = min(round_down(len, 4), 2 * max_wdlen * 4);
if (bits <= 8) {
copy32 = copy_bswap32;
@@ -954,7 +863,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
if (tx_buf)
copy32(p->tx_dma_page, tx_buf, l / 4);
- ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
+ ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l, max_wdlen);
if (ret == -EAGAIN) {
dev_warn_once(&p->pdev->dev,
"DMA not available, falling back to PIO\n");
@@ -1061,7 +970,7 @@ static const struct sh_msiof_chipdata rcar_gen2_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
- .rx_fifo_size = 64,
+ .rx_fifo_size = 128,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 0,
};
@@ -1070,7 +979,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
- .rx_fifo_size = 64,
+ .rx_fifo_size = 256,
+ .ctlr_flags = SPI_CONTROLLER_MUST_TX,
+ .min_div_pow = 1,
+};
+
+static const struct sh_msiof_chipdata rcar_gen4_data = {
+ .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
+ .tx_fifo_size = 256,
+ .rx_fifo_size = 256,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 1,
};
@@ -1079,7 +997,7 @@ static const struct sh_msiof_chipdata rcar_r8a7795_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
- .rx_fifo_size = 64,
+ .rx_fifo_size = 256,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 1,
.flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
@@ -1087,20 +1005,14 @@ static const struct sh_msiof_chipdata rcar_r8a7795_data = {
static const struct of_device_id sh_msiof_match[] __maybe_unused = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
- { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
- { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
- { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
- { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
+ { .compatible = "renesas,msiof-r8a779a0", .data = &rcar_gen3_data },
+ { .compatible = "renesas,msiof-r8a779f0", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen4_data },
{ .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
@@ -1276,20 +1188,26 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
+ struct device *dev = &pdev->dev;
unsigned long clksrc;
int i;
int ret;
- chipdata = of_device_get_match_data(&pdev->dev);
+ /* Check whether MSIOF is used in I2S or SPI mode by looking for a "port" node */

+ struct device_node *port __free(device_node) = of_graph_get_next_port(dev->of_node, NULL);
+ if (port) /* wired up as MSIOF-I2S */
+ return -ENODEV;
+
+ chipdata = of_device_get_match_data(dev);
if (chipdata) {
- info = sh_msiof_spi_parse_dt(&pdev->dev);
+ info = sh_msiof_spi_parse_dt(dev);
} else {
chipdata = (const void *)pdev->id_entry->driver_data;
- info = dev_get_platdata(&pdev->dev);
+ info = dev_get_platdata(dev);
}
if (!info) {
- dev_err(&pdev->dev, "failed to obtain device info\n");
+ dev_err(dev, "failed to obtain device info\n");
return -ENXIO;
}
@@ -1297,11 +1215,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
info->dtdl = 200;
if (info->mode == MSIOF_SPI_TARGET)
- ctlr = spi_alloc_target(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
+ ctlr = spi_alloc_target(dev, sizeof(struct sh_msiof_spi_priv));
else
- ctlr = spi_alloc_host(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
+ ctlr = spi_alloc_host(dev, sizeof(struct sh_msiof_spi_priv));
if (ctlr == NULL)
return -ENOMEM;
@@ -1315,9 +1231,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
init_completion(&p->done);
init_completion(&p->done_txdma);
- p->clk = devm_clk_get(&pdev->dev, NULL);
+ p->clk = devm_clk_get(dev, NULL);
if (IS_ERR(p->clk)) {
- dev_err(&pdev->dev, "cannot get clock\n");
+ dev_err(dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err1;
}
@@ -1334,15 +1250,14 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
goto err1;
}
- ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
- dev_name(&pdev->dev), p);
+ ret = devm_request_irq(dev, i, sh_msiof_spi_irq, 0, dev_name(dev), p);
if (ret) {
- dev_err(&pdev->dev, "unable to request irq\n");
+ dev_err(dev, "unable to request irq\n");
goto err1;
}
p->pdev = pdev;
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
/* Platform data may override FIFO sizes */
p->tx_fifo_size = chipdata->tx_fifo_size;
@@ -1361,7 +1276,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
ctlr->num_chipselect = p->info->num_chipselect;
- ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->dev.of_node = dev->of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
ctlr->target_abort = sh_msiof_target_abort;
@@ -1373,11 +1288,11 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ret = sh_msiof_request_dma(p);
if (ret < 0)
- dev_warn(&pdev->dev, "DMA not available, using PIO\n");
+ dev_warn(dev, "DMA not available, using PIO\n");
- ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ ret = devm_spi_register_controller(dev, ctlr);
if (ret < 0) {
- dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
+ dev_err(dev, "devm_spi_register_controller error.\n");
goto err2;
}
@@ -1385,7 +1300,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
err2:
sh_msiof_release_dma(p);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
err1:
spi_controller_put(ctlr);
return ret;
diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c
index 9ec9823409cc..4ab7e86f4bd5 100644
--- a/drivers/spi/spi-stm32-ospi.c
+++ b/drivers/spi/spi-stm32-ospi.c
@@ -804,7 +804,7 @@ static int stm32_ospi_get_resources(struct platform_device *pdev)
return ret;
}
- ospi->rstc = devm_reset_control_array_get_optional_exclusive(dev);
+ ospi->rstc = devm_reset_control_array_get_exclusive_released(dev);
if (IS_ERR(ospi->rstc))
return dev_err_probe(dev, PTR_ERR(ospi->rstc),
"Can't get reset\n");
@@ -936,12 +936,16 @@ static int stm32_ospi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_enable;
- if (ospi->rstc) {
- reset_control_assert(ospi->rstc);
- udelay(2);
- reset_control_deassert(ospi->rstc);
+ ret = reset_control_acquire(ospi->rstc);
+ if (ret) {
+ dev_err_probe(dev, ret, "Cannot acquire reset\n");
+ goto err_pm_resume;
}
+ reset_control_assert(ospi->rstc);
+ udelay(2);
+ reset_control_deassert(ospi->rstc);
+
ret = spi_register_controller(ctrl);
if (ret) {
/* Disable ospi */
@@ -987,6 +991,8 @@ static void stm32_ospi_remove(struct platform_device *pdev)
if (ospi->dma_chrx)
dma_release_channel(ospi->dma_chrx);
+ reset_control_release(ospi->rstc);
+
pm_runtime_put_sync_suspend(ospi->dev);
pm_runtime_force_suspend(ospi->dev);
}
@@ -997,6 +1003,8 @@ static int __maybe_unused stm32_ospi_suspend(struct device *dev)
pinctrl_pm_select_sleep_state(dev);
+ reset_control_release(ospi->rstc);
+
return pm_runtime_force_suspend(ospi->dev);
}
@@ -1016,6 +1024,12 @@ static int __maybe_unused stm32_ospi_resume(struct device *dev)
if (ret < 0)
return ret;
+ ret = reset_control_acquire(ospi->rstc);
+ if (ret) {
+ dev_err(dev, "Can not acquire reset\n");
+ return ret;
+ }
+
writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
pm_runtime_mark_last_busy(ospi->dev);
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index f89826d7dc49..aa92fd5a35a9 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host,
else
reg |= SUN4I_CTL_DHB;
+ /* Now that the settings are correct, enable the interface */
+ reg |= SUN4I_CTL_ENABLE;
+
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
/* Ensure that we have a parent clock fast enough */
@@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev)
}
sun4i_spi_write(sspi, SUN4I_CTL_REG,
- SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+ SUN4I_CTL_MASTER | SUN4I_CTL_TP);
return 0;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 2a8bb798e95b..795a8482c2c7 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
u32 inactive_cycles;
u8 cs_state;
- if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) ||
- (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) ||
- (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) ||
+ (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) ||
+ (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) {
dev_err(&spi->dev,
"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
SPI_DELAY_UNIT_SCK);
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 64e1b2f8a000..3be7499db21e 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -22,6 +22,7 @@
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/property.h>
+#include <linux/sizes.h>
#define QSPI_COMMAND1 0x000
#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
@@ -110,6 +111,9 @@
#define QSPI_DMA_BLK 0x024
#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)
+#define QSPI_DMA_MEM_ADDRESS 0x028
+#define QSPI_DMA_HI_ADDRESS 0x02c
+
#define QSPI_TX_FIFO 0x108
#define QSPI_RX_FIFO 0x188
@@ -134,7 +138,7 @@
#define QSPI_COMMAND_VALUE_SET(X) (((x) & 0xFF) << 0)
#define QSPI_CMB_SEQ_CMD_CFG 0x1a0
-#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_COMMAND_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13)
#define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13)
#define QSPI_COMMAND_SDR_DDR BIT(12)
#define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0)
@@ -147,7 +151,7 @@
#define QSPI_ADDRESS_VALUE_SET(X) (((x) & 0xFFFF) << 0)
#define QSPI_CMB_SEQ_ADDR_CFG 0x1ac
-#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13)
+#define QSPI_ADDRESS_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13)
#define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13)
#define QSPI_ADDRESS_SDR_DDR BIT(12)
#define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0)
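The >> 1 in the reworked X1_X2_X4 macros converts a bus-width line count into the hardware encoding; a quick check of all three widths:

/* tx_nbits -> field value, after the fix:
 *   X1: QSPI_COMMAND_X1_X2_X4(1) == ((1 >> 1) & 0x3) == 0
 *   X2: QSPI_COMMAND_X1_X2_X4(2) == ((2 >> 1) & 0x3) == 1
 *   X4: QSPI_COMMAND_X1_X2_X4(4) == ((4 >> 1) & 0x3) == 2
 * The old form passed the line count straight through, so X1 (1)
 * selected the X2 encoding and X4 (4 & 0x3) wrapped around to X1.
 */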
@@ -156,15 +160,19 @@
#define DATA_DIR_RX BIT(1)
#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
-#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024)
-#define CMD_TRANSFER 0
-#define ADDR_TRANSFER 1
-#define DATA_TRANSFER 2
+#define DEFAULT_QSPI_DMA_BUF_LEN SZ_64K
+
+enum tegra_qspi_transfer_type {
+ CMD_TRANSFER = 0,
+ ADDR_TRANSFER = 1,
+ DUMMY_TRANSFER = 2,
+ DATA_TRANSFER = 3
+};
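A hedged sketch of a message shape the combined sequence now handles, with the new optional dummy phase between address and data; the opcode, lengths and widths are illustrative only:

u8 cmd = 0x6b;		/* assumed quad-output read opcode */
u32 addr = 0;
u8 dummy = 0xff;
u8 data[256];
struct spi_transfer xfers[] = {
	{ .tx_buf = &cmd,   .len = 1, .tx_nbits = 1 },	/* CMD_TRANSFER   */
	{ .tx_buf = &addr,  .len = 3, .tx_nbits = 1 },	/* ADDR_TRANSFER  */
	{ .tx_buf = &dummy, .len = 1, .tx_nbits = 1,
	  .dummy_data = 1 },	/* DUMMY_TRANSFER: 1 * 8 / 1 = 8 cycles */
	{ .rx_buf = data, .len = sizeof(data), .rx_nbits = 4 },
						/* DATA_TRANSFER  */
};

Three transfers (no dummy phase) remain valid; anything outside 3..4 is rejected by tegra_qspi_validate_cmb_seq().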
struct tegra_qspi_soc_data {
- bool has_dma;
bool cmb_xfer_capable;
bool supports_tpm;
+ bool has_ext_dma;
unsigned int cs_count;
};
@@ -399,9 +407,6 @@ tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_tra
static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
- dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
- tqspi->dma_buf_size, DMA_TO_DEVICE);
-
/*
* In packed mode, each word in FIFO may contain multiple packets
* based on bits per word. So all bytes in each FIFO word are valid.
@@ -434,17 +439,11 @@ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_
tqspi->cur_tx_pos += write_bytes;
}
-
- dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
- tqspi->dma_buf_size, DMA_TO_DEVICE);
}
static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
- dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
- tqspi->dma_buf_size, DMA_FROM_DEVICE);
-
if (tqspi->is_packed) {
tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
} else {
@@ -470,9 +469,6 @@ tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_
tqspi->cur_rx_pos += read_bytes;
}
-
- dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
- tqspi->dma_buf_size, DMA_FROM_DEVICE);
}
static void tegra_qspi_dma_complete(void *args)
@@ -600,13 +596,16 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans
len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
- dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
- dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+ if (t->tx_buf)
+ dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+ if (t->rx_buf)
+ dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}
static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
struct dma_slave_config dma_sconfig = { 0 };
+ dma_addr_t rx_dma_phys, tx_dma_phys;
unsigned int len;
u8 dma_burst;
int ret = 0;
@@ -629,60 +628,84 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
len = tqspi->curr_dma_words * 4;
/* set attention level based on length of transfer */
- val = 0;
- if (len & 0xf) {
- val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
- dma_burst = 1;
- } else if (((len) >> 4) & 0x1) {
- val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
- dma_burst = 4;
- } else {
- val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
- dma_burst = 8;
+ if (tqspi->soc_data->has_ext_dma) {
+ val = 0;
+ if (len & 0xf) {
+ val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
+ val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+ dma_burst = 4;
+ } else {
+ val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
+
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
}
- tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
tqspi->dma_control_reg = val;
dma_sconfig.device_fc = true;
+
if (tqspi->cur_direction & DATA_DIR_TX) {
- dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
- dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.dst_maxburst = dma_burst;
- ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
- return ret;
- }
+ if (tqspi->tx_dma_chan) {
+ dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
- tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
- ret = tegra_qspi_start_tx_dma(tqspi, t, len);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
- return ret;
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ ret = tegra_qspi_start_tx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
+ return ret;
+ }
+ } else {
+ if (tqspi->is_packed)
+ tx_dma_phys = t->tx_dma;
+ else
+ tx_dma_phys = tqspi->tx_dma_phys;
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
+ QSPI_DMA_MEM_ADDRESS);
+ tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
+ QSPI_DMA_HI_ADDRESS);
}
}
if (tqspi->cur_direction & DATA_DIR_RX) {
- dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
- dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.src_maxburst = dma_burst;
- ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
- return ret;
- }
-
- dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
- tqspi->dma_buf_size,
- DMA_FROM_DEVICE);
+ if (tqspi->rx_dma_chan) {
+ dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
- ret = tegra_qspi_start_rx_dma(tqspi, t, len);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
- if (tqspi->cur_direction & DATA_DIR_TX)
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- return ret;
+ ret = tegra_qspi_start_rx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ return ret;
+ }
+ } else {
+ if (tqspi->is_packed)
+ rx_dma_phys = t->rx_dma;
+ else
+ rx_dma_phys = tqspi->rx_dma_phys;
+
+ tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys),
+ QSPI_DMA_MEM_ADDRESS);
+ tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff),
+ QSPI_DMA_HI_ADDRESS);
}
}
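On the internal-DMA path the controller is handed the bus address directly, split into a 32-bit low word and an 8-bit high byte, i.e. a 40-bit address space. For example:

/* rx_dma_phys == 0x1_2345_6000:
 *   QSPI_DMA_MEM_ADDRESS <- lower_32_bits(rx_dma_phys)        == 0x23456000
 *   QSPI_DMA_HI_ADDRESS  <- upper_32_bits(rx_dma_phys) & 0xff == 0x01
 */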
@@ -721,9 +744,6 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s
static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
- if (!tqspi->soc_data->has_dma)
- return;
-
if (tqspi->tx_dma_buf) {
dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
tqspi->tx_dma_buf, tqspi->tx_dma_phys);
@@ -754,16 +774,29 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
u32 *dma_buf;
int err;
- if (!tqspi->soc_data->has_dma)
- return 0;
+ if (tqspi->soc_data->has_ext_dma) {
+ dma_chan = dma_request_chan(tqspi->dev, "rx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
- dma_chan = dma_request_chan(tqspi->dev, "rx");
- if (IS_ERR(dma_chan)) {
- err = PTR_ERR(dma_chan);
- goto err_out;
- }
+ tqspi->rx_dma_chan = dma_chan;
- tqspi->rx_dma_chan = dma_chan;
+ dma_chan = dma_request_chan(tqspi->dev, "tx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
+
+ tqspi->tx_dma_chan = dma_chan;
+ } else {
+ if (!device_iommu_mapped(tqspi->dev)) {
+ dev_warn(tqspi->dev,
+ "IOMMU not enabled in device-tree, falling back to PIO mode\n");
+ return 0;
+ }
+ }
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
@@ -774,14 +807,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
tqspi->rx_dma_buf = dma_buf;
tqspi->rx_dma_phys = dma_phys;
- dma_chan = dma_request_chan(tqspi->dev, "tx");
- if (IS_ERR(dma_chan)) {
- err = PTR_ERR(dma_chan);
- goto err_out;
- }
-
- tqspi->tx_dma_chan = dma_chan;
-
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
err = -ENOMEM;
@@ -1036,10 +1061,6 @@ static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
{
u32 addr_config = 0;
- /* Extract Address configuration and value */
- is_ddr = 0; //Only SDR mode supported
- bus_width = 0; //X1 mode
-
if (is_ddr)
addr_config |= QSPI_ADDRESS_SDR_DDR;
else
@@ -1079,16 +1100,23 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
switch (transfer_phase) {
case CMD_TRANSFER:
/* X1 SDR mode */
- cmd_config = tegra_qspi_cmd_config(false, 0,
+ cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits,
xfer->len);
cmd_value = *((const u8 *)(xfer->tx_buf));
break;
case ADDR_TRANSFER:
/* X1 SDR mode */
- addr_config = tegra_qspi_addr_config(false, 0,
+ addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits,
xfer->len);
address_value = *((const u32 *)(xfer->tx_buf));
break;
+ case DUMMY_TRANSFER:
+ if (xfer->dummy_data) {
+ tqspi->dummy_cycles = xfer->len * 8 / xfer->tx_nbits;
+ break;
+ }
+ transfer_phase++;
+ fallthrough;
case DATA_TRANSFER:
/* Program Command, Address value in register */
tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
@@ -1120,15 +1148,14 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
if (WARN_ON_ONCE(ret == 0)) {
dev_err_ratelimited(tqspi->dev,
"QSPI Transfer failed with timeout\n");
- if (tqspi->is_curr_dma_xfer &&
- (tqspi->cur_direction & DATA_DIR_TX))
- dmaengine_terminate_all
- (tqspi->tx_dma_chan);
-
- if (tqspi->is_curr_dma_xfer &&
- (tqspi->cur_direction & DATA_DIR_RX))
- dmaengine_terminate_all
- (tqspi->rx_dma_chan);
+ if (tqspi->is_curr_dma_xfer) {
+ if ((tqspi->cur_direction & DATA_DIR_TX) &&
+ tqspi->tx_dma_chan)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ if ((tqspi->cur_direction & DATA_DIR_RX) &&
+ tqspi->rx_dma_chan)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ }
/* Abort transfer by resetting pio/dma bit */
if (!tqspi->is_curr_dma_xfer) {
@@ -1163,26 +1190,22 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
ret = -EIO;
goto exit;
}
- if (!xfer->cs_change) {
- tegra_qspi_transfer_end(spi);
- spi_transfer_delay_exec(xfer);
- }
break;
default:
ret = -EINVAL;
goto exit;
}
msg->actual_length += xfer->len;
+ if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) {
+ tegra_qspi_transfer_end(spi);
+ spi_transfer_delay_exec(xfer);
+ }
transfer_phase++;
}
ret = 0;
exit:
msg->status = ret;
- if (ret < 0) {
- tegra_qspi_transfer_end(spi);
- spi_transfer_delay_exec(xfer);
- }
return ret;
}
@@ -1247,10 +1270,12 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "transfer timeout\n");
- if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
- dmaengine_terminate_all(tqspi->rx_dma_chan);
+ if (tqspi->is_curr_dma_xfer) {
+ if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ }
tegra_qspi_handle_error(tqspi);
ret = -EIO;
goto complete_xfer;
@@ -1300,7 +1325,9 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
transfer_count++;
}
- if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3)
+ if (!tqspi->soc_data->cmb_xfer_capable)
+ return false;
+ if (transfer_count > 4 || transfer_count < 3)
return false;
xfer = list_first_entry(&msg->transfers, typeof(*xfer),
transfer_list);
@@ -1310,7 +1337,14 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
if (xfer->len > 4 || xfer->len < 3)
return false;
xfer = list_next_entry(xfer, transfer_list);
- if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
+ if (transfer_count == 4) {
+ if (xfer->dummy_data != 1)
+ return false;
+ if ((xfer->len * 8 / xfer->tx_nbits) > QSPI_DUMMY_CYCLES_MAX)
+ return false;
+ xfer = list_next_entry(xfer, transfer_list);
+ }
+ if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
return false;
return true;
@@ -1371,41 +1405,43 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
unsigned int total_fifo_words;
unsigned long flags;
long wait_status;
- int err = 0;
+ int num_errors = 0;
if (tqspi->cur_direction & DATA_DIR_TX) {
if (tqspi->tx_status) {
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- err += 1;
- } else {
+ if (tqspi->tx_dma_chan)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ num_errors++;
+ } else if (tqspi->tx_dma_chan) {
wait_status = wait_for_completion_interruptible_timeout(
&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tqspi->tx_dma_chan);
dev_err(tqspi->dev, "failed TX DMA transfer\n");
- err += 1;
+ num_errors++;
}
}
}
if (tqspi->cur_direction & DATA_DIR_RX) {
if (tqspi->rx_status) {
- dmaengine_terminate_all(tqspi->rx_dma_chan);
- err += 2;
- } else {
+ if (tqspi->rx_dma_chan)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ num_errors++;
+ } else if (tqspi->rx_dma_chan) {
wait_status = wait_for_completion_interruptible_timeout(
&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tqspi->rx_dma_chan);
dev_err(tqspi->dev, "failed RX DMA transfer\n");
- err += 2;
+ num_errors++;
}
}
}
spin_lock_irqsave(&tqspi->lock, flags);
- if (err) {
+ if (num_errors) {
tegra_qspi_dma_unmap_xfer(tqspi, t);
tegra_qspi_handle_error(tqspi);
complete(&tqspi->xfer_completion);
@@ -1431,9 +1467,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
/* continue transfer in current message */
total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
if (total_fifo_words > QSPI_FIFO_DEPTH)
- err = tegra_qspi_start_dma_based_transfer(tqspi, t);
+ num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t);
else
- err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
+ num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
spin_unlock_irqrestore(&tqspi->lock, flags);
@@ -1461,28 +1497,28 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
}
static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
- .has_dma = true,
+ .has_ext_dma = true,
.cmb_xfer_capable = false,
.supports_tpm = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
- .has_dma = true,
+ .has_ext_dma = true,
.cmb_xfer_capable = true,
.supports_tpm = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
- .has_dma = false,
+ .has_ext_dma = false,
.cmb_xfer_capable = true,
.supports_tpm = true,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
- .has_dma = false,
+ .has_ext_dma = true,
.cmb_xfer_capable = true,
.supports_tpm = true,
.cs_count = 4,
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 3bd0149d8f4e..1a40c4866ce1 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -44,8 +44,8 @@ struct spi_xcomm {
u8 buf[63];
};
-static void spi_xcomm_gpio_set_value(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int spi_xcomm_gpio_set_value(struct gpio_chip *chip,
+ unsigned int offset, int val)
{
struct spi_xcomm *spi_xcomm = gpiochip_get_data(chip);
unsigned char buf[2];
@@ -53,7 +53,7 @@ static void spi_xcomm_gpio_set_value(struct gpio_chip *chip,
buf[0] = SPI_XCOMM_CMD_GPIO_SET;
buf[1] = !!val;
- i2c_master_send(spi_xcomm->i2c, buf, 2);
+ return i2c_master_send(spi_xcomm->i2c, buf, 2);
}
static int spi_xcomm_gpio_get_direction(struct gpio_chip *chip,
@@ -70,7 +70,7 @@ static int spi_xcomm_gpio_add(struct spi_xcomm *spi_xcomm)
return 0;
spi_xcomm->gc.get_direction = spi_xcomm_gpio_get_direction;
- spi_xcomm->gc.set = spi_xcomm_gpio_set_value;
+ spi_xcomm->gc.set_rv = spi_xcomm_gpio_set_value;
spi_xcomm->gc.can_sleep = 1;
spi_xcomm->gc.base = -1;
spi_xcomm->gc.ngpio = 1;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 90e27729ef6b..1bc0fdbb1bd7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1076,10 +1076,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
- if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
- spi_is_last_cs(spi)) ||
- (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
- !spi_is_last_cs(spi))) &&
+ if (!force && (enable == spi_is_last_cs(spi)) &&
+ (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
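The collapsed test is the old one with the common factor pulled out: both legs required last_cs_index_mask == cs_index_mask, and the remaining asymmetry reduces to enable == spi_is_last_cs(spi), as the enumeration shows:

/*
 *   enable   spi_is_last_cs()   old condition   enable == spi_is_last_cs()
 *     1             1               skip                  skip
 *     1             0               call                  call
 *     0             1               call                  call
 *     0             0               skip                  skip
 */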
@@ -1088,9 +1086,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->last_cs_index_mask = spi->cs_index_mask;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
- spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if (spi->mode & SPI_CS_HIGH)
+ spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+ if (spi->controller->last_cs_mode_high)
enable = !enable;
/*
@@ -3802,7 +3800,7 @@ int spi_split_transfers_maxwords(struct spi_controller *ctlr,
size_t maxsize;
int ret;
- maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
+ maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
maxsize);
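Worked numbers for the rounding change, taking maxwords == 100:

/* bits_per_word == 24:
 *   before: 100 * roundup_pow_of_two(BITS_TO_BYTES(24)) == 100 * 4 == 400
 *   after:  100 * spi_bpw_to_bytes(24)                  == 100 * 3 == 300
 * For 8-, 16- and 32-bit words both expressions agree; only sizes whose
 * rounded byte count is not a power of two (e.g. 24 bits -> 3 bytes) change.
 */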
@@ -4096,6 +4094,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
return -EINVAL;
+ /*
+ * DDR (DTR) mode is supported only when the controller sets
+ * dtr_caps = true; otherwise SDR mode is assumed. Note: this
+ * currently applies only to QSPI controllers.
+ */
+ if (xfer->dtr_mode && !ctlr->dtr_caps)
+ return -EINVAL;
+
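A hedged example of a transfer that the new check gates; the field names are per this patch, while buf and len are placeholders:

struct spi_transfer t = {
	.tx_buf   = buf,
	.len      = len,
	.tx_nbits = 8,		/* octal */
	.dtr_mode = true,	/* data clocked on both edges */
};

/* __spi_validate() now fails this with -EINVAL unless the
 * controller driver has set ctlr->dtr_caps = true. */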
/*
* SPI transfer length should be multiple of SPI word size
* where SPI word size should be power-of-two multiple.
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 737802046314..a80cf4047b86 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -11,9 +11,18 @@ menuconfig SPMI
if SPMI
+config SPMI_APPLE
+ tristate "Apple SoC SPMI Controller platform driver"
+ depends on ARCH_APPLE || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ SPMI controller present on many Apple SoCs, including the
+ t8103 (M1) and t600x (M1 Pro/Max).
+
config SPMI_HISI3670
tristate "Hisilicon 3670 SPMI Controller"
select IRQ_DOMAIN_HIERARCHY
+ depends on ARM64 || COMPILE_TEST
depends on HAS_IOMEM
help
If you say yes to this option, support will be included for the
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index 7f152167bb05..38ac635645ba 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_SPMI) += spmi.o spmi-devres.o
+obj-$(CONFIG_SPMI_APPLE) += spmi-apple-controller.o
obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
obj-$(CONFIG_SPMI_MTK_PMIF) += spmi-mtk-pmif.o
diff --git a/drivers/spmi/spmi-apple-controller.c b/drivers/spmi/spmi-apple-controller.c
new file mode 100644
index 000000000000..697b3e8bb023
--- /dev/null
+++ b/drivers/spmi/spmi-apple-controller.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple SoC SPMI device driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * Inspired by:
+ * OpenBSD support Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
+ * Corellium support Copyright (C) 2021 Corellium LLC
+ * hisi-spmi-controller.c
+ * spmi-pmic-arb.c Copyright (c) 2021, The Linux Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/spmi.h>
+
+/* SPMI Controller Registers */
+#define SPMI_STATUS_REG 0
+#define SPMI_CMD_REG 0x4
+#define SPMI_RSP_REG 0x8
+
+#define SPMI_RX_FIFO_EMPTY BIT(24)
+
+#define REG_POLL_INTERVAL_US 10000
+#define REG_POLL_TIMEOUT_US (REG_POLL_INTERVAL_US * 5)
+
+struct apple_spmi {
+ void __iomem *regs;
+};
+
+#define poll_reg(spmi, reg, val, cond) \
+ readl_poll_timeout((spmi)->regs + (reg), (val), (cond), \
+ REG_POLL_INTERVAL_US, REG_POLL_TIMEOUT_US)
+
+static inline u32 apple_spmi_pack_cmd(u8 opc, u8 sid, u16 saddr, size_t len)
+{
+ return opc | sid << 8 | saddr << 16 | (len - 1) | (1 << 15);
+}
+
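The packed command word follows the layout below, inferred from this function rather than a datasheet, so treat it as an assumption:

/* bits  7:0  opcode; for extended register ops the low bits carry len - 1
 * bits 11:8  4-bit SPMI slave id
 * bit    15  marks the final word of the command
 * bits 31:16 slave register address
 *
 * e.g. an 8-byte extended read (opcode 0x38) of sid 2, reg 0x1234:
 *   apple_spmi_pack_cmd(0x38, 2, 0x1234, 8)
 *     == 0x38 | (2 << 8) | (0x1234 << 16) | (8 - 1) | (1 << 15)
 *     == 0x1234823f
 */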
+/* Wait for Rx FIFO to have something */
+static int apple_spmi_wait_rx_not_empty(struct spmi_controller *ctrl)
+{
+ struct apple_spmi *spmi = spmi_controller_get_drvdata(ctrl);
+ int ret;
+ u32 status;
+
+ ret = poll_reg(spmi, SPMI_STATUS_REG, status, !(status & SPMI_RX_FIFO_EMPTY));
+ if (ret) {
+ dev_err(&ctrl->dev,
+ "timed out waiting for RX FIFO data\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 saddr, u8 *buf, size_t len)
+{
+ struct apple_spmi *spmi = spmi_controller_get_drvdata(ctrl);
+ u32 spmi_cmd = apple_spmi_pack_cmd(opc, sid, saddr, len);
+ u32 rsp;
+ size_t len_read = 0;
+ u8 i;
+ int ret;
+
+ writel(spmi_cmd, spmi->regs + SPMI_CMD_REG);
+
+ ret = apple_spmi_wait_rx_not_empty(ctrl);
+ if (ret)
+ return ret;
+
+ /* Discard SPMI reply status */
+ readl(spmi->regs + SPMI_RSP_REG);
+
+ /* Read SPMI data reply */
+ while (len_read < len) {
+ rsp = readl(spmi->regs + SPMI_RSP_REG);
+ i = 0;
+ while ((len_read < len) && (i < 4)) {
+ buf[len_read++] = ((0xff << (8 * i)) & rsp) >> (8 * i);
+ i += 1;
+ }
+ }
+
+ return 0;
+}
+
+static int spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 saddr, const u8 *buf, size_t len)
+{
+ struct apple_spmi *spmi = spmi_controller_get_drvdata(ctrl);
+ u32 spmi_cmd = apple_spmi_pack_cmd(opc, sid, saddr, len);
+ size_t i = 0, j;
+ int ret;
+
+ writel(spmi_cmd, spmi->regs + SPMI_CMD_REG);
+
+ while (i < len) {
+ j = 0;
+ spmi_cmd = 0;
+ while ((j < 4) && (i < len))
+ spmi_cmd |= buf[i++] << (j++ * 8);
+
+ writel(spmi_cmd, spmi->regs + SPMI_CMD_REG);
+ }
+
+ ret = apple_spmi_wait_rx_not_empty(ctrl);
+ if (ret)
+ return ret;
+
+ /* Discard */
+ readl(spmi->regs + SPMI_RSP_REG);
+
+ return 0;
+}
+
+static int apple_spmi_probe(struct platform_device *pdev)
+{
+ struct apple_spmi *spmi;
+ struct spmi_controller *ctrl;
+ int ret;
+
+ ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*spmi));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ spmi = spmi_controller_get_drvdata(ctrl);
+
+ spmi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spmi->regs))
+ return PTR_ERR(spmi->regs);
+
+ ctrl->dev.of_node = pdev->dev.of_node;
+
+ ctrl->read_cmd = spmi_read_cmd;
+ ctrl->write_cmd = spmi_write_cmd;
+
+ ret = devm_spmi_controller_add(&pdev->dev, ctrl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "spmi_controller_add failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id apple_spmi_match_table[] = {
+ { .compatible = "apple,spmi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_spmi_match_table);
+
+static struct platform_driver apple_spmi_driver = {
+ .probe = apple_spmi_probe,
+ .driver = {
+ .name = "apple-spmi",
+ .of_match_table = apple_spmi_match_table,
+ },
+};
+module_platform_driver(apple_spmi_driver);
+
+MODULE_AUTHOR("Jean-Francois Bortolotti <jeff@borto.fr>");
+MODULE_DESCRIPTION("Apple SoC SPMI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 5c058db21821..91581974ef84 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1737,7 +1737,7 @@ static int spmi_pmic_arb_bus_init(struct platform_device *pdev,
dev_dbg(&pdev->dev, "adding irq domain for bus %d\n", bus_index);
- bus->domain = irq_domain_add_tree(node, &pmic_arb_irq_domain_ops, bus);
+ bus->domain = irq_domain_create_tree(of_fwnode_handle(node), &pmic_arb_irq_domain_ops, bus);
if (!bus->domain) {
dev_err(&pdev->dev, "unable to create irq_domain\n");
return -ENOMEM;
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 897cb8db5084..f9426a586653 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -148,8 +148,8 @@ static int ssb_gpio_irq_chipco_domain_init(struct ssb_bus *bus)
if (bus->bustype != SSB_BUSTYPE_SSB)
return 0;
- bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
- &irq_domain_simple_ops, chipco);
+ bus->irq_domain = irq_domain_create_linear(NULL, chip->ngpio, &irq_domain_simple_ops,
+ chipco);
if (!bus->irq_domain) {
err = -ENODEV;
goto err_irq_domain;
@@ -347,8 +347,8 @@ static int ssb_gpio_irq_extif_domain_init(struct ssb_bus *bus)
if (bus->bustype != SSB_BUSTYPE_SSB)
return 0;
- bus->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
- &irq_domain_simple_ops, extif);
+ bus->irq_domain = irq_domain_create_linear(NULL, chip->ngpio, &irq_domain_simple_ops,
+ extif);
if (!bus->irq_domain) {
err = -ENODEV;
goto err_irq_domain;
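
The spmi-pmic-arb and ssb hunks above are the same mechanical migration from
the OF-specific irq_domain_add_*() helpers to the fwnode-based constructors;
the general shape is (sketch, generic names):

	/* before: d = irq_domain_add_linear(np, n, &ops, priv); */
	d = irq_domain_create_linear(of_fwnode_handle(np), n, &ops, priv);

of_fwnode_handle(NULL) evaluates to NULL, so callers that passed a NULL
device node (as the ssb driver does) need no further change.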
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index dcf6a70455cc..c2655768209a 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -8,160 +8,136 @@ menuconfig FB_TFT
select FB_BACKLIGHT
select FB_SYSMEM_HELPERS_DEFERRED
+if FB_TFT
+
config FB_TFT_AGM1264K_FL
tristate "FB driver for the AGM1264K-FL LCD display"
- depends on FB_TFT
help
Framebuffer support for the AGM1264K-FL LCD display (two Samsung KS0108 compatible chips)
config FB_TFT_BD663474
tristate "FB driver for the BD663474 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for BD663474
config FB_TFT_HX8340BN
tristate "FB driver for the HX8340BN LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for HX8340BN
config FB_TFT_HX8347D
tristate "FB driver for the HX8347D LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for HX8347D
config FB_TFT_HX8353D
tristate "FB driver for the HX8353D LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for HX8353D
config FB_TFT_HX8357D
tristate "FB driver for the HX8357D LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for HX8357D
config FB_TFT_ILI9163
tristate "FB driver for the ILI9163 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9163
config FB_TFT_ILI9320
tristate "FB driver for the ILI9320 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9320
config FB_TFT_ILI9325
tristate "FB driver for the ILI9325 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9325
config FB_TFT_ILI9340
tristate "FB driver for the ILI9340 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9340
config FB_TFT_ILI9341
tristate "FB driver for the ILI9341 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9341
config FB_TFT_ILI9481
tristate "FB driver for the ILI9481 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9481
config FB_TFT_ILI9486
tristate "FB driver for the ILI9486 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ILI9486
config FB_TFT_PCD8544
tristate "FB driver for the PCD8544 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for PCD8544
config FB_TFT_RA8875
tristate "FB driver for the RA8875 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for RA8875
config FB_TFT_S6D02A1
tristate "FB driver for the S6D02A1 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for S6D02A1
config FB_TFT_S6D1121
tristate "FB driver for the S6D1211 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for S6D1121
config FB_TFT_SEPS525
tristate "FB driver for the SEPS525 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for SEPS525
Say Y if you have such a display that utilizes this controller.
config FB_TFT_SH1106
tristate "FB driver for the SH1106 OLED Controller"
- depends on FB_TFT
help
Framebuffer support for SH1106
config FB_TFT_SSD1289
tristate "FB driver for the SSD1289 LCD Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1289
config FB_TFT_SSD1305
tristate "FB driver for the SSD1305 OLED Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1305
config FB_TFT_SSD1306
tristate "FB driver for the SSD1306 OLED Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1306
config FB_TFT_SSD1331
tristate "FB driver for the SSD1331 LCD Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1331
config FB_TFT_SSD1351
tristate "FB driver for the SSD1351 LCD Controller"
- depends on FB_TFT
help
Framebuffer support for SSD1351
config FB_TFT_ST7735R
tristate "FB driver for the ST7735R LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for ST7735R
config FB_TFT_ST7789V
tristate "FB driver for the ST7789V LCD Controller"
- depends on FB_TFT
help
This enables generic framebuffer support for the Sitronix ST7789V
display controller. The controller is intended for small color
@@ -171,30 +147,27 @@ config FB_TFT_ST7789V
config FB_TFT_TINYLCD
tristate "FB driver for tinylcd.com display"
- depends on FB_TFT
help
Custom Framebuffer support for tinylcd.com display
config FB_TFT_TLS8204
tristate "FB driver for the TLS8204 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for TLS8204
config FB_TFT_UC1611
tristate "FB driver for the UC1611 LCD controller"
- depends on FB_TFT
help
Generic Framebuffer support for UC1611
config FB_TFT_UC1701
tristate "FB driver for the UC1701 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for UC1701
config FB_TFT_UPD161704
tristate "FB driver for the uPD161704 LCD Controller"
- depends on FB_TFT
help
Generic Framebuffer support for uPD161704
+
+endif
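
(A Kconfig "if FB_TFT" ... "endif" block adds a "depends on FB_TFT" clause to
every entry it encloses, which is why each driver's individual dependency
line can be dropped without changing the resulting configuration.)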
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 445b9380ff98..94bbb3b6576d 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -27,10 +27,10 @@ MODULE_DESCRIPTION("GPIB driver for Agilent 82350b");
static int read_transfer_counter(struct agilent_82350b_priv *a_priv);
static unsigned short read_and_clear_event_status(struct gpib_board *board);
static void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count);
-static int agilent_82350b_write(struct gpib_board *board, uint8_t *buffer,
+static int agilent_82350b_write(struct gpib_board *board, u8 *buffer,
size_t length, int send_eoi, size_t *bytes_written);
-static int agilent_82350b_accel_read(struct gpib_board *board, uint8_t *buffer,
+static int agilent_82350b_accel_read(struct gpib_board *board, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
@@ -39,7 +39,7 @@ static int agilent_82350b_accel_read(struct gpib_board *board, uint8_t *buffer,
int retval = 0;
unsigned short event_status;
int i, num_fifo_bytes;
- //hardware doesn't support checking for end-of-string character when using fifo
+ /* hardware doesn't support checking for end-of-string character when using fifo */
if (tms_priv->eos_flags & REOS)
return tms9914_read(board, tms_priv, buffer, length, end, bytes_read);
@@ -50,9 +50,9 @@ static int agilent_82350b_accel_read(struct gpib_board *board, uint8_t *buffer,
*bytes_read = 0;
if (length == 0)
return 0;
- //disable fifo for the moment
+ /* disable fifo for the moment */
writeb(DIRECTION_GPIB_TO_HOST, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
- // handle corner case of board not in holdoff and one byte might slip in early
+ /* handle corner case of board not in holdoff and one byte might slip in early */
if (tms_priv->holdoff_active == 0 && length > 1) {
size_t num_bytes;
@@ -67,7 +67,8 @@ static int agilent_82350b_accel_read(struct gpib_board *board, uint8_t *buffer,
tms9914_release_holdoff(tms_priv);
i = 0;
num_fifo_bytes = length - 1;
- write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BIIE, IMR0); // disable BI interrupts
+ /* disable BI interrupts */
+ write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BIIE, IMR0);
while (i < num_fifo_bytes && *end == 0) {
int block_size;
int j;
@@ -111,17 +112,18 @@ static int agilent_82350b_accel_read(struct gpib_board *board, uint8_t *buffer,
break;
}
}
- write_byte(tms_priv, tms_priv->imr0_bits, IMR0); // re-enable BI interrupts
+ /* re-enable BI interrupts */
+ write_byte(tms_priv, tms_priv->imr0_bits, IMR0);
*bytes_read += i;
buffer += i;
length -= i;
writeb(DIRECTION_GPIB_TO_HOST, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
if (retval < 0)
return retval;
- // read last bytes if we havn't received an END yet
+ /* read last bytes if we haven't received an END yet */
if (*end == 0) {
size_t num_bytes;
- // try to make sure we holdoff after last byte read
+ /* try to make sure we holdoff after last byte read */
retval = tms9914_read(board, tms_priv, buffer, length, end, &num_bytes);
*bytes_read += num_bytes;
if (retval < 0)
@@ -145,7 +147,7 @@ static int translate_wait_return_value(struct gpib_board *board, int retval)
return 0;
}
-static int agilent_82350b_accel_write(struct gpib_board *board, uint8_t *buffer,
+static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
size_t length, int send_eoi,
size_t *bytes_written)
{
@@ -169,7 +171,7 @@ static int agilent_82350b_accel_write(struct gpib_board *board, uint8_t *buffer,
event_status = read_and_clear_event_status(board);
#ifdef EXPERIMENTAL
- // wait for previous BO to complete if any
+ /* wait for previous BO to complete if any */
retval = wait_event_interruptible(board->wait,
test_bit(DEV_CLEAR_BN, &tms_priv->state) ||
test_bit(WRITE_READY_BN, &tms_priv->state) ||
@@ -192,7 +194,7 @@ static int agilent_82350b_accel_write(struct gpib_board *board, uint8_t *buffer,
block_size = min(fifotransferlength - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
for (j = 0; j < block_size; ++j, ++i) {
- // load data into board's sram
+ /* load data into board's sram */
writeb(buffer[i], a_priv->sram_base + j);
}
writeb(ENABLE_TI_TO_SRAM, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
@@ -262,7 +264,7 @@ static irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
tms9914_interrupt_have_status(board, &a_priv->tms9914_priv, tms9914_status1,
tms9914_status2);
}
-//write-clear status bits
+ /* write-clear status bits */
if (event_status & (BUFFER_END_STATUS_BIT | TERM_COUNT_STATUS_BIT)) {
writeb(event_status & (BUFFER_END_STATUS_BIT | TERM_COUNT_STATUS_BIT),
a_priv->gpib_base + EVENT_STATUS_REG);
@@ -292,12 +294,12 @@ static void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count)
writeb(complement & 0xff, a_priv->gpib_base + XFER_COUNT_LO_REG);
writeb((complement >> 8) & 0xff, a_priv->gpib_base + XFER_COUNT_MID_REG);
- //I don't think the hi count reg is even used, but oh well
+ /* I don't think the hi count reg is even used, but oh well */
writeb((complement >> 16) & 0xf, a_priv->gpib_base + XFER_COUNT_HI_REG);
}
-// wrappers for interface functions
-static int agilent_82350b_read(struct gpib_board *board, uint8_t *buffer,
+/* wrappers for interface functions */
+static int agilent_82350b_read(struct gpib_board *board, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -305,7 +307,7 @@ static int agilent_82350b_read(struct gpib_board *board, uint8_t *buffer,
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-static int agilent_82350b_write(struct gpib_board *board, uint8_t *buffer,
+static int agilent_82350b_write(struct gpib_board *board, u8 *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
@@ -314,7 +316,7 @@ static int agilent_82350b_write(struct gpib_board *board, uint8_t *buffer,
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-static int agilent_82350b_command(struct gpib_board *board, uint8_t *buffer,
+static int agilent_82350b_command(struct gpib_board *board, u8 *buffer,
size_t length, size_t *bytes_written)
{
@@ -339,9 +341,7 @@ static int agilent_82350b_go_to_standby(struct gpib_board *board)
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-static void agilent_82350b_request_system_control(struct gpib_board *board,
- int request_control)
-
+static int agilent_82350b_request_system_control(struct gpib_board *board, int request_control)
{
struct agilent_82350b_priv *a_priv = board->private_data;
@@ -355,7 +355,7 @@ static void agilent_82350b_request_system_control(struct gpib_board *board,
writeb(0, a_priv->gpib_base + INTERNAL_CONFIG_REG);
}
writeb(a_priv->card_mode_bits, a_priv->gpib_base + CARD_MODE_REG);
- tms9914_request_system_control(board, &a_priv->tms9914_priv, request_control);
+ return tms9914_request_system_control(board, &a_priv->tms9914_priv, request_control);
}
static void agilent_82350b_interface_clear(struct gpib_board *board, int assert)
@@ -373,7 +373,7 @@ static void agilent_82350b_remote_enable(struct gpib_board *board, int enable)
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-static int agilent_82350b_enable_eos(struct gpib_board *board, uint8_t eos_byte,
+static int agilent_82350b_enable_eos(struct gpib_board *board, u8 eos_byte,
int compare_8_bits)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -412,7 +412,7 @@ static int agilent_82350b_secondary_address(struct gpib_board *board,
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-static int agilent_82350b_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int agilent_82350b_parallel_poll(struct gpib_board *board, u8 *result)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -420,7 +420,7 @@ static int agilent_82350b_parallel_poll(struct gpib_board *board, uint8_t *resul
}
static void agilent_82350b_parallel_poll_configure(struct gpib_board *board,
- uint8_t config)
+ u8 config)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -434,14 +434,14 @@ static void agilent_82350b_parallel_poll_response(struct gpib_board *board, int
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-static void agilent_82350b_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void agilent_82350b_serial_poll_response(struct gpib_board *board, u8 status)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-static uint8_t agilent_82350b_serial_poll_status(struct gpib_board *board)
+static u8 agilent_82350b_serial_poll_status(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -492,7 +492,7 @@ static void agilent_82350b_free_private(struct gpib_board *board)
}
static int init_82350a_hardware(struct gpib_board *board,
- const gpib_board_config_t *config)
+ const struct gpib_board_config *config)
{
struct agilent_82350b_priv *a_priv = board->private_data;
static const unsigned int firmware_length = 5302;
@@ -511,18 +511,18 @@ static int init_82350a_hardware(struct gpib_board *board,
PLX9050_PCI_RETRY_DELAY_BITS(64) |
PLX9050_DIRECT_SLAVE_LOCK_ENABLE_BIT;
-// load borg data
+ /* load borg data */
borg_status = readb(a_priv->borg_base);
if ((borg_status & BORG_DONE_BIT))
return 0;
- // need to programme borg
+ /* need to program borg */
if (!config->init_data || config->init_data_length != firmware_length) {
dev_err(board->gpib_dev, "the 82350A board requires firmware after powering on.\n");
return -EIO;
}
dev_dbg(board->gpib_dev, "Loading firmware...\n");
- // tickle the borg
+ /* tickle the borg */
writel(plx_cntrl_static_bits | PLX9050_USER3_DATA_BIT,
a_priv->plx_base + PLX9050_CNTRL_REG);
usleep_range(1000, 2000);
@@ -563,7 +563,7 @@ static int test_sram(struct gpib_board *board)
struct agilent_82350b_priv *a_priv = board->private_data;
unsigned int i;
const unsigned int sram_length = pci_resource_len(a_priv->pci_device, SRAM_82350A_REGION);
- // test SRAM
+ /* test SRAM */
const unsigned int byte_mask = 0xff;
for (i = 0; i < sram_length; ++i) {
@@ -587,7 +587,7 @@ static int test_sram(struct gpib_board *board)
}
static int agilent_82350b_generic_attach(struct gpib_board *board,
- const gpib_board_config_t *config,
+ const struct gpib_board_config *config,
int use_fifos)
{
@@ -606,7 +606,7 @@ static int agilent_82350b_generic_attach(struct gpib_board *board,
tms_priv->write_byte = tms9914_iomem_write_byte;
tms_priv->offset = 1;
- // find board
+ /* find board */
a_priv->pci_device = gpib_pci_get_device(config, PCI_VENDOR_ID_AGILENT,
PCI_DEVICE_ID_82350B, NULL);
if (a_priv->pci_device) {
@@ -702,7 +702,7 @@ static int agilent_82350b_generic_attach(struct gpib_board *board,
writeb(a_priv->card_mode_bits, a_priv->gpib_base + CARD_MODE_REG);
if (a_priv->model == MODEL_82350A) {
- // enable PCI interrupts for 82350a
+ /* enable PCI interrupts for 82350a */
writel(PLX9050_LINTR1_EN_BIT | PLX9050_LINTR2_POLARITY_BIT |
PLX9050_PCI_INTR_EN_BIT,
a_priv->plx_base + PLX9050_INTCSR_REG);
@@ -713,7 +713,7 @@ static int agilent_82350b_generic_attach(struct gpib_board *board,
a_priv->gpib_base + EVENT_ENABLE_REG);
writeb(ENABLE_TERM_COUNT_INTERRUPT_BIT | ENABLE_BUFFER_END_INTERRUPT_BIT |
ENABLE_TMS9914_INTERRUPTS_BIT, a_priv->gpib_base + INTERRUPT_ENABLE_REG);
- //write-clear event status bits
+ /* write-clear event status bits */
writeb(BUFFER_END_STATUS_BIT | TERM_COUNT_STATUS_BIT,
a_priv->gpib_base + EVENT_STATUS_REG);
} else {
@@ -730,13 +730,13 @@ static int agilent_82350b_generic_attach(struct gpib_board *board,
}
static int agilent_82350b_unaccel_attach(struct gpib_board *board,
- const gpib_board_config_t *config)
+ const struct gpib_board_config *config)
{
return agilent_82350b_generic_attach(board, config, 0);
}
static int agilent_82350b_accel_attach(struct gpib_board *board,
- const gpib_board_config_t *config)
+ const struct gpib_board_config *config)
{
return agilent_82350b_generic_attach(board, config, 1);
}
@@ -747,7 +747,7 @@ static void agilent_82350b_detach(struct gpib_board *board)
struct tms9914_priv *tms_priv;
if (a_priv) {
- if (a_priv->plx_base) // disable interrupts
+ if (a_priv->plx_base) /* disable interrupts */
writel(0, a_priv->plx_base + PLX9050_INTCSR_REG);
tms_priv = &a_priv->tms9914_priv;
@@ -773,7 +773,7 @@ static void agilent_82350b_detach(struct gpib_board *board)
agilent_82350b_free_private(board);
}
-static gpib_interface_t agilent_82350b_unaccel_interface = {
+static struct gpib_interface agilent_82350b_unaccel_interface = {
.name = "agilent_82350b_unaccel",
.attach = agilent_82350b_unaccel_attach,
.detach = agilent_82350b_detach,
@@ -790,7 +790,7 @@ static gpib_interface_t agilent_82350b_unaccel_interface = {
.parallel_poll = agilent_82350b_parallel_poll,
.parallel_poll_configure = agilent_82350b_parallel_poll_configure,
.parallel_poll_response = agilent_82350b_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
+ .local_parallel_poll_mode = NULL, /* XXX */
.line_status = agilent_82350b_line_status,
.update_status = agilent_82350b_update_status,
.primary_address = agilent_82350b_primary_address,
@@ -801,7 +801,7 @@ static gpib_interface_t agilent_82350b_unaccel_interface = {
.return_to_local = agilent_82350b_return_to_local,
};
-static gpib_interface_t agilent_82350b_interface = {
+static struct gpib_interface agilent_82350b_interface = {
.name = "agilent_82350b",
.attach = agilent_82350b_accel_attach,
.detach = agilent_82350b_detach,
@@ -818,7 +818,7 @@ static gpib_interface_t agilent_82350b_interface = {
.parallel_poll = agilent_82350b_parallel_poll,
.parallel_poll_configure = agilent_82350b_parallel_poll_configure,
.parallel_poll_response = agilent_82350b_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
+ .local_parallel_poll_mode = NULL, /* XXX */
.line_status = agilent_82350b_line_status,
.update_status = agilent_82350b_update_status,
.primary_address = agilent_82350b_primary_address,
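
The uint8_t -> u8 conversions in this and the following gpib files are more
than churn: kernel style prefers the kernel's own fixed-width types, and
checkpatch warns about uint8_t in new code; behaviour is unchanged since
both are unsigned 8-bit types.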
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
index 1573230c619d..ef841957297f 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
@@ -41,11 +41,11 @@ enum board_model {
MODEL_82351A
};
-// struct which defines private_data for board
+/* struct which defines private_data for board */
struct agilent_82350b_priv {
struct tms9914_priv tms9914_priv;
struct pci_dev *pci_device;
- void __iomem *plx_base; //82350a only
+ void __iomem *plx_base; /* 82350a only */
void __iomem *gpib_base;
void __iomem *sram_base;
void __iomem *misc_base;
@@ -57,12 +57,12 @@ struct agilent_82350b_priv {
bool using_fifos;
};
-//registers
+/* registers */
enum agilent_82350b_gpib_registers
{
CARD_MODE_REG = 0x1,
- CONFIG_DATA_REG = 0x2, // 82350A specific
+ CONFIG_DATA_REG = 0x2, /* 82350A specific */
INTERRUPT_ENABLE_REG = 0x3,
EVENT_STATUS_REG = 0x4,
EVENT_ENABLE_REG = 0x5,
@@ -76,8 +76,8 @@ enum agilent_82350b_gpib_registers
XFER_COUNT_HI_REG = 0xe,
TMS9914_BASE_REG = 0x10,
INTERNAL_CONFIG_REG = 0x18,
- IMR0_READ_REG = 0x19, //read
- T1_DELAY_REG = 0x19, // write
+ IMR0_READ_REG = 0x19, /* read */
+ T1_DELAY_REG = 0x19, /* write */
IMR1_READ_REG = 0x1a,
ADR_READ_REG = 0x1b,
SPMR_READ_REG = 0x1c,
@@ -89,7 +89,7 @@ enum agilent_82350b_gpib_registers
enum card_mode_bits
{
- ACTIVE_CONTROLLER_BIT = 0x2, // read-only
+ ACTIVE_CONTROLLER_BIT = 0x2, /* read-only */
CM_SYSTEM_CONTROLLER_BIT = 0x8,
ENABLE_BUS_MONITOR_BIT = 0x10,
ENABLE_PCI_IRQ_BIT = 0x20,
@@ -115,15 +115,15 @@ enum event_status_bits
{
TMS9914_IRQ_STATUS_BIT = 0x1,
IRQ_STATUS_BIT = 0x2,
- BUFFER_END_STATUS_BIT = 0x10, // write-clear
- TERM_COUNT_STATUS_BIT = 0x20, // write-clear
+ BUFFER_END_STATUS_BIT = 0x10, /* write-clear */
+ TERM_COUNT_STATUS_BIT = 0x20, /* write-clear */
};
enum stream_status_bits
{
- HALTED_STATUS_BIT = 0x1, //read
- RESTART_STREAM_BIT = 0x1, //write
+ HALTED_STATUS_BIT = 0x1, /* read */
+ RESTART_STREAM_BIT = 0x1, /* write */
};
enum internal_config_bits
@@ -135,9 +135,9 @@ enum internal_config_bits
enum sram_access_control_bits
{
- DIRECTION_GPIB_TO_HOST = 0x20, // transfer direction
- ENABLE_TI_TO_SRAM = 0x40, // enable fifo
- ENABLE_FAST_TALKER = 0x80 // added for 82350A (not used)
+ DIRECTION_GPIB_TO_HOST = 0x20, /* transfer direction */
+ ENABLE_TI_TO_SRAM = 0x40, /* enable fifo */
+ ENABLE_FAST_TALKER = 0x80 /* added for 82350A (not used) */
};
enum borg_bits
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
index da229965d98e..b923dc606d1d 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
+++ b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
@@ -26,7 +26,7 @@ static struct usb_interface *agilent_82357a_driver_interfaces[MAX_NUM_82357A_INT
static DEFINE_MUTEX(agilent_82357a_hotplug_lock); // protect board insertion and removal
static unsigned int agilent_82357a_update_status(struct gpib_board *board,
- unsigned int clear_mask);
+ unsigned int clear_mask);
static int agilent_82357a_take_control_internal(struct gpib_board *board, int synchronous);
@@ -34,16 +34,17 @@ static void agilent_82357a_bulk_complete(struct urb *urb)
{
struct agilent_82357a_urb_ctx *context = urb->context;
- up(&context->complete);
+ complete(&context->complete);
}
static void agilent_82357a_timeout_handler(struct timer_list *t)
{
- struct agilent_82357a_priv *a_priv = from_timer(a_priv, t, bulk_timer);
+ struct agilent_82357a_priv *a_priv = timer_container_of(a_priv, t,
+ bulk_timer);
struct agilent_82357a_urb_ctx *context = &a_priv->context;
context->timed_out = 1;
- up(&context->complete);
+ complete(&context->complete);
}
static int agilent_82357a_send_bulk_msg(struct agilent_82357a_priv *a_priv, void *data,
@@ -74,7 +75,7 @@ static int agilent_82357a_send_bulk_msg(struct agilent_82357a_priv *a_priv, void
}
usb_dev = interface_to_usbdev(a_priv->bus_interface);
out_pipe = usb_sndbulkpipe(usb_dev, a_priv->bulk_out_endpoint);
- sema_init(&context->complete, 0);
+ init_completion(&context->complete);
context->timed_out = 0;
usb_fill_bulk_urb(a_priv->bulk_urb, usb_dev, out_pipe, data, data_length,
&agilent_82357a_bulk_complete, context);
@@ -89,7 +90,7 @@ static int agilent_82357a_send_bulk_msg(struct agilent_82357a_priv *a_priv, void
goto cleanup;
}
mutex_unlock(&a_priv->bulk_alloc_lock);
- if (down_interruptible(&context->complete)) {
+ if (wait_for_completion_interruptible(&context->complete)) {
retval = -ERESTARTSYS;
goto cleanup;
}
@@ -142,7 +143,7 @@ static int agilent_82357a_receive_bulk_msg(struct agilent_82357a_priv *a_priv, v
}
usb_dev = interface_to_usbdev(a_priv->bus_interface);
in_pipe = usb_rcvbulkpipe(usb_dev, AGILENT_82357_BULK_IN_ENDPOINT);
- sema_init(&context->complete, 0);
+ init_completion(&context->complete);
context->timed_out = 0;
usb_fill_bulk_urb(a_priv->bulk_urb, usb_dev, in_pipe, data, data_length,
&agilent_82357a_bulk_complete, context);
@@ -157,7 +158,7 @@ static int agilent_82357a_receive_bulk_msg(struct agilent_82357a_priv *a_priv, v
goto cleanup;
}
mutex_unlock(&a_priv->bulk_alloc_lock);
- if (down_interruptible(&context->complete)) {
+ if (wait_for_completion_interruptible(&context->complete)) {
retval = -ERESTARTSYS;
goto cleanup;
}
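
The semaphore-to-completion conversion in this file follows one pattern
throughout (sketch with generic names; all three calls are the standard
<linux/completion.h> API):

	struct completion done;

	init_completion(&done);			/* was: sema_init(&sem, 0) */
	/* the URB callback later runs: complete(&done);  was: up(&sem) */
	if (wait_for_completion_interruptible(&done))
		return -ERESTARTSYS;	/* was: down_interruptible(&sem) */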
@@ -420,10 +421,10 @@ cleanup:
}
// interface functions
-int agilent_82357a_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+int agilent_82357a_command(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written);
-static int agilent_82357a_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+static int agilent_82357a_read(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *nbytes)
{
int retval;
@@ -524,9 +525,10 @@ static int agilent_82357a_read(struct gpib_board *board, uint8_t *buffer, size_t
}
kfree(in_data);
- /* Fix for a bug in 9914A that does not return the contents of ADSR
- * when the board is in listener active state and ATN is not asserted.
- * Set ATN here to obtain a valid board level ibsta
+ /*
+ * Fix for a bug in 9914A that does not return the contents of ADSR
+ * when the board is in listener active state and ATN is not asserted.
+ * Set ATN here to obtain a valid board level ibsta
*/
agilent_82357a_take_control_internal(board, 0);
@@ -535,7 +537,7 @@ static int agilent_82357a_read(struct gpib_board *board, uint8_t *buffer, size_t
}
static ssize_t agilent_82357a_generic_write(struct gpib_board *board,
- uint8_t *buffer, size_t length,
+ u8 *buffer, size_t length,
int send_commands, int send_eoi,
size_t *bytes_written)
{
@@ -675,13 +677,13 @@ static ssize_t agilent_82357a_generic_write(struct gpib_board *board,
return 0;
}
-static int agilent_82357a_write(struct gpib_board *board, uint8_t *buffer,
+static int agilent_82357a_write(struct gpib_board *board, u8 *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
return agilent_82357a_generic_write(board, buffer, length, 0, send_eoi, bytes_written);
}
-int agilent_82357a_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+int agilent_82357a_command(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
return agilent_82357a_generic_write(board, buffer, length, 1, 0, bytes_written);
@@ -715,9 +717,10 @@ static int agilent_82357a_take_control(struct gpib_board *board, int synchronous
if (!a_priv->bus_interface)
return -ENODEV;
-/* It looks like the 9914 does not handle tcs properly.
- * See comment above tms9914_take_control_workaround() in
- * drivers/gpib/tms9914/tms9914_aux.c
+/*
+ * It looks like the 9914 does not handle tcs properly.
+ * See comment above tms9914_take_control_workaround() in
+ * drivers/gpib/tms9914/tms9914_aux.c
*/
if (synchronous)
return -ETIMEDOUT;
@@ -754,9 +757,7 @@ static int agilent_82357a_go_to_standby(struct gpib_board *board)
return 0;
}
-//FIXME should change prototype to return int
-static void agilent_82357a_request_system_control(struct gpib_board *board,
- int request_control)
+static int agilent_82357a_request_system_control(struct gpib_board *board, int request_control)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev;
@@ -765,7 +766,7 @@ static void agilent_82357a_request_system_control(struct gpib_board *board,
int i = 0;
if (!a_priv->bus_interface)
- return; // -ENODEV;
+ return -ENODEV;
usb_dev = interface_to_usbdev(a_priv->bus_interface);
/* 82357B needs bit to be set in 9914 AUXCR register */
@@ -774,9 +775,7 @@ static void agilent_82357a_request_system_control(struct gpib_board *board,
writes[i].value = AUX_RQC;
a_priv->hw_control_bits |= SYSTEM_CONTROLLER;
} else {
- writes[i].value = AUX_RLC;
- a_priv->is_cic = 0;
- a_priv->hw_control_bits &= ~SYSTEM_CONTROLLER;
+ return -EINVAL;
}
++i;
writes[i].address = HW_CONTROL;
@@ -785,7 +784,7 @@ static void agilent_82357a_request_system_control(struct gpib_board *board,
retval = agilent_82357a_write_registers(a_priv, writes, i);
if (retval)
dev_err(&usb_dev->dev, "write_registers() returned error\n");
- return;// retval;
+ return retval;
}
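
Behavioural note: since the 82357 can only ever act as system controller
(see the "board can only be system controller" stubs further down),
relinquishing control is now rejected with -EINVAL instead of silently
writing AUX_RLC and clearing the controller state.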
static void agilent_82357a_interface_clear(struct gpib_board *board, int assert)
@@ -832,7 +831,7 @@ static void agilent_82357a_remote_enable(struct gpib_board *board, int enable)
return;// 0;
}
-static int agilent_82357a_enable_eos(struct gpib_board *board, uint8_t eos_byte,
+static int agilent_82357a_enable_eos(struct gpib_board *board, u8 eos_byte,
int compare_8_bits)
{
struct agilent_82357a_priv *a_priv = board->private_data;
@@ -946,7 +945,7 @@ static int agilent_82357a_secondary_address(struct gpib_board *board,
return 0;
}
-static int agilent_82357a_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int agilent_82357a_parallel_poll(struct gpib_board *board, u8 *result)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev;
@@ -988,7 +987,7 @@ static int agilent_82357a_parallel_poll(struct gpib_board *board, uint8_t *resul
return 0;
}
-static void agilent_82357a_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void agilent_82357a_parallel_poll_configure(struct gpib_board *board, u8 config)
{
//board can only be system controller
return;// 0;
@@ -1000,13 +999,13 @@ static void agilent_82357a_parallel_poll_response(struct gpib_board *board, int
return;// 0;
}
-static void agilent_82357a_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void agilent_82357a_serial_poll_response(struct gpib_board *board, u8 status)
{
//board can only be system controller
return;// 0;
}
-static uint8_t agilent_82357a_serial_poll_status(struct gpib_board *board)
+static u8 agilent_82357a_serial_poll_status(struct gpib_board *board)
{
//board can only be system controller
return 0;
@@ -1292,7 +1291,7 @@ static int agilent_82357a_init(struct gpib_board *board)
}
static inline int agilent_82357a_device_match(struct usb_interface *interface,
- const gpib_board_config_t *config)
+ const struct gpib_board_config *config)
{
struct usb_device * const usbdev = interface_to_usbdev(interface);
@@ -1305,7 +1304,7 @@ static inline int agilent_82357a_device_match(struct usb_interface *interface,
return 1;
}
-static int agilent_82357a_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int agilent_82357a_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
int retval;
int i;
@@ -1432,7 +1431,7 @@ static void agilent_82357a_detach(struct gpib_board *board)
mutex_unlock(&agilent_82357a_hotplug_lock);
}
-static gpib_interface_t agilent_82357a_gpib_interface = {
+static struct gpib_interface agilent_82357a_gpib_interface = {
.name = "agilent_82357a",
.attach = agilent_82357a_attach,
.detach = agilent_82357a_detach,
@@ -1591,7 +1590,7 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface)
{
struct usb_device *usb_dev = interface_to_usbdev(interface);
struct gpib_board *board;
- int i, retval;
+ int i, retval = 0;
mutex_lock(&agilent_82357a_hotplug_lock);
@@ -1602,8 +1601,10 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface)
break;
}
}
- if (i == MAX_NUM_82357A_INTERFACES)
+ if (i == MAX_NUM_82357A_INTERFACES) {
+ retval = -ENOENT;
goto resume_exit;
+ }
struct agilent_82357a_priv *a_priv = board->private_data;
@@ -1626,7 +1627,7 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface)
return retval;
}
// set/unset system controller
- agilent_82357a_request_system_control(board, board->master);
+ retval = agilent_82357a_request_system_control(board, board->master);
// toggle ifc if master
if (board->master) {
agilent_82357a_interface_clear(board, 1);
@@ -1644,7 +1645,7 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface)
resume_exit:
mutex_unlock(&agilent_82357a_hotplug_lock);
- return 0;
+ return retval;
}
static struct usb_driver agilent_82357a_bus_driver = {
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.h b/drivers/staging/gpib/agilent_82357a/agilent_82357a.h
index cdbc3ec5d8bd..23aa4799eb86 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.h
+++ b/drivers/staging/gpib/agilent_82357a/agilent_82357a.h
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/semaphore.h>
+#include <linux/completion.h>
#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/compiler_attributes.h>
@@ -115,7 +115,7 @@ enum xfer_abort_type {
#define INTERRUPT_BUF_LEN 8
struct agilent_82357a_urb_ctx {
- struct semaphore complete;
+ struct completion complete;
unsigned timed_out : 1;
};
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/staging/gpib/cb7210/cb7210.c
index 6b22a33a8c4f..298ed306189d 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/staging/gpib/cb7210/cb7210.c
@@ -27,7 +27,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver Measurement Computing boards using cb7210.2 and cbi488.2");
-static int cb7210_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int cb7210_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read);
static inline int have_fifo_word(const struct cb7210_priv *cb_priv)
@@ -76,7 +76,7 @@ static inline void input_fifo_enable(struct gpib_board *board, int enable)
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fifo_read(struct gpib_board *board, struct cb7210_priv *cb_priv, uint8_t *buffer,
+static int fifo_read(struct gpib_board *board, struct cb7210_priv *cb_priv, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -170,7 +170,7 @@ static int fifo_read(struct gpib_board *board, struct cb7210_priv *cb_priv, uint
return retval;
}
-static int cb7210_accel_read(struct gpib_board *board, uint8_t *buffer,
+static int cb7210_accel_read(struct gpib_board *board, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval;
@@ -264,7 +264,7 @@ static inline void output_fifo_enable(struct gpib_board *board, int enable)
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fifo_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fifo_write(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
size_t count = 0;
@@ -350,7 +350,7 @@ static int fifo_write(struct gpib_board *board, uint8_t *buffer, size_t length,
return retval;
}
-static int cb7210_accel_write(struct gpib_board *board, uint8_t *buffer,
+static int cb7210_accel_write(struct gpib_board *board, u8 *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
struct cb7210_priv *cb_priv = board->private_data;
@@ -533,14 +533,14 @@ static irqreturn_t cb7210_interrupt(int irq, void *arg)
return cb7210_internal_interrupt(arg);
}
-static int cb_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static int cb_isa_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int cb_pci_attach(struct gpib_board *board, const struct gpib_board_config *config);
+static int cb_isa_attach(struct gpib_board *board, const struct gpib_board_config *config);
static void cb_pci_detach(struct gpib_board *board);
static void cb_isa_detach(struct gpib_board *board);
// wrappers for interface functions
-static int cb7210_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int cb7210_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct cb7210_priv *priv = board->private_data;
@@ -548,7 +548,7 @@ static int cb7210_read(struct gpib_board *board, uint8_t *buffer, size_t length,
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int cb7210_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int cb7210_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct cb7210_priv *priv = board->private_data;
@@ -556,7 +556,7 @@ static int cb7210_write(struct gpib_board *board, uint8_t *buffer, size_t length
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int cb7210_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int cb7210_command(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
struct cb7210_priv *priv = board->private_data;
@@ -578,7 +578,7 @@ static int cb7210_go_to_standby(struct gpib_board *board)
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void cb7210_request_system_control(struct gpib_board *board, int request_control)
+static int cb7210_request_system_control(struct gpib_board *board, int request_control)
{
struct cb7210_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -589,7 +589,7 @@ static void cb7210_request_system_control(struct gpib_board *board, int request_
priv->hs_mode_bits &= ~HS_SYS_CONTROL;
cb7210_write_byte(priv, priv->hs_mode_bits, HS_MODE);
- nec7210_request_system_control(board, nec_priv, request_control);
+ return nec7210_request_system_control(board, nec_priv, request_control);
}
static void cb7210_interface_clear(struct gpib_board *board, int assert)
@@ -606,7 +606,7 @@ static void cb7210_remote_enable(struct gpib_board *board, int enable)
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int cb7210_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int cb7210_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct cb7210_priv *priv = board->private_data;
@@ -641,14 +641,14 @@ static int cb7210_secondary_address(struct gpib_board *board, unsigned int addre
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int cb7210_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int cb7210_parallel_poll(struct gpib_board *board, u8 *result)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void cb7210_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
+static void cb7210_parallel_poll_configure(struct gpib_board *board, u8 configuration)
{
struct cb7210_priv *priv = board->private_data;
@@ -662,14 +662,14 @@ static void cb7210_parallel_poll_response(struct gpib_board *board, int ist)
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void cb7210_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void cb7210_serial_poll_response(struct gpib_board *board, u8 status)
{
struct cb7210_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t cb7210_serial_poll_status(struct gpib_board *board)
+static u8 cb7210_serial_poll_status(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
@@ -686,7 +686,7 @@ static void cb7210_return_to_local(struct gpib_board *board)
write_byte(nec_priv, AUX_RTL, AUXMR);
}
-static gpib_interface_t cb_pci_unaccel_interface = {
+static struct gpib_interface cb_pci_unaccel_interface = {
.name = "cbi_pci_unaccel",
.attach = cb_pci_attach,
.detach = cb_pci_detach,
@@ -714,7 +714,7 @@ static gpib_interface_t cb_pci_unaccel_interface = {
.return_to_local = cb7210_return_to_local,
};
-static gpib_interface_t cb_pci_accel_interface = {
+static struct gpib_interface cb_pci_accel_interface = {
.name = "cbi_pci_accel",
.attach = cb_pci_attach,
.detach = cb_pci_detach,
@@ -742,7 +742,7 @@ static gpib_interface_t cb_pci_accel_interface = {
.return_to_local = cb7210_return_to_local,
};
-static gpib_interface_t cb_pci_interface = {
+static struct gpib_interface cb_pci_interface = {
.name = "cbi_pci",
.attach = cb_pci_attach,
.detach = cb_pci_detach,
@@ -769,7 +769,7 @@ static gpib_interface_t cb_pci_interface = {
.return_to_local = cb7210_return_to_local,
};
-static gpib_interface_t cb_isa_unaccel_interface = {
+static struct gpib_interface cb_isa_unaccel_interface = {
.name = "cbi_isa_unaccel",
.attach = cb_isa_attach,
.detach = cb_isa_detach,
@@ -797,7 +797,7 @@ static gpib_interface_t cb_isa_unaccel_interface = {
.return_to_local = cb7210_return_to_local,
};
-static gpib_interface_t cb_isa_interface = {
+static struct gpib_interface cb_isa_interface = {
.name = "cbi_isa",
.attach = cb_isa_attach,
.detach = cb_isa_detach,
@@ -824,7 +824,7 @@ static gpib_interface_t cb_isa_interface = {
.return_to_local = cb7210_return_to_local,
};
-static gpib_interface_t cb_isa_accel_interface = {
+static struct gpib_interface cb_isa_accel_interface = {
.name = "cbi_isa_accel",
.attach = cb_isa_attach,
.detach = cb_isa_detach,
@@ -905,7 +905,8 @@ static int cb7210_init(struct cb7210_priv *cb_priv, struct gpib_board *board)
cb7210_write_byte(cb_priv, cb_priv->hs_mode_bits, HS_MODE);
write_byte(nec_priv, AUX_LO_SPEED, AUXMR);
- /* set clock register for maximum (20 MHz) driving frequency
+ /*
+ * set clock register for maximum (20 MHz) driving frequency
* ICR should be set to clock in megahertz (1-15) and to zero
* for clocks faster than 15 MHz (max 20MHz)
*/
@@ -926,7 +927,7 @@ static int cb7210_init(struct cb7210_priv *cb_priv, struct gpib_board *board)
return 0;
}
-static int cb_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int cb_pci_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct cb7210_priv *cb_priv;
struct nec7210_priv *nec_priv;
@@ -1031,7 +1032,7 @@ static void cb_pci_detach(struct gpib_board *board)
cb7210_generic_detach(board);
}
-static int cb_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int cb_isa_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
int isr_flags = 0;
struct cb7210_priv *cb_priv;
@@ -1133,7 +1134,7 @@ static struct pci_driver cb7210_pci_driver = {
static int cb_gpib_config(struct pcmcia_device *link);
static void cb_gpib_release(struct pcmcia_device *link);
-static int cb_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int cb_pcmcia_attach(struct gpib_board *board, const struct gpib_board_config *config);
static void cb_pcmcia_detach(struct gpib_board *board);
/*
@@ -1246,13 +1247,8 @@ static int cb_gpib_config_iteration(struct pcmcia_device *link, void *priv_data)
static int cb_gpib_config(struct pcmcia_device *link)
{
- struct pcmcia_device *handle;
- struct local_info *dev;
int retval;
- handle = link;
- dev = link->priv;
-
retval = pcmcia_loop_config(link, &cb_gpib_config_iteration, NULL);
if (retval) {
dev_warn(&link->dev, "no configuration found\n");
@@ -1275,9 +1271,9 @@ static int cb_gpib_config(struct pcmcia_device *link)
} /* gpib_config */
/*
- * After a card is removed, gpib_release() will unregister the net
- * device, and release the PCMCIA configuration. If the device is
- * still open, this will be postponed until it is closed.
+ * After a card is removed, gpib_release() will unregister the net
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
*/
static void cb_gpib_release(struct pcmcia_device *link)
@@ -1333,7 +1329,7 @@ static void cb_pcmcia_cleanup_module(void)
pcmcia_unregister_driver(&cb_gpib_cs_driver);
}
-static gpib_interface_t cb_pcmcia_unaccel_interface = {
+static struct gpib_interface cb_pcmcia_unaccel_interface = {
.name = "cbi_pcmcia_unaccel",
.attach = cb_pcmcia_attach,
.detach = cb_pcmcia_detach,
@@ -1361,7 +1357,7 @@ static gpib_interface_t cb_pcmcia_unaccel_interface = {
.return_to_local = cb7210_return_to_local,
};
-static gpib_interface_t cb_pcmcia_interface = {
+static struct gpib_interface cb_pcmcia_interface = {
.name = "cbi_pcmcia",
.attach = cb_pcmcia_attach,
.detach = cb_pcmcia_detach,
@@ -1389,7 +1385,7 @@ static gpib_interface_t cb_pcmcia_interface = {
.return_to_local = cb7210_return_to_local,
};
-static gpib_interface_t cb_pcmcia_accel_interface = {
+static struct gpib_interface cb_pcmcia_accel_interface = {
.name = "cbi_pcmcia_accel",
.attach = cb_pcmcia_attach,
.detach = cb_pcmcia_detach,
@@ -1417,7 +1413,7 @@ static gpib_interface_t cb_pcmcia_accel_interface = {
.return_to_local = cb7210_return_to_local,
};
-static int cb_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int cb_pcmcia_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct cb7210_priv *cb_priv;
struct nec7210_priv *nec_priv;
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/staging/gpib/cb7210/cb7210.h
index 2108fe7a8ce5..13f127563ab3 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/staging/gpib/cb7210/cb7210.h
@@ -73,8 +73,8 @@ static inline int cb7210_page_in_bits(unsigned int page)
return 0x50 | (page & 0xf);
}
-static inline uint8_t cb7210_paged_read_byte(struct cb7210_priv *cb_priv,
- unsigned int register_num, unsigned int page)
+static inline u8 cb7210_paged_read_byte(struct cb7210_priv *cb_priv,
+ unsigned int register_num, unsigned int page)
{
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
u8 retval;
@@ -89,8 +89,8 @@ static inline uint8_t cb7210_paged_read_byte(struct cb7210_priv *cb_priv,
}
// don't use for register_num < 8, since it doesn't lock
-static inline uint8_t cb7210_read_byte(const struct cb7210_priv *cb_priv,
- enum hs_regs register_num)
+static inline u8 cb7210_read_byte(const struct cb7210_priv *cb_priv,
+ enum hs_regs register_num)
{
const struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
u8 retval;
@@ -99,7 +99,7 @@ static inline uint8_t cb7210_read_byte(const struct cb7210_priv *cb_priv,
return retval;
}
-static inline void cb7210_paged_write_byte(struct cb7210_priv *cb_priv, uint8_t data,
+static inline void cb7210_paged_write_byte(struct cb7210_priv *cb_priv, u8 data,
unsigned int register_num, unsigned int page)
{
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -113,7 +113,7 @@ static inline void cb7210_paged_write_byte(struct cb7210_priv *cb_priv, uint8_t
}
// don't use for register_num < 8, since it doesn't lock
-static inline void cb7210_write_byte(const struct cb7210_priv *cb_priv, uint8_t data,
+static inline void cb7210_write_byte(const struct cb7210_priv *cb_priv, u8 data,
enum hs_regs register_num)
{
const struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -134,7 +134,8 @@ enum bus_status_bits {
/* CBI 488.2 HS control */
-/* when both bit 0 and 1 are set, it
+/*
+ * when both bit 0 and 1 are set, it
* 1 clears the transmit state machine to an initial condition
* 2 clears any residual interrupts left latched on cbi488.2
* 3 resets all control bits in HS_MODE to zero
@@ -189,11 +190,12 @@ static inline unsigned int irq_bits(unsigned int irq)
}
enum cb7210_aux_cmds {
-/* AUX_RTL2 is an undocumented aux command which causes cb7210 to assert
- * (and keep asserted) local rtl message. This is used in conjunction
- * with the (stupid) cb7210 implementation
- * of the normal nec7210 AUX_RTL aux command, which
- * causes the rtl message to toggle between on and off.
+/*
+ * AUX_RTL2 is an undocumented aux command which causes cb7210 to assert
+ * (and keep asserted) local rtl message. This is used in conjunction
+ * with the (stupid) cb7210 implementation
+ * of the normal nec7210 AUX_RTL aux command, which
+ * causes the rtl message to toggle between on and off.
*/
AUX_RTL2 = 0xd,
AUX_LO_SPEED = 0x40,
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/staging/gpib/cec/cec_gpib.c
index a822fa428cd0..e8736cbf50e3 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/staging/gpib/cec/cec_gpib.c
@@ -40,12 +40,12 @@ static irqreturn_t cec_interrupt(int irq, void *arg)
#define CEC_DEV_ID 0x5cec
#define CEC_SUBID 0x9050
-static int cec_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int cec_pci_attach(struct gpib_board *board, const struct gpib_board_config *config);
static void cec_pci_detach(struct gpib_board *board);
// wrappers for interface functions
-static int cec_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+static int cec_read(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct cec_priv *priv = board->private_data;
@@ -53,7 +53,7 @@ static int cec_read(struct gpib_board *board, uint8_t *buffer, size_t length, in
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int cec_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+static int cec_write(struct gpib_board *board, u8 *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct cec_priv *priv = board->private_data;
@@ -61,7 +61,7 @@ static int cec_write(struct gpib_board *board, uint8_t *buffer, size_t length, i
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int cec_command(struct gpib_board *board, uint8_t *buffer,
+static int cec_command(struct gpib_board *board, u8 *buffer,
size_t length, size_t *bytes_written)
{
struct cec_priv *priv = board->private_data;
@@ -83,11 +83,11 @@ static int cec_go_to_standby(struct gpib_board *board)
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void cec_request_system_control(struct gpib_board *board, int request_control)
+static int cec_request_system_control(struct gpib_board *board, int request_control)
{
struct cec_priv *priv = board->private_data;
- nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
+ return nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
static void cec_interface_clear(struct gpib_board *board, int assert)
@@ -104,7 +104,7 @@ static void cec_remote_enable(struct gpib_board *board, int enable)
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int cec_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int cec_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct cec_priv *priv = board->private_data;
@@ -139,14 +139,14 @@ static int cec_secondary_address(struct gpib_board *board, unsigned int address,
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int cec_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int cec_parallel_poll(struct gpib_board *board, u8 *result)
{
struct cec_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void cec_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void cec_parallel_poll_configure(struct gpib_board *board, u8 config)
{
struct cec_priv *priv = board->private_data;
@@ -160,14 +160,14 @@ static void cec_parallel_poll_response(struct gpib_board *board, int ist)
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void cec_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void cec_serial_poll_response(struct gpib_board *board, u8 status)
{
struct cec_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t cec_serial_poll_status(struct gpib_board *board)
+static u8 cec_serial_poll_status(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
@@ -188,7 +188,7 @@ static void cec_return_to_local(struct gpib_board *board)
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-static gpib_interface_t cec_pci_interface = {
+static struct gpib_interface cec_pci_interface = {
.name = "cec_pci",
.attach = cec_pci_attach,
.detach = cec_pci_detach,
@@ -265,7 +265,7 @@ static void cec_init(struct cec_priv *cec_priv, const struct gpib_board *board)
nec7210_board_online(nec_priv, board);
}
-static int cec_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int cec_pci_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct cec_priv *cec_priv;
struct nec7210_priv *nec_priv;
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c
index 8456b97290b8..a193d64db033 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/staging/gpib/common/gpib_os.c
@@ -26,35 +26,37 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB base support");
MODULE_ALIAS_CHARDEV_MAJOR(GPIB_CODE);
-static int board_type_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg);
-static int read_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
+static int board_type_ioctl(struct gpib_file_private *file_priv,
+ struct gpib_board *board, unsigned long arg);
+static int read_ioctl(struct gpib_file_private *file_priv, struct gpib_board *board,
unsigned long arg);
-static int write_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
+static int write_ioctl(struct gpib_file_private *file_priv, struct gpib_board *board,
unsigned long arg);
-static int command_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
+static int command_ioctl(struct gpib_file_private *file_priv, struct gpib_board *board,
unsigned long arg);
static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg);
static int close_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg);
static int serial_poll_ioctl(struct gpib_board *board, unsigned long arg);
-static int wait_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg);
+static int wait_ioctl(struct gpib_file_private *file_priv,
+ struct gpib_board *board, unsigned long arg);
static int parallel_poll_ioctl(struct gpib_board *board, unsigned long arg);
static int online_ioctl(struct gpib_board *board, unsigned long arg);
static int remote_enable_ioctl(struct gpib_board *board, unsigned long arg);
static int take_control_ioctl(struct gpib_board *board, unsigned long arg);
static int line_status_ioctl(struct gpib_board *board, unsigned long arg);
-static int pad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
+static int pad_ioctl(struct gpib_board *board, struct gpib_file_private *file_priv,
unsigned long arg);
-static int sad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
+static int sad_ioctl(struct gpib_board *board, struct gpib_file_private *file_priv,
unsigned long arg);
static int eos_ioctl(struct gpib_board *board, unsigned long arg);
static int request_service_ioctl(struct gpib_board *board, unsigned long arg);
static int request_service2_ioctl(struct gpib_board *board, unsigned long arg);
-static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg);
-static int irq_ioctl(gpib_board_config_t *config, unsigned long arg);
-static int dma_ioctl(gpib_board_config_t *config, unsigned long arg);
-static int autospoll_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
+static int iobase_ioctl(struct gpib_board_config *config, unsigned long arg);
+static int irq_ioctl(struct gpib_board_config *config, unsigned long arg);
+static int dma_ioctl(struct gpib_board_config *config, unsigned long arg);
+static int autospoll_ioctl(struct gpib_board *board, struct gpib_file_private *file_priv,
unsigned long arg);
-static int mutex_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
+static int mutex_ioctl(struct gpib_board *board, struct gpib_file_private *file_priv,
unsigned long arg);
static int timeout_ioctl(struct gpib_board *board, unsigned long arg);
static int status_bytes_ioctl(struct gpib_board *board, unsigned long arg);
@@ -64,15 +66,16 @@ static int set_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long ar
static int get_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg);
static int query_board_rsv_ioctl(struct gpib_board *board, unsigned long arg);
static int interface_clear_ioctl(struct gpib_board *board, unsigned long arg);
-static int select_pci_ioctl(gpib_board_config_t *config, unsigned long arg);
-static int select_device_path_ioctl(gpib_board_config_t *config, unsigned long arg);
+static int select_pci_ioctl(struct gpib_board_config *config, unsigned long arg);
+static int select_device_path_ioctl(struct gpib_board_config *config, unsigned long arg);
static int event_ioctl(struct gpib_board *board, unsigned long arg);
static int request_system_control_ioctl(struct gpib_board *board, unsigned long arg);
static int t1_delay_ioctl(struct gpib_board *board, unsigned long arg);
-static int cleanup_open_devices(gpib_file_private_t *file_priv, struct gpib_board *board);
+static int cleanup_open_devices(struct gpib_file_private *file_priv, struct gpib_board *board);
-static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type);
+static int pop_gpib_event_nolock(struct gpib_board *board,
+ struct gpib_event_queue *queue, short *event_type);
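The conversions above follow the kernel coding-style rule against typedef'ing structs: the bare struct tag keeps the type visible at every use site. A minimal before/after sketch with a hypothetical type, not taken from this patch:

	/* before: the typedef hides the struct behind an opaque _t name */
	typedef struct gpib_example_struct {
		int value;
	} gpib_example_t;

	static int use_example(gpib_example_t *e);

	/* after: the struct tag is spelled out wherever the type appears */
	struct gpib_example {
		int value;
	};

	static int use_example(struct gpib_example *e);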
/*
* Timer functions
@@ -82,7 +85,7 @@ static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *q
static void watchdog_timeout(struct timer_list *t)
{
- struct gpib_board *board = from_timer(board, t, timer);
+ struct gpib_board *board = timer_container_of(board, t, timer);
set_bit(TIMO_NUM, &board->status);
wake_up_interruptible(&board->wait);
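timer_container_of() is the renamed from_timer(): both recover the structure that embeds the timer_list via container_of() pointer arithmetic. A self-contained user-space sketch of the same offsetof trick, with stub types standing in for the kernel ones:

	#include <stddef.h>
	#include <stdio.h>

	struct timer_stub { int pending; };

	struct board_stub {
		int id;
		struct timer_stub timer;	/* embedded member */
	};

	/* map a member pointer back to its enclosing structure */
	#define container_of_stub(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct board_stub board = { .id = 42 };
		struct timer_stub *t = &board.timer;
		struct board_stub *b =
			container_of_stub(t, struct board_stub, timer);

		printf("%d\n", b->id);	/* prints 42 */
		return 0;
	}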
@@ -119,7 +122,8 @@ int io_timed_out(struct gpib_board *board)
return 0;
}
-/* this is a function instead of a constant because of Suse
+/*
+ * this is a function instead of a constant because of Suse
* defining HZ to be a function call to get_hz()
*/
static inline int pseudo_irq_period(void)
@@ -129,7 +133,8 @@ static inline int pseudo_irq_period(void)
static void pseudo_irq_handler(struct timer_list *t)
{
- struct gpib_pseudo_irq *pseudo_irq = from_timer(pseudo_irq, t, timer);
+ struct gpib_pseudo_irq *pseudo_irq = timer_container_of(pseudo_irq, t,
+ timer);
if (pseudo_irq->handler)
pseudo_irq->handler(0, pseudo_irq->board);
@@ -170,7 +175,7 @@ EXPORT_SYMBOL(gpib_free_pseudo_irq);
static const unsigned int serial_timeout = 1000000;
-unsigned int num_status_bytes(const gpib_status_queue_t *dev)
+unsigned int num_status_bytes(const struct gpib_status_queue *dev)
{
if (!dev)
return 0;
@@ -178,10 +183,10 @@ unsigned int num_status_bytes(const gpib_status_queue_t *dev)
}
// push status byte onto back of status byte fifo
-int push_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 poll_byte)
+int push_status_byte(struct gpib_board *board, struct gpib_status_queue *device, u8 poll_byte)
{
struct list_head *head = &device->status_bytes;
- status_byte_t *status;
+ struct gpib_status_byte *status;
static const unsigned int max_num_status_bytes = 1024;
int retval;
@@ -194,7 +199,7 @@ int push_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 p
return retval;
}
- status = kmalloc(sizeof(status_byte_t), GFP_KERNEL);
+ status = kmalloc(sizeof(*status), GFP_KERNEL);
if (!status)
return -ENOMEM;
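sizeof(*status) is the preferred kernel idiom over sizeof(struct gpib_status_byte): the allocation size follows the pointer's type automatically if that type is ever renamed or changed. The general shape, as a sketch:

	struct foo *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);	/* tracks p's type */
	if (!p)
		return -ENOMEM;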
@@ -212,11 +217,11 @@ int push_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 p
}
// pop status byte from front of status byte fifo
-int pop_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 *poll_byte)
+int pop_status_byte(struct gpib_board *board, struct gpib_status_queue *device, u8 *poll_byte)
{
struct list_head *head = &device->status_bytes;
struct list_head *front = head->next;
- status_byte_t *status;
+ struct gpib_status_byte *status;
if (num_status_bytes(device) == 0)
return -EIO;
@@ -229,7 +234,7 @@ int pop_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 *p
return -EPIPE;
}
- status = list_entry(front, status_byte_t, list);
+ status = list_entry(front, struct gpib_status_byte, list);
*poll_byte = status->poll_byte;
list_del(front);
@@ -243,14 +248,14 @@ int pop_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 *p
return 0;
}
-gpib_status_queue_t *get_gpib_status_queue(struct gpib_board *board, unsigned int pad, int sad)
+struct gpib_status_queue *get_gpib_status_queue(struct gpib_board *board, unsigned int pad, int sad)
{
- gpib_status_queue_t *device;
+ struct gpib_status_queue *device;
struct list_head *list_ptr;
const struct list_head *head = &board->device_list;
for (list_ptr = head->next; list_ptr != head; list_ptr = list_ptr->next) {
- device = list_entry(list_ptr, gpib_status_queue_t, list);
+ device = list_entry(list_ptr, struct gpib_status_queue, list);
if (gpib_address_equal(device->pad, device->sad, pad, sad))
return device;
}
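This loop is an open-coded walk of an intrusive kernel list: each gpib_status_queue embeds a list_head, and list_entry() maps the link back to its containing node. A user-space sketch of the same traversal over a circular list with a bare sentinel head, using stub types:

	#include <stddef.h>

	struct list_stub { struct list_stub *next, *prev; };

	struct queue_stub {
		unsigned int pad;
		int sad;
		struct list_stub list;	/* embedded link */
	};

	#define list_entry_stub(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct queue_stub *find_stub(struct list_stub *head,
					    unsigned int pad, int sad)
	{
		struct list_stub *p;

		/* head is a sentinel; stop when the walk wraps around */
		for (p = head->next; p != head; p = p->next) {
			struct queue_stub *q =
				list_entry_stub(p, struct queue_stub, list);

			if (q->pad == pad && q->sad == sad)
				return q;
		}
		return NULL;
	}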
@@ -258,10 +263,10 @@ gpib_status_queue_t *get_gpib_status_queue(struct gpib_board *board, unsigned in
return NULL;
}
-int get_serial_poll_byte(struct gpib_board *board, unsigned int pad, int sad, unsigned int usec_timeout,
- uint8_t *poll_byte)
+int get_serial_poll_byte(struct gpib_board *board, unsigned int pad, int sad,
+ unsigned int usec_timeout, u8 *poll_byte)
{
- gpib_status_queue_t *device;
+ struct gpib_status_queue *device;
device = get_gpib_status_queue(board, pad, sad);
if (num_status_bytes(device))
@@ -291,7 +296,8 @@ int autopoll_all_devices(struct gpib_board *board)
}
dev_dbg(board->gpib_dev, "complete\n");
- /* need to wake wait queue in case someone is
+ /*
+ * need to wake wait queue in case someone is
* waiting on RQS
*/
wake_up_interruptible(&board->wait);
@@ -334,7 +340,7 @@ static int setup_serial_poll(struct gpib_board *board, unsigned int usec_timeout
}
static int read_serial_poll_byte(struct gpib_board *board, unsigned int pad,
- int sad, unsigned int usec_timeout, uint8_t *result)
+ int sad, unsigned int usec_timeout, u8 *result)
{
u8 cmd_string[8];
int end_flag;
@@ -405,7 +411,7 @@ static int cleanup_serial_poll(struct gpib_board *board, unsigned int usec_timeo
}
static int serial_poll_single(struct gpib_board *board, unsigned int pad, int sad,
- unsigned int usec_timeout, uint8_t *result)
+ unsigned int usec_timeout, u8 *result)
{
int retval, cleanup_retval;
@@ -427,7 +433,7 @@ int serial_poll_all(struct gpib_board *board, unsigned int usec_timeout)
int retval = 0;
struct list_head *cur;
const struct list_head *head = NULL;
- gpib_status_queue_t *device;
+ struct gpib_status_queue *device;
u8 result;
unsigned int num_bytes = 0;
@@ -440,7 +446,7 @@ int serial_poll_all(struct gpib_board *board, unsigned int usec_timeout)
return retval;
for (cur = head->next; cur != head; cur = cur->next) {
- device = list_entry(cur, gpib_status_queue_t, list);
+ device = list_entry(cur, struct gpib_status_queue, list);
retval = read_serial_poll_byte(board,
device->pad, device->sad, usec_timeout, &result);
if (retval < 0)
@@ -470,7 +476,7 @@ int serial_poll_all(struct gpib_board *board, unsigned int usec_timeout)
*/
int dvrsp(struct gpib_board *board, unsigned int pad, int sad,
- unsigned int usec_timeout, uint8_t *result)
+ unsigned int usec_timeout, u8 *result)
{
int status = ibstatus(board);
int retval;
@@ -492,8 +498,8 @@ int dvrsp(struct gpib_board *board, unsigned int pad, int sad,
return retval;
}
-static gpib_descriptor_t *handle_to_descriptor(const gpib_file_private_t *file_priv,
- int handle)
+static struct gpib_descriptor *handle_to_descriptor(const struct gpib_file_private *file_priv,
+ int handle)
{
if (handle < 0 || handle >= GPIB_MAX_NUM_DESCRIPTORS) {
pr_err("gpib: invalid handle %i\n", handle);
@@ -503,11 +509,11 @@ static gpib_descriptor_t *handle_to_descriptor(const gpib_file_private_t *file_p
return file_priv->descriptors[handle];
}
-static int init_gpib_file_private(gpib_file_private_t *priv)
+static int init_gpib_file_private(struct gpib_file_private *priv)
{
memset(priv, 0, sizeof(*priv));
atomic_set(&priv->holding_mutex, 0);
- priv->descriptors[0] = kmalloc(sizeof(gpib_descriptor_t), GFP_KERNEL);
+ priv->descriptors[0] = kmalloc(sizeof(struct gpib_descriptor), GFP_KERNEL);
if (!priv->descriptors[0]) {
pr_err("gpib: failed to allocate default board descriptor\n");
return -ENOMEM;
@@ -522,7 +528,7 @@ int ibopen(struct inode *inode, struct file *filep)
{
unsigned int minor = iminor(inode);
struct gpib_board *board;
- gpib_file_private_t *priv;
+ struct gpib_file_private *priv;
if (minor >= GPIB_MAX_NUM_BOARDS) {
pr_err("gpib: invalid minor number of device file\n");
@@ -531,12 +537,12 @@ int ibopen(struct inode *inode, struct file *filep)
board = &board_array[minor];
- filep->private_data = kmalloc(sizeof(gpib_file_private_t), GFP_KERNEL);
+ filep->private_data = kmalloc(sizeof(struct gpib_file_private), GFP_KERNEL);
if (!filep->private_data)
return -ENOMEM;
priv = filep->private_data;
- init_gpib_file_private((gpib_file_private_t *)filep->private_data);
+ init_gpib_file_private((struct gpib_file_private *)filep->private_data);
if (board->use_count == 0) {
int retval;
@@ -560,8 +566,8 @@ int ibclose(struct inode *inode, struct file *filep)
{
unsigned int minor = iminor(inode);
struct gpib_board *board;
- gpib_file_private_t *priv = filep->private_data;
- gpib_descriptor_t *desc;
+ struct gpib_file_private *priv = filep->private_data;
+ struct gpib_descriptor *desc;
if (minor >= GPIB_MAX_NUM_BOARDS) {
pr_err("gpib: invalid minor number of device file\n");
@@ -606,7 +612,7 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
unsigned int minor = iminor(filep->f_path.dentry->d_inode);
struct gpib_board *board;
- gpib_file_private_t *file_priv = filep->private_data;
+ struct gpib_file_private *file_priv = filep->private_data;
long retval = -ENOTTY;
if (minor >= GPIB_MAX_NUM_BOARDS) {
@@ -665,8 +671,9 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
retval = board_info_ioctl(board, arg);
goto done;
case IBMUTEX:
- /* Need to unlock board->big_gpib_mutex before potentially locking board->user_mutex
- * to maintain consistent locking order
+ /*
+ * Need to unlock board->big_gpib_mutex before potentially locking board->user_mutex
+ * to maintain consistent locking order
*/
mutex_unlock(&board->big_gpib_mutex);
return mutex_ioctl(board, file_priv, arg);
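Dropping big_gpib_mutex before taking user_mutex enforces a single global lock order, which is the standard defense against ABBA deadlocks. The hazard and the fix, sketched with hypothetical locks:

	/* deadlock: thread 1 holds A and wants B,
	 * thread 2 holds B and wants A */
	mutex_lock(&lock_a);
	mutex_lock(&lock_b);	/* wrong if others take B before A */

	/* fix: never hold A while acquiring B */
	mutex_lock(&lock_a);
	/* ... work needing only A ... */
	mutex_unlock(&lock_a);
	mutex_lock(&lock_b);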
@@ -736,8 +743,9 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
retval = take_control_ioctl(board, arg);
goto done;
case IBCMD:
- /* IO ioctls can take a long time, we need to unlock board->big_gpib_mutex
- * before we call them.
+ /*
+ * IO ioctls can take a long time, we need to unlock board->big_gpib_mutex
+ * before we call them.
*/
mutex_unlock(&board->big_gpib_mutex);
return command_ioctl(file_priv, board, arg);
@@ -760,8 +768,9 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
retval = query_board_rsv_ioctl(board, arg);
goto done;
case IBRD:
- /* IO ioctls can take a long time, we need to unlock board->big_gpib_mutex
- * before we call them.
+ /*
+ * IO ioctls can take a long time, we need to unlock board->big_gpib_mutex
+ * before we call them.
*/
mutex_unlock(&board->big_gpib_mutex);
return read_ioctl(file_priv, board, arg);
@@ -790,8 +799,9 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
retval = timeout_ioctl(board, arg);
goto done;
case IBWRT:
- /* IO ioctls can take a long time, we need to unlock board->big_gpib_mutex
- * before we call them.
+ /*
+ * IO ioctls can take a long time, we need to unlock board->big_gpib_mutex
+ * before we call them.
*/
mutex_unlock(&board->big_gpib_mutex);
return write_ioctl(file_priv, board, arg);
@@ -806,10 +816,11 @@ done:
return retval;
}
-static int board_type_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg)
+static int board_type_ioctl(struct gpib_file_private *file_priv,
+ struct gpib_board *board, unsigned long arg)
{
struct list_head *list_ptr;
- board_type_ioctl_t cmd;
+ struct gpib_board_type_ioctl cmd;
int retval;
if (!capable(CAP_SYS_ADMIN))
@@ -817,15 +828,16 @@ static int board_type_ioctl(gpib_file_private_t *file_priv, struct gpib_board *b
if (board->online)
return -EBUSY;
- retval = copy_from_user(&cmd, (void __user *)arg, sizeof(board_type_ioctl_t));
+ retval = copy_from_user(&cmd, (void __user *)arg,
+ sizeof(struct gpib_board_type_ioctl));
if (retval)
return retval;
for (list_ptr = registered_drivers.next; list_ptr != &registered_drivers;
list_ptr = list_ptr->next) {
- gpib_interface_list_t *entry;
+ struct gpib_interface_list *entry;
- entry = list_entry(list_ptr, gpib_interface_list_t, list);
+ entry = list_entry(list_ptr, struct gpib_interface_list, list);
if (strcmp(entry->interface->name, cmd.name) == 0) {
int i;
int had_module = file_priv->got_module;
@@ -857,16 +869,16 @@ static int board_type_ioctl(gpib_file_private_t *file_priv, struct gpib_board *b
return -EINVAL;
}
-static int read_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
+static int read_ioctl(struct gpib_file_private *file_priv, struct gpib_board *board,
unsigned long arg)
{
- read_write_ioctl_t read_cmd;
+ struct gpib_read_write_ioctl read_cmd;
u8 __user *userbuf;
unsigned long remain;
int end_flag = 0;
int retval;
ssize_t read_ret = 0;
- gpib_descriptor_t *desc;
+ struct gpib_descriptor *desc;
size_t nbytes;
retval = copy_from_user(&read_cmd, (void __user *)arg, sizeof(read_cmd));
@@ -913,7 +925,8 @@ static int read_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
}
read_cmd.completed_transfer_count = read_cmd.requested_transfer_count - remain;
read_cmd.end = end_flag;
- /* suppress errors (for example due to timeout or interruption by device clear)
+ /*
+ * suppress errors (for example due to timeout or interruption by device clear)
* if all bytes got sent. This prevents races that can occur in the various drivers
* if a device receives a device clear immediately after a transfer completes and
* the driver code wasn't careful enough to handle that case.
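The policy reduces to: when every requested byte was transferred, report success even if the driver flagged an error at the tail of the transfer. A sketch of the shape, with variable names assumed from this hunk rather than copied from the file:

	if (read_ret < 0 && remain == 0)
		read_ret = 0;	/* all bytes arrived; drop the late error */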
@@ -932,15 +945,15 @@ static int read_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
return read_ret;
}
-static int command_ioctl(gpib_file_private_t *file_priv,
+static int command_ioctl(struct gpib_file_private *file_priv,
struct gpib_board *board, unsigned long arg)
{
- read_write_ioctl_t cmd;
+ struct gpib_read_write_ioctl cmd;
u8 __user *userbuf;
unsigned long remain;
int retval;
int fault = 0;
- gpib_descriptor_t *desc;
+ struct gpib_descriptor *desc;
size_t bytes_written;
int no_clear_io_in_prog;
@@ -967,10 +980,11 @@ static int command_ioctl(gpib_file_private_t *file_priv,
if (!access_ok(userbuf, remain))
return -EFAULT;
- /* Write buffer loads till we empty the user supplied buffer.
- * Call drivers at least once, even if remain is zero, in
- * order to allow them to insure previous commands were
- * completely finished, in the case of a restarted ioctl.
+ /*
+	 * Write buffer loads until we empty the user-supplied buffer.
+	 * Call drivers at least once, even if remain is zero, in
+	 * order to allow them to ensure previous commands were
+	 * completely finished, in the case of a restarted ioctl.
*/
atomic_set(&desc->io_in_progress, 1);
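The at-least-once guarantee falls out of using a do/while rather than a while loop: a restarted ioctl that arrives with remain == 0 still gives the driver one call to confirm earlier command bytes completed. Sketch, with send_some() as a hypothetical stand-in for the driver hook:

	do {
		ret = send_some(board, userbuf, remain, &written);
		if (ret)
			break;
		remain -= written;
		userbuf += written;
	} while (remain > 0);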
@@ -1016,15 +1030,15 @@ static int command_ioctl(gpib_file_private_t *file_priv,
return retval;
}
-static int write_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
+static int write_ioctl(struct gpib_file_private *file_priv, struct gpib_board *board,
unsigned long arg)
{
- read_write_ioctl_t write_cmd;
+ struct gpib_read_write_ioctl write_cmd;
u8 __user *userbuf;
unsigned long remain;
int retval = 0;
int fault;
- gpib_descriptor_t *desc;
+ struct gpib_descriptor *desc;
fault = copy_from_user(&write_cmd, (void __user *)arg, sizeof(write_cmd));
if (fault)
@@ -1068,7 +1082,8 @@ static int write_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
break;
}
write_cmd.completed_transfer_count = write_cmd.requested_transfer_count - remain;
- /* suppress errors (for example due to timeout or interruption by device clear)
+ /*
+ * suppress errors (for example due to timeout or interruption by device clear)
* if all bytes got sent. This prevents races that can occur in the various drivers
* if a device receives a device clear immediately after a transfer completes and
* the driver code wasn't careful enough to handle that case.
@@ -1089,8 +1104,8 @@ static int write_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
static int status_bytes_ioctl(struct gpib_board *board, unsigned long arg)
{
- gpib_status_queue_t *device;
- spoll_bytes_ioctl_t cmd;
+ struct gpib_status_queue *device;
+ struct gpib_spoll_bytes_ioctl cmd;
int retval;
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
@@ -1114,13 +1129,14 @@ static int increment_open_device_count(struct gpib_board *board, struct list_hea
unsigned int pad, int sad)
{
struct list_head *list_ptr;
- gpib_status_queue_t *device;
+ struct gpib_status_queue *device;
- /* first see if address has already been opened, then increment
+ /*
+ * first see if address has already been opened, then increment
* open count
*/
for (list_ptr = head->next; list_ptr != head; list_ptr = list_ptr->next) {
- device = list_entry(list_ptr, gpib_status_queue_t, list);
+ device = list_entry(list_ptr, struct gpib_status_queue, list);
if (gpib_address_equal(device->pad, device->sad, pad, sad)) {
dev_dbg(board->gpib_dev, "incrementing open count for pad %i, sad %i\n",
device->pad, device->sad);
@@ -1129,8 +1145,8 @@ static int increment_open_device_count(struct gpib_board *board, struct list_hea
}
}
- /* otherwise we need to allocate a new gpib_status_queue_t */
- device = kmalloc(sizeof(gpib_status_queue_t), GFP_ATOMIC);
+ /* otherwise we need to allocate a new struct gpib_status_queue */
+ device = kmalloc(sizeof(struct gpib_status_queue), GFP_ATOMIC);
if (!device)
return -ENOMEM;
init_gpib_status_queue(device);
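GFP_ATOMIC here (rather than GFP_KERNEL) suggests the caller cannot sleep, e.g. because a spinlock is held; atomic allocations never block but fail more readily, so the NULL check is essential. The trade-off in one sketch:

	p = kmalloc(sizeof(*p), GFP_KERNEL);	/* may sleep; process context only */

	q = kmalloc(sizeof(*q), GFP_ATOMIC);	/* never sleeps; may fail sooner */
	if (!q)
		return -ENOMEM;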
@@ -1148,11 +1164,11 @@ static int increment_open_device_count(struct gpib_board *board, struct list_hea
static int subtract_open_device_count(struct gpib_board *board, struct list_head *head,
unsigned int pad, int sad, unsigned int count)
{
- gpib_status_queue_t *device;
+ struct gpib_status_queue *device;
struct list_head *list_ptr;
for (list_ptr = head->next; list_ptr != head; list_ptr = list_ptr->next) {
- device = list_entry(list_ptr, gpib_status_queue_t, list);
+ device = list_entry(list_ptr, struct gpib_status_queue, list);
if (gpib_address_equal(device->pad, device->sad, pad, sad)) {
dev_dbg(board->gpib_dev, "decrementing open count for pad %i, sad %i\n",
device->pad, device->sad);
@@ -1180,13 +1196,13 @@ static inline int decrement_open_device_count(struct gpib_board *board, struct l
return subtract_open_device_count(board, head, pad, sad, 1);
}
-static int cleanup_open_devices(gpib_file_private_t *file_priv, struct gpib_board *board)
+static int cleanup_open_devices(struct gpib_file_private *file_priv, struct gpib_board *board)
{
int retval = 0;
int i;
for (i = 0; i < GPIB_MAX_NUM_DESCRIPTORS; i++) {
- gpib_descriptor_t *desc;
+ struct gpib_descriptor *desc;
desc = file_priv->descriptors[i];
if (!desc)
@@ -1207,9 +1223,9 @@ static int cleanup_open_devices(gpib_file_private_t *file_priv, struct gpib_boar
static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg)
{
- open_dev_ioctl_t open_dev_cmd;
+ struct gpib_open_dev_ioctl open_dev_cmd;
int retval;
- gpib_file_private_t *file_priv = filep->private_data;
+ struct gpib_file_private *file_priv = filep->private_data;
int i;
retval = copy_from_user(&open_dev_cmd, (void __user *)arg, sizeof(open_dev_cmd));
@@ -1225,7 +1241,7 @@ static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned
mutex_unlock(&file_priv->descriptors_mutex);
return -ERANGE;
}
- file_priv->descriptors[i] = kmalloc(sizeof(gpib_descriptor_t), GFP_KERNEL);
+ file_priv->descriptors[i] = kmalloc(sizeof(struct gpib_descriptor), GFP_KERNEL);
if (!file_priv->descriptors[i]) {
mutex_unlock(&file_priv->descriptors_mutex);
return -ENOMEM;
@@ -1242,7 +1258,8 @@ static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned
if (retval < 0)
return retval;
- /* clear stuck srq state, since we may be able to find service request on
+ /*
+	 * clear stuck srq state, since we may be able to find a service request on
* the new device
*/
atomic_set(&board->stuck_srq, 0);
@@ -1257,8 +1274,8 @@ static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned
static int close_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg)
{
- close_dev_ioctl_t cmd;
- gpib_file_private_t *file_priv = filep->private_data;
+ struct gpib_close_dev_ioctl cmd;
+ struct gpib_file_private *file_priv = filep->private_data;
int retval;
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
@@ -1284,7 +1301,7 @@ static int close_dev_ioctl(struct file *filep, struct gpib_board *board, unsigne
static int serial_poll_ioctl(struct gpib_board *board, unsigned long arg)
{
- serial_poll_ioctl_t serial_cmd;
+ struct gpib_serial_poll_ioctl serial_cmd;
int retval;
retval = copy_from_user(&serial_cmd, (void __user *)arg, sizeof(serial_cmd));
@@ -1303,12 +1320,12 @@ static int serial_poll_ioctl(struct gpib_board *board, unsigned long arg)
return 0;
}
-static int wait_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
+static int wait_ioctl(struct gpib_file_private *file_priv, struct gpib_board *board,
unsigned long arg)
{
- wait_ioctl_t wait_cmd;
+ struct gpib_wait_ioctl wait_cmd;
int retval;
- gpib_descriptor_t *desc;
+ struct gpib_descriptor *desc;
retval = copy_from_user(&wait_cmd, (void __user *)arg, sizeof(wait_cmd));
if (retval)
@@ -1348,7 +1365,7 @@ static int parallel_poll_ioctl(struct gpib_board *board, unsigned long arg)
static int online_ioctl(struct gpib_board *board, unsigned long arg)
{
- online_ioctl_t online_cmd;
+ struct gpib_online_ioctl online_cmd;
int retval;
void __user *init_data = NULL;
@@ -1430,12 +1447,12 @@ static int line_status_ioctl(struct gpib_board *board, unsigned long arg)
return 0;
}
-static int pad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
+static int pad_ioctl(struct gpib_board *board, struct gpib_file_private *file_priv,
unsigned long arg)
{
- pad_ioctl_t cmd;
+ struct gpib_pad_ioctl cmd;
int retval;
- gpib_descriptor_t *desc;
+ struct gpib_descriptor *desc;
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
@@ -1466,12 +1483,12 @@ static int pad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
return 0;
}
-static int sad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
+static int sad_ioctl(struct gpib_board *board, struct gpib_file_private *file_priv,
unsigned long arg)
{
- sad_ioctl_t cmd;
+ struct gpib_sad_ioctl cmd;
int retval;
- gpib_descriptor_t *desc;
+ struct gpib_descriptor *desc;
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
@@ -1503,7 +1520,7 @@ static int sad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
static int eos_ioctl(struct gpib_board *board, unsigned long arg)
{
- eos_ioctl_t eos_cmd;
+ struct gpib_eos_ioctl eos_cmd;
int retval;
retval = copy_from_user(&eos_cmd, (void __user *)arg, sizeof(eos_cmd));
@@ -1527,11 +1544,11 @@ static int request_service_ioctl(struct gpib_board *board, unsigned long arg)
static int request_service2_ioctl(struct gpib_board *board, unsigned long arg)
{
- request_service2_t request_service2_cmd;
+ struct gpib_request_service2 request_service2_cmd;
int retval;
retval = copy_from_user(&request_service2_cmd, (void __user *)arg,
- sizeof(request_service2_t));
+ sizeof(struct gpib_request_service2));
if (retval)
return -EFAULT;
@@ -1539,7 +1556,7 @@ static int request_service2_ioctl(struct gpib_board *board, unsigned long arg)
request_service2_cmd.new_reason_for_service);
}
-static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg)
+static int iobase_ioctl(struct gpib_board_config *config, unsigned long arg)
{
u64 base_addr;
int retval;
@@ -1558,7 +1575,7 @@ static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg)
return 0;
}
-static int irq_ioctl(gpib_board_config_t *config, unsigned long arg)
+static int irq_ioctl(struct gpib_board_config *config, unsigned long arg)
{
unsigned int irq;
int retval;
@@ -1575,7 +1592,7 @@ static int irq_ioctl(gpib_board_config_t *config, unsigned long arg)
return 0;
}
-static int dma_ioctl(gpib_board_config_t *config, unsigned long arg)
+static int dma_ioctl(struct gpib_board_config *config, unsigned long arg)
{
unsigned int dma_channel;
int retval;
@@ -1592,12 +1609,12 @@ static int dma_ioctl(gpib_board_config_t *config, unsigned long arg)
return 0;
}
-static int autospoll_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
+static int autospoll_ioctl(struct gpib_board *board, struct gpib_file_private *file_priv,
unsigned long arg)
{
- autospoll_ioctl_t enable;
+ short enable;
int retval;
- gpib_descriptor_t *desc;
+ struct gpib_descriptor *desc;
retval = copy_from_user(&enable, (void __user *)arg, sizeof(enable));
if (retval)
@@ -1630,7 +1647,7 @@ static int autospoll_ioctl(struct gpib_board *board, gpib_file_private_t *file_p
return retval;
}
-static int mutex_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
+static int mutex_ioctl(struct gpib_board *board, struct gpib_file_private *file_priv,
unsigned long arg)
{
int retval, lock_mutex;
@@ -1687,7 +1704,7 @@ static int timeout_ioctl(struct gpib_board *board, unsigned long arg)
static int ppc_ioctl(struct gpib_board *board, unsigned long arg)
{
- ppoll_config_ioctl_t cmd;
+ struct gpib_ppoll_config_ioctl cmd;
int retval;
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
@@ -1713,7 +1730,7 @@ static int ppc_ioctl(struct gpib_board *board, unsigned long arg)
static int set_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg)
{
- local_ppoll_mode_ioctl_t cmd;
+ short cmd;
int retval;
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
@@ -1730,7 +1747,7 @@ static int set_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long ar
static int get_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg)
{
- local_ppoll_mode_ioctl_t cmd;
+ short cmd;
int retval;
cmd = board->local_ppoll_mode;
@@ -1757,7 +1774,7 @@ static int query_board_rsv_ioctl(struct gpib_board *board, unsigned long arg)
static int board_info_ioctl(const struct gpib_board *board, unsigned long arg)
{
- board_info_ioctl_t info;
+ struct gpib_board_info_ioctl info;
int retval;
info.pad = board->pad;
@@ -1790,9 +1807,9 @@ static int interface_clear_ioctl(struct gpib_board *board, unsigned long arg)
return ibsic(board, usec_duration);
}
-static int select_pci_ioctl(gpib_board_config_t *config, unsigned long arg)
+static int select_pci_ioctl(struct gpib_board_config *config, unsigned long arg)
{
- select_pci_ioctl_t selection;
+ struct gpib_select_pci_ioctl selection;
int retval;
if (!capable(CAP_SYS_ADMIN))
@@ -1808,19 +1825,20 @@ static int select_pci_ioctl(gpib_board_config_t *config, unsigned long arg)
return 0;
}
-static int select_device_path_ioctl(gpib_board_config_t *config, unsigned long arg)
+static int select_device_path_ioctl(struct gpib_board_config *config, unsigned long arg)
{
- select_device_path_ioctl_t *selection;
+ struct gpib_select_device_path_ioctl *selection;
int retval;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- selection = vmalloc(sizeof(select_device_path_ioctl_t));
+ selection = vmalloc(sizeof(struct gpib_select_device_path_ioctl));
if (!selection)
return -ENOMEM;
- retval = copy_from_user(selection, (void __user *)arg, sizeof(select_device_path_ioctl_t));
+ retval = copy_from_user(selection, (void __user *)arg,
+ sizeof(struct gpib_select_device_path_ioctl));
if (retval) {
vfree(selection);
return -EFAULT;
@@ -1836,16 +1854,16 @@ static int select_device_path_ioctl(gpib_board_config_t *config, unsigned long a
return 0;
}
-unsigned int num_gpib_events(const gpib_event_queue_t *queue)
+unsigned int num_gpib_events(const struct gpib_event_queue *queue)
{
return queue->num_events;
}
static int push_gpib_event_nolock(struct gpib_board *board, short event_type)
{
- gpib_event_queue_t *queue = &board->event_queue;
+ struct gpib_event_queue *queue = &board->event_queue;
struct list_head *head = &queue->event_head;
- gpib_event_t *event;
+ struct gpib_event *event;
static const unsigned int max_num_events = 1024;
int retval;
@@ -1858,7 +1876,7 @@ static int push_gpib_event_nolock(struct gpib_board *board, short event_type)
return retval;
}
- event = kmalloc(sizeof(gpib_event_t), GFP_ATOMIC);
+ event = kmalloc(sizeof(struct gpib_event), GFP_ATOMIC);
if (!event) {
queue->dropped_event = 1;
dev_err(board->gpib_dev, "failed to allocate memory for event\n");
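The event queue is capped (max_num_events) and records overflow in dropped_event instead of growing without bound, letting the consumer report the loss once rather than silently eating events. A compact user-space sketch of a bounded FIFO with a sticky drop flag:

	struct bounded_fifo {
		int slots[1024];
		unsigned int head, tail, count;
		int dropped;		/* sticky overflow marker */
	};

	static int fifo_push(struct bounded_fifo *f, int v)
	{
		if (f->count == 1024) {
			f->dropped = 1;	/* consumer reports -EPIPE later */
			return -1;
		}
		f->slots[f->tail] = v;
		f->tail = (f->tail + 1) % 1024;
		f->count++;
		return 0;
	}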
@@ -1888,23 +1906,24 @@ int push_gpib_event(struct gpib_board *board, short event_type)
retval = push_gpib_event_nolock(board, event_type);
spin_unlock_irqrestore(&board->event_queue.lock, flags);
- if (event_type == EventDevTrg)
+ if (event_type == EVENT_DEV_TRG)
board->status |= DTAS;
- if (event_type == EventDevClr)
+ if (event_type == EVENT_DEV_CLR)
board->status |= DCAS;
return retval;
}
EXPORT_SYMBOL(push_gpib_event);
-static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type)
+static int pop_gpib_event_nolock(struct gpib_board *board,
+ struct gpib_event_queue *queue, short *event_type)
{
struct list_head *head = &queue->event_head;
struct list_head *front = head->next;
- gpib_event_t *event;
+ struct gpib_event *event;
if (num_gpib_events(queue) == 0) {
- *event_type = EventNone;
+ *event_type = EVENT_NONE;
return 0;
}
@@ -1916,7 +1935,7 @@ static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *q
return -EPIPE;
}
- event = list_entry(front, gpib_event_t, list);
+ event = list_entry(front, struct gpib_event, list);
*event_type = event->event_type;
list_del(front);
@@ -1931,7 +1950,7 @@ static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *q
}
// pop event from front of event queue
-int pop_gpib_event(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type)
+int pop_gpib_event(struct gpib_board *board, struct gpib_event_queue *queue, short *event_type)
{
unsigned long flags;
int retval;
@@ -1944,7 +1963,7 @@ int pop_gpib_event(struct gpib_board *board, gpib_event_queue_t *queue, short *e
static int event_ioctl(struct gpib_board *board, unsigned long arg)
{
- event_ioctl_t user_event;
+ short user_event;
int retval;
short event;
@@ -1963,21 +1982,19 @@ static int event_ioctl(struct gpib_board *board, unsigned long arg)
static int request_system_control_ioctl(struct gpib_board *board, unsigned long arg)
{
- rsc_ioctl_t request_control;
+ int request_control;
int retval;
retval = copy_from_user(&request_control, (void __user *)arg, sizeof(request_control));
if (retval)
return -EFAULT;
- ibrsc(board, request_control);
-
- return 0;
+ return ibrsc(board, request_control);
}
static int t1_delay_ioctl(struct gpib_board *board, unsigned long arg)
{
- t1_delay_ioctl_t cmd;
+ unsigned int cmd;
unsigned int delay;
int retval;
@@ -2011,7 +2028,7 @@ struct gpib_board board_array[GPIB_MAX_NUM_BOARDS];
LIST_HEAD(registered_drivers);
-void init_gpib_descriptor(gpib_descriptor_t *desc)
+void init_gpib_descriptor(struct gpib_descriptor *desc)
{
desc->pad = 0;
desc->sad = -1;
@@ -2020,9 +2037,9 @@ void init_gpib_descriptor(gpib_descriptor_t *desc)
atomic_set(&desc->io_in_progress, 0);
}
-int gpib_register_driver(gpib_interface_t *interface, struct module *provider_module)
+int gpib_register_driver(struct gpib_interface *interface, struct module *provider_module)
{
- struct gpib_interface_list_struct *entry;
+ struct gpib_interface_list *entry;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -2036,7 +2053,7 @@ int gpib_register_driver(gpib_interface_t *interface, struct module *provider_mo
}
EXPORT_SYMBOL(gpib_register_driver);
-void gpib_unregister_driver(gpib_interface_t *interface)
+void gpib_unregister_driver(struct gpib_interface *interface)
{
int i;
struct list_head *list_ptr;
@@ -2053,9 +2070,9 @@ void gpib_unregister_driver(gpib_interface_t *interface)
}
}
for (list_ptr = registered_drivers.next; list_ptr != &registered_drivers;) {
- gpib_interface_list_t *entry;
+ struct gpib_interface_list *entry;
- entry = list_entry(list_ptr, gpib_interface_list_t, list);
+ entry = list_entry(list_ptr, struct gpib_interface_list, list);
list_ptr = list_ptr->next;
if (entry->interface == interface) {
list_del(&entry->list);
@@ -2065,9 +2082,9 @@ void gpib_unregister_driver(gpib_interface_t *interface)
}
EXPORT_SYMBOL(gpib_unregister_driver);
-static void init_gpib_board_config(gpib_board_config_t *config)
+static void init_gpib_board_config(struct gpib_board_config *config)
{
- memset(config, 0, sizeof(gpib_board_config_t));
+ memset(config, 0, sizeof(struct gpib_board_config));
config->pci_bus = -1;
config->pci_slot = -1;
}
@@ -2143,7 +2160,7 @@ static void init_board_array(struct gpib_board *board_array, unsigned int length
}
}
-void init_gpib_status_queue(gpib_status_queue_t *device)
+void init_gpib_status_queue(struct gpib_status_queue *device)
{
INIT_LIST_HEAD(&device->list);
INIT_LIST_HEAD(&device->status_bytes);
@@ -2208,7 +2225,7 @@ int gpib_match_device_path(struct device *dev, const char *device_path_in)
}
EXPORT_SYMBOL(gpib_match_device_path);
-struct pci_dev *gpib_pci_get_device(const gpib_board_config_t *config, unsigned int vendor_id,
+struct pci_dev *gpib_pci_get_device(const struct gpib_board_config *config, unsigned int vendor_id,
unsigned int device_id, struct pci_dev *from)
{
struct pci_dev *pci_device = from;
@@ -2227,7 +2244,7 @@ struct pci_dev *gpib_pci_get_device(const gpib_board_config_t *config, unsigned
}
EXPORT_SYMBOL(gpib_pci_get_device);
-struct pci_dev *gpib_pci_get_subsys(const gpib_board_config_t *config, unsigned int vendor_id,
+struct pci_dev *gpib_pci_get_subsys(const struct gpib_board_config *config, unsigned int vendor_id,
unsigned int device_id, unsigned int ss_vendor,
unsigned int ss_device,
struct pci_dev *from)
diff --git a/drivers/staging/gpib/common/iblib.c b/drivers/staging/gpib/common/iblib.c
index b297261818f2..549280d9a6e9 100644
--- a/drivers/staging/gpib/common/iblib.c
+++ b/drivers/staging/gpib/common/iblib.c
@@ -33,9 +33,10 @@ int ibcac(struct gpib_board *board, int sync, int fallback_to_async)
return 0;
if (sync && (status & LACS) == 0)
- /* tcs (take control synchronously) can only possibly work when
- * controller is listener. Error code also needs to be -ETIMEDOUT
- * or it will giveout without doing fallback.
+ /*
+	 * tcs (take control synchronously) can only possibly work when the
+	 * controller is a listener. The error code also needs to be -ETIMEDOUT
+	 * or it will give up without doing the fallback.
*/
retval = -ETIMEDOUT;
else
@@ -50,7 +51,8 @@ int ibcac(struct gpib_board *board, int sync, int fallback_to_async)
return retval;
}
-/* After ATN is asserted, it should cause any connected devices
+/*
+ * After ATN is asserted, it should cause any connected devices
* to start listening for command bytes and leave acceptor idle state.
* So if ATN is asserted and neither NDAC or NRFD are asserted,
* then there are no devices and ibcmd should error out immediately.
@@ -96,7 +98,7 @@ static int check_for_command_acceptors(struct gpib_board *board)
* must be called to initialize the GPIB and enable
* the interface to leave the controller idle state.
*/
-int ibcmd(struct gpib_board *board, uint8_t *buf, size_t length, size_t *bytes_written)
+int ibcmd(struct gpib_board *board, u8 *buf, size_t length, size_t *bytes_written)
{
ssize_t ret = 0;
int status;
@@ -218,7 +220,8 @@ int ibonline(struct gpib_board *board)
board->interface->detach(board);
return retval;
}
- /* nios2nommu on 2.6.11 uclinux kernel has weird problems
+ /*
+ * nios2nommu on 2.6.11 uclinux kernel has weird problems
* with autospoll thread causing huge slowdowns
*/
#ifndef CONFIG_NIOS2
@@ -297,7 +300,7 @@ int iblines(const struct gpib_board *board, short *lines)
* calling ibcmd.
*/
-int ibrd(struct gpib_board *board, uint8_t *buf, size_t length, int *end_flag, size_t *nbytes)
+int ibrd(struct gpib_board *board, u8 *buf, size_t length, int *end_flag, size_t *nbytes)
{
ssize_t ret = 0;
int retval;
@@ -313,7 +316,8 @@ int ibrd(struct gpib_board *board, uint8_t *buf, size_t length, int *end_flag, s
if (retval < 0)
return retval;
}
- /* XXX resetting timer here could cause timeouts take longer than they should,
+ /*
+ * XXX resetting the timer here could cause timeouts to take longer than they should,
* since read_ioctl calls this
* function in a loop, there is probably a similar problem with writes/commands
*/
@@ -343,7 +347,7 @@ ibrd_out:
* 1. Prior to conducting the poll the interface is placed
* in the controller active state.
*/
-int ibrpp(struct gpib_board *board, uint8_t *result)
+int ibrpp(struct gpib_board *board, u8 *result)
{
int retval = 0;
@@ -358,7 +362,7 @@ int ibrpp(struct gpib_board *board, uint8_t *result)
return retval;
}
-int ibppc(struct gpib_board *board, uint8_t configuration)
+int ibppc(struct gpib_board *board, u8 configuration)
{
configuration &= 0x1f;
board->interface->parallel_poll_configure(board, configuration);
@@ -367,7 +371,7 @@ int ibppc(struct gpib_board *board, uint8_t configuration)
return 0;
}
-int ibrsv2(struct gpib_board *board, uint8_t status_byte, int new_reason_for_service)
+int ibrsv2(struct gpib_board *board, u8 status_byte, int new_reason_for_service)
{
int board_status = ibstatus(board);
const unsigned int MSS = status_byte & request_service_bit;
@@ -418,12 +422,21 @@ int ibsic(struct gpib_board *board, unsigned int usec_duration)
return 0;
}
- /* FIXME make int */
-void ibrsc(struct gpib_board *board, int request_control)
+int ibrsc(struct gpib_board *board, int request_control)
{
+ int retval;
+
+ if (!board->interface->request_system_control)
+ return -EPERM;
+
+ retval = board->interface->request_system_control(board, request_control);
+
+ if (retval)
+ return retval;
+
board->master = request_control != 0;
- if (board->interface->request_system_control)
- board->interface->request_system_control(board, request_control);
+
+ return 0;
}
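The new ibrsc() shows the usual shape for an optional interface hook that can fail: bail out when the hook is absent, propagate its error, and commit local state only on success. The generic pattern, sketched with illustrative stub types:

	struct dev_stub;

	struct ops_stub {
		int (*op)(struct dev_stub *dev, int arg);
	};

	struct dev_stub {
		const struct ops_stub *ops;
		int state;
	};

	static int do_optional_op(struct dev_stub *dev, int arg)
	{
		int ret;

		if (!dev->ops->op)	/* hook not implemented */
			return -EPERM;

		ret = dev->ops->op(dev, arg);
		if (ret)		/* failed: leave state untouched */
			return ret;

		dev->state = arg;	/* commit only after success */
		return 0;
	}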
/*
@@ -506,15 +519,16 @@ int ibstatus(struct gpib_board *board)
return general_ibstatus(board, NULL, 0, 0, NULL);
}
-int general_ibstatus(struct gpib_board *board, const gpib_status_queue_t *device,
- int clear_mask, int set_mask, gpib_descriptor_t *desc)
+int general_ibstatus(struct gpib_board *board, const struct gpib_status_queue *device,
+ int clear_mask, int set_mask, struct gpib_descriptor *desc)
{
int status = 0;
short line_status;
if (board->private_data) {
status = board->interface->update_status(board, clear_mask);
- /* XXX should probably stop having drivers use TIMO bit in
+ /*
+ * XXX should probably stop having drivers use TIMO bit in
* board->status to avoid confusion
*/
status &= ~TIMO;
@@ -560,7 +574,7 @@ struct wait_info {
static void wait_timeout(struct timer_list *t)
{
- struct wait_info *winfo = from_timer(winfo, t, timer);
+ struct wait_info *winfo = timer_container_of(winfo, t, timer);
winfo->timed_out = 1;
wake_up_interruptible(&winfo->board->wait);
@@ -573,8 +587,8 @@ static void init_wait_info(struct wait_info *winfo)
timer_setup_on_stack(&winfo->timer, wait_timeout, 0);
}
-static int wait_satisfied(struct wait_info *winfo, gpib_status_queue_t *status_queue,
- int wait_mask, int *status, gpib_descriptor_t *desc)
+static int wait_satisfied(struct wait_info *winfo, struct gpib_status_queue *status_queue,
+ int wait_mask, int *status, struct gpib_descriptor *desc)
{
struct gpib_board *board = winfo->board;
int temp_status;
@@ -611,7 +625,7 @@ static void start_wait_timer(struct wait_info *winfo)
static void remove_wait_timer(struct wait_info *winfo)
{
timer_delete_sync(&winfo->timer);
- destroy_timer_on_stack(&winfo->timer);
+ timer_destroy_on_stack(&winfo->timer);
}
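timer_destroy_on_stack() is the current name for destroy_timer_on_stack(); it pairs with timer_setup_on_stack() so that debug-objects tracking is torn down before the stack frame goes away. The full on-stack timer lifecycle used by wait_info, as a sketch:

	struct wait_info winfo;

	timer_setup_on_stack(&winfo.timer, wait_timeout, 0);	/* init */
	mod_timer(&winfo.timer, jiffies + timeout);		/* arm */
	/* ... sleep on the wait queue until event or timeout ... */
	timer_delete_sync(&winfo.timer);	/* ensure callback is done */
	timer_destroy_on_stack(&winfo.timer);	/* mandatory for stack timers */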
/*
@@ -623,10 +637,10 @@ static void remove_wait_timer(struct wait_info *winfo)
* no condition is waited for.
*/
int ibwait(struct gpib_board *board, int wait_mask, int clear_mask, int set_mask,
- int *status, unsigned long usec_timeout, gpib_descriptor_t *desc)
+ int *status, unsigned long usec_timeout, struct gpib_descriptor *desc)
{
int retval = 0;
- gpib_status_queue_t *status_queue;
+ struct gpib_status_queue *status_queue;
struct wait_info winfo;
if (desc->is_board)
@@ -677,7 +691,7 @@ int ibwait(struct gpib_board *board, int wait_mask, int clear_mask, int set_mask
* well as the interface board itself must be
* addressed by calling ibcmd.
*/
-int ibwrt(struct gpib_board *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written)
+int ibwrt(struct gpib_board *board, u8 *buf, size_t cnt, int send_eoi, size_t *bytes_written)
{
int ret = 0;
int retval;
diff --git a/drivers/staging/gpib/common/ibsys.h b/drivers/staging/gpib/common/ibsys.h
index 19960af809c2..e5a148f513a8 100644
--- a/drivers/staging/gpib/common/ibsys.h
+++ b/drivers/staging/gpib/common/ibsys.h
@@ -22,10 +22,13 @@
int gpib_allocate_board(struct gpib_board *board);
void gpib_deallocate_board(struct gpib_board *board);
-unsigned int num_status_bytes(const gpib_status_queue_t *dev);
-int push_status_byte(struct gpib_board *board, gpib_status_queue_t *device, uint8_t poll_byte);
-int pop_status_byte(struct gpib_board *board, gpib_status_queue_t *device, uint8_t *poll_byte);
-gpib_status_queue_t *get_gpib_status_queue(struct gpib_board *board, unsigned int pad, int sad);
+unsigned int num_status_bytes(const struct gpib_status_queue *dev);
+int push_status_byte(struct gpib_board *board, struct gpib_status_queue *device,
+ u8 poll_byte);
+int pop_status_byte(struct gpib_board *board, struct gpib_status_queue *device,
+ u8 *poll_byte);
+struct gpib_status_queue *get_gpib_status_queue(struct gpib_board *board,
+ unsigned int pad, int sad);
int get_serial_poll_byte(struct gpib_board *board, unsigned int pad, int sad,
- unsigned int usec_timeout, uint8_t *poll_byte);
+ unsigned int usec_timeout, u8 *poll_byte);
int autopoll_all_devices(struct gpib_board *board);
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/staging/gpib/eastwood/fluke_gpib.c
index a6b1ac169f94..491356433249 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.c
@@ -24,15 +24,17 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB Driver for Fluke cda devices");
-static int fluke_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config);
-static int fluke_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config);
+static int fluke_attach_holdoff_all(struct gpib_board *board,
+ const struct gpib_board_config *config);
+static int fluke_attach_holdoff_end(struct gpib_board *board,
+ const struct gpib_board_config *config);
static void fluke_detach(struct gpib_board *board);
static int fluke_config_dma(struct gpib_board *board, int output);
static irqreturn_t fluke_gpib_internal_interrupt(struct gpib_board *board);
static struct platform_device *fluke_gpib_pdev;
-static uint8_t fluke_locking_read_byte(struct nec7210_priv *nec_priv, unsigned int register_number)
+static u8 fluke_locking_read_byte(struct nec7210_priv *nec_priv, unsigned int register_number)
{
u8 retval;
unsigned long flags;
@@ -43,7 +45,7 @@ static uint8_t fluke_locking_read_byte(struct nec7210_priv *nec_priv, unsigned i
return retval;
}
-static void fluke_locking_write_byte(struct nec7210_priv *nec_priv, uint8_t byte,
+static void fluke_locking_write_byte(struct nec7210_priv *nec_priv, u8 byte,
unsigned int register_number)
{
unsigned long flags;
@@ -54,7 +56,7 @@ static void fluke_locking_write_byte(struct nec7210_priv *nec_priv, uint8_t byte
}
// wrappers for interface functions
-static int fluke_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+static int fluke_read(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct fluke_priv *priv = board->private_data;
@@ -62,7 +64,7 @@ static int fluke_read(struct gpib_board *board, uint8_t *buffer, size_t length,
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int fluke_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fluke_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fluke_priv *priv = board->private_data;
@@ -70,7 +72,7 @@ static int fluke_write(struct gpib_board *board, uint8_t *buffer, size_t length,
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int fluke_command(struct gpib_board *board, uint8_t *buffer,
+static int fluke_command(struct gpib_board *board, u8 *buffer,
size_t length, size_t *bytes_written)
{
struct fluke_priv *priv = board->private_data;
@@ -92,12 +94,12 @@ static int fluke_go_to_standby(struct gpib_board *board)
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void fluke_request_system_control(struct gpib_board *board, int request_control)
+static int fluke_request_system_control(struct gpib_board *board, int request_control)
{
struct fluke_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
- nec7210_request_system_control(board, nec_priv, request_control);
+ return nec7210_request_system_control(board, nec_priv, request_control);
}
static void fluke_interface_clear(struct gpib_board *board, int assert)
@@ -114,7 +116,7 @@ static void fluke_remote_enable(struct gpib_board *board, int enable)
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int fluke_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int fluke_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct fluke_priv *priv = board->private_data;
@@ -149,14 +151,14 @@ static int fluke_secondary_address(struct gpib_board *board, unsigned int addres
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int fluke_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int fluke_parallel_poll(struct gpib_board *board, u8 *result)
{
struct fluke_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void fluke_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
+static void fluke_parallel_poll_configure(struct gpib_board *board, u8 configuration)
{
struct fluke_priv *priv = board->private_data;
@@ -170,14 +172,14 @@ static void fluke_parallel_poll_response(struct gpib_board *board, int ist)
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void fluke_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void fluke_serial_poll_response(struct gpib_board *board, u8 status)
{
struct fluke_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t fluke_serial_poll_status(struct gpib_board *board)
+static u8 fluke_serial_poll_status(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
@@ -254,7 +256,8 @@ static int lacs_or_read_ready(struct gpib_board *board)
return retval;
}
-/* Wait until it is possible for a read to do something useful. This
+/*
+ * Wait until it is possible for a read to do something useful. This
* is not essential, it only exists to prevent RFD holdoff from being released pointlessly.
*/
static int wait_for_read(struct gpib_board *board)
@@ -276,7 +279,8 @@ static int wait_for_read(struct gpib_board *board)
return retval;
}
-/* Check if the SH state machine is in SGNS. We check twice since there is a very small chance
+/*
+ * Check if the SH state machine is in SGNS. We check twice since there is a very small chance
* we could be blowing through SGNS from SIDS to SDYS if there is already a
* byte available in the handshake state machine. We are interested
* in the case where the handshake is stuck in SGNS due to no byte being
@@ -310,7 +314,8 @@ static int source_handshake_is_sids_or_sgns(struct fluke_priv *e_priv)
(source_handshake_bits == SOURCE_HANDSHAKE_SIDS_BITS);
}
-/* Wait until the gpib chip is ready to accept a data out byte.
+/*
+ * Wait until the gpib chip is ready to accept a data out byte.
 * If the chip is SGNS it is probably waiting for a byte to
* be written to it.
*/
@@ -371,7 +376,7 @@ static void fluke_dma_callback(void *arg)
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fluke_dma_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fluke_dma_write(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
struct fluke_priv *e_priv = board->private_data;
@@ -441,7 +446,8 @@ static int fluke_dma_write(struct gpib_board *board, uint8_t *buffer, size_t len
if (test_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state))
fluke_dma_callback(board);
- /* if everything went fine, try to wait until last byte is actually
+ /*
+ * if everything went fine, try to wait until last byte is actually
* transmitted across gpib (but don't try _too_ hard)
*/
if (retval == 0)
@@ -456,7 +462,7 @@ cleanup:
return retval;
}
-static int fluke_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fluke_accel_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fluke_priv *e_priv = board->private_data;
@@ -508,7 +514,8 @@ static int fluke_accel_write(struct gpib_board *board, uint8_t *buffer, size_t l
if (WARN_ON_ONCE(remainder != 1))
return -EFAULT;
- /* wait until we are sure we will be able to write the data byte
+ /*
+ * wait until we are sure we will be able to write the data byte
* into the chip before we send AUX_SEOI. This prevents a timeout
 * scenario where we send AUX_SEOI but then time out without getting
* any bytes into the gpib chip. This will result in the first byte
@@ -539,12 +546,14 @@ static int fluke_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
return result;
}
dmaengine_tx_status(chan, cookie, &state);
- // hardware doesn't support resume, so dont call this
- // method unless the dma transfer is done.
+ /*
+	 * hardware doesn't support resume, so don't call this
+ * method unless the dma transfer is done.
+ */
return state.residue;
}
-static int fluke_dma_read(struct gpib_board *board, uint8_t *buffer,
+static int fluke_dma_read(struct gpib_board *board, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fluke_priv *e_priv = board->private_data;
@@ -608,7 +617,8 @@ static int fluke_dma_read(struct gpib_board *board, uint8_t *buffer,
if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
- /* If we woke up because of end, wait until the dma transfer has pulled
+ /*
+ * If we woke up because of end, wait until the dma transfer has pulled
* the data byte associated with the end before we cancel the dma transfer.
*/
if (test_bit(RECEIVED_END_BN, &nec_priv->state)) {
@@ -625,7 +635,8 @@ static int fluke_dma_read(struct gpib_board *board, uint8_t *buffer,
// stop the dma transfer
nec7210_set_reg_bits(nec_priv, IMR2, HR_DMAI, 0);
- /* delay a little just to make sure any bytes in dma controller's fifo get
+ /*
+ * delay a little just to make sure any bytes in dma controller's fifo get
* written to memory before we disable it
*/
usleep_range(10, 15);
@@ -641,14 +652,17 @@ static int fluke_dma_read(struct gpib_board *board, uint8_t *buffer,
dma_unmap_single(board->dev, bus_address, length, DMA_FROM_DEVICE);
memcpy(buffer, e_priv->dma_buffer, *bytes_read);
- /* If we got an end interrupt, figure out if it was
+ /*
+ * If we got an end interrupt, figure out if it was
* associated with the last byte we dma'd or with a
* byte still sitting on the cb7210.
*/
spin_lock_irqsave(&board->spinlock, flags);
if (test_bit(READ_READY_BN, &nec_priv->state) == 0) {
- // There is no byte sitting on the cb7210. If we
- // saw an end interrupt, we need to deal with it now
+ /*
+ * There is no byte sitting on the cb7210. If we
+ * saw an end interrupt, we need to deal with it now
+ */
if (test_and_clear_bit(RECEIVED_END_BN, &nec_priv->state))
*end = 1;
}
@@ -657,7 +671,7 @@ static int fluke_dma_read(struct gpib_board *board, uint8_t *buffer,
return retval;
}
-static int fluke_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fluke_accel_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fluke_priv *e_priv = board->private_data;
@@ -698,7 +712,7 @@ static int fluke_accel_read(struct gpib_board *board, uint8_t *buffer, size_t le
return retval;
}
-static gpib_interface_t fluke_unaccel_interface = {
+static struct gpib_interface fluke_unaccel_interface = {
.name = "fluke_unaccel",
.attach = fluke_attach_holdoff_all,
.detach = fluke_detach,
@@ -725,7 +739,8 @@ static gpib_interface_t fluke_unaccel_interface = {
.return_to_local = fluke_return_to_local,
};
-/* fluke_hybrid uses dma for writes but not for reads. Added
+/*
+ * fluke_hybrid uses dma for writes but not for reads. Added
* to deal with occasional corruption of bytes seen when doing dma
* reads. From looking at the cb7210 vhdl, I believe the corruption
* is due to a hardware bug triggered by the cpu reading a cb7210
@@ -733,7 +748,7 @@ static gpib_interface_t fluke_unaccel_interface = {
* register just as the dma controller is also doing a read.
*/
-static gpib_interface_t fluke_hybrid_interface = {
+static struct gpib_interface fluke_hybrid_interface = {
.name = "fluke_hybrid",
.attach = fluke_attach_holdoff_all,
.detach = fluke_detach,
@@ -760,7 +775,7 @@ static gpib_interface_t fluke_hybrid_interface = {
.return_to_local = fluke_return_to_local,
};
-static gpib_interface_t fluke_interface = {
+static struct gpib_interface fluke_interface = {
.name = "fluke",
.attach = fluke_attach_holdoff_end,
.detach = fluke_detach,
@@ -802,7 +817,7 @@ irqreturn_t fluke_gpib_internal_interrupt(struct gpib_board *board)
status2 = read_byte(nec_priv, ISR2);
if (status0 & FLUKE_IFCI_BIT) {
- push_gpib_event(board, EventIFC);
+ push_gpib_event(board, EVENT_IFC);
retval = IRQ_HANDLED;
}
@@ -914,7 +929,8 @@ static int fluke_init(struct fluke_priv *e_priv, struct gpib_board *board, int h
nec7210_board_reset(nec_priv, board);
write_byte(nec_priv, AUX_LO_SPEED, AUXMR);
- /* set clock register for driving frequency
+ /*
+ * set clock register for driving frequency
* ICR should be set to clock in megahertz (1-15) and to zero
* for clocks faster than 15 MHz (max 20MHz)
*/
@@ -933,7 +949,8 @@ static int fluke_init(struct fluke_priv *e_priv, struct gpib_board *board, int h
return 0;
}
-/* This function is passed to dma_request_channel() in order to
+/*
+ * This function is passed to dma_request_channel() in order to
* select the pl330 dma channel which has been hardwired to
* the gpib controller.
*/
@@ -943,7 +960,7 @@ static bool gpib_dma_channel_filter(struct dma_chan *chan, void *filter_param)
return chan->chan_id == 0;
}
-static int fluke_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
+static int fluke_attach_impl(struct gpib_board *board, const struct gpib_board_config *config,
unsigned int handshake_mode)
{
struct fluke_priv *e_priv;
@@ -1024,10 +1041,8 @@ static int fluke_attach_impl(struct gpib_board *board, const gpib_board_config_t
}
irq = platform_get_irq(fluke_gpib_pdev, 0);
- if (irq < 0) {
- dev_err(&fluke_gpib_pdev->dev, "failed to obtain IRQ\n");
+ if (irq < 0)
return -EBUSY;
- }
retval = request_irq(irq, fluke_gpib_interrupt, isr_flags, fluke_gpib_pdev->name, board);
if (retval) {
dev_err(&fluke_gpib_pdev->dev,
@@ -1042,19 +1057,21 @@ static int fluke_attach_impl(struct gpib_board *board, const gpib_board_config_t
e_priv->dma_channel = dma_request_channel(dma_cap, gpib_dma_channel_filter, NULL);
if (!e_priv->dma_channel) {
dev_err(board->gpib_dev, "failed to allocate a dma channel.\n");
- // we don't error out here because unaccel interface will still
- // work without dma
+ /*
+ * we don't error out here because unaccel interface will still
+ * work without dma
+ */
}
return fluke_init(e_priv, board, handshake_mode);
}
-int fluke_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
+int fluke_attach_holdoff_all(struct gpib_board *board, const struct gpib_board_config *config)
{
return fluke_attach_impl(board, config, HR_HLDA);
}
-int fluke_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
+int fluke_attach_holdoff_end(struct gpib_board *board, const struct gpib_board_config *config)
{
return fluke_attach_impl(board, config, HR_HLDE);
}
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.h b/drivers/staging/gpib/eastwood/fluke_gpib.h
index 3e4348196b42..493c200d0bbf 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.h
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.h
@@ -55,8 +55,10 @@ enum state1_bits {
SOURCE_HANDSHAKE_MASK = 0x7
};
-// we customized the cb7210 vhdl to give the "data in" status
-// on the unused bit 7 of the address0 register.
+/*
+ * we customized the cb7210 vhdl to give the "data in" status
+ * on the unused bit 7 of the address0 register.
+ */
enum cb7210_address0 {
DATA_IN_STATUS = 0x80
};
@@ -67,8 +69,8 @@ static inline int cb7210_page_in_bits(unsigned int page)
}
// don't use without locking nec_priv->register_page_lock
-static inline uint8_t fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
- int register_num)
+static inline u8 fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
+ int register_num)
{
u8 retval;
@@ -77,14 +79,14 @@ static inline uint8_t fluke_read_byte_nolock(struct nec7210_priv *nec_priv,
}
// don't use without locking nec_priv->register_page_lock
-static inline void fluke_write_byte_nolock(struct nec7210_priv *nec_priv, uint8_t data,
+static inline void fluke_write_byte_nolock(struct nec7210_priv *nec_priv, u8 data,
int register_num)
{
writel(data, nec_priv->mmiobase + register_num * nec_priv->offset);
}
-static inline uint8_t fluke_paged_read_byte(struct fluke_priv *e_priv,
- unsigned int register_num, unsigned int page)
+static inline u8 fluke_paged_read_byte(struct fluke_priv *e_priv,
+ unsigned int register_num, unsigned int page)
{
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
u8 retval;
@@ -99,7 +101,7 @@ static inline uint8_t fluke_paged_read_byte(struct fluke_priv *e_priv,
return retval;
}
-static inline void fluke_paged_write_byte(struct fluke_priv *e_priv, uint8_t data,
+static inline void fluke_paged_write_byte(struct fluke_priv *e_priv, u8 data,
unsigned int register_num, unsigned int page)
{
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -124,11 +126,12 @@ enum bus_status_bits {
};
enum cb7210_aux_cmds {
-/* AUX_RTL2 is an undocumented aux command which causes cb7210 to assert
- * (and keep asserted) local rtl message. This is used in conjunction
- * with the (stupid) cb7210 implementation
- * of the normal nec7210 AUX_RTL aux command, which
- * causes the rtl message to toggle between on and off.
+/*
+ * AUX_RTL2 is an undocumented aux command which causes cb7210 to assert
+ * (and keep asserted) the local rtl message. This is used in conjunction
+ * with the (stupid) cb7210 implementation
+ * of the normal nec7210 AUX_RTL aux command, which
+ * causes the rtl message to toggle between on and off.
*/
AUX_RTL2 = 0xd,
AUX_NBAF = 0xe, // new byte available false (also clears seoi)
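(Given the distinction drawn in the comment above, a hedged sketch of holding rtl asserted with AUX_RTL2 rather than toggling it with AUX_RTL; it assumes the usual nec7210-style aux command write to AUXMR used elsewhere in this patch:)

/* Hypothetical sketch, not part of the patch. */
static void cb7210_hold_rtl(struct nec7210_priv *nec_priv)
{
	/* asserts rtl and keeps it asserted, unlike the toggling AUX_RTL */
	write_byte(nec_priv, AUX_RTL2, AUXMR);
}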
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index 53f4b3fccc3c..4138f3d2bae7 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -32,13 +32,15 @@ MODULE_DESCRIPTION("GPIB Driver for fmh_gpib_core");
MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
static irqreturn_t fmh_gpib_interrupt(int irq, void *arg);
-static int fmh_gpib_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config);
-static int fmh_gpib_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config);
+static int fmh_gpib_attach_holdoff_all(struct gpib_board *board,
+ const struct gpib_board_config *config);
+static int fmh_gpib_attach_holdoff_end(struct gpib_board *board,
+ const struct gpib_board_config *config);
static void fmh_gpib_detach(struct gpib_board *board);
static int fmh_gpib_pci_attach_holdoff_all(struct gpib_board *board,
- const gpib_board_config_t *config);
+ const struct gpib_board_config *config);
static int fmh_gpib_pci_attach_holdoff_end(struct gpib_board *board,
- const gpib_board_config_t *config);
+ const struct gpib_board_config *config);
static void fmh_gpib_pci_detach(struct gpib_board *board);
static int fmh_gpib_config_dma(struct gpib_board *board, int output);
static irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board);
@@ -46,7 +48,7 @@ static struct platform_driver fmh_gpib_platform_driver;
static struct pci_driver fmh_gpib_pci_driver;
// wrappers for interface functions
-static int fmh_gpib_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *priv = board->private_data;
@@ -54,7 +56,7 @@ static int fmh_gpib_read(struct gpib_board *board, uint8_t *buffer, size_t lengt
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int fmh_gpib_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fmh_priv *priv = board->private_data;
@@ -62,7 +64,7 @@ static int fmh_gpib_write(struct gpib_board *board, uint8_t *buffer, size_t leng
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int fmh_gpib_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_command(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
struct fmh_priv *priv = board->private_data;
@@ -84,12 +86,12 @@ static int fmh_gpib_go_to_standby(struct gpib_board *board)
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void fmh_gpib_request_system_control(struct gpib_board *board, int request_control)
+static int fmh_gpib_request_system_control(struct gpib_board *board, int request_control)
{
struct fmh_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
- nec7210_request_system_control(board, nec_priv, request_control);
+ return nec7210_request_system_control(board, nec_priv, request_control);
}
static void fmh_gpib_interface_clear(struct gpib_board *board, int assert)
@@ -106,7 +108,7 @@ static void fmh_gpib_remote_enable(struct gpib_board *board, int enable)
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int fmh_gpib_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int fmh_gpib_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct fmh_priv *priv = board->private_data;
@@ -141,14 +143,14 @@ static int fmh_gpib_secondary_address(struct gpib_board *board, unsigned int add
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int fmh_gpib_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int fmh_gpib_parallel_poll(struct gpib_board *board, u8 *result)
{
struct fmh_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void fmh_gpib_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
+static void fmh_gpib_parallel_poll_configure(struct gpib_board *board, u8 configuration)
{
struct fmh_priv *priv = board->private_data;
@@ -169,7 +171,8 @@ static void fmh_gpib_local_parallel_poll_mode(struct gpib_board *board, int loca
if (local) {
write_byte(&priv->nec7210_priv, AUX_I_REG | LOCAL_PPOLL_MODE_BIT, AUXMR);
} else {
- /* For fmh_gpib_core, remote parallel poll config mode is unaffected by the
+ /*
+ * For fmh_gpib_core, remote parallel poll config mode is unaffected by the
* state of the disable bit of the parallel poll register (unlike the tnt4882).
* So, we don't need to worry about that.
*/
@@ -177,7 +180,7 @@ static void fmh_gpib_local_parallel_poll_mode(struct gpib_board *board, int loca
}
}
-static void fmh_gpib_serial_poll_response2(struct gpib_board *board, uint8_t status,
+static void fmh_gpib_serial_poll_response2(struct gpib_board *board, u8 status,
int new_reason_for_service)
{
struct fmh_priv *priv = board->private_data;
@@ -195,7 +198,8 @@ static void fmh_gpib_serial_poll_response2(struct gpib_board *board, uint8_t sta
}
if (reqt) {
- /* It may seem like a race to issue reqt before updating
+ /*
+ * It may seem like a race to issue reqt before updating
* the status byte, but it is not. The chip does not
* issue the reqt until the SPMR is written to at
* a later time.
@@ -204,7 +208,8 @@ static void fmh_gpib_serial_poll_response2(struct gpib_board *board, uint8_t sta
} else if (reqf) {
write_byte(&priv->nec7210_priv, AUX_REQF, AUXMR);
}
- /* We need to always zero bit 6 of the status byte before writing it to
+ /*
+ * We need to always zero bit 6 of the status byte before writing it to
* the SPMR to ensure we are using
* serial poll mode SP1, and not accidentally triggering mode SP3.
*/
@@ -212,7 +217,7 @@ static void fmh_gpib_serial_poll_response2(struct gpib_board *board, uint8_t sta
spin_unlock_irqrestore(&board->spinlock, flags);
}
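(The bit-6 rule in the comment above amounts to a one-line mask before the SPMR write. A hedged sketch; the helper name and the RQS_BIT definition are illustrative, not taken from the driver:)

#define RQS_BIT	BIT(6)	/* illustrative name for status-byte bit 6 */

/*
 * Hypothetical sketch: clear bit 6 so the write selects serial poll
 * mode SP1 rather than accidentally triggering SP3.
 */
static void write_spmr_sp1(struct nec7210_priv *nec_priv, u8 status)
{
	write_byte(nec_priv, status & ~RQS_BIT, SPMR);
}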
-static uint8_t fmh_gpib_serial_poll_status(struct gpib_board *board)
+static u8 fmh_gpib_serial_poll_status(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
@@ -333,7 +338,8 @@ static int wait_for_rx_fifo_half_full_or_end(struct gpib_board *board)
return retval;
}
-/* Wait until the gpib chip is ready to accept a data out byte.
+/*
+ * Wait until the gpib chip is ready to accept a data out byte.
*/
static int wait_for_data_out_ready(struct gpib_board *board)
{
@@ -377,7 +383,8 @@ static void fmh_gpib_dma_callback(void *arg)
spin_unlock_irqrestore(&board->spinlock, flags);
}
-/* returns true when all the bytes of a write have been transferred to
+/*
+ * returns true when all the bytes of a write have been transferred to
* the chip and successfully transferred out over the gpib bus.
*/
static int fmh_gpib_all_bytes_are_sent(struct fmh_priv *e_priv)
@@ -391,7 +398,7 @@ static int fmh_gpib_all_bytes_are_sent(struct fmh_priv *e_priv)
return 1;
}
-static int fmh_gpib_dma_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_dma_write(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -469,7 +476,7 @@ cleanup:
return retval;
}
-static int fmh_gpib_accel_write(struct gpib_board *board, uint8_t *buffer,
+static int fmh_gpib_accel_write(struct gpib_board *board, u8 *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -523,7 +530,8 @@ static int fmh_gpib_accel_write(struct gpib_board *board, uint8_t *buffer,
if (WARN_ON_ONCE(remainder != 1))
return -EFAULT;
- /* wait until we are sure we will be able to write the data byte
+ /*
+ * wait until we are sure we will be able to write the data byte
* into the chip before we send AUX_SEOI. This prevents a timeout
* scenario where we send AUX_SEOI but then timeout without getting
* any bytes into the gpib chip. This will result in the first byte
@@ -554,8 +562,10 @@ static int fmh_gpib_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
return result;
}
dmaengine_tx_status(chan, cookie, &state);
- // dma330 hardware doesn't support resume, so dont call this
- // method unless the dma transfer is done.
+ /*
+ * dma330 hardware doesn't support resume, so don't call this
+ * method unless the dma transfer is done.
+ */
return state.residue;
}
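(The residue query above follows the standard dmaengine pattern; for reference, a minimal sketch of that pattern, valid only once the transfer is complete since the dma330 cannot pause:)

#include <linux/dmaengine.h>

/* Hypothetical sketch of the standard residue query used above. */
static u32 dma_bytes_remaining(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, cookie, &state);
	return state.residue;
}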
@@ -581,10 +591,11 @@ static int wait_for_tx_fifo_half_empty(struct gpib_board *board)
return retval;
}
-/* supports writing a chunk of data whose length must fit into the hardware'd xfer counter,
+/*
+ * supports writing a chunk of data whose length must fit into the hardware's xfer counter,
* called in a loop by fmh_gpib_fifo_write()
*/
-static int fmh_gpib_fifo_write_countable(struct gpib_board *board, uint8_t *buffer,
+static int fmh_gpib_fifo_write_countable(struct gpib_board *board, u8 *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -650,7 +661,7 @@ cleanup:
return retval;
}
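(For orientation, fmh_gpib_fifo_write() below drives this countable helper in a loop. A hedged sketch of that loop shape, assuming the chunk limit is fifo_xfer_counter_mask and that only the final chunk carries EOI; names other than the called helper are illustrative:)

static int chunked_fifo_write(struct gpib_board *board, u8 *buffer,
			      size_t length, int send_eoi,
			      size_t *bytes_written)
{
	size_t done = 0;
	int retval = 0;

	while (done < length && retval == 0) {
		size_t chunk = min_t(size_t, length - done,
				     fifo_xfer_counter_mask);
		size_t n = 0;

		/* assert EOI only with the last chunk of the buffer */
		retval = fmh_gpib_fifo_write_countable(board, buffer + done,
						       chunk,
						       send_eoi &&
						       done + chunk == length,
						       &n);
		done += n;
	}
	*bytes_written = done;
	return retval;
}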
-static int fmh_gpib_fifo_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_fifo_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -697,7 +708,7 @@ static int fmh_gpib_fifo_write(struct gpib_board *board, uint8_t *buffer, size_t
return retval;
}
-static int fmh_gpib_dma_read(struct gpib_board *board, uint8_t *buffer,
+static int fmh_gpib_dma_read(struct gpib_board *board, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -768,8 +779,10 @@ static int fmh_gpib_dma_read(struct gpib_board *board, uint8_t *buffer,
// stop the dma transfer
nec7210_set_reg_bits(nec_priv, IMR2, HR_DMAI, 0);
fifos_write(e_priv, 0, FIFO_CONTROL_STATUS_REG);
- // give time for pl330 to transfer any in-flight data, since
- // pl330 will throw it away when dmaengine_pause is called.
+ /*
+ * give time for pl330 to transfer any in-flight data, since
+ * pl330 will throw it away when dmaengine_pause is called.
+ */
usleep_range(10, 15);
residue = fmh_gpib_get_dma_residue(e_priv->dma_channel, dma_cookie);
if (WARN_ON_ONCE(residue > length || residue < 0))
@@ -793,14 +806,17 @@ static int fmh_gpib_dma_read(struct gpib_board *board, uint8_t *buffer,
buffer[(*bytes_read)++] = fifos_read(e_priv, FIFO_DATA_REG) & fifo_data_mask;
}
- /* If we got an end interrupt, figure out if it was
+ /*
+ * If we got an end interrupt, figure out if it was
* associated with the last byte we dma'd or with a
* byte still sitting on the cb7210.
*/
spin_lock_irqsave(&board->spinlock, flags);
if (*bytes_read > 0 && test_bit(READ_READY_BN, &nec_priv->state) == 0) {
- // If there is no byte sitting on the cb7210 and we
- // saw an end, we need to deal with it now
+ /*
+ * If there is no byte sitting on the cb7210 and we
+ * saw an end, we need to deal with it now
+ */
if (test_and_clear_bit(RECEIVED_END_BN, &nec_priv->state))
*end = 1;
}
@@ -819,7 +835,8 @@ static void fmh_gpib_release_rfd_holdoff(struct gpib_board *board, struct fmh_pr
ext_status_1 = read_byte(nec_priv, EXT_STATUS_1_REG);
- /* if there is an end byte sitting on the chip, don't release
+ /*
+ * if there is an end byte sitting on the chip, don't release
* holdoff. We want it left set after we read out the end
* byte.
*/
@@ -828,7 +845,8 @@ static void fmh_gpib_release_rfd_holdoff(struct gpib_board *board, struct fmh_pr
if (ext_status_1 & RFD_HOLDOFF_STATUS_BIT)
write_byte(nec_priv, AUX_FH, AUXMR);
- /* Check if an end byte raced in before we executed the AUX_FH command.
+ /*
+ * Check if an end byte raced in before we executed the AUX_FH command.
* If it did, we want to make sure the rfd holdoff is in effect. The end
* byte can arrive since
* AUX_RFD_HOLDOFF_ASAP doesn't immediately force the acceptor handshake
@@ -846,7 +864,7 @@ static void fmh_gpib_release_rfd_holdoff(struct gpib_board *board, struct fmh_pr
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fmh_gpib_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_accel_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -893,10 +911,11 @@ static int fmh_gpib_accel_read(struct gpib_board *board, uint8_t *buffer, size_t
return retval;
}
-/* Read a chunk of data whose length is within the limits of the hardware's
+/*
+ * Read a chunk of data whose length is within the limits of the hardware's
* xfer counter. Called in a loop from fmh_gpib_fifo_read().
*/
-static int fmh_gpib_fifo_read_countable(struct gpib_board *board, uint8_t *buffer,
+static int fmh_gpib_fifo_read_countable(struct gpib_board *board, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -954,7 +973,7 @@ cleanup:
return retval;
}
-static int fmh_gpib_fifo_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_fifo_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -969,7 +988,8 @@ static int fmh_gpib_fifo_read(struct gpib_board *board, uint8_t *buffer, size_t
*end = 0;
*bytes_read = 0;
- /* Do a little prep with data in interrupt so that following wait_for_read()
+ /*
+ * Do a little prep with data in interrupt so that following wait_for_read()
* will wake up if a data byte is received.
*/
nec7210_set_reg_bits(nec_priv, IMR1, HR_DIIE, HR_DIIE);
@@ -1011,7 +1031,7 @@ static int fmh_gpib_fifo_read(struct gpib_board *board, uint8_t *buffer, size_t
return retval;
}
-static gpib_interface_t fmh_gpib_unaccel_interface = {
+static struct gpib_interface fmh_gpib_unaccel_interface = {
.name = "fmh_gpib_unaccel",
.attach = fmh_gpib_attach_holdoff_all,
.detach = fmh_gpib_detach,
@@ -1039,7 +1059,7 @@ static gpib_interface_t fmh_gpib_unaccel_interface = {
.return_to_local = fmh_gpib_return_to_local,
};
-static gpib_interface_t fmh_gpib_interface = {
+static struct gpib_interface fmh_gpib_interface = {
.name = "fmh_gpib",
.attach = fmh_gpib_attach_holdoff_end,
.detach = fmh_gpib_detach,
@@ -1067,7 +1087,7 @@ static gpib_interface_t fmh_gpib_interface = {
.return_to_local = fmh_gpib_return_to_local,
};
-static gpib_interface_t fmh_gpib_pci_interface = {
+static struct gpib_interface fmh_gpib_pci_interface = {
.name = "fmh_gpib_pci",
.attach = fmh_gpib_pci_attach_holdoff_end,
.detach = fmh_gpib_pci_detach,
@@ -1095,7 +1115,7 @@ static gpib_interface_t fmh_gpib_pci_interface = {
.return_to_local = fmh_gpib_return_to_local,
};
-static gpib_interface_t fmh_gpib_pci_unaccel_interface = {
+static struct gpib_interface fmh_gpib_pci_unaccel_interface = {
.name = "fmh_gpib_pci_unaccel",
.attach = fmh_gpib_pci_attach_holdoff_all,
.detach = fmh_gpib_pci_detach,
@@ -1136,7 +1156,7 @@ irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board)
fifo_status = fifos_read(priv, FIFO_CONTROL_STATUS_REG);
if (status0 & IFC_INTERRUPT_BIT) {
- push_gpib_event(board, EventIFC);
+ push_gpib_event(board, EVENT_IFC);
retval = IRQ_HANDLED;
}
@@ -1166,7 +1186,8 @@ irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board)
clear_bit(RFD_HOLDOFF_BN, &nec_priv->state);
if (ext_status_1 & END_STATUS_BIT) {
- /* only set RECEIVED_END while there is still a data
+ /*
+ * only set RECEIVED_END while there is still a data
* byte sitting in the chip, to avoid spuriously
* setting it multiple times after it has been cleared
* during a read.
@@ -1179,7 +1200,8 @@ irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board)
if ((fifo_status & TX_FIFO_HALF_EMPTY_INTERRUPT_IS_ENABLED) &&
(fifo_status & TX_FIFO_HALF_EMPTY)) {
- /* We really only want to clear the
+ /*
+ * We really only want to clear the
* TX_FIFO_HALF_EMPTY_INTERRUPT_ENABLE bit in the
* FIFO_CONTROL_STATUS_REG. Since we are not being
* careful, this also has a side effect of disabling
@@ -1193,7 +1215,8 @@ irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board)
if ((fifo_status & RX_FIFO_HALF_FULL_INTERRUPT_IS_ENABLED) &&
(fifo_status & RX_FIFO_HALF_FULL)) {
- /* We really only want to clear the
+ /*
+ * We really only want to clear the
* RX_FIFO_HALF_FULL_INTERRUPT_ENABLE bit in the
* FIFO_CONTROL_STATUS_REG. Since we are not being
* careful, this also has a side effect of disabling
@@ -1335,7 +1358,7 @@ static int fmh_gpib_init(struct fmh_priv *e_priv, struct gpib_board *board, int
/* Match callback for driver_find_device */
static int fmh_gpib_device_match(struct device *dev, const void *data)
{
- const gpib_board_config_t *config = data;
+ const struct gpib_board_config *config = data;
if (dev_get_drvdata(dev))
return 0;
@@ -1351,7 +1374,7 @@ static int fmh_gpib_device_match(struct device *dev, const void *data)
return 1;
}
-static int fmh_gpib_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
+static int fmh_gpib_attach_impl(struct gpib_board *board, const struct gpib_board_config *config,
unsigned int handshake_mode, int acquire_dma)
{
struct fmh_priv *e_priv;
@@ -1424,10 +1447,8 @@ static int fmh_gpib_attach_impl(struct gpib_board *board, const gpib_board_confi
(unsigned long)resource_size(e_priv->dma_port_res));
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(board->dev, "request for IRQ failed\n");
+ if (irq < 0)
return -EBUSY;
- }
retval = request_irq(irq, fmh_gpib_interrupt, IRQF_SHARED, pdev->name, board);
if (retval) {
dev_err(board->dev,
@@ -1444,7 +1465,8 @@ static int fmh_gpib_attach_impl(struct gpib_board *board, const gpib_board_confi
return -EIO;
}
}
- /* in the future we might want to know the half-fifo size
+ /*
+ * in the future we might want to know the half-fifo size
+ * (dma_burst_length) even when not using dma, so go ahead and
* initialize it unconditionally.
*/
@@ -1454,12 +1476,12 @@ static int fmh_gpib_attach_impl(struct gpib_board *board, const gpib_board_confi
return fmh_gpib_init(e_priv, board, handshake_mode);
}
-int fmh_gpib_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
+int fmh_gpib_attach_holdoff_all(struct gpib_board *board, const struct gpib_board_config *config)
{
return fmh_gpib_attach_impl(board, config, HR_HLDA, 0);
}
-int fmh_gpib_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
+int fmh_gpib_attach_holdoff_end(struct gpib_board *board, const struct gpib_board_config *config)
{
return fmh_gpib_attach_impl(board, config, HR_HLDE, 1);
}
@@ -1497,7 +1519,8 @@ void fmh_gpib_detach(struct gpib_board *board)
fmh_gpib_generic_detach(board);
}
-static int fmh_gpib_pci_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
+static int fmh_gpib_pci_attach_impl(struct gpib_board *board,
+ const struct gpib_board_config *config,
unsigned int handshake_mode)
{
struct fmh_priv *e_priv;
@@ -1570,12 +1593,14 @@ static int fmh_gpib_pci_attach_impl(struct gpib_board *board, const gpib_board_c
return fmh_gpib_init(e_priv, board, handshake_mode);
}
-int fmh_gpib_pci_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
+int fmh_gpib_pci_attach_holdoff_all(struct gpib_board *board,
+ const struct gpib_board_config *config)
{
return fmh_gpib_pci_attach_impl(board, config, HR_HLDA);
}
-int fmh_gpib_pci_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
+int fmh_gpib_pci_attach_holdoff_end(struct gpib_board *board,
+ const struct gpib_board_config *config)
{
int retval;
struct fmh_priv *e_priv;
@@ -1631,7 +1656,6 @@ MODULE_DEVICE_TABLE(of, fmh_gpib_of_match);
static struct platform_driver fmh_gpib_platform_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = fmh_gpib_of_match,
},
.probe = &fmh_gpib_platform_probe
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
index de6fd2164414..e7602d7e1401 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
@@ -124,13 +124,13 @@ static const unsigned int fifo_data_mask = 0x00ff;
static const unsigned int fifo_xfer_counter_mask = 0x0fff;
static const unsigned int fifo_max_burst_length_mask = 0x00ff;
-static inline uint8_t gpib_cs_read_byte(struct nec7210_priv *nec_priv,
- unsigned int register_num)
+static inline u8 gpib_cs_read_byte(struct nec7210_priv *nec_priv,
+ unsigned int register_num)
{
return readb(nec_priv->mmiobase + register_num * nec_priv->offset);
}
-static inline void gpib_cs_write_byte(struct nec7210_priv *nec_priv, uint8_t data,
+static inline void gpib_cs_write_byte(struct nec7210_priv *nec_priv, u8 data,
unsigned int register_num)
{
writeb(data, nec_priv->mmiobase + register_num * nec_priv->offset);
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/staging/gpib/gpio/gpib_bitbang.c
index 86bdd381472a..625fef24a0bf 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/staging/gpib/gpio/gpib_bitbang.c
@@ -32,7 +32,8 @@
#define ENABLE_IRQ(IRQ, TYPE) irq_set_irq_type(IRQ, TYPE)
#define DISABLE_IRQ(IRQ) irq_set_irq_type(IRQ, IRQ_TYPE_NONE)
-/* Debug print levels:
+/*
+ * Debug print levels:
* 0 = load/unload info and errors that make the driver fail;
* 1 = + warnings for unforeseen events that may break the current
* operation and lead to a timeout, but do not affect the
@@ -65,7 +66,6 @@
#include <linux/gpio/machine.h>
#include <linux/gpio.h>
#include <linux/irq.h>
-#include <linux/leds.h>
static int sn7516x_used = 1, sn7516x;
module_param(sn7516x_used, int, 0660);
@@ -135,19 +135,14 @@ enum lines_t {
#define SN7516X_PINS 4
#define NUM_PINS (GPIB_PINS + SN7516X_PINS)
-DEFINE_LED_TRIGGER(ledtrig_gpib);
-#define ACT_LED_ON do { \
+#define ACT_LED_ON do { \
if (ACT_LED) \
- gpiod_direction_output(ACT_LED, 1); \
- else \
- led_trigger_event(ledtrig_gpib, LED_FULL); } \
- while (0)
-#define ACT_LED_OFF do { \
+ gpiod_direction_output(ACT_LED, 1); \
+ } while (0)
+#define ACT_LED_OFF do { \
if (ACT_LED) \
- gpiod_direction_output(ACT_LED, 0); \
- else \
- led_trigger_event(ledtrig_gpib, LED_OFF); } \
- while (0)
+ gpiod_direction_output(ACT_LED, 0); \
+ } while (0)
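(The do { ... } while (0) wrapper keeps each macro a single statement; a hypothetical illustration, not part of the patch, of why that matters once the macro body contains an if:)

/*
 * Without the wrapper, the bare if inside ACT_LED_ON would capture
 * this else and silently invert the logic.
 */
static void indicate_activity(bool active)
{
	if (active)
		ACT_LED_ON;
	else
		ACT_LED_OFF;
}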
static struct gpio_desc *all_descriptors[GPIB_PINS + SN7516X_PINS];
@@ -311,7 +306,6 @@ struct bb_priv {
int dav_seq;
long all_irqs;
int dav_idle;
- int atn_asserted;
enum talker_function_state talker_state;
enum listener_function_state listener_state;
@@ -324,7 +318,7 @@ static void set_data_lines(u8 byte);
static u8 get_data_lines(void);
static void set_data_lines_input(void);
static void set_data_lines_output(void);
-static inline int check_for_eos(struct bb_priv *priv, uint8_t byte);
+static inline int check_for_eos(struct bb_priv *priv, u8 byte);
static void set_atn(struct gpib_board *board, int atn_asserted);
static inline void SET_DIR_WRITE(struct bb_priv *priv);
@@ -353,7 +347,7 @@ static char printable(char x)
* *
***************************************************************************/
-static int bb_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int bb_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct bb_priv *priv = board->private_data;
@@ -491,7 +485,7 @@ dav_exit:
* *
***************************************************************************/
-static int bb_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int bb_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
unsigned long flags;
@@ -522,7 +516,7 @@ static int bb_write(struct gpib_board *board, uint8_t *buffer, size_t length,
gpiod_get_value(NRFD), gpiod_get_value(NDAC));
if (gpiod_get_value(NRFD) && gpiod_get_value(NDAC)) { /* check for listener */
- retval = -ENODEV;
+ retval = -ENOTCONN;
goto write_end;
}
@@ -619,7 +613,8 @@ static irqreturn_t bb_NRFD_interrupt(int irq, void *arg)
goto nrfd_exit;
}
- if (priv->atn_asserted && priv->w_cnt >= priv->length) { // test for end of transfer
+ if (priv->w_cnt >= priv->length) { // test for missed NDAC end of transfer
+ dev_err(board->gpib_dev, "Unexpected NRFD exit\n");
priv->write_done = 1;
priv->w_busy = 0;
wake_up_interruptible(&board->wait);
@@ -691,14 +686,14 @@ static irqreturn_t bb_NDAC_interrupt(int irq, void *arg)
dbg_printk(3, "accepted %zu\n", priv->w_cnt - 1);
- if (!priv->atn_asserted && priv->w_cnt >= priv->length) { // test for end of transfer
+ gpiod_set_value(DAV, 1); // Data not available
+ priv->dav_tx = 1;
+ priv->phase = 510;
+
+ if (priv->w_cnt >= priv->length) { // test for end of transfer
priv->write_done = 1;
priv->w_busy = 0;
wake_up_interruptible(&board->wait);
- } else {
- gpiod_set_value(DAV, 1); // Data not available
- priv->dav_tx = 1;
- priv->phase = 510;
}
ndac_exit:
@@ -728,7 +723,7 @@ static irqreturn_t bb_SRQ_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
-static int bb_command(struct gpib_board *board, uint8_t *buffer,
+static int bb_command(struct gpib_board *board, u8 *buffer,
size_t length, size_t *bytes_written)
{
size_t ret;
@@ -855,6 +850,7 @@ static void set_atn(struct gpib_board *board, int atn_asserted)
priv->listener_state = listener_addressed;
if (priv->talker_state == talker_active)
priv->talker_state = talker_addressed;
+ SET_DIR_WRITE(priv); // need to be able to read bus NRFD/NDAC
} else {
if (priv->listener_state == listener_addressed) {
priv->listener_state = listener_active;
@@ -864,14 +860,12 @@ static void set_atn(struct gpib_board *board, int atn_asserted)
priv->talker_state = talker_active;
}
gpiod_direction_output(_ATN, !atn_asserted);
- priv->atn_asserted = atn_asserted;
}
static int bb_take_control(struct gpib_board *board, int synchronous)
{
dbg_printk(2, "%d\n", synchronous);
set_atn(board, 1);
- set_bit(CIC_NUM, &board->status);
return 0;
}
@@ -882,16 +876,24 @@ static int bb_go_to_standby(struct gpib_board *board)
return 0;
}
-static void bb_request_system_control(struct gpib_board *board, int request_control)
+static int bb_request_system_control(struct gpib_board *board, int request_control)
{
+ struct bb_priv *priv = board->private_data;
+
dbg_printk(2, "%d\n", request_control);
- if (request_control) {
- set_bit(CIC_NUM, &board->status);
- // drive DAV & EOI false, enable NRFD & NDAC irqs
- SET_DIR_WRITE(board->private_data);
- } else {
- clear_bit(CIC_NUM, &board->status);
- }
+ if (!request_control)
+ return -EINVAL;
+
+ gpiod_direction_output(REN, 1); /* user space must enable REN if needed */
+ gpiod_direction_output(IFC, 1); /* user space must toggle IFC if needed */
+ if (sn7516x)
+ gpiod_direction_output(DC, 0); /* enable ATN as output on SN75161/2 */
+
+ gpiod_direction_input(SRQ);
+
+ ENABLE_IRQ(priv->irq_SRQ, IRQ_TYPE_EDGE_FALLING);
+
+ return 0;
}
static void bb_interface_clear(struct gpib_board *board, int assert)
@@ -903,6 +905,7 @@ static void bb_interface_clear(struct gpib_board *board, int assert)
gpiod_direction_output(IFC, 0);
priv->talker_state = talker_idle;
priv->listener_state = listener_idle;
+ set_bit(CIC_NUM, &board->status);
} else {
gpiod_direction_output(IFC, 1);
}
@@ -920,7 +923,7 @@ static void bb_remote_enable(struct gpib_board *board, int enable)
}
}
-static int bb_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int bb_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct bb_priv *priv = board->private_data;
@@ -987,12 +990,12 @@ static int bb_secondary_address(struct gpib_board *board, unsigned int address,
return 0;
}
-static int bb_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int bb_parallel_poll(struct gpib_board *board, u8 *result)
{
return -ENOENT;
}
-static void bb_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void bb_parallel_poll_configure(struct gpib_board *board, u8 config)
{
}
@@ -1000,11 +1003,11 @@ static void bb_parallel_poll_response(struct gpib_board *board, int ist)
{
}
-static void bb_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void bb_serial_poll_response(struct gpib_board *board, u8 status)
{
}
-static uint8_t bb_serial_poll_status(struct gpib_board *board)
+static u8 bb_serial_poll_status(struct gpib_board *board)
{
return 0; // -ENOENT;
}
@@ -1120,8 +1123,7 @@ static void release_gpios(void)
static int allocate_gpios(struct gpib_board *board)
{
- int j, retval = 0;
- bool error = false;
+ int j;
int table_index = 0;
char name[256];
struct gpio_desc *desc;
@@ -1132,8 +1134,8 @@ static int allocate_gpios(struct gpib_board *board)
return -ENOENT;
}
- lookup_table = lookup_tables[0];
- lookup_table->dev_id = dev_name(board->gpib_dev);
+ lookup_table = lookup_tables[table_index];
+ lookup_table->dev_id = dev_name(board->gpib_dev);
gpiod_add_lookup_table(lookup_table);
dbg_printk(1, "Allocating gpios using table index %d\n", table_index);
@@ -1150,30 +1152,26 @@ try_again:
gpiod_remove_lookup_table(lookup_table);
table_index++;
lookup_table = lookup_tables[table_index];
- if (lookup_table) {
- dbg_printk(1, "Allocation failed, now using table_index %d\n",
- table_index);
- lookup_table->dev_id = dev_name(board->gpib_dev);
- gpiod_add_lookup_table(lookup_table);
- goto try_again;
+ if (!lookup_table) {
+ dev_err(board->gpib_dev, "Unable to obtain gpio descriptor for pin %d error %ld\n",
+ gpios_vector[j], PTR_ERR(desc));
+ goto alloc_gpios_fail;
}
- dev_err(board->gpib_dev, "Unable to obtain gpio descriptor for pin %d error %ld\n",
- gpios_vector[j], PTR_ERR(desc));
- error = true;
- break;
+ dbg_printk(1, "Allocation failed, now using table_index %d\n", table_index);
+ lookup_table->dev_id = dev_name(board->gpib_dev);
+ gpiod_add_lookup_table(lookup_table);
+ goto try_again;
}
all_descriptors[j] = desc;
}
- if (error) { /* undo what already done */
- release_gpios();
- retval = -1;
- }
- if (lookup_table)
- gpiod_remove_lookup_table(lookup_table);
- // Initialize LED trigger
- led_trigger_register_simple("gpib", &ledtrig_gpib);
- return retval;
+ gpiod_remove_lookup_table(lookup_table);
+
+ return 0;
+
+alloc_gpios_fail:
+ release_gpios();
+ return -1;
}
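(The retry loop above walks an array of struct gpiod_lookup_table candidates. A hedged sketch of what one such table looks like; the chip label and pin offsets here are illustrative, not taken from the driver:)

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table bb_lookup_example = {
	/* dev_id is filled in at attach time via dev_name(board->gpib_dev) */
	.table = {
		GPIO_LOOKUP_IDX("pinctrl-bcm2835", 17, "NRFD", 0, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("pinctrl-bcm2835", 18, "NDAC", 0, GPIO_ACTIVE_HIGH),
		{ }
	},
};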
static void bb_detach(struct gpib_board *board)
@@ -1184,8 +1182,6 @@ static void bb_detach(struct gpib_board *board)
if (!board->private_data)
return;
- led_trigger_unregister_simple(ledtrig_gpib);
-
bb_free_irq(board, &priv->irq_DAV, NAME "_DAV");
bb_free_irq(board, &priv->irq_NRFD, NAME "_NRFD");
bb_free_irq(board, &priv->irq_NDAC, NAME "_NDAC");
@@ -1206,7 +1202,7 @@ static void bb_detach(struct gpib_board *board)
free_private(board);
}
-static int bb_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int bb_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct bb_priv *priv;
int retval = 0;
@@ -1245,7 +1241,6 @@ static int bb_attach(struct gpib_board *board, const gpib_board_config_t *config
gpios_vector[&(D06) - &all_descriptors[0]] = YOGA_D06_pin_nr;
gpios_vector[&(PE) - &all_descriptors[0]] = -1;
gpios_vector[&(DC) - &all_descriptors[0]] = -1;
- gpios_vector[&(ACT_LED) - &all_descriptors[0]] = -1;
} else {
dev_err(board->gpib_dev, "Unrecognized pin map %s\n", pin_map);
goto bb_attach_fail;
@@ -1256,7 +1251,8 @@ static int bb_attach(struct gpib_board *board, const gpib_board_config_t *config
if (allocate_gpios(board))
goto bb_attach_fail;
-/* Configure SN7516X control lines.
+/*
+ * Configure SN7516X control lines.
* drive ATN, IFC and REN as outputs only when master
* i.e. system controller. In this mode can only be the CIC
* When not master then enable device mode ATN, IFC & REN as inputs
@@ -1266,6 +1262,10 @@ static int bb_attach(struct gpib_board *board, const gpib_board_config_t *config
gpiod_direction_output(TE, 1);
gpiod_direction_output(PE, 1);
}
+/* Set main control lines to a known state */
+ gpiod_direction_output(IFC, 1);
+ gpiod_direction_output(REN, 1);
+ gpiod_direction_output(_ATN, 1);
if (strcmp(PINMAP_2, pin_map) == 0) { /* YOGA: enable level shifters */
gpiod_direction_output(YOGA_ENABLE, 1);
@@ -1293,8 +1293,6 @@ static int bb_attach(struct gpib_board *board, const gpib_board_config_t *config
IRQF_TRIGGER_NONE))
goto bb_attach_fail_r;
- ENABLE_IRQ(priv->irq_SRQ, IRQ_TYPE_EDGE_FALLING);
-
dbg_printk(0, "attached board %d\n", board->minor);
goto bb_attach_out;
@@ -1306,7 +1304,7 @@ bb_attach_out:
return retval;
}
-static gpib_interface_t bb_interface = {
+static struct gpib_interface bb_interface = {
.name = NAME,
.attach = bb_attach,
.detach = bb_detach,
@@ -1364,7 +1362,7 @@ inline long usec_diff(struct timespec64 *a, struct timespec64 *b)
(a->tv_nsec - b->tv_nsec) / 1000);
}
-static inline int check_for_eos(struct bb_priv *priv, uint8_t byte)
+static inline int check_for_eos(struct bb_priv *priv, u8 byte)
{
if (priv->eos_check)
return 0;
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/staging/gpib/hp_82335/hp82335.c
index fd23b1cb80f9..d0e47ef77c87 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/staging/gpib/hp_82335/hp82335.c
@@ -4,8 +4,9 @@
* copyright : (C) 2002 by Frank Mori Hess *
***************************************************************************/
-/*should enable ATN interrupts (and update board->status on occurrence),
- * implement recovery from bus errors (if necessary)
+/*
+ * should enable ATN interrupts (and update board->status on occurrence),
+ * implement recovery from bus errors (if necessary)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -24,12 +25,12 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for HP 82335 interface cards");
-static int hp82335_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int hp82335_attach(struct gpib_board *board, const struct gpib_board_config *config);
static void hp82335_detach(struct gpib_board *board);
static irqreturn_t hp82335_interrupt(int irq, void *arg);
// wrappers for interface functions
-static int hp82335_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int hp82335_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct hp82335_priv *priv = board->private_data;
@@ -37,7 +38,7 @@ static int hp82335_read(struct gpib_board *board, uint8_t *buffer, size_t length
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-static int hp82335_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+static int hp82335_write(struct gpib_board *board, u8 *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct hp82335_priv *priv = board->private_data;
@@ -45,7 +46,7 @@ static int hp82335_write(struct gpib_board *board, uint8_t *buffer, size_t lengt
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-static int hp82335_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int hp82335_command(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
struct hp82335_priv *priv = board->private_data;
@@ -67,11 +68,11 @@ static int hp82335_go_to_standby(struct gpib_board *board)
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-static void hp82335_request_system_control(struct gpib_board *board, int request_control)
+static int hp82335_request_system_control(struct gpib_board *board, int request_control)
{
struct hp82335_priv *priv = board->private_data;
- tms9914_request_system_control(board, &priv->tms9914_priv, request_control);
+ return tms9914_request_system_control(board, &priv->tms9914_priv, request_control);
}
static void hp82335_interface_clear(struct gpib_board *board, int assert)
@@ -88,7 +89,7 @@ static void hp82335_remote_enable(struct gpib_board *board, int enable)
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-static int hp82335_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int hp82335_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct hp82335_priv *priv = board->private_data;
@@ -123,14 +124,14 @@ static int hp82335_secondary_address(struct gpib_board *board, unsigned int addr
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-static int hp82335_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int hp82335_parallel_poll(struct gpib_board *board, u8 *result)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_parallel_poll(board, &priv->tms9914_priv, result);
}
-static void hp82335_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void hp82335_parallel_poll_configure(struct gpib_board *board, u8 config)
{
struct hp82335_priv *priv = board->private_data;
@@ -144,14 +145,14 @@ static void hp82335_parallel_poll_response(struct gpib_board *board, int ist)
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-static void hp82335_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void hp82335_serial_poll_response(struct gpib_board *board, u8 status)
{
struct hp82335_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-static uint8_t hp82335_serial_poll_status(struct gpib_board *board)
+static u8 hp82335_serial_poll_status(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
@@ -179,7 +180,7 @@ static void hp82335_return_to_local(struct gpib_board *board)
tms9914_return_to_local(board, &priv->tms9914_priv);
}
-static gpib_interface_t hp82335_interface = {
+static struct gpib_interface hp82335_interface = {
.name = "hp82335",
.attach = hp82335_attach,
.detach = hp82335_detach,
@@ -226,12 +227,12 @@ static inline unsigned int tms9914_to_hp82335_offset(unsigned int register_num)
return 0x1ff8 + register_num;
}
-static uint8_t hp82335_read_byte(struct tms9914_priv *priv, unsigned int register_num)
+static u8 hp82335_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
return tms9914_iomem_read_byte(priv, tms9914_to_hp82335_offset(register_num));
}
-static void hp82335_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
+static void hp82335_write_byte(struct tms9914_priv *priv, u8 data, unsigned int register_num)
{
tms9914_iomem_write_byte(priv, data, tms9914_to_hp82335_offset(register_num));
}
@@ -243,7 +244,7 @@ static void hp82335_clear_interrupt(struct hp82335_priv *hp_priv)
writeb(0, tms_priv->mmiobase + HPREG_INTR_CLEAR);
}
-static int hp82335_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int hp82335_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct hp82335_priv *hp_priv;
struct tms9914_priv *tms_priv;
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/staging/gpib/hp_82341/hp_82341.c
index f52e673dc869..1b0822b2a3b8 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/staging/gpib/hp_82341/hp_82341.c
@@ -25,11 +25,11 @@ MODULE_DESCRIPTION("GPIB driver for hp 82341a/b/c/d boards");
static unsigned short read_and_clear_event_status(struct gpib_board *board);
static void set_transfer_counter(struct hp_82341_priv *hp_priv, int count);
static int read_transfer_counter(struct hp_82341_priv *hp_priv);
-static int hp_82341_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+static int hp_82341_write(struct gpib_board *board, u8 *buffer, size_t length, int send_eoi,
size_t *bytes_written);
static irqreturn_t hp_82341_interrupt(int irq, void *arg);
-static int hp_82341_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+static int hp_82341_accel_read(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct hp_82341_priv *hp_priv = board->private_data;
@@ -51,11 +51,12 @@ static int hp_82341_accel_read(struct gpib_board *board, uint8_t *buffer, size_t
return 0;
//disable fifo for the moment
outb(DIRECTION_GPIB_TO_HOST_BIT, hp_priv->iobase[3] + BUFFER_CONTROL_REG);
- // Handle corner case of board not in holdoff and one byte has slipped in already.
- // Also, board sometimes has problems (spurious 1 byte reads) when read fifo is
- // started up with board in
- // TACS under certain data holdoff conditions. Doing a 1 byte tms9914-style
- // read avoids these problems.
+ /*
+ * Handle corner case of board not in holdoff and one byte has slipped in already.
+ * Also, board sometimes has problems (spurious 1 byte reads) when read fifo is
+ * started up with board in TACS under certain data holdoff conditions.
+ * Doing a 1 byte tms9914-style read avoids these problems.
+ */
if (/*tms_priv->holdoff_active == 0 && */length > 1) {
size_t num_bytes;
@@ -172,7 +173,7 @@ static int restart_write_fifo(struct gpib_board *board, struct hp_82341_priv *hp
return 0;
}
-static int hp_82341_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int hp_82341_accel_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct hp_82341_priv *hp_priv = board->private_data;
@@ -250,12 +251,12 @@ static int hp_82341_accel_write(struct gpib_board *board, uint8_t *buffer, size_
return 0;
}
-static int hp_82341_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int hp_82341_attach(struct gpib_board *board, const struct gpib_board_config *config);
static void hp_82341_detach(struct gpib_board *board);
// wrappers for interface functions
-static int hp_82341_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+static int hp_82341_read(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct hp_82341_priv *priv = board->private_data;
@@ -263,7 +264,7 @@ static int hp_82341_read(struct gpib_board *board, uint8_t *buffer, size_t lengt
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-static int hp_82341_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+static int hp_82341_write(struct gpib_board *board, u8 *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct hp_82341_priv *priv = board->private_data;
@@ -271,7 +272,7 @@ static int hp_82341_write(struct gpib_board *board, uint8_t *buffer, size_t leng
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-static int hp_82341_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int hp_82341_command(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
struct hp_82341_priv *priv = board->private_data;
@@ -293,7 +294,7 @@ static int hp_82341_go_to_standby(struct gpib_board *board)
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-static void hp_82341_request_system_control(struct gpib_board *board, int request_control)
+static int hp_82341_request_system_control(struct gpib_board *board, int request_control)
{
struct hp_82341_priv *priv = board->private_data;
@@ -302,7 +303,7 @@ static void hp_82341_request_system_control(struct gpib_board *board, int reques
else
priv->mode_control_bits &= ~SYSTEM_CONTROLLER_BIT;
outb(priv->mode_control_bits, priv->iobase[0] + MODE_CONTROL_STATUS_REG);
- tms9914_request_system_control(board, &priv->tms9914_priv, request_control);
+ return tms9914_request_system_control(board, &priv->tms9914_priv, request_control);
}
static void hp_82341_interface_clear(struct gpib_board *board, int assert)
@@ -319,7 +320,7 @@ static void hp_82341_remote_enable(struct gpib_board *board, int enable)
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-static int hp_82341_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int hp_82341_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct hp_82341_priv *priv = board->private_data;
@@ -354,14 +355,14 @@ static int hp_82341_secondary_address(struct gpib_board *board, unsigned int add
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-static int hp_82341_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int hp_82341_parallel_poll(struct gpib_board *board, u8 *result)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_parallel_poll(board, &priv->tms9914_priv, result);
}
-static void hp_82341_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void hp_82341_parallel_poll_configure(struct gpib_board *board, u8 config)
{
struct hp_82341_priv *priv = board->private_data;
@@ -375,14 +376,14 @@ static void hp_82341_parallel_poll_response(struct gpib_board *board, int ist)
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-static void hp_82341_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void hp_82341_serial_poll_response(struct gpib_board *board, u8 status)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-static uint8_t hp_82341_serial_poll_status(struct gpib_board *board)
+static u8 hp_82341_serial_poll_status(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
@@ -410,7 +411,7 @@ static void hp_82341_return_to_local(struct gpib_board *board)
tms9914_return_to_local(board, &priv->tms9914_priv);
}
-static gpib_interface_t hp_82341_unaccel_interface = {
+static struct gpib_interface hp_82341_unaccel_interface = {
.name = "hp_82341_unaccel",
.attach = hp_82341_attach,
.detach = hp_82341_detach,
@@ -438,7 +439,7 @@ static gpib_interface_t hp_82341_unaccel_interface = {
.return_to_local = hp_82341_return_to_local,
};
-static gpib_interface_t hp_82341_interface = {
+static struct gpib_interface hp_82341_interface = {
.name = "hp_82341",
.attach = hp_82341_attach,
.detach = hp_82341_detach,
@@ -479,12 +480,12 @@ static void hp_82341_free_private(struct gpib_board *board)
board->private_data = NULL;
}
-static uint8_t hp_82341_read_byte(struct tms9914_priv *priv, unsigned int register_num)
+static u8 hp_82341_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
return inb(priv->iobase + register_num);
}
-static void hp_82341_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
+static void hp_82341_write_byte(struct tms9914_priv *priv, u8 data, unsigned int register_num)
{
outb(data, priv->iobase + register_num);
}
@@ -619,7 +620,8 @@ static int hp_82341_load_firmware_array(struct hp_82341_priv *hp_priv,
return 0;
}
-static int hp_82341_load_firmware(struct hp_82341_priv *hp_priv, const gpib_board_config_t *config)
+static int hp_82341_load_firmware(struct hp_82341_priv *hp_priv,
+ const struct gpib_board_config *config)
{
if (config->init_data_length == 0) {
if (xilinx_done(hp_priv))
@@ -686,7 +688,7 @@ static int clear_xilinx(struct hp_82341_priv *hp_priv)
return 0;
}
-static int hp_82341_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int hp_82341_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct hp_82341_priv *hp_priv;
struct tms9914_priv *tms_priv;
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/staging/gpib/include/gpibP.h
index 0c71a038e444..0af72934ce24 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/staging/gpib/include/gpibP.h
@@ -11,23 +11,23 @@
#include "gpib_types.h"
#include "gpib_proto.h"
-#include "gpib_user.h"
+#include "gpib.h"
#include "gpib_ioctl.h"
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-int gpib_register_driver(gpib_interface_t *interface, struct module *mod);
-void gpib_unregister_driver(gpib_interface_t *interface);
-struct pci_dev *gpib_pci_get_device(const gpib_board_config_t *config, unsigned int vendor_id,
+int gpib_register_driver(struct gpib_interface *interface, struct module *mod);
+void gpib_unregister_driver(struct gpib_interface *interface);
+struct pci_dev *gpib_pci_get_device(const struct gpib_board_config *config, unsigned int vendor_id,
unsigned int device_id, struct pci_dev *from);
-struct pci_dev *gpib_pci_get_subsys(const gpib_board_config_t *config, unsigned int vendor_id,
+struct pci_dev *gpib_pci_get_subsys(const struct gpib_board_config *config, unsigned int vendor_id,
unsigned int device_id, unsigned int ss_vendor,
unsigned int ss_device, struct pci_dev *from);
-unsigned int num_gpib_events(const gpib_event_queue_t *queue);
+unsigned int num_gpib_events(const struct gpib_event_queue *queue);
int push_gpib_event(struct gpib_board *board, short event_type);
-int pop_gpib_event(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type);
+int pop_gpib_event(struct gpib_board *board, struct gpib_event_queue *queue, short *event_type);
int gpib_request_pseudo_irq(struct gpib_board *board, irqreturn_t (*handler)(int, void *));
void gpib_free_pseudo_irq(struct gpib_board *board);
int gpib_match_device_path(struct device *dev, const char *device_path_in);
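(With the typedefs gone, registration reads as in this hedged sketch; the interface name and the attach/detach hooks are illustrative, only the registration calls come from the header above:)

#include <linux/module.h>

static int example_attach(struct gpib_board *board,
			  const struct gpib_board_config *config);
static void example_detach(struct gpib_board *board);

static struct gpib_interface example_interface = {
	.name = "example",
	.attach = example_attach,
	.detach = example_detach,
};

static int __init example_init_module(void)
{
	return gpib_register_driver(&example_interface, THIS_MODULE);
}
module_init(example_init_module);

static void __exit example_exit_module(void)
{
	gpib_unregister_driver(&example_interface);
}
module_exit(example_exit_module);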
diff --git a/drivers/staging/gpib/include/gpib_proto.h b/drivers/staging/gpib/include/gpib_proto.h
index 2c7dfc02f517..42e736e3b7cd 100644
--- a/drivers/staging/gpib/include/gpib_proto.h
+++ b/drivers/staging/gpib/include/gpib_proto.h
@@ -8,12 +8,8 @@
int ibopen(struct inode *inode, struct file *filep);
int ibclose(struct inode *inode, struct file *file);
long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg);
-int osInit(void);
-void osReset(void);
void os_start_timer(struct gpib_board *board, unsigned int usec_timeout);
void os_remove_timer(struct gpib_board *board);
-void osSendEOI(void);
-void osSendEOI(void);
void init_gpib_board(struct gpib_board *board);
static inline unsigned long usec_to_jiffies(unsigned int usec)
{
@@ -23,34 +19,31 @@ static inline unsigned long usec_to_jiffies(unsigned int usec)
};
int serial_poll_all(struct gpib_board *board, unsigned int usec_timeout);
-void init_gpib_descriptor(gpib_descriptor_t *desc);
+void init_gpib_descriptor(struct gpib_descriptor *desc);
int dvrsp(struct gpib_board *board, unsigned int pad, int sad,
- unsigned int usec_timeout, uint8_t *result);
-int ibAPWait(struct gpib_board *board, int pad);
-int ibAPrsp(struct gpib_board *board, int padsad, char *spb);
-void ibAPE(struct gpib_board *board, int pad, int v);
+ unsigned int usec_timeout, u8 *result);
int ibcac(struct gpib_board *board, int sync, int fallback_to_async);
-int ibcmd(struct gpib_board *board, uint8_t *buf, size_t length, size_t *bytes_written);
+int ibcmd(struct gpib_board *board, u8 *buf, size_t length, size_t *bytes_written);
int ibgts(struct gpib_board *board);
int ibonline(struct gpib_board *board);
int iboffline(struct gpib_board *board);
int iblines(const struct gpib_board *board, short *lines);
-int ibrd(struct gpib_board *board, uint8_t *buf, size_t length, int *end_flag, size_t *bytes_read);
-int ibrpp(struct gpib_board *board, uint8_t *buf);
-int ibrsv2(struct gpib_board *board, uint8_t status_byte, int new_reason_for_service);
-void ibrsc(struct gpib_board *board, int request_control);
+int ibrd(struct gpib_board *board, u8 *buf, size_t length, int *end_flag, size_t *bytes_read);
+int ibrpp(struct gpib_board *board, u8 *buf);
+int ibrsv2(struct gpib_board *board, u8 status_byte, int new_reason_for_service);
+int ibrsc(struct gpib_board *board, int request_control);
int ibsic(struct gpib_board *board, unsigned int usec_duration);
int ibsre(struct gpib_board *board, int enable);
int ibpad(struct gpib_board *board, unsigned int addr);
int ibsad(struct gpib_board *board, int addr);
int ibeos(struct gpib_board *board, int eos, int eosflags);
int ibwait(struct gpib_board *board, int wait_mask, int clear_mask, int set_mask,
- int *status, unsigned long usec_timeout, gpib_descriptor_t *desc);
-int ibwrt(struct gpib_board *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written);
+ int *status, unsigned long usec_timeout, struct gpib_descriptor *desc);
+int ibwrt(struct gpib_board *board, u8 *buf, size_t cnt, int send_eoi, size_t *bytes_written);
int ibstatus(struct gpib_board *board);
-int general_ibstatus(struct gpib_board *board, const gpib_status_queue_t *device,
- int clear_mask, int set_mask, gpib_descriptor_t *desc);
+int general_ibstatus(struct gpib_board *board, const struct gpib_status_queue *device,
+ int clear_mask, int set_mask, struct gpib_descriptor *desc);
int io_timed_out(struct gpib_board *board);
-int ibppc(struct gpib_board *board, uint8_t configuration);
+int ibppc(struct gpib_board *board, u8 configuration);
#endif /* GPIB_PROTO_INCLUDED */
diff --git a/drivers/staging/gpib/include/gpib_types.h b/drivers/staging/gpib/include/gpib_types.h
index 2d9b9be683f8..db040c80d778 100644
--- a/drivers/staging/gpib/include/gpib_types.h
+++ b/drivers/staging/gpib/include/gpib_types.h
@@ -8,12 +8,7 @@
#define _GPIB_TYPES_H
#ifdef __KERNEL__
-/* gpib_interface_t defines the interface
- * between the board-specific details dealt with in the drivers
- * and generic interface provided by gpib-common.
- * This really should be in a different header file.
- */
-#include "gpib_user.h"
+#include "gpib.h"
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/mutex.h>
@@ -22,11 +17,10 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
-typedef struct gpib_interface_struct gpib_interface_t;
struct gpib_board;
/* config parameters that are only used by driver attach functions */
-typedef struct {
+struct gpib_board_config {
/* firmware blob */
void *init_data;
int init_data_length;
@@ -37,11 +31,13 @@ typedef struct {
unsigned int ibirq;
/* dma channel to use for non-pnp cards (set by core, driver should make local copy) */
unsigned int ibdma;
- /* pci bus of card, useful for distinguishing multiple identical pci cards
+ /*
+ * pci bus of card, useful for distinguishing multiple identical pci cards
* (negative means don't care)
*/
int pci_bus;
- /* pci slot of card, useful for distinguishing multiple identical pci cards
+ /*
+ * pci slot of card, useful for distinguishing multiple identical pci cards
* (negative means don't care)
*/
int pci_slot;
@@ -49,16 +45,23 @@ typedef struct {
char *device_path;
/* serial number of hardware to attach */
char *serial_number;
-} gpib_board_config_t;
+};
-struct gpib_interface_struct {
+/*
+ * struct gpib_interface defines the interface
+ * between the board-specific details dealt with in the drivers
+ * and generic interface provided by gpib-common.
+ * This really should be in a different header file.
+ */
+struct gpib_interface {
/* name of board */
char *name;
/* attach() initializes board and allocates resources */
- int (*attach)(struct gpib_board *board, const gpib_board_config_t *config);
+ int (*attach)(struct gpib_board *board, const struct gpib_board_config *config);
/* detach() shuts down board and frees resources */
void (*detach)(struct gpib_board *board);
- /* read() should read at most 'length' bytes from the bus into
+ /*
+ * read() should read at most 'length' bytes from the bus into
* 'buffer'. It should return when it fills the buffer or
	 * encounters an END (EOI and/or EOS if appropriate). It should set 'end'
* to be nonzero if the read was terminated by an END, otherwise 'end'
@@ -68,76 +71,88 @@ struct gpib_interface_struct {
* return indicates error.
* nbytes returns number of bytes read
*/
- int (*read)(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ int (*read)(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *bytes_read);
- /* write() should write 'length' bytes from buffer to the bus.
+ /*
+ * write() should write 'length' bytes from buffer to the bus.
* If the boolean value send_eoi is nonzero, then EOI should
* be sent along with the last byte. Returns number of bytes
* written or negative value on error.
*/
- int (*write)(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ int (*write)(struct gpib_board *board, u8 *buffer, size_t length, int send_eoi,
size_t *bytes_written);
- /* command() writes the command bytes in 'buffer' to the bus
+ /*
+ * command() writes the command bytes in 'buffer' to the bus
* Returns zero on success or negative value on error.
*/
- int (*command)(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int (*command)(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written);
- /* Take control (assert ATN). If 'asyncronous' is nonzero, take
+ /*
+ * Take control (assert ATN). If 'asyncronous' is nonzero, take
	 * control asynchronously (assert ATN immediately without waiting
	 * for other processes to complete first). Should not return
	 * until board becomes controller in charge. Returns zero on success,
* nonzero on error.
*/
int (*take_control)(struct gpib_board *board, int asyncronous);
- /* De-assert ATN. Returns zero on success, nonzer on error.
+ /*
+	 * De-assert ATN. Returns zero on success, nonzero on error.
*/
int (*go_to_standby)(struct gpib_board *board);
/* request/release control of the IFC and REN lines (system controller) */
- void (*request_system_control)(struct gpib_board *board, int request_control);
- /* Asserts or de-asserts 'interface clear' (IFC) depending on
+ int (*request_system_control)(struct gpib_board *board, int request_control);
+ /*
+ * Asserts or de-asserts 'interface clear' (IFC) depending on
* boolean value of 'assert'
*/
void (*interface_clear)(struct gpib_board *board, int assert);
- /* Sends remote enable command if 'enable' is nonzero, disables remote mode
+ /*
+ * Sends remote enable command if 'enable' is nonzero, disables remote mode
* if 'enable' is zero
*/
void (*remote_enable)(struct gpib_board *board, int enable);
- /* enable END for reads, when byte 'eos' is received. If
+ /*
+ * enable END for reads, when byte 'eos' is received. If
* 'compare_8_bits' is nonzero, then all 8 bits are compared
* with the eos bytes. Otherwise only the 7 least significant
* bits are compared.
*/
- int (*enable_eos)(struct gpib_board *board, uint8_t eos, int compare_8_bits);
+ int (*enable_eos)(struct gpib_board *board, u8 eos, int compare_8_bits);
/* disable END on eos byte (END on EOI only)*/
void (*disable_eos)(struct gpib_board *board);
/* configure parallel poll */
- void (*parallel_poll_configure)(struct gpib_board *board, uint8_t configuration);
+ void (*parallel_poll_configure)(struct gpib_board *board, u8 configuration);
/* conduct parallel poll */
- int (*parallel_poll)(struct gpib_board *board, uint8_t *result);
+ int (*parallel_poll)(struct gpib_board *board, u8 *result);
/* set/clear ist (individual status bit) */
void (*parallel_poll_response)(struct gpib_board *board, int ist);
/* select local parallel poll configuration mode PP2 versus remote PP1 */
void (*local_parallel_poll_mode)(struct gpib_board *board, int local);
- /* Returns current status of the bus lines. Should be set to
+ /*
+ * Returns current status of the bus lines. Should be set to
* NULL if your board does not have the ability to query the
* state of the bus lines.
*/
int (*line_status)(const struct gpib_board *board);
- /* updates and returns the board's current status.
+ /*
+ * updates and returns the board's current status.
	 * The meanings of the bits are specified in gpib_user.h
* in the IBSTA section. The driver does not need to
* worry about setting the CMPL, END, TIMO, or ERR bits.
*/
unsigned int (*update_status)(struct gpib_board *board, unsigned int clear_mask);
- /* Sets primary address 0-30 for gpib interface card.
+ /*
+ * Sets primary address 0-30 for gpib interface card.
*/
int (*primary_address)(struct gpib_board *board, unsigned int address);
- /* Sets and enables, or disables secondary address 0-30
+ /*
+ * Sets and enables, or disables secondary address 0-30
* for gpib interface card.
*/
int (*secondary_address)(struct gpib_board *board, unsigned int address,
int enable);
- /* Sets the byte the board should send in response to a serial poll.
+ /*
+ * Sets the byte the board should send in response to a serial poll.
* This function should also start or stop requests for service via
* IEEE 488.2 reqt/reqf, based on MSS (bit 6 of the status_byte).
* If the more flexible serial_poll_response2 is implemented by the
@@ -149,8 +164,9 @@ struct gpib_interface_struct {
* by IEEE 488.2 section 11.3.3.4.3 "Allowed Coupled Control of
* STB, reqt, and reqf".
*/
- void (*serial_poll_response)(struct gpib_board *board, uint8_t status_byte);
- /* Sets the byte the board should send in response to a serial poll.
+ void (*serial_poll_response)(struct gpib_board *board, u8 status_byte);
+ /*
+ * Sets the byte the board should send in response to a serial poll.
* This function should also request service via IEEE 488.2 reqt/reqf
* based on MSS (bit 6 of the status_byte) and new_reason_for_service.
* reqt should be set true if new_reason_for_service is true,
@@ -164,11 +180,12 @@ struct gpib_interface_struct {
* If this method is left NULL by the driver, then the user library
* function ibrsv2 will not work.
*/
- void (*serial_poll_response2)(struct gpib_board *board, uint8_t status_byte,
+ void (*serial_poll_response2)(struct gpib_board *board, u8 status_byte,
int new_reason_for_service);
- /* returns the byte the board will send in response to a serial poll.
+ /*
+ * returns the byte the board will send in response to a serial poll.
*/
- uint8_t (*serial_poll_status)(struct gpib_board *board);
+ u8 (*serial_poll_status)(struct gpib_board *board);
/* adjust T1 delay */
int (*t1_delay)(struct gpib_board *board, unsigned int nano_sec);
/* go to local mode */
@@ -179,14 +196,14 @@ struct gpib_interface_struct {
unsigned skip_check_for_command_acceptors : 1;
};
-typedef struct {
+struct gpib_event_queue {
struct list_head event_head;
spinlock_t lock; // for access to event list
unsigned int num_events;
unsigned dropped_event : 1;
-} gpib_event_queue_t;
+};
-static inline void init_event_queue(gpib_event_queue_t *queue)
+static inline void init_event_queue(struct gpib_event_queue *queue)
{
INIT_LIST_HEAD(&queue->event_head);
queue->num_events = 0;
@@ -210,20 +227,22 @@ static inline void init_gpib_pseudo_irq(struct gpib_pseudo_irq *pseudo_irq)
}
/* list so we can make a linked list of drivers */
-typedef struct gpib_interface_list_struct {
+struct gpib_interface_list {
struct list_head list;
- gpib_interface_t *interface;
+ struct gpib_interface *interface;
struct module *module;
-} gpib_interface_list_t;
+};
-/* One struct gpib_board is allocated for each physical board in the computer.
+/*
+ * One struct gpib_board is allocated for each physical board in the computer.
* It provides storage for variables local to each board, and interface
* functions for performing operations on the board
*/
struct gpib_board {
/* functions used by this board */
- gpib_interface_t *interface;
- /* Pointer to module whose use count we should increment when
+ struct gpib_interface *interface;
+ /*
+ * Pointer to module whose use count we should increment when
* interface is in use
*/
struct module *provider_module;
@@ -231,20 +250,24 @@ struct gpib_board {
u8 *buffer;
/* length of buffer */
unsigned int buffer_length;
- /* Used to hold the board's current status (see update_status() above)
+ /*
+ * Used to hold the board's current status (see update_status() above)
*/
unsigned long status;
- /* Driver should only sleep on this wait queue. It is special in that the
+ /*
+ * Driver should only sleep on this wait queue. It is special in that the
* core will wake this queue and set the TIMO bit in 'status' when the
* watchdog timer times out.
*/
wait_queue_head_t wait;
- /* Lock that only allows one process to access this board at a time.
+ /*
+ * Lock that only allows one process to access this board at a time.
* Has to be first in any locking order, since it can be locked over
* multiple ioctls.
*/
struct mutex user_mutex;
- /* Mutex which compensates for removal of "big kernel lock" from kernel.
+ /*
+ * Mutex which compensates for removal of "big kernel lock" from kernel.
* Should not be held for extended waits.
*/
struct mutex big_gpib_mutex;
@@ -259,7 +282,8 @@ struct gpib_board {
struct device *dev;
/* gpib_common device gpibN */
struct device *gpib_dev;
- /* 'private_data' can be used as seen fit by the driver to
+ /*
+ * 'private_data' can be used as seen fit by the driver to
* store additional variables for this board
*/
void *private_data;
@@ -284,34 +308,36 @@ struct gpib_board {
/* autospoll kernel thread */
struct task_struct *autospoll_task;
/* queue for recording received trigger/clear/ifc events */
- gpib_event_queue_t event_queue;
+ struct gpib_event_queue event_queue;
/* minor number for this board's device file */
int minor;
/* struct to deal with polling mode*/
struct gpib_pseudo_irq pseudo_irq;
	/* error doing autopoll */
atomic_t stuck_srq;
- gpib_board_config_t config;
+ struct gpib_board_config config;
/* Flag that indicates whether board is system controller of the bus */
unsigned master : 1;
/* individual status bit */
unsigned ist : 1;
- /* one means local parallel poll mode ieee 488.1 PP2 (or no parallel poll PP0),
+ /*
+ * one means local parallel poll mode ieee 488.1 PP2 (or no parallel poll PP0),
* zero means remote parallel poll configuration mode ieee 488.1 PP1
*/
unsigned local_ppoll_mode : 1;
};
/* element of event queue */
-typedef struct {
+struct gpib_event {
struct list_head list;
short event_type;
-} gpib_event_t;
+};
-/* Each board has a list of gpib_status_queue_t to keep track of all open devices
+/*
+ * Each board has a list of gpib_status_queue to keep track of all open devices
* on the bus, so we know what address to poll when we get a service request
*/
-typedef struct {
+struct gpib_status_queue {
/* list_head so we can make a linked list of devices */
struct list_head list;
unsigned int pad; /* primary gpib address */
@@ -323,31 +349,31 @@ typedef struct {
unsigned int reference_count;
/* flags loss of status byte error due to limit on size of queue */
unsigned dropped_byte : 1;
-} gpib_status_queue_t;
+};
-typedef struct {
+struct gpib_status_byte {
struct list_head list;
u8 poll_byte;
-} status_byte_t;
+};
-void init_gpib_status_queue(gpib_status_queue_t *device);
+void init_gpib_status_queue(struct gpib_status_queue *device);
/* Used to store device-descriptor-specific information */
-typedef struct {
+struct gpib_descriptor {
unsigned int pad; /* primary gpib address */
int sad; /* secondary gpib address (negative means disabled) */
atomic_t io_in_progress;
unsigned is_board : 1;
unsigned autopoll_enabled : 1;
-} gpib_descriptor_t;
+};
-typedef struct {
+struct gpib_file_private {
atomic_t holding_mutex;
- gpib_descriptor_t *descriptors[GPIB_MAX_NUM_DESCRIPTORS];
+ struct gpib_descriptor *descriptors[GPIB_MAX_NUM_DESCRIPTORS];
/* locked while descriptors are being allocated/deallocated */
struct mutex descriptors_mutex;
unsigned got_module : 1;
-} gpib_file_private_t;
+};
#endif /* __KERNEL__ */
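
With the typedefs gone, drivers populate struct gpib_interface directly, as the ines and usb_gpib conversions later in this patch do. A minimal sketch using the renamed types; the "demo" names are hypothetical, and only attach/detach are shown (the remaining hooks are filled in the same way, and optional ones such as serial_poll_response2 or line_status may be left NULL):

static int demo_attach(struct gpib_board *board,
		       const struct gpib_board_config *config)
{
	/* allocate private data, request io/irq resources, go online */
	return 0;
}

static void demo_detach(struct gpib_board *board)
{
	/* release everything acquired in demo_attach() */
}

static struct gpib_interface demo_interface = {
	.name = "demo",
	.attach = demo_attach,
	.detach = demo_detach,
	/* .read, .write, .command, ... filled in likewise */
};
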
diff --git a/drivers/staging/gpib/include/nec7210.h b/drivers/staging/gpib/include/nec7210.h
index 069896456230..312217b4580e 100644
--- a/drivers/staging/gpib/include/nec7210.h
+++ b/drivers/staging/gpib/include/nec7210.h
@@ -1,4 +1,4 @@
-//* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
/***************************************************************************
* copyright : (C) 2002 by Frank Mori Hess
@@ -78,19 +78,19 @@ enum {
};
// interface functions
-int nec7210_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_read(struct gpib_board *board, struct nec7210_priv *priv, u8 *buffer,
size_t length, int *end, size_t *bytes_read);
-int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv, u8 *buffer,
size_t length, int send_eoi, size_t *bytes_written);
-int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, u8 *buffer,
size_t length, size_t *bytes_written);
int nec7210_take_control(struct gpib_board *board, struct nec7210_priv *priv, int syncronous);
int nec7210_go_to_standby(struct gpib_board *board, struct nec7210_priv *priv);
-void nec7210_request_system_control(struct gpib_board *board,
- struct nec7210_priv *priv, int request_control);
+int nec7210_request_system_control(struct gpib_board *board,
+ struct nec7210_priv *priv, int request_control);
void nec7210_interface_clear(struct gpib_board *board, struct nec7210_priv *priv, int assert);
void nec7210_remote_enable(struct gpib_board *board, struct nec7210_priv *priv, int enable);
-int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv, uint8_t eos_bytes,
+int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv, u8 eos_bytes,
int compare_8_bits);
void nec7210_disable_eos(struct gpib_board *board, struct nec7210_priv *priv);
unsigned int nec7210_update_status(struct gpib_board *board, struct nec7210_priv *priv,
@@ -100,14 +100,14 @@ int nec7210_primary_address(const struct gpib_board *board,
struct nec7210_priv *priv, unsigned int address);
int nec7210_secondary_address(const struct gpib_board *board, struct nec7210_priv *priv,
unsigned int address, int enable);
-int nec7210_parallel_poll(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *result);
-void nec7210_serial_poll_response(struct gpib_board *board, struct nec7210_priv *priv, uint8_t status);
+int nec7210_parallel_poll(struct gpib_board *board, struct nec7210_priv *priv, u8 *result);
+void nec7210_serial_poll_response(struct gpib_board *board,
+ struct nec7210_priv *priv, u8 status);
void nec7210_parallel_poll_configure(struct gpib_board *board,
struct nec7210_priv *priv, unsigned int configuration);
void nec7210_parallel_poll_response(struct gpib_board *board,
struct nec7210_priv *priv, int ist);
-uint8_t nec7210_serial_poll_status(struct gpib_board *board,
- struct nec7210_priv *priv);
+u8 nec7210_serial_poll_status(struct gpib_board *board, struct nec7210_priv *priv);
int nec7210_t1_delay(struct gpib_board *board,
struct nec7210_priv *priv, unsigned int nano_sec);
void nec7210_return_to_local(const struct gpib_board *board, struct nec7210_priv *priv);
@@ -119,18 +119,18 @@ unsigned int nec7210_set_reg_bits(struct nec7210_priv *priv, unsigned int reg,
unsigned int mask, unsigned int bits);
void nec7210_set_handshake_mode(struct gpib_board *board, struct nec7210_priv *priv, int mode);
void nec7210_release_rfd_holdoff(struct gpib_board *board, struct nec7210_priv *priv);
-uint8_t nec7210_read_data_in(struct gpib_board *board, struct nec7210_priv *priv, int *end);
+u8 nec7210_read_data_in(struct gpib_board *board, struct nec7210_priv *priv, int *end);
// wrappers for io functions
-uint8_t nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num);
-void nec7210_ioport_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned int register_num);
-uint8_t nec7210_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num);
-void nec7210_iomem_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned int register_num);
-uint8_t nec7210_locking_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num);
-void nec7210_locking_ioport_write_byte(struct nec7210_priv *priv, uint8_t data,
+u8 nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num);
+void nec7210_ioport_write_byte(struct nec7210_priv *priv, u8 data, unsigned int register_num);
+u8 nec7210_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num);
+void nec7210_iomem_write_byte(struct nec7210_priv *priv, u8 data, unsigned int register_num);
+u8 nec7210_locking_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num);
+void nec7210_locking_ioport_write_byte(struct nec7210_priv *priv, u8 data,
unsigned int register_num);
-uint8_t nec7210_locking_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num);
-void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, uint8_t data,
+u8 nec7210_locking_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num);
+void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, u8 data,
unsigned int register_num);
// interrupt service routine
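
nec7210_request_system_control() now returns int, so chip drivers should propagate its result rather than discard it, as the ines wrapper further down does. A sketch of the pattern; demo_priv and the function name are hypothetical:

struct demo_priv {
	struct nec7210_priv nec7210_priv;
};

static int demo_request_system_control(struct gpib_board *board,
				       int request_control)
{
	struct demo_priv *priv = board->private_data;

	/* forward the chip-level error code instead of dropping it */
	return nec7210_request_system_control(board, &priv->nec7210_priv,
					      request_control);
}
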
diff --git a/drivers/staging/gpib/include/nec7210_registers.h b/drivers/staging/gpib/include/nec7210_registers.h
index 888803dd97f9..97c53ac8e805 100644
--- a/drivers/staging/gpib/include/nec7210_registers.h
+++ b/drivers/staging/gpib/include/nec7210_registers.h
@@ -17,7 +17,8 @@ enum nec7210_chipset {
TNT5004, // NI (minor differences to TNT4882)
};
-/* nec7210 register numbers (might need to be multiplied by
+/*
+ * nec7210 register numbers (might need to be multiplied by
 * a board-dependent offset to get the actual io address offset)
*/
// write registers
diff --git a/drivers/staging/gpib/include/tms9914.h b/drivers/staging/gpib/include/tms9914.h
index 424c95ad85c6..50a9d3b22619 100644
--- a/drivers/staging/gpib/include/tms9914.h
+++ b/drivers/staging/gpib/include/tms9914.h
@@ -1,4 +1,4 @@
-//* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
/***************************************************************************
* copyright : (C) 2002 by Frank Mori Hess
@@ -79,24 +79,25 @@ enum {
};
// interface functions
-int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer,
size_t length, int *end, size_t *bytes_read);
-int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer,
size_t length, int send_eoi, size_t *bytes_written);
-int tms9914_command(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_command(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer,
size_t length, size_t *bytes_written);
int tms9914_take_control(struct gpib_board *board, struct tms9914_priv *priv, int syncronous);
-/* alternate version of tms9914_take_control which works around buggy tcs
+/*
+ * alternate version of tms9914_take_control which works around buggy tcs
* implementation.
*/
int tms9914_take_control_workaround(struct gpib_board *board, struct tms9914_priv *priv,
int syncronous);
int tms9914_go_to_standby(struct gpib_board *board, struct tms9914_priv *priv);
-void tms9914_request_system_control(struct gpib_board *board, struct tms9914_priv *priv,
- int request_control);
+int tms9914_request_system_control(struct gpib_board *board, struct tms9914_priv *priv,
+ int request_control);
void tms9914_interface_clear(struct gpib_board *board, struct tms9914_priv *priv, int assert);
void tms9914_remote_enable(struct gpib_board *board, struct tms9914_priv *priv, int enable);
-int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv, uint8_t eos_bytes,
+int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv, u8 eos_bytes,
int compare_8_bits);
void tms9914_disable_eos(struct gpib_board *board, struct tms9914_priv *priv);
unsigned int tms9914_update_status(struct gpib_board *board, struct tms9914_priv *priv,
@@ -105,13 +106,14 @@ int tms9914_primary_address(struct gpib_board *board,
struct tms9914_priv *priv, unsigned int address);
int tms9914_secondary_address(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int address, int enable);
-int tms9914_parallel_poll(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *result);
+int tms9914_parallel_poll(struct gpib_board *board, struct tms9914_priv *priv, u8 *result);
void tms9914_parallel_poll_configure(struct gpib_board *board,
- struct tms9914_priv *priv, uint8_t config);
+ struct tms9914_priv *priv, u8 config);
void tms9914_parallel_poll_response(struct gpib_board *board,
struct tms9914_priv *priv, int ist);
-void tms9914_serial_poll_response(struct gpib_board *board, struct tms9914_priv *priv, uint8_t status);
-uint8_t tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv *priv);
+void tms9914_serial_poll_response(struct gpib_board *board,
+ struct tms9914_priv *priv, u8 status);
+u8 tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv *priv);
int tms9914_line_status(const struct gpib_board *board, struct tms9914_priv *priv);
unsigned int tms9914_t1_delay(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int nano_sec);
@@ -124,10 +126,10 @@ void tms9914_release_holdoff(struct tms9914_priv *priv);
void tms9914_set_holdoff_mode(struct tms9914_priv *priv, enum tms9914_holdoff_mode mode);
// wrappers for io functions
-uint8_t tms9914_ioport_read_byte(struct tms9914_priv *priv, unsigned int register_num);
-void tms9914_ioport_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num);
-uint8_t tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register_num);
-void tms9914_iomem_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num);
+u8 tms9914_ioport_read_byte(struct tms9914_priv *priv, unsigned int register_num);
+void tms9914_ioport_write_byte(struct tms9914_priv *priv, u8 data, unsigned int register_num);
+u8 tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register_num);
+void tms9914_iomem_write_byte(struct tms9914_priv *priv, u8 data, unsigned int register_num);
// interrupt service routine
irqreturn_t tms9914_interrupt(struct gpib_board *board, struct tms9914_priv *priv);
@@ -139,7 +141,8 @@ enum {
ms9914_num_registers = 8,
};
-/* tms9914 register numbers (might need to be multiplied by
+/*
+ * tms9914 register numbers (might need to be multiplied by
 * a board-dependent offset to get the actual io address offset)
*/
// write registers
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/staging/gpib/ines/ines.h
index ff27f055a0ff..f0210ce2470d 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/staging/gpib/ines/ines.h
@@ -35,44 +35,6 @@ struct ines_priv {
u8 extend_mode_bits;
};
-// interface functions
-int ines_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
-int ines_write(struct gpib_board *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-int ines_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-int ines_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-int ines_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int ines_take_control(struct gpib_board *board, int synchronous);
-int ines_go_to_standby(struct gpib_board *board);
-void ines_request_system_control(struct gpib_board *board, int request_control);
-void ines_interface_clear(struct gpib_board *board, int assert);
-void ines_remote_enable(struct gpib_board *board, int enable);
-int ines_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits);
-void ines_disable_eos(struct gpib_board *board);
-unsigned int ines_update_status(struct gpib_board *board, unsigned int clear_mask);
-int ines_primary_address(struct gpib_board *board, unsigned int address);
-int ines_secondary_address(struct gpib_board *board, unsigned int address, int enable);
-int ines_parallel_poll(struct gpib_board *board, uint8_t *result);
-void ines_parallel_poll_configure(struct gpib_board *board, uint8_t config);
-void ines_parallel_poll_response(struct gpib_board *board, int ist);
-void ines_serial_poll_response(struct gpib_board *board, uint8_t status);
-uint8_t ines_serial_poll_status(struct gpib_board *board);
-int ines_line_status(const struct gpib_board *board);
-int ines_t1_delay(struct gpib_board *board, unsigned int nano_sec);
-void ines_return_to_local(struct gpib_board *board);
-
-// interrupt service routines
-irqreturn_t ines_pci_interrupt(int irq, void *arg);
-irqreturn_t ines_interrupt(struct gpib_board *board);
-
-// utility functions
-void ines_free_private(struct gpib_board *board);
-int ines_generic_attach(struct gpib_board *board);
-void ines_online(struct ines_priv *priv, const struct gpib_board *board, int use_accel);
-void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count);
-
/* inb/outb wrappers */
static inline unsigned int ines_inb(struct ines_priv *priv, unsigned int register_number)
{
@@ -87,11 +49,6 @@ static inline void ines_outb(struct ines_priv *priv, unsigned int value,
register_number * priv->nec7210_priv.offset);
}
-// pcmcia init/cleanup
-
-int ines_pcmcia_init_module(void);
-void ines_pcmcia_cleanup_module(void);
-
enum ines_regs {
// read
FIFO_STATUS = 0x8,
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/staging/gpib/ines/ines_gpib.c
index d93eb05dab90..c851fd014f48 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/staging/gpib/ines/ines_gpib.c
@@ -25,7 +25,9 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for Ines iGPIB 72010");
-int ines_line_status(const struct gpib_board *board)
+static irqreturn_t ines_interrupt(struct gpib_board *board);
+
+static int ines_line_status(const struct gpib_board *board)
{
int status = VALID_ALL;
int bcm_bits;
@@ -55,7 +57,7 @@ int ines_line_status(const struct gpib_board *board)
return status;
}
-void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count)
+static void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count)
{
if (count > 0xffff) {
pr_err("bug! tried to set xfer counter > 0xffff\n");
@@ -65,7 +67,7 @@ void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count)
ines_outb(priv, count & 0xff, XFER_COUNT_LOWER);
}
-int ines_t1_delay(struct gpib_board *board, unsigned int nano_sec)
+static int ines_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv = &ines_priv->nec7210_priv;
@@ -95,7 +97,7 @@ static inline unsigned short num_in_fifo_bytes(struct ines_priv *ines_priv)
return ines_inb(ines_priv, IN_FIFO_COUNT);
}
-static ssize_t pio_read(struct gpib_board *board, struct ines_priv *ines_priv, uint8_t *buffer,
+static ssize_t pio_read(struct gpib_board *board, struct ines_priv *ines_priv, u8 *buffer,
size_t length, size_t *nbytes)
{
ssize_t retval = 0;
@@ -133,8 +135,8 @@ static ssize_t pio_read(struct gpib_board *board, struct ines_priv *ines_priv, u
return retval;
}
-int ines_accel_read(struct gpib_board *board, uint8_t *buffer,
- size_t length, int *end, size_t *bytes_read)
+static int ines_accel_read(struct gpib_board *board, u8 *buffer,
+ size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
struct ines_priv *ines_priv = board->private_data;
@@ -213,8 +215,8 @@ static int ines_write_wait(struct gpib_board *board, struct ines_priv *ines_priv
return 0;
}
-int ines_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+static int ines_accel_write(struct gpib_board *board, u8 *buffer, size_t length,
+ int send_eoi, size_t *bytes_written)
{
size_t count = 0;
ssize_t retval = 0;
@@ -264,7 +266,7 @@ int ines_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
return retval;
}
-irqreturn_t ines_pci_interrupt(int irq, void *arg)
+static irqreturn_t ines_pci_interrupt(int irq, void *arg)
{
struct gpib_board *board = arg;
struct ines_priv *priv = board->private_data;
@@ -281,7 +283,7 @@ irqreturn_t ines_pci_interrupt(int irq, void *arg)
return ines_interrupt(board);
}
-irqreturn_t ines_interrupt(struct gpib_board *board)
+static irqreturn_t ines_interrupt(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -295,7 +297,7 @@ irqreturn_t ines_interrupt(struct gpib_board *board)
isr3_bits = ines_inb(priv, ISR3);
isr4_bits = ines_inb(priv, ISR4);
if (isr3_bits & IFC_ACTIVE_BIT) {
- push_gpib_event(board, EventIFC);
+ push_gpib_event(board, EVENT_IFC);
wake++;
}
if (isr3_bits & FIFO_ERROR_BIT)
@@ -313,9 +315,9 @@ irqreturn_t ines_interrupt(struct gpib_board *board)
return IRQ_HANDLED;
}
-static int ines_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static int ines_pci_accel_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static int ines_isa_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int ines_pci_attach(struct gpib_board *board, const struct gpib_board_config *config);
+static int ines_pci_accel_attach(struct gpib_board *board, const struct gpib_board_config *config);
+static int ines_isa_attach(struct gpib_board *board, const struct gpib_board_config *config);
static void ines_pci_detach(struct gpib_board *board);
static void ines_isa_detach(struct gpib_board *board);
@@ -393,8 +395,8 @@ static struct ines_pci_id pci_ids[] = {
static const int num_pci_chips = ARRAY_SIZE(pci_ids);
// wrappers for interface functions
-int ines_read(struct gpib_board *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read)
+static int ines_read(struct gpib_board *board, u8 *buffer, size_t length,
+ int *end, size_t *bytes_read)
{
struct ines_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -412,134 +414,134 @@ int ines_read(struct gpib_board *board, uint8_t *buffer, size_t length,
return retval;
}
-int ines_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int ines_write(struct gpib_board *board, u8 *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct ines_priv *priv = board->private_data;
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int ines_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int ines_command(struct gpib_board *board, u8 *buffer, size_t length, size_t *bytes_written)
{
struct ines_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int ines_take_control(struct gpib_board *board, int synchronous)
+static int ines_take_control(struct gpib_board *board, int synchronous)
{
struct ines_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int ines_go_to_standby(struct gpib_board *board)
+static int ines_go_to_standby(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void ines_request_system_control(struct gpib_board *board, int request_control)
+static int ines_request_system_control(struct gpib_board *board, int request_control)
{
struct ines_priv *priv = board->private_data;
- nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
+ return nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
-void ines_interface_clear(struct gpib_board *board, int assert)
+static void ines_interface_clear(struct gpib_board *board, int assert)
{
struct ines_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void ines_remote_enable(struct gpib_board *board, int enable)
+static void ines_remote_enable(struct gpib_board *board, int enable)
{
struct ines_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int ines_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int ines_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct ines_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void ines_disable_eos(struct gpib_board *board)
+static void ines_disable_eos(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int ines_update_status(struct gpib_board *board, unsigned int clear_mask)
+static unsigned int ines_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct ines_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-int ines_primary_address(struct gpib_board *board, unsigned int address)
+static int ines_primary_address(struct gpib_board *board, unsigned int address)
{
struct ines_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int ines_secondary_address(struct gpib_board *board, unsigned int address, int enable)
+static int ines_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct ines_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int ines_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int ines_parallel_poll(struct gpib_board *board, u8 *result)
{
struct ines_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-void ines_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void ines_parallel_poll_configure(struct gpib_board *board, u8 config)
{
struct ines_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, config);
}
-void ines_parallel_poll_response(struct gpib_board *board, int ist)
+static void ines_parallel_poll_response(struct gpib_board *board, int ist)
{
struct ines_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-void ines_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void ines_serial_poll_response(struct gpib_board *board, u8 status)
{
struct ines_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-uint8_t ines_serial_poll_status(struct gpib_board *board)
+static u8 ines_serial_poll_status(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-void ines_return_to_local(struct gpib_board *board)
+static void ines_return_to_local(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-static gpib_interface_t ines_pci_unaccel_interface = {
+static struct gpib_interface ines_pci_unaccel_interface = {
.name = "ines_pci_unaccel",
.attach = ines_pci_attach,
.detach = ines_pci_detach,
@@ -567,7 +569,7 @@ static gpib_interface_t ines_pci_unaccel_interface = {
.return_to_local = ines_return_to_local,
};
-static gpib_interface_t ines_pci_interface = {
+static struct gpib_interface ines_pci_interface = {
.name = "ines_pci",
.attach = ines_pci_accel_attach,
.detach = ines_pci_detach,
@@ -595,7 +597,7 @@ static gpib_interface_t ines_pci_interface = {
.return_to_local = ines_return_to_local,
};
-static gpib_interface_t ines_pci_accel_interface = {
+static struct gpib_interface ines_pci_accel_interface = {
.name = "ines_pci_accel",
.attach = ines_pci_accel_attach,
.detach = ines_pci_detach,
@@ -623,7 +625,7 @@ static gpib_interface_t ines_pci_accel_interface = {
.return_to_local = ines_return_to_local,
};
-static gpib_interface_t ines_isa_interface = {
+static struct gpib_interface ines_isa_interface = {
.name = "ines_isa",
.attach = ines_isa_attach,
.detach = ines_isa_detach,
@@ -664,13 +666,13 @@ static int ines_allocate_private(struct gpib_board *board)
return 0;
}
-void ines_free_private(struct gpib_board *board)
+static void ines_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-int ines_generic_attach(struct gpib_board *board)
+static int ines_generic_attach(struct gpib_board *board)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -690,7 +692,7 @@ int ines_generic_attach(struct gpib_board *board)
return 0;
}
-void ines_online(struct ines_priv *ines_priv, const struct gpib_board *board, int use_accel)
+static void ines_online(struct ines_priv *ines_priv, const struct gpib_board *board, int use_accel)
{
struct nec7210_priv *nec_priv = &ines_priv->nec7210_priv;
@@ -724,7 +726,7 @@ void ines_online(struct ines_priv *ines_priv, const struct gpib_board *board, in
nec7210_set_reg_bits(nec_priv, IMR1, HR_DOIE | HR_DIIE, 0);
}
-static int ines_common_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ines_common_pci_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -852,7 +854,7 @@ static int ines_common_pci_attach(struct gpib_board *board, const gpib_board_con
return 0;
}
-int ines_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ines_pci_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -867,7 +869,7 @@ int ines_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
return 0;
}
-int ines_pci_accel_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ines_pci_accel_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -884,7 +886,7 @@ int ines_pci_accel_attach(struct gpib_board *board, const gpib_board_config_t *c
static const int ines_isa_iosize = 0x20;
-int ines_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ines_isa_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -915,7 +917,7 @@ int ines_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
return 0;
}
-void ines_pci_detach(struct gpib_board *board)
+static void ines_pci_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -949,7 +951,7 @@ void ines_pci_detach(struct gpib_board *board)
ines_free_private(board);
}
-void ines_isa_detach(struct gpib_board *board)
+static void ines_isa_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -990,48 +992,49 @@ static struct pci_driver ines_pci_driver = {
static const int ines_pcmcia_iosize = 0x20;
-/* The event() function is this driver's Card Services event handler.
- * It will be called by Card Services when an appropriate card status
- * event is received. The config() and release() entry points are
- * used to configure or release a socket, in response to card insertion
- * and ejection events. They are invoked from the gpib event
- * handler.
+/*
+ * The event() function is this driver's Card Services event handler.
+ * It will be called by Card Services when an appropriate card status
+ * event is received. The config() and release() entry points are
+ * used to configure or release a socket, in response to card insertion
+ * and ejection events. They are invoked from the gpib event
+ * handler.
*/
static int ines_gpib_config(struct pcmcia_device *link);
static void ines_gpib_release(struct pcmcia_device *link);
-static int ines_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static int ines_pcmcia_accel_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int ines_pcmcia_attach(struct gpib_board *board, const struct gpib_board_config *config);
+static int ines_pcmcia_accel_attach(struct gpib_board *board,
+ const struct gpib_board_config *config);
static void ines_pcmcia_detach(struct gpib_board *board);
-static irqreturn_t ines_pcmcia_interrupt(int irq, void *arg);
static int ines_common_pcmcia_attach(struct gpib_board *board);
/*
* A linked list of "instances" of the gpib device. Each actual
- * PCMCIA card corresponds to one device instance, and is described
- * by one dev_link_t structure (defined in ds.h).
+ * PCMCIA card corresponds to one device instance, and is described
+ * by one dev_link_t structure (defined in ds.h).
*
- * You may not want to use a linked list for this -- for example, the
- * memory card driver uses an array of dev_link_t pointers, where minor
- * device numbers are used to derive the corresponding array index.
+ * You may not want to use a linked list for this -- for example, the
+ * memory card driver uses an array of dev_link_t pointers, where minor
+ * device numbers are used to derive the corresponding array index.
*/
static struct pcmcia_device *curr_dev;
/*
- * A dev_link_t structure has fields for most things that are needed
- * to keep track of a socket, but there will usually be some device
- * specific information that also needs to be kept track of. The
- * 'priv' pointer in a dev_link_t structure can be used to point to
- * a device-specific private data structure, like this.
+ * A dev_link_t structure has fields for most things that are needed
+ * to keep track of a socket, but there will usually be some device
+ * specific information that also needs to be kept track of. The
+ * 'priv' pointer in a dev_link_t structure can be used to point to
+ * a device-specific private data structure, like this.
*
- * A driver needs to provide a dev_node_t structure for each device
- * on a card. In some cases, there is only one device per card (for
- * example, ethernet cards, modems). In other cases, there may be
- * many actual or logical devices (SCSI adapters, memory cards with
- * multiple partitions). The dev_node_t structures need to be kept
- * in a linked list starting at the 'dev' field of a dev_link_t
- * structure. We allocate them in the card's private data structure,
- * because they generally can't be allocated dynamically.
+ * A driver needs to provide a dev_node_t structure for each device
+ * on a card. In some cases, there is only one device per card (for
+ * example, ethernet cards, modems). In other cases, there may be
+ * many actual or logical devices (SCSI adapters, memory cards with
+ * multiple partitions). The dev_node_t structures need to be kept
+ * in a linked list starting at the 'dev' field of a dev_link_t
+ * structure. We allocate them in the card's private data structure,
+ * because they generally can't be allocated dynamically.
*/
struct local_info {
@@ -1042,13 +1045,13 @@ struct local_info {
};
/*
- * gpib_attach() creates an "instance" of the driver, allocating
- * local data structures for one device. The device is registered
- * with Card Services.
+ * gpib_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device. The device is registered
+ * with Card Services.
*
- * The dev_link structure is initialized, but we don't actually
- * configure the card at this point -- we wait until we receive a
- * card insertion event.
+ * The dev_link structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a
+ * card insertion event.
*/
static int ines_gpib_probe(struct pcmcia_device *link)
{
@@ -1079,10 +1082,10 @@ static int ines_gpib_probe(struct pcmcia_device *link)
}
/*
- * This deletes a driver "instance". The device is de-registered
- * with Card Services. If it has been released, all local data
- * structures are freed. Otherwise, the structures will be freed
- * when the device is released.
+ * This deletes a driver "instance". The device is de-registered
+ * with Card Services. If it has been released, all local data
+ * structures are freed. Otherwise, the structures will be freed
+ * when the device is released.
*/
static void ines_gpib_remove(struct pcmcia_device *link)
{
@@ -1103,18 +1106,15 @@ static int ines_gpib_config_iteration(struct pcmcia_device *link, void *priv_dat
}
/*
- * gpib_config() is scheduled to run after a CARD_INSERTION event
- * is received, to configure the PCMCIA socket, and to make the
- * device available to the system.
+ * gpib_config() is scheduled to run after a CARD_INSERTION event
+ * is received, to configure the PCMCIA socket, and to make the
+ * device available to the system.
*/
static int ines_gpib_config(struct pcmcia_device *link)
{
- struct local_info *dev;
int retval;
void __iomem *virt;
- dev = link->priv;
-
retval = pcmcia_loop_config(link, &ines_gpib_config_iteration, NULL);
if (retval) {
dev_warn(&link->dev, "no configuration found\n");
@@ -1125,8 +1125,9 @@ static int ines_gpib_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "ines_cs: manufacturer: 0x%x card: 0x%x\n",
link->manf_id, link->card_id);
- /* for the ines card we have to setup the configuration registers in
- * attribute memory here
+ /*
+	 * for the ines card we have to set up the configuration registers in
+ * attribute memory here
*/
link->resource[2]->flags |= WIN_MEMORY_TYPE_AM | WIN_DATA_WIDTH_8 | WIN_ENABLE;
link->resource[2]->end = 0x1000;
@@ -1159,9 +1160,9 @@ static int ines_gpib_config(struct pcmcia_device *link)
} /* gpib_config */
/*
- * After a card is removed, gpib_release() will unregister the net
- * device, and release the PCMCIA configuration. If the device is
- * still open, this will be postponed until it is closed.
+ * After a card is removed, gpib_release() will unregister the net
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
*/
static void ines_gpib_release(struct pcmcia_device *link)
@@ -1210,12 +1211,12 @@ static struct pcmcia_driver ines_gpib_cs_driver = {
.resume = ines_gpib_resume,
};
-void ines_pcmcia_cleanup_module(void)
+static void ines_pcmcia_cleanup_module(void)
{
pcmcia_unregister_driver(&ines_gpib_cs_driver);
}
-static gpib_interface_t ines_pcmcia_unaccel_interface = {
+static struct gpib_interface ines_pcmcia_unaccel_interface = {
.name = "ines_pcmcia_unaccel",
.attach = ines_pcmcia_attach,
.detach = ines_pcmcia_detach,
@@ -1243,7 +1244,7 @@ static gpib_interface_t ines_pcmcia_unaccel_interface = {
.return_to_local = ines_return_to_local,
};
-static gpib_interface_t ines_pcmcia_accel_interface = {
+static struct gpib_interface ines_pcmcia_accel_interface = {
.name = "ines_pcmcia_accel",
.attach = ines_pcmcia_accel_attach,
.detach = ines_pcmcia_detach,
@@ -1271,7 +1272,7 @@ static gpib_interface_t ines_pcmcia_accel_interface = {
.return_to_local = ines_return_to_local,
};
-static gpib_interface_t ines_pcmcia_interface = {
+static struct gpib_interface ines_pcmcia_interface = {
.name = "ines_pcmcia",
.attach = ines_pcmcia_accel_attach,
.detach = ines_pcmcia_detach,
@@ -1299,14 +1300,14 @@ static gpib_interface_t ines_pcmcia_interface = {
.return_to_local = ines_return_to_local,
};
-irqreturn_t ines_pcmcia_interrupt(int irq, void *arg)
+static irqreturn_t ines_pcmcia_interrupt(int irq, void *arg)
{
struct gpib_board *board = arg;
return ines_interrupt(board);
}
-int ines_common_pcmcia_attach(struct gpib_board *board)
+static int ines_common_pcmcia_attach(struct gpib_board *board)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -1345,7 +1346,7 @@ int ines_common_pcmcia_attach(struct gpib_board *board)
return 0;
}
-int ines_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ines_pcmcia_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -1360,7 +1361,8 @@ int ines_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *conf
return 0;
}
-int ines_pcmcia_accel_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ines_pcmcia_accel_attach(struct gpib_board *board,
+ const struct gpib_board_config *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -1375,7 +1377,7 @@ int ines_pcmcia_accel_attach(struct gpib_board *board, const gpib_board_config_t
return 0;
}
-void ines_pcmcia_detach(struct gpib_board *board)
+static void ines_pcmcia_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1484,7 +1486,7 @@ static void __exit ines_exit_module(void)
gpib_unregister_driver(&ines_pci_unaccel_interface);
gpib_unregister_driver(&ines_pci_accel_interface);
gpib_unregister_driver(&ines_isa_interface);
-#ifdef GPIB__PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
gpib_unregister_driver(&ines_pcmcia_interface);
gpib_unregister_driver(&ines_pcmcia_unaccel_interface);
gpib_unregister_driver(&ines_pcmcia_accel_interface);
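
The matching init path (not shown in this hunk) registers the same interfaces under the corrected CONFIG_GPIB_PCMCIA guard. A sketch, assuming the gpib_register_driver(interface, module) helper used by the other drivers in this tree; the function name demo_init_module is hypothetical:

static int __init demo_init_module(void)
{
	int ret;

	ret = gpib_register_driver(&ines_pci_interface, THIS_MODULE);
	if (ret)
		return ret;
#ifdef CONFIG_GPIB_PCMCIA	/* was misspelled GPIB__PCMCIA */
	ret = gpib_register_driver(&ines_pcmcia_interface, THIS_MODULE);
	if (ret) {
		gpib_unregister_driver(&ines_pci_interface);
		return ret;
	}
#endif
	return 0;
}
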
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index faf96e9cc4a1..3cf5037c0cd2 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -36,16 +36,16 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for LPVO usb devices");
/*
- * Table of devices that work with this driver.
+ * Table of devices that work with this driver.
*
- * Currently, only one device is known to be used in the
- * lpvo_usb_gpib adapter (FTDI 0403:6001).
- * If your adapter uses a different chip, insert a line
- * in the following table with proper <Vendor-id>, <Product-id>.
+ * Currently, only one device is known to be used in the
+ * lpvo_usb_gpib adapter (FTDI 0403:6001).
+ * If your adapter uses a different chip, insert a line
+ * in the following table with proper <Vendor-id>, <Product-id>.
*
- * To have your chip automatically handled by the driver,
- * update files "/usr/local/etc/modprobe.d/lpvo_usb_gpib.conf"
- * and /usr/local/etc/udev/rules.d/99-lpvo_usb_gpib.rules.
+ * To have your chip automatically handled by the driver,
+ * update files "/usr/local/etc/modprobe.d/lpvo_usb_gpib.conf"
+ * and /usr/local/etc/udev/rules.d/99-lpvo_usb_gpib.rules.
*
*/
@@ -56,18 +56,18 @@ static const struct usb_device_id skel_table[] = {
MODULE_DEVICE_TABLE(usb, skel_table);
/*
- * *** Diagnostics and Debug ***
- * To enable the diagnostic and debug messages either compile with DEBUG set
- * or control via the dynamic debug mechanisms.
- * The module parameter "debug" controls the sending of debug messages to
- * syslog. By default it is set to 0
- * debug = 0: only attach/detach messages are sent
- * 1: every action is logged
- * 2: extended logging; each single exchanged byte is documented
- * (about twice the log volume of [1])
- * To switch debug level:
- * At module loading: modprobe lpvo_usb_gpib debug={0,1,2}
- * On the fly: echo {0,1,2} > /sys/modules/lpvo_usb_gpib/parameters/debug
+ * *** Diagnostics and Debug ***
+ * To enable the diagnostic and debug messages either compile with DEBUG set
+ * or control via the dynamic debug mechanisms.
+ * The module parameter "debug" controls the sending of debug messages to
+ * syslog. By default it is set to 0
+ * debug = 0: only attach/detach messages are sent
+ * 1: every action is logged
+ * 2: extended logging; each single exchanged byte is documented
+ * (about twice the log volume of [1])
+ * To switch debug level:
+ * At module loading: modprobe lpvo_usb_gpib debug={0,1,2}
+ * On the fly: echo {0,1,2} > /sys/modules/lpvo_usb_gpib/parameters/debug
*/
static int debug;
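
The debug knob described above is exposed as a writable module parameter; a sketch of the typical declaration (the exact permission bits in the driver may differ):

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level: 0=attach/detach, 1=all actions, 2=per-byte logging");
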
@@ -169,10 +169,10 @@ static void show_status(struct gpib_board *board)
}
/*
- * GLOBAL VARIABLES: required for
- * pairing among gpib minor and usb minor.
- * MAX_DEV is the max number of usb-gpib adapters; free
- * to change as you like, but no more than 32
+ * GLOBAL VARIABLES: required for
+ * pairing among gpib minor and usb minor.
+ * MAX_DEV is the max number of usb-gpib adapters; free
+ * to change as you like, but no more than 32
*/
#define MAX_DEV 8
@@ -182,7 +182,7 @@ static int assigned_usb_minors; /* mask of filled slots */
static struct mutex minors_lock; /* operations on usb_minors are to be protected */
/*
- * usb-skeleton prototypes
+ * usb-skeleton prototypes
*/
struct usb_skel;
@@ -192,7 +192,7 @@ static int skel_do_open(struct gpib_board *, int);
static int skel_do_release(struct gpib_board *);
/*
- * usec_diff : take difference in MICROsec between two 'timespec'
+ * usec_diff : take difference in MICROsec between two 'timespec'
* (unix time in sec and NANOsec)
*/
@@ -203,7 +203,7 @@ static inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
}
/*
- * *** these routines are specific to the usb-gpib adapter ***
+ * *** these routines are specific to the usb-gpib adapter ***
*/
/**
@@ -262,13 +262,11 @@ static int send_command(struct gpib_board *board, char *msg, int leng)
}
/*
- *
* set_control_line() - Set the value of a single gpib control line
*
* @board: the gpib_board_struct data area for this gpib interface
* @line: line mask
* @value: line new value (0/1)
- *
*/
static int set_control_line(struct gpib_board *board, int line, int value)
@@ -368,7 +366,7 @@ static void set_timeout(struct gpib_board *board)
}
/*
- * now the standard interface functions - attach and detach
+ * now the standard interface functions - attach and detach
*/
/**
@@ -384,7 +382,7 @@ static void set_timeout(struct gpib_board *board)
* detach() will be called. Always.
*/
-static int usb_gpib_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int usb_gpib_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
int retval, j;
u32 base = config->ibbase;
@@ -464,7 +462,8 @@ static int usb_gpib_attach(struct gpib_board *board, const gpib_board_config_t *
if (retval != ACK)
return -EIO;
- /* We must setup debug mode because we need the extended instruction
+ /*
+	 * We must set up debug mode because we need the extended instruction
* set to cope with the Core (gpib_common) point of view
*/
@@ -473,7 +472,8 @@ static int usb_gpib_attach(struct gpib_board *board, const gpib_board_config_t *
if (retval != ACK)
return -EIO;
- /* We must keep REN off after an IFC because so it is
+ /*
+	 * We must keep REN off after an IFC because that is what is
* assumed by the Core
*/
@@ -654,7 +654,8 @@ static int usb_gpib_line_status(const struct gpib_board *board)
DIA_LOG(1, "%s\n", "request");
- /* if we are on the wait queue (board->wait), do not hurry
+ /*
+ * if we are on the wait queue (board->wait), do not hurry
* reading status line; instead, pause a little
*/
@@ -705,9 +706,10 @@ static int usb_gpib_line_status(const struct gpib_board *board)
/* parallel_poll */
-static int usb_gpib_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int usb_gpib_parallel_poll(struct gpib_board *board, u8 *result)
{
- /* request parallel poll asserting ATN | EOI;
+ /*
+ * request parallel poll asserting ATN | EOI;
* we suppose ATN already asserted
*/
@@ -909,15 +911,13 @@ static void usb_gpib_remote_enable(struct gpib_board *board, int enable)
/* request_system_control */
-static void usb_gpib_request_system_control(struct gpib_board *board,
- int request_control)
+static int usb_gpib_request_system_control(struct gpib_board *board, int request_control)
{
- if (request_control)
- set_bit(CIC_NUM, &board->status);
- else
- clear_bit(CIC_NUM, &board->status);
+ if (!request_control)
+ return -EINVAL;
DIA_LOG(1, "done with %d -> %lx\n", request_control, board->status);
+ return 0;
}
/* take_control */
@@ -997,7 +997,7 @@ static int usb_gpib_write(struct gpib_board *board,
/* parallel_poll configure */
static void usb_gpib_parallel_poll_configure(struct gpib_board *board,
- uint8_t configuration)
+ u8 configuration)
{
}
@@ -1031,13 +1031,13 @@ static int usb_gpib_secondary_address(struct gpib_board *board,
/* serial_poll_response */
-static void usb_gpib_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void usb_gpib_serial_poll_response(struct gpib_board *board, u8 status)
{
}
/* serial_poll_status */
-static uint8_t usb_gpib_serial_poll_status(struct gpib_board *board)
+static u8 usb_gpib_serial_poll_status(struct gpib_board *board)
{
return 0;
}
@@ -1053,7 +1053,7 @@ static int usb_gpib_t1_delay(struct gpib_board *board, unsigned int nano_sec)
* *** module dispatch table and init/exit functions ***
*/
-static gpib_interface_t usb_gpib_interface = {
+static struct gpib_interface usb_gpib_interface = {
.name = NAME,
.attach = usb_gpib_attach,
.detach = usb_gpib_detach,
@@ -1083,13 +1083,13 @@ static gpib_interface_t usb_gpib_interface = {
};
/*
- * usb_gpib_init_module(), usb_gpib_exit_module()
+ * usb_gpib_init_module(), usb_gpib_exit_module()
*
- * This functions are called every time a new device is detected
- * and registered or is removed and unregistered.
- * We must take note of created and destroyed usb minors to be used
- * when usb_gpib_attach() and usb_gpib_detach() will be called on
- * request by gpib_config.
+ * These functions are called every time a new device is detected
+ * and registered or is removed and unregistered.
+ * We must take note of created and destroyed usb minors to be used
+ * when usb_gpib_attach() and usb_gpib_detach() will be called on
+ * request by gpib_config.
*/
static int usb_gpib_init_module(struct usb_interface *interface)
@@ -1107,8 +1107,9 @@ static int usb_gpib_init_module(struct usb_interface *interface)
goto exit;
}
} else {
- /* check if minor is already registered - maybe useless, but if
- * it happens the code is inconsistent somewhere
+ /*
+ * check if minor is already registered - maybe useless, but if
+ * it happens the code is inconsistent somewhere
*/
for (j = 0 ; j < MAX_DEV ; j++) {
@@ -1162,12 +1163,11 @@ exit:
}
/*
- * Default latency time (16 msec) is too long.
- * We must use 1 msec (best); anyhow, no more than 5 msec.
- *
- * Defines and function taken and modified from the kernel tree
- * (see ftdi_sio.h and ftdi_sio.c).
+ * Default latency time (16 msec) is too long.
+ * We must use 1 msec (best); anyhow, no more than 5 msec.
*
+ * Defines and function taken and modified from the kernel tree
+ * (see ftdi_sio.h and ftdi_sio.c).
*/
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
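A sketch of the resulting vendor control request, assuming the 0x40 (vendor, host-to-device) request type used by ftdi_sio; the latency in milliseconds travels in wValue and the port index in wIndex:

        rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                             FTDI_SIO_SET_LATENCY_TIMER, 0x40,
                             1 /* msec */, 0 /* port */,
                             NULL, 0, USB_CTRL_SET_TIMEOUT);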
@@ -1235,7 +1235,8 @@ static int write_latency_timer(struct usb_device *udev)
/* private defines */
#define MAX_TRANSFER (PAGE_SIZE - 512)
-/* MAX_TRANSFER is chosen so that the VM is not stressed by
+/*
+ * MAX_TRANSFER is chosen so that the VM is not stressed by
* allocations > PAGE_SIZE and the number of packets in a page
 * is an integer. 512 is the largest possible packet on EHCI
*/
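For instance, with 4 KiB pages this gives MAX_TRANSFER = 4096 - 512 = 3584 bytes, i.e. exactly seven full 512-byte EHCI packets per transfer.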
@@ -1280,7 +1281,7 @@ static void skel_delete(struct kref *kref)
}
/*
- * skel_do_open() - to be called by usb_gpib_attach
+ * skel_do_open() - to be called by usb_gpib_attach
*/
static int skel_do_open(struct gpib_board *board, int subminor)
@@ -1317,7 +1318,7 @@ exit:
}
/*
- * skel_do_release() - to be called by usb_gpib_detach
+ * skel_do_release() - to be called by usb_gpib_detach
*/
static int skel_do_release(struct gpib_board *board)
@@ -1340,7 +1341,7 @@ static int skel_do_release(struct gpib_board *board)
}
/*
- * read functions
+ * read functions
*/
static void skel_read_bulk_callback(struct urb *urb)
@@ -1405,7 +1406,7 @@ static int skel_do_read_io(struct usb_skel *dev, size_t count)
}
/*
- * skel_do_read() - read operations from lpvo_usb_gpib
+ * skel_do_read() - read operations from lpvo_usb_gpib
*/
static ssize_t skel_do_read(struct usb_skel *dev, char *buffer, size_t count)
@@ -1482,7 +1483,8 @@ retry:
* all data has been used
* actual IO needs to be done
*/
- /* it seems that requests for less than dev->bulk_in_size
+ /*
+ * it seems that requests for less than dev->bulk_in_size
* are not accepted
*/
rv = skel_do_read_io(dev, dev->bulk_in_size);
@@ -1496,7 +1498,8 @@ retry:
* data is available - chunk tells us how much shall be copied
*/
- /* Condition dev->bulk_in_copied > 0 maybe will never happen. In case,
+ /*
+ * The condition dev->bulk_in_copied > 0 may never happen. In case it does,
* signal the event and copy using the original procedure, i.e., copy
* first two bytes also
*/
@@ -1551,7 +1554,7 @@ exit:
}
/*
- * write functions
+ * write functions
*/
static void skel_write_bulk_callback(struct urb *urb)
@@ -1581,7 +1584,7 @@ static void skel_write_bulk_callback(struct urb *urb)
}
/*
- * skel_do_write() - write operations from lpvo_usb_gpib
+ * skel_do_write() - write operations from lpvo_usb_gpib
*/
static ssize_t skel_do_write(struct usb_skel *dev, const char *buffer, size_t count)
@@ -1686,7 +1689,7 @@ exit:
}
/*
- * services for the user space devices
+ * services for the user space devices
*/
#if USER_DEVICE /* conditional compilation of user space device */
@@ -1771,7 +1774,7 @@ static int skel_release(struct inode *inode, struct file *file)
}
/*
- * user space access to read function
+ * user space access to read function
*/
static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
@@ -1800,7 +1803,7 @@ static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
}
/*
- * user space access to write function
+ * user space access to write function
*/
static ssize_t skel_write(struct file *file, const char __user *user_buffer,
diff --git a/drivers/staging/gpib/nec7210/nec7210.c b/drivers/staging/gpib/nec7210/nec7210.c
index 846c0a3fa1dc..34a1cae4f486 100644
--- a/drivers/staging/gpib/nec7210/nec7210.c
+++ b/drivers/staging/gpib/nec7210/nec7210.c
@@ -23,7 +23,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB library code for NEC uPD7210");
-int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv, uint8_t eos_byte,
+int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv, u8 eos_byte,
int compare_8_bits)
{
write_byte(priv, eos_byte, EOSR);
@@ -44,7 +44,7 @@ void nec7210_disable_eos(struct gpib_board *board, struct nec7210_priv *priv)
}
EXPORT_SYMBOL(nec7210_disable_eos);
-int nec7210_parallel_poll(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *result)
+int nec7210_parallel_poll(struct gpib_board *board, struct nec7210_priv *priv, u8 *result)
{
int ret;
@@ -79,14 +79,15 @@ void nec7210_parallel_poll_response(struct gpib_board *board, struct nec7210_pri
write_byte(priv, AUX_CPPF, AUXMR);
}
EXPORT_SYMBOL(nec7210_parallel_poll_response);
-/* This is really only adequate for chips that do a 488.2 style reqt/reqf
+/*
+ * This is really only adequate for chips that do a 488.2 style reqt/reqf
* based on bit 6 of the SPMR (see chapter 11.3.3 of 488.2). For simpler chips that simply
* set rsv directly based on bit 6, we either need to do more hardware setup to expose
* the 488.2 capability (for example with NI chips), or we need to implement the
* 488.2 set srv state machine in the driver (if that is even viable).
*/
void nec7210_serial_poll_response(struct gpib_board *board,
- struct nec7210_priv *priv, uint8_t status)
+ struct nec7210_priv *priv, u8 status)
{
unsigned long flags;
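For chips that derive reqt/reqf themselves, writing the status byte to the SPMR is enough; the explicit 488.2 state machine looks roughly like the sketch below (cf. tnt4882_serial_poll_response2() later in this series), with bit 6 of the status byte as the rsv bit:

        if (status & 0x40)
                write_byte(priv, AUX_REQT, AUXMR);      /* request service */
        else
                write_byte(priv, AUX_REQF, AUXMR);      /* retract request */
        write_byte(priv, status, SPMR);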
@@ -103,7 +104,7 @@ void nec7210_serial_poll_response(struct gpib_board *board,
}
EXPORT_SYMBOL(nec7210_serial_poll_response);
-uint8_t nec7210_serial_poll_status(struct gpib_board *board, struct nec7210_priv *priv)
+u8 nec7210_serial_poll_status(struct gpib_board *board, struct nec7210_priv *priv)
{
return read_byte(priv, SPSR);
}
@@ -202,7 +203,8 @@ unsigned int nec7210_update_status_nolock(struct gpib_board *board, struct nec72
set_bit(SPOLL_NUM, &board->status);
}
- /* we rely on the interrupt handler to set the
+ /*
+ * we rely on the interrupt handler to set the
* rest of the status bits
*/
@@ -251,7 +253,7 @@ void nec7210_set_handshake_mode(struct gpib_board *board, struct nec7210_priv *p
}
EXPORT_SYMBOL(nec7210_set_handshake_mode);
-uint8_t nec7210_read_data_in(struct gpib_board *board, struct nec7210_priv *priv, int *end)
+u8 nec7210_read_data_in(struct gpib_board *board, struct nec7210_priv *priv, int *end)
{
unsigned long flags;
u8 data;
@@ -330,14 +332,15 @@ int nec7210_go_to_standby(struct gpib_board *board, struct nec7210_priv *priv)
}
EXPORT_SYMBOL(nec7210_go_to_standby);
-void nec7210_request_system_control(struct gpib_board *board, struct nec7210_priv *priv,
- int request_control)
+int nec7210_request_system_control(struct gpib_board *board, struct nec7210_priv *priv,
+ int request_control)
{
if (request_control == 0) {
write_byte(priv, AUX_CREN, AUXMR);
write_byte(priv, AUX_CIFC, AUXMR);
write_byte(priv, AUX_DSC, AUXMR);
}
+ return 0;
}
EXPORT_SYMBOL(nec7210_request_system_control);
@@ -415,7 +418,7 @@ static inline short nec7210_atn_has_changed(struct gpib_board *board, struct nec
return -1;
}
-int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, uint8_t
+int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, u8
*buffer, size_t length, size_t *bytes_written)
{
int retval = 0;
@@ -464,7 +467,7 @@ int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, uint8_t
}
EXPORT_SYMBOL(nec7210_command);
-static int pio_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
+static int pio_read(struct gpib_board *board, struct nec7210_priv *priv, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -482,7 +485,8 @@ static int pio_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t
}
if (test_bit(READ_READY_BN, &priv->state)) {
if (*bytes_read == 0) {
- /* We set the handshake mode here because we know
+ /*
+ * We set the handshake mode here because we know
* no new bytes will arrive (it has already arrived
* and is awaiting being read out of the chip) while we are changing
* modes. This ensures we can reliably keep track
@@ -568,7 +572,7 @@ static ssize_t __dma_read(struct gpib_board *board, struct nec7210_priv *priv, s
return retval ? retval : count;
}
-static ssize_t dma_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
+static ssize_t dma_read(struct gpib_board *board, struct nec7210_priv *priv, u8 *buffer,
size_t length)
{
size_t remain = length;
@@ -595,7 +599,7 @@ static ssize_t dma_read(struct gpib_board *board, struct nec7210_priv *priv, uin
}
#endif
-int nec7210_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_read(struct gpib_board *board, struct nec7210_priv *priv, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -642,7 +646,7 @@ static int pio_write_wait(struct gpib_board *board, struct nec7210_priv *priv,
return 0;
}
-static int pio_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
+static int pio_write(struct gpib_board *board, struct nec7210_priv *priv, u8 *buffer,
size_t length, size_t *bytes_written)
{
size_t last_count = 0;
@@ -662,7 +666,8 @@ static int pio_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_
if (retval == -EIO) {
/* resend last byte on bus error */
*bytes_written = last_count;
- /* we can get unrecoverable bus errors,
+ /*
+ * we can get unrecoverable bus errors,
* so give up after a while
*/
bus_error_count++;
@@ -742,7 +747,7 @@ static ssize_t __dma_write(struct gpib_board *board, struct nec7210_priv *priv,
return retval ? retval : length;
}
-static ssize_t dma_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
+static ssize_t dma_write(struct gpib_board *board, struct nec7210_priv *priv, u8 *buffer,
size_t length)
{
size_t remain = length;
@@ -767,7 +772,7 @@ static ssize_t dma_write(struct gpib_board *board, struct nec7210_priv *priv, ui
}
#endif
int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv,
- uint8_t *buffer, size_t length, int send_eoi,
+ u8 *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
int retval = 0;
@@ -805,7 +810,8 @@ int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv,
if (send_eoi) {
size_t num_bytes;
- /* We need to wait to make sure we will immediately be able to write the data byte
+ /*
+ * We need to wait to make sure we will immediately be able to write the data byte
* into the chip before sending the associated AUX_SEOI command. This is really
* only needed for length==1 since otherwise the earlier calls to pio_write
 * will have done the wait already.
@@ -827,7 +833,7 @@ int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv,
EXPORT_SYMBOL(nec7210_write);
/*
- * interrupt service routine
+ * interrupt service routine
*/
irqreturn_t nec7210_interrupt(struct gpib_board *board, struct nec7210_priv *priv)
{
@@ -932,13 +938,13 @@ irqreturn_t nec7210_interrupt_have_status(struct gpib_board *board,
// ignore device clear events if we are controller in charge
if ((address_status_bits & HR_CIC) == 0) {
- push_gpib_event(board, EventDevClr);
+ push_gpib_event(board, EVENT_DEV_CLR);
set_bit(DEV_CLEAR_BN, &priv->state);
}
}
if (status1 & HR_DET)
- push_gpib_event(board, EventDevTrg);
+ push_gpib_event(board, EVENT_DEV_TRG);
// Addressing status has changed
if (status2 & HR_ADSC)
@@ -1012,16 +1018,17 @@ EXPORT_SYMBOL(nec7210_board_online);
#ifdef CONFIG_HAS_IOPORT
/* wrappers for io */
-uint8_t nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num)
+u8 nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
return inb(priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_ioport_read_byte);
-void nec7210_ioport_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned int register_num)
+void nec7210_ioport_write_byte(struct nec7210_priv *priv, u8 data, unsigned int register_num)
{
if (register_num == AUXMR)
- /* locking makes absolutely sure noone accesses the
+ /*
+ * locking makes absolutely sure no one accesses the
* AUXMR register faster than once per microsecond
*/
nec7210_locking_ioport_write_byte(priv, data, register_num);
@@ -1031,7 +1038,7 @@ void nec7210_ioport_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned
EXPORT_SYMBOL(nec7210_ioport_write_byte);
/* locking variants of io wrappers, for chips that page-in registers */
-uint8_t nec7210_locking_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num)
+u8 nec7210_locking_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
u8 retval;
unsigned long flags;
@@ -1043,7 +1050,7 @@ uint8_t nec7210_locking_ioport_read_byte(struct nec7210_priv *priv, unsigned int
}
EXPORT_SYMBOL(nec7210_locking_ioport_read_byte);
-void nec7210_locking_ioport_write_byte(struct nec7210_priv *priv, uint8_t data,
+void nec7210_locking_ioport_write_byte(struct nec7210_priv *priv, u8 data,
unsigned int register_num)
{
unsigned long flags;
@@ -1057,16 +1064,17 @@ void nec7210_locking_ioport_write_byte(struct nec7210_priv *priv, uint8_t data,
EXPORT_SYMBOL(nec7210_locking_ioport_write_byte);
#endif
-uint8_t nec7210_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num)
+u8 nec7210_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
return readb(priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL(nec7210_iomem_read_byte);
-void nec7210_iomem_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned int register_num)
+void nec7210_iomem_write_byte(struct nec7210_priv *priv, u8 data, unsigned int register_num)
{
if (register_num == AUXMR)
- /* locking makes absolutely sure noone accesses the
+ /*
+ * locking makes absolutely sure no one accesses the
* AUXMR register faster than once per microsecond
*/
nec7210_locking_iomem_write_byte(priv, data, register_num);
@@ -1075,7 +1083,7 @@ void nec7210_iomem_write_byte(struct nec7210_priv *priv, uint8_t data, unsigned
}
EXPORT_SYMBOL(nec7210_iomem_write_byte);
-uint8_t nec7210_locking_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num)
+u8 nec7210_locking_iomem_read_byte(struct nec7210_priv *priv, unsigned int register_num)
{
u8 retval;
unsigned long flags;
@@ -1087,7 +1095,7 @@ uint8_t nec7210_locking_iomem_read_byte(struct nec7210_priv *priv, unsigned int
}
EXPORT_SYMBOL(nec7210_locking_iomem_read_byte);
-void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, uint8_t data,
+void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, u8 data,
unsigned int register_num)
{
unsigned long flags;
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
index 9f1b9927f025..7cf25c95787f 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
@@ -74,7 +74,8 @@ static unsigned short ni_usb_timeout_code(unsigned int usec)
return 0xff;
else if (usec <= 300000000)
return 0x01;
- /* NI driver actually uses 0xff for timeout T1000s, which is a bug in their code.
+ /*
+ * NI driver actually uses 0xff for timeout T1000s, which is a bug in their code.
* I've verified on a usb-b that a code of 0x2 is correct for a 1000 sec timeout
*/
else if (usec <= 1000000000)
@@ -92,7 +93,8 @@ static void ni_usb_bulk_complete(struct urb *urb)
static void ni_usb_timeout_handler(struct timer_list *t)
{
- struct ni_usb_priv *ni_priv = from_timer(ni_priv, t, bulk_timer);
+ struct ni_usb_priv *ni_priv = timer_container_of(ni_priv, t,
+ bulk_timer);
struct ni_usb_urb_ctx *context = &ni_priv->context;
context->timed_out = 1;
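timer_container_of() is the current name for what was from_timer(): a container_of() wrapper that recovers the structure embedding a struct timer_list from the callback argument. A generic sketch with a hypothetical structure:

        struct my_dev {
                struct timer_list watchdog;
        };

        static void my_watchdog_fn(struct timer_list *t)
        {
                struct my_dev *dev = timer_container_of(dev, t, watchdog);
                /* handle the expiry using dev */
        }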
@@ -232,7 +234,8 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
mutex_unlock(&ni_priv->bulk_transfer_lock);
if (interruptible) {
if (wait_for_completion_interruptible(&context->complete)) {
- /* If we got interrupted by a signal while
+ /*
+ * If we got interrupted by a signal while
* waiting for the usb gpib to respond, we
* should send a stop command so it will
* finish up with whatever it was doing and
@@ -240,8 +243,9 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
*/
ni_usb_stop(ni_priv);
retval = -ERESTARTSYS;
- /* now do an uninterruptible wait, it shouldn't take long
- * for the board to respond now.
+ /*
+ * now do an uninterruptible wait; it shouldn't take long
+ * for the board to respond now.
*/
wait_for_completion(&context->complete);
}
@@ -586,7 +590,7 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
}
// interface functions
-static int ni_usb_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int ni_usb_read(struct gpib_board *board, u8 *buffer, size_t length,
int *end, size_t *bytes_read)
{
int retval, parse_retval;
@@ -684,7 +688,8 @@ static int ni_usb_read(struct gpib_board *board, uint8_t *buffer, size_t length,
retval = 0;
break;
case NIUSB_ABORTED_ERROR:
- /* this is expected if ni_usb_receive_bulk_msg got
+ /*
+ * this is expected if ni_usb_receive_bulk_msg got
* interrupted by a signal and returned -ERESTARTSYS
*/
break;
@@ -716,7 +721,7 @@ static int ni_usb_read(struct gpib_board *board, uint8_t *buffer, size_t length,
return retval;
}
-static int ni_usb_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int ni_usb_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
int retval;
@@ -794,7 +799,8 @@ static int ni_usb_write(struct gpib_board *board, uint8_t *buffer, size_t length
retval = 0;
break;
case NIUSB_ABORTED_ERROR:
- /* this is expected if ni_usb_receive_bulk_msg got
+ /*
+ * this is expected if ni_usb_receive_bulk_msg got
* interrupted by a signal and returned -ERESTARTSYS
*/
break;
@@ -819,7 +825,7 @@ static int ni_usb_write(struct gpib_board *board, uint8_t *buffer, size_t length
return retval;
}
-static int ni_usb_command_chunk(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int ni_usb_command_chunk(struct gpib_board *board, u8 *buffer, size_t length,
size_t *command_bytes_written)
{
int retval;
@@ -893,7 +899,8 @@ static int ni_usb_command_chunk(struct gpib_board *board, uint8_t *buffer, size_
case NIUSB_NO_ERROR:
break;
case NIUSB_ABORTED_ERROR:
- /* this is expected if ni_usb_receive_bulk_msg got
+ /*
+ * this is expected if ni_usb_receive_bulk_msg got
* interrupted by a signal and returned -ERESTARTSYS
*/
break;
@@ -912,7 +919,7 @@ static int ni_usb_command_chunk(struct gpib_board *board, uint8_t *buffer, size_
return 0;
}
-static int ni_usb_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int ni_usb_command(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
size_t count;
@@ -1049,7 +1056,7 @@ static int ni_usb_go_to_standby(struct gpib_board *board)
return 0;
}
-static void ni_usb_request_system_control(struct gpib_board *board, int request_control)
+static int ni_usb_request_system_control(struct gpib_board *board, int request_control)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1059,7 +1066,7 @@ static void ni_usb_request_system_control(struct gpib_board *board, int request_
unsigned int ibsta;
if (!ni_priv->bus_interface)
- return; // -ENODEV;
+ return -ENODEV;
usb_dev = interface_to_usbdev(ni_priv->bus_interface);
if (request_control) {
writes[i].device = NIUSB_SUBDEV_TNT4882;
@@ -1091,12 +1098,12 @@ static void ni_usb_request_system_control(struct gpib_board *board, int request_
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
- return; // retval;
+ return retval;
}
if (!request_control)
ni_priv->ren_state = 0;
ni_usb_soft_update_status(board, ibsta, 0);
- return; // 0;
+ return 0;
}
//FIXME maybe the interface should have a "pulse interface clear" function that can return an error?
@@ -1176,7 +1183,7 @@ static void ni_usb_remote_enable(struct gpib_board *board, int enable)
return;// 0;
}
-static int ni_usb_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int ni_usb_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1192,8 +1199,9 @@ static int ni_usb_enable_eos(struct gpib_board *board, uint8_t eos_byte, int com
static void ni_usb_disable_eos(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv = board->private_data;
- /* adapter gets unhappy if you don't zero all the bits
- * for the eos mode and eos char (returns error 4 on reads).
+ /*
+ * adapter gets unhappy if you don't zero all the bits
+ * for the eos mode and eos char (returns error 4 on reads).
*/
ni_priv->eos_mode = 0;
ni_priv->eos_char = 0;
@@ -1334,7 +1342,7 @@ static int ni_usb_secondary_address(struct gpib_board *board, unsigned int addre
return 0;
}
-static int ni_usb_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int ni_usb_parallel_poll(struct gpib_board *board, u8 *result)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1389,7 +1397,7 @@ static int ni_usb_parallel_poll(struct gpib_board *board, uint8_t *result)
return retval;
}
-static void ni_usb_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void ni_usb_parallel_poll_configure(struct gpib_board *board, u8 config)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1467,7 +1475,7 @@ static void ni_usb_serial_poll_response(struct gpib_board *board, u8 status)
return;// 0;
}
-static uint8_t ni_usb_serial_poll_status(struct gpib_board *board)
+static u8 ni_usb_serial_poll_status(struct gpib_board *board)
{
return 0;
}
@@ -2045,8 +2053,10 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
unexpected = 1;
}
++j;
- // MC usb-488 (and sometimes NI-USB-HS?) sends 0x8 here; MC usb-488A sends 0x7 here
- // NI-USB-HS+ sends 0x0
+ /*
+ * MC usb-488 (and sometimes NI-USB-HS?) sends 0x8 here; MC usb-488A sends 0x7 here
+ * NI-USB-HS+ sends 0x0
+ */
if (buffer[j] != 0x1 && buffer[j] != 0x8 && buffer[j] != 0x7 && buffer[j] != 0x0) {
// [3]
dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0, 0x1, 0x7 or 0x8\n",
@@ -2127,7 +2137,8 @@ ready_out:
return retval;
}
-/* This does some extra init for HS+ models, as observed on Windows. One of the
+/*
+ * This does some extra init for HS+ models, as observed on Windows. One of the
* control requests causes the LED to stop blinking.
* I'm not sure what the other 2 requests do. None of these requests are actually required
* for the adapter to work, maybe they do some init for the analyzer interface
@@ -2198,14 +2209,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv)
}
static inline int ni_usb_device_match(struct usb_interface *interface,
- const gpib_board_config_t *config)
+ const struct gpib_board_config *config)
{
if (gpib_match_device_path(&interface->dev, config->device_path) == 0)
return 0;
return 1;
}
-static int ni_usb_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ni_usb_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
int retval;
int i, index;
@@ -2343,8 +2354,10 @@ static void ni_usb_detach(struct gpib_board *board)
struct ni_usb_priv *ni_priv;
mutex_lock(&ni_usb_hotplug_lock);
-// under windows, software unplug does chip_reset nec7210 aux command,
-// then writes 0x0 to address 0x10 of device 3
+ /*
+ * under windows, software unplug does chip_reset nec7210 aux command,
+ * then writes 0x0 to address 0x10 of device 3
+ */
ni_priv = board->private_data;
if (ni_priv) {
if (ni_priv->bus_interface) {
@@ -2361,7 +2374,7 @@ static void ni_usb_detach(struct gpib_board *board)
mutex_unlock(&ni_usb_hotplug_lock);
}
-static gpib_interface_t ni_usb_gpib_interface = {
+static struct gpib_interface ni_usb_gpib_interface = {
.name = "ni_usb_b",
.attach = ni_usb_attach,
.detach = ni_usb_detach,
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h b/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
index 4b297db09a9b..b011e131201c 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
@@ -113,27 +113,37 @@ enum ni_usb_bulk_ids {
enum ni_usb_error_codes {
NIUSB_NO_ERROR = 0,
- /* NIUSB_ABORTED_ERROR occurs when I/O is interrupted early by
- * doing a NI_USB_STOP_REQUEST on the control endpoint.
+ /*
+ * NIUSB_ABORTED_ERROR occurs when I/O is interrupted early by
+ * doing a NI_USB_STOP_REQUEST on the control endpoint.
*/
NIUSB_ABORTED_ERROR = 1,
- // NIUSB_READ_ATN_ERROR occurs when you do a board read while
- // ATN is set
+ /*
+ * NIUSB_READ_ATN_ERROR occurs when you do a board read while
+ * ATN is set
+ */
NIUSB_ATN_STATE_ERROR = 2,
- // NIUSB_ADDRESSING_ERROR occurs when you do a board
- // read/write as CIC but are not in LACS/TACS
+ /*
+ * NIUSB_ADDRESSING_ERROR occurs when you do a board
+ * read/write as CIC but are not in LACS/TACS
+ */
NIUSB_ADDRESSING_ERROR = 3,
- /* NIUSB_EOSMODE_ERROR occurs on reads if any eos mode or char
+ /*
+ * NIUSB_EOSMODE_ERROR occurs on reads if any eos mode or char
* bits are set when REOS is not set.
* Have also seen error 4 if you try to send more than 16
* command bytes at once on a usb-b.
*/
NIUSB_EOSMODE_ERROR = 4,
- // NIUSB_NO_BUS_ERROR occurs when you try to write a command
- // byte but there are no devices connected to the gpib bus
+ /*
+ * NIUSB_NO_BUS_ERROR occurs when you try to write a command
+ * byte but there are no devices connected to the gpib bus
+ */
NIUSB_NO_BUS_ERROR = 5,
- // NIUSB_NO_LISTENER_ERROR occurs when you do a board write as
- // CIC with no listener
+ /*
+ * NIUSB_NO_LISTENER_ERROR occurs when you do a board write as
+ * CIC with no listener
+ */
NIUSB_NO_LISTENER_ERROR = 8,
// get NIUSB_TIMEOUT_ERROR on board read/write timeout
NIUSB_TIMEOUT_ERROR = 10,
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/staging/gpib/pc2/pc2_gpib.c
index 96d3c09f2273..2282492025b7 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/staging/gpib/pc2/pc2_gpib.c
@@ -90,7 +90,7 @@ irqreturn_t pc2a_interrupt(int irq, void *arg)
}
// wrappers for interface functions
-static int pc2_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+static int pc2_read(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct pc2_priv *priv = board->private_data;
@@ -98,7 +98,7 @@ static int pc2_read(struct gpib_board *board, uint8_t *buffer, size_t length, in
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int pc2_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+static int pc2_write(struct gpib_board *board, u8 *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct pc2_priv *priv = board->private_data;
@@ -106,7 +106,8 @@ static int pc2_write(struct gpib_board *board, uint8_t *buffer, size_t length, i
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int pc2_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int pc2_command(struct gpib_board *board, u8 *buffer,
+ size_t length, size_t *bytes_written)
{
struct pc2_priv *priv = board->private_data;
@@ -127,11 +128,11 @@ static int pc2_go_to_standby(struct gpib_board *board)
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void pc2_request_system_control(struct gpib_board *board, int request_control)
+static int pc2_request_system_control(struct gpib_board *board, int request_control)
{
struct pc2_priv *priv = board->private_data;
- nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
+ return nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
static void pc2_interface_clear(struct gpib_board *board, int assert)
@@ -148,7 +149,7 @@ static void pc2_remote_enable(struct gpib_board *board, int enable)
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int pc2_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int pc2_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct pc2_priv *priv = board->private_data;
@@ -183,14 +184,14 @@ static int pc2_secondary_address(struct gpib_board *board, unsigned int address,
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int pc2_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int pc2_parallel_poll(struct gpib_board *board, u8 *result)
{
struct pc2_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void pc2_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void pc2_parallel_poll_configure(struct gpib_board *board, u8 config)
{
struct pc2_priv *priv = board->private_data;
@@ -204,14 +205,14 @@ static void pc2_parallel_poll_response(struct gpib_board *board, int ist)
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void pc2_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void pc2_serial_poll_response(struct gpib_board *board, u8 status)
{
struct pc2_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t pc2_serial_poll_status(struct gpib_board *board)
+static u8 pc2_serial_poll_status(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
@@ -251,7 +252,7 @@ static void free_private(struct gpib_board *board)
board->private_data = NULL;
}
-static int pc2_generic_attach(struct gpib_board *board, const gpib_board_config_t *config,
+static int pc2_generic_attach(struct gpib_board *board, const struct gpib_board_config *config,
enum nec7210_chipset chipset)
{
struct pc2_priv *pc2_priv;
@@ -267,8 +268,9 @@ static int pc2_generic_attach(struct gpib_board *board, const gpib_board_config_
nec_priv->type = chipset;
#ifndef PC2_DMA
- /* board->dev hasn't been initialized, so forget about DMA until this driver
- * is adapted to use isa_register_driver.
+ /*
+ * board->dev hasn't been initialized, so forget about DMA until this driver
+ * is adapted to use isa_register_driver.
*/
if (config->ibdma)
// driver needs to be adapted to use isa_register_driver to get a struct device*
@@ -294,7 +296,7 @@ static int pc2_generic_attach(struct gpib_board *board, const gpib_board_config_
return 0;
}
-static int pc2_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int pc2_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
int isr_flags = 0;
struct pc2_priv *pc2_priv;
@@ -365,7 +367,7 @@ static void pc2_detach(struct gpib_board *board)
free_private(board);
}
-static int pc2a_common_attach(struct gpib_board *board, const gpib_board_config_t *config,
+static int pc2a_common_attach(struct gpib_board *board, const struct gpib_board_config *config,
unsigned int num_registers, enum nec7210_chipset chipset)
{
unsigned int i, j;
@@ -459,17 +461,17 @@ static int pc2a_common_attach(struct gpib_board *board, const gpib_board_config_
return 0;
}
-static int pc2a_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int pc2a_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
return pc2a_common_attach(board, config, pc2a_iosize, NEC7210);
}
-static int pc2a_cb7210_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int pc2a_cb7210_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
return pc2a_common_attach(board, config, pc2a_iosize, CB7210);
}
-static int pc2_2a_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int pc2_2a_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
return pc2a_common_attach(board, config, pc2_2a_iosize, NAT4882);
}
@@ -517,7 +519,7 @@ static void pc2_2a_detach(struct gpib_board *board)
pc2a_common_detach(board, pc2_2a_iosize);
}
-static gpib_interface_t pc2_interface = {
+static struct gpib_interface pc2_interface = {
.name = "pcII",
.attach = pc2_attach,
.detach = pc2_detach,
@@ -545,7 +547,7 @@ static gpib_interface_t pc2_interface = {
.return_to_local = pc2_return_to_local,
};
-static gpib_interface_t pc2a_interface = {
+static struct gpib_interface pc2a_interface = {
.name = "pcIIa",
.attach = pc2a_attach,
.detach = pc2a_detach,
@@ -573,7 +575,7 @@ static gpib_interface_t pc2a_interface = {
.return_to_local = pc2_return_to_local,
};
-static gpib_interface_t pc2a_cb7210_interface = {
+static struct gpib_interface pc2a_cb7210_interface = {
.name = "pcIIa_cb7210",
.attach = pc2a_cb7210_attach,
.detach = pc2a_detach,
@@ -601,7 +603,7 @@ static gpib_interface_t pc2a_cb7210_interface = {
.return_to_local = pc2_return_to_local,
};
-static gpib_interface_t pc2_2a_interface = {
+static struct gpib_interface pc2_2a_interface = {
.name = "pcII_IIa",
.attach = pc2_2a_attach,
.detach = pc2_2a_detach,
diff --git a/drivers/staging/gpib/tms9914/tms9914.c b/drivers/staging/gpib/tms9914/tms9914.c
index 2abda9d7dfcb..04d57108efc7 100644
--- a/drivers/staging/gpib/tms9914/tms9914.c
+++ b/drivers/staging/gpib/tms9914/tms9914.c
@@ -53,7 +53,8 @@ int tms9914_take_control(struct gpib_board *board, struct tms9914_priv *priv, in
}
EXPORT_SYMBOL_GPL(tms9914_take_control);
-/* The agilent 82350B has a buggy implementation of tcs which interferes with the
+/*
+ * The agilent 82350B has a buggy implementation of tcs which interferes with the
* operation of tca. It appears to be based on the controller state machine
* described in the TI 9900 TMS9914A data manual published in 1982. This
* manual describes tcs as putting the controller into a CWAS
@@ -66,7 +67,8 @@ EXPORT_SYMBOL_GPL(tms9914_take_control);
* The rest of the tms9914 based drivers still use tms9914_take_control
* directly (which does issue tcs).
*/
-int tms9914_take_control_workaround(struct gpib_board *board, struct tms9914_priv *priv, int synchronous)
+int tms9914_take_control_workaround(struct gpib_board *board,
+ struct tms9914_priv *priv, int synchronous)
{
if (synchronous)
return -ETIMEDOUT;
@@ -116,8 +118,8 @@ void tms9914_remote_enable(struct gpib_board *board, struct tms9914_priv *priv,
}
EXPORT_SYMBOL_GPL(tms9914_remote_enable);
-void tms9914_request_system_control(struct gpib_board *board, struct tms9914_priv *priv,
- int request_control)
+int tms9914_request_system_control(struct gpib_board *board, struct tms9914_priv *priv,
+ int request_control)
{
if (request_control) {
write_byte(priv, AUX_RQC, AUXCR);
@@ -125,6 +127,7 @@ void tms9914_request_system_control(struct gpib_board *board, struct tms9914_pri
clear_bit(CIC_NUM, &board->status);
write_byte(priv, AUX_RLC, AUXCR);
}
+ return 0;
}
EXPORT_SYMBOL_GPL(tms9914_request_system_control);
@@ -192,7 +195,7 @@ void tms9914_release_holdoff(struct tms9914_priv *priv)
}
EXPORT_SYMBOL_GPL(tms9914_release_holdoff);
-int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv, uint8_t eos_byte,
+int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv, u8 eos_byte,
int compare_8_bits)
{
priv->eos = eos_byte;
@@ -209,7 +212,7 @@ void tms9914_disable_eos(struct gpib_board *board, struct tms9914_priv *priv)
}
EXPORT_SYMBOL(tms9914_disable_eos);
-int tms9914_parallel_poll(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *result)
+int tms9914_parallel_poll(struct gpib_board *board, struct tms9914_priv *priv, u8 *result)
{
// execute parallel poll
write_byte(priv, AUX_CS | AUX_RPP, AUXCR);
@@ -235,7 +238,7 @@ static void set_ppoll_reg(struct tms9914_priv *priv, int enable,
}
void tms9914_parallel_poll_configure(struct gpib_board *board,
- struct tms9914_priv *priv, uint8_t config)
+ struct tms9914_priv *priv, u8 config)
{
priv->ppoll_enable = (config & PPC_DISABLE) == 0;
priv->ppoll_line = (config & PPC_DIO_MASK) + 1;
@@ -251,7 +254,8 @@ void tms9914_parallel_poll_response(struct gpib_board *board,
}
EXPORT_SYMBOL(tms9914_parallel_poll_response);
-void tms9914_serial_poll_response(struct gpib_board *board, struct tms9914_priv *priv, uint8_t status)
+void tms9914_serial_poll_response(struct gpib_board *board,
+ struct tms9914_priv *priv, u8 status)
{
unsigned long flags;
@@ -266,7 +270,7 @@ void tms9914_serial_poll_response(struct gpib_board *board, struct tms9914_priv
}
EXPORT_SYMBOL(tms9914_serial_poll_response);
-uint8_t tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv *priv)
+u8 tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv *priv)
{
u8 status;
unsigned long flags;
@@ -279,7 +283,8 @@ uint8_t tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv
}
EXPORT_SYMBOL(tms9914_serial_poll_status);
-int tms9914_primary_address(struct gpib_board *board, struct tms9914_priv *priv, unsigned int address)
+int tms9914_primary_address(struct gpib_board *board,
+ struct tms9914_priv *priv, unsigned int address)
{
// put primary address in address0
write_byte(priv, address & ADDRESS_MASK, ADR);
@@ -321,7 +326,8 @@ static void update_talker_state(struct tms9914_priv *priv, unsigned int address_
if (address_status_bits & HR_ATN)
priv->talker_state = talker_addressed;
else
- /* this could also be serial_poll_active, but the tms9914 provides no
+ /*
+ * this could also be serial_poll_active, but the tms9914 provides no
* way to distinguish, so we'll assume talker_active
*/
priv->talker_state = talker_active;
@@ -416,7 +422,7 @@ int tms9914_line_status(const struct gpib_board *board, struct tms9914_priv *pri
}
EXPORT_SYMBOL(tms9914_line_status);
-static int check_for_eos(struct tms9914_priv *priv, uint8_t byte)
+static int check_for_eos(struct tms9914_priv *priv, u8 byte)
{
static const u8 seven_bit_compare_mask = 0x7f;
@@ -449,7 +455,8 @@ static int wait_for_read_byte(struct gpib_board *board, struct tms9914_priv *pri
return 0;
}
-static inline uint8_t tms9914_read_data_in(struct gpib_board *board, struct tms9914_priv *priv, int *end)
+static inline u8 tms9914_read_data_in(struct gpib_board *board,
+ struct tms9914_priv *priv, int *end)
{
unsigned long flags;
u8 data;
@@ -480,7 +487,7 @@ static inline uint8_t tms9914_read_data_in(struct gpib_board *board, struct tms9
return data;
}
-static int pio_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
+static int pio_read(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -501,7 +508,7 @@ static int pio_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t
return retval;
}
-int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -561,7 +568,7 @@ static int pio_write_wait(struct gpib_board *board, struct tms9914_priv *priv)
return 0;
}
-static int pio_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
+static int pio_write(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer,
size_t length, size_t *bytes_written)
{
ssize_t retval = 0;
@@ -585,8 +592,8 @@ static int pio_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_
return length;
}
-int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv,
+ u8 *buffer, size_t length, int send_eoi, size_t *bytes_written)
{
ssize_t retval = 0;
@@ -620,7 +627,8 @@ int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *
}
EXPORT_SYMBOL(tms9914_write);
-static void check_my_address_state(struct gpib_board *board, struct tms9914_priv *priv, int cmd_byte)
+static void check_my_address_state(struct gpib_board *board,
+ struct tms9914_priv *priv, int cmd_byte)
{
if (cmd_byte == MLA(board->pad)) {
priv->primary_listen_addressed = 1;
@@ -655,7 +663,7 @@ static void check_my_address_state(struct gpib_board *board, struct tms9914_priv
}
}
-int tms9914_command(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_command(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer,
size_t length, size_t *bytes_written)
{
int retval = 0;
@@ -736,9 +744,10 @@ irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms99
unsigned short command_byte = read_byte(priv, CPTR) & gpib_command_mask;
switch (command_byte) {
- case PPConfig:
+ case PP_CONFIG:
priv->ppoll_configure_state = 1;
- /* AUX_PTS generates another UNC interrupt on the next command byte
+ /*
+ * AUX_PTS generates another UNC interrupt on the next command byte
* if it is in the secondary address group (such as PPE and PPD).
*/
write_byte(priv, AUX_PTS, AUXCR);
@@ -764,7 +773,7 @@ irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms99
break;
}
- if (in_primary_command_group(command_byte) && command_byte != PPConfig)
+ if (in_primary_command_group(command_byte) && command_byte != PP_CONFIG)
priv->ppoll_configure_state = 0;
}
@@ -774,18 +783,18 @@ irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms99
}
if (status1 & HR_IFC) {
- push_gpib_event(board, EventIFC);
+ push_gpib_event(board, EVENT_IFC);
clear_bit(CIC_NUM, &board->status);
}
if (status1 & HR_GET) {
- push_gpib_event(board, EventDevTrg);
+ push_gpib_event(board, EVENT_DEV_TRG);
// clear dac holdoff
write_byte(priv, AUX_VAL, AUXCR);
}
if (status1 & HR_DCAS) {
- push_gpib_event(board, EventDevClr);
+ push_gpib_event(board, EVENT_DEV_CLR);
// clear dac holdoff
write_byte(priv, AUX_VAL, AUXCR);
set_bit(DEV_CLEAR_BN, &priv->state);
@@ -859,14 +868,14 @@ EXPORT_SYMBOL_GPL(tms9914_online);
#ifdef CONFIG_HAS_IOPORT
// wrapper for inb
-uint8_t tms9914_ioport_read_byte(struct tms9914_priv *priv, unsigned int register_num)
+u8 tms9914_ioport_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
return inb(priv->iobase + register_num * priv->offset);
}
EXPORT_SYMBOL_GPL(tms9914_ioport_read_byte);
// wrapper for outb
-void tms9914_ioport_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
+void tms9914_ioport_write_byte(struct tms9914_priv *priv, u8 data, unsigned int register_num)
{
outb(data, priv->iobase + register_num * priv->offset);
if (register_num == AUXCR)
@@ -876,14 +885,14 @@ EXPORT_SYMBOL_GPL(tms9914_ioport_write_byte);
#endif
// wrapper for readb
-uint8_t tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register_num)
+u8 tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register_num)
{
return readb(priv->mmiobase + register_num * priv->offset);
}
EXPORT_SYMBOL_GPL(tms9914_iomem_read_byte);
// wrapper for writeb
-void tms9914_iomem_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num)
+void tms9914_iomem_write_byte(struct tms9914_priv *priv, u8 data, unsigned int register_num)
{
writeb(data, priv->mmiobase + register_num * priv->offset);
if (register_num == AUXCR)
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
index c35b084b6fd0..a17b69e34986 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
@@ -236,7 +236,7 @@ static int fifo_xfer_done(struct tnt4882_priv *tnt_priv)
return retval;
}
-static int drain_fifo_words(struct tnt4882_priv *tnt_priv, uint8_t *buffer, int num_bytes)
+static int drain_fifo_words(struct tnt4882_priv *tnt_priv, u8 *buffer, int num_bytes)
{
int count = 0;
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -258,7 +258,8 @@ static void tnt4882_release_holdoff(struct gpib_board *board, struct tnt4882_pri
sasr_bits = tnt_readb(tnt_priv, SASR);
- /*tnt4882 not in one-chip mode won't always release holdoff unless we
+ /*
+ * tnt4882 not in one-chip mode won't always release holdoff unless we
* are in the right mode when release handshake command is given
*/
if (sasr_bits & AEHS_BIT) /* holding off due to holdoff on end mode*/ {
@@ -274,7 +275,7 @@ static void tnt4882_release_holdoff(struct gpib_board *board, struct tnt4882_pri
}
}
-static int tnt4882_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+static int tnt4882_accel_read(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *bytes_read)
{
size_t count = 0;
@@ -384,7 +385,8 @@ static int tnt4882_accel_read(struct gpib_board *board, uint8_t *buffer, size_t
nec7210_set_reg_bits(nec_priv, IMR1, HR_ENDIE, 0);
nec7210_set_reg_bits(nec_priv, IMR2, HR_DMAI, 0);
- /* force handling of any pending interrupts (seems to be needed
+ /*
+ * force handling of any pending interrupts (seems to be needed
* to keep interrupts from getting hosed, plus for syncing
* with RECEIVED_END below)
*/
@@ -448,7 +450,7 @@ static int write_wait(struct gpib_board *board, struct tnt4882_priv *tnt_priv,
return 0;
}
-static int generic_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int generic_write(struct gpib_board *board, u8 *buffer, size_t length,
int send_eoi, int send_commands, size_t *bytes_written)
{
size_t count = 0;
@@ -531,7 +533,8 @@ static int generic_write(struct gpib_board *board, uint8_t *buffer, size_t lengt
nec7210_set_reg_bits(nec_priv, IMR1, HR_ERR, 0x0);
nec7210_set_reg_bits(nec_priv, IMR2, HR_DMAO, 0x0);
- /* force handling of any interrupts that happened
+ /*
+ * force handling of any interrupts that happened
* while they were masked (this appears to be needed)
*/
tnt4882_internal_interrupt(board);
@@ -539,13 +542,13 @@ static int generic_write(struct gpib_board *board, uint8_t *buffer, size_t lengt
return retval;
}
-static int tnt4882_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int tnt4882_accel_write(struct gpib_board *board, u8 *buffer,
+ size_t length, int send_eoi, size_t *bytes_written)
{
return generic_write(board, buffer, length, send_eoi, 0, bytes_written);
}
-static int tnt4882_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+static int tnt4882_command(struct gpib_board *board, u8 *buffer, size_t length,
size_t *bytes_written)
{
return generic_write(board, buffer, length, 0, 1, bytes_written);
@@ -566,7 +569,7 @@ static irqreturn_t tnt4882_internal_interrupt(struct gpib_board *board)
imr3_bits = priv->imr3_bits;
if (isr0_bits & TNT_IFCI_BIT)
- push_gpib_event(board, EventIFC);
+ push_gpib_event(board, EVENT_IFC);
//XXX don't need this wakeup, one below should do?
// wake_up_interruptible(&board->wait);
@@ -592,7 +595,7 @@ static irqreturn_t tnt4882_interrupt(int irq, void *arg)
}
// wrappers for interface functions
-static int tnt4882_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+static int tnt4882_read(struct gpib_board *board, u8 *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct tnt4882_priv *priv = board->private_data;
@@ -612,7 +615,7 @@ static int tnt4882_read(struct gpib_board *board, uint8_t *buffer, size_t length
return retval;
}
-static int tnt4882_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+static int tnt4882_write(struct gpib_board *board, u8 *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct tnt4882_priv *priv = board->private_data;
@@ -620,7 +623,7 @@ static int tnt4882_write(struct gpib_board *board, uint8_t *buffer, size_t lengt
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int tnt4882_command_unaccel(struct gpib_board *board, uint8_t *buffer,
+static int tnt4882_command_unaccel(struct gpib_board *board, u8 *buffer,
size_t length, size_t *bytes_written)
{
struct tnt4882_priv *priv = board->private_data;
@@ -642,19 +645,21 @@ static int tnt4882_go_to_standby(struct gpib_board *board)
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void tnt4882_request_system_control(struct gpib_board *board, int request_control)
+static int tnt4882_request_system_control(struct gpib_board *board, int request_control)
{
struct tnt4882_priv *priv = board->private_data;
+ int retval;
if (request_control) {
tnt_writeb(priv, SETSC, CMDR);
udelay(1);
}
- nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
+ retval = nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
if (!request_control) {
tnt_writeb(priv, CLRSC, CMDR);
udelay(1);
}
+ return retval;
}
static void tnt4882_interface_clear(struct gpib_board *board, int assert)
@@ -671,7 +676,7 @@ static void tnt4882_remote_enable(struct gpib_board *board, int enable)
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int tnt4882_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
+static int tnt4882_enable_eos(struct gpib_board *board, u8 eos_byte, int compare_8_bits)
{
struct tnt4882_priv *priv = board->private_data;
@@ -718,7 +723,7 @@ static int tnt4882_secondary_address(struct gpib_board *board, unsigned int addr
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int tnt4882_parallel_poll(struct gpib_board *board, uint8_t *result)
+static int tnt4882_parallel_poll(struct gpib_board *board, u8 *result)
{
struct tnt4882_priv *tnt_priv = board->private_data;
@@ -735,7 +740,7 @@ static int tnt4882_parallel_poll(struct gpib_board *board, uint8_t *result)
}
}
-static void tnt4882_parallel_poll_configure(struct gpib_board *board, uint8_t config)
+static void tnt4882_parallel_poll_configure(struct gpib_board *board, u8 config)
{
struct tnt4882_priv *priv = board->private_data;
@@ -760,17 +765,18 @@ static void tnt4882_parallel_poll_response(struct gpib_board *board, int ist)
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-/* this is just used by the old nec7210 isa interfaces, the newer
+/*
+ * this is just used by the old nec7210 isa interfaces, the newer
* boards use tnt4882_serial_poll_response2
*/
-static void tnt4882_serial_poll_response(struct gpib_board *board, uint8_t status)
+static void tnt4882_serial_poll_response(struct gpib_board *board, u8 status)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static void tnt4882_serial_poll_response2(struct gpib_board *board, uint8_t status,
+static void tnt4882_serial_poll_response2(struct gpib_board *board, u8 status,
int new_reason_for_service)
{
struct tnt4882_priv *priv = board->private_data;
@@ -788,7 +794,8 @@ static void tnt4882_serial_poll_response2(struct gpib_board *board, uint8_t stat
priv->nec7210_priv.srq_pending = 0;
}
if (reqt)
- /* It may seem like a race to issue reqt before updating
+ /*
+ * It may seem like a race to issue reqt before updating
* the status byte, but it is not. The chip does not
* issue the reqt until the SPMR is written to at
* a later time.
@@ -796,7 +803,8 @@ static void tnt4882_serial_poll_response2(struct gpib_board *board, uint8_t stat
write_byte(&priv->nec7210_priv, AUX_REQT, AUXMR);
else if (reqf)
write_byte(&priv->nec7210_priv, AUX_REQF, AUXMR);
- /* We need to always zero bit 6 of the status byte before writing it to
+ /*
+ * We need to always zero bit 6 of the status byte before writing it to
 * the SPMR to ensure we are using
* serial poll mode SP1, and not accidentally triggering mode SP3.
*/
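Concretely, the masking the comment describes comes down to something like:

        write_byte(&priv->nec7210_priv, status & ~0x40, SPMR); /* bit 6 clear selects SP1 */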
@@ -804,7 +812,7 @@ static void tnt4882_serial_poll_response2(struct gpib_board *board, uint8_t stat
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static uint8_t tnt4882_serial_poll_status(struct gpib_board *board)
+static u8 tnt4882_serial_poll_status(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
@@ -898,7 +906,7 @@ static void tnt4882_init(struct tnt4882_priv *tnt_priv, const struct gpib_board
tnt_writeb(tnt_priv, tnt_priv->imr0_bits, IMR0);
}
-static int ni_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ni_pci_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct tnt4882_priv *tnt_priv;
struct nec7210_priv *nec_priv;
@@ -1019,7 +1027,7 @@ static int ni_isapnp_find(struct pnp_dev **dev)
return 0;
}
-static int ni_isa_attach_common(struct gpib_board *board, const gpib_board_config_t *config,
+static int ni_isa_attach_common(struct gpib_board *board, const struct gpib_board_config *config,
enum nec7210_chipset chipset)
{
struct tnt4882_priv *tnt_priv;
@@ -1075,17 +1083,17 @@ static int ni_isa_attach_common(struct gpib_board *board, const gpib_board_confi
return 0;
}
-static int ni_tnt_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ni_tnt_isa_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
return ni_isa_attach_common(board, config, TNT4882);
}
-static int ni_nat4882_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ni_nat4882_isa_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
return ni_isa_attach_common(board, config, NAT4882);
}
-static int ni_nec_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ni_nec_isa_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
return ni_isa_attach_common(board, config, NEC7210);
}
@@ -1116,7 +1124,7 @@ static int tnt4882_pci_probe(struct pci_dev *dev, const struct pci_device_id *id
return 0;
}
-static gpib_interface_t ni_pci_interface = {
+static struct gpib_interface ni_pci_interface = {
.name = "ni_pci",
.attach = ni_pci_attach,
.detach = ni_pci_detach,
@@ -1144,7 +1152,7 @@ static gpib_interface_t ni_pci_interface = {
.return_to_local = tnt4882_return_to_local,
};
-static gpib_interface_t ni_pci_accel_interface = {
+static struct gpib_interface ni_pci_accel_interface = {
.name = "ni_pci_accel",
.attach = ni_pci_attach,
.detach = ni_pci_detach,
@@ -1172,7 +1180,7 @@ static gpib_interface_t ni_pci_accel_interface = {
.return_to_local = tnt4882_return_to_local,
};
-static gpib_interface_t ni_isa_interface = {
+static struct gpib_interface ni_isa_interface = {
.name = "ni_isa",
.attach = ni_tnt_isa_attach,
.detach = ni_isa_detach,
@@ -1200,7 +1208,7 @@ static gpib_interface_t ni_isa_interface = {
.return_to_local = tnt4882_return_to_local,
};
-static gpib_interface_t ni_nat4882_isa_interface = {
+static struct gpib_interface ni_nat4882_isa_interface = {
.name = "ni_nat4882_isa",
.attach = ni_nat4882_isa_attach,
.detach = ni_isa_detach,
@@ -1228,7 +1236,7 @@ static gpib_interface_t ni_nat4882_isa_interface = {
.return_to_local = tnt4882_return_to_local,
};
-static gpib_interface_t ni_nec_isa_interface = {
+static struct gpib_interface ni_nec_isa_interface = {
.name = "ni_nec_isa",
.attach = ni_nec_isa_attach,
.detach = ni_isa_detach,
@@ -1256,7 +1264,7 @@ static gpib_interface_t ni_nec_isa_interface = {
.return_to_local = tnt4882_return_to_local,
};
-static gpib_interface_t ni_isa_accel_interface = {
+static struct gpib_interface ni_isa_accel_interface = {
.name = "ni_isa_accel",
.attach = ni_tnt_isa_attach,
.detach = ni_isa_detach,
@@ -1284,7 +1292,7 @@ static gpib_interface_t ni_isa_accel_interface = {
.return_to_local = tnt4882_return_to_local,
};
-static gpib_interface_t ni_nat4882_isa_accel_interface = {
+static struct gpib_interface ni_nat4882_isa_accel_interface = {
.name = "ni_nat4882_isa_accel",
.attach = ni_nat4882_isa_attach,
.detach = ni_isa_detach,
@@ -1312,7 +1320,7 @@ static gpib_interface_t ni_nat4882_isa_accel_interface = {
.return_to_local = tnt4882_return_to_local,
};
-static gpib_interface_t ni_nec_isa_accel_interface = {
+static struct gpib_interface ni_nec_isa_accel_interface = {
.name = "ni_nec_isa_accel",
.attach = ni_nec_isa_attach,
.detach = ni_isa_detach,
@@ -1371,8 +1379,8 @@ MODULE_DEVICE_TABLE(pnp, tnt4882_pnp_table);
#endif
#ifdef CONFIG_GPIB_PCMCIA
-static gpib_interface_t ni_pcmcia_interface;
-static gpib_interface_t ni_pcmcia_accel_interface;
+static struct gpib_interface ni_pcmcia_interface;
+static struct gpib_interface ni_pcmcia_accel_interface;
static int __init init_ni_gpib_cs(void);
static void __exit exit_ni_gpib_cs(void);
#endif
@@ -1581,10 +1589,10 @@ static int ni_gpib_probe(struct pcmcia_device *link)
}
/*
- * This deletes a driver "instance". The device is de-registered
- * with Card Services. If it has been released, all local data
- * structures are freed. Otherwise, the structures will be freed
- * when the device is released.
+ * This deletes a driver "instance". The device is de-registered
+ * with Card Services. If it has been released, all local data
+ * structures are freed. Otherwise, the structures will be freed
+ * when the device is released.
*/
static void ni_gpib_remove(struct pcmcia_device *link)
{
@@ -1611,9 +1619,9 @@ static int ni_gpib_config_iteration(struct pcmcia_device *link, void *priv_data)
}
/*
- * ni_gpib_config() is scheduled to run after a CARD_INSERTION event
- * is received, to configure the PCMCIA socket, and to make the
- * device available to the system.
+ * ni_gpib_config() is scheduled to run after a CARD_INSERTION event
+ * is received, to configure the PCMCIA socket, and to make the
+ * device available to the system.
*/
static int ni_gpib_config(struct pcmcia_device *link)
{
@@ -1702,7 +1710,7 @@ static void __exit exit_ni_gpib_cs(void)
static const int pcmcia_gpib_iosize = 32;
-static int ni_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
+static int ni_pcmcia_attach(struct gpib_board *board, const struct gpib_board_config *config)
{
struct local_info_t *info;
struct tnt4882_priv *tnt_priv;
@@ -1769,7 +1777,7 @@ static void ni_pcmcia_detach(struct gpib_board *board)
tnt4882_free_private(board);
}
-static gpib_interface_t ni_pcmcia_interface = {
+static struct gpib_interface ni_pcmcia_interface = {
.name = "ni_pcmcia",
.attach = ni_pcmcia_attach,
.detach = ni_pcmcia_detach,
@@ -1797,7 +1805,7 @@ static gpib_interface_t ni_pcmcia_interface = {
.return_to_local = tnt4882_return_to_local,
};
-static gpib_interface_t ni_pcmcia_accel_interface = {
+static struct gpib_interface ni_pcmcia_accel_interface = {
.name = "ni_pcmcia_accel",
.attach = ni_pcmcia_attach,
.detach = ni_pcmcia_detach,
diff --git a/drivers/staging/gpib/uapi/gpib_user.h b/drivers/staging/gpib/uapi/gpib.h
index 5ff4588686fd..41500cee4029 100644
--- a/drivers/staging/gpib/uapi/gpib_user.h
+++ b/drivers/staging/gpib/uapi/gpib.h
@@ -4,8 +4,8 @@
* copyright : (C) 2002 by Frank Mori Hess
***************************************************************************/
-#ifndef _GPIB_USER_H
-#define _GPIB_USER_H
+#ifndef _GPIB_H
+#define _GPIB_H
#define GPIB_MAX_NUM_BOARDS 16
#define GPIB_MAX_NUM_DESCRIPTORS 0x1000
@@ -53,48 +53,6 @@ enum ibsta_bits {
EVENT | LOK | REM | CIC | ATN | TACS | LACS | DTAS | DCAS | SRQI,
};
-/* IBERR error codes */
-enum iberr_code {
- EDVR = 0, /* system error */
- ECIC = 1, /* not CIC */
- ENOL = 2, /* no listeners */
- EADR = 3, /* CIC and not addressed before I/O */
- EARG = 4, /* bad argument to function call */
- ESAC = 5, /* not SAC */
- EABO = 6, /* I/O operation was aborted */
- ENEB = 7, /* non-existent board (GPIB interface offline) */
- EDMA = 8, /* DMA hardware error detected */
- EOIP = 10, /* new I/O attempted with old I/O in progress */
- ECAP = 11, /* no capability for intended operation */
- EFSO = 12, /* file system operation error */
- EBUS = 14, /* bus error */
- ESTB = 15, /* lost serial poll bytes */
- ESRQ = 16, /* SRQ stuck on */
- ETAB = 20 /* Table Overflow */
-};
-
-/* Timeout values and meanings */
-enum gpib_timeout {
- TNONE = 0, /* Infinite timeout (disabled) */
- T10us = 1, /* Timeout of 10 usec (ideal) */
- T30us = 2, /* Timeout of 30 usec (ideal) */
- T100us = 3, /* Timeout of 100 usec (ideal) */
- T300us = 4, /* Timeout of 300 usec (ideal) */
- T1ms = 5, /* Timeout of 1 msec (ideal) */
- T3ms = 6, /* Timeout of 3 msec (ideal) */
- T10ms = 7, /* Timeout of 10 msec (ideal) */
- T30ms = 8, /* Timeout of 30 msec (ideal) */
- T100ms = 9, /* Timeout of 100 msec (ideal) */
- T300ms = 10, /* Timeout of 300 msec (ideal) */
- T1s = 11, /* Timeout of 1 sec (ideal) */
- T3s = 12, /* Timeout of 3 sec (ideal) */
- T10s = 13, /* Timeout of 10 sec (ideal) */
- T30s = 14, /* Timeout of 30 sec (ideal) */
- T100s = 15, /* Timeout of 100 sec (ideal) */
- T300s = 16, /* Timeout of 300 sec (ideal) */
- T1000s = 17 /* Timeout of 1000 sec (maximum) */
-};
-
/* End-of-string (EOS) modes for use with ibeos */
enum eos_flags {
@@ -130,9 +88,9 @@ enum bus_control_line {
enum cmd_byte {
GTL = 0x1, /* go to local */
SDC = 0x4, /* selected device clear */
- PPConfig = 0x5,
+ PP_CONFIG = 0x5,
#ifndef PPC
- PPC = PPConfig, /* parallel poll configure */
+ PPC = PP_CONFIG, /* parallel poll configure */
#endif
GET = 0x8, /* group execute trigger */
TCT = 0x9, /* take control */
@@ -166,24 +124,24 @@ static inline unsigned int gpib_address_restrict(unsigned int addr)
return addr;
}
-static inline uint8_t MLA(unsigned int addr)
+static inline __u8 MLA(unsigned int addr)
{
return gpib_address_restrict(addr) | LAD;
}
-static inline uint8_t MTA(unsigned int addr)
+static inline __u8 MTA(unsigned int addr)
{
return gpib_address_restrict(addr) | TAD;
}
-static inline uint8_t MSA(unsigned int addr)
+static inline __u8 MSA(unsigned int addr)
{
- return gpib_address_restrict(addr) | SAD;
+ return (addr & 0x1f) | SAD;
}
-static inline uint8_t PPE_byte(unsigned int dio_line, int sense)
+static inline __u8 PPE_byte(unsigned int dio_line, int sense)
{
- uint8_t cmd;
+ __u8 cmd;
cmd = PPE;
if (sense)
@@ -192,47 +150,42 @@ static inline uint8_t PPE_byte(unsigned int dio_line, int sense)
return cmd;
}
-static inline uint8_t CFGn(unsigned int meters)
-{
- return 0x6 | (meters & 0xf);
-}
-
/* mask of bits that actually matter in a command byte */
enum {
gpib_command_mask = 0x7f,
};
-static inline int is_PPE(uint8_t command)
+static inline int is_PPE(__u8 command)
{
return (command & 0x70) == 0x60;
}
-static inline int is_PPD(uint8_t command)
+static inline int is_PPD(__u8 command)
{
return (command & 0x70) == 0x70;
}
-static inline int in_addressed_command_group(uint8_t command)
+static inline int in_addressed_command_group(__u8 command)
{
return (command & 0x70) == 0x0;
}
-static inline int in_universal_command_group(uint8_t command)
+static inline int in_universal_command_group(__u8 command)
{
return (command & 0x70) == 0x10;
}
-static inline int in_listen_address_group(uint8_t command)
+static inline int in_listen_address_group(__u8 command)
{
return (command & 0x60) == 0x20;
}
-static inline int in_talk_address_group(uint8_t command)
+static inline int in_talk_address_group(__u8 command)
{
return (command & 0x60) == 0x40;
}
-static inline int in_primary_command_group(uint8_t command)
+static inline int in_primary_command_group(__u8 command)
{
return in_addressed_command_group(command) ||
in_universal_command_group(command) ||
@@ -253,75 +206,73 @@ static inline int gpib_address_equal(unsigned int pad1, int sad1, unsigned int p
}
enum ibask_option {
- IbaPAD = 0x1,
- IbaSAD = 0x2,
- IbaTMO = 0x3,
- IbaEOT = 0x4,
- IbaPPC = 0x5, /* board only */
- IbaREADDR = 0x6, /* device only */
- IbaAUTOPOLL = 0x7, /* board only */
- IbaCICPROT = 0x8, /* board only */
- IbaIRQ = 0x9, /* board only */
- IbaSC = 0xa, /* board only */
- IbaSRE = 0xb, /* board only */
- IbaEOSrd = 0xc,
- IbaEOSwrt = 0xd,
- IbaEOScmp = 0xe,
- IbaEOSchar = 0xf,
- IbaPP2 = 0x10, /* board only */
- IbaTIMING = 0x11, /* board only */
- IbaDMA = 0x12, /* board only */
- IbaReadAdjust = 0x13,
- IbaWriteAdjust = 0x14,
- IbaEventQueue = 0x15, /* board only */
- IbaSPollBit = 0x16, /* board only */
- IbaSpollBit = 0x16, /* board only */
- IbaSendLLO = 0x17, /* board only */
- IbaSPollTime = 0x18, /* device only */
- IbaPPollTime = 0x19, /* board only */
- IbaEndBitIsNormal = 0x1a,
- IbaUnAddr = 0x1b, /* device only */
- IbaHSCableLength = 0x1f, /* board only */
- IbaIst = 0x20, /* board only */
- IbaRsv = 0x21, /* board only */
- IbaBNA = 0x200, /* device only */
+ IBA_PAD = 0x1,
+ IBA_SAD = 0x2,
+ IBA_TMO = 0x3,
+ IBA_EOT = 0x4,
+ IBA_PPC = 0x5, /* board only */
+ IBA_READ_DR = 0x6, /* device only */
+ IBA_AUTOPOLL = 0x7, /* board only */
+ IBA_CICPROT = 0x8, /* board only */
+ IBA_IRQ = 0x9, /* board only */
+ IBA_SC = 0xa, /* board only */
+ IBA_SRE = 0xb, /* board only */
+ IBA_EOS_RD = 0xc,
+ IBA_EOS_WRT = 0xd,
+ IBA_EOS_CMP = 0xe,
+ IBA_EOS_CHAR = 0xf,
+ IBA_PP2 = 0x10, /* board only */
+ IBA_TIMING = 0x11, /* board only */
+ IBA_DMA = 0x12, /* board only */
+ IBA_READ_ADJUST = 0x13,
+ IBA_WRITE_ADJUST = 0x14,
+ IBA_EVENT_QUEUE = 0x15, /* board only */
+ IBA_SPOLL_BIT = 0x16, /* board only */
+ IBA_SEND_LLO = 0x17, /* board only */
+ IBA_SPOLL_TIME = 0x18, /* device only */
+ IBA_PPOLL_TIME = 0x19, /* board only */
+ IBA_END_BIT_IS_NORMAL = 0x1a,
+ IBA_UN_ADDR = 0x1b, /* device only */
+ IBA_HS_CABLE_LENGTH = 0x1f, /* board only */
+ IBA_IST = 0x20, /* board only */
+ IBA_RSV = 0x21, /* board only */
+ IBA_BNA = 0x200, /* device only */
/* linux-gpib extensions */
- Iba7BitEOS = 0x1000 /* board only. Returns 1 if board supports 7 bit eos compares*/
+ IBA_7_BIT_EOS = 0x1000 /* board only. Returns 1 if board supports 7 bit eos compares */
};
enum ibconfig_option {
- IbcPAD = 0x1,
- IbcSAD = 0x2,
- IbcTMO = 0x3,
- IbcEOT = 0x4,
- IbcPPC = 0x5, /* board only */
- IbcREADDR = 0x6, /* device only */
- IbcAUTOPOLL = 0x7, /* board only */
- IbcCICPROT = 0x8, /* board only */
- IbcIRQ = 0x9, /* board only */
- IbcSC = 0xa, /* board only */
- IbcSRE = 0xb, /* board only */
- IbcEOSrd = 0xc,
- IbcEOSwrt = 0xd,
- IbcEOScmp = 0xe,
- IbcEOSchar = 0xf,
- IbcPP2 = 0x10, /* board only */
- IbcTIMING = 0x11, /* board only */
- IbcDMA = 0x12, /* board only */
- IbcReadAdjust = 0x13,
- IbcWriteAdjust = 0x14,
- IbcEventQueue = 0x15, /* board only */
- IbcSPollBit = 0x16, /* board only */
- IbcSpollBit = 0x16, /* board only */
- IbcSendLLO = 0x17, /* board only */
- IbcSPollTime = 0x18, /* device only */
- IbcPPollTime = 0x19, /* board only */
- IbcEndBitIsNormal = 0x1a,
- IbcUnAddr = 0x1b, /* device only */
- IbcHSCableLength = 0x1f, /* board only */
- IbcIst = 0x20, /* board only */
- IbcRsv = 0x21, /* board only */
- IbcBNA = 0x200 /* device only */
+ IBC_PAD = 0x1,
+ IBC_SAD = 0x2,
+ IBC_TMO = 0x3,
+ IBC_EOT = 0x4,
+ IBC_PPC = 0x5, /* board only */
+ IBC_READDR = 0x6, /* device only */
+ IBC_AUTOPOLL = 0x7, /* board only */
+ IBC_CICPROT = 0x8, /* board only */
+ IBC_IRQ = 0x9, /* board only */
+ IBC_SC = 0xa, /* board only */
+ IBC_SRE = 0xb, /* board only */
+ IBC_EOS_RD = 0xc,
+ IBC_EOS_WRT = 0xd,
+ IBC_EOS_CMP = 0xe,
+ IBC_EOS_CHAR = 0xf,
+ IBC_PP2 = 0x10, /* board only */
+ IBC_TIMING = 0x11, /* board only */
+ IBC_DMA = 0x12, /* board only */
+ IBC_READ_ADJUST = 0x13,
+ IBC_WRITE_ADJUST = 0x14,
+ IBC_EVENT_QUEUE = 0x15, /* board only */
+ IBC_SPOLL_BIT = 0x16, /* board only */
+ IBC_SEND_LLO = 0x17, /* board only */
+ IBC_SPOLL_TIME = 0x18, /* device only */
+ IBC_PPOLL_TIME = 0x19, /* board only */
+ IBC_END_BIT_IS_NORMAL = 0x1a,
+ IBC_UN_ADDR = 0x1b, /* device only */
+ IBC_HS_CABLE_LENGTH = 0x1f, /* board only */
+ IBC_IST = 0x20, /* board only */
+ IBC_RSV = 0x21, /* board only */
+ IBC_BNA = 0x200 /* device only */
};
enum t1_delays {
@@ -335,18 +286,17 @@ enum {
};
enum gpib_events {
- EventNone = 0,
- EventDevTrg = 1,
- EventDevClr = 2,
- EventIFC = 3
+ EVENT_NONE = 0,
+ EVENT_DEV_TRG = 1,
+ EVENT_DEV_CLR = 2,
+ EVENT_IFC = 3
};
enum gpib_stb {
- IbStbRQS = 0x40, /* IEEE 488.1 & 2 */
- IbStbESB = 0x20, /* IEEE 488.2 only */
- IbStbMAV = 0x10 /* IEEE 488.2 only */
+ IB_STB_RQS = 0x40, /* IEEE 488.1 & 2 */
+ IB_STB_ESB = 0x20, /* IEEE 488.2 only */
+ IB_STB_MAV = 0x10 /* IEEE 488.2 only */
};
-#endif /* _GPIB_USER_H */
+#endif /* _GPIB_H */
-/* Check for errors */
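For reference, the MLA()/MTA()/MSA() helpers above compose GPIB address command bytes by OR-ing the range-restricted address into the listen, talk, or secondary address group; LAD, TAD and SAD are defined elsewhere in this header. A short sketch of typical use, assuming nothing beyond the helpers shown:

#include <linux/types.h>
/* assumes the gpib.h helpers as patched above */

static void address_pair(__u8 cmd[2], unsigned int talker,
			 unsigned int listener)
{
	cmd[0] = MTA(talker);	/* restricted address | TAD */
	cmd[1] = MLA(listener);	/* restricted address | LAD */
}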
diff --git a/drivers/staging/gpib/uapi/gpib_ioctl.h b/drivers/staging/gpib/uapi/gpib_ioctl.h
index 6202865278ea..0fed5c0fa7f2 100644
--- a/drivers/staging/gpib/uapi/gpib_ioctl.h
+++ b/drivers/staging/gpib/uapi/gpib_ioctl.h
@@ -12,42 +12,42 @@
#define GPIB_CODE 160
-typedef struct {
+struct gpib_board_type_ioctl {
char name[100];
-} board_type_ioctl_t;
+};
/* argument for read/write/command ioctls */
-typedef struct {
- uint64_t buffer_ptr;
+struct gpib_read_write_ioctl {
+ __u64 buffer_ptr;
unsigned int requested_transfer_count;
unsigned int completed_transfer_count;
int end; /* end flag return for reads, end io suppression request for cmd*/
int handle;
-} read_write_ioctl_t;
+};
-typedef struct {
+struct gpib_open_dev_ioctl {
unsigned int handle;
unsigned int pad;
int sad;
unsigned is_board : 1;
-} open_dev_ioctl_t;
+};
-typedef struct {
+struct gpib_close_dev_ioctl {
unsigned int handle;
-} close_dev_ioctl_t;
+};
-typedef struct {
+struct gpib_serial_poll_ioctl {
unsigned int pad;
int sad;
- uint8_t status_byte;
-} serial_poll_ioctl_t;
+ __u8 status_byte;
+};
-typedef struct {
+struct gpib_eos_ioctl {
int eos;
int eos_flags;
-} eos_ioctl_t;
+};
-typedef struct {
+struct gpib_wait_ioctl {
int handle;
int wait_mask;
int clear_mask;
@@ -56,21 +56,21 @@ typedef struct {
int pad;
int sad;
unsigned int usec_timeout;
-} wait_ioctl_t;
+};
-typedef struct {
- uint64_t init_data_ptr;
+struct gpib_online_ioctl {
+ __u64 init_data_ptr;
int init_data_length;
int online;
-} online_ioctl_t;
+};
-typedef struct {
+struct gpib_spoll_bytes_ioctl {
unsigned int num_bytes;
unsigned int pad;
int sad;
-} spoll_bytes_ioctl_t;
+};
-typedef struct {
+struct gpib_board_info_ioctl {
unsigned int pad;
int sad;
int parallel_poll_configuration;
@@ -79,91 +79,85 @@ typedef struct {
unsigned int t1_delay;
unsigned ist : 1;
unsigned no_7_bit_eos : 1;
-} board_info_ioctl_t;
+};
-typedef struct {
+struct gpib_select_pci_ioctl {
int pci_bus;
int pci_slot;
-} select_pci_ioctl_t;
+};
-typedef struct {
- uint8_t config;
+struct gpib_ppoll_config_ioctl {
+ __u8 config;
unsigned set_ist : 1;
unsigned clear_ist : 1;
-} ppoll_config_ioctl_t;
+};
-typedef struct {
+struct gpib_pad_ioctl {
unsigned int handle;
unsigned int pad;
-} pad_ioctl_t;
+};
-typedef struct {
+struct gpib_sad_ioctl {
unsigned int handle;
int sad;
-} sad_ioctl_t;
+};
// select a piece of hardware to attach by its sysfs device path
-typedef struct {
+struct gpib_select_device_path_ioctl {
char device_path[0x1000];
-} select_device_path_ioctl_t;
-
-typedef short event_ioctl_t;
-typedef int rsc_ioctl_t;
-typedef unsigned int t1_delay_ioctl_t;
-typedef short autospoll_ioctl_t;
-typedef short local_ppoll_mode_ioctl_t;
+};
// update status byte and request service
-typedef struct {
- uint8_t status_byte;
+struct gpib_request_service2 {
+ __u8 status_byte;
int new_reason_for_service;
-} request_service2_t;
+};
/* Standard functions. */
enum gpib_ioctl {
- IBRD = _IOWR(GPIB_CODE, 100, read_write_ioctl_t),
- IBWRT = _IOWR(GPIB_CODE, 101, read_write_ioctl_t),
- IBCMD = _IOWR(GPIB_CODE, 102, read_write_ioctl_t),
- IBOPENDEV = _IOWR(GPIB_CODE, 3, open_dev_ioctl_t),
- IBCLOSEDEV = _IOW(GPIB_CODE, 4, close_dev_ioctl_t),
- IBWAIT = _IOWR(GPIB_CODE, 5, wait_ioctl_t),
- IBRPP = _IOWR(GPIB_CODE, 6, uint8_t),
+ IBRD = _IOWR(GPIB_CODE, 100, struct gpib_read_write_ioctl),
+ IBWRT = _IOWR(GPIB_CODE, 101, struct gpib_read_write_ioctl),
+ IBCMD = _IOWR(GPIB_CODE, 102, struct gpib_read_write_ioctl),
+ IBOPENDEV = _IOWR(GPIB_CODE, 3, struct gpib_open_dev_ioctl),
+ IBCLOSEDEV = _IOW(GPIB_CODE, 4, struct gpib_close_dev_ioctl),
+ IBWAIT = _IOWR(GPIB_CODE, 5, struct gpib_wait_ioctl),
+ IBRPP = _IOWR(GPIB_CODE, 6, __u8),
IBSIC = _IOW(GPIB_CODE, 9, unsigned int),
IBSRE = _IOW(GPIB_CODE, 10, int),
IBGTS = _IO(GPIB_CODE, 11),
IBCAC = _IOW(GPIB_CODE, 12, int),
IBLINES = _IOR(GPIB_CODE, 14, short),
- IBPAD = _IOW(GPIB_CODE, 15, pad_ioctl_t),
- IBSAD = _IOW(GPIB_CODE, 16, sad_ioctl_t),
+ IBPAD = _IOW(GPIB_CODE, 15, struct gpib_pad_ioctl),
+ IBSAD = _IOW(GPIB_CODE, 16, struct gpib_sad_ioctl),
IBTMO = _IOW(GPIB_CODE, 17, unsigned int),
- IBRSP = _IOWR(GPIB_CODE, 18, serial_poll_ioctl_t),
- IBEOS = _IOW(GPIB_CODE, 19, eos_ioctl_t),
- IBRSV = _IOW(GPIB_CODE, 20, uint8_t),
- CFCBASE = _IOW(GPIB_CODE, 21, uint64_t),
+ IBRSP = _IOWR(GPIB_CODE, 18, struct gpib_serial_poll_ioctl),
+ IBEOS = _IOW(GPIB_CODE, 19, struct gpib_eos_ioctl),
+ IBRSV = _IOW(GPIB_CODE, 20, __u8),
+ CFCBASE = _IOW(GPIB_CODE, 21, __u64),
CFCIRQ = _IOW(GPIB_CODE, 22, unsigned int),
CFCDMA = _IOW(GPIB_CODE, 23, unsigned int),
- CFCBOARDTYPE = _IOW(GPIB_CODE, 24, board_type_ioctl_t),
+ CFCBOARDTYPE = _IOW(GPIB_CODE, 24, struct gpib_board_type_ioctl),
IBMUTEX = _IOW(GPIB_CODE, 26, int),
- IBSPOLL_BYTES = _IOWR(GPIB_CODE, 27, spoll_bytes_ioctl_t),
- IBPPC = _IOW(GPIB_CODE, 28, ppoll_config_ioctl_t),
- IBBOARD_INFO = _IOR(GPIB_CODE, 29, board_info_ioctl_t),
+ IBSPOLL_BYTES = _IOWR(GPIB_CODE, 27, struct gpib_spoll_bytes_ioctl),
+ IBPPC = _IOW(GPIB_CODE, 28, struct gpib_ppoll_config_ioctl),
+ IBBOARD_INFO = _IOR(GPIB_CODE, 29, struct gpib_board_info_ioctl),
IBQUERY_BOARD_RSV = _IOR(GPIB_CODE, 31, int),
- IBSELECT_PCI = _IOWR(GPIB_CODE, 32, select_pci_ioctl_t),
- IBEVENT = _IOR(GPIB_CODE, 33, event_ioctl_t),
- IBRSC = _IOW(GPIB_CODE, 34, rsc_ioctl_t),
- IB_T1_DELAY = _IOW(GPIB_CODE, 35, t1_delay_ioctl_t),
+ IBSELECT_PCI = _IOWR(GPIB_CODE, 32, struct gpib_select_pci_ioctl),
+ IBEVENT = _IOR(GPIB_CODE, 33, short),
+ IBRSC = _IOW(GPIB_CODE, 34, int),
+ IB_T1_DELAY = _IOW(GPIB_CODE, 35, unsigned int),
IBLOC = _IO(GPIB_CODE, 36),
- IBAUTOSPOLL = _IOW(GPIB_CODE, 38, autospoll_ioctl_t),
- IBONL = _IOW(GPIB_CODE, 39, online_ioctl_t),
- IBPP2_SET = _IOW(GPIB_CODE, 40, local_ppoll_mode_ioctl_t),
- IBPP2_GET = _IOR(GPIB_CODE, 41, local_ppoll_mode_ioctl_t),
- IBSELECT_DEVICE_PATH = _IOW(GPIB_CODE, 43, select_device_path_ioctl_t),
+ IBAUTOSPOLL = _IOW(GPIB_CODE, 38, short),
+ IBONL = _IOW(GPIB_CODE, 39, struct gpib_online_ioctl),
+ IBPP2_SET = _IOW(GPIB_CODE, 40, short),
+ IBPP2_GET = _IOR(GPIB_CODE, 41, short),
+ IBSELECT_DEVICE_PATH = _IOW(GPIB_CODE, 43, struct gpib_select_device_path_ioctl),
// 44 was IBSELECT_SERIAL_NUMBER
- IBRSV2 = _IOW(GPIB_CODE, 45, request_service2_t)
+ IBRSV2 = _IOW(GPIB_CODE, 45, struct gpib_request_service2)
};
#endif /* _GPIB_IOCTL_H */
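With the typedef names gone, userspace now names the request structures by their struct tags when issuing these ioctls. A hedged usage sketch for IBPAD; the file descriptor is assumed to be an already-open GPIB character device, which this header does not define:

#include <sys/ioctl.h>
#include "gpib_ioctl.h"	/* this header, as patched above */

/* Set the primary address for an open descriptor. Returns 0 on
 * success, -1 with errno set otherwise.
 */
static int set_pad(int fd, unsigned int handle, unsigned int pad)
{
	struct gpib_pad_ioctl req = {
		.handle	= handle,
		.pad	= pad,
	};

	return ioctl(fd, IBPAD, &req);
}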
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 5d80ace41d8e..ec9fddfc0b14 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -1165,8 +1165,8 @@ static int gb_camera_debugfs_init(struct gb_camera *gcam)
gcam->debugfs.buffers[i].length = 0;
debugfs_create_file_aux(entry->name, entry->mask,
- gcam->debugfs.root, gcam, entry,
- &gb_camera_debugfs_ops);
+ gcam->debugfs.root, gcam, entry,
+ &gb_camera_debugfs_ops);
}
return 0;
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index a47385175582..152949c23d65 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -123,17 +123,11 @@ static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
fw_info->major = le16_to_cpu(response.major);
fw_info->minor = le16_to_cpu(response.minor);
- strscpy_pad(fw_info->firmware_tag, response.firmware_tag);
-
- /*
- * The firmware-tag should be NULL terminated, otherwise throw error but
- * don't fail.
- */
- if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
+ ret = strscpy_pad(fw_info->firmware_tag, response.firmware_tag);
+ if (ret == -E2BIG)
dev_err(fw_mgmt->parent,
- "fw-version: firmware-tag is not NULL terminated\n");
- fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0';
- }
+ "fw-version: truncated firmware tag: %s\n",
+ fw_info->firmware_tag);
return 0;
}
@@ -152,14 +146,12 @@ static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
}
request.load_method = load_method;
- strscpy_pad(request.firmware_tag, tag);
- /*
- * The firmware-tag should be NULL terminated, otherwise throw error and
- * fail.
- */
- if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
- dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n");
+ ret = strscpy_pad(request.firmware_tag, tag);
+ if (ret == -E2BIG) {
+ dev_err(fw_mgmt->parent,
+ "load-and-validate: truncated firmware tag: %s\n",
+ request.firmware_tag);
return -EINVAL;
}
@@ -248,14 +240,11 @@ static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
struct gb_fw_mgmt_backend_fw_version_response response;
int ret;
- strscpy_pad(request.firmware_tag, fw_info->firmware_tag);
-
- /*
- * The firmware-tag should be NULL terminated, otherwise throw error and
- * fail.
- */
- if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
- dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n");
+ ret = strscpy_pad(request.firmware_tag, fw_info->firmware_tag);
+ if (ret == -E2BIG) {
+ dev_err(fw_mgmt->parent,
+ "backend-fw-version: truncated firmware tag: %s\n",
+ request.firmware_tag);
return -EINVAL;
}
@@ -302,13 +291,10 @@ static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
int ret;
ret = strscpy_pad(request.firmware_tag, tag);
-
- /*
- * The firmware-tag should be NULL terminated, otherwise throw error and
- * fail.
- */
if (ret == -E2BIG) {
- dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n");
+ dev_err(fw_mgmt->parent,
+ "backend-fw-update: truncated firmware tag: %s\n",
+ request.firmware_tag);
return -EINVAL;
}
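The four hunks above all lean on the same strscpy_pad() contract: the two-argument form sizes the copy from the destination array, always NUL-terminates, zero-pads the rest of the buffer, and returns -E2BIG when the source string did not fit, which is what replaces the old manual last-byte check. A minimal sketch of that pattern (the array size is a placeholder, not GB_FIRMWARE_TAG_MAX_SIZE):

#include <linux/errno.h>
#include <linux/string.h>

struct fw_tag {
	char tag[16];	/* placeholder size */
};

static int copy_fw_tag(struct fw_tag *dst, const char *src)
{
	ssize_t ret;

	ret = strscpy_pad(dst->tag, src);	/* size taken from dst->tag */
	if (ret == -E2BIG)
		return -EINVAL;	/* truncated; reject as the loaders above do */

	return 0;	/* on success ret is the number of bytes copied */
}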
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 16bcf7fc8158..f81c34160f72 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -185,8 +185,8 @@ static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
return 0;
}
-static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
- u8 which, bool value_high)
+static int gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
+ u8 which, bool value_high)
{
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_gpio_set_value_request request;
@@ -195,7 +195,7 @@ static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
if (ggc->lines[which].direction == 1) {
dev_warn(dev, "refusing to set value of input gpio %u\n",
which);
- return;
+ return -EPERM;
}
request.which = which;
@@ -204,10 +204,12 @@ static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
&request, sizeof(request), NULL, 0);
if (ret) {
dev_err(dev, "failed to set value of gpio %u\n", which);
- return;
+ return ret;
}
ggc->lines[which].value = request.value;
+
+ return 0;
}
static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
@@ -457,11 +459,11 @@ static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
return ggc->lines[which].value;
}
-static void gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gb_gpio_controller *ggc = gpiochip_get_data(chip);
- gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
+ return gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}
static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -555,7 +557,7 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
gpio->direction_input = gb_gpio_direction_input;
gpio->direction_output = gb_gpio_direction_output;
gpio->get = gb_gpio_get;
- gpio->set = gb_gpio_set;
+ gpio->set_rv = gb_gpio_set;
gpio->set_config = gb_gpio_set_config;
gpio->base = -1; /* Allocate base dynamically */
gpio->ngpio = ggc->line_max + 1;
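The .set_rv member used above is gpiolib's int-returning variant of the set() callback, introduced so that setter failures (such as the -EPERM for input lines) propagate to gpiod_set_value() callers instead of being silently dropped. A hedged sketch of the contract; the two hardware accessors are placeholders, not gpiolib API:

#include <linux/errno.h>
#include <linux/gpio/driver.h>

/* placeholders standing in for driver-private state and bus I/O */
static bool my_line_is_input(struct gpio_chip *chip, unsigned int offset);
static int my_hw_write(struct gpio_chip *chip, unsigned int offset, int value);

static int my_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	if (my_line_is_input(chip, offset))
		return -EPERM;	/* mirror the refusal above */

	return my_hw_write(chip, offset, !!value);	/* 0 or negative errno */
}

/* wired up during probe as: chip->set_rv = my_gpio_set; */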
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index c1c73308800c..830ff38fda92 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -294,7 +294,7 @@ static int adis16203_probe(struct spi_device *spi)
static const struct of_device_id adis16203_of_match[] = {
{ .compatible = "adi,adis16203" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, adis16203_of_match);
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 081b17f49863..4774df778de9 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -431,7 +431,7 @@ static const struct spi_device_id ad7816_id[] = {
{ "ad7816", ID_AD7816 },
{ "ad7817", ID_AD7817 },
{ "ad7818", ID_AD7818 },
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad7816_id);
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index e6ad088636f6..f45968ef94ea 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -127,7 +127,7 @@ static const struct of_device_id adt7316_of_match[] = {
{ .compatible = "adi,adt7516" },
{ .compatible = "adi,adt7517" },
{ .compatible = "adi,adt7519" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, adt7316_of_match);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index f4260786d50a..16f30c4f1aa0 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -1794,7 +1794,7 @@ static int adt7316_setup_irq(struct iio_dev *indio_dev)
struct adt7316_chip_info *chip = iio_priv(indio_dev);
int irq_type, ret;
- irq_type = irqd_get_trigger_type(irq_get_irq_data(chip->bus.irq));
+ irq_type = irq_get_trigger_type(chip->bus.irq);
switch (irq_type) {
case IRQF_TRIGGER_HIGH:
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index db42810c7664..49388da5a684 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -7,6 +7,8 @@
#include <asm/div64.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -16,6 +18,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
+#include <linux/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -59,17 +62,17 @@
#define AD9832_CMD_SLEEPRESCLR 0xC
#define AD9832_FREQ BIT(11)
-#define AD9832_PHASE(x) (((x) & 3) << 9)
+#define AD9832_PHASE_MASK GENMASK(10, 9)
#define AD9832_SYNC BIT(13)
#define AD9832_SELSRC BIT(12)
#define AD9832_SLEEP BIT(13)
#define AD9832_RESET BIT(12)
#define AD9832_CLR BIT(11)
-#define CMD_SHIFT 12
-#define ADD_SHIFT 8
#define AD9832_FREQ_BITS 32
#define AD9832_PHASE_BITS 12
-#define RES_MASK(bits) ((1 << (bits)) - 1)
+#define AD9832_CMD_MSK GENMASK(15, 12)
+#define AD9832_ADD_MSK GENMASK(11, 8)
+#define AD9832_DAT_MSK GENMASK(7, 0)
/**
* struct ad9832_state - driver instance specific data
@@ -127,6 +130,8 @@ static int ad9832_write_frequency(struct ad9832_state *st,
{
unsigned long clk_freq;
unsigned long regval;
+ u8 regval_bytes[4];
+ u16 freq_cmd;
clk_freq = clk_get_rate(st->mclk);
@@ -134,19 +139,15 @@ static int ad9832_write_frequency(struct ad9832_state *st,
return -EINVAL;
regval = ad9832_calc_freqreg(clk_freq, fout);
+ put_unaligned_be32(regval, regval_bytes);
- st->freq_data[0] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) |
- (addr << ADD_SHIFT) |
- ((regval >> 24) & 0xFF));
- st->freq_data[1] = cpu_to_be16((AD9832_CMD_FRE16BITSW << CMD_SHIFT) |
- ((addr - 1) << ADD_SHIFT) |
- ((regval >> 16) & 0xFF));
- st->freq_data[2] = cpu_to_be16((AD9832_CMD_FRE8BITSW << CMD_SHIFT) |
- ((addr - 2) << ADD_SHIFT) |
- ((regval >> 8) & 0xFF));
- st->freq_data[3] = cpu_to_be16((AD9832_CMD_FRE16BITSW << CMD_SHIFT) |
- ((addr - 3) << ADD_SHIFT) |
- ((regval >> 0) & 0xFF));
+ for (int i = 0; i < ARRAY_SIZE(regval_bytes); i++) {
+ freq_cmd = (i % 2 == 0) ? AD9832_CMD_FRE8BITSW : AD9832_CMD_FRE16BITSW;
+
+ st->freq_data[i] = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, freq_cmd) |
+ FIELD_PREP(AD9832_ADD_MSK, addr - i) |
+ FIELD_PREP(AD9832_DAT_MSK, regval_bytes[i]));
+ }
return spi_sync(st->spi, &st->freq_msg);
}
@@ -154,15 +155,21 @@ static int ad9832_write_frequency(struct ad9832_state *st,
static int ad9832_write_phase(struct ad9832_state *st,
unsigned long addr, unsigned long phase)
{
+ u8 phase_bytes[2];
+ u16 phase_cmd;
+
if (phase >= BIT(AD9832_PHASE_BITS))
return -EINVAL;
- st->phase_data[0] = cpu_to_be16((AD9832_CMD_PHA8BITSW << CMD_SHIFT) |
- (addr << ADD_SHIFT) |
- ((phase >> 8) & 0xFF));
- st->phase_data[1] = cpu_to_be16((AD9832_CMD_PHA16BITSW << CMD_SHIFT) |
- ((addr - 1) << ADD_SHIFT) |
- (phase & 0xFF));
+ put_unaligned_be16(phase, phase_bytes);
+
+ for (int i = 0; i < ARRAY_SIZE(phase_bytes); i++) {
+ phase_cmd = (i % 2 == 0) ? AD9832_CMD_PHA8BITSW : AD9832_CMD_PHA16BITSW;
+
+ st->phase_data[i] = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, phase_cmd) |
+ FIELD_PREP(AD9832_ADD_MSK, addr - i) |
+ FIELD_PREP(AD9832_DAT_MSK, phase_bytes[i]));
+ }
return spi_sync(st->spi, &st->phase_msg);
}
@@ -193,25 +200,23 @@ static ssize_t ad9832_write(struct device *dev, struct device_attribute *attr,
ret = ad9832_write_phase(st, this_attr->address, val);
break;
case AD9832_PINCTRL_EN:
- if (val)
- st->ctrl_ss &= ~AD9832_SELSRC;
- else
- st->ctrl_ss |= AD9832_SELSRC;
- st->data = cpu_to_be16((AD9832_CMD_SYNCSELSRC << CMD_SHIFT) |
- st->ctrl_ss);
+ st->ctrl_ss &= ~AD9832_SELSRC;
+ st->ctrl_ss |= FIELD_PREP(AD9832_SELSRC, val ? 0 : 1);
+
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_SYNCSELSRC) |
+ st->ctrl_ss);
ret = spi_sync(st->spi, &st->msg);
break;
case AD9832_FREQ_SYM:
- if (val == 1) {
- st->ctrl_fp |= AD9832_FREQ;
- } else if (val == 0) {
+ if (val == 1 || val == 0) {
st->ctrl_fp &= ~AD9832_FREQ;
+ st->ctrl_fp |= FIELD_PREP(AD9832_FREQ, val ? 1 : 0);
} else {
ret = -EINVAL;
break;
}
- st->data = cpu_to_be16((AD9832_CMD_FPSELECT << CMD_SHIFT) |
- st->ctrl_fp);
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_FPSELECT) |
+ st->ctrl_fp);
ret = spi_sync(st->spi, &st->msg);
break;
case AD9832_PHASE_SYM:
@@ -220,22 +225,21 @@ static ssize_t ad9832_write(struct device *dev, struct device_attribute *attr,
break;
}
- st->ctrl_fp &= ~AD9832_PHASE(3);
- st->ctrl_fp |= AD9832_PHASE(val);
+ st->ctrl_fp &= ~AD9832_PHASE_MASK;
+ st->ctrl_fp |= FIELD_PREP(AD9832_PHASE_MASK, val);
- st->data = cpu_to_be16((AD9832_CMD_FPSELECT << CMD_SHIFT) |
- st->ctrl_fp);
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_FPSELECT) |
+ st->ctrl_fp);
ret = spi_sync(st->spi, &st->msg);
break;
case AD9832_OUTPUT_EN:
if (val)
- st->ctrl_src &= ~(AD9832_RESET | AD9832_SLEEP |
- AD9832_CLR);
+ st->ctrl_src &= ~(AD9832_RESET | AD9832_SLEEP | AD9832_CLR);
else
- st->ctrl_src |= AD9832_RESET;
+ st->ctrl_src |= FIELD_PREP(AD9832_RESET, 1);
- st->data = cpu_to_be16((AD9832_CMD_SLEEPRESCLR << CMD_SHIFT) |
- st->ctrl_src);
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_SLEEPRESCLR) |
+ st->ctrl_src);
ret = spi_sync(st->spi, &st->msg);
break;
default:
@@ -367,8 +371,8 @@ static int ad9832_probe(struct spi_device *spi)
spi_message_add_tail(&st->phase_xfer[1], &st->phase_msg);
st->ctrl_src = AD9832_SLEEP | AD9832_RESET | AD9832_CLR;
- st->data = cpu_to_be16((AD9832_CMD_SLEEPRESCLR << CMD_SHIFT) |
- st->ctrl_src);
+ st->data = cpu_to_be16(FIELD_PREP(AD9832_CMD_MSK, AD9832_CMD_SLEEPRESCLR) |
+ st->ctrl_src);
ret = spi_sync(st->spi, &st->msg);
if (ret) {
dev_err(&spi->dev, "device init failed\n");
@@ -402,16 +406,24 @@ static int ad9832_probe(struct spi_device *spi)
return devm_iio_device_register(&spi->dev, indio_dev);
}
+static const struct of_device_id ad9832_of_match[] = {
+ { .compatible = "adi,ad9832" },
+ { .compatible = "adi,ad9835" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad9832_of_match);
+
static const struct spi_device_id ad9832_id[] = {
{"ad9832", 0},
{"ad9835", 0},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad9832_id);
static struct spi_driver ad9832_driver = {
.driver = {
.name = "ad9832",
+ .of_match_table = ad9832_of_match,
},
.probe = ad9832_probe,
.id_table = ad9832_id,
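The ad9832 rework above swaps open-coded shifts and masks for GENMASK() field definitions filled in with FIELD_PREP(), which places each value at its mask's position and lets constant values be range-checked at build time. A sketch of one 16-bit command word as the loops compose it (command in bits 15:12, register address in bits 11:8, one data byte in bits 7:0); the macro names here are local to the sketch:

#include <asm/byteorder.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define SK_CMD_MSK	GENMASK(15, 12)
#define SK_ADD_MSK	GENMASK(11, 8)
#define SK_DAT_MSK	GENMASK(7, 0)

static __be16 ad983x_pack(u16 cmd, u16 addr, u8 data)
{
	return cpu_to_be16(FIELD_PREP(SK_CMD_MSK, cmd) |
			   FIELD_PREP(SK_ADD_MSK, addr) |
			   FIELD_PREP(SK_DAT_MSK, data));
}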
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index 98dfbd9289ab..d0d840edb8d2 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -13,7 +13,6 @@
/**
* struct ad9832_platform_data - platform specific information
- * @mclk: master clock in Hz
* @freq0: power up freq0 tuning word in Hz
* @freq1: power up freq1 tuning word in Hz
* @phase0: power up phase0 value [0..4095] correlates with 0..2PI
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 50413da2aa65..0038eb234d40 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -479,7 +479,7 @@ static const struct spi_device_id ad9834_id[] = {
{"ad9834", ID_AD9834},
{"ad9837", ID_AD9837},
{"ad9838", ID_AD9838},
- {}
+ { }
};
MODULE_DEVICE_TABLE(spi, ad9834_id);
@@ -488,7 +488,7 @@ static const struct of_device_id ad9834_of_match[] = {
{.compatible = "adi,ad9834"},
{.compatible = "adi,ad9837"},
{.compatible = "adi,ad9838"},
- {}
+ { }
};
MODULE_DEVICE_TABLE(of, ad9834_of_match);
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index d5544fc2fe98..85a4223295cd 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -271,11 +271,12 @@ static ssize_t ad5933_show_frequency(struct device *dev,
u8 d8[4];
} dat;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = ad5933_i2c_read(st->client, this_attr->address, 3, &dat.d8[1]);
- iio_device_release_direct_mode(indio_dev);
+
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -305,11 +306,12 @@ static ssize_t ad5933_store_frequency(struct device *dev,
if (val > AD5933_MAX_OUTPUT_FREQ_Hz)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = ad5933_set_freq(st, this_attr->address, val);
- iio_device_release_direct_mode(indio_dev);
+
+ iio_device_release_direct(indio_dev);
return ret ? ret : len;
}
@@ -384,9 +386,9 @@ static ssize_t ad5933_store(struct device *dev,
return ret;
}
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
@@ -411,7 +413,7 @@ static ssize_t ad5933_store(struct device *dev,
ret = ad5933_cmd(st, 0);
break;
case AD5933_OUT_SETTLING_CYCLES:
- val = clamp(val, (u16)0, (u16)0x7FF);
+ val = clamp(val, (u16)0, (u16)0x7FC);
st->settling_cycles = val;
/* 2x, 4x handling, see datasheet */
@@ -438,7 +440,8 @@ static ssize_t ad5933_store(struct device *dev,
}
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+
+ iio_device_release_direct(indio_dev);
return ret ? ret : len;
}
@@ -506,9 +509,9 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = ad5933_cmd(st, AD5933_CTRL_MEASURE_TEMP);
if (ret < 0)
goto out;
@@ -521,7 +524,8 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
2, (u8 *)&dat);
if (ret < 0)
goto out;
- iio_device_release_direct_mode(indio_dev);
+
+ iio_device_release_direct(indio_dev);
*val = sign_extend32(be16_to_cpu(dat), 13);
return IIO_VAL_INT;
@@ -533,7 +537,7 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
out:
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -724,7 +728,7 @@ static int ad5933_probe(struct i2c_client *client)
static const struct i2c_device_id ad5933_id[] = {
{ "ad5933" },
{ "ad5934" },
- {}
+ { }
};
MODULE_DEVICE_TABLE(i2c, ad5933_id);
@@ -732,7 +736,7 @@ MODULE_DEVICE_TABLE(i2c, ad5933_id);
static const struct of_device_id ad5933_of_match[] = {
{ .compatible = "adi,ad5933" },
{ .compatible = "adi,ad5934" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(of, ad5933_of_match);
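The claim conversions above hinge on the changed return convention: iio_device_claim_direct() returns true when direct mode was claimed and false when buffered capture holds the device, so each caller now supplies -EBUSY itself instead of forwarding an errno from the old _mode variant. A minimal sketch, with the actual transfer left as a placeholder:

#include <linux/errno.h>
#include <linux/iio/iio.h>

static int my_bus_read(struct iio_dev *indio_dev);	/* placeholder */

static int read_direct(struct iio_dev *indio_dev)
{
	int ret;

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;	/* buffer in use, direct access refused */

	ret = my_bus_read(indio_dev);

	iio_device_release_direct(indio_dev);
	return ret;
}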
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index f5ab23592f29..4f46182da58b 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -27,18 +27,6 @@ config VIDEO_ATOMISP_GC2235
It currently only works with the atomisp driver.
-config VIDEO_ATOMISP_MT9M114
- tristate "Aptina mt9m114 sensor support"
- depends on ACPI
- depends on I2C && VIDEO_DEV
- help
- This is a Video4Linux2 sensor-level driver for the Micron
- mt9m114 1.3 Mpixel camera.
-
- mt9m114 is video camera sensor.
-
- It currently only works with the atomisp driver.
-
config VIDEO_ATOMISP_GC0310
tristate "GC0310 sensor support"
depends on ACPI
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 021a7ea0a075..e1637417e5c5 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -3,7 +3,6 @@
# Makefile for sensor drivers
#
-obj-$(CONFIG_VIDEO_ATOMISP_MT9M114) += atomisp-mt9m114.o
obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
obj-$(CONFIG_VIDEO_ATOMISP_GC0310) += atomisp-gc0310.o
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
deleted file mode 100644
index 4658aeeb88fd..000000000000
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ /dev/null
@@ -1,1612 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for mt9m114 Camera Sensor.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/acpi.h>
-#include <linux/mutex.h>
-#include "../include/linux/atomisp_gmin_platform.h"
-#include <media/v4l2-device.h>
-
-#include "mt9m114.h"
-
-#define to_mt9m114_sensor(s) container_of(s, struct mt9m114_device, sd)
-
-/*
- * TODO: use debug parameter to actually define when debug messages should
- * be printed.
- */
-static int debug;
-static int aaalock;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level (0-1)");
-
-static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value);
-static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value);
-static int mt9m114_wait_state(struct i2c_client *client, int timeout);
-
-static int
-mt9m114_read_reg(struct i2c_client *client, u16 data_length, u32 reg, u32 *val)
-{
- int err;
- struct i2c_msg msg[2];
- unsigned char data[4];
-
- if (!client->adapter) {
- v4l2_err(client, "%s error, no client->adapter\n", __func__);
- return -ENODEV;
- }
-
- if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT
- && data_length != MISENSOR_32BIT) {
- v4l2_err(client, "%s error, invalid data length\n", __func__);
- return -EINVAL;
- }
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = MSG_LEN_OFFSET;
- msg[0].buf = data;
-
- /* high byte goes out first */
- data[0] = (u16)(reg >> 8);
- data[1] = (u16)(reg & 0xff);
-
- msg[1].addr = client->addr;
- msg[1].len = data_length;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = data;
-
- err = i2c_transfer(client->adapter, msg, 2);
-
- if (err >= 0) {
- *val = 0;
- /* high byte comes first */
- if (data_length == MISENSOR_8BIT)
- *val = data[0];
- else if (data_length == MISENSOR_16BIT)
- *val = data[1] + (data[0] << 8);
- else
- *val = data[3] + (data[2] << 8) +
- (data[1] << 16) + (data[0] << 24);
-
- return 0;
- }
-
- dev_err(&client->dev, "read from offset 0x%x error %d", reg, err);
- return err;
-}
-
-static int
-mt9m114_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u32 val)
-{
- int num_msg;
- struct i2c_msg msg;
- unsigned char data[6] = {0};
- __be16 *wreg;
- int retry = 0;
-
- if (!client->adapter) {
- v4l2_err(client, "%s error, no client->adapter\n", __func__);
- return -ENODEV;
- }
-
- if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT
- && data_length != MISENSOR_32BIT) {
- v4l2_err(client, "%s error, invalid data_length\n", __func__);
- return -EINVAL;
- }
-
- memset(&msg, 0, sizeof(msg));
-
-again:
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 2 + data_length;
- msg.buf = data;
-
- /* high byte goes out first */
- wreg = (void *)data;
- *wreg = cpu_to_be16(reg);
-
- if (data_length == MISENSOR_8BIT) {
- data[2] = (u8)(val);
- } else if (data_length == MISENSOR_16BIT) {
- u16 *wdata = (void *)&data[2];
-
- *wdata = be16_to_cpu(*(__be16 *)&data[2]);
- } else {
- /* MISENSOR_32BIT */
- u32 *wdata = (void *)&data[2];
-
- *wdata = be32_to_cpu(*(__be32 *)&data[2]);
- }
-
- num_msg = i2c_transfer(client->adapter, &msg, 1);
-
- /*
- * HACK: Need some delay here for Rev 2 sensors otherwise some
- * registers do not seem to load correctly.
- */
- mdelay(1);
-
- if (num_msg >= 0)
- return 0;
-
- dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d",
- val, reg, num_msg);
- if (retry <= I2C_RETRY_COUNT) {
- dev_dbg(&client->dev, "retrying... %d", retry);
- retry++;
- msleep(20);
- goto again;
- }
-
- return num_msg;
-}
-
-/**
- * misensor_rmw_reg - Read/Modify/Write a value to a register in the sensor
- * device
- * @client: i2c driver client structure
- * @data_length: 8/16/32-bits length
- * @reg: register address
- * @mask: masked out bits
- * @set: bits set
- *
- * Read/modify/write a value to a register in the sensor device.
- * Returns zero if successful, or non-zero otherwise.
- */
-static int
-misensor_rmw_reg(struct i2c_client *client, u16 data_length, u16 reg,
- u32 mask, u32 set)
-{
- int err;
- u32 val;
-
- /* Exit when no mask */
- if (mask == 0)
- return 0;
-
- /* @mask must not exceed data length */
- switch (data_length) {
- case MISENSOR_8BIT:
- if (mask & ~0xff)
- return -EINVAL;
- break;
- case MISENSOR_16BIT:
- if (mask & ~0xffff)
- return -EINVAL;
- break;
- case MISENSOR_32BIT:
- break;
- default:
- /* Wrong @data_length */
- return -EINVAL;
- }
-
- err = mt9m114_read_reg(client, data_length, reg, &val);
- if (err) {
- v4l2_err(client, "%s error exit, read failed\n", __func__);
- return -EINVAL;
- }
-
- val &= ~mask;
-
- /*
- * Perform the OR function if the @set exists.
- * Shift @set value to target bit location. @set should set only
- * bits included in @mask.
- *
- * REVISIT: This function expects @set to be non-shifted. Its shift
- * value is then defined to be equal to mask's LSB position.
- * How about to inform values in their right offset position and avoid
- * this unneeded shift operation?
- */
- set <<= ffs(mask) - 1;
- val |= set & mask;
-
- err = mt9m114_write_reg(client, data_length, reg, val);
- if (err) {
- v4l2_err(client, "%s error exit, write failed\n", __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __mt9m114_flush_reg_array(struct i2c_client *client,
- struct mt9m114_write_ctrl *ctrl)
-{
- struct i2c_msg msg;
- const int num_msg = 1;
- int ret;
- int retry = 0;
- __be16 *data16 = (void *)&ctrl->buffer.addr;
-
- if (ctrl->index == 0)
- return 0;
-
-again:
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = 2 + ctrl->index;
- *data16 = cpu_to_be16(ctrl->buffer.addr);
- msg.buf = (u8 *)&ctrl->buffer;
-
- ret = i2c_transfer(client->adapter, &msg, num_msg);
- if (ret != num_msg) {
- if (++retry <= I2C_RETRY_COUNT) {
- dev_dbg(&client->dev, "retrying... %d\n", retry);
- msleep(20);
- goto again;
- }
- dev_err(&client->dev, "%s: i2c transfer error\n", __func__);
- return -EIO;
- }
-
- ctrl->index = 0;
-
- /*
- * REVISIT: Previously we had a delay after writing data to sensor.
- * But it was removed as our tests have shown it is not necessary
- * anymore.
- */
-
- return 0;
-}
-
-static int __mt9m114_buf_reg_array(struct i2c_client *client,
- struct mt9m114_write_ctrl *ctrl,
- const struct misensor_reg *next)
-{
- __be16 *data16;
- __be32 *data32;
- int err;
-
- /* Insufficient buffer? Let's flush and get more free space. */
- if (ctrl->index + next->length >= MT9M114_MAX_WRITE_BUF_SIZE) {
- err = __mt9m114_flush_reg_array(client, ctrl);
- if (err)
- return err;
- }
-
- switch (next->length) {
- case MISENSOR_8BIT:
- ctrl->buffer.data[ctrl->index] = (u8)next->val;
- break;
- case MISENSOR_16BIT:
- data16 = (__be16 *)&ctrl->buffer.data[ctrl->index];
- *data16 = cpu_to_be16((u16)next->val);
- break;
- case MISENSOR_32BIT:
- data32 = (__be32 *)&ctrl->buffer.data[ctrl->index];
- *data32 = cpu_to_be32(next->val);
- break;
- default:
- return -EINVAL;
- }
-
- /* When first item is added, we need to store its starting address */
- if (ctrl->index == 0)
- ctrl->buffer.addr = next->reg;
-
- ctrl->index += next->length;
-
- return 0;
-}
-
-static int
-__mt9m114_write_reg_is_consecutive(struct i2c_client *client,
- struct mt9m114_write_ctrl *ctrl,
- const struct misensor_reg *next)
-{
- if (ctrl->index == 0)
- return 1;
-
- return ctrl->buffer.addr + ctrl->index == next->reg;
-}
-
-/*
- * mt9m114_write_reg_array - Initializes a list of mt9m114 registers
- * @client: i2c driver client structure
- * @reglist: list of registers to be written
- * @poll: completion polling requirement
- * This function initializes a list of registers. When consecutive addresses
- * are found in a row on the list, this function creates a buffer and sends
- * consecutive data in a single i2c_transfer().
- *
- * __mt9m114_flush_reg_array, __mt9m114_buf_reg_array() and
- * __mt9m114_write_reg_is_consecutive() are internal functions to
- * mt9m114_write_reg_array() and should be not used anywhere else.
- *
- */
-static int mt9m114_write_reg_array(struct i2c_client *client,
- const struct misensor_reg *reglist,
- int poll)
-{
- const struct misensor_reg *next = reglist;
- struct mt9m114_write_ctrl ctrl;
- int err;
-
- if (poll == PRE_POLLING) {
- err = mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT);
- if (err)
- return err;
- }
-
- ctrl.index = 0;
- for (; next->length != MISENSOR_TOK_TERM; next++) {
- switch (next->length & MISENSOR_TOK_MASK) {
- case MISENSOR_TOK_DELAY:
- err = __mt9m114_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- msleep(next->val);
- break;
- case MISENSOR_TOK_RMW:
- err = __mt9m114_flush_reg_array(client, &ctrl);
- err |= misensor_rmw_reg(client,
- next->length &
- ~MISENSOR_TOK_RMW,
- next->reg, next->val,
- next->val2);
- if (err) {
- dev_err(&client->dev, "%s read err. aborted\n",
- __func__);
- return -EINVAL;
- }
- break;
- default:
- /*
- * If next address is not consecutive, data needs to be
- * flushed before proceed.
- */
- if (!__mt9m114_write_reg_is_consecutive(client, &ctrl,
- next)) {
- err = __mt9m114_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- }
- err = __mt9m114_buf_reg_array(client, &ctrl, next);
- if (err) {
- v4l2_err(client, "%s: write error, aborted\n",
- __func__);
- return err;
- }
- break;
- }
- }
-
- err = __mt9m114_flush_reg_array(client, &ctrl);
- if (err)
- return err;
-
- if (poll == POST_POLLING)
- return mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT);
-
- return 0;
-}
-
-static int mt9m114_wait_state(struct i2c_client *client, int timeout)
-{
- int ret;
- unsigned int val;
-
- while (timeout-- > 0) {
- ret = mt9m114_read_reg(client, MISENSOR_16BIT, 0x0080, &val);
- if (ret)
- return ret;
- if ((val & 0x2) == 0)
- return 0;
- msleep(20);
- }
-
- return -EINVAL;
-}
-
-static int mt9m114_set_suspend(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return mt9m114_write_reg_array(client,
- mt9m114_standby_reg, POST_POLLING);
-}
-
-static int mt9m114_init_common(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- return mt9m114_write_reg_array(client, mt9m114_common, PRE_POLLING);
-}
-
-static int power_ctrl(struct v4l2_subdev *sd, bool flag)
-{
- int ret;
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
-
- if (!dev || !dev->platform_data)
- return -ENODEV;
-
- if (flag) {
- ret = dev->platform_data->v2p8_ctrl(sd, 1);
- if (ret == 0) {
- ret = dev->platform_data->v1p8_ctrl(sd, 1);
- if (ret)
- ret = dev->platform_data->v2p8_ctrl(sd, 0);
- }
- } else {
- ret = dev->platform_data->v2p8_ctrl(sd, 0);
- ret = dev->platform_data->v1p8_ctrl(sd, 0);
- }
- return ret;
-}
-
-static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
-{
- int ret;
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
-
- if (!dev || !dev->platform_data)
- return -ENODEV;
-
- /*
- * Note: current modules wire only one GPIO signal (RESET#),
- * but the schematic wires up two to the connector. BIOS
- * versions have been unfortunately inconsistent with which
- * ACPI index RESET# is on, so hit both
- */
-
- if (flag) {
- ret = dev->platform_data->gpio0_ctrl(sd, 0);
- ret = dev->platform_data->gpio1_ctrl(sd, 0);
- msleep(60);
- ret |= dev->platform_data->gpio0_ctrl(sd, 1);
- ret |= dev->platform_data->gpio1_ctrl(sd, 1);
- } else {
- ret = dev->platform_data->gpio0_ctrl(sd, 0);
- ret = dev->platform_data->gpio1_ctrl(sd, 0);
- }
- return ret;
-}
-
-static int power_up(struct v4l2_subdev *sd)
-{
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (!dev->platform_data) {
- dev_err(&client->dev, "no camera_sensor_platform_data");
- return -ENODEV;
- }
-
- /* power control */
- ret = power_ctrl(sd, 1);
- if (ret)
- goto fail_power;
-
- /* flis clock control */
- ret = dev->platform_data->flisclk_ctrl(sd, 1);
- if (ret)
- goto fail_clk;
-
- /* gpio ctrl */
- ret = gpio_ctrl(sd, 1);
- if (ret)
- dev_err(&client->dev, "gpio failed 1\n");
- /*
- * according to DS, 44ms is needed between power up and first i2c
- * commend
- */
- msleep(50);
-
- return 0;
-
-fail_clk:
- dev->platform_data->flisclk_ctrl(sd, 0);
-fail_power:
- power_ctrl(sd, 0);
- dev_err(&client->dev, "sensor power-up failed\n");
-
- return ret;
-}
-
-static int power_down(struct v4l2_subdev *sd)
-{
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (!dev->platform_data) {
- dev_err(&client->dev, "no camera_sensor_platform_data");
- return -ENODEV;
- }
-
- ret = dev->platform_data->flisclk_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "flisclk failed\n");
-
- /* gpio ctrl */
- ret = gpio_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "gpio failed 1\n");
-
- /* power control */
- ret = power_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "vprog failed.\n");
-
- /* according to DS, 20ms is needed after power down */
- msleep(20);
-
- return ret;
-}
-
-static int mt9m114_s_power(struct v4l2_subdev *sd, int power)
-{
- if (power == 0)
- return power_down(sd);
-
- if (power_up(sd))
- return -EINVAL;
-
- return mt9m114_init_common(sd);
-}
-
-static int mt9m114_res2size(struct v4l2_subdev *sd, int *h_size, int *v_size)
-{
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- unsigned short hsize;
- unsigned short vsize;
-
- switch (dev->res) {
- case MT9M114_RES_736P:
- hsize = MT9M114_RES_736P_SIZE_H;
- vsize = MT9M114_RES_736P_SIZE_V;
- break;
- case MT9M114_RES_864P:
- hsize = MT9M114_RES_864P_SIZE_H;
- vsize = MT9M114_RES_864P_SIZE_V;
- break;
- case MT9M114_RES_960P:
- hsize = MT9M114_RES_960P_SIZE_H;
- vsize = MT9M114_RES_960P_SIZE_V;
- break;
- default:
- v4l2_err(sd, "%s: Resolution 0x%08x unknown\n", __func__,
- dev->res);
- return -EINVAL;
- }
-
- if (h_size)
- *h_size = hsize;
- if (v_size)
- *v_size = vsize;
-
- return 0;
-}
-
-static int mt9m114_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- int width, height;
- int ret;
-
- if (format->pad)
- return -EINVAL;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
-
- ret = mt9m114_res2size(sd, &width, &height);
- if (ret)
- return ret;
- fmt->width = width;
- fmt->height = height;
-
- return 0;
-}
-
-static int mt9m114_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct i2c_client *c = v4l2_get_subdevdata(sd);
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- struct mt9m114_res_struct *res;
- u32 width = fmt->width;
- u32 height = fmt->height;
- struct camera_mipi_info *mt9m114_info = NULL;
-
- int ret;
-
- if (format->pad)
- return -EINVAL;
- dev->streamon = 0;
- dev->first_exp = MT9M114_DEFAULT_FIRST_EXP;
-
- mt9m114_info = v4l2_get_subdev_hostdata(sd);
- if (!mt9m114_info)
- return -EINVAL;
-
- res = v4l2_find_nearest_size(mt9m114_res,
- ARRAY_SIZE(mt9m114_res), width,
- height, fmt->width, fmt->height);
- if (!res)
- res = &mt9m114_res[N_RES - 1];
-
- fmt->width = res->width;
- fmt->height = res->height;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
- return 0;
- }
-
- switch (res->res) {
- case MT9M114_RES_736P:
- ret = mt9m114_write_reg_array(c, mt9m114_736P_init, NO_POLLING);
- ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
- MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
- break;
- case MT9M114_RES_864P:
- ret = mt9m114_write_reg_array(c, mt9m114_864P_init, NO_POLLING);
- ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
- MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
- break;
- case MT9M114_RES_960P:
- ret = mt9m114_write_reg_array(c, mt9m114_976P_init, NO_POLLING);
- /* set sensor read_mode to Normal */
- ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
- MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
- break;
- default:
- v4l2_err(sd, "set resolution: %d failed!\n", res->res);
- return -EINVAL;
- }
-
- if (ret)
- return -EINVAL;
-
- ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg, POST_POLLING);
- if (ret < 0)
- return ret;
-
- if (mt9m114_set_suspend(sd))
- return -EINVAL;
-
- if (dev->res != res->res) {
- int index;
-
- /* Switch to different size */
- if (width <= 640) {
- dev->nctx = 0x00; /* Set for context A */
- } else {
- /*
- * Context B is used for resolutions larger than 640x480
- * Using YUV for Context B.
- */
- dev->nctx = 0x01; /* set for context B */
- }
-
- /*
- * Marked current sensor res as being "used"
- *
- * REVISIT: We don't need to use an "used" field on each mode
- * list entry to know which mode is selected. If this
- * information is really necessary, how about to use a single
- * variable on sensor dev struct?
- */
- for (index = 0; index < N_RES; index++) {
- if ((width == mt9m114_res[index].width) &&
- (height == mt9m114_res[index].height)) {
- mt9m114_res[index].used = true;
- continue;
- }
- mt9m114_res[index].used = false;
- }
- }
- /*
- * mt9m114 - we don't poll for context switch
- * because it does not happen with streaming disabled.
- */
- dev->res = res->res;
-
- fmt->width = width;
- fmt->height = height;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
- return 0;
-}
-
-/* Horizontal flip the image. */
-static int mt9m114_g_hflip(struct v4l2_subdev *sd, s32 *val)
-{
- struct i2c_client *c = v4l2_get_subdevdata(sd);
- int ret;
- u32 data;
-
- ret = mt9m114_read_reg(c, MISENSOR_16BIT,
- (u32)MISENSOR_READ_MODE, &data);
- if (ret)
- return ret;
- *val = !!(data & MISENSOR_HFLIP_MASK);
-
- return 0;
-}
-
-static int mt9m114_g_vflip(struct v4l2_subdev *sd, s32 *val)
-{
- struct i2c_client *c = v4l2_get_subdevdata(sd);
- int ret;
- u32 data;
-
- ret = mt9m114_read_reg(c, MISENSOR_16BIT,
- (u32)MISENSOR_READ_MODE, &data);
- if (ret)
- return ret;
- *val = !!(data & MISENSOR_VFLIP_MASK);
-
- return 0;
-}
-
-static long mt9m114_s_exposure(struct v4l2_subdev *sd,
- struct atomisp_exposure *exposure)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- int ret = 0;
- unsigned int coarse_integration = 0;
- unsigned int f_lines = 0;
- unsigned int frame_len_lines = 0; /* ExposureTime.FrameLengthLines; */
- unsigned int analog_gain, digital_gain;
- u32 analog_gain_to_write = 0;
-
- dev_dbg(&client->dev, "%s(0x%X 0x%X 0x%X)\n", __func__,
- exposure->integration_time[0], exposure->gain[0],
- exposure->gain[1]);
-
- coarse_integration = exposure->integration_time[0];
- /*
- * fine_integration = ExposureTime.FineIntegrationTime;
- * frame_len_lines = ExposureTime.FrameLengthLines;
- */
- f_lines = mt9m114_res[dev->res].lines_per_frame;
- analog_gain = exposure->gain[0];
- digital_gain = exposure->gain[1];
- if (!dev->streamon) {
-		/* Save the first exposure values while the stream is off */
- dev->first_exp = coarse_integration;
- dev->first_gain = analog_gain;
- dev->first_diggain = digital_gain;
- }
- /* digital_gain = 0x400 * (((u16) digital_gain) >> 8) + */
- /* ((unsigned int)(0x400 * (((u16) digital_gain) & 0xFF)) >>8); */
-
- /* set frame length */
- if (f_lines < coarse_integration + 6)
- f_lines = coarse_integration + 6;
- if (f_lines < frame_len_lines)
- f_lines = frame_len_lines;
- ret = mt9m114_write_reg(client, MISENSOR_16BIT, 0x300A, f_lines);
- if (ret) {
- v4l2_err(client, "%s: fail to set f_lines\n", __func__);
- return -EINVAL;
- }
-
- /* set coarse integration */
- /*
-	 * The 3A algorithm provides the real exposure time,
-	 * so it should not be translated into any other value here.
- */
- ret = mt9m114_write_reg(client, MISENSOR_16BIT,
- REG_EXPO_COARSE, (u16)(coarse_integration));
- if (ret) {
- v4l2_err(client, "%s: fail to set exposure time\n", __func__);
- return -EINVAL;
- }
-
- /*
- * set analog/digital gain
- switch(analog_gain)
- {
- case 0:
- analog_gain_to_write = 0x0;
- break;
- case 1:
- analog_gain_to_write = 0x20;
- break;
- case 2:
- analog_gain_to_write = 0x60;
- break;
- case 4:
- analog_gain_to_write = 0xA0;
- break;
- case 8:
- analog_gain_to_write = 0xE0;
- break;
- default:
- analog_gain_to_write = 0x20;
- break;
- }
- */
- if (digital_gain >= 16 || digital_gain <= 1)
- digital_gain = 1;
- /*
- * analog_gain_to_write = (u16)((digital_gain << 12)
- * | analog_gain_to_write);
- */
- analog_gain_to_write = (u16)((digital_gain << 12) | (u16)analog_gain);
- ret = mt9m114_write_reg(client, MISENSOR_16BIT,
- REG_GAIN, analog_gain_to_write);
- if (ret) {
- v4l2_err(client, "%s: fail to set analog_gain_to_write\n",
- __func__);
- return -EINVAL;
- }
-
- return ret;
-}
-
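mt9m114_s_exposure() above packs the clamped digital gain into bits 15:12 of REG_GAIN while the analog gain occupies the low bits. A minimal standalone sketch of just that arithmetic (pack_gain() is an illustrative name, not a driver function):

#include <stdint.h>
#include <stdio.h>

/* Mirror the driver's gain packing: digital gain in bits 15:12,
 * analog gain in the low bits. */
static uint16_t pack_gain(unsigned int digital_gain, unsigned int analog_gain)
{
	/* same clamp as the driver: out-of-range digital gain becomes 1 */
	if (digital_gain >= 16 || digital_gain <= 1)
		digital_gain = 1;
	return (uint16_t)((digital_gain << 12) | (uint16_t)analog_gain);
}

int main(void)
{
	/* digital gain 2, analog gain 0x20 -> REG_GAIN = 0x2020 */
	printf("REG_GAIN = 0x%04X\n", pack_gain(2, 0x20));
	return 0;
}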
-static long mt9m114_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- switch (cmd) {
- case ATOMISP_IOC_S_EXPOSURE:
- return mt9m114_s_exposure(sd, arg);
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * This returns the exposure time being used. This should only be used
- * for filling in EXIF data, not for actual image processing.
- */
-static int mt9m114_g_exposure(struct v4l2_subdev *sd, s32 *value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u32 coarse;
- int ret;
-
- /* the fine integration time is currently not calculated */
- ret = mt9m114_read_reg(client, MISENSOR_16BIT,
- REG_EXPO_COARSE, &coarse);
- if (ret)
- return ret;
-
- *value = coarse;
- return 0;
-}
-
-/*
- * Return the maximum number of exposure zones the sensor supports.
- * For this sensor the maximum exposure zone number is 1.
- */
-static int mt9m114_g_exposure_zone_num(struct v4l2_subdev *sd, s32 *val)
-{
- *val = 1;
-
- return 0;
-}
-
-/*
- * Set the exposure metering mode: average/center_weighted/spot/matrix.
- */
-static int mt9m114_s_exposure_metering(struct v4l2_subdev *sd, s32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- switch (val) {
- case V4L2_EXPOSURE_METERING_SPOT:
- ret = mt9m114_write_reg_array(client, mt9m114_exp_average,
- NO_POLLING);
- if (ret) {
- dev_err(&client->dev, "write exp_average reg err.\n");
- return ret;
- }
- break;
- case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
- default:
- ret = mt9m114_write_reg_array(client, mt9m114_exp_center,
- NO_POLLING);
- if (ret) {
- dev_err(&client->dev, "write exp_default reg err");
- return ret;
- }
- }
-
- return 0;
-}
-
-/*
- * This function implements the touch exposure feature.
- */
-static int mt9m114_s_exposure_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct misensor_reg exp_reg;
- int width, height;
- int grid_width, grid_height;
- int grid_left, grid_top, grid_right, grid_bottom;
- int win_left, win_top, win_right, win_bottom;
- int i, j;
- int ret;
-
- if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
- sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- grid_left = sel->r.left;
- grid_top = sel->r.top;
- grid_right = sel->r.left + sel->r.width - 1;
- grid_bottom = sel->r.top + sel->r.height - 1;
-
- ret = mt9m114_res2size(sd, &width, &height);
- if (ret)
- return ret;
-
- grid_width = width / 5;
- grid_height = height / 5;
-
- if (grid_width && grid_height) {
- win_left = grid_left / grid_width;
- win_top = grid_top / grid_height;
- win_right = grid_right / grid_width;
- win_bottom = grid_bottom / grid_height;
- } else {
- dev_err(&client->dev, "Incorrect exp grid.\n");
- return -EINVAL;
- }
-
- win_left = clamp_t(int, win_left, 0, 4);
- win_top = clamp_t(int, win_top, 0, 4);
- win_right = clamp_t(int, win_right, 0, 4);
- win_bottom = clamp_t(int, win_bottom, 0, 4);
-
- ret = mt9m114_write_reg_array(client, mt9m114_exp_average, NO_POLLING);
- if (ret) {
- dev_err(&client->dev, "write exp_average reg err.\n");
- return ret;
- }
-
- for (i = win_top; i <= win_bottom; i++) {
- for (j = win_left; j <= win_right; j++) {
- exp_reg = mt9m114_exp_win[i][j];
-
- ret = mt9m114_write_reg(client, exp_reg.length,
- exp_reg.reg, exp_reg.val);
- if (ret) {
- dev_err(&client->dev, "write exp_reg err.\n");
- return ret;
- }
- }
- }
-
- return 0;
-}
-
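mt9m114_s_exposure_selection() above maps the touch rectangle onto inclusive indices of the 5x5 metering grid by integer division and clamping; the driver rejects frames whose fifth is zero before dividing. A standalone sketch of the mapping (rect_to_grid() is an illustrative helper and assumes a frame at least 5 pixels in each dimension):

#include <stdio.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

/* Map a rectangle in a width x height frame onto inclusive
 * window indices of a 5x5 exposure grid. */
static void rect_to_grid(int left, int top, int w, int h,
			 int width, int height,
			 int *wl, int *wt, int *wr, int *wb)
{
	int gw = width / 5, gh = height / 5;	/* assumed non-zero */

	*wl = CLAMP(left / gw, 0, 4);
	*wt = CLAMP(top / gh, 0, 4);
	*wr = CLAMP((left + w - 1) / gw, 0, 4);
	*wb = CLAMP((top + h - 1) / gh, 0, 4);
}

int main(void)
{
	int wl, wt, wr, wb;

	/* a 200x200 touch area near the center of a 1296x976 frame */
	rect_to_grid(548, 388, 200, 200, 1296, 976, &wl, &wt, &wr, &wb);
	printf("windows: cols %d..%d, rows %d..%d\n", wl, wr, wt, wb);
	return 0;
}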
-static int mt9m114_s_ev(struct v4l2_subdev *sd, s32 val)
-{
- struct i2c_client *c = v4l2_get_subdevdata(sd);
- s32 luma = 0x37;
- int err;
-
- /*
-	 * Only EV values from -2 to 2 are supported:
-	 * 0: 0x37, 1: 0x47, 2: 0x57, -1: 0x27, -2: 0x17
- */
- if (val < -2 || val > 2)
- return -EINVAL;
- luma += 0x10 * val;
- dev_dbg(&c->dev, "%s val:%d luma:0x%x\n", __func__, val, luma);
- err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A);
- if (err) {
- dev_err(&c->dev, "%s logic addr access error\n", __func__);
- return err;
- }
- err = mt9m114_write_reg(c, MISENSOR_8BIT, 0xC87A, (u32)luma);
- if (err) {
- dev_err(&c->dev, "%s write target_average_luma failed\n",
- __func__);
- return err;
- }
- udelay(10);
-
- return 0;
-}
-
-static int mt9m114_g_ev(struct v4l2_subdev *sd, s32 *val)
-{
- struct i2c_client *c = v4l2_get_subdevdata(sd);
- int err;
- u32 luma;
-
- err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A);
- if (err) {
- dev_err(&c->dev, "%s logic addr access error\n", __func__);
- return err;
- }
- err = mt9m114_read_reg(c, MISENSOR_8BIT, 0xC87A, &luma);
- if (err) {
- dev_err(&c->dev, "%s read target_average_luma failed\n",
- __func__);
- return err;
- }
- luma -= 0x17;
- luma /= 0x10;
- *val = (s32)luma - 2;
- dev_dbg(&c->dev, "%s val:%d\n", __func__, *val);
-
- return 0;
-}
-
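mt9m114_s_ev() and mt9m114_g_ev() above share one linear mapping between the EV step and the AE target luma written through 0xC87A: luma = 0x37 + 0x10 * ev, inverted as ev = (luma - 0x17) / 0x10 - 2. A standalone round-trip check of that arithmetic:

#include <stdio.h>

static unsigned int ev_to_luma(int ev)		/* ev in -2..2 */
{
	return 0x37 + 0x10 * ev;
}

static int luma_to_ev(unsigned int luma)	/* inverse mapping */
{
	return (int)((luma - 0x17) / 0x10) - 2;
}

int main(void)
{
	int ev;

	for (ev = -2; ev <= 2; ev++)
		printf("ev %+d -> luma 0x%02X -> ev %+d\n",
		       ev, ev_to_luma(ev), luma_to_ev(ev_to_luma(ev)));
	return 0;
}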
-/*
- * Fake interface:
- * the mt9m114 currently cannot support 3a_lock.
- */
-static int mt9m114_s_3a_lock(struct v4l2_subdev *sd, s32 val)
-{
- aaalock = val;
- return 0;
-}
-
-static int mt9m114_g_3a_lock(struct v4l2_subdev *sd, s32 *val)
-{
- if (aaalock)
- return V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE
- | V4L2_LOCK_FOCUS;
- return 0;
-}
-
-static int mt9m114_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct mt9m114_device *dev =
- container_of(ctrl->handler, struct mt9m114_device, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
- __func__, ctrl->val);
- ret = mt9m114_t_vflip(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_HFLIP:
- dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
- __func__, ctrl->val);
- ret = mt9m114_t_hflip(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_EXPOSURE_METERING:
- ret = mt9m114_s_exposure_metering(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_EXPOSURE:
- ret = mt9m114_s_ev(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_3A_LOCK:
- ret = mt9m114_s_3a_lock(&dev->sd, ctrl->val);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static int mt9m114_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct mt9m114_device *dev =
- container_of(ctrl->handler, struct mt9m114_device, ctrl_handler);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- ret = mt9m114_g_vflip(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_HFLIP:
- ret = mt9m114_g_hflip(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_EXPOSURE_ABSOLUTE:
- ret = mt9m114_g_exposure(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_EXPOSURE_ZONE_NUM:
- ret = mt9m114_g_exposure_zone_num(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_EXPOSURE:
- ret = mt9m114_g_ev(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_3A_LOCK:
- ret = mt9m114_g_3a_lock(&dev->sd, &ctrl->val);
- break;
- default:
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = mt9m114_s_ctrl,
- .g_volatile_ctrl = mt9m114_g_volatile_ctrl
-};
-
-static struct v4l2_ctrl_config mt9m114_controls[] = {
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VFLIP,
- .name = "Image v-Flip",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HFLIP,
- .name = "Image h-Flip",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE_ABSOLUTE,
- .name = "exposure",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 0xffff,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE_ZONE_NUM,
- .name = "one-time exposure zone number",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 0xffff,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE_METERING,
- .name = "metering",
- .type = V4L2_CTRL_TYPE_MENU,
- .min = 0,
- .max = 3,
- .step = 0,
- .def = 1,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE,
- .name = "exposure biasx",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = -2,
- .max = 2,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
-#if 0 /* Causes v4l2_ctrl_new_custom() to fail with -ERANGE, disable for now */
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_3A_LOCK,
- .name = "3a lock",
- .type = V4L2_CTRL_TYPE_BITMASK,
- .min = 0,
- .max = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE | V4L2_LOCK_FOCUS,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
-#endif
-};
-
-static int mt9m114_detect(struct mt9m114_device *dev, struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- u32 model;
- int ret;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "%s: i2c error", __func__);
- return -ENODEV;
- }
- ret = mt9m114_read_reg(client, MISENSOR_16BIT, MT9M114_PID, &model);
- if (ret)
- return ret;
- dev->real_model_id = model;
-
- if (model != MT9M114_MOD_ID) {
- dev_err(&client->dev, "%s: failed: client->addr = %x\n",
- __func__, client->addr);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int
-mt9m114_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
-{
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (!platform_data)
- return -ENODEV;
-
- dev->platform_data =
- (struct camera_sensor_platform_data *)platform_data;
-
- ret = power_up(sd);
- if (ret) {
- v4l2_err(client, "mt9m114 power-up err");
- return ret;
- }
-
- /* config & detect sensor */
- ret = mt9m114_detect(dev, client);
- if (ret) {
- v4l2_err(client, "mt9m114_detect err s_config.\n");
- goto fail_detect;
- }
-
- ret = dev->platform_data->csi_cfg(sd, 1);
- if (ret)
- goto fail_csi_cfg;
-
- ret = mt9m114_set_suspend(sd);
- if (ret) {
- v4l2_err(client, "mt9m114 suspend err");
- return ret;
- }
-
- ret = power_down(sd);
- if (ret) {
- v4l2_err(client, "mt9m114 power down err");
- return ret;
- }
-
- return ret;
-
-fail_csi_cfg:
- dev->platform_data->csi_cfg(sd, 0);
-fail_detect:
- power_down(sd);
- dev_err(&client->dev, "sensor power-gating failed\n");
- return ret;
-}
-
-/* Horizontal flip the image. */
-static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value)
-{
- struct i2c_client *c = v4l2_get_subdevdata(sd);
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- int err;
- /* set for direct mode */
- err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850);
- if (value) {
- /* enable H flip ctx A */
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x01);
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x01);
- /* ctx B */
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x01);
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x01);
-
- err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
- MISENSOR_HFLIP_MASK, MISENSOR_FLIP_EN);
-
- dev->bpat = MT9M114_BPAT_GRGRBGBG;
- } else {
- /* disable H flip ctx A */
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x00);
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x00);
- /* ctx B */
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x00);
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x00);
-
- err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
- MISENSOR_HFLIP_MASK, MISENSOR_FLIP_DIS);
-
- dev->bpat = MT9M114_BPAT_BGBGGRGR;
- }
-
- err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06);
- udelay(10);
-
- return !!err;
-}
-
-/* Vertically flip the image */
-static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value)
-{
- struct i2c_client *c = v4l2_get_subdevdata(sd);
- int err;
- /* set for direct mode */
- err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850);
- if (value >= 1) {
-		/* enable V flip - ctx A */
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x01);
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x01);
- /* ctx B */
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x01);
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x01);
-
- err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
- MISENSOR_VFLIP_MASK, MISENSOR_FLIP_EN);
- } else {
-		/* disable V flip - ctx A */
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x00);
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x00);
- /* ctx B */
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x00);
- err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x00);
-
- err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
- MISENSOR_VFLIP_MASK, MISENSOR_FLIP_DIS);
- }
-
- err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06);
- udelay(10);
-
- return !!err;
-}
-
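Both flip helpers above rely on misensor_rmw_reg() to update individual bits of MISENSOR_READ_MODE and the per-context registers; its definition sits earlier in this file. The sketch below only illustrates the usual read-modify-write arithmetic, under the assumption that the helper shifts the value up to the mask's lowest set bit (consistent with passing MISENSOR_FLIP_EN = 1 against MISENSOR_VFLIP_MASK = 0x0002):

#include <stdint.h>
#include <stdio.h>

/* Assumed RMW arithmetic: clear the field selected by mask, then OR
 * in val aligned to the field's least-significant bit. */
static uint16_t rmw(uint16_t reg, uint16_t mask, uint16_t val)
{
	unsigned int shift = 0;
	uint16_t m = mask;

	while (m && !(m & 1)) {
		m >>= 1;
		shift++;
	}
	return (reg & ~mask) | ((uint16_t)(val << shift) & mask);
}

int main(void)
{
	uint16_t read_mode = 0x0000;

	read_mode = rmw(read_mode, 0x0002, 1);      /* VFLIP_MASK, FLIP_EN */
	read_mode = rmw(read_mode, 0x0330, 0x0000); /* R_MODE_MASK, NORMAL_SET */
	printf("READ_MODE = 0x%04X\n", read_mode);  /* prints 0x0002 */
	return 0;
}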
-static int mt9m114_get_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
- if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- interval->interval.numerator = 1;
- interval->interval.denominator = mt9m114_res[dev->res].fps;
-
- return 0;
-}
-
-static int mt9m114_s_stream(struct v4l2_subdev *sd, int enable)
-{
- int ret;
- struct i2c_client *c = v4l2_get_subdevdata(sd);
- struct mt9m114_device *dev = to_mt9m114_sensor(sd);
- struct atomisp_exposure exposure;
-
- if (enable) {
- ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg,
- POST_POLLING);
- if (ret < 0)
- return ret;
-
- if (dev->first_exp > MT9M114_MAX_FIRST_EXP) {
- exposure.integration_time[0] = dev->first_exp;
- exposure.gain[0] = dev->first_gain;
- exposure.gain[1] = dev->first_diggain;
- mt9m114_s_exposure(sd, &exposure);
- }
- dev->streamon = 1;
-
- } else {
- dev->streamon = 0;
- ret = mt9m114_set_suspend(sd);
- }
-
- return ret;
-}
-
-static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->index)
- return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
-
- return 0;
-}
-
-static int mt9m114_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- unsigned int index = fse->index;
-
- if (index >= N_RES)
- return -EINVAL;
-
- fse->min_width = mt9m114_res[index].width;
- fse->min_height = mt9m114_res[index].height;
- fse->max_width = mt9m114_res[index].width;
- fse->max_height = mt9m114_res[index].height;
-
- return 0;
-}
-
-static int mt9m114_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-{
- int index;
- struct mt9m114_device *snr = to_mt9m114_sensor(sd);
-
- if (!frames)
- return -EINVAL;
-
- for (index = 0; index < N_RES; index++) {
- if (mt9m114_res[index].res == snr->res)
- break;
- }
-
- if (index >= N_RES)
- return -EINVAL;
-
- *frames = mt9m114_res[index].skip_frames;
-
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops mt9m114_video_ops = {
- .s_stream = mt9m114_s_stream,
-};
-
-static const struct v4l2_subdev_sensor_ops mt9m114_sensor_ops = {
- .g_skip_frames = mt9m114_g_skip_frames,
-};
-
-static const struct v4l2_subdev_core_ops mt9m114_core_ops = {
- .s_power = mt9m114_s_power,
- .ioctl = mt9m114_ioctl,
-};
-
-/* REVISIT: Do we need pad operations? */
-static const struct v4l2_subdev_pad_ops mt9m114_pad_ops = {
- .enum_mbus_code = mt9m114_enum_mbus_code,
- .enum_frame_size = mt9m114_enum_frame_size,
- .get_fmt = mt9m114_get_fmt,
- .set_fmt = mt9m114_set_fmt,
- .set_selection = mt9m114_s_exposure_selection,
- .get_frame_interval = mt9m114_get_frame_interval,
-};
-
-static const struct v4l2_subdev_ops mt9m114_ops = {
- .core = &mt9m114_core_ops,
- .video = &mt9m114_video_ops,
- .pad = &mt9m114_pad_ops,
- .sensor = &mt9m114_sensor_ops,
-};
-
-static void mt9m114_remove(struct i2c_client *client)
-{
- struct mt9m114_device *dev;
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
-
- dev = container_of(sd, struct mt9m114_device, sd);
- dev->platform_data->csi_cfg(sd, 0);
- v4l2_device_unregister_subdev(sd);
- media_entity_cleanup(&dev->sd.entity);
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-}
-
-static int mt9m114_probe(struct i2c_client *client)
-{
- struct mt9m114_device *dev;
- int ret = 0;
- unsigned int i;
- void *pdata;
-
- /* Setup sensor configuration structure */
- dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- ret = devm_mutex_init(&client->dev, &dev->input_lock);
- if (ret)
- return ret;
-
- v4l2_i2c_subdev_init(&dev->sd, client, &mt9m114_ops);
- pdata = gmin_camera_platform_data(&dev->sd,
- ATOMISP_INPUT_FORMAT_RAW_10,
- atomisp_bayer_order_grbg);
- if (pdata)
- ret = mt9m114_s_config(&dev->sd, client->irq, pdata);
- if (!pdata || ret) {
- v4l2_device_unregister_subdev(&dev->sd);
- return ret;
- }
-
- ret = atomisp_register_i2c_module(&dev->sd, pdata);
- if (ret) {
- v4l2_device_unregister_subdev(&dev->sd);
- /* Coverity CID 298095 - return on error */
- return ret;
- }
-
- /* TODO add format code here */
- dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
- dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- ret =
- v4l2_ctrl_handler_init(&dev->ctrl_handler,
- ARRAY_SIZE(mt9m114_controls));
- if (ret) {
- mt9m114_remove(client);
- return ret;
- }
-
- for (i = 0; i < ARRAY_SIZE(mt9m114_controls); i++)
- v4l2_ctrl_new_custom(&dev->ctrl_handler, &mt9m114_controls[i],
- NULL);
-
- if (dev->ctrl_handler.error) {
- mt9m114_remove(client);
- return dev->ctrl_handler.error;
- }
-
- /* Use same lock for controls as for everything else. */
- dev->ctrl_handler.lock = &dev->input_lock;
- dev->sd.ctrl_handler = &dev->ctrl_handler;
-
- /* REVISIT: Do we need media controller? */
- ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
- if (ret) {
- mt9m114_remove(client);
- return ret;
- }
- return 0;
-}
-
-static const struct acpi_device_id mt9m114_acpi_match[] = {
- { "INT33F0" },
- { "CRMT1040" },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, mt9m114_acpi_match);
-
-static struct i2c_driver mt9m114_driver = {
- .driver = {
- .name = "mt9m114",
- .acpi_match_table = mt9m114_acpi_match,
- },
- .probe = mt9m114_probe,
- .remove = mt9m114_remove,
-};
-module_i2c_driver(mt9m114_driver);
-
-MODULE_AUTHOR("Shuguang Gong <Shuguang.gong@intel.com>");
-MODULE_DESCRIPTION("Aptina mt9m114 sensor support module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.h b/drivers/staging/media/atomisp/i2c/mt9m114.h
deleted file mode 100644
index 97820db90827..000000000000
--- a/drivers/staging/media/atomisp/i2c/mt9m114.h
+++ /dev/null
@@ -1,1768 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for mt9m114 Camera Sensor.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- */
-
-#ifndef __A1040_H__
-#define __A1040_H__
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/videodev2.h>
-#include <linux/spinlock.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-#include "../include/linux/atomisp_platform.h"
-#include "../include/linux/atomisp.h"
-
-#define V4L2_IDENT_MT9M114 8245
-
-#define MT9P111_REV3
-#define FULLINISUPPORT
-
-/* #defines for register writes and register array processing */
-#define MISENSOR_8BIT 1
-#define MISENSOR_16BIT 2
-#define MISENSOR_32BIT 4
-
-#define MISENSOR_FWBURST0 0x80
-#define MISENSOR_FWBURST1 0x81
-#define MISENSOR_FWBURST4 0x84
-#define MISENSOR_FWBURST 0x88
-
-#define MISENSOR_TOK_TERM 0xf000 /* terminating token for reg list */
-#define MISENSOR_TOK_DELAY 0xfe00 /* delay token for reg list */
-#define MISENSOR_TOK_FWLOAD 0xfd00 /* token indicating load FW */
-#define MISENSOR_TOK_POLL 0xfc00 /* token indicating poll instruction */
-#define MISENSOR_TOK_RMW 0x0010 /* RMW operation */
-#define MISENSOR_TOK_MASK 0xfff0
-#define MISENSOR_AWB_STEADY BIT(0) /* awb steady */
-#define MISENSOR_AE_READY BIT(3) /* ae status ready */
-
-/* mask to set sensor read_mode via misensor_rmw_reg */
-#define MISENSOR_R_MODE_MASK 0x0330
-/* mask to set sensor vert_flip and horz_mirror */
-#define MISENSOR_VFLIP_MASK 0x0002
-#define MISENSOR_HFLIP_MASK 0x0001
-#define MISENSOR_FLIP_EN 1
-#define MISENSOR_FLIP_DIS 0
-
-/* bits set to set sensor read_mode via misensor_rmw_reg */
-#define MISENSOR_SKIPPING_SET 0x0011
-#define MISENSOR_SUMMING_SET 0x0033
-#define MISENSOR_NORMAL_SET 0x0000
-
-/* sensor register that control sensor read-mode and mirror */
-#define MISENSOR_READ_MODE 0xC834
-/* sensor ae-track status register */
-#define MISENSOR_AE_TRACK_STATUS 0xA800
-/* sensor awb status register */
-#define MISENSOR_AWB_STATUS 0xAC00
-/* sensor coarse integration time register */
-#define MISENSOR_COARSE_INTEGRATION_TIME 0xC83C
-
-/* registers */
-#define REG_SW_RESET 0x301A
-#define REG_SW_STREAM 0xDC00
-#define REG_SCCB_CTRL 0x3100
-#define REG_SC_CMMN_CHIP_ID 0x0000
-#define REG_V_START 0xc800 /* 16bits */
-#define REG_H_START 0xc802 /* 16bits */
-#define REG_V_END 0xc804 /* 16bits */
-#define REG_H_END 0xc806 /* 16bits */
-#define REG_PIXEL_CLK 0xc808 /* 32bits */
-#define REG_TIMING_VTS 0xc812 /* 16bits */
-#define REG_TIMING_HTS 0xc814 /* 16bits */
-#define REG_WIDTH 0xC868 /* 16bits */
-#define REG_HEIGHT 0xC86A /* 16bits */
-#define REG_EXPO_COARSE 0x3012 /* 16bits */
-#define REG_EXPO_FINE 0x3014 /* 16bits */
-#define REG_GAIN 0x305E
-#define REG_ANALOGGAIN 0x305F
-#define REG_ADDR_ACESSS 0x098E /* logical_address_access */
-#define REG_COMM_Register 0x0080 /* command_register */
-
-#define SENSOR_DETECTED 1
-#define SENSOR_NOT_DETECTED 0
-
-#define I2C_RETRY_COUNT 5
-#define MSG_LEN_OFFSET 2
-
-#ifndef MIPI_CONTROL
-#define MIPI_CONTROL 0x3400 /* MIPI_Control */
-#endif
-
-/* GPIO pin on Moorestown */
-#define GPIO_SCLK_25 44
-#define GPIO_STB_PIN 47
-
-#define GPIO_STDBY_PIN 49 /* ab:new */
-#define GPIO_RESET_PIN 50
-
-/* System control register for the Aptina A-1040SOC */
-#define MT9M114_PID 0x0
-
-/* MT9M114_DEVICE_ID */
-#define MT9M114_MOD_ID 0x2481
-
-#define MT9M114_FINE_INTG_TIME_MIN 0
-#define MT9M114_FINE_INTG_TIME_MAX_MARGIN 0
-#define MT9M114_COARSE_INTG_TIME_MIN 1
-#define MT9M114_COARSE_INTG_TIME_MAX_MARGIN 6
-
-/* ulBPat; */
-
-#define MT9M114_BPAT_RGRGGBGB BIT(0)
-#define MT9M114_BPAT_GRGRBGBG BIT(1)
-#define MT9M114_BPAT_GBGBRGRG BIT(2)
-#define MT9M114_BPAT_BGBGGRGR BIT(3)
-
-#define MT9M114_FOCAL_LENGTH_NUM 208 /*2.08mm*/
-#define MT9M114_WAIT_STAT_TIMEOUT 100
-#define MT9M114_FLICKER_MODE_50HZ 1
-#define MT9M114_FLICKER_MODE_60HZ 2
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define MT9M114_FOCAL_LENGTH_DEFAULT 0xD00064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define MT9M114_F_NUMBER_DEFAULT 0x18000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define MT9M114_F_NUMBER_RANGE 0x180a180a
-
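Decoding the packed defaults above: MT9M114_FOCAL_LENGTH_DEFAULT = 0xD00064 splits into 208/100 = 2.08 mm (matching MT9M114_FOCAL_LENGTH_NUM), and MT9M114_F_NUMBER_DEFAULT = 0x18000a into 24/10 = f/2.4. A standalone sketch of the unpacking (unpack_ratio() is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Unpack the "numerator in bits 31-16, denominator in bits 15-0"
 * encoding used for the focal length and f-number defaults. */
static void unpack_ratio(uint32_t v, unsigned int *num, unsigned int *den)
{
	*num = v >> 16;
	*den = v & 0xffff;
}

int main(void)
{
	unsigned int num, den;

	unpack_ratio(0xD00064, &num, &den);	/* focal length default */
	printf("focal length: %u/%u = %.2f mm\n", num, den, (double)num / den);

	unpack_ratio(0x18000a, &num, &den);	/* f-number default */
	printf("f-number: f/%.1f\n", (double)num / den);
	return 0;
}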
-/* Supported resolutions */
-enum {
- MT9M114_RES_736P,
- MT9M114_RES_864P,
- MT9M114_RES_960P,
-};
-
-#define MT9M114_RES_960P_SIZE_H 1296
-#define MT9M114_RES_960P_SIZE_V 976
-#define MT9M114_RES_720P_SIZE_H 1280
-#define MT9M114_RES_720P_SIZE_V 720
-#define MT9M114_RES_576P_SIZE_H 1024
-#define MT9M114_RES_576P_SIZE_V 576
-#define MT9M114_RES_480P_SIZE_H 768
-#define MT9M114_RES_480P_SIZE_V 480
-#define MT9M114_RES_VGA_SIZE_H 640
-#define MT9M114_RES_VGA_SIZE_V 480
-#define MT9M114_RES_QVGA_SIZE_H 320
-#define MT9M114_RES_QVGA_SIZE_V 240
-#define MT9M114_RES_QCIF_SIZE_H 176
-#define MT9M114_RES_QCIF_SIZE_V 144
-
-#define MT9M114_RES_720_480p_768_SIZE_H 736
-#define MT9M114_RES_720_480p_768_SIZE_V 496
-#define MT9M114_RES_736P_SIZE_H 1296
-#define MT9M114_RES_736P_SIZE_V 736
-#define MT9M114_RES_864P_SIZE_H 1296
-#define MT9M114_RES_864P_SIZE_V 864
-#define MT9M114_RES_976P_SIZE_H 1296
-#define MT9M114_RES_976P_SIZE_V 976
-
-#define MT9M114_BIN_FACTOR_MAX 3
-
-#define MT9M114_DEFAULT_FIRST_EXP 0x10
-#define MT9M114_MAX_FIRST_EXP 0x302
-
-/* completion status polling requirements, usage based on Aptina .INI Rev2 */
-enum poll_reg {
- NO_POLLING,
- PRE_POLLING,
- POST_POLLING,
-};
-
-/*
- * struct misensor_reg - MI sensor register format
- * @length: length of the register
- * @reg: 16-bit offset to register
- * @val: 8/16/32-bit register value
- * Define a structure for sensor register initialization values
- */
-struct misensor_reg {
- u32 length;
- u32 reg;
- u32 val; /* value or for read/mod/write, AND mask */
- u32 val2; /* optional; for rmw, OR mask */
-};
-
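Tables of struct misensor_reg are terminated by an entry whose length is MISENSOR_TOK_TERM, and the length field doubles as a token selector via MISENSOR_TOK_MASK. A sketch of how such a table can be walked, relying on the definitions above (illustrative only; the real mt9m114_write_reg_array() additionally batches consecutive writes and implements the pre/post polling modes):

/* Illustrative walker for a MISENSOR_TOK_TERM-terminated table. */
static int walk_reg_list(const struct misensor_reg *list)
{
	for (; list->length != MISENSOR_TOK_TERM; list++) {
		switch (list->length & MISENSOR_TOK_MASK) {
		case MISENSOR_TOK_DELAY:
			/* list->val holds a delay, not register data */
			break;
		case 0:
			/* plain 8/16/32-bit write of list->val to list->reg,
			 * width given by list->length */
			break;
		default:
			/* poll / RMW / FW-burst tokens dispatched here */
			break;
		}
	}
	return 0;
}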
-/*
- * struct misensor_fwreg - Firmware burst command
- * @type: FW burst or 8/16 bit register
- * @addr: 16-bit offset to register or other values depending on type
- * @valx: data value for burst (or other commands)
- *
- * Define a structure for sensor register initialization values
- */
-struct misensor_fwreg {
- u32 type; /* type of value, register or FW burst string */
- u32 addr; /* target address */
- u32 val0;
- u32 val1;
- u32 val2;
- u32 val3;
- u32 val4;
- u32 val5;
- u32 val6;
- u32 val7;
-};
-
-struct regval_list {
- u16 reg_num;
- u8 value;
-};
-
-struct mt9m114_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct v4l2_mbus_framefmt format;
-
- struct camera_sensor_platform_data *platform_data;
- struct mutex input_lock; /* serialize sensor's ioctl */
- struct v4l2_ctrl_handler ctrl_handler;
- int real_model_id;
- int nctx;
- int power;
-
- unsigned int bus_width;
- unsigned int mode;
- unsigned int field_inv;
- unsigned int field_sel;
- unsigned int ycseq;
- unsigned int conv422;
- unsigned int bpat;
- unsigned int hpol;
- unsigned int vpol;
- unsigned int edge;
- unsigned int bls;
- unsigned int gamma;
- unsigned int cconv;
- unsigned int res;
- unsigned int dwn_sz;
- unsigned int blc;
- unsigned int agc;
- unsigned int awb;
- unsigned int aec;
- /* extension SENSOR version 2 */
- unsigned int cie_profile;
-
- /* extension SENSOR version 3 */
- unsigned int flicker_freq;
-
- /* extension SENSOR version 4 */
- unsigned int smia_mode;
- unsigned int mipi_mode;
-
- /* Add name here to load shared library */
- unsigned int type;
-
-	/* Number of MIPI lanes */
-	unsigned int mipi_lanes;
-	/* Workaround for low-light AE */
- unsigned int first_exp;
- unsigned int first_gain;
- unsigned int first_diggain;
- char name[32];
-
- u8 lightfreq;
- u8 streamon;
-};
-
-struct mt9m114_format_struct {
- u8 *desc;
- u32 pixelformat;
- struct regval_list *regs;
-};
-
-struct mt9m114_res_struct {
- u8 *desc;
- int res;
- int width;
- int height;
- int fps;
- int skip_frames;
- bool used;
- struct regval_list *regs;
- u16 pixels_per_line;
- u16 lines_per_frame;
-};
-
-/* 2 bytes are used for the address, leaving 254 data bytes in a 256-byte transfer */
-#define MT9M114_MAX_WRITE_BUF_SIZE 254
-struct mt9m114_write_buffer {
- u16 addr;
- u8 data[MT9M114_MAX_WRITE_BUF_SIZE];
-};
-
-struct mt9m114_write_ctrl {
- int index;
- struct mt9m114_write_buffer buffer;
-};
-
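The write-control structures above imply the usual I2C payload layout: a 2-byte, big-endian register address followed by up to MT9M114_MAX_WRITE_BUF_SIZE data bytes, 256 bytes per transfer in total. A standalone sketch of filling such a buffer (fill_write_buffer() is an illustrative helper, not a driver function):

#include <stdint.h>
#include <string.h>

/* Place a big-endian 16-bit register address in front of the payload;
 * returns the total transfer length, or -1 if the payload is too big. */
static int fill_write_buffer(uint8_t *buf, size_t buflen,
			     uint16_t addr, const uint8_t *data, size_t len)
{
	if (buflen < 2 || len > buflen - 2)
		return -1;
	buf[0] = addr >> 8;	/* address MSB first */
	buf[1] = addr & 0xff;
	memcpy(buf + 2, data, len);
	return (int)(len + 2);
}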
-/*
- * Modes supported by the mt9m114 driver.
- * Please keep them in ascending order.
- */
-static struct mt9m114_res_struct mt9m114_res[] = {
- {
- .desc = "720P",
- .res = MT9M114_RES_736P,
- .width = 1296,
- .height = 736,
- .fps = 30,
- .used = false,
- .regs = NULL,
- .skip_frames = 1,
-
- .pixels_per_line = 0x0640,
- .lines_per_frame = 0x0307,
- },
- {
- .desc = "848P",
- .res = MT9M114_RES_864P,
- .width = 1296,
- .height = 864,
- .fps = 30,
- .used = false,
- .regs = NULL,
- .skip_frames = 1,
-
- .pixels_per_line = 0x0640,
- .lines_per_frame = 0x03E8,
- },
- {
- .desc = "960P",
- .res = MT9M114_RES_960P,
- .width = 1296,
- .height = 976,
- .fps = 30,
- .used = false,
- .regs = NULL,
- .skip_frames = 1,
-
- .pixels_per_line = 0x0644, /* consistent with regs arrays */
- .lines_per_frame = 0x03E5, /* consistent with regs arrays */
- },
-};
-
-#define N_RES (ARRAY_SIZE(mt9m114_res))
-
-#if 0 /* Currently unused */
-static struct misensor_reg const mt9m114_exitstandby[] = {
- {MISENSOR_16BIT, 0x098E, 0xDC00},
- /* exit-standby */
- {MISENSOR_8BIT, 0xDC00, 0x54},
- {MISENSOR_16BIT, 0x0080, 0x8002},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-#endif
-
-static struct misensor_reg const mt9m114_exp_win[5][5] = {
- {
- {MISENSOR_8BIT, 0xA407, 0x64},
- {MISENSOR_8BIT, 0xA408, 0x64},
- {MISENSOR_8BIT, 0xA409, 0x64},
- {MISENSOR_8BIT, 0xA40A, 0x64},
- {MISENSOR_8BIT, 0xA40B, 0x64},
- },
- {
- {MISENSOR_8BIT, 0xA40C, 0x64},
- {MISENSOR_8BIT, 0xA40D, 0x64},
- {MISENSOR_8BIT, 0xA40E, 0x64},
- {MISENSOR_8BIT, 0xA40F, 0x64},
- {MISENSOR_8BIT, 0xA410, 0x64},
- },
- {
- {MISENSOR_8BIT, 0xA411, 0x64},
- {MISENSOR_8BIT, 0xA412, 0x64},
- {MISENSOR_8BIT, 0xA413, 0x64},
- {MISENSOR_8BIT, 0xA414, 0x64},
- {MISENSOR_8BIT, 0xA415, 0x64},
- },
- {
- {MISENSOR_8BIT, 0xA416, 0x64},
- {MISENSOR_8BIT, 0xA417, 0x64},
- {MISENSOR_8BIT, 0xA418, 0x64},
- {MISENSOR_8BIT, 0xA419, 0x64},
- {MISENSOR_8BIT, 0xA41A, 0x64},
- },
- {
- {MISENSOR_8BIT, 0xA41B, 0x64},
- {MISENSOR_8BIT, 0xA41C, 0x64},
- {MISENSOR_8BIT, 0xA41D, 0x64},
- {MISENSOR_8BIT, 0xA41E, 0x64},
- {MISENSOR_8BIT, 0xA41F, 0x64},
- },
-};
-
-static struct misensor_reg const mt9m114_exp_average[] = {
- {MISENSOR_8BIT, 0xA407, 0x00},
- {MISENSOR_8BIT, 0xA408, 0x00},
- {MISENSOR_8BIT, 0xA409, 0x00},
- {MISENSOR_8BIT, 0xA40A, 0x00},
- {MISENSOR_8BIT, 0xA40B, 0x00},
- {MISENSOR_8BIT, 0xA40C, 0x00},
- {MISENSOR_8BIT, 0xA40D, 0x00},
- {MISENSOR_8BIT, 0xA40E, 0x00},
- {MISENSOR_8BIT, 0xA40F, 0x00},
- {MISENSOR_8BIT, 0xA410, 0x00},
- {MISENSOR_8BIT, 0xA411, 0x00},
- {MISENSOR_8BIT, 0xA412, 0x00},
- {MISENSOR_8BIT, 0xA413, 0x00},
- {MISENSOR_8BIT, 0xA414, 0x00},
- {MISENSOR_8BIT, 0xA415, 0x00},
- {MISENSOR_8BIT, 0xA416, 0x00},
- {MISENSOR_8BIT, 0xA417, 0x00},
- {MISENSOR_8BIT, 0xA418, 0x00},
- {MISENSOR_8BIT, 0xA419, 0x00},
- {MISENSOR_8BIT, 0xA41A, 0x00},
- {MISENSOR_8BIT, 0xA41B, 0x00},
- {MISENSOR_8BIT, 0xA41C, 0x00},
- {MISENSOR_8BIT, 0xA41D, 0x00},
- {MISENSOR_8BIT, 0xA41E, 0x00},
- {MISENSOR_8BIT, 0xA41F, 0x00},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-static struct misensor_reg const mt9m114_exp_center[] = {
- {MISENSOR_8BIT, 0xA407, 0x19},
- {MISENSOR_8BIT, 0xA408, 0x19},
- {MISENSOR_8BIT, 0xA409, 0x19},
- {MISENSOR_8BIT, 0xA40A, 0x19},
- {MISENSOR_8BIT, 0xA40B, 0x19},
- {MISENSOR_8BIT, 0xA40C, 0x19},
- {MISENSOR_8BIT, 0xA40D, 0x4B},
- {MISENSOR_8BIT, 0xA40E, 0x4B},
- {MISENSOR_8BIT, 0xA40F, 0x4B},
- {MISENSOR_8BIT, 0xA410, 0x19},
- {MISENSOR_8BIT, 0xA411, 0x19},
- {MISENSOR_8BIT, 0xA412, 0x4B},
- {MISENSOR_8BIT, 0xA413, 0x64},
- {MISENSOR_8BIT, 0xA414, 0x4B},
- {MISENSOR_8BIT, 0xA415, 0x19},
- {MISENSOR_8BIT, 0xA416, 0x19},
- {MISENSOR_8BIT, 0xA417, 0x4B},
- {MISENSOR_8BIT, 0xA418, 0x4B},
- {MISENSOR_8BIT, 0xA419, 0x4B},
- {MISENSOR_8BIT, 0xA41A, 0x19},
- {MISENSOR_8BIT, 0xA41B, 0x19},
- {MISENSOR_8BIT, 0xA41C, 0x19},
- {MISENSOR_8BIT, 0xA41D, 0x19},
- {MISENSOR_8BIT, 0xA41E, 0x19},
- {MISENSOR_8BIT, 0xA41F, 0x19},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-#if 0 /* Currently unused */
-static struct misensor_reg const mt9m114_suspend[] = {
- {MISENSOR_16BIT, 0x098E, 0xDC00},
- {MISENSOR_8BIT, 0xDC00, 0x40},
- {MISENSOR_16BIT, 0x0080, 0x8002},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-static struct misensor_reg const mt9m114_streaming[] = {
- {MISENSOR_16BIT, 0x098E, 0xDC00},
- {MISENSOR_8BIT, 0xDC00, 0x34},
- {MISENSOR_16BIT, 0x0080, 0x8002},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-#endif
-
-static struct misensor_reg const mt9m114_standby_reg[] = {
- {MISENSOR_16BIT, 0x098E, 0xDC00},
- {MISENSOR_8BIT, 0xDC00, 0x50},
- {MISENSOR_16BIT, 0x0080, 0x8002},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-#if 0 /* Currently unused */
-static struct misensor_reg const mt9m114_wakeup_reg[] = {
- {MISENSOR_16BIT, 0x098E, 0xDC00},
- {MISENSOR_8BIT, 0xDC00, 0x54},
- {MISENSOR_16BIT, 0x0080, 0x8002},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-#endif
-
-static struct misensor_reg const mt9m114_chgstat_reg[] = {
- {MISENSOR_16BIT, 0x098E, 0xDC00},
- {MISENSOR_8BIT, 0xDC00, 0x28},
- {MISENSOR_16BIT, 0x0080, 0x8002},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-/* [1296x976_30fps] - Intel */
-#if 0
-static struct misensor_reg const mt9m114_960P_init[] = {
- {MISENSOR_16BIT, 0x098E, 0x1000},
- {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
- {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
- {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
- {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */
- {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
-	{MISENSOR_16BIT, 0xC804, 0x03CF}, /* cam_sensor_cfg_y_addr_end = 975 */
-	{MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
- {MISENSOR_16BIT, 0xC808, 0x02DC}, /* cam_sensor_cfg_pixclk = 48000000 */
- {MISENSOR_16BIT, 0xC80A, 0x6C00},
- {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
- /* cam_sensor_cfg_fine_integ_time_min = 219 */
- {MISENSOR_16BIT, 0xC80E, 0x00DB},
- /* cam_sensor_cfg_fine_integ_time_max = 1459 */
- {MISENSOR_16BIT, 0xC810, 0x05B3},
-	/* cam_sensor_cfg_frame_length_lines = 1014 */
-	{MISENSOR_16BIT, 0xC812, 0x03F6},
-	/* cam_sensor_cfg_line_length_pck = 1598 */
-	{MISENSOR_16BIT, 0xC814, 0x063E},
- /* cam_sensor_cfg_fine_correction = 96 */
- {MISENSOR_16BIT, 0xC816, 0x0060},
- /* cam_sensor_cfg_cpipe_last_row = 963 */
- {MISENSOR_16BIT, 0xC818, 0x03C3},
- {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
- {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
- {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
- {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
-	{MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
-	{MISENSOR_16BIT, 0xC85A, 0x03C8}, /* cam_crop_window_height = 968 */
-	{MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
-	{MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
-	{MISENSOR_16BIT, 0xC86A, 0x03C8}, /* cam_output_height = 968 */
- {MISENSOR_TOK_TERM, 0, 0},
-};
-#endif
-
-/* [1296x976_30fps_768Mbps] */
-static struct misensor_reg const mt9m114_976P_init[] = {
- {MISENSOR_16BIT, 0x98E, 0x1000},
- {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
- {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
- {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
- {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */
- {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
- {MISENSOR_16BIT, 0xC804, 0x03CF}, /* cam_sensor_cfg_y_addr_end = 975 */
- {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
-	{MISENSOR_32BIT, 0xC808, 0x2DC6C00}, /* cam_sensor_cfg_pixclk = 48000000 */
- {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
- /* cam_sensor_cfg_fine_integ_time_min = 219 */
- {MISENSOR_16BIT, 0xC80E, 0x00DB},
- /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1459 */
- {MISENSOR_16BIT, 0xC810, 0x05B3},
-	/* 0x074C //cam_sensor_cfg_frame_length_lines = 997 */
-	{MISENSOR_16BIT, 0xC812, 0x03E5},
-	/* 0x06B1 //cam_sensor_cfg_line_length_pck = 1604 */
-	{MISENSOR_16BIT, 0xC814, 0x0644},
- /* cam_sensor_cfg_fine_correction = 96 */
- {MISENSOR_16BIT, 0xC816, 0x0060},
- /* cam_sensor_cfg_cpipe_last_row = 963 */
- {MISENSOR_16BIT, 0xC818, 0x03C3},
- {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
- {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
- {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
- {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
- {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
- {MISENSOR_16BIT, 0xC85A, 0x03C8}, /* cam_crop_window_height = 968 */
- {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
- {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
- {MISENSOR_16BIT, 0xC86A, 0x03C8}, /* cam_output_height = 968 */
- {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-/* [1296x864_30fps] */
-static struct misensor_reg const mt9m114_864P_init[] = {
- {MISENSOR_16BIT, 0x98E, 0x1000},
- {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
- {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
- {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
- {MISENSOR_16BIT, 0xC800, 0x0038}, /* cam_sensor_cfg_y_addr_start = 56 */
- {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
- {MISENSOR_16BIT, 0xC804, 0x0397}, /* cam_sensor_cfg_y_addr_end = 919 */
- {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
- /* cam_sensor_cfg_pixclk = 48000000 */
- {MISENSOR_32BIT, 0xC808, 0x2DC6C00},
- {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
- /* cam_sensor_cfg_fine_integ_time_min = 219 */
- {MISENSOR_16BIT, 0xC80E, 0x00DB},
- /* cam_sensor_cfg_fine_integ_time_max = 1469 */
- {MISENSOR_16BIT, 0xC810, 0x05BD},
- /* cam_sensor_cfg_frame_length_lines = 1000 */
- {MISENSOR_16BIT, 0xC812, 0x03E8},
- /* cam_sensor_cfg_line_length_pck = 1600 */
- {MISENSOR_16BIT, 0xC814, 0x0640},
- /* cam_sensor_cfg_fine_correction = 96 */
- {MISENSOR_16BIT, 0xC816, 0x0060},
- /* cam_sensor_cfg_cpipe_last_row = 859 */
- {MISENSOR_16BIT, 0xC818, 0x035B},
- {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
- {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
- {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
- {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
- {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
- {MISENSOR_16BIT, 0xC85A, 0x0358}, /* cam_crop_window_height = 856 */
- {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
- {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
- {MISENSOR_16BIT, 0xC86A, 0x0358}, /* cam_output_height = 856 */
- {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-/* [1296x736_30fps] */
-static struct misensor_reg const mt9m114_736P_init[] = {
- {MISENSOR_16BIT, 0x98E, 0x1000},
- {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
- {MISENSOR_16BIT, 0xC980, 0x011F}, /* cam_sysctl_pll_divider_m_n = 287 */
- {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
- {MISENSOR_16BIT, 0xC800, 0x0078}, /* cam_sensor_cfg_y_addr_start = 120*/
- {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
- {MISENSOR_16BIT, 0xC804, 0x0357}, /* cam_sensor_cfg_y_addr_end = 855 */
- {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
- {MISENSOR_32BIT, 0xC808, 0x237A07F}, /* cam_sensor_cfg_pixclk=37199999*/
- {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
- /* cam_sensor_cfg_fine_integ_time_min = 219 */
- {MISENSOR_16BIT, 0xC80E, 0x00DB},
- /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1469 */
- {MISENSOR_16BIT, 0xC810, 0x05BD},
- /* 0x074C //cam_sensor_cfg_frame_length_lines = 775 */
- {MISENSOR_16BIT, 0xC812, 0x0307},
-	/* 0x06B1 //cam_sensor_cfg_line_length_pck = 1600 */
- {MISENSOR_16BIT, 0xC814, 0x0640},
- /* cam_sensor_cfg_fine_correction = 96 */
- {MISENSOR_16BIT, 0xC816, 0x0060},
- /* cam_sensor_cfg_cpipe_last_row = 731 */
- {MISENSOR_16BIT, 0xC818, 0x02DB},
- {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
- {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
- {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
- {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
- {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
- {MISENSOR_16BIT, 0xC85A, 0x02D8}, /* cam_crop_window_height = 728 */
- {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
- {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
- {MISENSOR_16BIT, 0xC86A, 0x02D8}, /* cam_output_height = 728 */
- {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-/* [736x496_30fps_768Mbps] */
-#if 0 /* Currently unused */
-static struct misensor_reg const mt9m114_720_480P_init[] = {
- {MISENSOR_16BIT, 0x98E, 0x1000},
- {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
- {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
- {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
- {MISENSOR_16BIT, 0xC800, 0x00F0}, /* cam_sensor_cfg_y_addr_start = 240*/
- {MISENSOR_16BIT, 0xC802, 0x0118}, /* cam_sensor_cfg_x_addr_start = 280*/
- {MISENSOR_16BIT, 0xC804, 0x02DF}, /* cam_sensor_cfg_y_addr_end = 735 */
- {MISENSOR_16BIT, 0xC806, 0x03F7}, /* cam_sensor_cfg_x_addr_end = 1015 */
- /* cam_sensor_cfg_pixclk = 48000000 */
- {MISENSOR_32BIT, 0xC808, 0x2DC6C00},
- {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
- /* cam_sensor_cfg_fine_integ_time_min = 219 */
- {MISENSOR_16BIT, 0xC80E, 0x00DB},
- /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1459 */
- {MISENSOR_16BIT, 0xC810, 0x05B3},
- /* 0x074C //cam_sensor_cfg_frame_length_lines = 997 */
- {MISENSOR_16BIT, 0xC812, 0x03E5},
- /* 0x06B1 /cam_sensor_cfg_line_length_pck = 1604 */
- {MISENSOR_16BIT, 0xC814, 0x0644},
- /* cam_sensor_cfg_fine_correction = 96 */
- {MISENSOR_16BIT, 0xC816, 0x0060},
- {MISENSOR_16BIT, 0xC818, 0x03C3}, /* cam_sensor_cfg_cpipe_last_row=963*/
- {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
- {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0*/
- {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
- {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
- {MISENSOR_16BIT, 0xC858, 0x02D8}, /* cam_crop_window_width = 728 */
- {MISENSOR_16BIT, 0xC85A, 0x01E8}, /* cam_crop_window_height = 488 */
- {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
- {MISENSOR_16BIT, 0xC868, 0x02D8}, /* cam_output_width = 728 */
- {MISENSOR_16BIT, 0xC86A, 0x01E8}, /* cam_output_height = 488 */
- {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
- {MISENSOR_TOK_TERM, 0, 0}
-};
-#endif
-
-static struct misensor_reg const mt9m114_common[] = {
- /* reset */
- {MISENSOR_16BIT, 0x301A, 0x0234},
- /* LOAD = Step2-PLL_Timing //PLL and Timing */
- {MISENSOR_16BIT, 0x098E, 0x1000}, /* LOGICAL_ADDRESS_ACCESS */
- {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
- {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
- {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
-	{MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */
-	{MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
-	{MISENSOR_16BIT, 0xC804, 0x03CD}, /* cam_sensor_cfg_y_addr_end = 973 */
-	{MISENSOR_16BIT, 0xC806, 0x050D}, /* cam_sensor_cfg_x_addr_end = 1293 */
-	{MISENSOR_16BIT, 0xC808, 0x02DC}, /* cam_sensor_cfg_pixclk = 48000000 */
-	{MISENSOR_16BIT, 0xC80A, 0x6C00},
-	{MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
-	/* cam_sensor_cfg_fine_integ_time_min = 451 */
-	{MISENSOR_16BIT, 0xC80E, 0x01C3},
-	/* cam_sensor_cfg_fine_integ_time_max = 1015 */
-	{MISENSOR_16BIT, 0xC810, 0x03F7},
-	/* cam_sensor_cfg_frame_length_lines = 1280 */
-	{MISENSOR_16BIT, 0xC812, 0x0500},
-	/* cam_sensor_cfg_line_length_pck = 1250 */
-	{MISENSOR_16BIT, 0xC814, 0x04E2},
-	/* cam_sensor_cfg_fine_correction = 224 */
-	{MISENSOR_16BIT, 0xC816, 0x00E0},
-	/* cam_sensor_cfg_cpipe_last_row = 483 */
-	{MISENSOR_16BIT, 0xC818, 0x01E3},
-	{MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
-	{MISENSOR_16BIT, 0xC834, 0x0330}, /* cam_sensor_control_read_mode = 0x0330 */
-	{MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
-	{MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
-	{MISENSOR_16BIT, 0xC858, 0x0280}, /* cam_crop_window_width = 640 */
-	{MISENSOR_16BIT, 0xC85A, 0x01E0}, /* cam_crop_window_height = 480 */
-	{MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
-	{MISENSOR_16BIT, 0xC868, 0x0280}, /* cam_output_width = 640 */
-	{MISENSOR_16BIT, 0xC86A, 0x01E0}, /* cam_output_height = 480 */
- /*
- * LOAD = Step3-Recommended
- * Patch, Errata and Sensor optimization Setting
- */
- {MISENSOR_16BIT, 0x316A, 0x8270}, /* DAC_TXLO_ROW */
- {MISENSOR_16BIT, 0x316C, 0x8270}, /* DAC_TXLO */
- {MISENSOR_16BIT, 0x3ED0, 0x2305}, /* DAC_LD_4_5 */
- {MISENSOR_16BIT, 0x3ED2, 0x77CF}, /* DAC_LD_6_7 */
- {MISENSOR_16BIT, 0x316E, 0x8202}, /* DAC_ECL */
- {MISENSOR_16BIT, 0x3180, 0x87FF}, /* DELTA_DK_CONTROL */
- {MISENSOR_16BIT, 0x30D4, 0x6080}, /* COLUMN_CORRECTION */
- {MISENSOR_16BIT, 0xA802, 0x0008}, /* AE_TRACK_MODE */
- {MISENSOR_16BIT, 0x3E14, 0xFF39}, /* SAMP_COL_PUP2 */
- {MISENSOR_16BIT, 0x31E0, 0x0003}, /* PIX_DEF_ID */
- /* LOAD = Step8-Features //Ports, special features, etc. */
- {MISENSOR_16BIT, 0x098E, 0x0000}, /* LOGICAL_ADDRESS_ACCESS */
- {MISENSOR_16BIT, 0x001E, 0x0777}, /* PAD_SLEW */
- {MISENSOR_16BIT, 0x098E, 0x0000}, /* LOGICAL_ADDRESS_ACCESS */
- {MISENSOR_16BIT, 0xC984, 0x8001}, /* CAM_PORT_OUTPUT_CONTROL */
- {MISENSOR_16BIT, 0xC988, 0x0F00}, /* CAM_PORT_MIPI_TIMING_T_HS_ZERO */
- /* CAM_PORT_MIPI_TIMING_T_HS_EXIT_HS_TRAIL */
- {MISENSOR_16BIT, 0xC98A, 0x0B07},
- /* CAM_PORT_MIPI_TIMING_T_CLK_POST_CLK_PRE */
- {MISENSOR_16BIT, 0xC98C, 0x0D01},
- /* CAM_PORT_MIPI_TIMING_T_CLK_TRAIL_CLK_ZERO */
- {MISENSOR_16BIT, 0xC98E, 0x071D},
- {MISENSOR_16BIT, 0xC990, 0x0006}, /* CAM_PORT_MIPI_TIMING_T_LPX */
- {MISENSOR_16BIT, 0xC992, 0x0A0C}, /* CAM_PORT_MIPI_TIMING_INIT_TIMING */
- {MISENSOR_16BIT, 0x3C5A, 0x0009}, /* MIPI_DELAY_TRIM */
- {MISENSOR_16BIT, 0xC86C, 0x0210}, /* CAM_OUTPUT_FORMAT */
- {MISENSOR_16BIT, 0xA804, 0x0000}, /* AE_TRACK_ALGO */
-	/* default exposure */
-	{MISENSOR_16BIT, 0x3012, 0x0110}, /* COARSE_INTEGRATION_TIME */
- {MISENSOR_TOK_TERM, 0, 0},
-};
-
-#if 0 /* Currently unused */
-static struct misensor_reg const mt9m114_antiflicker_50hz[] = {
- {MISENSOR_16BIT, 0x098E, 0xC88B},
- {MISENSOR_8BIT, 0xC88B, 0x32},
- {MISENSOR_8BIT, 0xDC00, 0x28},
- {MISENSOR_16BIT, 0x0080, 0x8002},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-static struct misensor_reg const mt9m114_antiflicker_60hz[] = {
- {MISENSOR_16BIT, 0x098E, 0xC88B},
- {MISENSOR_8BIT, 0xC88B, 0x3C},
- {MISENSOR_8BIT, 0xDC00, 0x28},
- {MISENSOR_16BIT, 0x0080, 0x8002},
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-static struct misensor_reg const mt9m114_iq[] = {
- /* [Step3-Recommended] [Sensor optimization] */
- {MISENSOR_16BIT, 0x316A, 0x8270},
- {MISENSOR_16BIT, 0x316C, 0x8270},
- {MISENSOR_16BIT, 0x3ED0, 0x2305},
- {MISENSOR_16BIT, 0x3ED2, 0x77CF},
- {MISENSOR_16BIT, 0x316E, 0x8202},
- {MISENSOR_16BIT, 0x3180, 0x87FF},
- {MISENSOR_16BIT, 0x30D4, 0x6080},
- {MISENSOR_16BIT, 0xA802, 0x0008},
-
-	/* This register is from the vendor, to avoid low-light color noise */
- {MISENSOR_16BIT, 0x31E0, 0x0001},
-
- /* LOAD=Errata item 1 */
- {MISENSOR_16BIT, 0x3E14, 0xFF39},
-
- /* LOAD=Errata item 2 */
- {MISENSOR_16BIT, 0x301A, 0x8234},
-
- /*
- * LOAD=Errata item 3
- * LOAD=Patch 0202;
- * Feature Recommended; Black level correction fix
- */
- {MISENSOR_16BIT, 0x0982, 0x0001},
- {MISENSOR_16BIT, 0x098A, 0x5000},
- {MISENSOR_16BIT, 0xD000, 0x70CF},
- {MISENSOR_16BIT, 0xD002, 0xFFFF},
- {MISENSOR_16BIT, 0xD004, 0xC5D4},
- {MISENSOR_16BIT, 0xD006, 0x903A},
- {MISENSOR_16BIT, 0xD008, 0x2144},
- {MISENSOR_16BIT, 0xD00A, 0x0C00},
- {MISENSOR_16BIT, 0xD00C, 0x2186},
- {MISENSOR_16BIT, 0xD00E, 0x0FF3},
- {MISENSOR_16BIT, 0xD010, 0xB844},
- {MISENSOR_16BIT, 0xD012, 0xB948},
- {MISENSOR_16BIT, 0xD014, 0xE082},
- {MISENSOR_16BIT, 0xD016, 0x20CC},
- {MISENSOR_16BIT, 0xD018, 0x80E2},
- {MISENSOR_16BIT, 0xD01A, 0x21CC},
- {MISENSOR_16BIT, 0xD01C, 0x80A2},
- {MISENSOR_16BIT, 0xD01E, 0x21CC},
- {MISENSOR_16BIT, 0xD020, 0x80E2},
- {MISENSOR_16BIT, 0xD022, 0xF404},
- {MISENSOR_16BIT, 0xD024, 0xD801},
- {MISENSOR_16BIT, 0xD026, 0xF003},
- {MISENSOR_16BIT, 0xD028, 0xD800},
- {MISENSOR_16BIT, 0xD02A, 0x7EE0},
- {MISENSOR_16BIT, 0xD02C, 0xC0F1},
- {MISENSOR_16BIT, 0xD02E, 0x08BA},
-
- {MISENSOR_16BIT, 0xD030, 0x0600},
- {MISENSOR_16BIT, 0xD032, 0xC1A1},
- {MISENSOR_16BIT, 0xD034, 0x76CF},
- {MISENSOR_16BIT, 0xD036, 0xFFFF},
- {MISENSOR_16BIT, 0xD038, 0xC130},
- {MISENSOR_16BIT, 0xD03A, 0x6E04},
- {MISENSOR_16BIT, 0xD03C, 0xC040},
- {MISENSOR_16BIT, 0xD03E, 0x71CF},
- {MISENSOR_16BIT, 0xD040, 0xFFFF},
- {MISENSOR_16BIT, 0xD042, 0xC790},
- {MISENSOR_16BIT, 0xD044, 0x8103},
- {MISENSOR_16BIT, 0xD046, 0x77CF},
- {MISENSOR_16BIT, 0xD048, 0xFFFF},
- {MISENSOR_16BIT, 0xD04A, 0xC7C0},
- {MISENSOR_16BIT, 0xD04C, 0xE001},
- {MISENSOR_16BIT, 0xD04E, 0xA103},
- {MISENSOR_16BIT, 0xD050, 0xD800},
- {MISENSOR_16BIT, 0xD052, 0x0C6A},
- {MISENSOR_16BIT, 0xD054, 0x04E0},
- {MISENSOR_16BIT, 0xD056, 0xB89E},
- {MISENSOR_16BIT, 0xD058, 0x7508},
- {MISENSOR_16BIT, 0xD05A, 0x8E1C},
- {MISENSOR_16BIT, 0xD05C, 0x0809},
- {MISENSOR_16BIT, 0xD05E, 0x0191},
-
- {MISENSOR_16BIT, 0xD060, 0xD801},
- {MISENSOR_16BIT, 0xD062, 0xAE1D},
- {MISENSOR_16BIT, 0xD064, 0xE580},
- {MISENSOR_16BIT, 0xD066, 0x20CA},
- {MISENSOR_16BIT, 0xD068, 0x0022},
- {MISENSOR_16BIT, 0xD06A, 0x20CF},
- {MISENSOR_16BIT, 0xD06C, 0x0522},
- {MISENSOR_16BIT, 0xD06E, 0x0C5C},
- {MISENSOR_16BIT, 0xD070, 0x04E2},
- {MISENSOR_16BIT, 0xD072, 0x21CA},
- {MISENSOR_16BIT, 0xD074, 0x0062},
- {MISENSOR_16BIT, 0xD076, 0xE580},
- {MISENSOR_16BIT, 0xD078, 0xD901},
- {MISENSOR_16BIT, 0xD07A, 0x79C0},
- {MISENSOR_16BIT, 0xD07C, 0xD800},
- {MISENSOR_16BIT, 0xD07E, 0x0BE6},
- {MISENSOR_16BIT, 0xD080, 0x04E0},
- {MISENSOR_16BIT, 0xD082, 0xB89E},
- {MISENSOR_16BIT, 0xD084, 0x70CF},
- {MISENSOR_16BIT, 0xD086, 0xFFFF},
- {MISENSOR_16BIT, 0xD088, 0xC8D4},
- {MISENSOR_16BIT, 0xD08A, 0x9002},
- {MISENSOR_16BIT, 0xD08C, 0x0857},
- {MISENSOR_16BIT, 0xD08E, 0x025E},
-
- {MISENSOR_16BIT, 0xD090, 0xFFDC},
- {MISENSOR_16BIT, 0xD092, 0xE080},
- {MISENSOR_16BIT, 0xD094, 0x25CC},
- {MISENSOR_16BIT, 0xD096, 0x9022},
- {MISENSOR_16BIT, 0xD098, 0xF225},
- {MISENSOR_16BIT, 0xD09A, 0x1700},
- {MISENSOR_16BIT, 0xD09C, 0x108A},
- {MISENSOR_16BIT, 0xD09E, 0x73CF},
- {MISENSOR_16BIT, 0xD0A0, 0xFF00},
- {MISENSOR_16BIT, 0xD0A2, 0x3174},
- {MISENSOR_16BIT, 0xD0A4, 0x9307},
- {MISENSOR_16BIT, 0xD0A6, 0x2A04},
- {MISENSOR_16BIT, 0xD0A8, 0x103E},
- {MISENSOR_16BIT, 0xD0AA, 0x9328},
- {MISENSOR_16BIT, 0xD0AC, 0x2942},
- {MISENSOR_16BIT, 0xD0AE, 0x7140},
- {MISENSOR_16BIT, 0xD0B0, 0x2A04},
- {MISENSOR_16BIT, 0xD0B2, 0x107E},
- {MISENSOR_16BIT, 0xD0B4, 0x9349},
- {MISENSOR_16BIT, 0xD0B6, 0x2942},
- {MISENSOR_16BIT, 0xD0B8, 0x7141},
- {MISENSOR_16BIT, 0xD0BA, 0x2A04},
- {MISENSOR_16BIT, 0xD0BC, 0x10BE},
- {MISENSOR_16BIT, 0xD0BE, 0x934A},
-
- {MISENSOR_16BIT, 0xD0C0, 0x2942},
- {MISENSOR_16BIT, 0xD0C2, 0x714B},
- {MISENSOR_16BIT, 0xD0C4, 0x2A04},
- {MISENSOR_16BIT, 0xD0C6, 0x10BE},
- {MISENSOR_16BIT, 0xD0C8, 0x130C},
- {MISENSOR_16BIT, 0xD0CA, 0x010A},
- {MISENSOR_16BIT, 0xD0CC, 0x2942},
- {MISENSOR_16BIT, 0xD0CE, 0x7142},
- {MISENSOR_16BIT, 0xD0D0, 0x2250},
- {MISENSOR_16BIT, 0xD0D2, 0x13CA},
- {MISENSOR_16BIT, 0xD0D4, 0x1B0C},
- {MISENSOR_16BIT, 0xD0D6, 0x0284},
- {MISENSOR_16BIT, 0xD0D8, 0xB307},
- {MISENSOR_16BIT, 0xD0DA, 0xB328},
- {MISENSOR_16BIT, 0xD0DC, 0x1B12},
- {MISENSOR_16BIT, 0xD0DE, 0x02C4},
- {MISENSOR_16BIT, 0xD0E0, 0xB34A},
- {MISENSOR_16BIT, 0xD0E2, 0xED88},
- {MISENSOR_16BIT, 0xD0E4, 0x71CF},
- {MISENSOR_16BIT, 0xD0E6, 0xFF00},
- {MISENSOR_16BIT, 0xD0E8, 0x3174},
- {MISENSOR_16BIT, 0xD0EA, 0x9106},
- {MISENSOR_16BIT, 0xD0EC, 0xB88F},
- {MISENSOR_16BIT, 0xD0EE, 0xB106},
-
- {MISENSOR_16BIT, 0xD0F0, 0x210A},
- {MISENSOR_16BIT, 0xD0F2, 0x8340},
- {MISENSOR_16BIT, 0xD0F4, 0xC000},
- {MISENSOR_16BIT, 0xD0F6, 0x21CA},
- {MISENSOR_16BIT, 0xD0F8, 0x0062},
- {MISENSOR_16BIT, 0xD0FA, 0x20F0},
- {MISENSOR_16BIT, 0xD0FC, 0x0040},
- {MISENSOR_16BIT, 0xD0FE, 0x0B02},
- {MISENSOR_16BIT, 0xD100, 0x0320},
- {MISENSOR_16BIT, 0xD102, 0xD901},
- {MISENSOR_16BIT, 0xD104, 0x07F1},
- {MISENSOR_16BIT, 0xD106, 0x05E0},
- {MISENSOR_16BIT, 0xD108, 0xC0A1},
- {MISENSOR_16BIT, 0xD10A, 0x78E0},
- {MISENSOR_16BIT, 0xD10C, 0xC0F1},
- {MISENSOR_16BIT, 0xD10E, 0x71CF},
- {MISENSOR_16BIT, 0xD110, 0xFFFF},
- {MISENSOR_16BIT, 0xD112, 0xC7C0},
- {MISENSOR_16BIT, 0xD114, 0xD840},
- {MISENSOR_16BIT, 0xD116, 0xA900},
- {MISENSOR_16BIT, 0xD118, 0x71CF},
- {MISENSOR_16BIT, 0xD11A, 0xFFFF},
- {MISENSOR_16BIT, 0xD11C, 0xD02C},
- {MISENSOR_16BIT, 0xD11E, 0xD81E},
-
- {MISENSOR_16BIT, 0xD120, 0x0A5A},
- {MISENSOR_16BIT, 0xD122, 0x04E0},
- {MISENSOR_16BIT, 0xD124, 0xDA00},
- {MISENSOR_16BIT, 0xD126, 0xD800},
- {MISENSOR_16BIT, 0xD128, 0xC0D1},
- {MISENSOR_16BIT, 0xD12A, 0x7EE0},
-
- {MISENSOR_16BIT, 0x098E, 0x0000},
- {MISENSOR_16BIT, 0xE000, 0x010C},
- {MISENSOR_16BIT, 0xE002, 0x0202},
- {MISENSOR_16BIT, 0xE004, 0x4103},
- {MISENSOR_16BIT, 0xE006, 0x0202},
- {MISENSOR_16BIT, 0x0080, 0xFFF0},
- {MISENSOR_16BIT, 0x0080, 0xFFF1},
-
- /* LOAD=Patch 0302; Feature Recommended; Adaptive Sensitivity */
- {MISENSOR_16BIT, 0x0982, 0x0001},
- {MISENSOR_16BIT, 0x098A, 0x512C},
- {MISENSOR_16BIT, 0xD12C, 0x70CF},
- {MISENSOR_16BIT, 0xD12E, 0xFFFF},
- {MISENSOR_16BIT, 0xD130, 0xC5D4},
- {MISENSOR_16BIT, 0xD132, 0x903A},
- {MISENSOR_16BIT, 0xD134, 0x2144},
- {MISENSOR_16BIT, 0xD136, 0x0C00},
- {MISENSOR_16BIT, 0xD138, 0x2186},
- {MISENSOR_16BIT, 0xD13A, 0x0FF3},
- {MISENSOR_16BIT, 0xD13C, 0xB844},
- {MISENSOR_16BIT, 0xD13E, 0x262F},
- {MISENSOR_16BIT, 0xD140, 0xF008},
- {MISENSOR_16BIT, 0xD142, 0xB948},
- {MISENSOR_16BIT, 0xD144, 0x21CC},
- {MISENSOR_16BIT, 0xD146, 0x8021},
- {MISENSOR_16BIT, 0xD148, 0xD801},
- {MISENSOR_16BIT, 0xD14A, 0xF203},
- {MISENSOR_16BIT, 0xD14C, 0xD800},
- {MISENSOR_16BIT, 0xD14E, 0x7EE0},
- {MISENSOR_16BIT, 0xD150, 0xC0F1},
- {MISENSOR_16BIT, 0xD152, 0x71CF},
- {MISENSOR_16BIT, 0xD154, 0xFFFF},
- {MISENSOR_16BIT, 0xD156, 0xC610},
- {MISENSOR_16BIT, 0xD158, 0x910E},
- {MISENSOR_16BIT, 0xD15A, 0x208C},
- {MISENSOR_16BIT, 0xD15C, 0x8014},
- {MISENSOR_16BIT, 0xD15E, 0xF418},
- {MISENSOR_16BIT, 0xD160, 0x910F},
- {MISENSOR_16BIT, 0xD162, 0x208C},
- {MISENSOR_16BIT, 0xD164, 0x800F},
- {MISENSOR_16BIT, 0xD166, 0xF414},
- {MISENSOR_16BIT, 0xD168, 0x9116},
- {MISENSOR_16BIT, 0xD16A, 0x208C},
- {MISENSOR_16BIT, 0xD16C, 0x800A},
- {MISENSOR_16BIT, 0xD16E, 0xF410},
- {MISENSOR_16BIT, 0xD170, 0x9117},
- {MISENSOR_16BIT, 0xD172, 0x208C},
- {MISENSOR_16BIT, 0xD174, 0x8807},
- {MISENSOR_16BIT, 0xD176, 0xF40C},
- {MISENSOR_16BIT, 0xD178, 0x9118},
- {MISENSOR_16BIT, 0xD17A, 0x2086},
- {MISENSOR_16BIT, 0xD17C, 0x0FF3},
- {MISENSOR_16BIT, 0xD17E, 0xB848},
- {MISENSOR_16BIT, 0xD180, 0x080D},
- {MISENSOR_16BIT, 0xD182, 0x0090},
- {MISENSOR_16BIT, 0xD184, 0xFFEA},
- {MISENSOR_16BIT, 0xD186, 0xE081},
- {MISENSOR_16BIT, 0xD188, 0xD801},
- {MISENSOR_16BIT, 0xD18A, 0xF203},
- {MISENSOR_16BIT, 0xD18C, 0xD800},
- {MISENSOR_16BIT, 0xD18E, 0xC0D1},
- {MISENSOR_16BIT, 0xD190, 0x7EE0},
- {MISENSOR_16BIT, 0xD192, 0x78E0},
- {MISENSOR_16BIT, 0xD194, 0xC0F1},
- {MISENSOR_16BIT, 0xD196, 0x71CF},
- {MISENSOR_16BIT, 0xD198, 0xFFFF},
- {MISENSOR_16BIT, 0xD19A, 0xC610},
- {MISENSOR_16BIT, 0xD19C, 0x910E},
- {MISENSOR_16BIT, 0xD19E, 0x208C},
- {MISENSOR_16BIT, 0xD1A0, 0x800A},
- {MISENSOR_16BIT, 0xD1A2, 0xF418},
- {MISENSOR_16BIT, 0xD1A4, 0x910F},
- {MISENSOR_16BIT, 0xD1A6, 0x208C},
- {MISENSOR_16BIT, 0xD1A8, 0x8807},
- {MISENSOR_16BIT, 0xD1AA, 0xF414},
- {MISENSOR_16BIT, 0xD1AC, 0x9116},
- {MISENSOR_16BIT, 0xD1AE, 0x208C},
- {MISENSOR_16BIT, 0xD1B0, 0x800A},
- {MISENSOR_16BIT, 0xD1B2, 0xF410},
- {MISENSOR_16BIT, 0xD1B4, 0x9117},
- {MISENSOR_16BIT, 0xD1B6, 0x208C},
- {MISENSOR_16BIT, 0xD1B8, 0x8807},
- {MISENSOR_16BIT, 0xD1BA, 0xF40C},
- {MISENSOR_16BIT, 0xD1BC, 0x9118},
- {MISENSOR_16BIT, 0xD1BE, 0x2086},
- {MISENSOR_16BIT, 0xD1C0, 0x0FF3},
- {MISENSOR_16BIT, 0xD1C2, 0xB848},
- {MISENSOR_16BIT, 0xD1C4, 0x080D},
- {MISENSOR_16BIT, 0xD1C6, 0x0090},
- {MISENSOR_16BIT, 0xD1C8, 0xFFD9},
- {MISENSOR_16BIT, 0xD1CA, 0xE080},
- {MISENSOR_16BIT, 0xD1CC, 0xD801},
- {MISENSOR_16BIT, 0xD1CE, 0xF203},
- {MISENSOR_16BIT, 0xD1D0, 0xD800},
- {MISENSOR_16BIT, 0xD1D2, 0xF1DF},
- {MISENSOR_16BIT, 0xD1D4, 0x9040},
- {MISENSOR_16BIT, 0xD1D6, 0x71CF},
- {MISENSOR_16BIT, 0xD1D8, 0xFFFF},
- {MISENSOR_16BIT, 0xD1DA, 0xC5D4},
- {MISENSOR_16BIT, 0xD1DC, 0xB15A},
- {MISENSOR_16BIT, 0xD1DE, 0x9041},
- {MISENSOR_16BIT, 0xD1E0, 0x73CF},
- {MISENSOR_16BIT, 0xD1E2, 0xFFFF},
- {MISENSOR_16BIT, 0xD1E4, 0xC7D0},
- {MISENSOR_16BIT, 0xD1E6, 0xB140},
- {MISENSOR_16BIT, 0xD1E8, 0x9042},
- {MISENSOR_16BIT, 0xD1EA, 0xB141},
- {MISENSOR_16BIT, 0xD1EC, 0x9043},
- {MISENSOR_16BIT, 0xD1EE, 0xB142},
- {MISENSOR_16BIT, 0xD1F0, 0x9044},
- {MISENSOR_16BIT, 0xD1F2, 0xB143},
- {MISENSOR_16BIT, 0xD1F4, 0x9045},
- {MISENSOR_16BIT, 0xD1F6, 0xB147},
- {MISENSOR_16BIT, 0xD1F8, 0x9046},
- {MISENSOR_16BIT, 0xD1FA, 0xB148},
- {MISENSOR_16BIT, 0xD1FC, 0x9047},
- {MISENSOR_16BIT, 0xD1FE, 0xB14B},
- {MISENSOR_16BIT, 0xD200, 0x9048},
- {MISENSOR_16BIT, 0xD202, 0xB14C},
- {MISENSOR_16BIT, 0xD204, 0x9049},
- {MISENSOR_16BIT, 0xD206, 0x1958},
- {MISENSOR_16BIT, 0xD208, 0x0084},
- {MISENSOR_16BIT, 0xD20A, 0x904A},
- {MISENSOR_16BIT, 0xD20C, 0x195A},
- {MISENSOR_16BIT, 0xD20E, 0x0084},
- {MISENSOR_16BIT, 0xD210, 0x8856},
- {MISENSOR_16BIT, 0xD212, 0x1B36},
- {MISENSOR_16BIT, 0xD214, 0x8082},
- {MISENSOR_16BIT, 0xD216, 0x8857},
- {MISENSOR_16BIT, 0xD218, 0x1B37},
- {MISENSOR_16BIT, 0xD21A, 0x8082},
- {MISENSOR_16BIT, 0xD21C, 0x904C},
- {MISENSOR_16BIT, 0xD21E, 0x19A7},
- {MISENSOR_16BIT, 0xD220, 0x009C},
- {MISENSOR_16BIT, 0xD222, 0x881A},
- {MISENSOR_16BIT, 0xD224, 0x7FE0},
- {MISENSOR_16BIT, 0xD226, 0x1B54},
- {MISENSOR_16BIT, 0xD228, 0x8002},
- {MISENSOR_16BIT, 0xD22A, 0x78E0},
- {MISENSOR_16BIT, 0xD22C, 0x71CF},
- {MISENSOR_16BIT, 0xD22E, 0xFFFF},
- {MISENSOR_16BIT, 0xD230, 0xC350},
- {MISENSOR_16BIT, 0xD232, 0xD828},
- {MISENSOR_16BIT, 0xD234, 0xA90B},
- {MISENSOR_16BIT, 0xD236, 0x8100},
- {MISENSOR_16BIT, 0xD238, 0x01C5},
- {MISENSOR_16BIT, 0xD23A, 0x0320},
- {MISENSOR_16BIT, 0xD23C, 0xD900},
- {MISENSOR_16BIT, 0xD23E, 0x78E0},
- {MISENSOR_16BIT, 0xD240, 0x220A},
- {MISENSOR_16BIT, 0xD242, 0x1F80},
- {MISENSOR_16BIT, 0xD244, 0xFFFF},
- {MISENSOR_16BIT, 0xD246, 0xD4E0},
- {MISENSOR_16BIT, 0xD248, 0xC0F1},
- {MISENSOR_16BIT, 0xD24A, 0x0811},
- {MISENSOR_16BIT, 0xD24C, 0x0051},
- {MISENSOR_16BIT, 0xD24E, 0x2240},
- {MISENSOR_16BIT, 0xD250, 0x1200},
- {MISENSOR_16BIT, 0xD252, 0xFFE1},
- {MISENSOR_16BIT, 0xD254, 0xD801},
- {MISENSOR_16BIT, 0xD256, 0xF006},
- {MISENSOR_16BIT, 0xD258, 0x2240},
- {MISENSOR_16BIT, 0xD25A, 0x1900},
- {MISENSOR_16BIT, 0xD25C, 0xFFDE},
- {MISENSOR_16BIT, 0xD25E, 0xD802},
- {MISENSOR_16BIT, 0xD260, 0x1A05},
- {MISENSOR_16BIT, 0xD262, 0x1002},
- {MISENSOR_16BIT, 0xD264, 0xFFF2},
- {MISENSOR_16BIT, 0xD266, 0xF195},
- {MISENSOR_16BIT, 0xD268, 0xC0F1},
- {MISENSOR_16BIT, 0xD26A, 0x0E7E},
- {MISENSOR_16BIT, 0xD26C, 0x05C0},
- {MISENSOR_16BIT, 0xD26E, 0x75CF},
- {MISENSOR_16BIT, 0xD270, 0xFFFF},
- {MISENSOR_16BIT, 0xD272, 0xC84C},
- {MISENSOR_16BIT, 0xD274, 0x9502},
- {MISENSOR_16BIT, 0xD276, 0x77CF},
- {MISENSOR_16BIT, 0xD278, 0xFFFF},
- {MISENSOR_16BIT, 0xD27A, 0xC344},
- {MISENSOR_16BIT, 0xD27C, 0x2044},
- {MISENSOR_16BIT, 0xD27E, 0x008E},
- {MISENSOR_16BIT, 0xD280, 0xB8A1},
- {MISENSOR_16BIT, 0xD282, 0x0926},
- {MISENSOR_16BIT, 0xD284, 0x03E0},
- {MISENSOR_16BIT, 0xD286, 0xB502},
- {MISENSOR_16BIT, 0xD288, 0x9502},
- {MISENSOR_16BIT, 0xD28A, 0x952E},
- {MISENSOR_16BIT, 0xD28C, 0x7E05},
- {MISENSOR_16BIT, 0xD28E, 0xB5C2},
- {MISENSOR_16BIT, 0xD290, 0x70CF},
- {MISENSOR_16BIT, 0xD292, 0xFFFF},
- {MISENSOR_16BIT, 0xD294, 0xC610},
- {MISENSOR_16BIT, 0xD296, 0x099A},
- {MISENSOR_16BIT, 0xD298, 0x04A0},
- {MISENSOR_16BIT, 0xD29A, 0xB026},
- {MISENSOR_16BIT, 0xD29C, 0x0E02},
- {MISENSOR_16BIT, 0xD29E, 0x0560},
- {MISENSOR_16BIT, 0xD2A0, 0xDE00},
- {MISENSOR_16BIT, 0xD2A2, 0x0A12},
- {MISENSOR_16BIT, 0xD2A4, 0x0320},
- {MISENSOR_16BIT, 0xD2A6, 0xB7C4},
- {MISENSOR_16BIT, 0xD2A8, 0x0B36},
- {MISENSOR_16BIT, 0xD2AA, 0x03A0},
- {MISENSOR_16BIT, 0xD2AC, 0x70C9},
- {MISENSOR_16BIT, 0xD2AE, 0x9502},
- {MISENSOR_16BIT, 0xD2B0, 0x7608},
- {MISENSOR_16BIT, 0xD2B2, 0xB8A8},
- {MISENSOR_16BIT, 0xD2B4, 0xB502},
- {MISENSOR_16BIT, 0xD2B6, 0x70CF},
- {MISENSOR_16BIT, 0xD2B8, 0x0000},
- {MISENSOR_16BIT, 0xD2BA, 0x5536},
- {MISENSOR_16BIT, 0xD2BC, 0x7860},
- {MISENSOR_16BIT, 0xD2BE, 0x2686},
- {MISENSOR_16BIT, 0xD2C0, 0x1FFB},
- {MISENSOR_16BIT, 0xD2C2, 0x9502},
- {MISENSOR_16BIT, 0xD2C4, 0x78C5},
- {MISENSOR_16BIT, 0xD2C6, 0x0631},
- {MISENSOR_16BIT, 0xD2C8, 0x05E0},
- {MISENSOR_16BIT, 0xD2CA, 0xB502},
- {MISENSOR_16BIT, 0xD2CC, 0x72CF},
- {MISENSOR_16BIT, 0xD2CE, 0xFFFF},
- {MISENSOR_16BIT, 0xD2D0, 0xC5D4},
- {MISENSOR_16BIT, 0xD2D2, 0x923A},
- {MISENSOR_16BIT, 0xD2D4, 0x73CF},
- {MISENSOR_16BIT, 0xD2D6, 0xFFFF},
- {MISENSOR_16BIT, 0xD2D8, 0xC7D0},
- {MISENSOR_16BIT, 0xD2DA, 0xB020},
- {MISENSOR_16BIT, 0xD2DC, 0x9220},
- {MISENSOR_16BIT, 0xD2DE, 0xB021},
- {MISENSOR_16BIT, 0xD2E0, 0x9221},
- {MISENSOR_16BIT, 0xD2E2, 0xB022},
- {MISENSOR_16BIT, 0xD2E4, 0x9222},
- {MISENSOR_16BIT, 0xD2E6, 0xB023},
- {MISENSOR_16BIT, 0xD2E8, 0x9223},
- {MISENSOR_16BIT, 0xD2EA, 0xB024},
- {MISENSOR_16BIT, 0xD2EC, 0x9227},
- {MISENSOR_16BIT, 0xD2EE, 0xB025},
- {MISENSOR_16BIT, 0xD2F0, 0x9228},
- {MISENSOR_16BIT, 0xD2F2, 0xB026},
- {MISENSOR_16BIT, 0xD2F4, 0x922B},
- {MISENSOR_16BIT, 0xD2F6, 0xB027},
- {MISENSOR_16BIT, 0xD2F8, 0x922C},
- {MISENSOR_16BIT, 0xD2FA, 0xB028},
- {MISENSOR_16BIT, 0xD2FC, 0x1258},
- {MISENSOR_16BIT, 0xD2FE, 0x0101},
- {MISENSOR_16BIT, 0xD300, 0xB029},
- {MISENSOR_16BIT, 0xD302, 0x125A},
- {MISENSOR_16BIT, 0xD304, 0x0101},
- {MISENSOR_16BIT, 0xD306, 0xB02A},
- {MISENSOR_16BIT, 0xD308, 0x1336},
- {MISENSOR_16BIT, 0xD30A, 0x8081},
- {MISENSOR_16BIT, 0xD30C, 0xA836},
- {MISENSOR_16BIT, 0xD30E, 0x1337},
- {MISENSOR_16BIT, 0xD310, 0x8081},
- {MISENSOR_16BIT, 0xD312, 0xA837},
- {MISENSOR_16BIT, 0xD314, 0x12A7},
- {MISENSOR_16BIT, 0xD316, 0x0701},
- {MISENSOR_16BIT, 0xD318, 0xB02C},
- {MISENSOR_16BIT, 0xD31A, 0x1354},
- {MISENSOR_16BIT, 0xD31C, 0x8081},
- {MISENSOR_16BIT, 0xD31E, 0x7FE0},
- {MISENSOR_16BIT, 0xD320, 0xA83A},
- {MISENSOR_16BIT, 0xD322, 0x78E0},
- {MISENSOR_16BIT, 0xD324, 0xC0F1},
- {MISENSOR_16BIT, 0xD326, 0x0DC2},
- {MISENSOR_16BIT, 0xD328, 0x05C0},
- {MISENSOR_16BIT, 0xD32A, 0x7608},
- {MISENSOR_16BIT, 0xD32C, 0x09BB},
- {MISENSOR_16BIT, 0xD32E, 0x0010},
- {MISENSOR_16BIT, 0xD330, 0x75CF},
- {MISENSOR_16BIT, 0xD332, 0xFFFF},
- {MISENSOR_16BIT, 0xD334, 0xD4E0},
- {MISENSOR_16BIT, 0xD336, 0x8D21},
- {MISENSOR_16BIT, 0xD338, 0x8D00},
- {MISENSOR_16BIT, 0xD33A, 0x2153},
- {MISENSOR_16BIT, 0xD33C, 0x0003},
- {MISENSOR_16BIT, 0xD33E, 0xB8C0},
- {MISENSOR_16BIT, 0xD340, 0x8D45},
- {MISENSOR_16BIT, 0xD342, 0x0B23},
- {MISENSOR_16BIT, 0xD344, 0x0000},
- {MISENSOR_16BIT, 0xD346, 0xEA8F},
- {MISENSOR_16BIT, 0xD348, 0x0915},
- {MISENSOR_16BIT, 0xD34A, 0x001E},
- {MISENSOR_16BIT, 0xD34C, 0xFF81},
- {MISENSOR_16BIT, 0xD34E, 0xE808},
- {MISENSOR_16BIT, 0xD350, 0x2540},
- {MISENSOR_16BIT, 0xD352, 0x1900},
- {MISENSOR_16BIT, 0xD354, 0xFFDE},
- {MISENSOR_16BIT, 0xD356, 0x8D00},
- {MISENSOR_16BIT, 0xD358, 0xB880},
- {MISENSOR_16BIT, 0xD35A, 0xF004},
- {MISENSOR_16BIT, 0xD35C, 0x8D00},
- {MISENSOR_16BIT, 0xD35E, 0xB8A0},
- {MISENSOR_16BIT, 0xD360, 0xAD00},
- {MISENSOR_16BIT, 0xD362, 0x8D05},
- {MISENSOR_16BIT, 0xD364, 0xE081},
- {MISENSOR_16BIT, 0xD366, 0x20CC},
- {MISENSOR_16BIT, 0xD368, 0x80A2},
- {MISENSOR_16BIT, 0xD36A, 0xDF00},
- {MISENSOR_16BIT, 0xD36C, 0xF40A},
- {MISENSOR_16BIT, 0xD36E, 0x71CF},
- {MISENSOR_16BIT, 0xD370, 0xFFFF},
- {MISENSOR_16BIT, 0xD372, 0xC84C},
- {MISENSOR_16BIT, 0xD374, 0x9102},
- {MISENSOR_16BIT, 0xD376, 0x7708},
- {MISENSOR_16BIT, 0xD378, 0xB8A6},
- {MISENSOR_16BIT, 0xD37A, 0x2786},
- {MISENSOR_16BIT, 0xD37C, 0x1FFE},
- {MISENSOR_16BIT, 0xD37E, 0xB102},
- {MISENSOR_16BIT, 0xD380, 0x0B42},
- {MISENSOR_16BIT, 0xD382, 0x0180},
- {MISENSOR_16BIT, 0xD384, 0x0E3E},
- {MISENSOR_16BIT, 0xD386, 0x0180},
- {MISENSOR_16BIT, 0xD388, 0x0F4A},
- {MISENSOR_16BIT, 0xD38A, 0x0160},
- {MISENSOR_16BIT, 0xD38C, 0x70C9},
- {MISENSOR_16BIT, 0xD38E, 0x8D05},
- {MISENSOR_16BIT, 0xD390, 0xE081},
- {MISENSOR_16BIT, 0xD392, 0x20CC},
- {MISENSOR_16BIT, 0xD394, 0x80A2},
- {MISENSOR_16BIT, 0xD396, 0xF429},
- {MISENSOR_16BIT, 0xD398, 0x76CF},
- {MISENSOR_16BIT, 0xD39A, 0xFFFF},
- {MISENSOR_16BIT, 0xD39C, 0xC84C},
- {MISENSOR_16BIT, 0xD39E, 0x082D},
- {MISENSOR_16BIT, 0xD3A0, 0x0051},
- {MISENSOR_16BIT, 0xD3A2, 0x70CF},
- {MISENSOR_16BIT, 0xD3A4, 0xFFFF},
- {MISENSOR_16BIT, 0xD3A6, 0xC90C},
- {MISENSOR_16BIT, 0xD3A8, 0x8805},
- {MISENSOR_16BIT, 0xD3AA, 0x09B6},
- {MISENSOR_16BIT, 0xD3AC, 0x0360},
- {MISENSOR_16BIT, 0xD3AE, 0xD908},
- {MISENSOR_16BIT, 0xD3B0, 0x2099},
- {MISENSOR_16BIT, 0xD3B2, 0x0802},
- {MISENSOR_16BIT, 0xD3B4, 0x9634},
- {MISENSOR_16BIT, 0xD3B6, 0xB503},
- {MISENSOR_16BIT, 0xD3B8, 0x7902},
- {MISENSOR_16BIT, 0xD3BA, 0x1523},
- {MISENSOR_16BIT, 0xD3BC, 0x1080},
- {MISENSOR_16BIT, 0xD3BE, 0xB634},
- {MISENSOR_16BIT, 0xD3C0, 0xE001},
- {MISENSOR_16BIT, 0xD3C2, 0x1D23},
- {MISENSOR_16BIT, 0xD3C4, 0x1002},
- {MISENSOR_16BIT, 0xD3C6, 0xF00B},
- {MISENSOR_16BIT, 0xD3C8, 0x9634},
- {MISENSOR_16BIT, 0xD3CA, 0x9503},
- {MISENSOR_16BIT, 0xD3CC, 0x6038},
- {MISENSOR_16BIT, 0xD3CE, 0xB614},
- {MISENSOR_16BIT, 0xD3D0, 0x153F},
- {MISENSOR_16BIT, 0xD3D2, 0x1080},
- {MISENSOR_16BIT, 0xD3D4, 0xE001},
- {MISENSOR_16BIT, 0xD3D6, 0x1D3F},
- {MISENSOR_16BIT, 0xD3D8, 0x1002},
- {MISENSOR_16BIT, 0xD3DA, 0xFFA4},
- {MISENSOR_16BIT, 0xD3DC, 0x9602},
- {MISENSOR_16BIT, 0xD3DE, 0x7F05},
- {MISENSOR_16BIT, 0xD3E0, 0xD800},
- {MISENSOR_16BIT, 0xD3E2, 0xB6E2},
- {MISENSOR_16BIT, 0xD3E4, 0xAD05},
- {MISENSOR_16BIT, 0xD3E6, 0x0511},
- {MISENSOR_16BIT, 0xD3E8, 0x05E0},
- {MISENSOR_16BIT, 0xD3EA, 0xD800},
- {MISENSOR_16BIT, 0xD3EC, 0xC0F1},
- {MISENSOR_16BIT, 0xD3EE, 0x0CFE},
- {MISENSOR_16BIT, 0xD3F0, 0x05C0},
- {MISENSOR_16BIT, 0xD3F2, 0x0A96},
- {MISENSOR_16BIT, 0xD3F4, 0x05A0},
- {MISENSOR_16BIT, 0xD3F6, 0x7608},
- {MISENSOR_16BIT, 0xD3F8, 0x0C22},
- {MISENSOR_16BIT, 0xD3FA, 0x0240},
- {MISENSOR_16BIT, 0xD3FC, 0xE080},
- {MISENSOR_16BIT, 0xD3FE, 0x20CA},
- {MISENSOR_16BIT, 0xD400, 0x0F82},
- {MISENSOR_16BIT, 0xD402, 0x0000},
- {MISENSOR_16BIT, 0xD404, 0x190B},
- {MISENSOR_16BIT, 0xD406, 0x0C60},
- {MISENSOR_16BIT, 0xD408, 0x05A2},
- {MISENSOR_16BIT, 0xD40A, 0x21CA},
- {MISENSOR_16BIT, 0xD40C, 0x0022},
- {MISENSOR_16BIT, 0xD40E, 0x0C56},
- {MISENSOR_16BIT, 0xD410, 0x0240},
- {MISENSOR_16BIT, 0xD412, 0xE806},
- {MISENSOR_16BIT, 0xD414, 0x0E0E},
- {MISENSOR_16BIT, 0xD416, 0x0220},
- {MISENSOR_16BIT, 0xD418, 0x70C9},
- {MISENSOR_16BIT, 0xD41A, 0xF048},
- {MISENSOR_16BIT, 0xD41C, 0x0896},
- {MISENSOR_16BIT, 0xD41E, 0x0440},
- {MISENSOR_16BIT, 0xD420, 0x0E96},
- {MISENSOR_16BIT, 0xD422, 0x0400},
- {MISENSOR_16BIT, 0xD424, 0x0966},
- {MISENSOR_16BIT, 0xD426, 0x0380},
- {MISENSOR_16BIT, 0xD428, 0x75CF},
- {MISENSOR_16BIT, 0xD42A, 0xFFFF},
- {MISENSOR_16BIT, 0xD42C, 0xD4E0},
- {MISENSOR_16BIT, 0xD42E, 0x8D00},
- {MISENSOR_16BIT, 0xD430, 0x084D},
- {MISENSOR_16BIT, 0xD432, 0x001E},
- {MISENSOR_16BIT, 0xD434, 0xFF47},
- {MISENSOR_16BIT, 0xD436, 0x080D},
- {MISENSOR_16BIT, 0xD438, 0x0050},
- {MISENSOR_16BIT, 0xD43A, 0xFF57},
- {MISENSOR_16BIT, 0xD43C, 0x0841},
- {MISENSOR_16BIT, 0xD43E, 0x0051},
- {MISENSOR_16BIT, 0xD440, 0x8D04},
- {MISENSOR_16BIT, 0xD442, 0x9521},
- {MISENSOR_16BIT, 0xD444, 0xE064},
- {MISENSOR_16BIT, 0xD446, 0x790C},
- {MISENSOR_16BIT, 0xD448, 0x702F},
- {MISENSOR_16BIT, 0xD44A, 0x0CE2},
- {MISENSOR_16BIT, 0xD44C, 0x05E0},
- {MISENSOR_16BIT, 0xD44E, 0xD964},
- {MISENSOR_16BIT, 0xD450, 0x72CF},
- {MISENSOR_16BIT, 0xD452, 0xFFFF},
- {MISENSOR_16BIT, 0xD454, 0xC700},
- {MISENSOR_16BIT, 0xD456, 0x9235},
- {MISENSOR_16BIT, 0xD458, 0x0811},
- {MISENSOR_16BIT, 0xD45A, 0x0043},
- {MISENSOR_16BIT, 0xD45C, 0xFF3D},
- {MISENSOR_16BIT, 0xD45E, 0x080D},
- {MISENSOR_16BIT, 0xD460, 0x0051},
- {MISENSOR_16BIT, 0xD462, 0xD801},
- {MISENSOR_16BIT, 0xD464, 0xFF77},
- {MISENSOR_16BIT, 0xD466, 0xF025},
- {MISENSOR_16BIT, 0xD468, 0x9501},
- {MISENSOR_16BIT, 0xD46A, 0x9235},
- {MISENSOR_16BIT, 0xD46C, 0x0911},
- {MISENSOR_16BIT, 0xD46E, 0x0003},
- {MISENSOR_16BIT, 0xD470, 0xFF49},
- {MISENSOR_16BIT, 0xD472, 0x080D},
- {MISENSOR_16BIT, 0xD474, 0x0051},
- {MISENSOR_16BIT, 0xD476, 0xD800},
- {MISENSOR_16BIT, 0xD478, 0xFF72},
- {MISENSOR_16BIT, 0xD47A, 0xF01B},
- {MISENSOR_16BIT, 0xD47C, 0x0886},
- {MISENSOR_16BIT, 0xD47E, 0x03E0},
- {MISENSOR_16BIT, 0xD480, 0xD801},
- {MISENSOR_16BIT, 0xD482, 0x0EF6},
- {MISENSOR_16BIT, 0xD484, 0x03C0},
- {MISENSOR_16BIT, 0xD486, 0x0F52},
- {MISENSOR_16BIT, 0xD488, 0x0340},
- {MISENSOR_16BIT, 0xD48A, 0x0DBA},
- {MISENSOR_16BIT, 0xD48C, 0x0200},
- {MISENSOR_16BIT, 0xD48E, 0x0AF6},
- {MISENSOR_16BIT, 0xD490, 0x0440},
- {MISENSOR_16BIT, 0xD492, 0x0C22},
- {MISENSOR_16BIT, 0xD494, 0x0400},
- {MISENSOR_16BIT, 0xD496, 0x0D72},
- {MISENSOR_16BIT, 0xD498, 0x0440},
- {MISENSOR_16BIT, 0xD49A, 0x0DC2},
- {MISENSOR_16BIT, 0xD49C, 0x0200},
- {MISENSOR_16BIT, 0xD49E, 0x0972},
- {MISENSOR_16BIT, 0xD4A0, 0x0440},
- {MISENSOR_16BIT, 0xD4A2, 0x0D3A},
- {MISENSOR_16BIT, 0xD4A4, 0x0220},
- {MISENSOR_16BIT, 0xD4A6, 0xD820},
- {MISENSOR_16BIT, 0xD4A8, 0x0BFA},
- {MISENSOR_16BIT, 0xD4AA, 0x0260},
- {MISENSOR_16BIT, 0xD4AC, 0x70C9},
- {MISENSOR_16BIT, 0xD4AE, 0x0451},
- {MISENSOR_16BIT, 0xD4B0, 0x05C0},
- {MISENSOR_16BIT, 0xD4B2, 0x78E0},
- {MISENSOR_16BIT, 0xD4B4, 0xD900},
- {MISENSOR_16BIT, 0xD4B6, 0xF00A},
- {MISENSOR_16BIT, 0xD4B8, 0x70CF},
- {MISENSOR_16BIT, 0xD4BA, 0xFFFF},
- {MISENSOR_16BIT, 0xD4BC, 0xD520},
- {MISENSOR_16BIT, 0xD4BE, 0x7835},
- {MISENSOR_16BIT, 0xD4C0, 0x8041},
- {MISENSOR_16BIT, 0xD4C2, 0x8000},
- {MISENSOR_16BIT, 0xD4C4, 0xE102},
- {MISENSOR_16BIT, 0xD4C6, 0xA040},
- {MISENSOR_16BIT, 0xD4C8, 0x09F1},
- {MISENSOR_16BIT, 0xD4CA, 0x8114},
- {MISENSOR_16BIT, 0xD4CC, 0x71CF},
- {MISENSOR_16BIT, 0xD4CE, 0xFFFF},
- {MISENSOR_16BIT, 0xD4D0, 0xD4E0},
- {MISENSOR_16BIT, 0xD4D2, 0x70CF},
- {MISENSOR_16BIT, 0xD4D4, 0xFFFF},
- {MISENSOR_16BIT, 0xD4D6, 0xC594},
- {MISENSOR_16BIT, 0xD4D8, 0xB03A},
- {MISENSOR_16BIT, 0xD4DA, 0x7FE0},
- {MISENSOR_16BIT, 0xD4DC, 0xD800},
- {MISENSOR_16BIT, 0xD4DE, 0x0000},
- {MISENSOR_16BIT, 0xD4E0, 0x0000},
- {MISENSOR_16BIT, 0xD4E2, 0x0500},
- {MISENSOR_16BIT, 0xD4E4, 0x0500},
- {MISENSOR_16BIT, 0xD4E6, 0x0200},
- {MISENSOR_16BIT, 0xD4E8, 0x0330},
- {MISENSOR_16BIT, 0xD4EA, 0x0000},
- {MISENSOR_16BIT, 0xD4EC, 0x0000},
- {MISENSOR_16BIT, 0xD4EE, 0x03CD},
- {MISENSOR_16BIT, 0xD4F0, 0x050D},
- {MISENSOR_16BIT, 0xD4F2, 0x01C5},
- {MISENSOR_16BIT, 0xD4F4, 0x03B3},
- {MISENSOR_16BIT, 0xD4F6, 0x00E0},
- {MISENSOR_16BIT, 0xD4F8, 0x01E3},
- {MISENSOR_16BIT, 0xD4FA, 0x0280},
- {MISENSOR_16BIT, 0xD4FC, 0x01E0},
- {MISENSOR_16BIT, 0xD4FE, 0x0109},
- {MISENSOR_16BIT, 0xD500, 0x0080},
- {MISENSOR_16BIT, 0xD502, 0x0500},
- {MISENSOR_16BIT, 0xD504, 0x0000},
- {MISENSOR_16BIT, 0xD506, 0x0000},
- {MISENSOR_16BIT, 0xD508, 0x0000},
- {MISENSOR_16BIT, 0xD50A, 0x0000},
- {MISENSOR_16BIT, 0xD50C, 0x0000},
- {MISENSOR_16BIT, 0xD50E, 0x0000},
- {MISENSOR_16BIT, 0xD510, 0x0000},
- {MISENSOR_16BIT, 0xD512, 0x0000},
- {MISENSOR_16BIT, 0xD514, 0x0000},
- {MISENSOR_16BIT, 0xD516, 0x0000},
- {MISENSOR_16BIT, 0xD518, 0x0000},
- {MISENSOR_16BIT, 0xD51A, 0x0000},
- {MISENSOR_16BIT, 0xD51C, 0x0000},
- {MISENSOR_16BIT, 0xD51E, 0x0000},
- {MISENSOR_16BIT, 0xD520, 0xFFFF},
- {MISENSOR_16BIT, 0xD522, 0xC9B4},
- {MISENSOR_16BIT, 0xD524, 0xFFFF},
- {MISENSOR_16BIT, 0xD526, 0xD324},
- {MISENSOR_16BIT, 0xD528, 0xFFFF},
- {MISENSOR_16BIT, 0xD52A, 0xCA34},
- {MISENSOR_16BIT, 0xD52C, 0xFFFF},
- {MISENSOR_16BIT, 0xD52E, 0xD3EC},
- {MISENSOR_16BIT, 0x098E, 0x0000},
- {MISENSOR_16BIT, 0xE000, 0x04B4},
- {MISENSOR_16BIT, 0xE002, 0x0302},
- {MISENSOR_16BIT, 0xE004, 0x4103},
- {MISENSOR_16BIT, 0xE006, 0x0202},
- {MISENSOR_16BIT, 0x0080, 0xFFF0},
- {MISENSOR_16BIT, 0x0080, 0xFFF1},
-
- /* PGA parameter and APGA
- * [Step4-APGA] [TP101_MT9M114_APGA]
- */
- {MISENSOR_16BIT, 0x098E, 0x495E},
- {MISENSOR_16BIT, 0xC95E, 0x0000},
- {MISENSOR_16BIT, 0x3640, 0x02B0},
- {MISENSOR_16BIT, 0x3642, 0x8063},
- {MISENSOR_16BIT, 0x3644, 0x78D0},
- {MISENSOR_16BIT, 0x3646, 0x50CC},
- {MISENSOR_16BIT, 0x3648, 0x3511},
- {MISENSOR_16BIT, 0x364A, 0x0110},
- {MISENSOR_16BIT, 0x364C, 0xBD8A},
- {MISENSOR_16BIT, 0x364E, 0x0CD1},
- {MISENSOR_16BIT, 0x3650, 0x24ED},
- {MISENSOR_16BIT, 0x3652, 0x7C11},
- {MISENSOR_16BIT, 0x3654, 0x0150},
- {MISENSOR_16BIT, 0x3656, 0x124C},
- {MISENSOR_16BIT, 0x3658, 0x3130},
- {MISENSOR_16BIT, 0x365A, 0x508C},
- {MISENSOR_16BIT, 0x365C, 0x21F1},
- {MISENSOR_16BIT, 0x365E, 0x0090},
- {MISENSOR_16BIT, 0x3660, 0xBFCA},
- {MISENSOR_16BIT, 0x3662, 0x0A11},
- {MISENSOR_16BIT, 0x3664, 0x4F4B},
- {MISENSOR_16BIT, 0x3666, 0x28B1},
- {MISENSOR_16BIT, 0x3680, 0x50A9},
- {MISENSOR_16BIT, 0x3682, 0xA04B},
- {MISENSOR_16BIT, 0x3684, 0x0E2D},
- {MISENSOR_16BIT, 0x3686, 0x73EC},
- {MISENSOR_16BIT, 0x3688, 0x164F},
- {MISENSOR_16BIT, 0x368A, 0xF829},
- {MISENSOR_16BIT, 0x368C, 0xC1A8},
- {MISENSOR_16BIT, 0x368E, 0xB0EC},
- {MISENSOR_16BIT, 0x3690, 0xE76A},
- {MISENSOR_16BIT, 0x3692, 0x69AF},
- {MISENSOR_16BIT, 0x3694, 0x378C},
- {MISENSOR_16BIT, 0x3696, 0xA70D},
- {MISENSOR_16BIT, 0x3698, 0x884F},
- {MISENSOR_16BIT, 0x369A, 0xEE8B},
- {MISENSOR_16BIT, 0x369C, 0x5DEF},
- {MISENSOR_16BIT, 0x369E, 0x27CC},
- {MISENSOR_16BIT, 0x36A0, 0xCAAC},
- {MISENSOR_16BIT, 0x36A2, 0x840E},
- {MISENSOR_16BIT, 0x36A4, 0xDAA9},
- {MISENSOR_16BIT, 0x36A6, 0xF00C},
- {MISENSOR_16BIT, 0x36C0, 0x1371},
- {MISENSOR_16BIT, 0x36C2, 0x272F},
- {MISENSOR_16BIT, 0x36C4, 0x2293},
- {MISENSOR_16BIT, 0x36C6, 0xE6D0},
- {MISENSOR_16BIT, 0x36C8, 0xEC32},
- {MISENSOR_16BIT, 0x36CA, 0x11B1},
- {MISENSOR_16BIT, 0x36CC, 0x7BAF},
- {MISENSOR_16BIT, 0x36CE, 0x5813},
- {MISENSOR_16BIT, 0x36D0, 0xB871},
- {MISENSOR_16BIT, 0x36D2, 0x8913},
- {MISENSOR_16BIT, 0x36D4, 0x4610},
- {MISENSOR_16BIT, 0x36D6, 0x7EEE},
- {MISENSOR_16BIT, 0x36D8, 0x0DF3},
- {MISENSOR_16BIT, 0x36DA, 0xB84F},
- {MISENSOR_16BIT, 0x36DC, 0xB532},
- {MISENSOR_16BIT, 0x36DE, 0x1171},
- {MISENSOR_16BIT, 0x36E0, 0x13CF},
- {MISENSOR_16BIT, 0x36E2, 0x22F3},
- {MISENSOR_16BIT, 0x36E4, 0xE090},
- {MISENSOR_16BIT, 0x36E6, 0x8133},
- {MISENSOR_16BIT, 0x3700, 0x88AE},
- {MISENSOR_16BIT, 0x3702, 0x00EA},
- {MISENSOR_16BIT, 0x3704, 0x344F},
- {MISENSOR_16BIT, 0x3706, 0xEC88},
- {MISENSOR_16BIT, 0x3708, 0x3E91},
- {MISENSOR_16BIT, 0x370A, 0xF12D},
- {MISENSOR_16BIT, 0x370C, 0xB0EF},
- {MISENSOR_16BIT, 0x370E, 0x77CD},
- {MISENSOR_16BIT, 0x3710, 0x7930},
- {MISENSOR_16BIT, 0x3712, 0x5C12},
- {MISENSOR_16BIT, 0x3714, 0x500C},
- {MISENSOR_16BIT, 0x3716, 0x22CE},
- {MISENSOR_16BIT, 0x3718, 0x2370},
- {MISENSOR_16BIT, 0x371A, 0x258F},
- {MISENSOR_16BIT, 0x371C, 0x3D30},
- {MISENSOR_16BIT, 0x371E, 0x370C},
- {MISENSOR_16BIT, 0x3720, 0x03ED},
- {MISENSOR_16BIT, 0x3722, 0x9AD0},
- {MISENSOR_16BIT, 0x3724, 0x7ECF},
- {MISENSOR_16BIT, 0x3726, 0x1093},
- {MISENSOR_16BIT, 0x3740, 0x2391},
- {MISENSOR_16BIT, 0x3742, 0xAAD0},
- {MISENSOR_16BIT, 0x3744, 0x28F2},
- {MISENSOR_16BIT, 0x3746, 0xBA4F},
- {MISENSOR_16BIT, 0x3748, 0xC536},
- {MISENSOR_16BIT, 0x374A, 0x1472},
- {MISENSOR_16BIT, 0x374C, 0xD110},
- {MISENSOR_16BIT, 0x374E, 0x2933},
- {MISENSOR_16BIT, 0x3750, 0xD0D1},
- {MISENSOR_16BIT, 0x3752, 0x9F37},
- {MISENSOR_16BIT, 0x3754, 0x34D1},
- {MISENSOR_16BIT, 0x3756, 0x1C6C},
- {MISENSOR_16BIT, 0x3758, 0x3FD2},
- {MISENSOR_16BIT, 0x375A, 0xCB72},
- {MISENSOR_16BIT, 0x375C, 0xBA96},
- {MISENSOR_16BIT, 0x375E, 0x1551},
- {MISENSOR_16BIT, 0x3760, 0xB74F},
- {MISENSOR_16BIT, 0x3762, 0x1672},
- {MISENSOR_16BIT, 0x3764, 0x84F1},
- {MISENSOR_16BIT, 0x3766, 0xC2D6},
- {MISENSOR_16BIT, 0x3782, 0x01E0},
- {MISENSOR_16BIT, 0x3784, 0x0280},
- {MISENSOR_16BIT, 0x37C0, 0xA6EA},
- {MISENSOR_16BIT, 0x37C2, 0x874B},
- {MISENSOR_16BIT, 0x37C4, 0x85CB},
- {MISENSOR_16BIT, 0x37C6, 0x968A},
- {MISENSOR_16BIT, 0x098E, 0x0000},
- {MISENSOR_16BIT, 0xC960, 0x0AF0},
- {MISENSOR_16BIT, 0xC962, 0x79E2},
- {MISENSOR_16BIT, 0xC964, 0x5EC8},
- {MISENSOR_16BIT, 0xC966, 0x791F},
- {MISENSOR_16BIT, 0xC968, 0x76EE},
- {MISENSOR_16BIT, 0xC96A, 0x0FA0},
- {MISENSOR_16BIT, 0xC96C, 0x7DFA},
- {MISENSOR_16BIT, 0xC96E, 0x7DAF},
- {MISENSOR_16BIT, 0xC970, 0x7E02},
- {MISENSOR_16BIT, 0xC972, 0x7E0A},
- {MISENSOR_16BIT, 0xC974, 0x1964},
- {MISENSOR_16BIT, 0xC976, 0x7CDC},
- {MISENSOR_16BIT, 0xC978, 0x7838},
- {MISENSOR_16BIT, 0xC97A, 0x7C2F},
- {MISENSOR_16BIT, 0xC97C, 0x7792},
- {MISENSOR_16BIT, 0xC95E, 0x0003},
-
- /* [Step4-APGA] */
- {MISENSOR_16BIT, 0x098E, 0x0000},
- {MISENSOR_16BIT, 0xC95E, 0x0003},
-
- /* [Step5-AWB_CCM]1: LOAD=CCM */
- {MISENSOR_16BIT, 0xC892, 0x0267},
- {MISENSOR_16BIT, 0xC894, 0xFF1A},
- {MISENSOR_16BIT, 0xC896, 0xFFB3},
- {MISENSOR_16BIT, 0xC898, 0xFF80},
- {MISENSOR_16BIT, 0xC89A, 0x0166},
- {MISENSOR_16BIT, 0xC89C, 0x0003},
- {MISENSOR_16BIT, 0xC89E, 0xFF9A},
- {MISENSOR_16BIT, 0xC8A0, 0xFEB4},
- {MISENSOR_16BIT, 0xC8A2, 0x024D},
- {MISENSOR_16BIT, 0xC8A4, 0x01BF},
- {MISENSOR_16BIT, 0xC8A6, 0xFF01},
- {MISENSOR_16BIT, 0xC8A8, 0xFFF3},
- {MISENSOR_16BIT, 0xC8AA, 0xFF75},
- {MISENSOR_16BIT, 0xC8AC, 0x0198},
- {MISENSOR_16BIT, 0xC8AE, 0xFFFD},
- {MISENSOR_16BIT, 0xC8B0, 0xFF9A},
- {MISENSOR_16BIT, 0xC8B2, 0xFEE7},
- {MISENSOR_16BIT, 0xC8B4, 0x02A8},
- {MISENSOR_16BIT, 0xC8B6, 0x01D9},
- {MISENSOR_16BIT, 0xC8B8, 0xFF26},
- {MISENSOR_16BIT, 0xC8BA, 0xFFF3},
- {MISENSOR_16BIT, 0xC8BC, 0xFFB3},
- {MISENSOR_16BIT, 0xC8BE, 0x0132},
- {MISENSOR_16BIT, 0xC8C0, 0xFFE8},
- {MISENSOR_16BIT, 0xC8C2, 0xFFDA},
- {MISENSOR_16BIT, 0xC8C4, 0xFECD},
- {MISENSOR_16BIT, 0xC8C6, 0x02C2},
- {MISENSOR_16BIT, 0xC8C8, 0x0075},
- {MISENSOR_16BIT, 0xC8CA, 0x011C},
- {MISENSOR_16BIT, 0xC8CC, 0x009A},
- {MISENSOR_16BIT, 0xC8CE, 0x0105},
- {MISENSOR_16BIT, 0xC8D0, 0x00A4},
- {MISENSOR_16BIT, 0xC8D2, 0x00AC},
- {MISENSOR_16BIT, 0xC8D4, 0x0A8C},
- {MISENSOR_16BIT, 0xC8D6, 0x0F0A},
- {MISENSOR_16BIT, 0xC8D8, 0x1964},
-
- /* LOAD=AWB */
- {MISENSOR_16BIT, 0xC914, 0x0000},
- {MISENSOR_16BIT, 0xC916, 0x0000},
- {MISENSOR_16BIT, 0xC918, 0x04FF},
- {MISENSOR_16BIT, 0xC91A, 0x02CF},
- {MISENSOR_16BIT, 0xC904, 0x0033},
- {MISENSOR_16BIT, 0xC906, 0x0040},
- {MISENSOR_8BIT, 0xC8F2, 0x03},
- {MISENSOR_8BIT, 0xC8F3, 0x02},
- {MISENSOR_16BIT, 0xC906, 0x003C},
- {MISENSOR_16BIT, 0xC8F4, 0x0000},
- {MISENSOR_16BIT, 0xC8F6, 0x0000},
- {MISENSOR_16BIT, 0xC8F8, 0x0000},
- {MISENSOR_16BIT, 0xC8FA, 0xE724},
- {MISENSOR_16BIT, 0xC8FC, 0x1583},
- {MISENSOR_16BIT, 0xC8FE, 0x2045},
- {MISENSOR_16BIT, 0xC900, 0x05DC},
- {MISENSOR_16BIT, 0xC902, 0x007C},
- {MISENSOR_8BIT, 0xC90C, 0x80},
- {MISENSOR_8BIT, 0xC90D, 0x80},
- {MISENSOR_8BIT, 0xC90E, 0x80},
- {MISENSOR_8BIT, 0xC90F, 0x88},
- {MISENSOR_8BIT, 0xC910, 0x80},
- {MISENSOR_8BIT, 0xC911, 0x80},
-
- /* LOAD=Step7-CPIPE_Preference */
- {MISENSOR_16BIT, 0xC926, 0x0020},
- {MISENSOR_16BIT, 0xC928, 0x009A},
- {MISENSOR_16BIT, 0xC946, 0x0070},
- {MISENSOR_16BIT, 0xC948, 0x00F3},
- {MISENSOR_16BIT, 0xC952, 0x0020},
- {MISENSOR_16BIT, 0xC954, 0x009A},
- {MISENSOR_8BIT, 0xC92A, 0x80},
- {MISENSOR_8BIT, 0xC92B, 0x4B},
- {MISENSOR_8BIT, 0xC92C, 0x00},
- {MISENSOR_8BIT, 0xC92D, 0xFF},
- {MISENSOR_8BIT, 0xC92E, 0x3C},
- {MISENSOR_8BIT, 0xC92F, 0x02},
- {MISENSOR_8BIT, 0xC930, 0x06},
- {MISENSOR_8BIT, 0xC931, 0x64},
- {MISENSOR_8BIT, 0xC932, 0x01},
- {MISENSOR_8BIT, 0xC933, 0x0C},
- {MISENSOR_8BIT, 0xC934, 0x3C},
- {MISENSOR_8BIT, 0xC935, 0x3C},
- {MISENSOR_8BIT, 0xC936, 0x3C},
- {MISENSOR_8BIT, 0xC937, 0x0F},
- {MISENSOR_8BIT, 0xC938, 0x64},
- {MISENSOR_8BIT, 0xC939, 0x64},
- {MISENSOR_8BIT, 0xC93A, 0x64},
- {MISENSOR_8BIT, 0xC93B, 0x32},
- {MISENSOR_16BIT, 0xC93C, 0x0020},
- {MISENSOR_16BIT, 0xC93E, 0x009A},
- {MISENSOR_16BIT, 0xC940, 0x00DC},
- {MISENSOR_8BIT, 0xC942, 0x38},
- {MISENSOR_8BIT, 0xC943, 0x30},
- {MISENSOR_8BIT, 0xC944, 0x50},
- {MISENSOR_8BIT, 0xC945, 0x19},
- {MISENSOR_16BIT, 0xC94A, 0x0230},
- {MISENSOR_16BIT, 0xC94C, 0x0010},
- {MISENSOR_16BIT, 0xC94E, 0x01CD},
- {MISENSOR_8BIT, 0xC950, 0x05},
- {MISENSOR_8BIT, 0xC951, 0x40},
- {MISENSOR_8BIT, 0xC87B, 0x1B},
- {MISENSOR_8BIT, 0xC878, 0x0E},
- {MISENSOR_16BIT, 0xC890, 0x0080},
- {MISENSOR_16BIT, 0xC886, 0x0100},
- {MISENSOR_16BIT, 0xC87C, 0x005A},
- {MISENSOR_8BIT, 0xB42A, 0x05},
- {MISENSOR_8BIT, 0xA80A, 0x20},
-
- /* Speed up AE/AWB */
- {MISENSOR_16BIT, 0x098E, 0x2802},
- {MISENSOR_16BIT, 0xA802, 0x0008},
- {MISENSOR_8BIT, 0xC908, 0x01},
- {MISENSOR_8BIT, 0xC879, 0x01},
- {MISENSOR_8BIT, 0xC909, 0x02},
- {MISENSOR_8BIT, 0xA80A, 0x18},
- {MISENSOR_8BIT, 0xA80B, 0x18},
- {MISENSOR_8BIT, 0xAC16, 0x18},
- {MISENSOR_8BIT, 0xC878, 0x0E},
-
- {MISENSOR_TOK_TERM, 0, 0}
-};
-
-#endif
-#endif
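
For context on how a table of this shape is consumed: the deleted header stores {width, address, value} triples, written as 8- or 16-bit registers and terminated by a MISENSOR_TOK_TERM sentinel. A minimal sketch of the replay loop follows; the struct layout mirrors the table, but the write helpers (sensor_write8/sensor_write16) are assumed names for illustration, not the driver's actual API.

struct misensor_reg {
	u32 length;	/* MISENSOR_8BIT, MISENSOR_16BIT or MISENSOR_TOK_TERM */
	u32 reg;	/* 16-bit register address */
	u32 val;	/* value to program */
};

static int replay_reg_table(struct i2c_client *client,
			    const struct misensor_reg *t)
{
	int ret;

	for (; t->length != MISENSOR_TOK_TERM; t++) {
		if (t->length == MISENSOR_16BIT)
			ret = sensor_write16(client, t->reg, t->val);
		else	/* MISENSOR_8BIT */
			ret = sensor_write8(client, t->reg, t->val);
		if (ret)
			return ret;
	}
	return 0;
}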
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 02ccf80e6559..3a4eb4f6d3be 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -87,7 +87,7 @@ static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
unsigned short fps = 0;
int ret;
- ret = v4l2_subdev_call_state_active(isp->inputs[asd->input_curr].camera,
+ ret = v4l2_subdev_call_state_active(isp->inputs[asd->input_curr].sensor,
pad, get_frame_interval, &fi);
if (!ret && fi.interval.numerator)
@@ -881,7 +881,8 @@ void atomisp_assert_recovery_work(struct work_struct *work)
spin_unlock_irqrestore(&isp->lock, flags);
/* stream off sensor */
- ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].camera, video, s_stream, 0);
+ ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].csi_remote_source,
+ video, s_stream, 0);
if (ret)
dev_warn(isp->dev, "Stopping sensor stream failed: %d\n", ret);
@@ -936,7 +937,8 @@ void atomisp_assert_recovery_work(struct work_struct *work)
/* Requeue unprocessed per-frame parameters. */
atomisp_recover_params_queue(&isp->asd.video_out);
- ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].camera, video, s_stream, 1);
+ ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].csi_remote_source,
+ video, s_stream, 1);
if (ret)
dev_err(isp->dev, "Starting sensor stream failed: %d\n", ret);
@@ -3124,7 +3126,7 @@ int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
control.id = V4L2_CID_COLORFX;
control.value = *effect;
ret =
- v4l2_s_ctrl(NULL, isp->inputs[asd->input_curr].camera->ctrl_handler,
+ v4l2_s_ctrl(NULL, isp->inputs[asd->input_curr].sensor->ctrl_handler,
&control);
/*
* if set color effect to sensor successfully, return
@@ -3620,16 +3622,16 @@ int atomisp_s_sensor_power(struct atomisp_device *isp, unsigned int input, bool
{
int ret;
- if (isp->inputs[input].camera_on == on)
+ if (isp->inputs[input].sensor_on == on)
return 0;
- ret = v4l2_subdev_call(isp->inputs[input].camera, core, s_power, on);
+ ret = v4l2_subdev_call(isp->inputs[input].sensor, core, s_power, on);
if (ret && ret != -ENOIOCTLCMD) {
dev_err(isp->dev, "Error setting sensor power %d: %d\n", on, ret);
return ret;
}
- isp->inputs[input].camera_on = on;
+ isp->inputs[input].sensor_on = on;
return 0;
}
@@ -3677,7 +3679,7 @@ void atomisp_setup_input_links(struct atomisp_device *isp)
* will end up calling atomisp_link_setup() which calls this
* function again leading to endless recursion.
*/
- if (isp->sensor_subdevs[i] == isp->inputs[isp->asd.input_curr].camera)
+ if (isp->sensor_subdevs[i] == isp->inputs[isp->asd.input_curr].csi_remote_source)
link->flags |= MEDIA_LNK_FL_ENABLED;
else
link->flags &= ~MEDIA_LNK_FL_ENABLED;
@@ -3704,7 +3706,7 @@ static int atomisp_set_sensor_crop_and_fmt(struct atomisp_device *isp,
struct v4l2_subdev_state *sd_state;
int ret = 0;
- if (!input->camera)
+ if (!input->sensor)
return -EINVAL;
/*
@@ -3719,7 +3721,7 @@ static int atomisp_set_sensor_crop_and_fmt(struct atomisp_device *isp,
}
sd_state = (which == V4L2_SUBDEV_FORMAT_TRY) ? input->try_sd_state :
- input->camera->active_state;
+ input->sensor->active_state;
if (sd_state)
v4l2_subdev_lock_state(sd_state);
@@ -3740,20 +3742,44 @@ static int atomisp_set_sensor_crop_and_fmt(struct atomisp_device *isp,
sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1;
sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1;
- ret = v4l2_subdev_call(input->camera, pad, set_selection, sd_state, &sel);
+ ret = v4l2_subdev_call(input->sensor, pad, set_selection, sd_state, &sel);
if (ret)
- dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n",
- sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret);
+ dev_err(isp->dev, "Error setting crop to (%d,%d)/%ux%u: %d\n",
+ sel.r.left, sel.r.top, sel.r.width, sel.r.height, ret);
set_fmt:
- if (ret == 0)
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, sd_state, &format);
+ if (ret == 0) {
+ ret = v4l2_subdev_call(input->sensor, pad, set_fmt, sd_state, &format);
+ dev_dbg(isp->dev, "Set sensor format ret: %d size %dx%d\n",
+ ret, format.format.width, format.format.height);
+ }
if (sd_state)
v4l2_subdev_unlock_state(sd_state);
+ /* Propagate new fmt to sensor ISP */
+ if (ret == 0 && which == V4L2_SUBDEV_FORMAT_ACTIVE && input->sensor_isp) {
+ sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor_isp);
+
+ format.pad = SENSOR_ISP_PAD_SINK;
+ ret = v4l2_subdev_call(input->sensor_isp, pad, set_fmt, sd_state, &format);
+ dev_dbg(isp->dev, "Set sensor ISP sink format ret: %d size %dx%d\n",
+ ret, format.format.width, format.format.height);
+
+ if (ret == 0) {
+ format.pad = SENSOR_ISP_PAD_SOURCE;
+ ret = v4l2_subdev_call(input->sensor_isp, pad, set_fmt, sd_state, &format);
+ dev_dbg(isp->dev, "Set sensor ISP source format ret: %d size %dx%d\n",
+ ret, format.format.width, format.format.height);
+ }
+
+ if (sd_state)
+ v4l2_subdev_unlock_state(sd_state);
+ }
+
/* Propagate new fmt to CSI port */
- if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (ret == 0 && which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ format.pad = CSI2_PAD_SINK;
ret = v4l2_subdev_call(input->csi_port, pad, set_fmt, NULL, &format);
if (ret)
return ret;
@@ -3784,9 +3810,14 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
return -EINVAL;
}
- /* The preview pipeline does not support width > 1920 */
- if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW)
- f->width = min_t(u32, f->width, 1920);
+ /*
+ * The preview pipeline does not support width > 1920. Also limit height
+ * to avoid sensor drivers still picking a too wide resolution.
+ */
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ f->width = min(f->width, 1920U);
+ f->height = min(f->height, 1440U);
+ }
/*
* atomisp_set_fmt() will set the sensor resolution to the requested
@@ -3875,7 +3906,7 @@ static inline int atomisp_set_sensor_mipi_to_isp(
u32 mipi_port, metadata_width = 0, metadata_height = 0;
ctrl.id = V4L2_CID_LINK_FREQ;
- if (v4l2_g_ctrl(input->camera->ctrl_handler, &ctrl) == 0)
+ if (v4l2_g_ctrl(input->sensor->ctrl_handler, &ctrl) == 0)
mipi_freq = ctrl.value;
if (asd->stream_env[stream_id].isys_configs == 1) {
@@ -4038,7 +4069,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
if (!format)
return -EINVAL;
- mipi_info = atomisp_to_sensor_mipi_info(input->camera);
+ mipi_info = atomisp_to_sensor_mipi_info(input->sensor);
if (atomisp_set_sensor_mipi_to_isp(asd, ATOMISP_INPUT_STREAM_GENERAL,
mipi_info))
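
The v4l2_g_ctrl(V4L2_CID_LINK_FREQ) hunks above derive the MIPI frequency from the sensor's control handler. A hedged sketch of the same query via the generic helper, assuming the sensor exposes V4L2_CID_LINK_FREQ (or V4L2_CID_PIXEL_RATE as a fallback):

#include <media/v4l2-common.h>
#include <media/v4l2-subdev.h>

/* Returns the CSI-2 link frequency in Hz, or a negative errno. */
static s64 sensor_link_freq(struct v4l2_subdev *sensor,
			    unsigned int bpp, unsigned int lanes)
{
	/* For D-PHY the PIXEL_RATE fallback scales by bpp / (2 * lanes). */
	return v4l2_get_link_freq(sensor->ctrl_handler, bpp, 2 * lanes);
}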
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index 2eb44bccff0e..bc97fa2c374c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -1845,7 +1845,7 @@ static enum ia_css_pipe_mode __pipe_id_to_pipe_mode(
{
struct atomisp_device *isp = asd->isp;
struct camera_mipi_info *mipi_info = atomisp_to_sensor_mipi_info(
- isp->inputs[asd->input_curr].camera);
+ isp->inputs[asd->input_curr].sensor);
switch (pipe_id) {
case IA_CSS_PIPE_ID_COPY:
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
deleted file mode 100644
index 23d798f3085c..000000000000
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- */
-#ifndef __ATOMISP_COMPAT_IOCTL32_H__
-#define __ATOMISP_COMPAT_IOCTL32_H__
-
-#include <linux/compat.h>
-#include <linux/videodev2.h>
-
-#include "atomisp_compat.h"
-
-struct atomisp_histogram32 {
- unsigned int num_elements;
- compat_uptr_t data;
-};
-
-struct atomisp_dvs2_stat_types32 {
- compat_uptr_t odd_real; /** real part of the odd statistics*/
- compat_uptr_t odd_imag; /** imaginary part of the odd statistics*/
- compat_uptr_t even_real;/** real part of the even statistics*/
- compat_uptr_t even_imag;/** imaginary part of the even statistics*/
-};
-
-struct atomisp_dvs2_coef_types32 {
- compat_uptr_t odd_real; /** real part of the odd coefficients*/
- compat_uptr_t odd_imag; /** imaginary part of the odd coefficients*/
- compat_uptr_t even_real;/** real part of the even coefficients*/
- compat_uptr_t even_imag;/** imaginary part of the even coefficients*/
-};
-
-struct atomisp_dvs2_statistics32 {
- struct atomisp_dvs_grid_info grid_info;
- struct atomisp_dvs2_stat_types32 hor_prod;
- struct atomisp_dvs2_stat_types32 ver_prod;
-};
-
-struct atomisp_dis_statistics32 {
- struct atomisp_dvs2_statistics32 dvs2_stat;
- u32 exp_id;
-};
-
-struct atomisp_dis_coefficients32 {
- struct atomisp_dvs_grid_info grid_info;
- struct atomisp_dvs2_coef_types32 hor_coefs;
- struct atomisp_dvs2_coef_types32 ver_coefs;
-};
-
-struct atomisp_3a_statistics32 {
- struct atomisp_grid_info grid_info;
- compat_uptr_t data;
- compat_uptr_t rgby_data;
- u32 exp_id;
- u32 isp_config_id;
-};
-
-struct atomisp_morph_table32 {
- unsigned int enabled;
- unsigned int height;
- unsigned int width; /* number of valid elements per line */
- compat_uptr_t coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES];
- compat_uptr_t coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES];
-};
-
-struct v4l2_framebuffer32 {
- __u32 capability;
- __u32 flags;
- compat_uptr_t base;
- struct v4l2_pix_format fmt;
-};
-
-struct atomisp_overlay32 {
- /* the frame containing the overlay data The overlay frame width should
- * be the multiples of 2*ISP_VEC_NELEMS. The overlay frame height
- * should be the multiples of 2.
- */
- compat_uptr_t frame;
- /* Y value of overlay background */
- unsigned char bg_y;
- /* U value of overlay background */
- char bg_u;
- /* V value of overlay background */
- char bg_v;
- /* the blending percent of input data for Y subpixels */
- unsigned char blend_input_perc_y;
- /* the blending percent of input data for U subpixels */
- unsigned char blend_input_perc_u;
- /* the blending percent of input data for V subpixels */
- unsigned char blend_input_perc_v;
- /* the blending percent of overlay data for Y subpixels */
- unsigned char blend_overlay_perc_y;
- /* the blending percent of overlay data for U subpixels */
- unsigned char blend_overlay_perc_u;
- /* the blending percent of overlay data for V subpixels */
- unsigned char blend_overlay_perc_v;
- /* the overlay start x pixel position on output frame It should be the
- multiples of 2*ISP_VEC_NELEMS. */
- unsigned int overlay_start_x;
- /* the overlay start y pixel position on output frame It should be the
- multiples of 2. */
- unsigned int overlay_start_y;
-};
-
-struct atomisp_shading_table32 {
- __u32 enable;
- __u32 sensor_width;
- __u32 sensor_height;
- __u32 width;
- __u32 height;
- __u32 fraction_bits;
-
- compat_uptr_t data[ATOMISP_NUM_SC_COLORS];
-};
-
-struct atomisp_parameters32 {
- compat_uptr_t wb_config; /* White Balance config */
- compat_uptr_t cc_config; /* Color Correction config */
- compat_uptr_t tnr_config; /* Temporal Noise Reduction */
- compat_uptr_t ecd_config; /* Eigen Color Demosaicing */
- compat_uptr_t ynr_config; /* Y(Luma) Noise Reduction */
- compat_uptr_t fc_config; /* Fringe Control */
- compat_uptr_t formats_config; /* Formats Control */
- compat_uptr_t cnr_config; /* Chroma Noise Reduction */
- compat_uptr_t macc_config; /* MACC */
- compat_uptr_t ctc_config; /* Chroma Tone Control */
- compat_uptr_t aa_config; /* Anti-Aliasing */
- compat_uptr_t baa_config; /* Anti-Aliasing */
- compat_uptr_t ce_config;
- compat_uptr_t dvs_6axis_config;
- compat_uptr_t ob_config; /* Objective Black config */
- compat_uptr_t dp_config; /* Dead Pixel config */
- compat_uptr_t nr_config; /* Noise Reduction config */
- compat_uptr_t ee_config; /* Edge Enhancement config */
- compat_uptr_t de_config; /* Demosaic config */
- compat_uptr_t gc_config; /* Gamma Correction config */
- compat_uptr_t anr_config; /* Advanced Noise Reduction */
- compat_uptr_t a3a_config; /* 3A Statistics config */
- compat_uptr_t xnr_config; /* eXtra Noise Reduction */
- compat_uptr_t dz_config; /* Digital Zoom */
- compat_uptr_t yuv2rgb_cc_config; /* Color
- Correction config */
- compat_uptr_t rgb2yuv_cc_config; /* Color
- Correction config */
- compat_uptr_t macc_table;
- compat_uptr_t gamma_table;
- compat_uptr_t ctc_table;
- compat_uptr_t xnr_table;
- compat_uptr_t r_gamma_table;
- compat_uptr_t g_gamma_table;
- compat_uptr_t b_gamma_table;
- compat_uptr_t motion_vector; /* For 2-axis DVS */
- compat_uptr_t shading_table;
- compat_uptr_t morph_table;
- compat_uptr_t dvs_coefs; /* DVS 1.0 coefficients */
- compat_uptr_t dvs2_coefs; /* DVS 2.0 coefficients */
- compat_uptr_t capture_config;
- compat_uptr_t anr_thres;
-
- compat_uptr_t lin_2500_config; /* Skylake: Linearization config */
- compat_uptr_t obgrid_2500_config; /* Skylake: OBGRID config */
- compat_uptr_t bnr_2500_config; /* Skylake: bayer denoise config */
- compat_uptr_t shd_2500_config; /* Skylake: shading config */
- compat_uptr_t dm_2500_config; /* Skylake: demosaic config */
- compat_uptr_t rgbpp_2500_config; /* Skylake: RGBPP config */
- compat_uptr_t dvs_stat_2500_config; /* Skylake: DVS STAT config */
- compat_uptr_t lace_stat_2500_config; /* Skylake: LACE STAT config */
- compat_uptr_t yuvp1_2500_config; /* Skylake: yuvp1 config */
- compat_uptr_t yuvp2_2500_config; /* Skylake: yuvp2 config */
- compat_uptr_t tnr_2500_config; /* Skylake: TNR config */
- compat_uptr_t dpc_2500_config; /* Skylake: DPC config */
- compat_uptr_t awb_2500_config; /* Skylake: auto white balance config */
- compat_uptr_t
- awb_fr_2500_config; /* Skylake: auto white balance filter response config */
- compat_uptr_t anr_2500_config; /* Skylake: ANR config */
- compat_uptr_t af_2500_config; /* Skylake: auto focus config */
- compat_uptr_t ae_2500_config; /* Skylake: auto exposure config */
- compat_uptr_t bds_2500_config; /* Skylake: bayer downscaler config */
- compat_uptr_t
- dvs_2500_config; /* Skylake: digital video stabilization config */
- compat_uptr_t res_mgr_2500_config;
-
- /*
- * Output frame pointer the config is to be applied to (optional),
- * set to NULL to make this config is applied as global.
- */
- compat_uptr_t output_frame;
- /*
- * Unique ID to track which config was actually applied to a particular
- * frame, driver will send this id back with output frame together.
- */
- u32 isp_config_id;
- u32 per_frame_setting;
-};
-
-struct atomisp_dvs_6axis_config32 {
- u32 exp_id;
- u32 width_y;
- u32 height_y;
- u32 width_uv;
- u32 height_uv;
- compat_uptr_t xcoords_y;
- compat_uptr_t ycoords_y;
- compat_uptr_t xcoords_uv;
- compat_uptr_t ycoords_uv;
-};
-
-#define ATOMISP_IOC_G_HISTOGRAM32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32)
-#define ATOMISP_IOC_S_HISTOGRAM32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32)
-
-#define ATOMISP_IOC_G_DIS_STAT32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics32)
-#define ATOMISP_IOC_S_DIS_COEFS32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients32)
-
-#define ATOMISP_IOC_S_DIS_VECTOR32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config32)
-
-#define ATOMISP_IOC_G_3A_STAT32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics32)
-
-#define ATOMISP_IOC_G_ISP_GDC_TAB32 \
- _IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table32)
-#define ATOMISP_IOC_S_ISP_GDC_TAB32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table32)
-
-#define ATOMISP_IOC_S_ISP_FPN_TABLE32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer32)
-
-#define ATOMISP_IOC_G_ISP_OVERLAY32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32)
-#define ATOMISP_IOC_S_ISP_OVERLAY32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32)
-
-#define ATOMISP_IOC_S_ISP_SHD_TAB32 \
- _IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table32)
-
-#define ATOMISP_IOC_S_PARAMETERS32 \
- _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters32)
-
-#endif /* __ATOMISP_COMPAT_IOCTL32_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
index 28afc0bfc43b..95b9113d75e9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -298,7 +298,7 @@ static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd)
ctrl.id = V4L2_CID_LINK_FREQ;
if (v4l2_g_ctrl
- (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0)
+ (isp->inputs[asd->input_curr].sensor->ctrl_handler, &ctrl) == 0)
mipi_freq = ctrl.value;
clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen, mipi_freq,
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
index e176483df301..5f59519ac8e2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -85,9 +85,6 @@ struct gmin_subdev {
bool v2p8_on;
bool v1p2_on;
- int v1p8_gpio;
- int v2p8_gpio;
-
u8 pwm_i2c_addr;
/* For PMIC AXP */
@@ -225,16 +222,6 @@ static struct gmin_cfg_var ffrd8_vars[] = {
{},
};
-/* Cribbed from MCG defaults in the mt9m114 driver, not actually verified
- * vs. T100 hardware
- */
-static struct gmin_cfg_var t100_vars[] = {
- { "INT33F0:00_CsiPort", "0" },
- { "INT33F0:00_CsiLanes", "1" },
- { "INT33F0:00_CamClk", "1" },
- {},
-};
-
static struct gmin_cfg_var mrd7_vars[] = {
{"INT33F8:00_CamType", "1"},
{"INT33F8:00_CsiPort", "1"},
@@ -252,22 +239,6 @@ static struct gmin_cfg_var mrd7_vars[] = {
{},
};
-static struct gmin_cfg_var ecs7_vars[] = {
- {"INT33BE:00_CsiPort", "1"},
- {"INT33BE:00_CsiLanes", "2"},
- {"INT33BE:00_CsiFmt", "13"},
- {"INT33BE:00_CsiBayer", "2"},
- {"INT33BE:00_CamClk", "0"},
-
- {"INT33F0:00_CsiPort", "0"},
- {"INT33F0:00_CsiLanes", "1"},
- {"INT33F0:00_CsiFmt", "13"},
- {"INT33F0:00_CsiBayer", "0"},
- {"INT33F0:00_CamClk", "1"},
- {"gmin_V2P8GPIO", "402"},
- {},
-};
-
static struct gmin_cfg_var i8880_vars[] = {
{"XXOV2680:00_CsiPort", "1"},
{"XXOV2680:00_CsiLanes", "1"},
@@ -310,13 +281,6 @@ static const struct dmi_system_id gmin_vars[] = {
.driver_data = ffrd8_vars,
},
{
- .ident = "T100TA",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "T100TA"),
- },
- .driver_data = t100_vars,
- },
- {
.ident = "MRD7",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "TABLET"),
@@ -325,13 +289,6 @@ static const struct dmi_system_id gmin_vars[] = {
.driver_data = mrd7_vars,
},
{
- .ident = "ST70408",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "ST70408"),
- },
- .driver_data = ecs7_vars,
- },
- {
.ident = "VTA0803",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VTA0803"),
@@ -516,9 +473,9 @@ static int gmin_subdev_add(struct gmin_subdev *gs)
dev_info(dev, "%s: ACPI path is %pfw\n", __func__, dev_fwnode(dev));
- /*WA:CHT requires XTAL clock as PLL is not stable.*/
+ /* WA:CHT requires XTAL clock as PLL is not stable. */
gs->clock_src = gmin_get_var_int(dev, false, "ClkSrc",
- VLV2_CLK_PLL_19P2MHZ);
+ VLV2_CLK_PLL_19P2MHZ);
/*
* Get ACPI _PR0 derived clock here already because it is used
@@ -549,23 +506,6 @@ static int gmin_subdev_add(struct gmin_subdev *gs)
dev_info(dev, "will handle gpio1 via ACPI\n");
/*
- * Those are used only when there is an external regulator apart
- * from the PMIC that would be providing power supply, like on the
- * two cases below:
- *
- * The ECS E7 board drives camera 2.8v from an external regulator
- * instead of the PMIC. There's a gmin_CamV2P8 config variable
- * that specifies the GPIO to handle this particular case,
- * but this needs a broader architecture for handling camera power.
- *
- * The CHT RVP board drives camera 1.8v from an* external regulator
- * instead of the PMIC just like ECS E7 board.
- */
-
- gs->v1p8_gpio = gmin_get_var_int(dev, true, "V1P8GPIO", -1);
- gs->v2p8_gpio = gmin_get_var_int(dev, true, "V2P8GPIO", -1);
-
- /*
* FIXME:
*
* The ACPI handling code checks for the _PR? tables in order to
@@ -837,16 +777,6 @@ static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
if (!gs || gs->v1p8_on == on)
return 0;
- if (gs->v1p8_gpio >= 0) {
- pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n",
- gs->v1p8_gpio);
- ret = gpio_request(gs->v1p8_gpio, "camera_v1p8_en");
- if (!ret)
- ret = gpio_direction_output(gs->v1p8_gpio, 0);
- if (ret)
- pr_err("V1P8 GPIO initialization failed\n");
- }
-
gs->v1p8_on = on;
ret = 0;
@@ -861,9 +791,6 @@ static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
goto out; /* Still needed */
}
- if (gs->v1p8_gpio >= 0)
- gpio_set_value(gs->v1p8_gpio, on);
-
if (gs->v1p8_reg) {
regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
if (on)
@@ -918,16 +845,6 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
if (WARN_ON(!gs))
return -ENODEV;
- if (gs->v2p8_gpio >= 0) {
- pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n",
- gs->v2p8_gpio);
- ret = gpio_request(gs->v2p8_gpio, "camera_v2p8");
- if (!ret)
- ret = gpio_direction_output(gs->v2p8_gpio, 0);
- if (ret)
- pr_err("V2P8 GPIO initialization failed\n");
- }
-
if (gs->v2p8_on == on)
return 0;
gs->v2p8_on = on;
@@ -944,9 +861,6 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
goto out; /* Still needed */
}
- if (gs->v2p8_gpio >= 0)
- gpio_set_value(gs->v2p8_gpio, on);
-
if (gs->v2p8_reg) {
regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
if (on)
@@ -1292,7 +1206,7 @@ static int gmin_get_config_dsm_var(struct device *dev,
* if it founds something different than string, letting it
* to fall back to the old code.
*/
- if (cur && cur->type != ACPI_TYPE_STRING) {
+ if (cur->type != ACPI_TYPE_STRING) {
dev_info(dev, "found non-string _DSM entry for '%s'\n", var);
ACPI_FREE(obj);
return -EINVAL;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index 775506757471..5a69580b8251 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -109,14 +109,20 @@
#define DIV_NEAREST_STEP(n, d, step) \
round_down((2 * (n) + (d) * (step)) / (2 * (d)), (step))
+#define SENSOR_ISP_PAD_SINK 0
+#define SENSOR_ISP_PAD_SOURCE 1
+#define SENSOR_ISP_PADS_NUM 2
+
struct atomisp_input_subdev {
enum atomisp_camera_port port;
u32 code; /* MEDIA_BUS_FMT_* */
bool binning_support;
bool crop_support;
- bool camera_on;
- struct v4l2_subdev *camera;
+ bool sensor_on;
+ struct v4l2_subdev *sensor;
+ struct v4l2_subdev *sensor_isp;
struct v4l2_subdev *csi_port;
+ struct v4l2_subdev *csi_remote_source;
/* Sensor rects for sensors which support crop */
struct v4l2_rect native_rect;
struct v4l2_rect active_rect;
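
The new SENSOR_ISP_PAD_* constants model an in-sensor ISP as a two-pad v4l2 subdev sitting between the raw sensor and the CSI-2 receiver. A sketch of how such an entity's pads would be set up; the function name and its caller are hypothetical:

#include <media/v4l2-subdev.h>

static int sensor_isp_init_pads(struct v4l2_subdev *sd, struct media_pad *pads)
{
	pads[SENSOR_ISP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	pads[SENSOR_ISP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;

	sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
	return media_entity_pads_init(&sd->entity, SENSOR_ISP_PADS_NUM, pads);
}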
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 1fb2ba819de3..97d99bed1560 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -386,11 +386,11 @@ static int atomisp_enum_input(struct file *file, void *fh,
if (index >= isp->input_cnt)
return -EINVAL;
- if (!isp->inputs[index].camera)
+ if (!isp->inputs[index].sensor)
return -EINVAL;
memset(input, 0, sizeof(struct v4l2_input));
- strscpy(input->name, isp->inputs[index].camera->name,
+ strscpy(input->name, isp->inputs[index].sensor->name,
sizeof(input->name));
input->type = V4L2_INPUT_TYPE_CAMERA;
@@ -433,7 +433,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
if (input >= isp->input_cnt)
return -EINVAL;
- if (!isp->inputs[input].camera)
+ if (!isp->inputs[input].sensor)
return -EINVAL;
ret = atomisp_pipe_check(pipe, true);
@@ -539,14 +539,14 @@ static int atomisp_enum_framesizes(struct file *file, void *priv,
struct v4l2_subdev_state *act_sd_state;
int ret;
- if (!input->camera)
+ if (!input->sensor)
return -EINVAL;
if (input->crop_support)
return atomisp_enum_framesizes_crop(isp, fsize);
- act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
- ret = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor);
+ ret = v4l2_subdev_call(input->sensor, pad, enum_frame_size,
act_sd_state, &fse);
if (act_sd_state)
v4l2_subdev_unlock_state(act_sd_state);
@@ -577,11 +577,11 @@ static int atomisp_enum_frameintervals(struct file *file, void *priv,
struct v4l2_subdev_state *act_sd_state;
int ret;
- if (!input->camera)
+ if (!input->sensor)
return -EINVAL;
- act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
- ret = v4l2_subdev_call(input->camera, pad, enum_frame_interval,
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor);
+ ret = v4l2_subdev_call(input->sensor, pad, enum_frame_interval,
act_sd_state, &fie);
if (act_sd_state)
v4l2_subdev_unlock_state(act_sd_state);
@@ -609,11 +609,11 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
unsigned int i, fi = 0;
int ret;
- if (!input->camera)
+ if (!input->sensor)
return -EINVAL;
- act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
- ret = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor);
+ ret = v4l2_subdev_call(input->sensor, pad, enum_mbus_code,
act_sd_state, &code);
if (act_sd_state)
v4l2_subdev_unlock_state(act_sd_state);
@@ -945,7 +945,7 @@ int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count)
}
/* stream on the sensor */
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].csi_remote_source,
video, s_stream, 1);
if (ret) {
dev_err(isp->dev, "Starting sensor stream failed: %d\n", ret);
@@ -1002,7 +1002,7 @@ void atomisp_stop_streaming(struct vb2_queue *vq)
atomisp_subdev_cleanup_pending_events(asd);
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].csi_remote_source,
video, s_stream, 0);
if (ret)
dev_warn(isp->dev, "Stopping sensor stream failed: %d\n", ret);
@@ -1332,7 +1332,7 @@ static int atomisp_s_parm(struct file *file, void *fh,
fi.interval = parm->parm.capture.timeperframe;
- rval = v4l2_subdev_call_state_active(isp->inputs[asd->input_curr].camera,
+ rval = v4l2_subdev_call_state_active(isp->inputs[asd->input_curr].csi_remote_source,
pad, set_frame_interval, &fi);
if (!rval)
parm->parm.capture.timeperframe = fi.interval;
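
After this change every s_stream call targets csi_remote_source, i.e. whichever subdev actually feeds the CSI-2 receiver (the sensor-ISP when present, the bare sensor otherwise). A minimal sketch of the pattern, with a hypothetical wrapper name:

static int stream_on_remote(struct atomisp_input_subdev *input,
			    struct device *dev)
{
	int ret = v4l2_subdev_call(input->csi_remote_source, video, s_stream, 1);

	if (ret)
		dev_err(dev, "Starting sensor stream failed: %d\n", ret);
	return ret;
}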
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
index 4feaa0338cb4..57f608f9db56 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h
@@ -33,8 +33,4 @@ void atomisp_stop_streaming(struct vb2_queue *vq);
extern const struct v4l2_ioctl_ops atomisp_ioctl_ops;
-/* compat_ioctl for 32bit userland app and 64bit kernel */
-long atomisp_compat_ioctl32(struct file *file,
- unsigned int cmd, unsigned long arg);
-
#endif /* __ATOMISP_IOCTL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 2841824cd0ca..0bd0bfded4af 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -895,24 +895,25 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
* it emulates a normal v4l2 device there, passing through try_fmt /
* set_fmt to the sensor.
*/
- try_sd_state = __v4l2_subdev_state_alloc(input->camera,
- "atomisp:try_sd_state->lock", &try_sd_state_key);
+ try_sd_state = __v4l2_subdev_state_alloc(input->sensor,
+ "atomisp:try_sd_state->lock",
+ &try_sd_state_key);
if (IS_ERR(try_sd_state))
return;
input->try_sd_state = try_sd_state;
- act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->sensor);
mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ err = v4l2_subdev_call(input->sensor, pad, enum_mbus_code,
act_sd_state, &mbus_code_enum);
if (!err)
input->code = mbus_code_enum.code;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
- err = v4l2_subdev_call(input->camera, pad, get_selection,
+ err = v4l2_subdev_call(input->sensor, pad, get_selection,
act_sd_state, &sel);
if (err)
goto unlock_act_sd_state;
@@ -921,7 +922,7 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP_DEFAULT;
- err = v4l2_subdev_call(input->camera, pad, get_selection,
+ err = v4l2_subdev_call(input->sensor, pad, get_selection,
act_sd_state, &sel);
if (err)
goto unlock_act_sd_state;
@@ -939,7 +940,7 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
fse.code = input->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ err = v4l2_subdev_call(input->sensor, pad, enum_frame_size,
act_sd_state, &fse);
if (err)
break;
@@ -962,22 +963,29 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
sel.which = V4L2_SUBDEV_FORMAT_TRY;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- v4l2_subdev_lock_state(input->try_sd_state);
- err = v4l2_subdev_call(input->camera, pad, set_selection,
+
+ /* Don't lock try_sd_state if the lock is shared with the active state */
+ if (!input->sensor->state_lock)
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ err = v4l2_subdev_call(input->sensor, pad, set_selection,
input->try_sd_state, &sel);
- v4l2_subdev_unlock_state(input->try_sd_state);
+
+ if (!input->sensor->state_lock)
+ v4l2_subdev_unlock_state(input->try_sd_state);
+
if (err)
goto unlock_act_sd_state;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection,
+ err = v4l2_subdev_call(input->sensor, pad, set_selection,
act_sd_state, &sel);
if (err)
goto unlock_act_sd_state;
- dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
+ dev_info(input->sensor->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
input->native_rect.width, input->native_rect.height,
input->active_rect.width, input->active_rect.height,
input->binning_support);
@@ -991,8 +999,9 @@ unlock_act_sd_state:
int atomisp_register_device_nodes(struct atomisp_device *isp)
{
+ struct media_pad *sensor_isp_sink, *sensor_src;
struct atomisp_input_subdev *input;
- int i, err;
+ int i, err, source_pad;
for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
err = media_create_pad_link(&isp->csi2_port[i].subdev.entity,
@@ -1007,12 +1016,33 @@ int atomisp_register_device_nodes(struct atomisp_device *isp)
input = &isp->inputs[isp->input_cnt];
input->port = i;
- input->camera = isp->sensor_subdevs[i];
input->csi_port = &isp->csi2_port[i].subdev;
+ input->csi_remote_source = isp->sensor_subdevs[i];
+
+ /*
+	 * Special case for sensors with an ISP in the sensor modelled
+ * as a separate v4l2-subdev, like the mt9m114.
+ */
+ if (isp->sensor_subdevs[i]->entity.function == MEDIA_ENT_F_PROC_VIDEO_ISP) {
+ input->sensor_isp = isp->sensor_subdevs[i];
+ source_pad = SENSOR_ISP_PAD_SOURCE;
+
+ sensor_isp_sink = &input->sensor_isp->entity.pads[SENSOR_ISP_PAD_SINK];
+ sensor_src = media_pad_remote_pad_first(sensor_isp_sink);
+ if (!sensor_src) {
+			dev_err(isp->dev, "Error: could not find remote pad for sensor ISP sink\n");
+ return -ENOENT;
+ }
+
+ input->sensor = media_entity_to_v4l2_subdev(sensor_src->entity);
+ } else {
+ input->sensor = isp->sensor_subdevs[i];
+ source_pad = 0;
+ }
atomisp_init_sensor(input);
- err = media_create_pad_link(&input->camera->entity, 0,
+ err = media_create_pad_link(&isp->sensor_subdevs[i]->entity, source_pad,
&isp->csi2_port[i].subdev.entity,
CSI2_PAD_SINK,
MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
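
The conditional locking added above avoids a self-deadlock: some sensor drivers register a shared state_lock, in which case the try state is protected by the same mutex that v4l2_subdev_lock_and_get_active_state() already holds. Below is a minimal userspace sketch of the same pattern, using pthreads as a stand-in for the kernel lock; all names are hypothetical, not the v4l2 API.

#include <pthread.h>
#include <stdio.h>

struct state {
	pthread_mutex_t *lock;	/* may point at a lock shared with another state */
	pthread_mutex_t own;	/* used when the lock is not shared */
	int value;
};

/* Lock the try state only when it does not share the already-held
 * active-state lock; locking a held non-recursive mutex would deadlock. */
static void touch_try_state(struct state *try_state, int shared_lock_held)
{
	if (!shared_lock_held)
		pthread_mutex_lock(try_state->lock);

	try_state->value++;	/* stand-in for set_selection on the try state */

	if (!shared_lock_held)
		pthread_mutex_unlock(try_state->lock);
}

int main(void)
{
	struct state s = { .own = PTHREAD_MUTEX_INITIALIZER };

	s.lock = &s.own;
	touch_try_state(&s, 0);
	printf("value=%d\n", s.value);
	return 0;
}
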
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h
index 2c47e7820bd7..69e68ecd6bc3 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/irq_global.h
@@ -18,7 +18,7 @@
#endif
/* The IRQ is not mapped uniformly on its related interfaces */
-#define IRQ_SW_CHANNEL_OFFSET hrt_isp_css_irq_sw_pin_0
+#define IRQ_SW_CHANNEL_OFFSET HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID
typedef enum {
IRQ_SW_CHANNEL0_ID = hrt_isp_css_irq_sw_pin_0 - IRQ_SW_CHANNEL_OFFSET,
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
index f37802878528..2bed08435755 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
@@ -19,7 +19,7 @@
#endif
/* DVS 2.0 Coefficient types. This structure contains 4 pointers to
- * arrays that contain the coeffients for each type.
+ * arrays that contain the coefficients for each type.
*/
struct ia_css_dvs2_coef_types {
s16 *odd_real; /** real part of the odd coefficients*/
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
index ece5e3da34ee..127f12ba2214 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
@@ -114,7 +114,7 @@ configure_dma(
}
int ia_css_vf_configure(const struct ia_css_binary *binary,
- const struct ia_css_frame_info *out_info,
+ const struct ia_css_frame_info *out_info,
struct ia_css_frame_info *vf_info,
unsigned int *downscale_log2)
{
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
index 9818771a35e5..84220359c957 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
@@ -121,7 +121,8 @@ static const char *const pipe_id_to_str[] = {
/* [IA_CSS_PIPE_ID_YUVPP] =*/ "yuvpp",
};
-static char dot_id_input_bin[SH_CSS_MAX_BINARY_NAME + 10];
+/* 27 is the combined length of _stage%d(pipe%d)\0. */
+static char dot_id_input_bin[SH_CSS_MAX_BINARY_NAME + 27];
static char ring_buffer[200];
void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...)
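
The enlarged dot_id_input_bin above reserves room for the "_stage%d(pipe%d)" suffix appended to a binary name. A defensive, portable way to validate such worst-case sizes is to measure the formatted length with snprintf(NULL, 0, ...). The sketch below (BINARY_NAME_MAX is a stand-in for SH_CSS_MAX_BINARY_NAME) prints the hard upper bound for arbitrary int values, which is larger than the 27 bytes the patch assumes for realistically small stage and pipe numbers.

#include <stdio.h>
#include <limits.h>

#define BINARY_NAME_MAX 64	/* stand-in for SH_CSS_MAX_BINARY_NAME */

int main(void)
{
	/* Worst-case rendering of the suffix appended to the binary name. */
	int need = snprintf(NULL, 0, "_stage%d(pipe%d)", INT_MAX, INT_MAX);

	/* +1 for the terminating NUL that snprintf does not count. */
	printf("suffix needs %d bytes, buffer should be %d\n",
	       need + 1, BINARY_NAME_MAX + need + 1);
	return 0;
}
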
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 2855ba2296ac..77360bfe081a 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -295,7 +295,7 @@ static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
*/
static void prp_eof_timeout(struct timer_list *t)
{
- struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
+ struct prp_priv *priv = timer_container_of(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
struct imx_ic_priv *ic_priv = priv->ic_priv;
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index f1d7fce8c020..a7cd3ef95fc3 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -356,7 +356,7 @@ static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
*/
static void csi_idmac_eof_timeout(struct timer_list *t)
{
- struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
+ struct csi_priv *priv = timer_container_of(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
v4l2_err(&priv->sd, "EOF timeout\n");
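
Both from_timer() and its replacement timer_container_of() are thin wrappers around container_of(): given a pointer to the embedded struct timer_list, they recover the enclosing private structure. A standalone sketch of the underlying idiom follows; the macro here is the classic userspace re-implementation, not the kernel's, and the struct names are illustrative.

#include <stddef.h>
#include <stdio.h>

/* Classic container_of: map a pointer to an embedded member back to
 * the structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { int expires; };

struct priv {
	int id;
	struct timer eof_timeout_timer;
};

static void timeout_cb(struct timer *t)
{
	struct priv *priv = container_of(t, struct priv, eof_timeout_timer);

	printf("timeout on priv %d\n", priv->id);
}

int main(void)
{
	struct priv p = { .id = 42 };

	timeout_cb(&p.eof_timeout_timer);
	return 0;
}
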
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 4fc167b42cf0..d14b4d173448 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -655,13 +655,14 @@ static void assemble_hw_pps(struct rkvdec_ctx *ctx,
#define WRITE_PPS(value, field) set_ps_field(hw_ps->info, field, value)
/* write sps */
- WRITE_PPS(0xf, SEQ_PARAMETER_SET_ID);
- WRITE_PPS(0xff, PROFILE_IDC);
- WRITE_PPS(1, CONSTRAINT_SET3_FLAG);
+ WRITE_PPS(sps->seq_parameter_set_id, SEQ_PARAMETER_SET_ID);
+ WRITE_PPS(sps->profile_idc, PROFILE_IDC);
+ WRITE_PPS(!!(sps->constraint_set_flags & (1 << 3)), CONSTRAINT_SET3_FLAG);
WRITE_PPS(sps->chroma_format_idc, CHROMA_FORMAT_IDC);
WRITE_PPS(sps->bit_depth_luma_minus8, BIT_DEPTH_LUMA);
WRITE_PPS(sps->bit_depth_chroma_minus8, BIT_DEPTH_CHROMA);
- WRITE_PPS(0, QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG);
+ WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS),
+ QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG);
WRITE_PPS(sps->log2_max_frame_num_minus4, LOG2_MAX_FRAME_NUM_MINUS4);
WRITE_PPS(sps->max_num_ref_frames, MAX_NUM_REF_FRAMES);
WRITE_PPS(sps->pic_order_cnt_type, PIC_ORDER_CNT_TYPE);
@@ -688,8 +689,8 @@ static void assemble_hw_pps(struct rkvdec_ctx *ctx,
DIRECT_8X8_INFERENCE_FLAG);
/* write pps */
- WRITE_PPS(0xff, PIC_PARAMETER_SET_ID);
- WRITE_PPS(0x1f, PPS_SEQ_PARAMETER_SET_ID);
+ WRITE_PPS(pps->pic_parameter_set_id, PIC_PARAMETER_SET_ID);
+ WRITE_PPS(pps->seq_parameter_set_id, PPS_SEQ_PARAMETER_SET_ID);
WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE),
ENTROPY_CODING_MODE_FLAG);
WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT),
@@ -896,9 +897,9 @@ static void config_registers(struct rkvdec_ctx *ctx,
dma_addr_t rlc_addr;
dma_addr_t refer_addr;
u32 rlc_len;
- u32 hor_virstride = 0;
- u32 ver_virstride = 0;
- u32 y_virstride = 0;
+ u32 hor_virstride;
+ u32 ver_virstride;
+ u32 y_virstride;
u32 yuv_virstride = 0;
u32 offset;
dma_addr_t dst_addr;
@@ -909,16 +910,16 @@ static void config_registers(struct rkvdec_ctx *ctx,
f = &ctx->decoded_fmt;
dst_fmt = &f->fmt.pix_mp;
- hor_virstride = (sps->bit_depth_luma_minus8 + 8) * dst_fmt->width / 8;
- ver_virstride = round_up(dst_fmt->height, 16);
+ hor_virstride = dst_fmt->plane_fmt[0].bytesperline;
+ ver_virstride = dst_fmt->height;
y_virstride = hor_virstride * ver_virstride;
if (sps->chroma_format_idc == 0)
yuv_virstride = y_virstride;
else if (sps->chroma_format_idc == 1)
- yuv_virstride += y_virstride + y_virstride / 2;
+ yuv_virstride = y_virstride + y_virstride / 2;
else if (sps->chroma_format_idc == 2)
- yuv_virstride += 2 * y_virstride;
+ yuv_virstride = 2 * y_virstride;
reg = RKVDEC_Y_HOR_VIRSTRIDE(hor_virstride / 16) |
RKVDEC_UV_HOR_VIRSTRIDE(hor_virstride / 16) |
@@ -1026,24 +1027,42 @@ static int rkvdec_h264_adjust_fmt(struct rkvdec_ctx *ctx,
return 0;
}
+static enum rkvdec_image_fmt rkvdec_h264_get_image_fmt(struct rkvdec_ctx *ctx,
+ struct v4l2_ctrl *ctrl)
+{
+ const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
+
+ if (ctrl->id != V4L2_CID_STATELESS_H264_SPS)
+ return RKVDEC_IMG_FMT_ANY;
+
+ if (sps->bit_depth_luma_minus8 == 0) {
+ if (sps->chroma_format_idc == 2)
+ return RKVDEC_IMG_FMT_422_8BIT;
+ else
+ return RKVDEC_IMG_FMT_420_8BIT;
+ } else if (sps->bit_depth_luma_minus8 == 2) {
+ if (sps->chroma_format_idc == 2)
+ return RKVDEC_IMG_FMT_422_10BIT;
+ else
+ return RKVDEC_IMG_FMT_420_10BIT;
+ }
+
+ return RKVDEC_IMG_FMT_ANY;
+}
+
static int rkvdec_h264_validate_sps(struct rkvdec_ctx *ctx,
const struct v4l2_ctrl_h264_sps *sps)
{
unsigned int width, height;
- /*
- * TODO: The hardware supports 10-bit and 4:2:2 profiles,
- * but it's currently broken in the driver.
- * Reject them for now, until it's fixed.
- */
- if (sps->chroma_format_idc > 1)
- /* Only 4:0:0 and 4:2:0 are supported */
+ if (sps->chroma_format_idc > 2)
+ /* Only 4:0:0, 4:2:0 and 4:2:2 are supported */
return -EINVAL;
if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
/* Luma and chroma bit depth mismatch */
return -EINVAL;
- if (sps->bit_depth_luma_minus8 != 0)
- /* Only 8-bit is supported */
+ if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
+		/* Only 8-bit and 10-bit are supported */
return -EINVAL;
width = (sps->pic_width_in_mbs_minus1 + 1) * 16;
@@ -1189,4 +1208,5 @@ const struct rkvdec_coded_fmt_ops rkvdec_h264_fmt_ops = {
.stop = rkvdec_h264_stop,
.run = rkvdec_h264_run,
.try_ctrl = rkvdec_h264_try_ctrl,
+ .get_image_fmt = rkvdec_h264_get_image_fmt,
};
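
The new get_image_fmt hook above derives a capture image format from the parsed SPS: the luma bit depth selects 8- versus 10-bit, and chroma_format_idc distinguishes 4:2:2 from 4:2:0. A self-contained sketch of that mapping (enum and function names here are hypothetical):

#include <stdio.h>

enum img_fmt { FMT_ANY, FMT_420_8, FMT_420_10, FMT_422_8, FMT_422_10 };

/* Mirror of the selection logic above: bit depth picks 8- vs 10-bit,
 * chroma_format_idc == 2 picks 4:2:2 over 4:2:0. */
static enum img_fmt sps_to_img_fmt(int bit_depth_luma_minus8,
				   int chroma_format_idc)
{
	if (bit_depth_luma_minus8 == 0)
		return chroma_format_idc == 2 ? FMT_422_8 : FMT_420_8;
	if (bit_depth_luma_minus8 == 2)
		return chroma_format_idc == 2 ? FMT_422_10 : FMT_420_10;
	return FMT_ANY;	/* unsupported depth: let any format match */
}

int main(void)
{
	printf("%d %d %d\n",
	       sps_to_img_fmt(0, 1),	/* 4:2:0 8-bit  -> FMT_420_8 */
	       sps_to_img_fmt(2, 2),	/* 4:2:2 10-bit -> FMT_422_10 */
	       sps_to_img_fmt(4, 1));	/* 12-bit       -> FMT_ANY */
	return 0;
}
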
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index f9bef5173bf2..3367902f22de 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -27,6 +27,95 @@
#include "rkvdec.h"
#include "rkvdec-regs.h"
+static bool rkvdec_image_fmt_match(enum rkvdec_image_fmt fmt1,
+ enum rkvdec_image_fmt fmt2)
+{
+ return fmt1 == fmt2 || fmt2 == RKVDEC_IMG_FMT_ANY ||
+ fmt1 == RKVDEC_IMG_FMT_ANY;
+}
+
+static bool rkvdec_image_fmt_changed(struct rkvdec_ctx *ctx,
+ enum rkvdec_image_fmt image_fmt)
+{
+ if (image_fmt == RKVDEC_IMG_FMT_ANY)
+ return false;
+
+ return ctx->image_fmt != image_fmt;
+}
+
+static u32 rkvdec_enum_decoded_fmt(struct rkvdec_ctx *ctx, int index,
+ enum rkvdec_image_fmt image_fmt)
+{
+ const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
+ int fmt_idx = -1;
+ unsigned int i;
+
+ if (WARN_ON(!desc))
+ return 0;
+
+ for (i = 0; i < desc->num_decoded_fmts; i++) {
+ if (!rkvdec_image_fmt_match(desc->decoded_fmts[i].image_fmt,
+ image_fmt))
+ continue;
+ fmt_idx++;
+ if (index == fmt_idx)
+ return desc->decoded_fmts[i].fourcc;
+ }
+
+ return 0;
+}
+
+static bool rkvdec_is_valid_fmt(struct rkvdec_ctx *ctx, u32 fourcc,
+ enum rkvdec_image_fmt image_fmt)
+{
+ const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
+ unsigned int i;
+
+ for (i = 0; i < desc->num_decoded_fmts; i++) {
+ if (rkvdec_image_fmt_match(desc->decoded_fmts[i].image_fmt,
+ image_fmt) &&
+ desc->decoded_fmts[i].fourcc == fourcc)
+ return true;
+ }
+
+ return false;
+}
+
+static void rkvdec_fill_decoded_pixfmt(struct rkvdec_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp)
+{
+ v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
+ pix_mp->width, pix_mp->height);
+ pix_mp->plane_fmt[0].sizeimage += 128 *
+ DIV_ROUND_UP(pix_mp->width, 16) *
+ DIV_ROUND_UP(pix_mp->height, 16);
+}
+
+static void rkvdec_reset_fmt(struct rkvdec_ctx *ctx, struct v4l2_format *f,
+ u32 fourcc)
+{
+ memset(f, 0, sizeof(*f));
+ f->fmt.pix_mp.pixelformat = fourcc;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static void rkvdec_reset_decoded_fmt(struct rkvdec_ctx *ctx)
+{
+ struct v4l2_format *f = &ctx->decoded_fmt;
+ u32 fourcc;
+
+ fourcc = rkvdec_enum_decoded_fmt(ctx, 0, ctx->image_fmt);
+ rkvdec_reset_fmt(ctx, f, fourcc);
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f->fmt.pix_mp.width = ctx->coded_fmt.fmt.pix_mp.width;
+ f->fmt.pix_mp.height = ctx->coded_fmt.fmt.pix_mp.height;
+ rkvdec_fill_decoded_pixfmt(ctx, &f->fmt.pix_mp);
+}
+
static int rkvdec_try_ctrl(struct v4l2_ctrl *ctrl)
{
struct rkvdec_ctx *ctx = container_of(ctrl->handler, struct rkvdec_ctx, ctrl_hdl);
@@ -38,8 +127,34 @@ static int rkvdec_try_ctrl(struct v4l2_ctrl *ctrl)
return 0;
}
+static int rkvdec_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rkvdec_ctx *ctx = container_of(ctrl->handler, struct rkvdec_ctx, ctrl_hdl);
+ const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
+ enum rkvdec_image_fmt image_fmt;
+ struct vb2_queue *vq;
+
+ /* Check if this change requires a capture format reset */
+ if (!desc->ops->get_image_fmt)
+ return 0;
+
+ image_fmt = desc->ops->get_image_fmt(ctx, ctrl);
+ if (rkvdec_image_fmt_changed(ctx, image_fmt)) {
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->image_fmt = image_fmt;
+ rkvdec_reset_decoded_fmt(ctx);
+ }
+
+ return 0;
+}
+
static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
.try_ctrl = rkvdec_try_ctrl,
+ .s_ctrl = rkvdec_s_ctrl,
};
static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
@@ -70,10 +185,11 @@ static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
},
{
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
- .cfg.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
- .cfg.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .cfg.min = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE,
+ .cfg.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA,
.cfg.menu_skip_mask =
- BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE),
.cfg.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
},
{
@@ -88,8 +204,23 @@ static const struct rkvdec_ctrls rkvdec_h264_ctrls = {
.num_ctrls = ARRAY_SIZE(rkvdec_h264_ctrl_descs),
};
-static const u32 rkvdec_h264_vp9_decoded_fmts[] = {
- V4L2_PIX_FMT_NV12,
+static const struct rkvdec_decoded_fmt_desc rkvdec_h264_decoded_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .image_fmt = RKVDEC_IMG_FMT_420_8BIT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV15,
+ .image_fmt = RKVDEC_IMG_FMT_420_10BIT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .image_fmt = RKVDEC_IMG_FMT_422_8BIT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV20,
+ .image_fmt = RKVDEC_IMG_FMT_422_10BIT,
+ },
};
static const struct rkvdec_ctrl_desc rkvdec_vp9_ctrl_descs[] = {
@@ -112,21 +243,28 @@ static const struct rkvdec_ctrls rkvdec_vp9_ctrls = {
.num_ctrls = ARRAY_SIZE(rkvdec_vp9_ctrl_descs),
};
+static const struct rkvdec_decoded_fmt_desc rkvdec_vp9_decoded_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .image_fmt = RKVDEC_IMG_FMT_420_8BIT,
+ },
+};
+
static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.frmsize = {
- .min_width = 48,
+ .min_width = 64,
.max_width = 4096,
- .step_width = 16,
+ .step_width = 64,
.min_height = 48,
.max_height = 2560,
.step_height = 16,
},
.ctrls = &rkvdec_h264_ctrls,
.ops = &rkvdec_h264_fmt_ops,
- .num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_vp9_decoded_fmts),
- .decoded_fmts = rkvdec_h264_vp9_decoded_fmts,
+ .num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_decoded_fmts),
+ .decoded_fmts = rkvdec_h264_decoded_fmts,
.subsystem_flags = VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF,
},
{
@@ -141,8 +279,8 @@ static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
},
.ctrls = &rkvdec_vp9_ctrls,
.ops = &rkvdec_vp9_fmt_ops,
- .num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_vp9_decoded_fmts),
- .decoded_fmts = rkvdec_h264_vp9_decoded_fmts,
+ .num_decoded_fmts = ARRAY_SIZE(rkvdec_vp9_decoded_fmts),
+ .decoded_fmts = rkvdec_vp9_decoded_fmts,
}
};
@@ -159,18 +297,6 @@ rkvdec_find_coded_fmt_desc(u32 fourcc)
return NULL;
}
-static void rkvdec_reset_fmt(struct rkvdec_ctx *ctx, struct v4l2_format *f,
- u32 fourcc)
-{
- memset(f, 0, sizeof(*f));
- f->fmt.pix_mp.pixelformat = fourcc;
- f->fmt.pix_mp.field = V4L2_FIELD_NONE;
- f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
- f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
- f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
- f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
-}
-
static void rkvdec_reset_coded_fmt(struct rkvdec_ctx *ctx)
{
struct v4l2_format *f = &ctx->coded_fmt;
@@ -186,21 +312,6 @@ static void rkvdec_reset_coded_fmt(struct rkvdec_ctx *ctx)
ctx->coded_fmt_desc->ops->adjust_fmt(ctx, f);
}
-static void rkvdec_reset_decoded_fmt(struct rkvdec_ctx *ctx)
-{
- struct v4l2_format *f = &ctx->decoded_fmt;
-
- rkvdec_reset_fmt(ctx, f, ctx->coded_fmt_desc->decoded_fmts[0]);
- f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- v4l2_fill_pixfmt_mp(&f->fmt.pix_mp,
- ctx->coded_fmt_desc->decoded_fmts[0],
- ctx->coded_fmt.fmt.pix_mp.width,
- ctx->coded_fmt.fmt.pix_mp.height);
- f->fmt.pix_mp.plane_fmt[0].sizeimage += 128 *
- DIV_ROUND_UP(f->fmt.pix_mp.width, 16) *
- DIV_ROUND_UP(f->fmt.pix_mp.height, 16);
-}
-
static int rkvdec_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
@@ -213,8 +324,14 @@ static int rkvdec_enum_framesizes(struct file *file, void *priv,
if (!fmt)
return -EINVAL;
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = fmt->frmsize;
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = 1;
+ fsize->stepwise.max_width = fmt->frmsize.max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = 1;
+ fsize->stepwise.max_height = fmt->frmsize.max_height;
+ fsize->stepwise.step_height = 1;
+
return 0;
}
@@ -238,7 +355,6 @@ static int rkvdec_try_capture_fmt(struct file *file, void *priv,
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
const struct rkvdec_coded_fmt_desc *coded_desc;
- unsigned int i;
/*
* The codec context should point to a coded format desc, if the format
@@ -249,13 +365,9 @@ static int rkvdec_try_capture_fmt(struct file *file, void *priv,
if (WARN_ON(!coded_desc))
return -EINVAL;
- for (i = 0; i < coded_desc->num_decoded_fmts; i++) {
- if (coded_desc->decoded_fmts[i] == pix_mp->pixelformat)
- break;
- }
-
- if (i == coded_desc->num_decoded_fmts)
- pix_mp->pixelformat = coded_desc->decoded_fmts[0];
+ if (!rkvdec_is_valid_fmt(ctx, pix_mp->pixelformat, ctx->image_fmt))
+ pix_mp->pixelformat = rkvdec_enum_decoded_fmt(ctx, 0,
+ ctx->image_fmt);
/* Always apply the frmsize constraint of the coded end. */
pix_mp->width = max(pix_mp->width, ctx->coded_fmt.fmt.pix_mp.width);
@@ -264,12 +376,7 @@ static int rkvdec_try_capture_fmt(struct file *file, void *priv,
&pix_mp->height,
&coded_desc->frmsize);
- v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
- pix_mp->width, pix_mp->height);
- pix_mp->plane_fmt[0].sizeimage +=
- 128 *
- DIV_ROUND_UP(pix_mp->width, 16) *
- DIV_ROUND_UP(pix_mp->height, 16);
+ rkvdec_fill_decoded_pixfmt(ctx, pix_mp);
pix_mp->field = V4L2_FIELD_NONE;
return 0;
@@ -377,6 +484,7 @@ static int rkvdec_s_output_fmt(struct file *file, void *priv,
*
* Note that this will propagate any size changes to the decoded format.
*/
+ ctx->image_fmt = RKVDEC_IMG_FMT_ANY;
rkvdec_reset_decoded_fmt(ctx);
/* Propagate colorspace information to capture. */
@@ -424,14 +532,13 @@ static int rkvdec_enum_capture_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ u32 fourcc;
- if (WARN_ON(!ctx->coded_fmt_desc))
- return -EINVAL;
-
- if (f->index >= ctx->coded_fmt_desc->num_decoded_fmts)
+ fourcc = rkvdec_enum_decoded_fmt(ctx, f->index, ctx->image_fmt);
+ if (!fourcc)
return -EINVAL;
- f->pixelformat = ctx->coded_fmt_desc->decoded_fmts[f->index];
+ f->pixelformat = fourcc;
return 0;
}
@@ -819,24 +926,24 @@ static int rkvdec_open(struct file *filp)
rkvdec_reset_decoded_fmt(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(filp));
- ret = rkvdec_init_ctrls(ctx);
- if (ret)
- goto err_free_ctx;
-
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rkvdec->m2m_dev, ctx,
rkvdec_queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
- goto err_cleanup_ctrls;
+ goto err_free_ctx;
}
+ ret = rkvdec_init_ctrls(ctx);
+ if (ret)
+ goto err_cleanup_m2m_ctx;
+
filp->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
return 0;
-err_cleanup_ctrls:
- v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_cleanup_m2m_ctx:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
err_free_ctx:
kfree(ctx);
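
The new helpers above filter the per-codec decoded-format table through the image format inferred from the bitstream, with RKVDEC_IMG_FMT_ANY acting as a wildcard on either side of the comparison. A standalone sketch of the table walk (the fourcc strings and names are illustrative only):

#include <stdio.h>

enum img_fmt { FMT_ANY, FMT_420_8, FMT_420_10, FMT_422_8, FMT_422_10 };

struct decoded_fmt { const char *fourcc; enum img_fmt img; };

static const struct decoded_fmt fmts[] = {
	{ "NV12", FMT_420_8 },
	{ "NV15", FMT_420_10 },
	{ "NV16", FMT_422_8 },
	{ "NV20", FMT_422_10 },
};

/* FMT_ANY acts as a wildcard on either side, as in the driver. */
static int fmt_match(enum img_fmt a, enum img_fmt b)
{
	return a == b || a == FMT_ANY || b == FMT_ANY;
}

int main(void)
{
	enum img_fmt current = FMT_422_10;	/* e.g. set from a parsed SPS */
	unsigned int i;

	/* Enumerate only the capture formats compatible with the stream. */
	for (i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
		if (fmt_match(fmts[i].img, current))
			printf("enumerable: %s\n", fmts[i].fourcc);
	return 0;
}
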
diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
index 633335ebb9c4..9a9f4fced7a1 100644
--- a/drivers/staging/media/rkvdec/rkvdec.h
+++ b/drivers/staging/media/rkvdec/rkvdec.h
@@ -73,6 +73,21 @@ struct rkvdec_coded_fmt_ops {
struct vb2_v4l2_buffer *dst_buf,
enum vb2_buffer_state result);
int (*try_ctrl)(struct rkvdec_ctx *ctx, struct v4l2_ctrl *ctrl);
+ enum rkvdec_image_fmt (*get_image_fmt)(struct rkvdec_ctx *ctx,
+ struct v4l2_ctrl *ctrl);
+};
+
+enum rkvdec_image_fmt {
+ RKVDEC_IMG_FMT_ANY = 0,
+ RKVDEC_IMG_FMT_420_8BIT,
+ RKVDEC_IMG_FMT_420_10BIT,
+ RKVDEC_IMG_FMT_422_8BIT,
+ RKVDEC_IMG_FMT_422_10BIT,
+};
+
+struct rkvdec_decoded_fmt_desc {
+ u32 fourcc;
+ enum rkvdec_image_fmt image_fmt;
};
struct rkvdec_coded_fmt_desc {
@@ -81,7 +96,7 @@ struct rkvdec_coded_fmt_desc {
const struct rkvdec_ctrls *ctrls;
const struct rkvdec_coded_fmt_ops *ops;
unsigned int num_decoded_fmts;
- const u32 *decoded_fmts;
+ const struct rkvdec_decoded_fmt_desc *decoded_fmts;
u32 subsystem_flags;
};
@@ -104,6 +119,7 @@ struct rkvdec_ctx {
const struct rkvdec_coded_fmt_desc *coded_fmt_desc;
struct v4l2_ctrl_handler ctrl_hdl;
struct rkvdec_dev *dev;
+ enum rkvdec_image_fmt image_fmt;
void *priv;
};
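
rkvdec_fill_decoded_pixfmt(), consolidated in the rkvdec.c hunks above, pads sizeimage by 128 bytes per 16x16 macroblock on top of the plane size computed by v4l2_fill_pixfmt_mp(); the extra space appears to be reserved for per-macroblock hardware metadata such as motion vectors (an assumption here, not stated in the patch). The arithmetic, as a runnable sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 1920, height = 1088;
	/* NV12: one luma byte plus half a chroma byte per pixel. */
	unsigned long sizeimage = (unsigned long)width * height * 3 / 2;

	/* Extra 128 bytes per 16x16 macroblock, as reserved by the driver. */
	sizeimage += 128UL * DIV_ROUND_UP(width, 16) * DIV_ROUND_UP(height, 16);

	printf("sizeimage = %lu\n", sizeimage);
	return 0;
}
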
diff --git a/drivers/staging/media/starfive/camss/stf-isp.c b/drivers/staging/media/starfive/camss/stf-isp.c
index 4e6e26736852..df7a903fbb1b 100644
--- a/drivers/staging/media/starfive/camss/stf-isp.c
+++ b/drivers/staging/media/starfive/camss/stf-isp.c
@@ -278,7 +278,7 @@ static int isp_set_selection(struct v4l2_subdev *sd,
isp_set_format(sd, state, &fmt);
}
- dev_dbg(isp_dev->stfcamss->dev, "pad: %d sel(%d,%d)/%dx%d\n",
+ dev_dbg(isp_dev->stfcamss->dev, "pad: %d sel(%d,%d)/%ux%u\n",
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
return 0;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 50022bb5911e..383a6f7c06f4 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -389,7 +389,7 @@ void update_bmc_sta(struct adapter *padapter)
psta->qos_option = 0;
psta->htpriv.ht_option = false;
- psta->ieee8021x_blocked = 0;
+ psta->ieee8021x_blocked = false;
memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
index d54095f50113..f4b19ef7b341 100644
--- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -8,14 +8,14 @@
#include <rtw_btcoex.h>
#include <hal_btcoex.h>
-void rtw_btcoex_MediaStatusNotify(struct adapter *padapter, u8 mediaStatus)
+void rtw_btcoex_MediaStatusNotify(struct adapter *padapter, u8 media_status)
{
- if ((mediaStatus == RT_MEDIA_CONNECT)
+ if ((media_status == RT_MEDIA_CONNECT)
&& (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)) {
rtw_hal_set_hwreg(padapter, HW_VAR_DL_RSVD_PAGE, NULL);
}
- hal_btcoex_MediaStatusNotify(padapter, mediaStatus);
+ hal_btcoex_MediaStatusNotify(padapter, media_status);
}
void rtw_btcoex_HaltNotify(struct adapter *padapter)
@@ -52,14 +52,14 @@ void rtw_btcoex_RejectApAggregatedPacket(struct adapter *padapter, u8 enable)
void rtw_btcoex_LPS_Enter(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv;
- u8 lpsVal;
+ u8 lps_val;
pwrpriv = adapter_to_pwrctl(padapter);
pwrpriv->bpower_saving = true;
- lpsVal = hal_btcoex_LpsVal(padapter);
- rtw_set_ps_mode(padapter, PS_MODE_MIN, 0, lpsVal, "BTCOEX");
+ lps_val = hal_btcoex_LpsVal(padapter);
+ rtw_set_ps_mode(padapter, PS_MODE_MIN, 0, lps_val, "BTCOEX");
}
void rtw_btcoex_LPS_Leave(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 1c9e8b01d9d8..437934dd255e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -273,9 +273,9 @@ struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
/* spin_lock_bh(&(queue->lock)); */
spin_lock_irqsave(&queue->lock, irqL);
- if (list_empty(&queue->queue))
+ if (list_empty(&queue->queue)) {
obj = NULL;
- else {
+ } else {
obj = container_of(get_next(&queue->queue), struct cmd_obj, list);
list_del_init(&obj->list);
}
@@ -695,7 +695,6 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
/* for ies is fix buf size */
t_len = sizeof(struct wlan_bssid_ex);
-
/* for hidden ap to set fw_state here */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) != true) {
switch (ndis_network_mode) {
@@ -738,7 +737,6 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork->ie_length = rtw_restruct_sec_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length);
-
pqospriv->qos_option = 0;
if (pregistrypriv->wmm_enable) {
@@ -1032,7 +1030,6 @@ u8 rtw_reset_securitypriv_cmd(struct adapter *padapter)
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
@@ -1099,7 +1096,6 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
pdrvextra_cmd_parm->pbuf = NULL;
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
@@ -1256,7 +1252,6 @@ static void dynamic_chk_wk_hdl(struct adapter *padapter)
/* */
hal_btcoex_Handler(padapter);
-
/* always call rtw_ps_processor() last. */
rtw_ps_processor(padapter);
}
@@ -1367,7 +1362,6 @@ u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (!ph2c) {
res = _FAIL;
@@ -1850,7 +1844,6 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
spin_lock_bh(&pmlmepriv->lock);
-
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->mac_address);
if (!psta) {
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 0ed420f3d096..53d4c113b19c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -988,11 +988,10 @@ void rtw_macaddr_cfg(struct device *dev, u8 *mac_addr)
if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac)) {
addr = of_get_property(np, "local-mac-address", &len);
- if (addr && len == ETH_ALEN) {
+ if (addr && len == ETH_ALEN)
ether_addr_copy(mac_addr, addr);
- } else {
+ else
eth_random_addr(mac_addr);
- }
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 58de0c2fdd68..6301dbbcc472 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -1460,8 +1460,8 @@ void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf)
*/
void _rtw_join_timeout_handler(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t,
- mlmepriv.assoc_timer);
+ struct adapter *adapter = timer_container_of(adapter, t,
+ mlmepriv.assoc_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
@@ -1504,8 +1504,8 @@ void _rtw_join_timeout_handler(struct timer_list *t)
*/
void rtw_scan_timeout_handler(struct timer_list *t)
{
- struct adapter *adapter = from_timer(adapter, t,
- mlmepriv.scan_to_timer);
+ struct adapter *adapter = timer_container_of(adapter, t,
+ mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
spin_lock_bh(&pmlmepriv->lock);
@@ -2022,12 +2022,12 @@ signed int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, u
}
iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid);
- if (iEntry < 0) {
+ if (iEntry < 0)
return ielength;
- } else {
- if (authmode == WLAN_EID_RSN)
- ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
- }
+
+ if (authmode == WLAN_EID_RSN)
+ ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
+
return ielength;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 3d36b6f005e0..bc980d21d50e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -3511,7 +3511,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
/* if ((psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.mac_address)) != NULL) */
psta = rtw_get_stainfo(pstapriv, raddr);
if (psta) {
- start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07]&0xfff) + 1;
+ start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07] % 4096u) + 1;
psta->BA_starting_seqctrl[status & 0x07] = start_seq;
@@ -5088,7 +5088,7 @@ void linked_status_chk(struct adapter *padapter)
void survey_timer_hdl(struct timer_list *t)
{
struct adapter *padapter =
- from_timer(padapter, t, mlmeextpriv.survey_timer);
+ timer_container_of(padapter, t, mlmeextpriv.survey_timer);
struct cmd_obj *ph2c;
struct sitesurvey_parm *psurveyPara;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -5123,7 +5123,7 @@ void survey_timer_hdl(struct timer_list *t)
void link_timer_hdl(struct timer_list *t)
{
struct adapter *padapter =
- from_timer(padapter, t, mlmeextpriv.link_timer);
+ timer_container_of(padapter, t, mlmeextpriv.link_timer);
/* static unsigned int rx_pkt = 0; */
/* static u64 tx_cnt = 0; */
/* struct xmit_priv *pxmitpriv = &(padapter->xmitpriv); */
@@ -5161,7 +5161,7 @@ void link_timer_hdl(struct timer_list *t)
void addba_timer_hdl(struct timer_list *t)
{
- struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
+ struct sta_info *psta = timer_container_of(psta, t, addba_retry_timer);
struct ht_priv *phtpriv;
if (!psta)
@@ -5179,7 +5179,7 @@ void addba_timer_hdl(struct timer_list *t)
void sa_query_timer_hdl(struct timer_list *t)
{
struct adapter *padapter =
- from_timer(padapter, t, mlmeextpriv.sa_query_timer);
+ timer_container_of(padapter, t, mlmeextpriv.sa_query_timer);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
/* disconnect */
spin_lock_bh(&pmlmepriv->lock);
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index c60e179bb2e1..6a2583d0d3eb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -8,7 +8,6 @@
#include <hal_data.h>
#include <linux/jiffies.h>
-
void _ips_enter(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
@@ -56,9 +55,8 @@ int _ips_leave(struct adapter *padapter)
pwrpriv->ips_leave_cnts++;
result = rtw_ips_pwr_up(padapter);
- if (result == _SUCCESS) {
+ if (result == _SUCCESS)
pwrpriv->rf_pwrstate = rf_on;
- }
pwrpriv->bips_processing = false;
pwrpriv->bkeepfwalive = false;
@@ -177,7 +175,7 @@ exit:
static void pwr_state_check_handler(struct timer_list *t)
{
struct pwrctrl_priv *pwrctrlpriv =
- from_timer(pwrctrlpriv, t, pwr_state_check_timer);
+ timer_container_of(pwrctrlpriv, t, pwr_state_check_timer);
struct adapter *padapter = pwrctrlpriv->adapter;
rtw_ps_cmd(padapter);
@@ -549,9 +547,8 @@ void LeaveAllPowerSaveMode(struct adapter *Adapter)
LPS_Leave_check(Adapter);
} else {
- if (adapter_to_pwrctl(Adapter)->rf_pwrstate == rf_off) {
+ if (adapter_to_pwrctl(Adapter)->rf_pwrstate == rf_off)
ips_leave(Adapter);
- }
}
}
@@ -677,7 +674,8 @@ exit:
*/
static void pwr_rpwm_timeout_handler(struct timer_list *t)
{
- struct pwrctrl_priv *pwrpriv = from_timer(pwrpriv, t, pwr_rpwm_timer);
+ struct pwrctrl_priv *pwrpriv = timer_container_of(pwrpriv, t,
+ pwr_rpwm_timer);
if ((pwrpriv->rpwm == pwrpriv->cpwm) || (pwrpriv->cpwm >= PS_STATE_S2))
return;
@@ -996,7 +994,6 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->wowlan_ap_mode = false;
}
-
void rtw_free_pwrctrl_priv(struct adapter *adapter)
{
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 895116e9f4e7..8ae527b6e0d6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -1641,7 +1641,7 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
struct dvobj_priv *psdpriv = padapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
u8 wsize = preorder_ctrl->wsize_b;
- u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;/* 4096; */
+ u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) % 4096u;
/* Rx Reorder initialize condition. */
if (preorder_ctrl->indicate_seq == 0xFFFF)
@@ -1657,7 +1657,7 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
/* 2. Incoming SeqNum is larger than the WinEnd => Window shift N */
/* */
if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) {
- preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) % 4096u;
} else if (SN_LESS(wend, seq_num)) {
/* boundary situation, when seq_num cross 0xFFF */
@@ -1772,7 +1772,7 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
list_del_init(&(prframe->u.hdr.list));
if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
- preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) % 4096u;
/* Set this as a lock to make sure that only one thread is indicating packet. */
/* pTS->RxIndicateState = RXTS_INDICATE_PROCESSING; */
@@ -1908,7 +1908,7 @@ _err_exit:
void rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
{
struct recv_reorder_ctrl *preorder_ctrl =
- from_timer(preorder_ctrl, t, reordering_ctrl_timer);
+ timer_container_of(preorder_ctrl, t, reordering_ctrl_timer);
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -2087,7 +2087,7 @@ _recv_entry_drop:
static void rtw_signal_stat_timer_hdl(struct timer_list *t)
{
struct adapter *adapter =
- from_timer(adapter, t, recvpriv.signal_stat_timer);
+ timer_container_of(adapter, t, recvpriv.signal_stat_timer);
struct recv_priv *recvpriv = &adapter->recvpriv;
u32 tmp_s, tmp_q;
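
The reorder-window hunks above switch from masking with 0xFFF to % 4096u; both reduce into the 12-bit 802.11 sequence space, and ordering tests in that space must be wraparound-aware. A sketch assuming the conventional SN_LESS/SN_EQUAL definitions from the rtl8723bs headers (verify against rtw_recv.h before relying on them):

#include <stdio.h>

#define SEQ_MOD 4096u			/* 12-bit 802.11 sequence space */
#define SN_EQUAL(a, b) ((a) == (b))
/* a < b in the wraparound sense: (a - b) mod 4096 has bit 11 set. */
#define SN_LESS(a, b) ((((a) - (b)) & 0x800) != 0)

int main(void)
{
	unsigned int indicate_seq = 4090, wsize = 64;
	unsigned int wend = (indicate_seq + wsize - 1) % SEQ_MOD; /* wraps to 57 */

	printf("wend=%u, SN_LESS(wend, 100)=%d\n", wend, SN_LESS(wend, 100));
	return 0;
}
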
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 297c93d65315..026d58b4bd7f 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -943,7 +943,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (psta) {
psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
- psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority] %= 4096u;
pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
SetSeqNum(hdr, pattrib->seqnum);
@@ -963,11 +963,14 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (SN_LESS(pattrib->seqnum, tx_seq)) {
pattrib->ampdu_en = false;/* AGG BK */
} else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
- psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] =
+ (tx_seq + 1) % 4096u;
pattrib->ampdu_en = true;/* AGG EN */
} else {
- psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] =
+ (pattrib->seqnum + 1) % 4096u;
+
pattrib->ampdu_en = true;/* AGG EN */
}
}
@@ -1936,7 +1939,6 @@ static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
{
static unsigned long start;
- static u32 drop_cnt;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct xmit_frame *pxmitframe = NULL;
@@ -1948,15 +1950,11 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
pxmitframe = rtw_alloc_xmitframe(pxmitpriv);
- if (jiffies_to_msecs(jiffies - start) > 2000) {
+ if (jiffies_to_msecs(jiffies - start) > 2000)
start = jiffies;
- drop_cnt = 0;
- }
- if (!pxmitframe) {
- drop_cnt++;
+ if (!pxmitframe)
return -1;
- }
res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
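
The same mask-to-modulo conversion is applied to the transmit sequence counters above. For unsigned operands and a power-of-two modulus the two spellings are identical; % 4096u simply states the intent (a 12-bit sequence number) more directly than & 0xFFF. A quick self-check:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int seq;

	/* Masking and modulo agree for unsigned values and a power-of-two
	 * modulus, including across several wraps of the sequence space. */
	for (seq = 0; seq < 3 * 4096u; seq++)
		assert((seq & 0xFFF) == (seq % 4096u));

	printf("mask and modulo agree over three wraps\n");
	return 0;
}
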
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
index c1c7b5cc17a7..d32dbf94858f 100644
--- a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
@@ -1100,7 +1100,7 @@ static bool halbtc8723b2ant_IsCommonAction(struct btc_coexist *pBtCoexist)
bCommon = true;
} else {
- if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) {
+ if (pCoexDm->btStatus == BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE) {
bLowPwrDisable = false;
pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
@@ -1115,7 +1115,7 @@ static bool halbtc8723b2ant_IsCommonAction(struct btc_coexist *pBtCoexist)
halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
bCommon = true;
- } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) {
+ } else if (pCoexDm->btStatus == BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE) {
bLowPwrDisable = true;
pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
@@ -1605,7 +1605,7 @@ static void halbtc8723b2ant_ActionSco(struct btc_coexist *pBtCoexist)
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_LEGACY == wifiBw) /* for SCO quality at 11b/g mode */
+ if (wifiBw == BTC_WIFI_BW_LEGACY) /* for SCO quality at 11b/g mode */
halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
else /* for SCO quality & wifi performance balance at 11n mode */
halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 8);
@@ -1613,7 +1613,7 @@ static void halbtc8723b2ant_ActionSco(struct btc_coexist *pBtCoexist)
halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 0); /* for voice quality */
/* sw mechanism */
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -1660,7 +1660,7 @@ static void halbtc8723b2ant_ActionHid(struct btc_coexist *pBtCoexist)
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_LEGACY == wifiBw) /* for HID at 11b/g mode */
+ if (wifiBw == BTC_WIFI_BW_LEGACY) /* for HID at 11b/g mode */
halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
else /* for HID quality & wifi performance balance at 11n mode */
halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 9);
@@ -1674,7 +1674,7 @@ static void halbtc8723b2ant_ActionHid(struct btc_coexist *pBtCoexist)
halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
/* sw mechanism */
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -1723,7 +1723,7 @@ static void halbtc8723b2ant_ActionA2dp(struct btc_coexist *pBtCoexist)
/* sw mechanism */
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, true, 0x18);
} else {
@@ -1755,7 +1755,7 @@ static void halbtc8723b2ant_ActionA2dp(struct btc_coexist *pBtCoexist)
/* sw mechanism */
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -1805,7 +1805,7 @@ static void halbtc8723b2ant_ActionA2dpPanHs(struct btc_coexist *pBtCoexist)
/* sw mechanism */
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -1861,7 +1861,7 @@ static void halbtc8723b2ant_ActionPanEdr(struct btc_coexist *pBtCoexist)
/* sw mechanism */
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -1912,7 +1912,7 @@ static void halbtc8723b2ant_ActionPanHs(struct btc_coexist *pBtCoexist)
halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -1964,7 +1964,7 @@ static void halbtc8723b2ant_ActionPanEdrA2dp(struct btc_coexist *pBtCoexist)
(btRssiState == BTC_RSSI_STATE_STAY_HIGH)
) {
halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 12);
- if (BTC_WIFI_BW_HT40 == wifiBw)
+ if (wifiBw == BTC_WIFI_BW_HT40)
halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, false, true, 3);
else
halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, false, false, 3);
@@ -1974,7 +1974,7 @@ static void halbtc8723b2ant_ActionPanEdrA2dp(struct btc_coexist *pBtCoexist)
}
/* sw mechanism */
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -2019,7 +2019,7 @@ static void halbtc8723b2ant_ActionPanEdrHid(struct btc_coexist *pBtCoexist)
(btRssiState == BTC_RSSI_STATE_HIGH) ||
(btRssiState == BTC_RSSI_STATE_STAY_HIGH)
) {
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 3);
halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 11);
pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
@@ -2037,7 +2037,7 @@ static void halbtc8723b2ant_ActionPanEdrHid(struct btc_coexist *pBtCoexist)
}
/* sw mechanism */
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -2090,7 +2090,7 @@ static void halbtc8723b2ant_ActionHidA2dpPanEdr(struct btc_coexist *pBtCoexist)
(btRssiState == BTC_RSSI_STATE_HIGH) ||
(btRssiState == BTC_RSSI_STATE_STAY_HIGH)
) {
- if (BTC_WIFI_BW_HT40 == wifiBw)
+ if (wifiBw == BTC_WIFI_BW_HT40)
halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, true, 2);
else
halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, false, 3);
@@ -2098,7 +2098,7 @@ static void halbtc8723b2ant_ActionHidA2dpPanEdr(struct btc_coexist *pBtCoexist)
halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3);
/* sw mechanism */
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -2140,7 +2140,7 @@ static void halbtc8723b2ant_ActionHidA2dp(struct btc_coexist *pBtCoexist)
halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_LEGACY == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_LEGACY) {
if (BTC_RSSI_HIGH(btRssiState))
halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
else if (BTC_RSSI_MEDIUM(btRssiState))
@@ -2173,7 +2173,7 @@ static void halbtc8723b2ant_ActionHidA2dp(struct btc_coexist *pBtCoexist)
halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, true, 2);
/* sw mechanism */
- if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (wifiBw == BTC_WIFI_BW_HT40) {
if (
(wifiRssiState == BTC_RSSI_STATE_HIGH) ||
(wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
@@ -2391,12 +2391,12 @@ void EXhalbtc8723b2ant_InitCoexDm(struct btc_coexist *pBtCoexist)
void EXhalbtc8723b2ant_IpsNotify(struct btc_coexist *pBtCoexist, u8 type)
{
- if (BTC_IPS_ENTER == type) {
+ if (type == BTC_IPS_ENTER) {
pCoexSta->bUnderIps = true;
halbtc8723b2ant_WifiOffHwCfg(pBtCoexist);
halbtc8723b2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
halbtc8723b2ant_CoexAllOff(pBtCoexist);
- } else if (BTC_IPS_LEAVE == type) {
+ } else if (type == BTC_IPS_LEAVE) {
pCoexSta->bUnderIps = false;
halbtc8723b2ant_InitHwConfig(pBtCoexist, false);
halbtc8723b2ant_InitCoexDm(pBtCoexist);
@@ -2406,24 +2406,24 @@ void EXhalbtc8723b2ant_IpsNotify(struct btc_coexist *pBtCoexist, u8 type)
void EXhalbtc8723b2ant_LpsNotify(struct btc_coexist *pBtCoexist, u8 type)
{
- if (BTC_LPS_ENABLE == type) {
+ if (type == BTC_LPS_ENABLE) {
pCoexSta->bUnderLps = true;
- } else if (BTC_LPS_DISABLE == type) {
+ } else if (type == BTC_LPS_DISABLE) {
pCoexSta->bUnderLps = false;
}
}
void EXhalbtc8723b2ant_ScanNotify(struct btc_coexist *pBtCoexist, u8 type)
{
- if (BTC_SCAN_START == type) {
- } else if (BTC_SCAN_FINISH == type) {
+ if (type == BTC_SCAN_START) {
+ } else if (type == BTC_SCAN_FINISH) {
}
}
void EXhalbtc8723b2ant_ConnectNotify(struct btc_coexist *pBtCoexist, u8 type)
{
- if (BTC_ASSOCIATE_START == type) {
- } else if (BTC_ASSOCIATE_FINISH == type) {
+ if (type == BTC_ASSOCIATE_START) {
+ } else if (type == BTC_ASSOCIATE_FINISH) {
}
}
@@ -2436,11 +2436,11 @@ void EXhalbtc8723b2ant_MediaStatusNotify(struct btc_coexist *pBtCoexist, u8 type
/* only 2.4G we need to inform bt the chnl mask */
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
- if ((BTC_MEDIA_CONNECT == type) && (wifiCentralChnl <= 14)) {
+ if ((type == BTC_MEDIA_CONNECT) && (wifiCentralChnl <= 14)) {
H2C_Parameter[0] = 0x1;
H2C_Parameter[1] = wifiCentralChnl;
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
- if (BTC_WIFI_BW_HT40 == wifiBw)
+ if (wifiBw == BTC_WIFI_BW_HT40)
H2C_Parameter[2] = 0x30;
else {
pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_AP_NUM, &apNum);
@@ -2573,9 +2573,9 @@ void EXhalbtc8723b2ant_BtInfoNotify(
}
if (
- (BT_8723B_2ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
- (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ (pCoexDm->btStatus == BT_8723B_2ANT_BT_STATUS_ACL_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_2ANT_BT_STATUS_SCO_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY)
) {
bBtBusy = true;
bLimitedDig = true;
@@ -2603,8 +2603,8 @@ void EXhalbtc8723b2ant_HaltNotify(struct btc_coexist *pBtCoexist)
void EXhalbtc8723b2ant_PnpNotify(struct btc_coexist *pBtCoexist, u8 pnpState)
{
- if (BTC_WIFI_PNP_SLEEP == pnpState) {
- } else if (BTC_WIFI_PNP_WAKE_UP == pnpState) {
+ if (pnpState == BTC_WIFI_PNP_SLEEP) {
+ } else if (pnpState == BTC_WIFI_PNP_WAKE_UP) {
halbtc8723b2ant_InitHwConfig(pBtCoexist, false);
halbtc8723b2ant_InitCoexDm(pBtCoexist);
halbtc8723b2ant_QueryBtInfo(pBtCoexist);
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index b72cf520d576..9105594d2dde 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -91,7 +91,7 @@ static void halbtcoutsrc_LeaveLowPower(struct btc_coexist *pBtCoexist)
stime = jiffies;
do {
ready = rtw_register_task_alive(padapter, BTCOEX_ALIVE);
- if (_SUCCESS == ready)
+ if (ready == _SUCCESS)
break;
utime = jiffies_to_msecs(jiffies - stime);
@@ -668,7 +668,7 @@ static void halbtcoutsrc_WriteLocalReg1Byte(void *pBtcContext, u32 RegAddr, u8 D
struct btc_coexist *pBtCoexist = (struct btc_coexist *)pBtcContext;
struct adapter *Adapter = pBtCoexist->Adapter;
- if (BTC_INTF_SDIO == pBtCoexist->chipInterface)
+ if (pBtCoexist->chipInterface == BTC_INTF_SDIO)
rtw_write8(Adapter, SDIO_LOCAL_BASE | RegAddr, Data);
else
rtw_write8(Adapter, RegAddr, Data);
@@ -894,7 +894,7 @@ void EXhalbtcoutsrc_IpsNotify(struct btc_coexist *pBtCoexist, u8 type)
if (pBtCoexist->bManualControl)
return;
- if (IPS_NONE == type)
+ if (type == IPS_NONE)
ipsType = BTC_IPS_LEAVE;
else
ipsType = BTC_IPS_ENTER;
@@ -922,7 +922,7 @@ void EXhalbtcoutsrc_LpsNotify(struct btc_coexist *pBtCoexist, u8 type)
if (pBtCoexist->bManualControl)
return;
- if (PS_MODE_ACTIVE == type)
+ if (type == PS_MODE_ACTIVE)
lpsType = BTC_LPS_DISABLE;
else
lpsType = BTC_LPS_ENABLE;
@@ -1000,7 +1000,7 @@ void EXhalbtcoutsrc_MediaStatusNotify(struct btc_coexist *pBtCoexist, enum
if (pBtCoexist->bManualControl)
return;
- if (RT_MEDIA_CONNECT == mediaStatus)
+ if (mediaStatus == RT_MEDIA_CONNECT)
mStatus = BTC_MEDIA_CONNECT;
else
mStatus = BTC_MEDIA_DISCONNECT;
@@ -1026,11 +1026,11 @@ void EXhalbtcoutsrc_SpecialPacketNotify(struct btc_coexist *pBtCoexist, u8 pktTy
if (pBtCoexist->bManualControl)
return;
- if (PACKET_DHCP == pktType) {
+ if (pktType == PACKET_DHCP) {
packetType = BTC_PACKET_DHCP;
- } else if (PACKET_EAPOL == pktType) {
+ } else if (pktType == PACKET_EAPOL) {
packetType = BTC_PACKET_EAPOL;
- } else if (PACKET_ARP == pktType) {
+ } else if (pktType == PACKET_ARP) {
packetType = BTC_PACKET_ARP;
} else {
return;
@@ -1114,13 +1114,13 @@ void EXhalbtcoutsrc_Periodical(struct btc_coexist *pBtCoexist)
void EXhalbtcoutsrc_SetAntNum(u8 type, u8 antNum)
{
- if (BT_COEX_ANT_TYPE_PG == type) {
+ if (type == BT_COEX_ANT_TYPE_PG) {
GLBtCoexist.boardInfo.pgAntNum = antNum;
GLBtCoexist.boardInfo.btdmAntNum = antNum;
- } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+ } else if (type == BT_COEX_ANT_TYPE_ANTDIV) {
GLBtCoexist.boardInfo.btdmAntNum = antNum;
/* GLBtCoexist.boardInfo.btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT; */
- } else if (BT_COEX_ANT_TYPE_DETECTED == type) {
+ } else if (type == BT_COEX_ANT_TYPE_DETECTED) {
GLBtCoexist.boardInfo.btdmAntNum = antNum;
/* GLBtCoexist.boardInfo.btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT; */
}
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 1213a91cffff..d91e2461fd7e 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -890,15 +890,14 @@ static u32 Array_kfreemap[] = {
void rtw_bb_rf_gain_offset(struct adapter *padapter)
{
u8 value = padapter->eeprompriv.EEPROMRFGainOffset;
- u32 res, i = 0;
u32 *Array = Array_kfreemap;
u32 v1 = 0, v2 = 0, target = 0;
+ u32 i = 0;
if (value & BIT4) {
if (padapter->eeprompriv.EEPROMRFGainVal != 0xff) {
- res = rtw_hal_read_rfreg(padapter, RF_PATH_A, 0x7f, 0xffffffff);
- res &= 0xfff87fff;
- /* res &= 0xfff87fff; */
+ rtw_hal_read_rfreg(padapter, RF_PATH_A, 0x7f, 0xffffffff);
+
for (i = 0; i < ARRAY_SIZE(Array_kfreemap); i += 2) {
v1 = Array[i];
v2 = Array[i+1];
@@ -909,9 +908,7 @@ void rtw_bb_rf_gain_offset(struct adapter *padapter)
}
PHY_SetRFReg(padapter, RF_PATH_A, REG_RF_BB_GAIN_OFFSET, BIT18|BIT17|BIT16|BIT15, target);
- /* res |= (padapter->eeprompriv.EEPROMRFGainVal & 0x0f)<< 15; */
- /* rtw_hal_write_rfreg(padapter, RF_PATH_A, REG_RF_BB_GAIN_OFFSET, RF_GAIN_OFFSET_MASK, res); */
- res = rtw_hal_read_rfreg(padapter, RF_PATH_A, 0x7f, 0xffffffff);
+ rtw_hal_read_rfreg(padapter, RF_PATH_A, 0x7f, 0xffffffff);
}
}
}
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
index 928c58be6c9b..666a9f44012d 100644
--- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
@@ -155,9 +155,9 @@ void ODM_CfoTracking(void *pDM_VOID)
/* 4 1.6 Big jump */
if (pCfoTrack->bAdjust) {
if (CFO_ave > CFO_TH_XTAL_LOW)
- Adjust_Xtal = Adjust_Xtal+((CFO_ave-CFO_TH_XTAL_LOW)>>2);
+ Adjust_Xtal = Adjust_Xtal + ((CFO_ave - CFO_TH_XTAL_LOW) >> 2);
else if (CFO_ave < (-CFO_TH_XTAL_LOW))
- Adjust_Xtal = Adjust_Xtal+((CFO_TH_XTAL_LOW-CFO_ave)>>2);
+ Adjust_Xtal = Adjust_Xtal + ((CFO_TH_XTAL_LOW - CFO_ave) >> 2);
}
/* 4 1.7 Adjust Crystal Cap. */
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index 97a51546463a..1e2946a23beb 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -56,7 +56,7 @@ void odm_NHMBBInit(void *pDM_VOID)
{
struct dm_odm_t *pDM_Odm = (struct dm_odm_t *)pDM_VOID;
- pDM_Odm->adaptivity_flag = 0;
+ pDM_Odm->adaptivity_flag = false;
pDM_Odm->tolerance_cnt = 3;
pDM_Odm->NHMLastTxOkcnt = 0;
pDM_Odm->NHMLastRxOkcnt = 0;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index e15ec6452fd0..893cab0532ed 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -501,8 +501,7 @@ void Hal_GetEfuseDefinition(
switch (type) {
case TYPE_EFUSE_MAX_SECTION:
{
- u8 *pMax_section;
- pMax_section = pOut;
+ u8 *pMax_section = pOut;
if (efuseType == EFUSE_WIFI)
*pMax_section = EFUSE_MAX_SECTION_8723B;
@@ -513,8 +512,7 @@ void Hal_GetEfuseDefinition(
case TYPE_EFUSE_REAL_CONTENT_LEN:
{
- u16 *pu2Tmp;
- pu2Tmp = pOut;
+ u16 *pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723B;
@@ -525,8 +523,7 @@ void Hal_GetEfuseDefinition(
case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
{
- u16 *pu2Tmp;
- pu2Tmp = pOut;
+ u16 *pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723B-EFUSE_OOB_PROTECT_BYTES);
@@ -537,8 +534,7 @@ void Hal_GetEfuseDefinition(
case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
{
- u16 *pu2Tmp;
- pu2Tmp = pOut;
+ u16 *pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723B-EFUSE_OOB_PROTECT_BYTES);
@@ -549,8 +545,7 @@ void Hal_GetEfuseDefinition(
case TYPE_EFUSE_MAP_LEN:
{
- u16 *pu2Tmp;
- pu2Tmp = pOut;
+ u16 *pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_MAX_MAP_LEN;
@@ -561,8 +556,7 @@ void Hal_GetEfuseDefinition(
case TYPE_EFUSE_PROTECT_BYTES_BANK:
{
- u8 *pu1Tmp;
- pu1Tmp = pOut;
+ u8 *pu1Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu1Tmp = EFUSE_OOB_PROTECT_BYTES;
@@ -573,8 +567,7 @@ void Hal_GetEfuseDefinition(
case TYPE_EFUSE_CONTENT_LEN_BANK:
{
- u16 *pu2Tmp;
- pu2Tmp = pOut;
+ u16 *pu2Tmp = pOut;
if (efuseType == EFUSE_WIFI)
*pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723B;
@@ -585,8 +578,7 @@ void Hal_GetEfuseDefinition(
default:
{
- u8 *pu1Tmp;
- pu1Tmp = pOut;
+ u8 *pu1Tmp = pOut;
*pu1Tmp = 0;
}
break;
@@ -835,9 +827,8 @@ static void hal_ReadEFuse_BT(
}
if (offset < EFUSE_BT_MAX_SECTION) {
- u16 addr;
+ u16 addr = offset * PGPKT_DATA_SIZE;
- addr = offset * PGPKT_DATA_SIZE;
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (!(wden & (0x01<<i))) {
@@ -1196,10 +1187,9 @@ void rtl8723b_InitBeaconParameters(struct adapter *padapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
u16 val16;
- u8 val8;
+ u8 val8 = DIS_TSF_UDT;
- val8 = DIS_TSF_UDT;
val16 = val8 | (val8 << 8); /* port0 and port1 */
/* Enable prot0 beacon function for PSTDMA */
@@ -1496,10 +1486,7 @@ s32 rtl8723b_InitLLTTable(struct adapter *padapter)
{
unsigned long start, passing_time;
u32 val32;
- s32 ret;
-
-
- ret = _FAIL;
+ s32 ret = _FAIL;
val32 = rtw_read32(padapter, REG_AUTO_LLT);
val32 |= BIT_AUTO_INIT_LLT;
@@ -2273,9 +2260,8 @@ void rtl8723b_fill_fake_txdesc(
/* Encrypt the data frame if under security mode except null data. Suggested by CCW. */
/* */
if (bDataFrame) {
- u32 EncAlg;
+ u32 EncAlg = padapter->securitypriv.dot11PrivacyAlgrthm;
- EncAlg = padapter->securitypriv.dot11PrivacyAlgrthm;
switch (EncAlg) {
case _NO_PRIVACY_:
SET_TX_DESC_SEC_TYPE_8723B(pDesc, 0x0);
@@ -2378,9 +2364,7 @@ static void hw_var_set_opmode(struct adapter *padapter, u8 variable, u8 *val)
static void hw_var_set_macaddr(struct adapter *padapter, u8 variable, u8 *val)
{
u8 idx = 0;
- u32 reg_macid;
-
- reg_macid = REG_MACID;
+ u32 reg_macid = REG_MACID;
for (idx = 0 ; idx < 6; idx++)
rtw_write8(GET_PRIMARY_ADAPTER(padapter), (reg_macid+idx), val[idx]);
@@ -2389,9 +2373,7 @@ static void hw_var_set_macaddr(struct adapter *padapter, u8 variable, u8 *val)
static void hw_var_set_bssid(struct adapter *padapter, u8 variable, u8 *val)
{
u8 idx = 0;
- u32 reg_bssid;
-
- reg_bssid = REG_BSSID;
+ u32 reg_bssid = REG_BSSID;
for (idx = 0 ; idx < 6; idx++)
rtw_write8(padapter, (reg_bssid+idx), val[idx]);
@@ -2399,9 +2381,7 @@ static void hw_var_set_bssid(struct adapter *padapter, u8 variable, u8 *val)
static void hw_var_set_bcn_func(struct adapter *padapter, u8 variable, u8 *val)
{
- u32 bcn_ctrl_reg;
-
- bcn_ctrl_reg = REG_BCN_CTRL;
+ u32 bcn_ctrl_reg = REG_BCN_CTRL;
if (*(u8 *)val)
rtw_write8(padapter, bcn_ctrl_reg, (EN_BCN_FUNCTION | EN_TXBCN_RPT));
@@ -2422,12 +2402,8 @@ static void hw_var_set_correct_tsf(struct adapter *padapter, u8 variable, u8 *va
{
u8 val8;
u64 tsf;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
-
-
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
tsf = pmlmeext->TSFValue-do_div(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024))-1024; /* us */
@@ -2543,15 +2519,12 @@ static void hw_var_set_mlme_join(struct adapter *padapter, u8 variable, u8 *val)
u8 val8;
u16 val16;
u32 val32;
- u8 RetryLimit;
- u8 type;
- struct mlme_priv *pmlmepriv;
+ u8 RetryLimit = 0x30;
+ u8 type = *(u8 *)val;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct eeprom_priv *pEEPROM;
- RetryLimit = 0x30;
- type = *(u8 *)val;
- pmlmepriv = &padapter->mlmepriv;
pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
if (type == 0) { /* prepare to join */
@@ -2850,12 +2823,11 @@ void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
case HW_VAR_ACK_PREAMBLE:
{
- u8 regTmp;
+ u8 regTmp = 0;
u8 bShortPreamble = *val;
/* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
/* regTmp = (pHalData->nCur40MhzPrimeSC)<<5; */
- regTmp = 0;
if (bShortPreamble)
regTmp |= 0x80;
rtw_write8(padapter, REG_RRSR+2, regTmp);
@@ -3226,9 +3198,7 @@ void GetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
*/
u8 SetHalDefVar8723B(struct adapter *padapter, enum hal_def_variable variable, void *pval)
{
- u8 bResult;
-
- bResult = _SUCCESS;
+ u8 bResult = _SUCCESS;
switch (variable) {
default:
@@ -3244,9 +3214,7 @@ u8 SetHalDefVar8723B(struct adapter *padapter, enum hal_def_variable variable, v
*/
u8 GetHalDefVar8723B(struct adapter *padapter, enum hal_def_variable variable, void *pval)
{
- u8 bResult;
-
- bResult = _SUCCESS;
+ u8 bResult = _SUCCESS;
switch (variable) {
case HAL_DEF_MAX_RECVBUF_SZ:
@@ -3281,9 +3249,8 @@ u8 GetHalDefVar8723B(struct adapter *padapter, enum hal_def_variable variable, v
case HW_DEF_RA_INFO_DUMP:
{
u8 mac_id = *(u8 *)pval;
- u32 cmd;
+ u32 cmd = 0x40000100 | mac_id;

- cmd = 0x40000100 | mac_id;
rtw_write32(padapter, REG_HMEBOX_DBG_2_8723B, cmd);
msleep(10);
rtw_read32(padapter, 0x2F0); // info 1
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index 5dc1c12fe03e..842e19b53421 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -120,13 +120,10 @@ free_xmitbuf:
*/
s32 rtl8723bs_xmit_buf_handler(struct adapter *padapter)
{
- struct xmit_priv *pxmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u8 queue_empty, queue_pending;
s32 ret;
-
- pxmitpriv = &padapter->xmitpriv;
-
if (wait_for_completion_interruptible(&pxmitpriv->xmit_comp)) {
netdev_emerg(padapter->pnetdev,
"%s: down SdioXmitBufSema fail!\n", __func__);
@@ -357,12 +354,9 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
*/
static s32 rtl8723bs_xmit_handler(struct adapter *padapter)
{
- struct xmit_priv *pxmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
s32 ret;
-
- pxmitpriv = &padapter->xmitpriv;
-
if (wait_for_completion_interruptible(&pxmitpriv->SdioXmitStart)) {
netdev_emerg(padapter->pnetdev, "%s: SdioXmitStart fail!\n",
__func__);
@@ -408,13 +402,9 @@ next:
int rtl8723bs_xmit_thread(void *context)
{
- s32 ret;
- struct adapter *padapter;
- struct xmit_priv *pxmitpriv;
-
- ret = _SUCCESS;
- padapter = context;
- pxmitpriv = &padapter->xmitpriv;
+ s32 ret = _SUCCESS;
+ struct adapter *padapter = context;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
allow_signal(SIGTERM);
@@ -435,16 +425,13 @@ s32 rtl8723bs_mgnt_xmit(
)
{
s32 ret = _SUCCESS;
- struct pkt_attrib *pattrib;
- struct xmit_buf *pxmitbuf;
+ struct pkt_attrib *pattrib = &pmgntframe->attrib;
+ struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
u8 *pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
u8 txdesc_size = TXDESC_SIZE;
- pattrib = &pmgntframe->attrib;
- pxmitbuf = pmgntframe->pxmitbuf;
-
rtl8723b_update_txdesc(pmgntframe, pmgntframe->buf_addr);
pxmitbuf->len = txdesc_size + pattrib->last_txcmdsz;
@@ -557,15 +544,13 @@ s32 rtl8723bs_init_xmit_priv(struct adapter *padapter)
void rtl8723bs_free_xmit_priv(struct adapter *padapter)
{
- struct xmit_priv *pxmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct xmit_buf *pxmitbuf;
- struct __queue *pqueue;
+ struct __queue *pqueue = &pxmitpriv->pending_xmitbuf_queue;
struct list_head *plist, *phead;
struct list_head tmplist;
- pxmitpriv = &padapter->xmitpriv;
- pqueue = &pxmitpriv->pending_xmitbuf_queue;
phead = get_list_head(pqueue);
INIT_LIST_HEAD(&tmplist);
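All three xmit paths above block on wait_for_completion_interruptible(): a nonzero return means a signal, not complete(), woke the waiter, and since rtl8723bs_xmit_thread calls allow_signal(SIGTERM), that is how the thread is asked to exit. A sketch of the loop shape in kernel context; the function and variable names here are illustrative, not the driver's:

    #include <linux/completion.h>
    #include <linux/kthread.h>
    #include <linux/sched/signal.h>

    static int xmit_wait_loop(void *data)
    {
            struct completion *start = data;

            allow_signal(SIGTERM);
            while (!kthread_should_stop()) {
                    /* Nonzero return: a signal interrupted the wait. */
                    if (wait_for_completion_interruptible(start))
                            break;
                    /* ... drain the pending xmit buffers here ... */
            }
            return 0;
    }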
diff --git a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
index b93d74a5b9a5..48bf7f66a06e 100644
--- a/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
+++ b/drivers/staging/rtl8723bs/include/hal_pwr_seq.h
@@ -209,7 +209,7 @@
#define RTL8723B_TRANS_END \
/* format */ \
/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0},
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0},
extern struct wlan_pwr_cfg rtl8723B_power_on_flow[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS+RTL8723B_TRANS_END_STEPS];
diff --git a/drivers/staging/rtl8723bs/include/sta_info.h b/drivers/staging/rtl8723bs/include/sta_info.h
index b3535fed3de7..63343998266a 100644
--- a/drivers/staging/rtl8723bs/include/sta_info.h
+++ b/drivers/staging/rtl8723bs/include/sta_info.h
@@ -86,7 +86,7 @@ struct sta_info {
uint qos_option;
u8 hwaddr[ETH_ALEN];
- uint ieee8021x_blocked; /* 0: allowed, 1:blocked */
+ bool ieee8021x_blocked;
uint dot118021XPrivacy; /* aes, tkip... */
union Keytype dot11tkiptxmickey;
union Keytype dot11tkiprxmickey;
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index 1904e82a24b5..fd4ae870a617 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -9,7 +9,7 @@
static void _dynamic_check_timer_handler(struct timer_list *t)
{
struct adapter *adapter =
- from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
+ timer_container_of(adapter, t, mlmepriv.dynamic_chk_timer);
rtw_dynamic_check_timer_handler(adapter);
@@ -19,7 +19,7 @@ static void _dynamic_check_timer_handler(struct timer_list *t)
static void _rtw_set_scan_deny_timer_hdl(struct timer_list *t)
{
struct adapter *adapter =
- from_timer(adapter, t, mlmepriv.set_scan_deny_timer);
+ timer_container_of(adapter, t, mlmepriv.set_scan_deny_timer);
rtw_clear_scan_deny(adapter);
}
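Both conversions here (and the matching ones in the iSCSI target files further down) are mechanical: timer_container_of() is the newer name for what from_timer() did, recovering the structure that embeds the struct timer_list from the timer pointer handed to the callback. A userspace sketch of the underlying container_of() arithmetic, simplified rather than the kernel macro itself:

    #include <stddef.h>
    #include <stdio.h>

    /* container_of(): recover the enclosing struct from a member pointer. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer_list { int dummy; };

    struct adapter {
            int id;
            struct timer_list dynamic_chk_timer;
    };

    int main(void)
    {
            struct adapter a = { .id = 42 };
            struct timer_list *t = &a.dynamic_chk_timer;

            /* timer_container_of(adapter, t, dynamic_chk_timer) reduces
             * to exactly this pointer arithmetic: */
            struct adapter *back =
                    container_of(t, struct adapter, dynamic_chk_timer);

            printf("%d\n", back->id);       /* prints 42 */
            return 0;
    }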
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index ca808ded61ac..98d3e4777210 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -45,9 +45,8 @@ void rtw_os_recv_resource_free(struct recv_priv *precvpriv)
/* free os related resource in struct recv_buf */
void rtw_os_recvbuf_resource_free(struct adapter *padapter, struct recv_buf *precvbuf)
{
- if (precvbuf->pskb) {
+ if (precvbuf->pskb)
dev_kfree_skb_any(precvbuf->pskb);
- }
}
struct sk_buff *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8 *pdata)
@@ -160,21 +159,19 @@ void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
}
}
- if (bgroup) {
+ if (bgroup)
key_type |= NL80211_KEYTYPE_GROUP;
- } else {
+ else
key_type |= NL80211_KEYTYPE_PAIRWISE;
- }
cfg80211_michael_mic_failure(padapter->pnetdev, (u8 *)&pmlmepriv->assoc_bssid[0], key_type, -1,
NULL, GFP_ATOMIC);
memset(&ev, 0x00, sizeof(ev));
- if (bgroup) {
+ if (bgroup)
ev.flags |= IW_MICFAILURE_GROUP;
- } else {
+ else
ev.flags |= IW_MICFAILURE_PAIRWISE;
- }
ev.src_addr.sa_family = ARPHRD_ETHER;
memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile
index b89aa4c12e9d..f7e227df0e54 100644
--- a/drivers/staging/sm750fb/Makefile
+++ b/drivers/staging/sm750fb/Makefile
@@ -3,5 +3,4 @@ obj-$(CONFIG_FB_SM750) += sm750fb.o
sm750fb-objs := sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o \
ddk750_chip.o ddk750_power.o ddk750_mode.o \
- ddk750_display.o ddk750_swi2c.o ddk750_sii164.o \
- ddk750_dvi.o ddk750_hwi2c.o
+ ddk750_display.o ddk750_swi2c.o
diff --git a/drivers/staging/sm750fb/TODO b/drivers/staging/sm750fb/TODO
index 9dd57c566257..7ce632d040b3 100644
--- a/drivers/staging/sm750fb/TODO
+++ b/drivers/staging/sm750fb/TODO
@@ -3,9 +3,6 @@ TODO:
- use kernel coding style
- refine the code and remove unused code
- Implement hardware acceleration for imageblit if image->depth > 1
-- check on hardware effects of removal of USE_HW_I2C and USE_DVICHIP (these two
- are supposed to be sample code which is given here if someone wants to
- use those functionalities)
- must be ported to the atomic kms framework in the drm subsystem (which will
give you a basic fbdev driver for free)
diff --git a/drivers/staging/sm750fb/ddk750.h b/drivers/staging/sm750fb/ddk750.h
index 64ef4d258a91..8a32f8cf3a98 100644
--- a/drivers/staging/sm750fb/ddk750.h
+++ b/drivers/staging/sm750fb/ddk750.h
@@ -14,8 +14,5 @@
#include "ddk750_chip.h"
#include "ddk750_display.h"
#include "ddk750_power.h"
-#ifdef USE_HW_I2C
-#include "ddk750_hwi2c.h"
-#endif
#include "ddk750_swi2c.h"
#endif
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 172624ff98b0..4e390541a507 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -3,7 +3,6 @@
#include "ddk750_chip.h"
#include "ddk750_display.h"
#include "ddk750_power.h"
-#include "ddk750_dvi.h"
static void set_display_control(int ctrl, int disp_state)
{
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
deleted file mode 100644
index 8b81e8642f9e..000000000000
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define USE_DVICHIP
-#ifdef USE_DVICHIP
-#include "ddk750_chip.h"
-#include "ddk750_reg.h"
-#include "ddk750_dvi.h"
-#include "ddk750_sii164.h"
-
-/*
- * This global variable contains all the supported driver and its corresponding
- * function API. Please set the function pointer to NULL whenever the function
- * is not supported.
- */
-static struct dvi_ctrl_device dcft_supported_dvi_controller[] = {
-#ifdef DVI_CTRL_SII164
- {
- .init = sii164_init_chip,
- .get_vendor_id = sii164_get_vendor_id,
- .get_device_id = sii164GetDeviceID,
-#ifdef SII164_FULL_FUNCTIONS
- .reset_chip = sii164ResetChip,
- .get_chip_string = sii164GetChipString,
- .set_power = sii164SetPower,
- .enable_hot_plug_detection = sii164EnableHotPlugDetection,
- .is_connected = sii164IsConnected,
- .check_interrupt = sii164CheckInterrupt,
- .clear_interrupt = sii164ClearInterrupt,
-#endif
- },
-#endif
-};
-
-int dvi_init(unsigned char edge_select,
- unsigned char bus_select,
- unsigned char dual_edge_clk_select,
- unsigned char hsync_enable,
- unsigned char vsync_enable,
- unsigned char deskew_enable,
- unsigned char deskew_setting,
- unsigned char continuous_sync_enable,
- unsigned char pll_filter_enable,
- unsigned char pll_filter_value)
-{
- struct dvi_ctrl_device *current_dvi_ctrl;
-
- current_dvi_ctrl = dcft_supported_dvi_controller;
- if (current_dvi_ctrl->init) {
- return current_dvi_ctrl->init(edge_select,
- bus_select,
- dual_edge_clk_select,
- hsync_enable,
- vsync_enable,
- deskew_enable,
- deskew_setting,
- continuous_sync_enable,
- pll_filter_enable,
- pll_filter_value);
- }
- return -1; /* error */
-}
-
-#endif
diff --git a/drivers/staging/sm750fb/ddk750_dvi.h b/drivers/staging/sm750fb/ddk750_dvi.h
deleted file mode 100644
index c2518b73bdbd..000000000000
--- a/drivers/staging/sm750fb/ddk750_dvi.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DDK750_DVI_H__
-#define DDK750_DVI_H__
-
-/* dvi chip stuffs structros */
-
-typedef long (*PFN_DVICTRL_INIT)(unsigned char edge_select,
- unsigned char bus_select,
- unsigned char dual_edge_clk_select,
- unsigned char hsync_enable,
- unsigned char vsync_enable,
- unsigned char deskew_enable,
- unsigned char deskew_setting,
- unsigned char continuous_sync_enable,
- unsigned char pll_filter_enable,
- unsigned char pll_filter_value);
-
-typedef void (*PFN_DVICTRL_RESETCHIP)(void);
-typedef char* (*PFN_DVICTRL_GETCHIPSTRING)(void);
-typedef unsigned short (*PFN_DVICTRL_GETVENDORID)(void);
-typedef unsigned short (*PFN_DVICTRL_GETDEVICEID)(void);
-typedef void (*PFN_DVICTRL_SETPOWER)(unsigned char power_up);
-typedef void (*PFN_DVICTRL_HOTPLUGDETECTION)(unsigned char enable_hot_plug);
-typedef unsigned char (*PFN_DVICTRL_ISCONNECTED)(void);
-typedef unsigned char (*PFN_DVICTRL_CHECKINTERRUPT)(void);
-typedef void (*PFN_DVICTRL_CLEARINTERRUPT)(void);
-
-/* Structure to hold all the function pointer to the DVI Controller. */
-struct dvi_ctrl_device {
- PFN_DVICTRL_INIT init;
- PFN_DVICTRL_RESETCHIP reset_chip;
- PFN_DVICTRL_GETCHIPSTRING get_chip_string;
- PFN_DVICTRL_GETVENDORID get_vendor_id;
- PFN_DVICTRL_GETDEVICEID get_device_id;
- PFN_DVICTRL_SETPOWER set_power;
- PFN_DVICTRL_HOTPLUGDETECTION enable_hot_plug_detection;
- PFN_DVICTRL_ISCONNECTED is_connected;
- PFN_DVICTRL_CHECKINTERRUPT check_interrupt;
- PFN_DVICTRL_CLEARINTERRUPT clear_interrupt;
-};
-
-#define DVI_CTRL_SII164
-
-/* dvi functions prototype */
-int dvi_init(unsigned char edge_select,
- unsigned char bus_select,
- unsigned char dual_edge_clk_select,
- unsigned char hsync_enable,
- unsigned char vsync_enable,
- unsigned char deskew_enable,
- unsigned char deskew_setting,
- unsigned char continuous_sync_enable,
- unsigned char pll_filter_enable,
- unsigned char pll_filter_value);
-
-#endif
-
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
deleted file mode 100644
index 8482689b665b..000000000000
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ /dev/null
@@ -1,247 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define USE_HW_I2C
-#ifdef USE_HW_I2C
-#include "ddk750_chip.h"
-#include "ddk750_reg.h"
-#include "ddk750_hwi2c.h"
-#include "ddk750_power.h"
-
-#define MAX_HWI2C_FIFO 16
-#define HWI2C_WAIT_TIMEOUT 0xF0000
-
-int sm750_hw_i2c_init(unsigned char bus_speed_mode)
-{
- unsigned int value;
-
- /* Enable GPIO 30 & 31 as IIC clock & data */
- value = peek32(GPIO_MUX);
-
- value |= (GPIO_MUX_30 | GPIO_MUX_31);
- poke32(GPIO_MUX, value);
-
- /*
- * Enable Hardware I2C power.
- * TODO: Check if we need to enable GPIO power?
- */
- sm750_enable_i2c(1);
-
- /* Enable the I2C Controller and set the bus speed mode */
- value = peek32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN);
- if (bus_speed_mode)
- value |= I2C_CTRL_MODE;
- value |= I2C_CTRL_EN;
- poke32(I2C_CTRL, value);
-
- return 0;
-}
-
-void sm750_hw_i2c_close(void)
-{
- unsigned int value;
-
- /* Disable I2C controller */
- value = peek32(I2C_CTRL) & ~I2C_CTRL_EN;
- poke32(I2C_CTRL, value);
-
- /* Disable I2C Power */
- sm750_enable_i2c(0);
-
- /* Set GPIO 30 & 31 back as GPIO pins */
- value = peek32(GPIO_MUX);
- value &= ~GPIO_MUX_30;
- value &= ~GPIO_MUX_31;
- poke32(GPIO_MUX, value);
-}
-
-static long hw_i2c_wait_tx_done(void)
-{
- unsigned int timeout;
-
- /* Wait until the transfer is completed. */
- timeout = HWI2C_WAIT_TIMEOUT;
- while (!(peek32(I2C_STATUS) & I2C_STATUS_TX) && (timeout != 0))
- timeout--;
-
- if (timeout == 0)
- return -1;
-
- return 0;
-}
-
-/*
- * This function writes data to the i2c slave device registers.
- *
- * Parameters:
- * addr - i2c Slave device address
- * length - Total number of bytes to be written to the device
- * buf - The buffer that contains the data to be written to the
- * i2c device.
- *
- * Return Value:
- * Total number of bytes those are actually written.
- */
-static unsigned int hw_i2c_write_data(unsigned char addr,
- unsigned int length,
- unsigned char *buf)
-{
- unsigned char count, i;
- unsigned int total_bytes = 0;
-
- /* Set the Device Address */
- poke32(I2C_SLAVE_ADDRESS, addr & ~0x01);
-
- /*
- * Write data.
- * Note:
- * Only 16 byte can be accessed per i2c start instruction.
- */
- do {
- /*
- * Reset I2C by writing 0 to I2C_RESET register to
- * clear the previous status.
- */
- poke32(I2C_RESET, 0);
-
- /* Set the number of bytes to be written */
- if (length < MAX_HWI2C_FIFO)
- count = length - 1;
- else
- count = MAX_HWI2C_FIFO - 1;
- poke32(I2C_BYTE_COUNT, count);
-
- /* Move the data to the I2C data register */
- for (i = 0; i <= count; i++)
- poke32(I2C_DATA0 + i, *buf++);
-
- /* Start the I2C */
- poke32(I2C_CTRL, peek32(I2C_CTRL) | I2C_CTRL_CTRL);
-
- /* Wait until the transfer is completed. */
- if (hw_i2c_wait_tx_done() != 0)
- break;
-
- /* Subtract length */
- length -= (count + 1);
-
- /* Total byte written */
- total_bytes += (count + 1);
-
- } while (length > 0);
-
- return total_bytes;
-}
-
-/*
- * This function reads data from the slave device and stores them
- * in the given buffer
- *
- * Parameters:
- * addr - i2c Slave device address
- * length - Total number of bytes to be read
- * buf - Pointer to a buffer to be filled with the data read
- * from the slave device. It has to be the same size as the
- * length to make sure that it can keep all the data read.
- *
- * Return Value:
- * Total number of actual bytes read from the slave device
- */
-static unsigned int hw_i2c_read_data(unsigned char addr,
- unsigned int length,
- unsigned char *buf)
-{
- unsigned char count, i;
- unsigned int total_bytes = 0;
-
- /* Set the Device Address */
- poke32(I2C_SLAVE_ADDRESS, addr | 0x01);
-
- /*
- * Read data and save them to the buffer.
- * Note:
- * Only 16 byte can be accessed per i2c start instruction.
- */
- do {
- /*
- * Reset I2C by writing 0 to I2C_RESET register to
- * clear all the status.
- */
- poke32(I2C_RESET, 0);
-
- /* Set the number of bytes to be read */
- if (length <= MAX_HWI2C_FIFO)
- count = length - 1;
- else
- count = MAX_HWI2C_FIFO - 1;
- poke32(I2C_BYTE_COUNT, count);
-
- /* Start the I2C */
- poke32(I2C_CTRL, peek32(I2C_CTRL) | I2C_CTRL_CTRL);
-
- /* Wait until transaction done. */
- if (hw_i2c_wait_tx_done() != 0)
- break;
-
- /* Save the data to the given buffer */
- for (i = 0; i <= count; i++)
- *buf++ = peek32(I2C_DATA0 + i);
-
- /* Subtract length by 16 */
- length -= (count + 1);
-
- /* Number of bytes read. */
- total_bytes += (count + 1);
-
- } while (length > 0);
-
- return total_bytes;
-}
-
-/*
- * This function reads the slave device's register
- *
- * Parameters:
- * deviceAddress - i2c Slave device address which register
- * to be read from
- * registerIndex - Slave device's register to be read
- *
- * Return Value:
- * Register value
- */
-unsigned char sm750_hw_i2c_read_reg(unsigned char addr, unsigned char reg)
-{
- unsigned char value = 0xFF;
-
- if (hw_i2c_write_data(addr, 1, &reg) == 1)
- hw_i2c_read_data(addr, 1, &value);
-
- return value;
-}
-
-/*
- * This function writes a value to the slave device's register
- *
- * Parameters:
- * deviceAddress - i2c Slave device address which register
- * to be written
- * registerIndex - Slave device's register to be written
- * data - Data to be written to the register
- *
- * Result:
- * 0 - Success
- * -1 - Fail
- */
-int sm750_hw_i2c_write_reg(unsigned char addr,
- unsigned char reg,
- unsigned char data)
-{
- unsigned char value[2];
-
- value[0] = reg;
- value[1] = data;
- if (hw_i2c_write_data(addr, 2, value) == 2)
- return 0;
-
- return -1;
-}
-
-#endif
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.h b/drivers/staging/sm750fb/ddk750_hwi2c.h
deleted file mode 100644
index 337c6493ca61..000000000000
--- a/drivers/staging/sm750fb/ddk750_hwi2c.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DDK750_HWI2C_H__
-#define DDK750_HWI2C_H__
-
-/* hwi2c functions */
-int sm750_hw_i2c_init(unsigned char bus_speed_mode);
-void sm750_hw_i2c_close(void);
-
-unsigned char sm750_hw_i2c_read_reg(unsigned char addr, unsigned char reg);
-int sm750_hw_i2c_write_reg(unsigned char addr, unsigned char reg,
- unsigned char data);
-#endif
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 63c9e8b6ffb3..5cbb11986bb8 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -3,10 +3,10 @@
#define DDK750_POWER_H__
enum dpms {
- crtDPMS_ON = 0x0,
- crtDPMS_STANDBY = 0x1,
- crtDPMS_SUSPEND = 0x2,
- crtDPMS_OFF = 0x3,
+ CRT_DPMS_ON = 0x0,
+ CRT_DPMS_STANDBY = 0x1,
+ CRT_DPMS_SUSPEND = 0x2,
+ CRT_DPMS_OFF = 0x3,
};
#define set_DAC(off) { \
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
deleted file mode 100644
index 2532b60245ac..000000000000
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ /dev/null
@@ -1,408 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define USE_DVICHIP
-#ifdef USE_DVICHIP
-
-#include "ddk750_sii164.h"
-#include "ddk750_hwi2c.h"
-
-/* I2C Address of each SII164 chip */
-#define SII164_I2C_ADDRESS 0x70
-
-/* Define this definition to use hardware i2c. */
-#define USE_HW_I2C
-
-#ifdef USE_HW_I2C
- #define i2cWriteReg sm750_hw_i2c_write_reg
- #define i2cReadReg sm750_hw_i2c_read_reg
-#else
- #define i2cWriteReg sm750_sw_i2c_write_reg
- #define i2cReadReg sm750_sw_i2c_read_reg
-#endif
-
-/* SII164 Vendor and Device ID */
-#define SII164_VENDOR_ID 0x0001
-#define SII164_DEVICE_ID 0x0006
-
-#ifdef SII164_FULL_FUNCTIONS
-/* Name of the DVI Controller chip */
-static char *gDviCtrlChipName = "Silicon Image SiI 164";
-#endif
-
-/*
- * sii164_get_vendor_id
- * This function gets the vendor ID of the DVI controller chip.
- *
- * Output:
- * Vendor ID
- */
-unsigned short sii164_get_vendor_id(void)
-{
- unsigned short vendorID;
-
- vendorID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
- SII164_VENDOR_ID_HIGH) << 8) |
- (unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
- SII164_VENDOR_ID_LOW);
-
- return vendorID;
-}
-
-/*
- * sii164GetDeviceID
- * This function gets the device ID of the DVI controller chip.
- *
- * Output:
- * Device ID
- */
-unsigned short sii164GetDeviceID(void)
-{
- unsigned short deviceID;
-
- deviceID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
- SII164_DEVICE_ID_HIGH) << 8) |
- (unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
- SII164_DEVICE_ID_LOW);
-
- return deviceID;
-}
-
-/*
- * DVI.C will handle all SiI164 chip stuffs and try its best to make code
- * minimal and useful
- */
-
-/*
- * sii164_init_chip
- * This function initialize and detect the DVI controller chip.
- *
- * Input:
- * edge_select - Edge Select:
- * 0 = Input data is falling edge latched (falling
- * edge latched first in dual edge mode)
- * 1 = Input data is rising edge latched (rising
- * edge latched first in dual edge mode)
- * bus_select - Input Bus Select:
- * 0 = Input data bus is 12-bits wide
- * 1 = Input data bus is 24-bits wide
- * dual_edge_clk_select - Dual Edge Clock Select
- * 0 = Input data is single edge latched
- * 1 = Input data is dual edge latched
- * hsync_enable - Horizontal Sync Enable:
- * 0 = HSYNC input is transmitted as fixed LOW
- * 1 = HSYNC input is transmitted as is
- * vsync_enable - Vertical Sync Enable:
- * 0 = VSYNC input is transmitted as fixed LOW
- * 1 = VSYNC input is transmitted as is
- * deskew_enable - De-skewing Enable:
- * 0 = De-skew disabled
- * 1 = De-skew enabled
- * deskew_setting - De-skewing Setting (increment of 260psec)
- * 0 = 1 step --> minimum setup / maximum hold
- * 1 = 2 step
- * 2 = 3 step
- * 3 = 4 step
- * 4 = 5 step
- * 5 = 6 step
- * 6 = 7 step
- * 7 = 8 step --> maximum setup / minimum hold
- * continuous_sync_enable- SYNC Continuous:
- * 0 = Disable
- * 1 = Enable
- * pll_filter_enable - PLL Filter Enable
- * 0 = Disable PLL Filter
- * 1 = Enable PLL Filter
- * pll_filter_value - PLL Filter characteristics:
- * 0~7 (recommended value is 4)
- *
- * Output:
- * 0 - Success
- * -1 - Fail.
- */
-long sii164_init_chip(unsigned char edge_select,
- unsigned char bus_select,
- unsigned char dual_edge_clk_select,
- unsigned char hsync_enable,
- unsigned char vsync_enable,
- unsigned char deskew_enable,
- unsigned char deskew_setting,
- unsigned char continuous_sync_enable,
- unsigned char pll_filter_enable,
- unsigned char pll_filter_value)
-{
- unsigned char config;
-
- /* Initialize the i2c bus */
-#ifdef USE_HW_I2C
- /* Use fast mode. */
- sm750_hw_i2c_init(1);
-#else
- sm750_sw_i2c_init(DEFAULT_I2C_SCL, DEFAULT_I2C_SDA);
-#endif
-
- /* Check if SII164 Chip exists */
- if ((sii164_get_vendor_id() == SII164_VENDOR_ID) &&
- (sii164GetDeviceID() == SII164_DEVICE_ID)) {
- /*
- * Initialize SII164 controller chip.
- */
-
- /* Select the edge */
- if (edge_select == 0)
- config = SII164_CONFIGURATION_LATCH_FALLING;
- else
- config = SII164_CONFIGURATION_LATCH_RISING;
-
- /* Select bus wide */
- if (bus_select == 0)
- config |= SII164_CONFIGURATION_BUS_12BITS;
- else
- config |= SII164_CONFIGURATION_BUS_24BITS;
-
- /* Select Dual/Single Edge Clock */
- if (dual_edge_clk_select == 0)
- config |= SII164_CONFIGURATION_CLOCK_SINGLE;
- else
- config |= SII164_CONFIGURATION_CLOCK_DUAL;
-
- /* Select HSync Enable */
- if (hsync_enable == 0)
- config |= SII164_CONFIGURATION_HSYNC_FORCE_LOW;
- else
- config |= SII164_CONFIGURATION_HSYNC_AS_IS;
-
- /* Select VSync Enable */
- if (vsync_enable == 0)
- config |= SII164_CONFIGURATION_VSYNC_FORCE_LOW;
- else
- config |= SII164_CONFIGURATION_VSYNC_AS_IS;
-
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
-
- /*
- * De-skew enabled with default 111b value.
- * This fixes some artifacts problem in some mode on board 2.2.
- * Somehow this fix does not affect board 2.1.
- */
- if (deskew_enable == 0)
- config = SII164_DESKEW_DISABLE;
- else
- config = SII164_DESKEW_ENABLE;
-
- switch (deskew_setting) {
- case 0:
- config |= SII164_DESKEW_1_STEP;
- break;
- case 1:
- config |= SII164_DESKEW_2_STEP;
- break;
- case 2:
- config |= SII164_DESKEW_3_STEP;
- break;
- case 3:
- config |= SII164_DESKEW_4_STEP;
- break;
- case 4:
- config |= SII164_DESKEW_5_STEP;
- break;
- case 5:
- config |= SII164_DESKEW_6_STEP;
- break;
- case 6:
- config |= SII164_DESKEW_7_STEP;
- break;
- case 7:
- config |= SII164_DESKEW_8_STEP;
- break;
- }
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_DESKEW, config);
-
- /* Enable/Disable Continuous Sync. */
- if (continuous_sync_enable == 0)
- config = SII164_PLL_FILTER_SYNC_CONTINUOUS_DISABLE;
- else
- config = SII164_PLL_FILTER_SYNC_CONTINUOUS_ENABLE;
-
- /* Enable/Disable PLL Filter */
- if (pll_filter_enable == 0)
- config |= SII164_PLL_FILTER_DISABLE;
- else
- config |= SII164_PLL_FILTER_ENABLE;
-
- /* Set the PLL Filter value */
- config |= ((pll_filter_value & 0x07) << 1);
-
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_PLL, config);
-
- /* Recover from Power Down and enable output. */
- config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
- config |= SII164_CONFIGURATION_POWER_NORMAL;
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
-
- return 0;
- }
-
- /* Return -1 if initialization fails. */
- return -1;
-}
-
-/* below sii164 function is not necessary */
-
-#ifdef SII164_FULL_FUNCTIONS
-
-/*
- * sii164ResetChip
- * This function resets the DVI Controller Chip.
- */
-void sii164ResetChip(void)
-{
- /* Power down */
- sii164SetPower(0);
- sii164SetPower(1);
-}
-
-/*
- * sii164GetChipString
- * This function returns a char string name of the current DVI Controller
- * chip.
- *
- * It's convenient for application need to display the chip name.
- */
-char *sii164GetChipString(void)
-{
- return gDviCtrlChipName;
-}
-
-/*
- * sii164SetPower
- * This function sets the power configuration of the DVI Controller Chip.
- *
- * Input:
- * powerUp - Flag to set the power down or up
- */
-void sii164SetPower(unsigned char powerUp)
-{
- unsigned char config;
-
- config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
- if (powerUp == 1) {
- /* Power up the chip */
- config &= ~SII164_CONFIGURATION_POWER_MASK;
- config |= SII164_CONFIGURATION_POWER_NORMAL;
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
- } else {
- /* Power down the chip */
- config &= ~SII164_CONFIGURATION_POWER_MASK;
- config |= SII164_CONFIGURATION_POWER_DOWN;
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
- }
-}
-
-/*
- * sii164SelectHotPlugDetectionMode
- * This function selects the mode of the hot plug detection.
- */
-static
-void sii164SelectHotPlugDetectionMode(enum sii164_hot_plug_mode hotPlugMode)
-{
- unsigned char detectReg;
-
- detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) &
- ~SII164_DETECT_MONITOR_SENSE_OUTPUT_FLAG;
- switch (hotPlugMode) {
- case SII164_HOTPLUG_DISABLE:
- detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HIGH;
- break;
- case SII164_HOTPLUG_USE_MDI:
- detectReg &= ~SII164_DETECT_INTERRUPT_MASK;
- detectReg |= SII164_DETECT_INTERRUPT_BY_HTPLG_PIN;
- detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_MDI;
- break;
- case SII164_HOTPLUG_USE_RSEN:
- detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_RSEN;
- break;
- case SII164_HOTPLUG_USE_HTPLG:
- detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HTPLG;
- break;
- }
-
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg);
-}
-
-/*
- * sii164EnableHotPlugDetection
- * This function enables the Hot Plug detection.
- *
- * enableHotPlug - Enable (=1) / disable (=0) Hot Plug detection
- */
-void sii164EnableHotPlugDetection(unsigned char enableHotPlug)
-{
- unsigned char detectReg;
-
- detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
-
- /* Depending on each DVI controller, need to enable the hot plug based
- * on each individual chip design.
- */
- if (enableHotPlug != 0)
- sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
- else
- sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_DISABLE);
-}
-
-/*
- * sii164IsConnected
- * Check if the DVI Monitor is connected.
- *
- * Output:
- * 0 - Not Connected
- * 1 - Connected
- */
-unsigned char sii164IsConnected(void)
-{
- unsigned char hotPlugValue;
-
- hotPlugValue = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) &
- SII164_DETECT_HOT_PLUG_STATUS_MASK;
- if (hotPlugValue == SII164_DETECT_HOT_PLUG_STATUS_ON)
- return 1;
- else
- return 0;
-}
-
-/*
- * sii164CheckInterrupt
- * Checks if interrupt has occurred.
- *
- * Output:
- * 0 - No interrupt
- * 1 - Interrupt occurs
- */
-unsigned char sii164CheckInterrupt(void)
-{
- unsigned char detectReg;
-
- detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) &
- SII164_DETECT_MONITOR_STATE_MASK;
- if (detectReg == SII164_DETECT_MONITOR_STATE_CHANGE)
- return 1;
- else
- return 0;
-}
-
-/*
- * sii164ClearInterrupt
- * Clear the hot plug interrupt.
- */
-void sii164ClearInterrupt(void)
-{
- unsigned char detectReg;
-
- /* Clear the MDI interrupt */
- detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
- i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT,
- detectReg | SII164_DETECT_MONITOR_STATE_CLEAR);
-}
-
-#endif
-
-#endif
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
deleted file mode 100644
index 71a7c1cb42c4..000000000000
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DDK750_SII164_H__
-#define DDK750_SII164_H__
-
-#define USE_DVICHIP
-
-/* Hot Plug detection mode structure */
-enum sii164_hot_plug_mode {
- SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit
- * (always high).
- */
-
- SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
- SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
- SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
-};
-
-/* Silicon Image SiI164 chip prototype */
-long sii164_init_chip(unsigned char edgeSelect,
- unsigned char busSelect,
- unsigned char dualEdgeClkSelect,
- unsigned char hsyncEnable,
- unsigned char vsyncEnable,
- unsigned char deskewEnable,
- unsigned char deskewSetting,
- unsigned char continuousSyncEnable,
- unsigned char pllFilterEnable,
- unsigned char pllFilterValue);
-
-unsigned short sii164_get_vendor_id(void);
-unsigned short sii164GetDeviceID(void);
-
-#ifdef SII164_FULL_FUNCTIONS
-void sii164ResetChip(void);
-char *sii164GetChipString(void);
-void sii164SetPower(unsigned char powerUp);
-void sii164EnableHotPlugDetection(unsigned char enableHotPlug);
-unsigned char sii164IsConnected(void);
-unsigned char sii164CheckInterrupt(void);
-void sii164ClearInterrupt(void);
-#endif
-/*
- * below register definition is used for
- * Silicon Image SiI164 DVI controller chip
- */
-/*
- * Vendor ID registers
- */
-#define SII164_VENDOR_ID_LOW 0x00
-#define SII164_VENDOR_ID_HIGH 0x01
-
-/*
- * Device ID registers
- */
-#define SII164_DEVICE_ID_LOW 0x02
-#define SII164_DEVICE_ID_HIGH 0x03
-
-/*
- * Device Revision
- */
-#define SII164_DEVICE_REVISION 0x04
-
-/*
- * Frequency Limitation registers
- */
-#define SII164_FREQUENCY_LIMIT_LOW 0x06
-#define SII164_FREQUENCY_LIMIT_HIGH 0x07
-
-/*
- * Power Down and Input Signal Configuration registers
- */
-#define SII164_CONFIGURATION 0x08
-
-/* Power down (PD) */
-#define SII164_CONFIGURATION_POWER_DOWN 0x00
-#define SII164_CONFIGURATION_POWER_NORMAL 0x01
-#define SII164_CONFIGURATION_POWER_MASK 0x01
-
-/* Input Edge Latch Select (EDGE) */
-#define SII164_CONFIGURATION_LATCH_FALLING 0x00
-#define SII164_CONFIGURATION_LATCH_RISING 0x02
-
-/* Bus Select (BSEL) */
-#define SII164_CONFIGURATION_BUS_12BITS 0x00
-#define SII164_CONFIGURATION_BUS_24BITS 0x04
-
-/* Dual Edge Clock Select (DSEL) */
-#define SII164_CONFIGURATION_CLOCK_SINGLE 0x00
-#define SII164_CONFIGURATION_CLOCK_DUAL 0x08
-
-/* Horizontal Sync Enable (HEN) */
-#define SII164_CONFIGURATION_HSYNC_FORCE_LOW 0x00
-#define SII164_CONFIGURATION_HSYNC_AS_IS 0x10
-
-/* Vertical Sync Enable (VEN) */
-#define SII164_CONFIGURATION_VSYNC_FORCE_LOW 0x00
-#define SII164_CONFIGURATION_VSYNC_AS_IS 0x20
-
-/*
- * Detection registers
- */
-#define SII164_DETECT 0x09
-
-/* Monitor Detect Interrupt (MDI) */
-#define SII164_DETECT_MONITOR_STATE_CHANGE 0x00
-#define SII164_DETECT_MONITOR_STATE_NO_CHANGE 0x01
-#define SII164_DETECT_MONITOR_STATE_CLEAR 0x01
-#define SII164_DETECT_MONITOR_STATE_MASK 0x01
-
-/* Hot Plug detect Input (HTPLG) */
-#define SII164_DETECT_HOT_PLUG_STATUS_OFF 0x00
-#define SII164_DETECT_HOT_PLUG_STATUS_ON 0x02
-#define SII164_DETECT_HOT_PLUG_STATUS_MASK 0x02
-
-/* Receiver Sense (RSEN) */
-#define SII164_DETECT_RECEIVER_SENSE_NOT_DETECTED 0x00
-#define SII164_DETECT_RECEIVER_SENSE_DETECTED 0x04
-
-/* Interrupt Generation Method (TSEL) */
-#define SII164_DETECT_INTERRUPT_BY_RSEN_PIN 0x00
-#define SII164_DETECT_INTERRUPT_BY_HTPLG_PIN 0x08
-#define SII164_DETECT_INTERRUPT_MASK 0x08
-
-/* Monitor Sense Output (MSEN) */
-#define SII164_DETECT_MONITOR_SENSE_OUTPUT_HIGH 0x00
-#define SII164_DETECT_MONITOR_SENSE_OUTPUT_MDI 0x10
-#define SII164_DETECT_MONITOR_SENSE_OUTPUT_RSEN 0x20
-#define SII164_DETECT_MONITOR_SENSE_OUTPUT_HTPLG 0x30
-#define SII164_DETECT_MONITOR_SENSE_OUTPUT_FLAG 0x30
-
-/*
- * Skewing registers
- */
-#define SII164_DESKEW 0x0A
-
-/* General Purpose Input (CTL[3:1]) */
-#define SII164_DESKEW_GENERAL_PURPOSE_INPUT_MASK 0x0E
-
-/* De-skewing Enable bit (DKEN) */
-#define SII164_DESKEW_DISABLE 0x00
-#define SII164_DESKEW_ENABLE 0x10
-
-/* De-skewing Setting (DK[3:1])*/
-#define SII164_DESKEW_1_STEP 0x00
-#define SII164_DESKEW_2_STEP 0x20
-#define SII164_DESKEW_3_STEP 0x40
-#define SII164_DESKEW_4_STEP 0x60
-#define SII164_DESKEW_5_STEP 0x80
-#define SII164_DESKEW_6_STEP 0xA0
-#define SII164_DESKEW_7_STEP 0xC0
-#define SII164_DESKEW_8_STEP 0xE0
-
-/*
- * User Configuration Data registers (CFG 7:0)
- */
-#define SII164_USER_CONFIGURATION 0x0B
-
-/*
- * PLL registers
- */
-#define SII164_PLL 0x0C
-
-/* PLL Filter Value (PLLF) */
-#define SII164_PLL_FILTER_VALUE_MASK 0x0E
-
-/* PLL Filter Enable (PFEN) */
-#define SII164_PLL_FILTER_DISABLE 0x00
-#define SII164_PLL_FILTER_ENABLE 0x01
-
-/* Sync Continuous (SCNT) */
-#define SII164_PLL_FILTER_SYNC_CONTINUOUS_DISABLE 0x00
-#define SII164_PLL_FILTER_SYNC_CONTINUOUS_ENABLE 0x80
-
-#endif
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 04c1b32a22c5..1d929aca399c 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -120,12 +120,12 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
sm750_hw_cursor_disable(cursor);
if (fbcursor->set & FB_CUR_SETSIZE)
- sm750_hw_cursor_setSize(cursor,
+ sm750_hw_cursor_set_size(cursor,
fbcursor->image.width,
fbcursor->image.height);
if (fbcursor->set & FB_CUR_SETPOS)
- sm750_hw_cursor_setPos(cursor,
+ sm750_hw_cursor_set_pos(cursor,
fbcursor->image.dx - info->var.xoffset,
fbcursor->image.dy - info->var.yoffset);
@@ -141,14 +141,12 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
- sm750_hw_cursor_setColor(cursor, fg, bg);
+ sm750_hw_cursor_set_color(cursor, fg, bg);
}
if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
- sm750_hw_cursor_setData(cursor,
- fbcursor->rop,
- fbcursor->image.data,
- fbcursor->mask);
+ sm750_hw_cursor_set_data(cursor, fbcursor->rop, fbcursor->image.data,
+ fbcursor->mask);
}
if (fbcursor->enable)
@@ -394,9 +392,9 @@ static int lynxfb_ops_set_par(struct fb_info *info)
pr_err("bpp %d not supported\n", var->bits_per_pixel);
return ret;
}
- ret = hw_sm750_crtc_setMode(crtc, var, fix);
+ ret = hw_sm750_crtc_set_mode(crtc, var, fix);
if (!ret)
- ret = hw_sm750_output_setMode(output, var, fix);
+ ret = hw_sm750_output_set_mode(output, var, fix);
return ret;
}
@@ -514,7 +512,7 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
return -ENOMEM;
}
- return hw_sm750_crtc_checkMode(crtc, var);
+ return hw_sm750_crtc_check_mode(crtc, var);
}
static int lynxfb_ops_setcolreg(unsigned int regno,
@@ -547,7 +545,7 @@ static int lynxfb_ops_setcolreg(unsigned int regno,
red >>= 8;
green >>= 8;
blue >>= 8;
- ret = hw_sm750_setColReg(crtc, regno, red, green, blue);
+ ret = hw_sm750_set_col_reg(crtc, regno, red, green, blue);
goto exit;
}
@@ -608,10 +606,10 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
crtc->ywrapstep = 0;
output->proc_setBLANK = (sm750_dev->revid == SM750LE_REVISION_ID) ?
- hw_sm750le_setBLANK : hw_sm750_setBLANK;
+ hw_sm750le_set_blank : hw_sm750_set_blank;
/* chip specific phase */
sm750_dev->accel.de_wait = (sm750_dev->revid == SM750LE_REVISION_ID) ?
- hw_sm750le_deWait : hw_sm750_deWait;
+ hw_sm750le_de_wait : hw_sm750_de_wait;
switch (sm750_dev->dataflow) {
case sm750_simul_pri:
output->paths = sm750_pnc;
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index aff69661c8e6..9cf8b3d30aac 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -193,26 +193,26 @@ static inline unsigned long ps_to_hz(unsigned int psvalue)
int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev);
int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev);
-void hw_sm750_initAccel(struct sm750_dev *sm750_dev);
-int hw_sm750_deWait(void);
-int hw_sm750le_deWait(void);
+void hw_sm750_init_accel(struct sm750_dev *sm750_dev);
+int hw_sm750_de_wait(void);
+int hw_sm750le_de_wait(void);
-int hw_sm750_output_setMode(struct lynxfb_output *output,
- struct fb_var_screeninfo *var,
- struct fb_fix_screeninfo *fix);
+int hw_sm750_output_set_mode(struct lynxfb_output *output,
+ struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix);
-int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc,
- struct fb_var_screeninfo *var);
+int hw_sm750_crtc_check_mode(struct lynxfb_crtc *crtc,
+ struct fb_var_screeninfo *var);
-int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
- struct fb_var_screeninfo *var,
- struct fb_fix_screeninfo *fix);
+int hw_sm750_crtc_set_mode(struct lynxfb_crtc *crtc,
+ struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix);
-int hw_sm750_setColReg(struct lynxfb_crtc *crtc, ushort index,
- ushort red, ushort green, ushort blue);
+int hw_sm750_set_col_reg(struct lynxfb_crtc *crtc, ushort index,
+ ushort red, ushort green, ushort blue);
-int hw_sm750_setBLANK(struct lynxfb_output *output, int blank);
-int hw_sm750le_setBLANK(struct lynxfb_output *output, int blank);
+int hw_sm750_set_blank(struct lynxfb_output *output, int blank);
+int hw_sm750le_set_blank(struct lynxfb_output *output, int blank);
int hw_sm750_pan_display(struct lynxfb_crtc *crtc,
const struct fb_var_screeninfo *var,
const struct fb_info *info);
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index eea4d1bd36ce..7ede144905c9 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -57,13 +57,13 @@ void sm750_hw_cursor_disable(struct lynx_cursor *cursor)
poke32(HWC_ADDRESS, 0);
}
-void sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h)
+void sm750_hw_cursor_set_size(struct lynx_cursor *cursor, int w, int h)
{
cursor->w = w;
cursor->h = h;
}
-void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y)
+void sm750_hw_cursor_set_pos(struct lynx_cursor *cursor, int x, int y)
{
u32 reg;
@@ -72,7 +72,7 @@ void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y)
poke32(HWC_LOCATION, reg);
}
-void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg)
+void sm750_hw_cursor_set_color(struct lynx_cursor *cursor, u32 fg, u32 bg)
{
u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) &
HWC_COLOR_12_2_RGB565_MASK;
@@ -81,8 +81,8 @@ void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg)
poke32(HWC_COLOR_3, 0xffe0);
}
-void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
- const u8 *pcol, const u8 *pmsk)
+void sm750_hw_cursor_set_data(struct lynx_cursor *cursor, u16 rop,
+ const u8 *pcol, const u8 *pmsk)
{
int i, j, count, pitch, offset;
u8 color, mask, opr;
@@ -131,8 +131,8 @@ void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
}
}
-void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop,
- const u8 *pcol, const u8 *pmsk)
+void sm750_hw_cursor_set_data2(struct lynx_cursor *cursor, u16 rop,
+ const u8 *pcol, const u8 *pmsk)
{
int i, j, count, pitch, offset;
u8 color, mask;
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index b59643dd61ed..88fa02f6377a 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -5,11 +5,11 @@
/* hw_cursor_xxx works for voyager,718 and 750 */
void sm750_hw_cursor_enable(struct lynx_cursor *cursor);
void sm750_hw_cursor_disable(struct lynx_cursor *cursor);
-void sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h);
-void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y);
-void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg);
-void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
- const u8 *data, const u8 *mask);
-void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop,
+void sm750_hw_cursor_set_size(struct lynx_cursor *cursor, int w, int h);
+void sm750_hw_cursor_set_pos(struct lynx_cursor *cursor, int x, int y);
+void sm750_hw_cursor_set_color(struct lynx_cursor *cursor, u32 fg, u32 bg);
+void sm750_hw_cursor_set_data(struct lynx_cursor *cursor, u16 rop,
const u8 *data, const u8 *mask);
+void sm750_hw_cursor_set_data2(struct lynx_cursor *cursor, u16 rop,
+ const u8 *data, const u8 *mask);
#endif
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 4bc89218c11c..7119b67efe11 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -55,9 +55,8 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
pr_err("mmio failed\n");
ret = -EFAULT;
goto exit;
- } else {
- pr_info("mmio virtual addr = %p\n", sm750_dev->pvReg);
}
+ pr_info("mmio virtual addr = %p\n", sm750_dev->pvReg);
sm750_dev->accel.dprBase = sm750_dev->pvReg + DE_BASE_ADDR_TYPE1;
sm750_dev->accel.dpPortBase = sm750_dev->pvReg + DE_PORT_ADDR_TYPE1;
@@ -84,9 +83,8 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
pr_err("Map video memory failed\n");
ret = -EFAULT;
goto exit;
- } else {
- pr_info("video memory vaddr = %p\n", sm750_dev->pvMem);
}
+ pr_info("video memory vaddr = %p\n", sm750_dev->pvMem);
exit:
return ret;
}
@@ -175,14 +173,14 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
/* init 2d engine */
if (!sm750_dev->accel_off)
- hw_sm750_initAccel(sm750_dev);
+ hw_sm750_init_accel(sm750_dev);
return 0;
}
-int hw_sm750_output_setMode(struct lynxfb_output *output,
- struct fb_var_screeninfo *var,
- struct fb_fix_screeninfo *fix)
+int hw_sm750_output_set_mode(struct lynxfb_output *output,
+ struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix)
{
int ret;
enum disp_output disp_set;
@@ -221,8 +219,8 @@ int hw_sm750_output_setMode(struct lynxfb_output *output,
return ret;
}
-int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc,
- struct fb_var_screeninfo *var)
+int hw_sm750_crtc_check_mode(struct lynxfb_crtc *crtc,
+ struct fb_var_screeninfo *var)
{
struct sm750_dev *sm750_dev;
struct lynxfb_par *par = container_of(crtc, struct lynxfb_par, crtc);
@@ -247,9 +245,9 @@ int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc,
}
/* set the controller's mode for @crtc charged with @var and @fix parameters */
-int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
- struct fb_var_screeninfo *var,
- struct fb_fix_screeninfo *fix)
+int hw_sm750_crtc_set_mode(struct lynxfb_crtc *crtc,
+ struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix)
{
int ret, fmt;
u32 reg;
@@ -372,8 +370,8 @@ exit:
return ret;
}
-int hw_sm750_setColReg(struct lynxfb_crtc *crtc, ushort index, ushort red,
- ushort green, ushort blue)
+int hw_sm750_set_col_reg(struct lynxfb_crtc *crtc, ushort index, ushort red,
+ ushort green, ushort blue)
{
static unsigned int add[] = { PANEL_PALETTE_RAM, CRT_PALETTE_RAM };
@@ -382,7 +380,7 @@ int hw_sm750_setColReg(struct lynxfb_crtc *crtc, ushort index, ushort red,
return 0;
}
-int hw_sm750le_setBLANK(struct lynxfb_output *output, int blank)
+int hw_sm750le_set_blank(struct lynxfb_output *output, int blank)
{
int dpms, crtdb;
@@ -423,7 +421,7 @@ int hw_sm750le_setBLANK(struct lynxfb_output *output, int blank)
return 0;
}
-int hw_sm750_setBLANK(struct lynxfb_output *output, int blank)
+int hw_sm750_set_blank(struct lynxfb_output *output, int blank)
{
unsigned int dpms, pps, crtdb;
@@ -476,7 +474,7 @@ int hw_sm750_setBLANK(struct lynxfb_output *output, int blank)
return 0;
}
-void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
+void hw_sm750_init_accel(struct sm750_dev *sm750_dev)
{
u32 reg;
@@ -506,7 +504,7 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
sm750_dev->accel.de_init(&sm750_dev->accel);
}
-int hw_sm750le_deWait(void)
+int hw_sm750le_de_wait(void)
{
int i = 0x10000000;
unsigned int mask = DE_STATE2_DE_STATUS_BUSY | DE_STATE2_DE_FIFO_EMPTY |
@@ -523,7 +521,7 @@ int hw_sm750le_deWait(void)
return -1;
}
-int hw_sm750_deWait(void)
+int hw_sm750_de_wait(void)
{
int i = 0x10000000;
unsigned int mask = SYSTEM_CTRL_DE_STATUS_BUSY |
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
index 1c1f040122d7..7d0ddd5c8cce 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
@@ -71,6 +71,7 @@ static int snd_bcm2835_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
+ struct snd_ctl_elem_info info;
int val, *valp;
int changed = 0;
@@ -84,6 +85,11 @@ static int snd_bcm2835_ctl_put(struct snd_kcontrol *kcontrol,
return -EINVAL;
val = ucontrol->value.integer.value[0];
+
+ snd_bcm2835_ctl_info(kcontrol, &info);
+ if (val < info.value.integer.min || val > info.value.integer.max)
+ return -EINVAL;
+
mutex_lock(&chip->audio_mutex);
if (val != *valp) {
*valp = val;
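The new check closes a hole where user space could store a value outside the range the control advertises through its .info callback. The decision in isolation, as a self-contained sketch with hypothetical types rather than the ALSA API:

    #include <errno.h>

    struct ctl_range { long min, max; };

    /* Reject, rather than silently clamp, values outside the advertised
     * range; this mirrors the -EINVAL path added above. */
    static int validate_ctl_value(const struct ctl_range *r, long val)
    {
            if (val < r->min || val > r->max)
                    return -EINVAL;
            return 0;
    }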
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index 6bce45925bf1..e670226f1edf 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -533,17 +533,15 @@ static int ctrl_set_image_effect(struct bcm2835_mmal_dev *dev,
control = &dev->component[COMP_CAMERA]->control;
- ret = vchiq_mmal_port_parameter_set(
- dev->instance, control,
- MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
- &imagefx, sizeof(imagefx));
+ ret = vchiq_mmal_port_parameter_set(dev->instance, control,
+ MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
+ &imagefx, sizeof(imagefx));
if (ret)
goto exit;
- ret = vchiq_mmal_port_parameter_set(
- dev->instance, control,
- MMAL_PARAMETER_COLOUR_EFFECT,
- &dev->colourfx, sizeof(dev->colourfx));
+ ret = vchiq_mmal_port_parameter_set(dev->instance, control,
+ MMAL_PARAMETER_COLOUR_EFFECT,
+ &dev->colourfx, sizeof(dev->colourfx));
}
exit:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 620ba6e0ab07..a2dde08c8a62 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3419,7 +3419,7 @@ iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
}
}
- if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr))
+ if (inet_addr_is_any(&np->np_sockaddr))
sockaddr = &conn->local_sockaddr;
else
sockaddr = &np->np_sockaddr;
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index f0d7eebfcad6..24db6b07493e 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -743,7 +743,8 @@ int iscsit_check_post_dataout(
void iscsit_handle_time2retain_timeout(struct timer_list *t)
{
- struct iscsit_session *sess = from_timer(sess, t, time2retain_timer);
+ struct iscsit_session *sess = timer_container_of(sess, t,
+ time2retain_timer);
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e7c3c4cdaae4..d8ca06e697d6 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1094,7 +1094,7 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
{
u32 pdu_length = 0, pdu_offset = 0;
u32 r2t_length = 0, r2t_offset = 0;
- struct iscsit_cmd *cmd = from_timer(cmd, t, dataout_timer);
+ struct iscsit_cmd *cmd = timer_container_of(cmd, t, dataout_timer);
struct iscsit_conn *conn = cmd->conn;
struct iscsit_session *sess = NULL;
struct iscsi_node_attrib *na;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 0bd62ab9a1cd..5e6cf34929b5 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -851,7 +851,8 @@ static int iscsit_add_nopin(struct iscsit_conn *conn, int want_response)
void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
- struct iscsit_conn *conn = from_timer(conn, t, nopin_response_timer);
+ struct iscsit_conn *conn = timer_container_of(conn, t,
+ nopin_response_timer);
struct iscsit_session *sess = conn->sess;
iscsit_inc_conn_usage_count(conn);
@@ -931,7 +932,7 @@ void iscsit_stop_nopin_response_timer(struct iscsit_conn *conn)
void iscsit_handle_nopin_timeout(struct timer_list *t)
{
- struct iscsit_conn *conn = from_timer(conn, t, nopin_timer);
+ struct iscsit_conn *conn = timer_container_of(conn, t, nopin_timer);
iscsit_inc_conn_usage_count(conn);
@@ -998,7 +999,7 @@ void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
void iscsit_login_timeout(struct timer_list *t)
{
- struct iscsit_conn *conn = from_timer(conn, t, login_timer);
+ struct iscsit_conn *conn = timer_container_of(conn, t, login_timer);
struct iscsi_login *login = conn->login;
pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 66804bf1ee32..0904ecae253a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -673,12 +673,10 @@ static ssize_t emulate_model_alias_store(struct config_item *item,
return ret;
BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
- if (flag) {
+ if (flag)
dev_set_t10_wwn_model_alias(dev);
- } else {
- strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
- sizeof(dev->t10_wwn.model));
- }
+ else
+ strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod);
da->emulate_model_alias = flag;
return count;
}
@@ -1433,7 +1431,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
ssize_t len;
ssize_t ret;
- len = strscpy(buf, page, sizeof(buf));
+ len = strscpy(buf, page);
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
@@ -1464,7 +1462,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
- strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
+ strscpy(dev->t10_wwn.vendor, stripped);
pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
" %s\n", dev->t10_wwn.vendor);
@@ -1489,7 +1487,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
ssize_t len;
ssize_t ret;
- len = strscpy(buf, page, sizeof(buf));
+ len = strscpy(buf, page);
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
@@ -1520,7 +1518,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
- strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));
+ strscpy(dev->t10_wwn.model, stripped);
pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
dev->t10_wwn.model);
@@ -1545,7 +1543,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
ssize_t len;
ssize_t ret;
- len = strscpy(buf, page, sizeof(buf));
+ len = strscpy(buf, page);
if (len > 0) {
/* Strip any newline added from userspace. */
stripped = strstrip(buf);
@@ -1576,7 +1574,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
}
BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
- strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));
+ strscpy(dev->t10_wwn.revision, stripped);
pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
dev->t10_wwn.revision);
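Every strscpy() call in this file drops its explicit size argument: when the destination is a fixed-size array, the kernel's two-argument form derives the bound with sizeof() at the call site, so the size can no longer drift out of sync with the buffer it guards. A userspace approximation of the semantics, offered only as a sketch:

    #include <stddef.h>
    #include <string.h>

    #define E2BIG 7

    /* strscpy()-like: bounded copy that always NUL-terminates and returns
     * the copied length, or -E2BIG if src had to be truncated. */
    static long strscpy_sized(char *dst, const char *src, size_t size)
    {
            size_t len = strnlen(src, size);

            if (len == size) {              /* src does not fit as-is */
                    if (size) {
                            memcpy(dst, src, size - 1);
                            dst[size - 1] = '\0';
                    }
                    return -E2BIG;
            }
            memcpy(dst, src, len + 1);
            return (long)len;
    }

    /* Two-argument form: dst must be a true array, so sizeof() yields the
     * buffer size, as in the hunks above. */
    #define strscpy2(dst, src) strscpy_sized(dst, src, sizeof(dst))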
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cc2da086f96e..7bb711b24c0d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -55,14 +55,14 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
- atomic_long_inc(&deve->total_cmds);
+ this_cpu_inc(deve->stats->total_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &deve->write_bytes);
+ this_cpu_add(deve->stats->write_bytes,
+ se_cmd->data_length);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &deve->read_bytes);
+ this_cpu_add(deve->stats->read_bytes,
+ se_cmd->data_length);
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
deve->lun_access_ro) {
@@ -126,14 +126,14 @@ out_unlock:
* target_core_fabric_configfs.c:target_fabric_port_release
*/
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
- atomic_long_inc(&se_cmd->se_dev->num_cmds);
+ this_cpu_inc(se_cmd->se_dev->stats->total_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &se_cmd->se_dev->write_bytes);
+ this_cpu_add(se_cmd->se_dev->stats->write_bytes,
+ se_cmd->data_length);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &se_cmd->se_dev->read_bytes);
+ this_cpu_add(se_cmd->se_dev->stats->read_bytes,
+ se_cmd->data_length);
return ret;
}
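The I/O accounting moves from shared atomic_long_t counters, which bounce a cacheline between CPUs on every command, to per-CPU counters updated with this_cpu_inc()/this_cpu_add(). Writers only touch their local slot; whoever reports the statistics must sum across CPUs. A kernel-context sketch of that read side, with the struct layout assumed from the hunk above rather than taken from the target headers:

    #include <linux/percpu.h>
    #include <linux/types.h>

    /* Assumed layout, per the alloc_percpu() calls in this patch. */
    struct se_dev_io_stats {
            u64 total_cmds;
            u64 read_bytes;
            u64 write_bytes;
    };

    static u64 dev_total_cmds(struct se_dev_io_stats __percpu *stats)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu_ptr(stats, cpu)->total_cmds;
            return sum;
    }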
@@ -322,6 +322,7 @@ int core_enable_device_list_for_node(
struct se_portal_group *tpg)
{
struct se_dev_entry *orig, *new;
+ int ret = 0;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new) {
@@ -329,6 +330,12 @@ int core_enable_device_list_for_node(
return -ENOMEM;
}
+ new->stats = alloc_percpu(struct se_dev_entry_io_stats);
+ if (!new->stats) {
+ ret = -ENOMEM;
+ goto free_deve;
+ }
+
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
@@ -351,8 +358,8 @@ int core_enable_device_list_for_node(
" for dynamic -> explicit NodeACL conversion:"
" %s\n", nacl->initiatorname);
mutex_unlock(&nacl->lun_entry_mutex);
- kfree(new);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_stats;
}
if (orig->se_lun_acl != NULL) {
pr_warn_ratelimited("Detected existing explicit"
@@ -360,8 +367,8 @@ int core_enable_device_list_for_node(
" mapped_lun: %llu, failing\n",
nacl->initiatorname, mapped_lun);
mutex_unlock(&nacl->lun_entry_mutex);
- kfree(new);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_stats;
}
new->se_lun = lun;
@@ -394,6 +401,20 @@ int core_enable_device_list_for_node(
target_luns_data_has_changed(nacl, new, true);
return 0;
+
+free_stats:
+ free_percpu(new->stats);
+free_deve:
+ kfree(new);
+ return ret;
+}
+
+static void target_free_dev_entry(struct rcu_head *head)
+{
+ struct se_dev_entry *deve = container_of(head, struct se_dev_entry,
+ rcu_head);
+ free_percpu(deve->stats);
+ kfree(deve);
}
void core_disable_device_list_for_node(
@@ -443,7 +464,7 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
- kfree_rcu(orig, rcu_head);
+ call_rcu(&orig->rcu_head, target_free_dev_entry);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
target_luns_data_has_changed(nacl, NULL, false);
@@ -679,6 +700,18 @@ static void scsi_dump_inquiry(struct se_device *dev)
pr_debug(" Type: %s ", scsi_device_type(device_type));
}
+static void target_non_ordered_release(struct percpu_ref *ref)
+{
+ struct se_device *dev = container_of(ref, struct se_device,
+ non_ordered);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ if (!list_empty(&dev->delayed_cmd_list))
+ schedule_work(&dev->delayed_cmd_work);
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+}
+
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
@@ -689,11 +722,13 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
if (!dev)
return NULL;
+ dev->stats = alloc_percpu(struct se_dev_io_stats);
+ if (!dev->stats)
+ goto free_device;
+
dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
- if (!dev->queues) {
- hba->backend->ops->free_device(dev);
- return NULL;
- }
+ if (!dev->queues)
+ goto free_stats;
dev->queue_cnt = nr_cpu_ids;
for (i = 0; i < dev->queue_cnt; i++) {
@@ -707,6 +742,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_WORK(&q->sq.work, target_queued_submit_work);
}
+ if (percpu_ref_init(&dev->non_ordered, target_non_ordered_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+ goto free_queues;
+
dev->se_hba = hba;
dev->transport = hba->backend->ops;
dev->transport_flags = dev->transport->transport_flags_default;
@@ -791,6 +830,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
sizeof(dev->t10_wwn.revision));
return dev;
+
+free_queues:
+ kfree(dev->queues);
+free_stats:
+ free_percpu(dev->stats);
+free_device:
+ hba->backend->ops->free_device(dev);
+ return NULL;
}
/*
@@ -980,6 +1027,9 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
+ percpu_ref_exit(&dev->non_ordered);
+ cancel_work_sync(&dev->delayed_cmd_work);
+
if (target_dev_configured(dev)) {
dev->transport->destroy_device(dev);
@@ -1001,6 +1051,7 @@ void target_free_device(struct se_device *dev)
dev->transport->free_prot(dev);
kfree(dev->queues);
+ free_percpu(dev->stats);
dev->transport->free_device(dev);
}
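
The conversion above from atomic_long_t counters to per-CPU counters removes shared-cacheline contention from the I/O fast path; readers fold the per-CPU slots together on demand, and the dev entry is freed through call_rcu() so the percpu allocation is only released once RCU readers are done. A minimal sketch of the pattern, with hypothetical names:

struct demo_io_stats {
	u32 total_cmds;
	u32 read_bytes;
	u32 write_bytes;
};

struct demo_dev {
	struct demo_io_stats __percpu *stats;
};

static int demo_init(struct demo_dev *dev)
{
	dev->stats = alloc_percpu(struct demo_io_stats);
	return dev->stats ? 0 : -ENOMEM;
}

/* Hot path: lockless update of the local CPU's slot. */
static void demo_account_read(struct demo_dev *dev, u32 bytes)
{
	this_cpu_inc(dev->stats->total_cmds);
	this_cpu_add(dev->stats->read_bytes, bytes);
}

/* Slow path (stats file read): sum all per-CPU slots. */
static u32 demo_total_cmds(struct demo_dev *dev)
{
	unsigned int cpu;
	u32 cmds = 0;

	for_each_possible_cpu(cpu)
		cmds += per_cpu_ptr(dev->stats, cpu)->total_cmds;
	return cmds;
}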
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 34cf2c399b39..70905805cb17 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1842,7 +1842,9 @@ out:
}
kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
- core_scsi3_lunacl_undepend_item(dest_se_deve);
+
+ if (dest_se_deve)
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
if (is_local)
continue;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 0a02492bef70..aad0096afa21 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1325,7 +1325,7 @@ static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev)
usage_bits[10] |= 0x18;
}
-static struct target_opcode_descriptor tcm_opcode_read6 = {
+static const struct target_opcode_descriptor tcm_opcode_read6 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_6,
.cdb_size = 6,
@@ -1333,7 +1333,7 @@ static struct target_opcode_descriptor tcm_opcode_read6 = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_read10 = {
+static const struct target_opcode_descriptor tcm_opcode_read10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_10,
.cdb_size = 10,
@@ -1343,7 +1343,7 @@ static struct target_opcode_descriptor tcm_opcode_read10 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_read12 = {
+static const struct target_opcode_descriptor tcm_opcode_read12 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_12,
.cdb_size = 12,
@@ -1353,7 +1353,7 @@ static struct target_opcode_descriptor tcm_opcode_read12 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_read16 = {
+static const struct target_opcode_descriptor tcm_opcode_read16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_16,
.cdb_size = 16,
@@ -1364,7 +1364,7 @@ static struct target_opcode_descriptor tcm_opcode_read16 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write6 = {
+static const struct target_opcode_descriptor tcm_opcode_write6 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_6,
.cdb_size = 6,
@@ -1372,7 +1372,7 @@ static struct target_opcode_descriptor tcm_opcode_write6 = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_write10 = {
+static const struct target_opcode_descriptor tcm_opcode_write10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_10,
.cdb_size = 10,
@@ -1382,7 +1382,7 @@ static struct target_opcode_descriptor tcm_opcode_write10 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write_verify10 = {
+static const struct target_opcode_descriptor tcm_opcode_write_verify10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_VERIFY,
.cdb_size = 10,
@@ -1392,7 +1392,7 @@ static struct target_opcode_descriptor tcm_opcode_write_verify10 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write12 = {
+static const struct target_opcode_descriptor tcm_opcode_write12 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_12,
.cdb_size = 12,
@@ -1402,7 +1402,7 @@ static struct target_opcode_descriptor tcm_opcode_write12 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write16 = {
+static const struct target_opcode_descriptor tcm_opcode_write16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_16,
.cdb_size = 16,
@@ -1413,7 +1413,7 @@ static struct target_opcode_descriptor tcm_opcode_write16 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
+static const struct target_opcode_descriptor tcm_opcode_write_verify16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_VERIFY_16,
.cdb_size = 16,
@@ -1424,7 +1424,7 @@ static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_ws_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
@@ -1434,7 +1434,7 @@ static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
!!ops->execute_write_same;
}
-static struct target_opcode_descriptor tcm_opcode_write_same32 = {
+static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = VARIABLE_LENGTH_CMD,
@@ -1452,7 +1452,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same32 = {
.update_usage_bits = set_dpofua_usage_bits32,
};
-static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1460,7 +1460,7 @@ static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
return dev->dev_attrib.emulate_caw;
}
-static struct target_opcode_descriptor tcm_opcode_compare_write = {
+static const struct target_opcode_descriptor tcm_opcode_compare_write = {
.support = SCSI_SUPPORT_FULL,
.opcode = COMPARE_AND_WRITE,
.cdb_size = 16,
@@ -1472,7 +1472,7 @@ static struct target_opcode_descriptor tcm_opcode_compare_write = {
.update_usage_bits = set_dpofua_usage_bits,
};
-static struct target_opcode_descriptor tcm_opcode_read_capacity = {
+static const struct target_opcode_descriptor tcm_opcode_read_capacity = {
.support = SCSI_SUPPORT_FULL,
.opcode = READ_CAPACITY,
.cdb_size = 10,
@@ -1481,7 +1481,7 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity = {
0x01, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
+static const struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = SERVICE_ACTION_IN_16,
@@ -1493,7 +1493,7 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_rep_ref_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1507,7 +1507,7 @@ static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
return true;
}
-static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
+static const struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = SERVICE_ACTION_IN_16,
@@ -1520,7 +1520,7 @@ static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
.enabled = tcm_is_rep_ref_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_sync_cache = {
+static const struct target_opcode_descriptor tcm_opcode_sync_cache = {
.support = SCSI_SUPPORT_FULL,
.opcode = SYNCHRONIZE_CACHE,
.cdb_size = 10,
@@ -1529,7 +1529,7 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
+static const struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = SYNCHRONIZE_CACHE_16,
.cdb_size = 16,
@@ -1539,7 +1539,7 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
-static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_unmap_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct exec_cmd_ops *ops = cmd->protocol_data;
@@ -1548,7 +1548,7 @@ static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
}
-static struct target_opcode_descriptor tcm_opcode_unmap = {
+static const struct target_opcode_descriptor tcm_opcode_unmap = {
.support = SCSI_SUPPORT_FULL,
.opcode = UNMAP,
.cdb_size = 10,
@@ -1558,7 +1558,7 @@ static struct target_opcode_descriptor tcm_opcode_unmap = {
.enabled = tcm_is_unmap_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_write_same = {
+static const struct target_opcode_descriptor tcm_opcode_write_same = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_SAME,
.cdb_size = 10,
@@ -1568,7 +1568,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same = {
.enabled = tcm_is_ws_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_write_same16 = {
+static const struct target_opcode_descriptor tcm_opcode_write_same16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = WRITE_SAME_16,
.cdb_size = 16,
@@ -1579,7 +1579,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same16 = {
.enabled = tcm_is_ws_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_verify = {
+static const struct target_opcode_descriptor tcm_opcode_verify = {
.support = SCSI_SUPPORT_FULL,
.opcode = VERIFY,
.cdb_size = 10,
@@ -1588,7 +1588,7 @@ static struct target_opcode_descriptor tcm_opcode_verify = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_verify16 = {
+static const struct target_opcode_descriptor tcm_opcode_verify16 = {
.support = SCSI_SUPPORT_FULL,
.opcode = VERIFY_16,
.cdb_size = 16,
@@ -1598,7 +1598,7 @@ static struct target_opcode_descriptor tcm_opcode_verify16 = {
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_start_stop = {
+static const struct target_opcode_descriptor tcm_opcode_start_stop = {
.support = SCSI_SUPPORT_FULL,
.opcode = START_STOP,
.cdb_size = 6,
@@ -1606,7 +1606,7 @@ static struct target_opcode_descriptor tcm_opcode_start_stop = {
0x01, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_mode_select = {
+static const struct target_opcode_descriptor tcm_opcode_mode_select = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SELECT,
.cdb_size = 6,
@@ -1614,7 +1614,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_select = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_mode_select10 = {
+static const struct target_opcode_descriptor tcm_opcode_mode_select10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SELECT_10,
.cdb_size = 10,
@@ -1623,7 +1623,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_select10 = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_mode_sense = {
+static const struct target_opcode_descriptor tcm_opcode_mode_sense = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SENSE,
.cdb_size = 6,
@@ -1631,7 +1631,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_sense = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
+static const struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = MODE_SENSE_10,
.cdb_size = 10,
@@ -1640,7 +1640,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
+static const struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
@@ -1651,7 +1651,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
+static const struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
@@ -1662,7 +1662,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
0xff, SCSI_CONTROL_MASK},
};
-static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_pr_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1704,7 +1704,7 @@ static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
return true;
}
-static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
+static const struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
@@ -1716,7 +1716,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
+static const struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_IN,
@@ -1728,7 +1728,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_register = {
+static const struct target_opcode_descriptor tcm_opcode_pro_register = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1740,7 +1740,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_register = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_reserve = {
+static const struct target_opcode_descriptor tcm_opcode_pro_reserve = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1752,7 +1752,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_reserve = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_release = {
+static const struct target_opcode_descriptor tcm_opcode_pro_release = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1764,7 +1764,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_release = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_clear = {
+static const struct target_opcode_descriptor tcm_opcode_pro_clear = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1776,7 +1776,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_clear = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_preempt = {
+static const struct target_opcode_descriptor tcm_opcode_pro_preempt = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1788,7 +1788,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_preempt = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
+static const struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1800,7 +1800,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
+static const struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1814,7 +1814,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
+static const struct target_opcode_descriptor tcm_opcode_pro_register_move = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = PERSISTENT_RESERVE_OUT,
@@ -1826,7 +1826,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_release = {
+static const struct target_opcode_descriptor tcm_opcode_release = {
.support = SCSI_SUPPORT_FULL,
.opcode = RELEASE_6,
.cdb_size = 6,
@@ -1835,7 +1835,7 @@ static struct target_opcode_descriptor tcm_opcode_release = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_release10 = {
+static const struct target_opcode_descriptor tcm_opcode_release10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = RELEASE_10,
.cdb_size = 10,
@@ -1845,7 +1845,7 @@ static struct target_opcode_descriptor tcm_opcode_release10 = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_reserve = {
+static const struct target_opcode_descriptor tcm_opcode_reserve = {
.support = SCSI_SUPPORT_FULL,
.opcode = RESERVE_6,
.cdb_size = 6,
@@ -1854,7 +1854,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_reserve10 = {
+static const struct target_opcode_descriptor tcm_opcode_reserve10 = {
.support = SCSI_SUPPORT_FULL,
.opcode = RESERVE_10,
.cdb_size = 10,
@@ -1864,7 +1864,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve10 = {
.enabled = tcm_is_pr_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_request_sense = {
+static const struct target_opcode_descriptor tcm_opcode_request_sense = {
.support = SCSI_SUPPORT_FULL,
.opcode = REQUEST_SENSE,
.cdb_size = 6,
@@ -1872,7 +1872,7 @@ static struct target_opcode_descriptor tcm_opcode_request_sense = {
0xff, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_inquiry = {
+static const struct target_opcode_descriptor tcm_opcode_inquiry = {
.support = SCSI_SUPPORT_FULL,
.opcode = INQUIRY,
.cdb_size = 6,
@@ -1880,7 +1880,7 @@ static struct target_opcode_descriptor tcm_opcode_inquiry = {
0xff, SCSI_CONTROL_MASK},
};
-static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_3pc_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1888,7 +1888,7 @@ static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
return dev->dev_attrib.emulate_3pc;
}
-static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
+static const struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = EXTENDED_COPY,
@@ -1900,7 +1900,7 @@ static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
.enabled = tcm_is_3pc_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
+static const struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = RECEIVE_COPY_RESULTS,
@@ -1914,7 +1914,7 @@ static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
.enabled = tcm_is_3pc_enabled,
};
-static struct target_opcode_descriptor tcm_opcode_report_luns = {
+static const struct target_opcode_descriptor tcm_opcode_report_luns = {
.support = SCSI_SUPPORT_FULL,
.opcode = REPORT_LUNS,
.cdb_size = 12,
@@ -1923,7 +1923,7 @@ static struct target_opcode_descriptor tcm_opcode_report_luns = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
+static const struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
.support = SCSI_SUPPORT_FULL,
.opcode = TEST_UNIT_READY,
.cdb_size = 6,
@@ -1931,7 +1931,7 @@ static struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
0x00, SCSI_CONTROL_MASK},
};
-static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
+static const struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_IN,
@@ -1942,7 +1942,7 @@ static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};
-static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
+static bool spc_rsoc_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1950,7 +1950,7 @@ static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
return dev->dev_attrib.emulate_rsoc;
}
-static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
+static const struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_IN,
@@ -1963,7 +1963,7 @@ static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
.enabled = spc_rsoc_enabled,
};
-static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
+static bool tcm_is_set_tpg_enabled(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd)
{
struct t10_alua_tg_pt_gp *l_tg_pt_gp;
@@ -1984,7 +1984,7 @@ static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
return true;
}
-static struct target_opcode_descriptor tcm_opcode_set_tpg = {
+static const struct target_opcode_descriptor tcm_opcode_set_tpg = {
.support = SCSI_SUPPORT_FULL,
.serv_action_valid = 1,
.opcode = MAINTENANCE_OUT,
@@ -1996,7 +1996,7 @@ static struct target_opcode_descriptor tcm_opcode_set_tpg = {
.enabled = tcm_is_set_tpg_enabled,
};
-static struct target_opcode_descriptor *tcm_supported_opcodes[] = {
+static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
&tcm_opcode_read6,
&tcm_opcode_read10,
&tcm_opcode_read12,
@@ -2053,7 +2053,7 @@ static struct target_opcode_descriptor *tcm_supported_opcodes[] = {
static int
spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
- struct target_opcode_descriptor *descr)
+ const struct target_opcode_descriptor *descr)
{
if (!ctdp)
return 0;
@@ -2068,7 +2068,7 @@ spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
static int
spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
- struct target_opcode_descriptor *descr)
+ const struct target_opcode_descriptor *descr)
{
int td_size = 0;
@@ -2087,7 +2087,7 @@ spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
static int
spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
- struct target_opcode_descriptor *descr,
+ const struct target_opcode_descriptor *descr,
struct se_device *dev)
{
int td_size = 0;
@@ -2110,9 +2110,9 @@ spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
}
static sense_reason_t
-spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
+spc_rsoc_get_descr(struct se_cmd *cmd, const struct target_opcode_descriptor **opcode)
{
- struct target_opcode_descriptor *descr;
+ const struct target_opcode_descriptor *descr;
struct se_session *sess = cmd->se_sess;
unsigned char *cdb = cmd->t_task_cdb;
u8 opts = cdb[2] & 0x3;
@@ -2199,7 +2199,7 @@ static sense_reason_t
spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
{
int descr_num = ARRAY_SIZE(tcm_supported_opcodes);
- struct target_opcode_descriptor *descr = NULL;
+ const struct target_opcode_descriptor *descr = NULL;
unsigned char *cdb = cmd->t_task_cdb;
u8 rctd = (cdb[2] >> 7) & 0x1;
unsigned char *buf = NULL;
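
Const-qualifying the opcode descriptor tables above lets them live in .rodata and guarantees the RSOC code only reads them; the enabled() callbacks and encode helpers take const pointers so the qualifier propagates without casts. A reduced, hypothetical sketch of the shape:

struct demo_opcode_descriptor {
	u8 opcode;
	bool (*enabled)(const struct demo_opcode_descriptor *descr);
};

static bool demo_always_enabled(const struct demo_opcode_descriptor *descr)
{
	return true;
}

/* Both the objects and the pointer table are const, so the whole
 * descriptor set ends up write-protected in .rodata.
 */
static const struct demo_opcode_descriptor demo_read6 = {
	.opcode = 0x08,
	.enabled = demo_always_enabled,
};

static const struct demo_opcode_descriptor *demo_supported[] = {
	&demo_read6,
};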
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 210648a0092e..6bdf2d8bd694 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -280,30 +280,51 @@ static ssize_t target_stat_lu_num_cmds_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
+ struct se_dev_io_stats *stats;
+ unsigned int cpu;
+ u32 cmds = 0;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(dev->stats, cpu);
+ cmds += stats->total_cmds;
+ }
/* scsiLuNumCommands */
- return snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&dev->num_cmds));
+ return snprintf(page, PAGE_SIZE, "%u\n", cmds);
}
static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
+ struct se_dev_io_stats *stats;
+ unsigned int cpu;
+ u32 bytes = 0;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(dev->stats, cpu);
+ bytes += stats->read_bytes;
+ }
/* scsiLuReadMegaBytes */
- return snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&dev->read_bytes) >> 20);
+ return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
}
static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item,
char *page)
{
struct se_device *dev = to_stat_lu_dev(item);
+ struct se_dev_io_stats *stats;
+ unsigned int cpu;
+ u32 bytes = 0;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(dev->stats, cpu);
+ bytes += stats->write_bytes;
+ }
/* scsiLuWrittenMegaBytes */
- return snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&dev->write_bytes) >> 20);
+ return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
}
static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
@@ -1019,8 +1040,11 @@ static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
+ unsigned int cpu;
ssize_t ret;
+ u32 cmds = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
@@ -1028,9 +1052,14 @@ static ssize_t target_stat_auth_num_cmds_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(deve->stats, cpu);
+ cmds += stats->total_cmds;
+ }
+
/* scsiAuthIntrOutCommands */
- ret = snprintf(page, PAGE_SIZE, "%lu\n",
- atomic_long_read(&deve->total_cmds));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", cmds);
rcu_read_unlock();
return ret;
}
@@ -1040,8 +1069,11 @@ static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
+ unsigned int cpu;
ssize_t ret;
+ u32 bytes = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
@@ -1049,9 +1081,14 @@ static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(deve->stats, cpu);
+ bytes += stats->read_bytes;
+ }
+
/* scsiAuthIntrReadMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(atomic_long_read(&deve->read_bytes) >> 20));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
rcu_read_unlock();
return ret;
}
@@ -1061,8 +1098,11 @@ static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
{
struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
+ struct se_dev_entry_io_stats *stats;
struct se_dev_entry *deve;
+ unsigned int cpu;
ssize_t ret;
+ u32 bytes = 0;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
@@ -1070,9 +1110,14 @@ static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item,
rcu_read_unlock();
return -ENODEV;
}
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(deve->stats, cpu);
+ bytes += stats->write_bytes;
+ }
+
/* scsiAuthIntrWrittenMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(atomic_long_read(&deve->write_bytes) >> 20));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20);
rcu_read_unlock();
return ret;
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 05d29201b730..0a76bdfe5528 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2213,6 +2213,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
@@ -2225,13 +2226,10 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
*/
switch (cmd->sam_task_attr) {
case TCM_HEAD_TAG:
- atomic_inc_mb(&dev->non_ordered);
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
cmd->t_task_cdb[0]);
return false;
case TCM_ORDERED_TAG:
- atomic_inc_mb(&dev->delayed_cmd_count);
-
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
cmd->t_task_cdb[0]);
break;
@@ -2239,29 +2237,29 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
- atomic_inc_mb(&dev->non_ordered);
-
- if (atomic_read(&dev->delayed_cmd_count) == 0)
+retry:
+ if (percpu_ref_tryget_live(&dev->non_ordered))
return false;
+
break;
}
- if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
- atomic_inc_mb(&dev->delayed_cmd_count);
- /*
- * We will account for this when we dequeue from the delayed
- * list.
- */
- atomic_dec_mb(&dev->non_ordered);
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ if (cmd->sam_task_attr == TCM_SIMPLE_TAG &&
+ !percpu_ref_is_dying(&dev->non_ordered)) {
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+ /* We raced with the last ordered completion so retry. */
+ goto retry;
+ } else if (!percpu_ref_is_dying(&dev->non_ordered)) {
+ percpu_ref_kill(&dev->non_ordered);
}
- spin_lock_irq(&cmd->t_state_lock);
+ spin_lock(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_SENT;
- spin_unlock_irq(&cmd->t_state_lock);
+ spin_unlock(&cmd->t_state_lock);
- spin_lock(&dev->delayed_cmd_lock);
list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
- spin_unlock(&dev->delayed_cmd_lock);
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
cmd->t_task_cdb[0], cmd->sam_task_attr);
@@ -2313,41 +2311,52 @@ void target_do_delayed_work(struct work_struct *work)
while (!dev->ordered_sync_in_progress) {
struct se_cmd *cmd;
- if (list_empty(&dev->delayed_cmd_list))
+ /*
+ * We can be woken up early/late due to races or the
+ * extra wake up we do when adding commands to the list.
+ * We check for both cases here.
+ */
+ if (list_empty(&dev->delayed_cmd_list) ||
+ !percpu_ref_is_zero(&dev->non_ordered))
break;
cmd = list_entry(dev->delayed_cmd_list.next,
struct se_cmd, se_delayed_node);
+ cmd->se_cmd_flags |= SCF_TASK_ORDERED_SYNC;
+ cmd->transport_state |= CMD_T_SENT;
- if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
- /*
- * Check if we started with:
- * [ordered] [simple] [ordered]
- * and we are now at the last ordered so we have to wait
- * for the simple cmd.
- */
- if (atomic_read(&dev->non_ordered) > 0)
- break;
-
- dev->ordered_sync_in_progress = true;
- }
+ dev->ordered_sync_in_progress = true;
list_del(&cmd->se_delayed_node);
- atomic_dec_mb(&dev->delayed_cmd_count);
spin_unlock(&dev->delayed_cmd_lock);
- if (cmd->sam_task_attr != TCM_ORDERED_TAG)
- atomic_inc_mb(&dev->non_ordered);
-
- cmd->transport_state |= CMD_T_SENT;
-
__target_execute_cmd(cmd, true);
-
spin_lock(&dev->delayed_cmd_lock);
}
spin_unlock(&dev->delayed_cmd_lock);
}
+static void transport_complete_ordered_sync(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ dev->dev_cur_ordered_id++;
+
+ pr_debug("Incremented dev_cur_ordered_id: %u for type %d\n",
+ dev->dev_cur_ordered_id, cmd->sam_task_attr);
+
+ dev->ordered_sync_in_progress = false;
+
+ if (list_empty(&dev->delayed_cmd_list))
+ percpu_ref_resurrect(&dev->non_ordered);
+ else
+ schedule_work(&dev->delayed_cmd_work);
+
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+}
+
/*
* Called from I/O completion to determine which dormant/delayed
* and ordered cmds need to have their tasks added to the execution queue.
@@ -2360,30 +2369,24 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
return;
if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
- goto restart;
-
- if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
- atomic_dec_mb(&dev->non_ordered);
- dev->dev_cur_ordered_id++;
- } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
- atomic_dec_mb(&dev->non_ordered);
- dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
- dev->dev_cur_ordered_id);
- } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
- spin_lock(&dev->delayed_cmd_lock);
- dev->ordered_sync_in_progress = false;
- spin_unlock(&dev->delayed_cmd_lock);
+ return;
- dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
- dev->dev_cur_ordered_id);
- }
cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
-restart:
- if (atomic_read(&dev->delayed_cmd_count) > 0)
- schedule_work(&dev->delayed_cmd_work);
+ if (cmd->se_cmd_flags & SCF_TASK_ORDERED_SYNC) {
+ transport_complete_ordered_sync(cmd);
+ return;
+ }
+
+ switch (cmd->sam_task_attr) {
+ case TCM_SIMPLE_TAG:
+ percpu_ref_put(&dev->non_ordered);
+ break;
+ case TCM_ORDERED_TAG:
+ /* All ordered should have been executed as sync */
+ WARN_ON(1);
+ break;
+ }
}
static void transport_complete_qf(struct se_cmd *cmd)
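
The rework above replaces the non_ordered/delayed_cmd_count atomics with a percpu_ref: SIMPLE commands take a per-CPU reference while the ref is live, an ORDERED command kills the ref, the release callback fires once the last in-flight reference drops and kicks the delayed work, and the ref is resurrected (hence PERCPU_REF_ALLOW_REINIT) after the ordered sync completes. A minimal sketch of that lifecycle, with hypothetical names:

struct demo_dev {
	struct percpu_ref non_ordered;
	struct work_struct ordered_work;
};

static void demo_release(struct percpu_ref *ref)
{
	/* Last SIMPLE command finished: safe to run the ordered one. */
	struct demo_dev *dev = container_of(ref, struct demo_dev,
					    non_ordered);

	schedule_work(&dev->ordered_work);
}

static bool demo_submit_simple(struct demo_dev *dev)
{
	/* Per-CPU fast path; fails once the ref has been killed. */
	return percpu_ref_tryget_live(&dev->non_ordered);
}

static void demo_queue_ordered(struct demo_dev *dev)
{
	/* Block new fast-path grabs; demo_release() fires at zero. */
	if (!percpu_ref_is_dying(&dev->non_ordered))
		percpu_ref_kill(&dev->non_ordered);
}

static void demo_ordered_done(struct demo_dev *dev)
{
	/* Needs PERCPU_REF_ALLOW_REINIT at percpu_ref_init() time. */
	percpu_ref_resurrect(&dev->non_ordered);
}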
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 43872ccc07cc..3fd963612775 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1564,7 +1564,7 @@ static void tcmu_device_timedout(struct tcmu_dev *udev)
static void tcmu_cmd_timedout(struct timer_list *t)
{
- struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
+ struct tcmu_dev *udev = timer_container_of(udev, t, cmd_timer);
pr_debug("%s cmd timeout has expired\n", udev->name);
tcmu_device_timedout(udev);
@@ -1572,7 +1572,7 @@ static void tcmu_cmd_timedout(struct timer_list *t)
static void tcmu_qfull_timedout(struct timer_list *t)
{
- struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
+ struct tcmu_dev *udev = timer_container_of(udev, t, qfull_timer);
pr_debug("%s qfull timeout has expired\n", udev->name);
tcmu_device_timedout(udev);
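
timer_container_of() is the successor of from_timer() with identical semantics: it recovers the structure embedding the timer_list from the callback argument. Sketch with a hypothetical device:

struct demo_dev {
	struct timer_list cmd_timer;
};

static void demo_cmd_timedout(struct timer_list *t)
{
	/* container_of() on the cmd_timer member of demo_dev */
	struct demo_dev *udev = timer_container_of(udev, t, cmd_timer);

	pr_debug("cmd timeout for %p\n", udev);
}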
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index e487231d25dc..fb39d9a19c69 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -3,19 +3,22 @@
* Copyright 2019 Advanced Micro Devices, Inc.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/psp-tee.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/device.h>
#include <linux/tee_core.h>
#include <linux/types.h>
-#include <linux/mm.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
+
#include "amdtee_private.h"
-#include <linux/psp-tee.h>
static struct amdtee_driver_data *drv_data;
static DEFINE_MUTEX(session_list_mutex);
@@ -458,7 +461,7 @@ static int __init amdtee_driver_init(void)
rc = psp_check_tee_status();
if (rc) {
- pr_err("amd-tee driver: tee not present\n");
+ pr_err("tee not present\n");
return rc;
}
@@ -494,7 +497,6 @@ static int __init amdtee_driver_init(void)
drv_data->amdtee = amdtee;
- pr_info("amd-tee driver initialization successful\n");
return 0;
err_device_unregister:
@@ -510,7 +512,7 @@ err_kfree_drv_data:
kfree(drv_data);
drv_data = NULL;
- pr_err("amd-tee driver initialization failed\n");
+ pr_err("initialization failed\n");
return rc;
}
module_init(amdtee_driver_init);
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index f0c3ac1103bb..26f8f7bbbe56 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1551,8 +1551,7 @@ fw_load:
data_pa_high, data_pa_low, 0, 0, 0, &res);
if (!rc)
rc = res.a0;
- if (fw)
- release_firmware(fw);
+ release_firmware(fw);
kfree(data_buf);
if (!rc) {
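
The NULL guard can be dropped because release_firmware() is a no-op when passed a NULL pointer, mirroring kfree(). Illustrative cleanup path:

static void demo_fw_cleanup(const struct firmware *fw, void *data_buf)
{
	release_firmware(fw);	/* fw may be NULL here */
	kfree(data_buf);	/* kfree(NULL) is a no-op too */
}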
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index d113679b1e2d..acc7998758ad 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/tee_core.h>
#include <linux/uaccess.h>
@@ -19,7 +20,7 @@
#define TEE_NUM_DEVICES 32
-#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x)))
#define TEE_UUID_NS_NAME_SIZE 128
@@ -487,7 +488,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx,
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
- if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
return -EINVAL;
if (arg.num_params) {
@@ -565,7 +566,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx,
if (copy_from_user(&arg, uarg, sizeof(arg)))
return -EFAULT;
- if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len)
return -EINVAL;
if (arg.num_params) {
@@ -699,7 +700,7 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
if (get_user(num_params, &uarg->num_params))
return -EFAULT;
- if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
+ if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len)
return -EINVAL;
params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
@@ -798,7 +799,7 @@ static int tee_ioctl_supp_send(struct tee_context *ctx,
get_user(num_params, &uarg->num_params))
return -EFAULT;
- if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
+ if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len)
return -EINVAL;
params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
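
The open-coded sizeof(arg) + sizeof(struct tee_param) * num_params can wrap for a large user-supplied num_params; size_mul() and size_add() from <linux/overflow.h> saturate at SIZE_MAX instead, so the length comparison fails safely. A minimal sketch with hypothetical structures:

#include <linux/overflow.h>

struct demo_arg { u32 num_params; };
struct demo_param { u64 attr, a, b, c; };

static int demo_check_buf_len(size_t buf_len, u32 num_params)
{
	/* Saturating math: on overflow 'need' becomes SIZE_MAX and
	 * can never match a plausible buf_len.
	 */
	size_t need = size_add(sizeof(struct demo_arg),
			       size_mul(sizeof(struct demo_param),
					num_params));

	return need == buf_len ? 0 : -EINVAL;
}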
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index d3f9686e26e7..a09c188b9ad1 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -257,7 +257,7 @@ config HISI_THERMAL
depends on ARCH_HISI || COMPILE_TEST
depends on HAS_IOMEM
depends on OF
- default y
+ default ARCH_HISI
help
Enable this to plug hisilicon's thermal sensor driver into the Linux
thermal framework. cpufreq is used as the cooling device to throttle
@@ -327,6 +327,15 @@ config QORIQ_THERMAL
cpufreq is used as the cooling device to throttle CPUs when the
passive trip is crossed.
+config AIROHA_THERMAL
+ tristate "Airoha thermal sensor driver"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ depends on MFD_SYSCON
+ depends on OF
+ help
+ Enable this to plug the Airoha thermal sensor driver into the Linux
+ thermal framework.
+
config SPEAR_THERMAL
tristate "SPEAr thermal sensor driver"
depends on PLAT_SPEAR || COMPILE_TEST
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 9abf43a74f2b..d7718978db24 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_K3_THERMAL) += k3_bandgap.o k3_j72xx_bandgap.o
# platform thermal drivers
obj-y += broadcom/
obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
+obj-$(CONFIG_AIROHA_THERMAL) += airoha_thermal.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_SUN8I_THERMAL) += sun8i_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
diff --git a/drivers/thermal/airoha_thermal.c b/drivers/thermal/airoha_thermal.c
new file mode 100644
index 000000000000..b9fd6bfc88e5
--- /dev/null
+++ b/drivers/thermal/airoha_thermal.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+/* SCU regs */
+#define EN7581_PLLRG_PROTECT 0x268
+#define EN7581_PWD_TADC 0x2ec
+#define EN7581_MUX_TADC GENMASK(3, 1)
+#define EN7581_DOUT_TADC 0x2f8
+#define EN7581_DOUT_TADC_MASK GENMASK(15, 0)
+
+/* PTP_THERMAL regs */
+#define EN7581_TEMPMONCTL0 0x800
+#define EN7581_SENSE3_EN BIT(3)
+#define EN7581_SENSE2_EN BIT(2)
+#define EN7581_SENSE1_EN BIT(1)
+#define EN7581_SENSE0_EN BIT(0)
+#define EN7581_TEMPMONCTL1 0x804
+/* period unit calculated in BUS clock cycles, scaled up by 256 */
+#define EN7581_PERIOD_UNIT GENMASK(9, 0)
+#define EN7581_TEMPMONCTL2 0x808
+#define EN7581_FILT_INTERVAL GENMASK(25, 16)
+#define EN7581_SEN_INTERVAL GENMASK(9, 0)
+#define EN7581_TEMPMONINT 0x80C
+#define EN7581_STAGE3_INT_EN BIT(31)
+#define EN7581_STAGE2_INT_EN BIT(30)
+#define EN7581_STAGE1_INT_EN BIT(29)
+#define EN7581_FILTER_INT_EN_3 BIT(28)
+#define EN7581_IMMD_INT_EN3 BIT(27)
+#define EN7581_NOHOTINTEN3 BIT(26)
+#define EN7581_HOFSINTEN3 BIT(25)
+#define EN7581_LOFSINTEN3 BIT(24)
+#define EN7581_HINTEN3 BIT(23)
+#define EN7581_CINTEN3 BIT(22)
+#define EN7581_FILTER_INT_EN_2 BIT(21)
+#define EN7581_FILTER_INT_EN_1 BIT(20)
+#define EN7581_FILTER_INT_EN_0 BIT(19)
+#define EN7581_IMMD_INT_EN2 BIT(18)
+#define EN7581_IMMD_INT_EN1 BIT(17)
+#define EN7581_IMMD_INT_EN0 BIT(16)
+#define EN7581_TIME_OUT_INT_EN BIT(15)
+#define EN7581_NOHOTINTEN2 BIT(14)
+#define EN7581_HOFSINTEN2 BIT(13)
+#define EN7581_LOFSINTEN2 BIT(12)
+#define EN7581_HINTEN2 BIT(11)
+#define EN7581_CINTEN2 BIT(10)
+#define EN7581_NOHOTINTEN1 BIT(9)
+#define EN7581_HOFSINTEN1 BIT(8)
+#define EN7581_LOFSINTEN1 BIT(7)
+#define EN7581_HINTEN1 BIT(6)
+#define EN7581_CINTEN1 BIT(5)
+#define EN7581_NOHOTINTEN0 BIT(4)
+/* Similar to COLD and HOT, these also seem to be swapped in the documentation */
+#define EN7581_LOFSINTEN0 BIT(3) /* In documentation: BIT(2) */
+#define EN7581_HOFSINTEN0 BIT(2) /* In documentation: BIT(3) */
+/* It seems the documentation has these swapped, as the HW will:
+ * - Fire BIT(1) when lower than EN7581_COLD_THRE
+ * - Fire BIT(0) and BIT(5) when higher than EN7581_HOT2NORMAL_THRE or
+ * EN7581_HOT_THRE
+ */
+#define EN7581_CINTEN0 BIT(1) /* In documentation: BIT(0) */
+#define EN7581_HINTEN0 BIT(0) /* In documentation: BIT(1) */
+#define EN7581_TEMPMONINTSTS 0x810
+#define EN7581_STAGE3_INT_STAT BIT(31)
+#define EN7581_STAGE2_INT_STAT BIT(30)
+#define EN7581_STAGE1_INT_STAT BIT(29)
+#define EN7581_FILTER_INT_STAT_3 BIT(28)
+#define EN7581_IMMD_INT_STS3 BIT(27)
+#define EN7581_NOHOTINTSTS3 BIT(26)
+#define EN7581_HOFSINTSTS3 BIT(25)
+#define EN7581_LOFSINTSTS3 BIT(24)
+#define EN7581_HINTSTS3 BIT(23)
+#define EN7581_CINTSTS3 BIT(22)
+#define EN7581_FILTER_INT_STAT_2 BIT(21)
+#define EN7581_FILTER_INT_STAT_1 BIT(20)
+#define EN7581_FILTER_INT_STAT_0 BIT(19)
+#define EN7581_IMMD_INT_STS2 BIT(18)
+#define EN7581_IMMD_INT_STS1 BIT(17)
+#define EN7581_IMMD_INT_STS0 BIT(16)
+#define EN7581_TIME_OUT_INT_STAT BIT(15)
+#define EN7581_NOHOTINTSTS2 BIT(14)
+#define EN7581_HOFSINTSTS2 BIT(13)
+#define EN7581_LOFSINTSTS2 BIT(12)
+#define EN7581_HINTSTS2 BIT(11)
+#define EN7581_CINTSTS2 BIT(10)
+#define EN7581_NOHOTINTSTS1 BIT(9)
+#define EN7581_HOFSINTSTS1 BIT(8)
+#define EN7581_LOFSINTSTS1 BIT(7)
+#define EN7581_HINTSTS1 BIT(6)
+#define EN7581_CINTSTS1 BIT(5)
+#define EN7581_NOHOTINTSTS0 BIT(4)
+/* Similar to COLD and HOT, these also seem to be swapped in the documentation */
+#define EN7581_LOFSINTSTS0 BIT(3) /* In documentation: BIT(2) */
+#define EN7581_HOFSINTSTS0 BIT(2) /* In documentation: BIT(3) */
+/* It seems the documentation has these swapped, as the HW will:
+ * - Fire BIT(1) when lower than EN7581_COLD_THRE
+ * - Fire BIT(0) and BIT(5) when higher than EN7581_HOT2NORMAL_THRE or
+ * EN7581_HOT_THRE
+ *
+ * To clear things up, we swap the defines but keep them documented here.
+ */
+#define EN7581_CINTSTS0 BIT(1) /* In documentation: BIT(0) */
+#define EN7581_HINTSTS0 BIT(0) /* In documentation: BIT(1)*/
+/* Monitor will take the bigger threshold between HOT2NORMAL and HOT
+ * and will fire both the HOT2NORMAL and HOT interrupts when higher
+ * than the two.
+ *
+ * It has also been observed that not setting HOT2NORMAL makes the
+ * monitor treat the COLD threshold as HOT2NORMAL.
+ */
+#define EN7581_TEMPH2NTHRE 0x824
+/* It seems HOT2NORMAL is actually NORMAL2HOT */
+#define EN7581_HOT2NORMAL_THRE GENMASK(11, 0)
+#define EN7581_TEMPHTHRE 0x828
+#define EN7581_HOT_THRE GENMASK(11, 0)
+/* Monitor will use this as HOT2NORMAL (fire interrupt when lower than...) */
+#define EN7581_TEMPCTHRE 0x82c
+#define EN7581_COLD_THRE GENMASK(11, 0)
+/* Also LOW and HIGH offset register are swapped */
+#define EN7581_TEMPOFFSETL 0x830 /* In documentation: 0x834 */
+#define EN7581_LOW_OFFSET GENMASK(11, 0)
+#define EN7581_TEMPOFFSETH 0x834 /* In documentation: 0x830 */
+#define EN7581_HIGH_OFFSET GENMASK(11, 0)
+#define EN7581_TEMPMSRCTL0 0x838
+#define EN7581_MSRCTL3 GENMASK(11, 9)
+#define EN7581_MSRCTL2 GENMASK(8, 6)
+#define EN7581_MSRCTL1 GENMASK(5, 3)
+#define EN7581_MSRCTL0 GENMASK(2, 0)
+#define EN7581_TEMPADCVALIDADDR 0x878
+#define EN7581_ADC_VALID_ADDR GENMASK(31, 0)
+#define EN7581_TEMPADCVOLTADDR 0x87c
+#define EN7581_ADC_VOLT_ADDR GENMASK(31, 0)
+#define EN7581_TEMPRDCTRL 0x880
+/*
+ * NOTICE: AHB has this set to 0 by default, meaning that the
+ * same addr is used for the ADC volt and valid reading.
+ * In that case, the VALID addr is used and the volt addr is ignored.
+ */
+#define EN7581_RD_CTRL_DIFF BIT(0)
+#define EN7581_TEMPADCVALIDMASK 0x884
+#define EN7581_ADV_RD_VALID_POLARITY BIT(5)
+#define EN7581_ADV_RD_VALID_POS GENMASK(4, 0)
+#define EN7581_TEMPADCVOLTAGESHIFT 0x888
+#define EN7581_ADC_VOLTAGE_SHIFT GENMASK(4, 0)
+/*
+ * Same values for each CTL.
+ * Can operate in:
+ * - 1 sample
+ * - 2 sample and make average of them
+ * - 4,6,10,16 sample, drop max and min and make average of them
+ */
+#define EN7581_MSRCTL_1SAMPLE 0x0
+#define EN7581_MSRCTL_AVG2SAMPLE 0x1
+#define EN7581_MSRCTL_4SAMPLE_MAX_MIX_AVG2 0x2
+#define EN7581_MSRCTL_6SAMPLE_MAX_MIX_AVG4 0x3
+#define EN7581_MSRCTL_10SAMPLE_MAX_MIX_AVG8 0x4
+#define EN7581_MSRCTL_18SAMPLE_MAX_MIX_AVG16 0x5
+#define EN7581_TEMPAHBPOLL 0x840
+#define EN7581_ADC_POLL_INTVL GENMASK(31, 0)
+/* PTPSPARE0,2 regs are used to store efuse info for the calibrated temp offset */
+#define EN7581_EFUSE_TEMP_OFFSET_REG 0xf20 /* PTPSPARE0 */
+#define EN7581_EFUSE_TEMP_OFFSET GENMASK(31, 16)
+#define EN7581_PTPSPARE1 0xf24 /* PTPSPARE1 */
+#define EN7581_EFUSE_TEMP_CPU_SENSOR_REG 0xf28 /* PTPSPARE2 */
+
+#define EN7581_SLOPE_X100_DIO_DEFAULT 5645
+#define EN7581_SLOPE_X100_DIO_AVS 5645
+
+#define EN7581_INIT_TEMP_CPK_X10 300
+#define EN7581_INIT_TEMP_FTK_X10 620
+#define EN7581_INIT_TEMP_NONK_X10 550
+
+#define EN7581_SCU_THERMAL_PROTECT_KEY 0x12
+#define EN7581_SCU_THERMAL_MUX_DIODE1 0x7
+
+/* Convert temp to raw value as read from ADC ((((temp / 100) - init) * slope) / 1000) + offset */
+#define TEMP_TO_RAW(priv, temp) ((((((temp) / 100) - (priv)->init_temp) * \
+ (priv)->default_slope) / 1000) + \
+ (priv)->default_offset)
+
+/* Convert raw to temp ((((temp - offset) * 1000) / slope + init) * 100) */
+#define RAW_TO_TEMP(priv, raw) (((((raw) - (priv)->default_offset) * 1000) / \
+ (priv)->default_slope + \
+ (priv)->init_temp) * 100)
+
+#define AIROHA_MAX_SAMPLES 6
+
+struct airoha_thermal_priv {
+ void __iomem *base;
+ struct regmap *chip_scu;
+ struct resource scu_adc_res;
+
+ struct thermal_zone_device *tz;
+ int init_temp;
+ int default_slope;
+ int default_offset;
+};
+
+static int airoha_get_thermal_ADC(struct airoha_thermal_priv *priv)
+{
+ u32 val;
+
+ regmap_read(priv->chip_scu, EN7581_DOUT_TADC, &val);
+ return FIELD_GET(EN7581_DOUT_TADC_MASK, val);
+}
+
+static void airoha_init_thermal_ADC_mode(struct airoha_thermal_priv *priv)
+{
+ u32 adc_mux, pllrg;
+
+ /* Save PLLRG current value */
+ regmap_read(priv->chip_scu, EN7581_PLLRG_PROTECT, &pllrg);
+
+ /* Give access to thermal regs */
+ regmap_write(priv->chip_scu, EN7581_PLLRG_PROTECT, EN7581_SCU_THERMAL_PROTECT_KEY);
+ adc_mux = FIELD_PREP(EN7581_MUX_TADC, EN7581_SCU_THERMAL_MUX_DIODE1);
+ regmap_write(priv->chip_scu, EN7581_PWD_TADC, adc_mux);
+
+ /* Restore PLLRG value on exit */
+ regmap_write(priv->chip_scu, EN7581_PLLRG_PROTECT, pllrg);
+}
+
+static int airoha_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ struct airoha_thermal_priv *priv = thermal_zone_device_priv(tz);
+ int min_value, max_value, avg_value, value;
+ int i;
+
+ avg_value = 0;
+ min_value = INT_MAX;
+ max_value = INT_MIN;
+
+ for (i = 0; i < AIROHA_MAX_SAMPLES; i++) {
+ value = airoha_get_thermal_ADC(priv);
+ min_value = min(value, min_value);
+ max_value = max(value, max_value);
+ avg_value += value;
+ }
+
+ /* Drop min and max and average the remaining samples */
+ avg_value -= (min_value + max_value);
+ avg_value /= AIROHA_MAX_SAMPLES - 2;
+
+ *temp = RAW_TO_TEMP(priv, avg_value);
+ return 0;
+}
+
+static int airoha_thermal_set_trips(struct thermal_zone_device *tz, int low,
+ int high)
+{
+ struct airoha_thermal_priv *priv = thermal_zone_device_priv(tz);
+ bool enable_monitor = false;
+
+ if (high != INT_MAX) {
+ /* Validate high and clamp it to a supported value */
+ high = clamp_t(int, high, RAW_TO_TEMP(priv, 0),
+ RAW_TO_TEMP(priv, FIELD_MAX(EN7581_DOUT_TADC_MASK)));
+
+ /* We offset the high temp by 1°C to trigger the correct event */
+ writel(TEMP_TO_RAW(priv, high) >> 4,
+ priv->base + EN7581_TEMPOFFSETH);
+
+ enable_monitor = true;
+ }
+
+ if (low != -INT_MAX) {
+ /* Validate low and clamp it to a supported value */
+ low = clamp_t(int, low, RAW_TO_TEMP(priv, 0),
+ RAW_TO_TEMP(priv, FIELD_MAX(EN7581_DOUT_TADC_MASK)));
+
+ /* We offset the low temp by 1°C to trigger the correct event */
+ writel(TEMP_TO_RAW(priv, low) >> 4,
+ priv->base + EN7581_TEMPOFFSETL);
+
+ enable_monitor = true;
+ }
+
+ /* Enable sensor 0 monitor after trips are set */
+ if (enable_monitor)
+ writel(EN7581_SENSE0_EN, priv->base + EN7581_TEMPMONCTL0);
+
+ return 0;
+}
+
+static const struct thermal_zone_device_ops thdev_ops = {
+ .get_temp = airoha_thermal_get_temp,
+ .set_trips = airoha_thermal_set_trips,
+};
+
+static irqreturn_t airoha_thermal_irq(int irq, void *data)
+{
+ struct airoha_thermal_priv *priv = data;
+ enum thermal_notify_event event;
+ bool update = false;
+ u32 status;
+
+ status = readl(priv->base + EN7581_TEMPMONINTSTS);
+ switch (status & (EN7581_HOFSINTSTS0 | EN7581_LOFSINTSTS0)) {
+ case EN7581_HOFSINTSTS0:
+ event = THERMAL_TRIP_VIOLATED;
+ update = true;
+ break;
+ case EN7581_LOFSINTSTS0:
+ event = THERMAL_EVENT_UNSPECIFIED;
+ update = true;
+ break;
+ default:
+ /* Should be impossible as we enable only these interrupts */
+ break;
+ }
+
+ /* Reset Interrupt */
+ writel(status, priv->base + EN7581_TEMPMONINTSTS);
+
+ if (update)
+ thermal_zone_device_update(priv->tz, event);
+
+ return IRQ_HANDLED;
+}
+
+static void airoha_thermal_setup_adc_val(struct device *dev,
+ struct airoha_thermal_priv *priv)
+{
+ u32 efuse_calib_info, cpu_sensor;
+
+ /* Setup thermal sensor to ADC mode and setup the mux to DIODE1 */
+ airoha_init_thermal_ADC_mode(priv);
+ /* sleep 10 ms for ADC to enable */
+ usleep_range(10 * USEC_PER_MSEC, 11 * USEC_PER_MSEC);
+
+ efuse_calib_info = readl(priv->base + EN7581_EFUSE_TEMP_OFFSET_REG);
+ if (efuse_calib_info) {
+ priv->default_offset = FIELD_GET(EN7581_EFUSE_TEMP_OFFSET, efuse_calib_info);
+ /* Different slopes are applied depending on whether the sensor is used for CPU or package */
+ cpu_sensor = readl(priv->base + EN7581_EFUSE_TEMP_CPU_SENSOR_REG);
+ if (cpu_sensor) {
+ priv->default_slope = EN7581_SLOPE_X100_DIO_DEFAULT;
+ priv->init_temp = EN7581_INIT_TEMP_FTK_X10;
+ } else {
+ priv->default_slope = EN7581_SLOPE_X100_DIO_AVS;
+ priv->init_temp = EN7581_INIT_TEMP_CPK_X10;
+ }
+ } else {
+ priv->default_offset = airoha_get_thermal_ADC(priv);
+ priv->default_slope = EN7581_SLOPE_X100_DIO_DEFAULT;
+ priv->init_temp = EN7581_INIT_TEMP_NONK_X10;
+ dev_info(dev, "missing thermal calibration EFUSE, using non calibrated value\n");
+ }
+}
+
+static void airoha_thermal_setup_monitor(struct airoha_thermal_priv *priv)
+{
+ /* Set measure mode */
+ writel(FIELD_PREP(EN7581_MSRCTL0, EN7581_MSRCTL_6SAMPLE_MAX_MIX_AVG4),
+ priv->base + EN7581_TEMPMSRCTL0);
+
+ /*
+ * Configure ADC valid reading addr
+ * The AHB temp monitor system doesn't have direct access to the
+ * thermal sensor. It instead works by providing various
+ * addresses to configure how to access and set up an ADC for the
+ * sensor. The EN7581 supports only one sensor, hence the
+ * implementation is greatly simplified, but the AHB supports
+ * up to 4 different sensors from the same ADC that can be
+ * switched by tuning the ADC mux or the written address.
+ *
+ * We set the valid addr instead of the volt addr as we don't
+ * enable valid/volt split reading, and the AHB reads the valid
+ * addr in that case.
+ */
+ writel(priv->scu_adc_res.start + EN7581_DOUT_TADC,
+ priv->base + EN7581_TEMPADCVALIDADDR);
+
+ /*
+ * Configure the valid bit at a fake position of bit 16, as the
+ * ADC outputs at most 2 bytes for the voltage.
+ */
+ writel(FIELD_PREP(EN7581_ADV_RD_VALID_POS, 16),
+ priv->base + EN7581_TEMPADCVALIDMASK);
+
+ /*
+ * AHB supports max 12 bits for the ADC voltage. Shift the read
+ * value 4 bits to the right. The precision lost by this is
+ * minimal, in the order of half a °C, and is acceptable in the
+ * context of triggering an interrupt in a critical condition.
+ */
+ writel(FIELD_PREP(EN7581_ADC_VOLTAGE_SHIFT, 4),
+ priv->base + EN7581_TEMPADCVOLTAGESHIFT);
+
+ /* BUS clock is 300MHz; counting unit is 3 * 68.64 * 256 = 52.715us */
+ writel(FIELD_PREP(EN7581_PERIOD_UNIT, 3),
+ priv->base + EN7581_TEMPMONCTL1);
+
+ /*
+ * filt interval is 1 * 52.715us = 52.715us,
+ * sen interval is 379 * 52.715us = 19.97ms
+ */
+ writel(FIELD_PREP(EN7581_FILT_INTERVAL, 1) |
+ FIELD_PREP(EN7581_SEN_INTERVAL, 379),
+ priv->base + EN7581_TEMPMONCTL2);
+
+ /* AHB poll is set to 146 * 68.64ns = 10.02us */
+ writel(FIELD_PREP(EN7581_ADC_POLL_INTVL, 146),
+ priv->base + EN7581_TEMPAHBPOLL);
+}
+
+static int airoha_thermal_probe(struct platform_device *pdev)
+{
+ struct airoha_thermal_priv *priv;
+ struct device_node *chip_scu_np;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ chip_scu_np = of_parse_phandle(dev->of_node, "airoha,chip-scu", 0);
+ if (!chip_scu_np)
+ return -EINVAL;
+
+ priv->chip_scu = syscon_node_to_regmap(chip_scu_np);
+ if (IS_ERR(priv->chip_scu))
+ return PTR_ERR(priv->chip_scu);
+
+ of_address_to_resource(chip_scu_np, 0, &priv->scu_adc_res);
+ of_node_put(chip_scu_np);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ airoha_thermal_irq, IRQF_ONESHOT,
+ pdev->name, priv);
+ if (ret) {
+ dev_err(dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
+ airoha_thermal_setup_monitor(priv);
+ airoha_thermal_setup_adc_val(dev, priv);
+
+ /* Register the thermal sensor and get info from DT */
+ priv->tz = devm_thermal_of_zone_register(dev, 0, priv, &thdev_ops);
+ if (IS_ERR(priv->tz)) {
+ dev_err(dev, "register thermal zone sensor failed\n");
+ return PTR_ERR(priv->tz);
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Enable LOW and HIGH interrupts */
+ writel(EN7581_HOFSINTEN0 | EN7581_LOFSINTEN0,
+ priv->base + EN7581_TEMPMONINT);
+
+ return 0;
+}
+
+static const struct of_device_id airoha_thermal_match[] = {
+ { .compatible = "airoha,en7581-thermal" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_thermal_match);
+
+static struct platform_driver airoha_thermal_driver = {
+ .driver = {
+ .name = "airoha-thermal",
+ .of_match_table = airoha_thermal_match,
+ },
+ .probe = airoha_thermal_probe,
+};
+
+module_platform_driver(airoha_thermal_driver);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Airoha thermal driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index 3c5f7dbddf2c..5448d772db12 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -7,10 +7,10 @@
*
* Register value to celsius temperature formulas:
* Read_Val m * U
- * U = ---------, Uptat = ---------
+ * U = ---------, uptat = ---------
* 2^16 1 + n * U
*
- * Temperature = A * ( Uptat + u_efuse / 2^16 )- B
+ * Temperature = A * ( uptat + u_efuse / 2^16 )- B
*
* A B m n : calibration parameters
* u_efuse : fused calibration value, it's a signed 16 bits value
@@ -112,7 +112,7 @@ static int amlogic_thermal_code_to_millicelsius(struct amlogic_thermal *pdata,
const struct amlogic_thermal_soc_calib_data *param =
pdata->data->calibration_parameters;
int temp;
- s64 factor, Uptat, uefuse;
+ s64 factor, uptat, uefuse;
uefuse = pdata->trim_info & TSENSOR_TRIM_SIGN_MASK ?
~(pdata->trim_info & TSENSOR_TRIM_TEMP_MASK) + 1 :
@@ -121,12 +121,12 @@ static int amlogic_thermal_code_to_millicelsius(struct amlogic_thermal *pdata,
factor = param->n * temp_code;
factor = div_s64(factor, 100);
- Uptat = temp_code * param->m;
- Uptat = div_s64(Uptat, 100);
- Uptat = Uptat * BIT(16);
- Uptat = div_s64(Uptat, BIT(16) + factor);
+ uptat = temp_code * param->m;
+ uptat = div_s64(uptat, 100);
+ uptat = uptat * BIT(16);
+ uptat = div_s64(uptat, BIT(16) + factor);
- temp = (Uptat + uefuse) * param->A;
+ temp = (uptat + uefuse) * param->A;
temp = div_s64(temp, BIT(16));
temp = (temp - param->B) * 100;
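
The rename above leaves the fixed-point evaluation of the header-comment formula unchanged. As a cross-check, the same arithmetic as a standalone userspace function; the A/B/m/n values below are placeholders, not a real SoC calibration:

#include <stdint.h>
#include <stdio.h>

/* Placeholder calibration parameters, for illustration only. */
#define PARAM_A 9411
#define PARAM_B 3159
#define PARAM_M 424
#define PARAM_N 324

static int code_to_millicelsius(int temp_code, int64_t uefuse)
{
	int64_t factor, uptat, temp;

	factor = (int64_t)PARAM_N * temp_code / 100;	/* n * U, scaled */
	uptat = (int64_t)temp_code * PARAM_M / 100;	/* m * U, scaled */
	uptat = uptat * 65536 / (65536 + factor);	/* / (1 + n * U) */
	temp = (uptat + uefuse) * PARAM_A / 65536;	/* A * (uptat + u_efuse/2^16) */
	return (int)((temp - PARAM_B) * 100);		/* subtract B, to millicelsius */
}

int main(void)
{
	printf("%d m°C\n", code_to_millicelsius(0x2000, 0));
	return 0;
}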
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 7fbba2233c4c..685a5aee5e0d 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -192,7 +192,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
rate = clk_get_rate(data->clk);
if ((rate < 1920000) || (rate > 5000000))
dev_warn(dev,
- "Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n",
+ "Clock %pC running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n",
data->clk, rate);
/* register of thermal sensor and get info from DT */
diff --git a/drivers/thermal/intel/int340x_thermal/Makefile b/drivers/thermal/intel/int340x_thermal/Makefile
index fe3f43924525..184318d1792b 100644
--- a/drivers/thermal/intel/int340x_thermal/Makefile
+++ b/drivers/thermal/intel/int340x_thermal/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device_pci_legacy.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device_pci.o
obj-$(CONFIG_PROC_THERMAL_MMIO_RAPL) += processor_thermal_rapl.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_rfim.o
+obj-$(CONFIG_INT340X_THERMAL) += platform_temperature_control.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_mbox.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_wt_req.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_wt_hint.o
diff --git a/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c b/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c
new file mode 100644
index 000000000000..2d6504514893
--- /dev/null
+++ b/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * processor thermal device platform temperature controls
+ * Copyright (c) 2025, Intel Corporation.
+ */
+
+/*
+ * Platform temperature controls hardware interface
+ *
+ * The hardware control interface is via MMIO offsets in the processor
+ * thermal device MMIO space. There are three instances of MMIO registers.
+ * All registers are 64 bit wide with RW access.
+ *
+ * Name: PLATFORM_TEMPERATURE_CONTROL
+ * Offsets: 0x5B20, 0x5B28, 0x5B30
+ *
+ * Bits Description
+ * 7:0 TARGET_TEMP : Target temperature limit to which the control
+ * mechanism is regulating. Units: 0.5C.
+ * 8:8 ENABLE: Read current enable status of the feature or enable
+ * feature.
+ * 11:9 GAIN: Sets the aggressiveness of the control loop, from 0 to 7:
+ * 7 graceful, favors performance at the expense of temperature
+ * overshoots
+ * 0 aggressive, favors tight regulation over performance
+ * 12:12 TEMPERATURE_OVERRIDE_EN
+ * When set, hardware will use TEMPERATURE_OVERRIDE values instead
+ * of reading from corresponding sensor.
+ * 15:13 RESERVED
+ * 23:16 MIN_PERFORMANCE_LEVEL: Minimum performance level below which
+ * there will be no throttling. 0 - all levels of throttling allowed
+ * including survivability actions. 255 - no throttling allowed.
+ * 31:24 TEMPERATURE_OVERRIDE: Allows SW to override the input temperature.
+ * Hardware will use this value instead of the sensor temperature.
+ * Units: 0.5C.
+ * 63:32 RESERVED
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "processor_thermal_device.h"
+
+struct mmio_reg {
+ int bits;
+ u16 mask;
+ u16 shift;
+ u16 units;
+};
+
+#define MAX_ATTR_GROUP_NAME_LEN 32
+#define PTC_MAX_ATTRS 3
+
+struct ptc_data {
+ u32 offset;
+ struct attribute_group ptc_attr_group;
+ struct attribute *ptc_attrs[PTC_MAX_ATTRS];
+ struct device_attribute temperature_target_attr;
+ struct device_attribute enable_attr;
+ char group_name[MAX_ATTR_GROUP_NAME_LEN];
+};
+
+static const struct mmio_reg ptc_mmio_regs[] = {
+ { 8, 0xFF, 0, 500}, /* temperature_target, units 0.5C */
+ { 1, 0x01, 8, 0}, /* enable */
+ { 3, 0x7, 9, 0}, /* gain */
+ { 8, 0xFF, 16, 0}, /* min_performance_level */
+ { 1, 0x1, 12, 0}, /* temperature_override_enable */
+ { 8, 0xFF, 24, 500}, /* temperature_override, units 0.5C */
+};
+
+#define PTC_MAX_INSTANCES 3
+
+/* Unique offset for each PTC instance */
+static u32 ptc_offsets[PTC_MAX_INSTANCES] = {0x5B20, 0x5B28, 0x5B30};
+
+/* These will represent sysfs attribute names */
+static const char * const ptc_strings[] = {
+ "temperature_target",
+ "enable",
+ NULL
+};
+
+/* Lock to protect concurrent read/write and read-modify-write */
+static DEFINE_MUTEX(ptc_lock);
+
+static ssize_t ptc_mmio_show(struct ptc_data *data, struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct proc_thermal_device *proc_priv;
+ const struct mmio_reg *mmio_regs;
+ int ret, units;
+ u64 reg_val;
+
+ proc_priv = pci_get_drvdata(pdev);
+ mmio_regs = ptc_mmio_regs;
+ ret = match_string(ptc_strings, -1, attr->attr.name);
+ if (ret < 0)
+ return ret;
+
+ units = mmio_regs[ret].units;
+
+ guard(mutex)(&ptc_lock);
+
+ reg_val = readq((void __iomem *) (proc_priv->mmio_base + data->offset));
+ ret = (reg_val >> mmio_regs[ret].shift) & mmio_regs[ret].mask;
+ if (units)
+ ret *= units;
+
+ return sysfs_emit(buf, "%d\n", ret);
+}
+
+#define PTC_SHOW(suffix)\
+static ssize_t suffix##_show(struct device *dev,\
+ struct device_attribute *attr,\
+ char *buf)\
+{\
+ struct ptc_data *data = container_of(attr, struct ptc_data, suffix##_attr);\
+ return ptc_mmio_show(data, dev, attr, buf);\
+}
+
+static void ptc_mmio_write(struct pci_dev *pdev, u32 offset, int index, u32 value)
+{
+ struct proc_thermal_device *proc_priv;
+ u64 mask, reg_val;
+
+ proc_priv = pci_get_drvdata(pdev);
+
+ mask = GENMASK_ULL(ptc_mmio_regs[index].shift + ptc_mmio_regs[index].bits - 1,
+ ptc_mmio_regs[index].shift);
+
+ guard(mutex)(&ptc_lock);
+
+ reg_val = readq((void __iomem *) (proc_priv->mmio_base + offset));
+ reg_val &= ~mask;
+ reg_val |= (value << ptc_mmio_regs[index].shift);
+ writeq(reg_val, (void __iomem *) (proc_priv->mmio_base + offset));
+}
+
+static int ptc_store(struct ptc_data *data, struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned int input;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &input);
+ if (ret)
+ return ret;
+
+ ret = match_string(ptc_strings, -1, attr->attr.name);
+ if (ret < 0)
+ return ret;
+
+ if (ptc_mmio_regs[ret].units)
+ input /= ptc_mmio_regs[ret].units;
+
+ if (input > ptc_mmio_regs[ret].mask)
+ return -EINVAL;
+
+ ptc_mmio_write(pdev, data->offset, ret, input);
+
+ return count;
+}
+
+#define PTC_STORE(suffix)\
+static ssize_t suffix##_store(struct device *dev,\
+ struct device_attribute *attr,\
+ const char *buf, size_t count)\
+{\
+ struct ptc_data *data = container_of(attr, struct ptc_data, suffix##_attr);\
+ return ptc_store(data, dev, attr, buf, count);\
+}
+
+PTC_SHOW(temperature_target);
+PTC_STORE(temperature_target);
+PTC_SHOW(enable);
+PTC_STORE(enable);
+
+#define ptc_init_attribute(_name)\
+ do {\
+ sysfs_attr_init(&data->_name##_attr.attr);\
+ data->_name##_attr.show = _name##_show;\
+ data->_name##_attr.store = _name##_store;\
+ data->_name##_attr.attr.name = #_name;\
+ data->_name##_attr.attr.mode = 0644;\
+ } while (0)
+
+static int ptc_create_groups(struct pci_dev *pdev, int instance, struct ptc_data *data)
+{
+ int ret, index = 0;
+
+ ptc_init_attribute(temperature_target);
+ ptc_init_attribute(enable);
+
+ data->ptc_attrs[index++] = &data->temperature_target_attr.attr;
+ data->ptc_attrs[index++] = &data->enable_attr.attr;
+ data->ptc_attrs[index] = NULL;
+
+ snprintf(data->group_name, MAX_ATTR_GROUP_NAME_LEN,
+ "ptc_%d_control", instance);
+ data->ptc_attr_group.name = data->group_name;
+ data->ptc_attr_group.attrs = data->ptc_attrs;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &data->ptc_attr_group);
+
+ return ret;
+}
+
+static struct ptc_data ptc_instance[PTC_MAX_INSTANCES];
+
+int proc_thermal_ptc_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
+{
+ if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_PTC) {
+ int i;
+
+ for (i = 0; i < PTC_MAX_INSTANCES; i++) {
+ ptc_instance[i].offset = ptc_offsets[i];
+ ptc_create_groups(pdev, i, &ptc_instance[i]);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(proc_thermal_ptc_add);
+
+void proc_thermal_ptc_remove(struct pci_dev *pdev)
+{
+ struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
+
+ if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_PTC) {
+ int i;
+
+ for (i = 0; i < PTC_MAX_INSTANCES; i++)
+ sysfs_remove_group(&pdev->dev.kobj, &ptc_instance[i].ptc_attr_group);
+ }
+}
+EXPORT_SYMBOL_GPL(proc_thermal_ptc_remove);
+
+MODULE_IMPORT_NS("INT340X_THERMAL");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Processor Thermal PTC Interface");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index c868d8b7bd1c..29fcece48cad 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/thermal.h>
+#include <asm/msr.h>
#include "int340x_thermal_zone.h"
#include "processor_thermal_device.h"
#include "../intel_soc_dts_iosf.h"
@@ -153,7 +154,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
u64 val;
int err;
- err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+ err = rdmsrq_safe(MSR_PLATFORM_INFO, &val);
if (err)
return err;
@@ -399,13 +400,21 @@ int proc_thermal_mmio_add(struct pci_dev *pdev,
}
}
+ if (feature_mask & PROC_THERMAL_FEATURE_PTC) {
+ ret = proc_thermal_ptc_add(pdev, proc_priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add PTC MMIO interface\n");
+ goto err_rem_rapl;
+ }
+ }
+
if (feature_mask & PROC_THERMAL_FEATURE_FIVR ||
feature_mask & PROC_THERMAL_FEATURE_DVFS ||
feature_mask & PROC_THERMAL_FEATURE_DLVR) {
ret = proc_thermal_rfim_add(pdev, proc_priv);
if (ret) {
dev_err(&pdev->dev, "failed to add RFIM interface\n");
- goto err_rem_rapl;
+ goto err_rem_ptc;
}
}
@@ -427,6 +436,8 @@ int proc_thermal_mmio_add(struct pci_dev *pdev,
err_rem_rfim:
proc_thermal_rfim_remove(pdev);
+err_rem_ptc:
+ proc_thermal_ptc_remove(pdev);
err_rem_rapl:
proc_thermal_rapl_remove();
@@ -439,6 +450,9 @@ void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_RAPL)
proc_thermal_rapl_remove();
+ if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_PTC)
+ proc_thermal_ptc_remove(pdev);
+
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_FIVR ||
proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS ||
proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DLVR)
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index ba2d89d3024c..9a6ca43b6fa2 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -67,6 +67,7 @@ struct rapl_mmio_regs {
#define PROC_THERMAL_FEATURE_WT_HINT 0x20
#define PROC_THERMAL_FEATURE_POWER_FLOOR 0x40
#define PROC_THERMAL_FEATURE_MSI_SUPPORT 0x80
+#define PROC_THERMAL_FEATURE_PTC 0x100
#if IS_ENABLED(CONFIG_PROC_THERMAL_MMIO_RAPL)
int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
@@ -123,4 +124,6 @@ int proc_thermal_mmio_add(struct pci_dev *pdev,
struct proc_thermal_device *proc_priv,
kernel_ulong_t feature_mask);
void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
+int proc_thermal_ptc_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
+void proc_thermal_ptc_remove(struct pci_dev *pdev);
#endif
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 2097aae39946..00160936070a 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -486,7 +486,8 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
{ PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT |
PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS |
- PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
+ PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR |
+ PROC_THERMAL_FEATURE_PTC) },
{ PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR |
PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
@@ -497,7 +498,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, PTL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS |
PROC_THERMAL_FEATURE_MSI_SUPPORT | PROC_THERMAL_FEATURE_WT_HINT |
- PROC_THERMAL_FEATURE_POWER_FLOOR) },
+ PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC) },
{ },
};
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index 5b18a46a10b0..bd2fca7dc017 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -284,7 +284,7 @@ void intel_hfi_process_event(__u64 pkg_therm_status_msr_val)
if (!raw_spin_trylock(&hfi_instance->event_lock))
return;
- rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr);
+ rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr);
hfi = msr & PACKAGE_THERM_STATUS_HFI_UPDATED;
if (!hfi) {
raw_spin_unlock(&hfi_instance->event_lock);
@@ -356,9 +356,9 @@ static void hfi_enable(void)
{
u64 msr_val;
- rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
- wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ wrmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
}
static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
@@ -368,7 +368,7 @@ static void hfi_set_hw_table(struct hfi_instance *hfi_instance)
hw_table_pa = virt_to_phys(hfi_instance->hw_table);
msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
- wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+ wrmsrq(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
}
/* Caller must hold hfi_instance_lock. */
@@ -377,9 +377,9 @@ static void hfi_disable(void)
u64 msr_val;
int i;
- rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ rdmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
msr_val &= ~HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
- wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+ wrmsrq(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
/*
* Wait for hardware to acknowledge the disabling of HFI. Some
@@ -388,7 +388,7 @@ static void hfi_disable(void)
* memory.
*/
for (i = 0; i < 2000; i++) {
- rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+ rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
if (msr_val & PACKAGE_THERM_STATUS_HFI_UPDATED)
break;
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 96a24df79686..9a4cec000910 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -340,7 +340,7 @@ static bool has_pkg_state_counter(void)
/* check if any one of the counter msrs exists */
while (info->msr_index) {
- if (!rdmsrl_safe(info->msr_index, &val))
+ if (!rdmsrq_safe(info->msr_index, &val))
return true;
info++;
}
@@ -356,7 +356,7 @@ static u64 pkg_state_counter(void)
while (info->msr_index) {
if (!info->skip) {
- if (!rdmsrl_safe(info->msr_index, &val))
+ if (!rdmsrq_safe(info->msr_index, &val))
count += val;
else
info->skip = true;
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index 9ff0ebdde0ef..f352ecafbedf 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/thermal.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#define TCC_PROGRAMMABLE BIT(30)
#define TCC_LOCKED BIT(31)
@@ -81,14 +82,14 @@ static int __init tcc_cooling_init(void)
if (!id)
return -ENODEV;
- err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
+ err = rdmsrq_safe(MSR_PLATFORM_INFO, &val);
if (err)
return err;
if (!(val & TCC_PROGRAMMABLE))
return -ENODEV;
- err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
+ err = rdmsrq_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
if (err)
return err;
diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
index e69868e868eb..debc94e2dc16 100644
--- a/drivers/thermal/intel/therm_throt.c
+++ b/drivers/thermal/intel/therm_throt.c
@@ -273,7 +273,7 @@ void thermal_clear_package_intr_status(int level, u64 bit_mask)
}
msr_val &= ~bit_mask;
- wrmsrl(msr, msr_val);
+ wrmsrq(msr, msr_val);
}
EXPORT_SYMBOL_GPL(thermal_clear_package_intr_status);
@@ -287,7 +287,7 @@ static void get_therm_status(int level, bool *proc_hot, u8 *temp)
else
msr = MSR_IA32_PACKAGE_THERM_STATUS;
- rdmsrl(msr, msr_val);
+ rdmsrq(msr, msr_val);
if (msr_val & THERM_STATUS_PROCHOT_LOG)
*proc_hot = true;
else
@@ -643,7 +643,7 @@ static void notify_thresholds(__u64 msr_val)
void __weak notify_hwp_interrupt(void)
{
- wrmsrl_safe(MSR_HWP_STATUS, 0);
+ wrmsrq_safe(MSR_HWP_STATUS, 0);
}
/* Thermal transition interrupt handler */
@@ -654,7 +654,7 @@ void intel_thermal_interrupt(void)
if (static_cpu_has(X86_FEATURE_HWP))
notify_hwp_interrupt();
- rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+ rdmsrq(MSR_IA32_THERM_STATUS, msr_val);
/* Check for violation of core thermal thresholds*/
notify_thresholds(msr_val);
@@ -669,7 +669,7 @@ void intel_thermal_interrupt(void)
CORE_LEVEL);
if (this_cpu_has(X86_FEATURE_PTS)) {
- rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+ rdmsrq(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
/* check violations of package thermal thresholds */
notify_package_thresholds(msr_val);
therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
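
These hunks are part of a tree-wide rename of the 64-bit MSR accessors: rdmsrl/wrmsrl become rdmsrq/wrmsrq and the _safe variants follow suit, with no change in semantics. A kernel-style sketch of the new spelling (hypothetical helper, assuming x86 and <asm/msr.h>):

#include <linux/errno.h>
#include <asm/msr.h>

/* Read the core thermal status MSR; sketch only. */
static int therm_status_read(u64 *val)
{
	/* rdmsrq_safe() returns non-zero when the RDMSR faults. */
	if (rdmsrq_safe(MSR_IA32_THERM_STATUS, val))
		return -EIO;

	return 0;
}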
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 496abf8e55e0..3fc679b6f11b 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "thermal_interrupt.h"
@@ -329,6 +330,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
tj_max = intel_tcc_get_tjmax(cpu);
if (tj_max < 0)
return tj_max;
+ tj_max *= 1000;
zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
if (!zonedev)
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 088481d91e6e..985925147ac0 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -213,6 +213,13 @@ static const struct debugfs_reg32 lvts_regs[] = {
LVTS_DEBUG_FS_REGS(LVTS_CLKEN),
};
+static void lvts_debugfs_exit(void *data)
+{
+ struct lvts_domain *lvts_td = data;
+
+ debugfs_remove_recursive(lvts_td->dom_dentry);
+}
+
static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td)
{
struct debugfs_regset32 *regset;
@@ -245,12 +252,7 @@ static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td)
debugfs_create_regset32("registers", 0400, dentry, regset);
}
- return 0;
-}
-
-static void lvts_debugfs_exit(struct lvts_domain *lvts_td)
-{
- debugfs_remove_recursive(lvts_td->dom_dentry);
+ return devm_add_action_or_reset(dev, lvts_debugfs_exit, lvts_td);
}
#else
@@ -261,8 +263,6 @@ static inline int lvts_debugfs_init(struct device *dev,
return 0;
}
-static void lvts_debugfs_exit(struct lvts_domain *lvts_td) { }
-
#endif
static int lvts_raw_to_temp(u32 raw_temp, int temp_factor)
@@ -1374,8 +1374,6 @@ static void lvts_remove(struct platform_device *pdev)
for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false);
-
- lvts_debugfs_exit(lvts_td);
}
static const struct lvts_ctrl_data mt7988_lvts_ap_data_ctrl[] = {
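
The conversion above replaces an explicit teardown call in the remove path with a device-managed action, so the debugfs directory is now removed automatically at unbind. The general pattern, sketched for a hypothetical driver:

#include <linux/device.h>
#include <linux/debugfs.h>

static void my_debugfs_exit(void *data)
{
	debugfs_remove_recursive(data);
}

static int my_debugfs_init(struct device *dev)
{
	struct dentry *root = debugfs_create_dir("mydrv", NULL);

	/*
	 * On success the action runs automatically at device unbind;
	 * on registration failure it runs immediately, so no manual
	 * cleanup path is needed in either case.
	 */
	return devm_add_action_or_reset(dev, my_debugfs_exit, root);
}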
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index d2d49264cf83..991d1573983d 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -209,7 +209,8 @@ static int lmh_probe(struct platform_device *pdev)
}
lmh_data->irq = platform_get_irq(pdev, 0);
- lmh_data->domain = irq_domain_add_linear(np, 1, &lmh_irq_ops, lmh_data);
+ lmh_data->domain = irq_domain_create_linear(of_fwnode_handle(np), 1, &lmh_irq_ops,
+ lmh_data);
if (!lmh_data->domain) {
dev_err(dev, "Error adding irq_domain\n");
return -EINVAL;
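
irq_domain_add_linear() is being retired in favor of the fwnode-based irq_domain_create_linear(); the only caller-visible change is wrapping the OF node with of_fwnode_handle(). A sketch of the mapping, with hypothetical names:

#include <linux/irqdomain.h>
#include <linux/of.h>

/*
 * Old OF-specific form:
 *	domain = irq_domain_add_linear(np, 1, &my_irq_ops, priv);
 * New fwnode-based equivalent:
 */
static struct irq_domain *my_create_domain(struct device_node *np,
					   const struct irq_domain_ops *ops,
					   void *priv)
{
	return irq_domain_create_linear(of_fwnode_handle(np), 1, ops, priv);
}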
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 1a7874676f68..faa5d00788ca 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -79,6 +79,17 @@ static struct tsens_features tsens_v1_feat = {
.trip_max_temp = 120000,
};
+static struct tsens_features tsens_v1_no_rpm_feat = {
+ .ver_major = VER_1_X_NO_RPM,
+ .crit_int = 0,
+ .combo_int = 0,
+ .adc = 1,
+ .srot_split = 1,
+ .max_sensors = 11,
+ .trip_min_temp = -40000,
+ .trip_max_temp = 120000,
+};
+
static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = {
/* ----- SROT ------ */
/* VERSION */
@@ -150,6 +161,43 @@ static int __init init_8956(struct tsens_priv *priv) {
return init_common(priv);
}
+static int __init init_tsens_v1_no_rpm(struct tsens_priv *priv)
+{
+ int i, ret;
+ u32 mask = 0;
+
+ ret = init_common(priv);
+ if (ret < 0) {
+ dev_err(priv->dev, "Init common failed %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_field_write(priv->rf[TSENS_SW_RST], 1);
+ if (ret) {
+ dev_err(priv->dev, "Reset failed\n");
+ return ret;
+ }
+
+ for (i = 0; i < priv->num_sensors; i++)
+ mask |= BIT(priv->sensor[i].hw_id);
+
+ ret = regmap_field_update_bits(priv->rf[SENSOR_EN], mask, mask);
+ if (ret) {
+ dev_err(priv->dev, "Sensor Enable failed\n");
+ return ret;
+ }
+
+ ret = regmap_field_write(priv->rf[TSENS_EN], 1);
+ if (ret) {
+ dev_err(priv->dev, "Enable failed\n");
+ return ret;
+ }
+
+ ret = regmap_field_write(priv->rf[TSENS_SW_RST], 0);
+
+ return ret;
+}
+
static const struct tsens_ops ops_generic_v1 = {
.init = init_common,
.calibrate = calibrate_v1,
@@ -194,3 +242,17 @@ struct tsens_plat_data data_8976 = {
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
};
+
+static const struct tsens_ops ops_ipq5018 = {
+ .init = init_tsens_v1_no_rpm,
+ .calibrate = tsens_calibrate_common,
+ .get_temp = get_temp_tsens_valid,
+};
+
+const struct tsens_plat_data data_ipq5018 = {
+ .num_sensors = 5,
+ .ops = &ops_ipq5018,
+ .hw_ids = (unsigned int []){0, 1, 2, 3, 4},
+ .feat = &tsens_v1_no_rpm_feat,
+ .fields = tsens_v1_regfields,
+};
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 1f5d4de017d9..a2422ebee816 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -447,7 +447,7 @@ static void tsens_set_interrupt(struct tsens_priv *priv, u32 hw_id,
dev_dbg(priv->dev, "[%u] %s: %s -> %s\n", hw_id, __func__,
irq_type ? ((irq_type == 1) ? "UP" : "CRITICAL") : "LOW",
enable ? "en" : "dis");
- if (tsens_version(priv) > VER_1_X)
+ if (tsens_version(priv) >= VER_2_X)
tsens_set_interrupt_v2(priv, hw_id, irq_type, enable);
else
tsens_set_interrupt_v1(priv, hw_id, irq_type, enable);
@@ -499,7 +499,7 @@ static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id,
ret = regmap_field_read(priv->rf[LOW_INT_CLEAR_0 + hw_id], &d->low_irq_clear);
if (ret)
return ret;
- if (tsens_version(priv) > VER_1_X) {
+ if (tsens_version(priv) >= VER_2_X) {
ret = regmap_field_read(priv->rf[UP_INT_MASK_0 + hw_id], &d->up_irq_mask);
if (ret)
return ret;
@@ -543,7 +543,7 @@ static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id,
static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
{
- if (ver > VER_1_X)
+ if (ver >= VER_2_X)
return mask & (1 << hw_id);
/* v1, v0.1 don't have a irq mask register */
@@ -733,7 +733,7 @@ static int tsens_set_trips(struct thermal_zone_device *tz, int low, int high)
static int tsens_enable_irq(struct tsens_priv *priv)
{
int ret;
- int val = tsens_version(priv) > VER_1_X ? 7 : 1;
+ int val = tsens_version(priv) >= VER_2_X ? 7 : 1;
ret = regmap_field_write(priv->rf[INT_EN], val);
if (ret < 0)
@@ -975,10 +975,16 @@ int __init init_common(struct tsens_priv *priv)
ret = regmap_field_read(priv->rf[TSENS_EN], &enabled);
if (ret)
goto err_put_device;
- if (!enabled && (tsens_version(priv) != VER_2_X_NO_RPM)) {
- dev_err(dev, "%s: device not enabled\n", __func__);
- ret = -ENODEV;
- goto err_put_device;
+ if (!enabled) {
+ switch (tsens_version(priv)) {
+ case VER_1_X_NO_RPM:
+ case VER_2_X_NO_RPM:
+ break;
+ default:
+ dev_err(dev, "%s: device not enabled\n", __func__);
+ ret = -ENODEV;
+ goto err_put_device;
+ }
}
priv->rf[SENSOR_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
@@ -1040,7 +1046,7 @@ int __init init_common(struct tsens_priv *priv)
}
}
- if (tsens_version(priv) > VER_1_X && ver_minor > 2) {
+ if (tsens_version(priv) >= VER_2_X && ver_minor > 2) {
/* Watchdog is present only on v2.3+ */
priv->feat->has_watchdog = 1;
for (i = WDOG_BARK_STATUS; i <= CC_MON_MASK; i++) {
@@ -1102,6 +1108,9 @@ static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
static const struct of_device_id tsens_table[] = {
{
+ .compatible = "qcom,ipq5018-tsens",
+ .data = &data_ipq5018,
+ }, {
.compatible = "qcom,ipq5332-tsens",
.data = &data_ipq5332,
}, {
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 336bc868fd7c..2a7afa4c899b 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -34,6 +34,7 @@ enum tsens_ver {
VER_0 = 0,
VER_0_1,
VER_1_X,
+ VER_1_X_NO_RPM,
VER_2_X,
VER_2_X_NO_RPM,
};
@@ -651,6 +652,9 @@ extern struct tsens_plat_data data_8226, data_8909, data_8916, data_8939, data_8
/* TSENS v1 targets */
extern struct tsens_plat_data data_tsens_v1, data_8937, data_8976, data_8956;
+/* TSENS v1 with no RPM targets */
+extern const struct tsens_plat_data data_ipq5018;
+
/* TSENS v2 targets */
extern struct tsens_plat_data data_8996, data_ipq8074, data_tsens_v2;
extern const struct tsens_plat_data data_ipq5332, data_ipq5424;
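
Inserting VER_1_X_NO_RPM between VER_1_X and VER_2_X is exactly why the comparisons in tsens.c above change from `> VER_1_X` to `>= VER_2_X`: the old test would misclassify the new v1 variant as v2-capable. A standalone illustration:

#include <stdio.h>

enum tsens_ver { VER_0, VER_0_1, VER_1_X, VER_1_X_NO_RPM, VER_2_X, VER_2_X_NO_RPM };

int main(void)
{
	enum tsens_ver ver = VER_1_X_NO_RPM;

	/* Old check: wrongly treats the v1 no-RPM variant as v2+. */
	printf("ver > VER_1_X : %d\n", ver > VER_1_X);
	/* New check: correctly excludes it. */
	printf("ver >= VER_2_X: %d\n", ver >= VER_2_X);
	return 0;
}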
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 2c5ddf0db40c..926f1052e6de 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -1234,7 +1234,7 @@ static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
soc_irq_cdata.irq_chip.irq_set_wake = NULL;
- soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs,
+ soc_irq_cdata.domain = irq_domain_create_linear(of_fwnode_handle(np), num_irqs,
&soctherm_oc_domain_ops,
&soc_irq_cdata);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index cd15e84c47f4..1db2e951b53f 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -151,6 +151,11 @@ static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
struct tb_ctl *ctl = req->ctl;
mutex_lock(&ctl->request_queue_lock);
+ if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) {
+ mutex_unlock(&ctl->request_queue_lock);
+ return;
+ }
+
list_del(&req->list);
clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
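
The added check makes the dequeue idempotent: if completion and cancellation race, only the path that still finds the ACTIVE bit set removes the request from the list. The shape of the pattern, with hypothetical names (the original tests and clears the bit in two steps under the same lock, which is equivalent):

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/mutex.h>

#define MY_REQ_ACTIVE 0 /* hypothetical flag bit */

struct my_req {
	struct list_head list;
	unsigned long flags;
};

static void my_req_dequeue(struct my_req *req, struct mutex *lock)
{
	mutex_lock(lock);
	/* Already dequeued by a racing completion/cancel path? */
	if (!test_and_clear_bit(MY_REQ_ACTIVE, &req->flags)) {
		mutex_unlock(lock);
		return;
	}
	list_del(&req->list);
	mutex_unlock(lock);
}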
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 144d0232a70c..a3a7c8059eee 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -217,7 +217,7 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
if (!ret) {
/* Notify userspace about the change */
- kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+ tb_domain_event(tb, NULL);
}
mutex_unlock(&tb->lock);
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 7859bccc592d..f213d9174dc5 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -22,6 +22,7 @@
#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"
+#include "tunnel.h"
#define PCIE2CIO_CMD 0x30
#define PCIE2CIO_CMD_TIMEOUT BIT(31)
@@ -379,6 +380,27 @@ static bool icm_firmware_running(const struct tb_nhi *nhi)
return !!(val & REG_FW_STS_ICM_EN);
}
+static void icm_xdomain_activated(struct tb_xdomain *xd, bool activated)
+{
+ struct tb_port *nhi_port, *dst_port;
+ struct tb *tb = xd->tb;
+
+ nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
+ dst_port = tb_xdomain_downstream_port(xd);
+
+ if (activated)
+ tb_tunnel_event(tb, TB_TUNNEL_ACTIVATED, TB_TUNNEL_DMA,
+ nhi_port, dst_port);
+ else
+ tb_tunnel_event(tb, TB_TUNNEL_DEACTIVATED, TB_TUNNEL_DMA,
+ nhi_port, dst_port);
+}
+
+static void icm_dp_event(struct tb *tb)
+{
+ tb_tunnel_event(tb, TB_TUNNEL_CHANGED, TB_TUNNEL_DP, NULL, NULL);
+}
+
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
@@ -584,6 +606,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
+ icm_xdomain_activated(xd, true);
return 0;
}
@@ -603,6 +626,8 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
nhi_mailbox_cmd(tb->nhi, cmd, 1);
usleep_range(10, 50);
nhi_mailbox_cmd(tb->nhi, cmd, 2);
+
+ icm_xdomain_activated(xd, false);
return 0;
}
@@ -1151,6 +1176,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
+ icm_xdomain_activated(xd, true);
return 0;
}
@@ -1191,7 +1217,12 @@ static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
return ret;
usleep_range(10, 50);
- return icm_tr_xdomain_tear_down(tb, xd, 2);
+ ret = icm_tr_xdomain_tear_down(tb, xd, 2);
+ if (ret)
+ return ret;
+
+ icm_xdomain_activated(xd, false);
+ return 0;
}
static void
@@ -1718,6 +1749,9 @@ static void icm_handle_notification(struct work_struct *work)
if (tb_is_xdomain_enabled())
icm->xdomain_disconnected(tb, n->pkg);
break;
+ case ICM_EVENT_DP_CONFIG_CHANGED:
+ icm_dp_event(tb);
+ break;
case ICM_EVENT_RTD3_VETO:
icm->rtd3_veto(tb, n->pkg);
break;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 6a2116cbb06f..28febb95f8fa 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -3599,6 +3599,7 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
flags |= TB_WAKE_ON_USB4;
flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
} else if (device_may_wakeup(&sw->dev)) {
+ flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 8c527af98927..c14ab1fbeeaf 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -952,6 +952,15 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
available_up, available_down);
+ /*
+ * If the available bandwidth is less than 1.5 Gb/s, notify
+ * userspace that the connected isochronous device may not work
+ * properly.
+ */
+ if (available_up < 1500 || available_down < 1500)
+ tb_tunnel_event(tb, TB_TUNNEL_LOW_BANDWIDTH, TB_TUNNEL_USB3,
+ down, up);
+
tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
available_down);
if (!tunnel) {
@@ -2000,8 +2009,10 @@ static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
true);
- if (ret)
+ if (ret) {
+ tb_tunnel_event(tb, TB_TUNNEL_NO_BANDWIDTH, TB_TUNNEL_DP, in, out);
goto err_reclaim_usb;
+ }
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
@@ -2622,8 +2633,12 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
}
}
- return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
- requested_down);
+ ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
+ requested_down);
+ if (ret)
+ goto fail;
+
+ return 0;
}
/*
@@ -2699,6 +2714,7 @@ fail:
"failing the request by rewriting allocated %d/%d Mb/s\n",
allocated_up, allocated_down);
tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
+ tb_tunnel_event(tb, TB_TUNNEL_NO_BANDWIDTH, TB_TUNNEL_DP, in, out);
}
return ret;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index b54147a1ba87..87afd5a7c504 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -804,6 +804,19 @@ static inline void tb_domain_put(struct tb *tb)
put_device(&tb->dev);
}
+/**
+ * tb_domain_event() - Notify userspace about an event in domain
+ * @tb: Domain where event occurred
+ * @envp: Array of uevent environment strings (can be %NULL)
+ *
+ * This function provides a way to notify userspace about any events
+ * that take place in the domain.
+ */
+static inline void tb_domain_event(struct tb *tb, char *envp[])
+{
+ kobject_uevent_env(&tb->dev.kobj, KOBJ_CHANGE, envp);
+}
+
struct tb_nvm *tb_nvm_alloc(struct device *dev);
int tb_nvm_read_version(struct tb_nvm *nvm);
int tb_nvm_validate(struct tb_nvm *nvm);
@@ -1468,6 +1481,7 @@ static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev)
struct usb4_port *usb4_port_device_add(struct tb_port *port);
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);
+int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port);
static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4)
{
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index a1670a96cbdc..144f7332d5d2 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -118,6 +118,7 @@ enum icm_event_code {
ICM_EVENT_DEVICE_DISCONNECTED = 0x4,
ICM_EVENT_XDOMAIN_CONNECTED = 0x6,
ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7,
+ ICM_EVENT_DP_CONFIG_CHANGED = 0x8,
ICM_EVENT_RTD3_VETO = 0xa,
};
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 76254ed3f47f..d52efe3f658c 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -100,6 +100,14 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+static const char * const tb_event_names[] = {
+ [TB_TUNNEL_ACTIVATED] = "activated",
+ [TB_TUNNEL_CHANGED] = "changed",
+ [TB_TUNNEL_DEACTIVATED] = "deactivated",
+ [TB_TUNNEL_LOW_BANDWIDTH] = "low bandwidth",
+ [TB_TUNNEL_NO_BANDWIDTH] = "insufficient bandwidth",
+};
+
/* Synchronizes kref_get()/put() of struct tb_tunnel */
static DEFINE_MUTEX(tb_tunnel_lock);
@@ -220,6 +228,72 @@ void tb_tunnel_put(struct tb_tunnel *tunnel)
mutex_unlock(&tb_tunnel_lock);
}
+/**
+ * tb_tunnel_event() - Notify userspace about tunneling event
+ * @tb: Domain where the event occurred
+ * @event: Event that happened
+ * @type: Type of the tunnel in question
+ * @src_port: Tunnel source port (can be %NULL)
+ * @dst_port: Tunnel destination port (can be %NULL)
+ *
+ * Notifies userspace about tunneling @event in the domain. The tunnel
+ * does not need to exist (e.g. the tunnel was not activated because
+ * there is not enough bandwidth). If @src_port and @dst_port are
+ * given, the full %TUNNEL_DETAILS environment variable is filled in;
+ * otherwise the shorter form (just the tunnel type) is used.
+ */
+void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
+ enum tb_tunnel_type type,
+ const struct tb_port *src_port,
+ const struct tb_port *dst_port)
+{
+ char *envp[3] = { NULL };
+
+ if (WARN_ON_ONCE(event >= ARRAY_SIZE(tb_event_names)))
+ return;
+ if (WARN_ON_ONCE(type >= ARRAY_SIZE(tb_tunnel_names)))
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "TUNNEL_EVENT=%s", tb_event_names[event]);
+ if (!envp[0])
+ return;
+
+ if (src_port != NULL && dst_port != NULL) {
+ envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=%llx:%u <-> %llx:%u (%s)",
+ tb_route(src_port->sw), src_port->port,
+ tb_route(dst_port->sw), dst_port->port,
+ tb_tunnel_names[type]);
+ } else {
+ envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=(%s)",
+ tb_tunnel_names[type]);
+ }
+
+ if (envp[1])
+ tb_domain_event(tb, envp);
+
+ kfree(envp[1]);
+ kfree(envp[0]);
+}
+
+static inline void tb_tunnel_set_active(struct tb_tunnel *tunnel, bool active)
+{
+ if (active) {
+ tunnel->state = TB_TUNNEL_ACTIVE;
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_ACTIVATED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+ } else {
+ tunnel->state = TB_TUNNEL_INACTIVE;
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_DEACTIVATED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+ }
+}
+
+static inline void tb_tunnel_changed(struct tb_tunnel *tunnel)
+{
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_CHANGED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+}
+
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
@@ -992,7 +1066,7 @@ static void tb_dp_dprx_work(struct work_struct *work)
return;
}
} else {
- tunnel->state = TB_TUNNEL_ACTIVE;
+ tb_tunnel_set_active(tunnel, true);
}
mutex_unlock(&tb->lock);
}
@@ -2326,7 +2400,7 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel)
}
}
- tunnel->state = TB_TUNNEL_ACTIVE;
+ tb_tunnel_set_active(tunnel, true);
return 0;
err:
@@ -2356,7 +2430,7 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
if (tunnel->post_deactivate)
tunnel->post_deactivate(tunnel);
- tunnel->state = TB_TUNNEL_INACTIVE;
+ tb_tunnel_set_active(tunnel, false);
}
/**
@@ -2449,8 +2523,16 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
if (!tb_tunnel_is_active(tunnel))
return -ENOTCONN;
- if (tunnel->alloc_bandwidth)
- return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
+ if (tunnel->alloc_bandwidth) {
+ int ret;
+
+ ret = tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
+ if (ret)
+ return ret;
+
+ tb_tunnel_changed(tunnel);
+ return 0;
+ }
return -EOPNOTSUPP;
}
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 8a0a0cb21a89..5e9fb73d5220 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -194,6 +194,29 @@ static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel
tunnel->dst_port);
}
+/**
+ * enum tb_tunnel_event - Tunnel related events
+ * @TB_TUNNEL_ACTIVATED: A tunnel was activated
+ * @TB_TUNNEL_CHANGED: There is a tunneling change in the domain. Includes
+ * full %TUNNEL_DETAILS if the tunnel in question is known
+ * (ICM does not provide that information).
+ * @TB_TUNNEL_DEACTIVATED: A tunnel was torn down
+ * @TB_TUNNEL_LOW_BANDWIDTH: Tunnel bandwidth is not optimal
+ * @TB_TUNNEL_NO_BANDWIDTH: There is not enough bandwidth for a tunnel
+ */
+enum tb_tunnel_event {
+ TB_TUNNEL_ACTIVATED,
+ TB_TUNNEL_CHANGED,
+ TB_TUNNEL_DEACTIVATED,
+ TB_TUNNEL_LOW_BANDWIDTH,
+ TB_TUNNEL_NO_BANDWIDTH,
+};
+
+void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
+ enum tb_tunnel_type type,
+ const struct tb_port *src_port,
+ const struct tb_port *dst_port);
+
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index e51d01671d8e..fce3c0f2354a 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -440,10 +440,10 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
bool configured = val & PORT_CS_19_PC;
usb4 = port->usb4;
- if (((flags & TB_WAKE_ON_CONNECT) |
+ if (((flags & TB_WAKE_ON_CONNECT) &&
device_may_wakeup(&usb4->dev)) && !configured)
val |= PORT_CS_19_WOC;
- if (((flags & TB_WAKE_ON_DISCONNECT) |
+ if (((flags & TB_WAKE_ON_DISCONNECT) &&
device_may_wakeup(&usb4->dev)) && configured)
val |= PORT_CS_19_WOD;
if ((flags & TB_WAKE_ON_USB4) && configured)
@@ -935,7 +935,15 @@ int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
return status ? -EIO : 0;
}
-static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
+/**
+ * usb4_port_index() - Finds matching USB4 port index
+ * @sw: USB4 router
+ * @port: USB4 protocol or lane adapter
+ *
+ * Finds the matching USB4 port index (starting from %0) that the
+ * given @port goes through.
+ */
+int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port)
{
struct tb_port *p;
int usb4_idx = 0;
@@ -969,7 +977,7 @@ static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
const struct tb_port *port)
{
- int usb4_idx = usb4_port_idx(sw, port);
+ int usb4_idx = usb4_port_index(sw, port);
struct tb_port *p;
int pcie_idx = 0;
@@ -1000,7 +1008,7 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
const struct tb_port *port)
{
- int usb4_idx = usb4_port_idx(sw, port);
+ int usb4_idx = usb4_port_index(sw, port);
struct tb_port *p;
int usb_idx = 0;
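
The wake-flag fix earlier in this file replaces a bitwise OR with a logical AND: with `|`, the condition was non-zero whenever either operand was, so a wake flag alone armed the port even when the device was not allowed to wake the system. A minimal demonstration (flag value made up):

#include <stdbool.h>
#include <stdio.h>

#define TB_WAKE_ON_CONNECT 0x4 /* placeholder bit value */

int main(void)
{
	unsigned int flags = TB_WAKE_ON_CONNECT;
	bool may_wakeup = false;

	/* Buggy: bitwise OR, true because flags has the bit set. */
	bool buggy = (flags & TB_WAKE_ON_CONNECT) | may_wakeup;
	/* Fixed: logical AND, both conditions must hold. */
	bool fixed = (flags & TB_WAKE_ON_CONNECT) && may_wakeup;

	printf("buggy=%d fixed=%d\n", buggy, fixed);
	return 0;
}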
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index 5150879888ca..852a45fcd19d 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -105,6 +105,49 @@ static void usb4_port_online(struct usb4_port *usb4)
tb_acpi_power_off_retimers(port);
}
+/**
+ * usb4_usb3_port_match() - Matches USB4 port device with USB 3.x port device
+ * @usb4_port_dev: USB4 port device
+ * @usb3_port_fwnode: USB 3.x port firmware node
+ *
+ * Checks if USB 3.x port @usb3_port_fwnode is tunneled through USB4 port @usb4_port_dev.
+ * Returns true if a match is found, false otherwise.
+ *
+ * Function is designed to be used with component framework (component_match_add).
+ */
+bool usb4_usb3_port_match(struct device *usb4_port_dev,
+ const struct fwnode_handle *usb3_port_fwnode)
+{
+ struct fwnode_handle *nhi_fwnode __free(fwnode_handle) = NULL;
+ struct usb4_port *usb4;
+ struct tb_switch *sw;
+ struct tb_nhi *nhi;
+ u8 usb4_port_num;
+ struct tb *tb;
+
+ usb4 = tb_to_usb4_port_device(usb4_port_dev);
+ if (!usb4)
+ return false;
+
+ sw = usb4->port->sw;
+ tb = sw->tb;
+ nhi = tb->nhi;
+
+ nhi_fwnode = fwnode_find_reference(usb3_port_fwnode, "usb4-host-interface", 0);
+ if (IS_ERR(nhi_fwnode))
+ return false;
+
+ /* Check if the USB3 fwnode references the same NHI where the USB4 port resides */
+ if (!device_match_fwnode(&nhi->pdev->dev, nhi_fwnode))
+ return false;
+
+ if (fwnode_property_read_u8(usb3_port_fwnode, "usb4-port-number", &usb4_port_num))
+ return false;
+
+ return usb4_port_index(sw, usb4->port) == usb4_port_num;
+}
+EXPORT_SYMBOL_GPL(usb4_usb3_port_match);
+
static ssize_t offline_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -276,12 +319,10 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
return ERR_PTR(ret);
}
- if (dev_fwnode(&usb4->dev)) {
- ret = component_add(&usb4->dev, &connector_ops);
- if (ret) {
- dev_err(&usb4->dev, "failed to add component\n");
- device_unregister(&usb4->dev);
- }
+ ret = component_add(&usb4->dev, &connector_ops);
+ if (ret) {
+ dev_err(&usb4->dev, "failed to add component\n");
+ device_unregister(&usb4->dev);
}
if (!tb_is_upstream_port(port))
@@ -306,8 +347,7 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
*/
void usb4_port_device_remove(struct usb4_port *usb4)
{
- if (dev_fwnode(&usb4->dev))
- component_del(&usb4->dev, &connector_ops);
+ component_del(&usb4->dev, &connector_ops);
device_unregister(&usb4->dev);
}
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index c13f52337035..e18848267be4 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1676,7 +1676,7 @@ void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
static void ipwireless_setup_timer(struct timer_list *t)
{
- struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
+ struct ipw_hardware *hw = timer_container_of(hw, t, setup_timer);
hw->init_loops++;
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index fa47bfcf9e86..ba7690c3e15e 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -683,7 +683,8 @@ static irqreturn_t mips_ejtag_fdc_isr(int irq, void *dev_id)
*/
static void mips_ejtag_fdc_tty_timer(struct timer_list *t)
{
- struct mips_ejtag_fdc_tty *priv = from_timer(priv, t, poll_timer);
+ struct mips_ejtag_fdc_tty *priv = timer_container_of(priv, t,
+ poll_timer);
mips_ejtag_fdc_handle(priv);
if (!priv->removing)
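
from_timer() is being renamed tree-wide to timer_container_of(); the semantics are unchanged, it is still a container_of() from the timer_list back to the owning structure. The pattern, with a hypothetical device structure:

#include <linux/timer.h>

struct my_dev {
	struct timer_list poll_timer;
	int polls;
};

static void my_poll(struct timer_list *t)
{
	/* Recover the enclosing structure from the embedded timer. */
	struct my_dev *md = timer_container_of(md, t, poll_timer);

	md->polls++;
	mod_timer(&md->poll_timer, jiffies + HZ);
}

static void my_start(struct my_dev *md)
{
	timer_setup(&md->poll_timer, my_poll, 0);
	mod_timer(&md->poll_timer, jiffies + HZ);
}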
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 4d45eca4929a..2fc13cc02cc5 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1812,7 +1812,7 @@ static int mxser_probe(struct pci_dev *pdev,
/* io address */
ioaddress = pci_resource_start(pdev, 2);
- retval = pci_request_region(pdev, 2, "mxser(IO)");
+ retval = pcim_request_region(pdev, 2, "mxser(IO)");
if (retval)
goto err_zero;
@@ -1822,7 +1822,7 @@ static int mxser_probe(struct pci_dev *pdev,
/* vector */
ioaddress = pci_resource_start(pdev, 3);
- retval = pci_request_region(pdev, 3, "mxser(vector)");
+ retval = pcim_request_region(pdev, 3, "mxser(vector)");
if (retval)
goto err_zero;
brd->vector = ioaddress;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 40a336ef8c7e..7fc535452c0b 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1971,7 +1971,7 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
static void gsm_control_keep_alive(struct timer_list *t)
{
- struct gsm_mux *gsm = from_timer(gsm, t, ka_timer);
+ struct gsm_mux *gsm = timer_container_of(gsm, t, ka_timer);
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
@@ -2028,7 +2028,7 @@ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
static void gsm_control_retransmit(struct timer_list *t)
{
- struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
+ struct gsm_mux *gsm = timer_container_of(gsm, t, t2_timer);
struct gsm_control *ctrl;
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
@@ -2229,7 +2229,7 @@ static int gsm_dlci_negotiate(struct gsm_dlci *dlci)
static void gsm_dlci_t1(struct timer_list *t)
{
- struct gsm_dlci *dlci = from_timer(dlci, t, t1);
+ struct gsm_dlci *dlci = timer_container_of(dlci, t, t1);
struct gsm_mux *gsm = dlci->gsm;
switch (dlci->state) {
@@ -2489,7 +2489,7 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
*/
static void gsm_kick_timer(struct timer_list *t)
{
- struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
+ struct gsm_mux *gsm = timer_container_of(gsm, t, kick_timer);
unsigned long flags;
int sent = 0;
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index eb2a2e58fe78..0213381fa358 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -118,12 +118,11 @@ int serdev_device_add(struct serdev_device *serdev)
err = device_add(&serdev->dev);
if (err < 0) {
- dev_err(&serdev->dev, "Can't add %s, status %pe\n",
- dev_name(&serdev->dev), ERR_PTR(err));
+ dev_err(&serdev->dev, "Failed to add serdev: %d\n", err);
goto err_clear_serdev;
}
- dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev));
+ dev_dbg(&serdev->dev, "serdev registered successfully\n");
return 0;
@@ -783,8 +782,7 @@ int serdev_controller_add(struct serdev_controller *ctrl)
goto err_rpm_disable;
}
- dev_dbg(&ctrl->dev, "serdev%d registered: dev:%p\n",
- ctrl->nr, &ctrl->dev);
+ dev_dbg(&ctrl->dev, "serdev controller registered: dev:%p\n", &ctrl->dev);
return 0;
err_rpm_disable:
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index b861585ca02a..18530c31a598 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -223,12 +223,6 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
struct uart_8250_port *serial8250_setup_port(int index);
struct uart_8250_port *serial8250_get_port(int line);
-void serial8250_rpm_get(struct uart_8250_port *p);
-void serial8250_rpm_put(struct uart_8250_port *p);
-
-void serial8250_rpm_get_tx(struct uart_8250_port *p);
-void serial8250_rpm_put_tx(struct uart_8250_port *p);
-
int serial8250_em485_config(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485);
void serial8250_em485_start_tx(struct uart_8250_port *p, bool toggle_ier);
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 392447038bfb..26fc0464f1cc 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -304,7 +304,8 @@ static void aspeed_vuart_unthrottle(struct uart_port *port)
static void aspeed_vuart_unthrottle_exp(struct timer_list *timer)
{
- struct aspeed_vuart *vuart = from_timer(vuart, timer, unthrottle_timer);
+ struct aspeed_vuart *vuart = timer_container_of(vuart, timer,
+ unthrottle_timer);
struct uart_8250_port *up = vuart->port;
if (!tty_buffer_space_avail(&up->port.state->port)) {
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 5a56f853cf6d..7a6050f1c094 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -206,7 +206,7 @@ static void serial_unlink_irq_chain(struct uart_8250_port *up)
*/
static void serial8250_timeout(struct timer_list *t)
{
- struct uart_8250_port *up = from_timer(up, t, timer);
+ struct uart_8250_port *up = timer_container_of(up, t, timer);
up->port.handle_irq(&up->port);
mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
@@ -214,7 +214,7 @@ static void serial8250_timeout(struct timer_list *t)
static void serial8250_backup_timeout(struct timer_list *t)
{
- struct uart_8250_port *up = from_timer(up, t, timer);
+ struct uart_8250_port *up = timer_container_of(up, t, timer);
unsigned int iir, ier = 0, lsr;
unsigned long flags;
@@ -461,7 +461,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
char *options)
{
char match[] = "uart"; /* 8250-specific earlycon name */
- unsigned char iotype;
+ enum uart_iotype iotype;
resource_size_t addr;
int i;
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 842422921765..dc0371857ecb 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -77,6 +77,8 @@ static void serial8250_early_out(struct uart_port *port, int offset, int value)
outb(value, port->iobase + offset);
break;
#endif
+ default:
+ break;
}
}
diff --git a/drivers/tty/serial/8250/8250_ni.c b/drivers/tty/serial/8250/8250_ni.c
index b10a42d2ad63..b0e44fb00b3a 100644
--- a/drivers/tty/serial/8250/8250_ni.c
+++ b/drivers/tty/serial/8250/8250_ni.c
@@ -10,14 +10,18 @@
* Copyright 2012-2023 National Instruments Corporation
*/
-#include <linux/acpi.h>
#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
-#include <linux/clk.h>
+#include <linux/serial_core.h>
+#include <linux/types.h>
#include "8250.h"
@@ -90,10 +94,10 @@ static int ni16550_disable_transceivers(struct uart_port *port)
{
u8 pcr;
- pcr = port->serial_in(port, NI16550_PCR_OFFSET);
+ pcr = serial_port_in(port, NI16550_PCR_OFFSET);
pcr &= ~NI16550_PCR_TXVR_ENABLE_BIT;
dev_dbg(port->dev, "disable transceivers: write pcr: 0x%02x\n", pcr);
- port->serial_out(port, NI16550_PCR_OFFSET, pcr);
+ serial_port_out(port, NI16550_PCR_OFFSET, pcr);
return 0;
}
@@ -105,7 +109,7 @@ static int ni16550_rs485_config(struct uart_port *port,
struct uart_8250_port *up = container_of(port, struct uart_8250_port, port);
u8 pcr;
- pcr = serial_in(up, NI16550_PCR_OFFSET);
+ pcr = serial_port_in(port, NI16550_PCR_OFFSET);
pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
if ((rs485->flags & SER_RS485_MODE_RS422) ||
@@ -120,7 +124,7 @@ static int ni16550_rs485_config(struct uart_port *port,
}
dev_dbg(port->dev, "config rs485: write pcr: 0x%02x, acr: %02x\n", pcr, up->acr);
- serial_out(up, NI16550_PCR_OFFSET, pcr);
+ serial_port_out(port, NI16550_PCR_OFFSET, pcr);
serial_icr_write(up, UART_ACR, up->acr);
return 0;
@@ -224,31 +228,26 @@ static int ni16550_get_regs(struct platform_device *pdev,
{
struct resource *regs;
- regs = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (regs) {
+ regs = platform_get_mem_or_io(pdev, 0);
+ if (!regs)
+ return dev_err_probe(&pdev->dev, -EINVAL, "no registers defined\n");
+
+ switch (resource_type(regs)) {
+ case IORESOURCE_IO:
port->iotype = UPIO_PORT;
port->iobase = regs->start;
return 0;
- }
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (regs) {
+ case IORESOURCE_MEM:
port->iotype = UPIO_MEM;
port->mapbase = regs->start;
port->mapsize = resource_size(regs);
port->flags |= UPF_IOREMAP;
- port->membase = devm_ioremap(&pdev->dev, port->mapbase,
- port->mapsize);
- if (!port->membase)
- return -ENOMEM;
-
return 0;
+ default:
+ return -EINVAL;
}
-
- dev_err(&pdev->dev, "no registers defined\n");
- return -EINVAL;
}
/*
@@ -280,12 +279,11 @@ static int ni16550_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uart_8250_port uart = {};
unsigned int txfifosz, rxfifosz;
- unsigned int prescaler = 0;
+ unsigned int prescaler;
struct ni16550_data *data;
const char *portmode;
bool rs232_property;
int ret;
- int irq;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -293,10 +291,6 @@ static int ni16550_probe(struct platform_device *pdev)
spin_lock_init(&uart.port.lock);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
ret = ni16550_get_regs(pdev, &uart.port);
if (ret < 0)
return ret;
@@ -307,10 +301,7 @@ static int ni16550_probe(struct platform_device *pdev)
info = device_get_match_data(dev);
uart.port.dev = dev;
- uart.port.irq = irq;
- uart.port.irqflags = IRQF_SHARED;
- uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF
- | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ uart.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;
uart.port.startup = ni16550_port_startup;
uart.port.shutdown = ni16550_port_shutdown;
@@ -332,28 +323,26 @@ static int ni16550_probe(struct platform_device *pdev)
/*
* Declaration of the base clock frequency can come from one of:
* - static declaration in this driver (for older ACPI IDs)
- * - a "clock-frquency" ACPI
+ * - a "clock-frequency" ACPI
*/
- if (info->uartclk)
- uart.port.uartclk = info->uartclk;
- if (device_property_read_u32(dev, "clock-frequency",
- &uart.port.uartclk)) {
+ uart.port.uartclk = info->uartclk;
+
+ ret = uart_read_port_properties(&uart.port);
+ if (ret)
+ return ret;
+
+ if (!uart.port.uartclk) {
data->clk = devm_clk_get_enabled(dev, NULL);
if (!IS_ERR(data->clk))
uart.port.uartclk = clk_get_rate(data->clk);
}
- if (!uart.port.uartclk) {
- dev_err(dev, "unable to determine clock frequency!\n");
- ret = -ENODEV;
- goto err;
- }
+ if (!uart.port.uartclk)
+ return dev_err_probe(dev, -ENODEV, "unable to determine clock frequency!\n");
- if (info->prescaler)
- prescaler = info->prescaler;
+ prescaler = info->prescaler;
device_property_read_u32(dev, "clock-prescaler", &prescaler);
-
- if (prescaler != 0) {
+ if (prescaler) {
uart.port.set_mctrl = ni16550_set_mctrl;
ni16550_config_prescaler(&uart, (u8)prescaler);
}
@@ -393,14 +382,11 @@ static int ni16550_probe(struct platform_device *pdev)
ret = serial8250_register_8250_port(&uart);
if (ret < 0)
- goto err;
+ return ret;
data->line = ret;
platform_set_drvdata(pdev, data);
return 0;
-
-err:
- return ret;
}
static void ni16550_remove(struct platform_device *pdev)
@@ -410,7 +396,6 @@ static void ni16550_remove(struct platform_device *pdev)
serial8250_unregister_port(data->line);
}
-#ifdef CONFIG_ACPI
/* NI 16550 RS-485 Interface */
static const struct ni16550_device_info nic7750 = {
.uartclk = 33333333,
@@ -435,20 +420,20 @@ static const struct ni16550_device_info nic7a69 = {
.uartclk = 29629629,
.prescaler = 0x09,
};
+
static const struct acpi_device_id ni16550_acpi_match[] = {
{ "NIC7750", (kernel_ulong_t)&nic7750 },
{ "NIC7772", (kernel_ulong_t)&nic7772 },
{ "NIC792B", (kernel_ulong_t)&nic792b },
{ "NIC7A69", (kernel_ulong_t)&nic7a69 },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, ni16550_acpi_match);
-#endif
static struct platform_driver ni16550_driver = {
.driver = {
.name = "ni16550",
- .acpi_match_table = ACPI_PTR(ni16550_acpi_match),
+ .acpi_match_table = ni16550_acpi_match,
},
.probe = ni16550_probe,
.remove = ni16550_remove,
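A minimal sketch of the consolidated resource probe above, assuming the first platform resource may be either port I/O or MMIO (demo_get_regs is illustrative). platform_get_mem_or_io() fetches whichever exists at index 0, and with UPF_IOREMAP set the 8250 core performs the ioremap() itself, which is why the explicit devm_ioremap() could be dropped:

#include <linux/platform_device.h>
#include <linux/serial_core.h>

static int demo_get_regs(struct platform_device *pdev, struct uart_port *port)
{
	struct resource *regs = platform_get_mem_or_io(pdev, 0);

	if (!regs)
		return dev_err_probe(&pdev->dev, -EINVAL, "no registers defined\n");

	switch (resource_type(regs)) {
	case IORESOURCE_IO:
		port->iotype = UPIO_PORT;
		port->iobase = regs->start;
		return 0;
	case IORESOURCE_MEM:
		port->iotype = UPIO_MEM;
		port->mapbase = regs->start;
		port->mapsize = resource_size(regs);
		port->flags |= UPF_IOREMAP;	/* 8250 core maps it later */
		return 0;
	default:
		return -EINVAL;
	}
}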
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 11c860ea80f6..d178b6c54ea1 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -24,6 +24,7 @@
struct of_serial_info {
struct clk *clk;
+ struct clk *bus_clk;
struct reset_control *rst;
int type;
int line;
@@ -123,12 +124,22 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
/* Get clk rate through clk driver if present */
if (!port->uartclk) {
- info->clk = devm_clk_get_enabled(dev, NULL);
+ struct clk *bus_clk;
+
+ bus_clk = devm_clk_get_optional_enabled(dev, "bus");
+ if (IS_ERR(bus_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n");
+ goto err_pmruntime;
+ }
+
+ /* If the bus clock is required, the core clock must be named */
+ info->clk = devm_clk_get_enabled(dev, bus_clk ? "core" : NULL);
if (IS_ERR(info->clk)) {
ret = dev_err_probe(dev, PTR_ERR(info->clk), "failed to get clock\n");
goto err_pmruntime;
}
+ info->bus_clk = bus_clk;
port->uartclk = clk_get_rate(info->clk);
}
/* If current-speed was set, then try not to change it. */
@@ -290,6 +301,7 @@ static int of_serial_suspend(struct device *dev)
if (!uart_console(port) || console_suspend_enabled) {
pm_runtime_put_sync(dev);
clk_disable_unprepare(info->clk);
+ clk_disable_unprepare(info->bus_clk);
}
return 0;
}
@@ -302,6 +314,7 @@ static int of_serial_resume(struct device *dev)
if (!uart_console(port) || console_suspend_enabled) {
pm_runtime_get_sync(dev);
+ clk_prepare_enable(info->bus_clk);
clk_prepare_enable(info->clk);
}
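A minimal sketch of the optional bus-clock handling added above, assuming a DT node that may carry "bus" and "core" clock names (demo_get_clks is illustrative). When no "bus" clock exists, devm_clk_get_optional_enabled() returns NULL and the core clock is looked up anonymously, preserving the old single-clock binding:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int demo_get_clks(struct device *dev, struct clk **core, struct clk **bus)
{
	*bus = devm_clk_get_optional_enabled(dev, "bus");
	if (IS_ERR(*bus))
		return PTR_ERR(*bus);

	/* a present bus clock implies the core clock is named */
	*core = devm_clk_get_enabled(dev, *bus ? "core" : NULL);
	return PTR_ERR_OR_ZERO(*core);
}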
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 2a0ce11f405d..72ae08d6204f 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1173,16 +1173,6 @@ static int omap_8250_tx_dma(struct uart_8250_port *p)
return 0;
}
- sg_init_table(&sg, 1);
- ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1,
- UART_XMIT_SIZE, dma->tx_addr);
- if (ret != 1) {
- serial8250_clear_THRI(p);
- return 0;
- }
-
- dma->tx_size = sg_dma_len(&sg);
-
if (priv->habit & OMAP_DMA_TX_KICK) {
unsigned char c;
u8 tx_lvl;
@@ -1207,18 +1197,22 @@ static int omap_8250_tx_dma(struct uart_8250_port *p)
ret = -EBUSY;
goto err;
}
- if (dma->tx_size < 4) {
+ if (kfifo_len(&tport->xmit_fifo) < 4) {
ret = -EINVAL;
goto err;
}
- if (!kfifo_get(&tport->xmit_fifo, &c)) {
+ if (!uart_fifo_out(&p->port, &c, 1)) {
ret = -EINVAL;
goto err;
}
skip_byte = c;
- /* now we need to recompute due to kfifo_get */
- kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1,
- UART_XMIT_SIZE, dma->tx_addr);
+ }
+
+ sg_init_table(&sg, 1);
+ ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, UART_XMIT_SIZE, dma->tx_addr);
+ if (ret != 1) {
+ ret = -EINVAL;
+ goto err;
}
desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1, DMA_MEM_TO_DEV,
@@ -1228,6 +1222,7 @@ static int omap_8250_tx_dma(struct uart_8250_port *p)
goto err;
}
+ dma->tx_size = sg_dma_len(&sg);
dma->tx_running = 1;
desc->callback = omap_8250_dma_tx_complete;
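A minimal sketch of the single-prepare ordering adopted above, assuming a tty_port kfifo already mapped for DMA at tx_addr (demo_prep_tx is illustrative). Popping the kick byte first and mapping once avoids the stale scatterlist the old code had to recompute after kfifo_get():

#include <linux/kfifo.h>
#include <linux/scatterlist.h>

static int demo_prep_tx(struct kfifo *fifo, dma_addr_t tx_addr,
			struct scatterlist *sg, size_t size)
{
	sg_init_table(sg, 1);
	if (kfifo_dma_out_prepare_mapped(fifo, sg, 1, size, tx_addr) != 1)
		return -EINVAL;	/* nothing contiguous to send */

	return sg_dma_len(sg);	/* bytes the DMA descriptor will cover */
}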
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index e9c51d4e447d..4c149db84692 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -115,6 +115,7 @@
#define UART_RESET_REG 0x94
#define UART_RESET_D3_RESET_DISABLE BIT(16)
+#define UART_RESET_HOT_RESET_DISABLE BIT(17)
#define UART_BURST_STATUS_REG 0x9C
#define UART_TX_BURST_FIFO 0xA0
@@ -620,6 +621,10 @@ static int pci1xxxx_suspend(struct device *dev)
}
data = readl(p + UART_RESET_REG);
+
+ if (priv->dev_rev >= 0xC0)
+ data |= UART_RESET_HOT_RESET_DISABLE;
+
writel(data | UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG);
if (wakeup)
@@ -647,7 +652,12 @@ static int pci1xxxx_resume(struct device *dev)
}
data = readl(p + UART_RESET_REG);
+
+ if (priv->dev_rev >= 0xC0)
+ data &= ~UART_RESET_HOT_RESET_DISABLE;
+
writel(data & ~UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG);
+
iounmap(p);
for (i = 0; i < priv->nr; i++) {
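A minimal sketch of the revision-gated register update above, using the register names from the patch (the helper itself is illustrative). Parts at revision 0xC0 and later additionally mask hot reset across D3, and resume clears the same bit again:

#include <linux/io.h>

static void demo_d3_reset_mask(void __iomem *p, u8 dev_rev, bool enter)
{
	u32 data = readl(p + UART_RESET_REG);

	if (dev_rev >= 0xC0) {
		if (enter)
			data |= UART_RESET_HOT_RESET_DISABLE;
		else
			data &= ~UART_RESET_HOT_RESET_DISABLE;
	}

	if (enter)
		data |= UART_RESET_D3_RESET_DISABLE;
	else
		data &= ~UART_RESET_D3_RESET_DISABLE;

	writel(data, p + UART_RESET_REG);
}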
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8ac452cea36c..6d7b8c4667c9 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -517,22 +517,20 @@ void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
}
EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
-void serial8250_rpm_get(struct uart_8250_port *p)
+static void serial8250_rpm_get(struct uart_8250_port *p)
{
if (!(p->capabilities & UART_CAP_RPM))
return;
pm_runtime_get_sync(p->port.dev);
}
-EXPORT_SYMBOL_GPL(serial8250_rpm_get);
-void serial8250_rpm_put(struct uart_8250_port *p)
+static void serial8250_rpm_put(struct uart_8250_port *p)
{
if (!(p->capabilities & UART_CAP_RPM))
return;
pm_runtime_mark_last_busy(p->port.dev);
pm_runtime_put_autosuspend(p->port.dev);
}
-EXPORT_SYMBOL_GPL(serial8250_rpm_put);
/**
* serial8250_em485_init() - put uart_8250_port into rs485 emulating
@@ -647,7 +645,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_config);
* once and disable_runtime_pm_tx() will still disable RPM because the fifo is
* empty and the HW can idle again.
*/
-void serial8250_rpm_get_tx(struct uart_8250_port *p)
+static void serial8250_rpm_get_tx(struct uart_8250_port *p)
{
unsigned char rpm_active;
@@ -659,9 +657,8 @@ void serial8250_rpm_get_tx(struct uart_8250_port *p)
return;
pm_runtime_get_sync(p->port.dev);
}
-EXPORT_SYMBOL_GPL(serial8250_rpm_get_tx);
-void serial8250_rpm_put_tx(struct uart_8250_port *p)
+static void serial8250_rpm_put_tx(struct uart_8250_port *p)
{
unsigned char rpm_active;
@@ -674,7 +671,6 @@ void serial8250_rpm_put_tx(struct uart_8250_port *p)
pm_runtime_mark_last_busy(p->port.dev);
pm_runtime_put_autosuspend(p->port.dev);
}
-EXPORT_SYMBOL_GPL(serial8250_rpm_put_tx);
/*
* IER sleep support. UARTs which have EFRs need the "extended
@@ -2993,6 +2989,8 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
if (!request_region(port->iobase, size, "serial"))
return -EBUSY;
return 0;
+ case UPIO_UNKNOWN:
+ break;
}
return 0;
@@ -3025,6 +3023,8 @@ static void serial8250_release_std_resource(struct uart_8250_port *up)
case UPIO_PORT:
release_region(port->iobase, size);
break;
+ case UPIO_UNKNOWN:
+ break;
}
}
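The four runtime-PM helpers above lose their exports because they no longer have users outside 8250_port.c. A minimal sketch of the get/put bracket they implement, assuming a capability flag guards ports without runtime PM (demo_* names are illustrative):

#include <linux/pm_runtime.h>

static void demo_rpm_get(struct device *dev, bool has_rpm)
{
	if (has_rpm)
		pm_runtime_get_sync(dev);
}

static void demo_rpm_put(struct device *dev, bool has_rpm)
{
	if (!has_rpm)
		return;
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}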
diff --git a/drivers/tty/serial/8250/8250_rsa.c b/drivers/tty/serial/8250/8250_rsa.c
index 82f2593b4c59..4c8b9671bd41 100644
--- a/drivers/tty/serial/8250/8250_rsa.c
+++ b/drivers/tty/serial/8250/8250_rsa.c
@@ -43,6 +43,8 @@ static void rsa8250_release_resource(struct uart_8250_port *up)
case UPIO_PORT:
release_region(port->iobase + offset, size);
break;
+ default:
+ break;
}
}
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index bd3d636ff962..f64ef0819cd4 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -572,7 +572,7 @@ config SERIAL_8250_BCM7271
config SERIAL_8250_NI
tristate "NI 16550 based serial port"
depends on SERIAL_8250
- depends on (X86 && ACPI) || COMPILE_TEST
+ depends on X86 || COMPILE_TEST
help
This driver supports the integrated serial ports on National
Instruments (NI) controller hardware. This is required for all NI
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 011f38681131..837991dc4db9 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -275,7 +275,7 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
static void altera_uart_timer(struct timer_list *t)
{
- struct altera_uart *pp = from_timer(pp, t, tmr);
+ struct altera_uart *pp = timer_container_of(pp, t, tmr);
struct uart_port *port = &pp->port;
altera_uart_interrupt(0, port);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 11d65097578c..22939841b1de 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1046,7 +1046,7 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
*/
static void pl011_dma_rx_poll(struct timer_list *t)
{
- struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
+ struct uart_amba_port *uap = timer_container_of(uap, t, dmarx.timer);
struct tty_port *port = &uap->port.state->port;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = uap->dmarx.chan;
@@ -2476,7 +2476,7 @@ static int pl011_console_setup(struct console *co, char *options)
static int pl011_console_match(struct console *co, char *name, int idx,
char *options)
{
- unsigned char iotype;
+ enum uart_iotype iotype;
resource_size_t addr;
int i;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 18dba502144d..08dd8f887956 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1266,8 +1266,8 @@ chan_err:
static void atmel_uart_timer_callback(struct timer_list *t)
{
- struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
- uart_timer);
+ struct atmel_uart_port *atmel_port = timer_container_of(atmel_port, t,
+ uart_timer);
struct uart_port *port = &atmel_port->uart;
if (!atomic_read(&atmel_port->tasklet_shutdown)) {
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index fe5aed99d55a..2790b4078e7e 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -403,6 +403,8 @@ static inline void lpuart32_write(struct uart_port *port, u32 val,
case UPIO_MEM32BE:
iowrite32be(val, port->membase + off);
break;
+ default:
+ break;
}
}
@@ -563,8 +565,9 @@ static dma_addr_t lpuart_dma_datareg_addr(struct lpuart_port *sport)
return sport->port.mapbase + UARTDATA;
case UPIO_MEM32BE:
return sport->port.mapbase + UARTDATA + sizeof(u32) - 1;
+ default:
+ return sport->port.mapbase + UARTDR;
}
- return sport->port.mapbase + UARTDR;
}
static int lpuart_dma_tx_request(struct uart_port *port)
@@ -1304,7 +1307,7 @@ static irqreturn_t lpuart32_int(int irq, void *dev_id)
*/
static void lpuart_timer_func(struct timer_list *t)
{
- struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
+ struct lpuart_port *sport = timer_container_of(sport, t, lpuart_timer);
enum dma_status dmastat;
struct dma_chan *chan = sport->dma_rx_chan;
struct circ_buf *ring = &sport->rx_ring;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index e4b6f1bfdc95..bd02ee898f5d 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1165,7 +1165,7 @@ static void imx_uart_break_ctl(struct uart_port *port, int break_state)
*/
static void imx_uart_timeout(struct timer_list *t)
{
- struct imx_port *sport = from_timer(sport, t, timer);
+ struct imx_port *sport = timer_container_of(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index ce0fef7e2c66..be2f130696b3 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -451,6 +451,7 @@ int jsm_uart_port_init(struct jsm_board *brd)
if (!brd->channels[i])
continue;
+ brd->channels[i]->uart_port.dev = &brd->pci_dev->dev;
brd->channels[i]->uart_port.irq = brd->irq;
brd->channels[i]->uart_port.uartclk = 14745600;
brd->channels[i]->uart_port.type = PORT_JSM;
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 58a3ab030d67..62cd9e0bb377 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -773,10 +773,8 @@ static int fetch_irq_intel(struct device *dev, struct ltq_uart_port *ltq_port)
int ret;
ret = platform_get_irq(to_platform_device(dev), 0);
- if (ret < 0) {
- dev_err(dev, "failed to fetch IRQ for serial port\n");
+ if (ret < 0)
return ret;
- }
ltq_port->common_irq = ret;
port->irq = ret;
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 3a0960c97c77..6429e8f11f36 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -152,7 +152,7 @@ static irqreturn_t liteuart_interrupt(int irq, void *data)
static void liteuart_timer(struct timer_list *t)
{
- struct liteuart_port *uart = from_timer(uart, t, timer);
+ struct liteuart_port *uart = timer_container_of(uart, t, timer);
struct uart_port *port = &uart->port;
liteuart_interrupt(0, port);
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index f2dd83692b2c..67d80f8f801e 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -16,6 +16,7 @@
/* 4 MAX3100s should be enough for everyone */
#define MAX_MAX3100 4
+#include <linux/bitops.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -133,7 +134,7 @@ static int max3100_do_parity(struct max3100_port *s, u16 c)
else
c &= 0xff;
- parity = parity ^ (hweight8(c) & 1);
+ parity = parity ^ parity8(c);
return parity;
}
@@ -308,7 +309,7 @@ static void max3100_dowork(struct max3100_port *s)
static void max3100_timeout(struct timer_list *t)
{
- struct max3100_port *s = from_timer(s, t, timer);
+ struct max3100_port *s = timer_container_of(s, t, timer);
max3100_dowork(s);
mod_timer(&s->timer, jiffies + uart_poll_timeout(&s->port));
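A minimal sketch of the parity computation change above, assuming an 8-bit payload (demo_parity is illustrative). parity8() from <linux/bitops.h> returns 0 or 1 directly, replacing the open-coded hweight8(c) & 1:

#include <linux/bitops.h>
#include <linux/types.h>

static int demo_parity(u8 c, int parity_seed)
{
	return parity_seed ^ parity8(c);	/* 1 if c has odd popcount */
}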
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 35369a2f77b2..541c790c0109 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1189,13 +1189,16 @@ static int max310x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!((val >> 4) & (1 << (offset % 4)));
}
-static void max310x_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int max310x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
value ? 1 << (offset % 4) : 0);
+
+ return 0;
}
static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -1411,7 +1414,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
s->gpio.direction_input = max310x_gpio_direction_input;
s->gpio.get = max310x_gpio_get;
s->gpio.direction_output= max310x_gpio_direction_output;
- s->gpio.set = max310x_gpio_set;
+ s->gpio.set_rv = max310x_gpio_set;
s->gpio.set_config = max310x_gpio_set_config;
s->gpio.base = -1;
s->gpio.ngpio = devtype->nr * 4;
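A minimal sketch of the gpiochip setter conversion above, assuming a chip whose register write cannot fail (demo_gpio_set is illustrative). During the gpiolib transition the int-returning callback is registered via the temporary .set_rv hook instead of .set:

#include <linux/gpio/driver.h>

static int demo_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	/* write the hardware register for this line here */
	return 0;	/* report success to the gpiolib core */
}

/* registration during the transition: chip->set_rv = demo_gpio_set; */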
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 059bea18dbab..4e47dca2c4ed 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -523,7 +523,10 @@ static int mlb_usio_probe(struct platform_device *pdev)
}
port->membase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
-
+ if (!port->membase) {
+ ret = -ENOMEM;
+ goto failed;
+ }
ret = platform_get_irq_byname(pdev, "rx");
mlb_usio_irq[index][RX] = ret;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index a80ce7aaf309..0293b6210aa6 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -98,6 +98,8 @@
#define DMA_RX_BUF_SIZE 2048
+static DEFINE_IDA(port_ida);
+
struct qcom_geni_device_data {
bool console;
enum geni_se_xfer_mode mode;
@@ -253,10 +255,24 @@ static struct qcom_geni_serial_port *get_port_from_line(int line, bool console)
struct qcom_geni_serial_port *port;
int nr_ports = console ? GENI_UART_CONS_PORTS : GENI_UART_PORTS;
- if (line < 0 || line >= nr_ports)
- return ERR_PTR(-ENXIO);
+ if (console) {
+ if (line < 0 || line >= nr_ports)
+ return ERR_PTR(-ENXIO);
+
+ port = &qcom_geni_console_port;
+ } else {
+ int max_alias_num = of_alias_get_highest_id("serial");
+
+ if (line < 0 || line >= nr_ports)
+ line = ida_alloc_range(&port_ida, max_alias_num + 1, nr_ports, GFP_KERNEL);
+ else
+ line = ida_alloc_range(&port_ida, line, nr_ports, GFP_KERNEL);
+
+ if (line < 0)
+ return ERR_PTR(-ENXIO);
- port = console ? &qcom_geni_console_port : &qcom_geni_uart_ports[line];
+ port = &qcom_geni_uart_ports[line];
+ }
return port;
}
@@ -1761,6 +1777,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->wakeup_irq);
if (ret) {
device_init_wakeup(&pdev->dev, false);
+ ida_free(&port_ida, uport->line);
uart_remove_one_port(drv, uport);
return ret;
}
@@ -1772,10 +1789,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
static void qcom_geni_serial_remove(struct platform_device *pdev)
{
struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
+ struct uart_port *uport = &port->uport;
struct uart_driver *drv = port->private_data.drv;
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
+ ida_free(&port_ida, uport->line);
uart_remove_one_port(drv, &port->uport);
}
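A minimal sketch of the IDA-backed line allocation above, assuming nr_ports slots with DT aliases occupying the low line numbers (demo_* names are illustrative). Lines without a fixed alias are allocated above the highest alias id, and every allocated id must be returned with ida_free() on remove, as both error and remove paths above do:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(demo_port_ida);

static int demo_alloc_line(int requested, int max_alias, int nr_ports)
{
	if (requested < 0 || requested >= nr_ports)
		return ida_alloc_range(&demo_port_ida, max_alias + 1,
				       nr_ports, GFP_KERNEL);

	return ida_alloc_range(&demo_port_ida, requested, nr_ports, GFP_KERNEL);
}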
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 8587ebbe1073..72b1bb76415c 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -111,7 +111,7 @@ static void sa1100_mctrl_check(struct sa1100_port *sport)
*/
static void sa1100_timeout(struct timer_list *t)
{
- struct sa1100_port *sport = from_timer(sport, t, timer);
+ struct sa1100_port *sport = timer_container_of(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 210fff7164c1..2fb58c626daf 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -52,7 +52,7 @@
#define S3C24XX_SERIAL_MINOR 64
#ifdef CONFIG_ARM64
-#define UART_NR 12
+#define UART_NR 18
#else
#define UART_NR CONFIG_SERIAL_SAMSUNG_UARTS
#endif
@@ -190,6 +190,8 @@ static void wr_reg(const struct uart_port *port, u32 reg, u32 val)
case UPIO_MEM32:
writel_relaxed(val, portaddr(port, reg));
break;
+ default:
+ break;
}
}
@@ -2713,6 +2715,8 @@ static void wr_reg_barrier(const struct uart_port *port, u32 reg, u32 val)
case UPIO_MEM32:
writel(val, portaddr(port, reg));
break;
+ default:
+ break;
}
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 560f45ed19ae..5ea8aadb6e69 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1333,13 +1333,16 @@ static int sc16is7xx_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(val & BIT(offset));
}
-static void sc16is7xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int sc16is7xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
val ? BIT(offset) : 0);
+
+ return 0;
}
static int sc16is7xx_gpio_direction_input(struct gpio_chip *chip,
@@ -1422,7 +1425,7 @@ static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
s->gpio.direction_input = sc16is7xx_gpio_direction_input;
s->gpio.get = sc16is7xx_gpio_get;
s->gpio.direction_output = sc16is7xx_gpio_direction_output;
- s->gpio.set = sc16is7xx_gpio_set;
+ s->gpio.set_rv = sc16is7xx_gpio_set;
s->gpio.base = -1;
s->gpio.ngpio = s->devtype->nr_gpio;
s->gpio.can_sleep = 1;
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 553e3c1321ca..4ceca11ce600 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -500,7 +500,7 @@ static void sccnxp_handle_events(struct sccnxp_port *s)
static void sccnxp_timer(struct timer_list *t)
{
- struct sccnxp_port *s = from_timer(s, t, timer);
+ struct sccnxp_port *s = timer_container_of(s, t, timer);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 88669972d9a0..1f7708a91fc6 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -75,22 +75,23 @@ static inline void uart_port_deref(struct uart_port *uport)
wake_up(&uport->state->remove_wait);
}
-#define uart_port_lock(state, flags) \
- ({ \
- struct uart_port *__uport = uart_port_ref(state); \
- if (__uport) \
- uart_port_lock_irqsave(__uport, &flags); \
- __uport; \
- })
-
-#define uart_port_unlock(uport, flags) \
- ({ \
- struct uart_port *__uport = uport; \
- if (__uport) { \
- uart_port_unlock_irqrestore(__uport, flags); \
- uart_port_deref(__uport); \
- } \
- })
+static inline struct uart_port *uart_port_ref_lock(struct uart_state *state, unsigned long *flags)
+{
+ struct uart_port *uport = uart_port_ref(state);
+
+ if (uport)
+ uart_port_lock_irqsave(uport, flags);
+
+ return uport;
+}
+
+static inline void uart_port_unlock_deref(struct uart_port *uport, unsigned long flags)
+{
+ if (uport) {
+ uart_port_unlock_irqrestore(uport, flags);
+ uart_port_deref(uport);
+ }
+}
static inline struct uart_port *uart_port_check(struct uart_state *state)
{
@@ -127,10 +128,10 @@ static void uart_stop(struct tty_struct *tty)
struct uart_port *port;
unsigned long flags;
- port = uart_port_lock(state, flags);
+ port = uart_port_ref_lock(state, &flags);
if (port)
port->ops->stop_tx(port);
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
}
static void __uart_start(struct uart_state *state)
@@ -168,9 +169,9 @@ static void uart_start(struct tty_struct *tty)
struct uart_port *port;
unsigned long flags;
- port = uart_port_lock(state, flags);
+ port = uart_port_ref_lock(state, &flags);
__uart_start(state);
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
}
static void
@@ -258,14 +259,14 @@ static int uart_alloc_xmit_buf(struct tty_port *port)
if (!page)
return -ENOMEM;
- uport = uart_port_lock(state, flags);
+ uport = uart_port_ref_lock(state, &flags);
if (!state->port.xmit_buf) {
state->port.xmit_buf = (unsigned char *)page;
kfifo_init(&state->port.xmit_fifo, state->port.xmit_buf,
PAGE_SIZE);
- uart_port_unlock(uport, flags);
+ uart_port_unlock_deref(uport, flags);
} else {
- uart_port_unlock(uport, flags);
+ uart_port_unlock_deref(uport, flags);
/*
* Do not free() the page under the port lock, see
* uart_free_xmit_buf().
@@ -289,11 +290,11 @@ static void uart_free_xmit_buf(struct tty_port *port)
* console driver may need to allocate/free a debug object, which
* can end up in printk() recursion.
*/
- uport = uart_port_lock(state, flags);
+ uport = uart_port_ref_lock(state, &flags);
xmit_buf = port->xmit_buf;
port->xmit_buf = NULL;
INIT_KFIFO(port->xmit_fifo);
- uart_port_unlock(uport, flags);
+ uart_port_unlock_deref(uport, flags);
free_page((unsigned long)xmit_buf);
}
@@ -592,15 +593,15 @@ static int uart_put_char(struct tty_struct *tty, u8 c)
unsigned long flags;
int ret = 0;
- port = uart_port_lock(state, flags);
+ port = uart_port_ref_lock(state, &flags);
if (!state->port.xmit_buf) {
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
return 0;
}
if (port)
ret = kfifo_put(&state->port.xmit_fifo, c);
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
return ret;
}
@@ -623,9 +624,9 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count)
if (WARN_ON(!state))
return -EL3HLT;
- port = uart_port_lock(state, flags);
+ port = uart_port_ref_lock(state, &flags);
if (!state->port.xmit_buf) {
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
return 0;
}
@@ -633,7 +634,7 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count)
ret = kfifo_in(&state->port.xmit_fifo, buf, count);
__uart_start(state);
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
return ret;
}
@@ -644,9 +645,9 @@ static unsigned int uart_write_room(struct tty_struct *tty)
unsigned long flags;
unsigned int ret;
- port = uart_port_lock(state, flags);
+ port = uart_port_ref_lock(state, &flags);
ret = kfifo_avail(&state->port.xmit_fifo);
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
return ret;
}
@@ -657,9 +658,9 @@ static unsigned int uart_chars_in_buffer(struct tty_struct *tty)
unsigned long flags;
unsigned int ret;
- port = uart_port_lock(state, flags);
+ port = uart_port_ref_lock(state, &flags);
ret = kfifo_len(&state->port.xmit_fifo);
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
return ret;
}
@@ -678,13 +679,13 @@ static void uart_flush_buffer(struct tty_struct *tty)
pr_debug("uart_flush_buffer(%d) called\n", tty->index);
- port = uart_port_lock(state, flags);
+ port = uart_port_ref_lock(state, &flags);
if (!port)
return;
kfifo_reset(&state->port.xmit_fifo);
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
- uart_port_unlock(port, flags);
+ uart_port_unlock_deref(port, flags);
tty_port_tty_wakeup(&state->port);
}
@@ -1275,14 +1276,13 @@ static int uart_get_icount(struct tty_struct *tty,
struct uart_state *state = tty->driver_data;
struct uart_icount cnow;
struct uart_port *uport;
+ unsigned long flags;
- uport = uart_port_ref(state);
+ uport = uart_port_ref_lock(state, &flags);
if (!uport)
return -EIO;
- uart_port_lock_irq(uport);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
- uart_port_unlock_irq(uport);
- uart_port_deref(uport);
+ uart_port_unlock_deref(uport, flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
@@ -1914,9 +1914,10 @@ static bool uart_carrier_raised(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
+ unsigned long flags;
int mctrl;
- uport = uart_port_ref(state);
+ uport = uart_port_ref_lock(state, &flags);
/*
* Should never observe uport == NULL since checks for hangup should
* abort the tty_port_block_til_ready() loop before checking for carrier
@@ -1925,11 +1926,9 @@ static bool uart_carrier_raised(struct tty_port *port)
*/
if (WARN_ON(!uport))
return true;
- uart_port_lock_irq(uport);
uart_enable_ms(uport);
mctrl = uport->ops->get_mctrl(uport);
- uart_port_unlock_irq(uport);
- uart_port_deref(uport);
+ uart_port_unlock_deref(uport, flags);
return mctrl & TIOCM_CAR;
}
@@ -2178,8 +2177,8 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
*
* Returns: 0 on success or -%EINVAL on failure
*/
-int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
- char **options)
+int uart_parse_earlycon(char *p, enum uart_iotype *iotype,
+ resource_size_t *addr, char **options)
{
if (strncmp(p, "mmio,", 5) == 0) {
*iotype = UPIO_MEM;
@@ -3289,9 +3288,9 @@ bool uart_match_port(const struct uart_port *port1,
case UPIO_AU:
case UPIO_TSI:
return port1->mapbase == port2->mapbase;
+ default:
+ return false;
}
-
- return false;
}
EXPORT_SYMBOL(uart_match_port);
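A minimal sketch of why the statement-expression macros above became static inlines (demo_ref_lock is illustrative). The macro wrote into its flags argument by textual capture; the function version makes that an explicit pointer and gives both sides real types for the compiler to check:

static inline struct uart_port *demo_ref_lock(struct uart_state *state,
					      unsigned long *flags)
{
	struct uart_port *uport = uart_port_ref(state);

	if (uport)
		uart_port_lock_irqsave(uport, flags);
	return uport;
}

/* callers now pass &flags instead of relying on macro name capture */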
diff --git a/drivers/tty/serial/sh-sci-common.h b/drivers/tty/serial/sh-sci-common.h
new file mode 100644
index 000000000000..bd9d9cfac1c8
--- /dev/null
+++ b/drivers/tty/serial/sh-sci-common.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SH_SCI_COMMON_H__
+#define __SH_SCI_COMMON_H__
+
+#include <linux/serial_core.h>
+
+enum SCI_CLKS {
+ SCI_FCK, /* Functional Clock */
+ SCI_SCK, /* Optional External Clock */
+ SCI_BRG_INT, /* Optional BRG Internal Clock Source */
+ SCI_SCIF_CLK, /* Optional BRG External Clock Source */
+ SCI_NUM_CLKS
+};
+
+/* Offsets into the sci_port->irqs array */
+enum {
+ SCIx_ERI_IRQ,
+ SCIx_RXI_IRQ,
+ SCIx_TXI_IRQ,
+ SCIx_BRI_IRQ,
+ SCIx_DRI_IRQ,
+ SCIx_TEI_IRQ,
+ SCIx_NR_IRQS,
+
+ SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */
+};
+
+/* Bit x set means sampling rate x + 1 is supported */
+#define SCI_SR(x) BIT((x) - 1)
+#define SCI_SR_RANGE(x, y) GENMASK((y) - 1, (x) - 1)
+
+void sci_release_port(struct uart_port *port);
+int sci_request_port(struct uart_port *port);
+void sci_config_port(struct uart_port *port, int flags);
+int sci_verify_port(struct uart_port *port, struct serial_struct *ser);
+void sci_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate);
+
+struct plat_sci_reg {
+ u8 offset;
+ u8 size;
+};
+
+struct sci_port_params_bits {
+ unsigned int rxtx_enable;
+ unsigned int te_clear;
+ unsigned int poll_sent_bits;
+};
+
+struct sci_common_regs {
+ unsigned int status;
+ unsigned int control;
+};
+
+/* The actual number of needed registers. This is used by sci only */
+#define SCI_NR_REGS 20
+
+struct sci_port_params {
+ const struct plat_sci_reg regs[SCI_NR_REGS];
+ const struct sci_common_regs *common_regs;
+ const struct sci_port_params_bits *param_bits;
+ unsigned int fifosize;
+ unsigned int overrun_reg;
+ unsigned int overrun_mask;
+ unsigned int sampling_rate_mask;
+ unsigned int error_mask;
+ unsigned int error_clear;
+};
+
+struct sci_port_ops {
+ u32 (*read_reg)(struct uart_port *port, int reg);
+ void (*write_reg)(struct uart_port *port, int reg, int value);
+ void (*clear_SCxSR)(struct uart_port *port, unsigned int mask);
+
+ void (*transmit_chars)(struct uart_port *port);
+ void (*receive_chars)(struct uart_port *port);
+
+ void (*poll_put_char)(struct uart_port *port, unsigned char c);
+
+ int (*set_rtrg)(struct uart_port *port, int rx_trig);
+ int (*rtrg_enabled)(struct uart_port *port);
+
+ void (*shutdown_complete)(struct uart_port *port);
+
+ void (*prepare_console_write)(struct uart_port *port, u32 ctrl);
+ void (*console_save)(struct uart_port *port);
+ void (*console_restore)(struct uart_port *port);
+ size_t (*suspend_regs_size)(void);
+};
+
+struct sci_of_data {
+ const struct sci_port_params *params;
+ const struct uart_ops *uart_ops;
+ const struct sci_port_ops *ops;
+ unsigned short regtype;
+ unsigned short type;
+};
+
+struct sci_port {
+ struct uart_port port;
+
+ /* Platform configuration */
+ const struct sci_port_params *params;
+ const struct plat_sci_port *cfg;
+
+ unsigned int sampling_rate_mask;
+ resource_size_t reg_size;
+ struct mctrl_gpios *gpios;
+
+ /* Clocks */
+ struct clk *clks[SCI_NUM_CLKS];
+ unsigned long clk_rates[SCI_NUM_CLKS];
+
+ int irqs[SCIx_NR_IRQS];
+ char *irqstr[SCIx_NR_IRQS];
+
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+
+ struct reset_control *rstc;
+ struct sci_suspend_regs *suspend_regs;
+
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ struct dma_chan *chan_tx_saved;
+ struct dma_chan *chan_rx_saved;
+ dma_cookie_t cookie_tx;
+ dma_cookie_t cookie_rx[2];
+ dma_cookie_t active_rx;
+ dma_addr_t tx_dma_addr;
+ unsigned int tx_dma_len;
+ struct scatterlist sg_rx[2];
+ void *rx_buf[2];
+ size_t buf_len_rx;
+ struct work_struct work_tx;
+ struct hrtimer rx_timer;
+ unsigned int rx_timeout; /* microseconds */
+#endif
+ unsigned int rx_frame;
+ int rx_trigger;
+ struct timer_list rx_fifo_timer;
+ int rx_fifo_timeout;
+ u16 hscif_tot;
+
+ const struct sci_port_ops *ops;
+
+ bool has_rtscts;
+ bool autorts;
+ bool tx_occurred;
+};
+
+#define to_sci_port(uart) container_of((uart), struct sci_port, port)
+
+void sci_port_disable(struct sci_port *sci_port);
+void sci_port_enable(struct sci_port *sci_port);
+
+int sci_startup(struct uart_port *port);
+void sci_shutdown(struct uart_port *port);
+
+#define min_sr(_port) ffs((_port)->sampling_rate_mask)
+#define max_sr(_port) fls((_port)->sampling_rate_mask)
+
+#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
+int __init scix_early_console_setup(struct earlycon_device *device, const struct sci_of_data *data);
+#endif
+
+#endif /* __SH_SCI_COMMON_H__ */
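The new header turns SCI register access and FIFO trigger policy into a per-variant ops table so SCI, SCIF and future variants can share one core. A minimal sketch of a consumer, using only names defined in the header above (the helper itself is illustrative):

static void demo_clear_status(struct uart_port *port, unsigned int mask)
{
	struct sci_port *s = to_sci_port(port);

	s->ops->clear_SCxSR(port, mask);	/* variant-specific clear */
}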
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7e7813ccda41..1c356544a832 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -56,19 +56,7 @@
#include "serial_mctrl_gpio.h"
#include "sh-sci.h"
-
-/* Offsets into the sci_port->irqs array */
-enum {
- SCIx_ERI_IRQ,
- SCIx_RXI_IRQ,
- SCIx_TXI_IRQ,
- SCIx_BRI_IRQ,
- SCIx_DRI_IRQ,
- SCIx_TEI_IRQ,
- SCIx_NR_IRQS,
-
- SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */
-};
+#include "sh-sci-common.h"
#define SCIx_IRQ_IS_MUXED(port) \
((port)->irqs[SCIx_ERI_IRQ] == \
@@ -76,32 +64,38 @@ enum {
((port)->irqs[SCIx_ERI_IRQ] && \
((port)->irqs[SCIx_RXI_IRQ] < 0))
-enum SCI_CLKS {
- SCI_FCK, /* Functional Clock */
- SCI_SCK, /* Optional External Clock */
- SCI_BRG_INT, /* Optional BRG Internal Clock Source */
- SCI_SCIF_CLK, /* Optional BRG External Clock Source */
- SCI_NUM_CLKS
-};
-
-/* Bit x set means sampling rate x + 1 is supported */
-#define SCI_SR(x) BIT((x) - 1)
-#define SCI_SR_RANGE(x, y) GENMASK((y) - 1, (x) - 1)
-
#define SCI_SR_SCIFAB SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \
SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \
SCI_SR(19) | SCI_SR(27)
-#define min_sr(_port) ffs((_port)->sampling_rate_mask)
-#define max_sr(_port) fls((_port)->sampling_rate_mask)
-
/* Iterate over all supported sampling rates, from high to low */
#define for_each_sr(_sr, _port) \
for ((_sr) = max_sr(_port); (_sr) >= min_sr(_port); (_sr)--) \
if ((_port)->sampling_rate_mask & SCI_SR((_sr)))
-struct plat_sci_reg {
- u8 offset, size;
+#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
+
+static struct sci_port sci_ports[SCI_NPORTS];
+static unsigned long sci_ports_in_use;
+static struct uart_driver sci_uart_driver;
+static bool sci_uart_earlycon;
+static bool sci_uart_earlycon_dev_probing;
+
+static const struct sci_port_params_bits sci_sci_port_params_bits = {
+ .rxtx_enable = SCSCR_RE | SCSCR_TE,
+ .te_clear = SCSCR_TE | SCSCR_TEIE,
+ .poll_sent_bits = SCI_TDRE | SCI_TEND
+};
+
+static const struct sci_port_params_bits sci_scif_port_params_bits = {
+ .rxtx_enable = SCSCR_RE | SCSCR_TE,
+ .te_clear = SCSCR_TE | SCSCR_TEIE,
+ .poll_sent_bits = SCIF_TDFE | SCIF_TEND
+};
+
+static const struct sci_common_regs sci_common_regs = {
+ .status = SCxSR,
+ .control = SCSCR,
};
struct sci_suspend_regs {
@@ -118,77 +112,9 @@ struct sci_suspend_regs {
u8 semr;
};
-struct sci_port_params {
- const struct plat_sci_reg regs[SCIx_NR_REGS];
- unsigned int fifosize;
- unsigned int overrun_reg;
- unsigned int overrun_mask;
- unsigned int sampling_rate_mask;
- unsigned int error_mask;
- unsigned int error_clear;
-};
-
-struct sci_port {
- struct uart_port port;
-
- /* Platform configuration */
- const struct sci_port_params *params;
- const struct plat_sci_port *cfg;
- unsigned int sampling_rate_mask;
- resource_size_t reg_size;
- struct mctrl_gpios *gpios;
-
- /* Clocks */
- struct clk *clks[SCI_NUM_CLKS];
- unsigned long clk_rates[SCI_NUM_CLKS];
-
- int irqs[SCIx_NR_IRQS];
- char *irqstr[SCIx_NR_IRQS];
-
- struct dma_chan *chan_tx;
- struct dma_chan *chan_rx;
-
- struct reset_control *rstc;
-
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
- struct dma_chan *chan_tx_saved;
- struct dma_chan *chan_rx_saved;
- dma_cookie_t cookie_tx;
- dma_cookie_t cookie_rx[2];
- dma_cookie_t active_rx;
- dma_addr_t tx_dma_addr;
- unsigned int tx_dma_len;
- struct scatterlist sg_rx[2];
- void *rx_buf[2];
- size_t buf_len_rx;
- struct work_struct work_tx;
- struct hrtimer rx_timer;
- unsigned int rx_timeout; /* microseconds */
-#endif
- unsigned int rx_frame;
- int rx_trigger;
- struct timer_list rx_fifo_timer;
- int rx_fifo_timeout;
- struct sci_suspend_regs suspend_regs;
- u16 hscif_tot;
-
- bool has_rtscts;
- bool autorts;
- bool tx_occurred;
-};
-
-#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
-
-static struct sci_port sci_ports[SCI_NPORTS];
-static unsigned long sci_ports_in_use;
-static struct uart_driver sci_uart_driver;
-static bool sci_uart_earlycon;
-static bool sci_uart_earlycon_dev_probing;
-
-static inline struct sci_port *
-to_sci_port(struct uart_port *uart)
+static size_t sci_suspend_regs_size(void)
{
- return container_of(uart, struct sci_port, port);
+ return sizeof(struct sci_suspend_regs);
}
static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
@@ -211,6 +137,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER,
.error_clear = SCI_ERROR_CLEAR & ~SCI_ORER,
+ .param_bits = &sci_sci_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -233,6 +161,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER,
.error_clear = SCI_ERROR_CLEAR & ~SCI_ORER,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -257,6 +187,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR_SCIFAB,
.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -282,6 +214,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR_SCIFAB,
.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -307,10 +241,12 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
- * The "SCIFA" that is in RZ/A2, RZ/G2L and RZ/T.
+ * The "SCIFA" that is in RZ/A2, RZ/G2L and RZ/T1.
* It looks like a normal SCIF with FIFO data, but with a
* compressed address space. Also, the break out of interrupts
* are different: ERI/BRI, RXI, TXI, TEI, DRI.
@@ -335,6 +271,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -366,6 +304,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -388,6 +328,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -412,6 +354,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -439,6 +383,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -468,6 +414,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR_RANGE(8, 32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -492,6 +440,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -519,6 +469,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
/*
@@ -542,6 +494,8 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.sampling_rate_mask = SCI_SR(16),
.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
+ .param_bits = &sci_scif_port_params_bits,
+ .common_regs = &sci_common_regs,
},
};
@@ -579,7 +533,7 @@ static void sci_serial_out(struct uart_port *p, int offset, int value)
WARN(1, "Invalid register access\n");
}
-static void sci_port_enable(struct sci_port *sci_port)
+void sci_port_enable(struct sci_port *sci_port)
{
unsigned int i;
@@ -595,7 +549,7 @@ static void sci_port_enable(struct sci_port *sci_port)
sci_port->port.uartclk = sci_port->clk_rates[SCI_FCK];
}
-static void sci_port_disable(struct sci_port *sci_port)
+void sci_port_disable(struct sci_port *sci_port)
{
unsigned int i;
@@ -735,12 +689,13 @@ static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
static int sci_poll_get_char(struct uart_port *port)
{
unsigned short status;
+ struct sci_port *s = to_sci_port(port);
int c;
do {
status = sci_serial_in(port, SCxSR);
if (status & SCxSR_ERRORS(port)) {
- sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
+ s->ops->clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
continue;
}
break;
@@ -753,7 +708,7 @@ static int sci_poll_get_char(struct uart_port *port)
/* Dummy read */
sci_serial_in(port, SCxSR);
- sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
+ s->ops->clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
return c;
}
@@ -761,14 +716,16 @@ static int sci_poll_get_char(struct uart_port *port)
static void sci_poll_put_char(struct uart_port *port, unsigned char c)
{
- unsigned short status;
+ struct sci_port *s = to_sci_port(port);
+ const struct sci_common_regs *regs = s->params->common_regs;
+ unsigned int status;
do {
- status = sci_serial_in(port, SCxSR);
+ status = s->ops->read_reg(port, regs->status);
} while (!(status & SCxSR_TDxE(port)));
sci_serial_out(port, SCxTDR, c);
- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
+ s->ops->clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
CONFIG_SERIAL_SH_SCI_EARLYCON */
@@ -911,7 +868,7 @@ static void sci_transmit_chars(struct uart_port *port)
port->icount.tx++;
} while (--count > 0);
- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
+ s->ops->clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -930,6 +887,7 @@ static void sci_transmit_chars(struct uart_port *port)
static void sci_receive_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
+ struct sci_port *s = to_sci_port(port);
int i, count, copied = 0;
unsigned short status;
unsigned char flag;
@@ -984,7 +942,7 @@ static void sci_receive_chars(struct uart_port *port)
}
sci_serial_in(port, SCxSR); /* dummy read */
- sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
+ s->ops->clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
copied += count;
port->icount.rx += count;
@@ -997,16 +955,17 @@ static void sci_receive_chars(struct uart_port *port)
/* TTY buffers full; read from RX reg to prevent lockup */
sci_serial_in(port, SCxRDR);
sci_serial_in(port, SCxSR); /* dummy read */
- sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
+ s->ops->clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
}
}
static int sci_handle_errors(struct uart_port *port)
{
int copied = 0;
- unsigned short status = sci_serial_in(port, SCxSR);
- struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
+ const struct sci_common_regs *regs = s->params->common_regs;
+ unsigned int status = s->ops->read_reg(port, regs->status);
+ struct tty_port *tport = &port->state->port;
/* Handle overruns */
if (status & s->params->overrun_mask) {
@@ -1161,11 +1120,11 @@ static int scif_rtrg_enabled(struct uart_port *port)
static void rx_fifo_timer_fn(struct timer_list *t)
{
- struct sci_port *s = from_timer(s, t, rx_fifo_timer);
+ struct sci_port *s = timer_container_of(s, t, rx_fifo_timer);
struct uart_port *port = &s->port;
dev_dbg(port->dev, "Rx timed out\n");
- scif_set_rtrg(port, 1);
+ s->ops->set_rtrg(port, 1);
}
static ssize_t rx_fifo_trigger_show(struct device *dev,
@@ -1190,9 +1149,9 @@ static ssize_t rx_fifo_trigger_store(struct device *dev,
if (ret)
return ret;
- sci->rx_trigger = scif_set_rtrg(port, r);
+ sci->rx_trigger = sci->ops->set_rtrg(port, r);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- scif_set_rtrg(port, 1);
+ sci->ops->set_rtrg(port, 1);
return count;
}
@@ -1235,7 +1194,7 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
sci->hscif_tot = r << HSSCR_TOT_SHIFT;
} else {
sci->rx_fifo_timeout = r;
- scif_set_rtrg(port, 1);
+ sci->ops->set_rtrg(port, 1);
if (r > 0)
timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
}
@@ -1360,7 +1319,7 @@ static void sci_dma_rx_reenable_irq(struct sci_port *s)
s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
enable_irq(s->irqs[SCIx_RXI_IRQ]);
if (s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE)
- scif_set_rtrg(port, s->rx_trigger);
+ s->ops->set_rtrg(port, s->rx_trigger);
else
scr &= ~SCSCR_RDRQE;
}
@@ -1798,7 +1757,7 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
disable_irq_nosync(s->irqs[SCIx_RXI_IRQ]);
if (s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
- scif_set_rtrg(port, 1);
+ s->ops->set_rtrg(port, 1);
scr |= SCSCR_RIE;
} else {
scr |= SCSCR_RDRQE;
@@ -1824,8 +1783,8 @@ handle_pio:
#endif
if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
- if (!scif_rtrg_enabled(port))
- scif_set_rtrg(port, s->rx_trigger);
+ if (!s->ops->rtrg_enabled(port))
+ s->ops->set_rtrg(port, s->rx_trigger);
mod_timer(&s->rx_fifo_timer, jiffies + DIV_ROUND_UP(
s->rx_frame * HZ * s->rx_fifo_timeout, 1000000));
@@ -1835,7 +1794,7 @@ handle_pio:
* of whether the I_IXOFF is set, otherwise, how is the interrupt
* to be disabled?
*/
- sci_receive_chars(port);
+ s->ops->receive_chars(port);
return IRQ_HANDLED;
}
@@ -1844,9 +1803,10 @@ static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
unsigned long flags;
+ struct sci_port *s = to_sci_port(port);
uart_port_lock_irqsave(port, &flags);
- sci_transmit_chars(port);
+ s->ops->transmit_chars(port);
uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
@@ -1855,16 +1815,18 @@ static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
static irqreturn_t sci_tx_end_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
+ struct sci_port *s = to_sci_port(port);
+ const struct sci_common_regs *regs = s->params->common_regs;
unsigned long flags;
- unsigned short ctrl;
+ u32 ctrl;
if (port->type != PORT_SCI)
return sci_tx_interrupt(irq, ptr);
uart_port_lock_irqsave(port, &flags);
- ctrl = sci_serial_in(port, SCSCR);
- ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
- sci_serial_out(port, SCSCR, ctrl);
+ ctrl = s->ops->read_reg(port, regs->control) &
+ ~(s->params->param_bits->te_clear);
+ s->ops->write_reg(port, regs->control, ctrl);
uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
@@ -1873,6 +1835,7 @@ static irqreturn_t sci_tx_end_interrupt(int irq, void *ptr)
static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
+ struct sci_port *s = to_sci_port(port);
/* Handle BREAKs */
sci_handle_breaks(port);
@@ -1880,7 +1843,7 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
/* drop invalid character received before break was detected */
sci_serial_in(port, SCxRDR);
- sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
+ s->ops->clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
return IRQ_HANDLED;
}
@@ -1908,15 +1871,15 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr)
if (sci_handle_errors(port)) {
/* discard character in rx buffer */
sci_serial_in(port, SCxSR);
- sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
+ s->ops->clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
}
} else {
sci_handle_fifo_overrun(port);
if (!s->chan_rx)
- sci_receive_chars(port);
+ s->ops->receive_chars(port);
}
- sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
+ s->ops->clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
/* Kick the transmission */
if (!s->chan_tx)
@@ -2286,7 +2249,17 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
uart_port_unlock_irqrestore(port, flags);
}
-static int sci_startup(struct uart_port *port)
+static void sci_shutdown_complete(struct uart_port *port)
+{
+ struct sci_port *s = to_sci_port(port);
+ u16 scr;
+
+ scr = sci_serial_in(port, SCSCR);
+ sci_serial_out(port, SCSCR,
+ scr & (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
+}
+
+int sci_startup(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
int ret;
@@ -2305,11 +2278,10 @@ static int sci_startup(struct uart_port *port)
return 0;
}
-static void sci_shutdown(struct uart_port *port)
+void sci_shutdown(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
unsigned long flags;
- u16 scr;
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
@@ -2319,13 +2291,7 @@ static void sci_shutdown(struct uart_port *port)
uart_port_lock_irqsave(port, &flags);
sci_stop_rx(port);
sci_stop_tx(port);
- /*
- * Stop RX and TX, disable related interrupts, keep clock source
- * and HSCIF TOT bits
- */
- scr = sci_serial_in(port, SCSCR);
- sci_serial_out(port, SCSCR,
- scr & (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
+ s->ops->shutdown_complete(port);
uart_port_unlock_irqrestore(port, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2402,8 +2368,8 @@ static int sci_brg_calc(struct sci_port *s, unsigned int bps,
/* calculate sample rate, BRR, and clock select */
static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
- unsigned int *brr, unsigned int *srr,
- unsigned int *cks)
+ unsigned int *brr, unsigned int *srr,
+ unsigned int *cks)
{
unsigned long freq = s->clk_rates[SCI_FCK];
unsigned int sr, br, prediv, scrate, c;
@@ -2480,9 +2446,9 @@ static void sci_reset(struct uart_port *port)
if (reg->size)
sci_serial_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
- sci_clear_SCxSR(port,
- SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
- SCxSR_BREAK_CLEAR(port));
+ s->ops->clear_SCxSR(port,
+ SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
+ SCxSR_BREAK_CLEAR(port));
if (sci_getreg(port, SCLSR)->size) {
status = sci_serial_in(port, SCLSR);
status &= ~(SCLSR_TO | SCLSR_ORER);
@@ -2491,14 +2457,14 @@ static void sci_reset(struct uart_port *port)
if (s->rx_trigger > 1) {
if (s->rx_fifo_timeout) {
- scif_set_rtrg(port, 1);
+ s->ops->set_rtrg(port, 1);
timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
} else {
if (port->type == PORT_SCIFA ||
port->type == PORT_SCIFB)
- scif_set_rtrg(port, 1);
+ s->ops->set_rtrg(port, 1);
else
- scif_set_rtrg(port, s->rx_trigger);
+ s->ops->set_rtrg(port, s->rx_trigger);
}
}
}
@@ -2758,7 +2724,7 @@ done:
sci_enable_ms(port);
}
-static void sci_pm(struct uart_port *port, unsigned int state,
+void sci_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct sci_port *sci_port = to_sci_port(port);
@@ -2821,7 +2787,7 @@ static int sci_remap_port(struct uart_port *port)
return 0;
}
-static void sci_release_port(struct uart_port *port)
+void sci_release_port(struct uart_port *port)
{
struct sci_port *sport = to_sci_port(port);
@@ -2833,7 +2799,7 @@ static void sci_release_port(struct uart_port *port)
release_mem_region(port->mapbase, sport->reg_size);
}
-static int sci_request_port(struct uart_port *port)
+int sci_request_port(struct uart_port *port)
{
struct resource *res;
struct sci_port *sport = to_sci_port(port);
@@ -2855,7 +2821,7 @@ static int sci_request_port(struct uart_port *port)
return 0;
}
-static void sci_config_port(struct uart_port *port, int flags)
+void sci_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
struct sci_port *sport = to_sci_port(port);
@@ -2865,7 +2831,7 @@ static void sci_config_port(struct uart_port *port, int flags)
}
}
-static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
+int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
if (ser->baud_base < 2400)
/* No paper tape reader for Mitch.. */
@@ -2874,6 +2840,75 @@ static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
return 0;
}
+static void sci_prepare_console_write(struct uart_port *port, u32 ctrl)
+{
+ struct sci_port *s = to_sci_port(port);
+ u32 ctrl_temp =
+ s->params->param_bits->rxtx_enable |
+ (s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
+ (ctrl & (SCSCR_CKE1 | SCSCR_CKE0)) |
+ s->hscif_tot;
+ sci_serial_out(port, SCSCR, ctrl_temp);
+}
+
+static void sci_console_save(struct uart_port *port)
+{
+ struct sci_port *s = to_sci_port(port);
+ struct sci_suspend_regs *regs = s->suspend_regs;
+
+ if (sci_getreg(port, SCDL)->size)
+ regs->scdl = sci_serial_in(port, SCDL);
+ if (sci_getreg(port, SCCKS)->size)
+ regs->sccks = sci_serial_in(port, SCCKS);
+ if (sci_getreg(port, SCSMR)->size)
+ regs->scsmr = sci_serial_in(port, SCSMR);
+ if (sci_getreg(port, SCSCR)->size)
+ regs->scscr = sci_serial_in(port, SCSCR);
+ if (sci_getreg(port, SCFCR)->size)
+ regs->scfcr = sci_serial_in(port, SCFCR);
+ if (sci_getreg(port, SCSPTR)->size)
+ regs->scsptr = sci_serial_in(port, SCSPTR);
+ if (sci_getreg(port, SCBRR)->size)
+ regs->scbrr = sci_serial_in(port, SCBRR);
+ if (sci_getreg(port, HSSRR)->size)
+ regs->hssrr = sci_serial_in(port, HSSRR);
+ if (sci_getreg(port, SCPCR)->size)
+ regs->scpcr = sci_serial_in(port, SCPCR);
+ if (sci_getreg(port, SCPDR)->size)
+ regs->scpdr = sci_serial_in(port, SCPDR);
+ if (sci_getreg(port, SEMR)->size)
+ regs->semr = sci_serial_in(port, SEMR);
+}
+
+static void sci_console_restore(struct uart_port *port)
+{
+ struct sci_port *s = to_sci_port(port);
+ struct sci_suspend_regs *regs = s->suspend_regs;
+
+ if (sci_getreg(port, SCDL)->size)
+ sci_serial_out(port, SCDL, regs->scdl);
+ if (sci_getreg(port, SCCKS)->size)
+ sci_serial_out(port, SCCKS, regs->sccks);
+ if (sci_getreg(port, SCSMR)->size)
+ sci_serial_out(port, SCSMR, regs->scsmr);
+ if (sci_getreg(port, SCSCR)->size)
+ sci_serial_out(port, SCSCR, regs->scscr);
+ if (sci_getreg(port, SCFCR)->size)
+ sci_serial_out(port, SCFCR, regs->scfcr);
+ if (sci_getreg(port, SCSPTR)->size)
+ sci_serial_out(port, SCSPTR, regs->scsptr);
+ if (sci_getreg(port, SCBRR)->size)
+ sci_serial_out(port, SCBRR, regs->scbrr);
+ if (sci_getreg(port, HSSRR)->size)
+ sci_serial_out(port, HSSRR, regs->hssrr);
+ if (sci_getreg(port, SCPCR)->size)
+ sci_serial_out(port, SCPCR, regs->scpcr);
+ if (sci_getreg(port, SCPDR)->size)
+ sci_serial_out(port, SCPDR, regs->scpdr);
+ if (sci_getreg(port, SEMR)->size)
+ sci_serial_out(port, SEMR, regs->semr);
+}
+
static const struct uart_ops sci_uart_ops = {
.tx_empty = sci_tx_empty,
.set_mctrl = sci_set_mctrl,
@@ -2899,6 +2934,25 @@ static const struct uart_ops sci_uart_ops = {
#endif
};
+static const struct sci_port_ops sci_port_ops = {
+ .read_reg = sci_serial_in,
+ .write_reg = sci_serial_out,
+ .clear_SCxSR = sci_clear_SCxSR,
+ .transmit_chars = sci_transmit_chars,
+ .receive_chars = sci_receive_chars,
+#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
+ defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
+ .poll_put_char = sci_poll_put_char,
+#endif
+ .set_rtrg = scif_set_rtrg,
+ .rtrg_enabled = scif_rtrg_enabled,
+ .shutdown_complete = sci_shutdown_complete,
+ .prepare_console_write = sci_prepare_console_write,
+ .console_save = sci_console_save,
+ .console_restore = sci_console_restore,
+ .suspend_regs_size = sci_suspend_regs_size,
+};
+
static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
{
const char *clk_names[] = {
@@ -2942,10 +2996,13 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
}
static const struct sci_port_params *
-sci_probe_regmap(const struct plat_sci_port *cfg)
+sci_probe_regmap(const struct plat_sci_port *cfg, struct sci_port *sci_port)
{
unsigned int regtype;
+ sci_port->ops = &sci_port_ops;
+ sci_port->port.ops = &sci_uart_ops;
+
if (cfg->regtype != SCIx_PROBE_REGTYPE)
return &sci_port_params[cfg->regtype];
@@ -2993,7 +3050,6 @@ static int sci_init_single(struct platform_device *dev,
sci_port->cfg = p;
- port->ops = &sci_uart_ops;
port->iotype = UPIO_MEM;
port->line = index;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_SH_SCI_CONSOLE);
@@ -3033,10 +3089,6 @@ static int sci_init_single(struct platform_device *dev,
for (i = 1; i < ARRAY_SIZE(sci_port->irqs); i++)
sci_port->irqs[i] = sci_port->irqs[0];
- sci_port->params = sci_probe_regmap(p);
- if (unlikely(sci_port->params == NULL))
- return -EINVAL;
-
switch (p->type) {
case PORT_SCIFB:
sci_port->rx_trigger = 48;
@@ -3104,7 +3156,7 @@ static int sci_init_single(struct platform_device *dev,
defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
static void serial_console_putchar(struct uart_port *port, unsigned char ch)
{
- sci_poll_put_char(port, ch);
+ to_sci_port(port)->ops->poll_put_char(port, ch);
}
/*
@@ -3116,7 +3168,9 @@ static void serial_console_write(struct console *co, const char *s,
{
struct sci_port *sci_port = &sci_ports[co->index];
struct uart_port *port = &sci_port->port;
- unsigned short bits, ctrl, ctrl_temp;
+ const struct sci_common_regs *regs = sci_port->params->common_regs;
+ unsigned int bits;
+ u32 ctrl;
unsigned long flags;
int locked = 1;
@@ -3128,21 +3182,21 @@ static void serial_console_write(struct console *co, const char *s,
uart_port_lock_irqsave(port, &flags);
/* first save SCSCR then disable interrupts, keep clock source */
- ctrl = sci_serial_in(port, SCSCR);
- ctrl_temp = SCSCR_RE | SCSCR_TE |
- (sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
- (ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
- sci_serial_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
+
+ ctrl = sci_port->ops->read_reg(port, regs->control);
+ sci_port->ops->prepare_console_write(port, ctrl);
uart_console_write(port, s, count, serial_console_putchar);
/* wait until fifo is empty and last bit has been transmitted */
- bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
- while ((sci_serial_in(port, SCxSR) & bits) != bits)
+
+ bits = sci_port->params->param_bits->poll_sent_bits;
+
+ while ((sci_port->ops->read_reg(port, regs->status) & bits) != bits)
cpu_relax();
/* restore the SCSCR */
- sci_serial_out(port, SCSCR, ctrl);
+ sci_port->ops->write_reg(port, regs->control, ctrl);
if (locked)
uart_port_unlock_irqrestore(port, flags);
@@ -3220,13 +3274,18 @@ static struct console early_serial_console = {
static int sci_probe_earlyprintk(struct platform_device *pdev)
{
const struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);
+ struct sci_port *sp = &sci_ports[pdev->id];
if (early_serial_console.data)
return -EEXIST;
early_serial_console.index = pdev->id;
- sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);
+ sp->params = sci_probe_regmap(cfg, sp);
+ if (!sp->params)
+ return -ENODEV;
+
+ sci_init_single(pdev, sp, pdev->id, cfg, true);
if (!strstr(early_serial_buf, "keep"))
early_serial_console.flags |= CON_BOOT;
@@ -3275,59 +3334,126 @@ static void sci_remove(struct platform_device *dev)
device_remove_file(&dev->dev, &dev_attr_rx_fifo_timeout);
}
+static const struct sci_of_data of_sci_scif_sh2 = {
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_SH2_SCIF_FIFODATA_REGTYPE],
+};
+
+static const struct sci_of_data of_sci_scif_rz_scifa = {
+ .type = PORT_SCIF,
+ .regtype = SCIx_RZ_SCIFA_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_RZ_SCIFA_REGTYPE],
+};
+
+static const struct sci_of_data of_sci_scif_rzv2h = {
+ .type = PORT_SCIF,
+ .regtype = SCIx_RZV2H_SCIF_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_RZV2H_SCIF_REGTYPE],
+};
+
+static const struct sci_of_data of_sci_rcar_scif = {
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_BRG_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_SH4_SCIF_BRG_REGTYPE],
+};
-#define SCI_OF_DATA(type, regtype) (void *)((type) << 16 | (regtype))
-#define SCI_OF_TYPE(data) ((unsigned long)(data) >> 16)
-#define SCI_OF_REGTYPE(data) ((unsigned long)(data) & 0xffff)
+static const struct sci_of_data of_sci_scif_sh4 = {
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_SH4_SCIF_REGTYPE],
+};
+
+static const struct sci_of_data of_sci_scifa = {
+ .type = PORT_SCIFA,
+ .regtype = SCIx_SCIFA_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_SCIFA_REGTYPE],
+};
+
+static const struct sci_of_data of_sci_scifb = {
+ .type = PORT_SCIFB,
+ .regtype = SCIx_SCIFB_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_SCIFB_REGTYPE],
+};
+
+static const struct sci_of_data of_sci_hscif = {
+ .type = PORT_HSCIF,
+ .regtype = SCIx_HSCIF_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_HSCIF_REGTYPE],
+};
+
+static const struct sci_of_data of_sci_sci = {
+ .type = PORT_SCI,
+ .regtype = SCIx_SCI_REGTYPE,
+ .ops = &sci_port_ops,
+ .uart_ops = &sci_uart_ops,
+ .params = &sci_port_params[SCIx_SCI_REGTYPE],
+};
static const struct of_device_id of_sci_match[] __maybe_unused = {
/* SoC-specific types */
{
.compatible = "renesas,scif-r7s72100",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
+ .data = &of_sci_scif_sh2,
},
{
.compatible = "renesas,scif-r7s9210",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
+ .data = &of_sci_scif_rz_scifa,
},
{
.compatible = "renesas,scif-r9a07g044",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
+ .data = &of_sci_scif_rz_scifa,
},
{
.compatible = "renesas,scif-r9a09g057",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZV2H_SCIF_REGTYPE),
+ .data = &of_sci_scif_rzv2h,
},
/* Family-specific types */
{
.compatible = "renesas,rcar-gen1-scif",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
+ .data = &of_sci_rcar_scif,
}, {
.compatible = "renesas,rcar-gen2-scif",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
+ .data = &of_sci_rcar_scif,
}, {
.compatible = "renesas,rcar-gen3-scif",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
+ .data = &of_sci_rcar_scif,
}, {
.compatible = "renesas,rcar-gen4-scif",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
+ .data = &of_sci_rcar_scif,
},
/* Generic types */
{
.compatible = "renesas,scif",
- .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_REGTYPE),
+ .data = &of_sci_scif_sh4,
}, {
.compatible = "renesas,scifa",
- .data = SCI_OF_DATA(PORT_SCIFA, SCIx_SCIFA_REGTYPE),
+ .data = &of_sci_scifa,
}, {
.compatible = "renesas,scifb",
- .data = SCI_OF_DATA(PORT_SCIFB, SCIx_SCIFB_REGTYPE),
+ .data = &of_sci_scifb,
}, {
.compatible = "renesas,hscif",
- .data = SCI_OF_DATA(PORT_HSCIF, SCIx_HSCIF_REGTYPE),
+ .data = &of_sci_hscif,
}, {
.compatible = "renesas,sci",
- .data = SCI_OF_DATA(PORT_SCI, SCIx_SCI_REGTYPE),
+ .data = &of_sci_sci,
}, {
/* Terminator */
},
@@ -3346,7 +3472,7 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
struct reset_control *rstc;
struct plat_sci_port *p;
struct sci_port *sp;
- const void *data;
+ const struct sci_of_data *data;
int id, ret;
if (!IS_ENABLED(CONFIG_OF) || !np)
@@ -3393,8 +3519,12 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
sp->rstc = rstc;
*dev_id = id;
- p->type = SCI_OF_TYPE(data);
- p->regtype = SCI_OF_REGTYPE(data);
+ p->type = data->type;
+ p->regtype = data->regtype;
+
+ sp->ops = data->ops;
+ sp->port.ops = data->uart_ops;
+ sp->params = data->params;
sp->has_rtscts = of_property_read_bool(np, "uart-has-rtscts");
@@ -3501,6 +3631,7 @@ static int sci_probe(struct platform_device *dev)
p = sci_parse_dt(dev, &dev_id);
if (IS_ERR(p))
return PTR_ERR(p);
+ sp = &sci_ports[dev_id];
} else {
p = dev->dev.platform_data;
if (p == NULL) {
@@ -3509,9 +3640,17 @@ static int sci_probe(struct platform_device *dev)
}
dev_id = dev->id;
+ sp = &sci_ports[dev_id];
+ sp->params = sci_probe_regmap(p, sp);
+ if (!sp->params)
+ return -ENODEV;
}
- sp = &sci_ports[dev_id];
+ sp->suspend_regs = devm_kzalloc(&dev->dev,
+ sp->ops->suspend_regs_size(),
+ GFP_KERNEL);
+ if (!sp->suspend_regs)
+ return -ENOMEM;
/*
* In case:
@@ -3563,64 +3702,6 @@ static int sci_probe(struct platform_device *dev)
return 0;
}
-static void sci_console_save(struct sci_port *s)
-{
- struct sci_suspend_regs *regs = &s->suspend_regs;
- struct uart_port *port = &s->port;
-
- if (sci_getreg(port, SCDL)->size)
- regs->scdl = sci_serial_in(port, SCDL);
- if (sci_getreg(port, SCCKS)->size)
- regs->sccks = sci_serial_in(port, SCCKS);
- if (sci_getreg(port, SCSMR)->size)
- regs->scsmr = sci_serial_in(port, SCSMR);
- if (sci_getreg(port, SCSCR)->size)
- regs->scscr = sci_serial_in(port, SCSCR);
- if (sci_getreg(port, SCFCR)->size)
- regs->scfcr = sci_serial_in(port, SCFCR);
- if (sci_getreg(port, SCSPTR)->size)
- regs->scsptr = sci_serial_in(port, SCSPTR);
- if (sci_getreg(port, SCBRR)->size)
- regs->scbrr = sci_serial_in(port, SCBRR);
- if (sci_getreg(port, HSSRR)->size)
- regs->hssrr = sci_serial_in(port, HSSRR);
- if (sci_getreg(port, SCPCR)->size)
- regs->scpcr = sci_serial_in(port, SCPCR);
- if (sci_getreg(port, SCPDR)->size)
- regs->scpdr = sci_serial_in(port, SCPDR);
- if (sci_getreg(port, SEMR)->size)
- regs->semr = sci_serial_in(port, SEMR);
-}
-
-static void sci_console_restore(struct sci_port *s)
-{
- struct sci_suspend_regs *regs = &s->suspend_regs;
- struct uart_port *port = &s->port;
-
- if (sci_getreg(port, SCDL)->size)
- sci_serial_out(port, SCDL, regs->scdl);
- if (sci_getreg(port, SCCKS)->size)
- sci_serial_out(port, SCCKS, regs->sccks);
- if (sci_getreg(port, SCSMR)->size)
- sci_serial_out(port, SCSMR, regs->scsmr);
- if (sci_getreg(port, SCSCR)->size)
- sci_serial_out(port, SCSCR, regs->scscr);
- if (sci_getreg(port, SCFCR)->size)
- sci_serial_out(port, SCFCR, regs->scfcr);
- if (sci_getreg(port, SCSPTR)->size)
- sci_serial_out(port, SCSPTR, regs->scsptr);
- if (sci_getreg(port, SCBRR)->size)
- sci_serial_out(port, SCBRR, regs->scbrr);
- if (sci_getreg(port, HSSRR)->size)
- sci_serial_out(port, HSSRR, regs->hssrr);
- if (sci_getreg(port, SCPCR)->size)
- sci_serial_out(port, SCPCR, regs->scpcr);
- if (sci_getreg(port, SCPDR)->size)
- sci_serial_out(port, SCPDR, regs->scpdr);
- if (sci_getreg(port, SEMR)->size)
- sci_serial_out(port, SEMR, regs->semr);
-}
-
static __maybe_unused int sci_suspend(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
@@ -3628,8 +3709,10 @@ static __maybe_unused int sci_suspend(struct device *dev)
if (sport) {
uart_suspend_port(&sci_uart_driver, &sport->port);
- if (!console_suspend_enabled && uart_console(&sport->port))
- sci_console_save(sport);
+ if (!console_suspend_enabled && uart_console(&sport->port)) {
+ if (sport->ops->console_save)
+ sport->ops->console_save(&sport->port);
+ }
else
return reset_control_assert(sport->rstc);
}
@@ -3643,7 +3726,8 @@ static __maybe_unused int sci_resume(struct device *dev)
if (sport) {
if (!console_suspend_enabled && uart_console(&sport->port)) {
- sci_console_restore(sport);
+ if (sport->ops->console_restore)
+ sport->ops->console_restore(&sport->port);
} else {
int ret = reset_control_deassert(sport->rstc);
@@ -3707,21 +3791,31 @@ static int early_console_exit(struct console *co)
return 0;
}
-static int __init early_console_setup(struct earlycon_device *device,
- int type)
+int __init scix_early_console_setup(struct earlycon_device *device,
+ const struct sci_of_data *data)
{
+ const struct sci_common_regs *regs;
+
if (!device->port.membase)
return -ENODEV;
- device->port.type = type;
+ device->port.type = data->type;
sci_ports[0].port = device->port;
- port_cfg.type = type;
+
+ port_cfg.type = data->type;
+ port_cfg.regtype = data->regtype;
+
sci_ports[0].cfg = &port_cfg;
- sci_ports[0].params = sci_probe_regmap(&port_cfg);
+ sci_ports[0].params = data->params;
+ sci_ports[0].ops = data->ops;
+ sci_ports[0].port.ops = data->uart_ops;
sci_uart_earlycon = true;
- port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR);
- sci_serial_out(&sci_ports[0].port, SCSCR,
- SCSCR_RE | SCSCR_TE | port_cfg.scscr);
+ regs = sci_ports[0].params->common_regs;
+
+ port_cfg.scscr = sci_ports[0].ops->read_reg(&sci_ports[0].port, regs->control);
+ sci_ports[0].ops->write_reg(&sci_ports[0].port,
+ regs->control,
+ sci_ports[0].params->param_bits->rxtx_enable | port_cfg.scscr);
device->con->write = serial_console_write;
device->con->exit = early_console_exit;
@@ -3731,41 +3825,39 @@ static int __init early_console_setup(struct earlycon_device *device,
static int __init sci_early_console_setup(struct earlycon_device *device,
const char *opt)
{
- return early_console_setup(device, PORT_SCI);
+ return scix_early_console_setup(device, &of_sci_sci);
}
static int __init scif_early_console_setup(struct earlycon_device *device,
const char *opt)
{
- return early_console_setup(device, PORT_SCIF);
+ return scix_early_console_setup(device, &of_sci_scif_sh4);
}
static int __init rzscifa_early_console_setup(struct earlycon_device *device,
const char *opt)
{
- port_cfg.regtype = SCIx_RZ_SCIFA_REGTYPE;
- return early_console_setup(device, PORT_SCIF);
+ return scix_early_console_setup(device, &of_sci_scif_rz_scifa);
}
static int __init rzv2hscif_early_console_setup(struct earlycon_device *device,
const char *opt)
{
- port_cfg.regtype = SCIx_RZV2H_SCIF_REGTYPE;
- return early_console_setup(device, PORT_SCIF);
+ return scix_early_console_setup(device, &of_sci_scif_rzv2h);
}
static int __init scifa_early_console_setup(struct earlycon_device *device,
const char *opt)
{
- return early_console_setup(device, PORT_SCIFA);
+ return scix_early_console_setup(device, &of_sci_scifa);
}
static int __init scifb_early_console_setup(struct earlycon_device *device,
const char *opt)
{
- return early_console_setup(device, PORT_SCIFB);
+ return scix_early_console_setup(device, &of_sci_scifb);
}
static int __init hscif_early_console_setup(struct earlycon_device *device,
const char *opt)
{
- return early_console_setup(device, PORT_HSCIF);
+ return scix_early_console_setup(device, &of_sci_hscif);
}
OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 0b65563c4e9e..951681aba586 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -32,8 +32,6 @@ enum {
HSRTRGR, /* Rx FIFO Data Count Trigger Register */
HSTTRGR, /* Tx FIFO Data Count Trigger Register */
SEMR, /* Serial extended mode register */
-
- SCIx_NR_REGS,
};
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 054a8e630ace..110d67613192 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -141,6 +141,7 @@
* @baud_rate: UART serial line rate (e.g., 115200 baud)
* @clk: reference to this device's clock
* @clk_notifier: clock rate change notifier for upstream clock changes
+ * @console_line_ended: whether the last console write ended with a newline
*
* Configuration data specific to this SiFive UART.
*/
@@ -151,6 +152,7 @@ struct sifive_serial_port {
unsigned long baud_rate;
struct clk *clk;
struct notifier_block clk_notifier;
+ bool console_line_ended;
};
/*
@@ -785,33 +787,88 @@ static void sifive_serial_console_putchar(struct uart_port *port, unsigned char
__ssp_wait_for_xmitr(ssp);
__ssp_transmit_char(ssp, ch);
+
+ ssp->console_line_ended = (ch == '\n');
+}
+
+static void sifive_serial_device_lock(struct console *co, unsigned long *flags)
+{
+ struct uart_port *up = &sifive_serial_console_ports[co->index]->port;
+
+ __uart_port_lock_irqsave(up, flags);
+}
+
+static void sifive_serial_device_unlock(struct console *co, unsigned long flags)
+{
+ struct uart_port *up = &sifive_serial_console_ports[co->index]->port;
+
+ __uart_port_unlock_irqrestore(up, flags);
}
-static void sifive_serial_console_write(struct console *co, const char *s,
- unsigned int count)
+static void sifive_serial_console_write_atomic(struct console *co,
+ struct nbcon_write_context *wctxt)
{
struct sifive_serial_port *ssp = sifive_serial_console_ports[co->index];
- unsigned long flags;
+ struct uart_port *port = &ssp->port;
unsigned int ier;
- int locked = 1;
if (!ssp)
return;
- if (oops_in_progress)
- locked = uart_port_trylock_irqsave(&ssp->port, &flags);
- else
- uart_port_lock_irqsave(&ssp->port, &flags);
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
__ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
- uart_console_write(&ssp->port, s, count, sifive_serial_console_putchar);
+ if (!ssp->console_line_ended)
+ uart_console_write(port, "\n", 1, sifive_serial_console_putchar);
+ uart_console_write(port, wctxt->outbuf, wctxt->len,
+ sifive_serial_console_putchar);
__ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
- if (locked)
- uart_port_unlock_irqrestore(&ssp->port, flags);
+ nbcon_exit_unsafe(wctxt);
+}
+
+static void sifive_serial_console_write_thread(struct console *co,
+ struct nbcon_write_context *wctxt)
+{
+ struct sifive_serial_port *ssp = sifive_serial_console_ports[co->index];
+ struct uart_port *port = &ssp->port;
+ unsigned int ier;
+
+ if (!ssp)
+ return;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
+ __ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
+
+ if (nbcon_exit_unsafe(wctxt)) {
+ int len = READ_ONCE(wctxt->len);
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (!nbcon_enter_unsafe(wctxt))
+ break;
+
+ uart_console_write(port, wctxt->outbuf + i, 1,
+ sifive_serial_console_putchar);
+
+ if (!nbcon_exit_unsafe(wctxt))
+ break;
+ }
+ }
+
+ while (!nbcon_enter_unsafe(wctxt))
+ nbcon_reacquire_nobuf(wctxt);
+
+ __ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
+
+ nbcon_exit_unsafe(wctxt);
}
static int sifive_serial_console_setup(struct console *co, char *options)
@@ -829,6 +886,8 @@ static int sifive_serial_console_setup(struct console *co, char *options)
if (!ssp)
return -ENODEV;
+ ssp->console_line_ended = true;
+
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -839,10 +898,13 @@ static struct uart_driver sifive_serial_uart_driver;
static struct console sifive_serial_console = {
.name = SIFIVE_TTY_PREFIX,
- .write = sifive_serial_console_write,
+ .write_atomic = sifive_serial_console_write_atomic,
+ .write_thread = sifive_serial_console_write_thread,
+ .device_lock = sifive_serial_device_lock,
+ .device_unlock = sifive_serial_device_unlock,
.device = uart_console_device,
.setup = sifive_serial_console_setup,
- .flags = CON_PRINTBUFFER,
+ .flags = CON_PRINTBUFFER | CON_NBCON,
.index = -1,
.data = &sifive_serial_uart_driver,
};
diff --git a/drivers/tty/serial/tegra-utc.c b/drivers/tty/serial/tegra-utc.c
index 39b14fe813c9..0c70d3e7b9b9 100644
--- a/drivers/tty/serial/tegra-utc.c
+++ b/drivers/tty/serial/tegra-utc.c
@@ -434,7 +434,7 @@ static void tegra_utc_console_write_atomic(struct console *cons, struct nbcon_wr
outbuf += burst_size;
len -= burst_size;
- };
+ }
nbcon_exit_unsafe(wctxt);
}
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index a41e7fc373b7..39c1fd1ff9ce 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -880,16 +880,6 @@ of_err:
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- if (!ulite_uart_driver.state) {
- dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
- ret = uart_register_driver(&ulite_uart_driver);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register driver\n");
- clk_disable_unprepare(pdata->clk);
- return ret;
- }
- }
-
ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
pm_runtime_mark_last_busy(&pdev->dev);
@@ -929,16 +919,25 @@ static struct platform_driver ulite_platform_driver = {
static int __init ulite_init(void)
{
+ int ret;
+
+ pr_debug("uartlite: calling uart_register_driver()\n");
+ ret = uart_register_driver(&ulite_uart_driver);
+ if (ret)
+ return ret;
pr_debug("uartlite: calling platform_driver_register()\n");
- return platform_driver_register(&ulite_platform_driver);
+ ret = platform_driver_register(&ulite_platform_driver);
+ if (ret)
+ uart_unregister_driver(&ulite_uart_driver);
+
+ return ret;
}
static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
- if (ulite_uart_driver.state)
- uart_unregister_driver(&ulite_uart_driver);
+ uart_unregister_driver(&ulite_uart_driver);
}
module_init(ulite_init);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 4c703f42680d..3865b10d2d43 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -5002,7 +5002,7 @@ static int adapter_test(struct slgt_info *info)
*/
static void tx_timeout(struct timer_list *t)
{
- struct slgt_info *info = from_timer(info, t, tx_timer);
+ struct slgt_info *info = timer_container_of(info, t, tx_timer);
unsigned long flags;
DBGINFO(("%s tx_timeout\n", info->device_name));
@@ -5026,7 +5026,7 @@ static void tx_timeout(struct timer_list *t)
*/
static void rx_timeout(struct timer_list *t)
{
- struct slgt_info *info = from_timer(info, t, rx_timer);
+ struct slgt_info *info = timer_container_of(info, t, rx_timer);
unsigned long flags;
DBGINFO(("%s rx_timeout\n", info->device_name));
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 6853c4660e7c..d77c03d22227 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -712,7 +712,8 @@ static void sysrq_parse_reset_sequence(struct sysrq_state *state)
static void sysrq_do_reset(struct timer_list *t)
{
- struct sysrq_state *state = from_timer(state, t, keyreset_timer);
+ struct sysrq_state *state = timer_container_of(state, t,
+ keyreset_timer);
state->reset_requested = true;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index ca9b7d7bad2b..e2d92cf70eb7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -276,11 +276,10 @@ static void check_tty_count(struct tty_struct *tty, const char *routine)
struct list_head *p;
int count = 0, kopen_count = 0;
- spin_lock(&tty->files_lock);
- list_for_each(p, &tty->tty_files) {
- count++;
- }
- spin_unlock(&tty->files_lock);
+ scoped_guard(spinlock, &tty->files_lock)
+ list_for_each(p, &tty->tty_files)
+ count++;
+
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_SLAVE &&
tty->link && tty->link->count)
@@ -378,7 +377,7 @@ EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
*/
struct tty_driver *tty_find_polling_driver(char *name, int *line)
{
- struct tty_driver *p, *res = NULL;
+ struct tty_driver *p;
int tty_line = 0;
int len;
char *str, *stp;
@@ -392,7 +391,8 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
len = str - name;
tty_line = simple_strtoul(str, &str, 10);
- mutex_lock(&tty_mutex);
+ guard(mutex)(&tty_mutex);
+
/* Search through the tty devices to look for a match */
list_for_each_entry(p, &tty_drivers, tty_drivers) {
if (!len || strncmp(name, p->name, len) != 0)
@@ -405,14 +405,12 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
if (tty_line >= 0 && tty_line < p->num && p->ops &&
p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
- res = tty_driver_kref_get(p);
*line = tty_line;
- break;
+ return tty_driver_kref_get(p);
}
}
- mutex_unlock(&tty_mutex);
- return res;
+ return NULL;
}
EXPORT_SYMBOL_GPL(tty_find_polling_driver);
#endif
@@ -531,16 +529,15 @@ EXPORT_SYMBOL_GPL(tty_wakeup);
*/
static struct file *tty_release_redirect(struct tty_struct *tty)
{
- struct file *f = NULL;
+ guard(spinlock)(&redirect_lock);
- spin_lock(&redirect_lock);
if (redirect && file_tty(redirect) == tty) {
- f = redirect;
+ struct file *f = redirect;
redirect = NULL;
+ return f;
}
- spin_unlock(&redirect_lock);
- return f;
+ return NULL;
}
/**
@@ -765,11 +762,8 @@ void __stop_tty(struct tty_struct *tty)
*/
void stop_tty(struct tty_struct *tty)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tty->flow.lock, flags);
+ guard(spinlock_irqsave)(&tty->flow.lock);
__stop_tty(tty);
- spin_unlock_irqrestore(&tty->flow.lock, flags);
}
EXPORT_SYMBOL(stop_tty);
@@ -796,11 +790,8 @@ void __start_tty(struct tty_struct *tty)
*/
void start_tty(struct tty_struct *tty)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tty->flow.lock, flags);
+ guard(spinlock_irqsave)(&tty->flow.lock);
__start_tty(tty);
- spin_unlock_irqrestore(&tty->flow.lock, flags);
}
EXPORT_SYMBOL(start_tty);
@@ -809,7 +800,8 @@ static void tty_update_time(struct tty_struct *tty, bool mtime)
time64_t sec = ktime_get_real_seconds();
struct tty_file_private *priv;
- spin_lock(&tty->files_lock);
+ guard(spinlock)(&tty->files_lock);
+
list_for_each_entry(priv, &tty->tty_files, list) {
struct inode *inode = file_inode(priv->file);
struct timespec64 time = mtime ? inode_get_mtime(inode) : inode_get_atime(inode);
@@ -827,7 +819,6 @@ static void tty_update_time(struct tty_struct *tty, bool mtime)
inode_set_atime(inode, sec, 0);
}
}
- spin_unlock(&tty->files_lock);
}
/*
@@ -2314,13 +2305,12 @@ static int tiocsti(struct tty_struct *tty, u8 __user *p)
*/
static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
{
- int err;
+ guard(mutex)(&tty->winsize_mutex);
- mutex_lock(&tty->winsize_mutex);
- err = copy_to_user(arg, &tty->winsize, sizeof(*arg));
- mutex_unlock(&tty->winsize_mutex);
+ if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
+ return -EFAULT;
- return err ? -EFAULT : 0;
+ return 0;
}
/**
@@ -2335,10 +2325,10 @@ int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
{
struct pid *pgrp;
- /* Lock the tty */
- mutex_lock(&tty->winsize_mutex);
+ guard(mutex)(&tty->winsize_mutex);
+
if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
- goto done;
+ return 0;
/* Signal the foreground process group */
pgrp = tty_get_pgrp(tty);
@@ -2347,8 +2337,7 @@ int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
put_pid(pgrp);
tty->winsize = *ws;
-done:
- mutex_unlock(&tty->winsize_mutex);
+
return 0;
}
EXPORT_SYMBOL(tty_do_resize);
@@ -2409,13 +2398,14 @@ static int tioccons(struct file *file)
return -EBADF;
if (!(file->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
- spin_lock(&redirect_lock);
- if (redirect) {
- spin_unlock(&redirect_lock);
+
+ guard(spinlock)(&redirect_lock);
+
+ if (redirect)
return -EBUSY;
- }
+
redirect = get_file(file);
- spin_unlock(&redirect_lock);
+
return 0;
}
@@ -3028,11 +3018,9 @@ void __do_SAK(struct tty_struct *tty)
struct task_struct *g, *p;
struct pid *session;
int i;
- unsigned long flags;
- spin_lock_irqsave(&tty->ctrl.lock, flags);
- session = get_pid(tty->ctrl.session);
- spin_unlock_irqrestore(&tty->ctrl.lock, flags);
+ scoped_guard(spinlock_irqsave, &tty->ctrl.lock)
+ session = get_pid(tty->ctrl.session);
tty_ldisc_flush(tty);
@@ -3055,7 +3043,7 @@ void __do_SAK(struct tty_struct *tty)
PIDTYPE_SID);
continue;
}
- task_lock(p);
+ guard(task_lock)(p);
i = iterate_fd(p->files, 0, this_tty, tty);
if (i != 0) {
tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n",
@@ -3063,7 +3051,6 @@ void __do_SAK(struct tty_struct *tty)
group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p,
PIDTYPE_SID);
}
- task_unlock(p);
}
read_unlock(&tasklist_lock);
put_pid(session);
@@ -3465,9 +3452,8 @@ int tty_register_driver(struct tty_driver *driver)
goto err_unreg_char;
}
- mutex_lock(&tty_mutex);
- list_add(&driver->tty_drivers, &tty_drivers);
- mutex_unlock(&tty_mutex);
+ scoped_guard(mutex, &tty_mutex)
+ list_add(&driver->tty_drivers, &tty_drivers);
if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
for (i = 0; i < driver->num; i++) {
@@ -3486,9 +3472,8 @@ err_unreg_devs:
for (i--; i >= 0; i--)
tty_unregister_device(driver, i);
- mutex_lock(&tty_mutex);
- list_del(&driver->tty_drivers);
- mutex_unlock(&tty_mutex);
+ scoped_guard(mutex, &tty_mutex)
+ list_del(&driver->tty_drivers);
err_unreg_char:
unregister_chrdev_region(dev, driver->num);
@@ -3507,9 +3492,8 @@ void tty_unregister_driver(struct tty_driver *driver)
{
unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
driver->num);
- mutex_lock(&tty_mutex);
- list_del(&driver->tty_drivers);
- mutex_unlock(&tty_mutex);
+ scoped_guard(mutex, &tty_mutex)
+ list_del(&driver->tty_drivers);
}
EXPORT_SYMBOL(tty_unregister_driver);
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 85de90eebc7b..90c70d8d14e3 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -122,21 +122,19 @@ EXPORT_SYMBOL(tty_unthrottle);
*/
bool tty_throttle_safe(struct tty_struct *tty)
{
- bool ret = true;
-
- mutex_lock(&tty->throttle_mutex);
- if (!tty_throttled(tty)) {
- if (tty->flow_change != TTY_THROTTLE_SAFE)
- ret = false;
- else {
- set_bit(TTY_THROTTLED, &tty->flags);
- if (tty->ops->throttle)
- tty->ops->throttle(tty);
- }
- }
- mutex_unlock(&tty->throttle_mutex);
+ guard(mutex)(&tty->throttle_mutex);
- return ret;
+ if (tty_throttled(tty))
+ return true;
+
+ if (tty->flow_change != TTY_THROTTLE_SAFE)
+ return false;
+
+ set_bit(TTY_THROTTLED, &tty->flags);
+ if (tty->ops->throttle)
+ tty->ops->throttle(tty);
+
+ return true;
}
/**
@@ -152,21 +150,19 @@ bool tty_throttle_safe(struct tty_struct *tty)
*/
bool tty_unthrottle_safe(struct tty_struct *tty)
{
- bool ret = true;
+ guard(mutex)(&tty->throttle_mutex);
- mutex_lock(&tty->throttle_mutex);
- if (tty_throttled(tty)) {
- if (tty->flow_change != TTY_UNTHROTTLE_SAFE)
- ret = false;
- else {
- clear_bit(TTY_THROTTLED, &tty->flags);
- if (tty->ops->unthrottle)
- tty->ops->unthrottle(tty);
- }
- }
- mutex_unlock(&tty->throttle_mutex);
+ if (!tty_throttled(tty))
+ return true;
- return ret;
+ if (tty->flow_change != TTY_UNTHROTTLE_SAFE)
+ return false;
+
+ clear_bit(TTY_THROTTLED, &tty->flags);
+ if (tty->ops->unthrottle)
+ tty->ops->unthrottle(tty);
+
+ return true;
}
/**
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 14cca33d2269..4af1fbf73f51 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -200,26 +200,6 @@ struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
/**
- * tty_port_register_device_serdev - register tty or serdev device
- * @port: tty_port of the device
- * @driver: tty_driver for this device
- * @index: index of the tty
- * @host: serial port hardware controller device
- * @parent: parent if exists, otherwise NULL
- *
- * Register a serdev or tty device depending on if the parent device has any
- * defined serdev clients or not.
- */
-struct device *tty_port_register_device_serdev(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *host, struct device *parent)
-{
- return tty_port_register_device_attr_serdev(port, driver, index,
- host, parent, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(tty_port_register_device_serdev);
-
-/**
* tty_port_unregister_device - deregister a tty or serdev device
* @port: tty_port of the device
* @driver: tty_driver for this device
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index 7ac3048377d5..2960031ace72 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -356,7 +356,7 @@ done:
static void vcc_rx_timer(struct timer_list *t)
{
- struct vcc_port *port = from_timer(port, t, rx_timer);
+ struct vcc_port *port = timer_container_of(port, t, rx_timer);
struct vio_driver_state *vio;
unsigned long flags;
int rv;
@@ -382,7 +382,7 @@ done:
static void vcc_tx_timer(struct timer_list *t)
{
- struct vcc_port *port = from_timer(port, t, tx_timer);
+ struct vcc_port *port = timer_container_of(port, t, tx_timer);
struct vio_vcc *pkt;
unsigned long flags;
size_t tosend = 0;
diff --git a/drivers/tty/vt/.gitignore b/drivers/tty/vt/.gitignore
index 0221709b177d..a74859bab862 100644
--- a/drivers/tty/vt/.gitignore
+++ b/drivers/tty/vt/.gitignore
@@ -2,3 +2,6 @@
/conmakehash
/consolemap_deftbl.c
/defkeymap.c
+/ucs_fallback_table.h
+/ucs_recompose_table.h
+/ucs_width_table.h
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index 2c8ce8b592ed..ae746dcdeec8 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -7,10 +7,12 @@ FONTMAPFILE = cp437.uni
obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o \
selection.o keyboard.o \
vt.o defkeymap.o
-obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
+obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o \
+ ucs.o
# Files generated that shall be removed upon make clean
-clean-files := consolemap_deftbl.c defkeymap.c
+clean-files := consolemap_deftbl.c defkeymap.c \
+ ucs_width_table.h ucs_recompose_table.h ucs_fallback_table.h
hostprogs += conmakehash
@@ -33,3 +35,31 @@ $(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
loadkeys --mktable --unicode $< > $@
endif
+
+$(obj)/ucs.o: $(src)/ucs.c $(obj)/ucs_width_table.h \
+ $(obj)/ucs_recompose_table.h $(obj)/ucs_fallback_table.h
+
+# You may uncomment one of these to have the UCS tables regenerated
+# during the build process. By default the _shipped versions are used.
+#
+#GENERATE_UCS_TABLES := 1
+#GENERATE_UCS_TABLES := 2 # invokes gen_ucs_recompose_table.py with --full
+
+ifdef GENERATE_UCS_TABLES
+
+$(obj)/ucs_width_table.h: $(src)/gen_ucs_width_table.py
+ $(PYTHON3) $< -o $@
+
+ifeq ($(GENERATE_UCS_TABLES),2)
+gen_recomp_arg := --full
+else
+gen_recomp_arg :=
+endif
+
+$(obj)/ucs_recompose_table.h: $(src)/gen_ucs_recompose_table.py
+ $(PYTHON3) $< -o $@ $(gen_recomp_arg)
+
+$(obj)/ucs_fallback_table.h: $(src)/gen_ucs_fallback_table.py
+ $(PYTHON3) $< -o $@
+
+endif
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 82d70083fead..bb4bb272ebec 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -870,8 +870,6 @@ int conv_uni_to_pc(struct vc_data *conp, long ucs)
return -4; /* Not found */
else if (ucs < 0x20)
return -1; /* Not a printable character */
- else if (ucs == 0xfeff || (ucs >= 0x200b && ucs <= 0x200f))
- return -2; /* Zero-width space */
/*
* UNI_DIRECT_BASE indicates the start of the region in the User Zone
* which always has a 1:1 mapping to the currently loaded font. The
diff --git a/drivers/tty/vt/gen_ucs_fallback_table.py b/drivers/tty/vt/gen_ucs_fallback_table.py
new file mode 100755
index 000000000000..6e09c1cb6d4b
--- /dev/null
+++ b/drivers/tty/vt/gen_ucs_fallback_table.py
@@ -0,0 +1,360 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Leverage Python's unidecode module to generate ucs_fallback_table.h
+#
+# The generated table maps complex characters to their simpler fallback forms
+# for a terminal display when corresponding glyphs are unavailable.
+#
+# Usage:
+# python3 gen_ucs_fallback_table.py # Generate fallback tables
+# python3 gen_ucs_fallback_table.py -o FILE # Specify output file
+
+import unicodedata
+from unidecode import unidecode
+import sys
+import argparse
+from collections import defaultdict
+
+# Try to get unidecode version
+try:
+ from importlib.metadata import version
+ unidecode_version = version('unidecode')
+except Exception:
+ unidecode_version = 'unknown'
+
+# This script's file name
+from pathlib import Path
+this_file = Path(__file__).name
+
+# Default output file name
+DEFAULT_OUT_FILE = "ucs_fallback_table.h"
+
+# Define the range marker value
+RANGE_MARKER = 0x00
+
+def generate_fallback_map():
+ """Generate a fallback map using unidecode for all relevant Unicode points."""
+ fallback_map = {}
+
+ # Process BMP characters (0x0000 - 0xFFFF) to keep table size manageable
+ for cp in range(0x0080, 0x10000): # Skip ASCII range (0x00-0x7F)
+ char = chr(cp)
+
+ # Skip unassigned/control characters
+ try:
+ if not unicodedata.name(char, ''):
+ continue
+ except ValueError:
+ continue
+
+ # Get the unidecode transliteration
+ ascii_version = unidecode(char)
+
+ # Only store if it results in a single character mapping
+ if len(ascii_version) == 1:
+ fallback_map[cp] = ord(ascii_version)
+
+ # Apply manual overrides for special cases
+ fallback_map.update(get_special_overrides())
+
+ return fallback_map
+
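+# Illustrative only (not part of the generator): the kind of output the
+# unidecode calls above produce, assuming stock unidecode behavior.
+# Single-character results are stored directly; multi-character results
+# such as "<=" are skipped by the length check and handled through the
+# overrides below.
+def _example_unidecode_output():
+    assert unidecode('\u00e9') == 'e'    # é transliterates to one char
+    assert unidecode('\u2264') == '<='   # ≤ yields two chars, so overridden
+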
+def get_special_overrides():
+ """Get special case overrides that need different handling than unidecode
+ provides... or doesn't provide at all."""
+
+ overrides = {}
+
+ # Multi-character unidecode output
+ # These map to single chars instead of unidecode's multiple-char mappings
+ # In a terminal fallback context, we need a single character rather than multiple
+ overrides[0x00C6] = ord('E') # Æ LATIN CAPITAL LETTER AE -> E (unidecode: "AE")
+ overrides[0x00E6] = ord('e') # æ LATIN SMALL LETTER AE -> e (unidecode: "ae")
+ overrides[0x0152] = ord('E') # Œ LATIN CAPITAL LIGATURE OE -> E (unidecode: "OE")
+ overrides[0x0153] = ord('e') # œ LATIN SMALL LETTER LIGATURE OE -> e (unidecode: "oe")
+ overrides[0x00DF] = ord('s') # ß LATIN SMALL LETTER SHARP S -> s (unidecode: "ss")
+
+ # Comparison operators that unidecode renders as multiple characters
+ overrides[0x2264] = ord('<') # ≤ LESS-THAN OR EQUAL TO -> < (unidecode: "<=")
+ overrides[0x2265] = ord('>') # ≥ GREATER-THAN OR EQUAL TO -> > (unidecode: ">=")
+
+ # Unidecode returns an empty string for these
+ overrides[0x2260] = ord('#') # ≠ NOT EQUAL TO -> # (unidecode: empty string)
+
+ # Quadrant block characters that unidecode doesn't map
+ for cp in range(0x2596, 0x259F+1):
+ overrides[cp] = ord('#') # ▖ ▗ ▘ ▙ etc. - map to # (unidecode: empty string)
+
+ # Directional arrows
+ # These provide better semantic meaning than unidecode's mappings
+ overrides[0x2192] = ord('>') # → RIGHTWARDS ARROW -> > (unidecode: "-")
+ overrides[0x2190] = ord('<') # ← LEFTWARDS ARROW -> < (unidecode: "-")
+ overrides[0x2191] = ord('^') # ↑ UPWARDS ARROW -> ^ (unidecode: "|")
+ overrides[0x2193] = ord('v') # ↓ DOWNWARDS ARROW -> v (unidecode: "|")
+
+ # Double arrows with their directional semantic mappings
+ overrides[0x21D0] = ord('<') # ⇐ LEFTWARDS DOUBLE ARROW -> <
+ overrides[0x21D1] = ord('^') # ⇑ UPWARDS DOUBLE ARROW -> ^
+ overrides[0x21D2] = ord('>') # ⇒ RIGHTWARDS DOUBLE ARROW -> >
+ overrides[0x21D3] = ord('v') # ⇓ DOWNWARDS DOUBLE ARROW -> v
+
+ # Halfwidth arrows
+ # These need the same treatment as their normal-width counterparts
+ overrides[0xFFE9] = ord('<') # ← HALFWIDTH LEFTWARDS ARROW -> < (unidecode: "-")
+ overrides[0xFFEA] = ord('^') # ↑ HALFWIDTH UPWARDS ARROW -> ^ (unidecode: "|")
+ overrides[0xFFEB] = ord('>') # → HALFWIDTH RIGHTWARDS ARROW -> > (unidecode: "-")
+ overrides[0xFFEC] = ord('v') # ↓ HALFWIDTH DOWNWARDS ARROW -> v (unidecode: "|")
+
+ # Currency symbols - each mapped to a representative letter
+ overrides[0x00A2] = ord('c') # ¢ CENT SIGN -> c
+ overrides[0x00A3] = ord('L') # £ POUND SIGN -> L
+ overrides[0x00A5] = ord('Y') # ¥ YEN SIGN -> Y
+ overrides[0x20AC] = ord('E') # € EURO SIGN -> E
+
+ # Symbols mapped to letters
+ overrides[0x00A7] = ord('S') # § SECTION SIGN -> S
+ overrides[0x00A9] = ord('C') # © COPYRIGHT SIGN -> C
+ overrides[0x00AE] = ord('R') # ® REGISTERED SIGN -> R
+ overrides[0x2122] = ord('T') # ™ TRADE MARK SIGN -> T
+
+ # Degree-related symbols
+ overrides[0x00B0] = ord('o') # ° DEGREE SIGN -> o
+ overrides[0x2103] = ord('C') # ℃ DEGREE CELSIUS -> C
+ overrides[0x2109] = ord('F') # ℉ DEGREE FAHRENHEIT -> F
+
+ # Angle quotation marks
+ overrides[0x00AB] = ord('<') # « LEFT-POINTING DOUBLE ANGLE QUOTATION MARK -> <
+ overrides[0x00BB] = ord('>') # » RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK -> >
+
+ # Operators with circular shape
+ overrides[0x2218] = ord('o') # ∘ RING OPERATOR -> o
+ overrides[0x2219] = ord('.') # ∙ BULLET OPERATOR -> .
+
+ # Negated mathematical symbols (preserving the negation semantics)
+ # Negated symbols mapped to exclamation mark (semantically "not")
+ for cp in (0x2204, 0x2209, 0x220C, 0x2224, 0x2226, 0x226E, 0x226F, 0x2280, 0x2281, 0x2284, 0x2285):
+ overrides[cp] = ord('!') # Negated math symbols -> ! (not)
+
+ # Negated symbols mapped to hash sign (semantically "not equal")
+ for cp in (0x2241, 0x2244, 0x2249, 0x2262, 0x2268, 0x2269, 0x226D, 0x228A, 0x228B):
+ overrides[cp] = ord('#') # Negated equality symbols -> # (not equal)
+
+ # Negated arrows - all mapped to exclamation mark
+ for cp in (0x219A, 0x219B, 0x21AE, 0x21CD, 0x21CE, 0x21CF):
+ overrides[cp] = ord('!') # Negated arrows -> ! (not)
+
+ # Dashes and hyphens
+ for cp in (0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2043, 0x2052):
+ overrides[cp] = ord('-') # Dashes and hyphens -> -
+
+ # Question mark punctuation
+ for cp in (0x203D, 0x2047, 0x2048):
+ overrides[cp] = ord('?') # Question marks -> ?
+
+ # Exclamation mark punctuation
+ for cp in (0x203C, 0x2049):
+ overrides[cp] = ord('!') # Exclamation marks -> !
+
+ # Asterisk-like symbols
+ for cp in (0x2042, 0x2051, 0x2055):
+ overrides[cp] = ord('*')
+
+ # Other specific punctuation with unique mappings
+ overrides[0x201E] = ord('"') # „ DOUBLE LOW-9 QUOTATION MARK
+ overrides[0x2023] = ord('>') # ‣ TRIANGULAR BULLET
+ overrides[0x2026] = ord('.') # … HORIZONTAL ELLIPSIS
+ overrides[0x2033] = ord('"') # ″ DOUBLE PRIME
+ overrides[0x204B] = ord('P') # ⁋ REVERSED PILCROW SIGN
+ overrides[0x204C] = ord('<') # ⁌ BLACK LEFTWARDS BULLET
+ overrides[0x204D] = ord('>') # ⁍ BLACK RIGHTWARDS BULLET
+ overrides[0x204F] = ord(';') # ⁏ REVERSED SEMICOLON
+ overrides[0x205B] = ord(':') # ⁛ FOUR DOT MARK
+
+ # Check marks
+ overrides[0x2713] = ord('v') # ✓ CHECK MARK
+ overrides[0x2714] = ord('V') # ✔ HEAVY CHECK MARK
+
+ # X marks - lowercase for regular, uppercase for heavy
+ for cp in (0x2715, 0x2717):
+ overrides[cp] = ord('x') # Regular X marks -> x
+ for cp in (0x2716, 0x2718):
+ overrides[cp] = ord('X') # Heavy X marks -> X
+
+ # Stars and asterisk-like symbols mapped to '*'
+ for cp in (0x2605, 0x2606, 0x262A, 0x269D, 0x2698):
+ overrides[cp] = ord('*') # All star and asterisk symbols -> *
+ for cp in range(0x2721, 0x2746+1):
+ overrides[cp] = ord('*') # All star and asterisk symbols -> *
+ for cp in range(0x2749, 0x274B+1):
+ overrides[cp] = ord('*') # Last set of asterisk symbols -> *
+ for cp in (0x229B, 0x22C6, 0x235F, 0x2363):
+ overrides[cp] = ord('*') # Star operators -> *
+
+ # Special exclusions with fallback value of 0
+ # These will be filtered out in organize_by_pages()
+
+ # Exclude U+2028 (LINE SEPARATOR)
+ overrides[0x2028] = 0 # LINE SEPARATOR (unidecode: '\n')
+
+ # Full-width to ASCII mapping (covering all printable ASCII 33-126)
+ # 0xFF01 (!) to 0xFF5E (~) -> ASCII 33 (!) to 126 (~)
+ # Those are excluded here to reduce the table size.
+ # It is more efficient to process them programmatically in
+ # ucs.c:ucs_get_fallback().
+ for cp in range(0xFF01, 0xFF5E + 1):
+ overrides[cp] = 0 # Double-width ASCII characters
+
+ return overrides
+
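+# Illustrative only (not part of the generator): the full-width ASCII
+# range excluded above can be recovered arithmetically, which is why it
+# is dropped from the table. A rough model of the programmatic handling
+# the comment attributes to ucs.c:ucs_get_fallback():
+def _example_fullwidth_to_ascii(cp):
+    if 0xFF01 <= cp <= 0xFF5E:
+        return cp - 0xFF01 + 0x21   # U+FF01..U+FF5E map onto '!'..'~'
+    return 0
+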
+def organize_by_pages(fallback_map):
+ """Organize the fallback mappings by their high byte (page)."""
+ # Group by high byte (page)
+ page_groups = defaultdict(list)
+ for code, fallback in fallback_map.items():
+ # Skip characters with fallback value of 0 (excluded characters)
+ if fallback == 0:
+ continue
+
+ page = code >> 8 # Get the high byte (page)
+ offset = code & 0xFF # Get the low byte (offset within page)
+ page_groups[page].append((offset, fallback))
+
+ # Sort each page's entries by offset
+ for page in page_groups:
+ page_groups[page].sort()
+
+ return page_groups
+
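+# Illustrative only: the page/offset split used above, e.g. U+2264
+# lands on page 0x22 at offset 0x64.
+def _example_page_split():
+    assert (0x2264 >> 8, 0x2264 & 0xFF) == (0x22, 0x64)
+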
+def compress_ranges(page_groups):
+ """Compress consecutive entries with the same fallback character into ranges.
+ A range is only compressed if it contains 3 or more consecutive entries."""
+
+ compressed_pages = {}
+
+ for page, entries in page_groups.items():
+ compressed_entries = []
+ i = 0
+ while i < len(entries):
+ start_offset, fallback = entries[i]
+
+ # Look ahead to find consecutive entries with the same fallback
+ j = i + 1
+ while (j < len(entries) and
+ entries[j][0] == entries[j-1][0] + 1 and # consecutive offsets
+ entries[j][1] == fallback): # same fallback
+ j += 1
+
+ # Calculate the range end
+ end_offset = entries[j-1][0]
+
+ # If we found a range with 3 or more entries (worth compressing)
+ if j - i >= 3:
+ # Add a range entry
+ compressed_entries.append((start_offset, RANGE_MARKER))
+ compressed_entries.append((end_offset, fallback))
+ else:
+ # Add the individual entries as is
+ for k in range(i, j):
+ compressed_entries.append(entries[k])
+
+ i = j
+
+ compressed_pages[page] = compressed_entries
+
+ return compressed_pages
+
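+# Illustrative only (not part of the generator): a rough Python model of
+# how a consumer could resolve a lookup against one page's compressed
+# entries, where an entry whose fallback equals RANGE_MARKER opens a
+# range that the following entry closes. The in-kernel ucs.c lookup may
+# differ in its details.
+def _example_resolve(entries, offset):
+    i = 0
+    while i < len(entries):
+        start, fb = entries[i]
+        if fb == RANGE_MARKER:
+            end, range_fb = entries[i + 1]
+            if start <= offset <= end:
+                return range_fb
+            i += 2
+        elif start == offset:
+            return fb
+        else:
+            i += 1
+    return 0   # no fallback recorded for this offset
+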
+def cp_name(cp):
+ """Get the Unicode character name for a code point."""
+ try:
+ return unicodedata.name(chr(cp))
+ except ValueError:
+ return f"U+{cp:04X}"
+
+def generate_fallback_tables(out_file=DEFAULT_OUT_FILE):
+ """Generate the fallback character tables."""
+ # Generate fallback map using unidecode
+ fallback_map = generate_fallback_map()
+ print(f"Generated {len(fallback_map)} total fallback mappings")
+
+ # Organize by pages
+ page_groups = organize_by_pages(fallback_map)
+ print(f"Organized into {len(page_groups)} pages")
+
+ # Compress ranges
+ compressed_pages = compress_ranges(page_groups)
+ total_compressed_entries = sum(len(entries) for entries in compressed_pages.values())
+ print(f"Total compressed entries: {total_compressed_entries}")
+
+ # Create output file
+ with open(out_file, 'w') as f:
+ f.write(f"""\
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * {out_file} - Unicode character fallback table
+ *
+ * Auto-generated by {this_file}
+ *
+ * Unicode Version: {unicodedata.unidata_version}
+ * Unidecode Version: {unidecode_version}
+ *
+ * This file contains optimized tables that map complex Unicode characters
+ * to simpler fallback characters for terminal display when corresponding
+ * glyphs are unavailable.
+ */
+
+static const struct ucs_page_desc ucs_fallback_pages[] = {{
+""")
+
+ # Convert compressed_pages to a sorted list of (page, entries) tuples
+ sorted_pages = sorted(compressed_pages.items())
+
+ # Track the start index for each page
+ start_index = 0
+
+ # Write page descriptors
+ for page, entries in sorted_pages:
+ count = len(entries)
+ f.write(f"\t{{ 0x{page:02X}, {count}, {start_index} }},\n")
+ start_index += count
+
+ # Write entries array
+ f.write("""\
+};
+
+/* Page entries array (referenced by page descriptors) */
+static const struct ucs_page_entry ucs_fallback_entries[] = {
+""")
+
+ # Write all entries
+ for page, entries in sorted_pages:
+ page_hex = f"0x{page:02X}"
+ f.write(f"\t/* Entries for page {page_hex} */\n")
+
+ for offset, fallback in entries:
+ # Convert to hex for better readability
+ offset_hex = f"0x{offset:02X}"
+ fallback_hex = f"0x{fallback:02X}"
+
+ # Handle comments
+ codepoint = (page << 8) | offset
+
+ if fallback == RANGE_MARKER:
+ comment = f"{cp_name(codepoint)} -> ..."
+ else:
+ comment = f"{cp_name(codepoint)} -> '{chr(fallback)}'"
+ f.write(f"\t{{ {offset_hex}, {fallback_hex} }}, /* {comment} */\n")
+
+ f.write(f"""\
+}};
+
+#define UCS_PAGE_ENTRY_RANGE_MARKER {RANGE_MARKER}
+""")
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Generate Unicode fallback character tables")
+ parser.add_argument("-o", "--output", dest="output_file", default=DEFAULT_OUT_FILE,
+ help=f"Output file name (default: {DEFAULT_OUT_FILE})")
+ args = parser.parse_args()
+
+ generate_fallback_tables(out_file=args.output_file)
diff --git a/drivers/tty/vt/gen_ucs_recompose_table.py b/drivers/tty/vt/gen_ucs_recompose_table.py
new file mode 100755
index 000000000000..4434a436ac9e
--- /dev/null
+++ b/drivers/tty/vt/gen_ucs_recompose_table.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Leverage Python's unicodedata module to generate ucs_recompose_table.h
+#
+# The generated table maps base character + combining mark pairs to their
+# precomposed equivalents.
+#
+# Usage:
+# python3 gen_ucs_recompose_table.py # Generate with common recomposition pairs
+# python3 gen_ucs_recompose_table.py --full # Generate with all recomposition pairs
+
+import unicodedata
+import sys
+import argparse
+import textwrap
+
+# This script's file name
+from pathlib import Path
+this_file = Path(__file__).name
+
+# Default output file name
+DEFAULT_OUT_FILE = "ucs_recompose_table.h"
+
+common_recompose_description = "most commonly used Latin, Greek, and Cyrillic recomposition pairs only"
+COMMON_RECOMPOSITION_PAIRS = [
+ # Latin letters with accents - uppercase
+ (0x0041, 0x0300, 0x00C0), # A + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER A WITH GRAVE
+ (0x0041, 0x0301, 0x00C1), # A + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER A WITH ACUTE
+ (0x0041, 0x0302, 0x00C2), # A + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+ (0x0041, 0x0303, 0x00C3), # A + COMBINING TILDE = LATIN CAPITAL LETTER A WITH TILDE
+ (0x0041, 0x0308, 0x00C4), # A + COMBINING DIAERESIS = LATIN CAPITAL LETTER A WITH DIAERESIS
+ (0x0041, 0x030A, 0x00C5), # A + COMBINING RING ABOVE = LATIN CAPITAL LETTER A WITH RING ABOVE
+ (0x0043, 0x0327, 0x00C7), # C + COMBINING CEDILLA = LATIN CAPITAL LETTER C WITH CEDILLA
+ (0x0045, 0x0300, 0x00C8), # E + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER E WITH GRAVE
+ (0x0045, 0x0301, 0x00C9), # E + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER E WITH ACUTE
+ (0x0045, 0x0302, 0x00CA), # E + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+ (0x0045, 0x0308, 0x00CB), # E + COMBINING DIAERESIS = LATIN CAPITAL LETTER E WITH DIAERESIS
+ (0x0049, 0x0300, 0x00CC), # I + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER I WITH GRAVE
+ (0x0049, 0x0301, 0x00CD), # I + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER I WITH ACUTE
+ (0x0049, 0x0302, 0x00CE), # I + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+ (0x0049, 0x0308, 0x00CF), # I + COMBINING DIAERESIS = LATIN CAPITAL LETTER I WITH DIAERESIS
+ (0x004E, 0x0303, 0x00D1), # N + COMBINING TILDE = LATIN CAPITAL LETTER N WITH TILDE
+ (0x004F, 0x0300, 0x00D2), # O + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER O WITH GRAVE
+ (0x004F, 0x0301, 0x00D3), # O + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER O WITH ACUTE
+ (0x004F, 0x0302, 0x00D4), # O + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+ (0x004F, 0x0303, 0x00D5), # O + COMBINING TILDE = LATIN CAPITAL LETTER O WITH TILDE
+ (0x004F, 0x0308, 0x00D6), # O + COMBINING DIAERESIS = LATIN CAPITAL LETTER O WITH DIAERESIS
+ (0x0055, 0x0300, 0x00D9), # U + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER U WITH GRAVE
+ (0x0055, 0x0301, 0x00DA), # U + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER U WITH ACUTE
+ (0x0055, 0x0302, 0x00DB), # U + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+ (0x0055, 0x0308, 0x00DC), # U + COMBINING DIAERESIS = LATIN CAPITAL LETTER U WITH DIAERESIS
+ (0x0059, 0x0301, 0x00DD), # Y + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER Y WITH ACUTE
+
+ # Latin letters with accents - lowercase
+ (0x0061, 0x0300, 0x00E0), # a + COMBINING GRAVE ACCENT = LATIN SMALL LETTER A WITH GRAVE
+ (0x0061, 0x0301, 0x00E1), # a + COMBINING ACUTE ACCENT = LATIN SMALL LETTER A WITH ACUTE
+ (0x0061, 0x0302, 0x00E2), # a + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER A WITH CIRCUMFLEX
+ (0x0061, 0x0303, 0x00E3), # a + COMBINING TILDE = LATIN SMALL LETTER A WITH TILDE
+ (0x0061, 0x0308, 0x00E4), # a + COMBINING DIAERESIS = LATIN SMALL LETTER A WITH DIAERESIS
+ (0x0061, 0x030A, 0x00E5), # a + COMBINING RING ABOVE = LATIN SMALL LETTER A WITH RING ABOVE
+ (0x0063, 0x0327, 0x00E7), # c + COMBINING CEDILLA = LATIN SMALL LETTER C WITH CEDILLA
+ (0x0065, 0x0300, 0x00E8), # e + COMBINING GRAVE ACCENT = LATIN SMALL LETTER E WITH GRAVE
+ (0x0065, 0x0301, 0x00E9), # e + COMBINING ACUTE ACCENT = LATIN SMALL LETTER E WITH ACUTE
+ (0x0065, 0x0302, 0x00EA), # e + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER E WITH CIRCUMFLEX
+ (0x0065, 0x0308, 0x00EB), # e + COMBINING DIAERESIS = LATIN SMALL LETTER E WITH DIAERESIS
+ (0x0069, 0x0300, 0x00EC), # i + COMBINING GRAVE ACCENT = LATIN SMALL LETTER I WITH GRAVE
+ (0x0069, 0x0301, 0x00ED), # i + COMBINING ACUTE ACCENT = LATIN SMALL LETTER I WITH ACUTE
+ (0x0069, 0x0302, 0x00EE), # i + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER I WITH CIRCUMFLEX
+ (0x0069, 0x0308, 0x00EF), # i + COMBINING DIAERESIS = LATIN SMALL LETTER I WITH DIAERESIS
+ (0x006E, 0x0303, 0x00F1), # n + COMBINING TILDE = LATIN SMALL LETTER N WITH TILDE
+ (0x006F, 0x0300, 0x00F2), # o + COMBINING GRAVE ACCENT = LATIN SMALL LETTER O WITH GRAVE
+ (0x006F, 0x0301, 0x00F3), # o + COMBINING ACUTE ACCENT = LATIN SMALL LETTER O WITH ACUTE
+ (0x006F, 0x0302, 0x00F4), # o + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER O WITH CIRCUMFLEX
+ (0x006F, 0x0303, 0x00F5), # o + COMBINING TILDE = LATIN SMALL LETTER O WITH TILDE
+ (0x006F, 0x0308, 0x00F6), # o + COMBINING DIAERESIS = LATIN SMALL LETTER O WITH DIAERESIS
+ (0x0075, 0x0300, 0x00F9), # u + COMBINING GRAVE ACCENT = LATIN SMALL LETTER U WITH GRAVE
+ (0x0075, 0x0301, 0x00FA), # u + COMBINING ACUTE ACCENT = LATIN SMALL LETTER U WITH ACUTE
+ (0x0075, 0x0302, 0x00FB), # u + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER U WITH CIRCUMFLEX
+ (0x0075, 0x0308, 0x00FC), # u + COMBINING DIAERESIS = LATIN SMALL LETTER U WITH DIAERESIS
+ (0x0079, 0x0301, 0x00FD), # y + COMBINING ACUTE ACCENT = LATIN SMALL LETTER Y WITH ACUTE
+ (0x0079, 0x0308, 0x00FF), # y + COMBINING DIAERESIS = LATIN SMALL LETTER Y WITH DIAERESIS
+
+ # Common Greek characters
+ (0x0391, 0x0301, 0x0386), # Α + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER ALPHA WITH TONOS
+ (0x0395, 0x0301, 0x0388), # Ε + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER EPSILON WITH TONOS
+ (0x0397, 0x0301, 0x0389), # Η + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER ETA WITH TONOS
+ (0x0399, 0x0301, 0x038A), # Ι + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER IOTA WITH TONOS
+ (0x039F, 0x0301, 0x038C), # Ο + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER OMICRON WITH TONOS
+ (0x03A5, 0x0301, 0x038E), # Υ + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER UPSILON WITH TONOS
+ (0x03A9, 0x0301, 0x038F), # Ω + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER OMEGA WITH TONOS
+ (0x03B1, 0x0301, 0x03AC), # α + COMBINING ACUTE ACCENT = GREEK SMALL LETTER ALPHA WITH TONOS
+ (0x03B5, 0x0301, 0x03AD), # ε + COMBINING ACUTE ACCENT = GREEK SMALL LETTER EPSILON WITH TONOS
+ (0x03B7, 0x0301, 0x03AE), # η + COMBINING ACUTE ACCENT = GREEK SMALL LETTER ETA WITH TONOS
+ (0x03B9, 0x0301, 0x03AF), # ι + COMBINING ACUTE ACCENT = GREEK SMALL LETTER IOTA WITH TONOS
+ (0x03BF, 0x0301, 0x03CC), # ο + COMBINING ACUTE ACCENT = GREEK SMALL LETTER OMICRON WITH TONOS
+ (0x03C5, 0x0301, 0x03CD), # υ + COMBINING ACUTE ACCENT = GREEK SMALL LETTER UPSILON WITH TONOS
+ (0x03C9, 0x0301, 0x03CE), # ω + COMBINING ACUTE ACCENT = GREEK SMALL LETTER OMEGA WITH TONOS
+
+ # Common Cyrillic characters
+ (0x0418, 0x0306, 0x0419), # И + COMBINING BREVE = CYRILLIC CAPITAL LETTER SHORT I
+ (0x0438, 0x0306, 0x0439), # и + COMBINING BREVE = CYRILLIC SMALL LETTER SHORT I
+ (0x0423, 0x0306, 0x040E), # У + COMBINING BREVE = CYRILLIC CAPITAL LETTER SHORT U
+ (0x0443, 0x0306, 0x045E), # у + COMBINING BREVE = CYRILLIC SMALL LETTER SHORT U
+]
+
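+# Illustrative only (not part of the generator): the pairs above mirror
+# Unicode canonical composition, so NFC-normalizing a base character
+# plus a combining mark yields the precomposed code point:
+def _example_nfc_recompose():
+    assert unicodedata.normalize('NFC', '\u0041\u0300') == '\u00C0'
+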
+full_recompose_description = "all possible recomposition pairs from the Unicode BMP"
+def collect_all_recomposition_pairs():
+ """Collect all possible recomposition pairs from the Unicode data."""
+ # Map to store recomposition pairs: (base, combining) -> recomposed
+ recompose_map = {}
+
+ # Process all assigned Unicode code points in BMP (Basic Multilingual Plane)
+ # We limit to BMP (0x0000-0xFFFF) to keep our table smaller with uint16_t
+ for cp in range(0, 0x10000):
+ try:
+ char = chr(cp)
+
+ # Skip unassigned or control characters
+ if not unicodedata.name(char, ''):
+ continue
+
+ # Find decomposition
+ decomp = unicodedata.decomposition(char)
+ if not decomp or '<' in decomp: # Skip compatibility decompositions
+ continue
+
+ # Parse the decomposition
+ parts = decomp.split()
+ if len(parts) == 2: # Simple base + combining mark
+ base = int(parts[0], 16)
+ combining = int(parts[1], 16)
+
+ # Only store if both are in BMP
+ if base < 0x10000 and combining < 0x10000:
+ recompose_map[(base, combining)] = cp
+
+ except (ValueError, TypeError):
+ continue
+
+ # Convert to a list of tuples and sort for binary search
+ recompose_list = [(base, combining, recomposed)
+ for (base, combining), recomposed in recompose_map.items()]
+ recompose_list.sort()
+
+ return recompose_list
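+
+# For illustration (not part of the generator logic): canonical decompositions
+# from unicodedata look like "0065 0301" -- e.g. unicodedata.decomposition('\u00e9')
+# returns "0065 0301", so the pair (0x0065, 0x0301) recomposes to 0x00E9.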
+
+def validate_common_pairs(full_list):
+ """Validate that all common pairs are in the full list.
+
+ Raises:
+ ValueError: If any common pair is missing or has a different recomposition
+ value than what's in the full table.
+ """
+ full_pairs = {(base, combining): recomposed for base, combining, recomposed in full_list}
+ for base, combining, recomposed in COMMON_RECOMPOSITION_PAIRS:
+ full_recomposed = full_pairs.get((base, combining))
+ if full_recomposed is None:
+ error_msg = f"Error: Common pair (0x{base:04X}, 0x{combining:04X}) not found in full data"
+ print(error_msg)
+ raise ValueError(error_msg)
+ elif full_recomposed != recomposed:
+ error_msg = (f"Error: Common pair (0x{base:04X}, 0x{combining:04X}) has different recomposition: "
+ f"0x{recomposed:04X} vs 0x{full_recomposed:04X}")
+ print(error_msg)
+ raise ValueError(error_msg)
+
+def generate_recomposition_table(use_full_list=False, out_file=DEFAULT_OUT_FILE):
+ """Generate the recomposition C table."""
+
+ # Collect all recomposition pairs for validation
+ full_recompose_list = collect_all_recomposition_pairs()
+
+ # Decide which list to use
+ if use_full_list:
+ print("Using full recomposition list...")
+ recompose_list = full_recompose_list
+ table_description = full_recompose_description
+ alt_list = COMMON_RECOMPOSITION_PAIRS
+ alt_description = common_recompose_description
+ else:
+ print("Using common recomposition list...")
+ # Validate that all common pairs are in the full list
+ validate_common_pairs(full_recompose_list)
+ recompose_list = sorted(COMMON_RECOMPOSITION_PAIRS)
+ table_description = common_recompose_description
+ alt_list = full_recompose_list
+ alt_description = full_recompose_description
+ generation_mode = " --full" if use_full_list else ""
+ alternative_mode = " --full" if not use_full_list else ""
+ table_description_detail = f"{table_description} ({len(recompose_list)} entries)"
+ alt_description_detail = f"{alt_description} ({len(alt_list)} entries)"
+
+ # Calculate min/max values for boundary checks
+ min_base = min(base for base, _, _ in recompose_list)
+ max_base = max(base for base, _, _ in recompose_list)
+ min_combining = min(combining for _, combining, _ in recompose_list)
+ max_combining = max(combining for _, combining, _ in recompose_list)
+
+ # Generate implementation file
+ with open(out_file, 'w') as f:
+ f.write(f"""\
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * {out_file} - Unicode character recomposition
+ *
+ * Auto-generated by {this_file}{generation_mode}
+ *
+ * Unicode Version: {unicodedata.unidata_version}
+ *
+{textwrap.fill(
+ f"This file contains a table with {table_description_detail}. " +
+ f"To generate a table with {alt_description_detail} instead, run:",
+ width=75, initial_indent=" * ", subsequent_indent=" * ")}
+ *
+ * python3 {this_file}{alternative_mode}
+ */
+
+/*
+ * Table of {table_description}
+ * Sorted by base character and then combining mark for binary search
+ */
+static const struct ucs_recomposition ucs_recomposition_table[] = {{
+""")
+
+ for base, combining, recomposed in recompose_list:
+ try:
+ base_name = unicodedata.name(chr(base))
+ combining_name = unicodedata.name(chr(combining))
+ recomposed_name = unicodedata.name(chr(recomposed))
+ comment = f"/* {base_name} + {combining_name} = {recomposed_name} */"
+ except ValueError:
+ comment = f"/* U+{base:04X} + U+{combining:04X} = U+{recomposed:04X} */"
+ f.write(f"\t{{ 0x{base:04X}, 0x{combining:04X}, 0x{recomposed:04X} }}, {comment}\n")
+
+ f.write(f"""\
+}};
+
+/*
+ * Boundary values for quick rejection
+ * These are calculated by analyzing the table during generation
+ */
+#define UCS_RECOMPOSE_MIN_BASE 0x{min_base:04X}
+#define UCS_RECOMPOSE_MAX_BASE 0x{max_base:04X}
+#define UCS_RECOMPOSE_MIN_MARK 0x{min_combining:04X}
+#define UCS_RECOMPOSE_MAX_MARK 0x{max_combining:04X}
+""")
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Generate Unicode recomposition table")
+ parser.add_argument("--full", action="store_true",
+ help="Generate a full recomposition table (default: common pairs only)")
+ parser.add_argument("-o", "--output", dest="output_file", default=DEFAULT_OUT_FILE,
+ help=f"Output file name (default: {DEFAULT_OUT_FILE})")
+ args = parser.parse_args()
+
+ generate_recomposition_table(use_full_list=args.full, out_file=args.output_file)
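+
+# Example (illustrative): with the default common list, the generated
+# ucs_recompose_table.h contains entries of the form
+#   { 0x0065, 0x0301, 0x00E9 }, /* LATIN SMALL LETTER E + COMBINING ACUTE ACCENT = LATIN SMALL LETTER E WITH ACUTE */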
diff --git a/drivers/tty/vt/gen_ucs_width_table.py b/drivers/tty/vt/gen_ucs_width_table.py
new file mode 100755
index 000000000000..76e80ebeff13
--- /dev/null
+++ b/drivers/tty/vt/gen_ucs_width_table.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Leverage Python's unicodedata module to generate ucs_width_table.h
+
+import unicodedata
+import sys
+import argparse
+
+# This script's file name
+from pathlib import Path
+this_file = Path(__file__).name
+
+# Default output file name
+DEFAULT_OUT_FILE = "ucs_width_table.h"
+
+# --- Global Constants for Width Assignments ---
+
+# Known zero-width characters
+KNOWN_ZERO_WIDTH = (
+ 0x200B, # ZERO WIDTH SPACE
+ 0x200C, # ZERO WIDTH NON-JOINER
+ 0x200D, # ZERO WIDTH JOINER
+ 0x2060, # WORD JOINER
+ 0xFEFF # ZERO WIDTH NO-BREAK SPACE (BOM)
+)
+
+# Zero-width emoji modifiers and components
+# NOTE: Some of these characters would normally be single-width according to
+# East Asian Width properties, but we deliberately override them to be
+# zero-width because they function as modifiers in emoji sequences.
+EMOJI_ZERO_WIDTH = [
+ # Skin tone modifiers
+ (0x1F3FB, 0x1F3FF), # Emoji modifiers (skin tones)
+
+ # Variation selectors (note: VS16 is treated specially in vt.c)
+ (0xFE00, 0xFE0F), # Variation Selectors 1-16
+
+ # Gender and hair style modifiers
+ # These would be single-width by Unicode properties, but are zero-width
+ # when part of emoji
+ (0x2640, 0x2640), # Female sign
+ (0x2642, 0x2642), # Male sign
+ (0x26A7, 0x26A7), # Transgender symbol
+ (0x1F9B0, 0x1F9B3), # Hair components (red, curly, white, bald)
+
+ # Tag characters
+ (0xE0020, 0xE007E), # Tags
+]
+
+# Regional indicators (flag components)
+REGIONAL_INDICATORS = (0x1F1E6, 0x1F1FF) # Regional indicator symbols A-Z
+
+# Double-width emoji ranges
+#
+# Many emoji characters are classified as single-width according to Unicode
+# Standard Annex #11 East Asian Width property (N or Neutral), but we
+# deliberately override them to be double-width. References:
+# 1. Unicode Technical Standard #51: Unicode Emoji
+# (https://www.unicode.org/reports/tr51/)
+# 2. Principle of "emoji presentation" in the W3C CSS Text specification
+# (https://drafts.csswg.org/css-text-3/#character-properties)
+# 3. Terminal emulator implementations (iTerm2, Windows Terminal, etc.) which
+# universally render emoji as double-width characters regardless of their
+# Unicode EAW property
+# 4. W3C Work Item: Requirements for Japanese Text Layout - Section 3.8.1
+# Emoji width (https://www.w3.org/TR/jlreq/)
+EMOJI_RANGES = [
+ (0x1F000, 0x1F02F), # Mahjong Tiles (EAW: N, but displayed as double-width)
+ (0x1F0A0, 0x1F0FF), # Playing Cards (EAW: N, but displayed as double-width)
+ (0x1F300, 0x1F5FF), # Miscellaneous Symbols and Pictographs
+ (0x1F600, 0x1F64F), # Emoticons
+ (0x1F680, 0x1F6FF), # Transport and Map Symbols
+ (0x1F700, 0x1F77F), # Alchemical Symbols
+ (0x1F780, 0x1F7FF), # Geometric Shapes Extended
+ (0x1F800, 0x1F8FF), # Supplemental Arrows-C
+ (0x1F900, 0x1F9FF), # Supplemental Symbols and Pictographs
+ (0x1FA00, 0x1FA6F), # Chess Symbols
+ (0x1FA70, 0x1FAFF), # Symbols and Pictographs Extended-A
+]
+
+def create_width_tables():
+ """
+ Creates Unicode character width tables and returns the data structures.
+
+ Returns:
+ tuple: (zero_width_ranges, double_width_ranges)
+ """
+
+ # Width data mapping
+ width_map = {} # Maps code points to width (0, 1, 2)
+
+ # Mark emoji modifiers as zero-width
+ for start, end in EMOJI_ZERO_WIDTH:
+ for cp in range(start, end + 1):
+ width_map[cp] = 0
+
+ # Mark all regional indicators as single-width: they are usually paired,
+ # providing a combined width of 2 when displayed together.
+ start, end = REGIONAL_INDICATORS
+ for cp in range(start, end + 1):
+ width_map[cp] = 1
+
+ # Process all assigned Unicode code points (Basic Multilingual Plane +
+ # Supplementary Planes), range 0x0 to 0x10FFFF (the full Unicode range)
+ for block_start in range(0, 0x110000, 0x1000):
+ block_end = block_start + 0x1000
+ for cp in range(block_start, block_end):
+ try:
+ char = chr(cp)
+
+ # Skip if already processed
+ if cp in width_map:
+ continue
+
+ # Check for combining marks and format characters
+ category = unicodedata.category(char)
+
+ # Combining marks
+ if category.startswith('M'):
+ width_map[cp] = 0
+ continue
+
+ # Format characters
+ # Since we have no support for bidirectional text, all format
+ # characters (category Cf) can be treated with width 0 (zero)
+ # for simplicity, as they don't need to occupy visual space
+ # in a non-bidirectional text environment.
+ if category == 'Cf':
+ width_map[cp] = 0
+ continue
+
+ # Known zero-width characters
+ if cp in KNOWN_ZERO_WIDTH:
+ width_map[cp] = 0
+ continue
+
+ # Use East Asian Width property
+ eaw = unicodedata.east_asian_width(char)
+ if eaw in ('F', 'W'): # Fullwidth or Wide
+ width_map[cp] = 2
+ elif eaw in ('Na', 'H', 'N', 'A'): # Narrow, Halfwidth, Neutral, Ambiguous
+ width_map[cp] = 1
+ else:
+ # Default to single-width for unknown
+ width_map[cp] = 1
+
+ except (ValueError, OverflowError):
+ # Skip invalid code points
+ continue
+
+ # Process Emoji - generally double-width
+ for start, end in EMOJI_RANGES:
+ for cp in range(start, end + 1):
+ if cp not in width_map or width_map[cp] != 0: # Don't override zero-width
+ try:
+ char = chr(cp)
+ width_map[cp] = 2
+ except (ValueError, OverflowError):
+ continue
+
+ # Optimize to create range tables
+ def ranges_optimize(width_data, target_width):
+ points = sorted([cp for cp, width in width_data.items() if width == target_width])
+ if not points:
+ return []
+
+ # Group consecutive code points into ranges
+ ranges = []
+ start = points[0]
+ prev = start
+
+ for cp in points[1:]:
+ if cp > prev + 1:
+ ranges.append((start, prev))
+ start = cp
+ prev = cp
+
+ # Add the last range
+ ranges.append((start, prev))
+ return ranges
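+
+ # Illustration: for width_map entries {0x0300: 0, 0x0301: 0, 0x0303: 0},
+ # ranges_optimize(width_map, 0) yields [(0x0300, 0x0301), (0x0303, 0x0303)],
+ # coalescing consecutive code points into closed ranges.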
+
+ # Extract ranges for each width
+ zero_width_ranges = ranges_optimize(width_map, 0)
+ double_width_ranges = ranges_optimize(width_map, 2)
+
+ return zero_width_ranges, double_width_ranges
+
+def write_tables(zero_width_ranges, double_width_ranges, out_file=DEFAULT_OUT_FILE):
+ """
+ Write the generated tables to C header file.
+
+ Args:
+ zero_width_ranges: List of (start, end) ranges for zero-width characters
+ double_width_ranges: List of (start, end) ranges for double-width characters
+ out_file: Output file name (default: DEFAULT_OUT_FILE)
+ """
+
+ # Function to split ranges into BMP (16-bit) and non-BMP (above 16-bit)
+ def split_ranges_by_size(ranges):
+ bmp_ranges = []
+ non_bmp_ranges = []
+
+ for start, end in ranges:
+ if end <= 0xFFFF:
+ bmp_ranges.append((start, end))
+ elif start > 0xFFFF:
+ non_bmp_ranges.append((start, end))
+ else:
+ # Split the range at 0xFFFF
+ bmp_ranges.append((start, 0xFFFF))
+ non_bmp_ranges.append((0x10000, end))
+
+ return bmp_ranges, non_bmp_ranges
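+
+ # Illustration: a range straddling the BMP boundary such as (0xFF00, 0x10010)
+ # is split into (0xFF00, 0xFFFF) for the 16-bit table and (0x10000, 0x10010)
+ # for the 32-bit table.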
+
+ # Split ranges into BMP and non-BMP
+ zero_width_bmp, zero_width_non_bmp = split_ranges_by_size(zero_width_ranges)
+ double_width_bmp, double_width_non_bmp = split_ranges_by_size(double_width_ranges)
+
+ # Function to generate code point description comments
+ def get_code_point_comment(start, end):
+ try:
+ start_char_desc = unicodedata.name(chr(start))
+ if start == end:
+ return f"/* {start_char_desc} */"
+ else:
+ end_char_desc = unicodedata.name(chr(end))
+ return f"/* {start_char_desc} - {end_char_desc} */"
+ except ValueError:
+ if start == end:
+ return f"/* U+{start:04X} */"
+ else:
+ return f"/* U+{start:04X} - U+{end:04X} */"
+
+ # Generate C tables
+ with open(out_file, 'w') as f:
+ f.write(f"""\
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * {out_file} - Unicode character width
+ *
+ * Auto-generated by {this_file}
+ *
+ * Unicode Version: {unicodedata.unidata_version}
+ */
+
+/* Zero-width character ranges (BMP - Basic Multilingual Plane, U+0000 to U+FFFF) */
+static const struct ucs_interval16 ucs_zero_width_bmp_ranges[] = {{
+""")
+
+ for start, end in zero_width_bmp:
+ comment = get_code_point_comment(start, end)
+ f.write(f"\t{{ 0x{start:04X}, 0x{end:04X} }}, {comment}\n")
+
+ f.write("""\
+};
+
+/* Zero-width character ranges (non-BMP, U+10000 and above) */
+static const struct ucs_interval32 ucs_zero_width_non_bmp_ranges[] = {
+""")
+
+ for start, end in zero_width_non_bmp:
+ comment = get_code_point_comment(start, end)
+ f.write(f"\t{{ 0x{start:05X}, 0x{end:05X} }}, {comment}\n")
+
+ f.write("""\
+};
+
+/* Double-width character ranges (BMP - Basic Multilingual Plane, U+0000 to U+FFFF) */
+static const struct ucs_interval16 ucs_double_width_bmp_ranges[] = {
+""")
+
+ for start, end in double_width_bmp:
+ comment = get_code_point_comment(start, end)
+ f.write(f"\t{{ 0x{start:04X}, 0x{end:04X} }}, {comment}\n")
+
+ f.write("""\
+};
+
+/* Double-width character ranges (non-BMP, U+10000 and above) */
+static const struct ucs_interval32 ucs_double_width_non_bmp_ranges[] = {
+""")
+
+ for start, end in double_width_non_bmp:
+ comment = get_code_point_comment(start, end)
+ f.write(f"\t{{ 0x{start:05X}, 0x{end:05X} }}, {comment}\n")
+
+ f.write("};\n")
+
+if __name__ == "__main__":
+ # Parse command line arguments
+ parser = argparse.ArgumentParser(description="Generate Unicode width tables")
+ parser.add_argument("-o", "--output", dest="output_file", default=DEFAULT_OUT_FILE,
+ help=f"Output file name (default: {DEFAULT_OUT_FILE})")
+ args = parser.parse_args()
+
+ # Write tables to header file
+ zero_width_ranges, double_width_ranges = create_width_tables()
+ write_tables(zero_width_ranges, double_width_ranges, out_file=args.output_file)
+
+ # Print summary
+ zero_width_count = sum(end - start + 1 for start, end in zero_width_ranges)
+ double_width_count = sum(end - start + 1 for start, end in double_width_ranges)
+ print(f"Generated {args.output_file} with:")
+ print(f"- {len(zero_width_ranges)} zero-width ranges covering ~{zero_width_count} code points")
+ print(f"- {len(double_width_ranges)} double-width ranges covering ~{double_width_count} code points")
+ print(f"- Unicode Version: {unicodedata.unidata_version}")
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index ae92e6a50a65..dc585079c2fb 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -376,6 +376,17 @@ static void to_utf8(struct vc_data *vc, uint c)
}
}
+static void put_queue_utf8(struct vc_data *vc, u32 value)
+{
+ if (kbd->kbdmode == VC_UNICODE) {
+ to_utf8(vc, value);
+ } else {
+ int c = conv_uni_to_8bit(value);
+
+ if (c != -1)
+ put_queue(vc, c);
+ }
+}
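+
+/*
+ * Example (illustrative): for value == 0x00E9 (e with acute), VC_UNICODE
+ * mode queues the UTF-8 bytes 0xC3 0xA9 via to_utf8(), while 8-bit modes
+ * queue the single byte from the active console map, or nothing when
+ * conv_uni_to_8bit() returns -1.
+ */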
+
/* FIXME: review locking for vt.c callers */
static void set_leds(void)
{
@@ -454,13 +465,7 @@ static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
if (ch == ' ' || ch == (BRL_UC_ROW|0) || ch == d)
return d;
- if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, d);
- else {
- int c = conv_uni_to_8bit(d);
- if (c != -1)
- put_queue(vc, c);
- }
+ put_queue_utf8(vc, d);
return ch;
}
@@ -471,13 +476,7 @@ static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
static void fn_enter(struct vc_data *vc)
{
if (diacr) {
- if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, diacr);
- else {
- int c = conv_uni_to_8bit(diacr);
- if (c != -1)
- put_queue(vc, c);
- }
+ put_queue_utf8(vc, diacr);
diacr = 0;
}
@@ -685,13 +684,7 @@ static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
diacr = value;
return;
}
- if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, value);
- else {
- int c = conv_uni_to_8bit(value);
- if (c != -1)
- put_queue(vc, c);
- }
+ put_queue_utf8(vc, value);
}
/*
@@ -1519,7 +1512,7 @@ static void kbd_keycode(unsigned int keycode, int down, bool hw_raw)
if ((raw_mode || kbd->kbdmode == VC_OFF) && type != KT_SPEC && type != KT_SHIFT)
return;
- (*k_handler[type])(vc, keysym & 0xff, !down);
+ (*k_handler[type])(vc, KVAL(keysym), !down);
param.ledstate = kbd->ledflagstate;
atomic_notifier_call_chain(&keyboard_notifier_list, KBD_POST_KEYSYM, &param);
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 791e2f1f7c0b..24b0a53e5a79 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -403,6 +403,12 @@ int paste_selection(struct tty_struct *tty)
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
+ bool bp = vc->vc_bracketed_paste;
+ static const char bracketed_paste_start[] = "\033[200~";
+ static const char bracketed_paste_end[] = "\033[201~";
+ const char *bps = bp ? bracketed_paste_start : NULL;
+ const char *bpe = bp ? bracketed_paste_end : NULL;
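+
+ /*
+  * Bracketed paste (as in xterm, typically enabled by the application
+  * with CSI ? 2004 h) wraps the pasted text in ESC [ 200 ~ ... ESC [ 201 ~
+  * so that receivers can tell pasted data from typed input.
+  */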
+
console_lock();
poke_blanked_console();
console_unlock();
@@ -414,7 +420,7 @@ int paste_selection(struct tty_struct *tty)
add_wait_queue(&vc->paste_wait, &wait);
mutex_lock(&vc_sel.lock);
- while (vc_sel.buffer && vc_sel.buf_len > pasted) {
+ while (vc_sel.buffer && (vc_sel.buf_len > pasted || bpe)) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
ret = -EINTR;
@@ -427,10 +433,27 @@ int paste_selection(struct tty_struct *tty)
continue;
}
__set_current_state(TASK_RUNNING);
+
+ if (bps) {
+ bps += tty_ldisc_receive_buf(ld, bps, NULL, strlen(bps));
+ if (*bps != '\0')
+ continue;
+ bps = NULL;
+ }
+
count = vc_sel.buf_len - pasted;
- count = tty_ldisc_receive_buf(ld, vc_sel.buffer + pasted, NULL,
- count);
- pasted += count;
+ if (count) {
+ pasted += tty_ldisc_receive_buf(ld, vc_sel.buffer + pasted,
+ NULL, count);
+ if (vc_sel.buf_len > pasted)
+ continue;
+ }
+
+ if (bpe) {
+ bpe += tty_ldisc_receive_buf(ld, bpe, NULL, strlen(bpe));
+ if (*bpe == '\0')
+ bpe = NULL;
+ }
}
mutex_unlock(&vc_sel.lock);
remove_wait_queue(&vc->paste_wait, &wait);
diff --git a/drivers/tty/vt/ucs.c b/drivers/tty/vt/ucs.c
new file mode 100644
index 000000000000..6ead622b7713
--- /dev/null
+++ b/drivers/tty/vt/ucs.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ucs.c - Universal Character Set processing
+ */
+
+#include <linux/array_size.h>
+#include <linux/bsearch.h>
+#include <linux/consolemap.h>
+#include <linux/minmax.h>
+
+struct ucs_interval16 {
+ u16 first;
+ u16 last;
+};
+
+struct ucs_interval32 {
+ u32 first;
+ u32 last;
+};
+
+#include "ucs_width_table.h"
+
+static int interval16_cmp(const void *key, const void *element)
+{
+ u16 cp = *(u16 *)key;
+ const struct ucs_interval16 *entry = element;
+
+ if (cp < entry->first)
+ return -1;
+ if (cp > entry->last)
+ return 1;
+ return 0;
+}
+
+static int interval32_cmp(const void *key, const void *element)
+{
+ u32 cp = *(u32 *)key;
+ const struct ucs_interval32 *entry = element;
+
+ if (cp < entry->first)
+ return -1;
+ if (cp > entry->last)
+ return 1;
+ return 0;
+}
+
+static bool cp_in_range16(u16 cp, const struct ucs_interval16 *ranges, size_t size)
+{
+ if (cp < ranges[0].first || cp > ranges[size - 1].last)
+ return false;
+
+ return __inline_bsearch(&cp, ranges, size, sizeof(*ranges),
+ interval16_cmp) != NULL;
+}
+
+static bool cp_in_range32(u32 cp, const struct ucs_interval32 *ranges, size_t size)
+{
+ if (cp < ranges[0].first || cp > ranges[size - 1].last)
+ return false;
+
+ return __inline_bsearch(&cp, ranges, size, sizeof(*ranges),
+ interval32_cmp) != NULL;
+}
+
+#define UCS_IS_BMP(cp) ((cp) <= 0xffff)
+
+/**
+ * ucs_is_zero_width() - Determine if a Unicode code point is zero-width.
+ * @cp: Unicode code point (UCS-4)
+ *
+ * Return: true if the character is zero-width, false otherwise
+ */
+bool ucs_is_zero_width(u32 cp)
+{
+ if (UCS_IS_BMP(cp))
+ return cp_in_range16(cp, ucs_zero_width_bmp_ranges,
+ ARRAY_SIZE(ucs_zero_width_bmp_ranges));
+ else
+ return cp_in_range32(cp, ucs_zero_width_non_bmp_ranges,
+ ARRAY_SIZE(ucs_zero_width_non_bmp_ranges));
+}
+
+/**
+ * ucs_is_double_width() - Determine if a Unicode code point is double-width.
+ * @cp: Unicode code point (UCS-4)
+ *
+ * Return: true if the character is double-width, false otherwise
+ */
+bool ucs_is_double_width(u32 cp)
+{
+ if (UCS_IS_BMP(cp))
+ return cp_in_range16(cp, ucs_double_width_bmp_ranges,
+ ARRAY_SIZE(ucs_double_width_bmp_ranges));
+ else
+ return cp_in_range32(cp, ucs_double_width_non_bmp_ranges,
+ ARRAY_SIZE(ucs_double_width_non_bmp_ranges));
+}
+
+/*
+ * Structure for base character / combining mark pairs and the resulting recompositions.
+ * Using u16 to save space since all values are within BMP range.
+ */
+struct ucs_recomposition {
+ u16 base; /* base character */
+ u16 mark; /* combining mark */
+ u16 recomposed; /* corresponding recomposed character */
+};
+
+#include "ucs_recompose_table.h"
+
+struct compare_key {
+ u16 base;
+ u16 mark;
+};
+
+static int recomposition_cmp(const void *key, const void *element)
+{
+ const struct compare_key *search_key = key;
+ const struct ucs_recomposition *entry = element;
+
+ /* Compare base character first */
+ if (search_key->base < entry->base)
+ return -1;
+ if (search_key->base > entry->base)
+ return 1;
+
+ /* Base characters match, now compare combining character */
+ if (search_key->mark < entry->mark)
+ return -1;
+ if (search_key->mark > entry->mark)
+ return 1;
+
+ /* Both match */
+ return 0;
+}
+
+/**
+ * ucs_recompose() - Attempt to recompose two Unicode characters into a single character.
+ * @base: Base Unicode code point (UCS-4)
+ * @mark: Combining mark Unicode code point (UCS-4)
+ *
+ * Return: Recomposed Unicode code point, or 0 if no recomposition is possible
+ */
+u32 ucs_recompose(u32 base, u32 mark)
+{
+ /* Check if characters are within the range of our table */
+ if (base < UCS_RECOMPOSE_MIN_BASE || base > UCS_RECOMPOSE_MAX_BASE ||
+ mark < UCS_RECOMPOSE_MIN_MARK || mark > UCS_RECOMPOSE_MAX_MARK)
+ return 0;
+
+ struct compare_key key = { base, mark };
+ struct ucs_recomposition *result =
+ __inline_bsearch(&key, ucs_recomposition_table,
+ ARRAY_SIZE(ucs_recomposition_table),
+ sizeof(*ucs_recomposition_table),
+ recomposition_cmp);
+
+ return result ? result->recomposed : 0;
+}
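+
+/*
+ * Example (illustrative): ucs_recompose(0x0065, 0x0301) returns 0x00E9
+ * (e + combining acute accent = LATIN SMALL LETTER E WITH ACUTE); a pair
+ * not present in the table returns 0.
+ */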
+
+/*
+ * The fallback table structures implement a 2-level lookup.
+ */
+
+struct ucs_page_desc {
+ u8 page; /* Page index (high byte of code points) */
+ u8 count; /* Number of entries in this page */
+ u16 start; /* Start index in entries array */
+};
+
+struct ucs_page_entry {
+ u8 offset; /* Offset within page (0-255) */
+ u8 fallback; /* Fallback character or range start marker */
+};
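+
+/*
+ * A run of consecutive offsets sharing one fallback is encoded as two
+ * consecutive entries: { first_offset, UCS_PAGE_ENTRY_RANGE_MARKER }
+ * followed by { last_offset, fallback }. In the shipped table the marker
+ * shows up as a 0x00 fallback byte on the range-start entry.
+ */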
+
+#include "ucs_fallback_table.h"
+
+static int ucs_page_desc_cmp(const void *key, const void *element)
+{
+ u8 page = *(u8 *)key;
+ const struct ucs_page_desc *entry = element;
+
+ if (page < entry->page)
+ return -1;
+ if (page > entry->page)
+ return 1;
+ return 0;
+}
+
+static int ucs_page_entry_cmp(const void *key, const void *element)
+{
+ u8 offset = *(u8 *)key;
+ const struct ucs_page_entry *entry = element;
+
+ if (offset < entry->offset)
+ return -1;
+ if (entry->fallback == UCS_PAGE_ENTRY_RANGE_MARKER) {
+ if (offset > entry[1].offset)
+ return 1;
+ } else {
+ if (offset > entry->offset)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * ucs_get_fallback() - Get a substitution for the provided Unicode character
+ * @cp: Unicode code point (UCS-4)
+ *
+ * Get a simpler fallback character for the provided Unicode character.
+ * This is used for terminal display when the corresponding glyph is
+ * unavailable. The substitution may not be as good as the actual glyph
+ * for the original character, but it is still far more helpful than a
+ * squared question mark.
+ *
+ * Return: Fallback Unicode code point, or 0 if none is available
+ */
+u32 ucs_get_fallback(u32 cp)
+{
+ const struct ucs_page_desc *page;
+ const struct ucs_page_entry *entry;
+ u8 page_idx = cp >> 8, offset = cp;
+
+ if (!UCS_IS_BMP(cp))
+ return 0;
+
+ /*
+ * Full-width to ASCII mapping (covering all printable ASCII 33-126)
+ * 0xFF01 (!) to 0xFF5E (~) -> ASCII 33 (!) to 126 (~)
+ * We process them programmatically to reduce the table size.
+ */
+ if (cp >= 0xFF01 && cp <= 0xFF5E)
+ return cp - 0xFF01 + 33;
+
+ page = __inline_bsearch(&page_idx, ucs_fallback_pages,
+ ARRAY_SIZE(ucs_fallback_pages),
+ sizeof(*ucs_fallback_pages),
+ ucs_page_desc_cmp);
+ if (!page)
+ return 0;
+
+ entry = __inline_bsearch(&offset, ucs_fallback_entries + page->start,
+ page->count, sizeof(*ucs_fallback_entries),
+ ucs_page_entry_cmp);
+ if (!entry)
+ return 0;
+
+ if (entry->fallback == UCS_PAGE_ENTRY_RANGE_MARKER)
+ entry++;
+ return entry->fallback;
+}
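+
+/*
+ * Example (illustrative): ucs_get_fallback(0x00A2) returns 'c' for CENT
+ * SIGN, and ucs_get_fallback(0x00C3) resolves through the 0x00C0-0x00C5
+ * range entry to 'A'.
+ */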
diff --git a/drivers/tty/vt/ucs_fallback_table.h_shipped b/drivers/tty/vt/ucs_fallback_table.h_shipped
new file mode 100644
index 000000000000..2da5a8fe1cf1
--- /dev/null
+++ b/drivers/tty/vt/ucs_fallback_table.h_shipped
@@ -0,0 +1,3346 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ucs_fallback_table.h - Unicode character fallback table
+ *
+ * Auto-generated by gen_ucs_fallback_table.py
+ *
+ * Unicode Version: 16.0.0
+ * Unidecode Version: 1.3.8
+ *
+ * This file contains optimized tables that map complex Unicode characters
+ * to simpler fallback characters for terminal display when corresponding
+ * glyphs are unavailable.
+ */
+
+static const struct ucs_page_desc ucs_fallback_pages[] = {
+ { 0x00, 62, 0 },
+ { 0x01, 218, 62 },
+ { 0x02, 196, 280 },
+ { 0x03, 96, 476 },
+ { 0x04, 113, 572 },
+ { 0x05, 100, 685 },
+ { 0x06, 119, 785 },
+ { 0x07, 91, 904 },
+ { 0x09, 99, 995 },
+ { 0x0A, 78, 1094 },
+ { 0x0B, 79, 1172 },
+ { 0x0C, 85, 1251 },
+ { 0x0D, 73, 1336 },
+ { 0x0E, 83, 1409 },
+ { 0x0F, 69, 1492 },
+ { 0x10, 93, 1561 },
+ { 0x11, 51, 1654 },
+ { 0x13, 22, 1705 },
+ { 0x14, 30, 1727 },
+ { 0x15, 17, 1757 },
+ { 0x16, 81, 1774 },
+ { 0x17, 47, 1855 },
+ { 0x18, 96, 1902 },
+ { 0x1D, 105, 1998 },
+ { 0x1E, 246, 2103 },
+ { 0x1F, 94, 2349 },
+ { 0x20, 107, 2443 },
+ { 0x21, 136, 2550 },
+ { 0x22, 34, 2686 },
+ { 0x23, 4, 2720 },
+ { 0x24, 72, 2724 },
+ { 0x25, 60, 2796 },
+ { 0x26, 6, 2856 },
+ { 0x27, 18, 2862 },
+ { 0x28, 64, 2880 },
+ { 0x29, 1, 2944 },
+ { 0x2C, 15, 2945 },
+ { 0x2E, 29, 2960 },
+ { 0x30, 53, 2989 },
+ { 0x31, 50, 3042 },
+ { 0x32, 5, 3092 },
+ { 0xA0, 4, 3097 },
+ { 0xC5, 2, 3101 },
+ { 0xC6, 2, 3103 },
+ { 0xC7, 1, 3105 },
+ { 0xFB, 35, 3106 },
+ { 0xFE, 37, 3141 },
+ { 0xFF, 50, 3178 },
+};
+
+/* Page entries array (referenced by page descriptors) */
+static const struct ucs_page_entry ucs_fallback_entries[] = {
+ /* Entries for page 0x00 */
+ { 0xA0, 0x20 }, /* NO-BREAK SPACE -> ' ' */
+ { 0xA1, 0x21 }, /* INVERTED EXCLAMATION MARK -> '!' */
+ { 0xA2, 0x63 }, /* CENT SIGN -> 'c' */
+ { 0xA3, 0x4C }, /* POUND SIGN -> 'L' */
+ { 0xA5, 0x59 }, /* YEN SIGN -> 'Y' */
+ { 0xA6, 0x7C }, /* BROKEN BAR -> '|' */
+ { 0xA7, 0x53 }, /* SECTION SIGN -> 'S' */
+ { 0xA8, 0x22 }, /* DIAERESIS -> '"' */
+ { 0xA9, 0x43 }, /* COPYRIGHT SIGN -> 'C' */
+ { 0xAA, 0x61 }, /* FEMININE ORDINAL INDICATOR -> 'a' */
+ { 0xAB, 0x3C }, /* LEFT-POINTING DOUBLE ANGLE QUOTATION MARK -> '<' */
+ { 0xAC, 0x21 }, /* NOT SIGN -> '!' */
+ { 0xAE, 0x52 }, /* REGISTERED SIGN -> 'R' */
+ { 0xAF, 0x2D }, /* MACRON -> '-' */
+ { 0xB0, 0x6F }, /* DEGREE SIGN -> 'o' */
+ { 0xB2, 0x32 }, /* SUPERSCRIPT TWO -> '2' */
+ { 0xB3, 0x33 }, /* SUPERSCRIPT THREE -> '3' */
+ { 0xB4, 0x27 }, /* ACUTE ACCENT -> ''' */
+ { 0xB5, 0x75 }, /* MICRO SIGN -> 'u' */
+ { 0xB6, 0x50 }, /* PILCROW SIGN -> 'P' */
+ { 0xB7, 0x2A }, /* MIDDLE DOT -> '*' */
+ { 0xB8, 0x2C }, /* CEDILLA -> ',' */
+ { 0xB9, 0x31 }, /* SUPERSCRIPT ONE -> '1' */
+ { 0xBA, 0x6F }, /* MASCULINE ORDINAL INDICATOR -> 'o' */
+ { 0xBB, 0x3E }, /* RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK -> '>' */
+ { 0xBF, 0x3F }, /* INVERTED QUESTION MARK -> '?' */
+ { 0xC0, 0x00 }, /* LATIN CAPITAL LETTER A WITH GRAVE -> ... */
+ { 0xC5, 0x41 }, /* LATIN CAPITAL LETTER A WITH RING ABOVE -> 'A' */
+ { 0xC6, 0x45 }, /* LATIN CAPITAL LETTER AE -> 'E' */
+ { 0xC7, 0x43 }, /* LATIN CAPITAL LETTER C WITH CEDILLA -> 'C' */
+ { 0xC8, 0x00 }, /* LATIN CAPITAL LETTER E WITH GRAVE -> ... */
+ { 0xCB, 0x45 }, /* LATIN CAPITAL LETTER E WITH DIAERESIS -> 'E' */
+ { 0xCC, 0x00 }, /* LATIN CAPITAL LETTER I WITH GRAVE -> ... */
+ { 0xCF, 0x49 }, /* LATIN CAPITAL LETTER I WITH DIAERESIS -> 'I' */
+ { 0xD0, 0x44 }, /* LATIN CAPITAL LETTER ETH -> 'D' */
+ { 0xD1, 0x4E }, /* LATIN CAPITAL LETTER N WITH TILDE -> 'N' */
+ { 0xD2, 0x00 }, /* LATIN CAPITAL LETTER O WITH GRAVE -> ... */
+ { 0xD6, 0x4F }, /* LATIN CAPITAL LETTER O WITH DIAERESIS -> 'O' */
+ { 0xD7, 0x78 }, /* MULTIPLICATION SIGN -> 'x' */
+ { 0xD8, 0x4F }, /* LATIN CAPITAL LETTER O WITH STROKE -> 'O' */
+ { 0xD9, 0x00 }, /* LATIN CAPITAL LETTER U WITH GRAVE -> ... */
+ { 0xDC, 0x55 }, /* LATIN CAPITAL LETTER U WITH DIAERESIS -> 'U' */
+ { 0xDD, 0x59 }, /* LATIN CAPITAL LETTER Y WITH ACUTE -> 'Y' */
+ { 0xDF, 0x73 }, /* LATIN SMALL LETTER SHARP S -> 's' */
+ { 0xE0, 0x00 }, /* LATIN SMALL LETTER A WITH GRAVE -> ... */
+ { 0xE5, 0x61 }, /* LATIN SMALL LETTER A WITH RING ABOVE -> 'a' */
+ { 0xE6, 0x65 }, /* LATIN SMALL LETTER AE -> 'e' */
+ { 0xE7, 0x63 }, /* LATIN SMALL LETTER C WITH CEDILLA -> 'c' */
+ { 0xE8, 0x00 }, /* LATIN SMALL LETTER E WITH GRAVE -> ... */
+ { 0xEB, 0x65 }, /* LATIN SMALL LETTER E WITH DIAERESIS -> 'e' */
+ { 0xEC, 0x00 }, /* LATIN SMALL LETTER I WITH GRAVE -> ... */
+ { 0xEF, 0x69 }, /* LATIN SMALL LETTER I WITH DIAERESIS -> 'i' */
+ { 0xF0, 0x64 }, /* LATIN SMALL LETTER ETH -> 'd' */
+ { 0xF1, 0x6E }, /* LATIN SMALL LETTER N WITH TILDE -> 'n' */
+ { 0xF2, 0x00 }, /* LATIN SMALL LETTER O WITH GRAVE -> ... */
+ { 0xF6, 0x6F }, /* LATIN SMALL LETTER O WITH DIAERESIS -> 'o' */
+ { 0xF7, 0x2F }, /* DIVISION SIGN -> '/' */
+ { 0xF8, 0x6F }, /* LATIN SMALL LETTER O WITH STROKE -> 'o' */
+ { 0xF9, 0x00 }, /* LATIN SMALL LETTER U WITH GRAVE -> ... */
+ { 0xFC, 0x75 }, /* LATIN SMALL LETTER U WITH DIAERESIS -> 'u' */
+ { 0xFD, 0x79 }, /* LATIN SMALL LETTER Y WITH ACUTE -> 'y' */
+ { 0xFF, 0x79 }, /* LATIN SMALL LETTER Y WITH DIAERESIS -> 'y' */
+ /* Entries for page 0x01 */
+ { 0x00, 0x41 }, /* LATIN CAPITAL LETTER A WITH MACRON -> 'A' */
+ { 0x01, 0x61 }, /* LATIN SMALL LETTER A WITH MACRON -> 'a' */
+ { 0x02, 0x41 }, /* LATIN CAPITAL LETTER A WITH BREVE -> 'A' */
+ { 0x03, 0x61 }, /* LATIN SMALL LETTER A WITH BREVE -> 'a' */
+ { 0x04, 0x41 }, /* LATIN CAPITAL LETTER A WITH OGONEK -> 'A' */
+ { 0x05, 0x61 }, /* LATIN SMALL LETTER A WITH OGONEK -> 'a' */
+ { 0x06, 0x43 }, /* LATIN CAPITAL LETTER C WITH ACUTE -> 'C' */
+ { 0x07, 0x63 }, /* LATIN SMALL LETTER C WITH ACUTE -> 'c' */
+ { 0x08, 0x43 }, /* LATIN CAPITAL LETTER C WITH CIRCUMFLEX -> 'C' */
+ { 0x09, 0x63 }, /* LATIN SMALL LETTER C WITH CIRCUMFLEX -> 'c' */
+ { 0x0A, 0x43 }, /* LATIN CAPITAL LETTER C WITH DOT ABOVE -> 'C' */
+ { 0x0B, 0x63 }, /* LATIN SMALL LETTER C WITH DOT ABOVE -> 'c' */
+ { 0x0C, 0x43 }, /* LATIN CAPITAL LETTER C WITH CARON -> 'C' */
+ { 0x0D, 0x63 }, /* LATIN SMALL LETTER C WITH CARON -> 'c' */
+ { 0x0E, 0x44 }, /* LATIN CAPITAL LETTER D WITH CARON -> 'D' */
+ { 0x0F, 0x64 }, /* LATIN SMALL LETTER D WITH CARON -> 'd' */
+ { 0x10, 0x44 }, /* LATIN CAPITAL LETTER D WITH STROKE -> 'D' */
+ { 0x11, 0x64 }, /* LATIN SMALL LETTER D WITH STROKE -> 'd' */
+ { 0x12, 0x45 }, /* LATIN CAPITAL LETTER E WITH MACRON -> 'E' */
+ { 0x13, 0x65 }, /* LATIN SMALL LETTER E WITH MACRON -> 'e' */
+ { 0x14, 0x45 }, /* LATIN CAPITAL LETTER E WITH BREVE -> 'E' */
+ { 0x15, 0x65 }, /* LATIN SMALL LETTER E WITH BREVE -> 'e' */
+ { 0x16, 0x45 }, /* LATIN CAPITAL LETTER E WITH DOT ABOVE -> 'E' */
+ { 0x17, 0x65 }, /* LATIN SMALL LETTER E WITH DOT ABOVE -> 'e' */
+ { 0x18, 0x45 }, /* LATIN CAPITAL LETTER E WITH OGONEK -> 'E' */
+ { 0x19, 0x65 }, /* LATIN SMALL LETTER E WITH OGONEK -> 'e' */
+ { 0x1A, 0x45 }, /* LATIN CAPITAL LETTER E WITH CARON -> 'E' */
+ { 0x1B, 0x65 }, /* LATIN SMALL LETTER E WITH CARON -> 'e' */
+ { 0x1C, 0x47 }, /* LATIN CAPITAL LETTER G WITH CIRCUMFLEX -> 'G' */
+ { 0x1D, 0x67 }, /* LATIN SMALL LETTER G WITH CIRCUMFLEX -> 'g' */
+ { 0x1E, 0x47 }, /* LATIN CAPITAL LETTER G WITH BREVE -> 'G' */
+ { 0x1F, 0x67 }, /* LATIN SMALL LETTER G WITH BREVE -> 'g' */
+ { 0x20, 0x47 }, /* LATIN CAPITAL LETTER G WITH DOT ABOVE -> 'G' */
+ { 0x21, 0x67 }, /* LATIN SMALL LETTER G WITH DOT ABOVE -> 'g' */
+ { 0x22, 0x47 }, /* LATIN CAPITAL LETTER G WITH CEDILLA -> 'G' */
+ { 0x23, 0x67 }, /* LATIN SMALL LETTER G WITH CEDILLA -> 'g' */
+ { 0x24, 0x48 }, /* LATIN CAPITAL LETTER H WITH CIRCUMFLEX -> 'H' */
+ { 0x25, 0x68 }, /* LATIN SMALL LETTER H WITH CIRCUMFLEX -> 'h' */
+ { 0x26, 0x48 }, /* LATIN CAPITAL LETTER H WITH STROKE -> 'H' */
+ { 0x27, 0x68 }, /* LATIN SMALL LETTER H WITH STROKE -> 'h' */
+ { 0x28, 0x49 }, /* LATIN CAPITAL LETTER I WITH TILDE -> 'I' */
+ { 0x29, 0x69 }, /* LATIN SMALL LETTER I WITH TILDE -> 'i' */
+ { 0x2A, 0x49 }, /* LATIN CAPITAL LETTER I WITH MACRON -> 'I' */
+ { 0x2B, 0x69 }, /* LATIN SMALL LETTER I WITH MACRON -> 'i' */
+ { 0x2C, 0x49 }, /* LATIN CAPITAL LETTER I WITH BREVE -> 'I' */
+ { 0x2D, 0x69 }, /* LATIN SMALL LETTER I WITH BREVE -> 'i' */
+ { 0x2E, 0x49 }, /* LATIN CAPITAL LETTER I WITH OGONEK -> 'I' */
+ { 0x2F, 0x69 }, /* LATIN SMALL LETTER I WITH OGONEK -> 'i' */
+ { 0x30, 0x49 }, /* LATIN CAPITAL LETTER I WITH DOT ABOVE -> 'I' */
+ { 0x31, 0x69 }, /* LATIN SMALL LETTER DOTLESS I -> 'i' */
+ { 0x34, 0x4A }, /* LATIN CAPITAL LETTER J WITH CIRCUMFLEX -> 'J' */
+ { 0x35, 0x6A }, /* LATIN SMALL LETTER J WITH CIRCUMFLEX -> 'j' */
+ { 0x36, 0x4B }, /* LATIN CAPITAL LETTER K WITH CEDILLA -> 'K' */
+ { 0x37, 0x6B }, /* LATIN SMALL LETTER K WITH CEDILLA -> 'k' */
+ { 0x38, 0x6B }, /* LATIN SMALL LETTER KRA -> 'k' */
+ { 0x39, 0x4C }, /* LATIN CAPITAL LETTER L WITH ACUTE -> 'L' */
+ { 0x3A, 0x6C }, /* LATIN SMALL LETTER L WITH ACUTE -> 'l' */
+ { 0x3B, 0x4C }, /* LATIN CAPITAL LETTER L WITH CEDILLA -> 'L' */
+ { 0x3C, 0x6C }, /* LATIN SMALL LETTER L WITH CEDILLA -> 'l' */
+ { 0x3D, 0x4C }, /* LATIN CAPITAL LETTER L WITH CARON -> 'L' */
+ { 0x3E, 0x6C }, /* LATIN SMALL LETTER L WITH CARON -> 'l' */
+ { 0x3F, 0x4C }, /* LATIN CAPITAL LETTER L WITH MIDDLE DOT -> 'L' */
+ { 0x40, 0x6C }, /* LATIN SMALL LETTER L WITH MIDDLE DOT -> 'l' */
+ { 0x41, 0x4C }, /* LATIN CAPITAL LETTER L WITH STROKE -> 'L' */
+ { 0x42, 0x6C }, /* LATIN SMALL LETTER L WITH STROKE -> 'l' */
+ { 0x43, 0x4E }, /* LATIN CAPITAL LETTER N WITH ACUTE -> 'N' */
+ { 0x44, 0x6E }, /* LATIN SMALL LETTER N WITH ACUTE -> 'n' */
+ { 0x45, 0x4E }, /* LATIN CAPITAL LETTER N WITH CEDILLA -> 'N' */
+ { 0x46, 0x6E }, /* LATIN SMALL LETTER N WITH CEDILLA -> 'n' */
+ { 0x47, 0x4E }, /* LATIN CAPITAL LETTER N WITH CARON -> 'N' */
+ { 0x48, 0x6E }, /* LATIN SMALL LETTER N WITH CARON -> 'n' */
+ { 0x4C, 0x4F }, /* LATIN CAPITAL LETTER O WITH MACRON -> 'O' */
+ { 0x4D, 0x6F }, /* LATIN SMALL LETTER O WITH MACRON -> 'o' */
+ { 0x4E, 0x4F }, /* LATIN CAPITAL LETTER O WITH BREVE -> 'O' */
+ { 0x4F, 0x6F }, /* LATIN SMALL LETTER O WITH BREVE -> 'o' */
+ { 0x50, 0x4F }, /* LATIN CAPITAL LETTER O WITH DOUBLE ACUTE -> 'O' */
+ { 0x51, 0x6F }, /* LATIN SMALL LETTER O WITH DOUBLE ACUTE -> 'o' */
+ { 0x52, 0x45 }, /* LATIN CAPITAL LIGATURE OE -> 'E' */
+ { 0x53, 0x65 }, /* LATIN SMALL LIGATURE OE -> 'e' */
+ { 0x54, 0x52 }, /* LATIN CAPITAL LETTER R WITH ACUTE -> 'R' */
+ { 0x55, 0x72 }, /* LATIN SMALL LETTER R WITH ACUTE -> 'r' */
+ { 0x56, 0x52 }, /* LATIN CAPITAL LETTER R WITH CEDILLA -> 'R' */
+ { 0x57, 0x72 }, /* LATIN SMALL LETTER R WITH CEDILLA -> 'r' */
+ { 0x58, 0x52 }, /* LATIN CAPITAL LETTER R WITH CARON -> 'R' */
+ { 0x59, 0x72 }, /* LATIN SMALL LETTER R WITH CARON -> 'r' */
+ { 0x5A, 0x53 }, /* LATIN CAPITAL LETTER S WITH ACUTE -> 'S' */
+ { 0x5B, 0x73 }, /* LATIN SMALL LETTER S WITH ACUTE -> 's' */
+ { 0x5C, 0x53 }, /* LATIN CAPITAL LETTER S WITH CIRCUMFLEX -> 'S' */
+ { 0x5D, 0x73 }, /* LATIN SMALL LETTER S WITH CIRCUMFLEX -> 's' */
+ { 0x5E, 0x53 }, /* LATIN CAPITAL LETTER S WITH CEDILLA -> 'S' */
+ { 0x5F, 0x73 }, /* LATIN SMALL LETTER S WITH CEDILLA -> 's' */
+ { 0x60, 0x53 }, /* LATIN CAPITAL LETTER S WITH CARON -> 'S' */
+ { 0x61, 0x73 }, /* LATIN SMALL LETTER S WITH CARON -> 's' */
+ { 0x62, 0x54 }, /* LATIN CAPITAL LETTER T WITH CEDILLA -> 'T' */
+ { 0x63, 0x74 }, /* LATIN SMALL LETTER T WITH CEDILLA -> 't' */
+ { 0x64, 0x54 }, /* LATIN CAPITAL LETTER T WITH CARON -> 'T' */
+ { 0x65, 0x74 }, /* LATIN SMALL LETTER T WITH CARON -> 't' */
+ { 0x66, 0x54 }, /* LATIN CAPITAL LETTER T WITH STROKE -> 'T' */
+ { 0x67, 0x74 }, /* LATIN SMALL LETTER T WITH STROKE -> 't' */
+ { 0x68, 0x55 }, /* LATIN CAPITAL LETTER U WITH TILDE -> 'U' */
+ { 0x69, 0x75 }, /* LATIN SMALL LETTER U WITH TILDE -> 'u' */
+ { 0x6A, 0x55 }, /* LATIN CAPITAL LETTER U WITH MACRON -> 'U' */
+ { 0x6B, 0x75 }, /* LATIN SMALL LETTER U WITH MACRON -> 'u' */
+ { 0x6C, 0x55 }, /* LATIN CAPITAL LETTER U WITH BREVE -> 'U' */
+ { 0x6D, 0x75 }, /* LATIN SMALL LETTER U WITH BREVE -> 'u' */
+ { 0x6E, 0x55 }, /* LATIN CAPITAL LETTER U WITH RING ABOVE -> 'U' */
+ { 0x6F, 0x75 }, /* LATIN SMALL LETTER U WITH RING ABOVE -> 'u' */
+ { 0x70, 0x55 }, /* LATIN CAPITAL LETTER U WITH DOUBLE ACUTE -> 'U' */
+ { 0x71, 0x75 }, /* LATIN SMALL LETTER U WITH DOUBLE ACUTE -> 'u' */
+ { 0x72, 0x55 }, /* LATIN CAPITAL LETTER U WITH OGONEK -> 'U' */
+ { 0x73, 0x75 }, /* LATIN SMALL LETTER U WITH OGONEK -> 'u' */
+ { 0x74, 0x57 }, /* LATIN CAPITAL LETTER W WITH CIRCUMFLEX -> 'W' */
+ { 0x75, 0x77 }, /* LATIN SMALL LETTER W WITH CIRCUMFLEX -> 'w' */
+ { 0x76, 0x59 }, /* LATIN CAPITAL LETTER Y WITH CIRCUMFLEX -> 'Y' */
+ { 0x77, 0x79 }, /* LATIN SMALL LETTER Y WITH CIRCUMFLEX -> 'y' */
+ { 0x78, 0x59 }, /* LATIN CAPITAL LETTER Y WITH DIAERESIS -> 'Y' */
+ { 0x79, 0x5A }, /* LATIN CAPITAL LETTER Z WITH ACUTE -> 'Z' */
+ { 0x7A, 0x7A }, /* LATIN SMALL LETTER Z WITH ACUTE -> 'z' */
+ { 0x7B, 0x5A }, /* LATIN CAPITAL LETTER Z WITH DOT ABOVE -> 'Z' */
+ { 0x7C, 0x7A }, /* LATIN SMALL LETTER Z WITH DOT ABOVE -> 'z' */
+ { 0x7D, 0x5A }, /* LATIN CAPITAL LETTER Z WITH CARON -> 'Z' */
+ { 0x7E, 0x7A }, /* LATIN SMALL LETTER Z WITH CARON -> 'z' */
+ { 0x7F, 0x73 }, /* LATIN SMALL LETTER LONG S -> 's' */
+ { 0x80, 0x62 }, /* LATIN SMALL LETTER B WITH STROKE -> 'b' */
+ { 0x81, 0x42 }, /* LATIN CAPITAL LETTER B WITH HOOK -> 'B' */
+ { 0x82, 0x42 }, /* LATIN CAPITAL LETTER B WITH TOPBAR -> 'B' */
+ { 0x83, 0x62 }, /* LATIN SMALL LETTER B WITH TOPBAR -> 'b' */
+ { 0x84, 0x36 }, /* LATIN CAPITAL LETTER TONE SIX -> '6' */
+ { 0x85, 0x36 }, /* LATIN SMALL LETTER TONE SIX -> '6' */
+ { 0x86, 0x4F }, /* LATIN CAPITAL LETTER OPEN O -> 'O' */
+ { 0x87, 0x43 }, /* LATIN CAPITAL LETTER C WITH HOOK -> 'C' */
+ { 0x88, 0x63 }, /* LATIN SMALL LETTER C WITH HOOK -> 'c' */
+ { 0x89, 0x00 }, /* LATIN CAPITAL LETTER AFRICAN D -> ... */
+ { 0x8B, 0x44 }, /* LATIN CAPITAL LETTER D WITH TOPBAR -> 'D' */
+ { 0x8C, 0x64 }, /* LATIN SMALL LETTER D WITH TOPBAR -> 'd' */
+ { 0x8D, 0x64 }, /* LATIN SMALL LETTER TURNED DELTA -> 'd' */
+ { 0x8E, 0x33 }, /* LATIN CAPITAL LETTER REVERSED E -> '3' */
+ { 0x8F, 0x40 }, /* LATIN CAPITAL LETTER SCHWA -> '@' */
+ { 0x90, 0x45 }, /* LATIN CAPITAL LETTER OPEN E -> 'E' */
+ { 0x91, 0x46 }, /* LATIN CAPITAL LETTER F WITH HOOK -> 'F' */
+ { 0x92, 0x66 }, /* LATIN SMALL LETTER F WITH HOOK -> 'f' */
+ { 0x93, 0x47 }, /* LATIN CAPITAL LETTER G WITH HOOK -> 'G' */
+ { 0x94, 0x47 }, /* LATIN CAPITAL LETTER GAMMA -> 'G' */
+ { 0x96, 0x49 }, /* LATIN CAPITAL LETTER IOTA -> 'I' */
+ { 0x97, 0x49 }, /* LATIN CAPITAL LETTER I WITH STROKE -> 'I' */
+ { 0x98, 0x4B }, /* LATIN CAPITAL LETTER K WITH HOOK -> 'K' */
+ { 0x99, 0x6B }, /* LATIN SMALL LETTER K WITH HOOK -> 'k' */
+ { 0x9A, 0x6C }, /* LATIN SMALL LETTER L WITH BAR -> 'l' */
+ { 0x9B, 0x6C }, /* LATIN SMALL LETTER LAMBDA WITH STROKE -> 'l' */
+ { 0x9C, 0x57 }, /* LATIN CAPITAL LETTER TURNED M -> 'W' */
+ { 0x9D, 0x4E }, /* LATIN CAPITAL LETTER N WITH LEFT HOOK -> 'N' */
+ { 0x9E, 0x6E }, /* LATIN SMALL LETTER N WITH LONG RIGHT LEG -> 'n' */
+ { 0x9F, 0x4F }, /* LATIN CAPITAL LETTER O WITH MIDDLE TILDE -> 'O' */
+ { 0xA0, 0x4F }, /* LATIN CAPITAL LETTER O WITH HORN -> 'O' */
+ { 0xA1, 0x6F }, /* LATIN SMALL LETTER O WITH HORN -> 'o' */
+ { 0xA4, 0x50 }, /* LATIN CAPITAL LETTER P WITH HOOK -> 'P' */
+ { 0xA5, 0x70 }, /* LATIN SMALL LETTER P WITH HOOK -> 'p' */
+ { 0xA7, 0x32 }, /* LATIN CAPITAL LETTER TONE TWO -> '2' */
+ { 0xA8, 0x32 }, /* LATIN SMALL LETTER TONE TWO -> '2' */
+ { 0xAB, 0x74 }, /* LATIN SMALL LETTER T WITH PALATAL HOOK -> 't' */
+ { 0xAC, 0x54 }, /* LATIN CAPITAL LETTER T WITH HOOK -> 'T' */
+ { 0xAD, 0x74 }, /* LATIN SMALL LETTER T WITH HOOK -> 't' */
+ { 0xAE, 0x54 }, /* LATIN CAPITAL LETTER T WITH RETROFLEX HOOK -> 'T' */
+ { 0xAF, 0x55 }, /* LATIN CAPITAL LETTER U WITH HORN -> 'U' */
+ { 0xB0, 0x75 }, /* LATIN SMALL LETTER U WITH HORN -> 'u' */
+ { 0xB1, 0x59 }, /* LATIN CAPITAL LETTER UPSILON -> 'Y' */
+ { 0xB2, 0x56 }, /* LATIN CAPITAL LETTER V WITH HOOK -> 'V' */
+ { 0xB3, 0x59 }, /* LATIN CAPITAL LETTER Y WITH HOOK -> 'Y' */
+ { 0xB4, 0x79 }, /* LATIN SMALL LETTER Y WITH HOOK -> 'y' */
+ { 0xB5, 0x5A }, /* LATIN CAPITAL LETTER Z WITH STROKE -> 'Z' */
+ { 0xB6, 0x7A }, /* LATIN SMALL LETTER Z WITH STROKE -> 'z' */
+ { 0xBB, 0x32 }, /* LATIN LETTER TWO WITH STROKE -> '2' */
+ { 0xBC, 0x35 }, /* LATIN CAPITAL LETTER TONE FIVE -> '5' */
+ { 0xBD, 0x35 }, /* LATIN SMALL LETTER TONE FIVE -> '5' */
+ { 0xBF, 0x77 }, /* LATIN LETTER WYNN -> 'w' */
+ { 0xC0, 0x7C }, /* LATIN LETTER DENTAL CLICK -> '|' */
+ { 0xC3, 0x21 }, /* LATIN LETTER RETROFLEX CLICK -> '!' */
+ { 0xCD, 0x41 }, /* LATIN CAPITAL LETTER A WITH CARON -> 'A' */
+ { 0xCE, 0x61 }, /* LATIN SMALL LETTER A WITH CARON -> 'a' */
+ { 0xCF, 0x49 }, /* LATIN CAPITAL LETTER I WITH CARON -> 'I' */
+ { 0xD0, 0x69 }, /* LATIN SMALL LETTER I WITH CARON -> 'i' */
+ { 0xD1, 0x4F }, /* LATIN CAPITAL LETTER O WITH CARON -> 'O' */
+ { 0xD2, 0x6F }, /* LATIN SMALL LETTER O WITH CARON -> 'o' */
+ { 0xD3, 0x55 }, /* LATIN CAPITAL LETTER U WITH CARON -> 'U' */
+ { 0xD4, 0x75 }, /* LATIN SMALL LETTER U WITH CARON -> 'u' */
+ { 0xD5, 0x55 }, /* LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON -> 'U' */
+ { 0xD6, 0x75 }, /* LATIN SMALL LETTER U WITH DIAERESIS AND MACRON -> 'u' */
+ { 0xD7, 0x55 }, /* LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE -> 'U' */
+ { 0xD8, 0x75 }, /* LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE -> 'u' */
+ { 0xD9, 0x55 }, /* LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON -> 'U' */
+ { 0xDA, 0x75 }, /* LATIN SMALL LETTER U WITH DIAERESIS AND CARON -> 'u' */
+ { 0xDB, 0x55 }, /* LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE -> 'U' */
+ { 0xDC, 0x75 }, /* LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE -> 'u' */
+ { 0xDD, 0x40 }, /* LATIN SMALL LETTER TURNED E -> '@' */
+ { 0xDE, 0x41 }, /* LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON -> 'A' */
+ { 0xDF, 0x61 }, /* LATIN SMALL LETTER A WITH DIAERESIS AND MACRON -> 'a' */
+ { 0xE0, 0x41 }, /* LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON -> 'A' */
+ { 0xE1, 0x61 }, /* LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON -> 'a' */
+ { 0xE4, 0x47 }, /* LATIN CAPITAL LETTER G WITH STROKE -> 'G' */
+ { 0xE5, 0x67 }, /* LATIN SMALL LETTER G WITH STROKE -> 'g' */
+ { 0xE6, 0x47 }, /* LATIN CAPITAL LETTER G WITH CARON -> 'G' */
+ { 0xE7, 0x67 }, /* LATIN SMALL LETTER G WITH CARON -> 'g' */
+ { 0xE8, 0x4B }, /* LATIN CAPITAL LETTER K WITH CARON -> 'K' */
+ { 0xE9, 0x6B }, /* LATIN SMALL LETTER K WITH CARON -> 'k' */
+ { 0xEA, 0x4F }, /* LATIN CAPITAL LETTER O WITH OGONEK -> 'O' */
+ { 0xEB, 0x6F }, /* LATIN SMALL LETTER O WITH OGONEK -> 'o' */
+ { 0xEC, 0x4F }, /* LATIN CAPITAL LETTER O WITH OGONEK AND MACRON -> 'O' */
+ { 0xED, 0x6F }, /* LATIN SMALL LETTER O WITH OGONEK AND MACRON -> 'o' */
+ { 0xF0, 0x6A }, /* LATIN SMALL LETTER J WITH CARON -> 'j' */
+ { 0xF4, 0x47 }, /* LATIN CAPITAL LETTER G WITH ACUTE -> 'G' */
+ { 0xF5, 0x67 }, /* LATIN SMALL LETTER G WITH ACUTE -> 'g' */
+ { 0xF7, 0x57 }, /* LATIN CAPITAL LETTER WYNN -> 'W' */
+ { 0xF8, 0x4E }, /* LATIN CAPITAL LETTER N WITH GRAVE -> 'N' */
+ { 0xF9, 0x6E }, /* LATIN SMALL LETTER N WITH GRAVE -> 'n' */
+ { 0xFA, 0x41 }, /* LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE -> 'A' */
+ { 0xFB, 0x61 }, /* LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE -> 'a' */
+ { 0xFE, 0x4F }, /* LATIN CAPITAL LETTER O WITH STROKE AND ACUTE -> 'O' */
+ { 0xFF, 0x6F }, /* LATIN SMALL LETTER O WITH STROKE AND ACUTE -> 'o' */
+ /* Entries for page 0x02 */
+ { 0x00, 0x41 }, /* LATIN CAPITAL LETTER A WITH DOUBLE GRAVE -> 'A' */
+ { 0x01, 0x61 }, /* LATIN SMALL LETTER A WITH DOUBLE GRAVE -> 'a' */
+ { 0x02, 0x41 }, /* LATIN CAPITAL LETTER A WITH INVERTED BREVE -> 'A' */
+ { 0x03, 0x61 }, /* LATIN SMALL LETTER A WITH INVERTED BREVE -> 'a' */
+ { 0x04, 0x45 }, /* LATIN CAPITAL LETTER E WITH DOUBLE GRAVE -> 'E' */
+ { 0x05, 0x65 }, /* LATIN SMALL LETTER E WITH DOUBLE GRAVE -> 'e' */
+ { 0x06, 0x45 }, /* LATIN CAPITAL LETTER E WITH INVERTED BREVE -> 'E' */
+ { 0x07, 0x65 }, /* LATIN SMALL LETTER E WITH INVERTED BREVE -> 'e' */
+ { 0x08, 0x49 }, /* LATIN CAPITAL LETTER I WITH DOUBLE GRAVE -> 'I' */
+ { 0x09, 0x69 }, /* LATIN SMALL LETTER I WITH DOUBLE GRAVE -> 'i' */
+ { 0x0A, 0x49 }, /* LATIN CAPITAL LETTER I WITH INVERTED BREVE -> 'I' */
+ { 0x0B, 0x69 }, /* LATIN SMALL LETTER I WITH INVERTED BREVE -> 'i' */
+ { 0x0C, 0x4F }, /* LATIN CAPITAL LETTER O WITH DOUBLE GRAVE -> 'O' */
+ { 0x0D, 0x6F }, /* LATIN SMALL LETTER O WITH DOUBLE GRAVE -> 'o' */
+ { 0x0E, 0x4F }, /* LATIN CAPITAL LETTER O WITH INVERTED BREVE -> 'O' */
+ { 0x0F, 0x6F }, /* LATIN SMALL LETTER O WITH INVERTED BREVE -> 'o' */
+ { 0x10, 0x52 }, /* LATIN CAPITAL LETTER R WITH DOUBLE GRAVE -> 'R' */
+ { 0x11, 0x72 }, /* LATIN SMALL LETTER R WITH DOUBLE GRAVE -> 'r' */
+ { 0x12, 0x52 }, /* LATIN CAPITAL LETTER R WITH INVERTED BREVE -> 'R' */
+ { 0x13, 0x72 }, /* LATIN SMALL LETTER R WITH INVERTED BREVE -> 'r' */
+ { 0x14, 0x55 }, /* LATIN CAPITAL LETTER U WITH DOUBLE GRAVE -> 'U' */
+ { 0x15, 0x75 }, /* LATIN SMALL LETTER U WITH DOUBLE GRAVE -> 'u' */
+ { 0x16, 0x55 }, /* LATIN CAPITAL LETTER U WITH INVERTED BREVE -> 'U' */
+ { 0x17, 0x75 }, /* LATIN SMALL LETTER U WITH INVERTED BREVE -> 'u' */
+ { 0x18, 0x53 }, /* LATIN CAPITAL LETTER S WITH COMMA BELOW -> 'S' */
+ { 0x19, 0x73 }, /* LATIN SMALL LETTER S WITH COMMA BELOW -> 's' */
+ { 0x1A, 0x54 }, /* LATIN CAPITAL LETTER T WITH COMMA BELOW -> 'T' */
+ { 0x1B, 0x74 }, /* LATIN SMALL LETTER T WITH COMMA BELOW -> 't' */
+ { 0x1C, 0x59 }, /* LATIN CAPITAL LETTER YOGH -> 'Y' */
+ { 0x1D, 0x79 }, /* LATIN SMALL LETTER YOGH -> 'y' */
+ { 0x1E, 0x48 }, /* LATIN CAPITAL LETTER H WITH CARON -> 'H' */
+ { 0x1F, 0x68 }, /* LATIN SMALL LETTER H WITH CARON -> 'h' */
+ { 0x20, 0x4E }, /* LATIN CAPITAL LETTER N WITH LONG RIGHT LEG -> 'N' */
+ { 0x21, 0x64 }, /* LATIN SMALL LETTER D WITH CURL -> 'd' */
+ { 0x24, 0x5A }, /* LATIN CAPITAL LETTER Z WITH HOOK -> 'Z' */
+ { 0x25, 0x7A }, /* LATIN SMALL LETTER Z WITH HOOK -> 'z' */
+ { 0x26, 0x41 }, /* LATIN CAPITAL LETTER A WITH DOT ABOVE -> 'A' */
+ { 0x27, 0x61 }, /* LATIN SMALL LETTER A WITH DOT ABOVE -> 'a' */
+ { 0x28, 0x45 }, /* LATIN CAPITAL LETTER E WITH CEDILLA -> 'E' */
+ { 0x29, 0x65 }, /* LATIN SMALL LETTER E WITH CEDILLA -> 'e' */
+ { 0x2A, 0x4F }, /* LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON -> 'O' */
+ { 0x2B, 0x6F }, /* LATIN SMALL LETTER O WITH DIAERESIS AND MACRON -> 'o' */
+ { 0x2C, 0x4F }, /* LATIN CAPITAL LETTER O WITH TILDE AND MACRON -> 'O' */
+ { 0x2D, 0x6F }, /* LATIN SMALL LETTER O WITH TILDE AND MACRON -> 'o' */
+ { 0x2E, 0x4F }, /* LATIN CAPITAL LETTER O WITH DOT ABOVE -> 'O' */
+ { 0x2F, 0x6F }, /* LATIN SMALL LETTER O WITH DOT ABOVE -> 'o' */
+ { 0x30, 0x4F }, /* LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON -> 'O' */
+ { 0x31, 0x6F }, /* LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON -> 'o' */
+ { 0x32, 0x59 }, /* LATIN CAPITAL LETTER Y WITH MACRON -> 'Y' */
+ { 0x33, 0x79 }, /* LATIN SMALL LETTER Y WITH MACRON -> 'y' */
+ { 0x34, 0x6C }, /* LATIN SMALL LETTER L WITH CURL -> 'l' */
+ { 0x35, 0x6E }, /* LATIN SMALL LETTER N WITH CURL -> 'n' */
+ { 0x36, 0x74 }, /* LATIN SMALL LETTER T WITH CURL -> 't' */
+ { 0x37, 0x6A }, /* LATIN SMALL LETTER DOTLESS J -> 'j' */
+ { 0x3A, 0x41 }, /* LATIN CAPITAL LETTER A WITH STROKE -> 'A' */
+ { 0x3B, 0x43 }, /* LATIN CAPITAL LETTER C WITH STROKE -> 'C' */
+ { 0x3C, 0x63 }, /* LATIN SMALL LETTER C WITH STROKE -> 'c' */
+ { 0x3D, 0x4C }, /* LATIN CAPITAL LETTER L WITH BAR -> 'L' */
+ { 0x3E, 0x54 }, /* LATIN CAPITAL LETTER T WITH DIAGONAL STROKE -> 'T' */
+ { 0x3F, 0x73 }, /* LATIN SMALL LETTER S WITH SWASH TAIL -> 's' */
+ { 0x40, 0x7A }, /* LATIN SMALL LETTER Z WITH SWASH TAIL -> 'z' */
+ { 0x43, 0x42 }, /* LATIN CAPITAL LETTER B WITH STROKE -> 'B' */
+ { 0x44, 0x55 }, /* LATIN CAPITAL LETTER U BAR -> 'U' */
+ { 0x45, 0x5E }, /* LATIN CAPITAL LETTER TURNED V -> '^' */
+ { 0x46, 0x45 }, /* LATIN CAPITAL LETTER E WITH STROKE -> 'E' */
+ { 0x47, 0x65 }, /* LATIN SMALL LETTER E WITH STROKE -> 'e' */
+ { 0x48, 0x4A }, /* LATIN CAPITAL LETTER J WITH STROKE -> 'J' */
+ { 0x49, 0x6A }, /* LATIN SMALL LETTER J WITH STROKE -> 'j' */
+ { 0x4A, 0x71 }, /* LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL -> 'q' */
+ { 0x4B, 0x71 }, /* LATIN SMALL LETTER Q WITH HOOK TAIL -> 'q' */
+ { 0x4C, 0x52 }, /* LATIN CAPITAL LETTER R WITH STROKE -> 'R' */
+ { 0x4D, 0x72 }, /* LATIN SMALL LETTER R WITH STROKE -> 'r' */
+ { 0x4E, 0x59 }, /* LATIN CAPITAL LETTER Y WITH STROKE -> 'Y' */
+ { 0x4F, 0x79 }, /* LATIN SMALL LETTER Y WITH STROKE -> 'y' */
+ { 0x50, 0x00 }, /* LATIN SMALL LETTER TURNED A -> ... */
+ { 0x52, 0x61 }, /* LATIN SMALL LETTER TURNED ALPHA -> 'a' */
+ { 0x53, 0x62 }, /* LATIN SMALL LETTER B WITH HOOK -> 'b' */
+ { 0x54, 0x6F }, /* LATIN SMALL LETTER OPEN O -> 'o' */
+ { 0x55, 0x63 }, /* LATIN SMALL LETTER C WITH CURL -> 'c' */
+ { 0x56, 0x64 }, /* LATIN SMALL LETTER D WITH TAIL -> 'd' */
+ { 0x57, 0x64 }, /* LATIN SMALL LETTER D WITH HOOK -> 'd' */
+ { 0x58, 0x65 }, /* LATIN SMALL LETTER REVERSED E -> 'e' */
+ { 0x59, 0x40 }, /* LATIN SMALL LETTER SCHWA -> '@' */
+ { 0x5A, 0x40 }, /* LATIN SMALL LETTER SCHWA WITH HOOK -> '@' */
+ { 0x5B, 0x00 }, /* LATIN SMALL LETTER OPEN E -> ... */
+ { 0x5E, 0x65 }, /* LATIN SMALL LETTER CLOSED REVERSED OPEN E -> 'e' */
+ { 0x5F, 0x6A }, /* LATIN SMALL LETTER DOTLESS J WITH STROKE -> 'j' */
+ { 0x60, 0x00 }, /* LATIN SMALL LETTER G WITH HOOK -> ... */
+ { 0x63, 0x67 }, /* LATIN SMALL LETTER GAMMA -> 'g' */
+ { 0x64, 0x75 }, /* LATIN SMALL LETTER RAMS HORN -> 'u' */
+ { 0x65, 0x59 }, /* LATIN SMALL LETTER TURNED H -> 'Y' */
+ { 0x66, 0x68 }, /* LATIN SMALL LETTER H WITH HOOK -> 'h' */
+ { 0x67, 0x68 }, /* LATIN SMALL LETTER HENG WITH HOOK -> 'h' */
+ { 0x68, 0x69 }, /* LATIN SMALL LETTER I WITH STROKE -> 'i' */
+ { 0x69, 0x69 }, /* LATIN SMALL LETTER IOTA -> 'i' */
+ { 0x6A, 0x49 }, /* LATIN LETTER SMALL CAPITAL I -> 'I' */
+ { 0x6B, 0x00 }, /* LATIN SMALL LETTER L WITH MIDDLE TILDE -> ... */
+ { 0x6D, 0x6C }, /* LATIN SMALL LETTER L WITH RETROFLEX HOOK -> 'l' */
+ { 0x6F, 0x57 }, /* LATIN SMALL LETTER TURNED M -> 'W' */
+ { 0x70, 0x57 }, /* LATIN SMALL LETTER TURNED M WITH LONG LEG -> 'W' */
+ { 0x71, 0x6D }, /* LATIN SMALL LETTER M WITH HOOK -> 'm' */
+ { 0x72, 0x00 }, /* LATIN SMALL LETTER N WITH LEFT HOOK -> ... */
+ { 0x74, 0x6E }, /* LATIN LETTER SMALL CAPITAL N -> 'n' */
+ { 0x75, 0x6F }, /* LATIN SMALL LETTER BARRED O -> 'o' */
+ { 0x77, 0x4F }, /* LATIN SMALL LETTER CLOSED OMEGA -> 'O' */
+ { 0x78, 0x46 }, /* LATIN SMALL LETTER PHI -> 'F' */
+ { 0x79, 0x00 }, /* LATIN SMALL LETTER TURNED R -> ... */
+ { 0x7F, 0x72 }, /* LATIN SMALL LETTER REVERSED R WITH FISHHOOK -> 'r' */
+ { 0x80, 0x52 }, /* LATIN LETTER SMALL CAPITAL R -> 'R' */
+ { 0x81, 0x52 }, /* LATIN LETTER SMALL CAPITAL INVERTED R -> 'R' */
+ { 0x82, 0x73 }, /* LATIN SMALL LETTER S WITH HOOK -> 's' */
+ { 0x83, 0x53 }, /* LATIN SMALL LETTER ESH -> 'S' */
+ { 0x84, 0x6A }, /* LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK -> 'j' */
+ { 0x85, 0x53 }, /* LATIN SMALL LETTER SQUAT REVERSED ESH -> 'S' */
+ { 0x86, 0x53 }, /* LATIN SMALL LETTER ESH WITH CURL -> 'S' */
+ { 0x87, 0x74 }, /* LATIN SMALL LETTER TURNED T -> 't' */
+ { 0x88, 0x74 }, /* LATIN SMALL LETTER T WITH RETROFLEX HOOK -> 't' */
+ { 0x89, 0x75 }, /* LATIN SMALL LETTER U BAR -> 'u' */
+ { 0x8A, 0x55 }, /* LATIN SMALL LETTER UPSILON -> 'U' */
+ { 0x8B, 0x76 }, /* LATIN SMALL LETTER V WITH HOOK -> 'v' */
+ { 0x8C, 0x5E }, /* LATIN SMALL LETTER TURNED V -> '^' */
+ { 0x8D, 0x77 }, /* LATIN SMALL LETTER TURNED W -> 'w' */
+ { 0x8E, 0x79 }, /* LATIN SMALL LETTER TURNED Y -> 'y' */
+ { 0x8F, 0x59 }, /* LATIN LETTER SMALL CAPITAL Y -> 'Y' */
+ { 0x90, 0x7A }, /* LATIN SMALL LETTER Z WITH RETROFLEX HOOK -> 'z' */
+ { 0x91, 0x7A }, /* LATIN SMALL LETTER Z WITH CURL -> 'z' */
+ { 0x92, 0x5A }, /* LATIN SMALL LETTER EZH -> 'Z' */
+ { 0x93, 0x5A }, /* LATIN SMALL LETTER EZH WITH CURL -> 'Z' */
+ { 0x94, 0x00 }, /* LATIN LETTER GLOTTAL STOP -> ... */
+ { 0x96, 0x3F }, /* LATIN LETTER INVERTED GLOTTAL STOP -> '?' */
+ { 0x97, 0x43 }, /* LATIN LETTER STRETCHED C -> 'C' */
+ { 0x98, 0x40 }, /* LATIN LETTER BILABIAL CLICK -> '@' */
+ { 0x99, 0x42 }, /* LATIN LETTER SMALL CAPITAL B -> 'B' */
+ { 0x9A, 0x45 }, /* LATIN SMALL LETTER CLOSED OPEN E -> 'E' */
+ { 0x9B, 0x47 }, /* LATIN LETTER SMALL CAPITAL G WITH HOOK -> 'G' */
+ { 0x9C, 0x48 }, /* LATIN LETTER SMALL CAPITAL H -> 'H' */
+ { 0x9D, 0x6A }, /* LATIN SMALL LETTER J WITH CROSSED-TAIL -> 'j' */
+ { 0x9E, 0x6B }, /* LATIN SMALL LETTER TURNED K -> 'k' */
+ { 0x9F, 0x4C }, /* LATIN LETTER SMALL CAPITAL L -> 'L' */
+ { 0xA0, 0x71 }, /* LATIN SMALL LETTER Q WITH HOOK -> 'q' */
+ { 0xA1, 0x3F }, /* LATIN LETTER GLOTTAL STOP WITH STROKE -> '?' */
+ { 0xA2, 0x3F }, /* LATIN LETTER REVERSED GLOTTAL STOP WITH STROKE -> '?' */
+ { 0xAE, 0x00 }, /* LATIN SMALL LETTER TURNED H WITH FISHHOOK -> ... */
+ { 0xB1, 0x68 }, /* MODIFIER LETTER SMALL H WITH HOOK -> 'h' */
+ { 0xB2, 0x6A }, /* MODIFIER LETTER SMALL J -> 'j' */
+ { 0xB3, 0x00 }, /* MODIFIER LETTER SMALL R -> ... */
+ { 0xB6, 0x72 }, /* MODIFIER LETTER SMALL CAPITAL INVERTED R -> 'r' */
+ { 0xB7, 0x77 }, /* MODIFIER LETTER SMALL W -> 'w' */
+ { 0xB8, 0x79 }, /* MODIFIER LETTER SMALL Y -> 'y' */
+ { 0xB9, 0x27 }, /* MODIFIER LETTER PRIME -> ''' */
+ { 0xBA, 0x22 }, /* MODIFIER LETTER DOUBLE PRIME -> '"' */
+ { 0xBB, 0x60 }, /* MODIFIER LETTER TURNED COMMA -> '`' */
+ { 0xBC, 0x27 }, /* MODIFIER LETTER APOSTROPHE -> ''' */
+ { 0xBD, 0x60 }, /* MODIFIER LETTER REVERSED COMMA -> '`' */
+ { 0xBE, 0x60 }, /* MODIFIER LETTER RIGHT HALF RING -> '`' */
+ { 0xBF, 0x27 }, /* MODIFIER LETTER LEFT HALF RING -> ''' */
+ { 0xC0, 0x3F }, /* MODIFIER LETTER GLOTTAL STOP -> '?' */
+ { 0xC1, 0x3F }, /* MODIFIER LETTER REVERSED GLOTTAL STOP -> '?' */
+ { 0xC2, 0x3C }, /* MODIFIER LETTER LEFT ARROWHEAD -> '<' */
+ { 0xC3, 0x3E }, /* MODIFIER LETTER RIGHT ARROWHEAD -> '>' */
+ { 0xC4, 0x5E }, /* MODIFIER LETTER UP ARROWHEAD -> '^' */
+ { 0xC5, 0x56 }, /* MODIFIER LETTER DOWN ARROWHEAD -> 'V' */
+ { 0xC6, 0x5E }, /* MODIFIER LETTER CIRCUMFLEX ACCENT -> '^' */
+ { 0xC7, 0x56 }, /* CARON -> 'V' */
+ { 0xC8, 0x27 }, /* MODIFIER LETTER VERTICAL LINE -> ''' */
+ { 0xC9, 0x2D }, /* MODIFIER LETTER MACRON -> '-' */
+ { 0xCA, 0x2F }, /* MODIFIER LETTER ACUTE ACCENT -> '/' */
+ { 0xCB, 0x5C }, /* MODIFIER LETTER GRAVE ACCENT -> '\' */
+ { 0xCC, 0x2C }, /* MODIFIER LETTER LOW VERTICAL LINE -> ',' */
+ { 0xCD, 0x5F }, /* MODIFIER LETTER LOW MACRON -> '_' */
+ { 0xCE, 0x5C }, /* MODIFIER LETTER LOW GRAVE ACCENT -> '\' */
+ { 0xCF, 0x2F }, /* MODIFIER LETTER LOW ACUTE ACCENT -> '/' */
+ { 0xD0, 0x3A }, /* MODIFIER LETTER TRIANGULAR COLON -> ':' */
+ { 0xD1, 0x2E }, /* MODIFIER LETTER HALF TRIANGULAR COLON -> '.' */
+ { 0xD2, 0x60 }, /* MODIFIER LETTER CENTRED RIGHT HALF RING -> '`' */
+ { 0xD3, 0x27 }, /* MODIFIER LETTER CENTRED LEFT HALF RING -> ''' */
+ { 0xD4, 0x5E }, /* MODIFIER LETTER UP TACK -> '^' */
+ { 0xD5, 0x56 }, /* MODIFIER LETTER DOWN TACK -> 'V' */
+ { 0xD6, 0x2B }, /* MODIFIER LETTER PLUS SIGN -> '+' */
+ { 0xD7, 0x2D }, /* MODIFIER LETTER MINUS SIGN -> '-' */
+ { 0xD8, 0x56 }, /* BREVE -> 'V' */
+ { 0xD9, 0x2E }, /* DOT ABOVE -> '.' */
+ { 0xDA, 0x40 }, /* RING ABOVE -> '@' */
+ { 0xDB, 0x2C }, /* OGONEK -> ',' */
+ { 0xDC, 0x7E }, /* SMALL TILDE -> '~' */
+ { 0xDD, 0x22 }, /* DOUBLE ACUTE ACCENT -> '"' */
+ { 0xDE, 0x52 }, /* MODIFIER LETTER RHOTIC HOOK -> 'R' */
+ { 0xDF, 0x58 }, /* MODIFIER LETTER CROSS ACCENT -> 'X' */
+ { 0xE0, 0x47 }, /* MODIFIER LETTER SMALL GAMMA -> 'G' */
+ { 0xE1, 0x6C }, /* MODIFIER LETTER SMALL L -> 'l' */
+ { 0xE2, 0x73 }, /* MODIFIER LETTER SMALL S -> 's' */
+ { 0xE3, 0x78 }, /* MODIFIER LETTER SMALL X -> 'x' */
+ { 0xE4, 0x3F }, /* MODIFIER LETTER SMALL REVERSED GLOTTAL STOP -> '?' */
+ { 0xEC, 0x56 }, /* MODIFIER LETTER VOICING -> 'V' */
+ { 0xED, 0x3D }, /* MODIFIER LETTER UNASPIRATED -> '=' */
+ { 0xEE, 0x22 }, /* MODIFIER LETTER DOUBLE APOSTROPHE -> '"' */
+ /* Entries for page 0x03 */
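+ /*
+  * The first byte of each entry is the low byte of the code point;
+  * the page comment supplies the high byte, so 0x63 on page 0x03 is
+  * U+0363 COMBINING LATIN SMALL LETTER A.
+  */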
+ { 0x63, 0x61 }, /* COMBINING LATIN SMALL LETTER A -> 'a' */
+ { 0x64, 0x65 }, /* COMBINING LATIN SMALL LETTER E -> 'e' */
+ { 0x65, 0x69 }, /* COMBINING LATIN SMALL LETTER I -> 'i' */
+ { 0x66, 0x6F }, /* COMBINING LATIN SMALL LETTER O -> 'o' */
+ { 0x67, 0x75 }, /* COMBINING LATIN SMALL LETTER U -> 'u' */
+ { 0x68, 0x63 }, /* COMBINING LATIN SMALL LETTER C -> 'c' */
+ { 0x69, 0x64 }, /* COMBINING LATIN SMALL LETTER D -> 'd' */
+ { 0x6A, 0x68 }, /* COMBINING LATIN SMALL LETTER H -> 'h' */
+ { 0x6B, 0x6D }, /* COMBINING LATIN SMALL LETTER M -> 'm' */
+ { 0x6C, 0x72 }, /* COMBINING LATIN SMALL LETTER R -> 'r' */
+ { 0x6D, 0x74 }, /* COMBINING LATIN SMALL LETTER T -> 't' */
+ { 0x6E, 0x76 }, /* COMBINING LATIN SMALL LETTER V -> 'v' */
+ { 0x6F, 0x78 }, /* COMBINING LATIN SMALL LETTER X -> 'x' */
+ { 0x74, 0x27 }, /* GREEK NUMERAL SIGN -> ''' */
+ { 0x75, 0x2C }, /* GREEK LOWER NUMERAL SIGN -> ',' */
+ { 0x7E, 0x3F }, /* GREEK QUESTION MARK -> '?' */
+ { 0x86, 0x41 }, /* GREEK CAPITAL LETTER ALPHA WITH TONOS -> 'A' */
+ { 0x87, 0x3B }, /* GREEK ANO TELEIA -> ';' */
+ { 0x88, 0x45 }, /* GREEK CAPITAL LETTER EPSILON WITH TONOS -> 'E' */
+ { 0x89, 0x45 }, /* GREEK CAPITAL LETTER ETA WITH TONOS -> 'E' */
+ { 0x8A, 0x49 }, /* GREEK CAPITAL LETTER IOTA WITH TONOS -> 'I' */
+ { 0x8C, 0x4F }, /* GREEK CAPITAL LETTER OMICRON WITH TONOS -> 'O' */
+ { 0x8E, 0x55 }, /* GREEK CAPITAL LETTER UPSILON WITH TONOS -> 'U' */
+ { 0x8F, 0x4F }, /* GREEK CAPITAL LETTER OMEGA WITH TONOS -> 'O' */
+ { 0x90, 0x49 }, /* GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -> 'I' */
+ { 0x91, 0x41 }, /* GREEK CAPITAL LETTER ALPHA -> 'A' */
+ { 0x92, 0x42 }, /* GREEK CAPITAL LETTER BETA -> 'B' */
+ { 0x93, 0x47 }, /* GREEK CAPITAL LETTER GAMMA -> 'G' */
+ { 0x94, 0x44 }, /* GREEK CAPITAL LETTER DELTA -> 'D' */
+ { 0x95, 0x45 }, /* GREEK CAPITAL LETTER EPSILON -> 'E' */
+ { 0x96, 0x5A }, /* GREEK CAPITAL LETTER ZETA -> 'Z' */
+ { 0x97, 0x45 }, /* GREEK CAPITAL LETTER ETA -> 'E' */
+ { 0x99, 0x49 }, /* GREEK CAPITAL LETTER IOTA -> 'I' */
+ { 0x9A, 0x4B }, /* GREEK CAPITAL LETTER KAPPA -> 'K' */
+ { 0x9B, 0x4C }, /* GREEK CAPITAL LETTER LAMDA -> 'L' */
+ { 0x9C, 0x4D }, /* GREEK CAPITAL LETTER MU -> 'M' */
+ { 0x9D, 0x4E }, /* GREEK CAPITAL LETTER NU -> 'N' */
+ { 0x9F, 0x4F }, /* GREEK CAPITAL LETTER OMICRON -> 'O' */
+ { 0xA0, 0x50 }, /* GREEK CAPITAL LETTER PI -> 'P' */
+ { 0xA1, 0x52 }, /* GREEK CAPITAL LETTER RHO -> 'R' */
+ { 0xA3, 0x53 }, /* GREEK CAPITAL LETTER SIGMA -> 'S' */
+ { 0xA4, 0x54 }, /* GREEK CAPITAL LETTER TAU -> 'T' */
+ { 0xA5, 0x55 }, /* GREEK CAPITAL LETTER UPSILON -> 'U' */
+ { 0xA9, 0x4F }, /* GREEK CAPITAL LETTER OMEGA -> 'O' */
+ { 0xAA, 0x49 }, /* GREEK CAPITAL LETTER IOTA WITH DIALYTIKA -> 'I' */
+ { 0xAB, 0x55 }, /* GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA -> 'U' */
+ { 0xAC, 0x61 }, /* GREEK SMALL LETTER ALPHA WITH TONOS -> 'a' */
+ { 0xAD, 0x65 }, /* GREEK SMALL LETTER EPSILON WITH TONOS -> 'e' */
+ { 0xAE, 0x65 }, /* GREEK SMALL LETTER ETA WITH TONOS -> 'e' */
+ { 0xAF, 0x69 }, /* GREEK SMALL LETTER IOTA WITH TONOS -> 'i' */
+ { 0xB0, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -> 'u' */
+ { 0xB1, 0x61 }, /* GREEK SMALL LETTER ALPHA -> 'a' */
+ { 0xB2, 0x62 }, /* GREEK SMALL LETTER BETA -> 'b' */
+ { 0xB3, 0x67 }, /* GREEK SMALL LETTER GAMMA -> 'g' */
+ { 0xB4, 0x64 }, /* GREEK SMALL LETTER DELTA -> 'd' */
+ { 0xB5, 0x65 }, /* GREEK SMALL LETTER EPSILON -> 'e' */
+ { 0xB6, 0x7A }, /* GREEK SMALL LETTER ZETA -> 'z' */
+ { 0xB7, 0x65 }, /* GREEK SMALL LETTER ETA -> 'e' */
+ { 0xB9, 0x69 }, /* GREEK SMALL LETTER IOTA -> 'i' */
+ { 0xBA, 0x6B }, /* GREEK SMALL LETTER KAPPA -> 'k' */
+ { 0xBB, 0x6C }, /* GREEK SMALL LETTER LAMDA -> 'l' */
+ { 0xBC, 0x6D }, /* GREEK SMALL LETTER MU -> 'm' */
+ { 0xBD, 0x6E }, /* GREEK SMALL LETTER NU -> 'n' */
+ { 0xBE, 0x78 }, /* GREEK SMALL LETTER XI -> 'x' */
+ { 0xBF, 0x6F }, /* GREEK SMALL LETTER OMICRON -> 'o' */
+ { 0xC0, 0x70 }, /* GREEK SMALL LETTER PI -> 'p' */
+ { 0xC1, 0x72 }, /* GREEK SMALL LETTER RHO -> 'r' */
+ { 0xC2, 0x73 }, /* GREEK SMALL LETTER FINAL SIGMA -> 's' */
+ { 0xC3, 0x73 }, /* GREEK SMALL LETTER SIGMA -> 's' */
+ { 0xC4, 0x74 }, /* GREEK SMALL LETTER TAU -> 't' */
+ { 0xC5, 0x75 }, /* GREEK SMALL LETTER UPSILON -> 'u' */
+ { 0xC9, 0x6F }, /* GREEK SMALL LETTER OMEGA -> 'o' */
+ { 0xCA, 0x69 }, /* GREEK SMALL LETTER IOTA WITH DIALYTIKA -> 'i' */
+ { 0xCB, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH DIALYTIKA -> 'u' */
+ { 0xCC, 0x6F }, /* GREEK SMALL LETTER OMICRON WITH TONOS -> 'o' */
+ { 0xCD, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH TONOS -> 'u' */
+ { 0xCE, 0x6F }, /* GREEK SMALL LETTER OMEGA WITH TONOS -> 'o' */
+ { 0xD0, 0x62 }, /* GREEK BETA SYMBOL -> 'b' */
+ { 0xD2, 0x00 }, /* GREEK UPSILON WITH HOOK SYMBOL -> ... */
+ { 0xD4, 0x55 }, /* GREEK UPSILON WITH DIAERESIS AND HOOK SYMBOL -> 'U' */
+ { 0xD6, 0x70 }, /* GREEK PI SYMBOL -> 'p' */
+ { 0xD7, 0x26 }, /* GREEK KAI SYMBOL -> '&' */
+ { 0xDC, 0x57 }, /* GREEK LETTER DIGAMMA -> 'W' */
+ { 0xDD, 0x77 }, /* GREEK SMALL LETTER DIGAMMA -> 'w' */
+ { 0xDE, 0x51 }, /* GREEK LETTER KOPPA -> 'Q' */
+ { 0xDF, 0x71 }, /* GREEK SMALL LETTER KOPPA -> 'q' */
+ { 0xE4, 0x46 }, /* COPTIC CAPITAL LETTER FEI -> 'F' */
+ { 0xE5, 0x66 }, /* COPTIC SMALL LETTER FEI -> 'f' */
+ { 0xE8, 0x48 }, /* COPTIC CAPITAL LETTER HORI -> 'H' */
+ { 0xE9, 0x68 }, /* COPTIC SMALL LETTER HORI -> 'h' */
+ { 0xEA, 0x47 }, /* COPTIC CAPITAL LETTER GANGIA -> 'G' */
+ { 0xEB, 0x67 }, /* COPTIC SMALL LETTER GANGIA -> 'g' */
+ { 0xF0, 0x6B }, /* GREEK KAPPA SYMBOL -> 'k' */
+ { 0xF1, 0x72 }, /* GREEK RHO SYMBOL -> 'r' */
+ { 0xF2, 0x63 }, /* GREEK LUNATE SIGMA SYMBOL -> 'c' */
+ { 0xF3, 0x6A }, /* GREEK LETTER YOT -> 'j' */
+ /* Entries for page 0x04 */
+ { 0x06, 0x49 }, /* CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I -> 'I' */
+ { 0x08, 0x4A }, /* CYRILLIC CAPITAL LETTER JE -> 'J' */
+ { 0x0D, 0x49 }, /* CYRILLIC CAPITAL LETTER I WITH GRAVE -> 'I' */
+ { 0x0E, 0x55 }, /* CYRILLIC CAPITAL LETTER SHORT U -> 'U' */
+ { 0x10, 0x41 }, /* CYRILLIC CAPITAL LETTER A -> 'A' */
+ { 0x11, 0x42 }, /* CYRILLIC CAPITAL LETTER BE -> 'B' */
+ { 0x12, 0x56 }, /* CYRILLIC CAPITAL LETTER VE -> 'V' */
+ { 0x13, 0x47 }, /* CYRILLIC CAPITAL LETTER GHE -> 'G' */
+ { 0x14, 0x44 }, /* CYRILLIC CAPITAL LETTER DE -> 'D' */
+ { 0x15, 0x45 }, /* CYRILLIC CAPITAL LETTER IE -> 'E' */
+ { 0x17, 0x5A }, /* CYRILLIC CAPITAL LETTER ZE -> 'Z' */
+ { 0x18, 0x49 }, /* CYRILLIC CAPITAL LETTER I -> 'I' */
+ { 0x19, 0x49 }, /* CYRILLIC CAPITAL LETTER SHORT I -> 'I' */
+ { 0x1A, 0x4B }, /* CYRILLIC CAPITAL LETTER KA -> 'K' */
+ { 0x1B, 0x4C }, /* CYRILLIC CAPITAL LETTER EL -> 'L' */
+ { 0x1C, 0x4D }, /* CYRILLIC CAPITAL LETTER EM -> 'M' */
+ { 0x1D, 0x4E }, /* CYRILLIC CAPITAL LETTER EN -> 'N' */
+ { 0x1E, 0x4F }, /* CYRILLIC CAPITAL LETTER O -> 'O' */
+ { 0x1F, 0x50 }, /* CYRILLIC CAPITAL LETTER PE -> 'P' */
+ { 0x20, 0x52 }, /* CYRILLIC CAPITAL LETTER ER -> 'R' */
+ { 0x21, 0x53 }, /* CYRILLIC CAPITAL LETTER ES -> 'S' */
+ { 0x22, 0x54 }, /* CYRILLIC CAPITAL LETTER TE -> 'T' */
+ { 0x23, 0x55 }, /* CYRILLIC CAPITAL LETTER U -> 'U' */
+ { 0x24, 0x46 }, /* CYRILLIC CAPITAL LETTER EF -> 'F' */
+ { 0x2A, 0x27 }, /* CYRILLIC CAPITAL LETTER HARD SIGN -> ''' */
+ { 0x2B, 0x59 }, /* CYRILLIC CAPITAL LETTER YERU -> 'Y' */
+ { 0x2C, 0x27 }, /* CYRILLIC CAPITAL LETTER SOFT SIGN -> ''' */
+ { 0x2D, 0x45 }, /* CYRILLIC CAPITAL LETTER E -> 'E' */
+ { 0x30, 0x61 }, /* CYRILLIC SMALL LETTER A -> 'a' */
+ { 0x31, 0x62 }, /* CYRILLIC SMALL LETTER BE -> 'b' */
+ { 0x32, 0x76 }, /* CYRILLIC SMALL LETTER VE -> 'v' */
+ { 0x33, 0x67 }, /* CYRILLIC SMALL LETTER GHE -> 'g' */
+ { 0x34, 0x64 }, /* CYRILLIC SMALL LETTER DE -> 'd' */
+ { 0x35, 0x65 }, /* CYRILLIC SMALL LETTER IE -> 'e' */
+ { 0x37, 0x7A }, /* CYRILLIC SMALL LETTER ZE -> 'z' */
+ { 0x38, 0x69 }, /* CYRILLIC SMALL LETTER I -> 'i' */
+ { 0x39, 0x69 }, /* CYRILLIC SMALL LETTER SHORT I -> 'i' */
+ { 0x3A, 0x6B }, /* CYRILLIC SMALL LETTER KA -> 'k' */
+ { 0x3B, 0x6C }, /* CYRILLIC SMALL LETTER EL -> 'l' */
+ { 0x3C, 0x6D }, /* CYRILLIC SMALL LETTER EM -> 'm' */
+ { 0x3D, 0x6E }, /* CYRILLIC SMALL LETTER EN -> 'n' */
+ { 0x3E, 0x6F }, /* CYRILLIC SMALL LETTER O -> 'o' */
+ { 0x3F, 0x70 }, /* CYRILLIC SMALL LETTER PE -> 'p' */
+ { 0x40, 0x72 }, /* CYRILLIC SMALL LETTER ER -> 'r' */
+ { 0x41, 0x73 }, /* CYRILLIC SMALL LETTER ES -> 's' */
+ { 0x42, 0x74 }, /* CYRILLIC SMALL LETTER TE -> 't' */
+ { 0x43, 0x75 }, /* CYRILLIC SMALL LETTER U -> 'u' */
+ { 0x44, 0x66 }, /* CYRILLIC SMALL LETTER EF -> 'f' */
+ { 0x4A, 0x27 }, /* CYRILLIC SMALL LETTER HARD SIGN -> ''' */
+ { 0x4B, 0x79 }, /* CYRILLIC SMALL LETTER YERU -> 'y' */
+ { 0x4C, 0x27 }, /* CYRILLIC SMALL LETTER SOFT SIGN -> ''' */
+ { 0x4D, 0x65 }, /* CYRILLIC SMALL LETTER E -> 'e' */
+ { 0x56, 0x69 }, /* CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I -> 'i' */
+ { 0x58, 0x6A }, /* CYRILLIC SMALL LETTER JE -> 'j' */
+ { 0x5D, 0x69 }, /* CYRILLIC SMALL LETTER I WITH GRAVE -> 'i' */
+ { 0x5E, 0x75 }, /* CYRILLIC SMALL LETTER SHORT U -> 'u' */
+ { 0x60, 0x4F }, /* CYRILLIC CAPITAL LETTER OMEGA -> 'O' */
+ { 0x61, 0x6F }, /* CYRILLIC SMALL LETTER OMEGA -> 'o' */
+ { 0x62, 0x45 }, /* CYRILLIC CAPITAL LETTER YAT -> 'E' */
+ { 0x63, 0x65 }, /* CYRILLIC SMALL LETTER YAT -> 'e' */
+ { 0x66, 0x45 }, /* CYRILLIC CAPITAL LETTER LITTLE YUS -> 'E' */
+ { 0x67, 0x65 }, /* CYRILLIC SMALL LETTER LITTLE YUS -> 'e' */
+ { 0x6A, 0x4F }, /* CYRILLIC CAPITAL LETTER BIG YUS -> 'O' */
+ { 0x6B, 0x6F }, /* CYRILLIC SMALL LETTER BIG YUS -> 'o' */
+ { 0x72, 0x46 }, /* CYRILLIC CAPITAL LETTER FITA -> 'F' */
+ { 0x73, 0x66 }, /* CYRILLIC SMALL LETTER FITA -> 'f' */
+ { 0x74, 0x59 }, /* CYRILLIC CAPITAL LETTER IZHITSA -> 'Y' */
+ { 0x75, 0x79 }, /* CYRILLIC SMALL LETTER IZHITSA -> 'y' */
+ { 0x76, 0x59 }, /* CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT -> 'Y' */
+ { 0x77, 0x79 }, /* CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT -> 'y' */
+ { 0x78, 0x75 }, /* CYRILLIC CAPITAL LETTER UK -> 'u' */
+ { 0x79, 0x75 }, /* CYRILLIC SMALL LETTER UK -> 'u' */
+ { 0x7A, 0x4F }, /* CYRILLIC CAPITAL LETTER ROUND OMEGA -> 'O' */
+ { 0x7B, 0x6F }, /* CYRILLIC SMALL LETTER ROUND OMEGA -> 'o' */
+ { 0x7C, 0x4F }, /* CYRILLIC CAPITAL LETTER OMEGA WITH TITLO -> 'O' */
+ { 0x7D, 0x6F }, /* CYRILLIC SMALL LETTER OMEGA WITH TITLO -> 'o' */
+ { 0x80, 0x51 }, /* CYRILLIC CAPITAL LETTER KOPPA -> 'Q' */
+ { 0x81, 0x71 }, /* CYRILLIC SMALL LETTER KOPPA -> 'q' */
+ { 0x8C, 0x22 }, /* CYRILLIC CAPITAL LETTER SEMISOFT SIGN -> '"' */
+ { 0x8D, 0x22 }, /* CYRILLIC SMALL LETTER SEMISOFT SIGN -> '"' */
+ { 0xAE, 0x55 }, /* CYRILLIC CAPITAL LETTER STRAIGHT U -> 'U' */
+ { 0xAF, 0x75 }, /* CYRILLIC SMALL LETTER STRAIGHT U -> 'u' */
+ { 0xBA, 0x48 }, /* CYRILLIC CAPITAL LETTER SHHA -> 'H' */
+ { 0xBB, 0x68 }, /* CYRILLIC SMALL LETTER SHHA -> 'h' */
+ { 0xC0, 0x60 }, /* CYRILLIC LETTER PALOCHKA -> '`' */
+ { 0xD0, 0x61 }, /* CYRILLIC CAPITAL LETTER A WITH BREVE -> 'a' */
+ { 0xD1, 0x61 }, /* CYRILLIC SMALL LETTER A WITH BREVE -> 'a' */
+ { 0xD2, 0x41 }, /* CYRILLIC CAPITAL LETTER A WITH DIAERESIS -> 'A' */
+ { 0xD3, 0x61 }, /* CYRILLIC SMALL LETTER A WITH DIAERESIS -> 'a' */
+ { 0xD8, 0x00 }, /* CYRILLIC CAPITAL LETTER SCHWA -> ... */
+ { 0xDB, 0x40 }, /* CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS -> '@' */
+ { 0xDE, 0x5A }, /* CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS -> 'Z' */
+ { 0xDF, 0x7A }, /* CYRILLIC SMALL LETTER ZE WITH DIAERESIS -> 'z' */
+ { 0xE2, 0x49 }, /* CYRILLIC CAPITAL LETTER I WITH MACRON -> 'I' */
+ { 0xE3, 0x69 }, /* CYRILLIC SMALL LETTER I WITH MACRON -> 'i' */
+ { 0xE4, 0x49 }, /* CYRILLIC CAPITAL LETTER I WITH DIAERESIS -> 'I' */
+ { 0xE5, 0x69 }, /* CYRILLIC SMALL LETTER I WITH DIAERESIS -> 'i' */
+ { 0xE6, 0x4F }, /* CYRILLIC CAPITAL LETTER O WITH DIAERESIS -> 'O' */
+ { 0xE7, 0x6F }, /* CYRILLIC SMALL LETTER O WITH DIAERESIS -> 'o' */
+ { 0xE8, 0x4F }, /* CYRILLIC CAPITAL LETTER BARRED O -> 'O' */
+ { 0xE9, 0x6F }, /* CYRILLIC SMALL LETTER BARRED O -> 'o' */
+ { 0xEA, 0x4F }, /* CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS -> 'O' */
+ { 0xEB, 0x6F }, /* CYRILLIC SMALL LETTER BARRED O WITH DIAERESIS -> 'o' */
+ { 0xEC, 0x45 }, /* CYRILLIC CAPITAL LETTER E WITH DIAERESIS -> 'E' */
+ { 0xED, 0x65 }, /* CYRILLIC SMALL LETTER E WITH DIAERESIS -> 'e' */
+ { 0xEE, 0x55 }, /* CYRILLIC CAPITAL LETTER U WITH MACRON -> 'U' */
+ { 0xEF, 0x75 }, /* CYRILLIC SMALL LETTER U WITH MACRON -> 'u' */
+ { 0xF0, 0x55 }, /* CYRILLIC CAPITAL LETTER U WITH DIAERESIS -> 'U' */
+ { 0xF1, 0x75 }, /* CYRILLIC SMALL LETTER U WITH DIAERESIS -> 'u' */
+ { 0xF2, 0x55 }, /* CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE -> 'U' */
+ { 0xF3, 0x75 }, /* CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE -> 'u' */
+ { 0xF8, 0x59 }, /* CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS -> 'Y' */
+ { 0xF9, 0x79 }, /* CYRILLIC SMALL LETTER YERU WITH DIAERESIS -> 'y' */
+ /* Entries for page 0x05 */
+ { 0x31, 0x41 }, /* ARMENIAN CAPITAL LETTER AYB -> 'A' */
+ { 0x32, 0x42 }, /* ARMENIAN CAPITAL LETTER BEN -> 'B' */
+ { 0x33, 0x47 }, /* ARMENIAN CAPITAL LETTER GIM -> 'G' */
+ { 0x34, 0x44 }, /* ARMENIAN CAPITAL LETTER DA -> 'D' */
+ { 0x35, 0x45 }, /* ARMENIAN CAPITAL LETTER ECH -> 'E' */
+ { 0x36, 0x5A }, /* ARMENIAN CAPITAL LETTER ZA -> 'Z' */
+ { 0x37, 0x45 }, /* ARMENIAN CAPITAL LETTER EH -> 'E' */
+ { 0x38, 0x45 }, /* ARMENIAN CAPITAL LETTER ET -> 'E' */
+ { 0x3B, 0x49 }, /* ARMENIAN CAPITAL LETTER INI -> 'I' */
+ { 0x3C, 0x4C }, /* ARMENIAN CAPITAL LETTER LIWN -> 'L' */
+ { 0x3F, 0x4B }, /* ARMENIAN CAPITAL LETTER KEN -> 'K' */
+ { 0x40, 0x48 }, /* ARMENIAN CAPITAL LETTER HO -> 'H' */
+ { 0x44, 0x4D }, /* ARMENIAN CAPITAL LETTER MEN -> 'M' */
+ { 0x45, 0x59 }, /* ARMENIAN CAPITAL LETTER YI -> 'Y' */
+ { 0x46, 0x4E }, /* ARMENIAN CAPITAL LETTER NOW -> 'N' */
+ { 0x48, 0x4F }, /* ARMENIAN CAPITAL LETTER VO -> 'O' */
+ { 0x4A, 0x50 }, /* ARMENIAN CAPITAL LETTER PEH -> 'P' */
+ { 0x4B, 0x4A }, /* ARMENIAN CAPITAL LETTER JHEH -> 'J' */
+ { 0x4D, 0x53 }, /* ARMENIAN CAPITAL LETTER SEH -> 'S' */
+ { 0x4E, 0x56 }, /* ARMENIAN CAPITAL LETTER VEW -> 'V' */
+ { 0x4F, 0x54 }, /* ARMENIAN CAPITAL LETTER TIWN -> 'T' */
+ { 0x50, 0x52 }, /* ARMENIAN CAPITAL LETTER REH -> 'R' */
+ { 0x52, 0x57 }, /* ARMENIAN CAPITAL LETTER YIWN -> 'W' */
+ { 0x55, 0x4F }, /* ARMENIAN CAPITAL LETTER OH -> 'O' */
+ { 0x56, 0x46 }, /* ARMENIAN CAPITAL LETTER FEH -> 'F' */
+ { 0x59, 0x3C }, /* ARMENIAN MODIFIER LETTER LEFT HALF RING -> '<' */
+ { 0x5A, 0x27 }, /* ARMENIAN APOSTROPHE -> ''' */
+ { 0x5B, 0x2F }, /* ARMENIAN EMPHASIS MARK -> '/' */
+ { 0x5C, 0x21 }, /* ARMENIAN EXCLAMATION MARK -> '!' */
+ { 0x5D, 0x2C }, /* ARMENIAN COMMA -> ',' */
+ { 0x5E, 0x3F }, /* ARMENIAN QUESTION MARK -> '?' */
+ { 0x5F, 0x2E }, /* ARMENIAN ABBREVIATION MARK -> '.' */
+ { 0x61, 0x61 }, /* ARMENIAN SMALL LETTER AYB -> 'a' */
+ { 0x62, 0x62 }, /* ARMENIAN SMALL LETTER BEN -> 'b' */
+ { 0x63, 0x67 }, /* ARMENIAN SMALL LETTER GIM -> 'g' */
+ { 0x64, 0x64 }, /* ARMENIAN SMALL LETTER DA -> 'd' */
+ { 0x65, 0x65 }, /* ARMENIAN SMALL LETTER ECH -> 'e' */
+ { 0x66, 0x7A }, /* ARMENIAN SMALL LETTER ZA -> 'z' */
+ { 0x67, 0x65 }, /* ARMENIAN SMALL LETTER EH -> 'e' */
+ { 0x68, 0x65 }, /* ARMENIAN SMALL LETTER ET -> 'e' */
+ { 0x6B, 0x69 }, /* ARMENIAN SMALL LETTER INI -> 'i' */
+ { 0x6C, 0x6C }, /* ARMENIAN SMALL LETTER LIWN -> 'l' */
+ { 0x6F, 0x6B }, /* ARMENIAN SMALL LETTER KEN -> 'k' */
+ { 0x70, 0x68 }, /* ARMENIAN SMALL LETTER HO -> 'h' */
+ { 0x74, 0x6D }, /* ARMENIAN SMALL LETTER MEN -> 'm' */
+ { 0x75, 0x79 }, /* ARMENIAN SMALL LETTER YI -> 'y' */
+ { 0x76, 0x6E }, /* ARMENIAN SMALL LETTER NOW -> 'n' */
+ { 0x78, 0x6F }, /* ARMENIAN SMALL LETTER VO -> 'o' */
+ { 0x7A, 0x70 }, /* ARMENIAN SMALL LETTER PEH -> 'p' */
+ { 0x7B, 0x6A }, /* ARMENIAN SMALL LETTER JHEH -> 'j' */
+ { 0x7D, 0x73 }, /* ARMENIAN SMALL LETTER SEH -> 's' */
+ { 0x7E, 0x76 }, /* ARMENIAN SMALL LETTER VEW -> 'v' */
+ { 0x7F, 0x74 }, /* ARMENIAN SMALL LETTER TIWN -> 't' */
+ { 0x80, 0x72 }, /* ARMENIAN SMALL LETTER REH -> 'r' */
+ { 0x82, 0x77 }, /* ARMENIAN SMALL LETTER YIWN -> 'w' */
+ { 0x85, 0x6F }, /* ARMENIAN SMALL LETTER OH -> 'o' */
+ { 0x86, 0x66 }, /* ARMENIAN SMALL LETTER FEH -> 'f' */
+ { 0x89, 0x3A }, /* ARMENIAN FULL STOP -> ':' */
+ { 0x8A, 0x2D }, /* ARMENIAN HYPHEN -> '-' */
+ { 0xB1, 0x65 }, /* HEBREW POINT HATAF SEGOL -> 'e' */
+ { 0xB2, 0x61 }, /* HEBREW POINT HATAF PATAH -> 'a' */
+ { 0xB3, 0x6F }, /* HEBREW POINT HATAF QAMATS -> 'o' */
+ { 0xB4, 0x69 }, /* HEBREW POINT HIRIQ -> 'i' */
+ { 0xB5, 0x65 }, /* HEBREW POINT TSERE -> 'e' */
+ { 0xB6, 0x65 }, /* HEBREW POINT SEGOL -> 'e' */
+ { 0xB7, 0x61 }, /* HEBREW POINT PATAH -> 'a' */
+ { 0xB8, 0x61 }, /* HEBREW POINT QAMATS -> 'a' */
+ { 0xB9, 0x6F }, /* HEBREW POINT HOLAM -> 'o' */
+ { 0xBA, 0x6F }, /* HEBREW POINT HOLAM HASER FOR VAV -> 'o' */
+ { 0xBB, 0x75 }, /* HEBREW POINT QUBUTS -> 'u' */
+ { 0xBE, 0x2D }, /* HEBREW PUNCTUATION MAQAF -> '-' */
+ { 0xC0, 0x7C }, /* HEBREW PUNCTUATION PASEQ -> '|' */
+ { 0xC3, 0x2E }, /* HEBREW PUNCTUATION SOF PASUQ -> '.' */
+ { 0xC6, 0x6E }, /* HEBREW PUNCTUATION NUN HAFUKHA -> 'n' */
+ { 0xC7, 0x6F }, /* HEBREW POINT QAMATS QATAN -> 'o' */
+ { 0xD0, 0x41 }, /* HEBREW LETTER ALEF -> 'A' */
+ { 0xD1, 0x62 }, /* HEBREW LETTER BET -> 'b' */
+ { 0xD2, 0x67 }, /* HEBREW LETTER GIMEL -> 'g' */
+ { 0xD3, 0x64 }, /* HEBREW LETTER DALET -> 'd' */
+ { 0xD4, 0x68 }, /* HEBREW LETTER HE -> 'h' */
+ { 0xD5, 0x76 }, /* HEBREW LETTER VAV -> 'v' */
+ { 0xD6, 0x7A }, /* HEBREW LETTER ZAYIN -> 'z' */
+ { 0xD7, 0x48 }, /* HEBREW LETTER HET -> 'H' */
+ { 0xD8, 0x54 }, /* HEBREW LETTER TET -> 'T' */
+ { 0xD9, 0x79 }, /* HEBREW LETTER YOD -> 'y' */
+ { 0xDC, 0x6C }, /* HEBREW LETTER LAMED -> 'l' */
+ { 0xDD, 0x6D }, /* HEBREW LETTER FINAL MEM -> 'm' */
+ { 0xDE, 0x6D }, /* HEBREW LETTER MEM -> 'm' */
+ { 0xDF, 0x6E }, /* HEBREW LETTER FINAL NUN -> 'n' */
+ { 0xE0, 0x6E }, /* HEBREW LETTER NUN -> 'n' */
+ { 0xE1, 0x73 }, /* HEBREW LETTER SAMEKH -> 's' */
+ { 0xE2, 0x60 }, /* HEBREW LETTER AYIN -> '`' */
+ { 0xE3, 0x70 }, /* HEBREW LETTER FINAL PE -> 'p' */
+ { 0xE4, 0x70 }, /* HEBREW LETTER PE -> 'p' */
+ { 0xE7, 0x6B }, /* HEBREW LETTER QOF -> 'k' */
+ { 0xE8, 0x72 }, /* HEBREW LETTER RESH -> 'r' */
+ { 0xEA, 0x74 }, /* HEBREW LETTER TAV -> 't' */
+ { 0xF0, 0x56 }, /* HEBREW LIGATURE YIDDISH DOUBLE VAV -> 'V' */
+ { 0xF3, 0x27 }, /* HEBREW PUNCTUATION GERESH -> ''' */
+ { 0xF4, 0x22 }, /* HEBREW PUNCTUATION GERSHAYIM -> '"' */
+ /* Entries for page 0x06 */
+ { 0x0C, 0x2C }, /* ARABIC COMMA -> ',' */
+ { 0x1B, 0x3B }, /* ARABIC SEMICOLON -> ';' */
+ { 0x1F, 0x3F }, /* ARABIC QUESTION MARK -> '?' */
+ { 0x22, 0x61 }, /* ARABIC LETTER ALEF WITH MADDA ABOVE -> 'a' */
+ { 0x23, 0x27 }, /* ARABIC LETTER ALEF WITH HAMZA ABOVE -> ''' */
+ { 0x28, 0x62 }, /* ARABIC LETTER BEH -> 'b' */
+ { 0x29, 0x40 }, /* ARABIC LETTER TEH MARBUTA -> '@' */
+ { 0x2A, 0x74 }, /* ARABIC LETTER TEH -> 't' */
+ { 0x2C, 0x6A }, /* ARABIC LETTER JEEM -> 'j' */
+ { 0x2D, 0x48 }, /* ARABIC LETTER HAH -> 'H' */
+ { 0x2F, 0x64 }, /* ARABIC LETTER DAL -> 'd' */
+ { 0x31, 0x72 }, /* ARABIC LETTER REH -> 'r' */
+ { 0x32, 0x7A }, /* ARABIC LETTER ZAIN -> 'z' */
+ { 0x33, 0x73 }, /* ARABIC LETTER SEEN -> 's' */
+ { 0x35, 0x53 }, /* ARABIC LETTER SAD -> 'S' */
+ { 0x36, 0x44 }, /* ARABIC LETTER DAD -> 'D' */
+ { 0x37, 0x54 }, /* ARABIC LETTER TAH -> 'T' */
+ { 0x38, 0x5A }, /* ARABIC LETTER ZAH -> 'Z' */
+ { 0x39, 0x60 }, /* ARABIC LETTER AIN -> '`' */
+ { 0x3A, 0x47 }, /* ARABIC LETTER GHAIN -> 'G' */
+ { 0x41, 0x66 }, /* ARABIC LETTER FEH -> 'f' */
+ { 0x42, 0x71 }, /* ARABIC LETTER QAF -> 'q' */
+ { 0x43, 0x6B }, /* ARABIC LETTER KAF -> 'k' */
+ { 0x44, 0x6C }, /* ARABIC LETTER LAM -> 'l' */
+ { 0x45, 0x6D }, /* ARABIC LETTER MEEM -> 'm' */
+ { 0x46, 0x6E }, /* ARABIC LETTER NOON -> 'n' */
+ { 0x47, 0x68 }, /* ARABIC LETTER HEH -> 'h' */
+ { 0x48, 0x77 }, /* ARABIC LETTER WAW -> 'w' */
+ { 0x49, 0x7E }, /* ARABIC LETTER ALEF MAKSURA -> '~' */
+ { 0x4A, 0x79 }, /* ARABIC LETTER YEH -> 'y' */
+ { 0x4E, 0x61 }, /* ARABIC FATHA -> 'a' */
+ { 0x4F, 0x75 }, /* ARABIC DAMMA -> 'u' */
+ { 0x50, 0x69 }, /* ARABIC KASRA -> 'i' */
+ { 0x51, 0x57 }, /* ARABIC SHADDA -> 'W' */
+ { 0x54, 0x27 }, /* ARABIC HAMZA ABOVE -> ''' */
+ { 0x55, 0x27 }, /* ARABIC HAMZA BELOW -> ''' */
+ { 0x60, 0x30 }, /* ARABIC-INDIC DIGIT ZERO -> '0' */
+ { 0x61, 0x31 }, /* ARABIC-INDIC DIGIT ONE -> '1' */
+ { 0x62, 0x32 }, /* ARABIC-INDIC DIGIT TWO -> '2' */
+ { 0x63, 0x33 }, /* ARABIC-INDIC DIGIT THREE -> '3' */
+ { 0x64, 0x34 }, /* ARABIC-INDIC DIGIT FOUR -> '4' */
+ { 0x65, 0x35 }, /* ARABIC-INDIC DIGIT FIVE -> '5' */
+ { 0x66, 0x36 }, /* ARABIC-INDIC DIGIT SIX -> '6' */
+ { 0x67, 0x37 }, /* ARABIC-INDIC DIGIT SEVEN -> '7' */
+ { 0x68, 0x38 }, /* ARABIC-INDIC DIGIT EIGHT -> '8' */
+ { 0x69, 0x39 }, /* ARABIC-INDIC DIGIT NINE -> '9' */
+ { 0x6A, 0x25 }, /* ARABIC PERCENT SIGN -> '%' */
+ { 0x6B, 0x2E }, /* ARABIC DECIMAL SEPARATOR -> '.' */
+ { 0x6C, 0x2C }, /* ARABIC THOUSANDS SEPARATOR -> ',' */
+ { 0x6D, 0x2A }, /* ARABIC FIVE POINTED STAR -> '*' */
+ { 0x71, 0x00 }, /* ARABIC LETTER ALEF WASLA -> ... */
+ { 0x73, 0x27 }, /* ARABIC LETTER ALEF WITH WAVY HAMZA BELOW -> ''' */
+ { 0x75, 0x27 }, /* ARABIC LETTER HIGH HAMZA ALEF -> ''' */
+ { 0x7B, 0x62 }, /* ARABIC LETTER BEEH -> 'b' */
+ { 0x7C, 0x74 }, /* ARABIC LETTER TEH WITH RING -> 't' */
+ { 0x7D, 0x54 }, /* ARABIC LETTER TEH WITH THREE DOTS ABOVE DOWNWARDS -> 'T' */
+ { 0x7E, 0x70 }, /* ARABIC LETTER PEH -> 'p' */
+ { 0x82, 0x48 }, /* ARABIC LETTER HAH WITH TWO DOTS VERTICAL ABOVE -> 'H' */
+ { 0x85, 0x48 }, /* ARABIC LETTER HAH WITH THREE DOTS ABOVE -> 'H' */
+ { 0x89, 0x44 }, /* ARABIC LETTER DAL WITH RING -> 'D' */
+ { 0x8A, 0x44 }, /* ARABIC LETTER DAL WITH DOT BELOW -> 'D' */
+ { 0x8E, 0x64 }, /* ARABIC LETTER DUL -> 'd' */
+ { 0x8F, 0x44 }, /* ARABIC LETTER DAL WITH THREE DOTS ABOVE DOWNWARDS -> 'D' */
+ { 0x90, 0x44 }, /* ARABIC LETTER DAL WITH FOUR DOTS ABOVE -> 'D' */
+ { 0x92, 0x00 }, /* ARABIC LETTER REH WITH SMALL V -> ... */
+ { 0x97, 0x52 }, /* ARABIC LETTER REH WITH TWO DOTS ABOVE -> 'R' */
+ { 0x98, 0x6A }, /* ARABIC LETTER JEH -> 'j' */
+ { 0x99, 0x52 }, /* ARABIC LETTER REH WITH FOUR DOTS ABOVE -> 'R' */
+ { 0x9A, 0x00 }, /* ARABIC LETTER SEEN WITH DOT BELOW AND DOT ABOVE -> ... */
+ { 0x9E, 0x53 }, /* ARABIC LETTER SAD WITH THREE DOTS ABOVE -> 'S' */
+ { 0x9F, 0x54 }, /* ARABIC LETTER TAH WITH THREE DOTS ABOVE -> 'T' */
+ { 0xA1, 0x00 }, /* ARABIC LETTER DOTLESS FEH -> ... */
+ { 0xA3, 0x46 }, /* ARABIC LETTER FEH WITH DOT BELOW -> 'F' */
+ { 0xA4, 0x76 }, /* ARABIC LETTER VEH -> 'v' */
+ { 0xA5, 0x66 }, /* ARABIC LETTER FEH WITH THREE DOTS BELOW -> 'f' */
+ { 0xA7, 0x51 }, /* ARABIC LETTER QAF WITH DOT ABOVE -> 'Q' */
+ { 0xA8, 0x51 }, /* ARABIC LETTER QAF WITH THREE DOTS ABOVE -> 'Q' */
+ { 0xAA, 0x6B }, /* ARABIC LETTER SWASH KAF -> 'k' */
+ { 0xAB, 0x4B }, /* ARABIC LETTER KAF WITH RING -> 'K' */
+ { 0xAC, 0x4B }, /* ARABIC LETTER KAF WITH DOT ABOVE -> 'K' */
+ { 0xAE, 0x4B }, /* ARABIC LETTER KAF WITH THREE DOTS BELOW -> 'K' */
+ { 0xAF, 0x67 }, /* ARABIC LETTER GAF -> 'g' */
+ { 0xB0, 0x47 }, /* ARABIC LETTER GAF WITH RING -> 'G' */
+ { 0xB1, 0x4E }, /* ARABIC LETTER NGOEH -> 'N' */
+ { 0xB2, 0x00 }, /* ARABIC LETTER GAF WITH TWO DOTS BELOW -> ... */
+ { 0xB4, 0x47 }, /* ARABIC LETTER GAF WITH THREE DOTS ABOVE -> 'G' */
+ { 0xB5, 0x00 }, /* ARABIC LETTER LAM WITH SMALL V -> ... */
+ { 0xB8, 0x4C }, /* ARABIC LETTER LAM WITH THREE DOTS BELOW -> 'L' */
+ { 0xB9, 0x00 }, /* ARABIC LETTER NOON WITH DOT BELOW -> ... */
+ { 0xBD, 0x4E }, /* ARABIC LETTER NOON WITH THREE DOTS ABOVE -> 'N' */
+ { 0xBE, 0x68 }, /* ARABIC LETTER HEH DOACHASHMEE -> 'h' */
+ { 0xC1, 0x68 }, /* ARABIC LETTER HEH GOAL -> 'h' */
+ { 0xC2, 0x48 }, /* ARABIC LETTER HEH GOAL WITH HAMZA ABOVE -> 'H' */
+ { 0xC3, 0x40 }, /* ARABIC LETTER TEH MARBUTA GOAL -> '@' */
+ { 0xC4, 0x57 }, /* ARABIC LETTER WAW WITH RING -> 'W' */
+ { 0xC7, 0x75 }, /* ARABIC LETTER U -> 'u' */
+ { 0xCA, 0x57 }, /* ARABIC LETTER WAW WITH TWO DOTS ABOVE -> 'W' */
+ { 0xCB, 0x76 }, /* ARABIC LETTER VE -> 'v' */
+ { 0xCC, 0x79 }, /* ARABIC LETTER FARSI YEH -> 'y' */
+ { 0xCD, 0x59 }, /* ARABIC LETTER YEH WITH TAIL -> 'Y' */
+ { 0xCE, 0x59 }, /* ARABIC LETTER YEH WITH SMALL V -> 'Y' */
+ { 0xCF, 0x57 }, /* ARABIC LETTER WAW WITH DOT ABOVE -> 'W' */
+ { 0xD2, 0x79 }, /* ARABIC LETTER YEH BARREE -> 'y' */
+ { 0xD4, 0x2E }, /* ARABIC FULL STOP -> '.' */
+ { 0xDD, 0x40 }, /* ARABIC END OF AYAH -> '@' */
+ { 0xDE, 0x23 }, /* ARABIC START OF RUB EL HIZB -> '#' */
+ { 0xE9, 0x5E }, /* ARABIC PLACE OF SAJDAH -> '^' */
+ { 0xF0, 0x30 }, /* EXTENDED ARABIC-INDIC DIGIT ZERO -> '0' */
+ { 0xF1, 0x31 }, /* EXTENDED ARABIC-INDIC DIGIT ONE -> '1' */
+ { 0xF2, 0x32 }, /* EXTENDED ARABIC-INDIC DIGIT TWO -> '2' */
+ { 0xF3, 0x33 }, /* EXTENDED ARABIC-INDIC DIGIT THREE -> '3' */
+ { 0xF4, 0x34 }, /* EXTENDED ARABIC-INDIC DIGIT FOUR -> '4' */
+ { 0xF5, 0x35 }, /* EXTENDED ARABIC-INDIC DIGIT FIVE -> '5' */
+ { 0xF6, 0x36 }, /* EXTENDED ARABIC-INDIC DIGIT SIX -> '6' */
+ { 0xF7, 0x37 }, /* EXTENDED ARABIC-INDIC DIGIT SEVEN -> '7' */
+ { 0xF8, 0x38 }, /* EXTENDED ARABIC-INDIC DIGIT EIGHT -> '8' */
+ { 0xF9, 0x39 }, /* EXTENDED ARABIC-INDIC DIGIT NINE -> '9' */
+ { 0xFB, 0x44 }, /* ARABIC LETTER DAD WITH DOT BELOW -> 'D' */
+ { 0xFD, 0x26 }, /* ARABIC SIGN SINDHI AMPERSAND -> '&' */
+ /* Entries for page 0x07 */
+ { 0x01, 0x2F }, /* SYRIAC SUPRALINEAR FULL STOP -> '/' */
+ { 0x02, 0x2C }, /* SYRIAC SUBLINEAR FULL STOP -> ',' */
+ { 0x03, 0x21 }, /* SYRIAC SUPRALINEAR COLON -> '!' */
+ { 0x04, 0x21 }, /* SYRIAC SUBLINEAR COLON -> '!' */
+ { 0x05, 0x2D }, /* SYRIAC HORIZONTAL COLON -> '-' */
+ { 0x06, 0x2C }, /* SYRIAC COLON SKEWED LEFT -> ',' */
+ { 0x07, 0x2C }, /* SYRIAC COLON SKEWED RIGHT -> ',' */
+ { 0x08, 0x3B }, /* SYRIAC SUPRALINEAR COLON SKEWED LEFT -> ';' */
+ { 0x09, 0x3F }, /* SYRIAC SUBLINEAR COLON SKEWED RIGHT -> '?' */
+ { 0x0A, 0x7E }, /* SYRIAC CONTRACTION -> '~' */
+ { 0x0B, 0x7B }, /* SYRIAC HARKLEAN OBELUS -> '{' */
+ { 0x0C, 0x7D }, /* SYRIAC HARKLEAN METOBELUS -> '}' */
+ { 0x0D, 0x2A }, /* SYRIAC HARKLEAN ASTERISCUS -> '*' */
+ { 0x10, 0x27 }, /* SYRIAC LETTER ALAPH -> ''' */
+ { 0x12, 0x62 }, /* SYRIAC LETTER BETH -> 'b' */
+ { 0x13, 0x67 }, /* SYRIAC LETTER GAMAL -> 'g' */
+ { 0x14, 0x67 }, /* SYRIAC LETTER GAMAL GARSHUNI -> 'g' */
+ { 0x15, 0x64 }, /* SYRIAC LETTER DALATH -> 'd' */
+ { 0x16, 0x64 }, /* SYRIAC LETTER DOTLESS DALATH RISH -> 'd' */
+ { 0x17, 0x68 }, /* SYRIAC LETTER HE -> 'h' */
+ { 0x18, 0x77 }, /* SYRIAC LETTER WAW -> 'w' */
+ { 0x19, 0x7A }, /* SYRIAC LETTER ZAIN -> 'z' */
+ { 0x1A, 0x48 }, /* SYRIAC LETTER HETH -> 'H' */
+ { 0x1B, 0x74 }, /* SYRIAC LETTER TETH -> 't' */
+ { 0x1C, 0x74 }, /* SYRIAC LETTER TETH GARSHUNI -> 't' */
+ { 0x1D, 0x79 }, /* SYRIAC LETTER YUDH -> 'y' */
+ { 0x1F, 0x6B }, /* SYRIAC LETTER KAPH -> 'k' */
+ { 0x20, 0x6C }, /* SYRIAC LETTER LAMADH -> 'l' */
+ { 0x21, 0x6D }, /* SYRIAC LETTER MIM -> 'm' */
+ { 0x22, 0x6E }, /* SYRIAC LETTER NUN -> 'n' */
+ { 0x23, 0x73 }, /* SYRIAC LETTER SEMKATH -> 's' */
+ { 0x24, 0x73 }, /* SYRIAC LETTER FINAL SEMKATH -> 's' */
+ { 0x25, 0x60 }, /* SYRIAC LETTER E -> '`' */
+ { 0x26, 0x70 }, /* SYRIAC LETTER PE -> 'p' */
+ { 0x27, 0x70 }, /* SYRIAC LETTER REVERSED PE -> 'p' */
+ { 0x28, 0x53 }, /* SYRIAC LETTER SADHE -> 'S' */
+ { 0x29, 0x71 }, /* SYRIAC LETTER QAPH -> 'q' */
+ { 0x2A, 0x72 }, /* SYRIAC LETTER RISH -> 'r' */
+ { 0x2C, 0x74 }, /* SYRIAC LETTER TAW -> 't' */
+ { 0x30, 0x00 }, /* SYRIAC PTHAHA ABOVE -> ... */
+ { 0x32, 0x61 }, /* SYRIAC PTHAHA DOTTED -> 'a' */
+ { 0x33, 0x00 }, /* SYRIAC ZQAPHA ABOVE -> ... */
+ { 0x35, 0x41 }, /* SYRIAC ZQAPHA DOTTED -> 'A' */
+ { 0x36, 0x00 }, /* SYRIAC RBASA ABOVE -> ... */
+ { 0x38, 0x65 }, /* SYRIAC DOTTED ZLAMA HORIZONTAL -> 'e' */
+ { 0x39, 0x45 }, /* SYRIAC DOTTED ZLAMA ANGULAR -> 'E' */
+ { 0x3A, 0x69 }, /* SYRIAC HBASA ABOVE -> 'i' */
+ { 0x3B, 0x69 }, /* SYRIAC HBASA BELOW -> 'i' */
+ { 0x3C, 0x00 }, /* SYRIAC HBASA-ESASA DOTTED -> ... */
+ { 0x3E, 0x75 }, /* SYRIAC ESASA BELOW -> 'u' */
+ { 0x3F, 0x6F }, /* SYRIAC RWAHA -> 'o' */
+ { 0x41, 0x60 }, /* SYRIAC QUSHSHAYA -> '`' */
+ { 0x42, 0x27 }, /* SYRIAC RUKKAKHA -> ''' */
+ { 0x45, 0x58 }, /* SYRIAC THREE DOTS ABOVE -> 'X' */
+ { 0x46, 0x51 }, /* SYRIAC THREE DOTS BELOW -> 'Q' */
+ { 0x47, 0x40 }, /* SYRIAC OBLIQUE LINE ABOVE -> '@' */
+ { 0x48, 0x40 }, /* SYRIAC OBLIQUE LINE BELOW -> '@' */
+ { 0x49, 0x7C }, /* SYRIAC MUSIC -> '|' */
+ { 0x4A, 0x2B }, /* SYRIAC BARREKH -> '+' */
+ { 0x80, 0x68 }, /* THAANA LETTER HAA -> 'h' */
+ { 0x82, 0x6E }, /* THAANA LETTER NOONU -> 'n' */
+ { 0x83, 0x72 }, /* THAANA LETTER RAA -> 'r' */
+ { 0x84, 0x62 }, /* THAANA LETTER BAA -> 'b' */
+ { 0x85, 0x4C }, /* THAANA LETTER LHAVIYANI -> 'L' */
+ { 0x86, 0x6B }, /* THAANA LETTER KAAFU -> 'k' */
+ { 0x87, 0x27 }, /* THAANA LETTER ALIFU -> ''' */
+ { 0x88, 0x76 }, /* THAANA LETTER VAAVU -> 'v' */
+ { 0x89, 0x6D }, /* THAANA LETTER MEEMU -> 'm' */
+ { 0x8A, 0x66 }, /* THAANA LETTER FAAFU -> 'f' */
+ { 0x8D, 0x6C }, /* THAANA LETTER LAAMU -> 'l' */
+ { 0x8E, 0x67 }, /* THAANA LETTER GAAFU -> 'g' */
+ { 0x90, 0x73 }, /* THAANA LETTER SEENU -> 's' */
+ { 0x91, 0x64 }, /* THAANA LETTER DAVIYANI -> 'd' */
+ { 0x92, 0x7A }, /* THAANA LETTER ZAVIYANI -> 'z' */
+ { 0x93, 0x74 }, /* THAANA LETTER TAVIYANI -> 't' */
+ { 0x94, 0x79 }, /* THAANA LETTER YAA -> 'y' */
+ { 0x95, 0x70 }, /* THAANA LETTER PAVIYANI -> 'p' */
+ { 0x96, 0x6A }, /* THAANA LETTER JAVIYANI -> 'j' */
+ { 0x9C, 0x7A }, /* THAANA LETTER ZAA -> 'z' */
+ { 0x9E, 0x73 }, /* THAANA LETTER SAADHU -> 's' */
+ { 0x9F, 0x64 }, /* THAANA LETTER DAADHU -> 'd' */
+ { 0xA0, 0x74 }, /* THAANA LETTER TO -> 't' */
+ { 0xA1, 0x7A }, /* THAANA LETTER ZO -> 'z' */
+ { 0xA2, 0x60 }, /* THAANA LETTER AINU -> '`' */
+ { 0xA4, 0x71 }, /* THAANA LETTER QAAFU -> 'q' */
+ { 0xA5, 0x77 }, /* THAANA LETTER WAAVU -> 'w' */
+ { 0xA6, 0x61 }, /* THAANA ABAFILI -> 'a' */
+ { 0xA8, 0x69 }, /* THAANA IBIFILI -> 'i' */
+ { 0xAA, 0x75 }, /* THAANA UBUFILI -> 'u' */
+ { 0xAC, 0x65 }, /* THAANA EBEFILI -> 'e' */
+ { 0xAE, 0x6F }, /* THAANA OBOFILI -> 'o' */
+ /* Entries for page 0x09 */
+ { 0x01, 0x4E }, /* DEVANAGARI SIGN CANDRABINDU -> 'N' */
+ { 0x02, 0x4E }, /* DEVANAGARI SIGN ANUSVARA -> 'N' */
+ { 0x03, 0x48 }, /* DEVANAGARI SIGN VISARGA -> 'H' */
+ { 0x05, 0x61 }, /* DEVANAGARI LETTER A -> 'a' */
+ { 0x07, 0x69 }, /* DEVANAGARI LETTER I -> 'i' */
+ { 0x09, 0x75 }, /* DEVANAGARI LETTER U -> 'u' */
+ { 0x0B, 0x52 }, /* DEVANAGARI LETTER VOCALIC R -> 'R' */
+ { 0x0C, 0x4C }, /* DEVANAGARI LETTER VOCALIC L -> 'L' */
+ { 0x0E, 0x65 }, /* DEVANAGARI LETTER SHORT E -> 'e' */
+ { 0x0F, 0x65 }, /* DEVANAGARI LETTER E -> 'e' */
+ { 0x12, 0x6F }, /* DEVANAGARI LETTER SHORT O -> 'o' */
+ { 0x13, 0x6F }, /* DEVANAGARI LETTER O -> 'o' */
+ { 0x15, 0x6B }, /* DEVANAGARI LETTER KA -> 'k' */
+ { 0x17, 0x67 }, /* DEVANAGARI LETTER GA -> 'g' */
+ { 0x1A, 0x63 }, /* DEVANAGARI LETTER CA -> 'c' */
+ { 0x1C, 0x6A }, /* DEVANAGARI LETTER JA -> 'j' */
+ { 0x24, 0x74 }, /* DEVANAGARI LETTER TA -> 't' */
+ { 0x26, 0x64 }, /* DEVANAGARI LETTER DA -> 'd' */
+ { 0x28, 0x6E }, /* DEVANAGARI LETTER NA -> 'n' */
+ { 0x2A, 0x70 }, /* DEVANAGARI LETTER PA -> 'p' */
+ { 0x2C, 0x62 }, /* DEVANAGARI LETTER BA -> 'b' */
+ { 0x2E, 0x6D }, /* DEVANAGARI LETTER MA -> 'm' */
+ { 0x2F, 0x79 }, /* DEVANAGARI LETTER YA -> 'y' */
+ { 0x30, 0x72 }, /* DEVANAGARI LETTER RA -> 'r' */
+ { 0x32, 0x6C }, /* DEVANAGARI LETTER LA -> 'l' */
+ { 0x33, 0x6C }, /* DEVANAGARI LETTER LLA -> 'l' */
+ { 0x35, 0x76 }, /* DEVANAGARI LETTER VA -> 'v' */
+ { 0x38, 0x73 }, /* DEVANAGARI LETTER SA -> 's' */
+ { 0x39, 0x68 }, /* DEVANAGARI LETTER HA -> 'h' */
+ { 0x3C, 0x27 }, /* DEVANAGARI SIGN NUKTA -> ''' */
+ { 0x3D, 0x27 }, /* DEVANAGARI SIGN AVAGRAHA -> ''' */
+ { 0x3F, 0x69 }, /* DEVANAGARI VOWEL SIGN I -> 'i' */
+ { 0x41, 0x75 }, /* DEVANAGARI VOWEL SIGN U -> 'u' */
+ { 0x43, 0x52 }, /* DEVANAGARI VOWEL SIGN VOCALIC R -> 'R' */
+ { 0x46, 0x65 }, /* DEVANAGARI VOWEL SIGN SHORT E -> 'e' */
+ { 0x47, 0x65 }, /* DEVANAGARI VOWEL SIGN E -> 'e' */
+ { 0x4A, 0x6F }, /* DEVANAGARI VOWEL SIGN SHORT O -> 'o' */
+ { 0x4B, 0x6F }, /* DEVANAGARI VOWEL SIGN O -> 'o' */
+ { 0x51, 0x27 }, /* DEVANAGARI STRESS SIGN UDATTA -> ''' */
+ { 0x52, 0x27 }, /* DEVANAGARI STRESS SIGN ANUDATTA -> ''' */
+ { 0x53, 0x60 }, /* DEVANAGARI GRAVE ACCENT -> '`' */
+ { 0x54, 0x27 }, /* DEVANAGARI ACUTE ACCENT -> ''' */
+ { 0x58, 0x71 }, /* DEVANAGARI LETTER QA -> 'q' */
+ { 0x5B, 0x7A }, /* DEVANAGARI LETTER ZA -> 'z' */
+ { 0x5E, 0x66 }, /* DEVANAGARI LETTER FA -> 'f' */
+ { 0x62, 0x4C }, /* DEVANAGARI VOWEL SIGN VOCALIC L -> 'L' */
+ { 0x66, 0x30 }, /* DEVANAGARI DIGIT ZERO -> '0' */
+ { 0x67, 0x31 }, /* DEVANAGARI DIGIT ONE -> '1' */
+ { 0x68, 0x32 }, /* DEVANAGARI DIGIT TWO -> '2' */
+ { 0x69, 0x33 }, /* DEVANAGARI DIGIT THREE -> '3' */
+ { 0x6A, 0x34 }, /* DEVANAGARI DIGIT FOUR -> '4' */
+ { 0x6B, 0x35 }, /* DEVANAGARI DIGIT FIVE -> '5' */
+ { 0x6C, 0x36 }, /* DEVANAGARI DIGIT SIX -> '6' */
+ { 0x6D, 0x37 }, /* DEVANAGARI DIGIT SEVEN -> '7' */
+ { 0x6E, 0x38 }, /* DEVANAGARI DIGIT EIGHT -> '8' */
+ { 0x6F, 0x39 }, /* DEVANAGARI DIGIT NINE -> '9' */
+ { 0x70, 0x2E }, /* DEVANAGARI ABBREVIATION SIGN -> '.' */
+ { 0x81, 0x4E }, /* BENGALI SIGN CANDRABINDU -> 'N' */
+ { 0x82, 0x4E }, /* BENGALI SIGN ANUSVARA -> 'N' */
+ { 0x83, 0x48 }, /* BENGALI SIGN VISARGA -> 'H' */
+ { 0x85, 0x61 }, /* BENGALI LETTER A -> 'a' */
+ { 0x87, 0x69 }, /* BENGALI LETTER I -> 'i' */
+ { 0x89, 0x75 }, /* BENGALI LETTER U -> 'u' */
+ { 0x8B, 0x52 }, /* BENGALI LETTER VOCALIC R -> 'R' */
+ { 0x8F, 0x65 }, /* BENGALI LETTER E -> 'e' */
+ { 0x93, 0x6F }, /* BENGALI LETTER O -> 'o' */
+ { 0x95, 0x6B }, /* BENGALI LETTER KA -> 'k' */
+ { 0x97, 0x67 }, /* BENGALI LETTER GA -> 'g' */
+ { 0x9A, 0x63 }, /* BENGALI LETTER CA -> 'c' */
+ { 0x9C, 0x6A }, /* BENGALI LETTER JA -> 'j' */
+ { 0xA4, 0x74 }, /* BENGALI LETTER TA -> 't' */
+ { 0xA6, 0x64 }, /* BENGALI LETTER DA -> 'd' */
+ { 0xA8, 0x6E }, /* BENGALI LETTER NA -> 'n' */
+ { 0xAA, 0x70 }, /* BENGALI LETTER PA -> 'p' */
+ { 0xAC, 0x62 }, /* BENGALI LETTER BA -> 'b' */
+ { 0xAE, 0x6D }, /* BENGALI LETTER MA -> 'm' */
+ { 0xAF, 0x79 }, /* BENGALI LETTER YA -> 'y' */
+ { 0xB0, 0x72 }, /* BENGALI LETTER RA -> 'r' */
+ { 0xB2, 0x6C }, /* BENGALI LETTER LA -> 'l' */
+ { 0xB8, 0x73 }, /* BENGALI LETTER SA -> 's' */
+ { 0xB9, 0x68 }, /* BENGALI LETTER HA -> 'h' */
+ { 0xBC, 0x27 }, /* BENGALI SIGN NUKTA -> ''' */
+ { 0xBF, 0x69 }, /* BENGALI VOWEL SIGN I -> 'i' */
+ { 0xC1, 0x75 }, /* BENGALI VOWEL SIGN U -> 'u' */
+ { 0xC3, 0x52 }, /* BENGALI VOWEL SIGN VOCALIC R -> 'R' */
+ { 0xC7, 0x65 }, /* BENGALI VOWEL SIGN E -> 'e' */
+ { 0xCB, 0x6F }, /* BENGALI VOWEL SIGN O -> 'o' */
+ { 0xD7, 0x2B }, /* BENGALI AU LENGTH MARK -> '+' */
+ { 0xE2, 0x4C }, /* BENGALI VOWEL SIGN VOCALIC L -> 'L' */
+ { 0xE6, 0x30 }, /* BENGALI DIGIT ZERO -> '0' */
+ { 0xE7, 0x31 }, /* BENGALI DIGIT ONE -> '1' */
+ { 0xE8, 0x32 }, /* BENGALI DIGIT TWO -> '2' */
+ { 0xE9, 0x33 }, /* BENGALI DIGIT THREE -> '3' */
+ { 0xEA, 0x34 }, /* BENGALI DIGIT FOUR -> '4' */
+ { 0xEB, 0x35 }, /* BENGALI DIGIT FIVE -> '5' */
+ { 0xEC, 0x36 }, /* BENGALI DIGIT SIX -> '6' */
+ { 0xED, 0x37 }, /* BENGALI DIGIT SEVEN -> '7' */
+ { 0xEE, 0x38 }, /* BENGALI DIGIT EIGHT -> '8' */
+ { 0xEF, 0x39 }, /* BENGALI DIGIT NINE -> '9' */
+ /* Entries for page 0x0A */
+ { 0x02, 0x4E }, /* GURMUKHI SIGN BINDI -> 'N' */
+ { 0x05, 0x61 }, /* GURMUKHI LETTER A -> 'a' */
+ { 0x07, 0x69 }, /* GURMUKHI LETTER I -> 'i' */
+ { 0x09, 0x75 }, /* GURMUKHI LETTER U -> 'u' */
+ { 0x15, 0x6B }, /* GURMUKHI LETTER KA -> 'k' */
+ { 0x17, 0x67 }, /* GURMUKHI LETTER GA -> 'g' */
+ { 0x1A, 0x63 }, /* GURMUKHI LETTER CA -> 'c' */
+ { 0x1C, 0x6A }, /* GURMUKHI LETTER JA -> 'j' */
+ { 0x24, 0x74 }, /* GURMUKHI LETTER TA -> 't' */
+ { 0x26, 0x64 }, /* GURMUKHI LETTER DA -> 'd' */
+ { 0x28, 0x6E }, /* GURMUKHI LETTER NA -> 'n' */
+ { 0x2A, 0x70 }, /* GURMUKHI LETTER PA -> 'p' */
+ { 0x2C, 0x62 }, /* GURMUKHI LETTER BA -> 'b' */
+ { 0x2E, 0x6D }, /* GURMUKHI LETTER MA -> 'm' */
+ { 0x2F, 0x79 }, /* GURMUKHI LETTER YA -> 'y' */
+ { 0x30, 0x72 }, /* GURMUKHI LETTER RA -> 'r' */
+ { 0x32, 0x6C }, /* GURMUKHI LETTER LA -> 'l' */
+ { 0x35, 0x76 }, /* GURMUKHI LETTER VA -> 'v' */
+ { 0x38, 0x73 }, /* GURMUKHI LETTER SA -> 's' */
+ { 0x39, 0x68 }, /* GURMUKHI LETTER HA -> 'h' */
+ { 0x3C, 0x27 }, /* GURMUKHI SIGN NUKTA -> ''' */
+ { 0x3F, 0x69 }, /* GURMUKHI VOWEL SIGN I -> 'i' */
+ { 0x41, 0x75 }, /* GURMUKHI VOWEL SIGN U -> 'u' */
+ { 0x5B, 0x7A }, /* GURMUKHI LETTER ZA -> 'z' */
+ { 0x5E, 0x66 }, /* GURMUKHI LETTER FA -> 'f' */
+ { 0x66, 0x30 }, /* GURMUKHI DIGIT ZERO -> '0' */
+ { 0x67, 0x31 }, /* GURMUKHI DIGIT ONE -> '1' */
+ { 0x68, 0x32 }, /* GURMUKHI DIGIT TWO -> '2' */
+ { 0x69, 0x33 }, /* GURMUKHI DIGIT THREE -> '3' */
+ { 0x6A, 0x34 }, /* GURMUKHI DIGIT FOUR -> '4' */
+ { 0x6B, 0x35 }, /* GURMUKHI DIGIT FIVE -> '5' */
+ { 0x6C, 0x36 }, /* GURMUKHI DIGIT SIX -> '6' */
+ { 0x6D, 0x37 }, /* GURMUKHI DIGIT SEVEN -> '7' */
+ { 0x6E, 0x38 }, /* GURMUKHI DIGIT EIGHT -> '8' */
+ { 0x6F, 0x39 }, /* GURMUKHI DIGIT NINE -> '9' */
+ { 0x70, 0x4E }, /* GURMUKHI TIPPI -> 'N' */
+ { 0x71, 0x48 }, /* GURMUKHI ADDAK -> 'H' */
+ { 0x81, 0x4E }, /* GUJARATI SIGN CANDRABINDU -> 'N' */
+ { 0x82, 0x4E }, /* GUJARATI SIGN ANUSVARA -> 'N' */
+ { 0x83, 0x48 }, /* GUJARATI SIGN VISARGA -> 'H' */
+ { 0x85, 0x61 }, /* GUJARATI LETTER A -> 'a' */
+ { 0x87, 0x69 }, /* GUJARATI LETTER I -> 'i' */
+ { 0x89, 0x75 }, /* GUJARATI LETTER U -> 'u' */
+ { 0x8B, 0x52 }, /* GUJARATI LETTER VOCALIC R -> 'R' */
+ { 0x8F, 0x65 }, /* GUJARATI LETTER E -> 'e' */
+ { 0x93, 0x6F }, /* GUJARATI LETTER O -> 'o' */
+ { 0x95, 0x6B }, /* GUJARATI LETTER KA -> 'k' */
+ { 0x97, 0x67 }, /* GUJARATI LETTER GA -> 'g' */
+ { 0x9A, 0x63 }, /* GUJARATI LETTER CA -> 'c' */
+ { 0x9C, 0x6A }, /* GUJARATI LETTER JA -> 'j' */
+ { 0xA4, 0x74 }, /* GUJARATI LETTER TA -> 't' */
+ { 0xA6, 0x64 }, /* GUJARATI LETTER DA -> 'd' */
+ { 0xA8, 0x6E }, /* GUJARATI LETTER NA -> 'n' */
+ { 0xAA, 0x70 }, /* GUJARATI LETTER PA -> 'p' */
+ { 0xAC, 0x62 }, /* GUJARATI LETTER BA -> 'b' */
+ { 0xAE, 0x6D }, /* GUJARATI LETTER MA -> 'm' */
+ { 0xB0, 0x72 }, /* GUJARATI LETTER RA -> 'r' */
+ { 0xB2, 0x6C }, /* GUJARATI LETTER LA -> 'l' */
+ { 0xB5, 0x76 }, /* GUJARATI LETTER VA -> 'v' */
+ { 0xB8, 0x73 }, /* GUJARATI LETTER SA -> 's' */
+ { 0xB9, 0x68 }, /* GUJARATI LETTER HA -> 'h' */
+ { 0xBC, 0x27 }, /* GUJARATI SIGN NUKTA -> ''' */
+ { 0xBD, 0x27 }, /* GUJARATI SIGN AVAGRAHA -> ''' */
+ { 0xBF, 0x69 }, /* GUJARATI VOWEL SIGN I -> 'i' */
+ { 0xC1, 0x75 }, /* GUJARATI VOWEL SIGN U -> 'u' */
+ { 0xC3, 0x52 }, /* GUJARATI VOWEL SIGN VOCALIC R -> 'R' */
+ { 0xC7, 0x65 }, /* GUJARATI VOWEL SIGN E -> 'e' */
+ { 0xCB, 0x6F }, /* GUJARATI VOWEL SIGN O -> 'o' */
+ { 0xE6, 0x30 }, /* GUJARATI DIGIT ZERO -> '0' */
+ { 0xE7, 0x31 }, /* GUJARATI DIGIT ONE -> '1' */
+ { 0xE8, 0x32 }, /* GUJARATI DIGIT TWO -> '2' */
+ { 0xE9, 0x33 }, /* GUJARATI DIGIT THREE -> '3' */
+ { 0xEA, 0x34 }, /* GUJARATI DIGIT FOUR -> '4' */
+ { 0xEB, 0x35 }, /* GUJARATI DIGIT FIVE -> '5' */
+ { 0xEC, 0x36 }, /* GUJARATI DIGIT SIX -> '6' */
+ { 0xED, 0x37 }, /* GUJARATI DIGIT SEVEN -> '7' */
+ { 0xEE, 0x38 }, /* GUJARATI DIGIT EIGHT -> '8' */
+ { 0xEF, 0x39 }, /* GUJARATI DIGIT NINE -> '9' */
+ /* Entries for page 0x0B */
+ { 0x01, 0x4E }, /* ORIYA SIGN CANDRABINDU -> 'N' */
+ { 0x02, 0x4E }, /* ORIYA SIGN ANUSVARA -> 'N' */
+ { 0x03, 0x48 }, /* ORIYA SIGN VISARGA -> 'H' */
+ { 0x05, 0x61 }, /* ORIYA LETTER A -> 'a' */
+ { 0x07, 0x69 }, /* ORIYA LETTER I -> 'i' */
+ { 0x09, 0x75 }, /* ORIYA LETTER U -> 'u' */
+ { 0x0B, 0x52 }, /* ORIYA LETTER VOCALIC R -> 'R' */
+ { 0x0C, 0x4C }, /* ORIYA LETTER VOCALIC L -> 'L' */
+ { 0x0F, 0x65 }, /* ORIYA LETTER E -> 'e' */
+ { 0x13, 0x6F }, /* ORIYA LETTER O -> 'o' */
+ { 0x15, 0x6B }, /* ORIYA LETTER KA -> 'k' */
+ { 0x17, 0x67 }, /* ORIYA LETTER GA -> 'g' */
+ { 0x1A, 0x63 }, /* ORIYA LETTER CA -> 'c' */
+ { 0x1C, 0x6A }, /* ORIYA LETTER JA -> 'j' */
+ { 0x24, 0x74 }, /* ORIYA LETTER TA -> 't' */
+ { 0x26, 0x64 }, /* ORIYA LETTER DA -> 'd' */
+ { 0x28, 0x6E }, /* ORIYA LETTER NA -> 'n' */
+ { 0x2A, 0x70 }, /* ORIYA LETTER PA -> 'p' */
+ { 0x2C, 0x62 }, /* ORIYA LETTER BA -> 'b' */
+ { 0x2E, 0x6D }, /* ORIYA LETTER MA -> 'm' */
+ { 0x2F, 0x79 }, /* ORIYA LETTER YA -> 'y' */
+ { 0x30, 0x72 }, /* ORIYA LETTER RA -> 'r' */
+ { 0x32, 0x6C }, /* ORIYA LETTER LA -> 'l' */
+ { 0x38, 0x73 }, /* ORIYA LETTER SA -> 's' */
+ { 0x39, 0x68 }, /* ORIYA LETTER HA -> 'h' */
+ { 0x3C, 0x27 }, /* ORIYA SIGN NUKTA -> ''' */
+ { 0x3D, 0x27 }, /* ORIYA SIGN AVAGRAHA -> ''' */
+ { 0x3F, 0x69 }, /* ORIYA VOWEL SIGN I -> 'i' */
+ { 0x41, 0x75 }, /* ORIYA VOWEL SIGN U -> 'u' */
+ { 0x43, 0x52 }, /* ORIYA VOWEL SIGN VOCALIC R -> 'R' */
+ { 0x47, 0x65 }, /* ORIYA VOWEL SIGN E -> 'e' */
+ { 0x4B, 0x6F }, /* ORIYA VOWEL SIGN O -> 'o' */
+ { 0x56, 0x2B }, /* ORIYA AI LENGTH MARK -> '+' */
+ { 0x57, 0x2B }, /* ORIYA AU LENGTH MARK -> '+' */
+ { 0x66, 0x30 }, /* ORIYA DIGIT ZERO -> '0' */
+ { 0x67, 0x31 }, /* ORIYA DIGIT ONE -> '1' */
+ { 0x68, 0x32 }, /* ORIYA DIGIT TWO -> '2' */
+ { 0x69, 0x33 }, /* ORIYA DIGIT THREE -> '3' */
+ { 0x6A, 0x34 }, /* ORIYA DIGIT FOUR -> '4' */
+ { 0x6B, 0x35 }, /* ORIYA DIGIT FIVE -> '5' */
+ { 0x6C, 0x36 }, /* ORIYA DIGIT SIX -> '6' */
+ { 0x6D, 0x37 }, /* ORIYA DIGIT SEVEN -> '7' */
+ { 0x6E, 0x38 }, /* ORIYA DIGIT EIGHT -> '8' */
+ { 0x6F, 0x39 }, /* ORIYA DIGIT NINE -> '9' */
+ { 0x82, 0x4E }, /* TAMIL SIGN ANUSVARA -> 'N' */
+ { 0x83, 0x48 }, /* TAMIL SIGN VISARGA -> 'H' */
+ { 0x85, 0x61 }, /* TAMIL LETTER A -> 'a' */
+ { 0x87, 0x69 }, /* TAMIL LETTER I -> 'i' */
+ { 0x89, 0x75 }, /* TAMIL LETTER U -> 'u' */
+ { 0x8E, 0x65 }, /* TAMIL LETTER E -> 'e' */
+ { 0x92, 0x6F }, /* TAMIL LETTER O -> 'o' */
+ { 0x95, 0x6B }, /* TAMIL LETTER KA -> 'k' */
+ { 0x9A, 0x63 }, /* TAMIL LETTER CA -> 'c' */
+ { 0x9C, 0x6A }, /* TAMIL LETTER JA -> 'j' */
+ { 0xA4, 0x74 }, /* TAMIL LETTER TA -> 't' */
+ { 0xA8, 0x6E }, /* TAMIL LETTER NA -> 'n' */
+ { 0xAA, 0x70 }, /* TAMIL LETTER PA -> 'p' */
+ { 0xAE, 0x6D }, /* TAMIL LETTER MA -> 'm' */
+ { 0xAF, 0x79 }, /* TAMIL LETTER YA -> 'y' */
+ { 0xB0, 0x72 }, /* TAMIL LETTER RA -> 'r' */
+ { 0xB2, 0x6C }, /* TAMIL LETTER LA -> 'l' */
+ { 0xB5, 0x76 }, /* TAMIL LETTER VA -> 'v' */
+ { 0xB8, 0x73 }, /* TAMIL LETTER SA -> 's' */
+ { 0xB9, 0x68 }, /* TAMIL LETTER HA -> 'h' */
+ { 0xBF, 0x69 }, /* TAMIL VOWEL SIGN I -> 'i' */
+ { 0xC1, 0x75 }, /* TAMIL VOWEL SIGN U -> 'u' */
+ { 0xC6, 0x65 }, /* TAMIL VOWEL SIGN E -> 'e' */
+ { 0xCA, 0x6F }, /* TAMIL VOWEL SIGN O -> 'o' */
+ { 0xD7, 0x2B }, /* TAMIL AU LENGTH MARK -> '+' */
+ { 0xE6, 0x30 }, /* TAMIL DIGIT ZERO -> '0' */
+ { 0xE7, 0x31 }, /* TAMIL DIGIT ONE -> '1' */
+ { 0xE8, 0x32 }, /* TAMIL DIGIT TWO -> '2' */
+ { 0xE9, 0x33 }, /* TAMIL DIGIT THREE -> '3' */
+ { 0xEA, 0x34 }, /* TAMIL DIGIT FOUR -> '4' */
+ { 0xEB, 0x35 }, /* TAMIL DIGIT FIVE -> '5' */
+ { 0xEC, 0x36 }, /* TAMIL DIGIT SIX -> '6' */
+ { 0xED, 0x37 }, /* TAMIL DIGIT SEVEN -> '7' */
+ { 0xEE, 0x38 }, /* TAMIL DIGIT EIGHT -> '8' */
+ { 0xEF, 0x39 }, /* TAMIL DIGIT NINE -> '9' */
+ /* Entries for page 0x0C */
+ { 0x01, 0x4E }, /* TELUGU SIGN CANDRABINDU -> 'N' */
+ { 0x02, 0x4E }, /* TELUGU SIGN ANUSVARA -> 'N' */
+ { 0x03, 0x48 }, /* TELUGU SIGN VISARGA -> 'H' */
+ { 0x05, 0x61 }, /* TELUGU LETTER A -> 'a' */
+ { 0x07, 0x69 }, /* TELUGU LETTER I -> 'i' */
+ { 0x09, 0x75 }, /* TELUGU LETTER U -> 'u' */
+ { 0x0B, 0x52 }, /* TELUGU LETTER VOCALIC R -> 'R' */
+ { 0x0C, 0x4C }, /* TELUGU LETTER VOCALIC L -> 'L' */
+ { 0x0E, 0x65 }, /* TELUGU LETTER E -> 'e' */
+ { 0x12, 0x6F }, /* TELUGU LETTER O -> 'o' */
+ { 0x15, 0x6B }, /* TELUGU LETTER KA -> 'k' */
+ { 0x17, 0x67 }, /* TELUGU LETTER GA -> 'g' */
+ { 0x1A, 0x63 }, /* TELUGU LETTER CA -> 'c' */
+ { 0x1C, 0x6A }, /* TELUGU LETTER JA -> 'j' */
+ { 0x24, 0x74 }, /* TELUGU LETTER TA -> 't' */
+ { 0x26, 0x64 }, /* TELUGU LETTER DA -> 'd' */
+ { 0x28, 0x6E }, /* TELUGU LETTER NA -> 'n' */
+ { 0x2A, 0x70 }, /* TELUGU LETTER PA -> 'p' */
+ { 0x2C, 0x62 }, /* TELUGU LETTER BA -> 'b' */
+ { 0x2E, 0x6D }, /* TELUGU LETTER MA -> 'm' */
+ { 0x2F, 0x79 }, /* TELUGU LETTER YA -> 'y' */
+ { 0x30, 0x72 }, /* TELUGU LETTER RA -> 'r' */
+ { 0x32, 0x6C }, /* TELUGU LETTER LA -> 'l' */
+ { 0x35, 0x76 }, /* TELUGU LETTER VA -> 'v' */
+ { 0x38, 0x73 }, /* TELUGU LETTER SA -> 's' */
+ { 0x39, 0x68 }, /* TELUGU LETTER HA -> 'h' */
+ { 0x3F, 0x69 }, /* TELUGU VOWEL SIGN I -> 'i' */
+ { 0x41, 0x75 }, /* TELUGU VOWEL SIGN U -> 'u' */
+ { 0x43, 0x52 }, /* TELUGU VOWEL SIGN VOCALIC R -> 'R' */
+ { 0x46, 0x65 }, /* TELUGU VOWEL SIGN E -> 'e' */
+ { 0x4A, 0x6F }, /* TELUGU VOWEL SIGN O -> 'o' */
+ { 0x55, 0x2B }, /* TELUGU LENGTH MARK -> '+' */
+ { 0x56, 0x2B }, /* TELUGU AI LENGTH MARK -> '+' */
+ { 0x66, 0x30 }, /* TELUGU DIGIT ZERO -> '0' */
+ { 0x67, 0x31 }, /* TELUGU DIGIT ONE -> '1' */
+ { 0x68, 0x32 }, /* TELUGU DIGIT TWO -> '2' */
+ { 0x69, 0x33 }, /* TELUGU DIGIT THREE -> '3' */
+ { 0x6A, 0x34 }, /* TELUGU DIGIT FOUR -> '4' */
+ { 0x6B, 0x35 }, /* TELUGU DIGIT FIVE -> '5' */
+ { 0x6C, 0x36 }, /* TELUGU DIGIT SIX -> '6' */
+ { 0x6D, 0x37 }, /* TELUGU DIGIT SEVEN -> '7' */
+ { 0x6E, 0x38 }, /* TELUGU DIGIT EIGHT -> '8' */
+ { 0x6F, 0x39 }, /* TELUGU DIGIT NINE -> '9' */
+ { 0x82, 0x4E }, /* KANNADA SIGN ANUSVARA -> 'N' */
+ { 0x83, 0x48 }, /* KANNADA SIGN VISARGA -> 'H' */
+ { 0x85, 0x61 }, /* KANNADA LETTER A -> 'a' */
+ { 0x87, 0x69 }, /* KANNADA LETTER I -> 'i' */
+ { 0x89, 0x75 }, /* KANNADA LETTER U -> 'u' */
+ { 0x8B, 0x52 }, /* KANNADA LETTER VOCALIC R -> 'R' */
+ { 0x8C, 0x4C }, /* KANNADA LETTER VOCALIC L -> 'L' */
+ { 0x8E, 0x65 }, /* KANNADA LETTER E -> 'e' */
+ { 0x92, 0x6F }, /* KANNADA LETTER O -> 'o' */
+ { 0x95, 0x6B }, /* KANNADA LETTER KA -> 'k' */
+ { 0x97, 0x67 }, /* KANNADA LETTER GA -> 'g' */
+ { 0x9A, 0x63 }, /* KANNADA LETTER CA -> 'c' */
+ { 0x9C, 0x6A }, /* KANNADA LETTER JA -> 'j' */
+ { 0xA4, 0x74 }, /* KANNADA LETTER TA -> 't' */
+ { 0xA6, 0x64 }, /* KANNADA LETTER DA -> 'd' */
+ { 0xA8, 0x6E }, /* KANNADA LETTER NA -> 'n' */
+ { 0xAA, 0x70 }, /* KANNADA LETTER PA -> 'p' */
+ { 0xAC, 0x62 }, /* KANNADA LETTER BA -> 'b' */
+ { 0xAE, 0x6D }, /* KANNADA LETTER MA -> 'm' */
+ { 0xAF, 0x79 }, /* KANNADA LETTER YA -> 'y' */
+ { 0xB0, 0x72 }, /* KANNADA LETTER RA -> 'r' */
+ { 0xB2, 0x6C }, /* KANNADA LETTER LA -> 'l' */
+ { 0xB5, 0x76 }, /* KANNADA LETTER VA -> 'v' */
+ { 0xB8, 0x73 }, /* KANNADA LETTER SA -> 's' */
+ { 0xB9, 0x68 }, /* KANNADA LETTER HA -> 'h' */
+ { 0xBF, 0x69 }, /* KANNADA VOWEL SIGN I -> 'i' */
+ { 0xC1, 0x75 }, /* KANNADA VOWEL SIGN U -> 'u' */
+ { 0xC3, 0x52 }, /* KANNADA VOWEL SIGN VOCALIC R -> 'R' */
+ { 0xC6, 0x65 }, /* KANNADA VOWEL SIGN E -> 'e' */
+ { 0xCA, 0x6F }, /* KANNADA VOWEL SIGN O -> 'o' */
+ { 0xD5, 0x2B }, /* KANNADA LENGTH MARK -> '+' */
+ { 0xD6, 0x2B }, /* KANNADA AI LENGTH MARK -> '+' */
+ { 0xE6, 0x30 }, /* KANNADA DIGIT ZERO -> '0' */
+ { 0xE7, 0x31 }, /* KANNADA DIGIT ONE -> '1' */
+ { 0xE8, 0x32 }, /* KANNADA DIGIT TWO -> '2' */
+ { 0xE9, 0x33 }, /* KANNADA DIGIT THREE -> '3' */
+ { 0xEA, 0x34 }, /* KANNADA DIGIT FOUR -> '4' */
+ { 0xEB, 0x35 }, /* KANNADA DIGIT FIVE -> '5' */
+ { 0xEC, 0x36 }, /* KANNADA DIGIT SIX -> '6' */
+ { 0xED, 0x37 }, /* KANNADA DIGIT SEVEN -> '7' */
+ { 0xEE, 0x38 }, /* KANNADA DIGIT EIGHT -> '8' */
+ { 0xEF, 0x39 }, /* KANNADA DIGIT NINE -> '9' */
+ /* Entries for page 0x0D */
+ { 0x02, 0x4E }, /* MALAYALAM SIGN ANUSVARA -> 'N' */
+ { 0x03, 0x48 }, /* MALAYALAM SIGN VISARGA -> 'H' */
+ { 0x05, 0x61 }, /* MALAYALAM LETTER A -> 'a' */
+ { 0x07, 0x69 }, /* MALAYALAM LETTER I -> 'i' */
+ { 0x09, 0x75 }, /* MALAYALAM LETTER U -> 'u' */
+ { 0x0B, 0x52 }, /* MALAYALAM LETTER VOCALIC R -> 'R' */
+ { 0x0C, 0x4C }, /* MALAYALAM LETTER VOCALIC L -> 'L' */
+ { 0x0E, 0x65 }, /* MALAYALAM LETTER E -> 'e' */
+ { 0x12, 0x6F }, /* MALAYALAM LETTER O -> 'o' */
+ { 0x15, 0x6B }, /* MALAYALAM LETTER KA -> 'k' */
+ { 0x17, 0x67 }, /* MALAYALAM LETTER GA -> 'g' */
+ { 0x1A, 0x63 }, /* MALAYALAM LETTER CA -> 'c' */
+ { 0x1C, 0x6A }, /* MALAYALAM LETTER JA -> 'j' */
+ { 0x24, 0x74 }, /* MALAYALAM LETTER TA -> 't' */
+ { 0x26, 0x64 }, /* MALAYALAM LETTER DA -> 'd' */
+ { 0x28, 0x6E }, /* MALAYALAM LETTER NA -> 'n' */
+ { 0x2A, 0x70 }, /* MALAYALAM LETTER PA -> 'p' */
+ { 0x2C, 0x62 }, /* MALAYALAM LETTER BA -> 'b' */
+ { 0x2E, 0x6D }, /* MALAYALAM LETTER MA -> 'm' */
+ { 0x2F, 0x79 }, /* MALAYALAM LETTER YA -> 'y' */
+ { 0x30, 0x72 }, /* MALAYALAM LETTER RA -> 'r' */
+ { 0x32, 0x6C }, /* MALAYALAM LETTER LA -> 'l' */
+ { 0x35, 0x76 }, /* MALAYALAM LETTER VA -> 'v' */
+ { 0x38, 0x73 }, /* MALAYALAM LETTER SA -> 's' */
+ { 0x39, 0x68 }, /* MALAYALAM LETTER HA -> 'h' */
+ { 0x3F, 0x69 }, /* MALAYALAM VOWEL SIGN I -> 'i' */
+ { 0x41, 0x75 }, /* MALAYALAM VOWEL SIGN U -> 'u' */
+ { 0x43, 0x52 }, /* MALAYALAM VOWEL SIGN VOCALIC R -> 'R' */
+ { 0x46, 0x65 }, /* MALAYALAM VOWEL SIGN E -> 'e' */
+ { 0x4A, 0x6F }, /* MALAYALAM VOWEL SIGN O -> 'o' */
+ { 0x57, 0x2B }, /* MALAYALAM AU LENGTH MARK -> '+' */
+ { 0x66, 0x30 }, /* MALAYALAM DIGIT ZERO -> '0' */
+ { 0x67, 0x31 }, /* MALAYALAM DIGIT ONE -> '1' */
+ { 0x68, 0x32 }, /* MALAYALAM DIGIT TWO -> '2' */
+ { 0x69, 0x33 }, /* MALAYALAM DIGIT THREE -> '3' */
+ { 0x6A, 0x34 }, /* MALAYALAM DIGIT FOUR -> '4' */
+ { 0x6B, 0x35 }, /* MALAYALAM DIGIT FIVE -> '5' */
+ { 0x6C, 0x36 }, /* MALAYALAM DIGIT SIX -> '6' */
+ { 0x6D, 0x37 }, /* MALAYALAM DIGIT SEVEN -> '7' */
+ { 0x6E, 0x38 }, /* MALAYALAM DIGIT EIGHT -> '8' */
+ { 0x6F, 0x39 }, /* MALAYALAM DIGIT NINE -> '9' */
+ { 0x82, 0x4E }, /* SINHALA SIGN ANUSVARAYA -> 'N' */
+ { 0x83, 0x48 }, /* SINHALA SIGN VISARGAYA -> 'H' */
+ { 0x85, 0x61 }, /* SINHALA LETTER AYANNA -> 'a' */
+ { 0x89, 0x69 }, /* SINHALA LETTER IYANNA -> 'i' */
+ { 0x8B, 0x75 }, /* SINHALA LETTER UYANNA -> 'u' */
+ { 0x8D, 0x52 }, /* SINHALA LETTER IRUYANNA -> 'R' */
+ { 0x8F, 0x4C }, /* SINHALA LETTER ILUYANNA -> 'L' */
+ { 0x91, 0x65 }, /* SINHALA LETTER EYANNA -> 'e' */
+ { 0x94, 0x6F }, /* SINHALA LETTER OYANNA -> 'o' */
+ { 0x9A, 0x6B }, /* SINHALA LETTER ALPAPRAANA KAYANNA -> 'k' */
+ { 0x9C, 0x67 }, /* SINHALA LETTER ALPAPRAANA GAYANNA -> 'g' */
+ { 0xA0, 0x63 }, /* SINHALA LETTER ALPAPRAANA CAYANNA -> 'c' */
+ { 0xA2, 0x6A }, /* SINHALA LETTER ALPAPRAANA JAYANNA -> 'j' */
+ { 0xAD, 0x74 }, /* SINHALA LETTER ALPAPRAANA TAYANNA -> 't' */
+ { 0xAF, 0x64 }, /* SINHALA LETTER ALPAPRAANA DAYANNA -> 'd' */
+ { 0xB1, 0x6E }, /* SINHALA LETTER DANTAJA NAYANNA -> 'n' */
+ { 0xB4, 0x70 }, /* SINHALA LETTER ALPAPRAANA PAYANNA -> 'p' */
+ { 0xB6, 0x62 }, /* SINHALA LETTER ALPAPRAANA BAYANNA -> 'b' */
+ { 0xB8, 0x6D }, /* SINHALA LETTER MAYANNA -> 'm' */
+ { 0xBA, 0x79 }, /* SINHALA LETTER YAYANNA -> 'y' */
+ { 0xBB, 0x72 }, /* SINHALA LETTER RAYANNA -> 'r' */
+ { 0xBD, 0x6C }, /* SINHALA LETTER DANTAJA LAYANNA -> 'l' */
+ { 0xC0, 0x76 }, /* SINHALA LETTER VAYANNA -> 'v' */
+ { 0xC3, 0x73 }, /* SINHALA LETTER DANTAJA SAYANNA -> 's' */
+ { 0xC4, 0x68 }, /* SINHALA LETTER HAYANNA -> 'h' */
+ { 0xC6, 0x66 }, /* SINHALA LETTER FAYANNA -> 'f' */
+ { 0xD2, 0x69 }, /* SINHALA VOWEL SIGN KETTI IS-PILLA -> 'i' */
+ { 0xD4, 0x75 }, /* SINHALA VOWEL SIGN KETTI PAA-PILLA -> 'u' */
+ { 0xD8, 0x52 }, /* SINHALA VOWEL SIGN GAETTA-PILLA -> 'R' */
+ { 0xD9, 0x65 }, /* SINHALA VOWEL SIGN KOMBUVA -> 'e' */
+ { 0xDC, 0x6F }, /* SINHALA VOWEL SIGN KOMBUVA HAA AELA-PILLA -> 'o' */
+ { 0xDF, 0x4C }, /* SINHALA VOWEL SIGN GAYANUKITTA -> 'L' */
+ /* Entries for page 0x0E */
+ { 0x01, 0x6B }, /* THAI CHARACTER KO KAI -> 'k' */
+ { 0x0D, 0x79 }, /* THAI CHARACTER YO YING -> 'y' */
+ { 0x0E, 0x64 }, /* THAI CHARACTER DO CHADA -> 'd' */
+ { 0x0F, 0x74 }, /* THAI CHARACTER TO PATAK -> 't' */
+ { 0x13, 0x6E }, /* THAI CHARACTER NO NEN -> 'n' */
+ { 0x14, 0x64 }, /* THAI CHARACTER DO DEK -> 'd' */
+ { 0x15, 0x74 }, /* THAI CHARACTER TO TAO -> 't' */
+ { 0x19, 0x6E }, /* THAI CHARACTER NO NU -> 'n' */
+ { 0x1A, 0x62 }, /* THAI CHARACTER BO BAIMAI -> 'b' */
+ { 0x1B, 0x70 }, /* THAI CHARACTER PO PLA -> 'p' */
+ { 0x1D, 0x66 }, /* THAI CHARACTER FO FA -> 'f' */
+ { 0x1F, 0x66 }, /* THAI CHARACTER FO FAN -> 'f' */
+ { 0x21, 0x6D }, /* THAI CHARACTER MO MA -> 'm' */
+ { 0x22, 0x79 }, /* THAI CHARACTER YO YAK -> 'y' */
+ { 0x23, 0x72 }, /* THAI CHARACTER RO RUA -> 'r' */
+ { 0x24, 0x52 }, /* THAI CHARACTER RU -> 'R' */
+ { 0x25, 0x6C }, /* THAI CHARACTER LO LING -> 'l' */
+ { 0x26, 0x4C }, /* THAI CHARACTER LU -> 'L' */
+ { 0x27, 0x77 }, /* THAI CHARACTER WO WAEN -> 'w' */
+ { 0x28, 0x00 }, /* THAI CHARACTER SO SALA -> ... */
+ { 0x2A, 0x73 }, /* THAI CHARACTER SO SUA -> 's' */
+ { 0x2B, 0x68 }, /* THAI CHARACTER HO HIP -> 'h' */
+ { 0x2C, 0x6C }, /* THAI CHARACTER LO CHULA -> 'l' */
+ { 0x2D, 0x60 }, /* THAI CHARACTER O ANG -> '`' */
+ { 0x2E, 0x68 }, /* THAI CHARACTER HO NOKHUK -> 'h' */
+ { 0x2F, 0x7E }, /* THAI CHARACTER PAIYANNOI -> '~' */
+ { 0x30, 0x61 }, /* THAI CHARACTER SARA A -> 'a' */
+ { 0x31, 0x61 }, /* THAI CHARACTER MAI HAN-AKAT -> 'a' */
+ { 0x34, 0x69 }, /* THAI CHARACTER SARA I -> 'i' */
+ { 0x38, 0x75 }, /* THAI CHARACTER SARA U -> 'u' */
+ { 0x3A, 0x27 }, /* THAI CHARACTER PHINTHU -> ''' */
+ { 0x40, 0x65 }, /* THAI CHARACTER SARA E -> 'e' */
+ { 0x42, 0x6F }, /* THAI CHARACTER SARA O -> 'o' */
+ { 0x46, 0x2B }, /* THAI CHARACTER MAIYAMOK -> '+' */
+ { 0x4D, 0x4D }, /* THAI CHARACTER NIKHAHIT -> 'M' */
+ { 0x50, 0x30 }, /* THAI DIGIT ZERO -> '0' */
+ { 0x51, 0x31 }, /* THAI DIGIT ONE -> '1' */
+ { 0x52, 0x32 }, /* THAI DIGIT TWO -> '2' */
+ { 0x53, 0x33 }, /* THAI DIGIT THREE -> '3' */
+ { 0x54, 0x34 }, /* THAI DIGIT FOUR -> '4' */
+ { 0x55, 0x35 }, /* THAI DIGIT FIVE -> '5' */
+ { 0x56, 0x36 }, /* THAI DIGIT SIX -> '6' */
+ { 0x57, 0x37 }, /* THAI DIGIT SEVEN -> '7' */
+ { 0x58, 0x38 }, /* THAI DIGIT EIGHT -> '8' */
+ { 0x59, 0x39 }, /* THAI DIGIT NINE -> '9' */
+ { 0x81, 0x6B }, /* LAO LETTER KO -> 'k' */
+ { 0x8A, 0x73 }, /* LAO LETTER SO TAM -> 's' */
+ { 0x94, 0x64 }, /* LAO LETTER DO -> 'd' */
+ { 0x95, 0x68 }, /* LAO LETTER TO -> 'h' */
+ { 0x99, 0x6E }, /* LAO LETTER NO -> 'n' */
+ { 0x9A, 0x62 }, /* LAO LETTER BO -> 'b' */
+ { 0x9B, 0x70 }, /* LAO LETTER PO -> 'p' */
+ { 0x9D, 0x66 }, /* LAO LETTER FO TAM -> 'f' */
+ { 0x9F, 0x66 }, /* LAO LETTER FO SUNG -> 'f' */
+ { 0xA1, 0x6D }, /* LAO LETTER MO -> 'm' */
+ { 0xA2, 0x79 }, /* LAO LETTER YO -> 'y' */
+ { 0xA3, 0x72 }, /* LAO LETTER LO LING -> 'r' */
+ { 0xA5, 0x6C }, /* LAO LETTER LO LOOT -> 'l' */
+ { 0xA7, 0x77 }, /* LAO LETTER WO -> 'w' */
+ { 0xAA, 0x73 }, /* LAO LETTER SO SUNG -> 's' */
+ { 0xAB, 0x68 }, /* LAO LETTER HO SUNG -> 'h' */
+ { 0xAD, 0x60 }, /* LAO LETTER O -> '`' */
+ { 0xAF, 0x7E }, /* LAO ELLIPSIS -> '~' */
+ { 0xB0, 0x61 }, /* LAO VOWEL SIGN A -> 'a' */
+ { 0xB4, 0x69 }, /* LAO VOWEL SIGN I -> 'i' */
+ { 0xB6, 0x79 }, /* LAO VOWEL SIGN Y -> 'y' */
+ { 0xB8, 0x75 }, /* LAO VOWEL SIGN U -> 'u' */
+ { 0xBB, 0x6F }, /* LAO VOWEL SIGN MAI KON -> 'o' */
+ { 0xBC, 0x6C }, /* LAO SEMIVOWEL SIGN LO -> 'l' */
+ { 0xC0, 0x65 }, /* LAO VOWEL SIGN E -> 'e' */
+ { 0xC2, 0x6F }, /* LAO VOWEL SIGN O -> 'o' */
+ { 0xC6, 0x2B }, /* LAO KO LA -> '+' */
+ { 0xCD, 0x4D }, /* LAO NIGGAHITA -> 'M' */
+ { 0xD0, 0x30 }, /* LAO DIGIT ZERO -> '0' */
+ { 0xD1, 0x31 }, /* LAO DIGIT ONE -> '1' */
+ { 0xD2, 0x32 }, /* LAO DIGIT TWO -> '2' */
+ { 0xD3, 0x33 }, /* LAO DIGIT THREE -> '3' */
+ { 0xD4, 0x34 }, /* LAO DIGIT FOUR -> '4' */
+ { 0xD5, 0x35 }, /* LAO DIGIT FIVE -> '5' */
+ { 0xD6, 0x36 }, /* LAO DIGIT SIX -> '6' */
+ { 0xD7, 0x37 }, /* LAO DIGIT SEVEN -> '7' */
+ { 0xD8, 0x38 }, /* LAO DIGIT EIGHT -> '8' */
+ { 0xD9, 0x39 }, /* LAO DIGIT NINE -> '9' */
+ /* Entries for page 0x0F */
+ { 0x0B, 0x2D }, /* TIBETAN MARK INTERSYLLABIC TSHEG -> '-' */
+ { 0x20, 0x30 }, /* TIBETAN DIGIT ZERO -> '0' */
+ { 0x21, 0x31 }, /* TIBETAN DIGIT ONE -> '1' */
+ { 0x22, 0x32 }, /* TIBETAN DIGIT TWO -> '2' */
+ { 0x23, 0x33 }, /* TIBETAN DIGIT THREE -> '3' */
+ { 0x24, 0x34 }, /* TIBETAN DIGIT FOUR -> '4' */
+ { 0x25, 0x35 }, /* TIBETAN DIGIT FIVE -> '5' */
+ { 0x26, 0x36 }, /* TIBETAN DIGIT SIX -> '6' */
+ { 0x27, 0x37 }, /* TIBETAN DIGIT SEVEN -> '7' */
+ { 0x28, 0x38 }, /* TIBETAN DIGIT EIGHT -> '8' */
+ { 0x29, 0x39 }, /* TIBETAN DIGIT NINE -> '9' */
+ { 0x34, 0x2B }, /* TIBETAN MARK BSDUS RTAGS -> '+' */
+ { 0x35, 0x2A }, /* TIBETAN MARK NGAS BZUNG NYI ZLA -> '*' */
+ { 0x36, 0x5E }, /* TIBETAN MARK CARET -DZUD RTAGS BZHI MIG CAN -> '^' */
+ { 0x37, 0x5F }, /* TIBETAN MARK NGAS BZUNG SGOR RTAGS -> '_' */
+ { 0x39, 0x7E }, /* TIBETAN MARK TSA -PHRU -> '~' */
+ { 0x3B, 0x5D }, /* TIBETAN MARK GUG RTAGS GYAS -> ']' */
+ { 0x40, 0x6B }, /* TIBETAN LETTER KA -> 'k' */
+ { 0x42, 0x67 }, /* TIBETAN LETTER GA -> 'g' */
+ { 0x45, 0x63 }, /* TIBETAN LETTER CA -> 'c' */
+ { 0x47, 0x6A }, /* TIBETAN LETTER JA -> 'j' */
+ { 0x4F, 0x74 }, /* TIBETAN LETTER TA -> 't' */
+ { 0x51, 0x64 }, /* TIBETAN LETTER DA -> 'd' */
+ { 0x53, 0x6E }, /* TIBETAN LETTER NA -> 'n' */
+ { 0x54, 0x70 }, /* TIBETAN LETTER PA -> 'p' */
+ { 0x56, 0x62 }, /* TIBETAN LETTER BA -> 'b' */
+ { 0x58, 0x6D }, /* TIBETAN LETTER MA -> 'm' */
+ { 0x5D, 0x77 }, /* TIBETAN LETTER WA -> 'w' */
+ { 0x5F, 0x7A }, /* TIBETAN LETTER ZA -> 'z' */
+ { 0x60, 0x27 }, /* TIBETAN LETTER -A -> ''' */
+ { 0x61, 0x79 }, /* TIBETAN LETTER YA -> 'y' */
+ { 0x62, 0x72 }, /* TIBETAN LETTER RA -> 'r' */
+ { 0x63, 0x6C }, /* TIBETAN LETTER LA -> 'l' */
+ { 0x66, 0x73 }, /* TIBETAN LETTER SA -> 's' */
+ { 0x67, 0x68 }, /* TIBETAN LETTER HA -> 'h' */
+ { 0x68, 0x61 }, /* TIBETAN LETTER A -> 'a' */
+ { 0x6A, 0x72 }, /* TIBETAN LETTER FIXED-FORM RA -> 'r' */
+ { 0x72, 0x69 }, /* TIBETAN VOWEL SIGN I -> 'i' */
+ { 0x74, 0x75 }, /* TIBETAN VOWEL SIGN U -> 'u' */
+ { 0x76, 0x52 }, /* TIBETAN VOWEL SIGN VOCALIC R -> 'R' */
+ { 0x78, 0x4C }, /* TIBETAN VOWEL SIGN VOCALIC L -> 'L' */
+ { 0x7A, 0x65 }, /* TIBETAN VOWEL SIGN E -> 'e' */
+ { 0x7C, 0x6F }, /* TIBETAN VOWEL SIGN O -> 'o' */
+ { 0x7E, 0x4D }, /* TIBETAN SIGN RJES SU NGA RO -> 'M' */
+ { 0x7F, 0x48 }, /* TIBETAN SIGN RNAM BCAD -> 'H' */
+ { 0x80, 0x69 }, /* TIBETAN VOWEL SIGN REVERSED I -> 'i' */
+ { 0x90, 0x6B }, /* TIBETAN SUBJOINED LETTER KA -> 'k' */
+ { 0x92, 0x67 }, /* TIBETAN SUBJOINED LETTER GA -> 'g' */
+ { 0x95, 0x63 }, /* TIBETAN SUBJOINED LETTER CA -> 'c' */
+ { 0x97, 0x6A }, /* TIBETAN SUBJOINED LETTER JA -> 'j' */
+ { 0x9F, 0x74 }, /* TIBETAN SUBJOINED LETTER TA -> 't' */
+ { 0xA1, 0x64 }, /* TIBETAN SUBJOINED LETTER DA -> 'd' */
+ { 0xA3, 0x6E }, /* TIBETAN SUBJOINED LETTER NA -> 'n' */
+ { 0xA4, 0x70 }, /* TIBETAN SUBJOINED LETTER PA -> 'p' */
+ { 0xA6, 0x62 }, /* TIBETAN SUBJOINED LETTER BA -> 'b' */
+ { 0xA8, 0x6D }, /* TIBETAN SUBJOINED LETTER MA -> 'm' */
+ { 0xAD, 0x77 }, /* TIBETAN SUBJOINED LETTER WA -> 'w' */
+ { 0xAF, 0x7A }, /* TIBETAN SUBJOINED LETTER ZA -> 'z' */
+ { 0xB0, 0x27 }, /* TIBETAN SUBJOINED LETTER -A -> ''' */
+ { 0xB1, 0x79 }, /* TIBETAN SUBJOINED LETTER YA -> 'y' */
+ { 0xB2, 0x72 }, /* TIBETAN SUBJOINED LETTER RA -> 'r' */
+ { 0xB3, 0x6C }, /* TIBETAN SUBJOINED LETTER LA -> 'l' */
+ { 0xB6, 0x73 }, /* TIBETAN SUBJOINED LETTER SA -> 's' */
+ { 0xB7, 0x68 }, /* TIBETAN SUBJOINED LETTER HA -> 'h' */
+ { 0xB8, 0x61 }, /* TIBETAN SUBJOINED LETTER A -> 'a' */
+ { 0xBA, 0x77 }, /* TIBETAN SUBJOINED LETTER FIXED-FORM WA -> 'w' */
+ { 0xBB, 0x79 }, /* TIBETAN SUBJOINED LETTER FIXED-FORM YA -> 'y' */
+ { 0xBC, 0x72 }, /* TIBETAN SUBJOINED LETTER FIXED-FORM RA -> 'r' */
+ { 0xBE, 0x58 }, /* TIBETAN KU RU KHA -> 'X' */
+ /* Entries for page 0x10 */
+ { 0x00, 0x6B }, /* MYANMAR LETTER KA -> 'k' */
+ { 0x02, 0x67 }, /* MYANMAR LETTER GA -> 'g' */
+ { 0x05, 0x63 }, /* MYANMAR LETTER CA -> 'c' */
+ { 0x07, 0x6A }, /* MYANMAR LETTER JA -> 'j' */
+ { 0x12, 0x64 }, /* MYANMAR LETTER DA -> 'd' */
+ { 0x14, 0x6E }, /* MYANMAR LETTER NA -> 'n' */
+ { 0x15, 0x70 }, /* MYANMAR LETTER PA -> 'p' */
+ { 0x17, 0x62 }, /* MYANMAR LETTER BA -> 'b' */
+ { 0x19, 0x6D }, /* MYANMAR LETTER MA -> 'm' */
+ { 0x1A, 0x79 }, /* MYANMAR LETTER YA -> 'y' */
+ { 0x1B, 0x72 }, /* MYANMAR LETTER RA -> 'r' */
+ { 0x1C, 0x6C }, /* MYANMAR LETTER LA -> 'l' */
+ { 0x1D, 0x77 }, /* MYANMAR LETTER WA -> 'w' */
+ { 0x1E, 0x73 }, /* MYANMAR LETTER SA -> 's' */
+ { 0x1F, 0x68 }, /* MYANMAR LETTER HA -> 'h' */
+ { 0x21, 0x61 }, /* MYANMAR LETTER A -> 'a' */
+ { 0x23, 0x69 }, /* MYANMAR LETTER I -> 'i' */
+ { 0x25, 0x75 }, /* MYANMAR LETTER U -> 'u' */
+ { 0x27, 0x65 }, /* MYANMAR LETTER E -> 'e' */
+ { 0x29, 0x6F }, /* MYANMAR LETTER O -> 'o' */
+ { 0x2D, 0x69 }, /* MYANMAR VOWEL SIGN I -> 'i' */
+ { 0x2F, 0x75 }, /* MYANMAR VOWEL SIGN U -> 'u' */
+ { 0x31, 0x65 }, /* MYANMAR VOWEL SIGN E -> 'e' */
+ { 0x36, 0x4E }, /* MYANMAR SIGN ANUSVARA -> 'N' */
+ { 0x37, 0x27 }, /* MYANMAR SIGN DOT BELOW -> ''' */
+ { 0x38, 0x3A }, /* MYANMAR SIGN VISARGA -> ':' */
+ { 0x40, 0x30 }, /* MYANMAR DIGIT ZERO -> '0' */
+ { 0x41, 0x31 }, /* MYANMAR DIGIT ONE -> '1' */
+ { 0x42, 0x32 }, /* MYANMAR DIGIT TWO -> '2' */
+ { 0x43, 0x33 }, /* MYANMAR DIGIT THREE -> '3' */
+ { 0x44, 0x34 }, /* MYANMAR DIGIT FOUR -> '4' */
+ { 0x45, 0x35 }, /* MYANMAR DIGIT FIVE -> '5' */
+ { 0x46, 0x36 }, /* MYANMAR DIGIT SIX -> '6' */
+ { 0x47, 0x37 }, /* MYANMAR DIGIT SEVEN -> '7' */
+ { 0x48, 0x38 }, /* MYANMAR DIGIT EIGHT -> '8' */
+ { 0x49, 0x39 }, /* MYANMAR DIGIT NINE -> '9' */
+ { 0x52, 0x52 }, /* MYANMAR LETTER VOCALIC R -> 'R' */
+ { 0x54, 0x4C }, /* MYANMAR LETTER VOCALIC L -> 'L' */
+ { 0x56, 0x52 }, /* MYANMAR VOWEL SIGN VOCALIC R -> 'R' */
+ { 0x58, 0x4C }, /* MYANMAR VOWEL SIGN VOCALIC L -> 'L' */
+ { 0xA0, 0x41 }, /* GEORGIAN CAPITAL LETTER AN -> 'A' */
+ { 0xA1, 0x42 }, /* GEORGIAN CAPITAL LETTER BAN -> 'B' */
+ { 0xA2, 0x47 }, /* GEORGIAN CAPITAL LETTER GAN -> 'G' */
+ { 0xA3, 0x44 }, /* GEORGIAN CAPITAL LETTER DON -> 'D' */
+ { 0xA4, 0x45 }, /* GEORGIAN CAPITAL LETTER EN -> 'E' */
+ { 0xA5, 0x56 }, /* GEORGIAN CAPITAL LETTER VIN -> 'V' */
+ { 0xA6, 0x5A }, /* GEORGIAN CAPITAL LETTER ZEN -> 'Z' */
+ { 0xA8, 0x49 }, /* GEORGIAN CAPITAL LETTER IN -> 'I' */
+ { 0xA9, 0x4B }, /* GEORGIAN CAPITAL LETTER KAN -> 'K' */
+ { 0xAA, 0x4C }, /* GEORGIAN CAPITAL LETTER LAS -> 'L' */
+ { 0xAB, 0x4D }, /* GEORGIAN CAPITAL LETTER MAN -> 'M' */
+ { 0xAC, 0x4E }, /* GEORGIAN CAPITAL LETTER NAR -> 'N' */
+ { 0xAD, 0x4F }, /* GEORGIAN CAPITAL LETTER ON -> 'O' */
+ { 0xAE, 0x50 }, /* GEORGIAN CAPITAL LETTER PAR -> 'P' */
+ { 0xB0, 0x52 }, /* GEORGIAN CAPITAL LETTER RAE -> 'R' */
+ { 0xB1, 0x53 }, /* GEORGIAN CAPITAL LETTER SAN -> 'S' */
+ { 0xB2, 0x54 }, /* GEORGIAN CAPITAL LETTER TAR -> 'T' */
+ { 0xB3, 0x55 }, /* GEORGIAN CAPITAL LETTER UN -> 'U' */
+ { 0xB7, 0x51 }, /* GEORGIAN CAPITAL LETTER QAR -> 'Q' */
+ { 0xBC, 0x43 }, /* GEORGIAN CAPITAL LETTER CIL -> 'C' */
+ { 0xBE, 0x58 }, /* GEORGIAN CAPITAL LETTER XAN -> 'X' */
+ { 0xBF, 0x4A }, /* GEORGIAN CAPITAL LETTER JHAN -> 'J' */
+ { 0xC0, 0x48 }, /* GEORGIAN CAPITAL LETTER HAE -> 'H' */
+ { 0xC1, 0x45 }, /* GEORGIAN CAPITAL LETTER HE -> 'E' */
+ { 0xC2, 0x59 }, /* GEORGIAN CAPITAL LETTER HIE -> 'Y' */
+ { 0xC3, 0x57 }, /* GEORGIAN CAPITAL LETTER WE -> 'W' */
+ { 0xD0, 0x61 }, /* GEORGIAN LETTER AN -> 'a' */
+ { 0xD1, 0x62 }, /* GEORGIAN LETTER BAN -> 'b' */
+ { 0xD2, 0x67 }, /* GEORGIAN LETTER GAN -> 'g' */
+ { 0xD3, 0x64 }, /* GEORGIAN LETTER DON -> 'd' */
+ { 0xD4, 0x65 }, /* GEORGIAN LETTER EN -> 'e' */
+ { 0xD5, 0x76 }, /* GEORGIAN LETTER VIN -> 'v' */
+ { 0xD6, 0x7A }, /* GEORGIAN LETTER ZEN -> 'z' */
+ { 0xD8, 0x69 }, /* GEORGIAN LETTER IN -> 'i' */
+ { 0xD9, 0x6B }, /* GEORGIAN LETTER KAN -> 'k' */
+ { 0xDA, 0x6C }, /* GEORGIAN LETTER LAS -> 'l' */
+ { 0xDB, 0x6D }, /* GEORGIAN LETTER MAN -> 'm' */
+ { 0xDC, 0x6E }, /* GEORGIAN LETTER NAR -> 'n' */
+ { 0xDD, 0x6F }, /* GEORGIAN LETTER ON -> 'o' */
+ { 0xDE, 0x70 }, /* GEORGIAN LETTER PAR -> 'p' */
+ { 0xE0, 0x72 }, /* GEORGIAN LETTER RAE -> 'r' */
+ { 0xE1, 0x73 }, /* GEORGIAN LETTER SAN -> 's' */
+ { 0xE2, 0x74 }, /* GEORGIAN LETTER TAR -> 't' */
+ { 0xE3, 0x75 }, /* GEORGIAN LETTER UN -> 'u' */
+ { 0xE7, 0x71 }, /* GEORGIAN LETTER QAR -> 'q' */
+ { 0xEC, 0x63 }, /* GEORGIAN LETTER CIL -> 'c' */
+ { 0xEE, 0x78 }, /* GEORGIAN LETTER XAN -> 'x' */
+ { 0xEF, 0x6A }, /* GEORGIAN LETTER JHAN -> 'j' */
+ { 0xF0, 0x68 }, /* GEORGIAN LETTER HAE -> 'h' */
+ { 0xF1, 0x65 }, /* GEORGIAN LETTER HE -> 'e' */
+ { 0xF2, 0x79 }, /* GEORGIAN LETTER HIE -> 'y' */
+ { 0xF3, 0x77 }, /* GEORGIAN LETTER WE -> 'w' */
+ { 0xF6, 0x66 }, /* GEORGIAN LETTER FI -> 'f' */
+ /* Entries for page 0x11 */
+ { 0x00, 0x67 }, /* HANGUL CHOSEONG KIYEOK -> 'g' */
+ { 0x02, 0x6E }, /* HANGUL CHOSEONG NIEUN -> 'n' */
+ { 0x03, 0x64 }, /* HANGUL CHOSEONG TIKEUT -> 'd' */
+ { 0x05, 0x72 }, /* HANGUL CHOSEONG RIEUL -> 'r' */
+ { 0x06, 0x6D }, /* HANGUL CHOSEONG MIEUM -> 'm' */
+ { 0x07, 0x62 }, /* HANGUL CHOSEONG PIEUP -> 'b' */
+ { 0x09, 0x73 }, /* HANGUL CHOSEONG SIOS -> 's' */
+ { 0x0C, 0x6A }, /* HANGUL CHOSEONG CIEUC -> 'j' */
+ { 0x0E, 0x63 }, /* HANGUL CHOSEONG CHIEUCH -> 'c' */
+ { 0x0F, 0x6B }, /* HANGUL CHOSEONG KHIEUKH -> 'k' */
+ { 0x10, 0x74 }, /* HANGUL CHOSEONG THIEUTH -> 't' */
+ { 0x11, 0x70 }, /* HANGUL CHOSEONG PHIEUPH -> 'p' */
+ { 0x12, 0x68 }, /* HANGUL CHOSEONG HIEUH -> 'h' */
+ { 0x35, 0x73 }, /* HANGUL CHOSEONG SIOS-IEUNG -> 's' */
+ { 0x40, 0x5A }, /* HANGUL CHOSEONG PANSIOS -> 'Z' */
+ { 0x41, 0x67 }, /* HANGUL CHOSEONG IEUNG-KIYEOK -> 'g' */
+ { 0x42, 0x64 }, /* HANGUL CHOSEONG IEUNG-TIKEUT -> 'd' */
+ { 0x43, 0x6D }, /* HANGUL CHOSEONG IEUNG-MIEUM -> 'm' */
+ { 0x44, 0x62 }, /* HANGUL CHOSEONG IEUNG-PIEUP -> 'b' */
+ { 0x45, 0x73 }, /* HANGUL CHOSEONG IEUNG-SIOS -> 's' */
+ { 0x46, 0x5A }, /* HANGUL CHOSEONG IEUNG-PANSIOS -> 'Z' */
+ { 0x48, 0x6A }, /* HANGUL CHOSEONG IEUNG-CIEUC -> 'j' */
+ { 0x49, 0x63 }, /* HANGUL CHOSEONG IEUNG-CHIEUCH -> 'c' */
+ { 0x4A, 0x74 }, /* HANGUL CHOSEONG IEUNG-THIEUTH -> 't' */
+ { 0x4B, 0x70 }, /* HANGUL CHOSEONG IEUNG-PHIEUPH -> 'p' */
+ { 0x4C, 0x4E }, /* HANGUL CHOSEONG YESIEUNG -> 'N' */
+ { 0x4D, 0x6A }, /* HANGUL CHOSEONG CIEUC-IEUNG -> 'j' */
+ { 0x59, 0x51 }, /* HANGUL CHOSEONG YEORINHIEUH -> 'Q' */
+ { 0x61, 0x61 }, /* HANGUL JUNGSEONG A -> 'a' */
+ { 0x66, 0x65 }, /* HANGUL JUNGSEONG E -> 'e' */
+ { 0x69, 0x6F }, /* HANGUL JUNGSEONG O -> 'o' */
+ { 0x6E, 0x75 }, /* HANGUL JUNGSEONG U -> 'u' */
+ { 0x75, 0x69 }, /* HANGUL JUNGSEONG I -> 'i' */
+ { 0x9E, 0x55 }, /* HANGUL JUNGSEONG ARAEA -> 'U' */
+ { 0xA8, 0x67 }, /* HANGUL JONGSEONG KIYEOK -> 'g' */
+ { 0xAB, 0x6E }, /* HANGUL JONGSEONG NIEUN -> 'n' */
+ { 0xAE, 0x64 }, /* HANGUL JONGSEONG TIKEUT -> 'd' */
+ { 0xAF, 0x6C }, /* HANGUL JONGSEONG RIEUL -> 'l' */
+ { 0xB7, 0x6D }, /* HANGUL JONGSEONG MIEUM -> 'm' */
+ { 0xB8, 0x62 }, /* HANGUL JONGSEONG PIEUP -> 'b' */
+ { 0xBA, 0x73 }, /* HANGUL JONGSEONG SIOS -> 's' */
+ { 0xBD, 0x6A }, /* HANGUL JONGSEONG CIEUC -> 'j' */
+ { 0xBE, 0x63 }, /* HANGUL JONGSEONG CHIEUCH -> 'c' */
+ { 0xBF, 0x6B }, /* HANGUL JONGSEONG KHIEUKH -> 'k' */
+ { 0xC0, 0x74 }, /* HANGUL JONGSEONG THIEUTH -> 't' */
+ { 0xC1, 0x70 }, /* HANGUL JONGSEONG PHIEUPH -> 'p' */
+ { 0xC2, 0x68 }, /* HANGUL JONGSEONG HIEUH -> 'h' */
+ { 0xEB, 0x5A }, /* HANGUL JONGSEONG PANSIOS -> 'Z' */
+ { 0xEC, 0x67 }, /* HANGUL JONGSEONG IEUNG-KIYEOK -> 'g' */
+ { 0xF0, 0x4E }, /* HANGUL JONGSEONG YESIEUNG -> 'N' */
+ { 0xF9, 0x51 }, /* HANGUL JONGSEONG YEORINHIEUH -> 'Q' */
+ /* Entries for page 0x13 */
+ { 0x61, 0x20 }, /* ETHIOPIC WORDSPACE -> ' ' */
+ { 0x62, 0x2E }, /* ETHIOPIC FULL STOP -> '.' */
+ { 0x63, 0x2C }, /* ETHIOPIC COMMA -> ',' */
+ { 0x64, 0x3B }, /* ETHIOPIC SEMICOLON -> ';' */
+ { 0x65, 0x3A }, /* ETHIOPIC COLON -> ':' */
+ { 0x67, 0x3F }, /* ETHIOPIC QUESTION MARK -> '?' */
+ { 0x69, 0x31 }, /* ETHIOPIC DIGIT ONE -> '1' */
+ { 0x6A, 0x32 }, /* ETHIOPIC DIGIT TWO -> '2' */
+ { 0x6B, 0x33 }, /* ETHIOPIC DIGIT THREE -> '3' */
+ { 0x6C, 0x34 }, /* ETHIOPIC DIGIT FOUR -> '4' */
+ { 0x6D, 0x35 }, /* ETHIOPIC DIGIT FIVE -> '5' */
+ { 0x6E, 0x36 }, /* ETHIOPIC DIGIT SIX -> '6' */
+ { 0x6F, 0x37 }, /* ETHIOPIC DIGIT SEVEN -> '7' */
+ { 0x70, 0x38 }, /* ETHIOPIC DIGIT EIGHT -> '8' */
+ { 0x71, 0x39 }, /* ETHIOPIC DIGIT NINE -> '9' */
+ { 0xA0, 0x61 }, /* CHEROKEE LETTER A -> 'a' */
+ { 0xA1, 0x65 }, /* CHEROKEE LETTER E -> 'e' */
+ { 0xA2, 0x69 }, /* CHEROKEE LETTER I -> 'i' */
+ { 0xA3, 0x6F }, /* CHEROKEE LETTER O -> 'o' */
+ { 0xA4, 0x75 }, /* CHEROKEE LETTER U -> 'u' */
+ { 0xA5, 0x76 }, /* CHEROKEE LETTER V -> 'v' */
+ { 0xCD, 0x73 }, /* CHEROKEE LETTER S -> 's' */
+ /* Entries for page 0x14 */
+ { 0x01, 0x65 }, /* CANADIAN SYLLABICS E -> 'e' */
+ { 0x03, 0x69 }, /* CANADIAN SYLLABICS I -> 'i' */
+ { 0x05, 0x6F }, /* CANADIAN SYLLABICS O -> 'o' */
+ { 0x09, 0x69 }, /* CANADIAN SYLLABICS CARRIER I -> 'i' */
+ { 0x0A, 0x61 }, /* CANADIAN SYLLABICS A -> 'a' */
+ { 0x1D, 0x77 }, /* CANADIAN SYLLABICS Y-CREE W -> 'w' */
+ { 0x1E, 0x27 }, /* CANADIAN SYLLABICS GLOTTAL STOP -> ''' */
+ { 0x1F, 0x74 }, /* CANADIAN SYLLABICS FINAL ACUTE -> 't' */
+ { 0x20, 0x6B }, /* CANADIAN SYLLABICS FINAL GRAVE -> 'k' */
+ { 0x22, 0x73 }, /* CANADIAN SYLLABICS FINAL TOP HALF RING -> 's' */
+ { 0x23, 0x6E }, /* CANADIAN SYLLABICS FINAL RIGHT HALF RING -> 'n' */
+ { 0x24, 0x77 }, /* CANADIAN SYLLABICS FINAL RING -> 'w' */
+ { 0x25, 0x6E }, /* CANADIAN SYLLABICS FINAL DOUBLE ACUTE -> 'n' */
+ { 0x27, 0x77 }, /* CANADIAN SYLLABICS FINAL MIDDLE DOT -> 'w' */
+ { 0x28, 0x63 }, /* CANADIAN SYLLABICS FINAL SHORT HORIZONTAL STROKE -> 'c' */
+ { 0x29, 0x3F }, /* CANADIAN SYLLABICS FINAL PLUS -> '?' */
+ { 0x2A, 0x6C }, /* CANADIAN SYLLABICS FINAL DOWN TACK -> 'l' */
+ { 0x49, 0x70 }, /* CANADIAN SYLLABICS P -> 'p' */
+ { 0x4A, 0x70 }, /* CANADIAN SYLLABICS WEST-CREE P -> 'p' */
+ { 0x4B, 0x68 }, /* CANADIAN SYLLABICS CARRIER H -> 'h' */
+ { 0x66, 0x74 }, /* CANADIAN SYLLABICS T -> 't' */
+ { 0x83, 0x6B }, /* CANADIAN SYLLABICS K -> 'k' */
+ { 0xA1, 0x63 }, /* CANADIAN SYLLABICS C -> 'c' */
+ { 0xBB, 0x6D }, /* CANADIAN SYLLABICS M -> 'm' */
+ { 0xBC, 0x6D }, /* CANADIAN SYLLABICS WEST-CREE M -> 'm' */
+ { 0xBE, 0x6D }, /* CANADIAN SYLLABICS ATHAPASCAN M -> 'm' */
+ { 0xBF, 0x6D }, /* CANADIAN SYLLABICS SAYISI M -> 'm' */
+ { 0xD0, 0x6E }, /* CANADIAN SYLLABICS N -> 'n' */
+ { 0xEA, 0x00 }, /* CANADIAN SYLLABICS L -> ... */
+ { 0xEC, 0x6C }, /* CANADIAN SYLLABICS MEDIAL L -> 'l' */
+ /* Entries for page 0x15 */
+ { 0x05, 0x73 }, /* CANADIAN SYLLABICS S -> 's' */
+ { 0x06, 0x73 }, /* CANADIAN SYLLABICS ATHAPASCAN S -> 's' */
+ { 0x08, 0x73 }, /* CANADIAN SYLLABICS BLACKFOOT S -> 's' */
+ { 0x3E, 0x00 }, /* CANADIAN SYLLABICS Y -> ... */
+ { 0x40, 0x79 }, /* CANADIAN SYLLABICS WEST-CREE Y -> 'y' */
+ { 0x50, 0x00 }, /* CANADIAN SYLLABICS R -> ... */
+ { 0x52, 0x72 }, /* CANADIAN SYLLABICS MEDIAL R -> 'r' */
+ { 0x5D, 0x66 }, /* CANADIAN SYLLABICS F -> 'f' */
+ { 0x7B, 0x68 }, /* CANADIAN SYLLABICS NUNAVIK H -> 'h' */
+ { 0x7C, 0x68 }, /* CANADIAN SYLLABICS NUNAVUT H -> 'h' */
+ { 0x85, 0x71 }, /* CANADIAN SYLLABICS Q -> 'q' */
+ { 0xAF, 0x62 }, /* CANADIAN SYLLABICS AIVILIK B -> 'b' */
+ { 0xB0, 0x65 }, /* CANADIAN SYLLABICS BLACKFOOT E -> 'e' */
+ { 0xB1, 0x69 }, /* CANADIAN SYLLABICS BLACKFOOT I -> 'i' */
+ { 0xB2, 0x6F }, /* CANADIAN SYLLABICS BLACKFOOT O -> 'o' */
+ { 0xB3, 0x61 }, /* CANADIAN SYLLABICS BLACKFOOT A -> 'a' */
+ { 0xEE, 0x70 }, /* CANADIAN SYLLABICS CARRIER P -> 'p' */
+ /* Entries for page 0x16 */
+ { 0x46, 0x7A }, /* CANADIAN SYLLABICS CARRIER Z -> 'z' */
+ { 0x47, 0x7A }, /* CANADIAN SYLLABICS CARRIER INITIAL Z -> 'z' */
+ { 0x6D, 0x58 }, /* CANADIAN SYLLABICS CHI SIGN -> 'X' */
+ { 0x6E, 0x2E }, /* CANADIAN SYLLABICS FULL STOP -> '.' */
+ { 0x80, 0x20 }, /* OGHAM SPACE MARK -> ' ' */
+ { 0x81, 0x62 }, /* OGHAM LETTER BEITH -> 'b' */
+ { 0x82, 0x6C }, /* OGHAM LETTER LUIS -> 'l' */
+ { 0x83, 0x66 }, /* OGHAM LETTER FEARN -> 'f' */
+ { 0x84, 0x73 }, /* OGHAM LETTER SAIL -> 's' */
+ { 0x85, 0x6E }, /* OGHAM LETTER NION -> 'n' */
+ { 0x86, 0x68 }, /* OGHAM LETTER UATH -> 'h' */
+ { 0x87, 0x64 }, /* OGHAM LETTER DAIR -> 'd' */
+ { 0x88, 0x74 }, /* OGHAM LETTER TINNE -> 't' */
+ { 0x89, 0x63 }, /* OGHAM LETTER COLL -> 'c' */
+ { 0x8A, 0x71 }, /* OGHAM LETTER CEIRT -> 'q' */
+ { 0x8B, 0x6D }, /* OGHAM LETTER MUIN -> 'm' */
+ { 0x8C, 0x67 }, /* OGHAM LETTER GORT -> 'g' */
+ { 0x8E, 0x7A }, /* OGHAM LETTER STRAIF -> 'z' */
+ { 0x8F, 0x72 }, /* OGHAM LETTER RUIS -> 'r' */
+ { 0x90, 0x61 }, /* OGHAM LETTER AILM -> 'a' */
+ { 0x91, 0x6F }, /* OGHAM LETTER ONN -> 'o' */
+ { 0x92, 0x75 }, /* OGHAM LETTER UR -> 'u' */
+ { 0x93, 0x65 }, /* OGHAM LETTER EADHADH -> 'e' */
+ { 0x94, 0x69 }, /* OGHAM LETTER IODHADH -> 'i' */
+ { 0x98, 0x70 }, /* OGHAM LETTER IFIN -> 'p' */
+ { 0x99, 0x78 }, /* OGHAM LETTER EAMHANCHOLL -> 'x' */
+ { 0x9A, 0x70 }, /* OGHAM LETTER PEITH -> 'p' */
+ { 0x9B, 0x3C }, /* OGHAM FEATHER MARK -> '<' */
+ { 0x9C, 0x3E }, /* OGHAM REVERSED FEATHER MARK -> '>' */
+ { 0xA0, 0x66 }, /* RUNIC LETTER FEHU FEOH FE F -> 'f' */
+ { 0xA1, 0x76 }, /* RUNIC LETTER V -> 'v' */
+ { 0xA2, 0x75 }, /* RUNIC LETTER URUZ UR U -> 'u' */
+ { 0xA4, 0x79 }, /* RUNIC LETTER Y -> 'y' */
+ { 0xA5, 0x77 }, /* RUNIC LETTER W -> 'w' */
+ { 0xA8, 0x61 }, /* RUNIC LETTER ANSUZ A -> 'a' */
+ { 0xA9, 0x6F }, /* RUNIC LETTER OS O -> 'o' */
+ { 0xAC, 0x00 }, /* RUNIC LETTER LONG-BRANCH-OSS O -> ... */
+ { 0xAE, 0x6F }, /* RUNIC LETTER O -> 'o' */
+ { 0xB1, 0x72 }, /* RUNIC LETTER RAIDO RAD REID R -> 'r' */
+ { 0xB2, 0x6B }, /* RUNIC LETTER KAUNA -> 'k' */
+ { 0xB3, 0x63 }, /* RUNIC LETTER CEN -> 'c' */
+ { 0xB4, 0x6B }, /* RUNIC LETTER KAUN K -> 'k' */
+ { 0xB5, 0x67 }, /* RUNIC LETTER G -> 'g' */
+ { 0xB7, 0x67 }, /* RUNIC LETTER GEBO GYFU G -> 'g' */
+ { 0xB8, 0x67 }, /* RUNIC LETTER GAR -> 'g' */
+ { 0xB9, 0x77 }, /* RUNIC LETTER WUNJO WYNN W -> 'w' */
+ { 0xBA, 0x00 }, /* RUNIC LETTER HAGLAZ H -> ... */
+ { 0xBD, 0x68 }, /* RUNIC LETTER SHORT-TWIG-HAGALL H -> 'h' */
+ { 0xBE, 0x00 }, /* RUNIC LETTER NAUDIZ NYD NAUD N -> ... */
+ { 0xC0, 0x6E }, /* RUNIC LETTER DOTTED-N -> 'n' */
+ { 0xC1, 0x69 }, /* RUNIC LETTER ISAZ IS ISS I -> 'i' */
+ { 0xC2, 0x65 }, /* RUNIC LETTER E -> 'e' */
+ { 0xC3, 0x6A }, /* RUNIC LETTER JERAN J -> 'j' */
+ { 0xC4, 0x67 }, /* RUNIC LETTER GER -> 'g' */
+ { 0xC6, 0x61 }, /* RUNIC LETTER SHORT-TWIG-AR A -> 'a' */
+ { 0xC8, 0x70 }, /* RUNIC LETTER PERTHO PEORTH P -> 'p' */
+ { 0xC9, 0x7A }, /* RUNIC LETTER ALGIZ EOLHX -> 'z' */
+ { 0xCA, 0x00 }, /* RUNIC LETTER SOWILO S -> ... */
+ { 0xCC, 0x73 }, /* RUNIC LETTER SHORT-TWIG-SOL S -> 's' */
+ { 0xCD, 0x63 }, /* RUNIC LETTER C -> 'c' */
+ { 0xCE, 0x7A }, /* RUNIC LETTER Z -> 'z' */
+ { 0xCF, 0x74 }, /* RUNIC LETTER TIWAZ TIR TYR T -> 't' */
+ { 0xD0, 0x74 }, /* RUNIC LETTER SHORT-TWIG-TYR T -> 't' */
+ { 0xD1, 0x64 }, /* RUNIC LETTER D -> 'd' */
+ { 0xD2, 0x62 }, /* RUNIC LETTER BERKANAN BEORC BJARKAN B -> 'b' */
+ { 0xD3, 0x62 }, /* RUNIC LETTER SHORT-TWIG-BJARKAN B -> 'b' */
+ { 0xD4, 0x70 }, /* RUNIC LETTER DOTTED-P -> 'p' */
+ { 0xD5, 0x70 }, /* RUNIC LETTER OPEN-P -> 'p' */
+ { 0xD6, 0x65 }, /* RUNIC LETTER EHWAZ EH E -> 'e' */
+ { 0xD7, 0x00 }, /* RUNIC LETTER MANNAZ MAN M -> ... */
+ { 0xD9, 0x6D }, /* RUNIC LETTER SHORT-TWIG-MADR M -> 'm' */
+ { 0xDA, 0x6C }, /* RUNIC LETTER LAUKAZ LAGU LOGR L -> 'l' */
+ { 0xDB, 0x6C }, /* RUNIC LETTER DOTTED-L -> 'l' */
+ { 0xDE, 0x64 }, /* RUNIC LETTER DAGAZ DAEG D -> 'd' */
+ { 0xDF, 0x6F }, /* RUNIC LETTER OTHALAN ETHEL O -> 'o' */
+ { 0xE5, 0x73 }, /* RUNIC LETTER STAN -> 's' */
+ { 0xE9, 0x71 }, /* RUNIC LETTER Q -> 'q' */
+ { 0xEA, 0x78 }, /* RUNIC LETTER X -> 'x' */
+ { 0xEB, 0x2E }, /* RUNIC SINGLE PUNCTUATION -> '.' */
+ { 0xEC, 0x3A }, /* RUNIC MULTIPLE PUNCTUATION -> ':' */
+ { 0xED, 0x2B }, /* RUNIC CROSS PUNCTUATION -> '+' */
+ /* Entries for page 0x17 */
+ { 0x80, 0x6B }, /* KHMER LETTER KA -> 'k' */
+ { 0x82, 0x67 }, /* KHMER LETTER KO -> 'g' */
+ { 0x85, 0x63 }, /* KHMER LETTER CA -> 'c' */
+ { 0x87, 0x6A }, /* KHMER LETTER CO -> 'j' */
+ { 0x8A, 0x74 }, /* KHMER LETTER DA -> 't' */
+ { 0x8C, 0x64 }, /* KHMER LETTER DO -> 'd' */
+ { 0x8F, 0x74 }, /* KHMER LETTER TA -> 't' */
+ { 0x91, 0x64 }, /* KHMER LETTER TO -> 'd' */
+ { 0x93, 0x6E }, /* KHMER LETTER NO -> 'n' */
+ { 0x94, 0x70 }, /* KHMER LETTER BA -> 'p' */
+ { 0x96, 0x62 }, /* KHMER LETTER PO -> 'b' */
+ { 0x98, 0x6D }, /* KHMER LETTER MO -> 'm' */
+ { 0x99, 0x79 }, /* KHMER LETTER YO -> 'y' */
+ { 0x9A, 0x72 }, /* KHMER LETTER RO -> 'r' */
+ { 0x9B, 0x6C }, /* KHMER LETTER LO -> 'l' */
+ { 0x9C, 0x76 }, /* KHMER LETTER VO -> 'v' */
+ { 0x9F, 0x73 }, /* KHMER LETTER SA -> 's' */
+ { 0xA0, 0x68 }, /* KHMER LETTER HA -> 'h' */
+ { 0xA1, 0x6C }, /* KHMER LETTER LA -> 'l' */
+ { 0xA2, 0x71 }, /* KHMER LETTER QA -> 'q' */
+ { 0xA3, 0x61 }, /* KHMER INDEPENDENT VOWEL QAQ -> 'a' */
+ { 0xA5, 0x69 }, /* KHMER INDEPENDENT VOWEL QI -> 'i' */
+ { 0xA7, 0x75 }, /* KHMER INDEPENDENT VOWEL QU -> 'u' */
+ { 0xAF, 0x65 }, /* KHMER INDEPENDENT VOWEL QE -> 'e' */
+ { 0xB4, 0x61 }, /* KHMER VOWEL INHERENT AQ -> 'a' */
+ { 0xB7, 0x69 }, /* KHMER VOWEL SIGN I -> 'i' */
+ { 0xB9, 0x79 }, /* KHMER VOWEL SIGN Y -> 'y' */
+ { 0xBB, 0x75 }, /* KHMER VOWEL SIGN U -> 'u' */
+ { 0xC1, 0x65 }, /* KHMER VOWEL SIGN E -> 'e' */
+ { 0xC6, 0x4D }, /* KHMER SIGN NIKAHIT -> 'M' */
+ { 0xC7, 0x48 }, /* KHMER SIGN REAHMUK -> 'H' */
+ { 0xCC, 0x72 }, /* KHMER SIGN ROBAT -> 'r' */
+ { 0xCE, 0x21 }, /* KHMER SIGN KAKABAT -> '!' */
+ { 0xD4, 0x2E }, /* KHMER SIGN KHAN -> '.' */
+ { 0xD6, 0x3A }, /* KHMER SIGN CAMNUC PII KUUH -> ':' */
+ { 0xD7, 0x2B }, /* KHMER SIGN LEK TOO -> '+' */
+ { 0xDC, 0x27 }, /* KHMER SIGN AVAKRAHASANYA -> ''' */
+ { 0xE0, 0x30 }, /* KHMER DIGIT ZERO -> '0' */
+ { 0xE1, 0x31 }, /* KHMER DIGIT ONE -> '1' */
+ { 0xE2, 0x32 }, /* KHMER DIGIT TWO -> '2' */
+ { 0xE3, 0x33 }, /* KHMER DIGIT THREE -> '3' */
+ { 0xE4, 0x34 }, /* KHMER DIGIT FOUR -> '4' */
+ { 0xE5, 0x35 }, /* KHMER DIGIT FIVE -> '5' */
+ { 0xE6, 0x36 }, /* KHMER DIGIT SIX -> '6' */
+ { 0xE7, 0x37 }, /* KHMER DIGIT SEVEN -> '7' */
+ { 0xE8, 0x38 }, /* KHMER DIGIT EIGHT -> '8' */
+ { 0xE9, 0x39 }, /* KHMER DIGIT NINE -> '9' */
+ /* Entries for page 0x18 */
+ { 0x07, 0x2D }, /* MONGOLIAN SIBE SYLLABLE BOUNDARY MARKER -> '-' */
+ { 0x10, 0x30 }, /* MONGOLIAN DIGIT ZERO -> '0' */
+ { 0x11, 0x31 }, /* MONGOLIAN DIGIT ONE -> '1' */
+ { 0x12, 0x32 }, /* MONGOLIAN DIGIT TWO -> '2' */
+ { 0x13, 0x33 }, /* MONGOLIAN DIGIT THREE -> '3' */
+ { 0x14, 0x34 }, /* MONGOLIAN DIGIT FOUR -> '4' */
+ { 0x15, 0x35 }, /* MONGOLIAN DIGIT FIVE -> '5' */
+ { 0x16, 0x36 }, /* MONGOLIAN DIGIT SIX -> '6' */
+ { 0x17, 0x37 }, /* MONGOLIAN DIGIT SEVEN -> '7' */
+ { 0x18, 0x38 }, /* MONGOLIAN DIGIT EIGHT -> '8' */
+ { 0x19, 0x39 }, /* MONGOLIAN DIGIT NINE -> '9' */
+ { 0x20, 0x61 }, /* MONGOLIAN LETTER A -> 'a' */
+ { 0x21, 0x65 }, /* MONGOLIAN LETTER E -> 'e' */
+ { 0x22, 0x69 }, /* MONGOLIAN LETTER I -> 'i' */
+ { 0x23, 0x6F }, /* MONGOLIAN LETTER O -> 'o' */
+ { 0x24, 0x75 }, /* MONGOLIAN LETTER U -> 'u' */
+ { 0x25, 0x4F }, /* MONGOLIAN LETTER OE -> 'O' */
+ { 0x26, 0x55 }, /* MONGOLIAN LETTER UE -> 'U' */
+ { 0x28, 0x6E }, /* MONGOLIAN LETTER NA -> 'n' */
+ { 0x2A, 0x62 }, /* MONGOLIAN LETTER BA -> 'b' */
+ { 0x2B, 0x70 }, /* MONGOLIAN LETTER PA -> 'p' */
+ { 0x2C, 0x71 }, /* MONGOLIAN LETTER QA -> 'q' */
+ { 0x2D, 0x67 }, /* MONGOLIAN LETTER GA -> 'g' */
+ { 0x2E, 0x6D }, /* MONGOLIAN LETTER MA -> 'm' */
+ { 0x2F, 0x6C }, /* MONGOLIAN LETTER LA -> 'l' */
+ { 0x30, 0x73 }, /* MONGOLIAN LETTER SA -> 's' */
+ { 0x32, 0x74 }, /* MONGOLIAN LETTER TA -> 't' */
+ { 0x33, 0x64 }, /* MONGOLIAN LETTER DA -> 'd' */
+ { 0x35, 0x6A }, /* MONGOLIAN LETTER JA -> 'j' */
+ { 0x36, 0x79 }, /* MONGOLIAN LETTER YA -> 'y' */
+ { 0x37, 0x72 }, /* MONGOLIAN LETTER RA -> 'r' */
+ { 0x38, 0x77 }, /* MONGOLIAN LETTER WA -> 'w' */
+ { 0x39, 0x66 }, /* MONGOLIAN LETTER FA -> 'f' */
+ { 0x3A, 0x6B }, /* MONGOLIAN LETTER KA -> 'k' */
+ { 0x3D, 0x7A }, /* MONGOLIAN LETTER ZA -> 'z' */
+ { 0x3E, 0x68 }, /* MONGOLIAN LETTER HAA -> 'h' */
+ { 0x43, 0x2D }, /* MONGOLIAN LETTER TODO LONG VOWEL SIGN -> '-' */
+ { 0x44, 0x65 }, /* MONGOLIAN LETTER TODO E -> 'e' */
+ { 0x45, 0x69 }, /* MONGOLIAN LETTER TODO I -> 'i' */
+ { 0x46, 0x6F }, /* MONGOLIAN LETTER TODO O -> 'o' */
+ { 0x47, 0x75 }, /* MONGOLIAN LETTER TODO U -> 'u' */
+ { 0x48, 0x4F }, /* MONGOLIAN LETTER TODO OE -> 'O' */
+ { 0x49, 0x55 }, /* MONGOLIAN LETTER TODO UE -> 'U' */
+ { 0x4B, 0x62 }, /* MONGOLIAN LETTER TODO BA -> 'b' */
+ { 0x4C, 0x70 }, /* MONGOLIAN LETTER TODO PA -> 'p' */
+ { 0x4D, 0x71 }, /* MONGOLIAN LETTER TODO QA -> 'q' */
+ { 0x4E, 0x67 }, /* MONGOLIAN LETTER TODO GA -> 'g' */
+ { 0x4F, 0x6D }, /* MONGOLIAN LETTER TODO MA -> 'm' */
+ { 0x50, 0x74 }, /* MONGOLIAN LETTER TODO TA -> 't' */
+ { 0x51, 0x64 }, /* MONGOLIAN LETTER TODO DA -> 'd' */
+ { 0x53, 0x6A }, /* MONGOLIAN LETTER TODO JA -> 'j' */
+ { 0x55, 0x79 }, /* MONGOLIAN LETTER TODO YA -> 'y' */
+ { 0x56, 0x77 }, /* MONGOLIAN LETTER TODO WA -> 'w' */
+ { 0x57, 0x6B }, /* MONGOLIAN LETTER TODO KA -> 'k' */
+ { 0x58, 0x67 }, /* MONGOLIAN LETTER TODO GAA -> 'g' */
+ { 0x59, 0x68 }, /* MONGOLIAN LETTER TODO HAA -> 'h' */
+ { 0x5D, 0x65 }, /* MONGOLIAN LETTER SIBE E -> 'e' */
+ { 0x5E, 0x69 }, /* MONGOLIAN LETTER SIBE I -> 'i' */
+ { 0x60, 0x55 }, /* MONGOLIAN LETTER SIBE UE -> 'U' */
+ { 0x61, 0x75 }, /* MONGOLIAN LETTER SIBE U -> 'u' */
+ { 0x63, 0x6B }, /* MONGOLIAN LETTER SIBE KA -> 'k' */
+ { 0x64, 0x67 }, /* MONGOLIAN LETTER SIBE GA -> 'g' */
+ { 0x65, 0x68 }, /* MONGOLIAN LETTER SIBE HA -> 'h' */
+ { 0x66, 0x70 }, /* MONGOLIAN LETTER SIBE PA -> 'p' */
+ { 0x68, 0x74 }, /* MONGOLIAN LETTER SIBE TA -> 't' */
+ { 0x69, 0x64 }, /* MONGOLIAN LETTER SIBE DA -> 'd' */
+ { 0x6A, 0x6A }, /* MONGOLIAN LETTER SIBE JA -> 'j' */
+ { 0x6B, 0x66 }, /* MONGOLIAN LETTER SIBE FA -> 'f' */
+ { 0x6C, 0x67 }, /* MONGOLIAN LETTER SIBE GAA -> 'g' */
+ { 0x6D, 0x68 }, /* MONGOLIAN LETTER SIBE HAA -> 'h' */
+ { 0x6F, 0x7A }, /* MONGOLIAN LETTER SIBE ZA -> 'z' */
+ { 0x70, 0x72 }, /* MONGOLIAN LETTER SIBE RAA -> 'r' */
+ { 0x73, 0x69 }, /* MONGOLIAN LETTER MANCHU I -> 'i' */
+ { 0x74, 0x6B }, /* MONGOLIAN LETTER MANCHU KA -> 'k' */
+ { 0x75, 0x72 }, /* MONGOLIAN LETTER MANCHU RA -> 'r' */
+ { 0x76, 0x66 }, /* MONGOLIAN LETTER MANCHU FA -> 'f' */
+ { 0x81, 0x48 }, /* MONGOLIAN LETTER ALI GALI VISARGA ONE -> 'H' */
+ { 0x82, 0x58 }, /* MONGOLIAN LETTER ALI GALI DAMARU -> 'X' */
+ { 0x83, 0x57 }, /* MONGOLIAN LETTER ALI GALI UBADAMA -> 'W' */
+ { 0x84, 0x4D }, /* MONGOLIAN LETTER ALI GALI INVERTED UBADAMA -> 'M' */
+ { 0x87, 0x61 }, /* MONGOLIAN LETTER ALI GALI A -> 'a' */
+ { 0x88, 0x69 }, /* MONGOLIAN LETTER ALI GALI I -> 'i' */
+ { 0x89, 0x6B }, /* MONGOLIAN LETTER ALI GALI KA -> 'k' */
+ { 0x8B, 0x63 }, /* MONGOLIAN LETTER ALI GALI CA -> 'c' */
+ { 0x90, 0x74 }, /* MONGOLIAN LETTER ALI GALI TA -> 't' */
+ { 0x91, 0x64 }, /* MONGOLIAN LETTER ALI GALI DA -> 'd' */
+ { 0x92, 0x70 }, /* MONGOLIAN LETTER ALI GALI PA -> 'p' */
+ { 0x96, 0x7A }, /* MONGOLIAN LETTER ALI GALI ZA -> 'z' */
+ { 0x97, 0x61 }, /* MONGOLIAN LETTER ALI GALI AH -> 'a' */
+ { 0x98, 0x74 }, /* MONGOLIAN LETTER TODO ALI GALI TA -> 't' */
+ { 0x9C, 0x63 }, /* MONGOLIAN LETTER MANCHU ALI GALI CA -> 'c' */
+ { 0xA0, 0x74 }, /* MONGOLIAN LETTER MANCHU ALI GALI TA -> 't' */
+ { 0xA5, 0x7A }, /* MONGOLIAN LETTER MANCHU ALI GALI ZA -> 'z' */
+ { 0xA6, 0x75 }, /* MONGOLIAN LETTER ALI GALI HALF U -> 'u' */
+ { 0xA7, 0x79 }, /* MONGOLIAN LETTER ALI GALI HALF YA -> 'y' */
+ { 0xA9, 0x27 }, /* MONGOLIAN LETTER ALI GALI DAGALGA -> ''' */
+ /* Entries for page 0x1D */
+ { 0x00, 0x41 }, /* LATIN LETTER SMALL CAPITAL A -> 'A' */
+ { 0x03, 0x42 }, /* LATIN LETTER SMALL CAPITAL BARRED B -> 'B' */
+ { 0x04, 0x43 }, /* LATIN LETTER SMALL CAPITAL C -> 'C' */
+ { 0x05, 0x44 }, /* LATIN LETTER SMALL CAPITAL D -> 'D' */
+ { 0x06, 0x44 }, /* LATIN LETTER SMALL CAPITAL ETH -> 'D' */
+ { 0x07, 0x45 }, /* LATIN LETTER SMALL CAPITAL E -> 'E' */
+ { 0x08, 0x65 }, /* LATIN SMALL LETTER TURNED OPEN E -> 'e' */
+ { 0x09, 0x69 }, /* LATIN SMALL LETTER TURNED I -> 'i' */
+ { 0x0A, 0x4A }, /* LATIN LETTER SMALL CAPITAL J -> 'J' */
+ { 0x0B, 0x4B }, /* LATIN LETTER SMALL CAPITAL K -> 'K' */
+ { 0x0C, 0x4C }, /* LATIN LETTER SMALL CAPITAL L WITH STROKE -> 'L' */
+ { 0x0D, 0x4D }, /* LATIN LETTER SMALL CAPITAL M -> 'M' */
+ { 0x0E, 0x4E }, /* LATIN LETTER SMALL CAPITAL REVERSED N -> 'N' */
+ { 0x0F, 0x4F }, /* LATIN LETTER SMALL CAPITAL O -> 'O' */
+ { 0x11, 0x4F }, /* LATIN SMALL LETTER SIDEWAYS O -> 'O' */
+ { 0x13, 0x4F }, /* LATIN SMALL LETTER SIDEWAYS O WITH STROKE -> 'O' */
+ { 0x18, 0x50 }, /* LATIN LETTER SMALL CAPITAL P -> 'P' */
+ { 0x19, 0x52 }, /* LATIN LETTER SMALL CAPITAL REVERSED R -> 'R' */
+ { 0x1A, 0x52 }, /* LATIN LETTER SMALL CAPITAL TURNED R -> 'R' */
+ { 0x1B, 0x54 }, /* LATIN LETTER SMALL CAPITAL T -> 'T' */
+ { 0x1C, 0x55 }, /* LATIN LETTER SMALL CAPITAL U -> 'U' */
+ { 0x1D, 0x75 }, /* LATIN SMALL LETTER SIDEWAYS U -> 'u' */
+ { 0x1E, 0x75 }, /* LATIN SMALL LETTER SIDEWAYS DIAERESIZED U -> 'u' */
+ { 0x1F, 0x6D }, /* LATIN SMALL LETTER SIDEWAYS TURNED M -> 'm' */
+ { 0x20, 0x56 }, /* LATIN LETTER SMALL CAPITAL V -> 'V' */
+ { 0x21, 0x57 }, /* LATIN LETTER SMALL CAPITAL W -> 'W' */
+ { 0x22, 0x5A }, /* LATIN LETTER SMALL CAPITAL Z -> 'Z' */
+ { 0x2C, 0x41 }, /* MODIFIER LETTER CAPITAL A -> 'A' */
+ { 0x2E, 0x42 }, /* MODIFIER LETTER CAPITAL B -> 'B' */
+ { 0x2F, 0x42 }, /* MODIFIER LETTER CAPITAL BARRED B -> 'B' */
+ { 0x30, 0x44 }, /* MODIFIER LETTER CAPITAL D -> 'D' */
+ { 0x31, 0x45 }, /* MODIFIER LETTER CAPITAL E -> 'E' */
+ { 0x32, 0x45 }, /* MODIFIER LETTER CAPITAL REVERSED E -> 'E' */
+ { 0x33, 0x47 }, /* MODIFIER LETTER CAPITAL G -> 'G' */
+ { 0x34, 0x48 }, /* MODIFIER LETTER CAPITAL H -> 'H' */
+ { 0x35, 0x49 }, /* MODIFIER LETTER CAPITAL I -> 'I' */
+ { 0x36, 0x4A }, /* MODIFIER LETTER CAPITAL J -> 'J' */
+ { 0x37, 0x4B }, /* MODIFIER LETTER CAPITAL K -> 'K' */
+ { 0x38, 0x4C }, /* MODIFIER LETTER CAPITAL L -> 'L' */
+ { 0x39, 0x4D }, /* MODIFIER LETTER CAPITAL M -> 'M' */
+ { 0x3A, 0x4E }, /* MODIFIER LETTER CAPITAL N -> 'N' */
+ { 0x3B, 0x4E }, /* MODIFIER LETTER CAPITAL REVERSED N -> 'N' */
+ { 0x3C, 0x4F }, /* MODIFIER LETTER CAPITAL O -> 'O' */
+ { 0x3E, 0x50 }, /* MODIFIER LETTER CAPITAL P -> 'P' */
+ { 0x3F, 0x52 }, /* MODIFIER LETTER CAPITAL R -> 'R' */
+ { 0x40, 0x54 }, /* MODIFIER LETTER CAPITAL T -> 'T' */
+ { 0x41, 0x55 }, /* MODIFIER LETTER CAPITAL U -> 'U' */
+ { 0x42, 0x57 }, /* MODIFIER LETTER CAPITAL W -> 'W' */
+ { 0x43, 0x00 }, /* MODIFIER LETTER SMALL A -> ... */
+ { 0x45, 0x61 }, /* MODIFIER LETTER SMALL ALPHA -> 'a' */
+ { 0x47, 0x62 }, /* MODIFIER LETTER SMALL B -> 'b' */
+ { 0x48, 0x64 }, /* MODIFIER LETTER SMALL D -> 'd' */
+ { 0x49, 0x65 }, /* MODIFIER LETTER SMALL E -> 'e' */
+ { 0x4B, 0x65 }, /* MODIFIER LETTER SMALL OPEN E -> 'e' */
+ { 0x4C, 0x65 }, /* MODIFIER LETTER SMALL TURNED OPEN E -> 'e' */
+ { 0x4D, 0x67 }, /* MODIFIER LETTER SMALL G -> 'g' */
+ { 0x4E, 0x69 }, /* MODIFIER LETTER SMALL TURNED I -> 'i' */
+ { 0x4F, 0x6B }, /* MODIFIER LETTER SMALL K -> 'k' */
+ { 0x50, 0x6D }, /* MODIFIER LETTER SMALL M -> 'm' */
+ { 0x52, 0x6F }, /* MODIFIER LETTER SMALL O -> 'o' */
+ { 0x56, 0x70 }, /* MODIFIER LETTER SMALL P -> 'p' */
+ { 0x57, 0x74 }, /* MODIFIER LETTER SMALL T -> 't' */
+ { 0x58, 0x75 }, /* MODIFIER LETTER SMALL U -> 'u' */
+ { 0x59, 0x75 }, /* MODIFIER LETTER SMALL SIDEWAYS U -> 'u' */
+ { 0x5A, 0x6D }, /* MODIFIER LETTER SMALL TURNED M -> 'm' */
+ { 0x5B, 0x76 }, /* MODIFIER LETTER SMALL V -> 'v' */
+ { 0x5D, 0x62 }, /* MODIFIER LETTER SMALL BETA -> 'b' */
+ { 0x5E, 0x67 }, /* MODIFIER LETTER SMALL GREEK GAMMA -> 'g' */
+ { 0x5F, 0x64 }, /* MODIFIER LETTER SMALL DELTA -> 'd' */
+ { 0x60, 0x66 }, /* MODIFIER LETTER SMALL GREEK PHI -> 'f' */
+ { 0x62, 0x69 }, /* LATIN SUBSCRIPT SMALL LETTER I -> 'i' */
+ { 0x63, 0x72 }, /* LATIN SUBSCRIPT SMALL LETTER R -> 'r' */
+ { 0x64, 0x75 }, /* LATIN SUBSCRIPT SMALL LETTER U -> 'u' */
+ { 0x65, 0x76 }, /* LATIN SUBSCRIPT SMALL LETTER V -> 'v' */
+ { 0x66, 0x62 }, /* GREEK SUBSCRIPT SMALL LETTER BETA -> 'b' */
+ { 0x67, 0x67 }, /* GREEK SUBSCRIPT SMALL LETTER GAMMA -> 'g' */
+ { 0x68, 0x72 }, /* GREEK SUBSCRIPT SMALL LETTER RHO -> 'r' */
+ { 0x69, 0x66 }, /* GREEK SUBSCRIPT SMALL LETTER PHI -> 'f' */
+ { 0x6C, 0x62 }, /* LATIN SMALL LETTER B WITH MIDDLE TILDE -> 'b' */
+ { 0x6D, 0x64 }, /* LATIN SMALL LETTER D WITH MIDDLE TILDE -> 'd' */
+ { 0x6E, 0x66 }, /* LATIN SMALL LETTER F WITH MIDDLE TILDE -> 'f' */
+ { 0x6F, 0x6D }, /* LATIN SMALL LETTER M WITH MIDDLE TILDE -> 'm' */
+ { 0x70, 0x6E }, /* LATIN SMALL LETTER N WITH MIDDLE TILDE -> 'n' */
+ { 0x71, 0x70 }, /* LATIN SMALL LETTER P WITH MIDDLE TILDE -> 'p' */
+ { 0x72, 0x72 }, /* LATIN SMALL LETTER R WITH MIDDLE TILDE -> 'r' */
+ { 0x73, 0x72 }, /* LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE -> 'r' */
+ { 0x74, 0x73 }, /* LATIN SMALL LETTER S WITH MIDDLE TILDE -> 's' */
+ { 0x75, 0x74 }, /* LATIN SMALL LETTER T WITH MIDDLE TILDE -> 't' */
+ { 0x76, 0x7A }, /* LATIN SMALL LETTER Z WITH MIDDLE TILDE -> 'z' */
+ { 0x77, 0x67 }, /* LATIN SMALL LETTER TURNED G -> 'g' */
+ { 0x7D, 0x70 }, /* LATIN SMALL LETTER P WITH STROKE -> 'p' */
+ { 0x80, 0x62 }, /* LATIN SMALL LETTER B WITH PALATAL HOOK -> 'b' */
+ { 0x81, 0x64 }, /* LATIN SMALL LETTER D WITH PALATAL HOOK -> 'd' */
+ { 0x82, 0x66 }, /* LATIN SMALL LETTER F WITH PALATAL HOOK -> 'f' */
+ { 0x83, 0x67 }, /* LATIN SMALL LETTER G WITH PALATAL HOOK -> 'g' */
+ { 0x84, 0x6B }, /* LATIN SMALL LETTER K WITH PALATAL HOOK -> 'k' */
+ { 0x85, 0x6C }, /* LATIN SMALL LETTER L WITH PALATAL HOOK -> 'l' */
+ { 0x86, 0x6D }, /* LATIN SMALL LETTER M WITH PALATAL HOOK -> 'm' */
+ { 0x87, 0x6E }, /* LATIN SMALL LETTER N WITH PALATAL HOOK -> 'n' */
+ { 0x88, 0x70 }, /* LATIN SMALL LETTER P WITH PALATAL HOOK -> 'p' */
+ { 0x89, 0x72 }, /* LATIN SMALL LETTER R WITH PALATAL HOOK -> 'r' */
+ { 0x8A, 0x73 }, /* LATIN SMALL LETTER S WITH PALATAL HOOK -> 's' */
+ { 0x8C, 0x76 }, /* LATIN SMALL LETTER V WITH PALATAL HOOK -> 'v' */
+ { 0x8D, 0x78 }, /* LATIN SMALL LETTER X WITH PALATAL HOOK -> 'x' */
+ { 0x8E, 0x7A }, /* LATIN SMALL LETTER Z WITH PALATAL HOOK -> 'z' */
+ /* Entries for page 0x1E */
+ { 0x00, 0x41 }, /* LATIN CAPITAL LETTER A WITH RING BELOW -> 'A' */
+ { 0x01, 0x61 }, /* LATIN SMALL LETTER A WITH RING BELOW -> 'a' */
+ { 0x02, 0x42 }, /* LATIN CAPITAL LETTER B WITH DOT ABOVE -> 'B' */
+ { 0x03, 0x62 }, /* LATIN SMALL LETTER B WITH DOT ABOVE -> 'b' */
+ { 0x04, 0x42 }, /* LATIN CAPITAL LETTER B WITH DOT BELOW -> 'B' */
+ { 0x05, 0x62 }, /* LATIN SMALL LETTER B WITH DOT BELOW -> 'b' */
+ { 0x06, 0x42 }, /* LATIN CAPITAL LETTER B WITH LINE BELOW -> 'B' */
+ { 0x07, 0x62 }, /* LATIN SMALL LETTER B WITH LINE BELOW -> 'b' */
+ { 0x08, 0x43 }, /* LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE -> 'C' */
+ { 0x09, 0x63 }, /* LATIN SMALL LETTER C WITH CEDILLA AND ACUTE -> 'c' */
+ { 0x0A, 0x44 }, /* LATIN CAPITAL LETTER D WITH DOT ABOVE -> 'D' */
+ { 0x0B, 0x64 }, /* LATIN SMALL LETTER D WITH DOT ABOVE -> 'd' */
+ { 0x0C, 0x44 }, /* LATIN CAPITAL LETTER D WITH DOT BELOW -> 'D' */
+ { 0x0D, 0x64 }, /* LATIN SMALL LETTER D WITH DOT BELOW -> 'd' */
+ { 0x0E, 0x44 }, /* LATIN CAPITAL LETTER D WITH LINE BELOW -> 'D' */
+ { 0x0F, 0x64 }, /* LATIN SMALL LETTER D WITH LINE BELOW -> 'd' */
+ { 0x10, 0x44 }, /* LATIN CAPITAL LETTER D WITH CEDILLA -> 'D' */
+ { 0x11, 0x64 }, /* LATIN SMALL LETTER D WITH CEDILLA -> 'd' */
+ { 0x12, 0x44 }, /* LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW -> 'D' */
+ { 0x13, 0x64 }, /* LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW -> 'd' */
+ { 0x14, 0x45 }, /* LATIN CAPITAL LETTER E WITH MACRON AND GRAVE -> 'E' */
+ { 0x15, 0x65 }, /* LATIN SMALL LETTER E WITH MACRON AND GRAVE -> 'e' */
+ { 0x16, 0x45 }, /* LATIN CAPITAL LETTER E WITH MACRON AND ACUTE -> 'E' */
+ { 0x17, 0x65 }, /* LATIN SMALL LETTER E WITH MACRON AND ACUTE -> 'e' */
+ { 0x18, 0x45 }, /* LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW -> 'E' */
+ { 0x19, 0x65 }, /* LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW -> 'e' */
+ { 0x1A, 0x45 }, /* LATIN CAPITAL LETTER E WITH TILDE BELOW -> 'E' */
+ { 0x1B, 0x65 }, /* LATIN SMALL LETTER E WITH TILDE BELOW -> 'e' */
+ { 0x1C, 0x45 }, /* LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE -> 'E' */
+ { 0x1D, 0x65 }, /* LATIN SMALL LETTER E WITH CEDILLA AND BREVE -> 'e' */
+ { 0x1E, 0x46 }, /* LATIN CAPITAL LETTER F WITH DOT ABOVE -> 'F' */
+ { 0x1F, 0x66 }, /* LATIN SMALL LETTER F WITH DOT ABOVE -> 'f' */
+ { 0x20, 0x47 }, /* LATIN CAPITAL LETTER G WITH MACRON -> 'G' */
+ { 0x21, 0x67 }, /* LATIN SMALL LETTER G WITH MACRON -> 'g' */
+ { 0x22, 0x48 }, /* LATIN CAPITAL LETTER H WITH DOT ABOVE -> 'H' */
+ { 0x23, 0x68 }, /* LATIN SMALL LETTER H WITH DOT ABOVE -> 'h' */
+ { 0x24, 0x48 }, /* LATIN CAPITAL LETTER H WITH DOT BELOW -> 'H' */
+ { 0x25, 0x68 }, /* LATIN SMALL LETTER H WITH DOT BELOW -> 'h' */
+ { 0x26, 0x48 }, /* LATIN CAPITAL LETTER H WITH DIAERESIS -> 'H' */
+ { 0x27, 0x68 }, /* LATIN SMALL LETTER H WITH DIAERESIS -> 'h' */
+ { 0x28, 0x48 }, /* LATIN CAPITAL LETTER H WITH CEDILLA -> 'H' */
+ { 0x29, 0x68 }, /* LATIN SMALL LETTER H WITH CEDILLA -> 'h' */
+ { 0x2A, 0x48 }, /* LATIN CAPITAL LETTER H WITH BREVE BELOW -> 'H' */
+ { 0x2B, 0x68 }, /* LATIN SMALL LETTER H WITH BREVE BELOW -> 'h' */
+ { 0x2C, 0x49 }, /* LATIN CAPITAL LETTER I WITH TILDE BELOW -> 'I' */
+ { 0x2D, 0x69 }, /* LATIN SMALL LETTER I WITH TILDE BELOW -> 'i' */
+ { 0x2E, 0x49 }, /* LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE -> 'I' */
+ { 0x2F, 0x69 }, /* LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE -> 'i' */
+ { 0x30, 0x4B }, /* LATIN CAPITAL LETTER K WITH ACUTE -> 'K' */
+ { 0x31, 0x6B }, /* LATIN SMALL LETTER K WITH ACUTE -> 'k' */
+ { 0x32, 0x4B }, /* LATIN CAPITAL LETTER K WITH DOT BELOW -> 'K' */
+ { 0x33, 0x6B }, /* LATIN SMALL LETTER K WITH DOT BELOW -> 'k' */
+ { 0x34, 0x4B }, /* LATIN CAPITAL LETTER K WITH LINE BELOW -> 'K' */
+ { 0x35, 0x6B }, /* LATIN SMALL LETTER K WITH LINE BELOW -> 'k' */
+ { 0x36, 0x4C }, /* LATIN CAPITAL LETTER L WITH DOT BELOW -> 'L' */
+ { 0x37, 0x6C }, /* LATIN SMALL LETTER L WITH DOT BELOW -> 'l' */
+ { 0x38, 0x4C }, /* LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON -> 'L' */
+ { 0x39, 0x6C }, /* LATIN SMALL LETTER L WITH DOT BELOW AND MACRON -> 'l' */
+ { 0x3A, 0x4C }, /* LATIN CAPITAL LETTER L WITH LINE BELOW -> 'L' */
+ { 0x3B, 0x6C }, /* LATIN SMALL LETTER L WITH LINE BELOW -> 'l' */
+ { 0x3C, 0x4C }, /* LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW -> 'L' */
+ { 0x3D, 0x6C }, /* LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW -> 'l' */
+ { 0x3E, 0x4D }, /* LATIN CAPITAL LETTER M WITH ACUTE -> 'M' */
+ { 0x3F, 0x6D }, /* LATIN SMALL LETTER M WITH ACUTE -> 'm' */
+ { 0x40, 0x4D }, /* LATIN CAPITAL LETTER M WITH DOT ABOVE -> 'M' */
+ { 0x41, 0x6D }, /* LATIN SMALL LETTER M WITH DOT ABOVE -> 'm' */
+ { 0x42, 0x4D }, /* LATIN CAPITAL LETTER M WITH DOT BELOW -> 'M' */
+ { 0x43, 0x6D }, /* LATIN SMALL LETTER M WITH DOT BELOW -> 'm' */
+ { 0x44, 0x4E }, /* LATIN CAPITAL LETTER N WITH DOT ABOVE -> 'N' */
+ { 0x45, 0x6E }, /* LATIN SMALL LETTER N WITH DOT ABOVE -> 'n' */
+ { 0x46, 0x4E }, /* LATIN CAPITAL LETTER N WITH DOT BELOW -> 'N' */
+ { 0x47, 0x6E }, /* LATIN SMALL LETTER N WITH DOT BELOW -> 'n' */
+ { 0x48, 0x4E }, /* LATIN CAPITAL LETTER N WITH LINE BELOW -> 'N' */
+ { 0x49, 0x6E }, /* LATIN SMALL LETTER N WITH LINE BELOW -> 'n' */
+ { 0x4A, 0x4E }, /* LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW -> 'N' */
+ { 0x4B, 0x6E }, /* LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW -> 'n' */
+ { 0x4C, 0x4F }, /* LATIN CAPITAL LETTER O WITH TILDE AND ACUTE -> 'O' */
+ { 0x4D, 0x6F }, /* LATIN SMALL LETTER O WITH TILDE AND ACUTE -> 'o' */
+ { 0x4E, 0x4F }, /* LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS -> 'O' */
+ { 0x4F, 0x6F }, /* LATIN SMALL LETTER O WITH TILDE AND DIAERESIS -> 'o' */
+ { 0x50, 0x4F }, /* LATIN CAPITAL LETTER O WITH MACRON AND GRAVE -> 'O' */
+ { 0x51, 0x6F }, /* LATIN SMALL LETTER O WITH MACRON AND GRAVE -> 'o' */
+ { 0x52, 0x4F }, /* LATIN CAPITAL LETTER O WITH MACRON AND ACUTE -> 'O' */
+ { 0x53, 0x6F }, /* LATIN SMALL LETTER O WITH MACRON AND ACUTE -> 'o' */
+ { 0x54, 0x50 }, /* LATIN CAPITAL LETTER P WITH ACUTE -> 'P' */
+ { 0x55, 0x70 }, /* LATIN SMALL LETTER P WITH ACUTE -> 'p' */
+ { 0x56, 0x50 }, /* LATIN CAPITAL LETTER P WITH DOT ABOVE -> 'P' */
+ { 0x57, 0x70 }, /* LATIN SMALL LETTER P WITH DOT ABOVE -> 'p' */
+ { 0x58, 0x52 }, /* LATIN CAPITAL LETTER R WITH DOT ABOVE -> 'R' */
+ { 0x59, 0x72 }, /* LATIN SMALL LETTER R WITH DOT ABOVE -> 'r' */
+ { 0x5A, 0x52 }, /* LATIN CAPITAL LETTER R WITH DOT BELOW -> 'R' */
+ { 0x5B, 0x72 }, /* LATIN SMALL LETTER R WITH DOT BELOW -> 'r' */
+ { 0x5C, 0x52 }, /* LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON -> 'R' */
+ { 0x5D, 0x72 }, /* LATIN SMALL LETTER R WITH DOT BELOW AND MACRON -> 'r' */
+ { 0x5E, 0x52 }, /* LATIN CAPITAL LETTER R WITH LINE BELOW -> 'R' */
+ { 0x5F, 0x72 }, /* LATIN SMALL LETTER R WITH LINE BELOW -> 'r' */
+ { 0x60, 0x53 }, /* LATIN CAPITAL LETTER S WITH DOT ABOVE -> 'S' */
+ { 0x61, 0x73 }, /* LATIN SMALL LETTER S WITH DOT ABOVE -> 's' */
+ { 0x62, 0x53 }, /* LATIN CAPITAL LETTER S WITH DOT BELOW -> 'S' */
+ { 0x63, 0x73 }, /* LATIN SMALL LETTER S WITH DOT BELOW -> 's' */
+ { 0x64, 0x53 }, /* LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE -> 'S' */
+ { 0x65, 0x73 }, /* LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE -> 's' */
+ { 0x66, 0x53 }, /* LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE -> 'S' */
+ { 0x67, 0x73 }, /* LATIN SMALL LETTER S WITH CARON AND DOT ABOVE -> 's' */
+ { 0x68, 0x53 }, /* LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE -> 'S' */
+ { 0x69, 0x73 }, /* LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE -> 's' */
+ { 0x6A, 0x54 }, /* LATIN CAPITAL LETTER T WITH DOT ABOVE -> 'T' */
+ { 0x6B, 0x74 }, /* LATIN SMALL LETTER T WITH DOT ABOVE -> 't' */
+ { 0x6C, 0x54 }, /* LATIN CAPITAL LETTER T WITH DOT BELOW -> 'T' */
+ { 0x6D, 0x74 }, /* LATIN SMALL LETTER T WITH DOT BELOW -> 't' */
+ { 0x6E, 0x54 }, /* LATIN CAPITAL LETTER T WITH LINE BELOW -> 'T' */
+ { 0x6F, 0x74 }, /* LATIN SMALL LETTER T WITH LINE BELOW -> 't' */
+ { 0x70, 0x54 }, /* LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW -> 'T' */
+ { 0x71, 0x74 }, /* LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW -> 't' */
+ { 0x72, 0x55 }, /* LATIN CAPITAL LETTER U WITH DIAERESIS BELOW -> 'U' */
+ { 0x73, 0x75 }, /* LATIN SMALL LETTER U WITH DIAERESIS BELOW -> 'u' */
+ { 0x74, 0x55 }, /* LATIN CAPITAL LETTER U WITH TILDE BELOW -> 'U' */
+ { 0x75, 0x75 }, /* LATIN SMALL LETTER U WITH TILDE BELOW -> 'u' */
+ { 0x76, 0x55 }, /* LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW -> 'U' */
+ { 0x77, 0x75 }, /* LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW -> 'u' */
+ { 0x78, 0x55 }, /* LATIN CAPITAL LETTER U WITH TILDE AND ACUTE -> 'U' */
+ { 0x79, 0x75 }, /* LATIN SMALL LETTER U WITH TILDE AND ACUTE -> 'u' */
+ { 0x7A, 0x55 }, /* LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS -> 'U' */
+ { 0x7B, 0x75 }, /* LATIN SMALL LETTER U WITH MACRON AND DIAERESIS -> 'u' */
+ { 0x7C, 0x56 }, /* LATIN CAPITAL LETTER V WITH TILDE -> 'V' */
+ { 0x7D, 0x76 }, /* LATIN SMALL LETTER V WITH TILDE -> 'v' */
+ { 0x7E, 0x56 }, /* LATIN CAPITAL LETTER V WITH DOT BELOW -> 'V' */
+ { 0x7F, 0x76 }, /* LATIN SMALL LETTER V WITH DOT BELOW -> 'v' */
+ { 0x80, 0x57 }, /* LATIN CAPITAL LETTER W WITH GRAVE -> 'W' */
+ { 0x81, 0x77 }, /* LATIN SMALL LETTER W WITH GRAVE -> 'w' */
+ { 0x82, 0x57 }, /* LATIN CAPITAL LETTER W WITH ACUTE -> 'W' */
+ { 0x83, 0x77 }, /* LATIN SMALL LETTER W WITH ACUTE -> 'w' */
+ { 0x84, 0x57 }, /* LATIN CAPITAL LETTER W WITH DIAERESIS -> 'W' */
+ { 0x85, 0x77 }, /* LATIN SMALL LETTER W WITH DIAERESIS -> 'w' */
+ { 0x86, 0x57 }, /* LATIN CAPITAL LETTER W WITH DOT ABOVE -> 'W' */
+ { 0x87, 0x77 }, /* LATIN SMALL LETTER W WITH DOT ABOVE -> 'w' */
+ { 0x88, 0x57 }, /* LATIN CAPITAL LETTER W WITH DOT BELOW -> 'W' */
+ { 0x89, 0x77 }, /* LATIN SMALL LETTER W WITH DOT BELOW -> 'w' */
+ { 0x8A, 0x58 }, /* LATIN CAPITAL LETTER X WITH DOT ABOVE -> 'X' */
+ { 0x8B, 0x78 }, /* LATIN SMALL LETTER X WITH DOT ABOVE -> 'x' */
+ { 0x8C, 0x58 }, /* LATIN CAPITAL LETTER X WITH DIAERESIS -> 'X' */
+ { 0x8D, 0x78 }, /* LATIN SMALL LETTER X WITH DIAERESIS -> 'x' */
+ { 0x8E, 0x59 }, /* LATIN CAPITAL LETTER Y WITH DOT ABOVE -> 'Y' */
+ { 0x8F, 0x79 }, /* LATIN SMALL LETTER Y WITH DOT ABOVE -> 'y' */
+ { 0x90, 0x5A }, /* LATIN CAPITAL LETTER Z WITH CIRCUMFLEX -> 'Z' */
+ { 0x91, 0x7A }, /* LATIN SMALL LETTER Z WITH CIRCUMFLEX -> 'z' */
+ { 0x92, 0x5A }, /* LATIN CAPITAL LETTER Z WITH DOT BELOW -> 'Z' */
+ { 0x93, 0x7A }, /* LATIN SMALL LETTER Z WITH DOT BELOW -> 'z' */
+ { 0x94, 0x5A }, /* LATIN CAPITAL LETTER Z WITH LINE BELOW -> 'Z' */
+ { 0x95, 0x7A }, /* LATIN SMALL LETTER Z WITH LINE BELOW -> 'z' */
+ { 0x96, 0x68 }, /* LATIN SMALL LETTER H WITH LINE BELOW -> 'h' */
+ { 0x97, 0x74 }, /* LATIN SMALL LETTER T WITH DIAERESIS -> 't' */
+ { 0x98, 0x77 }, /* LATIN SMALL LETTER W WITH RING ABOVE -> 'w' */
+ { 0x99, 0x79 }, /* LATIN SMALL LETTER Y WITH RING ABOVE -> 'y' */
+ { 0x9A, 0x61 }, /* LATIN SMALL LETTER A WITH RIGHT HALF RING -> 'a' */
+ { 0x9B, 0x53 }, /* LATIN SMALL LETTER LONG S WITH DOT ABOVE -> 'S' */
+ { 0xA0, 0x41 }, /* LATIN CAPITAL LETTER A WITH DOT BELOW -> 'A' */
+ { 0xA1, 0x61 }, /* LATIN SMALL LETTER A WITH DOT BELOW -> 'a' */
+ { 0xA2, 0x41 }, /* LATIN CAPITAL LETTER A WITH HOOK ABOVE -> 'A' */
+ { 0xA3, 0x61 }, /* LATIN SMALL LETTER A WITH HOOK ABOVE -> 'a' */
+ { 0xA4, 0x41 }, /* LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE -> 'A' */
+ { 0xA5, 0x61 }, /* LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE -> 'a' */
+ { 0xA6, 0x41 }, /* LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE -> 'A' */
+ { 0xA7, 0x61 }, /* LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE -> 'a' */
+ { 0xA8, 0x41 }, /* LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE -> 'A' */
+ { 0xA9, 0x61 }, /* LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE -> 'a' */
+ { 0xAA, 0x41 }, /* LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE -> 'A' */
+ { 0xAB, 0x61 }, /* LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE -> 'a' */
+ { 0xAC, 0x41 }, /* LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW -> 'A' */
+ { 0xAD, 0x61 }, /* LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW -> 'a' */
+ { 0xAE, 0x41 }, /* LATIN CAPITAL LETTER A WITH BREVE AND ACUTE -> 'A' */
+ { 0xAF, 0x61 }, /* LATIN SMALL LETTER A WITH BREVE AND ACUTE -> 'a' */
+ { 0xB0, 0x41 }, /* LATIN CAPITAL LETTER A WITH BREVE AND GRAVE -> 'A' */
+ { 0xB1, 0x61 }, /* LATIN SMALL LETTER A WITH BREVE AND GRAVE -> 'a' */
+ { 0xB2, 0x41 }, /* LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE -> 'A' */
+ { 0xB3, 0x61 }, /* LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE -> 'a' */
+ { 0xB4, 0x41 }, /* LATIN CAPITAL LETTER A WITH BREVE AND TILDE -> 'A' */
+ { 0xB5, 0x61 }, /* LATIN SMALL LETTER A WITH BREVE AND TILDE -> 'a' */
+ { 0xB6, 0x41 }, /* LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW -> 'A' */
+ { 0xB7, 0x61 }, /* LATIN SMALL LETTER A WITH BREVE AND DOT BELOW -> 'a' */
+ { 0xB8, 0x45 }, /* LATIN CAPITAL LETTER E WITH DOT BELOW -> 'E' */
+ { 0xB9, 0x65 }, /* LATIN SMALL LETTER E WITH DOT BELOW -> 'e' */
+ { 0xBA, 0x45 }, /* LATIN CAPITAL LETTER E WITH HOOK ABOVE -> 'E' */
+ { 0xBB, 0x65 }, /* LATIN SMALL LETTER E WITH HOOK ABOVE -> 'e' */
+ { 0xBC, 0x45 }, /* LATIN CAPITAL LETTER E WITH TILDE -> 'E' */
+ { 0xBD, 0x65 }, /* LATIN SMALL LETTER E WITH TILDE -> 'e' */
+ { 0xBE, 0x45 }, /* LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE -> 'E' */
+ { 0xBF, 0x65 }, /* LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE -> 'e' */
+ { 0xC0, 0x45 }, /* LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE -> 'E' */
+ { 0xC1, 0x65 }, /* LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE -> 'e' */
+ { 0xC2, 0x45 }, /* LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE -> 'E' */
+ { 0xC3, 0x65 }, /* LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE -> 'e' */
+ { 0xC4, 0x45 }, /* LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE -> 'E' */
+ { 0xC5, 0x65 }, /* LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE -> 'e' */
+ { 0xC6, 0x45 }, /* LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW -> 'E' */
+ { 0xC7, 0x65 }, /* LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW -> 'e' */
+ { 0xC8, 0x49 }, /* LATIN CAPITAL LETTER I WITH HOOK ABOVE -> 'I' */
+ { 0xC9, 0x69 }, /* LATIN SMALL LETTER I WITH HOOK ABOVE -> 'i' */
+ { 0xCA, 0x49 }, /* LATIN CAPITAL LETTER I WITH DOT BELOW -> 'I' */
+ { 0xCB, 0x69 }, /* LATIN SMALL LETTER I WITH DOT BELOW -> 'i' */
+ { 0xCC, 0x4F }, /* LATIN CAPITAL LETTER O WITH DOT BELOW -> 'O' */
+ { 0xCD, 0x6F }, /* LATIN SMALL LETTER O WITH DOT BELOW -> 'o' */
+ { 0xCE, 0x4F }, /* LATIN CAPITAL LETTER O WITH HOOK ABOVE -> 'O' */
+ { 0xCF, 0x6F }, /* LATIN SMALL LETTER O WITH HOOK ABOVE -> 'o' */
+ { 0xD0, 0x4F }, /* LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE -> 'O' */
+ { 0xD1, 0x6F }, /* LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE -> 'o' */
+ { 0xD2, 0x4F }, /* LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE -> 'O' */
+ { 0xD3, 0x6F }, /* LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE -> 'o' */
+ { 0xD4, 0x4F }, /* LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE -> 'O' */
+ { 0xD5, 0x6F }, /* LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE -> 'o' */
+ { 0xD6, 0x4F }, /* LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE -> 'O' */
+ { 0xD7, 0x6F }, /* LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE -> 'o' */
+ { 0xD8, 0x4F }, /* LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW -> 'O' */
+ { 0xD9, 0x6F }, /* LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW -> 'o' */
+ { 0xDA, 0x4F }, /* LATIN CAPITAL LETTER O WITH HORN AND ACUTE -> 'O' */
+ { 0xDB, 0x6F }, /* LATIN SMALL LETTER O WITH HORN AND ACUTE -> 'o' */
+ { 0xDC, 0x4F }, /* LATIN CAPITAL LETTER O WITH HORN AND GRAVE -> 'O' */
+ { 0xDD, 0x6F }, /* LATIN SMALL LETTER O WITH HORN AND GRAVE -> 'o' */
+ { 0xDE, 0x4F }, /* LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE -> 'O' */
+ { 0xDF, 0x6F }, /* LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE -> 'o' */
+ { 0xE0, 0x4F }, /* LATIN CAPITAL LETTER O WITH HORN AND TILDE -> 'O' */
+ { 0xE1, 0x6F }, /* LATIN SMALL LETTER O WITH HORN AND TILDE -> 'o' */
+ { 0xE2, 0x4F }, /* LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW -> 'O' */
+ { 0xE3, 0x6F }, /* LATIN SMALL LETTER O WITH HORN AND DOT BELOW -> 'o' */
+ { 0xE4, 0x55 }, /* LATIN CAPITAL LETTER U WITH DOT BELOW -> 'U' */
+ { 0xE5, 0x75 }, /* LATIN SMALL LETTER U WITH DOT BELOW -> 'u' */
+ { 0xE6, 0x55 }, /* LATIN CAPITAL LETTER U WITH HOOK ABOVE -> 'U' */
+ { 0xE7, 0x75 }, /* LATIN SMALL LETTER U WITH HOOK ABOVE -> 'u' */
+ { 0xE8, 0x55 }, /* LATIN CAPITAL LETTER U WITH HORN AND ACUTE -> 'U' */
+ { 0xE9, 0x75 }, /* LATIN SMALL LETTER U WITH HORN AND ACUTE -> 'u' */
+ { 0xEA, 0x55 }, /* LATIN CAPITAL LETTER U WITH HORN AND GRAVE -> 'U' */
+ { 0xEB, 0x75 }, /* LATIN SMALL LETTER U WITH HORN AND GRAVE -> 'u' */
+ { 0xEC, 0x55 }, /* LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE -> 'U' */
+ { 0xED, 0x75 }, /* LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE -> 'u' */
+ { 0xEE, 0x55 }, /* LATIN CAPITAL LETTER U WITH HORN AND TILDE -> 'U' */
+ { 0xEF, 0x75 }, /* LATIN SMALL LETTER U WITH HORN AND TILDE -> 'u' */
+ { 0xF0, 0x55 }, /* LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW -> 'U' */
+ { 0xF1, 0x75 }, /* LATIN SMALL LETTER U WITH HORN AND DOT BELOW -> 'u' */
+ { 0xF2, 0x59 }, /* LATIN CAPITAL LETTER Y WITH GRAVE -> 'Y' */
+ { 0xF3, 0x79 }, /* LATIN SMALL LETTER Y WITH GRAVE -> 'y' */
+ { 0xF4, 0x59 }, /* LATIN CAPITAL LETTER Y WITH DOT BELOW -> 'Y' */
+ { 0xF5, 0x79 }, /* LATIN SMALL LETTER Y WITH DOT BELOW -> 'y' */
+ { 0xF6, 0x59 }, /* LATIN CAPITAL LETTER Y WITH HOOK ABOVE -> 'Y' */
+ { 0xF7, 0x79 }, /* LATIN SMALL LETTER Y WITH HOOK ABOVE -> 'y' */
+ { 0xF8, 0x59 }, /* LATIN CAPITAL LETTER Y WITH TILDE -> 'Y' */
+ { 0xF9, 0x79 }, /* LATIN SMALL LETTER Y WITH TILDE -> 'y' */
+ /* Entries for page 0x1F */
+ { 0x00, 0x00 }, /* GREEK SMALL LETTER ALPHA WITH PSILI -> ... */
+ { 0x07, 0x61 }, /* GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI -> 'a' */
+ { 0x08, 0x00 }, /* GREEK CAPITAL LETTER ALPHA WITH PSILI -> ... */
+ { 0x0F, 0x41 }, /* GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI -> 'A' */
+ { 0x10, 0x00 }, /* GREEK SMALL LETTER EPSILON WITH PSILI -> ... */
+ { 0x15, 0x65 }, /* GREEK SMALL LETTER EPSILON WITH DASIA AND OXIA -> 'e' */
+ { 0x18, 0x00 }, /* GREEK CAPITAL LETTER EPSILON WITH PSILI -> ... */
+ { 0x1D, 0x45 }, /* GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA -> 'E' */
+ { 0x20, 0x00 }, /* GREEK SMALL LETTER ETA WITH PSILI -> ... */
+ { 0x27, 0x65 }, /* GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI -> 'e' */
+ { 0x28, 0x00 }, /* GREEK CAPITAL LETTER ETA WITH PSILI -> ... */
+ { 0x2F, 0x45 }, /* GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI -> 'E' */
+ { 0x30, 0x00 }, /* GREEK SMALL LETTER IOTA WITH PSILI -> ... */
+ { 0x37, 0x69 }, /* GREEK SMALL LETTER IOTA WITH DASIA AND PERISPOMENI -> 'i' */
+ { 0x38, 0x00 }, /* GREEK CAPITAL LETTER IOTA WITH PSILI -> ... */
+ { 0x3F, 0x49 }, /* GREEK CAPITAL LETTER IOTA WITH DASIA AND PERISPOMENI -> 'I' */
+ { 0x40, 0x00 }, /* GREEK SMALL LETTER OMICRON WITH PSILI -> ... */
+ { 0x45, 0x6F }, /* GREEK SMALL LETTER OMICRON WITH DASIA AND OXIA -> 'o' */
+ { 0x48, 0x00 }, /* GREEK CAPITAL LETTER OMICRON WITH PSILI -> ... */
+ { 0x4D, 0x4F }, /* GREEK CAPITAL LETTER OMICRON WITH DASIA AND OXIA -> 'O' */
+ { 0x50, 0x00 }, /* GREEK SMALL LETTER UPSILON WITH PSILI -> ... */
+ { 0x57, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH DASIA AND PERISPOMENI -> 'u' */
+ { 0x59, 0x55 }, /* GREEK CAPITAL LETTER UPSILON WITH DASIA -> 'U' */
+ { 0x5B, 0x55 }, /* GREEK CAPITAL LETTER UPSILON WITH DASIA AND VARIA -> 'U' */
+ { 0x5D, 0x55 }, /* GREEK CAPITAL LETTER UPSILON WITH DASIA AND OXIA -> 'U' */
+ { 0x5F, 0x55 }, /* GREEK CAPITAL LETTER UPSILON WITH DASIA AND PERISPOMENI -> 'U' */
+ { 0x60, 0x00 }, /* GREEK SMALL LETTER OMEGA WITH PSILI -> ... */
+ { 0x67, 0x6F }, /* GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI -> 'o' */
+ { 0x68, 0x00 }, /* GREEK CAPITAL LETTER OMEGA WITH PSILI -> ... */
+ { 0x6F, 0x4F }, /* GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI -> 'O' */
+ { 0x70, 0x61 }, /* GREEK SMALL LETTER ALPHA WITH VARIA -> 'a' */
+ { 0x71, 0x61 }, /* GREEK SMALL LETTER ALPHA WITH OXIA -> 'a' */
+ { 0x72, 0x00 }, /* GREEK SMALL LETTER EPSILON WITH VARIA -> ... */
+ { 0x75, 0x65 }, /* GREEK SMALL LETTER ETA WITH OXIA -> 'e' */
+ { 0x76, 0x69 }, /* GREEK SMALL LETTER IOTA WITH VARIA -> 'i' */
+ { 0x77, 0x69 }, /* GREEK SMALL LETTER IOTA WITH OXIA -> 'i' */
+ { 0x78, 0x6F }, /* GREEK SMALL LETTER OMICRON WITH VARIA -> 'o' */
+ { 0x79, 0x6F }, /* GREEK SMALL LETTER OMICRON WITH OXIA -> 'o' */
+ { 0x7A, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH VARIA -> 'u' */
+ { 0x7B, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH OXIA -> 'u' */
+ { 0x7C, 0x6F }, /* GREEK SMALL LETTER OMEGA WITH VARIA -> 'o' */
+ { 0x7D, 0x6F }, /* GREEK SMALL LETTER OMEGA WITH OXIA -> 'o' */
+ { 0x80, 0x00 }, /* GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI -> ... */
+ { 0x87, 0x61 }, /* GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -> 'a' */
+ { 0x88, 0x00 }, /* GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI -> ... */
+ { 0x8F, 0x41 }, /* GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -> 'A' */
+ { 0x90, 0x00 }, /* GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI -> ... */
+ { 0x97, 0x65 }, /* GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -> 'e' */
+ { 0x98, 0x00 }, /* GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI -> ... */
+ { 0x9F, 0x45 }, /* GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -> 'E' */
+ { 0xA0, 0x00 }, /* GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI -> ... */
+ { 0xA7, 0x6F }, /* GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -> 'o' */
+ { 0xA8, 0x00 }, /* GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI -> ... */
+ { 0xAF, 0x4F }, /* GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -> 'O' */
+ { 0xB0, 0x00 }, /* GREEK SMALL LETTER ALPHA WITH VRACHY -> ... */
+ { 0xB4, 0x61 }, /* GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI -> 'a' */
+ { 0xB6, 0x61 }, /* GREEK SMALL LETTER ALPHA WITH PERISPOMENI -> 'a' */
+ { 0xB7, 0x61 }, /* GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI -> 'a' */
+ { 0xB8, 0x00 }, /* GREEK CAPITAL LETTER ALPHA WITH VRACHY -> ... */
+ { 0xBC, 0x41 }, /* GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI -> 'A' */
+ { 0xBD, 0x27 }, /* GREEK KORONIS -> ''' */
+ { 0xBE, 0x69 }, /* GREEK PROSGEGRAMMENI -> 'i' */
+ { 0xBF, 0x27 }, /* GREEK PSILI -> ''' */
+ { 0xC0, 0x7E }, /* GREEK PERISPOMENI -> '~' */
+ { 0xC2, 0x00 }, /* GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI -> ... */
+ { 0xC4, 0x65 }, /* GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI -> 'e' */
+ { 0xC6, 0x65 }, /* GREEK SMALL LETTER ETA WITH PERISPOMENI -> 'e' */
+ { 0xC7, 0x65 }, /* GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI -> 'e' */
+ { 0xC8, 0x00 }, /* GREEK CAPITAL LETTER EPSILON WITH VARIA -> ... */
+ { 0xCC, 0x45 }, /* GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI -> 'E' */
+ { 0xD0, 0x00 }, /* GREEK SMALL LETTER IOTA WITH VRACHY -> ... */
+ { 0xD3, 0x69 }, /* GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA -> 'i' */
+ { 0xD6, 0x69 }, /* GREEK SMALL LETTER IOTA WITH PERISPOMENI -> 'i' */
+ { 0xD7, 0x69 }, /* GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI -> 'i' */
+ { 0xD8, 0x00 }, /* GREEK CAPITAL LETTER IOTA WITH VRACHY -> ... */
+ { 0xDB, 0x49 }, /* GREEK CAPITAL LETTER IOTA WITH OXIA -> 'I' */
+ { 0xE0, 0x00 }, /* GREEK SMALL LETTER UPSILON WITH VRACHY -> ... */
+ { 0xE3, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA -> 'u' */
+ { 0xE4, 0x72 }, /* GREEK SMALL LETTER RHO WITH PSILI -> 'r' */
+ { 0xE5, 0x72 }, /* GREEK SMALL LETTER RHO WITH DASIA -> 'r' */
+ { 0xE6, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH PERISPOMENI -> 'u' */
+ { 0xE7, 0x75 }, /* GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI -> 'u' */
+ { 0xE8, 0x00 }, /* GREEK CAPITAL LETTER UPSILON WITH VRACHY -> ... */
+ { 0xEB, 0x55 }, /* GREEK CAPITAL LETTER UPSILON WITH OXIA -> 'U' */
+ { 0xEC, 0x52 }, /* GREEK CAPITAL LETTER RHO WITH DASIA -> 'R' */
+ { 0xEF, 0x60 }, /* GREEK VARIA -> '`' */
+ { 0xF2, 0x00 }, /* GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI -> ... */
+ { 0xF4, 0x6F }, /* GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI -> 'o' */
+ { 0xF6, 0x6F }, /* GREEK SMALL LETTER OMEGA WITH PERISPOMENI -> 'o' */
+ { 0xF7, 0x6F }, /* GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI -> 'o' */
+ { 0xF8, 0x00 }, /* GREEK CAPITAL LETTER OMICRON WITH VARIA -> ... */
+ { 0xFC, 0x4F }, /* GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI -> 'O' */
+ { 0xFD, 0x27 }, /* GREEK OXIA -> ''' */
+ { 0xFE, 0x60 }, /* GREEK DASIA -> '`' */
+ /* Entries for page 0x20 */
+ { 0x00, 0x00 }, /* EN QUAD -> ... */
+ { 0x0B, 0x20 }, /* ZERO WIDTH SPACE -> ' ' */
+ { 0x10, 0x00 }, /* HYPHEN -> ... */
+ { 0x15, 0x2D }, /* HORIZONTAL BAR -> '-' */
+ { 0x17, 0x5F }, /* DOUBLE LOW LINE -> '_' */
+ { 0x18, 0x27 }, /* LEFT SINGLE QUOTATION MARK -> ''' */
+ { 0x19, 0x27 }, /* RIGHT SINGLE QUOTATION MARK -> ''' */
+ { 0x1A, 0x2C }, /* SINGLE LOW-9 QUOTATION MARK -> ',' */
+ { 0x1B, 0x27 }, /* SINGLE HIGH-REVERSED-9 QUOTATION MARK -> ''' */
+ { 0x1C, 0x00 }, /* LEFT DOUBLE QUOTATION MARK -> ... */
+ { 0x1F, 0x22 }, /* DOUBLE HIGH-REVERSED-9 QUOTATION MARK -> '"' */
+ { 0x20, 0x2B }, /* DAGGER -> '+' */
+ { 0x22, 0x2A }, /* BULLET -> '*' */
+ { 0x23, 0x3E }, /* TRIANGULAR BULLET -> '>' */
+ { 0x24, 0x2E }, /* ONE DOT LEADER -> '.' */
+ { 0x26, 0x2E }, /* HORIZONTAL ELLIPSIS -> '.' */
+ { 0x27, 0x2E }, /* HYPHENATION POINT -> '.' */
+ { 0x2F, 0x20 }, /* NARROW NO-BREAK SPACE -> ' ' */
+ { 0x32, 0x27 }, /* PRIME -> ''' */
+ { 0x33, 0x22 }, /* DOUBLE PRIME -> '"' */
+ { 0x35, 0x60 }, /* REVERSED PRIME -> '`' */
+ { 0x38, 0x5E }, /* CARET -> '^' */
+ { 0x39, 0x3C }, /* SINGLE LEFT-POINTING ANGLE QUOTATION MARK -> '<' */
+ { 0x3A, 0x3E }, /* SINGLE RIGHT-POINTING ANGLE QUOTATION MARK -> '>' */
+ { 0x3B, 0x2A }, /* REFERENCE MARK -> '*' */
+ { 0x3C, 0x21 }, /* DOUBLE EXCLAMATION MARK -> '!' */
+ { 0x3D, 0x3F }, /* INTERROBANG -> '?' */
+ { 0x3E, 0x2D }, /* OVERLINE -> '-' */
+ { 0x3F, 0x5F }, /* UNDERTIE -> '_' */
+ { 0x40, 0x2D }, /* CHARACTER TIE -> '-' */
+ { 0x41, 0x5E }, /* CARET INSERTION POINT -> '^' */
+ { 0x42, 0x2A }, /* ASTERISM -> '*' */
+ { 0x43, 0x2D }, /* HYPHEN BULLET -> '-' */
+ { 0x44, 0x2F }, /* FRACTION SLASH -> '/' */
+ { 0x47, 0x3F }, /* DOUBLE QUESTION MARK -> '?' */
+ { 0x48, 0x3F }, /* QUESTION EXCLAMATION MARK -> '?' */
+ { 0x49, 0x21 }, /* EXCLAMATION QUESTION MARK -> '!' */
+ { 0x4A, 0x26 }, /* TIRONIAN SIGN ET -> '&' */
+ { 0x4B, 0x50 }, /* REVERSED PILCROW SIGN -> 'P' */
+ { 0x4C, 0x3C }, /* BLACK LEFTWARDS BULLET -> '<' */
+ { 0x4D, 0x3E }, /* BLACK RIGHTWARDS BULLET -> '>' */
+ { 0x4E, 0x2A }, /* LOW ASTERISK -> '*' */
+ { 0x4F, 0x3B }, /* REVERSED SEMICOLON -> ';' */
+ { 0x51, 0x2A }, /* TWO ASTERISKS ALIGNED VERTICALLY -> '*' */
+ { 0x52, 0x2D }, /* COMMERCIAL MINUS SIGN -> '-' */
+ { 0x53, 0x7E }, /* SWUNG DASH -> '~' */
+ { 0x55, 0x2A }, /* FLOWER PUNCTUATION MARK -> '*' */
+ { 0x5B, 0x3A }, /* FOUR DOT MARK -> ':' */
+ { 0x5F, 0x20 }, /* MEDIUM MATHEMATICAL SPACE -> ' ' */
+ { 0x70, 0x30 }, /* SUPERSCRIPT ZERO -> '0' */
+ { 0x71, 0x69 }, /* SUPERSCRIPT LATIN SMALL LETTER I -> 'i' */
+ { 0x74, 0x34 }, /* SUPERSCRIPT FOUR -> '4' */
+ { 0x75, 0x35 }, /* SUPERSCRIPT FIVE -> '5' */
+ { 0x76, 0x36 }, /* SUPERSCRIPT SIX -> '6' */
+ { 0x77, 0x37 }, /* SUPERSCRIPT SEVEN -> '7' */
+ { 0x78, 0x38 }, /* SUPERSCRIPT EIGHT -> '8' */
+ { 0x79, 0x39 }, /* SUPERSCRIPT NINE -> '9' */
+ { 0x7A, 0x2B }, /* SUPERSCRIPT PLUS SIGN -> '+' */
+ { 0x7B, 0x2D }, /* SUPERSCRIPT MINUS -> '-' */
+ { 0x7C, 0x3D }, /* SUPERSCRIPT EQUALS SIGN -> '=' */
+ { 0x7D, 0x28 }, /* SUPERSCRIPT LEFT PARENTHESIS -> '(' */
+ { 0x7E, 0x29 }, /* SUPERSCRIPT RIGHT PARENTHESIS -> ')' */
+ { 0x7F, 0x6E }, /* SUPERSCRIPT LATIN SMALL LETTER N -> 'n' */
+ { 0x80, 0x30 }, /* SUBSCRIPT ZERO -> '0' */
+ { 0x81, 0x31 }, /* SUBSCRIPT ONE -> '1' */
+ { 0x82, 0x32 }, /* SUBSCRIPT TWO -> '2' */
+ { 0x83, 0x33 }, /* SUBSCRIPT THREE -> '3' */
+ { 0x84, 0x34 }, /* SUBSCRIPT FOUR -> '4' */
+ { 0x85, 0x35 }, /* SUBSCRIPT FIVE -> '5' */
+ { 0x86, 0x36 }, /* SUBSCRIPT SIX -> '6' */
+ { 0x87, 0x37 }, /* SUBSCRIPT SEVEN -> '7' */
+ { 0x88, 0x38 }, /* SUBSCRIPT EIGHT -> '8' */
+ { 0x89, 0x39 }, /* SUBSCRIPT NINE -> '9' */
+ { 0x8A, 0x2B }, /* SUBSCRIPT PLUS SIGN -> '+' */
+ { 0x8B, 0x2D }, /* SUBSCRIPT MINUS -> '-' */
+ { 0x8C, 0x3D }, /* SUBSCRIPT EQUALS SIGN -> '=' */
+ { 0x8D, 0x28 }, /* SUBSCRIPT LEFT PARENTHESIS -> '(' */
+ { 0x8E, 0x29 }, /* SUBSCRIPT RIGHT PARENTHESIS -> ')' */
+ { 0x90, 0x61 }, /* LATIN SUBSCRIPT SMALL LETTER A -> 'a' */
+ { 0x91, 0x65 }, /* LATIN SUBSCRIPT SMALL LETTER E -> 'e' */
+ { 0x92, 0x6F }, /* LATIN SUBSCRIPT SMALL LETTER O -> 'o' */
+ { 0x93, 0x78 }, /* LATIN SUBSCRIPT SMALL LETTER X -> 'x' */
+ { 0x95, 0x68 }, /* LATIN SUBSCRIPT SMALL LETTER H -> 'h' */
+ { 0x96, 0x6B }, /* LATIN SUBSCRIPT SMALL LETTER K -> 'k' */
+ { 0x97, 0x6C }, /* LATIN SUBSCRIPT SMALL LETTER L -> 'l' */
+ { 0x98, 0x6D }, /* LATIN SUBSCRIPT SMALL LETTER M -> 'm' */
+ { 0x99, 0x6E }, /* LATIN SUBSCRIPT SMALL LETTER N -> 'n' */
+ { 0x9A, 0x70 }, /* LATIN SUBSCRIPT SMALL LETTER P -> 'p' */
+ { 0x9B, 0x73 }, /* LATIN SUBSCRIPT SMALL LETTER S -> 's' */
+ { 0x9C, 0x74 }, /* LATIN SUBSCRIPT SMALL LETTER T -> 't' */
+ { 0xA4, 0x4C }, /* LIRA SIGN -> 'L' */
+ { 0xA6, 0x4E }, /* NAIRA SIGN -> 'N' */
+ { 0xA9, 0x57 }, /* WON SIGN -> 'W' */
+ { 0xAB, 0x44 }, /* DONG SIGN -> 'D' */
+ { 0xAC, 0x45 }, /* EURO SIGN -> 'E' */
+ { 0xAD, 0x4B }, /* KIP SIGN -> 'K' */
+ { 0xAE, 0x54 }, /* TUGRIK SIGN -> 'T' */
+ { 0xB1, 0x50 }, /* PESO SIGN -> 'P' */
+ { 0xB2, 0x47 }, /* GUARANI SIGN -> 'G' */
+ { 0xB3, 0x41 }, /* AUSTRAL SIGN -> 'A' */
+ { 0xB6, 0x4C }, /* LIVRE TOURNOIS SIGN -> 'L' */
+ { 0xB8, 0x54 }, /* TENGE SIGN -> 'T' */
+ { 0xBA, 0x4C }, /* TURKISH LIRA SIGN -> 'L' */
+ { 0xBB, 0x4D }, /* NORDIC MARK SIGN -> 'M' */
+ { 0xBC, 0x6D }, /* MANAT SIGN -> 'm' */
+ { 0xBD, 0x52 }, /* RUBLE SIGN -> 'R' */
+ { 0xBE, 0x6C }, /* LARI SIGN -> 'l' */
+ /* Entries for page 0x21 */
+ { 0x02, 0x43 }, /* DOUBLE-STRUCK CAPITAL C -> 'C' */
+ { 0x03, 0x43 }, /* DEGREE CELSIUS -> 'C' */
+ { 0x09, 0x46 }, /* DEGREE FAHRENHEIT -> 'F' */
+ { 0x0A, 0x67 }, /* SCRIPT SMALL G -> 'g' */
+ { 0x0B, 0x00 }, /* SCRIPT CAPITAL H -> ... */
+ { 0x0D, 0x48 }, /* DOUBLE-STRUCK CAPITAL H -> 'H' */
+ { 0x0E, 0x68 }, /* PLANCK CONSTANT -> 'h' */
+ { 0x10, 0x49 }, /* SCRIPT CAPITAL I -> 'I' */
+ { 0x11, 0x49 }, /* BLACK-LETTER CAPITAL I -> 'I' */
+ { 0x12, 0x4C }, /* SCRIPT CAPITAL L -> 'L' */
+ { 0x13, 0x6C }, /* SCRIPT SMALL L -> 'l' */
+ { 0x15, 0x4E }, /* DOUBLE-STRUCK CAPITAL N -> 'N' */
+ { 0x19, 0x50 }, /* DOUBLE-STRUCK CAPITAL P -> 'P' */
+ { 0x1A, 0x51 }, /* DOUBLE-STRUCK CAPITAL Q -> 'Q' */
+ { 0x1B, 0x00 }, /* SCRIPT CAPITAL R -> ... */
+ { 0x1D, 0x52 }, /* DOUBLE-STRUCK CAPITAL R -> 'R' */
+ { 0x22, 0x54 }, /* TRADE MARK SIGN -> 'T' */
+ { 0x24, 0x5A }, /* DOUBLE-STRUCK CAPITAL Z -> 'Z' */
+ { 0x28, 0x5A }, /* BLACK-LETTER CAPITAL Z -> 'Z' */
+ { 0x2A, 0x4B }, /* KELVIN SIGN -> 'K' */
+ { 0x2B, 0x41 }, /* ANGSTROM SIGN -> 'A' */
+ { 0x2C, 0x42 }, /* SCRIPT CAPITAL B -> 'B' */
+ { 0x2D, 0x43 }, /* BLACK-LETTER CAPITAL C -> 'C' */
+ { 0x2E, 0x65 }, /* ESTIMATED SYMBOL -> 'e' */
+ { 0x2F, 0x65 }, /* SCRIPT SMALL E -> 'e' */
+ { 0x30, 0x45 }, /* SCRIPT CAPITAL E -> 'E' */
+ { 0x31, 0x46 }, /* SCRIPT CAPITAL F -> 'F' */
+ { 0x32, 0x46 }, /* TURNED CAPITAL F -> 'F' */
+ { 0x33, 0x4D }, /* SCRIPT CAPITAL M -> 'M' */
+ { 0x34, 0x6F }, /* SCRIPT SMALL O -> 'o' */
+ { 0x39, 0x69 }, /* INFORMATION SOURCE -> 'i' */
+ { 0x45, 0x44 }, /* DOUBLE-STRUCK ITALIC CAPITAL D -> 'D' */
+ { 0x46, 0x64 }, /* DOUBLE-STRUCK ITALIC SMALL D -> 'd' */
+ { 0x47, 0x65 }, /* DOUBLE-STRUCK ITALIC SMALL E -> 'e' */
+ { 0x48, 0x69 }, /* DOUBLE-STRUCK ITALIC SMALL I -> 'i' */
+ { 0x49, 0x6A }, /* DOUBLE-STRUCK ITALIC SMALL J -> 'j' */
+ { 0x4E, 0x66 }, /* TURNED SMALL F -> 'f' */
+ { 0x60, 0x49 }, /* ROMAN NUMERAL ONE -> 'I' */
+ { 0x64, 0x56 }, /* ROMAN NUMERAL FIVE -> 'V' */
+ { 0x69, 0x58 }, /* ROMAN NUMERAL TEN -> 'X' */
+ { 0x6C, 0x4C }, /* ROMAN NUMERAL FIFTY -> 'L' */
+ { 0x6D, 0x43 }, /* ROMAN NUMERAL ONE HUNDRED -> 'C' */
+ { 0x6E, 0x44 }, /* ROMAN NUMERAL FIVE HUNDRED -> 'D' */
+ { 0x6F, 0x4D }, /* ROMAN NUMERAL ONE THOUSAND -> 'M' */
+ { 0x70, 0x69 }, /* SMALL ROMAN NUMERAL ONE -> 'i' */
+ { 0x74, 0x76 }, /* SMALL ROMAN NUMERAL FIVE -> 'v' */
+ { 0x79, 0x78 }, /* SMALL ROMAN NUMERAL TEN -> 'x' */
+ { 0x7C, 0x6C }, /* SMALL ROMAN NUMERAL FIFTY -> 'l' */
+ { 0x7D, 0x63 }, /* SMALL ROMAN NUMERAL ONE HUNDRED -> 'c' */
+ { 0x7E, 0x64 }, /* SMALL ROMAN NUMERAL FIVE HUNDRED -> 'd' */
+ { 0x7F, 0x6D }, /* SMALL ROMAN NUMERAL ONE THOUSAND -> 'm' */
+ { 0x83, 0x29 }, /* ROMAN NUMERAL REVERSED ONE HUNDRED -> ')' */
+ { 0x90, 0x3C }, /* LEFTWARDS ARROW -> '<' */
+ { 0x91, 0x5E }, /* UPWARDS ARROW -> '^' */
+ { 0x92, 0x3E }, /* RIGHTWARDS ARROW -> '>' */
+ { 0x93, 0x76 }, /* DOWNWARDS ARROW -> 'v' */
+ { 0x94, 0x2D }, /* LEFT RIGHT ARROW -> '-' */
+ { 0x95, 0x7C }, /* UP DOWN ARROW -> '|' */
+ { 0x96, 0x5C }, /* NORTH WEST ARROW -> '\' */
+ { 0x97, 0x2F }, /* NORTH EAST ARROW -> '/' */
+ { 0x98, 0x5C }, /* SOUTH EAST ARROW -> '\' */
+ { 0x99, 0x2F }, /* SOUTH WEST ARROW -> '/' */
+ { 0x9A, 0x21 }, /* LEFTWARDS ARROW WITH STROKE -> '!' */
+ { 0x9B, 0x21 }, /* RIGHTWARDS ARROW WITH STROKE -> '!' */
+ { 0x9C, 0x7E }, /* LEFTWARDS WAVE ARROW -> '~' */
+ { 0x9D, 0x7E }, /* RIGHTWARDS WAVE ARROW -> '~' */
+ { 0x9E, 0x2D }, /* LEFTWARDS TWO HEADED ARROW -> '-' */
+ { 0x9F, 0x7C }, /* UPWARDS TWO HEADED ARROW -> '|' */
+ { 0xA0, 0x2D }, /* RIGHTWARDS TWO HEADED ARROW -> '-' */
+ { 0xA1, 0x7C }, /* DOWNWARDS TWO HEADED ARROW -> '|' */
+ { 0xA2, 0x00 }, /* LEFTWARDS ARROW WITH TAIL -> ... */
+ { 0xA4, 0x2D }, /* LEFTWARDS ARROW FROM BAR -> '-' */
+ { 0xA5, 0x7C }, /* UPWARDS ARROW FROM BAR -> '|' */
+ { 0xA6, 0x2D }, /* RIGHTWARDS ARROW FROM BAR -> '-' */
+ { 0xA7, 0x7C }, /* DOWNWARDS ARROW FROM BAR -> '|' */
+ { 0xA8, 0x7C }, /* UP DOWN ARROW WITH BASE -> '|' */
+ { 0xA9, 0x00 }, /* LEFTWARDS ARROW WITH HOOK -> ... */
+ { 0xAD, 0x2D }, /* LEFT RIGHT WAVE ARROW -> '-' */
+ { 0xAE, 0x21 }, /* LEFT RIGHT ARROW WITH STROKE -> '!' */
+ { 0xAF, 0x00 }, /* DOWNWARDS ZIGZAG ARROW -> ... */
+ { 0xB5, 0x7C }, /* DOWNWARDS ARROW WITH CORNER LEFTWARDS -> '|' */
+ { 0xB6, 0x5E }, /* ANTICLOCKWISE TOP SEMICIRCLE ARROW -> '^' */
+ { 0xB7, 0x56 }, /* CLOCKWISE TOP SEMICIRCLE ARROW -> 'V' */
+ { 0xB8, 0x5C }, /* NORTH WEST ARROW TO LONG BAR -> '\' */
+ { 0xB9, 0x3D }, /* LEFTWARDS ARROW TO BAR OVER RIGHTWARDS ARROW TO BAR -> '=' */
+ { 0xBA, 0x56 }, /* ANTICLOCKWISE OPEN CIRCLE ARROW -> 'V' */
+ { 0xBB, 0x5E }, /* CLOCKWISE OPEN CIRCLE ARROW -> '^' */
+ { 0xBC, 0x2D }, /* LEFTWARDS HARPOON WITH BARB UPWARDS -> '-' */
+ { 0xBD, 0x2D }, /* LEFTWARDS HARPOON WITH BARB DOWNWARDS -> '-' */
+ { 0xBE, 0x7C }, /* UPWARDS HARPOON WITH BARB RIGHTWARDS -> '|' */
+ { 0xBF, 0x7C }, /* UPWARDS HARPOON WITH BARB LEFTWARDS -> '|' */
+ { 0xC0, 0x2D }, /* RIGHTWARDS HARPOON WITH BARB UPWARDS -> '-' */
+ { 0xC1, 0x2D }, /* RIGHTWARDS HARPOON WITH BARB DOWNWARDS -> '-' */
+ { 0xC2, 0x7C }, /* DOWNWARDS HARPOON WITH BARB RIGHTWARDS -> '|' */
+ { 0xC3, 0x7C }, /* DOWNWARDS HARPOON WITH BARB LEFTWARDS -> '|' */
+ { 0xC4, 0x3D }, /* RIGHTWARDS ARROW OVER LEFTWARDS ARROW -> '=' */
+ { 0xC5, 0x7C }, /* UPWARDS ARROW LEFTWARDS OF DOWNWARDS ARROW -> '|' */
+ { 0xC6, 0x3D }, /* LEFTWARDS ARROW OVER RIGHTWARDS ARROW -> '=' */
+ { 0xC7, 0x3D }, /* LEFTWARDS PAIRED ARROWS -> '=' */
+ { 0xC8, 0x7C }, /* UPWARDS PAIRED ARROWS -> '|' */
+ { 0xC9, 0x3D }, /* RIGHTWARDS PAIRED ARROWS -> '=' */
+ { 0xCA, 0x7C }, /* DOWNWARDS PAIRED ARROWS -> '|' */
+ { 0xCB, 0x3D }, /* LEFTWARDS HARPOON OVER RIGHTWARDS HARPOON -> '=' */
+ { 0xCC, 0x3D }, /* RIGHTWARDS HARPOON OVER LEFTWARDS HARPOON -> '=' */
+ { 0xCD, 0x00 }, /* LEFTWARDS DOUBLE ARROW WITH STROKE -> ... */
+ { 0xCF, 0x21 }, /* RIGHTWARDS DOUBLE ARROW WITH STROKE -> '!' */
+ { 0xD0, 0x3C }, /* LEFTWARDS DOUBLE ARROW -> '<' */
+ { 0xD1, 0x5E }, /* UPWARDS DOUBLE ARROW -> '^' */
+ { 0xD2, 0x3E }, /* RIGHTWARDS DOUBLE ARROW -> '>' */
+ { 0xD3, 0x76 }, /* DOWNWARDS DOUBLE ARROW -> 'v' */
+ { 0xD4, 0x3D }, /* LEFT RIGHT DOUBLE ARROW -> '=' */
+ { 0xD5, 0x7C }, /* UP DOWN DOUBLE ARROW -> '|' */
+ { 0xD6, 0x5C }, /* NORTH WEST DOUBLE ARROW -> '\' */
+ { 0xD7, 0x2F }, /* NORTH EAST DOUBLE ARROW -> '/' */
+ { 0xD8, 0x5C }, /* SOUTH EAST DOUBLE ARROW -> '\' */
+ { 0xD9, 0x2F }, /* SOUTH WEST DOUBLE ARROW -> '/' */
+ { 0xDA, 0x3D }, /* LEFTWARDS TRIPLE ARROW -> '=' */
+ { 0xDB, 0x3D }, /* RIGHTWARDS TRIPLE ARROW -> '=' */
+ { 0xDC, 0x7E }, /* LEFTWARDS SQUIGGLE ARROW -> '~' */
+ { 0xDD, 0x7E }, /* RIGHTWARDS SQUIGGLE ARROW -> '~' */
+ { 0xDE, 0x7C }, /* UPWARDS ARROW WITH DOUBLE STROKE -> '|' */
+ { 0xDF, 0x7C }, /* DOWNWARDS ARROW WITH DOUBLE STROKE -> '|' */
+ { 0xE0, 0x2D }, /* LEFTWARDS DASHED ARROW -> '-' */
+ { 0xE1, 0x7C }, /* UPWARDS DASHED ARROW -> '|' */
+ { 0xE2, 0x2D }, /* RIGHTWARDS DASHED ARROW -> '-' */
+ { 0xE3, 0x7C }, /* DOWNWARDS DASHED ARROW -> '|' */
+ { 0xE4, 0x00 }, /* LEFTWARDS ARROW TO BAR -> ... */
+ { 0xE6, 0x2D }, /* LEFTWARDS WHITE ARROW -> '-' */
+ { 0xE7, 0x7C }, /* UPWARDS WHITE ARROW -> '|' */
+ { 0xE8, 0x2D }, /* RIGHTWARDS WHITE ARROW -> '-' */
+ { 0xE9, 0x00 }, /* DOWNWARDS WHITE ARROW -> ... */
+ { 0xEF, 0x7C }, /* UPWARDS WHITE DOUBLE ARROW ON PEDESTAL -> '|' */
+ { 0xF0, 0x2D }, /* RIGHTWARDS WHITE ARROW FROM WALL -> '-' */
+ { 0xF1, 0x5C }, /* NORTH WEST ARROW TO CORNER -> '\' */
+ { 0xF2, 0x5C }, /* SOUTH EAST ARROW TO CORNER -> '\' */
+ { 0xF3, 0x7C }, /* UP DOWN WHITE ARROW -> '|' */
+ /* Entries for page 0x22 */
+ { 0x04, 0x21 }, /* THERE DOES NOT EXIST -> '!' */
+ { 0x09, 0x21 }, /* NOT AN ELEMENT OF -> '!' */
+ { 0x0C, 0x21 }, /* DOES NOT CONTAIN AS MEMBER -> '!' */
+ { 0x12, 0x2D }, /* MINUS SIGN -> '-' */
+ { 0x15, 0x2F }, /* DIVISION SLASH -> '/' */
+ { 0x16, 0x5C }, /* SET MINUS -> '\' */
+ { 0x17, 0x2A }, /* ASTERISK OPERATOR -> '*' */
+ { 0x18, 0x6F }, /* RING OPERATOR -> 'o' */
+ { 0x19, 0x2E }, /* BULLET OPERATOR -> '.' */
+ { 0x23, 0x7C }, /* DIVIDES -> '|' */
+ { 0x24, 0x21 }, /* DOES NOT DIVIDE -> '!' */
+ { 0x26, 0x21 }, /* NOT PARALLEL TO -> '!' */
+ { 0x36, 0x3A }, /* RATIO -> ':' */
+ { 0x3C, 0x7E }, /* TILDE OPERATOR -> '~' */
+ { 0x41, 0x23 }, /* NOT TILDE -> '#' */
+ { 0x44, 0x23 }, /* NOT ASYMPTOTICALLY EQUAL TO -> '#' */
+ { 0x49, 0x23 }, /* NOT ALMOST EQUAL TO -> '#' */
+ { 0x60, 0x23 }, /* NOT EQUAL TO -> '#' */
+ { 0x62, 0x23 }, /* NOT IDENTICAL TO -> '#' */
+ { 0x64, 0x3C }, /* LESS-THAN OR EQUAL TO -> '<' */
+ { 0x65, 0x3E }, /* GREATER-THAN OR EQUAL TO -> '>' */
+ { 0x68, 0x23 }, /* LESS-THAN BUT NOT EQUAL TO -> '#' */
+ { 0x69, 0x23 }, /* GREATER-THAN BUT NOT EQUAL TO -> '#' */
+ { 0x6D, 0x23 }, /* NOT EQUIVALENT TO -> '#' */
+ { 0x6E, 0x21 }, /* NOT LESS-THAN -> '!' */
+ { 0x6F, 0x21 }, /* NOT GREATER-THAN -> '!' */
+ { 0x80, 0x21 }, /* DOES NOT PRECEDE -> '!' */
+ { 0x81, 0x21 }, /* DOES NOT SUCCEED -> '!' */
+ { 0x84, 0x21 }, /* NOT A SUBSET OF -> '!' */
+ { 0x85, 0x21 }, /* NOT A SUPERSET OF -> '!' */
+ { 0x8A, 0x23 }, /* SUBSET OF WITH NOT EQUAL TO -> '#' */
+ { 0x8B, 0x23 }, /* SUPERSET OF WITH NOT EQUAL TO -> '#' */
+ { 0x9B, 0x2A }, /* CIRCLED ASTERISK OPERATOR -> '*' */
+ { 0xC6, 0x2A }, /* STAR OPERATOR -> '*' */
+ /* Entries for page 0x23 */
+ { 0x03, 0x5E }, /* UP ARROWHEAD -> '^' */
+ { 0x29, 0x3C }, /* LEFT-POINTING ANGLE BRACKET -> '<' */
+ { 0x5F, 0x2A }, /* APL FUNCTIONAL SYMBOL CIRCLE STAR -> '*' */
+ { 0x63, 0x2A }, /* APL FUNCTIONAL SYMBOL STAR DIAERESIS -> '*' */
+ /* Entries for page 0x24 */
+ { 0x60, 0x31 }, /* CIRCLED DIGIT ONE -> '1' */
+ { 0x61, 0x32 }, /* CIRCLED DIGIT TWO -> '2' */
+ { 0x62, 0x33 }, /* CIRCLED DIGIT THREE -> '3' */
+ { 0x63, 0x34 }, /* CIRCLED DIGIT FOUR -> '4' */
+ { 0x64, 0x35 }, /* CIRCLED DIGIT FIVE -> '5' */
+ { 0x65, 0x36 }, /* CIRCLED DIGIT SIX -> '6' */
+ { 0x66, 0x37 }, /* CIRCLED DIGIT SEVEN -> '7' */
+ { 0x67, 0x38 }, /* CIRCLED DIGIT EIGHT -> '8' */
+ { 0x68, 0x39 }, /* CIRCLED DIGIT NINE -> '9' */
+ { 0xB6, 0x41 }, /* CIRCLED LATIN CAPITAL LETTER A -> 'A' */
+ { 0xB7, 0x42 }, /* CIRCLED LATIN CAPITAL LETTER B -> 'B' */
+ { 0xB8, 0x43 }, /* CIRCLED LATIN CAPITAL LETTER C -> 'C' */
+ { 0xB9, 0x44 }, /* CIRCLED LATIN CAPITAL LETTER D -> 'D' */
+ { 0xBA, 0x45 }, /* CIRCLED LATIN CAPITAL LETTER E -> 'E' */
+ { 0xBB, 0x46 }, /* CIRCLED LATIN CAPITAL LETTER F -> 'F' */
+ { 0xBC, 0x47 }, /* CIRCLED LATIN CAPITAL LETTER G -> 'G' */
+ { 0xBD, 0x48 }, /* CIRCLED LATIN CAPITAL LETTER H -> 'H' */
+ { 0xBE, 0x49 }, /* CIRCLED LATIN CAPITAL LETTER I -> 'I' */
+ { 0xBF, 0x4A }, /* CIRCLED LATIN CAPITAL LETTER J -> 'J' */
+ { 0xC0, 0x4B }, /* CIRCLED LATIN CAPITAL LETTER K -> 'K' */
+ { 0xC1, 0x4C }, /* CIRCLED LATIN CAPITAL LETTER L -> 'L' */
+ { 0xC2, 0x4D }, /* CIRCLED LATIN CAPITAL LETTER M -> 'M' */
+ { 0xC3, 0x4E }, /* CIRCLED LATIN CAPITAL LETTER N -> 'N' */
+ { 0xC4, 0x4F }, /* CIRCLED LATIN CAPITAL LETTER O -> 'O' */
+ { 0xC5, 0x50 }, /* CIRCLED LATIN CAPITAL LETTER P -> 'P' */
+ { 0xC6, 0x51 }, /* CIRCLED LATIN CAPITAL LETTER Q -> 'Q' */
+ { 0xC7, 0x52 }, /* CIRCLED LATIN CAPITAL LETTER R -> 'R' */
+ { 0xC8, 0x53 }, /* CIRCLED LATIN CAPITAL LETTER S -> 'S' */
+ { 0xC9, 0x54 }, /* CIRCLED LATIN CAPITAL LETTER T -> 'T' */
+ { 0xCA, 0x55 }, /* CIRCLED LATIN CAPITAL LETTER U -> 'U' */
+ { 0xCB, 0x56 }, /* CIRCLED LATIN CAPITAL LETTER V -> 'V' */
+ { 0xCC, 0x57 }, /* CIRCLED LATIN CAPITAL LETTER W -> 'W' */
+ { 0xCD, 0x58 }, /* CIRCLED LATIN CAPITAL LETTER X -> 'X' */
+ { 0xCE, 0x59 }, /* CIRCLED LATIN CAPITAL LETTER Y -> 'Y' */
+ { 0xCF, 0x5A }, /* CIRCLED LATIN CAPITAL LETTER Z -> 'Z' */
+ { 0xD0, 0x61 }, /* CIRCLED LATIN SMALL LETTER A -> 'a' */
+ { 0xD1, 0x62 }, /* CIRCLED LATIN SMALL LETTER B -> 'b' */
+ { 0xD2, 0x63 }, /* CIRCLED LATIN SMALL LETTER C -> 'c' */
+ { 0xD3, 0x64 }, /* CIRCLED LATIN SMALL LETTER D -> 'd' */
+ { 0xD4, 0x65 }, /* CIRCLED LATIN SMALL LETTER E -> 'e' */
+ { 0xD5, 0x66 }, /* CIRCLED LATIN SMALL LETTER F -> 'f' */
+ { 0xD6, 0x67 }, /* CIRCLED LATIN SMALL LETTER G -> 'g' */
+ { 0xD7, 0x68 }, /* CIRCLED LATIN SMALL LETTER H -> 'h' */
+ { 0xD8, 0x69 }, /* CIRCLED LATIN SMALL LETTER I -> 'i' */
+ { 0xD9, 0x6A }, /* CIRCLED LATIN SMALL LETTER J -> 'j' */
+ { 0xDA, 0x6B }, /* CIRCLED LATIN SMALL LETTER K -> 'k' */
+ { 0xDB, 0x6C }, /* CIRCLED LATIN SMALL LETTER L -> 'l' */
+ { 0xDC, 0x6D }, /* CIRCLED LATIN SMALL LETTER M -> 'm' */
+ { 0xDD, 0x6E }, /* CIRCLED LATIN SMALL LETTER N -> 'n' */
+ { 0xDE, 0x6F }, /* CIRCLED LATIN SMALL LETTER O -> 'o' */
+ { 0xDF, 0x70 }, /* CIRCLED LATIN SMALL LETTER P -> 'p' */
+ { 0xE0, 0x71 }, /* CIRCLED LATIN SMALL LETTER Q -> 'q' */
+ { 0xE1, 0x72 }, /* CIRCLED LATIN SMALL LETTER R -> 'r' */
+ { 0xE2, 0x73 }, /* CIRCLED LATIN SMALL LETTER S -> 's' */
+ { 0xE3, 0x74 }, /* CIRCLED LATIN SMALL LETTER T -> 't' */
+ { 0xE4, 0x75 }, /* CIRCLED LATIN SMALL LETTER U -> 'u' */
+ { 0xE5, 0x76 }, /* CIRCLED LATIN SMALL LETTER V -> 'v' */
+ { 0xE6, 0x77 }, /* CIRCLED LATIN SMALL LETTER W -> 'w' */
+ { 0xE7, 0x78 }, /* CIRCLED LATIN SMALL LETTER X -> 'x' */
+ { 0xE8, 0x79 }, /* CIRCLED LATIN SMALL LETTER Y -> 'y' */
+ { 0xE9, 0x7A }, /* CIRCLED LATIN SMALL LETTER Z -> 'z' */
+ { 0xEA, 0x30 }, /* CIRCLED DIGIT ZERO -> '0' */
+ { 0xF5, 0x31 }, /* DOUBLE CIRCLED DIGIT ONE -> '1' */
+ { 0xF6, 0x32 }, /* DOUBLE CIRCLED DIGIT TWO -> '2' */
+ { 0xF7, 0x33 }, /* DOUBLE CIRCLED DIGIT THREE -> '3' */
+ { 0xF8, 0x34 }, /* DOUBLE CIRCLED DIGIT FOUR -> '4' */
+ { 0xF9, 0x35 }, /* DOUBLE CIRCLED DIGIT FIVE -> '5' */
+ { 0xFA, 0x36 }, /* DOUBLE CIRCLED DIGIT SIX -> '6' */
+ { 0xFB, 0x37 }, /* DOUBLE CIRCLED DIGIT SEVEN -> '7' */
+ { 0xFC, 0x38 }, /* DOUBLE CIRCLED DIGIT EIGHT -> '8' */
+ { 0xFD, 0x39 }, /* DOUBLE CIRCLED DIGIT NINE -> '9' */
+ { 0xFF, 0x30 }, /* NEGATIVE CIRCLED DIGIT ZERO -> '0' */
+ /* Entries for page 0x25 */
+ { 0x00, 0x2D }, /* BOX DRAWINGS LIGHT HORIZONTAL -> '-' */
+ { 0x01, 0x2D }, /* BOX DRAWINGS HEAVY HORIZONTAL -> '-' */
+ { 0x02, 0x7C }, /* BOX DRAWINGS LIGHT VERTICAL -> '|' */
+ { 0x03, 0x7C }, /* BOX DRAWINGS HEAVY VERTICAL -> '|' */
+ { 0x04, 0x2D }, /* BOX DRAWINGS LIGHT TRIPLE DASH HORIZONTAL -> '-' */
+ { 0x05, 0x2D }, /* BOX DRAWINGS HEAVY TRIPLE DASH HORIZONTAL -> '-' */
+ { 0x06, 0x7C }, /* BOX DRAWINGS LIGHT TRIPLE DASH VERTICAL -> '|' */
+ { 0x07, 0x7C }, /* BOX DRAWINGS HEAVY TRIPLE DASH VERTICAL -> '|' */
+ { 0x08, 0x2D }, /* BOX DRAWINGS LIGHT QUADRUPLE DASH HORIZONTAL -> '-' */
+ { 0x09, 0x2D }, /* BOX DRAWINGS HEAVY QUADRUPLE DASH HORIZONTAL -> '-' */
+ { 0x0A, 0x7C }, /* BOX DRAWINGS LIGHT QUADRUPLE DASH VERTICAL -> '|' */
+ { 0x0B, 0x7C }, /* BOX DRAWINGS HEAVY QUADRUPLE DASH VERTICAL -> '|' */
+ { 0x0C, 0x00 }, /* BOX DRAWINGS LIGHT DOWN AND RIGHT -> ... */
+ { 0x4B, 0x2B }, /* BOX DRAWINGS HEAVY VERTICAL AND HORIZONTAL -> '+' */
+ { 0x4C, 0x2D }, /* BOX DRAWINGS LIGHT DOUBLE DASH HORIZONTAL -> '-' */
+ { 0x4D, 0x2D }, /* BOX DRAWINGS HEAVY DOUBLE DASH HORIZONTAL -> '-' */
+ { 0x4E, 0x7C }, /* BOX DRAWINGS LIGHT DOUBLE DASH VERTICAL -> '|' */
+ { 0x4F, 0x7C }, /* BOX DRAWINGS HEAVY DOUBLE DASH VERTICAL -> '|' */
+ { 0x50, 0x2D }, /* BOX DRAWINGS DOUBLE HORIZONTAL -> '-' */
+ { 0x51, 0x7C }, /* BOX DRAWINGS DOUBLE VERTICAL -> '|' */
+ { 0x52, 0x00 }, /* BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE -> ... */
+ { 0x70, 0x2B }, /* BOX DRAWINGS LIGHT ARC UP AND RIGHT -> '+' */
+ { 0x71, 0x2F }, /* BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT -> '/' */
+ { 0x72, 0x5C }, /* BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT -> '\' */
+ { 0x73, 0x58 }, /* BOX DRAWINGS LIGHT DIAGONAL CROSS -> 'X' */
+ { 0x74, 0x2D }, /* BOX DRAWINGS LIGHT LEFT -> '-' */
+ { 0x75, 0x7C }, /* BOX DRAWINGS LIGHT UP -> '|' */
+ { 0x76, 0x2D }, /* BOX DRAWINGS LIGHT RIGHT -> '-' */
+ { 0x77, 0x7C }, /* BOX DRAWINGS LIGHT DOWN -> '|' */
+ { 0x78, 0x2D }, /* BOX DRAWINGS HEAVY LEFT -> '-' */
+ { 0x79, 0x7C }, /* BOX DRAWINGS HEAVY UP -> '|' */
+ { 0x7A, 0x2D }, /* BOX DRAWINGS HEAVY RIGHT -> '-' */
+ { 0x7B, 0x7C }, /* BOX DRAWINGS HEAVY DOWN -> '|' */
+ { 0x7C, 0x2D }, /* BOX DRAWINGS LIGHT LEFT AND HEAVY RIGHT -> '-' */
+ { 0x7D, 0x7C }, /* BOX DRAWINGS LIGHT UP AND HEAVY DOWN -> '|' */
+ { 0x7E, 0x2D }, /* BOX DRAWINGS HEAVY LEFT AND LIGHT RIGHT -> '-' */
+ { 0x7F, 0x7C }, /* BOX DRAWINGS HEAVY UP AND LIGHT DOWN -> '|' */
+ { 0x80, 0x00 }, /* UPPER HALF BLOCK -> ... */
+ { 0x93, 0x23 }, /* DARK SHADE -> '#' */
+ { 0x94, 0x2D }, /* UPPER ONE EIGHTH BLOCK -> '-' */
+ { 0x95, 0x7C }, /* RIGHT ONE EIGHTH BLOCK -> '|' */
+ { 0x96, 0x00 }, /* QUADRANT LOWER LEFT -> ... */
+ { 0xB1, 0x23 }, /* WHITE PARALLELOGRAM -> '#' */
+ { 0xB2, 0x00 }, /* BLACK UP-POINTING TRIANGLE -> ... */
+ { 0xB5, 0x5E }, /* WHITE UP-POINTING SMALL TRIANGLE -> '^' */
+ { 0xB6, 0x00 }, /* BLACK RIGHT-POINTING TRIANGLE -> ... */
+ { 0xBB, 0x3E }, /* WHITE RIGHT-POINTING POINTER -> '>' */
+ { 0xBC, 0x00 }, /* BLACK DOWN-POINTING TRIANGLE -> ... */
+ { 0xBF, 0x56 }, /* WHITE DOWN-POINTING SMALL TRIANGLE -> 'V' */
+ { 0xC0, 0x00 }, /* BLACK LEFT-POINTING TRIANGLE -> ... */
+ { 0xC5, 0x3C }, /* WHITE LEFT-POINTING POINTER -> '<' */
+ { 0xC6, 0x00 }, /* BLACK DIAMOND -> ... */
+ { 0xE6, 0x2A }, /* WHITE BULLET -> '*' */
+ { 0xE7, 0x00 }, /* SQUARE WITH LEFT HALF BLACK -> ... */
+ { 0xEB, 0x23 }, /* WHITE SQUARE WITH VERTICAL BISECTING LINE -> '#' */
+ { 0xEC, 0x00 }, /* WHITE UP-POINTING TRIANGLE WITH DOT -> ... */
+ { 0xEE, 0x5E }, /* UP-POINTING TRIANGLE WITH RIGHT HALF BLACK -> '^' */
+ { 0xEF, 0x4F }, /* LARGE CIRCLE -> 'O' */
+ { 0xF0, 0x00 }, /* WHITE SQUARE WITH UPPER LEFT QUADRANT -> ... */
+ { 0xF7, 0x23 }, /* WHITE CIRCLE WITH UPPER RIGHT QUADRANT -> '#' */
+ /* Entries for page 0x26 */
+ { 0x05, 0x2A }, /* BLACK STAR -> '*' */
+ { 0x06, 0x2A }, /* WHITE STAR -> '*' */
+ { 0x2A, 0x2A }, /* STAR AND CRESCENT -> '*' */
+ { 0x6F, 0x23 }, /* MUSIC SHARP SIGN -> '#' */
+ { 0x98, 0x2A }, /* FLOWER -> '*' */
+ { 0x9D, 0x2A }, /* OUTLINED WHITE STAR -> '*' */
+ /* Entries for page 0x27 */
+ { 0x13, 0x76 }, /* CHECK MARK -> 'v' */
+ { 0x14, 0x56 }, /* HEAVY CHECK MARK -> 'V' */
+ { 0x15, 0x78 }, /* MULTIPLICATION X -> 'x' */
+ { 0x16, 0x58 }, /* HEAVY MULTIPLICATION X -> 'X' */
+ { 0x17, 0x78 }, /* BALLOT X -> 'x' */
+ { 0x18, 0x58 }, /* HEAVY BALLOT X -> 'X' */
+ { 0x21, 0x00 }, /* STAR OF DAVID -> ... */
+ { 0x46, 0x2A }, /* HEAVY CHEVRON SNOWFLAKE -> '*' */
+ { 0x49, 0x00 }, /* BALLOON-SPOKED ASTERISK -> ... */
+ { 0x4B, 0x2A }, /* HEAVY EIGHT TEARDROP-SPOKED PROPELLER ASTERISK -> '*' */
+ { 0x58, 0x7C }, /* LIGHT VERTICAL BAR -> '|' */
+ { 0x5C, 0x27 }, /* HEAVY SINGLE COMMA QUOTATION MARK ORNAMENT -> ''' */
+ { 0x5D, 0x22 }, /* HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT -> '"' */
+ { 0x5E, 0x22 }, /* HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT -> '"' */
+ { 0x5F, 0x2C }, /* HEAVY LOW SINGLE COMMA QUOTATION MARK ORNAMENT -> ',' */
+ { 0x62, 0x21 }, /* HEAVY EXCLAMATION MARK ORNAMENT -> '!' */
+ { 0xE6, 0x5B }, /* MATHEMATICAL LEFT WHITE SQUARE BRACKET -> '[' */
+ { 0xE8, 0x3C }, /* MATHEMATICAL LEFT ANGLE BRACKET -> '<' */
+ /* Entries for page 0x28 */
+ { 0x00, 0x20 }, /* BRAILLE PATTERN BLANK -> ' ' */
+ { 0x01, 0x61 }, /* BRAILLE PATTERN DOTS-1 -> 'a' */
+ { 0x02, 0x31 }, /* BRAILLE PATTERN DOTS-2 -> '1' */
+ { 0x03, 0x62 }, /* BRAILLE PATTERN DOTS-12 -> 'b' */
+ { 0x04, 0x27 }, /* BRAILLE PATTERN DOTS-3 -> ''' */
+ { 0x05, 0x6B }, /* BRAILLE PATTERN DOTS-13 -> 'k' */
+ { 0x06, 0x32 }, /* BRAILLE PATTERN DOTS-23 -> '2' */
+ { 0x07, 0x6C }, /* BRAILLE PATTERN DOTS-123 -> 'l' */
+ { 0x08, 0x40 }, /* BRAILLE PATTERN DOTS-4 -> '@' */
+ { 0x09, 0x63 }, /* BRAILLE PATTERN DOTS-14 -> 'c' */
+ { 0x0A, 0x69 }, /* BRAILLE PATTERN DOTS-24 -> 'i' */
+ { 0x0B, 0x66 }, /* BRAILLE PATTERN DOTS-124 -> 'f' */
+ { 0x0C, 0x2F }, /* BRAILLE PATTERN DOTS-34 -> '/' */
+ { 0x0D, 0x6D }, /* BRAILLE PATTERN DOTS-134 -> 'm' */
+ { 0x0E, 0x73 }, /* BRAILLE PATTERN DOTS-234 -> 's' */
+ { 0x0F, 0x70 }, /* BRAILLE PATTERN DOTS-1234 -> 'p' */
+ { 0x10, 0x22 }, /* BRAILLE PATTERN DOTS-5 -> '"' */
+ { 0x11, 0x65 }, /* BRAILLE PATTERN DOTS-15 -> 'e' */
+ { 0x12, 0x33 }, /* BRAILLE PATTERN DOTS-25 -> '3' */
+ { 0x13, 0x68 }, /* BRAILLE PATTERN DOTS-125 -> 'h' */
+ { 0x14, 0x39 }, /* BRAILLE PATTERN DOTS-35 -> '9' */
+ { 0x15, 0x6F }, /* BRAILLE PATTERN DOTS-135 -> 'o' */
+ { 0x16, 0x36 }, /* BRAILLE PATTERN DOTS-235 -> '6' */
+ { 0x17, 0x72 }, /* BRAILLE PATTERN DOTS-1235 -> 'r' */
+ { 0x18, 0x5E }, /* BRAILLE PATTERN DOTS-45 -> '^' */
+ { 0x19, 0x64 }, /* BRAILLE PATTERN DOTS-145 -> 'd' */
+ { 0x1A, 0x6A }, /* BRAILLE PATTERN DOTS-245 -> 'j' */
+ { 0x1B, 0x67 }, /* BRAILLE PATTERN DOTS-1245 -> 'g' */
+ { 0x1C, 0x3E }, /* BRAILLE PATTERN DOTS-345 -> '>' */
+ { 0x1D, 0x6E }, /* BRAILLE PATTERN DOTS-1345 -> 'n' */
+ { 0x1E, 0x74 }, /* BRAILLE PATTERN DOTS-2345 -> 't' */
+ { 0x1F, 0x71 }, /* BRAILLE PATTERN DOTS-12345 -> 'q' */
+ { 0x20, 0x2C }, /* BRAILLE PATTERN DOTS-6 -> ',' */
+ { 0x21, 0x2A }, /* BRAILLE PATTERN DOTS-16 -> '*' */
+ { 0x22, 0x35 }, /* BRAILLE PATTERN DOTS-26 -> '5' */
+ { 0x23, 0x3C }, /* BRAILLE PATTERN DOTS-126 -> '<' */
+ { 0x24, 0x2D }, /* BRAILLE PATTERN DOTS-36 -> '-' */
+ { 0x25, 0x75 }, /* BRAILLE PATTERN DOTS-136 -> 'u' */
+ { 0x26, 0x38 }, /* BRAILLE PATTERN DOTS-236 -> '8' */
+ { 0x27, 0x76 }, /* BRAILLE PATTERN DOTS-1236 -> 'v' */
+ { 0x28, 0x2E }, /* BRAILLE PATTERN DOTS-46 -> '.' */
+ { 0x29, 0x25 }, /* BRAILLE PATTERN DOTS-146 -> '%' */
+ { 0x2A, 0x5B }, /* BRAILLE PATTERN DOTS-246 -> '[' */
+ { 0x2B, 0x24 }, /* BRAILLE PATTERN DOTS-1246 -> '$' */
+ { 0x2C, 0x2B }, /* BRAILLE PATTERN DOTS-346 -> '+' */
+ { 0x2D, 0x78 }, /* BRAILLE PATTERN DOTS-1346 -> 'x' */
+ { 0x2E, 0x21 }, /* BRAILLE PATTERN DOTS-2346 -> '!' */
+ { 0x2F, 0x26 }, /* BRAILLE PATTERN DOTS-12346 -> '&' */
+ { 0x30, 0x3B }, /* BRAILLE PATTERN DOTS-56 -> ';' */
+ { 0x31, 0x3A }, /* BRAILLE PATTERN DOTS-156 -> ':' */
+ { 0x32, 0x34 }, /* BRAILLE PATTERN DOTS-256 -> '4' */
+ { 0x33, 0x5C }, /* BRAILLE PATTERN DOTS-1256 -> '\' */
+ { 0x34, 0x30 }, /* BRAILLE PATTERN DOTS-356 -> '0' */
+ { 0x35, 0x7A }, /* BRAILLE PATTERN DOTS-1356 -> 'z' */
+ { 0x36, 0x37 }, /* BRAILLE PATTERN DOTS-2356 -> '7' */
+ { 0x37, 0x28 }, /* BRAILLE PATTERN DOTS-12356 -> '(' */
+ { 0x38, 0x5F }, /* BRAILLE PATTERN DOTS-456 -> '_' */
+ { 0x39, 0x3F }, /* BRAILLE PATTERN DOTS-1456 -> '?' */
+ { 0x3A, 0x77 }, /* BRAILLE PATTERN DOTS-2456 -> 'w' */
+ { 0x3B, 0x5D }, /* BRAILLE PATTERN DOTS-12456 -> ']' */
+ { 0x3C, 0x23 }, /* BRAILLE PATTERN DOTS-3456 -> '#' */
+ { 0x3D, 0x79 }, /* BRAILLE PATTERN DOTS-13456 -> 'y' */
+ { 0x3E, 0x29 }, /* BRAILLE PATTERN DOTS-23456 -> ')' */
+ { 0x3F, 0x3D }, /* BRAILLE PATTERN DOTS-123456 -> '=' */
+ /* Entries for page 0x29 */
+ { 0x83, 0x7B }, /* LEFT WHITE CURLY BRACKET -> '{' */
+ /* Entries for page 0x2C */
+ { 0x60, 0x4C }, /* LATIN CAPITAL LETTER L WITH DOUBLE BAR -> 'L' */
+ { 0x61, 0x6C }, /* LATIN SMALL LETTER L WITH DOUBLE BAR -> 'l' */
+ { 0x62, 0x4C }, /* LATIN CAPITAL LETTER L WITH MIDDLE TILDE -> 'L' */
+ { 0x63, 0x50 }, /* LATIN CAPITAL LETTER P WITH STROKE -> 'P' */
+ { 0x64, 0x52 }, /* LATIN CAPITAL LETTER R WITH TAIL -> 'R' */
+ { 0x65, 0x61 }, /* LATIN SMALL LETTER A WITH STROKE -> 'a' */
+ { 0x66, 0x74 }, /* LATIN SMALL LETTER T WITH DIAGONAL STROKE -> 't' */
+ { 0x67, 0x48 }, /* LATIN CAPITAL LETTER H WITH DESCENDER -> 'H' */
+ { 0x68, 0x68 }, /* LATIN SMALL LETTER H WITH DESCENDER -> 'h' */
+ { 0x69, 0x4B }, /* LATIN CAPITAL LETTER K WITH DESCENDER -> 'K' */
+ { 0x6A, 0x6B }, /* LATIN SMALL LETTER K WITH DESCENDER -> 'k' */
+ { 0x6B, 0x5A }, /* LATIN CAPITAL LETTER Z WITH DESCENDER -> 'Z' */
+ { 0x6C, 0x7A }, /* LATIN SMALL LETTER Z WITH DESCENDER -> 'z' */
+ { 0x6E, 0x4D }, /* LATIN CAPITAL LETTER M WITH HOOK -> 'M' */
+ { 0x6F, 0x41 }, /* LATIN CAPITAL LETTER TURNED A -> 'A' */
+ /* Entries for page 0x2E */
+ { 0x00, 0x72 }, /* RIGHT ANGLE SUBSTITUTION MARKER -> 'r' */
+ { 0x06, 0x54 }, /* RAISED INTERPOLATION MARKER -> 'T' */
+ { 0x09, 0x73 }, /* LEFT TRANSPOSITION BRACKET -> 's' */
+ { 0x0C, 0x5C }, /* LEFT RAISED OMISSION BRACKET -> '\' */
+ { 0x0D, 0x2F }, /* RIGHT RAISED OMISSION BRACKET -> '/' */
+ { 0x12, 0x3E }, /* HYPODIASTOLE -> '>' */
+ { 0x13, 0x25 }, /* DOTTED OBELOS -> '%' */
+ { 0x16, 0x3E }, /* DOTTED RIGHT-POINTING ANGLE -> '>' */
+ { 0x17, 0x3D }, /* DOUBLE OBLIQUE HYPHEN -> '=' */
+ { 0x19, 0x2F }, /* PALM BRANCH -> '/' */
+ { 0x1A, 0x2D }, /* HYPHEN WITH DIAERESIS -> '-' */
+ { 0x1B, 0x7E }, /* TILDE WITH RING ABOVE -> '~' */
+ { 0x1C, 0x5C }, /* LEFT LOW PARAPHRASE BRACKET -> '\' */
+ { 0x1D, 0x2F }, /* RIGHT LOW PARAPHRASE BRACKET -> '/' */
+ { 0x1E, 0x7E }, /* TILDE WITH DOT ABOVE -> '~' */
+ { 0x1F, 0x7E }, /* TILDE WITH DOT BELOW -> '~' */
+ { 0x2E, 0x3F }, /* REVERSED QUESTION MARK -> '?' */
+ { 0x2F, 0x27 }, /* VERTICAL TILDE -> ''' */
+ { 0x30, 0x6F }, /* RING POINT -> 'o' */
+ { 0x31, 0x2E }, /* WORD SEPARATOR MIDDLE DOT -> '.' */
+ { 0x32, 0x2C }, /* TURNED COMMA -> ',' */
+ { 0x33, 0x2E }, /* RAISED DOT -> '.' */
+ { 0x34, 0x2C }, /* RAISED COMMA -> ',' */
+ { 0x35, 0x3B }, /* TURNED SEMICOLON -> ';' */
+ { 0x3C, 0x78 }, /* STENOGRAPHIC FULL STOP -> 'x' */
+ { 0x3D, 0x7C }, /* VERTICAL SIX DOTS -> '|' */
+ { 0x40, 0x3D }, /* DOUBLE HYPHEN -> '=' */
+ { 0x41, 0x2C }, /* REVERSED COMMA -> ',' */
+ { 0x42, 0x22 }, /* DOUBLE LOW-REVERSED-9 QUOTATION MARK -> '"' */
+ /* Entries for page 0x30 */
+ { 0x00, 0x20 }, /* IDEOGRAPHIC SPACE -> ' ' */
+ { 0x03, 0x22 }, /* DITTO MARK -> '"' */
+ { 0x05, 0x22 }, /* IDEOGRAPHIC ITERATION MARK -> '"' */
+ { 0x06, 0x2F }, /* IDEOGRAPHIC CLOSING MARK -> '/' */
+ { 0x07, 0x30 }, /* IDEOGRAPHIC NUMBER ZERO -> '0' */
+ { 0x08, 0x3C }, /* LEFT ANGLE BRACKET -> '<' */
+ { 0x0C, 0x5B }, /* LEFT CORNER BRACKET -> '[' */
+ { 0x0E, 0x7B }, /* LEFT WHITE CORNER BRACKET -> '{' */
+ { 0x12, 0x40 }, /* POSTAL MARK -> '@' */
+ { 0x14, 0x5B }, /* LEFT TORTOISE SHELL BRACKET -> '[' */
+ { 0x20, 0x40 }, /* POSTAL MARK FACE -> '@' */
+ { 0x21, 0x31 }, /* HANGZHOU NUMERAL ONE -> '1' */
+ { 0x22, 0x32 }, /* HANGZHOU NUMERAL TWO -> '2' */
+ { 0x23, 0x33 }, /* HANGZHOU NUMERAL THREE -> '3' */
+ { 0x24, 0x34 }, /* HANGZHOU NUMERAL FOUR -> '4' */
+ { 0x25, 0x35 }, /* HANGZHOU NUMERAL FIVE -> '5' */
+ { 0x26, 0x36 }, /* HANGZHOU NUMERAL SIX -> '6' */
+ { 0x27, 0x37 }, /* HANGZHOU NUMERAL SEVEN -> '7' */
+ { 0x28, 0x38 }, /* HANGZHOU NUMERAL EIGHT -> '8' */
+ { 0x29, 0x39 }, /* HANGZHOU NUMERAL NINE -> '9' */
+ { 0x30, 0x7E }, /* WAVY DASH -> '~' */
+ { 0x31, 0x00 }, /* VERTICAL KANA REPEAT MARK -> ... */
+ { 0x34, 0x2B }, /* VERTICAL KANA REPEAT WITH VOICED SOUND MARK UPPER HALF -> '+' */
+ { 0x36, 0x40 }, /* CIRCLED POSTAL MARK -> '@' */
+ { 0x41, 0x61 }, /* HIRAGANA LETTER SMALL A -> 'a' */
+ { 0x42, 0x61 }, /* HIRAGANA LETTER A -> 'a' */
+ { 0x43, 0x69 }, /* HIRAGANA LETTER SMALL I -> 'i' */
+ { 0x44, 0x69 }, /* HIRAGANA LETTER I -> 'i' */
+ { 0x45, 0x75 }, /* HIRAGANA LETTER SMALL U -> 'u' */
+ { 0x46, 0x75 }, /* HIRAGANA LETTER U -> 'u' */
+ { 0x47, 0x65 }, /* HIRAGANA LETTER SMALL E -> 'e' */
+ { 0x48, 0x65 }, /* HIRAGANA LETTER E -> 'e' */
+ { 0x49, 0x6F }, /* HIRAGANA LETTER SMALL O -> 'o' */
+ { 0x4A, 0x6F }, /* HIRAGANA LETTER O -> 'o' */
+ { 0x93, 0x6E }, /* HIRAGANA LETTER N -> 'n' */
+ { 0x9D, 0x22 }, /* HIRAGANA ITERATION MARK -> '"' */
+ { 0x9E, 0x22 }, /* HIRAGANA VOICED ITERATION MARK -> '"' */
+ { 0xA0, 0x3D }, /* KATAKANA-HIRAGANA DOUBLE HYPHEN -> '=' */
+ { 0xA1, 0x61 }, /* KATAKANA LETTER SMALL A -> 'a' */
+ { 0xA2, 0x61 }, /* KATAKANA LETTER A -> 'a' */
+ { 0xA3, 0x69 }, /* KATAKANA LETTER SMALL I -> 'i' */
+ { 0xA4, 0x69 }, /* KATAKANA LETTER I -> 'i' */
+ { 0xA5, 0x75 }, /* KATAKANA LETTER SMALL U -> 'u' */
+ { 0xA6, 0x75 }, /* KATAKANA LETTER U -> 'u' */
+ { 0xA7, 0x65 }, /* KATAKANA LETTER SMALL E -> 'e' */
+ { 0xA8, 0x65 }, /* KATAKANA LETTER E -> 'e' */
+ { 0xA9, 0x6F }, /* KATAKANA LETTER SMALL O -> 'o' */
+ { 0xAA, 0x6F }, /* KATAKANA LETTER O -> 'o' */
+ { 0xF3, 0x6E }, /* KATAKANA LETTER N -> 'n' */
+ { 0xFB, 0x2A }, /* KATAKANA MIDDLE DOT -> '*' */
+ { 0xFC, 0x2D }, /* KATAKANA-HIRAGANA PROLONGED SOUND MARK -> '-' */
+ { 0xFD, 0x22 }, /* KATAKANA ITERATION MARK -> '"' */
+ { 0xFE, 0x22 }, /* KATAKANA VOICED ITERATION MARK -> '"' */
+ /* Entries for page 0x31 */
+ { 0x05, 0x42 }, /* BOPOMOFO LETTER B -> 'B' */
+ { 0x06, 0x50 }, /* BOPOMOFO LETTER P -> 'P' */
+ { 0x07, 0x4D }, /* BOPOMOFO LETTER M -> 'M' */
+ { 0x08, 0x46 }, /* BOPOMOFO LETTER F -> 'F' */
+ { 0x09, 0x44 }, /* BOPOMOFO LETTER D -> 'D' */
+ { 0x0A, 0x54 }, /* BOPOMOFO LETTER T -> 'T' */
+ { 0x0B, 0x4E }, /* BOPOMOFO LETTER N -> 'N' */
+ { 0x0C, 0x4C }, /* BOPOMOFO LETTER L -> 'L' */
+ { 0x0D, 0x47 }, /* BOPOMOFO LETTER G -> 'G' */
+ { 0x0E, 0x4B }, /* BOPOMOFO LETTER K -> 'K' */
+ { 0x0F, 0x48 }, /* BOPOMOFO LETTER H -> 'H' */
+ { 0x10, 0x4A }, /* BOPOMOFO LETTER J -> 'J' */
+ { 0x11, 0x51 }, /* BOPOMOFO LETTER Q -> 'Q' */
+ { 0x12, 0x58 }, /* BOPOMOFO LETTER X -> 'X' */
+ { 0x16, 0x52 }, /* BOPOMOFO LETTER R -> 'R' */
+ { 0x17, 0x5A }, /* BOPOMOFO LETTER Z -> 'Z' */
+ { 0x18, 0x43 }, /* BOPOMOFO LETTER C -> 'C' */
+ { 0x19, 0x53 }, /* BOPOMOFO LETTER S -> 'S' */
+ { 0x1A, 0x41 }, /* BOPOMOFO LETTER A -> 'A' */
+ { 0x1B, 0x4F }, /* BOPOMOFO LETTER O -> 'O' */
+ { 0x1C, 0x45 }, /* BOPOMOFO LETTER E -> 'E' */
+ { 0x27, 0x49 }, /* BOPOMOFO LETTER I -> 'I' */
+ { 0x28, 0x55 }, /* BOPOMOFO LETTER U -> 'U' */
+ { 0x2A, 0x56 }, /* BOPOMOFO LETTER V -> 'V' */
+ { 0x31, 0x67 }, /* HANGUL LETTER KIYEOK -> 'g' */
+ { 0x34, 0x6E }, /* HANGUL LETTER NIEUN -> 'n' */
+ { 0x37, 0x64 }, /* HANGUL LETTER TIKEUT -> 'd' */
+ { 0x39, 0x72 }, /* HANGUL LETTER RIEUL -> 'r' */
+ { 0x41, 0x6D }, /* HANGUL LETTER MIEUM -> 'm' */
+ { 0x42, 0x62 }, /* HANGUL LETTER PIEUP -> 'b' */
+ { 0x45, 0x73 }, /* HANGUL LETTER SIOS -> 's' */
+ { 0x48, 0x6A }, /* HANGUL LETTER CIEUC -> 'j' */
+ { 0x4A, 0x63 }, /* HANGUL LETTER CHIEUCH -> 'c' */
+ { 0x4B, 0x6B }, /* HANGUL LETTER KHIEUKH -> 'k' */
+ { 0x4C, 0x74 }, /* HANGUL LETTER THIEUTH -> 't' */
+ { 0x4D, 0x70 }, /* HANGUL LETTER PHIEUPH -> 'p' */
+ { 0x4E, 0x68 }, /* HANGUL LETTER HIEUH -> 'h' */
+ { 0x4F, 0x61 }, /* HANGUL LETTER A -> 'a' */
+ { 0x54, 0x65 }, /* HANGUL LETTER E -> 'e' */
+ { 0x57, 0x6F }, /* HANGUL LETTER O -> 'o' */
+ { 0x5C, 0x75 }, /* HANGUL LETTER U -> 'u' */
+ { 0x63, 0x69 }, /* HANGUL LETTER I -> 'i' */
+ { 0x7F, 0x5A }, /* HANGUL LETTER PANSIOS -> 'Z' */
+ { 0x81, 0x4E }, /* HANGUL LETTER YESIEUNG -> 'N' */
+ { 0x86, 0x51 }, /* HANGUL LETTER YEORINHIEUH -> 'Q' */
+ { 0x8D, 0x55 }, /* HANGUL LETTER ARAEA -> 'U' */
+ { 0xB4, 0x50 }, /* BOPOMOFO FINAL LETTER P -> 'P' */
+ { 0xB5, 0x54 }, /* BOPOMOFO FINAL LETTER T -> 'T' */
+ { 0xB6, 0x4B }, /* BOPOMOFO FINAL LETTER K -> 'K' */
+ { 0xB7, 0x48 }, /* BOPOMOFO FINAL LETTER H -> 'H' */
+ /* Entries for page 0x32 */
+ { 0xD0, 0x61 }, /* CIRCLED KATAKANA A -> 'a' */
+ { 0xD1, 0x69 }, /* CIRCLED KATAKANA I -> 'i' */
+ { 0xD2, 0x75 }, /* CIRCLED KATAKANA U -> 'u' */
+ { 0xD3, 0x65 }, /* CIRCLED KATAKANA E -> 'e' */
+ { 0xD4, 0x6F }, /* CIRCLED KATAKANA O -> 'o' */
+ /* Entries for page 0xA0 */
+ { 0x02, 0x69 }, /* YI SYLLABLE I -> 'i' */
+ { 0x0A, 0x61 }, /* YI SYLLABLE A -> 'a' */
+ { 0x11, 0x6F }, /* YI SYLLABLE O -> 'o' */
+ { 0x14, 0x65 }, /* YI SYLLABLE E -> 'e' */
+ /* Entries for page 0xC5 */
+ { 0x44, 0x61 }, /* HANGUL SYLLABLE A -> 'a' */
+ { 0xD0, 0x65 }, /* HANGUL SYLLABLE E -> 'e' */
+ /* Entries for page 0xC6 */
+ { 0x24, 0x6F }, /* HANGUL SYLLABLE O -> 'o' */
+ { 0xB0, 0x75 }, /* HANGUL SYLLABLE U -> 'u' */
+ /* Entries for page 0xC7 */
+ { 0x74, 0x69 }, /* HANGUL SYLLABLE I -> 'i' */
+ /* Entries for page 0xFB */
+ { 0x1D, 0x69 }, /* HEBREW LETTER YOD WITH HIRIQ -> 'i' */
+ { 0x20, 0x60 }, /* HEBREW LETTER ALTERNATIVE AYIN -> '`' */
+ { 0x21, 0x41 }, /* HEBREW LETTER WIDE ALEF -> 'A' */
+ { 0x22, 0x64 }, /* HEBREW LETTER WIDE DALET -> 'd' */
+ { 0x23, 0x68 }, /* HEBREW LETTER WIDE HE -> 'h' */
+ { 0x25, 0x6C }, /* HEBREW LETTER WIDE LAMED -> 'l' */
+ { 0x26, 0x6D }, /* HEBREW LETTER WIDE FINAL MEM -> 'm' */
+ { 0x27, 0x72 }, /* HEBREW LETTER WIDE RESH -> 'r' */
+ { 0x28, 0x74 }, /* HEBREW LETTER WIDE TAV -> 't' */
+ { 0x29, 0x2B }, /* HEBREW LETTER ALTERNATIVE PLUS SIGN -> '+' */
+ { 0x2B, 0x53 }, /* HEBREW LETTER SHIN WITH SIN DOT -> 'S' */
+ { 0x2D, 0x53 }, /* HEBREW LETTER SHIN WITH DAGESH AND SIN DOT -> 'S' */
+ { 0x2E, 0x61 }, /* HEBREW LETTER ALEF WITH PATAH -> 'a' */
+ { 0x2F, 0x61 }, /* HEBREW LETTER ALEF WITH QAMATS -> 'a' */
+ { 0x30, 0x41 }, /* HEBREW LETTER ALEF WITH MAPIQ -> 'A' */
+ { 0x31, 0x62 }, /* HEBREW LETTER BET WITH DAGESH -> 'b' */
+ { 0x32, 0x67 }, /* HEBREW LETTER GIMEL WITH DAGESH -> 'g' */
+ { 0x33, 0x64 }, /* HEBREW LETTER DALET WITH DAGESH -> 'd' */
+ { 0x34, 0x68 }, /* HEBREW LETTER HE WITH MAPIQ -> 'h' */
+ { 0x35, 0x76 }, /* HEBREW LETTER VAV WITH DAGESH -> 'v' */
+ { 0x36, 0x7A }, /* HEBREW LETTER ZAYIN WITH DAGESH -> 'z' */
+ { 0x38, 0x74 }, /* HEBREW LETTER TET WITH DAGESH -> 't' */
+ { 0x39, 0x79 }, /* HEBREW LETTER YOD WITH DAGESH -> 'y' */
+ { 0x3C, 0x6C }, /* HEBREW LETTER LAMED WITH DAGESH -> 'l' */
+ { 0x3E, 0x6D }, /* HEBREW LETTER MEM WITH DAGESH -> 'm' */
+ { 0x40, 0x6E }, /* HEBREW LETTER NUN WITH DAGESH -> 'n' */
+ { 0x41, 0x73 }, /* HEBREW LETTER SAMEKH WITH DAGESH -> 's' */
+ { 0x43, 0x70 }, /* HEBREW LETTER FINAL PE WITH DAGESH -> 'p' */
+ { 0x44, 0x70 }, /* HEBREW LETTER PE WITH DAGESH -> 'p' */
+ { 0x47, 0x6B }, /* HEBREW LETTER QOF WITH DAGESH -> 'k' */
+ { 0x48, 0x72 }, /* HEBREW LETTER RESH WITH DAGESH -> 'r' */
+ { 0x4A, 0x74 }, /* HEBREW LETTER TAV WITH DAGESH -> 't' */
+ { 0x4B, 0x6F }, /* HEBREW LETTER VAV WITH HOLAM -> 'o' */
+ { 0x4C, 0x76 }, /* HEBREW LETTER BET WITH RAFE -> 'v' */
+ { 0x4E, 0x66 }, /* HEBREW LETTER PE WITH RAFE -> 'f' */
+ /* Entries for page 0xFE */
+ { 0x23, 0x7E }, /* COMBINING DOUBLE TILDE RIGHT HALF -> '~' */
+ { 0x32, 0x2D }, /* PRESENTATION FORM FOR VERTICAL EN DASH -> '-' */
+ { 0x33, 0x5F }, /* PRESENTATION FORM FOR VERTICAL LOW LINE -> '_' */
+ { 0x34, 0x5F }, /* PRESENTATION FORM FOR VERTICAL WAVY LOW LINE -> '_' */
+ { 0x35, 0x28 }, /* PRESENTATION FORM FOR VERTICAL LEFT PARENTHESIS -> '(' */
+ { 0x37, 0x7B }, /* PRESENTATION FORM FOR VERTICAL LEFT CURLY BRACKET -> '{' */
+ { 0x39, 0x5B }, /* PRESENTATION FORM FOR VERTICAL LEFT TORTOISE SHELL BRACKET -> '[' */
+ { 0x3F, 0x3C }, /* PRESENTATION FORM FOR VERTICAL LEFT ANGLE BRACKET -> '<' */
+ { 0x41, 0x5B }, /* PRESENTATION FORM FOR VERTICAL LEFT CORNER BRACKET -> '[' */
+ { 0x43, 0x7B }, /* PRESENTATION FORM FOR VERTICAL LEFT WHITE CORNER BRACKET -> '{' */
+ { 0x44, 0x7D }, /* PRESENTATION FORM FOR VERTICAL RIGHT WHITE CORNER BRACKET -> '}' */
+ { 0x50, 0x2C }, /* SMALL COMMA -> ',' */
+ { 0x51, 0x2C }, /* SMALL IDEOGRAPHIC COMMA -> ',' */
+ { 0x52, 0x2E }, /* SMALL FULL STOP -> '.' */
+ { 0x54, 0x3B }, /* SMALL SEMICOLON -> ';' */
+ { 0x55, 0x3A }, /* SMALL COLON -> ':' */
+ { 0x56, 0x3F }, /* SMALL QUESTION MARK -> '?' */
+ { 0x57, 0x21 }, /* SMALL EXCLAMATION MARK -> '!' */
+ { 0x58, 0x2D }, /* SMALL EM DASH -> '-' */
+ { 0x59, 0x28 }, /* SMALL LEFT PARENTHESIS -> '(' */
+ { 0x5A, 0x29 }, /* SMALL RIGHT PARENTHESIS -> ')' */
+ { 0x5B, 0x7B }, /* SMALL LEFT CURLY BRACKET -> '{' */
+ { 0x5C, 0x7D }, /* SMALL RIGHT CURLY BRACKET -> '}' */
+ { 0x5D, 0x7B }, /* SMALL LEFT TORTOISE SHELL BRACKET -> '{' */
+ { 0x5E, 0x7D }, /* SMALL RIGHT TORTOISE SHELL BRACKET -> '}' */
+ { 0x5F, 0x23 }, /* SMALL NUMBER SIGN -> '#' */
+ { 0x60, 0x26 }, /* SMALL AMPERSAND -> '&' */
+ { 0x61, 0x2A }, /* SMALL ASTERISK -> '*' */
+ { 0x62, 0x2B }, /* SMALL PLUS SIGN -> '+' */
+ { 0x63, 0x2D }, /* SMALL HYPHEN-MINUS -> '-' */
+ { 0x64, 0x3C }, /* SMALL LESS-THAN SIGN -> '<' */
+ { 0x65, 0x3E }, /* SMALL GREATER-THAN SIGN -> '>' */
+ { 0x66, 0x3D }, /* SMALL EQUALS SIGN -> '=' */
+ { 0x68, 0x5C }, /* SMALL REVERSE SOLIDUS -> '\' */
+ { 0x69, 0x24 }, /* SMALL DOLLAR SIGN -> '$' */
+ { 0x6A, 0x25 }, /* SMALL PERCENT SIGN -> '%' */
+ { 0x6B, 0x40 }, /* SMALL COMMERCIAL AT -> '@' */
+ /* Entries for page 0xFF */
+ { 0x61, 0x2E }, /* HALFWIDTH IDEOGRAPHIC FULL STOP -> '.' */
+ { 0x62, 0x5B }, /* HALFWIDTH LEFT CORNER BRACKET -> '[' */
+ { 0x63, 0x5D }, /* HALFWIDTH RIGHT CORNER BRACKET -> ']' */
+ { 0x64, 0x2C }, /* HALFWIDTH IDEOGRAPHIC COMMA -> ',' */
+ { 0x65, 0x2A }, /* HALFWIDTH KATAKANA MIDDLE DOT -> '*' */
+ { 0x67, 0x61 }, /* HALFWIDTH KATAKANA LETTER SMALL A -> 'a' */
+ { 0x68, 0x69 }, /* HALFWIDTH KATAKANA LETTER SMALL I -> 'i' */
+ { 0x69, 0x75 }, /* HALFWIDTH KATAKANA LETTER SMALL U -> 'u' */
+ { 0x6A, 0x65 }, /* HALFWIDTH KATAKANA LETTER SMALL E -> 'e' */
+ { 0x6B, 0x6F }, /* HALFWIDTH KATAKANA LETTER SMALL O -> 'o' */
+ { 0x70, 0x2D }, /* HALFWIDTH KATAKANA-HIRAGANA PROLONGED SOUND MARK -> '-' */
+ { 0x71, 0x61 }, /* HALFWIDTH KATAKANA LETTER A -> 'a' */
+ { 0x72, 0x69 }, /* HALFWIDTH KATAKANA LETTER I -> 'i' */
+ { 0x73, 0x75 }, /* HALFWIDTH KATAKANA LETTER U -> 'u' */
+ { 0x74, 0x65 }, /* HALFWIDTH KATAKANA LETTER E -> 'e' */
+ { 0x75, 0x6F }, /* HALFWIDTH KATAKANA LETTER O -> 'o' */
+ { 0x9D, 0x6E }, /* HALFWIDTH KATAKANA LETTER N -> 'n' */
+ { 0x9E, 0x3A }, /* HALFWIDTH KATAKANA VOICED SOUND MARK -> ':' */
+ { 0x9F, 0x3B }, /* HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK -> ';' */
+ { 0xA1, 0x67 }, /* HALFWIDTH HANGUL LETTER KIYEOK -> 'g' */
+ { 0xA4, 0x6E }, /* HALFWIDTH HANGUL LETTER NIEUN -> 'n' */
+ { 0xA7, 0x64 }, /* HALFWIDTH HANGUL LETTER TIKEUT -> 'd' */
+ { 0xA9, 0x72 }, /* HALFWIDTH HANGUL LETTER RIEUL -> 'r' */
+ { 0xB1, 0x6D }, /* HALFWIDTH HANGUL LETTER MIEUM -> 'm' */
+ { 0xB2, 0x62 }, /* HALFWIDTH HANGUL LETTER PIEUP -> 'b' */
+ { 0xB5, 0x73 }, /* HALFWIDTH HANGUL LETTER SIOS -> 's' */
+ { 0xB8, 0x6A }, /* HALFWIDTH HANGUL LETTER CIEUC -> 'j' */
+ { 0xBA, 0x63 }, /* HALFWIDTH HANGUL LETTER CHIEUCH -> 'c' */
+ { 0xBB, 0x6B }, /* HALFWIDTH HANGUL LETTER KHIEUKH -> 'k' */
+ { 0xBC, 0x74 }, /* HALFWIDTH HANGUL LETTER THIEUTH -> 't' */
+ { 0xBD, 0x70 }, /* HALFWIDTH HANGUL LETTER PHIEUPH -> 'p' */
+ { 0xBE, 0x68 }, /* HALFWIDTH HANGUL LETTER HIEUH -> 'h' */
+ { 0xC2, 0x61 }, /* HALFWIDTH HANGUL LETTER A -> 'a' */
+ { 0xC7, 0x65 }, /* HALFWIDTH HANGUL LETTER E -> 'e' */
+ { 0xCC, 0x6F }, /* HALFWIDTH HANGUL LETTER O -> 'o' */
+ { 0xD3, 0x75 }, /* HALFWIDTH HANGUL LETTER U -> 'u' */
+ { 0xDC, 0x69 }, /* HALFWIDTH HANGUL LETTER I -> 'i' */
+ { 0xE2, 0x21 }, /* FULLWIDTH NOT SIGN -> '!' */
+ { 0xE3, 0x2D }, /* FULLWIDTH MACRON -> '-' */
+ { 0xE4, 0x7C }, /* FULLWIDTH BROKEN BAR -> '|' */
+ { 0xE8, 0x7C }, /* HALFWIDTH FORMS LIGHT VERTICAL -> '|' */
+ { 0xE9, 0x3C }, /* HALFWIDTH LEFTWARDS ARROW -> '<' */
+ { 0xEA, 0x5E }, /* HALFWIDTH UPWARDS ARROW -> '^' */
+ { 0xEB, 0x3E }, /* HALFWIDTH RIGHTWARDS ARROW -> '>' */
+ { 0xEC, 0x76 }, /* HALFWIDTH DOWNWARDS ARROW -> 'v' */
+ { 0xED, 0x23 }, /* HALFWIDTH BLACK SQUARE -> '#' */
+ { 0xEE, 0x4F }, /* HALFWIDTH WHITE CIRCLE -> 'O' */
+ { 0xF9, 0x7B }, /* INTERLINEAR ANNOTATION ANCHOR -> '{' */
+ { 0xFA, 0x7C }, /* INTERLINEAR ANNOTATION SEPARATOR -> '|' */
+ { 0xFB, 0x7D }, /* INTERLINEAR ANNOTATION TERMINATOR -> '}' */
+};
+
+#define UCS_PAGE_ENTRY_RANGE_MARKER 0
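
In the table above, an entry whose fallback byte is 0x00 (UCS_PAGE_ENTRY_RANGE_MARKER, shown as "-> ..." in the comments) does not map to NUL: it opens a range that ends at the offset of the following entry, and that following entry also carries the shared fallback. For example, { 0x40, 0x00 } followed by { 0x45, 0x6F } maps offsets 0x40 through 0x45 to 'o'. A minimal lookup sketch, assuming a hypothetical ucs_page_entry layout and helper name (the real consumer of these shipped tables is drivers/tty/vt/ucs.c and may differ):

	/* Sketch only: struct layout and function name are assumptions. */
	struct ucs_page_entry {
		u8 offset;   /* low byte of the code point within this page */
		u8 fallback; /* ASCII fallback, or UCS_PAGE_ENTRY_RANGE_MARKER */
	};

	static u8 ucs_page_entry_fallback(const struct ucs_page_entry *e,
					  unsigned int count, u8 offset)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			if (e[i].fallback != UCS_PAGE_ENTRY_RANGE_MARKER) {
				if (e[i].offset == offset)
					return e[i].fallback;
				continue;
			}
			/*
			 * Range marker: by construction it is always followed
			 * by the range-end entry, which holds the fallback.
			 */
			if (offset >= e[i].offset && offset <= e[i + 1].offset)
				return e[i + 1].fallback;
			i++; /* skip the range-end entry */
		}
		return 0; /* no single-character fallback for this code point */
	}

Since entries within a page are sorted by offset, a binary search would work equally well; the linear walk above just keeps the range-marker handling easy to follow.
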
diff --git a/drivers/tty/vt/ucs_recompose_table.h_shipped b/drivers/tty/vt/ucs_recompose_table.h_shipped
new file mode 100644
index 000000000000..bd91edde5d19
--- /dev/null
+++ b/drivers/tty/vt/ucs_recompose_table.h_shipped
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ucs_recompose_table.h - Unicode character recomposition
+ *
+ * Auto-generated by gen_ucs_recompose_table.py
+ *
+ * Unicode Version: 16.0.0
+ *
+ * This file contains a table with most commonly used Latin, Greek, and
+ * Cyrillic recomposition pairs only (71 entries). To generate a table with
+ * all possible recomposition pairs from the Unicode BMP (1000 entries)
+ * instead, run:
+ *
+ * python gen_ucs_recompose_table.py --full
+ */
+
+/*
+ * Table of most commonly used Latin, Greek, and Cyrillic recomposition pairs only
+ * Sorted by base character and then combining mark for binary search
+ */
+static const struct ucs_recomposition ucs_recomposition_table[] = {
+ { 0x0041, 0x0300, 0x00C0 }, /* LATIN CAPITAL LETTER A + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER A WITH GRAVE */
+ { 0x0041, 0x0301, 0x00C1 }, /* LATIN CAPITAL LETTER A + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER A WITH ACUTE */
+ { 0x0041, 0x0302, 0x00C2 }, /* LATIN CAPITAL LETTER A + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER A WITH CIRCUMFLEX */
+ { 0x0041, 0x0303, 0x00C3 }, /* LATIN CAPITAL LETTER A + COMBINING TILDE = LATIN CAPITAL LETTER A WITH TILDE */
+ { 0x0041, 0x0308, 0x00C4 }, /* LATIN CAPITAL LETTER A + COMBINING DIAERESIS = LATIN CAPITAL LETTER A WITH DIAERESIS */
+ { 0x0041, 0x030A, 0x00C5 }, /* LATIN CAPITAL LETTER A + COMBINING RING ABOVE = LATIN CAPITAL LETTER A WITH RING ABOVE */
+ { 0x0043, 0x0327, 0x00C7 }, /* LATIN CAPITAL LETTER C + COMBINING CEDILLA = LATIN CAPITAL LETTER C WITH CEDILLA */
+ { 0x0045, 0x0300, 0x00C8 }, /* LATIN CAPITAL LETTER E + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER E WITH GRAVE */
+ { 0x0045, 0x0301, 0x00C9 }, /* LATIN CAPITAL LETTER E + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER E WITH ACUTE */
+ { 0x0045, 0x0302, 0x00CA }, /* LATIN CAPITAL LETTER E + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER E WITH CIRCUMFLEX */
+ { 0x0045, 0x0308, 0x00CB }, /* LATIN CAPITAL LETTER E + COMBINING DIAERESIS = LATIN CAPITAL LETTER E WITH DIAERESIS */
+ { 0x0049, 0x0300, 0x00CC }, /* LATIN CAPITAL LETTER I + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER I WITH GRAVE */
+ { 0x0049, 0x0301, 0x00CD }, /* LATIN CAPITAL LETTER I + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER I WITH ACUTE */
+ { 0x0049, 0x0302, 0x00CE }, /* LATIN CAPITAL LETTER I + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER I WITH CIRCUMFLEX */
+ { 0x0049, 0x0308, 0x00CF }, /* LATIN CAPITAL LETTER I + COMBINING DIAERESIS = LATIN CAPITAL LETTER I WITH DIAERESIS */
+ { 0x004E, 0x0303, 0x00D1 }, /* LATIN CAPITAL LETTER N + COMBINING TILDE = LATIN CAPITAL LETTER N WITH TILDE */
+ { 0x004F, 0x0300, 0x00D2 }, /* LATIN CAPITAL LETTER O + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER O WITH GRAVE */
+ { 0x004F, 0x0301, 0x00D3 }, /* LATIN CAPITAL LETTER O + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER O WITH ACUTE */
+ { 0x004F, 0x0302, 0x00D4 }, /* LATIN CAPITAL LETTER O + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER O WITH CIRCUMFLEX */
+ { 0x004F, 0x0303, 0x00D5 }, /* LATIN CAPITAL LETTER O + COMBINING TILDE = LATIN CAPITAL LETTER O WITH TILDE */
+ { 0x004F, 0x0308, 0x00D6 }, /* LATIN CAPITAL LETTER O + COMBINING DIAERESIS = LATIN CAPITAL LETTER O WITH DIAERESIS */
+ { 0x0055, 0x0300, 0x00D9 }, /* LATIN CAPITAL LETTER U + COMBINING GRAVE ACCENT = LATIN CAPITAL LETTER U WITH GRAVE */
+ { 0x0055, 0x0301, 0x00DA }, /* LATIN CAPITAL LETTER U + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER U WITH ACUTE */
+ { 0x0055, 0x0302, 0x00DB }, /* LATIN CAPITAL LETTER U + COMBINING CIRCUMFLEX ACCENT = LATIN CAPITAL LETTER U WITH CIRCUMFLEX */
+ { 0x0055, 0x0308, 0x00DC }, /* LATIN CAPITAL LETTER U + COMBINING DIAERESIS = LATIN CAPITAL LETTER U WITH DIAERESIS */
+ { 0x0059, 0x0301, 0x00DD }, /* LATIN CAPITAL LETTER Y + COMBINING ACUTE ACCENT = LATIN CAPITAL LETTER Y WITH ACUTE */
+ { 0x0061, 0x0300, 0x00E0 }, /* LATIN SMALL LETTER A + COMBINING GRAVE ACCENT = LATIN SMALL LETTER A WITH GRAVE */
+ { 0x0061, 0x0301, 0x00E1 }, /* LATIN SMALL LETTER A + COMBINING ACUTE ACCENT = LATIN SMALL LETTER A WITH ACUTE */
+ { 0x0061, 0x0302, 0x00E2 }, /* LATIN SMALL LETTER A + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER A WITH CIRCUMFLEX */
+ { 0x0061, 0x0303, 0x00E3 }, /* LATIN SMALL LETTER A + COMBINING TILDE = LATIN SMALL LETTER A WITH TILDE */
+ { 0x0061, 0x0308, 0x00E4 }, /* LATIN SMALL LETTER A + COMBINING DIAERESIS = LATIN SMALL LETTER A WITH DIAERESIS */
+ { 0x0061, 0x030A, 0x00E5 }, /* LATIN SMALL LETTER A + COMBINING RING ABOVE = LATIN SMALL LETTER A WITH RING ABOVE */
+ { 0x0063, 0x0327, 0x00E7 }, /* LATIN SMALL LETTER C + COMBINING CEDILLA = LATIN SMALL LETTER C WITH CEDILLA */
+ { 0x0065, 0x0300, 0x00E8 }, /* LATIN SMALL LETTER E + COMBINING GRAVE ACCENT = LATIN SMALL LETTER E WITH GRAVE */
+ { 0x0065, 0x0301, 0x00E9 }, /* LATIN SMALL LETTER E + COMBINING ACUTE ACCENT = LATIN SMALL LETTER E WITH ACUTE */
+ { 0x0065, 0x0302, 0x00EA }, /* LATIN SMALL LETTER E + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER E WITH CIRCUMFLEX */
+ { 0x0065, 0x0308, 0x00EB }, /* LATIN SMALL LETTER E + COMBINING DIAERESIS = LATIN SMALL LETTER E WITH DIAERESIS */
+ { 0x0069, 0x0300, 0x00EC }, /* LATIN SMALL LETTER I + COMBINING GRAVE ACCENT = LATIN SMALL LETTER I WITH GRAVE */
+ { 0x0069, 0x0301, 0x00ED }, /* LATIN SMALL LETTER I + COMBINING ACUTE ACCENT = LATIN SMALL LETTER I WITH ACUTE */
+ { 0x0069, 0x0302, 0x00EE }, /* LATIN SMALL LETTER I + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER I WITH CIRCUMFLEX */
+ { 0x0069, 0x0308, 0x00EF }, /* LATIN SMALL LETTER I + COMBINING DIAERESIS = LATIN SMALL LETTER I WITH DIAERESIS */
+ { 0x006E, 0x0303, 0x00F1 }, /* LATIN SMALL LETTER N + COMBINING TILDE = LATIN SMALL LETTER N WITH TILDE */
+ { 0x006F, 0x0300, 0x00F2 }, /* LATIN SMALL LETTER O + COMBINING GRAVE ACCENT = LATIN SMALL LETTER O WITH GRAVE */
+ { 0x006F, 0x0301, 0x00F3 }, /* LATIN SMALL LETTER O + COMBINING ACUTE ACCENT = LATIN SMALL LETTER O WITH ACUTE */
+ { 0x006F, 0x0302, 0x00F4 }, /* LATIN SMALL LETTER O + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER O WITH CIRCUMFLEX */
+ { 0x006F, 0x0303, 0x00F5 }, /* LATIN SMALL LETTER O + COMBINING TILDE = LATIN SMALL LETTER O WITH TILDE */
+ { 0x006F, 0x0308, 0x00F6 }, /* LATIN SMALL LETTER O + COMBINING DIAERESIS = LATIN SMALL LETTER O WITH DIAERESIS */
+ { 0x0075, 0x0300, 0x00F9 }, /* LATIN SMALL LETTER U + COMBINING GRAVE ACCENT = LATIN SMALL LETTER U WITH GRAVE */
+ { 0x0075, 0x0301, 0x00FA }, /* LATIN SMALL LETTER U + COMBINING ACUTE ACCENT = LATIN SMALL LETTER U WITH ACUTE */
+ { 0x0075, 0x0302, 0x00FB }, /* LATIN SMALL LETTER U + COMBINING CIRCUMFLEX ACCENT = LATIN SMALL LETTER U WITH CIRCUMFLEX */
+ { 0x0075, 0x0308, 0x00FC }, /* LATIN SMALL LETTER U + COMBINING DIAERESIS = LATIN SMALL LETTER U WITH DIAERESIS */
+ { 0x0079, 0x0301, 0x00FD }, /* LATIN SMALL LETTER Y + COMBINING ACUTE ACCENT = LATIN SMALL LETTER Y WITH ACUTE */
+ { 0x0079, 0x0308, 0x00FF }, /* LATIN SMALL LETTER Y + COMBINING DIAERESIS = LATIN SMALL LETTER Y WITH DIAERESIS */
+ { 0x0391, 0x0301, 0x0386 }, /* GREEK CAPITAL LETTER ALPHA + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER ALPHA WITH TONOS */
+ { 0x0395, 0x0301, 0x0388 }, /* GREEK CAPITAL LETTER EPSILON + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER EPSILON WITH TONOS */
+ { 0x0397, 0x0301, 0x0389 }, /* GREEK CAPITAL LETTER ETA + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER ETA WITH TONOS */
+ { 0x0399, 0x0301, 0x038A }, /* GREEK CAPITAL LETTER IOTA + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER IOTA WITH TONOS */
+ { 0x039F, 0x0301, 0x038C }, /* GREEK CAPITAL LETTER OMICRON + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER OMICRON WITH TONOS */
+ { 0x03A5, 0x0301, 0x038E }, /* GREEK CAPITAL LETTER UPSILON + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER UPSILON WITH TONOS */
+ { 0x03A9, 0x0301, 0x038F }, /* GREEK CAPITAL LETTER OMEGA + COMBINING ACUTE ACCENT = GREEK CAPITAL LETTER OMEGA WITH TONOS */
+ { 0x03B1, 0x0301, 0x03AC }, /* GREEK SMALL LETTER ALPHA + COMBINING ACUTE ACCENT = GREEK SMALL LETTER ALPHA WITH TONOS */
+ { 0x03B5, 0x0301, 0x03AD }, /* GREEK SMALL LETTER EPSILON + COMBINING ACUTE ACCENT = GREEK SMALL LETTER EPSILON WITH TONOS */
+ { 0x03B7, 0x0301, 0x03AE }, /* GREEK SMALL LETTER ETA + COMBINING ACUTE ACCENT = GREEK SMALL LETTER ETA WITH TONOS */
+ { 0x03B9, 0x0301, 0x03AF }, /* GREEK SMALL LETTER IOTA + COMBINING ACUTE ACCENT = GREEK SMALL LETTER IOTA WITH TONOS */
+ { 0x03BF, 0x0301, 0x03CC }, /* GREEK SMALL LETTER OMICRON + COMBINING ACUTE ACCENT = GREEK SMALL LETTER OMICRON WITH TONOS */
+ { 0x03C5, 0x0301, 0x03CD }, /* GREEK SMALL LETTER UPSILON + COMBINING ACUTE ACCENT = GREEK SMALL LETTER UPSILON WITH TONOS */
+ { 0x03C9, 0x0301, 0x03CE }, /* GREEK SMALL LETTER OMEGA + COMBINING ACUTE ACCENT = GREEK SMALL LETTER OMEGA WITH TONOS */
+ { 0x0418, 0x0306, 0x0419 }, /* CYRILLIC CAPITAL LETTER I + COMBINING BREVE = CYRILLIC CAPITAL LETTER SHORT I */
+ { 0x0423, 0x0306, 0x040E }, /* CYRILLIC CAPITAL LETTER U + COMBINING BREVE = CYRILLIC CAPITAL LETTER SHORT U */
+ { 0x0438, 0x0306, 0x0439 }, /* CYRILLIC SMALL LETTER I + COMBINING BREVE = CYRILLIC SMALL LETTER SHORT I */
+ { 0x0443, 0x0306, 0x045E }, /* CYRILLIC SMALL LETTER U + COMBINING BREVE = CYRILLIC SMALL LETTER SHORT U */
+};
+
+/*
+ * Boundary values for quick rejection
+ * These are calculated by analyzing the table during generation
+ */
+#define UCS_RECOMPOSE_MIN_BASE 0x0041
+#define UCS_RECOMPOSE_MAX_BASE 0x0443
+#define UCS_RECOMPOSE_MIN_MARK 0x0300
+#define UCS_RECOMPOSE_MAX_MARK 0x0327
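
The table's header comment notes that it is sorted by base character and then combining mark, and the boundary values above permit a cheap rejection before any search. A minimal lookup sketch, assuming the field names base/mark/result and a hypothetical helper name (the actual lookup lives in drivers/tty/vt/ucs.c and may differ):

	/* Sketch only: field and function names are assumptions. */
	struct ucs_recomposition {
		u16 base;   /* base character */
		u16 mark;   /* combining mark */
		u16 result; /* precomposed character */
	};

	static u16 ucs_recompose(u16 base, u16 mark)
	{
		int lo = 0, hi = ARRAY_SIZE(ucs_recomposition_table) - 1;

		/* quick rejection via the precomputed boundary values */
		if (base < UCS_RECOMPOSE_MIN_BASE || base > UCS_RECOMPOSE_MAX_BASE ||
		    mark < UCS_RECOMPOSE_MIN_MARK || mark > UCS_RECOMPOSE_MAX_MARK)
			return 0;

		/* binary search over the (base, mark)-sorted table */
		while (lo <= hi) {
			int mid = (lo + hi) / 2;
			const struct ucs_recomposition *r =
				&ucs_recomposition_table[mid];

			if (r->base == base && r->mark == mark)
				return r->result;
			if (r->base < base ||
			    (r->base == base && r->mark < mark))
				lo = mid + 1;
			else
				hi = mid - 1;
		}
		return 0; /* no precomposed form in this table */
	}

A caller can treat a zero return as "emit the base and the combining mark separately", since U+0000 never occurs as a recomposition result.
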
diff --git a/drivers/tty/vt/ucs_width_table.h_shipped b/drivers/tty/vt/ucs_width_table.h_shipped
new file mode 100644
index 000000000000..6fcb8f1d577d
--- /dev/null
+++ b/drivers/tty/vt/ucs_width_table.h_shipped
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ucs_width_table.h - Unicode character width
+ *
+ * Auto-generated by gen_ucs_width_table.py
+ *
+ * Unicode Version: 16.0.0
+ */
+
+/* Zero-width character ranges (BMP - Basic Multilingual Plane, U+0000 to U+FFFF) */
+static const struct ucs_interval16 ucs_zero_width_bmp_ranges[] = {
+ { 0x00AD, 0x00AD }, /* SOFT HYPHEN */
+ { 0x0300, 0x036F }, /* COMBINING GRAVE ACCENT - COMBINING LATIN SMALL LETTER X */
+ { 0x0483, 0x0489 }, /* COMBINING CYRILLIC TITLO - COMBINING CYRILLIC MILLIONS SIGN */
+ { 0x0591, 0x05BD }, /* HEBREW ACCENT ETNAHTA - HEBREW POINT METEG */
+ { 0x05BF, 0x05BF }, /* HEBREW POINT RAFE */
+ { 0x05C1, 0x05C2 }, /* HEBREW POINT SHIN DOT - HEBREW POINT SIN DOT */
+ { 0x05C4, 0x05C5 }, /* HEBREW MARK UPPER DOT - HEBREW MARK LOWER DOT */
+ { 0x05C7, 0x05C7 }, /* HEBREW POINT QAMATS QATAN */
+ { 0x0600, 0x0605 }, /* ARABIC NUMBER SIGN - ARABIC NUMBER MARK ABOVE */
+ { 0x0610, 0x061A }, /* ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM - ARABIC SMALL KASRA */
+ { 0x061C, 0x061C }, /* ARABIC LETTER MARK */
+ { 0x064B, 0x065F }, /* ARABIC FATHATAN - ARABIC WAVY HAMZA BELOW */
+ { 0x0670, 0x0670 }, /* ARABIC LETTER SUPERSCRIPT ALEF */
+ { 0x06D6, 0x06DD }, /* ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA - ARABIC END OF AYAH */
+ { 0x06DF, 0x06E4 }, /* ARABIC SMALL HIGH ROUNDED ZERO - ARABIC SMALL HIGH MADDA */
+ { 0x06E7, 0x06E8 }, /* ARABIC SMALL HIGH YEH - ARABIC SMALL HIGH NOON */
+ { 0x06EA, 0x06ED }, /* ARABIC EMPTY CENTRE LOW STOP - ARABIC SMALL LOW MEEM */
+ { 0x070F, 0x070F }, /* SYRIAC ABBREVIATION MARK */
+ { 0x0711, 0x0711 }, /* SYRIAC LETTER SUPERSCRIPT ALAPH */
+ { 0x0730, 0x074A }, /* SYRIAC PTHAHA ABOVE - SYRIAC BARREKH */
+ { 0x07A6, 0x07B0 }, /* THAANA ABAFILI - THAANA SUKUN */
+ { 0x07EB, 0x07F3 }, /* NKO COMBINING SHORT HIGH TONE - NKO COMBINING DOUBLE DOT ABOVE */
+ { 0x07FD, 0x07FD }, /* NKO DANTAYALAN */
+ { 0x0816, 0x0819 }, /* SAMARITAN MARK IN - SAMARITAN MARK DAGESH */
+ { 0x081B, 0x0823 }, /* SAMARITAN MARK EPENTHETIC YUT - SAMARITAN VOWEL SIGN A */
+ { 0x0825, 0x0827 }, /* SAMARITAN VOWEL SIGN SHORT A - SAMARITAN VOWEL SIGN U */
+ { 0x0829, 0x082D }, /* SAMARITAN VOWEL SIGN LONG I - SAMARITAN MARK NEQUDAA */
+ { 0x0859, 0x085B }, /* MANDAIC AFFRICATION MARK - MANDAIC GEMINATION MARK */
+ { 0x0890, 0x0891 }, /* ARABIC POUND MARK ABOVE - ARABIC PIASTRE MARK ABOVE */
+ { 0x0897, 0x089F }, /* ARABIC PEPET - ARABIC HALF MADDA OVER MADDA */
+ { 0x08CA, 0x0903 }, /* ARABIC SMALL HIGH FARSI YEH - DEVANAGARI SIGN VISARGA */
+ { 0x093A, 0x093C }, /* DEVANAGARI VOWEL SIGN OE - DEVANAGARI SIGN NUKTA */
+ { 0x093E, 0x094F }, /* DEVANAGARI VOWEL SIGN AA - DEVANAGARI VOWEL SIGN AW */
+ { 0x0951, 0x0957 }, /* DEVANAGARI STRESS SIGN UDATTA - DEVANAGARI VOWEL SIGN UUE */
+ { 0x0962, 0x0963 }, /* DEVANAGARI VOWEL SIGN VOCALIC L - DEVANAGARI VOWEL SIGN VOCALIC LL */
+ { 0x0981, 0x0983 }, /* BENGALI SIGN CANDRABINDU - BENGALI SIGN VISARGA */
+ { 0x09BC, 0x09BC }, /* BENGALI SIGN NUKTA */
+ { 0x09BE, 0x09C4 }, /* BENGALI VOWEL SIGN AA - BENGALI VOWEL SIGN VOCALIC RR */
+ { 0x09C7, 0x09C8 }, /* BENGALI VOWEL SIGN E - BENGALI VOWEL SIGN AI */
+ { 0x09CB, 0x09CD }, /* BENGALI VOWEL SIGN O - BENGALI SIGN VIRAMA */
+ { 0x09D7, 0x09D7 }, /* BENGALI AU LENGTH MARK */
+ { 0x09E2, 0x09E3 }, /* BENGALI VOWEL SIGN VOCALIC L - BENGALI VOWEL SIGN VOCALIC LL */
+ { 0x09FE, 0x09FE }, /* BENGALI SANDHI MARK */
+ { 0x0A01, 0x0A03 }, /* GURMUKHI SIGN ADAK BINDI - GURMUKHI SIGN VISARGA */
+ { 0x0A3C, 0x0A3C }, /* GURMUKHI SIGN NUKTA */
+ { 0x0A3E, 0x0A42 }, /* GURMUKHI VOWEL SIGN AA - GURMUKHI VOWEL SIGN UU */
+ { 0x0A47, 0x0A48 }, /* GURMUKHI VOWEL SIGN EE - GURMUKHI VOWEL SIGN AI */
+ { 0x0A4B, 0x0A4D }, /* GURMUKHI VOWEL SIGN OO - GURMUKHI SIGN VIRAMA */
+ { 0x0A51, 0x0A51 }, /* GURMUKHI SIGN UDAAT */
+ { 0x0A70, 0x0A71 }, /* GURMUKHI TIPPI - GURMUKHI ADDAK */
+ { 0x0A75, 0x0A75 }, /* GURMUKHI SIGN YAKASH */
+ { 0x0A81, 0x0A83 }, /* GUJARATI SIGN CANDRABINDU - GUJARATI SIGN VISARGA */
+ { 0x0ABC, 0x0ABC }, /* GUJARATI SIGN NUKTA */
+ { 0x0ABE, 0x0AC5 }, /* GUJARATI VOWEL SIGN AA - GUJARATI VOWEL SIGN CANDRA E */
+ { 0x0AC7, 0x0AC9 }, /* GUJARATI VOWEL SIGN E - GUJARATI VOWEL SIGN CANDRA O */
+ { 0x0ACB, 0x0ACD }, /* GUJARATI VOWEL SIGN O - GUJARATI SIGN VIRAMA */
+ { 0x0AE2, 0x0AE3 }, /* GUJARATI VOWEL SIGN VOCALIC L - GUJARATI VOWEL SIGN VOCALIC LL */
+ { 0x0AFA, 0x0AFF }, /* GUJARATI SIGN SUKUN - GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE */
+ { 0x0B01, 0x0B03 }, /* ORIYA SIGN CANDRABINDU - ORIYA SIGN VISARGA */
+ { 0x0B3C, 0x0B3C }, /* ORIYA SIGN NUKTA */
+ { 0x0B3E, 0x0B44 }, /* ORIYA VOWEL SIGN AA - ORIYA VOWEL SIGN VOCALIC RR */
+ { 0x0B47, 0x0B48 }, /* ORIYA VOWEL SIGN E - ORIYA VOWEL SIGN AI */
+ { 0x0B4B, 0x0B4D }, /* ORIYA VOWEL SIGN O - ORIYA SIGN VIRAMA */
+ { 0x0B55, 0x0B57 }, /* ORIYA SIGN OVERLINE - ORIYA AU LENGTH MARK */
+ { 0x0B62, 0x0B63 }, /* ORIYA VOWEL SIGN VOCALIC L - ORIYA VOWEL SIGN VOCALIC LL */
+ { 0x0B82, 0x0B82 }, /* TAMIL SIGN ANUSVARA */
+ { 0x0BBE, 0x0BC2 }, /* TAMIL VOWEL SIGN AA - TAMIL VOWEL SIGN UU */
+ { 0x0BC6, 0x0BC8 }, /* TAMIL VOWEL SIGN E - TAMIL VOWEL SIGN AI */
+ { 0x0BCA, 0x0BCD }, /* TAMIL VOWEL SIGN O - TAMIL SIGN VIRAMA */
+ { 0x0BD7, 0x0BD7 }, /* TAMIL AU LENGTH MARK */
+ { 0x0C00, 0x0C04 }, /* TELUGU SIGN COMBINING CANDRABINDU ABOVE - TELUGU SIGN COMBINING ANUSVARA ABOVE */
+ { 0x0C3C, 0x0C3C }, /* TELUGU SIGN NUKTA */
+ { 0x0C3E, 0x0C44 }, /* TELUGU VOWEL SIGN AA - TELUGU VOWEL SIGN VOCALIC RR */
+ { 0x0C46, 0x0C48 }, /* TELUGU VOWEL SIGN E - TELUGU VOWEL SIGN AI */
+ { 0x0C4A, 0x0C4D }, /* TELUGU VOWEL SIGN O - TELUGU SIGN VIRAMA */
+ { 0x0C55, 0x0C56 }, /* TELUGU LENGTH MARK - TELUGU AI LENGTH MARK */
+ { 0x0C62, 0x0C63 }, /* TELUGU VOWEL SIGN VOCALIC L - TELUGU VOWEL SIGN VOCALIC LL */
+ { 0x0C81, 0x0C83 }, /* KANNADA SIGN CANDRABINDU - KANNADA SIGN VISARGA */
+ { 0x0CBC, 0x0CBC }, /* KANNADA SIGN NUKTA */
+ { 0x0CBE, 0x0CC4 }, /* KANNADA VOWEL SIGN AA - KANNADA VOWEL SIGN VOCALIC RR */
+ { 0x0CC6, 0x0CC8 }, /* KANNADA VOWEL SIGN E - KANNADA VOWEL SIGN AI */
+ { 0x0CCA, 0x0CCD }, /* KANNADA VOWEL SIGN O - KANNADA SIGN VIRAMA */
+ { 0x0CD5, 0x0CD6 }, /* KANNADA LENGTH MARK - KANNADA AI LENGTH MARK */
+ { 0x0CE2, 0x0CE3 }, /* KANNADA VOWEL SIGN VOCALIC L - KANNADA VOWEL SIGN VOCALIC LL */
+ { 0x0CF3, 0x0CF3 }, /* KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT */
+ { 0x0D00, 0x0D03 }, /* MALAYALAM SIGN COMBINING ANUSVARA ABOVE - MALAYALAM SIGN VISARGA */
+ { 0x0D3B, 0x0D3C }, /* MALAYALAM SIGN VERTICAL BAR VIRAMA - MALAYALAM SIGN CIRCULAR VIRAMA */
+ { 0x0D3E, 0x0D44 }, /* MALAYALAM VOWEL SIGN AA - MALAYALAM VOWEL SIGN VOCALIC RR */
+ { 0x0D46, 0x0D48 }, /* MALAYALAM VOWEL SIGN E - MALAYALAM VOWEL SIGN AI */
+ { 0x0D4A, 0x0D4D }, /* MALAYALAM VOWEL SIGN O - MALAYALAM SIGN VIRAMA */
+ { 0x0D57, 0x0D57 }, /* MALAYALAM AU LENGTH MARK */
+ { 0x0D62, 0x0D63 }, /* MALAYALAM VOWEL SIGN VOCALIC L - MALAYALAM VOWEL SIGN VOCALIC LL */
+ { 0x0D81, 0x0D83 }, /* SINHALA SIGN CANDRABINDU - SINHALA SIGN VISARGAYA */
+ { 0x0DCA, 0x0DCA }, /* SINHALA SIGN AL-LAKUNA */
+ { 0x0DCF, 0x0DD4 }, /* SINHALA VOWEL SIGN AELA-PILLA - SINHALA VOWEL SIGN KETTI PAA-PILLA */
+ { 0x0DD6, 0x0DD6 }, /* SINHALA VOWEL SIGN DIGA PAA-PILLA */
+ { 0x0DD8, 0x0DDF }, /* SINHALA VOWEL SIGN GAETTA-PILLA - SINHALA VOWEL SIGN GAYANUKITTA */
+ { 0x0DF2, 0x0DF3 }, /* SINHALA VOWEL SIGN DIGA GAETTA-PILLA - SINHALA VOWEL SIGN DIGA GAYANUKITTA */
+ { 0x0E31, 0x0E31 }, /* THAI CHARACTER MAI HAN-AKAT */
+ { 0x0E34, 0x0E3A }, /* THAI CHARACTER SARA I - THAI CHARACTER PHINTHU */
+ { 0x0E47, 0x0E4E }, /* THAI CHARACTER MAITAIKHU - THAI CHARACTER YAMAKKAN */
+ { 0x0EB1, 0x0EB1 }, /* LAO VOWEL SIGN MAI KAN */
+ { 0x0EB4, 0x0EBC }, /* LAO VOWEL SIGN I - LAO SEMIVOWEL SIGN LO */
+ { 0x0EC8, 0x0ECE }, /* LAO TONE MAI EK - LAO YAMAKKAN */
+ { 0x0F18, 0x0F19 }, /* TIBETAN ASTROLOGICAL SIGN -KHYUD PA - TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS */
+ { 0x0F35, 0x0F35 }, /* TIBETAN MARK NGAS BZUNG NYI ZLA */
+ { 0x0F37, 0x0F37 }, /* TIBETAN MARK NGAS BZUNG SGOR RTAGS */
+ { 0x0F39, 0x0F39 }, /* TIBETAN MARK TSA -PHRU */
+ { 0x0F3E, 0x0F3F }, /* TIBETAN SIGN YAR TSHES - TIBETAN SIGN MAR TSHES */
+ { 0x0F71, 0x0F84 }, /* TIBETAN VOWEL SIGN AA - TIBETAN MARK HALANTA */
+ { 0x0F86, 0x0F87 }, /* TIBETAN SIGN LCI RTAGS - TIBETAN SIGN YANG RTAGS */
+ { 0x0F8D, 0x0F97 }, /* TIBETAN SUBJOINED SIGN LCE TSA CAN - TIBETAN SUBJOINED LETTER JA */
+ { 0x0F99, 0x0FBC }, /* TIBETAN SUBJOINED LETTER NYA - TIBETAN SUBJOINED LETTER FIXED-FORM RA */
+ { 0x0FC6, 0x0FC6 }, /* TIBETAN SYMBOL PADMA GDAN */
+ { 0x102B, 0x103E }, /* MYANMAR VOWEL SIGN TALL AA - MYANMAR CONSONANT SIGN MEDIAL HA */
+ { 0x1056, 0x1059 }, /* MYANMAR VOWEL SIGN VOCALIC R - MYANMAR VOWEL SIGN VOCALIC LL */
+ { 0x105E, 0x1060 }, /* MYANMAR CONSONANT SIGN MON MEDIAL NA - MYANMAR CONSONANT SIGN MON MEDIAL LA */
+ { 0x1062, 0x1064 }, /* MYANMAR VOWEL SIGN SGAW KAREN EU - MYANMAR TONE MARK SGAW KAREN KE PHO */
+ { 0x1067, 0x106D }, /* MYANMAR VOWEL SIGN WESTERN PWO KAREN EU - MYANMAR SIGN WESTERN PWO KAREN TONE-5 */
+ { 0x1071, 0x1074 }, /* MYANMAR VOWEL SIGN GEBA KAREN I - MYANMAR VOWEL SIGN KAYAH EE */
+ { 0x1082, 0x108D }, /* MYANMAR CONSONANT SIGN SHAN MEDIAL WA - MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE */
+ { 0x108F, 0x108F }, /* MYANMAR SIGN RUMAI PALAUNG TONE-5 */
+ { 0x109A, 0x109D }, /* MYANMAR SIGN KHAMTI TONE-1 - MYANMAR VOWEL SIGN AITON AI */
+ { 0x135D, 0x135F }, /* ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK - ETHIOPIC COMBINING GEMINATION MARK */
+ { 0x1712, 0x1715 }, /* TAGALOG VOWEL SIGN I - TAGALOG SIGN PAMUDPOD */
+ { 0x1732, 0x1734 }, /* HANUNOO VOWEL SIGN I - HANUNOO SIGN PAMUDPOD */
+ { 0x1752, 0x1753 }, /* BUHID VOWEL SIGN I - BUHID VOWEL SIGN U */
+ { 0x1772, 0x1773 }, /* TAGBANWA VOWEL SIGN I - TAGBANWA VOWEL SIGN U */
+ { 0x17B4, 0x17D3 }, /* KHMER VOWEL INHERENT AQ - KHMER SIGN BATHAMASAT */
+ { 0x17DD, 0x17DD }, /* KHMER SIGN ATTHACAN */
+ { 0x180B, 0x180F }, /* MONGOLIAN FREE VARIATION SELECTOR ONE - MONGOLIAN FREE VARIATION SELECTOR FOUR */
+ { 0x1885, 0x1886 }, /* MONGOLIAN LETTER ALI GALI BALUDA - MONGOLIAN LETTER ALI GALI THREE BALUDA */
+ { 0x18A9, 0x18A9 }, /* MONGOLIAN LETTER ALI GALI DAGALGA */
+ { 0x1920, 0x192B }, /* LIMBU VOWEL SIGN A - LIMBU SUBJOINED LETTER WA */
+ { 0x1930, 0x193B }, /* LIMBU SMALL LETTER KA - LIMBU SIGN SA-I */
+ { 0x1A17, 0x1A1B }, /* BUGINESE VOWEL SIGN I - BUGINESE VOWEL SIGN AE */
+ { 0x1A55, 0x1A5E }, /* TAI THAM CONSONANT SIGN MEDIAL RA - TAI THAM CONSONANT SIGN SA */
+ { 0x1A60, 0x1A7C }, /* TAI THAM SIGN SAKOT - TAI THAM SIGN KHUEN-LUE KARAN */
+ { 0x1A7F, 0x1A7F }, /* TAI THAM COMBINING CRYPTOGRAMMIC DOT */
+ { 0x1AB0, 0x1ACE }, /* COMBINING DOUBLED CIRCUMFLEX ACCENT - COMBINING LATIN SMALL LETTER INSULAR T */
+ { 0x1B00, 0x1B04 }, /* BALINESE SIGN ULU RICEM - BALINESE SIGN BISAH */
+ { 0x1B34, 0x1B44 }, /* BALINESE SIGN REREKAN - BALINESE ADEG ADEG */
+ { 0x1B6B, 0x1B73 }, /* BALINESE MUSICAL SYMBOL COMBINING TEGEH - BALINESE MUSICAL SYMBOL COMBINING GONG */
+ { 0x1B80, 0x1B82 }, /* SUNDANESE SIGN PANYECEK - SUNDANESE SIGN PANGWISAD */
+ { 0x1BA1, 0x1BAD }, /* SUNDANESE CONSONANT SIGN PAMINGKAL - SUNDANESE CONSONANT SIGN PASANGAN WA */
+ { 0x1BE6, 0x1BF3 }, /* BATAK SIGN TOMPI - BATAK PANONGONAN */
+ { 0x1C24, 0x1C37 }, /* LEPCHA SUBJOINED LETTER YA - LEPCHA SIGN NUKTA */
+ { 0x1CD0, 0x1CD2 }, /* VEDIC TONE KARSHANA - VEDIC TONE PRENKHA */
+ { 0x1CD4, 0x1CE8 }, /* VEDIC SIGN YAJURVEDIC MIDLINE SVARITA - VEDIC SIGN VISARGA ANUDATTA WITH TAIL */
+ { 0x1CED, 0x1CED }, /* VEDIC SIGN TIRYAK */
+ { 0x1CF4, 0x1CF4 }, /* VEDIC TONE CANDRA ABOVE */
+ { 0x1CF7, 0x1CF9 }, /* VEDIC SIGN ATIKRAMA - VEDIC TONE DOUBLE RING ABOVE */
+ { 0x1DC0, 0x1DFF }, /* COMBINING DOTTED GRAVE ACCENT - COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW */
+ { 0x200B, 0x200F }, /* ZERO WIDTH SPACE - RIGHT-TO-LEFT MARK */
+ { 0x202A, 0x202E }, /* LEFT-TO-RIGHT EMBEDDING - RIGHT-TO-LEFT OVERRIDE */
+ { 0x2060, 0x2064 }, /* WORD JOINER - INVISIBLE PLUS */
+ { 0x2066, 0x206F }, /* LEFT-TO-RIGHT ISOLATE - NOMINAL DIGIT SHAPES */
+ { 0x20D0, 0x20F0 }, /* COMBINING LEFT HARPOON ABOVE - COMBINING ASTERISK ABOVE */
+ { 0x2640, 0x2640 }, /* FEMALE SIGN */
+ { 0x2642, 0x2642 }, /* MALE SIGN */
+ { 0x26A7, 0x26A7 }, /* MALE WITH STROKE AND MALE AND FEMALE SIGN */
+ { 0x2CEF, 0x2CF1 }, /* COPTIC COMBINING NI ABOVE - COPTIC COMBINING SPIRITUS LENIS */
+ { 0x2D7F, 0x2D7F }, /* TIFINAGH CONSONANT JOINER */
+ { 0x2DE0, 0x2DFF }, /* COMBINING CYRILLIC LETTER BE - COMBINING CYRILLIC LETTER IOTIFIED BIG YUS */
+ { 0x302A, 0x302F }, /* IDEOGRAPHIC LEVEL TONE MARK - HANGUL DOUBLE DOT TONE MARK */
+ { 0x3099, 0x309A }, /* COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK - COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK */
+ { 0xA66F, 0xA672 }, /* COMBINING CYRILLIC VZMET - COMBINING CYRILLIC THOUSAND MILLIONS SIGN */
+ { 0xA674, 0xA67D }, /* COMBINING CYRILLIC LETTER UKRAINIAN IE - COMBINING CYRILLIC PAYEROK */
+ { 0xA69E, 0xA69F }, /* COMBINING CYRILLIC LETTER EF - COMBINING CYRILLIC LETTER IOTIFIED E */
+ { 0xA6F0, 0xA6F1 }, /* BAMUM COMBINING MARK KOQNDON - BAMUM COMBINING MARK TUKWENTIS */
+ { 0xA802, 0xA802 }, /* SYLOTI NAGRI SIGN DVISVARA */
+ { 0xA806, 0xA806 }, /* SYLOTI NAGRI SIGN HASANTA */
+ { 0xA80B, 0xA80B }, /* SYLOTI NAGRI SIGN ANUSVARA */
+ { 0xA823, 0xA827 }, /* SYLOTI NAGRI VOWEL SIGN A - SYLOTI NAGRI VOWEL SIGN OO */
+ { 0xA82C, 0xA82C }, /* SYLOTI NAGRI SIGN ALTERNATE HASANTA */
+ { 0xA880, 0xA881 }, /* SAURASHTRA SIGN ANUSVARA - SAURASHTRA SIGN VISARGA */
+ { 0xA8B4, 0xA8C5 }, /* SAURASHTRA CONSONANT SIGN HAARU - SAURASHTRA SIGN CANDRABINDU */
+ { 0xA8E0, 0xA8F1 }, /* COMBINING DEVANAGARI DIGIT ZERO - COMBINING DEVANAGARI SIGN AVAGRAHA */
+ { 0xA8FF, 0xA8FF }, /* DEVANAGARI VOWEL SIGN AY */
+ { 0xA926, 0xA92D }, /* KAYAH LI VOWEL UE - KAYAH LI TONE CALYA PLOPHU */
+ { 0xA947, 0xA953 }, /* REJANG VOWEL SIGN I - REJANG VIRAMA */
+ { 0xA980, 0xA983 }, /* JAVANESE SIGN PANYANGGA - JAVANESE SIGN WIGNYAN */
+ { 0xA9B3, 0xA9C0 }, /* JAVANESE SIGN CECAK TELU - JAVANESE PANGKON */
+ { 0xA9E5, 0xA9E5 }, /* MYANMAR SIGN SHAN SAW */
+ { 0xAA29, 0xAA36 }, /* CHAM VOWEL SIGN AA - CHAM CONSONANT SIGN WA */
+ { 0xAA43, 0xAA43 }, /* CHAM CONSONANT SIGN FINAL NG */
+ { 0xAA4C, 0xAA4D }, /* CHAM CONSONANT SIGN FINAL M - CHAM CONSONANT SIGN FINAL H */
+ { 0xAA7B, 0xAA7D }, /* MYANMAR SIGN PAO KAREN TONE - MYANMAR SIGN TAI LAING TONE-5 */
+ { 0xAAB0, 0xAAB0 }, /* TAI VIET MAI KANG */
+ { 0xAAB2, 0xAAB4 }, /* TAI VIET VOWEL I - TAI VIET VOWEL U */
+ { 0xAAB7, 0xAAB8 }, /* TAI VIET MAI KHIT - TAI VIET VOWEL IA */
+ { 0xAABE, 0xAABF }, /* TAI VIET VOWEL AM - TAI VIET TONE MAI EK */
+ { 0xAAC1, 0xAAC1 }, /* TAI VIET TONE MAI THO */
+ { 0xAAEB, 0xAAEF }, /* MEETEI MAYEK VOWEL SIGN II - MEETEI MAYEK VOWEL SIGN AAU */
+ { 0xAAF5, 0xAAF6 }, /* MEETEI MAYEK VOWEL SIGN VISARGA - MEETEI MAYEK VIRAMA */
+ { 0xABE3, 0xABEA }, /* MEETEI MAYEK VOWEL SIGN ONAP - MEETEI MAYEK VOWEL SIGN NUNG */
+ { 0xABEC, 0xABED }, /* MEETEI MAYEK LUM IYEK - MEETEI MAYEK APUN IYEK */
+ { 0xFB1E, 0xFB1E }, /* HEBREW POINT JUDEO-SPANISH VARIKA */
+ { 0xFE00, 0xFE0F }, /* VARIATION SELECTOR-1 - VARIATION SELECTOR-16 */
+ { 0xFE20, 0xFE2F }, /* COMBINING LIGATURE LEFT HALF - COMBINING CYRILLIC TITLO RIGHT HALF */
+ { 0xFEFF, 0xFEFF }, /* ZERO WIDTH NO-BREAK SPACE */
+ { 0xFFF9, 0xFFFB }, /* INTERLINEAR ANNOTATION ANCHOR - INTERLINEAR ANNOTATION TERMINATOR */
+};
+
+/* Zero-width character ranges (non-BMP, U+10000 and above) */
+static const struct ucs_interval32 ucs_zero_width_non_bmp_ranges[] = {
+ { 0x101FD, 0x101FD }, /* PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE */
+ { 0x102E0, 0x102E0 }, /* COPTIC EPACT THOUSANDS MARK */
+ { 0x10376, 0x1037A }, /* COMBINING OLD PERMIC LETTER AN - COMBINING OLD PERMIC LETTER SII */
+ { 0x10A01, 0x10A03 }, /* KHAROSHTHI VOWEL SIGN I - KHAROSHTHI VOWEL SIGN VOCALIC R */
+ { 0x10A05, 0x10A06 }, /* KHAROSHTHI VOWEL SIGN E - KHAROSHTHI VOWEL SIGN O */
+ { 0x10A0C, 0x10A0F }, /* KHAROSHTHI VOWEL LENGTH MARK - KHAROSHTHI SIGN VISARGA */
+ { 0x10A38, 0x10A3A }, /* KHAROSHTHI SIGN BAR ABOVE - KHAROSHTHI SIGN DOT BELOW */
+ { 0x10A3F, 0x10A3F }, /* KHAROSHTHI VIRAMA */
+ { 0x10AE5, 0x10AE6 }, /* MANICHAEAN ABBREVIATION MARK ABOVE - MANICHAEAN ABBREVIATION MARK BELOW */
+ { 0x10D24, 0x10D27 }, /* HANIFI ROHINGYA SIGN HARBAHAY - HANIFI ROHINGYA SIGN TASSI */
+ { 0x10D69, 0x10D6D }, /* GARAY VOWEL SIGN E - GARAY CONSONANT NASALIZATION MARK */
+ { 0x10EAB, 0x10EAC }, /* YEZIDI COMBINING HAMZA MARK - YEZIDI COMBINING MADDA MARK */
+ { 0x10EFC, 0x10EFF }, /* ARABIC COMBINING ALEF OVERLAY - ARABIC SMALL LOW WORD MADDA */
+ { 0x10F46, 0x10F50 }, /* SOGDIAN COMBINING DOT BELOW - SOGDIAN COMBINING STROKE BELOW */
+ { 0x10F82, 0x10F85 }, /* OLD UYGHUR COMBINING DOT ABOVE - OLD UYGHUR COMBINING TWO DOTS BELOW */
+ { 0x11000, 0x11002 }, /* BRAHMI SIGN CANDRABINDU - BRAHMI SIGN VISARGA */
+ { 0x11038, 0x11046 }, /* BRAHMI VOWEL SIGN AA - BRAHMI VIRAMA */
+ { 0x11070, 0x11070 }, /* BRAHMI SIGN OLD TAMIL VIRAMA */
+ { 0x11073, 0x11074 }, /* BRAHMI VOWEL SIGN OLD TAMIL SHORT E - BRAHMI VOWEL SIGN OLD TAMIL SHORT O */
+ { 0x1107F, 0x11082 }, /* BRAHMI NUMBER JOINER - KAITHI SIGN VISARGA */
+ { 0x110B0, 0x110BA }, /* KAITHI VOWEL SIGN AA - KAITHI SIGN NUKTA */
+ { 0x110BD, 0x110BD }, /* KAITHI NUMBER SIGN */
+ { 0x110C2, 0x110C2 }, /* KAITHI VOWEL SIGN VOCALIC R */
+ { 0x110CD, 0x110CD }, /* KAITHI NUMBER SIGN ABOVE */
+ { 0x11100, 0x11102 }, /* CHAKMA SIGN CANDRABINDU - CHAKMA SIGN VISARGA */
+ { 0x11127, 0x11134 }, /* CHAKMA VOWEL SIGN A - CHAKMA MAAYYAA */
+ { 0x11145, 0x11146 }, /* CHAKMA VOWEL SIGN AA - CHAKMA VOWEL SIGN EI */
+ { 0x11173, 0x11173 }, /* MAHAJANI SIGN NUKTA */
+ { 0x11180, 0x11182 }, /* SHARADA SIGN CANDRABINDU - SHARADA SIGN VISARGA */
+ { 0x111B3, 0x111C0 }, /* SHARADA VOWEL SIGN AA - SHARADA SIGN VIRAMA */
+ { 0x111C9, 0x111CC }, /* SHARADA SANDHI MARK - SHARADA EXTRA SHORT VOWEL MARK */
+ { 0x111CE, 0x111CF }, /* SHARADA VOWEL SIGN PRISHTHAMATRA E - SHARADA SIGN INVERTED CANDRABINDU */
+ { 0x1122C, 0x11237 }, /* KHOJKI VOWEL SIGN AA - KHOJKI SIGN SHADDA */
+ { 0x1123E, 0x1123E }, /* KHOJKI SIGN SUKUN */
+ { 0x11241, 0x11241 }, /* KHOJKI VOWEL SIGN VOCALIC R */
+ { 0x112DF, 0x112EA }, /* KHUDAWADI SIGN ANUSVARA - KHUDAWADI SIGN VIRAMA */
+ { 0x11300, 0x11303 }, /* GRANTHA SIGN COMBINING ANUSVARA ABOVE - GRANTHA SIGN VISARGA */
+ { 0x1133B, 0x1133C }, /* COMBINING BINDU BELOW - GRANTHA SIGN NUKTA */
+ { 0x1133E, 0x11344 }, /* GRANTHA VOWEL SIGN AA - GRANTHA VOWEL SIGN VOCALIC RR */
+ { 0x11347, 0x11348 }, /* GRANTHA VOWEL SIGN EE - GRANTHA VOWEL SIGN AI */
+ { 0x1134B, 0x1134D }, /* GRANTHA VOWEL SIGN OO - GRANTHA SIGN VIRAMA */
+ { 0x11357, 0x11357 }, /* GRANTHA AU LENGTH MARK */
+ { 0x11362, 0x11363 }, /* GRANTHA VOWEL SIGN VOCALIC L - GRANTHA VOWEL SIGN VOCALIC LL */
+ { 0x11366, 0x1136C }, /* COMBINING GRANTHA DIGIT ZERO - COMBINING GRANTHA DIGIT SIX */
+ { 0x11370, 0x11374 }, /* COMBINING GRANTHA LETTER A - COMBINING GRANTHA LETTER PA */
+ { 0x113B8, 0x113C0 }, /* TULU-TIGALARI VOWEL SIGN AA - TULU-TIGALARI VOWEL SIGN VOCALIC LL */
+ { 0x113C2, 0x113C2 }, /* TULU-TIGALARI VOWEL SIGN EE */
+ { 0x113C5, 0x113C5 }, /* TULU-TIGALARI VOWEL SIGN AI */
+ { 0x113C7, 0x113CA }, /* TULU-TIGALARI VOWEL SIGN OO - TULU-TIGALARI SIGN CANDRA ANUNASIKA */
+ { 0x113CC, 0x113D0 }, /* TULU-TIGALARI SIGN ANUSVARA - TULU-TIGALARI CONJOINER */
+ { 0x113D2, 0x113D2 }, /* TULU-TIGALARI GEMINATION MARK */
+ { 0x113E1, 0x113E2 }, /* TULU-TIGALARI VEDIC TONE SVARITA - TULU-TIGALARI VEDIC TONE ANUDATTA */
+ { 0x11435, 0x11446 }, /* NEWA VOWEL SIGN AA - NEWA SIGN NUKTA */
+ { 0x1145E, 0x1145E }, /* NEWA SANDHI MARK */
+ { 0x114B0, 0x114C3 }, /* TIRHUTA VOWEL SIGN AA - TIRHUTA SIGN NUKTA */
+ { 0x115AF, 0x115B5 }, /* SIDDHAM VOWEL SIGN AA - SIDDHAM VOWEL SIGN VOCALIC RR */
+ { 0x115B8, 0x115C0 }, /* SIDDHAM VOWEL SIGN E - SIDDHAM SIGN NUKTA */
+ { 0x115DC, 0x115DD }, /* SIDDHAM VOWEL SIGN ALTERNATE U - SIDDHAM VOWEL SIGN ALTERNATE UU */
+ { 0x11630, 0x11640 }, /* MODI VOWEL SIGN AA - MODI SIGN ARDHACANDRA */
+ { 0x116AB, 0x116B7 }, /* TAKRI SIGN ANUSVARA - TAKRI SIGN NUKTA */
+ { 0x1171D, 0x1172B }, /* AHOM CONSONANT SIGN MEDIAL LA - AHOM SIGN KILLER */
+ { 0x1182C, 0x1183A }, /* DOGRA VOWEL SIGN AA - DOGRA SIGN NUKTA */
+ { 0x11930, 0x11935 }, /* DIVES AKURU VOWEL SIGN AA - DIVES AKURU VOWEL SIGN E */
+ { 0x11937, 0x11938 }, /* DIVES AKURU VOWEL SIGN AI - DIVES AKURU VOWEL SIGN O */
+ { 0x1193B, 0x1193E }, /* DIVES AKURU SIGN ANUSVARA - DIVES AKURU VIRAMA */
+ { 0x11940, 0x11940 }, /* DIVES AKURU MEDIAL YA */
+ { 0x11942, 0x11943 }, /* DIVES AKURU MEDIAL RA - DIVES AKURU SIGN NUKTA */
+ { 0x119D1, 0x119D7 }, /* NANDINAGARI VOWEL SIGN AA - NANDINAGARI VOWEL SIGN VOCALIC RR */
+ { 0x119DA, 0x119E0 }, /* NANDINAGARI VOWEL SIGN E - NANDINAGARI SIGN VIRAMA */
+ { 0x119E4, 0x119E4 }, /* NANDINAGARI VOWEL SIGN PRISHTHAMATRA E */
+ { 0x11A01, 0x11A0A }, /* ZANABAZAR SQUARE VOWEL SIGN I - ZANABAZAR SQUARE VOWEL LENGTH MARK */
+ { 0x11A33, 0x11A39 }, /* ZANABAZAR SQUARE FINAL CONSONANT MARK - ZANABAZAR SQUARE SIGN VISARGA */
+ { 0x11A3B, 0x11A3E }, /* ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA - ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA */
+ { 0x11A47, 0x11A47 }, /* ZANABAZAR SQUARE SUBJOINER */
+ { 0x11A51, 0x11A5B }, /* SOYOMBO VOWEL SIGN I - SOYOMBO VOWEL LENGTH MARK */
+ { 0x11A8A, 0x11A99 }, /* SOYOMBO FINAL CONSONANT SIGN G - SOYOMBO SUBJOINER */
+ { 0x11C2F, 0x11C36 }, /* BHAIKSUKI VOWEL SIGN AA - BHAIKSUKI VOWEL SIGN VOCALIC L */
+ { 0x11C38, 0x11C3F }, /* BHAIKSUKI VOWEL SIGN E - BHAIKSUKI SIGN VIRAMA */
+ { 0x11C92, 0x11CA7 }, /* MARCHEN SUBJOINED LETTER KA - MARCHEN SUBJOINED LETTER ZA */
+ { 0x11CA9, 0x11CB6 }, /* MARCHEN SUBJOINED LETTER YA - MARCHEN SIGN CANDRABINDU */
+ { 0x11D31, 0x11D36 }, /* MASARAM GONDI VOWEL SIGN AA - MASARAM GONDI VOWEL SIGN VOCALIC R */
+ { 0x11D3A, 0x11D3A }, /* MASARAM GONDI VOWEL SIGN E */
+ { 0x11D3C, 0x11D3D }, /* MASARAM GONDI VOWEL SIGN AI - MASARAM GONDI VOWEL SIGN O */
+ { 0x11D3F, 0x11D45 }, /* MASARAM GONDI VOWEL SIGN AU - MASARAM GONDI VIRAMA */
+ { 0x11D47, 0x11D47 }, /* MASARAM GONDI RA-KARA */
+ { 0x11D8A, 0x11D8E }, /* GUNJALA GONDI VOWEL SIGN AA - GUNJALA GONDI VOWEL SIGN UU */
+ { 0x11D90, 0x11D91 }, /* GUNJALA GONDI VOWEL SIGN EE - GUNJALA GONDI VOWEL SIGN AI */
+ { 0x11D93, 0x11D97 }, /* GUNJALA GONDI VOWEL SIGN OO - GUNJALA GONDI VIRAMA */
+ { 0x11EF3, 0x11EF6 }, /* MAKASAR VOWEL SIGN I - MAKASAR VOWEL SIGN O */
+ { 0x11F00, 0x11F01 }, /* KAWI SIGN CANDRABINDU - KAWI SIGN ANUSVARA */
+ { 0x11F03, 0x11F03 }, /* KAWI SIGN VISARGA */
+ { 0x11F34, 0x11F3A }, /* KAWI VOWEL SIGN AA - KAWI VOWEL SIGN VOCALIC R */
+ { 0x11F3E, 0x11F42 }, /* KAWI VOWEL SIGN E - KAWI CONJOINER */
+ { 0x11F5A, 0x11F5A }, /* KAWI SIGN NUKTA */
+ { 0x13430, 0x13440 }, /* EGYPTIAN HIEROGLYPH VERTICAL JOINER - EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY */
+ { 0x13447, 0x13455 }, /* EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START - EGYPTIAN HIEROGLYPH MODIFIER DAMAGED */
+ { 0x1611E, 0x1612F }, /* GURUNG KHEMA VOWEL SIGN AA - GURUNG KHEMA SIGN THOLHOMA */
+ { 0x16AF0, 0x16AF4 }, /* BASSA VAH COMBINING HIGH TONE - BASSA VAH COMBINING HIGH-LOW TONE */
+ { 0x16B30, 0x16B36 }, /* PAHAWH HMONG MARK CIM TUB - PAHAWH HMONG MARK CIM TAUM */
+ { 0x16F4F, 0x16F4F }, /* MIAO SIGN CONSONANT MODIFIER BAR */
+ { 0x16F51, 0x16F87 }, /* MIAO SIGN ASPIRATION - MIAO VOWEL SIGN UI */
+ { 0x16F8F, 0x16F92 }, /* MIAO TONE RIGHT - MIAO TONE BELOW */
+ { 0x16FE4, 0x16FE4 }, /* KHITAN SMALL SCRIPT FILLER */
+ { 0x16FF0, 0x16FF1 }, /* VIETNAMESE ALTERNATE READING MARK CA - VIETNAMESE ALTERNATE READING MARK NHAY */
+ { 0x1BC9D, 0x1BC9E }, /* DUPLOYAN THICK LETTER SELECTOR - DUPLOYAN DOUBLE MARK */
+ { 0x1BCA0, 0x1BCA3 }, /* SHORTHAND FORMAT LETTER OVERLAP - SHORTHAND FORMAT UP STEP */
+ { 0x1CF00, 0x1CF2D }, /* ZNAMENNY COMBINING MARK GORAZDO NIZKO S KRYZHEM ON LEFT - ZNAMENNY COMBINING MARK KRYZH ON LEFT */
+ { 0x1CF30, 0x1CF46 }, /* ZNAMENNY COMBINING TONAL RANGE MARK MRACHNO - ZNAMENNY PRIZNAK MODIFIER ROG */
+ { 0x1D165, 0x1D169 }, /* MUSICAL SYMBOL COMBINING STEM - MUSICAL SYMBOL COMBINING TREMOLO-3 */
+ { 0x1D16D, 0x1D182 }, /* MUSICAL SYMBOL COMBINING AUGMENTATION DOT - MUSICAL SYMBOL COMBINING LOURE */
+ { 0x1D185, 0x1D18B }, /* MUSICAL SYMBOL COMBINING DOIT - MUSICAL SYMBOL COMBINING TRIPLE TONGUE */
+ { 0x1D1AA, 0x1D1AD }, /* MUSICAL SYMBOL COMBINING DOWN BOW - MUSICAL SYMBOL COMBINING SNAP PIZZICATO */
+ { 0x1D242, 0x1D244 }, /* COMBINING GREEK MUSICAL TRISEME - COMBINING GREEK MUSICAL PENTASEME */
+ { 0x1DA00, 0x1DA36 }, /* SIGNWRITING HEAD RIM - SIGNWRITING AIR SUCKING IN */
+ { 0x1DA3B, 0x1DA6C }, /* SIGNWRITING MOUTH CLOSED NEUTRAL - SIGNWRITING EXCITEMENT */
+ { 0x1DA75, 0x1DA75 }, /* SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS */
+ { 0x1DA84, 0x1DA84 }, /* SIGNWRITING LOCATION HEAD NECK */
+ { 0x1DA9B, 0x1DA9F }, /* SIGNWRITING FILL MODIFIER-2 - SIGNWRITING FILL MODIFIER-6 */
+ { 0x1DAA1, 0x1DAAF }, /* SIGNWRITING ROTATION MODIFIER-2 - SIGNWRITING ROTATION MODIFIER-16 */
+ { 0x1E000, 0x1E006 }, /* COMBINING GLAGOLITIC LETTER AZU - COMBINING GLAGOLITIC LETTER ZHIVETE */
+ { 0x1E008, 0x1E018 }, /* COMBINING GLAGOLITIC LETTER ZEMLJA - COMBINING GLAGOLITIC LETTER HERU */
+ { 0x1E01B, 0x1E021 }, /* COMBINING GLAGOLITIC LETTER SHTA - COMBINING GLAGOLITIC LETTER YATI */
+ { 0x1E023, 0x1E024 }, /* COMBINING GLAGOLITIC LETTER YU - COMBINING GLAGOLITIC LETTER SMALL YUS */
+ { 0x1E026, 0x1E02A }, /* COMBINING GLAGOLITIC LETTER YO - COMBINING GLAGOLITIC LETTER FITA */
+ { 0x1E08F, 0x1E08F }, /* COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I */
+ { 0x1E130, 0x1E136 }, /* NYIAKENG PUACHUE HMONG TONE-B - NYIAKENG PUACHUE HMONG TONE-D */
+ { 0x1E2AE, 0x1E2AE }, /* TOTO SIGN RISING TONE */
+ { 0x1E2EC, 0x1E2EF }, /* WANCHO TONE TUP - WANCHO TONE KOINI */
+ { 0x1E4EC, 0x1E4EF }, /* NAG MUNDARI SIGN MUHOR - NAG MUNDARI SIGN SUTUH */
+ { 0x1E5EE, 0x1E5EF }, /* OL ONAL SIGN MU - OL ONAL SIGN IKIR */
+ { 0x1E8D0, 0x1E8D6 }, /* MENDE KIKAKUI COMBINING NUMBER TEENS - MENDE KIKAKUI COMBINING NUMBER MILLIONS */
+ { 0x1E944, 0x1E94A }, /* ADLAM ALIF LENGTHENER - ADLAM NUKTA */
+ { 0x1F3FB, 0x1F3FF }, /* EMOJI MODIFIER FITZPATRICK TYPE-1-2 - EMOJI MODIFIER FITZPATRICK TYPE-6 */
+ { 0x1F9B0, 0x1F9B3 }, /* EMOJI COMPONENT RED HAIR - EMOJI COMPONENT WHITE HAIR */
+ { 0xE0001, 0xE0001 }, /* LANGUAGE TAG */
+ { 0xE0020, 0xE007F }, /* TAG SPACE - CANCEL TAG */
+ { 0xE0100, 0xE01EF }, /* VARIATION SELECTOR-17 - VARIATION SELECTOR-256 */
+};
+
+/* Double-width character ranges (BMP - Basic Multilingual Plane, U+0000 to U+FFFF) */
+static const struct ucs_interval16 ucs_double_width_bmp_ranges[] = {
+ { 0x1100, 0x115F }, /* HANGUL CHOSEONG KIYEOK - HANGUL CHOSEONG FILLER */
+ { 0x231A, 0x231B }, /* WATCH - HOURGLASS */
+ { 0x2329, 0x232A }, /* LEFT-POINTING ANGLE BRACKET - RIGHT-POINTING ANGLE BRACKET */
+ { 0x23E9, 0x23EC }, /* BLACK RIGHT-POINTING DOUBLE TRIANGLE - BLACK DOWN-POINTING DOUBLE TRIANGLE */
+ { 0x23F0, 0x23F0 }, /* ALARM CLOCK */
+ { 0x23F3, 0x23F3 }, /* HOURGLASS WITH FLOWING SAND */
+ { 0x25FD, 0x25FE }, /* WHITE MEDIUM SMALL SQUARE - BLACK MEDIUM SMALL SQUARE */
+ { 0x2614, 0x2615 }, /* UMBRELLA WITH RAIN DROPS - HOT BEVERAGE */
+ { 0x2630, 0x2637 }, /* TRIGRAM FOR HEAVEN - TRIGRAM FOR EARTH */
+ { 0x2648, 0x2653 }, /* ARIES - PISCES */
+ { 0x267F, 0x267F }, /* WHEELCHAIR SYMBOL */
+ { 0x268A, 0x268F }, /* MONOGRAM FOR YANG - DIGRAM FOR GREATER YIN */
+ { 0x2693, 0x2693 }, /* ANCHOR */
+ { 0x26A1, 0x26A1 }, /* HIGH VOLTAGE SIGN */
+ { 0x26AA, 0x26AB }, /* MEDIUM WHITE CIRCLE - MEDIUM BLACK CIRCLE */
+ { 0x26BD, 0x26BE }, /* SOCCER BALL - BASEBALL */
+ { 0x26C4, 0x26C5 }, /* SNOWMAN WITHOUT SNOW - SUN BEHIND CLOUD */
+ { 0x26CE, 0x26CE }, /* OPHIUCHUS */
+ { 0x26D4, 0x26D4 }, /* NO ENTRY */
+ { 0x26EA, 0x26EA }, /* CHURCH */
+ { 0x26F2, 0x26F3 }, /* FOUNTAIN - FLAG IN HOLE */
+ { 0x26F5, 0x26F5 }, /* SAILBOAT */
+ { 0x26FA, 0x26FA }, /* TENT */
+ { 0x26FD, 0x26FD }, /* FUEL PUMP */
+ { 0x2705, 0x2705 }, /* WHITE HEAVY CHECK MARK */
+ { 0x270A, 0x270B }, /* RAISED FIST - RAISED HAND */
+ { 0x2728, 0x2728 }, /* SPARKLES */
+ { 0x274C, 0x274C }, /* CROSS MARK */
+ { 0x274E, 0x274E }, /* NEGATIVE SQUARED CROSS MARK */
+ { 0x2753, 0x2755 }, /* BLACK QUESTION MARK ORNAMENT - WHITE EXCLAMATION MARK ORNAMENT */
+ { 0x2757, 0x2757 }, /* HEAVY EXCLAMATION MARK SYMBOL */
+ { 0x2795, 0x2797 }, /* HEAVY PLUS SIGN - HEAVY DIVISION SIGN */
+ { 0x27B0, 0x27B0 }, /* CURLY LOOP */
+ { 0x27BF, 0x27BF }, /* DOUBLE CURLY LOOP */
+ { 0x2B1B, 0x2B1C }, /* BLACK LARGE SQUARE - WHITE LARGE SQUARE */
+ { 0x2B50, 0x2B50 }, /* WHITE MEDIUM STAR */
+ { 0x2B55, 0x2B55 }, /* HEAVY LARGE CIRCLE */
+ { 0x2E80, 0x2E99 }, /* CJK RADICAL REPEAT - CJK RADICAL RAP */
+ { 0x2E9B, 0x2EF3 }, /* CJK RADICAL CHOKE - CJK RADICAL C-SIMPLIFIED TURTLE */
+ { 0x2F00, 0x2FD5 }, /* KANGXI RADICAL ONE - KANGXI RADICAL FLUTE */
+ { 0x2FF0, 0x3029 }, /* IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RIGHT - HANGZHOU NUMERAL NINE */
+ { 0x3030, 0x303E }, /* WAVY DASH - IDEOGRAPHIC VARIATION INDICATOR */
+ { 0x3041, 0x3096 }, /* HIRAGANA LETTER SMALL A - HIRAGANA LETTER SMALL KE */
+ { 0x309B, 0x30FF }, /* KATAKANA-HIRAGANA VOICED SOUND MARK - KATAKANA DIGRAPH KOTO */
+ { 0x3105, 0x312F }, /* BOPOMOFO LETTER B - BOPOMOFO LETTER NN */
+ { 0x3131, 0x318E }, /* HANGUL LETTER KIYEOK - HANGUL LETTER ARAEAE */
+ { 0x3190, 0x31E5 }, /* IDEOGRAPHIC ANNOTATION LINKING MARK - CJK STROKE SZP */
+ { 0x31EF, 0x321E }, /* IDEOGRAPHIC DESCRIPTION CHARACTER SUBTRACTION - PARENTHESIZED KOREAN CHARACTER O HU */
+ { 0x3220, 0x3247 }, /* PARENTHESIZED IDEOGRAPH ONE - CIRCLED IDEOGRAPH KOTO */
+ { 0x3250, 0xA48C }, /* PARTNERSHIP SIGN - YI SYLLABLE YYR */
+ { 0xA490, 0xA4C6 }, /* YI RADICAL QOT - YI RADICAL KE */
+ { 0xA960, 0xA97C }, /* HANGUL CHOSEONG TIKEUT-MIEUM - HANGUL CHOSEONG SSANGYEORINHIEUH */
+ { 0xAC00, 0xD7A3 }, /* HANGUL SYLLABLE GA - HANGUL SYLLABLE HIH */
+ { 0xF900, 0xFAFF }, /* U+F900 - U+FAFF */
+ { 0xFE10, 0xFE19 }, /* PRESENTATION FORM FOR VERTICAL COMMA - PRESENTATION FORM FOR VERTICAL HORIZONTAL ELLIPSIS */
+ { 0xFE30, 0xFE52 }, /* PRESENTATION FORM FOR VERTICAL TWO DOT LEADER - SMALL FULL STOP */
+ { 0xFE54, 0xFE66 }, /* SMALL SEMICOLON - SMALL EQUALS SIGN */
+ { 0xFE68, 0xFE6B }, /* SMALL REVERSE SOLIDUS - SMALL COMMERCIAL AT */
+ { 0xFF01, 0xFF60 }, /* FULLWIDTH EXCLAMATION MARK - FULLWIDTH RIGHT WHITE PARENTHESIS */
+ { 0xFFE0, 0xFFE6 }, /* FULLWIDTH CENT SIGN - FULLWIDTH WON SIGN */
+};
+
+/* Double-width character ranges (non-BMP, U+10000 and above) */
+static const struct ucs_interval32 ucs_double_width_non_bmp_ranges[] = {
+ { 0x16FE0, 0x16FE3 }, /* TANGUT ITERATION MARK - OLD CHINESE ITERATION MARK */
+ { 0x17000, 0x187F7 }, /* U+17000 - U+187F7 */
+ { 0x18800, 0x18CD5 }, /* TANGUT COMPONENT-001 - KHITAN SMALL SCRIPT CHARACTER-18CD5 */
+ { 0x18CFF, 0x18D08 }, /* U+18CFF - U+18D08 */
+ { 0x1AFF0, 0x1AFF3 }, /* KATAKANA LETTER MINNAN TONE-2 - KATAKANA LETTER MINNAN TONE-5 */
+ { 0x1AFF5, 0x1AFFB }, /* KATAKANA LETTER MINNAN TONE-7 - KATAKANA LETTER MINNAN NASALIZED TONE-5 */
+ { 0x1AFFD, 0x1AFFE }, /* KATAKANA LETTER MINNAN NASALIZED TONE-7 - KATAKANA LETTER MINNAN NASALIZED TONE-8 */
+ { 0x1B000, 0x1B122 }, /* KATAKANA LETTER ARCHAIC E - KATAKANA LETTER ARCHAIC WU */
+ { 0x1B132, 0x1B132 }, /* HIRAGANA LETTER SMALL KO */
+ { 0x1B150, 0x1B152 }, /* HIRAGANA LETTER SMALL WI - HIRAGANA LETTER SMALL WO */
+ { 0x1B155, 0x1B155 }, /* KATAKANA LETTER SMALL KO */
+ { 0x1B164, 0x1B167 }, /* KATAKANA LETTER SMALL WI - KATAKANA LETTER SMALL N */
+ { 0x1B170, 0x1B2FB }, /* NUSHU CHARACTER-1B170 - NUSHU CHARACTER-1B2FB */
+ { 0x1D300, 0x1D356 }, /* MONOGRAM FOR EARTH - TETRAGRAM FOR FOSTERING */
+ { 0x1D360, 0x1D376 }, /* COUNTING ROD UNIT DIGIT ONE - IDEOGRAPHIC TALLY MARK FIVE */
+ { 0x1F000, 0x1F02F }, /* U+1F000 - U+1F02F */
+ { 0x1F0A0, 0x1F0FF }, /* U+1F0A0 - U+1F0FF */
+ { 0x1F18E, 0x1F18E }, /* NEGATIVE SQUARED AB */
+ { 0x1F191, 0x1F19A }, /* SQUARED CL - SQUARED VS */
+ { 0x1F200, 0x1F202 }, /* SQUARE HIRAGANA HOKA - SQUARED KATAKANA SA */
+ { 0x1F210, 0x1F23B }, /* SQUARED CJK UNIFIED IDEOGRAPH-624B - SQUARED CJK UNIFIED IDEOGRAPH-914D */
+ { 0x1F240, 0x1F248 }, /* TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-672C - TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-6557 */
+ { 0x1F250, 0x1F251 }, /* CIRCLED IDEOGRAPH ADVANTAGE - CIRCLED IDEOGRAPH ACCEPT */
+ { 0x1F260, 0x1F265 }, /* ROUNDED SYMBOL FOR FU - ROUNDED SYMBOL FOR CAI */
+ { 0x1F300, 0x1F3FA }, /* CYCLONE - AMPHORA */
+ { 0x1F400, 0x1F64F }, /* RAT - PERSON WITH FOLDED HANDS */
+ { 0x1F680, 0x1F9AF }, /* ROCKET - PROBING CANE */
+ { 0x1F9B4, 0x1FAFF }, /* U+1F9B4 - U+1FAFF */
+ { 0x20000, 0x2FFFD }, /* U+20000 - U+2FFFD */
+ { 0x30000, 0x3FFFD }, /* U+30000 - U+3FFFD */
+};
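Each width table is a sorted list of inclusive [first, last] intervals, so a membership test reduces to one binary search per plane. A minimal sketch, under the assumption that struct ucs_interval16 holds two u16 endpoints as the initializers suggest (the real ucs_is_zero_width()/ucs_is_double_width() helpers are defined elsewhere in this series):

static bool ucs_in_ranges16(u16 cp, const struct ucs_interval16 *tbl, size_t n)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (cp > tbl[mid].last)
			lo = mid + 1;		/* search upper half */
		else if (cp < tbl[mid].first)
			hi = mid;		/* search lower half */
		else
			return true;		/* inside an interval */
	}
	return false;
}

For example, ucs_in_ranges16(0x200B, ucs_zero_width_bmp_ranges, ARRAY_SIZE(ucs_zero_width_bmp_ranges)) reports ZERO WIDTH SPACE as zero-width, matching the { 0x200B, 0x200F } entry above.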
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index f5642b3038e4..ed39d9cb4432 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -104,7 +104,6 @@
#include <linux/uaccess.h>
#include <linux/kdb.h>
#include <linux/ctype.h>
-#include <linux/bsearch.h>
#include <linux/gcd.h>
#define MAX_NR_CON_DRIVER 16
@@ -444,6 +443,15 @@ static void vc_uniscr_scroll(struct vc_data *vc, unsigned int top,
}
}
+static u32 vc_uniscr_getc(struct vc_data *vc, int relative_pos)
+{
+ int pos = vc->state.x + vc->vc_need_wrap + relative_pos;
+
+ if (vc->vc_uni_lines && in_range(pos, 0, vc->vc_cols))
+ return vc->vc_uni_lines[vc->state.y][pos];
+ return 0;
+}
+
static void vc_uniscr_copy_area(u32 **dst_lines,
unsigned int dst_cols,
unsigned int dst_rows,
@@ -1862,6 +1870,14 @@ int mouse_reporting(void)
return vc_cons[fg_console].d->vc_report_mouse;
}
+/* invoked via ioctl(TIOCLINUX) */
+static int get_bracketed_paste(struct tty_struct *tty)
+{
+ struct vc_data *vc = tty->driver_data;
+
+ return vc->vc_bracketed_paste;
+}
+
enum {
CSI_DEC_hl_CURSOR_KEYS = 1, /* CKM: cursor keys send ^[Ox/^[[x */
CSI_DEC_hl_132_COLUMNS = 3, /* COLM: 80/132 mode switch */
@@ -1872,6 +1888,7 @@ enum {
CSI_DEC_hl_MOUSE_X10 = 9,
CSI_DEC_hl_SHOW_CURSOR = 25, /* TCEM */
CSI_DEC_hl_MOUSE_VT200 = 1000,
+ CSI_DEC_hl_BRACKETED_PASTE = 2004,
};
/* console_lock is held */
@@ -1924,6 +1941,9 @@ static void csi_DEC_hl(struct vc_data *vc, bool on_off)
case CSI_DEC_hl_MOUSE_VT200:
vc->vc_report_mouse = on_off ? 2 : 0;
break;
+ case CSI_DEC_hl_BRACKETED_PASTE:
+ vc->vc_bracketed_paste = on_off;
+ break;
}
}
@@ -2149,6 +2169,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
vc->state.charset = 0;
vc->vc_need_wrap = 0;
vc->vc_report_mouse = 0;
+ vc->vc_bracketed_paste = 0;
vc->vc_utf = default_utf8;
vc->vc_utf_count = 0;
@@ -2712,43 +2733,6 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, u8 c)
}
}
-/* is_double_width() is based on the wcwidth() implementation by
- * Markus Kuhn -- 2007-05-26 (Unicode 5.0)
- * Latest version: https://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
- */
-struct interval {
- uint32_t first;
- uint32_t last;
-};
-
-static int ucs_cmp(const void *key, const void *elt)
-{
- uint32_t ucs = *(uint32_t *)key;
- struct interval e = *(struct interval *) elt;
-
- if (ucs > e.last)
- return 1;
- else if (ucs < e.first)
- return -1;
- return 0;
-}
-
-static int is_double_width(uint32_t ucs)
-{
- static const struct interval double_width[] = {
- { 0x1100, 0x115F }, { 0x2329, 0x232A }, { 0x2E80, 0x303E },
- { 0x3040, 0xA4CF }, { 0xAC00, 0xD7A3 }, { 0xF900, 0xFAFF },
- { 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
- { 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
- };
- if (ucs < double_width[0].first ||
- ucs > double_width[ARRAY_SIZE(double_width) - 1].last)
- return 0;
-
- return bsearch(&ucs, double_width, ARRAY_SIZE(double_width),
- sizeof(struct interval), ucs_cmp) != NULL;
-}
-
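(The removed is_double_width() table, derived from Markus Kuhn's wcwidth() as of Unicode 5.0, only knew about double-width ranges; the generated ucs_width_table.h_shipped added above supersedes it with Unicode 16.0.0 data plus separate zero-width tables.)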
struct vc_draw_region {
unsigned long from, to;
int x;
@@ -2817,7 +2801,7 @@ static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan)
if ((c & 0xc0) == 0x80) {
/* Unexpected continuation byte? */
if (!vc->vc_utf_count)
- return 0xfffd;
+ goto bad_sequence;
vc->vc_utf_char = (vc->vc_utf_char << 6) | (c & 0x3f);
vc->vc_npar++;
@@ -2829,17 +2813,17 @@ static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan)
/* Reject overlong sequences */
if (c <= utf8_length_changes[vc->vc_npar - 1] ||
c > utf8_length_changes[vc->vc_npar])
- return 0xfffd;
+ goto bad_sequence;
return vc_sanitize_unicode(c);
}
/* Single ASCII byte or first byte of a sequence received */
if (vc->vc_utf_count) {
- /* Continuation byte expected */
+ /* A continuation byte was expected */
*rescan = true;
vc->vc_utf_count = 0;
- return 0xfffd;
+ goto bad_sequence;
}
/* Nothing to do if an ASCII byte was received */
@@ -2858,11 +2842,14 @@ static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan)
vc->vc_utf_count = 3;
vc->vc_utf_char = (c & 0x07);
} else {
- return 0xfffd;
+ goto bad_sequence;
}
need_more_bytes:
return -1;
+
+bad_sequence:
+ return 0xfffd;
}
static int vc_translate(struct vc_data *vc, int *c, bool *rescan)
@@ -2940,54 +2927,143 @@ static bool vc_is_control(struct vc_data *vc, int tc, int c)
return false;
}
+static void vc_con_rewind(struct vc_data *vc)
+{
+ if (vc->state.x && !vc->vc_need_wrap) {
+ vc->vc_pos -= 2;
+ vc->state.x--;
+ }
+ vc->vc_need_wrap = 0;
+}
+
+#define UCS_ZWS 0x200b /* Zero Width Space */
+#define UCS_VS16 0xfe0f /* Variation Selector 16 */
+#define UCS_REPLACEMENT 0xfffd /* Replacement Character */
+
+static int vc_process_ucs(struct vc_data *vc, int *c, int *tc)
+{
+ u32 prev_c, curr_c = *c;
+
+ if (ucs_is_double_width(curr_c)) {
+ /*
+ * The Unicode screen memory is allocated only when
+ * required. This is one such case, as we need to remember
+ * which displayed characters are double-width.
+ */
+ vc_uniscr_check(vc);
+ return 2;
+ }
+
+ if (!ucs_is_zero_width(curr_c))
+ return 1;
+
+ /* From here curr_c is known to be zero-width. */
+
+ if (ucs_is_double_width(vc_uniscr_getc(vc, -2))) {
+ /*
+ * Let's merge this zero-width code point with the preceding
+ * double-width code point by replacing the existing
+ * zero-width space padding. To do so we rewind one column
+ * and pretend this has a width of 1.
+ * We give the legacy display the same initial space padding.
+ */
+ vc_con_rewind(vc);
+ *tc = ' ';
+ return 1;
+ }
+
+ /* From here the preceding character, if any, must be single-width. */
+ prev_c = vc_uniscr_getc(vc, -1);
+
+ if (curr_c == UCS_VS16 && prev_c != 0) {
+ /*
+ * VS16 (U+FE0F) is special. It typically turns the preceding
+ * single-width character into a double-width one. Give it
+ * a width of 1, effectively making the combination with
+ * the preceding character double-width.
+ */
+ *tc = ' ';
+ return 1;
+ }
+
+ /* try recomposition */
+ prev_c = ucs_recompose(prev_c, curr_c);
+ if (prev_c != 0) {
+ vc_con_rewind(vc);
+ *tc = *c = prev_c;
+ return 1;
+ }
+
+ /* Otherwise zero-width code points are ignored. */
+ return 0;
+}
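As a concrete walk-through of vc_process_ucs(): for 'o' (U+006F) followed by COMBINING DIAERESIS (U+0308), the first call returns width 1 and the o is drawn; on the second call the code point is zero-width, the cell read back via vc_uniscr_getc(vc, -1) is single-width and not VS16, so ucs_recompose(0x006F, 0x0308) yields U+00F6 per the shipped table, vc_con_rewind() steps the cursor back one column, and ö is drawn in place of the o.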
+
+static int vc_get_glyph(struct vc_data *vc, int tc)
+{
+ int glyph = conv_uni_to_pc(vc, tc);
+ u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+
+ if (!(glyph & ~charmask))
+ return glyph;
+
+ if (glyph == -1)
+ return -1; /* nothing to display */
+
+ /* Glyph not found */
+ if ((!vc->vc_utf || vc->vc_disp_ctrl || tc < 128) && !(tc & ~charmask)) {
+ /*
+ * In legacy mode use the glyph we get by a 1:1 mapping.
+ * This would make absolutely no sense with Unicode in mind, but do this for
+ * ASCII characters since a font may lack Unicode mapping info and we don't
+ * want to end up with only question marks.
+ */
+ return tc;
+ }
+
+ /*
+ * The Unicode screen memory is allocated only when required.
+ * This is one such case: we're about to "cheat" with the displayed
+ * character, meaning the simple screen buffer won't hold the original
+ * information, whereas the Unicode screen buffer always does.
+ */
+ vc_uniscr_check(vc);
+
+ /* Try getting a simpler fallback character. */
+ tc = ucs_get_fallback(tc);
+ if (tc)
+ return vc_get_glyph(vc, tc);
+
+ /* Display U+FFFD (Unicode Replacement Character). */
+ return conv_uni_to_pc(vc, UCS_REPLACEMENT);
+}
+
static int vc_con_write_normal(struct vc_data *vc, int tc, int c,
struct vc_draw_region *draw)
{
int next_c;
unsigned char vc_attr = vc->vc_attr;
- u16 himask = vc->vc_hi_font_mask, charmask = himask ? 0x1ff : 0xff;
+ u16 himask = vc->vc_hi_font_mask;
u8 width = 1;
bool inverse = false;
if (vc->vc_utf && !vc->vc_disp_ctrl) {
- if (is_double_width(c))
- width = 2;
+ width = vc_process_ucs(vc, &c, &tc);
+ if (!width)
+ goto out;
}
/* Now try to find out how to display it */
- tc = conv_uni_to_pc(vc, tc);
- if (tc & ~charmask) {
- if (tc == -1 || tc == -2)
- return -1; /* nothing to display */
-
- /* Glyph not found */
- if ((!vc->vc_utf || vc->vc_disp_ctrl || c < 128) &&
- !(c & ~charmask)) {
- /*
- * In legacy mode use the glyph we get by a 1:1
- * mapping.
- * This would make absolutely no sense with Unicode in
- * mind, but do this for ASCII characters since a font
- * may lack Unicode mapping info and we don't want to
- * end up with having question marks only.
- */
- tc = c;
- } else {
- /*
- * Display U+FFFD. If it's not found, display an inverse
- * question mark.
- */
- tc = conv_uni_to_pc(vc, 0xfffd);
- if (tc < 0) {
- inverse = true;
- tc = conv_uni_to_pc(vc, '?');
- if (tc < 0)
- tc = '?';
-
- vc_attr = vc_invert_attr(vc);
- con_flush(vc, draw);
- }
- }
+ tc = vc_get_glyph(vc, tc);
+ if (tc == -1)
+ return -1; /* nothing to display */
+ if (tc < 0) {
+ inverse = true;
+ tc = conv_uni_to_pc(vc, '?');
+ if (tc < 0)
+ tc = '?';
+
+ vc_attr = vc_invert_attr(vc);
+ con_flush(vc, draw);
}
next_c = c;
@@ -3028,8 +3104,14 @@ static int vc_con_write_normal(struct vc_data *vc, int tc, int c,
tc = conv_uni_to_pc(vc, ' ');
if (tc < 0)
tc = ' ';
- next_c = ' ';
+ /*
+ * Store a zero-width space in the Unicode screen given that
+ * the previous code point is semantically double width.
+ */
+ next_c = UCS_ZWS;
}
+
+out:
notify_write(vc, c);
if (inverse)
@@ -3414,6 +3496,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
break;
case TIOCL_BLANKEDSCREEN:
return console_blanked;
+ case TIOCL_GETBRACKETEDPASTE:
+ return get_bracketed_paste(tty);
default:
return -EINVAL;
}
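A userspace caller would query the flag through the usual TIOCLINUX convention of passing the subcode in the first byte. A hedged sketch — TIOCL_GETBRACKETEDPASTE is assumed to be exported by the matching <linux/tiocl.h> uapi update, which is not part of this hunk:

#include <sys/ioctl.h>
#include <linux/tiocl.h>	/* assumed to carry TIOCL_GETBRACKETEDPASTE */

/* Returns 0 or 1 on success, -1 with errno set on failure. */
static int console_bracketed_paste(int fd)
{
	char subcode = TIOCL_GETBRACKETEDPASTE;

	return ioctl(fd, TIOCLINUX, &subcode);
}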
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 4b91072f3a4e..61342e06970a 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -951,6 +951,22 @@ int vt_ioctl(struct tty_struct *tty,
(unsigned short __user *)arg);
case VT_WAITEVENT:
return vt_event_wait_ioctl((struct vt_event __user *)arg);
+
+ case VT_GETCONSIZECSRPOS:
+ {
+ struct vt_consizecsrpos concsr;
+
+ console_lock();
+ concsr.con_cols = vc->vc_cols;
+ concsr.con_rows = vc->vc_rows;
+ concsr.csr_col = vc->state.x;
+ concsr.csr_row = vc->state.y;
+ console_unlock();
+ if (copy_to_user(up, &concsr, sizeof(concsr)))
+ return -EFAULT;
+ return 0;
+ }
+
default:
return -ENOIOCTLCMD;
}
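A minimal caller sketch for the new ioctl, assuming struct vt_consizecsrpos and the VT_GETCONSIZECSRPOS number are exported through the matching <linux/vt.h> uapi change (not shown in this hunk):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vt.h>	/* assumed to define the new ioctl and struct */

static int print_console_geometry(int fd)
{
	struct vt_consizecsrpos c;

	if (ioctl(fd, VT_GETCONSIZECSRPOS, &c) < 0)
		return -1;
	/* Size and cursor position are sampled together under console_lock. */
	printf("%ux%u, cursor at column %u row %u\n",
	       c.con_cols, c.con_rows, c.csr_col, c.csr_row);
	return 0;
}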
@@ -1103,8 +1119,6 @@ long vt_compat_ioctl(struct tty_struct *tty,
case VT_WAITACTIVE:
case VT_RELDISP:
case VT_DISALLOCATE:
- case VT_RESIZE:
- case VT_RESIZEX:
return vt_ioctl(tty, cmd, arg);
/*
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index f1294c29f484..1e50675772fe 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -674,7 +674,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
- unsigned long flags;
int err;
/* Skip task abort in case previous aborts failed and report failure */
@@ -713,10 +712,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
return FAILED;
}
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
-
return SUCCESS;
}
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 634cf163f4cb..de8b6acd4058 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -57,6 +57,36 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
}
}
+static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint)
+{
+ switch (hint) {
+ case WB_RESIZE_HINT_KEEP:
+ return "keep";
+ case WB_RESIZE_HINT_DECREASE:
+ return "decrease";
+ case WB_RESIZE_HINT_INCREASE:
+ return "increase";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
+{
+ switch (status) {
+ case WB_RESIZE_STATUS_IDLE:
+ return "idle";
+ case WB_RESIZE_STATUS_IN_PROGRESS:
+ return "in_progress";
+ case WB_RESIZE_STATUS_COMPLETE_SUCCESS:
+ return "complete_success";
+ case WB_RESIZE_STATUS_GENERAL_FAILURE:
+ return "general_failure";
+ default:
+ return "unknown";
+ }
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -411,6 +441,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev,
return count;
}
+static const char * const wb_resize_en_mode[] = {
+ [WB_RESIZE_EN_IDLE] = "idle",
+ [WB_RESIZE_EN_DECREASE] = "decrease",
+ [WB_RESIZE_EN_INCREASE] = "increase",
+};
+
+static ssize_t wb_resize_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int mode;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ mode = sysfs_match_string(wb_resize_en_mode, buf);
+ if (mode < 0)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ res = ufshcd_wb_set_resize_en(hba, mode);
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
/**
* pm_qos_enable_show - sysfs handler to show pm qos enable value
* @dev: device associated with the UFS controller
@@ -526,6 +594,7 @@ static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
+static DEVICE_ATTR_WO(wb_resize_enable);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
@@ -543,6 +612,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_on.attr,
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
+ &dev_attr_wb_resize_enable.attr,
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
&dev_attr_critical_health.attr,
@@ -1549,6 +1619,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
+static int wb_read_resize_attrs(struct ufs_hba *hba,
+ enum attr_idn idn, u32 *attr_val)
+{
+ u8 index = 0;
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
+ || !hba->dev_info.b_presrv_uspc_en
+ || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
+ return -EOPNOTSUPP;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ up(&hba->host_sem);
+ return -EBUSY;
+ }
+
+ index = ufshcd_wb_get_query_index(hba);
+ ufshcd_rpm_get_sync(hba);
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ idn, index, 0, attr_val);
+ ufshcd_rpm_put_sync(hba);
+
+ up(&hba->host_sem);
+ return ret;
+}
+
+static ssize_t wb_resize_hint_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_hint);
+
+static ssize_t wb_resize_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+ u32 value;
+
+ ret = wb_read_resize_attrs(hba,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value));
+}
+
+static DEVICE_ATTR_RO(wb_resize_status);
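Taken together, the three nodes support a poll-and-react loop: read wb_resize_hint for the device's recommendation, write "increase" or "decrease" to wb_resize_enable accordingly, then poll wb_resize_status until it leaves in_progress and reports complete_success or general_failure (paths under the HBA's sysfs directory, e.g. /sys/bus/platform/devices/<hba>/wb_resize_hint — exact layout assumed).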
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -1622,6 +1753,8 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_wb_avail_buf.attr,
&dev_attr_wb_life_time_est.attr,
&dev_attr_wb_cur_buf.attr,
+ &dev_attr_wb_resize_hint.attr,
+ &dev_attr_wb_resize_status.attr,
NULL,
};
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 7735421e3991..f62d89c8e580 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -53,7 +53,7 @@
/* UIC command timeout, unit: ms */
enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
- UIC_CMD_TIMEOUT_MAX = 2000,
+ UIC_CMD_TIMEOUT_MAX = 5000,
};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
@@ -63,7 +63,11 @@ enum {
/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+enum {
+ QUERY_REQ_TIMEOUT_MIN = 1,
+ QUERY_REQ_TIMEOUT_DEFAULT = 1500,
+ QUERY_REQ_TIMEOUT_MAX = 30000
+};
/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
@@ -133,7 +137,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops = {
module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
MODULE_PARM_DESC(uic_cmd_timeout,
- "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
+
+static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT;
+
+static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN,
+ QUERY_REQ_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops dev_cmd_timeout_ops = {
+ .set = dev_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
+MODULE_PARM_DESC(dev_cmd_timeout,
+ "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
@@ -432,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
u8 opcode = 0, group_id = 0;
u32 doorbell = 0;
u32 intr;
- int hwq_id = -1;
+ u32 hwq_id = 0;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -644,9 +665,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
- dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- div_u64(hba->ufs_stats.last_intr_ts, 1000),
- hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
@@ -1379,6 +1397,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
+ mutex_lock(&hba->host->scan_mutex);
blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
@@ -1389,6 +1408,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
goto out;
}
@@ -1410,6 +1430,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
mutex_unlock(&hba->wb_mutex);
blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
ufshcd_release(hba);
}
@@ -3365,7 +3386,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, selector = 0;
- int timeout = QUERY_REQ_TIMEOUT;
+ int timeout = dev_cmd_timeout;
BUG_ON(!hba);
@@ -3462,7 +3483,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3558,7 +3579,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -6020,7 +6041,7 @@ int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err) {
dev_err(hba->dev, "%s: failed to read device level exception %d\n",
@@ -6107,6 +6128,21 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
+int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode)
+{
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
@@ -6572,7 +6608,7 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
dev_info(hba->dev,
- "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+ "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n",
__func__, ufshcd_state_name[hba->ufshcd_state],
hba->is_powered, hba->shutting_down, hba->saved_err,
hba->saved_uic_err, hba->force_reset,
@@ -6587,9 +6623,14 @@ static void ufshcd_err_handler(struct work_struct *work)
up(&hba->host_sem);
return;
}
- ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ufshcd_err_handling_prepare(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7001,7 +7042,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
}
/**
- * ufshcd_intr - Main interrupt service routine
+ * ufshcd_threaded_intr - Threaded interrupt service routine
* @irq: irq number
* @__hba: pointer to adapter instance
*
@@ -7009,16 +7050,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
+static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba)
{
- u32 intr_status, enabled_intr_status = 0;
+ u32 last_intr_status, intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = local_clock();
+ last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -7042,7 +7081,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
- hba->ufs_stats.last_intr_status,
+ last_intr_status,
enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
@@ -7050,6 +7089,29 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
return retval;
}
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Return:
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_WAKE_THREAD - If handling is moved to the threaded handler
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+
+ /* Defer to the IRQ thread unless both MCQ and ESI are enabled */
+ if (!hba->mcq_enabled || !hba->mcq_esi_enabled)
+ return IRQ_WAKE_THREAD;
+
+ /* Directly handle interrupts since the MCQ ESI handlers do the hard work */
+ return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) &
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE));
+}
+
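The conversion above follows the standard devm_request_threaded_irq() split: a primary handler runs in hard-IRQ context and either completes the work itself or returns IRQ_WAKE_THREAD to defer to the thread, while IRQF_ONESHOT keeps the line masked until the thread returns. A minimal sketch of that pattern — struct demo_dev, demo_handle_events() and fast_path_ok are hypothetical names, not the UFS driver's:

struct demo_dev {
	bool fast_path_ok;
};

static irqreturn_t demo_handle_events(struct demo_dev *dd);

static irqreturn_t demo_hardirq(int irq, void *data)
{
	struct demo_dev *dd = data;

	/* Fast path: handle inline when the hardware makes it cheap. */
	if (dd->fast_path_ok)
		return demo_handle_events(dd);

	/* Otherwise defer the heavy lifting to the IRQ thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_threadfn(int irq, void *data)
{
	return demo_handle_events(data);	/* may block here */
}

/* Probe side: IRQF_ONESHOT keeps the line masked until the thread returns. */
static int demo_probe(struct device *dev, struct demo_dev *dd, int irq)
{
	return devm_request_threaded_irq(dev, irq, demo_hardirq, demo_threadfn,
					 IRQF_ONESHOT | IRQF_SHARED,
					 "demo", dd);
}
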
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
@@ -7245,7 +7307,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* read the response directly ignoring all errors.
*/
- ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
+ ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -8107,6 +8169,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
*/
dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->ext_wb_sup = get_unaligned_be16(desc_buf +
+ DEVICE_DESC_PARAM_EXT_WB_SUP);
+
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
@@ -8678,7 +8743,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
if (err)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
@@ -8793,6 +8858,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
+ hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
intrs = UFSHCD_ENABLE_MCQ_INTRS;
@@ -10654,7 +10720,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
- err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr,
+ IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
goto out_disable;
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index c0761ccc1381..18a978452001 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -5,6 +5,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/gpio/consumer.h>
@@ -102,8 +103,28 @@ static const struct __ufs_qcom_bw_table {
[MODE_MAX][0][0] = { 7643136, 819200 },
};
+static const struct {
+ int nminor;
+ char *prefix;
+} testbus_info[TSTBUS_MAX] = {
+ [TSTBUS_UAWM] = {32, "TSTBUS_UAWM"},
+ [TSTBUS_UARM] = {32, "TSTBUS_UARM"},
+ [TSTBUS_TXUC] = {32, "TSTBUS_TXUC"},
+ [TSTBUS_RXUC] = {32, "TSTBUS_RXUC"},
+ [TSTBUS_DFC] = {32, "TSTBUS_DFC"},
+ [TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"},
+ [TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"},
+ [TSTBUS_OCSC] = {32, "TSTBUS_OCSC"},
+ [TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"},
+ [TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"},
+ [TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"},
+ [TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"},
+};
+
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@@ -173,7 +194,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
profile->ll_ops = ufs_qcom_crypto_ops;
profile->max_dun_bytes_supported = 8;
- profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+ profile->key_types_supported = qcom_ice_get_supported_key_type(ice);
profile->dev = dev;
/*
@@ -221,17 +242,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
- /* Only AES-256-XTS has been tested so far. */
- if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
- return -EOPNOTSUPP;
-
ufshcd_hold(hba);
- err = qcom_ice_program_key(host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->bytes,
- key->crypto_cfg.data_unit_size / 512,
- slot);
+ err = qcom_ice_program_key(host->ice, slot, key);
ufshcd_release(hba);
return err;
}
@@ -250,9 +262,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
return err;
}
+static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size,
+ sw_secret);
+}
+
+static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key);
+}
+
+static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_generate_key(host->ice, lt_key);
+}
+
+static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key);
+}
+
static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
.keyslot_program = ufs_qcom_ice_keyslot_program,
.keyslot_evict = ufs_qcom_ice_keyslot_evict,
+ .derive_sw_secret = ufs_qcom_ice_derive_sw_secret,
+ .import_key = ufs_qcom_ice_import_key,
+ .generate_key = ufs_qcom_ice_generate_key,
+ .prepare_key = ufs_qcom_ice_prepare_key,
};
#else
@@ -452,10 +508,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
if (ret)
return ret;
- if (phy->power_count) {
+ if (phy->power_count)
phy_power_off(phy);
- phy_exit(phy);
- }
+
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
@@ -543,13 +598,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
*
* @hba: host controller instance
* @is_pre_scale_up: flag to check if pre scale up condition.
+ * @freq: target opp freq
* Return: zero for success and non-zero in case of a failure.
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
- unsigned long core_clk_rate = 0;
+ unsigned long clk_freq = 0;
u32 core_clk_cycles_per_us;
/*
@@ -561,22 +617,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk");
+ if (clk_freq)
+ goto cfg_timers;
+ }
+
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk")) {
+ if (freq == ULONG_MAX) {
+ clk_freq = clki->max_freq;
+ break;
+ }
+
if (is_pre_scale_up)
- core_clk_rate = clki->max_freq;
+ clk_freq = clki->max_freq;
else
- core_clk_rate = clk_get_rate(clki->clk);
+ clk_freq = clk_get_rate(clki->clk);
break;
}
}
+cfg_timers:
/* If frequency is smaller than 1MHz, set to 1MHz */
- if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
- core_clk_rate = DEFAULT_CLK_RATE_HZ;
+ if (clk_freq < DEFAULT_CLK_RATE_HZ)
+ clk_freq = DEFAULT_CLK_RATE_HZ;
- core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+ core_clk_cycles_per_us = clk_freq / USEC_PER_SEC;
if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
/*
@@ -596,13 +664,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
+ if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
- err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
+ err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
@@ -874,17 +942,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
break;
case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, false)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- /*
- * we return error code at the end of the routine,
- * but continue to configure UFS_PHY_TX_LANE_ENABLE
- * and bus voting as usual
- */
- ret = -EINVAL;
- }
-
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
@@ -1360,29 +1417,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
struct ufs_clk_info *clki;
u32 cycles_in_1us = 0;
u32 core_clk_ctrl_reg;
+ unsigned long clk_freq;
int err;
+ if (hba->use_pm_opp && freq != ULONG_MAX) {
+ clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ if (clk_freq) {
+ cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ);
+ goto set_core_clk_ctrl;
+ }
+ }
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) &&
!strcmp(clki->name, "core_clk_unipro")) {
- if (!clki->max_freq)
+ if (!clki->max_freq) {
cycles_in_1us = 150; /* default for backwards compatibility */
- else if (freq == ULONG_MAX)
+ break;
+ }
+
+ if (freq == ULONG_MAX) {
cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
- else
- cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+ break;
+ }
+ if (is_scale_up)
+ cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
+ else
+ cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ);
break;
}
}
+set_core_clk_ctrl:
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
@@ -1419,13 +1493,13 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long f
{
int ret;
- ret = ufs_qcom_cfg_timers(hba, true);
+ ret = ufs_qcom_cfg_timers(hba, true, freq);
if (ret) {
dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
return ret;
}
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, true, freq);
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1456,8 +1530,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
+ int ret;
+
+ ret = ufs_qcom_cfg_timers(hba, false, freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__);
+ return ret;
+ }
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, freq);
+ return ufs_qcom_set_core_clk_ctrl(hba, false, freq);
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
@@ -1609,6 +1690,85 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
return 0;
}
+static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int i, j, nminor = 0, testbus_len = 0;
+ u32 *testbus __free(kfree) = NULL;
+ char *prefix;
+
+ testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ for (j = 0; j < TSTBUS_MAX; j++) {
+ nminor = testbus_info[j].nminor;
+ prefix = testbus_info[j].prefix;
+ host->testbus.select_major = j;
+ testbus_len = nminor * sizeof(u32);
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ }
+}
+
+static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix, enum ufshcd_res id)
+{
+ u32 *regs __free(kfree) = NULL;
+ size_t pos;
+
+ if (offset % 4 != 0 || len % 4 != 0)
+ return -EINVAL;
+
+ regs = kzalloc(len, GFP_ATOMIC);
+ if (!regs)
+ return -ENOMEM;
+
+ for (pos = 0; pos < len; pos += 4)
+ regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+
+ print_hex_dump(KERN_ERR, prefix,
+ len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+ 16, 4, regs, len, false);
+
+ return 0;
+}
+
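Both dump helpers rely on scope-based cleanup from <linux/cleanup.h>: a pointer declared with __free(kfree) is handed to kfree() automatically when it leaves scope, which removes the explicit error-path frees. A minimal sketch of the idiom, using a hypothetical demo_dump() helper:

static int demo_dump(size_t len)
{
	u32 *buf __free(kfree) = kmalloc_array(len, sizeof(u32), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (len < 4)
		return -EINVAL;		/* buf is freed automatically here */

	/* ... fill buf and print_hex_dump() it ... */
	return 0;			/* ... and freed here as well */
}
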
+static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
+{
+ struct dump_info {
+ size_t offset;
+ size_t len;
+ const char *prefix;
+ enum ufshcd_res id;
+ };
+
+ struct dump_info mcq_dumps[] = {
+ {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
+ {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
+ {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
+ {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
+ {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
+ {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
+ {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
+ {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
+ {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
+ {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
+ {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
+ ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
+ mcq_dumps[i].prefix, mcq_dumps[i].id);
+ cond_resched();
+ }
+}
+
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
u32 reg;
@@ -1616,6 +1776,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
host = ufshcd_get_variant(hba);
+ dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT));
+ dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT));
+ dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT));
+
+ dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n",
+ ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT));
+
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
@@ -1658,6 +1827,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
+
+ if (hba->mcq_enabled) {
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ);
+ ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers ");
+ }
+
+ /* Ensure the dumps below occur only in task context due to blocking calls. */
+ if (in_task()) {
+ /* Dump MCQ Host Vendor Specific Registers */
+ if (hba->mcq_enabled)
+ ufs_qcom_dump_mcq_hci_regs(hba);
+
+ /* voluntarily yield the CPU as we are dumping too much data */
+ ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
+ cond_resched();
+ ufs_qcom_dump_testbus(hba);
+ }
}
/**
@@ -1849,25 +2035,38 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
ufshcd_mcq_config_esi(hba, msg);
}
+struct ufs_qcom_irq {
+ unsigned int irq;
+ unsigned int idx;
+ struct ufs_hba *hba;
+};
+
static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
- struct msi_desc *desc = data;
- struct device *dev = msi_desc_to_dev(desc);
- struct ufs_hba *hba = dev_get_drvdata(dev);
- u32 id = desc->msi_index;
- struct ufs_hw_queue *hwq = &hba->uhq[id];
+ struct ufs_qcom_irq *qi = data;
+ struct ufs_hba *hba = qi->hba;
+ struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
- ufshcd_mcq_write_cqis(hba, 0x1, id);
+ ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
ufshcd_mcq_poll_cqe_lock(hba, hwq);
return IRQ_HANDLED;
}
+static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
+{
+ for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
+ devm_free_irq(q->hba->dev, q->irq, q->hba);
+
+ platform_device_msi_free_irqs_all(uqi->hba->dev);
+ devm_kfree(uqi->hba->dev, uqi);
+}
+
+DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
+
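DEFINE_FREE() registers ufs_qcom_irq_free() as the cleanup routine behind the __free(ufs_qcom_irq) annotation used below, and retain_and_null_ptr() disarms the cleanup once setup succeeds. A generic sketch of that pairing, with a hypothetical struct demo_res:

struct demo_res {
	void *buf;
};

static void demo_res_free(struct demo_res *r)
{
	kfree(r->buf);
	kfree(r);
}

DEFINE_FREE(demo_res, struct demo_res *, if (_T) demo_res_free(_T))

static int demo_setup(void)
{
	struct demo_res *r __free(demo_res) = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;

	/* any early return in here frees r automatically */

	retain_and_null_ptr(r);	/* success: keep the resource, disarm cleanup */
	return 0;
}
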
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct msi_desc *desc;
- struct msi_desc *failed_desc = NULL;
int nr_irqs, ret;
if (host->esi_enabled)
@@ -1878,6 +2077,14 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+
+ struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
+ devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+ if (!qi)
+ return -ENOMEM;
+ /* Preset so __free() has a pointer to hba in all error paths */
+ qi[0].hba = hba;
+
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
@@ -1885,48 +2092,80 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
return ret;
}
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- ret = devm_request_irq(hba->dev, desc->irq,
- ufs_qcom_mcq_esi_handler,
- IRQF_SHARED, "qcom-mcq-esi", desc);
+ for (int idx = 0; idx < nr_irqs; idx++) {
+ qi[idx].irq = msi_get_virq(hba->dev, idx);
+ qi[idx].idx = idx;
+ qi[idx].hba = hba;
+
+ ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", qi + idx);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
- __func__, desc->irq, ret);
- failed_desc = desc;
- break;
+ __func__, qi[idx].irq, ret);
+ qi[idx].irq = 0;
+ return ret;
}
}
- msi_unlock_descs(hba->dev);
- if (ret) {
- /* Rewind */
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- if (desc == failed_desc)
- break;
- devm_free_irq(hba->dev, desc->irq, hba);
+ retain_and_null_ptr(qi);
+
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
+ }
+ ufshcd_mcq_enable_esi(hba);
+ host->esi_enabled = true;
+ return 0;
+}
+
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+ unsigned long freq, char *name)
+{
+ struct ufs_clk_info *clki;
+ struct dev_pm_opp *opp;
+ unsigned long clk_freq;
+ int idx = 0;
+ bool found = false;
+
+ opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true);
+ if (IS_ERR(opp)) {
+ dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq);
+ return 0;
+ }
+
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, name)) {
+ found = true;
+ break;
}
- msi_unlock_descs(hba->dev);
- platform_device_msi_free_irqs_all(hba->dev);
- } else {
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0)
- ufshcd_rmwl(hba, ESI_VEC_MASK,
- FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
- REG_UFS_CFG3);
- ufshcd_mcq_enable_esi(hba);
- host->esi_enabled = true;
+
+ idx++;
}
- return ret;
+ if (!found) {
+ dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
+ dev_pm_opp_put(opp);
+ return 0;
+ }
+
+ clk_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+
+ dev_pm_opp_put(opp);
+
+ return clk_freq;
}
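The helper above resolves an OPP-level frequency to a named clock's rate by position: the clock's index in hba->clk_list_head doubles as the index into the matching OPP entry. A condensed sketch of the two OPP calls involved, with hypothetical demo_* names:

static unsigned long demo_opp_clk_rate(struct device *dev,
				       unsigned long freq, int idx)
{
	struct dev_pm_opp *opp;
	unsigned long rate;

	opp = dev_pm_opp_find_freq_exact_indexed(dev, freq, 0, true);
	if (IS_ERR(opp))
		return 0;	/* no OPP table entry for this frequency */

	rate = dev_pm_opp_get_freq_indexed(opp, idx);	/* idx-th clock's rate */
	dev_pm_opp_put(opp);	/* drop the reference taken by the lookup */
	return rate;
}
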
static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
- u32 gear = 0;
+ u32 gear = UFS_HS_DONT_CHANGE;
+ unsigned long unipro_freq;
- switch (freq) {
+ if (!hba->use_pm_opp)
+ return gear;
+
+ unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+ switch (unipro_freq) {
case 403000000:
gear = UFS_HS_G5;
break;
@@ -1946,10 +2185,10 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
break;
default:
dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
- break;
+ return UFS_HS_DONT_CHANGE;
}
- return gear;
+ return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
}
/*
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 05d4cb569c50..0a5cfc2dd4f7 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -50,6 +50,8 @@ enum {
*/
UFS_AH8_CFG = 0xFC,
+ UFS_RD_REG_MCQ = 0xD00,
+
REG_UFS_MEM_ICE_CONFIG = 0x260C,
REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
@@ -75,6 +77,15 @@ enum {
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
+/* QCOM UFS HC vendor specific Hibern8 count registers */
+enum {
+ REG_UFS_HW_H8_ENTER_CNT = 0x2700,
+ REG_UFS_SW_H8_ENTER_CNT = 0x2704,
+ REG_UFS_SW_AFTER_HW_H8_ENTER_CNT = 0x2708,
+ REG_UFS_HW_H8_EXIT_CNT = 0x270C,
+ REG_UFS_SW_H8_EXIT_CNT = 0x2710,
+};
+
enum {
UFS_MEM_CQIS_VS = 0x8,
};
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 69c1df0f4ca5..f19efad4d6f8 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -65,6 +65,16 @@ struct hv_uio_private_data {
char send_name[32];
};
+static void set_event(struct vmbus_channel *channel, s32 irq_state)
+{
+ channel->inbound.ring_buffer->interrupt_mask = !irq_state;
+ if (!channel->offermsg.monitor_allocated && irq_state) {
+ /* MB is needed for host to see the interrupt mask first */
+ virt_mb();
+ vmbus_set_event(channel);
+ }
+}
+
/*
* This is the irqcontrol callback to be registered to uio_info.
* It can be used to disable/enable interrupt from user space processes.
@@ -79,12 +89,15 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
{
struct hv_uio_private_data *pdata = info->priv;
struct hv_device *dev = pdata->device;
+ struct vmbus_channel *primary, *sc;
- dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
- virt_mb();
+ primary = dev->channel;
+ set_event(primary, irq_state);
- if (!dev->channel->offermsg.monitor_allocated && irq_state)
- vmbus_setevent(dev->channel);
+ mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(sc, &primary->sc_list, sc_list)
+ set_event(sc, irq_state);
+ mutex_unlock(&vmbus_connection.channel_mutex);
return 0;
}
@@ -95,12 +108,19 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
static void hv_uio_channel_cb(void *context)
{
struct vmbus_channel *chan = context;
- struct hv_device *hv_dev = chan->device_obj;
- struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
+ struct hv_device *hv_dev;
+ struct hv_uio_private_data *pdata;
chan->inbound.ring_buffer->interrupt_mask = 1;
virt_mb();
+ /*
+ * The callback may come from a subchannel, in which case look
+ * for the hv device in the primary channel
+ */
+ hv_dev = chan->primary_channel ?
+ chan->primary_channel->device_obj : chan->device_obj;
+ pdata = hv_get_drvdata(hv_dev);
uio_event_notify(&pdata->info);
}
@@ -243,6 +263,9 @@ hv_uio_probe(struct hv_device *dev,
if (!ring_size)
ring_size = SZ_2M;
+ /* Adjust ring size if necessary to have it page aligned */
+ ring_size = VMBUS_RING_SIZE(ring_size);
+
pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -274,13 +297,13 @@ hv_uio_probe(struct hv_device *dev,
pdata->info.mem[INT_PAGE_MAP].name = "int_page";
pdata->info.mem[INT_PAGE_MAP].addr
= (uintptr_t)vmbus_connection.int_page;
- pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[INT_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
pdata->info.mem[MON_PAGE_MAP].addr
= (uintptr_t)vmbus_connection.monitor_pages[1];
- pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
+ pdata->info.mem[MON_PAGE_MAP].size = HV_HYP_PAGE_SIZE;
pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
if (channel->device_id == HV_NIC) {
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index c6b9ad12e8fe..a12ab90b3db7 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -582,7 +582,7 @@ struct cxacru_timer {
static void cxacru_timeout_kill(struct timer_list *t)
{
- struct cxacru_timer *timer = from_timer(timer, t, timer);
+ struct cxacru_timer *timer = timer_container_of(timer, t, timer);
usb_unlink_urb(timer->urb);
}
@@ -598,7 +598,7 @@ static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT));
wait_for_completion(done);
timer_delete_sync(&timer.timer);
- destroy_timer_on_stack(&timer.timer);
+ timer_destroy_on_stack(&timer.timer);
if (actual_length)
*actual_length = urb->actual_length;
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 27e3d35ee7dd..773ac2725532 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -559,8 +559,9 @@ static void speedtch_check_status(struct work_struct *work)
static void speedtch_status_poll(struct timer_list *t)
{
- struct speedtch_instance_data *instance = from_timer(instance, t,
- status_check_timer);
+ struct speedtch_instance_data *instance = timer_container_of(instance,
+ t,
+ status_check_timer);
schedule_work(&instance->status_check_work);
@@ -573,8 +574,9 @@ static void speedtch_status_poll(struct timer_list *t)
static void speedtch_resubmit_int(struct timer_list *t)
{
- struct speedtch_instance_data *instance = from_timer(instance, t,
- resubmit_timer);
+ struct speedtch_instance_data *instance = timer_container_of(instance,
+ t,
+ resubmit_timer);
struct urb *int_urb = instance->int_urb;
int ret;
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index a6a05e85ef8c..5f3ad9a99d9e 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -993,7 +993,7 @@ static int usbatm_heavy_init(struct usbatm_data *instance)
static void usbatm_tasklet_schedule(struct timer_list *t)
{
- struct usbatm_channel *channel = from_timer(channel, t, delay);
+ struct usbatm_channel *channel = timer_container_of(channel, t, delay);
tasklet_schedule(&channel->tasklet);
}
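The hunks above are part of the tree-wide rename of from_timer() to timer_container_of(); behaviour is unchanged — the callback still recovers its containing structure from the embedded timer_list. A minimal sketch with a hypothetical struct demo_dev:

#include <linux/timer.h>

struct demo_dev {
	struct timer_list poll_timer;
	unsigned int hits;
};

static void demo_timer_fn(struct timer_list *t)
{
	/* Recover the enclosing structure from its timer_list member. */
	struct demo_dev *dd = timer_container_of(dd, t, poll_timer);

	dd->hits++;
}

/* Setup side: timer_setup(&dd->poll_timer, demo_timer_fn, 0); */
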
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index 59ec505e198a..735df88774e4 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -179,8 +179,6 @@ err_phy3_init:
/**
* cdns3_plat_remove() - unbind drd driver and clean up
* @pdev: Pointer to Linux platform device
- *
- * Returns 0 on success otherwise negative errno
*/
static void cdns3_plat_remove(struct platform_device *pdev)
{
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 4824a10df07e..55f95f41b3b4 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -29,7 +29,8 @@
unsigned int cdnsp_port_speed(unsigned int port_status)
{
/*Detect gadget speed based on PORTSC register*/
- if (DEV_SUPERSPEEDPLUS(port_status))
+ if (DEV_SUPERSPEEDPLUS(port_status) ||
+ DEV_SSP_GEN1x2(port_status) || DEV_SSP_GEN2x2(port_status))
return USB_SPEED_SUPER_PLUS;
else if (DEV_SUPERSPEED(port_status))
return USB_SPEED_SUPER;
@@ -547,6 +548,7 @@ int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
dma_addr_t cmd_deq_dma;
union cdnsp_trb *event;
u32 cycle_state;
+ u32 retry = 10;
int ret, val;
u64 cmd_dma;
u32 flags;
@@ -578,8 +580,23 @@ int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
flags = le32_to_cpu(event->event_cmd.flags);
/* Check the owner of the TRB. */
- if ((flags & TRB_CYCLE) != cycle_state)
+ if ((flags & TRB_CYCLE) != cycle_state) {
+ /*
+ * Give the controller some extra time to finish the
+ * command before returning an error code. Checking
+ * CMD_RING_BUSY is not sufficient because this bit is
+ * cleared to '0' when the Command Descriptor has been
+ * executed by the controller, not when the command
+ * completion event has been added to the event ring.
+ */
+ if (retry--) {
+ udelay(20);
+ continue;
+ }
+
return -EINVAL;
+ }
cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
index 12534be52f39..2afa3e558f85 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.h
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -285,11 +285,15 @@ struct cdnsp_port_regs {
#define XDEV_HS (0x3 << 10)
#define XDEV_SS (0x4 << 10)
#define XDEV_SSP (0x5 << 10)
+#define XDEV_SSP1x2 (0x6 << 10)
+#define XDEV_SSP2x2 (0x7 << 10)
#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0 << 10))
#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SSP_GEN1x2(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP1x2)
+#define DEV_SSP_GEN2x2(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP2x2)
#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
/* Port Link State Write Strobe - set this when changing link state */
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 4f8bfd242b59..780f4d151345 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -98,6 +99,7 @@ struct ci_hdrc_imx_data {
struct clk *clk;
struct clk *clk_wakeup;
struct imx_usbmisc_data *usbmisc_data;
+ int wakeup_irq;
bool supports_runtime_pm;
bool override_phy_control;
bool in_lpm;
@@ -336,6 +338,16 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
return ret;
}
+static irqreturn_t ci_wakeup_irq_handler(int irq, void *data)
+{
+ struct ci_hdrc_imx_data *imx_data = data;
+
+ disable_irq_nosync(irq);
+ pm_runtime_resume(&imx_data->ci_pdev->dev);
+
+ return IRQ_HANDLED;
+}
+
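The wakeup interrupt below is requested with IRQF_NO_AUTOEN, so it stays masked until the suspend path arms it with enable_irq(); the handler then disables it again and resumes the controller. A short sketch of that request, with hypothetical demo names:

static irqreturn_t demo_wakeup_handler(int irq, void *priv);

static int demo_probe_wakeup(struct device *dev, int wakeup_irq, void *priv)
{
	/* Requested disabled: armed with enable_irq() only across suspend. */
	return devm_request_threaded_irq(dev, wakeup_irq, NULL,
					 demo_wakeup_handler,
					 IRQF_ONESHOT | IRQF_NO_AUTOEN,
					 "demo-wakeup", priv);
}

/* suspend: enable_irq(wakeup_irq); handler/resume: disable_irq_nosync(). */
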
static void ci_hdrc_imx_disable_regulator(void *arg)
{
struct ci_hdrc_imx_data *data = arg;
@@ -494,6 +506,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
data->supports_runtime_pm = true;
+ data->wakeup_irq = platform_get_irq_optional(pdev, 1);
+ if (data->wakeup_irq > 0) {
+ ret = devm_request_threaded_irq(dev, data->wakeup_irq,
+ NULL, ci_wakeup_irq_handler,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ pdata.name, data);
+ if (ret)
+ goto err_clk;
+ }
+
ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
dev_err(dev, "usbmisc init failed, ret=%d\n", ret);
@@ -602,6 +624,10 @@ static int imx_controller_suspend(struct device *dev,
}
imx_disable_unprepare_clks(dev);
+
+ if (data->wakeup_irq > 0)
+ enable_irq(data->wakeup_irq);
+
if (data->plat_data->flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
@@ -626,6 +652,10 @@ static int imx_controller_resume(struct device *dev,
if (data->plat_data->flags & CI_HDRC_PMQOS)
cpu_latency_qos_add_request(&data->pm_qos_req, 0);
+ if (data->wakeup_irq > 0 &&
+ !irqd_irq_disabled(irq_get_irq_data(data->wakeup_irq)))
+ disable_irq_nosync(data->wakeup_irq);
+
ret = imx_prepare_enable_clks(dev);
if (ret)
return ret;
@@ -661,6 +691,10 @@ static int ci_hdrc_imx_suspend(struct device *dev)
return ret;
pinctrl_pm_select_sleep_state(dev);
+
+ if (data->wakeup_irq > 0 && device_may_wakeup(dev))
+ enable_irq_wake(data->wakeup_irq);
+
return ret;
}
@@ -669,6 +703,9 @@ static int ci_hdrc_imx_resume(struct device *dev)
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
+ if (data->wakeup_irq > 0 && device_may_wakeup(dev))
+ disable_irq_wake(data->wakeup_irq);
+
pinctrl_pm_select_default_state(dev);
ret = imx_controller_resume(dev, PMSG_RESUME);
if (!ret && data->supports_runtime_pm) {
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 6243d8005f5d..118b9a68496b 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -139,6 +139,22 @@
#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN)
+/*
+ * HSIO Block Control Register
+ */
+
+#define BLKCTL_USB_WAKEUP_CTRL 0x0
+#define BLKCTL_OTG_WAKE_ENABLE BIT(31)
+#define BLKCTL_OTG_VBUS_SESSVALID BIT(4)
+#define BLKCTL_OTG_ID_WAKEUP_EN BIT(2)
+#define BLKCTL_OTG_VBUS_WAKEUP_EN BIT(1)
+#define BLKCTL_OTG_DPDM_WAKEUP_EN BIT(0)
+
+#define BLKCTL_WAKEUP_SOURCE (BLKCTL_OTG_WAKE_ENABLE | \
+ BLKCTL_OTG_ID_WAKEUP_EN | \
+ BLKCTL_OTG_VBUS_WAKEUP_EN | \
+ BLKCTL_OTG_DPDM_WAKEUP_EN)
+
struct usbmisc_ops {
/* It's called once when probe a usb device */
int (*init)(struct imx_usbmisc_data *data);
@@ -159,6 +175,7 @@ struct usbmisc_ops {
struct imx_usbmisc {
void __iomem *base;
+ void __iomem *blkctl;
spinlock_t lock;
const struct usbmisc_ops *ops;
};
@@ -1016,6 +1033,44 @@ static int usbmisc_imx6sx_power_lost_check(struct imx_usbmisc_data *data)
return 0;
}
+static u32 usbmisc_blkctl_wakeup_setting(struct imx_usbmisc_data *data)
+{
+ u32 wakeup_setting = BLKCTL_WAKEUP_SOURCE;
+
+ if (data->ext_id || data->available_role != USB_DR_MODE_OTG)
+ wakeup_setting &= ~BLKCTL_OTG_ID_WAKEUP_EN;
+
+ if (data->ext_vbus || data->available_role == USB_DR_MODE_HOST)
+ wakeup_setting &= ~BLKCTL_OTG_VBUS_WAKEUP_EN;
+
+ /* Select session valid as VBUS wakeup source */
+ wakeup_setting |= BLKCTL_OTG_VBUS_SESSVALID;
+
+ return wakeup_setting;
+}
+
+static int usbmisc_imx95_set_wakeup(struct imx_usbmisc_data *data, bool enabled)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ unsigned long flags;
+ u32 val;
+
+ if (!usbmisc->blkctl)
+ return 0;
+
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->blkctl + BLKCTL_USB_WAKEUP_CTRL);
+ val &= ~BLKCTL_WAKEUP_SOURCE;
+
+ if (enabled)
+ val |= usbmisc_blkctl_wakeup_setting(data);
+
+ writel(val, usbmisc->blkctl + BLKCTL_USB_WAKEUP_CTRL);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ return 0;
+}
+
static const struct usbmisc_ops imx25_usbmisc_ops = {
.init = usbmisc_imx25_init,
.post = usbmisc_imx25_post,
@@ -1068,6 +1123,14 @@ static const struct usbmisc_ops imx7ulp_usbmisc_ops = {
.power_lost_check = usbmisc_imx7d_power_lost_check,
};
+static const struct usbmisc_ops imx95_usbmisc_ops = {
+ .init = usbmisc_imx7d_init,
+ .set_wakeup = usbmisc_imx95_set_wakeup,
+ .charger_detection = imx7d_charger_detection,
+ .power_lost_check = usbmisc_imx7d_power_lost_check,
+ .vbus_comparator_on = usbmisc_imx7d_vbus_comparator_on,
+};
+
static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
{
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
@@ -1289,6 +1352,10 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
.compatible = "fsl,imx8ulp-usbmisc",
.data = &imx7ulp_usbmisc_ops,
},
+ {
+ .compatible = "fsl,imx95-usbmisc",
+ .data = &imx95_usbmisc_ops,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids);
@@ -1296,6 +1363,7 @@ MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids);
static int usbmisc_imx_probe(struct platform_device *pdev)
{
struct imx_usbmisc *data;
+ struct resource *res;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -1307,6 +1375,15 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ data->blkctl = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->blkctl))
+ return PTR_ERR(data->blkctl);
+ } else if (device_is_compatible(&pdev->dev, "fsl,imx95-usbmisc")) {
+ dev_warn(&pdev->dev, "wakeup setting is missing\n");
+ }
+
data->ops = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, data);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 16e7fa4d488d..ecd6d1f39e49 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -92,7 +92,6 @@ struct wdm_device {
u16 wMaxCommand;
u16 wMaxPacketSize;
__le16 inum;
- int reslength;
int length;
int read;
int count;
@@ -214,6 +213,11 @@ static void wdm_in_callback(struct urb *urb)
if (desc->rerr == 0 && status != -EPIPE)
desc->rerr = status;
+ if (length == 0) {
+ dev_dbg(&desc->intf->dev, "received ZLP\n");
+ goto skip_zlp;
+ }
+
if (length + desc->length > desc->wMaxCommand) {
/* The buffer would overflow */
set_bit(WDM_OVERFLOW, &desc->flags);
@@ -222,18 +226,18 @@ static void wdm_in_callback(struct urb *urb)
if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
memmove(desc->ubuf + desc->length, desc->inbuf, length);
desc->length += length;
- desc->reslength = length;
}
}
skip_error:
if (desc->rerr) {
/*
- * Since there was an error, userspace may decide to not read
- * any data after poll'ing.
+ * If there was a ZLP or an error, userspace may decide not to
+ * read any data after poll'ing.
* We should respond to further attempts from the device to send
* data, so that we can get unstuck.
*/
+skip_zlp:
schedule_work(&desc->service_outs_intr);
} else {
set_bit(WDM_READ, &desc->flags);
@@ -585,15 +589,6 @@ retry:
goto retry;
}
- if (!desc->reslength) { /* zero length read */
- dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n");
- clear_bit(WDM_READ, &desc->flags);
- rv = service_outstanding_interrupt(desc);
- spin_unlock_irq(&desc->iuspin);
- if (rv < 0)
- goto err;
- goto retry;
- }
cntr = desc->length;
spin_unlock_irq(&desc->iuspin);
}
@@ -1016,7 +1011,7 @@ static void service_interrupt_work(struct work_struct *work)
spin_lock_irq(&desc->iuspin);
service_outstanding_interrupt(desc);
- if (!desc->resp_count) {
+ if (!desc->resp_count && (desc->length || desc->rerr)) {
set_bit(WDM_READ, &desc->flags);
wake_up(&desc->wait);
}
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 740d2d2b19fb..75de29725a45 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -483,6 +483,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
u8 tag;
int rv;
long wait_rv;
+ unsigned long expire;
dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
data->iin_ep_present);
@@ -512,10 +513,11 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
}
if (data->iin_ep_present) {
+ expire = msecs_to_jiffies(file_data->timeout);
wait_rv = wait_event_interruptible_timeout(
data->waitq,
atomic_read(&data->iin_data_valid) != 0,
- file_data->timeout);
+ expire);
if (wait_rv < 0) {
dev_dbg(dev, "wait interrupted %ld\n", wait_rv);
rv = wait_rv;
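The fix converts file_data->timeout, which is kept in milliseconds, to jiffies before passing it to wait_event_interruptible_timeout(); the API expects a jiffies count, so passing the raw value made the effective timeout depend on CONFIG_HZ. A sketch of the corrected pattern, with a hypothetical struct demo_dev:

struct demo_dev {
	wait_queue_head_t waitq;
	atomic_t ready;
};

static int demo_wait_ready(struct demo_dev *dd, unsigned int timeout_ms)
{
	long rv = wait_event_interruptible_timeout(dd->waitq,
						   atomic_read(&dd->ready),
						   msecs_to_jiffies(timeout_ms));

	if (rv == 0)
		return -ETIMEDOUT;	/* timed out, condition still false */
	if (rv < 0)
		return rv;		/* -ERESTARTSYS: hit by a signal */
	return 0;			/* condition true, rv = jiffies left */
}
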
@@ -563,14 +565,15 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
rv = usbtmc_get_stb(file_data, &stb);
- if (rv > 0) {
- srq_asserted = atomic_xchg(&file_data->srq_asserted,
- srq_asserted);
- if (srq_asserted)
- stb |= 0x40; /* Set RQS bit */
+ if (rv < 0)
+ return rv;
+
+ srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
+ if (srq_asserted)
+ stb |= 0x40; /* Set RQS bit */
+
+ rv = put_user(stb, (__u8 __user *)arg);
- rv = put_user(stb, (__u8 __user *)arg);
- }
return rv;
}
@@ -2199,7 +2202,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case USBTMC_IOCTL_GET_STB:
retval = usbtmc_get_stb(file_data, &tmp_byte);
- if (retval > 0)
+ if (!retval)
retval = put_user(tmp_byte, (__u8 __user *)arg);
break;
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index 1e36be2a28fd..421c3af38e06 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -21,6 +21,9 @@
#include <linux/regulator/consumer.h>
#include <linux/string_choices.h>
#include <linux/usb/role.h>
+#include <linux/idr.h>
+
+static DEFINE_IDA(usb_conn_ida);
#define USB_GPIO_DEB_MS 20 /* ms */
#define USB_GPIO_DEB_US ((USB_GPIO_DEB_MS) * 1000) /* us */
@@ -30,6 +33,7 @@
struct usb_conn_info {
struct device *dev;
+ int conn_id; /* store the IDA-allocated ID */
struct usb_role_switch *role_sw;
enum usb_role last_role;
struct regulator *vbus;
@@ -161,7 +165,17 @@ static int usb_conn_psy_register(struct usb_conn_info *info)
.fwnode = dev_fwnode(dev),
};
- desc->name = "usb-charger";
+ info->conn_id = ida_alloc(&usb_conn_ida, GFP_KERNEL);
+ if (info->conn_id < 0)
+ return info->conn_id;
+
+ desc->name = devm_kasprintf(dev, GFP_KERNEL, "usb-charger-%d",
+ info->conn_id);
+ if (!desc->name) {
+ ida_free(&usb_conn_ida, info->conn_id);
+ return -ENOMEM;
+ }
+
desc->properties = usb_charger_properties;
desc->num_properties = ARRAY_SIZE(usb_charger_properties);
desc->get_property = usb_charger_get_property;
@@ -169,8 +183,10 @@ static int usb_conn_psy_register(struct usb_conn_info *info)
cfg.drv_data = info;
info->charger = devm_power_supply_register(dev, desc, &cfg);
- if (IS_ERR(info->charger))
- dev_err(dev, "Unable to register charger\n");
+ if (IS_ERR(info->charger)) {
+ dev_err(dev, "Unable to register charger %d\n", info->conn_id);
+ ida_free(&usb_conn_ida, info->conn_id);
+ }
return PTR_ERR_OR_ZERO(info->charger);
}
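The IDA gives each connector's power supply a unique name in place of the fixed "usb-charger", so several instances can register without a name collision. A minimal sketch of the allocate/format/free pattern, with a hypothetical demo_register():

static DEFINE_IDA(demo_ida);

static int demo_register(struct device *dev)
{
	const char *name;
	int id = ida_alloc(&demo_ida, GFP_KERNEL);

	if (id < 0)
		return id;

	name = devm_kasprintf(dev, GFP_KERNEL, "usb-charger-%d", id);
	if (!name) {
		ida_free(&demo_ida, id);	/* undo on the error path */
		return -ENOMEM;
	}

	/* register 'name'...; mirror with ida_free(&demo_ida, id) on remove */
	return 0;
}
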
@@ -278,6 +294,9 @@ static void usb_conn_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&info->dw_det);
+ if (info->charger)
+ ida_free(&usb_conn_ida, info->conn_id);
+
if (info->last_role == USB_ROLE_HOST && info->vbus)
regulator_disable(info->vbus);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 13bd4ec4ea5f..fc0cfd94cbab 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -307,7 +307,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
goto skip_to_next_endpoint_or_interface_descriptor;
}
- i = d->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ i = usb_endpoint_num(d);
if (i == 0) {
dev_notice(ddev, "config %d interface %d altsetting %d has an "
"invalid descriptor for endpoint zero, skipping\n",
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a63c793bac21..c22de97432a0 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -775,7 +775,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
/* timer callback */
static void rh_timer_func (struct timer_list *t)
{
- struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
+ struct usb_hcd *_hcd = timer_container_of(_hcd, t, rh_timer);
usb_hcd_poll_rh_status(_hcd);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0e1dd6ef60a7..770d1e91183c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -697,7 +697,7 @@ static void hub_resubmit_irq_urb(struct usb_hub *hub)
static void hub_retry_irq_urb(struct timer_list *t)
{
- struct usb_hub *hub = from_timer(hub, t, irq_urb_retry);
+ struct usb_hub *hub = timer_container_of(hub, t, irq_urb_retry);
hub_resubmit_irq_urb(hub);
}
@@ -4160,7 +4160,7 @@ static int usb_set_device_initiated_lpm(struct usb_device *udev,
"for unconfigured device.\n",
__func__, str_enable_disable(enable),
usb3_lpm_names[state]);
- return 0;
+ return -EINVAL;
}
if (enable) {
@@ -4234,9 +4234,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
}
/*
- * Don't allow device intiated U1/U2 if the system exit latency + one bus
- * interval is greater than the minimum service interval of any active
- * periodic endpoint. See USB 3.2 section 9.4.9
+ * Don't allow device initiated U1/U2 if device isn't in the configured state,
+ * or the system exit latency + one bus interval is greater than the minimum
+ * service interval of any active periodic endpoint. See USB 3.2 section 9.4.9
*/
static bool usb_device_may_initiate_lpm(struct usb_device *udev,
enum usb3_link_state state)
@@ -4244,7 +4244,7 @@ static bool usb_device_may_initiate_lpm(struct usb_device *udev,
unsigned int sel; /* us */
int i, j;
- if (!udev->lpm_devinit_allow)
+ if (!udev->lpm_devinit_allow || !udev->actconfig)
return false;
if (state == USB3_LPM_U1)
@@ -4292,7 +4292,7 @@ static bool usb_device_may_initiate_lpm(struct usb_device *udev,
* driver know about it. If that call fails, it should be harmless, and just
* take up slightly more bus bandwidth for unnecessary U1/U2 exit latency.
*/
-static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
+static int usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
int timeout;
@@ -4301,7 +4301,7 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
/* Skip if the device BOS descriptor couldn't be read */
if (!udev->bos)
- return;
+ return -EINVAL;
u1_mel = udev->bos->ss_cap->bU1devExitLat;
u2_mel = udev->bos->ss_cap->bU2DevExitLat;
@@ -4312,7 +4312,7 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
*/
if ((state == USB3_LPM_U1 && u1_mel == 0) ||
(state == USB3_LPM_U2 && u2_mel == 0))
- return;
+ return -EINVAL;
/* We allow the host controller to set the U1/U2 timeout internally
* first, so that it can change its schedule to account for the
@@ -4323,13 +4323,13 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
/* xHCI host controller doesn't want to enable this LPM state. */
if (timeout == 0)
- return;
+ return -EINVAL;
if (timeout < 0) {
dev_warn(&udev->dev, "Could not enable %s link state, "
"xHCI error %i.\n", usb3_lpm_names[state],
timeout);
- return;
+ return timeout;
}
if (usb_set_lpm_timeout(udev, state, timeout)) {
@@ -4338,29 +4338,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
* host know that this link state won't be enabled.
*/
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
- return;
- }
-
- /* Only a configured device will accept the Set Feature
- * U1/U2_ENABLE
- */
- if (udev->actconfig &&
- usb_device_may_initiate_lpm(udev, state)) {
- if (usb_set_device_initiated_lpm(udev, state, true)) {
- /*
- * Request to enable device initiated U1/U2 failed,
- * better to turn off lpm in this case.
- */
- usb_set_lpm_timeout(udev, state, 0);
- hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
- return;
- }
+ return -EBUSY;
}
if (state == USB3_LPM_U1)
udev->usb3_lpm_u1_enabled = 1;
else if (state == USB3_LPM_U2)
udev->usb3_lpm_u2_enabled = 1;
+
+ return 0;
}
/*
* Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
@@ -4393,8 +4379,6 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
if (usb_set_lpm_timeout(udev, state, 0))
return -EBUSY;
- usb_set_device_initiated_lpm(udev, state, false);
-
if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state))
dev_warn(&udev->dev, "Could not disable xHCI %s timeout, "
"bus schedule bandwidth may be impacted.\n",
@@ -4424,6 +4408,7 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
int usb_disable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd;
+ int err;
if (!udev || !udev->parent ||
udev->speed < USB_SPEED_SUPER ||
@@ -4441,14 +4426,19 @@ int usb_disable_lpm(struct usb_device *udev)
/* If LPM is enabled, attempt to disable it. */
if (usb_disable_link_state(hcd, udev, USB3_LPM_U1))
- goto enable_lpm;
+ goto disable_failed;
if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
- goto enable_lpm;
+ goto disable_failed;
+
+ err = usb_set_device_initiated_lpm(udev, USB3_LPM_U1, false);
+ if (!err)
+ usb_set_device_initiated_lpm(udev, USB3_LPM_U2, false);
return 0;
-enable_lpm:
- usb_enable_lpm(udev);
+disable_failed:
+ udev->lpm_disable_count--;
+
return -EBUSY;
}
EXPORT_SYMBOL_GPL(usb_disable_lpm);
@@ -4509,10 +4499,24 @@ void usb_enable_lpm(struct usb_device *udev)
port_dev = hub->ports[udev->portnum - 1];
if (port_dev->usb3_lpm_u1_permit)
- usb_enable_link_state(hcd, udev, USB3_LPM_U1);
+ if (usb_enable_link_state(hcd, udev, USB3_LPM_U1))
+ return;
if (port_dev->usb3_lpm_u2_permit)
- usb_enable_link_state(hcd, udev, USB3_LPM_U2);
+ if (usb_enable_link_state(hcd, udev, USB3_LPM_U2))
+ return;
+
+ /*
+ * Enable device initiated U1/U2 with a SetFeature(U1/U2_ENABLE) request
+ * if system exit latency is short enough and device is configured
+ */
+ if (usb_device_may_initiate_lpm(udev, USB3_LPM_U1)) {
+ if (usb_set_device_initiated_lpm(udev, USB3_LPM_U1, true))
+ return;
+
+ if (usb_device_may_initiate_lpm(udev, USB3_LPM_U2))
+ usb_set_device_initiated_lpm(udev, USB3_LPM_U2, true);
+ }
}
EXPORT_SYMBOL_GPL(usb_enable_lpm);
@@ -6133,6 +6137,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
struct usb_hub *parent_hub;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor;
+ struct usb_interface *intf;
struct usb_host_bos *bos;
int i, j, ret = 0;
int port1 = udev->portnum;
@@ -6190,6 +6195,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (!udev->actconfig)
goto done;
+ /*
+ * Some devices can't handle setting default altsetting 0 with a
+ * Set-Interface request. Disable host-side endpoints of those
+ * interfaces here. Enable and reset them back after the host has set
+ * its internal endpoint structures during usb_hcd_alloc_bandwidth()
+ */
+ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ intf = udev->actconfig->interface[i];
+ if (intf->cur_altsetting->desc.bAlternateSetting == 0)
+ usb_disable_interface(udev, intf, true);
+ }
+
mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
if (ret < 0) {
@@ -6221,12 +6238,11 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
*/
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
struct usb_host_config *config = udev->actconfig;
- struct usb_interface *intf = config->interface[i];
struct usb_interface_descriptor *desc;
+ intf = config->interface[i];
desc = &intf->cur_altsetting->desc;
if (desc->bAlternateSetting == 0) {
- usb_disable_interface(udev, intf, true);
usb_enable_interface(udev, intf, true);
ret = 0;
} else {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 36d3df7d040c..53d68d20fb62 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -372,6 +372,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* SanDisk Corp. SanDisk 3.2Gen1 */
{ USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* SanDisk Extreme 55AE */
+ { USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM },
+
/* Realforce 87U Keyboard */
{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 935c0efea0b6..ea1ce8beb0cb 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -165,6 +165,8 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev)
return 0;
hub = usb_hub_to_struct_hub(udev->parent);
+ if (!hub)
+ return 0;
port_dev = hub->ports[udev->portnum - 1];
struct fwnode_handle *nhi_fwnode __free(fwnode_handle) =
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0b4685aad2d5..118fa4c93a79 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -695,15 +695,16 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
device_set_of_node_from_dev(&dev->dev, bus->sysdev);
dev_set_name(&dev->dev, "usb%d", bus->busnum);
} else {
+ int n;
+
/* match any labeling on the hubs; it's one-based */
if (parent->devpath[0] == '0') {
- snprintf(dev->devpath, sizeof dev->devpath,
- "%d", port1);
+ n = snprintf(dev->devpath, sizeof(dev->devpath), "%d", port1);
/* Root ports are not counted in route string */
dev->route = 0;
} else {
- snprintf(dev->devpath, sizeof dev->devpath,
- "%s.%d", parent->devpath, port1);
+ n = snprintf(dev->devpath, sizeof(dev->devpath), "%s.%d",
+ parent->devpath, port1);
/* Route string assumes hubs have less than 16 ports */
if (port1 < 15)
dev->route = parent->route +
@@ -712,6 +713,11 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->route = parent->route +
(15 << ((parent->level - 1)*4));
}
+ if (n >= sizeof(dev->devpath)) {
+ usb_put_hcd(bus_to_hcd(bus));
+ usb_put_dev(dev);
+ return NULL;
+ }
dev->dev.parent = &parent->dev;
dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
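The added check uses snprintf()'s return value — the number of characters that would have been written — so a result at or beyond the buffer size signals a truncated devpath. A small sketch of the same test, with hypothetical names:

static int demo_build_path(char *buf, size_t size,
			   const char *parent, int port)
{
	int n = snprintf(buf, size, "%s.%d", parent, port);

	if (n < 0 || (size_t)n >= size)
		return -ENAMETOOLONG;	/* output did not fit in buf */
	return 0;
}
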
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 300ea4969f0c..d5b622f78cf3 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4049,7 +4049,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
return -EINVAL;
}
- ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ ep_type = usb_endpoint_type(desc);
mps = usb_endpoint_maxp(desc);
mc = usb_endpoint_maxp_mult(desc);
@@ -4604,6 +4604,12 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
if (!hsotg)
return -ENODEV;
+ /* Exit clock gating when driver is stopped. */
+ if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
+ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+ }
+
/* all endpoints should be shutdown */
for (ep = 1; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 60ef8092259a..30eb8506617c 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3227,7 +3227,7 @@ host:
static void dwc2_wakeup_detected(struct timer_list *t)
{
- struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
+ struct dwc2_hsotg *hsotg = timer_container_of(hsotg, t, wkp_timer);
u32 hprt0;
dev_dbg(hsotg->dev, "%s()\n", __func__);
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index b0098792dd22..904fe0632b34 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1215,7 +1215,7 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*/
static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
- struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
+ struct dwc2_qh *qh = timer_container_of(qh, t, unreserve_timer);
struct dwc2_hsotg *hsotg = qh->hsotg;
unsigned long flags;
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 124eda2522d9..830e6c9e5fe0 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_USB_DWC3_MESON_G12A) += dwc3-meson-g12a.o
obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o
obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
+obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom-legacy.o
obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o
obj-$(CONFIG_USB_DWC3_XILINX) += dwc3-xilinx.o
obj-$(CONFIG_USB_DWC3_OCTEON) += dwc3-octeon.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 66a08b527165..2bc775a747f2 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -26,6 +26,7 @@
#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/devinfo.h>
#include <linux/reset.h>
#include <linux/bitfield.h>
@@ -36,6 +37,7 @@
#include "core.h"
#include "gadget.h"
+#include "glue.h"
#include "io.h"
#include "debug.h"
@@ -1699,6 +1701,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 tx_thr_num_pkt_prd = 0;
u8 tx_max_burst_prd = 0;
u8 tx_fifo_resize_max_num;
+ u16 num_hc_interrupters;
/* default to highest possible threshold */
lpm_nyet_threshold = 0xf;
@@ -1719,6 +1722,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
*/
tx_fifo_resize_max_num = 6;
+ /* default to a single XHCI interrupter */
+ num_hc_interrupters = 1;
+
dwc->maximum_speed = usb_get_maximum_speed(dev);
dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
dwc->dr_mode = usb_get_dr_mode(dev);
@@ -1765,6 +1771,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
&tx_thr_num_pkt_prd);
device_property_read_u8(dev, "snps,tx-max-burst-prd",
&tx_max_burst_prd);
+ device_property_read_u16(dev, "num-hc-interrupters",
+ &num_hc_interrupters);
+ /* The DWC3 core allows a max of 8 interrupters */
+ if (num_hc_interrupters > 8)
+ num_hc_interrupters = 8;
+
dwc->do_fifo_resize = device_property_read_bool(dev,
"tx-fifo-resize");
if (dwc->do_fifo_resize)
@@ -1851,6 +1863,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->tx_max_burst_prd = tx_max_burst_prd;
dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
+
+ dwc->num_hc_interrupters = num_hc_interrupters;
}
/* check whether the core supports IMOD */
@@ -2148,27 +2162,16 @@ static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc)
return usb_psy;
}
-static int dwc3_probe(struct platform_device *pdev)
+int dwc3_core_probe(const struct dwc3_probe_data *data)
{
- struct device *dev = &pdev->dev;
- struct resource *res, dwc_res;
+ struct dwc3 *dwc = data->dwc;
+ struct device *dev = dwc->dev;
+ struct resource dwc_res;
unsigned int hw_mode;
void __iomem *regs;
- struct dwc3 *dwc;
+ struct resource *res = data->res;
int ret;
- dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
- if (!dwc)
- return -ENOMEM;
-
- dwc->dev = dev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "missing memory resource\n");
- return -ENODEV;
- }
-
dwc->xhci_resources[0].start = res->start;
dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
DWC3_XHCI_REGS_END;
@@ -2208,15 +2211,17 @@ static int dwc3_probe(struct platform_device *pdev)
if (IS_ERR(dwc->usb_psy))
return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n");
- dwc->reset = devm_reset_control_array_get_optional_shared(dev);
- if (IS_ERR(dwc->reset)) {
- ret = PTR_ERR(dwc->reset);
- goto err_put_psy;
- }
+ if (!data->ignore_clocks_and_resets) {
+ dwc->reset = devm_reset_control_array_get_optional_shared(dev);
+ if (IS_ERR(dwc->reset)) {
+ ret = PTR_ERR(dwc->reset);
+ goto err_put_psy;
+ }
- ret = dwc3_get_clocks(dwc);
- if (ret)
- goto err_put_psy;
+ ret = dwc3_get_clocks(dwc);
+ if (ret)
+ goto err_put_psy;
+ }
ret = reset_control_deassert(dwc->reset);
if (ret)
@@ -2232,7 +2237,7 @@ static int dwc3_probe(struct platform_device *pdev)
goto err_disable_clks;
}
- platform_set_drvdata(pdev, dwc);
+ dev_set_drvdata(dev, dwc);
dwc3_cache_hwparams(dwc);
if (!dwc->sysdev_is_parent &&
@@ -2327,12 +2332,35 @@ err_put_psy:
return ret;
}
+EXPORT_SYMBOL_GPL(dwc3_core_probe);
-static void dwc3_remove(struct platform_device *pdev)
+static int dwc3_probe(struct platform_device *pdev)
{
- struct dwc3 *dwc = platform_get_drvdata(pdev);
+ struct dwc3_probe_data probe_data = {};
+ struct resource *res;
+ struct dwc3 *dwc;
- pm_runtime_get_sync(&pdev->dev);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "missing memory resource\n");
+ return -ENODEV;
+ }
+
+ dwc = devm_kzalloc(&pdev->dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ dwc->dev = &pdev->dev;
+
+ probe_data.dwc = dwc;
+ probe_data.res = res;
+
+ return dwc3_core_probe(&probe_data);
+}
+
+void dwc3_core_remove(struct dwc3 *dwc)
+{
+ pm_runtime_get_sync(dwc->dev);
dwc3_core_exit_mode(dwc);
dwc3_debugfs_exit(dwc);
@@ -2340,22 +2368,28 @@ static void dwc3_remove(struct platform_device *pdev)
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
- pm_runtime_allow(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(dwc->dev);
+ pm_runtime_disable(dwc->dev);
+ pm_runtime_dont_use_autosuspend(dwc->dev);
+ pm_runtime_put_noidle(dwc->dev);
/*
* HACK: Clear the driver data, which is currently accessed by parent
* glue drivers, before allowing the parent to suspend.
*/
- platform_set_drvdata(pdev, NULL);
- pm_runtime_set_suspended(&pdev->dev);
+ dev_set_drvdata(dwc->dev, NULL);
+ pm_runtime_set_suspended(dwc->dev);
dwc3_free_event_buffers(dwc);
if (dwc->usb_psy)
power_supply_put(dwc->usb_psy);
}
+EXPORT_SYMBOL_GPL(dwc3_core_remove);
+
+static void dwc3_remove(struct platform_device *pdev)
+{
+ dwc3_core_remove(platform_get_drvdata(pdev));
+}
#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
@@ -2544,9 +2578,8 @@ static int dwc3_runtime_checks(struct dwc3 *dwc)
return 0;
}
-static int dwc3_runtime_suspend(struct device *dev)
+int dwc3_runtime_suspend(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
int ret;
if (dwc3_runtime_checks(dwc))
@@ -2558,10 +2591,11 @@ static int dwc3_runtime_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(dwc3_runtime_suspend);
-static int dwc3_runtime_resume(struct device *dev)
+int dwc3_runtime_resume(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct device *dev = dwc->dev;
int ret;
ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
@@ -2571,7 +2605,7 @@ static int dwc3_runtime_resume(struct device *dev)
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
if (dwc->pending_events) {
- pm_runtime_put(dwc->dev);
+ pm_runtime_put(dev);
dwc->pending_events = false;
enable_irq(dwc->irq_gadget);
}
@@ -2586,10 +2620,11 @@ static int dwc3_runtime_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(dwc3_runtime_resume);
-static int dwc3_runtime_idle(struct device *dev)
+int dwc3_runtime_idle(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct device *dev = dwc->dev;
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
@@ -2607,12 +2642,28 @@ static int dwc3_runtime_idle(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(dwc3_runtime_idle);
+
+static int dwc3_plat_runtime_suspend(struct device *dev)
+{
+ return dwc3_runtime_suspend(dev_get_drvdata(dev));
+}
+
+static int dwc3_plat_runtime_resume(struct device *dev)
+{
+ return dwc3_runtime_resume(dev_get_drvdata(dev));
+}
+
+static int dwc3_plat_runtime_idle(struct device *dev)
+{
+ return dwc3_runtime_idle(dev_get_drvdata(dev));
+}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
-static int dwc3_suspend(struct device *dev)
+int dwc3_pm_suspend(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct device *dev = dwc->dev;
int ret;
ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
@@ -2623,10 +2674,11 @@ static int dwc3_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(dwc3_pm_suspend);
-static int dwc3_resume(struct device *dev)
+int dwc3_pm_resume(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct device *dev = dwc->dev;
int ret = 0;
pinctrl_pm_select_default_state(dev);
@@ -2645,10 +2697,10 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(dwc3_pm_resume);
-static void dwc3_complete(struct device *dev)
+void dwc3_pm_complete(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
@@ -2658,21 +2710,60 @@ static void dwc3_complete(struct device *dev)
dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
}
}
+EXPORT_SYMBOL_GPL(dwc3_pm_complete);
+
+int dwc3_pm_prepare(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+
+ /*
+ * Indicate to the PM core that it may safely leave the device in
+ * runtime suspend if runtime-suspended already in device mode.
+ */
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE &&
+ pm_runtime_suspended(dev) &&
+ !dev_pinctrl(dev))
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dwc3_pm_prepare);
+
+static int dwc3_plat_suspend(struct device *dev)
+{
+ return dwc3_pm_suspend(dev_get_drvdata(dev));
+}
+
+static int dwc3_plat_resume(struct device *dev)
+{
+ return dwc3_pm_resume(dev_get_drvdata(dev));
+}
+
+static void dwc3_plat_complete(struct device *dev)
+{
+ dwc3_pm_complete(dev_get_drvdata(dev));
+}
+
+static int dwc3_plat_prepare(struct device *dev)
+{
+ return dwc3_pm_prepare(dev_get_drvdata(dev));
+}
#else
-#define dwc3_complete NULL
+#define dwc3_plat_complete NULL
+#define dwc3_plat_prepare NULL
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops dwc3_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
- .complete = dwc3_complete,
-
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_plat_suspend, dwc3_plat_resume)
+ .complete = dwc3_plat_complete,
+ .prepare = dwc3_plat_prepare,
/*
* Runtime suspend halts the controller on disconnection. It relies on
* platforms with custom connection notification to start the controller
* again.
*/
- SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
- dwc3_runtime_idle)
+ SET_RUNTIME_PM_OPS(dwc3_plat_runtime_suspend, dwc3_plat_runtime_resume,
+ dwc3_plat_runtime_idle)
};
#ifdef CONFIG_OF
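
For readers following the refactor: dwc3_probe() is now a thin platform wrapper around an exported dwc3_core_probe(), so a glue driver can embed struct dwc3 and call the core directly instead of spawning a child platform device. A hedged sketch of the intended call pattern (the foo_* names are hypothetical; the dwc3_* entry points are the ones exported above and declared in glue.h further down):

struct foo_glue {
        struct dwc3 dwc;        /* embedded core state, not a child device */
        struct clk_bulk_data *clks;
        int num_clocks;
};

static int foo_probe(struct platform_device *pdev)
{
        struct dwc3_probe_data probe_data = {};
        struct foo_glue *glue;
        struct resource *res;

        glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
        if (!glue)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        glue->dwc.dev = &pdev->dev;
        probe_data.dwc = &glue->dwc;
        probe_data.res = res;
        probe_data.ignore_clocks_and_resets = true;     /* glue owns them */

        return dwc3_core_probe(&probe_data);
}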
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 27eae4cf223d..d5b985fa12f4 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1083,6 +1083,7 @@ struct dwc3_scratchpad_array {
* @tx_max_burst_prd: max periodic ESS transmit burst size
* @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
* @clear_stall_protocol: endpoint number that requires a delayed status phase
+ * @num_hc_interrupters: number of host controller interrupters
* @hsphy_interface: "utmi" or "ulpi"
* @connected: true when we're connected to a host, false otherwise
* @softconnect: true when gadget connect is called, false when disconnect runs
@@ -1333,6 +1334,7 @@ struct dwc3 {
u8 tx_max_burst_prd;
u8 tx_fifo_resize_max_num;
u8 clear_stall_protocol;
+ u16 num_hc_interrupters;
const char *hsphy_interface;
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index de686b9e6404..e934f94e8fd8 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -145,6 +145,12 @@ static void dwc3_exynos_remove(struct platform_device *pdev)
regulator_disable(exynos->vdd10);
}
+static const struct dwc3_exynos_driverdata exynos2200_drvdata = {
+ .clk_names = { "link_aclk" },
+ .num_clks = 1,
+ .suspend_clk_idx = -1,
+};
+
static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
.clk_names = { "usbdrd30" },
.num_clks = 1,
@@ -181,8 +187,17 @@ static const struct dwc3_exynos_driverdata gs101_drvdata = {
.suspend_clk_idx = 1,
};
+static const struct dwc3_exynos_driverdata exynosautov920_drvdata = {
+ .clk_names = { "ref", "susp_clk"},
+ .num_clks = 2,
+ .suspend_clk_idx = 1,
+};
+
static const struct of_device_id exynos_dwc3_match[] = {
{
+ .compatible = "samsung,exynos2200-dwusb3",
+ .data = &exynos2200_drvdata,
+ }, {
.compatible = "samsung,exynos5250-dwusb3",
.data = &exynos5250_drvdata,
}, {
@@ -198,6 +213,9 @@ static const struct of_device_id exynos_dwc3_match[] = {
.compatible = "samsung,exynos850-dwusb3",
.data = &exynos850_drvdata,
}, {
+ .compatible = "samsung,exynosautov920-dwusb3",
+ .data = &exynosautov920_drvdata,
+ }, {
.compatible = "google,gs101-dwusb3",
.data = &gs101_drvdata,
}, {
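
The new compatibles above pair with per-SoC driverdata through the of_device_id .data pointer; a sketch of the lookup side, assuming the usual of_device_get_match_data() helper (the real exynos probe does considerably more):

static int dwc3_exynos_probe_sketch(struct platform_device *pdev)
{
        const struct dwc3_exynos_driverdata *drvdata;

        /* returns the .data of whichever compatible matched */
        drvdata = of_device_get_match_data(&pdev->dev);
        if (!drvdata)
                return -EINVAL;

        /* e.g. drvdata->num_clks clocks to acquire, named in clk_names[] */
        return 0;
}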
diff --git a/drivers/usb/dwc3/dwc3-qcom-legacy.c b/drivers/usb/dwc3/dwc3-qcom-legacy.c
new file mode 100644
index 000000000000..d3fad0fcfdac
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-qcom-legacy.c
@@ -0,0 +1,935 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Inspired by dwc3-of-simple.c
+ */
+
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/of_clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/extcon.h>
+#include <linux/interconnect.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/usb/of.h>
+#include <linux/reset.h>
+#include <linux/iopoll.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb.h>
+#include "core.h"
+
+/* USB QSCRATCH Hardware registers */
+#define QSCRATCH_HS_PHY_CTRL 0x10
+#define UTMI_OTG_VBUS_VALID BIT(20)
+#define SW_SESSVLD_SEL BIT(28)
+
+#define QSCRATCH_SS_PHY_CTRL 0x30
+#define LANE0_PWR_PRESENT BIT(24)
+
+#define QSCRATCH_GENERAL_CFG 0x08
+#define PIPE_UTMI_CLK_SEL BIT(0)
+#define PIPE3_PHYSTATUS_SW BIT(3)
+#define PIPE_UTMI_CLK_DIS BIT(8)
+
+#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
+#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
+
+#define SDM845_QSCRATCH_BASE_OFFSET 0xf8800
+#define SDM845_QSCRATCH_SIZE 0x400
+#define SDM845_DWC3_CORE_SIZE 0xcd00
+
+/* Interconnect path bandwidths in MBps */
+#define USB_MEMORY_AVG_HS_BW MBps_to_icc(240)
+#define USB_MEMORY_PEAK_HS_BW MBps_to_icc(700)
+#define USB_MEMORY_AVG_SS_BW MBps_to_icc(1000)
+#define USB_MEMORY_PEAK_SS_BW MBps_to_icc(2500)
+#define APPS_USB_AVG_BW 0
+#define APPS_USB_PEAK_BW MBps_to_icc(40)
+
+/* Qualcomm SoCs with multiport support have up to 4 ports */
+#define DWC3_QCOM_MAX_PORTS 4
+
+static const u32 pwr_evnt_irq_stat_reg[DWC3_QCOM_MAX_PORTS] = {
+ 0x58,
+ 0x1dc,
+ 0x228,
+ 0x238,
+};
+
+struct dwc3_qcom_port {
+ int qusb2_phy_irq;
+ int dp_hs_phy_irq;
+ int dm_hs_phy_irq;
+ int ss_phy_irq;
+ enum usb_device_speed usb2_speed;
+};
+
+struct dwc3_qcom {
+ struct device *dev;
+ void __iomem *qscratch_base;
+ struct platform_device *dwc3;
+ struct clk **clks;
+ int num_clocks;
+ struct reset_control *resets;
+ struct dwc3_qcom_port ports[DWC3_QCOM_MAX_PORTS];
+ u8 num_ports;
+
+ struct extcon_dev *edev;
+ struct extcon_dev *host_edev;
+ struct notifier_block vbus_nb;
+ struct notifier_block host_nb;
+
+ enum usb_dr_mode mode;
+ bool is_suspended;
+ bool pm_suspended;
+ struct icc_path *icc_path_ddr;
+ struct icc_path *icc_path_apps;
+};
+
+static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write has gone through */
+ readl(base + offset);
+}
+
+static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write has gone through */
+ readl(base + offset);
+}
+
+static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable)
+{
+ if (enable) {
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
+ LANE0_PWR_PRESENT);
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL,
+ UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL);
+ } else {
+ dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
+ LANE0_PWR_PRESENT);
+ dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL,
+ UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL);
+ }
+}
+
+static int dwc3_qcom_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb);
+
+ /* enable vbus override for device mode */
+ dwc3_qcom_vbus_override_enable(qcom, event);
+ qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST;
+
+ return NOTIFY_DONE;
+}
+
+static int dwc3_qcom_host_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb);
+
+ /* disable vbus override in host mode */
+ dwc3_qcom_vbus_override_enable(qcom, !event);
+ qcom->mode = event ? USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL;
+
+ return NOTIFY_DONE;
+}
+
+static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
+{
+ struct device *dev = qcom->dev;
+ struct extcon_dev *host_edev;
+ int ret;
+
+ if (!of_property_present(dev->of_node, "extcon"))
+ return 0;
+
+ qcom->edev = extcon_get_edev_by_phandle(dev, 0);
+ if (IS_ERR(qcom->edev))
+ return dev_err_probe(dev, PTR_ERR(qcom->edev),
+ "Failed to get extcon\n");
+
+ qcom->vbus_nb.notifier_call = dwc3_qcom_vbus_notifier;
+
+ qcom->host_edev = extcon_get_edev_by_phandle(dev, 1);
+ if (IS_ERR(qcom->host_edev))
+ qcom->host_edev = NULL;
+
+ ret = devm_extcon_register_notifier(dev, qcom->edev, EXTCON_USB,
+ &qcom->vbus_nb);
+ if (ret < 0) {
+ dev_err(dev, "VBUS notifier register failed\n");
+ return ret;
+ }
+
+ if (qcom->host_edev)
+ host_edev = qcom->host_edev;
+ else
+ host_edev = qcom->edev;
+
+ qcom->host_nb.notifier_call = dwc3_qcom_host_notifier;
+ ret = devm_extcon_register_notifier(dev, host_edev, EXTCON_USB_HOST,
+ &qcom->host_nb);
+ if (ret < 0) {
+ dev_err(dev, "Host notifier register failed\n");
+ return ret;
+ }
+
+ /* Update initial VBUS override based on extcon state */
+ if (extcon_get_state(qcom->edev, EXTCON_USB) ||
+ !extcon_get_state(host_edev, EXTCON_USB_HOST))
+ dwc3_qcom_vbus_notifier(&qcom->vbus_nb, true, qcom->edev);
+ else
+ dwc3_qcom_vbus_notifier(&qcom->vbus_nb, false, qcom->edev);
+
+ return 0;
+}
+
+static int dwc3_qcom_interconnect_enable(struct dwc3_qcom *qcom)
+{
+ int ret;
+
+ ret = icc_enable(qcom->icc_path_ddr);
+ if (ret)
+ return ret;
+
+ ret = icc_enable(qcom->icc_path_apps);
+ if (ret)
+ icc_disable(qcom->icc_path_ddr);
+
+ return ret;
+}
+
+static int dwc3_qcom_interconnect_disable(struct dwc3_qcom *qcom)
+{
+ int ret;
+
+ ret = icc_disable(qcom->icc_path_ddr);
+ if (ret)
+ return ret;
+
+ ret = icc_disable(qcom->icc_path_apps);
+ if (ret)
+ icc_enable(qcom->icc_path_ddr);
+
+ return ret;
+}
+
+/**
+ * dwc3_qcom_interconnect_init() - Get interconnect path handles and set bandwidth
+ * @qcom: Pointer to the concerned USB core.
+ */
+static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+{
+ enum usb_device_speed max_speed;
+ struct device *dev = qcom->dev;
+ int ret;
+
+ qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
+ if (IS_ERR(qcom->icc_path_ddr)) {
+ return dev_err_probe(dev, PTR_ERR(qcom->icc_path_ddr),
+ "failed to get usb-ddr path\n");
+ }
+
+ qcom->icc_path_apps = of_icc_get(dev, "apps-usb");
+ if (IS_ERR(qcom->icc_path_apps)) {
+ ret = dev_err_probe(dev, PTR_ERR(qcom->icc_path_apps),
+ "failed to get apps-usb path\n");
+ goto put_path_ddr;
+ }
+
+ max_speed = usb_get_maximum_speed(&qcom->dwc3->dev);
+ if (max_speed >= USB_SPEED_SUPER || max_speed == USB_SPEED_UNKNOWN) {
+ ret = icc_set_bw(qcom->icc_path_ddr,
+ USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
+ } else {
+ ret = icc_set_bw(qcom->icc_path_ddr,
+ USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW);
+ }
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret);
+ goto put_path_apps;
+ }
+
+ ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret);
+ goto put_path_apps;
+ }
+
+ return 0;
+
+put_path_apps:
+ icc_put(qcom->icc_path_apps);
+put_path_ddr:
+ icc_put(qcom->icc_path_ddr);
+ return ret;
+}
+
+/**
+ * dwc3_qcom_interconnect_exit() - Release interconnect path handles
+ * @qcom: Pointer to the concerned USB core.
+ *
+ * Release the interconnect path handles acquired in dwc3_qcom_interconnect_init().
+ */
+static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
+{
+ icc_put(qcom->icc_path_ddr);
+ icc_put(qcom->icc_path_apps);
+}
+
+/* Only usable in contexts where the role can not change. */
+static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ dwc = platform_get_drvdata(qcom->dwc3);
+
+ /* Core driver may not have probed yet. */
+ if (!dwc)
+ return false;
+
+ return dwc->xhci;
+}
+
+static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom, int port_index)
+{
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+ struct usb_device *udev;
+ struct usb_hcd __maybe_unused *hcd;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ hcd = platform_get_drvdata(dwc->xhci);
+
+#ifdef CONFIG_USB
+ udev = usb_hub_find_child(hcd->self.root_hub, port_index + 1);
+#else
+ udev = NULL;
+#endif
+ if (!udev)
+ return USB_SPEED_UNKNOWN;
+
+ return udev->speed;
+}
+
+static void dwc3_qcom_enable_wakeup_irq(int irq, unsigned int polarity)
+{
+ if (!irq)
+ return;
+
+ if (polarity)
+ irq_set_irq_type(irq, polarity);
+
+ enable_irq(irq);
+ enable_irq_wake(irq);
+}
+
+static void dwc3_qcom_disable_wakeup_irq(int irq)
+{
+ if (!irq)
+ return;
+
+ disable_irq_wake(irq);
+ disable_irq_nosync(irq);
+}
+
+static void dwc3_qcom_disable_port_interrupts(struct dwc3_qcom_port *port)
+{
+ dwc3_qcom_disable_wakeup_irq(port->qusb2_phy_irq);
+
+ if (port->usb2_speed == USB_SPEED_LOW) {
+ dwc3_qcom_disable_wakeup_irq(port->dm_hs_phy_irq);
+ } else if ((port->usb2_speed == USB_SPEED_HIGH) ||
+ (port->usb2_speed == USB_SPEED_FULL)) {
+ dwc3_qcom_disable_wakeup_irq(port->dp_hs_phy_irq);
+ } else {
+ dwc3_qcom_disable_wakeup_irq(port->dp_hs_phy_irq);
+ dwc3_qcom_disable_wakeup_irq(port->dm_hs_phy_irq);
+ }
+
+ dwc3_qcom_disable_wakeup_irq(port->ss_phy_irq);
+}
+
+static void dwc3_qcom_enable_port_interrupts(struct dwc3_qcom_port *port)
+{
+ dwc3_qcom_enable_wakeup_irq(port->qusb2_phy_irq, 0);
+
+ /*
+ * Configure DP/DM line interrupts based on the USB2 device attached to
+ * the root hub port. When an HS/FS device is connected, configure the
+ * DP line as falling edge to detect both disconnect and remote wakeup
+ * scenarios. When an LS device is connected, configure the DM line as
+ * falling edge for the same purpose. When no device is connected,
+ * configure both DP and DM lines as rising edge to detect an HS/FS/LS
+ * device connect.
+ */
+
+ if (port->usb2_speed == USB_SPEED_LOW) {
+ dwc3_qcom_enable_wakeup_irq(port->dm_hs_phy_irq,
+ IRQ_TYPE_EDGE_FALLING);
+ } else if ((port->usb2_speed == USB_SPEED_HIGH) ||
+ (port->usb2_speed == USB_SPEED_FULL)) {
+ dwc3_qcom_enable_wakeup_irq(port->dp_hs_phy_irq,
+ IRQ_TYPE_EDGE_FALLING);
+ } else {
+ dwc3_qcom_enable_wakeup_irq(port->dp_hs_phy_irq,
+ IRQ_TYPE_EDGE_RISING);
+ dwc3_qcom_enable_wakeup_irq(port->dm_hs_phy_irq,
+ IRQ_TYPE_EDGE_RISING);
+ }
+
+ dwc3_qcom_enable_wakeup_irq(port->ss_phy_irq, 0);
+}
+
+static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
+{
+ int i;
+
+ for (i = 0; i < qcom->num_ports; i++)
+ dwc3_qcom_disable_port_interrupts(&qcom->ports[i]);
+}
+
+static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
+{
+ int i;
+
+ for (i = 0; i < qcom->num_ports; i++)
+ dwc3_qcom_enable_port_interrupts(&qcom->ports[i]);
+}
+
+static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup)
+{
+ u32 val;
+ int i, ret;
+
+ if (qcom->is_suspended)
+ return 0;
+
+ for (i = 0; i < qcom->num_ports; i++) {
+ val = readl(qcom->qscratch_base + pwr_evnt_irq_stat_reg[i]);
+ if (!(val & PWR_EVNT_LPM_IN_L2_MASK))
+ dev_err(qcom->dev, "port-%d HS-PHY not in L2\n", i + 1);
+ }
+
+ for (i = qcom->num_clocks - 1; i >= 0; i--)
+ clk_disable_unprepare(qcom->clks[i]);
+
+ ret = dwc3_qcom_interconnect_disable(qcom);
+ if (ret)
+ dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
+
+ /*
+ * The role is stable during suspend as role switching is done from a
+ * freezable workqueue.
+ */
+ if (dwc3_qcom_is_host(qcom) && wakeup) {
+ for (i = 0; i < qcom->num_ports; i++)
+ qcom->ports[i].usb2_speed = dwc3_qcom_read_usb2_speed(qcom, i);
+ dwc3_qcom_enable_interrupts(qcom);
+ }
+
+ qcom->is_suspended = true;
+
+ return 0;
+}
+
+static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
+{
+ int ret;
+ int i;
+
+ if (!qcom->is_suspended)
+ return 0;
+
+ if (dwc3_qcom_is_host(qcom) && wakeup)
+ dwc3_qcom_disable_interrupts(qcom);
+
+ for (i = 0; i < qcom->num_clocks; i++) {
+ ret = clk_prepare_enable(qcom->clks[i]);
+ if (ret < 0) {
+ while (--i >= 0)
+ clk_disable_unprepare(qcom->clks[i]);
+ return ret;
+ }
+ }
+
+ ret = dwc3_qcom_interconnect_enable(qcom);
+ if (ret)
+ dev_warn(qcom->dev, "failed to enable interconnect: %d\n", ret);
+
+ /* Clear existing events from PHY related to L2 in/out */
+ for (i = 0; i < qcom->num_ports; i++) {
+ dwc3_qcom_setbits(qcom->qscratch_base,
+ pwr_evnt_irq_stat_reg[i],
+ PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
+ }
+
+ qcom->is_suspended = false;
+
+ return 0;
+}
+
+static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
+{
+ struct dwc3_qcom *qcom = data;
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+
+ /* If pm_suspended then let pm_resume take care of resuming h/w */
+ if (qcom->pm_suspended)
+ return IRQ_HANDLED;
+
+ /*
+ * This is safe as role switching is done from a freezable workqueue
+ * and the wakeup interrupts are disabled as part of resume.
+ */
+ if (dwc3_qcom_is_host(qcom))
+ pm_runtime_resume(&dwc->xhci->dev);
+
+ return IRQ_HANDLED;
+}
+
+static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
+{
+ /* Configure dwc3 to use the UTMI clock, as the PIPE clock is not present */
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
+ PIPE_UTMI_CLK_DIS);
+
+ usleep_range(100, 1000);
+
+ dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
+ PIPE_UTMI_CLK_SEL | PIPE3_PHYSTATUS_SW);
+
+ usleep_range(100, 1000);
+
+ dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG,
+ PIPE_UTMI_CLK_DIS);
+}
+
+static int dwc3_qcom_request_irq(struct dwc3_qcom *qcom, int irq,
+ const char *name)
+{
+ int ret;
+
+ /* Keep wakeup interrupts disabled until suspend */
+ ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
+ qcom_dwc3_resume_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ name, qcom);
+ if (ret)
+ dev_err(qcom->dev, "failed to request irq %s: %d\n", name, ret);
+
+ return ret;
+}
+
+static int dwc3_qcom_setup_port_irq(struct platform_device *pdev, int port_index, bool is_multiport)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ const char *irq_name;
+ int irq;
+ int ret;
+
+ if (is_multiport)
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "dp_hs_phy_%d", port_index + 1);
+ else
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "dp_hs_phy_irq");
+ if (!irq_name)
+ return -ENOMEM;
+
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (irq > 0) {
+ ret = dwc3_qcom_request_irq(qcom, irq, irq_name);
+ if (ret)
+ return ret;
+ qcom->ports[port_index].dp_hs_phy_irq = irq;
+ }
+
+ if (is_multiport)
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "dm_hs_phy_%d", port_index + 1);
+ else
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "dm_hs_phy_irq");
+ if (!irq_name)
+ return -ENOMEM;
+
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (irq > 0) {
+ ret = dwc3_qcom_request_irq(qcom, irq, irq_name);
+ if (ret)
+ return ret;
+ qcom->ports[port_index].dm_hs_phy_irq = irq;
+ }
+
+ if (is_multiport)
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ss_phy_%d", port_index + 1);
+ else
+ irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ss_phy_irq");
+ if (!irq_name)
+ return -ENOMEM;
+
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (irq > 0) {
+ ret = dwc3_qcom_request_irq(qcom, irq, irq_name);
+ if (ret)
+ return ret;
+ qcom->ports[port_index].ss_phy_irq = irq;
+ }
+
+ if (is_multiport)
+ return 0;
+
+ irq = platform_get_irq_byname_optional(pdev, "qusb2_phy");
+ if (irq > 0) {
+ ret = dwc3_qcom_request_irq(qcom, irq, "qusb2_phy");
+ if (ret)
+ return ret;
+ qcom->ports[port_index].qusb2_phy_irq = irq;
+ }
+
+ return 0;
+}
+
+static int dwc3_qcom_find_num_ports(struct platform_device *pdev)
+{
+ char irq_name[14];
+ int port_num;
+ int irq;
+
+ irq = platform_get_irq_byname_optional(pdev, "dp_hs_phy_1");
+ if (irq <= 0)
+ return 1;
+
+ for (port_num = 2; port_num <= DWC3_QCOM_MAX_PORTS; port_num++) {
+ sprintf(irq_name, "dp_hs_phy_%d", port_num);
+
+ irq = platform_get_irq_byname_optional(pdev, irq_name);
+ if (irq <= 0)
+ return port_num - 1;
+ }
+
+ return DWC3_QCOM_MAX_PORTS;
+}
+
+static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ bool is_multiport;
+ int ret;
+ int i;
+
+ qcom->num_ports = dwc3_qcom_find_num_ports(pdev);
+ is_multiport = (qcom->num_ports > 1);
+
+ for (i = 0; i < qcom->num_ports; i++) {
+ ret = dwc3_qcom_setup_port_irq(pdev, i, is_multiport);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
+{
+ struct device *dev = qcom->dev;
+ struct device_node *np = dev->of_node;
+ int i;
+
+ if (!np || !count)
+ return 0;
+
+ if (count < 0)
+ return count;
+
+ qcom->num_clocks = count;
+
+ qcom->clks = devm_kcalloc(dev, qcom->num_clocks,
+ sizeof(struct clk *), GFP_KERNEL);
+ if (!qcom->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < qcom->num_clocks; i++) {
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, i);
+ if (IS_ERR(clk)) {
+ while (--i >= 0)
+ clk_put(qcom->clks[i]);
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret < 0) {
+ while (--i >= 0) {
+ clk_disable_unprepare(qcom->clks[i]);
+ clk_put(qcom->clks[i]);
+ }
+ clk_put(clk);
+
+ return ret;
+ }
+
+ qcom->clks[i] = clk;
+ }
+
+ return 0;
+}
+
+static int dwc3_qcom_of_register_core(struct platform_device *pdev)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np,
+ "snps,dwc3");
+ if (!dwc3_np) {
+ dev_err(dev, "failed to find dwc3 core child\n");
+ return -ENODEV;
+ }
+
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to register dwc3 core - %d\n", ret);
+ return ret;
+ }
+
+ qcom->dwc3 = of_find_device_by_node(dwc3_np);
+ if (!qcom->dwc3) {
+ ret = -ENODEV;
+ dev_err(dev, "failed to get dwc3 platform device\n");
+ of_platform_depopulate(dev);
+ }
+
+ return ret;
+}
+
+static int dwc3_qcom_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dwc3_qcom *qcom;
+ int ret, i;
+ bool ignore_pipe_clk;
+ bool wakeup_source;
+
+ qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
+ if (!qcom)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, qcom);
+ qcom->dev = &pdev->dev;
+
+ qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
+ if (IS_ERR(qcom->resets)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(qcom->resets),
+ "failed to get resets\n");
+ }
+
+ ret = reset_control_assert(qcom->resets);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to assert resets, err=%d\n", ret);
+ return ret;
+ }
+
+ usleep_range(10, 1000);
+
+ ret = reset_control_deassert(qcom->resets);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret);
+ goto reset_assert;
+ }
+
+ ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np));
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clocks\n");
+ goto reset_assert;
+ }
+
+ qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qcom->qscratch_base)) {
+ ret = PTR_ERR(qcom->qscratch_base);
+ goto clk_disable;
+ }
+
+ ret = dwc3_qcom_setup_irq(pdev);
+ if (ret) {
+ dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
+ goto clk_disable;
+ }
+
+ /*
+ * Disable pipe_clk requirement if specified. Used when dwc3
+ * operates without SSPHY and only HS/FS/LS modes are supported.
+ */
+ ignore_pipe_clk = device_property_read_bool(dev,
+ "qcom,select-utmi-as-pipe-clk");
+ if (ignore_pipe_clk)
+ dwc3_qcom_select_utmi_clk(qcom);
+
+ ret = dwc3_qcom_of_register_core(pdev);
+ if (ret) {
+ dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
+ goto clk_disable;
+ }
+
+ ret = dwc3_qcom_interconnect_init(qcom);
+ if (ret)
+ goto depopulate;
+
+ qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
+
+ /* enable vbus override for device mode */
+ if (qcom->mode != USB_DR_MODE_HOST)
+ dwc3_qcom_vbus_override_enable(qcom, true);
+
+ /* register extcon to override sw_vbus on Vbus change later */
+ ret = dwc3_qcom_register_extcon(qcom);
+ if (ret)
+ goto interconnect_exit;
+
+ wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source");
+ device_init_wakeup(&pdev->dev, wakeup_source);
+ device_init_wakeup(&qcom->dwc3->dev, wakeup_source);
+
+ qcom->is_suspended = false;
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_forbid(dev);
+
+ return 0;
+
+interconnect_exit:
+ dwc3_qcom_interconnect_exit(qcom);
+depopulate:
+ of_platform_depopulate(&pdev->dev);
+ platform_device_put(qcom->dwc3);
+clk_disable:
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+ clk_put(qcom->clks[i]);
+ }
+reset_assert:
+ reset_control_assert(qcom->resets);
+
+ return ret;
+}
+
+static void dwc3_qcom_remove(struct platform_device *pdev)
+{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int i;
+
+ of_platform_depopulate(&pdev->dev);
+ platform_device_put(qcom->dwc3);
+
+ for (i = qcom->num_clocks - 1; i >= 0; i--) {
+ clk_disable_unprepare(qcom->clks[i]);
+ clk_put(qcom->clks[i]);
+ }
+ qcom->num_clocks = 0;
+
+ dwc3_qcom_interconnect_exit(qcom);
+ reset_control_assert(qcom->resets);
+
+ pm_runtime_allow(dev);
+ pm_runtime_disable(dev);
+}
+
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
+{
+ struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
+ int ret;
+
+ ret = dwc3_qcom_suspend(qcom, wakeup);
+ if (ret)
+ return ret;
+
+ qcom->pm_suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
+{
+ struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
+ int ret;
+
+ ret = dwc3_qcom_resume(qcom, wakeup);
+ if (ret)
+ return ret;
+
+ qcom->pm_suspended = false;
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
+{
+ struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+
+ return dwc3_qcom_suspend(qcom, true);
+}
+
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
+{
+ struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+
+ return dwc3_qcom_resume(qcom, true);
+}
+
+static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
+ SET_RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id dwc3_qcom_of_match[] = {
+ { .compatible = "qcom,dwc3" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
+
+static struct platform_driver dwc3_qcom_driver = {
+ .probe = dwc3_qcom_probe,
+ .remove = dwc3_qcom_remove,
+ .driver = {
+ .name = "dwc3-qcom-legacy",
+ .pm = &dwc3_qcom_dev_pm_ops,
+ .of_match_table = dwc3_qcom_of_match,
+ },
+};
+
+module_platform_driver(dwc3_qcom_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare DWC3 QCOM legacy Glue Driver");
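
One detail of the legacy glue worth isolating: the PHY wakeup interrupts are requested with IRQF_NO_AUTOEN so they stay masked until suspend. A condensed sketch of that request/arm/disarm life cycle, with error handling omitted and the irq/handler names taken from the file above:

/* request: threaded, one-shot, left disabled until suspend */
ret = devm_request_threaded_irq(dev, irq, NULL, qcom_dwc3_resume_irq,
                                IRQF_ONESHOT | IRQF_NO_AUTOEN,
                                "dp_hs_phy_1", qcom);

/* suspend path: pick a polarity for the expected wake event, then arm */
irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
enable_irq(irq);
enable_irq_wake(irq);

/* resume path: disarm in the opposite order */
disable_irq_wake(irq);
disable_irq_nosync(irq);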
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 58683bb672e9..7334de85ad10 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -4,7 +4,6 @@
* Inspired by dwc3-of-simple.c
*/
-#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
@@ -14,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/extcon.h>
#include <linux/interconnect.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/usb/of.h>
@@ -23,6 +21,7 @@
#include <linux/usb/hcd.h>
#include <linux/usb.h>
#include "core.h"
+#include "glue.h"
/* USB QSCRATCH Hardware registers */
#define QSCRATCH_HS_PHY_CTRL 0x10
@@ -73,8 +72,8 @@ struct dwc3_qcom_port {
struct dwc3_qcom {
struct device *dev;
void __iomem *qscratch_base;
- struct platform_device *dwc3;
- struct clk **clks;
+ struct dwc3 dwc;
+ struct clk_bulk_data *clks;
int num_clocks;
struct reset_control *resets;
struct dwc3_qcom_port ports[DWC3_QCOM_MAX_PORTS];
@@ -92,6 +91,8 @@ struct dwc3_qcom {
struct icc_path *icc_path_apps;
};
+#define to_dwc3_qcom(d) container_of((d), struct dwc3_qcom, dwc)
+
static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
@@ -116,6 +117,11 @@ static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val)
readl(base + offset);
}
+/*
+ * TODO: Make the in-core role switching code invoke dwc3_qcom_vbus_override_enable(),
+ * validate that the in-core extcon support is functional, and drop extcon
+ * handling from the glue
+ */
static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable)
{
if (enable) {
@@ -260,7 +266,7 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
goto put_path_ddr;
}
- max_speed = usb_get_maximum_speed(&qcom->dwc3->dev);
+ max_speed = usb_get_maximum_speed(qcom->dwc.dev);
if (max_speed >= USB_SPEED_SUPER || max_speed == USB_SPEED_UNKNOWN) {
ret = icc_set_bw(qcom->icc_path_ddr,
USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
@@ -303,25 +309,14 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
/* Only usable in contexts where the role can not change. */
static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
{
- struct dwc3 *dwc;
-
- /*
- * FIXME: Fix this layering violation.
- */
- dwc = platform_get_drvdata(qcom->dwc3);
-
- /* Core driver may not have probed yet. */
- if (!dwc)
- return false;
-
- return dwc->xhci;
+ return qcom->dwc.xhci;
}
static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom, int port_index)
{
- struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
struct usb_device *udev;
struct usb_hcd __maybe_unused *hcd;
+ struct dwc3 *dwc = &qcom->dwc;
/*
* FIXME: Fix this layering violation.
@@ -436,9 +431,7 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup)
if (!(val & PWR_EVNT_LPM_IN_L2_MASK))
dev_err(qcom->dev, "port-%d HS-PHY not in L2\n", i + 1);
}
-
- for (i = qcom->num_clocks - 1; i >= 0; i--)
- clk_disable_unprepare(qcom->clks[i]);
+ clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks);
ret = dwc3_qcom_interconnect_disable(qcom);
if (ret)
@@ -470,14 +463,9 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
if (dwc3_qcom_is_host(qcom) && wakeup)
dwc3_qcom_disable_interrupts(qcom);
- for (i = 0; i < qcom->num_clocks; i++) {
- ret = clk_prepare_enable(qcom->clks[i]);
- if (ret < 0) {
- while (--i >= 0)
- clk_disable_unprepare(qcom->clks[i]);
- return ret;
- }
- }
+ ret = clk_bulk_prepare_enable(qcom->num_clocks, qcom->clks);
+ if (ret < 0)
+ return ret;
ret = dwc3_qcom_interconnect_enable(qcom);
if (ret)
@@ -498,7 +486,7 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
{
struct dwc3_qcom *qcom = data;
- struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+ struct dwc3 *dwc = &qcom->dwc;
/* If pm_suspended then let pm_resume take care of resuming h/w */
if (qcom->pm_suspended)
@@ -547,9 +535,10 @@ static int dwc3_qcom_request_irq(struct dwc3_qcom *qcom, int irq,
return ret;
}
-static int dwc3_qcom_setup_port_irq(struct platform_device *pdev, int port_index, bool is_multiport)
+static int dwc3_qcom_setup_port_irq(struct dwc3_qcom *qcom,
+ struct platform_device *pdev,
+ int port_index, bool is_multiport)
{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
const char *irq_name;
int irq;
int ret;
@@ -634,9 +623,8 @@ static int dwc3_qcom_find_num_ports(struct platform_device *pdev)
return DWC3_QCOM_MAX_PORTS;
}
-static int dwc3_qcom_setup_irq(struct platform_device *pdev)
+static int dwc3_qcom_setup_irq(struct dwc3_qcom *qcom, struct platform_device *pdev)
{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
bool is_multiport;
int ret;
int i;
@@ -645,7 +633,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
is_multiport = (qcom->num_ports > 1);
for (i = 0; i < qcom->num_ports; i++) {
- ret = dwc3_qcom_setup_port_irq(pdev, i, is_multiport);
+ ret = dwc3_qcom_setup_port_irq(qcom, pdev, i, is_multiport);
if (ret)
return ret;
}
@@ -653,89 +641,14 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
return 0;
}
-static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
-{
- struct device *dev = qcom->dev;
- struct device_node *np = dev->of_node;
- int i;
-
- if (!np || !count)
- return 0;
-
- if (count < 0)
- return count;
-
- qcom->num_clocks = count;
-
- qcom->clks = devm_kcalloc(dev, qcom->num_clocks,
- sizeof(struct clk *), GFP_KERNEL);
- if (!qcom->clks)
- return -ENOMEM;
-
- for (i = 0; i < qcom->num_clocks; i++) {
- struct clk *clk;
- int ret;
-
- clk = of_clk_get(np, i);
- if (IS_ERR(clk)) {
- while (--i >= 0)
- clk_put(qcom->clks[i]);
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret < 0) {
- while (--i >= 0) {
- clk_disable_unprepare(qcom->clks[i]);
- clk_put(qcom->clks[i]);
- }
- clk_put(clk);
-
- return ret;
- }
-
- qcom->clks[i] = clk;
- }
-
- return 0;
-}
-
-static int dwc3_qcom_of_register_core(struct platform_device *pdev)
-{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- int ret;
-
- struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np,
- "snps,dwc3");
- if (!dwc3_np) {
- dev_err(dev, "failed to find dwc3 core child\n");
- return -ENODEV;
- }
-
- ret = of_platform_populate(np, NULL, NULL, dev);
- if (ret) {
- dev_err(dev, "failed to register dwc3 core - %d\n", ret);
- return ret;
- }
-
- qcom->dwc3 = of_find_device_by_node(dwc3_np);
- if (!qcom->dwc3) {
- ret = -ENODEV;
- dev_err(dev, "failed to get dwc3 platform device\n");
- of_platform_depopulate(dev);
- }
-
- return ret;
-}
-
static int dwc3_qcom_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct dwc3_probe_data probe_data = {};
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
- int ret, i;
+ struct resource res;
+ struct resource *r;
+ int ret;
bool ignore_pipe_clk;
bool wakeup_source;
@@ -743,7 +656,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (!qcom)
return -ENOMEM;
- platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
@@ -752,6 +664,11 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
"failed to get resets\n");
}
+ ret = devm_clk_bulk_get_all(&pdev->dev, &qcom->clks);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+ qcom->num_clocks = ret;
+
ret = reset_control_assert(qcom->resets);
if (ret) {
dev_err(&pdev->dev, "failed to assert resets, err=%d\n", ret);
@@ -766,19 +683,26 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
goto reset_assert;
}
- ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np));
- if (ret) {
- dev_err_probe(dev, ret, "failed to get clocks\n");
+ ret = clk_bulk_prepare_enable(qcom->num_clocks, qcom->clks);
+ if (ret < 0)
goto reset_assert;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -EINVAL;
+ goto clk_disable;
}
+ res = *r;
+ res.end = res.start + SDM845_QSCRATCH_BASE_OFFSET;
- qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(qcom->qscratch_base)) {
- ret = PTR_ERR(qcom->qscratch_base);
+ qcom->qscratch_base = devm_ioremap(dev, res.end, SDM845_QSCRATCH_SIZE);
+ if (!qcom->qscratch_base) {
+ dev_err(dev, "failed to map qscratch region\n");
+ ret = -ENOMEM;
goto clk_disable;
}
- ret = dwc3_qcom_setup_irq(pdev);
+ ret = dwc3_qcom_setup_irq(qcom, pdev);
if (ret) {
dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
goto clk_disable;
@@ -793,17 +717,21 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ignore_pipe_clk)
dwc3_qcom_select_utmi_clk(qcom);
- ret = dwc3_qcom_of_register_core(pdev);
- if (ret) {
- dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
+ qcom->dwc.dev = dev;
+ probe_data.dwc = &qcom->dwc;
+ probe_data.res = &res;
+ probe_data.ignore_clocks_and_resets = true;
+ ret = dwc3_core_probe(&probe_data);
+ if (ret) {
+ ret = dev_err_probe(dev, ret, "failed to register DWC3 Core\n");
goto clk_disable;
}
ret = dwc3_qcom_interconnect_init(qcom);
if (ret)
- goto depopulate;
+ goto remove_core;
- qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
+ qcom->mode = usb_get_dr_mode(dev);
/* enable vbus override for device mode */
if (qcom->mode != USB_DR_MODE_HOST)
@@ -816,25 +744,17 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source");
device_init_wakeup(&pdev->dev, wakeup_source);
- device_init_wakeup(&qcom->dwc3->dev, wakeup_source);
qcom->is_suspended = false;
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- pm_runtime_forbid(dev);
return 0;
interconnect_exit:
dwc3_qcom_interconnect_exit(qcom);
-depopulate:
- of_platform_depopulate(&pdev->dev);
- platform_device_put(qcom->dwc3);
+remove_core:
+ dwc3_core_remove(&qcom->dwc);
clk_disable:
- for (i = qcom->num_clocks - 1; i >= 0; i--) {
- clk_disable_unprepare(qcom->clks[i]);
- clk_put(qcom->clks[i]);
- }
+ clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks);
reset_assert:
reset_control_assert(qcom->resets);
@@ -843,32 +763,28 @@ reset_assert:
static void dwc3_qcom_remove(struct platform_device *pdev)
{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- int i;
+ struct dwc3 *dwc = platform_get_drvdata(pdev);
+ struct dwc3_qcom *qcom = to_dwc3_qcom(dwc);
- of_platform_depopulate(&pdev->dev);
- platform_device_put(qcom->dwc3);
+ dwc3_core_remove(&qcom->dwc);
- for (i = qcom->num_clocks - 1; i >= 0; i--) {
- clk_disable_unprepare(qcom->clks[i]);
- clk_put(qcom->clks[i]);
- }
- qcom->num_clocks = 0;
+ clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks);
dwc3_qcom_interconnect_exit(qcom);
reset_control_assert(qcom->resets);
-
- pm_runtime_allow(dev);
- pm_runtime_disable(dev);
}
-static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
+static int dwc3_qcom_pm_suspend(struct device *dev)
{
- struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_qcom *qcom = to_dwc3_qcom(dwc);
bool wakeup = device_may_wakeup(dev);
int ret;
+ ret = dwc3_pm_suspend(&qcom->dwc);
+ if (ret)
+ return ret;
+
ret = dwc3_qcom_suspend(qcom, wakeup);
if (ret)
return ret;
@@ -878,9 +794,10 @@ static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
+static int dwc3_qcom_pm_resume(struct device *dev)
{
- struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_qcom *qcom = to_dwc3_qcom(dwc);
bool wakeup = device_may_wakeup(dev);
int ret;
@@ -890,31 +807,68 @@ static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
qcom->pm_suspended = false;
+ ret = dwc3_pm_resume(&qcom->dwc);
+ if (ret)
+ return ret;
+
return 0;
}
-static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
+static void dwc3_qcom_complete(struct device *dev)
{
- struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+
+ dwc3_pm_complete(dwc);
+}
+
+static int dwc3_qcom_prepare(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+
+ return dwc3_pm_prepare(dwc);
+}
+
+static int dwc3_qcom_runtime_suspend(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_qcom *qcom = to_dwc3_qcom(dwc);
+ int ret;
+
+ ret = dwc3_runtime_suspend(&qcom->dwc);
+ if (ret)
+ return ret;
return dwc3_qcom_suspend(qcom, true);
}
-static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
+static int dwc3_qcom_runtime_resume(struct device *dev)
{
- struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_qcom *qcom = to_dwc3_qcom(dwc);
+ int ret;
- return dwc3_qcom_resume(qcom, true);
+ ret = dwc3_qcom_resume(qcom, true);
+ if (ret)
+ return ret;
+
+ return dwc3_runtime_resume(&qcom->dwc);
+}
+
+static int dwc3_qcom_runtime_idle(struct device *dev)
+{
+ return dwc3_runtime_idle(dev_get_drvdata(dev));
}
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
- SET_RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
+ RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume,
+ dwc3_qcom_runtime_idle)
+ .complete = pm_sleep_ptr(dwc3_qcom_complete),
+ .prepare = pm_sleep_ptr(dwc3_qcom_prepare),
};
static const struct of_device_id dwc3_qcom_of_match[] = {
- { .compatible = "qcom,dwc3" },
+ { .compatible = "qcom,snps-dwc3" },
{ }
};
MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
@@ -924,7 +878,7 @@ static struct platform_driver dwc3_qcom_driver = {
.remove = dwc3_qcom_remove,
.driver = {
.name = "dwc3-qcom",
- .pm = &dwc3_qcom_dev_pm_ops,
+ .pm = pm_ptr(&dwc3_qcom_dev_pm_ops),
.of_match_table = dwc3_qcom_of_match,
},
};
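
The open-coded of_clk_get()/clk_prepare_enable() loops removed above collapse into the clk_bulk API; a condensed sketch of the replacement flow, assuming the qcom fields introduced in this patch:

/* acquire every clock listed in DT; the return value is the count */
ret = devm_clk_bulk_get_all(dev, &qcom->clks);
if (ret < 0)
        return dev_err_probe(dev, ret, "failed to get clocks\n");
qcom->num_clocks = ret;

ret = clk_bulk_prepare_enable(qcom->num_clocks, qcom->clks);
if (ret)
        return ret;

/* single-call teardown used in the suspend and remove paths */
clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks);

Besides being shorter, the bulk helpers unwind partially enabled clocks on failure, which the removed loops had to do by hand.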
diff --git a/drivers/usb/dwc3/glue.h b/drivers/usb/dwc3/glue.h
new file mode 100644
index 000000000000..2efd00e763be
--- /dev/null
+++ b/drivers/usb/dwc3/glue.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * glue.h - DesignWare USB3 DRD glue header
+ */
+
+#ifndef __DRIVERS_USB_DWC3_GLUE_H
+#define __DRIVERS_USB_DWC3_GLUE_H
+
+#include <linux/types.h>
+#include "core.h"
+
+/**
+ * struct dwc3_probe_data - Initialization parameters passed to dwc3_core_probe()
+ * @dwc: Reference to dwc3 context structure
+ * @res: resource for the DWC3 core mmio region
+ * @ignore_clocks_and_resets: clocks and resets defined for the device should
+ * be ignored by the DWC3 core, as they are managed by the glue
+ */
+struct dwc3_probe_data {
+ struct dwc3 *dwc;
+ struct resource *res;
+ bool ignore_clocks_and_resets;
+};
+
+int dwc3_core_probe(const struct dwc3_probe_data *data);
+void dwc3_core_remove(struct dwc3 *dwc);
+
+int dwc3_runtime_suspend(struct dwc3 *dwc);
+int dwc3_runtime_resume(struct dwc3 *dwc);
+int dwc3_runtime_idle(struct dwc3 *dwc);
+int dwc3_pm_suspend(struct dwc3 *dwc);
+int dwc3_pm_resume(struct dwc3 *dwc);
+void dwc3_pm_complete(struct dwc3 *dwc);
+int dwc3_pm_prepare(struct dwc3 *dwc);
+
+#endif
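
Since dwc3_core_probe() sets the struct dwc3 itself as drvdata, a glue that embeds it recovers its private state with container_of(), mirroring the to_dwc3_qcom() macro above. A sketch with a hypothetical foo_glue and helper:

#define to_foo_glue(d) container_of((d), struct foo_glue, dwc)

static int foo_suspend(struct device *dev)
{
        struct dwc3 *dwc = dev_get_drvdata(dev);
        struct foo_glue *glue = to_foo_glue(dwc);
        int ret;

        /* quiesce the core first, then the glue-level resources */
        ret = dwc3_pm_suspend(dwc);
        if (ret)
                return ret;

        return foo_glue_suspend(glue);  /* hypothetical glue helper */
}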
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index b48e108fc8fe..1c513bf8002e 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -182,6 +182,9 @@ int dwc3_host_init(struct dwc3 *dwc)
if (DWC3_VER_IS_WITHIN(DWC3, ANY, 300A))
props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
+ props[prop_idx++] = PROPERTY_ENTRY_U16("num-hc-interrupters",
+ dwc->num_hc_interrupters);
+
if (prop_idx) {
ret = device_create_managed_software_node(&xhci->dev, props, NULL);
if (ret) {
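
Condensed from the core.c and host.c hunks above, the interrupter count travels from firmware properties to the xHCI child via a software node; a sketch of the full handoff:

u16 num_hc_interrupters = 1;    /* default: a single interrupter */

/* core.c: read the firmware property and clamp to what the IP supports */
device_property_read_u16(dev, "num-hc-interrupters", &num_hc_interrupters);
if (num_hc_interrupters > 8)
        num_hc_interrupters = 8;

/* host.c: republish on the xhci platform device so the xHCI driver can
 * read it back through the same device_property API */
props[prop_idx++] = PROPERTY_ENTRY_U16("num-hc-interrupters",
                                       num_hc_interrupters);
ret = device_create_managed_software_node(&xhci->dev, props, NULL);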
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index ed5a92c474e5..30016b805bfd 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -158,7 +158,7 @@ struct usb_ep *usb_ep_autoconfig(
if (!ep)
return NULL;
- type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ type = usb_endpoint_type(desc);
/* report (variable) full speed bulk maxpacket */
if (type == USB_ENDPOINT_XFER_BULK) {
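
usb_endpoint_type() is the standard <linux/usb/ch9.h> accessor for the masked transfer type; the two forms below are equivalent, and the hunk above simply swaps the open-coded one for the helper:

/* open-coded form that the hunk above replaces */
type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

/* preferred helper; evaluates to the same masked value */
type = usb_endpoint_type(desc);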
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 740311c4fa24..97a62b926415 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -62,6 +62,9 @@ struct f_hidg {
unsigned short report_desc_length;
char *report_desc;
unsigned short report_length;
+ unsigned char interval;
+ bool interval_user_set;
+
/*
* use_out_ep - if true, the OUT Endpoint (interrupt out method)
* will be used to receive reports from the host
@@ -75,6 +78,7 @@ struct f_hidg {
/* recv report */
spinlock_t read_spinlock;
wait_queue_head_t read_queue;
+ bool disabled;
/* recv report - interrupt out only (use_out_ep == 1) */
struct list_head completed_out_req;
unsigned int qlen;
@@ -144,8 +148,8 @@ static struct hid_descriptor hidg_desc = {
.bcdHID = cpu_to_le16(0x0101),
.bCountryCode = 0x00,
.bNumDescriptors = 0x1,
- /*.desc[0].bDescriptorType = DYNAMIC */
- /*.desc[0].wDescriptorLenght = DYNAMIC */
+ /*.rpt_desc.bDescriptorType = DYNAMIC */
+ /*.rpt_desc.wDescriptorLength = DYNAMIC */
};
/* Super-Speed Support */
@@ -156,10 +160,7 @@ static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
- .bInterval = 4, /* FIXME: Add this field in the
- * HID gadget configuration?
- * (struct hidg_func_descriptor)
- */
+ /*.bInterval = DYNAMIC */
};
static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
@@ -177,10 +178,7 @@ static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
- .bInterval = 4, /* FIXME: Add this field in the
- * HID gadget configuration?
- * (struct hidg_func_descriptor)
- */
+ /*.bInterval = DYNAMIC */
};
static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
@@ -218,10 +216,7 @@ static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
- .bInterval = 4, /* FIXME: Add this field in the
- * HID gadget configuration?
- * (struct hidg_func_descriptor)
- */
+ /* .bInterval = DYNAMIC */
};
static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = {
@@ -230,10 +225,7 @@ static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
- .bInterval = 4, /* FIXME: Add this field in the
- * HID gadget configuration?
- * (struct hidg_func_descriptor)
- */
+ /*.bInterval = DYNAMIC */
};
static struct usb_descriptor_header *hidg_hs_descriptors_intout[] = {
@@ -259,10 +251,7 @@ static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
- .bInterval = 10, /* FIXME: Add this field in the
- * HID gadget configuration?
- * (struct hidg_func_descriptor)
- */
+ /*.bInterval = DYNAMIC */
};
static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = {
@@ -271,10 +260,7 @@ static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_INT,
/*.wMaxPacketSize = DYNAMIC */
- .bInterval = 10, /* FIXME: Add this field in the
- * HID gadget configuration?
- * (struct hidg_func_descriptor)
- */
+ /*.bInterval = DYNAMIC */
};
static struct usb_descriptor_header *hidg_fs_descriptors_intout[] = {
@@ -329,7 +315,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
spin_lock_irqsave(&hidg->read_spinlock, flags);
-#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req))
+#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req) || hidg->disabled)
/* wait for at least one buffer to complete */
while (!READ_COND_INTOUT) {
@@ -343,6 +329,11 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
spin_lock_irqsave(&hidg->read_spinlock, flags);
}
+ if (hidg->disabled) {
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+ return -ESHUTDOWN;
+ }
+
/* pick the first one */
list = list_first_entry(&hidg->completed_out_req,
struct f_hidg_req_list, list);
@@ -387,7 +378,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
return count;
}
-#define READ_COND_SSREPORT (hidg->set_report_buf != NULL)
+#define READ_COND_SSREPORT (hidg->set_report_buf != NULL || hidg->disabled)
static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer,
size_t count, loff_t *ptr)
@@ -939,8 +930,8 @@ static int hidg_setup(struct usb_function *f,
struct hid_descriptor hidg_desc_copy = hidg_desc;
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
- hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc_copy.desc[0].wDescriptorLength =
+ hidg_desc_copy.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc_copy.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
length = min_t(unsigned short, length,
@@ -1012,6 +1003,11 @@ static void hidg_disable(struct usb_function *f)
}
spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+ hidg->disabled = true;
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+ wake_up(&hidg->read_queue);
+
spin_lock_irqsave(&hidg->write_spinlock, flags);
if (!hidg->write_pending) {
free_ep_req(hidg->in_ep, hidg->req);
@@ -1097,6 +1093,10 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
}
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+ hidg->disabled = false;
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+
if (hidg->in_ep != NULL) {
spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->req = req_in;
@@ -1202,6 +1202,16 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+
+ /* IN endpoints: FS default bInterval 10 (10 ms), HS default 4 (1 ms); user override if set */
+ if (!hidg->interval_user_set) {
+ hidg_fs_in_ep_desc.bInterval = 10;
+ hidg_hs_in_ep_desc.bInterval = 4;
+ } else {
+ hidg_fs_in_ep_desc.bInterval = hidg->interval;
+ hidg_hs_in_ep_desc.bInterval = hidg->interval;
+ }
+
hidg_ss_out_comp_desc.wBytesPerInterval =
cpu_to_le16(hidg->report_length);
hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
@@ -1210,8 +1220,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
* We can use the hidg_desc struct here but we must not rely on its
* content staying unchanged after returning from this function.
*/
- hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc.desc[0].wDescriptorLength =
+ hidg_desc.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
hidg_hs_in_ep_desc.bEndpointAddress =
@@ -1224,19 +1234,27 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_ss_out_ep_desc.bEndpointAddress =
hidg_fs_out_ep_desc.bEndpointAddress;
- if (hidg->use_out_ep)
+ if (hidg->use_out_ep) {
+ /* OUT endpoints: same defaults (FS 10, HS 4) unless the user set an override */
+ if (!hidg->interval_user_set) {
+ hidg_fs_out_ep_desc.bInterval = 10;
+ hidg_hs_out_ep_desc.bInterval = 4;
+ } else {
+ hidg_fs_out_ep_desc.bInterval = hidg->interval;
+ hidg_hs_out_ep_desc.bInterval = hidg->interval;
+ }
status = usb_assign_descriptors(f,
- hidg_fs_descriptors_intout,
- hidg_hs_descriptors_intout,
- hidg_ss_descriptors_intout,
- hidg_ss_descriptors_intout);
- else
+ hidg_fs_descriptors_intout,
+ hidg_hs_descriptors_intout,
+ hidg_ss_descriptors_intout,
+ hidg_ss_descriptors_intout);
+ } else {
status = usb_assign_descriptors(f,
hidg_fs_descriptors_ssreport,
hidg_hs_descriptors_ssreport,
hidg_ss_descriptors_ssreport,
hidg_ss_descriptors_ssreport);
-
+ }
if (status)
goto fail;
@@ -1408,6 +1426,53 @@ end:
CONFIGFS_ATTR(f_hid_opts_, report_desc);
+static ssize_t f_hid_opts_interval_show(struct config_item *item, char *page)
+{
+ struct f_hid_opts *opts = to_f_hid_opts(item);
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%d\n", opts->interval);
+ mutex_unlock(&opts->lock);
+
+ return result;
+}
+
+static ssize_t f_hid_opts_interval_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_hid_opts *opts = to_f_hid_opts(item);
+ int ret;
+ unsigned int tmp;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ /* parse into a wider type first */
+ ret = kstrtouint(page, 0, &tmp);
+ if (ret)
+ goto end;
+
+ /* range-check against unsigned char max */
+ if (tmp > 255) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ opts->interval = (unsigned char)tmp;
+ opts->interval_user_set = true;
+ ret = len;
+
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+CONFIGFS_ATTR(f_hid_opts_, interval);
+
static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page)
{
struct f_hid_opts *opts = to_f_hid_opts(item);
@@ -1422,6 +1487,7 @@ static struct configfs_attribute *hid_attrs[] = {
&f_hid_opts_attr_protocol,
&f_hid_opts_attr_no_out_endpoint,
&f_hid_opts_attr_report_length,
+ &f_hid_opts_attr_interval,
&f_hid_opts_attr_report_desc,
&f_hid_opts_attr_dev,
NULL,
@@ -1468,6 +1534,10 @@ static struct usb_function_instance *hidg_alloc_inst(void)
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
+
+ opts->interval = 4;
+ opts->interval_user_set = false;
+
opts->func_inst.free_func_inst = hidg_free_inst;
ret = &opts->func_inst;
@@ -1546,6 +1616,8 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
hidg->bInterfaceProtocol = opts->protocol;
hidg->report_length = opts->report_length;
hidg->report_desc_length = opts->report_desc_length;
+ hidg->interval = opts->interval;
+ hidg->interval_user_set = opts->interval_user_set;
if (opts->report_desc) {
hidg->report_desc = kmemdup(opts->report_desc,
opts->report_desc_length,
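
A note on the interval parsing added above: the store handler goes through
kstrtouint() plus an explicit range check so an out-of-range value fails with
-EINVAL. If -ERANGE is acceptable instead, kstrtou8() can do the bounds check
itself. A minimal sketch of that alternative (same locking and field names as
the patch; this is not what the patch merges):

	static ssize_t f_hid_opts_interval_store(struct config_item *item,
						 const char *page, size_t len)
	{
		struct f_hid_opts *opts = to_f_hid_opts(item);
		u8 interval;
		int ret;

		mutex_lock(&opts->lock);
		if (opts->refcnt) {
			ret = -EBUSY;
			goto end;
		}

		/* kstrtou8() returns -ERANGE for anything above 255 */
		ret = kstrtou8(page, 0, &interval);
		if (ret)
			goto end;

		opts->interval = interval;
		opts->interval_user_set = true;
		ret = len;
	end:
		mutex_unlock(&opts->lock);
		return ret;
	}
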
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index 3b8c4ce2a40a..82ecd3fedb3a 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -110,7 +110,7 @@ struct fsg_config {
};
static inline struct fsg_opts *
-fsg_opts_from_func_inst(const struct usb_function_instance *fi)
+fsg_opts_from_func_inst(struct usb_function_instance *fi)
{
return container_of(fi, struct fsg_opts, func_inst);
}
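
Dropping const from fsg_opts_from_func_inst() keeps the qualifiers honest:
container_of() returns a pointer to the enclosing object, so accepting a
const instance while handing back mutable opts would simply cast the const
away. A standalone illustration of the hazard, using a simplified macro
rather than the kernel's, with hypothetical types:

	#include <stddef.h>

	struct opts {
		int refcnt;
		struct inst { int id; } inst;	/* embedded member */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct opts *opts_from_inst(const struct inst *i)
	{
		/* The (char *) cast silently discards const: callers can
		 * now modify an object they were handed as read-only. */
		return container_of(i, struct opts, inst);
	}
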
diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
index 12e866fb311d..0a800ba53816 100644
--- a/drivers/usb/gadget/function/f_midi2.c
+++ b/drivers/usb/gadget/function/f_midi2.c
@@ -475,7 +475,7 @@ static void reply_ump_stream_ep_info(struct f_midi2_ep *ep)
/* reply a UMP EP device info */
static void reply_ump_stream_ep_device(struct f_midi2_ep *ep)
{
- struct snd_ump_stream_msg_devince_info rep = {
+ struct snd_ump_stream_msg_device_info rep = {
.type = UMP_MSG_TYPE_STREAM,
.status = UMP_STREAM_MSG_STATUS_DEVICE_INFO,
.manufacture_id = ep->info.manufacturer,
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 8f7e7a2b2ff2..0f266bc067f5 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -364,6 +364,12 @@ static void gser_suspend(struct usb_function *f)
gserial_suspend(&gser->port);
}
+static int gser_get_status(struct usb_function *f)
+{
+ return (f->func_wakeup_armed ? USB_INTRF_STAT_FUNC_RW : 0) |
+ USB_INTRF_STAT_FUNC_RW_CAP;
+}
+
static struct usb_function *gser_alloc(struct usb_function_instance *fi)
{
struct f_gser *gser;
@@ -387,6 +393,7 @@ static struct usb_function *gser_alloc(struct usb_function_instance *fi)
gser->port.func.free_func = gser_free;
gser->port.func.resume = gser_resume;
gser->port.func.suspend = gser_suspend;
+ gser->port.func.get_status = gser_get_status;
return &gser->port.func;
}
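
For context on the new get_status() hook: when the host sends GET_STATUS to
an interface of a USB 3.x function, the composite layer asks the function
for its status word. Two bits are defined in ch9.h:
USB_INTRF_STAT_FUNC_RW_CAP (the function can signal remote wakeup) and
USB_INTRF_STAT_FUNC_RW (remote wakeup is currently armed), which is exactly
what gser_get_status() reports. A small sketch of decoding such a value:

	#include <linux/printk.h>
	#include <linux/usb/ch9.h>

	static void show_func_status(int status)
	{
		if (status & USB_INTRF_STAT_FUNC_RW_CAP)
			pr_info("function is remote-wakeup capable\n");
		if (status & USB_INTRF_STAT_FUNC_RW)
			pr_info("remote wakeup is currently armed\n");
	}
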
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 5a2e1237f85c..6e8804f04baa 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1641,14 +1641,14 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
tport_wwn);
struct usbg_tpg *tpg;
- unsigned long tpgt;
+ u16 tpgt;
int ret;
struct f_tcm_opts *opts;
unsigned i;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
- if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
+ if (kstrtou16(name + 5, 0, &tpgt))
return ERR_PTR(-EINVAL);
ret = -ENODEV;
mutex_lock(&tpg_instances_lock);
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
index 84bb70292855..a9ed9720caee 100644
--- a/drivers/usb/gadget/function/u_hid.h
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -25,6 +25,8 @@ struct f_hid_opts {
unsigned short report_desc_length;
unsigned char *report_desc;
bool report_desc_alloc;
+ unsigned char interval;
+ bool interval_user_set;
/*
* Protect the data from concurrent access by read/write
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 36fff45e8c9b..ab544f6824be 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -592,6 +592,17 @@ static int gs_start_io(struct gs_port *port)
return status;
}
+static int gserial_wakeup_host(struct gserial *gser)
+{
+ struct usb_function *func = &gser->func;
+ struct usb_gadget *gadget = func->config->cdev->gadget;
+
+ if (func->func_suspended)
+ return usb_func_wakeup(func);
+ else
+ return usb_gadget_wakeup(gadget);
+}
+
/*-------------------------------------------------------------------------*/
/* TTY Driver */
@@ -746,6 +757,8 @@ static ssize_t gs_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
+ int ret = 0;
+ struct gserial *gser = port->port_usb;
pr_vdebug("gs_write: ttyGS%d (%p) writing %zu bytes\n",
port->port_num, tty, count);
@@ -753,6 +766,17 @@ static ssize_t gs_write(struct tty_struct *tty, const u8 *buf, size_t count)
spin_lock_irqsave(&port->port_lock, flags);
if (count)
count = kfifo_in(&port->port_write_buf, buf, count);
+
+ if (port->suspended) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = gserial_wakeup_host(gser);
+ if (ret) {
+			pr_debug("ttyGS%d: Remote wakeup failed: %d\n", port->port_num, ret);
+ return count;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+ }
+
/* treat count == 0 as flush_chars() */
if (port->port_usb)
gs_start_tx(port);
@@ -781,10 +805,22 @@ static void gs_flush_chars(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
+ int ret = 0;
+ struct gserial *gser = port->port_usb;
pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
spin_lock_irqsave(&port->port_lock, flags);
+ if (port->suspended) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ ret = gserial_wakeup_host(gser);
+ if (ret) {
+			pr_debug("ttyGS%d: Remote wakeup failed: %d\n", port->port_num, ret);
+ return;
+ }
+ spin_lock_irqsave(&port->port_lock, flags);
+ }
+
if (port->port_usb)
gs_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1464,6 +1500,20 @@ void gserial_suspend(struct gserial *gser)
return;
}
+ if (port->write_busy || port->write_started) {
+		/* Wake up the host if there are ongoing transfers */
+ spin_unlock_irqrestore(&serial_port_lock, flags);
+ if (!gserial_wakeup_host(gser))
+ return;
+
+		/* Re-check that the port is still valid after re-acquiring the lock */
+ spin_lock_irqsave(&serial_port_lock, flags);
+ if (!port) {
+ spin_unlock_irqrestore(&serial_port_lock, flags);
+ return;
+ }
+ }
+
spin_lock(&port->port_lock);
spin_unlock(&serial_port_lock);
port->suspended = true;
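
The suspend path above follows the standard shape for doing a possibly
sleeping operation from inside a spinlocked section: drop the lock, perform
the wakeup, re-take the lock, and revalidate any state that may have changed
in between. Condensed into a self-contained sketch (all names hypothetical;
wake_host() stands in for usb_func_wakeup()/usb_gadget_wakeup()):

	#include <linux/errno.h>
	#include <linux/spinlock.h>

	struct sketch_dev {
		spinlock_t lock;
		bool needs_wakeup;
		bool still_there;
	};

	int wake_host(struct sketch_dev *dev);	/* may sleep */

	static int sketch_suspend(struct sketch_dev *dev)
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->needs_wakeup) {
			/* wake_host() may sleep: the spinlock must go first */
			spin_unlock_irqrestore(&dev->lock, flags);
			ret = wake_host(dev);
			if (ret)
				return ret;
			spin_lock_irqsave(&dev->lock, flags);
			/* anything may have changed while unlocked */
			if (!dev->still_there) {
				spin_unlock_irqrestore(&dev->lock, flags);
				return -ENODEV;
			}
		}
		dev->needs_wakeup = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return 0;
	}
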
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index 2f78cd4f396f..9391614135e9 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -74,10 +74,12 @@ static inline struct uvcg_format *to_uvcg_format(struct config_item *item)
struct uvcg_streaming_header {
struct config_item item;
- struct uvc_input_header_descriptor desc;
unsigned linked;
struct list_head formats;
unsigned num_fmt;
+
+	/* Must be last: ends in a flexible-array member. */
+ struct uvc_input_header_descriptor desc;
};
static inline struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item)
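
The field move above is a C rule, not style: a flexible array member must be
the last member of its struct, and a struct ending in one can itself only be
embedded as the final member of an enclosing struct (ISO C forbids the
embedding outright; GNU C permits it at the tail, which is what the kernel
relies on here). A minimal standalone example of the layout and the matching
allocation:

	#include <stdlib.h>

	struct desc {
		unsigned char bLength;
		unsigned char bNumFormats;
		unsigned char bmaControls[];	/* flexible array: must be last */
	};

	struct header {
		unsigned linked;
		unsigned num_fmt;
		struct desc desc;		/* so this must be last, too */
	};

	/* Room for the header plus n trailing control bytes. */
	static struct header *header_alloc(size_t n)
	{
		return malloc(sizeof(struct header) + n);
	}
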
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index a9544fea8723..578556422ea3 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -188,7 +188,7 @@ static int __init gfs_init(void)
/*
* Allocate in one chunk for easier maintenance
*/
- f_ffs[0] = kcalloc(func_num * N_CONF, sizeof(*f_ffs), GFP_KERNEL);
+ f_ffs[0] = kcalloc(func_num * N_CONF, sizeof(*f_ffs[0]), GFP_KERNEL);
if (!f_ffs[0]) {
ret = -ENOMEM;
goto no_func;
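
The one-character sizeof fix above is subtle: f_ffs is an array of
pointer-to-pointer, so sizeof(*f_ffs) names the size of a
struct usb_function **, while sizeof(*f_ffs[0]) names the element actually
being allocated, a struct usb_function *. Both happen to be pointer-sized,
so this corrects the expression's intent rather than its result. The
general sizeof(*ptr) idiom, shown with hypothetical types:

	#include <stdlib.h>

	struct item;	/* opaque */

	static struct item **alloc_table(size_t n)
	{
		struct item **table;

		/* Size the allocation from the target pointer, not from a
		 * spelled-out type that can drift out of sync. */
		table = calloc(n, sizeof(*table));
		return table;
	}
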
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index b6a30d88a800..fcce84a726f2 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1615,7 +1615,7 @@ static int activate_ep_files (struct dev_data *dev)
mutex_init(&data->lock);
init_waitqueue_head (&data->wait);
- strncpy (data->name, ep->name, sizeof (data->name) - 1);
+ strscpy(data->name, ep->name);
refcount_set (&data->count, 1);
data->dev = dev;
get_dev (dev);
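
The strncpy() to strscpy() conversion above is more than cosmetic:
strscpy() always NUL-terminates the destination, does not zero-pad the
remainder, and returns the number of bytes copied or -E2BIG on truncation;
the two-argument form used here derives the buffer size from the array type
at compile time. A small usage sketch (struct name hypothetical):

	#include <linux/printk.h>
	#include <linux/string.h>

	struct ep_name_sketch {
		char name[16];
	};

	static void set_name(struct ep_name_sketch *d, const char *src)
	{
		ssize_t n;

		/* destination size inferred from sizeof(d->name) */
		n = strscpy(d->name, src);
		if (n < 0)
			pr_warn("truncated, kept: %s\n", d->name);
	}
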
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index aae1787320d4..26460340fbc9 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -102,12 +102,6 @@ config USB_FSL_USB2
dynamically linked module called "fsl_usb2_udc" and force
all gadget drivers to also be dynamically linked.
-config USB_FUSB300
- tristate "Faraday FUSB300 USB Peripheral Controller"
- depends on !PHYS_ADDR_T_64BIT && HAS_DMA
- help
- Faraday usb device controller FUSB300 driver
-
config USB_GR_UDC
tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
depends on HAS_DMA
@@ -228,21 +222,6 @@ config USB_PXA27X
dynamically linked module called "pxa27x_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_MV_UDC
- tristate "Marvell USB2.0 Device Controller"
- depends on HAS_DMA
- help
- Marvell Socs (including PXA and MMP series) include a high speed
- USB2.0 OTG controller, which can be configured as high speed or
- full speed USB peripheral.
-
-config USB_MV_U3D
- depends on HAS_DMA
- tristate "MARVELL PXA2128 USB 3.0 controller"
- help
- MARVELL PXA2128 Processor series include a super speed USB3.0 device
- controller, which support super speed USB peripheral.
-
config USB_SNP_CORE
depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT)
depends on HAS_DMA
@@ -326,29 +305,6 @@ config USB_FSL_QE
Set CONFIG_USB_GADGET to "m" to build this driver as a
dynamically linked module called "fsl_qe_udc".
-config USB_NET2272
- depends on HAS_IOMEM
- tristate "PLX NET2272"
- help
- PLX NET2272 is a USB peripheral controller which supports
- both full and high speed USB 2.0 data transfers.
-
- It has three configurable endpoints, as well as endpoint zero
- (for control transfer).
- Say "y" to link the driver statically, or "m" to build a
- dynamically linked module called "net2272" and force all
- gadget drivers to also be dynamically linked.
-
-config USB_NET2272_DMA
- bool "Support external DMA controller"
- depends on USB_NET2272 && HAS_DMA
- help
- The NET2272 part can optionally support an external DMA
- controller, but your board has to have support in the
- driver itself.
-
- If unsure, say "N" here. The driver works fine in PIO mode.
-
config USB_NET2280
tristate "NetChip NET228x / PLX USB3x8x"
depends on USB_PCI
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index b52f93e9c61d..1b9b1a4f9c57 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -9,7 +9,6 @@ udc-core-y := core.o trace.o
#
obj-$(CONFIG_USB_GADGET) += udc-core.o
obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
-obj-$(CONFIG_USB_NET2272) += net2272.o
obj-$(CONFIG_USB_NET2280) += net2280.o
obj-$(CONFIG_USB_SNP_CORE) += snps_udc_core.o
obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc_pci.o
@@ -31,10 +30,6 @@ obj-$(CONFIG_USB_RENESAS_USBF) += renesas_usbf.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
obj-$(CONFIG_USB_LPC32XX) += lpc32xx_udc.o
obj-$(CONFIG_USB_EG20T) += pch_udc.o
-obj-$(CONFIG_USB_MV_UDC) += mv_udc.o
-mv_udc-y := mv_udc_core.o
-obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
-obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o
obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index aa4c61094dc6..42b94d858e37 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1541,7 +1541,7 @@ static void at91_vbus_timer_work(struct work_struct *work)
static void at91_vbus_timer(struct timer_list *t)
{
- struct at91_udc *udc = from_timer(udc, t, vbus_timer);
+ struct at91_udc *udc = timer_container_of(udc, t, vbus_timer);
/*
* If we are polling vbus it is likely that the gpio is on an
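
from_timer() has been renamed timer_container_of(), as the at91_udc
conversion above (and the dummy_hcd one below) show; the semantics are
unchanged: given the timer_list pointer handed to the callback, recover the
structure that embeds it. The shape of a converted callback, with
hypothetical struct and field names:

	#include <linux/timer.h>

	struct sketch_udc {
		struct timer_list vbus_timer;
		int vbus_active;
	};

	static void sketch_vbus_timer(struct timer_list *t)
	{
		/* walk back from the member to the embedding struct */
		struct sketch_udc *udc = timer_container_of(udc, t, vbus_timer);

		udc->vbus_active = 1;
	}
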
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 4b3d5075621a..d709e24c1fd4 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1570,7 +1570,7 @@ static int gadget_match_driver(struct device *dev, const struct device_driver *d
{
struct usb_gadget *gadget = dev_to_usb_gadget(dev);
struct usb_udc *udc = gadget->udc;
- struct usb_gadget_driver *driver = container_of(drv,
+ const struct usb_gadget_driver *driver = container_of(drv,
struct usb_gadget_driver, driver);
/* If the driver specifies a udc_name, it must match the UDC's name */
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 4f1b5db51dda..27c9699365ab 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1787,7 +1787,8 @@ static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
*/
static enum hrtimer_restart dummy_timer(struct hrtimer *t)
{
- struct dummy_hcd *dum_hcd = from_timer(dum_hcd, t, timer);
+ struct dummy_hcd *dum_hcd = timer_container_of(dum_hcd, t,
+ timer);
struct dummy *dum = dum_hcd->dum;
struct urbp *urbp, *tmp;
unsigned long flags;
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
deleted file mode 100644
index 5e94a99b3e53..000000000000
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ /dev/null
@@ -1,1516 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Fusb300 UDC (USB gadget)
- *
- * Copyright (C) 2010 Faraday Technology Corp.
- *
- * Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
- */
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-#include "fusb300_udc.h"
-
-MODULE_DESCRIPTION("FUSB300 USB gadget driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
-MODULE_ALIAS("platform:fusb300_udc");
-
-#define DRIVER_VERSION "20 October 2010"
-
-static const char udc_name[] = "fusb300_udc";
-static const char * const fusb300_ep_name[] = {
- "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9",
- "ep10", "ep11", "ep12", "ep13", "ep14", "ep15"
-};
-
-static void done(struct fusb300_ep *ep, struct fusb300_request *req,
- int status);
-
-static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset,
- u32 value)
-{
- u32 reg = ioread32(fusb300->reg + offset);
-
- reg |= value;
- iowrite32(reg, fusb300->reg + offset);
-}
-
-static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset,
- u32 value)
-{
- u32 reg = ioread32(fusb300->reg + offset);
-
- reg &= ~value;
- iowrite32(reg, fusb300->reg + offset);
-}
-
-
-static void fusb300_ep_setting(struct fusb300_ep *ep,
- struct fusb300_ep_info info)
-{
- ep->epnum = info.epnum;
- ep->type = info.type;
-}
-
-static int fusb300_ep_release(struct fusb300_ep *ep)
-{
- if (!ep->epnum)
- return 0;
- ep->epnum = 0;
- ep->stall = 0;
- ep->wedged = 0;
- return 0;
-}
-
-static void fusb300_set_fifo_entry(struct fusb300 *fusb300,
- u32 ep)
-{
- u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
-
- val &= ~FUSB300_EPSET1_FIFOENTRY_MSK;
- val |= FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM);
- iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
-}
-
-static void fusb300_set_start_entry(struct fusb300 *fusb300,
- u8 ep)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
- u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM;
-
- reg &= ~FUSB300_EPSET1_START_ENTRY_MSK ;
- reg |= FUSB300_EPSET1_START_ENTRY(start_entry);
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
- if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) {
- fusb300->fifo_entry_num = 0;
- fusb300->addrofs = 0;
- pr_err("fifo entry is over the maximum number!\n");
- } else
- fusb300->fifo_entry_num++;
-}
-
-/* set fusb300_set_start_entry first before fusb300_set_epaddrofs */
-static void fusb300_set_epaddrofs(struct fusb300 *fusb300,
- struct fusb300_ep_info info)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
-
- reg &= ~FUSB300_EPSET2_ADDROFS_MSK;
- reg |= FUSB300_EPSET2_ADDROFS(fusb300->addrofs);
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
- fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM;
-}
-
-static void ep_fifo_setting(struct fusb300 *fusb300,
- struct fusb300_ep_info info)
-{
- fusb300_set_fifo_entry(fusb300, info.epnum);
- fusb300_set_start_entry(fusb300, info.epnum);
- fusb300_set_epaddrofs(fusb300, info);
-}
-
-static void fusb300_set_eptype(struct fusb300 *fusb300,
- struct fusb300_ep_info info)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
-
- reg &= ~FUSB300_EPSET1_TYPE_MSK;
- reg |= FUSB300_EPSET1_TYPE(info.type);
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
-}
-
-static void fusb300_set_epdir(struct fusb300 *fusb300,
- struct fusb300_ep_info info)
-{
- u32 reg;
-
- if (!info.dir_in)
- return;
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
- reg &= ~FUSB300_EPSET1_DIR_MSK;
- reg |= FUSB300_EPSET1_DIRIN;
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
-}
-
-static void fusb300_set_ep_active(struct fusb300 *fusb300,
- u8 ep)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
-
- reg |= FUSB300_EPSET1_ACTEN;
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep));
-}
-
-static void fusb300_set_epmps(struct fusb300 *fusb300,
- struct fusb300_ep_info info)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
-
- reg &= ~FUSB300_EPSET2_MPS_MSK;
- reg |= FUSB300_EPSET2_MPS(info.maxpacket);
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum));
-}
-
-static void fusb300_set_interval(struct fusb300 *fusb300,
- struct fusb300_ep_info info)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
-
- reg &= ~FUSB300_EPSET1_INTERVAL(0x7);
- reg |= FUSB300_EPSET1_INTERVAL(info.interval);
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
-}
-
-static void fusb300_set_bwnum(struct fusb300 *fusb300,
- struct fusb300_ep_info info)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
-
- reg &= ~FUSB300_EPSET1_BWNUM(0x3);
- reg |= FUSB300_EPSET1_BWNUM(info.bw_num);
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum));
-}
-
-static void set_ep_reg(struct fusb300 *fusb300,
- struct fusb300_ep_info info)
-{
- fusb300_set_eptype(fusb300, info);
- fusb300_set_epdir(fusb300, info);
- fusb300_set_epmps(fusb300, info);
-
- if (info.interval)
- fusb300_set_interval(fusb300, info);
-
- if (info.bw_num)
- fusb300_set_bwnum(fusb300, info);
-
- fusb300_set_ep_active(fusb300, info.epnum);
-}
-
-static int config_ep(struct fusb300_ep *ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct fusb300 *fusb300 = ep->fusb300;
- struct fusb300_ep_info info;
-
- ep->ep.desc = desc;
-
- info.interval = 0;
- info.addrofs = 0;
- info.bw_num = 0;
-
- info.type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
- info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
- info.maxpacket = usb_endpoint_maxp(desc);
- info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-
- if ((info.type == USB_ENDPOINT_XFER_INT) ||
- (info.type == USB_ENDPOINT_XFER_ISOC)) {
- info.interval = desc->bInterval;
- if (info.type == USB_ENDPOINT_XFER_ISOC)
- info.bw_num = usb_endpoint_maxp_mult(desc);
- }
-
- ep_fifo_setting(fusb300, info);
-
- set_ep_reg(fusb300, info);
-
- fusb300_ep_setting(ep, info);
-
- fusb300->ep[info.epnum] = ep;
-
- return 0;
-}
-
-static int fusb300_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct fusb300_ep *ep;
-
- ep = container_of(_ep, struct fusb300_ep, ep);
-
- if (ep->fusb300->reenum) {
- ep->fusb300->fifo_entry_num = 0;
- ep->fusb300->addrofs = 0;
- ep->fusb300->reenum = 0;
- }
-
- return config_ep(ep, desc);
-}
-
-static int fusb300_disable(struct usb_ep *_ep)
-{
- struct fusb300_ep *ep;
- struct fusb300_request *req;
- unsigned long flags;
-
- ep = container_of(_ep, struct fusb300_ep, ep);
-
- BUG_ON(!ep);
-
- while (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next, struct fusb300_request, queue);
- spin_lock_irqsave(&ep->fusb300->lock, flags);
- done(ep, req, -ECONNRESET);
- spin_unlock_irqrestore(&ep->fusb300->lock, flags);
- }
-
- return fusb300_ep_release(ep);
-}
-
-static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep,
- gfp_t gfp_flags)
-{
- struct fusb300_request *req;
-
- req = kzalloc(sizeof(struct fusb300_request), gfp_flags);
- if (!req)
- return NULL;
- INIT_LIST_HEAD(&req->queue);
-
- return &req->req;
-}
-
-static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct fusb300_request *req;
-
- req = container_of(_req, struct fusb300_request, req);
- kfree(req);
-}
-
-static int enable_fifo_int(struct fusb300_ep *ep)
-{
- struct fusb300 *fusb300 = ep->fusb300;
-
- if (ep->epnum) {
- fusb300_enable_bit(fusb300, FUSB300_OFFSET_IGER0,
- FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
- } else {
- pr_err("can't enable_fifo_int ep0\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int disable_fifo_int(struct fusb300_ep *ep)
-{
- struct fusb300 *fusb300 = ep->fusb300;
-
- if (ep->epnum) {
- fusb300_disable_bit(fusb300, FUSB300_OFFSET_IGER0,
- FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum));
- } else {
- pr_err("can't disable_fifo_int ep0\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length)
-{
- u32 reg;
-
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
- reg &= ~FUSB300_CSR_LEN_MSK;
- reg |= FUSB300_CSR_LEN(length);
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_CSR);
-}
-
-/* write data to cx fifo */
-static void fusb300_wrcxf(struct fusb300_ep *ep,
- struct fusb300_request *req)
-{
- int i = 0;
- u8 *tmp;
- u32 data;
- struct fusb300 *fusb300 = ep->fusb300;
- u32 length = req->req.length - req->req.actual;
-
- tmp = req->req.buf + req->req.actual;
-
- if (length > SS_CTL_MAX_PACKET_SIZE) {
- fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE);
- for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) {
- data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
- *(tmp + 3) << 24;
- iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
- tmp += 4;
- }
- req->req.actual += SS_CTL_MAX_PACKET_SIZE;
- } else { /* length is less than max packet size */
- fusb300_set_cxlen(fusb300, length);
- for (i = length >> 2; i > 0; i--) {
- data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 |
- *(tmp + 3) << 24;
- printk(KERN_DEBUG " 0x%x\n", data);
- iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
- tmp = tmp + 4;
- }
- switch (length % 4) {
- case 1:
- data = *tmp;
- printk(KERN_DEBUG " 0x%x\n", data);
- iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
- break;
- case 2:
- data = *tmp | *(tmp + 1) << 8;
- printk(KERN_DEBUG " 0x%x\n", data);
- iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
- break;
- case 3:
- data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
- printk(KERN_DEBUG " 0x%x\n", data);
- iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT);
- break;
- default:
- break;
- }
- req->req.actual += length;
- }
-}
-
-static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep)
-{
- fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
- FUSB300_EPSET0_STL);
-}
-
-static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
-
- if (reg & FUSB300_EPSET0_STL) {
- printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep);
- reg |= FUSB300_EPSET0_STL_CLR;
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
- }
-}
-
-static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req)
-{
- if (ep->fusb300->ep0_dir) { /* if IN */
- if (req->req.length) {
- fusb300_wrcxf(ep, req);
- } else
- printk(KERN_DEBUG "%s : req->req.length = 0x%x\n",
- __func__, req->req.length);
- if ((req->req.length == req->req.actual) ||
- (req->req.actual < ep->ep.maxpacket))
- done(ep, req, 0);
- } else { /* OUT */
- if (!req->req.length)
- done(ep, req, 0);
- else
- fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1,
- FUSB300_IGER1_CX_OUT_INT);
- }
-}
-
-static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req,
- gfp_t gfp_flags)
-{
- struct fusb300_ep *ep;
- struct fusb300_request *req;
- unsigned long flags;
- int request = 0;
-
- ep = container_of(_ep, struct fusb300_ep, ep);
- req = container_of(_req, struct fusb300_request, req);
-
- if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&ep->fusb300->lock, flags);
-
- if (list_empty(&ep->queue))
- request = 1;
-
- list_add_tail(&req->queue, &ep->queue);
-
- req->req.actual = 0;
- req->req.status = -EINPROGRESS;
-
- if (ep->ep.desc == NULL) /* ep0 */
- ep0_queue(ep, req);
- else if (request && !ep->stall)
- enable_fifo_int(ep);
-
- spin_unlock_irqrestore(&ep->fusb300->lock, flags);
-
- return 0;
-}
-
-static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct fusb300_ep *ep;
- struct fusb300_request *req;
- unsigned long flags;
-
- ep = container_of(_ep, struct fusb300_ep, ep);
- req = container_of(_req, struct fusb300_request, req);
-
- spin_lock_irqsave(&ep->fusb300->lock, flags);
- if (!list_empty(&ep->queue))
- done(ep, req, -ECONNRESET);
- spin_unlock_irqrestore(&ep->fusb300->lock, flags);
-
- return 0;
-}
-
-static int fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
-{
- struct fusb300_ep *ep;
- struct fusb300 *fusb300;
- unsigned long flags;
- int ret = 0;
-
- ep = container_of(_ep, struct fusb300_ep, ep);
-
- fusb300 = ep->fusb300;
-
- spin_lock_irqsave(&ep->fusb300->lock, flags);
-
- if (!list_empty(&ep->queue)) {
- ret = -EAGAIN;
- goto out;
- }
-
- if (value) {
- fusb300_set_epnstall(fusb300, ep->epnum);
- ep->stall = 1;
- if (wedge)
- ep->wedged = 1;
- } else {
- fusb300_clear_epnstall(fusb300, ep->epnum);
- ep->stall = 0;
- ep->wedged = 0;
- }
-
-out:
- spin_unlock_irqrestore(&ep->fusb300->lock, flags);
- return ret;
-}
-
-static int fusb300_set_halt(struct usb_ep *_ep, int value)
-{
- return fusb300_set_halt_and_wedge(_ep, value, 0);
-}
-
-static int fusb300_set_wedge(struct usb_ep *_ep)
-{
- return fusb300_set_halt_and_wedge(_ep, 1, 1);
-}
-
-static void fusb300_fifo_flush(struct usb_ep *_ep)
-{
-}
-
-static const struct usb_ep_ops fusb300_ep_ops = {
- .enable = fusb300_enable,
- .disable = fusb300_disable,
-
- .alloc_request = fusb300_alloc_request,
- .free_request = fusb300_free_request,
-
- .queue = fusb300_queue,
- .dequeue = fusb300_dequeue,
-
- .set_halt = fusb300_set_halt,
- .fifo_flush = fusb300_fifo_flush,
- .set_wedge = fusb300_set_wedge,
-};
-
-/*****************************************************************************/
-static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset,
- u32 value)
-{
- iowrite32(value, fusb300->reg + offset);
-}
-
-static void fusb300_reset(void)
-{
-}
-
-static void fusb300_set_cxstall(struct fusb300 *fusb300)
-{
- fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
- FUSB300_CSR_STL);
-}
-
-static void fusb300_set_cxdone(struct fusb300 *fusb300)
-{
- fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR,
- FUSB300_CSR_DONE);
-}
-
-/* read data from cx fifo */
-static void fusb300_rdcxf(struct fusb300 *fusb300,
- u8 *buffer, u32 length)
-{
- int i = 0;
- u8 *tmp;
- u32 data;
-
- tmp = buffer;
-
- for (i = (length >> 2); i > 0; i--) {
- data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
- printk(KERN_DEBUG " 0x%x\n", data);
- *tmp = data & 0xFF;
- *(tmp + 1) = (data >> 8) & 0xFF;
- *(tmp + 2) = (data >> 16) & 0xFF;
- *(tmp + 3) = (data >> 24) & 0xFF;
- tmp = tmp + 4;
- }
-
- switch (length % 4) {
- case 1:
- data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
- printk(KERN_DEBUG " 0x%x\n", data);
- *tmp = data & 0xFF;
- break;
- case 2:
- data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
- printk(KERN_DEBUG " 0x%x\n", data);
- *tmp = data & 0xFF;
- *(tmp + 1) = (data >> 8) & 0xFF;
- break;
- case 3:
- data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT);
- printk(KERN_DEBUG " 0x%x\n", data);
- *tmp = data & 0xFF;
- *(tmp + 1) = (data >> 8) & 0xFF;
- *(tmp + 2) = (data >> 16) & 0xFF;
- break;
- default:
- break;
- }
-}
-
-static void fusb300_rdfifo(struct fusb300_ep *ep,
- struct fusb300_request *req,
- u32 length)
-{
- int i = 0;
- u8 *tmp;
- u32 data, reg;
- struct fusb300 *fusb300 = ep->fusb300;
-
- tmp = req->req.buf + req->req.actual;
- req->req.actual += length;
-
- if (req->req.actual > req->req.length)
- printk(KERN_DEBUG "req->req.actual > req->req.length\n");
-
- for (i = (length >> 2); i > 0; i--) {
- data = ioread32(fusb300->reg +
- FUSB300_OFFSET_EPPORT(ep->epnum));
- *tmp = data & 0xFF;
- *(tmp + 1) = (data >> 8) & 0xFF;
- *(tmp + 2) = (data >> 16) & 0xFF;
- *(tmp + 3) = (data >> 24) & 0xFF;
- tmp = tmp + 4;
- }
-
- switch (length % 4) {
- case 1:
- data = ioread32(fusb300->reg +
- FUSB300_OFFSET_EPPORT(ep->epnum));
- *tmp = data & 0xFF;
- break;
- case 2:
- data = ioread32(fusb300->reg +
- FUSB300_OFFSET_EPPORT(ep->epnum));
- *tmp = data & 0xFF;
- *(tmp + 1) = (data >> 8) & 0xFF;
- break;
- case 3:
- data = ioread32(fusb300->reg +
- FUSB300_OFFSET_EPPORT(ep->epnum));
- *tmp = data & 0xFF;
- *(tmp + 1) = (data >> 8) & 0xFF;
- *(tmp + 2) = (data >> 16) & 0xFF;
- break;
- default:
- break;
- }
-
- do {
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
- reg &= FUSB300_IGR1_SYNF0_EMPTY_INT;
- if (i)
- printk(KERN_INFO "sync fifo is not empty!\n");
- i++;
- } while (!reg);
-}
-
-static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep)
-{
- u8 value;
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep));
-
- value = reg & FUSB300_EPSET0_STL;
-
- return value;
-}
-
-static u8 fusb300_get_cxstall(struct fusb300 *fusb300)
-{
- u8 value;
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR);
-
- value = (reg & FUSB300_CSR_STL) >> 1;
-
- return value;
-}
-
-static void request_error(struct fusb300 *fusb300)
-{
- fusb300_set_cxstall(fusb300);
- printk(KERN_DEBUG "request error!!\n");
-}
-
-static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
-__releases(fusb300->lock)
-__acquires(fusb300->lock)
-{
- u8 ep;
- u16 status = 0;
- u16 w_index = ctrl->wIndex;
-
- switch (ctrl->bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_DEVICE:
- status = 1 << USB_DEVICE_SELF_POWERED;
- break;
- case USB_RECIP_INTERFACE:
- status = 0;
- break;
- case USB_RECIP_ENDPOINT:
- ep = w_index & USB_ENDPOINT_NUMBER_MASK;
- if (ep) {
- if (fusb300_get_epnstall(fusb300, ep))
- status = 1 << USB_ENDPOINT_HALT;
- } else {
- if (fusb300_get_cxstall(fusb300))
- status = 0;
- }
- break;
-
- default:
- request_error(fusb300);
- return; /* exit */
- }
-
- fusb300->ep0_data = cpu_to_le16(status);
- fusb300->ep0_req->buf = &fusb300->ep0_data;
- fusb300->ep0_req->length = 2;
-
- spin_unlock(&fusb300->lock);
- fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL);
- spin_lock(&fusb300->lock);
-}
-
-static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
-{
- u8 ep;
-
- switch (ctrl->bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_DEVICE:
- fusb300_set_cxdone(fusb300);
- break;
- case USB_RECIP_INTERFACE:
- fusb300_set_cxdone(fusb300);
- break;
- case USB_RECIP_ENDPOINT: {
- u16 w_index = le16_to_cpu(ctrl->wIndex);
-
- ep = w_index & USB_ENDPOINT_NUMBER_MASK;
- if (ep)
- fusb300_set_epnstall(fusb300, ep);
- else
- fusb300_set_cxstall(fusb300);
- fusb300_set_cxdone(fusb300);
- }
- break;
- default:
- request_error(fusb300);
- break;
- }
-}
-
-static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep)
-{
- fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep),
- FUSB300_EPSET0_CLRSEQNUM);
-}
-
-static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
-{
- struct fusb300_ep *ep =
- fusb300->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK];
-
- switch (ctrl->bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_DEVICE:
- fusb300_set_cxdone(fusb300);
- break;
- case USB_RECIP_INTERFACE:
- fusb300_set_cxdone(fusb300);
- break;
- case USB_RECIP_ENDPOINT:
- if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) {
- if (ep->wedged) {
- fusb300_set_cxdone(fusb300);
- break;
- }
- if (ep->stall) {
- ep->stall = 0;
- fusb300_clear_seqnum(fusb300, ep->epnum);
- fusb300_clear_epnstall(fusb300, ep->epnum);
- if (!list_empty(&ep->queue))
- enable_fifo_int(ep);
- }
- }
- fusb300_set_cxdone(fusb300);
- break;
- default:
- request_error(fusb300);
- break;
- }
-}
-
-static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_DAR);
-
- reg &= ~FUSB300_DAR_DRVADDR_MSK;
- reg |= FUSB300_DAR_DRVADDR(addr);
-
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_DAR);
-}
-
-static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
-{
- if (ctrl->wValue >= 0x0100)
- request_error(fusb300);
- else {
- fusb300_set_dev_addr(fusb300, ctrl->wValue);
- fusb300_set_cxdone(fusb300);
- }
-}
-
-static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
-{
- u8 *p = (u8 *)ctrl;
- u8 ret = 0;
- u8 i = 0;
-
- fusb300_rdcxf(fusb300, p, 8);
- fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN;
- fusb300->ep0_length = ctrl->wLength;
-
- /* check request */
- if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- switch (ctrl->bRequest) {
- case USB_REQ_GET_STATUS:
- get_status(fusb300, ctrl);
- break;
- case USB_REQ_CLEAR_FEATURE:
- clear_feature(fusb300, ctrl);
- break;
- case USB_REQ_SET_FEATURE:
- set_feature(fusb300, ctrl);
- break;
- case USB_REQ_SET_ADDRESS:
- set_address(fusb300, ctrl);
- break;
- case USB_REQ_SET_CONFIGURATION:
- fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR,
- FUSB300_DAR_SETCONFG);
- /* clear sequence number */
- for (i = 1; i <= FUSB300_MAX_NUM_EP; i++)
- fusb300_clear_seqnum(fusb300, i);
- fusb300->reenum = 1;
- ret = 1;
- break;
- default:
- ret = 1;
- break;
- }
- } else
- ret = 1;
-
- return ret;
-}
-
-static void done(struct fusb300_ep *ep, struct fusb300_request *req,
- int status)
-{
- list_del_init(&req->queue);
-
- /* don't modify queue heads during completion callback */
- if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN)
- req->req.status = -ESHUTDOWN;
- else
- req->req.status = status;
-
- spin_unlock(&ep->fusb300->lock);
- usb_gadget_giveback_request(&ep->ep, &req->req);
- spin_lock(&ep->fusb300->lock);
-
- if (ep->epnum) {
- disable_fifo_int(ep);
- if (!list_empty(&ep->queue))
- enable_fifo_int(ep);
- } else
- fusb300_set_cxdone(ep->fusb300);
-}
-
-static void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, dma_addr_t d,
- u32 len)
-{
- u32 value;
- u32 reg;
-
- /* wait SW owner */
- do {
- reg = ioread32(ep->fusb300->reg +
- FUSB300_OFFSET_EPPRD_W0(ep->epnum));
- reg &= FUSB300_EPPRD0_H;
- } while (reg);
-
- iowrite32(d, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum));
-
- value = FUSB300_EPPRD0_BTC(len) | FUSB300_EPPRD0_H |
- FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I;
- iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum));
-
- iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum));
-
- fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY,
- FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum));
-}
-
-static void fusb300_wait_idma_finished(struct fusb300_ep *ep)
-{
- u32 reg;
-
- do {
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1);
- if ((reg & FUSB300_IGR1_VBUS_CHG_INT) ||
- (reg & FUSB300_IGR1_WARM_RST_INT) ||
- (reg & FUSB300_IGR1_HOT_RST_INT) ||
- (reg & FUSB300_IGR1_USBRST_INT)
- )
- goto IDMA_RESET;
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0);
- reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum);
- } while (!reg);
-
- fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0,
- FUSB300_IGR0_EPn_PRD_INT(ep->epnum));
- return;
-
-IDMA_RESET:
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGER0);
- reg &= ~FUSB300_IGER0_EEPn_PRD_INT(ep->epnum);
- iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_IGER0);
-}
-
-static void fusb300_set_idma(struct fusb300_ep *ep,
- struct fusb300_request *req)
-{
- int ret;
-
- ret = usb_gadget_map_request(&ep->fusb300->gadget,
- &req->req, DMA_TO_DEVICE);
- if (ret)
- return;
-
- fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
- FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));
-
- fusb300_fill_idma_prdtbl(ep, req->req.dma, req->req.length);
- /* check idma is done */
- fusb300_wait_idma_finished(ep);
-
- usb_gadget_unmap_request(&ep->fusb300->gadget,
- &req->req, DMA_TO_DEVICE);
-}
-
-static void in_ep_fifo_handler(struct fusb300_ep *ep)
-{
- struct fusb300_request *req = list_entry(ep->queue.next,
- struct fusb300_request, queue);
-
- if (req->req.length)
- fusb300_set_idma(ep, req);
- done(ep, req, 0);
-}
-
-static void out_ep_fifo_handler(struct fusb300_ep *ep)
-{
- struct fusb300 *fusb300 = ep->fusb300;
- struct fusb300_request *req = list_entry(ep->queue.next,
- struct fusb300_request, queue);
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
- u32 length = reg & FUSB300_FFR_BYCNT;
-
- fusb300_rdfifo(ep, req, length);
-
- /* finish out transfer */
- if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket))
- done(ep, req, 0);
-}
-
-static void check_device_mode(struct fusb300 *fusb300)
-{
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_GCR);
-
- switch (reg & FUSB300_GCR_DEVEN_MSK) {
- case FUSB300_GCR_DEVEN_SS:
- fusb300->gadget.speed = USB_SPEED_SUPER;
- break;
- case FUSB300_GCR_DEVEN_HS:
- fusb300->gadget.speed = USB_SPEED_HIGH;
- break;
- case FUSB300_GCR_DEVEN_FS:
- fusb300->gadget.speed = USB_SPEED_FULL;
- break;
- default:
- fusb300->gadget.speed = USB_SPEED_UNKNOWN;
- break;
- }
- printk(KERN_INFO "dev_mode = %d\n", (reg & FUSB300_GCR_DEVEN_MSK));
-}
-
-
-static void fusb300_ep0out(struct fusb300 *fusb300)
-{
- struct fusb300_ep *ep = fusb300->ep[0];
- u32 reg;
-
- if (!list_empty(&ep->queue)) {
- struct fusb300_request *req;
-
- req = list_first_entry(&ep->queue,
- struct fusb300_request, queue);
- if (req->req.length)
- fusb300_rdcxf(ep->fusb300, req->req.buf,
- req->req.length);
- done(ep, req, 0);
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
- reg &= ~FUSB300_IGER1_CX_OUT_INT;
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_IGER1);
- } else
- pr_err("%s : empty queue\n", __func__);
-}
-
-static void fusb300_ep0in(struct fusb300 *fusb300)
-{
- struct fusb300_request *req;
- struct fusb300_ep *ep = fusb300->ep[0];
-
- if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) {
- req = list_entry(ep->queue.next,
- struct fusb300_request, queue);
- if (req->req.length)
- fusb300_wrcxf(ep, req);
- if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
- done(ep, req, 0);
- } else
- fusb300_set_cxdone(fusb300);
-}
-
-static void fusb300_grp2_handler(void)
-{
-}
-
-static void fusb300_grp3_handler(void)
-{
-}
-
-static void fusb300_grp4_handler(void)
-{
-}
-
-static void fusb300_grp5_handler(void)
-{
-}
-
-static irqreturn_t fusb300_irq(int irq, void *_fusb300)
-{
- struct fusb300 *fusb300 = _fusb300;
- u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
- u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1);
- u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0);
- u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0);
- struct usb_ctrlrequest ctrl;
- u8 in;
- u32 reg;
- int i;
-
- spin_lock(&fusb300->lock);
-
- int_grp1 &= int_grp1_en;
- int_grp0 &= int_grp0_en;
-
- if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_WARM_RST_INT);
- printk(KERN_INFO"fusb300_warmreset\n");
- fusb300_reset();
- }
-
- if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_HOT_RST_INT);
- printk(KERN_INFO"fusb300_hotreset\n");
- fusb300_reset();
- }
-
- if (int_grp1 & FUSB300_IGR1_USBRST_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_USBRST_INT);
- fusb300_reset();
- }
- /* COMABT_INT has a highest priority */
-
- if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_CX_COMABT_INT);
- printk(KERN_INFO"fusb300_ep0abt\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_VBUS_CHG_INT);
- printk(KERN_INFO"fusb300_vbus_change\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U3_EXIT_FAIL_INT);
- }
-
- if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U2_EXIT_FAIL_INT);
- }
-
- if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U1_EXIT_FAIL_INT);
- }
-
- if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U2_ENTRY_FAIL_INT);
- }
-
- if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U1_ENTRY_FAIL_INT);
- }
-
- if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U3_EXIT_INT);
- printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U2_EXIT_INT);
- printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U1_EXIT_INT);
- printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U3_ENTRY_INT);
- printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n");
- fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1,
- FUSB300_SSCR1_GO_U3_DONE);
- }
-
- if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U2_ENTRY_INT);
- printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_U1_ENTRY_INT);
- printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_RESM_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_RESM_INT);
- printk(KERN_INFO "fusb300_resume\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_SUSP_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_SUSP_INT);
- printk(KERN_INFO "fusb300_suspend\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_HS_LPM_INT);
- printk(KERN_INFO "fusb300_HS_LPM_INT\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) {
- fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1,
- FUSB300_IGR1_DEV_MODE_CHG_INT);
- check_device_mode(fusb300);
- }
-
- if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) {
- fusb300_set_cxstall(fusb300);
- printk(KERN_INFO "fusb300_ep0fail\n");
- }
-
- if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) {
- printk(KERN_INFO "fusb300_ep0setup\n");
- if (setup_packet(fusb300, &ctrl)) {
- spin_unlock(&fusb300->lock);
- if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0)
- fusb300_set_cxstall(fusb300);
- spin_lock(&fusb300->lock);
- }
- }
-
- if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT)
- printk(KERN_INFO "fusb300_cmdend\n");
-
-
- if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) {
- printk(KERN_INFO "fusb300_cxout\n");
- fusb300_ep0out(fusb300);
- }
-
- if (int_grp1 & FUSB300_IGR1_CX_IN_INT) {
- printk(KERN_INFO "fusb300_cxin\n");
- fusb300_ep0in(fusb300);
- }
-
- if (int_grp1 & FUSB300_IGR1_INTGRP5)
- fusb300_grp5_handler();
-
- if (int_grp1 & FUSB300_IGR1_INTGRP4)
- fusb300_grp4_handler();
-
- if (int_grp1 & FUSB300_IGR1_INTGRP3)
- fusb300_grp3_handler();
-
- if (int_grp1 & FUSB300_IGR1_INTGRP2)
- fusb300_grp2_handler();
-
- if (int_grp0) {
- for (i = 1; i < FUSB300_MAX_NUM_EP; i++) {
- if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) {
- reg = ioread32(fusb300->reg +
- FUSB300_OFFSET_EPSET1(i));
- in = (reg & FUSB300_EPSET1_DIRIN) ? 1 : 0;
- if (in)
- in_ep_fifo_handler(fusb300->ep[i]);
- else
- out_ep_fifo_handler(fusb300->ep[i]);
- }
- }
- }
-
- spin_unlock(&fusb300->lock);
-
- return IRQ_HANDLED;
-}
-
-static void fusb300_set_u2_timeout(struct fusb300 *fusb300,
- u32 time)
-{
- u32 reg;
-
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
- reg &= ~0xff;
- reg |= FUSB300_SSCR2_U2TIMEOUT(time);
-
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
-}
-
-static void fusb300_set_u1_timeout(struct fusb300 *fusb300,
- u32 time)
-{
- u32 reg;
-
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT);
- reg &= ~(0xff << 8);
- reg |= FUSB300_SSCR2_U1TIMEOUT(time);
-
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT);
-}
-
-static void init_controller(struct fusb300 *fusb300)
-{
- u32 reg;
- u32 mask = 0;
- u32 val = 0;
-
- /* split on */
- mask = val = FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON;
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR);
- reg &= ~mask;
- reg |= val;
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR);
-
- /* enable high-speed LPM */
- mask = val = FUSB300_HSCR_HS_LPM_PERMIT;
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR);
- reg &= ~mask;
- reg |= val;
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR);
-
- /*set u1 u2 timer*/
- fusb300_set_u2_timeout(fusb300, 0xff);
- fusb300_set_u1_timeout(fusb300, 0xff);
-
- /* enable all grp1 interrupt */
- iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1);
-}
-/*------------------------------------------------------------------------*/
-static int fusb300_udc_start(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
-{
- struct fusb300 *fusb300 = to_fusb300(g);
-
- /* hook up the driver */
- fusb300->driver = driver;
-
- return 0;
-}
-
-static int fusb300_udc_stop(struct usb_gadget *g)
-{
- struct fusb300 *fusb300 = to_fusb300(g);
-
- init_controller(fusb300);
- fusb300->driver = NULL;
-
- return 0;
-}
-/*--------------------------------------------------------------------------*/
-
-static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
-{
- return 0;
-}
-
-static const struct usb_gadget_ops fusb300_gadget_ops = {
- .pullup = fusb300_udc_pullup,
- .udc_start = fusb300_udc_start,
- .udc_stop = fusb300_udc_stop,
-};
-
-static void fusb300_remove(struct platform_device *pdev)
-{
- struct fusb300 *fusb300 = platform_get_drvdata(pdev);
- int i;
-
- usb_del_gadget_udc(&fusb300->gadget);
- iounmap(fusb300->reg);
- free_irq(platform_get_irq(pdev, 0), fusb300);
- free_irq(platform_get_irq(pdev, 1), fusb300);
-
- fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
- for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
- kfree(fusb300->ep[i]);
- kfree(fusb300);
-}
-
-static int fusb300_probe(struct platform_device *pdev)
-{
- struct resource *res, *ires, *ires1;
- void __iomem *reg = NULL;
- struct fusb300 *fusb300 = NULL;
- struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP];
- int ret = 0;
- int i;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- pr_err("platform_get_resource error.\n");
- goto clean_up;
- }
-
- ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!ires) {
- ret = -ENODEV;
- dev_err(&pdev->dev,
- "platform_get_resource IORESOURCE_IRQ error.\n");
- goto clean_up;
- }
-
- ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!ires1) {
- ret = -ENODEV;
- dev_err(&pdev->dev,
- "platform_get_resource IORESOURCE_IRQ 1 error.\n");
- goto clean_up;
- }
-
- reg = ioremap(res->start, resource_size(res));
- if (reg == NULL) {
- ret = -ENOMEM;
- pr_err("ioremap error.\n");
- goto clean_up;
- }
-
- /* initialize udc */
- fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL);
- if (fusb300 == NULL) {
- ret = -ENOMEM;
- goto clean_up;
- }
-
- for (i = 0; i < FUSB300_MAX_NUM_EP; i++) {
- _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL);
- if (_ep[i] == NULL) {
- ret = -ENOMEM;
- goto clean_up;
- }
- fusb300->ep[i] = _ep[i];
- }
-
- spin_lock_init(&fusb300->lock);
-
- platform_set_drvdata(pdev, fusb300);
-
- fusb300->gadget.ops = &fusb300_gadget_ops;
-
- fusb300->gadget.max_speed = USB_SPEED_HIGH;
- fusb300->gadget.name = udc_name;
- fusb300->reg = reg;
-
- ret = request_irq(ires->start, fusb300_irq, IRQF_SHARED,
- udc_name, fusb300);
- if (ret < 0) {
- pr_err("request_irq error (%d)\n", ret);
- goto clean_up;
- }
-
- ret = request_irq(ires1->start, fusb300_irq,
- IRQF_SHARED, udc_name, fusb300);
- if (ret < 0) {
- pr_err("request_irq1 error (%d)\n", ret);
- goto err_request_irq1;
- }
-
- INIT_LIST_HEAD(&fusb300->gadget.ep_list);
-
- for (i = 0; i < FUSB300_MAX_NUM_EP ; i++) {
- struct fusb300_ep *ep = fusb300->ep[i];
-
- if (i != 0) {
- INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list);
- list_add_tail(&fusb300->ep[i]->ep.ep_list,
- &fusb300->gadget.ep_list);
- }
- ep->fusb300 = fusb300;
- INIT_LIST_HEAD(&ep->queue);
- ep->ep.name = fusb300_ep_name[i];
- ep->ep.ops = &fusb300_ep_ops;
- usb_ep_set_maxpacket_limit(&ep->ep, HS_BULK_MAX_PACKET_SIZE);
-
- if (i == 0) {
- ep->ep.caps.type_control = true;
- } else {
- ep->ep.caps.type_iso = true;
- ep->ep.caps.type_bulk = true;
- ep->ep.caps.type_int = true;
- }
-
- ep->ep.caps.dir_in = true;
- ep->ep.caps.dir_out = true;
- }
- usb_ep_set_maxpacket_limit(&fusb300->ep[0]->ep, HS_CTL_MAX_PACKET_SIZE);
- fusb300->ep[0]->epnum = 0;
- fusb300->gadget.ep0 = &fusb300->ep[0]->ep;
- INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list);
-
- fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
- GFP_KERNEL);
- if (fusb300->ep0_req == NULL) {
- ret = -ENOMEM;
- goto err_alloc_request;
- }
-
- init_controller(fusb300);
- ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
- if (ret)
- goto err_add_udc;
-
- dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
-
- return 0;
-
-err_add_udc:
- fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
-
-err_alloc_request:
- free_irq(ires1->start, fusb300);
-
-err_request_irq1:
- free_irq(ires->start, fusb300);
-
-clean_up:
- if (fusb300) {
- if (fusb300->ep0_req)
- fusb300_free_request(&fusb300->ep[0]->ep,
- fusb300->ep0_req);
- for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
- kfree(fusb300->ep[i]);
- kfree(fusb300);
- }
- if (reg)
- iounmap(reg);
-
- return ret;
-}
-
-static struct platform_driver fusb300_driver = {
- .probe = fusb300_probe,
- .remove = fusb300_remove,
- .driver = {
- .name = udc_name,
- },
-};
-
-module_platform_driver(fusb300_driver);
diff --git a/drivers/usb/gadget/udc/fusb300_udc.h b/drivers/usb/gadget/udc/fusb300_udc.h
deleted file mode 100644
index eb3d6d379ba7..000000000000
--- a/drivers/usb/gadget/udc/fusb300_udc.h
+++ /dev/null
@@ -1,675 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Fusb300 UDC (USB gadget)
- *
- * Copyright (C) 2010 Faraday Technology Corp.
- *
- * Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
- */
-
-
-#ifndef __FUSB300_UDC_H__
-#define __FUSB300_UDC_H__
-
-#include <linux/kernel.h>
-
-#define FUSB300_OFFSET_GCR 0x00
-#define FUSB300_OFFSET_GTM 0x04
-#define FUSB300_OFFSET_DAR 0x08
-#define FUSB300_OFFSET_CSR 0x0C
-#define FUSB300_OFFSET_CXPORT 0x10
-#define FUSB300_OFFSET_EPSET0(n) (0x20 + (n - 1) * 0x30)
-#define FUSB300_OFFSET_EPSET1(n) (0x24 + (n - 1) * 0x30)
-#define FUSB300_OFFSET_EPSET2(n) (0x28 + (n - 1) * 0x30)
-#define FUSB300_OFFSET_EPFFR(n) (0x2c + (n - 1) * 0x30)
-#define FUSB300_OFFSET_EPSTRID(n) (0x40 + (n - 1) * 0x30)
-#define FUSB300_OFFSET_HSPTM 0x300
-#define FUSB300_OFFSET_HSCR 0x304
-#define FUSB300_OFFSET_SSCR0 0x308
-#define FUSB300_OFFSET_SSCR1 0x30C
-#define FUSB300_OFFSET_TT 0x310
-#define FUSB300_OFFSET_DEVNOTF 0x314
-#define FUSB300_OFFSET_DNC1 0x318
-#define FUSB300_OFFSET_CS 0x31C
-#define FUSB300_OFFSET_SOF 0x324
-#define FUSB300_OFFSET_EFCS 0x328
-#define FUSB300_OFFSET_IGR0 0x400
-#define FUSB300_OFFSET_IGR1 0x404
-#define FUSB300_OFFSET_IGR2 0x408
-#define FUSB300_OFFSET_IGR3 0x40C
-#define FUSB300_OFFSET_IGR4 0x410
-#define FUSB300_OFFSET_IGR5 0x414
-#define FUSB300_OFFSET_IGER0 0x420
-#define FUSB300_OFFSET_IGER1 0x424
-#define FUSB300_OFFSET_IGER2 0x428
-#define FUSB300_OFFSET_IGER3 0x42C
-#define FUSB300_OFFSET_IGER4 0x430
-#define FUSB300_OFFSET_IGER5 0x434
-#define FUSB300_OFFSET_DMAHMER 0x500
-#define FUSB300_OFFSET_EPPRDRDY 0x504
-#define FUSB300_OFFSET_DMAEPMR 0x508
-#define FUSB300_OFFSET_DMAENR 0x50C
-#define FUSB300_OFFSET_DMAAPR 0x510
-#define FUSB300_OFFSET_AHBCR 0x514
-#define FUSB300_OFFSET_EPPRD_W0(n) (0x520 + (n - 1) * 0x10)
-#define FUSB300_OFFSET_EPPRD_W1(n) (0x524 + (n - 1) * 0x10)
-#define FUSB300_OFFSET_EPPRD_W2(n) (0x528 + (n - 1) * 0x10)
-#define FUSB300_OFFSET_EPRD_PTR(n) (0x52C + (n - 1) * 0x10)
-#define FUSB300_OFFSET_BUFDBG_START 0x800
-#define FUSB300_OFFSET_BUFDBG_END 0xBFC
-#define FUSB300_OFFSET_EPPORT(n) (0x1010 + (n - 1) * 0x10)
-
-/*
- * * Global Control Register (offset = 000H)
- * */
-#define FUSB300_GCR_SF_RST (1 << 8)
-#define FUSB300_GCR_VBUS_STATUS (1 << 7)
-#define FUSB300_GCR_FORCE_HS_SUSP (1 << 6)
-#define FUSB300_GCR_SYNC_FIFO1_CLR (1 << 5)
-#define FUSB300_GCR_SYNC_FIFO0_CLR (1 << 4)
-#define FUSB300_GCR_FIFOCLR (1 << 3)
-#define FUSB300_GCR_GLINTEN (1 << 2)
-#define FUSB300_GCR_DEVEN_FS 0x3
-#define FUSB300_GCR_DEVEN_HS 0x2
-#define FUSB300_GCR_DEVEN_SS 0x1
-#define FUSB300_GCR_DEVDIS 0x0
-#define FUSB300_GCR_DEVEN_MSK 0x3
-
-
-/*
- * *Global Test Mode (offset = 004H)
- * */
-#define FUSB300_GTM_TST_DIS_SOFGEN (1 << 16)
-#define FUSB300_GTM_TST_CUR_EP_ENTRY(n) ((n & 0xF) << 12)
-#define FUSB300_GTM_TST_EP_ENTRY(n) ((n & 0xF) << 8)
-#define FUSB300_GTM_TST_EP_NUM(n) ((n & 0xF) << 4)
-#define FUSB300_GTM_TST_FIFO_DEG (1 << 1)
-#define FUSB300_GTM_TSTMODE (1 << 0)
-
-/*
- * * Device Address Register (offset = 008H)
- * */
-#define FUSB300_DAR_SETCONFG (1 << 7)
-#define FUSB300_DAR_DRVADDR(x) (x & 0x7F)
-#define FUSB300_DAR_DRVADDR_MSK 0x7F
-
-/*
- * *Control Transfer Configuration and Status Register
- * (CX_Config_Status, offset = 00CH)
- * */
-#define FUSB300_CSR_LEN(x) ((x & 0xFFFF) << 8)
-#define FUSB300_CSR_LEN_MSK (0xFFFF << 8)
-#define FUSB300_CSR_EMP (1 << 4)
-#define FUSB300_CSR_FUL (1 << 3)
-#define FUSB300_CSR_CLR (1 << 2)
-#define FUSB300_CSR_STL (1 << 1)
-#define FUSB300_CSR_DONE (1 << 0)
-
-/*
- * * EPn Setting 0 (EPn_SET0, offset = 020H+(n-1)*30H, n=1~15 )
- * */
-#define FUSB300_EPSET0_STL_CLR (1 << 3)
-#define FUSB300_EPSET0_CLRSEQNUM (1 << 2)
-#define FUSB300_EPSET0_STL (1 << 0)
-
-/*
- * * EPn Setting 1 (EPn_SET1, offset = 024H+(n-1)*30H, n=1~15)
- * */
-#define FUSB300_EPSET1_START_ENTRY(x) ((x & 0xFF) << 24)
-#define FUSB300_EPSET1_START_ENTRY_MSK (0xFF << 24)
-#define FUSB300_EPSET1_FIFOENTRY(x) ((x & 0x1F) << 12)
-#define FUSB300_EPSET1_FIFOENTRY_MSK (0x1f << 12)
-#define FUSB300_EPSET1_INTERVAL(x) ((x & 0x7) << 6)
-#define FUSB300_EPSET1_BWNUM(x) ((x & 0x3) << 4)
-#define FUSB300_EPSET1_TYPEISO (1 << 2)
-#define FUSB300_EPSET1_TYPEBLK (2 << 2)
-#define FUSB300_EPSET1_TYPEINT (3 << 2)
-#define FUSB300_EPSET1_TYPE(x) ((x & 0x3) << 2)
-#define FUSB300_EPSET1_TYPE_MSK (0x3 << 2)
-#define FUSB300_EPSET1_DIROUT (0 << 1)
-#define FUSB300_EPSET1_DIRIN (1 << 1)
-#define FUSB300_EPSET1_DIR(x) ((x & 0x1) << 1)
-#define FUSB300_EPSET1_DIR_MSK ((0x1) << 1)
-#define FUSB300_EPSET1_ACTDIS 0
-#define FUSB300_EPSET1_ACTEN 1
-
-/* EPn Setting 2 (EPn_SET2, offset = 028H+(n-1)*30H, n=1~15) */
-#define FUSB300_EPSET2_ADDROFS(x) ((x & 0x7FFF) << 16)
-#define FUSB300_EPSET2_ADDROFS_MSK (0x7fff << 16)
-#define FUSB300_EPSET2_MPS(x) (x & 0x7FF)
-#define FUSB300_EPSET2_MPS_MSK 0x7FF
-
-/* EPn FIFO Register (offset = 2CH+(n-1)*30H) */
-#define FUSB300_FFR_RST (1 << 31)
-#define FUSB300_FF_FUL (1 << 30)
-#define FUSB300_FF_EMPTY (1 << 29)
-#define FUSB300_FFR_BYCNT 0x1FFFF
-
-/* EPn Stream ID (EPn_STR_ID, offset = 040H+(n-1)*30H, n=1~15) */
-#define FUSB300_STRID_STREN (1 << 16)
-#define FUSB300_STRID_STRID(x) (x & 0xFFFF)
-
-/* HS PHY Test Mode (offset = 300H) */
-#define FUSB300_HSPTM_TSTPKDONE (1 << 4)
-#define FUSB300_HSPTM_TSTPKT (1 << 3)
-#define FUSB300_HSPTM_TSTSET0NAK (1 << 2)
-#define FUSB300_HSPTM_TSTKSTA (1 << 1)
-#define FUSB300_HSPTM_TSTJSTA (1 << 0)
-
-/* HS Control Register (offset = 304H) */
-#define FUSB300_HSCR_HS_LPM_PERMIT (1 << 8)
-#define FUSB300_HSCR_HS_LPM_RMWKUP (1 << 7)
-#define FUSB300_HSCR_CAP_LPM_RMWKUP (1 << 6)
-#define FUSB300_HSCR_HS_GOSUSP (1 << 5)
-#define FUSB300_HSCR_HS_GORMWKU (1 << 4)
-#define FUSB300_HSCR_CAP_RMWKUP (1 << 3)
-#define FUSB300_HSCR_IDLECNT_0MS 0
-#define FUSB300_HSCR_IDLECNT_1MS 1
-#define FUSB300_HSCR_IDLECNT_2MS 2
-#define FUSB300_HSCR_IDLECNT_3MS 3
-#define FUSB300_HSCR_IDLECNT_4MS 4
-#define FUSB300_HSCR_IDLECNT_5MS 5
-#define FUSB300_HSCR_IDLECNT_6MS 6
-#define FUSB300_HSCR_IDLECNT_7MS 7
-
-/* SS Controller Register 0 (offset = 308H) */
-#define FUSB300_SSCR0_MAX_INTERVAL(x) ((x & 0x7) << 4)
-#define FUSB300_SSCR0_U2_FUN_EN (1 << 1)
-#define FUSB300_SSCR0_U1_FUN_EN (1 << 0)
-
-/* SS Controller Register 1 (offset = 30CH) */
-#define FUSB300_SSCR1_GO_U3_DONE (1 << 8)
-#define FUSB300_SSCR1_TXDEEMPH_LEVEL (1 << 7)
-#define FUSB300_SSCR1_DIS_SCRMB (1 << 6)
-#define FUSB300_SSCR1_FORCE_RECOVERY (1 << 5)
-#define FUSB300_SSCR1_U3_WAKEUP_EN (1 << 4)
-#define FUSB300_SSCR1_U2_EXIT_EN (1 << 3)
-#define FUSB300_SSCR1_U1_EXIT_EN (1 << 2)
-#define FUSB300_SSCR1_U2_ENTRY_EN (1 << 1)
-#define FUSB300_SSCR1_U1_ENTRY_EN (1 << 0)
-
-/* SS Controller Register 2 (offset = 310H) */
-#define FUSB300_SSCR2_SS_TX_SWING (1 << 25)
-#define FUSB300_SSCR2_FORCE_LINKPM_ACCEPT (1 << 24)
-#define FUSB300_SSCR2_U2_INACT_TIMEOUT(x) ((x & 0xFF) << 16)
-#define FUSB300_SSCR2_U1TIMEOUT(x) ((x & 0xFF) << 8)
-#define FUSB300_SSCR2_U2TIMEOUT(x) (x & 0xFF)
-
-/* SS Device Notification Control (DEV_NOTF, offset = 314H) */
-#define FUSB300_DEVNOTF_CONTEXT0(x) ((x & 0xFFFFFF) << 8)
-#define FUSB300_DEVNOTF_TYPE_DIS 0
-#define FUSB300_DEVNOTF_TYPE_FUNCWAKE 1
-#define FUSB300_DEVNOTF_TYPE_LTM 2
-#define FUSB300_DEVNOTF_TYPE_BUSINT_ADJMSG 3
-
-/* BFM Arbiter Priority Register (BFM_ARB, offset = 31CH) */
-#define FUSB300_BFMARB_ARB_M1 (1 << 3)
-#define FUSB300_BFMARB_ARB_M0 (1 << 2)
-#define FUSB300_BFMARB_ARB_S1 (1 << 1)
-#define FUSB300_BFMARB_ARB_S0 1
-
-/* Vendor Specific IO Control Register (offset = 320H) */
-#define FUSB300_VSIC_VCTLOAD_N (1 << 8)
-#define FUSB300_VSIC_VCTL(x) (x & 0x3F)
-
-/* SOF Mask Timer (offset = 324H) */
-#define FUSB300_SOF_MASK_TIMER_HS 0x044c
-#define FUSB300_SOF_MASK_TIMER_FS 0x2710
-
-/* Error Flag and Control Status (offset = 328H) */
-#define FUSB300_EFCS_PM_STATE_U3 3
-#define FUSB300_EFCS_PM_STATE_U2 2
-#define FUSB300_EFCS_PM_STATE_U1 1
-#define FUSB300_EFCS_PM_STATE_U0 0
-
-/* Interrupt Group 0 Register (offset = 400H) */
-#define FUSB300_IGR0_EP15_PRD_INT (1 << 31)
-#define FUSB300_IGR0_EP14_PRD_INT (1 << 30)
-#define FUSB300_IGR0_EP13_PRD_INT (1 << 29)
-#define FUSB300_IGR0_EP12_PRD_INT (1 << 28)
-#define FUSB300_IGR0_EP11_PRD_INT (1 << 27)
-#define FUSB300_IGR0_EP10_PRD_INT (1 << 26)
-#define FUSB300_IGR0_EP9_PRD_INT (1 << 25)
-#define FUSB300_IGR0_EP8_PRD_INT (1 << 24)
-#define FUSB300_IGR0_EP7_PRD_INT (1 << 23)
-#define FUSB300_IGR0_EP6_PRD_INT (1 << 22)
-#define FUSB300_IGR0_EP5_PRD_INT (1 << 21)
-#define FUSB300_IGR0_EP4_PRD_INT (1 << 20)
-#define FUSB300_IGR0_EP3_PRD_INT (1 << 19)
-#define FUSB300_IGR0_EP2_PRD_INT (1 << 18)
-#define FUSB300_IGR0_EP1_PRD_INT (1 << 17)
-#define FUSB300_IGR0_EPn_PRD_INT(n) (1 << (n + 16))
-
-#define FUSB300_IGR0_EP15_FIFO_INT (1 << 15)
-#define FUSB300_IGR0_EP14_FIFO_INT (1 << 14)
-#define FUSB300_IGR0_EP13_FIFO_INT (1 << 13)
-#define FUSB300_IGR0_EP12_FIFO_INT (1 << 12)
-#define FUSB300_IGR0_EP11_FIFO_INT (1 << 11)
-#define FUSB300_IGR0_EP10_FIFO_INT (1 << 10)
-#define FUSB300_IGR0_EP9_FIFO_INT (1 << 9)
-#define FUSB300_IGR0_EP8_FIFO_INT (1 << 8)
-#define FUSB300_IGR0_EP7_FIFO_INT (1 << 7)
-#define FUSB300_IGR0_EP6_FIFO_INT (1 << 6)
-#define FUSB300_IGR0_EP5_FIFO_INT (1 << 5)
-#define FUSB300_IGR0_EP4_FIFO_INT (1 << 4)
-#define FUSB300_IGR0_EP3_FIFO_INT (1 << 3)
-#define FUSB300_IGR0_EP2_FIFO_INT (1 << 2)
-#define FUSB300_IGR0_EP1_FIFO_INT (1 << 1)
-#define FUSB300_IGR0_EPn_FIFO_INT(n) (1 << n)
-
-/* Interrupt Group 1 Register (offset = 404H) */
-#define FUSB300_IGR1_INTGRP5 (1 << 31)
-#define FUSB300_IGR1_VBUS_CHG_INT (1 << 30)
-#define FUSB300_IGR1_SYNF1_EMPTY_INT (1 << 29)
-#define FUSB300_IGR1_SYNF0_EMPTY_INT (1 << 28)
-#define FUSB300_IGR1_U3_EXIT_FAIL_INT (1 << 27)
-#define FUSB300_IGR1_U2_EXIT_FAIL_INT (1 << 26)
-#define FUSB300_IGR1_U1_EXIT_FAIL_INT (1 << 25)
-#define FUSB300_IGR1_U2_ENTRY_FAIL_INT (1 << 24)
-#define FUSB300_IGR1_U1_ENTRY_FAIL_INT (1 << 23)
-#define FUSB300_IGR1_U3_EXIT_INT (1 << 22)
-#define FUSB300_IGR1_U2_EXIT_INT (1 << 21)
-#define FUSB300_IGR1_U1_EXIT_INT (1 << 20)
-#define FUSB300_IGR1_U3_ENTRY_INT (1 << 19)
-#define FUSB300_IGR1_U2_ENTRY_INT (1 << 18)
-#define FUSB300_IGR1_U1_ENTRY_INT (1 << 17)
-#define FUSB300_IGR1_HOT_RST_INT (1 << 16)
-#define FUSB300_IGR1_WARM_RST_INT (1 << 15)
-#define FUSB300_IGR1_RESM_INT (1 << 14)
-#define FUSB300_IGR1_SUSP_INT (1 << 13)
-#define FUSB300_IGR1_HS_LPM_INT (1 << 12)
-#define FUSB300_IGR1_USBRST_INT (1 << 11)
-#define FUSB300_IGR1_DEV_MODE_CHG_INT (1 << 9)
-#define FUSB300_IGR1_CX_COMABT_INT (1 << 8)
-#define FUSB300_IGR1_CX_COMFAIL_INT (1 << 7)
-#define FUSB300_IGR1_CX_CMDEND_INT (1 << 6)
-#define FUSB300_IGR1_CX_OUT_INT (1 << 5)
-#define FUSB300_IGR1_CX_IN_INT (1 << 4)
-#define FUSB300_IGR1_CX_SETUP_INT (1 << 3)
-#define FUSB300_IGR1_INTGRP4 (1 << 2)
-#define FUSB300_IGR1_INTGRP3 (1 << 1)
-#define FUSB300_IGR1_INTGRP2 (1 << 0)
-
-/* Interrupt Group 2 Register (offset = 408H) */
-#define FUSB300_IGR2_EP6_STR_ACCEPT_INT (1 << 29)
-#define FUSB300_IGR2_EP6_STR_RESUME_INT (1 << 28)
-#define FUSB300_IGR2_EP6_STR_REQ_INT (1 << 27)
-#define FUSB300_IGR2_EP6_STR_NOTRDY_INT (1 << 26)
-#define FUSB300_IGR2_EP6_STR_PRIME_INT (1 << 25)
-#define FUSB300_IGR2_EP5_STR_ACCEPT_INT (1 << 24)
-#define FUSB300_IGR2_EP5_STR_RESUME_INT (1 << 23)
-#define FUSB300_IGR2_EP5_STR_REQ_INT (1 << 22)
-#define FUSB300_IGR2_EP5_STR_NOTRDY_INT (1 << 21)
-#define FUSB300_IGR2_EP5_STR_PRIME_INT (1 << 20)
-#define FUSB300_IGR2_EP4_STR_ACCEPT_INT (1 << 19)
-#define FUSB300_IGR2_EP4_STR_RESUME_INT (1 << 18)
-#define FUSB300_IGR2_EP4_STR_REQ_INT (1 << 17)
-#define FUSB300_IGR2_EP4_STR_NOTRDY_INT (1 << 16)
-#define FUSB300_IGR2_EP4_STR_PRIME_INT (1 << 15)
-#define FUSB300_IGR2_EP3_STR_ACCEPT_INT (1 << 14)
-#define FUSB300_IGR2_EP3_STR_RESUME_INT (1 << 13)
-#define FUSB300_IGR2_EP3_STR_REQ_INT (1 << 12)
-#define FUSB300_IGR2_EP3_STR_NOTRDY_INT (1 << 11)
-#define FUSB300_IGR2_EP3_STR_PRIME_INT (1 << 10)
-#define FUSB300_IGR2_EP2_STR_ACCEPT_INT (1 << 9)
-#define FUSB300_IGR2_EP2_STR_RESUME_INT (1 << 8)
-#define FUSB300_IGR2_EP2_STR_REQ_INT (1 << 7)
-#define FUSB300_IGR2_EP2_STR_NOTRDY_INT (1 << 6)
-#define FUSB300_IGR2_EP2_STR_PRIME_INT (1 << 5)
-#define FUSB300_IGR2_EP1_STR_ACCEPT_INT (1 << 4)
-#define FUSB300_IGR2_EP1_STR_RESUME_INT (1 << 3)
-#define FUSB300_IGR2_EP1_STR_REQ_INT (1 << 2)
-#define FUSB300_IGR2_EP1_STR_NOTRDY_INT (1 << 1)
-#define FUSB300_IGR2_EP1_STR_PRIME_INT (1 << 0)
-
-#define FUSB300_IGR2_EP_STR_ACCEPT_INT(n) (1 << (5 * n - 1))
-#define FUSB300_IGR2_EP_STR_RESUME_INT(n) (1 << (5 * n - 2))
-#define FUSB300_IGR2_EP_STR_REQ_INT(n) (1 << (5 * n - 3))
-#define FUSB300_IGR2_EP_STR_NOTRDY_INT(n) (1 << (5 * n - 4))
-#define FUSB300_IGR2_EP_STR_PRIME_INT(n) (1 << (5 * n - 5))
-
-/* Interrupt Group 3 Register (offset = 40CH) */
-#define FUSB300_IGR3_EP12_STR_ACCEPT_INT (1 << 29)
-#define FUSB300_IGR3_EP12_STR_RESUME_INT (1 << 28)
-#define FUSB300_IGR3_EP12_STR_REQ_INT (1 << 27)
-#define FUSB300_IGR3_EP12_STR_NOTRDY_INT (1 << 26)
-#define FUSB300_IGR3_EP12_STR_PRIME_INT (1 << 25)
-#define FUSB300_IGR3_EP11_STR_ACCEPT_INT (1 << 24)
-#define FUSB300_IGR3_EP11_STR_RESUME_INT (1 << 23)
-#define FUSB300_IGR3_EP11_STR_REQ_INT (1 << 22)
-#define FUSB300_IGR3_EP11_STR_NOTRDY_INT (1 << 21)
-#define FUSB300_IGR3_EP11_STR_PRIME_INT (1 << 20)
-#define FUSB300_IGR3_EP10_STR_ACCEPT_INT (1 << 19)
-#define FUSB300_IGR3_EP10_STR_RESUME_INT (1 << 18)
-#define FUSB300_IGR3_EP10_STR_REQ_INT (1 << 17)
-#define FUSB300_IGR3_EP10_STR_NOTRDY_INT (1 << 16)
-#define FUSB300_IGR3_EP10_STR_PRIME_INT (1 << 15)
-#define FUSB300_IGR3_EP9_STR_ACCEPT_INT (1 << 14)
-#define FUSB300_IGR3_EP9_STR_RESUME_INT (1 << 13)
-#define FUSB300_IGR3_EP9_STR_REQ_INT (1 << 12)
-#define FUSB300_IGR3_EP9_STR_NOTRDY_INT (1 << 11)
-#define FUSB300_IGR3_EP9_STR_PRIME_INT (1 << 10)
-#define FUSB300_IGR3_EP8_STR_ACCEPT_INT (1 << 9)
-#define FUSB300_IGR3_EP8_STR_RESUME_INT (1 << 8)
-#define FUSB300_IGR3_EP8_STR_REQ_INT (1 << 7)
-#define FUSB300_IGR3_EP8_STR_NOTRDY_INT (1 << 6)
-#define FUSB300_IGR3_EP8_STR_PRIME_INT (1 << 5)
-#define FUSB300_IGR3_EP7_STR_ACCEPT_INT (1 << 4)
-#define FUSB300_IGR3_EP7_STR_RESUME_INT (1 << 3)
-#define FUSB300_IGR3_EP7_STR_REQ_INT (1 << 2)
-#define FUSB300_IGR3_EP7_STR_NOTRDY_INT (1 << 1)
-#define FUSB300_IGR3_EP7_STR_PRIME_INT (1 << 0)
-
-#define FUSB300_IGR3_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1))
-#define FUSB300_IGR3_EP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2))
-#define FUSB300_IGR3_EP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3))
-#define FUSB300_IGR3_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4))
-#define FUSB300_IGR3_EP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5))
-
-/* Interrupt Group 4 Register (offset = 410H) */
-#define FUSB300_IGR4_EP15_RX0_INT (1 << 31)
-#define FUSB300_IGR4_EP14_RX0_INT (1 << 30)
-#define FUSB300_IGR4_EP13_RX0_INT (1 << 29)
-#define FUSB300_IGR4_EP12_RX0_INT (1 << 28)
-#define FUSB300_IGR4_EP11_RX0_INT (1 << 27)
-#define FUSB300_IGR4_EP10_RX0_INT (1 << 26)
-#define FUSB300_IGR4_EP9_RX0_INT (1 << 25)
-#define FUSB300_IGR4_EP8_RX0_INT (1 << 24)
-#define FUSB300_IGR4_EP7_RX0_INT (1 << 23)
-#define FUSB300_IGR4_EP6_RX0_INT (1 << 22)
-#define FUSB300_IGR4_EP5_RX0_INT (1 << 21)
-#define FUSB300_IGR4_EP4_RX0_INT (1 << 20)
-#define FUSB300_IGR4_EP3_RX0_INT (1 << 19)
-#define FUSB300_IGR4_EP2_RX0_INT (1 << 18)
-#define FUSB300_IGR4_EP1_RX0_INT (1 << 17)
-#define FUSB300_IGR4_EP_RX0_INT(x) (1 << (x + 16))
-#define FUSB300_IGR4_EP15_STR_ACCEPT_INT (1 << 14)
-#define FUSB300_IGR4_EP15_STR_RESUME_INT (1 << 13)
-#define FUSB300_IGR4_EP15_STR_REQ_INT (1 << 12)
-#define FUSB300_IGR4_EP15_STR_NOTRDY_INT (1 << 11)
-#define FUSB300_IGR4_EP15_STR_PRIME_INT (1 << 10)
-#define FUSB300_IGR4_EP14_STR_ACCEPT_INT (1 << 9)
-#define FUSB300_IGR4_EP14_STR_RESUME_INT (1 << 8)
-#define FUSB300_IGR4_EP14_STR_REQ_INT (1 << 7)
-#define FUSB300_IGR4_EP14_STR_NOTRDY_INT (1 << 6)
-#define FUSB300_IGR4_EP14_STR_PRIME_INT (1 << 5)
-#define FUSB300_IGR4_EP13_STR_ACCEPT_INT (1 << 4)
-#define FUSB300_IGR4_EP13_STR_RESUME_INT (1 << 3)
-#define FUSB300_IGR4_EP13_STR_REQ_INT (1 << 2)
-#define FUSB300_IGR4_EP13_STR_NOTRDY_INT (1 << 1)
-#define FUSB300_IGR4_EP13_STR_PRIME_INT (1 << 0)
-
-#define FUSB300_IGR4_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 12) - 1))
-#define FUSB300_IGR4_EP_STR_RESUME_INT(n) (1 << (5 * (n - 12) - 2))
-#define FUSB300_IGR4_EP_STR_REQ_INT(n) (1 << (5 * (n - 12) - 3))
-#define FUSB300_IGR4_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 12) - 4))
-#define FUSB300_IGR4_EP_STR_PRIME_INT(n) (1 << (5 * (n - 12) - 5))
-
-/* Interrupt Group 5 Register (offset = 414H) */
-#define FUSB300_IGR5_EP_STL_INT(n) (1 << n)
-
-/* Interrupt Enable Group 0 Register (offset = 420H) */
-#define FUSB300_IGER0_EEP15_PRD_INT (1 << 31)
-#define FUSB300_IGER0_EEP14_PRD_INT (1 << 30)
-#define FUSB300_IGER0_EEP13_PRD_INT (1 << 29)
-#define FUSB300_IGER0_EEP12_PRD_INT (1 << 28)
-#define FUSB300_IGER0_EEP11_PRD_INT (1 << 27)
-#define FUSB300_IGER0_EEP10_PRD_INT (1 << 26)
-#define FUSB300_IGER0_EEP9_PRD_INT (1 << 25)
-#define FUSB300_IGER0_EEP8_PRD_INT (1 << 24)
-#define FUSB300_IGER0_EEP7_PRD_INT (1 << 23)
-#define FUSB300_IGER0_EEP6_PRD_INT (1 << 22)
-#define FUSB300_IGER0_EEP5_PRD_INT (1 << 21)
-#define FUSB300_IGER0_EEP4_PRD_INT (1 << 20)
-#define FUSB300_IGER0_EEP3_PRD_INT (1 << 19)
-#define FUSB300_IGER0_EEP2_PRD_INT (1 << 18)
-#define FUSB300_IGER0_EEP1_PRD_INT (1 << 17)
-#define FUSB300_IGER0_EEPn_PRD_INT(n) (1 << (n + 16))
-
-#define FUSB300_IGER0_EEP15_FIFO_INT (1 << 15)
-#define FUSB300_IGER0_EEP14_FIFO_INT (1 << 14)
-#define FUSB300_IGER0_EEP13_FIFO_INT (1 << 13)
-#define FUSB300_IGER0_EEP12_FIFO_INT (1 << 12)
-#define FUSB300_IGER0_EEP11_FIFO_INT (1 << 11)
-#define FUSB300_IGER0_EEP10_FIFO_INT (1 << 10)
-#define FUSB300_IGER0_EEP9_FIFO_INT (1 << 9)
-#define FUSB300_IGER0_EEP8_FIFO_INT (1 << 8)
-#define FUSB300_IGER0_EEP7_FIFO_INT (1 << 7)
-#define FUSB300_IGER0_EEP6_FIFO_INT (1 << 6)
-#define FUSB300_IGER0_EEP5_FIFO_INT (1 << 5)
-#define FUSB300_IGER0_EEP4_FIFO_INT (1 << 4)
-#define FUSB300_IGER0_EEP3_FIFO_INT (1 << 3)
-#define FUSB300_IGER0_EEP2_FIFO_INT (1 << 2)
-#define FUSB300_IGER0_EEP1_FIFO_INT (1 << 1)
-#define FUSB300_IGER0_EEPn_FIFO_INT(n) (1 << n)
-
-/* Interrupt Enable Group 1 Register (offset = 424H) */
-#define FUSB300_IGER1_EINT_GRP5 (1 << 31)
-#define FUSB300_IGER1_VBUS_CHG_INT (1 << 30)
-#define FUSB300_IGER1_SYNF1_EMPTY_INT (1 << 29)
-#define FUSB300_IGER1_SYNF0_EMPTY_INT (1 << 28)
-#define FUSB300_IGER1_U3_EXIT_FAIL_INT (1 << 27)
-#define FUSB300_IGER1_U2_EXIT_FAIL_INT (1 << 26)
-#define FUSB300_IGER1_U1_EXIT_FAIL_INT (1 << 25)
-#define FUSB300_IGER1_U2_ENTRY_FAIL_INT (1 << 24)
-#define FUSB300_IGER1_U1_ENTRY_FAIL_INT (1 << 23)
-#define FUSB300_IGER1_U3_EXIT_INT (1 << 22)
-#define FUSB300_IGER1_U2_EXIT_INT (1 << 21)
-#define FUSB300_IGER1_U1_EXIT_INT (1 << 20)
-#define FUSB300_IGER1_U3_ENTRY_INT (1 << 19)
-#define FUSB300_IGER1_U2_ENTRY_INT (1 << 18)
-#define FUSB300_IGER1_U1_ENTRY_INT (1 << 17)
-#define FUSB300_IGER1_HOT_RST_INT (1 << 16)
-#define FUSB300_IGER1_WARM_RST_INT (1 << 15)
-#define FUSB300_IGER1_RESM_INT (1 << 14)
-#define FUSB300_IGER1_SUSP_INT (1 << 13)
-#define FUSB300_IGER1_LPM_INT (1 << 12)
-#define FUSB300_IGER1_HS_RST_INT (1 << 11)
-#define FUSB300_IGER1_EDEV_MODE_CHG_INT (1 << 9)
-#define FUSB300_IGER1_CX_COMABT_INT (1 << 8)
-#define FUSB300_IGER1_CX_COMFAIL_INT (1 << 7)
-#define FUSB300_IGER1_CX_CMDEND_INT (1 << 6)
-#define FUSB300_IGER1_CX_OUT_INT (1 << 5)
-#define FUSB300_IGER1_CX_IN_INT (1 << 4)
-#define FUSB300_IGER1_CX_SETUP_INT (1 << 3)
-#define FUSB300_IGER1_INTGRP4 (1 << 2)
-#define FUSB300_IGER1_INTGRP3 (1 << 1)
-#define FUSB300_IGER1_INTGRP2 (1 << 0)
-
-/* Interrupt Enable Group 2 Register (offset = 428H) */
-#define FUSB300_IGER2_EEP_STR_ACCEPT_INT(n) (1 << (5 * n - 1))
-#define FUSB300_IGER2_EEP_STR_RESUME_INT(n) (1 << (5 * n - 2))
-#define FUSB300_IGER2_EEP_STR_REQ_INT(n) (1 << (5 * n - 3))
-#define FUSB300_IGER2_EEP_STR_NOTRDY_INT(n) (1 << (5 * n - 4))
-#define FUSB300_IGER2_EEP_STR_PRIME_INT(n) (1 << (5 * n - 5))
-
-/* Interrupt Enable Group 3 Register (offset = 42CH) */
-
-#define FUSB300_IGER3_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1))
-#define FUSB300_IGER3_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2))
-#define FUSB300_IGER3_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3))
-#define FUSB300_IGER3_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4))
-#define FUSB300_IGER3_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5))
-
-/* Interrupt Enable Group 4 Register (offset = 430H) */
-
-#define FUSB300_IGER4_EEP_RX0_INT(n) (1 << (n + 16))
-#define FUSB300_IGER4_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 12) - 1))
-#define FUSB300_IGER4_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 12) - 2))
-#define FUSB300_IGER4_EEP_STR_REQ_INT(n) (1 << (5 * (n - 12) - 3))
-#define FUSB300_IGER4_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 12) - 4))
-#define FUSB300_IGER4_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 12) - 5))
-
-/* EP PRD Ready (EP_PRD_RDY, offset = 504H) */
-
-#define FUSB300_EPPRDR_EP15_PRD_RDY (1 << 15)
-#define FUSB300_EPPRDR_EP14_PRD_RDY (1 << 14)
-#define FUSB300_EPPRDR_EP13_PRD_RDY (1 << 13)
-#define FUSB300_EPPRDR_EP12_PRD_RDY (1 << 12)
-#define FUSB300_EPPRDR_EP11_PRD_RDY (1 << 11)
-#define FUSB300_EPPRDR_EP10_PRD_RDY (1 << 10)
-#define FUSB300_EPPRDR_EP9_PRD_RDY (1 << 9)
-#define FUSB300_EPPRDR_EP8_PRD_RDY (1 << 8)
-#define FUSB300_EPPRDR_EP7_PRD_RDY (1 << 7)
-#define FUSB300_EPPRDR_EP6_PRD_RDY (1 << 6)
-#define FUSB300_EPPRDR_EP5_PRD_RDY (1 << 5)
-#define FUSB300_EPPRDR_EP4_PRD_RDY (1 << 4)
-#define FUSB300_EPPRDR_EP3_PRD_RDY (1 << 3)
-#define FUSB300_EPPRDR_EP2_PRD_RDY (1 << 2)
-#define FUSB300_EPPRDR_EP1_PRD_RDY (1 << 1)
-#define FUSB300_EPPRDR_EP_PRD_RDY(n) (1 << n)
-
-/* AHB Bus Control Register (offset = 514H) */
-#define FUSB300_AHBBCR_S1_SPLIT_ON (1 << 17)
-#define FUSB300_AHBBCR_S0_SPLIT_ON (1 << 16)
-#define FUSB300_AHBBCR_S1_1entry (0 << 12)
-#define FUSB300_AHBBCR_S1_4entry (3 << 12)
-#define FUSB300_AHBBCR_S1_8entry (5 << 12)
-#define FUSB300_AHBBCR_S1_16entry (7 << 12)
-#define FUSB300_AHBBCR_S0_1entry (0 << 8)
-#define FUSB300_AHBBCR_S0_4entry (3 << 8)
-#define FUSB300_AHBBCR_S0_8entry (5 << 8)
-#define FUSB300_AHBBCR_S0_16entry (7 << 8)
-#define FUSB300_AHBBCR_M1_BURST_SINGLE (0 << 4)
-#define FUSB300_AHBBCR_M1_BURST_INCR (1 << 4)
-#define FUSB300_AHBBCR_M1_BURST_INCR4 (3 << 4)
-#define FUSB300_AHBBCR_M1_BURST_INCR8 (5 << 4)
-#define FUSB300_AHBBCR_M1_BURST_INCR16 (7 << 4)
-#define FUSB300_AHBBCR_M0_BURST_SINGLE 0
-#define FUSB300_AHBBCR_M0_BURST_INCR 1
-#define FUSB300_AHBBCR_M0_BURST_INCR4 3
-#define FUSB300_AHBBCR_M0_BURST_INCR8 5
-#define FUSB300_AHBBCR_M0_BURST_INCR16 7
-
-/* Interrupt Enable Group 5 Register (offset = 434H) */
-#define FUSB300_IGER5_EEP_STL_INT(n) (1 << n)
-
-/* WORD 0 Data Structure of PRD Table */
-#define FUSB300_EPPRD0_M (1 << 30)
-#define FUSB300_EPPRD0_O (1 << 29)
-/* The finished prd */
-#define FUSB300_EPPRD0_F (1 << 28)
-#define FUSB300_EPPRD0_I (1 << 27)
-#define FUSB300_EPPRD0_A (1 << 26)
-/* Tells HW to point back to the first PRD next time */
-#define FUSB300_EPPRD0_L (1 << 25)
-#define FUSB300_EPPRD0_H (1 << 24)
-#define FUSB300_EPPRD0_BTC(n) (n & 0xFFFFFF)
-
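For reference, the PRD word-0 bits above decode as in the following sketch; the helper names are hypothetical and only macros defined in this header are assumed:

        /* Has the hardware finished this PRD? (F bit, bit 28) */
        static inline bool fusb300_prd_finished(u32 word0)
        {
                return (word0 & FUSB300_EPPRD0_F) != 0;
        }

        /* Extract the 24-bit byte-count field (BTC, bits 23:0) */
        static inline u32 fusb300_prd_btc(u32 word0)
        {
                return FUSB300_EPPRD0_BTC(word0);
        }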
-/*----------------------------------------------------------------------*/
-#define FUSB300_MAX_NUM_EP 16
-
-#define FUSB300_FIFO_ENTRY_NUM 8
-#define FUSB300_MAX_FIFO_ENTRY 8
-
-#define SS_CTL_MAX_PACKET_SIZE 0x200
-#define SS_BULK_MAX_PACKET_SIZE 0x400
-#define SS_INT_MAX_PACKET_SIZE 0x400
-#define SS_ISO_MAX_PACKET_SIZE 0x400
-
-#define HS_BULK_MAX_PACKET_SIZE 0x200
-#define HS_CTL_MAX_PACKET_SIZE 0x40
-#define HS_INT_MAX_PACKET_SIZE 0x400
-#define HS_ISO_MAX_PACKET_SIZE 0x400
-
-struct fusb300_ep_info {
- u8 epnum;
- u8 type;
- u8 interval;
- u8 dir_in;
- u16 maxpacket;
- u16 addrofs;
- u16 bw_num;
-};
-
-struct fusb300_request {
- struct usb_request req;
- struct list_head queue;
-};
-
-struct fusb300_ep {
- struct usb_ep ep;
- struct fusb300 *fusb300;
-
- struct list_head queue;
- unsigned stall:1;
- unsigned wedged:1;
- unsigned use_dma:1;
-
- unsigned char epnum;
- unsigned char type;
-};
-
-struct fusb300 {
- spinlock_t lock;
- void __iomem *reg;
-
- unsigned long irq_trigger;
-
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
-
- struct fusb300_ep *ep[FUSB300_MAX_NUM_EP];
-
- struct usb_request *ep0_req; /* for internal request */
- __le16 ep0_data;
- u32 ep0_length; /* for internal request */
- u8 ep0_dir; /* 0/0x80 out/in */
-
- u8 fifo_entry_num; /* next start fifo entry */
- u32 addrofs; /* next fifo address offset */
- u8 reenum; /* re-enumeration flag */
-};
-
-#define to_fusb300(g) (container_of((g), struct fusb300, gadget))
-
-#endif
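The per-endpoint interrupt macros in the header removed above compose a bit position from the endpoint index, so a status dispatch would typically walk a group register in a loop. A minimal sketch using only names from the removed header (fusb300->reg is the driver's MMIO base from struct fusb300):

        u32 igr0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0);
        int n;

        for (n = 1; n < FUSB300_MAX_NUM_EP; n++) {
                if (igr0 & FUSB300_IGR0_EPn_PRD_INT(n))
                        ;       /* PRD-done interrupt for endpoint n */
                if (igr0 & FUSB300_IGR0_EPn_FIFO_INT(n))
                        ;       /* FIFO interrupt for endpoint n */
        }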
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 89d6daf2bda7..1a7d3c4f652f 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1629,7 +1629,7 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep,
return -ESHUTDOWN;
}
- tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ tmp = usb_endpoint_type(desc);
switch (tmp) {
case USB_ENDPOINT_XFER_CONTROL:
return -EINVAL;
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index a938b2af0944..715791737499 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1261,7 +1261,7 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
static void m66592_timer(struct timer_list *t)
{
- struct m66592 *m66592 = from_timer(m66592, t, timer);
+ struct m66592 *m66592 = timer_container_of(m66592, t, timer);
unsigned long flags;
u16 tmp;
diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h
deleted file mode 100644
index 66b84f792f64..000000000000
--- a/drivers/usb/gadget/udc/mv_u3d.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- */
-
-#ifndef __MV_U3D_H
-#define __MV_U3D_H
-
-#define MV_U3D_EP_CONTEXT_ALIGNMENT 32
-#define MV_U3D_TRB_ALIGNMENT 16
-#define MV_U3D_DMA_BOUNDARY 4096
-#define MV_U3D_EP0_MAX_PKT_SIZE 512
-
-/* ep0 transfer state */
-#define MV_U3D_WAIT_FOR_SETUP 0
-#define MV_U3D_DATA_STATE_XMIT 1
-#define MV_U3D_DATA_STATE_NEED_ZLP 2
-#define MV_U3D_WAIT_FOR_OUT_STATUS 3
-#define MV_U3D_DATA_STATE_RECV 4
-#define MV_U3D_STATUS_STAGE 5
-
-#define MV_U3D_EP_MAX_LENGTH_TRANSFER 0x10000
-
-/* USB3 Interrupt Status */
-#define MV_U3D_USBINT_SETUP 0x00000001
-#define MV_U3D_USBINT_RX_COMPLETE 0x00000002
-#define MV_U3D_USBINT_TX_COMPLETE 0x00000004
-#define MV_U3D_USBINT_UNDER_RUN 0x00000008
-#define MV_U3D_USBINT_RXDESC_ERR 0x00000010
-#define MV_U3D_USBINT_TXDESC_ERR 0x00000020
-#define MV_U3D_USBINT_RX_TRB_COMPLETE 0x00000040
-#define MV_U3D_USBINT_TX_TRB_COMPLETE 0x00000080
-#define MV_U3D_USBINT_VBUS_VALID 0x00010000
-#define MV_U3D_USBINT_STORAGE_CMD_FULL 0x00020000
-#define MV_U3D_USBINT_LINK_CHG 0x01000000
-
-/* USB3 Interrupt Enable */
-#define MV_U3D_INTR_ENABLE_SETUP 0x00000001
-#define MV_U3D_INTR_ENABLE_RX_COMPLETE 0x00000002
-#define MV_U3D_INTR_ENABLE_TX_COMPLETE 0x00000004
-#define MV_U3D_INTR_ENABLE_UNDER_RUN 0x00000008
-#define MV_U3D_INTR_ENABLE_RXDESC_ERR 0x00000010
-#define MV_U3D_INTR_ENABLE_TXDESC_ERR 0x00000020
-#define MV_U3D_INTR_ENABLE_RX_TRB_COMPLETE 0x00000040
-#define MV_U3D_INTR_ENABLE_TX_TRB_COMPLETE 0x00000080
-#define MV_U3D_INTR_ENABLE_RX_BUFFER_ERR 0x00000100
-#define MV_U3D_INTR_ENABLE_VBUS_VALID 0x00010000
-#define MV_U3D_INTR_ENABLE_STORAGE_CMD_FULL 0x00020000
-#define MV_U3D_INTR_ENABLE_LINK_CHG 0x01000000
-#define MV_U3D_INTR_ENABLE_PRIME_STATUS 0x02000000
-
-/* USB3 Link Change */
-#define MV_U3D_LINK_CHANGE_LINK_UP 0x00000001
-#define MV_U3D_LINK_CHANGE_SUSPEND 0x00000002
-#define MV_U3D_LINK_CHANGE_RESUME 0x00000004
-#define MV_U3D_LINK_CHANGE_WRESET 0x00000008
-#define MV_U3D_LINK_CHANGE_HRESET 0x00000010
-#define MV_U3D_LINK_CHANGE_VBUS_INVALID 0x00000020
-#define MV_U3D_LINK_CHANGE_INACT 0x00000040
-#define MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0 0x00000080
-#define MV_U3D_LINK_CHANGE_U1 0x00000100
-#define MV_U3D_LINK_CHANGE_U2 0x00000200
-#define MV_U3D_LINK_CHANGE_U3 0x00000400
-
-/* bridge setting */
-#define MV_U3D_BRIDGE_SETTING_VBUS_VALID (1 << 16)
-
-/* Command Register Bit Masks */
-#define MV_U3D_CMD_RUN_STOP 0x00000001
-#define MV_U3D_CMD_CTRL_RESET 0x00000002
-
-/* ep control register */
-#define MV_U3D_EPXCR_EP_TYPE_CONTROL 0
-#define MV_U3D_EPXCR_EP_TYPE_ISOC 1
-#define MV_U3D_EPXCR_EP_TYPE_BULK 2
-#define MV_U3D_EPXCR_EP_TYPE_INT 3
-#define MV_U3D_EPXCR_EP_ENABLE_SHIFT 4
-#define MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT 12
-#define MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT 16
-#define MV_U3D_USB_BULK_BURST_OUT 6
-#define MV_U3D_USB_BULK_BURST_IN 14
-
-#define MV_U3D_EPXCR_EP_FLUSH (1 << 7)
-#define MV_U3D_EPXCR_EP_HALT (1 << 1)
-#define MV_U3D_EPXCR_EP_INIT (1)
-
-/* TX/RX Status Register */
-#define MV_U3D_XFERSTATUS_COMPLETE_SHIFT 24
-#define MV_U3D_COMPLETE_INVALID 0
-#define MV_U3D_COMPLETE_SUCCESS 1
-#define MV_U3D_COMPLETE_BUFF_ERR 2
-#define MV_U3D_COMPLETE_SHORT_PACKET 3
-#define MV_U3D_COMPLETE_TRB_ERR 5
-#define MV_U3D_XFERSTATUS_TRB_LENGTH_MASK (0xFFFFFF)
-
-#define MV_U3D_USB_LINK_BYPASS_VBUS 0x8
-
-#define MV_U3D_LTSSM_PHY_INIT_DONE 0x80000000
-#define MV_U3D_LTSSM_NEVER_GO_COMPLIANCE 0x40000000
-
-#define MV_U3D_USB3_OP_REGS_OFFSET 0x100
-#define MV_U3D_USB3_PHY_OFFSET 0xB800
-
-#define DCS_ENABLE 0x1
-
-/* timeout */
-#define MV_U3D_RESET_TIMEOUT 10000
-#define MV_U3D_FLUSH_TIMEOUT 100000
-#define MV_U3D_OWN_TIMEOUT 10000
-#define LOOPS_USEC_SHIFT 4
-#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
-#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
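LOOPS() turns a microsecond budget into an iteration count for udelay(LOOPS_USEC)-paced polling: LOOPS(10000) = 10000 >> 4 = 625 iterations of 16 us each, i.e. roughly 10 ms. Every polling site in the file removed below follows the same pattern; a sketch (the polled register and bit are stand-ins):

        unsigned int loops = LOOPS(MV_U3D_RESET_TIMEOUT);

        while (ioread32(reg) & BUSY_BIT) {      /* stand-in register and bit */
                if (loops == 0)
                        return -ETIMEDOUT;      /* budget exhausted */
                loops--;
                udelay(LOOPS_USEC);
        }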
-
-/* ep direction */
-#define MV_U3D_EP_DIR_IN 1
-#define MV_U3D_EP_DIR_OUT 0
-#define mv_u3d_ep_dir(ep) (((ep)->ep_num == 0) ? \
- ((ep)->u3d->ep0_dir) : ((ep)->direction))
-
-/* usb capability registers */
-struct mv_u3d_cap_regs {
- u32 rsvd[5];
- u32 dboff; /* doorbell register offset */
- u32 rtsoff; /* runtime register offset */
- u32 vuoff; /* vendor unique register offset */
-};
-
-/* operation registers */
-struct mv_u3d_op_regs {
- u32 usbcmd; /* Command register */
- u32 rsvd1[11];
- u32 dcbaapl; /* Device Context Base Address low register */
- u32 dcbaaph; /* Device Context Base Address high register */
- u32 rsvd2[243];
- u32 portsc; /* port status and control register */
- u32 portlinkinfo; /* port link info register */
- u32 rsvd3[9917];
- u32 doorbell; /* doorbell register */
-};
-
-/* control endpoint enable registers */
-struct epxcr {
- u32 epxoutcr0; /* ep out control 0 register */
- u32 epxoutcr1; /* ep out control 1 register */
- u32 epxincr0; /* ep in control 0 register */
- u32 epxincr1; /* ep in control 1 register */
-};
-
-/* transfer status registers */
-struct xferstatus {
- u32 curdeqlo; /* current TRB pointer low */
- u32 curdeqhi; /* current TRB pointer high */
- u32 statuslo; /* transfer status low */
- u32 statushi; /* transfer status high */
-};
-
-/* vendor unique control registers */
-struct mv_u3d_vuc_regs {
- u32 ctrlepenable; /* control endpoint enable register */
- u32 setuplock; /* setup lock register */
- u32 endcomplete; /* endpoint transfer complete register */
- u32 intrcause; /* interrupt cause register */
- u32 intrenable; /* interrupt enable register */
- u32 trbcomplete; /* TRB complete register */
- u32 linkchange; /* link change register */
- u32 rsvd1[5];
- u32 trbunderrun; /* TRB underrun register */
- u32 rsvd2[43];
- u32 bridgesetting; /* bridge setting register */
- u32 rsvd3[7];
- struct xferstatus txst[16]; /* TX status register */
- struct xferstatus rxst[16]; /* RX status register */
- u32 ltssm; /* LTSSM control register */
- u32 pipe; /* PIPE control register */
- u32 linkcr0; /* link control 0 register */
- u32 linkcr1; /* link control 1 register */
- u32 rsvd6[60];
- u32 mib0; /* MIB0 counter register */
- u32 usblink; /* usb link control register */
- u32 ltssmstate; /* LTSSM state register */
- u32 linkerrorcause; /* link error cause register */
- u32 rsvd7[60];
- u32 devaddrtiebrkr; /* device address and tie breaker */
- u32 itpinfo0; /* ITP info 0 register */
- u32 itpinfo1; /* ITP info 1 register */
- u32 rsvd8[61];
- struct epxcr epcr[16]; /* ep control register */
- u32 rsvd9[64];
- u32 phyaddr; /* PHY address register */
- u32 phydata; /* PHY data register */
-};
-
-/* Endpoint context structure */
-struct mv_u3d_ep_context {
- u32 rsvd0;
- u32 rsvd1;
- u32 trb_addr_lo; /* TRB address low 32 bit */
- u32 trb_addr_hi; /* TRB address high 32 bit */
- u32 rsvd2;
- u32 rsvd3;
- struct usb_ctrlrequest setup_buffer; /* setup data buffer */
-};
-
-/* TRB control data structure */
-struct mv_u3d_trb_ctrl {
- u32 own:1; /* owner of TRB */
- u32 rsvd1:3;
- u32 chain:1; /* associate this TRB with the
- next TRB on the Ring */
- u32 ioc:1; /* interrupt on complete */
- u32 rsvd2:4;
- u32 type:6; /* TRB type */
-#define TYPE_NORMAL 1
-#define TYPE_DATA 3
-#define TYPE_LINK 6
- u32 dir:1; /* direction for the data stage of a control
- endpoint operation: 0 is OUT, 1 is IN */
- u32 rsvd3:15;
-};
-
-/* TRB data structure
- * For multiple TRBs, the TRBs' physical addresses must be contiguous.
- */
-struct mv_u3d_trb_hw {
- u32 buf_addr_lo; /* data buffer address low 32 bit */
- u32 buf_addr_hi; /* data buffer address high 32 bit */
- u32 trb_len; /* transfer length */
- struct mv_u3d_trb_ctrl ctrl; /* TRB control data */
-};
-
-/* TRB structure */
-struct mv_u3d_trb {
- struct mv_u3d_trb_hw *trb_hw; /* point to the trb_hw structure */
- dma_addr_t trb_dma; /* dma address for this trb_hw */
- struct list_head trb_list; /* trb list */
-};
-
-/* device data structure */
-struct mv_u3d {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- spinlock_t lock; /* device lock */
- struct completion *done;
- struct device *dev;
- int irq;
-
- /* usb controller registers */
- struct mv_u3d_cap_regs __iomem *cap_regs;
- struct mv_u3d_op_regs __iomem *op_regs;
- struct mv_u3d_vuc_regs __iomem *vuc_regs;
- void __iomem *phy_regs;
-
- unsigned int max_eps;
- struct mv_u3d_ep_context *ep_context;
- size_t ep_context_size;
- dma_addr_t ep_context_dma;
-
- struct dma_pool *trb_pool; /* for TRB data structure */
- struct mv_u3d_ep *eps;
-
- struct mv_u3d_req *status_req; /* ep0 status request */
- struct usb_ctrlrequest local_setup_buff; /* store setup data */
-
- unsigned int resume_state; /* USB state to resume */
- unsigned int usb_state; /* USB current state */
- unsigned int ep0_state; /* Endpoint zero state */
- unsigned int ep0_dir;
-
- unsigned int dev_addr; /* device address */
-
- unsigned int errors;
-
- unsigned softconnect:1;
- unsigned vbus_active:1; /* vbus is active or not */
- unsigned remote_wakeup:1; /* support remote wakeup */
- unsigned clock_gating:1; /* clock gating or not */
- unsigned active:1; /* udc is active or not */
- unsigned vbus_valid_detect:1; /* udc vbus detection */
-
- struct mv_usb_addon_irq *vbus;
- unsigned int power;
-
- struct clk *clk;
-};
-
-/* endpoint data structure */
-struct mv_u3d_ep {
- struct usb_ep ep;
- struct mv_u3d *u3d;
- struct list_head queue; /* ep requests queued on hardware */
- struct list_head req_list; /* list of ep requests */
- struct mv_u3d_ep_context *ep_context; /* ep context */
- u32 direction;
- char name[14];
- u32 processing; /* an ep request is queued on hardware */
- spinlock_t req_lock; /* ep lock */
- unsigned wedge:1;
- unsigned enabled:1;
- unsigned ep_type:2;
- unsigned ep_num:8;
-};
-
-/* request data structure */
-struct mv_u3d_req {
- struct usb_request req;
- struct mv_u3d_ep *ep;
- struct list_head queue; /* ep request queued on hardware */
- struct list_head list; /* ep request list */
- struct list_head trb_list; /* trb list of a request */
-
- struct mv_u3d_trb *trb_head; /* point to first trb of a request */
- unsigned trb_count; /* TRB number in the chain */
- unsigned chain; /* TRB chain or not */
-};
-
-#endif
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
deleted file mode 100644
index 062f43e146aa..000000000000
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ /dev/null
@@ -1,2062 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- */
-
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/list.h>
-#include <linux/notifier.h>
-#include <linux/interrupt.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/pm.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/mv_usb.h>
-#include <linux/clk.h>
-
-#include "mv_u3d.h"
-
-#define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver"
-
-static const char driver_name[] = "mv_u3d";
-
-static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status);
-static void mv_u3d_stop_activity(struct mv_u3d *u3d,
- struct usb_gadget_driver *driver);
-
-/* for endpoint 0 operations */
-static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 0,
- .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
- .wMaxPacketSize = MV_U3D_EP0_MAX_PKT_SIZE,
-};
-
-static void mv_u3d_ep0_reset(struct mv_u3d *u3d)
-{
- struct mv_u3d_ep *ep;
- u32 epxcr;
- int i;
-
- for (i = 0; i < 2; i++) {
- ep = &u3d->eps[i];
- ep->u3d = u3d;
-
- /* ep0 ep context, ep0 in and out share the same ep context */
- ep->ep_context = &u3d->ep_context[1];
- }
-
- /* reset ep state machine */
- /* reset ep0 out */
- epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
- epxcr |= MV_U3D_EPXCR_EP_INIT;
- iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
- udelay(5);
- epxcr &= ~MV_U3D_EPXCR_EP_INIT;
- iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0);
-
- epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
- << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
- | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
- | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
- | MV_U3D_EPXCR_EP_TYPE_CONTROL);
- iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1);
-
- /* reset ep0 in */
- epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
- epxcr |= MV_U3D_EPXCR_EP_INIT;
- iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
- udelay(5);
- epxcr &= ~MV_U3D_EPXCR_EP_INIT;
- iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0);
-
- epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE
- << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
- | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
- | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
- | MV_U3D_EPXCR_EP_TYPE_CONTROL);
- iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1);
-}
-
-static void mv_u3d_ep0_stall(struct mv_u3d *u3d)
-{
- u32 tmp;
- dev_dbg(u3d->dev, "%s\n", __func__);
-
- /* set TX and RX to stall */
- tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
- tmp |= MV_U3D_EPXCR_EP_HALT;
- iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
-
- tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
- tmp |= MV_U3D_EPXCR_EP_HALT;
- iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
-
- /* update ep0 state */
- u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
- u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
-}
-
-static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
- struct mv_u3d_req *curr_req)
-{
- struct mv_u3d_trb *curr_trb;
- int actual, remaining_length = 0;
- int direction, ep_num;
- int retval = 0;
- u32 tmp, status, length;
-
- direction = index % 2;
- ep_num = index / 2;
-
- actual = curr_req->req.length;
-
- while (!list_empty(&curr_req->trb_list)) {
- curr_trb = list_entry(curr_req->trb_list.next,
- struct mv_u3d_trb, trb_list);
- if (!curr_trb->trb_hw->ctrl.own) {
- dev_err(u3d->dev, "%s, TRB own error!\n",
- u3d->eps[index].name);
- return 1;
- }
-
- curr_trb->trb_hw->ctrl.own = 0;
- if (direction == MV_U3D_EP_DIR_OUT)
- tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
- else
- tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
-
- status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
- length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
-
- if (status == MV_U3D_COMPLETE_SUCCESS ||
- (status == MV_U3D_COMPLETE_SHORT_PACKET &&
- direction == MV_U3D_EP_DIR_OUT)) {
- remaining_length += length;
- actual -= remaining_length;
- } else {
- dev_err(u3d->dev,
- "complete_tr error: ep=%d %s: error = 0x%x\n",
- index >> 1, direction ? "SEND" : "RECV",
- status);
- retval = -EPROTO;
- }
-
- list_del_init(&curr_trb->trb_list);
- }
- if (retval)
- return retval;
-
- curr_req->req.actual = actual;
- return 0;
-}
-
-/*
- * mv_u3d_done() - retire a request; caller must have blocked irqs
- * @status : request status to be set; only takes effect while the
- * request is still in progress.
- */
-static
-void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
- __releases(&ep->udc->lock)
- __acquires(&ep->udc->lock)
-{
- struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d;
-
- dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
- /* Remove the req from the ep queue */
- list_del_init(&req->queue);
-
- /* req.status should be set as -EINPROGRESS in ep_queue() */
- if (req->req.status == -EINPROGRESS)
- req->req.status = status;
- else
- status = req->req.status;
-
- /* Free trb for the request */
- if (!req->chain)
- dma_pool_free(u3d->trb_pool,
- req->trb_head->trb_hw, req->trb_head->trb_dma);
- else {
- dma_unmap_single(ep->u3d->gadget.dev.parent,
- (dma_addr_t)req->trb_head->trb_dma,
- req->trb_count * sizeof(struct mv_u3d_trb_hw),
- DMA_BIDIRECTIONAL);
- kfree(req->trb_head->trb_hw);
- }
- kfree(req->trb_head);
-
- usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
-
- if (status && (status != -ESHUTDOWN)) {
- dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
- ep->ep.name, &req->req, status,
- req->req.actual, req->req.length);
- }
-
- spin_unlock(&ep->u3d->lock);
-
- usb_gadget_giveback_request(&ep->ep, &req->req);
-
- spin_lock(&ep->u3d->lock);
-}
-
-static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req)
-{
- u32 tmp, direction;
- struct mv_u3d *u3d;
- struct mv_u3d_ep_context *ep_context;
- int retval = 0;
-
- u3d = ep->u3d;
- direction = mv_u3d_ep_dir(ep);
-
- /* ep0 in and out share ep context slot 1 */
- if (ep->ep_num == 0)
- ep_context = &(u3d->ep_context[1]);
- else
- ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]);
-
- /* check if the pipe is empty or not */
- if (!list_empty(&ep->queue)) {
- dev_err(u3d->dev, "add trb to non-empty queue!\n");
- retval = -ENOMEM;
- WARN_ON(1);
- } else {
- ep_context->rsvd0 = cpu_to_le32(1);
- ep_context->rsvd1 = 0;
-
- /* Configure the trb address and set the DCS bit.
- * Both DCS bit and own bit in trb should be set.
- */
- ep_context->trb_addr_lo =
- cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE);
- ep_context->trb_addr_hi = 0;
-
- /* Ensure that updates to the EP Context will
- * occur before ringing the doorbell.
- */
- wmb();
-
- /* ring the doorbell for the ep */
- if (ep->ep_num == 0)
- tmp = 0x1;
- else
- tmp = ep->ep_num * 2
- + ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1);
-
- iowrite32(tmp, &u3d->op_regs->doorbell);
- }
- return retval;
-}
-
-static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req,
- unsigned *length, dma_addr_t *dma)
-{
- u32 temp;
- unsigned int direction;
- struct mv_u3d_trb *trb;
- struct mv_u3d_trb_hw *trb_hw;
- struct mv_u3d *u3d;
-
- /* how big will this transfer be? */
- *length = req->req.length - req->req.actual;
- BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
-
- u3d = req->ep->u3d;
-
- trb = kzalloc(sizeof(*trb), GFP_ATOMIC);
- if (!trb)
- return NULL;
-
- /*
- * Be careful that no __GFP_HIGHMEM is set, or we cannot use
- * dma_to_virt; GFP_KERNEL cannot be used while holding a spinlock.
- */
- trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma);
- if (!trb_hw) {
- kfree(trb);
- dev_err(u3d->dev,
- "%s, dma_pool_alloc fail\n", __func__);
- return NULL;
- }
- trb->trb_dma = *dma;
- trb->trb_hw = trb_hw;
-
- /* initialize buffer page pointers */
- temp = (u32)(req->req.dma + req->req.actual);
-
- trb_hw->buf_addr_lo = cpu_to_le32(temp);
- trb_hw->buf_addr_hi = 0;
- trb_hw->trb_len = cpu_to_le32(*length);
- trb_hw->ctrl.own = 1;
-
- if (req->ep->ep_num == 0)
- trb_hw->ctrl.type = TYPE_DATA;
- else
- trb_hw->ctrl.type = TYPE_NORMAL;
-
- req->req.actual += *length;
-
- direction = mv_u3d_ep_dir(req->ep);
- if (direction == MV_U3D_EP_DIR_IN)
- trb_hw->ctrl.dir = 1;
- else
- trb_hw->ctrl.dir = 0;
-
- /* Enable interrupt for the last trb of a request */
- if (!req->req.no_interrupt)
- trb_hw->ctrl.ioc = 1;
-
- trb_hw->ctrl.chain = 0;
-
- wmb();
- return trb;
-}
-
-static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length,
- struct mv_u3d_trb *trb, int *is_last)
-{
- u32 temp;
- unsigned int direction;
- struct mv_u3d *u3d;
-
- /* how big will this transfer be? */
- *length = min(req->req.length - req->req.actual,
- (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER);
-
- u3d = req->ep->u3d;
-
- trb->trb_dma = 0;
-
- /* initialize buffer page pointers */
- temp = (u32)(req->req.dma + req->req.actual);
-
- trb->trb_hw->buf_addr_lo = cpu_to_le32(temp);
- trb->trb_hw->buf_addr_hi = 0;
- trb->trb_hw->trb_len = cpu_to_le32(*length);
- trb->trb_hw->ctrl.own = 1;
-
- if (req->ep->ep_num == 0)
- trb->trb_hw->ctrl.type = TYPE_DATA;
- else
- trb->trb_hw->ctrl.type = TYPE_NORMAL;
-
- req->req.actual += *length;
-
- direction = mv_u3d_ep_dir(req->ep);
- if (direction == MV_U3D_EP_DIR_IN)
- trb->trb_hw->ctrl.dir = 1;
- else
- trb->trb_hw->ctrl.dir = 0;
-
- /* zlp is needed if req->req.zero is set */
- if (req->req.zero) {
- if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
- *is_last = 1;
- else
- *is_last = 0;
- } else if (req->req.length == req->req.actual)
- *is_last = 1;
- else
- *is_last = 0;
-
- /* Enable interrupt for the last trb of a request */
- if (*is_last && !req->req.no_interrupt)
- trb->trb_hw->ctrl.ioc = 1;
-
- if (*is_last)
- trb->trb_hw->ctrl.chain = 0;
- else {
- trb->trb_hw->ctrl.chain = 1;
- dev_dbg(u3d->dev, "chain trb\n");
- }
-
- wmb();
-
- return 0;
-}
-
-/* Generate a TRB linked list for a request.
- * The USB controller only supports contiguous TRB chains, i.e. the
- * TRB structures' physical addresses must be contiguous.
- */
-static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
-{
- unsigned count;
- int is_last;
- struct mv_u3d_trb *trb;
- struct mv_u3d_trb_hw *trb_hw;
- struct mv_u3d *u3d;
- dma_addr_t dma;
- unsigned length;
- unsigned trb_num;
-
- u3d = req->ep->u3d;
-
- INIT_LIST_HEAD(&req->trb_list);
-
- length = req->req.length - req->req.actual;
- /* normally the request transfer length is less than 16KB;
- * use mv_u3d_build_trb_one() to optimize that case.
- */
- if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) {
- trb = mv_u3d_build_trb_one(req, &count, &dma);
- if (!trb)
- return -ENOMEM;
- list_add_tail(&trb->trb_list, &req->trb_list);
- req->trb_head = trb;
- req->trb_count = 1;
- req->chain = 0;
- } else {
- trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER;
- if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER)
- trb_num++;
-
- trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC);
- if (!trb)
- return -ENOMEM;
-
- trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC);
- if (!trb_hw) {
- kfree(trb);
- return -ENOMEM;
- }
-
- do {
- trb->trb_hw = trb_hw;
- if (mv_u3d_build_trb_chain(req, &count,
- trb, &is_last)) {
- dev_err(u3d->dev,
- "%s, mv_u3d_build_trb_chain fail\n",
- __func__);
- return -EIO;
- }
-
- list_add_tail(&trb->trb_list, &req->trb_list);
- req->trb_count++;
- trb++;
- trb_hw++;
- } while (!is_last);
-
- req->trb_head = list_entry(req->trb_list.next,
- struct mv_u3d_trb, trb_list);
- req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent,
- req->trb_head->trb_hw,
- trb_num * sizeof(*trb_hw),
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(u3d->gadget.dev.parent,
- req->trb_head->trb_dma)) {
- kfree(req->trb_head->trb_hw);
- kfree(req->trb_head);
- return -EFAULT;
- }
-
- req->chain = 1;
- }
-
- return 0;
-}
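As a worked example of the chaining arithmetic above: MV_U3D_EP_MAX_LENGTH_TRANSFER is 0x10000 (64 KiB), so a 100 KiB (102400-byte) request gives trb_num = 102400 / 65536 = 1 with a 36864-byte remainder, which bumps trb_num to 2; both mv_u3d_trb_hw entries are then carved from a single kcalloc() block that is dma_map_single()'d in one piece, satisfying the contiguity requirement stated above.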
-
-static int
-mv_u3d_start_queue(struct mv_u3d_ep *ep)
-{
- struct mv_u3d *u3d = ep->u3d;
- struct mv_u3d_req *req;
- int ret;
-
- if (!list_empty(&ep->req_list) && !ep->processing)
- req = list_entry(ep->req_list.next, struct mv_u3d_req, list);
- else
- return 0;
-
- ep->processing = 1;
-
- /* set up dma mapping */
- ret = usb_gadget_map_request(&u3d->gadget, &req->req,
- mv_u3d_ep_dir(ep));
- if (ret)
- goto break_processing;
-
- req->req.status = -EINPROGRESS;
- req->req.actual = 0;
- req->trb_count = 0;
-
- /* build trbs */
- ret = mv_u3d_req_to_trb(req);
- if (ret) {
- dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
- goto break_processing;
- }
-
- /* and push them to device queue */
- ret = mv_u3d_queue_trb(ep, req);
- if (ret)
- goto break_processing;
-
- /* irq handler advances the queue */
- list_add_tail(&req->queue, &ep->queue);
-
- return 0;
-
-break_processing:
- ep->processing = 0;
- return ret;
-}
-
-static int mv_u3d_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct mv_u3d *u3d;
- struct mv_u3d_ep *ep;
- u16 max = 0;
- unsigned maxburst = 0;
- u32 epxcr, direction;
-
- if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
- return -EINVAL;
-
- ep = container_of(_ep, struct mv_u3d_ep, ep);
- u3d = ep->u3d;
-
- if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- direction = mv_u3d_ep_dir(ep);
- max = le16_to_cpu(desc->wMaxPacketSize);
-
- if (!_ep->maxburst)
- _ep->maxburst = 1;
- maxburst = _ep->maxburst;
-
- /* Set the max burst size */
- switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
- case USB_ENDPOINT_XFER_BULK:
- if (maxburst > 16) {
- dev_dbg(u3d->dev,
- "max burst should not be greater "
- "than 16 on bulk ep\n");
- maxburst = 1;
- _ep->maxburst = maxburst;
- }
- dev_dbg(u3d->dev,
- "maxburst: %d on bulk %s\n", maxburst, ep->name);
- break;
- case USB_ENDPOINT_XFER_CONTROL:
- /* control transfer only supports maxburst as one */
- maxburst = 1;
- _ep->maxburst = maxburst;
- break;
- case USB_ENDPOINT_XFER_INT:
- if (maxburst != 1) {
- dev_dbg(u3d->dev,
- "max burst should be 1 on int ep "
- "if transfer size is not 1024\n");
- maxburst = 1;
- _ep->maxburst = maxburst;
- }
- break;
- case USB_ENDPOINT_XFER_ISOC:
- if (maxburst != 1) {
- dev_dbg(u3d->dev,
- "max burst should be 1 on isoc ep "
- "if transfer size is not 1024\n");
- maxburst = 1;
- _ep->maxburst = maxburst;
- }
- break;
- default:
- goto en_done;
- }
-
- ep->ep.maxpacket = max;
- ep->ep.desc = desc;
- ep->enabled = 1;
-
- /* Enable the endpoint for Rx or Tx and set the endpoint type */
- if (direction == MV_U3D_EP_DIR_OUT) {
- epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
- epxcr |= MV_U3D_EPXCR_EP_INIT;
- iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
- udelay(5);
- epxcr &= ~MV_U3D_EPXCR_EP_INIT;
- iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
-
- epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
- | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
- | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
- | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
- iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
- } else {
- epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
- epxcr |= MV_U3D_EPXCR_EP_INIT;
- iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
- udelay(5);
- epxcr &= ~MV_U3D_EPXCR_EP_INIT;
- iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
-
- epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT)
- | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT)
- | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
- | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK));
- iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
- }
-
- return 0;
-en_done:
- return -EINVAL;
-}
-
-static int mv_u3d_ep_disable(struct usb_ep *_ep)
-{
- struct mv_u3d *u3d;
- struct mv_u3d_ep *ep;
- u32 epxcr, direction;
- unsigned long flags;
-
- if (!_ep)
- return -EINVAL;
-
- ep = container_of(_ep, struct mv_u3d_ep, ep);
- if (!ep->ep.desc)
- return -EINVAL;
-
- u3d = ep->u3d;
-
- direction = mv_u3d_ep_dir(ep);
-
- /* nuke all pending requests (does flush) */
- spin_lock_irqsave(&u3d->lock, flags);
- mv_u3d_nuke(ep, -ESHUTDOWN);
- spin_unlock_irqrestore(&u3d->lock, flags);
-
- /* Disable the endpoint for Rx or Tx and reset the endpoint type */
- if (direction == MV_U3D_EP_DIR_OUT) {
- epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
- epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
- | USB_ENDPOINT_XFERTYPE_MASK);
- iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1);
- } else {
- epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
- epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT)
- | USB_ENDPOINT_XFERTYPE_MASK);
- iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1);
- }
-
- ep->enabled = 0;
-
- ep->ep.desc = NULL;
- return 0;
-}
-
-static struct usb_request *
-mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
-{
- struct mv_u3d_req *req;
-
- req = kzalloc(sizeof *req, gfp_flags);
- if (!req)
- return NULL;
-
- INIT_LIST_HEAD(&req->queue);
-
- return &req->req;
-}
-
-static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req);
-
- kfree(req);
-}
-
-static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep)
-{
- struct mv_u3d *u3d;
- u32 direction;
- struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep);
- unsigned int loops;
- u32 tmp;
-
- /* if endpoint is not enabled, cannot flush endpoint */
- if (!ep->enabled)
- return;
-
- u3d = ep->u3d;
- direction = mv_u3d_ep_dir(ep);
-
- /* ep0 needs the flush bit cleared after flushing the fifo. */
- if (!ep->ep_num) {
- if (direction == MV_U3D_EP_DIR_OUT) {
- tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0);
- tmp |= MV_U3D_EPXCR_EP_FLUSH;
- iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
- udelay(10);
- tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
- iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0);
- } else {
- tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0);
- tmp |= MV_U3D_EPXCR_EP_FLUSH;
- iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
- udelay(10);
- tmp &= ~MV_U3D_EPXCR_EP_FLUSH;
- iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0);
- }
- return;
- }
-
- if (direction == MV_U3D_EP_DIR_OUT) {
- tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
- tmp |= MV_U3D_EPXCR_EP_FLUSH;
- iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0);
-
- /* Wait until flushing completed */
- loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
- while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) &
- MV_U3D_EPXCR_EP_FLUSH) {
- /*
- * EP_FLUSH bit should be cleared to indicate this
- * operation is complete
- */
- if (loops == 0) {
- dev_dbg(u3d->dev,
- "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
- direction ? "in" : "out");
- return;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
- } else { /* EP_DIR_IN */
- tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
- tmp |= MV_U3D_EPXCR_EP_FLUSH;
- iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0);
-
- /* Wait until flushing completed */
- loops = LOOPS(MV_U3D_FLUSH_TIMEOUT);
- while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) &
- MV_U3D_EPXCR_EP_FLUSH) {
- /*
- * EP_FLUSH bit should be cleared to indicate this
- * operation is complete
- */
- if (loops == 0) {
- dev_dbg(u3d->dev,
- "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num,
- direction ? "in" : "out");
- return;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
- }
-}
-
-/* queues (submits) an I/O request to an endpoint */
-static int
-mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
-{
- struct mv_u3d_ep *ep;
- struct mv_u3d_req *req;
- struct mv_u3d *u3d;
- unsigned long flags;
- int is_first_req = 0;
-
- if (unlikely(!_ep || !_req))
- return -EINVAL;
-
- ep = container_of(_ep, struct mv_u3d_ep, ep);
- u3d = ep->u3d;
-
- req = container_of(_req, struct mv_u3d_req, req);
-
- if (!ep->ep_num
- && u3d->ep0_state == MV_U3D_STATUS_STAGE
- && !_req->length) {
- dev_dbg(u3d->dev, "ep0 status stage\n");
- u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
- return 0;
- }
-
- dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n",
- __func__, _ep->name, req);
-
- /* catch various bogus parameters */
- if (!req->req.complete || !req->req.buf
- || !list_empty(&req->queue)) {
- dev_err(u3d->dev,
- "%s, bad params, _req: 0x%p, "
- "req->req.complete: 0x%p, req->req.buf: 0x%p, "
- "list_empty: 0x%x\n",
- __func__, _req,
- req->req.complete, req->req.buf,
- list_empty(&req->queue));
- return -EINVAL;
- }
- if (unlikely(!ep->ep.desc)) {
- dev_err(u3d->dev, "%s, bad ep\n", __func__);
- return -EINVAL;
- }
- if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
- if (req->req.length > ep->ep.maxpacket)
- return -EMSGSIZE;
- }
-
- if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) {
- dev_err(u3d->dev,
- "bad params of driver/speed\n");
- return -ESHUTDOWN;
- }
-
- req->ep = ep;
-
- /* Software list handles usb request. */
- spin_lock_irqsave(&ep->req_lock, flags);
- is_first_req = list_empty(&ep->req_list);
- list_add_tail(&req->list, &ep->req_list);
- spin_unlock_irqrestore(&ep->req_lock, flags);
- if (!is_first_req) {
- dev_dbg(u3d->dev, "list is not empty\n");
- return 0;
- }
-
- dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n");
- spin_lock_irqsave(&u3d->lock, flags);
- mv_u3d_start_queue(ep);
- spin_unlock_irqrestore(&u3d->lock, flags);
- return 0;
-}
-
-/* dequeues (cancels, unlinks) an I/O request from an endpoint */
-static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct mv_u3d_ep *ep;
- struct mv_u3d_req *req = NULL, *iter;
- struct mv_u3d *u3d;
- struct mv_u3d_ep_context *ep_context;
- struct mv_u3d_req *next_req;
-
- unsigned long flags;
- int ret = 0;
-
- if (!_ep || !_req)
- return -EINVAL;
-
- ep = container_of(_ep, struct mv_u3d_ep, ep);
- u3d = ep->u3d;
-
- spin_lock_irqsave(&ep->u3d->lock, flags);
-
- /* make sure it's actually queued on this endpoint */
- list_for_each_entry(iter, &ep->queue, queue) {
- if (&iter->req != _req)
- continue;
- req = iter;
- break;
- }
- if (!req) {
- ret = -EINVAL;
- goto out;
- }
-
- /* The request is in progress, or completed but not dequeued */
- if (ep->queue.next == &req->queue) {
- _req->status = -ECONNRESET;
- mv_u3d_ep_fifo_flush(_ep);
-
- /* The request isn't the last request in this ep queue */
- if (req->queue.next != &ep->queue) {
- dev_dbg(u3d->dev,
- "it is not the last request in this ep queue\n");
- ep_context = ep->ep_context;
- next_req = list_entry(req->queue.next,
- struct mv_u3d_req, queue);
-
- /* Point the EP context at the next request's first TRB. */
- iowrite32((u32)next_req->trb_head->trb_dma,
- &ep_context->trb_addr_lo);
- } else {
- struct mv_u3d_ep_context *ep_context;
- ep_context = ep->ep_context;
- ep_context->trb_addr_lo = 0;
- ep_context->trb_addr_hi = 0;
- }
-
- } else
- WARN_ON(1);
-
- mv_u3d_done(ep, req, -ECONNRESET);
-
- /* remove the req from the ep req list */
- if (!list_empty(&ep->req_list)) {
- struct mv_u3d_req *curr_req;
- curr_req = list_entry(ep->req_list.next,
- struct mv_u3d_req, list);
- if (curr_req == req) {
- list_del_init(&req->list);
- ep->processing = 0;
- }
- }
-
-out:
- spin_unlock_irqrestore(&ep->u3d->lock, flags);
- return ret;
-}
-
-static void
-mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall)
-{
- u32 tmp;
-
- dev_dbg(u3d->dev, "%s\n", __func__);
- if (direction == MV_U3D_EP_DIR_OUT) {
- tmp = ioread32(&u3d->vuc_regs->epcr[ep_num].epxoutcr0);
- if (stall)
- tmp |= MV_U3D_EPXCR_EP_HALT;
- else
- tmp &= ~MV_U3D_EPXCR_EP_HALT;
- iowrite32(tmp, &u3d->vuc_regs->epcr[ep_num].epxoutcr0);
- } else {
- tmp = ioread32(&u3d->vuc_regs->epcr[ep_num].epxincr0);
- if (stall)
- tmp |= MV_U3D_EPXCR_EP_HALT;
- else
- tmp &= ~MV_U3D_EPXCR_EP_HALT;
- iowrite32(tmp, &u3d->vuc_regs->epcr[ep_num].epxincr0);
- }
-}
-
-static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
-{
- struct mv_u3d_ep *ep;
- unsigned long flags;
- int status = 0;
- struct mv_u3d *u3d;
-
- ep = container_of(_ep, struct mv_u3d_ep, ep);
- u3d = ep->u3d;
- if (!ep->ep.desc) {
- status = -EINVAL;
- goto out;
- }
-
- if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
- status = -EOPNOTSUPP;
- goto out;
- }
-
- /*
- * An attempt to halt an IN ep will fail if any transfer
- * requests are still queued
- */
- if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN)
- && !list_empty(&ep->queue)) {
- status = -EAGAIN;
- goto out;
- }
-
- spin_lock_irqsave(&ep->u3d->lock, flags);
- mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt);
- if (halt && wedge)
- ep->wedge = 1;
- else if (!halt)
- ep->wedge = 0;
- spin_unlock_irqrestore(&ep->u3d->lock, flags);
-
- if (ep->ep_num == 0)
- u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
-out:
- return status;
-}
-
-static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt)
-{
- return mv_u3d_ep_set_halt_wedge(_ep, halt, 0);
-}
-
-static int mv_u3d_ep_set_wedge(struct usb_ep *_ep)
-{
- return mv_u3d_ep_set_halt_wedge(_ep, 1, 1);
-}
-
-static const struct usb_ep_ops mv_u3d_ep_ops = {
- .enable = mv_u3d_ep_enable,
- .disable = mv_u3d_ep_disable,
-
- .alloc_request = mv_u3d_alloc_request,
- .free_request = mv_u3d_free_request,
-
- .queue = mv_u3d_ep_queue,
- .dequeue = mv_u3d_ep_dequeue,
-
- .set_wedge = mv_u3d_ep_set_wedge,
- .set_halt = mv_u3d_ep_set_halt,
- .fifo_flush = mv_u3d_ep_fifo_flush,
-};
-
-static void mv_u3d_controller_stop(struct mv_u3d *u3d)
-{
- u32 tmp;
-
- if (!u3d->clock_gating && u3d->vbus_valid_detect)
- iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID,
- &u3d->vuc_regs->intrenable);
- else
- iowrite32(0, &u3d->vuc_regs->intrenable);
- iowrite32(~0x0, &u3d->vuc_regs->endcomplete);
- iowrite32(~0x0, &u3d->vuc_regs->trbunderrun);
- iowrite32(~0x0, &u3d->vuc_regs->trbcomplete);
- iowrite32(~0x0, &u3d->vuc_regs->linkchange);
- iowrite32(0x1, &u3d->vuc_regs->setuplock);
-
- /* Reset the RUN bit in the command register to stop USB */
- tmp = ioread32(&u3d->op_regs->usbcmd);
- tmp &= ~MV_U3D_CMD_RUN_STOP;
- iowrite32(tmp, &u3d->op_regs->usbcmd);
- dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n",
- ioread32(&u3d->op_regs->usbcmd));
-}
-
-static void mv_u3d_controller_start(struct mv_u3d *u3d)
-{
- u32 usbintr;
- u32 temp;
-
- /* enable link LTSSM state machine */
- temp = ioread32(&u3d->vuc_regs->ltssm);
- temp |= MV_U3D_LTSSM_PHY_INIT_DONE;
- iowrite32(temp, &u3d->vuc_regs->ltssm);
-
- /* Enable interrupts */
- usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR |
- MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE |
- MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP |
- (u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0);
- iowrite32(usbintr, &u3d->vuc_regs->intrenable);
-
- /* Enable ctrl ep */
- iowrite32(0x1, &u3d->vuc_regs->ctrlepenable);
-
- /* Set the Run bit in the command register */
- iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd);
- dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n",
- ioread32(&u3d->op_regs->usbcmd));
-}
-
-static int mv_u3d_controller_reset(struct mv_u3d *u3d)
-{
- unsigned int loops;
- u32 tmp;
-
- /* Stop the controller */
- tmp = ioread32(&u3d->op_regs->usbcmd);
- tmp &= ~MV_U3D_CMD_RUN_STOP;
- iowrite32(tmp, &u3d->op_regs->usbcmd);
-
- /* Reset the controller to get default values */
- iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd);
-
- /* wait for reset to complete */
- loops = LOOPS(MV_U3D_RESET_TIMEOUT);
- while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) {
- if (loops == 0) {
- dev_err(u3d->dev,
- "Wait for RESET completed TIMEOUT\n");
- return -ETIMEDOUT;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
-
- /* Configure the Endpoint Context Address */
- iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl);
- iowrite32(0, &u3d->op_regs->dcbaaph);
-
- return 0;
-}
-
-static int mv_u3d_enable(struct mv_u3d *u3d)
-{
- struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
- int retval;
-
- if (u3d->active)
- return 0;
-
- if (!u3d->clock_gating) {
- u3d->active = 1;
- return 0;
- }
-
- dev_dbg(u3d->dev, "enable u3d\n");
- clk_enable(u3d->clk);
- if (pdata->phy_init) {
- retval = pdata->phy_init(u3d->phy_regs);
- if (retval) {
- dev_err(u3d->dev,
- "init phy error %d\n", retval);
- clk_disable(u3d->clk);
- return retval;
- }
- }
- u3d->active = 1;
-
- return 0;
-}
-
-static void mv_u3d_disable(struct mv_u3d *u3d)
-{
- struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
- if (u3d->clock_gating && u3d->active) {
- dev_dbg(u3d->dev, "disable u3d\n");
- if (pdata->phy_deinit)
- pdata->phy_deinit(u3d->phy_regs);
- clk_disable(u3d->clk);
- u3d->active = 0;
- }
-}
-
-static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active)
-{
- struct mv_u3d *u3d;
- unsigned long flags;
- int retval = 0;
-
- u3d = container_of(gadget, struct mv_u3d, gadget);
-
- spin_lock_irqsave(&u3d->lock, flags);
-
- u3d->vbus_active = (is_active != 0);
- dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
- __func__, u3d->softconnect, u3d->vbus_active);
- /*
- * 1. external VBUS detect: we can disable/enable clock on demand.
- * 2. UDC VBUS detect: we have to enable clock all the time.
- * 3. No VBUS detect: we have to enable clock all the time.
- */
- if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
- retval = mv_u3d_enable(u3d);
- if (retval == 0) {
- /*
- * after the clock is disabled, all register
- * context is lost; we have to re-init the registers
- */
- mv_u3d_controller_reset(u3d);
- mv_u3d_ep0_reset(u3d);
- mv_u3d_controller_start(u3d);
- }
- } else if (u3d->driver && u3d->softconnect) {
- if (!u3d->active)
- goto out;
-
- /* stop all the transfer in queue*/
- mv_u3d_stop_activity(u3d, u3d->driver);
- mv_u3d_controller_stop(u3d);
- mv_u3d_disable(u3d);
- }
-
-out:
- spin_unlock_irqrestore(&u3d->lock, flags);
- return retval;
-}
-
-/* constrain controller's VBUS power usage
- * This call is used by gadget drivers during SET_CONFIGURATION calls,
- * reporting how much power the device may consume. For example, this
- * could affect how quickly batteries are recharged.
- *
- * Returns zero on success, else negative errno.
- */
-static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA)
-{
- struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
-
- u3d->power = mA;
-
- return 0;
-}
-
-static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on)
-{
- struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget);
- unsigned long flags;
- int retval = 0;
-
- spin_lock_irqsave(&u3d->lock, flags);
-
- dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n",
- __func__, u3d->softconnect, u3d->vbus_active);
- u3d->softconnect = (is_on != 0);
- if (u3d->driver && u3d->softconnect && u3d->vbus_active) {
- retval = mv_u3d_enable(u3d);
- if (retval == 0) {
- /*
- * after the clock is disabled, all register
- * context is lost; we have to re-init the registers
- */
- mv_u3d_controller_reset(u3d);
- mv_u3d_ep0_reset(u3d);
- mv_u3d_controller_start(u3d);
- }
- } else if (u3d->driver && u3d->vbus_active) {
- /* stop all the transfer in queue*/
- mv_u3d_stop_activity(u3d, u3d->driver);
- mv_u3d_controller_stop(u3d);
- mv_u3d_disable(u3d);
- }
-
- spin_unlock_irqrestore(&u3d->lock, flags);
-
- return retval;
-}
-
-static int mv_u3d_start(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
-{
- struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
- struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
- unsigned long flags;
-
- if (u3d->driver)
- return -EBUSY;
-
- spin_lock_irqsave(&u3d->lock, flags);
-
- if (!u3d->clock_gating) {
- clk_enable(u3d->clk);
- if (pdata->phy_init)
- pdata->phy_init(u3d->phy_regs);
- }
-
- /* hook up the driver ... */
- u3d->driver = driver;
-
- u3d->ep0_dir = USB_DIR_OUT;
-
- spin_unlock_irqrestore(&u3d->lock, flags);
-
- u3d->vbus_valid_detect = 1;
-
- return 0;
-}
-
-static int mv_u3d_stop(struct usb_gadget *g)
-{
- struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
- struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
- unsigned long flags;
-
- u3d->vbus_valid_detect = 0;
- spin_lock_irqsave(&u3d->lock, flags);
-
- /* enable clock to access controller register */
- clk_enable(u3d->clk);
- if (pdata->phy_init)
- pdata->phy_init(u3d->phy_regs);
-
- mv_u3d_controller_stop(u3d);
- /* stop all usb activities */
- u3d->gadget.speed = USB_SPEED_UNKNOWN;
- mv_u3d_stop_activity(u3d, NULL);
- mv_u3d_disable(u3d);
-
- if (pdata->phy_deinit)
- pdata->phy_deinit(u3d->phy_regs);
- clk_disable(u3d->clk);
-
- spin_unlock_irqrestore(&u3d->lock, flags);
-
- u3d->driver = NULL;
-
- return 0;
-}
-
-/* device controller usb_gadget_ops structure */
-static const struct usb_gadget_ops mv_u3d_ops = {
- /* notify controller that VBUS is powered or not */
- .vbus_session = mv_u3d_vbus_session,
-
- /* constrain controller's VBUS power usage */
- .vbus_draw = mv_u3d_vbus_draw,
-
- .pullup = mv_u3d_pullup,
- .udc_start = mv_u3d_start,
- .udc_stop = mv_u3d_stop,
-};
-
-static int mv_u3d_eps_init(struct mv_u3d *u3d)
-{
- struct mv_u3d_ep *ep;
- char name[14];
- int i;
-
- /* initialize ep0, ep0 in/out use eps[1] */
- ep = &u3d->eps[1];
- ep->u3d = u3d;
- strscpy(ep->name, "ep0");
- ep->ep.name = ep->name;
- ep->ep.ops = &mv_u3d_ep_ops;
- ep->wedge = 0;
- usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
- ep->ep.caps.type_control = true;
- ep->ep.caps.dir_in = true;
- ep->ep.caps.dir_out = true;
- ep->ep_num = 0;
- ep->ep.desc = &mv_u3d_ep0_desc;
- INIT_LIST_HEAD(&ep->queue);
- INIT_LIST_HEAD(&ep->req_list);
- ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
-
- /* add ep0 ep_context */
- ep->ep_context = &u3d->ep_context[1];
-
- /* initialize other endpoints */
- for (i = 2; i < u3d->max_eps * 2; i++) {
- ep = &u3d->eps[i];
- if (i & 1) {
- snprintf(name, sizeof(name), "ep%din", i >> 1);
- ep->direction = MV_U3D_EP_DIR_IN;
- ep->ep.caps.dir_in = true;
- } else {
- snprintf(name, sizeof(name), "ep%dout", i >> 1);
- ep->direction = MV_U3D_EP_DIR_OUT;
- ep->ep.caps.dir_out = true;
- }
- ep->u3d = u3d;
- strscpy(ep->name, name);
- ep->ep.name = ep->name;
-
- ep->ep.caps.type_iso = true;
- ep->ep.caps.type_bulk = true;
- ep->ep.caps.type_int = true;
-
- ep->ep.ops = &mv_u3d_ep_ops;
- usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
- ep->ep_num = i / 2;
-
- INIT_LIST_HEAD(&ep->queue);
- list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list);
-
- INIT_LIST_HEAD(&ep->req_list);
- spin_lock_init(&ep->req_lock);
- ep->ep_context = &u3d->ep_context[i];
- }
-
- return 0;
-}
-
-/* delete all endpoint requests, called with spinlock held */
-static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status)
-{
- /* endpoint fifo flush */
- mv_u3d_ep_fifo_flush(&ep->ep);
-
- while (!list_empty(&ep->queue)) {
- struct mv_u3d_req *req = NULL;
- req = list_entry(ep->queue.next, struct mv_u3d_req, queue);
- mv_u3d_done(ep, req, status);
- }
-}
-
-/* stop all USB activities */
-static
-void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver)
-{
- struct mv_u3d_ep *ep;
-
- mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN);
-
- list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) {
- mv_u3d_nuke(ep, -ESHUTDOWN);
- }
-
- /* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&u3d->lock);
- driver->disconnect(&u3d->gadget);
- spin_lock(&u3d->lock);
- }
-}
-
-static void mv_u3d_irq_process_error(struct mv_u3d *u3d)
-{
- /* Increment the error count */
- u3d->errors++;
- dev_err(u3d->dev, "%s\n", __func__);
-}
-
-static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d)
-{
- u32 linkchange;
-
- linkchange = ioread32(&u3d->vuc_regs->linkchange);
- iowrite32(linkchange, &u3d->vuc_regs->linkchange);
-
- dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange);
-
- if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) {
- dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n",
- ioread32(&u3d->vuc_regs->ltssmstate));
-
- u3d->usb_state = USB_STATE_DEFAULT;
- u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
- u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP;
-
- /* set speed */
- u3d->gadget.speed = USB_SPEED_SUPER;
- }
-
- if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) {
- dev_dbg(u3d->dev, "link suspend\n");
- u3d->resume_state = u3d->usb_state;
- u3d->usb_state = USB_STATE_SUSPENDED;
- }
-
- if (linkchange & MV_U3D_LINK_CHANGE_RESUME) {
- dev_dbg(u3d->dev, "link resume\n");
- u3d->usb_state = u3d->resume_state;
- u3d->resume_state = 0;
- }
-
- if (linkchange & MV_U3D_LINK_CHANGE_WRESET) {
- dev_dbg(u3d->dev, "warm reset\n");
- u3d->usb_state = USB_STATE_POWERED;
- }
-
- if (linkchange & MV_U3D_LINK_CHANGE_HRESET) {
- dev_dbg(u3d->dev, "hot reset\n");
- u3d->usb_state = USB_STATE_DEFAULT;
- }
-
- if (linkchange & MV_U3D_LINK_CHANGE_INACT)
- dev_dbg(u3d->dev, "inactive\n");
-
- if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0)
- dev_dbg(u3d->dev, "ss.disabled\n");
-
- if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) {
- dev_dbg(u3d->dev, "vbus invalid\n");
- u3d->usb_state = USB_STATE_ATTACHED;
- u3d->vbus_valid_detect = 1;
- /* if external vbus detect is not supported,
- * we handle it here.
- */
- if (!u3d->vbus) {
- spin_unlock(&u3d->lock);
- mv_u3d_vbus_session(&u3d->gadget, 0);
- spin_lock(&u3d->lock);
- }
- }
-}
-
-static void mv_u3d_ch9setaddress(struct mv_u3d *u3d,
- struct usb_ctrlrequest *setup)
-{
- u32 tmp;
-
- if (u3d->usb_state != USB_STATE_DEFAULT) {
- dev_err(u3d->dev,
- "%s, cannot setaddr in this state (%d)\n",
- __func__, u3d->usb_state);
- goto err;
- }
-
- u3d->dev_addr = (u8)setup->wValue;
-
- dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr);
-
- if (u3d->dev_addr > 127) {
- dev_err(u3d->dev,
- "%s, u3d address is wrong (out of range)\n", __func__);
- u3d->dev_addr = 0;
- goto err;
- }
-
- /* update usb state */
- u3d->usb_state = USB_STATE_ADDRESS;
-
- /* set the new address */
- tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr);
- tmp &= ~0x7F;
- tmp |= (u32)u3d->dev_addr;
- iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr);
-
- return;
-err:
- mv_u3d_ep0_stall(u3d);
-}
-
-static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup)
-{
- if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
- if (setup->bRequest == USB_REQ_SET_CONFIGURATION)
- return 1;
-
- return 0;
-}
-
-static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
- struct usb_ctrlrequest *setup)
- __releases(&u3d->lock)
- __acquires(&u3d->lock)
-{
- bool delegate = false;
-
- mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN);
-
- dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
- setup->bRequestType, setup->bRequest,
- setup->wValue, setup->wIndex, setup->wLength);
-
- /* We process some standard setup requests here */
- if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- switch (setup->bRequest) {
- case USB_REQ_GET_STATUS:
- delegate = true;
- break;
-
- case USB_REQ_SET_ADDRESS:
- mv_u3d_ch9setaddress(u3d, setup);
- break;
-
- case USB_REQ_CLEAR_FEATURE:
- delegate = true;
- break;
-
- case USB_REQ_SET_FEATURE:
- delegate = true;
- break;
-
- default:
- delegate = true;
- }
- } else
- delegate = true;
-
- /* delegate USB standard requests to the gadget driver */
- if (delegate) {
- /* USB requests handled by gadget */
- if (setup->wLength) {
- /* DATA phase from gadget, STATUS phase from u3d */
- u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN)
- ? MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT;
- spin_unlock(&u3d->lock);
- if (u3d->driver->setup(&u3d->gadget,
- &u3d->local_setup_buff) < 0) {
- dev_err(u3d->dev, "setup error!\n");
- mv_u3d_ep0_stall(u3d);
- }
- spin_lock(&u3d->lock);
- } else {
- /* no DATA phase, STATUS phase from gadget */
- u3d->ep0_dir = MV_U3D_EP_DIR_IN;
- u3d->ep0_state = MV_U3D_STATUS_STAGE;
- spin_unlock(&u3d->lock);
- if (u3d->driver->setup(&u3d->gadget,
- &u3d->local_setup_buff) < 0)
- mv_u3d_ep0_stall(u3d);
- spin_lock(&u3d->lock);
- }
-
- if (mv_u3d_is_set_configuration(setup)) {
- dev_dbg(u3d->dev, "u3d configured\n");
- u3d->usb_state = USB_STATE_CONFIGURED;
- }
- }
-}
-
-static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr)
-{
- struct mv_u3d_ep_context *epcontext;
-
- epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN];
-
- /* Copy the setup packet to local buffer */
- memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8);
-}
-
-static void mv_u3d_irq_process_setup(struct mv_u3d *u3d)
-{
- u32 tmp, i;
- /* Process all Setup packet received interrupts */
- tmp = ioread32(&u3d->vuc_regs->setuplock);
- if (tmp) {
- for (i = 0; i < u3d->max_eps; i++) {
- if (tmp & (1 << i)) {
- mv_u3d_get_setup_data(u3d, i,
- (u8 *)(&u3d->local_setup_buff));
- mv_u3d_handle_setup_packet(u3d, i,
- &u3d->local_setup_buff);
- }
- }
- }
-
- iowrite32(tmp, &u3d->vuc_regs->setuplock);
-}
-
-static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d)
-{
- u32 tmp, bit_pos;
- int i, ep_num = 0, direction = 0;
- struct mv_u3d_ep *curr_ep;
- struct mv_u3d_req *curr_req, *temp_req;
- int status;
-
- tmp = ioread32(&u3d->vuc_regs->endcomplete);
-
- dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp);
- if (!tmp)
- return;
- iowrite32(tmp, &u3d->vuc_regs->endcomplete);
-
- for (i = 0; i < u3d->max_eps * 2; i++) {
- ep_num = i >> 1;
- direction = i % 2;
-
- bit_pos = 1 << (ep_num + 16 * direction);
-
- if (!(bit_pos & tmp))
- continue;
-
- if (i == 0)
- curr_ep = &u3d->eps[1];
- else
- curr_ep = &u3d->eps[i];
-
- /* remove req out of ep request list after completion */
- dev_dbg(u3d->dev, "tr comp: check req_list\n");
- spin_lock(&curr_ep->req_lock);
- if (!list_empty(&curr_ep->req_list)) {
- struct mv_u3d_req *req;
- req = list_entry(curr_ep->req_list.next,
- struct mv_u3d_req, list);
- list_del_init(&req->list);
- curr_ep->processing = 0;
- }
- spin_unlock(&curr_ep->req_lock);
-
- /* process the req queue until an incomplete request is found */
- list_for_each_entry_safe(curr_req, temp_req,
- &curr_ep->queue, queue) {
- status = mv_u3d_process_ep_req(u3d, i, curr_req);
- if (status)
- break;
- /* write back status to req */
- curr_req->req.status = status;
-
- /* ep0 request completion */
- if (ep_num == 0) {
- mv_u3d_done(curr_ep, curr_req, 0);
- break;
- } else {
- mv_u3d_done(curr_ep, curr_req, status);
- }
- }
-
- dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n");
- mv_u3d_start_queue(curr_ep);
- }
-}
-
-static irqreturn_t mv_u3d_irq(int irq, void *dev)
-{
- struct mv_u3d *u3d = (struct mv_u3d *)dev;
- u32 status, intr;
- u32 bridgesetting;
- u32 trbunderrun;
-
- spin_lock(&u3d->lock);
-
- status = ioread32(&u3d->vuc_regs->intrcause);
- intr = ioread32(&u3d->vuc_regs->intrenable);
- status &= intr;
-
- if (status == 0) {
- spin_unlock(&u3d->lock);
- dev_err(u3d->dev, "irq error!\n");
- return IRQ_NONE;
- }
-
- if (status & MV_U3D_USBINT_VBUS_VALID) {
- bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting);
- if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) {
- /* write vbus valid bit of bridge setting to clear */
- bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID;
- iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting);
- dev_dbg(u3d->dev, "vbus valid\n");
-
- u3d->usb_state = USB_STATE_POWERED;
- u3d->vbus_valid_detect = 0;
- /* if external vbus detect is not supported,
- * we handle it here.
- */
- if (!u3d->vbus) {
- spin_unlock(&u3d->lock);
- mv_u3d_vbus_session(&u3d->gadget, 1);
- spin_lock(&u3d->lock);
- }
- } else
- dev_err(u3d->dev, "vbus bit is not set\n");
- }
-
- /* RX data is already in the 16KB FIFO. */
- if (status & MV_U3D_USBINT_UNDER_RUN) {
- trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun);
- dev_err(u3d->dev, "under run, ep%d\n", trbunderrun);
- iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun);
- mv_u3d_irq_process_error(u3d);
- }
-
- if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) {
- /* write one to clear */
- iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR
- | MV_U3D_USBINT_TXDESC_ERR),
- &u3d->vuc_regs->intrcause);
- dev_err(u3d->dev, "desc err 0x%x\n", status);
- mv_u3d_irq_process_error(u3d);
- }
-
- if (status & MV_U3D_USBINT_LINK_CHG)
- mv_u3d_irq_process_link_change(u3d);
-
- if (status & MV_U3D_USBINT_TX_COMPLETE)
- mv_u3d_irq_process_tr_complete(u3d);
-
- if (status & MV_U3D_USBINT_RX_COMPLETE)
- mv_u3d_irq_process_tr_complete(u3d);
-
- if (status & MV_U3D_USBINT_SETUP)
- mv_u3d_irq_process_setup(u3d);
-
- spin_unlock(&u3d->lock);
- return IRQ_HANDLED;
-}
-
-static void mv_u3d_remove(struct platform_device *dev)
-{
- struct mv_u3d *u3d = platform_get_drvdata(dev);
-
- BUG_ON(u3d == NULL);
-
- usb_del_gadget_udc(&u3d->gadget);
-
- /* free memory allocated in probe */
- dma_pool_destroy(u3d->trb_pool);
-
- if (u3d->ep_context)
- dma_free_coherent(&dev->dev, u3d->ep_context_size,
- u3d->ep_context, u3d->ep_context_dma);
-
- kfree(u3d->eps);
-
- if (u3d->irq)
- free_irq(u3d->irq, u3d);
-
- if (u3d->cap_regs)
- iounmap(u3d->cap_regs);
- u3d->cap_regs = NULL;
-
- kfree(u3d->status_req);
-
- clk_put(u3d->clk);
-
- kfree(u3d);
-}
-
-static int mv_u3d_probe(struct platform_device *dev)
-{
- struct mv_u3d *u3d;
- struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
- int retval = 0;
- struct resource *r;
- size_t size;
-
- if (!dev_get_platdata(&dev->dev)) {
- dev_err(&dev->dev, "missing platform_data\n");
- retval = -ENODEV;
- goto err_pdata;
- }
-
- u3d = kzalloc(sizeof(*u3d), GFP_KERNEL);
- if (!u3d) {
- retval = -ENOMEM;
- goto err_alloc_private;
- }
-
- spin_lock_init(&u3d->lock);
-
- platform_set_drvdata(dev, u3d);
-
- u3d->dev = &dev->dev;
- u3d->vbus = pdata->vbus;
-
- u3d->clk = clk_get(&dev->dev, NULL);
- if (IS_ERR(u3d->clk)) {
- retval = PTR_ERR(u3d->clk);
- goto err_get_clk;
- }
-
- r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs");
- if (!r) {
- dev_err(&dev->dev, "no I/O memory resource defined\n");
- retval = -ENODEV;
- goto err_get_cap_regs;
- }
-
- u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *)
- ioremap(r->start, resource_size(r));
- if (!u3d->cap_regs) {
- dev_err(&dev->dev, "failed to map I/O memory\n");
- retval = -EBUSY;
- goto err_map_cap_regs;
- } else {
- dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n",
- (unsigned long) r->start,
- (unsigned long) u3d->cap_regs);
- }
-
- /* we will access controller register, so enable the u3d controller */
- retval = clk_enable(u3d->clk);
- if (retval) {
- dev_err(&dev->dev, "clk_enable error %d\n", retval);
- goto err_u3d_enable;
- }
-
- if (pdata->phy_init) {
- retval = pdata->phy_init(u3d->phy_regs);
- if (retval) {
- dev_err(&dev->dev, "init phy error %d\n", retval);
- clk_disable(u3d->clk);
- goto err_phy_init;
- }
- }
-
- u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs
- + MV_U3D_USB3_OP_REGS_OFFSET);
-
- u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs
- + ioread32(&u3d->cap_regs->vuoff));
-
- u3d->max_eps = 16;
-
- /*
- * some platforms use USB to download the boot image and may not
- * disconnect the usb gadget before loading the kernel, so stop u3d first
- */
- mv_u3d_controller_stop(u3d);
- iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause);
-
- if (pdata->phy_deinit)
- pdata->phy_deinit(u3d->phy_regs);
- clk_disable(u3d->clk);
-
- size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2;
- size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1)
- & ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1);
- u3d->ep_context = dma_alloc_coherent(&dev->dev, size,
- &u3d->ep_context_dma, GFP_KERNEL);
- if (!u3d->ep_context) {
- dev_err(&dev->dev, "allocate ep context memory failed\n");
- retval = -ENOMEM;
- goto err_alloc_ep_context;
- }
- u3d->ep_context_size = size;
-
- /* create TRB dma_pool resource */
- u3d->trb_pool = dma_pool_create("u3d_trb",
- &dev->dev,
- sizeof(struct mv_u3d_trb_hw),
- MV_U3D_TRB_ALIGNMENT,
- MV_U3D_DMA_BOUNDARY);
-
- if (!u3d->trb_pool) {
- retval = -ENOMEM;
- goto err_alloc_trb_pool;
- }
-
- size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2;
- u3d->eps = kzalloc(size, GFP_KERNEL);
- if (!u3d->eps) {
- retval = -ENOMEM;
- goto err_alloc_eps;
- }
-
- /* initialize ep0 status request structure */
- u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL);
- if (!u3d->status_req) {
- retval = -ENOMEM;
- goto err_alloc_status_req;
- }
- INIT_LIST_HEAD(&u3d->status_req->queue);
-
- /* allocate a small amount of memory to get valid address */
- u3d->status_req->req.buf = (char *)u3d->status_req
- + sizeof(struct mv_u3d_req);
- u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf);
-
- u3d->resume_state = USB_STATE_NOTATTACHED;
- u3d->usb_state = USB_STATE_ATTACHED;
- u3d->ep0_dir = MV_U3D_EP_DIR_OUT;
- u3d->remote_wakeup = 0;
-
- r = platform_get_resource(dev, IORESOURCE_IRQ, 0);
- if (!r) {
- dev_err(&dev->dev, "no IRQ resource defined\n");
- retval = -ENODEV;
- goto err_get_irq;
- }
- u3d->irq = r->start;
-
- /* initialize gadget structure */
- u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
- u3d->gadget.ep0 = &u3d->eps[1].ep; /* gadget ep0 */
- INIT_LIST_HEAD(&u3d->gadget.ep_list); /* ep_list */
- u3d->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
-
- /* the "gadget" abstracts/virtualizes the controller */
- u3d->gadget.name = driver_name; /* gadget name */
-
- mv_u3d_eps_init(u3d);
-
- if (request_irq(u3d->irq, mv_u3d_irq,
- IRQF_SHARED, driver_name, u3d)) {
- dev_err(&dev->dev, "Request irq %d for u3d failed\n",
- u3d->irq);
- u3d->irq = 0;
- retval = -ENODEV;
- goto err_request_irq;
- }
-
- /* external vbus detection */
- if (u3d->vbus) {
- u3d->clock_gating = 1;
- dev_err(&dev->dev, "external vbus detection\n");
- }
-
- if (!u3d->clock_gating)
- u3d->vbus_active = 1;
-
- /* enable usb3 controller vbus detection */
- u3d->vbus_valid_detect = 1;
-
- retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget);
- if (retval)
- goto err_unregister;
-
- dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n",
- u3d->clock_gating ? "with" : "without");
-
- return 0;
-
-err_unregister:
- free_irq(u3d->irq, u3d);
-err_get_irq:
-err_request_irq:
- kfree(u3d->status_req);
-err_alloc_status_req:
- kfree(u3d->eps);
-err_alloc_eps:
- dma_pool_destroy(u3d->trb_pool);
-err_alloc_trb_pool:
- dma_free_coherent(&dev->dev, u3d->ep_context_size,
- u3d->ep_context, u3d->ep_context_dma);
-err_alloc_ep_context:
-err_phy_init:
-err_u3d_enable:
- iounmap(u3d->cap_regs);
-err_map_cap_regs:
-err_get_cap_regs:
- clk_put(u3d->clk);
-err_get_clk:
- kfree(u3d);
-err_alloc_private:
-err_pdata:
- return retval;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mv_u3d_suspend(struct device *dev)
-{
- struct mv_u3d *u3d = dev_get_drvdata(dev);
-
- /*
- * USB can suspend only when the cable is unplugged, so the
- * clock_gating == 1 case needs no handling here; it is covered
- * by the vbus session.
- */
- if (!u3d->clock_gating) {
- mv_u3d_controller_stop(u3d);
-
- spin_lock_irq(&u3d->lock);
- /* stop all usb activities */
- mv_u3d_stop_activity(u3d, u3d->driver);
- spin_unlock_irq(&u3d->lock);
-
- mv_u3d_disable(u3d);
- }
-
- return 0;
-}
-
-static int mv_u3d_resume(struct device *dev)
-{
- struct mv_u3d *u3d = dev_get_drvdata(dev);
- int retval;
-
- if (!u3d->clock_gating) {
- retval = mv_u3d_enable(u3d);
- if (retval)
- return retval;
-
- if (u3d->driver && u3d->softconnect) {
- mv_u3d_controller_reset(u3d);
- mv_u3d_ep0_reset(u3d);
- mv_u3d_controller_start(u3d);
- }
- }
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume);
-
-static void mv_u3d_shutdown(struct platform_device *dev)
-{
- struct mv_u3d *u3d = platform_get_drvdata(dev);
- u32 tmp;
-
- tmp = ioread32(&u3d->op_regs->usbcmd);
- tmp &= ~MV_U3D_CMD_RUN_STOP;
- iowrite32(tmp, &u3d->op_regs->usbcmd);
-}
-
-static struct platform_driver mv_u3d_driver = {
- .probe = mv_u3d_probe,
- .remove = mv_u3d_remove,
- .shutdown = mv_u3d_shutdown,
- .driver = {
- .name = "mv-u3d",
- .pm = &mv_u3d_pm_ops,
- },
-};
-
-module_platform_driver(mv_u3d_driver);
-MODULE_ALIAS("platform:mv-u3d");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/mv_udc.h b/drivers/usb/gadget/udc/mv_udc.h
deleted file mode 100644
index b3f759c0962c..000000000000
--- a/drivers/usb/gadget/udc/mv_udc.h
+++ /dev/null
@@ -1,309 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- */
-
-#ifndef __MV_UDC_H
-#define __MV_UDC_H
-
-#define VUSBHS_MAX_PORTS 8
-
-#define DQH_ALIGNMENT 2048
-#define DTD_ALIGNMENT 64
-#define DMA_BOUNDARY 4096
-
-#define EP_DIR_IN 1
-#define EP_DIR_OUT 0
-
-#define DMA_ADDR_INVALID (~(dma_addr_t)0)
-
-#define EP0_MAX_PKT_SIZE 64
-/* ep0 transfer state */
-#define WAIT_FOR_SETUP 0
-#define DATA_STATE_XMIT 1
-#define DATA_STATE_NEED_ZLP 2
-#define WAIT_FOR_OUT_STATUS 3
-#define DATA_STATE_RECV 4
-
-#define CAPLENGTH_MASK (0xff)
-#define DCCPARAMS_DEN_MASK (0x1f)
-
-#define HCSPARAMS_PPC (0x10)
-
-/* Frame Index Register Bit Masks */
-#define USB_FRINDEX_MASKS 0x3fff
-
-/* Command Register Bit Masks */
-#define USBCMD_RUN_STOP (0x00000001)
-#define USBCMD_CTRL_RESET (0x00000002)
-#define USBCMD_SETUP_TRIPWIRE_SET (0x00002000)
-#define USBCMD_SETUP_TRIPWIRE_CLEAR (~USBCMD_SETUP_TRIPWIRE_SET)
-
-#define USBCMD_ATDTW_TRIPWIRE_SET (0x00004000)
-#define USBCMD_ATDTW_TRIPWIRE_CLEAR (~USBCMD_ATDTW_TRIPWIRE_SET)
-
-/* bit 15,3,2 are for frame list size */
-#define USBCMD_FRAME_SIZE_1024 (0x00000000) /* 000 */
-#define USBCMD_FRAME_SIZE_512 (0x00000004) /* 001 */
-#define USBCMD_FRAME_SIZE_256 (0x00000008) /* 010 */
-#define USBCMD_FRAME_SIZE_128 (0x0000000C) /* 011 */
-#define USBCMD_FRAME_SIZE_64 (0x00008000) /* 100 */
-#define USBCMD_FRAME_SIZE_32 (0x00008004) /* 101 */
-#define USBCMD_FRAME_SIZE_16 (0x00008008) /* 110 */
-#define USBCMD_FRAME_SIZE_8 (0x0000800C) /* 111 */
-
-#define EPCTRL_TX_ALL_MASK (0xFFFF0000)
-#define EPCTRL_RX_ALL_MASK (0x0000FFFF)
-
-#define EPCTRL_TX_DATA_TOGGLE_RST (0x00400000)
-#define EPCTRL_TX_EP_STALL (0x00010000)
-#define EPCTRL_RX_EP_STALL (0x00000001)
-#define EPCTRL_RX_DATA_TOGGLE_RST (0x00000040)
-#define EPCTRL_RX_ENABLE (0x00000080)
-#define EPCTRL_TX_ENABLE (0x00800000)
-#define EPCTRL_CONTROL (0x00000000)
-#define EPCTRL_ISOCHRONOUS (0x00040000)
-#define EPCTRL_BULK (0x00080000)
-#define EPCTRL_INT (0x000C0000)
-#define EPCTRL_TX_TYPE (0x000C0000)
-#define EPCTRL_RX_TYPE (0x0000000C)
-#define EPCTRL_DATA_TOGGLE_INHIBIT (0x00000020)
-#define EPCTRL_TX_EP_TYPE_SHIFT (18)
-#define EPCTRL_RX_EP_TYPE_SHIFT (2)
-
-#define EPCOMPLETE_MAX_ENDPOINTS (16)
-
-/* endpoint list address bit masks */
-#define USB_EP_LIST_ADDRESS_MASK 0xfffff800
-
-#define PORTSCX_W1C_BITS 0x2a
-#define PORTSCX_PORT_RESET 0x00000100
-#define PORTSCX_PORT_POWER 0x00001000
-#define PORTSCX_FORCE_FULL_SPEED_CONNECT 0x01000000
-#define PORTSCX_PAR_XCVR_SELECT 0xC0000000
-#define PORTSCX_PORT_FORCE_RESUME 0x00000040
-#define PORTSCX_PORT_SUSPEND 0x00000080
-#define PORTSCX_PORT_SPEED_FULL 0x00000000
-#define PORTSCX_PORT_SPEED_LOW 0x04000000
-#define PORTSCX_PORT_SPEED_HIGH 0x08000000
-#define PORTSCX_PORT_SPEED_MASK 0x0C000000
-
-/* USB MODE Register Bit Masks */
-#define USBMODE_CTRL_MODE_IDLE 0x00000000
-#define USBMODE_CTRL_MODE_DEVICE 0x00000002
-#define USBMODE_CTRL_MODE_HOST 0x00000003
-#define USBMODE_CTRL_MODE_RSV 0x00000001
-#define USBMODE_SETUP_LOCK_OFF 0x00000008
-#define USBMODE_STREAM_DISABLE 0x00000010
-
-/* USB STS Register Bit Masks */
-#define USBSTS_INT 0x00000001
-#define USBSTS_ERR 0x00000002
-#define USBSTS_PORT_CHANGE 0x00000004
-#define USBSTS_FRM_LST_ROLL 0x00000008
-#define USBSTS_SYS_ERR 0x00000010
-#define USBSTS_IAA 0x00000020
-#define USBSTS_RESET 0x00000040
-#define USBSTS_SOF 0x00000080
-#define USBSTS_SUSPEND 0x00000100
-#define USBSTS_HC_HALTED 0x00001000
-#define USBSTS_RCL 0x00002000
-#define USBSTS_PERIODIC_SCHEDULE 0x00004000
-#define USBSTS_ASYNC_SCHEDULE 0x00008000
-
-
-/* Interrupt Enable Register Bit Masks */
-#define USBINTR_INT_EN (0x00000001)
-#define USBINTR_ERR_INT_EN (0x00000002)
-#define USBINTR_PORT_CHANGE_DETECT_EN (0x00000004)
-
-#define USBINTR_ASYNC_ADV_AAE (0x00000020)
-#define USBINTR_ASYNC_ADV_AAE_ENABLE (0x00000020)
-#define USBINTR_ASYNC_ADV_AAE_DISABLE (0xFFFFFFDF)
-
-#define USBINTR_RESET_EN (0x00000040)
-#define USBINTR_SOF_UFRAME_EN (0x00000080)
-#define USBINTR_DEVICE_SUSPEND (0x00000100)
-
-#define USB_DEVICE_ADDRESS_MASK (0xfe000000)
-#define USB_DEVICE_ADDRESS_BIT_SHIFT (25)
-
-struct mv_cap_regs {
- u32 caplength_hciversion;
- u32 hcsparams; /* HC structural parameters */
- u32 hccparams; /* HC Capability Parameters*/
- u32 reserved[5];
- u32 dciversion; /* DC version number and reserved 16 bits */
- u32 dccparams; /* DC Capability Parameters */
-};
-
-struct mv_op_regs {
- u32 usbcmd; /* Command register */
- u32 usbsts; /* Status register */
- u32 usbintr; /* Interrupt enable */
- u32 frindex; /* Frame index */
- u32 reserved1[1];
- u32 deviceaddr; /* Device Address */
- u32 eplistaddr; /* Endpoint List Address */
- u32 ttctrl; /* HOST TT status and control */
- u32 burstsize; /* Programmable Burst Size */
- u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */
- u32 reserved[4];
- u32 epnak; /* Endpoint NAK */
- u32 epnaken; /* Endpoint NAK Enable */
- u32 configflag; /* Configured Flag register */
- u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
- u32 otgsc;
- u32 usbmode; /* USB Host/Device mode */
- u32 epsetupstat; /* Endpoint Setup Status */
- u32 epprime; /* Endpoint Initialize */
- u32 epflush; /* Endpoint De-initialize */
- u32 epstatus; /* Endpoint Status */
- u32 epcomplete; /* Endpoint Interrupt On Complete */
- u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */
- u32 mcr; /* Mux Control */
- u32 isr; /* Interrupt Status */
- u32 ier; /* Interrupt Enable */
-};
-
-struct mv_udc {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- spinlock_t lock;
- struct completion *done;
- struct platform_device *dev;
- int irq;
-
- struct mv_cap_regs __iomem *cap_regs;
- struct mv_op_regs __iomem *op_regs;
- void __iomem *phy_regs;
- unsigned int max_eps;
- struct mv_dqh *ep_dqh;
- size_t ep_dqh_size;
- dma_addr_t ep_dqh_dma;
-
- struct dma_pool *dtd_pool;
- struct mv_ep *eps;
-
- struct mv_dtd *dtd_head;
- struct mv_dtd *dtd_tail;
- unsigned int dtd_entries;
-
- struct mv_req *status_req;
- struct usb_ctrlrequest local_setup_buff;
-
- unsigned int resume_state; /* USB state to resume */
- unsigned int usb_state; /* USB current state */
- unsigned int ep0_state; /* Endpoint zero state */
- unsigned int ep0_dir;
-
- unsigned int dev_addr;
- unsigned int test_mode;
-
- int errors;
- unsigned softconnect:1,
- vbus_active:1,
- remote_wakeup:1,
- softconnected:1,
- force_fs:1,
- clock_gating:1,
- active:1,
- stopped:1; /* stop bit is setted */
-
- struct work_struct vbus_work;
- struct workqueue_struct *qwork;
-
- struct usb_phy *transceiver;
-
- struct mv_usb_platform_data *pdata;
-
- /* some SoCs have multiple clock sources for USB */
- struct clk *clk;
-};
-
-/* endpoint data structure */
-struct mv_ep {
- struct usb_ep ep;
- struct mv_udc *udc;
- struct list_head queue;
- struct mv_dqh *dqh;
- u32 direction;
- char name[14];
- unsigned stopped:1,
- wedge:1,
- ep_type:2,
- ep_num:8;
-};
-
-/* request data structure */
-struct mv_req {
- struct usb_request req;
- struct mv_dtd *dtd, *head, *tail;
- struct mv_ep *ep;
- struct list_head queue;
- unsigned int test_mode;
- unsigned dtd_count;
- unsigned mapped:1;
-};
-
-#define EP_QUEUE_HEAD_MULT_POS 30
-#define EP_QUEUE_HEAD_ZLT_SEL 0x20000000
-#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS 16
-#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info) (((ep_info)>>16)&0x07ff)
-#define EP_QUEUE_HEAD_IOS 0x00008000
-#define EP_QUEUE_HEAD_NEXT_TERMINATE 0x00000001
-#define EP_QUEUE_HEAD_IOC 0x00008000
-#define EP_QUEUE_HEAD_MULTO 0x00000C00
-#define EP_QUEUE_HEAD_STATUS_HALT 0x00000040
-#define EP_QUEUE_HEAD_STATUS_ACTIVE 0x00000080
-#define EP_QUEUE_CURRENT_OFFSET_MASK 0x00000FFF
-#define EP_QUEUE_HEAD_NEXT_POINTER_MASK 0xFFFFFFE0
-#define EP_QUEUE_FRINDEX_MASK 0x000007FF
-#define EP_MAX_LENGTH_TRANSFER 0x4000
-
-struct mv_dqh {
- /* Bits 16..26 Bit 15 is Interrupt On Setup */
- u32 max_packet_length;
- u32 curr_dtd_ptr; /* Current dTD Pointer */
- u32 next_dtd_ptr; /* Next dTD Pointer */
- /* Total bytes (16..30), IOC (15), INT (8), STS (0-7) */
- u32 size_ioc_int_sts;
- u32 buff_ptr0; /* Buffer pointer Page 0 (12-31) */
- u32 buff_ptr1; /* Buffer pointer Page 1 (12-31) */
- u32 buff_ptr2; /* Buffer pointer Page 2 (12-31) */
- u32 buff_ptr3; /* Buffer pointer Page 3 (12-31) */
- u32 buff_ptr4; /* Buffer pointer Page 4 (12-31) */
- u32 reserved1;
- /* 8 bytes of setup data that follows the Setup PID */
- u8 setup_buffer[8];
- u32 reserved2[4];
-};
-
-
-#define DTD_NEXT_TERMINATE (0x00000001)
-#define DTD_IOC (0x00008000)
-#define DTD_STATUS_ACTIVE (0x00000080)
-#define DTD_STATUS_HALTED (0x00000040)
-#define DTD_STATUS_DATA_BUFF_ERR (0x00000020)
-#define DTD_STATUS_TRANSACTION_ERR (0x00000008)
-#define DTD_RESERVED_FIELDS (0x00007F00)
-#define DTD_ERROR_MASK (0x68)
-#define DTD_ADDR_MASK (0xFFFFFFE0)
-#define DTD_PACKET_SIZE 0x7FFF0000
-#define DTD_LENGTH_BIT_POS (16)
-
-struct mv_dtd {
- u32 dtd_next;
- u32 size_ioc_sts;
- u32 buff_ptr0; /* Buffer pointer Page 0 */
- u32 buff_ptr1; /* Buffer pointer Page 1 */
- u32 buff_ptr2; /* Buffer pointer Page 2 */
- u32 buff_ptr3; /* Buffer pointer Page 3 */
- u32 buff_ptr4; /* Buffer pointer Page 4 */
- u32 scratch_ptr;
- /* 32 bytes */
- dma_addr_t td_dma; /* dma address for this td */
- struct mv_dtd *next_dtd_virt;
-};
-
-#endif
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
deleted file mode 100644
index ff103e6b3048..000000000000
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ /dev/null
@@ -1,2426 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- * Author: Chao Xie <chao.xie@marvell.com>
- * Neil Zhang <zhangwm@marvell.com>
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/timer.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/otg.h>
-#include <linux/pm.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/platform_data/mv_usb.h>
-#include <linux/unaligned.h>
-
-#include "mv_udc.h"
-
-#define DRIVER_DESC "Marvell PXA USB Device Controller driver"
-
-#define ep_dir(ep) (((ep)->ep_num == 0) ? \
- ((ep)->udc->ep0_dir) : ((ep)->direction))
-
-/* timeout value -- usec */
-#define RESET_TIMEOUT 10000
-#define FLUSH_TIMEOUT 10000
-#define EPSTATUS_TIMEOUT 10000
-#define PRIME_TIMEOUT 10000
-#define READSAFE_TIMEOUT 1000
-
-#define LOOPS_USEC_SHIFT 1
-#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT)
-#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT)
-
-static DECLARE_COMPLETION(release_done);
-
-static const char driver_name[] = "mv_udc";
-
-static void nuke(struct mv_ep *ep, int status);
-static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
-
-/* for endpoint 0 operations */
-static const struct usb_endpoint_descriptor mv_ep0_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 0,
- .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
- .wMaxPacketSize = EP0_MAX_PKT_SIZE,
-};
-
-static void ep0_reset(struct mv_udc *udc)
-{
- struct mv_ep *ep;
- u32 epctrlx;
- int i = 0;
-
- /* ep0 in and out */
- for (i = 0; i < 2; i++) {
- ep = &udc->eps[i];
- ep->udc = udc;
-
- /* ep0 dQH */
- ep->dqh = &udc->ep_dqh[i];
-
- /* configure ep0 endpoint capabilities in dQH */
- ep->dqh->max_packet_length =
- (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
- | EP_QUEUE_HEAD_IOS;
-
- ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
-
- epctrlx = readl(&udc->op_regs->epctrlx[0]);
- if (i) { /* TX */
- epctrlx |= EPCTRL_TX_ENABLE
- | (USB_ENDPOINT_XFER_CONTROL
- << EPCTRL_TX_EP_TYPE_SHIFT);
-
- } else { /* RX */
- epctrlx |= EPCTRL_RX_ENABLE
- | (USB_ENDPOINT_XFER_CONTROL
- << EPCTRL_RX_EP_TYPE_SHIFT);
- }
-
- writel(epctrlx, &udc->op_regs->epctrlx[0]);
- }
-}
-
-/* protocol ep0 stall, will automatically be cleared on new transaction */
-static void ep0_stall(struct mv_udc *udc)
-{
- u32 epctrlx;
-
- /* set TX and RX to stall */
- epctrlx = readl(&udc->op_regs->epctrlx[0]);
- epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
- writel(epctrlx, &udc->op_regs->epctrlx[0]);
-
- /* update ep0 state */
- udc->ep0_state = WAIT_FOR_SETUP;
- udc->ep0_dir = EP_DIR_OUT;
-}
-
-static int process_ep_req(struct mv_udc *udc, int index,
- struct mv_req *curr_req)
-{
- struct mv_dtd *curr_dtd;
- struct mv_dqh *curr_dqh;
- int actual, remaining_length;
- int i, direction;
- int retval = 0;
- u32 errors;
- u32 bit_pos;
-
- curr_dqh = &udc->ep_dqh[index];
- direction = index % 2;
-
- curr_dtd = curr_req->head;
- actual = curr_req->req.length;
-
- for (i = 0; i < curr_req->dtd_count; i++) {
- if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
- dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
- udc->eps[index].name);
- return 1;
- }
-
- errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
- if (!errors) {
- remaining_length =
- (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
- >> DTD_LENGTH_BIT_POS;
- actual -= remaining_length;
-
- if (remaining_length) {
- if (direction) {
- dev_dbg(&udc->dev->dev,
- "TX dTD remains data\n");
- retval = -EPROTO;
- break;
- } else
- break;
- }
- } else {
- dev_info(&udc->dev->dev,
- "complete_tr error: ep=%d %s: error = 0x%x\n",
- index >> 1, direction ? "SEND" : "RECV",
- errors);
- if (errors & DTD_STATUS_HALTED) {
- /* Clear the errors and Halt condition */
- curr_dqh->size_ioc_int_sts &= ~errors;
- retval = -EPIPE;
- } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
- retval = -EPROTO;
- } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
- retval = -EILSEQ;
- }
- }
- if (i != curr_req->dtd_count - 1)
- curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
- }
- if (retval)
- return retval;
-
- if (direction == EP_DIR_OUT)
- bit_pos = 1 << curr_req->ep->ep_num;
- else
- bit_pos = 1 << (16 + curr_req->ep->ep_num);
-
- while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
- if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
- while (readl(&udc->op_regs->epstatus) & bit_pos)
- udelay(1);
- break;
- }
- udelay(1);
- }
-
- curr_req->req.actual = actual;
-
- return 0;
-}
-
-/*
- * done() - retire a request; caller blocked irqs
- * @status : request status to be set, only works when
- * request is still in progress.
- */
-static void done(struct mv_ep *ep, struct mv_req *req, int status)
- __releases(&ep->udc->lock)
- __acquires(&ep->udc->lock)
-{
- struct mv_udc *udc = NULL;
- unsigned char stopped = ep->stopped;
- struct mv_dtd *curr_td, *next_td;
- int j;
-
- udc = (struct mv_udc *)ep->udc;
- /* Remove the req from ep->queue */
- list_del_init(&req->queue);
-
- /* req.status should be set as -EINPROGRESS in ep_queue() */
- if (req->req.status == -EINPROGRESS)
- req->req.status = status;
- else
- status = req->req.status;
-
- /* Free dtd for the request */
- next_td = req->head;
- for (j = 0; j < req->dtd_count; j++) {
- curr_td = next_td;
- if (j != req->dtd_count - 1)
- next_td = curr_td->next_dtd_virt;
- dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
- }
-
- usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
-
- if (status && (status != -ESHUTDOWN))
- dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
- ep->ep.name, &req->req, status,
- req->req.actual, req->req.length);
-
- ep->stopped = 1;
-
- spin_unlock(&ep->udc->lock);
-
- usb_gadget_giveback_request(&ep->ep, &req->req);
-
- spin_lock(&ep->udc->lock);
- ep->stopped = stopped;
-}
-
-static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
-{
- struct mv_udc *udc;
- struct mv_dqh *dqh;
- u32 bit_pos, direction;
- u32 usbcmd, epstatus;
- unsigned int loops;
- int retval = 0;
-
- udc = ep->udc;
- direction = ep_dir(ep);
- dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
- bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
-
- /* check if the pipe is empty */
- if (!(list_empty(&ep->queue))) {
- struct mv_req *lastreq;
- lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
- lastreq->tail->dtd_next =
- req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
-
- wmb();
-
- if (readl(&udc->op_regs->epprime) & bit_pos)
- goto done;
-
- loops = LOOPS(READSAFE_TIMEOUT);
- while (1) {
- /* start with setting the semaphores */
- usbcmd = readl(&udc->op_regs->usbcmd);
- usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
- writel(usbcmd, &udc->op_regs->usbcmd);
-
- /* read the endpoint status */
- epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
-
- /*
- * Re-read the ATDTW semaphore bit to check whether it was
- * cleared. When the hardware sees a hazard it clears the
- * bit; otherwise it stays set and we can proceed with
- * priming the endpoint if it is not already primed.
- */
- if (readl(&udc->op_regs->usbcmd)
- & USBCMD_ATDTW_TRIPWIRE_SET)
- break;
-
- loops--;
- if (loops == 0) {
- dev_err(&udc->dev->dev,
- "Timeout for ATDTW_TRIPWIRE...\n");
- retval = -ETIME;
- goto done;
- }
- udelay(LOOPS_USEC);
- }
-
- /* Clear the semaphore */
- usbcmd = readl(&udc->op_regs->usbcmd);
- usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
- writel(usbcmd, &udc->op_regs->usbcmd);
-
- if (epstatus)
- goto done;
- }
-
- /* Write dQH next pointer and terminate bit to 0 */
- dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
-
- /* clear active and halt bit, in case set from a previous error */
- dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
-
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
-
- /* Prime the Endpoint */
- writel(bit_pos, &udc->op_regs->epprime);
-
-done:
- return retval;
-}
-
-static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
- dma_addr_t *dma, int *is_last)
-{
- struct mv_dtd *dtd;
- struct mv_udc *udc;
- struct mv_dqh *dqh;
- u32 temp, mult = 0;
-
- /* how big will this transfer be? */
- if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
- dqh = req->ep->dqh;
- mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
- & 0x3;
- *length = min(req->req.length - req->req.actual,
- (unsigned)(mult * req->ep->ep.maxpacket));
- } else
- *length = min(req->req.length - req->req.actual,
- (unsigned)EP_MAX_LENGTH_TRANSFER);
-
- udc = req->ep->udc;
-
- /*
- * Be careful that no __GFP_HIGHMEM is set,
- * or we cannot use dma_to_virt
- */
- dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
- if (dtd == NULL)
- return dtd;
-
- dtd->td_dma = *dma;
- /* initialize buffer page pointers */
- temp = (u32)(req->req.dma + req->req.actual);
- dtd->buff_ptr0 = cpu_to_le32(temp);
- temp &= ~0xFFF;
- dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
- dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
- dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
- dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
-
- req->req.actual += *length;
-
- /* zlp is needed if req->req.zero is set */
- if (req->req.zero) {
- if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
- *is_last = 1;
- else
- *is_last = 0;
- } else if (req->req.length == req->req.actual)
- *is_last = 1;
- else
- *is_last = 0;
-
- /* Fill in the transfer size; set active bit */
- temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
-
- /* Enable interrupt for the last dtd of a request */
- if (*is_last && !req->req.no_interrupt)
- temp |= DTD_IOC;
-
- temp |= mult << 10;
-
- dtd->size_ioc_sts = temp;
-
- mb();
-
- return dtd;
-}
-
-/* generate dTD linked list for a request */
-static int req_to_dtd(struct mv_req *req)
-{
- unsigned count;
- int is_last, is_first = 1;
- struct mv_dtd *dtd, *last_dtd = NULL;
- dma_addr_t dma;
-
- do {
- dtd = build_dtd(req, &count, &dma, &is_last);
- if (dtd == NULL)
- return -ENOMEM;
-
- if (is_first) {
- is_first = 0;
- req->head = dtd;
- } else {
- last_dtd->dtd_next = dma;
- last_dtd->next_dtd_virt = dtd;
- }
- last_dtd = dtd;
- req->dtd_count++;
- } while (!is_last);
-
- /* set terminate bit to 1 for the last dTD */
- dtd->dtd_next = DTD_NEXT_TERMINATE;
-
- req->tail = dtd;
-
- return 0;
-}
-
-static int mv_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct mv_udc *udc;
- struct mv_ep *ep;
- struct mv_dqh *dqh;
- u16 max = 0;
- u32 bit_pos, epctrlx, direction;
- const unsigned char zlt = 1;
- unsigned char ios, mult;
- unsigned long flags;
-
- ep = container_of(_ep, struct mv_ep, ep);
- udc = ep->udc;
-
- if (!_ep || !desc
- || desc->bDescriptorType != USB_DT_ENDPOINT)
- return -EINVAL;
-
- if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- direction = ep_dir(ep);
- max = usb_endpoint_maxp(desc);
-
- /*
- * disable HW zero length termination select
- * driver handles zero length packet through req->req.zero
- */
- bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
-
- /* Check if the Endpoint is Primed */
- if ((readl(&udc->op_regs->epprime) & bit_pos)
- || (readl(&udc->op_regs->epstatus) & bit_pos)) {
- dev_info(&udc->dev->dev,
- "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
- " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
- (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
- (unsigned)readl(&udc->op_regs->epprime),
- (unsigned)readl(&udc->op_regs->epstatus),
- (unsigned)bit_pos);
- goto en_done;
- }
-
- /* Set the max packet length, interrupt on Setup and Mult fields */
- ios = 0;
- mult = 0;
- switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
- case USB_ENDPOINT_XFER_BULK:
- case USB_ENDPOINT_XFER_INT:
- break;
- case USB_ENDPOINT_XFER_CONTROL:
- ios = 1;
- break;
- case USB_ENDPOINT_XFER_ISOC:
- /* Calculate transactions needed for high bandwidth iso */
- mult = usb_endpoint_maxp_mult(desc);
- /* 3 transactions at most */
- if (mult > 3)
- goto en_done;
- break;
- default:
- goto en_done;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
- /* Get the endpoint queue head address */
- dqh = ep->dqh;
- dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
- | (mult << EP_QUEUE_HEAD_MULT_POS)
- | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
- | (ios ? EP_QUEUE_HEAD_IOS : 0);
- dqh->next_dtd_ptr = 1;
- dqh->size_ioc_int_sts = 0;
-
- ep->ep.maxpacket = max;
- ep->ep.desc = desc;
- ep->stopped = 0;
-
- /* Enable the endpoint for Rx or Tx and set the endpoint type */
- epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
- if (direction == EP_DIR_IN) {
- epctrlx &= ~EPCTRL_TX_ALL_MASK;
- epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
- | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- << EPCTRL_TX_EP_TYPE_SHIFT);
- } else {
- epctrlx &= ~EPCTRL_RX_ALL_MASK;
- epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
- | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- << EPCTRL_RX_EP_TYPE_SHIFT);
- }
- writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
-
- /*
- * Implement Guideline (GL# USB-7): the unused endpoint type must
- * be programmed to bulk.
- */
- epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
- if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
- epctrlx |= (USB_ENDPOINT_XFER_BULK
- << EPCTRL_RX_EP_TYPE_SHIFT);
- writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
- }
-
- epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
- if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
- epctrlx |= (USB_ENDPOINT_XFER_BULK
- << EPCTRL_TX_EP_TYPE_SHIFT);
- writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-en_done:
- return -EINVAL;
-}
-
-static int mv_ep_disable(struct usb_ep *_ep)
-{
- struct mv_udc *udc;
- struct mv_ep *ep;
- struct mv_dqh *dqh;
- u32 epctrlx, direction;
- unsigned long flags;
-
- ep = container_of(_ep, struct mv_ep, ep);
- if ((_ep == NULL) || !ep->ep.desc)
- return -EINVAL;
-
- udc = ep->udc;
-
- /* Get the endpoint queue head address */
- dqh = ep->dqh;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- direction = ep_dir(ep);
-
- /* Reset the max packet length and the interrupt on Setup */
- dqh->max_packet_length = 0;
-
- /* Disable the endpoint for Rx or Tx and reset the endpoint type */
- epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
- epctrlx &= ~((direction == EP_DIR_IN)
- ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
- : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
- writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
-
- /* nuke all pending requests (does flush) */
- nuke(ep, -ESHUTDOWN);
-
- ep->ep.desc = NULL;
- ep->stopped = 1;
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-static struct usb_request *
-mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
-{
- struct mv_req *req;
-
- req = kzalloc(sizeof *req, gfp_flags);
- if (!req)
- return NULL;
-
- req->req.dma = DMA_ADDR_INVALID;
- INIT_LIST_HEAD(&req->queue);
-
- return &req->req;
-}
-
-static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct mv_req *req = NULL;
-
- req = container_of(_req, struct mv_req, req);
-
- if (_req)
- kfree(req);
-}
-
-static void mv_ep_fifo_flush(struct usb_ep *_ep)
-{
- struct mv_udc *udc;
- u32 bit_pos, direction;
- struct mv_ep *ep;
- unsigned int loops;
-
- if (!_ep)
- return;
-
- ep = container_of(_ep, struct mv_ep, ep);
- if (!ep->ep.desc)
- return;
-
- udc = ep->udc;
- direction = ep_dir(ep);
-
- if (ep->ep_num == 0)
- bit_pos = (1 << 16) | 1;
- else if (direction == EP_DIR_OUT)
- bit_pos = 1 << ep->ep_num;
- else
- bit_pos = 1 << (16 + ep->ep_num);
-
- loops = LOOPS(EPSTATUS_TIMEOUT);
- do {
- unsigned int inter_loops;
-
- if (loops == 0) {
- dev_err(&udc->dev->dev,
- "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
- (unsigned)readl(&udc->op_regs->epstatus),
- (unsigned)bit_pos);
- return;
- }
- /* Write 1 to the Flush register */
- writel(bit_pos, &udc->op_regs->epflush);
-
- /* Wait until flushing completed */
- inter_loops = LOOPS(FLUSH_TIMEOUT);
- while (readl(&udc->op_regs->epflush)) {
- /*
- * ENDPTFLUSH bit should be cleared to indicate this
- * operation is complete
- */
- if (inter_loops == 0) {
- dev_err(&udc->dev->dev,
- "TIMEOUT for ENDPTFLUSH=0x%x,"
- "bit_pos=0x%x\n",
- (unsigned)readl(&udc->op_regs->epflush),
- (unsigned)bit_pos);
- return;
- }
- inter_loops--;
- udelay(LOOPS_USEC);
- }
- loops--;
- } while (readl(&udc->op_regs->epstatus) & bit_pos);
-}
-
-/* queues (submits) an I/O request to an endpoint */
-static int
-mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
-{
- struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
- struct mv_req *req = container_of(_req, struct mv_req, req);
- struct mv_udc *udc = ep->udc;
- unsigned long flags;
- int retval;
-
- /* catch various bogus parameters */
- if (!_req || !req->req.complete || !req->req.buf
- || !list_empty(&req->queue)) {
- dev_err(&udc->dev->dev, "%s, bad params", __func__);
- return -EINVAL;
- }
- if (unlikely(!_ep || !ep->ep.desc)) {
- dev_err(&udc->dev->dev, "%s, bad ep", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- req->ep = ep;
-
- /* map virtual address to hardware */
- retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
- if (retval)
- return retval;
-
- req->req.status = -EINPROGRESS;
- req->req.actual = 0;
- req->dtd_count = 0;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- /* build dtds and push them to device queue */
- if (!req_to_dtd(req)) {
- retval = queue_dtd(ep, req);
- if (retval) {
- spin_unlock_irqrestore(&udc->lock, flags);
- dev_err(&udc->dev->dev, "Failed to queue dtd\n");
- goto err_unmap_dma;
- }
- } else {
- spin_unlock_irqrestore(&udc->lock, flags);
- dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
- retval = -ENOMEM;
- goto err_unmap_dma;
- }
-
- /* Update ep0 state */
- if (ep->ep_num == 0)
- udc->ep0_state = DATA_STATE_XMIT;
-
- /* irq handler advances the queue */
- list_add_tail(&req->queue, &ep->queue);
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-
-err_unmap_dma:
- usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
-
- return retval;
-}
-
-static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
-{
- struct mv_dqh *dqh = ep->dqh;
- u32 bit_pos;
-
- /* Write dQH next pointer and terminate bit to 0 */
- dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
-
- /* clear active and halt bit, in case set from a previous error */
- dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
-
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
-
- bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
-
- /* Prime the Endpoint */
- writel(bit_pos, &ep->udc->op_regs->epprime);
-}
-
-/* dequeues (cancels, unlinks) an I/O request from an endpoint */
-static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
- struct mv_req *req = NULL, *iter;
- struct mv_udc *udc = ep->udc;
- unsigned long flags;
- int stopped, ret = 0;
- u32 epctrlx;
-
- if (!_ep || !_req)
- return -EINVAL;
-
- spin_lock_irqsave(&ep->udc->lock, flags);
- stopped = ep->stopped;
-
- /* Stop the ep before we deal with the queue */
- ep->stopped = 1;
- epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
- if (ep_dir(ep) == EP_DIR_IN)
- epctrlx &= ~EPCTRL_TX_ENABLE;
- else
- epctrlx &= ~EPCTRL_RX_ENABLE;
- writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
-
- /* make sure it's actually queued on this endpoint */
- list_for_each_entry(iter, &ep->queue, queue) {
- if (&iter->req != _req)
- continue;
- req = iter;
- break;
- }
- if (!req) {
- ret = -EINVAL;
- goto out;
- }
-
- /* The request is in progress, or completed but not dequeued */
- if (ep->queue.next == &req->queue) {
- _req->status = -ECONNRESET;
- mv_ep_fifo_flush(_ep); /* flush current transfer */
-
- /* The request isn't the last request in this ep queue */
- if (req->queue.next != &ep->queue) {
- struct mv_req *next_req;
-
- next_req = list_entry(req->queue.next,
- struct mv_req, queue);
-
- /* Point the QH to the first TD of next request */
- mv_prime_ep(ep, next_req);
- } else {
- struct mv_dqh *qh;
-
- qh = ep->dqh;
- qh->next_dtd_ptr = 1;
- qh->size_ioc_int_sts = 0;
- }
-
- /* The request hasn't been processed, patch up the TD chain */
- } else {
- struct mv_req *prev_req;
-
- prev_req = list_entry(req->queue.prev, struct mv_req, queue);
- writel(readl(&req->tail->dtd_next),
- &prev_req->tail->dtd_next);
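- /* the dequeued request's dTDs are now unlinked from the chain */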
-
- }
-
- done(ep, req, -ECONNRESET);
-
- /* Enable EP */
-out:
- epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
- if (ep_dir(ep) == EP_DIR_IN)
- epctrlx |= EPCTRL_TX_ENABLE;
- else
- epctrlx |= EPCTRL_RX_ENABLE;
- writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
- ep->stopped = stopped;
-
- spin_unlock_irqrestore(&ep->udc->lock, flags);
- return ret;
-}
-
-static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
-{
- u32 epctrlx;
-
- epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
-
- if (stall) {
- if (direction == EP_DIR_IN)
- epctrlx |= EPCTRL_TX_EP_STALL;
- else
- epctrlx |= EPCTRL_RX_EP_STALL;
- } else {
- if (direction == EP_DIR_IN) {
- epctrlx &= ~EPCTRL_TX_EP_STALL;
- epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
- } else {
- epctrlx &= ~EPCTRL_RX_EP_STALL;
- epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
- }
- }
- writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
-}
-
-static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
-{
- u32 epctrlx;
-
- epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
-
- if (direction == EP_DIR_OUT)
- return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
- else
- return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
-}
-
-static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
-{
- struct mv_ep *ep;
- unsigned long flags;
- int status = 0;
- struct mv_udc *udc;
-
- ep = container_of(_ep, struct mv_ep, ep);
- udc = ep->udc;
- if (!_ep || !ep->ep.desc) {
- status = -EINVAL;
- goto out;
- }
-
- if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
- status = -EOPNOTSUPP;
- goto out;
- }
-
- /*
- * Attempts to halt an IN endpoint will fail if any transfer
- * requests are still queued.
- */
- if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
- status = -EAGAIN;
- goto out;
- }
-
- spin_lock_irqsave(&ep->udc->lock, flags);
- ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
- if (halt && wedge)
- ep->wedge = 1;
- else if (!halt)
- ep->wedge = 0;
- spin_unlock_irqrestore(&ep->udc->lock, flags);
-
- if (ep->ep_num == 0) {
- udc->ep0_state = WAIT_FOR_SETUP;
- udc->ep0_dir = EP_DIR_OUT;
- }
-out:
- return status;
-}
-
-static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
-{
- return mv_ep_set_halt_wedge(_ep, halt, 0);
-}
-
-static int mv_ep_set_wedge(struct usb_ep *_ep)
-{
- return mv_ep_set_halt_wedge(_ep, 1, 1);
-}
-
-static const struct usb_ep_ops mv_ep_ops = {
- .enable = mv_ep_enable,
- .disable = mv_ep_disable,
-
- .alloc_request = mv_alloc_request,
- .free_request = mv_free_request,
-
- .queue = mv_ep_queue,
- .dequeue = mv_ep_dequeue,
-
- .set_wedge = mv_ep_set_wedge,
- .set_halt = mv_ep_set_halt,
- .fifo_flush = mv_ep_fifo_flush, /* flush fifo */
-};
-
-static int udc_clock_enable(struct mv_udc *udc)
-{
- return clk_prepare_enable(udc->clk);
-}
-
-static void udc_clock_disable(struct mv_udc *udc)
-{
- clk_disable_unprepare(udc->clk);
-}
-
-static void udc_stop(struct mv_udc *udc)
-{
- u32 tmp;
-
- /* Disable interrupts */
- tmp = readl(&udc->op_regs->usbintr);
- tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
- USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
- writel(tmp, &udc->op_regs->usbintr);
-
- udc->stopped = 1;
-
- /* Clear the Run/Stop bit in the command register to stop the controller */
- tmp = readl(&udc->op_regs->usbcmd);
- tmp &= ~USBCMD_RUN_STOP;
- writel(tmp, &udc->op_regs->usbcmd);
-}
-
-static void udc_start(struct mv_udc *udc)
-{
- u32 usbintr;
-
- usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
- | USBINTR_PORT_CHANGE_DETECT_EN
- | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
- /* Enable interrupts */
- writel(usbintr, &udc->op_regs->usbintr);
-
- udc->stopped = 0;
-
- /* Set the Run bit in the command register */
- writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
-}
-
-static int udc_reset(struct mv_udc *udc)
-{
- unsigned int loops;
- u32 tmp, portsc;
-
- /* Stop the controller */
- tmp = readl(&udc->op_regs->usbcmd);
- tmp &= ~USBCMD_RUN_STOP;
- writel(tmp, &udc->op_regs->usbcmd);
-
- /* Reset the controller to get default values */
- writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
-
- /* wait for reset to complete */
- loops = LOOPS(RESET_TIMEOUT);
- while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
- if (loops == 0) {
- dev_err(&udc->dev->dev,
- "Wait for RESET completed TIMEOUT\n");
- return -ETIMEDOUT;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
-
- /* set controller to device mode */
- tmp = readl(&udc->op_regs->usbmode);
- tmp |= USBMODE_CTRL_MODE_DEVICE;
-
- /* turn setup lockout off, require setup tripwire in usbcmd */
- tmp |= USBMODE_SETUP_LOCK_OFF;
-
- writel(tmp, &udc->op_regs->usbmode);
-
- writel(0x0, &udc->op_regs->epsetupstat);
-
- /* Configure the Endpoint List Address */
- writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
- &udc->op_regs->eplistaddr);
-
- portsc = readl(&udc->op_regs->portsc[0]);
- if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
- portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);
-
- if (udc->force_fs)
- portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
- else
- portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
-
- writel(portsc, &udc->op_regs->portsc[0]);
-
- tmp = readl(&udc->op_regs->epctrlx[0]);
- tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
- writel(tmp, &udc->op_regs->epctrlx[0]);
-
- return 0;
-}
-
-static int mv_udc_enable_internal(struct mv_udc *udc)
-{
- int retval;
-
- if (udc->active)
- return 0;
-
- dev_dbg(&udc->dev->dev, "enable udc\n");
- retval = udc_clock_enable(udc);
- if (retval)
- return retval;
-
- if (udc->pdata->phy_init) {
- retval = udc->pdata->phy_init(udc->phy_regs);
- if (retval) {
- dev_err(&udc->dev->dev,
- "init phy error %d\n", retval);
- udc_clock_disable(udc);
- return retval;
- }
- }
- udc->active = 1;
-
- return 0;
-}
-
-static int mv_udc_enable(struct mv_udc *udc)
-{
- if (udc->clock_gating)
- return mv_udc_enable_internal(udc);
-
- return 0;
-}
-
-static void mv_udc_disable_internal(struct mv_udc *udc)
-{
- if (udc->active) {
- dev_dbg(&udc->dev->dev, "disable udc\n");
- if (udc->pdata->phy_deinit)
- udc->pdata->phy_deinit(udc->phy_regs);
- udc_clock_disable(udc);
- udc->active = 0;
- }
-}
-
-static void mv_udc_disable(struct mv_udc *udc)
-{
- if (udc->clock_gating)
- mv_udc_disable_internal(udc);
-}
-
-static int mv_udc_get_frame(struct usb_gadget *gadget)
-{
- struct mv_udc *udc;
- u16 retval;
-
- if (!gadget)
- return -ENODEV;
-
- udc = container_of(gadget, struct mv_udc, gadget);
-
- retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
-
- return retval;
-}
-
-/* Tries to wake up the host connected to this gadget */
-static int mv_udc_wakeup(struct usb_gadget *gadget)
-{
- struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
- u32 portsc;
-
- /* Remote wakeup feature not enabled by host */
- if (!udc->remote_wakeup)
- return -ENOTSUPP;
-
- portsc = readl(&udc->op_regs->portsc);
- /* not suspended? */
- if (!(portsc & PORTSCX_PORT_SUSPEND))
- return 0;
- /* trigger force resume */
- portsc |= PORTSCX_PORT_FORCE_RESUME;
- writel(portsc, &udc->op_regs->portsc[0]);
- return 0;
-}
-
-static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
-{
- struct mv_udc *udc;
- unsigned long flags;
- int retval = 0;
-
- udc = container_of(gadget, struct mv_udc, gadget);
- spin_lock_irqsave(&udc->lock, flags);
-
- udc->vbus_active = (is_active != 0);
-
- dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
- __func__, udc->softconnect, udc->vbus_active);
-
- if (udc->driver && udc->softconnect && udc->vbus_active) {
- retval = mv_udc_enable(udc);
- if (retval == 0) {
- /* the clock was disabled, so re-initialize the registers */
- udc_reset(udc);
- ep0_reset(udc);
- udc_start(udc);
- }
- } else if (udc->driver && udc->softconnect) {
- if (!udc->active)
- goto out;
-
- /* stop all the transfers in queue */
- stop_activity(udc, udc->driver);
- udc_stop(udc);
- mv_udc_disable(udc);
- }
-
-out:
- spin_unlock_irqrestore(&udc->lock, flags);
- return retval;
-}
-
-static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
-{
- struct mv_udc *udc;
- unsigned long flags;
- int retval = 0;
-
- udc = container_of(gadget, struct mv_udc, gadget);
- spin_lock_irqsave(&udc->lock, flags);
-
- udc->softconnect = (is_on != 0);
-
- dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
- __func__, udc->softconnect, udc->vbus_active);
-
- if (udc->driver && udc->softconnect && udc->vbus_active) {
- retval = mv_udc_enable(udc);
- if (retval == 0) {
- /* the clock was disabled, so re-initialize the registers */
- udc_reset(udc);
- ep0_reset(udc);
- udc_start(udc);
- }
- } else if (udc->driver && udc->vbus_active) {
- /* stop all the transfers in queue */
- stop_activity(udc, udc->driver);
- udc_stop(udc);
- mv_udc_disable(udc);
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
- return retval;
-}
-
-static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
-static int mv_udc_stop(struct usb_gadget *);
-/* device controller usb_gadget_ops structure */
-static const struct usb_gadget_ops mv_ops = {
-
- /* returns the current frame number */
- .get_frame = mv_udc_get_frame,
-
- /* tries to wake up the host connected to this gadget */
- .wakeup = mv_udc_wakeup,
-
- /* notify controller that VBUS is powered or not */
- .vbus_session = mv_udc_vbus_session,
-
- /* D+ pullup, software-controlled connect/disconnect to USB host */
- .pullup = mv_udc_pullup,
- .udc_start = mv_udc_start,
- .udc_stop = mv_udc_stop,
-};
-
-static int eps_init(struct mv_udc *udc)
-{
- struct mv_ep *ep;
- char name[14];
- int i;
-
- /* initialize ep0 */
- ep = &udc->eps[0];
- ep->udc = udc;
- strncpy(ep->name, "ep0", sizeof(ep->name));
- ep->ep.name = ep->name;
- ep->ep.ops = &mv_ep_ops;
- ep->wedge = 0;
- ep->stopped = 0;
- usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
- ep->ep.caps.type_control = true;
- ep->ep.caps.dir_in = true;
- ep->ep.caps.dir_out = true;
- ep->ep_num = 0;
- ep->ep.desc = &mv_ep0_desc;
- INIT_LIST_HEAD(&ep->queue);
-
- ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
-
- /* initialize other endpoints */
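- /* index 1 (ep0 IN) is skipped: eps[0] serves both directions of ep0 */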
- for (i = 2; i < udc->max_eps * 2; i++) {
- ep = &udc->eps[i];
- if (i % 2) {
- snprintf(name, sizeof(name), "ep%din", i / 2);
- ep->direction = EP_DIR_IN;
- ep->ep.caps.dir_in = true;
- } else {
- snprintf(name, sizeof(name), "ep%dout", i / 2);
- ep->direction = EP_DIR_OUT;
- ep->ep.caps.dir_out = true;
- }
- ep->udc = udc;
- strncpy(ep->name, name, sizeof(ep->name));
- ep->ep.name = ep->name;
-
- ep->ep.caps.type_iso = true;
- ep->ep.caps.type_bulk = true;
- ep->ep.caps.type_int = true;
-
- ep->ep.ops = &mv_ep_ops;
- ep->stopped = 0;
- usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
- ep->ep_num = i / 2;
-
- INIT_LIST_HEAD(&ep->queue);
- list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
-
- ep->dqh = &udc->ep_dqh[i];
- }
-
- return 0;
-}
-
-/* delete all endpoint requests, called with spinlock held */
-static void nuke(struct mv_ep *ep, int status)
-{
- /* called with spinlock held */
- ep->stopped = 1;
-
- /* endpoint fifo flush */
- mv_ep_fifo_flush(&ep->ep);
-
- while (!list_empty(&ep->queue)) {
- struct mv_req *req = NULL;
- req = list_entry(ep->queue.next, struct mv_req, queue);
- done(ep, req, status);
- }
-}
-
-static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
-{
- struct mv_ep *ep;
-
- nuke(&udc->eps[0], -ESHUTDOWN);
-
- list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
- nuke(ep, -ESHUTDOWN);
- }
-
- /* report reset; the driver is already quiesced */
- if (driver) {
- spin_unlock(&udc->lock);
- usb_gadget_udc_reset(&udc->gadget, driver);
- spin_lock(&udc->lock);
- }
-}
-
-/* stop all USB activities */
-static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
-{
- struct mv_ep *ep;
-
- nuke(&udc->eps[0], -ESHUTDOWN);
-
- list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
- nuke(ep, -ESHUTDOWN);
- }
-
- /* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&udc->lock);
- driver->disconnect(&udc->gadget);
- spin_lock(&udc->lock);
- }
-}
-
-static int mv_udc_start(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
-{
- struct mv_udc *udc;
- int retval = 0;
- unsigned long flags;
-
- udc = container_of(gadget, struct mv_udc, gadget);
-
- if (udc->driver)
- return -EBUSY;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- /* hook up the driver ... */
- udc->driver = driver;
-
- udc->usb_state = USB_STATE_ATTACHED;
- udc->ep0_state = WAIT_FOR_SETUP;
- udc->ep0_dir = EP_DIR_OUT;
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- if (udc->transceiver) {
- retval = otg_set_peripheral(udc->transceiver->otg,
- &udc->gadget);
- if (retval) {
- dev_err(&udc->dev->dev,
- "unable to register peripheral to otg\n");
- udc->driver = NULL;
- return retval;
- }
- }
-
- /* when booting with the cable attached, no vbus irq will occur */
- if (udc->qwork)
- queue_work(udc->qwork, &udc->vbus_work);
-
- return 0;
-}
-
-static int mv_udc_stop(struct usb_gadget *gadget)
-{
- struct mv_udc *udc;
- unsigned long flags;
-
- udc = container_of(gadget, struct mv_udc, gadget);
-
- spin_lock_irqsave(&udc->lock, flags);
-
- mv_udc_enable(udc);
- udc_stop(udc);
-
- /* stop all usb activities */
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- stop_activity(udc, NULL);
- mv_udc_disable(udc);
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- /* unbind gadget driver */
- udc->driver = NULL;
-
- return 0;
-}
-
-static void mv_set_ptc(struct mv_udc *udc, u32 mode)
-{
- u32 portsc;
-
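- /* the port test control (PTC) field occupies bits 16-19 of PORTSC */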
- portsc = readl(&udc->op_regs->portsc[0]);
- portsc |= mode << 16;
- writel(portsc, &udc->op_regs->portsc[0]);
-}
-
-static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
-{
- struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
- struct mv_req *req = container_of(_req, struct mv_req, req);
- struct mv_udc *udc;
- unsigned long flags;
-
- udc = mvep->udc;
-
- dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
-
- spin_lock_irqsave(&udc->lock, flags);
- if (req->test_mode) {
- mv_set_ptc(udc, req->test_mode);
- req->test_mode = 0;
- }
- spin_unlock_irqrestore(&udc->lock, flags);
-}
-
-static int
-udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
-{
- int retval = 0;
- struct mv_req *req;
- struct mv_ep *ep;
-
- ep = &udc->eps[0];
- udc->ep0_dir = direction;
- udc->ep0_state = WAIT_FOR_OUT_STATUS;
-
- req = udc->status_req;
-
- /* fill in the request structure */
- if (!empty) {
- *((u16 *) req->req.buf) = cpu_to_le16(status);
- req->req.length = 2;
- } else
- req->req.length = 0;
-
- req->ep = ep;
- req->req.status = -EINPROGRESS;
- req->req.actual = 0;
- if (udc->test_mode) {
- req->req.complete = prime_status_complete;
- req->test_mode = udc->test_mode;
- udc->test_mode = 0;
- } else
- req->req.complete = NULL;
- req->dtd_count = 0;
-
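- /* DMA_ADDR_INVALID means the status buffer has not been mapped yet */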
- if (req->req.dma == DMA_ADDR_INVALID) {
- req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
- req->req.buf, req->req.length,
- ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- req->mapped = 1;
- }
-
- /* prime the data phase */
- if (!req_to_dtd(req)) {
- retval = queue_dtd(ep, req);
- if (retval) {
- dev_err(&udc->dev->dev,
- "Failed to queue dtd when prime status\n");
- goto out;
- }
- } else { /* no mem */
- retval = -ENOMEM;
- dev_err(&udc->dev->dev,
- "Failed to dma_pool_alloc when prime status\n");
- goto out;
- }
-
- list_add_tail(&req->queue, &ep->queue);
-
- return 0;
-out:
- usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
-
- return retval;
-}
-
-static void mv_udc_testmode(struct mv_udc *udc, u16 index)
-{
- if (index <= USB_TEST_FORCE_ENABLE) {
- udc->test_mode = index;
- if (udc_prime_status(udc, EP_DIR_IN, 0, true))
- ep0_stall(udc);
- } else
- dev_err(&udc->dev->dev,
- "This test mode(%d) is not supported\n", index);
-}
-
-static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
-{
- udc->dev_addr = (u8)setup->wValue;
-
- /* update usb state */
- udc->usb_state = USB_STATE_ADDRESS;
-
- if (udc_prime_status(udc, EP_DIR_IN, 0, true))
- ep0_stall(udc);
-}
-
-static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
- struct usb_ctrlrequest *setup)
-{
- u16 status = 0;
- int retval;
-
- if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
- != (USB_DIR_IN | USB_TYPE_STANDARD))
- return;
-
- if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
- status = 1 << USB_DEVICE_SELF_POWERED;
- status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
- } else if ((setup->bRequestType & USB_RECIP_MASK)
- == USB_RECIP_INTERFACE) {
- /* get interface status */
- status = 0;
- } else if ((setup->bRequestType & USB_RECIP_MASK)
- == USB_RECIP_ENDPOINT) {
- u8 ep_num, direction;
-
- ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
- direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
- ? EP_DIR_IN : EP_DIR_OUT;
- status = ep_is_stall(udc, ep_num, direction)
- << USB_ENDPOINT_HALT;
- }
-
- retval = udc_prime_status(udc, EP_DIR_IN, status, false);
- if (retval)
- ep0_stall(udc);
- else
- udc->ep0_state = DATA_STATE_XMIT;
-}
-
-static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
-{
- u8 ep_num;
- u8 direction;
- struct mv_ep *ep;
-
- if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
- == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
- switch (setup->wValue) {
- case USB_DEVICE_REMOTE_WAKEUP:
- udc->remote_wakeup = 0;
- break;
- default:
- goto out;
- }
- } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
- == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
- switch (setup->wValue) {
- case USB_ENDPOINT_HALT:
- ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
- direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
- ? EP_DIR_IN : EP_DIR_OUT;
- if (setup->wValue != 0 || setup->wLength != 0
- || ep_num > udc->max_eps)
- goto out;
- ep = &udc->eps[ep_num * 2 + direction];
- if (ep->wedge == 1)
- break;
- spin_unlock(&udc->lock);
- ep_set_stall(udc, ep_num, direction, 0);
- spin_lock(&udc->lock);
- break;
- default:
- goto out;
- }
- } else
- goto out;
-
- if (udc_prime_status(udc, EP_DIR_IN, 0, true))
- ep0_stall(udc);
-out:
- return;
-}
-
-static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
-{
- u8 ep_num;
- u8 direction;
-
- if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
- == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
- switch (setup->wValue) {
- case USB_DEVICE_REMOTE_WAKEUP:
- udc->remote_wakeup = 1;
- break;
- case USB_DEVICE_TEST_MODE:
- if (setup->wIndex & 0xFF
- || udc->gadget.speed != USB_SPEED_HIGH)
- ep0_stall(udc);
-
- if (udc->usb_state != USB_STATE_CONFIGURED
- && udc->usb_state != USB_STATE_ADDRESS
- && udc->usb_state != USB_STATE_DEFAULT)
- ep0_stall(udc);
-
- mv_udc_testmode(udc, (setup->wIndex >> 8));
- goto out;
- default:
- goto out;
- }
- } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
- == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
- switch (setup->wValue) {
- case USB_ENDPOINT_HALT:
- ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
- direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
- ? EP_DIR_IN : EP_DIR_OUT;
- if (setup->wValue != 0 || setup->wLength != 0
- || ep_num > udc->max_eps)
- goto out;
- spin_unlock(&udc->lock);
- ep_set_stall(udc, ep_num, direction, 1);
- spin_lock(&udc->lock);
- break;
- default:
- goto out;
- }
- } else
- goto out;
-
- if (udc_prime_status(udc, EP_DIR_IN, 0, true))
- ep0_stall(udc);
-out:
- return;
-}
-
-static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
- struct usb_ctrlrequest *setup)
- __releases(&ep->udc->lock)
- __acquires(&ep->udc->lock)
-{
- bool delegate = false;
-
- nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
-
- dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
- setup->bRequestType, setup->bRequest,
- setup->wValue, setup->wIndex, setup->wLength);
- /* We process some standard setup requests here */
- if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- switch (setup->bRequest) {
- case USB_REQ_GET_STATUS:
- ch9getstatus(udc, ep_num, setup);
- break;
-
- case USB_REQ_SET_ADDRESS:
- ch9setaddress(udc, setup);
- break;
-
- case USB_REQ_CLEAR_FEATURE:
- ch9clearfeature(udc, setup);
- break;
-
- case USB_REQ_SET_FEATURE:
- ch9setfeature(udc, setup);
- break;
-
- default:
- delegate = true;
- }
- } else
- delegate = true;
-
- /* delegate USB standard requests to the gadget driver */
- if (delegate) {
- /* USB requests handled by gadget */
- if (setup->wLength) {
- /* DATA phase from gadget, STATUS phase from udc */
- udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
- ? EP_DIR_IN : EP_DIR_OUT;
- spin_unlock(&udc->lock);
- if (udc->driver->setup(&udc->gadget,
- &udc->local_setup_buff) < 0)
- ep0_stall(udc);
- spin_lock(&udc->lock);
- udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
- ? DATA_STATE_XMIT : DATA_STATE_RECV;
- } else {
- /* no DATA phase, IN STATUS phase from gadget */
- udc->ep0_dir = EP_DIR_IN;
- spin_unlock(&udc->lock);
- if (udc->driver->setup(&udc->gadget,
- &udc->local_setup_buff) < 0)
- ep0_stall(udc);
- spin_lock(&udc->lock);
- udc->ep0_state = WAIT_FOR_OUT_STATUS;
- }
- }
-}
-
-/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
-static void ep0_req_complete(struct mv_udc *udc,
- struct mv_ep *ep0, struct mv_req *req)
-{
- u32 new_addr;
-
- if (udc->usb_state == USB_STATE_ADDRESS) {
- /* set the new address */
- new_addr = (u32)udc->dev_addr;
- writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
- &udc->op_regs->deviceaddr);
- }
-
- done(ep0, req, 0);
-
- switch (udc->ep0_state) {
- case DATA_STATE_XMIT:
- /* receive status phase */
- if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
- ep0_stall(udc);
- break;
- case DATA_STATE_RECV:
- /* send status phase */
- if (udc_prime_status(udc, EP_DIR_IN, 0, true))
- ep0_stall(udc);
- break;
- case WAIT_FOR_OUT_STATUS:
- udc->ep0_state = WAIT_FOR_SETUP;
- break;
- case WAIT_FOR_SETUP:
- dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
- break;
- default:
- ep0_stall(udc);
- break;
- }
-}
-
-static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
-{
- u32 temp;
- struct mv_dqh *dqh;
-
- dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
-
- /* Clear bit in ENDPTSETUPSTAT */
- writel((1 << ep_num), &udc->op_regs->epsetupstat);
-
- /* a hazard exists if a new setup packet arrives during the copy */
- do {
- /* Set Setup Tripwire */
- temp = readl(&udc->op_regs->usbcmd);
- writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
-
- /* Copy the setup packet to local buffer */
- memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
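- /* if hardware cleared the tripwire, a new setup packet arrived; copy again */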
- } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
-
- /* Clear Setup Tripwire */
- temp = readl(&udc->op_regs->usbcmd);
- writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
-}
-
-static void irq_process_tr_complete(struct mv_udc *udc)
-{
- u32 tmp, bit_pos;
- int i, ep_num = 0, direction = 0;
- struct mv_ep *curr_ep;
- struct mv_req *curr_req, *temp_req;
- int status;
-
- /*
- * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
- * because the setup packets are to be read ASAP
- */
-
- /* Process all Setup packet received interrupts */
- tmp = readl(&udc->op_regs->epsetupstat);
-
- if (tmp) {
- for (i = 0; i < udc->max_eps; i++) {
- if (tmp & (1 << i)) {
- get_setup_data(udc, i,
- (u8 *)(&udc->local_setup_buff));
- handle_setup_packet(udc, i,
- &udc->local_setup_buff);
- }
- }
- }
-
- /*
- * Don't clear the endpoint setup status register here;
- * it is cleared as each setup packet is read out of the buffer.
- */
-
- /* Process non-setup transaction complete interrupts */
- tmp = readl(&udc->op_regs->epcomplete);
-
- if (!tmp)
- return;
-
- writel(tmp, &udc->op_regs->epcomplete);
-
- for (i = 0; i < udc->max_eps * 2; i++) {
- ep_num = i >> 1;
- direction = i % 2;
-
- bit_pos = 1 << (ep_num + 16 * direction);
-
- if (!(bit_pos & tmp))
- continue;
-
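- /* i == 1 is ep0 IN; both directions of ep0 share eps[0] */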
- if (i == 1)
- curr_ep = &udc->eps[0];
- else
- curr_ep = &udc->eps[i];
- /* process the req queue until an incomplete request is found */
- list_for_each_entry_safe(curr_req, temp_req,
- &curr_ep->queue, queue) {
- status = process_ep_req(udc, i, curr_req);
- if (status)
- break;
-
- /* write back status to req */
- curr_req->req.status = status;
-
- /* ep0 request completion */
- if (ep_num == 0) {
- ep0_req_complete(udc, curr_ep, curr_req);
- break;
- } else {
- done(curr_ep, curr_req, status);
- }
- }
- }
-}
-
-static void irq_process_reset(struct mv_udc *udc)
-{
- u32 tmp;
- unsigned int loops;
-
- udc->ep0_dir = EP_DIR_OUT;
- udc->ep0_state = WAIT_FOR_SETUP;
- udc->remote_wakeup = 0; /* default to 0 on reset */
-
- /* The device address occupies bits 25-31; reset it to zero */
- tmp = readl(&udc->op_regs->deviceaddr);
- tmp &= ~(USB_DEVICE_ADDRESS_MASK);
- writel(tmp, &udc->op_regs->deviceaddr);
-
- /* Clear all the setup token semaphores */
- tmp = readl(&udc->op_regs->epsetupstat);
- writel(tmp, &udc->op_regs->epsetupstat);
-
- /* Clear all the endpoint complete status bits */
- tmp = readl(&udc->op_regs->epcomplete);
- writel(tmp, &udc->op_regs->epcomplete);
-
- /* wait until all endptprime bits are cleared */
- loops = LOOPS(PRIME_TIMEOUT);
- while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
- if (loops == 0) {
- dev_err(&udc->dev->dev,
- "Timeout for ENDPTPRIME = 0x%x\n",
- readl(&udc->op_regs->epprime));
- break;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
-
- /* Write 1s to the Flush register */
- writel((u32)~0, &udc->op_regs->epflush);
-
- if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
- dev_info(&udc->dev->dev, "usb bus reset\n");
- udc->usb_state = USB_STATE_DEFAULT;
- /* reset all the queues, stop all USB activities */
- gadget_reset(udc, udc->driver);
- } else {
- dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
- readl(&udc->op_regs->portsc));
-
- /* reset completed: re-initialize the controller */
- udc_reset(udc);
-
- /* reset all the queues, stop all USB activities */
- stop_activity(udc, udc->driver);
-
- /* reset ep0 dQH and endptctrl */
- ep0_reset(udc);
-
- /* enable interrupt and set controller to run state */
- udc_start(udc);
-
- udc->usb_state = USB_STATE_ATTACHED;
- }
-}
-
-static void handle_bus_resume(struct mv_udc *udc)
-{
- udc->usb_state = udc->resume_state;
- udc->resume_state = 0;
-
- /* report resume to the driver */
- if (udc->driver) {
- if (udc->driver->resume) {
- spin_unlock(&udc->lock);
- udc->driver->resume(&udc->gadget);
- spin_lock(&udc->lock);
- }
- }
-}
-
-static void irq_process_suspend(struct mv_udc *udc)
-{
- udc->resume_state = udc->usb_state;
- udc->usb_state = USB_STATE_SUSPENDED;
-
- if (udc->driver->suspend) {
- spin_unlock(&udc->lock);
- udc->driver->suspend(&udc->gadget);
- spin_lock(&udc->lock);
- }
-}
-
-static void irq_process_port_change(struct mv_udc *udc)
-{
- u32 portsc;
-
- portsc = readl(&udc->op_regs->portsc[0]);
- if (!(portsc & PORTSCX_PORT_RESET)) {
- /* Get the speed */
- u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
- switch (speed) {
- case PORTSCX_PORT_SPEED_HIGH:
- udc->gadget.speed = USB_SPEED_HIGH;
- break;
- case PORTSCX_PORT_SPEED_FULL:
- udc->gadget.speed = USB_SPEED_FULL;
- break;
- case PORTSCX_PORT_SPEED_LOW:
- udc->gadget.speed = USB_SPEED_LOW;
- break;
- default:
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- break;
- }
- }
-
- if (portsc & PORTSCX_PORT_SUSPEND) {
- udc->resume_state = udc->usb_state;
- udc->usb_state = USB_STATE_SUSPENDED;
- if (udc->driver->suspend) {
- spin_unlock(&udc->lock);
- udc->driver->suspend(&udc->gadget);
- spin_lock(&udc->lock);
- }
- }
-
- if (!(portsc & PORTSCX_PORT_SUSPEND)
- && udc->usb_state == USB_STATE_SUSPENDED) {
- handle_bus_resume(udc);
- }
-
- if (!udc->resume_state)
- udc->usb_state = USB_STATE_DEFAULT;
-}
-
-static void irq_process_error(struct mv_udc *udc)
-{
- /* Increment the error count */
- udc->errors++;
-}
-
-static irqreturn_t mv_udc_irq(int irq, void *dev)
-{
- struct mv_udc *udc = (struct mv_udc *)dev;
- u32 status, intr;
-
- /* ignore interrupts while the controller is stopped */
- if (udc->stopped)
- return IRQ_NONE;
-
- spin_lock(&udc->lock);
-
- status = readl(&udc->op_regs->usbsts);
- intr = readl(&udc->op_regs->usbintr);
- status &= intr;
-
- if (status == 0) {
- spin_unlock(&udc->lock);
- return IRQ_NONE;
- }
-
- /* Clear all the interrupts that occurred */
- writel(status, &udc->op_regs->usbsts);
-
- if (status & USBSTS_ERR)
- irq_process_error(udc);
-
- if (status & USBSTS_RESET)
- irq_process_reset(udc);
-
- if (status & USBSTS_PORT_CHANGE)
- irq_process_port_change(udc);
-
- if (status & USBSTS_INT)
- irq_process_tr_complete(udc);
-
- if (status & USBSTS_SUSPEND)
- irq_process_suspend(udc);
-
- spin_unlock(&udc->lock);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
-{
- struct mv_udc *udc = (struct mv_udc *)dev;
-
- /* polling VBUS and initializing the phy may take too much time */
- if (udc->qwork)
- queue_work(udc->qwork, &udc->vbus_work);
-
- return IRQ_HANDLED;
-}
-
-static void mv_udc_vbus_work(struct work_struct *work)
-{
- struct mv_udc *udc;
- unsigned int vbus;
-
- udc = container_of(work, struct mv_udc, vbus_work);
- if (!udc->pdata->vbus)
- return;
-
- vbus = udc->pdata->vbus->poll();
- dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
-
- if (vbus == VBUS_HIGH)
- mv_udc_vbus_session(&udc->gadget, 1);
- else if (vbus == VBUS_LOW)
- mv_udc_vbus_session(&udc->gadget, 0);
-}
-
-/* release device structure */
-static void gadget_release(struct device *_dev)
-{
- struct mv_udc *udc;
-
- udc = dev_get_drvdata(_dev);
-
- complete(udc->done);
-}
-
-static void mv_udc_remove(struct platform_device *pdev)
-{
- struct mv_udc *udc;
-
- udc = platform_get_drvdata(pdev);
-
- usb_del_gadget_udc(&udc->gadget);
-
- if (udc->qwork)
- destroy_workqueue(udc->qwork);
-
- /* free memory allocated in probe */
- dma_pool_destroy(udc->dtd_pool);
-
- if (udc->ep_dqh)
- dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
- udc->ep_dqh, udc->ep_dqh_dma);
-
- mv_udc_disable(udc);
-
- /* free dev, wait for release() to finish */
- wait_for_completion(udc->done);
-}
-
-static int mv_udc_probe(struct platform_device *pdev)
-{
- struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct mv_udc *udc;
- int retval = 0;
- struct resource *r;
- size_t size;
-
- if (pdata == NULL) {
- dev_err(&pdev->dev, "missing platform_data\n");
- return -ENODEV;
- }
-
- udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
- if (udc == NULL)
- return -ENOMEM;
-
- udc->done = &release_done;
- udc->pdata = dev_get_platdata(&pdev->dev);
- spin_lock_init(&udc->lock);
-
- udc->dev = pdev;
-
- if (pdata->mode == MV_USB_MODE_OTG) {
- udc->transceiver = devm_usb_get_phy(&pdev->dev,
- USB_PHY_TYPE_USB2);
- if (IS_ERR(udc->transceiver)) {
- retval = PTR_ERR(udc->transceiver);
-
- if (retval == -ENXIO)
- return retval;
-
- udc->transceiver = NULL;
- return -EPROBE_DEFER;
- }
- }
-
- /* the udc has only one sysclk. */
- udc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(udc->clk))
- return PTR_ERR(udc->clk);
-
- r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
- if (r == NULL) {
- dev_err(&pdev->dev, "no I/O memory resource defined\n");
- return -ENODEV;
- }
-
- udc->cap_regs = (struct mv_cap_regs __iomem *)
- devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (udc->cap_regs == NULL) {
- dev_err(&pdev->dev, "failed to map I/O memory\n");
- return -EBUSY;
- }
-
- r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
- if (r == NULL) {
- dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
- return -ENODEV;
- }
-
- udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (udc->phy_regs == NULL) {
- dev_err(&pdev->dev, "failed to map phy I/O memory\n");
- return -EBUSY;
- }
-
- /* we will access controller registers, so enable the clk */
- retval = mv_udc_enable_internal(udc);
- if (retval)
- return retval;
-
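- /* the operational registers start CAPLENGTH bytes after the capability regs */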
- udc->op_regs =
- (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
- + (readl(&udc->cap_regs->caplength_hciversion)
- & CAPLENGTH_MASK));
- udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
-
- /*
- * Some platforms use USB to download the boot image and may not
- * disconnect the USB gadget before loading the kernel, so stop
- * the udc here first.
- */
- udc_stop(udc);
- writel(0xFFFFFFFF, &udc->op_regs->usbsts);
-
- size = udc->max_eps * sizeof(struct mv_dqh) * 2;
- size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
- udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
- &udc->ep_dqh_dma, GFP_KERNEL);
-
- if (udc->ep_dqh == NULL) {
- dev_err(&pdev->dev, "allocate dQH memory failed\n");
- retval = -ENOMEM;
- goto err_disable_clock;
- }
- udc->ep_dqh_size = size;
-
- /* create dTD dma_pool resource */
- udc->dtd_pool = dma_pool_create("mv_dtd",
- &pdev->dev,
- sizeof(struct mv_dtd),
- DTD_ALIGNMENT,
- DMA_BOUNDARY);
-
- if (!udc->dtd_pool) {
- retval = -ENOMEM;
- goto err_free_dma;
- }
-
- size = udc->max_eps * sizeof(struct mv_ep) * 2;
- udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (udc->eps == NULL) {
- retval = -ENOMEM;
- goto err_destroy_dma;
- }
-
- /* initialize ep0 status request structure */
- udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
- GFP_KERNEL);
- if (!udc->status_req) {
- retval = -ENOMEM;
- goto err_destroy_dma;
- }
- INIT_LIST_HEAD(&udc->status_req->queue);
-
- /* allocate a small buffer so the request has a valid address */
- udc->status_req->req.buf = devm_kzalloc(&pdev->dev, 8, GFP_KERNEL);
- if (!udc->status_req->req.buf) {
- retval = -ENOMEM;
- goto err_destroy_dma;
- }
- udc->status_req->req.dma = DMA_ADDR_INVALID;
-
- udc->resume_state = USB_STATE_NOTATTACHED;
- udc->usb_state = USB_STATE_POWERED;
- udc->ep0_dir = EP_DIR_OUT;
- udc->remote_wakeup = 0;
-
- r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no IRQ resource defined\n");
- retval = -ENODEV;
- goto err_destroy_dma;
- }
- udc->irq = r->start;
- if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
- IRQF_SHARED, driver_name, udc)) {
- dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
- udc->irq);
- retval = -ENODEV;
- goto err_destroy_dma;
- }
-
- /* initialize gadget structure */
- udc->gadget.ops = &mv_ops; /* usb_gadget_ops */
- udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
- INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
- udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
- udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
-
- /* the "gadget" abstracts/virtualizes the controller */
- udc->gadget.name = driver_name; /* gadget name */
-
- eps_init(udc);
-
- /* VBUS detect: we can disable/enable clock on demand. */
- if (udc->transceiver)
- udc->clock_gating = 1;
- else if (pdata->vbus) {
- udc->clock_gating = 1;
- retval = devm_request_threaded_irq(&pdev->dev,
- pdata->vbus->irq, NULL,
- mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
- if (retval) {
- dev_info(&pdev->dev,
- "Can not request irq for VBUS, "
- "disable clock gating\n");
- udc->clock_gating = 0;
- }
-
- udc->qwork = create_singlethread_workqueue("mv_udc_queue");
- if (!udc->qwork) {
- dev_err(&pdev->dev, "cannot create workqueue\n");
- retval = -ENOMEM;
- goto err_destroy_dma;
- }
-
- INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
- }
-
- /*
- * When clock gating is supported, we can disable clk and phy.
- * If not, it means that VBUS detection is not supported, we
- * have to enable vbus active all the time to let controller work.
- */
- if (udc->clock_gating)
- mv_udc_disable_internal(udc);
- else
- udc->vbus_active = 1;
-
- retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
- gadget_release);
- if (retval)
- goto err_create_workqueue;
-
- platform_set_drvdata(pdev, udc);
- dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
- udc->clock_gating ? "with" : "without");
-
- return 0;
-
-err_create_workqueue:
- if (udc->qwork)
- destroy_workqueue(udc->qwork);
-err_destroy_dma:
- dma_pool_destroy(udc->dtd_pool);
-err_free_dma:
- dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
- udc->ep_dqh, udc->ep_dqh_dma);
-err_disable_clock:
- mv_udc_disable_internal(udc);
-
- return retval;
-}
-
-#ifdef CONFIG_PM
-static int mv_udc_suspend(struct device *dev)
-{
- struct mv_udc *udc;
-
- udc = dev_get_drvdata(dev);
-
- /* if OTG is enabled, the following will be done in the OTG driver */
- if (udc->transceiver)
- return 0;
-
- if (udc->pdata->vbus && udc->pdata->vbus->poll)
- if (udc->pdata->vbus->poll() == VBUS_HIGH) {
- dev_info(&udc->dev->dev, "USB cable is connected!\n");
- return -EAGAIN;
- }
-
- /*
- * The udc can only suspend once the cable is unplugged,
- * so clock_gating == 1 needs no special handling here.
- */
- if (!udc->clock_gating) {
- udc_stop(udc);
-
- spin_lock_irq(&udc->lock);
- /* stop all usb activities */
- stop_activity(udc, udc->driver);
- spin_unlock_irq(&udc->lock);
-
- mv_udc_disable_internal(udc);
- }
-
- return 0;
-}
-
-static int mv_udc_resume(struct device *dev)
-{
- struct mv_udc *udc;
- int retval;
-
- udc = dev_get_drvdata(dev);
-
- /* if OTG is enabled, the following will be done in the OTG driver */
- if (udc->transceiver)
- return 0;
-
- if (!udc->clock_gating) {
- retval = mv_udc_enable_internal(udc);
- if (retval)
- return retval;
-
- if (udc->driver && udc->softconnect) {
- udc_reset(udc);
- ep0_reset(udc);
- udc_start(udc);
- }
- }
-
- return 0;
-}
-
-static const struct dev_pm_ops mv_udc_pm_ops = {
- .suspend = mv_udc_suspend,
- .resume = mv_udc_resume,
-};
-#endif
-
-static void mv_udc_shutdown(struct platform_device *pdev)
-{
- struct mv_udc *udc;
- u32 mode;
-
- udc = platform_get_drvdata(pdev);
- /* reset controller mode to IDLE */
- mv_udc_enable(udc);
- mode = readl(&udc->op_regs->usbmode);
- mode &= ~3;
- writel(mode, &udc->op_regs->usbmode);
- mv_udc_disable(udc);
-}
-
-static struct platform_driver udc_driver = {
- .probe = mv_udc_probe,
- .remove = mv_udc_remove,
- .shutdown = mv_udc_shutdown,
- .driver = {
- .name = "mv-udc",
-#ifdef CONFIG_PM
- .pm = &mv_udc_pm_ops,
-#endif
- },
-};
-
-module_platform_driver(udc_driver);
-MODULE_ALIAS("platform:mv-udc");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
deleted file mode 100644
index 7ecddbf5c90d..000000000000
--- a/drivers/usb/gadget/udc/net2272.c
+++ /dev/null
@@ -1,2723 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for PLX NET2272 USB device controller
- *
- * Copyright (C) 2005-2006 PLX Technology, Inc.
- * Copyright (C) 2006-2011 Analog Devices, Inc.
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/prefetch.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/usb.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
-
-#include "net2272.h"
-
-#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
-
-static const char driver_name[] = "net2272";
-static const char driver_vers[] = "2006 October 17/mainline";
-static const char driver_desc[] = DRIVER_DESC;
-
-static const char ep0name[] = "ep0";
-static const char * const ep_name[] = {
- ep0name,
- "ep-a", "ep-b", "ep-c",
-};
-
-#ifdef CONFIG_USB_NET2272_DMA
-/*
- * use_dma: the NET2272 can use an external DMA controller.
- * Note that since there is no generic DMA api, some functions,
- * notably request_dma, start_dma, and cancel_dma will need to be
- * modified for your platform's particular dma controller.
- *
- * If use_dma is disabled, pio will be used instead.
- */
-static bool use_dma = false;
-module_param(use_dma, bool, 0644);
-
-/*
- * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
- * The NET2272 can only use dma for a single endpoint at a time.
- * At some point this could be modified to allow either endpoint
- * to take control of dma as it becomes available.
- *
- * Note that DMA should not be used on OUT endpoints unless it can
- * be guaranteed that no short packets will arrive on an IN endpoint
- * while the DMA operation is pending. Otherwise the OUT DMA will
- * terminate prematurely (See NET2272 Errata 630-0213-0101)
- */
-static ushort dma_ep = 1;
-module_param(dma_ep, ushort, 0644);
-
-/*
- * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
- * mode 0 == Slow DREQ mode
- * mode 1 == Fast DREQ mode
- * mode 2 == Burst mode
- */
-static ushort dma_mode = 2;
-module_param(dma_mode, ushort, 0644);
-#else
-#define use_dma 0
-#define dma_ep 1
-#define dma_mode 2
-#endif
-
-/*
- * fifo_mode: net2272 buffer configuration:
- * mode 0 == ep-{a,b,c} 512db each
- * mode 1 == ep-a 1k, ep-{b,c} 512db
- * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
- * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
- */
-static ushort fifo_mode;
-module_param(fifo_mode, ushort, 0644);
-
-/*
- * enable_suspend: When enabled, the driver will respond to
- * USB suspend requests by powering down the NET2272. Otherwise,
- * USB suspend requests will be ignored. This is acceptable for
- * self-powered devices. For bus powered devices set this to 1.
- */
-static ushort enable_suspend;
-module_param(enable_suspend, ushort, 0644);
-
-static void assert_out_naking(struct net2272_ep *ep, const char *where)
-{
- u8 tmp;
-
-#ifndef DEBUG
- return;
-#endif
-
- tmp = net2272_ep_read(ep, EP_STAT0);
- if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
- dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
- ep->ep.name, where, tmp);
- net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
- }
-}
-#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
-
-static void stop_out_naking(struct net2272_ep *ep)
-{
- u8 tmp = net2272_ep_read(ep, EP_STAT0);
-
- if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
- net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
-}
-
-#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
-
-static char *type_string(u8 bmAttributes)
-{
- switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
- case USB_ENDPOINT_XFER_BULK: return "bulk";
- case USB_ENDPOINT_XFER_ISOC: return "iso";
- case USB_ENDPOINT_XFER_INT: return "intr";
- default: return "control";
- }
-}
-
-static char *buf_state_string(unsigned state)
-{
- switch (state) {
- case BUFF_FREE: return "free";
- case BUFF_VALID: return "valid";
- case BUFF_LCL: return "local";
- case BUFF_USB: return "usb";
- default: return "unknown";
- }
-}
-
-static char *dma_mode_string(void)
-{
- if (!use_dma)
- return "PIO";
- switch (dma_mode) {
- case 0: return "SLOW DREQ";
- case 1: return "FAST DREQ";
- case 2: return "BURST";
- default: return "invalid";
- }
-}
-
-static void net2272_dequeue_all(struct net2272_ep *);
-static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
-static int net2272_fifo_status(struct usb_ep *);
-
-static const struct usb_ep_ops net2272_ep_ops;
-
-/*---------------------------------------------------------------------------*/
-
-static int
-net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
-{
- struct net2272 *dev;
- struct net2272_ep *ep;
- u32 max;
- u8 tmp;
- unsigned long flags;
-
- ep = container_of(_ep, struct net2272_ep, ep);
- if (!_ep || !desc || ep->desc || _ep->name == ep0name
- || desc->bDescriptorType != USB_DT_ENDPOINT)
- return -EINVAL;
- dev = ep->dev;
- if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- max = usb_endpoint_maxp(desc);
-
- spin_lock_irqsave(&dev->lock, flags);
- _ep->maxpacket = max;
- ep->desc = desc;
-
- /* net2272_ep_reset() has already been called */
- ep->stopped = 0;
- ep->wedged = 0;
-
- /* set speed-dependent max packet */
- net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
- net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
-
- /* set type, direction, address; reset fifo counters */
- net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
- tmp = usb_endpoint_type(desc);
- if (usb_endpoint_xfer_bulk(desc)) {
- /* catch some particularly blatant driver bugs */
- if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
- (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
- spin_unlock_irqrestore(&dev->lock, flags);
- return -ERANGE;
- }
- }
- ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
- tmp <<= ENDPOINT_TYPE;
- tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
- tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
- tmp |= (1 << ENDPOINT_ENABLE);
-
- /* for OUT transfers, block the rx fifo until a read is posted */
- ep->is_in = usb_endpoint_dir_in(desc);
- if (!ep->is_in)
- net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
-
- net2272_ep_write(ep, EP_CFG, tmp);
-
- /* enable irqs */
- tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
- net2272_write(dev, IRQENB0, tmp);
-
- tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
- | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
- | net2272_ep_read(ep, EP_IRQENB);
- net2272_ep_write(ep, EP_IRQENB, tmp);
-
- tmp = desc->bEndpointAddress;
- dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
- _ep->name, tmp & 0x0f, PIPEDIR(tmp),
- type_string(desc->bmAttributes), max,
- net2272_ep_read(ep, EP_CFG));
-
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
-}
-
-static void net2272_ep_reset(struct net2272_ep *ep)
-{
- u8 tmp;
-
- ep->desc = NULL;
- INIT_LIST_HEAD(&ep->queue);
-
- usb_ep_set_maxpacket_limit(&ep->ep, ~0);
- ep->ep.ops = &net2272_ep_ops;
-
- /* disable irqs, endpoint */
- net2272_ep_write(ep, EP_IRQENB, 0);
-
- /* init to our chosen defaults, notably so that we NAK OUT
- * packets until the driver queues a read.
- */
- tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
- net2272_ep_write(ep, EP_RSPSET, tmp);
-
- tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
- if (ep->num != 0)
- tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
-
- net2272_ep_write(ep, EP_RSPCLR, tmp);
-
- /* scrub most status bits, and flush any fifo state */
- net2272_ep_write(ep, EP_STAT0,
- (1 << DATA_IN_TOKEN_INTERRUPT)
- | (1 << DATA_OUT_TOKEN_INTERRUPT)
- | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
- | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
- | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
-
- net2272_ep_write(ep, EP_STAT1,
- (1 << TIMEOUT)
- | (1 << USB_OUT_ACK_SENT)
- | (1 << USB_OUT_NAK_SENT)
- | (1 << USB_IN_ACK_RCVD)
- | (1 << USB_IN_NAK_SENT)
- | (1 << USB_STALL_SENT)
- | (1 << LOCAL_OUT_ZLP)
- | (1 << BUFFER_FLUSH));
-
- /* fifo size is handled separately */
-}
-
-static int net2272_disable(struct usb_ep *_ep)
-{
- struct net2272_ep *ep;
- unsigned long flags;
-
- ep = container_of(_ep, struct net2272_ep, ep);
- if (!_ep || !ep->desc || _ep->name == ep0name)
- return -EINVAL;
-
- spin_lock_irqsave(&ep->dev->lock, flags);
- net2272_dequeue_all(ep);
- net2272_ep_reset(ep);
-
- dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
-
- spin_unlock_irqrestore(&ep->dev->lock, flags);
- return 0;
-}
-
-/*---------------------------------------------------------------------------*/
-
-static struct usb_request *
-net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
-{
- struct net2272_request *req;
-
- if (!_ep)
- return NULL;
-
- req = kzalloc(sizeof(*req), gfp_flags);
- if (!req)
- return NULL;
-
- INIT_LIST_HEAD(&req->queue);
-
- return &req->req;
-}
-
-static void
-net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct net2272_request *req;
-
- if (!_ep || !_req)
- return;
-
- req = container_of(_req, struct net2272_request, req);
- WARN_ON(!list_empty(&req->queue));
- kfree(req);
-}
-
-static void
-net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
-{
- struct net2272 *dev;
- unsigned stopped = ep->stopped;
-
- if (ep->num == 0) {
- if (ep->dev->protocol_stall) {
- ep->stopped = 1;
- set_halt(ep);
- }
- allow_status(ep);
- }
-
- list_del_init(&req->queue);
-
- if (req->req.status == -EINPROGRESS)
- req->req.status = status;
- else
- status = req->req.status;
-
- dev = ep->dev;
- if (use_dma && ep->dma)
- usb_gadget_unmap_request(&dev->gadget, &req->req,
- ep->is_in);
-
- if (status && status != -ESHUTDOWN)
- dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
- ep->ep.name, &req->req, status,
- req->req.actual, req->req.length, req->req.buf);
-
- /* don't modify queue heads during completion callback */
- ep->stopped = 1;
- spin_unlock(&dev->lock);
- usb_gadget_giveback_request(&ep->ep, &req->req);
- spin_lock(&dev->lock);
- ep->stopped = stopped;
-}
-
-static int
-net2272_write_packet(struct net2272_ep *ep, u8 *buf,
- struct net2272_request *req, unsigned max)
-{
- u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
- u16 *bufp;
- unsigned length, count;
- u8 tmp;
-
- length = min(req->req.length - req->req.actual, max);
- req->req.actual += length;
-
- dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
- ep->ep.name, req, max, length,
- (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
-
- count = length;
- bufp = (u16 *)buf;
-
- while (likely(count >= 2)) {
- /* no byte-swap required; chip endian set during init */
- writew(*bufp++, ep_data);
- count -= 2;
- }
- buf = (u8 *)bufp;
-
- /* write final byte by placing the NET2272 into 8-bit mode */
- if (unlikely(count)) {
- tmp = net2272_read(ep->dev, LOCCTL);
- net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
- writeb(*buf, ep_data);
- net2272_write(ep->dev, LOCCTL, tmp);
- }
- return length;
-}
-
-/* returns: 0: still running, 1: completed, negative: errno */
-static int
-net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
-{
- u8 *buf;
- unsigned count, max;
- int status;
-
- dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
- ep->ep.name, req->req.actual, req->req.length);
-
- /*
- * Keep loading the endpoint until the final packet is loaded,
- * or the endpoint buffer is full.
- */
- top:
- /*
- * Clear interrupt status
- * - Packet Transmitted interrupt will become set again when the
- * host successfully takes another packet
- */
- net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
- while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
- buf = req->req.buf + req->req.actual;
- prefetch(buf);
-
- /* force pagesel */
- net2272_ep_read(ep, EP_STAT0);
-
- max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
- (net2272_ep_read(ep, EP_AVAIL0));
-
- if (max < ep->ep.maxpacket)
- max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
- | (net2272_ep_read(ep, EP_AVAIL0));
-
- count = net2272_write_packet(ep, buf, req, max);
- /* see if we are done */
- if (req->req.length == req->req.actual) {
- /* validate short or zlp packet */
- if (count < ep->ep.maxpacket)
- set_fifo_bytecount(ep, 0);
- net2272_done(ep, req, 0);
-
- if (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next,
- struct net2272_request,
- queue);
- status = net2272_kick_dma(ep, req);
-
- if (status < 0)
- if ((net2272_ep_read(ep, EP_STAT0)
- & (1 << BUFFER_EMPTY)))
- goto top;
- }
- return 1;
- }
- net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
- }
- return 0;
-}
-
-static void
-net2272_out_flush(struct net2272_ep *ep)
-{
- ASSERT_OUT_NAKING(ep);
-
- net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
- | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
- net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
-}
-
-static int
-net2272_read_packet(struct net2272_ep *ep, u8 *buf,
- struct net2272_request *req, unsigned avail)
-{
- u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
- unsigned is_short;
- u16 *bufp;
-
- req->req.actual += avail;
-
- dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
- ep->ep.name, req, avail,
- (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
-
- is_short = (avail < ep->ep.maxpacket);
-
- if (unlikely(avail == 0)) {
- /* remove any zlp from the buffer */
- (void)readw(ep_data);
- return is_short;
- }
-
- /* Ensure we get the final byte */
- if (unlikely(avail % 2))
- avail++;
- bufp = (u16 *)buf;
-
- do {
- *bufp++ = readw(ep_data);
- avail -= 2;
- } while (avail);
-
- /*
- * To avoid a false endpoint-available race condition, EP_STAT0
- * must be read twice in the case of a short transfer.
- */
- if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
- net2272_ep_read(ep, EP_STAT0);
-
- return is_short;
-}
-
-static int
-net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
-{
- u8 *buf;
- unsigned is_short;
- int count;
- int tmp;
- int cleanup = 0;
-
- dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
- ep->ep.name, req->req.actual, req->req.length);
-
- top:
- do {
- buf = req->req.buf + req->req.actual;
- prefetchw(buf);
-
- count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
- | net2272_ep_read(ep, EP_AVAIL0);
-
- net2272_ep_write(ep, EP_STAT0,
- (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
- (1 << DATA_PACKET_RECEIVED_INTERRUPT));
-
- tmp = req->req.length - req->req.actual;
-
- if (count > tmp) {
- if ((tmp % ep->ep.maxpacket) != 0) {
- dev_err(ep->dev->dev,
- "%s out fifo %d bytes, expected %d\n",
- ep->ep.name, count, tmp);
- cleanup = 1;
- }
- count = (tmp > 0) ? tmp : 0;
- }
-
- is_short = net2272_read_packet(ep, buf, req, count);
-
- /* completion */
- if (unlikely(cleanup || is_short ||
- req->req.actual == req->req.length)) {
-
- if (cleanup) {
- net2272_out_flush(ep);
- net2272_done(ep, req, -EOVERFLOW);
- } else
- net2272_done(ep, req, 0);
-
- /* re-initialize endpoint transfer registers
- * otherwise they may result in erroneous pre-validation
- * for subsequent control reads
- */
- if (unlikely(ep->num == 0)) {
- net2272_ep_write(ep, EP_TRANSFER2, 0);
- net2272_ep_write(ep, EP_TRANSFER1, 0);
- net2272_ep_write(ep, EP_TRANSFER0, 0);
- }
-
- if (!list_empty(&ep->queue)) {
- int status;
-
- req = list_entry(ep->queue.next,
- struct net2272_request, queue);
- status = net2272_kick_dma(ep, req);
- if ((status < 0) &&
- !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
- goto top;
- }
- return 1;
- }
- } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
-
- return 0;
-}
-
-static void
-net2272_pio_advance(struct net2272_ep *ep)
-{
- struct net2272_request *req;
-
- if (unlikely(list_empty(&ep->queue)))
- return;
-
- req = list_entry(ep->queue.next, struct net2272_request, queue);
- (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
-}
-
-/* returns 0 on success, else negative errno */
-static int
-net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
- unsigned len, unsigned dir)
-{
- dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
- ep, buf, len, dir);
-
- /* The NET2272 only supports a single dma channel */
- if (dev->dma_busy)
- return -EBUSY;
- /*
- * EP_TRANSFER (used to determine the number of bytes received
- * in an OUT transfer) is 24 bits wide; don't ask for more than that.
- */
- if ((dir == 1) && (len > 0x1000000))
- return -EINVAL;
-
- dev->dma_busy = 1;
-
- /* initialize platform's dma */
-#ifdef CONFIG_USB_PCI
- /* NET2272 addr, buffer addr, length, etc. */
- switch (dev->dev_id) {
- case PCI_DEVICE_ID_RDK1:
- /* Setup PLX 9054 DMA mode */
- writel((1 << LOCAL_BUS_WIDTH) |
- (1 << TA_READY_INPUT_ENABLE) |
- (0 << LOCAL_BURST_ENABLE) |
- (1 << DONE_INTERRUPT_ENABLE) |
- (1 << LOCAL_ADDRESSING_MODE) |
- (1 << DEMAND_MODE) |
- (1 << DMA_EOT_ENABLE) |
- (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
- (1 << DMA_CHANNEL_INTERRUPT_SELECT),
- dev->rdk1.plx9054_base_addr + DMAMODE0);
-
- writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
- writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
- writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
- writel((dir << DIRECTION_OF_TRANSFER) |
- (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
- dev->rdk1.plx9054_base_addr + DMADPR0);
- writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
- readl(dev->rdk1.plx9054_base_addr + INTCSR),
- dev->rdk1.plx9054_base_addr + INTCSR);
-
- break;
- }
-#endif
-
- net2272_write(dev, DMAREQ,
- (0 << DMA_BUFFER_VALID) |
- (1 << DMA_REQUEST_ENABLE) |
- (1 << DMA_CONTROL_DACK) |
- (dev->dma_eot_polarity << EOT_POLARITY) |
- (dev->dma_dack_polarity << DACK_POLARITY) |
- (dev->dma_dreq_polarity << DREQ_POLARITY) |
- ((ep >> 1) << DMA_ENDPOINT_SELECT));
-
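-	/* dummy read, presumably to flush the posted DMAREQ write
-	 * (SCRATCH is a side-effect-free register)
-	 */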
- (void) net2272_read(dev, SCRATCH);
-
- return 0;
-}
-
-static void
-net2272_start_dma(struct net2272 *dev)
-{
- /* start platform's dma controller */
-#ifdef CONFIG_USB_PCI
- switch (dev->dev_id) {
- case PCI_DEVICE_ID_RDK1:
- writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
- dev->rdk1.plx9054_base_addr + DMACSR0);
- break;
- }
-#endif
-}
-
-/* returns 0 on success, else negative errno */
-static int
-net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
-{
- unsigned size;
- u8 tmp;
-
- if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
- return -EINVAL;
-
- /* don't use dma for odd-length transfers
- * otherwise, we'd need to deal with the last byte with pio
- */
- if (req->req.length & 1)
- return -EINVAL;
-
- dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
- ep->ep.name, req, (unsigned long long) req->req.dma);
-
- net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
-
- /* The NET2272 can only use DMA on one endpoint at a time */
- if (ep->dev->dma_busy)
- return -EBUSY;
-
- /* Make sure we only DMA an even number of bytes (we'll use
- * pio to complete the transfer)
- */
- size = req->req.length;
- size &= ~1;
-
- /* device-to-host transfer */
- if (ep->is_in) {
- /* initialize platform's dma controller */
- if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
- /* unable to obtain DMA channel; return error and use pio mode */
- return -EBUSY;
- req->req.actual += size;
-
- /* host-to-device transfer */
- } else {
- tmp = net2272_ep_read(ep, EP_STAT0);
-
- /* initialize platform's dma controller */
- if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
- /* unable to obtain DMA channel; return error and use pio mode */
- return -EBUSY;
-
- if (!(tmp & (1 << BUFFER_EMPTY)))
- ep->not_empty = 1;
- else
- ep->not_empty = 0;
-
- /* allow the endpoint's buffer to fill */
- net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
-
- /* this transfer completed and data's already in the fifo
- * return error so pio gets used.
- */
- if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
-
- /* deassert dreq */
- net2272_write(ep->dev, DMAREQ,
- (0 << DMA_BUFFER_VALID) |
- (0 << DMA_REQUEST_ENABLE) |
- (1 << DMA_CONTROL_DACK) |
- (ep->dev->dma_eot_polarity << EOT_POLARITY) |
- (ep->dev->dma_dack_polarity << DACK_POLARITY) |
- (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
- ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
-
- return -EBUSY;
- }
- }
-
- /* Don't use per-packet interrupts: use dma interrupts only */
- net2272_ep_write(ep, EP_IRQENB, 0);
-
- net2272_start_dma(ep->dev);
-
- return 0;
-}
-
-static void net2272_cancel_dma(struct net2272 *dev)
-{
-#ifdef CONFIG_USB_PCI
- switch (dev->dev_id) {
- case PCI_DEVICE_ID_RDK1:
- writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
- writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
- while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
- (1 << CHANNEL_DONE)))
-			continue; /* wait for dma to stabilize */
-
- /* dma abort generates an interrupt */
- writeb(1 << CHANNEL_CLEAR_INTERRUPT,
- dev->rdk1.plx9054_base_addr + DMACSR0);
- break;
- }
-#endif
-
- dev->dma_busy = 0;
-}
-
-/*---------------------------------------------------------------------------*/
-
-static int
-net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
-{
- struct net2272_request *req;
- struct net2272_ep *ep;
- struct net2272 *dev;
- unsigned long flags;
- int status = -1;
- u8 s;
-
- req = container_of(_req, struct net2272_request, req);
- if (!_req || !_req->complete || !_req->buf
- || !list_empty(&req->queue))
- return -EINVAL;
- ep = container_of(_ep, struct net2272_ep, ep);
- if (!_ep || (!ep->desc && ep->num != 0))
- return -EINVAL;
- dev = ep->dev;
- if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- /* set up dma mapping in case the caller didn't */
- if (use_dma && ep->dma) {
- status = usb_gadget_map_request(&dev->gadget, _req,
- ep->is_in);
- if (status)
- return status;
- }
-
- dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
- _ep->name, _req, _req->length, _req->buf,
- (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
-
- spin_lock_irqsave(&dev->lock, flags);
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- /* kickstart this i/o queue? */
- if (list_empty(&ep->queue) && !ep->stopped) {
- /* maybe there's no control data, just status ack */
- if (ep->num == 0 && _req->length == 0) {
- net2272_done(ep, req, 0);
- dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
- goto done;
- }
-
- /* Return zlp, don't let it block subsequent packets */
- s = net2272_ep_read(ep, EP_STAT0);
- if (s & (1 << BUFFER_EMPTY)) {
-		/* Buffer is empty; check for a blocking zlp and handle it */
- if ((s & (1 << NAK_OUT_PACKETS)) &&
- net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
- dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
- /*
- * Request is going to terminate with a short packet ...
- * hope the client is ready for it!
- */
- status = net2272_read_fifo(ep, req);
- /* clear short packet naking */
- net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
- goto done;
- }
- }
-
- /* try dma first */
- status = net2272_kick_dma(ep, req);
-
- if (status < 0) {
- /* dma failed (most likely in use by another endpoint)
- * fallback to pio
- */
- status = 0;
-
- if (ep->is_in)
- status = net2272_write_fifo(ep, req);
- else {
- s = net2272_ep_read(ep, EP_STAT0);
- if ((s & (1 << BUFFER_EMPTY)) == 0)
- status = net2272_read_fifo(ep, req);
- }
-
- if (unlikely(status != 0)) {
- if (status > 0)
- status = 0;
- req = NULL;
- }
- }
- }
- if (likely(req))
- list_add_tail(&req->queue, &ep->queue);
-
- if (likely(!list_empty(&ep->queue)))
- net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
- done:
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return 0;
-}
-
-/* dequeue ALL requests */
-static void
-net2272_dequeue_all(struct net2272_ep *ep)
-{
- struct net2272_request *req;
-
- /* called with spinlock held */
- ep->stopped = 1;
-
- while (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next,
- struct net2272_request,
- queue);
- net2272_done(ep, req, -ESHUTDOWN);
- }
-}
-
-/* dequeue JUST ONE request */
-static int
-net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct net2272_ep *ep;
- struct net2272_request *req = NULL, *iter;
- unsigned long flags;
- int stopped;
-
- ep = container_of(_ep, struct net2272_ep, ep);
- if (!_ep || (!ep->desc && ep->num != 0) || !_req)
- return -EINVAL;
-
- spin_lock_irqsave(&ep->dev->lock, flags);
- stopped = ep->stopped;
- ep->stopped = 1;
-
- /* make sure it's still queued on this endpoint */
- list_for_each_entry(iter, &ep->queue, queue) {
- if (&iter->req != _req)
- continue;
- req = iter;
- break;
- }
- if (!req) {
- ep->stopped = stopped;
- spin_unlock_irqrestore(&ep->dev->lock, flags);
- return -EINVAL;
- }
-
- /* queue head may be partially complete */
- if (ep->queue.next == &req->queue) {
- dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
- net2272_done(ep, req, -ECONNRESET);
- }
- ep->stopped = stopped;
-
- spin_unlock_irqrestore(&ep->dev->lock, flags);
- return 0;
-}
-
-/*---------------------------------------------------------------------------*/
-
-static int
-net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
-{
- struct net2272_ep *ep;
- unsigned long flags;
- int ret = 0;
-
- ep = container_of(_ep, struct net2272_ep, ep);
- if (!_ep || (!ep->desc && ep->num != 0))
- return -EINVAL;
- if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
- if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
- return -EINVAL;
-
- spin_lock_irqsave(&ep->dev->lock, flags);
- if (!list_empty(&ep->queue))
- ret = -EAGAIN;
- else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
- ret = -EAGAIN;
- else {
- dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
- value ? "set" : "clear",
- wedged ? "wedge" : "halt");
- /* set/clear */
- if (value) {
- if (ep->num == 0)
- ep->dev->protocol_stall = 1;
- else
- set_halt(ep);
- if (wedged)
- ep->wedged = 1;
- } else {
- clear_halt(ep);
- ep->wedged = 0;
- }
- }
- spin_unlock_irqrestore(&ep->dev->lock, flags);
-
- return ret;
-}
-
-static int
-net2272_set_halt(struct usb_ep *_ep, int value)
-{
- return net2272_set_halt_and_wedge(_ep, value, 0);
-}
-
-static int
-net2272_set_wedge(struct usb_ep *_ep)
-{
- if (!_ep || _ep->name == ep0name)
- return -EINVAL;
- return net2272_set_halt_and_wedge(_ep, 1, 1);
-}
-
-static int
-net2272_fifo_status(struct usb_ep *_ep)
-{
- struct net2272_ep *ep;
- u16 avail;
-
- ep = container_of(_ep, struct net2272_ep, ep);
- if (!_ep || (!ep->desc && ep->num != 0))
- return -ENODEV;
- if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
- avail |= net2272_ep_read(ep, EP_AVAIL0);
- if (avail > ep->fifo_size)
- return -EOVERFLOW;
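-	/* EP_AVAIL reports free space on an IN endpoint; report bytes occupied instead */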
- if (ep->is_in)
- avail = ep->fifo_size - avail;
- return avail;
-}
-
-static void
-net2272_fifo_flush(struct usb_ep *_ep)
-{
- struct net2272_ep *ep;
-
- ep = container_of(_ep, struct net2272_ep, ep);
- if (!_ep || (!ep->desc && ep->num != 0))
- return;
- if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
- return;
-
- net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
-}
-
-static const struct usb_ep_ops net2272_ep_ops = {
- .enable = net2272_enable,
- .disable = net2272_disable,
-
- .alloc_request = net2272_alloc_request,
- .free_request = net2272_free_request,
-
- .queue = net2272_queue,
- .dequeue = net2272_dequeue,
-
- .set_halt = net2272_set_halt,
- .set_wedge = net2272_set_wedge,
- .fifo_status = net2272_fifo_status,
- .fifo_flush = net2272_fifo_flush,
-};
-
-/*---------------------------------------------------------------------------*/
-
-static int
-net2272_get_frame(struct usb_gadget *_gadget)
-{
- struct net2272 *dev;
- unsigned long flags;
- u16 ret;
-
- if (!_gadget)
- return -ENODEV;
- dev = container_of(_gadget, struct net2272, gadget);
- spin_lock_irqsave(&dev->lock, flags);
-
- ret = net2272_read(dev, FRAME1) << 8;
- ret |= net2272_read(dev, FRAME0);
-
- spin_unlock_irqrestore(&dev->lock, flags);
- return ret;
-}
-
-static int
-net2272_wakeup(struct usb_gadget *_gadget)
-{
- struct net2272 *dev;
- u8 tmp;
- unsigned long flags;
-
- if (!_gadget)
- return 0;
- dev = container_of(_gadget, struct net2272, gadget);
-
- spin_lock_irqsave(&dev->lock, flags);
- tmp = net2272_read(dev, USBCTL0);
- if (tmp & (1 << IO_WAKEUP_ENABLE))
- net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
-
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return 0;
-}
-
-static int
-net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
-{
- if (!_gadget)
- return -ENODEV;
-
- _gadget->is_selfpowered = (value != 0);
-
- return 0;
-}
-
-static int
-net2272_pullup(struct usb_gadget *_gadget, int is_on)
-{
- struct net2272 *dev;
- u8 tmp;
- unsigned long flags;
-
- if (!_gadget)
- return -ENODEV;
- dev = container_of(_gadget, struct net2272, gadget);
-
- spin_lock_irqsave(&dev->lock, flags);
- tmp = net2272_read(dev, USBCTL0);
- dev->softconnect = (is_on != 0);
- if (is_on)
- tmp |= (1 << USB_DETECT_ENABLE);
- else
- tmp &= ~(1 << USB_DETECT_ENABLE);
- net2272_write(dev, USBCTL0, tmp);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return 0;
-}
-
-static int net2272_start(struct usb_gadget *_gadget,
- struct usb_gadget_driver *driver);
-static int net2272_stop(struct usb_gadget *_gadget);
-static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);
-
-static const struct usb_gadget_ops net2272_ops = {
- .get_frame = net2272_get_frame,
- .wakeup = net2272_wakeup,
- .set_selfpowered = net2272_set_selfpowered,
- .pullup = net2272_pullup,
- .udc_start = net2272_start,
- .udc_stop = net2272_stop,
- .udc_async_callbacks = net2272_async_callbacks,
-};
-
-/*---------------------------------------------------------------------------*/
-
-static ssize_t
-registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
-{
- struct net2272 *dev;
- char *next;
- unsigned size, t;
- unsigned long flags;
- u8 t1, t2;
- int i;
- const char *s;
-
- dev = dev_get_drvdata(_dev);
- next = buf;
- size = PAGE_SIZE;
- spin_lock_irqsave(&dev->lock, flags);
-
- /* Main Control Registers */
-	t = scnprintf(next, size, "%s version %s, "
- "chiprev %02x, locctl %02x\n"
- "irqenb0 %02x irqenb1 %02x "
- "irqstat0 %02x irqstat1 %02x\n",
- driver_name, driver_vers, dev->chiprev,
- net2272_read(dev, LOCCTL),
- net2272_read(dev, IRQENB0),
- net2272_read(dev, IRQENB1),
- net2272_read(dev, IRQSTAT0),
- net2272_read(dev, IRQSTAT1));
- size -= t;
- next += t;
-
- /* DMA */
- t1 = net2272_read(dev, DMAREQ);
- t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
- t1, ep_name[(t1 & 0x01) + 1],
- t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
- t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
- t1 & (1 << DMA_REQUEST) ? "req " : "",
- t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
- size -= t;
- next += t;
-
- /* USB Control Registers */
- t1 = net2272_read(dev, USBCTL1);
- if (t1 & (1 << VBUS_PIN)) {
- if (t1 & (1 << USB_HIGH_SPEED))
- s = "high speed";
- else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
- s = "powered";
- else
- s = "full speed";
- } else
- s = "not attached";
- t = scnprintf(next, size,
- "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
- net2272_read(dev, USBCTL0), t1,
- net2272_read(dev, OURADDR), s);
- size -= t;
- next += t;
-
- /* Endpoint Registers */
- for (i = 0; i < 4; ++i) {
- struct net2272_ep *ep;
-
- ep = &dev->ep[i];
- if (i && !ep->desc)
- continue;
-
- t1 = net2272_ep_read(ep, EP_CFG);
- t2 = net2272_ep_read(ep, EP_RSPSET);
- t = scnprintf(next, size,
- "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
- "irqenb %02x\n",
- ep->ep.name, t1, t2,
- (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
- (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
- (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
- (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
- (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
- (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
- (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
- (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
- net2272_ep_read(ep, EP_IRQENB));
- size -= t;
- next += t;
-
- t = scnprintf(next, size,
- "\tstat0 %02x stat1 %02x avail %04x "
- "(ep%d%s-%s)%s\n",
- net2272_ep_read(ep, EP_STAT0),
- net2272_ep_read(ep, EP_STAT1),
- (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
- t1 & 0x0f,
- ep->is_in ? "in" : "out",
- type_string(t1 >> 5),
- ep->stopped ? "*" : "");
- size -= t;
- next += t;
-
- t = scnprintf(next, size,
- "\tep_transfer %06x\n",
- ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
- ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
- ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
- size -= t;
- next += t;
-
- t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
- t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
- t = scnprintf(next, size,
- "\tbuf-a %s buf-b %s\n",
- buf_state_string(t1),
- buf_state_string(t2));
- size -= t;
- next += t;
- }
-
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return PAGE_SIZE - size;
-}
-static DEVICE_ATTR_RO(registers);
-
-/*---------------------------------------------------------------------------*/
-
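-/*
- * LOCCTL[7:6] selects the ep-a/ep-b buffer configuration (see
- * BUFFER_CONFIGURATION in net2272.h):
- *   mode 0: ep-a 512,  ep-b 512
- *   mode 1: ep-a 1024, ep-b 512
- *   mode 2: ep-a 1024, ep-b 1024
- *   mode 3: ep-a 1024 double-buffered, ep-b unavailable
- * ep-c is always two 512 byte buffers.
- */
-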
-static void
-net2272_set_fifo_mode(struct net2272 *dev, int mode)
-{
- u8 tmp;
-
- tmp = net2272_read(dev, LOCCTL) & 0x3f;
- tmp |= (mode << 6);
- net2272_write(dev, LOCCTL, tmp);
-
- INIT_LIST_HEAD(&dev->gadget.ep_list);
-
- /* always ep-a, ep-c ... maybe not ep-b */
- list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
-
- switch (mode) {
- case 0:
- list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
- dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
- break;
- case 1:
- list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
- dev->ep[1].fifo_size = 1024;
- dev->ep[2].fifo_size = 512;
- break;
- case 2:
- list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
- dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
- break;
- case 3:
- dev->ep[1].fifo_size = 1024;
- break;
- }
-
- /* ep-c is always 2 512 byte buffers */
- list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
- dev->ep[3].fifo_size = 512;
-}
-
-/*---------------------------------------------------------------------------*/
-
-static void
-net2272_usb_reset(struct net2272 *dev)
-{
- dev->gadget.speed = USB_SPEED_UNKNOWN;
-
- net2272_cancel_dma(dev);
-
- net2272_write(dev, IRQENB0, 0);
- net2272_write(dev, IRQENB1, 0);
-
- /* clear irq state */
- net2272_write(dev, IRQSTAT0, 0xff);
- net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
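-	/* (irqstat bits are write-1-to-clear; everything except the
-	 * suspend-request bit is acked here)
-	 */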
-
- net2272_write(dev, DMAREQ,
- (0 << DMA_BUFFER_VALID) |
- (0 << DMA_REQUEST_ENABLE) |
- (1 << DMA_CONTROL_DACK) |
- (dev->dma_eot_polarity << EOT_POLARITY) |
- (dev->dma_dack_polarity << DACK_POLARITY) |
- (dev->dma_dreq_polarity << DREQ_POLARITY) |
- ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
-
- net2272_cancel_dma(dev);
- net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
-
-	/* Set the NET2272 ep fifo data width to 16-bit mode.
-	 * Note that the higher level gadget drivers are expected to convert data to little endian;
-	 * enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
-	 */
- net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
- net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
-}
-
-static void
-net2272_usb_reinit(struct net2272 *dev)
-{
- int i;
-
- /* basic endpoint init */
- for (i = 0; i < 4; ++i) {
- struct net2272_ep *ep = &dev->ep[i];
-
- ep->ep.name = ep_name[i];
- ep->dev = dev;
- ep->num = i;
- ep->not_empty = 0;
-
- if (use_dma && ep->num == dma_ep)
- ep->dma = 1;
-
- if (i > 0 && i <= 3)
- ep->fifo_size = 512;
- else
- ep->fifo_size = 64;
- net2272_ep_reset(ep);
-
- if (i == 0) {
- ep->ep.caps.type_control = true;
- } else {
- ep->ep.caps.type_iso = true;
- ep->ep.caps.type_bulk = true;
- ep->ep.caps.type_int = true;
- }
-
- ep->ep.caps.dir_in = true;
- ep->ep.caps.dir_out = true;
- }
- usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
-
- dev->gadget.ep0 = &dev->ep[0].ep;
- dev->ep[0].stopped = 0;
- INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
-}
-
-static void
-net2272_ep0_start(struct net2272 *dev)
-{
- struct net2272_ep *ep0 = &dev->ep[0];
-
- net2272_ep_write(ep0, EP_RSPSET,
- (1 << NAK_OUT_PACKETS_MODE) |
- (1 << ALT_NAK_OUT_PACKETS));
- net2272_ep_write(ep0, EP_RSPCLR,
- (1 << HIDE_STATUS_PHASE) |
- (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
- net2272_write(dev, USBCTL0,
- (dev->softconnect << USB_DETECT_ENABLE) |
- (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
- (1 << IO_WAKEUP_ENABLE));
- net2272_write(dev, IRQENB0,
- (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
- (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
- (1 << DMA_DONE_INTERRUPT_ENABLE));
- net2272_write(dev, IRQENB1,
- (1 << VBUS_INTERRUPT_ENABLE) |
- (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
- (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
-}
-
-/* when a driver is successfully registered, it will receive
- * control requests including set_configuration(), which enables
- * non-control requests. then usb traffic follows until a
- * disconnect is reported. then a host may connect again, or
- * the driver might get unbound.
- */
-static int net2272_start(struct usb_gadget *_gadget,
- struct usb_gadget_driver *driver)
-{
- struct net2272 *dev;
- unsigned i;
-
- if (!driver || !driver->setup ||
- driver->max_speed != USB_SPEED_HIGH)
- return -EINVAL;
-
- dev = container_of(_gadget, struct net2272, gadget);
-
- for (i = 0; i < 4; ++i)
- dev->ep[i].irqs = 0;
- /* hook up the driver ... */
- dev->softconnect = 1;
- dev->driver = driver;
-
- /* ... then enable host detection and ep0; and we're ready
- * for set_configuration as well as eventual disconnect.
- */
- net2272_ep0_start(dev);
-
- return 0;
-}
-
-static void
-stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
-{
- int i;
-
- /* don't disconnect if it's not connected */
- if (dev->gadget.speed == USB_SPEED_UNKNOWN)
- driver = NULL;
-
- /* stop hardware; prevent new request submissions;
- * and kill any outstanding requests.
- */
- net2272_usb_reset(dev);
- for (i = 0; i < 4; ++i)
- net2272_dequeue_all(&dev->ep[i]);
-
- /* report disconnect; the driver is already quiesced */
- if (dev->async_callbacks && driver) {
- spin_unlock(&dev->lock);
- driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
-
- net2272_usb_reinit(dev);
-}
-
-static int net2272_stop(struct usb_gadget *_gadget)
-{
- struct net2272 *dev;
- unsigned long flags;
-
- dev = container_of(_gadget, struct net2272, gadget);
-
- spin_lock_irqsave(&dev->lock, flags);
- stop_activity(dev, NULL);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- dev->driver = NULL;
-
- return 0;
-}
-
-static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
-{
- struct net2272 *dev = container_of(_gadget, struct net2272, gadget);
-
- spin_lock_irq(&dev->lock);
- dev->async_callbacks = enable;
- spin_unlock_irq(&dev->lock);
-}
-
-/*---------------------------------------------------------------------------*/
-/* handle ep-a/ep-b dma completions */
-static void
-net2272_handle_dma(struct net2272_ep *ep)
-{
- struct net2272_request *req;
- unsigned len;
- int status;
-
- if (!list_empty(&ep->queue))
- req = list_entry(ep->queue.next,
- struct net2272_request, queue);
- else
- req = NULL;
-
- dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
-
- /* Ensure DREQ is de-asserted */
- net2272_write(ep->dev, DMAREQ,
- (0 << DMA_BUFFER_VALID)
- | (0 << DMA_REQUEST_ENABLE)
- | (1 << DMA_CONTROL_DACK)
- | (ep->dev->dma_eot_polarity << EOT_POLARITY)
- | (ep->dev->dma_dack_polarity << DACK_POLARITY)
- | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
- | (ep->dma << DMA_ENDPOINT_SELECT));
-
- ep->dev->dma_busy = 0;
-
- net2272_ep_write(ep, EP_IRQENB,
- (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
- | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
- | net2272_ep_read(ep, EP_IRQENB));
-
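-	/* note: both completion paths below assume a request was queued
-	 * (req != NULL)
-	 */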
- /* device-to-host transfer completed */
- if (ep->is_in) {
- /* validate a short packet or zlp if necessary */
- if ((req->req.length % ep->ep.maxpacket != 0) ||
- req->req.zero)
- set_fifo_bytecount(ep, 0);
-
- net2272_done(ep, req, 0);
- if (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next,
- struct net2272_request, queue);
- status = net2272_kick_dma(ep, req);
- if (status < 0)
- net2272_pio_advance(ep);
- }
-
- /* host-to-device transfer completed */
- } else {
- /* terminated with a short packet? */
- if (net2272_read(ep->dev, IRQSTAT0) &
- (1 << DMA_DONE_INTERRUPT)) {
- /* abort system dma */
- net2272_cancel_dma(ep->dev);
- }
-
- /* EP_TRANSFER will contain the number of bytes
- * actually received.
- * NOTE: There is no overflow detection on EP_TRANSFER:
- * We can't deal with transfers larger than 2^24 bytes!
- */
- len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
- | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
- | (net2272_ep_read(ep, EP_TRANSFER0));
-
- if (ep->not_empty)
- len += 4;
-
- req->req.actual += len;
-
- /* get any remaining data */
- net2272_pio_advance(ep);
- }
-}
-
-/*---------------------------------------------------------------------------*/
-
-static void
-net2272_handle_ep(struct net2272_ep *ep)
-{
- struct net2272_request *req;
- u8 stat0, stat1;
-
- if (!list_empty(&ep->queue))
- req = list_entry(ep->queue.next,
- struct net2272_request, queue);
- else
- req = NULL;
-
- /* ack all, and handle what we care about */
- stat0 = net2272_ep_read(ep, EP_STAT0);
- stat1 = net2272_ep_read(ep, EP_STAT1);
- ep->irqs++;
-
- dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
- ep->ep.name, stat0, stat1, req ? &req->req : NULL);
-
- net2272_ep_write(ep, EP_STAT0, stat0 &
- ~((1 << NAK_OUT_PACKETS)
- | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
- net2272_ep_write(ep, EP_STAT1, stat1);
-
- /* data packet(s) received (in the fifo, OUT)
- * direction must be validated, otherwise control read status phase
- * could be interpreted as a valid packet
- */
- if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
- net2272_pio_advance(ep);
- /* data packet(s) transmitted (IN) */
- else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
- net2272_pio_advance(ep);
-}
-
-static struct net2272_ep *
-net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
-{
- struct net2272_ep *ep;
-
- if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
- return &dev->ep[0];
-
- list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
- u8 bEndpointAddress;
-
- if (!ep->desc)
- continue;
- bEndpointAddress = ep->desc->bEndpointAddress;
- if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
- continue;
- if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
- return ep;
- }
- return NULL;
-}
-
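-/*
- * Example: wIndex 0x81 (USB_DIR_IN | endpoint 1) matches only an
- * endpoint whose descriptor has bEndpointAddress == 0x81; direction
- * and endpoint number must both agree.
- */
-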
-/*
- * USB Test Packet:
- * JKJKJKJK * 9
- * JJKKJJKK * 8
- * JJJJKKKK * 8
- * JJJJJJJKKKKKKK * 8
- * JJJJJJJK * 8
- * {JKKKKKKK * 10}, JK
- */
-static const u8 net2272_test_packet[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
- 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
- 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
- 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
-};
-
-static void
-net2272_set_test_mode(struct net2272 *dev, int mode)
-{
- int i;
-
- /* Disable all net2272 interrupts:
- * Nothing but a power cycle should stop the test.
- */
- net2272_write(dev, IRQENB0, 0x00);
- net2272_write(dev, IRQENB1, 0x00);
-
-	/* Force transceiver to high-speed */
- net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
-
- net2272_write(dev, PAGESEL, 0);
- net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
- net2272_write(dev, EP_RSPCLR,
- (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
- | (1 << HIDE_STATUS_PHASE));
- net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
- net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
-
- /* wait for status phase to complete */
- while (!(net2272_read(dev, EP_STAT0) &
- (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
- ;
-
- /* Enable test mode */
- net2272_write(dev, USBTEST, mode);
-
- /* load test packet */
- if (mode == USB_TEST_PACKET) {
- /* switch to 8 bit mode */
- net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
- ~(1 << DATA_WIDTH));
-
- for (i = 0; i < sizeof(net2272_test_packet); ++i)
- net2272_write(dev, EP_DATA, net2272_test_packet[i]);
-
- /* Validate test packet */
- net2272_write(dev, EP_TRANSFER0, 0);
- }
-}
-
-static void
-net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
-{
- struct net2272_ep *ep;
- u8 num, scratch;
-
- /* starting a control request? */
- if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
- union {
- u8 raw[8];
- struct usb_ctrlrequest r;
- } u;
- int tmp = 0;
- struct net2272_request *req;
-
- if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
- if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
- dev->gadget.speed = USB_SPEED_HIGH;
- else
- dev->gadget.speed = USB_SPEED_FULL;
- dev_dbg(dev->dev, "%s\n",
- usb_speed_string(dev->gadget.speed));
- }
-
- ep = &dev->ep[0];
- ep->irqs++;
-
- /* make sure any leftover interrupt state is cleared */
- stat &= ~(1 << ENDPOINT_0_INTERRUPT);
- while (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next,
- struct net2272_request, queue);
- net2272_done(ep, req,
- (req->req.actual == req->req.length) ? 0 : -EPROTO);
- }
- ep->stopped = 0;
- dev->protocol_stall = 0;
- net2272_ep_write(ep, EP_STAT0,
- (1 << DATA_IN_TOKEN_INTERRUPT)
- | (1 << DATA_OUT_TOKEN_INTERRUPT)
- | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
- | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
- | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
- net2272_ep_write(ep, EP_STAT1,
- (1 << TIMEOUT)
- | (1 << USB_OUT_ACK_SENT)
- | (1 << USB_OUT_NAK_SENT)
- | (1 << USB_IN_ACK_RCVD)
- | (1 << USB_IN_NAK_SENT)
- | (1 << USB_STALL_SENT)
- | (1 << LOCAL_OUT_ZLP));
-
- /*
- * Ensure Control Read pre-validation setting is beyond maximum size
- * - Control Writes can leave non-zero values in EP_TRANSFER. If
- * an EP0 transfer following the Control Write is a Control Read,
- * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
- * pre-validation count.
- * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
-		 *    the pre-validation count cannot cause an unexpected validation
- */
- net2272_write(dev, PAGESEL, 0);
- net2272_write(dev, EP_TRANSFER2, 0xff);
- net2272_write(dev, EP_TRANSFER1, 0xff);
- net2272_write(dev, EP_TRANSFER0, 0xff);
-
- u.raw[0] = net2272_read(dev, SETUP0);
- u.raw[1] = net2272_read(dev, SETUP1);
- u.raw[2] = net2272_read(dev, SETUP2);
- u.raw[3] = net2272_read(dev, SETUP3);
- u.raw[4] = net2272_read(dev, SETUP4);
- u.raw[5] = net2272_read(dev, SETUP5);
- u.raw[6] = net2272_read(dev, SETUP6);
- u.raw[7] = net2272_read(dev, SETUP7);
- /*
- * If you have a big endian cpu make sure le16_to_cpus
- * performs the proper byte swapping here...
- */
- le16_to_cpus(&u.r.wValue);
- le16_to_cpus(&u.r.wIndex);
- le16_to_cpus(&u.r.wLength);
-
- /* ack the irq */
- net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
- stat ^= (1 << SETUP_PACKET_INTERRUPT);
-
- /* watch control traffic at the token level, and force
- * synchronization before letting the status phase happen.
- */
- ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
- if (ep->is_in) {
- scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
- | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
- | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
- stop_out_naking(ep);
- } else
- scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
- | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
- | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
- net2272_ep_write(ep, EP_IRQENB, scratch);
-
- if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
- goto delegate;
- switch (u.r.bRequest) {
- case USB_REQ_GET_STATUS: {
- struct net2272_ep *e;
- u16 status = 0;
-
- switch (u.r.bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_ENDPOINT:
- e = net2272_get_ep_by_addr(dev, u.r.wIndex);
- if (!e || u.r.wLength > 2)
- goto do_stall;
- if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
- status = cpu_to_le16(1);
- else
- status = cpu_to_le16(0);
-
- /* don't bother with a request object! */
- net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
- writew(status, net2272_reg_addr(dev, EP_DATA));
- set_fifo_bytecount(&dev->ep[0], 0);
- allow_status(ep);
- dev_vdbg(dev->dev, "%s stat %02x\n",
- ep->ep.name, status);
- goto next_endpoints;
- case USB_RECIP_DEVICE:
- if (u.r.wLength > 2)
- goto do_stall;
- if (dev->gadget.is_selfpowered)
- status = (1 << USB_DEVICE_SELF_POWERED);
-
- /* don't bother with a request object! */
- net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
- writew(status, net2272_reg_addr(dev, EP_DATA));
- set_fifo_bytecount(&dev->ep[0], 0);
- allow_status(ep);
- dev_vdbg(dev->dev, "device stat %02x\n", status);
- goto next_endpoints;
- case USB_RECIP_INTERFACE:
- if (u.r.wLength > 2)
- goto do_stall;
-
- /* don't bother with a request object! */
- net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
- writew(status, net2272_reg_addr(dev, EP_DATA));
- set_fifo_bytecount(&dev->ep[0], 0);
- allow_status(ep);
- dev_vdbg(dev->dev, "interface status %02x\n", status);
- goto next_endpoints;
- }
-
- break;
- }
- case USB_REQ_CLEAR_FEATURE: {
- struct net2272_ep *e;
-
- if (u.r.bRequestType != USB_RECIP_ENDPOINT)
- goto delegate;
- if (u.r.wValue != USB_ENDPOINT_HALT ||
- u.r.wLength != 0)
- goto do_stall;
- e = net2272_get_ep_by_addr(dev, u.r.wIndex);
- if (!e)
- goto do_stall;
- if (e->wedged) {
- dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
- ep->ep.name);
- } else {
- dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
- clear_halt(e);
- }
- allow_status(ep);
- goto next_endpoints;
- }
- case USB_REQ_SET_FEATURE: {
- struct net2272_ep *e;
-
- if (u.r.bRequestType == USB_RECIP_DEVICE) {
- if (u.r.wIndex != NORMAL_OPERATION)
- net2272_set_test_mode(dev, (u.r.wIndex >> 8));
- allow_status(ep);
- dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
- goto next_endpoints;
- } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
- goto delegate;
- if (u.r.wValue != USB_ENDPOINT_HALT ||
- u.r.wLength != 0)
- goto do_stall;
- e = net2272_get_ep_by_addr(dev, u.r.wIndex);
- if (!e)
- goto do_stall;
- set_halt(e);
- allow_status(ep);
- dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
- goto next_endpoints;
- }
- case USB_REQ_SET_ADDRESS: {
- net2272_write(dev, OURADDR, u.r.wValue & 0xff);
- allow_status(ep);
- break;
- }
- default:
- delegate:
- dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
- "ep_cfg %08x\n",
- u.r.bRequestType, u.r.bRequest,
- u.r.wValue, u.r.wIndex,
- net2272_ep_read(ep, EP_CFG));
- if (dev->async_callbacks) {
- spin_unlock(&dev->lock);
- tmp = dev->driver->setup(&dev->gadget, &u.r);
- spin_lock(&dev->lock);
- }
- }
-
- /* stall ep0 on error */
- if (tmp < 0) {
- do_stall:
- dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
- u.r.bRequestType, u.r.bRequest, tmp);
- dev->protocol_stall = 1;
- }
- /* endpoint dma irq? */
- } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
- net2272_cancel_dma(dev);
- net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
- stat &= ~(1 << DMA_DONE_INTERRUPT);
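-		/* DMA_ENDPOINT_SELECT: 0 = ep-a (dev->ep[1]), 1 = ep-b
-		 * (dev->ep[2]); mirrors the (ep >> 1) encoding in
-		 * net2272_request_dma()
-		 */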
- num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
- ? 2 : 1;
-
- ep = &dev->ep[num];
- net2272_handle_dma(ep);
- }
-
- next_endpoints:
- /* endpoint data irq? */
- scratch = stat & 0x0f;
- stat &= ~0x0f;
- for (num = 0; scratch; num++) {
- u8 t;
-
- /* does this endpoint's FIFO and queue need tending? */
- t = 1 << num;
- if ((scratch & t) == 0)
- continue;
- scratch ^= t;
-
- ep = &dev->ep[num];
- net2272_handle_ep(ep);
- }
-
- /* some interrupts we can just ignore */
- stat &= ~(1 << SOF_INTERRUPT);
-
- if (stat)
- dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
-}
-
-static void
-net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
-{
- u8 tmp, mask;
-
- /* after disconnect there's nothing else to do! */
- tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
- mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
-
- if (stat & tmp) {
- bool reset = false;
- bool disconnect = false;
-
- /*
- * Ignore disconnects and resets if the speed hasn't been set.
- * VBUS can bounce and there's always an initial reset.
- */
- net2272_write(dev, IRQSTAT1, tmp);
- if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
- if ((stat & (1 << VBUS_INTERRUPT)) &&
- (net2272_read(dev, USBCTL1) &
- (1 << VBUS_PIN)) == 0) {
- disconnect = true;
- dev_dbg(dev->dev, "disconnect %s\n",
- dev->driver->driver.name);
- } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
- (net2272_read(dev, USBCTL1) & mask)
- == 0) {
- reset = true;
- dev_dbg(dev->dev, "reset %s\n",
- dev->driver->driver.name);
- }
-
- if (disconnect || reset) {
- stop_activity(dev, dev->driver);
- net2272_ep0_start(dev);
- if (dev->async_callbacks) {
- spin_unlock(&dev->lock);
- if (reset)
- usb_gadget_udc_reset(&dev->gadget, dev->driver);
- else
- (dev->driver->disconnect)(&dev->gadget);
- spin_lock(&dev->lock);
- }
- return;
- }
- }
- stat &= ~tmp;
-
- if (!stat)
- return;
- }
-
- tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
- if (stat & tmp) {
- net2272_write(dev, IRQSTAT1, tmp);
- if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
- if (dev->async_callbacks && dev->driver->suspend)
- dev->driver->suspend(&dev->gadget);
- if (!enable_suspend) {
- stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
- dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
- }
- } else {
- if (dev->async_callbacks && dev->driver->resume)
- dev->driver->resume(&dev->gadget);
- }
- stat &= ~tmp;
- }
-
- /* clear any other status/irqs */
- if (stat)
- net2272_write(dev, IRQSTAT1, stat);
-
- /* some status we can just ignore */
- stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
- | (1 << SUSPEND_REQUEST_INTERRUPT)
- | (1 << RESUME_INTERRUPT));
-	if (stat)
-		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
-}
-
-static irqreturn_t net2272_irq(int irq, void *_dev)
-{
- struct net2272 *dev = _dev;
-#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
- u32 intcsr;
-#endif
-#if defined(PLX_PCI_RDK)
- u8 dmareq;
-#endif
- spin_lock(&dev->lock);
-#if defined(PLX_PCI_RDK)
- intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
-
- if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
- writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
- dev->rdk1.plx9054_base_addr + INTCSR);
- net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
- net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
- intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
- writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
- dev->rdk1.plx9054_base_addr + INTCSR);
- }
- if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
- writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
- dev->rdk1.plx9054_base_addr + DMACSR0);
-
- dmareq = net2272_read(dev, DMAREQ);
- if (dmareq & 0x01)
- net2272_handle_dma(&dev->ep[2]);
- else
- net2272_handle_dma(&dev->ep[1]);
- }
-#endif
-#if defined(PLX_PCI_RDK2)
- /* see if PCI int for us by checking irqstat */
- intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
- if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
- spin_unlock(&dev->lock);
- return IRQ_NONE;
- }
-	/* check dma interrupts (RDK2 DMA interrupts are not serviced here) */
-#endif
- /* Platform/device interrupt handler */
-#if !defined(PLX_PCI_RDK)
- net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
- net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
-#endif
- spin_unlock(&dev->lock);
-
- return IRQ_HANDLED;
-}
-
-static int net2272_present(struct net2272 *dev)
-{
-	/*
-	 * Quick test to see if the CPU can communicate properly with the NET2272.
-	 * Verifies the connection by writing and reading both a read/write
-	 * register (SCRATCH) and a read-only register (CHIPREV).
-	 *
-	 * This routine is strongly recommended, especially during early bring-up
-	 * of new hardware; for designs that do not apply Power On Self
-	 * Tests (POST) it may be discarded (or perhaps minimized).
-	 */
- unsigned int ii;
- u8 val, refval;
-
-	/* Verify the NET2272 SCRATCH register can be written and read back */
- refval = net2272_read(dev, SCRATCH);
- for (ii = 0; ii < 0x100; ii += 7) {
- net2272_write(dev, SCRATCH, ii);
- val = net2272_read(dev, SCRATCH);
- if (val != ii) {
- dev_dbg(dev->dev,
- "%s: write/read SCRATCH register test failed: "
- "wrote:0x%2.2x, read:0x%2.2x\n",
- __func__, ii, val);
- return -EINVAL;
- }
- }
- /* To be nice, we write the original SCRATCH value back: */
- net2272_write(dev, SCRATCH, refval);
-
- /* Verify NET2272 CHIPREV register is read-only: */
- refval = net2272_read(dev, CHIPREV_2272);
- for (ii = 0; ii < 0x100; ii += 7) {
- net2272_write(dev, CHIPREV_2272, ii);
- val = net2272_read(dev, CHIPREV_2272);
- if (val != refval) {
- dev_dbg(dev->dev,
- "%s: write/read CHIPREV register test failed: "
- "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
- __func__, ii, val, refval);
- return -EINVAL;
- }
- }
-
- /*
- * Verify NET2272's "NET2270 legacy revision" register
- * - NET2272 has two revision registers. The NET2270 legacy revision
- * register should read the same value, regardless of the NET2272
-	 *   silicon revision. The legacy register supports NET2270
-	 *   firmware running on the NET2272.
- */
- val = net2272_read(dev, CHIPREV_LEGACY);
- if (val != NET2270_LEGACY_REV) {
- /*
- * Unexpected legacy revision value
- * - Perhaps the chip is a NET2270?
- */
- dev_dbg(dev->dev,
- "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
- " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
- __func__, NET2270_LEGACY_REV, val);
- return -EINVAL;
- }
-
- /*
- * Verify NET2272 silicon revision
- * - This revision register is appropriate for the silicon version
- * of the NET2272
- */
- val = net2272_read(dev, CHIPREV_2272);
- switch (val) {
- case CHIPREV_NET2272_R1:
- /*
- * NET2272 Rev 1 has DMA related errata:
- * - Newer silicon (Rev 1A or better) required
- */
- dev_dbg(dev->dev,
- "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
- __func__);
- break;
- case CHIPREV_NET2272_R1A:
- break;
- default:
- /* NET2272 silicon version *may* not work with this firmware */
- dev_dbg(dev->dev,
- "%s: unexpected silicon revision register value: "
- " CHIPREV_2272: 0x%2.2x\n",
- __func__, val);
- /*
- * Return Success, even though the chip rev is not an expected value
- * - Older, pre-built firmware can attempt to operate on newer silicon
- * - Often, new silicon is perfectly compatible
- */
- }
-
- /* Success: NET2272 checks out OK */
- return 0;
-}
-
-static void
-net2272_gadget_release(struct device *_dev)
-{
- struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev);
-
- kfree(dev);
-}
-
-/*---------------------------------------------------------------------------*/
-
-static void
-net2272_remove(struct net2272 *dev)
-{
- if (dev->added)
- usb_del_gadget(&dev->gadget);
- free_irq(dev->irq, dev);
- iounmap(dev->base_addr);
- device_remove_file(dev->dev, &dev_attr_registers);
-
- dev_info(dev->dev, "unbind\n");
-}
-
-static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
-{
- struct net2272 *ret;
-
- if (!irq) {
- dev_dbg(dev, "No IRQ!\n");
- return ERR_PTR(-ENODEV);
- }
-
- /* alloc, and start init */
- ret = kzalloc(sizeof(*ret), GFP_KERNEL);
- if (!ret)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&ret->lock);
- ret->irq = irq;
- ret->dev = dev;
- ret->gadget.ops = &net2272_ops;
- ret->gadget.max_speed = USB_SPEED_HIGH;
-
- /* the "gadget" abstracts/virtualizes the controller */
- ret->gadget.name = driver_name;
- usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
-
- return ret;
-}
-
-static int
-net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
-{
- int ret;
-
- /* See if there... */
- if (net2272_present(dev)) {
- dev_warn(dev->dev, "2272 not found!\n");
- ret = -ENODEV;
- goto err;
- }
-
- net2272_usb_reset(dev);
- net2272_usb_reinit(dev);
-
- ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
- if (ret) {
- dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
- goto err;
- }
-
- dev->chiprev = net2272_read(dev, CHIPREV_2272);
-
- /* done */
- dev_info(dev->dev, "%s\n", driver_desc);
- dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
- dev->irq, dev->base_addr, dev->chiprev,
- dma_mode_string());
- dev_info(dev->dev, "version: %s\n", driver_vers);
-
- ret = device_create_file(dev->dev, &dev_attr_registers);
- if (ret)
- goto err_irq;
-
- ret = usb_add_gadget(&dev->gadget);
- if (ret)
- goto err_add_udc;
- dev->added = 1;
-
- return 0;
-
-err_add_udc:
- device_remove_file(dev->dev, &dev_attr_registers);
- err_irq:
- free_irq(dev->irq, dev);
- err:
- return ret;
-}
-
-#ifdef CONFIG_USB_PCI
-
-/*
- * wrap this driver around the specified device, but
- * don't respond over USB until a gadget driver binds to us
- */
-
-static int
-net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
-{
- unsigned long resource, len, tmp;
- void __iomem *mem_mapped_addr[4];
- int ret, i;
-
- /*
- * BAR 0 holds PLX 9054 config registers
- * BAR 1 is i/o memory; unused here
- * BAR 2 holds EPLD config registers
- * BAR 3 holds NET2272 registers
- */
-
- /* Find and map all address spaces */
- for (i = 0; i < 4; ++i) {
- if (i == 1)
- continue; /* BAR1 unused */
-
- resource = pci_resource_start(pdev, i);
- len = pci_resource_len(pdev, i);
-
- if (!request_mem_region(resource, len, driver_name)) {
- dev_dbg(dev->dev, "controller already in use\n");
- ret = -EBUSY;
- goto err;
- }
-
- mem_mapped_addr[i] = ioremap(resource, len);
- if (mem_mapped_addr[i] == NULL) {
- release_mem_region(resource, len);
- dev_dbg(dev->dev, "can't map memory\n");
- ret = -EFAULT;
- goto err;
- }
- }
-
- dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
- dev->rdk1.epld_base_addr = mem_mapped_addr[2];
- dev->base_addr = mem_mapped_addr[3];
-
- /* Set PLX 9054 bus width (16 bits) */
- tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
- writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
- dev->rdk1.plx9054_base_addr + LBRD1);
-
- /* Enable PLX 9054 Interrupts */
- writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
- (1 << PCI_INTERRUPT_ENABLE) |
- (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
- dev->rdk1.plx9054_base_addr + INTCSR);
-
- writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
- dev->rdk1.plx9054_base_addr + DMACSR0);
-
- /* reset */
- writeb((1 << EPLD_DMA_ENABLE) |
- (1 << DMA_CTL_DACK) |
- (1 << DMA_TIMEOUT_ENABLE) |
- (1 << USER) |
- (0 << MPX_MODE) |
- (1 << BUSWIDTH) |
- (1 << NET2272_RESET),
- dev->base_addr + EPLD_IO_CONTROL_REGISTER);
-
- mb();
- writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
- ~(1 << NET2272_RESET),
- dev->base_addr + EPLD_IO_CONTROL_REGISTER);
- udelay(200);
-
- return 0;
-
- err:
- while (--i >= 0) {
- if (i == 1)
- continue; /* BAR1 unused */
- iounmap(mem_mapped_addr[i]);
- release_mem_region(pci_resource_start(pdev, i),
- pci_resource_len(pdev, i));
- }
-
- return ret;
-}
-
-static int
-net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
-{
- unsigned long resource, len;
- void __iomem *mem_mapped_addr[2];
- int ret, i;
-
- /*
-	 * BAR 0 holds FPGA config registers
- * BAR 1 holds NET2272 registers
- */
-
- /* Find and map all address spaces, bar2-3 unused in rdk 2 */
- for (i = 0; i < 2; ++i) {
- resource = pci_resource_start(pdev, i);
- len = pci_resource_len(pdev, i);
-
- if (!request_mem_region(resource, len, driver_name)) {
- dev_dbg(dev->dev, "controller already in use\n");
- ret = -EBUSY;
- goto err;
- }
-
- mem_mapped_addr[i] = ioremap(resource, len);
- if (mem_mapped_addr[i] == NULL) {
- release_mem_region(resource, len);
- dev_dbg(dev->dev, "can't map memory\n");
- ret = -EFAULT;
- goto err;
- }
- }
-
- dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
- dev->base_addr = mem_mapped_addr[1];
-
- mb();
- /* Set 2272 bus width (16 bits) and reset */
- writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
- udelay(200);
- writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
- /* Print fpga version number */
- dev_info(dev->dev, "RDK2 FPGA version %08x\n",
- readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
- /* Enable FPGA Interrupts */
- writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
-
- return 0;
-
- err:
- while (--i >= 0) {
- iounmap(mem_mapped_addr[i]);
- release_mem_region(pci_resource_start(pdev, i),
- pci_resource_len(pdev, i));
- }
-
- return ret;
-}
-
-static int
-net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct net2272 *dev;
- int ret;
-
- dev = net2272_probe_init(&pdev->dev, pdev->irq);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
- dev->dev_id = pdev->device;
-
- if (pci_enable_device(pdev) < 0) {
- ret = -ENODEV;
- goto err_put;
- }
-
- pci_set_master(pdev);
-
- switch (pdev->device) {
- case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
- case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
- default: BUG();
- }
- if (ret)
- goto err_pci;
-
- ret = net2272_probe_fin(dev, 0);
- if (ret)
- goto err_pci;
-
- pci_set_drvdata(pdev, dev);
-
- return 0;
-
- err_pci:
- pci_disable_device(pdev);
- err_put:
- usb_put_gadget(&dev->gadget);
-
- return ret;
-}
-
-static void
-net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
-{
- int i;
-
- /* disable PLX 9054 interrupts */
- writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
- ~(1 << PCI_INTERRUPT_ENABLE),
- dev->rdk1.plx9054_base_addr + INTCSR);
-
- /* clean up resources allocated during probe() */
- iounmap(dev->rdk1.plx9054_base_addr);
- iounmap(dev->rdk1.epld_base_addr);
-
- for (i = 0; i < 4; ++i) {
- if (i == 1)
- continue; /* BAR1 unused */
- release_mem_region(pci_resource_start(pdev, i),
- pci_resource_len(pdev, i));
- }
-}
-
-static void
-net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
-{
- int i;
-
-	/* FIXME: disable FPGA interrupts here; the commented-out code below
-	 * is stale copy-paste from the RDK1 (PLX 9054) path and does not
-	 * apply to the RDK2 FPGA.
-	 *
-	 * writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
-	 *	~(1 << PCI_INTERRUPT_ENABLE),
-	 *	dev->rdk1.plx9054_base_addr + INTCSR);
-	 */
-
- /* clean up resources allocated during probe() */
- iounmap(dev->rdk2.fpga_base_addr);
-
- for (i = 0; i < 2; ++i)
- release_mem_region(pci_resource_start(pdev, i),
- pci_resource_len(pdev, i));
-}
-
-static void
-net2272_pci_remove(struct pci_dev *pdev)
-{
- struct net2272 *dev = pci_get_drvdata(pdev);
-
- net2272_remove(dev);
-
- switch (pdev->device) {
- case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
- case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
- default: BUG();
- }
-
- pci_disable_device(pdev);
-
- usb_put_gadget(&dev->gadget);
-}
-
-/* Table of matching PCI IDs */
-static struct pci_device_id pci_ids[] = {
- { /* RDK 1 card */
- .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
- .class_mask = 0,
- .vendor = PCI_VENDOR_ID_PLX,
- .device = PCI_DEVICE_ID_RDK1,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { /* RDK 2 card */
- .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
- .class_mask = 0,
- .vendor = PCI_VENDOR_ID_PLX,
- .device = PCI_DEVICE_ID_RDK2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { }
-};
-MODULE_DEVICE_TABLE(pci, pci_ids);
-
-static struct pci_driver net2272_pci_driver = {
- .name = driver_name,
- .id_table = pci_ids,
-
- .probe = net2272_pci_probe,
- .remove = net2272_pci_remove,
-};
-
-static int net2272_pci_register(void)
-{
- return pci_register_driver(&net2272_pci_driver);
-}
-
-static void net2272_pci_unregister(void)
-{
- pci_unregister_driver(&net2272_pci_driver);
-}
-
-#else
-static inline int net2272_pci_register(void) { return 0; }
-static inline void net2272_pci_unregister(void) { }
-#endif
-
-/*---------------------------------------------------------------------------*/
-
-static int
-net2272_plat_probe(struct platform_device *pdev)
-{
- struct net2272 *dev;
- int ret;
- unsigned int irqflags;
- resource_size_t base, len;
- struct resource *iomem, *iomem_bus, *irq_res;
-
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
- if (!irq_res || !iomem) {
-		dev_err(&pdev->dev, "must provide irq/base addr\n");
- return -EINVAL;
- }
-
- dev = net2272_probe_init(&pdev->dev, irq_res->start);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
-
- irqflags = 0;
- if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
- irqflags |= IRQF_TRIGGER_RISING;
- if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
- irqflags |= IRQF_TRIGGER_FALLING;
- if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
- irqflags |= IRQF_TRIGGER_HIGH;
- if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
- irqflags |= IRQF_TRIGGER_LOW;
-
- base = iomem->start;
- len = resource_size(iomem);
- if (iomem_bus)
- dev->base_shift = iomem_bus->start;
-
- if (!request_mem_region(base, len, driver_name)) {
- dev_dbg(dev->dev, "get request memory region!\n");
- ret = -EBUSY;
- goto err;
- }
- dev->base_addr = ioremap(base, len);
- if (!dev->base_addr) {
- dev_dbg(dev->dev, "can't map memory\n");
- ret = -EFAULT;
- goto err_req;
- }
-
- ret = net2272_probe_fin(dev, irqflags);
- if (ret)
- goto err_io;
-
- platform_set_drvdata(pdev, dev);
- dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
- (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
-
- return 0;
-
- err_io:
- iounmap(dev->base_addr);
- err_req:
- release_mem_region(base, len);
- err:
- usb_put_gadget(&dev->gadget);
-
- return ret;
-}
-
-static void
-net2272_plat_remove(struct platform_device *pdev)
-{
- struct net2272 *dev = platform_get_drvdata(pdev);
-
- net2272_remove(dev);
-
- release_mem_region(pdev->resource[0].start,
- resource_size(&pdev->resource[0]));
-
- usb_put_gadget(&dev->gadget);
-}
-
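-/*
- * Illustrative sketch (not from the original source): a hypothetical
- * board-file registration showing the resources net2272_plat_probe()
- * consumes.  The base address and IRQ number are made-up placeholders;
- * the optional IORESOURCE_BUS entry supplies dev->base_shift.
- */
-#if 0
-static struct resource board_net2272_resources[] = {
-	[0] = {
-		.start	= 0x20000000,	/* hypothetical chip base */
-		.end	= 0x20000000 + 0xff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= 42,		/* hypothetical IRQ line */
-		.end	= 42,
-		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-	},
-	[2] = {				/* optional: local bus address shift */
-		.start	= 1,
-		.end	= 1,
-		.flags	= IORESOURCE_BUS,
-	},
-};
-
-static struct platform_device board_net2272_device = {
-	.name		= "net2272",	/* matches driver_name */
-	.id		= -1,
-	.resource	= board_net2272_resources,
-	.num_resources	= ARRAY_SIZE(board_net2272_resources),
-};
-/* board init code would then call platform_device_register(&board_net2272_device); */
-#endif
-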
-static struct platform_driver net2272_plat_driver = {
- .probe = net2272_plat_probe,
- .remove = net2272_plat_remove,
- .driver = {
- .name = driver_name,
- },
- /* FIXME .suspend, .resume */
-};
-MODULE_ALIAS("platform:net2272");
-
-static int __init net2272_init(void)
-{
- int ret;
-
- ret = net2272_pci_register();
- if (ret)
- return ret;
- ret = platform_driver_register(&net2272_plat_driver);
- if (ret)
- goto err_pci;
- return ret;
-
-err_pci:
- net2272_pci_unregister();
- return ret;
-}
-module_init(net2272_init);
-
-static void __exit net2272_cleanup(void)
-{
- net2272_pci_unregister();
- platform_driver_unregister(&net2272_plat_driver);
-}
-module_exit(net2272_cleanup);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("PLX Technology, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h
deleted file mode 100644
index a9994f737588..000000000000
--- a/drivers/usb/gadget/udc/net2272.h
+++ /dev/null
@@ -1,584 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * PLX NET2272 high/full speed USB device controller
- *
- * Copyright (C) 2005-2006 PLX Technology, Inc.
- * Copyright (C) 2006-2011 Analog Devices, Inc.
- */
-
-#ifndef __NET2272_H__
-#define __NET2272_H__
-
-/* Main Registers */
-#define REGADDRPTR 0x00
-#define REGDATA 0x01
-#define IRQSTAT0 0x02
-#define ENDPOINT_0_INTERRUPT 0
-#define ENDPOINT_A_INTERRUPT 1
-#define ENDPOINT_B_INTERRUPT 2
-#define ENDPOINT_C_INTERRUPT 3
-#define VIRTUALIZED_ENDPOINT_INTERRUPT 4
-#define SETUP_PACKET_INTERRUPT 5
-#define DMA_DONE_INTERRUPT 6
-#define SOF_INTERRUPT 7
-#define IRQSTAT1 0x03
-#define CONTROL_STATUS_INTERRUPT 1
-#define VBUS_INTERRUPT 2
-#define SUSPEND_REQUEST_INTERRUPT 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT 4
-#define RESUME_INTERRUPT 5
-#define ROOT_PORT_RESET_INTERRUPT 6
-#define RESET_STATUS 7
-#define PAGESEL 0x04
-#define DMAREQ 0x1c
-#define DMA_ENDPOINT_SELECT 0
-#define DREQ_POLARITY 1
-#define DACK_POLARITY 2
-#define EOT_POLARITY 3
-#define DMA_CONTROL_DACK 4
-#define DMA_REQUEST_ENABLE 5
-#define DMA_REQUEST 6
-#define DMA_BUFFER_VALID 7
-#define SCRATCH 0x1d
-#define IRQENB0 0x20
-#define ENDPOINT_0_INTERRUPT_ENABLE 0
-#define ENDPOINT_A_INTERRUPT_ENABLE 1
-#define ENDPOINT_B_INTERRUPT_ENABLE 2
-#define ENDPOINT_C_INTERRUPT_ENABLE 3
-#define VIRTUALIZED_ENDPOINT_INTERRUPT_ENABLE 4
-#define SETUP_PACKET_INTERRUPT_ENABLE 5
-#define DMA_DONE_INTERRUPT_ENABLE 6
-#define SOF_INTERRUPT_ENABLE 7
-#define IRQENB1 0x21
-#define VBUS_INTERRUPT_ENABLE 2
-#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
-#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 4
-#define RESUME_INTERRUPT_ENABLE 5
-#define ROOT_PORT_RESET_INTERRUPT_ENABLE 6
-#define LOCCTL 0x22
-#define DATA_WIDTH 0
-#define LOCAL_CLOCK_OUTPUT 1
-#define LOCAL_CLOCK_OUTPUT_OFF 0
-#define LOCAL_CLOCK_OUTPUT_3_75MHZ 1
-#define LOCAL_CLOCK_OUTPUT_7_5MHZ 2
-#define LOCAL_CLOCK_OUTPUT_15MHZ 3
-#define LOCAL_CLOCK_OUTPUT_30MHZ 4
-#define LOCAL_CLOCK_OUTPUT_60MHZ 5
-#define DMA_SPLIT_BUS_MODE 4
-#define BYTE_SWAP 5
-#define BUFFER_CONFIGURATION 6
-#define BUFFER_CONFIGURATION_EPA512_EPB512 0
-#define BUFFER_CONFIGURATION_EPA1024_EPB512 1
-#define BUFFER_CONFIGURATION_EPA1024_EPB1024 2
-#define BUFFER_CONFIGURATION_EPA1024DB 3
-#define CHIPREV_LEGACY 0x23
-#define NET2270_LEGACY_REV 0x40
-#define LOCCTL1 0x24
-#define DMA_MODE 0
-#define SLOW_DREQ 0
-#define FAST_DREQ 1
-#define BURST_MODE 2
-#define DMA_DACK_ENABLE 2
-#define CHIPREV_2272 0x25
-#define CHIPREV_NET2272_R1 0x10
-#define CHIPREV_NET2272_R1A 0x11
-/* USB Registers */
-#define USBCTL0 0x18
-#define IO_WAKEUP_ENABLE 1
-#define USB_DETECT_ENABLE 3
-#define USB_ROOT_PORT_WAKEUP_ENABLE 5
-#define USBCTL1 0x19
-#define VBUS_PIN 0
-#define USB_FULL_SPEED 1
-#define USB_HIGH_SPEED 2
-#define GENERATE_RESUME 3
-#define VIRTUAL_ENDPOINT_ENABLE 4
-#define FRAME0 0x1a
-#define FRAME1 0x1b
-#define OURADDR 0x30
-#define FORCE_IMMEDIATE 7
-#define USBDIAG 0x31
-#define FORCE_TRANSMIT_CRC_ERROR 0
-#define PREVENT_TRANSMIT_BIT_STUFF 1
-#define FORCE_RECEIVE_ERROR 2
-#define FAST_TIMES 4
-#define USBTEST 0x32
-#define TEST_MODE_SELECT 0
-#define NORMAL_OPERATION 0
-#define XCVRDIAG 0x33
-#define FORCE_FULL_SPEED 2
-#define FORCE_HIGH_SPEED 3
-#define OPMODE 4
-#define NORMAL_OPERATION 0
-#define NON_DRIVING 1
-#define DISABLE_BITSTUFF_AND_NRZI_ENCODE 2
-#define LINESTATE 6
-#define SE0_STATE 0
-#define J_STATE 1
-#define K_STATE 2
-#define SE1_STATE 3
-#define VIRTOUT0 0x34
-#define VIRTOUT1 0x35
-#define VIRTIN0 0x36
-#define VIRTIN1 0x37
-#define SETUP0 0x40
-#define SETUP1 0x41
-#define SETUP2 0x42
-#define SETUP3 0x43
-#define SETUP4 0x44
-#define SETUP5 0x45
-#define SETUP6 0x46
-#define SETUP7 0x47
-/* Endpoint Registers (Paged via PAGESEL) */
-#define EP_DATA 0x05
-#define EP_STAT0 0x06
-#define DATA_IN_TOKEN_INTERRUPT 0
-#define DATA_OUT_TOKEN_INTERRUPT 1
-#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
-#define DATA_PACKET_RECEIVED_INTERRUPT 3
-#define SHORT_PACKET_TRANSFERRED_INTERRUPT 4
-#define NAK_OUT_PACKETS 5
-#define BUFFER_EMPTY 6
-#define BUFFER_FULL 7
-#define EP_STAT1 0x07
-#define TIMEOUT 0
-#define USB_OUT_ACK_SENT 1
-#define USB_OUT_NAK_SENT 2
-#define USB_IN_ACK_RCVD 3
-#define USB_IN_NAK_SENT 4
-#define USB_STALL_SENT 5
-#define LOCAL_OUT_ZLP 6
-#define BUFFER_FLUSH 7
-#define EP_TRANSFER0 0x08
-#define EP_TRANSFER1 0x09
-#define EP_TRANSFER2 0x0a
-#define EP_IRQENB 0x0b
-#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
-#define DATA_OUT_TOKEN_INTERRUPT_ENABLE 1
-#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
-#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
-#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 4
-#define EP_AVAIL0 0x0c
-#define EP_AVAIL1 0x0d
-#define EP_RSPCLR 0x0e
-#define EP_RSPSET 0x0f
-#define ENDPOINT_HALT 0
-#define ENDPOINT_TOGGLE 1
-#define NAK_OUT_PACKETS_MODE 2
-#define CONTROL_STATUS_PHASE_HANDSHAKE 3
-#define INTERRUPT_MODE 4
-#define AUTOVALIDATE 5
-#define HIDE_STATUS_PHASE 6
-#define ALT_NAK_OUT_PACKETS 7
-#define EP_MAXPKT0 0x28
-#define EP_MAXPKT1 0x29
-#define ADDITIONAL_TRANSACTION_OPPORTUNITIES 3
-#define NONE_ADDITIONAL_TRANSACTION 0
-#define ONE_ADDITIONAL_TRANSACTION 1
-#define TWO_ADDITIONAL_TRANSACTION 2
-#define EP_CFG 0x2a
-#define ENDPOINT_NUMBER 0
-#define ENDPOINT_DIRECTION 4
-#define ENDPOINT_TYPE 5
-#define ENDPOINT_ENABLE 7
-#define EP_HBW 0x2b
-#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 0
-#define DATA0_PID 0
-#define DATA1_PID 1
-#define DATA2_PID 2
-#define MDATA_PID 3
-#define EP_BUFF_STATES 0x2c
-#define BUFFER_A_STATE 0
-#define BUFFER_B_STATE 2
-#define BUFF_FREE 0
-#define BUFF_VALID 1
-#define BUFF_LCL 2
-#define BUFF_USB 3
-
-/*---------------------------------------------------------------------------*/
-
-#define PCI_DEVICE_ID_RDK1 0x9054
-
-/* PCI-RDK EPLD Registers */
-#define RDK_EPLD_IO_REGISTER1 0x00000000
-#define RDK_EPLD_USB_RESET 0
-#define RDK_EPLD_USB_POWERDOWN 1
-#define RDK_EPLD_USB_WAKEUP 2
-#define RDK_EPLD_USB_EOT 3
-#define RDK_EPLD_DPPULL 4
-#define RDK_EPLD_IO_REGISTER2 0x00000004
-#define RDK_EPLD_BUSWIDTH 0
-#define RDK_EPLD_USER 2
-#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3
-#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4
-#define RDK_EPLD_STATUS_REGISTER 0x00000008
-#define RDK_EPLD_USB_LRESET 0
-#define RDK_EPLD_REVISION_REGISTER 0x0000000c
-
-/* PCI-RDK PLX 9054 Registers */
-#define INTCSR 0x68
-#define PCI_INTERRUPT_ENABLE 8
-#define LOCAL_INTERRUPT_INPUT_ENABLE 11
-#define LOCAL_INPUT_INTERRUPT_ACTIVE 15
-#define LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE 18
-#define LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE 19
-#define DMA_CHANNEL_0_INTERRUPT_ACTIVE 21
-#define DMA_CHANNEL_1_INTERRUPT_ACTIVE 22
-#define CNTRL 0x6C
-#define RELOAD_CONFIGURATION_REGISTERS 29
-#define PCI_ADAPTER_SOFTWARE_RESET 30
-#define DMAMODE0 0x80
-#define LOCAL_BUS_WIDTH 0
-#define INTERNAL_WAIT_STATES 2
-#define TA_READY_INPUT_ENABLE 6
-#define LOCAL_BURST_ENABLE 8
-#define SCATTER_GATHER_MODE 9
-#define DONE_INTERRUPT_ENABLE 10
-#define LOCAL_ADDRESSING_MODE 11
-#define DEMAND_MODE 12
-#define DMA_EOT_ENABLE 14
-#define FAST_SLOW_TERMINATE_MODE_SELECT 15
-#define DMA_CHANNEL_INTERRUPT_SELECT 17
-#define DMAPADR0 0x84
-#define DMALADR0 0x88
-#define DMASIZ0 0x8c
-#define DMADPR0 0x90
-#define DESCRIPTOR_LOCATION 0
-#define END_OF_CHAIN 1
-#define INTERRUPT_AFTER_TERMINAL_COUNT 2
-#define DIRECTION_OF_TRANSFER 3
-#define DMACSR0 0xa8
-#define CHANNEL_ENABLE 0
-#define CHANNEL_START 1
-#define CHANNEL_ABORT 2
-#define CHANNEL_CLEAR_INTERRUPT 3
-#define CHANNEL_DONE 4
-#define DMATHR 0xb0
-#define LBRD1 0xf8
-#define MEMORY_SPACE_LOCAL_BUS_WIDTH 0
-#define W8_BIT 0
-#define W16_BIT 1
-
-/* Special OR'ing of INTCSR bits */
-#define LOCAL_INTERRUPT_TEST \
- ((1 << LOCAL_INPUT_INTERRUPT_ACTIVE) | \
- (1 << LOCAL_INTERRUPT_INPUT_ENABLE))
-
-#define DMA_CHANNEL_0_TEST \
- ((1 << DMA_CHANNEL_0_INTERRUPT_ACTIVE) | \
- (1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE))
-
-#define DMA_CHANNEL_1_TEST \
- ((1 << DMA_CHANNEL_1_INTERRUPT_ACTIVE) | \
- (1 << LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE))
-
-/* EPLD Registers */
-#define RDK_EPLD_IO_REGISTER1 0x00000000
-#define RDK_EPLD_USB_RESET 0
-#define RDK_EPLD_USB_POWERDOWN 1
-#define RDK_EPLD_USB_WAKEUP 2
-#define RDK_EPLD_USB_EOT 3
-#define RDK_EPLD_DPPULL 4
-#define RDK_EPLD_IO_REGISTER2 0x00000004
-#define RDK_EPLD_BUSWIDTH 0
-#define RDK_EPLD_USER 2
-#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3
-#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4
-#define RDK_EPLD_STATUS_REGISTER 0x00000008
-#define RDK_EPLD_USB_LRESET 0
-#define RDK_EPLD_REVISION_REGISTER 0x0000000c
-
-#define EPLD_IO_CONTROL_REGISTER 0x400
-#define NET2272_RESET 0
-#define BUSWIDTH 1
-#define MPX_MODE 3
-#define USER 4
-#define DMA_TIMEOUT_ENABLE 5
-#define DMA_CTL_DACK 6
-#define EPLD_DMA_ENABLE 7
-#define EPLD_DMA_CONTROL_REGISTER 0x800
-#define SPLIT_DMA_MODE 0
-#define SPLIT_DMA_DIRECTION 1
-#define SPLIT_DMA_ENABLE 2
-#define SPLIT_DMA_INTERRUPT_ENABLE 3
-#define SPLIT_DMA_INTERRUPT 4
-#define EPLD_DMA_MODE 5
-#define EPLD_DMA_CONTROLLER_ENABLE 7
-#define SPLIT_DMA_ADDRESS_LOW 0xc00
-#define SPLIT_DMA_ADDRESS_HIGH 0x1000
-#define SPLIT_DMA_BYTE_COUNT_LOW 0x1400
-#define SPLIT_DMA_BYTE_COUNT_HIGH 0x1800
-#define EPLD_REVISION_REGISTER 0x1c00
-#define SPLIT_DMA_RAM 0x4000
-#define DMA_RAM_SIZE 0x1000
-
-/*---------------------------------------------------------------------------*/
-
-#define PCI_DEVICE_ID_RDK2 0x3272
-
-/* PCI-RDK version 2 registers */
-
-/* Main Control Registers */
-
-#define RDK2_IRQENB 0x00
-#define RDK2_IRQSTAT 0x04
-#define PB7 23
-#define PB6 22
-#define PB5 21
-#define PB4 20
-#define PB3 19
-#define PB2 18
-#define PB1 17
-#define PB0 16
-#define GP3 23
-#define GP2 23
-#define GP1 23
-#define GP0 23
-#define DMA_RETRY_ABORT 6
-#define DMA_PAUSE_DONE 5
-#define DMA_ABORT_DONE 4
-#define DMA_OUT_FIFO_TRANSFER_DONE 3
-#define DMA_LOCAL_DONE 2
-#define DMA_PCI_DONE 1
-#define NET2272_PCI_IRQ 0
-
-#define RDK2_LOCCTLRDK 0x08
-#define CHIP_RESET 3
-#define SPLIT_DMA 2
-#define MULTIPLEX_MODE 1
-#define BUS_WIDTH 0
-
-#define RDK2_GPIOCTL 0x10
-#define GP3_OUT_ENABLE 7
-#define GP2_OUT_ENABLE 6
-#define GP1_OUT_ENABLE 5
-#define GP0_OUT_ENABLE 4
-#define GP3_DATA 3
-#define GP2_DATA 2
-#define GP1_DATA 1
-#define GP0_DATA 0
-
-#define RDK2_LEDSW 0x14
-#define LED3 27
-#define LED2 26
-#define LED1 25
-#define LED0 24
-#define PBUTTON 16
-#define DIPSW 0
-
-#define RDK2_DIAG 0x18
-#define RDK2_FAST_TIMES 2
-#define FORCE_PCI_SERR 1
-#define FORCE_PCI_INT 0
-#define RDK2_FPGAREV 0x1C
-
-/* Dma Control registers */
-#define RDK2_DMACTL 0x80
-#define ADDR_HOLD 24
-#define RETRY_COUNT 16 /* 23:16 */
-#define FIFO_THRESHOLD 11 /* 15:11 */
-#define MEM_WRITE_INVALIDATE 10
-#define READ_MULTIPLE 9
-#define READ_LINE 8
-#define RDK2_DMA_MODE 6 /* 7:6 */
-#define CONTROL_DACK 5
-#define EOT_ENABLE 4
-#define EOT_POLARITY 3
-#define DACK_POLARITY 2
-#define DREQ_POLARITY 1
-#define DMA_ENABLE 0
-
-#define RDK2_DMASTAT 0x84
-#define GATHER_COUNT 12 /* 14:12 */
-#define FIFO_COUNT 6 /* 11:6 */
-#define FIFO_FLUSH 5
-#define FIFO_TRANSFER 4
-#define PAUSE_DONE 3
-#define ABORT_DONE 2
-#define DMA_ABORT 1
-#define DMA_START 0
-
-#define RDK2_DMAPCICOUNT 0x88
-#define DMA_DIRECTION 31
-#define DMA_PCI_BYTE_COUNT 0 /* 0:23 */
-
-#define RDK2_DMALOCCOUNT 0x8C /* 0:23 dma local byte count */
-
-#define RDK2_DMAADDR 0x90 /* 2:31 PCI bus starting address */
-
-/*---------------------------------------------------------------------------*/
-
-#define REG_INDEXED_THRESHOLD (1 << 5)
-
-/* DRIVER DATA STRUCTURES and UTILITIES */
-struct net2272_ep {
- struct usb_ep ep;
- struct net2272 *dev;
- unsigned long irqs;
-
- /* analogous to a host-side qh */
- struct list_head queue;
- const struct usb_endpoint_descriptor *desc;
- unsigned num:8,
- fifo_size:12,
- stopped:1,
- wedged:1,
- is_in:1,
- is_iso:1,
- dma:1,
- not_empty:1;
-};
-
-struct net2272 {
- /* each device provides one gadget, several endpoints */
- struct usb_gadget gadget;
- struct device *dev;
- unsigned short dev_id;
-
- spinlock_t lock;
- struct net2272_ep ep[4];
- struct usb_gadget_driver *driver;
- unsigned protocol_stall:1,
- softconnect:1,
- wakeup:1,
- added:1,
- async_callbacks:1,
- dma_eot_polarity:1,
- dma_dack_polarity:1,
- dma_dreq_polarity:1,
- dma_busy:1;
- u16 chiprev;
- u8 pagesel;
-
- unsigned int irq;
- unsigned short fifo_mode;
-
- unsigned int base_shift;
- u16 __iomem *base_addr;
- union {
-#ifdef CONFIG_USB_PCI
- struct {
- void __iomem *plx9054_base_addr;
- void __iomem *epld_base_addr;
- } rdk1;
- struct {
-		/* BAR0 and BAR1 are both mem-mapped; BAR1 is base_addr */
- void __iomem *fpga_base_addr;
- } rdk2;
-#endif
- };
-};
-
-static void __iomem *
-net2272_reg_addr(struct net2272 *dev, unsigned int reg)
-{
- return dev->base_addr + (reg << dev->base_shift);
-}
-
-static void
-net2272_write(struct net2272 *dev, unsigned int reg, u8 value)
-{
- if (reg >= REG_INDEXED_THRESHOLD) {
- /*
- * Indexed register; use REGADDRPTR/REGDATA
-		 * - Save and restore REGADDRPTR. This keeps REGADDRPTR from being
-		 *   changed by other code sections, but it is time consuming.
-		 * - Performance tip: either skip the save/restore (if it is safe),
-		 *   or do it only in critical sections.
- u8 tmp = readb(dev->base_addr + REGADDRPTR);
- */
- writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
- writeb(value, net2272_reg_addr(dev, REGDATA));
- /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */
- } else
- writeb(value, net2272_reg_addr(dev, reg));
-}
-
-static u8
-net2272_read(struct net2272 *dev, unsigned int reg)
-{
- u8 ret;
-
- if (reg >= REG_INDEXED_THRESHOLD) {
- /*
- * Indexed register; use REGADDRPTR/REGDATA
-		 * - Save and restore REGADDRPTR. This keeps REGADDRPTR from being
-		 *   changed by other code sections, but it is time consuming.
-		 * - Performance tip: either skip the save/restore (if it is safe),
-		 *   or do it only in critical sections.
- u8 tmp = readb(dev->base_addr + REGADDRPTR);
- */
- writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
- ret = readb(net2272_reg_addr(dev, REGDATA));
- /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */
- } else
- ret = readb(net2272_reg_addr(dev, reg));
-
- return ret;
-}
-
-static void
-net2272_ep_write(struct net2272_ep *ep, unsigned int reg, u8 value)
-{
- struct net2272 *dev = ep->dev;
-
- if (dev->pagesel != ep->num) {
- net2272_write(dev, PAGESEL, ep->num);
- dev->pagesel = ep->num;
- }
- net2272_write(dev, reg, value);
-}
-
-static u8
-net2272_ep_read(struct net2272_ep *ep, unsigned int reg)
-{
- struct net2272 *dev = ep->dev;
-
- if (dev->pagesel != ep->num) {
- net2272_write(dev, PAGESEL, ep->num);
- dev->pagesel = ep->num;
- }
- return net2272_read(dev, reg);
-}
-
-static void allow_status(struct net2272_ep *ep)
-{
- /* ep0 only */
- net2272_ep_write(ep, EP_RSPCLR,
- (1 << CONTROL_STATUS_PHASE_HANDSHAKE) |
- (1 << ALT_NAK_OUT_PACKETS) |
- (1 << NAK_OUT_PACKETS_MODE));
- ep->stopped = 1;
-}
-
-static void set_halt(struct net2272_ep *ep)
-{
- /* ep0 and bulk/intr endpoints */
- net2272_ep_write(ep, EP_RSPCLR, 1 << CONTROL_STATUS_PHASE_HANDSHAKE);
- net2272_ep_write(ep, EP_RSPSET, 1 << ENDPOINT_HALT);
-}
-
-static void clear_halt(struct net2272_ep *ep)
-{
- /* ep0 and bulk/intr endpoints */
- net2272_ep_write(ep, EP_RSPCLR,
- (1 << ENDPOINT_HALT) | (1 << ENDPOINT_TOGGLE));
-}
-
-/* count (<= 4) bytes in the next fifo write will be valid */
-static void set_fifo_bytecount(struct net2272_ep *ep, unsigned count)
-{
- /* net2272_ep_write will truncate to u8 for us */
- net2272_ep_write(ep, EP_TRANSFER2, count >> 16);
- net2272_ep_write(ep, EP_TRANSFER1, count >> 8);
- net2272_ep_write(ep, EP_TRANSFER0, count);
-}
-
-struct net2272_request {
- struct usb_request req;
- struct list_head queue;
- unsigned mapped:1,
- valid:1;
-};
-
-#endif
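For context on the paged/indexed access scheme the deleted header implemented: registers at offsets >= REG_INDEXED_THRESHOLD are reached indirectly through REGADDRPTR/REGDATA, and endpoint registers are additionally paged via PAGESEL. A minimal sketch of how a caller composes the helpers above; the function name and the write-1-to-clear behaviour of the status bits are assumptions for illustration only:

	static void my_ack_packet_received(struct net2272_ep *ep)
	{
		/* net2272_ep_read() selects the endpoint's page via PAGESEL
		 * (cached in dev->pagesel), then performs a direct or indexed
		 * access depending on the register offset */
		u8 stat = net2272_ep_read(ep, EP_STAT0);

		if (stat & (1 << DATA_PACKET_RECEIVED_INTERRUPT))
			/* assumed write-1-to-clear semantics for status bits */
			net2272_ep_write(ep, EP_STAT0,
					 1 << DATA_PACKET_RECEIVED_INTERRUPT);
	}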
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index c93ea210c17c..062bf2b57d2e 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1860,7 +1860,7 @@ static irqreturn_t omap_udc_irq(int irq, void *_udc)
static void pio_out_timer(struct timer_list *t)
{
- struct omap_ep *ep = from_timer(ep, t, timer);
+ struct omap_ep *ep = timer_container_of(ep, t, timer);
unsigned long flags;
u16 stat_flg;
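The from_timer() to timer_container_of() conversions in this and the following hunks are mechanical; both resolve the enclosing structure from the timer_list pointer via container_of(). A minimal sketch of the new spelling, with struct my_ctrl as a hypothetical example:

	struct my_ctrl {
		struct timer_list watchdog;
		/* ... */
	};

	static void my_watchdog_fn(struct timer_list *t)
	{
		/* container_of() on the 'watchdog' member, as from_timer() did */
		struct my_ctrl *ctrl = timer_container_of(ctrl, t, watchdog);
		/* ... */
	}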
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 24eb1ae78e45..f5d09a91e554 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1574,7 +1574,7 @@ static inline void clear_ep_state (struct pxa25x_udc *dev)
static void udc_watchdog(struct timer_list *t)
{
- struct pxa25x_udc *dev = from_timer(dev, t, timer);
+ struct pxa25x_udc *dev = timer_container_of(dev, t, timer);
local_irq_disable();
if (dev->ep0state == EP0_STALL
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 6c1d15b2250c..e5c2630e3711 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1516,7 +1516,7 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
+ struct r8a66597 *r8a66597 = timer_container_of(r8a66597, t, timer);
unsigned long flags;
u16 tmp;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 89b304cf6d03..3e4d56457597 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2397,8 +2397,7 @@ static int renesas_usb3_stop(struct usb_gadget *gadget)
rzv2m_usb3drd_reset(usb3_to_dev(usb3)->parent, false);
renesas_usb3_stop_controller(usb3);
- if (usb3->phy)
- phy_exit(usb3->phy);
+ phy_exit(usb3->phy);
pm_runtime_put(usb3_to_dev(usb3));
@@ -2984,8 +2983,7 @@ static int renesas_usb3_suspend(struct device *dev)
return 0;
renesas_usb3_stop_controller(usb3);
- if (usb3->phy)
- phy_exit(usb3->phy);
+ phy_exit(usb3->phy);
pm_runtime_put(dev);
return 0;
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index ae2aeb271897..fa94cc065274 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2178,8 +2178,6 @@ fail:
/**
* xudc_remove - Releases the resources allocated during the initialization.
* @pdev: pointer to the platform device structure.
- *
- * Return: 0 always
*/
static void xudc_remove(struct platform_device *pdev)
{
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d011d6c753ed..109100cc77a3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -104,6 +104,15 @@ config USB_XHCI_RZV2M
Say 'Y' to enable the support for the xHCI host controller
found in Renesas RZ/V2M SoC.
+config USB_XHCI_SIDEBAND
+ bool "xHCI support for sideband"
+ help
+	  Say 'Y' to enable support for the xHCI sideband capability.
+	  This provides a mechanism for a sideband datapath for payload
+	  associated with audio class endpoints, allowing an audio DSP to
+	  use xHCI USB endpoints directly so that the CPU can sleep while
+	  audio is playing.
+
config USB_XHCI_TEGRA
tristate "xHCI support for NVIDIA Tegra SoCs"
depends on PHY_TEGRA_XUSB
@@ -225,7 +234,7 @@ config USB_EHCI_HCD_OMAP
tristate "EHCI support for OMAP3 and later chips"
depends on ARCH_OMAP || COMPILE_TEST
depends on NOP_USB_XCEIV
- default y
+ default ARCH_OMAP
help
Enables support for the on-chip EHCI controller on
OMAP3 and later chips.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index be4e5245c52f..4df946c05ba0 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -32,6 +32,10 @@ endif
xhci-rcar-hcd-y += xhci-rcar.o
xhci-rcar-hcd-$(CONFIG_USB_XHCI_RZV2M) += xhci-rzv2m.o
+ifneq ($(CONFIG_USB_XHCI_SIDEBAND),)
+ xhci-hcd-y += xhci-sideband.o
+endif
+
obj-$(CONFIG_USB_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 26f13278d4d6..6ed2fa5418a4 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -410,15 +410,13 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
return retval;
}
-struct ehci_fsl {
- struct ehci_hcd ehci;
-
-#ifdef CONFIG_PM
+struct ehci_fsl_priv {
/* Saved USB PHY settings, need to restore after deep sleep. */
u32 usb_ctrl;
-#endif
};
+#define hcd_to_ehci_fsl_priv(h) ((struct ehci_fsl_priv *) hcd_to_ehci(h)->priv)
+
#ifdef CONFIG_PM
#ifdef CONFIG_PPC_MPC512x
@@ -566,17 +564,10 @@ static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
}
#endif /* CONFIG_PPC_MPC512x */
-static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- return container_of(ehci, struct ehci_fsl, ehci);
-}
-
static int ehci_fsl_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd);
void __iomem *non_ehci = hcd->regs;
if (of_device_is_compatible(dev->parent->of_node,
@@ -589,14 +580,14 @@ static int ehci_fsl_drv_suspend(struct device *dev)
if (!fsl_deep_sleep())
return 0;
- ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
+ priv->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
return 0;
}
static int ehci_fsl_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
@@ -612,7 +603,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
usb_root_hub_lost_power(hcd->self.root_hub);
/* Restore USB PHY settings and enable the controller. */
- iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
+ iowrite32be(priv->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
ehci_reset(ehci);
ehci_fsl_reinit(ehci);
@@ -671,7 +662,7 @@ static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port)
#endif /* CONFIG_USB_OTG */
static const struct ehci_driver_overrides ehci_fsl_overrides __initconst = {
- .extra_priv_size = sizeof(struct ehci_fsl),
+ .extra_priv_size = sizeof(struct ehci_fsl_priv),
.reset = ehci_fsl_setup,
};
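The ehci-fsl change drops the container_of()-based wrapper struct in favour of the ehci core's extra_priv_size mechanism: the core allocates the requested number of extra bytes along with struct ehci_hcd and exposes them through ehci->priv. A sketch of the pattern under assumed names:

	struct my_priv {
		u32 saved_reg;		/* state preserved across deep sleep */
	};

	static const struct ehci_driver_overrides my_overrides __initconst = {
		.extra_priv_size = sizeof(struct my_priv),
	};

	static struct my_priv *hcd_to_my_priv(struct usb_hcd *hcd)
	{
		/* ehci->priv points at the extra_priv_size area */
		return (struct my_priv *)hcd_to_ehci(hcd)->priv;
	}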
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 150d2542cef0..6aab45c8525c 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -198,7 +198,8 @@ static void quirk_poll_work(struct work_struct *work)
static void quirk_poll_timer(struct timer_list *t)
{
- struct ehci_platform_priv *priv = from_timer(priv, t, poll_timer);
+ struct ehci_platform_priv *priv = timer_container_of(priv, t,
+ poll_timer);
struct ehci_hcd *ehci = container_of((void *)priv, struct ehci_hcd,
priv);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index c7784bf8101d..9c7f3008646e 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -746,7 +746,8 @@ static int ohci_start(struct usb_hcd *hcd)
*/
static void io_watchdog_func(struct timer_list *t)
{
- struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog);
+ struct ohci_hcd *ohci = timer_container_of(ohci, t,
+ io_watchdog);
bool takeback_all_pending = false;
u32 status;
u32 head;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index d75b1b9b4db0..6b7c73eff081 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2955,7 +2955,7 @@ static irqreturn_t oxu_irq(struct usb_hcd *hcd)
static void oxu_watchdog(struct timer_list *t)
{
- struct oxu_hcd *oxu = from_timer(oxu, t, watchdog);
+ struct oxu_hcd *oxu = timer_container_of(oxu, t, watchdog);
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 67e472116d11..d21a03cf5c17 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1720,7 +1720,8 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
static void r8a66597_interval_timer(struct timer_list *t)
{
- struct r8a66597_timers *timers = from_timer(timers, t, interval);
+ struct r8a66597_timers *timers = timer_container_of(timers, t,
+ interval);
struct r8a66597 *r8a66597 = timers->r8a66597;
unsigned long flags;
u16 pipenum;
@@ -1744,7 +1745,7 @@ static void r8a66597_interval_timer(struct timer_list *t)
static void r8a66597_td_timer(struct timer_list *t)
{
- struct r8a66597_timers *timers = from_timer(timers, t, td);
+ struct r8a66597_timers *timers = timer_container_of(timers, t, td);
struct r8a66597 *r8a66597 = timers->r8a66597;
unsigned long flags;
u16 pipenum;
@@ -1798,7 +1799,7 @@ static void r8a66597_td_timer(struct timer_list *t)
static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
+ struct r8a66597 *r8a66597 = timer_container_of(r8a66597, t, rh_timer);
unsigned long flags;
int port;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 718b1b7fe366..ea3cab99c5d4 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1124,7 +1124,7 @@ sl811h_hub_descriptor (
static void
sl811h_timer(struct timer_list *t)
{
- struct sl811 *sl811 = from_timer(sl811, t, timer);
+ struct sl811 *sl811 = timer_container_of(sl811, t, timer);
unsigned long flags;
u8 irqstat;
u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 45a8256a665f..9480d4ff0111 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -91,7 +91,7 @@ static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
static void uhci_fsbr_timeout(struct timer_list *t)
{
- struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
+ struct uhci_hcd *uhci = timer_container_of(uhci, t, fsbr_timer);
unsigned long flags;
spin_lock_irqsave(&uhci->lock, flags);
diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
index 05943f2213e4..1c2a95fe41e5 100644
--- a/drivers/usb/host/xen-hcd.c
+++ b/drivers/usb/host/xen-hcd.c
@@ -1258,7 +1258,7 @@ static void xenhcd_disconnect(struct xenbus_device *dev)
static void xenhcd_watchdog(struct timer_list *timer)
{
- struct xenhcd_info *info = from_timer(info, timer, watchdog);
+ struct xenhcd_info *info = timer_container_of(info, timer, watchdog);
unsigned long flags;
spin_lock_irqsave(&info->lock, flags);
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
index f6b9a00a0ab9..4b8ff4815644 100644
--- a/drivers/usb/host/xhci-caps.h
+++ b/drivers/usb/host/xhci-caps.h
@@ -62,8 +62,8 @@
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-/* db_off bitmask - bits 0:1 reserved */
-#define DBOFF_MASK (~0x3)
+/* db_off bitmask - bits 31:2 Doorbell Array Offset */
+#define DBOFF_MASK (0xfffffffc)
/* run_regs_off bitmask - bits 0:4 reserved */
#define RTSOFF_MASK (~0x1f)
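For 32-bit reads, ~0x3 and 0xfffffffc are the same mask, so the DBOFF_MASK change is purely documentary: bits 31:2 hold the Doorbell Array Offset. The consumer (removed from xhci_mem_init() further down in this series, presumably relocated) masks the register and offsets from the capability registers:

	/* doorbell array lives DBOFF bytes past the capability registers */
	u32 dboff = readl(&xhci->cap_regs->db_off) & DBOFF_MASK;

	xhci->dba = (void __iomem *)xhci->cap_regs + dboff;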
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 1f5ef174abea..c6d44977193f 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -631,6 +631,112 @@ static void xhci_debugfs_create_ports(struct xhci_hcd *xhci,
}
}
+static int xhci_port_bw_show(struct xhci_hcd *xhci, u8 dev_speed,
+ struct seq_file *s)
+{
+ unsigned int num_ports;
+ unsigned int i;
+ int ret;
+ struct xhci_container_ctx *ctx;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct device *dev = hcd->self.controller;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+ ctx = xhci_alloc_port_bw_ctx(xhci, 0);
+ if (!ctx) {
+ pm_runtime_put_sync(dev);
+ return -ENOMEM;
+ }
+
+ /* get roothub port bandwidth */
+ ret = xhci_get_port_bandwidth(xhci, ctx, dev_speed);
+ if (ret)
+ goto err_out;
+
+	/*
+	 * Print the available bandwidth of all roothub ports; per xhci
+	 * rev1_2 section 6.2.6, byte 0 is reserved.
+	 */
+	for (i = 1; i <= num_ports; i++)
+ seq_printf(s, "port[%d] available bw: %d%%.\n", i,
+ ctx->bytes[i]);
+err_out:
+ pm_runtime_put_sync(dev);
+ xhci_free_port_bw_ctx(xhci, ctx);
+ return ret;
+}
+
+static int xhci_ss_bw_show(struct seq_file *s, void *unused)
+{
+ int ret;
+ struct xhci_hcd *xhci = (struct xhci_hcd *)s->private;
+
+ ret = xhci_port_bw_show(xhci, USB_SPEED_SUPER, s);
+ return ret;
+}
+
+static int xhci_hs_bw_show(struct seq_file *s, void *unused)
+{
+ int ret;
+ struct xhci_hcd *xhci = (struct xhci_hcd *)s->private;
+
+ ret = xhci_port_bw_show(xhci, USB_SPEED_HIGH, s);
+ return ret;
+}
+
+static int xhci_fs_bw_show(struct seq_file *s, void *unused)
+{
+ int ret;
+ struct xhci_hcd *xhci = (struct xhci_hcd *)s->private;
+
+ ret = xhci_port_bw_show(xhci, USB_SPEED_FULL, s);
+ return ret;
+}
+
+static struct xhci_file_map bw_context_files[] = {
+ {"SS_BW", xhci_ss_bw_show, },
+ {"HS_BW", xhci_hs_bw_show, },
+ {"FS_BW", xhci_fs_bw_show, },
+};
+
+static int bw_context_open(struct inode *inode, struct file *file)
+{
+ int i;
+ struct xhci_file_map *f_map;
+ const char *file_name = file_dentry(file)->d_iname;
+
+ for (i = 0; i < ARRAY_SIZE(bw_context_files); i++) {
+ f_map = &bw_context_files[i];
+
+ if (strcmp(f_map->name, file_name) == 0)
+ break;
+ }
+
+ return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations bw_fops = {
+ .open = bw_context_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xhci_debugfs_create_bandwidth(struct xhci_hcd *xhci,
+ struct dentry *parent)
+{
+ parent = debugfs_create_dir("port_bandwidth", parent);
+
+ xhci_debugfs_create_files(xhci, bw_context_files,
+ ARRAY_SIZE(bw_context_files),
+ xhci,
+ parent, &bw_fops);
+}
+
void xhci_debugfs_init(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
@@ -681,6 +787,8 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);
xhci_debugfs_create_ports(xhci, xhci->debugfs_root);
+
+ xhci_debugfs_create_bandwidth(xhci, xhci->debugfs_root);
}
void xhci_debugfs_exit(struct xhci_hcd *xhci)
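Once the controller is up, the new files sit under the controller's debugfs directory and report per-port available bandwidth as a percentage. An illustrative session; the controller path varies per system and the numbers are made up:

  # cat /sys/kernel/debug/usb/xhci/0000:00:14.0/port_bandwidth/HS_BW
  port[1] available bw: 80%.
  port[2] available bw: 100%.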
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 486347776cb2..92bb84f8132a 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1907,7 +1907,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
* prevent port event interrupts from interfering
* with usb2 port resume process
*/
- xhci_disable_interrupter(xhci->interrupters[0]);
+ xhci_disable_interrupter(xhci, xhci->interrupters[0]);
disabled_irq = true;
}
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d698095fc88d..bd745a0f2f78 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -484,6 +484,35 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
kfree(ctx);
}
+struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci,
+ gfp_t flags)
+{
+ struct xhci_container_ctx *ctx;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+ ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
+ if (!ctx)
+ return NULL;
+
+ ctx->size = GET_PORT_BW_ARRAY_SIZE;
+
+ ctx->bytes = dma_pool_zalloc(xhci->port_bw_pool, flags, &ctx->dma);
+ if (!ctx->bytes) {
+ kfree(ctx);
+ return NULL;
+ }
+ return ctx;
+}
+
+void xhci_free_port_bw_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx)
+{
+ if (!ctx)
+ return;
+ dma_pool_free(xhci->port_bw_pool, ctx->bytes, ctx->dma);
+ kfree(ctx);
+}
+
struct xhci_input_control_ctx *xhci_get_input_control_ctx(
struct xhci_container_ctx *ctx)
{
@@ -1802,10 +1831,10 @@ xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
*/
if (ir->ir_set) {
tmp = readl(&ir->ir_set->erst_size);
- tmp &= ERST_SIZE_MASK;
+ tmp &= ~ERST_SIZE_MASK;
writel(tmp, &ir->ir_set->erst_size);
- xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue);
+ xhci_update_erst_dequeue(xhci, ir, true);
}
}
@@ -1848,6 +1877,11 @@ void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrup
return;
}
+ /*
+ * Cleanup secondary interrupter to ensure there are no pending events.
+ * This also updates event ring dequeue pointer back to the start.
+ */
+ xhci_skip_sec_intr_events(xhci, ir->event_ring, ir);
intr_num = ir->intr_num;
xhci_remove_interrupter(xhci, ir);
@@ -1907,6 +1941,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed small stream array pool");
+ dma_pool_destroy(xhci->port_bw_pool);
+ xhci->port_bw_pool = NULL;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed xhci port bw array pool");
+
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2282,37 +2321,25 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
return ir;
}
-static int
-xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
- unsigned int intr_num)
+void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num)
{
+ struct xhci_interrupter *ir;
u64 erst_base;
u32 erst_size;
- if (intr_num >= xhci->max_interrupters) {
- xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
- intr_num, xhci->max_interrupters);
- return -EINVAL;
- }
-
- if (xhci->interrupters[intr_num]) {
- xhci_warn(xhci, "Interrupter %d\n already set up", intr_num);
- return -EINVAL;
- }
-
- xhci->interrupters[intr_num] = ir;
+ ir = xhci->interrupters[intr_num];
ir->intr_num = intr_num;
ir->ir_set = &xhci->run_regs->ir_set[intr_num];
/* set ERST count with the number of entries in the segment table */
erst_size = readl(&ir->ir_set->erst_size);
- erst_size &= ERST_SIZE_MASK;
+ erst_size &= ~ERST_SIZE_MASK;
erst_size |= ir->event_ring->num_segs;
writel(erst_size, &ir->ir_set->erst_size);
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
- erst_base &= ERST_BASE_RSVDP;
- erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
+ erst_base &= ~ERST_BASE_ADDRESS_MASK;
+ erst_base |= ir->erst.erst_dma_addr & ERST_BASE_ADDRESS_MASK;
if (xhci->quirks & XHCI_WRITE_64_HI_LO)
hi_lo_writeq(erst_base, &ir->ir_set->erst_base);
else
@@ -2320,20 +2347,19 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
/* Set the event ring dequeue address of this interrupter */
xhci_set_hc_event_deq(xhci, ir);
-
- return 0;
}
struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
- u32 imod_interval)
+ u32 imod_interval, unsigned int intr_num)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir;
unsigned int i;
int err = -ENOSPC;
- if (!xhci->interrupters || xhci->max_interrupters <= 1)
+ if (!xhci->interrupters || xhci->max_interrupters <= 1 ||
+ intr_num >= xhci->max_interrupters)
return NULL;
ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL);
@@ -2341,15 +2367,23 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
return NULL;
spin_lock_irq(&xhci->lock);
-
- /* Find available secondary interrupter, interrupter 0 is reserved for primary */
- for (i = 1; i < xhci->max_interrupters; i++) {
- if (xhci->interrupters[i] == NULL) {
- err = xhci_add_interrupter(xhci, ir, i);
- break;
+ if (!intr_num) {
+ /* Find available secondary interrupter, interrupter 0 is reserved for primary */
+ for (i = 1; i < xhci->max_interrupters; i++) {
+ if (!xhci->interrupters[i]) {
+ xhci->interrupters[i] = ir;
+ xhci_add_interrupter(xhci, i);
+ err = 0;
+ break;
+ }
+ }
+ } else {
+ if (!xhci->interrupters[intr_num]) {
+ xhci->interrupters[intr_num] = ir;
+ xhci_add_interrupter(xhci, intr_num);
+ err = 0;
}
}
-
spin_unlock_irq(&xhci->lock);
if (err) {
@@ -2359,78 +2393,32 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
return NULL;
}
- err = xhci_set_interrupter_moderation(ir, imod_interval);
- if (err)
- xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n",
- i, imod_interval);
+ xhci_set_interrupter_moderation(ir, imod_interval);
xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
- i, xhci->max_interrupters);
+ ir->intr_num, xhci->max_interrupters);
return ir;
}
EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
-static void xhci_hcd_page_size(struct xhci_hcd *xhci)
-{
- u32 page_size;
-
- page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
- if (!is_power_of_2(page_size)) {
- xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
- /* Fallback to 4K page size, since that's common */
- page_size = 1;
- }
-
- xhci->page_size = page_size << 12;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
- xhci->page_size >> 10);
-}
-
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
- struct xhci_interrupter *ir;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
dma_addr_t dma;
- unsigned int val, val2;
- u64 val_64;
- u32 temp;
- int i;
-
- INIT_LIST_HEAD(&xhci->cmd_list);
-
- /* init command timeout work */
- INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
- init_completion(&xhci->cmd_ring_stop_completion);
-
- xhci_hcd_page_size(xhci);
-
- /*
- * Program the Number of Device Slots Enabled field in the CONFIG
- * register with the max value of slots the HC can handle.
- */
- val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// xHC can handle at most %d device slots.", val);
- val2 = readl(&xhci->op_regs->config_reg);
- val |= (val2 & ~HCS_SLOTS_MASK);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting Max device slots reg = 0x%x.", val);
- writel(val, &xhci->op_regs->config_reg);
/*
* xHCI section 5.4.6 - Device Context array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
- xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- flags);
+ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, flags);
if (!xhci->dcbaa)
goto fail;
+
xhci->dcbaa->dma = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Device context base array address = 0x%pad (DMA), %p (virt)",
- &xhci->dcbaa->dma, xhci->dcbaa);
- xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
+ "Device context base array address = 0x%pad (DMA), %p (virt)",
+ &xhci->dcbaa->dma, xhci->dcbaa);
/*
* Initialize the ring segment pool. The ring must be a contiguous
@@ -2446,92 +2434,77 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
else
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
+ if (!xhci->segment_pool)
+ goto fail;
/* See Table 46 and Note on Figure 55 */
- xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
- 2112, 64, xhci->page_size);
- if (!xhci->segment_pool || !xhci->device_pool)
+ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2112, 64,
+ xhci->page_size);
+ if (!xhci->device_pool)
goto fail;
- /* Linear stream context arrays don't have any boundary restrictions,
+ /*
+ * Linear stream context arrays don't have any boundary restrictions,
* and only need to be 16-byte aligned.
*/
- xhci->small_streams_pool =
- dma_pool_create("xHCI 256 byte stream ctx arrays",
- dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
- xhci->medium_streams_pool =
- dma_pool_create("xHCI 1KB stream ctx arrays",
- dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
- /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
- * will be allocated with dma_alloc_coherent()
+ xhci->small_streams_pool = dma_pool_create("xHCI 256 byte stream ctx arrays",
+ dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
+ if (!xhci->small_streams_pool)
+ goto fail;
+
+ /*
+ * Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE will be
+ * allocated with dma_alloc_coherent().
*/
- if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
+ xhci->medium_streams_pool = dma_pool_create("xHCI 1KB stream ctx arrays",
+ dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
+ if (!xhci->medium_streams_pool)
+ goto fail;
+
+ /*
+	 * Per xhci rev1_2 section 5.3.3 the maximum number of ports is 255,
+	 * and per section 6.4.3.14 the port bandwidth buffer needs to be
+	 * 16-byte aligned.
+ */
+ xhci->port_bw_pool = dma_pool_create("xHCI 256 port bw ctx arrays",
+ dev, GET_PORT_BW_ARRAY_SIZE, 16, 0);
+ if (!xhci->port_bw_pool)
goto fail;
/* Set up the command ring to have one segments for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Allocated command ring at %p", xhci->cmd_ring);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad",
- &xhci->cmd_ring->first_seg->dma);
- /* Set the address in the Command Ring Control register */
- val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
- (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
- xhci->cmd_ring->cycle_state;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting command ring address to 0x%016llx", val_64);
- xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocated command ring at %p", xhci->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad",
+ &xhci->cmd_ring->first_seg->dma);
- /* Reserve one command ring TRB for disabling LPM.
+ /*
+ * Reserve one command ring TRB for disabling LPM.
* Since the USB core grabs the shared usb_bus bandwidth mutex before
* disabling LPM, we only need to reserve one TRB for all devices.
*/
xhci->cmd_ring_reserved_trbs++;
- val = readl(&xhci->cap_regs->db_off);
- val &= DBOFF_MASK;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Doorbell array is located at offset 0x%x from cap regs base addr",
- val);
- xhci->dba = (void __iomem *) xhci->cap_regs + val;
-
/* Allocate and set up primary interrupter 0 with an event ring. */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Allocating primary event ring");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocating primary event ring");
xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters),
flags, dev_to_node(dev));
-
- ir = xhci_alloc_interrupter(xhci, 0, flags);
- if (!ir)
+ if (!xhci->interrupters)
goto fail;
- if (xhci_add_interrupter(xhci, ir, 0))
+ xhci->interrupters[0] = xhci_alloc_interrupter(xhci, 0, flags);
+ if (!xhci->interrupters[0])
goto fail;
- ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
-
- for (i = 0; i < MAX_HC_SLOTS; i++)
- xhci->devs[i] = NULL;
-
if (scratchpad_alloc(xhci, flags))
goto fail;
+
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
- /* Enable USB 3.0 device notifications for function remote wake, which
- * is necessary for allowing USB 3.0 devices to do remote wakeup from
- * U3 (device suspend).
- */
- temp = readl(&xhci->op_regs->dev_notification);
- temp &= ~DEV_NOTE_MASK;
- temp |= DEV_NOTE_FWAKE;
- writel(temp, &xhci->op_regs->dev_notification);
-
return 0;
fail:
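xhci_create_secondary_interrupter() now takes the requested interrupter number directly; passing 0 keeps the old scan-for-a-free-slot behaviour. A hedged usage sketch, with arbitrary segment count, moderation value, and interrupter number:

	struct xhci_interrupter *ir;

	/* 2 event ring segments, 5000 ns moderation, request interrupter 3;
	 * a last argument of 0 would pick any free secondary interrupter */
	ir = xhci_create_secondary_interrupter(hcd, 2, 5000, 3);
	if (!ir)
		return -ENOMEM;	/* no interrupters, or number 3 already in use */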
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 3155e3a842da..6dab142e7278 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -267,6 +267,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
+ device_property_read_u16(tmpdev, "num-hc-interrupters",
+ &xhci->max_interrupters);
}
/*
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 423bf3649570..e038ad3375dc 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1898,6 +1898,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
case TRB_NEC_GET_FW:
xhci_handle_cmd_nec_get_fw(xhci, event);
break;
+ case TRB_GET_BW:
+ break;
default:
/* Skip over unknown commands on the event ring */
xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
@@ -2977,9 +2979,6 @@ debug_finding_td:
(unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb),
(unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb));
- xhci_for_each_ring_seg(ep_ring->first_seg, ep_seg)
- xhci_warn(xhci, "Ring seg %u dma %pad\n", ep_seg->num, &ep_seg->dma);
-
return -ESHUTDOWN;
err_out:
@@ -3050,9 +3049,9 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter
* - When all events have finished
* - To avoid "Event Ring Full Error" condition
*/
-static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
- struct xhci_interrupter *ir,
- bool clear_ehb)
+void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir,
+ bool clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
@@ -3083,11 +3082,14 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
{
if (!ir->ip_autoclear) {
- u32 irq_pending;
+ u32 iman;
+
+ iman = readl(&ir->ir_set->iman);
+ iman |= IMAN_IP;
+ writel(iman, &ir->ir_set->iman);
- irq_pending = readl(&ir->ir_set->irq_pending);
- irq_pending |= IMAN_IP;
- writel(irq_pending, &ir->ir_set->irq_pending);
+ /* Read operation to guarantee the write has been flushed from posted buffers */
+ readl(&ir->ir_set->iman);
}
}
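The rewritten pending-clear pairs the IMAN_IP write with a dummy read because MMIO writes can be posted; the non-posted read forces the write to reach the device before the handler continues. The idiom in isolation:

	writel(val, addr);	/* may sit in a posted-write buffer */
	readl(addr);		/* read back to flush the write to the device */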
@@ -3095,10 +3097,11 @@ static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
* Handle all OS-owned events on an interrupter event ring. It may drop
  * and reacquire xhci->lock between event processing.
*/
-static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ bool skip_events)
{
int event_loop = 0;
- int err;
+ int err = 0;
u64 temp;
xhci_clear_interrupt_pending(ir);
@@ -3121,7 +3124,8 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
/* Process all OS owned event TRBs on this event ring */
while (unhandled_event_trb(ir->event_ring)) {
- err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
+ if (!skip_events)
+ err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
/*
* If half a segment of events have been handled in one go then
@@ -3149,6 +3153,37 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
}
/*
+ * Move the event ring dequeue pointer to skip events kept in the secondary
+ * event ring. This is used to ensure that pending events in the ring are
+ * acknowledged, so the xHCI HCD can properly enter suspend/resume. The
+ * secondary ring is typically maintained by an external component.
+ */
+void xhci_skip_sec_intr_events(struct xhci_hcd *xhci,
+ struct xhci_ring *ring, struct xhci_interrupter *ir)
+{
+ union xhci_trb *current_trb;
+ u64 erdp_reg;
+ dma_addr_t deq;
+
+ /* disable irq, ack pending interrupt and ack all pending events */
+ xhci_disable_interrupter(xhci, ir);
+
+ /* last acked event trb is in erdp reg */
+ erdp_reg = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ deq = (dma_addr_t)(erdp_reg & ERST_PTR_MASK);
+ if (!deq) {
+ xhci_err(xhci, "event ring handling not required\n");
+ return;
+ }
+
+ current_trb = ir->event_ring->dequeue;
+ /* read cycle state of the last acked trb to find out CCS */
+ ring->cycle_state = le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE;
+
+ xhci_handle_events(xhci, ir, true);
+}
+
+/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
@@ -3192,7 +3227,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
writel(status, &xhci->op_regs->status);
/* This is the handler of the primary interrupter */
- xhci_handle_events(xhci, xhci->interrupters[0]);
+ xhci_handle_events(xhci, xhci->interrupters[0], false);
out:
spin_unlock(&xhci->lock);
@@ -4414,6 +4449,17 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
command_must_succeed);
}
+/* Queue a get root hub port bandwidth command TRB */
+int xhci_queue_get_port_bw(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
+ u8 dev_speed, bool command_must_succeed)
+{
+ return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_GET_BW) | DEV_SPEED_FOR_TRB(dev_speed),
+ command_must_succeed);
+}
+
/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
diff --git a/drivers/usb/host/xhci-sideband.c b/drivers/usb/host/xhci-sideband.c
new file mode 100644
index 000000000000..d49f9886dd84
--- /dev/null
+++ b/drivers/usb/host/xhci-sideband.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * xHCI host controller sideband support
+ *
+ * Copyright (c) 2023-2025, Intel Corporation.
+ *
+ * Author: Mathias Nyman
+ */
+
+#include <linux/usb/xhci-sideband.h>
+#include <linux/dma-direct.h>
+
+#include "xhci.h"
+
+/* sideband internal helpers */
+static struct sg_table *
+xhci_ring_to_sgtable(struct xhci_sideband *sb, struct xhci_ring *ring)
+{
+ struct xhci_segment *seg;
+ struct sg_table *sgt;
+ unsigned int n_pages;
+ struct page **pages;
+ struct device *dev;
+ size_t sz;
+ int i;
+
+ dev = xhci_to_hcd(sb->xhci)->self.sysdev;
+ sz = ring->num_segs * TRB_SEGMENT_SIZE;
+ n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
+ pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ kvfree(pages);
+ return NULL;
+ }
+
+ seg = ring->first_seg;
+ if (!seg)
+ goto err;
+ /*
+	 * Rings can potentially have multiple segments, so create an array
+	 * that carries page references to the allocated segments, and use
+	 * sg_alloc_table_from_pages() to create the sg table and ensure
+	 * that the page links are set up.
+ */
+ for (i = 0; i < ring->num_segs; i++) {
+ dma_get_sgtable(dev, sgt, seg->trbs, seg->dma,
+ TRB_SEGMENT_SIZE);
+ pages[i] = sg_page(sgt->sgl);
+ sg_free_table(sgt);
+ seg = seg->next;
+ }
+
+ if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL))
+ goto err;
+
+ /*
+ * Save first segment dma address to sg dma_address field for the sideband
+ * client to have access to the IOVA of the ring.
+ */
+ sg_dma_address(sgt->sgl) = ring->first_seg->dma;
+
+ return sgt;
+
+err:
+ kvfree(pages);
+ kfree(sgt);
+
+ return NULL;
+}
+
+static void
+__xhci_sideband_remove_endpoint(struct xhci_sideband *sb, struct xhci_virt_ep *ep)
+{
+ /*
+ * Issue a stop endpoint command when an endpoint is removed.
+ * The stop ep cmd handler will handle the ring cleanup.
+ */
+ xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL);
+
+ ep->sideband = NULL;
+ sb->eps[ep->ep_index] = NULL;
+}
+
+/* sideband api functions */
+
+/**
+ * xhci_sideband_notify_ep_ring_free - notify client of xfer ring free
+ * @sb: sideband instance for this usb device
+ * @ep_index: usb endpoint index
+ *
+ * Notifies the xHCI sideband client driver that an xHCI transfer ring is
+ * about to be freed. This allows the client to ensure that all transfers
+ * are completed.
+ *
+ * The callback should be synchronous, as the ring is freed right after it
+ * returns.
+ */
+void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb,
+ unsigned int ep_index)
+{
+ struct xhci_sideband_event evt;
+
+ evt.type = XHCI_SIDEBAND_XFER_RING_FREE;
+ evt.evt_data = &ep_index;
+
+ if (sb->notify_client)
+ sb->notify_client(sb->intf, &evt);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_notify_ep_ring_free);
+
+/**
+ * xhci_sideband_add_endpoint - add endpoint to sideband access list
+ * @sb: sideband instance for this usb device
+ * @host_ep: usb host endpoint
+ *
+ * Adds an endpoint to the list of sideband-accessed endpoints for this usb
+ * device.
+ * After an endpoint is added the sideband client can get the endpoint transfer
+ * ring buffer by calling xhci_sideband_get_endpoint_buffer().
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+int
+xhci_sideband_add_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_index;
+
+ mutex_lock(&sb->mutex);
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = &sb->vdev->eps[ep_index];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ mutex_unlock(&sb->mutex);
+ return -EINVAL;
+ }
+
+ /*
+	 * Note: we don't know the DMA mask of the audio DSP device; if it's
+	 * smaller than the xhci's, it won't be able to access the endpoint
+	 * ring buffer. This could be solved by not letting the audio class
+	 * driver add the endpoint the normal way, but instead offloading it
+	 * immediately and letting this function add the endpoint and allocate
+	 * the ring buffer with the smallest common DMA mask.
+ */
+ if (sb->eps[ep_index] || ep->sideband) {
+ mutex_unlock(&sb->mutex);
+ return -EBUSY;
+ }
+
+ ep->sideband = sb;
+ sb->eps[ep_index] = ep;
+ mutex_unlock(&sb->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_add_endpoint);
+
+/**
+ * xhci_sideband_remove_endpoint - remove endpoint from sideband access list
+ * @sb: sideband instance for this usb device
+ * @host_ep: usb host endpoint
+ *
+ * Removes an endpoint from the list of sideband-accessed endpoints for this
+ * usb device.
+ * The sideband client should no longer touch the endpoint transfer buffer
+ * after calling this.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+int
+xhci_sideband_remove_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_index;
+
+ mutex_lock(&sb->mutex);
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = sb->eps[ep_index];
+
+ if (!ep || !ep->sideband || ep->sideband != sb) {
+ mutex_unlock(&sb->mutex);
+ return -ENODEV;
+ }
+
+ __xhci_sideband_remove_endpoint(sb, ep);
+ xhci_initialize_ring_info(ep->ring);
+ mutex_unlock(&sb->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_remove_endpoint);
+
+int
+xhci_sideband_stop_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_index;
+
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = sb->eps[ep_index];
+
+ if (!ep || !ep->sideband || ep->sideband != sb)
+ return -EINVAL;
+
+ return xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_stop_endpoint);
+
+/**
+ * xhci_sideband_get_endpoint_buffer - gets the endpoint transfer buffer address
+ * @sb: sideband instance for this usb device
+ * @host_ep: usb host endpoint
+ *
+ * Returns the address of the endpoint buffer where the xHC controller reads
+ * queued transfer TRBs from. This is the starting address of the ring buffer
+ * where the sideband client should write TRBs to.
+ *
+ * The caller needs to free the returned sg_table.
+ *
+ * Return: struct sg_table * if successful. NULL otherwise.
+ */
+struct sg_table *
+xhci_sideband_get_endpoint_buffer(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep)
+{
+ struct xhci_virt_ep *ep;
+ unsigned int ep_index;
+
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+ ep = sb->eps[ep_index];
+
+ if (!ep || !ep->ring || !ep->sideband || ep->sideband != sb)
+ return NULL;
+
+ return xhci_ring_to_sgtable(sb, ep->ring);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_get_endpoint_buffer);
+
+/**
+ * xhci_sideband_get_event_buffer - return the event buffer for this device
+ * @sb: sideband instance for this usb device
+ *
+ * If a secondary xhci interrupter is set up for this usb device then this
+ * function returns the address of the event buffer where the xHC writes
+ * the transfer completion events.
+ *
+ * The caller needs to free the returned sg_table.
+ *
+ * Return: struct sg_table * if successful. NULL otherwise.
+ */
+struct sg_table *
+xhci_sideband_get_event_buffer(struct xhci_sideband *sb)
+{
+ if (!sb || !sb->ir)
+ return NULL;
+
+ return xhci_ring_to_sgtable(sb, sb->ir->event_ring);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_get_event_buffer);
+
+/**
+ * xhci_sideband_create_interrupter - creates a new interrupter for this sideband
+ * @sb: sideband instance for this usb device
+ * @num_seg: number of event ring segments to allocate
+ * @ip_autoclear: interrupt pending (IP) bit is cleared automatically,
+ *                e.g. when MSI is used
+ * @imod_interval: interrupt moderation interval, in ns
+ * @intr_num: requested interrupter number; 0 means any free interrupter
+ *
+ * Sets up an xhci interrupter that can be used for this sideband-accessed usb
+ * device. Transfer events for this device can be routed to this interrupter's
+ * event ring by setting the 'Interrupter Target' field correctly when queueing
+ * the transfer TRBs.
+ * Once this interrupter is created the interrupter target ID can be obtained
+ * by calling xhci_sideband_interrupter_id().
+ *
+ * Return: 0 on success, negative error otherwise
+ */
+int
+xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg,
+ bool ip_autoclear, u32 imod_interval, int intr_num)
+{
+ int ret = 0;
+
+ if (!sb || !sb->xhci)
+ return -ENODEV;
+
+ mutex_lock(&sb->mutex);
+ if (sb->ir) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ sb->ir = xhci_create_secondary_interrupter(xhci_to_hcd(sb->xhci),
+ num_seg, imod_interval,
+ intr_num);
+ if (!sb->ir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sb->ir->ip_autoclear = ip_autoclear;
+
+out:
+ mutex_unlock(&sb->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_create_interrupter);
+
+/**
+ * xhci_sideband_remove_interrupter - remove the interrupter from a sideband
+ * @sb: sideband instance for this usb device
+ *
+ * Removes a registered interrupter from a sideband, allowing other
+ * sideband users to utilize it.
+ */
+void
+xhci_sideband_remove_interrupter(struct xhci_sideband *sb)
+{
+ if (!sb || !sb->ir)
+ return;
+
+ mutex_lock(&sb->mutex);
+ xhci_remove_secondary_interrupter(xhci_to_hcd(sb->xhci), sb->ir);
+
+ sb->ir = NULL;
+ mutex_unlock(&sb->mutex);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_remove_interrupter);
+
+/**
+ * xhci_sideband_interrupter_id - return the interrupter target id
+ * @sb: sideband instance for this usb device
+ *
+ * If a secondary xhci interrupter is set up for this usb device then this
+ * function returns the ID used by the interrupter. The sideband client
+ * needs to write this ID to the 'Interrupter Target' field of the transfer TRBs
+ * it queues on the endpoint's transfer ring to ensure transfer completion
+ * events are written by the xHC to the correct interrupter event ring.
+ *
+ * Return: interrupter id on success, negative error otherwise
+ */
+int
+xhci_sideband_interrupter_id(struct xhci_sideband *sb)
+{
+ if (!sb || !sb->ir)
+ return -ENODEV;
+
+ return sb->ir->intr_num;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_interrupter_id);
+
+/**
+ * xhci_sideband_register - register a sideband for a usb device
+ * @intf: usb interface associated with the sideband device
+ * @type: type of sideband to register; currently only XHCI_SIDEBAND_VENDOR
+ * @notify_client: callback used to notify the client of sideband events
+ *
+ * Allows clients to utilize XHCI interrupters and fetch transfer and event
+ * ring parameters for executing data transfers.
+ *
+ * Return: pointer to a new xhci_sideband instance if successful. NULL otherwise.
+ */
+struct xhci_sideband *
+xhci_sideband_register(struct usb_interface *intf, enum xhci_sideband_type type,
+ int (*notify_client)(struct usb_interface *intf,
+ struct xhci_sideband_event *evt))
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *vdev;
+ struct xhci_sideband *sb;
+
+ /*
+ * Make sure the usb device is connected to a xhci controller. Fail
+ * registration if the type is anything other than XHCI_SIDEBAND_VENDOR,
+ * as this is the only type that is currently supported by xhci-sideband.
+ */
+ if (!udev->slot_id || type != XHCI_SIDEBAND_VENDOR)
+ return NULL;
+
+ sb = kzalloc_node(sizeof(*sb), GFP_KERNEL, dev_to_node(hcd->self.sysdev));
+ if (!sb)
+ return NULL;
+
+ mutex_init(&sb->mutex);
+
+ /* check this device isn't already controlled via sideband */
+ spin_lock_irq(&xhci->lock);
+
+ vdev = xhci->devs[udev->slot_id];
+
+ if (!vdev || vdev->sideband) {
+ xhci_warn(xhci, "XHCI sideband for slot %d already in use\n",
+ udev->slot_id);
+ spin_unlock_irq(&xhci->lock);
+ kfree(sb);
+ return NULL;
+ }
+
+ sb->xhci = xhci;
+ sb->vdev = vdev;
+ sb->intf = intf;
+ sb->type = type;
+ sb->notify_client = notify_client;
+ vdev->sideband = sb;
+
+ spin_unlock_irq(&xhci->lock);
+
+ return sb;
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_register);
+
+/**
+ * xhci_sideband_unregister - unregister sideband access to a usb device
+ * @sb: sideband instance to be unregistered
+ *
+ * Unregisters sideband access to a usb device and frees the sideband
+ * instance.
+ * After this the endpoint and interrupter event buffers should no longer
+ * be accessed via sideband. The xhci driver can now take over handling
+ * the buffers.
+ */
+void
+xhci_sideband_unregister(struct xhci_sideband *sb)
+{
+ struct xhci_hcd *xhci;
+ int i;
+
+ if (!sb)
+ return;
+
+ xhci = sb->xhci;
+
+ mutex_lock(&sb->mutex);
+ for (i = 0; i < EP_CTX_PER_DEV; i++)
+ if (sb->eps[i])
+ __xhci_sideband_remove_endpoint(sb, sb->eps[i]);
+ mutex_unlock(&sb->mutex);
+
+ xhci_sideband_remove_interrupter(sb);
+
+ spin_lock_irq(&xhci->lock);
+ sb->xhci = NULL;
+ sb->vdev->sideband = NULL;
+ spin_unlock_irq(&xhci->lock);
+
+ kfree(sb);
+}
+EXPORT_SYMBOL_GPL(xhci_sideband_unregister);
+MODULE_DESCRIPTION("xHCI sideband driver for secondary interrupter management");
+MODULE_LICENSE("GPL");
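A hedged end-to-end sketch of the client lifecycle built from the exports above; the xhci_sideband_create_interrupter() parameter order (segments, IP auto-clear, moderation interval, interrupter number) is assumed from its use elsewhere in this series, and my_notify() is a hypothetical client callback.

    static int my_notify(struct usb_interface *intf,
                         struct xhci_sideband_event *evt);

    static int client_bind(struct usb_interface *intf)
    {
        struct xhci_sideband *sb;
        int ret;

        sb = xhci_sideband_register(intf, XHCI_SIDEBAND_VENDOR, my_notify);
        if (!sb)
            return -EBUSY;

        /* two event ring segments, IP auto-clear, default moderation, any interrupter */
        ret = xhci_sideband_create_interrupter(sb, 2, true, 0, 0);
        if (ret) {
            xhci_sideband_unregister(sb);
            return ret;
        }

        return xhci_sideband_interrupter_id(sb);    /* >= 0 on success */
    }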
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 90eb491267b5..4e6dbd2375c3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -20,6 +20,7 @@
#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
+#include <linux/usb/xhci-sideband.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -329,21 +330,29 @@ int xhci_enable_interrupter(struct xhci_interrupter *ir)
if (!ir || !ir->ir_set)
return -EINVAL;
- iman = readl(&ir->ir_set->irq_pending);
- writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);
+ iman = readl(&ir->ir_set->iman);
+ iman |= IMAN_IE;
+ writel(iman, &ir->ir_set->iman);
+ /* Read operation to guarantee the write has been flushed from posted buffers */
+ readl(&ir->ir_set->iman);
return 0;
}
-int xhci_disable_interrupter(struct xhci_interrupter *ir)
+int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
u32 iman;
if (!ir || !ir->ir_set)
return -EINVAL;
- iman = readl(&ir->ir_set->irq_pending);
- writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);
+ iman = readl(&ir->ir_set->iman);
+ iman &= ~IMAN_IE;
+ writel(iman, &ir->ir_set->iman);
+
+ iman = readl(&ir->ir_set->iman);
+ if (iman & IMAN_IP)
+ xhci_dbg(xhci, "%s: Interrupt pending\n", __func__);
return 0;
}
@@ -354,13 +363,16 @@ int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
{
u32 imod;
- if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
+ if (!ir || !ir->ir_set)
return -EINVAL;
- imod = readl(&ir->ir_set->irq_control);
- imod &= ~ER_IRQ_INTERVAL_MASK;
- imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(imod, &ir->ir_set->irq_control);
+ /* IMODI value in IMOD register is in 250ns increments */
+ imod_interval = umin(imod_interval / 250, IMODI_MASK);
+
+ imod = readl(&ir->ir_set->imod);
+ imod &= ~IMODI_MASK;
+ imod |= imod_interval;
+ writel(imod, &ir->ir_set->imod);
return 0;
}
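Worked example of the clamp above (a sketch, not kernel code): the IMODI field counts 250 ns ticks, so a requested 1 ms interval becomes 4000 ticks, the xHCI default, and anything above 0xffff * 250 ns (~16.4 ms) saturates at IMODI_MASK.

    u32 interval_ns = 1000000;                          /* 1 ms requested */
    u32 ticks = umin(interval_ns / 250, IMODI_MASK);    /* == 4000 */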
@@ -373,7 +385,7 @@ static void compliance_mode_recovery(struct timer_list *t)
u32 temp;
int i;
- xhci = from_timer(xhci, t, comp_mode_recovery_timer);
+ xhci = timer_container_of(xhci, t, comp_mode_recovery_timer);
rhub = &xhci->usb3_rhub;
hcd = rhub->hcd;
@@ -460,6 +472,82 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}
+static void xhci_hcd_page_size(struct xhci_hcd *xhci)
+{
+ u32 page_size;
+
+ page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
+ if (!is_power_of_2(page_size)) {
+ xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
+ /* Fallback to 4K page size, since that's common */
+ page_size = 1;
+ }
+
+ xhci->page_size = page_size << 12;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
+ xhci->page_size >> 10);
+}
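The PAGE_SIZE register is one-hot: bit n set means a page size of 2^(n+12) bytes, which the shift above decodes. A quick illustrative example:

    u32 reg = 0x1;          /* bit 0 set: 1 << 12 = 4K pages */
    u32 bytes = reg << 12;  /* 4096; a (hypothetical) reading of 0x8 would give 32K */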
+
+static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci)
+{
+ u32 config_reg;
+ u32 max_slots;
+
+ max_slots = HCS_MAX_SLOTS(xhci->hcs_params1);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots",
+ max_slots);
+
+ config_reg = readl(&xhci->op_regs->config_reg);
+ config_reg &= ~HCS_SLOTS_MASK;
+ config_reg |= max_slots;
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x",
+ config_reg);
+ writel(config_reg, &xhci->op_regs->config_reg);
+}
+
+static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
+{
+ dma_addr_t deq_dma;
+ u64 crcr;
+
+ deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue);
+ deq_dma &= CMD_RING_PTR_MASK;
+
+ crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ crcr &= ~CMD_RING_PTR_MASK;
+ crcr |= deq_dma;
+
+ crcr &= ~CMD_RING_CYCLE;
+ crcr |= xhci->cmd_ring->cycle_state;
+
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr);
+ xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring);
+}
+
+static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci)
+{
+ u32 offset;
+
+ offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK;
+ xhci->dba = (void __iomem *)xhci->cap_regs + offset;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Doorbell array is located at offset 0x%x from cap regs base addr", offset);
+}
+
+/*
+ * Enable USB 3.0 device notifications for function remote wake, which is
+ * required for USB 3.0 devices to perform remote wakeup from U3 (device suspend).
+ */
+static void xhci_set_dev_notifications(struct xhci_hcd *xhci)
+{
+ u32 dev_notf;
+
+ dev_notf = readl(&xhci->op_regs->dev_notification);
+ dev_notf &= ~DEV_NOTE_MASK;
+ dev_notf |= DEV_NOTE_FWAKE;
+ writel(dev_notf, &xhci->op_regs->dev_notification);
+}
/*
* Initialize memory for HCD and xHC (one-time init).
@@ -473,11 +561,37 @@ static int xhci_init(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__);
spin_lock_init(&xhci->lock);
+ INIT_LIST_HEAD(&xhci->cmd_list);
+ INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
+ init_completion(&xhci->cmd_ring_stop_completion);
+ xhci_hcd_page_size(xhci);
+ memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs));
+
retval = xhci_mem_init(xhci, GFP_KERNEL);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
+ if (retval)
+ return retval;
+
+ /* Set the Number of Device Slots Enabled to the maximum supported value */
+ xhci_enable_max_dev_slots(xhci);
+
+ /* Set the address in the Command Ring Control register */
+ xhci_set_cmd_ring_deq(xhci);
+
+ /* Set Device Context Base Address Array pointer */
+ xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
+
+ /* Set Doorbell array pointer */
+ xhci_set_doorbell_ptr(xhci);
+
+ /* Set USB 3.0 device notifications for function remote wake */
+ xhci_set_dev_notifications(xhci);
+
+ /* Initialize the Primary interrupter */
+ xhci_add_interrupter(xhci, 0);
+ xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/* Initializing Compliance Mode Recovery Data If Needed */
if (xhci_compliance_mode_recovery_timer_quirk_check()) {
@@ -485,7 +599,8 @@ static int xhci_init(struct usb_hcd *hcd)
compliance_mode_recovery_timer_init(xhci);
}
- return retval;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__);
+ return 0;
}
/*-------------------------------------------------------------------------*/
@@ -640,7 +755,7 @@ void xhci_stop(struct usb_hcd *hcd)
"// Disabling event ring interrupts");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- xhci_disable_interrupter(ir);
+ xhci_disable_interrupter(xhci, ir);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
@@ -719,8 +834,8 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
ir->s3_erst_size = readl(&ir->ir_set->erst_size);
ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
- ir->s3_irq_control = readl(&ir->ir_set->irq_control);
+ ir->s3_iman = readl(&ir->ir_set->iman);
+ ir->s3_imod = readl(&ir->ir_set->imod);
}
}
@@ -743,28 +858,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
writel(ir->s3_erst_size, &ir->ir_set->erst_size);
xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
- writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
- writel(ir->s3_irq_control, &ir->ir_set->irq_control);
+ writel(ir->s3_iman, &ir->ir_set->iman);
+ writel(ir->s3_imod, &ir->ir_set->imod);
}
}
-static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
-{
- u64 val_64;
-
- /* step 2: initialize command ring buffer */
- val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
- val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
- (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
- xhci->cmd_ring->dequeue) &
- (u64) ~CMD_RING_RSVD_BITS) |
- xhci->cmd_ring->cycle_state;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting command ring address to 0x%llx",
- (long unsigned long) val_64);
- xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
-}
-
/*
* The whole command ring must be cleared to zero when we suspend the host.
*
@@ -1092,7 +1190,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- xhci_disable_interrupter(xhci->interrupters[0]);
+ xhci_disable_interrupter(xhci, xhci->interrupters[0]);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -1359,6 +1457,7 @@ static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
* xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
* HCDs. Find the index for an endpoint given its descriptor. Use the return
* value to right shift 1 for the bitmask.
+ * @desc: USB endpoint descriptor to determine index for
*
* Index = (epnum * 2) + direction - 1,
* where direction = 0 for OUT, 1 for IN.
@@ -3089,6 +3188,42 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
+/* Get the available bandwidth of the ports under the xhci roothub */
+int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ u8 dev_speed)
+{
+ struct xhci_command *cmd;
+ unsigned long flags;
+ int ret;
+
+ if (!ctx || !xhci)
+ return -EINVAL;
+
+ cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->in_ctx = ctx;
+
+ /* get xHCI port bandwidth; see xHCI rev 1.2 spec, section 4.6.15 */
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto err_out;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(cmd->completion);
+err_out:
+ kfree(cmd->completion);
+ kfree(cmd);
+
+ return ret;
+}
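A hedged caller sketch using the allocator added by this series; the layout of the returned context (one percentage byte per root-hub port, ports starting at offset 1) and the speed encoding are assumptions based on xHCI rev 1.2 section 6.2.6, not something this hunk spells out.

    static int client_query_ss_bw(struct xhci_hcd *xhci)
    {
        struct xhci_container_ctx *ctx;
        int ret, port;

        ctx = xhci_alloc_port_bw_ctx(xhci, GFP_KERNEL);
        if (!ctx)
            return -ENOMEM;

        /* speed encoding assumed */
        ret = xhci_get_port_bandwidth(xhci, ctx, USB_SPEED_SUPER);
        if (!ret)
            for (port = 1; port <= xhci->usb3_rhub.num_ports; port++)
                xhci_dbg(xhci, "port %d: %u%% bandwidth available\n",
                         port, ctx->bytes[port]);

        xhci_free_port_bw_ctx(xhci, ctx);
        return ret;
    }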
+
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
@@ -3911,6 +4046,8 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
}
if (ep->ring) {
+ if (ep->sideband)
+ xhci_sideband_notify_ep_ring_free(ep->sideband, i);
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_free_endpoint_ring(xhci, virt_dev, i);
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 242ab9fbc8ae..49887a303e43 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -152,10 +152,6 @@ struct xhci_op_regs {
#define XHCI_RESET_LONG_USEC (10 * 1000 * 1000)
#define XHCI_RESET_SHORT_USEC (250 * 1000)
-/* IMAN - Interrupt Management Register */
-#define IMAN_IE (1 << 1)
-#define IMAN_IP (1 << 0)
-
/* USBSTS - USB status - status bitmasks */
/* HC not running - set to 1 when run/stop bit is cleared. */
#define STS_HALT XHCI_STS_HALT
@@ -184,23 +180,22 @@ struct xhci_op_regs {
* notification type that matches a bit set in this bit field.
*/
#define DEV_NOTE_MASK (0xffff)
-#define ENABLE_DEV_NOTE(x) (1 << (x))
/* Most of the device notification types should only be used for debug.
* SW does need to pay attention to function wake notifications.
*/
-#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
+#define DEV_NOTE_FWAKE (1 << 1)
/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
-/* bit 0 is the command ring cycle state */
+/* bit 0 - Cycle bit indicates the ownership of the command ring */
+#define CMD_RING_CYCLE (1 << 0)
/* stop ring operation after completion of the currently executing command */
#define CMD_RING_PAUSE (1 << 1)
/* stop ring immediately - abort the currently executing command */
#define CMD_RING_ABORT (1 << 2)
/* true: command ring is running */
#define CMD_RING_RUNNING (1 << 3)
-/* bits 4:5 reserved and should be preserved */
-/* Command Ring pointer - bit mask for the lower 32 bits. */
-#define CMD_RING_RSVD_BITS (0x3f)
+/* bits 63:6 - Command Ring pointer */
+#define CMD_RING_PTR_MASK GENMASK_ULL(63, 6)
/* CONFIG - Configure Register - config_reg bitmasks */
/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
@@ -215,14 +210,13 @@ struct xhci_op_regs {
#define XHCI_PAGE_SIZE_MASK 0xffff
/**
- * struct xhci_intr_reg - Interrupt Register Set
- * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+ * struct xhci_intr_reg - Interrupt Register Set, v1.2 section 5.5.2.
+ * @iman: IMAN - Interrupt Management Register. Used to enable
* interrupts and check for pending interrupts.
- * @irq_control: IMOD - Interrupt Moderation Register.
- * Used to throttle interrupts.
- * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
- * @erst_base: ERST base address.
- * @erst_dequeue: Event ring dequeue pointer.
+ * @imod: IMOD - Interrupt Moderation Register. Used to throttle interrupts.
+ * @erst_size: ERSTSZ - Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base: ERSTBA - Event ring segment table base address.
+ * @erst_dequeue: ERDP - Event ring dequeue pointer.
*
* Each interrupter (defined by a MSI-X vector) has an event ring and an Event
* Ring Segment Table (ERST) associated with it. The event ring is comprised of
@@ -232,48 +226,51 @@ struct xhci_op_regs {
* updates the dequeue pointer.
*/
struct xhci_intr_reg {
- __le32 irq_pending;
- __le32 irq_control;
+ __le32 iman;
+ __le32 imod;
__le32 erst_size;
__le32 rsvd;
__le64 erst_base;
__le64 erst_dequeue;
};
-/* irq_pending bitmasks */
-#define ER_IRQ_PENDING(p) ((p) & 0x1)
-/* bits 2:31 need to be preserved */
-/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
-#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe)
-#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2)
-#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2))
-
-/* irq_control bitmasks */
-/* Minimum interval between interrupts (in 250ns intervals). The interval
- * between interrupts will be longer if there are no events on the event ring.
- * Default is 4000 (1 ms).
+/* iman bitmasks */
+/* bit 0 - Interrupt Pending (IP), whether there is an interrupt pending. Write-1-to-clear. */
+#define IMAN_IP (1 << 0)
+/* bit 1 - Interrupt Enable (IE), whether the interrupter is capable of generating an interrupt */
+#define IMAN_IE (1 << 1)
+
+/* imod bitmasks */
+/*
+ * bits 15:0 - Interrupt Moderation Interval, the minimum interval between interrupts
+ * (in 250ns intervals). The interval between interrupts will be longer if there are no
+ * events on the event ring. Default is 4000 (1 ms).
*/
-#define ER_IRQ_INTERVAL_MASK (0xffff)
-/* Counter used to count down the time to the next interrupt - HW use only */
-#define ER_IRQ_COUNTER_MASK (0xffff << 16)
+#define IMODI_MASK (0xffff)
+/* bits 31:16 - Interrupt Moderation Counter, used to count down the time to the next interrupt */
+#define IMODC_MASK (0xffff << 16)
/* erst_size bitmasks */
-/* Preserve bits 16:31 of erst_size */
-#define ERST_SIZE_MASK (0xffff << 16)
+/* bits 15:0 - Event Ring Segment Table Size, number of ERST entries */
+#define ERST_SIZE_MASK (0xffff)
/* erst_base bitmasks */
-#define ERST_BASE_RSVDP (GENMASK_ULL(5, 0))
+/* bits 63:6 - Event Ring Segment Table Base Address Register */
+#define ERST_BASE_ADDRESS_MASK GENMASK_ULL(63, 6)
/* erst_dequeue bitmasks */
-/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
- * where the current dequeue pointer lies. This is an optional HW hint.
+/*
+ * bits 2:0 - Dequeue ERST Segment Index (DESI), the segment number (or alias) where the
+ * current dequeue pointer lies. This is an optional HW hint.
*/
#define ERST_DESI_MASK (0x7)
-/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
+/*
+ * bit 3 - Event Handler Busy (EHB), whether the event ring is scheduled to be serviced by
+ * a work queue (or delayed service routine).
*/
#define ERST_EHB (1 << 3)
-#define ERST_PTR_MASK (GENMASK_ULL(63, 4))
+/* bits 63:4 - Event Ring Dequeue Pointer */
+#define ERST_PTR_MASK GENMASK_ULL(63, 4)
/**
* struct xhci_run_regs
@@ -589,6 +586,7 @@ struct xhci_stream_info {
#define SMALL_STREAM_ARRAY_SIZE 256
#define MEDIUM_STREAM_ARRAY_SIZE 1024
+#define GET_PORT_BW_ARRAY_SIZE 256
/* Some Intel xHCI host controllers need software to keep track of the bus
* bandwidth. Keep track of endpoint info here. Each root port is allocated
@@ -700,6 +698,8 @@ struct xhci_virt_ep {
int next_frame_id;
/* Use new Isoch TRB layout needed for extended TBC support */
bool use_extended_tbc;
+ /* set if this endpoint is controlled via sideband access */
+ struct xhci_sideband *sideband;
};
enum xhci_overhead_type {
@@ -762,6 +762,8 @@ struct xhci_virt_device {
u16 current_mel;
/* Used for the debugfs interfaces. */
void *debugfs_private;
+ /* set if this device is controlled via sideband access */
+ struct xhci_sideband *sideband;
};
/*
@@ -1002,6 +1004,9 @@ enum xhci_setup_dev {
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */
+/* bits 16:19 are the dev speed */
+#define DEV_SPEED_FOR_TRB(p) ((p) << 16)
+
/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
@@ -1447,8 +1452,8 @@ struct xhci_interrupter {
bool ip_autoclear;
u32 isoc_bei_interval;
/* For interrupter registers save and restore over suspend/resume */
- u32 s3_irq_pending;
- u32 s3_irq_control;
+ u32 s3_iman;
+ u32 s3_imod;
u32 s3_erst_size;
u64 s3_erst_base;
u64 s3_erst_dequeue;
@@ -1554,6 +1559,7 @@ struct xhci_hcd {
struct dma_pool *device_pool;
struct dma_pool *segment_pool;
struct dma_pool *small_streams_pool;
+ struct dma_pool *port_bw_pool;
struct dma_pool *medium_streams_pool;
/* Host controller watchdog timer structures */
@@ -1846,11 +1852,18 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags);
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
+struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci,
+ gfp_t flags);
+void xhci_free_port_bw_ctx(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx);
struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
- u32 imod_interval);
+ u32 imod_interval, unsigned int intr_num);
void xhci_remove_secondary_interrupter(struct usb_hcd
*hcd, struct xhci_interrupter *ir);
+void xhci_skip_sec_intr_events(struct xhci_hcd *xhci,
+ struct xhci_ring *ring,
+ struct xhci_interrupter *ir);
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
@@ -1891,7 +1904,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
u32 imod_interval);
int xhci_enable_interrupter(struct xhci_interrupter *ir);
-int xhci_disable_interrupter(struct xhci_interrupter *ir);
+int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -1916,6 +1929,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
bool command_must_succeed);
+int xhci_queue_get_port_bw(struct xhci_hcd *xhci,
+ struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
+ u8 dev_speed, bool command_must_succeed);
+int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ u8 dev_speed);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
@@ -1936,6 +1954,10 @@ unsigned int count_trbs(u64 addr, u64 len);
int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
int suspend, gfp_t gfp_flags);
void xhci_process_cancelled_tds(struct xhci_virt_ep *ep);
+void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir,
+ bool clear_ehb);
+void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 2af89ee28baa..65ac91d0595a 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1423,7 +1423,7 @@ static irqreturn_t isp1760_udc_irq(int irq, void *dev)
static void isp1760_udc_vbus_poll(struct timer_list *t)
{
- struct isp1760_udc *udc = from_timer(udc, t, vbus_timer);
+ struct isp1760_udc *udc = timer_container_of(udc, t, vbus_timer);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c
index f5372dfa241a..5b481876af1b 100644
--- a/drivers/usb/misc/onboard_usb_dev.c
+++ b/drivers/usb/misc/onboard_usb_dev.c
@@ -36,9 +36,10 @@
#define USB5744_CMD_CREG_ACCESS 0x99
#define USB5744_CMD_CREG_ACCESS_LSB 0x37
#define USB5744_CREG_MEM_ADDR 0x00
+#define USB5744_CREG_MEM_RD_ADDR 0x04
#define USB5744_CREG_WRITE 0x00
-#define USB5744_CREG_RUNTIMEFLAGS2 0x41
-#define USB5744_CREG_RUNTIMEFLAGS2_LSB 0x1D
+#define USB5744_CREG_READ 0x01
+#define USB5744_CREG_RUNTIMEFLAGS2 0x411D
#define USB5744_CREG_BYPASS_UDC_SUSPEND BIT(3)
static void onboard_dev_attach_usb_driver(struct work_struct *work);
@@ -309,11 +310,88 @@ static void onboard_dev_attach_usb_driver(struct work_struct *work)
pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err));
}
+#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744)
+static int onboard_dev_5744_i2c_read_byte(struct i2c_client *client, u16 addr, u8 *data)
+{
+ struct i2c_msg msg[2];
+ u8 rd_buf[3];
+ int ret;
+
+ u8 wr_buf[7] = {0, USB5744_CREG_MEM_ADDR, 4,
+ USB5744_CREG_READ, 1,
+ addr >> 8 & 0xff,
+ addr & 0xff};
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = sizeof(wr_buf);
+ msg[0].buf = wr_buf;
+
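+ /* stage the 4-byte CREG read command (op, count, register address) in hub memory */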
+ ret = i2c_transfer(client->adapter, msg, 1);
+ if (ret < 0)
+ return ret;
+
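+ /* execute the staged block via the Configuration Register Access command */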
+ wr_buf[0] = USB5744_CMD_CREG_ACCESS;
+ wr_buf[1] = USB5744_CMD_CREG_ACCESS_LSB;
+ wr_buf[2] = 0;
+ msg[0].len = 3;
+
+ ret = i2c_transfer(client->adapter, msg, 1);
+ if (ret < 0)
+ return ret;
+
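+ /* read two bytes back from the read-data area; the register value is the second byte */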
+ wr_buf[0] = 0;
+ wr_buf[1] = USB5744_CREG_MEM_RD_ADDR;
+ msg[0].len = 2;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = rd_buf;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0)
+ return ret;
+ *data = rd_buf[1];
+
+ return 0;
+}
+
+static int onboard_dev_5744_i2c_write_byte(struct i2c_client *client, u16 addr, u8 data)
+{
+ struct i2c_msg msg[2];
+ int ret;
+
+ u8 wr_buf[8] = {0, USB5744_CREG_MEM_ADDR, 5,
+ USB5744_CREG_WRITE, 1,
+ addr >> 8 & 0xff,
+ addr & 0xff,
+ data};
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = sizeof(wr_buf);
+ msg[0].buf = wr_buf;
+
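+ /* stage the 5-byte CREG write command (op, count, register address, data) in hub memory */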
+ ret = i2c_transfer(client->adapter, msg, 1);
+ if (ret < 0)
+ return ret;
+
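+ /* commit the staged write via the Configuration Register Access command */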
+ msg[0].len = 3;
+ wr_buf[0] = USB5744_CMD_CREG_ACCESS;
+ wr_buf[1] = USB5744_CMD_CREG_ACCESS_LSB;
+ wr_buf[2] = 0;
+
+ ret = i2c_transfer(client->adapter, msg, 1);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int onboard_dev_5744_i2c_init(struct i2c_client *client)
{
-#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744)
struct device *dev = &client->dev;
int ret;
+ u8 reg;
/*
* Set BYPASS_UDC_SUSPEND bit to ensure MCU is always enabled
@@ -321,20 +399,16 @@ static int onboard_dev_5744_i2c_init(struct i2c_client *client)
* The command writes 5 bytes to memory and single data byte in
* configuration register.
*/
- char wr_buf[7] = {USB5744_CREG_MEM_ADDR, 5,
- USB5744_CREG_WRITE, 1,
- USB5744_CREG_RUNTIMEFLAGS2,
- USB5744_CREG_RUNTIMEFLAGS2_LSB,
- USB5744_CREG_BYPASS_UDC_SUSPEND};
-
- ret = i2c_smbus_write_block_data(client, 0, sizeof(wr_buf), wr_buf);
+ ret = onboard_dev_5744_i2c_read_byte(client,
+ USB5744_CREG_RUNTIMEFLAGS2, &reg);
if (ret)
- return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n");
+ return dev_err_probe(dev, ret, "CREG_RUNTIMEFLAGS2 read failed\n");
- ret = i2c_smbus_write_word_data(client, USB5744_CMD_CREG_ACCESS,
- USB5744_CMD_CREG_ACCESS_LSB);
+ reg |= USB5744_CREG_BYPASS_UDC_SUSPEND;
+ ret = onboard_dev_5744_i2c_write_byte(client,
+ USB5744_CREG_RUNTIMEFLAGS2, reg);
if (ret)
- return dev_err_probe(dev, ret, "Configuration Register Access Command failed\n");
+ return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n");
/* Send SMBus command to boot hub. */
ret = i2c_smbus_write_word_data(client, USB5744_CMD_ATTACH,
@@ -343,10 +417,13 @@ static int onboard_dev_5744_i2c_init(struct i2c_client *client)
return dev_err_probe(dev, ret, "USB Attach with SMBus command failed\n");
return ret;
+}
#else
+static int onboard_dev_5744_i2c_init(struct i2c_client *client)
+{
return -ENODEV;
-#endif
}
+#endif
static int onboard_dev_probe(struct platform_device *pdev)
{
@@ -490,6 +567,7 @@ static struct platform_driver onboard_dev_driver = {
#define VENDOR_ID_CYPRESS 0x04b4
#define VENDOR_ID_GENESYS 0x05e3
#define VENDOR_ID_MICROCHIP 0x0424
+#define VENDOR_ID_PARADE 0x1da0
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_TI 0x0451
#define VENDOR_ID_VIA 0x2109
@@ -586,14 +664,19 @@ static const struct usb_device_id onboard_dev_id_table[] = {
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 HUB */
+ { USB_DEVICE(VENDOR_ID_PARADE, 0x5511) }, /* PS5511 USB 3.2 */
+ { USB_DEVICE(VENDOR_ID_PARADE, 0x55a1) }, /* PS5511 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 HUB */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 HUB */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 HUB */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 HUB */
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0179) }, /* RTL8188ETV 2.4GHz WiFi */
{ USB_DEVICE(VENDOR_ID_TI, 0x8025) }, /* TI USB8020B 3.0 HUB */
{ USB_DEVICE(VENDOR_ID_TI, 0x8027) }, /* TI USB8020B 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 HUB */
{ USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 HUB */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8440) }, /* TI USB8044 3.0 HUB */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8442) }, /* TI USB8044 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 HUB */
{ USB_DEVICE(VENDOR_ID_VIA, 0x2817) }, /* VIA VL817 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_XMOS, 0x0013) }, /* XMOS XVF3500 Voice Processor */
diff --git a/drivers/usb/misc/onboard_usb_dev.h b/drivers/usb/misc/onboard_usb_dev.h
index 933797a7e084..e017b8e22f93 100644
--- a/drivers/usb/misc/onboard_usb_dev.h
+++ b/drivers/usb/misc/onboard_usb_dev.h
@@ -38,6 +38,13 @@ static const struct onboard_dev_pdata microchip_usb5744_data = {
.is_hub = true,
};
+static const struct onboard_dev_pdata parade_ps5511_data = {
+ .reset_us = 500,
+ .num_supplies = 2,
+ .supply_names = { "vddd11", "vdd33"},
+ .is_hub = true,
+};
+
static const struct onboard_dev_pdata realtek_rts5411_data = {
.reset_us = 0,
.num_supplies = 1,
@@ -45,6 +52,13 @@ static const struct onboard_dev_pdata realtek_rts5411_data = {
.is_hub = true,
};
+static const struct onboard_dev_pdata realtek_rtl8188etv_data = {
+ .reset_us = 0,
+ .num_supplies = 1,
+ .supply_names = { "vdd" },
+ .is_hub = false,
+};
+
static const struct onboard_dev_pdata ti_tusb8020b_data = {
.reset_us = 3000,
.num_supplies = 1,
@@ -111,6 +125,8 @@ static const struct of_device_id onboard_dev_match[] = {
{ .compatible = "usb451,8027", .data = &ti_tusb8020b_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
+ { .compatible = "usb451,8440", .data = &ti_tusb8041_data, },
+ { .compatible = "usb451,8442", .data = &ti_tusb8041_data, },
{ .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
{ .compatible = "usb4b4,6506", .data = &cypress_hx3_data, },
{ .compatible = "usb4b4,6570", .data = &cypress_hx2vl_data, },
@@ -118,10 +134,13 @@ static const struct of_device_id onboard_dev_match[] = {
{ .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
{ .compatible = "usb5e3,620", .data = &genesys_gl852g_data, },
{ .compatible = "usb5e3,626", .data = &genesys_gl852g_data, },
+ { .compatible = "usbbda,179", .data = &realtek_rtl8188etv_data, },
{ .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,5414", .data = &realtek_rts5411_data, },
+ { .compatible = "usb1da0,5511", .data = &parade_ps5511_data, },
+ { .compatible = "usb1da0,55a1", .data = &parade_ps5511_data, },
{ .compatible = "usb2109,817", .data = &vialab_vl817_data, },
{ .compatible = "usb2109,2817", .data = &vialab_vl817_data, },
{ .compatible = "usb20b1,0013", .data = &xmos_xvf3500_data, },
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 853a5f082a70..5c92c8d8e283 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -592,7 +592,7 @@ struct sg_timeout {
static void sg_timeout(struct timer_list *t)
{
- struct sg_timeout *timeout = from_timer(timeout, t, timer);
+ struct sg_timeout *timeout = timer_container_of(timeout, t, timer);
usb_sg_cancel(timeout->req);
}
@@ -630,7 +630,7 @@ static int perform_sglist(
retval = -ETIMEDOUT;
else
retval = req->status;
- destroy_timer_on_stack(&timeout.timer);
+ timer_destroy_on_stack(&timeout.timer);
/* FIXME check resulting data pattern */
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index eebb24ab3ec8..4209f438ba18 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -123,7 +123,8 @@ static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
static void otg_timer(struct timer_list *t)
{
- struct musb *musb = from_timer(musb, t, dev_timer);
+ struct musb *musb = timer_container_of(musb, t,
+ dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c
index 020348a98514..587127abd30a 100644
--- a/drivers/usb/musb/mpfs.c
+++ b/drivers/usb/musb/mpfs.c
@@ -91,7 +91,8 @@ static void mpfs_musb_set_vbus(struct musb *musb, int is_on)
static void otg_timer(struct timer_list *t)
{
- struct musb *musb = from_timer(musb, t, dev_timer);
+ struct musb *musb = timer_container_of(musb, t,
+ dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index cbbb27178024..c7234b236971 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -596,7 +596,7 @@ void musb_load_testpacket(struct musb *musb)
*/
static void musb_otg_timer_func(struct timer_list *t)
{
- struct musb *musb = from_timer(musb, t, otg_timer);
+ struct musb *musb = timer_container_of(musb, t, otg_timer);
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index e5e813f97fac..12f587ab8511 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -278,7 +278,7 @@ static int dsps_check_status(struct musb *musb, void *unused)
static void otg_timer(struct timer_list *t)
{
- struct musb *musb = from_timer(musb, t, dev_timer);
+ struct musb *musb = timer_container_of(musb, t, dev_timer);
struct device *dev = musb->controller;
unsigned long flags;
int err;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index abd2472da7f7..1d9571f24a56 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -457,7 +457,7 @@ static int tusb_musb_vbus_status(struct musb *musb)
static void musb_do_idle(struct timer_list *t)
{
- struct musb *musb = from_timer(musb, t, dev_timer);
+ struct musb *musb = timer_container_of(musb, t, dev_timer);
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 5f629d7cad64..b7acf3966cd7 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -126,18 +126,6 @@ config USB_ISP1301
To compile this driver as a module, choose M here: the
module will be called phy-isp1301.
-config USB_MV_OTG
- tristate "Marvell USB OTG support"
- depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
- depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
- select USB_PHY
- help
- Say Y here if you want to build Marvell USB OTG transceiver
- driver in kernel (including PXA and MMP series). This driver
- implements role switch between EHCI host driver and gadget driver.
-
- To compile this driver as a module, choose M here.
-
config USB_MXS_PHY
tristate "Freescale MXS USB PHY support"
depends on ARCH_MXC || ARCH_MXS
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index e5d619b4d8f6..ceae46c5341d 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_TWL6030_USB) += phy-twl6030-usb.o
obj-$(CONFIG_USB_TEGRA_PHY) += phy-tegra-usb.o
obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o
obj-$(CONFIG_USB_ISP1301) += phy-isp1301.o
-obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o
obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o
obj-$(CONFIG_USB_ULPI) += phy-ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
deleted file mode 100644
index 638fba58420c..000000000000
--- a/drivers/usb/phy/phy-mv-usb.c
+++ /dev/null
@@ -1,881 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- * Author: Chao Xie <chao.xie@marvell.com>
- * Neil Zhang <zhangwm@marvell.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/uaccess.h>
-#include <linux/device.h>
-#include <linux/proc_fs.h>
-#include <linux/clk.h>
-#include <linux/workqueue.h>
-#include <linux/platform_device.h>
-#include <linux/string_choices.h>
-
-#include <linux/usb.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/hcd.h>
-#include <linux/platform_data/mv_usb.h>
-
-#include "phy-mv-usb.h"
-
-#define DRIVER_DESC "Marvell USB OTG transceiver driver"
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
-static const char driver_name[] = "mv-otg";
-
-static char *state_string[] = {
- "undefined",
- "b_idle",
- "b_srp_init",
- "b_peripheral",
- "b_wait_acon",
- "b_host",
- "a_idle",
- "a_wait_vrise",
- "a_wait_bcon",
- "a_host",
- "a_suspend",
- "a_peripheral",
- "a_wait_vfall",
- "a_vbus_err"
-};
-
-static int mv_otg_set_vbus(struct usb_otg *otg, bool on)
-{
- struct mv_otg *mvotg = container_of(otg->usb_phy, struct mv_otg, phy);
- if (mvotg->pdata->set_vbus == NULL)
- return -ENODEV;
-
- return mvotg->pdata->set_vbus(on);
-}
-
-static int mv_otg_set_host(struct usb_otg *otg,
- struct usb_bus *host)
-{
- otg->host = host;
-
- return 0;
-}
-
-static int mv_otg_set_peripheral(struct usb_otg *otg,
- struct usb_gadget *gadget)
-{
- otg->gadget = gadget;
-
- return 0;
-}
-
-static void mv_otg_run_state_machine(struct mv_otg *mvotg,
- unsigned long delay)
-{
- dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n");
- if (!mvotg->qwork)
- return;
-
- queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
-}
-
-static void mv_otg_timer_await_bcon(struct timer_list *t)
-{
- struct mv_otg *mvotg = from_timer(mvotg, t,
- otg_ctrl.timer[A_WAIT_BCON_TIMER]);
-
- mvotg->otg_ctrl.a_wait_bcon_timeout = 1;
-
- dev_info(&mvotg->pdev->dev, "B Device No Response!\n");
-
- if (spin_trylock(&mvotg->wq_lock)) {
- mv_otg_run_state_machine(mvotg, 0);
- spin_unlock(&mvotg->wq_lock);
- }
-}
-
-static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
-{
- struct timer_list *timer;
-
- if (id >= OTG_TIMER_NUM)
- return -EINVAL;
-
- timer = &mvotg->otg_ctrl.timer[id];
-
- if (timer_pending(timer))
- timer_delete(timer);
-
- return 0;
-}
-
-static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
- unsigned long interval)
-{
- struct timer_list *timer;
-
- if (id >= OTG_TIMER_NUM)
- return -EINVAL;
-
- timer = &mvotg->otg_ctrl.timer[id];
- if (timer_pending(timer)) {
- dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id);
- return -EBUSY;
- }
-
- timer->expires = jiffies + interval;
- add_timer(timer);
-
- return 0;
-}
-
-static int mv_otg_reset(struct mv_otg *mvotg)
-{
- u32 tmp;
- int ret;
-
- /* Stop the controller */
- tmp = readl(&mvotg->op_regs->usbcmd);
- tmp &= ~USBCMD_RUN_STOP;
- writel(tmp, &mvotg->op_regs->usbcmd);
-
- /* Reset the controller to get default values */
- writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd);
-
- ret = readl_poll_timeout_atomic(&mvotg->op_regs->usbcmd, tmp,
- (tmp & USBCMD_CTRL_RESET), 10, 10000);
- if (ret < 0) {
- dev_err(&mvotg->pdev->dev,
- "Wait for RESET completed TIMEOUT\n");
- return ret;
- }
-
- writel(0x0, &mvotg->op_regs->usbintr);
- tmp = readl(&mvotg->op_regs->usbsts);
- writel(tmp, &mvotg->op_regs->usbsts);
-
- return 0;
-}
-
-static void mv_otg_init_irq(struct mv_otg *mvotg)
-{
- u32 otgsc;
-
- mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID
- | OTGSC_INTR_A_VBUS_VALID;
- mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID
- | OTGSC_INTSTS_A_VBUS_VALID;
-
- if (mvotg->pdata->vbus == NULL) {
- mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID
- | OTGSC_INTR_B_SESSION_END;
- mvotg->irq_status |= OTGSC_INTSTS_B_SESSION_VALID
- | OTGSC_INTSTS_B_SESSION_END;
- }
-
- if (mvotg->pdata->id == NULL) {
- mvotg->irq_en |= OTGSC_INTR_USB_ID;
- mvotg->irq_status |= OTGSC_INTSTS_USB_ID;
- }
-
- otgsc = readl(&mvotg->op_regs->otgsc);
- otgsc |= mvotg->irq_en;
- writel(otgsc, &mvotg->op_regs->otgsc);
-}
-
-static void mv_otg_start_host(struct mv_otg *mvotg, int on)
-{
-#ifdef CONFIG_USB
- struct usb_otg *otg = mvotg->phy.otg;
- struct usb_hcd *hcd;
-
- if (!otg->host)
- return;
-
- dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop");
-
- hcd = bus_to_hcd(otg->host);
-
- if (on) {
- usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
- device_wakeup_enable(hcd->self.controller);
- } else {
- usb_remove_hcd(hcd);
- }
-#endif /* CONFIG_USB */
-}
-
-static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on)
-{
- struct usb_otg *otg = mvotg->phy.otg;
-
- if (!otg->gadget)
- return;
-
- dev_info(mvotg->phy.dev, "gadget %s\n", str_on_off(on));
-
- if (on)
- usb_gadget_vbus_connect(otg->gadget);
- else
- usb_gadget_vbus_disconnect(otg->gadget);
-}
-
-static void otg_clock_enable(struct mv_otg *mvotg)
-{
- clk_prepare_enable(mvotg->clk);
-}
-
-static void otg_clock_disable(struct mv_otg *mvotg)
-{
- clk_disable_unprepare(mvotg->clk);
-}
-
-static int mv_otg_enable_internal(struct mv_otg *mvotg)
-{
- int retval = 0;
-
- if (mvotg->active)
- return 0;
-
- dev_dbg(&mvotg->pdev->dev, "otg enabled\n");
-
- otg_clock_enable(mvotg);
- if (mvotg->pdata->phy_init) {
- retval = mvotg->pdata->phy_init(mvotg->phy_regs);
- if (retval) {
- dev_err(&mvotg->pdev->dev,
- "init phy error %d\n", retval);
- otg_clock_disable(mvotg);
- return retval;
- }
- }
- mvotg->active = 1;
-
- return 0;
-
-}
-
-static int mv_otg_enable(struct mv_otg *mvotg)
-{
- if (mvotg->clock_gating)
- return mv_otg_enable_internal(mvotg);
-
- return 0;
-}
-
-static void mv_otg_disable_internal(struct mv_otg *mvotg)
-{
- if (mvotg->active) {
- dev_dbg(&mvotg->pdev->dev, "otg disabled\n");
- if (mvotg->pdata->phy_deinit)
- mvotg->pdata->phy_deinit(mvotg->phy_regs);
- otg_clock_disable(mvotg);
- mvotg->active = 0;
- }
-}
-
-static void mv_otg_disable(struct mv_otg *mvotg)
-{
- if (mvotg->clock_gating)
- mv_otg_disable_internal(mvotg);
-}
-
-static void mv_otg_update_inputs(struct mv_otg *mvotg)
-{
- struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
- u32 otgsc;
-
- otgsc = readl(&mvotg->op_regs->otgsc);
-
- if (mvotg->pdata->vbus) {
- if (mvotg->pdata->vbus->poll() == VBUS_HIGH) {
- otg_ctrl->b_sess_vld = 1;
- otg_ctrl->b_sess_end = 0;
- } else {
- otg_ctrl->b_sess_vld = 0;
- otg_ctrl->b_sess_end = 1;
- }
- } else {
- otg_ctrl->b_sess_vld = !!(otgsc & OTGSC_STS_B_SESSION_VALID);
- otg_ctrl->b_sess_end = !!(otgsc & OTGSC_STS_B_SESSION_END);
- }
-
- if (mvotg->pdata->id)
- otg_ctrl->id = !!mvotg->pdata->id->poll();
- else
- otg_ctrl->id = !!(otgsc & OTGSC_STS_USB_ID);
-
- if (mvotg->pdata->otg_force_a_bus_req && !otg_ctrl->id)
- otg_ctrl->a_bus_req = 1;
-
- otg_ctrl->a_sess_vld = !!(otgsc & OTGSC_STS_A_SESSION_VALID);
- otg_ctrl->a_vbus_vld = !!(otgsc & OTGSC_STS_A_VBUS_VALID);
-
- dev_dbg(&mvotg->pdev->dev, "%s: ", __func__);
- dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id);
- dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld);
- dev_dbg(&mvotg->pdev->dev, "b_sess_end %d\n", otg_ctrl->b_sess_end);
- dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld);
- dev_dbg(&mvotg->pdev->dev, "a_sess_vld %d\n", otg_ctrl->a_sess_vld);
-}
-
-static void mv_otg_update_state(struct mv_otg *mvotg)
-{
- struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
- int old_state = mvotg->phy.otg->state;
-
- switch (old_state) {
- case OTG_STATE_UNDEFINED:
- mvotg->phy.otg->state = OTG_STATE_B_IDLE;
- fallthrough;
- case OTG_STATE_B_IDLE:
- if (otg_ctrl->id == 0)
- mvotg->phy.otg->state = OTG_STATE_A_IDLE;
- else if (otg_ctrl->b_sess_vld)
- mvotg->phy.otg->state = OTG_STATE_B_PERIPHERAL;
- break;
- case OTG_STATE_B_PERIPHERAL:
- if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0)
- mvotg->phy.otg->state = OTG_STATE_B_IDLE;
- break;
- case OTG_STATE_A_IDLE:
- if (otg_ctrl->id)
- mvotg->phy.otg->state = OTG_STATE_B_IDLE;
- else if (!(otg_ctrl->a_bus_drop) &&
- (otg_ctrl->a_bus_req || otg_ctrl->a_srp_det))
- mvotg->phy.otg->state = OTG_STATE_A_WAIT_VRISE;
- break;
- case OTG_STATE_A_WAIT_VRISE:
- if (otg_ctrl->a_vbus_vld)
- mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON;
- break;
- case OTG_STATE_A_WAIT_BCON:
- if (otg_ctrl->id || otg_ctrl->a_bus_drop
- || otg_ctrl->a_wait_bcon_timeout) {
- mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
- mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
- mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
- otg_ctrl->a_bus_req = 0;
- } else if (!otg_ctrl->a_vbus_vld) {
- mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
- mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
- mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR;
- } else if (otg_ctrl->b_conn) {
- mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
- mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
- mvotg->phy.otg->state = OTG_STATE_A_HOST;
- }
- break;
- case OTG_STATE_A_HOST:
- if (otg_ctrl->id || !otg_ctrl->b_conn
- || otg_ctrl->a_bus_drop)
- mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON;
- else if (!otg_ctrl->a_vbus_vld)
- mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR;
- break;
- case OTG_STATE_A_WAIT_VFALL:
- if (otg_ctrl->id
- || (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld)
- || otg_ctrl->a_bus_req)
- mvotg->phy.otg->state = OTG_STATE_A_IDLE;
- break;
- case OTG_STATE_A_VBUS_ERR:
- if (otg_ctrl->id || otg_ctrl->a_clr_err
- || otg_ctrl->a_bus_drop) {
- otg_ctrl->a_clr_err = 0;
- mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
- }
- break;
- default:
- break;
- }
-}
-
-static void mv_otg_work(struct work_struct *work)
-{
- struct mv_otg *mvotg;
- struct usb_otg *otg;
- int old_state;
-
- mvotg = container_of(to_delayed_work(work), struct mv_otg, work);
-
-run:
- /* work queue is single thread, or we need spin_lock to protect */
- otg = mvotg->phy.otg;
- old_state = otg->state;
-
- if (!mvotg->active)
- return;
-
- mv_otg_update_inputs(mvotg);
- mv_otg_update_state(mvotg);
-
- if (old_state != mvotg->phy.otg->state) {
- dev_info(&mvotg->pdev->dev, "change from state %s to %s\n",
- state_string[old_state],
- state_string[mvotg->phy.otg->state]);
-
- switch (mvotg->phy.otg->state) {
- case OTG_STATE_B_IDLE:
- otg->default_a = 0;
- if (old_state == OTG_STATE_B_PERIPHERAL)
- mv_otg_start_periphrals(mvotg, 0);
- mv_otg_reset(mvotg);
- mv_otg_disable(mvotg);
- usb_phy_set_event(&mvotg->phy, USB_EVENT_NONE);
- break;
- case OTG_STATE_B_PERIPHERAL:
- mv_otg_enable(mvotg);
- mv_otg_start_periphrals(mvotg, 1);
- usb_phy_set_event(&mvotg->phy, USB_EVENT_ENUMERATED);
- break;
- case OTG_STATE_A_IDLE:
- otg->default_a = 1;
- mv_otg_enable(mvotg);
- if (old_state == OTG_STATE_A_WAIT_VFALL)
- mv_otg_start_host(mvotg, 0);
- mv_otg_reset(mvotg);
- break;
- case OTG_STATE_A_WAIT_VRISE:
- mv_otg_set_vbus(otg, 1);
- break;
- case OTG_STATE_A_WAIT_BCON:
- if (old_state != OTG_STATE_A_HOST)
- mv_otg_start_host(mvotg, 1);
- mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
- T_A_WAIT_BCON);
- /*
- * Now, we directly enter A_HOST. So set b_conn = 1
- * here. In fact, it need host driver to notify us.
- */
- mvotg->otg_ctrl.b_conn = 1;
- break;
- case OTG_STATE_A_HOST:
- break;
- case OTG_STATE_A_WAIT_VFALL:
- /*
- * Now, we has exited A_HOST. So set b_conn = 0
- * here. In fact, it need host driver to notify us.
- */
- mvotg->otg_ctrl.b_conn = 0;
- mv_otg_set_vbus(otg, 0);
- break;
- case OTG_STATE_A_VBUS_ERR:
- break;
- default:
- break;
- }
- goto run;
- }
-}
-
-static irqreturn_t mv_otg_irq(int irq, void *dev)
-{
- struct mv_otg *mvotg = dev;
- u32 otgsc;
-
- otgsc = readl(&mvotg->op_regs->otgsc);
- writel(otgsc, &mvotg->op_regs->otgsc);
-
- /*
- * if we have vbus, then the vbus detection for B-device
- * will be done by mv_otg_inputs_irq().
- */
- if (mvotg->pdata->vbus)
- if ((otgsc & OTGSC_STS_USB_ID) &&
- !(otgsc & OTGSC_INTSTS_USB_ID))
- return IRQ_NONE;
-
- if ((otgsc & mvotg->irq_status) == 0)
- return IRQ_NONE;
-
- mv_otg_run_state_machine(mvotg, 0);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mv_otg_inputs_irq(int irq, void *dev)
-{
- struct mv_otg *mvotg = dev;
-
- /* The clock may disabled at this time */
- if (!mvotg->active) {
- mv_otg_enable(mvotg);
- mv_otg_init_irq(mvotg);
- }
-
- mv_otg_run_state_machine(mvotg, 0);
-
- return IRQ_HANDLED;
-}
-
-static ssize_t
-a_bus_req_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct mv_otg *mvotg = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- mvotg->otg_ctrl.a_bus_req);
-}
-
-static ssize_t
-a_bus_req_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct mv_otg *mvotg = dev_get_drvdata(dev);
-
- if (count > 2)
- return -1;
-
- /* We will use this interface to change to A device */
- if (mvotg->phy.otg->state != OTG_STATE_B_IDLE
- && mvotg->phy.otg->state != OTG_STATE_A_IDLE)
- return -1;
-
- /* The clock may disabled and we need to set irq for ID detected */
- mv_otg_enable(mvotg);
- mv_otg_init_irq(mvotg);
-
- if (buf[0] == '1') {
- mvotg->otg_ctrl.a_bus_req = 1;
- mvotg->otg_ctrl.a_bus_drop = 0;
- dev_dbg(&mvotg->pdev->dev,
- "User request: a_bus_req = 1\n");
-
- if (spin_trylock(&mvotg->wq_lock)) {
- mv_otg_run_state_machine(mvotg, 0);
- spin_unlock(&mvotg->wq_lock);
- }
- }
-
- return count;
-}
-
-static DEVICE_ATTR_RW(a_bus_req);
-
-static ssize_t
-a_clr_err_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct mv_otg *mvotg = dev_get_drvdata(dev);
- if (!mvotg->phy.otg->default_a)
- return -1;
-
- if (count > 2)
- return -1;
-
- if (buf[0] == '1') {
- mvotg->otg_ctrl.a_clr_err = 1;
- dev_dbg(&mvotg->pdev->dev,
- "User request: a_clr_err = 1\n");
- }
-
- if (spin_trylock(&mvotg->wq_lock)) {
- mv_otg_run_state_machine(mvotg, 0);
- spin_unlock(&mvotg->wq_lock);
- }
-
- return count;
-}
-
-static DEVICE_ATTR_WO(a_clr_err);
-
-static ssize_t
-a_bus_drop_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct mv_otg *mvotg = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- mvotg->otg_ctrl.a_bus_drop);
-}
-
-static ssize_t
-a_bus_drop_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct mv_otg *mvotg = dev_get_drvdata(dev);
- if (!mvotg->phy.otg->default_a)
- return -1;
-
- if (count > 2)
- return -1;
-
- if (buf[0] == '0') {
- mvotg->otg_ctrl.a_bus_drop = 0;
- dev_dbg(&mvotg->pdev->dev,
- "User request: a_bus_drop = 0\n");
- } else if (buf[0] == '1') {
- mvotg->otg_ctrl.a_bus_drop = 1;
- mvotg->otg_ctrl.a_bus_req = 0;
- dev_dbg(&mvotg->pdev->dev,
- "User request: a_bus_drop = 1\n");
- dev_dbg(&mvotg->pdev->dev,
- "User request: and a_bus_req = 0\n");
- }
-
- if (spin_trylock(&mvotg->wq_lock)) {
- mv_otg_run_state_machine(mvotg, 0);
- spin_unlock(&mvotg->wq_lock);
- }
-
- return count;
-}
-
-static DEVICE_ATTR_RW(a_bus_drop);
-
-static struct attribute *inputs_attrs[] = {
- &dev_attr_a_bus_req.attr,
- &dev_attr_a_clr_err.attr,
- &dev_attr_a_bus_drop.attr,
- NULL,
-};
-
-static const struct attribute_group inputs_attr_group = {
- .name = "inputs",
- .attrs = inputs_attrs,
-};
-
-static const struct attribute_group *mv_otg_groups[] = {
- &inputs_attr_group,
- NULL,
-};
-
-static void mv_otg_remove(struct platform_device *pdev)
-{
- struct mv_otg *mvotg = platform_get_drvdata(pdev);
-
- if (mvotg->qwork)
- destroy_workqueue(mvotg->qwork);
-
- mv_otg_disable(mvotg);
-
- usb_remove_phy(&mvotg->phy);
-}
-
-static int mv_otg_probe(struct platform_device *pdev)
-{
- struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct mv_otg *mvotg;
- struct usb_otg *otg;
- struct resource *r;
- int retval = 0, i;
-
- if (pdata == NULL) {
- dev_err(&pdev->dev, "failed to get platform data\n");
- return -ENODEV;
- }
-
- mvotg = devm_kzalloc(&pdev->dev, sizeof(*mvotg), GFP_KERNEL);
- if (!mvotg)
- return -ENOMEM;
-
- otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
- if (!otg)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, mvotg);
-
- mvotg->pdev = pdev;
- mvotg->pdata = pdata;
-
- mvotg->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(mvotg->clk))
- return PTR_ERR(mvotg->clk);
-
- mvotg->qwork = create_singlethread_workqueue("mv_otg_queue");
- if (!mvotg->qwork) {
- dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n");
- return -ENOMEM;
- }
-
- INIT_DELAYED_WORK(&mvotg->work, mv_otg_work);
-
- /* OTG common part */
- mvotg->pdev = pdev;
- mvotg->phy.dev = &pdev->dev;
- mvotg->phy.otg = otg;
- mvotg->phy.label = driver_name;
-
- otg->state = OTG_STATE_UNDEFINED;
- otg->usb_phy = &mvotg->phy;
- otg->set_host = mv_otg_set_host;
- otg->set_peripheral = mv_otg_set_peripheral;
- otg->set_vbus = mv_otg_set_vbus;
-
- for (i = 0; i < OTG_TIMER_NUM; i++)
- timer_setup(&mvotg->otg_ctrl.timer[i],
- mv_otg_timer_await_bcon, 0);
-
- r = platform_get_resource_byname(mvotg->pdev,
- IORESOURCE_MEM, "phyregs");
- if (r == NULL) {
- dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
- retval = -ENODEV;
- goto err_destroy_workqueue;
- }
-
- mvotg->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (mvotg->phy_regs == NULL) {
- dev_err(&pdev->dev, "failed to map phy I/O memory\n");
- retval = -EFAULT;
- goto err_destroy_workqueue;
- }
-
- r = platform_get_resource_byname(mvotg->pdev,
- IORESOURCE_MEM, "capregs");
- if (r == NULL) {
- dev_err(&pdev->dev, "no I/O memory resource defined\n");
- retval = -ENODEV;
- goto err_destroy_workqueue;
- }
-
- mvotg->cap_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (mvotg->cap_regs == NULL) {
- dev_err(&pdev->dev, "failed to map I/O memory\n");
- retval = -EFAULT;
- goto err_destroy_workqueue;
- }
-
- /* we will acces controller register, so enable the udc controller */
- retval = mv_otg_enable_internal(mvotg);
- if (retval) {
- dev_err(&pdev->dev, "mv otg enable error %d\n", retval);
- goto err_destroy_workqueue;
- }
-
- mvotg->op_regs =
- (struct mv_otg_regs __iomem *) ((unsigned long) mvotg->cap_regs
- + (readl(mvotg->cap_regs) & CAPLENGTH_MASK));
-
- if (pdata->id) {
- retval = devm_request_threaded_irq(&pdev->dev, pdata->id->irq,
- NULL, mv_otg_inputs_irq,
- IRQF_ONESHOT, "id", mvotg);
- if (retval) {
- dev_info(&pdev->dev,
- "Failed to request irq for ID\n");
- pdata->id = NULL;
- }
- }
-
- if (pdata->vbus) {
- mvotg->clock_gating = 1;
- retval = devm_request_threaded_irq(&pdev->dev, pdata->vbus->irq,
- NULL, mv_otg_inputs_irq,
- IRQF_ONESHOT, "vbus", mvotg);
- if (retval) {
- dev_info(&pdev->dev,
- "Failed to request irq for VBUS, "
- "disable clock gating\n");
- mvotg->clock_gating = 0;
- pdata->vbus = NULL;
- }
- }
-
- if (pdata->disable_otg_clock_gating)
- mvotg->clock_gating = 0;
-
- mv_otg_reset(mvotg);
- mv_otg_init_irq(mvotg);
-
- r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no IRQ resource defined\n");
- retval = -ENODEV;
- goto err_disable_clk;
- }
-
- mvotg->irq = r->start;
- if (devm_request_irq(&pdev->dev, mvotg->irq, mv_otg_irq, IRQF_SHARED,
- driver_name, mvotg)) {
- dev_err(&pdev->dev, "Request irq %d for OTG failed\n",
- mvotg->irq);
- mvotg->irq = 0;
- retval = -ENODEV;
- goto err_disable_clk;
- }
-
- retval = usb_add_phy(&mvotg->phy, USB_PHY_TYPE_USB2);
- if (retval < 0) {
- dev_err(&pdev->dev, "can't register transceiver, %d\n",
- retval);
- goto err_disable_clk;
- }
-
- spin_lock_init(&mvotg->wq_lock);
- if (spin_trylock(&mvotg->wq_lock)) {
- mv_otg_run_state_machine(mvotg, 2 * HZ);
- spin_unlock(&mvotg->wq_lock);
- }
-
- dev_info(&pdev->dev,
- "successful probe OTG device %s clock gating.\n",
- mvotg->clock_gating ? "with" : "without");
-
- return 0;
-
-err_disable_clk:
- mv_otg_disable_internal(mvotg);
-err_destroy_workqueue:
- destroy_workqueue(mvotg->qwork);
-
- return retval;
-}
-
-#ifdef CONFIG_PM
-static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct mv_otg *mvotg = platform_get_drvdata(pdev);
-
- if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) {
- dev_info(&pdev->dev,
- "OTG state is not B_IDLE, it is %d!\n",
- mvotg->phy.otg->state);
- return -EAGAIN;
- }
-
- if (!mvotg->clock_gating)
- mv_otg_disable_internal(mvotg);
-
- return 0;
-}
-
-static int mv_otg_resume(struct platform_device *pdev)
-{
- struct mv_otg *mvotg = platform_get_drvdata(pdev);
- u32 otgsc;
-
- if (!mvotg->clock_gating) {
- mv_otg_enable_internal(mvotg);
-
- otgsc = readl(&mvotg->op_regs->otgsc);
- otgsc |= mvotg->irq_en;
- writel(otgsc, &mvotg->op_regs->otgsc);
-
- if (spin_trylock(&mvotg->wq_lock)) {
- mv_otg_run_state_machine(mvotg, 0);
- spin_unlock(&mvotg->wq_lock);
- }
- }
- return 0;
-}
-#endif
-
-static struct platform_driver mv_otg_driver = {
- .probe = mv_otg_probe,
- .remove = mv_otg_remove,
- .driver = {
- .name = driver_name,
- .dev_groups = mv_otg_groups,
- },
-#ifdef CONFIG_PM
- .suspend = mv_otg_suspend,
- .resume = mv_otg_resume,
-#endif
-};
-module_platform_driver(mv_otg_driver);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 4b35ef216125..f52418fe3fd4 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -685,10 +685,29 @@ static int usbhs_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&priv->notify_hotplug_work, usbhsc_notify_hotplug);
spin_lock_init(usbhs_priv_to_lock(priv));
+ /*
+ * Acquire clocks and enable power management (PM) early in the
+ * probe process, as the driver accesses registers during
+ * initialization. Ensure the device is active before proceeding.
+ */
+ pm_runtime_enable(dev);
+
+ ret = usbhsc_clk_get(dev, priv);
+ if (ret)
+ goto probe_pm_disable;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ goto probe_clk_put;
+
+ ret = usbhsc_clk_prepare_enable(priv);
+ if (ret)
+ goto probe_pm_put;
+
/* call pipe and module init */
ret = usbhs_pipe_probe(priv);
if (ret < 0)
- return ret;
+ goto probe_clk_dis_unprepare;
ret = usbhs_fifo_probe(priv);
if (ret < 0)
@@ -698,19 +717,15 @@ static int usbhs_probe(struct platform_device *pdev)
if (ret < 0)
goto probe_end_fifo_exit;
- /* dev_set_drvdata should be called after usbhs_mod_init */
+ /* platform_set_drvdata() should be called after usbhs_mod_probe() */
platform_set_drvdata(pdev, priv);
ret = reset_control_deassert(priv->rsts);
if (ret)
goto probe_fail_rst;
- ret = usbhsc_clk_get(dev, priv);
- if (ret)
- goto probe_fail_clks;
-
/*
- * deviece reset here because
+ * device reset here because
* USB device might be used in boot loader.
*/
usbhs_sys_clock_ctrl(priv, 0);
@@ -721,7 +736,7 @@ static int usbhs_probe(struct platform_device *pdev)
if (ret) {
dev_warn(dev, "USB function not selected (GPIO)\n");
ret = -ENOTSUPP;
- goto probe_end_mod_exit;
+ goto probe_assert_rest;
}
}
@@ -735,14 +750,19 @@ static int usbhs_probe(struct platform_device *pdev)
ret = usbhs_platform_call(priv, hardware_init, pdev);
if (ret < 0) {
dev_err(dev, "platform init failed.\n");
- goto probe_end_mod_exit;
+ goto probe_assert_rest;
}
/* reset phy for connection */
usbhs_platform_call(priv, phy_reset, pdev);
- /* power control */
- pm_runtime_enable(dev);
+ /*
+ * Disable the clocks that were enabled earlier in the probe path,
+ * and let the driver handle the clocks beyond this point.
+ */
+ usbhsc_clk_disable_unprepare(priv);
+ pm_runtime_put(dev);
+
if (!usbhs_get_dparam(priv, runtime_pwctrl)) {
usbhsc_power_ctrl(priv, 1);
usbhs_mod_autonomy_mode(priv);
@@ -759,9 +779,7 @@ static int usbhs_probe(struct platform_device *pdev)
return ret;
-probe_end_mod_exit:
- usbhsc_clk_put(priv);
-probe_fail_clks:
+probe_assert_rest:
reset_control_assert(priv->rsts);
probe_fail_rst:
usbhs_mod_remove(priv);
@@ -769,6 +787,14 @@ probe_end_fifo_exit:
usbhs_fifo_remove(priv);
probe_end_pipe_exit:
usbhs_pipe_remove(priv);
+probe_clk_dis_unprepare:
+ usbhsc_clk_disable_unprepare(priv);
+probe_pm_put:
+ pm_runtime_put(dev);
+probe_clk_put:
+ usbhsc_clk_put(priv);
+probe_pm_disable:
+ pm_runtime_disable(dev);
dev_info(dev, "probe failed (%d)\n", ret);
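The reordered labels above keep teardown in strict reverse order of acquisition, one label per step, so every failure point can jump to the first cleanup it actually needs. A minimal sketch of the convention (demo_hw_init() is a hypothetical placeholder, not part of this driver):

static int demo_probe(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);			/* step 1 */

	ret = pm_runtime_resume_and_get(dev);	/* step 2 */
	if (ret)
		goto err_pm_disable;

	ret = demo_hw_init(dev);		/* step 3 (hypothetical) */
	if (ret)
		goto err_pm_put;

	return 0;

err_pm_put:
	pm_runtime_put(dev);			/* undo step 2 */
err_pm_disable:
	pm_runtime_disable(dev);		/* undo step 1 */
	return ret;
}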
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 2fea1b1db4a2..9e2a18c0b218 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -17,7 +17,7 @@ static int usb_serial_device_match(struct device *dev,
const struct device_driver *drv)
{
const struct usb_serial_port *port = to_usb_serial_port(dev);
- struct usb_serial_driver *driver = to_usb_serial_driver(drv);
+ const struct usb_serial_driver *driver = to_usb_serial_driver(drv);
/*
* drivers are already assigned to ports in serial_probe so it's
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index b97ba8ed6801..614de0c2f5f2 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1357,7 +1357,8 @@ static void garmin_unthrottle(struct tty_struct *tty)
*/
static void timeout_handler(struct timer_list *t)
{
- struct garmin_data *garmin_data_p = from_timer(garmin_data_p, t, timer);
+ struct garmin_data *garmin_data_p = timer_container_of(garmin_data_p,
+ t, timer);
/* send the next queued packet to the tty port */
if (garmin_data_p->mode == MODE_NATIVE)
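This and the following timer conversions (mos7840.c, realtek_cr.c, vudc_transfer.c below) are mechanical renames: timer_container_of() is the successor to from_timer() and, like it, reduces to container_of() on the embedded timer_list. A sketch of the pattern with a hypothetical structure:

struct demo_dev {
	struct timer_list timer;	/* embedded timer */
	int pending;
};

static void demo_timeout(struct timer_list *t)
{
	/* recover the wrapping structure from the timer_list pointer */
	struct demo_dev *dd = timer_container_of(dd, t, timer);

	dd->pending = 0;
}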
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 93710b762893..9e9aca271c0a 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -383,7 +383,7 @@ static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
static void mos7840_led_off(struct timer_list *t)
{
- struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
+ struct moschip_port *mcs = timer_container_of(mcs, t, led_timer1);
/* Turn off LED */
mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
@@ -393,7 +393,7 @@ static void mos7840_led_off(struct timer_list *t)
static void mos7840_led_flag_off(struct timer_list *t)
{
- struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
+ struct moschip_port *mcs = timer_container_of(mcs, t, led_timer2);
clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 010688dd9e49..22579d0d8ab8 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -458,6 +458,8 @@ static int pl2303_detect_type(struct usb_serial *serial)
case 0x605:
case 0x700: /* GR */
case 0x705:
+ case 0x905: /* GT-2AB */
+ case 0x1005: /* GC-Q20 */
return TYPE_HXN;
}
break;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index a0c244bc77c0..d671189ecee2 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -729,11 +729,6 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
/* start read urb */
urb = port->read_urb;
- if (!urb) {
- dev_err(&port->dev, "%s - no read urb\n", __func__);
- status = -EINVAL;
- goto unlink_int_urb;
- }
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
urb->context = tport;
status = usb_submit_urb(urb, GFP_KERNEL);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index b387863c245f..c18dfa2ca034 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -754,7 +754,8 @@ static void rts51x_modi_suspend_timer(struct rts51x_chip *chip)
static void rts51x_suspend_timer_fn(struct timer_list *t)
{
- struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
+ struct rts51x_chip *chip = timer_container_of(chip, t,
+ rts51x_suspend_timer);
struct us_data *us = chip->us;
switch (rts51x_get_stat(chip)) {
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d460d71b4257..1477e31d7763 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Zhihong Zhou <zhouzhihong@greatwall.com.cn> */
+UNUSUAL_DEV(0x0781, 0x55e8, 0x0000, 0x9999,
+ "SanDisk",
+ "",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
"Hiksemi",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index d36f3b6992bb..152ee3376550 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -1056,13 +1056,20 @@ int usb_stor_probe1(struct us_data **pus,
goto BadDevice;
/*
- * Some USB host controllers can't do DMA; they have to use PIO.
- * For such controllers we need to make sure the block layer sets
- * up bounce buffers in addressable memory.
+ * Some USB host controllers can't do DMA: They have to use PIO, or they
+ * have to use a small dedicated local memory area, or they have other
+ * restrictions on addressable memory.
+ *
+ * We can't support these controllers on highmem systems as we don't
+ * kmap or bounce buffer.
*/
- if (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
- bus_to_hcd(us->pusb_dev->bus)->localmem_pool)
- host->no_highmem = true;
+ if (IS_ENABLED(CONFIG_HIGHMEM) &&
+ (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) ||
+ bus_to_hcd(us->pusb_dev->bus)->localmem_pool)) {
+ dev_warn(&intf->dev, "USB Mass Storage not supported on this host controller\n");
+ result = -EINVAL;
+ goto release;
+ }
/* Get the unusual_devs entries and the descriptors */
result = get_device_info(us, id, unusual_dev);
@@ -1081,6 +1088,7 @@ int usb_stor_probe1(struct us_data **pus,
BadDevice:
usb_stor_dbg(us, "storage_probe() failed\n");
+release:
release_everything(us);
return result;
}
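Because IS_ENABLED(CONFIG_HIGHMEM) is a compile-time constant, the new rejection path only exists on highmem-capable builds; everywhere else the compiler discards the whole condition. A sketch of the idiom (pio_only is a hypothetical stand-in for the two controller checks above):

	if (IS_ENABLED(CONFIG_HIGHMEM) && pio_only) {
		/* branch is compiled out entirely when CONFIG_HIGHMEM=n */
		return -EINVAL;
	}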
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index ac84a6d64c2f..b09b58d7311d 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -393,6 +393,10 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
break;
case CMDT_RSP_NAK:
switch (cmd) {
+ case DP_CMD_STATUS_UPDATE:
+ if (typec_altmode_exit(alt))
+ dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
+ break;
case DP_CMD_CONFIGURE:
dp->data.conf = 0;
ret = dp_altmode_configured(dp);
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index ae90688d23e4..a884cec9ab7e 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -449,7 +449,7 @@ ATTRIBUTE_GROUPS(typec);
static int typec_match(struct device *dev, const struct device_driver *driver)
{
- struct typec_altmode_driver *drv = to_altmode_driver(driver);
+ const struct typec_altmode_driver *drv = to_altmode_driver(driver);
struct typec_altmode *altmode = to_typec_altmode(dev);
const struct typec_device_id *id;
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 49926d6e72c7..182c902c42f6 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -214,7 +214,7 @@ int typec_switch_set(struct typec_switch *sw,
sw_dev = sw->sw_devs[i];
ret = sw_dev->set(sw_dev, orientation);
- if (ret)
+ if (ret && ret != -EOPNOTSUPP)
return ret;
}
@@ -378,7 +378,7 @@ int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
mux_dev = mux->mux_devs[i];
ret = mux_dev->set(mux_dev, state);
- if (ret)
+ if (ret && ret != -EOPNOTSUPP)
return ret;
}
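With these two hunks, one backend in a chained orientation switch or mux can decline an operation without aborting the whole chain. A hedged sketch of such a backend callback (hypothetical driver, using the real typec_switch_set_fn_t signature):

static int demo_orientation_set(struct typec_switch_dev *sw_dev,
				enum typec_orientation orientation)
{
	/* this backend only muxes data lanes; orientation is a no-op */
	return -EOPNOTSUPP;	/* skipped; the next device is still called */
}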
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
index f71dba8bf07c..c54e42c7e6a1 100644
--- a/drivers/usb/typec/mux/fsa4480.c
+++ b/drivers/usb/typec/mux/fsa4480.c
@@ -12,6 +12,7 @@
#include <linux/regmap.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
+#include <linux/regulator/consumer.h>
#define FSA4480_DEVICE_ID 0x00
#define FSA4480_DEVICE_ID_VENDOR_ID GENMASK(7, 6)
@@ -273,6 +274,10 @@ static int fsa4480_probe(struct i2c_client *client)
if (IS_ERR(fsa->regmap))
return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");
+ ret = devm_regulator_get_enable_optional(dev, "vcc");
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get regulator\n");
+
ret = regmap_read(fsa->regmap, FSA4480_DEVICE_ID, &val);
if (ret)
return dev_err_probe(dev, -ENODEV, "FSA4480 not found\n");
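The pattern here (repeated in tcpci.c below) relies on devm_regulator_get_enable_optional() returning -ENODEV when the firmware describes no such supply, which is treated as nothing to enable rather than a failure; any other error aborts the probe. In isolation:

	/* supply is optional: absent (-ENODEV) is fine, real errors are not */
	ret = devm_regulator_get_enable_optional(dev, "vcc");
	if (ret && ret != -ENODEV)
		return dev_err_probe(dev, ret, "Failed to get regulator\n");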
diff --git a/drivers/usb/typec/port-mapper.c b/drivers/usb/typec/port-mapper.c
index d42da5720a25..cdbb7c11d714 100644
--- a/drivers/usb/typec/port-mapper.c
+++ b/drivers/usb/typec/port-mapper.c
@@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/component.h>
+#include <linux/thunderbolt.h>
#include <linux/usb.h>
#include "class.h"
@@ -36,6 +37,11 @@ struct each_port_arg {
struct component_match *match;
};
+static int usb4_port_compare(struct device *dev, void *fwnode)
+{
+ return usb4_usb3_port_match(dev, fwnode);
+}
+
static int typec_port_compare(struct device *dev, void *fwnode)
{
return device_match_fwnode(dev, fwnode);
@@ -51,9 +57,22 @@ static int typec_port_match(struct device *dev, void *data)
if (con_adev == adev)
return 0;
- if (con_adev->pld_crc == adev->pld_crc)
+ if (con_adev->pld_crc == adev->pld_crc) {
+ struct fwnode_handle *adev_fwnode = acpi_fwnode_handle(adev);
+
component_match_add(&arg->port->dev, &arg->match, typec_port_compare,
- acpi_fwnode_handle(adev));
+ adev_fwnode);
+
+		/*
+		 * If dev is a USB 3.x port, it may have a reference to the
+		 * USB4 host interface, in which case we can also link the
+		 * Type-C port with the USB4 port.
+		 */
+ if (fwnode_property_present(adev_fwnode, "usb4-host-interface"))
+ component_match_add(&arg->port->dev, &arg->match,
+ usb4_port_compare, adev_fwnode);
+ }
+
return 0;
}
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 19ab6647af70..a56e31b20c21 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -17,6 +17,7 @@
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
+#include <linux/regulator/consumer.h>
#define PD_RETRY_COUNT_DEFAULT 3
#define PD_RETRY_COUNT_3_0_OR_HIGHER 2
@@ -905,6 +906,10 @@ static int tcpci_probe(struct i2c_client *client)
int err;
u16 val = 0;
+ err = devm_regulator_get_enable_optional(&client->dev, "vdd");
+ if (err && err != -ENODEV)
+ return dev_err_probe(&client->dev, err, "Failed to get regulator\n");
+
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index fd1b80593367..b5a5ed40faea 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -166,7 +166,8 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
return;
}
- if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) {
+ if (count > sizeof(struct pd_message) + 1 ||
+ count + 1 > TCPC_RECEIVE_BUFFER_LEN) {
dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d\n", count);
return;
}
@@ -536,7 +537,10 @@ static int max_tcpci_probe(struct i2c_client *client)
return dev_err_probe(&client->dev, ret,
"IRQ initialization failed\n");
- device_init_wakeup(chip->dev, true);
+ ret = devm_device_init_wakeup(chip->dev);
+ if (ret)
+ return dev_err_probe(chip->dev, ret, "Failed to init wakeup\n");
+
return 0;
}
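As I read the TCPCI register layout, TCPC_RX_BYTE_CNT counts the frame-type byte plus the PD message payload, so a maximum-size message legitimately reports sizeof(struct pd_message) + 1; the old bound rejected it by one. Annotated form of the fixed check:

	/* count = 1 frame-type byte + up to sizeof(struct pd_message) payload */
	if (count > sizeof(struct pd_message) + 1 ||
	    count + 1 > TCPC_RECEIVE_BUFFER_LEN)	/* +1: the count byte itself */
		return;		/* invalid TCPC_RX_BYTE_CNT */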
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 8adf6f954633..1a1f9e1f8e4e 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -67,6 +67,7 @@
\
S(ACC_UNATTACHED), \
S(DEBUG_ACC_ATTACHED), \
+ S(DEBUG_ACC_DEBOUNCE), \
S(AUDIO_ACC_ATTACHED), \
S(AUDIO_ACC_DEBOUNCE), \
\
@@ -313,6 +314,10 @@ struct pd_data {
unsigned int operating_snk_mw;
};
+#define PD_CAP_REV10 0x1
+#define PD_CAP_REV20 0x2
+#define PD_CAP_REV30 0x3
+
struct pd_revision_info {
u8 rev_major;
u8 rev_minor;
@@ -596,6 +601,15 @@ struct pd_rx_event {
enum tcpm_transmit_type rx_sop_type;
};
+struct altmode_vdm_event {
+ struct kthread_work work;
+ struct tcpm_port *port;
+ u32 header;
+ u32 *data;
+ int cnt;
+ enum tcpm_transmit_type tx_sop_type;
+};
+
static const char * const pd_rev[] = {
[PD_REV10] = "rev1",
[PD_REV20] = "rev2",
@@ -621,7 +635,8 @@ static const char * const pd_rev[] = {
!tcpm_cc_is_source((port)->cc1)))
#define tcpm_port_is_debug(port) \
- (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
+ ((tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2)) || \
+ (tcpm_cc_is_sink((port)->cc1) && tcpm_cc_is_sink((port)->cc2)))
#define tcpm_port_is_audio(port) \
(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
@@ -1151,7 +1166,7 @@ static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
port->data_role);
}
-static int tcpm_set_roles(struct tcpm_port *port, bool attached,
+static int tcpm_set_roles(struct tcpm_port *port, bool attached, int state,
enum typec_role role, enum typec_data_role data)
{
enum typec_orientation orientation;
@@ -1188,7 +1203,7 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached,
}
}
- ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
+ ret = tcpm_mux_set(port, state, usb_role, orientation);
if (ret < 0)
return ret;
@@ -1608,18 +1623,68 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
mod_vdm_delayed_work(port, 0);
}
-static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
- const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
+static void tcpm_queue_vdm_work(struct kthread_work *work)
{
- if (port->state != SRC_READY && port->state != SNK_READY &&
- port->state != SRC_VDM_IDENTITY_REQUEST)
- return;
+ struct altmode_vdm_event *event = container_of(work,
+ struct altmode_vdm_event,
+ work);
+ struct tcpm_port *port = event->port;
mutex_lock(&port->lock);
- tcpm_queue_vdm(port, header, data, cnt, tx_sop_type);
+ if (port->state != SRC_READY && port->state != SNK_READY &&
+ port->state != SRC_VDM_IDENTITY_REQUEST) {
+ tcpm_log_force(port, "dropping altmode_vdm_event");
+ goto port_unlock;
+ }
+
+ tcpm_queue_vdm(port, event->header, event->data, event->cnt, event->tx_sop_type);
+
+port_unlock:
+ kfree(event->data);
+ kfree(event);
mutex_unlock(&port->lock);
}
+static int tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
+ const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
+{
+ struct altmode_vdm_event *event;
+ u32 *data_cpy;
+ int ret = -ENOMEM;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ goto err_event;
+
+ data_cpy = kcalloc(cnt, sizeof(u32), GFP_KERNEL);
+ if (!data_cpy)
+ goto err_data;
+
+ kthread_init_work(&event->work, tcpm_queue_vdm_work);
+ event->port = port;
+ event->header = header;
+ memcpy(data_cpy, data, sizeof(u32) * cnt);
+ event->data = data_cpy;
+ event->cnt = cnt;
+ event->tx_sop_type = tx_sop_type;
+
+ ret = kthread_queue_work(port->wq, &event->work);
+ if (!ret) {
+ ret = -EBUSY;
+ goto err_queue;
+ }
+
+ return 0;
+
+err_queue:
+ kfree(data_cpy);
+err_data:
+ kfree(event);
+err_event:
+ tcpm_log_force(port, "failed to queue altmode vdm, err:%d", ret);
+ return ret;
+}
+
static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
{
u32 vdo = p[VDO_INDEX_IDH];
@@ -2830,8 +2895,7 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
- return 0;
+ return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
}
static int tcpm_altmode_exit(struct typec_altmode *altmode)
@@ -2847,8 +2911,7 @@ static int tcpm_altmode_exit(struct typec_altmode *altmode)
header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
- return 0;
+ return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
}
static int tcpm_altmode_vdm(struct typec_altmode *altmode,
@@ -2856,9 +2919,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode,
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
- tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
-
- return 0;
+ return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
}
static const struct typec_altmode_ops tcpm_altmode_ops = {
@@ -2882,8 +2943,7 @@ static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_pl
header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
- return 0;
+ return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
}
static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
@@ -2899,8 +2959,7 @@ static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plu
header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
- return 0;
+ return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
}
static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
@@ -2908,9 +2967,7 @@ static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
- tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
-
- return 0;
+ return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
}
static const struct typec_cable_ops tcpm_cable_ops = {
@@ -4353,7 +4410,8 @@ static int tcpm_src_attach(struct tcpm_port *port)
tcpm_enable_auto_vbus_discharge(port, true);
- ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
+ ret = tcpm_set_roles(port, true, TYPEC_STATE_USB,
+ TYPEC_SOURCE, tcpm_data_role_for_source(port));
if (ret < 0)
return ret;
@@ -4528,7 +4586,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)
tcpm_enable_auto_vbus_discharge(port, true);
- ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
+ ret = tcpm_set_roles(port, true, TYPEC_STATE_USB,
+ TYPEC_SINK, tcpm_data_role_for_sink(port));
if (ret < 0)
return ret;
@@ -4551,12 +4610,24 @@ static void tcpm_snk_detach(struct tcpm_port *port)
static int tcpm_acc_attach(struct tcpm_port *port)
{
int ret;
+ enum typec_role role;
+ enum typec_data_role data;
+ int state = TYPEC_STATE_USB;
if (port->attached)
return 0;
- ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
- tcpm_data_role_for_source(port));
+ role = tcpm_port_is_sink(port) ? TYPEC_SINK : TYPEC_SOURCE;
+ data = tcpm_port_is_sink(port) ? tcpm_data_role_for_sink(port)
+ : tcpm_data_role_for_source(port);
+
+ if (tcpm_port_is_audio(port))
+ state = TYPEC_MODE_AUDIO;
+
+ if (tcpm_port_is_debug(port))
+ state = TYPEC_MODE_DEBUG;
+
+ ret = tcpm_set_roles(port, true, state, role, data);
if (ret < 0)
return ret;
@@ -4665,6 +4736,25 @@ static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
}
}
+static void tcpm_set_initial_negotiated_rev(struct tcpm_port *port)
+{
+ switch (port->pd_rev.rev_major) {
+ case PD_CAP_REV10:
+ port->negotiated_rev = PD_REV10;
+ break;
+ case PD_CAP_REV20:
+ port->negotiated_rev = PD_REV20;
+ break;
+ case PD_CAP_REV30:
+ port->negotiated_rev = PD_REV30;
+ break;
+ default:
+ port->negotiated_rev = PD_MAX_REV;
+ break;
+ }
+ port->negotiated_rev_prime = port->negotiated_rev;
+}
+
static void run_state_machine(struct tcpm_port *port)
{
int ret;
@@ -4782,8 +4872,7 @@ static void run_state_machine(struct tcpm_port *port)
typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->caps_count = 0;
- port->negotiated_rev = PD_MAX_REV;
- port->negotiated_rev_prime = PD_MAX_REV;
+ tcpm_set_initial_negotiated_rev(port);
port->message_id = 0;
port->message_id_prime = 0;
port->rx_msgid = -1;
@@ -4964,7 +5053,13 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
break;
case SNK_ATTACH_WAIT:
- if ((port->cc1 == TYPEC_CC_OPEN &&
+ if (tcpm_port_is_debug(port))
+ tcpm_set_state(port, DEBUG_ACC_ATTACHED,
+ PD_T_CC_DEBOUNCE);
+ else if (tcpm_port_is_audio(port))
+ tcpm_set_state(port, AUDIO_ACC_ATTACHED,
+ PD_T_CC_DEBOUNCE);
+ else if ((port->cc1 == TYPEC_CC_OPEN &&
port->cc2 != TYPEC_CC_OPEN) ||
(port->cc1 != TYPEC_CC_OPEN &&
port->cc2 == TYPEC_CC_OPEN))
@@ -4978,6 +5073,12 @@ static void run_state_machine(struct tcpm_port *port)
if (tcpm_port_is_disconnected(port))
tcpm_set_state(port, SNK_UNATTACHED,
PD_T_PD_DEBOUNCE);
+ else if (tcpm_port_is_debug(port))
+ tcpm_set_state(port, DEBUG_ACC_ATTACHED,
+ PD_T_CC_DEBOUNCE);
+ else if (tcpm_port_is_audio(port))
+ tcpm_set_state(port, AUDIO_ACC_ATTACHED,
+ PD_T_CC_DEBOUNCE);
else if (port->vbus_present)
tcpm_set_state(port,
tcpm_try_src(port) ? SRC_TRY
@@ -5048,8 +5149,7 @@ static void run_state_machine(struct tcpm_port *port)
port->cc2 : port->cc1);
typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
- port->negotiated_rev = PD_MAX_REV;
- port->negotiated_rev_prime = PD_MAX_REV;
+ tcpm_set_initial_negotiated_rev(port);
port->message_id = 0;
port->message_id_prime = 0;
port->rx_msgid = -1;
@@ -5276,7 +5376,10 @@ static void run_state_machine(struct tcpm_port *port)
/* Accessory states */
case ACC_UNATTACHED:
tcpm_acc_detach(port);
- tcpm_set_state(port, SRC_UNATTACHED, 0);
+ if (port->port_type == TYPEC_PORT_SRC)
+ tcpm_set_state(port, SRC_UNATTACHED, 0);
+ else
+ tcpm_set_state(port, SNK_UNATTACHED, 0);
break;
case DEBUG_ACC_ATTACHED:
case AUDIO_ACC_ATTACHED:
@@ -5284,6 +5387,7 @@ static void run_state_machine(struct tcpm_port *port)
if (ret < 0)
tcpm_set_state(port, ACC_UNATTACHED, 0);
break;
+ case DEBUG_ACC_DEBOUNCE:
case AUDIO_ACC_DEBOUNCE:
tcpm_set_state(port, ACC_UNATTACHED, port->timings.cc_debounce_time);
break;
@@ -5326,7 +5430,7 @@ static void run_state_machine(struct tcpm_port *port)
*/
tcpm_set_vconn(port, false);
tcpm_set_vbus(port, false);
- tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
+ tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SOURCE,
tcpm_data_role_for_source(port));
/*
* If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
@@ -5358,7 +5462,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_vconn(port, false);
if (port->pd_capable)
tcpm_set_charge(port, false);
- tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
+ tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SINK,
tcpm_data_role_for_sink(port));
/*
* VBUS may or may not toggle, depending on the adapter.
@@ -5482,10 +5586,10 @@ static void run_state_machine(struct tcpm_port *port)
case DR_SWAP_CHANGE_DR:
tcpm_unregister_altmodes(port);
if (port->data_role == TYPEC_HOST)
- tcpm_set_roles(port, true, port->pwr_role,
+ tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
TYPEC_DEVICE);
else
- tcpm_set_roles(port, true, port->pwr_role,
+ tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
TYPEC_HOST);
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
@@ -5875,7 +5979,8 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
}
break;
case SNK_UNATTACHED:
- if (tcpm_port_is_sink(port))
+ if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
+ tcpm_port_is_sink(port))
tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
break;
case SNK_ATTACH_WAIT:
@@ -5938,7 +6043,12 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
case DEBUG_ACC_ATTACHED:
if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
- tcpm_set_state(port, ACC_UNATTACHED, 0);
+ tcpm_set_state(port, DEBUG_ACC_DEBOUNCE, 0);
+ break;
+
+ case DEBUG_ACC_DEBOUNCE:
+ if (tcpm_port_is_debug(port))
+ tcpm_set_state(port, DEBUG_ACC_ATTACHED, 0);
break;
case SNK_TRY:
@@ -7166,7 +7276,7 @@ static void tcpm_fw_get_timings(struct tcpm_port *port, struct fwnode_handle *fw
static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
- struct fwnode_handle *capabilities, *child, *caps = NULL;
+ struct fwnode_handle *capabilities, *caps = NULL;
unsigned int nr_src_pdo, nr_snk_pdo;
const char *opmode_str;
u32 *src_pdo, *snk_pdo;
@@ -7232,9 +7342,7 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode
if (!capabilities) {
port->pd_count = 1;
} else {
- fwnode_for_each_child_node(capabilities, child)
- port->pd_count++;
-
+ port->pd_count = fwnode_get_child_node_count(capabilities);
if (!port->pd_count) {
ret = -ENODATA;
goto put_capabilities;
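The altmode VDM rework above is an instance of the standard kthread worker hand-off: the caller packages its arguments into a heap-allocated event, queues it on the port's worker, and the work function takes port->lock itself, re-checking the port state before acting. A self-contained sketch of the shape (the demo_* names are hypothetical):

struct demo_event {
	struct kthread_work work;
	u32 header;
};

static void demo_work_fn(struct kthread_work *work)
{
	struct demo_event *ev = container_of(work, struct demo_event, work);

	/* kthread context: sleeping locks are fine here */
	kfree(ev);
}

static int demo_queue(struct kthread_worker *worker, u32 header)
{
	struct demo_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->header = header;
	kthread_init_work(&ev->work, demo_work_fn);
	if (!kthread_queue_work(worker, &ev->work)) {	/* false: already queued */
		kfree(ev);
		return -EBUSY;
	}
	return 0;
}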
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 7ee721a877c1..dcf141ada078 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -1431,7 +1431,7 @@ static int tps6598x_probe(struct i2c_client *client)
tps->wakeup = device_property_read_bool(tps->dev, "wakeup-source");
if (tps->wakeup && client->irq) {
- device_init_wakeup(&client->dev, true);
+ devm_device_init_wakeup(&client->dev);
enable_irq_wake(client->irq);
}
diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h
index 9b23e9017452..cecb8d11d239 100644
--- a/drivers/usb/typec/tipd/tps6598x.h
+++ b/drivers/usb/typec/tipd/tps6598x.h
@@ -27,7 +27,7 @@
#define TPS_STATUS_OVERCURRENT BIT(16)
#define TPS_STATUS_GOTO_MIN_ACTIVE BIT(26)
#define TPS_STATUS_BIST BIT(27)
-#define TPS_STATUS_HIGH_VOLAGE_WARNING BIT(28)
+#define TPS_STATUS_HIGH_VOLTAGE_WARNING BIT(28)
#define TPS_STATUS_HIGH_LOW_VOLTAGE_WARNING BIT(29)
#define TPS_STATUS_CONN_STATE_MASK GENMASK(3, 1)
diff --git a/drivers/usb/typec/tipd/trace.h b/drivers/usb/typec/tipd/trace.h
index 0669cca12ea1..bea383f2db9d 100644
--- a/drivers/usb/typec/tipd/trace.h
+++ b/drivers/usb/typec/tipd/trace.h
@@ -153,7 +153,7 @@
{ TPS_STATUS_OVERCURRENT, "OVERCURRENT" }, \
{ TPS_STATUS_GOTO_MIN_ACTIVE, "GOTO_MIN_ACTIVE" }, \
{ TPS_STATUS_BIST, "BIST" }, \
- { TPS_STATUS_HIGH_VOLAGE_WARNING, "HIGH_VOLAGE_WARNING" }, \
+ { TPS_STATUS_HIGH_VOLTAGE_WARNING, "HIGH_VOLTAGE_WARNING" }, \
{ TPS_STATUS_HIGH_LOW_VOLTAGE_WARNING, "HIGH_LOW_VOLTAGE_WARNING" })
#define show_tps25750_status_flags(flags) \
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index 75559601fe8f..8bf8fefb4f07 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -91,4 +91,15 @@ config UCSI_LENOVO_YOGA_C630
To compile the driver as a module, choose M here: the module will be
called ucsi_yoga_c630.
+config UCSI_HUAWEI_GAOKUN
+ tristate "UCSI Interface Driver for Huawei Matebook E Go"
+ depends on EC_HUAWEI_GAOKUN
+ select DRM_AUX_HPD_BRIDGE if DRM_BRIDGE && OF
+ help
+	  This driver enables UCSI support on the Huawei Matebook E Go,
+	  an sc8280xp-based 2-in-1 tablet.
+
+ To compile the driver as a module, choose M here: the module will be
+ called ucsi_huawei_gaokun.
+
endif
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index be98a879104d..dbc571763eff 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_UCSI_STM32G0) += ucsi_stm32g0.o
obj-$(CONFIG_UCSI_PMIC_GLINK) += ucsi_glink.o
obj-$(CONFIG_CROS_EC_UCSI) += cros_ec_ucsi.o
obj-$(CONFIG_UCSI_LENOVO_YOGA_C630) += ucsi_yoga_c630.o
+obj-$(CONFIG_UCSI_HUAWEI_GAOKUN) += ucsi_huawei_gaokun.o
diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c
index eae2b18a2d8a..92ebf1a2defd 100644
--- a/drivers/usb/typec/ucsi/debugfs.c
+++ b/drivers/usb/typec/ucsi/debugfs.c
@@ -34,16 +34,20 @@ static int ucsi_cmd(void *data, u64 val)
case UCSI_CONNECTOR_RESET:
case UCSI_SET_SINK_PATH:
case UCSI_SET_NEW_CAM:
+ case UCSI_SET_USB:
ret = ucsi_send_command(ucsi, val, NULL, 0);
break;
case UCSI_GET_CAPABILITY:
case UCSI_GET_CONNECTOR_CAPABILITY:
case UCSI_GET_ALTERNATE_MODES:
+ case UCSI_GET_CAM_SUPPORTED:
case UCSI_GET_CURRENT_CAM:
case UCSI_GET_PDOS:
case UCSI_GET_CABLE_PROPERTY:
case UCSI_GET_CONNECTOR_STATUS:
case UCSI_GET_ERROR_STATUS:
+ case UCSI_GET_PD_MESSAGE:
+ case UCSI_GET_ATTENTION_VDO:
case UCSI_GET_CAM_CS:
case UCSI_GET_LPM_PPM_INFO:
ret = ucsi_send_command(ucsi, val,
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 9c5278a0c5d4..5a8f947fcece 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -125,9 +125,11 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_CONNECTOR_STATUS_SIZE 152
#define UCSI_GET_ERROR_STATUS 0x13
+#define UCSI_GET_ATTENTION_VDO 0x16
#define UCSI_GET_PD_MESSAGE 0x15
#define UCSI_GET_CAM_CS 0x18
#define UCSI_SET_SINK_PATH 0x1c
+#define UCSI_SET_USB 0x21
#define UCSI_GET_LPM_PPM_INFO 0x22
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
@@ -434,7 +436,7 @@ struct ucsi_debugfs_entry {
u64 low;
u64 high;
} response;
- u32 status;
+ int status;
struct dentry *dentry;
};
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index f01e4ef6619d..e9a9df1431af 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -1483,6 +1483,8 @@ static int ucsi_ccg_probe(struct i2c_client *client)
i2c_set_clientdata(client, uc);
+ device_disable_async_suspend(uc->dev);
+
pm_runtime_set_active(uc->dev);
pm_runtime_enable(uc->dev);
pm_runtime_use_autosuspend(uc->dev);
diff --git a/drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c b/drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c
new file mode 100644
index 000000000000..7b5222081bbb
--- /dev/null
+++ b/drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ucsi-huawei-gaokun - A UCSI driver for HUAWEI Matebook E Go
+ *
+ * Copyright (C) 2024-2025 Pengyu Luo <mitltlatltl@gmail.com>
+ */
+
+#include <drm/bridge/aux-bridge.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_data/huawei-gaokun-ec.h>
+#include <linux/string.h>
+#include <linux/usb/pd_vdo.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/workqueue_types.h>
+
+#include "ucsi.h"
+
+#define EC_EVENT_UCSI 0x21
+#define EC_EVENT_USB 0x22
+
+#define GAOKUN_CCX_MASK GENMASK(1, 0)
+#define GAOKUN_MUX_MASK GENMASK(3, 2)
+
+#define GAOKUN_DPAM_MASK GENMASK(3, 0)
+#define GAOKUN_HPD_STATE_MASK BIT(4)
+#define GAOKUN_HPD_IRQ_MASK BIT(5)
+
+#define GET_IDX(updt) (ffs(updt) - 1)
+
+#define CCX_TO_ORI(ccx) (++(ccx) % 3) /* convert ccx to enum typec_orientation */
+
+/* Configuration Channel Extension */
+enum gaokun_ucsi_ccx {
+ USBC_CCX_NORMAL,
+ USBC_CCX_REVERSE,
+ USBC_CCX_NONE,
+};
+
+enum gaokun_ucsi_mux {
+ USBC_MUX_NONE,
+ USBC_MUX_USB_2L,
+ USBC_MUX_DP_4L,
+ USBC_MUX_USB_DP,
+};
+
+/* based on pmic_glink_altmode_pin_assignment */
+enum gaokun_ucsi_dpam_pan { /* DP Alt Mode Pin Assignments */
+ USBC_DPAM_PAN_NONE,
+ USBC_DPAM_PAN_A, /* Not supported after USB Type-C Standard v1.0b */
+ USBC_DPAM_PAN_B, /* Not supported after USB Type-C Standard v1.0b */
+ USBC_DPAM_PAN_C, /* USBC_DPAM_PAN_C_REVERSE - 6 */
+ USBC_DPAM_PAN_D,
+ USBC_DPAM_PAN_E,
+ USBC_DPAM_PAN_F, /* Not supported after USB Type-C Standard v1.0b */
+ USBC_DPAM_PAN_A_REVERSE,/* Not supported after USB Type-C Standard v1.0b */
+ USBC_DPAM_PAN_B_REVERSE,/* Not supported after USB Type-C Standard v1.0b */
+ USBC_DPAM_PAN_C_REVERSE,
+ USBC_DPAM_PAN_D_REVERSE,
+ USBC_DPAM_PAN_E_REVERSE,
+ USBC_DPAM_PAN_F_REVERSE,/* Not supported after USB Type-C Standard v1.0b */
+};
+
+struct gaokun_ucsi_reg {
+ u8 num_ports;
+ u8 port_updt;
+ u8 port_data[4];
+ u8 checksum;
+ u8 reserved;
+} __packed;
+
+struct gaokun_ucsi_port {
+ struct completion usb_ack;
+ spinlock_t lock; /* serializing port resource access */
+
+ struct gaokun_ucsi *ucsi;
+ struct auxiliary_device *bridge;
+
+ int idx;
+ enum gaokun_ucsi_ccx ccx;
+ enum gaokun_ucsi_mux mux;
+ u8 mode;
+ u16 svid;
+ u8 hpd_state;
+ u8 hpd_irq;
+};
+
+struct gaokun_ucsi {
+ struct gaokun_ec *ec;
+ struct ucsi *ucsi;
+ struct gaokun_ucsi_port *ports;
+ struct device *dev;
+ struct delayed_work work;
+ struct notifier_block nb;
+ u16 version;
+ u8 num_ports;
+};
+
+/* -------------------------------------------------------------------------- */
+/* For UCSI */
+
+static int gaokun_ucsi_read_version(struct ucsi *ucsi, u16 *version)
+{
+ struct gaokun_ucsi *uec = ucsi_get_drvdata(ucsi);
+
+ *version = uec->version;
+
+ return 0;
+}
+
+static int gaokun_ucsi_read_cci(struct ucsi *ucsi, u32 *cci)
+{
+ struct gaokun_ucsi *uec = ucsi_get_drvdata(ucsi);
+ u8 buf[GAOKUN_UCSI_READ_SIZE];
+ int ret;
+
+ ret = gaokun_ec_ucsi_read(uec->ec, buf);
+ if (ret)
+ return ret;
+
+ memcpy(cci, buf, sizeof(*cci));
+
+ return 0;
+}
+
+static int gaokun_ucsi_read_message_in(struct ucsi *ucsi,
+ void *val, size_t val_len)
+{
+ struct gaokun_ucsi *uec = ucsi_get_drvdata(ucsi);
+ u8 buf[GAOKUN_UCSI_READ_SIZE];
+ int ret;
+
+ ret = gaokun_ec_ucsi_read(uec->ec, buf);
+ if (ret)
+ return ret;
+
+ memcpy(val, buf + GAOKUN_UCSI_CCI_SIZE,
+ min(val_len, GAOKUN_UCSI_MSGI_SIZE));
+
+ return 0;
+}
+
+static int gaokun_ucsi_async_control(struct ucsi *ucsi, u64 command)
+{
+ struct gaokun_ucsi *uec = ucsi_get_drvdata(ucsi);
+ u8 buf[GAOKUN_UCSI_WRITE_SIZE] = {};
+
+ memcpy(buf, &command, sizeof(command));
+
+ return gaokun_ec_ucsi_write(uec->ec, buf);
+}
+
+static void gaokun_ucsi_update_connector(struct ucsi_connector *con)
+{
+ struct gaokun_ucsi *uec = ucsi_get_drvdata(con->ucsi);
+
+ if (con->num > uec->num_ports)
+ return;
+
+ con->typec_cap.orientation_aware = true;
+}
+
+static void gaokun_set_orientation(struct ucsi_connector *con,
+ struct gaokun_ucsi_port *port)
+{
+ enum gaokun_ucsi_ccx ccx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ccx = port->ccx;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ typec_set_orientation(con->port, CCX_TO_ORI(ccx));
+}
+
+static void gaokun_ucsi_connector_status(struct ucsi_connector *con)
+{
+ struct gaokun_ucsi *uec = ucsi_get_drvdata(con->ucsi);
+ int idx;
+
+ idx = con->num - 1;
+ if (con->num > uec->num_ports) {
+ dev_warn(uec->dev, "set orientation out of range: con%d\n", idx);
+ return;
+ }
+
+ gaokun_set_orientation(con, &uec->ports[idx]);
+}
+
+const struct ucsi_operations gaokun_ucsi_ops = {
+ .read_version = gaokun_ucsi_read_version,
+ .read_cci = gaokun_ucsi_read_cci,
+ .read_message_in = gaokun_ucsi_read_message_in,
+ .sync_control = ucsi_sync_control_common,
+ .async_control = gaokun_ucsi_async_control,
+ .update_connector = gaokun_ucsi_update_connector,
+ .connector_status = gaokun_ucsi_connector_status,
+};
+
+/* -------------------------------------------------------------------------- */
+/* For Altmode */
+
+static void gaokun_ucsi_port_update(struct gaokun_ucsi_port *port,
+ const u8 *port_data)
+{
+ struct gaokun_ucsi *uec = port->ucsi;
+	int offset = port->idx * 2; /* every port has 2 bytes of data */
+ unsigned long flags;
+ u8 dcc, ddi;
+
+ dcc = port_data[offset];
+ ddi = port_data[offset + 1];
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port->ccx = FIELD_GET(GAOKUN_CCX_MASK, dcc);
+ port->mux = FIELD_GET(GAOKUN_MUX_MASK, dcc);
+ port->mode = FIELD_GET(GAOKUN_DPAM_MASK, ddi);
+ port->hpd_state = FIELD_GET(GAOKUN_HPD_STATE_MASK, ddi);
+ port->hpd_irq = FIELD_GET(GAOKUN_HPD_IRQ_MASK, ddi);
+
+ /* Mode and SVID are unused; keeping them to make things clearer */
+ switch (port->mode) {
+ case USBC_DPAM_PAN_C:
+ case USBC_DPAM_PAN_C_REVERSE:
+ port->mode = DP_PIN_ASSIGN_C; /* correct it for usb later */
+ break;
+ case USBC_DPAM_PAN_D:
+ case USBC_DPAM_PAN_D_REVERSE:
+ port->mode = DP_PIN_ASSIGN_D;
+ break;
+ case USBC_DPAM_PAN_E:
+ case USBC_DPAM_PAN_E_REVERSE:
+ port->mode = DP_PIN_ASSIGN_E;
+ break;
+ case USBC_DPAM_PAN_NONE:
+ port->mode = TYPEC_STATE_SAFE;
+ break;
+ default:
+ dev_warn(uec->dev, "unknown mode %d\n", port->mode);
+ break;
+ }
+
+ switch (port->mux) {
+ case USBC_MUX_NONE:
+ port->svid = 0;
+ break;
+ case USBC_MUX_USB_2L:
+ port->svid = USB_SID_PD;
+ port->mode = TYPEC_STATE_USB; /* same as PAN_C, correct it */
+ break;
+ case USBC_MUX_DP_4L:
+ case USBC_MUX_USB_DP:
+ port->svid = USB_SID_DISPLAYPORT;
+ break;
+ default:
+ dev_warn(uec->dev, "unknown mux state %d\n", port->mux);
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int gaokun_ucsi_refresh(struct gaokun_ucsi *uec)
+{
+ struct gaokun_ucsi_reg ureg;
+ int ret, idx;
+
+ ret = gaokun_ec_ucsi_get_reg(uec->ec, &ureg);
+ if (ret)
+ return GAOKUN_UCSI_NO_PORT_UPDATE;
+
+ uec->num_ports = ureg.num_ports;
+ idx = GET_IDX(ureg.port_updt);
+
+ if (idx < 0 || idx >= ureg.num_ports)
+ return GAOKUN_UCSI_NO_PORT_UPDATE;
+
+ gaokun_ucsi_port_update(&uec->ports[idx], ureg.port_data);
+ return idx;
+}
+
+static void gaokun_ucsi_handle_altmode(struct gaokun_ucsi_port *port)
+{
+ struct gaokun_ucsi *uec = port->ucsi;
+ int idx = port->idx;
+
+ if (idx >= uec->ucsi->cap.num_connectors) {
+ dev_warn(uec->dev, "altmode port out of range: %d\n", idx);
+ return;
+ }
+
+	/* The UCSI .connector_status() callback has set the orientation */
+ if (port->bridge)
+ drm_aux_hpd_bridge_notify(&port->bridge->dev,
+ port->hpd_state ?
+ connector_status_connected :
+ connector_status_disconnected);
+
+ gaokun_ec_ucsi_pan_ack(uec->ec, port->idx);
+}
+
+static void gaokun_ucsi_altmode_notify_ind(struct gaokun_ucsi *uec)
+{
+ int idx;
+
+ if (!uec->ucsi->connector) { /* slow to register */
+ dev_err_ratelimited(uec->dev, "ucsi connector is not initialized yet\n");
+ return;
+ }
+
+ idx = gaokun_ucsi_refresh(uec);
+ if (idx == GAOKUN_UCSI_NO_PORT_UPDATE)
+ gaokun_ec_ucsi_pan_ack(uec->ec, idx); /* ack directly if no update */
+ else
+ gaokun_ucsi_handle_altmode(&uec->ports[idx]);
+}
+
+/*
+ * When inserting, two UCSI events (connector change) are followed by USB
+ * events. If we receive one USB event, USB events are not blocked, so we
+ * can complete the wait for all ports and should signal all of them.
+ */
+static void gaokun_ucsi_complete_usb_ack(struct gaokun_ucsi *uec)
+{
+ struct gaokun_ucsi_port *port;
+ int idx = 0;
+
+ while (idx < uec->num_ports) {
+ port = &uec->ports[idx++];
+ if (!completion_done(&port->usb_ack))
+ complete_all(&port->usb_ack);
+ }
+}
+
+/*
+ * A USB event is necessary for enabling altmode and should follow the
+ * UCSI event. If it has not arrived after the timeout (this notification
+ * may be disabled somehow), force altmode handling anyway.
+ */
+static void gaokun_ucsi_handle_no_usb_event(struct gaokun_ucsi *uec, int idx)
+{
+ struct gaokun_ucsi_port *port;
+
+ port = &uec->ports[idx];
+ if (!wait_for_completion_timeout(&port->usb_ack, 2 * HZ)) {
+ dev_warn(uec->dev, "No USB EVENT, triggered by UCSI EVENT");
+ gaokun_ucsi_altmode_notify_ind(uec);
+ }
+}
+
+static int gaokun_ucsi_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ u32 cci;
+ int ret;
+ struct gaokun_ucsi *uec = container_of(nb, struct gaokun_ucsi, nb);
+
+ switch (action) {
+ case EC_EVENT_USB:
+ gaokun_ucsi_complete_usb_ack(uec);
+ gaokun_ucsi_altmode_notify_ind(uec);
+ return NOTIFY_OK;
+
+ case EC_EVENT_UCSI:
+ ret = gaokun_ucsi_read_cci(uec->ucsi, &cci);
+ if (ret)
+ return NOTIFY_DONE;
+
+ ucsi_notify_common(uec->ucsi, cci);
+ if (UCSI_CCI_CONNECTOR(cci))
+ gaokun_ucsi_handle_no_usb_event(uec, UCSI_CCI_CONNECTOR(cci) - 1);
+
+ return NOTIFY_OK;
+
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static int gaokun_ucsi_ports_init(struct gaokun_ucsi *uec)
+{
+ struct gaokun_ucsi_port *ucsi_port;
+ struct gaokun_ucsi_reg ureg = {};
+ struct device *dev = uec->dev;
+ struct fwnode_handle *fwnode;
+ int i, ret, num_ports;
+ u32 port;
+
+ gaokun_ec_ucsi_get_reg(uec->ec, &ureg);
+ num_ports = ureg.num_ports;
+ uec->ports = devm_kcalloc(dev, num_ports, sizeof(*uec->ports),
+ GFP_KERNEL);
+ if (!uec->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < num_ports; ++i) {
+ ucsi_port = &uec->ports[i];
+ ucsi_port->ccx = USBC_CCX_NONE;
+ ucsi_port->idx = i;
+ ucsi_port->ucsi = uec;
+ init_completion(&ucsi_port->usb_ack);
+ spin_lock_init(&ucsi_port->lock);
+ }
+
+ device_for_each_child_node(dev, fwnode) {
+ ret = fwnode_property_read_u32(fwnode, "reg", &port);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+ if (port >= num_ports) {
+ dev_warn(dev, "invalid connector number %d, ignoring\n", port);
+ continue;
+ }
+
+ ucsi_port = &uec->ports[port];
+ ucsi_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
+ if (IS_ERR(ucsi_port->bridge)) {
+ fwnode_handle_put(fwnode);
+ return PTR_ERR(ucsi_port->bridge);
+ }
+ }
+
+ for (i = 0; i < num_ports; i++) {
+ if (!uec->ports[i].bridge)
+ continue;
+
+ ret = devm_drm_dp_hpd_bridge_add(dev, uec->ports[i].bridge);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gaokun_ucsi_register_worker(struct work_struct *work)
+{
+ struct gaokun_ucsi *uec;
+ struct ucsi *ucsi;
+ int ret;
+
+ uec = container_of(work, struct gaokun_ucsi, work.work);
+ ucsi = uec->ucsi;
+
+ ret = gaokun_ec_register_notify(uec->ec, &uec->nb);
+ if (ret) {
+ dev_err_probe(ucsi->dev, ret, "notifier register failed\n");
+ return;
+ }
+
+ ret = ucsi_register(ucsi);
+ if (ret)
+ dev_err_probe(ucsi->dev, ret, "ucsi register failed\n");
+}
+
+static int gaokun_ucsi_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct gaokun_ec *ec = adev->dev.platform_data;
+ struct device *dev = &adev->dev;
+ struct gaokun_ucsi *uec;
+ int ret;
+
+ uec = devm_kzalloc(dev, sizeof(*uec), GFP_KERNEL);
+ if (!uec)
+ return -ENOMEM;
+
+ uec->ec = ec;
+ uec->dev = dev;
+ uec->version = UCSI_VERSION_1_0;
+ uec->nb.notifier_call = gaokun_ucsi_notify;
+
+ INIT_DELAYED_WORK(&uec->work, gaokun_ucsi_register_worker);
+
+ ret = gaokun_ucsi_ports_init(uec);
+ if (ret)
+ return ret;
+
+ uec->ucsi = ucsi_create(dev, &gaokun_ucsi_ops);
+ if (IS_ERR(uec->ucsi))
+ return PTR_ERR(uec->ucsi);
+
+ ucsi_set_drvdata(uec->ucsi, uec);
+ auxiliary_set_drvdata(adev, uec);
+
+ /* EC can't handle UCSI properly in the early stage */
+ schedule_delayed_work(&uec->work, 3 * HZ);
+
+ return 0;
+}
+
+static void gaokun_ucsi_remove(struct auxiliary_device *adev)
+{
+ struct gaokun_ucsi *uec = auxiliary_get_drvdata(adev);
+
+ gaokun_ec_unregister_notify(uec->ec, &uec->nb);
+ ucsi_unregister(uec->ucsi);
+ ucsi_destroy(uec->ucsi);
+}
+
+static const struct auxiliary_device_id gaokun_ucsi_id_table[] = {
+ { .name = GAOKUN_MOD_NAME "." GAOKUN_DEV_UCSI, },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, gaokun_ucsi_id_table);
+
+static struct auxiliary_driver gaokun_ucsi_driver = {
+ .name = GAOKUN_DEV_UCSI,
+ .id_table = gaokun_ucsi_id_table,
+ .probe = gaokun_ucsi_probe,
+ .remove = gaokun_ucsi_remove,
+};
+
+module_auxiliary_driver(gaokun_ucsi_driver);
+
+MODULE_DESCRIPTION("HUAWEI Matebook E Go UCSI driver");
+MODULE_LICENSE("GPL");
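For reference, the CCX_TO_ORI() macro in the new driver rotates the EC's channel state onto enum typec_orientation (NONE = 0, NORMAL = 1, REVERSE = 2); an illustrative wrapper makes the mapping explicit:

static enum typec_orientation demo_ccx_to_orientation(int ccx)
{
	/*
	 * CCX_TO_ORI(ccx) == (++ccx % 3):
	 *   USBC_CCX_NORMAL  (0) -> 1 == TYPEC_ORIENTATION_NORMAL
	 *   USBC_CCX_REVERSE (1) -> 2 == TYPEC_ORIENTATION_REVERSE
	 *   USBC_CCX_NONE    (2) -> 0 == TYPEC_ORIENTATION_NONE
	 */
	return CCX_TO_ORI(ccx);
}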
diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c
index 7e801fee33bf..a4f02ea3e3ef 100644
--- a/drivers/usb/usbip/vudc_transfer.c
+++ b/drivers/usb/usbip/vudc_transfer.c
@@ -301,7 +301,7 @@ top:
static void v_timer(struct timer_list *t)
{
- struct vudc *udc = from_timer(udc, t, tr_timer.timer);
+ struct vudc *udc = timer_container_of(udc, t, tr_timer.timer);
struct transfer_timer *timer = &udc->tr_timer;
struct urbp *urb_p, *tmp;
unsigned long flags;
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_main.c b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
index f3d4dda4e04c..9b49efd24391 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa_main.c
+++ b/drivers/vdpa/octeon_ep/octep_vdpa_main.c
@@ -454,6 +454,9 @@ static void octep_vdpa_remove_pf(struct pci_dev *pdev)
octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
octep_vdpa_pf_bar_expand(octpf);
+
+ /* The pf version does not use managed PCI. */
+ pci_disable_device(pdev);
}
static void octep_vdpa_vf_bar_shrink(struct pci_dev *pdev)
@@ -825,7 +828,7 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
struct octep_pf *octpf;
int ret;
- ret = pcim_enable_device(pdev);
+ ret = pci_enable_device(pdev);
if (ret) {
dev_err(dev, "Failed to enable device\n");
return ret;
@@ -834,15 +837,17 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "No usable DMA configuration\n");
- return ret;
+ goto disable_pci;
}
octpf = devm_kzalloc(dev, sizeof(*octpf), GFP_KERNEL);
- if (!octpf)
- return -ENOMEM;
+ if (!octpf) {
+ ret = -ENOMEM;
+ goto disable_pci;
+ }
ret = octep_iomap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
if (ret)
- return ret;
+ goto disable_pci;
pci_set_master(pdev);
pci_set_drvdata(pdev, octpf);
@@ -856,6 +861,8 @@ static int octep_vdpa_probe_pf(struct pci_dev *pdev)
unmap_region:
octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
+disable_pci:
+ pci_disable_device(pdev);
return ret;
}
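Switching from pcim_enable_device() to pci_enable_device() gives up devres: the managed variant disables the device automatically when the driver detaches, while the plain call must be paired with pci_disable_device() on every exit, which is what the new disable_pci labels and the call in octep_vdpa_remove_pf() provide. Reduced to its skeleton (demo_setup() is hypothetical):

	ret = pci_enable_device(pdev);	/* unmanaged: no automatic disable */
	if (ret)
		return ret;

	ret = demo_setup(pdev);		/* hypothetical follow-on step */
	if (ret)
		goto disable_pci;

	return 0;

disable_pci:
	pci_disable_device(pdev);	/* required on every failure path */
	return ret;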
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index c3bcb6911c53..2b0172f54665 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "VFIO support for PCI devices"
- depends on PCI && MMU
+ depends on PCI
config VFIO_PCI_CORE
tristate
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 451c639299eb..2149f49aeec7 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -190,9 +190,10 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
int ret;
/* Check VF state */
- if (unlikely(hisi_qm_wait_mb_ready(qm))) {
+ ret = hisi_qm_wait_mb_ready(qm);
+ if (unlikely(ret)) {
dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
- return -EBUSY;
+ return ret;
}
ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
@@ -325,13 +326,15 @@ static void qm_dev_cmd_init(struct hisi_qm *qm)
static int vf_qm_cache_wb(struct hisi_qm *qm)
{
unsigned int val;
+ int ret;
writel(0x1, qm->io_base + QM_CACHE_WB_START);
- if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
+ ret = readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
val, val & BIT(0), MB_POLL_PERIOD_US,
- MB_POLL_TIMEOUT_US)) {
+ MB_POLL_TIMEOUT_US);
+ if (ret) {
dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
- return -EINVAL;
+ return ret;
}
return 0;
@@ -350,6 +353,32 @@ static int vf_qm_func_stop(struct hisi_qm *qm)
return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
}
+static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev)
+{
+ switch (vf_data->acc_magic) {
+ case ACC_DEV_MAGIC_V2:
+ if (vf_data->major_ver != ACC_DRV_MAJOR_VER) {
+			dev_info(dev, "migration driver version <%u.%u> does not match!\n",
+				 vf_data->major_ver, vf_data->minor_ver);
+ return -EINVAL;
+ }
+ break;
+ case ACC_DEV_MAGIC_V1:
+ /* Correct dma address */
+ vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
+ vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
+ vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
+ vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
+ vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
+ vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
struct hisi_acc_vf_migration_file *migf)
{
@@ -363,9 +392,10 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
return 0;
- if (vf_data->acc_magic != ACC_DEV_MAGIC) {
+ ret = vf_qm_version_check(vf_data, dev);
+ if (ret) {
dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
- return -EINVAL;
+ return ret;
}
if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
@@ -377,7 +407,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
if (ret <= 0) {
dev_err(dev, "failed to get vft qp nums\n");
- return -EINVAL;
+ return ret;
}
if (ret != vf_data->qp_num) {
@@ -399,13 +429,6 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return -EINVAL;
}
- ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
- if (ret) {
- dev_err(dev, "failed to write QM_VF_STATE\n");
- return ret;
- }
-
- hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
hisi_acc_vdev->match_done = true;
return 0;
}
@@ -418,7 +441,9 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
int vf_id = hisi_acc_vdev->vf_id;
int ret;
- vf_data->acc_magic = ACC_DEV_MAGIC;
+ vf_data->acc_magic = ACC_DEV_MAGIC_V2;
+ vf_data->major_ver = ACC_DRV_MAJOR_VER;
+ vf_data->minor_ver = ACC_DRV_MINOR_VER;
/* Save device id */
vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
@@ -441,6 +466,19 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
return 0;
}
+static void vf_qm_xeqc_save(struct hisi_qm *qm,
+ struct hisi_acc_vf_migration_file *migf)
+{
+ struct acc_vf_data *vf_data = &migf->vf_data;
+ u16 eq_head, aeq_head;
+
+ eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF;
+ qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0);
+
+ aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF;
+ qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0);
+}
+
static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
struct hisi_acc_vf_migration_file *migf)
{
@@ -456,6 +494,20 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
if (migf->total_length < sizeof(struct acc_vf_data))
return -EINVAL;
+ if (!vf_data->eqe_dma || !vf_data->aeqe_dma ||
+ !vf_data->sqc_dma || !vf_data->cqc_dma) {
+ dev_info(dev, "resume dma addr is NULL!\n");
+ hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+ return 0;
+ }
+
+ ret = qm_write_regs(qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
+ if (ret) {
+ dev_err(dev, "failed to write QM_VF_STATE\n");
+ return ret;
+ }
+ hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
+
qm->eqe_dma = vf_data->eqe_dma;
qm->aeqe_dma = vf_data->aeqe_dma;
qm->sqc_dma = vf_data->sqc_dma;
@@ -493,27 +545,27 @@ static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
ret = qm_get_regs(vf_qm, vf_data);
if (ret)
- return -EINVAL;
+ return ret;
/* Every reg is 32 bit, the dma address is 64 bit. */
- vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
+ vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
- vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
- vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
+ vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
+ vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
- vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];
+ vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
/* Through SQC_BT/CQC_BT to get sqc and cqc address */
ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
if (ret) {
dev_err(dev, "failed to read SQC addr!\n");
- return -EINVAL;
+ return ret;
}
ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
if (ret) {
dev_err(dev, "failed to read CQC addr!\n");
- return -EINVAL;
+ return ret;
}
return 0;
@@ -524,7 +576,6 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
{
struct acc_vf_data *vf_data = &migf->vf_data;
struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
- struct device *dev = &vf_qm->pdev->dev;
int ret;
if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
@@ -538,17 +589,14 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
vf_data->vf_qm_state = QM_READY;
hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
- ret = vf_qm_cache_wb(vf_qm);
- if (ret) {
- dev_err(dev, "failed to writeback QM Cache!\n");
- return ret;
- }
-
ret = vf_qm_read_data(vf_qm, vf_data);
if (ret)
- return -EINVAL;
+ return ret;
migf->total_length = sizeof(struct acc_vf_data);
+ /* Save eqc and aeqc interrupt information */
+ vf_qm_xeqc_save(vf_qm, migf);
+
return 0;
}
@@ -967,6 +1015,13 @@ static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev
dev_err(dev, "failed to check QM INT state!\n");
return ret;
}
+
+ ret = vf_qm_cache_wb(vf_qm);
+ if (ret) {
+ dev_err(dev, "failed to writeback QM cache!\n");
+ return ret;
+ }
+
return 0;
}
@@ -1327,7 +1382,7 @@ static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vde
ret = qm_wait_dev_not_ready(vf_qm);
if (ret) {
seq_puts(seq, "VF device not ready!\n");
- return -EBUSY;
+ return ret;
}
return 0;
@@ -1463,6 +1518,7 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
+ hisi_acc_vf_disable_fds(hisi_acc_vdev);
mutex_lock(&hisi_acc_vdev->open_mutex);
hisi_acc_vdev->dev_opened = false;
iounmap(vf_qm->io_base);
@@ -1485,6 +1541,7 @@ static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
hisi_acc_vdev->pf_qm = pf_qm;
hisi_acc_vdev->vf_dev = pdev;
+ hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
mutex_init(&hisi_acc_vdev->state_mutex);
mutex_init(&hisi_acc_vdev->open_mutex);
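Several hunks in this file replace blanket -EINVAL/-EBUSY returns with the helper's own return value; for the cache-writeback case that matters because readl_relaxed_poll_timeout() returns either 0 or -ETIMEDOUT, so forwarding it preserves the real failure reason:

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
					 val, val & BIT(0), MB_POLL_PERIOD_US,
					 MB_POLL_TIMEOUT_US);
	if (ret)
		return ret;	/* -ETIMEDOUT rather than a generic -EINVAL */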
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
index 245d7537b2bc..91002ceeebc1 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
@@ -39,6 +39,9 @@
#define QM_REG_ADDR_OFFSET 0x0004
#define QM_XQC_ADDR_OFFSET 32U
+#define QM_XQC_ADDR_LOW 0x1
+#define QM_XQC_ADDR_HIGH 0x2
+
#define QM_VF_AEQ_INT_MASK 0x0004
#define QM_VF_EQ_INT_MASK 0x000c
#define QM_IFC_INT_SOURCE_V 0x0020
@@ -50,10 +53,15 @@
#define QM_EQC_DW0 0X8000
#define QM_AEQC_DW0 0X8020
+#define ACC_DRV_MAJOR_VER 1
+#define ACC_DRV_MINOR_VER 0
+
+#define ACC_DEV_MAGIC_V1 0XCDCDCDCDFEEDAACC
+#define ACC_DEV_MAGIC_V2 0xAACCFEEDDECADEDE
+
struct acc_vf_data {
#define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
/* QM match information */
-#define ACC_DEV_MAGIC 0XCDCDCDCDFEEDAACC
u64 acc_magic;
u32 qp_num;
u32 dev_id;
@@ -61,7 +69,9 @@ struct acc_vf_data {
u32 qp_base;
u32 vf_qm_state;
/* QM reserved match information */
- u32 qm_rsv_state[3];
+ u16 major_ver;
+ u16 minor_ver;
+ u32 qm_rsv_state[2];
/* QM RW regs */
u32 aeq_int_mask;
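The two u16 version fields take exactly the four bytes freed by shrinking qm_rsv_state from three u32 words to two, so QM_MATCH_SIZE (offsetofend of qm_rsv_state) is the same for the V1 and V2 formats, which is what lets vf_qm_version_check() parse a V1 image at all. An illustrative compile-time check of that invariant:

	static_assert(2 * sizeof(u16) + 2 * sizeof(u32) == 3 * sizeof(u32));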
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 11eda6b207f1..5b919a0b2524 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -313,40 +313,21 @@ err_exec:
return ret;
}
-static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_vhca_data_buffer *buf,
- struct mlx5_vhca_recv_buf *recv_buf,
- u32 *mkey)
+static u32 *alloc_mkey_in(u32 npages, u32 pdn)
{
- size_t npages = buf ? DIV_ROUND_UP(buf->allocated_length, PAGE_SIZE) :
- recv_buf->npages;
- int err = 0, inlen;
- __be64 *mtt;
+ int inlen;
void *mkc;
u32 *in;
inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
- sizeof(*mtt) * round_up(npages, 2);
+ sizeof(__be64) * round_up(npages, 2);
- in = kvzalloc(inlen, GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL_ACCOUNT);
if (!in)
- return -ENOMEM;
+ return NULL;
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
DIV_ROUND_UP(npages, 2));
- mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-
- if (buf) {
- struct sg_dma_page_iter dma_iter;
-
- for_each_sgtable_dma_page(&buf->table.sgt, &dma_iter, 0)
- *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));
- } else {
- int i;
-
- for (i = 0; i < npages; i++)
- *mtt++ = cpu_to_be64(recv_buf->dma_addrs[i]);
- }
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
@@ -360,8 +341,81 @@ static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
MLX5_SET64(mkc, mkc, len, npages * PAGE_SIZE);
- err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
- kvfree(in);
+
+ return in;
+}
+
+static int create_mkey(struct mlx5_core_dev *mdev, u32 npages, u32 *mkey_in,
+ u32 *mkey)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
+ sizeof(__be64) * round_up(npages, 2);
+
+ return mlx5_core_create_mkey(mdev, mkey, mkey_in, inlen);
+}
+
+static void unregister_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
+ u32 *mkey_in, struct dma_iova_state *state,
+ enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+ __be64 *mtt;
+ int i;
+
+ if (dma_use_iova(state)) {
+ dma_iova_destroy(mdev->device, state, npages * PAGE_SIZE, dir,
+ 0);
+ } else {
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in,
+ klm_pas_mtt);
+ for (i = npages - 1; i >= 0; i--) {
+ addr = be64_to_cpu(mtt[i]);
+ dma_unmap_page(mdev->device, addr, PAGE_SIZE, dir);
+ }
+ }
+}
+
+static int register_dma_pages(struct mlx5_core_dev *mdev, u32 npages,
+ struct page **page_list, u32 *mkey_in,
+ struct dma_iova_state *state,
+ enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+ size_t mapped = 0;
+ __be64 *mtt;
+ int i, err;
+
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, mkey_in, klm_pas_mtt);
+
+ if (dma_iova_try_alloc(mdev->device, state, 0, npages * PAGE_SIZE)) {
+ addr = state->addr;
+ for (i = 0; i < npages; i++) {
+ err = dma_iova_link(mdev->device, state,
+ page_to_phys(page_list[i]), mapped,
+ PAGE_SIZE, dir, 0);
+ if (err)
+ goto error;
+ *mtt++ = cpu_to_be64(addr);
+ addr += PAGE_SIZE;
+ mapped += PAGE_SIZE;
+ }
+ err = dma_iova_sync(mdev->device, state, 0, mapped);
+ if (err)
+ goto error;
+ } else {
+ for (i = 0; i < npages; i++) {
+ addr = dma_map_page(mdev->device, page_list[i], 0,
+ PAGE_SIZE, dir);
+ err = dma_mapping_error(mdev->device, addr);
+ if (err)
+ goto error;
+ *mtt++ = cpu_to_be64(addr);
+ }
+ }
+ return 0;
+
+error:
+ unregister_dma_pages(mdev, i, mkey_in, state, dir);
return err;
}
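
For orientation, register_dma_pages() above is an instance of the kernel's two-step DMA mapping API: try one contiguous IOVA allocation and link the pages into it, falling back to classic per-page dma_map_page() where the IOVA path is unavailable. A minimal sketch of the same flow, assuming a device dev, a page array pages[] of n entries, a direction dir, and a zero-initialized struct dma_iova_state state; the names are illustrative and error unwinding is elided:

	if (dma_iova_try_alloc(dev, &state, 0, n * PAGE_SIZE)) {
		/* Fast path: one contiguous IOVA range, linked page by page */
		for (i = 0; i < n; i++)
			err = dma_iova_link(dev, &state, page_to_phys(pages[i]),
					    i * PAGE_SIZE, PAGE_SIZE, dir, 0);
		err = dma_iova_sync(dev, &state, 0, n * PAGE_SIZE);
	} else {
		/* Fallback: independent per-page mappings */
		for (i = 0; i < n; i++)
			addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE, dir);
	}
	/* Teardown mirrors this; dma_use_iova(&state) selects the path. */
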
@@ -375,97 +429,97 @@ static int mlx5vf_dma_data_buffer(struct mlx5_vhca_data_buffer *buf)
if (mvdev->mdev_detach)
return -ENOTCONN;
- if (buf->dmaed || !buf->allocated_length)
+ if (buf->mkey_in || !buf->npages)
return -EINVAL;
- ret = dma_map_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0);
- if (ret)
- return ret;
+ buf->mkey_in = alloc_mkey_in(buf->npages, buf->migf->pdn);
+ if (!buf->mkey_in)
+ return -ENOMEM;
- ret = _create_mkey(mdev, buf->migf->pdn, buf, NULL, &buf->mkey);
+ ret = register_dma_pages(mdev, buf->npages, buf->page_list,
+ buf->mkey_in, &buf->state, buf->dma_dir);
if (ret)
- goto err;
+ goto err_register_dma;
- buf->dmaed = true;
+ ret = create_mkey(mdev, buf->npages, buf->mkey_in, &buf->mkey);
+ if (ret)
+ goto err_create_mkey;
return 0;
-err:
- dma_unmap_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0);
+
+err_create_mkey:
+ unregister_dma_pages(mdev, buf->npages, buf->mkey_in, &buf->state,
+ buf->dma_dir);
+err_register_dma:
+ kvfree(buf->mkey_in);
+ buf->mkey_in = NULL;
return ret;
}
+static void free_page_list(u32 npages, struct page **page_list)
+{
+ int i;
+
+ /* Undo alloc_pages_bulk() */
+ for (i = npages - 1; i >= 0; i--)
+ __free_page(page_list[i]);
+
+ kvfree(page_list);
+}
+
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
{
- struct mlx5_vf_migration_file *migf = buf->migf;
- struct sg_page_iter sg_iter;
+ struct mlx5vf_pci_core_device *mvdev = buf->migf->mvdev;
+ struct mlx5_core_dev *mdev = mvdev->mdev;
- lockdep_assert_held(&migf->mvdev->state_mutex);
- WARN_ON(migf->mvdev->mdev_detach);
+ lockdep_assert_held(&mvdev->state_mutex);
+ WARN_ON(mvdev->mdev_detach);
- if (buf->dmaed) {
- mlx5_core_destroy_mkey(migf->mvdev->mdev, buf->mkey);
- dma_unmap_sgtable(migf->mvdev->mdev->device, &buf->table.sgt,
- buf->dma_dir, 0);
+ if (buf->mkey_in) {
+ mlx5_core_destroy_mkey(mdev, buf->mkey);
+ unregister_dma_pages(mdev, buf->npages, buf->mkey_in,
+ &buf->state, buf->dma_dir);
+ kvfree(buf->mkey_in);
}
- /* Undo alloc_pages_bulk() */
- for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
- __free_page(sg_page_iter_page(&sg_iter));
- sg_free_append_table(&buf->table);
+ free_page_list(buf->npages, buf->page_list);
kfree(buf);
}
-static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
- unsigned int npages)
+static int mlx5vf_add_pages(struct page ***page_list, unsigned int npages)
{
- unsigned int to_alloc = npages;
- struct page **page_list;
- unsigned long filled;
- unsigned int to_fill;
- int ret;
+ unsigned int filled, done = 0;
int i;
- to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
- page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
- if (!page_list)
+ *page_list =
+ kvcalloc(npages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
+ if (!*page_list)
return -ENOMEM;
- do {
- filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
- if (!filled) {
- ret = -ENOMEM;
+ for (;;) {
+ filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, npages - done,
+ *page_list + done);
+ if (!filled)
goto err;
- }
- to_alloc -= filled;
- ret = sg_alloc_append_table_from_pages(
- &buf->table, page_list, filled, 0,
- filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
- GFP_KERNEL_ACCOUNT);
- if (ret)
- goto err_append;
- buf->allocated_length += filled * PAGE_SIZE;
- /* clean input for another bulk allocation */
- memset(page_list, 0, filled * sizeof(*page_list));
- to_fill = min_t(unsigned int, to_alloc,
- PAGE_SIZE / sizeof(*page_list));
- } while (to_alloc > 0);
+ done += filled;
+ if (done == npages)
+ break;
+ }
- kvfree(page_list);
return 0;
-err_append:
- for (i = filled - 1; i >= 0; i--)
- __free_page(page_list[i]);
err:
- kvfree(page_list);
- return ret;
+ for (i = 0; i < done; i++)
+ __free_page((*page_list)[i]);
+
+ kvfree(*page_list);
+ *page_list = NULL;
+ return -ENOMEM;
}
struct mlx5_vhca_data_buffer *
-mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
- size_t length,
+mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
enum dma_data_direction dma_dir)
{
struct mlx5_vhca_data_buffer *buf;
@@ -477,12 +531,13 @@ mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
buf->dma_dir = dma_dir;
buf->migf = migf;
- if (length) {
- ret = mlx5vf_add_migration_pages(buf,
- DIV_ROUND_UP_ULL(length, PAGE_SIZE));
+ if (npages) {
+ ret = mlx5vf_add_pages(&buf->page_list, npages);
if (ret)
goto end;
+ buf->npages = npages;
+
if (dma_dir != DMA_NONE) {
ret = mlx5vf_dma_data_buffer(buf);
if (ret)
@@ -505,8 +560,8 @@ void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf)
}
struct mlx5_vhca_data_buffer *
-mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
- size_t length, enum dma_data_direction dma_dir)
+mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+ enum dma_data_direction dma_dir)
{
struct mlx5_vhca_data_buffer *buf, *temp_buf;
struct list_head free_list;
@@ -521,7 +576,7 @@ mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
list_for_each_entry_safe(buf, temp_buf, &migf->avail_list, buf_elm) {
if (buf->dma_dir == dma_dir) {
list_del_init(&buf->buf_elm);
- if (buf->allocated_length >= length) {
+ if (buf->npages >= npages) {
spin_unlock_irq(&migf->list_lock);
goto found;
}
@@ -535,7 +590,7 @@ mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
}
}
spin_unlock_irq(&migf->list_lock);
- buf = mlx5vf_alloc_data_buffer(migf, length, dma_dir);
+ buf = mlx5vf_alloc_data_buffer(migf, npages, dma_dir);
found:
while ((temp_buf = list_first_entry_or_null(&free_list,
@@ -716,7 +771,7 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
MLX5_SET(save_vhca_state_in, in, op_mod, 0);
MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
MLX5_SET(save_vhca_state_in, in, mkey, buf->mkey);
- MLX5_SET(save_vhca_state_in, in, size, buf->allocated_length);
+ MLX5_SET(save_vhca_state_in, in, size, buf->npages * PAGE_SIZE);
MLX5_SET(save_vhca_state_in, in, incremental, inc);
MLX5_SET(save_vhca_state_in, in, set_track, track);
@@ -738,8 +793,11 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
}
if (!header_buf) {
- header_buf = mlx5vf_get_data_buffer(migf,
- sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+ header_buf = mlx5vf_get_data_buffer(
+ migf,
+ DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+ PAGE_SIZE),
+ DMA_NONE);
if (IS_ERR(header_buf)) {
err = PTR_ERR(header_buf);
goto err_free;
@@ -783,7 +841,7 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
if (mvdev->mdev_detach)
return -ENOTCONN;
- if (!buf->dmaed) {
+ if (!buf->mkey_in) {
err = mlx5vf_dma_data_buffer(buf);
if (err)
return err;
@@ -1338,103 +1396,16 @@ static void mlx5vf_destroy_qp(struct mlx5_core_dev *mdev,
kfree(qp);
}
-static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf)
-{
- int i;
-
- /* Undo alloc_pages_bulk() */
- for (i = 0; i < recv_buf->npages; i++)
- __free_page(recv_buf->page_list[i]);
-
- kvfree(recv_buf->page_list);
-}
-
-static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf,
- unsigned int npages)
-{
- unsigned int filled = 0, done = 0;
- int i;
-
- recv_buf->page_list = kvcalloc(npages, sizeof(*recv_buf->page_list),
- GFP_KERNEL_ACCOUNT);
- if (!recv_buf->page_list)
- return -ENOMEM;
-
- for (;;) {
- filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT,
- npages - done,
- recv_buf->page_list + done);
- if (!filled)
- goto err;
-
- done += filled;
- if (done == npages)
- break;
- }
-
- recv_buf->npages = npages;
- return 0;
-
-err:
- for (i = 0; i < npages; i++) {
- if (recv_buf->page_list[i])
- __free_page(recv_buf->page_list[i]);
- }
-
- kvfree(recv_buf->page_list);
- return -ENOMEM;
-}
-
-static int register_dma_recv_pages(struct mlx5_core_dev *mdev,
- struct mlx5_vhca_recv_buf *recv_buf)
-{
- int i, j;
-
- recv_buf->dma_addrs = kvcalloc(recv_buf->npages,
- sizeof(*recv_buf->dma_addrs),
- GFP_KERNEL_ACCOUNT);
- if (!recv_buf->dma_addrs)
- return -ENOMEM;
-
- for (i = 0; i < recv_buf->npages; i++) {
- recv_buf->dma_addrs[i] = dma_map_page(mdev->device,
- recv_buf->page_list[i],
- 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(mdev->device, recv_buf->dma_addrs[i]))
- goto error;
- }
- return 0;
-
-error:
- for (j = 0; j < i; j++)
- dma_unmap_single(mdev->device, recv_buf->dma_addrs[j],
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- kvfree(recv_buf->dma_addrs);
- return -ENOMEM;
-}
-
-static void unregister_dma_recv_pages(struct mlx5_core_dev *mdev,
- struct mlx5_vhca_recv_buf *recv_buf)
-{
- int i;
-
- for (i = 0; i < recv_buf->npages; i++)
- dma_unmap_single(mdev->device, recv_buf->dma_addrs[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
-
- kvfree(recv_buf->dma_addrs);
-}
-
static void mlx5vf_free_qp_recv_resources(struct mlx5_core_dev *mdev,
struct mlx5_vhca_qp *qp)
{
struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
mlx5_core_destroy_mkey(mdev, recv_buf->mkey);
- unregister_dma_recv_pages(mdev, recv_buf);
- free_recv_pages(&qp->recv_buf);
+ unregister_dma_pages(mdev, recv_buf->npages, recv_buf->mkey_in,
+ &recv_buf->state, DMA_FROM_DEVICE);
+ kvfree(recv_buf->mkey_in);
+ free_page_list(recv_buf->npages, recv_buf->page_list);
}
static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
@@ -1445,24 +1416,38 @@ static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev,
struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf;
int err;
- err = alloc_recv_pages(recv_buf, npages);
- if (err < 0)
+ err = mlx5vf_add_pages(&recv_buf->page_list, npages);
+ if (err)
return err;
- err = register_dma_recv_pages(mdev, recv_buf);
- if (err)
+ recv_buf->npages = npages;
+
+ recv_buf->mkey_in = alloc_mkey_in(npages, pdn);
+ if (!recv_buf->mkey_in) {
+ err = -ENOMEM;
goto end;
+ }
+
+ err = register_dma_pages(mdev, npages, recv_buf->page_list,
+ recv_buf->mkey_in, &recv_buf->state,
+ DMA_FROM_DEVICE);
+ if (err)
+ goto err_register_dma;
- err = _create_mkey(mdev, pdn, NULL, recv_buf, &recv_buf->mkey);
+ err = create_mkey(mdev, npages, recv_buf->mkey_in, &recv_buf->mkey);
if (err)
goto err_create_mkey;
return 0;
err_create_mkey:
- unregister_dma_recv_pages(mdev, recv_buf);
+ unregister_dma_pages(mdev, npages, recv_buf->mkey_in, &recv_buf->state,
+ DMA_FROM_DEVICE);
+err_register_dma:
+ kvfree(recv_buf->mkey_in);
+ recv_buf->mkey_in = NULL;
end:
- free_recv_pages(recv_buf);
+ free_page_list(npages, recv_buf->page_list);
return err;
}
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index df421dc6de04..d7821b5ca772 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -53,20 +53,17 @@ struct mlx5_vf_migration_header {
};
struct mlx5_vhca_data_buffer {
- struct sg_append_table table;
+ struct page **page_list;
+ struct dma_iova_state state;
loff_t start_pos;
u64 length;
- u64 allocated_length;
+ u32 npages;
u32 mkey;
+ u32 *mkey_in;
enum dma_data_direction dma_dir;
- u8 dmaed:1;
u8 stop_copy_chunk_num;
struct list_head buf_elm;
struct mlx5_vf_migration_file *migf;
- /* Optimize mlx5vf_get_migration_page() for sequential access */
- struct scatterlist *last_offset_sg;
- unsigned int sg_last_entry;
- unsigned long last_offset;
};
struct mlx5vf_async_data {
@@ -133,8 +130,9 @@ struct mlx5_vhca_cq {
struct mlx5_vhca_recv_buf {
u32 npages;
struct page **page_list;
- dma_addr_t *dma_addrs;
+ struct dma_iova_state state;
u32 next_rq_offset;
+ u32 *mkey_in;
u32 mkey;
};
@@ -217,15 +215,24 @@ int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf);
void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf);
struct mlx5_vhca_data_buffer *
-mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
- size_t length, enum dma_data_direction dma_dir);
+mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+ enum dma_data_direction dma_dir);
void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf);
struct mlx5_vhca_data_buffer *
-mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
- size_t length, enum dma_data_direction dma_dir);
+mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+ enum dma_data_direction dma_dir);
void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf);
-struct page *mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
- unsigned long offset);
+static inline struct page *
+mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
+ unsigned long offset)
+{
+ int page_entry = offset / PAGE_SIZE;
+
+ if (page_entry >= buf->npages)
+ return NULL;
+
+ return buf->page_list[page_entry];
+}
void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev);
void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev,
enum mlx5_vf_migf_state *last_save_state);
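
With the buffer held as a flat page array, the scatterlist walk that main.c used to do (removed further below) collapses into the constant-time index of the new inline above. A hedged usage sketch, assuming buf and a byte offset as in the migration read/write paths:

	struct page *page = mlx5vf_get_migration_page(buf, offset);
	void *va;

	if (page) {
		va = kmap_local_page(page);
		/* touch at most PAGE_SIZE - offset % PAGE_SIZE bytes here */
		kunmap_local(va);
	}
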
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 709543e7eb04..93f894fe60d2 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -34,37 +34,6 @@ static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev)
core_device);
}
-struct page *
-mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
- unsigned long offset)
-{
- unsigned long cur_offset = 0;
- struct scatterlist *sg;
- unsigned int i;
-
- /* All accesses are sequential */
- if (offset < buf->last_offset || !buf->last_offset_sg) {
- buf->last_offset = 0;
- buf->last_offset_sg = buf->table.sgt.sgl;
- buf->sg_last_entry = 0;
- }
-
- cur_offset = buf->last_offset;
-
- for_each_sg(buf->last_offset_sg, sg,
- buf->table.sgt.orig_nents - buf->sg_last_entry, i) {
- if (offset < sg->length + cur_offset) {
- buf->last_offset_sg = sg;
- buf->sg_last_entry += i;
- buf->last_offset = cur_offset;
- return nth_page(sg_page(sg),
- (offset - cur_offset) / PAGE_SIZE);
- }
- cur_offset += sg->length;
- }
- return NULL;
-}
-
static void mlx5vf_disable_fd(struct mlx5_vf_migration_file *migf)
{
mutex_lock(&migf->lock);
@@ -308,6 +277,7 @@ static struct mlx5_vhca_data_buffer *
mlx5vf_mig_file_get_stop_copy_buf(struct mlx5_vf_migration_file *migf,
u8 index, size_t required_length)
{
+ u32 npages = DIV_ROUND_UP(required_length, PAGE_SIZE);
struct mlx5_vhca_data_buffer *buf = migf->buf[index];
u8 chunk_num;
@@ -315,12 +285,11 @@ mlx5vf_mig_file_get_stop_copy_buf(struct mlx5_vf_migration_file *migf,
chunk_num = buf->stop_copy_chunk_num;
buf->migf->buf[index] = NULL;
/* Checking whether the pre-allocated buffer can fit */
- if (buf->allocated_length >= required_length)
+ if (buf->npages >= npages)
return buf;
mlx5vf_put_data_buffer(buf);
- buf = mlx5vf_get_data_buffer(buf->migf, required_length,
- DMA_FROM_DEVICE);
+ buf = mlx5vf_get_data_buffer(buf->migf, npages, DMA_FROM_DEVICE);
if (IS_ERR(buf))
return buf;
@@ -373,7 +342,8 @@ static int mlx5vf_add_stop_copy_header(struct mlx5_vf_migration_file *migf,
u8 *to_buff;
int ret;
- header_buf = mlx5vf_get_data_buffer(migf, size, DMA_NONE);
+ header_buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(size, PAGE_SIZE),
+ DMA_NONE);
if (IS_ERR(header_buf))
return PTR_ERR(header_buf);
@@ -388,7 +358,7 @@ static int mlx5vf_add_stop_copy_header(struct mlx5_vf_migration_file *migf,
to_buff = kmap_local_page(page);
memcpy(to_buff, &header, sizeof(header));
header_buf->length = sizeof(header);
- data.stop_copy_size = cpu_to_le64(migf->buf[0]->allocated_length);
+ data.stop_copy_size = cpu_to_le64(migf->buf[0]->npages * PAGE_SIZE);
memcpy(to_buff + sizeof(header), &data, sizeof(data));
header_buf->length += sizeof(data);
kunmap_local(to_buff);
@@ -437,15 +407,20 @@ static int mlx5vf_prep_stop_copy(struct mlx5vf_pci_core_device *mvdev,
num_chunks = mvdev->chunk_mode ? MAX_NUM_CHUNKS : 1;
for (i = 0; i < num_chunks; i++) {
- buf = mlx5vf_get_data_buffer(migf, inc_state_size, DMA_FROM_DEVICE);
+ buf = mlx5vf_get_data_buffer(
+ migf, DIV_ROUND_UP(inc_state_size, PAGE_SIZE),
+ DMA_FROM_DEVICE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto err;
}
migf->buf[i] = buf;
- buf = mlx5vf_get_data_buffer(migf,
- sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+ buf = mlx5vf_get_data_buffer(
+ migf,
+ DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+ PAGE_SIZE),
+ DMA_NONE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto err;
@@ -553,7 +528,8 @@ static long mlx5vf_precopy_ioctl(struct file *filp, unsigned int cmd,
* We finished transferring the current state and the device has a
* dirty state, save a new state to be ready for.
*/
- buf = mlx5vf_get_data_buffer(migf, inc_length, DMA_FROM_DEVICE);
+ buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(inc_length, PAGE_SIZE),
+ DMA_FROM_DEVICE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
mlx5vf_mark_err(migf);
@@ -675,8 +651,8 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track)
if (track) {
/* leave the allocated buffer ready for the stop-copy phase */
- buf = mlx5vf_alloc_data_buffer(migf,
- migf->buf[0]->allocated_length, DMA_FROM_DEVICE);
+ buf = mlx5vf_alloc_data_buffer(migf, migf->buf[0]->npages,
+ DMA_FROM_DEVICE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_pd;
@@ -917,11 +893,14 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
goto out_unlock;
break;
case MLX5_VF_LOAD_STATE_PREP_HEADER_DATA:
- if (vhca_buf_header->allocated_length < migf->record_size) {
+ {
+ u32 npages = DIV_ROUND_UP(migf->record_size, PAGE_SIZE);
+
+ if (vhca_buf_header->npages < npages) {
mlx5vf_free_data_buffer(vhca_buf_header);
- migf->buf_header[0] = mlx5vf_alloc_data_buffer(migf,
- migf->record_size, DMA_NONE);
+ migf->buf_header[0] = mlx5vf_alloc_data_buffer(
+ migf, npages, DMA_NONE);
if (IS_ERR(migf->buf_header[0])) {
ret = PTR_ERR(migf->buf_header[0]);
migf->buf_header[0] = NULL;
@@ -934,6 +913,7 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
vhca_buf_header->start_pos = migf->max_pos;
migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER_DATA;
break;
+ }
case MLX5_VF_LOAD_STATE_READ_HEADER_DATA:
ret = mlx5vf_resume_read_header_data(migf, vhca_buf_header,
&buf, &len, pos, &done);
@@ -944,12 +924,13 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
{
u64 size = max(migf->record_size,
migf->stop_copy_prep_size);
+ u32 npages = DIV_ROUND_UP(size, PAGE_SIZE);
- if (vhca_buf->allocated_length < size) {
+ if (vhca_buf->npages < npages) {
mlx5vf_free_data_buffer(vhca_buf);
- migf->buf[0] = mlx5vf_alloc_data_buffer(migf,
- size, DMA_TO_DEVICE);
+ migf->buf[0] = mlx5vf_alloc_data_buffer(
+ migf, npages, DMA_TO_DEVICE);
if (IS_ERR(migf->buf[0])) {
ret = PTR_ERR(migf->buf[0]);
migf->buf[0] = NULL;
@@ -1037,8 +1018,11 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
}
migf->buf[0] = buf;
- buf = mlx5vf_alloc_data_buffer(migf,
- sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+ buf = mlx5vf_alloc_data_buffer(
+ migf,
+ DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+ PAGE_SIZE),
+ DMA_NONE);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_buf;
@@ -1148,7 +1132,8 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
MLX5VF_QUERY_INC | MLX5VF_QUERY_CLEANUP);
if (ret)
return ERR_PTR(ret);
- buf = mlx5vf_get_data_buffer(migf, size, DMA_FROM_DEVICE);
+ buf = mlx5vf_get_data_buffer(migf,
+ DIV_ROUND_UP(size, PAGE_SIZE), DMA_FROM_DEVICE);
if (IS_ERR(buf))
return ERR_CAST(buf);
/* pre_copy cleanup */
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0ac56072af9f..1136d7ac6b59 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -80,7 +80,6 @@ struct vfio_domain {
struct iommu_domain *domain;
struct list_head next;
struct list_head group_list;
- bool fgsp : 1; /* Fine-grained super pages */
bool enforce_cache_coherency : 1;
};
@@ -293,7 +292,7 @@ static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
struct rb_node *p;
for (p = rb_prev(n); p; p = rb_prev(p)) {
- struct vfio_dma *dma = rb_entry(n,
+ struct vfio_dma *dma = rb_entry(p,
struct vfio_dma, node);
vfio_dma_bitmap_free(dma);
@@ -1095,8 +1094,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
* may require hardware cache flushing, try to find the
* largest contiguous physical memory chunk to unmap.
*/
- for (len = PAGE_SIZE;
- !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
+ for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) {
next = iommu_iova_to_phys(domain->domain, iova + len);
if (next != phys + len)
break;
@@ -1833,49 +1831,6 @@ unwind:
return ret;
}
-/*
- * We change our unmap behavior slightly depending on whether the IOMMU
- * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage
- * for practically any contiguous power-of-two mapping we give it. This means
- * we don't need to look for contiguous chunks ourselves to make unmapping
- * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d
- * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
- * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
- * hugetlbfs is in use.
- */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
-{
- int ret, order = get_order(PAGE_SIZE * 2);
- struct vfio_iova *region;
- struct page *pages;
- dma_addr_t start;
-
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!pages)
- return;
-
- list_for_each_entry(region, regions, list) {
- start = ALIGN(region->start, PAGE_SIZE * 2);
- if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
- continue;
-
- ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
- IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
- GFP_KERNEL_ACCOUNT);
- if (!ret) {
- size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
-
- if (unmapped == PAGE_SIZE)
- iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
- else
- domain->fgsp = true;
- }
- break;
- }
-
- __free_pages(pages, order);
-}
-
static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
struct iommu_group *iommu_group)
{
@@ -2314,8 +2269,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
}
}
- vfio_test_domain_fgsp(domain, &iova_copy);
-
/* replay mappings on new domains */
ret = vfio_iommu_replay(iommu, domain);
if (ret)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index b9b9e9d40951..7cbfc7d718b3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -755,10 +755,10 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int err;
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
+ bool busyloop_intr;
do {
- bool busyloop_intr = false;
-
+ busyloop_intr = false;
if (nvq->done_idx == VHOST_NET_BATCH)
vhost_tx_batch(net, nvq, sock, &msg);
@@ -769,13 +769,10 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- if (unlikely(busyloop_intr)) {
- vhost_poll_queue(&vq->poll);
- } else if (unlikely(vhost_enable_notify(&net->dev,
- vq))) {
- vhost_disable_notify(&net->dev, vq);
- continue;
- }
+ /* Kicks are disabled at this point; break out of the loop and
+ * process any remaining batched packets. The queue will
+ * be re-enabled afterwards.
+ */
break;
}
@@ -825,7 +822,22 @@ done:
++nvq->done_idx;
} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
+ /* Kicks are still disabled, dispatch any remaining batched msgs. */
vhost_tx_batch(net, nvq, sock, &msg);
+
+ if (unlikely(busyloop_intr))
+ /* If interrupted while busy polling, requeue the handler
+ * to be fair to handle_rx as well as other tasks waiting
+ * on the CPU.
+ */
+ vhost_poll_queue(&vq->poll);
+ else
+ /* All of our work has been completed; however, before
+ * leaving the TX handler, do one last check for work,
+ * and requeue the handler if necessary. If there is no work,
+ * the queue will be re-enabled.
+ */
+ vhost_net_busy_poll_try_queue(net, vq);
}
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 26bcf3a7f70c..c12a0d4e6386 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -133,6 +133,11 @@ struct vhost_scsi_cmd {
struct se_cmd tvc_se_cmd;
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+ /*
+ * Dirty write descriptors of this command.
+ */
+ struct vhost_log *tvc_log;
+ unsigned int tvc_log_num;
/* Completed commands list, serviced from vhost worker thread */
struct llist_node tvc_completion_list;
/* Used to track inflight cmd */
@@ -258,6 +263,12 @@ struct vhost_scsi_tmf {
struct iovec resp_iov;
int in_iovs;
int vq_desc;
+
+ /*
+ * Dirty write descriptors of this command.
+ */
+ struct vhost_log *tmf_log;
+ unsigned int tmf_log_num;
};
/*
@@ -362,6 +373,45 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
return tpg->tv_fabric_prot_type;
}
+static int vhost_scsi_copy_cmd_log(struct vhost_virtqueue *vq,
+ struct vhost_scsi_cmd *cmd,
+ struct vhost_log *log,
+ unsigned int log_num)
+{
+ if (!cmd->tvc_log)
+ cmd->tvc_log = kmalloc_array(vq->dev->iov_limit,
+ sizeof(*cmd->tvc_log),
+ GFP_KERNEL);
+
+ if (unlikely(!cmd->tvc_log)) {
+ vq_err(vq, "Failed to alloc tvc_log\n");
+ return -ENOMEM;
+ }
+
+ memcpy(cmd->tvc_log, log, sizeof(*cmd->tvc_log) * log_num);
+ cmd->tvc_log_num = log_num;
+
+ return 0;
+}
+
+static void vhost_scsi_log_write(struct vhost_virtqueue *vq,
+ struct vhost_log *log,
+ unsigned int log_num)
+{
+ if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL)))
+ return;
+
+ if (likely(!log_num || !log))
+ return;
+
+ /*
+ * vhost-scsi doesn't support VIRTIO_F_ACCESS_PLATFORM.
+ * No requirement for vq->iotlb case.
+ */
+ WARN_ON_ONCE(unlikely(vq->iotlb));
+ vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
+}
+
static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
@@ -408,6 +458,10 @@ static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
struct vhost_scsi_inflight *inflight = tmf->inflight;
+ /*
+ * tmf->tmf_log defaults to NULL unless VHOST_F_LOG_ALL is set.
+ */
+ kfree(tmf->tmf_log);
kfree(tmf);
vhost_scsi_put_inflight(inflight);
}
@@ -517,6 +571,8 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
struct virtio_scsi_event *event = &evt->event;
struct virtio_scsi_event __user *eventp;
+ struct vhost_log *vq_log;
+ unsigned int log_num;
unsigned out, in;
int head, ret;
@@ -527,9 +583,19 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
again:
vhost_disable_notify(&vs->dev, vq);
+
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
+
+ /*
+ * Reset 'log_num' here, since vhost_get_vq_desc() only resets it
+ * after certain condition checks.
+ */
+ log_num = 0;
+
head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
- NULL, NULL);
+ vq_log, &log_num);
if (head < 0) {
vs->vs_events_missed = true;
return;
@@ -559,6 +625,8 @@ again:
vhost_add_used_and_signal(&vs->dev, vq, head, 0);
else
vq_err(vq, "Faulted on vhost_scsi_send_event\n");
+
+ vhost_scsi_log_write(vq, vq_log, log_num);
}
static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
@@ -660,6 +728,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
+ vhost_scsi_log_write(cmd->tvc_vq, cmd->tvc_log,
+ cmd->tvc_log_num);
+
vhost_scsi_release_cmd_res(se_cmd);
}
@@ -676,6 +747,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *cmd;
struct scatterlist *sgl, *prot_sgl;
+ struct vhost_log *log;
int tag;
tag = sbitmap_get(&svq->scsi_tags);
@@ -687,9 +759,11 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
cmd = &svq->scsi_cmds[tag];
sgl = cmd->sgl;
prot_sgl = cmd->prot_sgl;
+ log = cmd->tvc_log;
memset(cmd, 0, sizeof(*cmd));
cmd->sgl = sgl;
cmd->prot_sgl = prot_sgl;
+ cmd->tvc_log = log;
cmd->tvc_se_cmd.map_tag = tag;
cmd->inflight = vhost_scsi_get_inflight(vq);
@@ -1063,13 +1137,17 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
- struct vhost_scsi_ctx *vc)
+ struct vhost_scsi_ctx *vc,
+ struct vhost_log *log, unsigned int *log_num)
{
int ret = -ENXIO;
+ if (likely(log_num))
+ *log_num = 0;
+
vc->head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
- NULL, NULL);
+ log, log_num);
pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
vc->head, vc->out, vc->in);
@@ -1221,6 +1299,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
u8 *cdb;
+ struct vhost_log *vq_log;
+ unsigned int log_num;
mutex_lock(&vq->mutex);
/*
@@ -1236,8 +1316,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
+
do {
- ret = vhost_scsi_get_desc(vs, vq, &vc);
+ ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
if (ret)
goto err;
@@ -1386,6 +1469,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
goto err;
}
+ if (unlikely(vq_log && log_num)) {
+ ret = vhost_scsi_copy_cmd_log(vq, cmd, vq_log, log_num);
+ if (unlikely(ret)) {
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ goto err;
+ }
+ }
+
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
cdb[0], lun);
pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
@@ -1421,11 +1512,14 @@ err:
*/
if (ret == -ENXIO)
break;
- else if (ret == -EIO)
+ else if (ret == -EIO) {
vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
- else if (ret == -ENOMEM)
+ vhost_scsi_log_write(vq, vq_log, log_num);
+ } else if (ret == -ENOMEM) {
vhost_scsi_send_status(vs, vq, &vc,
SAM_STAT_TASK_SET_FULL);
+ vhost_scsi_log_write(vq, vq_log, log_num);
+ }
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
@@ -1467,6 +1561,8 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
mutex_lock(&tmf->svq->vq.mutex);
vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
tmf->vq_desc, &tmf->resp_iov, resp_code);
+ vhost_scsi_log_write(&tmf->svq->vq, tmf->tmf_log,
+ tmf->tmf_log_num);
mutex_unlock(&tmf->svq->vq.mutex);
vhost_scsi_release_tmf_res(tmf);
@@ -1490,7 +1586,8 @@ static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
struct vhost_virtqueue *vq,
struct virtio_scsi_ctrl_tmf_req *vtmf,
- struct vhost_scsi_ctx *vc)
+ struct vhost_scsi_ctx *vc,
+ struct vhost_log *log, unsigned int log_num)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
struct vhost_scsi_virtqueue, vq);
@@ -1518,6 +1615,19 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
tmf->in_iovs = vc->in;
tmf->inflight = vhost_scsi_get_inflight(vq);
+ if (unlikely(log && log_num)) {
+ tmf->tmf_log = kmalloc_array(log_num, sizeof(*tmf->tmf_log),
+ GFP_KERNEL);
+ if (tmf->tmf_log) {
+ memcpy(tmf->tmf_log, log, sizeof(*tmf->tmf_log) * log_num);
+ tmf->tmf_log_num = log_num;
+ } else {
+ pr_err("vhost_scsi tmf log allocation error\n");
+ vhost_scsi_release_tmf_res(tmf);
+ goto send_reject;
+ }
+ }
+
if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
vhost_buf_to_lun(vtmf->lun), NULL,
TMR_LUN_RESET, GFP_KERNEL, 0,
@@ -1531,6 +1641,7 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
send_reject:
vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
VIRTIO_SCSI_S_FUNCTION_REJECTED);
+ vhost_scsi_log_write(vq, log, log_num);
}
static void
@@ -1567,6 +1678,8 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct vhost_scsi_ctx vc;
size_t typ_size;
int ret, c = 0;
+ struct vhost_log *vq_log;
+ unsigned int log_num;
mutex_lock(&vq->mutex);
/*
@@ -1580,8 +1693,11 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
+ vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
+
do {
- ret = vhost_scsi_get_desc(vs, vq, &vc);
+ ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
if (ret)
goto err;
@@ -1645,9 +1761,12 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
goto err;
if (v_req.type == VIRTIO_SCSI_T_TMF)
- vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
- else
+ vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc,
+ vq_log, log_num);
+ else {
vhost_scsi_send_an_resp(vs, vq, &vc);
+ vhost_scsi_log_write(vq, vq_log, log_num);
+ }
err:
/*
* ENXIO: No more requests, or read error, wait for next kick
@@ -1657,11 +1776,13 @@ err:
*/
if (ret == -ENXIO)
break;
- else if (ret == -EIO)
+ else if (ret == -EIO) {
vhost_scsi_send_bad_target(vs, vq, &vc,
v_req.type == VIRTIO_SCSI_T_TMF ?
TYPE_CTRL_TMF :
TYPE_CTRL_AN);
+ vhost_scsi_log_write(vq, vq_log, log_num);
+ }
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
@@ -1756,6 +1877,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
wait_for_completion(&vs->old_inflight[i]->comp);
}
+static void vhost_scsi_destroy_vq_log(struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (!svq->scsi_cmds)
+ return;
+
+ for (i = 0; i < svq->max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+ kfree(tv_cmd->tvc_log);
+ tv_cmd->tvc_log = NULL;
+ tv_cmd->tvc_log_num = 0;
+ }
+}
+
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
struct vhost_scsi_virtqueue *svq = container_of(vq,
@@ -1775,6 +1914,7 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
sbitmap_free(&svq->scsi_tags);
kfree(svq->upages);
+ vhost_scsi_destroy_vq_log(vq);
kfree(svq->scsi_cmds);
svq->scsi_cmds = NULL;
}
@@ -2084,6 +2224,7 @@ err_dev:
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
struct vhost_virtqueue *vq;
+ bool is_log, was_log;
int i;
if (features & ~VHOST_SCSI_FEATURES)
@@ -2096,12 +2237,39 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
return -EFAULT;
}
+ if (!vs->dev.nvqs)
+ goto out;
+
+ is_log = features & (1 << VHOST_F_LOG_ALL);
+ /*
+ * All VQs should have the same features.
+ */
+ was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL);
+
for (i = 0; i < vs->dev.nvqs; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->acked_features = features;
mutex_unlock(&vq->mutex);
}
+
+ /*
+ * If VHOST_F_LOG_ALL is removed, free tvc_log after
+ * vq->acked_features is committed.
+ */
+ if (!is_log && was_log) {
+ for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
+ if (!vs->vqs[i].scsi_cmds)
+ continue;
+
+ vq = &vs->vqs[i].vq;
+ mutex_lock(&vq->mutex);
+ vhost_scsi_destroy_vq_log(vq);
+ mutex_unlock(&vq->mutex);
+ }
+ }
+
+out:
mutex_unlock(&vs->dev.mutex);
return 0;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 63612faeab72..3a5ebb973dba 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2304,6 +2304,19 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
return 0;
}
+/*
+ * vhost_log_write() - Log in dirty page bitmap
+ * @vq: vhost virtqueue.
+ * @log: Array of dirty memory in GPA.
+ * @log_num: Size of the vhost_log array.
+ * @len: The total length of memory buffer to log in the dirty bitmap.
+ * Some drivers may only partially use pages shared via the last
+ * vring descriptor (e.g. the vhost-net RX buffer).
+ * Use (len == U64_MAX) to indicate the driver would log all
+ * pages of vring descriptors.
+ * @iov: Array of dirty memory in HVA.
+ * @count: Size of iovec array.
+ */
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len, struct iovec *iov, int count)
{
@@ -2327,15 +2340,14 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
r = log_write(vq->log_base, log[i].addr, l);
if (r < 0)
return r;
- len -= l;
- if (!len) {
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx);
- return 0;
- }
+
+ if (len != U64_MAX)
+ len -= l;
}
- /* Length written exceeds what we have stored. This is a bug. */
- BUG();
+
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx);
+
return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
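
Per the kernel-doc above, passing len == U64_MAX makes vhost_log_write() log every page of every descriptor instead of stopping once len bytes are accounted for. The new vhost-scsi logging path relies on exactly that, since it has no per-descriptor byte count:

	/* from vhost_scsi_log_write(): log all pages, no iovec entries */
	vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
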
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 73e153f9b449..bbce65452701 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -225,10 +225,9 @@ static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
if (flag)
- new = krealloc_array(iov->iov, new_num,
- sizeof(struct iovec), gfp);
+ new = krealloc_array(iov->iov, new_num, sizeof(*new), gfp);
else {
- new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
+ new = kmalloc_array(new_num, sizeof(*new), gfp);
if (new) {
memcpy(new, iov->iov,
iov->max_num * sizeof(struct iovec));
@@ -1291,11 +1290,10 @@ static inline int getu16_iotlb(const struct vringh *vrh,
if (ret)
return ret;
} else {
- void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page);
- void *from = kaddr + ivec.iov.bvec[0].bv_offset;
+ __virtio16 *from = bvec_kmap_local(&ivec.iov.bvec[0]);
- tmp = READ_ONCE(*(__virtio16 *)from);
- kunmap_local(kaddr);
+ tmp = READ_ONCE(*from);
+ kunmap_local(from);
}
*val = vringh16_to_cpu(vrh, tmp);
@@ -1330,11 +1328,10 @@ static inline int putu16_iotlb(const struct vringh *vrh,
if (ret)
return ret;
} else {
- void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page);
- void *to = kaddr + ivec.iov.bvec[0].bv_offset;
+ __virtio16 *to = bvec_kmap_local(&ivec.iov.bvec[0]);
- WRITE_ONCE(*(__virtio16 *)to, tmp);
- kunmap_local(kaddr);
+ WRITE_ONCE(*to, tmp);
+ kunmap_local(to);
}
return 0;
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index f699e5827ccb..9dc93c5e480b 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -15,7 +15,6 @@
#include <linux/notifier.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/slab.h>
#ifdef CONFIG_PMAC_BACKLIGHT
@@ -57,10 +56,10 @@
* a hot-key to adjust backlight, the driver must notify the backlight
* core that brightness has changed using backlight_force_update().
*
- * The backlight driver core receives notifications from fbdev and
- * if the event is FB_EVENT_BLANK and if the value of blank, from the
- * FBIOBLANK ioctrl, results in a change in the backlight state the
- * update_status() operation is called.
+ * Display drivers can control the backlight device's status using
+ * backlight_notify_blank() and backlight_notify_blank_all(). If this
+ * results in a change in the backlight state, these functions call the
+ * update_status() operation.
*/
static struct list_head backlight_dev_list;
@@ -78,85 +77,40 @@ static const char *const backlight_scale_types[] = {
[BACKLIGHT_SCALE_NON_LINEAR] = "non-linear",
};
-#if defined(CONFIG_FB_CORE) || (defined(CONFIG_FB_CORE_MODULE) && \
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
-/*
- * fb_notifier_callback
- *
- * This callback gets called when something important happens inside a
- * framebuffer driver. The backlight core only cares about FB_BLANK_UNBLANK
- * which is reported to the driver using backlight_update_status()
- * as a state change.
- *
- * There may be several fbdev's connected to the backlight device,
- * in which case they are kept track of. A state change is only reported
- * if there is a change in backlight for the specified fbdev.
- */
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data)
+void backlight_notify_blank(struct backlight_device *bd, struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
{
- struct backlight_device *bd;
- struct fb_event *evdata = data;
- struct fb_info *info = evdata->info;
- struct backlight_device *fb_bd = fb_bl_device(info);
- int node = info->node;
- int fb_blank = 0;
-
- /* If we aren't interested in this event, skip it immediately ... */
- if (event != FB_EVENT_BLANK)
- return 0;
-
- bd = container_of(self, struct backlight_device, fb_notif);
- mutex_lock(&bd->ops_lock);
+ guard(mutex)(&bd->ops_lock);
if (!bd->ops)
- goto out;
- if (bd->ops->controls_device && !bd->ops->controls_device(bd, info->device))
- goto out;
- if (fb_bd && fb_bd != bd)
- goto out;
-
- fb_blank = *(int *)evdata->data;
- if (fb_blank == FB_BLANK_UNBLANK && !bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = true;
+ return;
+ if (bd->ops->controls_device && !bd->ops->controls_device(bd, display_dev))
+ return;
+
+ if (fb_on && (!prev_fb_on || !bd->use_count)) {
if (!bd->use_count++) {
bd->props.state &= ~BL_CORE_FBBLANK;
backlight_update_status(bd);
}
- } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) {
- bd->fb_bl_on[node] = false;
+ } else if (!fb_on && prev_fb_on && bd->use_count) {
if (!(--bd->use_count)) {
bd->props.state |= BL_CORE_FBBLANK;
backlight_update_status(bd);
}
}
-out:
- mutex_unlock(&bd->ops_lock);
- return 0;
}
+EXPORT_SYMBOL(backlight_notify_blank);
-static int backlight_register_fb(struct backlight_device *bd)
+void backlight_notify_blank_all(struct device *display_dev, bool fb_on, bool prev_fb_on)
{
- memset(&bd->fb_notif, 0, sizeof(bd->fb_notif));
- bd->fb_notif.notifier_call = fb_notifier_callback;
+ struct backlight_device *bd;
- return fb_register_client(&bd->fb_notif);
-}
+ guard(mutex)(&backlight_dev_list_mutex);
-static void backlight_unregister_fb(struct backlight_device *bd)
-{
- fb_unregister_client(&bd->fb_notif);
-}
-#else
-static inline int backlight_register_fb(struct backlight_device *bd)
-{
- return 0;
+ list_for_each_entry(bd, &backlight_dev_list, entry)
+ backlight_notify_blank(bd, display_dev, fb_on, prev_fb_on);
}
-
-static inline void backlight_unregister_fb(struct backlight_device *bd)
-{
-}
-#endif /* CONFIG_FB_CORE */
+EXPORT_SYMBOL(backlight_notify_blank_all);
static void backlight_generate_event(struct backlight_device *bd,
enum backlight_update_reason reason)
@@ -447,12 +401,6 @@ struct backlight_device *backlight_device_register(const char *name,
return ERR_PTR(rc);
}
- rc = backlight_register_fb(new_bd);
- if (rc) {
- device_unregister(&new_bd->dev);
- return ERR_PTR(rc);
- }
-
new_bd->ops = ops;
#ifdef CONFIG_PMAC_BACKLIGHT
@@ -539,7 +487,6 @@ void backlight_device_unregister(struct backlight_device *bd)
bd->ops = NULL;
mutex_unlock(&bd->ops_lock);
- backlight_unregister_fb(bd);
device_unregister(&bd->dev);
}
EXPORT_SYMBOL(backlight_device_unregister);
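
With the fb notifier gone, blank transitions reach the backlight core through direct calls. A minimal sketch of the caller side, assuming display_dev is the display's struct device and on/prev_on track whether the display is unblanked after and before the transition (fb_bl_notify_blank(), later in this series, does effectively this):

	if (bd)		/* a specific backlight device is known */
		backlight_notify_blank(bd, display_dev, on, prev_on);
	else		/* let the core match devices via ops->controls_device() */
		backlight_notify_blank_all(display_dev, on, prev_on);
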
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 3267acf8dc5b..affe5c52471a 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -15,86 +15,59 @@
#include <linux/notifier.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/slab.h>
-#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
- defined(CONFIG_LCD_CLASS_DEVICE_MODULE))
-static int to_lcd_power(int fb_blank)
-{
- switch (fb_blank) {
- case FB_BLANK_UNBLANK:
- return LCD_POWER_ON;
- /* deprecated; TODO: should become 'off' */
- case FB_BLANK_NORMAL:
- return LCD_POWER_REDUCED;
- case FB_BLANK_VSYNC_SUSPEND:
- return LCD_POWER_REDUCED_VSYNC_SUSPEND;
- /* 'off' */
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_POWERDOWN:
- default:
- return LCD_POWER_OFF;
- }
-}
+static DEFINE_MUTEX(lcd_dev_list_mutex);
+static LIST_HEAD(lcd_dev_list);
-/* This callback gets called when something important happens inside a
- * framebuffer driver. We're looking if that important event is blanking,
- * and if it is, we're switching lcd power as well ...
- */
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data)
+static void lcd_notify_blank(struct lcd_device *ld, struct device *display_dev,
+ int power)
{
- struct lcd_device *ld = container_of(self, struct lcd_device, fb_notif);
- struct fb_event *evdata = data;
- struct fb_info *info = evdata->info;
- struct lcd_device *fb_lcd = fb_lcd_device(info);
-
guard(mutex)(&ld->ops_lock);
- if (!ld->ops)
- return 0;
- if (ld->ops->controls_device && !ld->ops->controls_device(ld, info->device))
- return 0;
- if (fb_lcd && fb_lcd != ld)
- return 0;
+ if (!ld->ops || !ld->ops->set_power)
+ return;
+ if (ld->ops->controls_device && !ld->ops->controls_device(ld, display_dev))
+ return;
- if (event == FB_EVENT_BLANK) {
- int power = to_lcd_power(*(int *)evdata->data);
+ ld->ops->set_power(ld, power);
+}
- if (ld->ops->set_power)
- ld->ops->set_power(ld, power);
- } else {
- const struct fb_videomode *videomode = evdata->data;
+void lcd_notify_blank_all(struct device *display_dev, int power)
+{
+ struct lcd_device *ld;
- if (ld->ops->set_mode)
- ld->ops->set_mode(ld, videomode->xres, videomode->yres);
- }
+ guard(mutex)(&lcd_dev_list_mutex);
- return 0;
+ list_for_each_entry(ld, &lcd_dev_list, entry)
+ lcd_notify_blank(ld, display_dev, power);
}
+EXPORT_SYMBOL(lcd_notify_blank_all);
-static int lcd_register_fb(struct lcd_device *ld)
+static void lcd_notify_mode_change(struct lcd_device *ld, struct device *display_dev,
+ unsigned int width, unsigned int height)
{
- memset(&ld->fb_notif, 0, sizeof(ld->fb_notif));
- ld->fb_notif.notifier_call = fb_notifier_callback;
- return fb_register_client(&ld->fb_notif);
-}
+ guard(mutex)(&ld->ops_lock);
-static void lcd_unregister_fb(struct lcd_device *ld)
-{
- fb_unregister_client(&ld->fb_notif);
-}
-#else
-static int lcd_register_fb(struct lcd_device *ld)
-{
- return 0;
+ if (!ld->ops || !ld->ops->set_mode)
+ return;
+ if (ld->ops->controls_device && !ld->ops->controls_device(ld, display_dev))
+ return;
+
+ ld->ops->set_mode(ld, width, height);
}
-static inline void lcd_unregister_fb(struct lcd_device *ld)
+void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height)
{
+ struct lcd_device *ld;
+
+ guard(mutex)(&lcd_dev_list_mutex);
+
+ list_for_each_entry(ld, &lcd_dev_list, entry)
+ lcd_notify_mode_change(ld, display_dev, width, height);
}
-#endif /* CONFIG_FB */
+EXPORT_SYMBOL(lcd_notify_mode_change_all);
static ssize_t lcd_power_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -245,11 +218,8 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
return ERR_PTR(rc);
}
- rc = lcd_register_fb(new_ld);
- if (rc) {
- device_unregister(&new_ld->dev);
- return ERR_PTR(rc);
- }
+ guard(mutex)(&lcd_dev_list_mutex);
+ list_add(&new_ld->entry, &lcd_dev_list);
return new_ld;
}
@@ -266,10 +236,12 @@ void lcd_device_unregister(struct lcd_device *ld)
if (!ld)
return;
+ guard(mutex)(&lcd_dev_list_mutex);
+ list_del(&ld->entry);
+
mutex_lock(&ld->ops_lock);
ld->ops = NULL;
mutex_unlock(&ld->ops_lock);
- lcd_unregister_fb(ld);
device_unregister(&ld->dev);
}
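
The lcd class gets the same treatment: callers walk the new lcd_dev_list through two exported helpers instead of registering an fb notifier. A hedged sketch of the caller side, mirroring the fbmem.c hunks below:

	/* on a blank change, after mapping the fb blank level to LCD power */
	lcd_notify_blank_all(info->device, LCD_POWER_ON);
	/* on a validated mode change, forward the new resolution */
	lcd_notify_mode_change_all(info->device, mode->xres, mode->yres);
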
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 9afe701b2a1b..a63bb42c8f8b 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -1406,9 +1406,11 @@ static int wled_configure(struct wled *wled)
wled->ctrl_addr = be32_to_cpu(*prop_addr);
rc = of_property_read_string(dev->of_node, "label", &wled->name);
- if (rc)
+ if (rc) {
wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
-
+ if (!wled->name)
+ return -ENOMEM;
+ }
switch (wled->version) {
case 3:
u32_opts = wled3_opts;
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 139049368fdc..7d02470f19b9 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -85,6 +85,15 @@ static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
/* Redraw, so that we get putc(s) for output done while blanked */
return true;
}
+
+static bool dummycon_switch(struct vc_data *vc)
+{
+ /*
+ * Redraw, so that we get putc(s) for output done while switched
+ * away. This also informs deferred consoles to take over the display.
+ */
+ return true;
+}
#else
static void dummycon_putc(struct vc_data *vc, u16 c, unsigned int y,
unsigned int x) { }
@@ -95,6 +104,10 @@ static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
{
return false;
}
+static bool dummycon_switch(struct vc_data *vc)
+{
+ return false;
+}
#endif
static const char *dummycon_startup(void)
@@ -124,11 +137,6 @@ static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
return false;
}
-static bool dummycon_switch(struct vc_data *vc)
-{
- return false;
-}
-
/*
* The console `switch' structure for the dummy console
*
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 37bd18730fe0..f9cdbf8c53e3 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
c->vc_screenbuf_size - delta);
c->vc_origin = vga_vram_end - c->vc_screenbuf_size;
vga_rolled_over = 0;
- } else
+ } else if (oldo - delta >= (unsigned long)c->vc_screenbuf)
c->vc_origin -= delta;
c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char,
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index 082501feceb9..ec084323115f 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -431,9 +431,10 @@ static struct dac_ops ics5342_ops = {
static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
{
- struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
+ struct ics5342_info *ics_info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
+ struct dac_info *info = &ics_info->dac;
- if (! info)
+ if (!ics_info)
return NULL;
info->dacops = &ics5342_ops;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index c6c4753f6415..0eef8c6b98c8 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1443,7 +1443,7 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
*/
static void radeon_lvds_timer_func(struct timer_list *t)
{
- struct radeonfb_info *rinfo = from_timer(rinfo, t, lvds_timer);
+ struct radeonfb_info *rinfo = timer_container_of(rinfo, t, lvds_timer);
radeon_engine_idle();
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index e56065cdba97..2bdd67595891 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -649,13 +649,13 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
* is required for that largest resolution to avoid remaps at run
* time
*/
- if (carminefb_fix.smem_len > CARMINE_TOTAL_DIPLAY_MEM)
- carminefb_fix.smem_len = CARMINE_TOTAL_DIPLAY_MEM;
+ if (carminefb_fix.smem_len > CARMINE_TOTAL_DISPLAY_MEM)
+ carminefb_fix.smem_len = CARMINE_TOTAL_DISPLAY_MEM;
- else if (carminefb_fix.smem_len < CARMINE_TOTAL_DIPLAY_MEM) {
+ else if (carminefb_fix.smem_len < CARMINE_TOTAL_DISPLAY_MEM) {
printk(KERN_ERR "carminefb: Memory bar is only %d bytes, %d "
"are required.", carminefb_fix.smem_len,
- CARMINE_TOTAL_DIPLAY_MEM);
+ CARMINE_TOTAL_DISPLAY_MEM);
goto err_unmap_vregs;
}
diff --git a/drivers/video/fbdev/carminefb.h b/drivers/video/fbdev/carminefb.h
index 297688eba469..c9825481d96b 100644
--- a/drivers/video/fbdev/carminefb.h
+++ b/drivers/video/fbdev/carminefb.h
@@ -7,7 +7,7 @@
#define MAX_DISPLAY 2
#define CARMINE_DISPLAY_MEM (800 * 600 * 4)
-#define CARMINE_TOTAL_DIPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY)
+#define CARMINE_TOTAL_DISPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY)
#define CARMINE_USE_DISPLAY0 (1 << 0)
#define CARMINE_USE_DISPLAY1 (1 << 1)
diff --git a/drivers/video/fbdev/core/fb_backlight.c b/drivers/video/fbdev/core/fb_backlight.c
index 6fdaa9f81be9..dbed9696f4c5 100644
--- a/drivers/video/fbdev/core/fb_backlight.c
+++ b/drivers/video/fbdev/core/fb_backlight.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/backlight.h>
#include <linux/export.h>
#include <linux/fb.h>
#include <linux/mutex.h>
@@ -36,4 +37,15 @@ struct backlight_device *fb_bl_device(struct fb_info *info)
return info->bl_dev;
}
EXPORT_SYMBOL(fb_bl_device);
+
+void fb_bl_notify_blank(struct fb_info *info, int old_blank)
+{
+ bool on = info->blank == FB_BLANK_UNBLANK;
+ bool prev_on = old_blank == FB_BLANK_UNBLANK;
+
+ if (info->bl_dev)
+ backlight_notify_blank(info->bl_dev, info->device, on, prev_on);
+ else
+ backlight_notify_blank_all(info->device, on, prev_on);
+}
#endif
diff --git a/drivers/video/fbdev/core/fb_info.c b/drivers/video/fbdev/core/fb_info.c
index 4847ebe50d7d..52f9bd2c5417 100644
--- a/drivers/video/fbdev/core/fb_info.c
+++ b/drivers/video/fbdev/core/fb_info.c
@@ -42,6 +42,7 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
info->device = dev;
info->fbcon_rotate_hint = -1;
+ info->blank = FB_BLANK_UNBLANK;
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
mutex_init(&info->bl_curve_mutex);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index ac3c99ed92d1..2df48037688d 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -117,9 +117,14 @@ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
static struct fb_info *fbcon_info_from_console(int console)
{
+ signed char fb;
+
WARN_CONSOLE_UNLOCKED();
- return fbcon_registered_fb[con2fb_map[console]];
+ fb = con2fb_map[console];
+ if (fb < 0 || fb >= ARRAY_SIZE(fbcon_registered_fb))
+ return NULL;
+
+ return fbcon_registered_fb[fb];
}
static int logo_lines;
diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
index 64843464c661..cd3821bd82e5 100644
--- a/drivers/video/fbdev/core/fbcvt.c
+++ b/drivers/video/fbdev/core/fbcvt.c
@@ -312,7 +312,7 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
cvt.f_refresh = cvt.refresh;
cvt.interlace = 1;
- if (!cvt.xres || !cvt.yres || !cvt.refresh) {
+ if (!cvt.xres || !cvt.yres || !cvt.refresh || cvt.f_refresh > INT_MAX) {
printk(KERN_INFO "fbcvt: Invalid input parameters\n");
return 1;
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 3c568cff2913..dfcf5e4d1d4c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -15,6 +15,8 @@
#include <linux/export.h>
#include <linux/fb.h>
#include <linux/fbcon.h>
+#include <linux/lcd.h>
+#include <linux/leds.h>
#include <video/nomodeset.h>
@@ -220,6 +222,12 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
return err;
}
+static void fb_lcd_notify_mode_change(struct fb_info *info,
+ struct fb_videomode *mode)
+{
+ lcd_notify_mode_change_all(info->device, mode->xres, mode->yres);
+}
+
int
fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
{
@@ -227,7 +235,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
u32 activate;
struct fb_var_screeninfo old_var;
struct fb_videomode mode;
- struct fb_event event;
u32 unused;
if (var->activate & FB_ACTIVATE_INV_MODE) {
@@ -328,35 +335,76 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
!list_empty(&info->modelist))
ret = fb_add_videomode(&mode, &info->modelist);
- if (ret)
+ if (ret) {
+ info->var = old_var;
return ret;
+ }
- event.info = info;
- event.data = &mode;
- fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
+ fb_lcd_notify_mode_change(info, &mode);
return 0;
}
EXPORT_SYMBOL(fb_set_var);
-int
-fb_blank(struct fb_info *info, int blank)
+static void fb_lcd_notify_blank(struct fb_info *info)
{
- struct fb_event event;
- int ret = -EINVAL;
+ int power;
+
+ switch (info->blank) {
+ case FB_BLANK_UNBLANK:
+ power = LCD_POWER_ON;
+ break;
+ /* deprecated; TODO: should become 'off' */
+ case FB_BLANK_NORMAL:
+ power = LCD_POWER_REDUCED;
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ power = LCD_POWER_REDUCED_VSYNC_SUSPEND;
+ break;
+ /* 'off' */
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ default:
+ power = LCD_POWER_OFF;
+ break;
+ }
+
+ lcd_notify_blank_all(info->device, power);
+}
+
+static void fb_ledtrig_backlight_notify_blank(struct fb_info *info)
+{
+ if (info->blank == FB_BLANK_UNBLANK)
+ ledtrig_backlight_blank(false);
+ else
+ ledtrig_backlight_blank(true);
+}
+
+int fb_blank(struct fb_info *info, int blank)
+{
+ int old_blank = info->blank;
+ int ret;
+
+ if (!info->fbops->fb_blank)
+ return -EINVAL;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
- event.info = info;
- event.data = &blank;
+ info->blank = blank;
- if (info->fbops->fb_blank)
- ret = info->fbops->fb_blank(blank, info);
+ ret = info->fbops->fb_blank(blank, info);
+ if (ret)
+ goto err;
- if (!ret)
- fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+ fb_bl_notify_blank(info, old_blank);
+ fb_lcd_notify_blank(info);
+ fb_ledtrig_backlight_notify_blank(info);
+ return 0;
+
+err:
+ info->blank = old_blank;
return ret;
}
EXPORT_SYMBOL(fb_blank);
@@ -388,7 +436,7 @@ static int fb_check_foreignness(struct fb_info *fi)
static int do_register_framebuffer(struct fb_info *fb_info)
{
- int i;
+ int i, err = 0;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
@@ -397,15 +445,31 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (num_registered_fb == FB_MAX)
return -ENXIO;
- num_registered_fb++;
for (i = 0 ; i < FB_MAX; i++)
if (!registered_fb[i])
break;
+
+ if (!fb_info->modelist.prev || !fb_info->modelist.next)
+ INIT_LIST_HEAD(&fb_info->modelist);
+
+ fb_var_to_videomode(&mode, &fb_info->var);
+ err = fb_add_videomode(&mode, &fb_info->modelist);
+ if (err < 0)
+ return err;
+
fb_info->node = i;
refcount_set(&fb_info->count, 1);
mutex_init(&fb_info->lock);
mutex_init(&fb_info->mm_lock);
+ /*
+ * With an fb_blank callback present, we assume that the
+ * display is blank, so that fb_blank() enables it on the
+ * first modeset.
+ */
+ if (fb_info->fbops->fb_blank)
+ fb_info->blank = FB_BLANK_POWERDOWN;
+
fb_device_create(fb_info);
if (fb_info->pixmap.addr == NULL) {
@@ -426,16 +490,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT))
bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
- if (!fb_info->modelist.prev || !fb_info->modelist.next)
- INIT_LIST_HEAD(&fb_info->modelist);
-
if (fb_info->skip_vt_switch)
pm_vt_switch_required(fb_info->device, false);
else
pm_vt_switch_required(fb_info->device, true);
- fb_var_to_videomode(&mode, &fb_info->var);
- fb_add_videomode(&mode, &fb_info->modelist);
+ num_registered_fb++;
registered_fb[i] = fb_info;
#ifdef CONFIG_GUMSTIX_AM200EPD
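For context, the reworked fb_blank() now records the requested state in
info->blank before calling the driver, restores the old state on failure, and
fans out the backlight, LCD and LED-trigger notifications itself, so drivers
only flip the hardware. A minimal sketch of a driver-side callback under the
new contract (all foofb_* names are hypothetical):

static int foofb_blank(int blank, struct fb_info *info)
{
	struct foofb_par *par = info->par;	/* hypothetical driver state */

	/* power the panel for FB_BLANK_UNBLANK, blank it otherwise */
	foofb_hw_set_power(par, blank == FB_BLANK_UNBLANK);

	return 0;	/* on error, fbmem rolls info->blank back */
}

static const struct fb_ops foofb_ops = {
	.owner = THIS_MODULE,
	.fb_blank = foofb_blank,
	/* ... */
};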
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 06d75c767579..b8344c40073b 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -242,11 +242,11 @@ static ssize_t store_blank(struct device *device,
return count;
}
-static ssize_t show_blank(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t show_blank(struct device *device, struct device_attribute *attr, char *buf)
{
-// struct fb_info *fb_info = dev_get_drvdata(device);
- return 0;
+ struct fb_info *fb_info = dev_get_drvdata(device);
+
+ return sysfs_emit(buf, "%d\n", fb_info->blank);
}
static ssize_t store_console(struct device *device,
diff --git a/drivers/video/fbdev/geode/display_gx.c b/drivers/video/fbdev/geode/display_gx.c
index b5f25dffd274..099322cefce0 100644
--- a/drivers/video/fbdev/geode/display_gx.c
+++ b/drivers/video/fbdev/geode/display_gx.c
@@ -13,6 +13,7 @@
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/delay.h>
+#include <asm/msr.h>
#include <linux/cs5535.h>
#include "gxfb.h"
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index af996634c1a9..8d69be7c9d31 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <linux/cs5535.h>
+#include <asm/msr.h>
#include <asm/olpc.h>
#include "gxfb.h"
@@ -377,7 +378,7 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Figure out if this is a TFT or CRT part */
- rdmsrl(MSR_GX_GLD_MSR_CONFIG, val);
+ rdmsrq(MSR_GX_GLD_MSR_CONFIG, val);
if ((val & MSR_GX_GLD_MSR_CONFIG_FP) == MSR_GX_GLD_MSR_CONFIG_FP)
par->enable_crt = 0;
diff --git a/drivers/video/fbdev/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c
index 32baaf59fcf7..2e33da9849b0 100644
--- a/drivers/video/fbdev/geode/lxfb_ops.c
+++ b/drivers/video/fbdev/geode/lxfb_ops.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/cs5535.h>
+#include <asm/msr.h>
#include "lxfb.h"
/* TODO
@@ -358,7 +359,7 @@ void lx_set_mode(struct fb_info *info)
/* Set output mode */
- rdmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
+ rdmsrq(MSR_LX_GLD_MSR_CONFIG, msrval);
msrval &= ~MSR_LX_GLD_MSR_CONFIG_FMT;
if (par->output & OUTPUT_PANEL) {
@@ -371,7 +372,7 @@ void lx_set_mode(struct fb_info *info)
} else
msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_CRT;
- wrmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
+ wrmsrq(MSR_LX_GLD_MSR_CONFIG, msrval);
/* Clear the various buffers */
/* FIXME: Adjust for panning here */
@@ -419,7 +420,7 @@ void lx_set_mode(struct fb_info *info)
/* Set default watermark values */
- rdmsrl(MSR_LX_SPARE_MSR, msrval);
+ rdmsrq(MSR_LX_SPARE_MSR, msrval);
msrval &= ~(MSR_LX_SPARE_MSR_DIS_CFIFO_HGO
| MSR_LX_SPARE_MSR_VFIFO_ARB_SEL
@@ -427,7 +428,7 @@ void lx_set_mode(struct fb_info *info)
| MSR_LX_SPARE_MSR_WM_LPEN_OVRD);
msrval |= MSR_LX_SPARE_MSR_DIS_VIFO_WM |
MSR_LX_SPARE_MSR_DIS_INIT_V_PRI;
- wrmsrl(MSR_LX_SPARE_MSR, msrval);
+ wrmsrq(MSR_LX_SPARE_MSR, msrval);
gcfg = DC_GENERAL_CFG_DFLE; /* Display fifo enable */
gcfg |= (0x6 << DC_GENERAL_CFG_DFHPSL_SHIFT) | /* default priority */
@@ -591,10 +592,10 @@ static void lx_save_regs(struct lxfb_par *par)
} while ((i & GP_BLT_STATUS_PB) || !(i & GP_BLT_STATUS_CE));
/* save MSRs */
- rdmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
- rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
- rdmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
- rdmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
+ rdmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel);
+ rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll);
+ rdmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
+ rdmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare);
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
@@ -664,7 +665,7 @@ static void lx_restore_display_ctlr(struct lxfb_par *par)
uint32_t filt;
int i;
- wrmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
+ wrmsrq(MSR_LX_SPARE_MSR, par->msr.dcspare);
for (i = 0; i < ARRAY_SIZE(par->dc); i++) {
switch (i) {
@@ -729,8 +730,8 @@ static void lx_restore_video_proc(struct lxfb_par *par)
{
int i;
- wrmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
- wrmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
+ wrmsrq(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
+ wrmsrq(MSR_LX_MSR_PADSEL, par->msr.padsel);
for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
switch (i) {
diff --git a/drivers/video/fbdev/geode/suspend_gx.c b/drivers/video/fbdev/geode/suspend_gx.c
index 8c49d4e98772..73307f42e343 100644
--- a/drivers/video/fbdev/geode/suspend_gx.c
+++ b/drivers/video/fbdev/geode/suspend_gx.c
@@ -21,8 +21,8 @@ static void gx_save_regs(struct gxfb_par *par)
} while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY));
/* save MSRs */
- rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
- rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
+ rdmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel);
+ rdmsrq(MSR_GLCP_DOTPLL, par->msr.dotpll);
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
@@ -43,14 +43,14 @@ static void gx_set_dotpll(uint32_t dotpll_hi)
uint32_t dotpll_lo;
int i;
- rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
+ rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo);
dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS;
wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
/* wait for the PLL to lock */
for (i = 0; i < 200; i++) {
- rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
+ rdmsrq(MSR_GLCP_DOTPLL, dotpll_lo);
if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
break;
udelay(1);
@@ -133,7 +133,7 @@ static void gx_restore_video_proc(struct gxfb_par *par)
{
int i;
- wrmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
+ wrmsrq(MSR_GX_MSR_PADSEL, par->msr.padsel);
for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
switch (i) {
diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
index 91dac2aa247c..5717c3356949 100644
--- a/drivers/video/fbdev/geode/video_gx.c
+++ b/drivers/video/fbdev/geode/video_gx.c
@@ -142,8 +142,8 @@ void gx_set_dclk_frequency(struct fb_info *info)
}
}
- rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll);
- rdmsrl(MSR_GLCP_DOTPLL, dotpll);
+ rdmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll);
+ rdmsrq(MSR_GLCP_DOTPLL, dotpll);
/* Program new M, N and P. */
dotpll &= 0x00000000ffffffffull;
@@ -151,7 +151,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
dotpll |= MSR_GLCP_DOTPLL_DOTRESET;
dotpll &= ~MSR_GLCP_DOTPLL_BYPASS;
- wrmsrl(MSR_GLCP_DOTPLL, dotpll);
+ wrmsrq(MSR_GLCP_DOTPLL, dotpll);
/* Program dividers. */
sys_rstpll &= ~( MSR_GLCP_SYS_RSTPLL_DOTPREDIV2
@@ -159,15 +159,15 @@ void gx_set_dclk_frequency(struct fb_info *info)
| MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 );
sys_rstpll |= pll_table[best_i].sys_rstpll_bits;
- wrmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll);
+ wrmsrq(MSR_GLCP_SYS_RSTPLL, sys_rstpll);
/* Clear reset bit to start PLL. */
dotpll &= ~(MSR_GLCP_DOTPLL_DOTRESET);
- wrmsrl(MSR_GLCP_DOTPLL, dotpll);
+ wrmsrq(MSR_GLCP_DOTPLL, dotpll);
/* Wait for LOCK bit. */
do {
- rdmsrl(MSR_GLCP_DOTPLL, dotpll);
+ rdmsrq(MSR_GLCP_DOTPLL, dotpll);
} while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK));
}
@@ -180,10 +180,10 @@ gx_configure_tft(struct fb_info *info)
/* Set up the DF pad select MSR */
- rdmsrl(MSR_GX_MSR_PADSEL, val);
+ rdmsrq(MSR_GX_MSR_PADSEL, val);
val &= ~MSR_GX_MSR_PADSEL_MASK;
val |= MSR_GX_MSR_PADSEL_TFT;
- wrmsrl(MSR_GX_MSR_PADSEL, val);
+ wrmsrq(MSR_GX_MSR_PADSEL, val);
/* Turn off the panel */
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index 8900f181f195..cfaf9454014d 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -1484,7 +1484,7 @@ static int nvidiafb_setup(char *options)
flatpanel = 1;
} else if (!strncmp(this_opt, "hwcur", 5)) {
hwcur = 1;
- } else if (!strncmp(this_opt, "noaccel", 6)) {
+ } else if (!strncmp(this_opt, "noaccel", 7)) {
noaccel = 1;
} else if (!strncmp(this_opt, "noscale", 7)) {
noscale = 1;
diff --git a/drivers/video/fbdev/via/via-gpio.c b/drivers/video/fbdev/via/via-gpio.c
index 9577c2cd52c7..27226a8f3f42 100644
--- a/drivers/video/fbdev/via/via-gpio.c
+++ b/drivers/video/fbdev/via/via-gpio.c
@@ -81,8 +81,7 @@ struct viafb_gpio_cfg {
/*
* GPIO access functions
*/
-static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
- int value)
+static int via_gpio_set(struct gpio_chip *chip, unsigned int nr, int value)
{
struct viafb_gpio_cfg *cfg = gpiochip_get_data(chip);
u8 reg;
@@ -99,13 +98,14 @@ static void via_gpio_set(struct gpio_chip *chip, unsigned int nr,
reg &= ~(0x10 << gpio->vg_mask_shift);
via_write_reg(VIASR, gpio->vg_port_index, reg);
spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags);
+
+ return 0;
}
static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr,
int value)
{
- via_gpio_set(chip, nr, value);
- return 0;
+ return via_gpio_set(chip, nr, value);
}
/*
@@ -146,7 +146,7 @@ static struct viafb_gpio_cfg viafb_gpio_config = {
.label = "VIAFB onboard GPIO",
.owner = THIS_MODULE,
.direction_output = via_gpio_dir_out,
- .set = via_gpio_set,
+ .set_rv = via_gpio_set,
.direction_input = via_gpio_dir_input,
.get = via_gpio_get,
.base = -1,
diff --git a/drivers/video/screen_info_generic.c b/drivers/video/screen_info_generic.c
index 64117c6367ab..900e9386eceb 100644
--- a/drivers/video/screen_info_generic.c
+++ b/drivers/video/screen_info_generic.c
@@ -144,3 +144,39 @@ ssize_t screen_info_resources(const struct screen_info *si, struct resource *r,
return pos - r;
}
EXPORT_SYMBOL(screen_info_resources);
+
+/*
+ * The meaning of depth and bpp for direct-color formats is
+ * inconsistent:
+ *
+ * - DRM format info specifies depth as the number of color
+ * bits; including alpha, but not including filler bits.
+ * - Linux' EFI platform code computes lfb_depth from the
+ * individual color channels, including the reserved bits.
+ * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
+ * versions use 15.
+ * - On the kernel command line, 'bpp' of 32 is usually
+ * XRGB8888 including the filler bits, but 15 is XRGB1555
+ * not including the filler bit.
+ *
+ * It is not easily possible to fix this in struct screen_info,
+ * as this could break UAPI. The best solution is to compute
+ * bits_per_pixel from the color bits, the reserved bits and the
+ * reported lfb_depth, taking whichever is highest.
+ */
+
+u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si)
+{
+ u32 bits_per_pixel = si->lfb_depth;
+
+ if (bits_per_pixel > 8) {
+ bits_per_pixel = max(max3(si->red_size + si->red_pos,
+ si->green_size + si->green_pos,
+ si->blue_size + si->blue_pos),
+ si->rsvd_size + si->rsvd_pos);
+ bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
+ }
+
+ return bits_per_pixel;
+}
+EXPORT_SYMBOL(__screen_info_lfb_bits_per_pixel);
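As a worked example of the rule described in the comment above (the field
values are illustrative, not taken from real firmware): for XRGB8888 reported
with lfb_depth 24, the filler bits push the result up to 32.

	struct screen_info si = {
		.lfb_depth = 24,
		.red_size = 8, .red_pos = 16,
		.green_size = 8, .green_pos = 8,
		.blue_size = 8, .blue_pos = 0,
		.rsvd_size = 8, .rsvd_pos = 24,	/* X (filler) channel */
	};

	/* max3(24, 16, 8) = 24; max(24, 8 + 24) = 32; max(32, 24) = 32 */
	u32 bpp = __screen_info_lfb_bits_per_pixel(&si);	/* 32 */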
diff --git a/drivers/video/screen_info_pci.c b/drivers/video/screen_info_pci.c
index 6c5833517141..66bfc1d0a6dc 100644
--- a/drivers/video/screen_info_pci.c
+++ b/drivers/video/screen_info_pci.c
@@ -7,8 +7,8 @@
static struct pci_dev *screen_info_lfb_pdev;
static size_t screen_info_lfb_bar;
-static resource_size_t screen_info_lfb_offset;
-static struct resource screen_info_lfb_res = DEFINE_RES_MEM(0, 0);
+static resource_size_t screen_info_lfb_res_start; // original start of resource
+static resource_size_t screen_info_lfb_offset; // framebuffer offset within resource
static bool __screen_info_relocation_is_valid(const struct screen_info *si, struct resource *pr)
{
@@ -31,7 +31,7 @@ void screen_info_apply_fixups(void)
if (screen_info_lfb_pdev) {
struct resource *pr = &screen_info_lfb_pdev->resource[screen_info_lfb_bar];
- if (pr->start != screen_info_lfb_res.start) {
+ if (pr->start != screen_info_lfb_res_start) {
if (__screen_info_relocation_is_valid(si, pr)) {
/*
* Only update base if we have an actual
@@ -47,46 +47,67 @@ void screen_info_apply_fixups(void)
}
}
+static int __screen_info_lfb_pci_bus_region(const struct screen_info *si, unsigned int type,
+ struct pci_bus_region *r)
+{
+ u64 base, size;
+
+ base = __screen_info_lfb_base(si);
+ if (!base)
+ return -EINVAL;
+
+ size = __screen_info_lfb_size(si, type);
+ if (!size)
+ return -EINVAL;
+
+ r->start = base;
+ r->end = base + size - 1;
+
+ return 0;
+}
+
static void screen_info_fixup_lfb(struct pci_dev *pdev)
{
unsigned int type;
- struct resource res[SCREEN_INFO_MAX_RESOURCES];
- size_t i, numres;
+ struct pci_bus_region bus_region;
int ret;
+ struct resource r = {
+ .flags = IORESOURCE_MEM,
+ };
+ const struct resource *pr;
const struct screen_info *si = &screen_info;
if (screen_info_lfb_pdev)
return; // already found
type = screen_info_video_type(si);
- if (type != VIDEO_TYPE_EFI)
- return; // only applies to EFI
+ if (!__screen_info_has_lfb(type))
+ return; // only applies to EFI; maybe VESA
- ret = screen_info_resources(si, res, ARRAY_SIZE(res));
+ ret = __screen_info_lfb_pci_bus_region(si, type, &bus_region);
if (ret < 0)
return;
- numres = ret;
- for (i = 0; i < numres; ++i) {
- struct resource *r = &res[i];
- const struct resource *pr;
-
- if (!(r->flags & IORESOURCE_MEM))
- continue;
- pr = pci_find_resource(pdev, r);
- if (!pr)
- continue;
-
- /*
- * We've found a PCI device with the framebuffer
- * resource. Store away the parameters to track
- * relocation of the framebuffer aperture.
- */
- screen_info_lfb_pdev = pdev;
- screen_info_lfb_bar = pr - pdev->resource;
- screen_info_lfb_offset = r->start - pr->start;
- memcpy(&screen_info_lfb_res, r, sizeof(screen_info_lfb_res));
- }
+ /*
+ * Translate the PCI bus address to resource. Account
+ * for an offset if the framebuffer is behind a PCI host
+ * bridge.
+ */
+ pcibios_bus_to_resource(pdev->bus, &r, &bus_region);
+
+ pr = pci_find_resource(pdev, &r);
+ if (!pr)
+ return;
+
+ /*
+ * We've found a PCI device with the framebuffer
+ * resource. Store away the parameters to track
+ * relocation of the framebuffer aperture.
+ */
+ screen_info_lfb_pdev = pdev;
+ screen_info_lfb_bar = pr - pdev->resource;
+ screen_info_lfb_offset = r.start - pr->start;
+ screen_info_lfb_res_start = bus_region.start;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16,
screen_info_fixup_lfb);
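The key step above is translating the firmware-reported bus address into a CPU
resource before matching it against the device's BARs. A condensed sketch of
that translation (base and size are made-up values):

	struct pci_bus_region bus_region = {
		.start = 0x80000000,			/* hypothetical lfb base */
		.end = 0x80000000 + 0x1000000 - 1,	/* hypothetical 16 MiB */
	};
	struct resource r = { .flags = IORESOURCE_MEM };

	/* account for a host-bridge offset between bus and CPU addresses */
	pcibios_bus_to_resource(pdev->bus, &r, &bus_region);
	/* on hosts with a 1:1 mapping, r.start == bus_region.start */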
diff --git a/drivers/virt/acrn/irqfd.c b/drivers/virt/acrn/irqfd.c
index b7da24ca1475..64d32c8fbf79 100644
--- a/drivers/virt/acrn/irqfd.c
+++ b/drivers/virt/acrn/irqfd.c
@@ -16,8 +16,6 @@
#include "acrn_drv.h"
-static LIST_HEAD(acrn_irqfd_clients);
-
/**
* struct hsm_irqfd - Properties of HSM irqfd
* @vm: Associated VM pointer
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
index ff869d883d95..819a97e8ba99 100644
--- a/drivers/virt/coco/Kconfig
+++ b/drivers/virt/coco/Kconfig
@@ -3,10 +3,6 @@
# Confidential computing related collateral
#
-config TSM_REPORTS
- select CONFIGFS_FS
- tristate
-
source "drivers/virt/coco/efi_secret/Kconfig"
source "drivers/virt/coco/pkvm-guest/Kconfig"
@@ -16,3 +12,5 @@ source "drivers/virt/coco/sev-guest/Kconfig"
source "drivers/virt/coco/tdx-guest/Kconfig"
source "drivers/virt/coco/arm-cca-guest/Kconfig"
+
+source "drivers/virt/coco/guest/Kconfig"
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
index c3d07cfc087e..f918bbb61737 100644
--- a/drivers/virt/coco/Makefile
+++ b/drivers/virt/coco/Makefile
@@ -2,9 +2,9 @@
#
# Confidential computing related collateral
#
-obj-$(CONFIG_TSM_REPORTS) += tsm.o
obj-$(CONFIG_EFI_SECRET) += efi_secret/
obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/
obj-$(CONFIG_SEV_GUEST) += sev-guest/
obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/
obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/
+obj-$(CONFIG_TSM_GUEST) += guest/
diff --git a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
index 87f162736b2e..0c9ea24a200c 100644
--- a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
+++ b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
@@ -96,7 +96,7 @@ static int arm_cca_report_new(struct tsm_report *report, void *data)
struct arm_cca_token_info info;
void *buf;
u8 *token __free(kvfree) = NULL;
- struct tsm_desc *desc = &report->desc;
+ struct tsm_report_desc *desc = &report->desc;
if (desc->inblob_len < 32 || desc->inblob_len > 64)
return -EINVAL;
@@ -181,7 +181,7 @@ exit_free_granule_page:
return ret;
}
-static const struct tsm_ops arm_cca_tsm_ops = {
+static const struct tsm_report_ops arm_cca_tsm_ops = {
.name = KBUILD_MODNAME,
.report_new = arm_cca_report_new,
};
@@ -202,7 +202,7 @@ static int __init arm_cca_guest_init(void)
if (!is_realm_world())
return -ENODEV;
- ret = tsm_register(&arm_cca_tsm_ops, NULL);
+ ret = tsm_report_register(&arm_cca_tsm_ops, NULL);
if (ret < 0)
pr_err("Error %d registering with TSM\n", ret);
@@ -216,7 +216,7 @@ module_init(arm_cca_guest_init);
*/
static void __exit arm_cca_guest_exit(void)
{
- tsm_unregister(&arm_cca_tsm_ops);
+ tsm_report_unregister(&arm_cca_tsm_ops);
}
module_exit(arm_cca_guest_exit);
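The rename from tsm_register()/tsm_unregister() to the tsm_report_*() variants
leaves the provider life cycle unchanged. A condensed sketch of the pattern the
converted guest drivers follow (foo_* names are hypothetical):

static const struct tsm_report_ops foo_tsm_ops = {
	.name = KBUILD_MODNAME,
	.report_new = foo_report_new,	/* hypothetical callback */
};

static int __init foo_guest_init(void)
{
	/* one provider at a time; -EBUSY if another is registered */
	return tsm_report_register(&foo_tsm_ops, NULL);
}

static void __exit foo_guest_exit(void)
{
	tsm_report_unregister(&foo_tsm_ops);
}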
diff --git a/drivers/virt/coco/guest/Kconfig b/drivers/virt/coco/guest/Kconfig
new file mode 100644
index 000000000000..3d5e1d05bf34
--- /dev/null
+++ b/drivers/virt/coco/guest/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Confidential computing shared guest collateral
+#
+config TSM_GUEST
+ bool
+
+config TSM_REPORTS
+ select TSM_GUEST
+ select CONFIGFS_FS
+ tristate
+
+config TSM_MEASUREMENTS
+ select TSM_GUEST
+ select CRYPTO_HASH_INFO
+ select CRYPTO
+ bool
diff --git a/drivers/virt/coco/guest/Makefile b/drivers/virt/coco/guest/Makefile
new file mode 100644
index 000000000000..9ec4860bd213
--- /dev/null
+++ b/drivers/virt/coco/guest/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TSM_REPORTS) += tsm_report.o
+tsm_report-y := report.o
+obj-$(CONFIG_TSM_MEASUREMENTS) += tsm-mr.o
diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/guest/report.c
index 9432d4e303f1..d3d18fc22bc2 100644
--- a/drivers/virt/coco/tsm.c
+++ b/drivers/virt/coco/guest/report.c
@@ -13,8 +13,9 @@
#include <linux/configfs.h>
static struct tsm_provider {
- const struct tsm_ops *ops;
+ const struct tsm_report_ops *ops;
void *data;
+ atomic_t count;
} provider;
static DECLARE_RWSEM(tsm_rwsem);
@@ -92,16 +93,19 @@ static ssize_t tsm_report_privlevel_store(struct config_item *cfg,
if (rc)
return rc;
+ guard(rwsem_write)(&tsm_rwsem);
+ if (!provider.ops)
+ return -ENXIO;
+
/*
* The valid privilege levels that a TSM might accept, if it accepts a
* privilege level setting at all, are a max of TSM_PRIVLEVEL_MAX (see
* SEV-SNP GHCB) and a minimum of a TSM selected floor value no less
* than 0.
*/
- if (provider.ops->privlevel_floor > val || val > TSM_PRIVLEVEL_MAX)
+ if (provider.ops->privlevel_floor > val || val > TSM_REPORT_PRIVLEVEL_MAX)
return -EINVAL;
- guard(rwsem_write)(&tsm_rwsem);
rc = try_advance_write_generation(report);
if (rc)
return rc;
@@ -115,6 +119,10 @@ static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg,
char *buf)
{
guard(rwsem_read)(&tsm_rwsem);
+
+ if (!provider.ops)
+ return -ENXIO;
+
return sysfs_emit(buf, "%u\n", provider.ops->privlevel_floor);
}
CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor);
@@ -202,7 +210,7 @@ static ssize_t tsm_report_inblob_write(struct config_item *cfg,
memcpy(report->desc.inblob, buf, count);
return count;
}
-CONFIGFS_BIN_ATTR_WO(tsm_report_, inblob, NULL, TSM_INBLOB_MAX);
+CONFIGFS_BIN_ATTR_WO(tsm_report_, inblob, NULL, TSM_REPORT_INBLOB_MAX);
static ssize_t tsm_report_generation_show(struct config_item *cfg, char *buf)
{
@@ -217,6 +225,9 @@ CONFIGFS_ATTR_RO(tsm_report_, generation);
static ssize_t tsm_report_provider_show(struct config_item *cfg, char *buf)
{
guard(rwsem_read)(&tsm_rwsem);
+ if (!provider.ops)
+ return -ENXIO;
+
return sysfs_emit(buf, "%s\n", provider.ops->name);
}
CONFIGFS_ATTR_RO(tsm_report_, provider);
@@ -272,7 +283,7 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
size_t count, enum tsm_data_select select)
{
struct tsm_report_state *state = to_state(report);
- const struct tsm_ops *ops;
+ const struct tsm_report_ops *ops;
ssize_t rc;
/* try to read from the existing report if present and valid... */
@@ -284,7 +295,7 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
guard(rwsem_write)(&tsm_rwsem);
ops = provider.ops;
if (!ops)
- return -ENOTTY;
+ return -ENXIO;
if (!report->desc.inblob_len)
return -EINVAL;
@@ -314,7 +325,7 @@ static ssize_t tsm_report_outblob_read(struct config_item *cfg, void *buf,
return tsm_report_read(report, buf, count, TSM_REPORT);
}
-CONFIGFS_BIN_ATTR_RO(tsm_report_, outblob, NULL, TSM_OUTBLOB_MAX);
+CONFIGFS_BIN_ATTR_RO(tsm_report_, outblob, NULL, TSM_REPORT_OUTBLOB_MAX);
static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf,
size_t count)
@@ -323,7 +334,7 @@ static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf,
return tsm_report_read(report, buf, count, TSM_CERTS);
}
-CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_OUTBLOB_MAX);
+CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_REPORT_OUTBLOB_MAX);
static ssize_t tsm_report_manifestblob_read(struct config_item *cfg, void *buf,
size_t count)
@@ -332,7 +343,7 @@ static ssize_t tsm_report_manifestblob_read(struct config_item *cfg, void *buf,
return tsm_report_read(report, buf, count, TSM_MANIFEST);
}
-CONFIGFS_BIN_ATTR_RO(tsm_report_, manifestblob, NULL, TSM_OUTBLOB_MAX);
+CONFIGFS_BIN_ATTR_RO(tsm_report_, manifestblob, NULL, TSM_REPORT_OUTBLOB_MAX);
static struct configfs_attribute *tsm_report_attrs[] = {
[TSM_REPORT_GENERATION] = &tsm_report_attr_generation,
@@ -421,12 +432,20 @@ static struct config_item *tsm_report_make_item(struct config_group *group,
if (!state)
return ERR_PTR(-ENOMEM);
+ atomic_inc(&provider.count);
config_item_init_type_name(&state->cfg, name, &tsm_report_type);
return &state->cfg;
}
+static void tsm_report_drop_item(struct config_group *group, struct config_item *item)
+{
+ config_item_put(item);
+ atomic_dec(&provider.count);
+}
+
static struct configfs_group_operations tsm_report_group_ops = {
.make_item = tsm_report_make_item,
+ .drop_item = tsm_report_drop_item,
};
static const struct config_item_type tsm_reports_type = {
@@ -448,9 +467,9 @@ static struct configfs_subsystem tsm_configfs = {
.su_mutex = __MUTEX_INITIALIZER(tsm_configfs.su_mutex),
};
-int tsm_register(const struct tsm_ops *ops, void *priv)
+int tsm_report_register(const struct tsm_report_ops *ops, void *priv)
{
- const struct tsm_ops *conflict;
+ const struct tsm_report_ops *conflict;
guard(rwsem_write)(&tsm_rwsem);
conflict = provider.ops;
@@ -459,26 +478,34 @@ int tsm_register(const struct tsm_ops *ops, void *priv)
return -EBUSY;
}
+ if (atomic_read(&provider.count)) {
+ pr_err("configfs/tsm/report not empty\n");
+ return -EBUSY;
+ }
+
provider.ops = ops;
provider.data = priv;
return 0;
}
-EXPORT_SYMBOL_GPL(tsm_register);
+EXPORT_SYMBOL_GPL(tsm_report_register);
-int tsm_unregister(const struct tsm_ops *ops)
+int tsm_report_unregister(const struct tsm_report_ops *ops)
{
guard(rwsem_write)(&tsm_rwsem);
if (ops != provider.ops)
return -EBUSY;
+ if (atomic_read(&provider.count))
+ pr_warn("\"%s\" unregistered with items present in configfs/tsm/report\n",
+ provider.ops->name);
provider.ops = NULL;
provider.data = NULL;
return 0;
}
-EXPORT_SYMBOL_GPL(tsm_unregister);
+EXPORT_SYMBOL_GPL(tsm_report_unregister);
static struct config_group *tsm_report_group;
-static int __init tsm_init(void)
+static int __init tsm_report_init(void)
{
struct config_group *root = &tsm_configfs.su_group;
struct config_group *tsm;
@@ -499,14 +526,14 @@ static int __init tsm_init(void)
return 0;
}
-module_init(tsm_init);
+module_init(tsm_report_init);
-static void __exit tsm_exit(void)
+static void __exit tsm_report_exit(void)
{
configfs_unregister_default_group(tsm_report_group);
configfs_unregister_subsystem(&tsm_configfs);
}
-module_exit(tsm_exit);
+module_exit(tsm_report_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Provide Trusted Security Module attestation reports via configfs");
diff --git a/drivers/virt/coco/guest/tsm-mr.c b/drivers/virt/coco/guest/tsm-mr.c
new file mode 100644
index 000000000000..feb30af90a20
--- /dev/null
+++ b/drivers/virt/coco/guest/tsm-mr.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/tsm_mr.h>
+
+/*
+ * struct tm_context - contains everything necessary to implement sysfs
+ * attributes for MRs.
+ * @rwsem: protects the MR cache from concurrent access.
+ * @agrp: contains all MR attributes created by tsm_mr_create_attribute_group().
+ * @tm: input to tsm_mr_create_attribute_group() containing MR definitions/ops.
+ * @in_sync: %true if MR cache is up-to-date.
+ * @mrs: array of &struct bin_attribute, one for each MR.
+ *
+ * This internal structure contains everything needed to implement
+ * tm_digest_read() and tm_digest_write().
+ *
+ * Given tm->refresh() is potentially expensive, tm_digest_read() caches MR
+ * values and calls tm->refresh() only when necessary. Only live MRs (i.e., with
+ * %TSM_MR_F_LIVE set) can trigger tm->refresh(), while others are assumed to
+ * retain their values from the last tm->write(). @in_sync tracks if there have
+ * been tm->write() calls since the last tm->refresh(). That is, tm->refresh()
+ * will be called only when a live MR is being read and the cache is stale
+ * (@in_sync is %false).
+ *
+ * tm_digest_write() sets @in_sync to %false and calls tm->write(), whose
+ * semantics are arch- and MR-specific. Most (if not all) writable MRs support
+ * extension semantics (i.e., tm->write() extends the input buffer into the MR).
+ */
+struct tm_context {
+ struct rw_semaphore rwsem;
+ struct attribute_group agrp;
+ const struct tsm_measurements *tm;
+ bool in_sync;
+ struct bin_attribute mrs[];
+};
+
+static ssize_t tm_digest_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buffer,
+ loff_t off, size_t count)
+{
+ struct tm_context *ctx;
+ const struct tsm_measurement_register *mr;
+ int rc;
+
+ ctx = attr->private;
+ rc = down_read_interruptible(&ctx->rwsem);
+ if (rc)
+ return rc;
+
+ mr = &ctx->tm->mrs[attr - ctx->mrs];
+
+ /*
+ * @ctx->in_sync indicates whether the MR cache is up to date. It is a
+ * global rather than a per-MR flag for simplicity, as most (if not all)
+ * archs allow reading all MRs in one shot.
+ *
+ * ctx->tm->refresh() is necessary only for LIVE MRs, while others retain
+ * their values from their respective last ctx->tm->write().
+ */
+ if ((mr->mr_flags & TSM_MR_F_LIVE) && !ctx->in_sync) {
+ up_read(&ctx->rwsem);
+
+ rc = down_write_killable(&ctx->rwsem);
+ if (rc)
+ return rc;
+
+ if (!ctx->in_sync) {
+ rc = ctx->tm->refresh(ctx->tm);
+ ctx->in_sync = !rc;
+ trace_tsm_mr_refresh(mr, rc);
+ }
+
+ downgrade_write(&ctx->rwsem);
+ }
+
+ memcpy(buffer, mr->mr_value + off, count);
+ trace_tsm_mr_read(mr);
+
+ up_read(&ctx->rwsem);
+ return rc ?: count;
+}
+
+static ssize_t tm_digest_write(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buffer,
+ loff_t off, size_t count)
+{
+ struct tm_context *ctx;
+ const struct tsm_measurement_register *mr;
+ ssize_t rc;
+
+ /* partial writes are not supported */
+ if (off != 0 || count != attr->size)
+ return -EINVAL;
+
+ ctx = attr->private;
+ mr = &ctx->tm->mrs[attr - ctx->mrs];
+
+ rc = down_write_killable(&ctx->rwsem);
+ if (rc)
+ return rc;
+
+ rc = ctx->tm->write(ctx->tm, mr, buffer);
+
+ /* mark MR cache stale */
+ if (!rc) {
+ ctx->in_sync = false;
+ trace_tsm_mr_write(mr, buffer);
+ }
+
+ up_write(&ctx->rwsem);
+ return rc ?: count;
+}
+
+/**
+ * tsm_mr_create_attribute_group() - creates an attribute group for measurement
+ * registers (MRs)
+ * @tm: pointer to &struct tsm_measurements containing the MR definitions.
+ *
+ * This function creates attributes corresponding to the MR definitions
+ * provided by @tm->mrs.
+ *
+ * The created attributes will reference @tm and its members. The caller must
+ * not free @tm until after tsm_mr_free_attribute_group() is called.
+ *
+ * Context: Process context. May sleep due to memory allocation.
+ *
+ * Return:
+ * * On success, a pointer to an attribute group is returned; otherwise
+ * * %-EINVAL - Invalid MR definitions.
+ * * %-ENOMEM - Out of memory.
+ */
+const struct attribute_group *
+tsm_mr_create_attribute_group(const struct tsm_measurements *tm)
+{
+ size_t nlen;
+
+ if (!tm || !tm->mrs)
+ return ERR_PTR(-EINVAL);
+
+ /* aggregated length of all MR names */
+ nlen = 0;
+ for (size_t i = 0; i < tm->nr_mrs; ++i) {
+ if ((tm->mrs[i].mr_flags & TSM_MR_F_LIVE) && !tm->refresh)
+ return ERR_PTR(-EINVAL);
+
+ if ((tm->mrs[i].mr_flags & TSM_MR_F_WRITABLE) && !tm->write)
+ return ERR_PTR(-EINVAL);
+
+ if (!tm->mrs[i].mr_name)
+ return ERR_PTR(-EINVAL);
+
+ if (tm->mrs[i].mr_flags & TSM_MR_F_NOHASH)
+ continue;
+
+ if (tm->mrs[i].mr_hash >= HASH_ALGO__LAST)
+ return ERR_PTR(-EINVAL);
+
+ /* MR sysfs attribute names have the form of MRNAME:HASH */
+ nlen += strlen(tm->mrs[i].mr_name) + 1 +
+ strlen(hash_algo_name[tm->mrs[i].mr_hash]) + 1;
+ }
+
+ /*
+ * @attrs and the MR name strings are combined into a single allocation
+ * so that we don't have to free MR names one-by-one in
+ * tsm_mr_free_attribute_group()
+ */
+ const struct bin_attribute **attrs __free(kfree) =
+ kzalloc(sizeof(*attrs) * (tm->nr_mrs + 1) + nlen, GFP_KERNEL);
+ struct tm_context *ctx __free(kfree) =
+ kzalloc(struct_size(ctx, mrs, tm->nr_mrs), GFP_KERNEL);
+ char *name, *end;
+
+ if (!ctx || !attrs)
+ return ERR_PTR(-ENOMEM);
+
+ /* @attrs is followed immediately by MR name strings */
+ name = (char *)&attrs[tm->nr_mrs + 1];
+ end = name + nlen;
+
+ for (size_t i = 0; i < tm->nr_mrs; ++i) {
+ struct bin_attribute *bap = &ctx->mrs[i];
+
+ sysfs_bin_attr_init(bap);
+
+ if (tm->mrs[i].mr_flags & TSM_MR_F_NOHASH)
+ bap->attr.name = tm->mrs[i].mr_name;
+ else if (name < end) {
+ bap->attr.name = name;
+ name += snprintf(name, end - name, "%s:%s",
+ tm->mrs[i].mr_name,
+ hash_algo_name[tm->mrs[i].mr_hash]);
+ ++name;
+ } else
+ return ERR_PTR(-EINVAL);
+
+ /* check for duplicated MR definitions */
+ for (size_t j = 0; j < i; ++j)
+ if (!strcmp(bap->attr.name, attrs[j]->attr.name))
+ return ERR_PTR(-EINVAL);
+
+ if (tm->mrs[i].mr_flags & TSM_MR_F_READABLE) {
+ bap->attr.mode |= 0444;
+ bap->read_new = tm_digest_read;
+ }
+
+ if (tm->mrs[i].mr_flags & TSM_MR_F_WRITABLE) {
+ bap->attr.mode |= 0200;
+ bap->write_new = tm_digest_write;
+ }
+
+ bap->size = tm->mrs[i].mr_size;
+ bap->private = ctx;
+
+ attrs[i] = bap;
+ }
+
+ if (name != end)
+ return ERR_PTR(-EINVAL);
+
+ init_rwsem(&ctx->rwsem);
+ ctx->agrp.name = "measurements";
+ ctx->agrp.bin_attrs_new = no_free_ptr(attrs);
+ ctx->tm = tm;
+ return &no_free_ptr(ctx)->agrp;
+}
+EXPORT_SYMBOL_GPL(tsm_mr_create_attribute_group);
+
+/**
+ * tsm_mr_free_attribute_group() - frees the attribute group returned by
+ * tsm_mr_create_attribute_group()
+ * @attr_grp: attribute group returned by tsm_mr_create_attribute_group()
+ *
+ * Context: Process context.
+ */
+void tsm_mr_free_attribute_group(const struct attribute_group *attr_grp)
+{
+ if (!IS_ERR_OR_NULL(attr_grp)) {
+ kfree(attr_grp->bin_attrs_new);
+ kfree(container_of(attr_grp, struct tm_context, agrp));
+ }
+}
+EXPORT_SYMBOL_GPL(tsm_mr_free_attribute_group);
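A condensed sketch of a hypothetical consumer of this API (all foo_* names,
buffer sizes and flag choices are assumptions, not part of the patch;
TSM_MR_F_RTMR is taken here to imply a live, readable and writable MR):

static u8 foo_rtmr0[SHA384_DIGEST_SIZE];	/* assumed MR cache buffer */

static struct tsm_measurement_register foo_mrs[] = {
	{
		.mr_name = "rtmr0",
		.mr_value = foo_rtmr0,
		.mr_size = sizeof(foo_rtmr0),
		.mr_hash = HASH_ALGO_SHA384,
		.mr_flags = TSM_MR_F_RTMR,
	},
};

static int foo_refresh(const struct tsm_measurements *tm)
{
	/* re-read all MR values from hardware into the cached buffers */
	return 0;
}

static int foo_write(const struct tsm_measurements *tm,
		     const struct tsm_measurement_register *mr, const u8 *data)
{
	/* extend @data into the hardware MR */
	return 0;
}

static struct tsm_measurements foo_tm = {
	.mrs = foo_mrs,
	.nr_mrs = ARRAY_SIZE(foo_mrs),
	.refresh = foo_refresh,
	.write = foo_write,
};

/*
 * Attach: grp = tsm_mr_create_attribute_group(&foo_tm), then add @grp to a
 * device's sysfs groups; tear down with tsm_mr_free_attribute_group(grp).
 */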
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index cf3fb61f4d5b..7a4e2188f109 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -346,7 +346,7 @@ struct snp_msg_cert_entry {
static int sev_svsm_report_new(struct tsm_report *report, void *data)
{
unsigned int rep_len, man_len, certs_len;
- struct tsm_desc *desc = &report->desc;
+ struct tsm_report_desc *desc = &report->desc;
struct svsm_attest_call ac = {};
unsigned int retry_count;
void *rep, *man, *certs;
@@ -481,7 +481,7 @@ retry:
static int sev_report_new(struct tsm_report *report, void *data)
{
struct snp_msg_cert_entry *cert_table;
- struct tsm_desc *desc = &report->desc;
+ struct tsm_report_desc *desc = &report->desc;
struct snp_guest_dev *snp_dev = data;
struct snp_msg_report_resp_hdr hdr;
const u32 report_size = SZ_4K;
@@ -610,7 +610,7 @@ static bool sev_report_bin_attr_visible(int n)
return false;
}
-static struct tsm_ops sev_tsm_ops = {
+static struct tsm_report_ops sev_tsm_report_ops = {
.name = KBUILD_MODNAME,
.report_new = sev_report_new,
.report_attr_visible = sev_report_attr_visible,
@@ -619,7 +619,7 @@ static struct tsm_ops sev_tsm_ops = {
static void unregister_sev_tsm(void *data)
{
- tsm_unregister(&sev_tsm_ops);
+ tsm_report_unregister(&sev_tsm_report_ops);
}
static int __init sev_guest_probe(struct platform_device *pdev)
@@ -656,9 +656,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
misc->fops = &snp_guest_fops;
/* Set the privlevel_floor attribute based on the vmpck_id */
- sev_tsm_ops.privlevel_floor = mdesc->vmpck_id;
+ sev_tsm_report_ops.privlevel_floor = mdesc->vmpck_id;
- ret = tsm_register(&sev_tsm_ops, snp_dev);
+ ret = tsm_report_register(&sev_tsm_report_ops, snp_dev);
if (ret)
goto e_msg_init;
diff --git a/drivers/virt/coco/tdx-guest/Kconfig b/drivers/virt/coco/tdx-guest/Kconfig
index 22dd59e19431..dbbdc14383b1 100644
--- a/drivers/virt/coco/tdx-guest/Kconfig
+++ b/drivers/virt/coco/tdx-guest/Kconfig
@@ -2,6 +2,7 @@ config TDX_GUEST_DRIVER
tristate "TDX Guest driver"
depends on INTEL_TDX_GUEST
select TSM_REPORTS
+ select TSM_MEASUREMENTS
help
The driver provides userspace interface to communicate with
the TDX module to request the TDX guest details like attestation
diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c
index 224e7dde9cde..4e239ec960c9 100644
--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
+++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
@@ -5,6 +5,8 @@
* Copyright (C) 2022 Intel Corporation
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
@@ -15,14 +17,146 @@
#include <linux/set_memory.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/sockptr.h>
#include <linux/tsm.h>
-#include <linux/sizes.h>
+#include <linux/tsm-mr.h>
#include <uapi/linux/tdx-guest.h>
#include <asm/cpu_device_id.h>
#include <asm/tdx.h>
+/* TDREPORT buffer */
+static u8 *tdx_report_buf;
+
+/* Lock to serialize TDG.MR.REPORT and TDG.MR.RTMR.EXTEND TDCALLs */
+static DEFINE_MUTEX(mr_lock);
+
+/* TDREPORT fields */
+enum {
+ TDREPORT_reportdata = 128,
+ TDREPORT_tee_tcb_info = 256,
+ TDREPORT_tdinfo = TDREPORT_tee_tcb_info + 256,
+ TDREPORT_attributes = TDREPORT_tdinfo,
+ TDREPORT_xfam = TDREPORT_attributes + sizeof(u64),
+ TDREPORT_mrtd = TDREPORT_xfam + sizeof(u64),
+ TDREPORT_mrconfigid = TDREPORT_mrtd + SHA384_DIGEST_SIZE,
+ TDREPORT_mrowner = TDREPORT_mrconfigid + SHA384_DIGEST_SIZE,
+ TDREPORT_mrownerconfig = TDREPORT_mrowner + SHA384_DIGEST_SIZE,
+ TDREPORT_rtmr0 = TDREPORT_mrownerconfig + SHA384_DIGEST_SIZE,
+ TDREPORT_rtmr1 = TDREPORT_rtmr0 + SHA384_DIGEST_SIZE,
+ TDREPORT_rtmr2 = TDREPORT_rtmr1 + SHA384_DIGEST_SIZE,
+ TDREPORT_rtmr3 = TDREPORT_rtmr2 + SHA384_DIGEST_SIZE,
+ TDREPORT_servtd_hash = TDREPORT_rtmr3 + SHA384_DIGEST_SIZE,
+};
+
+static int tdx_do_report(sockptr_t data, sockptr_t tdreport)
+{
+ scoped_cond_guard(mutex_intr, return -EINTR, &mr_lock) {
+ u8 *reportdata = tdx_report_buf + TDREPORT_reportdata;
+ int ret;
+
+ if (!sockptr_is_null(data) &&
+ copy_from_sockptr(reportdata, data, TDX_REPORTDATA_LEN))
+ return -EFAULT;
+
+ ret = tdx_mcall_get_report0(reportdata, tdx_report_buf);
+ if (WARN_ONCE(ret, "tdx_mcall_get_report0() failed: %d", ret))
+ return ret;
+
+ if (!sockptr_is_null(tdreport) &&
+ copy_to_sockptr(tdreport, tdx_report_buf, TDX_REPORT_LEN))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int tdx_do_extend(u8 mr_ind, const u8 *data)
+{
+ scoped_cond_guard(mutex_intr, return -EINTR, &mr_lock) {
+ /*
+ * TDX requires @extend_buf to be 64-byte aligned.
+ * It's safe to reuse the REPORTDATA buffer for that purpose because
+ * TDG.MR.REPORT and TDG.MR.RTMR.EXTEND are serialized by @mr_lock.
+ */
+ u8 *extend_buf = tdx_report_buf + TDREPORT_reportdata;
+ int ret;
+
+ memcpy(extend_buf, data, SHA384_DIGEST_SIZE);
+
+ ret = tdx_mcall_extend_rtmr(mr_ind, extend_buf);
+ if (WARN_ONCE(ret, "tdx_mcall_extend_rtmr(%u) failed: %d", mr_ind, ret))
+ return ret;
+ }
+ return 0;
+}
+
+#define TDX_MR_(r) .mr_value = (void *)TDREPORT_##r, TSM_MR_(r, SHA384)
+static struct tsm_measurement_register tdx_mrs[] = {
+ { TDX_MR_(rtmr0) | TSM_MR_F_RTMR },
+ { TDX_MR_(rtmr1) | TSM_MR_F_RTMR },
+ { TDX_MR_(rtmr2) | TSM_MR_F_RTMR },
+ { TDX_MR_(rtmr3) | TSM_MR_F_RTMR },
+ { TDX_MR_(mrtd) },
+ { TDX_MR_(mrconfigid) | TSM_MR_F_NOHASH },
+ { TDX_MR_(mrowner) | TSM_MR_F_NOHASH },
+ { TDX_MR_(mrownerconfig) | TSM_MR_F_NOHASH },
+};
+#undef TDX_MR_
+
+static int tdx_mr_refresh(const struct tsm_measurements *tm)
+{
+ return tdx_do_report(KERNEL_SOCKPTR(NULL), KERNEL_SOCKPTR(NULL));
+}
+
+static int tdx_mr_extend(const struct tsm_measurements *tm,
+ const struct tsm_measurement_register *mr,
+ const u8 *data)
+{
+ return tdx_do_extend(mr - tm->mrs, data);
+}
+
+static struct tsm_measurements tdx_measurements = {
+ .mrs = tdx_mrs,
+ .nr_mrs = ARRAY_SIZE(tdx_mrs),
+ .refresh = tdx_mr_refresh,
+ .write = tdx_mr_extend,
+};
+
+static const struct attribute_group *tdx_mr_init(void)
+{
+ const struct attribute_group *g;
+ int rc;
+
+ u8 *buf __free(kfree) = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ tdx_report_buf = buf;
+ rc = tdx_mr_refresh(&tdx_measurements);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /*
+ * @mr_value was initialized with the offset only; the base address
+ * is added here.
+ */
+ for (size_t i = 0; i < ARRAY_SIZE(tdx_mrs); ++i)
+ *(long *)&tdx_mrs[i].mr_value += (long)buf;
+
+ g = tsm_mr_create_attribute_group(&tdx_measurements);
+ if (!IS_ERR(g))
+ tdx_report_buf = no_free_ptr(buf);
+
+ return g;
+}
+
+static void tdx_mr_deinit(const struct attribute_group *mr_grp)
+{
+ tsm_mr_free_attribute_group(mr_grp);
+ kfree(tdx_report_buf);
+}
+
/*
* Intel's SGX QE implementation generally uses Quote size less
* than 8K (2K Quote data + ~5K of certificate blob).
@@ -68,37 +202,8 @@ static u32 getquote_timeout = 30;
static long tdx_get_report0(struct tdx_report_req __user *req)
{
- u8 *reportdata, *tdreport;
- long ret;
-
- reportdata = kmalloc(TDX_REPORTDATA_LEN, GFP_KERNEL);
- if (!reportdata)
- return -ENOMEM;
-
- tdreport = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
- if (!tdreport) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(reportdata, req->reportdata, TDX_REPORTDATA_LEN)) {
- ret = -EFAULT;
- goto out;
- }
-
- /* Generate TDREPORT0 using "TDG.MR.REPORT" TDCALL */
- ret = tdx_mcall_get_report0(reportdata, tdreport);
- if (ret)
- goto out;
-
- if (copy_to_user(req->tdreport, tdreport, TDX_REPORT_LEN))
- ret = -EFAULT;
-
-out:
- kfree(reportdata);
- kfree(tdreport);
-
- return ret;
+ return tdx_do_report(USER_SOCKPTR(req->reportdata),
+ USER_SOCKPTR(req->tdreport));
}
static void free_quote_buf(void *buf)
@@ -157,53 +262,24 @@ static int wait_for_quote_completion(struct tdx_quote_buf *quote_buf, u32 timeou
return (i == timeout) ? -ETIMEDOUT : 0;
}
-static int tdx_report_new(struct tsm_report *report, void *data)
+static int tdx_report_new_locked(struct tsm_report *report, void *data)
{
- u8 *buf, *reportdata = NULL, *tdreport = NULL;
+ u8 *buf;
struct tdx_quote_buf *quote_buf = quote_data;
- struct tsm_desc *desc = &report->desc;
+ struct tsm_report_desc *desc = &report->desc;
int ret;
u64 err;
- /* TODO: switch to guard(mutex_intr) */
- if (mutex_lock_interruptible(&quote_lock))
- return -EINTR;
-
/*
* If the previous request is timedout or interrupted, and the
* Quote buf status is still in GET_QUOTE_IN_FLIGHT (owned by
* VMM), don't permit any new request.
*/
- if (quote_buf->status == GET_QUOTE_IN_FLIGHT) {
- ret = -EBUSY;
- goto done;
- }
-
- if (desc->inblob_len != TDX_REPORTDATA_LEN) {
- ret = -EINVAL;
- goto done;
- }
-
- reportdata = kmalloc(TDX_REPORTDATA_LEN, GFP_KERNEL);
- if (!reportdata) {
- ret = -ENOMEM;
- goto done;
- }
+ if (quote_buf->status == GET_QUOTE_IN_FLIGHT)
+ return -EBUSY;
- tdreport = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
- if (!tdreport) {
- ret = -ENOMEM;
- goto done;
- }
-
- memcpy(reportdata, desc->inblob, desc->inblob_len);
-
- /* Generate TDREPORT0 using "TDG.MR.REPORT" TDCALL */
- ret = tdx_mcall_get_report0(reportdata, tdreport);
- if (ret) {
- pr_err("GetReport call failed\n");
- goto done;
- }
+ if (desc->inblob_len != TDX_REPORTDATA_LEN)
+ return -EINVAL;
memset(quote_data, 0, GET_QUOTE_BUF_SIZE);
@@ -211,26 +287,26 @@ static int tdx_report_new(struct tsm_report *report, void *data)
quote_buf->version = GET_QUOTE_CMD_VER;
quote_buf->in_len = TDX_REPORT_LEN;
- memcpy(quote_buf->data, tdreport, TDX_REPORT_LEN);
+ ret = tdx_do_report(KERNEL_SOCKPTR(desc->inblob),
+ KERNEL_SOCKPTR(quote_buf->data));
+ if (ret)
+ return ret;
err = tdx_hcall_get_quote(quote_data, GET_QUOTE_BUF_SIZE);
if (err) {
pr_err("GetQuote hypercall failed, status:%llx\n", err);
- ret = -EIO;
- goto done;
+ return -EIO;
}
ret = wait_for_quote_completion(quote_buf, getquote_timeout);
if (ret) {
pr_err("GetQuote request timedout\n");
- goto done;
+ return ret;
}
buf = kvmemdup(quote_buf->data, quote_buf->out_len, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto done;
- }
+ if (!buf)
+ return -ENOMEM;
report->outblob = buf;
report->outblob_len = quote_buf->out_len;
@@ -239,14 +315,16 @@ static int tdx_report_new(struct tsm_report *report, void *data)
* TODO: parse the PEM-formatted cert chain out of the quote buffer when
* provided
*/
-done:
- mutex_unlock(&quote_lock);
- kfree(reportdata);
- kfree(tdreport);
return ret;
}
+static int tdx_report_new(struct tsm_report *report, void *data)
+{
+ scoped_cond_guard(mutex_intr, return -EINTR, &quote_lock)
+ return tdx_report_new_locked(report, data);
+}
+
static bool tdx_report_attr_visible(int n)
{
switch (n) {
@@ -285,10 +363,16 @@ static const struct file_operations tdx_guest_fops = {
.unlocked_ioctl = tdx_guest_ioctl,
};
+static const struct attribute_group *tdx_attr_groups[] = {
+ NULL, /* measurements */
+ NULL
+};
+
static struct miscdevice tdx_misc_dev = {
.name = KBUILD_MODNAME,
.minor = MISC_DYNAMIC_MINOR,
.fops = &tdx_guest_fops,
+ .groups = tdx_attr_groups,
};
static const struct x86_cpu_id tdx_guest_ids[] = {
@@ -297,7 +381,7 @@ static const struct x86_cpu_id tdx_guest_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, tdx_guest_ids);
-static const struct tsm_ops tdx_tsm_ops = {
+static const struct tsm_report_ops tdx_tsm_ops = {
.name = KBUILD_MODNAME,
.report_new = tdx_report_new,
.report_attr_visible = tdx_report_attr_visible,
@@ -311,9 +395,13 @@ static int __init tdx_guest_init(void)
if (!x86_match_cpu(tdx_guest_ids))
return -ENODEV;
+ tdx_attr_groups[0] = tdx_mr_init();
+ if (IS_ERR(tdx_attr_groups[0]))
+ return PTR_ERR(tdx_attr_groups[0]);
+
ret = misc_register(&tdx_misc_dev);
if (ret)
- return ret;
+ goto deinit_mr;
quote_data = alloc_quote_buf();
if (!quote_data) {
@@ -322,7 +410,7 @@ static int __init tdx_guest_init(void)
goto free_misc;
}
- ret = tsm_register(&tdx_tsm_ops, NULL);
+ ret = tsm_report_register(&tdx_tsm_ops, NULL);
if (ret)
goto free_quote;
@@ -332,6 +420,8 @@ free_quote:
free_quote_buf(quote_data);
free_misc:
misc_deregister(&tdx_misc_dev);
+deinit_mr:
+ tdx_mr_deinit(tdx_attr_groups[0]);
return ret;
}
@@ -339,9 +429,10 @@ module_init(tdx_guest_init);
static void __exit tdx_guest_exit(void)
{
- tsm_unregister(&tdx_tsm_ops);
+ tsm_report_unregister(&tdx_tsm_ops);
free_quote_buf(quote_data);
misc_deregister(&tdx_misc_dev);
+ tdx_mr_deinit(tdx_attr_groups[0]);
}
module_exit(tdx_guest_exit);
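tdx_do_report() above uses sockptr_t so that one helper serves both the ioctl
path (user pointers) and in-kernel callers such as the quote path (kernel
pointers). A condensed, self-contained sketch of the pattern (foo_fill() is
illustrative):

static int foo_fill(u8 *dst, sockptr_t src, size_t len)
{
	/* copy_from_sockptr() dispatches on the user/kernel flag */
	if (!sockptr_is_null(src) && copy_from_sockptr(dst, src, len))
		return -EFAULT;

	return 0;
}

/*
 * Callers pick the flavor:
 *	foo_fill(buf, USER_SOCKPTR(req->reportdata), TDX_REPORTDATA_LEN);
 *	foo_fill(buf, KERNEL_SOCKPTR(desc->inblob), TDX_REPORTDATA_LEN);
 */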
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index f1674f3ed923..b177a534b6a4 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -419,7 +419,7 @@ static void vbg_balloon_work(struct work_struct *work)
*/
static void vbg_heartbeat_timer(struct timer_list *t)
{
- struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
+ struct vbg_dev *gdev = timer_container_of(gdev, t, heartbeat_timer);
vbg_req_perform(gdev, gdev->guest_heartbeat_req);
mod_timer(&gdev->heartbeat_timer,
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 2eb747311bfd..6db5235a7693 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -188,4 +188,68 @@ config VIRTIO_DEBUG
If unsure, say N.
+config VIRTIO_RTC
+ tristate "Virtio RTC driver"
+ depends on VIRTIO
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver provides the current time from a Virtio RTC device. The driver
+ provides the time through one or more clocks. The Virtio RTC PTP
+ clocks and/or the Real Time Clock driver for Virtio RTC must be
+ enabled to expose the clocks to userspace.
+
+ To compile this code as a module, choose M here: the module will be
+ called virtio_rtc.
+
+ If unsure, say M.
+
+if VIRTIO_RTC
+
+comment "WARNING: Consider enabling VIRTIO_RTC_PTP and/or VIRTIO_RTC_CLASS."
+ depends on !VIRTIO_RTC_PTP && !VIRTIO_RTC_CLASS
+
+comment "Enable PTP_1588_CLOCK in order to enable VIRTIO_RTC_PTP."
+ depends on PTP_1588_CLOCK=n
+
+config VIRTIO_RTC_PTP
+ bool "Virtio RTC PTP clocks"
+ default y
+ depends on PTP_1588_CLOCK
+ help
+ This exposes any Virtio RTC clocks as PTP Hardware Clocks (PHCs) to
+ userspace. The PHC sysfs attribute "clock_name" describes the clock
+ type.
+
+ If unsure, say Y.
+
+config VIRTIO_RTC_ARM
+ bool "Virtio RTC cross-timestamping using Arm Generic Timer"
+ default y
+ depends on VIRTIO_RTC_PTP && ARM_ARCH_TIMER
+ help
+ This enables Virtio RTC cross-timestamping using the Arm Generic Timer.
+ It only has an effect if the Virtio RTC device also supports this. The
+ cross-timestamp is available through the PTP clock driver precise
+ cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE2 aka
+ PTP_SYS_OFFSET_PRECISE).
+
+ If unsure, say Y.
+
+comment "Enable RTC_CLASS in order to enable VIRTIO_RTC_CLASS."
+ depends on RTC_CLASS=n
+
+config VIRTIO_RTC_CLASS
+ bool "Real Time Clock driver for Virtio RTC"
+ default y
+ depends on RTC_CLASS
+ help
+ This exposes the Virtio RTC UTC-like clock as a Linux Real Time Clock.
+ It only has an effect if the Virtio RTC device has a UTC-like clock
+ which smears leap seconds to avoid steps. The Real Time Clock is
+ read-only, and may support setting an alarm.
+
+ If unsure, say Y.
+
+endif # VIRTIO_RTC
+
endif # VIRTIO_MENU
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 58b2b0489fc9..eefcfe90d6b8 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -14,3 +14,8 @@ obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
obj-$(CONFIG_VIRTIO_DMA_SHARED_BUFFER) += virtio_dma_buf.o
obj-$(CONFIG_VIRTIO_DEBUG) += virtio_debug.o
+obj-$(CONFIG_VIRTIO_RTC) += virtio_rtc.o
+virtio_rtc-y := virtio_rtc_driver.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_PTP) += virtio_rtc_ptp.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_ARM) += virtio_rtc_arm.o
+virtio_rtc-$(CONFIG_VIRTIO_RTC_CLASS) += virtio_rtc_class.o
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index d50fe030d825..7182f43ed055 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -48,6 +48,7 @@ void vp_modern_avq_done(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ unsigned int status_size = sizeof(struct virtio_admin_cmd_status);
struct virtio_admin_cmd *cmd;
unsigned long flags;
unsigned int len;
@@ -56,7 +57,17 @@ void vp_modern_avq_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((cmd = virtqueue_get_buf(vq, &len))) {
- cmd->result_sg_size = len;
+ /* If the number of bytes written by the device is less
+ * than the size of struct virtio_admin_cmd_status, the
+ * remaining status bytes will remain zero-initialized,
+ * since the buffer was zeroed during allocation.
+ * In this case, set the size of command_specific_result
+ * to 0.
+ */
+ if (len < status_size)
+ cmd->result_sg_size = 0;
+ else
+ cmd->result_sg_size = len - status_size;
complete(&cmd->completion);
}
} while (!virtqueue_enable_cb(vq));
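Worked numbers for the accounting above, assuming the usual 8-byte
struct virtio_admin_cmd_status (the lengths are illustrative):

	/*
	 * len == 20 -> result_sg_size = 20 - 8 = 12 bytes of
	 *	command-specific result
	 * len == 4  -> device wrote less than the status struct;
	 *	result_sg_size = 0, and the unwritten status bytes
	 *	stay zero from the zeroed allocation
	 */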
diff --git a/drivers/virtio/virtio_rtc_arm.c b/drivers/virtio/virtio_rtc_arm.c
new file mode 100644
index 000000000000..211299d72870
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_arm.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Provides cross-timestamp params for Arm.
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clocksource_ids.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/* see header for doc */
+
+int viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id)
+{
+ *hw_counter = VIRTIO_RTC_COUNTER_ARM_VCT;
+ *cs_id = CSID_ARM_ARCH_COUNTER;
+
+ return 0;
+}
diff --git a/drivers/virtio/virtio_rtc_class.c b/drivers/virtio/virtio_rtc_class.c
new file mode 100644
index 000000000000..05d6d28255cf
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_class.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * virtio_rtc RTC class driver
+ *
+ * Copyright (C) 2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/math64.h>
+#include <linux/overflow.h>
+#include <linux/rtc.h>
+#include <linux/time64.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/**
+ * struct viortc_class - RTC class wrapper
+ * @viortc: virtio_rtc device data
+ * @rtc: RTC device
+ * @vio_clk_id: virtio_rtc clock id
+ * @stopped: Whether RTC ops are disallowed. Access protected by rtc_lock().
+ */
+struct viortc_class {
+ struct viortc_dev *viortc;
+ struct rtc_device *rtc;
+ u16 vio_clk_id;
+ bool stopped;
+};
+
+/**
+ * viortc_class_get_locked() - get RTC class wrapper, if ops allowed
+ * @dev: virtio device
+ *
+ * Gets the RTC class wrapper from the virtio device, if it is available and
+ * ops are allowed.
+ *
+ * Context: Caller must hold rtc_lock().
+ * Return: RTC class wrapper if available and ops allowed, ERR_PTR otherwise.
+ */
+static struct viortc_class *viortc_class_get_locked(struct device *dev)
+{
+ struct viortc_class *viortc_class;
+
+ viortc_class = viortc_class_from_dev(dev);
+ if (IS_ERR(viortc_class))
+ return viortc_class;
+
+ if (viortc_class->stopped)
+ return ERR_PTR(-EBUSY);
+
+ return viortc_class;
+}
+
+/**
+ * viortc_class_read_time() - RTC class op read_time
+ * @dev: virtio device
+ * @tm: read time
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct viortc_class *viortc_class;
+ time64_t sec;
+ int ret;
+ u64 ns;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ ret = viortc_read(viortc_class->viortc, viortc_class->vio_clk_id, &ns);
+ if (ret)
+ return ret;
+
+ sec = div_u64(ns, NSEC_PER_SEC);
+
+ rtc_time64_to_tm(sec, tm);
+
+ return 0;
+}
+
+/**
+ * viortc_class_read_alarm() - RTC class op read_alarm
+ * @dev: virtio device
+ * @alrm: alarm read out
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct viortc_class *viortc_class;
+ time64_t alarm_time_sec;
+ u64 alarm_time_ns;
+ bool enabled;
+ int ret;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ ret = viortc_read_alarm(viortc_class->viortc, viortc_class->vio_clk_id,
+ &alarm_time_ns, &enabled);
+ if (ret)
+ return ret;
+
+ alarm_time_sec = div_u64(alarm_time_ns, NSEC_PER_SEC);
+ rtc_time64_to_tm(alarm_time_sec, &alrm->time);
+
+ alrm->enabled = enabled;
+
+ return 0;
+}
+
+/**
+ * viortc_class_set_alarm() - RTC class op set_alarm
+ * @dev: virtio device
+ * @alrm: alarm to set
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct viortc_class *viortc_class;
+ time64_t alarm_time_sec;
+ u64 alarm_time_ns;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ alarm_time_sec = rtc_tm_to_time64(&alrm->time);
+
+ if (alarm_time_sec < 0)
+ return -EINVAL;
+
+ if (check_mul_overflow((u64)alarm_time_sec, (u64)NSEC_PER_SEC,
+ &alarm_time_ns))
+ return -EINVAL;
+
+ return viortc_set_alarm(viortc_class->viortc, viortc_class->vio_clk_id,
+ alarm_time_ns, alrm->enabled);
+}
+
+/**
+ * viortc_class_alarm_irq_enable() - RTC class op alarm_irq_enable
+ * @dev: virtio device
+ * @enabled: enable or disable alarm IRQ
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_class_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct viortc_class *viortc_class;
+
+ viortc_class = viortc_class_get_locked(dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ return viortc_set_alarm_enabled(viortc_class->viortc,
+ viortc_class->vio_clk_id, enabled);
+}
+
+static const struct rtc_class_ops viortc_class_ops = {
+ .read_time = viortc_class_read_time,
+ .read_alarm = viortc_class_read_alarm,
+ .set_alarm = viortc_class_set_alarm,
+ .alarm_irq_enable = viortc_class_alarm_irq_enable,
+};
+
+/**
+ * viortc_class_alarm() - propagate alarm notification as alarm interrupt
+ * @viortc_class: RTC class wrapper
+ * @vio_clk_id: virtio_rtc clock id
+ *
+ * Context: Any context.
+ */
+void viortc_class_alarm(struct viortc_class *viortc_class, u16 vio_clk_id)
+{
+ if (vio_clk_id != viortc_class->vio_clk_id) {
+ dev_warn_ratelimited(&viortc_class->rtc->dev,
+ "ignoring alarm for clock id %d, expected id %d\n",
+ vio_clk_id, viortc_class->vio_clk_id);
+ return;
+ }
+
+ rtc_update_irq(viortc_class->rtc, 1, RTC_AF | RTC_IRQF);
+}
+
+/**
+ * viortc_class_stop() - disallow RTC class ops
+ * @viortc_class: RTC class wrapper
+ *
+ * Context: Process context. Caller must NOT hold rtc_lock().
+ */
+void viortc_class_stop(struct viortc_class *viortc_class)
+{
+ rtc_lock(viortc_class->rtc);
+
+ viortc_class->stopped = true;
+
+ rtc_unlock(viortc_class->rtc);
+}
+
+/**
+ * viortc_class_register() - register RTC class device
+ * @viortc_class: RTC class wrapper
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_class_register(struct viortc_class *viortc_class)
+{
+ return devm_rtc_register_device(viortc_class->rtc);
+}
+
+/**
+ * viortc_class_init() - init RTC class wrapper and device
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @have_alarm: have alarm feature
+ * @parent_dev: virtio device
+ *
+ * Context: Process context.
+ * Return: RTC class wrapper on success, ERR_PTR otherwise.
+ */
+struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id, bool have_alarm,
+ struct device *parent_dev)
+{
+ struct viortc_class *viortc_class;
+ struct rtc_device *rtc;
+
+ viortc_class =
+ devm_kzalloc(parent_dev, sizeof(*viortc_class), GFP_KERNEL);
+ if (!viortc_class)
+ return ERR_PTR(-ENOMEM);
+
+ rtc = devm_rtc_allocate_device(parent_dev);
+ if (IS_ERR(rtc))
+ return ERR_CAST(rtc);
+
+ viortc_class->viortc = viortc;
+ viortc_class->rtc = rtc;
+ viortc_class->vio_clk_id = vio_clk_id;
+
+ if (!have_alarm)
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+
+ rtc->ops = &viortc_class_ops;
+ rtc->range_max = div_u64(U64_MAX, NSEC_PER_SEC);
+
+ return viortc_class;
+}
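Note on the range_max choice above: the device reports time as a u64 nanosecond count, so the largest representable RTC time is div_u64(U64_MAX, NSEC_PER_SEC) = 18446744073 s, roughly 584 years past the epoch, which rtc_time64_to_tm() handles without overflow.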
diff --git a/drivers/virtio/virtio_rtc_driver.c b/drivers/virtio/virtio_rtc_driver.c
new file mode 100644
index 000000000000..a57d5e06e19d
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_driver.c
@@ -0,0 +1,1407 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * virtio_rtc driver core
+ *
+ * Copyright (C) 2022-2024 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+#define VIORTC_ALARMQ_BUF_CAP sizeof(union virtio_rtc_notif_alarmq)
+
+/* virtqueue order */
+enum {
+ VIORTC_REQUESTQ,
+ VIORTC_ALARMQ,
+ VIORTC_MAX_NR_QUEUES,
+};
+
+/**
+ * struct viortc_vq - virtqueue abstraction
+ * @vq: virtqueue
+ * @lock: protects access to vq
+ */
+struct viortc_vq {
+ struct virtqueue *vq;
+ spinlock_t lock;
+};
+
+/**
+ * struct viortc_dev - virtio_rtc device data
+ * @vdev: virtio device
+ * @viortc_class: RTC class wrapper for UTC-like clock, NULL if not available
+ * @vqs: virtqueues
+ * @clocks_to_unregister: Clock references, used only during device removal.
+ *			 Any other use would race against device creation,
+ *			 which sets these pointers.
+ * @alarmq_bufs: alarmq buffers list
+ * @num_alarmq_bufs: # of alarmq buffers
+ * @num_clocks: # of virtio_rtc clocks
+ */
+struct viortc_dev {
+ struct virtio_device *vdev;
+ struct viortc_class *viortc_class;
+ struct viortc_vq vqs[VIORTC_MAX_NR_QUEUES];
+ struct viortc_ptp_clock **clocks_to_unregister;
+ void **alarmq_bufs;
+ unsigned int num_alarmq_bufs;
+ u16 num_clocks;
+};
+
+/**
+ * struct viortc_msg - Message requested by driver, responded by device.
+ * @viortc: device data
+ * @req: request buffer
+ * @resp: response buffer
+ * @responded: vqueue callback signals response reception
+ * @refcnt: Message reference count, message and buffers will be deallocated
+ * once 0. refcnt is decremented in the vqueue callback and in the
+ * thread waiting on the responded completion.
+ * If a message response wait function times out, the message will be
+ * freed upon late reception (refcnt will reach 0 in the callback), or
+ * device removal.
+ * @req_size: size of request in bytes
+ * @resp_cap: maximum size of response in bytes
+ * @resp_actual_size: actual size of response
+ */
+struct viortc_msg {
+ struct viortc_dev *viortc;
+ void *req;
+ void *resp;
+ struct completion responded;
+ refcount_t refcnt;
+ unsigned int req_size;
+ unsigned int resp_cap;
+ unsigned int resp_actual_size;
+};
+
+/**
+ * viortc_class_from_dev() - Get RTC class object from virtio device.
+ * @dev: virtio device
+ *
+ * Context: Any context.
+ * Return: RTC class object if available, ERR_PTR otherwise.
+ */
+struct viortc_class *viortc_class_from_dev(struct device *dev)
+{
+ struct virtio_device *vdev;
+ struct viortc_dev *viortc;
+
+ vdev = container_of(dev, typeof(*vdev), dev);
+ viortc = vdev->priv;
+
+ return viortc->viortc_class ?: ERR_PTR(-ENODEV);
+}
+
+/**
+ * viortc_alarms_supported() - Whether device and driver support alarms.
+ * @vdev: virtio device
+ *
+ * NB: The device and the driver do not necessarily support alarms for the
+ * same clocks.
+ *
+ * Context: Any context.
+ * Return: True if both device and driver can support alarms.
+ */
+static bool viortc_alarms_supported(struct virtio_device *vdev)
+{
+ return IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS) &&
+ virtio_has_feature(vdev, VIRTIO_RTC_F_ALARM);
+}
+
+/**
+ * viortc_feed_vq() - Make a device write-only buffer available.
+ * @viortc: device data
+ * @vq: notification virtqueue
+ * @buf: buffer
+ * @buf_len: buffer capacity in bytes
+ * @data: token, identifying buffer
+ *
+ * Context: Caller must prevent concurrent access to vq.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_feed_vq(struct viortc_dev *viortc, struct virtqueue *vq,
+ void *buf, unsigned int buf_len, void *data)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, buf, buf_len);
+
+ return virtqueue_add_inbuf(vq, &sg, 1, data, GFP_ATOMIC);
+}
+
+/**
+ * viortc_msg_init() - Allocate and initialize requestq message.
+ * @viortc: device data
+ * @msg_type: virtio_rtc message type
+ * @req_size: size of request buffer to be allocated
+ * @resp_cap: size of response buffer to be allocated
+ *
+ * Initializes the message refcnt to 2. The refcnt will be decremented once in
+ * the virtqueue callback, and once in the thread waiting on the message (on
+ * completion or timeout).
+ *
+ * Context: Process context.
+ * Return: Message pointer on success, NULL on allocation failure.
+ */
+static struct viortc_msg *viortc_msg_init(struct viortc_dev *viortc,
+ u16 msg_type, unsigned int req_size,
+ unsigned int resp_cap)
+{
+ struct device *dev = &viortc->vdev->dev;
+ struct virtio_rtc_req_head *req_head;
+ struct viortc_msg *msg;
+
+ msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ init_completion(&msg->responded);
+
+ msg->req = devm_kzalloc(dev, req_size, GFP_KERNEL);
+ if (!msg->req)
+ goto err_free_msg;
+
+ req_head = msg->req;
+
+ msg->resp = devm_kzalloc(dev, resp_cap, GFP_KERNEL);
+ if (!msg->resp)
+ goto err_free_msg_req;
+
+ msg->viortc = viortc;
+ msg->req_size = req_size;
+ msg->resp_cap = resp_cap;
+
+ refcount_set(&msg->refcnt, 2);
+
+ req_head->msg_type = virtio_cpu_to_le(msg_type, req_head->msg_type);
+
+ return msg;
+
+err_free_msg_req:
+ devm_kfree(dev, msg->req);
+
+err_free_msg:
+ devm_kfree(dev, msg);
+
+ return NULL;
+}
+
+/**
+ * viortc_msg_release() - Decrement message refcnt, potentially free message.
+ * @msg: message requested by driver
+ *
+ * Context: Any context.
+ */
+static void viortc_msg_release(struct viortc_msg *msg)
+{
+ struct device *dev;
+
+ if (refcount_dec_and_test(&msg->refcnt)) {
+ dev = &msg->viortc->vdev->dev;
+
+ devm_kfree(dev, msg->req);
+ devm_kfree(dev, msg->resp);
+ devm_kfree(dev, msg);
+ }
+}
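+
+/*
+ * Intended message lifecycle (a sketch; either release may happen first):
+ *
+ *   viortc_msg_init()                        refcnt == 2
+ *   waiter returns from viortc_msg_xfer(),
+ *   caller calls viortc_msg_release()        refcnt 2 -> 1
+ *   requestq callback (possibly late) calls
+ *   viortc_msg_release()                     refcnt 1 -> 0, buffers freed
+ */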
+
+/**
+ * viortc_do_cb() - generic virtqueue callback logic
+ * @vq: virtqueue
+ * @handle_buf: function to process a used buffer
+ *
+ * Context: virtqueue callback, typically interrupt. Takes and releases vq lock.
+ */
+static void viortc_do_cb(struct virtqueue *vq,
+ void (*handle_buf)(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc))
+{
+ struct viortc_dev *viortc = vq->vdev->priv;
+ struct viortc_vq *viortc_vq;
+ bool cb_enabled = true;
+ unsigned long flags;
+ unsigned int len;
+ void *token;
+
+ viortc_vq = &viortc->vqs[vq->index];
+
+ for (;;) {
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ if (cb_enabled) {
+ virtqueue_disable_cb(vq);
+ cb_enabled = false;
+ }
+
+ token = virtqueue_get_buf(vq, &len);
+ if (!token) {
+ if (virtqueue_enable_cb(vq)) {
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+ return;
+ }
+ cb_enabled = true;
+ }
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+
+ if (token)
+ handle_buf(token, len, vq, viortc_vq, viortc);
+ }
+}
+
+/**
+ * viortc_requestq_hdlr() - process a requestq used buffer
+ * @token: token identifying the buffer
+ * @len: bytes written by device
+ * @vq: virtqueue
+ * @viortc_vq: device specific data for virtqueue
+ * @viortc: device data
+ *
+ * Signals completion for each received message.
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_requestq_hdlr(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc)
+{
+ struct viortc_msg *msg = token;
+
+ msg->resp_actual_size = len;
+
+ complete(&msg->responded);
+ viortc_msg_release(msg);
+}
+
+/**
+ * viortc_cb_requestq() - callback for requestq
+ * @vq: virtqueue
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_cb_requestq(struct virtqueue *vq)
+{
+ viortc_do_cb(vq, viortc_requestq_hdlr);
+}
+
+/**
+ * viortc_alarmq_hdlr() - process an alarmq used buffer
+ * @token: token identifying the buffer
+ * @len: bytes written by device
+ * @vq: virtqueue
+ * @viortc_vq: device specific data for virtqueue
+ * @viortc: device data
+ *
+ * Processes a VIRTIO_RTC_NOTIF_ALARM notification by calling the RTC class
+ * driver. Makes the buffer available again.
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_alarmq_hdlr(void *token, unsigned int len,
+ struct virtqueue *vq,
+ struct viortc_vq *viortc_vq,
+ struct viortc_dev *viortc)
+{
+ struct virtio_rtc_notif_alarm *notif = token;
+ struct virtio_rtc_notif_head *head = token;
+ unsigned long flags;
+ u16 clock_id;
+ bool notify;
+
+ if (len < sizeof(*head)) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring notification with short header\n",
+ __func__);
+ goto feed_vq;
+ }
+
+ if (virtio_le_to_cpu(head->msg_type) != VIRTIO_RTC_NOTIF_ALARM) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring unknown notification type 0x%x\n",
+ __func__, virtio_le_to_cpu(head->msg_type));
+ goto feed_vq;
+ }
+
+ if (len < sizeof(*notif)) {
+ dev_err_ratelimited(&viortc->vdev->dev,
+ "%s: ignoring too small alarm notification\n",
+ __func__);
+ goto feed_vq;
+ }
+
+ clock_id = virtio_le_to_cpu(notif->clock_id);
+
+ if (!viortc->viortc_class)
+ dev_warn_ratelimited(&viortc->vdev->dev,
+ "ignoring alarm, no RTC class device available\n");
+ else
+ viortc_class_alarm(viortc->viortc_class, clock_id);
+
+feed_vq:
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ if (viortc_feed_vq(viortc, vq, notif, VIORTC_ALARMQ_BUF_CAP, token))
+ dev_warn(&viortc->vdev->dev,
+ "%s: failed to re-expose input buffer\n", __func__);
+
+ notify = virtqueue_kick_prepare(vq);
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+
+ if (notify)
+ virtqueue_notify(vq);
+}
+
+/**
+ * viortc_cb_alarmq() - callback for alarmq
+ * @vq: virtqueue
+ *
+ * Context: virtqueue callback
+ */
+static void viortc_cb_alarmq(struct virtqueue *vq)
+{
+ viortc_do_cb(vq, viortc_alarmq_hdlr);
+}
+
+/**
+ * viortc_get_resp_errno() - converts virtio_rtc errnos to system errnos
+ * @resp_head: message response header
+ *
+ * Return: negative system errno, or 0
+ */
+static int viortc_get_resp_errno(struct virtio_rtc_resp_head *resp_head)
+{
+ switch (virtio_le_to_cpu(resp_head->status)) {
+ case VIRTIO_RTC_S_OK:
+ return 0;
+ case VIRTIO_RTC_S_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case VIRTIO_RTC_S_EINVAL:
+ return -EINVAL;
+ case VIRTIO_RTC_S_ENODEV:
+ return -ENODEV;
+ case VIRTIO_RTC_S_EIO:
+ default:
+ return -EIO;
+ }
+}
+
+/**
+ * viortc_msg_xfer() - send message request, wait until message response
+ * @vq: virtqueue
+ * @msg: message with driver request
+ * @timeout_jiffies: message response timeout, 0 for no timeout
+ *
+ * Context: Process context. Takes and releases vq.lock. May sleep.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_msg_xfer(struct viortc_vq *vq, struct viortc_msg *msg,
+ unsigned long timeout_jiffies)
+{
+ struct scatterlist out_sg[1];
+ struct scatterlist in_sg[1];
+ struct scatterlist *sgs[2];
+ unsigned long flags;
+ long timeout_ret;
+ bool notify;
+ int ret;
+
+ sgs[0] = out_sg;
+ sgs[1] = in_sg;
+
+ sg_init_one(out_sg, msg->req, msg->req_size);
+ sg_init_one(in_sg, msg->resp, msg->resp_cap);
+
+ spin_lock_irqsave(&vq->lock, flags);
+
+ ret = virtqueue_add_sgs(vq->vq, sgs, 1, 1, msg, GFP_ATOMIC);
+ if (ret) {
+ spin_unlock_irqrestore(&vq->lock, flags);
+ /*
+ * Release in place of the response callback, which will never
+ * come.
+ */
+ viortc_msg_release(msg);
+ return ret;
+ }
+
+ notify = virtqueue_kick_prepare(vq->vq);
+
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ if (notify)
+ virtqueue_notify(vq->vq);
+
+ if (timeout_jiffies) {
+ timeout_ret = wait_for_completion_interruptible_timeout(
+ &msg->responded, timeout_jiffies);
+
+ if (!timeout_ret)
+ return -ETIMEDOUT;
+ else if (timeout_ret < 0)
+ return (int)timeout_ret;
+ } else {
+ ret = wait_for_completion_interruptible(&msg->responded);
+ if (ret)
+ return ret;
+ }
+
+ if (msg->resp_actual_size < sizeof(struct virtio_rtc_resp_head))
+ return -EINVAL;
+
+ ret = viortc_get_resp_errno(msg->resp);
+ if (ret)
+ return ret;
+
+ /*
+ * There is not yet a case where returning a short message would make
+ * sense, so consider any deviation an error.
+ */
+ if (msg->resp_actual_size != msg->resp_cap)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * common message handle macros for messages of different types
+ */
+
+/**
+ * VIORTC_DECLARE_MSG_HDL_ONSTACK() - declare message handle on stack
+ * @hdl: message handle name
+ * @msg_id: message type id
+ * @msg_req: message request type
+ * @msg_resp: message response type
+ */
+#define VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, msg_id, msg_req, msg_resp) \
+ struct { \
+ struct viortc_msg *msg; \
+ msg_req *req; \
+ msg_resp *resp; \
+ unsigned int req_size; \
+ unsigned int resp_cap; \
+ u16 msg_type; \
+ } hdl = { \
+ NULL, NULL, NULL, sizeof(msg_req), sizeof(msg_resp), (msg_id), \
+ }
+
+/**
+ * VIORTC_MSG() - extract message from message handle
+ * @hdl: message handle
+ *
+ * Return: struct viortc_msg
+ */
+#define VIORTC_MSG(hdl) ((hdl).msg)
+
+/**
+ * VIORTC_MSG_INIT() - initialize message handle
+ * @hdl: message handle
+ * @viortc: device data (struct viortc_dev *)
+ *
+ * Context: Process context.
+ * Return: 0 on success, -ENOMEM otherwise.
+ */
+#define VIORTC_MSG_INIT(hdl, viortc) \
+ ({ \
+ typeof(hdl) *_hdl = &(hdl); \
+ \
+ _hdl->msg = viortc_msg_init((viortc), _hdl->msg_type, \
+ _hdl->req_size, _hdl->resp_cap); \
+ if (_hdl->msg) { \
+ _hdl->req = _hdl->msg->req; \
+ _hdl->resp = _hdl->msg->resp; \
+ } \
+ _hdl->msg ? 0 : -ENOMEM; \
+ })
+
+/**
+ * VIORTC_MSG_WRITE() - write a request message field
+ * @hdl: message handle
+ * @dest_member: request message field name
+ * @src_ptr: pointer to data of compatible type
+ *
+ * Writes the field in little-endian format.
+ */
+#define VIORTC_MSG_WRITE(hdl, dest_member, src_ptr) \
+ do { \
+ typeof(hdl) _hdl = (hdl); \
+ typeof(src_ptr) _src_ptr = (src_ptr); \
+ \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(_hdl.req->dest_member)), \
+ *_src_ptr); \
+ \
+ _hdl.req->dest_member = \
+ virtio_cpu_to_le(*_src_ptr, _hdl.req->dest_member); \
+ } while (0)
+
+/**
+ * VIORTC_MSG_READ() - read from a response message field
+ * @hdl: message handle
+ * @src_member: response message field name
+ * @dest_ptr: pointer to data of compatible type
+ *
+ * Converts from little-endian format and writes to dest_ptr.
+ */
+#define VIORTC_MSG_READ(hdl, src_member, dest_ptr) \
+ do { \
+ typeof(dest_ptr) _dest_ptr = (dest_ptr); \
+ \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu((hdl).resp->src_member)), \
+ *_dest_ptr); \
+ \
+ *_dest_ptr = virtio_le_to_cpu((hdl).resp->src_member); \
+ } while (0)
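+
+/*
+ * The typecheck() guards turn type mismatches into build failures. E.g. a
+ * hypothetical caller doing
+ *
+ *	u32 reading32;
+ *	VIORTC_MSG_READ(hdl, clock_reading, &reading32);
+ *
+ * fails to build, since clock_reading is a 64-bit field.
+ */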
+
+/*
+ * read requests
+ */
+
+/* timeout for clock readings, where timeouts are considered non-fatal */
+#define VIORTC_MSG_READ_TIMEOUT secs_to_jiffies(60)
+
+/**
+ * viortc_read() - VIRTIO_RTC_REQ_READ wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @reading: clock reading [ns]
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read(struct viortc_dev *viortc, u16 vio_clk_id, u64 *reading)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ,
+ struct virtio_rtc_req_read,
+ struct virtio_rtc_resp_read);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ VIORTC_MSG_READ_TIMEOUT);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, clock_reading, reading);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_read_cross() - VIRTIO_RTC_REQ_READ_CROSS wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @hw_counter: virtio_rtc HW counter type
+ * @reading: clock reading [ns]
+ * @cycles: HW counter cycles during clock reading
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read_cross(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ u64 *reading, u64 *cycles)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ_CROSS,
+ struct virtio_rtc_req_read_cross,
+ struct virtio_rtc_resp_read_cross);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, hw_counter, &hw_counter);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ VIORTC_MSG_READ_TIMEOUT);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, clock_reading, reading);
+ VIORTC_MSG_READ(hdl, counter_cycles, cycles);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/*
+ * control requests
+ */
+
+/**
+ * viortc_cfg() - VIRTIO_RTC_REQ_CFG wrapper
+ * @viortc: device data
+ * @num_clocks: # of virtio_rtc clocks
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_cfg(struct viortc_dev *viortc, u16 *num_clocks)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CFG,
+ struct virtio_rtc_req_cfg,
+ struct virtio_rtc_resp_cfg);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, num_clocks, num_clocks);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_clock_cap() - VIRTIO_RTC_REQ_CLOCK_CAP wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @type: virtio_rtc clock type
+ * @leap_second_smearing: virtio_rtc smearing variant
+ * @flags: struct virtio_rtc_resp_clock_cap.flags
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_clock_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 *type,
+ u8 *leap_second_smearing, u8 *flags)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CLOCK_CAP,
+ struct virtio_rtc_req_clock_cap,
+ struct virtio_rtc_resp_clock_cap);
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, type, type);
+ VIORTC_MSG_READ(hdl, leap_second_smearing, leap_second_smearing);
+ VIORTC_MSG_READ(hdl, flags, flags);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_cross_cap() - VIRTIO_RTC_REQ_CROSS_CAP wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @hw_counter: virtio_rtc HW counter type
+ * @supported: whether xtstamping is supported for the vio_clk_id/hw_counter pair
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_cross_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ bool *supported)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CROSS_CAP,
+ struct virtio_rtc_req_cross_cap,
+ struct virtio_rtc_resp_cross_cap);
+ u8 flags;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, hw_counter, &hw_counter);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, flags, &flags);
+ *supported = !!(flags & VIRTIO_RTC_FLAG_CROSS_CAP);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_read_alarm() - VIRTIO_RTC_REQ_READ_ALARM wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_time: alarm time in ns
+ * @enabled: whether alarm is enabled
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_read_alarm(struct viortc_dev *viortc, u16 vio_clk_id,
+ u64 *alarm_time, bool *enabled)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ_ALARM,
+ struct virtio_rtc_req_read_alarm,
+ struct virtio_rtc_resp_read_alarm);
+ u8 flags;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+ VIORTC_MSG_READ(hdl, alarm_time, alarm_time);
+ VIORTC_MSG_READ(hdl, flags, &flags);
+
+ *enabled = !!(flags & VIRTIO_RTC_FLAG_ALARM_ENABLED);
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_set_alarm() - VIRTIO_RTC_REQ_SET_ALARM wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_time: alarm time in ns
+ * @alarm_enable: enable or disable alarm
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_set_alarm(struct viortc_dev *viortc, u16 vio_clk_id, u64 alarm_time,
+ bool alarm_enable)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_SET_ALARM,
+ struct virtio_rtc_req_set_alarm,
+ struct virtio_rtc_resp_set_alarm);
+ u8 flags = 0;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ if (alarm_enable)
+ flags |= VIRTIO_RTC_FLAG_ALARM_ENABLED;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, alarm_time, &alarm_time);
+ VIORTC_MSG_WRITE(hdl, flags, &flags);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/**
+ * viortc_set_alarm_enabled() - VIRTIO_RTC_REQ_SET_ALARM_ENABLED wrapper
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @alarm_enable: enable or disable alarm
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_set_alarm_enabled(struct viortc_dev *viortc, u16 vio_clk_id,
+ bool alarm_enable)
+{
+ VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_SET_ALARM_ENABLED,
+ struct virtio_rtc_req_set_alarm_enabled,
+ struct virtio_rtc_resp_set_alarm_enabled);
+ u8 flags = 0;
+ int ret;
+
+ ret = VIORTC_MSG_INIT(hdl, viortc);
+ if (ret)
+ return ret;
+
+ if (alarm_enable)
+ flags |= VIRTIO_RTC_FLAG_ALARM_ENABLED;
+
+ VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
+ VIORTC_MSG_WRITE(hdl, flags, &flags);
+
+ ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
+ 0);
+ if (ret) {
+ dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
+ ret);
+ goto out_release;
+ }
+
+out_release:
+ viortc_msg_release(VIORTC_MSG(hdl));
+
+ return ret;
+}
+
+/*
+ * init, deinit
+ */
+
+/**
+ * viortc_init_rtc_class_clock() - init and register an RTC class device
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @clock_type: virtio_rtc clock type
+ * @flags: struct virtio_rtc_resp_clock_cap.flags
+ *
+ * The clock must be a UTC-like clock.
+ *
+ * Context: Process context.
+ * Return: Positive if registered, zero if not supported by configuration,
+ * negative error code otherwise.
+ */
+static int viortc_init_rtc_class_clock(struct viortc_dev *viortc,
+ u16 vio_clk_id, u8 clock_type, u8 flags)
+{
+ struct virtio_device *vdev = viortc->vdev;
+ struct viortc_class *viortc_class;
+ struct device *dev = &vdev->dev;
+ bool have_alarm;
+
+ if (clock_type != VIRTIO_RTC_CLOCK_UTC_SMEARED) {
+ dev_info(dev,
+ "not creating RTC class device for clock %d, which may step on leap seconds\n",
+ vio_clk_id);
+ return 0;
+ }
+
+ if (viortc->viortc_class) {
+ dev_warn_once(dev,
+ "multiple UTC-like clocks are present, but creating only one RTC class device\n");
+ return 0;
+ }
+
+ have_alarm = viortc_alarms_supported(vdev) &&
+ !!(flags & VIRTIO_RTC_FLAG_ALARM_CAP);
+
+ viortc_class = viortc_class_init(viortc, vio_clk_id, have_alarm, dev);
+ if (IS_ERR(viortc_class))
+ return PTR_ERR(viortc_class);
+
+ viortc->viortc_class = viortc_class;
+
+ if (have_alarm)
+ devm_device_init_wakeup(dev);
+
+ return viortc_class_register(viortc_class) ?: 1;
+}
+
+/**
+ * viortc_init_ptp_clock() - init and register PTP clock
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ * @clock_type: virtio_rtc clock type
+ * @leap_second_smearing: virtio_rtc leap second smearing
+ *
+ * Context: Process context.
+ * Return: Positive if registered, zero if not supported by configuration,
+ * negative error code otherwise.
+ */
+static int viortc_init_ptp_clock(struct viortc_dev *viortc, u16 vio_clk_id,
+ u8 clock_type, u8 leap_second_smearing)
+{
+ struct device *dev = &viortc->vdev->dev;
+ char ptp_clock_name[PTP_CLOCK_NAME_LEN];
+ struct viortc_ptp_clock *vio_ptp;
+
+ snprintf(ptp_clock_name, PTP_CLOCK_NAME_LEN,
+ "Virtio PTP type %hhu/variant %hhu", clock_type,
+ leap_second_smearing);
+
+ vio_ptp = viortc_ptp_register(viortc, dev, vio_clk_id, ptp_clock_name);
+ if (IS_ERR(vio_ptp)) {
+ dev_err(dev, "failed to register PTP clock '%s'\n",
+ ptp_clock_name);
+ return PTR_ERR(vio_ptp);
+ }
+
+ viortc->clocks_to_unregister[vio_clk_id] = vio_ptp;
+
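+	/* NULL from a stubbed viortc_ptp_register() maps to 0: not exposed */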
+ return !!vio_ptp;
+}
+
+/**
+ * viortc_init_clock() - init local representation of virtio_rtc clock
+ * @viortc: device data
+ * @vio_clk_id: virtio_rtc clock id
+ *
+ * Initializes PHC and/or RTC class device to represent virtio_rtc clock.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_init_clock(struct viortc_dev *viortc, u16 vio_clk_id)
+{
+ u8 clock_type, leap_second_smearing, flags;
+ bool is_exposed = false;
+ int ret;
+
+ ret = viortc_clock_cap(viortc, vio_clk_id, &clock_type,
+ &leap_second_smearing, &flags);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS) &&
+ (clock_type == VIRTIO_RTC_CLOCK_UTC ||
+ clock_type == VIRTIO_RTC_CLOCK_UTC_SMEARED ||
+ clock_type == VIRTIO_RTC_CLOCK_UTC_MAYBE_SMEARED)) {
+ ret = viortc_init_rtc_class_clock(viortc, vio_clk_id,
+ clock_type, flags);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ is_exposed = true;
+ }
+
+ if (IS_ENABLED(CONFIG_VIRTIO_RTC_PTP)) {
+ ret = viortc_init_ptp_clock(viortc, vio_clk_id, clock_type,
+ leap_second_smearing);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ is_exposed = true;
+ }
+
+ if (!is_exposed)
+ dev_warn(&viortc->vdev->dev,
+ "cannot expose clock %d (type %d, variant %d) to userspace\n",
+ vio_clk_id, clock_type, leap_second_smearing);
+
+ return 0;
+}
+
+/**
+ * viortc_clocks_deinit() - unregister PHCs, stop RTC ops
+ * @viortc: device data
+ */
+static void viortc_clocks_deinit(struct viortc_dev *viortc)
+{
+ struct viortc_ptp_clock *vio_ptp;
+ unsigned int i;
+
+ for (i = 0; i < viortc->num_clocks; i++) {
+ vio_ptp = viortc->clocks_to_unregister[i];
+
+ if (!vio_ptp)
+ continue;
+
+ viortc->clocks_to_unregister[i] = NULL;
+
+ WARN_ON(viortc_ptp_unregister(vio_ptp, &viortc->vdev->dev));
+ }
+
+ if (viortc->viortc_class)
+ viortc_class_stop(viortc->viortc_class);
+}
+
+/**
+ * viortc_clocks_init() - init local representations of virtio_rtc clocks
+ * @viortc: device data
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_clocks_init(struct viortc_dev *viortc)
+{
+ u16 num_clocks;
+ unsigned int i;
+ int ret;
+
+ ret = viortc_cfg(viortc, &num_clocks);
+ if (ret)
+ return ret;
+
+ if (num_clocks < 1) {
+ dev_err(&viortc->vdev->dev, "device reported 0 clocks\n");
+ return -ENODEV;
+ }
+
+ viortc->num_clocks = num_clocks;
+
+ viortc->clocks_to_unregister =
+ devm_kcalloc(&viortc->vdev->dev, num_clocks,
+ sizeof(*viortc->clocks_to_unregister), GFP_KERNEL);
+ if (!viortc->clocks_to_unregister)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clocks; i++) {
+ ret = viortc_init_clock(viortc, i);
+ if (ret)
+ goto err_deinit_clocks;
+ }
+
+ return 0;
+
+err_deinit_clocks:
+ viortc_clocks_deinit(viortc);
+
+ return ret;
+}
+
+/**
+ * viortc_populate_vq() - populate alarmq with device-writable buffers
+ * @viortc: device data
+ * @viortc_vq: device specific data for virtqueue
+ * @buf_cap: device-writable buffer size in bytes
+ * @lock: lock queue during accesses
+ *
+ * Populates the alarmq with pre-allocated buffers.
+ *
+ * The caller is responsible for kicking the device.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_populate_vq(struct viortc_dev *viortc,
+ struct viortc_vq *viortc_vq, u32 buf_cap,
+ bool lock)
+{
+ unsigned int num_elems, i;
+ struct virtqueue *vq;
+ unsigned long flags;
+ void *buf;
+ int ret;
+
+ num_elems = viortc->num_alarmq_bufs;
+ vq = viortc_vq->vq;
+
+ for (i = 0; i < num_elems; i++) {
+ buf = viortc->alarmq_bufs[i];
+
+ if (lock) {
+ spin_lock_irqsave(&viortc_vq->lock, flags);
+
+ ret = viortc_feed_vq(viortc, vq, buf, buf_cap, buf);
+
+ spin_unlock_irqrestore(&viortc_vq->lock, flags);
+ } else {
+ ret = viortc_feed_vq(viortc, vq, buf, buf_cap, buf);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_alloc_vq_bufs() - allocate alarmq buffers
+ * @viortc: device data
+ * @num_elems: # of buffers
+ * @buf_cap: per-buffer device-writable bytes
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_alloc_vq_bufs(struct viortc_dev *viortc,
+ unsigned int num_elems, u32 buf_cap)
+{
+ struct device *dev = &viortc->vdev->dev;
+ void **buf_list;
+ unsigned int i;
+ void *buf;
+
+ buf_list = devm_kcalloc(dev, num_elems, sizeof(*buf_list), GFP_KERNEL);
+ if (!buf_list)
+ return -ENOMEM;
+
+ viortc->alarmq_bufs = buf_list;
+ viortc->num_alarmq_bufs = num_elems;
+
+ for (i = 0; i < num_elems; i++) {
+ buf = devm_kzalloc(dev, buf_cap, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf_list[i] = buf;
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_init_vqs() - init virtqueues
+ * @viortc: device data
+ *
+ * Inits virtqueues and associated data.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_init_vqs(struct viortc_dev *viortc)
+{
+ struct virtqueue *vqs[VIORTC_MAX_NR_QUEUES];
+ struct virtqueue_info vqs_info[] = {
+ { "requestq", viortc_cb_requestq },
+ { "alarmq", viortc_cb_alarmq },
+ };
+ struct virtio_device *vdev = viortc->vdev;
+ unsigned int num_elems;
+ int nr_queues, ret;
+ bool have_alarms;
+
+ have_alarms = viortc_alarms_supported(vdev);
+
+ if (have_alarms)
+ nr_queues = VIORTC_ALARMQ + 1;
+ else
+ nr_queues = VIORTC_REQUESTQ + 1;
+
+ ret = virtio_find_vqs(vdev, nr_queues, vqs, vqs_info, NULL);
+ if (ret)
+ return ret;
+
+ viortc->vqs[VIORTC_REQUESTQ].vq = vqs[VIORTC_REQUESTQ];
+ spin_lock_init(&viortc->vqs[VIORTC_REQUESTQ].lock);
+
+ if (have_alarms) {
+ viortc->vqs[VIORTC_ALARMQ].vq = vqs[VIORTC_ALARMQ];
+ spin_lock_init(&viortc->vqs[VIORTC_ALARMQ].lock);
+
+ num_elems = virtqueue_get_vring_size(vqs[VIORTC_ALARMQ]);
+ if (num_elems == 0)
+ return -ENOSPC;
+
+ if (!viortc->alarmq_bufs) {
+ ret = viortc_alloc_vq_bufs(viortc, num_elems,
+ VIORTC_ALARMQ_BUF_CAP);
+ if (ret)
+ return ret;
+ } else {
+ viortc->num_alarmq_bufs =
+ min(num_elems, viortc->num_alarmq_bufs);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * viortc_probe() - probe a virtio_rtc virtio device
+ * @vdev: virtio device
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_probe(struct virtio_device *vdev)
+{
+ struct viortc_vq *alarm_viortc_vq;
+ struct virtqueue *alarm_vq;
+ struct viortc_dev *viortc;
+ unsigned long flags;
+ bool notify;
+ int ret;
+
+ viortc = devm_kzalloc(&vdev->dev, sizeof(*viortc), GFP_KERNEL);
+ if (!viortc)
+ return -ENOMEM;
+
+ vdev->priv = viortc;
+ viortc->vdev = vdev;
+
+ ret = viortc_init_vqs(viortc);
+ if (ret)
+ return ret;
+
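+	/* set DRIVER_OK first: viortc_clocks_init() already sends requests */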
+ virtio_device_ready(vdev);
+
+ ret = viortc_clocks_init(viortc);
+ if (ret)
+ goto err_reset_vdev;
+
+ if (viortc_alarms_supported(vdev)) {
+ alarm_viortc_vq = &viortc->vqs[VIORTC_ALARMQ];
+ alarm_vq = alarm_viortc_vq->vq;
+
+ ret = viortc_populate_vq(viortc, alarm_viortc_vq,
+ VIORTC_ALARMQ_BUF_CAP, true);
+ if (ret)
+ goto err_deinit_clocks;
+
+ spin_lock_irqsave(&alarm_viortc_vq->lock, flags);
+ notify = virtqueue_kick_prepare(alarm_vq);
+ spin_unlock_irqrestore(&alarm_viortc_vq->lock, flags);
+
+ if (notify && !virtqueue_notify(alarm_vq)) {
+ ret = -EIO;
+ goto err_deinit_clocks;
+ }
+ }
+
+ return 0;
+
+err_deinit_clocks:
+ viortc_clocks_deinit(viortc);
+
+err_reset_vdev:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+
+ return ret;
+}
+
+/**
+ * viortc_remove() - remove a virtio_rtc virtio device
+ * @vdev: virtio device
+ */
+static void viortc_remove(struct virtio_device *vdev)
+{
+ struct viortc_dev *viortc = vdev->priv;
+
+ viortc_clocks_deinit(viortc);
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+}
+
+static int viortc_freeze(struct virtio_device *dev)
+{
+ /*
+ * Do not reset the device, so that the device may still wake up the
+ * system through an alarmq notification.
+ */
+
+ return 0;
+}
+
+static int viortc_restore(struct virtio_device *dev)
+{
+ struct viortc_dev *viortc = dev->priv;
+ struct viortc_vq *alarm_viortc_vq;
+ struct virtqueue *alarm_vq;
+ bool notify = false;
+ int ret;
+
+ ret = viortc_init_vqs(viortc);
+ if (ret)
+ return ret;
+
+ alarm_viortc_vq = &viortc->vqs[VIORTC_ALARMQ];
+ alarm_vq = alarm_viortc_vq->vq;
+
+ if (viortc_alarms_supported(dev)) {
+ ret = viortc_populate_vq(viortc, alarm_viortc_vq,
+ VIORTC_ALARMQ_BUF_CAP, false);
+ if (ret)
+ return ret;
+
+ notify = virtqueue_kick_prepare(alarm_vq);
+ }
+
+ virtio_device_ready(dev);
+
+ if (notify && !virtqueue_notify(alarm_vq))
+ ret = -EIO;
+
+ return ret;
+}
+
+static unsigned int features[] = {
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS)
+ VIRTIO_RTC_F_ALARM,
+#endif
+};
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_CLOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_rtc_drv = {
+ .driver.name = KBUILD_MODNAME,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = viortc_probe,
+ .remove = viortc_remove,
+ .freeze = pm_sleep_ptr(viortc_freeze),
+ .restore = pm_sleep_ptr(viortc_restore),
+};
+
+module_virtio_driver(virtio_rtc_drv);
+
+MODULE_DESCRIPTION("Virtio RTC driver");
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_rtc_internal.h b/drivers/virtio/virtio_rtc_internal.h
new file mode 100644
index 000000000000..296afee6719b
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_internal.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * virtio_rtc internal interfaces
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _VIRTIO_RTC_INTERNAL_H_
+#define _VIRTIO_RTC_INTERNAL_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+
+/* driver core IFs */
+
+struct viortc_dev;
+
+int viortc_read(struct viortc_dev *viortc, u16 vio_clk_id, u64 *reading);
+int viortc_read_cross(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ u64 *reading, u64 *cycles);
+int viortc_cross_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
+ bool *supported);
+int viortc_read_alarm(struct viortc_dev *viortc, u16 vio_clk_id,
+ u64 *alarm_time, bool *enabled);
+int viortc_set_alarm(struct viortc_dev *viortc, u16 vio_clk_id, u64 alarm_time,
+ bool alarm_enable);
+int viortc_set_alarm_enabled(struct viortc_dev *viortc, u16 vio_clk_id,
+ bool alarm_enable);
+
+struct viortc_class;
+
+struct viortc_class *viortc_class_from_dev(struct device *dev);
+
+/* PTP IFs */
+
+struct viortc_ptp_clock;
+
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_PTP)
+
+struct viortc_ptp_clock *viortc_ptp_register(struct viortc_dev *viortc,
+ struct device *parent_dev,
+ u16 vio_clk_id,
+ const char *ptp_clock_name);
+int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev);
+
+#else
+
+static inline struct viortc_ptp_clock *
+viortc_ptp_register(struct viortc_dev *viortc, struct device *parent_dev,
+ u16 vio_clk_id, const char *ptp_clock_name)
+{
+ return NULL;
+}
+
+static inline int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev)
+{
+ return -ENODEV;
+}
+
+#endif
+
+/* HW counter IFs */
+
+/**
+ * viortc_hw_xtstamp_params() - get HW-specific xtstamp params
+ * @hw_counter: virtio_rtc HW counter type
+ * @cs_id: clocksource id corresponding to hw_counter
+ *
+ * Gets the HW-specific xtstamp params. Returns an error if the driver cannot
+ * support xtstamp.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id);
+
+/* RTC class IFs */
+
+#if IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS)
+
+void viortc_class_alarm(struct viortc_class *viortc_class, u16 vio_clk_id);
+
+void viortc_class_stop(struct viortc_class *viortc_class);
+
+int viortc_class_register(struct viortc_class *viortc_class);
+
+struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id, bool have_alarm,
+ struct device *parent_dev);
+
+#else /* CONFIG_VIRTIO_RTC_CLASS */
+
+static inline void viortc_class_alarm(struct viortc_class *viortc_class,
+ u16 vio_clk_id)
+{
+}
+
+static inline void viortc_class_stop(struct viortc_class *viortc_class)
+{
+}
+
+static inline int viortc_class_register(struct viortc_class *viortc_class)
+{
+ return -ENODEV;
+}
+
+static inline struct viortc_class *viortc_class_init(struct viortc_dev *viortc,
+ u16 vio_clk_id,
+ bool have_alarm,
+ struct device *parent_dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* CONFIG_VIRTIO_RTC_CLASS */
+
+#endif /* _VIRTIO_RTC_INTERNAL_H_ */
diff --git a/drivers/virtio/virtio_rtc_ptp.c b/drivers/virtio/virtio_rtc_ptp.c
new file mode 100644
index 000000000000..f84599950cd4
--- /dev/null
+++ b/drivers/virtio/virtio_rtc_ptp.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Expose virtio_rtc clocks as PTP clocks.
+ *
+ * Copyright (C) 2022-2023 OpenSynergy GmbH
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Derived from ptp_kvm_common.c, virtual PTP 1588 clock for use with KVM
+ * guests.
+ *
+ * Copyright (C) 2017 Red Hat Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include <uapi/linux/virtio_rtc.h>
+
+#include "virtio_rtc_internal.h"
+
+/**
+ * struct viortc_ptp_clock - PTP clock abstraction
+ * @ptp_clock: PTP clock handle for unregistering
+ * @viortc: virtio_rtc device data
+ * @ptp_info: PTP clock description
+ * @vio_clk_id: virtio_rtc clock id
+ * @have_cross: device supports crosststamp with available HW counter
+ */
+struct viortc_ptp_clock {
+ struct ptp_clock *ptp_clock;
+ struct viortc_dev *viortc;
+ struct ptp_clock_info ptp_info;
+ u16 vio_clk_id;
+ bool have_cross;
+};
+
+/**
+ * struct viortc_ptp_cross_ctx - context for get_device_system_crosststamp()
+ * @device_time: device clock reading
+ * @system_counterval: HW counter value at device_time
+ *
+ * Provides the already obtained crosststamp to get_device_system_crosststamp().
+ */
+struct viortc_ptp_cross_ctx {
+ ktime_t device_time;
+ struct system_counterval_t system_counterval;
+};
+
+/* Weak function in case get_device_system_crosststamp() is not supported */
+int __weak viortc_hw_xtstamp_params(u8 *hw_counter, enum clocksource_ids *cs_id)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * viortc_ptp_get_time_fn() - callback for get_device_system_crosststamp()
+ * @device_time: device clock reading
+ * @system_counterval: HW counter value at device_time
+ * @ctx: context with already obtained crosststamp
+ *
+ * Return: zero (success).
+ */
+static int viortc_ptp_get_time_fn(ktime_t *device_time,
+ struct system_counterval_t *system_counterval,
+ void *ctx)
+{
+ struct viortc_ptp_cross_ctx *vio_ctx = ctx;
+
+ *device_time = vio_ctx->device_time;
+ *system_counterval = vio_ctx->system_counterval;
+
+ return 0;
+}
+
+/**
+ * viortc_ptp_do_xtstamp() - get crosststamp from device
+ * @vio_ptp: virtio_rtc PTP clock
+ * @hw_counter: virtio_rtc HW counter type
+ * @cs_id: clocksource id corresponding to hw_counter
+ * @ctx: context for get_device_system_crosststamp()
+ *
+ * Reads HW-specific crosststamp from device.
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_do_xtstamp(struct viortc_ptp_clock *vio_ptp,
+ u8 hw_counter, enum clocksource_ids cs_id,
+ struct viortc_ptp_cross_ctx *ctx)
+{
+ u64 max_ns, ns;
+ int ret;
+
+ ctx->system_counterval.cs_id = cs_id;
+
+ ret = viortc_read_cross(vio_ptp->viortc, vio_ptp->vio_clk_id,
+ hw_counter, &ns,
+ &ctx->system_counterval.cycles);
+ if (ret)
+ return ret;
+
+ max_ns = (u64)ktime_to_ns(KTIME_MAX);
+ if (ns > max_ns)
+ return -EINVAL;
+
+ ctx->device_time = ns_to_ktime(ns);
+
+ return 0;
+}
+
+/*
+ * PTP clock operations
+ */
+
+/**
+ * viortc_ptp_getcrosststamp() - PTP clock getcrosststamp op
+ * @ptp: PTP clock info
+ * @xtstamp: crosststamp
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct viortc_ptp_clock *vio_ptp =
+ container_of(ptp, struct viortc_ptp_clock, ptp_info);
+ struct system_time_snapshot history_begin;
+ struct viortc_ptp_cross_ctx ctx;
+ enum clocksource_ids cs_id;
+ u8 hw_counter;
+ int ret;
+
+ if (!vio_ptp->have_cross)
+ return -EOPNOTSUPP;
+
+ ret = viortc_hw_xtstamp_params(&hw_counter, &cs_id);
+ if (ret)
+ return ret;
+
+ ktime_get_snapshot(&history_begin);
+ if (history_begin.cs_id != cs_id)
+ return -EOPNOTSUPP;
+
+ /*
+ * Getting the timestamp can take many milliseconds with a slow Virtio
+ * device. This is too long for viortc_ptp_get_time_fn() passed to
+	 * get_device_system_crosststamp(), which usually has to return before
+ * the timekeeper seqcount increases (every tick or so).
+ *
+ * So, get the actual cross-timestamp first.
+ */
+ ret = viortc_ptp_do_xtstamp(vio_ptp, hw_counter, cs_id, &ctx);
+ if (ret)
+ return ret;
+
+ ret = get_device_system_crosststamp(viortc_ptp_get_time_fn, &ctx,
+ &history_begin, xtstamp);
+ if (ret)
+ pr_debug("%s: get_device_system_crosststamp() returned %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/* viortc_ptp_adjfine() - unsupported PTP clock adjfine op */
+static int viortc_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+/* viortc_ptp_adjtime() - unsupported PTP clock adjtime op */
+static int viortc_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+/* viortc_ptp_settime64() - unsupported PTP clock settime64 op */
+static int viortc_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+/*
+ * viortc_ptp_gettimex64() - PTP clock gettimex64 op
+ *
+ * Context: Process context.
+ */
+static int viortc_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct viortc_ptp_clock *vio_ptp =
+ container_of(ptp, struct viortc_ptp_clock, ptp_info);
+ int ret;
+ u64 ns;
+
+ ptp_read_system_prets(sts);
+ ret = viortc_read(vio_ptp->viortc, vio_ptp->vio_clk_id, &ns);
+ ptp_read_system_postts(sts);
+
+ if (ret)
+ return ret;
+
+ if (ns > (u64)S64_MAX)
+ return -EINVAL;
+
+ *ts = ns_to_timespec64((s64)ns);
+
+ return 0;
+}
+
+/* viortc_ptp_enable() - unsupported PTP clock enable op */
+static int viortc_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/*
+ * viortc_ptp_info_template - ptp_clock_info template
+ *
+ * The .name member will be set for individual virtio_rtc PTP clocks.
+ *
+ * The .getcrosststamp member will be cleared for PTP clocks not supporting
+ * crosststamp.
+ */
+static const struct ptp_clock_info viortc_ptp_info_template = {
+ .owner = THIS_MODULE,
+ /* .name is set according to clock type */
+ .adjfine = viortc_ptp_adjfine,
+ .adjtime = viortc_ptp_adjtime,
+ .gettimex64 = viortc_ptp_gettimex64,
+ .settime64 = viortc_ptp_settime64,
+ .enable = viortc_ptp_enable,
+ .getcrosststamp = viortc_ptp_getcrosststamp,
+};
+
+/**
+ * viortc_ptp_unregister() - PTP clock unregistering wrapper
+ * @vio_ptp: virtio_rtc PTP clock
+ * @parent_dev: parent device of PTP clock
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int viortc_ptp_unregister(struct viortc_ptp_clock *vio_ptp,
+ struct device *parent_dev)
+{
+ int ret = ptp_clock_unregister(vio_ptp->ptp_clock);
+
+ if (!ret)
+ devm_kfree(parent_dev, vio_ptp);
+
+ return ret;
+}
+
+/**
+ * viortc_ptp_get_cross_cap() - get xtstamp support info from device
+ * @viortc: virtio_rtc device data
+ * @vio_ptp: virtio_rtc PTP clock abstraction
+ *
+ * Context: Process context.
+ * Return: Zero on success, negative error code otherwise.
+ */
+static int viortc_ptp_get_cross_cap(struct viortc_dev *viortc,
+ struct viortc_ptp_clock *vio_ptp)
+{
+ enum clocksource_ids cs_id;
+ bool xtstamp_supported;
+ u8 hw_counter;
+ int ret;
+
+ ret = viortc_hw_xtstamp_params(&hw_counter, &cs_id);
+ if (ret) {
+ vio_ptp->have_cross = false;
+ return 0;
+ }
+
+ ret = viortc_cross_cap(viortc, vio_ptp->vio_clk_id, hw_counter,
+ &xtstamp_supported);
+ if (ret)
+ return ret;
+
+ vio_ptp->have_cross = xtstamp_supported;
+
+ return 0;
+}
+
+/**
+ * viortc_ptp_register() - prepare and register PTP clock
+ * @viortc: virtio_rtc device data
+ * @parent_dev: parent device for PTP clock
+ * @vio_clk_id: id of virtio_rtc clock which backs PTP clock
+ * @ptp_clock_name: PTP clock name
+ *
+ * Context: Process context.
+ * Return: Pointer on success, ERR_PTR() otherwise; NULL if PTP clock support
+ * not available.
+ */
+struct viortc_ptp_clock *viortc_ptp_register(struct viortc_dev *viortc,
+ struct device *parent_dev,
+ u16 vio_clk_id,
+ const char *ptp_clock_name)
+{
+ struct viortc_ptp_clock *vio_ptp;
+ struct ptp_clock *ptp_clock;
+ ssize_t len;
+ int ret;
+
+ vio_ptp = devm_kzalloc(parent_dev, sizeof(*vio_ptp), GFP_KERNEL);
+ if (!vio_ptp)
+ return ERR_PTR(-ENOMEM);
+
+ vio_ptp->viortc = viortc;
+ vio_ptp->vio_clk_id = vio_clk_id;
+ vio_ptp->ptp_info = viortc_ptp_info_template;
+ len = strscpy(vio_ptp->ptp_info.name, ptp_clock_name,
+ sizeof(vio_ptp->ptp_info.name));
+ if (len < 0) {
+ ret = len;
+ goto err_free_dev;
+ }
+
+ ret = viortc_ptp_get_cross_cap(viortc, vio_ptp);
+ if (ret)
+ goto err_free_dev;
+
+ if (!vio_ptp->have_cross)
+ vio_ptp->ptp_info.getcrosststamp = NULL;
+
+ ptp_clock = ptp_clock_register(&vio_ptp->ptp_info, parent_dev);
+ if (IS_ERR(ptp_clock))
+ goto err_on_register;
+
+ vio_ptp->ptp_clock = ptp_clock;
+
+ return vio_ptp;
+
+err_on_register:
+ ret = PTR_ERR(ptp_clock);
+
+err_free_dev:
+ devm_kfree(parent_dev, vio_ptp);
+ return ERR_PTR(ret);
+}
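Userspace reaches the getcrosststamp op above through the PTP character device. A minimal consumer sketch (assuming /dev/ptp0 is the virtio_rtc PHC):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_sys_offset_precise xt = { 0 };
		int fd = open("/dev/ptp0", O_RDWR);

		if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &xt))
			return 1;

		printf("device %lld.%09u system %lld.%09u\n",
		       (long long)xt.device.sec, xt.device.nsec,
		       (long long)xt.sys_realtime.sec, xt.sys_realtime.nsec);
		return 0;
	}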
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
index 1cae9b243ff8..76026d615111 100644
--- a/drivers/w1/slaves/w1_ds2406.c
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -29,8 +29,6 @@ static ssize_t w1_f12_read_state(
{
u8 w1_buf[6] = {W1_F12_FUNC_READ_STATUS, 7, 0, 0, 0, 0};
struct w1_slave *sl = kobj_to_w1_slave(kobj);
- u16 crc = 0;
- int i;
ssize_t rtnval = 1;
if (off != 0)
@@ -47,9 +45,7 @@ static ssize_t w1_f12_read_state(
w1_write_block(sl->master, w1_buf, 3);
w1_read_block(sl->master, w1_buf+3, 3);
- for (i = 0; i < 6; i++)
- crc = crc16_byte(crc, w1_buf[i]);
- if (crc == 0xb001) /* good read? */
+ if (crc16(0, w1_buf, sizeof(w1_buf)) == 0xb001) /* good read? */
*buf = ((w1_buf[3]>>5)&3)|0x30;
else
rtnval = -EIO;
@@ -66,8 +62,6 @@ static ssize_t w1_f12_write_output(
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[6] = {W1_F12_FUNC_WRITE_STATUS, 7, 0, 0, 0, 0};
- u16 crc = 0;
- int i;
ssize_t rtnval = 1;
if (count != 1 || off != 0)
@@ -83,9 +77,7 @@ static ssize_t w1_f12_write_output(
w1_buf[3] = (((*buf)&3)<<5)|0x1F;
w1_write_block(sl->master, w1_buf, 4);
w1_read_block(sl->master, w1_buf+4, 2);
- for (i = 0; i < 6; i++)
- crc = crc16_byte(crc, w1_buf[i]);
- if (crc == 0xb001) /* good read? */
+ if (crc16(0, w1_buf, sizeof(w1_buf)) == 0xb001) /* good read? */
w1_write_8(sl->master, 0xFF);
else
rtnval = -EIO;
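The crc16() form is equivalent to the removed loop: 0xb001 is the CRC-16 residue that results from running the checksum over the transferred bytes together with the CRC the slave appended. As a standalone kernel-side sketch (assuming only linux/crc16.h):

	#include <linux/crc16.h>
	#include <linux/types.h>

	/* buf holds the transferred bytes followed by the device-sent CRC */
	static bool w1_buf_crc_ok(const u8 *buf, size_t len)
	{
		return crc16(0, buf, len) == 0xb001;
	}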
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 691978cddab7..e6b59d921076 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -194,16 +194,16 @@ static void w1_netlink_queue_status(struct w1_cb_block *block,
static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
int portid, int error)
{
- struct {
- struct cn_msg cn;
- struct w1_netlink_msg msg;
- } packet;
- memcpy(&packet.cn, cn, sizeof(packet.cn));
- memcpy(&packet.msg, msg, sizeof(packet.msg));
- packet.cn.len = sizeof(packet.msg);
- packet.msg.len = 0;
- packet.msg.status = (u8)-error;
- cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL);
+ DEFINE_RAW_FLEX(struct cn_msg, packet, data,
+ sizeof(struct w1_netlink_msg));
+ struct w1_netlink_msg *pkt_msg = (struct w1_netlink_msg *)packet->data;
+
+ *packet = *cn;
+ *pkt_msg = *msg;
+ packet->len = sizeof(*pkt_msg);
+ pkt_msg->len = 0;
+ pkt_msg->status = (u8)-error;
+ cn_netlink_send(packet, portid, 0, GFP_KERNEL);
}
/**
@@ -215,22 +215,20 @@ static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
*/
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
{
- struct {
- struct cn_msg cn;
- struct w1_netlink_msg msg;
- } packet;
- memset(&packet, 0, sizeof(packet));
+ DEFINE_RAW_FLEX(struct cn_msg, packet, data,
+ sizeof(struct w1_netlink_msg));
+ struct w1_netlink_msg *pkt_msg = (struct w1_netlink_msg *)packet->data;
- packet.cn.id.idx = CN_W1_IDX;
- packet.cn.id.val = CN_W1_VAL;
+ packet->id.idx = CN_W1_IDX;
+ packet->id.val = CN_W1_VAL;
- packet.cn.seq = dev->seq++;
- packet.cn.len = sizeof(*msg);
+ packet->seq = dev->seq++;
+ packet->len = sizeof(*msg);
- memcpy(&packet.msg, msg, sizeof(*msg));
- packet.msg.len = 0;
+ *pkt_msg = *msg;
+ pkt_msg->len = 0;
- cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL);
+ cn_netlink_send(packet, 0, 0, GFP_KERNEL);
}
static void w1_send_slave(struct w1_master *dev, u64 rn)
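DEFINE_RAW_FLEX() (linux/overflow.h) provides correctly sized, zero-initialized stack storage for a flexible-array struct; roughly, the declaration above amounts to this sketch:

	union {
		u8 bytes[sizeof(struct cn_msg) + sizeof(struct w1_netlink_msg)];
		struct cn_msg obj;
	} packet_u = {};
	struct cn_msg *packet = &packet_u.obj;

The payload then lives in packet->data, which is why the old two-member wrapper struct is no longer needed.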
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0d8d37f712e8..0c25b2ed44eb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -804,6 +804,15 @@ config IMX7ULP_WDT
To compile this driver as a module, choose M here: the
module will be called imx7ulp_wdt.
+config S32G_WDT
+ tristate "S32G Watchdog"
+ depends on ARCH_S32 || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This is the driver for the hardware watchdog on the NXP
+ S32G platforms. If you wish to have watchdog support
+ enabled, say Y, otherwise say N.
+
config DB500_WATCHDOG
tristate "ST-Ericsson DB800 watchdog"
depends on MFD_DB8500_PRCMU
@@ -1001,7 +1010,7 @@ config STM32_WATCHDOG
tristate "STM32 Independent WatchDoG (IWDG) support"
depends on ARCH_STM32 || COMPILE_TEST
select WATCHDOG_CORE
- default y
+ default ARCH_STM32
help
Say Y here to include support for the watchdog timer
in stm32 SoCs.
@@ -1363,6 +1372,17 @@ config INTEL_MID_WATCHDOG
To compile this driver as a module, choose M here.
+config INTEL_OC_WATCHDOG
+ tristate "Intel OC Watchdog"
+ depends on (X86 || COMPILE_TEST) && ACPI && HAS_IOPORT
+ select WATCHDOG_CORE
+ help
+ Hardware driver for Intel Over-Clocking watchdog present in
+ Platform Controller Hub (PCH) chipsets.
+
+ To compile this driver as a module, choose M here: the
+ module will be called intel_oc_wdt.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on X86 && PCI
@@ -1869,7 +1889,7 @@ config OCTEON_WDT
config MARVELL_GTI_WDT
tristate "Marvell GTI Watchdog driver"
depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
- default y
+ default ARCH_THUNDER
select WATCHDOG_CORE
help
Marvell GTI hardware supports watchdog timer. First timeout
@@ -2035,7 +2055,7 @@ config 8xxx_WDT
config PIKA_WDT
tristate "PIKA FPGA Watchdog"
depends on WARP || (PPC64 && COMPILE_TEST)
- default y
+ default WARP
help
This enables the watchdog in the PIKA FPGA. Currently used on
the Warp platform.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c9482904bf87..bbd4d62d2cc3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
obj-$(CONFIG_IMX_SC_WDT) += imx_sc_wdt.o
obj-$(CONFIG_IMX7ULP_WDT) += imx7ulp_wdt.o
+obj-$(CONFIG_S32G_WDT) += s32g_wdt.o
obj-$(CONFIG_DB500_WATCHDOG) += db8500_wdt.o
obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
@@ -150,6 +151,7 @@ obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
+obj-$(CONFIG_INTEL_OC_WATCHDOG) += intel_oc_wdt.o
obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o
obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o
obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
index 95d9e37df41c..66a158f67a71 100644
--- a/drivers/watchdog/apple_wdt.c
+++ b/drivers/watchdog/apple_wdt.c
@@ -95,9 +95,12 @@ static int apple_wdt_ping(struct watchdog_device *wdd)
static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s)
{
struct apple_wdt *wdt = to_apple_wdt(wdd);
+ u32 actual;
writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
- writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+
+ actual = min(s, wdd->max_hw_heartbeat_ms / 1000);
+ writel_relaxed(wdt->clk_rate * actual, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
wdd->timeout = s;
@@ -177,7 +180,7 @@ static int apple_wdt_probe(struct platform_device *pdev)
wdt->wdd.ops = &apple_wdt_ops;
wdt->wdd.info = &apple_wdt_info;
- wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate;
+ wdt->wdd.max_hw_heartbeat_ms = U32_MAX / wdt->clk_rate * 1000;
wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT;
wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL);
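A hedged note on the apple_wdt change: switching from max_timeout to max_hw_heartbeat_ms lets user space request timeouts beyond what the 32-bit comparator can count; the watchdog core then services the hardware within its window while tracking the longer software timeout. Illustrative arithmetic, using the names from the patch:

	u32 hw_max_s = wdd->max_hw_heartbeat_ms / 1000;	/* U32_MAX / clk_rate */
	u32 actual = min(s, hw_max_s);			/* programmed to hardware */
	wdd->timeout = s;				/* user-visible timeout */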
diff --git a/drivers/watchdog/arm_smc_wdt.c b/drivers/watchdog/arm_smc_wdt.c
index 8f3d0c3a005f..bbba23ace7b8 100644
--- a/drivers/watchdog/arm_smc_wdt.c
+++ b/drivers/watchdog/arm_smc_wdt.c
@@ -46,6 +46,8 @@ static int smcwd_call(struct watchdog_device *wdd, enum smcwd_call call,
return -ENODEV;
if (res->a0 == PSCI_RET_INVALID_PARAMS)
return -EINVAL;
+ if (res->a0 == PSCI_RET_DISABLED)
+ return -ENODATA;
if (res->a0 != PSCI_RET_SUCCESS)
return -EIO;
return 0;
@@ -131,10 +133,19 @@ static int smcwd_probe(struct platform_device *pdev)
wdd->info = &smcwd_info;
/* get_timeleft is optional */
- if (smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL))
- wdd->ops = &smcwd_ops;
- else
+ err = smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL);
+ switch (err) {
+ case 0:
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ fallthrough;
+ case -ENODATA:
wdd->ops = &smcwd_timeleft_ops;
+ break;
+ default:
+ wdd->ops = &smcwd_ops;
+ break;
+ }
+
wdd->timeout = res.a2;
wdd->max_timeout = res.a2;
wdd->min_timeout = res.a1;
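Hedged summary of the reworked probe logic above, since the mapping is easy to misread: SMCWD_GET_TIMELEFT is used as a capability probe, and PSCI_RET_DISABLED now means "supported but not running":

	/*
	 * res->a0 from firmware    smcwd_call()   probe decision
	 * PSCI_RET_SUCCESS      ->  0          -> timeleft ops, WDOG_HW_RUNNING set
	 * PSCI_RET_DISABLED     -> -ENODATA    -> timeleft ops, watchdog idle
	 * anything else         -> -EIO etc.   -> plain ops (no get_timeleft)
	 */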
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 1b47a2fc7d17..aba66b8e9d03 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -119,7 +119,7 @@ static inline void at91_wdt_reset(struct at91wdt *wdt)
*/
static void at91_ping(struct timer_list *t)
{
- struct at91wdt *wdt = from_timer(wdt, t, timer);
+ struct at91wdt *wdt = timer_container_of(wdt, t, timer);
if (time_before(jiffies, wdt->next_heartbeat) ||
!watchdog_active(&wdt->wdd)) {
at91_wdt_reset(wdt);
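The from_timer() -> timer_container_of() hunks in this series (here and in bcm47xx, lpc18xx, shwdt below) are mechanical renames; both spellings reduce to container_of() on the callback's timer_list pointer. For illustration:

	/* Equivalent to the converted line above: */
	struct at91wdt *wdt = container_of(t, struct at91wdt, timer);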
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 4c0951307421..e13ec0975bef 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -105,7 +105,7 @@ static const struct watchdog_ops bcm47xx_wdt_hard_ops = {
static void bcm47xx_wdt_soft_timer_tick(struct timer_list *t)
{
- struct bcm47xx_wdt *wdt = from_timer(wdt, t, soft_timer);
+ struct bcm47xx_wdt *wdt = timer_container_of(wdt, t, soft_timer);
u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
if (!atomic_dec_and_test(&wdt->soft_ticks)) {
diff --git a/drivers/watchdog/cros_ec_wdt.c b/drivers/watchdog/cros_ec_wdt.c
index 716c23f4388c..9ffe7f505645 100644
--- a/drivers/watchdog/cros_ec_wdt.c
+++ b/drivers/watchdog/cros_ec_wdt.c
@@ -25,26 +25,22 @@ static int cros_ec_wdt_send_cmd(struct cros_ec_device *cros_ec,
union cros_ec_wdt_data *arg)
{
int ret;
- struct {
- struct cros_ec_command msg;
- union cros_ec_wdt_data data;
- } __packed buf = {
- .msg = {
- .version = 0,
- .command = EC_CMD_HANG_DETECT,
- .insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ?
- sizeof(struct ec_response_hang_detect) :
- 0,
- .outsize = sizeof(struct ec_params_hang_detect),
- },
- .data.req = arg->req
- };
-
- ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg);
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(union cros_ec_wdt_data));
+
+ msg->version = 0;
+ msg->command = EC_CMD_HANG_DETECT;
+ msg->insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ?
+ sizeof(struct ec_response_hang_detect) :
+ 0;
+ msg->outsize = sizeof(struct ec_params_hang_detect);
+ *(struct ec_params_hang_detect *)msg->data = arg->req;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, msg);
if (ret < 0)
return ret;
- arg->resp = buf.data.resp;
+ arg->resp = *(struct ec_response_hang_detect *)msg->data;
return 0;
}
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 77039f2f0be5..afb7887c3a1e 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -30,6 +30,18 @@ struct da9052_wdt_data {
unsigned long jpast;
};
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. (default = "
+ __MODULE_STRING(DA9052_DEF_TIMEOUT) ")");
+
static const struct {
u8 reg_val;
int time; /* Seconds */
@@ -168,10 +180,13 @@ static int da9052_wdt_probe(struct platform_device *pdev)
da9052_wdt = &driver_data->wdt;
da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+ da9052_wdt->min_hw_heartbeat_ms = DA9052_TWDMIN;
da9052_wdt->info = &da9052_wdt_info;
da9052_wdt->ops = &da9052_wdt_ops;
da9052_wdt->parent = dev;
watchdog_set_drvdata(da9052_wdt, driver_data);
+ watchdog_init_timeout(da9052_wdt, timeout, dev);
+ watchdog_set_nowayout(da9052_wdt, nowayout);
if (da9052->fault_log & DA9052_FAULTLOG_TWDERROR)
da9052_wdt->bootstatus |= WDIOF_CARDRESET;
@@ -180,11 +195,15 @@ static int da9052_wdt_probe(struct platform_device *pdev)
if (da9052->fault_log & DA9052_FAULTLOG_VDDFAULT)
da9052_wdt->bootstatus |= WDIOF_POWERUNDER;
- ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
- DA9052_CONTROLD_TWDSCALE, 0);
- if (ret < 0) {
- dev_err(dev, "Failed to disable watchdog bits, %d\n", ret);
+ ret = da9052_reg_read(da9052, DA9052_CONTROL_D_REG);
+ if (ret < 0)
return ret;
+
+ /* Check if FW enabled the watchdog */
+ if (ret & DA9052_CONTROLD_TWDSCALE) {
+ /* Ensure proper initialization */
+ da9052_wdt_start(da9052_wdt);
+ set_bit(WDOG_HW_RUNNING, &da9052_wdt->status);
}
return devm_watchdog_register_device(dev, &driver_data->wdt);
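A hedged note on the da9052 probe rework: instead of unconditionally disabling TWDSCALE, the driver now detects a firmware-started watchdog and flags WDOG_HW_RUNNING. Paraphrasing what the watchdog core does with that bit (see drivers/watchdog/watchdog_dev.c):

	/* While the device node is closed and WDOG_HW_RUNNING is set, a
	 * core worker keeps the hardware alive on the driver's behalf: */
	if (watchdog_hw_running(wdd) && !watchdog_active(wdd))
		wdd->ops->ping(wdd);	/* issued before the hw heartbeat expires */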
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 76dffc89c641..887d5a6c155b 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -29,26 +29,13 @@
#include <linux/watchdog.h>
#include <asm/machine.h>
#include <asm/ebcdic.h>
+#include <asm/diag288.h>
#include <asm/diag.h>
#include <linux/io.h>
#define MAX_CMDLEN 240
#define DEFAULT_CMD "SYSTEM RESTART"
-#define MIN_INTERVAL 15 /* Minimal time supported by diag88 */
-#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */
-
-#define WDT_DEFAULT_TIMEOUT 30
-
-/* Function codes - init, change, cancel */
-#define WDT_FUNC_INIT 0
-#define WDT_FUNC_CHANGE 1
-#define WDT_FUNC_CANCEL 2
-#define WDT_FUNC_CONCEAL 0x80000000
-
-/* Action codes for LPAR watchdog */
-#define LPARWDT_RESTART 0
-
static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD;
static bool conceal_on;
static bool nowayout_info = WATCHDOG_NOWAYOUT;
@@ -75,22 +62,8 @@ static char *cmd_buf;
static int diag288(unsigned int func, unsigned int timeout,
unsigned long action, unsigned int len)
{
- union register_pair r1 = { .even = func, .odd = timeout, };
- union register_pair r3 = { .even = action, .odd = len, };
- int err;
-
diag_stat_inc(DIAG_STAT_X288);
-
- err = -EINVAL;
- asm volatile(
- " diag %[r1],%[r3],0x288\n"
- "0: la %[err],0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : [err] "+d" (err)
- : [r1] "d" (r1.pair), [r3] "d" (r3.pair)
- : "cc", "memory");
- return err;
+ return __diag288(func, timeout, action, len);
}
static int diag288_str(unsigned int func, unsigned int timeout, char *cmd)
@@ -189,8 +162,6 @@ static struct watchdog_device wdt_dev = {
static int __init diag288_init(void)
{
- int ret;
-
watchdog_set_nowayout(&wdt_dev, nowayout_info);
if (machine_is_vm()) {
@@ -199,24 +170,6 @@ static int __init diag288_init(void)
pr_err("The watchdog cannot be initialized\n");
return -ENOMEM;
}
-
- ret = diag288_str(WDT_FUNC_INIT, MIN_INTERVAL, "BEGIN");
- if (ret != 0) {
- pr_err("The watchdog cannot be initialized\n");
- kfree(cmd_buf);
- return -EINVAL;
- }
- } else {
- if (diag288(WDT_FUNC_INIT, WDT_DEFAULT_TIMEOUT,
- LPARWDT_RESTART, 0)) {
- pr_err("The watchdog cannot be initialized\n");
- return -EINVAL;
- }
- }
-
- if (diag288(WDT_FUNC_CANCEL, 0, 0, 0)) {
- pr_err("The watchdog cannot be deactivated\n");
- return -EINVAL;
}
return watchdog_register_device(&wdt_dev);
@@ -228,5 +181,5 @@ static void __exit diag288_exit(void)
kfree(cmd_buf);
}
-module_init(diag288_init);
+module_cpu_feature_match(S390_CPU_FEATURE_D288, diag288_init);
module_exit(diag288_exit);
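module_cpu_feature_match() ties module init (and udev autoloading) to a CPU feature, so the diag288 init only runs when the D288 facility exists. A paraphrase of its expansion from <linux/cpufeature.h>:

	/* Roughly what
	 * module_cpu_feature_match(S390_CPU_FEATURE_D288, diag288_init)
	 * expands to (paraphrased): */
	static int __init cpu_feature_match_init(void)
	{
		if (!cpu_have_feature(cpu_feature(S390_CPU_FEATURE_D288)))
			return -ENODEV;
		return diag288_init();
	}
	module_init(cpu_feature_match_init);
	/* plus a MODULE_DEVICE_TABLE(cpu, ...) entry for autoloading */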
diff --git a/drivers/watchdog/exar_wdt.c b/drivers/watchdog/exar_wdt.c
index 7c61ff343271..c2e3bb08df89 100644
--- a/drivers/watchdog/exar_wdt.c
+++ b/drivers/watchdog/exar_wdt.c
@@ -221,7 +221,7 @@ static const struct watchdog_info exar_wdt_info = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
- .identity = "Exar/MaxLinear XR28V38x Watchdog",
+ .identity = "Exar XR28V38x Watchdog",
};
static const struct watchdog_ops exar_wdt_ops = {
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 7672582fa407..9ab769aa0244 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -58,7 +58,6 @@
#include <linux/platform_device.h> /* For platform_driver framework */
#include <linux/pci.h> /* For pci functions */
#include <linux/ioport.h> /* For io-port access */
-#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
#include <linux/platform_data/itco_wdt.h>
@@ -102,8 +101,6 @@ struct iTCO_wdt_private {
* or memory-mapped PMC register bit 4 (TCO version 3).
*/
unsigned long __iomem *gcs_pmc;
- /* the lock for io operations */
- spinlock_t io_lock;
/* the PCI-device */
struct pci_dev *pci_dev;
/* whether or not the watchdog has been suspended */
@@ -286,13 +283,10 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
unsigned int val;
- spin_lock(&p->io_lock);
-
iTCO_vendor_pre_start(p->smi_res, wd_dev->timeout);
/* disable chipset's NO_REBOOT bit */
if (p->update_no_reboot_bit(p->no_reboot_priv, false)) {
- spin_unlock(&p->io_lock);
dev_err(wd_dev->parent, "failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n");
return -EIO;
}
@@ -309,7 +303,6 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
val &= 0xf7ff;
outw(val, TCO1_CNT(p));
val = inw(TCO1_CNT(p));
- spin_unlock(&p->io_lock);
if (val & 0x0800)
return -1;
@@ -321,8 +314,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
unsigned int val;
- spin_lock(&p->io_lock);
-
iTCO_vendor_pre_stop(p->smi_res);
/* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
@@ -334,8 +325,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
p->update_no_reboot_bit(p->no_reboot_priv, true);
- spin_unlock(&p->io_lock);
-
if ((val & 0x0800) == 0)
return -1;
return 0;
@@ -345,8 +334,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
{
struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev);
- spin_lock(&p->io_lock);
-
/* Reload the timer by writing to the TCO Timer Counter register */
if (p->iTCO_version >= 2) {
outw(0x01, TCO_RLD(p));
@@ -358,7 +345,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
outb(0x01, TCO_RLD(p));
}
- spin_unlock(&p->io_lock);
return 0;
}
@@ -385,24 +371,20 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
/* Write new heartbeat to watchdog */
if (p->iTCO_version >= 2) {
- spin_lock(&p->io_lock);
val16 = inw(TCOv2_TMR(p));
val16 &= 0xfc00;
val16 |= tmrval;
outw(val16, TCOv2_TMR(p));
val16 = inw(TCOv2_TMR(p));
- spin_unlock(&p->io_lock);
if ((val16 & 0x3ff) != tmrval)
return -EINVAL;
} else if (p->iTCO_version == 1) {
- spin_lock(&p->io_lock);
val8 = inb(TCOv1_TMR(p));
val8 &= 0xc0;
val8 |= (tmrval & 0xff);
outb(val8, TCOv1_TMR(p));
val8 = inb(TCOv1_TMR(p));
- spin_unlock(&p->io_lock);
if ((val8 & 0x3f) != tmrval)
return -EINVAL;
@@ -421,19 +403,15 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
/* read the TCO Timer */
if (p->iTCO_version >= 2) {
- spin_lock(&p->io_lock);
val16 = inw(TCO_RLD(p));
val16 &= 0x3ff;
- spin_unlock(&p->io_lock);
time_left = ticks_to_seconds(p, val16);
} else if (p->iTCO_version == 1) {
- spin_lock(&p->io_lock);
val8 = inb(TCO_RLD(p));
val8 &= 0x3f;
if (!(inw(TCO1_STS(p)) & 0x0008))
val8 += (inb(TCOv1_TMR(p)) & 0x3f);
- spin_unlock(&p->io_lock);
time_left = ticks_to_seconds(p, val8);
}
@@ -493,8 +471,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
- spin_lock_init(&p->io_lock);
-
p->tco_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_TCO);
if (!p->tco_res)
return -ENODEV;
@@ -604,6 +580,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
dev_info(dev, "timeout value out of range, using %d\n",
WATCHDOG_TIMEOUT);
+ heartbeat = WATCHDOG_TIMEOUT;
}
watchdog_stop_on_reboot(&p->wddev);
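A hedged note on dropping io_lock: the watchdog core already serializes all ops calls for a device behind a per-device mutex, so the driver-private spinlock guarded nothing extra. Paraphrased from the core (drivers/watchdog/watchdog_dev.c):

	/* Every start/stop/ping/set_timeout call arrives like this: */
	mutex_lock(&wd_data->lock);
	err = wdd->ops->ping(wdd);
	mutex_unlock(&wd_data->lock);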
diff --git a/drivers/watchdog/intel_oc_wdt.c b/drivers/watchdog/intel_oc_wdt.c
new file mode 100644
index 000000000000..7c0551106981
--- /dev/null
+++ b/drivers/watchdog/intel_oc_wdt.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel OC Watchdog driver
+ *
+ * Copyright (C) 2025, Siemens
+ * Author: Diogo Ivo <diogo.ivo@siemens.com>
+ */
+
+#define DRV_NAME "intel_oc_wdt"
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define INTEL_OC_WDT_TOV GENMASK(9, 0)
+#define INTEL_OC_WDT_MIN_TOV 1
+#define INTEL_OC_WDT_MAX_TOV 1024
+#define INTEL_OC_WDT_DEF_TOV 60
+
+/*
+ * One-time writable lock bit. If set, it forbids
+ * modification of itself, _TOV and _EN until the
+ * next reboot.
+ */
+#define INTEL_OC_WDT_CTL_LCK BIT(12)
+
+#define INTEL_OC_WDT_EN BIT(14)
+#define INTEL_OC_WDT_NO_ICCSURV_STS BIT(24)
+#define INTEL_OC_WDT_ICCSURV_STS BIT(25)
+#define INTEL_OC_WDT_RLD BIT(31)
+
+#define INTEL_OC_WDT_STS_BITS (INTEL_OC_WDT_NO_ICCSURV_STS | \
+ INTEL_OC_WDT_ICCSURV_STS)
+
+#define INTEL_OC_WDT_CTRL_REG(wdt) ((wdt)->ctrl_res->start)
+
+struct intel_oc_wdt {
+ struct watchdog_device wdd;
+ struct resource *ctrl_res;
+ bool locked;
+};
+
+static unsigned int heartbeat;
+module_param(heartbeat, uint, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
+ __MODULE_STRING(INTEL_OC_WDT_DEF_TOV) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int intel_oc_wdt_start(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ if (oc_wdt->locked)
+ return 0;
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_EN,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_stop(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_EN,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_ping(struct watchdog_device *wdd)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_RLD,
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int t)
+{
+ struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd);
+
+ outl((inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_TOV) | (t - 1),
+ INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ wdd->timeout = t;
+
+ return 0;
+}
+
+static const struct watchdog_info intel_oc_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = DRV_NAME,
+};
+
+static const struct watchdog_ops intel_oc_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = intel_oc_wdt_start,
+ .stop = intel_oc_wdt_stop,
+ .ping = intel_oc_wdt_ping,
+ .set_timeout = intel_oc_wdt_set_timeout,
+};
+
+static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
+{
+ struct watchdog_info *info;
+ unsigned long val;
+
+ val = inl(INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ if (val & INTEL_OC_WDT_STS_BITS)
+ oc_wdt->wdd.bootstatus |= WDIOF_CARDRESET;
+
+ oc_wdt->locked = !!(val & INTEL_OC_WDT_CTL_LCK);
+
+ if (val & INTEL_OC_WDT_EN) {
+ /*
+ * No need to issue a ping here to "commit" the new timeout
+ * value to hardware as the watchdog core schedules one
+ * immediately when registering the watchdog.
+ */
+ set_bit(WDOG_HW_RUNNING, &oc_wdt->wdd.status);
+
+ if (oc_wdt->locked) {
+ info = (struct watchdog_info *)&intel_oc_wdt_info;
+ /*
+ * Set nowayout unconditionally as we cannot stop
+ * the watchdog.
+ */
+ nowayout = true;
+ /*
+ * If we are locked, read back the current timeout
+ * value and inform the core that we can't change it.
+ */
+ oc_wdt->wdd.timeout = (val & INTEL_OC_WDT_TOV) + 1;
+ info->options &= ~WDIOF_SETTIMEOUT;
+
+ dev_info(oc_wdt->wdd.parent,
+ "Register access locked, heartbeat fixed at: %u s\n",
+ oc_wdt->wdd.timeout);
+ }
+ } else if (oc_wdt->locked) {
+ /*
+ * If the watchdog is disabled and locked there is
+ * nothing we can do with it, so just fail probing.
+ */
+ return -EACCES;
+ }
+
+ val &= ~INTEL_OC_WDT_TOV;
+ outl(val | (oc_wdt->wdd.timeout - 1), INTEL_OC_WDT_CTRL_REG(oc_wdt));
+
+ return 0;
+}
+
+static int intel_oc_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_oc_wdt *oc_wdt;
+ struct watchdog_device *wdd;
+ int ret;
+
+ oc_wdt = devm_kzalloc(&pdev->dev, sizeof(*oc_wdt), GFP_KERNEL);
+ if (!oc_wdt)
+ return -ENOMEM;
+
+ oc_wdt->ctrl_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!oc_wdt->ctrl_res) {
+ dev_err(&pdev->dev, "missing I/O resource\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_region(&pdev->dev, oc_wdt->ctrl_res->start,
+ resource_size(oc_wdt->ctrl_res), pdev->name)) {
+ dev_err(dev, "resource %pR already in use, device disabled\n",
+ oc_wdt->ctrl_res);
+ return -EBUSY;
+ }
+
+ wdd = &oc_wdt->wdd;
+ wdd->min_timeout = INTEL_OC_WDT_MIN_TOV;
+ wdd->max_timeout = INTEL_OC_WDT_MAX_TOV;
+ wdd->timeout = INTEL_OC_WDT_DEF_TOV;
+ wdd->info = &intel_oc_wdt_info;
+ wdd->ops = &intel_oc_wdt_ops;
+ wdd->parent = dev;
+
+ watchdog_init_timeout(wdd, heartbeat, dev);
+
+ ret = intel_oc_wdt_setup(oc_wdt);
+ if (ret)
+ return ret;
+
+ watchdog_set_drvdata(wdd, oc_wdt);
+ watchdog_set_nowayout(wdd, nowayout);
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+
+ return devm_watchdog_register_device(dev, wdd);
+}
+
+static const struct acpi_device_id intel_oc_wdt_match[] = {
+ { "INT3F0D" },
+ { "INTC1099" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, intel_oc_wdt_match);
+
+static struct platform_driver intel_oc_wdt_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = intel_oc_wdt_match,
+ },
+ .probe = intel_oc_wdt_probe,
+};
+
+module_platform_driver(intel_oc_wdt_platform_driver);
+
+MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel OC Watchdog driver");
diff --git a/drivers/watchdog/lenovo_se30_wdt.c b/drivers/watchdog/lenovo_se30_wdt.c
index 024b842499b3..1c73bb7eeeee 100644
--- a/drivers/watchdog/lenovo_se30_wdt.c
+++ b/drivers/watchdog/lenovo_se30_wdt.c
@@ -271,6 +271,8 @@ static int lenovo_se30_wdt_probe(struct platform_device *pdev)
return -EBUSY;
priv->shm_base_addr = devm_ioremap(dev, base_phys, SHM_WIN_SIZE);
+ if (!priv->shm_base_addr)
+ return -ENOMEM;
priv->wdt_cfg.mod = WDT_MODULE;
priv->wdt_cfg.idx = WDT_CFG_INDEX;
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index 28e3fc0df4c6..3e8e80bbcb93 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -77,7 +77,8 @@ static int lpc18xx_wdt_feed(struct watchdog_device *wdt_dev)
static void lpc18xx_wdt_timer_feed(struct timer_list *t)
{
- struct lpc18xx_wdt_dev *lpc18xx_wdt = from_timer(lpc18xx_wdt, t, timer);
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = timer_container_of(lpc18xx_wdt,
+ t, timer);
struct watchdog_device *wdt_dev = &lpc18xx_wdt->wdt_dev;
lpc18xx_wdt_feed(wdt_dev);
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 132699e2f247..b636650b714b 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -579,7 +579,7 @@ static struct notifier_block usb_pcwd_notifier = {
.notifier_call = usb_pcwd_notify_sys,
};
-/**
+/*
* usb_pcwd_delete
*/
static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
@@ -590,7 +590,7 @@ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
kfree(usb_pcwd);
}
-/**
+/*
* usb_pcwd_probe
*
* Called by the usb core when a new device is connected that it thinks
@@ -758,7 +758,7 @@ error:
}
-/**
+/*
* usb_pcwd_disconnect
*
* Called by the usb core when the device is removed from the system.
diff --git a/drivers/watchdog/pretimeout_noop.c b/drivers/watchdog/pretimeout_noop.c
index 4799551dd784..74ec02b9ffca 100644
--- a/drivers/watchdog/pretimeout_noop.c
+++ b/drivers/watchdog/pretimeout_noop.c
@@ -11,7 +11,7 @@
/**
* pretimeout_noop - No operation on watchdog pretimeout event
- * @wdd - watchdog_device
+ * @wdd: watchdog_device
*
* This function prints a message about pretimeout to kernel log.
*/
diff --git a/drivers/watchdog/pretimeout_panic.c b/drivers/watchdog/pretimeout_panic.c
index 2cc3c41d2be5..8c3ac674dc45 100644
--- a/drivers/watchdog/pretimeout_panic.c
+++ b/drivers/watchdog/pretimeout_panic.c
@@ -11,7 +11,7 @@
/**
* pretimeout_panic - Panic on watchdog pretimeout event
- * @wdd - watchdog_device
+ * @wdd: watchdog_device
*
* Panic, watchdog has not been fed till pretimeout event.
*/
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 006f9c61aa64..dfaac5995c84 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -181,6 +181,12 @@ static const struct qcom_wdt_match_data match_data_apcs_tmr = {
.max_tick_count = 0x10000000U,
};
+static const struct qcom_wdt_match_data match_data_ipq5424 = {
+ .offset = reg_offset_data_kpss,
+ .pretimeout = true,
+ .max_tick_count = 0xFFFFFU,
+};
+
static const struct qcom_wdt_match_data match_data_kpss = {
.offset = reg_offset_data_kpss,
.pretimeout = true,
@@ -322,6 +328,7 @@ static const struct dev_pm_ops qcom_wdt_pm_ops = {
};
static const struct of_device_id qcom_wdt_of_table[] = {
+ { .compatible = "qcom,apss-wdt-ipq5424", .data = &match_data_ipq5424 },
{ .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr },
{ .compatible = "qcom,scss-timer", .data = &match_data_apcs_tmr },
{ .compatible = "qcom,kpss-wdt", .data = &match_data_kpss },
diff --git a/drivers/watchdog/s32g_wdt.c b/drivers/watchdog/s32g_wdt.c
new file mode 100644
index 000000000000..ad55063060af
--- /dev/null
+++ b/drivers/watchdog/s32g_wdt.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Watchdog driver for S32G SoC
+ *
+ * Copyright 2017-2019, 2021-2025 NXP.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define DRIVER_NAME "s32g-swt"
+
+#define S32G_SWT_CR(__base) ((__base) + 0x00) /* Control Register offset */
+#define S32G_SWT_CR_SM (BIT(9) | BIT(10)) /* -> Service Mode */
+#define S32G_SWT_CR_STP BIT(2) /* -> Stop Mode Control */
+#define S32G_SWT_CR_FRZ BIT(1) /* -> Debug Mode Control */
+#define S32G_SWT_CR_WEN BIT(0) /* -> Watchdog Enable */
+
+#define S32G_SWT_TO(__base) ((__base) + 0x08) /* Timeout Register offset */
+
+#define S32G_SWT_SR(__base) ((__base) + 0x10) /* Service Register offset */
+#define S32G_WDT_SEQ1 0xA602 /* -> service sequence number 1 */
+#define S32G_WDT_SEQ2 0xB480 /* -> service sequence number 2 */
+
+#define S32G_SWT_CO(__base) ((__base) + 0x14) /* Counter output register */
+
+#define S32G_WDT_DEFAULT_TIMEOUT 30
+
+struct s32g_wdt_device {
+ int rate;
+ void __iomem *base;
+ struct watchdog_device wdog;
+};
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static unsigned int timeout_param = S32G_WDT_DEFAULT_TIMEOUT;
+module_param(timeout_param, uint, 0);
+MODULE_PARM_DESC(timeout_param, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(S32G_WDT_DEFAULT_TIMEOUT) ")");
+
+static bool early_enable;
+module_param(early_enable, bool, 0);
+MODULE_PARM_DESC(early_enable,
+ "Watchdog is started on module insertion (default=false)");
+
+static const struct watchdog_info s32g_wdt_info = {
+ .identity = "s32g watchdog",
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+};
+
+static struct s32g_wdt_device *wdd_to_s32g_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct s32g_wdt_device, wdog);
+}
+
+static unsigned int wdog_sec_to_count(struct s32g_wdt_device *wdev, unsigned int timeout)
+{
+ return wdev->rate * timeout;
+}
+
+static int s32g_wdt_ping(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+
+ writel(S32G_WDT_SEQ1, S32G_SWT_SR(wdev->base));
+ writel(S32G_WDT_SEQ2, S32G_SWT_SR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_start(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long val;
+
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ val |= S32G_SWT_CR_WEN;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_stop(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long val;
+
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ val &= ~S32G_SWT_CR_WEN;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ return 0;
+}
+
+static int s32g_wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+
+ writel(wdog_sec_to_count(wdev, timeout), S32G_SWT_TO(wdev->base));
+
+ wdog->timeout = timeout;
+
+ /*
+ * According to the documentation, the timeout counter is
+ * loaded when the watchdog is serviced (i.e. pinged) or when
+ * the counter is enabled. If the watchdog is already running,
+ * it must either be stopped and started again to update the
+ * timeout register, or a ping can be sent to refresh the
+ * counter. Here we choose to send a ping, which is harmless
+ * if the watchdog is stopped.
+ */
+ return s32g_wdt_ping(wdog);
+}
+
+static unsigned int s32g_wdt_get_timeleft(struct watchdog_device *wdog)
+{
+ struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog);
+ unsigned long counter;
+ bool is_running;
+
+ /*
+ * The counter output can be read only while the SWT is
+ * disabled. Given the latency between the internal counter
+ * and the counter output update, there can be a very small
+ * difference, which is acceptable since the value reported
+ * here has a resolution of one second.
+ */
+ is_running = watchdog_hw_running(wdog);
+
+ if (is_running)
+ s32g_wdt_stop(wdog);
+
+ counter = readl(S32G_SWT_CO(wdev->base));
+
+ if (is_running)
+ s32g_wdt_start(wdog);
+
+ return counter / wdev->rate;
+}
+
+static const struct watchdog_ops s32g_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = s32g_wdt_start,
+ .stop = s32g_wdt_stop,
+ .ping = s32g_wdt_ping,
+ .set_timeout = s32g_wdt_set_timeout,
+ .get_timeleft = s32g_wdt_get_timeleft,
+};
+
+static void s32g_wdt_init(struct s32g_wdt_device *wdev)
+{
+ unsigned long val;
+
+ /* Set the watchdog's Time-Out value */
+ val = wdog_sec_to_count(wdev, wdev->wdog.timeout);
+
+ writel(val, S32G_SWT_TO(wdev->base));
+
+ /*
+ * Get the control register content. We are at init time, the
+ * watchdog should not be started.
+ */
+ val = readl(S32G_SWT_CR(wdev->base));
+
+ /*
+ * We want to allow the watchdog timer to be stopped when the
+ * device enters debug mode.
+ */
+ val |= S32G_SWT_CR_FRZ;
+
+ /*
+ * However, when the CPU is in WFI or suspend mode, the
+ * watchdog must keep counting. The documentation refers to
+ * this as stop mode.
+ */
+ val &= ~S32G_SWT_CR_STP;
+
+ /*
+ * Use the Fixed Service Sequence to ping the watchdog; this
+ * corresponds to the 0x00 configuration value of the service
+ * mode field. It should already be set since it is the default
+ * value, but we clear the field just in case.
+ */
+ val &= ~S32G_SWT_CR_SM;
+
+ writel(val, S32G_SWT_CR(wdev->base));
+
+ /*
+ * When the 'early_enable' option is set, we start the
+ * watchdog from the kernel.
+ */
+ if (early_enable) {
+ s32g_wdt_start(&wdev->wdog);
+ set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
+ }
+}
+
+static int s32g_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct clk *clk;
+ struct s32g_wdt_device *wdev;
+ struct watchdog_device *wdog;
+ int ret;
+
+ wdev = devm_kzalloc(dev, sizeof(*wdev), GFP_KERNEL);
+ if (!wdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdev->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wdev->base))
+ return dev_err_probe(dev, PTR_ERR(wdev->base), "Cannot map resource\n");
+
+ clk = devm_clk_get_enabled(dev, "counter");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Can't get Watchdog clock\n");
+
+ wdev->rate = clk_get_rate(clk);
+ if (!wdev->rate) {
+ dev_err(dev, "Input clock rate is not valid\n");
+ return -EINVAL;
+ }
+
+ wdog = &wdev->wdog;
+ wdog->info = &s32g_wdt_info;
+ wdog->ops = &s32g_wdt_ops;
+
+ /*
+ * The code converts the timeout into a counter value; if the
+ * value is less than 0x100, it is clamped by the SWT module,
+ * so it is safe to specify a zero value as the minimum
+ * timeout.
+ */
+ wdog->min_timeout = 0;
+
+ /*
+ * The counter register is 32 bits long, so the maximum
+ * counter value is UINT_MAX and the timeout in seconds is
+ * that value divided by the rate.
+ *
+ * For instance, a rate of 51 MHz leads to a maximum timeout
+ * of 84 seconds.
+ */
+ wdog->max_timeout = UINT_MAX / wdev->rate;
+
+ /*
+ * The module param and the DT 'timeout-sec' property will
+ * override the default value if they are specified.
+ */
+ ret = watchdog_init_timeout(wdog, timeout_param, dev);
+ if (ret)
+ return ret;
+
+ /*
+ * As soon as the watchdog is started, there is no way to stop
+ * it if the 'nowayout' option is set at boot time.
+ */
+ watchdog_set_nowayout(wdog, nowayout);
+
+ /*
+ * The devm_ version of the watchdog_register_device()
+ * function will call watchdog_unregister_device() when the
+ * device is removed.
+ */
+ watchdog_stop_on_unregister(wdog);
+
+ s32g_wdt_init(wdev);
+
+ ret = devm_watchdog_register_device(dev, wdog);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register watchdog device\n");
+
+ dev_info(dev, "S32G Watchdog Timer Registered, timeout=%ds, nowayout=%d, early_enable=%d\n",
+ wdog->timeout, nowayout, early_enable);
+
+ return 0;
+}
+
+static const struct of_device_id s32g_wdt_dt_ids[] = {
+ { .compatible = "nxp,s32g2-swt" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s32g_wdt_dt_ids);
+
+static struct platform_driver s32g_wdt_driver = {
+ .probe = s32g_wdt_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = s32g_wdt_dt_ids,
+ },
+};
+
+module_platform_driver(s32g_wdt_driver);
+
+MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@linaro.org>");
+MODULE_DESCRIPTION("Watchdog driver for S32G SoC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index bdd81d8074b2..40901bdac426 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -82,6 +82,10 @@
#define GS_CLUSTER2_NONCPU_INT_EN 0x1644
#define GS_RST_STAT_REG_OFFSET 0x3B44
+#define EXYNOS990_CLUSTER2_NONCPU_OUT 0x1620
+#define EXYNOS990_CLUSTER2_NONCPU_INT_EN 0x1644
+#define EXYNOS990_CLUSTER2_WDTRESET_BIT 23
+
/**
* DOC: Quirk flags for different Samsung watchdog IP-cores
*
@@ -259,6 +263,32 @@ static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = {
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
};
+static const struct s3c2410_wdt_variant drv_data_exynos990_cl0 = {
+ .mask_reset_reg = GS_CLUSTER0_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS850_CLUSTER0_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOSAUTOV920_CLUSTER0_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
+ QUIRK_HAS_DBGACK_BIT,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos990_cl2 = {
+ .mask_reset_reg = EXYNOS990_CLUSTER2_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS990_CLUSTER2_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS990_CLUSTER2_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
+ QUIRK_HAS_DBGACK_BIT,
+};
+
static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = {
.mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN,
.mask_bit = 2,
@@ -350,6 +380,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
.data = &drv_data_exynos7 },
{ .compatible = "samsung,exynos850-wdt",
.data = &drv_data_exynos850_cl0 },
+ { .compatible = "samsung,exynos990-wdt",
+ .data = &drv_data_exynos990_cl0 },
{ .compatible = "samsung,exynosautov9-wdt",
.data = &drv_data_exynosautov9_cl0 },
{ .compatible = "samsung,exynosautov920-wdt",
@@ -678,7 +710,8 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt)
if (variant == &drv_data_exynos850_cl0 ||
variant == &drv_data_exynosautov9_cl0 ||
variant == &drv_data_gs101_cl0 ||
- variant == &drv_data_exynosautov920_cl0) {
+ variant == &drv_data_exynosautov920_cl0 ||
+ variant == &drv_data_exynos990_cl0) {
u32 index;
int err;
@@ -700,6 +733,10 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt)
else if (variant == &drv_data_exynosautov920_cl0)
variant = &drv_data_exynosautov920_cl1;
break;
+ case 2:
+ if (variant == &drv_data_exynos990_cl0)
+ variant = &drv_data_exynos990_cl2;
+ break;
default:
return dev_err_probe(dev, -EINVAL, "wrong cluster index: %u\n", index);
}
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 95af9ad94d16..719f100aae60 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -173,7 +173,7 @@ static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
static void sh_wdt_ping(struct timer_list *t)
{
- struct sh_wdt *wdt = from_timer(wdt, t, timer);
+ struct sh_wdt *wdt = timer_container_of(wdt, t, timer);
unsigned long flags;
spin_lock_irqsave(&wdt->lock, flags);
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 8ad06b54c5ad..b356a272ff9a 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -291,7 +291,7 @@ static int stm32_iwdg_irq_init(struct platform_device *pdev,
return 0;
if (of_property_read_bool(np, "wakeup-source")) {
- ret = device_init_wakeup(dev, true);
+ ret = devm_device_init_wakeup(dev);
if (ret)
return ret;
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index dc5f29560e9b..3918a600f2a0 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -264,7 +264,7 @@ static int wdtpci_get_status(int *status)
return 0;
}
-/**
+/*
* wdtpci_get_temperature:
*
* Reports the temperature in degrees Fahrenheit. The API is in
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8c852807ba1c..2de37dcd7556 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -704,15 +704,18 @@ static int __init balloon_add_regions(void)
/*
* Extra regions are accounted for in the physmap, but need
- * decreasing from current_pages to balloon down the initial
- * allocation, because they are already accounted for in
- * total_pages.
+ * decreasing from current_pages and target_pages to balloon
+ * down the initial allocation, because they are already
+ * accounted for in total_pages.
*/
- if (extra_pfn_end - start_pfn >= balloon_stats.current_pages) {
+ pages = extra_pfn_end - start_pfn;
+ if (pages >= balloon_stats.current_pages ||
+ pages >= balloon_stats.target_pages) {
WARN(1, "Extra pages underflow current target");
return -ERANGE;
}
- balloon_stats.current_pages -= extra_pfn_end - start_pfn;
+ balloon_stats.current_pages -= pages;
+ balloon_stats.target_pages -= pages;
}
return 0;
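Illustrative numbers for the tightened balloon check (hypothetical values):

	/* current_pages = 1000, target_pages = 900, pages = 950.
	 * The old code only subtracted from current_pages (1000 - 950 = 50),
	 * but the newly added target_pages -= pages would wrap below zero,
	 * so the check now guards both counters and bails out with -ERANGE. */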
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ef56a2500ed6..da1a7d3d377c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -426,4 +426,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.alloc_pages_op = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.max_mapping_size = swiotlb_max_mapping_size,
+ .map_resource = dma_direct_map_resource,
};